Commit c3f89c4

[None][feat] Make 2-model spec dec use the 1-model kernels (Hopper)
Signed-off-by: Mike Iovine <[email protected]>
1 parent: ddf2d01

2 files changed: +7 additions, -13 deletions


tensorrt_llm/_torch/speculative/interface.py
Lines changed: 5 additions & 11 deletions

@@ -5,7 +5,6 @@
 
 import torch
 
-from ..._utils import get_sm_version
 from ..attention_backend.trtllm import AttentionBackend, TrtllmAttention
 from ..pyexecutor.resource_manager import BaseResourceManager
 
@@ -117,17 +116,11 @@ def extend_ctx(self, attention_backend: Type[AttentionBackend]):
             # 1-model has separate logic for handling draft tokens
             return False
 
-        if issubclass(attention_backend,
-                      TrtllmAttention) and self.is_mtp_eagle():
-            # TRTLLM MLA does not work with the chunked context mode.
-            return False
-
-        return not issubclass(attention_backend,
-                              TrtllmAttention) or get_sm_version() != 100
+        return not issubclass(attention_backend, TrtllmAttention)
 
     def attention_need_spec_dec_mode(
             self,
-            spec_resource_manager: BaseResourceManager,
+            spec_resource_manager: Optional[BaseResourceManager],
             is_draft_model: bool,
             attention_backend: Type[AttentionBackend],
             use_chain_drafter: bool,
@@ -137,9 +130,10 @@ def attention_need_spec_dec_mode(
         If true, the attention backend kernel needs to run in spec-dec mode (multi-token query mode).
         """
         is_trtllm_attention = issubclass(attention_backend, TrtllmAttention)
-        return self.is_eagle3_one_model() or (
+
+        return self.is_eagle3_one_model() or not is_draft_model or (
             self.is_eagle3() and spec_resource_manager.is_first_draft
-            and is_trtllm_attention and use_chain_drafter and is_draft_model)
+            and is_trtllm_attention)
 
     @staticmethod
     def from_string(name: Optional[str]) -> "SpeculativeDecodingMode":
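Lifted out of the diff for readability, the two predicates above reduce to the boolean expressions below. This is a minimal standalone sketch, not the SpeculativeDecodingMode enum itself: the helper calls (is_eagle3_one_model(), the issubclass checks, spec_resource_manager.is_first_draft) are flattened into plain boolean parameters that mirror the hunks.

# Minimal sketch (plain booleans, not TensorRT-LLM code) of the two predicates
# after this commit; parameter names mirror the diff above.

def extend_ctx(is_trtllm_attention: bool) -> bool:
    # After the 1-model early return shown in the hunk, chunked-context
    # extension is allowed exactly when the backend is not TrtllmAttention;
    # the get_sm_version() != 100 special case is gone.
    return not is_trtllm_attention


def attention_need_spec_dec_mode(is_eagle3_one_model: bool,
                                 is_eagle3: bool,
                                 is_first_draft: bool,
                                 is_trtllm_attention: bool,
                                 is_draft_model: bool) -> bool:
    # Spec-dec (multi-token query) kernels are requested for the 1-model path,
    # for the target model of a 2-model run, or for the first EAGLE3 draft
    # pass on the TRTLLM backend.
    return (is_eagle3_one_model
            or not is_draft_model
            or (is_eagle3 and is_first_draft and is_trtllm_attention))


# The target model of a 2-model run (is_draft_model=False) now always opts
# into the spec-dec kernels, i.e. the kernels the 1-model path already uses.
assert attention_need_spec_dec_mode(False, True, False, True, False)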

tests/unittest/_torch/speculative/test_eagle3.py
Lines changed: 2 additions & 2 deletions

@@ -136,7 +136,7 @@ def test_llama_eagle3(use_cuda_graph: bool, attn_backend: str,
         num_tokens = len(new_tokens)
 
     accept_rate = num_accepted / num_drafted
-    assert accept_rate > 0.15
+    assert accept_rate > 0.10
 
     # Output tests
     sampling_params = SamplingParams(max_tokens=10, temperature=0)
@@ -182,7 +182,7 @@ def test_llama_eagle3_long_prompt(use_cuda_graph):
         speculative_config=spec_config,
         max_batch_size=1,
         cuda_graph_config=cuda_graph_config,
-        disable_overlap_scheduler=False)
+        disable_overlap_scheduler=True)
 
     prompt = [", ".join(str(i) for i in range(1000))]
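The relaxed threshold applies to the acceptance rate that test_llama_eagle3 already computes from its drafted/accepted token tallies. A minimal standalone sketch of that check, with the counts as illustrative placeholders rather than measured values:

# Sketch of the acceptance-rate assertion this commit relaxes from 0.15 to 0.10.
# The counts are placeholders; the real test accumulates them while iterating
# over the speculative-decoding outputs.
num_accepted = 130   # placeholder: accepted draft tokens
num_drafted = 1000   # placeholder: drafted tokens

accept_rate = num_accepted / num_drafted
assert accept_rate > 0.10, f"acceptance rate too low: {accept_rate:.3f}"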
