
Commit b146eef

[None][feat] Make 2-model spec dec use the 1-model kernels (Hopper)
Signed-off-by: Mike Iovine <[email protected]>
1 parent 3d0e38e commit b146eef

File tree: 2 files changed (+4 −12 lines)

tensorrt_llm/_torch/speculative/interface.py

Lines changed: 3 additions & 11 deletions
@@ -5,7 +5,6 @@
 
 import torch
 
-from ..._utils import get_sm_version
 from ..attention_backend.trtllm import AttentionBackend, TrtllmAttention
 from ..pyexecutor.resource_manager import BaseResourceManager
 
@@ -117,13 +116,7 @@ def extend_ctx(self, attention_backend: Type[AttentionBackend]):
             # 1-model has separate logic for handling draft tokens
             return False
 
-        if issubclass(attention_backend,
-                      TrtllmAttention) and self.is_mtp_eagle():
-            # TRTLLM MLA does not work with the chunked context mode.
-            return False
-
-        return not issubclass(attention_backend,
-                              TrtllmAttention) or get_sm_version() != 100
+        return not issubclass(attention_backend, TrtllmAttention)
 
     def attention_need_spec_dec_mode(
             self,
@@ -137,9 +130,8 @@ def attention_need_spec_dec_mode(
         If true, the attention backend kernel needs to run in spec-dec mode (multi-token query mode).
         """
         is_trtllm_attention = issubclass(attention_backend, TrtllmAttention)
-        return self.is_eagle3_one_model() or (
-            self.is_eagle3() and spec_resource_manager.is_first_draft
-            and is_trtllm_attention and use_chain_drafter and is_draft_model)
+        return self.is_eagle3_one_model() or not is_draft_model or (
+            spec_resource_manager.is_first_draft and is_trtllm_attention)
 
     @staticmethod
     def from_string(name: Optional[str]) -> "SpeculativeDecodingMode":
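
The two hunks above simplify the predicates that gate the chunked-context extension and spec-dec kernel mode. A minimal sketch of the post-commit logic follows; the real methods live on the speculative-decoding mode class in interface.py and take `self` plus a resource manager, so the standalone signatures and flag names here are simplifications for illustration, not the actual API.

```python
# Sketch of the decision logic after this commit (simplified, standalone form).

class TrtllmAttention:  # stand-in for the TRTLLM attention backend class
    pass


def extend_ctx(attention_backend: type) -> bool:
    """Whether draft tokens are handled by extending the (chunked) context.

    After this commit the TRTLLM backend never takes the context-extension path,
    regardless of SM version, so Hopper (SM 90) now uses the same spec-dec
    kernels that SM 100 already did.
    """
    return not issubclass(attention_backend, TrtllmAttention)


def attention_need_spec_dec_mode(is_eagle3_one_model: bool, is_draft_model: bool,
                                 is_first_draft: bool,
                                 attention_backend: type) -> bool:
    """Whether the attention kernel must run in spec-dec (multi-token query) mode."""
    is_trtllm_attention = issubclass(attention_backend, TrtllmAttention)
    return (is_eagle3_one_model or not is_draft_model
            or (is_first_draft and is_trtllm_attention))


# A 2-model target pass with the TRTLLM backend now skips the context extension
# and runs the attention kernel in spec-dec mode (the 1-model kernel path).
assert extend_ctx(TrtllmAttention) is False
assert attention_need_spec_dec_mode(False, is_draft_model=False,
                                    is_first_draft=False,
                                    attention_backend=TrtllmAttention) is True
```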

tests/unittest/_torch/speculative/test_eagle3.py

Lines changed: 1 addition & 1 deletion
@@ -136,7 +136,7 @@ def test_llama_eagle3(use_cuda_graph: bool, attn_backend: str,
     num_tokens = len(new_tokens)
 
     accept_rate = num_accepted / num_drafted
-    assert accept_rate > 0.15
+    assert accept_rate > 0.10
 
     # Output tests
     sampling_params = SamplingParams(max_tokens=10, temperature=0)
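
The test change relaxes the minimum draft acceptance rate from 0.15 to 0.10. A hypothetical sketch of how such a rate might be tallied is below; the helper and the per-step counts are assumptions for illustration, and only the ratio and the relaxed 0.10 threshold come from the diff.

```python
# Hypothetical acceptance-rate check, mirroring the assertion in the test above.

def acceptance_rate(accepted_per_step: list, drafted_per_step: list) -> float:
    """Fraction of drafted tokens that the target model accepted."""
    num_accepted = sum(accepted_per_step)
    num_drafted = sum(drafted_per_step)
    return num_accepted / num_drafted


# e.g. three decoding steps that draft 4 tokens each and accept 2, 1, and 2
rate = acceptance_rate([2, 1, 2], [4, 4, 4])
assert rate > 0.10
```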
