[Feat] Adapted mtp function to Qwen3-next (#3918)
### What this PR does / why we need it?
Adapts the MTP (multi-token prediction) speculative-decoding path to the Qwen3-Next model.
- vLLM version: v0.11.0
- vLLM main:
83f478bb19
Signed-off-by: drslark <slarksblood@qq.com>
This commit is contained in:
@@ -1852,7 +1852,7 @@ class NPUModelRunner(LoRAModelRunnerMixin):
             extra_attn_metadata_args = dict(
                 num_accepted_tokens=self.num_accepted_tokens.gpu[:num_reqs],
-                num_draft_tokens=self.num_draft_tokens.gpu[:num_reqs],
+                num_decode_draft_tokens_cpu=self.num_draft_tokens.gpu[:num_reqs],
             )
             attn_metadata_i = builder.build(
@@ -1948,11 +1948,10 @@ class NPUModelRunner(LoRAModelRunnerMixin):
                 attn_state = AscendAttentionState.SpecDecoding
             # Speculative decoding.
             elif np.all(num_valid_tokens == 1):
                 if self.drafter and (self.drafter.name == SpecDcodeType.EAGLE
                                      or self.drafter.name == SpecDcodeType.EAGLE3):
                     attn_state = AscendAttentionState.ChunkedPrefill
                 else:
                     if self.speculative_config and self.speculative_config.method == 'deepseek_mtp':
                         attn_state = AscendAttentionState.SpecDecoding
                     else:
                         attn_state = AscendAttentionState.ChunkedPrefill
             # splitfuse
             elif not ascend_config.ascend_scheduler_config.enabled or self.chunked_prefill_enabled:
                 attn_state = AscendAttentionState.ChunkedPrefill
@@ -2548,7 +2547,7 @@ class NPUModelRunner(LoRAModelRunnerMixin):
         with ProfileExecuteDuration().capture_async("Draft"):
             if self.speculative_config:
                 use_padded_batch_for_eagle = self.speculative_config and \
-                    self.speculative_config.method == "deepseek_mtp" and \
+                    self.speculative_config.method in ("deepseek_mtp", "qwen3_next_mtp") and \
                     not self.speculative_config.disable_padded_drafter_batch
                 if use_padded_batch_for_eagle:
                     # EAGLE speculative decoding can use the GPU sampled tokens
Reference in New Issue
Block a user