From f4605c2b3cb09dbc97887261a239b8e5478871b9 Mon Sep 17 00:00:00 2001
From: Yizhou <136800916+yiz-liu@users.noreply.github.com>
Date: Thu, 8 Jan 2026 19:41:08 +0800
Subject: [PATCH] [Fix] Fixes speculative decode indexing and unpad condition for attention metadata (#5626)

### What this PR does / why we need it?

This addresses the issues reported in #5356 and #4963; we believe the
unnecessary method-specific conditions guarding the unpad path are the root
cause.

Change the unpad trigger to be driven by actual size mismatches (num_reqs vs.
base_num_reqs, or scheduled vs. input token counts) rather than by specific
speculative-method flags, and remove the brittle workarounds that forced
request counts and sliced query start locations. This prevents incorrect
indexing and length mismatches during speculative decoding and makes metadata
unpadding more robust across scheduling modes.

### Does this PR introduce _any_ user-facing change?

None.

### How was this patch tested?

Covered by existing test cases.

- vLLM version: v0.13.0
- vLLM main: https://github.com/vllm-project/vllm/commit/8be6432bdaf6275664d857b1e5e9bf8ed1ce299e

---------

Signed-off-by: Yizhou Liu
---
 vllm_ascend/attention/utils.py            |  7 +++++--
 vllm_ascend/spec_decode/eagle_proposer.py | 13 +++----------
 vllm_ascend/worker/model_runner_v1.py     |  4 +---
 3 files changed, 9 insertions(+), 15 deletions(-)

diff --git a/vllm_ascend/attention/utils.py b/vllm_ascend/attention/utils.py
index 9168224c..be073c46 100644
--- a/vllm_ascend/attention/utils.py
+++ b/vllm_ascend/attention/utils.py
@@ -117,8 +117,11 @@ class AscendCommonAttentionMetadata(CommonAttentionMetadata):
             num_actual_tokens=num_actual_tokens,
             max_query_len=self.max_query_len,
             decode_token_per_req=self.decode_token_per_req,
-            block_table_tensor=self.block_table_tensor[:num_actual_reqs],
-            slot_mapping=self.slot_mapping[:num_actual_tokens],
+            # NOTE: Keep all tokens for block_table_tensor and slot_mapping;
+            # otherwise reshape-and-cache fails with a shape mismatch.
+            # This is odd, since vLLM itself slices these as well.
+            block_table_tensor=self.block_table_tensor,
+            slot_mapping=self.slot_mapping,
             causal=self.causal,
             actual_seq_lengths_q=self.actual_seq_lengths_q[:num_actual_tokens],
             positions=self.positions[:num_actual_tokens],
diff --git a/vllm_ascend/spec_decode/eagle_proposer.py b/vllm_ascend/spec_decode/eagle_proposer.py
index 694f40c0..ab186457 100644
--- a/vllm_ascend/spec_decode/eagle_proposer.py
+++ b/vllm_ascend/spec_decode/eagle_proposer.py
@@ -749,13 +749,6 @@ class EagleProposer(VllmEagleProposer):
         num_reqs = common_attn_metadata.num_reqs
         device = valid_sampled_tokens_count.device
 
-        if num_reqs != spec_decode_metadata.cu_num_draft_tokens.shape[0]:
-            # TODO: This is a serious issue and should be taken care of ASAP
-            # In short, why input_batch.num_reqs != attn_metadata.num_reqs?
-            # Previously in #4963, we modified `query_start_loc`, but this
-            # problem remains unsolved.
-            num_reqs = spec_decode_metadata.cu_num_draft_tokens.shape[0]
-
         token_indices_to_sample = torch.empty((num_reqs, ),
                                               dtype=torch.int32,
                                               device=device)
@@ -787,9 +780,9 @@ class EagleProposer(VllmEagleProposer):
             torch.zeros_like(num_draft_tokens_gpu),
         )
 
-        query_start_loc = common_attn_metadata.query_start_loc[
-            1:1 + num_rejected_tokens_gpu.shape[0]]
-        token_indices_to_sample = query_start_loc - 1 - num_rejected_tokens_gpu
+        token_indices_to_sample = (
+            common_attn_metadata.query_start_loc[1:] - 1 -
+            num_rejected_tokens_gpu)
 
         query_start_loc_cpu = common_attn_metadata.query_start_loc_cpu
 
diff --git a/vllm_ascend/worker/model_runner_v1.py b/vllm_ascend/worker/model_runner_v1.py
index a1788d8d..817c226c 100644
--- a/vllm_ascend/worker/model_runner_v1.py
+++ b/vllm_ascend/worker/model_runner_v1.py
@@ -1013,9 +1013,7 @@ class NPUModelRunner(GPUModelRunner):
         if self.speculative_config and \
                 self.spec_decode_common_attn_metadata is None:
             self.spec_decode_common_attn_metadata = common_attn_metadata
-            if self.speculative_config.method in ("eagle", "eagle3") and \
-                (self.vllm_config.speculative_config.enforce_eager \
-                    or self.use_async_scheduling):
+            if num_reqs != base_num_reqs or total_num_scheduled_tokens != num_input_tokens:
                 self.spec_decode_common_attn_metadata = \
                     self.spec_decode_common_attn_metadata.unpadded(
                         total_num_scheduled_tokens, base_num_reqs)
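
For reviewers unfamiliar with the unpad path, below is a minimal, self-contained sketch of the new size-mismatch trigger in isolation. It is illustrative only: `PaddedBatchSketch` and `maybe_unpad` are hypothetical stand-ins and do not exist in the codebase; only the slicing inside `unpadded()` mirrors the behavior of the real `AscendCommonAttentionMetadata.unpadded` shown in the first hunk, which the new condition in `model_runner_v1.py` now calls whenever the padded sizes disagree with the actual scheduled sizes.

```python
# Illustrative sketch of the size-mismatch-driven unpad trigger.
# PaddedBatchSketch is a hypothetical stand-in for the padded attention
# metadata; the real class lives in vllm_ascend/attention/utils.py.
from dataclasses import dataclass

import torch


@dataclass
class PaddedBatchSketch:
    """Hypothetical stand-in for padded common attention metadata."""
    query_start_loc: torch.Tensor  # shape: (padded_num_reqs + 1,)
    positions: torch.Tensor        # shape: (padded_num_tokens,)

    def unpadded(self, num_actual_tokens: int,
                 num_actual_reqs: int) -> "PaddedBatchSketch":
        # Trim per-request and per-token tensors to the actual sizes.
        # Per the NOTE in the patch, the real implementation keeps
        # block_table_tensor and slot_mapping unsliced.
        return PaddedBatchSketch(
            query_start_loc=self.query_start_loc[:num_actual_reqs + 1],
            positions=self.positions[:num_actual_tokens],
        )


def maybe_unpad(batch: PaddedBatchSketch, num_reqs: int, base_num_reqs: int,
                total_num_scheduled_tokens: int,
                num_input_tokens: int) -> PaddedBatchSketch:
    # New trigger: unpad whenever the (possibly padded) request or token
    # counts differ from the actual scheduled counts, regardless of the
    # speculative method, eager mode, or async scheduling.
    if num_reqs != base_num_reqs or total_num_scheduled_tokens != num_input_tokens:
        return batch.unpadded(total_num_scheduled_tokens, base_num_reqs)
    return batch


if __name__ == "__main__":
    # 2 real requests / 4 real tokens inside a batch padded to 4 requests / 8 tokens.
    padded = PaddedBatchSketch(
        query_start_loc=torch.tensor([0, 2, 4, 4, 4], dtype=torch.int32),
        positions=torch.zeros(8, dtype=torch.int64),
    )
    trimmed = maybe_unpad(padded, num_reqs=4, base_num_reqs=2,
                          total_num_scheduled_tokens=4, num_input_tokens=8)
    print(trimmed.query_start_loc.shape, trimmed.positions.shape)
```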