From ff0a1e012a78e72a8edec17b1cd42906c66e2b9b Mon Sep 17 00:00:00 2001
From: Wang Yixuan <88923622+hust17yixuan@users.noreply.github.com>
Date: Tue, 16 Dec 2025 16:40:35 +0800
Subject: [PATCH] [BugFix]Fix FIA input err in DSv3.1 (#5059)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

### What this PR does / why we need it?
When MTP, full decode only, and async_scheduling are used together, the FIA
op receives invalid input because 'actual_seq_lengths' is not monotonically
increasing. This bug is caused by the way the variable 'query_start_loc' is
filled: its unused tail must be filled with the last value of
'cu_num_tokens' instead of '-1'.

### Does this PR introduce _any_ user-facing change?
No

### How was this patch tested?

- vLLM version: v0.12.0
- vLLM main: https://github.com/vllm-project/vllm/commit/ad32e3e19ccf0526cb6744a5fed09a138a5fb2f9

Signed-off-by: hust17yixuan <303660421@qq.com>
---
 vllm_ascend/worker/model_runner_v1.py | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/vllm_ascend/worker/model_runner_v1.py b/vllm_ascend/worker/model_runner_v1.py
index 8f17a1f7..ad0f50b9 100644
--- a/vllm_ascend/worker/model_runner_v1.py
+++ b/vllm_ascend/worker/model_runner_v1.py
@@ -769,6 +769,7 @@ class NPUModelRunner(GPUModelRunner):
 
         self.query_start_loc.np[0] = 0
         self.query_start_loc.np[1:num_reqs + 1] = cu_num_tokens
+        self.query_start_loc.np[num_reqs + 1:].fill(cu_num_tokens[-1])
         self.query_start_loc.copy_to_gpu()
 
         self.seq_lens.np[:num_reqs] = (
@@ -776,8 +777,6 @@ class NPUModelRunner(GPUModelRunner):
             num_scheduled_tokens)
         self.seq_lens.copy_to_gpu()
 
-        # Fill unused with -1. Needed for reshape_and_cache
-        self.query_start_loc.gpu[num_reqs + 1:].fill_(-1)
         self.seq_lens.gpu[num_reqs:].fill_(0)
 
         self.query_lens = torch.from_numpy(num_scheduled_tokens)