From 7671ce1bf12b3df45aaa92c6316b40d8f954230a Mon Sep 17 00:00:00 2001 From: Yuzhou Tong <48299280+YzTongNiar@users.noreply.github.com> Date: Wed, 17 Dec 2025 20:19:02 +0800 Subject: [PATCH] Fix a data conversion bug introduced by commit 3b7eb51 in main#4655 (#5115) ### What this PR does / why we need it? Fix a data conversion bug introduced by [main#4655](https://github.com/vllm-project/vllm-ascend/commit/3b7eb5179f2b5d52e9bd693095d51de604ba5ece) ### Does this PR introduce _any_ user-facing change? ### How was this patch tested? - vLLM version: v0.12.0 - vLLM main: https://github.com/vllm-project/vllm/commit/ad32e3e19ccf0526cb6744a5fed09a138a5fb2f9 Signed-off-by: tongyuzhou Co-authored-by: tongyuzhou Co-authored-by: weijinqian0 <1184188277@qq.com> --- vllm_ascend/worker/model_runner_v1.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vllm_ascend/worker/model_runner_v1.py b/vllm_ascend/worker/model_runner_v1.py index 5739c0f3..ffc2ee04 100644 --- a/vllm_ascend/worker/model_runner_v1.py +++ b/vllm_ascend/worker/model_runner_v1.py @@ -3079,7 +3079,7 @@ class NPUModelRunner(GPUModelRunner): (2 * self.pcp_size)).astype(np.int32) * (2 * self.pcp_size) num_padded_scheduled_tokens[:num_decode_reqs] = ( tokens[:num_decode_reqs] * self.pcp_size) - self.num_pcp_pads = num_padded_scheduled_tokens - tokens + self.num_pcp_pads = torch.tensor(num_padded_scheduled_tokens - tokens) cu_padded_tokens, pcp_padded_arange = \ self._get_cumsum_and_arange(num_padded_scheduled_tokens) unpad_mask = torch.from_numpy(