From f15dc3fa02b7df28e74fd7100ab87769212d7ec7 Mon Sep 17 00:00:00 2001
From: Qiu
Date: Sun, 4 Jan 2026 17:25:40 +0800
Subject: [PATCH] [bugfix](pcp) expand max_num_tokens for pcp pad (#5478)

### What this PR does / why we need it?

Since the upstream [PR](https://github.com/vllm-project/vllm/pull/28988) with the PCP modifications to `GPUModelRunner` has not yet been merged into vLLM, this PR temporarily expands `max_num_batched_tokens` around the `GPUModelRunner.__init__` call so that the buffers sized from it have room for PCP pad tokens. These changes can be reverted once that [PR](https://github.com/vllm-project/vllm/pull/28988) is merged.

### Does this PR introduce _any_ user-facing change?

No

- vLLM version: v0.13.0
- vLLM main: https://github.com/vllm-project/vllm/commit/5326c89803566a131c928f7fdd2100b75c981a42

Signed-off-by: QiuChunshuo
---
 vllm_ascend/worker/model_runner_v1.py | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/vllm_ascend/worker/model_runner_v1.py b/vllm_ascend/worker/model_runner_v1.py
index 7510b175..d62f76dc 100644
--- a/vllm_ascend/worker/model_runner_v1.py
+++ b/vllm_ascend/worker/model_runner_v1.py
@@ -183,8 +183,15 @@ class ExecuteModelState(NamedTuple):
 class NPUModelRunner(GPUModelRunner):
 
     def __init__(self, vllm_config: VllmConfig, device: torch.device):
+        # TODO(qcs): This manual expand/restore of max_num_batched_tokens
+        # enlarges the buffers that GPUModelRunner.__init__ sizes from it,
+        # leaving room for PCP pad tokens; revert once this PR is merged:
+        # https://github.com/vllm-project/vllm/pull/28988
+        max_pcp_pad_tokens = vllm_config.parallel_config.prefill_context_parallel_size * 2 * vllm_config.scheduler_config.max_num_seqs
+        vllm_config.scheduler_config.max_num_batched_tokens += max_pcp_pad_tokens
         with _torch_cuda_wrapper():
             super().__init__(vllm_config, device)
+        vllm_config.scheduler_config.max_num_batched_tokens -= max_pcp_pad_tokens
         self.max_num_tokens = self.scheduler_config.max_num_batched_tokens
         self.max_num_reqs = self.scheduler_config.max_num_seqs
         self.dp_size = vllm_config.parallel_config.data_parallel_size
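
A minimal sketch of the padding arithmetic that the `max_pcp_pad_tokens` bound above reserves space for, assuming PCP pads each request's token count up to a multiple of `2 * prefill_context_parallel_size` (an assumption inferred from the bound, not stated in the diff; the helper names below are illustrative and not part of the change):

```python
# Illustrative sketch only: pad_to_pcp_multiple and max_pcp_pad_tokens are
# hypothetical helpers, assuming PCP pads every request to a multiple of
# 2 * prefill_context_parallel_size tokens.

def pad_to_pcp_multiple(num_tokens: int, pcp_size: int) -> int:
    """Round one request's token count up to a multiple of 2 * pcp_size."""
    chunk = 2 * pcp_size
    return (num_tokens + chunk - 1) // chunk * chunk

def max_pcp_pad_tokens(pcp_size: int, max_num_seqs: int) -> int:
    """Worst-case pad tokens across a batch: fewer than 2 * pcp_size per
    request, for at most max_num_seqs requests (the bound in this patch)."""
    return pcp_size * 2 * max_num_seqs

# e.g. with pcp_size=4, a 13-token request is padded to 16 tokens (3 pad
# tokens), and a 256-request batch reserves at most 4 * 2 * 256 = 2048
# extra token slots.
assert pad_to_pcp_multiple(13, pcp_size=4) == 16
assert max_pcp_pad_tokens(pcp_size=4, max_num_seqs=256) == 2048
```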