diff --git a/vllm_ascend/worker/model_runner_v1.py b/vllm_ascend/worker/model_runner_v1.py
index 7510b175..d62f76dc 100644
--- a/vllm_ascend/worker/model_runner_v1.py
+++ b/vllm_ascend/worker/model_runner_v1.py
@@ -183,8 +183,15 @@ class ExecuteModelState(NamedTuple):
 class NPUModelRunner(GPUModelRunner):
 
     def __init__(self, vllm_config: VllmConfig, device: torch.device):
+        # TODO(qcs): This manual pad/unpad around GPUModelRunner.__init__
+        # is used to expand some buffers; it should be reverted after the
+        # following PR is merged:
+        # https://github.com/vllm-project/vllm/pull/28988
+        max_pcp_pad_tokens = vllm_config.parallel_config.prefill_context_parallel_size * 2 * vllm_config.scheduler_config.max_num_seqs
+        vllm_config.scheduler_config.max_num_batched_tokens += max_pcp_pad_tokens
         with _torch_cuda_wrapper():
             super().__init__(vllm_config, device)
+        vllm_config.scheduler_config.max_num_batched_tokens -= max_pcp_pad_tokens
         self.max_num_tokens = self.scheduler_config.max_num_batched_tokens
         self.max_num_reqs = self.scheduler_config.max_num_seqs
         self.dp_size = vllm_config.parallel_config.data_parallel_size
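To make the intent of the temporary pad/unpad around `super().__init__()` easier to follow, here is a minimal, self-contained sketch of the same pattern. `BaseRunner`, `PaddedRunner`, and the `config` dict are hypothetical stand-ins, not the actual vLLM classes; the real change adjusts `vllm_config.scheduler_config.max_num_batched_tokens` exactly as shown in the diff above, so that buffers allocated in the parent constructor get the padded capacity while scheduling-facing limits keep the original value.

```python
# Minimal sketch of the pad/unpad pattern (hypothetical names, not the
# actual vLLM classes): the parent constructor sizes persistent buffers
# from max_num_batched_tokens, so the value is inflated only for the
# duration of that call and restored afterwards.
import torch


class BaseRunner:
    def __init__(self, max_num_batched_tokens: int, hidden_size: int = 16):
        # Buffers allocated here keep the (padded) capacity.
        self.input_buffer = torch.zeros(max_num_batched_tokens, hidden_size)


class PaddedRunner(BaseRunner):
    def __init__(self, config: dict):
        # Extra room for prefill-context-parallel padding:
        # 2 tokens per sequence per PCP rank (mirrors the diff above).
        pad = config["pcp_size"] * 2 * config["max_num_seqs"]

        config["max_num_batched_tokens"] += pad        # pad
        super().__init__(config["max_num_batched_tokens"])
        config["max_num_batched_tokens"] -= pad        # unpad

        # Scheduling-facing limit uses the original, unpadded value.
        self.max_num_tokens = config["max_num_batched_tokens"]


config = {"pcp_size": 2, "max_num_seqs": 4, "max_num_batched_tokens": 128}
runner = PaddedRunner(config)
print(runner.input_buffer.shape[0])  # 144: buffer sized with padding
print(runner.max_num_tokens)         # 128: limit restored for scheduling
```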