[bugfix] align max_num_batched_tokens with tp*pcp when using FLASHCOMM1 (#6000)

### What this PR does / why we need it?
Align max_num_batched_tokens with tp*pcp when using FLASHCOMM1 to avoid
assert error in `NPUModelRunner._dummy_run`.

- vLLM version: v0.13.0
- vLLM main:
2c24bc6996

---------

Signed-off-by: QiuChunshuo <qiuchunshuo@huawei.com>
This commit is contained in:
Qiu
2026-01-23 14:19:49 +08:00
committed by GitHub
parent f8d03d21f1
commit 749e24f81e
4 changed files with 33 additions and 3 deletions

View File

@@ -18,6 +18,7 @@ from typing import TYPE_CHECKING
from vllm.logger import logger
from vllm.triton_utils import HAS_TRITON
from vllm.utils.math_utils import cdiv
if TYPE_CHECKING:
from vllm.config import VllmConfig
@@ -62,10 +63,25 @@ class AscendConfig:
additional_config.get("enable_shared_expert_dp", False)
and vllm_config.parallel_config.enable_expert_parallel
)
if self.enable_shared_expert_dp:
from vllm_ascend.utils import enable_sp
from vllm_ascend.utils import enable_sp
if self.enable_shared_expert_dp:
assert enable_sp(vllm_config=vllm_config, enable_shared_expert_dp=True)
if vllm_config.parallel_config.prefill_context_parallel_size > 1 and enable_sp(vllm_config=vllm_config):
tp_pcp_size = (
vllm_config.parallel_config.tensor_parallel_size
* vllm_config.parallel_config.prefill_context_parallel_size
)
if vllm_config.scheduler_config.max_num_batched_tokens % tp_pcp_size != 0:
vllm_config.scheduler_config.max_num_batched_tokens = (
cdiv(vllm_config.scheduler_config.max_num_batched_tokens, tp_pcp_size) * tp_pcp_size
)
logger.warning_once(
f"When using FLASHCOMM1, the max_num_batched_tokens should be divisible "
f"by tp_size * pcp_size ({tp_pcp_size}). It has been adjusted to "
f"{vllm_config.scheduler_config.max_num_batched_tokens}."
)
self.multistream_overlap_shared_expert = additional_config.get("multistream_overlap_shared_expert", False)
self.multistream_overlap_gate = additional_config.get("multistream_overlap_gate", False)
self.recompute_scheduler_enable = additional_config.get("recompute_scheduler_enable", False)