bugfix for mtp fullgraph (#3845)

### What this PR does / why we need it?
Fixes full-graph (ACL graph) capture when MTP (multi-token prediction) speculative decoding is enabled. With `num_speculative_tokens > 1`, each uniform decode step processes `num_speculative_tokens + 1` tokens per request, so in `FULL_DECODE_ONLY` mode every capture size must be a multiple of that length and the largest size must reach `max_num_seqs * (num_speculative_tokens + 1)`; otherwise the draft model falls back to eager mode. This PR factors the speculative size adjustment into a `_update_spec_aclgraph_sizes` helper, applies it when full-graph modes are selected, and captures decode graphs over the already-adjusted batch sizes in ascending order.
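
For concreteness, a minimal sketch of the size rule the diff below enforces; all values here (`num_speculative_tokens = 2`, `max_num_seqs = 16`, the size list) are illustrative, not taken from the PR:

```python
# Illustrative only: mirrors the multiple-of-(k+1) rule from the diff below.
num_speculative_tokens = 2                                # MTP drafts per step (assumed)
uniform_decode_query_len = num_speculative_tokens + 1     # 3 tokens per request
max_num_seqs = 16                                         # assumed scheduler limit
max_num_tokens = max_num_seqs * uniform_decode_query_len  # 48

original_sizes = [4, 8, 16]  # hypothetical default cudagraph_capture_sizes

# 4, 8, 16 are not multiples of 3, so a FULL_DECODE_ONLY graph captured at those
# sizes could never be replayed for an MTP decode batch; scale them instead.
if not all(s % uniform_decode_query_len == 0 for s in original_sizes):
    enlarged_sizes = [
        s * uniform_decode_query_len for s in original_sizes
        if s >= uniform_decode_query_len
        and s * uniform_decode_query_len <= max_num_tokens
    ]
    print(enlarged_sizes)  # [12, 24, 48]: multiples of 3, capped at max_num_tokens
```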

### Does this PR introduce _any_ user-facing change?
no

### How was this patch tested?

- vLLM version: v0.11.0rc3
- vLLM main: 83f478bb19

Signed-off-by: zouyida2052 <zouyida2002@gmail.com>
Author: zouyida2052
Date: 2025-10-29 23:50:13 +08:00
Committed by: GitHub
Parent: d6ef3df3b3
Commit: adadd50613
3 changed files with 51 additions and 27 deletions


@@ -306,6 +306,7 @@ class NPUPlatform(Platform):
 **********************************************************************************\033[0m
             """
             logger.warning(warning_message)
+            update_aclgraph_sizes(vllm_config)
         else:
             logger.info(
                 "%s cudagraph_mode is not support on NPU. falling back to NONE",
@@ -343,6 +344,7 @@ class NPUPlatform(Platform):
 **********************************************************************************\033[0m
             """
             logger.warning(warning_message)
+            update_aclgraph_sizes(vllm_config)
         else:
             logger.info(
                 "%s cudagraph_mode is not support on NPU. falling back to NONE",


@@ -349,6 +349,12 @@ def update_cudagraph_capture_sizes(vllm_config: VllmConfig,
 def update_aclgraph_sizes(vllm_config: VllmConfig) -> None:
     """Update ACL graph capture sizes based on hardware limitations"""
+    from vllm.config.compilation import CUDAGraphMode
+    if vllm_config.compilation_config.cudagraph_mode == CUDAGraphMode.FULL_DECODE_ONLY:
+        if vllm_config.speculative_config is not None and \
+                vllm_config.speculative_config.num_speculative_tokens > 1:
+            _update_spec_aclgraph_sizes(vllm_config)
+        return
     # NOTE: Currently, we can only capture 1800 graphs at most,
     # due to the limitation of ACL graph. This number is bounded by
     # the number of streams, which is 2048, we save 248 streams
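
The new early return means that in `FULL_DECODE_ONLY` mode only the speculative adjustment runs; the stream-limit trimming below is skipped entirely. A minimal mock (not vLLM's real config classes) of that dispatch:

```python
# Mock objects only; CUDAGraphMode and the config classes are stand-ins.
from types import SimpleNamespace

FULL_DECODE_ONLY = "FULL_DECODE_ONLY"

def update_aclgraph_sizes(cfg) -> None:
    if cfg.compilation_config.cudagraph_mode == FULL_DECODE_ONLY:
        if cfg.speculative_config is not None and \
                cfg.speculative_config.num_speculative_tokens > 1:
            print("apply speculative size adjustment")
        return  # skip the 1800-graph / stream-count trimming below
    print("apply stream-limit trimming")

cfg = SimpleNamespace(
    compilation_config=SimpleNamespace(cudagraph_mode=FULL_DECODE_ONLY),
    speculative_config=SimpleNamespace(num_speculative_tokens=2),
)
update_aclgraph_sizes(cfg)  # -> "apply speculative size adjustment"
```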
@@ -459,25 +465,48 @@ def update_aclgraph_sizes(vllm_config: VllmConfig) -> None:
         vllm_config.model_config.architectures[0], num_hidden_layers,
         len(original_sizes))
 
+    if vllm_config.speculative_config is not None and \
+            vllm_config.speculative_config.num_speculative_tokens > 1:
+        _update_spec_aclgraph_sizes(vllm_config)
+
+
+def _update_spec_aclgraph_sizes(vllm_config: VllmConfig) -> None:
     # default or defined cudagraph_capture_sizes may not consider num_speculative_tokens>1 scenario
     # the maximum size cudagraph_capture_sizes[0] should be greater or equal than
     # (num_speculative_tokens+1)*max_num_seqs, otherwise draft model will run in eager mode
     if vllm_config.speculative_config is not None and \
             vllm_config.speculative_config.num_speculative_tokens > 1:
+        from vllm.config.compilation import CUDAGraphMode
         compilation_config = vllm_config.compilation_config
         num_speculative_tokens = vllm_config.speculative_config.num_speculative_tokens
+        uniform_decode_query_len = num_speculative_tokens + 1
         max_num_seqs = vllm_config.scheduler_config.max_num_seqs
+        max_num_tokens = max_num_seqs * uniform_decode_query_len
         original_sizes, compilation_config.cudagraph_capture_sizes = \
             compilation_config.cudagraph_capture_sizes, None
         assert len(original_sizes) > 0
-        if original_sizes[0] < (num_speculative_tokens + 1) * max_num_seqs:
-            enlarged_sizes = [(num_speculative_tokens + 1) * size
-                              for size in original_sizes]
+        if vllm_config.compilation_config.cudagraph_mode == CUDAGraphMode.FULL_DECODE_ONLY and \
+                not all(size % uniform_decode_query_len == 0 for size in original_sizes):
+            enlarged_sizes = [
+                size * uniform_decode_query_len for size in original_sizes
+                if size >= uniform_decode_query_len and size *
+                uniform_decode_query_len <= max_num_tokens
+            ]
             if vllm_version_is("0.11.0"):
                 compilation_config.init_with_cudagraph_sizes(enlarged_sizes)
             else:
                 update_cudagraph_capture_sizes(vllm_config, enlarged_sizes)
-            logger.info(
-                "Adjusted ACL graphs: %s → %s for speculative decoding",
-                original_sizes, enlarged_sizes)
+            logger.info("Adjusted ACL graphs: %s → %s for speculative decoding",
+                        original_sizes, enlarged_sizes)
+        elif original_sizes[0] < max_num_tokens:
+            enlarged_sizes = [
+                size * uniform_decode_query_len for size in original_sizes
+            ]
+            if vllm_version_is("0.11.0"):
+                compilation_config.init_with_cudagraph_sizes(enlarged_sizes)
+            else:
+                update_cudagraph_capture_sizes(vllm_config, enlarged_sizes)
+            logger.info("Adjusted ACL graphs: %s → %s for speculative decoding",
+                        original_sizes, enlarged_sizes)
         else:
             compilation_config.cudagraph_capture_sizes = original_sizes
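
The helper now has two enlargement branches. The first (above) handles `FULL_DECODE_ONLY` sizes that are not multiples of `uniform_decode_query_len`; the second fires when the largest size is simply too small to cover a full decode batch. A worked example of that second branch with assumed numbers:

```python
# Assumed values; sizes are kept descending, so original_sizes[0] is the max
# (per the "maximum size cudagraph_capture_sizes[0]" comment in the diff).
uniform_decode_query_len = 3                   # num_speculative_tokens (2) + 1
max_num_tokens = 8 * uniform_decode_query_len  # max_num_seqs (8) * 3 = 24
original_sizes = [8, 4, 2, 1]

if original_sizes[0] < max_num_tokens:         # 8 < 24: largest graph too small
    enlarged_sizes = [s * uniform_decode_query_len for s in original_sizes]
    print(enlarged_sizes)  # [24, 12, 6, 3] -- max now covers 8 seqs * 3 tokens
```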


@@ -3885,7 +3885,7 @@ class NPUModelRunner(LoRAModelRunnerMixin):
         if aclgraph_mode.mixed_mode() != CUDAGraphMode.NONE:
             aclgraph_runtime_mode = aclgraph_mode.mixed_mode()
 
-            compilation_cases = list(reversed(self.aclgraph_batch_sizes))
+            compilation_cases = sorted(self.aclgraph_batch_sizes)
             try:
                 self._capture_aclgraphs(
@@ -3914,14 +3914,7 @@ class NPUModelRunner(LoRAModelRunnerMixin):
         if aclgraph_mode.decode_mode() == CUDAGraphMode.FULL and \
                 aclgraph_mode.separate_routine():
-            max_num_tokens = self.scheduler_config.max_num_seqs * \
-                self.uniform_decode_query_len
-            decode_cudagraph_batch_sizes = [
-                x for x in self.aclgraph_batch_sizes if x <= max_num_tokens
-                and x >= self.uniform_decode_query_len
-            ]
-            compilation_cases_decode = list(
-                reversed(decode_cudagraph_batch_sizes))
+            compilation_cases_decode = sorted(self.aclgraph_batch_sizes)
             self._capture_aclgraphs(
                 compilation_cases=compilation_cases_decode,
                 aclgraph_runtime_mode=CUDAGraphMode.FULL,
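
Net effect in the model runner: the in-runner filtering of decode batch sizes is gone because `update_aclgraph_sizes` already constrains them, and both capture loops now iterate sizes in explicit ascending order instead of `reversed()` over however the list happens to be stored. A tiny illustration with made-up sizes:

```python
# Hypothetical, already MTP-adjusted batch sizes (multiples of 3, max 48).
aclgraph_batch_sizes = [48, 24, 12]

# Before: capture order depended on how the list was stored.
legacy_cases = list(reversed(aclgraph_batch_sizes))   # [12, 24, 48]
# After: explicitly ascending regardless of storage order; the decode loop
# reuses the same full list instead of re-filtering it.
compilation_cases = sorted(aclgraph_batch_sizes)      # [12, 24, 48]
compilation_cases_decode = sorted(aclgraph_batch_sizes)

print(compilation_cases == legacy_cases)  # True here, but sorted() is order-safe
```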