[v0.11.0] cherry-pick Fix performance degradation when mtp>1 (#3597) (#3630)

### What this PR does / why we need it?
cherry-pick Fix performance degradation when mtp>1 (#3597)

This PR fixes a performance degradation when mtp>1. With mtp>1, a single
decode step can produce more tokens (i.e. a larger effective batch size) than
the maximum ACL graph capture size, which forces the draft model to fall back
to eager mode.
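
For intuition: with speculative decoding, one decode step can schedule up to `num_speculative_tokens + 1` tokens per sequence, so the draft model may see up to `(num_speculative_tokens + 1) * max_num_seqs` tokens at once. A minimal sketch of the failing condition, with illustrative numbers (the values below are assumptions, not this repo's defaults):

```python
# Illustrative numbers (assumptions, not repo defaults).
num_speculative_tokens = 2        # mtp > 1
max_num_seqs = 256                # scheduler's max batched sequences
capture_sizes = [512, 256, 128]   # ACL graph capture sizes, sorted descending

# Largest token batch the draft model can see in one decode step.
required = (num_speculative_tokens + 1) * max_num_seqs  # 3 * 256 = 768

# 768 exceeds the largest capture size (512), so before this fix the
# draft model fell back to eager mode.
assert required > capture_sizes[0]
```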

### How was this patch tested?
Tested by CI.

---------

Signed-off-by: Zetong Li <slippersss@126.com>
Author: Zetong Li
Date: 2025-10-22 22:07:39 +08:00 (committed by GitHub)
Parent: a989fef5de
Commit: 6e72bfdc50
2 changed files with 21 additions and 0 deletions


@@ -272,6 +272,7 @@ class TestUtils(TestBase):
            len(test_vllm_config.compilation_config.cudagraph_capture_sizes))
        test_vllm_config.speculative_config = mock.MagicMock()
        test_vllm_config.speculative_config.num_speculative_tokens = 2
        test_vllm_config.speculative_config.draft_model_config = mock.MagicMock()
        test_vllm_config.speculative_config.draft_model_config.hf_config = mock.MagicMock()


@@ -410,6 +410,26 @@ def update_aclgraph_sizes(vllm_config: VllmConfig) -> None:
            vllm_config.model_config.architectures[0], num_hidden_layers,
            len(original_sizes))
    # The default or user-defined cudagraph_capture_sizes may not account for
    # the num_speculative_tokens > 1 scenario: the maximum size,
    # cudagraph_capture_sizes[0], should be greater than or equal to
    # (num_speculative_tokens + 1) * max_num_seqs, otherwise the draft model
    # will run in eager mode.
    if vllm_config.speculative_config is not None and \
            vllm_config.speculative_config.num_speculative_tokens > 1:
        num_speculative_tokens = vllm_config.speculative_config.num_speculative_tokens
        max_num_seqs = vllm_config.scheduler_config.max_num_seqs
        original_sizes, compilation_config.cudagraph_capture_sizes = \
            compilation_config.cudagraph_capture_sizes, None
        assert len(original_sizes) > 0
        if original_sizes[0] < (num_speculative_tokens + 1) * max_num_seqs:
            enlarged_sizes = [(num_speculative_tokens + 1) * size
                              for size in original_sizes]
            compilation_config.init_with_cudagraph_sizes(enlarged_sizes)
            logger.info(
                "Adjusted ACL graph sizes: %s → %s for speculative decoding",
                original_sizes, enlarged_sizes)
        else:
            compilation_config.cudagraph_capture_sizes = original_sizes
# TODO(wxy): Move to ops module
def dispose_tensor(x: torch.Tensor):
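
For reference, the enlargement rule in the hunk above can be exercised in isolation. A minimal sketch, assuming capture sizes are sorted in descending order as in the patch (the `enlarge_for_speculative` name is ours, not from the patch):

```python
def enlarge_for_speculative(capture_sizes, num_speculative_tokens, max_num_seqs):
    """Sketch of the sizing rule added in update_aclgraph_sizes.

    If the largest capture size cannot hold
    (num_speculative_tokens + 1) * max_num_seqs tokens, every size is
    multiplied by (num_speculative_tokens + 1) so the draft model keeps
    running in graph mode.
    """
    assert capture_sizes, "expected at least one capture size"
    if capture_sizes[0] < (num_speculative_tokens + 1) * max_num_seqs:
        return [(num_speculative_tokens + 1) * s for s in capture_sizes]
    return capture_sizes

# Example: with 2 speculative tokens and 256 sequences, [512, 256, 128]
# cannot hold 768 tokens, so each size is tripled.
print(enlarge_for_speculative([512, 256, 128], 2, 256))  # [1536, 768, 384]
```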