From 6e72bfdc501200151e92a6f569c92d5949907e91 Mon Sep 17 00:00:00 2001
From: Zetong Li <48438720+slippersss@users.noreply.github.com>
Date: Wed, 22 Oct 2025 22:07:39 +0800
Subject: [PATCH] [v0.11.0] cherry-pick Fix performance degradation when mtp>1
 (#3597) (#3630)

### What this PR does / why we need it?
cherry-pick Fix performance degradation when mtp>1 (#3597)

This PR aims to fix performance degradation when mtp>1. Since mtp>1 may
result in more tokens (i.e. larger batch size) than acl graph maximum
batch size, this will cause draft model to run in eager mode.

### How was this patch tested?
by ci

---------

Signed-off-by: Zetong Li <48438720+slippersss@users.noreply.github.com>
---
 tests/ut/test_utils.py | 1 +
 vllm_ascend/utils.py   | 20 ++++++++++++++++++++
 2 files changed, 21 insertions(+)

diff --git a/tests/ut/test_utils.py b/tests/ut/test_utils.py
index 7bc8f5b..32f2d7b 100644
--- a/tests/ut/test_utils.py
+++ b/tests/ut/test_utils.py
@@ -272,6 +272,7 @@ class TestUtils(TestBase):
             len(test_vllm_config.compilation_config.cudagraph_capture_sizes))
 
         test_vllm_config.speculative_config = mock.MagicMock()
+        test_vllm_config.speculative_config.num_speculative_tokens = 2
         test_vllm_config.speculative_config.draft_model_config = mock.MagicMock(
         )
         test_vllm_config.speculative_config.draft_model_config.hf_config = mock.MagicMock(
diff --git a/vllm_ascend/utils.py b/vllm_ascend/utils.py
index 0929e40..d4ddbd9 100644
--- a/vllm_ascend/utils.py
+++ b/vllm_ascend/utils.py
@@ -410,6 +410,26 @@ def update_aclgraph_sizes(vllm_config: VllmConfig) -> None:
             vllm_config.model_config.architectures[0], num_hidden_layers,
             len(original_sizes))
 
+    # default or defined cudagraph_capture_sizes may not consider num_speculative_tokens>1 scenario
+    # the maximum size cudagraph_capture_sizes[0] should be greater or equal than
+    # (num_speculative_tokens+1)*max_num_seqs, otherwise draft model will run in eager mode
+    if vllm_config.speculative_config is not None and \
+        vllm_config.speculative_config.num_speculative_tokens > 1:
+        num_speculative_tokens = vllm_config.speculative_config.num_speculative_tokens
+        max_num_seqs = vllm_config.scheduler_config.max_num_seqs
+        original_sizes, compilation_config.cudagraph_capture_sizes = \
+            compilation_config.cudagraph_capture_sizes, None
+        assert len(original_sizes) > 0
+        if original_sizes[0] < (num_speculative_tokens + 1) * max_num_seqs:
+            enlarged_sizes = [(num_speculative_tokens + 1) * size
+                              for size in original_sizes]
+            compilation_config.init_with_cudagraph_sizes(enlarged_sizes)
+            logger.info(
+                "Adjusted ACL graphs: %s → %s for speculative decoding",
+                original_sizes, enlarged_sizes)
+        else:
+            compilation_config.cudagraph_capture_sizes = original_sizes
+
 
 # TODO(wxy): Move to ops module
 def dispose_tensor(x: torch.Tensor):