diff --git a/tests/ut/test_utils.py b/tests/ut/test_utils.py
index 7bc8f5b..32f2d7b 100644
--- a/tests/ut/test_utils.py
+++ b/tests/ut/test_utils.py
@@ -272,6 +272,7 @@ class TestUtils(TestBase):
             len(test_vllm_config.compilation_config.cudagraph_capture_sizes))
         test_vllm_config.speculative_config = mock.MagicMock()
+        test_vllm_config.speculative_config.num_speculative_tokens = 2
         test_vllm_config.speculative_config.draft_model_config = mock.MagicMock(
         )
         test_vllm_config.speculative_config.draft_model_config.hf_config = mock.MagicMock(
         )
diff --git a/vllm_ascend/utils.py b/vllm_ascend/utils.py
index 0929e40..d4ddbd9 100644
--- a/vllm_ascend/utils.py
+++ b/vllm_ascend/utils.py
@@ -410,6 +410,26 @@ def update_aclgraph_sizes(vllm_config: VllmConfig) -> None:
                 vllm_config.model_config.architectures[0], num_hidden_layers,
                 len(original_sizes))
 
+    # default or defined cudagraph_capture_sizes may not consider num_speculative_tokens>1 scenario
+    # the maximum size cudagraph_capture_sizes[0] should be greater or equal than
+    # (num_speculative_tokens+1)*max_num_seqs, otherwise draft model will run in eager mode
+    if vllm_config.speculative_config is not None and \
+        vllm_config.speculative_config.num_speculative_tokens > 1:
+        num_speculative_tokens = vllm_config.speculative_config.num_speculative_tokens
+        max_num_seqs = vllm_config.scheduler_config.max_num_seqs
+        original_sizes, compilation_config.cudagraph_capture_sizes = \
+            compilation_config.cudagraph_capture_sizes, None
+        assert len(original_sizes) > 0
+        if original_sizes[0] < (num_speculative_tokens + 1) * max_num_seqs:
+            enlarged_sizes = [(num_speculative_tokens + 1) * size
+                              for size in original_sizes]
+            compilation_config.init_with_cudagraph_sizes(enlarged_sizes)
+            logger.info(
+                "Adjusted ACL graphs: %s → %s for speculative decoding",
+                original_sizes, enlarged_sizes)
+        else:
+            compilation_config.cudagraph_capture_sizes = original_sizes
+
 
 # TODO(wxy): Move to ops module
 def dispose_tensor(x: torch.Tensor):