[Feat] Merge the multiple eagle graphs into one graph (#5940)

### What this PR does / why we need it?
This PR merges all steps of the draft model into a single graph in
fullgraph mode, avoiding synchronization between the per-step graphs and
reducing bubble time.

#### Key ideas:
- The "model forward" of the step 0 (first step) and remaining steps are
captured together as a "Callable", rather than capturing each model
individually.
- "update_attn_params" is moved outside the entire graph, meaning that
all "attn_metadata" required by all steps are constructed before
"replay", and the "attn_params" of all steps are updated at once.
- Remove synchronization between the main model graph and draft model
graph.
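
A minimal sketch of the capture-once/replay-once shape behind the first
idea. This is not the PR's code: the PR captures ACL graphs on Ascend,
while `torch.cuda.CUDAGraph` is used here purely as an illustration, and
`merged_draft_steps` / `NUM_SPEC_STEPS` are hypothetical stand-ins for
`_run_merged_draft` and the real step count.

```python
# Minimal sketch only -- NOT the PR's code. The PR captures ACL graphs on
# Ascend; torch.cuda.CUDAGraph is used here just to illustrate capturing
# all draft steps as one callable and replaying them in a single launch.
import torch

NUM_SPEC_STEPS = 4  # hypothetical number of speculative draft steps


def merged_draft_steps(model, hidden):
    # Step 0 and every remaining draft step run inside one callable, so a
    # single graph capture covers the whole multi-step draft loop.
    for _ in range(NUM_SPEC_STEPS):
        hidden = model(hidden)
    return hidden


model = torch.nn.Linear(64, 64, device="cuda").eval()
static_in = torch.zeros(8, 64, device="cuda")

# Warm up on a side stream (required before capture), then capture the
# merged callable once.
s = torch.cuda.Stream()
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s), torch.no_grad():
    merged_draft_steps(model, static_in)
torch.cuda.current_stream().wait_stream(s)

g = torch.cuda.CUDAGraph()
with torch.cuda.graph(g), torch.no_grad():
    static_out = merged_draft_steps(model, static_in)

# Per decode round: refresh the static input buffer, then replay the
# whole multi-step draft in one launch, with no per-step synchronization.
static_in.copy_(torch.randn(8, 64, device="cuda"))
g.replay()
```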

#### Key params/functions:
- params: `draft_attn_metadatas`, `attn_metadata_multi_steps`,
`slot_mapping_group`
- functions: `_run_merged_draft`, `attn_update_stack_num_spec_norm`,
`update_attn_params`, `_propose`, `dummy_run` (the ordering sketch after
this list shows how the metadata params feed `update_attn_params` before
replay)
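
A hedged, self-contained sketch of the ordering these names imply: only
`attn_metadata_multi_steps` and `update_attn_params` are taken from the
PR; `build_attn_metadata`, the dict shapes, and the stub signatures below
are invented for illustration and differ from the real code.

```python
# Hypothetical sketch of the ordering the PR enforces: build the attention
# metadata for *every* draft step up front, apply all updates in one pass,
# and only then replay the merged graph. Helper bodies are stubs.
NUM_SPEC_STEPS = 4


def build_attn_metadata(step: int) -> dict:
    # Stand-in for constructing one step's attention metadata
    # (slot mappings, sequence lengths, ...).
    return {"step": step, "slot_mapping": list(range(step, step + 8))}


def update_attn_params(meta: dict) -> None:
    # Stand-in for writing metadata into the graph's static buffers.
    pass


# Before replay: metadata for all steps already exists ...
attn_metadata_multi_steps = [build_attn_metadata(s) for s in range(NUM_SPEC_STEPS)]
# ... and every step's attn params are updated at once ...
for meta in attn_metadata_multi_steps:
    update_attn_params(meta)
# ... so a single replay of the merged graph runs all draft steps with no
# in-graph parameter updates and no per-step synchronization.
```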

### Does this PR introduce _any_ user-facing change?

### How was this patch tested?

- vLLM version: v0.13.0
- vLLM main: 11b6af5280

Signed-off-by: anon189Ty <Stari_Falcon@outlook.com>
Commit 7725314b26 (parent 63d3921208), authored by anon189Ty on
2026-01-23 08:37:02 +08:00 and committed by GitHub.
5 changed files with 396 additions and 218 deletions.


@@ -20,6 +20,7 @@ class TestEagleProposerInitialization(TestBase):
         self.vllm_config.model_config = MagicMock()
         self.device = torch.device("cpu")
         self.runner = MagicMock()
+        self.runner.pin_memory = False
         self.vllm_config.cache_config.block_size = 16
         self.vllm_config.scheduler_config.max_num_batched_tokens = 1024
@@ -93,6 +94,23 @@ class TestEagleProposerInitialization(TestBase):
         self.vllm_config.scheduler_config.async_scheduling = True
         init_ascend_config(self.vllm_config)
         proposer = EagleProposer(vllm_config=self.vllm_config,
                                  device=self.device,
                                  runner=self.runner)
         self.assertEqual(proposer.hidden_size, 2048)
         self.assertTrue(proposer.use_cuda_graph)
         self.assertEqual(proposer.hidden_states.shape, (1024, 2048))
+
+    def test_initialization_mtp_full_graph_async(self):
+        self.vllm_config.speculative_config.method = "mtp"
+        self.vllm_config.speculative_config.draft_model_config.get_hidden_size.return_value = 2048
+        self.vllm_config.compilation_config.mode = CompilationMode.VLLM_COMPILE
+        self.vllm_config.model_config.enforce_eager = False
+        self.vllm_config.speculative_config.enforce_eager = False
+        self.vllm_config.scheduler_config.async_scheduling = True
+        init_ascend_config(self.vllm_config)
+        proposer = EagleProposer(vllm_config=self.vllm_config,
+                                 device=self.device,
+                                 runner=self.runner)
@@ -110,6 +128,7 @@ class TestEagleProposerLoadModel(TestBase):
         self.vllm_config.speculative_config.method = "eagle"
         self.device = torch.device("cpu")
         self.runner = MagicMock()
+        self.runner.pin_memory = False
         self.vllm_config.cache_config.block_size = 16
         self.vllm_config.scheduler_config.max_num_batched_tokens = 1024
@@ -252,6 +271,7 @@ class TestEagleProposerDummyRun(TestBase):
         self.runner = MagicMock()
         self.runner.pcp_size = 1
         self.runner.dcp_size = 1
+        self.runner.pin_memory = False
         self.vllm_config.cache_config.block_size = 16
         self.vllm_config.scheduler_config.max_num_batched_tokens = 1024
@@ -279,6 +299,7 @@ class TestEagleProposerDummyRun(TestBase):
                                       device=self.device,
                                       runner=self.runner)
         self.proposer.model = MagicMock()
+        self.proposer._runnable = MagicMock()
         self.proposer.update_stream = MagicMock()

     def tearDown(self):
@@ -298,7 +319,7 @@ class TestEagleProposerDummyRun(TestBase):
         self.proposer.dummy_run(num_tokens=num_tokens,
                                 with_prefill=with_prefill)
-        self.assertTrue(self.proposer.model.call_count == 4)
+        self.assertTrue(self.proposer._runnable.call_count == 1)

     # cpu does not support parallel-group, let alone `sp`
     @patch("vllm_ascend.spec_decode.eagle_proposer.get_forward_context",
@@ -309,7 +330,7 @@ class TestEagleProposerDummyRun(TestBase):
         # cpu does not support `torch.ops.vllm.maybe_pad_and_reduce`
         self.proposer.enable_shared_expert_dp = False
         self.proposer.dummy_run(num_tokens=64, with_prefill=True, num_reqs=4)
-        self.assertTrue(self.proposer.model.call_count == 4)
+        self.assertTrue(self.proposer._runnable.call_count == 1)

     @patch("vllm_ascend.spec_decode.eagle_proposer.update_attn_params")
     @patch("vllm_ascend.spec_decode.eagle_proposer.get_forward_context")
@@ -329,7 +350,7 @@ class TestEagleProposerDummyRun(TestBase):
         self.proposer.dummy_run(num_tokens=64,
                                 in_graph_capturing=True,
                                 aclgraph_runtime_mode=CUDAGraphMode.FULL)
-        self.assertTrue(self.proposer.model.call_count == 4)
+        self.assertTrue(self.proposer._runnable.call_count == 1)
         mock_update_attn_params.assert_not_called()
         self.proposer.use_cuda_graph = last_use_cuda_graph
@@ -351,8 +372,8 @@ class TestEagleProposerDummyRun(TestBase):
         self.proposer.dummy_run(num_tokens=64,
                                 in_graph_capturing=False,
                                 aclgraph_runtime_mode=CUDAGraphMode.FULL)
-        self.assertTrue(self.proposer.model.call_count == 4)
-        self.assertTrue(mock_update_attn_params.call_count == 4)
+        self.assertTrue(self.proposer._runnable.call_count == 1)
+        self.assertTrue(mock_update_attn_params.call_count == 1)
         self.proposer.use_cuda_graph = last_use_cuda_graph
@@ -369,6 +390,7 @@ class TestEagleProposerHelperMethods(TestBase):
         self.runner.input_batch.req_ids = [0, 1, 2]
         self.runner.arange_np = np.arange(10)
         self.runner.input_batch.num_reqs = 3
+        self.runner.pin_memory = False
         self.vllm_config.cache_config.block_size = 16
         self.vllm_config.scheduler_config.max_num_batched_tokens = 1024


@@ -74,6 +74,7 @@ class TestMtpProposer:
         runner.max_num_reqs = 256
         runner._use_aclgraph.return_value = False
         runner.reserved_mc2_mask = None
+        runner.pin_memory = False
         return runner

     @patch("vllm.v1.spec_decode.eagle.CpuGpuBuffer")