[Feature] Support full-graph mode with Eagle (#5118)
### What this PR does / why we need it?
Adds support for running Eagle speculative decoding in full-graph mode.

Change list:
1. Distinguish between handling `graph_params` and `draft_graph_params` in
`attention_v1`.
2. Adapt `eagle_proposer` to full-graph mode (see the sketch after this list), including:
   1) When full graph is enabled, wrap the draft model in the full-graph wrapper at model-load time.
   2) Build new attention metadata, set the running mode to FULL, and mark the attention update in `dummy_run` when in full-graph mode.
   3) Fix and fill missing attention-metadata fields, such as `attn_metadata.slot_mapping`.
   4) Add a descriptor.
   5) Set the running mode and trigger the metadata update.
3. Rename `is_mtp_model` to `is_draft_model`, and add the workspace update.
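The sketch below illustrates items 1 and 2: separate graph-param stores for the target and draft models, and a `dummy_run` that refreshes attention params only when replaying (not capturing) a full graph. All class and helper names here are illustrative stand-ins, not the actual vllm-ascend implementation; only `CUDAGraphMode` comes from vLLM itself.

```python
from vllm.config import CUDAGraphMode

# Item 1 (sketch): keep the target's and the draft's captured-graph
# parameters in separate stores so the two models never clobber each other.
graph_params: dict[int, dict] = {}        # target model, keyed by token count
draft_graph_params: dict[int, dict] = {}  # Eagle draft model


def get_graph_params(num_tokens: int, is_draft_model: bool) -> dict:
    store = draft_graph_params if is_draft_model else graph_params
    return store.setdefault(num_tokens, {})


def update_attn_params(num_tokens: int) -> None:
    """Stand-in for the real vllm_ascend helper of the same name."""
    print(f"refreshing attention params for {num_tokens} tokens")


class FullGraphWrapper:
    """Illustrative stand-in for the wrapper installed in full-graph mode."""

    def __init__(self, model):
        self.model = model

    def __call__(self, *args, **kwargs):
        return self.model(*args, **kwargs)


# Item 2 (sketch): wrap the draft model at load time and dispatch on
# capture vs. replay inside dummy_run.
class EagleProposerSketch:

    def __init__(self, use_full_graph: bool, num_speculative_tokens: int):
        self.use_full_graph = use_full_graph
        self.num_speculative_tokens = num_speculative_tokens
        self.model = None

    def load_model(self, model):
        # 2.1) install the full-graph wrapper once, when the model is loaded
        self.model = FullGraphWrapper(model) if self.use_full_graph else model

    def dummy_run(self, num_tokens: int, in_graph_capturing: bool = False,
                  aclgraph_runtime_mode=CUDAGraphMode.NONE):
        # one forward per speculative step (call_count == 4 in the new tests)
        for _ in range(self.num_speculative_tokens):
            self.model(num_tokens)
            # 2.2/2.5) while capturing, the update is only marked; on replay
            # the captured graph's attention params must actually be refreshed
            if (aclgraph_runtime_mode == CUDAGraphMode.FULL
                    and not in_graph_capturing):
                update_attn_params(num_tokens)
```

With a toy model, `p = EagleProposerSketch(True, 4); p.load_model(lambda n: n); p.dummy_run(64, aclgraph_runtime_mode=CUDAGraphMode.FULL)` calls the model and the refresh four times each, matching the `call_count == 4` assertions in the new tests.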
NOTE:
When `async_scheduling=True` is set, the draft model is forced to run in
eager mode.
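A minimal sketch of that fallback, assuming the decision is made from the config objects the new tests populate; the helper name is hypothetical:

```python
# Hypothetical helper mirroring test_initialization_eagle3_full_graph_async:
# even with VLLM_COMPILE and enforce_eager=False everywhere, enabling async
# scheduling must leave the draft proposer with use_cuda_graph == False.
def draft_model_uses_graph(vllm_config) -> bool:
    if vllm_config.scheduler_config.async_scheduling:
        return False  # async scheduling forces the draft model to run eagerly
    return (not vllm_config.model_config.enforce_eager
            and not vllm_config.speculative_config.enforce_eager)
```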
### Does this PR introduce _any_ user-facing change?
### How was this patch tested?
- vLLM version: v0.12.0
- vLLM main: ad32e3e19c
---------
Signed-off-by: anon189Ty <Stari_Falcon@outlook.com>
Co-authored-by: Yizhou Liu <liu_yizhou@outlook.com>
Co-authored-by: Yizhou <136800916+yiz-liu@users.noreply.github.com>
```diff
@@ -2,7 +2,7 @@ from unittest.mock import MagicMock, patch
 
 import numpy as np
 import torch
-from vllm.config import CacheConfig, CompilationMode, VllmConfig
+from vllm.config import CacheConfig, CompilationMode, CUDAGraphMode, VllmConfig
 
 from tests.ut.base import TestBase
 from vllm_ascend.spec_decode.eagle_proposer import EagleProposer
@@ -33,11 +33,13 @@ class TestEagleProposerInitialization(TestBase):
     def tearDown(self):
         self.mock_cpugpubuffer.stop()
 
-    def test_initialization_eagle(self):
+    def test_initialization_eagle_graph(self):
         self.vllm_config.speculative_config.method = "eagle"
         self.vllm_config.speculative_config.draft_model_config.get_hidden_size.return_value = 4096
         self.vllm_config.compilation_config.mode = CompilationMode.VLLM_COMPILE
         self.vllm_config.model_config.enforce_eager = False
+        self.vllm_config.speculative_config.enforce_eager = False
+        self.vllm_config.scheduler_config.async_scheduling = False
 
         proposer = EagleProposer(vllm_config=self.vllm_config,
                                  device=self.device,
@@ -53,7 +55,7 @@ class TestEagleProposerInitialization(TestBase):
         self.assertEqual(proposer.hidden_states.shape, (1024, 4096))
         self.assertEqual(proposer.arange.shape, (1024, ))
 
-    def test_initialization_eagle3(self):
+    def test_initialization_eagle3_enforce_eager(self):
         self.vllm_config.speculative_config.method = "eagle3"
         self.vllm_config.speculative_config.draft_model_config.get_hidden_size.return_value = 2048
         self.vllm_config.compilation_config.mode = CompilationMode.NONE
@@ -68,6 +70,23 @@ class TestEagleProposerInitialization(TestBase):
         self.assertFalse(proposer.use_cuda_graph)
         self.assertEqual(proposer.hidden_states.shape, (1024, 2048))
 
+    def test_initialization_eagle3_full_graph_async(self):
+        self.vllm_config.speculative_config.method = "eagle3"
+        self.vllm_config.speculative_config.draft_model_config.get_hidden_size.return_value = 2048
+        self.vllm_config.compilation_config.mode = CompilationMode.VLLM_COMPILE
+        self.vllm_config.model_config.enforce_eager = False
+        self.vllm_config.speculative_config.enforce_eager = False
+        self.vllm_config.scheduler_config.async_scheduling = True
+
+        proposer = EagleProposer(vllm_config=self.vllm_config,
+                                 device=self.device,
+                                 runner=self.runner)
+
+        self.assertEqual(proposer.name, SpecDcodeType.EAGLE3)
+        self.assertEqual(proposer.hidden_size, 2048)
+        self.assertFalse(proposer.use_cuda_graph)
+        self.assertEqual(proposer.hidden_states.shape, (1024, 2048))
+
 
 class TestEagleProposerLoadModel(TestBase):
 
@@ -176,6 +195,7 @@ class TestEagleProposerDummyRun(TestBase):
     def setUp(self):
         self.vllm_config = MagicMock(spec=VllmConfig)
         self.vllm_config.speculative_config = MagicMock()
+        self.vllm_config.speculative_config.num_speculative_tokens = 4
         self.device = torch.device("cpu")
         self.runner = MagicMock()
 
@@ -192,25 +212,64 @@ class TestEagleProposerDummyRun(TestBase):
                                       device=self.device,
                                       runner=self.runner)
         self.proposer.model = MagicMock()
+        self.proposer.update_stream = MagicMock()
 
     def tearDown(self):
         self.mock_cpugpubuffer.stop()
 
+    @patch("vllm_ascend.spec_decode.eagle_proposer.get_forward_context")
     @patch("vllm_ascend.spec_decode.eagle_proposer.set_ascend_forward_context")
-    def test_dummy_run_basic(self, mock_context):
+    def test_dummy_run_basic(self, mock_context, mock_get_context):
         num_tokens = 32
         with_prefill = False
 
         self.proposer.dummy_run(num_tokens=num_tokens,
                                 with_prefill=with_prefill)
 
         mock_context.assert_called_once()
+        self.assertTrue(self.proposer.model.call_count == 4)
 
+    @patch("vllm_ascend.spec_decode.eagle_proposer.get_forward_context")
     @patch("vllm_ascend.spec_decode.eagle_proposer.set_ascend_forward_context")
-    def test_dummy_run_with_prefill(self, mock_context):
+    def test_dummy_run_with_prefill(self, mock_context, mock_get_context):
         mock_context.return_value.__enter__.return_value = None
         self.proposer.dummy_run(num_tokens=64, with_prefill=True, num_reqs=4)
-        self.proposer.model.assert_called_once()
+        self.assertTrue(self.proposer.model.call_count == 4)
 
+    @patch("vllm_ascend.spec_decode.eagle_proposer.update_attn_params")
+    @patch("vllm_ascend.spec_decode.eagle_proposer.get_forward_context")
+    @patch("vllm_ascend.spec_decode.eagle_proposer.set_ascend_forward_context")
+    def test_dummy_run_in_graph_capture(self, mock_context, mock_get_context,
+                                        mock_update_attn_params):
+        last_use_cuda_graph = self.proposer.use_cuda_graph
+        mock_return_context = MagicMock()
+        mock_return_context.cudagraph_runtime_mode = CUDAGraphMode.FULL
+        mock_return_context.capturing = True
+        mock_get_context.return_value = mock_return_context
+        self.proposer.use_cuda_graph = True
+        self.proposer.dummy_run(num_tokens=64,
+                                in_graph_capturing=True,
+                                aclgraph_runtime_mode=CUDAGraphMode.FULL)
+        self.assertTrue(self.proposer.model.call_count == 4)
+        mock_update_attn_params.assert_not_called()
+        self.proposer.use_cuda_graph = last_use_cuda_graph
+
+    @patch("vllm_ascend.spec_decode.eagle_proposer.update_attn_params")
+    @patch("vllm_ascend.spec_decode.eagle_proposer.get_forward_context")
+    @patch("vllm_ascend.spec_decode.eagle_proposer.set_ascend_forward_context")
+    def test_dummy_run_in_graph_run(self, mock_context, mock_get_context,
+                                    mock_update_attn_params):
+        last_use_cuda_graph = self.proposer.use_cuda_graph
+        mock_return_context = MagicMock()
+        mock_return_context.cudagraph_runtime_mode = CUDAGraphMode.FULL
+        mock_return_context.capturing = False
+        mock_get_context.return_value = mock_return_context
+        self.proposer.use_cuda_graph = True
+        self.proposer.dummy_run(num_tokens=64,
+                                in_graph_capturing=False,
+                                aclgraph_runtime_mode=CUDAGraphMode.FULL)
+        self.assertTrue(self.proposer.model.call_count == 4)
+        self.assertTrue(mock_update_attn_params.call_count == 4)
+        self.proposer.use_cuda_graph = last_use_cuda_graph
 
 class TestEagleProposerGenerateTokenIds(TestBase):
```