[feat][spec decode] Unified draft parallel (#6766)
### What this PR does / why we need it?
Implements unified, parallelized speculative decoding in vLLM Ascend,
which can simultaneously support parallel-drafting speculative inference
schemes such as PARD and P-Eagle. See
https://github.com/vllm-project/vllm-ascend/pull/6565 and
https://github.com/vllm-project/vllm-ascend/pull/4078 for background.
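
For context, a minimal sketch of the parallel-drafting idea (illustrative only, not this PR's actual code; `draft_model` and `MASK_TOKEN_ID` are hypothetical stand-ins). A sequential drafter needs K forward passes to propose K tokens; a PARD-style parallel drafter appends K placeholder tokens and proposes all K positions in one pass:

```python
import torch

K = 8              # num_speculative_tokens
MASK_TOKEN_ID = 0  # placeholder id a PARD-style drafter is trained with

def propose_sequential(draft_model, prefix_ids: torch.Tensor) -> torch.Tensor:
    """K forward passes: one draft token per pass (autoregressive drafting)."""
    ids = prefix_ids
    for _ in range(K):
        logits = draft_model(ids)                         # [1, seq, vocab]
        next_id = logits[:, -1].argmax(-1, keepdim=True)  # greedy next token
        ids = torch.cat([ids, next_id], dim=-1)
    return ids[:, -K:]

def propose_parallel(draft_model, prefix_ids: torch.Tensor) -> torch.Tensor:
    """One forward pass: append K placeholders, read off K drafts at once."""
    masks = prefix_ids.new_full((1, K), MASK_TOKEN_ID)
    logits = draft_model(torch.cat([prefix_ids, masks], dim=-1))
    # Logits at position i predict token i + 1, so the K draft tokens come
    # from the last real token and the first K - 1 placeholder positions.
    return logits[:, -K - 1:-1].argmax(-1)
```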
### How was this patch tested?
Serve script with parallel drafting enabled:

```bash
export target=/model/Llama-3.1-8B-Instruct
export draft=/model/PARD-Llama-3.2-1B
export CUDA_VISIBLE_DEVICES=6
export ASCEND_RT_VISIBLE_DEVICES=6
vllm serve $target \
    --tensor-parallel-size 1 \
    --max-model-len 4096 \
    --no-enable-prefix-caching \
    --port 8811 \
    --speculative-config '{"model": "/model/PARD-Llama-3.2-1B", "method": "draft_model", "num_speculative_tokens": 8, "parallel_drafting": true}'
```
Baseline serve script (no speculative decoding):

```bash
export target=/model/Llama-3.1-8B-Instruct
export draft=/model/PARD-Llama-3.2-1B
export CUDA_VISIBLE_DEVICES=6
export ASCEND_RT_VISIBLE_DEVICES=6
vllm serve $target \
    --tensor-parallel-size 1 \
    --max-model-len 4096 \
    --no-enable-prefix-caching \
    --port 8811
```
Benchmark script:

```bash
MAX_CONCURRENCY=1
NUM_PROMPTS=80
vllm bench serve --port 8811 \
    --temperature 0 \
    --model /model/Llama-3.1-8B-Instruct \
    --backend openai-chat \
    --endpoint /v1/chat/completions \
    --dataset-name hf \
    --dataset-path philschmid/mt-bench \
    --num-prompts ${NUM_PROMPTS} \
    --max-concurrency ${MAX_CONCURRENCY} \
    --seed 1234
```
Test results (MT-Bench, concurrency 1):
- baseline (without spec decode): TTFT 79.46 ms, TPOT 26.99 ms, output token throughput 36.75 tok/s
- this PR (with parallel drafting): TTFT 72.24 ms, TPOT 13.45 ms, output token throughput 72.98 tok/s
- per-position acceptance (positions 0 to 7): 79.48%, 56.93%, 40%, 27.90%, 19.79%, 14.25%, 10.57%, 7.61%
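
As a rough sanity check (assuming the reported rates are cumulative acceptance probabilities per draft position), summing them gives the expected number of accepted draft tokens per target step:

```python
# Back-of-the-envelope check on the reported per-position acceptance rates.
rates = [0.7948, 0.5693, 0.40, 0.2790, 0.1979, 0.1425, 0.1057, 0.0761]
expected_accepted = sum(rates)           # ~2.57 accepted drafts per step
tokens_per_step = 1 + expected_accepted  # +1 bonus token -> ~3.57 tokens/step
print(f"{expected_accepted:.2f} accepted drafts, {tokens_per_step:.2f} tokens/step")
```

The measured TPOT gain (26.99 ms -> 13.45 ms, about 2.0x) is below the ideal ~3.57x because each step also pays the draft model's forward cost.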
----------------------------------------------------------------------
Serve script for the Qwen3 model pair:

```bash
export target=/model/Qwen3-1.7B
export draft=/model/PARD-Qwen3-0.6B
export CUDA_VISIBLE_DEVICES=1
export ASCEND_RT_VISIBLE_DEVICES=1
vllm serve $target \
    --tensor-parallel-size 1 \
    --max-model-len 4096 \
    --no-enable-prefix-caching \
    --port 8811 \
    --speculative-config '{"model": "/model/PARD-Qwen3-0.6B", "method": "draft_model", "num_speculative_tokens": 8, "parallel_drafting": true}'
```
cc @NickJudyHvv
- vLLM version: v0.15.0
- vLLM main: 9562912cea
---------
Signed-off-by: 01267596 <xiongkai123@cmbchina.com>
Signed-off-by: kx <1670186653@qq.com>
Signed-off-by: HF-001 <1670186653@qq.com>
Co-authored-by: 01267596 <xiongkai123@cmbchina.com>
The accompanying diff to the AscendEagleProposer unit tests:

```diff
@@ -10,14 +10,15 @@ from vllm_ascend.spec_decode.eagle_proposer import AscendEagleProposer
 class TestEagleProposerInitialization(TestBase):
 
     def setUp(self):
         self.vllm_config = MagicMock(spec=VllmConfig)
         self.vllm_config.speculative_config = MagicMock()
         self.vllm_config.cache_config = MagicMock(spec=CacheConfig)
         self.vllm_config.scheduler_config = MagicMock()
         self.vllm_config.model_config = MagicMock()
-        self.vllm_config.model_config.hf_text_config = MagicMock(spec=[])  # Empty spec to prevent hasattr from returning True
+        self.vllm_config.model_config.hf_text_config = MagicMock(
+            spec=[]
+        )  # Empty spec to prevent hasattr from returning True
         self.vllm_config.model_config.hf_text_config.to_dict = MagicMock(return_value={})
         self.vllm_config.compilation_config = MagicMock()
         self.device = torch.device("cpu")
@@ -40,20 +41,16 @@ class TestEagleProposerInitialization(TestBase):
         self.vllm_config.parallel_config.enable_expert_parallel = False
         self.vllm_config.speculative_config.draft_tensor_parallel_size = 1
         self.vllm_config.speculative_config.num_speculative_tokens = 2
-        self.vllm_config.speculative_config.speculative_token_tree = str([
-            (i + 1) * (0, ) for i in range(2)
-        ])
+        self.vllm_config.speculative_config.speculative_token_tree = str([(i + 1) * (0,) for i in range(2)])
         self.vllm_config.speculative_config.draft_model_config.uses_xdrope_dim = 0
         self.vllm_config.speculative_config.draft_model_config.uses_mrope = False
         self.vllm_config.speculative_config.disable_padded_drafter_batch = False
         self.vllm_config.additional_config = None
 
-        self.mock_cpugpubuffer = patch(
-            "vllm.v1.spec_decode.eagle.CpuGpuBuffer")
+        self.mock_cpugpubuffer = patch("vllm.v1.spec_decode.eagle.CpuGpuBuffer")
         self.mock_cpugpubuffer.start()
         self.mock_supports_multimodal_inputs = patch(
-            "vllm.multimodal.registry.MultiModalRegistry.supports_multimodal_inputs",
-            return_value=False
+            "vllm.multimodal.registry.MultiModalRegistry.supports_multimodal_inputs", return_value=False
         )
         self.mock_supports_multimodal_inputs.start()
 
@@ -78,18 +75,16 @@ class TestEagleProposerInitialization(TestBase):
         init_ascend_config(self.vllm_config)
 
         with set_current_vllm_config(self.vllm_config):
-            proposer = AscendEagleProposer(vllm_config=self.vllm_config,
-                                           device=self.device,
-                                           runner=self.runner)
+            proposer = AscendEagleProposer(vllm_config=self.vllm_config, device=self.device, runner=self.runner)
 
         self.assertEqual(proposer.hidden_size, 4096)
         self.assertTrue(proposer.use_cuda_graph)
 
         expected_max_num_tokens = proposer.max_num_tokens
-        self.assertEqual(proposer.input_ids.shape, (expected_max_num_tokens, ))
-        self.assertEqual(proposer.positions.shape, (expected_max_num_tokens, ))
+        self.assertEqual(proposer.input_ids.shape, (expected_max_num_tokens,))
+        self.assertEqual(proposer.positions.shape, (expected_max_num_tokens,))
         self.assertEqual(proposer.hidden_states.shape, (expected_max_num_tokens, 4096))
-        self.assertEqual(proposer.arange.shape, (expected_max_num_tokens, ))
+        self.assertEqual(proposer.arange.shape, (expected_max_num_tokens,))
 
     def test_initialization_eagle3_enforce_eager(self):
         self.vllm_config.speculative_config.method = "eagle3"
@@ -101,9 +96,7 @@ class TestEagleProposerInitialization(TestBase):
         init_ascend_config(self.vllm_config)
 
         with set_current_vllm_config(self.vllm_config):
-            proposer = AscendEagleProposer(vllm_config=self.vllm_config,
-                                           device=self.device,
-                                           runner=self.runner)
+            proposer = AscendEagleProposer(vllm_config=self.vllm_config, device=self.device, runner=self.runner)
 
         self.assertEqual(proposer.hidden_size, 2048)
         self.assertFalse(proposer.use_cuda_graph)
@@ -120,9 +113,7 @@ class TestEagleProposerInitialization(TestBase):
         init_ascend_config(self.vllm_config)
 
         with set_current_vllm_config(self.vllm_config):
-            proposer = AscendEagleProposer(vllm_config=self.vllm_config,
-                                           device=self.device,
-                                           runner=self.runner)
+            proposer = AscendEagleProposer(vllm_config=self.vllm_config, device=self.device, runner=self.runner)
 
         self.assertEqual(proposer.hidden_size, 2048)
         self.assertTrue(proposer.use_cuda_graph)
@@ -139,9 +130,7 @@ class TestEagleProposerInitialization(TestBase):
         init_ascend_config(self.vllm_config)
 
         with set_current_vllm_config(self.vllm_config):
-            proposer = AscendEagleProposer(vllm_config=self.vllm_config,
-                                           device=self.device,
-                                           runner=self.runner)
+            proposer = AscendEagleProposer(vllm_config=self.vllm_config, device=self.device, runner=self.runner)
 
         self.assertEqual(proposer.hidden_size, 2048)
         self.assertFalse(proposer.use_cuda_graph)
@@ -150,7 +139,6 @@ class TestEagleProposerInitialization(TestBase):
 
 
 class TestEagleProposerLoadModel(TestBase):
 
     def setUp(self):
         self.vllm_config = MagicMock(spec=VllmConfig)
         self.vllm_config.speculative_config = MagicMock()
@@ -175,29 +163,24 @@ class TestEagleProposerLoadModel(TestBase):
         self.vllm_config.parallel_config.enable_expert_parallel = False
         self.vllm_config.speculative_config.draft_tensor_parallel_size = 1
         self.vllm_config.speculative_config.num_speculative_tokens = 2
-        self.vllm_config.speculative_config.speculative_token_tree = str([
-            (i + 1) * (0, ) for i in range(2)
-        ])
+        self.vllm_config.speculative_config.speculative_token_tree = str([(i + 1) * (0,) for i in range(2)])
         self.vllm_config.speculative_config.draft_model_config.uses_xdrope_dim = 0
         self.vllm_config.speculative_config.draft_model_config.uses_mrope = False
         self.vllm_config.speculative_config.disable_padded_drafter_batch = False
         self.vllm_config.additional_config = None
         init_ascend_config(self.vllm_config)
 
-        self.mock_cpugpubuffer = patch(
-            "vllm.v1.spec_decode.eagle.CpuGpuBuffer")
+        self.mock_cpugpubuffer = patch("vllm.v1.spec_decode.eagle.CpuGpuBuffer")
         self.mock_cpugpubuffer.start()
         self.mock_supports_multimodal_inputs = patch(
-            "vllm.multimodal.registry.MultiModalRegistry.supports_multimodal_inputs",
-            return_value=False
+            "vllm.multimodal.registry.MultiModalRegistry.supports_multimodal_inputs", return_value=False
         )
         self.mock_supports_multimodal_inputs.start()
 
         # Set the current vllm config
         set_current_vllm_config(self.vllm_config)
-        self.proposer = AscendEagleProposer(vllm_config=self.vllm_config,
-                                            device=self.device,
-                                            runner=self.runner)
+        self.proposer = AscendEagleProposer(vllm_config=self.vllm_config, device=self.device, runner=self.runner)
         self.proposer.parallel_drafting = False
 
     def tearDown(self):
         self.mock_cpugpubuffer.stop()
@@ -205,24 +188,21 @@ class TestEagleProposerLoadModel(TestBase):
         # Clear the current vllm config
         set_current_vllm_config(None)
 
-    @patch(
-        "vllm_ascend.spec_decode.eagle_proposer.get_layers_from_vllm_config")
+    @patch("vllm_ascend.spec_decode.eagle_proposer.get_layers_from_vllm_config")
     @patch("vllm_ascend.spec_decode.eagle_proposer.get_model")
     @patch("vllm_ascend.spec_decode.eagle_proposer.get_pp_group")
-    def test_load_model_pp1(self, mock_pp_group, mock_get_model,
-                            mock_get_layers):
+    def test_load_model_pp1(self, mock_pp_group, mock_get_model, mock_get_layers):
         mock_pp_group.return_value.world_size = 1
         mock_target_layer1 = MagicMock()
         mock_target_layer2 = MagicMock()
         mock_draft_layer1 = MagicMock()
         mock_draft_layer3 = MagicMock()
-        mock_get_layers.side_effect = [{
-            "layer1": mock_target_layer1,
-            "layer2": mock_target_layer2
-        }, {}, {}, {
-            "layer1": mock_draft_layer1,
-            "layer3": mock_draft_layer3
-        }]
+        mock_get_layers.side_effect = [
+            {"layer1": mock_target_layer1, "layer2": mock_target_layer2},
+            {},
+            {},
+            {"layer1": mock_draft_layer1, "layer3": mock_draft_layer3},
+        ]
 
         weight = torch.zeros(0)
 
@@ -241,61 +221,45 @@ class TestEagleProposerLoadModel(TestBase):
             self.proposer.load_model(mock_model)
         mock_get_model.assert_called_once()
         self.assertEqual(self.proposer.attn_layer_names, ["layer3"])
-        self.assertIs(self.proposer.model.model.embed_tokens,
-                      mock_model.model.embed_tokens)
+        self.assertIs(self.proposer.model.model.embed_tokens, mock_model.model.embed_tokens)
 
-    @patch(
-        "vllm_ascend.spec_decode.eagle_proposer.get_layers_from_vllm_config")
+    @patch("vllm_ascend.spec_decode.eagle_proposer.get_layers_from_vllm_config")
     @patch("vllm_ascend.spec_decode.eagle_proposer.get_model")
     @patch("vllm_ascend.spec_decode.eagle_proposer.get_pp_group")
-    def test_load_model_pp_gt1(self, mock_pp_group, mock_get_model,
-                               mock_get_layers):
+    def test_load_model_pp_gt1(self, mock_pp_group, mock_get_model, mock_get_layers):
         mock_pp_group.return_value.world_size = 2
         mock_target_layer1 = MagicMock()
         mock_draft_layer2 = MagicMock()
 
-        mock_get_layers.side_effect = [{
-            "layer1": mock_target_layer1
-        }, {}, {}, {
-            "layer2": mock_draft_layer2
-        }]
+        mock_get_layers.side_effect = [{"layer1": mock_target_layer1}, {}, {}, {"layer2": mock_draft_layer2}]
 
         mock_model = MagicMock()
         original_embed = MagicMock()
         mock_model.multimodal_cpu_fields = None
         mock_model.merge_by_field_config = None
-        mock_get_model.return_value = MagicMock(model=MagicMock(
-            embed_tokens=original_embed))
+        mock_get_model.return_value = MagicMock(model=MagicMock(embed_tokens=original_embed))
 
         with set_current_vllm_config(self.vllm_config):
             self.proposer.load_model(mock_model)
 
-        self.assertIsNot(self.proposer.model.model.embed_tokens,
-                         mock_model.model.embed_tokens)
+        self.assertIsNot(self.proposer.model.model.embed_tokens, mock_model.model.embed_tokens)
         self.assertEqual(self.proposer.attn_layer_names, ["layer2"])
 
-    @patch(
-        "vllm_ascend.spec_decode.eagle_proposer.get_layers_from_vllm_config")
+    @patch("vllm_ascend.spec_decode.eagle_proposer.get_layers_from_vllm_config")
     @patch("vllm_ascend.spec_decode.eagle_proposer.get_model")
     @patch("vllm_ascend.spec_decode.eagle_proposer.get_pp_group")
     @patch("vllm_ascend.spec_decode.eagle_proposer.supports_multimodal")
-    def test_load_model_multimodal(self, mock_supports_multi, mock_pp_group,
-                                   mock_get_model, mock_get_layers):
+    def test_load_model_multimodal(self, mock_supports_multi, mock_pp_group, mock_get_model, mock_get_layers):
         mock_model = MagicMock()
         mock_model.get_language_model.return_value.lm_head = MagicMock()
         mock_supports_multi.return_value = True
         original_embed = MagicMock()
-        mock_get_model.return_value = MagicMock(model=MagicMock(
-            embed_tokens=original_embed))
+        mock_get_model.return_value = MagicMock(model=MagicMock(embed_tokens=original_embed))
 
         mock_target_layer1 = MagicMock()
         mock_draft_layer2 = MagicMock()
 
-        mock_get_layers.side_effect = [{
-            "layer1": mock_target_layer1
-        }, {}, {}, {
-            "layer2": mock_draft_layer2
-        }]
+        mock_get_layers.side_effect = [{"layer1": mock_target_layer1}, {}, {}, {"layer2": mock_draft_layer2}]
         mock_pp_group.return_value.world_size = 2
 
         self.proposer.model = MagicMock()
@@ -303,12 +267,10 @@ class TestEagleProposerLoadModel(TestBase):
         with set_current_vllm_config(self.vllm_config):
             self.proposer.load_model(mock_model)
         self.assertEqual(mock_model.get_language_model.call_count, 2)
-        self.assertIs(self.proposer.model.lm_head,
-                      mock_model.get_language_model.return_value.lm_head)
+        self.assertIs(self.proposer.model.lm_head, mock_model.get_language_model.return_value.lm_head)
 
 
 class TestEagleProposerDummyRun(TestBase):
 
     def setUp(self):
         self.vllm_config = MagicMock(spec=VllmConfig)
         self.vllm_config.speculative_config = MagicMock()
@@ -328,51 +290,43 @@ class TestEagleProposerDummyRun(TestBase):
         self.vllm_config.model_config.uses_mrope = False
         self.vllm_config.model_config.uses_xdrope_dim = 0
         self.vllm_config.model_config.use_mla = False
-        self.vllm_config.model_config.hf_text_config = MagicMock(spec=[])  # Empty spec to prevent hasattr from returning True
+        self.vllm_config.model_config.hf_text_config = MagicMock(
+            spec=[]
+        )  # Empty spec to prevent hasattr from returning True
         self.vllm_config.model_config.hf_text_config.to_dict = MagicMock(return_value={})
         self.vllm_config.parallel_config.tensor_parallel_size = 1
         self.vllm_config.parallel_config.data_parallel_rank = 0
         self.vllm_config.parallel_config.data_parallel_size = 1
         self.vllm_config.parallel_config.prefill_context_parallel_size = 1
         self.vllm_config.speculative_config.draft_tensor_parallel_size = 1
-        self.vllm_config.speculative_config.speculative_token_tree = str([
-            (i + 1) * (0, ) for i in range(4)
-        ])
+        self.vllm_config.speculative_config.speculative_token_tree = str([(i + 1) * (0,) for i in range(4)])
         self.vllm_config.speculative_config.draft_model_config.uses_xdrope_dim = 0
         self.vllm_config.speculative_config.draft_model_config.uses_mrope = False
         self.vllm_config.speculative_config.disable_padded_drafter_batch = False
         self.vllm_config.additional_config = None
         init_ascend_config(self.vllm_config)
 
-        self.mock_cpugpubuffer = patch(
-            "vllm.v1.spec_decode.eagle.CpuGpuBuffer")
+        self.mock_cpugpubuffer = patch("vllm.v1.spec_decode.eagle.CpuGpuBuffer")
         self.mock_cpugpubuffer.start()
         self.mock_supports_multimodal_inputs = patch(
-            "vllm.multimodal.registry.MultiModalRegistry.supports_multimodal_inputs",
-            return_value=False
+            "vllm.multimodal.registry.MultiModalRegistry.supports_multimodal_inputs", return_value=False
         )
         self.mock_supports_multimodal_inputs.start()
 
         # Mock parallel state functions
         self.mock_tp_world_size = patch(
-            "vllm_ascend.ascend_forward_context.get_tensor_model_parallel_world_size",
-            return_value=1
+            "vllm_ascend.ascend_forward_context.get_tensor_model_parallel_world_size", return_value=1
         )
         self.mock_tp_world_size.start()
 
         mock_dp_group = MagicMock()
         mock_dp_group.world_size = 1
-        self.mock_dp_group = patch(
-            "vllm_ascend.ascend_forward_context.get_dp_group",
-            return_value=mock_dp_group
-        )
+        self.mock_dp_group = patch("vllm_ascend.ascend_forward_context.get_dp_group", return_value=mock_dp_group)
         self.mock_dp_group.start()
 
         # Set the current vllm config
         set_current_vllm_config(self.vllm_config)
-        self.proposer = AscendEagleProposer(vllm_config=self.vllm_config,
-                                            device=self.device,
-                                            runner=self.runner)
+        self.proposer = AscendEagleProposer(vllm_config=self.vllm_config, device=self.device, runner=self.runner)
         self.proposer.model = MagicMock()
         self.proposer._runnable = MagicMock()
         self.proposer.update_stream = MagicMock()
@@ -397,8 +351,7 @@ class TestEagleProposerDummyRun(TestBase):
         # cpu does not support `torch.ops.vllm.maybe_pad_and_reduce`
         with set_current_vllm_config(self.vllm_config):
             self.proposer.enable_shared_expert_dp = False
-            self.proposer.dummy_run(num_tokens=num_tokens,
-                                    with_prefill=with_prefill)
+            self.proposer.dummy_run(num_tokens=num_tokens, with_prefill=with_prefill)
 
         self.assertTrue(self.proposer._runnable.call_count == 1)
 
@@ -433,9 +386,7 @@ class TestEagleProposerDummyRun(TestBase):
         # cpu does not support `torch.ops.vllm.maybe_pad_and_reduce`
         with set_current_vllm_config(self.vllm_config):
             self.proposer.enable_shared_expert_dp = False
-            self.proposer.dummy_run(num_tokens=64,
-                                    in_graph_capturing=True,
-                                    aclgraph_runtime_mode=CUDAGraphMode.FULL)
+            self.proposer.dummy_run(num_tokens=64, in_graph_capturing=True, aclgraph_runtime_mode=CUDAGraphMode.FULL)
         self.assertTrue(self.proposer._runnable.call_count == 1)
         mock_update_full_graph_params.assert_not_called()
         self.proposer.use_cuda_graph = last_use_cuda_graph
@@ -458,16 +409,13 @@ class TestEagleProposerDummyRun(TestBase):
         # cpu does not support `torch.ops.vllm.maybe_pad_and_reduce`
         with set_current_vllm_config(self.vllm_config):
             self.proposer.enable_shared_expert_dp = False
-            self.proposer.dummy_run(num_tokens=64,
-                                    in_graph_capturing=False,
-                                    aclgraph_runtime_mode=CUDAGraphMode.FULL)
+            self.proposer.dummy_run(num_tokens=64, in_graph_capturing=False, aclgraph_runtime_mode=CUDAGraphMode.FULL)
         self.assertTrue(self.proposer._runnable.call_count == 1)
         self.assertTrue(mock_update_full_graph_params.call_count == 1)
         self.proposer.use_cuda_graph = last_use_cuda_graph
 
 
 class TestEagleProposerHelperMethods(TestBase):
 
     # TODO: Can add some tests about prepare_next_token_ids in future.
 
     def setUp(self):
@@ -497,29 +445,23 @@ class TestEagleProposerHelperMethods(TestBase):
         self.vllm_config.parallel_config.enable_expert_parallel = False
         self.vllm_config.speculative_config.draft_tensor_parallel_size = 1
         self.vllm_config.speculative_config.num_speculative_tokens = 2
-        self.vllm_config.speculative_config.speculative_token_tree = str([
-            (i + 1) * (0, ) for i in range(2)
-        ])
+        self.vllm_config.speculative_config.speculative_token_tree = str([(i + 1) * (0,) for i in range(2)])
         self.vllm_config.speculative_config.draft_model_config.uses_xdrope_dim = 0
         self.vllm_config.speculative_config.draft_model_config.uses_mrope = False
         self.vllm_config.speculative_config.disable_padded_drafter_batch = False
         self.vllm_config.additional_config = None
         init_ascend_config(self.vllm_config)
 
-        self.mock_cpugpubuffer = patch(
-            "vllm.v1.spec_decode.eagle.CpuGpuBuffer")
+        self.mock_cpugpubuffer = patch("vllm.v1.spec_decode.eagle.CpuGpuBuffer")
         self.mock_cpugpubuffer.start()
         self.mock_supports_multimodal_inputs = patch(
-            "vllm.multimodal.registry.MultiModalRegistry.supports_multimodal_inputs",
-            return_value=False
+            "vllm.multimodal.registry.MultiModalRegistry.supports_multimodal_inputs", return_value=False
         )
         self.mock_supports_multimodal_inputs.start()
 
         # Set the current vllm config
         set_current_vllm_config(self.vllm_config)
-        self.proposer = AscendEagleProposer(vllm_config=self.vllm_config,
-                                            device=self.device,
-                                            runner=self.runner)
+        self.proposer = AscendEagleProposer(vllm_config=self.vllm_config, device=self.device, runner=self.runner)
 
     def tearDown(self):
         self.mock_cpugpubuffer.stop()
@@ -536,11 +478,9 @@ class TestEagleProposerHelperMethods(TestBase):
         num_rejected = torch.tensor([1, 0, 1], device=self.device)
         mock_return_attn = MagicMock()
 
-        with set_current_vllm_config(self.vllm_config):
-            with patch.object(self.proposer,
-                              'prepare_inputs',
-                              return_value=(mock_return_attn,
-                                            torch.tensor([1, 2, 4]))):
-                return_attn, indices = self.proposer.prepare_inputs(
-                    mock_attn, num_rejected)
-                self.assertEqual(indices.tolist(), [1, 2, 4])
+        with (
+            set_current_vllm_config(self.vllm_config),
+            patch.object(self.proposer, "prepare_inputs", return_value=(mock_return_attn, torch.tensor([1, 2, 4]))),
+        ):
+            return_attn, indices = self.proposer.prepare_inputs(mock_attn, num_rejected)
+            self.assertEqual(indices.tolist(), [1, 2, 4])
```