[Feat][Bugfix][main] Adapted SP to eagle3 (#5562)
### What this PR does / why we need it?
Adapted sp to eagle3.
There may still be some open issues — e.g., accuracy degradation in some scenarios, and the `sp`+`dp` combination...
We will fix them in follow-up PRs.
### Does this PR introduce _any_ user-facing change?
N/A
### How was this patch tested?
We tested it mainly in a new `e2e`.
```shell
pytest -s tests/e2e/singlecard/spec_decode/test_v1_spec_decode.py::test_llama_qwen_eagle_acceptance
```
```text
.
=============================== warnings summary ===============================
<frozen importlib._bootstrap>:241
<frozen importlib._bootstrap>:241: DeprecationWarning: builtin type SwigPyPacked has no __module__ attribute
<frozen importlib._bootstrap>:241
<frozen importlib._bootstrap>:241: DeprecationWarning: builtin type SwigPyObject has no __module__ attribute
-- Docs: https://docs.pytest.org/en/stable/how-to/capture-warnings.html
============= 3 passed, 1 skipped, 2 warnings in 142.05s (0:02:22) =============
```
It passed.
- vLLM version: v0.13.0
- vLLM main:
7157596103
Signed-off-by: drslark <slarksblood@qq.com>
This commit is contained in:
@@ -275,6 +275,8 @@ class TestEagleProposerDummyRun(TestBase):
         num_tokens = 32
         with_prefill = False
 
+        # cpu does not support `torch.ops.vllm.maybe_pad_and_reduce`
+        self.proposer.enable_shared_expert_dp = False
         self.proposer.dummy_run(num_tokens=num_tokens,
                                 with_prefill=with_prefill)
 
@@ -284,6 +286,8 @@ class TestEagleProposerDummyRun(TestBase):
     @patch("vllm_ascend.spec_decode.eagle_proposer.set_ascend_forward_context")
     def test_dummy_run_with_prefill(self, mock_context, mock_get_context):
         mock_context.return_value.__enter__.return_value = None
+        # cpu does not support `torch.ops.vllm.maybe_pad_and_reduce`
+        self.proposer.enable_shared_expert_dp = False
         self.proposer.dummy_run(num_tokens=64, with_prefill=True, num_reqs=4)
         self.assertTrue(self.proposer.model.call_count == 4)
 
@@ -298,6 +302,8 @@ class TestEagleProposerDummyRun(TestBase):
         mock_return_context.capturing = True
         mock_get_context.return_value = mock_return_context
         self.proposer.use_cuda_graph = True
+        # cpu does not support `torch.ops.vllm.maybe_pad_and_reduce`
+        self.proposer.enable_shared_expert_dp = False
         self.proposer.dummy_run(num_tokens=64,
                                 in_graph_capturing=True,
                                 aclgraph_runtime_mode=CUDAGraphMode.FULL)
@@ -316,6 +322,8 @@ class TestEagleProposerDummyRun(TestBase):
         mock_return_context.capturing = False
         mock_get_context.return_value = mock_return_context
         self.proposer.use_cuda_graph = True
+        # cpu does not support `torch.ops.vllm.maybe_pad_and_reduce`
+        self.proposer.enable_shared_expert_dp = False
         self.proposer.dummy_run(num_tokens=64,
                                 in_graph_capturing=False,
                                 aclgraph_runtime_mode=CUDAGraphMode.FULL)
 
Reference in New Issue
Block a user