### What this PR does / why we need it?

Backport: https://github.com/vllm-project/vllm-ascend/pull/252

This adds speculative decoding support on Ascend, including speculating with a draft model, matching n-grams in the prompt, using MLP speculators, and using EAGLE-based draft models.

Backport: https://github.com/vllm-project/vllm-ascend/pull/423

Spec decode `MultiStepWorker` now fully supports `TP1DraftModelRunner`: the draft model runner can run multi-step prepare directly on the NPU, and it can use MLA.

1. Before this PR, `MultiStepWorker` never stepped into the branch that prepares inputs on the NPU; it only took the branch that prepares them on the CPU (`line 52` of `vllm_ascend/patch/patch_multi_step_worker.py`). Although this has no effect on the correctness of speculative decoding, and the performance of the two branches is basically the same as of the current version, I enable the NPU-prepare branch in this PR. There are two main changes in `patch_multi_step_worker.py`: first, the `is_cuda_like()` check is removed and the `TP1DraftModelRunner` rewritten in vllm_ascend is used; second, `supports_gpu_multi_step()` is made to return `True` on NPU devices when the outer `MultiStepWorker` works correctly (a minimal sketch of this branch selection follows at the end of this description).
2. Before this PR, `TP1DraftModelRunner` only supported Attention on the NPU, not MLA. The relevant adaptation is in `vllm_ascend/worker/draft_model_runner.py`. Although I don't know why the `input_positions` of `model_input.attn_metadata` in vllm-ascend needs to be added in `execute_model`, `model_runner.py` does it, so I made the corresponding change here; otherwise, when the attention backend is MLA, it reports that `input_positions` cannot be found.
3. I commented out two lines around `line 118` of `draft_model_runner.py` to support the K>1 scenario:

   ```
   # lora_mapping=model_input.lora_mapping,
   # lora_requests=model_input.lora_requests,
   ```

   I added comments there; in the future, when vllm-ascend supports the LoRA feature, these changes can be restored.

TODO:

- [ ] revert the patch when the related issues are addressed in vllm

### How was this patch tested?

CI passed with the newly added tests.

- e2e test for medusa proposer: tests/singlecard/spec_decode/e2e/test_medusa_correctness.py
- e2e test for mlp proposer: tests/singlecard/spec_decode/e2e/test_mlp_correctness.py
- e2e test for n-gram proposer: tests/singlecard/spec_decode/e2e/test_ngram_correctness.py

Tests for patched files:

- tests/singlecard/spec_decode/test_dynamic_spec_decode.py
- tests/singlecard/spec_decode/test_multi_step_worker.py
- tests/singlecard/spec_decode/test_ngram_worker.py
- tests/singlecard/spec_decode/test_spec_decode_worker.py

---------

Signed-off-by: MengqingCao <cmq0113@163.com>
Co-authored-by: mengwei805 <mengwei25@huawei.com>
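For a concrete picture of change 1, here is a minimal, self-contained sketch of the branch selection being changed. It is illustrative only: `FakeDraftModelRunner`, `choose_prepare_branch`, and their signatures are hypothetical stand-ins rather than actual vllm or vllm-ascend symbols; only `supports_gpu_multi_step()` and the removed `is_cuda_like()` guard come from the description above. The real conditions checked on the NPU live in the rewritten runner in `vllm_ascend/worker/draft_model_runner.py`.

```python
# Minimal sketch (assumed names, not the real patch): once the is_cuda_like()
# guard is gone, the draft model runner itself decides whether multi-step
# inputs are prepared on the device (NPU) or rebuilt on the CPU each step.
from dataclasses import dataclass


@dataclass
class FakeDraftModelRunner:
    """Hypothetical stand-in for the TP1DraftModelRunner rewritten in vllm_ascend."""
    has_lora: bool = False

    def supports_gpu_multi_step(self, is_decode_only: bool) -> bool:
        # Assumption: the device-prepare path is taken for pure-decode batches
        # without LoRA; prefill or LoRA batches fall back to CPU prepare.
        return is_decode_only and not self.has_lora


def choose_prepare_branch(runner: FakeDraftModelRunner, is_decode_only: bool) -> str:
    # Previously an is_cuda_like() guard always forced the CPU branch on NPU;
    # with it removed, the runner's own capability check is consulted instead.
    if runner.supports_gpu_multi_step(is_decode_only):
        return "device-prepare: multi-step inputs advanced directly on the NPU"
    return "cpu-prepare: inputs rebuilt on the host for every step"


if __name__ == "__main__":
    runner = FakeDraftModelRunner()
    print(choose_prepare_branch(runner, is_decode_only=True))   # device-prepare
    print(choose_prepare_branch(runner, is_decode_only=False))  # cpu-prepare
```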
#
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
# This file is a part of the vllm-ascend project.
# Adapted from vllm-project/vllm/tests/spec_decode/test_utils.py
# Copyright 2023 The vLLM team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from unittest.mock import MagicMock

import pytest
import torch
from vllm.model_executor.layers.rejection_sampler import RejectionSampler
from vllm.model_executor.layers.sampler import _get_ranks
from vllm.model_executor.layers.typical_acceptance_sampler import \
    TypicalAcceptanceSampler
from vllm.sequence import SequenceGroupMetadata, get_all_seq_ids
from vllm.spec_decode.util import (get_sampled_token_logprobs,
                                   split_batch_by_proposal_len)


def test_get_all_seq_ids():
    """Verify get_all_seq_ids extracts all seq ids.
    """
    expected_seq_ids = list(range(10)) + list(range(100, 110))

    seq_group_metadata_list = [
        SequenceGroupMetadata(
            request_id=str(seq_id),
            is_prompt=True,
            seq_data={
                seq_id: MagicMock(),
            },
            sampling_params=MagicMock(),
            block_tables={
                seq_id: MagicMock(),
            },
            lora_request=None,
        ) for seq_id in expected_seq_ids
    ]

    actual_seq_ids = get_all_seq_ids(seq_group_metadata_list)
    assert actual_seq_ids == expected_seq_ids


@pytest.fixture
def fake_sequence_group_metadata():
    seq_ids = list(range(3))
    return [
        SequenceGroupMetadata(
            request_id=str(i),
            is_prompt=True,
            seq_data={
                i: MagicMock(),
            },
            sampling_params=MagicMock(),
            block_tables={
                i: MagicMock(),
            },
            lora_request=None,
        ) for i in seq_ids
    ]


def test_filter_zero_length_proposals(fake_sequence_group_metadata):
    proposal_lens = [0, 1, 0]
    _, (filtered_groups,
        indices) = split_batch_by_proposal_len(fake_sequence_group_metadata,
                                               proposal_lens)

    expected_groups = [
        fake_sequence_group_metadata[0], fake_sequence_group_metadata[2]
    ]
    expected_indices = [0, 2]

    assert filtered_groups == expected_groups
    assert indices == expected_indices


def test_filter_non_zero_length_proposals(fake_sequence_group_metadata):
    proposal_lens = [0, 1, 2]
    (filtered_groups,
     indices), _ = split_batch_by_proposal_len(fake_sequence_group_metadata,
                                               proposal_lens)

    expected_groups = [
        fake_sequence_group_metadata[1], fake_sequence_group_metadata[2]
    ]
    expected_indices = [1, 2]

    assert filtered_groups == expected_groups
    assert indices == expected_indices


def test_empty_inputs():
    _, (filtered_groups, indices) = split_batch_by_proposal_len([], [])

    assert filtered_groups == []
    assert indices == []


def test_all_zero_with_non_zero_filter(fake_sequence_group_metadata):
    proposal_lens = [0, 0, 0]
    (filtered_groups,
     indices), _ = split_batch_by_proposal_len(fake_sequence_group_metadata,
                                               proposal_lens)

    assert filtered_groups == []
    assert indices == []


def test_all_non_zero_with_zero_filter(fake_sequence_group_metadata):
    proposal_lens = [1, 1, 1]
    _, (filtered_groups,
        indices) = split_batch_by_proposal_len(fake_sequence_group_metadata,
                                               proposal_lens)

    assert filtered_groups == []
    assert indices == []


def mock_spec_decode_sampler(acceptance_sampler_method):
    """
    Returns either a RejectionSampler or TypicalAcceptanceSampler
    object depending on whether acceptance_sampler_method is
    'rejection_sampler' or 'typical_acceptance_sampler' respectively.
    """
    if acceptance_sampler_method == "rejection_sampler":
        sampler = MagicMock(spec=RejectionSampler)
        sampler.token_id_dtype = torch.int64
        return sampler
    elif acceptance_sampler_method == "typical_acceptance_sampler":
        sampler = MagicMock(spec=TypicalAcceptanceSampler)
        sampler.token_id_dtype = torch.int64
        return sampler
    else:
        raise ValueError(f"Invalid sampler name {acceptance_sampler_method}")


def test_get_sampled_token_logprobs():
    """Verify get_sampled_token_logprobs returns consistent rankings
    with regular get_ranks when probabilities match exactly.
    """
    logprob_tensor = torch.tensor(
        [[[-.1, -.1]] * 2])  # shape (num_steps, batch_size, vocab_size)
    sampled_token_tensor = torch.tensor([[1,
                                          0]])  # shape (num_steps, batch_size)
    ranks_spec_dec, _ = get_sampled_token_logprobs(logprob_tensor,
                                                   sampled_token_tensor)

    ranks_regular = _get_ranks(logprob_tensor.reshape((2, -1)),
                               sampled_token_tensor.reshape(-1))

    assert torch.equal(ranks_spec_dec.reshape(-1), ranks_regular)