<!-- Thanks for sending a pull request! BEFORE SUBMITTING, PLEASE READ https://docs.vllm.ai/en/latest/contributing/overview.html -->

### What this PR does / why we need it?

The custom DeepSeek modeling was changed in https://github.com/vllm-project/vllm-ascend/pull/585 to support graph mode, so this PR applies the same changes to the custom deepseek_mtp modeling. Some modifications needed for k > 1 were not carried over by https://github.com/vllm-project/vllm-ascend/pull/429; they are added here.

To better cover the MTP feature in the vllm-ascend repository, I added test cases for graph mode (torchair), but they are skipped because torchair cannot correctly clean up memory in VllmRunner. I also added cases for MTP quantization weights, but the test weights are not ready yet, so those cases are skipped as well and will be enabled once the quantized test weights are available.

https://github.com/vllm-project/vllm-ascend/pull/648 did not completely fix the sample change issue (https://github.com/vllm-project/vllm-ascend/issues/660), so I added the relevant changes here.

### Does this PR introduce _any_ user-facing change?

You can now use MTP with DeepSeek V3/R1 float or quantized weights in eager mode:

```python
llm = LLM(
    model="wemaster/deepseek_mtp_main_random_bf16",
    tensor_parallel_size=2,
    speculative_config={
        "num_speculative_tokens": 1,
    },
    enforce_eager=True,
    trust_remote_code=True,
    disable_log_stats=False,
    gpu_memory_utilization=0.8,
    max_model_len=64,
)
```

or with graph mode (torchair):

```python
llm = LLM(
    model="wemaster/deepseek_mtp_main_random_bf16",
    tensor_parallel_size=2,
    speculative_config={
        "num_speculative_tokens": 1,
    },
    trust_remote_code=True,
    additional_config={
        'enable_graph_mode': True,
    },
    disable_log_stats=False,
    gpu_memory_utilization=0.8,
    max_model_len=64,
)
```

Notes:
1. k > 1 is now supported, so you can set `num_speculative_tokens` > 1 if there is sufficient redundant computing power (see the sketch right after this description);
2. MTP is not supported in V1; we will support it once vLLM does in https://github.com/vllm-project/vllm/issues/13500.
3. If running MTP fails with a `segmentation fault`, you can follow the v0.7.3 patch https://github.com/vllm-project/vllm-ascend/pull/236, file `vllm_ascend/patch/patch_metrics.py`, method `__npu_async_metrics_collector_init__`.

### How was this patch tested?

Tested locally and by CI.

Signed-off-by: mengwei805 <mengwei25@huawei.com>
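Referring to note 1 above, here is a minimal sketch (not taken from this PR) of enabling more than one speculative token. It reuses the eager-mode example model and parameters from above; the value `2` is only illustrative, assuming the device has spare compute for the extra draft steps.

```python
from vllm import LLM

# Hedged sketch: same example model as in the eager-mode snippet above.
# num_speculative_tokens > 1 assumes sufficient redundant computing power.
llm = LLM(
    model="wemaster/deepseek_mtp_main_random_bf16",
    tensor_parallel_size=2,
    speculative_config={
        "num_speculative_tokens": 2,  # k > 1 is now supported
    },
    enforce_eager=True,
    trust_remote_code=True,
    disable_log_stats=False,
    gpu_memory_utilization=0.8,
    max_model_len=64,
)
```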
#
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
# This file is a part of the vllm-ascend project.
# Adapted from vllm-project/vllm/tests/spec_decode/test_dynamic_spec_decode.py
# Copyright 2023 The vLLM team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from unittest.mock import MagicMock, patch

import pytest
import torch
from vllm.sequence import ExecuteModelRequest
from vllm.spec_decode.metrics import AsyncMetricsCollector
from vllm.spec_decode.multi_step_worker import MultiStepWorker
from vllm.spec_decode.spec_decode_worker import SpecDecodeWorker
from vllm.spec_decode.top1_proposer import Top1Proposer

from tests.singlecard.spec_decode.test_utils import mock_spec_decode_sampler
from tests.singlecard.spec_decode.utils import create_batch, mock_worker


@pytest.mark.parametrize('queue_size', [4])
@pytest.mark.parametrize('batch_size', [1])
@pytest.mark.parametrize('k', [1])
@pytest.mark.parametrize("acceptance_sampler_method",
                         ["rejection_sampler", "typical_acceptance_sampler"])
@torch.inference_mode()
def test_disable_spec_tokens(queue_size: int, batch_size: int, k: int,
                             acceptance_sampler_method: str):
    """Verify that speculative tokens are disabled when the batch size
    exceeds the threshold.
    """
    disable_by_batch_size = 3
    draft_worker = mock_worker(cls=MultiStepWorker)
    target_worker = mock_worker()
    metrics_collector = MagicMock(spec=AsyncMetricsCollector)
    worker = SpecDecodeWorker(proposer_worker=draft_worker,
                              scorer_worker=target_worker,
                              spec_decode_sampler=mock_spec_decode_sampler(
                                  acceptance_sampler_method),
                              disable_logprobs=False,
                              metrics_collector=metrics_collector,
                              disable_by_batch_size=disable_by_batch_size)

    exception_secret = 'artificial stop'
    draft_worker.get_spec_proposals.side_effect = ValueError(exception_secret)

    seq_group_metadata_list, _, _ = create_batch(batch_size, k)
    execute_model_req = ExecuteModelRequest(
        seq_group_metadata_list=seq_group_metadata_list,
        num_lookahead_slots=k,
        running_queue_size=queue_size)

    if queue_size > disable_by_batch_size:
        with patch.object(worker,
                          '_run_no_spec',
                          side_effect=ValueError(exception_secret)), \
                pytest.raises(ValueError, match=exception_secret):
            worker.execute_model(execute_model_req=execute_model_req)

    # When the batch size is larger than the threshold,
    # we expect no speculative tokens (0).
    expected_num_spec_tokens = None if queue_size < disable_by_batch_size else 0
    assert seq_group_metadata_list[
        0].num_speculative_tokens == expected_num_spec_tokens

    draft_worker.sampler_output.side_effect = ValueError(exception_secret)

    proposer = Top1Proposer(
        worker=draft_worker,
        device='cpu',  # not used
        vocab_size=100,  # not used
        # Must be long enough to avoid being skipped due to length.
        max_proposal_len=1024,
    )

    if queue_size < disable_by_batch_size:
        # Should raise exception when executing the mocked draft model.
        with pytest.raises(ValueError, match=exception_secret):
            proposer.get_spec_proposals(
                execute_model_req=ExecuteModelRequest(
                    seq_group_metadata_list=seq_group_metadata_list,
                    num_lookahead_slots=k),
                seq_ids_with_bonus_token_in_last_step=set())
    else:
        # Should not execute the draft model because spec decode is disabled
        # for all requests. Accordingly, the proposal length should be 0.
        proposals = proposer.get_spec_proposals(
            execute_model_req=ExecuteModelRequest(
                seq_group_metadata_list=seq_group_metadata_list,
                num_lookahead_slots=k),
            seq_ids_with_bonus_token_in_last_step=set())
        assert proposals.proposal_lens.tolist() == [0] * batch_size