### What this PR does / why we need it?

The custom DeepSeek modeling was changed in https://github.com/vllm-project/vllm-ascend/pull/585 to support graph mode, so this PR applies the same changes to the custom deepseek_mtp modeling. In addition, some modifications for k>1 were not carried over by https://github.com/vllm-project/vllm-ascend/pull/429; they are added here.

To better maintain the MTP feature in the vllm-ascend repository, this PR adds test cases for graph mode (torchair), but they are skipped for now because torchair cannot correctly clean up memory in VllmRunner. It also adds test cases for MTP quantization weights; since the test weights are not ready yet, those cases are skipped as well and will be enabled once the quantized test weights are available.

https://github.com/vllm-project/vllm-ascend/pull/648 did not completely fix the sample change issue (https://github.com/vllm-project/vllm-ascend/issues/660), so the relevant changes are added here.

### Does this PR introduce _any_ user-facing change?

You can now enable MTP for DeepSeek V3/R1 with float or quantized weights in eager mode:

```python
llm = LLM(
    model="wemaster/deepseek_mtp_main_random_bf16",
    tensor_parallel_size=2,
    speculative_config={
        "num_speculative_tokens": 1,
    },
    enforce_eager=True,
    trust_remote_code=True,
    disable_log_stats=False,
    gpu_memory_utilization=0.8,
    max_model_len=64,
)
```

or in graph mode (torchair):

```python
llm = LLM(
    model="wemaster/deepseek_mtp_main_random_bf16",
    tensor_parallel_size=2,
    speculative_config={
        "num_speculative_tokens": 1,
    },
    trust_remote_code=True,
    additional_config={
        'enable_graph_mode': True,
    },
    disable_log_stats=False,
    gpu_memory_utilization=0.8,
    max_model_len=64,
)
```

Additional notes:

1. k>1 is now supported, so you can set `num_speculative_tokens > 1` if there is sufficient spare computing power (see the k=2 sketch at the end of this description).
2. MTP is not supported in V1; we will support it once vLLM does in https://github.com/vllm-project/vllm/issues/13500.
3. If running MTP fails with a `segmentation fault`, you can apply the v0.7.3 patch from https://github.com/vllm-project/vllm-ascend/pull/236, file `vllm_ascend/patch/patch_metrics.py`, method `__npu_async_metrics_collector_init__`.

### How was this patch tested?

Tested locally (passed) and by CI.

Signed-off-by: mengwei805 <mengwei25@huawei.com>
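Below is a minimal sketch of note 1 above: the eager-mode example with `num_speculative_tokens` raised above 1. It reuses the same random test checkpoint as the examples in this description; the value k=2, the prompt, and the sampling settings are illustrative only and not part of this PR.

```python
from vllm import LLM, SamplingParams

# Same eager-mode setup as above, but draft k=2 speculative tokens per step.
# A larger k can speed up decoding when spare compute is available, at the
# cost of wasted draft work when proposals are rejected.
llm = LLM(
    model="wemaster/deepseek_mtp_main_random_bf16",
    tensor_parallel_size=2,
    speculative_config={
        "num_speculative_tokens": 2,  # k > 1, per note 1
    },
    enforce_eager=True,
    trust_remote_code=True,
    disable_log_stats=False,
    gpu_memory_utilization=0.8,
    max_model_len=64,
)

# Hypothetical smoke test: greedy decoding keeps output comparable to k=1.
outputs = llm.generate(["Hello, my name is"],
                       SamplingParams(temperature=0.0, max_tokens=32))
print(outputs[0].outputs[0].text)
```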
#
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
# This file is a part of the vllm-ascend project.
# Adapted from vllm-project/vllm/tests/spec_decode/test_ngram_worker.py
# Copyright 2023 The vLLM team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import torch
from vllm.sequence import ExecuteModelRequest
from vllm.spec_decode.ngram_worker import NGramWorker
from vllm.spec_decode.top1_proposer import Top1Proposer

from tests.singlecard.spec_decode.utils import (
    create_seq_group_metadata_from_prompts, create_worker)

def test_ngram_algo_correctness_for_single_no_match():
    """Verify that our ngram algo finds the right candidate in the prompt.

    Covers the scenario where no candidate can be found for the single
    sequence in the batch.
    """
    block_size = 32
    num_gpu_blocks = 2048 // block_size
    seed = 100
    model_name = 'JackFram/llama-68m'
    vocab_size = 32_000
    device = 'npu:0'

    ngram_worker = create_worker(
        NGramWorker,
        model_name,
        block_size,
        num_gpu_blocks,
        seed,
    )

    proposer = Top1Proposer(
        worker=ngram_worker,
        device=device,
        vocab_size=vocab_size,
        max_proposal_len=20,
    )

    # set ngram window [1, 3], which is window=1/2/3
    ngram_worker.set_ngram_window_size(1, 3)

    prompts = [
        # shall find no candidate
        [1, 2, 3, 4, 5, 6, 7],
    ]

    proposal_len = 5
    final_prompt_lens = [len(prompt) + proposal_len for prompt in prompts]
    seq_group_metadata_list = create_seq_group_metadata_from_prompts(
        prompts,
        num_gpu_blocks,
        block_size,
        final_prompt_lens=final_prompt_lens)

    proposals = proposer.get_spec_proposals(
        execute_model_req=ExecuteModelRequest(
            seq_group_metadata_list=seq_group_metadata_list,
            num_lookahead_slots=proposal_len),
        seq_ids_with_bonus_token_in_last_step=None)

    assert torch.is_tensor(proposals.proposal_token_ids)
    assert torch.is_tensor(proposals.proposal_probs)

    assert proposals.proposal_token_ids.shape == torch.Size([1, proposal_len])
    assert proposals.proposal_probs.shape[:-1] == torch.Size([1, proposal_len])
    assert proposals.proposal_lens.shape == torch.Size([1])
    assert proposals.proposal_lens.tolist() == [0]

def test_ngram_algo_correctness_for_batches_not_match_all():
    """Verify that our ngram algo finds the right candidate in the prompt.

    Covers the scenario where only some of the batched sequences yield a
    candidate.
    """
    block_size = 32
    num_gpu_blocks = 2048 // block_size
    seed = 100
    model_name = 'JackFram/llama-68m'
    vocab_size = 32_000
    device = 'npu:0'

    ngram_worker = create_worker(
        NGramWorker,
        model_name,
        block_size,
        num_gpu_blocks,
        seed,
    )

    proposer = Top1Proposer(
        worker=ngram_worker,
        device=device,
        vocab_size=vocab_size,
        max_proposal_len=20,
    )

    # set ngram window [1, 3], which is window=1/2/3
    ngram_worker.set_ngram_window_size(1, 3)

    prompts = [
        # shall find no candidate
        [1, 2, 3, 4, 5, 6, 7],
        # shall find candidate 12,13,14,15,16
        [11, 12, 13, 14, 15, 16, 11],
        # shall find candidate 23,24,25,26,21
        [21, 21, 22, 23, 24, 25, 26, 21, 22],
        # shall find candidate 34,35,36,37,38
        [31, 32, 31, 32, 33, 34, 35, 36, 37, 38, 31, 32, 33],
        # shall find no candidate as exceed max_proposal_len
        [
            31, 32, 31, 32, 31, 32, 31, 32, 31, 32, 31, 32, 33, 34, 35, 36, 37,
            38, 31, 32, 33
        ],
    ]

    proposal_len = 5
    final_prompt_lens = [len(prompt) + proposal_len for prompt in prompts]
    seq_group_metadata_list = create_seq_group_metadata_from_prompts(
        prompts,
        num_gpu_blocks,
        block_size,
        final_prompt_lens=final_prompt_lens)

    # The drafter normally runs on decode requests only, so mark every
    # sequence group as a decode (non-prompt) request.
    for sg in seq_group_metadata_list:
        sg.is_prompt = False
    proposals = proposer.get_spec_proposals(
        execute_model_req=ExecuteModelRequest(
            seq_group_metadata_list=seq_group_metadata_list,
            num_lookahead_slots=proposal_len),
        seq_ids_with_bonus_token_in_last_step=None)

    assert torch.is_tensor(proposals.proposal_token_ids)
    assert torch.is_tensor(proposals.proposal_probs)

    assert proposals.proposal_token_ids.shape == torch.Size([5, proposal_len])
    assert proposals.proposal_probs.shape[:-1] == torch.Size([5, proposal_len])
    assert proposals.proposal_lens.shape == torch.Size([5])

    # the first sequence has no match and the last exceeds max_proposal_len,
    # so their proposal lens should be overwritten to 0
    assert proposals.proposal_lens.tolist(
    ) == [0] + [proposal_len for _ in range(3)] + [0]

    for i in range(proposal_len):
        assert proposals.proposal_token_ids[0][i] == -1
        assert proposals.proposal_token_ids[1][i] == prompts[1][i + 1]
        assert proposals.proposal_token_ids[2][i] == prompts[2][i + 3]
        assert proposals.proposal_token_ids[3][i] == prompts[3][i + 5]
        assert proposals.proposal_token_ids[4][i] == -1

def test_ngram_algo_correctness_for_batches_match_all():
    """Verify that our ngram algo finds the right candidate in the prompt.

    Covers the scenario where every sequence in the batch yields a candidate.
    """
    block_size = 32
    num_gpu_blocks = 2048 // block_size
    seed = 100
    model_name = 'JackFram/llama-68m'
    vocab_size = 32_000
    device = 'npu:0'

    ngram_worker = create_worker(
        NGramWorker,
        model_name,
        block_size,
        num_gpu_blocks,
        seed,
    )

    proposer = Top1Proposer(
        worker=ngram_worker,
        device=device,
        vocab_size=vocab_size,
        max_proposal_len=20,
    )

    # set ngram window [1, 3], which is window=1/2/3
    ngram_worker.set_ngram_window_size(1, 3)

    prompts = [
        # shall find candidate 12,13,14,15,16
        [11, 12, 13, 14, 15, 16, 11],
        # shall find candidate 23,24,25,26,21
        [21, 21, 22, 23, 24, 25, 26, 21, 22],
        # shall find candidate 34,35,36,37,38
        [31, 32, 31, 32, 33, 34, 35, 36, 37, 38, 31, 32, 33],
    ]

    proposal_len = 5
    final_prompt_lens = [len(prompt) + proposal_len for prompt in prompts]
    seq_group_metadata_list = create_seq_group_metadata_from_prompts(
        prompts,
        num_gpu_blocks,
        block_size,
        final_prompt_lens=final_prompt_lens)

    # Normally the drafter is run on decode requests only; here we check the
    # output of the ngram worker as it is the sole proposer that has no
    # forward.
    for sg in seq_group_metadata_list:
        sg.is_prompt = False
    proposals = proposer.get_spec_proposals(
        execute_model_req=ExecuteModelRequest(
            seq_group_metadata_list=seq_group_metadata_list,
            num_lookahead_slots=proposal_len),
        seq_ids_with_bonus_token_in_last_step=None)

    assert torch.is_tensor(proposals.proposal_token_ids)
    assert torch.is_tensor(proposals.proposal_probs)

    assert proposals.proposal_token_ids.shape == torch.Size([3, proposal_len])
    assert proposals.proposal_probs.shape[:-1] == torch.Size([3, proposal_len])
    assert proposals.proposal_lens.shape == torch.Size([3])

    assert proposals.proposal_lens.tolist() == [proposal_len for _ in range(3)]

    for i in range(proposal_len):
        assert proposals.proposal_token_ids[0][i] == prompts[0][i + 1]
        assert proposals.proposal_token_ids[1][i] == prompts[1][i + 3]
        assert proposals.proposal_token_ids[2][i] == prompts[2][i + 5]