2025-04-16 09:28:58 +08:00
|
|
|
#
|
|
|
|
|
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
|
|
|
|
|
# This file is a part of the vllm-ascend project.
|
|
|
|
|
#
|
|
|
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
|
# you may not use this file except in compliance with the License.
|
|
|
|
|
# You may obtain a copy of the License at
|
|
|
|
|
#
|
|
|
|
|
# http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
|
#
|
|
|
|
|
# Unless required by applicable law or agreed to in writing, software
|
|
|
|
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
|
# See the License for the specific language governing permissions and
|
|
|
|
|
# limitations under the License.
|
|
|
|
|
#
|
|
|
|
|
|
2025-10-21 20:19:46 +08:00
|
|
|
from vllm.triton_utils import HAS_TRITON
|
|
|
|
|
|
|
|
|
|
if HAS_TRITON:
|
|
|
|
|
import vllm_ascend.patch.worker.patch_triton
|
2026-03-25 09:08:44 +08:00
|
|
|
import vllm_ascend.patch.worker.patch_v2.patch_triton # noqa
|
2025-10-21 20:19:46 +08:00
|
|
|
|
2026-03-10 23:28:58 +08:00
|
|
|
|
2025-10-21 20:19:46 +08:00
|
|
|
# isort: off
|
2026-03-16 22:49:05 +08:00
|
|
|
import vllm_ascend.patch.worker.patch_weight_utils # noqa
|
2025-10-24 00:06:45 +08:00
|
|
|
import vllm_ascend.patch.platform.patch_sched_yield # noqa
|
2026-01-19 09:28:07 +08:00
|
|
|
import vllm_ascend.patch.worker.patch_unquantized_gemm # noqa
|
2025-12-10 11:37:57 +08:00
|
|
|
import vllm_ascend.patch.worker.patch_bert # noqa
|
2025-10-21 20:19:46 +08:00
|
|
|
import vllm_ascend.patch.worker.patch_distributed # noqa
|
2026-03-11 00:12:02 +08:00
|
|
|
import vllm_ascend.patch.worker.patch_minimax_m2 # noqa
|
|
|
|
|
import vllm_ascend.patch.worker.patch_minimax_m2_linear_attn # noqa
|
2026-03-15 09:44:09 +08:00
|
|
|
import vllm_ascend.patch.worker.patch_mamba_utils # noqa
|
2025-10-21 20:19:46 +08:00
|
|
|
import vllm_ascend.patch.worker.patch_multimodal_merge # noqa
|
2026-03-22 23:09:23 +08:00
|
|
|
import vllm_ascend.patch.worker.patch_gdn_attn # noqa
|
2025-12-18 11:31:04 +08:00
|
|
|
import vllm_ascend.patch.worker.patch_qwen3_next # noqa
|
2025-12-10 22:54:24 +08:00
|
|
|
import vllm_ascend.patch.worker.patch_qwen3_next_mtp # noqa
|
2026-03-13 16:14:15 +08:00
|
|
|
import vllm_ascend.patch.worker.patch_qwen3_5 # noqa
|
2025-12-16 11:32:26 +08:00
|
|
|
import vllm_ascend.patch.worker.patch_rejection_sampler # noqa
|
2026-03-25 09:08:44 +08:00
|
|
|
import vllm_ascend.patch.worker.patch_v2.patch_eagle # noqa
|
|
|
|
|
import vllm_ascend.patch.worker.patch_v2.patch_uva # noqa
|
2026-02-05 19:31:17 +08:00
|
|
|
import vllm_ascend.patch.worker.patch_huanyuan_vl # noqa
|
2026-02-26 10:22:47 +08:00
|
|
|
import vllm_ascend.patch.worker.patch_routed_experts_capturer # noqa
|
2026-02-12 08:44:06 +08:00
|
|
|
import vllm_ascend.patch.worker.patch_npugraph_ex_triton # noqa
|
2026-02-25 14:51:46 +08:00
|
|
|
import vllm_ascend.patch.worker.patch_kimi_k25 # noqa
|
2026-03-09 10:43:06 +08:00
|
|
|
import vllm_ascend.patch.worker.patch_draft_quarot # noqa
|
[main][bugfix] Fixed the problem of speculative decoding in FULL mode (#7148)
### What this PR does / why we need it?
Fixed the error in speculative decoding in FULL mode when `num_spec + 1`
is not in `cudagraph_capture_sizes`.
Now, we can run speculative decoding in FULL mode, but with the drafter
running in eager mode.
It depends on https://github.com/vllm-project/vllm-ascend/pull/7144 .
### Does this PR introduce _any_ user-facing change?
N/A
### How was this patch tested?
Test code is shown as below:
```python
prompts = [
"1.Who are you?",
"2. Who are you?",
]
sampling_params = SamplingParams(temperature=0.0, top_p=0.95, top_k=40, max_tokens=200)
llm = LLM(
model="/home/some-model/Meta-Llama-3.1-8B-Instruct",
tensor_parallel_size=1,
max_num_seqs=32,
# enforce_eager=True,
disable_log_stats=False,
distributed_executor_backend="mp",
gpu_memory_utilization=0.7,
async_scheduling=True,
speculative_config={
"enforce_eager": True,
"model": "/home/some-model/EAGLE3-LLaMA3.1-Instruct-8B",
"disable_padded_drafter_batch": False,
"method": "eagle3",
"num_speculative_tokens": 2,
},
compilation_config={
"cudagraph_mode": "FULL",
"cudagraph_num_of_warmups": 1,
},
max_model_len=4096,
enable_prefix_caching=False,
)
outputs = llm.generate(prompts, sampling_params)
```
The result before:
```text
File "/vllm-workspace/vllm/vllm/v1/cudagraph_dispatcher.py", line 140, in _create_padded_batch_descriptor
assert num_tokens_padded % uniform_decode_query_len == 0
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
AssertionError
```
The result after:
```text
--------------------------------------------------
total_num_output_tokens: 400
num_drafts: 249
num_draft_tokens: 498
num_accepted_tokens: 149
mean acceptance length: 1.60
--------------------------------------------------
acceptance at token 0: 0.43
acceptance at token 1: 0.17
```
- vLLM version: v0.16.0
- vLLM main:
https://github.com/vllm-project/vllm/commit/4034c3d32e30d01639459edd3ab486f56993876d
Signed-off-by: drslark <slarksblood@qq.com>
2026-03-12 14:51:12 +08:00
|
|
|
import vllm_ascend.patch.worker.patch_cudagraph # noqa
|
2026-03-12 20:01:24 +08:00
|
|
|
import vllm_ascend.patch.worker.patch_deepseek_mtp # noqa
|
2026-03-25 09:08:44 +08:00
|
|
|
import vllm_ascend.patch.worker.patch_v2.patch_input_batch # noqa
|
|
|
|
|
import vllm_ascend.patch.worker.patch_v2.patch_model_state # noqa
|
|
|
|
|
import vllm_ascend.patch.worker.patch_v2.patch_block_table # noqa
|
2026-03-31 14:49:29 +08:00
|
|
|
import vllm_ascend.patch.worker.patch_deepencoder2 # noqa
|