#
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
# This file is a part of the vllm-ascend project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
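
# Each module imported below monkey-patches a piece of vLLM at import time so
# that it works on Ascend NPU; importing a patch module is what applies it,
# and nothing in this file is meant to be called directly.
#
# For orientation, a patch module generally takes the following shape (a
# minimal sketch with hypothetical names, not any specific patch here):
#
#     import vllm.worker.some_module as target
#
#     def _ascend_impl(*args, **kwargs):
#         ...  # NPU-compatible reimplementation of target.some_function
#
#     target.some_function = _ascend_impl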

from vllm.triton_utils import HAS_TRITON

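# Triton-dependent patches are applied only when Triton is importable.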
if HAS_TRITON:
    import vllm_ascend.patch.worker.patch_triton  # noqa
    import vllm_ascend.patch.worker.patch_v2.patch_triton  # noqa
# isort: off
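# With isort off, the patches below are applied in exactly the order listed.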
import vllm_ascend.patch.worker.patch_weight_utils  # noqa
import vllm_ascend.patch.platform.patch_sched_yield  # noqa
import vllm_ascend.patch.worker.patch_unquantized_gemm  # noqa
import vllm_ascend.patch.worker.patch_bert  # noqa
import vllm_ascend.patch.worker.patch_distributed  # noqa
import vllm_ascend.patch.worker.patch_minimax_m2  # noqa
import vllm_ascend.patch.worker.patch_minimax_m2_linear_attn  # noqa
import vllm_ascend.patch.worker.patch_mamba_utils  # noqa
import vllm_ascend.patch.worker.patch_multimodal_merge  # noqa
import vllm_ascend.patch.worker.patch_gdn_attn  # noqa
import vllm_ascend.patch.worker.patch_qwen3_next  # noqa
import vllm_ascend.patch.worker.patch_qwen3_next_mtp  # noqa
import vllm_ascend.patch.worker.patch_qwen3_5  # noqa
import vllm_ascend.patch.worker.patch_rejection_sampler  # noqa
import vllm_ascend.patch.worker.patch_v2.patch_eagle  # noqa
import vllm_ascend.patch.worker.patch_v2.patch_uva  # noqa
import vllm_ascend.patch.worker.patch_huanyuan_vl  # noqa
import vllm_ascend.patch.worker.patch_routed_experts_capturer  # noqa
import vllm_ascend.patch.worker.patch_npugraph_ex_triton  # noqa
import vllm_ascend.patch.worker.patch_kimi_k25  # noqa
import vllm_ascend.patch.worker.patch_draft_quarot  # noqa
import vllm_ascend.patch.worker.patch_cudagraph  # noqa
import vllm_ascend.patch.worker.patch_deepseek_mtp  # noqa
import vllm_ascend.patch.worker.patch_v2.patch_input_batch  # noqa
import vllm_ascend.patch.worker.patch_v2.patch_model_state  # noqa
import vllm_ascend.patch.worker.patch_v2.patch_block_table  # noqa
import vllm_ascend.patch.worker.patch_qwen3vl  # noqa
import vllm_ascend.patch.worker.patch_deepencoder2  # noqa
import vllm_ascend.patch.worker.patch_qwen3_c8  # noqa