[Bugfix] fix model type mapping for qwen3_vl_8b_instruct_w8a8 (#7383)

### What this PR does / why we need it?
Adapt to the model type reported by Qwen3-VL-8B-Instruct-W8A8: the quantized checkpoint uses `qwen3_vl`, so the `QUANT_MODEL_PREFIX_MAPPINGS` entry previously keyed `qwen3_vl_text` never matched. Rename the key, register the model in the model list, and add it to the accuracy CI with a new accuracy config.

- vLLM version: v0.17.0
- vLLM main: 4034c3d32e
---------
Signed-off-by: betta18 <jiangmengyu1@huawei.com>
Co-authored-by: betta18 <jiangmengyu1@huawei.com>
Commit 305820f1a9 (parent fb8e22ec00), authored by jiangmengyu18 on 2026-03-18 20:30:03 +08:00, committed by GitHub.
4 changed files with 15 additions and 1 deletion


@@ -222,6 +222,7 @@
"vllm-ascend/Qwen3-Next-80B-A3B-Instruct-W8A8-Pruning",
"vllm-ascend/Qwen3-Omni-30B-A3B-Thinking",
"vllm-ascend/Qwen3-VL-8B-Instruct",
"vllm-ascend/Qwen3-VL-8B-Instruct-W8A8",
"vllm-ascend/TinyLlama-1.1B-Chat-v0.3",
"vllm-ascend/benchmark",
"vllm-ascend/ilama-3.2-1B",


@@ -250,6 +250,7 @@ jobs:
 - name: accuracy-group-1
   os: linux-aarch64-a2b3-1
   model_list:
+    - Qwen3-VL-8B-Instruct-W8A8
     - Qwen3-8B
     - Qwen2-Audio-7B-Instruct
     - Qwen3-8B-W8A8


@@ -0,0 +1,12 @@
+model_name: "vllm-ascend/Qwen3-VL-8B-Instruct-W8A8"
+hardware: "Atlas A2 Series"
+model: "vllm-vlm"
+tasks:
+  - name: "mmmu_val"
+    metrics:
+      - name: "acc,none"
+        value: 0.52
+max_model_len: 8192
+batch_size: 32
+gpu_memory_utilization: 0.8
+quantization: ascend
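
For context, configs like the one added above drive the accuracy checks. The minimal sketch below shows how its fields could map onto an lm-evaluation-harness run; the invocation, file path, and tolerance are assumptions for illustration, not the actual CI harness code.

```python
# Illustrative sketch only: feed the per-model accuracy config above into
# lm-evaluation-harness. The YAML keys come from the file in this diff; the
# lm_eval invocation, file path, and tolerance are assumptions, not this PR.
import yaml
import lm_eval

with open("Qwen3-VL-8B-Instruct-W8A8.yaml") as f:  # hypothetical path
    cfg = yaml.safe_load(f)

results = lm_eval.simple_evaluate(
    model=cfg["model"],  # "vllm-vlm": lm_eval's multimodal vLLM backend
    model_args=(
        f"pretrained={cfg['model_name']},"
        f"max_model_len={cfg['max_model_len']},"
        f"gpu_memory_utilization={cfg['gpu_memory_utilization']},"
        f"quantization={cfg['quantization']}"
    ),
    tasks=[t["name"] for t in cfg["tasks"]],  # ["mmmu_val"]
    batch_size=cfg["batch_size"],
)

# Compare the measured metric against the expected value from the config (0.52).
measured = results["results"]["mmmu_val"]["acc,none"]
expected = cfg["tasks"][0]["metrics"][0]["value"]
assert measured >= expected * 0.97, f"accuracy regression: {measured} < {expected}"
```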


@@ -57,7 +57,7 @@ QUANT_MODEL_PREFIX_MAPPINGS: dict[str, dict[str, str]] = {
"language_model.lm_head.": "lm_head.",
"language_model.model.": "model.language_model.",
},
"qwen3_vl_text": {
"qwen3_vl": {
"visual.": "model.visual.",
"language_model.lm_head.": "lm_head.",
"language_model.model.": "model.language_model.",