[Bugfix] fix bug about model type of qwen3_vl_8b_instruct_w8a8 (#7383)
### What this PR does / why we need it?
Adapt to the model type of Qwen3-VL-8B-Instruct-W8A8
- vLLM version: v0.17.0
- vLLM main:
4034c3d32e
---------
Signed-off-by: betta18 <jiangmengyu1@huawei.com>
Co-authored-by: betta18 <jiangmengyu1@huawei.com>
This commit is contained in:
tests/e2e/models/configs/Qwen3-VL-8B-Instruct-W8A8.yaml (new file, 12 lines)
@@ -0,0 +1,12 @@
# E2E accuracy config for the W8A8-quantized Qwen3-VL-8B-Instruct model
# on Ascend hardware (12 lines, per the diff hunk header @@ -0,0 +1,12 @@).
model_name: "vllm-ascend/Qwen3-VL-8B-Instruct-W8A8"
hardware: "Atlas A2 Series"
model: "vllm-vlm"
tasks:
  - name: "mmmu_val"
    metrics:
      - name: "acc,none"
        value: 0.52
max_model_len: 8192
batch_size: 32
gpu_memory_utilization: 0.8
quantization: ascend
Reference in New Issue
Block a user