[Test]Add accuracy test for multiple models (#3823)

### What this PR does / why we need it?
Add accuracy test for multiple models:
- Meta-Llama-3.1-8B-Instruct
- Qwen2.5-Omni-7B
- Qwen3-VL-8B-Instruct

- vLLM version: v0.11.0
- vLLM main:
83f478bb19

---------

Signed-off-by: MrZ20 <2609716663@qq.com>
This commit is contained in:
ZengSilong
2025-11-04 14:46:39 +08:00
committed by GitHub
parent e9bb4491ec
commit dc1a6cb503
9 changed files with 46 additions and 8 deletions

View File

@@ -49,8 +49,9 @@ jobs:
model_name: Qwen3-8B
- runner: a2-1
model_name: Qwen2.5-VL-7B-Instruct
- runner: a2-1
model_name: Qwen2-Audio-7B-Instruct
# TODO: This model has a bug that needs to be fixed and re-added
# - runner: a2-1
# model_name: Qwen2-Audio-7B-Instruct
- runner: a2-2
model_name: Qwen3-30B-A3B
- runner: a2-2
@@ -61,6 +62,12 @@ jobs:
model_name: Qwen3-Next-80B-A3B-Instruct
- runner: a2-1
model_name: Qwen3-8B-W8A8
- runner: a2-1
model_name: Qwen3-VL-8B-Instruct
- runner: a2-1
model_name: Qwen2.5-Omni-7B
- runner: a2-1
model_name: Meta-Llama-3.1-8B-Instruct
fail-fast: false
# tests will be triggered only when both the 'accuracy-test' and 'ready-for-test' labels are applied
if: >-

View File

@@ -1,5 +1,4 @@
model_name: "deepseek-ai/DeepSeek-V2-Lite"
runner: "linux-aarch64-a2-2"
hardware: "Atlas A2 Series"
tasks:
- name: "gsm8k"

View File

@@ -0,0 +1,11 @@
model_name: "LLM-Research/Meta-Llama-3.1-8B-Instruct"
hardware: "Atlas A2 Series"
tasks:
- name: "gsm8k"
metrics:
- name: "exact_match,strict-match"
value: 0.82
- name: "exact_match,flexible-extract"
value: 0.84
num_fewshot: 5

View File

@@ -0,0 +1,10 @@
model_name: "Qwen/Qwen2.5-Omni-7B"
hardware: "Atlas A2 Series"
model: "vllm-vlm"
tasks:
- name: "mmmu_val"
metrics:
- name: "acc,none"
value: 0.52
max_model_len: 8192
gpu_memory_utilization: 0.7

View File

@@ -1,5 +1,4 @@
model_name: "Qwen/Qwen2.5-VL-7B-Instruct"
runner: "linux-aarch64-a2-1"
hardware: "Atlas A2 Series"
model: "vllm-vlm"
tasks:
@@ -7,4 +6,4 @@ tasks:
metrics:
- name: "acc,none"
value: 0.51
max_model_len: 8192
max_model_len: 8192

View File

@@ -1,5 +1,4 @@
model_name: "Qwen/Qwen3-30B-A3B"
runner: "linux-aarch64-a2-2"
hardware: "Atlas A2 Series"
tasks:
- name: "gsm8k"
@@ -17,4 +16,4 @@ gpu_memory_utilization: 0.6
enable_expert_parallel: True
tensor_parallel_size: 2
apply_chat_template: False
fewshot_as_multiturn: False
fewshot_as_multiturn: False

View File

@@ -1,5 +1,4 @@
model_name: "Qwen/Qwen3-8B-Base"
runner: "linux-aarch64-a2-1"
hardware: "Atlas A2 Series"
tasks:
- name: "gsm8k"

View File

@@ -0,0 +1,11 @@
model_name: "Qwen/Qwen3-VL-8B-Instruct"
hardware: "Atlas A2 Series"
model: "vllm-vlm"
tasks:
- name: "mmmu_val"
metrics:
- name: "acc,none"
value: 0.55
max_model_len: 8192
batch_size: 32
gpu_memory_utilization: 0.7

View File

@@ -6,3 +6,6 @@ Qwen2-7B.yaml
Qwen2-VL-7B-Instruct.yaml
Qwen2-Audio-7B-Instruct.yaml
Qwen3-VL-30B-A3B-Instruct.yaml
Qwen3-VL-8B-Instruct.yaml
Qwen2.5-Omni-7B.yaml
Meta-Llama-3.1-8B-Instruct.yaml