Fix accuracy test config and add DeepSeek-V2-Lite test (#2261)
### What this PR does / why we need it?
This PR fixes the accuracy test configuration related to
https://github.com/vllm-project/vllm-ascend/pull/2073. Users can now
run accuracy tests on multiple models simultaneously and generate
a separate report file for each model by running:
```bash
cd ~/vllm-ascend
pytest -sv ./tests/e2e/models/test_lm_eval_correctness.py \
--config-list-file ./tests/e2e/models/configs/accuracy.txt
```
### Does this PR introduce _any_ user-facing change?
no
### How was this patch tested?
<img width="1648" height="511" alt="image"
src="https://github.com/user-attachments/assets/1757e3b8-a6b7-44e5-b701-80940dc756cd"
/>
- vLLM version: v0.10.0
- vLLM main:
766bc8162c
---------
Signed-off-by: Icey <1790571317@qq.com>
This commit is contained in:
13  tests/e2e/models/configs/DeepSeek-V2-Lite.yaml  (new file)
@@ -0,0 +1,13 @@
model_name: "deepseek-ai/DeepSeek-V2-Lite"
tasks:
  - name: "gsm8k"
    metrics:
      - name: "exact_match,strict-match"
        value: 0.375
      - name: "exact_match,flexible-extract"
        value: 0.375
tensor_parallel_size: 2
apply_chat_template: False
fewshot_as_multiturn: False
trust_remote_code: True
enforce_eager: True
8  tests/e2e/models/configs/Qwen2.5-VL-7B-Instruct.yaml  (new file)
@@ -0,0 +1,8 @@
model_name: "Qwen/Qwen2.5-VL-7B-Instruct"
model: "vllm-vlm"
tasks:
  - name: "mmmu_val"
    metrics:
      - name: "acc,none"
        value: 0.51
max_model_len: 8192
||||
18  tests/e2e/models/configs/Qwen3-30B-A3B.yaml  (new file)
@@ -0,0 +1,18 @@
model_name: "Qwen/Qwen3-30B-A3B"
tasks:
  - name: "gsm8k"
    metrics:
      - name: "exact_match,strict-match"
        value: 0.89
      - name: "exact_match,flexible-extract"
        value: 0.85
  - name: "ceval-valid"
    metrics:
      - name: "acc,none"
        value: 0.84
    num_fewshot: 5
gpu_memory_utilization: 0.6
enable_expert_parallel: True
tensor_parallel_size: 2
apply_chat_template: False
fewshot_as_multiturn: False
13  tests/e2e/models/configs/Qwen3-8B-Base.yaml  (new file)
@@ -0,0 +1,13 @@
model_name: "Qwen/Qwen3-8B-Base"
tasks:
  - name: "gsm8k"
    metrics:
      - name: "exact_match,strict-match"
        value: 0.82
      - name: "exact_match,flexible-extract"
        value: 0.83
  - name: "ceval-valid"
    metrics:
      - name: "acc,none"
        value: 0.82
    num_fewshot: 5
3  tests/e2e/models/configs/accuracy.txt  (new file)
@@ -0,0 +1,3 @@
Qwen3-8B-Base.yaml
Qwen2.5-VL-7B-Instruct.yaml
Qwen3-30B-A3B.yaml
Reference in New Issue
Block a user