[E2E] Optimize nightly test cases. (#4886)
### What this PR does / why we need it?
Optimize the nightly test cases.
Changes:
- tests/e2e/nightly/multi_node/config/models/Qwen3-235B-A3B.yaml: Add
accuracy and performance benchmarks
- tests/e2e/models/configs/Qwen3-8B-Base.yaml: Delete
- tests/e2e/models/configs/internlm-7b.yaml: Replace with
internlm3-8b-instruct
- tests/e2e/nightly/models/test_deepseek_r1_w8a8_eplb.py: Switch to the
DeepSeek-R1-0528-W8A8 model
- vLLM version: v0.12.0
- vLLM main:
ad32e3e19c
Signed-off-by: menogrey <1299267905@qq.com>
This commit is contained in:
@@ -1,14 +0,0 @@
|
||||
model_name: "Qwen/Qwen3-8B-Base"
|
||||
hardware: "Atlas A2 Series"
|
||||
tasks:
|
||||
- name: "gsm8k"
|
||||
metrics:
|
||||
- name: "exact_match,strict-match"
|
||||
value: 0.82
|
||||
- name: "exact_match,flexible-extract"
|
||||
value: 0.83
|
||||
- name: "ceval-valid"
|
||||
metrics:
|
||||
- name: "acc,none"
|
||||
value: 0.82
|
||||
num_fewshot: 5
|
||||
@@ -12,6 +12,6 @@ InternVL3-8B.yaml
|
||||
InternVL3_5-8B.yaml
|
||||
ERNIE-4.5-21B-A3B-PT.yaml
|
||||
gemma-3-4b-it.yaml
|
||||
internlm-7b.yaml
|
||||
internlm3-8b-instruct.yaml
|
||||
Molmo-7B-D-0924.yaml
|
||||
llava-1.5-7b-hf.yaml
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
model_name: "Shanghai_AI_Laboratory/internlm-7b"
|
||||
model_name: "Shanghai_AI_Laboratory/internlm3-8b-instruct"
|
||||
hardware: "Atlas A2 Series"
|
||||
tasks:
|
||||
- name: "ceval-valid"
|
||||
@@ -21,7 +21,7 @@ def pytest_addoption(parser):
|
||||
parser.addoption(
|
||||
"--config",
|
||||
action="store",
|
||||
default="./tests/e2e/models/configs/Qwen3-8B-Base.yaml",
|
||||
default="./tests/e2e/models/configs/Qwen3-8B.yaml",
|
||||
help="Path to the model config YAML file",
|
||||
)
|
||||
parser.addoption(
|
||||
|
||||
@@ -25,7 +25,7 @@ from tests.e2e.conftest import RemoteOpenAIServer
|
||||
from tools.aisbench import run_aisbench_cases
|
||||
|
||||
MODELS = [
|
||||
"vllm-ascend/DeepSeek-R1-W8A8",
|
||||
"vllm-ascend/DeepSeek-R1-0528-W8A8",
|
||||
]
|
||||
|
||||
prompts = [
|
||||
@@ -48,6 +48,17 @@ deployment:
|
||||
--no-enable-prefix-caching
|
||||
--gpu-memory-utilization 0.9
|
||||
benchmarks:
|
||||
perf:
|
||||
case_type: performance
|
||||
dataset_path: vllm-ascend/GSM8K-in3500-bs2800
|
||||
request_conf: vllm_api_stream_chat
|
||||
dataset_conf: gsm8k/gsm8k_gen_0_shot_cot_str_perf
|
||||
num_prompts: 2800
|
||||
max_out_len: 1500
|
||||
batch_size: 700
|
||||
request_rate: 11.2
|
||||
baseline: 1
|
||||
threshold: 0.97
|
||||
acc:
|
||||
case_type: accuracy
|
||||
dataset_path: vllm-ascend/gsm8k
|
||||
Reference in New Issue
Block a user