【CI】add nightly cases: MiniMax-M2.5-W8A8 Qwen3.5-27B-w8a8 Qwen3.5-397B-A1… (#7968)

### What this PR does / why we need it?
This PR adds three acc/perf nightly cases on A3 — Qwen3.5-27B-w8a8, MiniMax-M2.5-w8a8, and Qwen3.5-397B-A17B-w8a8-mtp — so that they are tested daily.

- vLLM version: v0.18.0
- vLLM main: 35141a7eed

Signed-off-by: guxin108 <1252896542@qq.com>
Author: guxin108 (committed via GitHub)
Date: 2026-04-03 17:50:59 +08:00
Parent: 3f462d251e
Commit: 81c6f51a45
5 changed files with 206 additions and 1 deletion


@@ -245,6 +245,9 @@
     "xlangai/OpenCUA-7B",
     "Eco-Tech/GLM-5-w4a8",
     "Eco-Tech/GLM-4.7-W8A8-floatmtp",
-    "MiniMax/MiniMax-M2.5"
+    "MiniMax/MiniMax-M2.5",
+    "Eco-Tech/Qwen3.5-27B-w8a8-mtp",
+    "Eco-Tech/MiniMax-M2.5-w8a8-QuaRot",
+    "Eco-Tech/Qwen3.5-397B-A17B-w8a8-mtp"
   ]
 }
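
As a side note for reviewers: the three IDs added to this allowlist should stay in sync with the `model:` fields of the new nightly configs below. A minimal sketch of that cross-check — a hypothetical helper, not part of this PR; file paths are assumed from the workflow's `config_file_path` entries:

```python
# Hypothetical consistency check -- not part of this PR's CI.
# Verifies the model IDs added to the allowlist above match the
# `model:` fields of the three new nightly configs.
import yaml  # pip install pyyaml

ALLOWLIST_ADDITIONS = {
    "Eco-Tech/Qwen3.5-27B-w8a8-mtp",
    "Eco-Tech/MiniMax-M2.5-w8a8-QuaRot",
    "Eco-Tech/Qwen3.5-397B-A17B-w8a8-mtp",
}

# Paths are assumptions based on config_file_path in the workflow matrix.
CONFIGS = [
    "Qwen3.5-27B-w8a8-A3.yaml",
    "MiniMax-M2.5-W8A8-A3.yaml",
    "Qwen3.5-397B-A17B-W8A8-mtp-A3.yaml",
]

for path in CONFIGS:
    with open(path) as f:
        cases = yaml.safe_load(f)["test_cases"]
    for case in cases:
        assert case["model"] in ALLOWLIST_ADDITIONS, case["model"]
```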


@@ -223,6 +223,15 @@ jobs:
       - name: deepseek-r1-0528-w8a8-prefix-cache
         os: linux-aarch64-a3-16
         config_file_path: Prefix-Cache-DeepSeek-R1-0528-W8A8.yaml
+      - name: Qwen3.5-27B-w8a8-A3
+        os: linux-aarch64-a3-2
+        config_file_path: Qwen3.5-27B-w8a8-A3.yaml
+      - name: MiniMax-M2.5-w8a8
+        os: linux-aarch64-a3-16
+        config_file_path: MiniMax-M2.5-W8A8-A3.yaml
+      - name: Qwen3.5-397B-A17B-w8a8-mtp
+        os: linux-aarch64-a3-16
+        config_file_path: Qwen3.5-397B-A17B-W8A8-mtp-A3.yaml
     uses: ./.github/workflows/_e2e_nightly_single_node.yaml
     with:
       runner: ${{ matrix.test_config.os }}
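
Each matrix entry pins a runner and a config file; the `-N` suffix of the runner label appears to encode the NPU count, which must cover `tensor-parallel-size * data-parallel-size` from the corresponding config. A quick sanity-check sketch under that assumption, with values taken from the three configs below:

```python
# Sanity check: tensor_parallel * data_parallel must fit on the runner.
# Assumption: the "-N" suffix of the runner label is its NPU count
# (linux-aarch64-a3-2 -> 2, linux-aarch64-a3-16 -> 16).
cases = {
    # name: (runner_npus, tensor_parallel, data_parallel)
    "Qwen3.5-27B-w8a8-A3": (2, 2, 1),
    "MiniMax-M2.5-w8a8": (16, 4, 4),
    "Qwen3.5-397B-A17B-w8a8-mtp": (16, 16, 1),
}

for name, (npus, tp, dp) in cases.items():
    world = tp * dp
    assert world <= npus, f"{name}: tp*dp={world} exceeds {npus} NPUs"
    print(f"{name}: uses {world} of {npus} NPUs")
```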

File: MiniMax-M2.5-W8A8-A3.yaml (new)

@@ -0,0 +1,62 @@
# ==========================================
# ACTUAL TEST CASES
# ==========================================
test_cases:
  - name: "MiniMax-M2.5-w8a8"
    model: "Eco-Tech/MiniMax-M2.5-w8a8-QuaRot"
    envs:
      HCCL_BUFFSIZE: "512"
      HCCL_OP_EXPANSION_MODE: "AIV"
      PYTORCH_NPU_ALLOC_CONF: "expandable_segments:True"
      OMP_NUM_THREADS: "1"
      TASK_QUEUE_ENABLE: "1"
      VLLM_ASCEND_ENABLE_FUSED_MC2: "2"
      VLLM_ASCEND_ENABLE_FLASHCOMM1: "1"
      SERVER_PORT: "DEFAULT_PORT"
    server_cmd:
      - "--no-enable-prefix-caching"
      - "--tensor-parallel-size"
      - "4"
      - "--data-parallel-size"
      - "4"
      - "--port"
      - "$SERVER_PORT"
      - "--enable-expert-parallel"
      - "--max-num-seqs"
      - "128"
      - "--max-model-len"
      - "153600"
      - "--max-num-batched-tokens"
      - "16384"
      - "--trust-remote-code"
      - "--gpu-memory-utilization"
      - "0.8"
      - "--quantization"
      - "ascend"
      - "--compilation-config"
      - '{"cudagraph_mode":"FULL_DECODE_ONLY"}'
      - "--additional-config"
      - '{"enable_cpu_binding":true}'
      - "--async-scheduling"
    benchmarks:
      acc:
        case_type: accuracy
        dataset_path: vllm-ascend/gsm8k-lite
        request_conf: vllm_api_general_chat
        dataset_conf: gsm8k/gsm8k_gen_0_shot_cot_chat_prompt
        max_out_len: 32768
        batch_size: 32
        baseline: 95
        threshold: 5
      perf:
        case_type: performance
        dataset_path: vllm-ascend/GSM8K-in3500-bs2800
        request_conf: vllm_api_stream_chat
        dataset_conf: gsm8k/gsm8k_gen_0_shot_cot_str_perf
        num_prompts: 80
        max_out_len: 1500
        batch_size: 20
        request_rate: 0
        baseline: 730.0832
        threshold: 0.97
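
On the gate semantics: the units suggest the accuracy `threshold` is an absolute margin below `baseline` (pass at >= 90 points) while the perf `threshold` is a relative floor (pass at >= 0.97 x baseline). A minimal sketch under that assumption; the exact harness logic may differ:

```python
# Sketch of how the two gates plausibly evaluate. The semantics are an
# assumption inferred from the units: acc threshold=5 reads as an
# absolute point margin, perf threshold=0.97 as a relative floor.
def acc_ok(score: float, baseline: float = 95.0, threshold: float = 5.0) -> bool:
    return score >= baseline - threshold      # pass at >= 90.0 on gsm8k-lite

def perf_ok(tput: float, baseline: float = 730.0832, threshold: float = 0.97) -> bool:
    return tput >= baseline * threshold       # pass at >= ~708.18

print(acc_ok(92.4), perf_ok(715.0))           # True True
```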

File: Qwen3.5-27B-w8a8-A3.yaml (new)

@@ -0,0 +1,64 @@
# ==========================================
# ACTUAL TEST CASES
# ==========================================
test_cases:
  - name: "Qwen3.5-27B-w8a8"
    model: "Eco-Tech/Qwen3.5-27B-w8a8-mtp"
    envs:
      VLLM_USE_MODELSCOPE: "true"
      PYTORCH_NPU_ALLOC_CONF: "expandable_segments:True"
      HCCL_BUFFSIZE: "512"
      OMP_PROC_BIND: "false"
      OMP_NUM_THREADS: "1"
      TASK_QUEUE_ENABLE: "1"
      SERVER_PORT: "DEFAULT_PORT"
    server_cmd:
      - "--tensor-parallel-size"
      - "2"
      - "--data-parallel-size"
      - "1"
      - "--port"
      - "$SERVER_PORT"
      - "--seed"
      - "1024"
      - "--max-num-seqs"
      - "32"
      - "--quantization"
      - "ascend"
      - "--max-model-len"
      - "133000"
      - "--max-num-batched-tokens"
      - "8096"
      - "--trust-remote-code"
      - "--gpu-memory-utilization"
      - "0.9"
      - "--no-enable-prefix-caching"
      - "--additional-config"
      - '{"enable_cpu_binding":true}'
      - "--speculative_config"
      - '{"method": "qwen3_5_mtp", "num_speculative_tokens": 3, "enforce_eager": true}'
      - "--compilation-config"
      - '{"cudagraph_mode":"FULL_DECODE_ONLY"}'
      - "--async-scheduling"
    benchmarks:
      acc:
        case_type: accuracy
        dataset_path: vllm-ascend/gsm8k-lite
        request_conf: vllm_api_general_chat
        dataset_conf: gsm8k/gsm8k_gen_0_shot_cot_chat_prompt
        max_out_len: 32768
        batch_size: 32
        baseline: 95
        threshold: 5
      perf:
        case_type: performance
        dataset_path: vllm-ascend/GSM8K-in3500-bs8000-qwen3
        request_conf: vllm_api_stream_chat
        dataset_conf: gsm8k/gsm8k_gen_0_shot_cot_str_perf
        num_prompts: 80
        max_out_len: 1500
        batch_size: 20
        request_rate: 0
        baseline: 664.6449
        threshold: 0.97
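
To replay a case like this locally, one would export `envs`, substitute the `DEFAULT_PORT` placeholder, and feed `server_cmd` to `vllm serve`. A rough sketch, assuming these launch mechanics (the harness's actual wrapper is not shown in this PR):

```python
# Sketch: replay one nightly case locally. The YAML field names come
# from the file above; the launch mechanics (env export, `vllm serve`,
# $SERVER_PORT substitution) are assumptions about the harness.
import os
import subprocess

import yaml

with open("Qwen3.5-27B-w8a8-A3.yaml") as f:   # path assumed
    case = yaml.safe_load(f)["test_cases"][0]

env = os.environ.copy()
env.update(case["envs"])
env["SERVER_PORT"] = "8000"                   # replace DEFAULT_PORT placeholder

args = [a.replace("$SERVER_PORT", env["SERVER_PORT"]) for a in case["server_cmd"]]
subprocess.run(["vllm", "serve", case["model"], *args], env=env, check=True)
```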

File: Qwen3.5-397B-A17B-W8A8-mtp-A3.yaml (new)

@@ -0,0 +1,67 @@
# ==========================================
# ACTUAL TEST CASES
# ==========================================
test_cases:
  - name: "Qwen3.5-397B-A17B-w8a8-mtp"
    model: "Eco-Tech/Qwen3.5-397B-A17B-w8a8-mtp"
    envs:
      VLLM_USE_MODELSCOPE: "true"
      PYTORCH_NPU_ALLOC_CONF: "expandable_segments:True"
      HCCL_BUFFSIZE: "512"
      OMP_PROC_BIND: "false"
      OMP_NUM_THREADS: "1"
      TASK_QUEUE_ENABLE: "1"
      SERVER_PORT: "DEFAULT_PORT"
      VLLM_ASCEND_ENABLE_FUSED_MC2: "1"
      VLLM_ASCEND_ENABLE_FLASHCOMM1: "1"
    server_cmd:
      - "--tensor-parallel-size"
      - "16"
      - "--data-parallel-size"
      - "1"
      - "--enable-expert-parallel"
      - "--port"
      - "$SERVER_PORT"
      - "--seed"
      - "1024"
      - "--max-num-seqs"
      - "32"
      - "--quantization"
      - "ascend"
      - "--max-model-len"
      - "133000"
      - "--max-num-batched-tokens"
      - "8096"
      - "--trust-remote-code"
      - "--gpu-memory-utilization"
      - "0.9"
      - "--no-enable-prefix-caching"
      - "--additional-config"
      - '{"enable_cpu_binding":true,"multistream_overlap_shared_expert":true}'
      - "--speculative_config"
      - '{"method": "qwen3_5_mtp", "num_speculative_tokens": 3, "enforce_eager": true}'
      - "--compilation-config"
      - '{"cudagraph_mode":"FULL_DECODE_ONLY"}'
      - "--async-scheduling"
    benchmarks:
      acc:
        case_type: accuracy
        dataset_path: vllm-ascend/gsm8k-lite
        request_conf: vllm_api_general_chat
        dataset_conf: gsm8k/gsm8k_gen_0_shot_cot_chat_prompt
        max_out_len: 32768
        batch_size: 32
        baseline: 95
        threshold: 5
      perf:
        case_type: performance
        dataset_path: vllm-ascend/GSM8K-in3500-bs8000-qwen3
        request_conf: vllm_api_stream_chat
        dataset_conf: gsm8k/gsm8k_gen_0_shot_cot_str_perf
        num_prompts: 80
        max_out_len: 1500
        batch_size: 20
        request_rate: 0
        baseline: 768.6894
        threshold: 0.97
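
Putting the three perf gates side by side, assuming the relative-floor reading of `threshold` above, the minimum passing throughputs work out as follows:

```python
# Minimum passing throughput per perf case, assuming the relative-floor
# reading of `threshold` (measured >= baseline * 0.97).
baselines = {
    "MiniMax-M2.5-w8a8": 730.0832,
    "Qwen3.5-27B-w8a8": 664.6449,
    "Qwen3.5-397B-A17B-w8a8-mtp": 768.6894,
}

for name, baseline in baselines.items():
    print(f"{name}: >= {baseline * 0.97:.2f}")
# MiniMax-M2.5-w8a8: >= 708.18
# Qwen3.5-27B-w8a8: >= 644.71
# Qwen3.5-397B-A17B-w8a8-mtp: >= 745.63
```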