xc-llm-ascend/tests/e2e/nightly/single_node/models/configs/Qwen3-Next-80B-A3B-Instruct.yaml
LeeWenquan 9615bc33fd Fix Qwen3Next CI Config (#7561)
### What this PR does / why we need it?
This PR modifies the Qwen3-Next nightly CI config:
(1) Add a nightly CI case.
(2) Set a more precise accuracy standard.

- vLLM version: v0.18.0
- vLLM main:
6a9cceb219

Signed-off-by: Your Name <you@example.com>
Co-authored-by: Your Name <you@example.com>
2026-03-24 17:08:17 +08:00


# ==========================================
# Shared Configurations
# ==========================================
_envs: &envs
  OMP_NUM_THREADS: "10"
  OMP_PROC_BIND: "false"
  HCCL_BUFFSIZE: "1024"
  PYTORCH_NPU_ALLOC_CONF: "expandable_segments:True"
  SERVER_PORT: "DEFAULT_PORT"
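# NOTE (assumption, not part of the original file): these env vars are
# presumably exported by the CI harness before the vLLM server starts.
# HCCL_BUFFSIZE sizes the HCCL communication buffer on Ascend NPUs (in MB,
# if it follows the usual CANN semantics), and "DEFAULT_PORT" looks like a
# placeholder the harness substitutes before "$SERVER_PORT" below is
# expanded.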
_server_cmd: &server_cmd
  - "--tensor-parallel-size"
  - "4"
  - "--port"
  - "$SERVER_PORT"
  - "--max-model-len"
  - "40960"
  - "--trust-remote-code"
  - "--async-scheduling"
  - "--no-enable-prefix-caching"
  - "--enable-expert-parallel"
  - "--gpu-memory-utilization"
  - "0.8"
  - "--max-num-seqs"
  - "64"
  - "--compilation_config"
  - '{"cudagraph_mode": "FULL_DECODE_ONLY"}'
_benchmarks: &benchmarks
  perf:
    case_type: performance
    dataset_path: vllm-ascend/GSM8K-in3500-bs400
    request_conf: vllm_api_stream_chat
    dataset_conf: gsm8k/gsm8k_gen_0_shot_cot_str_perf
    num_prompts: 256
    max_out_len: 1500
    batch_size: 64
    baseline: 1
    threshold: 0.97
  acc:
    case_type: accuracy
    dataset_path: vllm-ascend/gsm8k-lite
    request_conf: vllm_api_general_chat
    dataset_conf: gsm8k/gsm8k_gen_0_shot_cot_chat_prompt
    max_out_len: 32768
    batch_size: 64
    top_k: 20
    baseline: 96
    threshold: 3
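# NOTE (assumption about harness semantics): for perf, measured throughput
# is presumably normalized against the recorded baseline and must reach at
# least threshold * baseline (i.e. 97% of baseline); for acc, the GSM8K
# score presumably must stay within threshold points of baseline
# (96 - 3 = 93), which is the tighter accuracy standard this PR sets.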
# ==========================================
# ACTUAL TEST CASES
# ==========================================
test_cases:
  - name: "Qwen3-Next-80B-A3B-Instruct-aclgraph-8192-a3"
    model: "Qwen/Qwen3-Next-80B-A3B-Instruct"
    envs:
      <<: *envs
    server_cmd: *server_cmd
    server_cmd_extra:
      - "--max-num-batched-tokens"
      - "8192"
    benchmarks:
      <<: *benchmarks
  - name: "Qwen3-Next-80B-A3B-Instruct-aclgraph-32768-a3"
    model: "Qwen/Qwen3-Next-80B-A3B-Instruct"
    envs:
      <<: *envs
    server_cmd: *server_cmd
    server_cmd_extra:
      - "--max-num-batched-tokens"
      - "32768"
    benchmarks:
      <<: *benchmarks
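
For reference, a minimal sketch of how the first test case resolves once a
YAML 1.1 loader expands the anchors: "<<: *envs" and "<<: *benchmarks" are
merge keys that copy every entry of the anchored mapping, while
"*server_cmd" is a plain alias that splices the anchored list in directly
(abridged; entries elided in comments are unchanged):

test_cases:
  - name: "Qwen3-Next-80B-A3B-Instruct-aclgraph-8192-a3"
    model: "Qwen/Qwen3-Next-80B-A3B-Instruct"
    envs:
      OMP_NUM_THREADS: "10"
      OMP_PROC_BIND: "false"
      HCCL_BUFFSIZE: "1024"
      PYTORCH_NPU_ALLOC_CONF: "expandable_segments:True"
      SERVER_PORT: "DEFAULT_PORT"
    server_cmd:
      - "--tensor-parallel-size"
      - "4"
      # ...the remaining _server_cmd entries, unchanged...
    server_cmd_extra:
      - "--max-num-batched-tokens"
      - "8192"
    benchmarks:
      perf:
        # ...all perf keys from _benchmarks, unchanged...
      acc:
        # ...all acc keys from _benchmarks, unchanged...

Keeping server_cmd_extra as a separate key (the harness presumably appends
it to server_cmd when launching the server) lets both test cases share one
anchored command line and differ only in --max-num-batched-tokens.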