xc-llm-ascend/tests/e2e/nightly/single_node/models/configs/Qwen3-Next-80B-A3B-Instruct.yaml
LeeWenquan 65eae6de7b Add Ascend Ops recurrent_gated_delta_rule (#6725)
### What this PR does / why we need it?
Change the recurrent_gated_delta_rule op from the Triton implementation
to the Ascend C version for better performance.
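
For context, this op implements the gated delta rule recurrence used by Qwen3-Next's linear-attention layers. Below is a minimal, non-fused PyTorch sketch of the math, assuming the standard Gated DeltaNet formulation; the tensor layout, function name, and zero-initialized state are our illustration, not the kernel's actual interface:

```python
import torch

def recurrent_gated_delta_rule_ref(q, k, v, g, beta):
    """Illustrative, non-fused reference for the gated delta rule.

    Assumed shapes (not the kernel's actual layout):
      q, k: [B, T, H, Dk]   queries / keys per head
      v:    [B, T, H, Dv]   values per head
      g:    [B, T, H]       log decay, alpha_t = exp(g_t)
      beta: [B, T, H]       write strength beta_t in [0, 1]
    """
    B, T, H, Dk = k.shape
    Dv = v.shape[-1]
    S = k.new_zeros(B, H, Dv, Dk)              # recurrent state per head
    out = []
    for t in range(T):
        k_t, v_t, q_t = k[:, t], v[:, t], q[:, t]
        a_t = g[:, t].exp()[..., None, None]   # [B, H, 1, 1] decay gate
        b_t = beta[:, t][..., None, None]      # [B, H, 1, 1] write gate
        S = a_t * S                            # decay the old memory
        # Delta rule: replace what the state predicts for k_t with v_t,
        # scaled by beta_t:  S += beta_t * (v_t - S @ k_t) k_t^T
        pred = torch.einsum('bhvd,bhd->bhv', S, k_t)
        S = S + b_t * torch.einsum('bhv,bhd->bhvd', v_t - pred, k_t)
        out.append(torch.einsum('bhvd,bhd->bhv', S, q_t))
    return torch.stack(out, dim=1)             # [B, T, H, Dv]
```

Both the Triton and the Ascend C kernels fuse this per-step loop into a single pass over the sequence; the PR only swaps which backend provides the fused kernel.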
### Does this PR introduce _any_ user-facing change?
No
### How was this patch tested?

- vLLM version: v0.15.0
- vLLM main: 9562912cea

---------

Signed-off-by: SunnyLee219 <3294305115@qq.com>
2026-03-09 14:14:14 +08:00


# ==========================================
# Shared Configurations
# ==========================================
_envs: &envs
  OMP_NUM_THREADS: "10"                 # OpenMP threads per process
  OMP_PROC_BIND: "false"                # do not pin OpenMP threads to cores
  HCCL_BUFFSIZE: "1024"                 # HCCL communication buffer size (MB)
  PYTORCH_NPU_ALLOC_CONF: "expandable_segments:True"  # reduce NPU memory fragmentation
  SERVER_PORT: "DEFAULT_PORT"           # placeholder, presumably substituted at runtime
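
# Shared vLLM server arguments for all cases below (TP=4, expert
# parallel, prefix caching off). Note that cudagraph_capture_sizes
# matches --max-num-seqs (both 64), presumably so the full decode
# batch runs as a single captured graph size.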
_server_cmd: &server_cmd
  - "--tensor-parallel-size"
  - "4"
  - "--port"
  - "$SERVER_PORT"
  - "--max-model-len"
  - "40960"
  - "--trust-remote-code"
  - "--async-scheduling"
  - "--no-enable-prefix-caching"
  - "--enable-expert-parallel"
  - "--gpu-memory-utilization"
  - "0.8"
  - "--max-num-seqs"
  - "64"
  - "--compilation-config"
  - '{"cudagraph_capture_sizes": [64]}'
_benchmarks: &benchmarks
  perf:
    case_type: performance
    dataset_path: vllm-ascend/GSM8K-in3500-bs400
    request_conf: vllm_api_stream_chat
    dataset_conf: gsm8k/gsm8k_gen_0_shot_cot_str_perf
    num_prompts: 256
    max_out_len: 1500
    batch_size: 64
    baseline: 1
    threshold: 0.97
  acc:
    case_type: accuracy
    dataset_path: vllm-ascend/gsm8k-lite
    request_conf: vllm_api_general_chat
    dataset_conf: gsm8k/gsm8k_gen_0_shot_cot_chat_prompt
    max_out_len: 32768
    batch_size: 64
    top_k: 20
    baseline: 95
    threshold: 5
# ==========================================
# ACTUAL TEST CASES
# ==========================================
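# Two variants of the same deployment; they differ only in the
# --max-num-batched-tokens value appended via server_cmd_extra
# (8192 vs. 32768).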
test_cases:
  - name: "Qwen3-Next-80B-A3B-Instruct-aclgraph-8192-a3"
    model: "Qwen/Qwen3-Next-80B-A3B-Instruct"
    envs:
      <<: *envs
    server_cmd: *server_cmd
    server_cmd_extra:
      - "--max-num-batched-tokens"
      - "8192"
    benchmarks:
      <<: *benchmarks

  - name: "Qwen3-Next-80B-A3B-Instruct-aclgraph-32768-a3"
    model: "Qwen/Qwen3-Next-80B-A3B-Instruct"
    envs:
      <<: *envs
    server_cmd: *server_cmd
    server_cmd_extra:
      - "--max-num-batched-tokens"
      - "32768"
    benchmarks:
      <<: *benchmarks
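
# For orientation, the first case above resolves to roughly the
# following launch, assuming the harness joins server_cmd with
# server_cmd_extra into a `vllm serve` invocation (the entrypoint is
# our assumption; the flags are verbatim from this file):
#
#   vllm serve Qwen/Qwen3-Next-80B-A3B-Instruct \
#     --tensor-parallel-size 4 --port $SERVER_PORT \
#     --max-model-len 40960 --trust-remote-code --async-scheduling \
#     --no-enable-prefix-caching --enable-expert-parallel \
#     --gpu-memory-utilization 0.8 --max-num-seqs 64 \
#     --compilation-config '{"cudagraph_capture_sizes": [64]}' \
#     --max-num-batched-tokens 8192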