[Nightly][Refactor] Migrate nightly single-node model tests from .py to .yaml (#6503)

### What this PR does / why we need it?
This PR refactors the nightly single-node model tests by migrating their
test configurations from Python scripts to a more maintainable YAML-based
format. The table below maps each original PR and its Python test file to
the new YAML config; a sketch of how such a config might be consumed
follows the table.

| Original PR | Python (`.py`) | YAML (`.yaml`) |
| :--- | :--- | :--- |
| [#3568](https://github.com/vllm-project/vllm-ascend/pull/3568) | `test_deepseek_r1_0528_w8a8_eplb.py` | `DeepSeek-R1-0528-W8A8.yaml` |
| [#3631](https://github.com/vllm-project/vllm-ascend/pull/3631) | `test_deepseek_r1_0528_w8a8.py` | `DeepSeek-R1-0528-W8A8.yaml` |
| [#5874](https://github.com/vllm-project/vllm-ascend/pull/5874) | `test_deepseek_r1_w8a8_hbm.py` | `DeepSeek-R1-W8A8-HBM.yaml` |
| [#3908](https://github.com/vllm-project/vllm-ascend/pull/3908) | `test_deepseek_v3_2_w8a8.py` | `DeepSeek-V3.2-W8A8.yaml` |
| [#5682](https://github.com/vllm-project/vllm-ascend/pull/5682) | `test_kimi_k2_thinking.py` | `Kimi-K2-Thinking.yaml` |
| [#4111](https://github.com/vllm-project/vllm-ascend/pull/4111) | `test_mtpx_deepseek_r1_0528_w8a8.py` | `MTPX-DeepSeek-R1-0528-W8A8.yaml` |
| [#3733](https://github.com/vllm-project/vllm-ascend/pull/3733) | `test_prefix_cache_deepseek_r1_0528_w8a8.py` | `Prefix-Cache-DeepSeek-R1-0528-W8A8.yaml` |
| [#6543](https://github.com/vllm-project/vllm-ascend/pull/6543) | `test_qwen3_235b_w8a8.py` | `Qwen3-235B-A22B-W8A8.yaml` |
| [#6543](https://github.com/vllm-project/vllm-ascend/pull/6543) | `test_qwen3_235b_a22b_w8a8_eplb.py` | `Qwen3-235B-A22B-W8A8.yaml` |
| [#3973](https://github.com/vllm-project/vllm-ascend/pull/3973) | `test_qwen3_30b_w8a8.py` | `Qwen3-30B-A3B-W8A8.yaml` |
| [#3541](https://github.com/vllm-project/vllm-ascend/pull/3541) | `test_qwen3_32b_int8.py` | `Qwen3-32B-Int8.yaml` |
| [#3757](https://github.com/vllm-project/vllm-ascend/pull/3757) | `test_qwq_32b.py` | `QwQ-32B.yaml` |
| [#5616](https://github.com/vllm-project/vllm-ascend/pull/5616) | `test_qwen3_next_w8a8.py` | `Qwen3-Next-80B-A3B-Instruct-W8A8.yaml` |
| [#3541](https://github.com/vllm-project/vllm-ascend/pull/3541) | `test_qwen2_5_vl_7b.py` | `Qwen2.5-VL-7B-Instruct.yaml` |
| [#5301](https://github.com/vllm-project/vllm-ascend/pull/5301) | `test_qwen2_5_vl_7b_epd.py` | `Qwen2.5-VL-7B-Instruct-EPD.yaml` |
| [#3707](https://github.com/vllm-project/vllm-ascend/pull/3707) | `test_qwen2_5_vl_32b.py` | `Qwen2.5-VL-32B-Instruct.yaml` |
| [#3676](https://github.com/vllm-project/vllm-ascend/pull/3676) | `test_qwen3_32b_int8_a3_feature_stack3.py` | `Qwen3-32B-Int8-A3-Feature-Stack3.yaml` |
| [#3709](https://github.com/vllm-project/vllm-ascend/pull/3709) | `test_prefix_cache_qwen3_32b_int8.py` | `Prefix-Cache-Qwen3-32B-Int8.yaml` |
| [#5395](https://github.com/vllm-project/vllm-ascend/pull/5395) | `test_qwen3_next.py` | `Qwen3-Next-80B-A3B-Instruct-A2.yaml` |
| [#3474](https://github.com/vllm-project/vllm-ascend/pull/3474) | `test_qwen3_32b.py` | `Qwen3-32B.yaml` |
| [#3541](https://github.com/vllm-project/vllm-ascend/pull/3541) | `test_qwen3_32b_int8.py` | `Qwen3-32B-Int8-A2.yaml` |

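For reference, here is a minimal sketch of how a harness might consume one of
these YAML files. The nightly runner itself is not part of this diff;
`build_server_cmd`, `case_env`, the port handling, and the file path below are
illustrative assumptions, not the actual implementation:

```python
import os

import yaml  # PyYAML


def build_server_cmd(case: dict, port: int) -> list[str]:
    """Expand server_cmd (plus server_cmd_extra) into a `vllm serve` argv."""
    args = list(case["server_cmd"]) + list(case.get("server_cmd_extra", []))
    # "$SERVER_PORT" placeholders in the flag list resolve to the real port.
    args = [a.replace("$SERVER_PORT", str(port)) for a in args]
    return ["vllm", "serve", case["model"], *args]


def case_env(case: dict, port: int) -> dict:
    """Overlay the case's envs block onto the current environment."""
    env = dict(os.environ)
    for key, value in case.get("envs", {}).items():
        # "DEFAULT_PORT" appears to mean "use the port the runner allocates".
        env[key] = str(port) if value == "DEFAULT_PORT" else value
    return env


# Assumes the config (named as in the table above) is on the current path.
with open("DeepSeek-R1-0528-W8A8.yaml") as f:
    config = yaml.safe_load(f)

case = config["test_cases"][0]
print(build_server_cmd(case, port=8000))
print(case_env(case, port=8000)["HCCL_BUFFSIZE"])
```
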
### Does this PR introduce _any_ user-facing change?

No. This change only migrates nightly test configurations; runtime behavior
is unchanged.
### How was this patch tested?

- vLLM version: v0.15.0
- vLLM main: https://github.com/vllm-project/vllm/commit/v0.15.0

---------

Signed-off-by: MrZ20 <2609716663@qq.com>

@@ -0,0 +1,94 @@
# ==========================================
# Shared Configurations
# ==========================================
_envs: &envs
  OMP_NUM_THREADS: "10"
  OMP_PROC_BIND: "false"
  HCCL_BUFFSIZE: "1024"
  PYTORCH_NPU_ALLOC_CONF: "expandable_segments:True"
  SERVER_PORT: "DEFAULT_PORT"
_server_cmd: &server_cmd
  - "--quantization"
  - "ascend"
  - "--data-parallel-size"
  - "2"
  - "--tensor-parallel-size"
  - "8"
  - "--enable-expert-parallel"
  - "--port"
  - "$SERVER_PORT"
  - "--seed"
  - "1024"
  - "--max-model-len"
  - "36864"
  - "--max-num-batched-tokens"
  - "4096"
  - "--max-num-seqs"
  - "16"
  - "--trust-remote-code"
  - "--gpu-memory-utilization"
  - "0.9"
  - "--speculative-config"
  - '{"num_speculative_tokens": 1, "method": "mtp"}'
  - "--additional-config"
  - '{"enable_weight_nz_layout": true}'
_benchmarks_acc: &benchmarks_acc
  acc:
    case_type: accuracy
    dataset_path: vllm-ascend/gsm8k-lite
    request_conf: vllm_api_general_chat
    dataset_conf: gsm8k/gsm8k_gen_0_shot_cot_chat_prompt
    max_out_len: 32768
    batch_size: 32
    baseline: 95
    threshold: 5
_benchmarks_perf: &benchmarks_perf
  perf:
    case_type: performance
    dataset_path: vllm-ascend/GSM8K-in3500-bs400
    request_conf: vllm_api_stream_chat
    dataset_conf: gsm8k/gsm8k_gen_0_shot_cot_str_perf
    num_prompts: 400
    max_out_len: 1500
    batch_size: 1000
    baseline: 1
    threshold: 0.97
# ==========================================
# ACTUAL TEST CASES
# ==========================================
test_cases:
  - name: "DeepSeek-R1-0528-W8A8-single"
    model: "vllm-ascend/DeepSeek-R1-0528-W8A8"
    envs:
      <<: *envs
    server_cmd: *server_cmd
    server_cmd_extra:
      - "--enforce-eager"
    benchmarks:
  - name: "DeepSeek-R1-0528-W8A8-aclgraph"
    model: "vllm-ascend/DeepSeek-R1-0528-W8A8"
    envs:
      <<: *envs
    server_cmd: *server_cmd
    benchmarks:
      <<: *benchmarks_acc
      <<: *benchmarks_perf
  - name: "DeepSeek-R1-0528-W8A8-EPLB"
    model: "vllm-ascend/DeepSeek-R1-0528-W8A8"
    envs:
      <<: *envs
      DYNAMIC_EPLB: "true"
    server_cmd: *server_cmd
    server_cmd_extra:
      - "--additional-config"
      - '{"enable_weight_nz_layout": true, "eplb_config": {"dynamic_eplb": "true", "expert_heat_collection_interval": 1000, "algorithm_execution_interval": 50, "eplb_policy_type": 3}}'
    benchmarks:
      <<: *benchmarks_acc
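
These files lean on YAML anchors (`&`) and merge keys (`<<`) to share the
`_envs`/`_server_cmd`/`_benchmarks_*` blocks. One subtlety worth noting: the
aclgraph case above repeats `<<` inside a single `benchmarks` mapping. PyYAML
flattens every merge key it finds, so both `acc` and `perf` survive, but
stricter YAML 1.2 loaders may reject the duplicated key, so the pattern is
loader-dependent. A small self-contained demonstration (not from the diff):

```python
import yaml  # PyYAML; the repeated "<<" handling below is loader-specific

doc = """
_a: &acc
  acc: {baseline: 95}
_b: &perf
  perf: {baseline: 1}
merged:
  <<: *acc
  <<: *perf
"""
# PyYAML's flatten_mapping collects every "<<" entry, so both maps merge.
print(yaml.safe_load(doc)["merged"])
# -> {'acc': {'baseline': 95}, 'perf': {'baseline': 1}}
```
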


@@ -0,0 +1,42 @@
# ==========================================
# ACTUAL TEST CASES
# ==========================================
test_cases:
  - name: "DeepSeek-R1-W8A8-HBM-single"
    model: "vllm-ascend/DeepSeek-R1-W8A8"
    envs:
      HCCL_BUFFSIZE: "1024"
      SERVER_PORT: "DEFAULT_PORT"
    server_cmd:
      - "--quantization"
      - "ascend"
      - "--port"
      - "$SERVER_PORT"
      - "--data-parallel-size"
      - "8"
      - "--data-parallel-size-local"
      - "8"
      - "--data-parallel-rpc-port"
      - "13389"
      - "--tensor-parallel-size"
      - "2"
      - "--enable-expert-parallel"
      - "--seed"
      - "1024"
      - "--max-num-seqs"
      - "32"
      - "--max-model-len"
      - "6000"
      - "--max-num-batched-tokens"
      - "6000"
      - "--trust-remote-code"
      - "--gpu-memory-utilization"
      - "0.92"
      - "--no-enable-prefix-caching"
      - "--reasoning-parser"
      - "deepseek_r1"
      - "--enforce-eager"
      - "--additional-config"
      - '{"ascend_scheduler_config": {"enabled": false}, "torchair_graph_config": {"enabled": false, "enable_multistream_shared_expert": false}}'
    benchmarks:


@@ -0,0 +1,78 @@
# ==========================================
# ACTUAL TEST CASES
# ==========================================
test_cases:
  - name: "DeepSeek-V3.2-W8A8-TP8-DP2"
    model: "vllm-ascend/DeepSeek-V3.2-W8A8"
    envs:
      HCCL_OP_EXPANSION_MODE: "AIV"
      OMP_PROC_BIND: "false"
      OMP_NUM_THREADS: "1"
      HCCL_BUFFSIZE: "1024"
      VLLM_ASCEND_ENABLE_MLAPO: "1"
      PYTORCH_NPU_ALLOC_CONF: "expandable_segments:True"
      VLLM_ASCEND_ENABLE_FLASHCOMM1: "1"
      VLLM_ENGINE_READY_TIMEOUT_S: "1800"
      SERVER_PORT: "DEFAULT_PORT"
    server_cmd:
      - "--enable-expert-parallel"
      - "--tensor-parallel-size"
      - "8"
      - "--data-parallel-size"
      - "2"
      - "--port"
      - "$SERVER_PORT"
      - "--max-model-len"
      - "8192"
      - "--max-num-batched-tokens"
      - "8192"
      - "--max-num-seqs"
      - "4"
      - "--trust-remote-code"
      - "--quantization"
      - "ascend"
      - "--gpu-memory-utilization"
      - "0.98"
      - "--compilation-config"
      - '{"cudagraph_capture_sizes":[8, 16, 24, 32, 40, 48], "cudagraph_mode":"FULL_DECODE_ONLY"}'
      - "--speculative-config"
      - '{"num_speculative_tokens": 3, "method":"deepseek_mtp"}'
      - "--additional-config"
      - '{"layer_sharding": ["q_b_proj", "o_proj"]}'
      - "--reasoning-parser"
      - "deepseek_v3"
      - "--tokenizer_mode"
      - "deepseek_v32"
    benchmarks:
      acc:
        case_type: accuracy
        dataset_path: vllm-ascend/gsm8k-lite
        request_conf: vllm_api_general_chat
        dataset_conf: gsm8k/gsm8k_gen_0_shot_cot_chat_prompt
        max_out_len: 4096
        batch_size: 8
        baseline: 95
        threshold: 5
      perf_1:
        case_type: performance
        dataset_path: vllm-ascend/GSM8K-in3500-bs400
        request_conf: vllm_api_stream_chat
        dataset_conf: gsm8k/gsm8k_gen_0_shot_cot_str_perf
        num_prompts: 1
        max_out_len: 1500
        batch_size: 1
        request_rate: 11.2
        baseline: 134
        threshold: 0.97
      perf_2:
        case_type: performance
        dataset_path: vllm-ascend/GSM8K-in3500-bs400
        request_conf: vllm_api_stream_chat
        dataset_conf: gsm8k/gsm8k_gen_0_shot_cot_str_perf
        num_prompts: 100
        max_out_len: 1500
        batch_size: 4
        request_rate: 11.2
        baseline: 134
        threshold: 0.97


@@ -0,0 +1,72 @@
# ==========================================
# Shared Configurations
# ==========================================
_envs: &envs
  HCCL_BUFFSIZE: "1024"
  SERVER_PORT: "DEFAULT_PORT"
_server_cmd: &server_cmd
  - "--no-enable-prefix-caching"
  - "--enable-expert-parallel"
  - "--tensor-parallel-size"
  - "8"
  - "--data-parallel-size"
  - "2"
  - "--port"
  - "$SERVER_PORT"
  - "--max-model-len"
  - "8192"
  - "--max-num-batched-tokens"
  - "8192"
  - "--block-size"
  - "16"
  - "--trust-remote-code"
  - "--gpu-memory-utilization"
  - "0.9"
_benchmarks: &benchmarks
  acc:
    case_type: accuracy
    dataset_path: vllm-ascend/gsm8k-lite
    request_conf: vllm_api_general_chat
    dataset_conf: gsm8k/gsm8k_gen_0_shot_cot_chat_prompt
    max_out_len: 4096
    batch_size: 8
    baseline: 95
    threshold: 5
  perf:
    case_type: performance
    dataset_path: vllm-ascend/GSM8K-in3500-bs400
    request_conf: vllm_api_stream_chat
    dataset_conf: gsm8k/gsm8k_gen_0_shot_cot_str_perf
    num_prompts: 16
    max_out_len: 1500
    batch_size: 8
    request_rate: 0
    baseline: 1
    threshold: 0.97
# ==========================================
# ACTUAL TEST CASES
# ==========================================
test_cases:
  - name: "GLM-4.5-TP8-DP2-fullgraph"
    model: "ZhipuAI/GLM-4.5"
    envs:
      <<: *envs
    server_cmd: *server_cmd
    server_cmd_extra:
      - "--compilation-config"
      - '{"cudagraph_capture_sizes": [1,2,4,8,16], "cudagraph_mode":"FULL_DECODE_ONLY"}'
    benchmarks:
      <<: *benchmarks
  - name: "GLM-4.5-TP8-DP2-eager"
    model: "ZhipuAI/GLM-4.5"
    envs:
      <<: *envs
    server_cmd: *server_cmd
    benchmarks:
      <<: *benchmarks


@@ -0,0 +1,52 @@
# ==========================================
# ACTUAL TEST CASES
# ==========================================
test_cases:
  - name: "Kimi-K2-Thinking-TP16-Case"
    model: "moonshotai/Kimi-K2-Thinking"
    envs:
      HCCL_BUFFSIZE: "1024"
      TASK_QUEUE_ENABLE: "1"
      OMP_PROC_BIND: "false"
      HCCL_OP_EXPANSION_MODE: "AIV"
      PYTORCH_NPU_ALLOC_CONF: "expandable_segments:True"
      SERVER_PORT: "DEFAULT_PORT"
    server_cmd:
      - "--tensor-parallel-size"
      - "16"
      - "--port"
      - "$SERVER_PORT"
      - "--max-model-len"
      - "8192"
      - "--max-num-batched-tokens"
      - "8192"
      - "--max-num-seqs"
      - "12"
      - "--gpu-memory-utilization"
      - "0.9"
      - "--trust-remote-code"
      - "--enable-expert-parallel"
      - "--no-enable-prefix-caching"
    benchmarks:
      acc:
        case_type: accuracy
        dataset_path: vllm-ascend/gsm8k-lite
        request_conf: vllm_api_general_chat
        dataset_conf: gsm8k/gsm8k_gen_0_shot_cot_chat_prompt
        max_out_len: 4096
        batch_size: 32
        baseline: 95
        threshold: 5
      perf:
        case_type: performance
        dataset_path: vllm-ascend/GSM8K-in3500-bs400
        request_conf: vllm_api_stream_chat
        dataset_conf: gsm8k/gsm8k_gen_0_shot_cot_str_perf
        num_prompts: 512
        max_out_len: 256
        batch_size: 64
        trust_remote_code: true
        request_rate: 11.2
        baseline: 1
        threshold: 0.97


@@ -0,0 +1,90 @@
# ==========================================
# Shared Configurations
# ==========================================
_envs: &envs
  OMP_NUM_THREADS: "100"
  OMP_PROC_BIND: "false"
  HCCL_BUFFSIZE: "1024"
  VLLM_RPC_TIMEOUT: "3600000"
  VLLM_EXECUTE_MODEL_TIMEOUT_SECONDS: "3600000"
  SERVER_PORT: "DEFAULT_PORT"
_server_cmd: &server_cmd
  - "--quantization"
  - "ascend"
  - "--seed"
  - "1024"
  - "--no-enable-prefix-caching"
  - "--data-parallel-size"
  - "2"
  - "--tensor-parallel-size"
  - "8"
  - "--enable-expert-parallel"
  - "--port"
  - "$SERVER_PORT"
  - "--max-model-len"
  - "40960"
  - "--max-num-seqs"
  - "14"
  - "--trust-remote-code"
_benchmarks_gsm8k: &benchmarks_gsm8k
  acc:
    case_type: accuracy
    dataset_path: vllm-ascend/gsm8k-lite
    request_conf: vllm_api_general_chat
    dataset_conf: gsm8k/gsm8k_gen_0_shot_cot_chat_prompt
    max_out_len: 32768
    batch_size: 32
    baseline: 95
    threshold: 5
_benchmarks_aime: &benchmarks_aime
  acc:
    case_type: accuracy
    dataset_path: vllm-ascend/aime2024
    request_conf: vllm_api_general_chat
    dataset_conf: aime2024/aime2024_gen_0_shot_chat_prompt
    max_out_len: 32768
    batch_size: 32
    baseline: 86.67
    threshold: 7
# ==========================================
# ACTUAL TEST CASES
# ==========================================
test_cases:
  - name: "MTPX-DeepSeek-R1-0528-W8A8-mtp2"
    model: "vllm-ascend/DeepSeek-R1-0528-W8A8"
    envs:
      <<: *envs
    server_cmd: *server_cmd
    server_cmd_extra:
      - "--max-num-batched-tokens"
      - "4096"
      - "--speculative-config"
      - '{"num_speculative_tokens": 2, "method": "mtp"}'
      - "--gpu-memory-utilization"
      - "0.92"
    benchmarks:
      <<: *benchmarks_gsm8k
  - name: "MTPX-DeepSeek-R1-0528-W8A8-mtp3"
    model: "vllm-ascend/DeepSeek-R1-0528-W8A8"
    envs:
      <<: *envs
      HCCL_OP_EXPANSION_MODE: "AIV"
    server_cmd: *server_cmd
    server_cmd_extra:
      - "--max-num-batched-tokens"
      - "2048"
      - "--speculative-config"
      - '{"num_speculative_tokens": 3, "method": "mtp"}'
      - "--gpu-memory-utilization"
      - "0.9"
      - "--compilation-config"
      - '{"cudagraph_capture_sizes": [56], "cudagraph_mode": "FULL_DECODE_ONLY"}'
    benchmarks:
      <<: *benchmarks_aime


@@ -0,0 +1,77 @@
# ==========================================
# ACTUAL TEST CASES
# ==========================================
test_cases:
  - name: "prefix-cache-deepseek-r1-0528-w8a8"
    model: "vllm-ascend/DeepSeek-R1-0528-W8A8"
    envs:
      OMP_NUM_THREADS: "10"
      OMP_PROC_BIND: "false"
      HCCL_BUFFSIZE: "1024"
      PYTORCH_NPU_ALLOC_CONF: "expandable_segments:True"
      SERVER_PORT: "DEFAULT_PORT"
    server_cmd:
      - "--quantization"
      - "ascend"
      - "--data-parallel-size"
      - "2"
      - "--tensor-parallel-size"
      - "8"
      - "--enable-expert-parallel"
      - "--port"
      - "$SERVER_PORT"
      - "--seed"
      - "1024"
      - "--max-model-len"
      - "5200"
      - "--max-num-batched-tokens"
      - "4096"
      - "--max-num-seqs"
      - "16"
      - "--trust-remote-code"
      - "--gpu-memory-utilization"
      - "0.9"
      - "--additional-config"
      - '{"enable_weight_nz_layout": true}'
      - "--speculative-config"
      - '{"num_speculative_tokens": 1, "method": "mtp"}'
    test_content:
      - "benchmark_comparisons"
    benchmark_comparisons_args:
      - metric: "TTFT"
        baseline: "prefix0"
        target: "prefix75"
        ratio: 0.8
        operator: "<"
    benchmarks:
      warm_up:
        case_type: performance
        dataset_path: vllm-ascend/GSM8K-in1024-bs210
        request_conf: vllm_api_stream_chat
        dataset_conf: gsm8k/gsm8k_gen_0_shot_cot_str_perf
        num_prompts: 210
        max_out_len: 2
        batch_size: 1000
        baseline: 0
        threshold: 0.97
      prefix0:
        case_type: performance
        dataset_path: vllm-ascend/prefix0-in3500-bs210
        request_conf: vllm_api_stream_chat
        dataset_conf: gsm8k/gsm8k_gen_0_shot_cot_str_perf
        num_prompts: 210
        max_out_len: 1500
        batch_size: 18
        baseline: 1
        threshold: 0.97
      prefix75:
        case_type: performance
        dataset_path: vllm-ascend/prefix75-in3500-bs210
        request_conf: vllm_api_stream_chat
        dataset_conf: gsm8k/gsm8k_gen_0_shot_cot_str_perf
        num_prompts: 210
        max_out_len: 1500
        batch_size: 18
        baseline: 1
        threshold: 0.97
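
The `benchmark_comparisons` block above declares a cross-run assertion rather
than a fixed baseline: the harness presumably runs `prefix0` (no shared
prefix) and `prefix75` (75% shared prefix) and then requires
`TTFT(prefix75) < 0.8 × TTFT(prefix0)`, i.e. prefix caching must cut
time-to-first-token by at least 20%. The real check lives in the harness, not
this diff; a hedged sketch of what it plausibly computes:

```python
import operator

OPS = {"<": operator.lt, ">": operator.gt}


def check_comparison(results: dict, arg: dict) -> bool:
    """results maps benchmark name -> {metric: value}, e.g. TTFT in ms."""
    baseline = results[arg["baseline"]][arg["metric"]]
    target = results[arg["target"]][arg["metric"]]
    # e.g. TTFT(prefix75) < 0.8 * TTFT(prefix0): the warm prefix cache must
    # cut time-to-first-token by at least 20% on the shared-prefix dataset.
    return OPS[arg["operator"]](target, arg["ratio"] * baseline)


results = {"prefix0": {"TTFT": 250.0}, "prefix75": {"TTFT": 140.0}}
arg = {"metric": "TTFT", "baseline": "prefix0", "target": "prefix75",
       "ratio": 0.8, "operator": "<"}
assert check_comparison(results, arg)
```
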


@@ -0,0 +1,70 @@
# ==========================================
# ACTUAL TEST CASES
# ==========================================
test_cases:
  - name: "prefix-cache-qwen3-32b-w8a8"
    model: "vllm-ascend/Qwen3-32B-W8A8"
    envs:
      TASK_QUEUE_ENABLE: "1"
      HCCL_OP_EXPANSION_MODE: "AIV"
      SERVER_PORT: "DEFAULT_PORT"
    server_cmd:
      - "--quantization"
      - "ascend"
      - "--reasoning-parser"
      - "qwen3"
      - "--tensor-parallel-size"
      - "4"
      - "--port"
      - "$SERVER_PORT"
      - "--max-model-len"
      - "8192"
      - "--max-num-batched-tokens"
      - "8192"
      - "--max-num-seqs"
      - "256"
      - "--trust-remote-code"
      - "--gpu-memory-utilization"
      - "0.9"
      - "--additional-config"
      - '{"enable_weight_nz_layout": true}'
    test_content:
      - "benchmark_comparisons"
    benchmark_comparisons_args:
      - metric: "TTFT"
        baseline: "prefix0"
        target: "prefix75"
        ratio: 0.8
        operator: "<"
    benchmarks:
      warm_up:
        case_type: performance
        dataset_path: vllm-ascend/GSM8K-in1024-bs210
        request_conf: vllm_api_stream_chat
        dataset_conf: gsm8k/gsm8k_gen_0_shot_cot_str_perf
        num_prompts: 210
        max_out_len: 2
        batch_size: 1000
        baseline: 0
        threshold: 0.97
      prefix0:
        case_type: performance
        dataset_path: vllm-ascend/prefix0-in3500-bs210
        request_conf: vllm_api_stream_chat
        dataset_conf: gsm8k/gsm8k_gen_0_shot_cot_str_perf
        num_prompts: 210
        max_out_len: 1500
        batch_size: 48
        baseline: 1
        threshold: 0.97
      prefix75:
        case_type: performance
        dataset_path: vllm-ascend/prefix75-in3500-bs210
        request_conf: vllm_api_stream_chat
        dataset_conf: gsm8k/gsm8k_gen_0_shot_cot_str_perf
        num_prompts: 210
        max_out_len: 1500
        batch_size: 48
        baseline: 1
        threshold: 0.97


@@ -0,0 +1,78 @@
# ==========================================
# Shared Configurations
# ==========================================
_envs: &envs
  TASK_QUEUE_ENABLE: "1"
  OMP_PROC_BIND: "false"
  HCCL_OP_EXPANSION_MODE: "AIV"
  VLLM_ASCEND_ENABLE_FLASHCOMM: "1"
  VLLM_ASCEND_ENABLE_DEBSE_OPTIMIZE: "1"
  SERVER_PORT: "DEFAULT_PORT"
_server_cmd: &server_cmd
  - "--tensor-parallel-size"
  - "4"
  - "--port"
  - "$SERVER_PORT"
  - "--max-model-len"
  - "36864"
  - "--max-num-batched-tokens"
  - "36864"
  - "--block-size"
  - "128"
  - "--trust-remote-code"
  - "--gpu-memory-utilization"
  - "0.9"
  - "--reasoning-parser"
  - "deepseek_r1"
  - "--distributed_executor_backend"
  - "mp"
  - "--additional-config"
  - '{"weight_prefetch_config":{"enabled":true}}'
_benchmarks: &benchmarks
  acc:
    case_type: accuracy
    dataset_path: vllm-ascend/gsm8k-lite
    request_conf: vllm_api_general_chat
    dataset_conf: gsm8k/gsm8k_gen_0_shot_cot_chat_prompt
    max_out_len: 32768
    batch_size: 32
    baseline: 95
    threshold: 5
  perf:
    case_type: performance
    dataset_path: vllm-ascend/GSM8K-in3500-bs400
    request_conf: vllm_api_stream_chat
    dataset_conf: gsm8k/gsm8k_gen_0_shot_cot_str_perf
    num_prompts: 240
    max_out_len: 1500
    batch_size: 60
    baseline: 1
    threshold: 0.97
# ==========================================
# ACTUAL TEST CASES
# ==========================================
test_cases:
  - name: "QwQ-32B-aclgraph"
    model: "Qwen/QwQ-32B"
    envs:
      <<: *envs
    server_cmd: *server_cmd
    server_cmd_extra:
      - "--compilation_config"
      - '{"cudagraph_mode":"FULL_DECODE_ONLY", "cudagraph_capture_sizes": [1, 8, 24, 48, 60]}'
    benchmarks:
      <<: *benchmarks
  - name: "QwQ-32B-single"
    model: "Qwen/QwQ-32B"
    envs:
      <<: *envs
    server_cmd: *server_cmd
    server_cmd_extra:
      - "--enforce-eager"
    benchmarks:


@@ -0,0 +1,63 @@
# ==========================================
# ACTUAL TEST CASES
# ==========================================
test_cases:
  - name: "Qwen2.5-VL-32B-Instruct-a3"
    model: "Qwen/Qwen2.5-VL-32B-Instruct"
    envs:
      TASK_QUEUE_ENABLE: "1"
      VLLM_ASCEND_ENABLE_NZ: "0"
      HCCL_OP_EXPANSION_MODE: "AIV"
      SERVER_PORT: "DEFAULT_PORT"
    server_cmd:
      - "--no-enable-prefix-caching"
      - "--mm-processor-cache-gb"
      - "0"
      - "--tensor-parallel-size"
      - "4"
      - "--port"
      - "$SERVER_PORT"
      - "--max-model-len"
      - "30000"
      - "--max-num-batched-tokens"
      - "40000"
      - "--max-num-seqs"
      - "400"
      - "--trust-remote-code"
      - "--gpu-memory-utilization"
      - "0.8"
      - "--compilation_config"
      - '{"cudagraph_mode": "FULL_DECODE_ONLY"}'
    test_content:
      - "completion"
      - "image"
    benchmarks:
      acc:
        case_type: accuracy
        dataset_path: vllm-ascend/textvqa-lite
        request_conf: vllm_api_stream_chat
        dataset_conf: textvqa/textvqa_gen_base64
        max_out_len: 2048
        batch_size: 128
        baseline: 76.22
        temperature: 0
        top_k: -1
        top_p: 1
        repetition_penalty: 1
        threshold: 5
      perf:
        case_type: performance
        dataset_path: vllm-ascend/textvqa-perf-1080p
        request_conf: vllm_api_stream_chat
        dataset_conf: textvqa/textvqa_gen_base64
        num_prompts: 512
        max_out_len: 256
        batch_size: 128
        temperature: 0
        top_k: -1
        top_p: 1
        repetition_penalty: 1
        request_rate: 0
        baseline: 1
        threshold: 0.97


@@ -0,0 +1,92 @@
# ==========================================
# ACTUAL TEST CASES
# ==========================================
test_cases:
  - name: "Qwen2.5-VL-7B-Instruct-epd"
    model: "Qwen/Qwen2.5-VL-7B-Instruct"
    service_mode: "epd"
    envs:
      ENCODE_PORT: "DEFAULT_PORT"
      PD_PORT: "DEFAULT_PORT"
      PROXY_PORT: "DEFAULT_PORT"
    epd_server_cmds:
      - - "--port"
        - "$ENCODE_PORT"
        - "--model"
        - "Qwen/Qwen2.5-VL-7B-Instruct"
        - "--gpu-memory-utilization"
        - "0.01"
        - "--tensor-parallel-size"
        - "1"
        - "--enforce-eager"
        - "--no-enable-prefix-caching"
        - "--max-model-len"
        - "10000"
        - "--max-num-batched-tokens"
        - "10000"
        - "--max-num-seqs"
        - "1"
        - "--ec-transfer-config"
        - '{"ec_connector_extra_config":{"shared_storage_path":"/dev/shm/epd/storage"},"ec_connector":"ECExampleConnector","ec_role": "ec_producer"}'
      - - "--port"
        - "$PD_PORT"
        - "--model"
        - "Qwen/Qwen2.5-VL-7B-Instruct"
        - "--gpu-memory-utilization"
        - "0.95"
        - "--tensor-parallel-size"
        - "1"
        - "--enforce-eager"
        - "--max-model-len"
        - "10000"
        - "--max-num-batched-tokens"
        - "10000"
        - "--max-num-seqs"
        - "128"
        - "--ec-transfer-config"
        - '{"ec_connector_extra_config":{"shared_storage_path":"/dev/shm/epd/storage"},"ec_connector":"ECExampleConnector","ec_role": "ec_consumer"}'
    epd_proxy_args:
      - "--host"
      - "127.0.0.1"
      - "--port"
      - "$PROXY_PORT"
      - "--encode-servers-urls"
      - "http://localhost:$ENCODE_PORT"
      - "--decode-servers-urls"
      - "http://localhost:$PD_PORT"
      - "--prefill-servers-urls"
      - "disable"
    test_content:
    benchmarks:
      warm_up:
        case_type: performance
        dataset_path: vllm-ascend/textvqa-perf-1080p
        request_conf: vllm_api_stream_chat
        dataset_conf: textvqa/textvqa_gen_base64
        num_prompts: 50
        max_out_len: 20
        batch_size: 32
        request_rate: 0
        baseline: 1
        threshold: 0.97
      acc:
        case_type: accuracy
        dataset_path: vllm-ascend/textvqa-lite
        request_conf: vllm_api_stream_chat
        dataset_conf: textvqa/textvqa_gen_base64
        max_out_len: 2048
        batch_size: 128
        baseline: 82.05
        threshold: 5
      perf:
        case_type: performance
        dataset_path: vllm-ascend/textvqa-perf-1080p
        request_conf: vllm_api_stream_chat
        dataset_conf: textvqa/textvqa_gen_base64
        num_prompts: 512
        max_out_len: 256
        batch_size: 128
        request_rate: 0
        baseline: 1
        threshold: 0.97
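
In the EPD case above, the two `--ec-transfer-config` payloads are identical
except for `ec_role`: the encode server acts as `ec_producer` (writing
multimodal embeddings to the shared storage path) while the prefill/decode
server acts as `ec_consumer` (reading them back instead of re-encoding), with
the proxy routing requests between them. A small sketch (an assumption that
simply mirrors the YAML) of deriving both payloads from one shared definition:

```python
import json

shared = {
    "ec_connector": "ECExampleConnector",
    "ec_connector_extra_config": {"shared_storage_path": "/dev/shm/epd/storage"},
}
# Encode server: computes image embeddings and writes them to shared storage.
producer = json.dumps({**shared, "ec_role": "ec_producer"})
# Prefill/decode server: consumes the embeddings instead of re-encoding.
consumer = json.dumps({**shared, "ec_role": "ec_consumer"})
print(producer)
print(consumer)
```
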


@@ -0,0 +1,55 @@
# ==========================================
# ACTUAL TEST CASES
# ==========================================
test_cases:
  - name: "Qwen2.5-VL-7B-Instruct"
    model: "Qwen/Qwen2.5-VL-7B-Instruct"
    envs:
      TASK_QUEUE_ENABLE: "1"
      VLLM_ASCEND_ENABLE_NZ: "0"
      HCCL_OP_EXPANSION_MODE: "AIV"
      SERVER_PORT: "DEFAULT_PORT"
    server_cmd:
      - "--no-enable-prefix-caching"
      - "--mm-processor-cache-gb"
      - "0"
      - "--tensor-parallel-size"
      - "4"
      - "--port"
      - "$SERVER_PORT"
      - "--max-model-len"
      - "30000"
      - "--max-num-batched-tokens"
      - "40000"
      - "--max-num-seqs"
      - "400"
      - "--trust-remote-code"
      - "--gpu-memory-utilization"
      - "0.8"
      - "--compilation_config"
      - '{"cudagraph_mode": "FULL_DECODE_ONLY"}'
    test_content:
      - "completion"
      - "image"
    benchmarks:
      acc:
        case_type: accuracy
        dataset_path: vllm-ascend/textvqa-lite
        request_conf: vllm_api_stream_chat
        dataset_conf: textvqa/textvqa_gen_base64
        max_out_len: 2048
        batch_size: 128
        baseline: 82.05
        threshold: 5
      perf:
        case_type: performance
        dataset_path: vllm-ascend/textvqa-perf-1080p
        request_conf: vllm_api_stream_chat
        dataset_conf: textvqa/textvqa_gen_base64
        num_prompts: 512
        max_out_len: 256
        batch_size: 128
        request_rate: 0
        baseline: 1
        threshold: 0.97


@@ -0,0 +1,85 @@
# ==========================================
# Shared Configurations
# ==========================================
_envs: &envs
  OMP_NUM_THREADS: "10"
  OMP_PROC_BIND: "false"
  HCCL_BUFFSIZE: "1024"
  PYTORCH_NPU_ALLOC_CONF: "expandable_segments:True"
  VLLM_ASCEND_ENABLE_FLASHCOMM1: "1"
  SERVER_PORT: "DEFAULT_PORT"
_server_cmd: &server_cmd
  - "--quantization"
  - "ascend"
  - "--async-scheduling"
  - "--data-parallel-size"
  - "4"
  - "--tensor-parallel-size"
  - "4"
  - "--enable-expert-parallel"
  - "--port"
  - "$SERVER_PORT"
  - "--max-model-len"
  - "40960"
  - "--max-num-batched-tokens"
  - "8192"
  - "--max-num-seqs"
  - "12"
  - "--trust-remote-code"
  - "--gpu-memory-utilization"
  - "0.9"
_benchmarks: &benchmarks
  acc:
    case_type: accuracy
    dataset_path: vllm-ascend/gsm8k-lite
    request_conf: vllm_api_general_chat
    dataset_conf: gsm8k/gsm8k_gen_0_shot_cot_chat_prompt
    max_out_len: 32768
    batch_size: 32
    top_k: 20
    baseline: 95
    threshold: 5
# ==========================================
# ACTUAL TEST CASES
# ==========================================
test_cases:
  - name: "Qwen3-235B-A22B-W8A8-full_graph"
    model: "vllm-ascend/Qwen3-235B-A22B-W8A8"
    envs:
      <<: *envs
    server_cmd: *server_cmd
    server_cmd_extra:
      - "--compilation-config"
      - '{"cudagraph_mode": "FULL_DECODE_ONLY"}'
    benchmarks:
      <<: *benchmarks
  - name: "Qwen3-235B-A22B-W8A8-piecewise"
    model: "vllm-ascend/Qwen3-235B-A22B-W8A8"
    envs:
      <<: *envs
    server_cmd: *server_cmd
    server_cmd_extra:
      - "--compilation-config"
      - '{"cudagraph_mode": "PIECEWISE"}'
    benchmarks:
      <<: *benchmarks
  - name: "Qwen3-235B-A22B-W8A8-EPLB"
    model: "vllm-ascend/Qwen3-235B-A22B-W8A8"
    envs:
      <<: *envs
      DYNAMIC_EPLB: "true"
    server_cmd: *server_cmd
    server_cmd_extra:
      - "--additional-config"
      - '{"eplb_config": {"dynamic_eplb": "true", "expert_heat_collection_interval": 600, "algorithm_execution_interval": 50, "num_redundant_experts": 16, "eplb_policy_type": 2}}'
      - "--compilation-config"
      - '{"cudagraph_mode": "FULL_DECODE_ONLY"}'
    benchmarks:
      <<: *benchmarks


@@ -0,0 +1,46 @@
# ==========================================
# ACTUAL TEST CASES
# ==========================================
test_cases:
  - name: "Qwen3-30B-A3B-W8A8-TP1"
    model: "vllm-ascend/Qwen3-30B-A3B-W8A8"
    envs:
      OMP_PROC_BIND: "false"
      OMP_NUM_THREADS: "10"
      HCCL_BUFFSIZE: "1024"
      HCCL_OP_EXPANSION_MODE: "AIV"
      PYTORCH_NPU_ALLOC_CONF: "expandable_segments:True"
      SERVER_PORT: "DEFAULT_PORT"
    server_cmd:
      - "--quantization"
      - "ascend"
      - "--async-scheduling"
      - "--no-enable-prefix-caching"
      - "--tensor-parallel-size"
      - "1"
      - "--port"
      - "$SERVER_PORT"
      - "--max-model-len"
      - "5600"
      - "--max-num-batched-tokens"
      - "16384"
      - "--max-num-seqs"
      - "100"
      - "--trust-remote-code"
      - "--gpu-memory-utilization"
      - "0.9"
      - "--compilation-config"
      - '{"cudagraph_mode": "FULL_DECODE_ONLY"}'
    benchmarks:
      perf:
        case_type: performance
        dataset_path: vllm-ascend/GSM8K-in3500-bs400
        request_conf: vllm_api_stream_chat
        dataset_conf: gsm8k/gsm8k_gen_0_shot_cot_str_perf
        num_prompts: 180
        max_out_len: 1500
        batch_size: 45
        request_rate: 0
        baseline: 1
        threshold: 0.97


@@ -0,0 +1,79 @@
# ==========================================
# Shared Configurations
# ==========================================
_envs: &envs
  TASK_QUEUE_ENABLE: "1"
  HCCL_OP_EXPANSION_MODE: "AIV"
  VLLM_ASCEND_ENABLE_FLASHCOMM: "1"
  SERVER_PORT: "DEFAULT_PORT"
_server_cmd: &server_cmd
  - "--quantization"
  - "ascend"
  - "--no-enable-prefix-caching"
  - "--tensor-parallel-size"
  - "4"
  - "--port"
  - "$SERVER_PORT"
  - "--max-model-len"
  - "40960"
  - "--max-num-batched-tokens"
  - "40960"
  - "--block-size"
  - "128"
  - "--trust-remote-code"
  - "--reasoning-parser"
  - "qwen3"
  - "--gpu-memory-utilization"
  - "0.9"
  - "--async-scheduling"
  - "--additional-config"
  - '{"weight_prefetch_config":{"enabled":true}}'
_benchmarks: &benchmarks
  acc:
    case_type: accuracy
    dataset_path: vllm-ascend/aime2024
    request_conf: vllm_api_general_chat
    dataset_conf: aime2024/aime2024_gen_0_shot_chat_prompt
    max_out_len: 32768
    batch_size: 32
    baseline: 83.33
    threshold: 7
  perf:
    case_type: performance
    dataset_path: vllm-ascend/GSM8K-in3500-bs400
    request_conf: vllm_api_stream_chat
    dataset_conf: gsm8k/gsm8k_gen_0_shot_cot_str_perf
    num_prompts: 288
    max_out_len: 1500
    batch_size: 72
    baseline: 1
    threshold: 0.97
# ==========================================
# ACTUAL TEST CASES
# ==========================================
test_cases:
  - name: "Qwen3-32B-W8A8-aclgraph-a2"
    model: "vllm-ascend/Qwen3-32B-W8A8"
    envs:
      <<: *envs
    server_cmd: *server_cmd
    server_cmd_extra:
      - "--compilation-config"
      - '{"cudagraph_mode":"FULL_DECODE_ONLY","cudagraph_capture_sizes":[1,12,16,20,24,32,48,60,64,68,72,76,80]}'
    benchmarks:
      <<: *benchmarks
  - name: "Qwen3-32B-W8A8-single-a2"
    model: "vllm-ascend/Qwen3-32B-W8A8"
    envs:
      <<: *envs
    server_cmd: *server_cmd
    server_cmd_extra:
      - "--enforce-eager"
    benchmarks:


@@ -0,0 +1,69 @@
# ==========================================
# ACTUAL TEST CASES
# ==========================================
test_cases:
  - name: "Qwen3-32B-W8A8-a3-feature-stack3"
    model: "vllm-ascend/Qwen3-32B-W8A8"
    envs:
      VLLM_USE: "1"
      TASK_QUEUE_ENABLE: "1"
      HCCL_OP_EXPANSION_MODE: "AIV"
      OMP_PROC_BIND: "false"
      VLLM_ASCEND_ENABLE_TOPK_OPTIMIZE: "1"
      VLLM_ASCEND_ENABLE_FLASHCOMM: "1"
      SERVER_PORT: "DEFAULT_PORT"
    prompts:
      - "9.11 and 9.8, which is greater?"
    api_keyword_args:
      chat_template_kwargs:
        enable_thinking: true
    server_cmd:
      - "--quantization"
      - "ascend"
      - "--tensor-parallel-size"
      - "4"
      - "--port"
      - "$SERVER_PORT"
      - "--trust-remote-code"
      - "--reasoning-parser"
      - "qwen3"
      - "--distributed_executor_backend"
      - "mp"
      - "--gpu-memory-utilization"
      - "0.9"
      - "--block-size"
      - "128"
      - "--max-num-seqs"
      - "256"
      - "--enforce-eager"
      - "--max-model-len"
      - "35840"
      - "--max-num-batched-tokens"
      - "35840"
      - "--additional-config"
      - '{"enable_weight_nz_layout":true, "weight_prefetch_config":{"enabled": true}}'
      - "--compilation-config"
      - '{"cudagraph_mode":"FULL_DECODE_ONLY", "cudagraph_capture_sizes":[1,8,24,48,60]}'
    test_content:
      - "chat_completion"
    benchmarks:
      acc:
        case_type: accuracy
        dataset_path: vllm-ascend/gsm8k-lite
        request_conf: vllm_api_general_chat
        dataset_conf: gsm8k/gsm8k_gen_0_shot_noncot_chat_prompt
        max_out_len: 10240
        batch_size: 32
        baseline: 96
        threshold: 4
      perf:
        case_type: performance
        dataset_path: vllm-ascend/GSM8K-in3500-bs400
        request_conf: vllm_api_stream_chat
        dataset_conf: gsm8k/gsm8k_gen_0_shot_cot_str_perf
        num_prompts: 240
        max_out_len: 1500
        batch_size: 60
        baseline: 1
        threshold: 0.97


@@ -0,0 +1,78 @@
# ==========================================
# Shared Configurations
# ==========================================
_envs: &envs
  TASK_QUEUE_ENABLE: "1"
  HCCL_OP_EXPANSION_MODE: "AIV"
  VLLM_ASCEND_ENABLE_FLASHCOMM: "1"
  SERVER_PORT: "DEFAULT_PORT"
_server_cmd: &server_cmd
  - "--quantization"
  - "ascend"
  - "--no-enable-prefix-caching"
  - "--tensor-parallel-size"
  - "4"
  - "--port"
  - "$SERVER_PORT"
  - "--max-model-len"
  - "40960"
  - "--max-num-batched-tokens"
  - "40960"
  - "--block-size"
  - "128"
  - "--trust-remote-code"
  - "--reasoning-parser"
  - "qwen3"
  - "--gpu-memory-utilization"
  - "0.9"
  - "--async-scheduling"
  - "--additional-config"
  - '{"weight_prefetch_config":{"enabled":true}}'
_benchmarks: &benchmarks
  acc:
    case_type: accuracy
    dataset_path: vllm-ascend/aime2024
    request_conf: vllm_api_general_chat
    dataset_conf: aime2024/aime2024_gen_0_shot_chat_prompt
    max_out_len: 32768
    batch_size: 32
    baseline: 83.33
    threshold: 7
  perf:
    case_type: performance
    dataset_path: vllm-ascend/GSM8K-in3500-bs400
    request_conf: vllm_api_stream_chat
    dataset_conf: gsm8k/gsm8k_gen_0_shot_cot_str_perf
    num_prompts: 304
    max_out_len: 1500
    batch_size: 76
    baseline: 1
    threshold: 0.97
# ==========================================
# ACTUAL TEST CASES
# ==========================================
test_cases:
  - name: "Qwen3-32B-W8A8-aclgraph-a3"
    model: "vllm-ascend/Qwen3-32B-W8A8"
    envs:
      <<: *envs
    server_cmd: *server_cmd
    server_cmd_extra:
      - "--compilation-config"
      - '{"cudagraph_mode":"FULL_DECODE_ONLY","cudagraph_capture_sizes":[1,12,16,20,24,32,48,60,64,68,72,76,80]}'
    benchmarks:
      <<: *benchmarks
  - name: "Qwen3-32B-W8A8-single-a3"
    model: "vllm-ascend/Qwen3-32B-W8A8"
    envs:
      <<: *envs
    server_cmd: *server_cmd
    server_cmd_extra:
      - "--enforce-eager"
    benchmarks:


@@ -0,0 +1,51 @@
# ==========================================
# ACTUAL TEST CASES
# ==========================================
test_cases:
  - name: "Qwen3-32B-TP4"
    model: "Qwen/Qwen3-32B"
    envs:
      TASK_QUEUE_ENABLE: "1"
      OMP_PROC_BIND: "false"
      HCCL_OP_EXPANSION_MODE: "AIV"
      PAGED_ATTENTION_MASK_LEN: "5500"
      SERVER_PORT: "DEFAULT_PORT"
    server_cmd:
      - "--no-enable-prefix-caching"
      - "--tensor-parallel-size"
      - "4"
      - "--port"
      - "$SERVER_PORT"
      - "--max-model-len"
      - "36864"
      - "--max-num-batched-tokens"
      - "36864"
      - "--block-size"
      - "128"
      - "--trust-remote-code"
      - "--gpu-memory-utilization"
      - "0.9"
      - "--additional-config"
      - '{"enable_weight_nz_layout":true}'
    benchmarks:
      acc:
        case_type: accuracy
        dataset_path: vllm-ascend/gsm8k-lite
        request_conf: vllm_api_general_chat
        dataset_conf: gsm8k/gsm8k_gen_0_shot_cot_chat_prompt
        max_out_len: 32768
        batch_size: 32
        baseline: 95
        threshold: 5
      perf:
        case_type: performance
        dataset_path: vllm-ascend/GSM8K-in3500-bs400
        request_conf: vllm_api_stream_chat
        dataset_conf: gsm8k/gsm8k_gen_0_shot_cot_str_perf
        num_prompts: 80
        max_out_len: 1500
        batch_size: 20
        request_rate: 0
        baseline: 1
        threshold: 0.97


@@ -0,0 +1,75 @@
# ==========================================
# Shared Configurations
# ==========================================
_envs: &envs
  OMP_NUM_THREADS: "10"
  OMP_PROC_BIND: "false"
  HCCL_BUFFSIZE: "1024"
  PYTORCH_NPU_ALLOC_CONF: "expandable_segments:True"
  SERVER_PORT: "DEFAULT_PORT"
_server_cmd: &server_cmd
  - "--tensor-parallel-size"
  - "4"
  - "--port"
  - "$SERVER_PORT"
  - "--max-model-len"
  - "40960"
  - "--trust-remote-code"
  - "--async-scheduling"
  - "--no-enable-prefix-caching"
  - "--enable-expert-parallel"
  - "--gpu-memory-utilization"
  - "0.8"
  - "--max-num-seqs"
  - "64"
_benchmarks: &benchmarks
  perf:
    case_type: performance
    dataset_path: vllm-ascend/GSM8K-in3500-bs400
    request_conf: vllm_api_stream_chat
    dataset_conf: gsm8k/gsm8k_gen_0_shot_cot_str_perf
    num_prompts: 256
    max_out_len: 1500
    batch_size: 64
    baseline: 1
    threshold: 0.97
  acc:
    case_type: accuracy
    dataset_path: vllm-ascend/gsm8k-lite
    request_conf: vllm_api_general_chat
    dataset_conf: gsm8k/gsm8k_gen_0_shot_cot_chat_prompt
    max_out_len: 32768
    batch_size: 32
    top_k: 20
    baseline: 95
    threshold: 5
# ==========================================
# ACTUAL TEST CASES
# ==========================================
test_cases:
  - name: "Qwen3-Next-80B-A3B-Instruct-aclgraph-8192-a2"
    model: "Qwen/Qwen3-Next-80B-A3B-Instruct"
    envs:
      <<: *envs
    server_cmd: *server_cmd
    server_cmd_extra:
      - "--max-num-batched-tokens"
      - "8192"
    benchmarks:
      <<: *benchmarks
  - name: "Qwen3-Next-80B-A3B-Instruct-aclgraph-32768-a2"
    model: "Qwen/Qwen3-Next-80B-A3B-Instruct"
    envs:
      <<: *envs
    server_cmd: *server_cmd
    server_cmd_extra:
      - "--max-num-batched-tokens"
      - "32768"
    benchmarks:
      <<: *benchmarks


@@ -0,0 +1,45 @@
# ==========================================
# ACTUAL TEST CASES
# ==========================================
test_cases:
  - name: "Qwen3-Next-80B-A3B-Instruct-W8A8"
    model: "vllm-ascend/Qwen3-Next-80B-A3B-Instruct-W8A8"
    envs:
      OMP_NUM_THREADS: "10"
      OMP_PROC_BIND: "false"
      HCCL_BUFFSIZE: "1024"
      SERVER_PORT: "DEFAULT_PORT"
    server_cmd:
      - "--quantization"
      - "ascend"
      - "--async-scheduling"
      - "--no-enable-prefix-caching"
      - "--data-parallel-size"
      - "1"
      - "--tensor-parallel-size"
      - "4"
      - "--enable-expert-parallel"
      - "--port"
      - "$SERVER_PORT"
      - "--max-model-len"
      - "40960"
      - "--max-num-batched-tokens"
      - "8192"
      - "--max-num-seqs"
      - "32"
      - "--trust-remote-code"
      - "--gpu-memory-utilization"
      - "0.65"
      - "--compilation-config"
      - '{"cudagraph_capture_sizes": [32]}'
    benchmarks:
      acc:
        case_type: accuracy
        dataset_path: vllm-ascend/gsm8k-lite
        request_conf: vllm_api_general_chat
        dataset_conf: gsm8k/gsm8k_gen_0_shot_cot_chat_prompt
        max_out_len: 32768
        batch_size: 32
        baseline: 95
        threshold: 5


@@ -0,0 +1,75 @@
# ==========================================
# Shared Configurations
# ==========================================
_envs: &envs
  OMP_NUM_THREADS: "10"
  OMP_PROC_BIND: "false"
  HCCL_BUFFSIZE: "1024"
  PYTORCH_NPU_ALLOC_CONF: "expandable_segments:True"
  SERVER_PORT: "DEFAULT_PORT"
_server_cmd: &server_cmd
  - "--tensor-parallel-size"
  - "4"
  - "--port"
  - "$SERVER_PORT"
  - "--max-model-len"
  - "40960"
  - "--trust-remote-code"
  - "--async-scheduling"
  - "--no-enable-prefix-caching"
  - "--enable-expert-parallel"
  - "--gpu-memory-utilization"
  - "0.8"
  - "--max-num-seqs"
  - "64"
_benchmarks: &benchmarks
  perf:
    case_type: performance
    dataset_path: vllm-ascend/GSM8K-in3500-bs400
    request_conf: vllm_api_stream_chat
    dataset_conf: gsm8k/gsm8k_gen_0_shot_cot_str_perf
    num_prompts: 256
    max_out_len: 1500
    batch_size: 64
    baseline: 1
    threshold: 0.97
  acc:
    case_type: accuracy
    dataset_path: vllm-ascend/gsm8k-lite
    request_conf: vllm_api_general_chat
    dataset_conf: gsm8k/gsm8k_gen_0_shot_cot_chat_prompt
    max_out_len: 32768
    batch_size: 32
    top_k: 20
    baseline: 95
    threshold: 5
# ==========================================
# ACTUAL TEST CASES
# ==========================================
test_cases:
  - name: "Qwen3-Next-80B-A3B-Instruct-aclgraph-8192-a3"
    model: "Qwen/Qwen3-Next-80B-A3B-Instruct"
    envs:
      <<: *envs
    server_cmd: *server_cmd
    server_cmd_extra:
      - "--max-num-batched-tokens"
      - "8192"
    benchmarks:
      <<: *benchmarks
  - name: "Qwen3-Next-80B-A3B-Instruct-aclgraph-32768-a3"
    model: "Qwen/Qwen3-Next-80B-A3B-Instruct"
    envs:
      <<: *envs
    server_cmd: *server_cmd
    server_cmd_extra:
      - "--max-num-batched-tokens"
      - "32768"
    benchmarks:
      <<: *benchmarks