---
# ==========================================
# Shared Configurations
# ==========================================

# Environment variables shared by all test cases.
# Values are quoted strings on purpose: env vars must reach the server as text.
_envs: &envs
  TASK_QUEUE_ENABLE: "1"
  HCCL_OP_EXPANSION_MODE: "AIV"
  VLLM_ASCEND_ENABLE_FLASHCOMM: "1"
  # Placeholder substituted by the harness before launch — TODO confirm substitution mechanism
  SERVER_PORT: "DEFAULT_PORT"

# Base server command-line arguments (argv list, one token per entry).
_server_cmd: &server_cmd
  - "--quantization"
  - "ascend"
  - "--no-enable-prefix-caching"
  - "--tensor-parallel-size"
  - "4"
  - "--port"
  - "$SERVER_PORT"
  - "--max-model-len"
  - "40960"
  - "--max-num-batched-tokens"
  - "40960"
  - "--block-size"
  - "128"
  - "--trust-remote-code"
  - "--reasoning-parser"
  - "qwen3"
  - "--gpu-memory-utilization"
  - "0.9"
  - "--async-scheduling"
  - "--additional-config"
  # Single-quoted so the embedded JSON is passed through verbatim.
  - '{"weight_prefetch_config":{"enabled":true}}'

# Benchmark suites shared by all test cases: one accuracy run, one performance run.
_benchmarks: &benchmarks
  acc:
    case_type: accuracy
    dataset_path: vllm-ascend/aime2024
    request_conf: vllm_api_general_chat
    dataset_conf: aime2024/aime2024_gen_0_shot_chat_prompt
    max_out_len: 32768
    batch_size: 32
    baseline: 83.33
    threshold: 7
  perf:
    case_type: performance
    dataset_path: vllm-ascend/GSM8K-in3500-bs400
    request_conf: vllm_api_stream_chat
    dataset_conf: gsm8k/gsm8k_gen_0_shot_cot_str_perf
    num_prompts: 288
    max_out_len: 1500
    batch_size: 72
    baseline: 1
    threshold: 0.97

# ==========================================
# ACTUAL TEST CASES
# ==========================================
test_cases:
  # Full-decode-only ACL-graph variant with explicit capture sizes.
  - name: "Qwen3-32B-W8A8-aclgraph-a2"
    model: "vllm-ascend/Qwen3-32B-W8A8"
    envs:
      <<: *envs
    server_cmd: *server_cmd
    server_cmd_extra:
      - "--compilation-config"
      - '{"cudagraph_mode":"FULL_DECODE_ONLY","cudagraph_capture_sizes":[1,12,16,20,24,32,48,60,64,68,72,76,80]}'
    benchmarks:
      <<: *benchmarks

  # Eager-mode (no graph capture) variant of the same model.
  - name: "Qwen3-32B-W8A8-single-a2"
    model: "vllm-ascend/Qwen3-32B-W8A8"
    envs:
      <<: *envs
    server_cmd: *server_cmd
    server_cmd_extra:
      - "--enforce-eager"
    benchmarks:
      # NOTE(review): source chunk ends here with a bare "benchmarks:"; the value is
      # likely "<<: *benchmarks" as in the case above — confirm against the full file.