# ==========================================
# ACTUAL TEST CASES
# ==========================================
---
test_cases:
  - name: "Qwen3-32B-W8A8-a3-feature-stack3"
    model: "vllm-ascend/Qwen3-32B-W8A8"
    # Environment variables exported before launching the server.
    # Values are quoted strings on purpose: env vars are text, and quoting
    # avoids YAML implicit-typing surprises ("false", "1", etc.).
    envs:
      VLLM_USE: "1"
      TASK_QUEUE_ENABLE: "1"
      HCCL_OP_EXPANSION_MODE: "AIV"
      OMP_PROC_BIND: "false"
      VLLM_ASCEND_ENABLE_TOPK_OPTIMIZE: "1"
      VLLM_ASCEND_ENABLE_FLASHCOMM: "1"
      # Placeholder substituted by the harness; referenced below as $SERVER_PORT.
      SERVER_PORT: "DEFAULT_PORT"
    prompts:
      - "9.11 and 9.8, which is greater?"
    # Extra keyword arguments forwarded to the chat-completion API call.
    api_keyword_args:
      chat_template_kwargs:
        enable_thinking: true
    # Command-line arguments appended to the server launch command.
    server_cmd:
      - "--quantization"
      - "ascend"
      - "--tensor-parallel-size"
      - "4"
      - "--port"
      - "$SERVER_PORT"
      - "--trust-remote-code"
      - "--reasoning-parser"
      - "qwen3"
      - "--distributed_executor_backend"
      - "mp"
      - "--gpu-memory-utilization"
      - "0.9"
      - "--block-size"
      - "128"
      - "--max-num-seqs"
      - "256"
      - "--enforce-eager"
      - "--max-model-len"
      - "35840"
      - "--max-num-batched-tokens"
      - "35840"
      - "--additional-config"
      - '{"enable_weight_nz_layout":true, "weight_prefetch_config":{"enabled": true}}'
      - "--compilation-config"
      - '{"cudagraph_mode":"FULL_DECODE_ONLY", "cudagraph_capture_sizes":[1,8,24,48,60]}'
    test_content:
      - "chat_completion"
    # Benchmark definitions: one accuracy run and one performance run.
    benchmarks:
      acc:
        case_type: accuracy
        dataset_path: vllm-ascend/gsm8k-lite
        request_conf: vllm_api_general_chat
        dataset_conf: gsm8k/gsm8k_gen_0_shot_noncot_chat_prompt
        max_out_len: 10240
        batch_size: 32
        # Expected accuracy (percent) and allowed absolute deviation.
        baseline: 96
        threshold: 4
      perf:
        case_type: performance
        dataset_path: vllm-ascend/GSM8K-in3500-bs400
        request_conf: vllm_api_stream_chat
        dataset_conf: gsm8k/gsm8k_gen_0_shot_cot_str_perf
        num_prompts: 240
        max_out_len: 1500
        batch_size: 60
        # Relative throughput baseline and minimum acceptable ratio.
        baseline: 1
        threshold: 0.97