---
# ==========================================
# ACTUAL TEST CASES
# ==========================================
# NOTE(review): this file had been collapsed onto a single line starting with
# '#', which turned the entire document into one YAML comment (parsers loaded
# it as empty). Reconstructed into block YAML; all keys/values preserved.
test_cases:
  - name: "Qwen3.5-27B-w8a8"
    model: "Eco-Tech/Qwen3.5-27B-w8a8-mtp"
    # Environment variables exported before the server starts.
    # All values are quoted strings so consumers receive them verbatim
    # (e.g. "true"/"1" stay strings, not YAML booleans/ints).
    envs:
      VLLM_USE_MODELSCOPE: "true"
      PYTORCH_NPU_ALLOC_CONF: "expandable_segments:True"
      HCCL_BUFFSIZE: "512"
      OMP_PROC_BIND: "false"
      OMP_NUM_THREADS: "1"
      TASK_QUEUE_ENABLE: "1"
      # "DEFAULT_PORT" is presumably a placeholder substituted by the test
      # harness — TODO confirm against the runner.
      SERVER_PORT: "DEFAULT_PORT"
    # CLI arguments for the model server; "$SERVER_PORT" is expanded at
    # launch time from the env above.
    server_cmd:
      - "--tensor-parallel-size"
      - "2"
      - "--data-parallel-size"
      - "1"
      - "--port"
      - "$SERVER_PORT"
      - "--seed"
      - "1024"
      - "--max-num-seqs"
      - "32"
      - "--quantization"
      - "ascend"
      - "--max-model-len"
      - "133000"
      - "--max-num-batched-tokens"
      - "8096"
      - "--trust-remote-code"
      - "--gpu-memory-utilization"
      - "0.9"
      - "--no-enable-prefix-caching"
      # JSON-valued flags are single-quoted so the embedded double quotes
      # reach the CLI untouched.
      - "--additional-config"
      - '{"enable_cpu_binding":true}'
      - "--speculative_config"
      - '{"method": "qwen3_5_mtp", "num_speculative_tokens": 3,"enforce_eager": true}'
      - "--compilation-config"
      - '{"cudagraph_mode":"FULL_DECODE_ONLY"}'
      - "--async-scheduling"
    benchmarks:
      # Accuracy gate: pass if score is within `threshold` of `baseline`.
      acc:
        case_type: accuracy
        dataset_path: vllm-ascend/gsm8k-lite
        request_conf: vllm_api_general_chat
        dataset_conf: gsm8k/gsm8k_gen_0_shot_cot_chat_prompt
        max_out_len: 32768
        batch_size: 32
        baseline: 95
        threshold: 5
      # Performance gate: baseline is presumably throughput and threshold a
      # relative floor (0.97 of baseline) — TODO confirm semantics with the
      # benchmark runner.
      perf:
        case_type: performance
        dataset_path: vllm-ascend/GSM8K-in3500-bs8000-qwen3
        request_conf: vllm_api_stream_chat
        dataset_conf: gsm8k/gsm8k_gen_0_shot_cot_str_perf
        num_prompts: 80
        max_out_len: 1500
        batch_size: 20
        request_rate: 0
        baseline: 664.6449
        threshold: 0.97