# ==========================================
# ACTUAL TEST CASES
# ==========================================
test_cases:
  - name: "DeepSeek-V3.2-W8A8-TP8-DP2"
    model: "vllm-ascend/DeepSeek-V3.2-W8A8"
    envs:
      HCCL_OP_EXPANSION_MODE: "AIV"
      OMP_PROC_BIND: "false"
      OMP_NUM_THREADS: "1"
      HCCL_BUFFSIZE: "1024"
      VLLM_ASCEND_ENABLE_MLAPO: "1"
      PYTORCH_NPU_ALLOC_CONF: "expandable_segments:True"
      VLLM_ASCEND_ENABLE_FLASHCOMM1: "1"
      VLLM_ENGINE_READY_TIMEOUT_S: "1800"
      SERVER_PORT: "DEFAULT_PORT"
    server_cmd:
      - "--enable-expert-parallel"
      - "--tensor-parallel-size"
      - "8"
      - "--data-parallel-size"
      - "2"
      - "--port"
      - "$SERVER_PORT"
      - "--max-model-len"
      - "8192"
      - "--max-num-batched-tokens"
      - "8192"
      - "--max-num-seqs"
      - "4"
      - "--trust-remote-code"
      - "--quantization"
      - "ascend"
      - "--gpu-memory-utilization"
      - "0.98"
      - "--compilation-config"
      - '{"cudagraph_capture_sizes":[8, 16, 24, 32, 40, 48], "cudagraph_mode":"FULL_DECODE_ONLY"}'
      - "--speculative-config"
      - '{"num_speculative_tokens": 3, "method":"deepseek_mtp"}'
      - "--additional-config"
      - '{"layer_sharding": ["q_b_proj", "o_proj"]}'
      - "--reasoning-parser"
      - "deepseek_v3"
      - "--tokenizer_mode"
      - "deepseek_v32"
    benchmarks:
      acc:
        case_type: accuracy
        dataset_path: vllm-ascend/gsm8k-lite
        request_conf: vllm_api_general_chat
        dataset_conf: gsm8k/gsm8k_gen_0_shot_cot_chat_prompt
        max_out_len: 4096
        batch_size: 8
        baseline: 95
        threshold: 5
      perf_1:
        case_type: performance
        dataset_path: vllm-ascend/GSM8K-in3500-bs400
        request_conf: vllm_api_stream_chat
        dataset_conf: gsm8k/gsm8k_gen_0_shot_cot_str_perf
        num_prompts: 1
        max_out_len: 1500
        batch_size: 1
        request_rate: 11.2
        baseline: 134
        threshold: 0.97
      perf_2:
        case_type: performance
        dataset_path: vllm-ascend/GSM8K-in3500-bs400
        request_conf: vllm_api_stream_chat
        dataset_conf: gsm8k/gsm8k_gen_0_shot_cot_str_perf
        num_prompts: 100
        max_out_len: 1500
        batch_size: 4
        request_rate: 11.2
        baseline: 134
        threshold: 0.97
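
# ------------------------------------------------------------------
# Minimal sketch of how a case like the above is assumed to be consumed
# (assumption, for illustration only; the actual harness behavior is defined
# elsewhere in the test framework):
#   1. The variables under `envs` are exported before the server starts.
#   2. The flags under `server_cmd` are appended to the vLLM serve
#      invocation for `model`, roughly:
#        vllm serve vllm-ascend/DeepSeek-V3.2-W8A8 \
#          --enable-expert-parallel --tensor-parallel-size 8 \
#          --data-parallel-size 2 --port $SERVER_PORT ...
#   3. Each entry under `benchmarks` is run against that server and its
#      result is checked against `baseline` within `threshold`.
# ------------------------------------------------------------------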