diff --git a/.github/workflows/misc/model_list.json b/.github/workflows/misc/model_list.json
index 6c135ac9..94b7c552 100644
--- a/.github/workflows/misc/model_list.json
+++ b/.github/workflows/misc/model_list.json
@@ -245,6 +245,9 @@
         "xlangai/OpenCUA-7B",
         "Eco-Tech/GLM-5-w4a8",
         "Eco-Tech/GLM-4.7-W8A8-floatmtp",
-        "MiniMax/MiniMax-M2.5"
+        "MiniMax/MiniMax-M2.5",
+        "Eco-Tech/Qwen3.5-27B-w8a8-mtp",
+        "Eco-Tech/MiniMax-M2.5-w8a8-QuaRot",
+        "Eco-Tech/Qwen3.5-397B-A17B-w8a8-mtp"
     ]
 }
diff --git a/.github/workflows/schedule_nightly_test_a3.yaml b/.github/workflows/schedule_nightly_test_a3.yaml
index d59df004..8deb59ca 100644
--- a/.github/workflows/schedule_nightly_test_a3.yaml
+++ b/.github/workflows/schedule_nightly_test_a3.yaml
@@ -223,6 +223,15 @@ jobs:
           - name: deepseek-r1-0528-w8a8-prefix-cache
             os: linux-aarch64-a3-16
             config_file_path: Prefix-Cache-DeepSeek-R1-0528-W8A8.yaml
+          - name: Qwen3.5-27B-w8a8-A3
+            os: linux-aarch64-a3-2
+            config_file_path: Qwen3.5-27B-w8a8-A3.yaml
+          - name: MiniMax-M2.5-w8a8
+            os: linux-aarch64-a3-16
+            config_file_path: MiniMax-M2.5-W8A8-A3.yaml
+          - name: Qwen3.5-397B-A17B-w8a8-mtp
+            os: linux-aarch64-a3-16
+            config_file_path: Qwen3.5-397B-A17B-W8A8-mtp-A3.yaml
     uses: ./.github/workflows/_e2e_nightly_single_node.yaml
     with:
       runner: ${{ matrix.test_config.os }}
diff --git a/tests/e2e/nightly/single_node/models/configs/MiniMax-M2.5-W8A8-A3.yaml b/tests/e2e/nightly/single_node/models/configs/MiniMax-M2.5-W8A8-A3.yaml
new file mode 100644
index 00000000..a49fb5f0
--- /dev/null
+++ b/tests/e2e/nightly/single_node/models/configs/MiniMax-M2.5-W8A8-A3.yaml
@@ -0,0 +1,62 @@
+# ==========================================
+# ACTUAL TEST CASES
+# ==========================================
+
+test_cases:
+  - name: "MiniMax-M2.5-w8a8"
+    model: "Eco-Tech/MiniMax-M2.5-w8a8-QuaRot"
+    envs:
+      HCCL_BUFFSIZE: "512"
+      HCCL_OP_EXPANSION_MODE: "AIV"
+      PYTORCH_NPU_ALLOC_CONF: "expandable_segments:True"
+      OMP_NUM_THREADS: "1"
+      TASK_QUEUE_ENABLE: "1"
+      VLLM_ASCEND_ENABLE_FUSED_MC2: "2"
+      VLLM_ASCEND_ENABLE_FLASHCOMM1: "1"
+      SERVER_PORT: "DEFAULT_PORT"
+    server_cmd:
+      - "--no-enable-prefix-caching"
+      - "--tensor-parallel-size"
+      - "4"
+      - "--data-parallel-size"
+      - "4"
+      - "--port"
+      - "$SERVER_PORT"
+      - "--enable-expert-parallel"
+      - "--max-num-seqs"
+      - "128"
+      - "--max-model-len"
+      - "153600"
+      - "--max-num-batched-tokens"
+      - "16384"
+      - "--trust-remote-code"
+      - "--gpu-memory-utilization"
+      - "0.8"
+      - "--quantization"
+      - "ascend"
+      - "--compilation-config"
+      - '{"cudagraph_mode":"FULL_DECODE_ONLY"}'
+      - "--additional-config"
+      - '{"enable_cpu_binding":true}'
+      - "--async-scheduling"
+    benchmarks:
+      acc:
+        case_type: accuracy
+        dataset_path: vllm-ascend/gsm8k-lite
+        request_conf: vllm_api_general_chat
+        dataset_conf: gsm8k/gsm8k_gen_0_shot_cot_chat_prompt
+        max_out_len: 32768
+        batch_size: 32
+        baseline: 95
+        threshold: 5
+      perf:
+        case_type: performance
+        dataset_path: vllm-ascend/GSM8K-in3500-bs2800
+        request_conf: vllm_api_stream_chat
+        dataset_conf: gsm8k/gsm8k_gen_0_shot_cot_str_perf
+        num_prompts: 80
+        max_out_len: 1500
+        batch_size: 20
+        request_rate: 0
+        baseline: 730.0832
+        threshold: 0.97
diff --git a/tests/e2e/nightly/single_node/models/configs/Qwen3.5-27B-w8a8-A3.yaml b/tests/e2e/nightly/single_node/models/configs/Qwen3.5-27B-w8a8-A3.yaml
new file mode 100644
index 00000000..1cdceded
--- /dev/null
+++ b/tests/e2e/nightly/single_node/models/configs/Qwen3.5-27B-w8a8-A3.yaml
@@ -0,0 +1,64 @@
+# ==========================================
+# ACTUAL TEST CASES
+# ==========================================
+
+test_cases:
+  - name: "Qwen3.5-27B-w8a8"
+    model: "Eco-Tech/Qwen3.5-27B-w8a8-mtp"
+    envs:
+      VLLM_USE_MODELSCOPE: "true"
+      PYTORCH_NPU_ALLOC_CONF: "expandable_segments:True"
+      HCCL_BUFFSIZE: "512"
+      OMP_PROC_BIND: "false"
+      OMP_NUM_THREADS: "1"
+      TASK_QUEUE_ENABLE: "1"
+      SERVER_PORT: "DEFAULT_PORT"
+    server_cmd:
+      - "--tensor-parallel-size"
+      - "2"
+      - "--data-parallel-size"
+      - "1"
+      - "--port"
+      - "$SERVER_PORT"
+      - "--seed"
+      - "1024"
+      - "--max-num-seqs"
+      - "32"
+      - "--quantization"
+      - "ascend"
+      - "--max-model-len"
+      - "133000"
+      - "--max-num-batched-tokens"
+      - "8096"
+      - "--trust-remote-code"
+      - "--gpu-memory-utilization"
+      - "0.9"
+      - "--no-enable-prefix-caching"
+      - "--additional-config"
+      - '{"enable_cpu_binding":true}'
+      - "--speculative_config"
+      - '{"method": "qwen3_5_mtp", "num_speculative_tokens": 3, "enforce_eager": true}'
+      - "--compilation-config"
+      - '{"cudagraph_mode":"FULL_DECODE_ONLY"}'
+      - "--async-scheduling"
+    benchmarks:
+      acc:
+        case_type: accuracy
+        dataset_path: vllm-ascend/gsm8k-lite
+        request_conf: vllm_api_general_chat
+        dataset_conf: gsm8k/gsm8k_gen_0_shot_cot_chat_prompt
+        max_out_len: 32768
+        batch_size: 32
+        baseline: 95
+        threshold: 5
+      perf:
+        case_type: performance
+        dataset_path: vllm-ascend/GSM8K-in3500-bs8000-qwen3
+        request_conf: vllm_api_stream_chat
+        dataset_conf: gsm8k/gsm8k_gen_0_shot_cot_str_perf
+        num_prompts: 80
+        max_out_len: 1500
+        batch_size: 20
+        request_rate: 0
+        baseline: 664.6449
+        threshold: 0.97
diff --git a/tests/e2e/nightly/single_node/models/configs/Qwen3.5-397B-A17B-W8A8-mtp-A3.yaml b/tests/e2e/nightly/single_node/models/configs/Qwen3.5-397B-A17B-W8A8-mtp-A3.yaml
new file mode 100644
index 00000000..06264059
--- /dev/null
+++ b/tests/e2e/nightly/single_node/models/configs/Qwen3.5-397B-A17B-W8A8-mtp-A3.yaml
@@ -0,0 +1,67 @@
+# ==========================================
+# ACTUAL TEST CASES
+# ==========================================
+
+test_cases:
+  - name: "Qwen3.5-397B-A17B-w8a8-mtp"
+    model: "Eco-Tech/Qwen3.5-397B-A17B-w8a8-mtp"
+    envs:
+      VLLM_USE_MODELSCOPE: "true"
+      PYTORCH_NPU_ALLOC_CONF: "expandable_segments:True"
+      HCCL_BUFFSIZE: "512"
+      OMP_PROC_BIND: "false"
+      OMP_NUM_THREADS: "1"
+      TASK_QUEUE_ENABLE: "1"
+      SERVER_PORT: "DEFAULT_PORT"
+      VLLM_ASCEND_ENABLE_FUSED_MC2: "1"
+      VLLM_ASCEND_ENABLE_FLASHCOMM1: "1"
+    server_cmd:
+      - "--tensor-parallel-size"
+      - "16"
+      - "--data-parallel-size"
+      - "1"
+      - "--enable-expert-parallel"
+      - "--port"
+      - "$SERVER_PORT"
+      - "--seed"
+      - "1024"
+      - "--max-num-seqs"
+      - "32"
+      - "--quantization"
+      - "ascend"
+      - "--max-model-len"
+      - "133000"
+      - "--max-num-batched-tokens"
+      - "8096"
+      - "--trust-remote-code"
+      - "--gpu-memory-utilization"
+      - "0.9"
+      - "--no-enable-prefix-caching"
+      - "--additional-config"
+      - '{"enable_cpu_binding":true,"multistream_overlap_shared_expert":true}'
+      - "--speculative_config"
+      - '{"method": "qwen3_5_mtp", "num_speculative_tokens": 3, "enforce_eager": true}'
+      - "--compilation-config"
+      - '{"cudagraph_mode":"FULL_DECODE_ONLY"}'
+      - "--async-scheduling"
+    benchmarks:
+      acc:
+        case_type: accuracy
+        dataset_path: vllm-ascend/gsm8k-lite
+        request_conf: vllm_api_general_chat
+        dataset_conf: gsm8k/gsm8k_gen_0_shot_cot_chat_prompt
+        max_out_len: 32768
+        batch_size: 32
+        baseline: 95
+        threshold: 5
+      perf:
+        case_type: performance
+        dataset_path: vllm-ascend/GSM8K-in3500-bs8000-qwen3
+        request_conf: vllm_api_stream_chat
+        dataset_conf: gsm8k/gsm8k_gen_0_shot_cot_str_perf
+        num_prompts: 80
+        max_out_len: 1500
+        batch_size: 20
+        request_rate: 0
+        baseline: 768.6894
+        threshold: 0.97
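
A quick sketch of how these `test_cases` entries are meant to be consumed. The loop below is a hypothetical stand-in for the nightly harness (the real one lives under `tests/e2e/nightly/`); the `DEFAULT_PORT`/`$SERVER_PORT` substitution and the `vllm serve` invocation are assumptions read off the fields above, not the harness's actual code.

```python
import os
import subprocess

import yaml


def launch_from_config(path: str, port: int = 8000) -> subprocess.Popen:
    """Start a vLLM server from one of the YAML test cases above (sketch)."""
    with open(path) as f:
        case = yaml.safe_load(f)["test_cases"][0]

    # Export the case's env vars, resolving the SERVER_PORT placeholder.
    env = os.environ.copy()
    for key, value in case.get("envs", {}).items():
        env[key] = str(port) if value == "DEFAULT_PORT" else value

    # server_cmd is a flat flag/value list; substitute the port sentinel.
    args = [str(port) if a == "$SERVER_PORT" else a for a in case["server_cmd"]]
    return subprocess.Popen(["vllm", "serve", case["model"], *args], env=env)


# e.g. launch_from_config(
#     "tests/e2e/nightly/single_node/models/configs/Qwen3.5-27B-w8a8-A3.yaml")
```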
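For reviewers checking the `baseline`/`threshold` pairs: the two benchmark types appear to interpret them differently, and the sketch below spells out the assumed gating rule. The absolute-vs-ratio split is inferred from the values in these configs (accuracy 95 with threshold 5; performance baselines of roughly 665 to 769 tok/s with threshold 0.97), not confirmed against the harness.

```python
def passes(case_type: str, measured: float, baseline: float, threshold: float) -> bool:
    """Assumed pass/fail rule for the benchmark fields above (sketch)."""
    if case_type == "accuracy":
        # threshold read as an absolute tolerance in score points.
        return measured >= baseline - threshold
    # "performance": threshold read as a ratio of baseline throughput.
    return measured >= baseline * threshold
```

Under this reading, the MiniMax perf case would fail if measured throughput drops below 0.97 × 730.0832 ≈ 708.2.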