From 9615bc33fda0d62d6eadfb79ae58efd0fb4fb597 Mon Sep 17 00:00:00 2001 From: LeeWenquan <83354342+SunnyLee151064@users.noreply.github.com> Date: Tue, 24 Mar 2026 17:08:17 +0800 Subject: [PATCH] Fix Qwen3Next CI Config (#7561) ### What this PR does / why we need it? This PR modifies the Qwen3Next nightly CI config. (1) Add a nightly CI. (2) Set a more precise accuracy standard. - vLLM version: v0.18.0 - vLLM main: https://github.com/vllm-project/vllm/commit/6a9cceb219fcbd6b1eb540ddfdc77ec160f0e209 Signed-off-by: Your Name Co-authored-by: Your Name --- .github/workflows/schedule_nightly_test_a3.yaml | 3 +++ .../models/configs/Qwen3-Next-80B-A3B-Instruct-A2.yaml | 6 ++++-- .../models/configs/Qwen3-Next-80B-A3B-Instruct-W8A8.yaml | 4 ++-- .../models/configs/Qwen3-Next-80B-A3B-Instruct.yaml | 8 ++++---- 4 files changed, 13 insertions(+), 8 deletions(-) diff --git a/.github/workflows/schedule_nightly_test_a3.yaml b/.github/workflows/schedule_nightly_test_a3.yaml index 885c0854..e2ada87f 100644 --- a/.github/workflows/schedule_nightly_test_a3.yaml +++ b/.github/workflows/schedule_nightly_test_a3.yaml @@ -282,6 +282,9 @@ jobs: - name: qwen3-30b-a3b-w8a8 os: linux-aarch64-a3-4 config_file_path: Qwen3-30B-A3B-W8A8.yaml + - name: qwen3-next-80b-a3b-instruct + os: linux-aarch64-a3-4 + config_file_path: Qwen3-Next-80B-A3B-Instruct.yaml - name: qwen3-next-80b-a3b-instruct-w8a8 os: linux-aarch64-a3-4 config_file_path: Qwen3-Next-80B-A3B-Instruct-W8A8.yaml diff --git a/tests/e2e/nightly/single_node/models/configs/Qwen3-Next-80B-A3B-Instruct-A2.yaml b/tests/e2e/nightly/single_node/models/configs/Qwen3-Next-80B-A3B-Instruct-A2.yaml index 56a653fe..0d9f1452 100644 --- a/tests/e2e/nightly/single_node/models/configs/Qwen3-Next-80B-A3B-Instruct-A2.yaml +++ b/tests/e2e/nightly/single_node/models/configs/Qwen3-Next-80B-A3B-Instruct-A2.yaml @@ -24,6 +24,8 @@ _server_cmd: &server_cmd - "0.8" - "--max-num-seqs" - "64" + - "--compilation_config" + - '{"cudagraph_mode": "FULL_DECODE_ONLY"}' 
_benchmarks: &benchmarks perf: @@ -44,8 +46,8 @@ _benchmarks: &benchmarks max_out_len: 32768 batch_size: 32 top_k: 20 - baseline: 95 - threshold: 5 + baseline: 96 + threshold: 3 # ========================================== # ACTUAL TEST CASES diff --git a/tests/e2e/nightly/single_node/models/configs/Qwen3-Next-80B-A3B-Instruct-W8A8.yaml b/tests/e2e/nightly/single_node/models/configs/Qwen3-Next-80B-A3B-Instruct-W8A8.yaml index 3deddc75..cd47eef4 100644 --- a/tests/e2e/nightly/single_node/models/configs/Qwen3-Next-80B-A3B-Instruct-W8A8.yaml +++ b/tests/e2e/nightly/single_node/models/configs/Qwen3-Next-80B-A3B-Instruct-W8A8.yaml @@ -41,5 +41,5 @@ test_cases: dataset_conf: gsm8k/gsm8k_gen_0_shot_cot_chat_prompt max_out_len: 32768 batch_size: 32 - baseline: 95 - threshold: 5 + baseline: 96 + threshold: 3 diff --git a/tests/e2e/nightly/single_node/models/configs/Qwen3-Next-80B-A3B-Instruct.yaml b/tests/e2e/nightly/single_node/models/configs/Qwen3-Next-80B-A3B-Instruct.yaml index fc126efa..02b03bdd 100644 --- a/tests/e2e/nightly/single_node/models/configs/Qwen3-Next-80B-A3B-Instruct.yaml +++ b/tests/e2e/nightly/single_node/models/configs/Qwen3-Next-80B-A3B-Instruct.yaml @@ -24,8 +24,8 @@ _server_cmd: &server_cmd - "0.8" - "--max-num-seqs" - "64" - - "--compilation-config" - - '{"cudagraph_capture_sizes": [64]}' + - "--compilation_config" + - '{"cudagraph_mode": "FULL_DECODE_ONLY"}' _benchmarks: &benchmarks perf: @@ -46,8 +46,8 @@ _benchmarks: &benchmarks max_out_len: 32768 batch_size: 64 top_k: 20 - baseline: 95 - threshold: 5 + baseline: 96 + threshold: 3 # ========================================== # ACTUAL TEST CASES