From c1165243791fed859f2b8bfd7c3a7aea4f2ee406 Mon Sep 17 00:00:00 2001
From: jiangyunfan1
Date: Sat, 8 Nov 2025 18:49:28 +0800
Subject: [PATCH] [TEST] Add qwen3-235b-w8a8 and qwen3-30b-w8a8 nightly tests
 (#3973)

### What this PR does / why we need it?
This PR adds qwen3-235b-w8a8 and qwen3-30b-w8a8 test cases, which we need to run daily in the nightly CI.

### Does this PR introduce _any_ user-facing change?
No

### How was this patch tested?
By running the added tests.

- vLLM version: v0.11.0
- vLLM main: https://github.com/vllm-project/vllm/commit/83f478bb19489b41e9d208b47b4bb5a95ac171ac

Signed-off-by: jiangyunfan1
---
 .../vllm_ascend_test_nightly_a3.yaml          |   6 +
 .../nightly/models/test_qwen3_235b_w8a8.py    | 107 ++++++++++++++++++
 .../e2e/nightly/models/test_qwen3_30b_w8a8.py |  92 +++++++++++++++
 3 files changed, 205 insertions(+)
 create mode 100644 tests/e2e/nightly/models/test_qwen3_235b_w8a8.py
 create mode 100644 tests/e2e/nightly/models/test_qwen3_30b_w8a8.py

diff --git a/.github/workflows/vllm_ascend_test_nightly_a3.yaml b/.github/workflows/vllm_ascend_test_nightly_a3.yaml
index 74741b95..00e05659 100644
--- a/.github/workflows/vllm_ascend_test_nightly_a3.yaml
+++ b/.github/workflows/vllm_ascend_test_nightly_a3.yaml
@@ -78,6 +78,12 @@ jobs:
       - name: qwq-32b-a3
         os: linux-aarch64-a3-4
         tests: tests/e2e/nightly/models/test_qwq_32b.py
+      - name: qwen3-30b-w8a8
+        os: linux-aarch64-a3-2
+        tests: tests/e2e/nightly/models/test_qwen3_30b_w8a8.py
+      - name: qwen3-235b-w8a8
+        os: linux-aarch64-a3-16
+        tests: tests/e2e/nightly/models/test_qwen3_235b_w8a8.py
     uses: ./.github/workflows/_e2e_nightly_single_node.yaml
     with:
       vllm: v0.11.0
diff --git a/tests/e2e/nightly/models/test_qwen3_235b_w8a8.py b/tests/e2e/nightly/models/test_qwen3_235b_w8a8.py
new file mode 100644
index 00000000..8220e4d5
--- /dev/null
+++ b/tests/e2e/nightly/models/test_qwen3_235b_w8a8.py
@@ -0,0 +1,107 @@
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
# Copyright 2023 The vLLM team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
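#
# Nightly e2e test for vllm-ascend/Qwen3-235B-A22B-W8A8: serve the W8A8
# quantized model through the OpenAI-compatible server with DP4/TP4 and
# expert parallelism, exercise both full-graph and piecewise cudagraph
# modes, sanity-check a completion, and run the aisbench GSM8K accuracy case.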
#
import json
from typing import Any

import openai
import pytest
from vllm.utils import get_open_port

from tests.e2e.conftest import RemoteOpenAIServer
from tools.aisbench import run_aisbench_cases

MODELS = [
    "vllm-ascend/Qwen3-235B-A22B-W8A8",
]

MODES = ["full_graph", "piecewise"]

prompts = [
    "San Francisco is a",
]

api_keyword_args = {
    "max_tokens": 10,
}

aisbench_cases = [{
    "case_type": "accuracy",
    "dataset_path": "vllm-ascend/gsm8k-lite",
    "request_conf": "vllm_api_general_chat",
    "dataset_conf": "gsm8k/gsm8k_gen_0_shot_cot_chat_prompt",
    "max_out_len": 32768,
    "batch_size": 32,
    "top_k": 20,
    "baseline": 95,
    "threshold": 5
}]


@pytest.mark.asyncio
@pytest.mark.parametrize("model", MODELS)
@pytest.mark.parametrize("mode", MODES)
async def test_models(model: str, mode: str) -> None:
    port = get_open_port()
    env_dict = {
        "OMP_NUM_THREADS": "10",
        "OMP_PROC_BIND": "false",
        "HCCL_BUFFSIZE": "1024",
        "PYTORCH_NPU_ALLOC_CONF": "expandable_segments:True",
        "VLLM_ASCEND_ENABLE_FLASHCOMM1": "1"
    }
    additional_config: dict[str, Any] = {
        "ascend_scheduler_config": {
            "enabled": False
        },
    }
    # Default capture mode for the full_graph case; overridden below for
    # the piecewise case.
    compilation_config = {"cudagraph_mode": "FULL_DECODE_ONLY"}
    server_args = [
        "--quantization", "ascend", "--async-scheduling",
        "--data-parallel-size", "4", "--tensor-parallel-size", "4",
        "--enable-expert-parallel", "--port",
        str(port), "--max-model-len", "40960", "--max-num-batched-tokens",
        "8192", "--max-num-seqs", "12", "--trust-remote-code",
        "--gpu-memory-utilization", "0.9"
    ]
    if mode == "piecewise":
        compilation_config["cudagraph_mode"] = "PIECEWISE"
    # Always pass an explicit compilation config so that the full_graph
    # mode really runs with FULL_DECODE_ONLY rather than the server default.
    server_args.extend(
        ["--compilation-config",
         json.dumps(compilation_config)])
    server_args.extend(["--additional-config", json.dumps(additional_config)])
    request_keyword_args: dict[str, Any] = {
        **api_keyword_args,
    }
    with RemoteOpenAIServer(model,
                            server_args,
                            server_port=port,
                            env_dict=env_dict,
                            auto_port=False) as server:
        client = server.get_async_client()
        batch = await client.completions.create(
            model=model,
            prompt=prompts,
            **request_keyword_args,
        )
        choices: list[openai.types.CompletionChoice] = batch.choices
        assert choices[0].text, "empty response"
        print(choices)
        # Run the aisbench accuracy case against the live server.
        run_aisbench_cases(model,
                           port,
                           aisbench_cases,
                           server_args=server_args)
diff --git a/tests/e2e/nightly/models/test_qwen3_30b_w8a8.py b/tests/e2e/nightly/models/test_qwen3_30b_w8a8.py
new file mode 100644
index 00000000..307a1575
--- /dev/null
+++ b/tests/e2e/nightly/models/test_qwen3_30b_w8a8.py
@@ -0,0 +1,92 @@
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
# Copyright 2023 The vLLM team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
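#
# Nightly e2e test for vllm-ascend/Qwen3-30B-A3B-W8A8: serve the W8A8 model
# through the OpenAI-compatible server with TP1, sanity-check a completion,
# and run the aisbench GSM8K performance case against the recorded baseline.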
#
from typing import Any

import openai
import pytest
from vllm.utils import get_open_port

from tests.e2e.conftest import RemoteOpenAIServer
from tools.aisbench import run_aisbench_cases

MODELS = [
    "vllm-ascend/Qwen3-30B-A3B-W8A8",
]

TENSOR_PARALLELS = [1]

prompts = [
    "San Francisco is a",
]

api_keyword_args = {
    "max_tokens": 10,
}

aisbench_cases = [{
    "case_type": "performance",
    "dataset_path": "vllm-ascend/GSM8K-in3500-bs400",
    "request_conf": "vllm_api_stream_chat",
    "dataset_conf": "gsm8k/gsm8k_gen_0_shot_cot_str_perf",
    "num_prompts": 180,
    "max_out_len": 1500,
    "batch_size": 45,
    "request_rate": 0,
    "baseline": 1,
    "threshold": 0.97
}]


@pytest.mark.asyncio
@pytest.mark.parametrize("model", MODELS)
@pytest.mark.parametrize("tp_size", TENSOR_PARALLELS)
async def test_models(model: str, tp_size: int) -> None:
    port = get_open_port()
    env_dict = {
        "OMP_PROC_BIND": "false",
        "OMP_NUM_THREADS": "10",
        "HCCL_BUFFSIZE": "1024",
        "HCCL_OP_EXPANSION_MODE": "AIV",
        "PYTORCH_NPU_ALLOC_CONF": "expandable_segments:True"
    }
    server_args = [
        "--quantization", "ascend", "--async-scheduling",
        "--no-enable-prefix-caching", "--tensor-parallel-size",
        str(tp_size), "--port",
        str(port), "--max-model-len", "5600", "--max-num-batched-tokens",
        "16384", "--max-num-seqs", "100", "--trust-remote-code",
        "--gpu-memory-utilization", "0.9", "--compilation-config",
        '{"cudagraph_mode": "FULL_DECODE_ONLY"}'
    ]
    request_keyword_args: dict[str, Any] = {
        **api_keyword_args,
    }
    with RemoteOpenAIServer(model,
                            server_args,
                            server_port=port,
                            env_dict=env_dict,
                            auto_port=False) as server:
        client = server.get_async_client()
        batch = await client.completions.create(
            model=model,
            prompt=prompts,
            **request_keyword_args,
        )
        choices: list[openai.types.CompletionChoice] = batch.choices
        assert choices[0].text, "empty response"
        # Run the aisbench performance case against the live server.
        run_aisbench_cases(model, port, aisbench_cases)
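
For local reproduction, here is a minimal sketch (not part of this patch) of running the two new test files the same way the nightly A3 workflow does, one file per invocation. It assumes a configured Ascend environment with the pinned vLLM v0.11.0, vllm-ascend installed, and the repo's `tools.aisbench` helpers importable; the pytest flags are illustrative.

```python
# Sketch: run each new nightly test file in its own pytest process,
# mirroring the per-file "tests:" entries in the A3 workflow matrix.
import subprocess
import sys

TEST_FILES = [
    "tests/e2e/nightly/models/test_qwen3_30b_w8a8.py",
    "tests/e2e/nightly/models/test_qwen3_235b_w8a8.py",
]

for test_file in TEST_FILES:
    # -s streams server and aisbench output; each test picks a free port
    # via get_open_port(), so the files can run back to back on one node.
    subprocess.run([sys.executable, "-m", "pytest", "-sv", test_file],
                   check=True)
```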