[TEST]Add qwen3-235b-w8a8 and qwen3-30b-w8a8 nightly test (#3973)
### What this PR does / why we need it?
This PR adds qwen3-235b-w8a8 and qwen3-30b-w8a8 test cases; we need to run them nightly.
### Does this PR introduce _any_ user-facing change?
No
### How was this patch tested?
By running the new tests; see the local invocation sketch below.
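For reference, the two new suites can be invoked locally with pytest. A minimal sketch, assuming an Ascend NPU environment with vllm-ascend and the repo's e2e test dependencies installed:

```python
# Run the two new nightly suites locally (sketch; requires Ascend NPUs,
# vllm-ascend, and the repository's e2e test dependencies).
import pytest

pytest.main([
    "tests/e2e/nightly/models/test_qwen3_30b_w8a8.py",
    "tests/e2e/nightly/models/test_qwen3_235b_w8a8.py",
    "-v",
])
```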
- vLLM version: v0.11.0
- vLLM main: 83f478bb19
Signed-off-by: jiangyunfan1 <jiangyunfan1@h-partners.com>
```diff
@@ -78,6 +78,12 @@ jobs:
           - name: qwq-32b-a3
             os: linux-aarch64-a3-4
             tests: tests/e2e/nightly/models/test_qwq_32b.py
+          - name: qwen3-30b-w8a8
+            os: linux-aarch64-a3-2
+            tests: tests/e2e/nightly/models/test_qwen3_30b_w8a8.py
+          - name: qwen3-235b-w8a8
+            os: linux-aarch64-a3-16
+            tests: tests/e2e/nightly/models/test_qwen3_235b_w8a8.py
     uses: ./.github/workflows/_e2e_nightly_single_node.yaml
     with:
       vllm: v0.11.0
```
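For context, the runner labels line up with the parallel layouts in the new test files: the 235B case runs data-parallel 4 × tensor-parallel 4 (16 devices) on `linux-aarch64-a3-16`, while the 30B case runs tensor-parallel 1 on `linux-aarch64-a3-2`. A quick sanity check of that arithmetic (reading the label's `-N` suffix as the NPU count is an assumption):

```python
# Sanity-check the device math behind each matrix entry. Treating the
# runner label's numeric suffix as its NPU count is an assumption here.
layouts = {
    "qwen3-30b-w8a8": {"dp": 1, "tp": 1, "runner_npus": 2},
    "qwen3-235b-w8a8": {"dp": 4, "tp": 4, "runner_npus": 16},
}
for name, c in layouts.items():
    world_size = c["dp"] * c["tp"]
    assert world_size <= c["runner_npus"], name
    print(f"{name}: needs {world_size} NPU(s), runner provides {c['runner_npus']}")
```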
tests/e2e/nightly/models/test_qwen3_235b_w8a8.py (new file, 107 lines):

```python
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
# Copyright 2023 The vLLM team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
#
import json
from typing import Any

import openai
import pytest
from vllm.utils import get_open_port

from tests.e2e.conftest import RemoteOpenAIServer
from tools.aisbench import run_aisbench_cases

MODELS = [
    "vllm-ascend/Qwen3-235B-A22B-W8A8",
]

MODES = ["full_graph", "piecewise"]

prompts = [
    "San Francisco is a",
]

api_keyword_args = {
    "max_tokens": 10,
}

aisbench_cases = [{
    "case_type": "accuracy",
    "dataset_path": "vllm-ascend/gsm8k-lite",
    "request_conf": "vllm_api_general_chat",
    "dataset_conf": "gsm8k/gsm8k_gen_0_shot_cot_chat_prompt",
    "max_out_len": 32768,
    "batch_size": 32,
    "top_k": 20,
    "baseline": 95,
    "threshold": 5
}]


@pytest.mark.asyncio
@pytest.mark.parametrize("model", MODELS)
@pytest.mark.parametrize("mode", MODES)
async def test_models(model: str, mode: str) -> None:
    port = get_open_port()
    env_dict = {
        "OMP_NUM_THREADS": "10",
        "OMP_PROC_BIND": "false",
        "HCCL_BUFFSIZE": "1024",
        "PYTORCH_NPU_ALLOC_CONF": "expandable_segments:True",
        "VLLM_ASCEND_ENABLE_FLASHCOMM1": "1"
    }
    additional_config: dict[str, Any] = {
        "ascend_scheduler_config": {
            "enabled": False
        },
    }
    compilation_config = {"cudagraph_mode": "FULL_DECODE_ONLY"}
    server_args = [
        "--quantization", "ascend", "--async-scheduling",
        "--data-parallel-size", "4", "--tensor-parallel-size", "4",
        "--enable-expert-parallel", "--port",
        str(port), "--max-model-len", "40960", "--max-num-batched-tokens",
        "8192", "--max-num-seqs", "12", "--trust-remote-code",
        "--gpu-memory-utilization", "0.9"
    ]
    if mode == "piecewise":
        compilation_config["cudagraph_mode"] = "PIECEWISE"
    server_args.extend(
        ["--compilation-config",
         json.dumps(compilation_config)])
    server_args.extend(["--additional-config", json.dumps(additional_config)])
    request_keyword_args: dict[str, Any] = {
        **api_keyword_args,
    }
    with RemoteOpenAIServer(model,
                            server_args,
                            server_port=port,
                            env_dict=env_dict,
                            auto_port=False) as server:
        client = server.get_async_client()
        batch = await client.completions.create(
            model=model,
            prompt=prompts,
            **request_keyword_args,
        )
        choices: list[openai.types.CompletionChoice] = batch.choices
        assert choices[0].text, "empty response"
        print(choices)
        # aisbench test
        run_aisbench_cases(model,
                           port,
                           aisbench_cases,
                           server_args=server_args)
```
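The `mode` parameter above only changes the `--compilation-config` payload passed to the server; `"full_graph"` keeps the default `FULL_DECODE_ONLY` graph mode. A small illustration of the two JSON strings the test generates, derived directly from its logic:

```python
# The two --compilation-config payloads produced by test_models,
# one per parametrized mode ("full_graph" keeps the default).
import json

for mode in ("full_graph", "piecewise"):
    cfg = {"cudagraph_mode": "FULL_DECODE_ONLY"}
    if mode == "piecewise":
        cfg["cudagraph_mode"] = "PIECEWISE"
    print(f"{mode}: {json.dumps(cfg)}")
# full_graph: {"cudagraph_mode": "FULL_DECODE_ONLY"}
# piecewise: {"cudagraph_mode": "PIECEWISE"}
```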
tests/e2e/nightly/models/test_qwen3_30b_w8a8.py (new file, 92 lines):

```python
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
# Copyright 2023 The vLLM team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
#
from typing import Any

import openai
import pytest
from vllm.utils import get_open_port

from tests.e2e.conftest import RemoteOpenAIServer
from tools.aisbench import run_aisbench_cases

MODELS = [
    "vllm-ascend/Qwen3-30B-A3B-W8A8",
]

TENSOR_PARALLELS = [1]

prompts = [
    "San Francisco is a",
]

api_keyword_args = {
    "max_tokens": 10,
}

aisbench_cases = [{
    "case_type": "performance",
    "dataset_path": "vllm-ascend/GSM8K-in3500-bs400",
    "request_conf": "vllm_api_stream_chat",
    "dataset_conf": "gsm8k/gsm8k_gen_0_shot_cot_str_perf",
    "num_prompts": 180,
    "max_out_len": 1500,
    "batch_size": 45,
    "request_rate": 0,
    "baseline": 1,
    "threshold": 0.97
}]


@pytest.mark.asyncio
@pytest.mark.parametrize("model", MODELS)
@pytest.mark.parametrize("tp_size", TENSOR_PARALLELS)
async def test_models(model: str, tp_size: int) -> None:
    port = get_open_port()
    env_dict = {
        "OMP_PROC_BIND": "false",
        "OMP_NUM_THREADS": "10",
        "HCCL_BUFFSIZE": "1024",
        "HCCL_OP_EXPANSION_MODE": "AIV",
        "PYTORCH_NPU_ALLOC_CONF": "expandable_segments:True"
    }
    server_args = [
        "--quantization", "ascend", "--async-scheduling",
        "--no-enable-prefix-caching", "--tensor-parallel-size",
        str(tp_size), "--port",
        str(port), "--max-model-len", "5600", "--max-num-batched-tokens",
        "16384", "--max-num-seqs", "100", "--trust-remote-code",
        "--gpu-memory-utilization", "0.9", "--compilation-config",
        '{"cudagraph_mode": "FULL_DECODE_ONLY"}'
    ]
    request_keyword_args: dict[str, Any] = {
        **api_keyword_args,
    }
    with RemoteOpenAIServer(model,
                            server_args,
                            server_port=port,
                            env_dict=env_dict,
                            auto_port=False) as server:
        client = server.get_async_client()
        batch = await client.completions.create(
            model=model,
            prompt=prompts,
            **request_keyword_args,
        )
        choices: list[openai.types.CompletionChoice] = batch.choices
        assert choices[0].text, "empty response"
        # aisbench test
        run_aisbench_cases(model, port, aisbench_cases)
```
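Unlike the 235B suite's accuracy case, this file gates on performance (`baseline: 1`, `threshold: 0.97`). How `run_aisbench_cases` applies these numbers is not shown in this diff; a plausible reading, offered only as a hypothetical sketch, is a relative-regression check:

```python
# Hypothetical gate consistent with baseline=1, threshold=0.97: the measured
# metric, normalized to the baseline, must stay within a 3% regression.
# run_aisbench_cases' real comparison logic is not shown in this diff.
def passes_perf_gate(measured_ratio: float,
                     baseline: float = 1.0,
                     threshold: float = 0.97) -> bool:
    return measured_ratio >= baseline * threshold

assert passes_perf_gate(0.98)      # small regression tolerated
assert not passes_perf_gate(0.95)  # >3% regression fails
```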