[Tests] Add qwen3-8b nightly test (#5597)
### What this PR does / why we need it?
Add qwen3-8b nightly test
- vLLM version: v0.13.0
- vLLM main: 7157596103
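For local verification, the new case can be run the same way the nightly workflow invokes it, for example (assuming an Ascend A2 environment with vllm-ascend installed and the Qwen/Qwen3-8B weights available):

    pytest -s tests/e2e/nightly/single_node/models/test_qwen3_8b.py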
---------
Signed-off-by: wxsIcey <1790571317@qq.com>
.github/workflows/nightly_test_a2.yaml  (3 additions)  (vendored)
@@ -49,6 +49,9 @@ jobs:
       fail-fast: false
       matrix:
         test_config:
+          - name: qwen3-8b
+            os: linux-aarch64-a2-1
+            tests: tests/e2e/nightly/single_node/models/test_qwen3_8b.py
           - name: qwen3-32b
             os: linux-aarch64-a2-4
             tests: tests/e2e/nightly/single_node/models/test_qwen3_32b.py
tests/e2e/nightly/single_node/models/test_qwen3_8b.py  (new file, 99 lines)
@@ -0,0 +1,99 @@
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
# Copyright 2023 The vLLM team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
#
from typing import Any

import openai
import pytest
from vllm.utils.network_utils import get_open_port

from tests.e2e.conftest import RemoteOpenAIServer
from tools.vllm_bench import run_vllm_bench_case

MODELS = [
    "Qwen/Qwen3-8B",
]

prompts = [
    "San Francisco is a",
]

api_keyword_args = {
    "max_tokens": 10,
}

vllm_bench_cases = {
    "dataset-name": "random",
    "num_prompts": 1000,
    "request_rate": 20,
    "random_input_len": 128,
    "max_concurrency": 40,
    "random_output_len": 100,
}

baseline_throughput = 1622.08  # baseline throughput for Qwen3-8B


@pytest.mark.parametrize("model", MODELS)
@pytest.mark.asyncio
async def test_models(model: str) -> None:
    port = get_open_port()
    env_dict = {
        "TASK_QUEUE_ENABLE": "1",
        "HCCL_OP_EXPANSION_MODE": "AIV",
        "VLLM_ASCEND_ENABLE_PREFETCH_MLP": "1",
    }
    server_args = [
        "--async-scheduling",
        "--distributed-executor-backend",
        "mp",
        "--tensor-parallel-size",
        "1",
        "--port",
        str(port),
        "--max-model-len",
        "5500",
        "--max-num-batched-tokens",
        "40960",
        "--compilation-config",
        '{"cudagraph_mode": "FULL_DECODE_ONLY"}',
        "--additional-config",
        '{"pa_shape_list":[48,64,72,80]}',
        "--block-size",
        "128",
        "--trust-remote-code",
        "--gpu-memory-utilization",
        "0.9",
    ]

    request_keyword_args: dict[str, Any] = {
        **api_keyword_args,
    }
    with RemoteOpenAIServer(model,
                            server_args,
                            server_port=port,
                            env_dict=env_dict,
                            auto_port=False) as server:
        client = server.get_async_client()
        batch = await client.completions.create(
            model=model,
            prompt=prompts,
            **request_keyword_args,
        )
        choices: list[openai.types.CompletionChoice] = batch.choices
        assert choices[0].text, "empty response"
        # vllm bench test
        run_vllm_bench_case(model, port, vllm_bench_cases, baseline_throughput)
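For reference, the server the test brings up corresponds roughly to the following standalone launch, assembled from env_dict and server_args above (a sketch only; in the test the process is managed by RemoteOpenAIServer and the port comes from get_open_port()):

    TASK_QUEUE_ENABLE=1 HCCL_OP_EXPANSION_MODE=AIV VLLM_ASCEND_ENABLE_PREFETCH_MLP=1 \
    vllm serve Qwen/Qwen3-8B \
        --async-scheduling --distributed-executor-backend mp --tensor-parallel-size 1 \
        --max-model-len 5500 --max-num-batched-tokens 40960 \
        --compilation-config '{"cudagraph_mode": "FULL_DECODE_ONLY"}' \
        --additional-config '{"pa_shape_list":[48,64,72,80]}' \
        --block-size 128 --trust-remote-code --gpu-memory-utilization 0.9 \
        --port <port>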
tools.vllm_bench  (the VllmbenchRunner benchmark helper imported by the test above)
@@ -47,6 +47,8 @@ class VllmbenchRunner:
                  model_name: str,
                  port: int,
                  config: dict,
+                 baseline: float,
+                 threshold: float = 0.97,
                  model_path: str = "",
                  host_ip: str = "localhost"):
         self.model_name = model_name
@@ -60,10 +62,12 @@ class VllmbenchRunner:
         curr_time = datetime.now().strftime('%Y%m%d%H%M%S')
         self.result_filename = f"result_vllm_bench_{curr_time}.json"
         self.config = config
+        self.baseline = baseline
+        self.threshold = threshold
 
         self._run_vllm_bench_task()
         self._wait_for_task()
-        self._get_result()
+        self._performance_verify()
 
     def _concat_config_args(self, vllm_bench_cmd):
         if "ignore_eos" in self.config:
@@ -87,16 +91,30 @@ class VllmbenchRunner:
             self.proc.kill()
 
     def _wait_for_task(self):
-        result_msg = "========================="
-        while True:
-            line = self.proc.stdout.readline().strip()
-            if line:
-                print(line)
-            if result_msg in line:
-                return
-            if "ERROR" in line:
-                error_msg = f"Some errors happened to vllm_bench runtime, the first error is {line}"
-                raise RuntimeError(error_msg) from None
+        """Wait for the vllm bench command to complete and check the execution result"""
+
+        stdout, stderr = self.proc.communicate()
+
+        if self.proc.returncode != 0:
+            logging.error(
+                f"vllm bench command failed, return code: {self.proc.returncode}"
+            )
+            logging.error(f"Standard output: {stdout}")
+            logging.error(f"Standard error: {stderr}")
+            raise RuntimeError(
+                f"vllm bench command execution failed: {stderr}")
+
+        logging.info(
+            f"vllm bench command completed, return code: {self.proc.returncode}"
+        )
+        if stdout:
+            lines = stdout.split('\n')
+            last_lines = lines[-100:] if len(lines) > 100 else lines
+            logging.info(f"Last {len(last_lines)} lines of standard output:")
+            for line in last_lines:
+                logging.info(line)
+        else:
+            logging.info("Standard output is empty")
 
     def _get_result(self):
         result_file = os.path.join(os.getcwd(), self.result_filename)
@@ -104,16 +122,27 @@ class VllmbenchRunner:
         with open(result_file, 'r', encoding='utf-8') as f:
             self.result = json.load(f)
 
+    def _performance_verify(self):
+        self._get_result()
+        output_throughput = self.result["output_throughput"]
+        assert float(
+            output_throughput
+        ) >= self.baseline * self.threshold, f"Performance verification failed. The current Output Token Throughput is {output_throughput} token/s, which is not greater than or equal to {self.threshold} * baseline {self.baseline}."
+
 
 def run_vllm_bench_case(model_name,
                         port,
                         config,
+                        baseline,
+                        threshold=0.97,
                         model_path="",
                         host_ip="localhost"):
     try:
         with VllmbenchRunner(model_name,
                              port,
                              config,
                              baseline,
                              threshold,
                              model_path=model_path,
                              host_ip=host_ip) as vllm_bench:
            vllm_bench_result = vllm_bench.result
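With the default threshold of 0.97, the qwen3-8b case therefore passes only if the output_throughput reported by vllm bench is at least 0.97 × 1622.08 ≈ 1573.4 tokens/s.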