### What this PR does / why we need it?
Add a Qwen3-8B nightly test.
- vLLM version: v0.13.0
- vLLM main: 7157596103
---------
Signed-off-by: wxsIcey <1790571317@qq.com>
The added test file (100 lines, 2.8 KiB, Python):
```python
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
# Copyright 2023 The vLLM team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
#
from typing import Any

import openai
import pytest
from vllm.utils.network_utils import get_open_port

from tests.e2e.conftest import RemoteOpenAIServer
from tools.vllm_bench import run_vllm_bench_case
MODELS = [
    "Qwen/Qwen3-8B",
]

prompts = [
    "San Francisco is a",
]

api_keyword_args = {
    "max_tokens": 10,
}

vllm_bench_cases = {
    "dataset-name": "random",
    "num_prompts": 1000,
    "request_rate": 20,
    "random_input_len": 128,
    "max_concurrency": 40,
    "random_output_len": 100,
}

baseline_throughput = 1622.08  # baseline throughput for Qwen3-8B
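

# NOTE: TASK_QUEUE_ENABLE, HCCL_OP_EXPANSION_MODE and
# VLLM_ASCEND_ENABLE_PREFETCH_MLP are Ascend/CANN performance knobs; their
# exact semantics are documented in the CANN and vllm-ascend docs. The
# "cudagraph_mode": "FULL_DECODE_ONLY" compilation setting limits full-graph
# capture to decode-only batches.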
@pytest.mark.parametrize("model", MODELS)
@pytest.mark.asyncio
async def test_models(model: str) -> None:
    port = get_open_port()
    env_dict = {
        "TASK_QUEUE_ENABLE": "1",
        "HCCL_OP_EXPANSION_MODE": "AIV",
        "VLLM_ASCEND_ENABLE_PREFETCH_MLP": "1",
    }
    server_args = [
        "--async-scheduling",
        "--distributed-executor-backend",
        "mp",
        "--tensor-parallel-size",
        "1",
        "--port",
        str(port),
        "--max-model-len",
        "5500",
        "--max-num-batched-tokens",
        "40960",
        "--compilation-config",
        '{"cudagraph_mode": "FULL_DECODE_ONLY"}',
        "--additional-config",
        '{"pa_shape_list":[48,64,72,80]}',
        "--block-size",
        "128",
        "--trust-remote-code",
        "--gpu-memory-utilization",
        "0.9",
    ]

    request_keyword_args: dict[str, Any] = {
        **api_keyword_args,
    }
    with RemoteOpenAIServer(model,
                            server_args,
                            server_port=port,
                            env_dict=env_dict,
                            auto_port=False) as server:
        client = server.get_async_client()
        batch = await client.completions.create(
            model=model,
            prompt=prompts,
            **request_keyword_args,
        )
        choices: list[openai.types.CompletionChoice] = batch.choices
        assert choices[0].text, "empty response"
        # vllm bench test
        run_vllm_bench_case(model, port, vllm_bench_cases, baseline_throughput)
```
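
For context, `run_vllm_bench_case` is not shown in this diff. Below is a minimal sketch of what such a helper could look like, assuming it wraps the `vllm bench serve` CLI, reads the saved JSON result, and gates on throughput; the actual implementation lives in `tools/vllm_bench.py` and may differ, and the tolerance and result-file key used here are assumptions:

```python
# Hypothetical sketch only -- the real helper lives in tools/vllm_bench.py.
import json
import subprocess
import tempfile
from pathlib import Path


def run_vllm_bench_case(model: str, port: int, case: dict,
                        baseline: float, rel_tol: float = 0.03) -> None:
    """Run `vllm bench serve` against a live server and gate on throughput."""
    with tempfile.TemporaryDirectory() as tmp:
        result_file = Path(tmp) / "result.json"
        cmd = [
            "vllm", "bench", "serve",
            "--model", model,
            "--port", str(port),
            "--dataset-name", str(case["dataset-name"]),
            "--num-prompts", str(case["num_prompts"]),
            "--request-rate", str(case["request_rate"]),
            "--max-concurrency", str(case["max_concurrency"]),
            "--random-input-len", str(case["random_input_len"]),
            "--random-output-len", str(case["random_output_len"]),
            "--save-result",
            "--result-dir", tmp,
            "--result-filename", result_file.name,
        ]
        subprocess.run(cmd, check=True)
        # "output_throughput" (tokens/s) is an assumed key of the saved result.
        throughput = json.loads(result_file.read_text())["output_throughput"]
        assert throughput >= baseline * (1 - rel_tol), (
            f"throughput {throughput:.2f} tok/s regressed below "
            f"baseline {baseline:.2f} tok/s")
```

Gating on a relative tolerance rather than the exact baseline keeps the nightly check robust to run-to-run variance while still catching real regressions.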