Enable nightly test and add qwen3 32b test case (#3370)

### What this PR does / why we need it?
This PR enables the nightly test and adds a nightly test case for Qwen3-32B (bf16).
### Does this PR introduce _any_ user-facing change?
No
### How was this patch tested?
Verified by running the new nightly test case.

- vLLM version: v0.11.0rc3
- vLLM main: https://github.com/vllm-project/vllm/commit/v0.11.0

---------

Signed-off-by: jiangyunfan1 <jiangyunfan1@h-partners.com>
Signed-off-by: wangli <wangli858794774@gmail.com>
Signed-off-by: Yikun Jiang <yikunkero@gmail.com>
Co-authored-by: wangli <wangli858794774@gmail.com>
Co-authored-by: Yikun Jiang <yikunkero@gmail.com>
Commit d05d29ff0e (parent 0d59a3c317) by jiangyunfan1, committed via GitHub on 2025-10-12 15:46:28 +08:00.
5 changed files with 238 additions and 4 deletions.


```diff
@@ -110,10 +110,10 @@ class RemoteOpenAIServer:
     def __init__(self,
                  model: str,
-                 server_host: str,
-                 server_port: int,
                  vllm_serve_args: list[str],
                  *,
+                 server_host: str = "0.0.0.0",
+                 server_port: int = 8080,
                  env_dict: Optional[dict[str, str]] = None,
                  seed: Optional[int] = 0,
                  auto_port: bool = True,
```
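With the host and port now keyword-only and defaulted, callers pass only the model and the serve args positionally. A minimal usage sketch against the updated signature (the defaults come from the diff above; the model and serve args here are illustrative):

```python
from tests.e2e.conftest import RemoteOpenAIServer

# server_host/server_port fall back to "0.0.0.0":8080 unless overridden;
# auto_port=True (the default) presumably lets the helper pick a free port,
# so auto_port=False is used when pinning an explicit port.
with RemoteOpenAIServer("Qwen/Qwen3-32B",
                        ["--tensor-parallel-size", "4"],  # illustrative args
                        server_port=8080,
                        auto_port=False) as server:
    client = server.get_async_client()
```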


```diff
@@ -30,9 +30,9 @@ def test_multi_dp(config: MultiNodeConfig) -> None:
     with RemoteOpenAIServer(
             model_name,
-            config.server_host,
-            config.server_port,
             server_args,
+            server_host=config.server_host,
+            server_port=config.server_port,
             env_dict=env_dict,
             auto_port=False,
             seed=1024,
```
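Because the relocated parameters sit behind the bare `*`, a call site still passing host and port positionally fails loudly rather than silently binding them to the wrong parameters. A sketch of the failure mode, reusing the names from the hunk above:

```python
# Old positional style against the new signature -- raises immediately,
# roughly: TypeError: __init__() takes 3 positional arguments but 5 were given
RemoteOpenAIServer(model_name, config.server_host,
                   config.server_port, server_args)
```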


@@ -0,0 +1,71 @@ (new file)

```python
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
# Copyright 2023 The vLLM team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
#
from typing import Any

import openai
import pytest

from tests.e2e.conftest import RemoteOpenAIServer

MODELS = [
    "Qwen/Qwen3-32B",
]
TENSOR_PARALLELS = [4]

prompts = [
    "San Francisco is a",
]

api_keyword_args = {
    "max_tokens": 10,
}


@pytest.mark.asyncio
@pytest.mark.parametrize("model", MODELS)
@pytest.mark.parametrize("tp_size", TENSOR_PARALLELS)
async def test_models(model: str, tp_size: int) -> None:
    env_dict = {
        "TASK_QUEUE_ENABLE": "1",
        "OMP_PROC_BIND": "false",
        "HCCL_OP_EXPANSION_MODE": "AIV",
        "PAGED_ATTENTION_MASK_LEN": "5500"
    }
    server_args = [
        "--no-enable-prefix-caching", "--tensor-parallel-size",
        str(tp_size), "--port", "20002", "--max-model-len", "36864",
        "--max-num-batched-tokens", "36864", "--block-size", "128",
        "--trust-remote-code", "--gpu-memory-utilization", "0.9",
        "--additional-config", '{"enable_weight_nz_layout":true}'
    ]
    request_keyword_args: dict[str, Any] = {
        **api_keyword_args,
    }
    # "--port" in server_args and server_port below must stay in sync,
    # since auto_port=False disables automatic port selection.
    with RemoteOpenAIServer(model,
                            server_args,
                            server_port=20002,
                            env_dict=env_dict,
                            auto_port=False) as server:
        client = server.get_async_client()
        batch = await client.completions.create(
            model=model,
            prompt=prompts,
            **request_keyword_args,
        )
        choices: list[openai.types.CompletionChoice] = batch.choices
        assert choices[0].text, "empty response"
```
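For a quick manual smoke check against a server started with the same arguments, the synchronous OpenAI client works as well. A minimal sketch, assuming the server is already up on localhost:20002 and accepts a placeholder API key (the "EMPTY" value is an assumption, not taken from this PR):

```python
import openai

# Point the stock OpenAI client at the locally running vLLM server.
client = openai.OpenAI(base_url="http://localhost:20002/v1",
                       api_key="EMPTY")  # key value is an assumption
resp = client.completions.create(model="Qwen/Qwen3-32B",
                                 prompt="San Francisco is a",
                                 max_tokens=10)
print(resp.choices[0].text)
```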