[TEST] Add initial aisbench support and Qwen3 32B acc/perf test (#3474)

### What this PR does / why we need it?
This PR adds the first aisbench case to the nightly test suite. It lays the
foundation for the performance and accuracy tests that will follow in the nightly run.
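Each aisbench case is a plain dict handed to `run_aisbench_cases` (imported from `tools.aisbench` in the diff below), so follow-up tests only need to append entries to the list. As a minimal sketch, a hypothetical additional accuracy case could look like the following; the key names and values mirror the entry added in this PR rather than a documented schema (the authoritative definition lives in `tools/aisbench.py`):

```python
# Sketch of a hypothetical follow-up case; keys mirror the accuracy entry
# added in this PR, not a documented schema.
extra_accuracy_case = {
    "case_type": "accuracy",          # this PR uses "accuracy" or "performance"
    "dataset_path": "vllm-ascend/gsm8k-lite",
    "request_conf": "vllm_api_general_chat",
    "dataset_conf": "gsm8k/gsm8k_gen_0_shot_cot_chat_prompt",
    "max_out_len": 32768,             # generation length cap
    "batch_size": 32,
    "baseline": 95,                   # expected score
    "threshold": 5,                   # tolerated deviation from the baseline
}
```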

### Does this PR introduce _any_ user-facing change?
No

### How was this patch tested?
By running the added aisbench test.

- vLLM version: v0.11.0rc3
- vLLM main: https://github.com/vllm-project/vllm/commit/v0.11.0

Signed-off-by: Yikun Jiang <yikunkero@gmail.com>
Co-authored-by: jiangyunfan1 <jiangyunfan1@h-partners.com>
Commit 9e59fc1510 (parent 58a37ce189), authored by jiangyunfan1 on 2025-10-20 09:33:17 +08:00, committed by GitHub.
3 changed files with 262 additions and 5 deletions

@@ -18,8 +18,10 @@ from typing import Any
 import openai
 import pytest
+from vllm.utils import get_open_port
 from tests.e2e.conftest import RemoteOpenAIServer
+from tools.aisbench import run_aisbench_cases

 MODELS = [
     "Qwen/Qwen3-32B",
@@ -35,11 +37,34 @@ api_keyword_args = {
     "max_tokens": 10,
 }
+aisbench_cases = [{
+    "case_type": "accuracy",
+    "dataset_path": "vllm-ascend/gsm8k-lite",
+    "request_conf": "vllm_api_general_chat",
+    "dataset_conf": "gsm8k/gsm8k_gen_0_shot_cot_chat_prompt",
+    "max_out_len": 32768,
+    "batch_size": 32,
+    "baseline": 95,
+    "threshold": 5
+}, {
+    "case_type": "performance",
+    "dataset_path": "vllm-ascend/GSM8K-in3500-bs400",
+    "request_conf": "vllm_api_stream_chat",
+    "dataset_conf": "gsm8k/gsm8k_gen_0_shot_cot_str_perf",
+    "num_prompts": 80,
+    "max_out_len": 1500,
+    "batch_size": 20,
+    "request_rate": 0,
+    "baseline": 1,
+    "threshold": 0.97
+}]

 @pytest.mark.asyncio
 @pytest.mark.parametrize("model", MODELS)
 @pytest.mark.parametrize("tp_size", TENSOR_PARALLELS)
 async def test_models(model: str, tp_size: int) -> None:
+    port = get_open_port()
     env_dict = {
         "TASK_QUEUE_ENABLE": "1",
         "OMP_PROC_BIND": "false",
@@ -48,17 +73,18 @@ async def test_models(model: str, tp_size: int) -> None:
     }
     server_args = [
         "--no-enable-prefix-caching", "--tensor-parallel-size",
-        str(tp_size), "--port", "20002", "--max-model-len", "36864",
-        "--max-num-batched-tokens", "36864", "--block-size", "128",
-        "--trust-remote-code", "--gpu-memory-utilization", "0.9",
-        "--additional-config", '{"enable_weight_nz_layout":true}'
+        str(tp_size), "--port",
+        str(port), "--max-model-len", "36864", "--max-num-batched-tokens",
+        "36864", "--block-size", "128", "--trust-remote-code",
+        "--gpu-memory-utilization", "0.9", "--additional-config",
+        '{"enable_weight_nz_layout":true}'
     ]
     request_keyword_args: dict[str, Any] = {
         **api_keyword_args,
     }
     with RemoteOpenAIServer(model,
                             server_args,
-                            server_port=20002,
+                            server_port=port,
                             env_dict=env_dict,
                             auto_port=False) as server:
         client = server.get_async_client()
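The hunk above replaces the hardcoded port 20002 with the port picked by `get_open_port()`, which avoids collisions when several test jobs share a CI machine. Conceptually (this is a sketch, not vLLM's actual implementation, which has extra handling not shown), `vllm.utils.get_open_port` boils down to asking the OS for a free port:

```python
import socket

# Rough sketch of what vllm.utils.get_open_port does; the real helper
# is more involved than this.
def get_open_port() -> int:
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.bind(("", 0))              # port 0 lets the OS choose a free port
        return s.getsockname()[1]    # report the port the OS assigned
```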
@@ -69,3 +95,5 @@ async def test_models(model: str, tp_size: int) -> None:
         )
         choices: list[openai.types.CompletionChoice] = batch.choices
         assert choices[0].text, "empty response"
+        # aisbench test
+        run_aisbench_cases(model, port, aisbench_cases)
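The accuracy case gates on `baseline=95` with `threshold=5`, and the performance case on `baseline=1` with `threshold=0.97`. Assuming the harness treats the threshold as the tolerated drop from the baseline (an assumption; the actual comparison lives in `tools/aisbench.py` and is not reproduced here), the check would amount to something like:

```python
# Illustrative gate only, under the assumption stated above; not the
# verified logic from tools/aisbench.py.
def check_case(case: dict, measured: float) -> None:
    if case["case_type"] == "accuracy":
        # baseline=95, threshold=5 -> fail below a score of 90
        floor = case["baseline"] - case["threshold"]
    else:  # "performance"
        # baseline=1, threshold=0.97 -> keep >= 97% of baseline throughput
        floor = case["baseline"] * case["threshold"]
    assert measured >= floor, f"{case['case_type']} {measured} below {floor}"
```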