[TEST] Add Qwen3-32b-w8a8 acc/perf A2/A3 test (#3541)

### What this PR does / why we need it?
This PR adds 8 Qwen3-32B-W8A8 accuracy/performance test cases on A2 and A3 runners; we need to run them daily.

### Does this PR introduce _any_ user-facing change?
No

### How was this patch tested?
By running the added nightly e2e tests in CI.
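For local reproduction, a minimal sketch (assuming a working Ascend environment with vllm-ascend and the aisbench tooling installed) that mirrors the workflow's `pytest -sv ${{ inputs.tests }}` step:

```python
import pytest

# Runs the new int8 test file added in this PR, just as the nightly job does.
pytest.main(["-sv", "tests/e2e/nightly/models/test_qwen3_32b_int8.py"])
```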


- vLLM version: v0.11.0rc3
- vLLM main: https://github.com/vllm-project/vllm/commit/v0.11.0

---------

Signed-off-by: jiangyunfan1 <jiangyunfan1@h-partners.com>
Signed-off-by: wangli <wangli858794774@gmail.com>
Signed-off-by: Yikun Jiang <yikunkero@gmail.com>
Signed-off-by: root <root@hostname-2pbfv.foreman.pxe>
Co-authored-by: wangli <wangli858794774@gmail.com>
Co-authored-by: Yikun Jiang <yikunkero@gmail.com>
Commit 80b8df881f (parent ec1d2b5c04), authored by jiangyunfan1 on 2025-10-21 17:34:48 +08:00, committed by GitHub. 6 changed files with 307 additions and 3 deletions.


@@ -109,6 +109,7 @@ jobs:
       env:
         VLLM_WORKER_MULTIPROC_METHOD: spawn
         VLLM_USE_MODELSCOPE: True
+        VLLM_CI_RUNNER: ${{ inputs.runner }}
       run: |
         # TODO: enable more tests
         pytest -sv ${{ inputs.tests }}


@@ -41,7 +41,7 @@ defaults:
 # and ignore the lint / 1 card / 4 cards test type
 concurrency:
   group: ascend-nightly-${{ github.ref }}
-  cancel-in-progress: true
+  #cancel-in-progress: true

 jobs:
   qwen3-32b:
@@ -56,3 +56,22 @@ jobs:
       vllm: v0.11.0
       runner: ${{ matrix.os }}
       tests: tests/e2e/nightly/models/test_qwen3_32b.py
+  qwen3-32b-in8-a3:
+    strategy:
+      matrix:
+        os: [linux-aarch64-a3-4]
+    uses: ./.github/workflows/_e2e_nightly.yaml
+    with:
+      vllm: v0.11.0
+      runner: ${{ matrix.os }}
+      image: swr.cn-southwest-2.myhuaweicloud.com/base_image/ascend-ci/cann:8.2.rc1-a3-ubuntu22.04-py3.11
+      tests: tests/e2e/nightly/models/test_qwen3_32b_int8.py
+  qwen3-32b-in8-a2:
+    strategy:
+      matrix:
+        os: [linux-aarch64-a2-4]
+    uses: ./.github/workflows/_e2e_nightly.yaml
+    with:
+      vllm: v0.11.0
+      runner: ${{ matrix.os }}
+      tests: tests/e2e/nightly/models/test_qwen3_32b_int8.py


@@ -0,0 +1,110 @@
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
# Copyright 2023 The vLLM team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
#
from typing import Any
import openai
import pytest
from vllm.utils import get_open_port
from tests.e2e.conftest import RemoteOpenAIServer
from tools.aisbench import run_aisbench_cases
from tools.send_mm_request import send_image_request
MODELS = [
"Qwen/Qwen2.5-VL-7B-Instruct",
]
TENSOR_PARALLELS = [4]
prompts = [
"San Francisco is a",
]
api_keyword_args = {
"max_tokens": 10,
}
aisbench_cases = [{
"case_type": "accuracy",
"dataset_path": "vllm-ascend/textvqa-lite",
"request_conf": "vllm_api_stream_chat",
"dataset_conf": "textvqa/textvqa_gen_base64",
"max_out_len": 2048,
"batch_size": 128,
"baseline": 81,
"threshold": 5
}, {
"case_type": "performance",
"dataset_path": "vllm-ascend/textvqa-perf-1080p",
"request_conf": "vllm_api_stream_chat",
"dataset_conf": "textvqa/textvqa_gen_base64",
"num_prompts": 512,
"max_out_len": 256,
"batch_size": 128,
"request_rate": 0,
"baseline": 1,
"threshold": 0.97
}]
@pytest.mark.asyncio
@pytest.mark.parametrize("model", MODELS)
@pytest.mark.parametrize("tp_size", TENSOR_PARALLELS)
async def test_models(model: str, tp_size: int) -> None:
port = get_open_port()
env_dict = {
"TASK_QUEUE_ENABLE": "1",
"VLLM_ASCEND_ENABLE_NZ": "0",
"HCCL_OP_EXPANSION_MODE": "AIV"
}
server_args = [
"--no-enable-prefix-caching",
"--disable-mm-preprocessor-cache",
"--tensor-parallel-size",
str(tp_size),
"--port",
str(port),
"--max-model-len",
"30000",
"--max-num-batched-tokens",
"40000",
"--max-num-seqs",
"400",
"--trust-remote-code",
"--gpu-memory-utilization",
"0.8",
]
request_keyword_args: dict[str, Any] = {
**api_keyword_args,
}
with RemoteOpenAIServer(model,
server_args,
server_port=port,
env_dict=env_dict,
auto_port=False) as server:
client = server.get_async_client()
batch = await client.completions.create(
model=model,
prompt=prompts,
**request_keyword_args,
)
choices: list[openai.types.CompletionChoice] = batch.choices
assert choices[0].text, "empty response"
print(choices)
send_image_request(model, server)
# aisbench test
run_aisbench_cases(model, port, aisbench_cases)


@@ -0,0 +1,118 @@
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
# Copyright 2023 The vLLM team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
#
import os
from typing import Any
import openai
import pytest
from vllm.utils import get_open_port
from tests.e2e.conftest import RemoteOpenAIServer
from tools.aisbench import run_aisbench_cases
MODELS = [
"vllm-ascend/Qwen3-32B-W8A8",
]
MODES = [
"aclgraph",
"single",
]
TENSOR_PARALLELS = [4]
prompts = [
"San Francisco is a",
]
api_keyword_args = {
"max_tokens": 10,
}
batch_size_dict = {
"linux-aarch64-a2-4": 44,
"linux-aarch64-a3-4": 46,
}
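# VLLM_CI_RUNNER is exported by the nightly workflow (see the _e2e_nightly
# hunk above), so the test can pick a per-runner performance batch size;
# local runs fall back to the A2 runner's profile.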
VLLM_CI_RUNNER = os.getenv("VLLM_CI_RUNNER", "linux-aarch64-a2-4")
performance_batch_size = batch_size_dict.get(VLLM_CI_RUNNER, 1)
aisbench_cases = [{
"case_type": "performance",
"dataset_path": "vllm-ascend/GSM8K-in3500-bs400",
"request_conf": "vllm_api_stream_chat",
"dataset_conf": "gsm8k/gsm8k_gen_0_shot_cot_str_perf",
"num_prompts": 4 * performance_batch_size,
"max_out_len": 1500,
"batch_size": performance_batch_size,
"baseline": 1,
"threshold": 0.97
}, {
"case_type": "accuracy",
"dataset_path": "vllm-ascend/aime2024",
"request_conf": "vllm_api_general_chat",
"dataset_conf": "aime2024/aime2024_gen_0_shot_chat_prompt",
"max_out_len": 32768,
"batch_size": 32,
"baseline": 83.33,
"threshold": 17
}]
@pytest.mark.asyncio
@pytest.mark.parametrize("model", MODELS)
@pytest.mark.parametrize("mode", MODES)
@pytest.mark.parametrize("tp_size", TENSOR_PARALLELS)
async def test_models(model: str, mode: str, tp_size: int) -> None:
port = get_open_port()
env_dict = {
"TASK_QUEUE_ENABLE": "1",
"OMP_PROC_BIND": "false",
"HCCL_OP_EXPANSION_MODE": "AIV",
"PAGED_ATTENTION_MASK_LEN": "5500"
}
server_args = [
"--quantization", "ascend", "--no-enable-prefix-caching",
"--tensor-parallel-size",
str(tp_size), "--port",
str(port), "--max-model-len", "36864", "--max-num-batched-tokens",
"36864", "--block-size", "128", "--trust-remote-code",
"--gpu-memory-utilization", "0.9", "--additional-config",
'{"enable_weight_nz_layout":true}'
]
if mode == "single":
server_args.append("--enforce-eager")
request_keyword_args: dict[str, Any] = {
**api_keyword_args,
}
with RemoteOpenAIServer(model,
server_args,
server_port=port,
env_dict=env_dict,
auto_port=False) as server:
client = server.get_async_client()
batch = await client.completions.create(
model=model,
prompt=prompts,
**request_keyword_args,
)
choices: list[openai.types.CompletionChoice] = batch.choices
assert choices[0].text, "empty response"
print(choices)
if mode == "single":
return
# aisbench test
run_aisbench_cases(model, port, aisbench_cases)
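A note on the `baseline`/`threshold` fields above: this diff does not show how aisbench enforces them, but a plausible reading (an assumption, not the tool's confirmed behavior) is that accuracy cases accept scores within `threshold` points below `baseline`, while performance cases accept throughput of at least `baseline * threshold`. A hypothetical check under that assumption:

def check_case(case: dict, measured: float) -> bool:
    # Hypothetical acceptance rule; field semantics are assumed, not taken
    # from the aisbench source.
    if case["case_type"] == "accuracy":
        # e.g. baseline 83.33 with threshold 17 accepts scores >= 66.33
        return measured >= case["baseline"] - case["threshold"]
    # e.g. baseline 1 with threshold 0.97 accepts >= 97% of baseline throughput
    return measured >= case["baseline"] * case["threshold"]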


@@ -101,6 +101,9 @@ class AisbenchRunner:
         if self.task_type == "performance":
             conf_path = os.path.join(DATASET_CONF_DIR,
                                      f'{self.dataset_conf}.py')
+            if self.dataset_conf.startswith("textvqa"):
+                self.dataset_path = os.path.join(self.dataset_path,
+                                                 "textvqa_val.jsonl")
             with open(conf_path, 'r', encoding='utf-8') as f:
                 content = f.read()
                 content = re.sub(r'path=.*', f'path="{self.dataset_path}",',
@@ -180,9 +183,13 @@ class AisbenchRunner:
     def _get_result_performance(self):
         result_dir = re.search(r'Performance Result files locate in (.*)',
                                self.result_line).group(1)[:-1]
-        result_csv_file = os.path.join(result_dir, "gsm8kdataset.csv")
-        result_json_file = os.path.join(result_dir, "gsm8kdataset.json")
+        dataset_type = self.dataset_conf.split('/')[0]
+        result_csv_file = os.path.join(result_dir,
+                                       f"{dataset_type}dataset.csv")
+        result_json_file = os.path.join(result_dir,
+                                        f"{dataset_type}dataset.json")
         self.result_csv = pd.read_csv(result_csv_file)
         print("Getting performance results from file: ", result_json_file)
         with open(result_json_file, 'r', encoding='utf-8') as f:
             self.result_json = json.load(f)
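To make the renaming above concrete (values drawn from the test configs in this PR), the result-file prefix is now derived from the dataset config name instead of being hard-coded to gsm8k:

dataset_conf = "textvqa/textvqa_gen_base64"   # from the Qwen2.5-VL test above
dataset_type = dataset_conf.split('/')[0]     # -> "textvqa"
print(f"{dataset_type}dataset.csv")           # -> textvqadataset.csv (was always gsm8kdataset.csv)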

tools/send_mm_request.py (new file, 49 lines)

@@ -0,0 +1,49 @@
import base64
import os
import requests
from modelscope import snapshot_download # type: ignore
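# Download the sample image dataset once at import time; the test image is
# base64-encoded below for embedding in the chat-completions payload.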
mm_dir = snapshot_download("vllm-ascend/mm_request", repo_type='dataset')
image_path = os.path.join(mm_dir, "test_mm2.jpg")
with open(image_path, 'rb') as image_file:
image_data = base64.b64encode(image_file.read()).decode('utf-8')
data = {
    "messages": [{
        "role": "user",
        "content": [{
            "type": "text",
            "text": "What is the content of this image?"
        }, {
            "type": "image_url",
            "image_url": {
                "url": f"data:image/jpeg;base64,{image_data}"
            }
        }]
    }],
    "eos_token_id": [1, 106],
    "pad_token_id": 0,
    "top_k": 64,
    "top_p": 0.95,
    "max_tokens": 8192,
    "stream": False
}
headers = {'Accept': 'application/json', 'Content-Type': 'application/json'}
def send_image_request(model, server):
data["model"] = model
url = server.url_for("v1", "chat", "completions")
response = requests.post(url, headers=headers, json=data)
print("Status Code:", response.status_code)
response_json = response.json()
print("Response:", response_json)
assert response_json["choices"][0]["message"]["content"], "empty response"