[TEST]Add initial multi modal cases for nightly test and deepseek-r1 tests (#3631)
### What this PR does / why we need it?
This PR adds the initial multi-modal model cases for the nightly test, including 3 cases for Qwen2.5-VL-7B accuracy/performance testing on A3, which need to run daily. It also includes 8 cases for DeepSeek-R1-0528-W8A8 functional, accuracy, and performance tests.
### Does this PR introduce _any_ user-facing change?
No
### How was this patch tested?
By running the tests.
- vLLM version: v0.11.0rc3
- vLLM main: https://github.com/vllm-project/vllm/commit/v0.11.0
---------
Signed-off-by: jiangyunfan1 <jiangyunfan1@h-partners.com>
.github/workflows/vllm_ascend_test_nightly.yaml (vendored, 32 lines changed)
@@ -26,7 +26,7 @@ on:
     branches:
       - 'main'
       - '*-dev'
-    types: [labeled]
+    types: [labeled,opened,synchronize]
 
 # Bash shells do not use ~/.profile or ~/.bashrc so these shells need to be explicitly
 # declared as "shell: bash -el {0}" on steps that need to be properly activated.
@@ -80,10 +80,7 @@ jobs:
     if: contains(github.event.pull_request.labels.*.name, 'run-nightly')
     strategy:
       matrix:
-        # should add A3 chip runner when available
         os: [ linux-aarch64-a3-16 ]
-    # Note (yikun): If CI resource are limited we can split job into two chain jobs
-    # only trigger e2e test after lint passed and the change is e2e related with pull request.
     uses: ./.github/workflows/_e2e_nightly.yaml
     with:
       vllm: v0.11.0
@@ -94,15 +91,32 @@ jobs:
     if: contains(github.event.pull_request.labels.*.name, 'run-nightly')
     strategy:
       matrix:
-        # should add A3 chip runner when available
         os: [ linux-aarch64-a3-16 ]
-    # Note (yikun): If CI resource are limited we can split job into two chain jobs
-    # only trigger e2e test after lint passed and the change is e2e related with pull request.
     uses: ./.github/workflows/_e2e_nightly.yaml
     with:
       vllm: v0.11.0
       runner: ${{ matrix.os }}
       image: swr.cn-southwest-2.myhuaweicloud.com/base_image/ascend-ci/cann:8.2.rc1-a3-ubuntu22.04-py3.11
       tests: tests/e2e/nightly/models/test_deepseek_r1_w8a8_eplb.py
 
+  qwen2-5-vl-7b:
+    if: contains(github.event.pull_request.labels.*.name, 'run-nightly')
+    strategy:
+      matrix:
+        os: [ linux-aarch64-a3-4 ]
+    uses: ./.github/workflows/_e2e_nightly.yaml
+    with:
+      vllm: v0.11.0
+      runner: ${{ matrix.os }}
+      image: swr.cn-southwest-2.myhuaweicloud.com/base_image/ascend-ci/cann:8.2.rc1-a3-ubuntu22.04-py3.11
+      tests: tests/e2e/nightly/models/test_qwen2_5_vl_7b.py
+
+  deepseek-r1-0528-w8a8:
+    if: contains(github.event.pull_request.labels.*.name, 'run-nightly')
+    strategy:
+      matrix:
+        os: [ linux-aarch64-a3-16 ]
+    uses: ./.github/workflows/_e2e_nightly.yaml
+    with:
+      vllm: v0.11.0
+      runner: ${{ matrix.os }}
+      image: swr.cn-southwest-2.myhuaweicloud.com/base_image/ascend-ci/cann:8.2.rc1-a3-ubuntu22.04-py3.11
+      tests: tests/e2e/nightly/models/test_deepseek_r1_0528_w8a8.py
tests/e2e/nightly/models/test_deepseek_r1_0528_w8a8.py (new file, 136 lines)
@@ -0,0 +1,136 @@
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
# Copyright 2023 The vLLM team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
#

import json
from typing import Any

import openai
import pytest
from vllm.utils import get_open_port

from tests.e2e.conftest import RemoteOpenAIServer
from tools.aisbench import run_aisbench_cases

MODELS = [
    "vllm-ascend/DeepSeek-R1-0528-W8A8",
]

MODES = [
    "torchair",
    "single",
    "aclgraph",
    "no_chunkprefill",
]

prompts = [
    "San Francisco is a",
]

api_keyword_args = {
    "max_tokens": 10,
}

aisbench_cases = [{
    "case_type": "accuracy",
    "dataset_path": "vllm-ascend/gsm8k-lite",
    "request_conf": "vllm_api_general_chat",
    "dataset_conf": "gsm8k/gsm8k_gen_0_shot_cot_chat_prompt",
    "max_out_len": 32768,
    "batch_size": 32,
    "baseline": 95,
    "threshold": 5
}, {
    "case_type": "performance",
    "dataset_path": "vllm-ascend/GSM8K-in3500-bs400",
    "request_conf": "vllm_api_stream_chat",
    "dataset_conf": "gsm8k/gsm8k_gen_0_shot_cot_str_perf",
    "num_prompts": 400,
    "max_out_len": 1500,
    "batch_size": 1000,
    "baseline": 1,
    "threshold": 0.97
}]


@pytest.mark.asyncio
@pytest.mark.parametrize("model", MODELS)
@pytest.mark.parametrize("mode", MODES)
async def test_models(model: str, mode: str) -> None:
    port = get_open_port()
    env_dict = {
        "OMP_NUM_THREADS": "10",
        "OMP_PROC_BIND": "false",
        "HCCL_BUFFSIZE": "1024",
        "PYTORCH_NPU_ALLOC_CONF": "expandable_segments:True"
    }
    speculative_config = {
        "num_speculative_tokens": 1,
        "method": "deepseek_mtp"
    }
    additional_config = {
        "ascend_scheduler_config": {
            "enabled": False
        },
        "torchair_graph_config": {
            "enabled": True,
            "enable_multistream_moe": False,
            "enable_multistream_mla": True,
            "graph_batch_sizes": [16],
            "use_cached_graph": True
        },
        "chunked_prefill_for_mla": True,
        "enable_weight_nz_layout": True
    }
    server_args = [
        "--quantization", "ascend", "--data-parallel-size", "2",
        "--tensor-parallel-size", "8", "--enable-expert-parallel", "--port",
        str(port), "--seed", "1024", "--max-model-len", "36864",
        "--max-num-batched-tokens", "4096", "--max-num-seqs", "16",
        "--trust-remote-code", "--gpu-memory-utilization", "0.9",
        "--speculative-config",
        json.dumps(speculative_config)
    ]
    if mode == "single":
        server_args.append("--enforce-eager")
        additional_config["torchair_graph_config"] = {"enabled": False}
    if mode == "aclgraph":
        additional_config["torchair_graph_config"] = {"enabled": False}
    if mode == "no_chunkprefill":
        additional_config["ascend_scheduler_config"] = {"enabled": True}
        i = server_args.index("--max-num-batched-tokens") + 1
        server_args[i] = "36864"
    server_args.extend(["--additional-config", json.dumps(additional_config)])
    request_keyword_args: dict[str, Any] = {
        **api_keyword_args,
    }
    with RemoteOpenAIServer(model,
                            server_args,
                            server_port=port,
                            env_dict=env_dict,
                            auto_port=False) as server:
        client = server.get_async_client()
        batch = await client.completions.create(
            model=model,
            prompt=prompts,
            **request_keyword_args,
        )
        choices: list[openai.types.CompletionChoice] = batch.choices
        assert choices[0].text, "empty response"
        print(choices)
        if mode in ["single", "no_chunkprefill"]:
            return
        # aisbench test
        run_aisbench_cases(model, port, aisbench_cases)
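For readability, the per-mode server tweaks above can be summarized in one place. Below is a minimal sketch that mirrors the logic of test_models; the helper name config_for_mode is illustrative and not part of this patch:

import json

def config_for_mode(mode: str) -> tuple[dict, list[str]]:
    """Mirror the per-mode tweaks made in test_models above (illustrative only)."""
    additional_config = {
        "ascend_scheduler_config": {"enabled": False},
        "torchair_graph_config": {"enabled": True},  # default: torchair graph mode
    }
    extra_args: list[str] = []
    if mode == "single":
        # eager execution, torchair graph disabled
        extra_args.append("--enforce-eager")
        additional_config["torchair_graph_config"] = {"enabled": False}
    if mode == "aclgraph":
        # ACL graph capture instead of torchair
        additional_config["torchair_graph_config"] = {"enabled": False}
    if mode == "no_chunkprefill":
        # ascend scheduler on; batched-token budget raised to the full model length
        additional_config["ascend_scheduler_config"] = {"enabled": True}
        extra_args.extend(["--max-num-batched-tokens", "36864"])
    return additional_config, extra_args

for m in ["torchair", "single", "aclgraph", "no_chunkprefill"]:
    cfg, args = config_for_mode(m)
    print(m, json.dumps(cfg), args)

Only the torchair and aclgraph modes go on to run the aisbench accuracy/performance cases; single and no_chunkprefill stop after the functional completion check.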
@@ -51,6 +51,15 @@ VLLM_CI_RUNNER = os.getenv("VLLM_CI_RUNNER", "linux-aarch64-a2-4")
 performance_batch_size = batch_size_dict.get(VLLM_CI_RUNNER, 1)
 
 aisbench_cases = [{
+    "case_type": "accuracy",
+    "dataset_path": "vllm-ascend/aime2024",
+    "request_conf": "vllm_api_general_chat",
+    "dataset_conf": "aime2024/aime2024_gen_0_shot_chat_prompt",
+    "max_out_len": 32768,
+    "batch_size": 32,
+    "baseline": 83.33,
+    "threshold": 17
+}, {
     "case_type": "performance",
     "dataset_path": "vllm-ascend/GSM8K-in3500-bs400",
     "request_conf": "vllm_api_stream_chat",
@@ -60,15 +69,6 @@ aisbench_cases = [{
     "batch_size": performance_batch_size,
     "baseline": 1,
     "threshold": 0.97
-}, {
-    "case_type": "accuracy",
-    "dataset_path": "vllm-ascend/aime2024",
-    "request_conf": "vllm_api_general_chat",
-    "dataset_conf": "aime2024/aime2024_gen_0_shot_chat_prompt",
-    "max_out_len": 32768,
-    "batch_size": 32,
-    "baseline": 83.33,
-    "threshold": 17
-}]
+}]
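How baseline and threshold combine into a pass/fail verdict is implemented inside tools/aisbench.py and is not shown in this hunk. A hedged sketch of one plausible reading, assuming an accuracy score may drop at most threshold points below baseline and a performance ratio must stay at or above threshold of baseline:

def case_passes(case: dict, measured: float) -> bool:
    # Assumed semantics, for illustration only; the authoritative
    # check lives in tools/aisbench.py.
    if case["case_type"] == "accuracy":
        # e.g. baseline 83.33 with threshold 17 tolerates scores down to 66.33
        return measured >= case["baseline"] - case["threshold"]
    # performance: e.g. baseline 1 with threshold 0.97 tolerates a 3% regression
    return measured / case["baseline"] >= case["threshold"]

print(case_passes({"case_type": "accuracy", "baseline": 83.33, "threshold": 17}, 80.0))  # True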
@@ -43,13 +43,12 @@ class AisbenchRunner:
         if self.task_type == "accuracy":
             aisbench_cmd = [
                 'ais_bench', '--models', f'{self.request_conf}_custom',
-                '--datasets', f'{dataset_conf}', '--debug'
+                '--datasets', f'{dataset_conf}'
             ]
         if self.task_type == "performance":
             aisbench_cmd = [
                 'ais_bench', '--models', f'{self.request_conf}_custom',
-                '--datasets', f'{dataset_conf}_custom', '--debug', '--mode',
-                'perf'
+                '--datasets', f'{dataset_conf}_custom', '--mode', 'perf'
             ]
         if self.num_prompts:
             aisbench_cmd.extend(['--num-prompts', str(self.num_prompts)])
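With --debug dropped, the assembled command line is shorter. An illustrative reconstruction for a performance case, using concrete values from the GSM8K case above:

# Illustrative: what aisbench_cmd evaluates to for a performance case with
# request_conf='vllm_api_stream_chat',
# dataset_conf='gsm8k/gsm8k_gen_0_shot_cot_str_perf' and num_prompts=400.
aisbench_cmd = [
    'ais_bench', '--models', 'vllm_api_stream_chat_custom',
    '--datasets', 'gsm8k/gsm8k_gen_0_shot_cot_str_perf_custom',
    '--mode', 'perf', '--num-prompts', '400'
]
print(' '.join(aisbench_cmd))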
@@ -64,9 +63,11 @@ class AisbenchRunner:
                  port: int,
                  aisbench_config: dict,
                  verify=True):
-        self.result_line = None
         self.dataset_path = snapshot_download(aisbench_config["dataset_path"],
                                               repo_type='dataset')
+        self.model = model
+        self.model_path = snapshot_download(model)
+        self.port = port
         self.task_type = aisbench_config["case_type"]
         self.request_conf = aisbench_config["request_conf"]
         self.dataset_conf = aisbench_config.get("dataset_conf")
@@ -74,10 +75,13 @@ class AisbenchRunner:
         self.max_out_len = aisbench_config["max_out_len"]
         self.batch_size = aisbench_config["batch_size"]
         self.request_rate = aisbench_config.get("request_rate", 0)
-        self.model = model
-        self.model_path = snapshot_download(model)
-        self.port = port
+        self.temperature = aisbench_config.get("temperature")
+        self.top_k = aisbench_config.get("top_k")
+        self.top_p = aisbench_config.get("top_p")
+        self.seed = aisbench_config.get("seed")
+        self.repetition_penalty = aisbench_config.get("repetition_penalty")
         self.exp_folder = None
+        self.result_line = None
         self._init_dataset_conf()
         self._init_request_conf()
         self._run_aisbench_task()
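The constructor now also reads optional sampling keys (temperature, top_k, top_p, seed, repetition_penalty) from the case config via .get(), so existing cases keep working unchanged. An illustrative case dict exercising the new keys; the sampling values here are invented for the example:

case_with_sampling = {
    "case_type": "accuracy",
    "dataset_path": "vllm-ascend/gsm8k-lite",
    "request_conf": "vllm_api_general_chat",
    "dataset_conf": "gsm8k/gsm8k_gen_0_shot_cot_chat_prompt",
    "max_out_len": 32768,
    "batch_size": 32,
    # optional sampling overrides picked up via aisbench_config.get(...)
    "temperature": 0.6,
    "top_p": 0.95,
    "seed": 1024,
    "baseline": 95,
    "threshold": 5,
}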
@@ -138,6 +142,19 @@ class AisbenchRunner:
             content = re.sub(
                 r"temperature.*",
                 "temperature = 0.6,\n        ignore_eos = False,", content)
+            if self.temperature:
+                content = re.sub(r"temperature.*",
+                                 f"temperature = {self.temperature}", content)
+            if self.top_p:
+                content = re.sub(r"#?top_p.*", f"top_p = {self.top_p}", content)
+            if self.top_k:
+                content = re.sub(r"#top_k.*", f"top_k = {self.top_k}", content)
+            if self.seed:
+                content = re.sub(r"#seed.*", f"seed = {self.seed}", content)
+            if self.repetition_penalty:
+                content = re.sub(
+                    r"#repetition_penalty.*",
+                    f"repetition_penalty = {self.repetition_penalty}", content)
         conf_path_new = os.path.join(REQUEST_CONF_DIR,
                                      f'{self.request_conf}_custom.py')
         with open(conf_path_new, 'w', encoding='utf-8') as f:
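These substitutions patch a generated request-conf file in place, uncommenting and overriding the sampling parameters. A toy demonstration of the same re.sub patterns on an invented conf snippet (real conf files live under REQUEST_CONF_DIR):

import re

# Invented conf snippet for illustration only.
content = ("temperature = 0.01,\n"
           "#top_p = 1.0,\n"
           "#top_k = -1,\n"
           "#seed = None,\n")
content = re.sub(r"temperature.*", "temperature = 0.6,", content)
content = re.sub(r"#?top_p.*", "top_p = 0.95,", content)
content = re.sub(r"#top_k.*", "top_k = 50,", content)
content = re.sub(r"#seed.*", "seed = 1024,", content)
print(content)
# temperature = 0.6,
# top_p = 0.95,
# top_k = 50,
# seed = 1024,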