[CI] Add DeepSeek R1 W8A8 HMB nightly CI (#5874)

### What this PR does / why we need it?

Add a DeepSeek R1 W8A8 HMB nightly CI job: a single-node e2e test running on the linux-aarch64-a3-16 runner.

- vLLM version: v0.13.0
- vLLM main: bde38c11df
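
For local reproduction, a minimal sketch of invoking the new case outside CI (the test path comes from the diff below; the `-s` flag and the assumption of a vllm-ascend checkout with its e2e dependencies installed are mine):

```python
# Hypothetical local runner for the new nightly case; assumes a
# vllm-ascend checkout with the e2e test dependencies installed.
import subprocess

subprocess.run(
    [
        "pytest", "-s",
        "tests/e2e/nightly/single_node/models/test_deepseek_r1_w8a8_hmb.py",
    ],
    check=True,  # fail loudly, as the nightly job would
)
```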

Signed-off-by: hfadzxy <starmoon_zhang@163.com>
Commit: 372f979aa5 (parent: 44d3b4d61a)
Author: zhangxinyuehfad
Committed via GitHub: 2026-01-15 20:48:20 +08:00
2 changed files with 126 additions and 0 deletions


@@ -154,6 +154,9 @@ jobs:
           # - name: deepseek3_2-exp-w8a8
           #   os: linux-aarch64-a3-16
           #   tests: tests/e2e/nightly/single_node/models/test_deepseek_v3_2_exp_w8a8.py
+          - name: deepseek-r1-w8a8-hmb
+            os: linux-aarch64-a3-16
+            tests: tests/e2e/nightly/single_node/models/test_deepseek_r1_w8a8_hmb.py
     uses: ./.github/workflows/_e2e_nightly_single_node.yaml
     with:
       vllm: v0.13.0
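
The matrix entry above only declares the case; for context, a minimal sketch of how GitHub Actions typically forwards matrix values into a reusable-workflow call (the `runner` and `tests` input names here are assumptions, not `_e2e_nightly_single_node.yaml`'s actual interface):

```yaml
# Sketch only: how a matrix entry commonly reaches a reusable workflow.
# The real input names in _e2e_nightly_single_node.yaml may differ.
jobs:
  e2e-nightly:
    strategy:
      matrix:
        include:
          - name: deepseek-r1-w8a8-hmb
            os: linux-aarch64-a3-16
            tests: tests/e2e/nightly/single_node/models/test_deepseek_r1_w8a8_hmb.py
    uses: ./.github/workflows/_e2e_nightly_single_node.yaml
    with:
      vllm: v0.13.0
      runner: ${{ matrix.os }}    # assumed input name
      tests: ${{ matrix.tests }}  # assumed input name
```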

tests/e2e/nightly/single_node/models/test_deepseek_r1_w8a8_hmb.py (new file)

@@ -0,0 +1,123 @@
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
# Copyright 2023 The vLLM team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
#
import json
from typing import Any

import openai
import pytest
from vllm.utils.network_utils import get_open_port

from tests.e2e.conftest import RemoteOpenAIServer
from tools.aisbench import run_aisbench_cases

MODELS = [
    "vllm-ascend/DeepSeek-R1-W8A8",
]

MODES = [
    "single",
]

prompts = [
    "San Francisco is a",
]

api_keyword_args = {
    "max_tokens": 10,
}

# aisbench gates: a gsm8k-lite accuracy case (baseline 95, threshold 5)
# and a GSM8K streaming performance case (baseline 1, threshold 0.97).
aisbench_cases = [{
    "case_type": "accuracy",
    "dataset_path": "vllm-ascend/gsm8k-lite",
    "request_conf": "vllm_api_general_chat",
    "dataset_conf": "gsm8k/gsm8k_gen_0_shot_cot_chat_prompt",
    "max_out_len": 6000,
    "batch_size": 32,
    "baseline": 95,
    "threshold": 5
}, {
    "case_type": "performance",
    "dataset_path": "vllm-ascend/GSM8K-in3500-bs400",
    "request_conf": "vllm_api_stream_chat",
    "dataset_conf": "gsm8k/gsm8k_gen_0_shot_cot_str_perf",
    "num_prompts": 32,
    "max_out_len": 1500,
    "batch_size": 32,
    "baseline": 1,
    "threshold": 0.97
}]

@pytest.mark.asyncio
@pytest.mark.parametrize("model", MODELS)
@pytest.mark.parametrize("mode", MODES)
async def test_models(model: str, mode: str) -> None:
    port = get_open_port()
    env_dict = {
        "HCCL_BUFFSIZE": "1024",
    }
    additional_config = {
        "ascend_scheduler_config": {
            "enabled": False
        },
        "torchair_graph_config": {
            "enabled": False,
            "enable_multistream_shared_expert": False
        }
    }
    server_args = [
        "--quantization", "ascend", "--port",
        str(port), "--data-parallel-size", "8", "--data-parallel-size-local",
        "8", "--data-parallel-rpc-port", "13389", "--tensor-parallel-size",
        "2", "--enable-expert-parallel", "--seed", "1024", "--max-num-seqs",
        "32", "--max-model-len", "6000", "--max-num-batched-tokens", "6000",
        "--trust-remote-code", "--gpu-memory-utilization", "0.92",
        "--no-enable-prefix-caching", "--reasoning-parser", "deepseek_r1"
    ]
    # Single mode runs the server in eager mode (no graph capture).
    if mode == "single":
        server_args.append("--enforce-eager")
    server_args.extend(["--additional-config", json.dumps(additional_config)])
    request_keyword_args: dict[str, Any] = {
        **api_keyword_args,
    }
    with RemoteOpenAIServer(model,
                            server_args,
                            server_port=port,
                            env_dict=env_dict,
                            auto_port=False) as server:
        # Smoke check: one short completion must come back non-empty.
        client = server.get_async_client()
        batch = await client.completions.create(
            model=model,
            prompt=prompts,
            **request_keyword_args,
        )
        choices: list[openai.types.CompletionChoice] = batch.choices
        assert choices[0].text, "empty response"
        print(choices)
        # aisbench test: single mode returns after the smoke check, so the
        # accuracy/performance cases below only run for other modes.
        if mode in ["single"]:
            return
        run_aisbench_cases(model,
                           port,
                           aisbench_cases,
                           server_args=server_args)
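
Design note (my reading of the diff, not stated in the PR): the server launches with `--data-parallel-size 8` and `--tensor-parallel-size 2`, i.e. 16 ranks, which presumably maps onto the 16-device `linux-aarch64-a3-16` runner; since `MODES` currently contains only `"single"`, the aisbench accuracy/performance gates are defined but reserved for future non-single modes.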