[Test] Add GLM-4.5 nightly test (#4225)
### What this PR does / why we need it?
Add GLM-4.5 nightly test.

- vLLM version: v0.11.2

Signed-off-by: hfadzxy <starmoon_zhang@163.com>
tests/e2e/nightly/models/test_glm4_5.py (new file, 111 lines)
@@ -0,0 +1,111 @@
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
# Copyright 2023 The vLLM team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
#
from typing import Any

import openai
import pytest
from vllm.utils import get_open_port

from tests.e2e.conftest import RemoteOpenAIServer
from tools.aisbench import run_aisbench_cases

MODELS = [
    "ZhipuAI/GLM-4.5",
]

TENSOR_PARALLELS = [8]
DATA_PARALLELS = [2]
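# Note: tensor_parallel_size=8 combined with data_parallel_size=2 spans
# 16 NPUs per run (expert parallelism is enabled across them below).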

prompts = [
    "San Francisco is a",
]

api_keyword_args = {
    "max_tokens": 10,
}
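# Extra request kwargs, forwarded verbatim to client.completions.create below.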

aisbench_cases = [{
    "case_type": "accuracy",
    "dataset_path": "vllm-ascend/gsm8k-lite",
    "request_conf": "vllm_api_general_chat",
    "dataset_conf": "gsm8k/gsm8k_gen_0_shot_cot_chat_prompt",
    "max_out_len": 4096,
    "batch_size": 8,
    "baseline": 95,
    "threshold": 5
}, {
    "case_type": "performance",
    "dataset_path": "vllm-ascend/GSM8K-in3500-bs400",
    "request_conf": "vllm_api_stream_chat",
    "dataset_conf": "gsm8k/gsm8k_gen_0_shot_cot_str_perf",
    "num_prompts": 16,
    "max_out_len": 1500,
    "batch_size": 8,
    "request_rate": 0,
    "baseline": 1,
    "threshold": 0.97
}]
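# Presumably run_aisbench_cases fails the accuracy case when the gsm8k score
# drifts more than `threshold` points from `baseline` (95 +/- 5), and the
# performance case when throughput falls below 0.97x the recorded baseline.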


@pytest.mark.asyncio
@pytest.mark.parametrize("model", MODELS)
@pytest.mark.parametrize("tp_size", TENSOR_PARALLELS)
@pytest.mark.parametrize("dp_size", DATA_PARALLELS)
async def test_models(
    model: str,
    tp_size: int,
    dp_size: int,
) -> None:
    port = get_open_port()
    env_dict = {"HCCL_BUFFSIZE": "1024"}
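    # HCCL_BUFFSIZE sizes HCCL's shared communication buffer in MB; the
    # enlarged 1 GB buffer is presumably needed for the expert-parallel
    # all-to-all traffic across the 16 devices.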
    server_args = [
        "--no-enable-prefix-caching",
        "--enable-expert-parallel",
        "--tensor-parallel-size",
        str(tp_size),
        "--data-parallel-size",
        str(dp_size),
        "--port",
        str(port),
        "--max-model-len",
        "8192",
        "--max-num-batched-tokens",
        "8192",
        "--block-size",
        "16",
        "--trust-remote-code",
        "--gpu-memory-utilization",
        "0.9",
    ]
    request_keyword_args: dict[str, Any] = {
        **api_keyword_args,
    }
    with RemoteOpenAIServer(model,
                            server_args,
                            server_port=port,
                            env_dict=env_dict,
                            auto_port=False) as server:
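        # auto_port=False presumably keeps the server wrapper on the --port
        # chosen above instead of allocating a fresh one.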
        client = server.get_async_client()
        batch = await client.completions.create(
            model=model,
            prompt=prompts,
            **request_keyword_args,
        )
        choices: list[openai.types.CompletionChoice] = batch.choices
        assert choices[0].text, "empty response"
        # aisbench test
        run_aisbench_cases(model, port, aisbench_cases)
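
For a local run, the test can presumably be invoked like the other nightly e2e cases (path and flags assumed from the repo layout):

    pytest -sv tests/e2e/nightly/models/test_glm4_5.py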

@@ -13,7 +13,7 @@ env_common:
 deployment:
   -
     server_cmd: >
-      vllm serve Yanguan/DeepSeek-V3.2-Exp-bf16 \
+      vllm serve "Yanguan/DeepSeek-V3.2-Exp-bf16"
       --host 0.0.0.0
       --port $SERVER_PORT
       --data-parallel-address $LOCAL_IP
@@ -33,7 +33,7 @@ deployment:
 
   -
     server_cmd: >
-      vllm serve Yanguan/DeepSeek-V3.2-Exp-bf16 \
+      vllm serve "Yanguan/DeepSeek-V3.2-Exp-bf16"
       --headless
       --data-parallel-size 2
       --data-parallel-size-local 1
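
Both server_cmd hunks quote the model path and drop the trailing backslash: server_cmd is a YAML folded scalar (>), which already joins the following lines with spaces, so a shell-style line continuation would presumably leak a stray \ into the assembled command.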
@@ -108,8 +108,8 @@ install_extra_components() {
     fi
     pip install custom_ops-1.0-cp311-cp311-linux_aarch64.whl
 
-    export ASCEND_CUSTOM_OPP_PATH=/usr/local/Ascend/ascend-toolkit/latest/opp/vendors/customize:${ASCEND_CUSTOM_OPP_PATH}
-    export LD_LIBRARY_PATH=/usr/local/Ascend/ascend-toolkit/latest/opp/vendors/customize/op_api/lib/:${LD_LIBRARY_PATH}
+    export ASCEND_CUSTOM_OPP_PATH="/usr/local/Ascend/ascend-toolkit/latest/opp/vendors/customize${ASCEND_CUSTOM_OPP_PATH:+:${ASCEND_CUSTOM_OPP_PATH}}"
+    export LD_LIBRARY_PATH="/usr/local/Ascend/ascend-toolkit/latest/opp/vendors/customize/op_api/lib/${LD_LIBRARY_PATH:+:${LD_LIBRARY_PATH}}"
     source /usr/local/Ascend/ascend-toolkit/set_env.sh
 
     rm -f CANN-custom_ops-sfa-linux.aarch64.run \
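
The ${VAR:+:${VAR}} expansion appends the previous value only when the variable is already set, avoiding the dangling colon the old lines produced for an unset variable (an empty LD_LIBRARY_PATH entry makes the dynamic loader search the current directory). A minimal sketch of the behavior:

    unset ASCEND_CUSTOM_OPP_PATH
    echo "customize${ASCEND_CUSTOM_OPP_PATH:+:${ASCEND_CUSTOM_OPP_PATH}}"
    # -> customize            (no trailing colon)
    export ASCEND_CUSTOM_OPP_PATH=/existing/path
    echo "customize${ASCEND_CUSTOM_OPP_PATH:+:${ASCEND_CUSTOM_OPP_PATH}}"
    # -> customize:/existing/path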