diff --git a/.github/workflows/vllm_ascend_test_nightly_a3.yaml b/.github/workflows/vllm_ascend_test_nightly_a3.yaml
index 4abbdef4..d0dc99c2 100644
--- a/.github/workflows/vllm_ascend_test_nightly_a3.yaml
+++ b/.github/workflows/vllm_ascend_test_nightly_a3.yaml
@@ -134,6 +134,9 @@ jobs:
           - name: deepseek3_2-exp-w8a8
             os: linux-aarch64-a3-16
             tests: tests/e2e/nightly/models/test_deepseek_v3_2_exp_w8a8.py
+          - name: glm-4-5
+            os: linux-aarch64-a3-16
+            tests: tests/e2e/nightly/models/test_glm4_5.py
     uses: ./.github/workflows/_e2e_nightly_single_node.yaml
     with:
       vllm: v0.11.2
diff --git a/tests/e2e/nightly/models/test_glm4_5.py b/tests/e2e/nightly/models/test_glm4_5.py
new file mode 100644
index 00000000..aeb71f68
--- /dev/null
+++ b/tests/e2e/nightly/models/test_glm4_5.py
@@ -0,0 +1,111 @@
+# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
+# Copyright 2023 The vLLM team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# This file is a part of the vllm-ascend project.
+#
+from typing import Any
+
+import openai
+import pytest
+from vllm.utils import get_open_port
+
+from tests.e2e.conftest import RemoteOpenAIServer
+from tools.aisbench import run_aisbench_cases
+
+MODELS = [
+    "ZhipuAI/GLM-4.5",
+]
+
+TENSOR_PARALLELS = [8]
+DATA_PARALLELS = [2]
+
+prompts = [
+    "San Francisco is a",
+]
+
+api_keyword_args = {
+    "max_tokens": 10,
+}
+
+aisbench_cases = [{
+    "case_type": "accuracy",
+    "dataset_path": "vllm-ascend/gsm8k-lite",
+    "request_conf": "vllm_api_general_chat",
+    "dataset_conf": "gsm8k/gsm8k_gen_0_shot_cot_chat_prompt",
+    "max_out_len": 4096,
+    "batch_size": 8,
+    "baseline": 95,
+    "threshold": 5
+}, {
+    "case_type": "performance",
+    "dataset_path": "vllm-ascend/GSM8K-in3500-bs400",
+    "request_conf": "vllm_api_stream_chat",
+    "dataset_conf": "gsm8k/gsm8k_gen_0_shot_cot_str_perf",
+    "num_prompts": 16,
+    "max_out_len": 1500,
+    "batch_size": 8,
+    "request_rate": 0,
+    "baseline": 1,
+    "threshold": 0.97
+}]
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("model", MODELS)
+@pytest.mark.parametrize("tp_size", TENSOR_PARALLELS)
+@pytest.mark.parametrize("dp_size", DATA_PARALLELS)
+async def test_models(
+    model: str,
+    tp_size: int,
+    dp_size: int,
+) -> None:
+    port = get_open_port()
+    env_dict = {"HCCL_BUFFSIZE": "1024"}
+    server_args = [
+        "--no-enable-prefix-caching",
+        "--enable-expert-parallel",
+        "--tensor-parallel-size",
+        str(tp_size),
+        "--data-parallel-size",
+        str(dp_size),
+        "--port",
+        str(port),
+        "--max-model-len",
+        "8192",
+        "--max-num-batched-tokens",
+        "8192",
+        "--block-size",
+        "16",
+        "--trust-remote-code",
+        "--gpu-memory-utilization",
+        "0.9",
+    ]
+    request_keyword_args: dict[str, Any] = {
+        **api_keyword_args,
+    }
+    with RemoteOpenAIServer(model,
+                            server_args,
+                            server_port=port,
+                            env_dict=env_dict,
+                            auto_port=False) as server:
+        client = server.get_async_client()
+        batch = await client.completions.create(
+            model=model,
+            prompt=prompts,
+            **request_keyword_args,
+        )
+        choices: list[openai.types.CompletionChoice] = batch.choices
+        assert choices[0].text, "empty response"
+        # aisbench test
+        run_aisbench_cases(model, port, aisbench_cases)
diff --git a/tests/e2e/nightly/multi_node/config/models/DeepSeek-V3_2-Exp-bf16.yaml b/tests/e2e/nightly/multi_node/config/models/DeepSeek-V3_2-Exp-bf16.yaml
index 40ac6476..93e76ca5 100644
--- a/tests/e2e/nightly/multi_node/config/models/DeepSeek-V3_2-Exp-bf16.yaml
+++ b/tests/e2e/nightly/multi_node/config/models/DeepSeek-V3_2-Exp-bf16.yaml
@@ -13,7 +13,7 @@ env_common:
 deployment:
   -
     server_cmd: >
-      vllm serve Yanguan/DeepSeek-V3.2-Exp-bf16 \
+      vllm serve "Yanguan/DeepSeek-V3.2-Exp-bf16"
       --host 0.0.0.0
       --port $SERVER_PORT
       --data-parallel-address $LOCAL_IP
@@ -33,7 +33,7 @@ deployment:
   -
     server_cmd: >
-      vllm serve Yanguan/DeepSeek-V3.2-Exp-bf16 \
+      vllm serve "Yanguan/DeepSeek-V3.2-Exp-bf16"
       --headless
       --data-parallel-size 2
       --data-parallel-size-local 1
diff --git a/tests/e2e/nightly/multi_node/scripts/run.sh b/tests/e2e/nightly/multi_node/scripts/run.sh
index 48d1c39d..0c134441 100644
--- a/tests/e2e/nightly/multi_node/scripts/run.sh
+++ b/tests/e2e/nightly/multi_node/scripts/run.sh
@@ -108,8 +108,8 @@ install_extra_components() {
     fi
     pip install custom_ops-1.0-cp311-cp311-linux_aarch64.whl
-    export ASCEND_CUSTOM_OPP_PATH=/usr/local/Ascend/ascend-toolkit/latest/opp/vendors/customize:${ASCEND_CUSTOM_OPP_PATH}
-    export LD_LIBRARY_PATH=/usr/local/Ascend/ascend-toolkit/latest/opp/vendors/customize/op_api/lib/:${LD_LIBRARY_PATH}
+    export ASCEND_CUSTOM_OPP_PATH="/usr/local/Ascend/ascend-toolkit/latest/opp/vendors/customize${ASCEND_CUSTOM_OPP_PATH:+:${ASCEND_CUSTOM_OPP_PATH}}"
+    export LD_LIBRARY_PATH="/usr/local/Ascend/ascend-toolkit/latest/opp/vendors/customize/op_api/lib/${LD_LIBRARY_PATH:+:${LD_LIBRARY_PATH}}"
     source /usr/local/Ascend/ascend-toolkit/set_env.sh
    rm -f CANN-custom_ops-sfa-linux.aarch64.run \
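
Note on the run.sh hunk above: "${VAR:+:${VAR}}" is standard POSIX parameter
expansion; the ":${VAR}" part is substituted only when VAR is set and
non-empty, so an unset ASCEND_CUSTOM_OPP_PATH or LD_LIBRARY_PATH no longer
leaves a dangling ":" (search-path variables treat an empty entry as the
current directory). A minimal sketch of the behavior, using a hypothetical
variable EXTRA:

    unset EXTRA
    echo "base${EXTRA:+:${EXTRA}}"   # prints: base            (no stray colon)
    EXTRA=/opt/lib
    echo "base${EXTRA:+:${EXTRA}}"   # prints: base:/opt/lib   (old value kept)

The YAML change is similar hygiene: server_cmd uses a folded block scalar (>),
which joins its lines with spaces, so the trailing "\" landed mid-command where
the shell read it as an escape for the following space and mangled the next
flag; dropping the continuation character (and quoting the model path) yields
the intended single-line command.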