### What this PR does / why we need it?
Add e2e test for 310p:
- trigger conditions: tag, labels (ready-for-test, e2e-310p-test), schedule
- image: m.daocloud.io/quay.io/ascend/cann:8.1.rc1-310p-ubuntu22.04-py3.10
- runner: linux-aarch64-310p-1, linux-aarch64-310p-4
- models: IntervitensInc/pangu-pro-moe-model, Qwen/Qwen3-0.6B-Base, Qwen/Qwen2.5-7B-Instruct
- vLLM version: v0.10.0
- vLLM main: b917da442b
Signed-off-by: hfadzxy <starmoon_zhang@163.com>
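
For reference, a minimal sketch of how these trigger conditions might be declared in a GitHub Actions workflow. The workflow file itself is not shown in this view, so the workflow name, tag pattern, cron expression, and job layout below are assumptions, not the PR's actual configuration:

```yaml
# Illustrative sketch only: name, tag pattern, and cron are assumptions.
name: e2e-310p-test
on:
  push:
    tags: ['v*']          # tag trigger
  pull_request:
    types: [labeled]      # label trigger
  schedule:
    - cron: '0 0 * * *'   # scheduled trigger

jobs:
  e2e-310p:
    # Only run label-triggered jobs when one of the expected labels is set.
    if: >-
      github.event_name != 'pull_request' ||
      contains(github.event.pull_request.labels.*.name, 'ready-for-test') ||
      contains(github.event.pull_request.labels.*.name, 'e2e-310p-test')
    runs-on: linux-aarch64-310p-4
    container:
      image: m.daocloud.io/quay.io/ascend/cann:8.1.rc1-310p-ubuntu22.04-py3.10
```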
The new test file (Python, 63 lines):
```python
#
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
# Copyright 2023 The vLLM team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
#

import pytest
import vllm  # noqa: F401

import vllm_ascend  # noqa: F401
from tests.e2e.conftest import VllmRunner

# Pangu local model path
MODELS = [
    "IntervitensInc/pangu-pro-moe-model",
]
# Additional config enabling the ascend scheduler and torchair graph mode.
ADDITIONAL_CONFIG = [{
    "additional_config": {
        "torchair_graph_config": {
            "enabled": True
        },
        "ascend_scheduler_config": {
            "enabled": True,
        }
    }
}]


@pytest.mark.parametrize("model", MODELS)
@pytest.mark.parametrize("dtype", ["float16"])
@pytest.mark.parametrize("max_tokens", [5])
@pytest.mark.parametrize("enforce_eager", [True, False])
@pytest.mark.parametrize("additional_config", ADDITIONAL_CONFIG)
def test_pangu_model(model: str, dtype: str, max_tokens: int,
                     enforce_eager: bool, additional_config: dict) -> None:
    # Torchair graph mode only applies to compiled (non-eager) execution,
    # so drop the additional config when running in eager mode.
    if enforce_eager:
        additional_config = {}
    example_prompts = [
        "Hello, my name is",
        "The future of AI is",
    ]

    with VllmRunner(model,
                    tensor_parallel_size=4,
                    dtype=dtype,
                    max_model_len=1024,
                    enforce_eager=enforce_eager,
                    enable_expert_parallel=True,
                    additional_config=additional_config,
                    distributed_executor_backend="mp") as vllm_model:
        vllm_model.generate_greedy(example_prompts, max_tokens)
```
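
Note the coupling in the parametrization: when `enforce_eager` is `True`, the torchair/ascend-scheduler `additional_config` is cleared, since torchair graph mode only applies to compiled execution. The matrix therefore covers plain eager mode and torchair graph mode with the ascend scheduler enabled. Locally, the test can be selected with pytest's `-k test_pangu_model` filter.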