[Feature]: Support 310P device run qwen2.5/3 dense and qwen2.5vl models (#5776)

### What this PR does / why we need it?
Add basic 310P support. For now, only dense models work, and only with eager mode.
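
To make the scope concrete, below is a minimal offline-inference sketch for a dense Qwen2.5 model on a 310P device. This is a hedged illustration, not part of the PR: the model id, prompt, and sampling settings are placeholders.

```python
# Illustrative sketch only; model id, prompt and sampling settings are assumptions.
from vllm import LLM, SamplingParams

llm = LLM(
    model="Qwen/Qwen2.5-7B-Instruct",  # any supported dense Qwen2.5/3 checkpoint
    dtype="float16",                   # the 310P tests below run in float16
    enforce_eager=True,                # only eager mode is supported on 310P for now
    max_model_len=8192,
)
outputs = llm.generate(["Hello, my name is"],
                       SamplingParams(temperature=0.0, max_tokens=5))
print(outputs[0].outputs[0].text)
```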

- vLLM version: v0.13.0
- vLLM main:
2f4e6548ef

---------

Signed-off-by: Tflowers-0129 <2906339855@qq.com>
Signed-off-by: Shaoxu Cheng <2906339855@qq.com>
Commit 1ffca8673f (parent 7feb74590b), authored by Shaoxu Cheng and committed via GitHub on 2026-01-17 11:49:18 +08:00.
17 changed files with 682 additions and 23 deletions.


@@ -36,7 +36,9 @@ def test_llm_models(dtype: str, max_tokens: int) -> None:
         vllm_model.generate_greedy(example_prompts, max_tokens)
-def test_multimodal_vl():
+@pytest.mark.skip(reason="310P: multimodal test skipped, offline is ok")
+@pytest.mark.parametrize("dtype", ["float16"])
+def test_multimodal_vl(dtype: str):
     image = ImageAsset("cherry_blossom").pil_image.convert("RGB")
     img_questions = [
@@ -60,6 +62,7 @@ def test_multimodal_vl():
                 "max_pixels": 1280 * 28 * 28,
                 "fps": 1,
             },
+            dtype=dtype,
             max_model_len=8192,
             enforce_eager=True,
             limit_mm_per_prompt={"image": 1}) as vllm_model:
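
Since the hunks above are fragmentary, here is a hedged reconstruction of how the updated multimodal test plausibly fits together. Only the arguments shown in the diff are taken from the PR; the model id, the prompt template, and the exact `generate_greedy` call shape are assumptions modelled on vLLM-style test runners.

```python
# Hedged reconstruction; values not shown in the diff above are assumptions.
import pytest
from vllm.assets.image import ImageAsset

from tests.e2e.conftest import VllmRunner


@pytest.mark.skip(reason="310P: multimodal test skipped, offline is ok")
@pytest.mark.parametrize("dtype", ["float16"])
def test_multimodal_vl(dtype: str) -> None:
    image = ImageAsset("cherry_blossom").pil_image.convert("RGB")
    img_questions = ["What is shown in this image?"]  # assumed prompt text
    prompts = [f"USER: <image>\n{q}\nASSISTANT:" for q in img_questions]  # assumed template
    with VllmRunner(
            "Qwen/Qwen2.5-VL-3B-Instruct",  # assumed model id
            mm_processor_kwargs={
                "max_pixels": 1280 * 28 * 28,
                "fps": 1,
            },
            dtype=dtype,  # added by this change
            max_model_len=8192,
            enforce_eager=True,
            limit_mm_per_prompt={"image": 1}) as vllm_model:
        # Call shape assumed to mirror vLLM's test runner.
        vllm_model.generate_greedy(prompts, max_tokens=64, images=[image])
```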


@@ -1,3 +1,20 @@
+#
+# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
+# Copyright 2023 The vLLM team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# This file is a part of the vllm-ascend project.
 import pytest
 from tests.e2e.conftest import VllmRunner
@@ -5,7 +22,6 @@ from tests.e2e.conftest import VllmRunner
 @pytest.mark.parametrize("dtype", ["float16"])
 @pytest.mark.parametrize("max_tokens", [5])
-@pytest.skip("310p does not support parallel inference now. Fix me")
 def test_models(dtype: str, max_tokens: int) -> None:
     example_prompts = [
         "Hello, my name is",