[CI] Enable linux-aarch64-a2 (64GB) and tp2 * 2 max-parallel to speed up CI (#2065)

### What this PR does / why we need it?
Currently our full workflow takes about 3 hours to run, which seriously affects the developer experience, so optimization is urgent. After this PR, the running time of the full CI is expected to drop to about 1h40min.

- Enable linux-aarch64-a2 (64GB) runners to replace linux-arm64-npu (32GB)
- Change TP4 to TP2 with max-parallel set to 2, so two TP2 jobs run concurrently (see the workflow sketch below)
- Move the DeepSeek-V2-Lite-W8A8 test to the single-card suite
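
The "TP2 * 2 max-parallel" part is a workflow-level setting rather than a test change. Below is a minimal sketch of the kind of GitHub Actions configuration this implies; the job name, matrix split, and test paths are illustrative assumptions, not the exact contents of the vllm-ascend workflow files.

```yaml
# Sketch only: job name, matrix split, and test paths are illustrative assumptions.
jobs:
  e2e-multicard-test:
    # Use the larger 64GB aarch64 runners instead of the 32GB linux-arm64-npu ones.
    runs-on: linux-aarch64-a2
    strategy:
      # Allow two TP2 shards to run side by side instead of one serialized TP4 job.
      max-parallel: 2
      matrix:
        # Hypothetical split of the multicard suite into two shards.
        test-dir: [tests/e2e/group-1, tests/e2e/group-2]
    steps:
      - name: Run multicard e2e tests
        run: pytest -sv ${{ matrix.test-dir }}
```

The test diffs below then drop tensor_parallel_size from 4 to 2 so each shard fits on its runner.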

### Does this PR introduce _any_ user-facing change?
No


- vLLM version: v0.10.0
- vLLM main: a2480251ec

---------

Signed-off-by: wangli <wangli858794774@gmail.com>
Author: Li Wang
Date: 2025-07-29 18:59:05 +08:00 (committed by GitHub)
Parent: ca8007f584
Commit: f60bb474f9
14 changed files with 75 additions and 75 deletions

View File

@@ -91,9 +91,9 @@ MORE_ARGS = {
"Qwen/Qwen2.5-0.5B-Instruct":
None,
"Qwen/Qwen3-30B-A3B":
"tensor_parallel_size=4,enable_expert_parallel=True,enforce_eager=True",
"tensor_parallel_size=2,enable_expert_parallel=True,enforce_eager=True",
"deepseek-ai/DeepSeek-V2-Lite":
"tensor_parallel_size=4,trust_remote_code=True,enforce_eager=True"
"tensor_parallel_size=2,trust_remote_code=True,enforce_eager=True"
}
multiprocessing.set_start_method("spawn", force=True)

View File

@@ -46,7 +46,7 @@ def test_generate_with_allgather():
sampling_params = SamplingParams(max_tokens=100, temperature=0.0)
with VllmRunner(snapshot_download("vllm-ascend/DeepSeek-V3-Pruning"),
tensor_parallel_size=4,
tensor_parallel_size=2,
enforce_eager=True,
max_model_len=1024,
dtype="auto",
@@ -74,7 +74,7 @@ def test_generate_with_alltoall():
sampling_params = SamplingParams(max_tokens=100, temperature=0.0)
with VllmRunner(snapshot_download("vllm-ascend/DeepSeek-V3-Pruning"),
tensor_parallel_size=4,
tensor_parallel_size=2,
enforce_eager=True,
max_model_len=1024,
dtype="auto",

View File

@@ -42,7 +42,7 @@ def test_models_distributed_QwQ():
with VllmRunner(
"Qwen/QwQ-32B",
dtype=dtype,
tensor_parallel_size=4,
tensor_parallel_size=2,
distributed_executor_backend="mp",
) as vllm_model:
vllm_model.generate_greedy(example_prompts, max_tokens)
@@ -57,7 +57,7 @@ def test_models_distributed_DeepSeek_multistream_moe():
with VllmRunner(
"vllm-ascend/DeepSeek-V3-Pruning",
dtype=dtype,
tensor_parallel_size=4,
tensor_parallel_size=2,
distributed_executor_backend="mp",
additional_config={
"torchair_graph_config": {
@@ -82,7 +82,7 @@ def test_models_distributed_DeepSeek_dbo():
with VllmRunner(
"deepseek-ai/DeepSeek-V2-Lite",
dtype=dtype,
tensor_parallel_size=4,
tensor_parallel_size=2,
distributed_executor_backend="mp",
) as vllm_model:
model_arch = 'DeepseekV2ForCausalLM'
@@ -106,7 +106,7 @@ def test_models_distributed_DeepSeekV3_dbo():
with VllmRunner(
"vllm-ascend/DeepSeek-V3-Pruning",
dtype=dtype,
tensor_parallel_size=4,
tensor_parallel_size=2,
distributed_executor_backend="mp",
) as vllm_model:
model_arch = 'DeepseekV3ForCausalLM'
@@ -118,24 +118,6 @@ def test_models_distributed_DeepSeekV3_dbo():
vllm_model.generate(example_prompts, sampling_params)
@pytest.mark.skip(reason="Due to OOM,waiting for 1311pr to merge in")
def test_models_distributed_DeepSeek_W8A8():
example_prompts = [
"Hello, my name is",
]
max_tokens = 5
with VllmRunner(
snapshot_download("vllm-ascend/DeepSeek-V2-Lite-W8A8"),
max_model_len=8192,
enforce_eager=True,
dtype="auto",
tensor_parallel_size=4,
quantization="ascend",
) as vllm_model:
vllm_model.generate_greedy(example_prompts, max_tokens)
def test_models_distributed_pangu():
example_prompts = [
"Hello, my name is",
@@ -147,7 +129,7 @@ def test_models_distributed_pangu():
max_model_len=8192,
enforce_eager=True,
dtype="auto",
tensor_parallel_size=4,
tensor_parallel_size=2,
distributed_executor_backend="mp",
) as vllm_model:
vllm_model.generate_greedy(example_prompts, max_tokens)
@@ -169,7 +151,7 @@ def test_models_distributed_topk() -> None:
with VllmRunner(
"deepseek-ai/DeepSeek-V2-Lite",
dtype=dtype,
tensor_parallel_size=4,
tensor_parallel_size=2,
distributed_executor_backend="mp",
) as vllm_model:
vllm_model.generate(example_prompts, sampling_params)
@@ -186,7 +168,7 @@ def test_models_distributed_Qwen3_W8A8():
max_model_len=8192,
enforce_eager=True,
dtype="auto",
tensor_parallel_size=4,
tensor_parallel_size=2,
quantization="ascend",
) as vllm_model:
vllm_model.generate_greedy(example_prompts, max_tokens)

View File

@@ -22,7 +22,7 @@ MODELS = [
"Qwen/Qwen3-0.6B",
]
TENSOR_PARALLELS = [2]
TENSOR_PARALLELS = [1]
PIPELINE_PARALLELS = [2]
DIST_EXECUTOR_BACKEND = ["mp", "ray"]

View File

@@ -30,7 +30,7 @@ os.environ["PYTORCH_NPU_ALLOC_CONF"] = "max_split_size_mb:256"
def _deepseek_torchair_test_fixture(
additional_config: Dict,
*,
tensor_parallel_size=4,
tensor_parallel_size=2,
):
example_prompts = [
"Hello, my name is",
@@ -98,7 +98,7 @@ def test_e2e_deepseekv3_with_torchair_ms_mla():
def _pangu_torchair_test_fixture(
additional_config: Dict,
*,
tensor_parallel_size=4,
tensor_parallel_size=2,
):
example_prompts = [
"Hello, my name is",

View File

@@ -0,0 +1,42 @@
#
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
# Copyright 2023 The vLLM team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
#
import pytest
from modelscope import snapshot_download # type: ignore[import-untyped]
from tests.e2e.conftest import VllmRunner
MODELS = [
"vllm-ascend/DeepSeek-V2-Lite-W8A8",
"vllm-ascend/Qwen2.5-0.5B-Instruct-W8A8"
]
@pytest.mark.parametrize("model", MODELS)
def test_quant_W8A8(example_prompts, model):
max_tokens = 5
model_path = snapshot_download(model)
with VllmRunner(
model_path,
max_model_len=8192,
enforce_eager=True,
dtype="auto",
gpu_memory_utilization=0.7,
quantization="ascend",
) as vllm_model:
vllm_model.generate_greedy(example_prompts, max_tokens)

View File

@@ -25,7 +25,6 @@ from unittest.mock import patch
import pytest
import vllm # noqa: F401
from modelscope import snapshot_download # type: ignore[import-untyped]
from vllm import SamplingParams
from vllm.assets.audio import AudioAsset
from vllm.assets.image import ImageAsset
@@ -40,9 +39,6 @@ MODELS = [
MULTIMODALITY_VL_MODELS = ["Qwen/Qwen2.5-VL-3B-Instruct"]
MULTIMODALITY_AUDIO_MODELS = ["Qwen/Qwen2-Audio-7B-Instruct"]
QUANTIZATION_MODELS = [
"vllm-ascend/Qwen2.5-0.5B-Instruct-W8A8",
]
os.environ["PYTORCH_NPU_ALLOC_CONF"] = "max_split_size_mb:256"
AUDIO_ASSETS = [AudioAsset("mary_had_lamb"), AudioAsset("winning_call")]
AUDIO_PROMPT_TEMPLATES = {
@@ -70,27 +66,6 @@ def test_models(model: str, dtype: str, max_tokens: int) -> None:
vllm_model.generate_greedy(example_prompts, max_tokens)
@pytest.mark.parametrize("model", QUANTIZATION_MODELS)
@pytest.mark.parametrize("max_tokens", [5])
def test_quantization_models(model: str, max_tokens: int) -> None:
prompt = "The following numbers of the sequence " + ", ".join(
str(i) for i in range(1024)) + " are:"
example_prompts = [prompt]
# NOTE: Using quantized model repo id from modelscope encounters an issue,
# this pr (https://github.com/vllm-project/vllm/pull/19212) fix the issue,
# after it is being merged, there's no need to download model explicitly.
model_path = snapshot_download(model)
with VllmRunner(model_path,
max_model_len=8192,
enforce_eager=True,
dtype="auto",
gpu_memory_utilization=0.7,
quantization="ascend") as vllm_model:
vllm_model.generate_greedy(example_prompts, max_tokens)
@pytest.mark.parametrize("model", MULTIMODALITY_VL_MODELS)
def test_multimodal_vl(model, prompt_template, vllm_runner):
image = ImageAsset("cherry_blossom") \