[CI] cleanup single/multi-card test (#5623)

1. Speed up the e2e light test.
2. Create `2-cards` and `4-cards` folders under multicard (see the layout sketch below).
3. Move ops tests to nightly.
4. Run tests in alphabetical order.
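
For reference, a sketch of the layout this implies (folder names from the points above; the file placement is illustrative, since the diff below does not show the renames themselves):

```
tests/e2e/multicard/
├── 2-cards/    # tests that need 2 NPUs
│   └── test_*.py
└── 4-cards/    # tests that need 4 NPUs
    └── test_*.py
```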

- vLLM version: v0.13.0
- vLLM main: 8be6432bda

Signed-off-by: wangxiyuan <wangxiyuan1007@gmail.com>
wangxiyuan
2026-01-07 14:13:34 +08:00
committed by GitHub
parent 1afbc01ed4
commit 6f7a81cd9f
30 changed files with 114 additions and 117 deletions

View File

@@ -40,7 +40,7 @@ DEVICE_NAME = torch_npu.npu.get_device_name(0)[:10]
 def test_qwen3_external_launcher(model):
     script = Path(
         __file__
-    ).parent.parent.parent.parent / "examples" / "offline_external_launcher.py"
+    ).parent.parent.parent.parent.parent / "examples" / "offline_external_launcher.py"
     env = os.environ.copy()
     # TODO: Change to 2 when ci machine has 4 cards
     cmd = [
@@ -81,7 +81,7 @@ def test_qwen3_external_launcher(model):
 def test_qwen3_moe_external_launcher_ep_tp2(model):
     script = Path(
         __file__
-    ).parent.parent.parent.parent / "examples" / "offline_external_launcher.py"
+    ).parent.parent.parent.parent.parent / "examples" / "offline_external_launcher.py"
     env = os.environ.copy()
     # TODO: Change to 2 when ci machine has 4 cards
     cmd = [
@@ -112,7 +112,7 @@ def test_qwen3_moe_external_launcher_ep_tp2(model):
 def test_qwen3_external_launcher_with_sleepmode():
     script = Path(
         __file__
-    ).parent.parent.parent.parent / "examples" / "offline_external_launcher.py"
+    ).parent.parent.parent.parent.parent / "examples" / "offline_external_launcher.py"
     env = os.environ.copy()
     # TODO: Change to 2 when ci machine has 4 cards
     cmd = [
@@ -157,7 +157,7 @@ def test_qwen3_external_launcher_with_sleepmode():
 def test_qwen3_external_launcher_with_sleepmode_level2():
     script = Path(
         __file__
-    ).parent.parent.parent.parent / "examples" / "offline_external_launcher.py"
+    ).parent.parent.parent.parent.parent / "examples" / "offline_external_launcher.py"
     env = os.environ.copy()
     model_path = snapshot_download("Qwen/Qwen3-8B")
     # TODO: Add moe model test
@@ -213,7 +213,7 @@ def test_qwen3_external_launcher_with_sleepmode_level2():
 def test_qwen3_external_launcher_with_matmul_allreduce(model):
     script = Path(
         __file__
-    ).parent.parent.parent.parent / "examples" / "offline_external_launcher.py"
+    ).parent.parent.parent.parent.parent / "examples" / "offline_external_launcher.py"
     env = os.environ.copy()
     cmd = [
         sys.executable,
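
Every hunk in the file above is the same one-line change: each test locates `examples/offline_external_launcher.py` relative to its own `__file__`, so moving the test one directory deeper (into a `2-cards`/`4-cards` subfolder) needs one extra `.parent` hop. A minimal sketch of the arithmetic, with an illustrative path (the actual file name is not shown in this diff):

```python
from pathlib import Path

# Hypothetical post-move location:
#   <repo>/tests/e2e/multicard/2-cards/test_external_launcher.py
test_file = Path("/repo/tests/e2e/multicard/2-cards/test_external_launcher.py")

# Before the move, four .parent hops climbed from tests/e2e/multicard/<file>
# back to the repo root; one extra directory level makes it five.
repo_root = test_file.parent.parent.parent.parent.parent
print(repo_root / "examples" / "offline_external_launcher.py")
# -> /repo/examples/offline_external_launcher.py
```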

View File

@@ -16,11 +16,6 @@
 # This file is a part of the vllm-ascend project.
 # Adapted from vllm/tests/basic_correctness/test_basic_correctness.py
 #
-"""Compare the short outputs of HF and vLLM when using greedy sampling.
-
-Run `pytest tests/e2e/multicard/test_qwen3_moe.py`.
-"""
-
 import os
 
 from vllm import SamplingParams

View File

@@ -45,10 +45,6 @@ DEEPSEEK_W4A8_MODELS = [
     "vllm-ascend/DeepSeek-V3.1-W4A8-puring",
 ]
-
-KIMI_W4A16_MODELS = [
-    "vllm-ascend/Kimi-K2-Thinking-Pruning",
-]
 
 
 def test_deepseek_multistream_moe_tp2():
     example_prompts = [
@@ -216,24 +212,3 @@ def test_qwen3_dense_prefetch_mlp_weight_tp2(model):
         quantization="ascend",
     ) as vllm_model:
         vllm_model.generate_greedy(example_prompts, max_tokens)
-
-
-@pytest.mark.parametrize("model", KIMI_W4A16_MODELS)
-def test_kimi_k2_thinking_w4a16_tp4(model):
-    example_prompts = [
-        "Hello, my name is",
-    ]
-    max_tokens = 5
-    with VllmRunner(
-        model,
-        max_model_len=8192,
-        dtype="auto",
-        tensor_parallel_size=4,
-        enable_expert_parallel=True,
-        compilation_config={
-            "cudagraph_mode": "FULL_DECODE_ONLY",
-            "cudagraph_capture_sizes": [1],
-        },
-    ) as vllm_model:
-        vllm_model.generate_greedy(example_prompts, max_tokens)
-
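
Note that `test_kimi_k2_thinking_w4a16_tp4` is not dropped by this deletion: it reappears later in this commit as a standalone new file, presumably so the 4-card job can schedule it separately from the 2-card tests in this module.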

View File

@@ -34,7 +34,7 @@ MODELS = ["Qwen/Qwen3-30B-A3B"]
 def test_qwen3_offline_load_and_sleepmode_tp2(model):
     script = Path(
         __file__
-    ).parent.parent.parent.parent / "examples" / "offline_external_launcher.py"
+    ).parent.parent.parent.parent.parent / "examples" / "offline_external_launcher.py"
     env = os.environ.copy()
     cmd = [
         sys.executable,

View File

@@ -16,10 +16,6 @@
 # This file is a part of the vllm-ascend project.
 # Adapted from vllm/tests/basic_correctness/test_basic_correctness.py
 #
-"""Compare the short outputs of HF and vLLM when using greedy sampling.
-
-Run `pytest tests/e2e/multicard/test_quantization.py`.
-"""
 from modelscope import snapshot_download  # type: ignore
 
 from tests.e2e.conftest import VllmRunner

View File

@@ -16,11 +16,6 @@
 # This file is a part of the vllm-ascend project.
 # Adapted from vllm/tests/basic_correctness/test_basic_correctness.py
 #
-"""Compare the short outputs of HF and vLLM when using greedy sampling.
-
-Run `pytest tests/e2e/multicard/test_qwen3_moe.py`.
-"""
-
 import json
 import os
 from unittest.mock import patch

View File

@@ -16,11 +16,6 @@
 # This file is a part of the vllm-ascend project.
 # Adapted from vllm/tests/basic_correctness/test_basic_correctness.py
 #
-"""Compare the short outputs of HF and vLLM when using greedy sampling.
-
-Run `pytest tests/e2e/multicard/test_qwen3_moe.py`.
-"""
-
 import os
 
 from vllm import SamplingParams

View File

@@ -16,11 +16,6 @@
 # This file is a part of the vllm-ascend project.
 # Adapted from vllm/tests/basic_correctness/test_basic_correctness.py
 #
-"""Compare the short outputs of HF and vLLM when using greedy sampling.
-
-Run `pytest tests/e2e/multicard/test_qwen3_moe.py`.
-"""
-
 import os
 import random
 import string

View File

@@ -1,7 +1,3 @@
-"""
-Run `pytest tests/e2e/multicard/test_data_parallel_tp2.py`.
-"""
-
 import os
 import subprocess
 import sys

View File

@@ -0,0 +1,44 @@
+#
+# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
+# Copyright 2023 The vLLM team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# This file is a part of the vllm-ascend project.
+# Adapted from vllm/tests/basic_correctness/test_basic_correctness.py
+#
+
+import os
+
+from tests.e2e.conftest import VllmRunner
+
+os.environ["PYTORCH_NPU_ALLOC_CONF"] = "max_split_size_mb:256"
+os.environ["VLLM_WORKER_MULTIPROC_METHOD"] = "spawn"
+
+
+def test_kimi_k2_thinking_w4a16_tp4():
+    example_prompts = [
+        "Hello, my name is",
+    ]
+    max_tokens = 5
+    with VllmRunner(
+        "vllm-ascend/Kimi-K2-Thinking-Pruning",
+        max_model_len=8192,
+        dtype="auto",
+        tensor_parallel_size=4,
+        enable_expert_parallel=True,
+        compilation_config={
+            "cudagraph_mode": "FULL_DECODE_ONLY",
+            "cudagraph_capture_sizes": [1],
+        },
+    ) as vllm_model:
+        vllm_model.generate_greedy(example_prompts, max_tokens)
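
This is the relocated Kimi-K2 test deleted above, now self-contained: the environment variables are set at module import time so they are in place before vLLM spawns worker processes, and the hard-coded model id replaces the old parametrization. One way to run just this file locally, assuming a path under the new `4-cards` folder (the diff does not show file names):

```python
import sys

import pytest

# Hypothetical path; adjust to wherever the file actually lives.
sys.exit(pytest.main(["tests/e2e/multicard/4-cards/test_kimi_k2_thinking.py", "-s"]))
```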

View File

@@ -16,11 +16,6 @@
 # This file is a part of the vllm-ascend project.
 # Adapted from vllm/tests/basic_correctness/test_basic_correctness.py
 #
-"""Compare the short outputs of HF and vLLM when using greedy sampling.
-
-Run `pytest tests/e2e/multicard/test_qwen3_next.py`.
-"""
-
 import os
 from unittest.mock import patch

View File

@@ -36,7 +36,7 @@ MODELS = [
 @pytest.mark.parametrize("model", MODELS)
 @pytest.mark.parametrize("max_tokens", [32])
-def test_models_output_between_eager_and_aclgraph(
+def test_models_output(
     model: str,
     max_tokens: int,
 ) -> None:

View File

@@ -46,7 +46,7 @@ def test_qwen3_w8a8_quant():
         outputs_0_lst=vllm_target_outputs,
         outputs_1_lst=vllm_quant_w8a8_outputs,
         name_0="vllm_target_outputs",
-        name_1="vllm_w8a16_outputs",
+        name_1="vllm_quant_w8a8_outputs",
     )
@@ -75,5 +75,5 @@ def test_qwen3_dense_w8a16():
         outputs_0_lst=vllm_target_outputs,
         outputs_1_lst=vllm_quant_w8a16_outputs,
         name_0="vllm_target_outputs",
-        name_1="vllm_w8a16_outputs",
+        name_1="vllm_quant_w8a16_outputs",
     )