From b2598c3271af2d55a948e29799e90bc0cfcda188 Mon Sep 17 00:00:00 2001 From: Ronald1995 Date: Thu, 7 Aug 2025 17:19:23 +0800 Subject: [PATCH] enable mm allreduce test (#2192) ### What this PR does / why we need it? This PR adds an e2e test for the npu_mm_all_reduce_base fusion kernel. ### Does this PR introduce _any_ user-facing change? no ### How was this patch tested? not involved - vLLM version: v0.10.0 - vLLM main: https://github.com/vllm-project/vllm/commit/5d5d419ca6aa55034eef0144f24e66789b486cb5 Signed-off-by: Ronald1995 --- tests/e2e/multicard/test_external_launcher.py | 38 +++++++++++++++++++ .../patch/worker/patch_common/patch_linear.py | 2 + 2 files changed, 40 insertions(+) diff --git a/tests/e2e/multicard/test_external_launcher.py b/tests/e2e/multicard/test_external_launcher.py index c5eecab..24c66bf 100644 --- a/tests/e2e/multicard/test_external_launcher.py +++ b/tests/e2e/multicard/test_external_launcher.py @@ -24,11 +24,14 @@ import os import subprocess import sys from pathlib import Path +from unittest.mock import patch import pytest +import torch_npu MODELS = ["Qwen/Qwen3-0.6B"] MOE_MODELS = ["Qwen/Qwen3-30B-A3B"] +DEVICE_NAME = torch_npu.npu.get_device_name(0)[:10] @@ -147,3 +150,38 @@ def test_external_launcher_and_sleepmode(): assert "Generated text:" in output assert "Sleep and wake up successfully!!" 
in output assert proc.returncode == 0 + + +@pytest.mark.skipif( + DEVICE_NAME != "Ascend910B", + reason="This test is only for Ascend910B devices.", +) +@pytest.mark.parametrize("model", MODELS) +@patch.dict(os.environ, {"VLLM_ASCEND_ENABLE_MATMUL_ALLREDUCE": "1"}) +def test_mm_allreduce(model): + script = Path( + __file__ + ).parent.parent.parent.parent / "examples" / "offline_external_launcher.py" + env = os.environ.copy() + cmd = [ + sys.executable, + str(script), + "--model", + model, + "--trust-remote-code", + ] + + print(f"Running subprocess: {' '.join(cmd)}") + proc = subprocess.run( + cmd, + env=env, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + timeout=600, + ) + + output = proc.stdout.decode() + print(output) + + assert "Generated text:" in output + assert proc.returncode == 0 diff --git a/vllm_ascend/patch/worker/patch_common/patch_linear.py b/vllm_ascend/patch/worker/patch_common/patch_linear.py index f5fbcec..57cc4e0 100644 --- a/vllm_ascend/patch/worker/patch_common/patch_linear.py +++ b/vllm_ascend/patch/worker/patch_common/patch_linear.py @@ -25,6 +25,7 @@ from torch.nn.parameter import Parameter from vllm.distributed import (get_tensor_model_parallel_rank, split_tensor_along_last_dim) from vllm.distributed.parallel_state import get_tp_group +from vllm.logger import logger from vllm.model_executor.layers.linear import RowParallelLinear from vllm_ascend import envs @@ -142,4 +143,5 @@ class AscendRowParallelLinear(RowParallelLinear): if envs.VLLM_ASCEND_ENABLE_MATMUL_ALLREDUCE: + logger.info("AscendRowParallelLinear: Matmul all-reduce is enabled. ") vllm.model_executor.layers.linear.RowParallelLinear = AscendRowParallelLinear