[Fusion] [Graph]Add Matmul Allreduce Rmsnorm fusion Pass (#5034)

This PR adds a `MatmulAllreduceRmsnorm` operator and introduces a graph
fusion pass for `matmul_allreduce_rmsnorm` operations. The
implementation includes a new configuration flag and a pattern-matching
pass built on `torch._inductor.pattern_matcher`.
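
For reviewers unfamiliar with the mechanism, below is a minimal, self-contained sketch of how a pass is registered with `torch._inductor.pattern_matcher`. It is illustrative only: the pattern matches a plain matmul followed by RMSNorm (the all_reduce step is omitted so the sketch stays single-device), the replacement is a placeholder rather than the fused kernel, and all names and shapes are made up. The actual pass in this PR matches the distributed sequence and rewrites it into the new `MatmulAllreduceRmsnorm` op, gated behind the new configuration flag.

```python
import torch
from torch._inductor.pattern_matcher import (PatternMatcherPass, fwd_only,
                                              register_replacement)

# Container for the registered pattern; the real pass in this PR is gated
# behind the new configuration flag.
matmul_rmsnorm_pass = PatternMatcherPass()


def search_pattern(x, weight, norm_weight):
    # Unfused sequence to match: matmul followed by RMSNorm
    # (the all_reduce in between is omitted to keep this single-device).
    out = torch.matmul(x, weight)
    variance = out.pow(2).mean(-1, keepdim=True)
    return out * torch.rsqrt(variance + 1e-6) * norm_weight


def replacement(x, weight, norm_weight):
    # Placeholder: the real pass emits the fused MatmulAllreduceRmsnorm
    # custom op here instead of re-running the unfused sequence.
    return search_pattern(x, weight, norm_weight)


example_inputs = [
    torch.empty(4, 8),   # activations
    torch.empty(8, 8),   # weight
    torch.empty(8),      # rmsnorm weight
]

register_replacement(search_pattern, replacement, example_inputs, fwd_only,
                     matmul_rmsnorm_pass)

# matmul_rmsnorm_pass.apply(graph_module) then rewrites every matching
# subgraph during torch.compile's post-grad passes.
```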

Co-authored-by: Trunrain <270250579@qq.com>

- vLLM version: v0.12.0
- vLLM main: ad32e3e19c

---------

Signed-off-by: wxsIcey <1790571317@qq.com>
Signed-off-by: tongrunze <t00574058@china.huawei.com>
Icey
2026-01-19 09:28:07 +08:00
committed by GitHub
parent 9cad1a8349
commit c929bd1e8d
8 changed files with 251 additions and 1 deletion


@@ -127,12 +127,15 @@
# 1. `vllm.distributed.parallel_state.GroupCoordinator`
# Why:
# vllm doesn't support all_to_all for GroupCoordinator.
# all_reduce in vLLM is not a custom op, which makes the MatmulAllReduceAddRMSNorm fusion fail.
# How:
# Add all_to_all implementation for GroupCoordinator.
# Make all_reduce a custom op.
# Related PR (if no, explain why):
# No, we should use the vLLM all2all manager to support all_to_all for NPU.
# Future Plan:
# Remove this patch when the refactor of the all2all manager is done.
# Remove this patch when vLLM supports all_reduce as a custom op.
#
# ** 3. File: worker/patch_minicpm.py **
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -276,3 +279,12 @@
# Future Plan:
# Remove this patch when cann fix the gather bug.
#
# ** 13. File: worker/patch_unquantized_gemm.py **
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 1. `vllm.model_executor.layers.utils.default_unquantized_gemm`
# Why:
# unquantized_gemm in vLLM is not a custom op, which makes the MatmulAllReduceAddRMSNorm fusion fail.
# How:
# Make unquantized_gemm a custom op.
# Future Plan:
# Remove this patch when vLLM supports the operator as a custom op.
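
Both patch notes above come down to the same constraint: `torch._inductor.pattern_matcher` can only anchor on ops that survive graph capture as single nodes, and a plain `torch.nn.functional.linear` call does not. A small illustration (not part of this commit) of what an unregistered GEMM looks like after tracing:

```python
import torch
from torch.fx.experimental.proxy_tensor import make_fx


def plain_gemm(x: torch.Tensor, w: torch.Tensor) -> torch.Tensor:
    return torch.nn.functional.linear(x, w)


gm = make_fx(plain_gemm)(torch.randn(2, 4), torch.randn(8, 4))
# The captured graph typically contains only generic aten nodes (e.g. t/mm),
# leaving no single stable node for the fusion pattern to anchor on.
print(gm.graph)
```

Once the call is routed through a registered custom op, as in the `patch_unquantized_gemm.py` file added below, the traced graph instead keeps a single `torch.ops.vllm.unquantized_gemm` node that the fusion pattern can target.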


@@ -22,6 +22,7 @@ if HAS_TRITON:
# isort: off
import vllm_ascend.patch.platform.patch_sched_yield # noqa
import vllm_ascend.patch.worker.patch_unquantized_gemm # noqa
import vllm_ascend.patch.worker.patch_bert # noqa
import vllm_ascend.patch.worker.patch_distributed # noqa
import vllm_ascend.patch.worker.patch_deepseek # noqa


@@ -112,5 +112,10 @@ class GroupCoordinatorPatch(GroupCoordinator):
                                   gather_dim, scatter_sizes,
                                   gather_sizes)

    def all_reduce(self, input_):
        if self.world_size == 1:
            return input_
        return torch.ops.vllm.all_reduce(input_, group_name=self.unique_name)


vllm.distributed.parallel_state.GroupCoordinator = GroupCoordinatorPatch


@@ -0,0 +1,57 @@
#
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
# This file is a part of the vllm-ascend project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import torch
import vllm.model_executor.layers.utils
from vllm.utils.torch_utils import direct_register_custom_op


def unquantized_gemm(
    x: torch.Tensor,
    weight: torch.Tensor,
    bias: torch.Tensor | None = None,
) -> torch.Tensor:
    return torch.nn.functional.linear(x, weight, bias)


def unquantized_gemm_fake(
    x: torch.Tensor,
    weight: torch.Tensor,
    bias: torch.Tensor | None = None,
) -> torch.Tensor:
    output_shape = (x.shape[0], weight.shape[0])
    return torch.empty(output_shape, dtype=x.dtype, device=x.device)


direct_register_custom_op(op_name="unquantized_gemm",
                          op_func=unquantized_gemm,
                          fake_impl=unquantized_gemm_fake,
                          mutates_args=[],
                          dispatch_key="PrivateUse1")


def default_unquantized_gemm(
    layer: torch.nn.Module,
    x: torch.Tensor,
    weight: torch.Tensor,
    bias: torch.Tensor | None = None,
) -> torch.Tensor:
    if x.device.type == "npu":
        return torch.ops.vllm.unquantized_gemm(x, weight, bias)
    else:
        return torch.nn.functional.linear(x, weight, bias)


vllm.model_executor.layers.utils.default_unquantized_gemm = default_unquantized_gemm
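
A hedged usage sketch for the patched entry point above: NPU tensors are routed through the registered custom op so the compiled graph keeps one matchable node, while other devices fall back to eager `F.linear`. The snippet assumes `vllm` and `vllm-ascend` are importable; on a CPU-only machine it only exercises the fallback branch.

```python
import torch

import vllm_ascend.patch.worker.patch_unquantized_gemm  # noqa: F401  (applies the patch)
from vllm.model_executor.layers.utils import default_unquantized_gemm

x = torch.randn(2, 16)
w = torch.randn(8, 16)

# CPU tensors take the torch.nn.functional.linear fallback branch;
# on NPU the same call dispatches to torch.ops.vllm.unquantized_gemm.
out = default_unquantized_gemm(layer=None, x=x, weight=w)
print(out.shape)  # torch.Size([2, 8])
```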