[Graph][Fusion] Integrating inductor pass and npugraph ex pass (#6354)

### What this PR does / why we need it?
Integrates the inductor pass and the npugraph ex pass; see the RFC:
https://github.com/vllm-project/vllm-ascend/issues/6347
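
For reference, a minimal sketch of the flow this PR converges on (names come from the new files in this diff; `vllm_config` is assumed to be a populated `VllmConfig`): each fusion pattern is now defined once as a `BasePattern` subclass, and a single `register` call wires it into both compile paths.

```python
# Sketch: one pattern definition now feeds both compile paths.
from torch._inductor.pattern_matcher import PatternMatcherPass

pm_pass = PatternMatcherPass()
pattern = AddRMSNormQuantPattern(vllm_config)  # any BasePattern subclass
pattern.register(pm_pass)
# Internally register() calls pm.register_replacement(...) for the inductor
# pass and torchair.register_replacement(...) for the npugraph ex pass.
```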

### Does this PR introduce _any_ user-facing change?
N/A

### How was this patch tested?
All tests passed.

- vLLM version: v0.14.1
- vLLM main: dc917cceb8

---------

Signed-off-by: wxsIcey <1790571317@qq.com>
Author: Icey
Date: 2026-02-13 15:34:55 +08:00
Committed by: GitHub
Parent: 87a0b7b7c7
Commit: 7164990904
16 changed files with 220 additions and 909 deletions

View File

@@ -15,26 +15,37 @@
# limitations under the License.
#
import torch
import torch._inductor.pattern_matcher as pm
from torch._inductor.pattern_matcher import PatternMatcherPass, PatternPrettyPrinter
from torch._inductor.pattern_matcher import Match, PatternMatcherPass, PatternPrettyPrinter
from vllm.config import VllmConfig
from vllm.config.compilation import Range
from vllm.distributed import get_tensor_model_parallel_world_size, tensor_model_parallel_all_reduce
from vllm.distributed.parallel_state import get_tp_group
from vllm.logger import logger
from vllm_ascend.compilation.passes.base_pattern import BasePattern
from vllm_ascend.compilation.passes.utils.npugraph_ex_utils_check import extra_stream_scope_check
from vllm_ascend.utils import vllm_version_is
if vllm_version_is("0.15.0"):
from vllm.compilation.inductor_pass import get_pass_context # type: ignore
from vllm.compilation.vllm_inductor_pass import VllmInductorPass # type: ignore
else:
from vllm.compilation.passes.inductor_pass import get_pass_context
from vllm.compilation.passes.vllm_inductor_pass import VllmInductorPass
# The computation-communication tiling block size is 512
ALLREDUCE_NORM_FUSE_THREHOLD = 512
class MiddleLayerMatmulAllReduceAddRMSNormPattern:
def get_compile_range_and_extra_stream_check():
def check_func(match: Match) -> bool:
compile_range = get_pass_context().compile_range
return extra_stream_scope_check(match) and compile_range.start > ALLREDUCE_NORM_FUSE_THREHOLD
return check_func
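
In other words, the returned predicate combines two gates. A behavioral sketch of the threshold half (with the stream check factored out):

```python
# Sketch: fuse only when the whole compile range exceeds the 512
# computation-communication tiling block, so small-batch graphs keep the
# plain AllReduce.
def range_allows_fusion(range_start: int) -> bool:
    return range_start > ALLREDUCE_NORM_FUSE_THREHOLD

assert range_allows_fusion(1024) is True   # large ranges may fuse
assert range_allows_fusion(512) is False   # at/below the block: no fusion
```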
class MiddleLayerMatmulAllReduceAddRMSNormPattern(BasePattern):
"""
Recognizes the Matmul+AllReduce+AddRMSNorm computation pattern.
The fused operator replaces the AllReduce with a two-stage ReduceScatter+AllGather communication.
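
The decomposition the docstring relies on can be checked numerically on a single process (an illustration only; the real collectives run over the TP group):

```python
import torch

# Single-process illustration: ReduceScatter followed by AllGather produces
# the same result as AllReduce on stacked per-rank tensors.
world = 4
xs = [torch.randn(8, 16) for _ in range(world)]      # one tensor per rank
allreduce = sum(xs)                                  # AllReduce: full sum
shards = [sum(x.chunk(world, dim=0)[r] for x in xs)  # ReduceScatter: rank r
          for r in range(world)]                     # holds reduced shard r
allgather = torch.cat(shards, dim=0)                 # AllGather: reassemble
assert torch.allclose(allreduce, allgather)
```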
@@ -58,7 +69,7 @@ class MiddleLayerMatmulAllReduceAddRMSNormPattern:
rms_norm_weight = torch.randn(hidden_size, device="npu")
return [x, weight, residual, rms_norm_weight]
def register(self, pm_pass: PatternMatcherPass):
def get_pattern(self):
def pattern(x, weight, residual, rms_norm_weight):
mm = torch.ops.vllm.unquantized_gemm(x, weight, None)
all_reduce_ = tensor_model_parallel_all_reduce(mm)
@@ -68,6 +79,9 @@ class MiddleLayerMatmulAllReduceAddRMSNormPattern:
return out0, out1
return pattern
def get_replacement(self):
def replacement(x, weight, residual, rms_norm_weight):
out0, out1 = torch.ops._C_ascend.matmul_allreduce_add_rmsnorm(
x,
@@ -83,13 +97,15 @@ class MiddleLayerMatmulAllReduceAddRMSNormPattern:
)
return out0, out1
pm.register_replacement(pattern, replacement, self.get_inputs(), pm.fwd_only, pm_pass)
return replacement
def get_extra_stream_scope_check(self):
return get_compile_range_and_extra_stream_check()
class LastLayerMatmulAllReduceAddRMSNormPattern:
class LastLayerMatmulAllReduceAddRMSNormPattern(BasePattern):
def __init__(self, vllm_config, eps=1e-6):
self.vllm_config = vllm_config
self.eps = eps
super().__init__(vllm_config, eps)
device_group = get_tp_group().device_group
backend = device_group._get_backend(torch.device("npu"))
self.local_rank = torch.distributed.get_rank(group=device_group)
@@ -105,7 +121,7 @@ class LastLayerMatmulAllReduceAddRMSNormPattern:
rms_norm_weight = torch.randn(hidden_size, device="npu")
return [x, weight, residual, rms_norm_weight]
def register(self, pm_pass: PatternMatcherPass):
def get_pattern(self):
def pattern(x, weight, residual, rms_norm_weight):
mm = torch.ops.vllm.unquantized_gemm(x, weight, None)
all_reduce_ = tensor_model_parallel_all_reduce(mm)
@@ -113,6 +129,9 @@ class LastLayerMatmulAllReduceAddRMSNormPattern:
return output[0]
return pattern
def get_replacement(self):
def replacement(x, weight, residual, rms_norm_weight):
out0, _ = torch.ops._C_ascend.matmul_allreduce_add_rmsnorm(
x,
@@ -126,9 +145,11 @@ class LastLayerMatmulAllReduceAddRMSNormPattern:
True,
False,
)
return out0
pm.register_replacement(pattern, replacement, self.get_inputs(), pm.fwd_only, pm_pass)
return replacement
def get_extra_stream_scope_check(self):
return get_compile_range_and_extra_stream_check()
class MatmulAllReduceAddRMSNormPass(VllmInductorPass):

View File

@@ -0,0 +1,59 @@
from abc import ABC, abstractmethod
from collections.abc import Callable
import torch
import torch._inductor.pattern_matcher as pm
import torchair
from torch._inductor.pattern_matcher import PatternMatcherPass
from vllm.config import VllmConfig
from vllm_ascend.compilation.passes.utils.npugraph_ex_utils_check import extra_stream_scope_check
# Global set to track registered patterns and prevent duplicates
_registered_patterns: set[str] = set()
class BasePattern(ABC):
def __init__(self, vllm_config: VllmConfig, eps: float = 1e-6):
self.vllm_config = vllm_config
self.dtype = vllm_config.model_config.dtype
self.eps = eps
@abstractmethod
def get_inputs(self) -> list[torch.Tensor]:
pass
@abstractmethod
def get_pattern(self) -> Callable:
pass
@abstractmethod
def get_replacement(self) -> Callable:
pass
def get_extra_stream_scope_check(self):
return extra_stream_scope_check
def register(self, pm_pass: PatternMatcherPass) -> None:
# Create a unique identifier for this pattern based on class name and eps
pattern_id = f"{self.__class__.__name__}_{self.eps}"
# Skip registration if this pattern has already been registered globally
if pattern_id in _registered_patterns:
return
pattern_fn = self.get_pattern()
replacement_fn = self.get_replacement()
example_inputs = self.get_inputs()
pm.register_replacement(pattern_fn, replacement_fn, example_inputs, pm.fwd_only, pm_pass)
torchair.register_replacement(
search_fn=pattern_fn,
replace_fn=replacement_fn,
example_inputs=example_inputs,
extra_check=self.get_extra_stream_scope_check(),
)
# Mark this pattern as registered
_registered_patterns.add(pattern_id)
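
To make the contract concrete, a hypothetical minimal subclass (the class and its toy ops are illustrative, not part of this PR; `vllm_config` is assumed to be a populated `VllmConfig`, and the torchair registration assumes an NPU environment):

```python
import torch
from torch._inductor.pattern_matcher import PatternMatcherPass

class AddReluPattern(BasePattern):
    """Toy pattern: rewrite relu(x + y) into an equivalent clamped add."""

    def get_inputs(self) -> list[torch.Tensor]:
        # Example inputs used to trace the pattern and replacement graphs.
        return [torch.randn(4, 4), torch.randn(4, 4)]

    def get_pattern(self):
        def pattern(x, y):
            return torch.relu(x + y)
        return pattern

    def get_replacement(self):
        def replacement(x, y):
            # Stand-in for a fused kernel; clamp_min(0) equals relu here.
            return (x + y).clamp_min(0)
        return replacement

# register() dedupes on class name + eps, then registers the pair with both
# the inductor pattern matcher and torchair's npugraph ex pass.
AddReluPattern(vllm_config).register(PatternMatcherPass())
```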

View File

@@ -16,12 +16,12 @@
# limitations under the License.
#
import torch
import torch._inductor.pattern_matcher as pm
from torch._inductor.pattern_matcher import PatternMatcherPass
from vllm.config import VllmConfig
from vllm.config.compilation import Range
from vllm.logger import logger
from vllm_ascend.compilation.passes.base_pattern import BasePattern
from vllm_ascend.utils import enable_custom_op, vllm_version_is
if vllm_version_is("0.15.0"):
@@ -30,11 +30,9 @@ else:
from vllm.compilation.passes.vllm_inductor_pass import VllmInductorPass
class AddRMSNormQuantPattern:
class AddRMSNormQuantPattern(BasePattern):
def __init__(self, vllm_config: VllmConfig, eps: float = 1e-6):
self.vllm_config = vllm_config
self.dtype = vllm_config.model_config.dtype
self.eps = eps
super().__init__(vllm_config, eps)
def get_inputs(self):
"""
@@ -48,7 +46,7 @@ class AddRMSNormQuantPattern:
offset = torch.zeros(4, device="npu", dtype=self.dtype)
return [rms_norm_input, residual, rms_norm_weight, scale, scale_reciprocal, offset]
def register(self, pm_pass: PatternMatcherPass):
def get_pattern(self):
def pattern(
rms_norm_input: torch.Tensor,
residual: torch.Tensor,
@@ -68,6 +66,9 @@ class AddRMSNormQuantPattern:
quantized_output = torch.ops.vllm.quantize(out0, scale, scale_reciprocal, offset)
return quantized_output, out1
return pattern
def get_replacement(self):
def replacement(
rms_norm_input: torch.Tensor,
residual: torch.Tensor,
@@ -86,14 +87,12 @@ class AddRMSNormQuantPattern:
out1 = output[2]
return quantized_output, out1
pm.register_replacement(pattern, replacement, self.get_inputs(), pm.fwd_only, pm_pass)
return replacement
class AddRMSNormQuantPatternWithBias:
class AddRMSNormQuantPatternWithBias(BasePattern):
def __init__(self, vllm_config: VllmConfig, eps: float = 1e-6):
self.vllm_config = vllm_config
self.dtype = vllm_config.model_config.dtype
self.eps = eps
super().__init__(vllm_config, eps)
def get_inputs(self):
"""
@@ -108,7 +107,7 @@ class AddRMSNormQuantPatternWithBias:
offset = torch.zeros(4, device="npu", dtype=self.dtype)
return [rms_norm_input, residual, rms_norm_weight, scale, scale_reciprocal, offset, rmsnorm_bias]
def register(self, pm_pass: PatternMatcherPass):
def get_pattern(self):
def pattern(
rms_norm_input: torch.Tensor,
residual: torch.Tensor,
@@ -129,6 +128,9 @@ class AddRMSNormQuantPatternWithBias:
quantized_output = torch.ops.vllm.quantize(out0, scale, scale_reciprocal, offset)
return quantized_output, out1
return pattern
def get_replacement(self):
def replacement(
rms_norm_input: torch.Tensor,
residual: torch.Tensor,
@@ -148,14 +150,12 @@ class AddRMSNormQuantPatternWithBias:
out1 = output[2]
return quantized_output, out1
pm.register_replacement(pattern, replacement, self.get_inputs(), pm.fwd_only, pm_pass)
return replacement
class AddRMSNormQuantSPPattern:
class AddRMSNormQuantSPPattern(BasePattern):
def __init__(self, vllm_config: VllmConfig, eps: float = 1e-6):
self.vllm_config = vllm_config
self.dtype = vllm_config.model_config.dtype
self.eps = eps
super().__init__(vllm_config, eps)
def get_inputs(self):
"""
@@ -169,7 +169,7 @@ class AddRMSNormQuantSPPattern:
offset = torch.zeros(4, device="npu", dtype=self.dtype)
return [rms_norm_input, residual, rms_norm_weight, scale, scale_reciprocal, offset]
def register(self, pm_pass: PatternMatcherPass):
def get_pattern(self):
def pattern(
rms_norm_input: torch.Tensor,
residual: torch.Tensor,
@@ -190,6 +190,9 @@ class AddRMSNormQuantSPPattern:
quantized_output = torch.ops.vllm.quantize(out0, scale, scale_reciprocal, offset)
return quantized_output, out1
return pattern
def get_replacement(self):
def replacement(
rms_norm_input: torch.Tensor,
residual: torch.Tensor,
@@ -209,14 +212,12 @@ class AddRMSNormQuantSPPattern:
quantized_output = torch.ops.vllm.maybe_all_gather_and_maybe_unpad(quantized_output, True)
return quantized_output, out1
pm.register_replacement(pattern, replacement, self.get_inputs(), pm.fwd_only, pm_pass)
return replacement
class AddRMSNormQuantSPPatternWithBias:
class AddRMSNormQuantSPPatternWithBias(BasePattern):
def __init__(self, vllm_config: VllmConfig, eps: float = 1e-6):
self.vllm_config = vllm_config
self.dtype = vllm_config.model_config.dtype
self.eps = eps
super().__init__(vllm_config, eps)
def get_inputs(self):
"""
@@ -231,7 +232,7 @@ class AddRMSNormQuantSPPatternWithBias:
offset = torch.zeros(4, device="npu", dtype=self.dtype)
return [rms_norm_input, residual, rms_norm_weight, scale, scale_reciprocal, offset, rmsnorm_bias]
def register(self, pm_pass: PatternMatcherPass):
def get_pattern(self):
def pattern(
rms_norm_input: torch.Tensor,
residual: torch.Tensor,
@@ -253,6 +254,9 @@ class AddRMSNormQuantSPPatternWithBias:
quantized_output = torch.ops.vllm.quantize(out0, scale, scale_reciprocal, offset)
return quantized_output, out1
return pattern
def get_replacement(self):
def replacement(
rms_norm_input: torch.Tensor,
residual: torch.Tensor,
@@ -273,14 +277,12 @@ class AddRMSNormQuantSPPatternWithBias:
quantized_output = torch.ops.vllm.maybe_all_gather_and_maybe_unpad(quantized_output, True)
return quantized_output, out1
pm.register_replacement(pattern, replacement, self.get_inputs(), pm.fwd_only, pm_pass)
return replacement
class AddRMSNormDynamicQuantPattern:
class AddRMSNormDynamicQuantPattern(BasePattern):
def __init__(self, vllm_config: VllmConfig, eps: float = 1e-6):
self.vllm_config = vllm_config
self.dtype = vllm_config.model_config.dtype
self.eps = eps
super().__init__(vllm_config, eps)
def get_inputs(self):
"""
@@ -291,7 +293,7 @@ class AddRMSNormDynamicQuantPattern:
rms_norm_weight = torch.randn(4, device="npu", dtype=self.dtype)
return [rms_norm_input, residual, rms_norm_weight]
def register(self, pm_pass: PatternMatcherPass):
def get_pattern(self):
def pattern(rms_norm_input: torch.Tensor, residual: torch.Tensor, rms_norm_weight: torch.Tensor):
"""
Pattern for AddRMSNormQuant fusion.
@@ -302,6 +304,9 @@ class AddRMSNormDynamicQuantPattern:
quantized_output = torch.ops.npu.npu_dynamic_quant(out0)
return quantized_output[0], quantized_output[1], out1
return pattern
def get_replacement(self):
def replacement(rms_norm_input: torch.Tensor, residual: torch.Tensor, rms_norm_weight: torch.Tensor):
"""
Replacement for the AddRMSNormQuant fusion.
@@ -315,14 +320,12 @@ class AddRMSNormDynamicQuantPattern:
output[2],
)
pm.register_replacement(pattern, replacement, self.get_inputs(), pm.fwd_only, pm_pass)
return replacement
class AddRMSNormDynamicQuantPatternWithBias:
class AddRMSNormDynamicQuantPatternWithBias(BasePattern):
def __init__(self, vllm_config: VllmConfig, eps: float = 1e-6):
self.vllm_config = vllm_config
self.dtype = vllm_config.model_config.dtype
self.eps = eps
super().__init__(vllm_config, eps)
def get_inputs(self):
"""
@@ -334,7 +337,7 @@ class AddRMSNormDynamicQuantPatternWithBias:
rmsnorm_bias = torch.randn(4, device="npu", dtype=self.dtype)
return [rms_norm_input, residual, rms_norm_weight, rmsnorm_bias]
def register(self, pm_pass: PatternMatcherPass):
def get_pattern(self):
def pattern(
rms_norm_input: torch.Tensor,
residual: torch.Tensor,
@@ -352,6 +355,9 @@ class AddRMSNormDynamicQuantPatternWithBias:
quantized_output = torch.ops.npu.npu_dynamic_quant(out0)
return quantized_output[0], quantized_output[1], out1
return pattern
def get_replacement(self):
def replacement(
rms_norm_input: torch.Tensor,
residual: torch.Tensor,
@@ -370,14 +376,12 @@ class AddRMSNormDynamicQuantPatternWithBias:
output[2],
)
pm.register_replacement(pattern, replacement, self.get_inputs(), pm.fwd_only, pm_pass)
return replacement
class AddRMSNormDynamicQuantSPPattern:
class AddRMSNormDynamicQuantSPPattern(BasePattern):
def __init__(self, vllm_config: VllmConfig, eps: float = 1e-6):
self.vllm_config = vllm_config
self.dtype = vllm_config.model_config.dtype
self.eps = eps
super().__init__(vllm_config, eps)
def get_inputs(self):
"""
@@ -388,7 +392,7 @@ class AddRMSNormDynamicQuantSPPattern:
rms_norm_weight = torch.randn(4, device="npu", dtype=self.dtype)
return [rms_norm_input, residual, rms_norm_weight]
def register(self, pm_pass: PatternMatcherPass):
def get_pattern(self):
def pattern(rms_norm_input: torch.Tensor, residual: torch.Tensor, rms_norm_weight: torch.Tensor):
"""
Pattern for AddRMSNormQuant fusion.
@@ -400,6 +404,9 @@ class AddRMSNormDynamicQuantSPPattern:
quantized_output = torch.ops.npu.npu_dynamic_quant(out0)
return quantized_output[0], quantized_output[1], out1
return pattern
def get_replacement(self):
def replacement(rms_norm_input: torch.Tensor, residual: torch.Tensor, rms_norm_weight: torch.Tensor):
"""
Replacement for the AddRMSNormQuant fusion.
@@ -412,14 +419,12 @@ class AddRMSNormDynamicQuantSPPattern:
out3 = torch.ops.vllm.maybe_all_gather_and_maybe_unpad(out3, True)
return quantized_output, out3, output[2]
pm.register_replacement(pattern, replacement, self.get_inputs(), pm.fwd_only, pm_pass)
return replacement
class AddRMSNormDynamicQuantSPPatternWithBias:
class AddRMSNormDynamicQuantSPPatternWithBias(BasePattern):
def __init__(self, vllm_config: VllmConfig, eps: float = 1e-6):
self.vllm_config = vllm_config
self.dtype = vllm_config.model_config.dtype
self.eps = eps
super().__init__(vllm_config, eps)
def get_inputs(self):
"""
@@ -431,7 +436,7 @@ class AddRMSNormDynamicQuantSPPatternWithBias:
rmsnorm_bias = torch.randn(4, device="npu", dtype=self.dtype)
return [rms_norm_input, residual, rms_norm_weight, rmsnorm_bias]
def register(self, pm_pass: PatternMatcherPass):
def get_pattern(self):
def pattern(
rms_norm_input: torch.Tensor,
residual: torch.Tensor,
@@ -450,6 +455,9 @@ class AddRMSNormDynamicQuantSPPatternWithBias:
quantized_output = torch.ops.npu.npu_dynamic_quant(out0)
return quantized_output[0], quantized_output[1], out1
return pattern
def get_replacement(self):
def replacement(
rms_norm_input: torch.Tensor,
residual: torch.Tensor,
@@ -467,7 +475,7 @@ class AddRMSNormDynamicQuantSPPatternWithBias:
out3 = torch.ops.vllm.maybe_all_gather_and_maybe_unpad(out3, True)
return quantized_output, out3, output[2]
pm.register_replacement(pattern, replacement, self.get_inputs(), pm.fwd_only, pm_pass)
return replacement
class AddRMSNormQuantFusionPass(VllmInductorPass):

View File

@@ -16,12 +16,12 @@
# limitations under the License.
#
import torch
import torch._inductor.pattern_matcher as pm
from torch._inductor.pattern_matcher import PatternMatcherPass, PatternPrettyPrinter
from vllm.config import VllmConfig, get_layers_from_vllm_config
from vllm.config.compilation import Range
from vllm.logger import logger
from vllm_ascend.compilation.passes.base_pattern import BasePattern
from vllm_ascend.utils import vllm_version_is
if vllm_version_is("v0.15.0"):
@@ -32,15 +32,14 @@ else:
from vllm.model_executor.layers.attention import Attention
class QKNormRopeFusionPattern:
class QKNormRopeFusionPattern(BasePattern):
def __init__(self, vllm_config, head_dim, num_heads, num_kv_heads, eps=1e-6):
self.vllm_config = vllm_config
super().__init__(vllm_config, eps)
self.head_dim = head_dim
self.num_heads = num_heads
self.num_kv_heads = num_kv_heads
self.q_size = self.num_heads * self.head_dim
self.kv_size = self.num_kv_heads * self.head_dim
self.eps = eps
self.device = vllm_config.device_config.device if vllm_config.device_config else None
def get_inputs(self):
@@ -53,7 +52,7 @@ class QKNormRopeFusionPattern:
positions = torch.ones(T, dtype=torch.int64, device="npu")
return [qkv, q_weight, k_weight, cos_sin_cache, positions]
def register(self, pm_pass: PatternMatcherPass):
def get_pattern(self):
def pattern(
qkv: torch.Tensor,
q_weight: torch.Tensor,
@@ -77,6 +76,9 @@ class QKNormRopeFusionPattern:
return q_rope, k_rope, v
return pattern
def get_replacement(self):
def replacement(
qkv: torch.Tensor,
q_weight: torch.Tensor,
@@ -100,18 +102,17 @@ class QKNormRopeFusionPattern:
return results
pm.register_replacement(pattern, replacement, self.get_inputs(), pm.fwd_only, pm_pass)
return replacement
class QKNormRopeFusionPatternWithBias:
class QKNormRopeFusionPatternWithBias(BasePattern):
def __init__(self, vllm_config, head_dim, num_heads, num_kv_heads, eps=1e-6):
super().__init__(vllm_config, eps)
self.head_dim = head_dim
self.num_heads = num_heads
self.num_kv_heads = num_kv_heads
self.q_size = self.num_heads * self.head_dim
self.kv_size = self.num_kv_heads * self.head_dim
self.eps = eps
self.vllm_config = vllm_config
self.device = vllm_config.device_config.device if vllm_config.device_config else None
def get_inputs(self):
@@ -127,7 +128,7 @@ class QKNormRopeFusionPatternWithBias:
return [qkv, q_weight, k_weight, q_bias, k_bias, cos_sin_cache, positions]
def register(self, pm_pass: PatternMatcherPass):
def get_pattern(self):
def pattern(
qkv: torch.Tensor,
q_weight: torch.Tensor,
@@ -155,6 +156,9 @@ class QKNormRopeFusionPatternWithBias:
return q_rope, k_rope, v
return pattern
def get_replacement(self):
def replacement(
qkv: torch.Tensor,
q_weight: torch.Tensor,
@@ -179,7 +183,7 @@ class QKNormRopeFusionPatternWithBias:
)
return results
pm.register_replacement(pattern, replacement, self.get_inputs(), pm.fwd_only, pm_pass)
return replacement
class QKNormRopeFusionPass(VllmInductorPass):

View File

@@ -0,0 +1,75 @@
#
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
# This file is a part of the vllm-ascend project.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from torch._inductor.pattern_matcher import Match
from vllm.logger import logger
def extra_stream_scope_check(match: Match) -> bool:
"""
Checks whether all matched nodes run in the same stream.
"""
non_default_streams = set()
has_default = False
for node in match.nodes:
if node.op == "call_function":
current_stream = node.meta.get("stream_label")
if current_stream is None:
has_default = True
else:
non_default_streams.add(current_stream)
if len(non_default_streams) > 1:
logger.debug(
"Cross-stream operation detected in pattern match: "
f"multiple side streams found: {non_default_streams}. "
"Fusion is not supported for cross-stream operations."
)
return False
if has_default and len(non_default_streams) > 0:
logger.debug(
"Cross-stream operation detected in pattern match: "
f"default stream mixed with side streams: {non_default_streams}. "
"Fusion is not supported for cross-stream operations."
)
return False
return True
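
The check's three outcomes can be exercised with a lightweight stand-in for `Match` (a test sketch; `SimpleNamespace` mimics FX nodes carrying a `stream_label` meta entry):

```python
from types import SimpleNamespace

def _fake_match(stream_labels):
    nodes = [SimpleNamespace(op="call_function", meta={"stream_label": label})
             for label in stream_labels]
    return SimpleNamespace(nodes=nodes)

assert extra_stream_scope_check(_fake_match([None, None]))      # default only
assert extra_stream_scope_check(_fake_match(["s1", "s1"]))      # one side stream
assert not extra_stream_scope_check(_fake_match(["s1", "s2"]))  # two side streams
assert not extra_stream_scope_check(_fake_match([None, "s1"]))  # mixed default/side
```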
_register_patterns = set()
def check_and_register_fusion_pass(pattern_class: type, **kwargs):
global _register_patterns
eps = kwargs.get("eps", 1e-6)
pattern_key = str(pattern_class.__name__) + str(eps)
if pattern_key in _register_patterns:
return
pattern = pattern_class(**kwargs)
try:
pattern.register()
_register_patterns.add(pattern_key)
except RuntimeError as e:
if "Duplicate pattern" in str(e):
logger.warning(f"Pattern {pattern_class.__name__} eps {eps} has been registered")
_register_patterns.add(pattern_key)
else:
raise
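
A hypothetical call site (the pattern class import and `vllm_config` are assumed; note the helper instantiates the class with the given kwargs, calls its no-argument `register()`, and treats torch's "Duplicate pattern" error as already-registered):

```python
# Idempotent registration keyed on class name + eps.
check_and_register_fusion_pass(
    AddRMSNormQuantPattern,   # assumed import from the quant-fusion module
    vllm_config=vllm_config,  # assumes a populated VllmConfig in scope
    eps=1e-6,
)
# A second identical call returns immediately via the _register_patterns set.
```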