[Fusion][Graph] Add Matmul Allreduce Rmsnorm fusion pass (#5034)
This PR adds the `MatmulAllreduceRmsnorm` operator and introduces a graph
fusion pass for `matmul_allreduce_rmsnorm` operations. The implementation
includes a new configuration flag (`fuse_allreduce_rms`) and a
pattern-matching pass built on `torch._inductor.pattern_matcher`; a sketch
of the registration mechanics follows the sign-offs below.
Co-authored-by: Trunrain <270250579@qq.com>
- vLLM version: v0.12.0
- vLLM main: ad32e3e19c
---------
Signed-off-by: wxsIcey <1790571317@qq.com>
Signed-off-by: tongrunze <t00574058@china.huawei.com>
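
For readers unfamiliar with `torch._inductor.pattern_matcher`, the gist of
such a pass is to trace a search function describing the unfused subgraph and
register it together with a replacement. The sketch below is illustrative
only: it matches a simplified matmul → rmsnorm sequence, whereas the actual
pass in this PR additionally matches the tensor-parallel all-reduce between
the two and rewrites the sequence to the new `MatmulAllreduceRmsnorm` op.
All helper names here are hypothetical.

```python
# Illustrative sketch only: a simplified matmul -> rmsnorm pattern; the
# real pass also matches the all-reduce collective in between and swaps
# in the fused MatmulAllreduceRmsnorm kernel. Helper names are made up.
import torch
from torch._inductor.pattern_matcher import (
    PatternMatcherPass,
    fwd_only,
    register_replacement,
)

patterns = PatternMatcherPass()

def rmsnorm_ref(x, weight, eps=1e-6):
    # Reference RMSNorm used to describe the searched subgraph.
    variance = x.pow(2).mean(-1, keepdim=True)
    return x * torch.rsqrt(variance + eps) * weight

def search(x, w, norm_w):
    # The unfused sequence the pass looks for.
    return rmsnorm_ref(torch.matmul(x, w), norm_w)

def replace(x, w, norm_w):
    # Stand-in for the fused op; a real pass would emit the custom
    # NPU kernel call here instead of recomputing the reference.
    return rmsnorm_ref(torch.matmul(x, w), norm_w)

# Example inputs are only used to trace the pattern at registration time.
example_inputs = [torch.randn(4, 8), torch.randn(8, 8), torch.randn(8)]
register_replacement(search, replace, example_inputs, fwd_only, patterns)
```

A compilation pass then calls `patterns.apply(graph_module.graph)` on the
captured FX graph; the return value is the number of sites rewritten.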
@@ -169,7 +169,9 @@ class AscendCompilationConfig:
     deployed on Ascend platforms.
     """
 
-    def __init__(self, fuse_norm_quant: bool = True, fuse_qknorm_rope: bool = False, **kwargs):
+    def __init__(
+        self, fuse_norm_quant: bool = True, fuse_qknorm_rope: bool = False, fuse_allreduce_rms: bool = False, **kwargs
+    ):
         """
         Initialize the configuration.
 
@@ -179,10 +181,13 @@ class AscendCompilationConfig:
             Default: True
         fuse_qknorm_rope (bool): Whether to enable qknorm and rope fusion optimization.
             Default: False
+        fuse_allreduce_rms (bool): Whether to enable allreduce and addrmsnorm fusion optimization.
+            Default: False
         **kwargs: Additional optional parameters for forward compatibility and configuration extension.
         """
         self.fuse_norm_quant = fuse_norm_quant
         self.fuse_qknorm_rope = HAS_TRITON or fuse_qknorm_rope
+        self.fuse_allreduce_rms = fuse_allreduce_rms
 
 
 class XliteGraphConfig:
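
As the diff shows, the new fusion is opt-in since `fuse_allreduce_rms`
defaults to `False`. A hedged usage sketch (the import path is an assumption;
check your vllm-ascend tree for the actual module):

```python
# Hypothetical import path; adjust to where AscendCompilationConfig
# actually lives in your vllm-ascend checkout.
from vllm_ascend.ascend_config import AscendCompilationConfig

# fuse_allreduce_rms defaults to False, so the new pass is opt-in.
cfg = AscendCompilationConfig(fuse_allreduce_rms=True)
assert cfg.fuse_allreduce_rms
```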