Adopt inductor fusion and define quantization fusion pass (#4168)

### What this PR does / why we need it?
The main goal of this PR is to alleviate the high maintenance burden caused by
model duplication during model optimization. Some of our optimized models
diverge only slightly from vLLM's modeling code, yet still require rewriting
several parts of the original, which brings a non-negligible maintenance
burden to vllm-ascend. To solve this, we propose leveraging `torch.compile`
and the inductor pattern matcher to automatically fuse the patterns we want to
merge. For more details, refer to the RFC:
https://github.com/vllm-project/vllm-ascend/issues/4239

This PR fuses `AddRMSNorm` with the `Quant` operator, which improves the
inference speed of models using `w8a8` quantization.
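
To make the mechanism concrete, below is a minimal sketch of how such a rewrite can be registered with inductor's pattern matcher. It is illustrative only and is not the PR's actual `AddRMSNormQuantFusionPass`: the op signatures are taken from the unit tests in this commit, while the fixed `EPS` constant and passing `None` for the fused op's `beta` argument are assumptions, and tracing these ops requires an Ascend NPU environment.

```python
import torch
import torch_npu  # noqa: F401  (registers the torch.ops.npu namespace)
from torch._inductor.pattern_matcher import (PatternMatcherPass, fwd_only,
                                             register_replacement)

EPS = 1e-6  # fixed here for simplicity; the real pass handles it generically
quant_fusion_patterns = PatternMatcherPass()


def pattern(x, residual, weight, scale, offset):
    # The subgraph we search for: separate norm and quantize kernels.
    out, _, new_residual = torch.ops.npu.npu_add_rms_norm(
        x, residual, weight, EPS)
    quant = torch.ops.npu.npu_quantize(out, scale, offset,
                                       torch.qint8, -1, False)
    return quant, new_residual


def replacement(x, residual, weight, scale, offset):
    # The single fused kernel; beta=None is an assumption of this sketch.
    quant, _, new_residual = torch.ops.npu.npu_add_rms_norm_quant(
        x, residual, weight, scale, offset, None, EPS)
    return quant, new_residual


# Example inputs are only used to trace both functions into FX graphs
# so the matcher can compare them structurally.
example_inputs = [
    torch.empty(4, 8), torch.empty(4, 8), torch.empty(8),
    torch.tensor([1.0]), torch.tensor([0.0]),
]
register_replacement(pattern, replacement, example_inputs, fwd_only,
                     quant_fusion_patterns)
```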

### Does this PR introduce _any_ user-facing change?
Yes. It adds a new `additional_config` section, `ascend_compilation_config`,
with an `enable_quantization_fusion` switch (enabled by default, per the
config tests below).
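
For example, a sketch of toggling it through `additional_config` (the key names come from this PR's config tests; since the fusion pass is on by default, this shows opting out, and the model path is illustrative):

```python
from vllm import LLM

llm = LLM(
    model="vllm-ascend/Qwen3-8B-W8A8",  # illustrative model path
    quantization="ascend",
    additional_config={
        "ascend_compilation_config": {
            # Default is True; set False to disable AddRMSNorm+Quant fusion.
            "enable_quantization_fusion": False,
        },
    },
)
```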

### How was this patch tested?
```python
from vllm import LLM, SamplingParams


def main():
    prompts = [
        "The president of the United States is Mr.",
    ]

    # Create a sampling params object.
    sampling_params = SamplingParams(max_tokens=100, temperature=0.6,
                                     top_k=40, top_p=0.95)
    # Create an LLM.
    llm = LLM(
        model="/root/.cache/modelscope/hub/models/vllm-ascend/Qwen3-8B-W8A8",
        # enforce_eager=True,
        tensor_parallel_size=1,
        trust_remote_code=True,
        gpu_memory_utilization=0.7,
        quantization="ascend",
    )

    # Generate texts from the prompts.
    outputs = llm.generate(prompts, sampling_params)
    for output in outputs:
        prompt = output.prompt
        generated_text = output.outputs[0].text
        print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}")


if __name__ == "__main__":
    main()
```

```text
Prompt: 'The president of the United States is Mr.', Generated text: ' Trump. The president of the United States is Mr. Biden. Which of the following statements is correct? \n\nA. Mr. Trump is Mr. Biden.  \nB. Mr. Trump is not Mr. Biden.  \nC. The president of the United States is not Mr. Trump.  \nD. The president of the United States is not Mr. Biden.\n\nThe question presents a contradiction: it states that "The president of the United States is Mr. Trump" and "The president of'
```


- vLLM version: 86e178f7c4d8c3b0eaf3c8e3f810a83f63b90e24
- vLLM main: 86e178f7c4

---------

Signed-off-by: Icey <1790571317@qq.com>
Signed-off-by: wxsIcey <1790571317@qq.com>
Commit 178ca1607e (parent c4a71fc6d5), authored by Icey on 2025-12-04 10:29:48 +08:00, committed by GitHub.
13 changed files with 593 additions and 267 deletions.


@@ -0,0 +1,219 @@
#
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
# Copyright 2023 The vLLM team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from copy import deepcopy
from typing import Any, Callable, List, Optional, Sequence

import pytest
import torch
import torch.fx as fx
import torch.nn as nn
import torch_npu
import vllm.config
from torch._inductor.decomposition import select_decomp_table
from vllm.compilation.fx_utils import OpOverload
from vllm.config import ModelConfig, VllmConfig, get_current_vllm_config

from vllm_ascend.compilation.compiler_interface import compile_fx
from vllm_ascend.compilation.passes.quant_fusion_pass import \
    AddRMSNormQuantFusionPass


class TestModel(nn.Module):
    """
    A minimal test model that simulates the pattern:
        AddRMSNorm → Quantization
    """

    def __init__(self, hidden_size: int, eps: float = 1e-6, device="npu"):
        super().__init__()
        self.hidden_size = hidden_size
        self.eps = eps
        self.rms_norm_weight = nn.Parameter(
            torch.randn(hidden_size, device=device))
        self.quant_scale = torch.tensor([1.0], device=device)
        self.quant_offset = torch.tensor([0.0], device=device)

    def forward(self, x):
        """
        Forward pass:
        1. Perform npu_add_rms_norm
        2. Quantize the normalized output to int8
        Returns both quantized output and updated residual.
        """
        residual = torch.zeros_like(x)
        norm_output, _, new_residual = torch_npu.npu_add_rms_norm(
            x, residual, self.rms_norm_weight, self.eps)
        quantized_output = torch_npu.npu_quantize(norm_output,
                                                  self.quant_scale,
                                                  self.quant_offset,
                                                  torch.qint8, -1, False)
        return quantized_output, new_residual

    def ops_in_model_before(self) -> List[OpOverload]:
        """Return the list of expected operators BEFORE fusion."""
        return [
            torch.ops.npu.npu_add_rms_norm.default,
            torch.ops.npu.npu_quantize.default
        ]

    def ops_in_model_after(self) -> List[OpOverload]:
        """Return the list of expected operators AFTER successful fusion."""
        return [torch.ops.npu.npu_add_rms_norm_quant.default]


class TestBackend:
    """
    A custom compilation backend for testing operator fusion passes.
    It applies the AddRMSNormQuantFusionPass during graph compilation and
    records the FX graph before and after the transformation.
    """

    def __init__(self):
        vllm_config = get_current_vllm_config()
        compile_config = vllm_config.compilation_config
        self.custom_passes = [
            AddRMSNormQuantFusionPass(vllm_config=vllm_config)
        ]
        self.inductor_config = compile_config.inductor_compile_config
        self.inductor_config["graph_fusion_manager"] = self.post_pass
        # Placeholders to store FX graphs for verification
        self.graph_pre_pass = None
        self.graph_post_pass = None

    def post_pass(self,
                  graph: fx.Graph,
                  runtime_shape: int | None = None) -> fx.Graph:
        """
        Apply custom graph transformation passes.
        """
        self.graph_pre_pass = deepcopy(graph)
        for pass_ in self.custom_passes:
            pass_(graph)
        self.graph_post_pass = deepcopy(graph)
        return graph

    def compile(
        self,
        graph: fx.GraphModule,
        example_inputs: list[Any],
        compiler_config: dict[str, Any],
        runtime_shape: Optional[int] = None,
        key: Optional[str] = None
    ) -> tuple[Optional[Callable], Optional[Any]]:
        """
        Compile the FX graph using vLLM's Ascend compiler interface.
        Wraps the post-pass logic into the inner_compile callback.
        """

        def compile_inner(graph, example_inputs):
            current_pass_manager = compiler_config["graph_fusion_manager"]
            return current_pass_manager(graph, runtime_shape)

        decompositions = select_decomp_table()
        compiled_fn = compile_fx(
            graph=graph,
            example_inputs=example_inputs,
            inner_compile=compile_inner,
            decompositions=decompositions,
        )
        return compiled_fn, None

    def __call__(self, gm: fx.GraphModule, example_inputs: List[Any]):
        """
        Make the backend callable by torch.compile().
        Returns a compiled executable function.
        """
        compiled_fn, _ = self.compile(
            gm,
            example_inputs,
            compiler_config={"graph_fusion_manager": self.post_pass},
            runtime_shape=None,
            key=None,
        )
        return compiled_fn

    def find_nodes_by_target(self, graph: fx.GraphModule,
                             target: OpOverload) -> List[fx.Node]:
        """Helper to find all FX nodes that call a specific operator."""
        return [
            node for node in graph.graph.nodes
            if hasattr(node, 'target') and node.target == target
        ]

    def check_before_ops(self,
                         ops: Sequence[OpOverload],
                         fully_replaced: bool = True):
        """
        Verify that the original (unfused) operators exist before the pass
        and are fully removed afterward (if fully_replaced=True).
        """
        for op in ops:
            num_pre = len(self.find_nodes_by_target(self.graph_pre_pass, op))
            num_post = len(self.find_nodes_by_target(self.graph_post_pass, op))
            print(f"Op {op}: pre={num_pre}, post={num_post}")
            assert num_pre > 0, f"Op {op} not found in pre-pass graph"
            if fully_replaced:
                assert num_post == 0, f"Unexpected op {op} in post-pass graph: {num_post} nodes remain"

    def check_after_ops(self, ops: Sequence[OpOverload]):
        """Verify that the fused operator appears in the transformed graph."""
        for op in ops:
            num_post = len(self.find_nodes_by_target(self.graph_post_pass, op))
            print(f"Op {op}: post={num_post}")
            assert num_post > 0, f"Op {op} not found in post-pass graph"


@pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16])
@pytest.mark.parametrize("hidden_size", [64])
@pytest.mark.parametrize("num_tokens", [257])
@pytest.mark.parametrize("eps", [1e-5, 1e-6])
def test_rmsnorm_quant_fusion(dtype: torch.dtype, hidden_size: int,
                              num_tokens: int, eps: float):
    """
    End-to-end test for AddRMSNorm+Quantize fusion.
    Compares: Operator presence/absence before and after graph transformation
    """
    torch.set_default_dtype(dtype)
    torch.manual_seed(1)
    vllm_config = VllmConfig(model_config=ModelConfig(dtype=dtype))
    with vllm.config.set_current_vllm_config(vllm_config):
        backend = TestBackend()
        model = TestModel(hidden_size, eps, device="npu")
        model = model.to("npu")
        x = torch.rand(num_tokens,
                       hidden_size,
                       device="npu",
                       dtype=dtype,
                       requires_grad=False)

        result_unfused = model(x)
        print("Unfused result:", [t.shape for t in result_unfused])

        model_fused = torch.compile(model, backend=backend)
        result_fused = model_fused(x)
        print("Fused result:", [t.shape for t in result_fused])

        print("=== Checking operator fusion ===")
        backend.check_before_ops(model.ops_in_model_before())
        backend.check_after_ops(model.ops_in_model_after())


@@ -1,16 +1,17 @@
-import unittest
 from unittest.mock import patch

 import pytest
 import torch
+from pytest_mock import MockerFixture
 from vllm.model_executor.layers.layernorm import RMSNorm

+from tests.ut.base import PytestBase
+from vllm_ascend.quantization.w8a8 import AscendW8A8LinearMethod
 from vllm_ascend.utils import AscendDeviceType


 @pytest.fixture
 def dummy_tensor():
     return torch.randn(4, 8, dtype=torch.float16)


 def mock_rms_norm(x, weight, eps):
     return x + 1, None


@@ -19,145 +20,38 @@ def mock_add_rms_norm(x, residual, weight, eps):
     return 2 * x, None, 2 * residual


+def mock_add_rms_norm_quant_with_bias(x, residual, weight, quant_scale,
+                                      quant_offset, beta, epsilon):
+    x_out = 2 * x
+    residual_out = 2 * residual
+    x_out_quant = x_out.to(torch.int8)
+    residual_out_quant = residual_out.to(torch.int8)
+    return x_out_quant, None, residual_out_quant


-@pytest.mark.parametrize("is_310p", [True, False])
-@pytest.mark.parametrize("residual",
-                         [None, torch.randn(4, 8, dtype=torch.float32)])
-@patch("torch_npu.npu_rms_norm", side_effect=mock_rms_norm)
-@patch("torch_npu.npu_add_rms_norm", side_effect=mock_add_rms_norm)
-def test_RMSNorm_forward(mock_add_rmsnorm, mock_rmsnorm, is_310p, residual,
-                         dummy_tensor):
+class TestAscendRMSNorm(PytestBase):
+
+    @pytest.fixture(autouse=True)
+    def context(self, mocker: MockerFixture):
+        mocker.patch("torch_npu.npu_rms_norm", side_effect=mock_rms_norm)
+        mocker.patch("torch_npu.npu_add_rms_norm",
+                     side_effect=mock_add_rms_norm)
+        mocker.patch("torch_npu.npu_add_rms_norm_quant",
+                     side_effect=mock_add_rms_norm_quant_with_bias)
+        mocker.patch("torch.ops.vllm.maybe_wait_prefetch_done",
+                     side_effect=lambda x: None)
+
+    # Test case for the most common and basic scenario
+    @pytest.mark.parametrize(
+        "residual", [None, torch.randn(4, 8, dtype=torch.float16)])
+    @patch("torch.ops.vllm.maybe_chunk_residual")
+    def test_forward_oot_basic(self, mock_maybe_chunk_residual, residual):
+        mock_maybe_chunk_residual.side_effect = lambda x, residual: residual
-    with patch("vllm_ascend.utils.get_ascend_device_type",
-               return_value=AscendDeviceType._310P
-               if is_310p else AscendDeviceType._910_93):
         layer = RMSNorm(hidden_size=8, eps=1e-05)
+        x = torch.randn(4, 8, dtype=torch.float16)
         if residual is not None:
+            x_out, residual_out = layer.forward_oot(x, residual)
-            out_x, out_residual = layer.forward_oot(dummy_tensor, residual)
+            x_out_expected = 2 * x
+            residual_out_expected = 2 * residual
-            if is_310p:
-                expected_arg_x = dummy_tensor + residual.to(dummy_tensor.dtype)
-                expected_out_x = expected_arg_x + 1
-                expected_out_residual = expected_arg_x.to(residual.dtype)
+            assert torch.allclose(x_out, x_out_expected)
+            assert torch.allclose(residual_out, residual_out_expected)
-                mock_rmsnorm.assert_called_once()
-                assert torch.allclose(out_x, expected_out_x)
-                assert torch.allclose(out_residual, expected_out_residual)
-            else:
-                expected_out_x = 2 * dummy_tensor
-                expected_out_residual = 2 * residual
-                mock_add_rmsnorm.assert_called_once()
-                assert torch.allclose(out_x, expected_out_x)
-                assert torch.allclose(out_residual, expected_out_residual)
         else:
+            x_out = layer.forward(x, residual)
+            x_out_expected = x + 1
-            out_x = layer.forward_oot(dummy_tensor, residual)
-            expected_out_x = dummy_tensor + 1
+            assert torch.allclose(x_out, x_out_expected)

+    # Test case for addrmsnorm + w8a8 quant fusion
+    def test_forward_oot_with_quant_fusion(self, mocker: MockerFixture):
+        mock_soc_version = mocker.patch(
+            "vllm_ascend.utils.get_ascend_device_type")
+        mock_soc_version.return_value = AscendDeviceType._910_93
+        mock_get_forward_context = mocker.patch(
+            "vllm_ascend.ops.layernorm.get_forward_context")
+        # Simulating a scenario with quant_fusion enabled
+        mock_forward_context = mocker.MagicMock()
+        mock_model_instance = mocker.MagicMock()
+        mock_forward_context.model_instance = mock_model_instance
+        num_hidden_layers = 3
+        mock_model_instance.model.layers = [
+            mocker.MagicMock() for _ in range(num_hidden_layers)
+        ]
+        mock_layer_0 = mock_model_instance.model.layers[0]
+        mock_layer_0.self_attn.qkv_proj = mocker.MagicMock()
+        mock_layer_0.mlp.gate_up_proj = mocker.MagicMock()
+        mock_layer_1 = mock_model_instance.model.layers[1]
+        mock_layer_1.self_attn.qkv_proj = mocker.MagicMock()
+        mock_layer_1.mlp.gate_up_proj = mocker.MagicMock()
+        mock_quant_method_0_qkv = mocker.MagicMock()
+        mock_quant_method_0_qkv.quant_method = AscendW8A8LinearMethod()
+        mock_quant_method_0_gate_up = mocker.MagicMock()
+        mock_quant_method_0_gate_up.quant_method = AscendW8A8LinearMethod()
+        mock_layer_0.self_attn.qkv_proj.quant_method = mock_quant_method_0_qkv
+        mock_layer_0.mlp.gate_up_proj.quant_method = mock_quant_method_0_gate_up
+        mock_quant_method_1_qkv = mocker.MagicMock()
+        mock_quant_method_1_qkv.quant_method = AscendW8A8LinearMethod()
+        mock_quant_method_1_gate_up = mocker.MagicMock()
+        mock_quant_method_1_gate_up.quant_method = AscendW8A8LinearMethod()
+        mock_layer_1.self_attn.qkv_proj.quant_method = mock_quant_method_1_qkv
+        mock_layer_1.mlp.gate_up_proj.quant_method = mock_quant_method_1_gate_up
+        mock_get_forward_context.return_value = mock_forward_context
+        mock_forward_context.addrmsnorm_quant_fusion_enabled = True
+        mock_forward_context.prefetch_mlp_enabled = False
+        mock_forward_context.layer_idx = 0
+        mock_forward_context.num_hidden_layers = num_hidden_layers
+        mock_forward_context.fusion_linear = "gate_up_dense"
+        mock_forward_context.weight_prefetch_method = None
+        mocker.patch("torch.ops.vllm.maybe_chunk_residual",
+                     lambda x, residual: residual)
+        # Ensure fusion and layer_idx increment are handled correctly
+        x = torch.randn(4, 8, dtype=torch.float16)
+        residual = torch.randn(4, 8, dtype=torch.float16)
+        layer = RMSNorm(hidden_size=8, eps=1e-05)
+        x_out, residual_out = layer.forward_oot(x, residual)
+        assert mock_get_forward_context.call_count == 2
+        assert mock_forward_context.fusion_linear == "qkv_dense"
+        assert mock_forward_context.layer_idx == 1
+        x_out, residual_out = layer.forward_oot(x, residual)
+        assert mock_get_forward_context.call_count == 4
+        assert mock_forward_context.fusion_linear == "gate_up_dense"
+        assert mock_forward_context.layer_idx == 1
+        mock_forward_context.fusion_linear = "gate_moe"
+        x_out, residual_out = layer.forward_oot(x, residual)
+        assert mock_get_forward_context.call_count == 5
+        fusion_linear_expected = "qkv_moe"
+        assert mock_forward_context.fusion_linear == fusion_linear_expected
+        assert mock_forward_context.layer_idx == 2
+        x_out, residual_out = layer.forward_oot(x, residual)
+        assert mock_get_forward_context.call_count == 6
+        fusion_linear_expected = "gate_moe"
+        assert mock_forward_context.fusion_linear == fusion_linear_expected
+        assert mock_forward_context.layer_idx == 2
+        # last layer returned directly
+        x_out, residual_out = layer.forward_oot(x, residual)
+        assert mock_get_forward_context.call_count == 7
+        assert mock_forward_context.fusion_linear == "qkv_moe"
+        assert mock_forward_context.layer_idx == 3
+        x_out, residual_out = layer.forward_oot(x, residual)
+        assert mock_get_forward_context.call_count == 8
+        assert mock_forward_context.fusion_linear == "qkv_moe"
+        assert mock_forward_context.layer_idx == 3
-if __name__ == '__main__':
-    unittest.main()


@@ -56,6 +56,9 @@ class TestAscendConfig(TestBase):
         self.assertTrue(torchair_graph_config.enable_frozen_parameter)
         self.assertFalse(torchair_graph_config.enable_kv_nz)
+        ascend_compilation_config = ascend_config.ascend_compilation_config
+        self.assertTrue(ascend_compilation_config.enable_quantization_fusion)

     @_clean_up_ascend_config
     def test_init_ascend_config_with_additional_config(self):
         test_vllm_config = VllmConfig()
@@ -70,6 +73,9 @@ class TestAscendConfig(TestBase):
                 "enable_frozen_parameter": True,
                 "enable_kv_nz": True
             },
+            "ascend_compilation_config": {
+                "enable_quantization_fusion": False,
+            },
             "multistream_overlap_shared_expert": True,
             "expert_map_path": "test_expert_map_path",
             "refresh": True,
@@ -87,6 +93,8 @@ class TestAscendConfig(TestBase):
         self.assertTrue(torchair_graph_config.enable_view_optimize)
         self.assertTrue(torchair_graph_config.enable_frozen_parameter)
         self.assertTrue(torchair_graph_config.enable_kv_nz)
+        ascend_compilation_config = ascend_config.ascend_compilation_config
+        self.assertFalse(ascend_compilation_config.enable_quantization_fusion)

     @_clean_up_ascend_config
     def test_init_ascend_config_with_refresh(self):


@@ -685,9 +685,12 @@ class TestNPUPlatform(TestBase):
             importlib.reload(platform)
             self.platform.check_and_update_config(VllmConfig)
+        target_msg = "PIECEWISE compilation enabled on NPU. use_inductor not supported - using only ACL Graph mode"
+        found = any(target_msg in log for log in cm.output)
         self.assertTrue(
-            "PIECEWISE compilation enabled on NPU. use_inductor not supported - "
-            "using only ACL Graph mode" in cm.output[0])
+            found,
+            f"Expected log message not found. Captured logs: {cm.output}")
         self.assertEqual(
             VllmConfig.compilation_config.mode,