[Feat.][310P] addrmsnorm for 300I DUO (#6704)
### What this PR does / why we need it?

This PR integrates the `npu_add_rms_norm` fused kernel for RMSNorm operations with residual connections on 310P devices. It replaces a two-step process (manual residual addition followed by RMSNorm) with a single, more efficient fused operation, improving the performance of models that use RMSNorm with residual connections on the 310P architecture.

Fixes #

### Does this PR introduce _any_ user-facing change?

No. This PR is an internal optimization and does not change any user-facing APIs or behaviors.

### How was this patch tested?

This patch was tested with updated unit tests (`test_RMSNorm_forward_310p`) that mock the `npu_add_rms_norm` operation to verify the correctness of the fused kernel integration.

---------

Signed-off-by: Tflowers-0129 <2906339855@qq.com>
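For reference, the optimization collapses the residual add and the normalization into one kernel call. Below is a minimal sketch of the two code shapes, assuming the documented `torch_npu.npu_rms_norm` (returns `(y, rstd)`) and `torch_npu.npu_add_rms_norm` (returns `(y, rstd, x_out)`) signatures; the helper names are illustrative and this is not the literal `RMSNorm.forward_oot` implementation:

```python
import torch
import torch_npu  # Ascend PyTorch adapter


def rmsnorm_residual_two_step(x, residual, weight, eps=1e-6):
    # Previous 310P path: materialize x + residual in device memory,
    # then run RMSNorm over the sum (two kernel launches).
    residual = x + residual
    y, _ = torch_npu.npu_rms_norm(residual, weight, eps)
    return y, residual


def rmsnorm_residual_fused(x, residual, weight, eps=1e-6):
    # New 310P path: a single fused kernel performs the add and the
    # normalization, and also returns the summed residual so the next
    # layer can reuse it.
    y, _, residual = torch_npu.npu_add_rms_norm(x, residual, weight, eps)
    return y, residual
```

The fused call saves a kernel launch and one full read/write of the hidden states per layer, which is typically where the speedup from this kind of fusion comes from on bandwidth-bound parts like 300I DUO.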
```diff
@@ -5,7 +5,7 @@ import torch
 from vllm.config import set_current_vllm_config
 from vllm.model_executor.layers.layernorm import RMSNorm
 
-from vllm_ascend.utils import AscendDeviceType, enable_custom_op
+from vllm_ascend.utils import enable_custom_op
 from vllm_ascend.utils import is_310p as is_310p_hw
 
 enable_custom_op()
@@ -39,8 +39,8 @@ def default_vllm_config():
     with set_current_vllm_config(mock_config):
         yield mock_config
 
 
-@pytest.mark.skip(
-    "Skip as register_kernels has NPU SocName checking in CANN 8.5.0.")
+@pytest.mark.skip("Skip as register_kernels has NPU SocName checking in CANN 8.5.0.")
+@pytest.mark.skipif(is_310p_hw(), reason="non_310P device unittest case.")
 @pytest.mark.parametrize("residual", [None, torch.randn(4, 8, dtype=torch.float32)])
 @patch("torch_npu.npu_rms_norm", side_effect=mock_rms_norm)
@@ -68,19 +68,18 @@ def test_RMSNorm_forward(
 @pytest.mark.skipif(not is_310p_hw(), reason="310P device unittest case.")
 @pytest.mark.parametrize("residual", [None, torch.randn(4, 8, dtype=torch.float16)])
 @patch("torch_npu.npu_rms_norm", side_effect=mock_rms_norm)
-def test_RMSNorm_forward_310p(
-    mock_rmsnorm, residual, dummy_tensor, default_vllm_config
-):
+@patch("torch_npu.npu_add_rms_norm", side_effect=mock_add_rms_norm)
+def test_RMSNorm_forward_310p(mock_add_rmsnorm, mock_rmsnorm, residual, dummy_tensor, default_vllm_config):
     layer = RMSNorm(hidden_size=8, eps=1e-05)
     if residual is not None:
         out_x, out_residual = layer.forward_oot(dummy_tensor, residual)
-        expected_out_residual = dummy_tensor + residual
-        expected_out_x = expected_out_residual + 1
-        mock_rmsnorm.assert_called_once()
+        expected_out_x = 2 * dummy_tensor
+        expected_out_residual = 2 * residual
+        mock_add_rmsnorm.assert_called_once()
         assert torch.allclose(out_x, expected_out_x)
         assert torch.allclose(out_residual, expected_out_residual)
     else:
         out_x = layer.forward_oot(dummy_tensor, residual)
         expected_out_x = dummy_tensor + 1
         mock_rmsnorm.assert_called_once()
         assert torch.allclose(out_x, expected_out_x)
```
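The expected values in this hunk only make sense given the `mock_rms_norm` and `mock_add_rms_norm` helpers defined earlier in the test module (outside the diff). A plausible sketch of what they must look like for the assertions to hold, offered as a reconstruction rather than the actual definitions:

```python
# Hypothetical reconstruction of the side_effect helpers; the real
# definitions live near the top of the test file.
def mock_rms_norm(x, gamma, eps):
    # torch_npu.npu_rms_norm returns (y, rstd). Returning x + 1 makes the
    # call observable: expected_out_x = dummy_tensor + 1 in the
    # residual-free branch.
    return x + 1, None


def mock_add_rms_norm(x1, x2, gamma, eps):
    # torch_npu.npu_add_rms_norm returns (y, rstd, x_out). Doubling each
    # input yields expected_out_x = 2 * dummy_tensor and
    # expected_out_residual = 2 * residual in the fused branch.
    return 2 * x1, None, 2 * x2
```

Mocking the kernels this way lets the test verify which NPU op `forward_oot` dispatched to (fused vs. two-step) without needing real 310P hardware in CI.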