[310p]: add rmsnorm gated fallback and unit test (#7424)

### What this PR does / why we need it?
RFC #7394
The 310P platform cannot use the fused `rmsnormgated` operator, so `RMSNormGated` must fall back to the native (unfused) implementation. This PR adds `AscendRMSNormGated310`, whose `forward_oot` delegates to the upstream `forward_native`, registers it for 310P, and covers it with unit tests.
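
For context, below is a minimal sketch of what the unfused gated RMSNorm computes. It assumes the Mamba-style convention of gating with `silu(z)` before normalization; the exact ordering inside vLLM's `RMSNormGated.forward_native` may differ, and `rmsnorm_gated_reference` is a hypothetical name used only for illustration.

```python
import torch
import torch.nn.functional as F


def rmsnorm_gated_reference(x: torch.Tensor,
                            z: torch.Tensor | None,
                            weight: torch.Tensor,
                            eps: float = 1e-5) -> torch.Tensor:
    # Hypothetical reference for the unfused gated RMSNorm path.
    # Assumes the gate is applied as x * silu(z) before normalization
    # (Mamba-style); vLLM's RMSNormGated.forward_native may differ in detail.
    if z is not None:
        x = x * F.silu(z)                        # apply the gate
    variance = x.pow(2).mean(dim=-1, keepdim=True)
    x = x * torch.rsqrt(variance + eps)          # RMS normalization
    return x * weight                            # learnable per-channel scale
```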

### Does this PR introduce _any_ user-facing change?
NO
### How was this patch tested?
Unit tests (see the new test file in this change).
- vLLM version: v0.17.0
- vLLM main: 4497431df6

---------

Signed-off-by: Tflowers-0129 <2906339855@qq.com>
Shaoxu Cheng authored 2026-03-24 09:00:11 +08:00, committed by GitHub
parent 1de805ce0a
commit 83bd77c983
3 changed files with 59 additions and 1 deletion

View File

@@ -0,0 +1,41 @@
from unittest.mock import MagicMock, patch

import pytest
import torch
from vllm.config import set_current_vllm_config
from vllm.model_executor.layers.layernorm import RMSNormGated

from vllm_ascend._310p.ops.layernorm import AscendRMSNormGated310


@pytest.fixture(autouse=True)
def default_vllm_config():
    # Enable all custom ops so the Ascend forward_oot path is selected.
    mock_config = MagicMock()
    mock_config.compilation_config.custom_ops = ["all"]
    with set_current_vllm_config(mock_config):
        yield mock_config


def test_rmsnorm_gated_310_forward_oot_uses_forward_native():
    layer = AscendRMSNormGated310(hidden_size=8, eps=1e-5)
    x = torch.randn(2, 8, dtype=torch.float32)
    z = torch.randn(2, 8, dtype=torch.float32)
    expected = torch.randn(2, 8, dtype=torch.float32)
    # With autospec=True the instance is recorded as the first call argument.
    with patch.object(RMSNormGated, "forward_native", autospec=True,
                      return_value=expected) as mock_forward_native:
        out = layer.forward_oot(x, z)
    mock_forward_native.assert_called_once_with(layer, x, z)
    assert out is expected


def test_rmsnorm_gated_310_forward_oot_uses_forward_native_without_gate():
    layer = AscendRMSNormGated310(hidden_size=8, eps=1e-5)
    x = torch.randn(2, 8, dtype=torch.float32)
    expected = torch.randn(2, 8, dtype=torch.float32)
    with patch.object(RMSNormGated, "forward_native", autospec=True,
                      return_value=expected) as mock_forward_native:
        out = layer.forward_oot(x, None)
    mock_forward_native.assert_called_once_with(layer, x, None)
    assert out is expected
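
The `assert_called_once_with(layer, x, z)` assertions rely on documented `unittest.mock` behavior: when a method is patched on its class with `autospec=True`, fetching it through an instance binds it, so the instance is recorded as the first positional argument. A minimal standalone sketch of that pattern, with hypothetical `Base`/`Child` classes standing in for `RMSNormGated`/`AscendRMSNormGated310`:

```python
from unittest.mock import patch


class Base:
    def forward_native(self, x):
        return x


class Child(Base):
    def forward_oot(self, x):
        # Delegate to the parent implementation, mirroring the fallback above.
        return super().forward_native(x)


with patch.object(Base, "forward_native", autospec=True,
                  return_value="sentinel") as mock_native:
    obj = Child()
    assert obj.forward_oot("input") == "sentinel"
    # autospec records the bound instance as the first positional argument.
    mock_native.assert_called_once_with(obj, "input")
```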

View File

@@ -1,5 +1,6 @@
import torch
import torch_npu
from vllm.model_executor.layers.layernorm import RMSNormGated

from vllm_ascend.ops.layernorm import AscendGemmaRMSNorm, AscendRMSNorm
@@ -37,3 +38,14 @@ class AscendGemmaRMSNorm310(AscendGemmaRMSNorm):
        x, _ = torch_npu.npu_rms_norm(x, 1.0 + self.weight, self.variance_epsilon)
        return x


class AscendRMSNormGated310(RMSNormGated):

    def forward_oot(
        self,
        x: torch.Tensor,
        z: torch.Tensor | None = None,
    ) -> torch.Tensor:
        # 310P should not depend on the Triton-gated layernorm path.
        # Reuse the upstream native implementation directly.
        return super().forward_native(x, z)
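
A note on why overriding only `forward_oot` is enough: vLLM's `CustomOp` dispatch routes calls to `forward_oot` on out-of-tree platforms such as Ascend whenever custom ops are enabled (which the unit-test fixture above mimics by setting `custom_ops = ["all"]`), so this single override should be all that is needed to divert 310P away from the fused kernel.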

View File

@@ -661,7 +661,11 @@ def register_ascend_customop(vllm_config: VllmConfig | None = None):
    if is_310p():
        from vllm_ascend._310p.fused_moe.fused_moe import AscendFusedMoE310, AscendSharedFusedMoE310
        from vllm_ascend._310p.ops.activation import AscendSiluAndMul310
        from vllm_ascend._310p.ops.layernorm import (
            AscendGemmaRMSNorm310,
            AscendRMSNorm310,
            AscendRMSNormGated310,
        )
        from vllm_ascend._310p.ops.mm_encoder_attention import AscendMMEncoderAttention310
        from vllm_ascend._310p.ops.rotary_embedding import AscendRotaryEmbedding310
        from vllm_ascend._310p.ops.vocab_parallel_embedding import (
@@ -675,6 +679,7 @@ def register_ascend_customop(vllm_config: VllmConfig | None = None):
"RotaryEmbedding": AscendRotaryEmbedding310, "RotaryEmbedding": AscendRotaryEmbedding310,
"RMSNorm": AscendRMSNorm310, "RMSNorm": AscendRMSNorm310,
"GemmaRMSNorm": AscendGemmaRMSNorm310, "GemmaRMSNorm": AscendGemmaRMSNorm310,
"RMSNormGated": AscendRMSNormGated310,
"FusedMoE": AscendFusedMoE310, "FusedMoE": AscendFusedMoE310,
"SharedFusedMoE": AscendSharedFusedMoE310, "SharedFusedMoE": AscendSharedFusedMoE310,
"ParallelLMHead": AscendParallelLMHead310, "ParallelLMHead": AscendParallelLMHead310,