[UT]: refactoring 310p ops ut (#6296)

### What this PR does / why we need it?
Refactor the swiglu and rms_norm unit test cases for 310P and 910B.
Apply attention_v1's `get_kv_cache_shape` and metadata building on all platforms.
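
The refactor replaces the pseudo-310P `parametrize` cases with tests gated on the actual hardware. A minimal sketch of that gating pattern, assuming `is_310p_hw()` is the hardware-detection helper used by these UTs (the import path below is illustrative only and not taken from this diff):

```python
import pytest

# Assumption: illustrative import path; the real helper lives wherever the
# vllm-ascend UT utilities define is_310p_hw().
from tests.ut.base import is_310p_hw


@pytest.mark.skipif(is_310p_hw(), reason="non_310P device unittest case.")
def test_some_op_forward():
    # Runs only on non-310P devices (e.g. 910B) and exercises the torch_npu path.
    ...


@pytest.mark.skipif(not is_310p_hw(), reason="310P device unittest case.")
def test_some_op_forward_310p():
    # Runs only on 310P hardware and exercises the 310P-specific fallback path.
    ...
```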

### Does this PR introduce _any_ user-facing change?
NA
### How was this patch tested?
Covered by the CI unit tests.
- vLLM version: v0.14.1
- vLLM main: dc917cceb8

---------

Signed-off-by: pu-zhe <zpuaa@outlook.com>
Authored by pu-zhe on 2026-01-27 16:31:51 +08:00, committed by GitHub.
parent 57fd6e4bd9
commit 21b6779a33
3 changed files with 80 additions and 62 deletions


@@ -52,8 +52,7 @@ def test_QuickGELU_forward(mock_gelu, dummy_tensor, default_vllm_config):
     mock_gelu.assert_called_once()
 
 
-@pytest.mark.skipif(is_310p_hw(), reason="310P operator classes have already been refactored.")
-@pytest.mark.parametrize("is_310p", [True, False])
+@pytest.mark.skipif(is_310p_hw(), reason="non_310P device unittest case.")
 @patch("torch_npu.npu_swiglu", side_effect=lambda x: x + 1)
 @patch("torch.ops.vllm.maybe_wait_prefetch_done", side_effect=lambda x: None)
 @patch("torch.ops.vllm.maybe_prefetch_mlp_down_proj", side_effect=lambda x: None)
@@ -61,36 +60,56 @@ def test_SiluAndMul_forward(
     mock_maybe_prefetch_mlp_down_proj,
     mock_maybe_wait_prefetch_done,
     mock_swiglu,
-    is_310p,
     dummy_tensor,
     default_vllm_config,
 ):
-    if is_310p and (not is_310p_hw()):
-        pytest.skip("Pseudo-310P param case is not valid on non-310P CI after refactor.")
-    with patch(
-        "vllm_ascend.utils.get_ascend_device_type",
-        return_value=AscendDeviceType._310P if is_310p else AscendDeviceType.A3,
-    ):
-        layer = SiluAndMul()
-        out = layer.forward(dummy_tensor)
-    # assert mock_maybe_prefetch_mlp_down_proj.call_count == 1
-    mock_maybe_prefetch_mlp_down_proj.assert_called_once()
-    if is_310p:
-        expected_arg = dummy_tensor.to(torch.float32)
-    else:
-        expected_arg = dummy_tensor
-    # assert mock_swiglu.call_count == 1
-    mock_swiglu.assert_called_once()
-    # assert mock_maybe_wait_prefetch_done.call_count == 1
-    mock_maybe_wait_prefetch_done.assert_called_once()
-    actual_arg = mock_swiglu.call_args[0][0]
-    assert torch.allclose(actual_arg, expected_arg), "npu_swiglu called with unexpected input"
-    expected_out = dummy_tensor + 1
-    assert torch.allclose(out, expected_out)
+    layer = SiluAndMul()
+    out = layer.forward(dummy_tensor)
+    expected_arg = dummy_tensor
+    # assert mock_maybe_prefetch_mlp_down_proj.call_count == 1
+    mock_maybe_prefetch_mlp_down_proj.assert_called_once()
+    # assert mock_swiglu.call_count == 1
+    mock_swiglu.assert_called_once()
+    # assert mock_maybe_wait_prefetch_done.call_count == 1
+    mock_maybe_wait_prefetch_done.assert_called_once()
+    actual_arg = mock_swiglu.call_args[0][0]
+    assert torch.allclose(actual_arg, expected_arg), "npu_swiglu called with unexpected input"
+    expected_out = dummy_tensor + 1
+    assert torch.allclose(out, expected_out)
+
+
+@pytest.mark.skipif(not is_310p_hw(), reason="310P device unittest case.")
+@patch("torch.nn.functional.silu", side_effect=lambda x: x + 1)
+@patch("torch.ops.vllm.maybe_wait_prefetch_done", side_effect=lambda x: None)
+@patch("torch.ops.vllm.maybe_prefetch_mlp_down_proj", side_effect=lambda x: None)
+def test_SiluAndMul_forward_310p(
+    mock_maybe_prefetch_mlp_down_proj,
+    mock_maybe_wait_prefetch_done,
+    mock_silu,
+    dummy_tensor,
+    default_vllm_config,
+):
+    layer = SiluAndMul()
+    out = layer.forward(dummy_tensor)
+    h = dummy_tensor.shape[-1] // 2
+    expected_arg = dummy_tensor[..., :h]
+    # assert mock_maybe_prefetch_mlp_down_proj.call_count == 1
+    mock_maybe_prefetch_mlp_down_proj.assert_called_once()
+    # assert mock_silu.call_count == 1
+    mock_silu.assert_called_once()
+    # assert mock_maybe_wait_prefetch_done.call_count == 1
+    mock_maybe_wait_prefetch_done.assert_called_once()
+    actual_arg = mock_silu.call_args[0][0]
+    assert torch.allclose(actual_arg, expected_arg), "swiglu called with unexpected input"
+    expected_out = (dummy_tensor[..., :h] + 1) * dummy_tensor[..., h:]
+    assert torch.allclose(out, expected_out)
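
For reference, the expected values asserted above line up with the usual SiluAndMul semantics: the input is split in half along the last dimension, the gate half is activated, and the result is multiplied by the up half. A minimal pure-PyTorch sketch of that reference behavior (an assumption about the layer's semantics, not the vllm-ascend implementation itself):

```python
import torch
import torch.nn.functional as F


def silu_and_mul_reference(x: torch.Tensor) -> torch.Tensor:
    """Reference SiluAndMul: silu(gate) * up, where gate/up are the two halves of the last dim."""
    h = x.shape[-1] // 2
    return F.silu(x[..., :h]) * x[..., h:]


# With torch.nn.functional.silu mocked as `lambda t: t + 1` (as in the 310P test),
# this reduces to (x[..., :h] + 1) * x[..., h:], which is exactly the expected_out
# asserted in test_SiluAndMul_forward_310p.
```

In the non-310P test, `torch_npu.npu_swiglu` is mocked as `lambda x: x + 1` and its return value is used directly as the layer output, so the expected output there is simply `dummy_tensor + 1`.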