[UT]: refactoring 310p ops ut (#6296)
### What this PR does / why we need it?
Refactor the swiglu and rms_norm unit test cases for 310P and 910B: each case is now gated on the real hardware via `pytest.mark.skipif` instead of faking the device type by patching `get_ascend_device_type`. Apply the attention_v1 `get_kv_cache_shape` and metadata build on all platforms.
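The gating pattern is sketched below, assuming a hypothetical `SOC_VERSION` environment probe in place of the suite's real `is_310p_hw()` helper:

```python
import os

import pytest


def is_310p_hw() -> bool:
    # Hypothetical probe standing in for the test suite's real helper;
    # assumes CI exports the runner's SoC name, e.g. "Ascend310P3".
    return "310P" in os.environ.get("SOC_VERSION", "")


# Runs only on non-310P CI (910B-class devices).
@pytest.mark.skipif(is_310p_hw(), reason="non_310P device unittest case.")
def test_op_forward():
    ...


# Runs only on real 310P hardware.
@pytest.mark.skipif(not is_310p_hw(), reason="310P device unittest case.")
def test_op_forward_310p():
    ...
```

Splitting the cases this way lets each platform's CI exercise only the code paths its operators actually take, rather than emulating the other device by mocking global state.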
### Does this PR introduce _any_ user-facing change?
NA
### How was this patch tested?
CI unit tests
- vLLM version: v0.14.1
- vLLM main:
dc917cceb8
---------
Signed-off-by: pu-zhe <zpuaa@outlook.com>
```diff
@@ -40,43 +40,46 @@ def default_vllm_config():
     yield mock_config
 
 
-@pytest.mark.skipif(is_310p_hw(), reason="310P operator classes have already been refactored.")
-@pytest.mark.parametrize("is_310p", [True, False])
+@pytest.mark.skipif(is_310p_hw(), reason="non_310P device unittest case.")
 @pytest.mark.parametrize("residual", [None, torch.randn(4, 8, dtype=torch.float32)])
 @patch("torch_npu.npu_rms_norm", side_effect=mock_rms_norm)
 @patch("torch_npu.npu_add_rms_norm", side_effect=mock_add_rms_norm)
 @patch("torch.ops._C_ascend.npu_add_rms_norm_bias", side_effect=mock_add_rms_norm_bias)
 def test_RMSNorm_forward(
-    mock_add_rms_norm_bias, mock_add_rmsnorm, mock_rmsnorm, is_310p, residual, dummy_tensor, default_vllm_config
+    mock_add_rms_norm_bias, mock_add_rmsnorm, mock_rmsnorm, residual, dummy_tensor, default_vllm_config
 ):
-    if is_310p and (not is_310p_hw()):
-        pytest.skip("Pseudo-310P branch is invalid on non-310P CI after refactor.")
-
-    with patch(
-        "vllm_ascend.utils.get_ascend_device_type",
-        return_value=AscendDeviceType._310P if is_310p else AscendDeviceType.A3,
-    ):
-        layer = RMSNorm(hidden_size=8, eps=1e-05)
-        if residual is not None:
-            out_x, out_residual = layer.forward_oot(dummy_tensor, residual)
-
-            if is_310p:
-                expected_arg_x = dummy_tensor + residual.to(dummy_tensor.dtype)
-                expected_out_x = expected_arg_x + 1
-                expected_out_residual = expected_arg_x.to(residual.dtype)
-
-                mock_rmsnorm.assert_called_once()
-                assert torch.allclose(out_x, expected_out_x)
-                assert torch.allclose(out_residual, expected_out_residual)
-            else:
-                expected_out_x = 2 * dummy_tensor
-                expected_out_residual = 2 * residual
-                mock_add_rms_norm_bias.assert_called_once()
-                assert torch.allclose(out_x, expected_out_x)
-                assert torch.allclose(out_residual, expected_out_residual)
-        else:
-            out_x = layer.forward_oot(dummy_tensor, residual)
-            expected_out_x = dummy_tensor + 1
-
-            mock_rmsnorm.assert_called_once()
-            assert torch.allclose(out_x, expected_out_x)
+    layer = RMSNorm(hidden_size=8, eps=1e-05)
+    if residual is not None:
+        out_x, out_residual = layer.forward_oot(dummy_tensor, residual)
+        expected_out_x = 2 * dummy_tensor
+        expected_out_residual = 2 * residual
+        mock_add_rms_norm_bias.assert_called_once()
+        assert torch.allclose(out_x, expected_out_x)
+        assert torch.allclose(out_residual, expected_out_residual)
+    else:
+        out_x = layer.forward_oot(dummy_tensor, residual)
+        expected_out_x = dummy_tensor + 1
+        mock_rmsnorm.assert_called_once()
+        assert torch.allclose(out_x, expected_out_x)
+
+
+@pytest.mark.skipif(not is_310p_hw(), reason="310P device unittest case.")
+@pytest.mark.parametrize("residual", [None, torch.randn(4, 8, dtype=torch.float16)])
+@patch("torch_npu.npu_rms_norm", side_effect=mock_rms_norm)
+def test_RMSNorm_forward_310p(
+    mock_rmsnorm, residual, dummy_tensor, default_vllm_config
+):
+    layer = RMSNorm(hidden_size=8, eps=1e-05)
+    if residual is not None:
+        out_x, out_residual = layer.forward_oot(dummy_tensor, residual)
+        expected_out_residual = dummy_tensor + residual
+        expected_out_x = expected_out_residual + 1
+        mock_rmsnorm.assert_called_once()
+        assert torch.allclose(out_x, expected_out_x)
+        assert torch.allclose(out_residual, expected_out_residual)
+    else:
+        out_x = layer.forward_oot(dummy_tensor, residual)
+        expected_out_x = dummy_tensor + 1
+        mock_rmsnorm.assert_called_once()
+        assert torch.allclose(out_x, expected_out_x)
```
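The expected values asserted above are only meaningful given what the patched NPU ops return. Below is a sketch of mock helpers consistent with those assertions; the argument lists and returned tuple shapes are assumptions (the real `mock_rms_norm`, `mock_add_rms_norm`, and `mock_add_rms_norm_bias` live in the test module), and `forward_oot` is assumed to take element 0 of each tuple as the normalized output:

```python
def mock_rms_norm(x, gamma, eps):
    # Plain-norm cases assert out_x == input + 1, including the 310P
    # residual case, where the layer adds the residual before the call.
    return x + 1, None


def mock_add_rms_norm(x, residual, gamma, eps):
    # Patched for completeness; no assertion above pins its output down,
    # so this just mirrors the assumed (y, rstd, x_out) tuple shape.
    return 2 * x, None, x + residual


def mock_add_rms_norm_bias(x, residual, gamma, beta, eps):
    # Non-310P residual cases assert out_x == 2 * x and
    # out_residual == 2 * residual.
    return 2 * x, None, 2 * residual
```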