import unittest

import pytest
import torch
from pytest_mock import MockerFixture
from vllm.model_executor.layers.layernorm import RMSNorm

from tests.ut.base import PytestBase
from vllm_ascend.quantization.w8a8 import AscendW8A8LinearMethod


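# The helpers below stand in for the torch_npu kernels patched in by the
# autouse fixture. Each returns an easily recognizable transform (x + 1 for
# the plain norm, 2 * x for the fused add + norm, an int8 cast for the quant
# fusion) so assertions can tell which kernel was actually dispatched.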
def mock_rms_norm(x, weight, eps):
    return x + 1, None


def mock_add_rms_norm(x, residual, weight, eps):
    return 2 * x, None, 2 * residual


def mock_add_rms_norm_quant_with_bias(x, residual, weight, quant_scale,
                                      quant_offset, beta, epsilon):
    x_out = 2 * x
    residual_out = 2 * residual
    x_out_quant = x_out.to(torch.int8)
    residual_out_quant = residual_out.to(torch.int8)
    return x_out_quant, None, residual_out_quant


class TestAscendRMSNorm(PytestBase):
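
    # Autouse fixture: every test in this class runs with the torch_npu
    # kernels and the prefetch hook replaced by the mocks above.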
    @pytest.fixture(autouse=True)
    def context(self, mocker: MockerFixture):
        mocker.patch("torch_npu.npu_rms_norm", side_effect=mock_rms_norm)
        mocker.patch("torch_npu.npu_add_rms_norm",
                     side_effect=mock_add_rms_norm)
        mocker.patch("torch_npu.npu_add_rms_norm_quant",
                     side_effect=mock_add_rms_norm_quant_with_bias)
        mocker.patch("torch.ops.vllm.maybe_wait_prefetch_done",
                     side_effect=lambda x: None)

    # Test case for the most common and basic scenario
    @pytest.mark.parametrize(
        "residual", [None, torch.randn(4, 8, dtype=torch.float16)])
    def test_forward_oot_basic(self, residual):
        layer = RMSNorm(hidden_size=8, eps=1e-05)
        x = torch.randn(4, 8, dtype=torch.float16)
        if residual is not None:
            x_out, residual_out = layer.forward_oot(x, residual)

            x_out_expected = 2 * x
            residual_out_expected = 2 * residual

            assert torch.allclose(x_out, x_out_expected)
            assert torch.allclose(residual_out, residual_out_expected)
        else:
            x_out = layer.forward(x, residual)
            x_out_expected = x + 1

            assert torch.allclose(x_out, x_out_expected)

    # Test case for addrmsnorm + w8a8 quant fusion
    def test_forward_oot_with_quant_fusion(self, mocker: MockerFixture):
        mock_is_310p = mocker.patch("vllm_ascend.utils.is_310p")
        mock_is_310p.return_value = False
        mock_get_forward_context = mocker.patch(
            "vllm_ascend.ops.layernorm.get_forward_context")

        # Simulating a scenario with quant_fusion enabled
        mock_forward_context = mocker.MagicMock()

        mock_model_instance = mocker.MagicMock()
        mock_forward_context.model_instance = mock_model_instance
        num_hidden_layers = 3
        mock_model_instance.model.layers = [
            mocker.MagicMock() for _ in range(num_hidden_layers)
        ]
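
        # Wire W8A8 linear methods onto the qkv and gate_up projections of
        # the first two layers so the quant fusion path can be exercised.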
        mock_layer_0 = mock_model_instance.model.layers[0]
        mock_layer_0.self_attn.qkv_proj = mocker.MagicMock()
        mock_layer_0.mlp.gate_up_proj = mocker.MagicMock()

        mock_layer_1 = mock_model_instance.model.layers[1]
        mock_layer_1.self_attn.qkv_proj = mocker.MagicMock()
        mock_layer_1.mlp.gate_up_proj = mocker.MagicMock()

        mock_quant_method_0_qkv = mocker.MagicMock()
        mock_quant_method_0_qkv.quant_method = AscendW8A8LinearMethod()
        mock_quant_method_0_gate_up = mocker.MagicMock()
        mock_quant_method_0_gate_up.quant_method = AscendW8A8LinearMethod()
        mock_layer_0.self_attn.qkv_proj.quant_method = mock_quant_method_0_qkv
        mock_layer_0.mlp.gate_up_proj.quant_method = mock_quant_method_0_gate_up

        mock_quant_method_1_qkv = mocker.MagicMock()
        mock_quant_method_1_qkv.quant_method = AscendW8A8LinearMethod()
        mock_quant_method_1_gate_up = mocker.MagicMock()
        mock_quant_method_1_gate_up.quant_method = AscendW8A8LinearMethod()
        mock_layer_1.self_attn.qkv_proj.quant_method = mock_quant_method_1_qkv
        mock_layer_1.mlp.gate_up_proj.quant_method = mock_quant_method_1_gate_up

        mock_get_forward_context.return_value = mock_forward_context
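
        # Initial context state: dense fusion flavor, prefetch disabled,
        # starting at layer 0 of num_hidden_layers.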
        mock_forward_context.addrmsnorm_quant_fusion_enabled = True
        mock_forward_context.prefetch_mlp_enabled = False
        mock_forward_context.layer_idx = 0
        mock_forward_context.num_hidden_layers = num_hidden_layers
        mock_forward_context.fusion_linear = "gate_up_dense"
        mock_forward_context.weight_prefetch_method = None

        # Ensure fusion and layer_idx increment are handled correctly
        x = torch.randn(4, 8, dtype=torch.float16)
        residual = torch.randn(4, 8, dtype=torch.float16)
        layer = RMSNorm(hidden_size=8, eps=1e-05)
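
        # State machine expected by the assertions below: a call in a
        # "gate_up*"/"gate*" state flips to the matching "qkv*" state and
        # advances layer_idx; a call in a "qkv*" state flips back without
        # advancing. Once layer_idx reaches num_hidden_layers, the state
        # stops changing.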
        x_out, residual_out = layer.forward_oot(x, residual)

        assert mock_get_forward_context.call_count == 2
        assert mock_forward_context.fusion_linear == "qkv_dense"
        assert mock_forward_context.layer_idx == 1

        x_out, residual_out = layer.forward_oot(x, residual)

        assert mock_get_forward_context.call_count == 4
        assert mock_forward_context.fusion_linear == "gate_up_dense"
        assert mock_forward_context.layer_idx == 1

        mock_forward_context.fusion_linear = "gate_moe"
        x_out, residual_out = layer.forward_oot(x, residual)

        assert mock_get_forward_context.call_count == 5
        fusion_linear_expected = "qkv_moe"
        assert mock_forward_context.fusion_linear == fusion_linear_expected
        assert mock_forward_context.layer_idx == 2

        x_out, residual_out = layer.forward_oot(x, residual)

        assert mock_get_forward_context.call_count == 6
        fusion_linear_expected = "gate_moe"
        assert mock_forward_context.fusion_linear == fusion_linear_expected
        assert mock_forward_context.layer_idx == 2

        # last layer returned directly
        x_out, residual_out = layer.forward_oot(x, residual)

        assert mock_get_forward_context.call_count == 7
        assert mock_forward_context.fusion_linear == "qkv_moe"
        assert mock_forward_context.layer_idx == 3

        x_out, residual_out = layer.forward_oot(x, residual)

        assert mock_get_forward_context.call_count == 8
        assert mock_forward_context.fusion_linear == "qkv_moe"
        assert mock_forward_context.layer_idx == 3


if __name__ == '__main__':
    unittest.main()