#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
#

from unittest.mock import MagicMock, patch

import pytest
import torch
from vllm.config import set_current_vllm_config
from vllm.model_executor.layers.activation import QuickGELU, SiluAndMul

from vllm_ascend.utils import is_310p as is_310p_hw


@pytest.fixture
def dummy_tensor():
    return torch.randn(4, 8, dtype=torch.float16)


@pytest.fixture
def default_vllm_config():
    # Minimal mocked vllm config so the activation layers dispatch to the
    # custom (NPU) forward path during these tests.
    mock_config = MagicMock()
    mock_config.compilation_config.dispatch_forward_backend = "eager"
    mock_config.compilation_config.custom_ops = ["all"]
    with set_current_vllm_config(mock_config):
        yield mock_config


@patch("torch_npu.npu_fast_gelu", side_effect=lambda x: x + 1)
def test_QuickGELU_forward(mock_gelu, dummy_tensor, default_vllm_config):
    layer = QuickGELU()
    out = layer.forward(dummy_tensor)

    # npu_fast_gelu is mocked as x + 1, so the output must be input + 1.
    expected_out = dummy_tensor + 1
    assert torch.allclose(out, expected_out)
    mock_gelu.assert_called_once()


@pytest.mark.skipif(is_310p_hw(), reason="non-310P device unittest case.")
@patch("vllm_ascend.ops.activation.get_weight_prefetch_method",
       return_value=MagicMock())
@patch("torch_npu.npu_swiglu", side_effect=lambda x: x + 1)
def test_SiluAndMul_forward(
    mock_swiglu,
    mock_get_weight_prefetch_method,
    dummy_tensor,
    default_vllm_config,
):
    layer = SiluAndMul()
    out = layer.forward(dummy_tensor)

    # The fused npu_swiglu kernel receives the full input tensor.
    expected_arg = dummy_tensor
    mock_swiglu.assert_called_once()
    actual_arg = mock_swiglu.call_args[0][0]
    assert torch.allclose(actual_arg, expected_arg), \
        "npu_swiglu called with unexpected input"

    expected_out = dummy_tensor + 1
    assert torch.allclose(out, expected_out)


@pytest.mark.skipif(not is_310p_hw(), reason="310P device unittest case.")
@patch("torch.nn.functional.silu", side_effect=lambda x: x + 1)
def test_SiluAndMul_forward_310p(
    mock_silu,
    dummy_tensor,
    default_vllm_config,
):
    layer = SiluAndMul()
    out = layer.forward(dummy_tensor)

    # On 310P the layer falls back to the unfused path:
    # silu(x[..., :h]) * x[..., h:], where h is half the last dimension,
    # so F.silu should only see the first half of the input.
    h = dummy_tensor.shape[-1] // 2
    expected_arg = dummy_tensor[..., :h]
    mock_silu.assert_called_once()
    actual_arg = mock_silu.call_args[0][0]
    assert torch.allclose(actual_arg, expected_arg), \
        "F.silu called with unexpected input"

    expected_out = (dummy_tensor[..., :h] + 1) * dummy_tensor[..., h:]
    assert torch.allclose(out, expected_out)
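

# For reference, a minimal sketch of the activation semantics these tests
# mock, assuming the standard SiluAndMul contract: silu over the first half
# of the last dimension, gated elementwise by the second half. The name
# `naive_silu_and_mul` is a hypothetical helper for illustration only, not
# part of vllm-ascend; on NPU hardware the fused torch_npu.npu_swiglu kernel
# computes the same result in a single call.
def naive_silu_and_mul(x: torch.Tensor) -> torch.Tensor:
    # Split the hidden dimension in half and gate: silu(gate) * up.
    h = x.shape[-1] // 2
    return torch.nn.functional.silu(x[..., :h]) * x[..., h:]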