### What this PR does / why we need it?
Upgrade the vLLM commit to 0109 (bde38c11df0ea066a740efe9b77fff5418be45df):
1. Remove `init_cached_hf_modules` due to
https://github.com/vllm-project/vllm/pull/31786
2. Fix the spec_decode e2e test broken by
https://github.com/vllm-project/vllm/pull/29821
3. Fix `vllm.v1.attention.backends.utils` due to
https://github.com/vllm-project/vllm/pull/31891
4. Compute `self.seq_lens - query_lens` with both tensors on the same device due to
https://github.com/vllm-project/vllm/pull/31773 (see the sketch after this list)
5. Skip the model_runner_v2 e2e test due to `'_OpNamespace' '_C' object has
no attribute 'get_cuda_view_from_cpu_tensor'` (a skip sketch also follows the list)
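
Item 4 boils down to a device-alignment fix: subtracting a CPU tensor from an accelerator tensor raises a runtime error, so one operand must be moved to the other's device first. A minimal sketch of the pattern, with illustrative tensor values; the actual placement and call site in vllm-ascend may differ:

```python
import torch

# Hypothetical illustration of the device-mismatch fix. Tensor names mirror
# the PR description; the values are made up for the example.
seq_lens = torch.tensor([16, 32, 48])   # e.g. lives on the accelerator
query_lens = torch.tensor([4, 8, 12])   # e.g. built on the CPU

# Before: `seq_lens - query_lens` fails when the two devices differ.
# After: align devices explicitly, then subtract.
num_computed_tokens = seq_lens - query_lens.to(seq_lens.device)
```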
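Item 5 skips the test rather than fixing it, since the missing `get_cuda_view_from_cpu_tensor` op comes from upstream. A minimal sketch of such a skip with pytest; the test name here is hypothetical, not the actual one in the repo:

```python
import pytest

# Skipped until the upstream op is available on this platform.
@pytest.mark.skip(
    reason="'_OpNamespace' '_C' object has no attribute "
    "'get_cuda_view_from_cpu_tensor'")
def test_model_runner_v2_e2e():
    ...
```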
- vLLM version: v0.13.0
- vLLM main:
2f4e6548ef
Signed-off-by: hfadzxy <starmoon_zhang@163.com>
```python
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
#

from unittest.mock import MagicMock, patch

import pytest
import torch
from vllm.config import set_current_vllm_config
from vllm.model_executor.layers.activation import QuickGELU, SiluAndMul

from vllm_ascend.utils import AscendDeviceType


@pytest.fixture
def dummy_tensor():
    return torch.randn(4, 8, dtype=torch.float16)


@pytest.fixture
def default_vllm_config():
    mock_config = MagicMock()
    mock_config.compilation_config.dispatch_forward_backend = "eager"
    mock_config.compilation_config.custom_ops = ["all"]
    with set_current_vllm_config(mock_config):
        yield mock_config


@patch("torch_npu.npu_fast_gelu", side_effect=lambda x: x + 1)
def test_QuickGELU_forward(mock_gelu, dummy_tensor, default_vllm_config):
    layer = QuickGELU()
    out = layer.forward(dummy_tensor)

    expected_out = dummy_tensor + 1
    assert torch.allclose(out, expected_out)
    mock_gelu.assert_called_once()


@pytest.mark.parametrize("is_310p", [True, False])
@patch("torch_npu.npu_swiglu", side_effect=lambda x: x + 1)
@patch("torch.ops.vllm.maybe_wait_prefetch_done", side_effect=lambda x: None)
@patch("torch.ops.vllm.maybe_prefetch_mlp_down_proj",
       side_effect=lambda x: None)
def test_SiluAndMul_forward(mock_maybe_prefetch_mlp_down_proj,
                            mock_maybe_wait_prefetch_done, mock_swiglu,
                            is_310p, dummy_tensor, default_vllm_config):
    with patch("vllm_ascend.utils.get_ascend_device_type",
               return_value=AscendDeviceType._310P
               if is_310p else AscendDeviceType.A3):
        layer = SiluAndMul()
        out = layer.forward(dummy_tensor)

        if is_310p:
            expected_arg = dummy_tensor.to(torch.float32)
        else:
            expected_arg = dummy_tensor

        mock_maybe_prefetch_mlp_down_proj.assert_called_once()
        mock_swiglu.assert_called_once()
        mock_maybe_wait_prefetch_done.assert_called_once()

        actual_arg = mock_swiglu.call_args[0][0]
        assert torch.allclose(
            actual_arg,
            expected_arg), "npu_swiglu called with unexpected input"

        expected_out = dummy_tensor + 1
        assert torch.allclose(out, expected_out)
```