### What this PR does / why we need it?
Currently, there are two paths in the code for determining the chip type:
`get_ascend_soc_version` uses the `get_soc_version` API from torch_npu,
while `is_310p` uses `_build_info.__soc_version__`, which is generated at
install time. We need to unify the two paths.
We need to unify these codes based on the following points:
1. We need to ensure consistency in chip type judgment between compiling
and running states;
2. In compiling state, we need chip type to complete op's compilation,
but in running state, we only need device
type(910B/910_93/310P/910_95/etc) to make code branch judgement;
3. In compiling state, torch_npu may not have been installed yet, so we
can't use torch_npu's api.
Based on the above points, we have made the following changes:
1. When user set env `SOC_VERSION`, use it; when not set, query
soc_version by `npu-smi`;
2. generate device_type based on soc_version when compiling, and write
`__device_type__` instead of `__soc_version__` in `_build_info.py`;
3. In running state, use `__device_type__` to judge code branch.
### Does this PR introduce _any_ user-facing change?
When the env `SOC_VERSION` is not set, the soc version will no longer default
to `ASCEND910B1`; instead it will be queried via `npu-smi`. When set, the env
`SOC_VERSION` must be one of the entries in the `soc_to_device` list in
`setup.py`.
- vLLM version: v0.11.0
- vLLM main:
2918c1b49c
Signed-off-by: zzzzwwjj <1183291235@qq.com>
77 lines
2.5 KiB
Python
77 lines
2.5 KiB
Python
#
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
# you may not use this file except in compliance with the License.
|
|
# You may obtain a copy of the License at
|
|
#
|
|
# http://www.apache.org/licenses/LICENSE-2.0
|
|
#
|
|
# Unless required by applicable law or agreed to in writing, software
|
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
# See the License for the specific language governing permissions and
|
|
# limitations under the License.
|
|
# This file is a part of the vllm-ascend project.
|
|
#
|
|
|
|
from unittest.mock import patch
|
|
|
|
import pytest
|
|
import torch
|
|
from vllm.model_executor.layers.activation import QuickGELU, SiluAndMul
|
|
|
|
from vllm_ascend.utils import AscendDeviceType
|
|
|
|
|
|
@pytest.fixture
def dummy_tensor():
    """Provide a small random half-precision tensor shared by the activation tests."""
    tensor = torch.randn(4, 8, dtype=torch.float16)
    return tensor
|
|
|
|
|
|
@patch("torch_npu.npu_fast_gelu", side_effect=lambda x: x + 1)
def test_QuickGELU_forward(mock_gelu, dummy_tensor):
    """QuickGELU.forward should delegate to torch_npu.npu_fast_gelu exactly once."""
    gelu_layer = QuickGELU()
    result = gelu_layer.forward(dummy_tensor)

    # The mocked kernel adds 1 to its input, so the layer output must be input + 1.
    assert torch.allclose(result, dummy_tensor + 1)
    mock_gelu.assert_called_once()
|
|
|
|
|
|
@pytest.mark.parametrize("is_310p", [True, False])
@patch("torch_npu.npu_swiglu", side_effect=lambda x: x + 1)
@patch("torch.ops.vllm.maybe_wait_prefetch_done", side_effect=lambda x: None)
@patch("torch.ops.vllm.maybe_prefetch_mlp_down_proj",
       side_effect=lambda x: None)
def test_SiluAndMul_forward(mock_maybe_prefetch_mlp_down_proj,
                            mock_maybe_wait_prefetch_done, mock_swiglu,
                            is_310p, dummy_tensor):
    """SiluAndMul.forward should invoke the npu_swiglu kernel and both MLP
    prefetch hooks exactly once, upcasting the kernel input to float32 when
    running on a 310P device."""

    with patch("vllm_ascend.utils.get_ascend_device_type",
               return_value=AscendDeviceType._310P
               if is_310p else AscendDeviceType._910_93):
        layer = SiluAndMul()
        out = layer.forward(dummy_tensor)

        # On 310P the layer is expected to upcast the input to float32
        # before calling the kernel; elsewhere it passes it through as-is.
        if is_310p:
            expected_arg = dummy_tensor.to(torch.float32)
        else:
            expected_arg = dummy_tensor

        # Each prefetch hook and the kernel itself must fire exactly once
        # per forward pass.
        mock_maybe_prefetch_mlp_down_proj.assert_called_once()
        mock_swiglu.assert_called_once()
        mock_maybe_wait_prefetch_done.assert_called_once()

        # Verify the tensor actually handed to npu_swiglu matches the
        # expected (possibly upcast) input.
        actual_arg = mock_swiglu.call_args[0][0]
        assert torch.allclose(
            actual_arg,
            expected_arg), "npu_swiglu called with unexpected input"

        # The mocked kernel adds 1, so the layer output must be input + 1.
        expected_out = dummy_tensor + 1
        assert torch.allclose(out, expected_out)
|