[310P]: refactoring for 310p kvcache and some ops class (#6117)
### What this PR does / why we need it?
* Refactor the LayerNorm and activation operator classes to decouple the
310P device implementation from the main branch (a sketch of the dispatch
pattern follows this list).
* Refactor `mm_encoder_attention` on 310P to use the
`torch_npu._npu_flash_attention_unpad` operator.
* Refactor the QKV inputs in the prefill stage of `attention_v1` on 310P
so they are no longer padded to a multiple of 16 (a sketch of the removed
padding arithmetic follows this list).
* Refactor `model_runner` on 310P to align the KV-cache initialization
logic with the mainline implementation.
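
To make the first bullet concrete, here is a minimal sketch of the decoupling pattern, assuming illustrative names: `AscendSiluAndMul`, `AscendSiluAndMul310P`, and `build_silu_and_mul` are invented for this example, while `get_ascend_device_type`, `AscendDeviceType`, and `torch_npu.npu_swiglu` are the names exercised by the tests in the diff below.

```python
# Hypothetical sketch of the decoupling pattern; the class names are
# illustrative, not the exact ones introduced by this PR.
import torch
import torch_npu

from vllm_ascend.utils import AscendDeviceType, get_ascend_device_type


class AscendSiluAndMul:
    """Mainline path: fused SwiGLU via the NPU operator."""

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return torch_npu.npu_swiglu(x)


class AscendSiluAndMul310P:
    """310P path, kept as its own class so 310P-specific handling
    no longer leaks into the mainline implementation."""

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Any 310P-specific pre-/post-processing would live here,
        # isolated from the mainline class above.
        return torch_npu.npu_swiglu(x)


def build_silu_and_mul():
    # Select the implementation once, based on the detected device type,
    # instead of branching on 310P checks inside a shared forward().
    if get_ascend_device_type() == AscendDeviceType._310P:
        return AscendSiluAndMul310P()
    return AscendSiluAndMul()
```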
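
For the third bullet, the change amounts to dropping a round-up-to-a-multiple-of-16 on the prefill token count before building QKV. A minimal sketch of the arithmetic being removed, with invented names (`padded_len`, `num_tokens`, `pad_to`):

```python
# Hypothetical illustration of the alignment padding that the 310P prefill
# path no longer applies; the names here are invented for the example.
def padded_len(num_tokens: int, pad_to: int = 16) -> int:
    # Round the token count up to the next multiple of `pad_to`.
    return (num_tokens + pad_to - 1) // pad_to * pad_to


# Old behaviour: a 50-token prompt allocated QKV rows for 64 tokens.
assert padded_len(50) == 64
# New behaviour: QKV keeps its true length, avoiding the wasted rows
# and the pad/unpad bookkeeping around the attention call.
```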
### Does this PR introduce _any_ user-facing change?
NO
### How was this patch tested?
Verified with the e2e test suite.
- vLLM version: v0.13.0
- vLLM main: d68209402d
---------
Signed-off-by: Tflowers-0129 <2906339855@qq.com>
@@ -21,6 +21,7 @@ from vllm.config import set_current_vllm_config
 from vllm.model_executor.layers.activation import QuickGELU, SiluAndMul
 from vllm_ascend.utils import AscendDeviceType
+from vllm_ascend.utils import is_310p as is_310p_hw


 @pytest.fixture
@@ -51,18 +52,26 @@ def test_QuickGELU_forward(mock_gelu, dummy_tensor, default_vllm_config):
     mock_gelu.assert_called_once()


+@pytest.mark.skipif(is_310p_hw(), reason="310P operator classes have already been refactored.")
 @pytest.mark.parametrize("is_310p", [True, False])
 @patch("torch_npu.npu_swiglu", side_effect=lambda x: x + 1)
 @patch("torch.ops.vllm.maybe_wait_prefetch_done", side_effect=lambda x: None)
-@patch("torch.ops.vllm.maybe_prefetch_mlp_down_proj",
-       side_effect=lambda x: None)
-def test_SiluAndMul_forward(mock_maybe_prefetch_mlp_down_proj,
-                            mock_maybe_wait_prefetch_done, mock_swiglu,
-                            is_310p, dummy_tensor, default_vllm_config):
-    with patch("vllm_ascend.utils.get_ascend_device_type",
-               return_value=AscendDeviceType._310P
-               if is_310p else AscendDeviceType.A3):
+@patch("torch.ops.vllm.maybe_prefetch_mlp_down_proj", side_effect=lambda x: None)
+def test_SiluAndMul_forward(
+    mock_maybe_prefetch_mlp_down_proj,
+    mock_maybe_wait_prefetch_done,
+    mock_swiglu,
+    is_310p,
+    dummy_tensor,
+    default_vllm_config,
+):
+    if is_310p and (not is_310p_hw()):
+        pytest.skip("Pseudo-310P param case is not valid on non-310P CI after refactor.")
+
+    with patch(
+        "vllm_ascend.utils.get_ascend_device_type",
+        return_value=AscendDeviceType._310P if is_310p else AscendDeviceType.A3,
+    ):
         layer = SiluAndMul()
         out = layer.forward(dummy_tensor)
@@ -81,9 +90,7 @@ def test_SiluAndMul_forward(mock_maybe_prefetch_mlp_down_proj,
     mock_maybe_wait_prefetch_done.assert_called_once()

     actual_arg = mock_swiglu.call_args[0][0]
-    assert torch.allclose(
-        actual_arg,
-        expected_arg), "npu_swiglu called with unexpected input"
+    assert torch.allclose(actual_arg, expected_arg), "npu_swiglu called with unexpected input"

     expected_out = dummy_tensor + 1
     assert torch.allclose(out, expected_out)