[310P]: refactoring for 310p kvcache and some ops class (#6117)

### What this PR does / why we need it?
* Refactor the LayerNorm and activation operator classes to decouple the
310P device implementation from the main branch (a minimal sketch of this
dispatch pattern follows the list).
* Refactor `mm_encoder_attention` on 310P to use the
`torch_npu._npu_flash_attention_unpad` operator.
* Refactor the QKV inputs in the prefill stage of `attention_v1` on 310P
so they are no longer padded to 16× alignment (a second sketch after the
list illustrates what that padding did).
* Refactor `model_runner` on 310P to align the KV-cache initialization
logic with the mainline implementation.
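
A minimal sketch of the operator-class decoupling described in the first bullet, assuming the 310P variant is selected once through a small dispatch helper; `AscendRMSNorm310P` and `get_rms_norm_cls` are illustrative names, not the code in this PR:

```python
# Illustrative only: one dispatch point picks the 310P variant instead of
# scattering `if is_310p()` branches through the shared forward path.
from typing import Optional, Type

import torch
from vllm.model_executor.layers.layernorm import RMSNorm

from vllm_ascend.utils import is_310p


class AscendRMSNorm310P(RMSNorm):
    """Hypothetical 310P-only override; the mainline class stays device-agnostic."""

    def forward_oot(self, x: torch.Tensor, residual: Optional[torch.Tensor] = None):
        # A real 310P class would call its dedicated fused NPU kernel here;
        # falling back to the reference implementation keeps the sketch runnable.
        return super().forward_native(x, residual)


def get_rms_norm_cls() -> Type[RMSNorm]:
    # The device check happens once, where the layer class is chosen.
    return AscendRMSNorm310P if is_310p() else RMSNorm
```

Usage would be `get_rms_norm_cls()(hidden_size=8, eps=1e-05)` at layer-construction time, so the 310P branch never leaks into the shared code path.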
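
And a sketch of what the 16× padding removed from the 310P prefill path (third bullet) used to do; the helper name and the token-first QKV layout are assumptions for illustration only:

```python
import torch

ALIGN = 16  # alignment the old 310P prefill path padded the token dimension to


def pad_qkv_to_align(qkv: torch.Tensor) -> torch.Tensor:
    """Old behaviour (illustrative): zero-pad tokens up to a multiple of 16.
    After this refactor the prefill QKV is handed to attention unpadded."""
    num_tokens = qkv.shape[0]
    padded = (num_tokens + ALIGN - 1) // ALIGN * ALIGN
    if padded == num_tokens:
        return qkv
    pad = qkv.new_zeros((padded - num_tokens, *qkv.shape[1:]))
    return torch.cat([qkv, pad], dim=0)
```

Dropping this step means the attention operator sees the true token count, and the padded rows no longer have to be sliced away afterwards.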

### Does this PR introduce _any_ user-facing change?
NO

### How was this patch tested?
Verified with the e2e tests.

- vLLM version: v0.13.0
- vLLM main: d68209402d

---------

Signed-off-by: Tflowers-0129 <2906339855@qq.com>
Author: Shaoxu Cheng
Date: 2026-01-24 20:34:29 +08:00
Committed by: GitHub
Commit: fbae41697e (parent: 5b746f3e83)

12 changed files with 289 additions and 203 deletions


@@ -5,8 +5,9 @@ import torch
 from vllm.config import set_current_vllm_config
 from vllm.model_executor.layers.layernorm import RMSNorm
-from vllm_ascend.utils import AscendDeviceType
-from vllm_ascend.utils import enable_custom_op
+from vllm_ascend.utils import AscendDeviceType, enable_custom_op
+from vllm_ascend.utils import is_310p as is_310p_hw
 
 enable_custom_op()
@@ -22,12 +23,12 @@ def mock_rms_norm(x, weight, eps):
 def mock_add_rms_norm(x, residual, weight, eps):
     return 2 * x, None, 2 * residual
 
 def mock_add_rms_norm_bias(x, residual, weight, bias, eps):
     if bias is None:
         return 2 * x, None, 2 * residual
     else:
         return 2 * x + bias, None, 2 * residual
 
 @pytest.fixture(autouse=True)
@@ -39,18 +40,22 @@ def default_vllm_config():
     yield mock_config
 
+@pytest.mark.skipif(is_310p_hw(), reason="310P operator classes have already been refactored.")
 @pytest.mark.parametrize("is_310p", [True, False])
-@pytest.mark.parametrize("residual",
-                         [None, torch.randn(4, 8, dtype=torch.float32)])
+@pytest.mark.parametrize("residual", [None, torch.randn(4, 8, dtype=torch.float32)])
 @patch("torch_npu.npu_rms_norm", side_effect=mock_rms_norm)
 @patch("torch_npu.npu_add_rms_norm", side_effect=mock_add_rms_norm)
 @patch("torch.ops._C_ascend.npu_add_rms_norm_bias", side_effect=mock_add_rms_norm_bias)
-def test_RMSNorm_forward(mock_add_rms_norm_bias, mock_add_rmsnorm, mock_rmsnorm, is_310p, residual,
-                         dummy_tensor, default_vllm_config):
+def test_RMSNorm_forward(
+    mock_add_rms_norm_bias, mock_add_rmsnorm, mock_rmsnorm, is_310p, residual, dummy_tensor, default_vllm_config
+):
+    if is_310p and (not is_310p_hw()):
+        pytest.skip("Pseudo-310P branch is invalid on non-310P CI after refactor.")
-    with patch("vllm_ascend.utils.get_ascend_device_type",
-               return_value=AscendDeviceType._310P
-               if is_310p else AscendDeviceType.A3):
+    with patch(
+        "vllm_ascend.utils.get_ascend_device_type",
+        return_value=AscendDeviceType._310P if is_310p else AscendDeviceType.A3,
+    ):
         layer = RMSNorm(hidden_size=8, eps=1e-05)
         if residual is not None:
             out_x, out_residual = layer.forward_oot(dummy_tensor, residual)