[310P]: refactoring for 310P KV cache and some ops classes (#6117)

### What this PR does / why we need it?
* Refactor the LayerNorm and activation operator classes to decouple the
310P device implementation from the main branch (see the first sketch
after this list).
* Refactor `mm_encoder_attention` on 310P to use the
`torch_npu._npu_flash_attention_unpad` operator.
* Refactor the QKV inputs in the prefill stage of `attention_v1` on 310P
so they are no longer padded to 16× alignment (see the padding sketch
below).
* Refactor `model_runner` on 310P to align the KV-cache initialization
logic with the mainline implementation.
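
Below is a minimal sketch of what the activation-op decoupling could look like, assuming a subclass-per-device pattern. `AscendSiluAndMul310P`, `build_silu_and_mul`, and the import path are hypothetical names for illustration, not necessarily what this PR uses; only `npu_swiglu`, `AscendDeviceType`, and `get_ascend_device_type` appear in the actual diff.

```python
# Sketch only, not the PR's actual code: one way to decouple the 310P
# variant of an op from the main path via a device-specific subclass.
import torch
import torch_npu

from vllm_ascend.ops.activation import AscendSiluAndMul  # assumed path
from vllm_ascend.utils import AscendDeviceType, get_ascend_device_type


class AscendSiluAndMul310P(AscendSiluAndMul):
    """310P variant: compute npu_swiglu in float32, cast back to float16."""

    def forward_oot(self, x: torch.Tensor) -> torch.Tensor:
        return torch_npu.npu_swiglu(x.to(torch.float32)).to(torch.float16)


def build_silu_and_mul() -> AscendSiluAndMul:
    # Choose the device-specific class once at construction time instead
    # of branching on the device type inside every forward call.
    if get_ascend_device_type() == AscendDeviceType._310P:
        return AscendSiluAndMul310P()
    return AscendSiluAndMul()
```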
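And for the `attention_v1` bullet, a toy example of what "padded to 16× alignment" means for the prefill token dimension; `pad_to_multiple` is an illustrative helper, not code from this PR:

```python
# Toy illustration only: before this PR, prefill QKV tensors on 310P were
# padded along the token dimension up to a multiple of 16.
import torch
import torch.nn.functional as F


def pad_to_multiple(t: torch.Tensor, multiple: int = 16) -> torch.Tensor:
    pad = (-t.shape[0]) % multiple
    # F.pad pads dims from the last backwards: (0, 0) leaves the hidden
    # dim alone, (0, pad) extends the token dim with zeros.
    return F.pad(t, (0, 0, 0, pad)) if pad else t


q = torch.randn(37, 128)         # 37 prefill tokens
print(pad_to_multiple(q).shape)  # torch.Size([48, 128]) -> 16-aligned
```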

### Does this PR introduce _any_ user-facing change?
NO

### How was this patch tested?
Covered by the existing e2e tests.

- vLLM version: v0.13.0
- vLLM main: d68209402d

---------

Signed-off-by: Tflowers-0129 <2906339855@qq.com>
Author: Shaoxu Cheng
Date: 2026-01-24 20:34:29 +08:00
Committed by: GitHub
Parent: 5b746f3e83
Commit: fbae41697e
12 changed files with 289 additions and 203 deletions

@@ -33,12 +33,7 @@ class AscendSiluAndMul(SiluAndMul):
     def forward_oot(self, x: torch.Tensor) -> torch.Tensor:
         import torch_npu
-        from vllm_ascend.utils import AscendDeviceType, get_ascend_device_type
         torch.ops.vllm.maybe_prefetch_mlp_down_proj(x)
-        if get_ascend_device_type() == AscendDeviceType._310P:
-            out = torch_npu.npu_swiglu(x.to(torch.float32)).to(torch.float16)
-        else:
-            out = torch_npu.npu_swiglu(x)
+        out = torch_npu.npu_swiglu(x)
         torch.ops.vllm.maybe_wait_prefetch_done(out)
         return out

@@ -52,15 +52,8 @@ class AscendRMSNorm(RMSNorm):
     ) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
         import torch_npu
-        from vllm_ascend.utils import AscendDeviceType, get_ascend_device_type
         if residual is not None:
-            if get_ascend_device_type() == AscendDeviceType._310P:
-                orig_dtype = residual.dtype
-                x = x + residual.to(x.dtype)
-                residual = x.to(orig_dtype)
-                x, _ = torch_npu.npu_rms_norm(x, self.weight,
-                                              self.variance_epsilon)
-            elif enable_custom_op():
+            if enable_custom_op():
                 x, _, residual = torch.ops._C_ascend.npu_add_rms_norm_bias(
                     x, residual, self.weight, self.bias, self.variance_epsilon)
             else:
@@ -88,13 +81,7 @@ class AscendGemmaRMSNorm(GemmaRMSNorm):
-        from vllm_ascend.utils import AscendDeviceType, get_ascend_device_type
         if residual is not None:
-            if get_ascend_device_type() == AscendDeviceType._310P:
-                orig_dtype = residual.dtype
-                x = x + residual.to(x.dtype)
-                residual = x.to(orig_dtype)
-                x, _ = torch_npu.npu_rms_norm(x, 1.0 + self.weight,
-                                              self.variance_epsilon)
-            elif enable_custom_op():
+            if enable_custom_op():
                 x, _, residual = torch.ops._C_ascend.npu_add_rms_norm_bias(
                     x, residual, 1.0 + self.weight, None,
                     self.variance_epsilon)
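
The 310P branches deleted above presumably move into device-specific norm classes. A hedged sketch of that relocation, reusing the deleted logic (`AscendRMSNorm310P` and the import path are hypothetical names, not necessarily what this PR uses):

```python
# Sketch only: the deleted 310P residual path, relocated into a
# device-specific subclass.
from typing import Optional, Tuple, Union

import torch
import torch_npu

from vllm_ascend.ops.layernorm import AscendRMSNorm  # assumed path


class AscendRMSNorm310P(AscendRMSNorm):

    def forward_oot(
        self,
        x: torch.Tensor,
        residual: Optional[torch.Tensor] = None,
    ) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
        if residual is not None:
            # Unfused path used on 310P: add the residual in x's dtype,
            # keep a copy in the residual's original dtype, then run the
            # plain rms_norm kernel instead of the fused add+rmsnorm op.
            orig_dtype = residual.dtype
            x = x + residual.to(x.dtype)
            residual = x.to(orig_dtype)
            x, _ = torch_npu.npu_rms_norm(x, self.weight,
                                          self.variance_epsilon)
            return x, residual
        x, _ = torch_npu.npu_rms_norm(x, self.weight, self.variance_epsilon)
        return x
```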