### What this PR does / why we need it?
* Refactor the LayerNorm and activation operator classes to decouple the
310P device implementation from the main branch.
* Refactor `mm_encoder_attention` on 310P to use the
`torch_npu._npu_flash_attention_unpad` operator (see the sketch after this list).
* Refactor the QKV inputs in the prefill stage of `attention_v1` on 310P
so they are no longer padded to a multiple of 16.
* Refactor `model_runner` on 310P to align the KV-cache initialization
logic with the mainline implementation.
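For context, a minimal sketch of the unpadded prefill attention call this change moves to. The helper name, keyword arguments, and tensor layout below are assumptions made by analogy with `torch_npu._npu_flash_attention`, not the exact code of this PR:

```python
import torch
import torch_npu


def prefill_attention_unpad(query: torch.Tensor,
                            key: torch.Tensor,
                            value: torch.Tensor,
                            seq_lens: torch.Tensor,
                            attn_mask: torch.Tensor,
                            num_heads: int,
                            num_kv_heads: int,
                            scale: float) -> torch.Tensor:
    """Hypothetical helper: unpadded prefill attention on 310P."""
    # Q/K/V keep their true token counts; the sequence dimension is no
    # longer padded up to a multiple of 16 beforehand.
    output = torch.empty_like(query)
    torch_npu._npu_flash_attention_unpad(
        query=query,        # [num_tokens, num_heads, head_dim]
        key=key,
        value=value,
        seq_len=seq_lens,   # per-sequence token counts (assumed keyword)
        mask=attn_mask,
        scale_value=scale,  # assumed keyword
        num_heads=num_heads,
        num_kv_heads=num_kv_heads,
        out=output)
    return output
```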
### Does this PR introduce _any_ user-facing change?
NO
### How was this patch tested?
Tested with the e2e tests.
- vLLM version: v0.13.0
- vLLM main:
d68209402d
---------
Signed-off-by: Tflowers-0129 <2906339855@qq.com>
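The 310P-specific LayerNorm classes added by this change: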
```python
import torch
import torch_npu

from vllm_ascend.ops.layernorm import AscendGemmaRMSNorm, AscendRMSNorm


class AscendRMSNorm310(AscendRMSNorm):

    def forward_oot(
        self,
        x: torch.Tensor,
        residual: torch.Tensor | None = None,
    ) -> torch.Tensor | tuple[torch.Tensor, torch.Tensor]:
        if residual is not None:
            # Fused path: fold the residual into x, then apply RMSNorm.
            orig_dtype = residual.dtype
            if x is None or x.numel() == 0 or x.shape[-1] == 0:
                # Empty hidden states (e.g. zero tokens): fall back to the residual.
                x = residual.to(dtype=residual.dtype)
            else:
                x = x + residual.to(x.dtype)

            residual = x.to(orig_dtype)
            x, _ = torch_npu.npu_rms_norm(x, self.weight, self.variance_epsilon)
            return x, residual

        x, residual = torch_npu.npu_rms_norm(x, self.weight, self.variance_epsilon)
        if self.bias is not None:
            x.add_(self.bias)
        return x


class AscendGemmaRMSNorm310(AscendGemmaRMSNorm):

    def forward_oot(
        self,
        x: torch.Tensor,
        residual: torch.Tensor | None = None,
    ) -> torch.Tensor | tuple[torch.Tensor, torch.Tensor]:
        if residual is not None:
            orig_dtype = residual.dtype
            x = x + residual.to(x.dtype)
            residual = x.to(orig_dtype)
            # Gemma-style RMSNorm scales by (1 + weight).
            x, _ = torch_npu.npu_rms_norm(x, 1.0 + self.weight, self.variance_epsilon)
            return x, residual

        x, _ = torch_npu.npu_rms_norm(x, 1.0 + self.weight, self.variance_epsilon)
        return x
```
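A minimal usage sketch (hypothetical sizes; assumes an NPU device is available and that construction matches the base `AscendRMSNorm`, i.e. vLLM's `RMSNorm(hidden_size, eps)`):

```python
import torch

# Hypothetical example, continuing from the module above; on 310P these
# classes stand in for the base AscendRMSNorm / AscendGemmaRMSNorm.
norm = AscendRMSNorm310(hidden_size=4096, eps=1e-6).to("npu").half()
x = torch.randn(8, 4096, dtype=torch.float16, device="npu")
residual = torch.randn_like(x)

# Fused residual-add + RMSNorm path: returns (normalized x, updated residual).
out, new_residual = norm.forward_oot(x, residual)
```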