[Refactor] refactor 310P ops and add UT (#6591)

### What this PR does / why we need it?
This pull request refactors the operations in the vllm-ascend project that
are specialized for Ascend 310P hardware. It streamlines the implementation
of core components such as quantization and multi-head attention, making the
codebase more maintainable and robust, and adds unit tests to verify the
correctness of the refactored modules.

### Does this PR introduce _any_ user-facing change?
No

### How was this patch tested?
E2E test with Qwen3-32B W8A8

- vLLM version: v0.15.0
- vLLM main: d7e17aaacd

---------

Signed-off-by: pu-zhe <zpuaa@outlook.com>
Author: pu-zhe
Committed: 2026-02-07 09:25:17 +08:00 (committed by GitHub)
Parent: 6c49f95da2
Commit: 23524f2ca4
6 changed files with 173 additions and 28 deletions

View File

```diff
@@ -19,16 +19,10 @@ import torch
 import torch.nn.functional as F
 from vllm_ascend.ops.activation import AscendSiluAndMul
-from vllm_ascend.utils import get_weight_prefetch_method
 
 
 class AscendSiluAndMul310(AscendSiluAndMul):
 
     def forward(self, x: torch.Tensor) -> torch.Tensor:
-        weight_prefetch_method = get_weight_prefetch_method()
-        if weight_prefetch_method:
-            weight_prefetch_method.maybe_prefetch_mlp_weight_preprocess(weight_prefetch_method.MLP_DOWN, x)
         h = x.shape[-1] // 2
         out = (F.silu(x[..., :h].to(torch.float32)) * x[..., h:].to(torch.float32)).to(torch.float16)
-        if weight_prefetch_method:
-            weight_prefetch_method.maybe_prefetch_mlp_weight_postprocess(out)
         return out
```
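For reference, the numerics of the refactored `forward` can be sanity-checked on CPU with plain torch: the activation is evaluated in float32 and cast back to float16. This is an illustrative sketch, not the PR's actual unit test; tensor shapes and tolerances are assumptions.

```python
import torch
import torch.nn.functional as F

x = torch.randn(4, 256, dtype=torch.float16)
h = x.shape[-1] // 2

# Refactored path: upcast to float32, SiLU-and-mul, downcast to float16.
out = (F.silu(x[..., :h].to(torch.float32)) * x[..., h:].to(torch.float32)).to(torch.float16)

# Higher-precision (float64) reference, compared with fp16-scale tolerances.
ref = (F.silu(x[..., :h].double()) * x[..., h:].double()).to(torch.float16)
torch.testing.assert_close(out, ref, rtol=1e-3, atol=1e-3)
```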

View File

```diff
@@ -15,7 +15,6 @@
 # This file is a part of the vllm-ascend project.
 #
-import einops
 import torch
 import torch_npu
@@ -37,31 +36,26 @@ class AscendMMEncoderAttention310(AscendMMEncoderAttention):
     ):
         bsz, q_len = query.size()[:2]
         kv_len = key.size(1)
-        q, k, v = self.reshape_qkv_to_3d(query, key, value, bsz, q_len, kv_len)
+        query = query.view(bsz * q_len, self.num_heads, self.head_size)
+        key = key.view(bsz * kv_len, self.num_kv_heads, self.head_size)
+        value = value.view(bsz * kv_len, self.num_kv_heads, self.head_size)
         if cu_seqlens is None:
             cu_seqlens = torch.arange(
                 0,
                 (bsz + 1) * q_len,
                 step=q_len,
                 dtype=torch.int32,
                 device=query.device,
             )
             seq_len = torch.tensor([q_len] * bsz, device="cpu", dtype=torch.int32)
         else:
-            seq_len = torch.diff(cu_seqlens.to("cpu", dtype=torch.int32))
+            seq_len = torch.diff(cu_seqlens).to("cpu", dtype=torch.int32)
-        context_layer = torch.empty_like(q)
+        output = torch.empty_like(query)
         torch_npu._npu_flash_attention_unpad(
-            query=q,
-            key=k,
-            value=v,
+            query=query,
+            key=key,
+            value=value,
             seq_len=seq_len,
             scale_value=self.head_size**-0.5,
             num_heads=self.num_heads,
             num_kv_heads=self.num_kv_heads,
-            out=context_layer,
+            out=output,
         )
-        context_layer = einops.rearrange(context_layer, "(b s) h d -> b s h d", b=bsz).contiguous()
-        return context_layer
+        output = output.view(bsz, -1, self.num_heads, self.head_size)
+        return output
```
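To make the metadata handling concrete, here is a minimal CPU-only sketch of what the refactored code computes around the kernel call; shapes are illustrative and the `torch_npu` kernel itself is omitted.

```python
import torch

bsz, q_len, num_heads, head_size = 2, 4, 8, 64

# Cumulative sequence offsets for a dense batch: 0, q_len, 2*q_len, ...
cu_seqlens = torch.arange(0, (bsz + 1) * q_len, step=q_len, dtype=torch.int32)

# torch.diff over the cumulative offsets recovers the per-sequence lengths,
# moved to the host as int32 (the order of diff and transfer is what changed).
seq_len = torch.diff(cu_seqlens).to("cpu", dtype=torch.int32)
assert seq_len.tolist() == [q_len] * bsz

# The unpadded attention kernel consumes (total_tokens, heads, head_size);
# the refactor produces this with a plain .view() instead of a helper.
query = torch.randn(bsz, q_len, num_heads * head_size)
q3d = query.view(bsz * q_len, num_heads, head_size)

# The output is folded back to (bsz, seq, heads, head_size) with .view(),
# replacing the previous einops.rearrange call.
out4d = q3d.view(bsz, -1, num_heads, head_size)
assert out4d.shape == (bsz, q_len, num_heads, head_size)
```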

View File

```diff
@@ -26,7 +26,7 @@ from .registry import register_scheme
 
 @register_scheme("W8A8", "linear")
-class AscendW8A8LinearMethod310P(AscendLinearScheme):
+class AscendW8A8LinearMethod310(AscendLinearScheme):
     """310P-only W8A8 static linear scheme.
 
     Notes:
```
View File

```diff
@@ -46,7 +46,7 @@ from vllm_ascend.utils import ASCEND_QUANTIZATION_METHOD
 
 logger = init_logger(__name__)
 
-def create_scheme_for_layer_310p(
+def create_scheme_for_layer(
     cfg: AscendModelSlimConfig,
     quant_description: dict[str, Any],
     prefix: str,
@@ -140,7 +140,7 @@ class AscendModelSlimConfig310(AscendModelSlimConfig):
             return AscendUnquantizedLinearMethod()
-        scheme = create_scheme_for_layer_310p(
+        scheme = create_scheme_for_layer(
             cfg=self,
             quant_description=self.quant_description,
             prefix=prefix,
```
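As background for the rename, here is a hypothetical sketch of how a per-layer scheme factory of this shape is commonly driven by a `quant_description` mapping; the names and layout are assumptions, not the actual vllm-ascend implementation.

```python
from typing import Any

# Hypothetical scheme-selection helper; the quant_description layout and
# fallback value are illustrative only.
def create_scheme_for_layer(quant_description: dict[str, Any], prefix: str) -> str:
    # quant_description maps parameter names to quantization types,
    # e.g. {"model.layers.0.mlp.down_proj.weight": "W8A8"}.
    return quant_description.get(f"{prefix}.weight", "FLOAT")

desc = {"model.layers.0.mlp.down_proj.weight": "W8A8"}
assert create_scheme_for_layer(desc, "model.layers.0.mlp.down_proj") == "W8A8"
assert create_scheme_for_layer(desc, "model.layers.0.self_attn.q_proj") == "FLOAT"
```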