From 34c2996ab8832f3a475dd908d34864648b846a82 Mon Sep 17 00:00:00 2001
From: Zhu Yi Lin <116337067+GDzhu01@users.noreply.github.com>
Date: Mon, 20 Oct 2025 19:53:32 +0800
Subject: [PATCH] [main] v_proj combining transpose and matmul (#3545)

### What this PR does / why we need it?
v_proj combining transpose and matmul

### Does this PR introduce _any_ user-facing change?
No

### How was this patch tested?
CI passed with new added/existing test.

- vLLM version: v0.11.0rc3
- vLLM main: https://github.com/vllm-project/vllm/commit/v0.11.0

Signed-off-by: GDzhu1 <809721801@qq.com>
---
 vllm_ascend/attention/mla_v1.py | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)

diff --git a/vllm_ascend/attention/mla_v1.py b/vllm_ascend/attention/mla_v1.py
index 2196858..f88e01a 100644
--- a/vllm_ascend/attention/mla_v1.py
+++ b/vllm_ascend/attention/mla_v1.py
@@ -559,12 +559,13 @@ class AscendMLAImpl(MLAAttentionImpl):
         self.speculative_config = vllm_config.speculative_config
 
     def _v_up_proj(self, x):
-        # Convert from (B, N, L) to (N, B, L)
-        x = x.view(-1, self.num_heads, self.kv_lora_rank).transpose(0, 1)
-        # Multiply (N, B, L) x (N, L, V) -> (N, B, V)
-        x = torch.bmm(x, self.W_UV)
-        # Convert from (N, B, V) to (B, N * V)
-        x = x.transpose(0, 1).reshape(-1, self.num_heads * self.v_head_dim)
+        x = x.view(-1, self.num_heads, self.kv_lora_rank)
+        x = torch_npu.npu_transpose_batchmatmul(x,
+                                                self.W_UV,
+                                                perm_x1=[1, 0, 2],
+                                                perm_x2=[0, 1, 2],
+                                                perm_y=[1, 0, 2])
+        x = x.reshape(-1, self.num_heads * self.v_head_dim)
         return x
 
     # Return `ql_nope`, `q_pe`