From a813eadd2d2fb5d4f6179fbed860aaebfe2b3db6 Mon Sep 17 00:00:00 2001
From: Shanshan Shen <467638484@qq.com>
Date: Fri, 6 Mar 2026 14:26:37 +0800
Subject: [PATCH] [MM][Perf] Enable 2.7x faster convolution computation with
 aclnn BatchMatMulV2 (#7017)

### What this PR does / why we need it?

Currently, we use
https://github.com/vllm-project/vllm/blob/e2b31243c092e9f4ade5ffe4bf9a5d5ddae06ca7/vllm/model_executor/layers/conv.py#L219-L232
for convolution computation, which is used for patch embedding in VL models.

After profiling, we found that this linear method takes about **6.87 ms**,
which is much slower than simply calling `F.conv3d()`. On Ascend NPU,
`F.conv3d()` dispatches to the optimized aclnn `BatchMatMulV2` kernel, which
takes only about **2.50 ms** and is therefore **2.7x faster** than the
linear method.

- vLLM version: v0.16.0
- vLLM main: https://github.com/vllm-project/vllm/commit/15d76f74e2fdb12a95ea00f0ca283acf6219a2b7

---------

Signed-off-by: shen-shanshan <467638484@qq.com>
---
 vllm_ascend/ops/conv.py | 32 ++++++++++++++++++++++++++++++++
 vllm_ascend/utils.py    |  3 +++
 2 files changed, 35 insertions(+)
 create mode 100644 vllm_ascend/ops/conv.py

diff --git a/vllm_ascend/ops/conv.py b/vllm_ascend/ops/conv.py
new file mode 100644
index 00000000..08fdb1d1
--- /dev/null
+++ b/vllm_ascend/ops/conv.py
@@ -0,0 +1,32 @@
+#
+# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
+# This file is a part of the vllm-ascend project.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+import torch
+from vllm.model_executor.layers.conv import Conv2dLayer, Conv3dLayer
+
+
+class AscendConv2dLayer(Conv2dLayer):
+    def forward_oot(self, x: torch.Tensor) -> torch.Tensor:
+        # Use aclnn BatchMatMulV2 for better performance on Ascend NPU.
+        return self._forward_conv(x)
+
+
+class AscendConv3dLayer(Conv3dLayer):
+    def forward_oot(self, x: torch.Tensor) -> torch.Tensor:
+        # Use aclnn BatchMatMulV2 for better performance on Ascend NPU.
+        return self._forward_conv(x)
diff --git a/vllm_ascend/utils.py b/vllm_ascend/utils.py
index da11e35b..1c52627a 100644
--- a/vllm_ascend/utils.py
+++ b/vllm_ascend/utils.py
@@ -597,6 +597,7 @@ def register_ascend_customop(vllm_config: VllmConfig | None = None):
     from vllm.model_executor.custom_op import CustomOp
 
     from vllm_ascend.ops.activation import AscendQuickGELU, AscendSiluAndMul
+    from vllm_ascend.ops.conv import AscendConv2dLayer, AscendConv3dLayer
     from vllm_ascend.ops.fused_moe.fused_moe import AscendFusedMoE, AscendSharedFusedMoE
     from vllm_ascend.ops.layernorm import AscendGemmaRMSNorm, AscendRMSNorm, AscendRMSNormGated
     from vllm_ascend.ops.linear import (
@@ -645,6 +646,8 @@ def register_ascend_customop(vllm_config: VllmConfig | None = None):
         "MMEncoderAttention": AscendMMEncoderAttention,
         "ApplyRotaryEmb": AscendApplyRotaryEmb,
         "RMSNormGated": AscendRMSNormGated,
+        "Conv2dLayer": AscendConv2dLayer,
+        "Conv3dLayer": AscendConv3dLayer,
     }
 
     # 310P: override selected ops with 310P implementations (keep minimal changes outside _310p)
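
As a side note for reviewers, below is a minimal sketch of how the timing gap could be reproduced outside vLLM. The tensor shapes, the unfold-plus-matmul stand-in for the upstream linear path, and the `torch_npu` timing boilerplate are illustrative assumptions only; they are not part of this patch, and the absolute numbers depend on the actual patch-embedding configuration.

```python
# Illustrative microbenchmark (not part of the patch): compare a matmul-based
# patch projection against F.conv3d, which lowers to the aclnn BatchMatMulV2
# kernel on Ascend NPU.
import time

import torch
import torch.nn.functional as F
import torch_npu  # noqa: F401  registers the "npu" device with torch

device, dtype = "npu", torch.bfloat16

# Hypothetical 3D patch-embedding shapes; adjust to the real model config.
in_ch, out_ch, patch = 3, 1280, (2, 14, 14)
x = torch.randn(1, in_ch, 16, 392, 392, device=device, dtype=dtype)
weight = torch.randn(out_ch, in_ch, *patch, device=device, dtype=dtype)


def run_linear(x: torch.Tensor) -> torch.Tensor:
    # Rough stand-in for the unfold + matmul path (output layout differs
    # from conv3d, which is fine for a pure timing comparison).
    p = (
        x.unfold(2, patch[0], patch[0])
        .unfold(3, patch[1], patch[1])
        .unfold(4, patch[2], patch[2])
    )
    p = p.permute(0, 2, 3, 4, 1, 5, 6, 7).flatten(-4).flatten(1, 3)
    return p @ weight.flatten(1).t()


def run_conv3d(x: torch.Tensor) -> torch.Tensor:
    # Ascend path: a single non-overlapping strided convolution call.
    return F.conv3d(x, weight, stride=patch)


for name, fn in (("linear", run_linear), ("conv3d", run_conv3d)):
    fn(x)  # warm-up
    torch.npu.synchronize()
    start = time.perf_counter()
    fn(x)
    torch.npu.synchronize()
    print(f"{name}: {(time.perf_counter() - start) * 1e3:.2f} ms")
```

The same script can be sanity-checked on CPU or CUDA by swapping the device and synchronize calls, but the 2.7x figure quoted above is specific to the Ascend kernels.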