From dd7e08c6db5f777933db3426c514d31d41bb20ef Mon Sep 17 00:00:00 2001
From: zyz111222
Date: Mon, 20 Apr 2026 17:20:40 +0800
Subject: [PATCH] [Performance] Use forward_native for Conv3dLayer and add UT
 (#8375)

What this PR does / why we need it?
Switch the Ascend Conv3d forward_oot on 310P to delegate to forward_native,
and add a unit test covering the new layer.

Does this PR introduce any user-facing change?
No

How was this patch tested?
By CI.

---------

Signed-off-by: zouyizhou
---
 tests/ut/_310p/ops/test_conv_310.py | 35 +++++++++++++++++++++++++++++
 vllm_ascend/_310p/ops/conv.py       | 27 ++++++++++++++++++++++
 vllm_ascend/utils.py                |  2 ++
 3 files changed, 64 insertions(+)
 create mode 100644 tests/ut/_310p/ops/test_conv_310.py
 create mode 100644 vllm_ascend/_310p/ops/conv.py

diff --git a/tests/ut/_310p/ops/test_conv_310.py b/tests/ut/_310p/ops/test_conv_310.py
new file mode 100644
index 00000000..ed123cd4
--- /dev/null
+++ b/tests/ut/_310p/ops/test_conv_310.py
@@ -0,0 +1,35 @@
+from unittest.mock import MagicMock, patch
+
+import pytest
+import torch
+from vllm.config import set_current_vllm_config
+from vllm.model_executor.layers.conv import Conv3dLayer
+
+from vllm_ascend._310p.ops.conv import AscendConv3dLayer310
+
+
+@pytest.fixture(autouse=True)
+def default_vllm_config():
+    mock_config = MagicMock()
+    mock_config.compilation_config.custom_ops = ["all"]
+    with set_current_vllm_config(mock_config):
+        yield mock_config
+
+
+def test_conv3d_310_forward_oot_uses_forward_native():
+    layer = AscendConv3dLayer310(
+        in_channels=2,
+        out_channels=4,
+        kernel_size=(2, 2, 2),
+        stride=(2, 2, 2),
+        bias=True,
+        params_dtype=torch.float32,
+    )
+    x = torch.randn(1, 2, 4, 4, 4, dtype=torch.float32)
+    expected = torch.randn(1, 4, 2, 2, 2, dtype=torch.float32)
+
+    with patch.object(Conv3dLayer, "forward_native", autospec=True, return_value=expected) as mock_forward_native:
+        out = layer.forward_oot(x)
+
+    mock_forward_native.assert_called_once_with(layer, x)
+    assert out is expected
diff --git a/vllm_ascend/_310p/ops/conv.py b/vllm_ascend/_310p/ops/conv.py
new file mode 100644
index 00000000..0572ff2a
--- /dev/null
+++ b/vllm_ascend/_310p/ops/conv.py
@@ -0,0 +1,27 @@
+#
+# Copyright (c) 2026 Huawei Technologies Co., Ltd. All Rights Reserved.
+# This file is a part of the vllm-ascend project.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import torch
+
+from vllm_ascend.ops.conv import AscendConv3dLayer
+
+
+class AscendConv3dLayer310(AscendConv3dLayer):
+    def forward_oot(self, x: torch.Tensor) -> torch.Tensor:
+        # 310P should avoid the aclnn BatchMatMulV2 Conv3D path used by
+        # AscendConv3dLayer and keep vLLM's native Conv3d dispatch behavior.
+        return super().forward_native(x)
diff --git a/vllm_ascend/utils.py b/vllm_ascend/utils.py
index 2d477240..2f9d9007 100644
--- a/vllm_ascend/utils.py
+++ b/vllm_ascend/utils.py
@@ -662,6 +662,7 @@ def register_ascend_customop(vllm_config: VllmConfig | None = None):
     if is_310p():
         from vllm_ascend._310p.fused_moe.fused_moe import AscendFusedMoE310, AscendSharedFusedMoE310
         from vllm_ascend._310p.ops.activation import AscendSiluAndMul310
+        from vllm_ascend._310p.ops.conv import AscendConv3dLayer310
         from vllm_ascend._310p.ops.layernorm import (
             AscendGemmaRMSNorm310,
             AscendRMSNorm310,
@@ -686,6 +687,7 @@ def register_ascend_customop(vllm_config: VllmConfig | None = None):
             "ParallelLMHead": AscendParallelLMHead310,
             "VocabParallelEmbedding": AscendVocabParallelEmbedding310,
             "MMEncoderAttention": AscendMMEncoderAttention310,
+            "Conv3dLayer": AscendConv3dLayer310,
         }
     )
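Background sketch (not part of the applied diff): vLLM's CustomOp-style
layers bind a single forward implementation when the layer is built; on an
out-of-tree platform such as Ascend, forward_oot is the one selected.
Overriding forward_oot to call forward_native, as AscendConv3dLayer310 does
above, therefore restores the reference PyTorch path. The toy class below
models that dispatch under the assumption that the real CustomOp selects and
caches a forward method once at construction time; CustomOpSketch, its
oot_platform flag, and the identity forward_native are invented for
illustration and are not vLLM source.

    import torch


    class CustomOpSketch(torch.nn.Module):
        """Toy model of CustomOp-style forward dispatch."""

        def __init__(self, oot_platform: bool):
            super().__init__()
            # The active implementation is chosen once, up front:
            # out-of-tree platforms get forward_oot, everything else
            # the native PyTorch reference path.
            self._forward_method = (self.forward_oot if oot_platform
                                    else self.forward_native)

        def forward(self, x: torch.Tensor) -> torch.Tensor:
            return self._forward_method(x)

        def forward_native(self, x: torch.Tensor) -> torch.Tensor:
            # Reference path (identity here; Conv3d in the real layer).
            return x

        def forward_oot(self, x: torch.Tensor) -> torch.Tensor:
            # Mirrors AscendConv3dLayer310.forward_oot: fall back to
            # the native implementation rather than a device kernel.
            return self.forward_native(x)


    # Even on the "out-of-tree" path, the output matches the native one.
    layer = CustomOpSketch(oot_platform=True)
    x = torch.randn(2, 3)
    assert torch.equal(layer(x), layer.forward_native(x))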