[Performance] Use forward_native for Conv3dLayer and add UT (#8375)

What this PR does / why we need it?
Switch the Ascend conv3d forward_oot to delegate to forward_native, and add a unit test.
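
For context, vLLM's CustomOp-style layers carry two entry points: forward_native, the reference PyTorch implementation, and forward_oot, the hook that out-of-tree platform plugins such as vllm-ascend override. A minimal sketch of that dispatch pattern (simplified for illustration; this is not the exact upstream code, which selects an implementation per platform):

# Simplified sketch of the CustomOp dispatch pattern this PR relies on.
# Not the exact vLLM code; names below are illustrative.
import torch

class CustomOpSketch(torch.nn.Module):
    def forward_native(self, x: torch.Tensor) -> torch.Tensor:
        # Reference PyTorch implementation, overridden by each concrete op.
        raise NotImplementedError

    def forward_oot(self, x: torch.Tensor) -> torch.Tensor:
        # Out-of-tree platforms override this; the default falls back to
        # the native path, which is what this PR restores for conv3d on 310P.
        return self.forward_native(x)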

Does this PR introduce any user-facing change?
No

How was this patch tested?
By CI.

---------

Signed-off-by: zouyizhou <zouyizhou@huawei.com>
Authored by zyz111222 on 2026-04-20 17:20:40 +08:00; committed via GitHub
parent c124e8df07
commit dd7e08c6db
3 changed files with 64 additions and 0 deletions


@@ -0,0 +1,35 @@
from unittest.mock import MagicMock, patch

import pytest
import torch
from vllm.config import set_current_vllm_config
from vllm.model_executor.layers.conv import Conv3dLayer

from vllm_ascend._310p.ops.conv import AscendConv3dLayer310


@pytest.fixture(autouse=True)
def default_vllm_config():
    mock_config = MagicMock()
    mock_config.compilation_config.custom_ops = ["all"]
    with set_current_vllm_config(mock_config):
        yield mock_config


def test_conv3d_310_forward_oot_uses_forward_native():
    layer = AscendConv3dLayer310(
        in_channels=2,
        out_channels=4,
        kernel_size=(2, 2, 2),
        stride=(2, 2, 2),
        bias=True,
        params_dtype=torch.float32,
    )
    x = torch.randn(1, 2, 4, 4, 4, dtype=torch.float32)
    expected = torch.randn(1, 4, 2, 2, 2, dtype=torch.float32)

    with patch.object(
        Conv3dLayer, "forward_native", autospec=True, return_value=expected
    ) as mock_forward_native:
        out = layer.forward_oot(x)

    mock_forward_native.assert_called_once_with(layer, x)
    assert out is expected
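
Note on the test design: autospec=True gives the patched forward_native the real unbound-method signature (including self), which is why the assertion can verify that the layer instance and the input tensor are forwarded unchanged.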


@@ -0,0 +1,27 @@
#
# Copyright (c) 2026 Huawei Technologies Co., Ltd. All Rights Reserved.
# This file is a part of the vllm-ascend project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import torch

from vllm_ascend.ops.conv import AscendConv3dLayer


class AscendConv3dLayer310(AscendConv3dLayer):
    def forward_oot(self, x: torch.Tensor) -> torch.Tensor:
        # 310P should avoid the aclnn BatchMatMulV2 Conv3D path used by
        # AscendConv3dLayer and keep vLLM's native Conv3d dispatch behavior.
        return super().forward_native(x)
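
The effect of this override, sketched under the assumption that Conv3dLayer.forward_native is a thin wrapper over torch.nn.functional.conv3d (the function and parameter names below are illustrative, not copied from vLLM):

# Illustrative only: assumes the native path boils down to F.conv3d with
# the layer's registered weight/bias; on 310P this sidesteps lowering to
# the aclnn BatchMatMulV2-based Conv3D kernel.
import torch
import torch.nn.functional as F

def native_conv3d_sketch(x: torch.Tensor,
                         weight: torch.Tensor,
                         bias: torch.Tensor | None,
                         stride: tuple[int, int, int]) -> torch.Tensor:
    return F.conv3d(x, weight, bias, stride=stride)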


@@ -662,6 +662,7 @@ def register_ascend_customop(vllm_config: VllmConfig | None = None):
    if is_310p():
        from vllm_ascend._310p.fused_moe.fused_moe import AscendFusedMoE310, AscendSharedFusedMoE310
        from vllm_ascend._310p.ops.activation import AscendSiluAndMul310
        from vllm_ascend._310p.ops.conv import AscendConv3dLayer310
        from vllm_ascend._310p.ops.layernorm import (
            AscendGemmaRMSNorm310,
            AscendRMSNorm310,
@@ -686,6 +687,7 @@ def register_ascend_customop(vllm_config: VllmConfig | None = None):
"ParallelLMHead": AscendParallelLMHead310,
"VocabParallelEmbedding": AscendVocabParallelEmbedding310,
"MMEncoderAttention": AscendMMEncoderAttention310,
"Conv3dLayer": AscendConv3dLayer310,
}
)
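
With this registry entry in place, instantiating vLLM's Conv3dLayer on a 310P device (with custom ops enabled, as the unit test's mock config does via custom_ops=["all"]) transparently yields AscendConv3dLayer310, so every conv3d forward on 310P now takes the native path exercised by the test above.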