[Refactor] cleanup converting_weight_acl_format_format (#2482)

Move maybe_converting_weight_acl_format_format to the torchair module; it is
only used with 310p + torchair.

- vLLM version: v0.10.1.1
- vLLM main:
49ab23b3cc

Signed-off-by: wangxiyuan <wangxiyuan1007@gmail.com>
This commit is contained in:
wangxiyuan
2025-08-25 19:48:55 +08:00
committed by GitHub
parent 0f81e032f0
commit de7649492d
6 changed files with 75 additions and 108 deletions

View File

@@ -117,79 +117,6 @@ class TestUtils(TestBase):
output_tensor = utils.aligned_16(input_tensor)
self.assertEqual(output_tensor.shape[0], 32)
@mock.patch('torch_npu.get_npu_format')
@mock.patch('torch_npu.npu_format_cast')
@mock.patch('vllm.model_executor.layers.fused_moe.layer.FusedMoE',
new=mock.MagicMock)
@mock.patch('vllm_ascend.utils.is_310p')
@mock.patch('vllm_ascend.utils.get_ascend_config')
def test_maybe_converting_weight_acl_format(self, mock_get_config,
mock_310p, mock_npu_cast,
mock_get_format):
ACL_FORMAT_FRACTAL_NZ = 29
mock_310p.return_value = True
mock_config = mock.MagicMock()
mock_config.torchair_graph_config.enabled = True
mock_get_config.return_value = mock_config
mock_get_format.return_value = 1
mock_npu_cast.return_value = 1
fused_moe = mock.MagicMock()
fused_moe.w13_weight = mock.MagicMock()
fused_moe.w2_weight = mock.MagicMock()
fused_moe.w13_weight.data = torch.randn(128, 256)
fused_moe.w2_weight.data = torch.randn(256, 128)
model = mock.MagicMock()
model.modules.return_value = [fused_moe]
utils.maybe_converting_weight_acl_format(model, ACL_FORMAT_FRACTAL_NZ)
self.assertEqual(fused_moe.w13_weight.data, 1)
@mock.patch('torch_npu.get_npu_format')
@mock.patch('torch_npu.npu_format_cast')
@mock.patch('vllm.model_executor.layers.fused_moe.layer.FusedMoE',
new=mock.MagicMock)
@mock.patch('vllm_ascend.utils.is_310p')
@mock.patch('vllm_ascend.utils.get_ascend_config')
def test_maybe_converting_weight_acl_format_format_true(
self, mock_get_config, mock_310p, mock_npu_cast, mock_get_format):
ACL_FORMAT_FRACTAL_NZ = 29
mock_310p.return_value = True
mock_config = mock.MagicMock()
mock_config.torchair_graph_config.enabled = True
mock_get_config.return_value = mock_config
mock_get_format.return_value = ACL_FORMAT_FRACTAL_NZ
mock_npu_cast.return_value = 1
fused_moe = mock.MagicMock()
fused_moe.w13_weight = mock.MagicMock()
fused_moe.w2_weight = mock.MagicMock()
fused_moe.w13_weight.data = torch.randn(128, 256)
fused_moe.w2_weight.data = torch.randn(256, 128)
model = mock.MagicMock()
model.modules.return_value = [fused_moe]
mock_get_format.return_value = ACL_FORMAT_FRACTAL_NZ
utils.maybe_converting_weight_acl_format(model, ACL_FORMAT_FRACTAL_NZ)
@mock.patch('vllm_ascend.utils.get_ascend_config')
@mock.patch('vllm_ascend.utils.is_310p', return_value=False)
def test_maybe_converting_weight_acl_format_not_310_not_graph(
self, mock_310p, mock_get_config):
mock_config = mock.MagicMock()
mock_config.torchair_graph_config.enabled = False
mock_get_config.return_value = mock_config
mock_constant = mock.MagicMock()
mock_model = mock.MagicMock()
utils.maybe_converting_weight_acl_format(mock_model, mock_constant)
@mock.patch('importlib.util.find_spec')
@mock.patch('importlib.import_module')
def test_try_register_lib(self, mock_import_module, mock_find_spec):

View File

@@ -1,7 +1,10 @@
import os
from concurrent.futures import ThreadPoolExecutor
from unittest import mock
from unittest.mock import MagicMock, patch
import torch
from tests.ut.base import TestBase
from vllm_ascend.torchair import utils
@@ -75,3 +78,45 @@ class TestTorchairUtils(TestBase):
args, kwargs = call_args_list[i]
self.assertEqual(args[0], expected_name)
self.assertEqual(args[1], expected_path)
@mock.patch('torch_npu.get_npu_format')
@mock.patch('torch_npu.npu_format_cast')
@mock.patch('vllm.model_executor.layers.fused_moe.layer.FusedMoE',
new=mock.MagicMock)
def test_converting_weight_acl_format(self, mock_npu_cast,
mock_get_format):
ACL_FORMAT_FRACTAL_NZ = 29
mock_get_format.return_value = 1
mock_npu_cast.return_value = 1
fused_moe = mock.MagicMock()
fused_moe.w13_weight = mock.MagicMock()
fused_moe.w2_weight = mock.MagicMock()
fused_moe.w13_weight.data = torch.randn(128, 256)
fused_moe.w2_weight.data = torch.randn(256, 128)
model = mock.MagicMock()
model.modules.return_value = [fused_moe]
utils.converting_weight_acl_format(model, ACL_FORMAT_FRACTAL_NZ)
self.assertEqual(fused_moe.w13_weight.data, 1)
@mock.patch('torch_npu.get_npu_format')
@mock.patch('torch_npu.npu_format_cast')
@mock.patch('vllm.model_executor.layers.fused_moe.layer.FusedMoE',
new=mock.MagicMock)
def test_converting_weight_acl_format_format_true(self, mock_npu_cast,
mock_get_format):
ACL_FORMAT_FRACTAL_NZ = 29
mock_get_format.return_value = ACL_FORMAT_FRACTAL_NZ
mock_npu_cast.return_value = 1
fused_moe = mock.MagicMock()
fused_moe.w13_weight = mock.MagicMock()
fused_moe.w2_weight = mock.MagicMock()
fused_moe.w13_weight.data = torch.randn(128, 256)
fused_moe.w2_weight.data = torch.randn(256, 128)
model = mock.MagicMock()
model.modules.return_value = [fused_moe]
utils.converting_weight_acl_format(model, ACL_FORMAT_FRACTAL_NZ)
mock_npu_cast.assert_not_called()