[refact] unified soc_version code (#4359)

### What this PR does / why we need it?

Currently there are two code paths for determining the chip type:
`get_ascend_soc_version` uses the `get_soc_version` API from torch_npu,
while `is_310p` uses `_build_info.__soc_version__`, which is generated at
install time. These two paths need to be unified.

We unify these paths based on the following points:

1. Chip-type detection must be consistent between build time and runtime;
2. At build time we need the exact SoC version to compile the ops, but at
runtime we only need the device type (910B/910_93/310P/910_95/etc.) for
code-branch decisions;
3. At build time torch_npu may not be installed yet, so we cannot rely on
torch_npu's API.

Based on the above points, this PR makes the following changes:

1. When the user sets the env `SOC_VERSION`, use it; otherwise, query the
SoC version via `npu-smi`;
2. At build time, derive the device type from the SoC version and write
`__device_type__` (instead of `__soc_version__`) into `_build_info.py`;
3. At runtime, branch on `__device_type__`. A sketch of the whole flow is
shown below.
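
A minimal sketch of this build-time flow (not the verbatim `setup.py` code:
the `soc_to_device` entries shown and the exact `npu-smi` invocation and
parsing are illustrative assumptions):

```python
import os
import re
import subprocess

# Illustrative subset of the soc_to_device mapping kept in setup.py;
# the real table covers every supported SoC.
soc_to_device = {
    "ASCEND910B1": "910B",
    "ASCEND910_9391": "910_93",
    "ASCEND310P3": "310P",
}


def resolve_soc_version() -> str:
    """Resolve the SoC version at build time, without importing torch_npu."""
    soc = os.environ.get("SOC_VERSION")
    if soc is None:
        # torch_npu may not be installed yet, so shell out to npu-smi
        # instead of calling its get_soc_version() API.
        # (The exact npu-smi flags and output parsing are assumptions.)
        out = subprocess.check_output(["npu-smi", "info"], text=True)
        match = re.search(r"(?i)ascend\s*(910\w*|310\w*)", out)
        if match is None:
            raise RuntimeError("cannot detect SoC version via npu-smi; "
                               "set the SOC_VERSION env instead")
        soc = "ASCEND" + match.group(1).upper()
    if soc not in soc_to_device:
        raise ValueError(f"SOC_VERSION {soc!r} is not in soc_to_device")
    return soc


def write_build_info(device_type: str) -> None:
    # _build_info.py now records the coarse device type rather than the
    # full SoC name (__device_type__ replaces __soc_version__).
    with open("vllm_ascend/_build_info.py", "w") as f:
        f.write(f'__device_type__ = "{device_type}"\n')


if __name__ == "__main__":
    write_build_info(soc_to_device[resolve_soc_version()])
```

Shelling out to `npu-smi` keeps the build independent of torch_npu, which
satisfies point 3 above.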

### Does this PR introduce _any_ user-facing change?

When the env `SOC_VERSION` is not set, the SoC version no longer defaults to
`ASCEND910B1`; it is queried via `npu-smi` instead. When it is set,
`SOC_VERSION` must be a key of the `soc_to_device` mapping in `setup.py`.
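
At runtime the branch point then reduces to an enum comparison against the
recorded device type. A sketch of the runtime side (the `AscendDeviceType`
member names mirror those used in the tests below; the string values and the
helper body are assumptions):

```python
from enum import Enum


class AscendDeviceType(Enum):
    # Member names as used in the tests below; string values are assumed.
    _310P = "310P"
    _910B = "910B"
    _910_93 = "910_93"


def get_ascend_device_type() -> AscendDeviceType:
    # Read the value that setup.py wrote into _build_info.py at build time,
    # instead of probing the SoC again through torch_npu.
    from vllm_ascend._build_info import __device_type__
    return AscendDeviceType(__device_type__)


# A call site that previously asked `if is_310p():` now branches like this:
if get_ascend_device_type() == AscendDeviceType._310P:
    ...  # take the 310P-specific path
```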

- vLLM version: v0.11.0
- vLLM main: 2918c1b49c

Signed-off-by: zzzzwwjj <1183291235@qq.com>
zzzzwwjj
2025-11-26 14:28:55 +08:00
committed by GitHub
parent a91e76cd84
commit 136ea9ff56
42 changed files with 361 additions and 243 deletions

View File

@@ -9,6 +9,7 @@ from vllm_ascend.attention.attention_v1 import (AscendAttentionBackend,
                                                  AscendAttentionMetadataBuilder,
                                                  AscendAttentionState)
 from vllm_ascend.attention.utils import AscendCommonAttentionMetadata
+from vllm_ascend.utils import AscendDeviceType
 class TestAscendAttentionBackend(TestBase):
@@ -24,14 +25,15 @@ class TestAscendAttentionBackend(TestBase):
         self.assertEqual(AscendAttentionBackend.get_builder_cls(),
                          AscendAttentionMetadataBuilder)
-    @patch('vllm_ascend.attention.attention_v1.is_310p')
-    def test_get_kv_cache_shape_310p(self, mock_is_310p):
-        mock_is_310p.return_value = True
+    @patch('vllm_ascend.attention.attention_v1.get_ascend_device_type',
+           return_value=AscendDeviceType._310P)
+    def test_get_kv_cache_shape_310p(self, mock_soc_version):
         result = AscendAttentionBackend.get_kv_cache_shape(10, 20, 30, 40)
         self.assertEqual(result, (2, 10, 30 * 40 // 16, 20, 16))
-    @patch('vllm_ascend.attention.attention_v1.is_310p', return_value=False)
-    def test_get_kv_cache_shape_not_310p(self, mock_is_310p):
+    @patch('vllm_ascend.utils.get_ascend_device_type',
+           return_value=AscendDeviceType._910_93)
+    def test_get_kv_cache_shape_not_310p(self, mock_soc_version):
         result = AscendAttentionBackend.get_kv_cache_shape(10, 20, 30, 40)
         self.assertEqual(result, (2, 10, 20, 30, 40))
@@ -96,8 +98,9 @@ class TestAscendAttentionMetadataBuilder(TestBase):
     @patch('vllm_ascend.attention.attention_v1.AscendMetadata')
     @patch('torch_npu.npu_format_cast')
     @patch('vllm_ascend.utils.nd_to_nz_2d')
-    @patch('vllm_ascend.attention.attention_v1.is_310p', return_value=True)
-    def test_build_prefill_no_cache(self, mock_is_310p, mock_nd_to_nz_2d,
+    @patch('vllm_ascend.utils.get_ascend_device_type',
+           return_value=AscendDeviceType._310P)
+    def test_build_prefill_no_cache(self, mock_soc_version, mock_nd_to_nz_2d,
                                     mock_npu_format_cast,
                                     mock_ascend_metadata):
         common_attn_metadata = AscendCommonAttentionMetadata(
@@ -128,10 +131,11 @@ class TestAscendAttentionMetadataBuilder(TestBase):
     @patch('vllm_ascend.attention.attention_v1.AscendMetadata')
     @patch('torch_npu.npu_format_cast')
     @patch('vllm_ascend.utils.nd_to_nz_spec')
-    @patch('vllm_ascend.attention.attention_v1.is_310p', return_value=True)
+    @patch('vllm_ascend.utils.get_ascend_device_type',
+           return_value=AscendDeviceType._310P)
     @patch('vllm_ascend.attention.attention_v1.AscendAttentionState')
     def test_build_chunked_prefill(self, mock_ascend_attention_state,
-                                   mock_is_310p, mock_nd_to_nz_spec,
+                                   mock_soc_version, mock_nd_to_nz_spec,
                                    mock_npu_format_cast, mock_ascend_metadata):
         common_attn_metadata = AscendCommonAttentionMetadata(
             query_start_loc=torch.tensor([0, 2, 5, 9]),
@@ -162,8 +166,9 @@ class TestAscendAttentionMetadataBuilder(TestBase):
         self.builder.build(1, common_attn_metadata, mock_model)
     @patch('vllm_ascend.attention.attention_v1.AscendMetadata')
-    @patch('vllm_ascend.attention.attention_v1.is_310p', return_value=False)
-    def test_build_non_310p(self, mock_is_310p, mock_ascend_metadata):
+    @patch('vllm_ascend.utils.get_ascend_device_type',
+           return_value=AscendDeviceType._910_93)
+    def test_build_non_310p(self, mock_soc_version, mock_ascend_metadata):
         common_attn_metadata = AscendCommonAttentionMetadata(
             query_start_loc=torch.tensor([0, 2, 5, 9]),
             query_start_loc_cpu=torch.tensor([0, 2, 5, 9]),
@@ -450,12 +455,13 @@ class TestAscendAttentionBackendImpl(TestBase):
         assert output.shape == (10, 8 * 64)
     @patch('vllm_ascend.attention.attention_v1.get_forward_context')
-    @patch('vllm_ascend.attention.attention_v1.is_310p', return_value=False)
+    @patch('vllm_ascend.utils.get_ascend_device_type',
+           return_value=AscendDeviceType._910_93)
     @patch('torch_npu._npu_reshape_and_cache')
     @patch('vllm_ascend.attention.attention_v1.vanilla_chunked_prefill')
     def test_forward_head_size_192(self, mock_vanilla_prefill,
-                                   mock_npu_reshape_and_cache, mock_is_310p,
-                                   mock_get_forward_context):
+                                   mock_npu_reshape_and_cache,
+                                   mock_soc_version, mock_get_forward_context):
         """Test forward pass when head_size is 192"""
         self.impl.head_size = 192
@@ -522,9 +528,11 @@ class TestAscendAttentionBackendImpl(TestBase):
     @patch('torch_npu.npu_format_cast')
     @patch('torch_npu._npu_reshape_and_cache')
     @patch('torch_npu.npu_fused_infer_attention_score')
-    @patch('vllm_ascend.attention.attention_v1.is_310p', return_value=True)
+    @patch('vllm_ascend.utils.get_ascend_device_type',
+           return_value=AscendDeviceType._310P)
     @patch('vllm_ascend.attention.attention_v1.get_forward_context')
-    def test_forward_310p_device(self, mock_get_forward_context, mock_is_310p,
+    def test_forward_310p_device(self, mock_get_forward_context,
+                                 mock_soc_version,
                                  mock_npu_fused_infer_attention_score,
                                  mock_npu_reshape_and_cache,
                                  mock_npu_format_cast):

View File

@@ -92,7 +92,7 @@ def mock_distributed():
     with patch("vllm_ascend.ops.fused_moe.fused_moe.get_current_vllm_config", return_value=mock_vllm_config), \
          patch("vllm_ascend.ops.fused_moe.token_dispatcher.torch.distributed.get_rank", return_value=0), \
-         patch("vllm_ascend.ops.fused_moe.token_dispatcher.get_ascend_soc_version", return_value=None), \
+         patch("vllm_ascend.ops.fused_moe.token_dispatcher.get_ascend_device_type", return_value=None), \
          patch.dict("vllm.distributed.parallel_state.__dict__", _TP=tp_group, _EP=ep_group, _DP=dp_group,
                     _PP=pp_group), \
          patch.dict("vllm_ascend.distributed.parallel_state.__dict__", _MC2=ep_group), \

View File

@@ -19,6 +19,8 @@ import pytest
 import torch
 from vllm.model_executor.layers.activation import QuickGELU, SiluAndMul
+from vllm_ascend.utils import AscendDeviceType
 @pytest.fixture
 def dummy_tensor():
@@ -36,20 +38,22 @@ def test_QuickGELU_forward(mock_gelu, dummy_tensor):
     mock_gelu.assert_called_once()
-@pytest.mark.parametrize("is_310p_return", [True, False])
+@pytest.mark.parametrize("is_310p", [True, False])
 @patch("torch_npu.npu_swiglu", side_effect=lambda x: x + 1)
 @patch("torch.ops.vllm.maybe_wait_prefetch_done", side_effect=lambda x: None)
 @patch("torch.ops.vllm.maybe_prefetch_mlp_down_proj",
        side_effect=lambda x: None)
 def test_SiluAndMul_forward(mock_maybe_prefetch_mlp_down_proj,
                             mock_maybe_wait_prefetch_done, mock_swiglu,
-                            is_310p_return, dummy_tensor):
+                            is_310p, dummy_tensor):
-    with patch("vllm_ascend.utils.is_310p", return_value=is_310p_return):
+    with patch("vllm_ascend.utils.get_ascend_device_type",
+               return_value=AscendDeviceType._310P
+               if is_310p else AscendDeviceType._910_93):
         layer = SiluAndMul()
         out = layer.forward(dummy_tensor)
-        if is_310p_return:
+        if is_310p:
             expected_arg = dummy_tensor.to(torch.float32)
         else:
             expected_arg = dummy_tensor

View File

@@ -29,7 +29,7 @@ from vllm_ascend.ops.fused_moe.fused_moe import (
     AscendFusedMoE, AscendUnquantizedFusedMoEMethod)
 from vllm_ascend.ops.fused_moe.moe_mlp import (cumsum_group_list,
                                                unified_apply_mlp)
-from vllm_ascend.utils import AscendSocVersion, adapt_patch
+from vllm_ascend.utils import AscendDeviceType, adapt_patch
 adapt_patch(True)
@@ -129,7 +129,7 @@ def mock_dist_env(mocker: MockerFixture):
                return_value=mock_forward_context_obj), \
          patch('vllm_ascend.ops.fused_moe.prepare_finalize.get_forward_context',
                return_value=mock_forward_context_obj), \
-         patch("vllm_ascend.utils.get_ascend_soc_version", return_value=AscendSocVersion.A3), \
+         patch("vllm_ascend.utils.get_ascend_device_type", return_value=AscendDeviceType._910_93), \
          patch('vllm_ascend.ops.fused_moe.moe_mlp.get_forward_context',
                return_value=mock_forward_context_obj), \
          patch('vllm_ascend.ops.fused_moe.moe_comm_method.MC2CommImpl._get_token_dispatcher',
@@ -323,22 +323,21 @@ class TestCumsumGroupList(TestBase):
 class TestUnifiedApplyMLP(TestBase):
     @patch('vllm_ascend.ops.fused_moe.moe_mlp.get_forward_context')
-    @patch('vllm_ascend.ops.fused_moe.moe_mlp.is_310p')
+    @patch('vllm_ascend.utils.get_ascend_device_type',
+           return_value=AscendDeviceType._910_93)
     @patch('torch_npu.npu_grouped_matmul')
     @patch('torch_npu.npu_dynamic_quant')
     @patch('torch_npu.npu_dequant_swiglu_quant')
     def test_unified_apply_mlp_with_quantization_mc2(self, mock_npu_dequant,
                                                      mock_npu_dynamic_quant,
                                                      mock_npu_grouped_matmul,
-                                                     mock_is_310p,
+                                                     mock_soc_version,
                                                      mock_get_forward_context):
         mock_forward_context = MagicMock()
         mock_forward_context.moe_comm_type = MoECommType.MC2
         mock_get_forward_context.return_value = mock_forward_context
-        mock_is_310p.return_value = False
         mock_npu_dynamic_quant.return_value = (torch.randint(-128,
                                                              127, (10, 20),
                                                              dtype=torch.int8),
@@ -387,7 +386,8 @@ class TestUnifiedApplyMLP(TestBase):
         self.assertEqual(result.dtype, torch.bfloat16)
-    @patch('vllm_ascend.ops.fused_moe.moe_mlp.is_310p')
+    @patch('vllm_ascend.utils.get_ascend_device_type',
+           return_value=AscendDeviceType._910_93)
     @patch('torch_npu.npu_grouped_matmul')
     @patch('torch_npu.npu_swiglu')
     @patch('torch_npu.npu_dynamic_quant')
@@ -395,9 +395,7 @@
                                                mock_npu_dynamic_quant,
                                                mock_npu_swiglu,
                                                mock_npu_grouped_matmul,
-                                               mock_is_310p):
-        mock_is_310p.return_value = False
+                                               mock_soc_version):
         mock_npu_grouped_matmul.side_effect = [[
             torch.randn(10, 40, dtype=torch.float16)
         ], [torch.randn(10, 20, dtype=torch.float16)]]
@@ -490,15 +488,14 @@ class TestUnifiedApplyMLP(TestBase):
         self.assertEqual(result.shape, hidden_states_shape)
         self.assertEqual(result.dtype, torch.bfloat16)
-    @patch('vllm_ascend.ops.fused_moe.moe_mlp.is_310p')
+    @patch('vllm_ascend.utils.get_ascend_device_type',
+           return_value=AscendDeviceType._310P)
     @patch('torch_npu.npu_grouped_matmul')
     @patch('torch_npu.npu_swiglu')
     @patch('torch_npu.npu_dynamic_quant')
     def test_unified_apply_mlp_without_quantization_310p(
             self, mock_npu_dynamic_quant, mock_npu_swiglu,
-            mock_npu_grouped_matmul, mock_is_310p):
-        mock_is_310p.return_value = True
+            mock_npu_grouped_matmul, mock_soc_version):
         mock_gmm1_out = torch.randn(10, 40, dtype=torch.float16)
         mock_gmm2_out = torch.randn(10, 20, dtype=torch.float16)
         mock_npu_grouped_matmul.side_effect = [[mock_gmm1_out],
@@ -527,8 +524,6 @@ class TestUnifiedApplyMLP(TestBase):
             topk_scales=topk_scales,
             with_quant=False)
-        mock_is_310p.assert_called_once()
         self.assertEqual(mock_npu_grouped_matmul.call_count, 2)
         mock_npu_swiglu.assert_called_once()

View File

@@ -7,6 +7,7 @@ from vllm.model_executor.layers.layernorm import RMSNorm
 from tests.ut.base import PytestBase
 from vllm_ascend.quantization.w8a8 import AscendW8A8LinearMethod
+from vllm_ascend.utils import AscendDeviceType
 def mock_rms_norm(x, weight, eps):
@@ -60,8 +61,9 @@ class TestAscendRMSNorm(PytestBase):
     # Test case for addrmsnorm + w8a8 quant fusion
     def test_forward_oot_with_quant_fusion(self, mocker: MockerFixture):
-        mock_is_310p = mocker.patch("vllm_ascend.utils.is_310p")
-        mock_is_310p.return_value = False
+        mock_soc_version = mocker.patch(
+            "vllm_ascend.utils.get_ascend_device_type")
+        mock_soc_version.return_value = AscendDeviceType._910_93
         mock_get_forward_context = mocker.patch(
             "vllm_ascend.ops.layernorm.get_forward_context")

View File

@@ -12,6 +12,7 @@ from vllm.platforms import CpuArchEnum
 from tests.ut.base import TestBase
 from vllm_ascend.ascend_forward_context import set_ascend_forward_context
 from vllm_ascend.ops.rotary_embedding import _custom_rotary_embedding_enabled
+from vllm_ascend.utils import AscendDeviceType
 MODEL = "Qwen3-0.6B"
 MODEL_VL = "Qwen/Qwen2.5-VL-3B-Instruct"
@@ -97,7 +98,8 @@ class TestAscendRotaryEmbedding(unittest.TestCase):
         self.mock_self.is_neox_style = self.is_neox_style
     @patch('torch.ops._C_ascend')
-    @patch('vllm_ascend.ops.rotary_embedding.is_310p', return_value=False)
+    @patch('vllm_ascend.utils.get_ascend_device_type',
+           return_value=AscendDeviceType._910_93)
     @patch('vllm_ascend.ops.rotary_embedding._custom_rotary_embedding_enabled',
            return_value=True)
     @patch('torch.ops._npu_rotary_embedding')
@@ -106,8 +108,8 @@ class TestAscendRotaryEmbedding(unittest.TestCase):
     @patch('vllm.distributed.parallel_state._DP', MagicMock(world_size=1))
     @patch('vllm.distributed.parallel_state._TP', MagicMock(world_size=1))
     def test_rope_forward_oot_custom_kernel(self, mock_rotary_embedding,
-                                            mock_custom_enabled, mock_is_310p,
-                                            mock__c):
+                                            mock_custom_enabled,
+                                            mock_soc_version, mock__c):
         mock_config = MagicMock()
         mock_config.torchair_graph_config.enabled = False

View File

@@ -22,7 +22,7 @@ import torch
 from tests.ut.base import TestBase
 from vllm_ascend.ops.fused_moe.token_dispatcher import ( # isort: skip
-    AscendSocVersion, TokenDispatcherWithAll2AllV,
+    AscendDeviceType, TokenDispatcherWithAll2AllV,
     TokenDispatcherWithAllGather, TokenDispatcherWithMC2)
@@ -50,10 +50,10 @@ class TestTokenDispatcherWithMC2(TestBase):
             return_value=self.forward_context)
         self.forward_context_patch.start()
-        # Mock get_ascend_soc_version()
+        # Mock get_ascend_device_type()
         self.ascend_soc_version_patch = patch(
-            "vllm_ascend.ops.fused_moe.token_dispatcher.get_ascend_soc_version",
-            return_value=AscendSocVersion.A3)
+            "vllm_ascend.ops.fused_moe.token_dispatcher.get_ascend_device_type",
+            return_value=AscendDeviceType._910_93)
         self.ascend_soc_version_patch.start()
         kwargs = {"with_quant": False, "top_k": 8, "num_experts": 128}

View File

@@ -12,6 +12,7 @@ from vllm_ascend.quantization.w8a8 import (AscendC8KVCacheMethod,
                                            AscendW8A8LinearMethod,
                                            fused_experts, fused_experts_310p,
                                            quant_per_tensor)
+from vllm_ascend.utils import AscendDeviceType
 class TestQuantPerTensor(TestBase):
@@ -118,9 +119,11 @@ class TestAscendW8A8LinearMethod(TestBase):
         expected_y_output += bias
         self.assertTrue(torch.equal(output, expected_y_output))
-    @patch("vllm_ascend.quantization.w8a8.is_310p", return_value=True)
+    @patch('vllm_ascend.utils.get_ascend_device_type',
+           return_value=AscendDeviceType._310P)
     @patch("torch_npu.npu_quant_matmul")
-    def test_apply_with_x_is_310p(self, mock_npu_quant_matmul, mock_is_310p):
+    def test_apply_with_x_is_310p(self, mock_npu_quant_matmul,
+                                  mock_soc_version):
         layer = MagicMock()
         layer.aclnn_input_scale = 0.1
         layer.aclnn_input_offset = 0.2
@@ -279,11 +282,12 @@ class TestAscendW8A8FusedMoEMethod(TestBase):
         mock_fused_experts.assert_called_once()
         self.assertEqual(result.shape, (32, self.hidden_size))
-    @patch("vllm_ascend.quantization.w8a8.is_310p", return_value=True)
+    @patch('vllm_ascend.quantization.w8a8.get_ascend_device_type',
+           return_value=AscendDeviceType._310P)
     @patch('vllm_ascend.quantization.w8a8.select_experts')
     @patch('vllm_ascend.quantization.w8a8.fused_experts_310p')
     def test_apply_is_310p(self, mock_fused_experts_310p, mock_select_experts,
-                           mock_is_310p):
+                           mock_soc_version):
         # Setup
         mock_layer = MagicMock()
         x = torch.randn(32, self.hidden_size)
@@ -342,8 +346,9 @@ class TestAscendC8KVCacheMethod(TestBase):
         expected_shape = (self.layer.num_kv_heads * self.layer.head_size, )
         self.assertEqual(param.shape, expected_shape)
-    @patch("vllm_ascend.quantization.w8a8.is_310p", return_value=False)
-    def test_process_weights_after_loading_not_310p(self, mock_is_310p):
+    @patch('vllm_ascend.utils.get_ascend_device_type',
+           return_value=AscendDeviceType._910_93)
+    def test_process_weights_after_loading_not_310p(self, mock_soc_version):
         key_data = torch.ones(4 * 64)
         value_data = torch.ones(4 * 64) * 2
@@ -356,8 +361,9 @@ class TestAscendC8KVCacheMethod(TestBase):
         self.assertTrue(torch.all(self.method.antiquant_scale_comb[0] == 1))
         self.assertTrue(torch.all(self.method.antiquant_scale_comb[1] == 2))
-    @patch("vllm_ascend.quantization.w8a8.is_310p", return_value=True)
-    def test_process_weights_after_loading_is_310p(self, mock_is_310p):
+    @patch('vllm_ascend.utils.get_ascend_device_type',
+           return_value=AscendDeviceType._310P)
+    def test_process_weights_after_loading_is_310p(self, mock_soc_version):
         key_data = torch.ones(4 * 64)
         value_data = torch.ones(4 * 64) * 2

View File

@@ -9,7 +9,7 @@ from vllm.platforms import PlatformEnum
 from tests.ut.base import TestBase
 from vllm_ascend.platform import NPUPlatform
-from vllm_ascend.utils import ASCEND_QUANTIZATION_METHOD
+from vllm_ascend.utils import ASCEND_QUANTIZATION_METHOD, AscendDeviceType
 class TestNPUPlatform(TestBase):
@@ -231,13 +231,14 @@ class TestNPUPlatform(TestBase):
     @patch("vllm_ascend.ascend_config.check_ascend_config")
     @patch("vllm_ascend.ascend_config.init_ascend_config")
     @patch("vllm_ascend.utils.update_aclgraph_sizes")
-    @patch("vllm_ascend.utils.is_310p", return_value=False)
+    @patch('vllm_ascend.utils.get_ascend_device_type',
+           return_value=AscendDeviceType._910_93)
     @patch("os.environ", {})
     @patch(
         "vllm_ascend.core.recompute_schedule_config.RecomputeSchedulerConfig.initialize_from_config"
     )
     def test_check_and_update_config_basic_config_update(
-            self, mock_init_recompute, mock_is_310p, mock_update_acl,
+            self, mock_init_recompute, mock_soc_version, mock_update_acl,
             mock_init_ascend, mock_check_ascend):
         mock_init_ascend.return_value = TestNPUPlatform.mock_vllm_ascend_config(
         )
@@ -259,7 +260,8 @@ class TestNPUPlatform(TestBase):
         mock_init_ascend.assert_called_once_with(vllm_config)
         mock_check_ascend.assert_called_once()
-    @patch("vllm_ascend.utils.is_310p", return_value=False)
+    @patch('vllm_ascend.utils.get_ascend_device_type',
+           return_value=AscendDeviceType._910_93)
     @patch("vllm_ascend.ascend_config.check_ascend_config")
     @patch("vllm_ascend.ascend_config.init_ascend_config")
     @patch(
@@ -267,7 +269,7 @@ class TestNPUPlatform(TestBase):
     )
     def test_check_and_update_config_no_model_config_warning(
             self, mock_init_recompute, mock_init_ascend, mock_check_ascend,
-            mock_is_310p):
+            mock_soc_version):
         mock_init_ascend.return_value = TestNPUPlatform.mock_vllm_ascend_config(
         )
         vllm_config = TestNPUPlatform.mock_vllm_config()
@@ -283,7 +285,8 @@ class TestNPUPlatform(TestBase):
             self.platform.check_and_update_config(vllm_config)
         self.assertTrue("Model config is missing" in cm.output[0])
-    @patch("vllm_ascend.utils.is_310p", return_value=False)
+    @patch('vllm_ascend.utils.get_ascend_device_type',
+           return_value=AscendDeviceType._910_93)
     @patch("vllm_ascend.ascend_config.check_ascend_config")
     @patch("vllm_ascend.ascend_config.init_ascend_config")
     @patch(
@@ -291,7 +294,7 @@ class TestNPUPlatform(TestBase):
     )
     def test_check_and_update_config_enforce_eager_mode(
             self, mock_init_recompute, mock_init_ascend, mock_check_ascend,
-            mock_is_310p):
+            mock_soc_version):
         mock_init_ascend.return_value = TestNPUPlatform.mock_vllm_ascend_config(
         )
         vllm_config = TestNPUPlatform.mock_vllm_config()
@@ -318,7 +321,8 @@ class TestNPUPlatform(TestBase):
             CUDAGraphMode.NONE,
         )
-    @patch("vllm_ascend.utils.is_310p", return_value=False)
+    @patch('vllm_ascend.utils.get_ascend_device_type',
+           return_value=AscendDeviceType._910_93)
     @patch("vllm_ascend.utils.update_default_aclgraph_sizes")
     @patch("vllm_ascend.ascend_config.check_ascend_config")
     @patch("vllm_ascend.ascend_config.init_ascend_config")
@@ -327,7 +331,7 @@ class TestNPUPlatform(TestBase):
     )
     def test_check_and_update_config_unsupported_compilation_level(
             self, mock_init_recompute, mock_init_ascend, mock_check_ascend,
-            mock_update_default, mock_is_310p):
+            mock_update_default, mock_soc_version):
         mock_update_default.return_value = MagicMock()
         mock_init_ascend.return_value = TestNPUPlatform.mock_vllm_ascend_config(
         )
@@ -357,11 +361,12 @@ class TestNPUPlatform(TestBase):
     @pytest.mark.skip(
         "Revert me when vllm support setting cudagraph_mode on oot platform")
-    @patch("vllm_ascend.utils.is_310p", return_value=False)
+    @patch('vllm_ascend.utils.get_ascend_device_type',
+           return_value=AscendDeviceType._910_93)
     @patch("vllm_ascend.ascend_config.check_ascend_config")
     @patch("vllm_ascend.ascend_config.init_ascend_config")
     def test_check_and_update_config_unsupported_cudagraph_mode(
-            self, mock_init_ascend, mock_check_ascend, mock_is_310p):
+            self, mock_init_ascend, mock_check_ascend, mock_soc_version):
         mock_init_ascend.return_value = TestNPUPlatform.mock_vllm_ascend_config(
         )
         vllm_config = TestNPUPlatform.mock_vllm_config()
@@ -386,7 +391,8 @@ class TestNPUPlatform(TestBase):
             CUDAGraphMode.NONE,
         )
-    @patch("vllm_ascend.utils.is_310p", return_value=False)
+    @patch('vllm_ascend.utils.get_ascend_device_type',
+           return_value=AscendDeviceType._910_93)
     @patch("vllm_ascend.utils.update_default_aclgraph_sizes")
     @patch("vllm_ascend.ascend_config.check_ascend_config")
     @patch("vllm_ascend.ascend_config.init_ascend_config")
@@ -395,7 +401,7 @@ class TestNPUPlatform(TestBase):
     )
     def test_check_and_update_config_torchair_enabled_compilation(
             self, mock_init_recompute, mock_init_ascend, mock_check_ascend,
-            mock_update_default, mock_is_310p):
+            mock_update_default, mock_soc_version):
         mock_update_default.return_value = MagicMock()
         mock_ascend_config = TestNPUPlatform.mock_vllm_ascend_config()
         mock_ascend_config.torchair_graph_config.enabled = True
@@ -424,7 +430,8 @@ class TestNPUPlatform(TestBase):
             CUDAGraphMode.NONE,
         )
-    @patch("vllm_ascend.utils.is_310p", return_value=False)
+    @patch('vllm_ascend.utils.get_ascend_device_type',
+           return_value=AscendDeviceType._910_93)
     @patch("vllm_ascend.ascend_config.check_ascend_config")
     @patch("vllm_ascend.ascend_config.init_ascend_config")
     @patch(
@@ -432,7 +439,7 @@ class TestNPUPlatform(TestBase):
     )
     def test_check_and_update_config_cache_config_block_size(
             self, mock_init_recompute, mock_init_ascend, mock_check_ascend,
-            mock_is_310p):
+            mock_soc_version):
         mock_init_ascend.return_value = TestNPUPlatform.mock_vllm_ascend_config(
         )
         vllm_config = TestNPUPlatform.mock_vllm_config()
@@ -450,7 +457,8 @@ class TestNPUPlatform(TestBase):
         self.assertEqual(vllm_config.cache_config.block_size, 128)
-    @patch("vllm_ascend.utils.is_310p", return_value=False)
+    @patch('vllm_ascend.utils.get_ascend_device_type',
+           return_value=AscendDeviceType._910_93)
     @patch("vllm_ascend.ascend_config.check_ascend_config")
     @patch("vllm_ascend.ascend_config.init_ascend_config")
     @patch(
@@ -458,7 +466,7 @@ class TestNPUPlatform(TestBase):
     )
     def test_check_and_update_config_v1_worker_class_selection(
             self, mock_init_recompute, mock_init_ascend, mock_check_ascend,
-            mock_is_310p):
+            mock_soc_version):
         mock_init_ascend.return_value = TestNPUPlatform.mock_vllm_ascend_config(
         )
         vllm_config = TestNPUPlatform.mock_vllm_config()
@@ -489,12 +497,13 @@ class TestNPUPlatform(TestBase):
     @patch("vllm_ascend.ascend_config.check_ascend_config")
     @patch("vllm_ascend.ascend_config.init_ascend_config")
-    @patch("vllm_ascend.utils.is_310p", return_value=True)
+    @patch('vllm_ascend.utils.get_ascend_device_type',
+           return_value=AscendDeviceType._310P)
     @patch(
         "vllm_ascend.core.recompute_schedule_config.RecomputeSchedulerConfig.initialize_from_config"
     )
     def test_check_and_update_config_310p_no_custom_ops(
-            self, mock_init_recompute, mock_is_310p, mock_init_ascend,
+            self, mock_init_recompute, mock_soc_version, mock_init_ascend,
             mock_check_ascend):
         mock_init_ascend.return_value = TestNPUPlatform.mock_vllm_ascend_config(
         )
@@ -511,7 +520,8 @@ class TestNPUPlatform(TestBase):
             self.platform.check_and_update_config(vllm_config)
         self.assertEqual(vllm_config.compilation_config.custom_ops, [])
-    @patch("vllm_ascend.utils.is_310p", return_value=False)
+    @patch('vllm_ascend.utils.get_ascend_device_type',
+           return_value=AscendDeviceType._910_93)
     @patch("vllm_ascend.ascend_config.check_ascend_config")
     @patch("vllm_ascend.ascend_config.init_ascend_config")
     @patch(
@@ -519,7 +529,7 @@ class TestNPUPlatform(TestBase):
     )
     def test_check_and_update_config_ascend_scheduler_config(
            self, mock_init_recompute, mock_init_ascend, mock_check_ascend,
-            mock_is_310p):
+            mock_soc_version):
         mock_ascend_config = TestNPUPlatform.mock_vllm_ascend_config()
         mock_ascend_config.ascend_scheduler_config.enabled = True
         mock_init_ascend.return_value = mock_ascend_config

View File

@@ -35,16 +35,6 @@ class TestUtils(TestBase):
         from vllm_ascend import platform
         importlib.reload(platform)
-    def test_is_310p(self):
-        utils._IS_310P = None
-        with mock.patch("vllm_ascend._build_info.__soc_version__",
-                        "Ascend310P3"):
-            self.assertTrue(utils.is_310p())
-        utils._IS_310P = None
-        with mock.patch("vllm_ascend._build_info.__soc_version__",
-                        "Ascend910P1"):
-            self.assertFalse(utils.is_310p())
     def test_is_enable_nz(self):
         with mock.patch("vllm_ascend.utils.envs_ascend.VLLM_ASCEND_ENABLE_NZ",
                         1):

View File

@@ -28,7 +28,7 @@ from vllm_ascend.quantization.quant_config import AscendFusedMoEMethod
 from vllm_ascend.torchair.ops.torchair_fused_moe import (
     TorchairAscendFusedMoE, TorchairAscendUnquantizedFusedMoEMethod)
 from vllm_ascend.utils import adapt_patch # noqa E402
-from vllm_ascend.utils import AscendSocVersion
+from vllm_ascend.utils import AscendDeviceType
 adapt_patch(True)
@@ -398,7 +398,7 @@ class TestTorchairAscendUnquantizedFusedMoEMethod:
         forward_context = MagicMock(
             fused_moe_state=get_fused_moe_state(ep_size, is_prefill, True))
         with patch("vllm_ascend.torchair.ops.torchair_fused_moe.get_forward_context", return_value=forward_context), \
-             patch("vllm_ascend.torchair.ops.torchair_fused_moe.get_ascend_soc_version", return_value=AscendSocVersion.A3):
+             patch("vllm_ascend.torchair.ops.torchair_fused_moe.get_ascend_device_type", return_value=AscendDeviceType._910_93):
             expert_map = torch.tensor([0, 1, 2, -1, -1, -1, -1, -1])
             moe_method.ep_size = ep_size
             x = torch.randn(8, 2, 2)

View File

@@ -8,6 +8,7 @@ from vllm_ascend.torchair.ops.torchair_rotary_embedding import (
     _set_cos_sin_cache, custom_rotary_embedding_enabled,
     native_rope_deepseek_forward, rope_forward_oot, rotate_half,
     yarn_find_correction_dim, yarn_get_mscale)
+from vllm_ascend.utils import AscendDeviceType
 class TestCustomRotaryEmbeddingEnabled(TestBase):
@@ -107,14 +108,15 @@ class TestRopeForwardOot(TestBase):
     @patch('torch.ops._C_ascend')
     @patch(
         'vllm_ascend.torchair.ops.torchair_rotary_embedding.get_ascend_config')
-    @patch('vllm_ascend.torchair.ops.torchair_rotary_embedding.is_310p',
-           return_value=False)
+    @patch('vllm_ascend.utils.get_ascend_device_type',
+           return_value=AscendDeviceType._910_93)
     @patch(
         'vllm_ascend.torchair.ops.torchair_rotary_embedding.custom_rotary_embedding_enabled',
         return_value=True)
     @patch('torch.ops._npu_rotary_embedding')
     def test_rope_forward_oot_custom_kernel(self, mock_rotary_embedding,
-                                            mock_custom_enabled, mock_is_310p,
+                                            mock_custom_enabled,
+                                            mock_soc_version,
                                             mock_get_ascend_config, mock__c):
         mock_config = MagicMock()
         mock_config.torchair_graph_config.enabled = False

View File

@@ -5,7 +5,7 @@ import torch
 from tests.ut.base import TestBase
 from vllm_ascend.torchair.quantization.torchair_w8a8_dynamic import (
     torchair_fused_experts_with_all2all, torchair_fused_experts_with_mc2)
-from vllm_ascend.utils import AscendSocVersion
+from vllm_ascend.utils import AscendDeviceType
 class TestAscendW8A8FusedMoEMethod(TestBase):
@@ -79,7 +79,7 @@ class TestAscendW8A8FusedMoEMethod(TestBase):
         'HCCL_INTRA_PCIE_ENABLE': '1'
     })
     @patch(
-        "vllm_ascend.torchair.quantization.torchair_w8a8_dynamic.get_ascend_soc_version"
+        "vllm_ascend.torchair.quantization.torchair_w8a8_dynamic.get_ascend_device_type"
    )
     @patch(
         'vllm_ascend.torchair.quantization.torchair_w8a8_dynamic.get_mc2_group'
@@ -94,7 +94,7 @@ class TestAscendW8A8FusedMoEMethod(TestBase):
                                              mock_ascend_soc_version):
         """Test expert_scales is passed in A2 SOC version with mc2 optimization"""
         # Setup mocks
-        mock_ascend_soc_version.return_value = AscendSocVersion.A2
+        mock_ascend_soc_version.return_value = AscendDeviceType._910B
         mock_group = MagicMock()
         mock_group.rank_in_group = 0

View File

@@ -16,7 +16,7 @@ from unittest.mock import MagicMock, patch
 import pytest
 from vllm_ascend.ascend_forward_context import MoECommType
-from vllm_ascend.utils import AscendSocVersion
+from vllm_ascend.utils import AscendDeviceType
 from vllm_ascend.worker.model_runner_v1 import NPUModelRunner
@@ -25,21 +25,21 @@ from vllm_ascend.worker.model_runner_v1 import NPUModelRunner
     "soc_version, enable_expert_parallel, world_size, num_tokens, mc2_tokens_capacity, quant_type, expected_method",
     [
         # Case 1: Expert parallel is disabled, should always be 'allgather'
-        (AscendSocVersion.A2, False, 8, 100, 256, None, MoECommType.ALLGATHER),
-        (AscendSocVersion.A3, False, 16, 500, 256, None, MoECommType.ALLGATHER),
+        (AscendDeviceType._910B, False, 8, 100, 256, None, MoECommType.ALLGATHER),
+        (AscendDeviceType._910_93, False, 16, 500, 256, None, MoECommType.ALLGATHER),
         # Case 2: A2 SOC with w4a8_dynamic -> use alltoall when not mc2
-        (AscendSocVersion.A2, True, 8, 100, 256, "w4a8_dynamic", MoECommType.ALLTOALL),
-        (AscendSocVersion.A2, True, 16, 257, 256, "w4a8_dynamic", MoECommType.ALLTOALL),
-        (AscendSocVersion.A2, True, 16, 100, 256, "w4a8_dynamic", MoECommType.MC2),  # meets mc2 condition
+        (AscendDeviceType._910B, True, 8, 100, 256, "w4a8_dynamic", MoECommType.ALLTOALL),
+        (AscendDeviceType._910B, True, 16, 257, 256, "w4a8_dynamic", MoECommType.ALLTOALL),
+        (AscendDeviceType._910B, True, 16, 100, 256, "w4a8_dynamic", MoECommType.MC2),  # meets mc2 condition
         # Case 3: A2 SOC without w4a8_dynamic -> fallback to allgather
-        (AscendSocVersion.A2, True, 8, 100, 256, None, MoECommType.ALLGATHER),
-        (AscendSocVersion.A2, True, 16, 257, 256, None, MoECommType.ALLGATHER),
+        (AscendDeviceType._910B, True, 8, 100, 256, None, MoECommType.ALLGATHER),
+        (AscendDeviceType._910B, True, 16, 257, 256, None, MoECommType.ALLGATHER),
         # Case 4: A3 SOC
-        (AscendSocVersion.A3, True, 8, 100, 256, None, MoECommType.MC2),
-        (AscendSocVersion.A3, True, 8, 257, 256, None, MoECommType.ALLTOALL),
+        (AscendDeviceType._910_93, True, 8, 100, 256, None, MoECommType.MC2),
+        (AscendDeviceType._910_93, True, 8, 257, 256, None, MoECommType.ALLTOALL),
     ])
 # yapf: enable
 def test_select_moe_comm_method(soc_version, enable_expert_parallel,
@@ -65,7 +65,7 @@ def test_select_moe_comm_method(soc_version, enable_expert_parallel,
     mock_runner.vllm_config = mock_vllm_config
     # Patch the helper functions
-    with patch('vllm_ascend.worker.model_runner_v1.get_ascend_soc_version',
+    with patch('vllm_ascend.worker.model_runner_v1.get_ascend_device_type',
                return_value=soc_version), \
         patch('vllm_ascend.worker.model_runner_v1.is_global_first_rank',
               return_value=True), \
@@ -100,7 +100,7 @@ def test_select_moe_comm_method_unsupported_soc():
     unsupported_soc = "UnsupportedSOC"
-    with patch('vllm_ascend.worker.model_runner_v1.get_ascend_soc_version',
+    with patch('vllm_ascend.worker.model_runner_v1.get_ascend_device_type',
                return_value=unsupported_soc), \
         patch('vllm_ascend.worker.model_runner_v1.is_global_first_rank',
              return_value=True), \

View File

@@ -52,7 +52,7 @@ class TestNPUWorker(TestBase):
     @patch("vllm_ascend.worker.worker_v1.register_ascend_customop")
     @patch("vllm_ascend.worker.worker_v1.get_ascend_config")
     @patch("vllm_ascend.worker.worker_v1.init_ascend_config")
-    @patch("vllm_ascend.worker.worker_v1.init_ascend_soc_version")
+    @patch("vllm_ascend.worker.worker_v1.check_ascend_device_type")
     @patch("vllm_ascend.worker.worker_v1.try_register_lib")
     @patch(init_cached_hf_modules_path)
     @patch("vllm_ascend.worker.worker_v1.NPUWorker._init_profiler")
@@ -61,7 +61,7 @@ class TestNPUWorker(TestBase):
         mock_init_profiler,
         mock_init_cached_hf_modules,
         mock_try_register_lib,
-        mock_init_ascend_soc_version,
+        mock_check_ascend_device_type,
         mock_init_ascend_config,
         mock_get_ascend_config,
         mock_register_ascend_customop,
@@ -93,7 +93,7 @@ class TestNPUWorker(TestBase):
         mock_register_atb_extensions.assert_called_once()
         mock_register_ascend_customop.assert_called_once()
         mock_init_ascend_config.assert_called_once_with(self.vllm_config_mock)
-        mock_init_ascend_soc_version.assert_called_once()
+        mock_check_ascend_device_type.assert_called_once()
         # Verify try_register_lib call
         mock_try_register_lib.assert_called_once_with(
@@ -114,7 +114,7 @@ class TestNPUWorker(TestBase):
     @patch("vllm_ascend.worker.worker_v1.register_ascend_customop")
     @patch("vllm_ascend.worker.worker_v1.get_ascend_config")
     @patch("vllm_ascend.worker.worker_v1.init_ascend_config")
-    @patch("vllm_ascend.worker.worker_v1.init_ascend_soc_version")
+    @patch("vllm_ascend.worker.worker_v1.check_ascend_device_type")
     @patch("vllm_ascend.worker.worker_v1.try_register_lib")
     @patch(init_cached_hf_modules_path)
     @patch("vllm_ascend.worker.worker_v1.NPUWorker._init_profiler")
@@ -123,7 +123,7 @@ class TestNPUWorker(TestBase):
         mock_init_profiler,
         mock_init_cached_hf_modules,
         mock_try_register_lib,
-        mock_init_ascend_soc_version,
+        mock_check_ascend_device_type,
         mock_init_ascend_config,
         mock_get_ascend_config,
         mock_register_ascend_customop,
@@ -159,7 +159,7 @@ class TestNPUWorker(TestBase):
     @patch("vllm_ascend.worker.worker_v1.register_ascend_customop")
     @patch("vllm_ascend.worker.worker_v1.get_ascend_config")
     @patch("vllm_ascend.worker.worker_v1.init_ascend_config")
-    @patch("vllm_ascend.worker.worker_v1.init_ascend_soc_version")
+    @patch("vllm_ascend.worker.worker_v1.check_ascend_device_type")
     @patch("vllm_ascend.worker.worker_v1.try_register_lib")
     @patch(init_cached_hf_modules_path)
     @patch("vllm_ascend.worker.worker_v1.NPUWorker._init_profiler")
@@ -168,7 +168,7 @@ class TestNPUWorker(TestBase):
         mock_init_profiler,
         mock_init_cached_hf_modules,
         mock_try_register_lib,
-        mock_init_ascend_soc_version,
+        mock_check_ascend_device_type,
         mock_init_ascend_config,
         mock_get_ascend_config,
         mock_register_ascend_customop,