[main] rename device type (#5099)

### What this PR does / why we need it?
- Rename `_910B` to `A2`
- Rename `_910_93` to `A3`
- Rename `_910_95` to `A5`

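For illustration, a minimal sketch of the enum after the rename (the string values are assumptions for illustration; only the member names come from this PR's diff, and the real definition lives in `vllm_ascend/utils.py`):

```python
from enum import Enum

# Hypothetical sketch of the renamed device-type enum; the values are
# illustrative, only the member names are taken from this PR's hunks.
class AscendDeviceType(Enum):
    _310P = "310P"  # unchanged
    A2 = "A2"       # formerly _910B
    A3 = "A3"       # formerly _910_93
    A5 = "A5"       # formerly _910_95
```
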
- vLLM version: v0.12.0
- vLLM main: ad32e3e19c

Signed-off-by: zzzzwwjj <1183291235@qq.com>
Author: zzzzwwjj
Date: 2025-12-17 14:08:19 +08:00
Committed by: GitHub
Parent: 4144376e88
Commit: 06b82e7503
16 changed files with 47 additions and 48 deletions

View File

@@ -49,7 +49,7 @@ def test_SiluAndMul_forward(mock_maybe_prefetch_mlp_down_proj,
     with patch("vllm_ascend.utils.get_ascend_device_type",
                return_value=AscendDeviceType._310P
-               if is_310p else AscendDeviceType._910_93):
+               if is_310p else AscendDeviceType.A3):
         layer = SiluAndMul()
         out = layer.forward(dummy_tensor)

View File

@@ -127,7 +127,7 @@ def mock_dist_env(mocker: MockerFixture):
               return_value=mock_forward_context_obj), \
         patch('vllm_ascend.ops.fused_moe.prepare_finalize.get_forward_context',
               return_value=mock_forward_context_obj), \
-        patch("vllm_ascend.utils.get_ascend_device_type", return_value=AscendDeviceType._910_93), \
+        patch("vllm_ascend.utils.get_ascend_device_type", return_value=AscendDeviceType.A3), \
         patch('vllm_ascend.ops.fused_moe.moe_mlp.get_forward_context',
               return_value=mock_forward_context_obj), \
         patch('vllm_ascend.ops.fused_moe.moe_comm_method.MC2CommImpl._get_token_dispatcher',
@@ -323,7 +323,7 @@ class TestUnifiedApplyMLP(TestBase):
     @patch('vllm_ascend.ops.fused_moe.moe_mlp.get_forward_context')
     @patch('vllm_ascend.utils.get_ascend_device_type',
-           return_value=AscendDeviceType._910_93)
+           return_value=AscendDeviceType.A3)
     @patch('torch_npu.npu_grouped_matmul')
     @patch('torch_npu.npu_dynamic_quant')
     @patch('torch_npu.npu_dequant_swiglu_quant')
@@ -386,7 +386,7 @@ class TestUnifiedApplyMLP(TestBase):
         self.assertEqual(result.dtype, torch.bfloat16)

     @patch('vllm_ascend.utils.get_ascend_device_type',
-           return_value=AscendDeviceType._910_93)
+           return_value=AscendDeviceType.A3)
     @patch('torch_npu.npu_grouped_matmul')
     @patch('torch_npu.npu_swiglu')
     @patch('torch_npu.npu_dynamic_quant')

View File

@@ -30,7 +30,7 @@ def test_RMSNorm_forward(mock_add_rmsnorm, mock_rmsnorm, is_310p, residual,
     with patch("vllm_ascend.utils.get_ascend_device_type",
                return_value=AscendDeviceType._310P
-               if is_310p else AscendDeviceType._910_93):
+               if is_310p else AscendDeviceType.A3):
         layer = RMSNorm(hidden_size=8, eps=1e-05)
         if residual is not None:
             out_x, out_residual = layer.forward_oot(dummy_tensor, residual)

View File

@@ -99,7 +99,7 @@ class TestAscendRotaryEmbedding(unittest.TestCase):
     @patch('torch.ops._C_ascend')
     @patch('vllm_ascend.utils.get_ascend_device_type',
-           return_value=AscendDeviceType._910_93)
+           return_value=AscendDeviceType.A3)
     @patch('vllm_ascend.ops.rotary_embedding._custom_rotary_embedding_enabled',
            return_value=True)
     @patch('torch.ops._npu_rotary_embedding')

View File

@@ -53,7 +53,7 @@ class TestTokenDispatcherWithMC2(TestBase):
         # Mock get_ascend_device_type()
         self.ascend_soc_version_patch = patch(
             "vllm_ascend.ops.fused_moe.token_dispatcher.get_ascend_device_type",
-            return_value=AscendDeviceType._910_93)
+            return_value=AscendDeviceType.A3)
         self.ascend_soc_version_patch.start()
         kwargs = {"with_quant": False, "top_k": 8, "num_experts": 128}
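
For reference, the patching pattern these tests share, shown in isolation (a minimal sketch; it assumes `AscendDeviceType` and `get_ascend_device_type` are exported from `vllm_ascend.utils`, as the hunks above suggest):

```python
from unittest.mock import patch

import vllm_ascend.utils as utils
from vllm_ascend.utils import AscendDeviceType

# Force the device type seen by code under test; A3 was formerly _910_93.
with patch("vllm_ascend.utils.get_ascend_device_type",
           return_value=AscendDeviceType.A3):
    assert utils.get_ascend_device_type() == AscendDeviceType.A3
```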