[main] rename device type (#5099)

### What this PR does / why we need it?
Rename the `AscendDeviceType` members (see the sketch below):
- `_910B` → `A2`
- `_910_93` → `A3`
- `_910_95` → `A5`
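
For orientation, here is a minimal sketch of the enum after the rename and how callers and tests select on it. The member values, the stand-in `get_ascend_device_type()` helper, and the patch target below are illustrative assumptions, not the actual `vllm_ascend.utils` definitions, which may differ.

```python
# Minimal sketch, NOT the real vllm_ascend code: member values and the
# helper are assumptions used only to illustrate the old -> new mapping.
from enum import Enum
from unittest.mock import patch


class AscendDeviceType(Enum):
    _310P = "310P"   # unchanged
    A2 = "A2"        # formerly _910B
    A3 = "A3"        # formerly _910_93
    A5 = "A5"        # formerly _910_95


def get_ascend_device_type() -> AscendDeviceType:
    """Stand-in for vllm_ascend.utils.get_ascend_device_type()."""
    return AscendDeviceType.A2


# Runtime checks now use the new names, e.g.:
if get_ascend_device_type() in {AscendDeviceType.A3}:
    pass  # A3-only behaviour (e.g. the extra MC2 warmup run in the tests below)

# Tests patch the helper the same way, just with the renamed member:
with patch(f"{__name__}.get_ascend_device_type",
           return_value=AscendDeviceType.A3):
    assert get_ascend_device_type() is AscendDeviceType.A3
```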

- vLLM version: v0.12.0
- vLLM main: ad32e3e19c

Signed-off-by: zzzzwwjj <1183291235@qq.com>
Author: zzzzwwjj
Date: 2025-12-17 14:08:19 +08:00
Commit: 06b82e7503 (parent 4144376e88), committed by GitHub
16 changed files with 47 additions and 48 deletions

@@ -215,7 +215,7 @@ def test_aclgraph_capture_replay_metrics_dp2(
     # Part A: Warmup runs (Profile run + 2 runs per captured graph)
     warmup_runs = 1 + (2 * max_batch_sizes)
     soc_version = get_ascend_device_type()
-    if soc_version in {AscendDeviceType._910_93} and "DeepSeek" in model:
+    if soc_version in {AscendDeviceType.A3} and "DeepSeek" in model:
         # An extra warmup run is needed for MC2 warmup here
         warmup_runs += 1

@@ -26,7 +26,7 @@ class TestAscendAttentionBackend(TestBase):
                                            AscendAttentionMetadataBuilder)
     @patch('vllm_ascend.utils.get_ascend_device_type',
-           return_value=AscendDeviceType._910_93)
+           return_value=AscendDeviceType.A3)
     def test_get_kv_cache_shape_not_310p(self, mock_soc_version):
         result = AscendAttentionBackend.get_kv_cache_shape(10, 20, 30, 40)
         self.assertEqual(result, (2, 10, 20, 30, 40))
@@ -103,7 +103,7 @@ class TestAscendAttentionMetadataBuilder(TestBase):
     @patch('vllm_ascend.attention.attention_v1.AscendMetadata')
     @patch('vllm_ascend.utils.get_ascend_device_type',
-           return_value=AscendDeviceType._910_93)
+           return_value=AscendDeviceType.A3)
     def test_build_non_310p(self, mock_soc_version, mock_ascend_metadata):
         common_attn_metadata = AscendCommonAttentionMetadata(
             query_start_loc=torch.tensor([0, 2, 5, 9]),

@@ -49,7 +49,7 @@ def test_SiluAndMul_forward(mock_maybe_prefetch_mlp_down_proj,
     with patch("vllm_ascend.utils.get_ascend_device_type",
                return_value=AscendDeviceType._310P
-               if is_310p else AscendDeviceType._910_93):
+               if is_310p else AscendDeviceType.A3):
         layer = SiluAndMul()
         out = layer.forward(dummy_tensor)

@@ -127,7 +127,7 @@ def mock_dist_env(mocker: MockerFixture):
               return_value=mock_forward_context_obj), \
          patch('vllm_ascend.ops.fused_moe.prepare_finalize.get_forward_context',
                return_value=mock_forward_context_obj), \
-         patch("vllm_ascend.utils.get_ascend_device_type", return_value=AscendDeviceType._910_93), \
+         patch("vllm_ascend.utils.get_ascend_device_type", return_value=AscendDeviceType.A3), \
          patch('vllm_ascend.ops.fused_moe.moe_mlp.get_forward_context',
                return_value=mock_forward_context_obj), \
          patch('vllm_ascend.ops.fused_moe.moe_comm_method.MC2CommImpl._get_token_dispatcher',
@@ -323,7 +323,7 @@ class TestUnifiedApplyMLP(TestBase):
     @patch('vllm_ascend.ops.fused_moe.moe_mlp.get_forward_context')
     @patch('vllm_ascend.utils.get_ascend_device_type',
-           return_value=AscendDeviceType._910_93)
+           return_value=AscendDeviceType.A3)
     @patch('torch_npu.npu_grouped_matmul')
     @patch('torch_npu.npu_dynamic_quant')
     @patch('torch_npu.npu_dequant_swiglu_quant')
@@ -386,7 +386,7 @@ class TestUnifiedApplyMLP(TestBase):
         self.assertEqual(result.dtype, torch.bfloat16)
 
     @patch('vllm_ascend.utils.get_ascend_device_type',
-           return_value=AscendDeviceType._910_93)
+           return_value=AscendDeviceType.A3)
     @patch('torch_npu.npu_grouped_matmul')
     @patch('torch_npu.npu_swiglu')
     @patch('torch_npu.npu_dynamic_quant')

@@ -30,7 +30,7 @@ def test_RMSNorm_forward(mock_add_rmsnorm, mock_rmsnorm, is_310p, residual,
     with patch("vllm_ascend.utils.get_ascend_device_type",
                return_value=AscendDeviceType._310P
-               if is_310p else AscendDeviceType._910_93):
+               if is_310p else AscendDeviceType.A3):
         layer = RMSNorm(hidden_size=8, eps=1e-05)
         if residual is not None:
             out_x, out_residual = layer.forward_oot(dummy_tensor, residual)

@@ -99,7 +99,7 @@ class TestAscendRotaryEmbedding(unittest.TestCase):
     @patch('torch.ops._C_ascend')
     @patch('vllm_ascend.utils.get_ascend_device_type',
-           return_value=AscendDeviceType._910_93)
+           return_value=AscendDeviceType.A3)
     @patch('vllm_ascend.ops.rotary_embedding._custom_rotary_embedding_enabled',
            return_value=True)
     @patch('torch.ops._npu_rotary_embedding')

@@ -53,7 +53,7 @@ class TestTokenDispatcherWithMC2(TestBase):
         # Mock get_ascend_device_type()
         self.ascend_soc_version_patch = patch(
             "vllm_ascend.ops.fused_moe.token_dispatcher.get_ascend_device_type",
-            return_value=AscendDeviceType._910_93)
+            return_value=AscendDeviceType.A3)
         self.ascend_soc_version_patch.start()
         kwargs = {"with_quant": False, "top_k": 8, "num_experts": 128}

@@ -347,7 +347,7 @@ class TestAscendC8KVCacheMethod(TestBase):
         self.assertEqual(param.shape, expected_shape)
 
     @patch('vllm_ascend.utils.get_ascend_device_type',
-           return_value=AscendDeviceType._910_93)
+           return_value=AscendDeviceType.A3)
     def test_process_weights_after_loading_not_310p(self, mock_soc_version):
         key_data = torch.ones(4 * 64)
         value_data = torch.ones(4 * 64) * 2

@@ -231,7 +231,7 @@ class TestNPUPlatform(TestBase):
     @patch("vllm_ascend.ascend_config.init_ascend_config")
     @patch("vllm_ascend.utils.update_aclgraph_sizes")
     @patch('vllm_ascend.utils.get_ascend_device_type',
-           return_value=AscendDeviceType._910_93)
+           return_value=AscendDeviceType.A3)
     @patch("os.environ", {})
     @patch(
         "vllm_ascend.core.recompute_scheduler.RecomputeSchedulerConfig.initialize_from_config"
@@ -263,7 +263,7 @@ class TestNPUPlatform(TestBase):
         mock_init_ascend.assert_called_once_with(vllm_config)
 
     @patch('vllm_ascend.utils.get_ascend_device_type',
-           return_value=AscendDeviceType._910_93)
+           return_value=AscendDeviceType.A3)
     @patch("vllm_ascend.ascend_config.init_ascend_config")
     @patch(
         "vllm_ascend.core.recompute_scheduler.RecomputeSchedulerConfig.initialize_from_config"
@@ -288,7 +288,7 @@ class TestNPUPlatform(TestBase):
         self.assertTrue("Model config is missing" in cm.output[0])
 
     @patch('vllm_ascend.utils.get_ascend_device_type',
-           return_value=AscendDeviceType._910_93)
+           return_value=AscendDeviceType.A3)
     @patch("vllm_ascend.ascend_config.init_ascend_config")
     @patch(
         "vllm_ascend.core.recompute_scheduler.RecomputeSchedulerConfig.initialize_from_config"
@@ -324,7 +324,7 @@ class TestNPUPlatform(TestBase):
     )
     @patch('vllm_ascend.utils.get_ascend_device_type',
-           return_value=AscendDeviceType._910_93)
+           return_value=AscendDeviceType.A3)
     @patch("vllm_ascend.utils.update_default_aclgraph_sizes")
     @patch("vllm_ascend.ascend_config.init_ascend_config")
     @patch(
@@ -365,7 +365,7 @@ class TestNPUPlatform(TestBase):
     @pytest.mark.skip(
         "Revert me when vllm support setting cudagraph_mode on oot platform")
     @patch('vllm_ascend.utils.get_ascend_device_type',
-           return_value=AscendDeviceType._910_93)
+           return_value=AscendDeviceType.A3)
     @patch("vllm_ascend.ascend_config.init_ascend_config")
     def test_check_and_update_config_unsupported_cudagraph_mode(
             self, mock_init_ascend, mock_soc_version):
@@ -394,7 +394,7 @@ class TestNPUPlatform(TestBase):
     )
     @patch('vllm_ascend.utils.get_ascend_device_type',
-           return_value=AscendDeviceType._910_93)
+           return_value=AscendDeviceType.A3)
    @patch("vllm_ascend.ascend_config.init_ascend_config")
     @patch(
         "vllm_ascend.core.recompute_scheduler.RecomputeSchedulerConfig.initialize_from_config"
@@ -421,7 +421,7 @@ class TestNPUPlatform(TestBase):
         self.assertEqual(vllm_config.cache_config.block_size, 128)
 
     @patch('vllm_ascend.utils.get_ascend_device_type',
-           return_value=AscendDeviceType._910_93)
+           return_value=AscendDeviceType.A3)
     @patch("vllm_ascend.ascend_config.init_ascend_config")
     @patch(
         "vllm_ascend.core.recompute_scheduler.RecomputeSchedulerConfig.initialize_from_config"