[long_seq] remove long_seq env (#4660)

### What this PR does / why we need it?
Removes the `VLLM_ASCEND_ENABLE_CONTEXT_PARALLEL` environment variable. With the gate gone, the prefill context parallel (PCP) group is always consulted, so the unit tests below now mock the PCP group alongside the existing decode context parallel (DCP) mocks.
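For reference, here is a minimal, self-contained sketch of the mocking pattern this change applies throughout the tests. `GroupCoordinator` below is a stand-in for `vllm.distributed.parallel_state.GroupCoordinator`, and `get_pcp_group`/`build_metadata` are hypothetical local names; only the `unittest.mock` usage mirrors the diff.

```python
import unittest
from unittest.mock import MagicMock, patch


class GroupCoordinator:
    """Stand-in spec class; the real one lives in vllm.distributed.parallel_state."""
    rank_in_group = 0
    world_size = 1
    device_group = None


def get_pcp_group():
    """Stand-in accessor; the real one requires torch.distributed to be initialized."""
    raise RuntimeError("distributed environment not initialized")


def build_metadata():
    """Hypothetical code under test that consults the PCP group."""
    return {"pcp_world_size": get_pcp_group().world_size}


class TestPcpMocking(unittest.TestCase):

    # Stacked @patch decorators inject mocks bottom-up, so the topmost patch
    # arrives as the *last* argument -- which is why the diff appends
    # mock_pcp / mock_get_pcp_group to every test signature.
    @patch(f"{__name__}.get_pcp_group")
    def test_single_rank(self, mock_get_pcp_group):
        # spec=GroupCoordinator makes the mock reject attributes the real
        # coordinator does not have, exactly as the tests below do.
        pcp_group = MagicMock(spec=GroupCoordinator)
        pcp_group.rank_in_group = 0
        pcp_group.world_size = 1
        pcp_group.device_group = MagicMock()
        mock_get_pcp_group.return_value = pcp_group

        self.assertEqual(build_metadata()["pcp_world_size"], 1)


if __name__ == "__main__":
    unittest.main()
```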

- vLLM version: v0.12.0

---------

Signed-off-by: LookAround <lixushi@huawei.com>
Signed-off-by: ZhangMingWei716 <2894054457@qq.com>
Co-authored-by: ZhangMingWei716 <2894054457@qq.com>
Co-authored-by: wangxiyuan <wangxiyuan1007@gmail.com>
Author: LookAround0301
Date: 2025-12-05 10:31:49 +08:00 (committed by GitHub)
Commit: b32ef53b3b (parent: ea54388e19)
16 changed files with 230 additions and 176 deletions


@@ -54,12 +54,16 @@ class TestAscendAttentionBackend(TestBase):
 class TestAscendAttentionMetadataBuilder(TestBase):
 
+    @patch('vllm.distributed.parallel_state.get_pcp_group')
+    @patch('vllm.distributed.parallel_state._PCP',
+           new_callable=lambda: MagicMock(spec=GroupCoordinator))
     @patch('vllm.distributed.parallel_state.get_dcp_group')
     @patch('vllm.distributed.parallel_state._DCP',
            new_callable=lambda: MagicMock(spec=GroupCoordinator))
     @patch("vllm.distributed.get_decode_context_model_parallel_world_size",
            return_value=1)
-    def setUp(self, mock_get_dcp_size, mock_dcp, mock_get_dcp_group):
+    def setUp(self, mock_get_dcp_size, mock_dcp, mock_get_dcp_group, mock_pcp,
+              mock_get_pcp_group):
         mock_dcp.world_size = 1
         dcp_group = MagicMock(spec=GroupCoordinator)
         dcp_group.rank_in_group = 0
@@ -67,6 +71,13 @@ class TestAscendAttentionMetadataBuilder(TestBase):
         dcp_group.device_group = MagicMock()
         mock_get_dcp_group.return_value = dcp_group
+        mock_pcp.world_size = 1
+        pcp_group = MagicMock(spec=GroupCoordinator)
+        pcp_group.rank_in_group = 0
+        pcp_group.world_size = 1
+        pcp_group.device_group = MagicMock()
+        mock_get_pcp_group.return_value = pcp_group
+
         self.mock_vllm_config = MagicMock()
         self.mock_vllm_config.speculative_config = None
         self.mock_vllm_config.model_config.max_model_len = 640
@@ -117,12 +128,16 @@ class TestAscendAttentionMetadataBuilder(TestBase):
 class TestAscendAttentionBackendImpl(TestBase):
 
+    @patch('vllm.distributed.parallel_state.get_pcp_group')
+    @patch('vllm.distributed.parallel_state._PCP',
+           new_callable=lambda: MagicMock(spec=GroupCoordinator))
     @patch('vllm.distributed.parallel_state.get_dcp_group')
     @patch('vllm.distributed.parallel_state._DCP',
            new_callable=lambda: MagicMock(spec=GroupCoordinator))
     @patch("vllm.distributed.get_decode_context_model_parallel_world_size",
            return_value=1)
-    def setUp(self, mock_get_dcp_size, mock_dcp, mock_get_dcp_group):
+    def setUp(self, mock_get_dcp_size, mock_dcp, mock_get_dcp_group, mock_pcp,
+              mock_get_pcp_group):
         mock_dcp.world_size = 1
         dcp_group = MagicMock(spec=GroupCoordinator)
         dcp_group.rank_in_group = 0
@@ -130,6 +145,13 @@ class TestAscendAttentionBackendImpl(TestBase):
         dcp_group.device_group = MagicMock()
         mock_get_dcp_group.return_value = dcp_group
+        mock_pcp.world_size = 1
+        pcp_group = MagicMock(spec=GroupCoordinator)
+        pcp_group.rank_in_group = 0
+        pcp_group.world_size = 1
+        pcp_group.device_group = MagicMock()
+        mock_get_pcp_group.return_value = pcp_group
+
         self.layer = MagicMock()
         self.layer.layer_name = "test_layer"
         self.layer._k_scale_float = 1.0


@@ -177,13 +177,17 @@ class TestAscendMLAMetadata(TestBase):
 class TestAscendMLAMetadataBuilder(TestBase):
 
+    @patch('vllm.distributed.parallel_state.get_pcp_group')
+    @patch('vllm.distributed.parallel_state._PCP',
+           new_callable=lambda: MagicMock(spec=GroupCoordinator))
     @patch('vllm.distributed.parallel_state.get_dcp_group')
     @patch('vllm.distributed.parallel_state._DCP',
            new_callable=lambda: MagicMock(spec=GroupCoordinator))
     @patch("vllm.distributed.get_decode_context_model_parallel_world_size",
            return_value=1)
     def test_ascend_mla_metadata_builder_default(self, mock_get_dcp_size,
-                                                 mock_dcp, mock_get_dcp_group):
+                                                 mock_dcp, mock_get_dcp_group,
+                                                 mock_pcp, mock_get_pcp_group):
         mock_vllm_config = MagicMock()
         mock_vllm_config.model_config.max_model_len = 1024
         mock_vllm_config.model_config.get_head_size.return_value = 64
@@ -201,6 +205,13 @@ class TestAscendMLAMetadataBuilder(TestBase):
         dcp_group.device_group = MagicMock()
         mock_get_dcp_group.return_value = dcp_group
+        mock_pcp.world_size = 1
+        pcp_group = MagicMock(spec=GroupCoordinator)
+        pcp_group.rank_in_group = 0
+        pcp_group.world_size = 1
+        pcp_group.device_group = MagicMock()
+        mock_get_pcp_group.return_value = pcp_group
+
         mock_vllm_config.speculative_config = None
         ascend_config = MagicMock()
@@ -215,6 +226,9 @@ class TestAscendMLAMetadataBuilder(TestBase):
             builder.chunked_prefill_enabled,
             mock_vllm_config.scheduler_config.enable_chunked_prefill)
 
+    @patch('vllm.distributed.parallel_state.get_pcp_group')
+    @patch('vllm.distributed.parallel_state._PCP',
+           new_callable=lambda: MagicMock(spec=GroupCoordinator))
     @patch('vllm.distributed.parallel_state.get_dcp_group')
     @patch('vllm.distributed.parallel_state._DCP',
            new_callable=lambda: MagicMock(spec=GroupCoordinator))
@@ -222,7 +236,9 @@ class TestAscendMLAMetadataBuilder(TestBase):
            return_value=1)
     def test_ascend_mla_metadata_builder_spec_decode(self, mock_get_dcp_size,
                                                      mock_dcp,
-                                                     mock_get_dcp_group):
+                                                     mock_get_dcp_group,
+                                                     mock_pcp,
+                                                     mock_get_pcp_group):
         mock_vllm_config = MagicMock()
         mock_vllm_config.model_config.max_model_len = 1024
         mock_vllm_config.model_config.get_head_size.return_value = 64
@@ -240,6 +256,13 @@ class TestAscendMLAMetadataBuilder(TestBase):
         dcp_group.device_group = MagicMock()
         mock_get_dcp_group.return_value = dcp_group
+        mock_pcp.world_size = 1
+        pcp_group = MagicMock(spec=GroupCoordinator)
+        pcp_group.rank_in_group = 0
+        pcp_group.world_size = 1
+        pcp_group.device_group = MagicMock()
+        mock_get_pcp_group.return_value = pcp_group
+
         mock_spec_config = MagicMock()
         mock_spec_config.num_speculative_tokens = 3
         mock_vllm_config.speculative_config = mock_spec_config
@@ -256,13 +279,17 @@ class TestAscendMLAMetadataBuilder(TestBase):
             builder.chunked_prefill_enabled,
             mock_vllm_config.scheduler_config.enable_chunked_prefill)
 
+    @patch('vllm.distributed.parallel_state.get_pcp_group')
+    @patch('vllm.distributed.parallel_state._PCP',
+           new_callable=lambda: MagicMock(spec=GroupCoordinator))
     @patch('vllm.distributed.parallel_state.get_dcp_group')
     @patch('vllm.distributed.parallel_state._DCP',
            new_callable=lambda: MagicMock(spec=GroupCoordinator))
     @patch("vllm.distributed.get_decode_context_model_parallel_world_size",
            return_value=1)
     def test_ascend_mla_metadata_builder_build_full_graph(
-            self, mock_get_dcp_size, mock_dcp, mock_get_dcp_group):
+            self, mock_get_dcp_size, mock_dcp, mock_get_dcp_group, mock_pcp,
+            mock_get_pcp_group):
         mock_vllm_config = MagicMock()
         mock_vllm_config.model_config.max_model_len = 1024
         mock_vllm_config.model_config.get_head_size.return_value = 64
@@ -280,6 +307,13 @@ class TestAscendMLAMetadataBuilder(TestBase):
         dcp_group.device_group = MagicMock()
         mock_get_dcp_group.return_value = dcp_group
+        mock_pcp.world_size = 1
+        pcp_group = MagicMock(spec=GroupCoordinator)
+        pcp_group.rank_in_group = 0
+        pcp_group.world_size = 1
+        pcp_group.device_group = MagicMock()
+        mock_get_pcp_group.return_value = pcp_group
+
         mock_spec_config = MagicMock()
         mock_spec_config.num_speculative_tokens = 1
         mock_spec_config.disable_padded_drafter_batch = True
@@ -307,13 +341,16 @@ class TestAscendMLAMetadataBuilder(TestBase):
                          [1, 2, 4, 5, 6, 6, 7, 8])
         self.assertEqual(metadata.decode.block_table.shape[0], 8)
 
+    @patch('vllm.distributed.parallel_state.get_pcp_group')
+    @patch('vllm.distributed.parallel_state._PCP',
+           new_callable=lambda: MagicMock(spec=GroupCoordinator))
     @patch('vllm.distributed.parallel_state.get_dcp_group')
     @patch('vllm.distributed.parallel_state._DCP',
            new_callable=lambda: MagicMock(spec=GroupCoordinator))
     @patch("vllm.distributed.get_decode_context_model_parallel_world_size",
            return_value=1)
     def test_reorder_batch(self, mock_get_dcp_size, mock_dcp,
-                           mock_get_dcp_group):
+                           mock_get_dcp_group, mock_pcp, mock_get_pcp_group):
         ascend_config = MagicMock()
         mock_vllm_config = MagicMock()
@@ -331,6 +368,13 @@ class TestAscendMLAMetadataBuilder(TestBase):
         dcp_group.device_group = MagicMock()
         mock_get_dcp_group.return_value = dcp_group
+        mock_pcp.world_size = 1
+        pcp_group = MagicMock(spec=GroupCoordinator)
+        pcp_group.rank_in_group = 0
+        pcp_group.world_size = 1
+        pcp_group.device_group = MagicMock()
+        mock_get_pcp_group.return_value = pcp_group
+
         mock_vllm_config.speculative_config = None
 
         with patch("vllm_ascend.attention.mla_v1.get_ascend_config",
@@ -358,6 +402,9 @@ class TestAscendMLAMetadataBuilder(TestBase):
         self.assertTrue(modified)
         input_batch.swap_states.assert_called_once_with(1, 2)
 
+    @patch('vllm.distributed.parallel_state.get_pcp_group')
+    @patch('vllm.distributed.parallel_state._PCP',
+           new_callable=lambda: MagicMock(spec=GroupCoordinator))
     @patch('vllm.distributed.parallel_state.get_dcp_group')
     @patch('vllm.distributed.parallel_state._DCP',
            new_callable=lambda: MagicMock(spec=GroupCoordinator))
@@ -365,7 +412,9 @@ class TestAscendMLAMetadataBuilder(TestBase):
            return_value=1)
     def test_pad_actual_seq_lens_q_mtp_disable_pad(self, mock_get_dcp_size,
                                                    mock_dcp,
-                                                   mock_get_dcp_group):
+                                                   mock_get_dcp_group,
+                                                   mock_pcp,
+                                                   mock_get_pcp_group):
         mock_vllm_config = MagicMock()
         mock_vllm_config.model_config.max_model_len = 1024
         mock_vllm_config.model_config.get_head_size.return_value = 64
@@ -384,6 +433,13 @@ class TestAscendMLAMetadataBuilder(TestBase):
         dcp_group.device_group = MagicMock()
         mock_get_dcp_group.return_value = dcp_group
+        mock_pcp.world_size = 1
+        pcp_group = MagicMock(spec=GroupCoordinator)
+        pcp_group.rank_in_group = 0
+        pcp_group.world_size = 1
+        pcp_group.device_group = MagicMock()
+        mock_get_pcp_group.return_value = pcp_group
+
         builder = AscendMLAMetadataBuilder(None, None, mock_vllm_config,
                                            mock_device)
         input_seq_lens = [1, 2, 4, 5]
@@ -394,14 +450,18 @@ class TestAscendMLAMetadataBuilder(TestBase):
             num_reqs_pad_size, num_reqs, input_seq_lens)
         self.assertEqual(output_seq_lens, expect_output)
 
+    @patch('vllm.distributed.parallel_state.get_pcp_group')
+    @patch('vllm.distributed.parallel_state._PCP',
+           new_callable=lambda: MagicMock(spec=GroupCoordinator))
     @patch('vllm.distributed.parallel_state.get_dcp_group')
     @patch('vllm.distributed.parallel_state._DCP',
            new_callable=lambda: MagicMock(spec=GroupCoordinator))
     @patch("vllm.distributed.get_decode_context_model_parallel_world_size",
            return_value=1)
     def test_pad_actual_seq_lens_q_mtp_enable_pad(self, mock_get_dcp_size,
-                                                  mock_dcp,
-                                                  mock_get_dcp_group):
+                                                  mock_dcp, mock_get_dcp_group,
+                                                  mock_pcp,
+                                                  mock_get_pcp_group):
         mock_vllm_config = MagicMock()
         mock_vllm_config.model_config.max_model_len = 1024
         mock_vllm_config.model_config.get_head_size.return_value = 64
@@ -419,6 +479,14 @@ class TestAscendMLAMetadataBuilder(TestBase):
         dcp_group.world_size = 1
         dcp_group.device_group = MagicMock()
         mock_get_dcp_group.return_value = dcp_group
+        mock_pcp.world_size = 1
+        pcp_group = MagicMock(spec=GroupCoordinator)
+        pcp_group.rank_in_group = 0
+        pcp_group.world_size = 1
+        pcp_group.device_group = MagicMock()
+        mock_get_pcp_group.return_value = pcp_group
+
         common_metadata = MagicMock()
         common_metadata.actual_seq_lengths_q = [2, 4, 6, 8]
@@ -452,6 +520,7 @@ class TestAscendMLAMetadataBuilderBuild(TestBase):
         self.kv_cache_spec.head_size = 128
         self.kv_cache_spec.num_heads = 32
 
+    @patch("vllm_ascend.attention.mla_v1.get_pcp_group")
     @patch(
         "vllm_ascend.attention.mla_v1.get_decode_context_model_parallel_world_size"
     )
@@ -461,9 +530,13 @@ class TestAscendMLAMetadataBuilderBuild(TestBase):
     @patch("torch.npu.is_available")
     def test_build_prefix_no_cache_metadata(self, mock_npu_available,
                                             mock_zeros, mock_get_ascend_config,
-                                            mock_dcp_world_size):
+                                            mock_dcp_world_size,
+                                            mock_get_pcp_group):
         mock_npu_available.return_value = False
         mock_dcp_world_size.return_value = 1
+        pcp_group = MagicMock(spec=GroupCoordinator)
+        pcp_group.world_size = 1
+        mock_get_pcp_group.return_value = pcp_group
 
         def zeros_override(*args, **kwargs):
             kwargs.pop('pin_memory', None)
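Note the different patch target in the `TestAscendMLAMetadataBuilderBuild` hunks: they patch `vllm_ascend.attention.mla_v1.get_pcp_group`, i.e. the name as imported into the module under test, not its definition site in `vllm.distributed.parallel_state`. A runnable sketch of why that distinction matters; the module names `state` and `consumer` are fabricated for illustration:

```python
import sys
import types
from unittest.mock import MagicMock, patch

# "state" plays the role of vllm.distributed.parallel_state,
# "consumer" plays the role of vllm_ascend.attention.mla_v1.
state = types.ModuleType("state")

def _real_get_pcp_group():
    raise RuntimeError("needs an initialized distributed environment")

state.get_pcp_group = _real_get_pcp_group
sys.modules["state"] = state

consumer = types.ModuleType("consumer")
consumer.get_pcp_group = state.get_pcp_group  # simulates `from state import get_pcp_group`
consumer.pcp_world_size = lambda: consumer.get_pcp_group().world_size
sys.modules["consumer"] = consumer

# Patching the defining module misses the consumer's own reference:
with patch("state.get_pcp_group", return_value=MagicMock(world_size=1)):
    try:
        consumer.pcp_world_size()
    except RuntimeError:
        print("patching the definition site did not reach the consumer")

# Patching the name where it is *looked up* works, which is why these tests
# target "vllm_ascend.attention.mla_v1.get_pcp_group":
with patch("consumer.get_pcp_group", return_value=MagicMock(world_size=1)):
    print(consumer.pcp_world_size())  # -> 1
```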
@@ -512,6 +585,7 @@ class TestAscendMLAMetadataBuilderBuild(TestBase):
             torch.all(metadata.slot_mapping == base_inputs["slot_mapping"]))
         self.assertEqual(metadata.head_dim, self.kv_cache_spec.head_size)
 
+    @patch("vllm_ascend.attention.mla_v1.get_pcp_group")
     @patch(
         "vllm_ascend.attention.mla_v1.get_decode_context_model_parallel_world_size"
     )
@@ -521,9 +595,13 @@ class TestAscendMLAMetadataBuilderBuild(TestBase):
     @patch("torch.npu.is_available")
     def test_build_chunked_prefix_metadata(self, mock_npu_available,
                                            mock_zeros, mock_get_ascend_config,
-                                           mock_dcp_world_size):
+                                           mock_dcp_world_size,
+                                           mock_get_pcp_group):
         mock_npu_available.return_value = False
         mock_dcp_world_size.return_value = 1
+        pcp_group = MagicMock(spec=GroupCoordinator)
+        pcp_group.world_size = 1
+        mock_get_pcp_group.return_value = pcp_group
 
         def zeros_override(*args, **kwargs):
             kwargs.pop('pin_memory', None)
@@ -573,14 +651,18 @@ class TestAscendMLAMetadataBuilderBuild(TestBase):
             torch.all(metadata.slot_mapping == base_inputs["slot_mapping"]))
         self.assertEqual(metadata.head_dim, self.kv_cache_spec.head_size)
 
+    @patch("vllm_ascend.attention.mla_v1.get_pcp_group")
     @patch(
         "vllm_ascend.attention.mla_v1.get_decode_context_model_parallel_world_size"
     )
     @patch("vllm_ascend.attention.mla_v1.get_ascend_config")
     def test_build_decode_only_metadata(self, mock_get_ascend_config,
-                                        mock_dcp_world_size):
+                                        mock_dcp_world_size,
+                                        mock_get_pcp_group):
         mock_dcp_world_size.return_value = 1
+        pcp_group = MagicMock(spec=GroupCoordinator)
+        pcp_group.world_size = 1
+        mock_get_pcp_group.return_value = pcp_group
 
         common_attn_metadata = AscendCommonAttentionMetadata(
             query_start_loc=torch.tensor([0, 1, 2, 3]),
             query_start_loc_cpu=torch.tensor([0, 1, 2, 3]),
@@ -622,14 +704,18 @@ class TestAscendMLAMetadataBuilderBuild(TestBase):
             torch.all(metadata.slot_mapping == base_inputs["slot_mapping"]))
         self.assertEqual(metadata.head_dim, self.kv_cache_spec.head_size)
 
+    @patch("vllm_ascend.attention.mla_v1.get_pcp_group")
     @patch(
         "vllm_ascend.attention.mla_v1.get_decode_context_model_parallel_world_size"
     )
     @patch("vllm_ascend.attention.mla_v1.get_ascend_config")
     def test_build_for_graph_capture_decode_only(self, mock_get_ascend_config,
-                                                 mock_dcp_world_size):
+                                                 mock_dcp_world_size,
+                                                 mock_get_pcp_group):
         mock_dcp_world_size.return_value = 1
+        pcp_group = MagicMock(spec=GroupCoordinator)
+        pcp_group.world_size = 1
+        mock_get_pcp_group.return_value = pcp_group
 
         common_attn_metadata = AscendCommonAttentionMetadata(
             query_start_loc=torch.tensor([0, 1, 2, 3]),
             query_start_loc_cpu=torch.tensor([0, 1, 2, 3]),
@@ -672,14 +758,18 @@ class TestAscendMLAMetadataBuilderBuild(TestBase):
             torch.all(metadata.slot_mapping == base_inputs["slot_mapping"]))
         self.assertEqual(metadata.head_dim, self.kv_cache_spec.head_size)
 
+    @patch("vllm_ascend.attention.mla_v1.get_pcp_group")
     @patch(
         "vllm_ascend.attention.mla_v1.get_decode_context_model_parallel_world_size"
     )
     @patch("vllm_ascend.attention.mla_v1.get_ascend_config")
     def test_build_for_graph_capture_prefill(self, mock_get_ascend_config,
-                                             mock_dcp_world_size):
+                                             mock_dcp_world_size,
+                                             mock_get_pcp_group):
         mock_dcp_world_size.return_value = 1
+        pcp_group = MagicMock(spec=GroupCoordinator)
+        pcp_group.world_size = 1
+        mock_get_pcp_group.return_value = pcp_group
 
         common_attn_metadata = AscendCommonAttentionMetadata(
             query_start_loc=torch.tensor([0, 3, 7]),
             query_start_loc_cpu=torch.tensor([0, 3, 7]),
@@ -716,6 +806,8 @@ class TestAscendMLAMetadataBuilderBuild(TestBase):
 class TestAscendMLAImpl(TestBase):
 
+    @patch('vllm.distributed.parallel_state._PCP',
+           new_callable=lambda: MagicMock(spec=GroupCoordinator))
     @patch('vllm.distributed.parallel_state._DCP',
            new_callable=lambda: MagicMock(spec=GroupCoordinator))
     @patch("vllm.distributed.get_decode_context_model_parallel_world_size",
@@ -727,13 +819,16 @@ class TestAscendMLAImpl(TestBase):
     @patch("vllm_ascend.attention.mla_v1.get_current_vllm_config")
     @patch("vllm_ascend.attention.mla_v1.get_ascend_config")
     def setUp(self, ascend_config, get_current_vllm_config, mock_get_tp_size,
-              mock_tp, mock_get_dcp_size, mock_dcp):
+              mock_tp, mock_get_dcp_size, mock_dcp, mock_pcp):
         mock_tp.world_size = 2
         mock_tp.rank_in_group = MagicMock()
         mock_tp.device_group = MagicMock()
         mock_dcp.world_size = 1
         mock_dcp.rank_in_group = MagicMock()
         mock_dcp.device_group = MagicMock()
+        mock_pcp.world_size = 1
+        mock_pcp.rank_in_group = MagicMock()
+        mock_pcp.device_group = MagicMock()
         vllm_config = MagicMock()
         speculative_config = MagicMock()
         model_config = MagicMock()
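Finally, a short sketch of the `new_callable` idiom used above for the module-level `_PCP`/`_DCP` globals: `patch()` calls `new_callable()` with no arguments to construct the replacement object, so a lambda is the most compact way to bake `spec=GroupCoordinator` into it. The global `_PCP` and the `GroupCoordinator` class below are local stand-ins for the real names in `vllm.distributed.parallel_state`:

```python
import unittest
from unittest.mock import MagicMock, patch


class GroupCoordinator:
    """Stand-in spec class for vllm.distributed.parallel_state.GroupCoordinator."""
    rank_in_group = 0
    world_size = 1
    device_group = None


_PCP = None  # stand-in for the module-level process-group global


class TestNewCallable(unittest.TestCase):

    # patch() invokes new_callable() with zero arguments to build the
    # replacement, so the lambda bakes the spec into the generated mock.
    @patch(f"{__name__}._PCP",
           new_callable=lambda: MagicMock(spec=GroupCoordinator))
    def test_specced_global(self, mock_pcp):
        mock_pcp.world_size = 1  # allowed: world_size exists on the spec
        with self.assertRaises(AttributeError):
            mock_pcp.not_a_real_attribute  # rejected by the spec


if __name__ == "__main__":
    unittest.main()
```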