[Misc] Remove redundant CP variables after FIA operator is enabled for CANN 8.5 (#6013)

### What this PR does / why we need it?
PCP/DCP splits the KV cache across different cards. After introducing the
parameter `cp-kv-cache-interleave-size`, the first `size` tokens are cached
on card 0, the next `size` tokens on card 1, and so on.
However, if a sequence has too few tokens, some cards end up storing no
key-value pairs at all; their slots then hold zeros or garbage values,
which causes precision issues. Currently, additional operations are
introduced to work around this precision problem.
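
For intuition, here is a minimal sketch of the interleaved placement; the
names `cp_size` and `interleave_size` are illustrative, not the exact
internals:

```python
# Illustrative only: which CP rank stores each token's KV under
# interleaved placement.
def kv_owner_per_token(seq_len: int, cp_size: int,
                       interleave_size: int) -> list[int]:
    return [(tok // interleave_size) % cp_size for tok in range(seq_len)]

# With cp_size=4 and interleave_size=128, a 100-token sequence lands
# entirely on rank 0, so ranks 1-3 hold no KV for it at all.
owners = kv_owner_per_token(seq_len=100, cp_size=4, interleave_size=128)
print(set(range(4)) - set(owners))  # {1, 2, 3}
```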

Now that the FIA operator is integrated in `mla_cp._forward_decode` and CANN
is updated to 8.5.0, these additional operations can be removed.
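
For reference, a hedged sketch of the kind of masking this PR removes. The
real semantics live in `_process_attn_out_lse`; the convention below (True
marks a batch whose card holds no KV) is an assumption for illustration:

```python
import torch

# Hypothetical sketch of the removed workaround: give empty partials zero
# weight in the cross-card log-sum-exp (LSE) merge by forcing their LSE
# to -inf and zeroing their partial attention outputs.
def mask_empty_partials(attn_out: torch.Tensor, lse: torch.Tensor,
                        batch_seq_mask: torch.Tensor):
    mask = batch_seq_mask.view(-1, 1, 1)  # [B] -> [B, 1, 1], broadcasts
    lse = lse.masked_fill(mask, float("-inf"))
    attn_out = attn_out.masked_fill(mask, 0.0)
    return attn_out, lse
```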
### Does this PR introduce _any_ user-facing change?

### How was this patch tested?
Passed all CI with CANN 8.5.0.
- vLLM version: v0.13.0
- vLLM main: 2c24bc6996

Signed-off-by: dsxsteven <dsxsteven@sina.com>
Signed-off-by: dsxsteven <36877507+dsxsteven@users.noreply.github.com>
Commit 8378bc28b0 (parent 418a43e2a2) by dsxsteven, committed via GitHub on 2026-01-23 14:13:12 +08:00
8 changed files with 78 additions and 57 deletions


@@ -210,3 +210,72 @@ def test_accuracy_pcp_only(max_tokens: int, ) -> None:
         name_0="vllm_eager_outputs",
         name_1="vllm_pcp_only_outputs",
     )
+
+
+@pytest.mark.parametrize("model", MODELS)
+@pytest.mark.parametrize("max_tokens", [10])
+def test_models_long_sequence_cp_kv_interleave_size_output_between_tp_and_cp(
+        model: str,
+        max_tokens: int,
+) -> None:
+    prompts = [
+        "The president of the United States is", "The capital of France is"
+    ]
+    common_kwargs = {
+        "max_model_len": 1024,
+    }
+    if model == "vllm-ascend/DeepSeek-V2-Lite-W8A8":
+        cp_kwargs = {
+            "tensor_parallel_size": 2,
+            "decode_context_parallel_size": 2,
+            "prefill_context_parallel_size": 2,
+            "enable_expert_parallel": True,
+            "cp_kv_cache_interleave_size": 128,
+            "enforce_eager": True,
+            "quantization": "ascend",
+        }
+        tp_kwargs = {
+            "tensor_parallel_size": 4,
+            "enable_expert_parallel": True,
+            "enforce_eager": True,
+            "quantization": "ascend",
+        }
+    else:
+        cp_kwargs = {
+            "tensor_parallel_size": 1,
+            "decode_context_parallel_size": 1,
+            "prefill_context_parallel_size": 2,
+            "cp_kv_cache_interleave_size": 128,
+            "compilation_config": {
+                "cudagraph_mode": "FULL_DECODE_ONLY",
+                "cudagraph_capture_sizes": [4, 8, 24, 48, 60]
+            },
+        }
+        tp_kwargs = {
+            "tensor_parallel_size": 2,
+            "enforce_eager": True,
+        }
+    cp_full_kwargs = {}
+    cp_full_kwargs.update(common_kwargs)  # type: ignore
+    cp_full_kwargs.update(cp_kwargs)  # type: ignore
+    tp_full_kwargs = {}
+    tp_full_kwargs.update(common_kwargs)  # type: ignore
+    tp_full_kwargs.update(tp_kwargs)  # type: ignore
+    with VllmRunner(model, **cp_full_kwargs) as runner:  # type: ignore
+        vllm_context_parallel_outputs = runner.generate_greedy(
+            prompts, max_tokens)
+    with VllmRunner(model, **tp_full_kwargs) as runner:  # type: ignore
+        vllm_eager_outputs = runner.generate_greedy(prompts, max_tokens)
+    check_outputs_equal(
+        outputs_0_lst=vllm_eager_outputs,
+        outputs_1_lst=vllm_context_parallel_outputs,
+        name_0="vllm_eager_outputs",
+        name_1="vllm_context_parallel_outputs",
+    )


@@ -439,11 +439,7 @@ class TestAscendMLAImpl(TestBase):
         decode_metadata = MagicMock()
         decode_metadata.actual_seq_lengths_q = MagicMock()
         decode_metadata.seq_lens_list = MagicMock()
-        decode_metadata.batch_seq_mask = torch.tensor([True, False],
-                                                      dtype=torch.bool)
-        result = _process_attn_out_lse(attn_output, softmax_lse,
-                                       decode_metadata.batch_seq_mask)
+        result = _process_attn_out_lse(attn_output, softmax_lse)
         self.assertEqual(result.shape[0], B * self.impl.pcp_size)
         self.assertEqual(result.shape[1], N)
@@ -478,8 +474,6 @@ class TestAscendMLAImpl(TestBase):
         attn_metadata.decode = MagicMock()
         attn_metadata.decode.actual_seq_lengths_q = MagicMock()
         attn_metadata.decode.seq_lens_list = MagicMock()
-        attn_metadata.decode.batch_seq_mask = torch.tensor([False, False],
-                                                           dtype=torch.bool)
         self.impl.enable_kv_nz = True
@@ -886,12 +880,9 @@ class TestAscendMLAImpl(TestBase):
         # Inputs
         attn_output = torch.randn(B, H, D)
         softmax_lse = torch.randn(B, H, 1)
-        batch_seq_mask = torch.tensor([False, True, False, False])  # [B]
         decode_meta = MagicMock()
-        decode_meta.batch_seq_mask = batch_seq_mask
-        result = _process_attn_out_lse(attn_output, softmax_lse,
-                                       batch_seq_mask)
+        result = _process_attn_out_lse(attn_output, softmax_lse)
         # [PCP * S, DCP * H, D + 1]
         self.assertIsInstance(result, torch.Tensor)
         assert result.shape == (B * self.impl.pcp_size, H, D + 1)


@@ -137,7 +137,6 @@ class TestAscendMLADecodeMetadata(TestBase):
         seq_lens_list = [2, 3]
         attn_mask = None
         cp_seq_len = torch.tensor([2, 3])
-        batch_seq_mask = torch.tensor([[1, 1, 0, 0], [1, 1, 1, 0]])
         metadata = AscendMLADecodeMetadata(input_positions=input_positions,
                                            block_table=block_table,
@@ -145,8 +144,7 @@ class TestAscendMLADecodeMetadata(TestBase):
                                            max_seq_lens=max_seq_lens,
                                            seq_lens_list=seq_lens_list,
                                            attn_mask=attn_mask,
-                                           cp_seq_len=cp_seq_len,
-                                           batch_seq_mask=batch_seq_mask)
+                                           cp_seq_len=cp_seq_len)
         self.assertIs(metadata.input_positions, input_positions)
         self.assertIs(metadata.block_table, block_table)
@@ -155,7 +153,6 @@ class TestAscendMLADecodeMetadata(TestBase):
         self.assertEqual(metadata.seq_lens_list, seq_lens_list)
         self.assertIsNone(attn_mask)
         self.assertIs(metadata.cp_seq_len, cp_seq_len)
-        self.assertIs(metadata.batch_seq_mask, batch_seq_mask)
 class TestAscendMLAMetadata(TestBase):