[Misc] Remove redundant CP variables after enabling the FIA operator for CANN 8.5 (#6013)

### What this PR does / why we need it?
PCP/DCP splits the kv-cache across different cards. With the parameter
cp-kv-cache-interleave-size, the first cp-kv-cache-interleave-size tokens of a
sequence are cached on card 0, the next block on the next card, and so on.
However, when a request has too few tokens, some cards store no key-value
pairs at all, and their partial results contain values of 0 or corrupted
values, which causes precision issues. Currently, additional masking
operations are used to avoid this precision problem.
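
To make the layout concrete, here is a small illustrative sketch (not code from this repo; the helper name `assign_tokens_to_ranks` and the parameter `interleave_size`, standing in for cp-kv-cache-interleave-size, are made up for the example):

```python
# Illustrative sketch of interleaved kv-cache placement across CP ranks.
# `interleave_size` stands in for cp-kv-cache-interleave-size; the helper
# name is hypothetical and not part of vllm-ascend.
def assign_tokens_to_ranks(num_tokens: int, num_ranks: int,
                           interleave_size: int) -> list[int]:
    """Return how many tokens of one sequence each CP rank caches."""
    per_rank = [0] * num_ranks
    for block_start in range(0, num_tokens, interleave_size):
        rank = (block_start // interleave_size) % num_ranks
        per_rank[rank] += min(interleave_size, num_tokens - block_start)
    return per_rank

# A long sequence spreads evenly over all ranks ...
print(assign_tokens_to_ranks(1024, num_ranks=4, interleave_size=128))  # [256, 256, 256, 256]
# ... but a short one leaves ranks 1-3 with no kv entries at all, so their
# partial attention output/LSE for that request is meaningless.
print(assign_tokens_to_ranks(100, num_ranks=4, interleave_size=128))   # [100, 0, 0, 0]
```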

After integrating the FIA operator in mla_cp._forward_decode and updating
CANN to 8.5.0, we can now remove these additional operations.
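
For context, the workaround being removed forces the LSE of ranks that hold no kv for a request to -inf, so their partial results drop out of the cross-rank merge. A simplified sketch of that idea (illustrative only; the real code is in the diff below, and `mask_empty_ranks` plus the tensor shapes here are assumptions):

```python
import torch

# Simplified sketch of the precision workaround this PR removes: ranks whose
# kv-cache slice is empty produce meaningless partial outputs and LSEs, so
# their LSE is forced to -inf before the cross-rank merge, which makes their
# contribution vanish (exp(-inf) == 0). Not the actual vllm-ascend code.
def mask_empty_ranks(lse: torch.Tensor, tokens_per_rank: torch.Tensor) -> torch.Tensor:
    """lse: [num_ranks, batch, heads]; tokens_per_rank: [num_ranks, batch]."""
    empty = tokens_per_rank == 0                  # (rank, request) pairs with no kv
    lse_mask = empty[:, :, None].expand_as(lse)   # broadcast the mask over heads
    return torch.where(lse_mask, float("-inf"), lse)
```

According to this PR, with the FIA operator in mla_cp._forward_decode on CANN 8.5.0 this masking is no longer needed, so the batch_seq_mask buffers and the -inf masking are deleted in the diff below.
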
### Does this PR introduce _any_ user-facing change?

### How was this patch tested?
Passed all CI with CANN 8.5.0.
- vLLM version: v0.13.0
- vLLM main:
2c24bc6996

Signed-off-by: dsxsteven <dsxsteven@sina.com>
Signed-off-by: dsxsteven <36877507+dsxsteven@users.noreply.github.com>
Author: dsxsteven
Date: 2026-01-23 14:13:12 +08:00
Committed by: GitHub
Parent: 418a43e2a2
Commit: 8378bc28b0

8 changed files with 78 additions and 57 deletions

@@ -73,9 +73,6 @@ class AscendAttentionCPMetadataBuilder(AscendAttentionMetadataBuilder):
         device: torch.device,
     ):
         super().__init__(kv_cache_spec, layer_names, vllm_config, device)
-        self.batch_seq_mask_buf = torch.empty(
-            vllm_config.scheduler_config.max_num_batched_tokens, dtype=torch.uint8, device=device
-        )
         self.pcp_size = get_pcp_group().world_size
         self.pcp_rank = get_pcp_group().rank_in_group if self.pcp_size > 1 else 0
         self.dcp_size = get_decode_context_model_parallel_world_size()
@@ -216,14 +213,9 @@ class AscendAttentionCPMetadataBuilder(AscendAttentionMetadataBuilder):
         if num_decodes > 0:
             num_computed_tokens_array = np.array(num_computed_tokens_of_pcp_dcp)
             num_computed_tokens_array = num_computed_tokens_array[:num_decodes]
-            batch_seq_mask = num_computed_tokens_array[:, self.pcp_rank, self.dcp_rank] == 0
-            # TODO: numpy array mode of the shared memory is used to improve performance
-            self.batch_seq_mask_buf[: batch_seq_mask.shape[0]].copy_(
-                torch.from_numpy(batch_seq_mask), non_blocking=True
-            )
             decode_metadata = AscendMetadataForDecode(
                 num_computed_tokens_of_pcp_dcp=num_computed_tokens_array,
-                batch_seq_mask=self.batch_seq_mask_buf[: batch_seq_mask.shape[0]],
                 block_tables=block_table[:num_decodes],
             )
@@ -525,7 +517,7 @@ class AscendAttentionCPImpl(AscendAttentionBackendImpl):
             graph_params.handles[num_tokens].append(handle)
         else:
             attn_out, attn_lse = torch_npu.npu_fused_infer_attention_score(query, k_nope, value, **common_kwargs)
-        attn_out_lse = _process_attn_out_lse(attn_out, attn_lse, attn_metadata.decode_meta.batch_seq_mask)
+        attn_out_lse = _process_attn_out_lse(attn_out, attn_lse)
         attn_out = _npu_attention_update(self.head_size, attn_out_lse)
         return attn_out
@@ -633,9 +625,6 @@ class AscendAttentionCPImpl(AscendAttentionBackendImpl):
             actual_seq_lengths_kv=prefill_metadata.chunked_context.actual_seq_lengths_kv,
             actual_seq_lengths=attn_metadata.prefill.chunked_context.actual_chunk_seq_lengths,
         )
-        batch_chunk_seq_mask = attn_metadata.prefill.chunked_context.batch_chunk_seq_mask
-        lse_mask = batch_chunk_seq_mask[:, None, None].expand_as(prefix_chunk_lse)
-        prefix_chunk_lse = torch.where(lse_mask, -torch.inf, prefix_chunk_lse)
         return prefix_chunk_output, prefix_chunk_lse
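
Why forcing an LSE to -inf works: when per-rank partial outputs are merged using their log-sum-exp values, each rank is weighted by exp(lse_rank - merged_lse), so a rank with lse = -inf gets weight 0 and drops out. The sketch below is illustrative only; it is not the actual _npu_attention_update implementation, and the shapes and the name `merge_partial_outputs` are assumptions:

```python
import torch

# Illustrative LSE-based merge of per-rank partial attention results (not the
# actual _npu_attention_update implementation).
def merge_partial_outputs(outs: torch.Tensor, lses: torch.Tensor) -> torch.Tensor:
    """outs: [num_ranks, tokens, heads, head_dim]; lses: [num_ranks, tokens, heads]."""
    merged_lse = torch.logsumexp(lses, dim=0)             # [tokens, heads]
    weights = torch.exp(lses - merged_lse.unsqueeze(0))   # exp(-inf) -> 0 drops masked ranks
    return (weights.unsqueeze(-1) * outs).sum(dim=0)      # weighted sum over ranks
```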