[Misc] Remove redundant CP variables now that the FIA operator is enabled for CANN 8.5 (#6013)
### What this PR does / why we need it?
PCP/DCP splits the kv-cache across different cards. With the newly introduced
parameter cp-kv-cache-interleave-size, the first interleave-size tokens are
cached on card 0, the next interleave-size tokens on card 1, and so on.
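For intuition, here is a minimal sketch of that placement rule; `interleave_size` and `card_for_token` are illustrative names standing in for cp-kv-cache-interleave-size, not the actual vllm-ascend implementation:

```python
# Illustrative sketch only: maps a token position to the card that caches it
# under interleaved placement with the given interleave size.
def card_for_token(token_idx: int, interleave_size: int, num_cards: int) -> int:
    return (token_idx // interleave_size) % num_cards

# With interleave_size=4 and 2 cards: tokens 0-3 go to card 0,
# tokens 4-7 to card 1, tokens 8-11 back to card 0, and so on.
assert [card_for_token(t, 4, 2) for t in range(12)] == [0] * 4 + [1] * 4 + [0] * 4
```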
However, when a request has too few tokens, some cards end up holding no
key-value pairs, and their partial attention results come back as zeros or
corrupted values, causing precision issues. Additional masking operations were
previously introduced to avoid this precision problem (sketched below).
Now that the FIA operator is integrated in mla_cp._forward_decode and CANN has
been updated to 8.5.0, these additional operations can be removed.
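For reference, the workaround being deleted (see `_process_attn_out_lse` in the diff below) zeroes the partial output of empty cards and pushes their log-sum-exp to -inf, so they drop out of the cross-card combine. A standalone sketch of that masking:

```python
import torch

def mask_empty_cards(attn_output: torch.Tensor,
                     softmax_lse: torch.Tensor,
                     batch_seq_mask: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
    # batch_seq_mask is True for requests whose kv slice on this card is empty.
    out_mask = batch_seq_mask[:, None, None].expand_as(attn_output)
    attn_output = torch.where(out_mask, 0, attn_output)  # garbage output -> 0
    lse_mask = batch_seq_mask[:, None, None].expand_as(softmax_lse)
    softmax_lse = torch.where(lse_mask, -torch.inf, softmax_lse)  # weight -> exp(-inf) = 0
    return attn_output, softmax_lse
```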
### Does this PR introduce _any_ user-facing change?
### How was this patch tested?
Passed all CI with CANN 8.5.0.
- vLLM version: v0.13.0
- vLLM main: 2c24bc6996
Signed-off-by: dsxsteven <dsxsteven@sina.com>
Signed-off-by: dsxsteven <36877507+dsxsteven@users.noreply.github.com>
```diff
@@ -73,9 +73,6 @@ class AscendAttentionCPMetadataBuilder(AscendAttentionMetadataBuilder):
         device: torch.device,
     ):
         super().__init__(kv_cache_spec, layer_names, vllm_config, device)
-        self.batch_seq_mask_buf = torch.empty(
-            vllm_config.scheduler_config.max_num_batched_tokens, dtype=torch.uint8, device=device
-        )
         self.pcp_size = get_pcp_group().world_size
         self.pcp_rank = get_pcp_group().rank_in_group if self.pcp_size > 1 else 0
         self.dcp_size = get_decode_context_model_parallel_world_size()
```
```diff
@@ -216,14 +213,9 @@ class AscendAttentionCPMetadataBuilder(AscendAttentionMetadataBuilder):
         if num_decodes > 0:
             num_computed_tokens_array = np.array(num_computed_tokens_of_pcp_dcp)
             num_computed_tokens_array = num_computed_tokens_array[:num_decodes]
-            batch_seq_mask = num_computed_tokens_array[:, self.pcp_rank, self.dcp_rank] == 0
             # TODO: numpy array mode of the shared memory is used to improve performance
-            self.batch_seq_mask_buf[: batch_seq_mask.shape[0]].copy_(
-                torch.from_numpy(batch_seq_mask), non_blocking=True
-            )
             decode_metadata = AscendMetadataForDecode(
                 num_computed_tokens_of_pcp_dcp=num_computed_tokens_array,
-                batch_seq_mask=self.batch_seq_mask_buf[: batch_seq_mask.shape[0]],
                 block_tables=block_table[:num_decodes],
             )
```
```diff
@@ -525,7 +517,7 @@ class AscendAttentionCPImpl(AscendAttentionBackendImpl):
             graph_params.handles[num_tokens].append(handle)
         else:
             attn_out, attn_lse = torch_npu.npu_fused_infer_attention_score(query, k_nope, value, **common_kwargs)
-        attn_out_lse = _process_attn_out_lse(attn_out, attn_lse, attn_metadata.decode_meta.batch_seq_mask)
+        attn_out_lse = _process_attn_out_lse(attn_out, attn_lse)
         attn_out = _npu_attention_update(self.head_size, attn_out_lse)
         return attn_out
```
```diff
@@ -633,9 +625,6 @@ class AscendAttentionCPImpl(AscendAttentionBackendImpl):
             actual_seq_lengths_kv=prefill_metadata.chunked_context.actual_seq_lengths_kv,
             actual_seq_lengths=attn_metadata.prefill.chunked_context.actual_chunk_seq_lengths,
         )
-        batch_chunk_seq_mask = attn_metadata.prefill.chunked_context.batch_chunk_seq_mask
-        lse_mask = batch_chunk_seq_mask[:, None, None].expand_as(prefix_chunk_lse)
-        prefix_chunk_lse = torch.where(lse_mask, -torch.inf, prefix_chunk_lse)

         return prefix_chunk_output, prefix_chunk_lse
```
```diff
@@ -84,20 +84,13 @@ class AscendMetadataForDecode:
     """Decode-specific metadata for Ascend attention with Context Parallelism."""

     num_computed_tokens_of_pcp_dcp: list[list[list[int]]] | None = None
-    batch_seq_mask: torch.Tensor = None
     block_tables: torch.Tensor = None


-def _process_attn_out_lse(
-    attn_output: torch.Tensor, softmax_lse: torch.Tensor, batch_seq_mask: torch.Tensor
-) -> torch.Tensor:
+def _process_attn_out_lse(attn_output: torch.Tensor, softmax_lse: torch.Tensor) -> torch.Tensor:
     pcp_size = get_pcp_group().world_size
     dcp_size = get_decode_context_model_parallel_world_size()
     dcp_group = get_dcp_group().device_group if dcp_size > 1 else None
-    out_mask = batch_seq_mask[:, None, None].expand_as(attn_output)
-    attn_output = torch.where(out_mask, 0, attn_output)
-    lse_mask = batch_seq_mask[:, None, None].expand_as(softmax_lse)
-    softmax_lse = torch.where(lse_mask, -torch.inf, softmax_lse)
     softmax_lse = softmax_lse.to(torch.float32)
     attn_output = attn_output.to(torch.float32)
     # Concat out&lse: [bs,num_heads,v_head_dim] + [bs,num_heads,1] -> [bs,num_heads,v_head_dim+1]
```
```diff
@@ -68,10 +68,6 @@ class AscendMlaCPMetadataBuilder(AscendMLAMetadataBuilder):
         self.dcp_rank = get_decode_context_model_parallel_rank() if self.dcp_size > 1 else 0
         self.cp_local_block_size = vllm_config.parallel_config.cp_kv_cache_interleave_size
         self.cp_virtual_block_size = self.cp_local_block_size * self.dcp_size * self.pcp_size
-        scheduler_config = vllm_config.scheduler_config
-        decode_max_num_seqs = getattr(scheduler_config, "decode_max_num_seqs", 0)
-        max_num_seqs = max(scheduler_config.max_num_seqs, decode_max_num_seqs)
-        self.batch_seq_mask_buf = torch.empty(max_num_seqs * self.decode_threshold, dtype=torch.uint8, device=device)
         self.block_size = (self.block_size * self.cp_virtual_block_size) // np.gcd(
             self.block_size, self.cp_virtual_block_size
         )
```
```diff
@@ -238,12 +234,7 @@ class AscendMlaCPMetadataBuilder(AscendMLAMetadataBuilder):

         cp_seq_len = num_computed_tokens_of_cp_dcp_array[:, self.pcp_rank, self.dcp_rank]
         cp_seq_len = torch.tensor(cp_seq_len, dtype=torch.int32)
-        batch_seq_mask = cp_seq_len == 0
-        self.batch_seq_mask_buf[: batch_seq_mask.shape[0]].copy_(batch_seq_mask, non_blocking=True)
-        batch_seq_mask = self.batch_seq_mask_buf[: batch_seq_mask.shape[0]]
-        cp_seq_len = torch.where(cp_seq_len == 0, 1, cp_seq_len)
         decode_metadata.cp_seq_len = cp_seq_len.tolist()
-        decode_metadata.batch_seq_mask = batch_seq_mask

         actual_seq_lengths_q = torch.arange(self.num_decodes_flatten) + 1
         decode_metadata.actual_seq_lengths_q = actual_seq_lengths_q
```
```diff
@@ -651,7 +642,7 @@ class AscendMlaCPImpl(AscendMLAImpl):
         softmax_lse = softmax_lse.permute(0, 2, 1, 3).reshape(B_lse * Q_S, N_lse, 1)

         # Update out&lse
-        attn_out_lse = _process_attn_out_lse(attn_output, softmax_lse, decode_meta.batch_seq_mask)
+        attn_out_lse = _process_attn_out_lse(attn_output, softmax_lse)
         attn_output = _npu_attention_update(self.kv_lora_rank, attn_out_lse)
         return self._v_up_proj(attn_output)
```
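Why the -inf masking was sufficient before CANN 8.5: the per-card partial outputs are merged with log-sum-exp weights (here by `_npu_attention_update`), so a card whose lse is -inf receives zero weight. A minimal, hypothetical sketch of such a combine, not the actual operator:

```python
import torch

def combine_partials(outs: list[torch.Tensor], lses: list[torch.Tensor]) -> torch.Tensor:
    # outs: per-card [bs, num_heads, v_head_dim]; lses: per-card [bs, num_heads, 1]
    out = torch.stack(outs)              # [num_cards, bs, num_heads, v_head_dim]
    lse = torch.stack(lses)              # [num_cards, bs, num_heads, 1]
    weights = torch.softmax(lse, dim=0)  # softmax over cards; lse = -inf -> weight 0
    return (weights * out).sum(dim=0)    # weighted merge of partial outputs
```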