support cp&dcp (#3260)
### What this PR does / why we need it?

This PR adds the Prefill Context Parallelism (PCP) feature, the prefill-stage counterpart of Decode Context Parallelism (DCP). For implementation details, please refer to the RFC: https://github.com/vllm-project/vllm/issues/25749.

TL;DR: PCP improves long-sequence inference by partitioning the sequence dimension during the prefill stage (see the sketch after this description).

### Does this PR introduce _any_ user-facing change?

The current implementation primarily includes the following changes:

- Modified ModelRunner.py to add the CP partitioning logic for tokens;
- Modified attention_v1.py and mla_v1.py to adapt the GQA/MLA backends to PCP;
- Modified block_tables.py to extend the KV cache storage for DCP & PCP;
- Added the command-line arguments needed to control PCP parallelism.

### How was this patch tested?

- vLLM version: v0.11.0rc3
- vLLM main: https://github.com/vllm-project/vllm/commit/v0.11.0

---------

Signed-off-by: LookAround <lixushi@huawei.com>
Signed-off-by: chenjie <chenjie137@huawei.com>
Signed-off-by: Delphine-Nic <tanwenqin@huawei.com>
Signed-off-by: zhangsicheng5 <zhangsicheng5@huawei.com>
Signed-off-by: Feng Liu <liufeng248@huawei.com>
Signed-off-by: gaojc <1055866782@qq.com>
Signed-off-by: weiguihua2 <weiguihua2@huawei.com>
Signed-off-by: z50049692 <zhangmingwei11@huawei.com>
Co-authored-by: chenjie <chenjie137@huawei.com>
Co-authored-by: Delphine-Nic <tanwenqin@huawei.com>
Co-authored-by: zhangsicheng5 <zhangsicheng5@huawei.com>
Co-authored-by: Feng Liu <liufeng248@huawei.com>
Co-authored-by: gaojc <1055866782@qq.com>
Co-authored-by: weiguihua2 <weiguihua2@huawei.com>
Co-authored-by: z50049692 <zhangmingwei11@huawei.com>
Co-authored-by: w00896881 <wangzixuan40@huawei.com>
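Note: below is a minimal sketch of the sequence-dimension partitioning idea behind PCP, not the PR's ModelRunner code. It assumes the common balanced head/tail split (which matches the `q_head_idx`/`q_tail_idx` naming in the diff): each rank takes one early "head" chunk and one late "tail" chunk, so causal-attention work is roughly even across ranks. The helper name `pcp_split_indices` is hypothetical.

```python
# Hypothetical helper: split a (padded) prefill sequence into 2 * pcp_size
# chunks and give rank r the chunks r and 2*pcp_size-1-r. The rank holding an
# early (cheap, little KV to attend) chunk also holds a late (expensive) one.
def pcp_split_indices(seq_len: int, pcp_size: int, rank: int) -> list[int]:
    assert seq_len % (2 * pcp_size) == 0, "assume padding to a multiple of 2*P"
    chunk = seq_len // (2 * pcp_size)
    head = range(rank * chunk, (rank + 1) * chunk)
    tail_start = (2 * pcp_size - 1 - rank) * chunk
    return list(head) + list(range(tail_start, tail_start + chunk))

# 16 tokens on 2 ranks: rank 0 gets chunks 0 and 3, rank 1 gets chunks 1 and 2.
assert pcp_split_indices(16, 2, 0) == [0, 1, 2, 3, 12, 13, 14, 15]
assert pcp_split_indices(16, 2, 1) == [4, 5, 6, 7, 8, 9, 10, 11]
```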
@@ -2,14 +2,24 @@ from dataclasses import dataclass
 from typing import (TYPE_CHECKING, ClassVar, NamedTuple, Optional, Tuple, Type,
                     TypeVar)
 
+import numpy as np
 import torch
+import torch.distributed as dist
+import torch.nn.functional as F
 import torch_npu
 from torch import nn
 from vllm.attention.backends.abstract import (AttentionBackend,
                                               AttentionMetadata,
                                               MLAAttentionImpl)
 from vllm.config import VllmConfig, get_current_vllm_config
-from vllm.distributed import get_tensor_model_parallel_world_size
+
+# isort: off
+from vllm.distributed import (get_dcp_group,
+                              get_decode_context_model_parallel_rank,
+                              get_decode_context_model_parallel_world_size,
+                              get_tensor_model_parallel_rank,
+                              get_tensor_model_parallel_world_size,
+                              get_tp_group)
 from vllm.forward_context import ForwardContext, get_forward_context
 from vllm.logger import logger
 from vllm.model_executor.layers.linear import (LinearBase,
@@ -32,9 +42,15 @@ from vllm_ascend.multistream.ms_split import model_input_split_v1_mla_attn
 from vllm_ascend.ops.weight_prefetch import maybe_npu_prefetch
 from vllm_ascend.quantization.w8a8 import AscendW8A8LinearMethod
 from vllm_ascend.utils import (ACL_FORMAT_FRACTAL_ND, ACL_FORMAT_FRACTAL_NZ,
-                               is_enable_nz)
+                               is_enable_nz, prefill_context_parallel_enable)
 from vllm_ascend.worker.npu_input_batch import InputBatch
 
+if prefill_context_parallel_enable():
+    from vllm.distributed import (get_pcp_group,
+                                  get_prefill_context_model_parallel_rank,
+                                  get_prefill_context_model_parallel_world_size
+                                  )
+# isort:on
 
 if TYPE_CHECKING:
     from vllm.v1.core.sched.output import SchedulerOutput
@@ -65,6 +81,22 @@ class AscendMLABackend(AttentionBackend):
         return AscendMLAImpl
 
 
+@dataclass
+class AscendPCPMetadata:
+    q_head_idx: torch.Tensor = None
+    q_tail_idx: torch.Tensor = None
+    kv_with_q_head_nomask_idx: torch.Tensor = None
+    kv_with_q_head_mask_idx: torch.Tensor = None
+    kv_with_q_tail_nomask_idx: torch.Tensor = None
+    kv_with_q_tail_mask_idx: torch.Tensor = None
+    attn_mask_seqlens: torch.Tensor = None
+    head_attn_nomask_seqlens: torch.Tensor = None
+    tail_attn_nomask_seqlens: torch.Tensor = None
+    q_full_idx: torch.Tensor = None
+    pcp_prefill_mask: torch.Tensor = None
+    pcp_allgather_restore_idx: Optional[list[int]] = None
+
+
 @dataclass
 class AscendMLAPrefillMetadata:
     """ Prefill Specific Metadata for Ascend"""
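Note: a hedged illustration of how the index tensors in `AscendPCPMetadata` fit together; the `argsort` construction here is illustrative bookkeeping, not the PR's builder code. The head and tail attention outputs are concatenated block-wise, and `q_full_idx` restores the original token order (compare `_forward_prefill_cp` near the end of this diff).

```python
import torch

# Two requests, each with 2 "head" and 2 "tail" local query tokens (0..7).
q_head_idx = torch.tensor([0, 1, 4, 5])  # head queries of request 0 and 1
q_tail_idx = torch.tensor([2, 3, 6, 7])  # tail queries of request 0 and 1
cat_order = torch.cat([q_head_idx, q_tail_idx])  # order after the two attention calls
q_full_idx = torch.argsort(cat_order)            # -> [0, 1, 4, 5, 2, 3, 6, 7]
assert torch.equal(cat_order[q_full_idx], torch.arange(8))
```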
@@ -92,6 +124,7 @@ class AscendMLAPrefillMetadata:
     chunked_context: Optional[ChunkedContextMetadata] = None
     sin: torch.Tensor = None
     cos: torch.Tensor = None
+    pcp_metadata: Optional[AscendPCPMetadata] = None
 
 
 @dataclass
@@ -107,6 +140,8 @@ class AscendMLADecodeMetadata:
     attn_mask: Optional[torch.Tensor] = None
     sin: torch.Tensor = None
     cos: torch.Tensor = None
+    num_computed_tokens_of_pcp_dcp: Optional[list[Optional[list[Optional[
+        list[int]]]]]] = None
 
 
 @dataclass
@@ -124,6 +159,7 @@ class AscendMLAMetadata:
     # |-------------------- seq_len ---------------------|
     #                                   |-- query_len ---|
 
+    num_actual_tokens_pcp_padded: int
     num_actual_tokens: int  # Number of tokens excluding padding.
     slot_mapping: torch.Tensor
     query_start_loc: torch.Tensor
@@ -297,6 +333,11 @@ class AscendMLAMetadataBuilder:
         num_actual_tokens = common_attn_metadata.num_actual_tokens
         query_start_loc = common_attn_metadata.query_start_loc
         query_start_loc_cpu = common_attn_metadata.query_start_loc_cpu
+        long_seq_metadata = common_attn_metadata.prefill_context_parallel_metadata
+
+        num_actual_tokens_pcp_padded = long_seq_metadata.num_actual_tokens_pcp_padded if long_seq_metadata else None
+        num_computed_tokens_of_pcp_dcp = long_seq_metadata.num_computed_tokens_of_pcp_dcp if long_seq_metadata else None
+
         num_decodes, num_prefills, num_decode_tokens, num_prefill_tokens = \
             split_decodes_and_prefills(common_attn_metadata, decode_threshold=self.decode_threshold)
         assert num_decodes + num_prefills == num_reqs
@@ -308,10 +349,14 @@ class AscendMLAMetadataBuilder:
         device = self.device
 
         block_table = (common_attn_metadata.block_table_tensor[:num_reqs])
-        slot_mapping = common_attn_metadata.slot_mapping[:num_actual_tokens]
         input_positions = common_attn_metadata.positions[:
                                                          num_actual_tokens].long(
                                                          )
+        if num_actual_tokens_pcp_padded is None:
+            num_actual_tokens_pcp_padded = num_actual_tokens
+
+        slot_mapping = common_attn_metadata.slot_mapping[:
+                                                         num_actual_tokens_pcp_padded]
 
         if self.cos_cache is None:
             self.cos_cache = model.model.layers[
@@ -332,6 +377,31 @@ class AscendMLAMetadataBuilder:
         prefill_metadata = None
         chunked_context_metadata = None
         if num_prefills > 0:
+            pcp_metadata = None
+            common_long_seq_metadata = common_attn_metadata.prefill_context_parallel_metadata
+            if common_long_seq_metadata is not None:
+                pcp_metadata = AscendPCPMetadata(
+                    q_head_idx=common_long_seq_metadata.q_head_idx_tensor,
+                    q_tail_idx=common_long_seq_metadata.q_tail_idx_tensor,
+                    kv_with_q_head_nomask_idx=common_long_seq_metadata.
+                    kv_with_q_head_nomask_idx_tensor,
+                    kv_with_q_head_mask_idx=common_long_seq_metadata.
+                    kv_with_q_head_mask_idx_tensor,
+                    kv_with_q_tail_nomask_idx=common_long_seq_metadata.
+                    kv_with_q_tail_nomask_idx_tensor,
+                    kv_with_q_tail_mask_idx=common_long_seq_metadata.
+                    kv_with_q_tail_mask_idx_tensor,
+                    attn_mask_seqlens=common_long_seq_metadata.
+                    attn_mask_seqlens,
+                    head_attn_nomask_seqlens=common_long_seq_metadata.
+                    head_attn_nomask_seqlens,
+                    tail_attn_nomask_seqlens=common_long_seq_metadata.
+                    tail_attn_nomask_seqlens,
+                    q_full_idx=common_long_seq_metadata.q_full_idx,
+                    pcp_prefill_mask=common_long_seq_metadata.pcp_prefill_mask,
+                    pcp_allgather_restore_idx=long_seq_metadata.
+                    pcp_allgather_restore_idx if long_seq_metadata else None)
+
             reqs_start = num_decodes  # prefill_start
             tokens_start = num_decode_tokens
             max_query_len = query_lens[reqs_start:].max().item()
@@ -392,6 +462,7 @@ class AscendMLAMetadataBuilder:
                 chunked_context=chunked_context_metadata,
                 sin=sin,
                 cos=cos,
+                pcp_metadata=pcp_metadata,
             )
 
         decode_metadata = None
@@ -426,7 +497,9 @@ class AscendMLAMetadataBuilder:
                 attn_mask=common_attn_metadata.spec_attn_mask,
                 actual_seq_lengths_q=actual_seq_lengths_q,
                 sin=sin,
-                cos=cos)
+                cos=cos,
+                num_computed_tokens_of_pcp_dcp=
+                num_computed_tokens_of_pcp_dcp)
         else:
             cos[:num_decode_tokens,
                 ...] = self.cos_cache[input_positions].unsqueeze(
@@ -444,9 +517,12 @@ class AscendMLAMetadataBuilder:
                 attn_mask=common_attn_metadata.spec_attn_mask,
                 actual_seq_lengths_q=actual_seq_lengths_q,
                 sin=sin[:num_decode_tokens, ...],
-                cos=cos[:num_decode_tokens, ...])
+                cos=cos[:num_decode_tokens, ...],
+                num_computed_tokens_of_pcp_dcp=
+                num_computed_tokens_of_pcp_dcp)
 
         return self.metadata_cls(  # type: ignore
+            num_actual_tokens_pcp_padded=num_actual_tokens_pcp_padded,
             num_input_tokens=common_attn_metadata.num_input_tokens,
             num_actual_tokens=num_actual_tokens,
             query_lens=query_lens.tolist(),
@@ -494,6 +570,7 @@ class DecodeMLAPreprocessResult(NamedTuple):
     q_pe: Optional[torch.Tensor] = None
     k_nope: Optional[torch.Tensor] = None
     k_pe: Optional[torch.Tensor] = None
+    decode_q_wo_k_up: Optional[torch.Tensor] = None
 
 
 class PrefillMLAPreprocessResult(NamedTuple):
@@ -561,8 +638,27 @@ class AscendMLAImpl(MLAAttentionImpl):
         self.speculative_config = vllm_config.speculative_config
         self.enable_mlapo = envs.VLLM_ASCEND_ENABLE_MLAPO
 
+        self.pcp_size = get_prefill_context_model_parallel_world_size(
+        ) if prefill_context_parallel_enable() else 1
+        self.pcp_rank = get_prefill_context_model_parallel_rank(
+        ) if self.pcp_size > 1 else 0
+        self.pcp_group = get_pcp_group(
+        ).device_group if self.pcp_size > 1 else None
+
+        self.dcp_size = get_decode_context_model_parallel_world_size()
+        self.dcp_rank = get_decode_context_model_parallel_rank(
+        ) if self.dcp_size > 1 else 0
+        self.dcp_group = get_dcp_group(
+        ).device_group if self.dcp_size > 1 else None
+
+        self.tp_size = get_tensor_model_parallel_world_size()
+        self.tp_rank = get_tensor_model_parallel_rank()
+        self.tp_group = get_tp_group(
+        ).device_group if self.tp_size > 1 else None
+
     def _v_up_proj(self, x):
-        if self.W_UV.shape[0] * self.W_UV.shape[1] < 65536:
+        if self.W_UV.shape[0] * self.W_UV.shape[
+                1] < 65536 and not self.dcp_size * self.pcp_size > 1:
             x = x.view(-1, self.num_heads, self.kv_lora_rank)
             x = torch_npu.npu_transpose_batchmatmul(x,
                                                     self.W_UV,
@@ -1062,7 +1158,6 @@ class AscendMLAImpl(MLAAttentionImpl):
         else:
             attn_output, _ = torch_npu.npu_fused_infer_attention_score(
                 q_nope, k_nope, k_nope, **common_kwargs)
-
         current_ms_metadata = get_multistream_comm_context()
         if current_ms_metadata is None:
             return self._v_up_proj(attn_output)
@@ -1162,9 +1257,9 @@ class AscendMLAImpl(MLAAttentionImpl):
 
         # Process for Flash Comm V1
         q_c = torch.ops.vllm.maybe_all_gather_and_maybe_unpad(
-            q_c, need_gather_q_kv)
+            q_c.contiguous(), need_gather_q_kv)
         kv_no_split = torch.ops.vllm.maybe_all_gather_and_maybe_unpad(
-            kv_no_split, need_gather_q_kv)
+            kv_no_split.contiguous(), need_gather_q_kv)
 
         decode_preprocess_res = None
         prefill_preprocess_res = None
@@ -1177,8 +1272,17 @@ class AscendMLAImpl(MLAAttentionImpl):
             sin = attn_metadata.decode.sin
             decode_ql_nope, decode_q_pe = \
                 self._q_proj_and_k_up_proj(decode_q_c)
+            if self.dcp_size > 1:
+                decode_q_no_split = torch.cat([decode_ql_nope, decode_q_pe],
+                                              dim=-1)
+                decode_q_no_split = get_dcp_group().all_gather(
+                    decode_q_no_split, 1)
+                decode_ql_nope, decode_q_pe = decode_q_no_split.split(
+                    [self.kv_lora_rank, self.qk_rope_head_dim], dim=-1)
             decode_q_pe = self.rope_single(decode_q_pe, cos, sin)
-            decode_slots = attn_metadata.slot_mapping[:num_decode_tokens]
+            decode_slots = attn_metadata.slot_mapping[:num_decode_tokens *
+                                                      self.pcp_size:self.
+                                                      pcp_size]
             decode_kv_no_split = kv_no_split[:num_decode_tokens]
             decode_k_pe, decode_k_nope = self.exec_kv_decode(
                 decode_kv_no_split, cos, sin, kv_cache, decode_slots)
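Note: a plausible reading of the strided decode slot slice above, with made-up slot values; under PCP, `slot_mapping` appears to carry `pcp_size` interleaved entries per decode token, and the `[:num_decode_tokens * pcp_size:pcp_size]` slice keeps one slot per token.

```python
import torch

pcp_size, num_decode_tokens = 2, 3
# Interleaved layout assumed: [t0@r0, t0@r1, t1@r0, t1@r1, t2@r0, t2@r1, ...]
slot_mapping = torch.tensor([10, 11, 20, 21, 30, 31, 40, 41])
decode_slots = slot_mapping[:num_decode_tokens * pcp_size:pcp_size]
assert decode_slots.tolist() == [10, 20, 30]  # one slot per decode token
```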
@@ -1186,6 +1290,10 @@ class AscendMLAImpl(MLAAttentionImpl):
                 decode_ql_nope, decode_q_pe, decode_k_nope, decode_k_pe)
         # Preprocess for prefill tokens
         if has_prefill:
+            if self.pcp_size > 1:
+                num_actual_tokens = (attn_metadata.num_actual_tokens_pcp_padded
+                                     - self.pcp_size * num_decode_tokens
+                                     ) // self.pcp_size + num_decode_tokens
             prefill_kv_no_split = kv_no_split[
                 num_decode_tokens:num_actual_tokens]
             prefill_q_c = q_c[num_decode_tokens:num_actual_tokens]
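Note: a worked example (illustrative numbers) of the local token count computed above. The padded global count holds `pcp_size` replicas of the decode tokens plus the prefill tokens split `pcp_size` ways, so inverting it gives the per-rank count.

```python
pcp_size, num_decode_tokens = 2, 2
num_actual_tokens_pcp_padded = pcp_size * num_decode_tokens + 12  # 12 prefill tokens
num_actual_tokens = (num_actual_tokens_pcp_padded -
                     pcp_size * num_decode_tokens) // pcp_size + num_decode_tokens
assert num_actual_tokens == 8  # 2 decode tokens + 6 of the 12 prefill tokens
```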
@@ -1193,20 +1301,65 @@ class AscendMLAImpl(MLAAttentionImpl):
                 .view(-1, self.num_heads, self.qk_head_dim)
             prefill_q_pe = prefill_q[..., self.qk_nope_head_dim:]
             prefill_q_nope = prefill_q[..., :self.qk_nope_head_dim]
-            cos = attn_metadata.prefill.cos
-            sin = attn_metadata.prefill.sin
+            if self.pcp_size > 1:
+                cos = attn_metadata.prefill.cos[:num_actual_tokens -
+                                                num_decode_tokens]
+                sin = attn_metadata.prefill.sin[:num_actual_tokens -
+                                                num_decode_tokens]
+            else:
+                cos = attn_metadata.prefill.cos
+                sin = attn_metadata.prefill.sin
             prefill_slots = attn_metadata.slot_mapping[
                 num_decode_tokens:num_actual_tokens]
             prefill_q_pe = self.rope_single(prefill_q_pe, cos, sin)
-            prefill_k_pe, prefill_k_c_normed = self.exec_kv_prefill(
-                prefill_kv_no_split, cos, sin, kv_cache, prefill_slots)
-            prefill_k_pe = prefill_k_pe.view(prefill_q_c.shape[0],
-                                             self.num_kv_heads, -1)
+            if self.pcp_size > 1:
+                prefill_kv_no_split = kv_no_split[:num_actual_tokens]
+                kv_c, k_pe = prefill_kv_no_split.split(
+                    [self.kv_lora_rank, self.qk_rope_head_dim], dim=-1)
+                kv_c_normed = self.kv_a_layernorm(kv_c.contiguous())
+                assert len(
+                    kv_cache
+                ) > 1, "the number of kv cache should be greater than 1, namely (nope_cache and rope_cache)"
+                kv_c_normed = kv_c_normed.view(
+                    [num_actual_tokens, self.num_kv_heads, -1])
+                k_pe = k_pe.unsqueeze(1)
+                prefill_k_pe = k_pe
+                prefill_k_pe[
+                    num_decode_tokens:num_actual_tokens] = self.rope_single(
+                        prefill_k_pe[num_decode_tokens:num_actual_tokens], cos,
+                        sin)
+                prefill_k_c_normed = kv_c_normed[:num_actual_tokens]
+                prefill_kv_c_k_pe = torch.cat(
+                    [prefill_k_c_normed, prefill_k_pe], dim=-1)
+                prefill_kv_c_k_pe = get_pcp_group().all_gather(
+                    prefill_kv_c_k_pe, 0)
+                prefill_kv_c_k_pe = torch.index_select(
+                    prefill_kv_c_k_pe, 0, attn_metadata.prefill.pcp_metadata.
+                    pcp_allgather_restore_idx)
+                prefill_kv_c_k_pe = prefill_kv_c_k_pe[num_decode_tokens *
+                                                      self.pcp_size:]
+                prefill_k_c_normed, prefill_k_pe = prefill_kv_c_k_pe.split(
+                    [self.kv_lora_rank, self.qk_rope_head_dim], dim=-1)
+                kv_c_normed, k_pe = prefill_k_c_normed, prefill_k_pe
+                prefill_k_c_normed = prefill_k_c_normed.squeeze()
+                slot_mapping = attn_metadata.slot_mapping[self.pcp_size *
+                                                          num_decode_tokens:]
+                torch_npu._npu_reshape_and_cache(key=kv_c_normed,
+                                                 value=k_pe,
+                                                 key_cache=kv_cache[0],
+                                                 value_cache=kv_cache[1],
+                                                 slot_indices=slot_mapping)
+            else:
+                prefill_k_pe, prefill_k_c_normed = self.exec_kv_prefill(
+                    prefill_kv_no_split, cos, sin, kv_cache, prefill_slots)
             prefill_k_nope, prefill_value = self.kv_b_proj(
                 prefill_k_c_normed)[0].view(
                     -1, self.num_heads,
                     self.qk_nope_head_dim + self.v_head_dim).split(
                         [self.qk_nope_head_dim, self.v_head_dim], dim=-1)
+            if not self.pcp_size > 1:
+                prefill_k_pe = prefill_k_pe.view(prefill_q_c.shape[0],
+                                                 self.num_kv_heads, -1)
             prefill_k_pe = prefill_k_pe.expand(
                 (*prefill_k_nope.shape[:-1], -1))
             prefill_preprocess_res = PrefillMLAPreprocessResult(
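Note: a minimal CPU sketch of the all-gather-and-restore step above. After a gather along dim 0, rows arrive grouped by source rank; `pcp_allgather_restore_idx` (built here with `argsort` purely for illustration) puts them back into global token order.

```python
import torch

# Global token order is 0..7; rank 0 holds tokens [0, 1, 6, 7] (head + tail),
# rank 1 holds [2, 3, 4, 5]. all_gather concatenates the shards rank-by-rank.
gathered = torch.cat([torch.tensor([0, 1, 6, 7]), torch.tensor([2, 3, 4, 5])])
restore_idx = torch.argsort(gathered)  # plays the role of pcp_allgather_restore_idx
assert torch.equal(gathered[restore_idx], torch.arange(8))
```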
@@ -1227,7 +1380,10 @@ class AscendMLAImpl(MLAAttentionImpl):
         if attn_metadata is None:
             # Profiling run.
             return output
-        num_actual_tokens = attn_metadata.num_actual_tokens
+        if self.pcp_size > 1:
+            num_actual_tokens = attn_metadata.num_actual_tokens_pcp_padded // self.pcp_size
+        else:
+            num_actual_tokens = attn_metadata.num_actual_tokens
         assert attn_metadata.num_decodes is not None and \
             attn_metadata.num_prefills is not None and \
             attn_metadata.num_decode_tokens is not None
@@ -1253,12 +1409,20 @@ class AscendMLAImpl(MLAAttentionImpl):
 
         if decode_preprocess_res is not None:
             # MLA Preprocess for decoding
-            output_decode = self._forward_decode(decode_preprocess_res.ql_nope,
-                                                 decode_preprocess_res.q_pe,
-                                                 decode_preprocess_res.k_nope,
-                                                 decode_preprocess_res.k_pe,
-                                                 kv_cache[0].shape[1],
-                                                 attn_metadata)
+            if self.pcp_size * self.dcp_size > 1:
+                output_decode = self._forward_decode_pcp_dcp(
+                    decode_preprocess_res.ql_nope,
+                    decode_preprocess_res.q_pe,
+                    decode_preprocess_res.k_nope,
+                    decode_preprocess_res.k_pe,
+                    kv_cache[0].shape[1],
+                    attn_metadata,
+                )
+            else:
+                output_decode = self._forward_decode(
+                    decode_preprocess_res.ql_nope, decode_preprocess_res.q_pe,
+                    decode_preprocess_res.k_nope, decode_preprocess_res.k_pe,
+                    kv_cache[0].shape[1], attn_metadata)
             current_ms_metadata = get_multistream_comm_context()
             if current_ms_metadata is not None:
                 with torch.npu.stream(current_ms_metadata.comm_stream):
@@ -1271,10 +1435,16 @@ class AscendMLAImpl(MLAAttentionImpl):
             # FIX: aicore move should be also placed on the comm stream in dbo,
             # otherwise it may affect the accuracy
             # TODO: use an elegant way to overlap
-            output_prefill = self._forward_prefill(
-                prefill_preprocess_res.q_nope, prefill_preprocess_res.q_pe,
-                prefill_preprocess_res.k_nope, prefill_preprocess_res.k_pe,
-                prefill_preprocess_res.value, kv_cache, attn_metadata)
+            if self.pcp_size > 1:
+                output_prefill = self._forward_prefill_cp(
+                    prefill_preprocess_res.q_nope, prefill_preprocess_res.q_pe,
+                    prefill_preprocess_res.k_nope, prefill_preprocess_res.k_pe,
+                    prefill_preprocess_res.value, kv_cache, attn_metadata)
+            else:
+                output_prefill = self._forward_prefill(
+                    prefill_preprocess_res.q_nope, prefill_preprocess_res.q_pe,
+                    prefill_preprocess_res.k_nope, prefill_preprocess_res.k_pe,
+                    prefill_preprocess_res.value, kv_cache, attn_metadata)
             current_ms_metadata = get_multistream_comm_context()
             if current_ms_metadata is not None:
                 with torch.npu.stream(current_ms_metadata.comm_stream):
@@ -1311,3 +1481,281 @@ class AscendMLAImpl(MLAAttentionImpl):
         if has_prefill:
             maybe_save_kv_layer_to_connector(layer_name, list(kv_cache))
         return output_padded
+
+    def _forward_prefill_cp(
+        self,
+        q_nope: torch.Tensor,
+        q_pe: torch.Tensor,
+        k_nope: torch.Tensor,
+        k_pe: torch.Tensor,
+        value: torch.Tensor,
+        kv_c_and_k_pe_cache: Tuple[torch.Tensor],
+        attn_metadata: AscendMLAMetadata,
+    ) -> torch.Tensor:
+        assert attn_metadata.prefill is not None
+        assert attn_metadata.prefill.pcp_metadata is not None
+        num_tokens = q_nope.size(0)
+        # Use precomputed indices from the metadata (already converted to
+        # tensors and on device).
+        q_head_idx = attn_metadata.prefill.pcp_metadata.q_head_idx
+        q_tail_idx = attn_metadata.prefill.pcp_metadata.q_tail_idx
+        kv_with_q_head_nomask_idx = attn_metadata.prefill.pcp_metadata.kv_with_q_head_nomask_idx
+        kv_with_q_head_mask_idx = attn_metadata.prefill.pcp_metadata.kv_with_q_head_mask_idx
+        kv_with_q_tail_nomask_idx = attn_metadata.prefill.pcp_metadata.kv_with_q_tail_nomask_idx
+        kv_with_q_tail_mask_idx = attn_metadata.prefill.pcp_metadata.kv_with_q_tail_mask_idx
+        attn_mask_seqlens = attn_metadata.prefill.pcp_metadata.attn_mask_seqlens
+        head_attn_nomask_seqlens = attn_metadata.prefill.pcp_metadata.head_attn_nomask_seqlens
+        tail_attn_nomask_seqlens = attn_metadata.prefill.pcp_metadata.tail_attn_nomask_seqlens
+        mask = attn_metadata.prefill.pcp_metadata.pcp_prefill_mask
+
+        output_head = self._attention_with_mask_and_nomask(
+            q_nope=torch.index_select(q_nope, 0, q_head_idx),
+            q_pe=torch.index_select(q_pe, 0, q_head_idx),
+            k_nope=k_nope,
+            k_pe=k_pe,
+            value=value,
+            kv_mask_idx=kv_with_q_head_mask_idx,
+            kv_nomask_idx=kv_with_q_head_nomask_idx,
+            attn_mask_seqlens=attn_mask_seqlens,
+            attn_nomask_seqlens=head_attn_nomask_seqlens,
+            mask=mask)
+
+        output_tail = self._attention_with_mask_and_nomask(
+            q_nope=torch.index_select(q_nope, 0, q_tail_idx),
+            q_pe=torch.index_select(q_pe, 0, q_tail_idx),
+            k_nope=k_nope,
+            k_pe=k_pe,
+            value=value,
+            kv_mask_idx=kv_with_q_tail_mask_idx,
+            kv_nomask_idx=kv_with_q_tail_nomask_idx,
+            attn_mask_seqlens=attn_mask_seqlens,
+            attn_nomask_seqlens=tail_attn_nomask_seqlens,
+            mask=mask)
+
+        q_full_idx = attn_metadata.prefill.pcp_metadata.q_full_idx
+        output = torch.index_select(
+            torch.cat([output_head, output_tail], dim=0), 0, q_full_idx)
+
+        output = output.reshape([num_tokens, self.num_heads * self.v_head_dim])
+
+        return output
+    def _attention_with_mask_and_nomask(
+            self, q_nope: torch.Tensor, q_pe: torch.Tensor,
+            k_nope: torch.Tensor, k_pe: torch.Tensor, value: torch.Tensor,
+            kv_mask_idx: torch.Tensor, kv_nomask_idx: torch.Tensor,
+            attn_mask_seqlens: torch.Tensor, attn_nomask_seqlens: torch.Tensor,
+            mask: torch.Tensor):
+        attn_output = torch.empty(q_nope.shape[0],
+                                  self.num_heads,
+                                  self.v_head_dim,
+                                  dtype=k_pe.dtype,
+                                  device=k_pe.device)
+        attn_lse = torch.empty(self.num_heads,
+                               q_pe.shape[0],
+                               dtype=torch.float32,
+                               device=k_pe.device)
+        # mask
+        k_nope_mask = torch.index_select(k_nope, 0, kv_mask_idx)
+        value_mask = torch.index_select(value, 0, kv_mask_idx)
+        k_pe_mask = torch.index_select(k_pe, 0, kv_mask_idx)
+        torch_npu.atb.npu_ring_mla(q_nope=q_nope,
+                                   q_rope=q_pe,
+                                   k_nope=k_nope_mask,
+                                   k_rope=k_pe_mask,
+                                   value=value_mask,
+                                   mask=mask,
+                                   seqlen=attn_mask_seqlens,
+                                   head_num=self.num_heads,
+                                   kv_head_num=self.num_heads,
+                                   pre_out=None,
+                                   prev_lse=None,
+                                   qk_scale=self.scale,
+                                   kernel_type="kernel_type_high_precision",
+                                   mask_type="mask_type_triu",
+                                   input_layout="type_bsnd",
+                                   calc_type="calc_type_first_ring",
+                                   output=attn_output,
+                                   softmax_lse=attn_lse)
+
+        # nomask
+        if kv_nomask_idx.shape[0] == 0:
+            return attn_output
+
+        k_nope_nomask = torch.index_select(k_nope, 0, kv_nomask_idx)
+        value_nomask = torch.index_select(value, 0, kv_nomask_idx)
+        k_pe_nomask = torch.index_select(k_pe, 0, kv_nomask_idx)
+        torch_npu.atb.npu_ring_mla(q_nope=q_nope,
+                                   q_rope=q_pe,
+                                   k_nope=k_nope_nomask,
+                                   k_rope=k_pe_nomask,
+                                   value=value_nomask,
+                                   mask=mask,
+                                   seqlen=attn_nomask_seqlens,
+                                   head_num=self.num_heads,
+                                   kv_head_num=self.num_heads,
+                                   pre_out=attn_output,
+                                   prev_lse=attn_lse,
+                                   qk_scale=self.scale,
+                                   kernel_type="kernel_type_high_precision",
+                                   mask_type="no_mask",
+                                   input_layout="type_bsnd",
+                                   calc_type="calc_type_default",
+                                   output=attn_output,
+                                   softmax_lse=attn_lse)
+        return attn_output
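Note: a plain-PyTorch reference sketch (not the ATB kernel) of what the two `npu_ring_mla` calls above accomplish: attention is computed block-wise over the masked and no-mask KV portions, and passing `pre_out`/`prev_lse` into the second call merges the blocks via their log-sum-exp, as this toy shows (causal masking and scaling omitted).

```python
import torch

def block_attn(q, k, v):
    s = q @ k.T                                   # [Tq, Tk] attention scores
    return torch.softmax(s, -1) @ v, torch.logsumexp(s, -1, keepdim=True)

q, k, v = torch.randn(4, 8), torch.randn(10, 8), torch.randn(10, 8)
out_full, _ = block_attn(q, k, v)
out1, lse1 = block_attn(q, k[:6], v[:6])          # "mask" pass
out2, lse2 = block_attn(q, k[6:], v[6:])          # "no-mask" pass, merged below
w1 = torch.sigmoid(lse1 - lse2)                   # softmax weight of block 1
assert torch.allclose(w1 * out1 + (1 - w1) * out2, out_full, atol=1e-5)
```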
+    def _forward_decode_pcp_dcp(
+        self,
+        q_nope: torch.Tensor,
+        q_pe: torch.Tensor,
+        k_nope: torch.Tensor,
+        k_pe: torch.Tensor,
+        block_size: int,
+        attn_metadata: AscendMLAMetadata,
+    ) -> torch.Tensor:
+        decode_meta = attn_metadata.decode
+        assert decode_meta is not None
+        num_tokens = q_nope.size(0)
+        # shape of k_nope/k_pe for npu graph mode should be:
+        # [num_blocks, num_kv_heads, block_size, self.kv_lora_rank/self.qk_rope_head_dim]
+        if self.dcp_size > 1:
+            num_heads = self.num_heads * self.dcp_size
+        else:
+            num_heads = self.num_heads
+
+        k_nope = k_nope.view(-1, block_size, self.num_kv_heads,
+                             self.kv_lora_rank)
+        k_pe = k_pe.view(-1, block_size, self.num_kv_heads,
+                         self.qk_rope_head_dim)
+        q_nope = q_nope.view(num_tokens, num_heads, -1)
+        q_pe = q_pe.view(num_tokens, num_heads, -1)
+        # Use the PCP & DCP split of computed token counts from the scheduler
+        # to compute the actual seq_len and seq_mask.
+        num_computed_tokens_of_pcp_dcp = np.array(
+            decode_meta.num_computed_tokens_of_pcp_dcp
+        )[:attn_metadata.num_decodes]  # [bs, pcp_size, dcp_size]
+        seq_mask_pcp = torch.where(
+            torch.tensor(num_computed_tokens_of_pcp_dcp.sum(2)) == 0, 0,
+            1).to(torch.uint8).to(q_pe.device)
+        seq_mask_dcp = torch.where(
+            torch.tensor(
+                num_computed_tokens_of_pcp_dcp[:, self.pcp_rank, :]) == 0, 0,
+            1).to(torch.uint8).to(q_pe.device)
+        seq_len = num_computed_tokens_of_pcp_dcp[:, self.pcp_rank,
+                                                 self.dcp_rank]
+        seq_len = torch.tensor(seq_len, dtype=torch.int32)
+        # npu_multi_head_latent_attention does not support seq_len = 0, so
+        # update entries where seq_len == 0 to 1. This will not influence the
+        # result, since we will use seq_mask to update the lse.
+        seq_len = torch.where(seq_len == 0, 1, seq_len)
+
+        if torch.sum(seq_len).item() == 0:
+            # Case that no kv_cache has been stored on this rank; no need to
+            # do the following computation.
+            attn_output = torch.zeros(
+                [num_tokens, num_heads, self.kv_lora_rank],
+                dtype=q_nope.dtype,
+                device=q_nope.device)
+            softmax_lse = torch.full((num_tokens, num_heads, 1),
+                                     float('-inf'),
+                                     dtype=q_nope.dtype,
+                                     device=q_nope.device)
+        else:
+            attn_output, softmax_lse = torch_npu.atb.npu_multi_head_latent_attention(
+                q_nope,
+                q_pe,
+                k_nope,
+                k_pe,
+                decode_meta.block_table,
+                seq_len,
+                num_heads,
+                self.scale,
+                self.num_kv_heads,
+                return_lse=True,
+                calc_type="calc_type_ring")
+
+        if self.dcp_size > 1:
+            # Concat out & lse: [bs, num_heads, v_head_dim] + [bs, num_heads, 1]
+            # -> [bs, num_heads, v_head_dim + 1]
+            attn_out_lse = torch.cat([attn_output, softmax_lse], dim=-1)
+            # permute: [bs, num_heads, v_head_dim+1] -> [num_heads, v_head_dim+1, bs]
+            attn_out_lse = attn_out_lse.permute([1, 2, 0]).contiguous()
+            attn_out_lse_all2all = torch.empty_like(attn_out_lse)
+            dist.all_to_all_single(attn_out_lse_all2all,
+                                   attn_out_lse,
+                                   group=self.dcp_group)
+            # permute: [num_heads, v_head_dim+1, bs] -> [bs, num_heads, v_head_dim+1]
+            attn_out_lse_all2all = attn_out_lse_all2all.permute([2, 0, 1])
+            attn_out_lse_split_on_seq = list(
+                torch.chunk(attn_out_lse_all2all, self.dcp_size, dim=1))
+            # Update out & lse
+            attn_out_g = None
+            attn_lse_g = None
+            for i, attn_out_lse_l in enumerate(attn_out_lse_split_on_seq):
+                attn_out_l, attn_lse_l = torch.split(attn_out_lse_l,
+                                                     [self.kv_lora_rank, 1],
+                                                     dim=-1)
+                attn_out_g, attn_lse_g = self._update_out_and_lse(
+                    attn_out_g, attn_lse_g, attn_out_l, attn_lse_l,
+                    seq_mask_dcp[:, i])
+            attn_output = attn_out_g
+            softmax_lse = attn_lse_g
+
+        if self.pcp_size > 1:
+            # Concat out & lse: [bs, num_heads, v_head_dim] + [bs, num_heads, 1]
+            # -> [bs, num_heads, v_head_dim + 1]
+            attn_out_lse = torch.cat([attn_output, softmax_lse], dim=-1)
+            # AllGather out & lse within the PCP group
+            attn_out_lse_list = [
+                torch.empty_like(attn_out_lse) for _ in range(self.pcp_size)
+            ]
+            dist.all_gather(attn_out_lse_list,
+                            attn_out_lse,
+                            group=self.pcp_group)
+            # Update out & lse
+            attn_out_g = None
+            attn_lse_g = None
+            for i, attn_out_lse_l in enumerate(attn_out_lse_list):
+                attn_out_l, attn_lse_l = torch.split(attn_out_lse_l,
+                                                     [self.kv_lora_rank, 1],
+                                                     dim=-1)
+                attn_out_g, attn_lse_g = self._update_out_and_lse(
+                    attn_out_g, attn_lse_g, attn_out_l, attn_lse_l,
+                    seq_mask_pcp[:, i])
+            attn_output = attn_out_g
+        current_ms_metadata = get_multistream_comm_context()
+        if current_ms_metadata is None:
+            return self._v_up_proj(attn_output)
+        else:
+            current_ms_metadata.before_comm_event.record()
+            with torch.npu.stream(current_ms_metadata.comm_stream):
+                current_ms_metadata.before_comm_event.wait()
+                return self._v_up_proj(attn_output)
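Note: a CPU simulation of the DCP all-to-all regrouping above (no process group needed). Each rank computes partial outputs for all `dcp_size * H` gathered heads against its local KV shard; the all-to-all swaps the head axis for the shard axis, so each rank ends up with its own `H` heads' partials from every shard, ready for the LSE-merge loop.

```python
import torch

bs, H, dcp, d = 2, 2, 2, 3  # batch, local heads, dcp_size, kv_lora_rank
# Rank r holds [dcp*H, d+1, bs]: (out, lse) for all gathered heads vs shard r.
ranks = [torch.randn(dcp * H, d + 1, bs) for _ in range(dcp)]
# all_to_all_single semantics: peer 0 receives chunk 0 (its H heads) from all.
recv_on_rank0 = torch.cat([r.chunk(dcp, dim=0)[0] for r in ranks], dim=0)
# Permute back to [bs, dcp*H, d+1] and chunk on heads: one [bs, H, d+1]
# partial per KV shard -- the inputs of the _update_out_and_lse loop.
partials = torch.chunk(recv_on_rank0.permute(2, 0, 1), dcp, dim=1)
assert partials[0].shape == (bs, H, d + 1)
```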
+    # TODO use update op to replace this
+    def _update_out_and_lse(
+        self,
+        out: torch.Tensor,
+        lse: torch.Tensor,
+        block_out: torch.Tensor,
+        block_lse: torch.Tensor,
+        mask: torch.Tensor = None,
+    ):
+        if out is None:
+            out = block_out.to(torch.float32)
+            lse = block_lse
+        else:
+            if mask is None:
+                mask = torch.ones([block_out.size(0)],
+                                  dtype=torch.uint8,
+                                  device=block_out.device)
+            out_mask = mask[:, None, None].expand_as(block_out)
+            lse_mask = mask[:, None, None].expand_as(block_lse)
+            block_out = block_out.to(torch.float32)
+            out_without_update = out.clone()
+            lse_without_update = lse.clone()
+
+            out = out - F.sigmoid(block_lse - lse) * (out - block_out)
+            lse = lse - F.logsigmoid(lse - block_lse)
+            # mask
+            out = torch.where(out_mask, out, out_without_update)
+            lse = torch.where(lse_mask, lse, lse_without_update)
+        return out, lse
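Note: a numeric check that the update rule in `_update_out_and_lse` is the standard log-sum-exp merge in algebraic disguise: `out` becomes the softmax-weighted mix of the two partial outputs and `lse` becomes `logaddexp(lse1, lse2)`.

```python
import torch
import torch.nn.functional as F

out1, out2 = torch.randn(5, 3), torch.randn(5, 3)
lse1, lse2 = torch.randn(5, 1), torch.randn(5, 1)

out = out1 - F.sigmoid(lse2 - lse1) * (out1 - out2)  # the PR's update rule
lse = lse1 - F.logsigmoid(lse1 - lse2)

w1 = torch.exp(lse1) / (torch.exp(lse1) + torch.exp(lse2))
assert torch.allclose(out, w1 * out1 + (1 - w1) * out2, atol=1e-6)
assert torch.allclose(lse, torch.logaddexp(lse1, lse2), atol=1e-6)
```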