Handle with_prefill_across_dp for multistream mla (#1322)
### What this PR does / why we need it?
After #1094, decode might be executed in non-compiled (eager) mode regardless of `torchair_graph_config.enabled`. Multistream MLA assumes torchair-compiled decode whenever `torchair_graph_config.enabled == True`, so it fails in that case. Fix this by deciding per forward call whether multistream MLA may actually run, instead of baking the assumption in at init time.

### Does this PR introduce _any_ user-facing change?
No.

### How was this patch tested?
Tested both offline and by the graph-mode MLA e2e test case.

---------

Signed-off-by: sdmyzlp <lrwei2@petalmail.com>
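Before the hunks, a minimal sketch of the idea (the helper name and exact predicate are illustrative assumptions, not the PR's verbatim code): whether multistream MLA may run is now decided per forward call, because the config flag alone no longer implies compiled decode.

```python
def resolve_multistream_mla(config_enabled: bool,
                            with_prefill_across_dp: bool) -> bool:
    """Hypothetical helper: multistream MLA requires torchair-compiled
    decode. After #1094, decode falls back to eager mode while any DP
    rank is still prefilling, so the config flag alone is not enough."""
    return config_enabled and not with_prefill_across_dp
```

The resolved value is then threaded through `forward()` into `exec_kv()` as the new `enable_multistream_mla` keyword argument, replacing the `self.enable_multistream_mla` attribute removed in the first hunk below.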
```diff
@@ -563,8 +563,6 @@ class AscendMLAImpl(MLAAttentionImpl):
         ascend_config = get_ascend_config()
         self.torchair_graph_enabled = ascend_config.torchair_graph_config.enabled
         self.enable_kv_nz = ascend_config.torchair_graph_config.enable_kv_nz
-        self.enable_multistream_mla = \
-            ascend_config.torchair_graph_config.enable_multistream_mla
 
         # Adapt torch air graph mode with spec decoding.
         speculative_config = get_current_vllm_config().speculative_config
@@ -863,6 +861,7 @@ class AscendMLAImpl(MLAAttentionImpl):
         sin: torch.Tensor,
         kv_cache: Tuple,
         slots: torch.Tensor,
+        enable_multistream_mla: bool = False,
     ):
 
         B = hidden_states.shape[0]
@@ -874,7 +873,7 @@ class AscendMLAImpl(MLAAttentionImpl):
         cache_mode = "PA_NZ" if self.enable_kv_nz else "PA"
         with npu_stream_switch("mla_secondary",
                                0,
-                               enabled=self.enable_multistream_mla):
+                               enabled=enable_multistream_mla):
             k_pe, k_nope, _, _ = torch_npu.npu_kv_rmsnorm_rope_cache(
                 kv,
                 self.kv_a_layernorm.weight,
@@ -1034,6 +1033,7 @@ class AscendMLAImpl(MLAAttentionImpl):
         kv_cache: torch.Tensor,
         attn_metadata: M,
         output: Optional[torch.Tensor] = None,
+        enable_multistream_mla: bool = False,
     ) -> torch.Tensor:
         assert output is not None, "Output tensor must be provided."
         if attn_metadata is None:
@@ -1093,22 +1093,22 @@ class AscendMLAImpl(MLAAttentionImpl):
                 # KvRmsNormRopeCache and SingleRope.
                 npu_wait_tensor(decode_hs_or_q_c,
                                 cos,
-                                enabled=self.enable_multistream_mla)
+                                enabled=enable_multistream_mla)
                 npu_wait_tensor(decode_hs_or_q_c,
                                 sin,
-                                enabled=self.enable_multistream_mla)
+                                enabled=enable_multistream_mla)
             decode_ql_nope, decode_q_pe = \
                 self._q_proj_and_k_up_proj(decode_hs_or_q_c)
             if self.running_in_graph:
                 decode_k_pe, decode_k_nope = self.exec_kv(
                     hidden_states_or_kv_c_normed, cos, sin, kv_cache,
-                    attn_metadata.slot_mapping)
+                    attn_metadata.slot_mapping, enable_multistream_mla)
                 with npu_stream_switch("mla_secondary",
                                        0,
-                                       enabled=self.enable_multistream_mla):
+                                       enabled=enable_multistream_mla):
                     npu_wait_tensor(decode_q_pe,
                                     decode_k_pe,
-                                    enabled=self.enable_multistream_mla)
+                                    enabled=enable_multistream_mla)
                     decode_q_pe = self.rope_single(decode_q_pe, cos, sin)
             else:
                 decode_q_pe[...], decode_k_pe[...] = self.rotary_emb(
```
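For context on the `enabled=` keyword used throughout the hunks: `npu_stream_switch` and `npu_wait_tensor` are vllm-ascend helpers that degrade to no-ops when disabled, which is what lets a single code path serve both graph-compiled decode (multistream on) and the eager fallback (multistream off). A rough sketch of that shape, assuming no-op semantics when disabled; these stand-ins are illustrative, not the real implementations:

```python
import contextlib
from typing import Iterator


@contextlib.contextmanager
def stream_switch_sketch(tag: str, priority: int, *,
                         enabled: bool) -> Iterator[None]:
    """Stand-in for npu_stream_switch: when disabled, a plain no-op so the
    wrapped ops stay on the default stream; the real helper enters a
    secondary NPU stream named `tag` when enabled."""
    # No NPU runtime here, so we yield either way; only the gating shape
    # of the API is being illustrated.
    yield


def wait_tensor_sketch(waiter, waitee, *, enabled: bool):
    """Stand-in for npu_wait_tensor: when disabled there is nothing to
    synchronize, since all work already runs on one stream; when enabled,
    the real helper makes `waiter` wait until `waitee` is produced on the
    secondary stream."""
    if not enabled:
        return waiter
    return waiter  # the real implementation records a cross-stream wait
```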