Remove extra copy in deepseek forward absorb (#5578)

Co-authored-by: saienduri <saimanas.enduri@amd.com>
Author: Ke Bao
Date: 2025-04-22 10:33:21 +08:00
Committed by: GitHub
Parent: b9c87e781d
Commit: 11b23ae97b
3 changed files with 18 additions and 21 deletions
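
The "extra copy" in the title refers to how forward_absorb assembled its fused query/key tensors: it preallocated an empty buffer and wrote each piece into a slice of it, and it copied the normalized latent into the key buffer even though the same tensor is also passed as the value. The diff below builds the tensors with torch.cat instead and drops the intermediate buffers. A minimal sketch of the two assembly styles, with purely illustrative shapes (not the real model dimensions):

```python
import torch

# Illustrative shapes only; the real sizes come from the model config.
q_len, num_heads, kv_lora_rank, rope_dim = 4, 16, 512, 64
q_nope_out = torch.randn(q_len, num_heads, kv_lora_rank)
q_pe = torch.randn(q_len, num_heads, rope_dim)

# Old pattern: preallocate a fused buffer, then write each part into a slice.
q_input = q_nope_out.new_empty(q_len, num_heads, kv_lora_rank + rope_dim)
q_input[..., :kv_lora_rank] = q_nope_out
q_input[..., kv_lora_rank:] = q_pe

# New pattern: a single concatenation produces the same tensor.
q = torch.cat([q_nope_out, q_pe], dim=-1)
assert torch.equal(q_input, q)
```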


@@ -665,6 +665,7 @@ class DeepseekScalingRotaryEmbedding(RotaryEmbedding):
         offsets: Optional[torch.Tensor] = None,
     ) -> Tuple[torch.Tensor, torch.Tensor]:
         """PyTorch-native implementation equivalent to forward()."""
+        dtype = query.dtype
         query_rot = query[..., : self.rotary_dim]
         key_rot = key[..., : self.rotary_dim]
         if self.rotary_dim < self.head_size:
@@ -695,7 +696,7 @@ class DeepseekScalingRotaryEmbedding(RotaryEmbedding):
         else:
             query = query_rot
             key = key_rot
-        return query, key
+        return query.to(dtype), key.to(dtype)
 class Llama3RotaryEmbedding(RotaryEmbedding):
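
The change above captures the incoming dtype and casts the outputs back before returning. In the PyTorch-native path the cos/sin tables may be kept in float32, so the rotary math can upcast a bfloat16/float16 query and key; the explicit cast keeps the native path's output dtype consistent with its inputs. A minimal sketch of the pattern, with a simplified rotation (not the actual forward_native implementation):

```python
import torch

def rotary_native_sketch(query: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor) -> torch.Tensor:
    """Dtype-preserving rotary sketch; the rotation itself is simplified."""
    dtype = query.dtype  # remember the caller's dtype (e.g. bfloat16)
    # Intermediates run in float32: mixing a low-precision query with float32 cos/sin upcasts.
    x1, x2 = torch.chunk(query.float(), 2, dim=-1)
    out = torch.cat([x1 * cos - x2 * sin, x2 * cos + x1 * sin], dim=-1)
    return out.to(dtype)  # cast back so callers always see the input dtype
```

Called with a bfloat16 query and float32 cos/sin tables, the result stays bfloat16 instead of silently widening.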


@@ -682,10 +682,6 @@ class DeepseekV2AttentionMLA(nn.Module):
         forward_batch: ForwardBatch,
         zero_allocator: BumpAllocator,
     ) -> torch.Tensor:
-        q_len = hidden_states.shape[0]
-        q_input = hidden_states.new_empty(
-            q_len, self.num_local_heads, self.kv_lora_rank + self.qk_rope_head_dim
-        )
         if self.q_lora_rank is not None:
             q = self.q_a_proj(hidden_states)[0]
             q = self.q_a_layernorm(q)
@@ -729,20 +725,20 @@ class DeepseekV2AttentionMLA(nn.Module):
             )
         else:
             q_nope_out = torch.bmm(q_nope.transpose(0, 1), self.w_kc)
-        q_input[..., : self.kv_lora_rank] = q_nope_out.transpose(0, 1)
+        q_nope_out = q_nope_out.transpose(0, 1)
         latent_cache = self.kv_a_proj_with_mqa(hidden_states)[0]
-        v_input = latent_cache[..., : self.kv_lora_rank]
-        v_input = self.kv_a_layernorm(v_input.contiguous()).unsqueeze(1)
-        k_input = latent_cache.unsqueeze(1)
-        k_input[..., : self.kv_lora_rank] = v_input
-        k_pe = k_input[..., self.kv_lora_rank :]
+        k_nope = latent_cache[..., : self.kv_lora_rank]
+        k_nope = self.kv_a_layernorm(k_nope).unsqueeze(1)
+        k_pe = latent_cache[..., self.kv_lora_rank :].unsqueeze(1)
         q_pe, k_pe = self.rotary_emb(positions, q_pe, k_pe)
-        q_input[..., self.kv_lora_rank :] = q_pe
-        k_input[..., self.kv_lora_rank :] = k_pe
-        attn_output = self.attn_mqa(q_input, k_input, v_input, forward_batch)
+        q = torch.cat([q_nope_out, q_pe], dim=-1)
+        k = torch.cat([k_nope, k_pe], dim=-1)
+        attn_output = self.attn_mqa(q, k, k_nope, forward_batch)
         attn_output = attn_output.view(-1, self.num_local_heads, self.kv_lora_rank)
         if self.use_deep_gemm_bmm:
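
Taken together, the rewritten forward_absorb no longer allocates q_input/k_input or a separate v_input: the normalized latent (k_nope) serves both as the no-PE part of the key and as the value passed to attn_mqa, and the fused q/k are produced with one torch.cat each. A standalone sketch of the new assembly, with hypothetical parameter names and shapes noted in comments (rotary embedding is assumed to be applied to q_pe/k_pe beforehand):

```python
import torch
from torch import nn

def assemble_absorbed_qkv(
    q_nope_out: torch.Tensor,    # (q_len, num_heads, kv_lora_rank), after the w_kc bmm
    q_pe: torch.Tensor,          # (q_len, num_heads, qk_rope_head_dim), rotary already applied
    k_pe: torch.Tensor,          # (q_len, 1, qk_rope_head_dim), rotary already applied
    latent_cache: torch.Tensor,  # (q_len, kv_lora_rank + qk_rope_head_dim)
    kv_a_layernorm: nn.Module,
    kv_lora_rank: int,
):
    """Sketch of the copy-free q/k/v assembly; names mirror the diff above."""
    # The normalized compressed latent is both the no-PE key part and the value.
    k_nope = kv_a_layernorm(latent_cache[..., :kv_lora_rank]).unsqueeze(1)

    # One concatenation per tensor instead of slice writes into preallocated buffers.
    q = torch.cat([q_nope_out, q_pe], dim=-1)
    k = torch.cat([k_nope, k_pe], dim=-1)
    return q, k, k_nope  # k_nope doubles as the value input to attn_mqa
```

Since the key/value here carry a single head while q keeps num_local_heads, the attn_mqa backend is expected to handle the multi-query broadcasting.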