[Feature] add support for Kimi-VL model (#5383)

Co-authored-by: wenju.li <wenju.li@deepctr.cn>
This commit is contained in:
liwenju0
2025-04-30 12:31:19 +08:00
committed by GitHub
parent 403b855a22
commit 8fefdd32c7
13 changed files with 1189 additions and 11 deletions

View File

@@ -752,7 +752,7 @@ class DeepseekV2AttentionMLA(nn.Module):
q_nope_out = q_nope_out.transpose(0, 1)
k_nope = latent_cache[..., : self.kv_lora_rank]
k_nope = self.kv_a_layernorm(k_nope).unsqueeze(1)
k_nope = self.kv_a_layernorm(k_nope.contiguous()).unsqueeze(1)
k_pe = latent_cache[..., self.kv_lora_rank :].unsqueeze(1)
q_pe, k_pe = self.rotary_emb(positions, q_pe, k_pe)
@@ -1391,6 +1391,9 @@ class DeepseekV2Model(nn.Module):
self.dp_size = get_attention_dp_size()
def get_input_embeddings(self) -> torch.Tensor:
return self.embed_tokens
def forward(
self,
input_ids: torch.Tensor,