Upgrade to vllm 0.17.0 corex v4.1 overlay
@@ -9,7 +9,6 @@ from vllm.model_executor.custom_op import PluggableLayer
from vllm.model_executor.layers.attention import MLAAttention
from vllm.model_executor.layers.quantization import QuantizationConfig


@dataclass
class MLAModules:
    """Modules used in MLA."""
@@ -18,7 +17,7 @@ class MLAModules:
    kv_b_proj: torch.nn.Module
    rotary_emb: torch.nn.Module
    o_proj: torch.nn.Module
    fused_qkv_a_proj: torch.nn.Module | None
    q_a_proj: torch.nn.Module | None
    kv_a_proj_with_mqa: torch.nn.Module | None
    q_a_layernorm: torch.nn.Module | None
    q_b_proj: torch.nn.Module | None
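
Note: the `torch.nn.Module | None` fields above exist because the wrapper supports two query-projection layouts, selected by `q_lora_rank` in the forward paths further down. A minimal sketch of that pairing, using `torch.nn.Identity()` as a stand-in for the real projection and layernorm modules (the stand-ins and the cut-down field list are illustrative only, not the fork's actual wiring):

import torch
from dataclasses import dataclass


@dataclass
class MLAModulesSketch:
    # Cut-down, illustrative subset of the optional fields shown above.
    q_a_proj: torch.nn.Module | None
    q_a_layernorm: torch.nn.Module | None
    q_b_proj: torch.nn.Module | None
    kv_a_proj_with_mqa: torch.nn.Module | None


# q_lora_rank is not None: the low-rank query chain (a_proj -> layernorm -> b_proj) is populated.
with_q_lora = MLAModulesSketch(
    q_a_proj=torch.nn.Identity(),
    q_a_layernorm=torch.nn.Identity(),
    q_b_proj=torch.nn.Identity(),
    kv_a_proj_with_mqa=torch.nn.Identity(),
)

# q_lora_rank is None: the low-rank chain stays None and a full-rank q_proj is used instead.
without_q_lora = MLAModulesSketch(
    q_a_proj=None,
    q_a_layernorm=None,
    q_b_proj=None,
    kv_a_proj_with_mqa=torch.nn.Identity(),
)
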
@@ -74,7 +73,7 @@ class MultiHeadLatentAttentionWrapper(PluggableLayer):
        self.q_lora_rank = q_lora_rank
        self.kv_lora_rank = kv_lora_rank
        self.num_heads = num_heads
        self.fused_qkv_a_proj = mla_modules.fused_qkv_a_proj
        self.q_a_proj = mla_modules.q_a_proj
        self.kv_a_proj_with_mqa = mla_modules.kv_a_proj_with_mqa
        self.q_a_layernorm = mla_modules.q_a_layernorm
        self.q_b_proj = mla_modules.q_b_proj
@@ -106,7 +105,7 @@ class MultiHeadLatentAttentionWrapper(PluggableLayer):
            kv_b_proj=self.kv_b_proj,
            use_sparse=self.is_sparse,
            indexer=self.indexer,
-            rotary_emb=self.rotary_emb
+            rotary_emb=self.rotary_emb,
        )

        self.prefix = prefix
@@ -119,60 +118,47 @@ class MultiHeadLatentAttentionWrapper(PluggableLayer):
    ) -> torch.Tensor:
        q_c = None
        kv_lora = None

        if self.q_lora_rank is not None:
            assert self.fused_qkv_a_proj is not None, (
                "fused_qkv_a_proj is required when q_lora_rank is not None"
            )
            assert self.q_a_layernorm is not None, (
                "q_a_layernorm is required when q_lora_rank is not None"
            )
            assert self.q_b_proj is not None, (
                "q_b_proj is required when q_lora_rank is not None"
            )
            qkv_lora = self.fused_qkv_a_proj(hidden_states)[0]
            q_c, kv_lora = qkv_lora.split(
                [self.q_lora_rank, self.kv_lora_rank + self.qk_rope_head_dim],
                dim=-1,
            )
            q_c = self.q_a_layernorm(q_c)
            q = self.q_b_proj(q_c)[0]
            q = self.q_a_proj(hidden_states)[0]
            kv_a, k_pe = self.kv_a_proj_with_mqa(hidden_states)[0].split([self.kv_lora_rank, self.qk_rope_head_dim], dim=1)
            q = self.q_a_layernorm(q)
            q = self.q_b_proj(q)[0].view(-1, self.num_heads, self.qk_head_dim)
            kv_a = self.kv_a_layernorm(kv_a)
        else:
            assert self.kv_a_proj_with_mqa is not None, (
                "kv_a_proj_with_mqa is required when q_lora_rank is None"
            )
            assert self.q_proj is not None, (
                "q_proj is required when q_lora_rank is None"
            )
            kv_lora = self.kv_a_proj_with_mqa(hidden_states)[0]
            q = self.q_proj(hidden_states)[0]

        kv_c, k_pe = kv_lora.split([self.kv_lora_rank, self.qk_rope_head_dim], dim=-1)
        kv_c_normed = self.kv_a_layernorm(kv_c)

        q = q.view(-1, self.num_heads, self.qk_head_dim)
        # Add head dim of 1 to k_pe
        # k_pe = k_pe.unsqueeze(1)

        # if self.rotary_emb is not None:
        #     q[..., self.qk_nope_head_dim :], k_pe = self.rotary_emb(
        #         positions, q[..., self.qk_nope_head_dim :], k_pe
        #     )

        if self.indexer and self.is_sparse:
            _topk_indices = self.indexer(
                hidden_states, q_c, positions, self.indexer_rope_emb
            )

        q = self.q_proj(hidden_states)[0].view(-1, self.num_heads, self.qk_head_dim)
        latent_kpe = self.kv_a_proj_with_mqa(hidden_states)[0]
        kv_a, k_pe = latent_kpe.split([self.kv_lora_rank, self.qk_rope_head_dim], dim=1)
        kv_a = self.kv_a_layernorm(kv_a)

        # NOTE attention data do not have position, pass it here
        if llama_4_scaling is not None:
            q *= llama_4_scaling

        self.mla_attn.impl.forward_prepare(positions)
        attn_out = self.mla_attn(
            q,
            kv_c_normed,
            k_pe,
            output_shape=(hidden_states.shape[0], self.num_heads * self.v_head_dim),
        )

        attn_out = self.mla_attn(q, kv_a, k_pe, positions)
        return self.o_proj(attn_out)[0]

    def forward_opt(
        self,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
        llama_4_scaling: torch.Tensor | None = None):
        if self.q_lora_rank is not None:
            q_latent_kpe = self.q_a_proj(hidden_states)[0]
            q, kv_a, k_pe, _ = q_latent_kpe.split([self.q_lora_rank, self.kv_lora_rank, self.qk_rope_head_dim, self.q_a_proj.output_padding_size], dim=1)
            q_c = self.q_a_layernorm(q)
            q = self.q_b_proj(q_c)[0].view(-1, self.num_heads, self.qk_head_dim)
            kv_a = self.kv_a_layernorm(kv_a)
        else:
            q = self.q_proj(hidden_states)[0].view(-1, self.num_heads, self.qk_head_dim)
            latent_kpe = self.kv_a_proj_with_mqa(hidden_states)[0]
            kv_a, k_pe = latent_kpe.split([self.kv_lora_rank, self.qk_rope_head_dim], dim=1)
            kv_a = self.kv_a_layernorm(kv_a)
        if self.indexer and self.is_sparse:
            _topk_indices = self.indexer(hidden_states, q_c, positions,
                                         self.rotary_emb)

        # NOTE attention data do not have position, pass it here
        if llama_4_scaling is not None:
            q *= llama_4_scaling
        attn_out = self.mla_attn(q, kv_a, k_pe, positions)
        return self.o_proj(attn_out)[0]
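
Note: for readers unfamiliar with the split bookkeeping in the `forward`/`forward_opt` paths above, `kv_a_proj_with_mqa` produces the compressed latent KV vector and the rotary `k_pe` part in a single projection, which is then split along the feature dimension. A standalone sketch with assumed, DeepSeek-style sizes (`kv_lora_rank=512` and `qk_rope_head_dim=64` are illustrative values, not taken from this diff); vLLM's parallel linear layers return an `(output, ...)` tuple, which is why the diff indexes with `[0]`, whereas the plain `nn.Linear` below returns the tensor directly:

import torch

# Assumed, illustrative sizes (not from this diff).
num_tokens = 4
hidden_size = 1024
kv_lora_rank = 512       # width of the compressed (latent) KV vector
qk_rope_head_dim = 64    # rotary portion shared across heads

# One projection yields [latent KV | rotary k_pe], which the forward paths then split.
kv_a_proj_with_mqa = torch.nn.Linear(hidden_size, kv_lora_rank + qk_rope_head_dim)

hidden_states = torch.randn(num_tokens, hidden_size)
latent_kpe = kv_a_proj_with_mqa(hidden_states)

# The new code splits on dim=1; for a 2-D (tokens, features) tensor this is
# equivalent to the dim=-1 used elsewhere in the file.
kv_a, k_pe = latent_kpe.split([kv_lora_rank, qk_rope_head_dim], dim=1)
assert kv_a.shape == (num_tokens, kv_lora_rank)
assert k_pe.shape == (num_tokens, qk_rope_head_dim)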