[QUANT] Add GPTQModel Dynamic Quantization + lm_head Quantization (#3790)

Signed-off-by: ZX-ModelCloud <zx@modelcloud.ai>
Co-authored-by: ZX-ModelCloud <zx@modelcloud.ai>
Author: Qubitium-ModelCloud
Date: 2025-03-05 17:11:00 +08:00 (committed by GitHub)
Parent: 583d6af71b
Commit: 56a724eba3
56 changed files with 1988 additions and 282 deletions


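The hunks below thread a prefix argument through every Grok-1 submodule and replace the inline f-string prefixes (prefix=f"{prefix}.gate_up_proj") with the add_prefix helper newly imported from sglang.srt.utils, so each quantizable layer carries a stable dotted module path. As a rough sketch (the real helper lives in sglang.srt.utils and may differ), add_prefix presumably just joins the child name onto an optional parent prefix:

# Sketch of the assumed behavior of sglang.srt.utils.add_prefix:
# join a child module name onto its parent's prefix with a dot, and
# return the bare name when there is no parent prefix, so nested modules
# build paths like "model.layers.0.attn.qkv_proj".
def add_prefix(name: str, prefix: str) -> str:
    return name if not prefix else f"{prefix}.{name}"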
@@ -47,6 +47,7 @@ from sglang.srt.layers.vocab_parallel_embedding import (
from sglang.srt.model_executor.forward_batch_info import ForwardBatch
from sglang.srt.model_loader.loader import DefaultModelLoader
from sglang.srt.model_loader.weight_utils import default_weight_loader
+from sglang.srt.utils import add_prefix
class Grok1MLP(nn.Module):
@@ -65,7 +66,7 @@ class Grok1MLP(nn.Module):
[intermediate_size] * 2,
bias=False,
quant_config=quant_config,
prefix=f"{prefix}.gate_up_proj",
prefix=add_prefix("gate_up_proj", prefix),
use_presharded_weights=use_presharded_weights,
)
self.down_proj = RowParallelLinear(
@@ -73,7 +74,7 @@ class Grok1MLP(nn.Module):
hidden_size,
bias=False,
quant_config=quant_config,
prefix=f"{prefix}.down_proj",
prefix=add_prefix("down_proj", prefix),
reduce_results=reduce_results,
use_presharded_weights=use_presharded_weights,
)
@@ -107,6 +108,7 @@ class Grok1MoE(nn.Module):
tp_size: Optional[int] = None,
reduce_results=True,
use_presharded_weights: bool = False,
prefix: str = "",
):
super().__init__()
self.hidden_size = hidden_size
@@ -118,6 +120,7 @@ class Grok1MoE(nn.Module):
bias=False,
params_dtype=params_dtype,
quant_config=None,
prefix=add_prefix("gate", prefix),
)
self.router_logit_softcapping = getattr(
@@ -135,6 +138,7 @@ class Grok1MoE(nn.Module):
tp_size=tp_size,
activation="gelu",
use_presharded_weights=use_presharded_weights,
prefix=add_prefix("experts", prefix),
)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
@@ -163,6 +167,7 @@ class Grok1Attention(nn.Module):
rope_theta: float = 10000,
quant_config: Optional[QuantizationConfig] = None,
reduce_results: bool = True,
prefix: str = "",
) -> None:
super().__init__()
self.config = config
@@ -195,6 +200,7 @@ class Grok1Attention(nn.Module):
self.total_num_kv_heads,
bias=False,
quant_config=quant_config,
prefix=add_prefix("qkv_proj", prefix),
)
self.o_proj = RowParallelLinear(
self.total_num_heads * self.head_dim,
@@ -202,6 +208,7 @@ class Grok1Attention(nn.Module):
bias=False,
quant_config=quant_config,
reduce_results=reduce_results,
prefix=add_prefix("o_proj", prefix),
)
self.rotary_emb = get_rope(
self.head_dim,
@@ -220,6 +227,7 @@ class Grok1Attention(nn.Module):
num_kv_heads=self.num_kv_heads,
layer_id=layer_id,
logit_cap=logit_cap,
prefix=add_prefix("attn", prefix),
)
def forward(
@@ -243,6 +251,7 @@ class Grok1DecoderLayer(nn.Module):
layer_id: int = 0,
quant_config: Optional[QuantizationConfig] = None,
use_presharded_weights: bool = False,
prefix: str = "",
) -> None:
super().__init__()
self.num_experts = config.num_local_experts
@@ -259,6 +268,7 @@ class Grok1DecoderLayer(nn.Module):
layer_id=layer_id,
rope_theta=rope_theta,
quant_config=quant_config,
prefix=add_prefix("attn", prefix),
)
self.block_sparse_moe = Grok1MoE(
config=config,
@@ -273,6 +283,7 @@ class Grok1DecoderLayer(nn.Module):
quant_config=quant_config,
reduce_results=True,
use_presharded_weights=use_presharded_weights,
prefix=add_prefix("block_sparse_moe", prefix),
)
self.pre_attn_norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.post_attn_norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
@@ -311,6 +322,7 @@ class Grok1Model(nn.Module):
config: PretrainedConfig,
quant_config: Optional[QuantizationConfig] = None,
use_presharded_weights: bool = False,
prefix: str = "",
) -> None:
super().__init__()
self.config = config
@@ -320,6 +332,7 @@ class Grok1Model(nn.Module):
self.embed_tokens = VocabParallelEmbedding(
config.vocab_size,
config.hidden_size,
prefix=add_prefix("embed_tokens", prefix),
)
self.layers = nn.ModuleList(
[
@@ -328,6 +341,7 @@ class Grok1Model(nn.Module):
i,
quant_config=quant_config,
use_presharded_weights=use_presharded_weights,
prefix=add_prefix(f"layers.{i}", prefix),
)
for i in range(config.num_hidden_layers)
]
@@ -359,6 +373,7 @@ class Grok1ForCausalLM(nn.Module):
self,
config: PretrainedConfig,
quant_config: Optional[QuantizationConfig] = None,
prefix: str = "",
) -> None:
super().__init__()
self.config = config
@@ -377,8 +392,11 @@ class Grok1ForCausalLM(nn.Module):
config,
quant_config=quant_config,
use_presharded_weights=self.use_presharded_weights,
prefix=add_prefix("model", prefix),
)
-self.lm_head = ParallelLMHead(config.vocab_size, config.hidden_size)
+self.lm_head = ParallelLMHead(
+config.vocab_size, config.hidden_size, prefix=add_prefix("lm_head", prefix)
+)
self.logits_processor = LogitsProcessor(config)
def forward(
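These dotted prefixes are what makes per-module ("dynamic") GPTQModel quantization and lm_head quantization addressable: with every layer, including lm_head, carrying a name like model.layers.3.attn.qkv_proj, a quantization config can override or skip quantization on a per-module basis. The concrete config format is not part of this file's diff; the sketch below is purely illustrative, and DYNAMIC_CFG, the "-:" skip convention, and resolve_overrides are hypothetical names, not the actual GPTQModel/sglang API:

import re
from typing import Optional

# Hypothetical sketch: per-module quantization overrides keyed on the dotted
# prefixes built with add_prefix. Assumptions: a leading "-:" marks modules to
# leave unquantized; other entries override fields of the base quant config.
DYNAMIC_CFG = {
    r"-:model\.layers\.0\..*": {},              # assumption: keep layer 0 unquantized
    r".*\.down_proj": {"bits": 8},              # assumption: wider bits for down_proj
    r"lm_head": {"bits": 8, "group_size": 32},  # lm_head quantized with its own settings
}

def resolve_overrides(prefix: str) -> Optional[dict]:
    """Return override fields for `prefix`, or None if the module stays unquantized."""
    for pattern, overrides in DYNAMIC_CFG.items():
        skip = pattern.startswith("-:")
        bare = pattern[2:] if skip else pattern
        if re.fullmatch(bare, prefix):
            return None if skip else overrides
    return {}  # no match: quantize with the base config unchanged

# e.g. resolve_overrides("model.layers.3.attn.qkv_proj") -> {}
#      resolve_overrides("lm_head") -> {"bits": 8, "group_size": 32}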