[QUANT] Add GPTQModel Dynamic Quantization + lm_head Quantization (#3790)

Signed-off-by: ZX-ModelCloud <zx@modelcloud.ai>
Co-authored-by: ZX-ModelCloud <zx@modelcloud.ai>
Qubitium-ModelCloud committed 2025-03-05 17:11:00 +08:00 (committed by GitHub)
parent 583d6af71b
commit 56a724eba3
56 changed files with 1988 additions and 282 deletions

@@ -46,7 +46,7 @@ from sglang.srt.model_loader.weight_utils import (
default_weight_loader,
maybe_remap_kv_scale_name,
)
-from sglang.srt.utils import set_weight_attrs
+from sglang.srt.utils import add_prefix, set_weight_attrs
class DbrxRouter(nn.Module):
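The only change in this hunk is the new `add_prefix` import from `sglang.srt.utils`, which the rest of the diff uses to build fully qualified module names. A minimal sketch of the assumed behavior (dot-joining a child name onto its parent prefix and tolerating an empty root prefix); the real helper may differ:

```python
# Hedged sketch of the add_prefix helper this diff imports (assumed behavior).
def add_prefix(name: str, prefix: str) -> str:
    """Join a child module name onto its parent prefix with ".".

    add_prefix("Wqkv", "attn")    -> "attn.Wqkv"
    add_prefix("transformer", "") -> "transformer"
    """
    return name if not prefix else f"{prefix}.{name}"
```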
@@ -58,6 +58,7 @@ class DbrxRouter(nn.Module):
self,
config: DbrxConfig,
params_dtype: Optional[torch.dtype] = None,
+prefix: str = "",
):
super().__init__()
self.tp_size = get_tensor_model_parallel_world_size()
@@ -89,6 +90,7 @@ class DbrxExperts(nn.Module):
config: DbrxConfig,
quant_config: Optional[QuantizationConfig] = None,
params_dtype: Optional[torch.dtype] = None,
+prefix: str = "",
):
super().__init__()
self.tp_size = get_tensor_model_parallel_world_size()
@@ -189,6 +191,7 @@ class DbrxAttention(nn.Module):
config: DbrxConfig,
layer_id: int = 0,
quant_config: Optional[QuantizationConfig] = None,
+prefix: str = "",
):
super().__init__()
self.d_model = config.d_model
@@ -207,12 +210,14 @@ class DbrxAttention(nn.Module):
self.total_num_kv_heads,
bias=False,
quant_config=quant_config,
prefix=add_prefix("Wqkv", prefix),
)
self.out_proj = RowParallelLinear(
self.d_model,
self.d_model,
bias=False,
quant_config=quant_config,
prefix=add_prefix("out_proj", prefix),
)
self.rotary_emb = get_rope(
self.head_dim,
@@ -244,6 +249,7 @@ class DbrxAttention(nn.Module):
self.scaling,
num_kv_heads=self.num_kv_heads,
layer_id=layer_id,
prefix=add_prefix("attn", prefix),
)
def forward(
@@ -268,10 +274,16 @@ class DbrxFusedNormAttention(nn.Module):
config: DbrxConfig,
layer_id: int = 0,
quant_config: Optional[QuantizationConfig] = None,
+prefix: str = "",
):
super().__init__()
self.d_model = config.d_model
-self.attn = DbrxAttention(config, layer_id, quant_config=quant_config)
+self.attn = DbrxAttention(
+config,
+layer_id,
+quant_config=quant_config,
+prefix=add_prefix("attn", prefix),
+)
self.norm_1 = nn.LayerNorm(self.d_model)
self.norm_2 = nn.LayerNorm(self.d_model)
@@ -300,10 +312,14 @@ class DbrxBlock(nn.Module):
config: DbrxConfig,
layer_id: int = 0,
quant_config: Optional[QuantizationConfig] = None,
+prefix: str = "",
):
super().__init__()
self.norm_attn_norm = DbrxFusedNormAttention(
-config, layer_id, quant_config=quant_config
+config,
+layer_id,
+quant_config=quant_config,
+prefix=add_prefix("norm_attn_norm", prefix),
)
self.ffn = DbrxExperts(config, quant_config=quant_config)
@@ -328,6 +344,7 @@ class DbrxModel(nn.Module):
self,
config: DbrxConfig,
quant_config: Optional[QuantizationConfig] = None,
+prefix: str = "",
):
super().__init__()
self.wte = VocabParallelEmbedding(
@@ -336,7 +353,12 @@ class DbrxModel(nn.Module):
)
self.blocks = nn.ModuleList(
[
-DbrxBlock(config, i, quant_config=quant_config)
+DbrxBlock(
+config,
+i,
+quant_config=quant_config,
+prefix=add_prefix(f"blocks.{i}", prefix),
+)
for i in range(config.n_layers)
]
)
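With `prefix` threaded DbrxForCausalLM → DbrxModel → DbrxBlock → DbrxFusedNormAttention → DbrxAttention, every quantizable layer ends up registered under a fully qualified name (the "transformer" root prefix is added in the DbrxForCausalLM hunk below). A small illustration of the composition using the sketched helper above; the helper itself is an assumption, while the per-level names come from this diff:

```python
# How block 0's QKV projection name is composed after this change.
root = add_prefix("transformer", "")          # "transformer"
block = add_prefix("blocks.0", root)          # "transformer.blocks.0"
fused = add_prefix("norm_attn_norm", block)   # "transformer.blocks.0.norm_attn_norm"
attn = add_prefix("attn", fused)              # "transformer.blocks.0.norm_attn_norm.attn"
qkv = add_prefix("Wqkv", attn)                # "transformer.blocks.0.norm_attn_norm.attn.Wqkv"
```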
@@ -369,17 +391,21 @@ class DbrxForCausalLM(nn.Module):
self,
config: DbrxConfig,
quant_config: Optional[QuantizationConfig] = None,
+prefix: str = "",
):
super().__init__()
self.config = config
self.quant_config = quant_config
self.unpadded_vocab_size = config.vocab_size
-self.transformer = DbrxModel(config, quant_config=quant_config)
+self.transformer = DbrxModel(
+config, quant_config=quant_config, prefix=add_prefix("transformer", prefix)
+)
self.lm_head = ParallelLMHead(
config.vocab_size,
config.d_model,
org_num_embeddings=config.vocab_size,
padding_size=DEFAULT_VOCAB_PADDING_SIZE,
prefix=add_prefix("lm_head", prefix),
)
self.logits_processor = LogitsProcessor(config)
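These fully qualified names are what make GPTQModel-style dynamic (per-module) quantization and lm_head quantization possible: the quantization config can now see each layer's full name and decide, per module, whether and how to quantize it, including the newly prefixed `lm_head`. A hedged sketch of that per-module lookup; the regex-keyed override table and the helper below are illustrative assumptions, not the exact sglang or GPTQModel API:

```python
import re
from typing import Optional

# Illustrative dynamic-override table keyed by regex over fully qualified
# module names (assumed schema; GPTQModel's real "dynamic" format may differ).
DYNAMIC_RULES = {
    r".*\.blocks\.(0|1)\..*": {"bits": 8},        # keep the first two blocks at 8-bit
    r"lm_head": {"bits": 4, "group_size": 128},   # quantize lm_head as well
}

def resolve_override(prefix: str) -> Optional[dict]:
    """Return the first matching per-module override, or None for the default config."""
    for pattern, override in DYNAMIC_RULES.items():
        if re.fullmatch(pattern, prefix):
            return override
    return None

# With the prefixes added in this diff, modules become distinguishable by name:
print(resolve_override("transformer.blocks.0.norm_attn_norm.attn.Wqkv"))      # {'bits': 8}
print(resolve_override("lm_head"))                                            # {'bits': 4, 'group_size': 128}
print(resolve_override("transformer.blocks.5.norm_attn_norm.attn.out_proj"))  # None -> use defaults
```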