Add torchao quant for mixtral and qwen_moe (#1418)

Jerry Zhang
2024-09-13 23:46:55 -07:00
committed by GitHub
parent 70b6802982
commit 30b404ce72
4 changed files with 50 additions and 20 deletions

python/sglang/srt/models/llama.py

@@ -41,7 +41,7 @@ from sglang.srt.layers.activation import SiluAndMul
 from sglang.srt.layers.layernorm import RMSNorm
 from sglang.srt.layers.logits_processor import LogitsProcessor, LogitsProcessorOutput
 from sglang.srt.layers.radix_attention import RadixAttention
-from sglang.srt.layers.torchao_utils import torchao_quantize_param_data
+from sglang.srt.layers.torchao_utils import apply_torchao_config_
 from sglang.srt.managers.schedule_batch import global_server_args_dict
 from sglang.srt.model_executor.forward_batch_info import InputMetadata
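The import swap above replaces the per-parameter torchao_quantize_param_data calls (removed in the hunk below) with a single apply_torchao_config_ helper. For context, here is a minimal sketch of how a model in this codebase might pick up the torchao setting from global_server_args_dict; the key name "torchao_config" and the model class are assumptions inferred from the self.torchao_config attribute used in load_weights, not code from this commit.

import torch.nn as nn

from sglang.srt.managers.schedule_batch import global_server_args_dict


class TorchaoAwareModel(nn.Module):
    """Hypothetical minimal model showing the wiring; the real models
    (LlamaForCausalLM, and with this commit MixtralForCausalLM and
    Qwen2MoeForCausalLM) do the equivalent in their own __init__."""

    def __init__(self) -> None:
        super().__init__()
        # Assumed key: the server-wide torchao config string, e.g.
        # "int4wo-128"; an empty value disables quantization.
        self.torchao_config = global_server_args_dict["torchao_config"]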
@@ -405,24 +405,7 @@ class LlamaForCausalLM(nn.Module):
                 weight_loader = getattr(param, "weight_loader", default_weight_loader)
                 weight_loader(param, loaded_weight)
-                if self.torchao_config:
-                    if name.endswith("proj.weight") and param.ndim == 2:
-                        params_dict[name] = torchao_quantize_param_data(
-                            param, self.torchao_config
-                        )
-
-        if self.torchao_config:
-            # quantizing the loaded, stacked params, e.g. "...qkv_proj"
-            stacked_params = set(entry[0] for entry in stacked_params_mapping)
-            for param_suffix in stacked_params:
-                for name in params_dict:
-                    if param_suffix in name:
-                        param = params_dict[name]
-                        params_dict[name] = torchao_quantize_param_data(
-                            param, self.torchao_config
-                        )
-            self.load_state_dict(params_dict, assign=True)
+        apply_torchao_config_(self, params_dict, set(["proj.weight"]))
 
 class Phi3ForCausalLM(LlamaForCausalLM):
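For reference, a hedged reconstruction of what apply_torchao_config_ plausibly does, pieced together from the inline logic removed above; the actual helper in sglang.srt.layers.torchao_utils may differ in signature and detail.

from typing import Dict, Set

import torch

from sglang.srt.layers.torchao_utils import torchao_quantize_param_data


def apply_torchao_config_(
    model: torch.nn.Module,
    params_dict: Dict[str, torch.Tensor],
    param_suffixes: Set[str],
) -> None:
    # Skip entirely when no torchao config is set on the model.
    if not getattr(model, "torchao_config", None):
        return
    # Quantize every 2-D weight whose name contains one of the suffixes;
    # a substring match also covers stacked params such as "qkv_proj.weight".
    for suffix in param_suffixes:
        for name in params_dict:
            param = params_dict[name]
            if suffix in name and param.ndim == 2:
                params_dict[name] = torchao_quantize_param_data(
                    param, model.torchao_config
                )
    # assign=True installs the quantized tensors directly instead of
    # copying values into the existing (unquantized) parameter storage.
    model.load_state_dict(params_dict, assign=True)

Under this sketch, the llama call site above quantizes every weight whose name contains "proj.weight" (including stacked params such as qkv_proj and gate_up_proj), and the Mixtral and Qwen2-MoE models named in the commit title can pass their own suffix sets.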