feat: support glm4 tuning (#8473)

Author: Yineng Zhang
Date: 2025-07-28 14:32:58 -07:00
Committed by: GitHub
Parent: 9c138a0445
Commit: 1466c1b896


@@ -427,6 +427,11 @@ def main(args: argparse.Namespace):
         topk = config.num_experts_per_tok
         intermediate_size = config.moe_intermediate_size
         shard_intermediate_size = 2 * intermediate_size // args.tp_size
+    elif config.architectures[0] in ["Glm4MoeForCausalLM"]:
+        E = config.n_routed_experts
+        topk = config.num_experts_per_tok
+        intermediate_size = config.moe_intermediate_size
+        shard_intermediate_size = 2 * intermediate_size // args.tp_size
     else:
         # Default: Mixtral
         E = config.num_local_experts
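
The added branch reads the MoE shape parameters straight off the Hugging Face config: n_routed_experts gives the expert count E, num_experts_per_tok the router top-k, and moe_intermediate_size the per-expert FFN width, from which the per-rank shard width is derived. Below is a minimal standalone sketch of that logic; the config field values and tp_size are illustrative assumptions rather than values from a real GLM-4 checkpoint (a real run would load the config via transformers.AutoConfig.from_pretrained), and the factor of 2 presumably reflects the fused gate/up projection weight being sharded across tensor-parallel ranks.

    from types import SimpleNamespace

    # Stand-in for a Glm4Moe config; field values are illustrative
    # assumptions, not taken from an actual checkpoint.
    config = SimpleNamespace(
        architectures=["Glm4MoeForCausalLM"],
        n_routed_experts=128,
        num_experts_per_tok=8,
        moe_intermediate_size=1536,
    )
    tp_size = 4  # tensor-parallel degree (the script's --tp-size argument)

    if config.architectures[0] in ["Glm4MoeForCausalLM"]:
        E = config.n_routed_experts            # number of routed experts
        topk = config.num_experts_per_tok      # experts selected per token
        intermediate_size = config.moe_intermediate_size
        # 2x: gate and up projections are fused into one weight, which is
        # then split evenly across tensor-parallel ranks.
        shard_intermediate_size = 2 * intermediate_size // tp_size

    print(E, topk, shard_intermediate_size)  # -> 128 8 768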