feat: Add fused moe triton config for qwen3 moe on h100 (#5833)

commit 0045f4b2af
parent 8601300beb
Author: XinyuanTong
Date:   2025-04-28 08:37:13 -07:00
Committed-by: GitHub

2 changed files with 151 additions and 0 deletions


@@ -398,6 +398,11 @@ def main(args: argparse.Namespace):
         topk = config.num_experts_per_tok
         intermediate_size = config.moe_intermediate_size
         shard_intermediate_size = 2 * intermediate_size // args.tp_size
+    elif config.architectures[0] == "Qwen3MoeForCausalLM":
+        E = config.num_experts
+        topk = config.num_experts_per_tok
+        intermediate_size = config.moe_intermediate_size
+        shard_intermediate_size = 2 * intermediate_size // args.tp_size
     elif config.architectures[0] in ["DeepseekV2ForCausalLM", "DeepseekV3ForCausalLM"]:
         n_share_fusion_experts = args.n_share_experts_fusion
         E = (
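
For context, a minimal standalone sketch of what the new Qwen3MoeForCausalLM branch computes. The model id and tp_size value below are illustrative assumptions, not taken from the commit; the actual values come from whichever checkpoint and --tp-size the tuning script is run with.

from transformers import AutoConfig

# Assumed checkpoint for illustration; any Qwen3-MoE config works the same way.
config = AutoConfig.from_pretrained("Qwen/Qwen3-30B-A3B")
tp_size = 1  # assumed tensor-parallel degree (args.tp_size in the script)

E = config.num_experts                    # total number of routed experts
topk = config.num_experts_per_tok         # experts activated per token
intermediate_size = config.moe_intermediate_size
# The factor of 2 accounts for the fused gate/up projection weight; each
# tensor-parallel rank holds a 1/tp_size slice of it.
shard_intermediate_size = 2 * intermediate_size // tp_size

These four values identify the fused MoE kernel shape, which is what the tuned Triton config added in the other changed file (the H100 JSON) is keyed on.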