[fix] logical_to_all_physical_map index 256 is out of bounds in EP parallel. (#6767)
Signed-off-by: miter <miterv@outlook.com>
@@ -1714,21 +1714,33 @@ class DeepseekV2ForCausalLM(nn.Module):
                 or self.config.n_routed_experts != 256
             ):
                 self.num_fused_shared_experts = 0
-                global_server_args_dict["disable_shared_experts_fusion"] = 1
+                global_server_args_dict["disable_shared_experts_fusion"] = True
                 log_info_on_rank0(
                     logger,
                     "Only Deepseek V3/R1 on NV-platform can use shared experts fusion optimization. Shared experts fusion optimization is disabled.",
                 )
+            elif (global_server_args_dict["enable_deepep_moe"] or global_server_args_dict["enable_ep_moe"]):
+                self.num_fused_shared_experts = 0
+                global_server_args_dict["disable_shared_experts_fusion"] = True
+                log_info_on_rank0(
+                    logger,
+                    "Deepseek V3/R1 can not use shared experts fusion optimization when in deepep_moe or ep_moe mode. Shared experts fusion optimization is disabled.",
+                )
         elif self.num_fused_shared_experts == 0:
             if (
                 _is_cuda
                 and torch.cuda.get_device_capability("cuda") >= (9, 0)
                 and self.config.architectures[0] == architecture
                 and self.config.n_routed_experts == 256
-                and (not global_server_args_dict["enable_deepep_moe"])
+                and (
+                    not (
+                        global_server_args_dict["enable_deepep_moe"]
+                        or global_server_args_dict["enable_ep_moe"]
+                    )
+                )
             ):
                 self.num_fused_shared_experts = self.config.n_shared_experts
-                global_server_args_dict["disable_shared_experts_fusion"] = 0
+                global_server_args_dict["disable_shared_experts_fusion"] = False
                 log_info_on_rank0(
                     logger,
                     "Deepseek V3/R1 with fp8 can use shared experts fusion optimization when SM version >=90. Shared experts fusion optimization is enabled.",