[fix CI] Fix logical condition in fused MoE layer for compressed tensor quantization (#10299)

This commit is contained in:
Xiaoyu Zhang
2025-09-11 14:54:09 +08:00
committed by GitHub
parent ef959d7b85
commit 37367da639

View File

@@ -613,8 +613,10 @@ class FusedMoE(torch.nn.Module):
         loaded_weight = loaded_weight.to(param.data.device)
         if (
-            "compressed" in self.quant_method.__class__.__name__.lower()
-            or "w4afp8" in self.quant_config.get_name()
+            (
+                "compressed" in self.quant_method.__class__.__name__.lower()
+                or "w4afp8" in self.quant_config.get_name()
+            )
             and (param.data[expert_id] != 1).any()
             and ((param.data[expert_id] - loaded_weight).abs() > 1e-5).any()
         ):