[fix CI] Fix logical condition in fused MoE layer for compressed tensor quantization (#10299)
This commit is contained in:
@@ -613,8 +613,10 @@ class FusedMoE(torch.nn.Module):
         loaded_weight = loaded_weight.to(param.data.device)

         if (
-            "compressed" in self.quant_method.__class__.__name__.lower()
-            or "w4afp8" in self.quant_config.get_name()
+            (
+                "compressed" in self.quant_method.__class__.__name__.lower()
+                or "w4afp8" in self.quant_config.get_name()
+            )
             and (param.data[expert_id] != 1).any()
             and ((param.data[expert_id] - loaded_weight).abs() > 1e-5).any()
         ):
Reference in New Issue
Block a user