[Bug Fix] Fix Glm4vVisionBlock norm (#9884)
Committed by GitHub
parent 2985090084
commit 0f6ac5e21d
@@ -93,9 +93,8 @@ class Glm4vVisionBlock(Qwen2_5_VisionBlock):
             quant_config=quant_config,
             prefix=prefix,
             num_dummy_heads=config.num_dummy_heads,
+            rms_norm_eps=config.rms_norm_eps,
         )
-        self.norm1 = Glm4vRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
-        self.norm2 = Glm4vRMSNorm(config.hidden_size, eps=config.rms_norm_eps)

         self.mlp = Glm4vVisionMLP(
             config.hidden_size,
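The net effect on the GLM-4V side, as a minimal runnable sketch (the stub parent class, the SimpleNamespace config, and the "Sketch"/"Stub" names are illustrative, not SGLang code; torch.nn.RMSNorm stands in for the project's RMSNorm layer and requires PyTorch >= 2.4): the block now forwards config.rms_norm_eps to the parent constructor instead of overwriting self.norm1 / self.norm2 after super().__init__().

from types import SimpleNamespace

import torch.nn as nn


class _VisionBlockStub(nn.Module):
    # Stand-in for Qwen2_5_VisionBlock after the fix: it accepts rms_norm_eps
    # and builds both norms from it (see the second hunk below).
    def __init__(self, dim: int, rms_norm_eps: float = 1e-6) -> None:
        super().__init__()
        self.norm1 = nn.RMSNorm(dim, eps=rms_norm_eps)
        self.norm2 = nn.RMSNorm(dim, eps=rms_norm_eps)


class Glm4vVisionBlockSketch(_VisionBlockStub):
    def __init__(self, config) -> None:
        # After the fix: forward the model's epsilon to the parent instead of
        # rebuilding self.norm1 / self.norm2 after super().__init__().
        super().__init__(config.hidden_size, rms_norm_eps=config.rms_norm_eps)


config = SimpleNamespace(hidden_size=1536, rms_norm_eps=1e-5)  # demo values only
block = Glm4vVisionBlockSketch(config)
assert block.norm1.eps == config.rms_norm_eps  # norms now use the config value

Dropping the post-init overrides also means each norm is constructed exactly once, with the epsilon taken from the model config.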
@@ -113,12 +113,13 @@ class Qwen2_5_VisionBlock(nn.Module):
         quant_config: Optional[QuantizationConfig] = None,
         prefix: str = "",
         num_dummy_heads: int = 0,
+        rms_norm_eps: float = 1e-6,
     ) -> None:
         super().__init__()
         if norm_layer is None:
             norm_layer = partial(nn.LayerNorm, eps=1e-6)
-        self.norm1 = RMSNorm(dim, eps=1e-6)
-        self.norm2 = RMSNorm(dim, eps=1e-6)
+        self.norm1 = RMSNorm(dim, eps=rms_norm_eps)
+        self.norm2 = RMSNorm(dim, eps=rms_norm_eps)

         if attn_implementation is None:
             softmax_in_single_precision = False
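On the Qwen2_5_VisionBlock side the epsilon becomes a constructor parameter whose default (1e-6) matches the old hard-coded value, so existing Qwen2.5-VL behavior is unchanged while callers such as the GLM-4V block can pass their own value. A minimal sketch under that assumption (the class name and the demo values are illustrative; torch.nn.RMSNorm again stands in for the project's RMSNorm layer):

import torch
import torch.nn as nn


class VisionBlockNormsSketch(nn.Module):
    # Illustrative stand-in for the norm handling in Qwen2_5_VisionBlock only.
    def __init__(self, dim: int, rms_norm_eps: float = 1e-6) -> None:
        super().__init__()
        # Previously both norms were created with a hard-coded eps=1e-6; now the
        # epsilon is a parameter with the same default.
        self.norm1 = nn.RMSNorm(dim, eps=rms_norm_eps)
        self.norm2 = nn.RMSNorm(dim, eps=rms_norm_eps)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # The real block interleaves attention and MLP; only the norms matter here.
        return self.norm2(self.norm1(x))


qwen_block = VisionBlockNormsSketch(64)                    # default: eps=1e-6
glm_block = VisionBlockNormsSketch(64, rms_norm_eps=1e-5)  # config-supplied value
print(qwen_block.norm1.eps, glm_block.norm1.eps)           # 1e-06 1e-05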
||||