Upgrade to vllm 0.17.0 corex v4.1 overlay
@@ -82,11 +82,12 @@ def fused_add_rms_norm(
         return rms_norm_batch_invariant(
             x + residual, weight, variance_epsilon
         ), x + residual
-    ops.fused_add_rms_norm(
+    x, residual = ops.fused_add_rms_norm(
         x,
         residual,
         weight,
         variance_epsilon,
+        residual_alpha,
     )
     return x, residual

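For orientation, here is a minimal pure-PyTorch sketch of what the fused kernel presumably computes with the new argument. The helper name fused_add_rms_norm_ref is hypothetical, and treating residual_alpha as a scale on the residual before the add is an assumption; the hunk itself only shows the extra argument being threaded through to ops.fused_add_rms_norm.

import torch


def fused_add_rms_norm_ref(
    x: torch.Tensor,
    residual: torch.Tensor,
    weight: torch.Tensor,
    variance_epsilon: float,
    residual_alpha: float = 1.0,
) -> tuple[torch.Tensor, torch.Tensor]:
    # Assumed semantics: add the (scaled) residual, then RMS-normalize the sum.
    orig_dtype = x.dtype
    hidden = x.float() + residual_alpha * residual.float()
    variance = hidden.pow(2).mean(dim=-1, keepdim=True)
    normed = hidden * torch.rsqrt(variance + variance_epsilon) * weight.float()
    # Fused kernels of this shape return both the normalized output and the
    # updated residual for the next layer.
    return normed.to(orig_dtype), hidden.to(orig_dtype)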
@@ -125,7 +126,7 @@ def dispatch_rocm_rmsnorm_func(
         return fused_add_rms_norm
     return rms_norm



 def rms_norm_qk(
     input_q: torch.Tensor,
     input_k: torch.Tensor,
@@ -140,11 +141,7 @@ def rms_norm_qk(
         output_q, output_k, input_q, input_k, weight_q, weight_k, epsilon
     )
     return output_q, output_k


-def dispatch_cuda_rmsnorm_qk_func() -> callable:
-    return rms_norm_qk


 @CustomOp.register("rms_norm_qk")
 class RMSNormQK(CustomOp):
@@ -226,8 +223,7 @@ class RMSNormQK(CustomOp):
                 f"[RMSNormQK] Expected input_q and input_k to have same dtype, "
                 f"but got {input_q.dtype} vs {input_k.dtype}"
             )
-        norm_func = dispatch_cuda_rmsnorm_qk_func()
-        return norm_func(
+        return rms_norm_qk(
             input_q,
             input_k,
             weight_q,
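As a reference for the direct rms_norm_qk call that replaces the dispatch_cuda_rmsnorm_qk_func indirection: the hunk only shows the argument order, so assuming the fused op RMS-normalizes q and k independently over the last dimension with separate weights, a pure-PyTorch sketch (hypothetical helper, not part of the diff) would look like this.

import torch


def rms_norm_qk_ref(
    input_q: torch.Tensor,
    input_k: torch.Tensor,
    weight_q: torch.Tensor,
    weight_k: torch.Tensor,
    epsilon: float,
) -> tuple[torch.Tensor, torch.Tensor]:
    def _rms_norm(x: torch.Tensor, weight: torch.Tensor) -> torch.Tensor:
        # Normalize over the last (head) dimension in float32, then rescale.
        x_f = x.float()
        variance = x_f.pow(2).mean(dim=-1, keepdim=True)
        return (x_f * torch.rsqrt(variance + epsilon) * weight.float()).to(x.dtype)

    return _rms_norm(input_q, weight_q), _rms_norm(input_k, weight_k)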
@@ -264,7 +260,7 @@ class RMSNormQK(CustomOp):
             f"eps={self.variance_epsilon}, "
         )


 # --8<-- [start:rms_norm]
 @CustomOp.register("rms_norm")
 class RMSNorm(CustomOp):
     """Root mean square normalization.
@@ -375,7 +371,7 @@ class RMSNorm(CustomOp):
             # otherwise Inductor eliminates the casts to and from f16,
             # increasing memory usage (and complicating pattern matching)
             x = x + residual
-            residual = x.to(orig_dtype).contiguous()
+            residual = x.to(orig_dtype)

         if x.shape[-1] != hidden_size:
             raise ValueError(
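A quick sanity check of why dropping .contiguous() on that line is typically harmless (this reasoning is an inference from the hunk, not stated in the commit): x = x + residual materializes a fresh contiguous tensor, and a dtype cast of a contiguous tensor stays contiguous, so the extra call was a no-op in the common case.

import torch

x = torch.randn(4, 64, dtype=torch.float32)      # fresh tensors from elementwise ops are contiguous
residual = x.to(torch.float16)                   # dtype cast preserves contiguity
print(residual.is_contiguous())                  # True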
@@ -425,6 +421,7 @@ class RMSNorm(CustomOp):
         self,
         x: torch.Tensor,
         residual: torch.Tensor | None = None,
+        residual_alpha: float = 1.0,
     ) -> torch.Tensor | tuple[torch.Tensor, torch.Tensor]:
         if self.variance_size_override is not None:
             return self.forward_native(x, residual)
@@ -499,7 +496,7 @@ class RMSNorm(CustomOp):
         add_residual = residual is not None
         if add_residual:
             return fused_add_rms_norm(
-                x, residual, self.weight.data, self.variance_epsilon
+                x, residual, self.weight.data, self.variance_epsilon,residual_alpha
             )
         else:
             return rms_norm(x, self.weight.data, self.variance_epsilon)
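A hypothetical usage sketch of the extended signature, assuming a CUDA build of this overlay; shapes and values are illustrative only, and residual_alpha defaults to 1.0, which presumably preserves the previous behavior of the fused path.

import torch
from vllm.model_executor.layers.layernorm import RMSNorm

layer = RMSNorm(hidden_size=4096).to(device="cuda", dtype=torch.float16)
x = torch.randn(8, 4096, device="cuda", dtype=torch.float16)
residual = torch.randn_like(x)

# Fused path: returns (normalized_output, updated_residual); the new
# residual_alpha argument is forwarded to ops.fused_add_rms_norm.
out, new_residual = layer.forward_cuda(x, residual, residual_alpha=0.5)

# No residual: plain RMS norm; residual_alpha is unused on this branch.
out_only = layer.forward_cuda(x)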
@@ -649,6 +646,7 @@ class RMSNormGated(CustomOp):
         norm_before_gate: bool = False,
         device: torch.device | None = None,
         dtype: torch.dtype | None = None,
+        activation: str = "swish",
     ):
         """Initialize RMSNormGated.

@@ -663,10 +661,12 @@ class RMSNormGated(CustomOp):
                 If False and z is provided: out = norm(x * silu(z))
             device: Device to create parameters on
             dtype: Data type for parameters
+            activation: Activation function name for gating
         """
         factory_kwargs = {"device": device, "dtype": dtype}
         super().__init__()
         self.eps = eps
+        self.activation = activation
         self.weight = nn.Parameter(torch.empty(hidden_size, **factory_kwargs))
         self.register_parameter("bias", None)
         self.group_size = group_size
@@ -693,6 +693,11 @@ class RMSNormGated(CustomOp):
             - norm_before_gate=True: out = norm(x) * silu(z)
             - norm_before_gate=False: out = norm(x * silu(z))
         """
+        orig_dtype = x.dtype
+        x = x.float()
+        weight = self.weight.float()
+        z = z.float() if z is not None else None
+
         # Apply gating before normalization if needed
         if z is not None and not self.norm_before_gate:
             x = x * F.silu(z)
@@ -702,7 +707,7 @@ class RMSNormGated(CustomOp):
             # Standard RMS norm across the last dimension
             variance = x.pow(2).mean(dim=-1, keepdim=True)
             x_normed = x * torch.rsqrt(variance + self.eps)
-            out = x_normed * self.weight
+            out = x_normed * weight
         else:
             # Group RMS norm
             from einops import rearrange
@@ -710,13 +715,13 @@ class RMSNormGated(CustomOp):
             x_group = rearrange(x, "... (g d) -> ... g d", d=self.group_size)
             variance = x_group.pow(2).mean(dim=-1, keepdim=True)
             x_normed = x_group * torch.rsqrt(variance + self.eps)
-            out = rearrange(x_normed, "... g d -> ... (g d)") * self.weight
+            out = rearrange(x_normed, "... g d -> ... (g d)") * weight

         # Apply gating after normalization if needed
         if z is not None and self.norm_before_gate:
             out = out * F.silu(z)

-        return out
+        return out.to(orig_dtype)

     def forward_cuda(
         self, x: torch.Tensor, z: torch.Tensor | None = None
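The three RMSNormGated.forward_native hunks above implement a single pattern: upcast x, z, and the weight to float32, normalize and gate in float32, and downcast only the final result. A condensed sketch of that pattern for the ungrouped case, using a hypothetical helper name rather than the class itself:

import torch
import torch.nn.functional as F


def gated_rms_norm_ref(
    x: torch.Tensor,
    z: torch.Tensor,
    weight: torch.Tensor,
    eps: float = 1e-6,
    norm_before_gate: bool = False,
) -> torch.Tensor:
    orig_dtype = x.dtype
    x, z, weight = x.float(), z.float(), weight.float()
    if not norm_before_gate:
        x = x * F.silu(z)                      # gate first, then normalize
    variance = x.pow(2).mean(dim=-1, keepdim=True)
    out = x * torch.rsqrt(variance + eps) * weight
    if norm_before_gate:
        out = out * F.silu(z)                  # normalize first, then gate
    return out.to(orig_dtype)                  # single downcast at the end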
@@ -731,6 +736,7 @@ class RMSNormGated(CustomOp):
             eps=self.eps,
             group_size=self.group_size,
             norm_before_gate=self.norm_before_gate,
+            activation=self.activation,
         )