support QuickGELU (#3250)

Yineng Zhang
2025-02-01 19:31:47 +08:00
committed by GitHub
parent 4eb4b401cc
commit 8db776f049
2 changed files with 10 additions and 1 deletion

python/sglang/srt/layers/activation.py

@@ -72,6 +72,15 @@ class GeluAndMul(CustomOp):
         return out
 
 
+class QuickGELU(CustomOp):
+    def forward_native(self, x: torch.Tensor) -> torch.Tensor:
+        return x * torch.sigmoid(1.702 * x)
+
+    def forward_cuda(self, x: torch.Tensor) -> torch.Tensor:
+        # TODO(zhyncs): Implement the CUDA kernel for QuickGELU in sgl-kernel
+        return self.forward_native(x)
+
+
 class ScaledActivation(nn.Module):
     """An activation function with post-scale parameters.

python/sglang/srt/models/qwen2_vl.py

@@ -31,10 +31,10 @@ import torch
 import torch.nn as nn
 import torch.nn.functional as F
 from einops import rearrange
-from vllm.model_executor.layers.activation import QuickGELU
 
 from sglang.srt.configs import Qwen2VLConfig, Qwen2VLVisionConfig
 from sglang.srt.hf_transformers_utils import get_processor
+from sglang.srt.layers.activation import QuickGELU
 from sglang.srt.layers.attention.vision import VisionAttention
 from sglang.srt.layers.linear import ColumnParallelLinear, RowParallelLinear
 from sglang.srt.layers.logits_processor import LogitsProcessor
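With the import now coming from sglang.srt.layers.activation instead of vllm, the vision model can keep passing QuickGELU around as an ordinary activation-module class. A hypothetical sketch of that pattern (VisionMlp and its layer shapes are illustrative, not copied from qwen2_vl.py):

import torch
import torch.nn as nn
from sglang.srt.layers.activation import QuickGELU

class VisionMlp(nn.Module):
    # Illustrative only: the real Qwen2-VL MLP uses parallel linear layers.
    def __init__(self, dim: int, hidden_dim: int, act_layer=QuickGELU):
        super().__init__()
        self.fc1 = nn.Linear(dim, hidden_dim)
        self.act = act_layer()   # CustomOp dispatches to forward_cuda on GPU
        self.fc2 = nn.Linear(hidden_dim, dim)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.fc2(self.act(self.fc1(x)))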