################################################################################
# Copyright(c)2020-2025 Shanghai Biren Technology Co., Ltd. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
import torch
import torch_br
from fastcore.basics import patch_to

from vllm.model_executor.layers.activation import QuickGELU, SiluAndMul


# Out-of-tree override for vLLM's SiluAndMul: split the input into its
# [gate | up] halves along the last dimension and compute SiLU(gate) * up
# in one fused Biren SUPA kernel instead of separate PyTorch ops.
@patch_to(SiluAndMul)
def silu_and_mul_forward_oot(self, x: torch.Tensor) -> torch.Tensor:
    d = x.shape[-1] // 2
    return torch_br.supa_silumul(x[..., :d], x[..., d:])  # type: ignore
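
# For readers unfamiliar with fastcore: @patch_to(cls) attaches the decorated
# function to the class under the function's own name, so the decoration
# above is roughly equivalent to this sketch (not how the module actually
# spells it):
#
#     SiluAndMul.silu_and_mul_forward_oot = silu_and_mul_forward_oot
#
# The wiring that routes vLLM's out-of-tree dispatch to these methods is
# assumed to live elsewhere in this plugin.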

# QuickGELU has no dedicated SUPA kernel here, so fall back to vLLM's native
# PyTorch implementation (x * sigmoid(1.702 * x)).
@patch_to(QuickGELU)
def quick_gelu_forward_oot(self, x: torch.Tensor) -> torch.Tensor:  # noqa: F811
    return self.forward_native(x)
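

# ------------------------------------------------------------------------------
# Minimal self-check sketch (an illustrative addition, not part of the shipped
# module). Assumptions: a Biren accelerator is present, torch_br has registered
# its PyTorch backend, and "supa" is the device string it exposes; adjust the
# device name for your environment.
if __name__ == "__main__":
    dev = "supa"  # assumed Biren device string
    x = torch.randn(2, 256, device=dev)  # last dim must be even: [gate | up]
    d = x.shape[-1] // 2
    # Reference math from vLLM's native SiluAndMul: SiLU(gate) * up.
    ref = torch.nn.functional.silu(x[..., :d]) * x[..., d:]
    out = SiluAndMul().silu_and_mul_forward_oot(x)
    torch.testing.assert_close(out.cpu(), ref.cpu(), rtol=1e-3, atol=1e-3)
    print("supa_silumul matches the native SiluAndMul reference")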