Sync from v0.13
tests/kernels/core/test_activation.py (new file, 144 lines)
@@ -0,0 +1,144 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

import random

import pytest
import torch

from tests.kernels.allclose_default import get_default_atol, get_default_rtol
from tests.kernels.utils import opcheck
from vllm.model_executor.layers.activation import (
    FastGELU,
    FatreluAndMul,
    GeluAndMul,
    MulAndSilu,
    NewGELU,
    QuickGELU,
    SiluAndMul,
    SwigluOAIAndMul,
)
from vllm.platforms import current_platform

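# Parametrization grid: a few token counts and hidden sizes, fp16/bf16/fp32,
# and up to two CUDA devices when more than one GPU is visible.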
DTYPES = [torch.half, torch.bfloat16, torch.float]
NUM_TOKENS = [7, 83, 2048]  # Arbitrary values for testing
D = [512, 13824]  # Arbitrary values for testing
SEEDS = [0]
CUDA_DEVICES = [f"cuda:{i}" for i in range(1 if torch.cuda.device_count() == 1 else 2)]


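# test_act_and_mul: each fused "activation and mul" layer takes an input of
# shape (num_tokens, 2 * d), splits the last dimension in half, applies the
# activation to one half, and multiplies it elementwise by the other half
# (e.g. SiluAndMul computes silu(x[..., :d]) * x[..., d:]). The output of the
# custom CUDA op is compared against the layer's native PyTorch forward.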
@pytest.mark.parametrize(
    "activation",
    [
        "silu_and_mul",
        "mul_and_silu",
        "gelu",
        "gelu_tanh",
        "fatrelu",
        "swigluoai_and_mul",
    ],
)
@pytest.mark.parametrize("num_tokens", NUM_TOKENS)
@pytest.mark.parametrize("d", D)
@pytest.mark.parametrize("dtype", DTYPES)
@pytest.mark.parametrize("seed", SEEDS)
@pytest.mark.parametrize("device", CUDA_DEVICES)
@torch.inference_mode()
def test_act_and_mul(
    activation: str,
    num_tokens: int,
    d: int,
    dtype: torch.dtype,
    seed: int,
    device: str,
) -> None:
    current_platform.seed_everything(seed)
    torch.set_default_device(device)
    x = torch.randn(num_tokens, 2 * d, dtype=dtype)
    if activation == "silu_and_mul":
        layer = SiluAndMul()
        fn = torch.ops._C.silu_and_mul
    elif activation == "mul_and_silu":
        layer = MulAndSilu()
        fn = torch.ops._C.mul_and_silu
    elif activation == "gelu":
        layer = GeluAndMul(approximate="none")
        fn = torch.ops._C.gelu_and_mul
    elif activation == "gelu_tanh":
        layer = GeluAndMul(approximate="tanh")
        fn = torch.ops._C.gelu_tanh_and_mul
    elif activation == "fatrelu":
        threshold = random.uniform(0, 1)
        layer = FatreluAndMul(threshold)
        fn = torch.ops._C.fatrelu_and_mul
    elif activation == "swigluoai_and_mul":
        layer = SwigluOAIAndMul()
        fn = torch.ops._C.swigluoai_and_mul
    out = layer(x)
    ref_out = layer.forward_native(x)
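    # SwigluOAIAndMul scales and clamps its inputs (layer.alpha / layer.limit),
    # so the fused kernel is not expected to match the native reference exactly;
    # compare with per-dtype relative tolerances instead.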
    if activation == "swigluoai_and_mul":
        rtol = {
            # For fp16, change the relative tolerance from 1e-3 to 2e-3
            torch.float16: 2e-3,
            torch.bfloat16: 2e-2,
            torch.float: 1.3e-6,
        }

        def _get_rtol(output) -> float:
            return rtol[output.dtype]

        torch.testing.assert_close(
            out, ref_out, atol=get_default_atol(out), rtol=_get_rtol(out)
        )
    else:
        # The SiluAndMul, MulAndSilu, GELU and FatReLU implementations are
        # equivalent to the native PyTorch implementations, so we can do exact
        # comparison.
        torch.testing.assert_close(out, ref_out, atol=0.0, rtol=0.0)

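    # opcheck calls the raw custom op with an explicitly allocated output buffer
    # to validate its op registration; fatrelu and swigluoai_and_mul take extra
    # scalar arguments (threshold, and alpha/limit respectively).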
    d = x.shape[-1] // 2
    output_shape = x.shape[:-1] + (d,)
    out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
    if activation == "fatrelu":
        opcheck(fn, (out, x, threshold))
    elif activation == "swigluoai_and_mul":
        opcheck(fn, (out, x, layer.alpha, layer.limit))
    else:
        opcheck(fn, (out, x))


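# test_activation: the plain element-wise GELU variants (FastGELU, NewGELU,
# QuickGELU) run on an input of shape (num_tokens, d); the custom CUDA op is
# checked against the native forward within the default per-dtype tolerances.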
@pytest.mark.parametrize(
    "activation",
    [
        (FastGELU, torch.ops._C.gelu_fast),
        (NewGELU, torch.ops._C.gelu_new),
        (QuickGELU, torch.ops._C.gelu_quick),
    ],
)
@pytest.mark.parametrize("num_tokens", NUM_TOKENS)
@pytest.mark.parametrize("d", D)
@pytest.mark.parametrize("dtype", DTYPES)
@pytest.mark.parametrize("seed", SEEDS)
@pytest.mark.parametrize("device", CUDA_DEVICES)
@torch.inference_mode()
def test_activation(
    activation: tuple[type[torch.nn.Module], object],
    num_tokens: int,
    d: int,
    dtype: torch.dtype,
    seed: int,
    device: str,
) -> None:
    current_platform.seed_everything(seed)
    torch.set_default_device(device)
    x = torch.randn(num_tokens, d, dtype=dtype)
    layer = activation[0]()
    fn = activation[1]
    out = layer(x)
    ref_out = layer.forward_native(x)
    torch.testing.assert_close(
        out, ref_out, atol=get_default_atol(out), rtol=get_default_rtol(out)
    )

    out = torch.empty_like(x)
    opcheck(fn, (out, x))