Upgrade to vllm 0.17.0 corex v4.1 overlay

2026-04-29 19:38:22 +08:00
parent 8fac6062e4
commit 938d0854a5
430 changed files with 35969 additions and 14511 deletions

@@ -8,6 +8,7 @@ import torch
 import torch.nn as nn
 import torch.nn.functional as F
+from vllm import envs
 from vllm.distributed import (
     divide,
     get_tensor_model_parallel_rank,
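The new `from vllm import envs` import is what makes the `VLLM_USE_SILU_QUANT_FUSION` checks in the hunks below resolvable. A minimal sketch of how such a flag would be registered, assuming the overlay follows upstream vLLM's pattern of one lazily evaluated lambda per variable in vllm/envs.py; the default value here is a guess, not taken from this diff:

import os

# Hypothetical registration in vllm/envs.py (flag name from this diff,
# default and parsing assumed): enables the fused SiLU-mul + quant path.
environment_variables = {
    "VLLM_USE_SILU_QUANT_FUSION":
        lambda: bool(int(os.getenv("VLLM_USE_SILU_QUANT_FUSION", "0"))),
}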
@@ -130,13 +131,12 @@ class SiluAndMul(CustomOp):
     def __init__(self, *, compile_native: bool = True):
         super().__init__(compile_native=compile_native)
-        if current_platform.is_cuda_alike():
+        if current_platform.is_cuda_alike() or current_platform.is_xpu():
             from vllm import _custom_ops as ops
-            self.op = ops.silu_and_mul
-        elif current_platform.is_xpu():
-            from vllm._ipex_ops import ipex_ops
-            self.op = ipex_ops.silu_and_mul
+            if envs.VLLM_USE_SILU_QUANT_FUSION:
+                self.op = ops.silu_and_mul_quant
+            else:
+                self.op = ops.silu_and_mul
         elif current_platform.is_cpu():
             self._forward_method = self.forward_native
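This hunk folds XPU into the CUDA-alike path (dropping the separate `ipex_ops` binding) and, when `VLLM_USE_SILU_QUANT_FUSION` is set, binds a fused activation-plus-quantization kernel instead of the plain one. For orientation, a PyTorch-native sketch of what the unfused `ops.silu_and_mul` computes, matching the `forward_native` shown in the next hunk:

import torch
import torch.nn.functional as F

def silu_and_mul_reference(x: torch.Tensor) -> torch.Tensor:
    # Split the last dimension in half; SiLU-gate the first half
    # against the second. Output has half the input's last dim.
    d = x.shape[-1] // 2
    return F.silu(x[..., :d]) * x[..., d:]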
@@ -146,11 +146,15 @@ class SiluAndMul(CustomOp):
         d = x.shape[-1] // 2
         return F.silu(x[..., :d]) * x[..., d:]

-    def forward_cuda(self, x: torch.Tensor) -> torch.Tensor:
+    def forward_cuda(self, x: torch.Tensor, out_dim: int = 0) -> torch.Tensor:
         d = x.shape[-1] // 2
         output_shape = x.shape[:-1] + (d,)
-        out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
-        self.op(out, x)
+        if envs.VLLM_USE_SILU_QUANT_FUSION:
+            quant_out, out_scales = self.op(x, out_dim)
+            out = (quant_out, out_scales, x.dtype)
+        else:
+            out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+            self.op(out, x)
         return out

     def forward_xpu(self, x: torch.Tensor) -> torch.Tensor:
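Note the contract change in `forward_cuda`: with fusion off it still returns a plain tensor of shape `x.shape[:-1] + (d,)`, but with the flag on it returns a `(quant_out, out_scales, orig_dtype)` tuple, so downstream code must branch on the flag or on the return type. A caller-side sketch; the scale semantics are inferred only from the tuple in this diff, and real callers would presumably feed `quant_out`/`out_scales` straight into a quantized GEMM rather than dequantizing:

import torch

def consume_silu_output(result, dequantize):
    # Handle both return shapes of SiluAndMul.forward_cuda.
    # `dequantize` is a hypothetical callable (quant_out, scales) -> Tensor.
    if isinstance(result, tuple):
        quant_out, out_scales, orig_dtype = result  # fused-quant path
        return dequantize(quant_out, out_scales).to(orig_dtype)
    return result  # unfused path: already a plain Tensor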
@@ -174,7 +178,6 @@ class MulAndSilu(CustomOp):
     def __init__(self):
         super().__init__()
         if current_platform.is_cuda_alike() or current_platform.is_xpu():
-            # self.op = torch.ops._C.mul_and_silu
             from vllm import _custom_ops as ops
             self.op = ops.mul_and_silu
         elif current_platform.is_cpu():
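This hunk only drops a stale commented-out `torch.ops._C` line. `MulAndSilu` itself is the mirror image of `SiluAndMul`: the SiLU gate sits on the second half of the last dimension. A native sketch of the semantics `ops.mul_and_silu` binds here, matching upstream vLLM's `MulAndSilu.forward_native`:

import torch
import torch.nn.functional as F

def mul_and_silu_reference(x: torch.Tensor) -> torch.Tensor:
    # First half multiplied by SiLU of the second half
    # (SiluAndMul applies SiLU to the first half instead).
    d = x.shape[-1] // 2
    return x[..., :d] * F.silu(x[..., d:])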
@@ -397,7 +400,6 @@ class NewGELU(CustomOp):
             or current_platform.is_cpu()
             or current_platform.is_xpu()
         ):
-            # self.op = torch.ops._C.gelu_new
             from vllm import _custom_ops as ops
             self.op = ops.gelu_new
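Same dead-comment cleanup for `NewGELU`. For reference, the tanh-based GELU that `ops.gelu_new` implements, per upstream vLLM's `NewGELU.forward_native`:

import math
import torch

def gelu_new_reference(x: torch.Tensor) -> torch.Tensor:
    # "New" GELU: 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3)))
    c = math.sqrt(2.0 / math.pi)
    return 0.5 * x * (1.0 + torch.tanh(c * (x + 0.044715 * torch.pow(x, 3.0))))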
@@ -427,7 +429,8 @@ class FastGELU(CustomOp):
             or current_platform.is_cpu()
             or current_platform.is_xpu()
         ):
-            self.op = torch.ops._C.gelu_fast
+            from vllm import _custom_ops as ops
+            self.op = ops.gelu_fast

     def forward_native(self, x: torch.Tensor) -> torch.Tensor:
         """PyTorch-native implementation equivalent to forward()."""
@@ -455,7 +458,6 @@ class QuickGELU(CustomOp):
             or current_platform.is_cpu()
             or current_platform.is_xpu()
         ):
-            # self.op = torch.ops._C.gelu_quick
             from vllm import _custom_ops as ops
             self.op = ops.gelu_quick
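`QuickGELU` gets the same cleanup; its kernel computes the sigmoid approximation, per upstream's `QuickGELU.forward_native`:

import torch

def gelu_quick_reference(x: torch.Tensor) -> torch.Tensor:
    # Quick GELU: x * sigmoid(1.702 * x).
    return x * torch.sigmoid(1.702 * x)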