enable llama3.1-8B on xpu (#9434)

Author: Huaiyu, Zheng
Date: 2025-09-08 13:34:20 +08:00
Committed by: GitHub
Parent: b7d1f17b8d
Commit: ee21817c6b
4 changed files with 43 additions and 13 deletions


@@ -28,6 +28,7 @@ from sglang.srt.utils import (
     is_cuda,
     is_hip,
     is_npu,
+    is_xpu,
     supports_custom_op,
 )
 
@@ -37,6 +38,7 @@ _is_npu = is_npu()
 _use_aiter = get_bool_env_var("SGLANG_USE_AITER") and _is_hip
 _is_cpu_amx_available = cpu_has_amx_support()
 _is_cpu = is_cpu()
+_is_xpu = is_xpu()
 
 if _is_cuda:
     from flashinfer.norm import fused_add_rmsnorm as flashinfer_fused_add_rmsnorm
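
For context, the new _is_xpu flag follows the same probe pattern as _is_cpu and the other platform checks: call the probe once at import time and cache the result in a module-level flag, so hot paths never repeat the device query. A minimal sketch of such a probe, assuming detection goes through PyTorch's XPU backend (the real is_xpu lives in sglang.srt.utils and may differ in detail):

    import torch

    def is_xpu() -> bool:
        # Sketch of a platform probe: True only when this PyTorch build
        # ships the Intel XPU backend and a device is actually visible.
        return hasattr(torch, "xpu") and torch.xpu.is_available()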
@@ -327,7 +329,9 @@ class Gemma3RMSNorm(CustomOp):
         return f"{tuple(self.weight.shape)}, eps={self.eps}"
 
 
-if not (_is_cuda or _is_hip or _is_npu or (_is_cpu and _is_cpu_amx_available)):
+if not (
+    _is_cuda or _is_hip or _is_npu or (_is_cpu and _is_cpu_amx_available) or _is_xpu
+):
     logger.info(
         "sgl-kernel layernorm implementation is not available on current platform. Fallback to other kernel libraries."
     )
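
With _is_xpu added to the guard, XPU platforms no longer log the fallback notice and are treated as having a usable layernorm path. For reference, the operation at stake is RMSNorm; below is a hedged, pure-PyTorch equivalent of what such a kernel computes (illustrative function name; sglang's actual RMSNorm is a CustomOp with per-backend forwards):

    import torch

    def rms_norm_reference(x: torch.Tensor, weight: torch.Tensor,
                           eps: float = 1e-6) -> torch.Tensor:
        # Normalize by the root-mean-square over the last dimension in
        # float32 for stability, scale by the learned weight, and cast
        # back. Runs on any PyTorch backend, including XPU tensors.
        orig_dtype = x.dtype
        x = x.float()
        variance = x.pow(2).mean(dim=-1, keepdim=True)
        x = x * torch.rsqrt(variance + eps)
        return (x * weight.float()).to(orig_dtype)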