Revert "Revert "fix: import vllm_rotary_embedding error when head_size not in 64, 128, 256, 512"" (#5777)

Lianmin Zheng
2025-04-27 01:04:15 -07:00
committed by GitHub
parent a45a4b239d
commit 6e313c1b8b


@@ -14,8 +14,6 @@ _is_cuda = is_cuda()
 if _is_cuda:
     from sgl_kernel import apply_rope_with_cos_sin_cache_inplace
-else:
-    from vllm._custom_ops import rotary_embedding as vllm_rotary_embedding
 
 
 def _rotate_neox(x: torch.Tensor) -> torch.Tensor:
@@ -84,6 +82,12 @@ class RotaryEmbedding(CustomOp):
         # NOTE(ByronHsu): cache needs to be in FP32 for numerical stability
         if not _is_cuda:
             cache = cache.to(dtype)
+
+        if not _is_cuda or self.head_size not in [64, 128, 256, 512]:
+            from vllm._custom_ops import rotary_embedding
+
+            self.vllm_rotary_embedding = rotary_embedding
+
         self.cos_sin_cache: torch.Tensor
         self.register_buffer("cos_sin_cache", cache, persistent=False)
 
@@ -160,7 +164,7 @@ class RotaryEmbedding(CustomOp):
             )
         else:
             self.cos_sin_cache = self.cos_sin_cache.to(query.device, dtype=query.dtype)
-            vllm_rotary_embedding(
+            self.vllm_rotary_embedding(
                 positions,
                 query,
                 key,
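
Reading the diff: the fused sgl_kernel rope path only handles head sizes 64, 128, 256, and 512, so other head sizes need vLLM's rotary_embedding kernel even on CUDA. Before this change, the vLLM op was imported only in the non-CUDA branch at module import time, so a CUDA run with an unsupported head size reached the fallback call with the name undefined. The fix imports the fallback lazily in __init__ whenever it can actually be needed and binds it to the instance as self.vllm_rotary_embedding. A minimal sketch of that dispatch pattern is below; RotaryEmbeddingSketch and FUSED_HEAD_SIZES are hypothetical stand-ins for sglang's real RotaryEmbedding, and the call signatures follow how the diff and sglang use the two kernels.

import torch

_is_cuda = torch.cuda.is_available()
# Head sizes the fused sgl_kernel rope supports (per the guard in the diff).
FUSED_HEAD_SIZES = (64, 128, 256, 512)


class RotaryEmbeddingSketch:
    def __init__(self, head_size: int, is_neox_style: bool = True):
        self.head_size = head_size
        self.is_neox_style = is_neox_style
        # Import the vLLM fallback only when it can actually be needed, and
        # bind it per instance so forward() never references a module-level
        # name that might not have been defined at import time.
        if not _is_cuda or head_size not in FUSED_HEAD_SIZES:
            from vllm._custom_ops import rotary_embedding

            self.vllm_rotary_embedding = rotary_embedding

    def forward(self, positions, query, key, cos_sin_cache):
        if _is_cuda and self.head_size in FUSED_HEAD_SIZES:
            # Fused CUDA path for the supported head sizes.
            from sgl_kernel import apply_rope_with_cos_sin_cache_inplace

            apply_rope_with_cos_sin_cache_inplace(
                positions=positions,
                query=query,
                key=key,
                head_size=self.head_size,
                cos_sin_cache=cos_sin_cache,
            )
        else:
            # Fallback bound in __init__; never a bare module-level name.
            self.vllm_rotary_embedding(
                positions, query, key, self.head_size, cos_sin_cache, self.is_neox_style
            )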