ROCm: bump 6.3.0 (#3259)

This commit is contained in:
HAI
2025-02-02 12:13:40 -08:00
committed by GitHub
parent 55f5fc68ac
commit 566d61d90f
7 changed files with 28 additions and 22 deletions

View File

@@ -19,31 +19,29 @@ dependencies = ["requests", "tqdm", "numpy", "IPython", "setproctitle"]
runtime_common = [
"aiohttp", "decord", "fastapi",
"hf_transfer", "huggingface_hub", "interegular", "modelscope",
"orjson", "outlines>=0.0.44,<0.1.0",
"packaging", "pillow", "prometheus-client>=0.20.0",
"psutil", "pydantic", "python-multipart",
"pyzmq>=25.1.2", "torchao>=0.7.0", "uvicorn", "uvloop",
"xgrammar>=0.1.10"
"orjson", "packaging", "pillow", "prometheus-client>=0.20.0",
"psutil", "pydantic", "python-multipart", "pyzmq>=25.1.2",
"torchao>=0.7.0", "uvicorn", "uvloop", "xgrammar>=0.1.10"
]
srt = [
"sglang[runtime_common]", "cuda-python",
"sgl-kernel>=0.0.3.post1", "torch", "vllm==0.6.4.post1",
"flashinfer==0.1.6"
"flashinfer==0.1.6", "outlines>=0.0.44,<0.1.0"
]
# HIP (Heterogeneous-computing Interface for Portability) for AMD
# => base docker rocm/vllm-dev:20241022, not from public vllm whl
srt_hip = ["sglang[runtime_common]", "torch", "vllm==0.6.3.post2.dev1"]
srt_hip = ["sglang[runtime_common]", "torch", "vllm==0.6.7.dev2", "outlines==0.1.11"]
# xpu is not enabled in public vllm and torch whl,
# need to follow https://docs.vllm.ai/en/latest/getting_started/xpu-installation.html to install vllm
srt_xpu = ["sglang[runtime_common]"]
srt_xpu = ["sglang[runtime_common]", "outlines>=0.0.44,<0.1.0"]
# For Intel Gaudi (device: hpu), follow the installation guide
# https://docs.vllm.ai/en/latest/getting_started/gaudi-installation.html
srt_hpu = ["sglang[runtime_common]"]
srt_hpu = ["sglang[runtime_common]", "outlines>=0.0.44,<0.1.0"]
# CPU: currently, there are no pre-built vllm wheels for CPU.
# To install vllm for CPU, please follow the instruction here:
# https://docs.vllm.ai/en/latest/getting_started/installation/cpu/index.html
srt_cpu = ["sglang[runtime_common]", "torch"]
srt_cpu = ["sglang[runtime_common]", "torch", "outlines>=0.0.44,<0.1.0"]
openai = ["openai>=1.0", "tiktoken"]
anthropic = ["anthropic>=0.20.0"]

View File

@@ -20,7 +20,6 @@ from typing import Dict, List, Optional, Tuple, Union
import interegular
import torch
from outlines.fsm.guide import RegexGuide
from outlines.fsm.json_schema import build_regex_from_schema
from outlines.models.transformers import TransformerTokenizer
from pydantic import BaseModel
@@ -29,6 +28,15 @@ from sglang.srt.constrained.base_grammar_backend import (
BaseGrammarObject,
)
from sglang.srt.constrained.outlines_jump_forward import OutlinesJumpForwardMap
from sglang.srt.utils import is_hip
is_hip_ = is_hip()
if is_hip_:
from outlines_core.fsm.json_schema import build_regex_from_schema
else:
from outlines.fsm.json_schema import build_regex_from_schema
logger = logging.getLogger(__name__)

View File

@@ -20,7 +20,7 @@ class CustomOp(nn.Module):
raise NotImplementedError
def forward_hip(self, *args, **kwargs):
raise NotImplementedError
return self.forward_native(*args, **kwargs)
def forward_xpu(self, *args, **kwargs):
return self.forward_native(*args, **kwargs)