Fix dependency (#3813)

This commit is contained in:
Lianmin Zheng
2025-02-24 03:50:58 -08:00
committed by GitHub
parent c979580817
commit 27a46317b6
6 changed files with 43 additions and 31 deletions

View File

@@ -17,32 +17,54 @@ dependencies = ["requests", "tqdm", "numpy", "IPython", "setproctitle"]
[project.optional-dependencies]
runtime_common = [
    "aiohttp",
    "decord",
    "fastapi",
    "hf_transfer",
    "huggingface_hub",
    "interegular",
    "modelscope",
    "orjson",
    "packaging",
    "pillow",
    "prometheus-client>=0.20.0",
    "psutil",
    "pydantic",
    "python-multipart",
    "pyzmq>=25.1.2",
    "torchao>=0.7.0",
    "uvicorn",
    "uvloop",
    "xgrammar==0.1.10",
    "ninja",
    "transformers==4.48.3",
]
srt = [
    "sglang[runtime_common]",
    "sgl-kernel>=0.0.3.post6",
    "flashinfer_python>=0.2.1.post2",
    "torch==2.5.1",
    "vllm>=0.6.4.post1,<=0.7.2",
    "cuda-python",
    "outlines>=0.0.44,<=0.1.11",
]

# HIP (Heterogeneous-computing Interface for Portability) for AMD
# => base docker rocm/vllm-dev:20241022, not from public vllm whl
srt_hip = ["sglang[runtime_common]", "sgl-kernel>=0.0.3.post1", "torch", "vllm==0.6.7.dev2", "outlines==0.1.11"]

# xpu is not enabled in public vllm and torch whl,
# need to follow https://docs.vllm.ai/en/latest/getting_started/xpu-installation.html to install vllm
srt_xpu = ["sglang[runtime_common]", "outlines>=0.0.44,<=0.1.11"]

# For Intel Gaudi (device: hpu) follow the installation guide
# https://docs.vllm.ai/en/latest/getting_started/gaudi-installation.html
srt_hpu = ["sglang[runtime_common]", "outlines>=0.0.44,<=0.1.11"]

# CPU: currently, there are no pre-built vllm wheels for CPU.
# To install vllm for CPU, please follow the instruction here:
# https://docs.vllm.ai/en/latest/getting_started/installation/cpu/index.html
srt_cpu = ["sglang[runtime_common]", "outlines>=0.0.44,<=0.1.11", "torch"]

openai = ["openai>=1.0", "tiktoken"]
anthropic = ["anthropic>=0.20.0"]