chore: bump v0.3.6.post3 (#2259)
@@ -1,4 +1,4 @@
-.PHONY: check-deps install-deps format
+.PHONY: check-deps install-deps format update

 check-deps:
 	@command -v isort >/dev/null 2>&1 || (echo "Installing isort..." && pip install isort)
@@ -10,3 +10,29 @@ install-deps:
 format: check-deps
 	@echo "Formatting modified Python files..."
 	git diff --name-only --diff-filter=M | grep '\.py$$' | xargs -I {} sh -c 'isort {} && black {}'
+
+FILES_TO_UPDATE = docker/Dockerfile.rocm \
+                  python/pyproject.toml \
+                  python/sglang/version.py \
+                  docs/developer/setup_github_runner.md \
+                  docs/start/install.md
+
+update:
+	@if [ -z "$(filter-out $@,$(MAKECMDGOALS))" ]; then \
+		echo "Version required. Usage: make update <new_version>"; \
+		exit 1; \
+	fi
+	@OLD_VERSION=$$(grep "version" python/sglang/version.py | cut -d '"' -f2); \
+	NEW_VERSION=$(filter-out $@,$(MAKECMDGOALS)); \
+	echo "Updating version from $$OLD_VERSION to $$NEW_VERSION"; \
+	for file in $(FILES_TO_UPDATE); do \
+		if [ "$(shell uname)" = "Darwin" ]; then \
+			sed -i '' -e "s/$$OLD_VERSION/$$NEW_VERSION/g" $$file; \
+		else \
+			sed -i -e "s/$$OLD_VERSION/$$NEW_VERSION/g" $$file; \
+		fi \
+	done; \
+	echo "Version update complete"
+
+%:
+	@:

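A usage sketch for the new `update` target, grounded in the Makefile above: the bare version argument becomes an extra make goal that the catch-all `%:` rule silently absorbs, so make does not complain about an unknown target.

```bash
# Rewrite the old version string to the new one in every file in FILES_TO_UPDATE.
make update 0.3.6.post3

# Omitting the version prints the usage hint and exits non-zero:
#   Version required. Usage: make update <new_version>
make update
```
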
@@ -23,31 +23,32 @@ RUN pip3 install datamodel_code_generator

 WORKDIR /sgl-workspace

+ARG CUDA_VERSION
 RUN python3 -m pip install --upgrade pip setuptools wheel html5lib six \
     && git clone --depth=1 https://github.com/sgl-project/sglang.git \
     && cd sglang \
     && if [ "$BUILD_TYPE" = "srt" ]; then \
-        python3 -m pip --no-cache-dir install -e "python[srt]"; \
+        if [ "$CUDA_VERSION" = "12.1.1" ]; then \
+            python3 -m pip --no-cache-dir install -e "python[srt]" --find-links https://flashinfer.ai/whl/cu121/torch2.4/flashinfer/; \
+        elif [ "$CUDA_VERSION" = "12.4.1" ]; then \
+            python3 -m pip --no-cache-dir install -e "python[srt]" --find-links https://flashinfer.ai/whl/cu124/torch2.4/flashinfer/; \
+        elif [ "$CUDA_VERSION" = "11.8.0" ]; then \
+            python3 -m pip --no-cache-dir install -e "python[srt]" --find-links https://flashinfer.ai/whl/cu118/torch2.4/flashinfer/; \
+        else \
+            echo "Unsupported CUDA version: $CUDA_VERSION" && exit 1; \
+        fi; \
     else \
-        python3 -m pip --no-cache-dir install -e "python[all]"; \
+        if [ "$CUDA_VERSION" = "12.1.1" ]; then \
+            python3 -m pip --no-cache-dir install -e "python[all]" --find-links https://flashinfer.ai/whl/cu121/torch2.4/flashinfer/; \
+        elif [ "$CUDA_VERSION" = "12.4.1" ]; then \
+            python3 -m pip --no-cache-dir install -e "python[all]" --find-links https://flashinfer.ai/whl/cu124/torch2.4/flashinfer/; \
+        elif [ "$CUDA_VERSION" = "11.8.0" ]; then \
+            python3 -m pip --no-cache-dir install -e "python[all]" --find-links https://flashinfer.ai/whl/cu118/torch2.4/flashinfer/; \
+        else \
+            echo "Unsupported CUDA version: $CUDA_VERSION" && exit 1; \
+        fi; \
     fi

-ARG CUDA_VERSION
-RUN if [ "$CUDA_VERSION" = "12.1.1" ]; then \
-        export CUDA_IDENTIFIER=cu121 && \
-        python3 -m pip --no-cache-dir install flashinfer -i https://flashinfer.ai/whl/cu121/torch2.4/; \
-    elif [ "$CUDA_VERSION" = "12.4.1" ]; then \
-        export CUDA_IDENTIFIER=cu124 && \
-        python3 -m pip --no-cache-dir install flashinfer -i https://flashinfer.ai/whl/cu124/torch2.4/; \
-    elif [ "$CUDA_VERSION" = "11.8.0" ]; then \
-        export CUDA_IDENTIFIER=cu118 && \
-        python3 -m pip install torch==2.4.0 --index-url https://download.pytorch.org/whl/cu118 && \
-        python3 -m pip --no-cache-dir install flashinfer -i https://flashinfer.ai/whl/cu118/torch2.4/; \
-    else \
-        echo "Unsupported CUDA version: $CUDA_VERSION" && exit 1; \
-    fi
-
 RUN python3 -m pip cache purge
-

 ENV DEBIAN_FRONTEND=interactive

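A hedged build sketch for the CUDA Dockerfile hunk above. `BUILD_TYPE` and `CUDA_VERSION` are the build args the file actually branches on; the image tag is an illustrative assumption, not taken from the commit.

```bash
# Build the server-only (srt) flavor against CUDA 12.4.1. Passing a CUDA_VERSION
# other than 12.1.1 / 12.4.1 / 11.8.0 hits the "Unsupported CUDA version" branch
# and fails the build.
docker build --build-arg BUILD_TYPE=srt --build-arg CUDA_VERSION=12.4.1 \
    -t sglang:dev-cu124 .
```
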
@@ -1,5 +1,5 @@
 # Usage (to build SGLang ROCm docker image):
-# docker build --build-arg SGL_BRANCH=v0.3.6.post2 -t v0.3.6.post2-rocm620 -f Dockerfile.rocm .
+# docker build --build-arg SGL_BRANCH=v0.3.6.post3 -t v0.3.6.post3-rocm620 -f Dockerfile.rocm .

 # default base image
 ARG BASE_IMAGE="rocm/vllm-dev:20241022"

@@ -11,9 +11,9 @@ docker pull nvidia/cuda:12.1.1-devel-ubuntu22.04
 # Nvidia
 docker run --shm-size 128g -it -v /tmp/huggingface:/hf_home --gpus all nvidia/cuda:12.1.1-devel-ubuntu22.04 /bin/bash
 # AMD
-docker run --rm --device=/dev/kfd --device=/dev/dri --group-add video --shm-size 128g -it -v /tmp/huggingface:/hf_home lmsysorg/sglang:v0.3.6.post2-rocm620 /bin/bash
+docker run --rm --device=/dev/kfd --device=/dev/dri --group-add video --shm-size 128g -it -v /tmp/huggingface:/hf_home lmsysorg/sglang:v0.3.6.post3-rocm620 /bin/bash
 # AMD just the last 2 GPUs
-docker run --rm --device=/dev/kfd --device=/dev/dri/renderD176 --device=/dev/dri/renderD184 --group-add video --shm-size 128g -it -v /tmp/huggingface:/hf_home lmsysorg/sglang:v0.3.6.post2-rocm620 /bin/bash
+docker run --rm --device=/dev/kfd --device=/dev/dri/renderD176 --device=/dev/dri/renderD184 --group-add video --shm-size 128g -it -v /tmp/huggingface:/hf_home lmsysorg/sglang:v0.3.6.post3-rocm620 /bin/bash
 ```

 ### Step 2: Configure the runner by `config.sh`

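One hedged aside on the `-v /tmp/huggingface:/hf_home` mount above (an assumption about intent, not something this commit states): the Hugging Face cache only lands in that volume if the process inside the container is pointed at it.

```bash
# Inside the container: send the Hugging Face cache to the mounted volume.
export HF_HOME=/hf_home
```
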
@@ -5,10 +5,7 @@ You can install SGLang using any of the methods below.
 ## Method 1: With pip
 ```
 pip install --upgrade pip
-pip install "sglang[all]"
-
-# Install FlashInfer accelerated kernels (CUDA only for now)
-pip install flashinfer -i https://flashinfer.ai/whl/cu121/torch2.4/
+pip install "sglang[all]" --find-links https://flashinfer.ai/whl/cu121/torch2.4/flashinfer/
 ```

 Note: Please check the [FlashInfer installation doc](https://docs.flashinfer.ai/installation.html) to install the proper version according to your PyTorch and CUDA versions.

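A hedged variant note: install.md pins the cu121 index, but the Dockerfile hunk in this same commit uses cu118/cu121/cu124 indexes, so pick the one matching your CUDA toolkit, e.g.:

```bash
# CUDA 12.4 toolchain; substitute cu121 or cu118 as appropriate.
pip install "sglang[all]" --find-links https://flashinfer.ai/whl/cu124/torch2.4/flashinfer/
```
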
@@ -16,14 +13,11 @@ Note: Please check the [FlashInfer installation doc](https://docs.flashinfer.ai/
 ## Method 2: From source
 ```
 # Use the last release branch
-git clone -b v0.3.6.post2 https://github.com/sgl-project/sglang.git
+git clone -b v0.3.6.post3 https://github.com/sgl-project/sglang.git
 cd sglang

 pip install --upgrade pip
-pip install -e "python[all]"
-
-# Install FlashInfer accelerated kernels (CUDA only for now)
-pip install flashinfer -i https://flashinfer.ai/whl/cu121/torch2.4/
+pip install -e "python[all]" --find-links https://flashinfer.ai/whl/cu121/torch2.4/flashinfer/
 ```

 Note: Please check the [FlashInfer installation doc](https://docs.flashinfer.ai/installation.html) to install the proper version according to your PyTorch and CUDA versions.

@@ -32,7 +26,7 @@ Note: To AMD ROCm system with Instinct/MI GPUs, do following instead:

 ```
 # Use the last release branch
-git clone -b v0.3.6.post2 https://github.com/sgl-project/sglang.git
+git clone -b v0.3.6.post3 https://github.com/sgl-project/sglang.git
 cd sglang

 pip install --upgrade pip

@@ -57,7 +51,7 @@ docker run --gpus all \
 Note: To AMD ROCm system with Instinct/MI GPUs, it is recommended to use `docker/Dockerfile.rocm` to build images, example and usage as below:

 ```bash
-docker build --build-arg SGL_BRANCH=v0.3.6.post2 -t v0.3.6.post2-rocm620 -f Dockerfile.rocm .
+docker build --build-arg SGL_BRANCH=v0.3.6.post3 -t v0.3.6.post3-rocm620 -f Dockerfile.rocm .

 alias drun='docker run -it --rm --network=host --device=/dev/kfd --device=/dev/dri --ipc=host \
     --shm-size 16G --group-add video --cap-add=SYS_PTRACE --security-opt seccomp=unconfined \

@@ -66,11 +60,11 @@ alias drun='docker run -it --rm --network=host --device=/dev/kfd --device=/dev/d
 drun -p 30000:30000 \
     -v ~/.cache/huggingface:/root/.cache/huggingface \
     --env "HF_TOKEN=<secret>" \
-    v0.3.6.post2-rocm620 \
+    v0.3.6.post3-rocm620 \
     python3 -m sglang.launch_server --model-path meta-llama/Llama-3.1-8B-Instruct --host 0.0.0.0 --port 30000

 # Till flashinfer backend available, --attention-backend triton --sampling-backend pytorch are set by default
-drun v0.3.6.post2-rocm620 python3 -m sglang.bench_one_batch --batch-size 32 --input 1024 --output 128 --model amd/Meta-Llama-3.1-8B-Instruct-FP8-KV --tp 8 --quantization fp8
+drun v0.3.6.post3-rocm620 python3 -m sglang.bench_one_batch --batch-size 32 --input 1024 --output 128 --model amd/Meta-Llama-3.1-8B-Instruct-FP8-KV --tp 8 --quantization fp8
 ```

 ## Method 4: Using docker compose

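A hedged smoke test for the `launch_server` example above, assuming the server is up and reachable on port 30000 (`/generate` is the runtime's native completion endpoint):

```bash
# One completion request against the running server.
curl http://localhost:30000/generate \
    -H "Content-Type: application/json" \
    -d '{"text": "The capital of France is", "sampling_params": {"max_new_tokens": 16}}'
```
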
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
|
||||
|
||||
[project]
|
||||
name = "sglang"
|
||||
version = "0.3.6.post2"
|
||||
version = "0.3.6.post3"
|
||||
description = "SGLang is yet another fast serving framework for large language models and vision language models."
|
||||
readme = "README.md"
|
||||
requires-python = ">=3.8"
|
||||
@@ -23,7 +23,7 @@ runtime_common = ["aiohttp", "decord", "fastapi",
|
||||
"psutil", "pydantic", "python-multipart",
|
||||
"pyzmq>=25.1.2", "torchao", "uvicorn", "uvloop",
|
||||
"xgrammar>=0.1.4"]
|
||||
srt = ["sglang[runtime_common]", "torch", "vllm>=0.6.3.post1", "cuda-python"]
|
||||
srt = ["sglang[runtime_common]", "torch", "vllm>=0.6.3.post1", "cuda-python", "flashinfer>=0.1.6"]
|
||||
|
||||
# HIP (Heterogeneous-computing Interface for Portability) for AMD
|
||||
# => base docker rocm/vllm-dev:20241022, not from public vllm whl
|
||||
|
||||
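Since `flashinfer>=0.1.6` is now a hard dependency of the `srt` extra, resolving it needs the FlashInfer wheel index; every install command in this commit passes it via `--find-links`. A minimal sketch:

```bash
# Match the index (cu118 / cu121 / cu124) to your CUDA version, as in the
# Dockerfile hunk above.
pip install "sglang[srt]" --find-links https://flashinfer.ai/whl/cu121/torch2.4/flashinfer/
```
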
@@ -1 +1 @@
-__version__ = "0.3.6.post2"
+__version__ = "0.3.6.post3"

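A quick post-bump check (assuming, as is conventional, that the package re-exports `__version__` from `sglang/version.py`):

```bash
python3 -c 'import sglang; print(sglang.__version__)'   # expect: 0.3.6.post3
```
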
@@ -3,9 +3,8 @@ Install the dependency in CI.
 """

 pip install --upgrade pip
-pip install -e "python[all]"
+pip install -e "python[all]" --find-links https://flashinfer.ai/whl/cu121/torch2.4/flashinfer/
 pip install transformers==4.45.2 sentence_transformers accelerate peft
-pip install flashinfer -i https://flashinfer.ai/whl/cu121/torch2.4/ --force-reinstall
 # for compling eagle kernels
 pip install cutex
 # for compling xgrammar kernels