ROCm base image update (#2692)

Co-authored-by: wunhuang <wunhuang@amd.com>
This commit is contained in:
kk
2025-01-01 12:12:19 +08:00
committed by GitHub
parent 0d8d97b8e6
commit b6e0cfb5e1
2 changed files with 8 additions and 2 deletions

View File

@@ -2,7 +2,7 @@
# docker build --build-arg SGL_BRANCH=v0.4.1.post3 -t v0.4.1.post3-rocm620 -f Dockerfile.rocm .
# default base image
ARG BASE_IMAGE="rocm/vllm-dev:20241022"
ARG BASE_IMAGE="rocm/vllm-dev:20241031-tuned"
FROM $BASE_IMAGE AS base
USER root
@@ -33,6 +33,12 @@ RUN git clone ${SGL_REPO} \
RUN cp -r /sgl-workspace/sglang /sglang
RUN python -m pip cache purge
RUN pip install IPython \
&& pip install orjson \
&& pip install python-multipart \
&& pip install torchao \
&& pip install pybind11
RUN pip uninstall -y triton
RUN git clone ${TRITON_REPO} \
&& cd triton \

View File

@@ -27,7 +27,7 @@ srt = ["sglang[runtime_common]", "torch", "vllm>=0.6.3.post1,<=0.6.4.post1", "cu
# HIP (Heterogeneous-computing Interface for Portability) for AMD
# => base docker rocm/vllm-dev:20241022, not from public vllm whl
srt_hip = ["sglang[runtime_common]", "torch", "vllm==0.6.3.dev13"]
srt_hip = ["sglang[runtime_common]", "torch", "vllm==0.6.3.post2.dev1"]
# xpu is not enabled in public vllm and torch whl,
# need to follow https://docs.vllm.ai/en/latest/getting_started/xpu-installation.html to install vllm
srt_xpu = ["sglang[runtime_common]"]