[Doc] update base image URL (1. Replace conda with uv; 2. Integrate xpytorch and ops into the image.) (#146)
Signed-off-by: WeiJie_Hong <1462519292@qq.com>
This commit is contained in:
@@ -30,33 +30,40 @@ RUN apt-get update && \
|
||||
gcc g++ cmake libnuma-dev \
|
||||
wget tmux curl \
|
||||
software-properties-common && \
|
||||
apt remove -y python3.8 python3.8-minimal python3.8-dev && \
|
||||
apt purge -y python3.8 python3.8-minimal python3.8-dev && \
|
||||
apt-get clean && \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
|
||||
RUN conda init && conda create --name vllm_kunlun_0.10.1.1 python=3.10.15 -y && \
|
||||
conda run -n vllm_kunlun_0.10.1.1 && source activate vllm_kunlun_0.10.1.1 && \
|
||||
conda clean -afy
|
||||
RUN sed -i '/# >>> conda initialize >>>/,/# <<< conda initialize <<</d' ~/.bashrc && \
|
||||
rm -rf /root/miniconda3
|
||||
|
||||
RUN source activate vllm_kunlun_0.10.1.1 && \
|
||||
pip install torch==2.5.1+cu118 torchvision==0.20.1+cu118 torchaudio==2.5.1+cu118 --index-url https://download.pytorch.org/whl/cu118 && \
|
||||
pip cache purge && rm -rf /root/.cache/pip
|
||||
ENV PATH=/root/.local/bin/:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
|
||||
|
||||
RUN source activate vllm_kunlun_0.10.1.1 && \
|
||||
pip install setuptools==80.9.0 cuda_mock==1.1.1 hyperparameter==0.5.6 black==23.3.0 lark==1.2.2 \
|
||||
RUN curl -LsSf https://astral.sh/uv/install.sh | sh && \
|
||||
source $HOME/.local/bin/env
|
||||
|
||||
RUN uv venv --python 3.10 /opt/vllm_kunlun
|
||||
ENV PATH=/opt/vllm_kunlun/bin:${PATH}
|
||||
|
||||
RUN source /opt/vllm_kunlun/bin/activate && \
|
||||
uv pip install torch==2.5.1+cu118 torchvision==0.20.1+cu118 torchaudio==2.5.1+cu118 --index-url https://download.pytorch.org/whl/cu118 && \
|
||||
rm -rf /root/.cache/pip && uv cache clean
|
||||
|
||||
RUN uv pip install setuptools==80.9.0 cuda_mock==1.1.1 hyperparameter==0.5.6 black==23.3.0 lark==1.2.2 \
|
||||
networkx wheel PyYAML==6.0.2 ipython h5py regex==2024.9.11 colorama==0.4.6 pynvml==11.5.3 \
|
||||
nvidia-cuda-runtime-cu11 tabulate==0.9.0 openpyxl==3.1.5 pandas prettytable \
|
||||
nvidia-cuda-runtime-cu11 tabulate==0.9.0 openpyxl==3.1.5 pandas prettytable setproctitle\
|
||||
pytest==8.1.0 pytest-repeat==0.9.3 pytest-timeout==2.3.1 py==1.11.0 datasets==2.16.0 \
|
||||
pydantic==2.9.2 psutil==6.1.0 einops==0.8.0 \
|
||||
pytest-html==4.1.1 py-cpuinfo pytest-timeout==2.3.1 termcolor jsonlines==4.0.0 tiktoken \
|
||||
pytest-html==4.1.1 pytest-timeout==2.3.1 termcolor jsonlines==4.0.0 tiktoken \
|
||||
qwen_vl_utils filetype fastapi==0.112.1 fire py-cpuinfo transformers==4.57.1 \
|
||||
gradio==4.0.0 sse-starlette trl==0.8.6 uvicorn accelerate==0.30.1 --index-url https://pip.baidu-int.com/simple/ && \
|
||||
pip cache purge && rm -rf /root/.cache/pip
|
||||
rm -rf /root/.cache/pip && uv cache clean
|
||||
|
||||
ENV DEBIAN_FRONTEND=noninteractive \
|
||||
LANG=en_US.UTF-8 \
|
||||
LANGUAGE=en_US:en \
|
||||
TERM=xterm-256color \
|
||||
PATH=/root/miniconda/envs/vllm_kunlun_0.10.1.1/bin/:$PATH
|
||||
TERM=xterm-256color
|
||||
|
||||
WORKDIR /workspace
|
||||
RUN wget https://su.bcebos.com/v1/klx-sdk-release-public/xccl/resource/MLNX_OFED_LINUX-24.01-0.3.3.1-ubuntu20.04-x86_64.tgz && tar -xf MLNX_OFED_LINUX-24.01-0.3.3.1-ubuntu20.04-x86_64.tgz
|
||||
@@ -67,21 +74,16 @@ RUN apt-get install -y -o Dpkg::Options::='--force-confdef' -o Dpkg::Options::='
|
||||
apt-get clean && \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
|
||||
WORKDIR /workspace
|
||||
COPY vllm-kunlun/ /workspace/vllm-kunlun/
|
||||
RUN cd /workspace/vllm-kunlun && \
|
||||
bash dockerfile/install.sh && \
|
||||
rm -rf /workspace/vllm-kunlun/build/ && rm -rf /workspace/vllm-kunlun/vllm_kunlun.egg-info/
|
||||
|
||||
# xpu-smi tools
|
||||
WORKDIR /workspace
|
||||
COPY xre-Linux-x86_64-5.2.0.0/ /workspace/xre-Linux-x86_64-5.2.0.0/
|
||||
RUN mv /workspace/xre-Linux-x86_64-5.2.0.0/bin/* /usr/local/bin/ && mv /workspace/xre-Linux-x86_64-5.2.0.0/so/* /lib/x86_64-linux-gnu/ && \
|
||||
rm -rf /workspace/xre-Linux-x86_64-5.2.0.0/
|
||||
|
||||
ENV LD_LIBRARY_PATH=/opt/vllm_kunlun/lib:/opt/vllm_kunlun/lib64:/lib/x86_64-linux-gnu/:/opt/vllm_kunlun/xcudart/lib/
|
||||
|
||||
RUN rm -rf \
|
||||
/root/.cache \
|
||||
/root/.conda \
|
||||
/tmp/*
|
||||
|
||||
CMD ["/bin/bash"]
|
||||
CMD ["/bin/bash"]
|
||||
@@ -11,7 +11,7 @@ This document describes how to install vllm-kunlun manually.
|
||||
- vLLM (same version as vllm-kunlun)
|
||||
|
||||
## Setup environment using container
|
||||
We provide a clean, minimal base image for your use`wjie520/vllm_kunlun:v0.0.1`.You can pull it using the `docker pull` command.
|
||||
We provide clean, minimal base images for your use: `wjie520/vllm_kunlun:base_v0.0.2` and `wjie520/vllm_kunlun:base_mimo_v0.0.2` (only MIMO_V2 and GPT-OSS). You can pull them using the `docker pull` command.
|
||||
### Container startup script
|
||||
|
||||
:::::{tab-set}
|
||||
@@ -31,17 +31,15 @@ if [ $XPU_NUM -gt 0 ]; then
|
||||
done
|
||||
DOCKER_DEVICE_CONFIG="${DOCKER_DEVICE_CONFIG} --device=/dev/xpuctrl:/dev/xpuctrl"
|
||||
fi
|
||||
export build_image="wjie520/vllm_kunlun:v0.0.1"
|
||||
export build_image="wjie520/vllm_kunlun:base_v0.0.2"
|
||||
# or export build_image="iregistry.baidu-int.com/xmlir/xmlir_ubuntu_2004_x86_64:v0.32"
|
||||
|
||||
|
||||
docker run -itd ${DOCKER_DEVICE_CONFIG} \
|
||||
--net=host \
|
||||
--cap-add=SYS_PTRACE --security-opt seccomp=unconfined \
|
||||
--tmpfs /dev/shm:rw,nosuid,nodev,exec,size=32g \
|
||||
--cap-add=SYS_PTRACE \
|
||||
-v /home/users/vllm-kunlun:/home/vllm-kunlun \
|
||||
-v /usr/local/bin/xpu-smi:/usr/local/bin/xpu-smi \
|
||||
--name "$1" \
|
||||
-w /workspace \
|
||||
"$build_image" /bin/bash
|
||||
@@ -51,10 +49,7 @@ docker run -itd ${DOCKER_DEVICE_CONFIG} \
|
||||
## Install vLLM-kunlun
|
||||
### Install vLLM 0.11.0
|
||||
```
|
||||
conda activate vllm_kunlun_0.10.1.1
|
||||
# or conda activate python310_torch25_cuda
|
||||
|
||||
pip install vllm==0.11.0 --no-build-isolation --no-deps
|
||||
uv pip install vllm==0.11.0 --no-build-isolation --no-deps
|
||||
```
|
||||
### Build and Install
|
||||
Navigate to the vllm-kunlun directory and build the package:
|
||||
@@ -63,45 +58,12 @@ git clone https://github.com/baidu/vLLM-Kunlun
|
||||
|
||||
cd vLLM-Kunlun
|
||||
|
||||
pip install -r requirements.txt
|
||||
uv pip install -r requirements.txt
|
||||
|
||||
python setup.py build
|
||||
|
||||
python setup.py install
|
||||
|
||||
```
|
||||
### Replace eval_frame.py
|
||||
Copy the eval_frame.py patch:
|
||||
```
|
||||
cp vllm_kunlun/patches/eval_frame.py /root/miniconda/envs/vllm_kunlun_0.10.1.1/lib/python3.10/site-packages/torch/_dynamo/eval_frame.py
|
||||
```
|
||||
## Install the KL3-customized build of PyTorch
|
||||
```
|
||||
wget -O xpytorch-cp310-torch251-ubuntu2004-x64.run https://baidu-kunlun-public.su.bcebos.com/v1/baidu-kunlun-share/1130/xpytorch-cp310-torch251-ubuntu2004-x64.run?authorization=bce-auth-v1%2FALTAKypXxBzU7gg4Mk4K4c6OYR%2F2025-12-02T05%3A01%3A27Z%2F-1%2Fhost%2Ff3cf499234f82303891aed2bcb0628918e379a21e841a3fac6bd94afef491ff7
|
||||
bash xpytorch-cp310-torch251-ubuntu2004-x64.run
|
||||
```
|
||||
## Install the KL3-customized build of PyTorch(Only MIMO V2)
|
||||
```
|
||||
wget -O xpytorch-cp310-torch251-ubuntu2004-x64.run https://klx-sdk-release-public.su.bcebos.com/kunlun2aiak_output/1231/xpytorch-cp310-torch251-ubuntu2004-x64.run
|
||||
bash xpytorch-cp310-torch251-ubuntu2004-x64.run
|
||||
```
|
||||
|
||||
## Install custom ops
|
||||
```
|
||||
pip install "https://baidu-kunlun-public.su.bcebos.com/v1/baidu-kunlun-share/1130/xtorch_ops-0.1.2209%2B6752ad20-cp310-cp310-linux_x86_64.whl?authorization=bce-auth-v1%2FALTAKypXxBzU7gg4Mk4K4c6OYR%2F2025-12-05T06%3A18%3A00Z%2F-1%2Fhost%2F14936c2b7e7c557c1400e4c467c79f7a9217374a7aa4a046711ac4d948f460cd"
|
||||
```
|
||||
## Install custom ops(Only MIMO V2)
|
||||
```
|
||||
pip install "https://vllm-ai-models.bj.bcebos.com/v1/vLLM-Kunlun/ops/swa/xtorch_ops-0.1.2109%252B523cb26d-cp310-cp310-linux_x86_64.whl"
|
||||
```
|
||||
|
||||
## Install the KLX3 custom Triton build
|
||||
```
|
||||
pip install "https://cce-ai-models.bj.bcebos.com/v1/vllm-kunlun-0.11.0/triton-3.0.0%2Bb2cde523-cp310-cp310-linux_x86_64.whl"
|
||||
```
|
||||
## Install the AIAK custom ops library
|
||||
```
|
||||
pip install "https://cce-ai-models.bj.bcebos.com/XSpeedGate-whl/release_merge/20251219_152418/xspeedgate_ops-0.0.0-cp310-cp310-linux_x86_64.whl"
|
||||
```
|
||||
## Quick Start
|
||||
|
||||
|
||||
Reference in New Issue
Block a user