Files
xc-llm-kunlun/docs/Dockerfile.xpu
Xinyu Dong 7be26ca617 [Bugs] Fix Docs Build Problem (#97)
* [Bugs] Docs fixed

* Update contributing.md

* Update index.md

* fix lua to text

* fix title size
2026-01-10 05:55:40 +08:00

88 lines
3.8 KiB
Docker

#
# Copyright (c) 2025 Baidu Technologies Co., Ltd. All Rights Reserved.
# This file is a part of the vllm-kunlun project.
#
# This file is mainly Adapted from vllm-project/vllm/vllm/envs.py
# Copyright 2023 The vLLM team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Pinned base image from Baidu's internal registry (XMLIR, Ubuntu 20.04 x86_64).
FROM iregistry.baidu-int.com/xmlir/xmlir_ubuntu_2004_x86_64:v0.37_base
# Absolute shell path, and fail pipelines on the first failing command
# (hadolint DL4006) so piped RUN steps cannot silently mask upstream errors.
SHELL ["/bin/bash", "-o", "pipefail", "-c"]
# Build-time only: keeps apt non-interactive without polluting the runtime env.
ARG DEBIAN_FRONTEND=noninteractive
# Base OS tooling. update + install + list cleanup stay in one layer so the
# apt cache can never go stale and the lists never bloat the image
# (hadolint DL3009/DL3015). Packages are listed one per line, sorted, for
# readable diffs.
RUN apt-get update && \
    apt-get install -y --no-install-recommends \
        ca-certificates \
        cmake \
        curl \
        g++ \
        gcc \
        libnuma-dev \
        net-tools \
        software-properties-common \
        tmux \
        tzdata \
        vim \
        wget && \
    apt-get clean && \
    rm -rf /var/lib/apt/lists/*
# Create the pinned Python 3.10 environment for vLLM-Kunlun.
# NOTE: `source activate` inside a RUN does not persist to later layers; the
# env is put on PATH via the ENV instruction further down. The original also
# invoked `conda run -n <env>` with no command -- a no-op at best, an error at
# worst -- so it has been removed.
RUN conda init && \
    conda create --name vllm_kunlun_0.10.1.1 python=3.10.15 -y && \
    conda clean -afy
# Pinned PyTorch CUDA 11.8 wheels for the env. --no-cache-dir (hadolint DL3042)
# prevents the wheel cache from ever being written into this layer; the
# explicit purge/rm is kept as same-layer belt-and-braces cleanup.
RUN source activate vllm_kunlun_0.10.1.1 && \
    pip install --no-cache-dir \
        torch==2.5.1+cu118 torchvision==0.20.1+cu118 torchaudio==2.5.1+cu118 \
        --index-url https://download.pytorch.org/whl/cu118 && \
    pip cache purge && rm -rf /root/.cache/pip
# Python dependencies from the internal mirror, one per line and sorted for
# diffability. Fixes duplicates in the original list (pytest-timeout==2.3.1
# and py-cpuinfo were each listed twice). --no-cache-dir (hadolint DL3042)
# keeps the pip cache out of the layer.
RUN source activate vllm_kunlun_0.10.1.1 && \
    pip install --no-cache-dir \
        accelerate==0.30.1 \
        black==23.3.0 \
        colorama==0.4.6 \
        cuda_mock==1.1.1 \
        datasets==2.16.0 \
        einops==0.8.0 \
        fastapi==0.112.1 \
        filetype \
        fire \
        gradio==4.0.0 \
        h5py \
        hyperparameter==0.5.6 \
        ipython \
        jsonlines==4.0.0 \
        lark==1.2.2 \
        networkx \
        nvidia-cuda-runtime-cu11 \
        openpyxl==3.1.5 \
        pandas \
        prettytable \
        psutil==6.1.0 \
        py==1.11.0 \
        py-cpuinfo \
        pydantic==2.9.2 \
        pynvml==11.5.3 \
        pytest==8.1.0 \
        pytest-html==4.1.1 \
        pytest-repeat==0.9.3 \
        pytest-timeout==2.3.1 \
        PyYAML==6.0.2 \
        qwen_vl_utils \
        regex==2024.9.11 \
        setuptools==80.9.0 \
        sse-starlette \
        tabulate==0.9.0 \
        termcolor \
        tiktoken \
        transformers==4.57.1 \
        trl==0.8.6 \
        uvicorn \
        wheel \
        --index-url https://pip.baidu-int.com/simple/ && \
    pip cache purge && rm -rf /root/.cache/pip
# Runtime environment: locale, terminal, and the conda env's bin dir on PATH
# (this is what makes the vllm_kunlun_0.10.1.1 env the default Python).
# DEBIAN_FRONTEND is intentionally NOT set here: it is a build-time-only knob
# already provided via ARG above, and baking it into the runtime env silently
# changes apt behavior for anyone using the container interactively.
ENV LANG=en_US.UTF-8 \
    LANGUAGE=en_US:en \
    TERM=xterm-256color \
    PATH=/root/miniconda/envs/vllm_kunlun_0.10.1.1/bin/:$PATH
WORKDIR /workspace
# Mellanox OFED user-space install. This fixes two defects in the original:
#  1. `apt-get install` ran in a layer with no `apt-get update`, after the apt
#     lists were purged by an earlier layer -- that fails with
#     "Unable to locate package".
#  2. The OFED tarball was downloaded in its own layer and only removed in a
#     later one, so its full size remained in the image forever. Download,
#     extract, install, and cleanup now all happen in a single layer.
RUN apt-get update && \
    apt-get install -y -o Dpkg::Options::='--force-confdef' -o Dpkg::Options::='--force-confold' \
        flex swig tk debhelper libltdl-dev libusb-1.0-0 tcl chrpath pkg-config graphviz bison && \
    wget https://su.bcebos.com/v1/klx-sdk-release-public/xccl/resource/MLNX_OFED_LINUX-24.01-0.3.3.1-ubuntu20.04-x86_64.tgz && \
    tar -xf MLNX_OFED_LINUX-24.01-0.3.3.1-ubuntu20.04-x86_64.tgz && \
    cd MLNX_OFED_LINUX-24.01-0.3.3.1-ubuntu20.04-x86_64 && \
    ./mlnxofedinstall --user-space-only --skip-distro-check --without-fw-update --force && \
    cd /workspace && \
    rm -rf /workspace/MLNX_OFED_LINUX-24.01-0.3.3.1-ubuntu20.04-x86_64* && \
    apt-get clean && \
    rm -rf /var/lib/apt/lists/*
COPY vllm-kunlun/ /workspace/vllm-kunlun/
# Build and install vllm-kunlun via the project's install script. WORKDIR
# replaces `RUN cd … && …` (hadolint DL3003); build artifacts are removed in
# the same layer so they never reach the final image.
WORKDIR /workspace/vllm-kunlun
RUN bash dockerfile/install.sh && \
    rm -rf /workspace/vllm-kunlun/build/ /workspace/vllm-kunlun/vllm_kunlun.egg-info/
# xpu-smi tools: copy the XRE binaries and shared libraries straight to their
# final destinations. The original COPY-into-/workspace followed by `mv` in a
# separate RUN layer left a second full copy of every file in the COPY layer,
# bloating the image; two targeted COPYs produce the same final filesystem.
WORKDIR /workspace
COPY xre-Linux-x86_64-5.2.0.0/bin/ /usr/local/bin/
COPY xre-Linux-x86_64-5.2.0.0/so/ /lib/x86_64-linux-gnu/
# Final tidy-up of caches and temp files before the default command.
# NOTE(review): because every RUN creates a new layer, this `rm -rf` does NOT
# shrink the earlier layers that wrote these paths -- it only hides the files
# from the final filesystem. To actually reduce image size, these removals
# would have to happen in the same layers that create the data (or the image
# would need to be squashed / rebuilt multi-stage).
RUN rm -rf \
/root/.cache \
/root/.conda \
/tmp/*
# Interactive shell by default (exec/JSON form so bash runs as PID 1).
CMD ["/bin/bash"]