Add flashmla and fast hadamard transform to Dockerfile (#11235)
@@ -1,9 +1,12 @@
ARG CUDA_VERSION=12.9.1
FROM nvidia/cuda:${CUDA_VERSION}-cudnn-devel-ubuntu22.04 AS base
ARG TARGETARCH

ARG BUILD_TYPE=all
ARG BRANCH_TYPE=remote
ARG DEEPEP_COMMIT=9af0e0d0e74f3577af1979c9b9e1ac2cad0104ee
ARG FLASHMLA_COMMIT=1408756a88e52a25196b759eaf8db89d2b51b5a1
ARG FAST_HADAMARD_TRANSFORM_COMMIT=f3cdeed95b0f3284b5df3da9b3311d3d0600ce2b
ARG CMAKE_BUILD_PARALLEL_LEVEL=2
ARG SGL_KERNEL_VERSION=0.3.12
ENV DEBIAN_FRONTEND=noninteractive \
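The commit pins above are ordinary build args, so they can be overridden at build time without editing the Dockerfile. A minimal sketch (the sglang:dev tag is illustrative, not part of this commit):

    # sglang:dev is an illustrative tag, not defined by this commit
    docker build \
      --build-arg FLASHMLA_COMMIT=1408756a88e52a25196b759eaf8db89d2b51b5a1 \
      --build-arg FAST_HADAMARD_TRANSFORM_COMMIT=f3cdeed95b0f3284b5df3da9b3311d3d0600ce2b \
      -t sglang:dev .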
@@ -136,6 +139,27 @@ RUN cd /sgl-workspace/DeepEP && \
    esac && \
    NVSHMEM_DIR=${NVSHMEM_DIR} TORCH_CUDA_ARCH_LIST="${CHOSEN_TORCH_CUDA_ARCH_LIST}" pip install .

# Install flashmla
RUN if [ "$TARGETARCH" = "amd64" ]; then \
    git clone https://github.com/deepseek-ai/FlashMLA.git flash-mla && \
    cd flash-mla && \
    git checkout ${FLASHMLA_COMMIT} && \
    git submodule update --init --recursive && \
    pip install -v . ; \
    fi

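A quick import smoke test for the built image (a sketch; it assumes the wheel installs a flash_mla module exposing the entry points documented in the upstream FlashMLA README, and reuses the illustrative sglang:dev tag):

    # assumes the upstream README's import names; sglang:dev is illustrative
    docker run --rm --gpus all sglang:dev \
      python3 -c "from flash_mla import get_mla_metadata, flash_mla_with_kvcache; print('flash_mla OK')"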
# Install fast-hadamard-transform
RUN if [ "$TARGETARCH" = "amd64" ]; then \
    git clone https://github.com/Dao-AILab/fast-hadamard-transform && \
    cd fast-hadamard-transform && \
    git checkout ${FAST_HADAMARD_TRANSFORM_COMMIT} && \
    pip install . ; \
    fi

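Likewise for the Hadamard kernels (a sketch; it assumes the hadamard_transform(x, scale) API from Dao-AILab's README, which operates on a CUDA tensor whose last dimension is a supported size such as a power of two):

    # functional check; output shape matches input, here torch.Size([4, 256])
    docker run --rm --gpus all sglang:dev python3 -c "import torch; from fast_hadamard_transform import hadamard_transform; x = torch.randn(4, 256, device='cuda', dtype=torch.float16); print(hadamard_transform(x, scale=256 ** -0.5).shape)"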
# Install tilelang
RUN if [ "$TARGETARCH" = "amd64" ]; then \
    pip install tilelang==0.1.6.post1 ; \
    fi

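And a check that the tilelang pin landed (pip metadata only, no GPU required):

    # should report Version: 0.1.6.post1
    docker run --rm sglang:dev python3 -m pip show tilelang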
# Python tools
RUN python3 -m pip install --no-cache-dir \