From 292a867ad93bed5ed6ab1e514135a2ef0793d156 Mon Sep 17 00:00:00 2001
From: Baizhou Zhang
Date: Sun, 5 Oct 2025 21:31:28 -0700
Subject: [PATCH] Add flashmla and fast hadamard transform to Dockerfile
 (#11235)

---
 docker/Dockerfile | 24 ++++++++++++++++++++++++
 1 file changed, 24 insertions(+)

diff --git a/docker/Dockerfile b/docker/Dockerfile
index 4aef6cf13..75e3afabc 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -1,9 +1,12 @@
 ARG CUDA_VERSION=12.9.1
 FROM nvidia/cuda:${CUDA_VERSION}-cudnn-devel-ubuntu22.04 AS base
+ARG TARGETARCH
 ARG BUILD_TYPE=all
 ARG BRANCH_TYPE=remote
 ARG DEEPEP_COMMIT=9af0e0d0e74f3577af1979c9b9e1ac2cad0104ee
+ARG FLASHMLA_COMMIT=1408756a88e52a25196b759eaf8db89d2b51b5a1
+ARG FAST_HADAMARD_TRANSFORM_COMMIT=f3cdeed95b0f3284b5df3da9b3311d3d0600ce2b
 ARG CMAKE_BUILD_PARALLEL_LEVEL=2
 ARG SGL_KERNEL_VERSION=0.3.12
 ENV DEBIAN_FRONTEND=noninteractive \
@@ -136,6 +139,27 @@ RUN cd /sgl-workspace/DeepEP && \
     esac && \
     NVSHMEM_DIR=${NVSHMEM_DIR} TORCH_CUDA_ARCH_LIST="${CHOSEN_TORCH_CUDA_ARCH_LIST}" pip install .
 
+# Install flashmla
+RUN if [ "$TARGETARCH" = "amd64" ]; then \
+    git clone https://github.com/deepseek-ai/FlashMLA.git flash-mla && \
+    cd flash-mla && \
+    git checkout ${FLASHMLA_COMMIT} && \
+    git submodule update --init --recursive && \
+    pip install -v . ; \
+    fi
+
+# Install fast-hadamard-transform
+RUN if [ "$TARGETARCH" = "amd64" ]; then \
+    git clone https://github.com/Dao-AILab/fast-hadamard-transform && \
+    cd fast-hadamard-transform && \
+    git checkout ${FAST_HADAMARD_TRANSFORM_COMMIT} && \
+    pip install . ; \
+    fi
+
+# Install tilelang
+RUN if [ "$TARGETARCH" = "amd64" ]; then \
+    pip install tilelang==0.1.6.post1 ; \
+    fi
 
 # Python tools
 RUN python3 -m pip install --no-cache-dir \