From 9a71500cfb267bef4f0c2bfc4ce60eba9fe5674d Mon Sep 17 00:00:00 2001
From: nvjullin
Date: Fri, 24 Oct 2025 08:47:04 +0800
Subject: [PATCH] Fixed aarch64 flash-mla (#12009)

---
 docker/Dockerfile | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/docker/Dockerfile b/docker/Dockerfile
index 76e6ba2ef..485a828b8 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -141,16 +141,14 @@ RUN cd /sgl-workspace/DeepEP && \
     NVSHMEM_DIR=${NVSHMEM_DIR} TORCH_CUDA_ARCH_LIST="${CHOSEN_TORCH_CUDA_ARCH_LIST}" pip install .
 
 # Install flashmla
-RUN if [ "$TARGETARCH" = "amd64" ]; then \
-    git clone https://github.com/deepseek-ai/FlashMLA.git flash-mla && \
+RUN git clone https://github.com/deepseek-ai/FlashMLA.git flash-mla && \
     cd flash-mla && \
     git checkout ${FLASHMLA_COMMIT} && \
     git submodule update --init --recursive && \
     if [ "$CUDA_VERSION" = "12.6.1" ]; then \
         export FLASH_MLA_DISABLE_SM100=1; \
     fi && \
-    pip install -v . ; \
-    fi
+    pip install -v . ;
 
 # Python tools
 RUN python3 -m pip install --no-cache-dir \