From ffd195b0fe5f645fbf19d1a217c9f1afe48aaff9 Mon Sep 17 00:00:00 2001 From: SILONG ZENG <2609716663@qq.com> Date: Mon, 23 Mar 2026 20:14:42 +0800 Subject: [PATCH] [Bugfix] Remove conflicting triton after vllm-ascend install on x86 (#7497) ### What this PR does / why we need it? This PR fixes the x86 image issue where both `triton` and `triton-ascend` are installed in the final environment. - https://github.com/vllm-project/vllm-ascend/issues/7359 We confirmed the root cause is not that `triton` fails to uninstall after the upstream `vllm` installation. Instead, during the `vllm-ascend` installation step, pip resolves and installs upstream `triton` again alongside `triton-ascend` on x86 platforms. This leads to module conflicts at runtime because both distributions provide the `triton` Python package. To fix this, the PR updates all Dockerfiles to remove upstream `triton` immediately after installing `vllm-ascend`, while keeping the `triton-ascend` version resolved by `vllm-ascend` itself. Affected files: - `Dockerfile` - `Dockerfile.a3` - `Dockerfile.310p` - `Dockerfile.openEuler` - `Dockerfile.a3.openEuler` - `Dockerfile.310p.openEuler` ### Does this PR introduce _any_ user-facing change? Yes. For x86 container images, the final Python environment will no longer keep upstream `triton` alongside `triton-ascend`. This avoids importing the wrong Triton package and fixes related runtime failures. ### How was this patch tested? Root cause validation was performed by reproducing the installation flow locally and checking the package state after each step. 
Observed during `vllm-ascend` installation on x86: - `triton-ascend` was installed as expected - upstream `triton` was also installed again in the same step ``` bash export PIP_EXTRA_INDEX_URL=https://mirrors.huaweicloud.com/ascend/repos/pypi && \ source /usr/local/Ascend/ascend-toolkit/set_env.sh && \ source /usr/local/Ascend/nnal/atb/set_env.sh && \ export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/Ascend/ascend-toolkit/latest/`uname -i`-linux/devlib && \ python3 -m pip install -v -e /vllm-workspace/vllm-ascend/ --extra-index https://download.pytorch.org/whl/cpu/ && \ python3 -m pip cache purge Successfully installed aiofiles-25.1.0 arctic-inference-0.1.1 blinker-1.9.0 cmake-4.2.3 fastapi-0.123.10 flask-3.1.3 h2-4.3.0 hpack-4.1.0 hypercorn-0.18.0 hyperframe-6.1.0 itsdangerous-2.2.0 numpy-1.26.4 opencv-python-headless-4.11.0.86 pandas-3.0.1 pandas-stubs-3.0.0.260204 priority-2.0.0 pybind11-3.0.2 python-dateutil-2.9.0.post0 quart-0.20.0 setuptools-scm-9.2.2 six-1.17.0 starlette-0.50.0 torch-2.9.0+cpu torch-npu-2.9.0 torchaudio-2.9.0+cpu torchvision-0.24.0+cpu triton-3.6.0 triton-ascend-3.2.0 vllm_ascend-0.17.0rc2.dev51+geb92e7d50 werkzeug-3.1.6 wheel-0.46.3 wsproto-1.3.2 xgrammar-0.1.32 WARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager, possibly rendering your system unusable. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv. Use the --root-user-action option if you know what you are doing and want to suppress this warning. 
Files removed: 423 (1025.9 MB) Directories removed: 5 ``` - vLLM version: v0.17.0 - vLLM main: https://github.com/vllm-project/vllm/commit/8b6325758cce5f9c36d38f2462edbd368b97a07c --------- Signed-off-by: MrZ20 <2609716663@qq.com> --- Dockerfile | 2 ++ Dockerfile.310p | 2 ++ Dockerfile.310p.openEuler | 2 ++ Dockerfile.a3 | 2 ++ Dockerfile.a3.openEuler | 2 ++ Dockerfile.openEuler | 2 ++ 6 files changed, 12 insertions(+) diff --git a/Dockerfile b/Dockerfile index a814c483..6132ffa5 100644 --- a/Dockerfile +++ b/Dockerfile @@ -59,11 +59,13 @@ RUN VLLM_TARGET_DEVICE="empty" python3 -m pip install -v -e /vllm-workspace/vllm # Install vllm-ascend # Append `libascend_hal.so` path (devlib) to LD_LIBRARY_PATH +# Installing vllm-ascend on x86 can pull upstream triton back in alongside triton-ascend. Remove it immediately after this step. RUN export PIP_EXTRA_INDEX_URL=https://mirrors.huaweicloud.com/ascend/repos/pypi && \ source /usr/local/Ascend/ascend-toolkit/set_env.sh && \ source /usr/local/Ascend/nnal/atb/set_env.sh && \ export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/Ascend/ascend-toolkit/latest/`uname -i`-linux/devlib && \ python3 -m pip install -v -e /vllm-workspace/vllm-ascend/ --extra-index https://download.pytorch.org/whl/cpu/ && \ + if [ "$(uname -i)" = "x86_64" ]; then python3 -m pip uninstall -y triton; fi && \ python3 -m pip cache purge # Install clang-15 (for triton-ascend) diff --git a/Dockerfile.310p b/Dockerfile.310p index 10f86283..02339797 100644 --- a/Dockerfile.310p +++ b/Dockerfile.310p @@ -49,11 +49,13 @@ RUN VLLM_TARGET_DEVICE="empty" python3 -m pip install -v -e /vllm-workspace/vllm # Install vllm-ascend # Append `libascend_hal.so` path (devlib) to LD_LIBRARY_PATH +# Installing vllm-ascend on x86 can pull upstream triton back in alongside triton-ascend. Remove it immediately after this step. 
RUN export PIP_EXTRA_INDEX_URL=https://mirrors.huaweicloud.com/ascend/repos/pypi && \ source /usr/local/Ascend/ascend-toolkit/set_env.sh && \ source /usr/local/Ascend/nnal/atb/set_env.sh && \ export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/Ascend/ascend-toolkit/latest/`uname -i`-linux/devlib && \ python3 -m pip install -v -e /vllm-workspace/vllm-ascend/ --extra-index https://download.pytorch.org/whl/cpu/ && \ + if [ "$(uname -i)" = "x86_64" ]; then python3 -m pip uninstall -y triton; fi && \ python3 -m pip cache purge # Install modelscope (for fast download) and ray (for multinode) diff --git a/Dockerfile.310p.openEuler b/Dockerfile.310p.openEuler index 07b65c8c..de5594ce 100644 --- a/Dockerfile.310p.openEuler +++ b/Dockerfile.310p.openEuler @@ -44,12 +44,14 @@ RUN VLLM_TARGET_DEVICE="empty" python3 -m pip install -e /vllm-workspace/vllm/[a python3 -m pip cache purge # Install vllm-ascend +# Installing vllm-ascend on x86 can pull upstream triton back in alongside triton-ascend. Remove it immediately after this step. 
RUN export PIP_EXTRA_INDEX_URL=https://mirrors.huaweicloud.com/ascend/repos/pypi && \ source /usr/local/Ascend/ascend-toolkit/set_env.sh && \ source /usr/local/Ascend/nnal/atb/set_env.sh && \ export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/Ascend/ascend-toolkit/latest/`uname -i`-linux/devlib && \ export CPLUS_INCLUDE_PATH=$CPLUS_INCLUDE_PATH:/usr/include/c++/12:/usr/include/c++/12/`uname -i`-openEuler-linux && \ python3 -m pip install -v -e /vllm-workspace/vllm-ascend/ --extra-index https://download.pytorch.org/whl/cpu/ && \ + if [ "$(uname -i)" = "x86_64" ]; then python3 -m pip uninstall -y triton; fi && \ python3 -m pip cache purge # Install modelscope (for fast download) and ray (for multinode) diff --git a/Dockerfile.a3 b/Dockerfile.a3 index cb772f4d..45fe4f1e 100644 --- a/Dockerfile.a3 +++ b/Dockerfile.a3 @@ -58,11 +58,13 @@ RUN VLLM_TARGET_DEVICE="empty" python3 -m pip install -v -e /vllm-workspace/vllm # Install vllm-ascend # Append `libascend_hal.so` path (devlib) to LD_LIBRARY_PATH +# Installing vllm-ascend on x86 can pull upstream triton back in alongside triton-ascend. Remove it immediately after this step. 
RUN export PIP_EXTRA_INDEX_URL=https://mirrors.huaweicloud.com/ascend/repos/pypi && \ source /usr/local/Ascend/ascend-toolkit/set_env.sh && \ source /usr/local/Ascend/nnal/atb/set_env.sh && \ export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/Ascend/ascend-toolkit/latest/`uname -i`-linux/devlib && \ python3 -m pip install -v -e /vllm-workspace/vllm-ascend/ --extra-index https://download.pytorch.org/whl/cpu/ && \ + if [ "$(uname -i)" = "x86_64" ]; then python3 -m pip uninstall -y triton; fi && \ python3 -m pip cache purge # Install clang-15 (for triton-ascend) diff --git a/Dockerfile.a3.openEuler b/Dockerfile.a3.openEuler index 8ef81da4..fe30bdcc 100644 --- a/Dockerfile.a3.openEuler +++ b/Dockerfile.a3.openEuler @@ -58,12 +58,14 @@ RUN VLLM_TARGET_DEVICE="empty" python3 -m pip install -e /vllm-workspace/vllm/[a python3 -m pip cache purge # Install vllm-ascend +# Installing vllm-ascend on x86 can pull upstream triton back in alongside triton-ascend. Remove it immediately after this step. RUN export PIP_EXTRA_INDEX_URL=https://mirrors.huaweicloud.com/ascend/repos/pypi && \ source /usr/local/Ascend/ascend-toolkit/set_env.sh && \ source /usr/local/Ascend/nnal/atb/set_env.sh && \ export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/Ascend/ascend-toolkit/latest/`uname -i`-linux/devlib && \ export CPLUS_INCLUDE_PATH=$CPLUS_INCLUDE_PATH:/usr/include/c++/12:/usr/include/c++/12/`uname -i`-openEuler-linux && \ python3 -m pip install -v -e /vllm-workspace/vllm-ascend/ --extra-index https://download.pytorch.org/whl/cpu/ && \ + if [ "$(uname -i)" = "x86_64" ]; then python3 -m pip uninstall -y triton; fi && \ python3 -m pip cache purge # Install clang (for triton-ascend) diff --git a/Dockerfile.openEuler b/Dockerfile.openEuler index baab0f3d..c9608e19 100644 --- a/Dockerfile.openEuler +++ b/Dockerfile.openEuler @@ -58,12 +58,14 @@ RUN VLLM_TARGET_DEVICE="empty" python3 -m pip install -e /vllm-workspace/vllm/[a python3 -m pip cache purge # Install vllm-ascend +# Installing 
vllm-ascend on x86 can pull upstream triton back in alongside triton-ascend. Remove it immediately after this step. RUN export PIP_EXTRA_INDEX_URL=https://mirrors.huaweicloud.com/ascend/repos/pypi && \ source /usr/local/Ascend/ascend-toolkit/set_env.sh && \ source /usr/local/Ascend/nnal/atb/set_env.sh && \ export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/Ascend/ascend-toolkit/latest/`uname -i`-linux/devlib && \ export CPLUS_INCLUDE_PATH=$CPLUS_INCLUDE_PATH:/usr/include/c++/12:/usr/include/c++/12/`uname -i`-openEuler-linux && \ python3 -m pip install -v -e /vllm-workspace/vllm-ascend/ --extra-index https://download.pytorch.org/whl/cpu/ && \ + if [ "$(uname -i)" = "x86_64" ]; then python3 -m pip uninstall -y triton; fi && \ python3 -m pip cache purge # Install clang (for triton-ascend)