From caf0289e1a7c051911fb4d0b2dc1e73132027989 Mon Sep 17 00:00:00 2001 From: starkwj Date: Mon, 5 Jan 2026 09:10:56 +0000 Subject: [PATCH] add Dockerfile and readme --- Dockerfile | 6 ++ Dockerfile.backup | 69 +++++++++++++++++ README-vllm-ascend.md | 91 +++++++++++++++++++++++ README.zh.md => README-vllm-ascend.zh.md | 0 README.md | 95 +++++++----------------- 5 files changed, 194 insertions(+), 67 deletions(-) create mode 100644 Dockerfile.backup create mode 100644 README-vllm-ascend.md rename README.zh.md => README-vllm-ascend.zh.md (100%) diff --git a/Dockerfile b/Dockerfile index 1af5f0c..18f016d 100644 --- a/Dockerfile +++ b/Dockerfile @@ -59,9 +59,15 @@ RUN export PIP_EXTRA_INDEX_URL=https://mirrors.huaweicloud.com/ascend/repos/pypi source /usr/local/Ascend/ascend-toolkit/set_env.sh && \ source /usr/local/Ascend/nnal/atb/set_env.sh && \ export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/Ascend/ascend-toolkit/latest/`uname -i`-linux/devlib && \ + cd /vllm-workspace/vllm-ascend/csrc/idle_offload && \ + make install && make clean && \ python3 -m pip install -v -e /vllm-workspace/vllm-ascend/ --extra-index https://download.pytorch.org/whl/cpu/ && \ python3 -m pip cache purge +ENV VLLM_ASCEND_ENABLE_NZ=0 \ + VLLM_WORKER_MULTIPROC_METHOD=spawn \ + VLLM_ASCEND_ENABLE_IDLE_OFFLOAD=1 + # Install modelscope (for fast download) and ray (for multinode) RUN python3 -m pip install modelscope 'ray>=2.47.1,<=2.48.0' 'protobuf>3.20.0' && \ python3 -m pip cache purge diff --git a/Dockerfile.backup b/Dockerfile.backup new file mode 100644 index 0000000..1af5f0c --- /dev/null +++ b/Dockerfile.backup @@ -0,0 +1,69 @@ +# +# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# This file is a part of the vllm-ascend project. +# + +FROM quay.io/ascend/cann:8.3.rc2-910b-ubuntu22.04-py3.11 + +ARG PIP_INDEX_URL="https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple" +ARG COMPILE_CUSTOM_KERNELS=1 +ARG MOONCAKE_TAG="v0.3.7.post2" + +# Define environments +ENV DEBIAN_FRONTEND=noninteractive +ENV COMPILE_CUSTOM_KERNELS=${COMPILE_CUSTOM_KERNELS} + +WORKDIR /workspace + +COPY . /vllm-workspace/vllm-ascend/ + +# Install Mooncake dependencies +RUN apt-get update -y && \ + apt-get install -y git vim wget net-tools gcc g++ cmake libnuma-dev && \ + git clone --depth 1 --branch ${MOONCAKE_TAG} https://github.com/kvcache-ai/Mooncake /vllm-workspace/Mooncake && \ + cp /vllm-workspace/vllm-ascend/tools/mooncake_installer.sh /vllm-workspace/Mooncake/ && \ + cd /vllm-workspace/Mooncake && bash mooncake_installer.sh -y && \ + export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/Ascend/ascend-toolkit/latest/`uname -i`-linux/lib64 && \ + mkdir -p build && cd build && cmake .. -DUSE_ASCEND_DIRECT=ON && \ + make -j$(nproc) && make install && \ + rm -fr /vllm-workspace/Mooncake/build && \ + rm -rf /var/cache/apt/* && \ + rm -rf /var/lib/apt/lists/* + +RUN pip config set global.index-url ${PIP_INDEX_URL} + +# Install vLLM +ARG VLLM_REPO=https://github.com/vllm-project/vllm.git +ARG VLLM_TAG=v0.11.0 +RUN git clone --depth 1 $VLLM_REPO --branch $VLLM_TAG /vllm-workspace/vllm +# In x86, triton will be installed by vllm. But in Ascend, triton doesn't work correctly. we need to uninstall it. 
+RUN VLLM_TARGET_DEVICE="empty" python3 -m pip install -v -e /vllm-workspace/vllm/[audio] --extra-index https://download.pytorch.org/whl/cpu/ && \ + python3 -m pip uninstall -y triton && \ + python3 -m pip cache purge + +# Install vllm-ascend +# Append `libascend_hal.so` path (devlib) to LD_LIBRARY_PATH +RUN export PIP_EXTRA_INDEX_URL=https://mirrors.huaweicloud.com/ascend/repos/pypi && \ + source /usr/local/Ascend/ascend-toolkit/set_env.sh && \ + source /usr/local/Ascend/nnal/atb/set_env.sh && \ + export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/Ascend/ascend-toolkit/latest/`uname -i`-linux/devlib && \ + python3 -m pip install -v -e /vllm-workspace/vllm-ascend/ --extra-index https://download.pytorch.org/whl/cpu/ && \ + python3 -m pip cache purge + +# Install modelscope (for fast download) and ray (for multinode) +RUN python3 -m pip install modelscope 'ray>=2.47.1,<=2.48.0' 'protobuf>3.20.0' && \ + python3 -m pip cache purge + +CMD ["/bin/bash"] diff --git a/README-vllm-ascend.md b/README-vllm-ascend.md new file mode 100644 index 0000000..994f8cc --- /dev/null +++ b/README-vllm-ascend.md @@ -0,0 +1,91 @@ +

+ + + vllm-ascend + +

+ +

+vLLM Ascend Plugin +

+ +

+| About Ascend | Documentation | #sig-ascend | Users Forum | Weekly Meeting | +

+ +

+English | 中文 +

+ +--- +*Latest News* 🔥 +- [2025/09] We released the new official version [v0.9.1](https://github.com/vllm-project/vllm-ascend/releases/tag/v0.9.1)! Please follow the [official guide](https://vllm-ascend.readthedocs.io/en/v0.9.1-dev/tutorials/large_scale_ep.html) to start deploy large scale Expert Parallelism (EP) on Ascend. +- [2025/08] We hosted the [vLLM Beijing Meetup](https://mp.weixin.qq.com/s/7n8OYNrCC_I9SJaybHA_-Q) with vLLM and Tencent! Please find the meetup slides [here](https://drive.google.com/drive/folders/1Pid6NSFLU43DZRi0EaTcPgXsAzDvbBqF). +- [2025/06] [User stories](https://vllm-ascend.readthedocs.io/en/latest/community/user_stories/index.html) page is now live! It kicks off with ‌LLaMA-Factory/verl//TRL/GPUStack‌ to demonstrate how ‌vLLM Ascend‌ assists Ascend users in enhancing their experience across fine-tuning, evaluation, reinforcement learning (RL), and deployment scenarios. +- [2025/06] [Contributors](https://vllm-ascend.readthedocs.io/en/latest/community/contributors.html) page is now live! All contributions deserve to be recorded, thanks for all contributors. +- [2025/05] We've released first official version [v0.7.3](https://github.com/vllm-project/vllm-ascend/releases/tag/v0.7.3)! We collaborated with the vLLM community to publish a blog post sharing our practice: [Introducing vLLM Hardware Plugin, Best Practice from Ascend NPU](https://blog.vllm.ai/2025/05/12/hardware-plugin.html). +- [2025/03] We hosted the [vLLM Beijing Meetup](https://mp.weixin.qq.com/s/VtxO9WXa5fC-mKqlxNUJUQ) with vLLM team! Please find the meetup slides [here](https://drive.google.com/drive/folders/1Pid6NSFLU43DZRi0EaTcPgXsAzDvbBqF). +- [2025/02] vLLM community officially created [vllm-project/vllm-ascend](https://github.com/vllm-project/vllm-ascend) repo for running vLLM seamlessly on the Ascend NPU. +- [2024/12] We are working with the vLLM community to support [[RFC]: Hardware pluggable](https://github.com/vllm-project/vllm/issues/11162). 
+--- +## Overview + +vLLM Ascend (`vllm-ascend`) is a community maintained hardware plugin for running vLLM seamlessly on the Ascend NPU. + +It is the recommended approach for supporting the Ascend backend within the vLLM community. It adheres to the principles outlined in the [[RFC]: Hardware pluggable](https://github.com/vllm-project/vllm/issues/11162), providing a hardware-pluggable interface that decouples the integration of the Ascend NPU with vLLM. + +By using vLLM Ascend plugin, popular open-source models, including Transformer-like, Mixture-of-Expert, Embedding, Multi-modal LLMs can run seamlessly on the Ascend NPU. + +## Prerequisites + +- Hardware: Atlas 800I A2 Inference series, Atlas A2 Training series, Atlas 800I A3 Inference series, Atlas A3 Training series, Atlas 300I Duo (Experimental) +- OS: Linux +- Software: + * Python >= 3.9, < 3.12 + * CANN >= 8.3.rc1 (Ascend HDK version refers to [here](https://www.hiascend.com/document/detail/zh/canncommercial/83RC1/releasenote/releasenote_0000.html)) + * PyTorch == 2.7.1, torch-npu == 2.7.1 + * vLLM (the same version as vllm-ascend) + +## Getting Started + +Please use the following recommended versions to get started quickly: + +| Version | Release type | Doc | +|------------|--------------|--------------------------------------| +|v0.11.0rc0|Latest release candidate|[QuickStart](https://vllm-ascend.readthedocs.io/en/latest/quick_start.html) and [Installation](https://vllm-ascend.readthedocs.io/en/latest/installation.html) for more details| +|v0.9.1|Latest stable version|[QuickStart](https://vllm-ascend.readthedocs.io/en/v0.9.1-dev/quick_start.html) and [Installation](https://vllm-ascend.readthedocs.io/en/v0.9.1-dev/installation.html) for more details| + +## Contributing +See [CONTRIBUTING](https://vllm-ascend.readthedocs.io/en/latest/developer_guide/contribution/index.html) for more details, which is a step-by-step guide to help you set up development environment, build and test. 
+ +We welcome and value any contributions and collaborations: +- Please let us know if you encounter a bug by [filing an issue](https://github.com/vllm-project/vllm-ascend/issues) +- Please use [User forum](https://discuss.vllm.ai/c/hardware-support/vllm-ascend-support) for usage questions and help. + +## Branch + +vllm-ascend has main branch and dev branch. + +- **main**: main branch,corresponds to the vLLM main branch, and is continuously monitored for quality through Ascend CI. +- **vX.Y.Z-dev**: development branch, created with part of new releases of vLLM. For example, `v0.7.3-dev` is the dev branch for vLLM `v0.7.3` version. + +Below is maintained branches: + +| Branch | Status | Note | +|------------|--------------|--------------------------------------| +| main | Maintained | CI commitment for vLLM main branch and vLLM v0.11.0 tag | +| v0.7.1-dev | Unmaintained | Only doc fixed is allowed | +| v0.7.3-dev | Maintained | CI commitment for vLLM 0.7.3 version, only bug fix is allowed and no new release tag any more. | +| v0.9.1-dev | Maintained | CI commitment for vLLM 0.9.1 version | +| rfc/feature-name | Maintained | [Feature branches](https://vllm-ascend.readthedocs.io/en/latest/community/versioning_policy.html#feature-branches) for collaboration | + +Please refer to [Versioning policy](https://vllm-ascend.readthedocs.io/en/latest/community/versioning_policy.html) for more details. + +## Weekly Meeting + +- vLLM Ascend Weekly Meeting: https://tinyurl.com/vllm-ascend-meeting +- Wednesday, 15:00 - 16:00 (UTC+8, [Convert to your timezone](https://dateful.com/convert/gmt8?t=15)) + +## License + +Apache License 2.0, as found in the [LICENSE](./LICENSE) file. diff --git a/README.zh.md b/README-vllm-ascend.zh.md similarity index 100% rename from README.zh.md rename to README-vllm-ascend.zh.md diff --git a/README.md b/README.md index 994f8cc..617cf55 100644 --- a/README.md +++ b/README.md @@ -1,90 +1,51 @@ -

- - - vllm-ascend - -

+# vLLM-Ascend Multi-LLM Serving -

-vLLM Ascend Plugin -

- -

-| About Ascend | Documentation | #sig-ascend | Users Forum | Weekly Meeting | -

- -

-English | 中文 -

- ---- -*Latest News* 🔥 -- [2025/09] We released the new official version [v0.9.1](https://github.com/vllm-project/vllm-ascend/releases/tag/v0.9.1)! Please follow the [official guide](https://vllm-ascend.readthedocs.io/en/v0.9.1-dev/tutorials/large_scale_ep.html) to start deploy large scale Expert Parallelism (EP) on Ascend. -- [2025/08] We hosted the [vLLM Beijing Meetup](https://mp.weixin.qq.com/s/7n8OYNrCC_I9SJaybHA_-Q) with vLLM and Tencent! Please find the meetup slides [here](https://drive.google.com/drive/folders/1Pid6NSFLU43DZRi0EaTcPgXsAzDvbBqF). -- [2025/06] [User stories](https://vllm-ascend.readthedocs.io/en/latest/community/user_stories/index.html) page is now live! It kicks off with ‌LLaMA-Factory/verl//TRL/GPUStack‌ to demonstrate how ‌vLLM Ascend‌ assists Ascend users in enhancing their experience across fine-tuning, evaluation, reinforcement learning (RL), and deployment scenarios. -- [2025/06] [Contributors](https://vllm-ascend.readthedocs.io/en/latest/community/contributors.html) page is now live! All contributions deserve to be recorded, thanks for all contributors. -- [2025/05] We've released first official version [v0.7.3](https://github.com/vllm-project/vllm-ascend/releases/tag/v0.7.3)! We collaborated with the vLLM community to publish a blog post sharing our practice: [Introducing vLLM Hardware Plugin, Best Practice from Ascend NPU](https://blog.vllm.ai/2025/05/12/hardware-plugin.html). -- [2025/03] We hosted the [vLLM Beijing Meetup](https://mp.weixin.qq.com/s/VtxO9WXa5fC-mKqlxNUJUQ) with vLLM team! Please find the meetup slides [here](https://drive.google.com/drive/folders/1Pid6NSFLU43DZRi0EaTcPgXsAzDvbBqF). -- [2025/02] vLLM community officially created [vllm-project/vllm-ascend](https://github.com/vllm-project/vllm-ascend) repo for running vLLM seamlessly on the Ascend NPU. -- [2024/12] We are working with the vLLM community to support [[RFC]: Hardware pluggable](https://github.com/vllm-project/vllm/issues/11162). 
--- ## Overview -vLLM Ascend (`vllm-ascend`) is a community maintained hardware plugin for running vLLM seamlessly on the Ascend NPU. +This repository is a modified version of [vLLM-Ascend](https://github.com/vllm-project/vllm-ascend) (v0.11.0) designed to enable multiple large language models (LLMs) to share one Ascend NPU. -It is the recommended approach for supporting the Ascend backend within the vLLM community. It adheres to the principles outlined in the [[RFC]: Hardware pluggable](https://github.com/vllm-project/vllm/issues/11162), providing a hardware-pluggable interface that decouples the integration of the Ascend NPU with vLLM. +The key feature of this project is efficient memory coordination, enabling multiple vLLM instances to share and dynamically hold Ascend NPU's physical memory. +When an instance is idle, model parameters are offloaded to host memory. +Upon a new inference request, the model parameters are quickly restored to the NPU’s memory (if not already present), without the need to initialize the engine and load the model from scratch. (For Qwen3-8B, it introduces 0.8s of additional latency to TTFT when restoring from offload status.) -By using vLLM Ascend plugin, popular open-source models, including Transformer-like, Mixture-of-Expert, Embedding, Multi-modal LLMs can run seamlessly on the Ascend NPU. -## Prerequisites +## Features -- Hardware: Atlas 800I A2 Inference series, Atlas A2 Training series, Atlas 800I A3 Inference series, Atlas A3 Training series, Atlas 300I Duo (Experimental) -- OS: Linux -- Software: - * Python >= 3.9, < 3.12 - * CANN >= 8.3.rc1 (Ascend HDK version refers to [here](https://www.hiascend.com/document/detail/zh/canncommercial/83RC1/releasenote/releasenote_0000.html)) - * PyTorch == 2.7.1, torch-npu == 2.7.1 - * vLLM (the same version as vllm-ascend) +- **Shared NPU Usage**: Multiple vLLM instances can share the same Ascend NPU, allowing for multi-LLM serving of different LLMs. 
+- **Fast Memory Restore**: We decouple the virtual and physical memory allocation. Physical NPU memory is allocated, exported and shared with other LLM engines. LLM engines can restore quickly without reinitialization and memory allocation. -## Getting Started -Please use the following recommended versions to get started quickly: +## Installation -| Version | Release type | Doc | -|------------|--------------|--------------------------------------| -|v0.11.0rc0|Latest release candidate|[QuickStart](https://vllm-ascend.readthedocs.io/en/latest/quick_start.html) and [Installation](https://vllm-ascend.readthedocs.io/en/latest/installation.html) for more details| -|v0.9.1|Latest stable version|[QuickStart](https://vllm-ascend.readthedocs.io/en/v0.9.1-dev/quick_start.html) and [Installation](https://vllm-ascend.readthedocs.io/en/v0.9.1-dev/installation.html) for more details| +### Build from Dockerfile -## Contributing -See [CONTRIBUTING](https://vllm-ascend.readthedocs.io/en/latest/developer_guide/contribution/index.html) for more details, which is a step-by-step guide to help you set up development environment, build and test. +Clone this repository: -We welcome and value any contributions and collaborations: -- Please let us know if you encounter a bug by [filing an issue](https://github.com/vllm-project/vllm-ascend/issues) -- Please use [User forum](https://discuss.vllm.ai/c/hardware-support/vllm-ascend-support) for usage questions and help. +```bash +docker build -t vllm-ascend-multi-llm:latest -f ./Dockerfile . +``` -## Branch +## Usage -vllm-ascend has main branch and dev branch. +> [!NOTE] +> Some platforms may not allow multiple containers to share the same Ascend NPU. You may try to use a privileged container to bypass this restriction and mount all NPUs, and set the env ASCEND_RT_VISIBLE_DEVICES to specify the target device to use. -- **main**: main branch,corresponds to the vLLM main branch, and is continuously monitored for quality through Ascend CI. 
-- **vX.Y.Z-dev**: development branch, created with part of new releases of vLLM. For example, `v0.7.3-dev` is the dev branch for vLLM `v0.7.3` version. +0. To share NPU, processes coordinate via shm, so you need to set all containers with `ipc=host`. +1. Start a daemon process in a standalone container, by running `vllm_vnpu_daemon` installed inside the image. +2. Start LLM services with this image, following the official usage instructions. +3. Due to the limited stream resource of Ascend NPU, you may need to restrict graph capture sizes or disable ACLgraph by setting `--enforce-eager`, especially when launching multiple LLMs. Refer to the [link](https://docs.vllm.ai/projects/ascend/en/latest/faqs.html#how-to-troubleshoot-and-resolve-size-capture-failures-resulting-from-stream-resource-exhaustion-and-what-are-the-underlying-causes). -Below is maintained branches: -| Branch | Status | Note | -|------------|--------------|--------------------------------------| -| main | Maintained | CI commitment for vLLM main branch and vLLM v0.11.0 tag | -| v0.7.1-dev | Unmaintained | Only doc fixed is allowed | -| v0.7.3-dev | Maintained | CI commitment for vLLM 0.7.3 version, only bug fix is allowed and no new release tag any more. | -| v0.9.1-dev | Maintained | CI commitment for vLLM 0.9.1 version | -| rfc/feature-name | Maintained | [Feature branches](https://vllm-ascend.readthedocs.io/en/latest/community/versioning_policy.html#feature-branches) for collaboration | +## Limitations -Please refer to [Versioning policy](https://vllm-ascend.readthedocs.io/en/latest/community/versioning_policy.html) for more details. +- This project only supports sharing a single NPU currently. This is also limited by the fact that HCCL cannot be shared. We haven't figured out how to bypass HCCL. *Help wanted*. +- The prefix cache will be reset when the LLM is restored, since we just simply discard the KV cache when the LLM is offloaded. 
-## Weekly Meeting -- vLLM Ascend Weekly Meeting: https://tinyurl.com/vllm-ascend-meeting -- Wednesday, 15:00 - 16:00 (UTC+8, [Convert to your timezone](https://dateful.com/convert/gmt8?t=15)) +## Roadmap +- [ ] Space-sharing. +- [ ] ... + ## License