### What this PR does / why we need it?
- Fixes https://github.com/vllm-project/vllm-ascend/issues/1533
### How was this patch tested?
1. Run the image
```
docker run \
--name cann_container \
--device /dev/davinci6 \
--device /dev/davinci_manager \
--device /dev/devmm_svm \
--device /dev/hisi_hdc \
-v /usr/local/dcmi:/usr/local/dcmi \
-v /usr/local/bin/npu-smi:/usr/local/bin/npu-smi \
-v /usr/local/Ascend/driver/lib64/:/usr/local/Ascend/driver/lib64/ \
-v /usr/local/Ascend/driver/version.info:/usr/local/Ascend/driver/version.info \
-v /etc/ascend_install.info:/etc/ascend_install.info \
-it quay.io/ascend/cann:8.1.rc1-910b-openeuler22.03-py3.11 bash
```
2. Install packages
- torch=2.5.1
- torch-npu=2.5.1.post1.dev20250619
- vllm=0.9.1
- vllm-ascend=vllm_ascend-0.1.dev1+g02ac443-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl

Artifact download URL: https://github.com/vllm-project/vllm-ascend/actions/runs/16039661265/artifacts/3454481370
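For reference, installation inside the container might look like the sketch below. The exact index for the torch-npu dev build and for vllm on Ascend may differ; these commands only illustrate the pinned versions and are not taken from the PR.
```
# Pinned versions as listed above; the source index for the torch-npu dev
# build is an assumption and may need to be adjusted.
pip install torch==2.5.1 vllm==0.9.1
pip install torch-npu==2.5.1.post1.dev20250619

# Install the vllm-ascend wheel downloaded from the CI artifact linked above.
pip install ./vllm_ascend-0.1.dev1+g02ac443-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl
```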
3. Run the offline test script
```
from vllm import LLM, SamplingParams
import os
os.environ["VLLM_USE_V1"] = "1"
prompts = [
    "Hello, my name is",
]
llm = LLM(model="Qwen3/Qwen3-1.7B")
outputs = llm.generate(prompts)
for output in outputs:
    prompt = output.prompt
    generated_text = output.outputs[0].text
    print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}")
```
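Saved as a standalone file (e.g. `test_offline.py`, a name chosen here for illustration), the script can be run directly inside the container:
```
# Run the offline generation test (filename is illustrative).
python3 test_offline.py
```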
4. Results

- vLLM version: v0.9.2
- vLLM main: b942c094e3
---------
Signed-off-by: Icey <1790571317@qq.com>
Dockerfile (46 lines, 1.6 KiB):
```
#
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
#
ARG PY_VERSION=3.10
FROM quay.io/ascend/manylinux:8.0.0-910b-manylinux_2_28-py${PY_VERSION}

ARG COMPILE_CUSTOM_KERNELS=1

# Define environments
ENV DEBIAN_FRONTEND=noninteractive
ENV COMPILE_CUSTOM_KERNELS=${COMPILE_CUSTOM_KERNELS}
RUN yum update -y && \
    yum install -y python3-pip git vim wget net-tools gcc gcc-c++ make cmake numactl-devel && \
    rm -rf /var/cache/yum

WORKDIR /workspace

COPY . /workspace/vllm-ascend/

# Install req
RUN python3 -m pip install -r vllm-ascend/requirements.txt --extra-index https://download.pytorch.org/whl/cpu/ && \
    python3 -m pip install twine

# Install vllm-ascend
RUN source /usr/local/Ascend/ascend-toolkit/set_env.sh && \
    source /usr/local/Ascend/nnal/atb/set_env.sh && \
    export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/Ascend/ascend-toolkit/latest/`uname -i`-linux/devlib && \
    cd vllm-ascend && \
    python3 setup.py bdist_wheel && \
    ls -l dist

CMD ["/bin/bash"]
```
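For reference, producing a wheel from this Dockerfile might look like the following sketch. The image tag, container name, and output path are illustrative and not part of the PR; if the file is not saved as `Dockerfile`, pass it with `-f`.
```
# Build the wheel-builder image from the vllm-ascend repository root (tag is illustrative).
docker build -t vllm-ascend-wheel-builder .

# Copy the built wheel out of a throwaway container; the dist/ path matches
# the `python3 setup.py bdist_wheel` step in the Dockerfile above.
docker create --name wheel-tmp vllm-ascend-wheel-builder
docker cp wheel-tmp:/workspace/vllm-ascend/dist ./dist
docker rm wheel-tmp
```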