From 9ff6b0b86204bc641e7c8d7d5cfd12e2309391ef Mon Sep 17 00:00:00 2001
From: menogrey <34808445+menogrey@users.noreply.github.com>
Date: Thu, 16 Oct 2025 14:38:11 +0800
Subject: [PATCH] [CI]: Fix doctest CI for main release (#3451)

### What this PR does / why we need it?
Fix the doctest CI for the main release.

### Does this PR introduce _any_ user-facing change?
No.

### How was this patch tested?
- vLLM version: v0.11.0rc3
- vLLM main: https://github.com/vllm-project/vllm/commit/v0.11.0

Signed-off-by: menogrey <1299267905@qq.com>
---
 docs/source/installation.md                  |  8 ++++++--
 .../002-pip-binary-installation-test.sh      | 18 +++++++++++++++---
 2 files changed, 21 insertions(+), 5 deletions(-)

diff --git a/docs/source/installation.md b/docs/source/installation.md
index 0d3b54d..0319465 100644
--- a/docs/source/installation.md
+++ b/docs/source/installation.md
@@ -141,8 +141,12 @@ Then you can install `vllm` and `vllm-ascend` from **pre-built wheel**:
 ```{code-block} bash
 :substitutions:
 
-# Install vllm-project/vllm from pypi
-pip install vllm==|pip_vllm_version|
+# Install vllm-project/vllm. The newest supported version is |vllm_version|.
+# Version |vllm_version| has not been published to PyPI yet, so you need to install it from source.
+git clone --depth 1 --branch |vllm_version| https://github.com/vllm-project/vllm
+cd vllm
+VLLM_TARGET_DEVICE=empty pip install -v -e .
+cd ..
 
 # Install vllm-project/vllm-ascend from pypi.
 pip install vllm-ascend==|pip_vllm_ascend_version|
diff --git a/tests/e2e/doctests/002-pip-binary-installation-test.sh b/tests/e2e/doctests/002-pip-binary-installation-test.sh
index 525f348..4aad569 100644
--- a/tests/e2e/doctests/002-pip-binary-installation-test.sh
+++ b/tests/e2e/doctests/002-pip-binary-installation-test.sh
@@ -40,21 +40,33 @@ function install_binary_test() {
     create_vllm_venv
 
     PIP_VLLM_VERSION=$(get_version pip_vllm_version)
+    VLLM_VERSION=$(get_version vllm_version)
     PIP_VLLM_ASCEND_VERSION=$(get_version pip_vllm_ascend_version)
 
     _info "====> Install vllm==${PIP_VLLM_VERSION} and vllm-ascend ${PIP_VLLM_ASCEND_VERSION}"
 
     # Setup extra-index-url for x86 & torch_npu dev version
     pip config set global.extra-index-url "https://download.pytorch.org/whl/cpu/ https://mirrors.huaweicloud.com/ascend/repos/pypi"
 
-    pip install vllm=="$(get_version pip_vllm_version)"
-    pip install vllm-ascend=="$(get_version pip_vllm_ascend_version)"
+    if [[ "${VLLM_VERSION}" != "v0.11.0rc3" ]]; then
+        # This vLLM version is already published on PyPI, so install it from PyPI.
+        pip install vllm=="${PIP_VLLM_VERSION}"
+    else
+        # This vLLM version is not on PyPI yet, so install it from source at the matching tag.
+        git clone --depth 1 --branch "${VLLM_VERSION}" https://github.com/vllm-project/vllm
+        cd vllm
+        VLLM_TARGET_DEVICE=empty pip install -v -e .
+        cd ..
+    fi
+
+    pip install vllm-ascend=="${PIP_VLLM_ASCEND_VERSION}"
 
     pip list | grep vllm
 
     # Verify the installation
     _info "====> Run offline example test"
     pip install modelscope
 
-    python3 "${SCRIPT_DIR}/../../examples/offline_inference_npu.py"
+    cd "${SCRIPT_DIR}/../../examples" && python3 ./offline_inference_npu.py
+    cd -
 }
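
For context, the core of the install fallback this patch adds to the doctest script is sketched below. This is an illustrative sketch, not the CI script itself: the default values for `VLLM_VERSION` and `PIP_VLLM_VERSION` are placeholders (the real script resolves both through its `get_version` helper), and `v0.11.0rc3` is the tag the patch singles out as not yet published on PyPI.

```bash
#!/usr/bin/env bash
# Sketch of the version-gated install path added by this patch.
# Assumption: the defaults below are placeholders; the doctest script derives
# both values from the docs substitutions via its get_version helper.
set -euo pipefail

VLLM_VERSION="${VLLM_VERSION:-v0.11.0rc3}"
PIP_VLLM_VERSION="${PIP_VLLM_VERSION:-0.11.0}"

if [[ "${VLLM_VERSION}" != "v0.11.0rc3" ]]; then
    # Released versions are installed straight from PyPI.
    pip install vllm=="${PIP_VLLM_VERSION}"
else
    # v0.11.0rc3 is not on PyPI, so build vLLM from the matching git tag.
    # VLLM_TARGET_DEVICE=empty skips device-specific builds, since vllm-ascend
    # supplies the NPU backend separately.
    git clone --depth 1 --branch "${VLLM_VERSION}" https://github.com/vllm-project/vllm
    cd vllm
    VLLM_TARGET_DEVICE=empty pip install -v -e .
    cd ..
fi
```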