From 807f0895b2cf4f569f0c86aa808149bef70493b1 Mon Sep 17 00:00:00 2001
From: leo-pony
Date: Tue, 5 Aug 2025 08:43:24 +0800
Subject: [PATCH] Bump torch version to 2.7.1 (#1562)

### What this PR does / why we need it?
Bump torch version to 2.7.1 and clean up the infer schema patch
https://github.com/vllm-project/vllm-ascend/commit/857f489
(https://github.com/vllm-project/vllm-ascend/pull/837). This patch also
depends on: https://github.com/vllm-project/vllm-ascend/pull/1974

### Does this PR introduce any user-facing change?
No

### How was this patch tested?
CI passed.

torch-npu 2.7.1rc1 install guide:
https://gitee.com/ascend/pytorch/tree/v2.7.1/

Install the dependencies:
```
pip3 install pyyaml
pip3 install setuptools
```
Install torch-npu (matching the pin in requirements.txt below):
`pip3 install torch-npu==2.7.1.dev20250724 --pre --extra-index-url https://mirrors.huaweicloud.com/ascend/repos/pypi`

Closes: https://github.com/vllm-project/vllm-ascend/issues/1866
Closes: https://github.com/vllm-project/vllm-ascend/issues/1390

- vLLM version: v0.10.0
- vLLM main: https://github.com/vllm-project/vllm/commit/9af654cc38c74cd51b00c609eaa290e495f225e1

---------

Signed-off-by: Yikun Jiang
Signed-off-by: leo-pony
Co-authored-by: Yikun Jiang
---
 .github/workflows/release_whl.yml             |   3 +-
 README.md                                     |   2 +-
 README.zh.md                                  |   2 +-
 docs/source/installation.md                   |   4 +-
 pyproject.toml                                |   6 +-
 requirements.txt                              |   6 +-
 tests/e2e/singlecard/ops/test_fused_moe.py    |   3 -
 .../worker/patch_common/test_patch_utils.py   | 104 ------------------
 tests/ut/test_platform.py                     |   9 +-
 vllm_ascend/__init__.py                       |   4 -
 vllm_ascend/patch/__init__.py                 |  14 ---
 .../patch/worker/patch_common/__init__.py     |   3 -
 .../patch/worker/patch_common/patch_utils.py  |  38 -------
 vllm_ascend/platform.py                       |   6 -
 vllm_ascend/quantization/quant_config.py      |   4 -
 15 files changed, 14 insertions(+), 194 deletions(-)
 delete mode 100644 tests/ut/patch/worker/patch_common/test_patch_utils.py
 delete mode 100644 vllm_ascend/patch/worker/patch_common/patch_utils.py

diff --git a/.github/workflows/release_whl.yml b/.github/workflows/release_whl.yml
index 2e6a44b..d780a5f 100644
--- a/.github/workflows/release_whl.yml
+++ b/.github/workflows/release_whl.yml
@@ -90,7 +90,8 @@ jobs:
             --exclude libc10.so \
             --exclude libc_sec.so \
             --exclude "libascend*.so" \
-            --exclude "libtorch*.so"
+            --exclude "libtorch*.so" \
+            --exclude "liberror_manager.so"
           done
           rm -f dist/*.whl
           mv dist/repaired/*.whl dist/
diff --git a/README.md b/README.md
index e04a551..3b93f10 100644
--- a/README.md
+++ b/README.md
@@ -42,7 +42,7 @@ By using vLLM Ascend plugin, popular open-source models, including Transformer-l
 - Software:
   * Python >= 3.9, < 3.12
   * CANN >= 8.2.rc1
-  * PyTorch >= 2.5.1, torch-npu >= 2.5.1.post1.dev20250619
+  * PyTorch >= 2.7.1, torch-npu >= 2.7.1.dev20250724
   * vLLM (the same version as vllm-ascend)
 
 ## Getting Started
diff --git a/README.zh.md b/README.zh.md
index 746fed9..ee1dc5f 100644
--- a/README.zh.md
+++ b/README.zh.md
@@ -42,7 +42,7 @@ vLLM 昇腾插件 (`vllm-ascend`) 是一个由社区维护的让vLLM在Ascend NP
 - 软件:
   * Python >= 3.9, < 3.12
   * CANN >= 8.2.rc1
-  * PyTorch >= 2.5.1, torch-npu >= 2.5.1.post1.dev20250619
+  * PyTorch >= 2.7.1, torch-npu >= 2.7.1.dev20250724
   * vLLM (与vllm-ascend版本一致)
 
 ## 开始使用
diff --git a/docs/source/installation.md b/docs/source/installation.md
index 76bcfdc..e3bbfdf 100644
--- a/docs/source/installation.md
+++ b/docs/source/installation.md
@@ -12,8 +12,8 @@ This document describes how to install vllm-ascend manually.
 | Software      | Supported version                | Note                                       |
 |---------------|----------------------------------|--------------------------------------------|
 | CANN          | >= 8.2.RC1                       | Required for vllm-ascend and torch-npu     |
-| torch-npu     | >= 2.5.1.post1.dev20250619       | Required for vllm-ascend, No need to install manually, it will be auto installed in below steps |
-| torch         | >= 2.5.1                         | Required for torch-npu and vllm            |
+| torch-npu     | >= 2.7.1.dev20250724             | Required for vllm-ascend; no need to install it manually, it is installed automatically in the steps below |
+| torch         | >= 2.7.1                         | Required for torch-npu and vllm            |
 
 You have two ways to install:
 - **Using pip**: first prepare env manually or via CANN image, then install `vllm-ascend` using pip.
diff --git a/pyproject.toml b/pyproject.toml
index 390d8c4..e394895 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -12,9 +12,9 @@ requires = [
     "scipy",
     "setuptools>=64",
     "setuptools-scm>=8",
-    "torch-npu==2.5.1.post1.dev20250619",
-    "torch>=2.5.1",
-    "torchvision<0.21.0",
+    "torch-npu==2.7.1.dev20250724",
+    "torch>=2.7.1",
+    "torchvision",
     "wheel",
     "msgpack",
     "quart",
diff --git a/requirements.txt b/requirements.txt
index c2b2a31..6384149 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -10,8 +10,8 @@ pyyaml
 scipy
 setuptools>=64
 setuptools-scm>=8
-torch>=2.5.1
-torchvision<0.21.0
+torch>=2.7.1
+torchvision
 wheel
 # Remove after https://github.com/vllm-project/vllm-ascend/issues/2034
 transformers<4.54.0
@@ -26,4 +26,4 @@ numba
 # Install torch_npu
 --pre
 --extra-index-url https://mirrors.huaweicloud.com/ascend/repos/pypi
-torch-npu==2.5.1.post1.dev20250619
+torch-npu==2.7.1.dev20250724
diff --git a/tests/e2e/singlecard/ops/test_fused_moe.py b/tests/e2e/singlecard/ops/test_fused_moe.py
index f41504a..d04f3a6 100644
--- a/tests/e2e/singlecard/ops/test_fused_moe.py
+++ b/tests/e2e/singlecard/ops/test_fused_moe.py
@@ -19,9 +19,6 @@
 Run `pytest tests/ops/test_fused_moe.py`.
 """
 
-# fused moe ops test will hit the infer_schema error, we need add the patch
-# here to make the test pass.
-import vllm_ascend.patch.worker.patch_common.patch_utils  # type: ignore[import]  # isort: skip  # noqa
 
 from unittest.mock import MagicMock, patch
diff --git a/tests/ut/patch/worker/patch_common/test_patch_utils.py b/tests/ut/patch/worker/patch_common/test_patch_utils.py
deleted file mode 100644
index d64e833..0000000
--- a/tests/ut/patch/worker/patch_common/test_patch_utils.py
+++ /dev/null
@@ -1,104 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# This file is a part of the vllm-ascend project.
-#
-
-from typing import List, Optional
-from unittest.mock import MagicMock, patch
-
-import torch
-from torch.library import Library
-
-from tests.ut.base import TestBase
-from vllm_ascend.patch.worker.patch_common.patch_utils import \
-    ascend_direct_register_custom_op
-
-
-class TestPatchUtils(TestBase):
-
-    def setUp(self):
-        super().setUp()
-
-        self.mock_op_func = MagicMock()
-        self.mock_op_func.__annotations__ = {
-            'param1': list[int],
-            'param2': Optional[list[int]],
-            'param3': str
-        }
-
-        self.mock_fake_impl = MagicMock()
-        self.mock_lib = MagicMock(spec=Library)
-
-        self.op_name = "test_op"
-        self.mutates_args = ["arg1"]
-        self.dispatch_key = "NPU"
-        self.tags = (torch.Tag.pt2_compliant_tag, )
-
-        self.patch_infer_schema = patch(
-            'vllm_ascend.patch.worker.patch_common.patch_utils.torch.library.infer_schema'
-        )
-        self.patch_vllm_lib = patch(
-            'vllm_ascend.patch.worker.patch_common.patch_utils.vllm_lib')
-
-        self.mock_infer_schema = self.patch_infer_schema.start()
-        self.mock_vllm_lib = self.patch_vllm_lib.start()
-
-        self.addCleanup(self.patch_infer_schema.stop)
-        self.addCleanup(self.patch_vllm_lib.stop)
-
-    def test_utils_patched(self):
-        from vllm import utils
-
-        self.assertIs(utils.direct_register_custom_op,
-                      ascend_direct_register_custom_op)
-
-    def test_register_with_default_lib(self):
-        self.mock_infer_schema.return_value = "(Tensor self) -> Tensor"
-
-        ascend_direct_register_custom_op(op_name=self.op_name,
-                                         op_func=self.mock_op_func,
-                                         mutates_args=self.mutates_args,
-                                         fake_impl=self.mock_fake_impl,
-                                         dispatch_key=self.dispatch_key,
-                                         tags=self.tags)
-
-        self.assertEqual(self.mock_op_func.__annotations__['param1'],
-                         List[int])
-        self.assertEqual(self.mock_op_func.__annotations__['param2'],
-                         Optional[List[int]])
-        self.assertEqual(self.mock_op_func.__annotations__['param3'], str)
-
-        self.mock_infer_schema.assert_called_once_with(
-            self.mock_op_func, mutates_args=self.mutates_args)
-
-        self.mock_vllm_lib.define.assert_called_once_with(
-            f"{self.op_name}(Tensor self) -> Tensor", tags=self.tags)
-        self.mock_vllm_lib.impl.assert_called_once_with(
-            self.op_name, self.mock_op_func, dispatch_key=self.dispatch_key)
-        self.mock_vllm_lib._register_fake.assert_called_once_with(
-            self.op_name, self.mock_fake_impl)
-
-    def test_register_with_custom_lib(self):
-        self.mock_infer_schema.return_value = "(Tensor a, Tensor b) -> Tensor"
-
-        ascend_direct_register_custom_op(op_name=self.op_name,
-                                         op_func=self.mock_op_func,
-                                         mutates_args=self.mutates_args,
-                                         target_lib=self.mock_lib)
-
-        self.mock_lib.define.assert_called_once_with(
-            f"{self.op_name}(Tensor a, Tensor b) -> Tensor", tags=())
-        self.mock_lib.impl.assert_called_once_with(self.op_name,
-                                                   self.mock_op_func,
-                                                   dispatch_key="CUDA")
-        self.mock_lib._register_fake.assert_not_called()
diff --git a/tests/ut/test_platform.py b/tests/ut/test_platform.py
index 89441f8..c22db8b 100644
--- a/tests/ut/test_platform.py
+++ b/tests/ut/test_platform.py
@@ -543,13 +543,9 @@ class TestNPUPlatform(TestBase):
 
     @patch("torch.distributed.is_hccl_available", return_value=True)
     @patch("torch_npu._C._distributed_c10d.ProcessGroupHCCL")
-    @patch("torch_npu._C._distributed_c10d.ProcessGroupHCCL.Options")
     @patch("torch.distributed.ProcessGroup")
-    def test_successful_initialization(self, mock_pg, mock_options_cls,
-                                       mock_pg_hccl, _):
+    def test_successful_initialization(self, mock_pg, mock_pg_hccl, _):
         mock_prefix = MagicMock(spec=PrefixStore)
-        mock_options = MagicMock(spec=ProcessGroup.Options)
-        mock_options_cls.return_value = mock_options
         mock_backend = MagicMock()
         mock_pg_hccl.return_value = mock_backend
         group_rank = 0
@@ -574,8 +570,7 @@ class TestNPUPlatform(TestBase):
             timeout=timedelta(seconds=30),
         )
 
-        mock_pg.assert_called_once_with(mock_prefix, group_rank, group_size,
-                                        unittest.mock.ANY)
+        mock_pg.assert_called_once_with(mock_prefix, group_rank, group_size)
         mock_pg_hccl.assert_called_once_with(mock_prefix, group_rank,
                                              group_size, unittest.mock.ANY)
         mock_backend._set_sequence_number_for_group.assert_called_once()
diff --git a/vllm_ascend/__init__.py b/vllm_ascend/__init__.py
index c8f3331..7588e70 100644
--- a/vllm_ascend/__init__.py
+++ b/vllm_ascend/__init__.py
@@ -23,9 +23,5 @@ def register():
 
 
 def register_model():
-    # fix pytorch schema check error, remove this line after pytorch
-    # is upgraded to 2.7.0
-    import vllm_ascend.patch.worker.patch_common.patch_utils  # noqa: F401
-
     from .models import register_model
     register_model()
diff --git a/vllm_ascend/patch/__init__.py b/vllm_ascend/patch/__init__.py
index f22d948..754a344 100644
--- a/vllm_ascend/patch/__init__.py
+++ b/vllm_ascend/patch/__init__.py
@@ -75,20 +75,6 @@
 #    Future Plan:
 #       Remove this patch when vllm merged them.
 #
-# ** File: worker/patch_common/patch_utils.py **
-#    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-#    1. `vllm.utils.direct_register_custom_op`
-#       Why:
-#          pytorch 2.7.o is not compatible with pytorch 2.5.1. While vllm is based on pytorch 2.7.0, but vllm ascend
-#          is based on pytorch 2.5.1, so we need to use this patch to make vllm compatible with pytorch 2.5.1.
-#       How:
-#          patch __annotations__ check to make it compatible with pytorch 2.5.1.
-#       Related PR (if no, explain why):
-#          This is the problem in vllm-ascend
-#       Future Plan:
-#          Remove this patch once pytorch 2.7.0 is supported for vllm ascend.
-#
-# ** File: worker/patch_0_10_0/patch_sampler_gather_logprobs.py **
 #    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 #    1. `vllm.v1.sample.sampler.Sampler.gather_logprobs`
 #       Why:
diff --git a/vllm_ascend/patch/worker/patch_common/__init__.py b/vllm_ascend/patch/worker/patch_common/__init__.py
index 2533d13..78b6fcd 100644
--- a/vllm_ascend/patch/worker/patch_common/__init__.py
+++ b/vllm_ascend/patch/worker/patch_common/__init__.py
@@ -15,9 +15,6 @@
 #  limitations under the License.
 #
 
-# patch_utils should be the first import, because it will be used by other
-# patch files.
-import vllm_ascend.patch.worker.patch_common.patch_utils  # noqa isort:skip
 import vllm_ascend.patch.worker.patch_common.patch_distributed  # noqa
 import vllm_ascend.patch.worker.patch_common.patch_linear  # noqa
 import vllm_ascend.patch.worker.patch_common.patch_minicpm  # noqa
diff --git a/vllm_ascend/patch/worker/patch_common/patch_utils.py b/vllm_ascend/patch/worker/patch_common/patch_utils.py
deleted file mode 100644
index dec618c..0000000
--- a/vllm_ascend/patch/worker/patch_common/patch_utils.py
+++ /dev/null
@@ -1,38 +0,0 @@
-from typing import Callable, List, Optional, Tuple
-
-import torch
-from torch.library import Library
-from vllm import utils
-from vllm.utils import vllm_lib
-
-
-def ascend_direct_register_custom_op(
-    op_name: str,
-    op_func: Callable,
-    mutates_args: list[str],
-    fake_impl: Optional[Callable] = None,
-    target_lib: Optional[Library] = None,
-    dispatch_key: str = "CUDA",
-    tags: Tuple[torch.Tag, ...] = (),
-):
-    # In pytorch 2.5.1, torch.library.infer_schema require the input function to
-    # have annotations supported by typing library. But in pytorch 2.7.0 which
-    # vllm using, torch.library.infer_schema require the python builtin type. In
-    # this case, we should revert built type to typing type for 2.5.1 backward
-    # compatibility.
-    for k, v in op_func.__annotations__.items():
-        if v == list[int]:
-            op_func.__annotations__[k] = List[int]
-        if v == Optional[list[int]]:
-            op_func.__annotations__[k] = Optional[List[int]]
-        # TODO: add more type convert here if needed.
-    import torch.library
-    schema_str = torch.library.infer_schema(op_func, mutates_args=mutates_args)
-    my_lib = target_lib or vllm_lib
-    my_lib.define(op_name + schema_str, tags=tags)
-    my_lib.impl(op_name, op_func, dispatch_key=dispatch_key)
-    if fake_impl is not None:
-        my_lib._register_fake(op_name, fake_impl)
-
-
-utils.direct_register_custom_op = ascend_direct_register_custom_op
diff --git a/vllm_ascend/platform.py b/vllm_ascend/platform.py
index 2d3b819..fa369ac 100644
--- a/vllm_ascend/platform.py
+++ b/vllm_ascend/platform.py
@@ -259,16 +259,10 @@ class NPUPlatform(Platform):
 
         assert is_hccl_available()
 
-        # TODO(Yizhou): The reason we need to set options while vllm does not
-        # seems to be related to the version of PyTorch. In the latest version,
-        # there is no need to set options. While in the older version, 2.5.1
-        # specifically, we need to set options.
-        options = ProcessGroup.Options(backend=backend)
         pg: ProcessGroup = ProcessGroup(
             prefix_store,
             group_rank,
             group_size,
-            options,
         )
 
         backend_options = ProcessGroupHCCL.Options()
diff --git a/vllm_ascend/quantization/quant_config.py b/vllm_ascend/quantization/quant_config.py
index 2577b35..0b8935b 100644
--- a/vllm_ascend/quantization/quant_config.py
+++ b/vllm_ascend/quantization/quant_config.py
@@ -15,10 +15,6 @@
 #  limitations under the License.
 #  This file is a part of the vllm-ascend project.
 #
-# By using quantization case, this file is called before worker patch achieve,
-# we need to import patch_utils here first to make sure the patch is applied.
-import vllm_ascend.patch.worker.patch_common.patch_utils  # type: ignore[import]  # isort: skip  # noqa
-
 from types import MappingProxyType
 from typing import Any, Callable, Dict, List, Mapping, Optional
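
For reference, the torch 2.7 behavior this cleanup relies on can be checked with a minimal sketch (not part of the patch; `my_op` is a hypothetical op, and a torch 2.7.1 environment is assumed, per the pins above):

```python
# Probe torch.library.infer_schema with builtin generic annotations.
# On torch >= 2.7 these are accepted directly, which is why the
# ascend_direct_register_custom_op shim deleted above is no longer needed;
# on torch 2.5.1 the same annotations hit the infer_schema error that the
# old patch worked around by rewriting them to typing.List.
from typing import Optional

import torch
from torch.library import infer_schema


def my_op(x: torch.Tensor,
          sizes: list[int],
          pad: Optional[list[int]] = None) -> torch.Tensor:
    return x


# Expected output is a schema string along the lines of
# "(Tensor x, int[] sizes, int[]? pad=None) -> Tensor".
print(infer_schema(my_op, mutates_args=[]))
```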