Bump torch version to 2.7.1 (#1562)
### What this PR does / why we need it?
Bump the torch version to 2.7.1 and clean up the infer schema patch introduced in
https://github.com/vllm-project/vllm-ascend/commit/857f489
(https://github.com/vllm-project/vllm-ascend/pull/837). This patch also depends on
https://github.com/vllm-project/vllm-ascend/pull/1974.
### Does this PR introduce any user-facing change?
No
### How was this patch tested?
CI passed
torch-npu 2.7.1rc1 install guide:
https://gitee.com/ascend/pytorch/tree/v2.7.1/

Install dependencies:
```
pip3 install pyyaml
pip3 install setuptools
```

Install torch-npu (see the sketch below):
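A minimal sketch of the torch-npu install step, with the version pin and extra index taken from this PR's requirements changes (the guide linked above is authoritative):

```
pip3 install --pre torch-npu==2.7.1.dev20250724 --extra-index-url https://mirrors.huaweicloud.com/ascend/repos/pypi
```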
Closes: https://github.com/vllm-project/vllm-ascend/issues/1866
Closes: https://github.com/vllm-project/vllm-ascend/issues/1390
- vLLM version: v0.10.0
- vLLM main: 9af654cc38
---------
Signed-off-by: Yikun Jiang <yikunkero@gmail.com>
Signed-off-by: leo-pony <nengjunma@outlook.com>
Co-authored-by: Yikun Jiang <yikunkero@gmail.com>
Changed file: `.github/workflows/release_whl.yml`:
```diff
@@ -90,7 +90,8 @@ jobs:
              --exclude libc10.so \
              --exclude libc_sec.so \
              --exclude "libascend*.so" \
-              --exclude "libtorch*.so"
+              --exclude "libtorch*.so" \
+              --exclude "liberror_manager.so"
            done
            rm -f dist/*.whl
            mv dist/repaired/*.whl dist/
```
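For context, a hedged reconstruction of the repair loop this hunk tails; the `auditwheel repair` invocation and output directory are assumptions, not shown in the hunk:

```
# Assumed shape of the wheel-repair step: the excluded libraries are provided
# by the CANN / torch runtime on the target machine, so they must not be bundled.
for whl in dist/*.whl; do
  auditwheel repair "$whl" -w dist/repaired \
    --exclude libc10.so \
    --exclude libc_sec.so \
    --exclude "libascend*.so" \
    --exclude "libtorch*.so" \
    --exclude "liberror_manager.so"
done
rm -f dist/*.whl
mv dist/repaired/*.whl dist/
```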
```diff
@@ -42,7 +42,7 @@ By using vLLM Ascend plugin, popular open-source models, including Transformer-l
 - Software:
   * Python >= 3.9, < 3.12
   * CANN >= 8.2.rc1
-  * PyTorch >= 2.5.1, torch-npu >= 2.5.1.post1.dev20250619
+  * PyTorch >= 2.7.1, torch-npu >= 2.7.1.dev20250724
   * vLLM (the same version as vllm-ascend)

 ## Getting Started
```
```diff
@@ -42,7 +42,7 @@ The vLLM Ascend plugin (`vllm-ascend`) is a community-maintained plugin that lets vLLM run on Ascend NPU
 - Software:
   * Python >= 3.9, < 3.12
   * CANN >= 8.2.rc1
-  * PyTorch >= 2.5.1, torch-npu >= 2.5.1.post1.dev20250619
+  * PyTorch >= 2.7.1, torch-npu >= 2.7.1.dev20250724
   * vLLM (same version as vllm-ascend)

 ## Getting Started
```
```diff
@@ -12,8 +12,8 @@ This document describes how to install vllm-ascend manually.
 | Software      | Supported version          | Note                                    |
 |---------------|----------------------------|-----------------------------------------|
 | CANN          | >= 8.2.RC1                 | Required for vllm-ascend and torch-npu  |
-| torch-npu     | >= 2.5.1.post1.dev20250619 | Required for vllm-ascend, No need to install manually, it will be auto installed in below steps |
-| torch         | >= 2.5.1                   | Required for torch-npu and vllm         |
+| torch-npu     | >= 2.7.1.dev20250724       | Required for vllm-ascend, No need to install manually, it will be auto installed in below steps |
+| torch         | >= 2.7.1                   | Required for torch-npu and vllm         |

 You have 2 way to install:
 - **Using pip**: first prepare env manually or via CANN image, then install `vllm-ascend` using pip.
```
```diff
@@ -12,9 +12,9 @@ requires = [
     "scipy",
     "setuptools>=64",
     "setuptools-scm>=8",
-    "torch-npu==2.5.1.post1.dev20250619",
-    "torch>=2.5.1",
-    "torchvision<0.21.0",
+    "torch-npu==2.7.1.dev20250724",
+    "torch>=2.7.1",
+    "torchvision",
     "wheel",
     "msgpack",
     "quart",
```
```diff
@@ -10,8 +10,8 @@ pyyaml
 scipy
 setuptools>=64
 setuptools-scm>=8
-torch>=2.5.1
-torchvision<0.21.0
+torch>=2.7.1
+torchvision
 wheel
 # Remove after https://github.com/vllm-project/vllm-ascend/issues/2034
 transformers<4.54.0
```
```diff
@@ -26,4 +26,4 @@ numba
 # Install torch_npu
 --pre
 --extra-index-url https://mirrors.huaweicloud.com/ascend/repos/pypi
-torch-npu==2.5.1.post1.dev20250619
+torch-npu==2.7.1.dev20250724
```
```diff
@@ -19,9 +19,6 @@

 Run `pytest tests/ops/test_fused_moe.py`.
 """
-# fused moe ops test will hit the infer_schema error, we need add the patch
-# here to make the test pass.
-import vllm_ascend.patch.worker.patch_common.patch_utils  # type: ignore[import] # isort: skip # noqa

 from unittest.mock import MagicMock, patch

```
```diff
@@ -1,104 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# This file is a part of the vllm-ascend project.
-#
-
-from typing import List, Optional
-from unittest.mock import MagicMock, patch
-
-import torch
-from torch.library import Library
-
-from tests.ut.base import TestBase
-from vllm_ascend.patch.worker.patch_common.patch_utils import \
-    ascend_direct_register_custom_op
-
-
-class TestPatchUtils(TestBase):
-
-    def setUp(self):
-        super().setUp()
-
-        self.mock_op_func = MagicMock()
-        self.mock_op_func.__annotations__ = {
-            'param1': list[int],
-            'param2': Optional[list[int]],
-            'param3': str
-        }
-
-        self.mock_fake_impl = MagicMock()
-        self.mock_lib = MagicMock(spec=Library)
-
-        self.op_name = "test_op"
-        self.mutates_args = ["arg1"]
-        self.dispatch_key = "NPU"
-        self.tags = (torch.Tag.pt2_compliant_tag, )
-
-        self.patch_infer_schema = patch(
-            'vllm_ascend.patch.worker.patch_common.patch_utils.torch.library.infer_schema'
-        )
-        self.patch_vllm_lib = patch(
-            'vllm_ascend.patch.worker.patch_common.patch_utils.vllm_lib')
-
-        self.mock_infer_schema = self.patch_infer_schema.start()
-        self.mock_vllm_lib = self.patch_vllm_lib.start()
-
-        self.addCleanup(self.patch_infer_schema.stop)
-        self.addCleanup(self.patch_vllm_lib.stop)
-
-    def test_utils_patched(self):
-        from vllm import utils
-
-        self.assertIs(utils.direct_register_custom_op,
-                      ascend_direct_register_custom_op)
-
-    def test_register_with_default_lib(self):
-        self.mock_infer_schema.return_value = "(Tensor self) -> Tensor"
-
-        ascend_direct_register_custom_op(op_name=self.op_name,
-                                         op_func=self.mock_op_func,
-                                         mutates_args=self.mutates_args,
-                                         fake_impl=self.mock_fake_impl,
-                                         dispatch_key=self.dispatch_key,
-                                         tags=self.tags)
-
-        self.assertEqual(self.mock_op_func.__annotations__['param1'],
-                         List[int])
-        self.assertEqual(self.mock_op_func.__annotations__['param2'],
-                         Optional[List[int]])
-        self.assertEqual(self.mock_op_func.__annotations__['param3'], str)
-
-        self.mock_infer_schema.assert_called_once_with(
-            self.mock_op_func, mutates_args=self.mutates_args)
-
-        self.mock_vllm_lib.define.assert_called_once_with(
-            f"{self.op_name}(Tensor self) -> Tensor", tags=self.tags)
-        self.mock_vllm_lib.impl.assert_called_once_with(
-            self.op_name, self.mock_op_func, dispatch_key=self.dispatch_key)
-        self.mock_vllm_lib._register_fake.assert_called_once_with(
-            self.op_name, self.mock_fake_impl)
-
-    def test_register_with_custom_lib(self):
-        self.mock_infer_schema.return_value = "(Tensor a, Tensor b) -> Tensor"
-
-        ascend_direct_register_custom_op(op_name=self.op_name,
-                                         op_func=self.mock_op_func,
-                                         mutates_args=self.mutates_args,
-                                         target_lib=self.mock_lib)
-
-        self.mock_lib.define.assert_called_once_with(
-            f"{self.op_name}(Tensor a, Tensor b) -> Tensor", tags=())
-        self.mock_lib.impl.assert_called_once_with(self.op_name,
-                                                   self.mock_op_func,
-                                                   dispatch_key="CUDA")
-        self.mock_lib._register_fake.assert_not_called()
```
```diff
@@ -543,13 +543,9 @@ class TestNPUPlatform(TestBase):

     @patch("torch.distributed.is_hccl_available", return_value=True)
     @patch("torch_npu._C._distributed_c10d.ProcessGroupHCCL")
-    @patch("torch_npu._C._distributed_c10d.ProcessGroupHCCL.Options")
     @patch("torch.distributed.ProcessGroup")
-    def test_successful_initialization(self, mock_pg, mock_options_cls,
-                                       mock_pg_hccl, _):
+    def test_successful_initialization(self, mock_pg, mock_pg_hccl, _):
         mock_prefix = MagicMock(spec=PrefixStore)
-        mock_options = MagicMock(spec=ProcessGroup.Options)
-        mock_options_cls.return_value = mock_options
         mock_backend = MagicMock()
         mock_pg_hccl.return_value = mock_backend
         group_rank = 0
```
```diff
@@ -574,8 +570,7 @@ class TestNPUPlatform(TestBase):
             timeout=timedelta(seconds=30),
         )

-        mock_pg.assert_called_once_with(mock_prefix, group_rank, group_size,
-                                        unittest.mock.ANY)
+        mock_pg.assert_called_once_with(mock_prefix, group_rank, group_size)
         mock_pg_hccl.assert_called_once_with(mock_prefix, group_rank,
                                              group_size, unittest.mock.ANY)
         mock_backend._set_sequence_number_for_group.assert_called_once()
```
```diff
@@ -23,9 +23,5 @@ def register():


 def register_model():
-    # fix pytorch schema check error, remove this line after pytorch
-    # is upgraded to 2.7.0
-    import vllm_ascend.patch.worker.patch_common.patch_utils  # noqa: F401
-
     from .models import register_model
     register_model()
```
```diff
@@ -75,20 +75,6 @@
 #   Future Plan:
 #       Remove this patch when vllm merged them.
 #
-# ** File: worker/patch_common/patch_utils.py **
-#    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-#   1. `vllm.utils.direct_register_custom_op`
-#      Why:
-#          pytorch 2.7.0 is not compatible with pytorch 2.5.1. While vllm is based on pytorch 2.7.0, vllm ascend
-#          is based on pytorch 2.5.1, so we need to use this patch to make vllm compatible with pytorch 2.5.1.
-#      How:
-#          patch __annotations__ check to make it compatible with pytorch 2.5.1.
-#      Related PR (if no, explain why):
-#          This is the problem in vllm-ascend
-#      Future Plan:
-#          Remove this patch once pytorch 2.7.0 is supported for vllm ascend.
-#
 # ** File: worker/patch_0_10_0/patch_sampler_gather_logprobs.py **
 #    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 #   1. `vllm.v1.sample.sampler.Sampler.gather_logprobs`
 #      Why:
```
```diff
@@ -15,9 +15,6 @@
 # limitations under the License.
 #

-# patch_utils should be the first import, because it will be used by other
-# patch files.
-import vllm_ascend.patch.worker.patch_common.patch_utils  # noqa isort:skip
 import vllm_ascend.patch.worker.patch_common.patch_distributed  # noqa
 import vllm_ascend.patch.worker.patch_common.patch_linear  # noqa
 import vllm_ascend.patch.worker.patch_common.patch_minicpm  # noqa
```
```diff
@@ -1,38 +0,0 @@
-from typing import Callable, List, Optional, Tuple
-
-import torch
-from torch.library import Library
-from vllm import utils
-from vllm.utils import vllm_lib
-
-
-def ascend_direct_register_custom_op(
-    op_name: str,
-    op_func: Callable,
-    mutates_args: list[str],
-    fake_impl: Optional[Callable] = None,
-    target_lib: Optional[Library] = None,
-    dispatch_key: str = "CUDA",
-    tags: Tuple[torch.Tag, ...] = (),
-):
-    # In pytorch 2.5.1, torch.library.infer_schema requires the input function
-    # to have annotations supported by the typing library. But pytorch 2.7.0,
-    # which vllm uses, passes python builtin types to infer_schema. In this
-    # case, we should revert builtin types to typing types for 2.5.1 backward
-    # compatibility.
-    for k, v in op_func.__annotations__.items():
-        if v == list[int]:
-            op_func.__annotations__[k] = List[int]
-        if v == Optional[list[int]]:
-            op_func.__annotations__[k] = Optional[List[int]]
-    # TODO: add more type convert here if needed.
-    import torch.library
-    schema_str = torch.library.infer_schema(op_func, mutates_args=mutates_args)
-    my_lib = target_lib or vllm_lib
-    my_lib.define(op_name + schema_str, tags=tags)
-    my_lib.impl(op_name, op_func, dispatch_key=dispatch_key)
-    if fake_impl is not None:
-        my_lib._register_fake(op_name, fake_impl)
-
-
-utils.direct_register_custom_op = ascend_direct_register_custom_op
```
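For context, a minimal sketch (not part of the commit) of the incompatibility the removed patch worked around: `torch.library.infer_schema` derives an operator schema string from a function's annotations. vllm annotates ops with builtin generics such as `list[int]`, which torch 2.7.x handles but torch 2.5.1 does not, so the patch rewrote them to `typing`-style generics first. The `toy_op` function below is illustrative:

```python
from typing import List

import torch


def toy_op(x: torch.Tensor, sizes: List[int]) -> torch.Tensor:
    # The body is irrelevant here; infer_schema only inspects the annotations.
    return x


# torch 2.5.1 required typing-style List[int] here; the removed patch rewrote
# list[int] -> List[int] in __annotations__ before making this call.
schema = torch.library.infer_schema(toy_op, mutates_args=[])
print(schema)  # e.g. "(Tensor x, SymInt[] sizes) -> Tensor"; exact string may vary by torch version
```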
```diff
@@ -259,16 +259,10 @@ class NPUPlatform(Platform):

         assert is_hccl_available()

-        # TODO(Yizhou): The reason we need to set options while vllm does not
-        # seems to be related to the version of PyTorch. In the latest version,
-        # there is no need to set options. While in the older version, 2.5.1
-        # specifically, we need to set options.
-        options = ProcessGroup.Options(backend=backend)
         pg: ProcessGroup = ProcessGroup(
             prefix_store,
             group_rank,
             group_size,
-            options,
         )

         backend_options = ProcessGroupHCCL.Options()
```
```diff
@@ -15,10 +15,6 @@
 # limitations under the License.
 # This file is a part of the vllm-ascend project.
 #
-# By using quantization case, this file is called before worker patch achieve,
-# we need to import patch_utils here first to make sure the patch is applied.
-import vllm_ascend.patch.worker.patch_common.patch_utils  # type: ignore[import] # isort: skip # noqa
-
 from types import MappingProxyType
 from typing import Any, Callable, Dict, List, Mapping, Optional

```