Bump torch version to 2.7.1 (#1562)

### What this PR does / why we need it?
Bump torch version to 2.7.1 and clean up the infer schema patch introduced in
https://github.com/vllm-project/vllm-ascend/commit/857f489
(https://github.com/vllm-project/vllm-ascend/pull/837). This patch also
depends on https://github.com/vllm-project/vllm-ascend/pull/1974.
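
For context on why the shim can go: with torch 2.7, `torch.library.infer_schema` accepts builtin generic annotations such as `list[int]` directly, so the annotation rewriting the patch performed for torch 2.5.1 is no longer needed. A minimal sketch (the op function here is illustrative):

```
import torch
from torch.library import infer_schema


def scale(x: torch.Tensor, sizes: list[int]) -> torch.Tensor:
    return x


# On torch >= 2.7 the builtin list[int] annotation is parsed directly;
# torch 2.5.1 required typing.List[int] instead.
print(infer_schema(scale, mutates_args=[]))
```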

### Does this PR introduce any user-facing change?
No

### How was this patch tested?
CI passed

torch-npu 2.7.1rc1 install guide:
https://gitee.com/ascend/pytorch/tree/v2.7.1/
install dependencies:
```
pip3 install pyyaml
pip3 install setuptools
```
install torch-npu:
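A likely command, assuming the 2.7.1rc1 wheel is published on PyPI (check the install guide above for the authoritative steps):
```
pip3 install torch-npu==2.7.1rc1
```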

Closes: https://github.com/vllm-project/vllm-ascend/issues/1866
Closes: https://github.com/vllm-project/vllm-ascend/issues/1390


- vLLM version: v0.10.0
- vLLM main: 9af654cc38

---------

Signed-off-by: Yikun Jiang <yikunkero@gmail.com>
Signed-off-by: leo-pony <nengjunma@outlook.com>
Co-authored-by: Yikun Jiang <yikunkero@gmail.com>
Commit: 807f0895b2 (parent: 36e450eb0f)
Author: leo-pony
Date: 2025-08-05 08:43:24 +08:00
Committed by: GitHub
15 changed files with 14 additions and 194 deletions

File: vllm_ascend/patch/worker/patch_common/__init__.py

@@ -15,9 +15,6 @@
 # limitations under the License.
 #
-# patch_utils should be the first import, because it will be used by other
-# patch files.
-import vllm_ascend.patch.worker.patch_common.patch_utils  # noqa isort:skip
 import vllm_ascend.patch.worker.patch_common.patch_distributed  # noqa
 import vllm_ascend.patch.worker.patch_common.patch_linear  # noqa
 import vllm_ascend.patch.worker.patch_common.patch_minicpm  # noqa

File: vllm_ascend/patch/worker/patch_common/patch_utils.py (deleted)

@@ -1,38 +0,0 @@
from typing import Callable, List, Optional, Tuple

import torch
from torch.library import Library

from vllm import utils
from vllm.utils import vllm_lib


def ascend_direct_register_custom_op(
    op_name: str,
    op_func: Callable,
    mutates_args: list[str],
    fake_impl: Optional[Callable] = None,
    target_lib: Optional[Library] = None,
    dispatch_key: str = "CUDA",
    tags: Tuple[torch.Tag, ...] = (),
):
    # In PyTorch 2.5.1, torch.library.infer_schema requires the input function
    # to have annotations supported by the typing library. But in PyTorch
    # 2.7.0, which vLLM uses, torch.library.infer_schema requires Python
    # builtin types. In this case, we should revert builtin types to typing
    # types for 2.5.1 backward compatibility.
    for k, v in op_func.__annotations__.items():
        if v == list[int]:
            op_func.__annotations__[k] = List[int]
        if v == Optional[list[int]]:
            op_func.__annotations__[k] = Optional[List[int]]
        # TODO: add more type conversions here if needed.
    import torch.library
    schema_str = torch.library.infer_schema(op_func, mutates_args=mutates_args)
    my_lib = target_lib or vllm_lib
    my_lib.define(op_name + schema_str, tags=tags)
    my_lib.impl(op_name, op_func, dispatch_key=dispatch_key)
    if fake_impl is not None:
        my_lib._register_fake(op_name, fake_impl)


# Replace vLLM's op registration helper with the Ascend-compatible version.
utils.direct_register_custom_op = ascend_direct_register_custom_op
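
For illustration, the kind of call site this shim served; the op and function names below are hypothetical:

```
from typing import Optional

import torch
from vllm import utils


def dummy_rotary_embedding(
    positions: torch.Tensor,
    query: torch.Tensor,
    head_sizes: list[int],
    offsets: Optional[list[int]] = None,
) -> torch.Tensor:
    # Placeholder body; a real op would launch the NPU kernel here.
    return query


# Before this cleanup, the patched helper rewrote the builtin list[int]
# annotations above to typing.List[int] so that torch.library.infer_schema
# on torch 2.5.1 could parse them; on torch 2.7.1 they pass through as-is.
utils.direct_register_custom_op(
    op_name="dummy_rotary_embedding",
    op_func=dummy_rotary_embedding,
    mutates_args=[],
)
```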