[Main2Main] Upgrade vllm commit to 0113 (#5839)

### What this PR does / why we need it?
Upgrade vllm commit to 0113 (11b6af5280d6d6dfb8953af16e67b25f819b3be9)

- Modify import paths to follow the upstream refactors:
https://github.com/vllm-project/vllm/pull/31916
https://github.com/vllm-project/vllm/pull/32054
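
The resulting idiom, shown concretely in the diffs below, gates each moved import on the installed vllm version:

```python
from vllm_ascend.utils import vllm_version_is

# isort: off
if vllm_version_is('0.13.0'):
    # old import path, kept for the released vllm version
    from vllm.attention.backends.abstract import AttentionMetadata
else:
    # new import path after the upstream refactor
    from vllm.v1.attention.backend import AttentionMetadata
# isort: on
```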

- Fix `TypeError: NPUOffloadingSpec.__init__() takes 2 positional
arguments but 3 were given` due to
https://github.com/vllm-project/vllm/pull/24498
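
A hedged sketch of the shape of this fix; `OffloadingSpec` below is a local stand-in for vllm's base class, and the extra parameter is a placeholder whose real name comes from the PR above:

```python
class OffloadingSpec:  # stand-in for vllm's base class; the real one lives in vllm
    def __init__(self, vllm_config, extra=None):
        # newer vllm passes a second positional argument here
        self.vllm_config = vllm_config
        self.extra = extra


class NPUOffloadingSpec(OffloadingSpec):
    # Forward whatever positionals the caller supplies, so the subclass
    # keeps working before and after the upstream signature change.
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
```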

- Skip the async-scheduling tests in
`tests/e2e/multicard/4-cards/long_sequence/test_mtp.py`, which have never
been verified:
https://github.com/vllm-project/vllm/pull/31998
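
A minimal sketch of the skip pattern used for such cases (the test name and body here are illustrative, not the actual test):

```python
import pytest


@pytest.mark.skip(reason="async scheduling with MTP has never been verified; "
                  "see https://github.com/vllm-project/vllm/pull/31998")
def test_mtp_with_async_scheduling():
    ...
```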

- Skip some pooling tests whose failures are caused by
https://github.com/vllm-project/vllm/pull/32148
and on which vLLM's own CI also fails:
https://buildkite.com/vllm/ci/builds/46705/steps/canvas?jid=019bb329-3834-4685-862b-1613b8e0f5d4

We will re-enable those tests once main2main reaches
https://github.com/vllm-project/vllm/pull/32243

- Skip some cases in
`tests/e2e/multicard/4-cards/long_sequence/test_mtp.py` that are broken by
https://github.com/vllm-project/vllm/pull/32118
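
When only some cases of a parametrized test are broken, the usual pytest idiom is to mark individual parameters, roughly like this (parameter names and values are placeholders, not the actual cases):

```python
import pytest


@pytest.mark.parametrize(
    "num_speculative_tokens",
    [
        1,
        pytest.param(
            2,
            marks=pytest.mark.skip(
                reason="broken by https://github.com/vllm-project/vllm/pull/32118"),
        ),
    ],
)
def test_mtp_long_sequence(num_speculative_tokens):
    ...
```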

### Does this PR introduce _any_ user-facing change?

### How was this patch tested?

- vLLM version: v0.13.0
- vLLM main: 2f4e6548ef

Signed-off-by: wjunLu <wjunlu217@gmail.com>
Signed-off-by: hfadzxy <starmoon_zhang@163.com>
Co-authored-by: hfadzxy <starmoon_zhang@163.com>
Commit c11a05c4e1 (parent e67608041d), authored by wjunLu on 2026-01-15 09:48:53 +08:00 and committed by GitHub.
29 changed files with 229 additions and 54 deletions.


@@ -3,7 +3,6 @@ from typing import Any, Optional
 import torch
 import zmq
-from vllm.attention.backends.abstract import AttentionMetadata
 from vllm.config import VllmConfig
 from vllm.distributed.kv_transfer.kv_connector.v1.base import (
     KVConnectorBase_V1, KVConnectorMetadata, KVConnectorRole)
@@ -20,6 +19,14 @@ from vllm_ascend.distributed.kv_transfer.kv_pool.ascend_store.pool_scheduler imp
     KVPoolScheduler, get_zmq_rpc_path_lookup)
 from vllm_ascend.distributed.kv_transfer.kv_pool.ascend_store.pool_worker import \
     KVPoolWorker
+from vllm_ascend.utils import vllm_version_is
+
+# isort: off
+if vllm_version_is('0.13.0'):
+    from vllm.attention.backends.abstract import AttentionMetadata  # type: ignore
+else:
+    from vllm.v1.attention.backend import AttentionMetadata  # type: ignore
+# isort: on


 class AscendStoreConnector(KVConnectorBase_V1):


@@ -9,7 +9,6 @@ from dataclasses import dataclass
 from typing import TYPE_CHECKING, Any, Optional, Sequence

 import torch
-from vllm.attention.backends.abstract import AttentionType
 from vllm.attention.layer import Attention, MLAAttention
 from vllm.config import VllmConfig, get_layers_from_vllm_config
 from vllm.distributed.ec_transfer import get_ec_transfer, has_ec_transfer
@@ -26,13 +25,25 @@ from vllm.v1.kv_cache_interface import (FullAttentionSpec, KVCacheSpec,
 from vllm_ascend.distributed.kv_transfer.kv_pool.ascend_store.metadata import (
     MetadataServer, MetadataServerProc, MLAConfig)
+from vllm_ascend.utils import vllm_version_is
+
+# isort: off
+if vllm_version_is('0.13.0'):
+    from vllm.attention.backends.abstract import AttentionType  # type: ignore
+else:
+    from vllm.v1.attention.backend import AttentionType  # type: ignore

 if TYPE_CHECKING:
-    from vllm.attention.backends.abstract import AttentionMetadata
+    if vllm_version_is('0.13.0'):
+        from vllm.attention.backends.abstract import \
+            AttentionMetadata  # type: ignore
+    else:
+        from vllm.v1.attention.backend import AttentionMetadata  # type: ignore
     from vllm.forward_context import ForwardContext
     from vllm.v1.core.kv_cache_manager import KVCacheBlocks
     from vllm.v1.kv_cache_interface import KVCacheConfig
     from vllm.v1.request import Request
+# isort: on


 @dataclass