[Main2Main] Upgrade vllm commit to 0113 (#5839)

### What this PR does / why we need it?
Upgrade vllm commit to 0113 (11b6af5280d6d6dfb8953af16e67b25f819b3be9)

- Modify import paths to follow the upstream refactors (the new paths are
gated behind `vllm_version_is`; see the sketch after this list)
https://github.com/vllm-project/vllm/pull/31916
https://github.com/vllm-project/vllm/pull/32054

- Fix `TypeError: NPUOffloadingSpec.__init__() takes 2 positional
arguments but 3 were given`, caused by
https://github.com/vllm-project/vllm/pull/24498
(a sketch of the fix pattern also follows this list)

- Skip the async-scheduling tests in
`tests/e2e/multicard/4-cards/long_sequence/test_mtp.py`, which have never
been verified
https://github.com/vllm-project/vllm/pull/31998

- Skip some pooling tests broken by
https://github.com/vllm-project/vllm/pull/32148;
vLLM's own CI fails on them as well:
https://buildkite.com/vllm/ci/builds/46705/steps/canvas?jid=019bb329-3834-4685-862b-1613b8e0f5d4

We will re-enable these tests once main2main reaches
https://github.com/vllm-project/vllm/pull/32243

- Skip some cases in
`tests/e2e/multicard/4-cards/long_sequence/test_mtp.py` that are broken by
https://github.com/vllm-project/vllm/pull/32118
(see the pytest skip sketch below)
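
Most of the changes follow one pattern: keep the v0.13.0 import paths
working while picking up the new locations on vllm main. A minimal sketch
of the gate, taken from the diffs below (`vllm_version_is` is vllm-ascend's
own helper in `vllm_ascend/utils.py`):

```python
from vllm_ascend.utils import vllm_version_is

if vllm_version_is('0.13.0'):
    # Pre-refactor location, still valid on the v0.13.0 release.
    from vllm.v1.attention.backends.utils import AttentionCGSupport
else:
    # Post-refactor location on vllm main
    # (vllm-project/vllm#31916 / #32054).
    from vllm.v1.attention.backend import AttentionCGSupport
```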
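
For the `NPUOffloadingSpec` error, the shape of the problem and one way to
absorb it look roughly like this. This is a hedged sketch, not the actual
patch: the stand-in base class and the calling convention are hypothetical.

```python
class OffloadingSpec:  # hypothetical stand-in for the vllm base class
    def __init__(self, *args, **kwargs):
        pass


# Before vllm-project/vllm#24498 the subclass pinned the old positional
# signature, so the new caller (which now passes one more argument) raised
# "takes 2 positional arguments but 3 were given". Forwarding *args and
# **kwargs tolerates both calling conventions.
class NPUOffloadingSpec(OffloadingSpec):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
```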
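
The skipped cases are plain pytest skips; a sketch (the test name and
reason text are illustrative, the real markers live in the test files
named above):

```python
import pytest


@pytest.mark.skip(reason="Broken by vllm-project/vllm#32118; re-enable "
                         "once main2main reaches vllm-project/vllm#32243")
def test_mtp_with_async_scheduling():
    ...
```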

### Does this PR introduce _any_ user-facing change?

### How was this patch tested?

- vLLM version: v0.13.0
- vLLM main: 2f4e6548ef

Signed-off-by: wjunLu <wjunlu217@gmail.com>
Signed-off-by: hfadzxy <starmoon_zhang@163.com>
Co-authored-by: hfadzxy <starmoon_zhang@163.com>
Author: wjunLu
Committed: 2026-01-15 09:48:53 +08:00 (by GitHub)
Parent: e67608041d
Commit: c11a05c4e1
29 changed files with 229 additions and 54 deletions

View File

@@ -22,15 +22,9 @@ from typing import ClassVar, List, Optional, Tuple, Type
 import torch
 import torch_npu
 import vllm.envs as envs_vllm
-from vllm.attention.backends.abstract import (AttentionBackend, AttentionImpl,
-                                              AttentionLayer, AttentionType)
-from vllm.attention.backends.registry import (AttentionBackendEnum,
-                                              register_backend)
 from vllm.config import VllmConfig, get_current_vllm_config
 from vllm.forward_context import ForwardContext, get_forward_context
 from vllm.utils.math_utils import cdiv
-from vllm.v1.attention.backends.utils import (AttentionCGSupport,
-                                              AttentionMetadataBuilder)
 from vllm.v1.core.sched.output import SchedulerOutput
 from vllm.v1.kv_cache_interface import AttentionSpec, CrossAttentionSpec
@@ -45,7 +39,23 @@ from vllm_ascend.compilation.acl_graph import (
     update_draft_graph_params_workspaces, update_graph_params_workspaces)
 from vllm_ascend.device.device_op import DeviceOperator
 from vllm_ascend.ops.flashcomm2_oshard_manager import flashcomm2_oshard_manager
-from vllm_ascend.utils import weak_ref_tensors
+from vllm_ascend.utils import vllm_version_is, weak_ref_tensors
+
+# isort: off
+if vllm_version_is('0.13.0'):
+    from vllm.v1.attention.backends.utils import (AttentionCGSupport,
+                                                  AttentionMetadataBuilder)
+    from vllm.attention.backends.abstract import (  # type: ignore
+        AttentionBackend, AttentionImpl, AttentionLayer, AttentionType)
+    from vllm.attention.backends.registry import (  # type: ignore
+        AttentionBackendEnum, register_backend)
+else:
+    from vllm.v1.attention.backend import (  # type: ignore
+        AttentionBackend, AttentionCGSupport, AttentionImpl, AttentionLayer,
+        AttentionType, AttentionMetadataBuilder)
+    from vllm.v1.attention.backends.registry import (  # type: ignore
+        AttentionBackendEnum, register_backend)
+# isort: on
 # default max value of sliding window size
 SWA_INT_MAX = 2147483647

View File

@@ -27,7 +27,6 @@ from vllm.distributed import (get_dcp_group,
                               get_decode_context_model_parallel_world_size,
                               get_pcp_group)
 from vllm.forward_context import ForwardContext, get_forward_context
-from vllm.v1.attention.backends.utils import AttentionCGSupport
 from vllm.v1.kv_cache_interface import AttentionSpec
 from vllm_ascend.attention.attention_v1 import (AscendAttentionBackendImpl,
@@ -41,7 +40,13 @@ from vllm_ascend.attention.utils import (AscendCommonAttentionMetadata,
                                          split_decodes_and_prefills)
 from vllm_ascend.compilation.acl_graph import (get_graph_params,
                                                update_graph_params_workspaces)
-from vllm_ascend.utils import cp_chunkedprefill_comm_stream, weak_ref_tensors
+from vllm_ascend.utils import (cp_chunkedprefill_comm_stream, vllm_version_is,
+                               weak_ref_tensors)
+
+if vllm_version_is('0.13.0'):
+    from vllm.v1.attention.backends.utils import AttentionCGSupport
+else:
+    from vllm.v1.attention.backend import AttentionCGSupport
 
 class AscendAttentionCPMetadataBuilder(AscendAttentionMetadataBuilder):

View File

@@ -10,7 +10,6 @@ from vllm.distributed import (get_dcp_group,
                               get_pcp_group)
 from vllm.forward_context import ForwardContext, get_forward_context
 from vllm.utils.math_utils import cdiv
-from vllm.v1.attention.backends.utils import AttentionCGSupport
 from vllm.v1.kv_cache_interface import AttentionSpec, MLAAttentionSpec
 # isort: off
@@ -28,7 +27,12 @@ from vllm_ascend.attention.context_parallel.common_cp import (
 from vllm_ascend.compilation.acl_graph import (get_draft_graph_params,
                                                get_graph_params,
                                                update_graph_params_workspaces)
-from vllm_ascend.utils import weak_ref_tensors
+from vllm_ascend.utils import weak_ref_tensors, vllm_version_is
+
+if vllm_version_is('0.13.0'):
+    from vllm.v1.attention.backends.utils import AttentionCGSupport
+else:
+    from vllm.v1.attention.backend import AttentionCGSupport
 
 MAX_O_PROJ_PREFETCH_SIZE = 16 * 1024 * 1024

View File

@@ -5,14 +5,12 @@ import numpy as np
 import torch
 import torch_npu
 import vllm.envs as envs_vllm
-from vllm.attention.backends.abstract import AttentionBackend, MLAAttentionImpl
 from vllm.config import VllmConfig, get_current_vllm_config
 from vllm.forward_context import ForwardContext, get_forward_context
 from vllm.logger import logger
 from vllm.model_executor.layers.linear import UnquantizedLinearMethod
 from vllm.utils.math_utils import cdiv, round_down
 from vllm.v1.attention.backends.mla.common import MLACommonMetadataBuilder
-from vllm.v1.attention.backends.utils import AttentionCGSupport
 from vllm.v1.kv_cache_interface import AttentionSpec, MLAAttentionSpec
 from vllm_ascend import envs
@@ -44,10 +42,17 @@ from vllm_ascend.worker.npu_input_batch import NPUInputBatch
 if TYPE_CHECKING:
     from vllm.v1.core.sched.output import SchedulerOutput
+# isort: off
+if vllm_version_is('0.13.0'):
+    from vllm.v1.attention.backends.utils import AttentionCGSupport
+    from vllm.attention.backends.abstract import (  # type: ignore
+        AttentionBackend, MLAAttentionImpl)
+    from vllm.attention.backends.utils import PAD_SLOT_ID  # type: ignore
+else:
+    from vllm.v1.attention.backend import (  # type: ignore
+        AttentionBackend, AttentionCGSupport, MLAAttentionImpl)
+    from vllm.v1.attention.backends.utils import PAD_SLOT_ID  # type: ignore
+# isort: on
 MAX_O_PROJ_PREFETCH_SIZE = 16 * 1024 * 1024
 BUILD_METADATA_STEP_PREFILL = 0

View File

@@ -5,7 +5,6 @@ import torch
 import torch_npu
 import vllm.envs as envs_vllm
 from torch import nn
-from vllm.attention.backends.abstract import AttentionBackend, MLAAttentionImpl
 from vllm.config import CUDAGraphMode, VllmConfig, get_current_vllm_config
 from vllm.distributed import get_tensor_model_parallel_world_size, get_tp_group
 from vllm.forward_context import get_forward_context
@@ -13,7 +12,6 @@ from vllm.logger import logger
 from vllm.model_executor.layers.linear import UnquantizedLinearMethod
 from vllm.triton_utils import HAS_TRITON
 from vllm.v1.attention.backends.mla.common import MLACommonMetadataBuilder
-from vllm.v1.attention.backends.utils import AttentionCGSupport
 from vllm.v1.kv_cache_interface import AttentionSpec
 from vllm_ascend import envs
@@ -34,11 +32,20 @@ from vllm_ascend.ops.triton.rope import rope_forward_triton
 from vllm_ascend.ops.weight_prefetch import maybe_npu_prefetch
 from vllm_ascend.quantization.w8a8 import AscendW8A8LinearMethod
 from vllm_ascend.utils import (ACL_FORMAT_FRACTAL_ND, _round_up, dispose_layer,
-                               enable_dsa_cp, maybe_trans_nz)
+                               enable_dsa_cp, maybe_trans_nz, vllm_version_is)
 from vllm_ascend.worker.npu_input_batch import NPUInputBatch
+# isort: off
 if TYPE_CHECKING:
     from vllm.v1.core.sched.output import SchedulerOutput
+if vllm_version_is('0.13.0'):
+    from vllm.v1.attention.backends.utils import AttentionCGSupport
+    from vllm.attention.backends.abstract import (  # type: ignore
+        AttentionBackend, MLAAttentionImpl)
+else:
+    from vllm.v1.attention.backend import (  # type: ignore
+        AttentionBackend, AttentionCGSupport, MLAAttentionImpl)
+# isort: on
 
 class AscendSFABackend(AttentionBackend):