[Main2Main] Upgrade vllm commit to 0113 (#5839)
### What this PR does / why we need it?
Upgrade vllm commit to 0113 (11b6af5280d6d6dfb8953af16e67b25f819b3be9)
- Modify import paths to follow the upstream refactors in
https://github.com/vllm-project/vllm/pull/31916
https://github.com/vllm-project/vllm/pull/32054
- Fix `TypeError: NPUOffloadingSpec.__init__() takes 2 positional
arguments but 3 were given`, introduced by
https://github.com/vllm-project/vllm/pull/24498 (a sketch of the fix follows this list)
- Skip the async-scheduling tests in
`tests/e2e/multicard/4-cards/long_sequence/test_mtp.py`, which have never
been verified, due to
https://github.com/vllm-project/vllm/pull/31998 (the skip pattern is sketched after this list)
- Skip some pooling tests broken by
https://github.com/vllm-project/vllm/pull/32148,
where vLLM's own CI also fails:
https://buildkite.com/vllm/ci/builds/46705/steps/canvas?jid=019bb329-3834-4685-862b-1613b8e0f5d4
We will re-enable those tests once main2main reaches
https://github.com/vllm-project/vllm/pull/32243
- Skip some cases in
`tests/e2e/multicard/4-cards/long_sequence/test_mtp.py`, which are
broken by
https://github.com/vllm-project/vllm/pull/32118
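
The `NPUOffloadingSpec` fix boils down to tolerating the extra positional argument the upstream factory now passes. A minimal sketch of the failure mode and a forwarding fix, using stand-in classes and argument names rather than the actual vLLM/vllm-ascend signatures:

```python
class OffloadingSpec:  # stand-in for vLLM's base spec class
    # Before vllm-project/vllm#24498 the factory called Spec(config);
    # afterwards it passes an extra positional argument.
    def __init__(self, config, extra=None):
        self.config = config
        self.extra = extra


class NPUOffloadingSpec(OffloadingSpec):
    # An override declared as `def __init__(self, config)` raises
    # "TypeError: NPUOffloadingSpec.__init__() takes 2 positional
    # arguments but 3 were given" on new vLLM. Forwarding *args/**kwargs
    # keeps a single signature working on both v0.13.0 and main.
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)


spec = NPUOffloadingSpec({"device": "npu"}, "extra-arg")  # works on both
```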
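For the skipped e2e tests, a version-gated `pytest` skip keeps the breakage documented in the test itself. A hedged sketch (the test name is hypothetical and the repo's real markers may differ; `vllm_version_is` is the helper visible in the diff below):

```python
import pytest

from vllm_ascend.utils import vllm_version_is


# Skip on vllm main until the upstream breakage is resolved; the reason
# string records which PR broke the test.
@pytest.mark.skipif(
    not vllm_version_is("0.13.0"),
    reason="Broken on vllm main by vllm-project/vllm#32118",
)
def test_mtp_long_sequence():  # hypothetical test name
    ...
```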
### Does this PR introduce _any_ user-facing change?
### How was this patch tested?
- vLLM version: v0.13.0
- vLLM main: 2f4e6548ef
Signed-off-by: wjunLu <wjunlu217@gmail.com>
Signed-off-by: hfadzxy <starmoon_zhang@163.com>
Co-authored-by: hfadzxy <starmoon_zhang@163.com>
Representative hunk from this commit, showing the attention-backend imports made version-conditional:
```diff
@@ -22,15 +22,9 @@ from typing import ClassVar, List, Optional, Tuple, Type
 
 import torch
 import torch_npu
 import vllm.envs as envs_vllm
-from vllm.attention.backends.abstract import (AttentionBackend, AttentionImpl,
-                                              AttentionLayer, AttentionType)
-from vllm.attention.backends.registry import (AttentionBackendEnum,
-                                              register_backend)
 from vllm.config import VllmConfig, get_current_vllm_config
 from vllm.forward_context import ForwardContext, get_forward_context
 from vllm.utils.math_utils import cdiv
-from vllm.v1.attention.backends.utils import (AttentionCGSupport,
-                                              AttentionMetadataBuilder)
 from vllm.v1.core.sched.output import SchedulerOutput
 from vllm.v1.kv_cache_interface import AttentionSpec, CrossAttentionSpec
@@ -45,7 +39,23 @@ from vllm_ascend.compilation.acl_graph import (
     update_draft_graph_params_workspaces, update_graph_params_workspaces)
 from vllm_ascend.device.device_op import DeviceOperator
 from vllm_ascend.ops.flashcomm2_oshard_manager import flashcomm2_oshard_manager
-from vllm_ascend.utils import weak_ref_tensors
+from vllm_ascend.utils import vllm_version_is, weak_ref_tensors
+
+# isort: off
+if vllm_version_is('0.13.0'):
+    from vllm.v1.attention.backends.utils import (AttentionCGSupport,
+                                                  AttentionMetadataBuilder)
+    from vllm.attention.backends.abstract import (  # type: ignore
+        AttentionBackend, AttentionImpl, AttentionLayer, AttentionType)
+    from vllm.attention.backends.registry import (  # type: ignore
+        AttentionBackendEnum, register_backend)
+else:
+    from vllm.v1.attention.backend import (  # type: ignore
+        AttentionBackend, AttentionCGSupport, AttentionImpl, AttentionLayer,
+        AttentionType, AttentionMetadataBuilder)
+    from vllm.v1.attention.backends.registry import (  # type: ignore
+        AttentionBackendEnum, register_backend)
+# isort: on
 
 # default max value of sliding window size
 SWA_INT_MAX = 2147483647
```
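
In the hunk above, the `# isort: off` / `# isort: on` pair keeps isort from reordering the version-conditional block, and `vllm_version_is('0.13.0')` selects the legacy `vllm.attention.backends.*` and `vllm.v1.attention.backends.utils` paths on the pinned release, while newer mains use the refactored `vllm.v1.attention.backend` module.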