Drop vLLM 0.13.0 support (#6069)
### What this PR does / why we need it?
Drop vLLM 0.13.0 support and upgrade to 0.14.0.
- vLLM version: v0.13.0
- vLLM main: d68209402d
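
In practice the change deletes the `vllm_version_is("0.13.0")` import fallbacks and keeps only the new vLLM 0.14.0 module paths. Below is a minimal sketch of the gated pattern this PR removes; the module paths are taken from the hunks further down, while the surrounding framing is illustrative only:

```python
# Compatibility shim removed by this PR: attention symbols used to be imported
# through a runtime version gate so vllm-ascend could run on vLLM 0.13.0 and newer.
from vllm_ascend.utils import vllm_version_is

if vllm_version_is("0.13.0"):
    # old vLLM 0.13.0 module layout
    from vllm.attention.backends.abstract import (  # type: ignore
        AttentionBackend, AttentionImpl, AttentionLayer, AttentionType)
    from vllm.v1.attention.backends.utils import (
        AttentionCGSupport, AttentionMetadataBuilder)
else:
    # vLLM >= 0.14.0 module layout
    from vllm.v1.attention.backend import (  # type: ignore
        AttentionBackend, AttentionCGSupport, AttentionImpl,
        AttentionLayer, AttentionMetadataBuilder, AttentionType)

# After this PR only the unconditional >= 0.14.0 form remains, e.g.:
# from vllm.v1.attention.backend import AttentionBackend, AttentionCGSupport, ...
```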
---------
Signed-off-by: hfadzxy <starmoon_zhang@163.com>
@@ -25,6 +25,18 @@ import vllm.envs as envs_vllm
 from vllm.config import VllmConfig, get_current_vllm_config
 from vllm.forward_context import ForwardContext, get_forward_context
 from vllm.utils.math_utils import cdiv
+from vllm.v1.attention.backend import ( # type: ignore
+    AttentionBackend,
+    AttentionCGSupport,
+    AttentionImpl,
+    AttentionLayer,
+    AttentionMetadataBuilder,
+    AttentionType,
+)
+from vllm.v1.attention.backends.registry import ( # type: ignore
+    AttentionBackendEnum,
+    register_backend,
+)
 from vllm.v1.core.sched.output import SchedulerOutput
 from vllm.v1.kv_cache_interface import AttentionSpec, CrossAttentionSpec
@@ -44,34 +56,7 @@ from vllm_ascend.compilation.acl_graph import (
 )
 from vllm_ascend.device.device_op import DeviceOperator
 from vllm_ascend.ops.flashcomm2_oshard_manager import flashcomm2_oshard_manager
-from vllm_ascend.utils import vllm_version_is, weak_ref_tensors
-
-if vllm_version_is("0.13.0"):
-    from vllm.attention.backends.abstract import ( # type: ignore
-        AttentionBackend,
-        AttentionImpl,
-        AttentionLayer,
-        AttentionType,
-    )
-    from vllm.attention.backends.registry import ( # type: ignore
-        AttentionBackendEnum,
-        register_backend,
-    )
-    from vllm.v1.attention.backends.utils import AttentionCGSupport, AttentionMetadataBuilder
-else:
-    from vllm.v1.attention.backend import ( # type: ignore
-        AttentionBackend,
-        AttentionCGSupport,
-        AttentionImpl,
-        AttentionLayer,
-        AttentionMetadataBuilder,
-        AttentionType,
-    )
-    from vllm.v1.attention.backends.registry import ( # type: ignore
-        AttentionBackendEnum,
-        register_backend,
-    )
-
+from vllm_ascend.utils import weak_ref_tensors
 
 # default max value of sliding window size
 SWA_INT_MAX = 2147483647
@@ -29,6 +29,7 @@ from vllm.distributed import (
     get_pcp_group,
 )
 from vllm.forward_context import ForwardContext, get_forward_context
+from vllm.v1.attention.backend import AttentionCGSupport
 from vllm.v1.kv_cache_interface import AttentionSpec
 
 from vllm_ascend.attention.attention_v1 import (
@@ -49,12 +50,7 @@ from vllm_ascend.attention.utils import (
     split_decodes_and_prefills,
 )
 from vllm_ascend.compilation.acl_graph import get_graph_params, update_graph_params_workspaces
-from vllm_ascend.utils import cp_chunkedprefill_comm_stream, vllm_version_is, weak_ref_tensors
-
-if vllm_version_is("0.13.0"):
-    from vllm.v1.attention.backends.utils import AttentionCGSupport
-else:
-    from vllm.v1.attention.backend import AttentionCGSupport
+from vllm_ascend.utils import cp_chunkedprefill_comm_stream, weak_ref_tensors
 
 
 class AscendAttentionCPMetadataBuilder(AscendAttentionMetadataBuilder):
@@ -12,6 +12,7 @@ from vllm.distributed import (
 )
 from vllm.forward_context import ForwardContext, get_forward_context
 from vllm.utils.math_utils import cdiv
+from vllm.v1.attention.backend import AttentionCGSupport
 from vllm.v1.kv_cache_interface import AttentionSpec, MLAAttentionSpec
 
 from vllm_ascend.attention.attention_v1 import AscendAttentionState
@@ -37,12 +38,7 @@ from vllm_ascend.attention.context_parallel.common_cp import (
 )
 from vllm_ascend.attention.utils import AscendCommonAttentionMetadata
 from vllm_ascend.compilation.acl_graph import get_draft_graph_params, get_graph_params, update_graph_params_workspaces
-from vllm_ascend.utils import vllm_version_is, weak_ref_tensors
-
-if vllm_version_is("0.13.0"):
-    from vllm.v1.attention.backends.utils import AttentionCGSupport
-else:
-    from vllm.v1.attention.backend import AttentionCGSupport
+from vllm_ascend.utils import weak_ref_tensors
 
 MAX_O_PROJ_PREFETCH_SIZE = 16 * 1024 * 1024
 
@@ -10,7 +10,10 @@ from vllm.forward_context import ForwardContext, get_forward_context
 from vllm.logger import logger
 from vllm.model_executor.layers.linear import UnquantizedLinearMethod
 from vllm.utils.math_utils import cdiv, round_down
+from vllm.v1.attention.backend import ( # type: ignore
+    AttentionBackend, AttentionCGSupport, MLAAttentionImpl)
 from vllm.v1.attention.backends.mla.common import MLACommonMetadataBuilder
+from vllm.v1.attention.backends.utils import PAD_SLOT_ID # type: ignore
 from vllm.v1.kv_cache_interface import AttentionSpec, MLAAttentionSpec
 
 from vllm_ascend import envs
@@ -35,23 +38,12 @@ from vllm_ascend.ops.rotary_embedding import get_cos_and_sin_mla
 from vllm_ascend.ops.weight_prefetch import maybe_npu_prefetch
 from vllm_ascend.quantization.w8a8 import AscendW8A8LinearMethod
 from vllm_ascend.utils import (ACL_FORMAT_FRACTAL_ND, maybe_trans_nz,
-                               vllm_version_is, weak_ref_tensors)
+                               weak_ref_tensors)
 from vllm_ascend.worker.npu_input_batch import NPUInputBatch
 
 if TYPE_CHECKING:
     from vllm.v1.core.sched.output import SchedulerOutput
 
-# isort: off
-if vllm_version_is('0.13.0'):
-    from vllm.v1.attention.backends.utils import AttentionCGSupport
-    from vllm.attention.backends.abstract import ( # type: ignore
-        AttentionBackend, MLAAttentionImpl)
-    from vllm.attention.backends.utils import PAD_SLOT_ID # type: ignore
-else:
-    from vllm.v1.attention.backend import ( # type: ignore
-        AttentionBackend, AttentionCGSupport, MLAAttentionImpl)
-    from vllm.v1.attention.backends.utils import PAD_SLOT_ID # type: ignore
-# isort: on
 
 MAX_O_PROJ_PREFETCH_SIZE = 16 * 1024 * 1024
 BUILD_METADATA_STEP_PREFILL = 0
@@ -12,6 +12,8 @@ from vllm.forward_context import get_forward_context
 from vllm.logger import logger
 from vllm.model_executor.layers.linear import UnquantizedLinearMethod
 from vllm.triton_utils import HAS_TRITON
+from vllm.v1.attention.backend import ( # type: ignore
+    AttentionBackend, AttentionCGSupport, MLAAttentionImpl)
 from vllm.v1.attention.backends.mla.common import MLACommonMetadataBuilder
 from vllm.v1.kv_cache_interface import AttentionSpec
 
@@ -35,20 +37,11 @@ from vllm_ascend.ops.triton.rope import rope_forward_triton
 from vllm_ascend.ops.weight_prefetch import maybe_npu_prefetch
 from vllm_ascend.quantization.w8a8 import AscendW8A8LinearMethod
 from vllm_ascend.utils import (ACL_FORMAT_FRACTAL_ND, _round_up, dispose_layer,
-                               enable_dsa_cp, enable_dsa_cp_with_layer_shard, maybe_trans_nz, vllm_version_is)
+                               enable_dsa_cp, enable_dsa_cp_with_layer_shard, maybe_trans_nz)
 from vllm_ascend.worker.npu_input_batch import NPUInputBatch
 
-# isort: off
 if TYPE_CHECKING:
     from vllm.v1.core.sched.output import SchedulerOutput
-if vllm_version_is('0.13.0'):
-    from vllm.v1.attention.backends.utils import AttentionCGSupport
-    from vllm.attention.backends.abstract import ( # type: ignore
-        AttentionBackend, MLAAttentionImpl)
-else:
-    from vllm.v1.attention.backend import ( # type: ignore
-        AttentionBackend, AttentionCGSupport, MLAAttentionImpl)
-# isort: on
 
 # token count limits within bmm_transpose operator
 BMM_TRANS_MAX_SUPPORTED_TOKENS = 1024