Drop vLLM 0.13.0 support (#6069)
### What this PR does / why we need it?
Drop vLLM 0.13.0 support, upgrade to 0.14.0
- vLLM version: v0.13.0
- vLLM main: d68209402d
---------
Signed-off-by: hfadzxy <starmoon_zhang@163.com>
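
For context, the code removed below relies on a version gate: `vllm_ascend.utils.vllm_version_is` chooses between the 0.13.0 and 0.14.0 import paths for the attention backend interfaces. The following is only a minimal sketch of that pattern; the helper body shown here (based on `importlib.metadata`) is an assumption, while the two import layouts are taken directly from the diff.

```python
# Illustrative sketch of the compatibility gate this PR removes.
# Assumption: vllm_version_is compares the installed vLLM release string
# against an expected value (the real helper lives in vllm_ascend.utils).
from importlib.metadata import version


def vllm_version_is(expected: str) -> bool:
    """Return True if the installed vLLM release matches `expected`."""
    installed = version("vllm").split("+")[0]  # drop any local version suffix
    return installed == expected


if vllm_version_is("0.13.0"):
    # Pre-0.14.0 layout: the interfaces were split across two modules.
    from vllm.attention.backends.abstract import (  # type: ignore
        AttentionBackend, MLAAttentionImpl)
    from vllm.v1.attention.backends.utils import AttentionCGSupport
else:
    # 0.14.0 layout: a single module exposes all three symbols, so the
    # branch above becomes dead code once 0.13.0 support is dropped.
    from vllm.v1.attention.backend import (  # type: ignore
        AttentionBackend, AttentionCGSupport, MLAAttentionImpl)
```

With only vLLM 0.14.0 supported, the gate collapses to the unconditional `vllm.v1.attention.backend` import, which is exactly what the diff below does.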
```diff
@@ -12,6 +12,8 @@ from vllm.forward_context import get_forward_context
 from vllm.logger import logger
 from vllm.model_executor.layers.linear import UnquantizedLinearMethod
 from vllm.triton_utils import HAS_TRITON
+from vllm.v1.attention.backend import ( # type: ignore
+    AttentionBackend, AttentionCGSupport, MLAAttentionImpl)
 from vllm.v1.attention.backends.mla.common import MLACommonMetadataBuilder
 from vllm.v1.kv_cache_interface import AttentionSpec
 
@@ -35,20 +37,11 @@ from vllm_ascend.ops.triton.rope import rope_forward_triton
 from vllm_ascend.ops.weight_prefetch import maybe_npu_prefetch
 from vllm_ascend.quantization.w8a8 import AscendW8A8LinearMethod
 from vllm_ascend.utils import (ACL_FORMAT_FRACTAL_ND, _round_up, dispose_layer,
-                               enable_dsa_cp, enable_dsa_cp_with_layer_shard, maybe_trans_nz, vllm_version_is)
+                               enable_dsa_cp, enable_dsa_cp_with_layer_shard, maybe_trans_nz)
 from vllm_ascend.worker.npu_input_batch import NPUInputBatch
 
 # isort: off
 if TYPE_CHECKING:
     from vllm.v1.core.sched.output import SchedulerOutput
-if vllm_version_is('0.13.0'):
-    from vllm.v1.attention.backends.utils import AttentionCGSupport
-    from vllm.attention.backends.abstract import ( # type: ignore
-        AttentionBackend, MLAAttentionImpl)
-else:
-    from vllm.v1.attention.backend import ( # type: ignore
-        AttentionBackend, AttentionCGSupport, MLAAttentionImpl)
 # isort: on
 
 # token count limits within bmm_transpose operator
 BMM_TRANS_MAX_SUPPORTED_TOKENS = 1024
```