Drop vLLM 0.13.0 support (#6069)
### What this PR does / why we need it?
Drop vLLM 0.13.0 support and upgrade to vLLM 0.14.0.
- vLLM version: v0.13.0
- vLLM main: d68209402d
---------
Signed-off-by: hfadzxy <starmoon_zhang@163.com>
```diff
@@ -12,6 +12,7 @@ from vllm.distributed import (
 )
 from vllm.forward_context import ForwardContext, get_forward_context
 from vllm.utils.math_utils import cdiv
+from vllm.v1.attention.backend import AttentionCGSupport
 from vllm.v1.kv_cache_interface import AttentionSpec, MLAAttentionSpec

 from vllm_ascend.attention.attention_v1 import AscendAttentionState
@@ -37,12 +38,7 @@ from vllm_ascend.attention.context_parallel.common_cp import (
 )
 from vllm_ascend.attention.utils import AscendCommonAttentionMetadata
 from vllm_ascend.compilation.acl_graph import get_draft_graph_params, get_graph_params, update_graph_params_workspaces
-from vllm_ascend.utils import vllm_version_is, weak_ref_tensors
-
-if vllm_version_is("0.13.0"):
-    from vllm.v1.attention.backends.utils import AttentionCGSupport
-else:
-    from vllm.v1.attention.backend import AttentionCGSupport
+from vllm_ascend.utils import weak_ref_tensors

 MAX_O_PROJ_PREFETCH_SIZE = 16 * 1024 * 1024
```
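The removed branch relied on the `vllm_version_is` helper from `vllm_ascend.utils` to pick the right import location, since `AttentionCGSupport` moved from `vllm.v1.attention.backends.utils` to `vllm.v1.attention.backend` after vLLM 0.13.0. A minimal sketch of what such a version gate can look like, assuming it simply compares the installed vLLM release string (the helper's actual implementation is not part of this diff):

```python
# Hypothetical sketch of a version-gate helper in the spirit of
# vllm_ascend.utils.vllm_version_is; the real implementation may differ.
from importlib.metadata import PackageNotFoundError, version


def vllm_version_is(target: str) -> bool:
    """Return True if the installed vLLM release string equals `target`."""
    try:
        return version("vllm") == target
    except PackageNotFoundError:
        # vLLM is not installed, so no release can match the gate.
        return False
```

With 0.13.0 support dropped, the gate collapses to the single unconditional `from vllm.v1.attention.backend import AttentionCGSupport` import shown in the first hunk.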