Drop vLLM 0.13.0 support (#6069)

### What this PR does / why we need it?
Drop vLLM 0.13.0 support and upgrade to 0.14.0

- vLLM version: v0.13.0
- vLLM main:
d68209402d

---------

Signed-off-by: hfadzxy <starmoon_zhang@163.com>
This commit is contained in:
zhangxinyuehfad
2026-01-23 09:45:08 +08:00
committed by GitHub
parent 27a513b672
commit 819a4459ce
39 changed files with 86 additions and 272 deletions

View File

@@ -10,7 +10,10 @@ from vllm.forward_context import ForwardContext, get_forward_context
from vllm.logger import logger
from vllm.model_executor.layers.linear import UnquantizedLinearMethod
from vllm.utils.math_utils import cdiv, round_down
from vllm.v1.attention.backend import ( # type: ignore
AttentionBackend, AttentionCGSupport, MLAAttentionImpl)
from vllm.v1.attention.backends.mla.common import MLACommonMetadataBuilder
from vllm.v1.attention.backends.utils import PAD_SLOT_ID # type: ignore
from vllm.v1.kv_cache_interface import AttentionSpec, MLAAttentionSpec
from vllm_ascend import envs
@@ -35,23 +38,12 @@ from vllm_ascend.ops.rotary_embedding import get_cos_and_sin_mla
from vllm_ascend.ops.weight_prefetch import maybe_npu_prefetch
from vllm_ascend.quantization.w8a8 import AscendW8A8LinearMethod
from vllm_ascend.utils import (ACL_FORMAT_FRACTAL_ND, maybe_trans_nz,
vllm_version_is, weak_ref_tensors)
weak_ref_tensors)
from vllm_ascend.worker.npu_input_batch import NPUInputBatch
if TYPE_CHECKING:
from vllm.v1.core.sched.output import SchedulerOutput
# isort: off
if vllm_version_is('0.13.0'):
from vllm.v1.attention.backends.utils import AttentionCGSupport
from vllm.attention.backends.abstract import ( # type: ignore
AttentionBackend, MLAAttentionImpl)
from vllm.attention.backends.utils import PAD_SLOT_ID # type: ignore
else:
from vllm.v1.attention.backend import ( # type: ignore
AttentionBackend, AttentionCGSupport, MLAAttentionImpl)
from vllm.v1.attention.backends.utils import PAD_SLOT_ID # type: ignore
# isort: on
MAX_O_PROJ_PREFETCH_SIZE = 16 * 1024 * 1024
BUILD_METADATA_STEP_PREFILL = 0