Upgrade vllm commit hash to 1216 (#5053)
### What this PR does / why we need it?
Upstream vLLM PR #30212 https://github.com/vllm-project/vllm/pull/30212
refactored the attention backend selection interface. This PR adapts
vllm-ascend's get_attn_backend_cls to align with the new upstream
standard, ensuring compatibility and reducing maintenance overhead.
### Does this PR introduce _any_ user-facing change?
### How was this patch tested?
Co-author: [leo-pony](mailto:nengjunma@outlook.com)
- vLLM version: v0.12.0
- vLLM main:
ad32e3e19c
---------
Signed-off-by: zxwang <1476209578@qq.com>
Signed-off-by: leo-pony <nengjunma@outlook.com>
Co-authored-by: leo-pony <nengjunma@outlook.com>
This commit is contained in:
@@ -355,23 +355,15 @@ class NPUPlatform(Platform):
|
||||
CUSTOM_OP_REGISTERED = True
|
||||
|
||||
@classmethod
|
||||
def get_attn_backend_cls(
|
||||
cls,
|
||||
selected_backend,
|
||||
head_size,
|
||||
dtype,
|
||||
kv_cache_dtype,
|
||||
block_size,
|
||||
use_mla,
|
||||
has_sink=False,
|
||||
use_sparse=False,
|
||||
# NOTE: Please pay special attention to the order of these parameters.
|
||||
# Although we are only using some of them so far
|
||||
# vllm passes them in sequence when using this interface.
|
||||
use_mm_prefix: bool = False,
|
||||
attn_type: str | None = None,
|
||||
):
|
||||
# choose attention backend based on use_mla
|
||||
def get_attn_backend_cls(cls, selected_backend, *args, **kwargs):
|
||||
if "attn_selector_config" in kwargs:
|
||||
use_mla = kwargs["attn_selector_config"].use_mla
|
||||
use_sparse = kwargs["attn_selector_config"].use_sparse
|
||||
else:
|
||||
use_mla = kwargs.get("use_mla",
|
||||
args[4] if len(args) >= 5 else None)
|
||||
use_sparse = kwargs.get("use_sparse",
|
||||
args[6] if len(args) >= 7 else None)
|
||||
backend_map = {
|
||||
(True, False): "vllm_ascend.attention.mla_v1.AscendMLABackend",
|
||||
(False, False):
|
||||
|
||||
Reference in New Issue
Block a user