[KV-Sharing] Support KV-Sharing feature in CLA models (#4138)

### What this PR does / why we need it?
Support the KV-Sharing feature in CLA (cross-layer attention) models, which share the KV cache across some layers.
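As a rough illustration of what a CLA model looks like on the vLLM side (a sketch assuming vLLM's `Attention` layer accepts a `kv_sharing_target_layer_name` argument, which the backend change below plumbs through; the layer class, config fields, and layer names are hypothetical, not the actual gemma-3n code):

```python
# Hedged sketch: a CLA-style decoder layer opting into KV sharing.
# `CLADecoderLayer`, `shares_kv_with`, and the config fields are
# illustrative placeholders, not code from this PR.
from vllm.attention import Attention


class CLADecoderLayer:

    def __init__(self, config, prefix: str, shares_kv_with: str | None = None):
        # When `shares_kv_with` names an earlier layer, this layer allocates
        # no KV cache of its own and reads the target layer's cache instead.
        self.attn = Attention(
            num_heads=config.num_attention_heads,
            head_size=config.head_dim,
            scale=config.head_dim**-0.5,
            num_kv_heads=config.num_key_value_heads,
            prefix=f"{prefix}.attn",
            kv_sharing_target_layer_name=shares_kv_with,  # e.g. "model.layers.0.attn"
        )
```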

- vLLM version: v0.12.0
- vLLM main: ad32e3e19c
---------
Signed-off-by: MengqingCao <cmq0113@163.com>
Author: Mengqing Cao
Date: 2025-12-23 10:48:31 +08:00
Committed by: GitHub
Parent: 9a79cbaecb
Commit: 449f8f65a7
5 changed files with 105 additions and 19 deletions

View File

@@ -105,6 +105,7 @@ jobs:
           pytest -sv --durations=0 tests/e2e/singlecard/test_xlite.py
           pytest -sv --durations=0 tests/e2e/singlecard/pooling/
           pytest -sv --durations=0 tests/e2e/singlecard/compile/test_norm_quant_fusion.py
+          pytest -sv --durations=0 tests/e2e/singlecard/test_cross_layer_attn_model.py
           # ------------------------------------ v1 spec decode test ------------------------------------ #
           pytest -sv --durations=0 tests/e2e/singlecard/spec_decode_v1/test_v1_mtp_correctness.py

View File

@@ -23,3 +23,4 @@ mindstudio-probe>=8.3.0
 arctic-inference==0.1.1
 xlite
 uc-manager
+timm

View File

@@ -0,0 +1,69 @@
#
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
# Copyright 2023 The vLLM team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Compare the outputs of cross layer attention model with and without aclgraph.
Run `pytest tests/e2e/singlecard/test_cross_layer_attn_model.py`.
"""
import os
import pytest
from tests.e2e.conftest import VllmRunner
from tests.e2e.model_utils import check_outputs_equal
os.environ["VLLM_USE_MODELSCOPE"] = "True"
os.environ["VLLM_WORKER_MULTIPROC_METHOD"] = "spawn"
MODELS = [
"google/gemma-3n-E2B-it",
]
@pytest.mark.parametrize("model", MODELS)
@pytest.mark.parametrize("max_tokens", [32])
def test_models_with_aclgraph(
model: str,
max_tokens: int,
) -> None:
prompts = [
"Hello, my name is", "The president of the United States is",
"The capital of France is", "The future of AI is"
]
with VllmRunner(
model,
max_model_len=1024,
enforce_eager=False,
cudagraph_capture_sizes=[4],
) as vllm_model:
vllm_aclgraph_outputs = vllm_model.generate_greedy(prompts, max_tokens)
with VllmRunner(
model,
max_model_len=1024,
enforce_eager=True,
) as vllm_model:
vllm_eager_outputs = vllm_model.generate_greedy(prompts, max_tokens)
check_outputs_equal(
outputs_0_lst=vllm_eager_outputs,
outputs_1_lst=vllm_aclgraph_outputs,
name_0="vllm_eager_outputs",
name_1="vllm_aclgraph_outputs",
)

View File

@@ -307,6 +307,7 @@ class AscendAttentionBackendImpl(AttentionImpl):
device="npu") device="npu")
self.alibi_slopes = alibi_slopes self.alibi_slopes = alibi_slopes
self.attn_type = attn_type self.attn_type = attn_type
self.kv_sharing_target_layer_name = kv_sharing_target_layer_name
assert self.num_heads % self.num_kv_heads == 0 assert self.num_heads % self.num_kv_heads == 0
self.num_queries_per_kv = self.num_heads // self.num_kv_heads self.num_queries_per_kv = self.num_heads // self.num_kv_heads
@@ -618,6 +619,7 @@ class AscendAttentionBackendImpl(AttentionImpl):
if len(kv_cache) > 1: if len(kv_cache) > 1:
if self.key_cache is None: if self.key_cache is None:
self.key_cache, self.value_cache = kv_cache[0], kv_cache[1] self.key_cache, self.value_cache = kv_cache[0], kv_cache[1]
if self.kv_sharing_target_layer_name is None:
slots = attn_metadata.slot_mapping slots = attn_metadata.slot_mapping
if get_ascend_device_type() == AscendDeviceType.A5: if get_ascend_device_type() == AscendDeviceType.A5:
# TODO: Once eagle running to here, it may has error because of the 0 dim of slot_mapping. # TODO: Once eagle running to here, it may has error because of the 0 dim of slot_mapping.
@@ -625,7 +627,8 @@ class AscendAttentionBackendImpl(AttentionImpl):
# If it's necessary, the slots should be sliced. # If it's necessary, the slots should be sliced.
torch_npu.npu_scatter_pa_kv_cache( torch_npu.npu_scatter_pa_kv_cache(
key=key[:attn_metadata.num_actual_tokens], key=key[:attn_metadata.num_actual_tokens],
value=value[:attn_metadata.num_actual_tokens].contiguous(), value=value[:attn_metadata.
num_actual_tokens].contiguous(),
key_cache=self.key_cache, key_cache=self.key_cache,
value_cache=self.value_cache, value_cache=self.value_cache,
slot_mapping=slots) slot_mapping=slots)
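The net effect of the new guard: a layer whose `kv_sharing_target_layer_name` is set never scatters its own K/V into the cache, since the target layer has already populated the shared tensors. A minimal sketch of that pattern, assuming the Ascend `torch_npu` environment from the diff above (the helper name is hypothetical, not the actual `AscendAttentionBackendImpl` code):

```python
# Hedged sketch of the skip-write pattern; `maybe_write_kv_cache` is a
# hypothetical helper, and torch_npu requires an Ascend NPU environment.
import torch_npu


def maybe_write_kv_cache(key, value, key_cache, value_cache, attn_metadata,
                         kv_sharing_target_layer_name):
    if kv_sharing_target_layer_name is not None:
        # This layer reuses another layer's KV cache: the target layer has
        # already populated the shared tensors, so skip the scatter entirely.
        return
    num_tokens = attn_metadata.num_actual_tokens
    torch_npu.npu_scatter_pa_kv_cache(
        key=key[:num_tokens],
        value=value[:num_tokens].contiguous(),
        key_cache=key_cache,
        value_cache=value_cache,
        slot_mapping=attn_metadata.slot_mapping)
```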

View File

@@ -1195,6 +1195,10 @@ class NPUModelRunner(GPUModelRunner):
     def _build_attn_state(self, num_reqs, num_scheduled_tokens,
                           num_valid_tokens):
+        if self.shared_kv_cache_layers is not None:
+            # Sharing KV cache across layers requires reading the KV cache,
+            # so directly return ChunkedPrefill in this scenario.
+            return AscendAttentionState.ChunkedPrefill
         if np.array_equal(self.seq_lens.np[:num_reqs], num_scheduled_tokens):
             attn_state = AscendAttentionState.PrefillNoCache
         # We assume it is the decode stage, where prefill occurs but only one token is not hit in cache.
@@ -2243,6 +2247,7 @@ class NPUModelRunner(GPUModelRunner):
         kv_cache_config = deepcopy(kv_cache_config)
         self.kv_cache_config = kv_cache_config
         self.may_add_encoder_only_layers_to_kv_cache_config()
+        self.maybe_add_kv_sharing_layers_to_kv_cache_groups(kv_cache_config)
         # NOTE(cmq): initialize_attn_backend must run before using self.attn_groups
         self.initialize_attn_backend(kv_cache_config)
         self.use_hybrid_blocks = (len(self.attn_groups) > 1)
@@ -2282,6 +2287,13 @@ class NPUModelRunner(GPUModelRunner):
         kv_caches = self._reshape_kv_cache_tensors(kv_cache_config,
                                                    kv_cache_raw_tensors)
+        # Set up cross-layer KV cache sharing
+        for layer_name, target_layer_name in self.shared_kv_cache_layers.items():
+            logger.debug("%s reuses KV cache of %s", layer_name,
+                         target_layer_name)
+            kv_caches[layer_name] = kv_caches[target_layer_name]
         from vllm.v1.worker.utils import bind_kv_cache
         bind_kv_cache(kv_caches,
                       self.compilation_config.static_forward_context,
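The sharing setup itself is plain dict aliasing: the sharing layer's entry in `kv_caches` points at the very tensor allocated for its target layer, so writes by the target are immediately visible to the layer that reuses them. A standalone sketch with hypothetical layer names:

```python
import torch

# Hedged sketch of the aliasing loop above; layer names are made up.
kv_caches = {"model.layers.0.attn": torch.zeros(2, 4, 8)}
shared_kv_cache_layers = {"model.layers.1.attn": "model.layers.0.attn"}

for layer_name, target_layer_name in shared_kv_cache_layers.items():
    # No new memory is allocated; both keys reference the same storage.
    kv_caches[layer_name] = kv_caches[target_layer_name]

kv_caches["model.layers.0.attn"][0, 0, 0] = 1.0
assert kv_caches["model.layers.1.attn"][0, 0, 0] == 1.0  # shared storage
```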