### What this PR does / why we need it?

1. Fix `TypeError: FusedMoEParallelConfig.__init__() missing 1 required positional argument: 'is_sequence_parallel'` due to https://github.com/vllm-project/vllm/pull/32567
2. Fix `TypeError: '>' not supported between instances of 'MagicMock' and 'int'` due to https://github.com/vllm-project/vllm/pull/33035 (see the mock sketch after this description)
3. Fix `TypeError: Can't instantiate abstract class AscendMLAImpl with abstract methods forward_mha, forward_mqa` and `AttributeError: 'bool' object has no attribute 'process_weights_after_loading'` due to https://github.com/vllm-project/vllm/pull/33284
4. Fix `'AscendSharedFusedMoE' object has no attribute '_routed_input_transform'` due to https://github.com/vllm-project/vllm/pull/32790
5. Fix `NPUModelRunner._dummy_run() got an unexpected keyword argument 'num_active_loras'` due to https://github.com/vllm-project/vllm/pull/32005
6. Fix the problem caused by `'tuple' object has no attribute 'job_id'` due to https://github.com/vllm-project/vllm/pull/27492
7. Fix the problem that `all_moe_layers` does not match `vllm.moe_forward` and `vllm.moe_forward_shared` due to https://github.com/vllm-project/vllm/pull/33184
8. Add a patch to fix `got multiple values for keyword argument 'add_special_tokens'` due to https://github.com/vllm-project/vllm/pull/32863

### Does this PR introduce _any_ user-facing change?

### How was this patch tested?

- vLLM version: v0.15.0
- vLLM main: https://github.com/vllm-project/vllm/commit/v0.15.0

---------

Signed-off-by: wangxiyuan <wangxiyuan1007@gmail.com>
Signed-off-by: Meihan-chen <jcccx.cmh@gmail.com>
Signed-off-by: hfadzxy <starmoon_zhang@163.com>
Co-authored-by: wangxiyuan <wangxiyuan1007@gmail.com>
Co-authored-by: hfadzxy <starmoon_zhang@163.com>
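For context on item 2: `MagicMock` comparison methods return `NotImplemented` by default, so ordering a mocked attribute against an int raises exactly the reported `TypeError`; the usual remedy is to configure a concrete value on the mock. The snippet below is a generic illustration of that pattern (the attribute name is made up), not the actual vllm-ascend test code:

```python
from unittest.mock import MagicMock

config = MagicMock()

# config.some_limit > 0 would raise:
#   TypeError: '>' not supported between instances of 'MagicMock' and 'int'
# because MagicMock's comparison methods return NotImplemented by default.

# Assign a concrete value so the comparison is well-defined.
config.some_limit = 4096
assert config.some_limit > 0
```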
import torch

import vllm.v1.worker.utils as utils
from vllm.v1.worker.utils import defaultdict, extract_layer_index

from vllm_ascend.utils import vllm_version_is

if vllm_version_is("v0.15.0"):
    from vllm.attention.layer import Attention  # type: ignore
else:
    from vllm.model_executor.layers.attention import Attention


# Without this patch, an exception is raised when initializing the kv_cache.
# TODO: To remove this patch, check why the original bind_kv_cache raises a
# NotImplementedError.
def bind_kv_cache(
    kv_caches: dict[str, torch.Tensor],
    forward_context: dict[str, Attention],
    runner_kv_caches: list[torch.Tensor],
    num_attn_module: int = 1,
) -> None:
    """
    Bind the allocated KV cache to both the ModelRunner and the forward
    context so that the KV cache can be used in the forward pass.

    This function:
      1) Fills the ModelRunner's kv cache list (`runner_kv_caches`) with
         kv_caches.
      2) Associates each attention layer in the `forward_context` with its
         corresponding KV cache in kv_caches.

    Args:
        kv_caches: The allocated kv_caches with layer names as keys.
        forward_context: The global forward context containing all Attention
            layers with layer names as keys.
        runner_kv_caches: The kv_cache declared by the ModelRunner.
    """
    # Bind kv_caches to the ModelRunner.
    assert len(runner_kv_caches) == 0

    # Convert the kv_caches dict to a list of tensors ordered by layer index.
    index2name = defaultdict(list)
    for layer_name in kv_caches:
        index2name[extract_layer_index(layer_name,
                                       num_attn_module)].append(layer_name)

    for layer_index in sorted(index2name.keys()):
        layer_names = index2name[layer_index]
        # The upstream handling for encoder-decoder models (e.g. BART), where
        # several layer names share one index, is dropped here.
        layer_name = layer_names[0]
        runner_kv_caches.append(kv_caches[layer_name])

    # Bind kv_caches to the forward context.
    for layer_name, kv_cache in kv_caches.items():
        # NOTE: Use a list because of the v0 PP virtual engine.
        forward_context[layer_name].kv_cache = [kv_cache]


utils.bind_kv_cache = bind_kv_cache
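The patched function can be exercised on its own with toy objects; the following is a minimal sketch assuming a single-layer model, where the layer name, tensor shape, and the `SimpleNamespace` stand-in for an `Attention` layer are all chosen for illustration only:

```python
import torch
from types import SimpleNamespace

# One toy attention layer; extract_layer_index picks up the "0" in the name.
layer_name = "model.layers.0.self_attn.attn"
kv_caches = {layer_name: torch.zeros(2, 16, 8, 64)}  # illustrative shape
forward_context = {layer_name: SimpleNamespace(kv_cache=None)}
runner_kv_caches: list[torch.Tensor] = []

bind_kv_cache(kv_caches, forward_context, runner_kv_caches)

# The runner list is filled in layer-index order...
assert runner_kv_caches[0] is kv_caches[layer_name]
# ...and each layer in the forward context now holds its cache in a list.
assert forward_context[layer_name].kv_cache[0] is kv_caches[layer_name]
```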