### What this PR does / why we need it?
**Scope of Changes**:

| File Path |
| :--- |
| `.../distributed/kv_transfer/kv_pool/ascend_store/ascend_store_connector.py` |
| `vllm_ascend/distributed/kv_transfer/kv_pool/ascend_store/backend/backend.py` |
| `.../distributed/kv_transfer/kv_pool/ascend_store/backend/memcache_backend.py` |
| `.../distributed/kv_transfer/kv_pool/ascend_store/backend/mooncake_backend.py` |
| `vllm_ascend/distributed/kv_transfer/kv_pool/ascend_store/config_data.py` |
| `vllm_ascend/distributed/kv_transfer/kv_pool/ascend_store/kv_transfer.py` |
| `vllm_ascend/distributed/kv_transfer/kv_pool/ascend_store/pool_scheduler.py` |
| `vllm_ascend/distributed/kv_transfer/kv_pool/ascend_store/pool_worker.py` |
| `.../distributed/kv_transfer/kv_pool/cpu_offload/cpu_kv_cache_manager.py` |
| `.../distributed/kv_transfer/kv_pool/cpu_offload/cpu_offload_connector.py` |
| `vllm_ascend/distributed/kv_transfer/kv_pool/cpu_offload/metadata.py` |
| `vllm_ascend/distributed/kv_transfer/kv_pool/ucm_connector.py` |
| `vllm_ascend/distributed/kv_transfer/utils/mooncake_transfer_engine.py` |
| `vllm_ascend/distributed/kv_transfer/utils/utils.py` |
| `vllm_ascend/kv_offload/cpu_npu.py` |
| `vllm_ascend/kv_offload/npu.py` |
| `vllm_ascend/lora/lora_ops.py` |
| `vllm_ascend/lora/punica_npu.py` |
| `vllm_ascend/lora/utils.py` |
### Does this PR introduce _any_ user-facing change?
### How was this patch tested?
- vLLM version: v0.13.0
- vLLM main: 2c24bc6996
---------
Signed-off-by: MrZ20 <2609716663@qq.com>
Signed-off-by: SILONG ZENG <2609716663@qq.com>
`vllm_ascend/kv_offload/cpu_npu.py`:

```diff
@@ -4,8 +4,7 @@ from vllm.logger import init_logger
 from vllm.utils.platform_utils import is_pin_memory_available
 from vllm.v1.attention.backend import AttentionBackend  # type: ignore
 from vllm.v1.kv_offload.mediums import CPULoadStoreSpec, GPULoadStoreSpec
-from vllm.v1.kv_offload.worker.worker import (OffloadingHandler,
-                                              TransferResult, TransferSpec)
+from vllm.v1.kv_offload.worker.worker import OffloadingHandler, TransferResult, TransferSpec
 
 logger = init_logger(__name__)
 
```
```diff
@@ -44,7 +43,6 @@ def expand_block_ids(
 
 
 class CpuNpuOffloadingHandler(OffloadingHandler):
-
     def __init__(
         self,
         gpu_block_size: int,
```
```diff
@@ -81,20 +79,22 @@ class CpuNpuOffloadingHandler(OffloadingHandler):
             cpu_shape[num_blocks_idx] = num_cpu_blocks * self.block_size_factor
 
             logger.debug("Allocating CPU tensor of shape %r", cpu_shape)
-            self.cpu_tensors.append((
-                torch.zeros(
-                    cpu_shape,
-                    dtype=gpu_tensor[0].dtype,
-                    device="cpu",
-                    pin_memory=pin_memory,
-                ),
-                torch.zeros(
-                    cpu_shape,
-                    dtype=gpu_tensor[0].dtype,
-                    device="cpu",
-                    pin_memory=pin_memory,
-                ),
-            ))
+            self.cpu_tensors.append(
+                (
+                    torch.zeros(
+                        cpu_shape,
+                        dtype=gpu_tensor[0].dtype,
+                        device="cpu",
+                        pin_memory=pin_memory,
+                    ),
+                    torch.zeros(
+                        cpu_shape,
+                        dtype=gpu_tensor[0].dtype,
+                        device="cpu",
+                        pin_memory=pin_memory,
+                    ),
+                )
+            )
 
     def transfer_async(self, job_id: int, spec: TransferSpec) -> bool:
         logger.info("start transfer_async...")
```
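The paired `torch.zeros(..., device="cpu", pin_memory=pin_memory)` calls above allocate one pinned host buffer for keys and one for values per layer. A minimal, self-contained sketch of that pattern, with a made-up shape and dtype (the real code derives both from the NPU tensors and uses vLLM's `is_pin_memory_available()`):

```python
import torch

# Guard so the sketch also runs on accelerator-less hosts; stand-in for
# is_pin_memory_available() in the diff.
pin_memory = torch.cuda.is_available()

# Hypothetical shape: (num_cpu_blocks * block_size_factor, block_size, heads, head_dim)
cpu_shape = (64, 16, 8, 128)

# One pinned key buffer and one pinned value buffer, mirroring the
# (key, value) tuple appended to self.cpu_tensors. Page-locked memory lets
# the copy engine move data to and from the device asynchronously.
cpu_key_cache = torch.zeros(cpu_shape, dtype=torch.float16, device="cpu", pin_memory=pin_memory)
cpu_value_cache = torch.zeros(cpu_shape, dtype=torch.float16, device="cpu", pin_memory=pin_memory)
```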
```diff
@@ -123,9 +123,7 @@ class CpuNpuOffloadingHandler(OffloadingHandler):
         dst_sub_blocks_to_skip = -src_blocks.size % dst_block_size_factor
         src_sub_block_count = src_blocks.size * src_block_size_factor
 
-        assert (
-            src_sub_block_count == dst_blocks.size * dst_block_size_factor -
-            dst_sub_blocks_to_skip)
+        assert src_sub_block_count == dst_blocks.size * dst_block_size_factor - dst_sub_blocks_to_skip
 
         src_to_dst = np.empty((src_sub_block_count, 2), dtype=np.int64)
         expand_block_ids(src_blocks, src_block_size_factor, src_to_dst[:, 0])
```
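To see why the reformatted assert holds, here is a worked example with made-up sizes (one CPU block holding four NPU-sized sub-blocks):

```python
src_block_size_factor = 1  # source blocks are already NPU-sized
dst_block_size_factor = 4  # one destination (CPU) block = 4 sub-blocks
src_blocks_size = 10       # src_blocks.size: NPU blocks to transfer

# Python's modulo takes the sign of the divisor, so -10 % 4 == 2: one
# destination block is only partially used and 2 sub-block slots are skipped.
dst_sub_blocks_to_skip = -src_blocks_size % dst_block_size_factor  # == 2
src_sub_block_count = src_blocks_size * src_block_size_factor      # == 10
dst_blocks_size = 3  # ceil(10 / 4) destination blocks are needed

# 10 == 3 * 4 - 2, matching the assert in the diff
assert src_sub_block_count == dst_blocks_size * dst_block_size_factor - dst_sub_blocks_to_skip
```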
```diff
@@ -137,18 +135,14 @@ class CpuNpuOffloadingHandler(OffloadingHandler):
         )
         src_to_dst_tensor = torch.from_numpy(src_to_dst)
 
-        event = self.events_pool.pop(
-        ) if self.events_pool else torch.npu.Event()
+        event = self.events_pool.pop() if self.events_pool else torch.npu.Event()
         with torch.npu.stream(stream):
             for src_tensor, dst_tensor in zip(src_tensors, dst_tensors):
                 src_key_cache, src_value_cache = src_tensor[0], src_tensor[1]
                 dst_key_cache, dst_value_cache = dst_tensor[0], dst_tensor[1]
 
-                torch.ops._C_ascend.swap_blocks(src_key_cache, dst_key_cache,
-                                                src_to_dst_tensor)
-                torch.ops._C_ascend.swap_blocks(src_value_cache,
-                                                dst_value_cache,
-                                                src_to_dst_tensor)
+                torch.ops._C_ascend.swap_blocks(src_key_cache, dst_key_cache, src_to_dst_tensor)
+                torch.ops._C_ascend.swap_blocks(src_value_cache, dst_value_cache, src_to_dst_tensor)
 
             event.record(stream)
 
```
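The surrounding event handling follows a recycle-and-record pattern: pop a pooled event if one exists, record it on the transfer stream, and synchronize on it later. A rough sketch of that pattern, written against `torch.cuda` (which mirrors the `torch.npu` surface) and assuming a device is present; `start_transfer`/`finish_transfer` and the plain `copy_` are stand-ins, not the PR's API:

```python
import torch

events_pool: list[torch.cuda.Event] = []
transfer_events: dict[int, torch.cuda.Event] = {}
stream = torch.cuda.Stream()

def start_transfer(job_id: int, src: torch.Tensor, dst: torch.Tensor) -> None:
    # Reuse a pooled event when available, as in the diff.
    event = events_pool.pop() if events_pool else torch.cuda.Event()
    with torch.cuda.stream(stream):
        dst.copy_(src, non_blocking=True)  # stand-in for _C_ascend.swap_blocks
        event.record(stream)
    transfer_events[job_id] = event

def finish_transfer(job_id: int) -> None:
    event = transfer_events.pop(job_id, None)
    if event is not None:
        event.synchronize()        # blocks until the recorded copies complete
        events_pool.append(event)  # recycle the event for later transfers
```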
```diff
@@ -175,4 +169,4 @@ class CpuNpuOffloadingHandler(OffloadingHandler):
         event = self.transfer_events.get(job_id)
         if event is not None:
             # This will block until the NPU event is complete
-            event.synchronize()
+            event.synchronize()
```
`vllm_ascend/kv_offload/npu.py`:

```diff
@@ -1,48 +1,40 @@
 from collections.abc import Iterator
-from typing import Optional
 
 import torch
 from vllm.config import VllmConfig
 from vllm.v1.attention.backend import AttentionBackend  # type: ignore
+from vllm.v1.kv_cache_interface import KVCacheConfig
 from vllm.v1.kv_offload.abstract import LoadStoreSpec, OffloadingManager
 from vllm.v1.kv_offload.backends.cpu import CPUBackend
 from vllm.v1.kv_offload.lru_manager import LRUOffloadingManager
 from vllm.v1.kv_offload.mediums import CPULoadStoreSpec, GPULoadStoreSpec
 from vllm.v1.kv_offload.spec import OffloadingSpec
 from vllm.v1.kv_offload.worker.worker import OffloadingHandler
-from vllm.v1.kv_cache_interface import KVCacheConfig
 
 from vllm_ascend.kv_offload.cpu_npu import CpuNpuOffloadingHandler
 
 
 class NPUOffloadingSpec(OffloadingSpec):
-
-    def __init__(self,
-                 vllm_config: VllmConfig,
-                 kv_cache_config: Optional[KVCacheConfig] = None):
+    def __init__(self, vllm_config: VllmConfig, kv_cache_config: KVCacheConfig | None = None):
         super().__init__(vllm_config, kv_cache_config)
 
         num_cpu_blocks = self.extra_config.get("num_cpu_blocks")
         if not num_cpu_blocks:
-            raise Exception(
-                "num_cpu_blocks must be specified in kv_connector_extra_config"
-            )
+            raise Exception("num_cpu_blocks must be specified in kv_connector_extra_config")
         self.num_cpu_blocks: int = num_cpu_blocks
 
         # scheduler-side
-        self._manager: Optional[OffloadingManager] = None
+        self._manager: OffloadingManager | None = None
 
         # worker-side
-        self._handler: Optional[OffloadingHandler] = None
+        self._handler: OffloadingHandler | None = None
 
     def get_manager(self) -> OffloadingManager:
         if not self._manager:
             kv_events_config = self.vllm_config.kv_events_config
-            enable_events = (kv_events_config is not None
-                             and kv_events_config.enable_kv_cache_events)
+            enable_events = kv_events_config is not None and kv_events_config.enable_kv_cache_events
             self._manager = LRUOffloadingManager(
-                CPUBackend(block_size=self.offloaded_block_size,
-                           num_blocks=self.num_cpu_blocks),
+                CPUBackend(block_size=self.offloaded_block_size, num_blocks=self.num_cpu_blocks),
                 enable_events=enable_events,
             )
         return self._manager
```
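Since the constructor raises unless `num_cpu_blocks` is present, enabling this spec requires passing it through the connector's extra config. A hypothetical sketch of such a launch-time configuration; the connector name and the value 4096 are assumptions, only the `num_cpu_blocks` key is actually required by the code above:

```python
from vllm.config import KVTransferConfig

# Assumed wiring: vLLM's offloading connector forwards
# kv_connector_extra_config to the offloading spec it instantiates.
kv_transfer_config = KVTransferConfig(
    kv_connector="OffloadingConnector",
    kv_role="kv_both",
    kv_connector_extra_config={"num_cpu_blocks": 4096},
)
```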
```diff
@@ -51,8 +43,7 @@ class NPUOffloadingSpec(OffloadingSpec):
         self,
         kv_caches: dict[str, torch.Tensor],
         attn_backends: dict[str, type[AttentionBackend]],
-    ) -> Iterator[tuple[type[LoadStoreSpec], type[LoadStoreSpec],
-                        OffloadingHandler]]:
+    ) -> Iterator[tuple[type[LoadStoreSpec], type[LoadStoreSpec], OffloadingHandler]]:
         if not self._handler:
             self._handler = CpuNpuOffloadingHandler(
                 attn_backends=attn_backends,
```
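For completeness, a hedged illustration of consuming the triples this method yields, assuming `spec`, `kv_caches`, and `attn_backends` are already set up; `medium()` is the accessor on vLLM's `LoadStoreSpec` classes:

```python
# Each triple names a transfer direction and the handler that serves it.
for src_cls, dst_cls, handler in spec.get_handlers(kv_caches, attn_backends):
    print(src_cls.medium(), "->", dst_cls.medium(), type(handler).__name__)
```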