# mooncake_transfer_engine.py — process-wide Mooncake TransferEngine wrapper (vllm_ascend)
import threading
class GlobalTE:
def __init__(self):
self.transfer_engine = None
self.is_register_buffer: bool = False
self.transfer_engine_lock = threading.Lock()
self.register_buffer_lock = threading.Lock()
[Lint]Style: Convert `vllm-ascend/` to ruff format(Batch #5) (#5996) ### What this PR does / why we need it? **Scope of Changes**: | File Path | | :--- | | `.../distributed/kv_transfer/kv_pool/ascend_store/ascend_store_connector.py` | | `vllm_ascend/distributed/kv_transfer/kv_pool/ascend_store/backend/backend.py` | | ` .../distributed/kv_transfer/kv_pool/ascend_store/backend/memcache_backend.py` | | ` .../distributed/kv_transfer/kv_pool/ascend_store/backend/mooncake_backend.py` | | ` vllm_ascend/distributed/kv_transfer/kv_pool/ascend_store/config_data.py` | | ` vllm_ascend/distributed/kv_transfer/kv_pool/ascend_store/kv_transfer.py` | | ` vllm_ascend/distributed/kv_transfer/kv_pool/ascend_store/pool_scheduler.py` | | ` vllm_ascend/distributed/kv_transfer/kv_pool/ascend_store/pool_worker.py` | | ` .../distributed/kv_transfer/kv_pool/cpu_offload/cpu_kv_cache_manager.py` | | ` .../distributed/kv_transfer/kv_pool/cpu_offload/cpu_offload_connector.py` | | ` vllm_ascend/distributed/kv_transfer/kv_pool/cpu_offload/metadata.py` | | ` vllm_ascend/distributed/kv_transfer/kv_pool/ucm_connector.py` | | ` vllm_ascend/distributed/kv_transfer/utils/mooncake_transfer_engine.py` | | ` vllm_ascend/distributed/kv_transfer/utils/utils.py` | | ` vllm_ascend/kv_offload/cpu_npu.py` | | ` vllm_ascend/kv_offload/npu.py` | | ` vllm_ascend/lora/lora_ops.py` | | ` vllm_ascend/lora/punica_npu.py` | | ` vllm_ascend/lora/utils.py` | ### Does this PR introduce _any_ user-facing change? ### How was this patch tested? - vLLM version: v0.13.0 - vLLM main: https://github.com/vllm-project/vllm/commit/2c24bc6996cb165fce92f780b388a5e39b3f4060 --------- Signed-off-by: MrZ20 <2609716663@qq.com> Signed-off-by: SILONG ZENG <2609716663@qq.com>
2026-01-24 22:45:38 +08:00
def get_transfer_engine(self, hostname: str, device_name: str | None):
if self.transfer_engine is None:
with self.transfer_engine_lock:
# Double-Checked Locking
if self.transfer_engine is None:
try:
from mooncake.engine import TransferEngine # type: ignore
except ImportError as e:
raise ImportError(
"Please install mooncake by following the instructions at "
"https://github.com/kvcache-ai/Mooncake/blob/main/doc/en/build.md " # noqa: E501
"to run vLLM with MooncakeConnector."
) from e
self.transfer_engine = TransferEngine()
device_name = device_name if device_name is not None else ""
[Lint]Style: Convert `vllm-ascend/` to ruff format(Batch #5) (#5996) ### What this PR does / why we need it? **Scope of Changes**: | File Path | | :--- | | `.../distributed/kv_transfer/kv_pool/ascend_store/ascend_store_connector.py` | | `vllm_ascend/distributed/kv_transfer/kv_pool/ascend_store/backend/backend.py` | | ` .../distributed/kv_transfer/kv_pool/ascend_store/backend/memcache_backend.py` | | ` .../distributed/kv_transfer/kv_pool/ascend_store/backend/mooncake_backend.py` | | ` vllm_ascend/distributed/kv_transfer/kv_pool/ascend_store/config_data.py` | | ` vllm_ascend/distributed/kv_transfer/kv_pool/ascend_store/kv_transfer.py` | | ` vllm_ascend/distributed/kv_transfer/kv_pool/ascend_store/pool_scheduler.py` | | ` vllm_ascend/distributed/kv_transfer/kv_pool/ascend_store/pool_worker.py` | | ` .../distributed/kv_transfer/kv_pool/cpu_offload/cpu_kv_cache_manager.py` | | ` .../distributed/kv_transfer/kv_pool/cpu_offload/cpu_offload_connector.py` | | ` vllm_ascend/distributed/kv_transfer/kv_pool/cpu_offload/metadata.py` | | ` vllm_ascend/distributed/kv_transfer/kv_pool/ucm_connector.py` | | ` vllm_ascend/distributed/kv_transfer/utils/mooncake_transfer_engine.py` | | ` vllm_ascend/distributed/kv_transfer/utils/utils.py` | | ` vllm_ascend/kv_offload/cpu_npu.py` | | ` vllm_ascend/kv_offload/npu.py` | | ` vllm_ascend/lora/lora_ops.py` | | ` vllm_ascend/lora/punica_npu.py` | | ` vllm_ascend/lora/utils.py` | ### Does this PR introduce _any_ user-facing change? ### How was this patch tested? - vLLM version: v0.13.0 - vLLM main: https://github.com/vllm-project/vllm/commit/2c24bc6996cb165fce92f780b388a5e39b3f4060 --------- Signed-off-by: MrZ20 <2609716663@qq.com> Signed-off-by: SILONG ZENG <2609716663@qq.com>
2026-01-24 22:45:38 +08:00
ret_value = self.transfer_engine.initialize(hostname, "P2PHANDSHAKE", "ascend", device_name)
if ret_value != 0:
[Lint]Style: Convert `vllm-ascend/` to ruff format(Batch #5) (#5996) ### What this PR does / why we need it? **Scope of Changes**: | File Path | | :--- | | `.../distributed/kv_transfer/kv_pool/ascend_store/ascend_store_connector.py` | | `vllm_ascend/distributed/kv_transfer/kv_pool/ascend_store/backend/backend.py` | | ` .../distributed/kv_transfer/kv_pool/ascend_store/backend/memcache_backend.py` | | ` .../distributed/kv_transfer/kv_pool/ascend_store/backend/mooncake_backend.py` | | ` vllm_ascend/distributed/kv_transfer/kv_pool/ascend_store/config_data.py` | | ` vllm_ascend/distributed/kv_transfer/kv_pool/ascend_store/kv_transfer.py` | | ` vllm_ascend/distributed/kv_transfer/kv_pool/ascend_store/pool_scheduler.py` | | ` vllm_ascend/distributed/kv_transfer/kv_pool/ascend_store/pool_worker.py` | | ` .../distributed/kv_transfer/kv_pool/cpu_offload/cpu_kv_cache_manager.py` | | ` .../distributed/kv_transfer/kv_pool/cpu_offload/cpu_offload_connector.py` | | ` vllm_ascend/distributed/kv_transfer/kv_pool/cpu_offload/metadata.py` | | ` vllm_ascend/distributed/kv_transfer/kv_pool/ucm_connector.py` | | ` vllm_ascend/distributed/kv_transfer/utils/mooncake_transfer_engine.py` | | ` vllm_ascend/distributed/kv_transfer/utils/utils.py` | | ` vllm_ascend/kv_offload/cpu_npu.py` | | ` vllm_ascend/kv_offload/npu.py` | | ` vllm_ascend/lora/lora_ops.py` | | ` vllm_ascend/lora/punica_npu.py` | | ` vllm_ascend/lora/utils.py` | ### Does this PR introduce _any_ user-facing change? ### How was this patch tested? - vLLM version: v0.13.0 - vLLM main: https://github.com/vllm-project/vllm/commit/2c24bc6996cb165fce92f780b388a5e39b3f4060 --------- Signed-off-by: MrZ20 <2609716663@qq.com> Signed-off-by: SILONG ZENG <2609716663@qq.com>
2026-01-24 22:45:38 +08:00
raise RuntimeError(f"TransferEngine initialization failed with ret_value: {ret_value}")
return self.transfer_engine
def register_buffer(self, ptrs: list[int], sizes: list[int]):
with self.register_buffer_lock:
assert self.transfer_engine is not None, "Transfer engine must be initialized"
if self.is_register_buffer:
return
for ptr, size in zip(ptrs, sizes):
ret_value = self.transfer_engine.register_memory(ptr, size)
if ret_value != 0:
raise RuntimeError("Mooncake memory registration failed.")
self.is_register_buffer = True
# Module-level singleton: all users in the process share this one instance
# rather than constructing their own GlobalTE.
global_te = GlobalTE()