fix multiproc executor determine kv cache memory & update Dockerfile

This commit is contained in:
starkwj
2026-04-24 08:31:54 +00:00
parent e4d898b245
commit e17006077a
10 changed files with 145 additions and 153 deletions

View File

@@ -336,6 +336,25 @@ class NPUWorker(WorkerBase):
bytes.
"""
GiB = lambda b: b / GiB_bytes
if envs_ascend.VLLM_ASCEND_ENABLE_VNPU:
allocator = CaMemAllocator.get_instance()
free, total = allocator.get_pool_mem_info()
if self.cache_config.gpu_memory_utilization <= 0.9:
logger.warning(
"GPU memory utilization is set to %.2f. For VNPU mode, it is recommended to set gpu_memory_utilization to a larger value",
self.cache_config.gpu_memory_utilization,
)
available_kv_cache_memory = int(
total * self.cache_config.gpu_memory_utilization - (total - free)
)
available_kv_cache_memory = int(max(available_kv_cache_memory, 0))
self.available_kv_cache_memory_bytes = available_kv_cache_memory
logger.info_once(
"Available KV cache memory: %.2f GiB",
GiB(self.available_kv_cache_memory_bytes),
scope="local",
)
return int(self.available_kv_cache_memory_bytes)
# Execute a forward pass with dummy inputs to profile the memory usage
# of the model.
@@ -363,28 +382,6 @@ class NPUWorker(WorkerBase):
return int(self.available_kv_cache_memory_bytes)
@torch.inference_mode()
def determine_available_memory_vnpu_offload_mode(self) -> int:
    """Return the KV cache memory budget, in bytes, for VNPU offload mode.

    The budget is the configured fraction of the allocator pool
    (``total * gpu_memory_utilization``) minus what the pool has already
    consumed (``total - free``), clamped at zero. The result is cached on
    ``self.available_kv_cache_memory_bytes`` before being returned.

    Returns:
        int: non-negative number of bytes available for the KV cache.
    """
    allocator = CaMemAllocator.get_instance()
    free, total = allocator.get_pool_mem_info()
    if self.cache_config.gpu_memory_utilization <= 0.9:
        # A low utilization fraction leaves little headroom for the KV
        # cache in VNPU mode, so nudge the operator toward a larger value.
        logger.warning(
            "GPU memory utilization is set to %.2f. For VNPU mode, it is recommended to set gpu_memory_utilization to a larger value",
            self.cache_config.gpu_memory_utilization,
        )
    # Budgeted bytes minus bytes already in use by the pool; never negative.
    # (Single int()/max() pass replaces the original redundant double cast.)
    available_kv_cache_memory = max(
        int(total * self.cache_config.gpu_memory_utilization - (total - free)),
        0,
    )
    self.available_kv_cache_memory_bytes = available_kv_cache_memory
    logger.info_once(
        "Available KV cache memory: %.2f GiB",
        self.available_kv_cache_memory_bytes / GiB_bytes,
        scope="local",
    )
    return int(self.available_kv_cache_memory_bytes)
def execute_model(
self,
scheduler_output: "SchedulerOutput",