fix multiproc executor determine kv cache memory

starkwj
2026-04-24 08:31:54 +00:00
parent e4d898b245
commit 31868639fd
3 changed files with 19 additions and 97 deletions


@@ -77,75 +77,5 @@ def _process_input_queue(self):
             self._handle_client_request(*req)
-@instrument(span_name="Prepare model")
-def _initialize_kv_caches(self, vllm_config: VllmConfig) -> KVCacheConfig:
-    start = time.time()
-    # Get all kv cache needed by the model
-    kv_cache_specs = self.model_executor.get_kv_cache_specs()
-    has_kv_cache = any(kv_cache_spec for kv_cache_spec in kv_cache_specs)
-    if has_kv_cache:
-        if envs_ascend.VLLM_ASCEND_ENABLE_VNPU:
-            # get available memory in idle offload mode
-            available_gpu_memory = (
-                self.model_executor.determine_available_memory_vnpu_offload_mode())
-            self.available_gpu_memory_for_kv_cache = \
-                available_gpu_memory[0]
-        elif envs.VLLM_ELASTIC_EP_SCALE_UP_LAUNCH:
-            # NOTE(yongji): should already be set
-            # during _eep_scale_up_before_kv_init
-            assert self.available_gpu_memory_for_kv_cache > 0
-            available_gpu_memory = [self.available_gpu_memory_for_kv_cache] * len(
-                kv_cache_specs
-            )
-        else:
-            # Profiles the peak memory usage of the model to determine how
-            # much memory can be allocated for kv cache.
-            available_gpu_memory = self.model_executor.determine_available_memory()
-            self.available_gpu_memory_for_kv_cache = available_gpu_memory[0]
-    else:
-        # Attention free models don't need memory for kv cache
-        available_gpu_memory = [0] * len(kv_cache_specs)
-    assert len(kv_cache_specs) == len(available_gpu_memory)
-    # Track max_model_len before KV cache config to detect auto-fit changes
-    max_model_len_before = vllm_config.model_config.max_model_len
-    kv_cache_configs = get_kv_cache_configs(
-        vllm_config, kv_cache_specs, available_gpu_memory
-    )
-    # If auto-fit reduced max_model_len, sync the new value to workers.
-    # This is needed because workers were spawned before memory profiling
-    # and have the original (larger) max_model_len cached.
-    max_model_len_after = vllm_config.model_config.max_model_len
-    if max_model_len_after != max_model_len_before:
-        self.collective_rpc("update_max_model_len", args=(max_model_len_after,))
-    scheduler_kv_cache_config = generate_scheduler_kv_cache_config(kv_cache_configs)
-    vllm_config.cache_config.num_gpu_blocks = scheduler_kv_cache_config.num_blocks
-    kv_cache_groups = scheduler_kv_cache_config.kv_cache_groups
-    if kv_cache_groups:
-        vllm_config.cache_config.block_size = min(
-            g.kv_cache_spec.block_size for g in kv_cache_groups
-        )
-        vllm_config.validate_block_size()
-    # Initialize kv cache and warmup the execution
-    self.model_executor.initialize_from_config(kv_cache_configs)
-    elapsed = time.time() - start
-    logger.info_once(
-        "init engine (profile, create kv cache, warmup model) took %.2f seconds",
-        elapsed,
-        scope="local",
-    )
-    return scheduler_kv_cache_config
 EngineCoreProc.run_busy_loop = run_busy_loop
 EngineCoreProc._process_input_queue = _process_input_queue
-EngineCore._initialize_kv_caches = _initialize_kv_caches
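
As an aside, not part of the commit: the override removed above feeds available_gpu_memory[0] into get_kv_cache_configs, which turns a byte budget into a KV cache block count. A minimal sketch of that arithmetic under assumed model shapes; kv_blocks_for and all numbers below are illustrative, not vLLM's real config objects.

# Illustrative sketch only -- assumed shapes, not vLLM's actual get_kv_cache_configs.
GiB = 1 << 30

def kv_blocks_for(available_bytes: int,
                  block_size: int = 128,   # tokens per KV cache block
                  num_layers: int = 32,
                  num_kv_heads: int = 8,
                  head_dim: int = 128,
                  dtype_bytes: int = 2) -> int:
    # Each block stores one key and one value tensor for every layer.
    bytes_per_block = 2 * block_size * num_layers * num_kv_heads * head_dim * dtype_bytes
    return available_bytes // bytes_per_block

print(kv_blocks_for(40 * GiB))  # 2560 blocks at 16 MiB per block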


@@ -42,11 +42,6 @@ def reload_vram(self) -> bool:
         time.sleep(0.001)
-def determine_available_memory_vnpu_offload_mode(self) -> int:
-    return self.collective_rpc("determine_available_memory_vnpu_offload_mode")
 Executor.is_offloaded = is_offloaded
 Executor.offload_vram = offload_vram
 Executor.reload_vram = reload_vram
-Executor.determine_available_memory_vnpu_offload_mode = determine_available_memory_vnpu_offload_mode
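
For reference, the removed wrapper relies on the Executor.collective_rpc convention: invoke the named method on every worker and return the per-worker results as a list (the engine-side code above then reads entry 0). A minimal sketch of that fan-out with stand-in classes; TinyExecutor and TinyWorker are illustrative, not vLLM's classes.

# Illustrative stand-ins for the collective_rpc fan-out; not vLLM's Executor/Worker.
class TinyWorker:
    def __init__(self, free_bytes: int) -> None:
        self.free_bytes = free_bytes

    def determine_available_memory(self) -> int:
        return self.free_bytes

class TinyExecutor:
    def __init__(self, workers: list) -> None:
        self.workers = workers

    def collective_rpc(self, method: str, args: tuple = ()) -> list:
        # Call `method` on every worker and collect the per-worker results.
        return [getattr(w, method)(*args) for w in self.workers]

executor = TinyExecutor([TinyWorker(40 << 30), TinyWorker(38 << 30)])
print(executor.collective_rpc("determine_available_memory")[0])  # first worker's budget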


@@ -336,6 +336,25 @@ class NPUWorker(WorkerBase):
             bytes.
         """
         GiB = lambda b: b / GiB_bytes
+        if envs_ascend.VLLM_ASCEND_ENABLE_VNPU:
+            allocator = CaMemAllocator.get_instance()
+            free, total = allocator.get_pool_mem_info()
+            if self.cache_config.gpu_memory_utilization <= 0.9:
+                logger.warning(
+                    "GPU memory utilization is set to %.2f. For VNPU mode, it is recommended to set gpu_memory_utilization to a larger value",
+                    self.cache_config.gpu_memory_utilization,
+                )
+            available_kv_cache_memory = int(
+                total * self.cache_config.gpu_memory_utilization - (total - free)
+            )
+            available_kv_cache_memory = int(max(available_kv_cache_memory, 0))
+            self.available_kv_cache_memory_bytes = available_kv_cache_memory
+            logger.info_once(
+                "Available KV cache memory: %.2f GiB",
+                GiB(self.available_kv_cache_memory_bytes),
+                scope="local",
+            )
+            return int(self.available_kv_cache_memory_bytes)
         # Execute a forward pass with dummy inputs to profile the memory usage
         # of the model.
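
To make the new sizing rule above concrete: the budget is the allowed fraction of the allocator pool minus whatever is already occupied. A small worked example; the pool numbers below are made up, only the formula comes from the hunk above.

# Worked example of the VNPU sizing formula; all numbers are illustrative.
GiB = 1 << 30
total = 60 * GiB                  # pool capacity reported by the allocator
free = 45 * GiB                   # pool bytes currently unused
gpu_memory_utilization = 0.95     # fraction of the pool the engine may claim

budget = int(total * gpu_memory_utilization - (total - free))
budget = max(budget, 0)           # never report a negative budget
print(f"KV cache budget: {budget / GiB:.2f} GiB")  # -> 42.00 GiB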
@@ -363,28 +382,6 @@ class NPUWorker(WorkerBase):
         return int(self.available_kv_cache_memory_bytes)
-    @torch.inference_mode()
-    def determine_available_memory_vnpu_offload_mode(self) -> int:
-        GiB = lambda b: b / GiB_bytes
-        allocator = CaMemAllocator.get_instance()
-        free, total = allocator.get_pool_mem_info()
-        if self.cache_config.gpu_memory_utilization <= 0.9:
-            logger.warning(
-                "GPU memory utilization is set to %.2f. For VNPU mode, it is recommended to set gpu_memory_utilization to a larger value",
-                self.cache_config.gpu_memory_utilization,
-            )
-        available_kv_cache_memory = int(
-            total * self.cache_config.gpu_memory_utilization - (total - free)
-        )
-        available_kv_cache_memory = int(max(available_kv_cache_memory, 0))
-        self.available_kv_cache_memory_bytes = available_kv_cache_memory
-        logger.info_once(
-            "Available KV cache memory: %.2f GiB",
-            GiB(self.available_kv_cache_memory_bytes),
-            scope="local",
-        )
-        return int(self.available_kv_cache_memory_bytes)
     def execute_model(
         self,
         scheduler_output: "SchedulerOutput",