#
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
# Copyright 2023 The vLLM team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
# Adapted from vllm-project/vllm/vllm/worker/gpu_worker.py
#

from typing import Optional

import torch
import torch.nn as nn
import torch_npu
from torch_npu.op_plugin.atb._atb_ops import _register_atb_extensions
from vllm import envs
from vllm.config import VllmConfig
from vllm.distributed import (ensure_model_parallel_initialized,
                              init_distributed_environment)
from vllm.distributed.kv_transfer import ensure_kv_transfer_initialized
from vllm.distributed.parallel_state import get_pp_group, get_tp_group
from vllm.logger import logger
from vllm.lora.request import LoRARequest
from vllm.sequence import IntermediateTensors
from vllm.utils import STR_DTYPE_TO_TORCH_DTYPE, GiB_bytes
from vllm.v1.core.sched.output import SchedulerOutput
from vllm.v1.kv_cache_interface import KVCacheConfig, KVCacheSpec
from vllm.v1.outputs import ModelRunnerOutput
from vllm.v1.worker.worker_base import WorkerBase

from vllm_ascend.ascend_config import init_ascend_config
from vllm_ascend.device_allocator.camem import CaMemAllocator
from vllm_ascend.distributed.parallel_state import init_ascend_model_parallel
from vllm_ascend.platform import NPUPlatform
from vllm_ascend.utils import (init_ascend_soc_version, sleep_mode_enabled,
                               try_register_lib, vllm_version_is)
from vllm_ascend.worker.model_runner_v1 import NPUModelRunner

if not vllm_version_is("0.10.0"):
    from vllm.tasks import SupportedTask


class NPUWorker(WorkerBase):

    def __init__(
            self,
            vllm_config: VllmConfig,
            local_rank: int,
            rank: int,
            distributed_init_method: str,
            is_driver_worker: bool = False,
            # Additional parameters for compatibility with vllm
            **kwargs):
        """Initialize the worker for Ascend."""
        # Register vllm patches for the worker process.
        from vllm_ascend.utils import adapt_patch
        adapt_patch()
        # Register ops when the worker is initialized.
        from vllm_ascend import ops
        ops.register_dummy_fusion_op()
        _register_atb_extensions()
        # Initialize the ascend config.
        init_ascend_config(vllm_config)
        super().__init__(vllm_config=vllm_config,
                         local_rank=local_rank,
                         rank=rank,
                         distributed_init_method=distributed_init_method,
                         is_driver_worker=is_driver_worker)

        local_dp_rank = self.vllm_config.parallel_config.data_parallel_rank_local
        world_size = self.vllm_config.parallel_config.world_size
        self.local_rank_across_dp = local_dp_rank * world_size + self.local_rank

        # Try to import mindie_turbo to accelerate vLLM inference.
        try_register_lib(
            "mindie_turbo",
            "MindIE Turbo is installed. vLLM inference will be accelerated with MindIE Turbo."
        )
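
        # NOTE: descriptive comment on the resolution below (the mapping is
        # vLLM's standard STR_DTYPE_TO_TORCH_DTYPE table): "auto" inherits
        # the model's dtype, while an explicit string such as "bfloat16" is
        # translated to a concrete torch dtype for the KV cache.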
        if self.cache_config.cache_dtype == "auto":
            self.cache_dtype = self.model_config.dtype
        else:
            self.cache_dtype = STR_DTYPE_TO_TORCH_DTYPE[
                self.cache_config.cache_dtype]

        if self.model_config.trust_remote_code:
            # note: lazy import to avoid importing torch before initializing
            from vllm.utils import init_cached_hf_modules
            init_cached_hf_modules()

        self.profiler = self._init_profiler()

    def sleep(self, level: int = 1) -> None:
        if not sleep_mode_enabled():
            raise ValueError(
                "Sleep mode is not enabled. Please compile vllm-ascend with COMPILE_CUSTOM_KERNELS=1."
            )
        free_bytes_before_sleep = NPUPlatform.mem_get_info()[0]
        allocator = CaMemAllocator.get_instance()
        allocator.sleep(offload_tags=("weights", ) if level == 1 else tuple())
        free_bytes_after_sleep, total = NPUPlatform.mem_get_info()
        freed_bytes = free_bytes_after_sleep - free_bytes_before_sleep
        used_bytes = total - free_bytes_after_sleep
        assert freed_bytes >= 0, "Memory usage increased after sleeping."
        logger.info(
            "Sleep mode freed %.2f GiB memory, "
            "%.2f GiB memory is still in use.", freed_bytes / GiB_bytes,
            used_bytes / GiB_bytes)

    def wake_up(self, tags: Optional[list[str]] = None) -> None:
        if not sleep_mode_enabled():
            raise ValueError(
                "Sleep mode is not enabled. Please compile vllm-ascend with COMPILE_CUSTOM_KERNELS=1."
            )
        allocator = CaMemAllocator.get_instance()
        allocator.wake_up(tags=tags)

    def initialize_cache(self, num_gpu_blocks: int,
                         num_cpu_blocks: int) -> None:
        self.cache_config.num_gpu_blocks = num_gpu_blocks
        self.cache_config.num_cpu_blocks = num_cpu_blocks

    def init_device(self):
        device = torch.device(f"npu:{self.local_rank}")
        NPUPlatform.set_device(device)
        NPUPlatform.empty_cache()
        self.init_npu_memory = NPUPlatform.mem_get_info()[0]
        init_ascend_soc_version()
        # Initialize the distributed environment.
        self._init_worker_distributed_environment()
        # Set random seed.
        NPUPlatform.seed_everything(self.model_config.seed)

        # Init ModelRunner here, so that we have access to self.device.
        self.model_runner = NPUModelRunner(self.vllm_config, device)

    def determine_available_memory(self) -> int:
        # Profile the memory usage of the model and get the maximum number of
        # cache blocks that can be allocated with the remaining free memory.
        NPUPlatform.clear_npu_memory()

        # Execute a forward pass with dummy inputs to profile the memory usage
        # of the model.
        _, total_npu_memory = NPUPlatform.mem_get_info()
        self.model_runner.profile_run()

        # Calculate the number of blocks that can be allocated with the
        # profiled peak memory.
        free_npu_memory, _ = NPUPlatform.mem_get_info()
        # NOTE(woosuk): Here we assume that the other processes using the same
        # NPU did not change their memory usage during the profiling.
        assert self.init_npu_memory > free_npu_memory, (
            "Error in memory profiling. "
            f"Initial free memory {self.init_npu_memory}, current free memory"
            f" {free_npu_memory}. This happens when the NPU memory was "
            "not properly cleaned up before initializing the vLLM instance.")
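
        # NOTE: a sketch of the accounting that follows, for readability:
        #   available = total_npu_memory * gpu_memory_utilization - peak_memory
        # where peak_memory is the torch allocator's recorded peak plus any
        # non-torch device allocations inferred from mem_get_info(), so that
        # memory held outside the torch caching allocator is not handed to
        # the KV cache.
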
        # Get the peak memory allocation recorded by torch.
        peak_memory = torch_npu.npu.memory_stats()["allocated_bytes.all.peak"]
        # TODO: remove this once empty_cache in
        # Worker.determine_num_available_blocks() is unified.
        NPUPlatform.empty_cache()
        torch_allocated_bytes = torch_npu.npu.memory_stats(
        )["allocated_bytes.all.current"]
        total_allocated_bytes = torch_npu.npu.mem_get_info(
        )[1] - torch_npu.npu.mem_get_info()[0]
        non_torch_allocations = total_allocated_bytes - torch_allocated_bytes
        if non_torch_allocations > 0:
            peak_memory += non_torch_allocations
        available_kv_cache_memory = int(
            total_npu_memory * self.cache_config.gpu_memory_utilization -
            peak_memory)
        available_kv_cache_memory = int(max(available_kv_cache_memory, 0))
        logger.info("Available memory: %s, total memory: %s",
                    available_kv_cache_memory, total_npu_memory)
        return available_kv_cache_memory

    def execute_model(
        self,
        scheduler_output: "SchedulerOutput",
    ) -> Optional[ModelRunnerOutput]:
        intermediate_tensors = None
        if not get_pp_group().is_first_rank:
            intermediate_tensors = IntermediateTensors(
                get_pp_group().recv_tensor_dict(
                    all_gather_group=get_tp_group()))

        output = self.model_runner.execute_model(scheduler_output,
                                                 intermediate_tensors)
        parallel_config = self.vllm_config.parallel_config
        if parallel_config.distributed_executor_backend != "external_launcher" \
                and not get_pp_group().is_last_rank:
            assert isinstance(output, IntermediateTensors)
            get_pp_group().send_tensor_dict(output.tensors,
                                            all_gather_group=get_tp_group())
            return None
        assert isinstance(output, ModelRunnerOutput)
        return output if self.is_driver_worker else None

    def load_model(self) -> None:
        if self.vllm_config.model_config.enable_sleep_mode:
            allocator = CaMemAllocator.get_instance()
            assert allocator.get_current_usage() == 0, (
                "Sleep mode can only be "
                "used for one instance per process.")
            context = allocator.use_memory_pool(tag="weights")
        else:
            from contextlib import nullcontext
            context = nullcontext()  # type: ignore
        with context:
            self.model_runner.load_model()

    def compile_or_warm_up_model(self) -> None:
        warmup_sizes = self.vllm_config.compilation_config.compile_sizes.copy()
        if not self.model_config.enforce_eager:
            warmup_sizes = [
                x for x in warmup_sizes if x not in
                self.vllm_config.compilation_config.cudagraph_capture_sizes
            ]
        for size in sorted(warmup_sizes, reverse=True):
            logger.info("Compiling and warming up model for size %d", size)
            self.model_runner._dummy_run(size)
        if not self.model_config.enforce_eager:
            self.model_runner.capture_model()
        # Reset the seed to ensure that the random state is not affected by
        # the model initialization and profiling.
        NPUPlatform.seed_everything(self.model_config.seed)
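
    # NOTE: the methods below are thin delegations to NPUModelRunner. Per
    # vLLM's v1 engine flow (described here for orientation, not enforced by
    # this file): the engine collects get_kv_cache_spec() from every worker,
    # sizes a KVCacheConfig against the memory reported by
    # determine_available_memory(), and then calls initialize_from_config()
    # on each worker to allocate the KV cache.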
    def get_model(self) -> nn.Module:
        return self.model_runner.get_model()

    def get_kv_cache_spec(self) -> dict[str, KVCacheSpec]:
        return self.model_runner.get_kv_cache_spec()

    def initialize_from_config(self, kv_cache_config: KVCacheConfig) -> None:
        """Allocate NPU KV cache with the specified kv_cache_config."""
        if self.vllm_config.model_config.enable_sleep_mode:
            allocator = CaMemAllocator.get_instance()
            context = allocator.use_memory_pool(tag="kv_cache")
        else:
            from contextlib import nullcontext
            context = nullcontext()  # type: ignore
        with context:
            self.model_runner.initialize_kv_cache(kv_cache_config)

    def profile(self, is_start: bool = True):
        if self.profiler is None:
            raise RuntimeError("Profiler is not enabled.")
        if is_start:
            self.profiler.start()
        else:
            self.profiler.stop()

    def add_lora(self, lora_request: LoRARequest) -> bool:
        return self.model_runner.add_lora(lora_request)

    def remove_lora(self, lora_id: int) -> bool:
        return self.model_runner.remove_lora(lora_id)

    def list_loras(self) -> set[int]:
        return self.model_runner.list_loras()

    def pin_lora(self, lora_id: int) -> bool:
        return self.model_runner.pin_lora(lora_id)

    def execute_dummy_batch(self) -> None:
        self.model_runner._dummy_run(1)

    def _init_worker_distributed_environment(self) -> None:
        """Initialize the distributed environment."""
        init_distributed_environment(self.parallel_config.world_size,
                                     self.rank, self.distributed_init_method,
                                     self.local_rank, "hccl")
        ensure_model_parallel_initialized(
            self.parallel_config.tensor_parallel_size,
            self.parallel_config.pipeline_parallel_size)
        init_ascend_model_parallel(self.parallel_config)
        ensure_kv_transfer_initialized(self.vllm_config)

    def _init_profiler(self):
        # Torch profiler. Enabled and configured through env vars:
        # VLLM_TORCH_PROFILER_DIR=/path/to/save/trace
        if envs.VLLM_TORCH_PROFILER_DIR:
            torch_profiler_trace_dir = envs.VLLM_TORCH_PROFILER_DIR
            logger.info("Profiling enabled. Traces will be saved to: %s",
                        torch_profiler_trace_dir)

            experimental_config = torch_npu.profiler._ExperimentalConfig(
                export_type=torch_npu.profiler.ExportType.Text,
                profiler_level=torch_npu.profiler.ProfilerLevel.Level1,
                msprof_tx=False,
                aic_metrics=torch_npu.profiler.AiCMetrics.AiCoreNone,
                l2_cache=False,
                op_attr=False,
                data_simplification=False,
                record_op_args=False,
                gc_detect_threshold=None,
            )

            return torch_npu.profiler.profile(
                activities=[
                    torch_npu.profiler.ProfilerActivity.CPU,
                    torch_npu.profiler.ProfilerActivity.NPU,
                ],
                with_stack=False,
                profile_memory=False,
                with_modules=False,
                experimental_config=experimental_config,
                on_trace_ready=torch_npu.profiler.tensorboard_trace_handler(
                    torch_profiler_trace_dir))
        else:
            return None

    def get_supported_pooling_tasks(self):
        return self.model_runner.get_supported_pooling_tasks()

    def get_supported_tasks(self) -> "tuple[SupportedTask, ...]":
        return self.model_runner.get_supported_tasks()
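
# A rough sketch of how a vLLM executor drives this worker, for orientation
# only (method names match this file; the exact call order follows vLLM's v1
# engine and is an assumption of this comment):
#
#   worker = NPUWorker(vllm_config, local_rank, rank, distributed_init_method)
#   worker.init_device()                   # bind the NPU, init HCCL groups
#   worker.load_model()                    # weights (CaMem pool in sleep mode)
#   free = worker.determine_available_memory()
#   worker.initialize_from_config(kv_cache_config)  # allocate the KV cache
#   worker.compile_or_warm_up_model()      # warm-up runs / graph capture
#   output = worker.execute_model(scheduler_output)  # per scheduler step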