# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Asynchronous front-end client for the vLLM v1 engine.

Wraps the background EngineCore process behind an asyncio interface:
requests are converted to EngineCoreRequests, sent over IPC, and their
outputs are demultiplexed back to per-request queues by a single
background output-handler task.
"""

import asyncio
import os
import socket
import time
import warnings
from collections.abc import AsyncGenerator, Iterable, Mapping
from copy import copy
from typing import Any

import torch

import vllm.envs as envs
from vllm import TokensPrompt
from vllm.config import VllmConfig
from vllm.distributed.weight_transfer.base import (
    WeightTransferInitRequest,
    WeightTransferUpdateRequest,
)
from vllm.engine.arg_utils import AsyncEngineArgs
from vllm.engine.protocol import EngineClient, StreamingInput
from vllm.inputs import ProcessorInputs, PromptType
from vllm.logger import init_logger
from vllm.lora.request import LoRARequest
from vllm.multimodal import MULTIMODAL_REGISTRY, MultiModalRegistry
from vllm.outputs import STREAM_FINISHED, PoolingRequestOutput, RequestOutput
from vllm.plugins.io_processors import get_io_processor
from vllm.pooling_params import PoolingParams
from vllm.renderers import renderer_from_config
from vllm.renderers.inputs.preprocess import extract_prompt_components
from vllm.sampling_params import RequestOutputKind, SamplingParams
from vllm.tasks import SupportedTask
from vllm.tokenizers import TokenizerLike
from vllm.tracing import init_tracer
from vllm.transformers_utils.config import maybe_register_config_serialize_by_value
from vllm.usage.usage_lib import UsageContext
from vllm.utils.async_utils import cancel_task_threadsafe
from vllm.utils.collection_utils import as_list
from vllm.v1.engine import EngineCoreRequest, PauseMode
from vllm.v1.engine.core_client import EngineCoreClient
from vllm.v1.engine.exceptions import EngineDeadError, EngineGenerateError
from vllm.v1.engine.input_processor import InputProcessor
from vllm.v1.engine.output_processor import OutputProcessor, RequestOutputCollector
from vllm.v1.engine.parallel_sampling import ParentRequest
from vllm.v1.executor import Executor
from vllm.v1.metrics.loggers import (
    StatLoggerFactory,
    StatLoggerManager,
    load_stat_logger_plugin_factories,
)
from vllm.v1.metrics.prometheus import shutdown_prometheus
from vllm.v1.metrics.stats import IterationStats

logger = init_logger(__name__)


class InputStreamError(Exception):
    """Wrapper for errors from the input stream generator.

    This is used to propagate errors from the user's input generator
    without wrapping them in EngineGenerateError.
    """

    def __init__(self, cause: Exception):
        # Keep the original exception so generate() can re-raise it
        # verbatim (`raise e.cause from e`).
        self.cause = cause
        super().__init__(str(cause))


class AsyncLLM(EngineClient):
    """An asynchronous wrapper for the vLLM engine."""

    def __init__(
        self,
        vllm_config: VllmConfig,
        executor_class: type[Executor],
        log_stats: bool,
        usage_context: UsageContext = UsageContext.ENGINE_CONTEXT,
        mm_registry: MultiModalRegistry = MULTIMODAL_REGISTRY,
        use_cached_outputs: bool = False,
        log_requests: bool = True,
        start_engine_loop: bool = True,
        stat_loggers: list[StatLoggerFactory] | None = None,
        aggregate_engine_logging: bool = False,
        client_addresses: dict[str, str] | None = None,
        client_count: int = 1,
        client_index: int = 0,
    ) -> None:
        """
        Create an AsyncLLM.

        Args:
            vllm_config: global configuration.
            executor_class: an Executor impl, e.g. MultiprocExecutor.
            log_stats: Whether to log stats.
            usage_context: Usage context of the LLM.
            mm_registry: Multi-modal registry.
            use_cached_outputs: Whether to use cached outputs.
            log_requests: Whether to log requests.
            start_engine_loop: Whether to start the engine loop.
            stat_loggers: customized stat loggers for the engine.
                If not provided, default stat loggers will be used.
                PLEASE BE AWARE THAT STAT LOGGER IS NOT STABLE IN V1,
                AND ITS BASE CLASS INTERFACE MIGHT CHANGE.

        Returns:
            None

        NOTE(review): `usage_context`, `mm_registry`, `use_cached_outputs`
        and `start_engine_loop` are accepted but not read in this body —
        presumably kept for interface compatibility; confirm before removal.
        """

        # Ensure we can serialize custom transformer configs
        maybe_register_config_serialize_by_value()

        self.vllm_config = vllm_config
        self.model_config = vllm_config.model_config
        self.observability_config = vllm_config.observability_config

        # OpenTelemetry tracing is enabled only when an OTLP endpoint is set.
        tracing_endpoint = self.observability_config.otlp_traces_endpoint
        if tracing_endpoint is not None:
            init_tracer("vllm.llm_engine", tracing_endpoint)

        self.log_requests = log_requests

        # Custom loggers force stats collection on even if log_stats=False;
        # in that case the default loggers stay disabled (see
        # enable_default_loggers below).
        custom_stat_loggers = list(stat_loggers or [])
        custom_stat_loggers.extend(load_stat_logger_plugin_factories())
        has_custom_loggers = bool(custom_stat_loggers)
        self.log_stats = log_stats or has_custom_loggers
        if not log_stats and has_custom_loggers:
            logger.info(
                "AsyncLLM created with log_stats=False, "
                "but custom stat loggers were found; "
                "enabling logging without default stat loggers."
            )

        self.renderer = renderer = renderer_from_config(self.vllm_config)
        self.io_processor = get_io_processor(
            self.vllm_config,
            self.model_config.io_processor_plugin,
        )

        # Convert TokPrompt --> EngineCoreRequest.
        self.input_processor = InputProcessor(self.vllm_config, renderer)

        # Converts EngineCoreOutputs --> RequestOutput.
        self.output_processor = OutputProcessor(
            renderer.tokenizer,
            log_stats=self.log_stats,
            stream_interval=self.vllm_config.scheduler_config.stream_interval,
            tracing_enabled=tracing_endpoint is not None,
        )

        # EngineCore (starts the engine in background process).
        self.engine_core = EngineCoreClient.make_async_mp_client(
            vllm_config=vllm_config,
            executor_class=executor_class,
            log_stats=self.log_stats,
            client_addresses=client_addresses,
            client_count=client_count,
            client_index=client_index,
        )

        # Loggers.
        self.logger_manager: StatLoggerManager | None = None
        if self.log_stats:
            self.logger_manager = StatLoggerManager(
                vllm_config=vllm_config,
                engine_idxs=self.engine_core.engine_ranks_managed,
                custom_stat_loggers=custom_stat_loggers,
                enable_default_loggers=log_stats,
                client_count=client_count,
                aggregate_engine_logging=aggregate_engine_logging,
            )
            self.logger_manager.log_engine_initialized()

        self._client_count = client_count

        # Background task that pulls EngineCoreOutputs and pushes them to
        # the per-request queues; lazily started (see _run_output_handler).
        self.output_handler: asyncio.Task | None = None
        try:
            # Start output handler eagerly if we are in the asyncio eventloop.
            asyncio.get_running_loop()
            self._run_output_handler()
        except RuntimeError:
            # Not inside a running event loop; the handler will be started
            # on the first add_request() call instead.
            pass

        if (
            vllm_config.profiler_config.profiler == "torch"
            and not vllm_config.profiler_config.ignore_frontend
        ):
            # CPU-only profiling of this front-end process; worker-side
            # profiling is handled separately by the engine core.
            profiler_dir = vllm_config.profiler_config.torch_profiler_dir
            logger.info(
                "Torch profiler enabled. AsyncLLM CPU traces will be collected under %s",  # noqa: E501
                profiler_dir,
            )
            worker_name = f"{socket.gethostname()}_{os.getpid()}.async_llm"
            self.profiler = torch.profiler.profile(
                activities=[
                    torch.profiler.ProfilerActivity.CPU,
                ],
                with_stack=vllm_config.profiler_config.torch_profiler_with_stack,
                on_trace_ready=torch.profiler.tensorboard_trace_handler(
                    profiler_dir,
                    worker_name=worker_name,
                    use_gzip=vllm_config.profiler_config.torch_profiler_use_gzip,
                ),
            )
        else:
            self.profiler = None

    @classmethod
    def from_vllm_config(
        cls,
        vllm_config: VllmConfig,
        start_engine_loop: bool = True,
        usage_context: UsageContext = UsageContext.ENGINE_CONTEXT,
        stat_loggers: list[StatLoggerFactory] | None = None,
        enable_log_requests: bool = False,
        aggregate_engine_logging: bool = False,
        disable_log_stats: bool = False,
        client_addresses: dict[str, str] | None = None,
        client_count: int = 1,
        client_index: int = 0,
    ) -> "AsyncLLM":
        """Create an AsyncLLM from an already-built VllmConfig.

        Translates the negated flags (enable_log_requests/disable_log_stats)
        into the positive arguments that __init__ expects.
        """
        # Create the LLMEngine.
        return cls(
            vllm_config=vllm_config,
            executor_class=Executor.get_class(vllm_config),
            start_engine_loop=start_engine_loop,
            stat_loggers=stat_loggers,
            log_requests=enable_log_requests,
            log_stats=not disable_log_stats,
            aggregate_engine_logging=aggregate_engine_logging,
            usage_context=usage_context,
            client_addresses=client_addresses,
            client_count=client_count,
            client_index=client_index,
        )

    @classmethod
    def from_engine_args(
        cls,
        engine_args: AsyncEngineArgs,
        start_engine_loop: bool = True,
        usage_context: UsageContext = UsageContext.ENGINE_CONTEXT,
        stat_loggers: list[StatLoggerFactory] | None = None,
    ) -> "AsyncLLM":
        """Create an AsyncLLM from the EngineArgs."""

        # Create the engine configs.
        vllm_config = engine_args.create_engine_config(usage_context)
        executor_class = Executor.get_class(vllm_config)

        # Create the AsyncLLM.
        return cls(
            vllm_config=vllm_config,
            executor_class=executor_class,
            log_requests=engine_args.enable_log_requests,
            log_stats=not engine_args.disable_log_stats,
            start_engine_loop=start_engine_loop,
            usage_context=usage_context,
            stat_loggers=stat_loggers,
        )

    def __del__(self):
        # Best-effort cleanup on garbage collection; shutdown() is
        # idempotent and guards each attribute with getattr.
        self.shutdown()

    def shutdown(self):
        """Shutdown, cleaning up the background proc and IPC."""
        shutdown_prometheus()

        # getattr guards: shutdown() may run on a partially-constructed
        # instance if __init__ raised partway through.
        if renderer := getattr(self, "renderer", None):
            renderer.shutdown()

        if engine_core := getattr(self, "engine_core", None):
            engine_core.shutdown()

        handler = getattr(self, "output_handler", None)
        if handler is not None:
            # May be called from a non-event-loop thread (e.g. __del__).
            cancel_task_threadsafe(handler)

    async def get_supported_tasks(self) -> tuple[SupportedTask, ...]:
        """Return the tasks supported by the engine core (cached)."""
        if not hasattr(self, "_supported_tasks"):
            # Cache the result
            self._supported_tasks = await self.engine_core.get_supported_tasks_async()
        return self._supported_tasks

    async def add_request(
        self,
        request_id: str,
        prompt: EngineCoreRequest
        | PromptType
        | ProcessorInputs
        | AsyncGenerator[StreamingInput, None],
        params: SamplingParams | PoolingParams,
        arrival_time: float | None = None,
        lora_request: LoRARequest | None = None,
        tokenization_kwargs: dict[str, Any] | None = None,
        trace_headers: Mapping[str, str] | None = None,
        priority: int = 0,
        data_parallel_rank: int | None = None,
        prompt_text: str | None = None,
        reasoning_ended: bool | None = None,
    ) -> RequestOutputCollector:
        """Add new request to the AsyncLLM.

        Dispatches on the prompt type: an AsyncGenerator is handled as a
        streaming-input session, an EngineCoreRequest is accepted directly
        (deprecated path), and anything else goes through the
        InputProcessor. For SamplingParams with n>1, fans out one child
        request per sample, all feeding the same output collector.

        Returns:
            RequestOutputCollector that the output handler pushes
            RequestOutputs into.
        """
        if self.errored:
            raise EngineDeadError()

        is_pooling = isinstance(params, PoolingParams)
        if (
            self.vllm_config.cache_config.kv_sharing_fast_prefill
            and not is_pooling
            and params.prompt_logprobs
        ):
            raise ValueError(
                "--kv-sharing-fast-prefill produces incorrect logprobs for "
                "prompt tokens, please disable it when the requests need "
                "prompt logprobs"
            )

        if isinstance(prompt, AsyncGenerator):
            if reasoning_ended is not None:
                # reasoning_ended is not supported on the streaming path.
                raise NotImplementedError
            # Streaming input case.
            return await self._add_streaming_input_request(
                request_id,
                prompt,
                params,
                arrival_time,
                lora_request,
                tokenization_kwargs,
                trace_headers,
                priority,
                data_parallel_rank,
            )

        # Convert Input --> Request.
        if isinstance(prompt, EngineCoreRequest):
            logger.warning_once(
                "Passing EngineCoreRequest to AsyncLLM.generate() and .add_requests() "
                "is deprecated and will be removed in v0.18. You should instead pass "
                "the outputs of Renderer.render_cmpl() or Renderer.render_chat()."
            )
            request = prompt
            if request_id != request.request_id:
                logger.warning_once(
                    "AsyncLLM.add_request() was passed a request_id parameter that "
                    "does not match the EngineCoreRequest.request_id attribute. The "
                    "latter will be used, and the former will be ignored."
                )
        else:
            request = self.input_processor.process_inputs(
                request_id,
                prompt,
                params,
                supported_tasks=await self.get_supported_tasks(),
                arrival_time=arrival_time,
                lora_request=lora_request,
                tokenization_kwargs=tokenization_kwargs,
                trace_headers=trace_headers,
                priority=priority,
                data_parallel_rank=data_parallel_rank,
            )
            # On this path the caller-supplied prompt_text is replaced by
            # the text extracted from the processed prompt.
            prompt_text, _, _ = extract_prompt_components(self.model_config, prompt)

        if reasoning_ended is not None:
            request.reasoning_ended = reasoning_ended

        self.input_processor.assign_request_id(request)

        # We start the output_handler on the first call to add_request() so
        # we can call __init__ before the event loop, which enables us
        # to handle startup failure gracefully in the OpenAI server.
        self._run_output_handler()

        # Create a new output collector for the request.
        queue = RequestOutputCollector(params.output_kind, request.request_id)

        # Use cloned params that may have been updated in process_inputs()
        params = request.params

        if is_pooling or params.n == 1:
            await self._add_request(request, prompt_text, None, 0, queue)
            return queue

        parent_params = params
        assert isinstance(parent_params, SamplingParams)

        # Fan out child requests (for n>1). The last child reuses the
        # original request object; earlier ones are shallow copies.
        parent_request = ParentRequest(request)
        for idx in range(parent_params.n):
            request_id, child_params = parent_request.get_child_info(idx)
            child_request = request if idx == parent_params.n - 1 else copy(request)
            child_request.request_id = request_id
            child_request.sampling_params = child_params
            await self._add_request(
                child_request, prompt_text, parent_request, idx, queue
            )
        return queue

    async def _add_request(
        self,
        request: EngineCoreRequest,
        prompt: str | None,
        parent_req: ParentRequest | None,
        index: int,
        queue: RequestOutputCollector,
    ):
        """Register a single request with the OutputProcessor and EngineCore.

        The local OutputProcessor must be told about the request before it
        is sent to the engine core so outputs have somewhere to land.
        """
        # Add the request to OutputProcessor (this process).
        self.output_processor.add_request(request, prompt, parent_req, index, queue)

        # Add the EngineCoreRequest to EngineCore (separate process).
        await self.engine_core.add_request_async(request)

        if self.log_requests:
            logger.info("Added request %s.", request.request_id)

    async def _add_streaming_input_request(
        self,
        request_id: str,
        input_stream: AsyncGenerator[StreamingInput, None],
        sampling_params: SamplingParams | PoolingParams,
        arrival_time: float | None = None,
        lora_request: LoRARequest | None = None,
        tokenization_kwargs: dict[str, Any] | None = None,
        trace_headers: Mapping[str, str] | None = None,
        priority: int = 0,
        data_parallel_rank: int | None = None,
    ) -> RequestOutputCollector:
        """Set up a streaming-input session driven by `input_stream`.

        A background task consumes chunks from the generator, converting
        each into a resumable engine request; a pre-built "final" request
        (with a dummy single-token prompt) signals end-of-input once the
        stream closes. Errors from the user's generator are delivered to
        the output queue wrapped in InputStreamError.
        """
        self._validate_streaming_input_sampling_params(sampling_params)

        # Keyword arguments shared by every per-chunk process_inputs() call.
        inputs = dict(
            supported_tasks=await self.get_supported_tasks(),
            arrival_time=arrival_time,
            lora_request=lora_request,
            tokenization_kwargs=tokenization_kwargs,
            trace_headers=trace_headers,
            priority=priority,
            data_parallel_rank=data_parallel_rank,
        )

        # Clone once up front so repeated process_inputs() calls don't
        # re-clone the same params for every chunk.
        if not sampling_params.skip_clone:
            sampling_params = sampling_params.clone()
            sampling_params.skip_clone = True

        # Create request for validation, also used as the finished signal
        # once the input stream is closed.
        final_req = self.input_processor.process_inputs(
            request_id=request_id,
            prompt=TokensPrompt(prompt_token_ids=[0]),
            params=sampling_params,
            **inputs,  # type: ignore[arg-type]
        )
        self.input_processor.assign_request_id(final_req)
        internal_req_id = final_req.request_id

        queue = RequestOutputCollector(sampling_params.output_kind, internal_req_id)

        async def handle_inputs():
            # Drains the user's input stream; one engine request per chunk.
            cancelled = False
            try:
                async for input_chunk in input_stream:
                    sp = input_chunk.sampling_params
                    if sp:
                        self._validate_streaming_input_sampling_params(sp)
                    else:
                        sp = sampling_params
                    # TODO(nick): Avoid re-validating reused sampling parameters
                    req = self.input_processor.process_inputs(
                        request_id=internal_req_id,
                        prompt=input_chunk.prompt,
                        params=sp,
                        resumable=True,
                        **inputs,  # type: ignore[arg-type]
                    )
                    req.external_req_id = request_id
                    if req.prompt_embeds is not None:
                        raise ValueError(
                            "prompt_embeds not supported for streaming inputs"
                        )
                    prompt_text, _, _ = extract_prompt_components(
                        self.model_config, input_chunk.prompt
                    )
                    await self._add_request(req, prompt_text, None, 0, queue)
            except (asyncio.CancelledError, GeneratorExit):
                cancelled = True
            except Exception as error:
                # Wrap in InputStreamError so generate() can propagate it
                # without wrapping in EngineGenerateError.
                queue.put(InputStreamError(error))
            finally:
                queue._input_stream_task = None
                if not cancelled:
                    # Send empty final request to indicate that inputs have
                    # finished. Don't send if cancelled (session was aborted).
                    await self._add_request(final_req, None, None, 0, queue)

        # Ensure output handler is running.
        self._run_output_handler()

        queue._input_stream_task = asyncio.create_task(handle_inputs())
        return queue

    @staticmethod
    def _validate_streaming_input_sampling_params(
        params: SamplingParams | PoolingParams,
    ):
        """Reject parameter combinations unsupported by input streaming."""
        if (
            not isinstance(params, SamplingParams)
            or params.n > 1
            or params.output_kind == RequestOutputKind.FINAL_ONLY
            or params.stop
        ):
            raise ValueError(
                "Input streaming not currently supported "
                "for pooling models, n > 1, request_kind = FINAL_ONLY "
                "or with stop strings."
            )

    # TODO: we should support multiple prompts in one call, as you
    # can do with LLM.generate. So that for multi-prompt completion
    # requests we don't need to send multiple messages to core proc,
    # and so we don't need multiple streams which then get
    # re-multiplexed in the API server anyhow.
    async def generate(
        self,
        prompt: EngineCoreRequest
        | PromptType
        | ProcessorInputs
        | AsyncGenerator[StreamingInput, None],
        sampling_params: SamplingParams,
        request_id: str,
        *,
        prompt_text: str | None = None,
        lora_request: LoRARequest | None = None,
        tokenization_kwargs: dict[str, Any] | None = None,
        trace_headers: Mapping[str, str] | None = None,
        priority: int = 0,
        data_parallel_rank: int | None = None,
        reasoning_ended: bool | None = None,
    ) -> AsyncGenerator[RequestOutput, None]:
        """
        Main function called by the API server to kick off a request
            * 1) Making an AsyncStream corresponding to the Request.
            * 2) Processing the Input.
            * 3) Adding the Request to the Detokenizer.
            * 4) Adding the Request to the EngineCore (separate process).

        A separate output_handler loop runs in a background AsyncIO task,
        pulling outputs from EngineCore and putting them into the
        per-request AsyncStream.

        The caller of generate() iterates the returned AsyncGenerator,
        returning the RequestOutput back to the caller.
        """
        q: RequestOutputCollector | None = None
        try:
            q = await self.add_request(
                request_id,
                prompt,
                sampling_params,
                lora_request=lora_request,
                tokenization_kwargs=tokenization_kwargs,
                trace_headers=trace_headers,
                priority=priority,
                data_parallel_rank=data_parallel_rank,
                prompt_text=prompt_text,
                reasoning_ended=reasoning_ended,
            )

            # The output_handler task pushes items into the queue.
            # This task pulls from the queue and yields to caller.
            finished = False
            while not finished:
                # Note: drain queue without await if possible (avoids
                # task switching under load which helps performance).
                out = q.get_nowait() or await q.get()
                # Note: both OutputProcessor and EngineCore handle their
                # own request cleanup based on finished.
                assert isinstance(out, RequestOutput)
                finished = out.finished
                # STREAM_FINISHED is an internal sentinel, not surfaced
                # to the caller.
                if out is not STREAM_FINISHED:
                    yield out

        # If the request is disconnected by the client, generate()
        # is cancelled or the generator is garbage collected. So,
        # we abort the request if we end up here.
        except (asyncio.CancelledError, GeneratorExit):
            if q is not None:
                await self.abort(q.request_id, internal=True)
            if self.log_requests:
                logger.info("Request %s aborted.", request_id)
            raise

        # Engine is dead. Do not abort since we shut down.
        except EngineDeadError:
            if self.log_requests:
                logger.info("Request %s failed (engine dead).", request_id)
            raise

        # Request validation error.
        except ValueError as e:
            if self.log_requests:
                logger.info("Request %s failed (bad request): %s.", request_id, e)
            raise

        # Error from input stream generator - propagate directly.
        except InputStreamError as e:
            if q is not None:
                await self.abort(q.request_id, internal=True)
            if self.log_requests:
                logger.info("Request %s failed (input error): %s.", request_id, e)
            raise e.cause from e

        # Unexpected error in the generate() task (possibly recoverable).
        except Exception as e:
            if q is not None:
                await self.abort(q.request_id, internal=True)

            if self.log_requests:
                try:
                    s = f"{e.__class__.__name__}: {e}"
                except Exception as e2:
                    # Defensive: even formatting the exception can raise.
                    s = (
                        f"{e.__class__.__name__}: "
                        "error during printing an exception of class"
                        + e2.__class__.__name__
                    )
                logger.info("Request %s failed due to %s.", request_id, s)
            raise EngineGenerateError() from e
        finally:
            if q is not None:
                q.close()

    def _run_output_handler(self):
        """Background loop: pulls from EngineCore and pushes to AsyncStreams."""

        # Idempotent: only one handler task is ever created.
        if self.output_handler is not None:
            return

        # Ensure that the task doesn't have a circular ref back to the AsyncLLM
        # object, or else it won't be garbage collected and cleaned up properly.
        engine_core = self.engine_core
        output_processor = self.output_processor
        log_stats = self.log_stats
        logger_manager = self.logger_manager
        renderer = self.renderer
        chunk_size = envs.VLLM_V1_OUTPUT_PROC_CHUNK_SIZE

        async def output_handler():
            try:
                while True:
                    # 1) Pull EngineCoreOutputs from the EngineCore.
                    outputs = await engine_core.get_output_async()
                    num_outputs = len(outputs.outputs)

                    iteration_stats = (
                        IterationStats() if (log_stats and num_outputs) else None
                    )

                    # Split outputs into chunks of at most
                    # VLLM_V1_OUTPUT_PROC_CHUNK_SIZE, so that we don't block the
                    # event loop for too long.
                    engine_core_outputs = outputs.outputs
                    for start in range(0, num_outputs, chunk_size):
                        end = start + chunk_size
                        outputs_slice = engine_core_outputs[start:end]

                        # 2) Process EngineCoreOutputs.
                        processed_outputs = output_processor.process_outputs(
                            outputs_slice, outputs.timestamp, iteration_stats
                        )
                        # NOTE: RequestOutputs are pushed to their queues.
                        assert not processed_outputs.request_outputs

                        # Allow other asyncio tasks to run between chunks
                        if end < num_outputs:
                            await asyncio.sleep(0)

                        # 3) Abort any reqs that finished due to stop strings.
                        if processed_outputs.reqs_to_abort:
                            await engine_core.abort_requests_async(
                                processed_outputs.reqs_to_abort
                            )

                    output_processor.update_scheduler_stats(outputs.scheduler_stats)

                    # 4) Logging.
                    # TODO(rob): make into a coroutine and launch it in
                    # background thread once Prometheus overhead is non-trivial.
                    if logger_manager:
                        logger_manager.record(
                            engine_idx=outputs.engine_index,
                            scheduler_stats=outputs.scheduler_stats,
                            iteration_stats=iteration_stats,
                            mm_cache_stats=renderer.stat_mm_cache(),
                        )
            except Exception as e:
                # Propagate the failure to every in-flight request queue
                # so callers are unblocked with the error.
                logger.exception("AsyncLLM output_handler failed.")
                output_processor.propagate_error(e)

        self.output_handler = asyncio.create_task(output_handler())

    async def abort(
        self, request_id: str | Iterable[str], internal: bool = False
    ) -> None:
        """Abort RequestId in OutputProcessor and EngineCore."""
        request_ids = (
            (request_id,) if isinstance(request_id, str) else as_list(request_id)
        )
        # abort_requests may expand the set (e.g. to sibling child
        # requests) — forward the full set to the engine core.
        all_request_ids = self.output_processor.abort_requests(request_ids, internal)
        await self.engine_core.abort_requests_async(all_request_ids)

        if self.log_requests:
            logger.info("Aborted request(s) %s.", ",".join(request_ids))

    async def pause_generation(
        self,
        *,
        mode: PauseMode = "abort",
        wait_for_inflight_requests: bool | None = None,
        clear_cache: bool = True,
    ) -> None:
        """
        Pause generation to allow model weight updates.
        All mode handling (abort / wait / keep) and cache clearing
        is done in the engine. New generation/encoding requests will
        not be scheduled until resume is called.

        Args:
            mode: How to handle in-flight requests:
                - ``"abort"``: Abort all in-flight requests immediately
                  (default).
                - ``"wait"``: Wait for in-flight requests to complete.
                - ``"keep"``: Freeze requests in queue; they resume on
                  :meth:`resume_generation`.
            wait_for_inflight_requests: DEPRECATED: use mode argument.
            clear_cache: Whether to clear KV cache and prefix cache after
                draining. Set to ``False`` to preserve cache for faster
                resume.
        """
        if wait_for_inflight_requests:
            warnings.warn(
                "The `wait_for_inflight_requests` parameter in "
                "`AsyncLLM.pause_generation()` is deprecated. "
                "Please use `mode` argument instead.",
                DeprecationWarning,
                stacklevel=2,
            )
            mode = "wait"
        await self.engine_core.pause_scheduler_async(mode=mode, clear_cache=clear_cache)
        # Small sleep to help ensure that final outputs from any in-flight requests are
        # returned prior to this method returning. These outputs come out of the engine
        # prior to the wait-for-idle completion event, but involve additional async
        # tasks in output processing.
        # Note that this is not required for correctness, just more intuitive ordering
        # of events from caller's pov.
        await asyncio.sleep(0.02)

    async def resume_generation(self) -> None:
        """Resume generation after :meth:`pause_generation`."""
        await self.engine_core.resume_scheduler_async()

    async def is_paused(self) -> bool:
        """Return whether the engine is currently paused."""
        return await self.engine_core.is_scheduler_paused_async()

    async def encode(
        self,
        prompt: PromptType | ProcessorInputs,
        pooling_params: PoolingParams,
        request_id: str,
        lora_request: LoRARequest | None = None,
        trace_headers: Mapping[str, str] | None = None,
        priority: int = 0,
        tokenization_kwargs: dict[str, Any] | None = None,
        reasoning_ended: bool | None = None,
    ) -> AsyncGenerator[PoolingRequestOutput, None]:
        """
        Main function called by the API server to kick off a request
            * 1) Making an AsyncStream corresponding to the Request.
            * 2) Processing the Input.
            * 3) Adding the Request to the EngineCore (separate process).

        A separate output_handler loop runs in a background AsyncIO task,
        pulling outputs from EngineCore and putting them into the
        per-request AsyncStream.

        The caller of generate() iterates the returned AsyncGenerator,
        returning the RequestOutput back to the caller.
        """
        q: RequestOutputCollector | None = None
        try:
            q = await self.add_request(
                request_id,
                prompt,
                pooling_params,
                lora_request=lora_request,
                tokenization_kwargs=tokenization_kwargs,
                trace_headers=trace_headers,
                priority=priority,
                reasoning_ended=reasoning_ended,
            )

            # The output_handler task pushes items into the queue.
            # This task pulls from the queue and yields to caller.
            finished = False
            while not finished:
                # Note: drain queue without await if possible (avoids
                # task switching under load which helps performance).
                out = q.get_nowait() or await q.get()
                assert isinstance(out, PoolingRequestOutput)
                # Note: both OutputProcessor and EngineCore handle their
                # own request cleanup based on finished.
                finished = out.finished
                yield out

        # If the request is disconnected by the client, generate()
        # is cancelled. So, we abort the request if we end up here.
        except asyncio.CancelledError:
            if q is not None:
                await self.abort(q.request_id, internal=True)
            if self.log_requests:
                logger.info("Request %s aborted.", request_id)
            raise

        # Engine is dead. Do not abort since we shut down.
        except EngineDeadError:
            if self.log_requests:
                logger.info("Request %s failed (engine dead).", request_id)
            raise

        # Request validation error.
        except ValueError:
            if self.log_requests:
                logger.info("Request %s failed (bad request).", request_id)
            raise

        # Unexpected error in the generate() task (possibly recoverable).
        except Exception as e:
            if q is not None:
                await self.abort(q.request_id, internal=True)
            if self.log_requests:
                logger.info("Request %s failed.", request_id)
            raise EngineGenerateError() from e
        finally:
            if q is not None:
                q.close()

    @property
    def tokenizer(self) -> TokenizerLike | None:
        """The renderer's tokenizer (may be None for tokenizer-free setups)."""
        return self.renderer.tokenizer

    def get_tokenizer(self) -> TokenizerLike:
        """Return the tokenizer, delegating to the renderer."""
        return self.renderer.get_tokenizer()

    async def is_tracing_enabled(self) -> bool:
        """Whether OTLP tracing was configured at construction time."""
        return self.observability_config.otlp_traces_endpoint is not None

    async def do_log_stats(self) -> None:
        """Flush the stat loggers, if stats logging is enabled."""
        if self.logger_manager:
            self.logger_manager.log()

    async def check_health(self) -> None:
        """Raise the dead-engine error if the engine has failed."""
        logger.debug("Called check_health.")
        if self.errored:
            raise self.dead_error

    async def start_profile(self, profile_prefix: str | None = None) -> None:
        """Start engine-core profiling, plus the frontend torch profiler."""
        coros = [self.engine_core.profile_async(True, profile_prefix)]
        if self.profiler is not None:
            # profiler.start is blocking; run it off the event loop.
            coros.append(asyncio.to_thread(self.profiler.start))

        await asyncio.gather(*coros)

    async def stop_profile(self) -> None:
        """Stop engine-core profiling, plus the frontend torch profiler."""
        coros = [self.engine_core.profile_async(False)]
        if self.profiler is not None:
            coros.append(asyncio.to_thread(self.profiler.stop))

        await asyncio.gather(*coros)

    async def reset_mm_cache(self) -> None:
        """Clear the multi-modal caches in both frontend and engine core."""
        self.renderer.clear_mm_cache()
        await self.engine_core.reset_mm_cache_async()

    async def reset_prefix_cache(
        self, reset_running_requests: bool = False, reset_connector: bool = False
    ) -> bool:
        """Reset the KV prefix cache in the engine core."""
        return await self.engine_core.reset_prefix_cache_async(
            reset_running_requests, reset_connector
        )

    async def reset_encoder_cache(self) -> None:
        """Reset the encoder cache in the engine core."""
        await self.engine_core.reset_encoder_cache_async()

    async def sleep(self, level: int = 1, mode: PauseMode = "abort") -> None:
        """Put the engine to sleep (offload/free weights) at the given level."""
        await self.engine_core.sleep_async(level, mode)
        if self.logger_manager is not None:
            self.logger_manager.record_sleep_state(1, level)

    async def wake_up(self, tags: list[str] | None = None) -> None:
        """Wake the engine up from sleep."""
        await self.engine_core.wake_up_async(tags)
        if self.logger_manager is not None:
            self.logger_manager.record_sleep_state(0, 0)

    async def is_sleeping(self) -> bool:
        """Return whether the engine is currently sleeping."""
        return await self.engine_core.is_sleeping_async()

    async def add_lora(self, lora_request: LoRARequest) -> bool:
        """Load a new LoRA adapter into the engine for future requests."""
        return await self.engine_core.add_lora_async(lora_request)

    async def remove_lora(self, lora_id: int) -> bool:
        """Remove an already loaded LoRA adapter."""
        return await self.engine_core.remove_lora_async(lora_id)

    async def list_loras(self) -> set[int]:
        """List all registered adapters."""
        return await self.engine_core.list_loras_async()

    async def pin_lora(self, lora_id: int) -> bool:
        """Prevent an adapter from being evicted."""
        return await self.engine_core.pin_lora_async(lora_id)

    async def collective_rpc(
        self,
        method: str,
        timeout: float | None = None,
        args: tuple = (),
        kwargs: dict | None = None,
    ):
        """
        Perform a collective RPC call to the given path.
        """
        return await self.engine_core.collective_rpc_async(
            method, timeout, args, kwargs
        )

    async def wait_for_requests_to_drain(self, drain_timeout: int = 300):
        """Wait for all requests to be drained.

        Polls the engine once per second until idle.

        Raises:
            TimeoutError: if the engines are still busy after
                `drain_timeout` seconds.
        """
        start_time = time.time()
        while time.time() - start_time < drain_timeout:
            if not self.engine_core.dp_engines_running():
                logger.info("Engines are idle, requests have been drained")
                return

            logger.info("Engines are still running, waiting for requests to drain...")
            await asyncio.sleep(1)  # Wait 1 second before checking again

        raise TimeoutError(
            f"Timeout reached after {drain_timeout} seconds "
            "waiting for requests to drain."
        )

    async def scale_elastic_ep(
        self, new_data_parallel_size: int, drain_timeout: int = 300
    ):
        """
        Scale up or down the data parallel size by adding
        or removing engine cores.

        Args:
            new_data_parallel_size: The new number of data parallel workers
            drain_timeout: Maximum time to wait for requests to drain (seconds)
        """
        old_data_parallel_size = self.vllm_config.parallel_config.data_parallel_size
        if old_data_parallel_size == new_data_parallel_size:
            logger.info(
                "Data parallel size is already %s, skipping scale",
                new_data_parallel_size,
            )
            return
        logger.info(
            "Waiting for requests to drain before scaling up to %s engines...",
            new_data_parallel_size,
        )
        await self.wait_for_requests_to_drain(drain_timeout)
        logger.info(
            "Requests have been drained, proceeding with scale to %s engines",
            new_data_parallel_size,
        )
        await self.engine_core.scale_elastic_ep(new_data_parallel_size)
        self.vllm_config.parallel_config.data_parallel_size = new_data_parallel_size

        # recreate stat loggers
        if new_data_parallel_size > old_data_parallel_size and self.log_stats:
            # TODO(rob): fix this after talking with Ray team.
            # This resets all the prometheus metrics since we
            # unregister during initialization. Need to understand
            # the intended behavior here better.
            self.logger_manager = StatLoggerManager(
                vllm_config=self.vllm_config,
                engine_idxs=list(range(new_data_parallel_size)),
                custom_stat_loggers=None,
            )

    @property
    def is_running(self) -> bool:
        # Is None before the loop is started.
        return self.output_handler is None or not self.output_handler.done()

    @property
    def is_stopped(self) -> bool:
        return self.errored

    @property
    def errored(self) -> bool:
        # Dead if the engine core process died, or if the output handler
        # task exited (it only exits on error or shutdown).
        return self.engine_core.resources.engine_dead or not self.is_running

    @property
    def dead_error(self) -> BaseException:
        return EngineDeadError()

    async def init_weight_transfer_engine(
        self, request: WeightTransferInitRequest
    ) -> None:
        """
        Initialize weight transfer for RL training.

        Args:
            request: Weight transfer initialization request with
                backend-specific info
        """
        # NOTE(review): redundant local import — WeightTransferInitRequest
        # is already imported at module level; presumably kept to avoid an
        # import cycle, confirm before removing.
        from vllm.distributed.weight_transfer.base import (
            WeightTransferInitRequest,
        )

        if isinstance(request, WeightTransferInitRequest):
            init_info_dict = request.init_info
        else:
            raise TypeError(f"Expected WeightTransferInitRequest, got {type(request)}")

        await self.collective_rpc(
            "init_weight_transfer_engine", kwargs={"init_info": init_info_dict}
        )

    async def update_weights(self, request: WeightTransferUpdateRequest) -> None:
        """
        Batched weight update for RL training.

        Args:
            request: Weight update request with backend-specific update info
        """
        if isinstance(request, WeightTransferUpdateRequest):
            update_info_dict = request.update_info
        else:
            raise TypeError(
                f"Expected WeightTransferUpdateRequest, got {type(request)}"
            )

        await self.collective_rpc(
            "update_weights", kwargs={"update_info": update_info_dict}
        )