[gpt-oss] Add gpt-oss bf16 support

2025-08-13 21:25:57 +08:00
parent 5d2e7edf78
commit 17ea2ec6aa
1232 changed files with 777 additions and 36 deletions

vllm/plugins/__init__.py Normal file (+94 lines)

@@ -0,0 +1,94 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import logging
import os
from typing import Any, Callable
import torch
import vllm.envs as envs
logger = logging.getLogger(__name__)
DEFAULT_PLUGINS_GROUP = 'vllm.general_plugins'
# make sure one process only loads plugins once
plugins_loaded = False
def load_plugins_by_group(group: str) -> dict[str, Callable[[], Any]]:
import sys
if sys.version_info < (3, 10):
from importlib_metadata import entry_points
else:
from importlib.metadata import entry_points
allowed_plugins = envs.VLLM_PLUGINS
discovered_plugins = entry_points(group=group)
if len(discovered_plugins) == 0:
logger.debug("No plugins for group %s found.", group)
return {}
    # Check if the group is the default plugins group
is_default_group = (group == DEFAULT_PLUGINS_GROUP)
# Use INFO for non-default groups and DEBUG for the default group
log_level = logger.debug if is_default_group else logger.info
log_level("Available plugins for group %s:", group)
for plugin in discovered_plugins:
log_level("- %s -> %s", plugin.name, plugin.value)
if allowed_plugins is None:
log_level("All plugins in this group will be loaded. "
"Set `VLLM_PLUGINS` to control which plugins to load.")
plugins = dict[str, Callable[[], Any]]()
for plugin in discovered_plugins:
if allowed_plugins is None or plugin.name in allowed_plugins:
if allowed_plugins is not None:
log_level("Loading plugin %s", plugin.name)
try:
func = plugin.load()
plugins[plugin.name] = func
except Exception:
logger.exception("Failed to load plugin %s", plugin.name)
return plugins
def load_general_plugins():
"""WARNING: plugins can be loaded for multiple times in different
processes. They should be designed in a way that they can be loaded
multiple times without causing issues.
"""
global plugins_loaded
if plugins_loaded:
return
plugins_loaded = True
# some platform-specific configurations
from vllm.platforms import current_platform
if current_platform.is_xpu():
# see https://github.com/pytorch/pytorch/blob/43c5f59/torch/_dynamo/config.py#L158
torch._dynamo.config.disable = True
elif current_platform.is_hpu():
# NOTE(kzawora): PT HPU lazy backend (PT_HPU_LAZY_MODE = 1)
# does not support torch.compile
# Eager backend (PT_HPU_LAZY_MODE = 0) must be selected for
# torch.compile support
is_lazy = os.environ.get('PT_HPU_LAZY_MODE', '1') == '1'
if is_lazy:
torch._dynamo.config.disable = True
# NOTE(kzawora) multi-HPU inference with HPUGraphs (lazy-only)
# requires enabling lazy collectives
# see https://docs.habana.ai/en/latest/PyTorch/Inference_on_PyTorch/Inference_Using_HPU_Graphs.html # noqa: E501
os.environ['PT_HPU_ENABLE_LAZY_COLLECTIVES'] = 'true'
plugins = load_plugins_by_group(group=DEFAULT_PLUGINS_GROUP)
# general plugins, we only need to execute the loaded functions
for func in plugins.values():
func()
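
For context on how this discovery mechanism is consumed, below is a minimal sketch of a hypothetical out-of-tree package exposing an entry point in the `vllm.general_plugins` group; the package, module, and function names (`vllm_my_plugin`, `register`, `my_plugin`) are invented for the example.

# setup.py of a hypothetical third-party package "vllm_my_plugin"
from setuptools import setup

setup(
    name="vllm_my_plugin",
    version="0.1.0",
    packages=["vllm_my_plugin"],
    entry_points={
        # The group name must match DEFAULT_PLUGINS_GROUP above.
        "vllm.general_plugins": [
            # name = "module:attribute"; plugin.load() imports the module and
            # returns the attribute, and load_general_plugins() then calls it.
            "my_plugin = vllm_my_plugin:register",
        ],
    },
)

With such a package installed, `load_general_plugins()` would import `vllm_my_plugin`, retrieve `register`, and call it once per process; setting `VLLM_PLUGINS=my_plugin` would restrict loading to that single entry point.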


@@ -0,0 +1,15 @@
# LoRA Resolver Plugins
This directory contains vLLM general plugins for dynamically discovering and loading LoRA adapters
via the LoRAResolver plugin framework.
Note that `VLLM_ALLOW_RUNTIME_LORA_UPDATING` must be set to true to allow LoRA resolver plugins
to work, and `VLLM_PLUGINS` must be set to include the desired resolver plugins.
# lora_filesystem_resolver
This LoRA Resolver is installed with vLLM by default.
To use it, set `VLLM_LORA_RESOLVER_CACHE_DIR` to a local directory. When vLLM receives a request
for a LoRA adapter `foobar` that it doesn't currently recognize, it looks in that directory for a
subdirectory `foobar` containing a LoRA adapter. If such an adapter exists, the resolver loads it,
vLLM services the request as normal, and the adapter remains available for future requests.
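
As a concrete illustration of the layout the resolver expects, here is a rough sketch that prepares the cache directory by hand; the adapter name `foobar` and the base model name are hypothetical, and in practice the adapter directory would typically just be the output of PEFT's `save_pretrained`.

# Hypothetical example: place a LoRA adapter named "foobar" into the resolver cache.
import json
import os

cache_dir = os.environ["VLLM_LORA_RESOLVER_CACHE_DIR"]
adapter_dir = os.path.join(cache_dir, "foobar")  # subdirectory name == requested adapter name
os.makedirs(adapter_dir, exist_ok=True)

# adapter_config.json is what the resolver inspects: peft_type must be "LORA"
# and base_model_name_or_path must equal the base model the server is running.
config = {
    "peft_type": "LORA",
    "base_model_name_or_path": "openai/gpt-oss-20b",  # hypothetical base model
    "r": 16,
    "lora_alpha": 32,
}
with open(os.path.join(adapter_dir, "adapter_config.json"), "w") as f:
    json.dump(config, f)

# The adapter weights (e.g. adapter_model.safetensors) go in the same directory.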

@@ -0,0 +1,50 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import json
import os
from typing import Optional
import vllm.envs as envs
from vllm.lora.request import LoRARequest
from vllm.lora.resolver import LoRAResolver, LoRAResolverRegistry
class FilesystemResolver(LoRAResolver):
def __init__(self, lora_cache_dir: str):
self.lora_cache_dir = lora_cache_dir
async def resolve_lora(self, base_model_name: str,
lora_name: str) -> Optional[LoRARequest]:
lora_path = os.path.join(self.lora_cache_dir, lora_name)
if os.path.exists(lora_path):
adapter_config_path = os.path.join(self.lora_cache_dir, lora_name,
"adapter_config.json")
if os.path.exists(adapter_config_path):
with open(adapter_config_path) as file:
adapter_config = json.load(file)
if adapter_config["peft_type"] == "LORA" and adapter_config[
"base_model_name_or_path"] == base_model_name:
lora_request = LoRARequest(lora_name=lora_name,
lora_int_id=abs(
hash(lora_name)),
lora_path=lora_path)
return lora_request
return None
def register_filesystem_resolver():
"""Register the filesystem LoRA Resolver with vLLM"""
lora_cache_dir = envs.VLLM_LORA_RESOLVER_CACHE_DIR
if lora_cache_dir:
if not os.path.exists(lora_cache_dir) or not os.path.isdir(
lora_cache_dir):
            raise ValueError(
                "VLLM_LORA_RESOLVER_CACHE_DIR must be set to a valid directory "
                "for Filesystem Resolver plugin to function")
fs_resolver = FilesystemResolver(lora_cache_dir)
LoRAResolverRegistry.register_resolver("Filesystem Resolver",
fs_resolver)
return
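
To show how the pieces fit together, here is a minimal sketch that exercises the resolver directly, assuming it runs in the same module as `FilesystemResolver` above and that an adapter has been placed in the cache directory as described in the README; the base model and adapter names are hypothetical. In normal operation the resolver is not called by hand: it ships with vLLM as a general plugin (per the README above), so `register_filesystem_resolver()` runs via `load_general_plugins()` and vLLM consults the registered resolver whenever a request names an adapter it does not yet know.

# Minimal sketch, run in the same module as FilesystemResolver above (os is
# already imported there); environment variable, base model and adapter name
# are hypothetical.
import asyncio

resolver = FilesystemResolver(os.environ["VLLM_LORA_RESOLVER_CACHE_DIR"])
lora_request = asyncio.run(
    resolver.resolve_lora(base_model_name="openai/gpt-oss-20b",
                          lora_name="foobar"))
print(lora_request)  # a LoRARequest if the adapter matches, otherwise None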