[Model] Support DeepSeek-V4

chenxb002
2026-04-24 09:50:34 +08:00
commit b9925203b8
172 changed files with 44780 additions and 0 deletions


@@ -0,0 +1,3 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM-MLU project


@@ -0,0 +1,112 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM-MLU project
import numpy as np
import torch
from vllm.distributed import get_dcp_group
from vllm.logger import init_logger
from vllm.v1.worker.block_table import BlockTable
from vllm_mlu.mlu_hijack_utils import MluHijackObject
logger = init_logger(__name__)
class BlockTable_MluHijack(BlockTable):
def __init__(
self,
block_size: int,
max_num_reqs: int,
max_num_blocks_per_req: int,
max_num_batched_tokens: int,
pin_memory: bool,
device: torch.device,
kernel_block_size: int,
dcp_kv_cache_interleave_size: int,
):
"""
Args:
block_size: Block size used for KV cache memory allocation
max_num_reqs: Maximum number of concurrent requests supported.
max_num_blocks_per_req: Maximum number of blocks per request.
max_num_batched_tokens: Maximum number of tokens in a batch.
pin_memory: Whether to pin memory for faster GPU transfers.
device: Target device for the block table.
kernel_block_size: The block size of the underlying attention kernel.
Will be the same as `block_size` if `block_size` is supported
by the attention kernel.
dcp_kv_cache_interleave_size: Interleave size used when distributing
the KV cache across decode context parallel (DCP) ranks.
"""
self.max_num_reqs = max_num_reqs
self.max_num_batched_tokens = max_num_batched_tokens
self.pin_memory = pin_memory
self.device = device
if kernel_block_size == block_size:
# Standard case: allocation and computation use same block size
# No block splitting needed, direct mapping
self.block_size = block_size
self.blocks_per_kv_block = 1
self.use_hybrid_blocks = False
else:
# Hybrid case: allocation block size differs from kernel block size
# Memory blocks are subdivided to match kernel requirements
# Example: 32-token memory blocks with 16-token kernel blocks
# → Each memory block corresponds to 2 kernel blocks
if block_size % kernel_block_size != 0:
raise ValueError(
f"kernel_block_size {kernel_block_size} must divide "
f"kv_manager_block_size size {block_size} evenly"
)
self.block_size = kernel_block_size
self.blocks_per_kv_block = block_size // kernel_block_size
self.use_hybrid_blocks = True
self.max_num_blocks_per_req = max_num_blocks_per_req * self.blocks_per_kv_block
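# Worked example: block_size=32 with kernel_block_size=16 gives blocks_per_kv_block=2,
# so a request that may use up to N memory blocks is tracked as 2*N kernel-sized entries.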
self.block_table = self._make_buffer(
self.max_num_reqs, self.max_num_blocks_per_req, dtype=torch.int32
)
self.num_blocks_per_row = np.zeros(max_num_reqs, dtype=np.int32)
'''
=============================
Modify by vllm_mlu
=============================
@brief: change slot_mapping dtype from int64 to int32
'''
self.slot_mapping = self._make_buffer(
self.max_num_batched_tokens, dtype=torch.int32
)
'''
==================
End of MLU Hijack
==================
'''
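# Offsets [0, blocks_per_kv_block) used to expand each allocated memory block id
# into its consecutive kernel block ids when hybrid blocks are enabled.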
if self.use_hybrid_blocks:
self._kernel_block_arange = np.arange(0, self.blocks_per_kv_block).reshape(
1, -1
)
else:
self._kernel_block_arange = None
try:
self.dcp_world_size = get_dcp_group().world_size
self.dcp_rank = get_dcp_group().rank_in_group
except AssertionError:
# DCP might not be initialized in testing
self.dcp_world_size = 1
self.dcp_rank = 0
self.dcp_kv_cache_interleave_size = dcp_kv_cache_interleave_size
MluHijackObject.apply_hijack(
BlockTable,
BlockTable.__init__,
BlockTable_MluHijack.__init__
)

File diff suppressed because it is too large


@@ -0,0 +1,25 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM-MLU project
from vllm.v1.worker.gpu_input_batch import InputBatch
from vllm_mlu.mlu_hijack_utils import MluHijackObject
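# A request is still prefilling while it has computed fewer tokens than its prompt;
# once the prompt is fully computed it is counted as a decode.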
def split_decodes_and_prefills(self):
decodes = 0
prefills = 0
for req_id in self.req_ids:
req_index = self.req_id_to_index.get(req_id)
num_prompt_tokens = self.num_prompt_tokens[req_index]
num_computed_tokens = self.num_computed_tokens_cpu[req_index]
if num_computed_tokens < num_prompt_tokens:
prefills += 1
else:
decodes += 1
return decodes, prefills
MluHijackObject.apply_hijack(InputBatch,
"split_decodes_and_prefills",
split_decodes_and_prefills)

File diff suppressed because it is too large


@@ -0,0 +1,638 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM-MLU project
"""A GPU worker class."""
import copy
import gc
import os
from contextlib import AbstractContextManager, nullcontext
from types import NoneType
from typing import TYPE_CHECKING, Optional
import torch
import torch.distributed
import vllm.envs as envs
from vllm.config import VllmConfig
from vllm.distributed.parallel_state import get_tp_group, get_pp_group
from vllm.distributed.kv_transfer import (ensure_kv_transfer_initialized,
has_kv_transfer_group)
from vllm.logger import init_logger
from vllm.model_executor import set_random_seed
from vllm.platforms import current_platform
from vllm.sequence import IntermediateTensors
from vllm.v1.worker.utils import is_residual_scattered_for_sp
from vllm.v1.worker.worker_base import WorkerBase
from vllm.v1.outputs import EMPTY_MODEL_RUNNER_OUTPUT, ModelRunnerOutput
from vllm.v1.utils import report_usage_stats
from vllm.v1.engine import ReconfigureDistributedRequest, ReconfigureRankType
from vllm.v1.worker.gpu_worker import Worker, init_worker_distributed_environment
from vllm.v1.kv_cache_interface import KVCacheConfig
from vllm.utils.mem_constants import GiB_bytes
if TYPE_CHECKING:
from vllm.v1.core.sched.output import SchedulerOutput
from vllm_mlu.model_executor.warmup.kernel_warmup import kernel_warmup
from vllm_mlu.profiler.mlu_profiler import MluProfilerWrapper
from vllm_mlu.utils import MemorySnapshot, memory_profiling
from vllm_mlu._mlu_utils import VLLM_DUMP_MLU_INFO_EN
from vllm_mlu.device_allocator.cnmem import CnMemAllocator
from vllm_mlu.v1.worker.mlu_quant import MLUWorkerQuant
from vllm_mlu.v1.worker.gpu_model_runner import MLUModelRunner
from vllm_mlu.v1.worker.dp_gpu_model_runner import DPMLUModelRunner
logger = init_logger(__name__)
class MLUWorker(Worker, MLUWorkerQuant):
def __init__(
self,
vllm_config: VllmConfig,
local_rank: int,
rank: int,
distributed_init_method: str,
is_driver_worker: bool = False,
):
WorkerBase.__init__(self, vllm_config=vllm_config,
local_rank=local_rank,
rank=rank,
distributed_init_method=distributed_init_method,
is_driver_worker=is_driver_worker)
if self.model_config.trust_remote_code:
# note: lazy import to avoid importing torch before initializing
from vllm.utils.import_utils import init_cached_hf_modules
init_cached_hf_modules()
# Buffers saved before sleep
self._sleep_saved_buffers: dict[str, torch.Tensor] = {}
# Torch profiler. Enabled and configured through env vars:
# VLLM_TORCH_PROFILER_DIR=/path/to/save/trace
if envs.VLLM_TORCH_PROFILER_DIR:
torch_profiler_trace_dir = envs.VLLM_TORCH_PROFILER_DIR
worker_name = f"{vllm_config.instance_id}-rank-{self.rank}"
logger.info(
"Profiling enabled. Traces will be saved to: %s",
torch_profiler_trace_dir,
)
logger.debug(
"Profiler config: record_shapes=%s,"
"profile_memory=%s,with_stack=%s,with_flops=%s",
envs.VLLM_TORCH_PROFILER_RECORD_SHAPES,
envs.VLLM_TORCH_PROFILER_WITH_PROFILE_MEMORY,
envs.VLLM_TORCH_PROFILER_WITH_STACK,
envs.VLLM_TORCH_PROFILER_WITH_FLOPS,
)
self.profiler = torch.profiler.profile(
activities=[
torch.profiler.ProfilerActivity.CPU,
torch.profiler.ProfilerActivity.MLU,
],
record_shapes=envs.VLLM_TORCH_PROFILER_RECORD_SHAPES,
profile_memory=envs.VLLM_TORCH_PROFILER_WITH_PROFILE_MEMORY,
with_stack=envs.VLLM_TORCH_PROFILER_WITH_STACK,
with_flops=envs.VLLM_TORCH_PROFILER_WITH_FLOPS,
on_trace_ready=torch.profiler.tensorboard_trace_handler(
torch_profiler_trace_dir, worker_name=worker_name, use_gzip=True
),
)
elif envs.VLLM_TORCH_CUDA_PROFILE:
self.profiler = MluProfilerWrapper()
else:
self.profiler = None
def sleep(self, level: int = 1) -> None:
free_bytes_before_sleep = torch.mlu.mem_get_info()[0]
# Save the buffers before level 2 sleep
if level == 2:
model = self.model_runner.model
self._sleep_saved_buffers = {
name: buffer.cpu().clone() for name, buffer in model.named_buffers()
}
allocator = CnMemAllocator.get_instance()
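# Level 1 offloads the weight pool to host memory so it can be restored on wake_up;
# level 2 offloads nothing, so the named buffers saved above are the only copies kept.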
allocator.sleep(offload_tags=("weights", ) if level == 1 else tuple())
free_bytes_after_sleep, total = torch.mlu.mem_get_info()
freed_bytes = free_bytes_after_sleep - free_bytes_before_sleep
used_bytes = total - free_bytes_after_sleep
assert freed_bytes >= 0, "Memory usage increased after sleeping."
logger.info(
"Sleep mode freed %.2f GiB memory, "
"%.2f GiB memory is still in use.", freed_bytes / GiB_bytes,
used_bytes / GiB_bytes)
def wake_up(self, tags: Optional[list[str]] = None) -> None:
allocator = CnMemAllocator.get_instance()
allocator.wake_up(tags)
# Restore the buffers after level 2 sleep
if len(self._sleep_saved_buffers):
model = self.model_runner.model
for name, buffer in model.named_buffers():
if name in self._sleep_saved_buffers:
buffer.data.copy_(self._sleep_saved_buffers[name].data)
self._sleep_saved_buffers = {}
def _maybe_get_memory_pool_context(self, tag: str) -> AbstractContextManager:
if self.vllm_config.model_config.enable_sleep_mode:
allocator = CnMemAllocator.get_instance()
if tag == "weights":
assert allocator.get_current_usage() == 0, (
"Sleep mode can only be used for one instance per process."
)
context = allocator.use_memory_pool(tag=tag)
else:
context = nullcontext()
return context
def init_device(self):
if self.device_config.device.type == "mlu":
# This env var set by Ray causes exceptions with graph building.
os.environ.pop("CNCL_ASYNC_ERROR_HANDLING", None)
# if (
# self.parallel_config.data_parallel_size > 1
# and self.parallel_config.data_parallel_size_local > 0
# and self.parallel_config.distributed_executor_backend
# not in ["ray", "external_launcher"]
# and self.vllm_config.parallel_config.data_parallel_backend != "ray"
# ):
# # Use local DP rank if available, otherwise use global DP rank.
# dp_local_rank = self.parallel_config.data_parallel_rank_local
# if dp_local_rank is None:
# dp_local_rank = self.parallel_config.data_parallel_rank
# tp_pp_world_size = (
# self.parallel_config.pipeline_parallel_size
# * self.parallel_config.tensor_parallel_size
# )
# # DP_LOCAL_RANK * TP_PP_WORLD_SIZE + TP_LOCAL_RANK
# self.local_rank += dp_local_rank * tp_pp_world_size
# assert self.local_rank < torch.mlu.device_count(), (
# f"DP adjusted local rank {self.local_rank} is out of bounds. "
# )
self.device = torch.device(f"mlu:{self.local_rank}")
current_platform.set_device(self.device)
current_platform.check_if_supports_dtype(self.model_config.dtype)
# Initialize the distributed environment BEFORE taking
# memory snapshot
# This ensures NCCL buffers are allocated before we measure
# available memory
init_worker_distributed_environment(
self.vllm_config,
self.rank,
self.distributed_init_method,
self.local_rank,
current_platform.dist_backend,
)
# Set random seed.
set_random_seed(self.model_config.seed)
gc.collect()
torch.mlu.empty_cache()
# take current memory snapshot
self.init_snapshot = MemorySnapshot()
self.requested_memory = (
self.init_snapshot.total_memory
* self.cache_config.gpu_memory_utilization
)
if self.init_snapshot.free_memory < self.requested_memory:
GiB = lambda b: round(b / GiB_bytes, 2)
raise ValueError(
f"Free memory on device "
f"({GiB(self.init_snapshot.free_memory)}/"
f"{GiB(self.init_snapshot.total_memory)} GiB) on startup "
f"is less than desired GPU memory utilization "
f"({self.cache_config.gpu_memory_utilization}, "
f"{GiB(self.requested_memory)} GiB). Decrease GPU memory "
f"utilization or reduce GPU memory used by other processes."
)
else:
raise RuntimeError(f"Not support device type: {self.device_config.device}")
# Construct the model runner
model_runner_cls = (DPMLUModelRunner
if self._enable_moe_dp_opt() else MLUModelRunner)
self.model_runner: MLUModelRunner = model_runner_cls(
self.vllm_config, self.device)
if self.rank == 0:
# If usage stat is enabled, collect relevant info.
report_usage_stats(self.vllm_config)
@torch.inference_mode()
def determine_available_memory(self) -> int:
"""Profiles the peak memory usage of the model to determine how much
memory can be used for KV cache without OOMs.
The engine will first conduct a profiling of the existing memory usage.
Then, it calculates the free memory that can be used for KV cache in
bytes.
Tip:
You may limit the usage of GPU memory
by adjusting the `gpu_memory_utilization` parameter.
"""
GiB = lambda b: b / GiB_bytes
if kv_cache_memory_bytes := self.cache_config.kv_cache_memory_bytes:
# still need a profile run which compiles the model for
# max_num_batched_tokens
self.model_runner.profile_run()
msg = (
f"Initial free memory {GiB(self.init_snapshot.free_memory):.2f} "
f"GiB, reserved {GiB(kv_cache_memory_bytes):.2f} GiB memory for "
"KV Cache as specified by kv_cache_memory_bytes config and "
"skipped memory profiling. This does not respect the "
"gpu_memory_utilization config. Only use kv_cache_memory_bytes "
"config when you want manual control of KV cache memory "
"size. If OOM'ed, check the difference of initial free "
"memory between the current run and the previous run "
"where kv_cache_memory_bytes is suggested and update it "
"correspondingly."
)
logger.info(msg)
return kv_cache_memory_bytes
torch.mlu.empty_cache()
torch.mlu.reset_peak_memory_stats()
# Execute a forward pass with dummy inputs to profile the memory usage
# of the model.
with memory_profiling(
self.init_snapshot,
weights_memory=int(self.model_runner.model_memory_usage),
) as profile_result:
self.model_runner.profile_run()
self.non_torch_memory = profile_result.non_torch_increase
self.peak_activation_memory = profile_result.torch_peak_increase
free_gpu_memory = profile_result.after_profile.free_memory
# NOTE(woosuk): Here we assume that the other processes using the same
# GPU did not change their memory usage during the profiling.
assert self.init_snapshot.free_memory > free_gpu_memory, (
"Error in memory profiling. "
f"Initial free memory {GiB(self.init_snapshot.free_memory)} GiB, "
f"current free memory {GiB(free_gpu_memory)} GiB. "
"This happens when other processes sharing the same container "
"release GPU memory while vLLM is profiling during initialization. "
"To fix this, ensure consistent GPU memory allocation or "
"isolate vLLM in its own container."
)
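# KV cache budget = requested memory (total * gpu_memory_utilization) minus all
# non-KV-cache usage measured during profiling (weights, peak activations, non-torch).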
self.available_kv_cache_memory_bytes = (
self.requested_memory - profile_result.non_kv_cache_memory
)
unrequested_memory = self.init_snapshot.free_memory - self.requested_memory
logger.debug(
"Initial free memory: %.2f GiB; Requested memory: %.2f (util), %.2f GiB",
GiB(self.init_snapshot.free_memory),
self.cache_config.gpu_memory_utilization,
GiB(self.requested_memory),
)
logger.debug(
"Free memory after profiling: %.2f GiB (total), "
"%.2f GiB (within requested)",
GiB(free_gpu_memory),
GiB(free_gpu_memory - unrequested_memory),
)
logger.debug(profile_result)
logger.info_once(
"Available KV cache memory: %.2f GiB",
GiB(self.available_kv_cache_memory_bytes),
scope="local",
)
gc.collect()
self.peak_memory = profile_result.non_kv_cache_memory
self.block_memory = self.available_kv_cache_memory_bytes
return int(self.available_kv_cache_memory_bytes)
def initialize_from_config(self, kv_cache_config: KVCacheConfig) -> None:
"""Allocate GPU KV cache with the specified kv_cache_config."""
# Init kv cache connector here, because it requires
# `kv_cache_config`.
# NOTE(Kuntai): This need to be done before `initialize_kv_cache`,
# because `initialize_kv_cache` will inject kv cache groups not
# related to kv cache connector (e.g. kv cache sharing layers).
ensure_kv_transfer_initialized(self.vllm_config, kv_cache_config)
if self.vllm_config.model_config.enable_sleep_mode:
allocator = CnMemAllocator.get_instance()
context = allocator.use_memory_pool(tag="kv_cache")
else:
context = nullcontext()
with context:
self.model_runner.initialize_kv_cache(kv_cache_config)
def compile_or_warm_up_model(self) -> None:
# warm up sizes that are not in cudagraph capture sizes,
# but users still want to compile for better performance,
# e.g. for the max-num-batched token size in chunked prefill.
warmup_sizes = self.vllm_config.compilation_config.compile_sizes.copy()
if not self.model_config.enforce_eager:
warmup_sizes = [
x for x in warmup_sizes
if x not in self.vllm_config.compilation_config.cudagraph_capture_sizes
]
# We skip EPLB here since we don't want to record dummy metrics
for size in sorted(warmup_sizes, reverse=True):
logger.info("Compile and warming up model for size %d", size)
self.model_runner._dummy_run(size, skip_eplb=True, remove_lora=False)
self.model_runner.maybe_remove_all_loras(self.model_runner.lora_config)
# Warmup and tune the kernels used during model execution before
# cuda graph capture.
kernel_warmup(self)
cuda_graph_memory_bytes = 0
if not self.model_config.enforce_eager:
cuda_graph_memory_bytes = self.model_runner.capture_model()
if self.cache_config.kv_cache_memory_bytes is None and hasattr(
self, "peak_activation_memory"
):
# Suggests optimal kv cache memory size if we rely on
# memory_profiling to guess the kv cache memory size which
# provides peak_activation_memory and a few other memory
# consumption. `memory_profiling` does not consider
# CUDAGraph memory size and may not utilize all gpu memory.
# Users may want fine-grained control to specify kv cache
# memory size.
GiB = lambda b: round(b / GiB_bytes, 2)
# empirically observed that the memory profiling may
# slightly underestimate the memory consumption.
# So leave a small buffer (=150MiB) to avoid OOM.
redundancy_buffer_memory = 150 * (1 << 20)
non_kv_cache_memory = (
self.model_runner.model_memory_usage
+ self.peak_activation_memory
+ self.non_torch_memory
+ cuda_graph_memory_bytes
)
kv_cache_memory_bytes_to_gpu_limit = (
self.init_snapshot.free_memory
- non_kv_cache_memory
- redundancy_buffer_memory
)
kv_cache_memory_bytes_to_requested_limit = (
int(self.requested_memory)
- non_kv_cache_memory
- redundancy_buffer_memory
)
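# Two suggestions are reported below: one that stays within the requested
# gpu_memory_utilization budget and one that uses all free device memory,
# each leaving the 150 MiB safety buffer on top of the measured non-KV-cache usage.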
msg = (
f"Free memory on device "
f"({GiB(self.init_snapshot.free_memory)}/"
f"{GiB(self.init_snapshot.total_memory)} GiB) on startup. "
f"Desired GPU memory utilization is "
f"({self.cache_config.gpu_memory_utilization}, "
f"{GiB(self.requested_memory)} GiB). "
f"Actual usage is {GiB(self.model_runner.model_memory_usage)} "
f"GiB for weight, {GiB(self.peak_activation_memory)} GiB "
f"for peak activation, {GiB(self.non_torch_memory)} GiB "
f"for non-torch memory, and {GiB(cuda_graph_memory_bytes)} "
f"GiB for CUDAGraph memory. Replace gpu_memory_utilization "
f"config with `--kv-cache-memory="
f"{kv_cache_memory_bytes_to_requested_limit}` "
f"({GiB(kv_cache_memory_bytes_to_requested_limit)} GiB) to fit "
f"into requested memory, or `--kv-cache-memory="
f"{kv_cache_memory_bytes_to_gpu_limit}` "
f"({GiB(kv_cache_memory_bytes_to_gpu_limit)} GiB) to fully "
f"utilize gpu memory. Current kv cache memory in use is "
f"{GiB(self.available_kv_cache_memory_bytes)} GiB."
)
logger.debug(msg)
# Warm up sampler and preallocate memory buffer for logits and other
# sampling related tensors of max possible shape to avoid memory
# fragmentation issue.
# NOTE: This is called after `capture_model` on purpose to prevent
# memory buffers from being cleared by `torch.cuda.empty_cache`.
if get_pp_group().is_last_rank:
max_num_reqs = min(
self.scheduler_config.max_num_seqs,
self.scheduler_config.max_num_batched_tokens,
)
# We skip EPLB here since we don't want to record dummy metrics
hidden_states, last_hidden_states = self.model_runner._dummy_run(
num_tokens=max_num_reqs,
skip_eplb=True,
)
if self.model_runner.is_pooling_model:
self.model_runner._dummy_pooler_run(hidden_states)
else:
self.model_runner._dummy_sampler_run(hidden_states=last_hidden_states)
# Reset the seed to ensure that the random state is not affected by
# the model initialization and profiling.
set_random_seed(self.model_config.seed)
@torch.inference_mode()
def execute_model(
self, scheduler_output: "SchedulerOutput",
) -> ModelRunnerOutput | None:
intermediate_tensors = None
forward_pass = scheduler_output.total_num_scheduled_tokens > 0
num_scheduled_tokens = scheduler_output.total_num_scheduled_tokens
num_input_tokens = self.model_runner._get_num_input_tokens(num_scheduled_tokens)
all_gather_tensors = {
"residual": not is_residual_scattered_for_sp(
self.vllm_config, num_input_tokens
)
}
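# Ranks other than the first pipeline stage receive the previous stage's
# intermediate tensors before running their own layers.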
if forward_pass and not get_pp_group().is_first_rank:
intermediate_tensors = IntermediateTensors(
get_pp_group().recv_tensor_dict(
all_gather_group=get_tp_group(),
all_gather_tensors=all_gather_tensors,
)
)
with self.annotate_profile(scheduler_output):
output = self.model_runner.execute_model(
scheduler_output, intermediate_tensors
)
if isinstance(output, (ModelRunnerOutput, NoneType)):
return output
assert isinstance(output, IntermediateTensors)
parallel_config = self.vllm_config.parallel_config
assert (
parallel_config.distributed_executor_backend != "external_launcher"
and not get_pp_group().is_last_rank
)
get_pp_group().send_tensor_dict(
output.tensors,
all_gather_group=get_tp_group(),
all_gather_tensors=all_gather_tensors,
)
return None
def _enable_moe_dp_opt(self):
'''
Enable the MLU-optimized data-parallel (DP) scheme for supported MoE models;
otherwise the native DP implementation is used.
'''
# case 0: data parallel is enabled
enable_dp = self.parallel_config.data_parallel_size > 1
# case 1: DeepSeek-style MLA model
is_ds_mla = self.model_config.is_deepseek_mla
# case 2: supported MoE model types (qwen3_moe, glm4_moe)
is_supported_moe_model = hasattr(self.model_config.hf_text_config, "model_type") and \
self.model_config.hf_text_config.model_type in ('qwen3_moe', 'glm4_moe')
# case 3: private model
is_private_model = getattr(self.model_config.hf_config, "is_private", False)
return enable_dp and (is_ds_mla or is_supported_moe_model or is_private_model)
def execute_dummy_batch(self) -> None:
if self._enable_moe_dp_opt():
self.model_runner.moe_dp_execute_dummy_batch(1)
else:
self.model_runner._dummy_run(1, uniform_decode=True)
def response_remote_alloc_once(self) -> None:
self.model_runner.response_remote_alloc_once()
def _eplb_before_scale_down(self, old_ep_size: int, new_ep_size: int) -> None:
from vllm.distributed.parallel_state import get_ep_group
if get_ep_group().rank == 0:
logger.info(
"[Elastic EP] Starting expert resharding before scaling down..."
)
rank_mapping = {
old_ep_rank: old_ep_rank if old_ep_rank < new_ep_size else -1
for old_ep_rank in range(old_ep_size)
}
assert self.model_runner.eplb_state is not None
self.model_runner.eplb_state.rearrange(
execute_shuffle=True,
global_expert_load=None,
rank_mapping=rank_mapping,
)
torch.mlu.synchronize()
if get_ep_group().rank == 0:
logger.info("[Elastic EP] Expert resharding completed!")
def reinitialize_distributed(
self, reconfig_request: ReconfigureDistributedRequest
) -> None:
from vllm.config import set_current_vllm_config
from vllm.distributed.parallel_state import (
cleanup_dist_env_and_memory,
get_ep_group,
)
old_ep_size = get_ep_group().world_size
old_ep_rank = get_ep_group().rank
new_ep_size = (
reconfig_request.new_data_parallel_size
* get_tp_group().world_size
* get_pp_group().world_size
)
if new_ep_size < old_ep_size:
self._eplb_before_scale_down(old_ep_size, new_ep_size)
cleanup_dist_env_and_memory()
if (
reconfig_request.new_data_parallel_rank
== ReconfigureRankType.SHUTDOWN_CURRENT_RANK
):
assert old_ep_rank >= new_ep_size
# shutdown
return
self._reconfigure_parallel_config(reconfig_request)
with set_current_vllm_config(self.vllm_config):
init_worker_distributed_environment(
self.vllm_config,
self.rank,
self.distributed_init_method,
self.local_rank,
current_platform.dist_backend,
)
global_expert_loads = self._reconfigure_moe(old_ep_size, new_ep_size)
if new_ep_size > old_ep_size:
assert global_expert_loads is not None
self._eplb_after_scale_up(old_ep_size, new_ep_size, global_expert_loads)
def get_hfu_info(self, batch, input_len, output_len):
try:
self.model_runner.model.collect_hfu_io_effciency_info(batch, input_len, output_len)
if VLLM_DUMP_MLU_INFO_EN:
return self.model_runner.model.hfu_info, self.model_runner.model.io_efficiency
else:
return self.model_runner.model.flops_info, 0.0
except Exception as e:
raise RuntimeError(
"Model match failure when getting HFU info, please check whether an init method was registered."
) from e
def _get_latency(self, time_markers):
total_latency = 0
if not isinstance(time_markers, list):
time_markers = [time_markers]
for time_marker in time_markers:
start, end = time_marker
latency = start.elapsed_time(end)
total_latency += latency
return total_latency
def get_latency(self):
return self._get_latency(self.model_runner.time_markers)
def get_mm_encoder_latency(self):
if not hasattr(self.model_runner, "mm_time_markers"):
return None
mm_time_markers = self.model_runner.mm_time_markers
return None if len(mm_time_markers) == 0 else\
self._get_latency(mm_time_markers)
def get_memory_usage(self):
return (self.peak_memory, self.block_memory)
def recapture_model(self,
prefill_enable_mlugraph: bool,
batch_size: int,
input_len: int):
# Reset history capture context
self.model_runner.reset_capture_context(
prefill_enable_mlugraph, batch_size, input_len)
# Re-capture decode graph (full graph or piecewise graph)
self.compile_or_warm_up_model()


@@ -0,0 +1,120 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM-MLU project
"""
Define KV connector functionality mixin for model runners.
"""
import copy
from collections.abc import Generator
from contextlib import AbstractContextManager, contextmanager, nullcontext
from typing import (
TYPE_CHECKING, # noqa: UP035
)
from vllm.config import VllmConfig
from vllm.distributed.kv_transfer import (
ensure_kv_transfer_shutdown,
get_kv_transfer_group,
has_kv_transfer_group,
)
from vllm.distributed.kv_transfer.kv_connector.base import KVConnectorBase
from vllm.distributed.kv_transfer.kv_connector.v1.metrics import KVConnectorStats
from vllm.forward_context import get_forward_context, set_forward_context
from vllm.logger import init_logger
from vllm.v1.outputs import (
EMPTY_MODEL_RUNNER_OUTPUT,
KVConnectorOutput,
ModelRunnerOutput,
)
from vllm.v1.worker.kv_connector_model_runner_mixin import KVConnectorModelRunnerMixin
if TYPE_CHECKING:
from vllm.v1.core.sched.output import SchedulerOutput
from vllm_mlu.mlu_hijack_utils import MluHijackObject
logger = init_logger(__name__)
# Defined as a kv connector functionality mixin for ModelRunner (GPU, TPU)
class KVConnectorModelRunnerMixin_MluHijack(KVConnectorModelRunnerMixin):
@staticmethod
def maybe_setup_kv_connector(scheduler_output: "SchedulerOutput"):
# Update KVConnector with the KVConnector metadata forward().
if has_kv_transfer_group():
kv_connector = get_kv_transfer_group()
assert isinstance(kv_connector, KVConnectorBase)
assert scheduler_output.kv_connector_metadata is not None
kv_connector.bind_connector_metadata(scheduler_output.kv_connector_metadata)
# Background KV cache transfers happen here.
# These transfers are designed to be async and the requests
# involved may be disjoint from the running requests.
# Do this here to save a collective_rpc.
kv_connector.start_load_kv(get_forward_context())
'''
=============================
Modify by vllm_mlu
=============================
@brief: support disagg for MLU.
'''
kv_connector.request_remote_memory_send()
'''
==================
End of MLU Hijack
==================
'''
# This context manager must be used within an active forward context.
# It encapsulates the entire KV connector lifecycle within execute_model
@staticmethod
@contextmanager
def _get_kv_connector_output(
scheduler_output: "SchedulerOutput", wait_for_save: bool = True
) -> Generator[KVConnectorOutput, None, None]:
output = KVConnectorOutput()
# Update KVConnector with the KVConnector metadata forward().
kv_connector = get_kv_transfer_group()
assert isinstance(kv_connector, KVConnectorBase)
assert scheduler_output.kv_connector_metadata is not None
kv_connector.bind_connector_metadata(scheduler_output.kv_connector_metadata)
# Background KV cache transfers happen here.
# These transfers are designed to be async and the requests
# involved may be disjoint from the running requests.
# Do this here to save a collective_rpc.
kv_connector.start_load_kv(get_forward_context())
'''
=============================
Modify by vllm_mlu
=============================
@brief: support disagg for MLU.
'''
kv_connector.request_remote_memory_send()
'''
==================
End of MLU Hijack
==================
'''
try:
yield output
finally:
output.finished_sending, output.finished_recving = (
kv_connector.get_finished(scheduler_output.finished_req_ids)
)
output.invalid_block_ids = kv_connector.get_block_ids_with_load_errors()
output.kv_connector_stats = (
KVConnectorModelRunnerMixin.get_kv_connector_stats()
)
MluHijackObject.apply_hijack(KVConnectorModelRunnerMixin,
KVConnectorModelRunnerMixin.maybe_setup_kv_connector,
KVConnectorModelRunnerMixin_MluHijack.maybe_setup_kv_connector)
MluHijackObject.apply_hijack(KVConnectorModelRunnerMixin,
KVConnectorModelRunnerMixin._get_kv_connector_output,
KVConnectorModelRunnerMixin_MluHijack._get_kv_connector_output)


@@ -0,0 +1,33 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM-MLU project
from typing import List
from vllm.lora.request import LoRARequest
from vllm.v1.worker.lora_model_runner_mixin import LoRAModelRunnerMixin
from vllm_mlu.mlu_hijack_utils import MluHijackObject
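# Register `num_loras` placeholder LoRA adapters (rank LORA_WARMUP_RANK) so warmup
# and graph capture exercise the worst-case LoRA shapes; the paths are intentionally fake.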
def vllm_mlu__v1__worker__LoRAModelRunnerMixin__add_dummy_loras(self, num_loras: int) -> List[LoRARequest]:
assert num_loras > 0
assert self.lora_manager is not None
dummy_lora_requests: list[LoRARequest] = []
with self.lora_manager.dummy_lora_cache():
for idx in range(num_loras):
lora_id = idx + 1
dummy_lora_request = LoRARequest(
lora_name=f"capture_graph_{lora_id}",
lora_int_id=lora_id,
lora_path="/not/a/real/path",
)
self.lora_manager.add_dummy_lora(dummy_lora_request,
rank=self.LORA_WARMUP_RANK)
dummy_lora_requests.append(dummy_lora_request)
return dummy_lora_requests
MluHijackObject.apply_hijack(LoRAModelRunnerMixin,
"add_dummy_loras",
vllm_mlu__v1__worker__LoRAModelRunnerMixin__add_dummy_loras)


@@ -0,0 +1,281 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM-MLU project
"""A MLU quant class."""
import functools
from collections import defaultdict
from typing import Dict, Any, List, Optional, Union
import numpy as np
import torch
import torch.distributed
from vllm.distributed import (
get_moe_tensor_parallel_rank, get_moe_tensor_parallel_world_size,
get_moe_expert_parallel_rank, get_moe_expert_parallel_world_size,
get_tensor_model_parallel_rank, get_tensor_model_parallel_world_size)
import vllm.envs as envs
from vllm.utils.torch_utils import STR_DTYPE_TO_TORCH_DTYPE
from vllm.model_executor.layers.vocab_parallel_embedding import (VocabParallelEmbedding,
ParallelLMHead)
from vllm.model_executor.layers.linear import (
ColumnParallelLinear,
MergedColumnParallelLinear,
QKVParallelLinear,
RowParallelLinear)
from vllm.model_executor.models.deepseek_v2 import DeepseekV2MLAAttention
from vllm_mlu.model_executor.layers.feed_forward import FeedForward
from vllm_mlu.model_executor.layers.sparse_moe_mlp import SparseMoeMlp
from vllm.logger import init_logger
logger = init_logger(__name__)
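# defaultdict factory: one record of this shape is created lazily per hooked layer
# and filled in by setup_smooth_hook / stat_input_hook below.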
def default_act_range_value():
return {
"x": None,
"split": None,
"is_linear": False,
"is_qkv": False,
"q_proj_size": 0,
"num_kv_head_replicas": 1,
"is_merge": False,
"input_id": [],
"self_rank": 0,
"rank": None,
"tensor_rank": None,
"tp_world_size": None,
"moe_tp_rank": None,
"moe_tp_world_size": None,
"moe_ep_rank": None,
"moe_ep_world_size": None,
"weight": None,
}
def _str_to_torch_dtype(dtype: str) -> torch.dtype:
dtype = dtype.split(".")[-1]
# STR_DTYPE_TO_TORCH_DTYPE dict does not have float16 type
return STR_DTYPE_TO_TORCH_DTYPE[dtype] if dtype != "float16" else torch.float16
class ActRangeValue:
"""
ActRangeValue for v1 MsgpackEncoder and MsgpackDecoder. This is a *workaround*:
the decoded tensor can be wrong if we pass the act range dict directly.
NOTE: tensors are converted to numpy ndarrays here because passing torch.Tensor
may cause a core dump.
"""
def __init__(self):
self.layer_name: str = ""
self.x: Optional[np.ndarray] = None
self.split: str = None
self.is_linear: bool = False
self.is_qkv: bool = False
self.q_proj_size: int = 0
self.num_kv_head_replicas: int = 1
self.is_merge: bool = False
self.input_id_dtype: Optional[str] = None
self.input_id: Optional[List[np.ndarray]] = []
self.self_rank: int = 0
self.rank: Optional[int] = None
self.tensor_rank: Optional[int] = None
self.tp_world_size: Optional[int] = None
self.moe_tp_rank: Optional[int] = None
self.moe_tp_world_size: Optional[int] = None
self.moe_ep_rank: Optional[int] = None
self.moe_ep_world_size: Optional[int] = None
self.weight: Optional[np.ndarray] = None
self.weight_dtype: Optional[str] = None
@classmethod
def serial(cls, layer_name: str, act_range: Dict[str, Any]) -> 'ActRangeValue':
instance = cls()
instance.layer_name = layer_name
instance.x = act_range.get("x")
instance.split = act_range.get("split")
instance.is_linear = act_range.get("is_linear", False)
instance.is_qkv = act_range.get("is_qkv", False)
instance.q_proj_size = act_range.get("q_proj_size", 0)
instance.num_kv_head_replicas = act_range.get("num_kv_head_replicas", 1)
instance.is_merge = act_range.get("is_merge", False)
instance.input_id = act_range.get("input_id", [])
instance.self_rank = act_range.get("self_rank", 0)
instance.rank = act_range.get("rank")
instance.tensor_rank = act_range.get("tensor_rank")
instance.tp_world_size = act_range.get("tp_world_size")
instance.moe_tp_rank = act_range.get("moe_tp_rank")
instance.moe_tp_world_size = act_range.get("moe_tp_world_size")
instance.moe_ep_rank = act_range.get("moe_ep_rank")
instance.moe_ep_world_size = act_range.get("moe_ep_world_size")
instance.weight = act_range.get("weight")
if instance.x is not None:
instance.x = instance.x.numpy()
# input_id and weight are used for debug
if isinstance(instance.input_id, torch.Tensor):
instance.input_id_dtype = str(instance.input_id.dtype)
instance.input_id = instance.input_id.float().numpy()
else:
input_id_np = []
for input_id in instance.input_id:
instance.input_id_dtype = str(input_id.dtype)
input_id_np.append(input_id.float().numpy())
instance.input_id = input_id_np
if instance.weight is not None:
instance.weight_dtype = str(instance.weight.dtype)
instance.weight = instance.weight.float().numpy()
return instance
def deserial(self) -> Dict[str, Any]:
act_range = self.to_dict()
if self.x is not None:
act_range["x"] = torch.from_numpy(self.x)
if self.input_id is not None:
if isinstance(self.input_id, np.ndarray):
act_range["input_id"] = torch.from_numpy(self.input_id).to(
_str_to_torch_dtype(self.input_id_dtype))
else:
input_id_tensor = []
for input_id in self.input_id:
input_id_tensor.append(torch.from_numpy(input_id).to(
_str_to_torch_dtype(self.input_id_dtype)))
act_range["input_id"] = input_id_tensor
if self.weight_dtype is not None:
act_range["weight"] = torch.from_numpy(self.weight).to(
_str_to_torch_dtype(self.weight_dtype))
return act_range
def to_dict(self) -> Dict[str, Any]:
return {
"x": self.x,
"split": self.split,
"is_linear": self.is_linear,
"is_qkv": self.is_qkv,
"q_proj_size": self.q_proj_size,
"num_kv_head_replicas": self.num_kv_head_replicas,
"is_merge": self.is_merge,
"input_id": self.input_id,
"self_rank": self.self_rank,
"rank": self.rank,
"tensor_rank": self.tensor_rank,
"tp_world_size": self.tp_world_size,
"moe_tp_rank": self.moe_tp_rank,
"moe_tp_world_size": self.moe_tp_world_size,
"moe_ep_rank": self.moe_ep_rank,
"moe_ep_world_size": self.moe_ep_world_size,
"weight": self.weight,
}
def __repr__(self) -> str:
return f"layer: {self.layer_name}, ActRangeValue({self.to_dict()})"
class MLUWorkerQuant(object):
'''
MLU quantization helpers for the worker: activation-range collection hooks
and related utilities.
'''
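# Keep a running absolute maximum of the flattened (tokens, hidden_dim) tensor,
# reduced along `dim` (dim=0 for activations gives one max per hidden channel).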
def stat_tensor(self, name, tensor, act_range, key, dim):
logger.debug(f"name:{name}, key:{key}, dim:{dim}, tensor.shape:{tensor.shape}")
hidden_dim = tensor.shape[-1]
tensor = tensor.view(-1, hidden_dim).abs()
coming_max = torch.max(tensor, dim=dim)[0].float()
if act_range[name][key] is None:
act_range[name][key] = coming_max
else:
act_range[name][key] = torch.max(act_range[name][key], coming_max)
def stat_input_hook(self, m, x, y, name, act_range, is_linear, is_save_input_id):
if isinstance(x, tuple):
x = x[0]
if isinstance(y, tuple):
y = y[0]
logger.debug(f"name:{name}, x.shape:{x.shape}, y.shape:{y.shape}, m.weight.shape:{m.weight.shape}")
if is_linear:
self.stat_tensor(name, x, act_range, "x", 0)
if act_range[name]["is_qkv"] and is_save_input_id and ".0." in name:
x_cpu = x.clone().to("cpu")
act_range[name]["input_id"].append(x_cpu)
def setup_smooth_hook(self, is_save_input_id: bool = False, is_save_moe_info: bool = False):
models = [self.model_runner.model]
if hasattr(self.model_runner, "drafter") and self.model_runner.drafter is not None:
models += [self.model_runner.drafter.model]
self.act_range = defaultdict(default_act_range_value)
self.hooks = []
linear_class_list = (ColumnParallelLinear, MergedColumnParallelLinear, QKVParallelLinear, RowParallelLinear)
other_class_list = (VocabParallelEmbedding, ParallelLMHead)
class_list = linear_class_list + other_class_list
row_class_list = (RowParallelLinear, )
for model in models:
for name, m in model.named_modules():
if isinstance(m, FeedForward):
m.use_bt_ffn = False
if isinstance(m, SparseMoeMlp):
m.is_use_fused_moe = False
if isinstance(m, DeepseekV2MLAAttention):
m.use_fused_mla_qkv = False
if isinstance(m, class_list):
is_linear = isinstance(m, linear_class_list)
split_type = "row" if isinstance(m, row_class_list) else "col"
self.act_range[name]["split"] = split_type
self.act_range[name]["is_linear"] = is_linear
if isinstance(m, QKVParallelLinear):
self.act_range[name]["is_qkv"] = True
self.act_range[name]["q_proj_size"] = m.num_heads * m.head_size
self.act_range[name]["num_kv_head_replicas"] = m.num_kv_head_replicas
self.act_range[name]["is_merge"] = isinstance(m, MergedColumnParallelLinear)
if is_save_moe_info:
self.act_range[name]["rank"] = torch.distributed.get_rank()
self.act_range[name]["tensor_rank"] = get_tensor_model_parallel_rank()
self.act_range[name]["tp_world_size"] = get_tensor_model_parallel_world_size()
self.act_range[name]["moe_tp_rank"] = get_moe_tensor_parallel_rank()
self.act_range[name]["moe_tp_world_size"] = get_moe_tensor_parallel_world_size()
self.act_range[name]["moe_ep_rank"] = get_moe_expert_parallel_rank()
self.act_range[name]["moe_ep_world_size"] = get_moe_expert_parallel_world_size()
if ".expert." in name:
self.act_range[name]["weight"] = m.weight
logger.info(f"rank:{self.rank}, add hook to {name}, is_linear:{is_linear}, split_type:{split_type}")
self.hooks.append(m.register_forward_hook(functools.partial(self.stat_input_hook,
name=name, act_range=self.act_range,
is_linear=is_linear,
is_save_input_id=is_save_input_id)))
def remove_hooks(self):
for h in self.hooks:
h.remove()
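# Move every collected tensor to CPU and wrap each layer's record in ActRangeValue
# so it survives the msgpack encode/decode across the worker RPC boundary.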
def get_act_range(self):
act_range = defaultdict(default_act_range_value)
for layer_name, layer_range in self.act_range.items():
for tensor_key, tensor_value in layer_range.items():
if isinstance(tensor_value, torch.Tensor):
act_range[layer_name][tensor_key] = tensor_value.to("cpu")
elif tensor_key == "input_id" and isinstance(tensor_value, list):
input_id_len = len(tensor_value)
for i in range(input_id_len):
if isinstance(tensor_value[i], torch.Tensor):
act_range[layer_name][tensor_key].append(tensor_value[i].to("cpu"))
else:
act_range[layer_name][tensor_key].append(tensor_value[i])
else:
act_range[layer_name][tensor_key] = tensor_value
serialization_result = []
for layer_name, layer_range in act_range.items():
serialization_result.append(ActRangeValue.serial(layer_name, layer_range))
return serialization_result
@torch.no_grad()
def get_named_parameters(self):
name_parameters = {}
for name, param in self.model_runner.model.named_parameters():
name_parameters[name] = param.to("cpu")
return name_parameters