# Copyright 2023-2024 SGLang Team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A tensor parallel worker."""

from __future__ import annotations

import logging
import threading
from typing import TYPE_CHECKING, Optional, Tuple, Union

import torch

from sglang.srt.configs.model_config import ModelConfig
from sglang.srt.distributed import get_pp_group, get_world_group
from sglang.srt.hf_transformers_utils import (
    get_processor,
    get_tokenizer,
    get_tokenizer_from_processor,
)
from sglang.srt.layers.logits_processor import LogitsProcessorOutput
from sglang.srt.managers.io_struct import (
    DestroyWeightsUpdateGroupReqInput,
    GetWeightsByNameReqInput,
    InitWeightsSendGroupForRemoteInstanceReqInput,
    InitWeightsUpdateGroupReqInput,
    LoadLoRAAdapterReqInput,
    SendWeightsToRemoteInstanceReqInput,
    UnloadLoRAAdapterReqInput,
    UpdateWeightFromDiskReqInput,
    UpdateWeightsFromDistributedReqInput,
    UpdateWeightsFromTensorReqInput,
)
from sglang.srt.managers.schedule_batch import ModelWorkerBatch, global_server_args_dict
from sglang.srt.mem_cache.allocator import BaseTokenToKVPoolAllocator
from sglang.srt.mem_cache.memory_pool import ReqToTokenPool
from sglang.srt.model_executor.forward_batch_info import ForwardBatch, PPProxyTensors
from sglang.srt.model_executor.model_runner import ModelRunner
from sglang.srt.patch_torch import monkey_patch_torch_reductions
from sglang.srt.server_args import ServerArgs
from sglang.srt.utils import MultiprocessingSerializer, broadcast_pyobj, set_random_seed

if TYPE_CHECKING:
    from sglang.srt.managers.cache_controller import LayerDoneCounter

logger = logging.getLogger(__name__)


class TpModelWorker:
    """A tensor parallel model worker."""

    def __init__(
        self,
        server_args: ServerArgs,
        gpu_id: int,
        tp_rank: int,
        moe_ep_rank: int,
        pp_rank: int,
        dp_rank: Optional[int],
        nccl_port: int,
        is_draft_worker: bool = False,
        req_to_token_pool: Optional[ReqToTokenPool] = None,
        token_to_kv_pool_allocator: Optional[BaseTokenToKVPoolAllocator] = None,
    ):
        # Parse args
        self.tp_size = server_args.tp_size
        self.tp_rank = tp_rank
        self.moe_ep_rank = moe_ep_rank
        self.pp_rank = pp_rank

        # Init model and tokenizer
        self.model_config = ModelConfig.from_server_args(
            server_args,
            model_path=(
                server_args.model_path
                if not is_draft_worker
                else server_args.speculative_draft_model_path
            ),
            model_revision=(
                server_args.revision
                if not is_draft_worker
                else server_args.speculative_draft_model_revision
            ),
            is_draft_model=is_draft_worker,
        )

        self.model_runner = ModelRunner(
            model_config=self.model_config,
            mem_fraction_static=server_args.mem_fraction_static,
            gpu_id=gpu_id,
            tp_rank=tp_rank,
            tp_size=server_args.tp_size,
            moe_ep_rank=moe_ep_rank,
            moe_ep_size=server_args.ep_size,
            pp_rank=pp_rank,
            pp_size=server_args.pp_size,
            nccl_port=nccl_port,
            dp_rank=dp_rank,
            server_args=server_args,
            is_draft_worker=is_draft_worker,
            req_to_token_pool=req_to_token_pool,
            token_to_kv_pool_allocator=token_to_kv_pool_allocator,
        )
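        # Note: the ModelRunner above owns the loaded weights and the KV-cache
        # memory pools. When this worker is a speculative-decoding draft worker,
        # the caller may pass in `req_to_token_pool` / `token_to_kv_pool_allocator`
        # so the draft model reuses the target worker's pools instead of
        # allocating its own (our reading of the optional arguments, not a
        # statement from the original code).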
        if server_args.skip_tokenizer_init:
            self.tokenizer = self.processor = None
        else:
            if self.model_config.is_multimodal:
                self.processor = get_processor(
                    server_args.tokenizer_path,
                    tokenizer_mode=server_args.tokenizer_mode,
                    trust_remote_code=server_args.trust_remote_code,
                    revision=server_args.revision,
                )
                self.tokenizer = get_tokenizer_from_processor(self.processor)
            else:
                self.tokenizer = get_tokenizer(
                    server_args.tokenizer_path,
                    tokenizer_mode=server_args.tokenizer_mode,
                    trust_remote_code=server_args.trust_remote_code,
                    revision=server_args.revision,
                )
        self.device = self.model_runner.device

        # Init nccl groups
        self.pp_group = get_pp_group()
        self.world_group = get_world_group()
        # Profile number of tokens
        self.max_total_num_tokens = self.model_runner.max_total_num_tokens
        self.max_prefill_tokens = server_args.max_prefill_tokens
        self.max_running_requests = min(
            (
                self.max_total_num_tokens // 2
                if server_args.max_running_requests is None
                else server_args.max_running_requests
                // (server_args.dp_size if server_args.enable_dp_attention else 1)
            ),
            self.model_runner.req_to_token_pool.size,
        )
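        # Illustrative arithmetic (hypothetical values, not from this file):
        # with max_total_num_tokens = 120_000 and no explicit cap configured,
        # the default is 120_000 // 2 = 60_000, which is then clamped to
        # req_to_token_pool.size (e.g. 4096). With DP attention enabled and an
        # explicit cap, the cap is split evenly across the dp_size groups.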
        assert self.max_running_requests > 0, "max_running_requests is zero"
        self.max_queued_requests = server_args.max_queued_requests
        assert (
            self.max_queued_requests is None or self.max_queued_requests >= 1
        ), "If configured, max_queued_requests must be at least 1 for any work to be scheduled."
        self.max_req_len = min(
            self.model_config.context_len - 1,
            self.max_total_num_tokens - 1,
        )
        self.max_req_input_len = self.max_req_len - 5
        assert (
            self.max_req_len > 0 and self.max_req_input_len > 0
        ), "Memory pool size is too small"
        # Sync random seed across TP workers
        self.random_seed = broadcast_pyobj(
            [server_args.random_seed],
            self.tp_size * self.pp_rank + tp_rank,
            self.world_group.cpu_group,
            src=self.world_group.ranks[0],
        )[0]
        set_random_seed(self.random_seed)
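        # The seed chosen by the first rank of the world group is broadcast over
        # the CPU process group, so every TP/PP worker seeds its RNGs identically
        # and per-rank sampling stays in sync.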
        # A reference so that this class has the same members as TpModelWorkerClient
        self.worker = self

        self.hicache_layer_transfer_counter = None
    def register_hicache_layer_transfer_counter(self, counter: LayerDoneCounter):
        self.hicache_layer_transfer_counter = counter

    def set_hicache_consumer(self, consumer_index: int):
        if self.hicache_layer_transfer_counter is not None:
            self.hicache_layer_transfer_counter.set_consumer(consumer_index)

    def get_worker_info(self):
        return (
            self.max_total_num_tokens,
            self.max_prefill_tokens,
            self.max_running_requests,
            self.max_queued_requests,
            self.max_req_len,
            self.max_req_input_len,
            self.random_seed,
            self.device,
            global_server_args_dict,
            self.model_runner.req_to_token_pool.size,
            self.model_runner.req_to_token_pool.max_context_len,
            self.model_runner.token_to_kv_pool.size,
        )

    @property
    def sliding_window_size(self) -> Optional[int]:
        return self.model_runner.sliding_window_size

    @property
    def is_hybrid(self) -> bool:
        return self.model_runner.is_hybrid is not None

    def get_tokens_per_layer_info(self):
        return (
            self.model_runner.full_max_total_num_tokens,
            self.model_runner.swa_max_total_num_tokens,
        )
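    # For hybrid-attention models (some layers use full attention, others a
    # sliding window), the runner keeps separate token budgets for the full and
    # sliding-window (SWA) KV pools; the pair above exposes those two budgets.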
    def get_pad_input_ids_func(self):
        return getattr(self.model_runner.model, "pad_input_ids", None)

    def get_tp_group(self):
        return self.model_runner.tp_group

    def get_attention_tp_group(self):
        return self.model_runner.attention_tp_group

    def get_attention_tp_cpu_group(self):
        return getattr(self.model_runner.attention_tp_group, "cpu_group", None)

    def get_memory_pool(self):
        return (
            self.model_runner.req_to_token_pool,
            self.model_runner.token_to_kv_pool_allocator,
        )
    def forward_batch_generation(
        self,
        model_worker_batch: ModelWorkerBatch,
        launch_done: Optional[threading.Event] = None,
        skip_sample: bool = False,
    ) -> Tuple[
        Union[LogitsProcessorOutput, torch.Tensor], Optional[torch.Tensor], bool
    ]:
        # Update the hicache consumer index for the running batch
        self.set_hicache_consumer(model_worker_batch.hicache_consumer_index)

        forward_batch = ForwardBatch.init_new(model_worker_batch, self.model_runner)

        pp_proxy_tensors = None
        if not self.pp_group.is_first_rank:
            pp_proxy_tensors = PPProxyTensors(
                self.pp_group.recv_tensor_dict(
                    all_gather_group=self.get_attention_tp_group()
                )
            )
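        # Pipeline parallelism: every rank except the first receives the
        # intermediate tensors (e.g. hidden states) produced by the previous
        # pipeline stage before running its own layers.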
        if self.pp_group.is_last_rank:
            logits_output, can_run_cuda_graph = self.model_runner.forward(
                forward_batch, pp_proxy_tensors=pp_proxy_tensors
            )
            if launch_done is not None:
                launch_done.set()

            if skip_sample:
                next_token_ids = None
                # For prefill-only requests, we still need to compute logprobs even when sampling is skipped
                if (
                    model_worker_batch.is_prefill_only
                    and model_worker_batch.return_logprob
                ):
                    # Compute logprobs without full sampling
                    self.model_runner.compute_logprobs_only(
                        logits_output, model_worker_batch
                    )
            else:
                next_token_ids = self.model_runner.sample(logits_output, forward_batch)

            return logits_output, next_token_ids, can_run_cuda_graph
        else:
            pp_proxy_tensors, can_run_cuda_graph = self.model_runner.forward(
                forward_batch,
                pp_proxy_tensors=pp_proxy_tensors,
            )
            return pp_proxy_tensors.tensors, None, can_run_cuda_graph
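    # A minimal sketch of how a caller (e.g. a scheduler loop) might consume the
    # return value; `worker` and `model_worker_batch` are assumed to exist and
    # this is illustrative, not code from the original file:
    #
    #   logits_output, next_token_ids, can_run_cuda_graph = (
    #       worker.forward_batch_generation(model_worker_batch)
    #   )
    #   if next_token_ids is not None:  # last PP rank with sampling enabled
    #       next_token_ids = next_token_ids.tolist()
    #   # On non-last PP ranks, the first element is a dict of proxy tensors to
    #   # pass to the next stage instead of a LogitsProcessorOutput.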
    def forward_batch_embedding(self, model_worker_batch: ModelWorkerBatch):
        forward_batch = ForwardBatch.init_new(model_worker_batch, self.model_runner)
        logits_output, _ = self.model_runner.forward(forward_batch)
        embeddings = logits_output.embeddings
        return embeddings
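    # Embedding requests reuse the same forward path but return the pooled
    # embeddings carried on the LogitsProcessorOutput instead of sampling
    # next tokens.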
    def update_weights_from_disk(self, recv_req: UpdateWeightFromDiskReqInput):
        success, message = self.model_runner.update_weights_from_disk(
            recv_req.model_path, recv_req.load_format
        )
        return success, message

    def init_weights_update_group(self, recv_req: InitWeightsUpdateGroupReqInput):
        success, message = self.model_runner.init_weights_update_group(
            recv_req.master_address,
            recv_req.master_port,
            recv_req.rank_offset,
            recv_req.world_size,
            recv_req.group_name,
            recv_req.backend,
        )
        return success, message

    def destroy_weights_update_group(self, recv_req: DestroyWeightsUpdateGroupReqInput):
        success, message = self.model_runner.destroy_weights_update_group(
            recv_req.group_name,
        )
        return success, message

    def init_weights_send_group_for_remote_instance(
        self, recv_req: InitWeightsSendGroupForRemoteInstanceReqInput
    ):
        success, message = (
            self.model_runner.init_weights_send_group_for_remote_instance(
                recv_req.master_address,
                recv_req.ports,
                recv_req.group_rank,
                recv_req.world_size,
                recv_req.group_name,
                recv_req.backend,
            )
        )
        return success, message

    def send_weights_to_remote_instance(
        self, recv_req: SendWeightsToRemoteInstanceReqInput
    ):
        success, message = self.model_runner.send_weights_to_remote_instance(
            recv_req.master_address,
            recv_req.ports,
            recv_req.group_name,
        )
        return success, message

    def update_weights_from_distributed(
        self, recv_req: UpdateWeightsFromDistributedReqInput
    ):
        success, message = self.model_runner.update_weights_from_distributed(
            recv_req.names, recv_req.dtypes, recv_req.shapes, recv_req.group_name
        )
        return success, message
    def update_weights_from_tensor(self, recv_req: UpdateWeightsFromTensorReqInput):
        monkey_patch_torch_reductions()
        success, message = self.model_runner.update_weights_from_tensor(
            named_tensors=MultiprocessingSerializer.deserialize(
                recv_req.serialized_named_tensors[self.tp_rank]
            ),
            load_format=recv_req.load_format,
        )
        return success, message
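    # `serialized_named_tensors` is indexed by TP rank, so each worker
    # deserializes the entry prepared for its own rank. The torch-reductions
    # monkey patch is applied first so tensors serialized in another process can
    # be rebuilt here (our reading of the patch's purpose).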
    def get_weights_by_name(self, recv_req: GetWeightsByNameReqInput):
        parameter = self.model_runner.get_weights_by_name(
            recv_req.name, recv_req.truncate_size
        )
        return parameter

    def load_lora_adapter(self, recv_req: LoadLoRAAdapterReqInput):
        result = self.model_runner.load_lora_adapter(recv_req.to_ref())
        return result

    def unload_lora_adapter(self, recv_req: UnloadLoRAAdapterReqInput):
        result = self.model_runner.unload_lora_adapter(recv_req.to_ref())
        return result

    def can_run_lora_batch(self, lora_ids: list[str]) -> bool:
        return self.model_runner.lora_manager.validate_lora_batch(lora_ids)
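# A minimal construction sketch (illustrative only; the argument values below
# are hypothetical and the real caller is the scheduler process, not this file):
#
#   server_args = ServerArgs(model_path="meta-llama/Llama-3.1-8B-Instruct", tp_size=2)
#   worker = TpModelWorker(
#       server_args=server_args,
#       gpu_id=0,
#       tp_rank=0,
#       moe_ep_rank=0,
#       pp_rank=0,
#       dp_rank=None,
#       nccl_port=29500,
#   )
#   max_total_num_tokens, max_prefill_tokens, max_running_requests, *_rest = (
#       worker.get_worker_info()
#   )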