Simplify sampler and its error handling (#1441)
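This makes the sampler a plain torch nn.Module that returns the sampled token IDs directly. It drops the vLLM CustomOp base class, the SampleOutput dataclass, and the torch.library custom-op wrapper around flashinfer's top_k_top_p_sampling_from_probs; folds temperature scaling and softmax into forward(), overwriting the logits tensor with the probabilities in place; replaces NaN probabilities with a small epsilon and zeroes the token IDs when flashinfer reports a sampling failure; and calls torch.multinomial with num_samples=1 directly instead of the old num_samples=2 try/except workaround.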
@@ -1,6 +1,5 @@
-import dataclasses
 import logging
-from typing import Tuple, Union
+from typing import Union

 import torch
 from flashinfer.sampling import (
@@ -9,43 +8,17 @@ from flashinfer.sampling import (
     top_k_top_p_sampling_from_probs,
     top_p_renorm_prob,
 )
-from torch.library import custom_op as torch_custom_op
-from vllm.model_executor.custom_op import CustomOp
+from torch import nn

 from sglang.srt.layers.logits_processor import LogitsProcessorOutput
-
-# TODO: move this dict to another place
 from sglang.srt.managers.schedule_batch import global_server_args_dict
 from sglang.srt.sampling.sampling_batch_info import SamplingBatchInfo

 logger = logging.getLogger(__name__)


-@dataclasses.dataclass
-class SampleOutput:
-    success: torch.Tensor
-    probs: torch.Tensor
-    batch_next_token_ids: torch.Tensor
-
-
-class Sampler(CustomOp):
-    def __init__(self):
-        super().__init__()
-        # FIXME: torch.multinomial has too many bugs
-        self.forward_native = self.forward_cuda
-        self.is_torch_compile = False
-
-    def _get_probs(self, logits: torch.Tensor, sampling_info: SamplingBatchInfo):
-        # Post process logits
-        logits = logits.contiguous()
-        logits.div_(sampling_info.temperatures)
-        if self.is_torch_compile:
-            # FIXME: Temporary workaround for unknown bugs in torch.compile
-            logits.add_(0)
-
-        return torch.softmax(logits, dim=-1)
-
-    def forward_cuda(
+class Sampler(nn.Module):
+    def forward(
         self,
         logits: Union[torch.Tensor, LogitsProcessorOutput],
         sampling_info: SamplingBatchInfo,
@@ -53,7 +26,15 @@ class Sampler(CustomOp):
         if isinstance(logits, LogitsProcessorOutput):
             logits = logits.next_token_logits

-        probs = self._get_probs(logits, sampling_info)
+        # Post process logits
+        logits.div_(sampling_info.temperatures)
+        probs = logits[:] = torch.softmax(logits, dim=-1)
+
+        if torch.any(torch.isnan(probs)):
+            logger.warning("Detected errors during sampling! NaN in the probability.")
+            probs = torch.where(
+                torch.isnan(probs), torch.full_like(probs, 1e-10), probs
+            )

         if global_server_args_dict["sampling_backend"] == "flashinfer":
             max_top_k_round, batch_size = 32, probs.shape[0]
@@ -67,12 +48,16 @@ class Sampler(CustomOp):
                     probs, uniform_samples, sampling_info.min_ps
                 )
             else:
-                batch_next_token_ids, success = flashinfer_top_k_top_p(
+                batch_next_token_ids, success = top_k_top_p_sampling_from_probs(
                     probs, uniform_samples, sampling_info.top_ks, sampling_info.top_ps
                 )
+
+            if not torch.all(success):
+                logger.warning("Detected errors during sampling!")
+                batch_next_token_ids = torch.zeros_like(batch_next_token_ids)
         elif global_server_args_dict["sampling_backend"] == "pytorch":
             # Here we provide a slower fallback implementation.
-            batch_next_token_ids, success = top_k_top_p_min_p_sampling_from_probs_torch(
+            batch_next_token_ids = top_k_top_p_min_p_sampling_from_probs_torch(
                 probs, sampling_info.top_ks, sampling_info.top_ps, sampling_info.min_ps
             )
         else:
@@ -80,48 +65,7 @@ class Sampler(CustomOp):
                 f"Invalid sampling backend: {global_server_args_dict['sampling_backend']}"
             )

-        return SampleOutput(success, probs, batch_next_token_ids)
-
-    def forward_native(
-        self,
-        logits: Union[torch.Tensor, LogitsProcessorOutput],
-        sampling_info: SamplingBatchInfo,
-    ):
-        if isinstance(logits, LogitsProcessorOutput):
-            logits = logits.next_token_logits
-
-        probs = self._get_probs(logits, sampling_info)
-
-        batch_next_token_ids, success = top_k_top_p_min_p_sampling_from_probs_torch(
-            probs, sampling_info.top_ks, sampling_info.top_ps, sampling_info.min_ps
-        )
-
-        return SampleOutput(success, probs, batch_next_token_ids)
-
-
-@torch_custom_op("my_lib::flashinfer_top_k_top_p", mutates_args={})
-def flashinfer_top_k_top_p(
-    probs: torch.Tensor,
-    uniform_samples: torch.Tensor,
-    top_ks: torch.Tensor,
-    top_ps: torch.Tensor,
-) -> Tuple[torch.Tensor, torch.Tensor]:
-    # NOTE: we do not use min_p neither in CUDA nor in torch.compile
-    return top_k_top_p_sampling_from_probs(probs, uniform_samples, top_ks, top_ps)
-
-
-@flashinfer_top_k_top_p.register_fake
-def _(
-    probs: torch.Tensor,
-    uniform_samples: torch.Tensor,
-    top_ks: torch.Tensor,
-    top_ps: torch.Tensor,
-) -> Tuple[torch.Tensor, torch.Tensor]:
-    bs = probs.shape[0]
-    return (
-        torch.ones(bs, dtype=torch.bool, device=probs.device),
-        torch.zeros(bs, dtype=torch.int32, device=probs.device),
-    )
+        return batch_next_token_ids


 def top_k_top_p_min_p_sampling_from_probs_torch(
@@ -141,19 +85,6 @@ def top_k_top_p_min_p_sampling_from_probs_torch(
     ] = 0.0
     probs_sort[probs_sort < min_p_thresholds.view(-1, 1)] = 0.0
     probs_sort.div_(probs_sort.max(dim=-1, keepdim=True)[0])
-    try:
-        # FIXME: torch.multiomial does not support num_samples = 1
-        sampled_index = torch.multinomial(probs_sort, num_samples=2, replacement=True)[
-            :, :1
-        ]
-    except RuntimeError as e:
-        logger.warning(f"Sampling error: {e}")
-        batch_next_token_ids = torch.zeros(
-            (probs_sort.shape[0],), dtype=torch.int32, device=probs.device
-        )
-        success = torch.zeros(probs.shape[0], dtype=torch.bool, device=probs.device)
-        return batch_next_token_ids, success
-
+    sampled_index = torch.multinomial(probs_sort, num_samples=1)
     batch_next_token_ids = torch.gather(probs_idx, dim=1, index=sampled_index).view(-1)
-    success = torch.ones(probs.shape[0], dtype=torch.bool, device=probs.device)
-    return batch_next_token_ids, success
+    return batch_next_token_ids
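
For context, here is a minimal, self-contained sketch of the torch-native fallback as it reads after this change. The head of top_k_top_p_min_p_sampling_from_probs_torch sits outside the hunk above, so the sorting, cumulative-sum, and masking lines before the visible "] = 0.0" are a plausible reconstruction from the tail of the diff, not a verbatim copy of the file.

import torch

def top_k_top_p_min_p_sampling_from_probs_torch(
    probs: torch.Tensor,   # (batch, vocab) rows of probabilities
    top_ks: torch.Tensor,  # (batch,) per-request top-k
    top_ps: torch.Tensor,  # (batch,) per-request top-p
    min_ps: torch.Tensor,  # (batch,) per-request min-p
) -> torch.Tensor:
    # Sort descending so the top-k/top-p cutoffs become prefix masks.
    probs_sort, probs_idx = probs.sort(dim=-1, descending=True)
    probs_sum = torch.cumsum(probs_sort, dim=-1)
    # min-p threshold is a fraction of each row's max probability.
    min_p_thresholds = probs_sort[:, 0] * min_ps
    # top-p: zero tokens whose preceding cumulative mass already exceeds top_p.
    probs_sort[(probs_sum - probs_sort) > top_ps.view(-1, 1)] = 0.0
    # top-k: zero everything past the k-th sorted position.
    probs_sort[
        torch.arange(0, probs.shape[-1], device=probs.device).view(1, -1)
        >= top_ks.view(-1, 1)
    ] = 0.0
    # min-p: drop tokens below the per-row threshold.
    probs_sort[probs_sort < min_p_thresholds.view(-1, 1)] = 0.0
    probs_sort.div_(probs_sort.max(dim=-1, keepdim=True)[0])
    sampled_index = torch.multinomial(probs_sort, num_samples=1)
    # Map sorted positions back to vocabulary token IDs.
    batch_next_token_ids = torch.gather(probs_idx, dim=1, index=sampled_index).view(-1)
    return batch_next_token_ids

Dividing by the row max rather than the row sum is enough before sampling, because torch.multinomial only requires non-negative weights and renormalizes them internally; that is also why the commit can drop the old num_samples=2 workaround and its try/except in favor of a direct num_samples=1 call.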