Sampler cudagraph (#1253)
@@ -21,10 +21,63 @@ class SamplingBatchInfo:
    top_ps: torch.Tensor = None
    top_ks: torch.Tensor = None
    min_ps: torch.Tensor = None
    penalizer_orchestrator: penaltylib.BatchedPenalizerOrchestrator = None

    # Dispatch in CUDA graph
    need_min_p_sampling: bool = False

    # Bias Tensors
    logit_bias: torch.Tensor = None
    vocab_mask: torch.Tensor = None

    # Penalizer
    penalizer_orchestrator: penaltylib.BatchedPenalizerOrchestrator = None
    linear_penalties: torch.Tensor = None
    scaling_penalties: torch.Tensor = None
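The "Dispatch in CUDA graph" flag is a plain Python bool rather than a tensor: a captured CUDA graph replays a fixed sequence of kernels, so any data-dependent branch has to be resolved on the host before capture. A minimal sketch of that idea (illustrative only; `min_p_filter` and `sample` are stand-ins, not the sampler changed by this PR):

```python
import torch

def min_p_filter(probs: torch.Tensor, min_ps: torch.Tensor) -> torch.Tensor:
    # Drop tokens whose probability is below min_p * max_prob, then renormalize.
    max_prob = probs.max(dim=-1, keepdim=True).values
    probs = torch.where(probs >= min_ps.unsqueeze(-1) * max_prob, probs, torch.zeros_like(probs))
    return probs / probs.sum(dim=-1, keepdim=True)

def sample(probs: torch.Tensor, info) -> torch.Tensor:
    # Host-side branch: decided once when the graph is captured, so the graph
    # only ever contains the kernels of the path that was actually taken.
    if info.need_min_p_sampling:
        probs = min_p_filter(probs, info.min_ps)
    return torch.multinomial(probs, num_samples=1)
```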
    def has_bias(self):
        return (
            self.logit_bias is not None
            or self.vocab_mask is not None
            or self.linear_penalties is not None
            or self.scaling_penalties is not None
        )
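`has_bias()` is what lets the runner fall back to the eager path: logit bias, regex vocab masks, and penalty tensors have batch-dependent shapes, so the fixed-shape graph path skips them. A hypothetical gating check (the actual condition in the model runner may differ):

```python
def can_run_with_cuda_graph(batch_size: int, max_graph_bs: int, sampling_info) -> bool:
    # Replay only when the batch fits the captured size and no bias tensors
    # would force batch-dependent shapes into the graph.
    return batch_size <= max_graph_bs and not sampling_info.has_bias()
```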
    @classmethod
    def dummy_one(cls, max_bs: int, vocab_size: int):
        ret = cls(vocab_size=vocab_size)
        ret.temperatures = torch.ones((max_bs, 1), dtype=torch.float, device="cuda")
        ret.top_ps = torch.ones((max_bs,), dtype=torch.float, device="cuda")
        ret.top_ks = torch.ones((max_bs,), dtype=torch.int, device="cuda")
        ret.min_ps = torch.zeros((max_bs,), dtype=torch.float, device="cuda")
        return ret
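`dummy_one` allocates placeholder sampling tensors at the maximum batch size so that every graph replay can reuse the same device memory. A hedged sketch of how a graph runner might use it at capture time (the sizes and surrounding calls are illustrative, not taken from the PR):

```python
# Hypothetical capture-time setup.
max_bs, vocab_size = 160, 32000  # example sizes only
graph_sampling_info = SamplingBatchInfo.dummy_one(max_bs, vocab_size)
# ... the decode forward pass would then be captured against
# graph_sampling_info, whose tensor addresses stay fixed across replays ...
```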
    def __getitem__(self, key):
        if isinstance(key, slice):
            # NOTE: We do not use cuda graph when there is bias tensors
            assert not self.has_bias()
            return SamplingBatchInfo(
                vocab_size=self.vocab_size,
                temperatures=self.temperatures[key],
                top_ps=self.top_ps[key],
                top_ks=self.top_ks[key],
                min_ps=self.min_ps[key],
                need_min_p_sampling=self.need_min_p_sampling,
            )
        else:
            raise NotImplementedError

    def inplace_assign(self, bs: int, other: SamplingBatchInfo):
        # NOTE: We do not use cuda graph when there is bias tensors
        assert not self.has_bias()

        self.vocab_size = other.vocab_size
        self.need_min_p_sampling = other.need_min_p_sampling

        self.temperatures[:bs] = other.temperatures
        self.top_ps[:bs] = other.top_ps
        self.top_ks[:bs] = other.top_ks
        self.min_ps[:bs] = other.min_ps
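Together, `__getitem__` and `inplace_assign` let the runner reuse the captured tensors for a smaller live batch: slice a view of the fixed max-batch storage, then copy the real per-request values into it in place, so the device addresses recorded by the graph stay valid. A plausible replay-time flow (illustrative, not lifted from the PR):

```python
def prepare_sampling_for_replay(
    graph_info: SamplingBatchInfo, live_info: SamplingBatchInfo, bs: int
) -> SamplingBatchInfo:
    view = graph_info[:bs]              # views into the fixed max-batch tensors
    view.inplace_assign(bs, live_info)  # overwrite values without reallocating
    return view
```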
    @classmethod
    def from_schedule_batch(cls, batch: ScheduleBatch, vocab_size: int):
        device = "cuda"

@@ -45,6 +98,7 @@ class SamplingBatchInfo:
        ret.min_ps = torch.tensor(
            [r.sampling_params.min_p for r in reqs], dtype=torch.float, device=device
        )
        ret.need_min_p_sampling = any(r.sampling_params.min_p > 0 for r in reqs)

        # Each penalizers will do nothing if they evaluate themselves as not required by looking at
        # the sampling_params of the requests (See {_is_required()} of each penalizers). So this
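The comment above describes penalizers that turn themselves off when no request needs them. A simplified sketch of that pattern (not the actual penaltylib interface; frequency penalties are used only as an example):

```python
import torch

class FrequencyPenalizerSketch:
    """Does nothing unless some request actually sets a frequency penalty."""

    def __init__(self, reqs, vocab_size: int):
        self.required = any(r.sampling_params.frequency_penalty != 0.0 for r in reqs)
        if self.required:
            # Accumulated per-token counts scaled by each request's penalty.
            self.penalties = torch.zeros((len(reqs), vocab_size), device="cuda")

    def apply(self, logits: torch.Tensor) -> torch.Tensor:
        if not self.required:
            return logits  # no-op path: nothing was allocated or computed
        return logits - self.penalties
```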
@@ -72,6 +126,25 @@ class SamplingBatchInfo:

        return ret

    def prepare_penalties(self):
        self.scaling_penalties = None
        self.linear_penalties = None

        for penalizer in self.penalizer_orchestrator.penalizers.values():
            if isinstance(penalizer, penaltylib.BatchedRepetitionPenalizer):
                if penalizer.is_prepared():
                    self.scaling_penalties = penalizer.cumulated_repetition_penalties
            else:
                if penalizer.is_prepared():
                    if self.linear_penalties is None:
                        bs = self.penalizer_orchestrator.batch.batch_size()
                        self.linear_penalties = torch.zeros(
                            (bs, self.vocab_size),
                            dtype=torch.float32,
                            device="cuda",
                        )
                    self.linear_penalties = penalizer.apply(self.linear_penalties)
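`prepare_penalties` collapses the orchestrator's state into at most two dense, fixed-shape tensors for the sampler: an additive term (`linear_penalties`) and a multiplicative repetition term (`scaling_penalties`). One plausible way a sampler could consume them (hedged; the exact formula in this PR's sampler may differ):

```python
import torch

def apply_penalties(logits: torch.Tensor, info: SamplingBatchInfo) -> torch.Tensor:
    if info.linear_penalties is not None:
        # Additive penalties (e.g. frequency/presence) are simply added in.
        logits = logits + info.linear_penalties
    if info.scaling_penalties is not None:
        # Repetition penalty: shrink positive logits, amplify negative ones.
        logits = torch.where(
            logits > 0, logits / info.scaling_penalties, logits * info.scaling_penalties
        )
    return logits
```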
    def update_regex_vocab_mask(self, batch: ScheduleBatch):
        bs, reqs = batch.batch_size(), batch.reqs
        device = "cuda"