feat: frequency, min_new_tokens, presence, and repetition penalties (#973)
This commit is contained in:
@@ -0,0 +1,80 @@
|
||||
import typing
|
||||
import unittest
|
||||
|
||||
import torch
|
||||
|
||||
from sglang.srt.sampling.penaltylib.penalizers.frequency_penalty import (
|
||||
BatchedFrequencyPenalizer,
|
||||
)
|
||||
from sglang.test.srt.sampling.penaltylib.utils import (
|
||||
BaseBatchedPenalizerTest,
|
||||
MockSamplingParams,
|
||||
Step,
|
||||
StepType,
|
||||
Subject,
|
||||
)
|
||||
|
||||
FREQUENCY_PENALTY = 0.12
|
||||
|
||||
|
||||
class TestBatchedFrequencyPenalizer(BaseBatchedPenalizerTest):
    """Harness-driven tests for ``BatchedFrequencyPenalizer``.

    The base class runs each Subject's steps through the penalizer and
    compares the penalizer's internal tensors and the penalized logits
    against the expectations declared here.
    """

    Penalizer = BatchedFrequencyPenalizer

    def _create_subject(self, frequency_penalty: float) -> Subject:
        """Build a Subject encoding the frequency-penalty contract.

        Each occurrence of a token in the OUTPUT step adds
        ``frequency_penalty`` to that token's cumulated penalty, which is
        then subtracted from the (all-ones) logits.
        """
        return Subject(
            sampling_params=MockSamplingParams(
                frequency_penalty=frequency_penalty,
            ),
            steps=[
                Step(
                    type=StepType.INPUT,
                    token_ids=[0, 1, 2],
                    expected_tensors={
                        "frequency_penalties": self.tensor(
                            [[frequency_penalty] * self.vocab_size], dtype=torch.float32
                        ),
                        # Prompt tokens accumulate no frequency penalty.
                        "cumulated_frequency_penalties": self.tensor(
                            [[0.0] * self.vocab_size], dtype=torch.float32
                        ),
                    },
                    expected_logits=self.tensor(
                        [[1] * self.vocab_size], dtype=torch.float32
                    ),
                ),
                Step(
                    type=StepType.OUTPUT,
                    token_ids=[1, 2, 2],
                    expected_tensors={
                        "frequency_penalties": self.tensor(
                            [[frequency_penalty] * self.vocab_size], dtype=torch.float32
                        ),
                        # Token 1 appeared once and token 2 twice, so the
                        # occurrence count of token i happens to equal i here,
                        # letting `frequency_penalty * i` express count * penalty.
                        "cumulated_frequency_penalties": self.tensor(
                            [
                                [
                                    frequency_penalty * i if i in {1, 2} else 0.0
                                    for i in range(self.vocab_size)
                                ],
                            ],
                            dtype=torch.float32,
                        ),
                    },
                    expected_logits=self.tensor(
                        [
                            [
                                1.0 - frequency_penalty * i if i in {1, 2} else 1.0
                                for i in range(self.vocab_size)
                            ],
                        ],
                        dtype=torch.float32,
                    ),
                ),
            ],
        )

    def create_test_subjects(self) -> None:
        """Populate the enabled/disabled subjects consumed by the base class."""
        self.enabled = self._create_subject(frequency_penalty=FREQUENCY_PENALTY)
        self.disabled = self._create_subject(frequency_penalty=0.0)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
unittest.main()
|
||||
152
test/srt/sampling/penaltylib/penalizers/test_min_new_tokens.py
Normal file
@@ -0,0 +1,152 @@
|
||||
import typing
|
||||
import unittest
|
||||
|
||||
import torch
|
||||
|
||||
from sglang.srt.sampling.penaltylib.penalizers.min_new_tokens import (
|
||||
BatchedMinNewTokensPenalizer,
|
||||
)
|
||||
from sglang.test.srt.sampling.penaltylib.utils import (
|
||||
BaseBatchedPenalizerTest,
|
||||
MockSamplingParams,
|
||||
Step,
|
||||
StepType,
|
||||
Subject,
|
||||
)
|
||||
|
||||
MIN_NEW_TOKENS = 2
|
||||
EOS_TOKEN_ID = 4
|
||||
STOP_TOKEN_ID = 3
|
||||
|
||||
ALL_STOP_TOKEN_IDS = {STOP_TOKEN_ID, EOS_TOKEN_ID}
|
||||
|
||||
|
||||
class TestBatchedMinNewTokensPenalizer(BaseBatchedPenalizerTest):
    """Harness-driven tests for ``BatchedMinNewTokensPenalizer``.

    While fewer than ``min_new_tokens`` output tokens have been produced,
    stop/EOS token logits are forced to -inf; once the threshold is reached
    the logits pass through unchanged.
    """

    Penalizer = BatchedMinNewTokensPenalizer

    def _create_subject(self, min_new_tokens: int) -> Subject:
        """Build a Subject with one INPUT step and two OUTPUT steps.

        The expected logits at each step depend only on whether the number
        of output tokens produced so far is still below ``min_new_tokens``.
        """

        def _stop_token_penalties() -> torch.Tensor:
            # -inf on every stop/EOS token id, 0 elsewhere.
            return self.tensor(
                [
                    [
                        float("-inf") if i in ALL_STOP_TOKEN_IDS else 0
                        for i in range(self.vocab_size)
                    ]
                ],
                dtype=torch.float32,
            )

        def _expected_logits(len_output_tokens: int) -> torch.Tensor:
            # Below the threshold: stop tokens are masked to -inf.
            if min_new_tokens > len_output_tokens:
                return self.tensor(
                    [
                        [
                            float("-inf") if i in ALL_STOP_TOKEN_IDS else 1
                            for i in range(self.vocab_size)
                        ]
                    ],
                    dtype=torch.float32,
                )
            # At or past the threshold: logits are untouched (all ones).
            return torch.ones(
                (1, self.vocab_size),
                dtype=torch.float32,
                device=self.device,
            )

        def _make_step(
            step_type: StepType,
            token_ids: typing.List[int],
            len_output_tokens: int,
        ) -> Step:
            # One-line builder for the otherwise copy-pasted Step literals.
            return Step(
                type=step_type,
                token_ids=token_ids,
                expected_tensors={
                    "min_new_tokens": self.tensor(
                        [[min_new_tokens]], dtype=torch.int32
                    ),
                    "stop_token_penalties": _stop_token_penalties(),
                    "len_output_tokens": self.tensor(
                        [[len_output_tokens]], dtype=torch.int32
                    ),
                },
                expected_logits=_expected_logits(len_output_tokens),
            )

        return Subject(
            eos_token_id=EOS_TOKEN_ID,
            sampling_params=MockSamplingParams(
                min_new_tokens=min_new_tokens,
                stop_token_ids={STOP_TOKEN_ID},
            ),
            steps=[
                _make_step(StepType.INPUT, [0, 1, 2], 0),
                _make_step(StepType.OUTPUT, [0], 1),
                _make_step(StepType.OUTPUT, [0], 2),
            ],
        )

    def create_test_subjects(self) -> None:
        """Populate the enabled/disabled subjects consumed by the base class."""
        self.enabled = self._create_subject(min_new_tokens=MIN_NEW_TOKENS)
        # Was 0.0 (float); the parameter is declared int — pass 0.
        self.disabled = self._create_subject(min_new_tokens=0)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
unittest.main()
|
||||
@@ -0,0 +1,80 @@
|
||||
import typing
|
||||
import unittest
|
||||
|
||||
import torch
|
||||
|
||||
from sglang.srt.sampling.penaltylib.penalizers.presence_penalty import (
|
||||
BatchedPresencePenalizer,
|
||||
)
|
||||
from sglang.test.srt.sampling.penaltylib.utils import (
|
||||
BaseBatchedPenalizerTest,
|
||||
MockSamplingParams,
|
||||
Step,
|
||||
StepType,
|
||||
Subject,
|
||||
)
|
||||
|
||||
PRESENCE_PENALTY = 0.12
|
||||
|
||||
|
||||
class TestBatchedPresencePenalizer(BaseBatchedPenalizerTest):
    """Harness-driven tests for ``BatchedPresencePenalizer``.

    Unlike the frequency penalty, the presence penalty is applied once per
    distinct token that has appeared in the output, regardless of how many
    times it occurred.
    """

    Penalizer = BatchedPresencePenalizer

    def _create_subject(self, presence_penalty: float) -> Subject:
        """Build a Subject encoding the presence-penalty contract."""
        return Subject(
            sampling_params=MockSamplingParams(
                presence_penalty=presence_penalty,
            ),
            steps=[
                Step(
                    type=StepType.INPUT,
                    token_ids=[0, 1, 2],
                    expected_tensors={
                        "presence_penalties": self.tensor(
                            [[presence_penalty] * self.vocab_size], dtype=torch.float32
                        ),
                        # Prompt tokens accumulate no presence penalty.
                        "cumulated_presence_penalties": self.tensor(
                            [[0.0] * self.vocab_size], dtype=torch.float32
                        ),
                    },
                    expected_logits=self.tensor(
                        [[1] * self.vocab_size], dtype=torch.float32
                    ),
                ),
                Step(
                    type=StepType.OUTPUT,
                    token_ids=[1, 2, 2],
                    expected_tensors={
                        "presence_penalties": self.tensor(
                            [[presence_penalty] * self.vocab_size], dtype=torch.float32
                        ),
                        # Tokens 1 and 2 have each appeared at least once;
                        # token 2's double occurrence adds nothing extra.
                        "cumulated_presence_penalties": self.tensor(
                            [
                                [
                                    presence_penalty if i in {1, 2} else 0.0
                                    for i in range(self.vocab_size)
                                ],
                            ],
                            dtype=torch.float32,
                        ),
                    },
                    expected_logits=self.tensor(
                        [
                            [
                                1.0 - presence_penalty if i in {1, 2} else 1.0
                                for i in range(self.vocab_size)
                            ],
                        ],
                        dtype=torch.float32,
                    ),
                ),
            ],
        )

    def create_test_subjects(self) -> None:
        """Populate the enabled/disabled subjects consumed by the base class."""
        self.enabled = self._create_subject(presence_penalty=PRESENCE_PENALTY)
        self.disabled = self._create_subject(presence_penalty=0.0)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
unittest.main()
|
||||
@@ -0,0 +1,87 @@
|
||||
import typing
|
||||
import unittest
|
||||
|
||||
import torch
|
||||
|
||||
from sglang.srt.sampling.penaltylib.penalizers.repetition_penalty import (
|
||||
BatchedRepetitionPenalizer,
|
||||
)
|
||||
from sglang.test.srt.sampling.penaltylib.utils import (
|
||||
BaseBatchedPenalizerTest,
|
||||
MockSamplingParams,
|
||||
Step,
|
||||
StepType,
|
||||
Subject,
|
||||
)
|
||||
|
||||
REPETITION_PENALTY = 2.0
|
||||
|
||||
|
||||
class TestBatchedRepetitionPenalizer(BaseBatchedPenalizerTest):
    """Harness-driven tests for ``BatchedRepetitionPenalizer``.

    Seen tokens (from prompt or output) have their positive logits divided
    by the repetition penalty; a penalty of 1.0 disables the penalizer.
    """

    Penalizer = BatchedRepetitionPenalizer

    def _create_subject(self, repetition_penalty: float) -> Subject:
        """Build a Subject for the given repetition penalty.

        NOTE(review): the hard-coded 5-element expectation tensors below
        assume ``self.vocab_size == 5`` — confirm against the base harness.
        """
        # All expected logits start at 1.0 (positive), so a penalized token's
        # logit becomes 1.0 / repetition_penalty.
        penalized = 1.0 / repetition_penalty
        return Subject(
            sampling_params=MockSamplingParams(
                repetition_penalty=repetition_penalty,
            ),
            steps=[
                Step(
                    type=StepType.INPUT,
                    token_ids=[0, 1, 2],
                    expected_tensors={
                        "repetition_penalties": self.tensor(
                            [[repetition_penalty] * self.vocab_size],
                            dtype=torch.float32,
                        ),
                        # Prompt tokens 0..2 are marked penalized (2.0);
                        # unseen tokens keep the neutral value 1.0.
                        "cumulated_repetition_penalties": (
                            self.tensor(
                                [[2.0, 2.0, 2.0, 1.0, 1.0]], dtype=torch.float32
                            )
                            if repetition_penalty != 1.0
                            else self.tensor(
                                [[1.0] * self.vocab_size], dtype=torch.float32
                            )
                        ),
                    },
                    expected_logits=(
                        self.tensor(
                            [[penalized, penalized, penalized, 1.0, 1.0]],
                            dtype=torch.float32,
                        )
                        if repetition_penalty != 1.0
                        else self.tensor([[1.0] * self.vocab_size], dtype=torch.float32)
                    ),
                ),
                Step(
                    type=StepType.OUTPUT,
                    token_ids=[0, 1, 3],
                    expected_tensors={
                        "repetition_penalties": self.tensor(
                            [[repetition_penalty] * self.vocab_size],
                            dtype=torch.float32,
                        ),
                        # Output adds token 3 to the seen set; token 4 is
                        # still unseen.
                        "cumulated_repetition_penalties": (
                            self.tensor(
                                [[2.0, 2.0, 2.0, 2.0, 1.0]], dtype=torch.float32
                            )
                            if repetition_penalty != 1.0
                            else self.tensor(
                                [[1.0] * self.vocab_size], dtype=torch.float32
                            )
                        ),
                    },
                    expected_logits=(
                        self.tensor(
                            [[penalized, penalized, penalized, penalized, 1.0]],
                            dtype=torch.float32,
                        )
                        if repetition_penalty != 1.0
                        else self.tensor([[1.0] * self.vocab_size], dtype=torch.float32)
                    ),
                ),
            ],
        )

    def create_test_subjects(self) -> None:
        """Populate the enabled/disabled subjects consumed by the base class."""
        self.enabled = self._create_subject(repetition_penalty=REPETITION_PENALTY)
        self.disabled = self._create_subject(repetition_penalty=1.0)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
unittest.main()
|
||||
Reference in New Issue
Block a user