feat: frequency, min_new_tokens, presence, and repetition penalties (#973)
@@ -0,0 +1,80 @@
import typing
import unittest

import torch

from sglang.srt.sampling.penaltylib.penalizers.frequency_penalty import (
    BatchedFrequencyPenalizer,
)
from sglang.test.srt.sampling.penaltylib.utils import (
    BaseBatchedPenalizerTest,
    MockSamplingParams,
    Step,
    StepType,
    Subject,
)

FREQUENCY_PENALTY = 0.12


class TestBatchedFrequencyPenalizer(BaseBatchedPenalizerTest):
    Penalizer = BatchedFrequencyPenalizer

    def _create_subject(self, frequency_penalty: float) -> Subject:
        return Subject(
            sampling_params=MockSamplingParams(
                frequency_penalty=frequency_penalty,
            ),
            steps=[
                Step(
                    type=StepType.INPUT,
                    token_ids=[0, 1, 2],
                    expected_tensors={
                        "frequency_penalties": self.tensor(
                            [[frequency_penalty] * self.vocab_size], dtype=torch.float32
                        ),
                        "cumulated_frequency_penalties": self.tensor(
                            [[0.0] * self.vocab_size], dtype=torch.float32
                        ),
                    },
                    expected_logits=self.tensor(
                        [[1] * self.vocab_size], dtype=torch.float32
                    ),
                ),
                Step(
                    type=StepType.OUTPUT,
                    token_ids=[1, 2, 2],
                    expected_tensors={
                        "frequency_penalties": self.tensor(
                            [[frequency_penalty] * self.vocab_size], dtype=torch.float32
                        ),
                        "cumulated_frequency_penalties": self.tensor(
                            [
                                [
                                    frequency_penalty * i if i in {1, 2} else 0.0
                                    for i in range(self.vocab_size)
                                ],
                            ],
                            dtype=torch.float32,
                        ),
                    },
                    expected_logits=self.tensor(
                        [
                            [
                                1.0 - frequency_penalty * i if i in {1, 2} else 1.0
                                for i in range(self.vocab_size)
                            ],
                        ],
                        dtype=torch.float32,
                    ),
                ),
            ],
        )

    def create_test_subjects(self) -> typing.List[Subject]:
        self.enabled = self._create_subject(frequency_penalty=FREQUENCY_PENALTY)
        self.disabled = self._create_subject(frequency_penalty=0.0)


if __name__ == "__main__":
    unittest.main()
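The expected tensors above encode the frequency-penalty rule: every occurrence of a token in the output subtracts frequency_penalty from that token's logit, so token 2 in [1, 2, 2] loses 2 * 0.12. A minimal standalone sketch of that update, inferred from the test's expected values rather than taken from the BatchedFrequencyPenalizer implementation itself:

import torch

def apply_frequency_penalty(logits, output_ids, frequency_penalty):
    # Hypothetical helper. logits: (batch, vocab_size) float;
    # output_ids: (batch, num_output_tokens) int64.
    counts = torch.zeros_like(logits)
    # Count how many times each vocab id appears in the generated output.
    counts.scatter_add_(1, output_ids, torch.ones_like(output_ids, dtype=logits.dtype))
    return logits - frequency_penalty * counts

print(apply_frequency_penalty(torch.ones(1, 5), torch.tensor([[1, 2, 2]]), 0.12))
# tensor([[1.0000, 0.8800, 0.7600, 1.0000, 1.0000]])  -- matches the OUTPUT step above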
152  test/srt/sampling/penaltylib/penalizers/test_min_new_tokens.py  Normal file
@@ -0,0 +1,152 @@
import typing
import unittest

import torch

from sglang.srt.sampling.penaltylib.penalizers.min_new_tokens import (
    BatchedMinNewTokensPenalizer,
)
from sglang.test.srt.sampling.penaltylib.utils import (
    BaseBatchedPenalizerTest,
    MockSamplingParams,
    Step,
    StepType,
    Subject,
)

MIN_NEW_TOKENS = 2
EOS_TOKEN_ID = 4
STOP_TOKEN_ID = 3

ALL_STOP_TOKEN_IDS = {STOP_TOKEN_ID, EOS_TOKEN_ID}


class TestBatchedMinNewTokensPenalizer(BaseBatchedPenalizerTest):
    Penalizer = BatchedMinNewTokensPenalizer

    def _create_subject(self, min_new_tokens: int) -> Subject:
        return Subject(
            eos_token_id=EOS_TOKEN_ID,
            sampling_params=MockSamplingParams(
                min_new_tokens=min_new_tokens,
                stop_token_ids={STOP_TOKEN_ID},
            ),
            steps=[
                Step(
                    type=StepType.INPUT,
                    token_ids=[0, 1, 2],
                    expected_tensors={
                        "min_new_tokens": self.tensor(
                            [[min_new_tokens]], dtype=torch.int32
                        ),
                        "stop_token_penalties": self.tensor(
                            [
                                [
                                    float("-inf") if i in ALL_STOP_TOKEN_IDS else 0
                                    for i in range(self.vocab_size)
                                ]
                            ],
                            dtype=torch.float32,
                        ),
                        "len_output_tokens": self.tensor([[0]], dtype=torch.int32),
                    },
                    expected_logits=(
                        self.tensor(
                            [
                                [
                                    float("-inf") if i in ALL_STOP_TOKEN_IDS else 1
                                    for i in range(self.vocab_size)
                                ]
                            ],
                            dtype=torch.float32,
                        )
                        if min_new_tokens > 0
                        else torch.ones(
                            (1, self.vocab_size),
                            dtype=torch.float32,
                            device=self.device,
                        )
                    ),
                ),
                Step(
                    type=StepType.OUTPUT,
                    token_ids=[0],
                    expected_tensors={
                        "min_new_tokens": self.tensor(
                            [[min_new_tokens]], dtype=torch.int32
                        ),
                        "stop_token_penalties": self.tensor(
                            [
                                [
                                    float("-inf") if i in ALL_STOP_TOKEN_IDS else 0
                                    for i in range(self.vocab_size)
                                ]
                            ],
                            dtype=torch.float32,
                        ),
                        "len_output_tokens": self.tensor([[1]], dtype=torch.int32),
                    },
                    expected_logits=(
                        self.tensor(
                            [
                                [
                                    float("-inf") if i in ALL_STOP_TOKEN_IDS else 1
                                    for i in range(self.vocab_size)
                                ]
                            ],
                            dtype=torch.float32,
                        )
                        if min_new_tokens > 1
                        else torch.ones(
                            (1, self.vocab_size),
                            dtype=torch.float32,
                            device=self.device,
                        )
                    ),
                ),
                Step(
                    type=StepType.OUTPUT,
                    token_ids=[0],
                    expected_tensors={
                        "min_new_tokens": self.tensor(
                            [[min_new_tokens]], dtype=torch.int32
                        ),
                        "stop_token_penalties": self.tensor(
                            [
                                [
                                    float("-inf") if i in ALL_STOP_TOKEN_IDS else 0
                                    for i in range(self.vocab_size)
                                ]
                            ],
                            dtype=torch.float32,
                        ),
                        "len_output_tokens": self.tensor([[2]], dtype=torch.int32),
                    },
                    expected_logits=(
                        self.tensor(
                            [
                                [
                                    float("-inf") if i in ALL_STOP_TOKEN_IDS else 1
                                    for i in range(self.vocab_size)
                                ]
                            ],
                            dtype=torch.float32,
                        )
                        if min_new_tokens > 2
                        else torch.ones(
                            (1, self.vocab_size),
                            dtype=torch.float32,
                            device=self.device,
                        )
                    ),
                ),
            ],
        )

    def create_test_subjects(self) -> typing.List[Subject]:
        self.enabled = self._create_subject(min_new_tokens=MIN_NEW_TOKENS)
        self.disabled = self._create_subject(min_new_tokens=0)


if __name__ == "__main__":
    unittest.main()
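The three steps above assert that the stop and EOS token logits are forced to -inf until at least min_new_tokens outputs exist (masked at lengths 0 and 1, untouched from length 2 with MIN_NEW_TOKENS = 2). A rough standalone sketch of that masking rule, inferred from the expected tensors and not taken from BatchedMinNewTokensPenalizer itself:

import torch

def mask_stop_tokens(logits, len_output_tokens, min_new_tokens, stop_token_ids):
    # Hypothetical helper. logits: (batch, vocab_size) float;
    # len_output_tokens, min_new_tokens: (batch, 1) int tensors.
    stop_token_penalties = torch.zeros_like(logits)
    stop_token_penalties[:, list(stop_token_ids)] = float("-inf")
    # Only rows that have not yet produced min_new_tokens outputs get the mask.
    needs_mask = len_output_tokens < min_new_tokens
    return torch.where(needs_mask, logits + stop_token_penalties, logits)

logits = torch.ones(1, 5)
print(mask_stop_tokens(logits, torch.tensor([[1]]), torch.tensor([[2]]), {3, 4}))
# tensor([[1., 1., 1., -inf, -inf]])  -- as in the second Step (len_output_tokens == 1)
print(mask_stop_tokens(logits, torch.tensor([[2]]), torch.tensor([[2]]), {3, 4}))
# tensor([[1., 1., 1., 1., 1.]])      -- as in the third Step (len_output_tokens == 2)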
@@ -0,0 +1,80 @@
import typing
import unittest

import torch

from sglang.srt.sampling.penaltylib.penalizers.presence_penalty import (
    BatchedPresencePenalizer,
)
from sglang.test.srt.sampling.penaltylib.utils import (
    BaseBatchedPenalizerTest,
    MockSamplingParams,
    Step,
    StepType,
    Subject,
)

PRESENCE_PENALTY = 0.12


class TestBatchedPresencePenalizer(BaseBatchedPenalizerTest):
    Penalizer = BatchedPresencePenalizer

    def _create_subject(self, presence_penalty: float) -> Subject:
        return Subject(
            sampling_params=MockSamplingParams(
                presence_penalty=presence_penalty,
            ),
            steps=[
                Step(
                    type=StepType.INPUT,
                    token_ids=[0, 1, 2],
                    expected_tensors={
                        "presence_penalties": self.tensor(
                            [[presence_penalty] * self.vocab_size], dtype=torch.float32
                        ),
                        "cumulated_presence_penalties": self.tensor(
                            [[0.0] * self.vocab_size], dtype=torch.float32
                        ),
                    },
                    expected_logits=self.tensor(
                        [[1] * self.vocab_size], dtype=torch.float32
                    ),
                ),
                Step(
                    type=StepType.OUTPUT,
                    token_ids=[1, 2, 2],
                    expected_tensors={
                        "presence_penalties": self.tensor(
                            [[presence_penalty] * self.vocab_size], dtype=torch.float32
                        ),
                        "cumulated_presence_penalties": self.tensor(
                            [
                                [
                                    presence_penalty if i in {1, 2} else 0.0
                                    for i in range(self.vocab_size)
                                ],
                            ],
                            dtype=torch.float32,
                        ),
                    },
                    expected_logits=self.tensor(
                        [
                            [
                                1.0 - presence_penalty if i in {1, 2} else 1.0
                                for i in range(self.vocab_size)
                            ],
                        ],
                        dtype=torch.float32,
                    ),
                ),
            ],
        )

    def create_test_subjects(self) -> typing.List[Subject]:
        self.enabled = self._create_subject(presence_penalty=PRESENCE_PENALTY)
        self.disabled = self._create_subject(presence_penalty=0.0)


if __name__ == "__main__":
    unittest.main()
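Unlike the frequency penalty, the expected values here subtract presence_penalty at most once per token, no matter how often it was generated: with output [1, 2, 2], tokens 1 and 2 each lose exactly 0.12. A minimal sketch of that rule as the test implies it (hypothetical helper, not the BatchedPresencePenalizer code):

import torch

def apply_presence_penalty(logits, output_ids, presence_penalty):
    # Hypothetical helper. logits: (batch, vocab_size) float;
    # output_ids: (batch, num_output_tokens) int64.
    appeared = torch.zeros_like(logits)
    appeared.scatter_(1, output_ids, 1.0)  # 1 wherever the token occurred at all
    return logits - presence_penalty * appeared

print(apply_presence_penalty(torch.ones(1, 5), torch.tensor([[1, 2, 2]]), 0.12))
# tensor([[1.0000, 0.8800, 0.8800, 1.0000, 1.0000]])  -- one flat 0.12 per seen token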
@@ -0,0 +1,87 @@
import typing
import unittest

import torch

from sglang.srt.sampling.penaltylib.penalizers.repetition_penalty import (
    BatchedRepetitionPenalizer,
)
from sglang.test.srt.sampling.penaltylib.utils import (
    BaseBatchedPenalizerTest,
    MockSamplingParams,
    Step,
    StepType,
    Subject,
)

REPETITION_PENALTY = 2.0


class TestBatchedRepetitionPenalizer(BaseBatchedPenalizerTest):
    Penalizer = BatchedRepetitionPenalizer

    def _create_subject(self, repetition_penalty: float) -> Subject:
        l = 1.0 / repetition_penalty
        return Subject(
            sampling_params=MockSamplingParams(
                repetition_penalty=repetition_penalty,
            ),
            steps=[
                Step(
                    type=StepType.INPUT,
                    token_ids=[0, 1, 2],
                    expected_tensors={
                        "repetition_penalties": self.tensor(
                            [[repetition_penalty] * self.vocab_size],
                            dtype=torch.float32,
                        ),
                        "cumulated_repetition_penalties": (
                            self.tensor(
                                [[2.0, 2.0, 2.0, 1.0, 1.0]], dtype=torch.float32
                            )
                            if repetition_penalty != 1.0
                            else self.tensor(
                                [[1.0] * self.vocab_size], dtype=torch.float32
                            )
                        ),
                    },
                    expected_logits=(
                        self.tensor([[l, l, l, 1.0, 1.0]], dtype=torch.float32)
                        if repetition_penalty != 1.0
                        else self.tensor([[1.0] * self.vocab_size], dtype=torch.float32)
                    ),
                ),
                Step(
                    type=StepType.OUTPUT,
                    token_ids=[0, 1, 3],
                    expected_tensors={
                        "repetition_penalties": self.tensor(
                            [[repetition_penalty] * self.vocab_size],
                            dtype=torch.float32,
                        ),
                        "cumulated_repetition_penalties": (
                            self.tensor(
                                [[2.0, 2.0, 2.0, 2.0, 1.0]], dtype=torch.float32
                            )
                            if repetition_penalty != 1.0
                            else self.tensor(
                                [[1.0] * self.vocab_size], dtype=torch.float32
                            )
                        ),
                    },
                    expected_logits=(
                        self.tensor([[l, l, l, l, 1.0]], dtype=torch.float32)
                        if repetition_penalty != 1.0
                        else self.tensor([[1.0] * self.vocab_size], dtype=torch.float32)
                    ),
                ),
            ],
        )

    def create_test_subjects(self) -> typing.List[Subject]:
        self.enabled = self._create_subject(repetition_penalty=REPETITION_PENALTY)
        self.disabled = self._create_subject(repetition_penalty=1.0)


if __name__ == "__main__":
    unittest.main()
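Here the expected logits for already-seen tokens are divided by the penalty (1.0 becomes l = 1.0 / repetition_penalty), the usual repetition-penalty formulation. A hedged standalone sketch of that rule matching the test's numbers (hypothetical helper; the test only exercises positive logits, the negative-logit branch below is the conventional counterpart and is not asserted above):

import torch

def apply_repetition_penalty(logits, seen_ids, repetition_penalty):
    # Hypothetical helper. logits: (batch, vocab_size) float;
    # seen_ids: (batch, num_seen_tokens) int64.
    penalties = torch.ones_like(logits)
    penalties.scatter_(1, seen_ids, repetition_penalty)
    # Divide positive logits, multiply negative ones (standard formulation).
    return torch.where(logits > 0, logits / penalties, logits * penalties)

print(apply_repetition_penalty(torch.ones(1, 5), torch.tensor([[0, 1, 2]]), 2.0))
# tensor([[0.5000, 0.5000, 0.5000, 1.0000, 1.0000]])  -- [[l, l, l, 1.0, 1.0]] above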
@@ -0,0 +1,75 @@
import json
import unittest

import requests

from sglang.srt.utils import kill_child_process
from sglang.test.test_utils import DEFAULT_MODEL_NAME_FOR_TEST, popen_launch_server


class TestBatchPenalizerE2E(unittest.TestCase):

    @classmethod
    def setUpClass(cls):
        cls.model = DEFAULT_MODEL_NAME_FOR_TEST
        cls.base_url = f"http://127.0.0.1:{8157}"
        cls.process = popen_launch_server(
            cls.model,
            cls.base_url,
            timeout=300,
            other_args=(
                "--random-seed",
                "0",
            ),
        )

    @classmethod
    def tearDownClass(cls):
        kill_child_process(cls.process.pid)

    def run_decode(
        self,
        return_logprob=True,
        top_logprobs_num=5,
        return_text=True,
        n=1,
        **sampling_params,
    ):
        response = requests.post(
            self.base_url + "/generate",
            json={
                # prompt that is supposed to generate < 32 tokens
                "text": "<|start_header_id|>user<|end_header_id|>\n\nWhat is the answer for 1 + 1 = ?<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n",
                "sampling_params": {
                    "max_new_tokens": 32,
                    "n": n,
                    **sampling_params,
                },
                "stream": False,
                "return_logprob": return_logprob,
                "top_logprobs_num": top_logprobs_num,
                "return_text_in_logprobs": return_text,
                "logprob_start_len": 0,
            },
        )
        print(json.dumps(response.json()))
        print("=" * 100)

    def test_default_values(self):
        self.run_decode()

    def test_frequency_penalty(self):
        self.run_decode(frequency_penalty=2)

    def test_min_new_tokens(self):
        self.run_decode(min_new_tokens=16)

    def test_presence_penalty(self):
        self.run_decode(presence_penalty=2)

    def test_repetition_penalty(self):
        self.run_decode(repetition_penalty=2)


if __name__ == "__main__":
    unittest.main(warnings="ignore")
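Since run_decode forwards **sampling_params straight into the request's sampling_params, the penalties tested one at a time above can also be combined in a single request. A hypothetical direct call against the same local server (payload shape and port mirror run_decode; the penalty values are purely illustrative and assume the server from setUpClass is running):

import requests

response = requests.post(
    "http://127.0.0.1:8157/generate",
    json={
        "text": "What is the answer for 1 + 1 = ?",
        "sampling_params": {
            "max_new_tokens": 32,
            "frequency_penalty": 0.5,
            "presence_penalty": 0.3,
            "repetition_penalty": 1.2,
            "min_new_tokens": 4,
        },
        "stream": False,
    },
)
print(response.json())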