Improve: Token-In Token-Out Usage for RLHF (#2843)

Author: Shi Shuai
Date: 2025-01-11 23:14:26 +00:00
Committed by: GitHub
Parent: 197cbf9bab
Commit: c4f9707e16

12 changed files with 166 additions and 128 deletions
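
For context, the token-in token-out pattern this commit consolidates works as follows: the RLHF client tokenizes prompts itself, sends raw input_ids to a server launched with --skip-tokenizer-init, and receives generated token IDs back for its own detokenization. A minimal client-side sketch (the model name and address are placeholder assumptions, not taken from this commit; the request/response schema mirrors the test below):

    import requests
    from transformers import AutoTokenizer

    # Placeholder model; any model served by the SGLang instance works.
    tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-3.2-1B-Instruct")
    input_ids = tokenizer("The capital of France is")["input_ids"]

    # Assumes a server started with --skip-tokenizer-init, e.g.:
    #   python3 -m sglang.launch_server --model-path <model> --skip-tokenizer-init
    response = requests.post(
        "http://127.0.0.1:30000/generate",  # assumed default local address
        json={
            "input_ids": input_ids,
            "sampling_params": {"temperature": 0, "max_new_tokens": 32},
        },
    )
    item = response.json()
    # With tokenizer init skipped, the server returns token IDs, not text;
    # detokenization stays on the client.
    print(tokenizer.decode(item["token_ids"], skip_special_tokens=True))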

View File

@@ -45,7 +45,6 @@ suites = {
         "test_vision_chunked_prefill.py",
         "test_vision_openai_server.py",
         "test_session_control.py",
-        "test_engine_token_ids.py",
     ],
     "nightly": [
         "test_nightly_gsm8k_eval.py",

View File

@@ -1,45 +0,0 @@
-import unittest
-
-from transformers import AutoTokenizer
-
-import sglang as sgl
-from sglang.test.test_utils import DEFAULT_SMALL_MODEL_NAME_FOR_TEST
-
-
-class TestEngineTokenIds(unittest.TestCase):
-    def test_token_ids_in_generate(self):
-        llm = sgl.Engine(
-            model_path=DEFAULT_SMALL_MODEL_NAME_FOR_TEST, return_token_ids=True
-        )
-        tokenizer = AutoTokenizer.from_pretrained(DEFAULT_SMALL_MODEL_NAME_FOR_TEST)
-
-        prompts = [
-            "Hello, my name is",
-            "The president of the United States is",
-            "The capital of France is",
-            "The future of AI is",
-        ]
-        sampling_params = {"temperature": 0, "top_p": 0.95}
-
-        outputs = llm.generate(prompts, sampling_params)
-        for prompt, output in zip(prompts, outputs):
-            decoded_input = tokenizer.decode(
-                output["input_ids"], skip_special_tokens=True
-            )
-            assert (decoded_input in prompt) or (
-                prompt in decoded_input
-            ), f"Decoded input: {decoded_input} mismatch for: {prompt}"
-
-            decoded_output = tokenizer.decode(
-                output["output_ids"], skip_special_tokens=True
-            )
-            assert (decoded_output in output["text"]) or (
-                output["text"] in decoded_output
-            ), f"Decoded output: {decoded_output} mismatch for: {output['text']}"
-
-        llm.shutdown()
-
-
-if __name__ == "__main__":
-    unittest.main()
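
This engine-level return_token_ids test is deleted; the commit appears to fold token-in token-out coverage into the --skip-tokenizer-init server test below. Its core round-trip check, decoding both sides and accepting containment in either direction (detokenization may drop special tokens or normalize whitespace), is worth keeping in mind as a standalone helper; a sketch with illustrative names:

    def assert_round_trip(tokenizer, token_ids, text):
        # Decoding token IDs rarely reproduces the text byte-for-byte,
        # so accept containment in either direction, not strict equality.
        decoded = tokenizer.decode(token_ids, skip_special_tokens=True)
        assert (decoded in text) or (text in decoded), f"{decoded!r} vs. {text!r}"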

View File

@@ -1,11 +1,8 @@
 """
 python3 -m unittest test_skip_tokenizer_init.TestSkipTokenizerInit.test_parallel_sample
 """

 import json
 import unittest

 import requests
+from transformers import AutoTokenizer

 from sglang.srt.utils import kill_process_tree
 from sglang.test.test_utils import (
@@ -15,35 +12,63 @@ from sglang.test.test_utils import (
     popen_launch_server,
 )

+_server_process = None
+_base_url = None
+_tokenizer = None
+
+
+def setUpModule():
+    """
+    Launch the server once before all tests and initialize the tokenizer.
+    """
+    global _server_process, _base_url, _tokenizer
+
+    _server_process = popen_launch_server(
+        DEFAULT_SMALL_MODEL_NAME_FOR_TEST,
+        DEFAULT_URL_FOR_TEST,
+        timeout=DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
+        other_args=["--skip-tokenizer-init"],
+    )
+    _base_url = DEFAULT_URL_FOR_TEST
+    _tokenizer = AutoTokenizer.from_pretrained(
+        DEFAULT_SMALL_MODEL_NAME_FOR_TEST, use_fast=False
+    )
+    print(">>> setUpModule: Server launched, tokenizer ready")
+
+
+def tearDownModule():
+    """
+    Terminate the server once after all tests have completed.
+    """
+    global _server_process
+    if _server_process is not None:
+        kill_process_tree(_server_process.pid)
+        _server_process = None
+        print(">>> tearDownModule: Server terminated")
+
+
 class TestSkipTokenizerInit(unittest.TestCase):
-    @classmethod
-    def setUpClass(cls):
-        cls.model = DEFAULT_SMALL_MODEL_NAME_FOR_TEST
-        cls.base_url = DEFAULT_URL_FOR_TEST
-        cls.process = popen_launch_server(
-            cls.model,
-            cls.base_url,
-            timeout=DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
-            other_args=["--skip-tokenizer-init"],
-        )
-
-    @classmethod
-    def tearDownClass(cls):
-        kill_process_tree(cls.process.pid)
-
-    def run_decode(self, return_logprob=False, top_logprobs_num=0, n=1):
-        max_new_tokens = 32
-        input_ids = [128000, 791, 6864, 315, 9822, 374]  # The capital of France is
+    def run_decode(
+        self,
+        prompt_text="The capital of France is",
+        max_new_tokens=32,
+        return_logprob=False,
+        top_logprobs_num=0,
+        n=1,
+    ):
+        input_ids = _tokenizer(prompt_text, return_tensors="pt")["input_ids"][
+            0
+        ].tolist()

         response = requests.post(
-            self.base_url + "/generate",
+            _base_url + "/generate",
             json={
                 "input_ids": input_ids,
                 "sampling_params": {
                     "temperature": 0 if n == 1 else 0.5,
                     "max_new_tokens": max_new_tokens,
                     "n": n,
-                    "stop_token_ids": [119690],
+                    "stop_token_ids": [_tokenizer.eos_token_id],
                 },
                 "stream": False,
                 "return_logprob": return_logprob,
@@ -52,25 +77,37 @@ class TestSkipTokenizerInit(unittest.TestCase):
             },
         )
         ret = response.json()
-        print(json.dumps(ret))
+        print(json.dumps(ret, indent=2))

         def assert_one_item(item):
-            self.assertEqual(
-                len(item["token_ids"]), item["meta_info"]["completion_tokens"]
-            )
-            self.assertEqual(len(item["token_ids"]), max_new_tokens)
-            assert item["meta_info"]["prompt_tokens"] == len(input_ids)
+            if item["meta_info"]["finish_reason"]["type"] == "stop":
+                self.assertEqual(
+                    item["meta_info"]["finish_reason"]["matched"],
+                    _tokenizer.eos_token_id,
+                )
+            elif item["meta_info"]["finish_reason"]["type"] == "length":
+                self.assertEqual(
+                    len(item["token_ids"]), item["meta_info"]["completion_tokens"]
+                )
+                self.assertEqual(len(item["token_ids"]), max_new_tokens)
+            self.assertEqual(item["meta_info"]["prompt_tokens"], len(input_ids))

             if return_logprob:
-                assert len(item["meta_info"]["input_token_logprobs"]) == len(
-                    input_ids
-                ), f'{len(item["meta_info"]["input_token_logprobs"])} vs. {len(input_ids)}'
-                assert len(item["meta_info"]["output_token_logprobs"]) == max_new_tokens
+                self.assertEqual(
+                    len(item["meta_info"]["input_token_logprobs"]),
+                    len(input_ids),
+                    f'{len(item["meta_info"]["input_token_logprobs"])} mismatch with {len(input_ids)}',
+                )
+                self.assertEqual(
+                    len(item["meta_info"]["output_token_logprobs"]),
+                    max_new_tokens,
+                )

+        # Determine whether to assert a single item or multiple items based on n
         if n == 1:
             assert_one_item(ret)
         else:
-            assert len(ret) == n
+            self.assertEqual(len(ret), n)
             for i in range(n):
                 assert_one_item(ret[i])
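
As the branches above encode, /generate returns a single object when n == 1 and a list of n objects under parallel sampling, each carrying its own token_ids and meta_info. A client can normalize the two shapes; a sketch continuing from the response above (schema as asserted by the test):

    ret = response.json()
    items = ret if isinstance(ret, list) else [ret]  # list only when n > 1
    for item in items:
        output_ids = item["token_ids"]
        reason = item["meta_info"]["finish_reason"]["type"]  # "stop" or "length"
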
@@ -84,10 +121,10 @@ class TestSkipTokenizerInit(unittest.TestCase):
     def test_logprob(self):
         for top_logprobs_num in [0, 3]:
-            self.run_decode(
-                return_logprob=True,
-                top_logprobs_num=top_logprobs_num,
-            )
+            self.run_decode(return_logprob=True, top_logprobs_num=top_logprobs_num)
+
+    def test_eos_behavior(self):
+        self.run_decode(max_new_tokens=256)


 if __name__ == "__main__":
     unittest.main()
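
Following the docstring's convention, the new EOS test can be run on its own:

    python3 -m unittest test_skip_tokenizer_init.TestSkipTokenizerInit.test_eos_behavior

With stop_token_ids set to the tokenizer's EOS id and max_new_tokens raised to 256, a finish_reason of type "stop" with matched equal to eos_token_id indicates natural termination, while "length" means the cap was hit first; the assertions above cover both cases.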