example: add vlm to token in & out example (#3941)

Author: Mick
Co-authored-by: zhaochenyang20 <zhaochen20@outlook.com>
Date: 2025-03-05 14:18:26 +08:00
Committed by: GitHub
Commit: 583d6af71b
Parent: e074d84e5b
9 changed files with 154 additions and 29 deletions
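
The diff below extends the skip-tokenizer-init test so the token-in/token-out flow is also exercised against a vision-language model. For orientation, this is the pattern under test: the client tokenizes locally, posts raw input_ids to /generate, and detokenizes the returned output_ids itself. A minimal sketch of that flow, assuming a server already launched with --skip-tokenizer-init (the model name and URL below are placeholders, not values from this commit):

import requests
from transformers import AutoTokenizer

base_url = "http://127.0.0.1:30000"  # placeholder endpoint
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-3.2-1B-Instruct")  # placeholder model

# Tokenize client-side; the server never sees raw text.
input_ids = tokenizer("The capital of France is", return_tensors="pt")["input_ids"][0].tolist()

response = requests.post(
    base_url + "/generate",
    json={
        "input_ids": input_ids,
        "sampling_params": {"temperature": 0, "max_new_tokens": 32},
    },
)

# Detokenize client-side from the returned token IDs.
output_ids = response.json()["output_ids"]
print(tokenizer.decode(output_ids, skip_special_tokens=True))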


@@ -5,13 +5,18 @@ python3 -m unittest test_skip_tokenizer_init.TestSkipTokenizerInit.run_decode_st
 import json
 import unittest
+from io import BytesIO
 
 import requests
-from transformers import AutoTokenizer
+from PIL import Image
+from transformers import AutoProcessor, AutoTokenizer
 
+from sglang.lang.chat_template import get_chat_template_by_model_path
 from sglang.srt.utils import kill_process_tree
 from sglang.test.test_utils import (
+    DEFAULT_IMAGE_URL,
     DEFAULT_SMALL_MODEL_NAME_FOR_TEST,
+    DEFAULT_SMALL_VLM_MODEL_NAME,
     DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
     DEFAULT_URL_FOR_TEST,
     popen_launch_server,
@@ -29,6 +34,7 @@ class TestSkipTokenizerInit(unittest.TestCase):
             timeout=DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
             other_args=["--skip-tokenizer-init", "--stream-output"],
         )
+        cls.eos_token_id = [119690]
         cls.tokenizer = AutoTokenizer.from_pretrained(
             DEFAULT_SMALL_MODEL_NAME_FOR_TEST, use_fast=False
         )
@@ -45,9 +51,7 @@ class TestSkipTokenizerInit(unittest.TestCase):
         top_logprobs_num=0,
         n=1,
     ):
-        input_ids = self.tokenizer(prompt_text, return_tensors="pt")["input_ids"][
-            0
-        ].tolist()
+        input_ids = self.get_input_ids(prompt_text)
         response = requests.post(
             self.base_url + "/generate",
@@ -104,7 +108,7 @@ class TestSkipTokenizerInit(unittest.TestCase):
     def run_decode_stream(self, return_logprob=False, top_logprobs_num=0, n=1):
         max_new_tokens = 32
-        input_ids = [128000, 791, 6864, 315, 9822, 374]  # The capital of France is
+        input_ids = self.get_input_ids("The capital of France is")
         requests.post(self.base_url + "/flush_cache")
         response = requests.post(
             self.base_url + "/generate",
@@ -114,7 +118,7 @@ class TestSkipTokenizerInit(unittest.TestCase):
                     "temperature": 0 if n == 1 else 0.5,
                     "max_new_tokens": max_new_tokens,
                     "n": n,
-                    "stop_token_ids": [119690],
+                    "stop_token_ids": self.eos_token_id,
                 },
                 "stream": False,
                 "return_logprob": return_logprob,
@@ -125,6 +129,9 @@ class TestSkipTokenizerInit(unittest.TestCase):
         ret = response.json()
         print(json.dumps(ret))
         output_ids = ret["output_ids"]
+        print("output from non-streaming request:")
+        print(output_ids)
+        print(self.tokenizer.decode(output_ids, skip_special_tokens=True))
 
         requests.post(self.base_url + "/flush_cache")
         response_stream = requests.post(
@@ -135,7 +142,7 @@ class TestSkipTokenizerInit(unittest.TestCase):
                     "temperature": 0 if n == 1 else 0.5,
                     "max_new_tokens": max_new_tokens,
                     "n": n,
-                    "stop_token_ids": [119690],
+                    "stop_token_ids": self.eos_token_id,
                 },
                 "stream": True,
                 "return_logprob": return_logprob,
@@ -143,13 +150,10 @@ class TestSkipTokenizerInit(unittest.TestCase):
                 "logprob_start_len": 0,
             },
         )
-        ret = response.json()
-        output_ids = ret["output_ids"]
-        print("output from non-streaming request:")
-        print(output_ids)
 
         response_stream_json = []
         for line in response_stream.iter_lines():
+            print(line)
             if line.startswith(b"data: ") and line[6:] != b"[DONE]":
                 response_stream_json.append(json.loads(line[6:]))
         out_stream_ids = []
@@ -157,6 +161,8 @@ class TestSkipTokenizerInit(unittest.TestCase):
             out_stream_ids += x["output_ids"]
         print("output from streaming request:")
         print(out_stream_ids)
+        print(self.tokenizer.decode(out_stream_ids, skip_special_tokens=True))
+
         assert output_ids == out_stream_ids
 
     def test_simple_decode(self):
@@ -175,6 +181,46 @@ class TestSkipTokenizerInit(unittest.TestCase):
     def test_simple_decode_stream(self):
         self.run_decode_stream()
 
+    def get_input_ids(self, prompt_text) -> list[int]:
+        input_ids = self.tokenizer(prompt_text, return_tensors="pt")["input_ids"][
+            0
+        ].tolist()
+        return input_ids
+
+
+class TestSkipTokenizerInitVLM(TestSkipTokenizerInit):
+    @classmethod
+    def setUpClass(cls):
+        cls.image_url = DEFAULT_IMAGE_URL
+        response = requests.get(cls.image_url)
+        cls.image = Image.open(BytesIO(response.content))
+        cls.model = DEFAULT_SMALL_VLM_MODEL_NAME
+        cls.tokenizer = AutoTokenizer.from_pretrained(cls.model, use_fast=False)
+        cls.processor = AutoProcessor.from_pretrained(cls.model, trust_remote_code=True)
+        cls.base_url = DEFAULT_URL_FOR_TEST
+        cls.process = popen_launch_server(
+            cls.model,
+            cls.base_url,
+            timeout=DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
+            other_args=["--skip-tokenizer-init"],
+        )
+        cls.eos_token_id = [cls.tokenizer.eos_token_id]
+
+    def get_input_ids(self, _prompt_text) -> list[int]:
+        chat_template = get_chat_template_by_model_path(self.model)
+        text = f"{chat_template.image_token}What is in this picture?"
+        inputs = self.processor(
+            text=[text],
+            images=[self.image],
+            return_tensors="pt",
+        )
+        return inputs.input_ids[0].tolist()
+
+    def test_simple_decode_stream(self):
+        # TODO mick
+        pass
+
+
 if __name__ == "__main__":
     unittest.main()
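
For completeness, here is a client-side sketch of the VLM variant that TestSkipTokenizerInitVLM introduces: the processor expands the chat template's image placeholder into image token IDs, and those IDs are posted to /generate exactly like plain-text tokens. The model name, image URL, and image-token string below are illustrative stand-ins for DEFAULT_SMALL_VLM_MODEL_NAME, DEFAULT_IMAGE_URL, and chat_template.image_token, not values taken from this commit:

from io import BytesIO

import requests
from PIL import Image
from transformers import AutoProcessor, AutoTokenizer

base_url = "http://127.0.0.1:30000"  # placeholder server started with --skip-tokenizer-init
model = "Qwen/Qwen2-VL-2B-Instruct"  # placeholder VLM
image_token = "<|image_pad|>"  # placeholder; the test reads this from the chat template

processor = AutoProcessor.from_pretrained(model, trust_remote_code=True)
tokenizer = AutoTokenizer.from_pretrained(model)
image = Image.open(BytesIO(requests.get("https://example.com/example.jpg").content))

# The processor interleaves text tokens with image placeholder tokens,
# so the resulting input_ids already encode the multimodal prompt.
inputs = processor(
    text=[f"{image_token}What is in this picture?"],
    images=[image],
    return_tensors="pt",
)
input_ids = inputs.input_ids[0].tolist()

response = requests.post(
    base_url + "/generate",
    json={
        "input_ids": input_ids,
        "sampling_params": {
            "temperature": 0,
            "max_new_tokens": 32,
            "stop_token_ids": [tokenizer.eos_token_id],
        },
    },
)
print(tokenizer.decode(response.json()["output_ids"], skip_special_tokens=True))

As in the test, detokenization happens entirely on the client; with --skip-tokenizer-init the server both accepts and returns token IDs only.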