Test the case when max_new_tokens is very large (#1038)
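The new test drives the server with requests that ask for an enormous generation length while the server is launched with a tiny token pool; the expectation is that the requested max_new_tokens gets clipped so all requests can still be scheduled concurrently. As context, a minimal sketch of that clipping, assuming only the SGLANG_CLIP_MAX_NEW_TOKENS environment variable the test sets below; the function name and fallback cap are illustrative, not sglang's actual internals:

import os

def clip_max_new_tokens(requested: int, fallback_cap: int = 4096) -> int:
    # Read the cap from the same env var the new test sets to "256";
    # fallback_cap is a made-up default, for illustration only.
    cap = int(os.environ.get("SGLANG_CLIP_MAX_NEW_TOKENS", fallback_cap))
    return min(requested, cap)

print(clip_max_new_tokens(10_000_000))  # 4096 here; 256 under the test's env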
test/srt/run_suite.py
@@ -5,13 +5,15 @@ from sglang.test.test_utils import run_unittest_files
 
 suites = {
     "minimal": [
-        "test_eval_accuracy.py",
-        "test_openai_server.py",
-        "test_vision_openai_server.py",
-        "test_embedding_openai_server.py",
         "test_chunked_prefill.py",
+        "test_embedding_openai_server.py",
+        "test_eval_accuracy.py",
+        "test_large_max_new_tokens.py",
+        "test_openai_server.py",
+        "test_skip_tokenizer_init.py",
         "test_torch_compile.py",
         "test_models_from_modelscope.py",
+        "test_vision_openai_server.py",
         "models/test_generation_models.py",
         "models/test_embedding_models.py",
         "sampling/penaltylib",
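For reference, run_suite.py hands the selected list to run_unittest_files. A rough sketch of that driver (flag parsing omitted; the per-file timeout is an assumed value):

from sglang.test.test_utils import run_unittest_files

# Roughly what run_suite.py does with the suites dict edited above:
files = suites["minimal"]
exit_code = run_unittest_files(files, 1000)
exit(exit_code)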
test/srt/test_large_max_new_tokens.py (new file)
@@ -0,0 +1,72 @@
import json
import os
import time
import unittest
from concurrent.futures import ThreadPoolExecutor

import openai

from sglang.srt.hf_transformers_utils import get_tokenizer
from sglang.srt.utils import kill_child_process
from sglang.test.test_utils import DEFAULT_MODEL_NAME_FOR_TEST, popen_launch_server


class TestOpenAIServer(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.model = DEFAULT_MODEL_NAME_FOR_TEST
        cls.base_url = "http://127.0.0.1:8157"
        cls.api_key = "sk-123456"
        cls.process = popen_launch_server(
            cls.model,
            cls.base_url,
            timeout=300,
            api_key=cls.api_key,
            # A deliberately tiny token pool; without clipping, requests that
            # reserve a very large max_new_tokens would not fit in it together.
            other_args=("--max-total-tokens", "1024"),
            # Clip every request's max_new_tokens to 256, no matter how large
            # a value the client asks for.
            env={"SGLANG_CLIP_MAX_NEW_TOKENS": "256", **os.environ},
            return_stdout_stderr=True,
        )
        cls.base_url += "/v1"
        cls.tokenizer = get_tokenizer(DEFAULT_MODEL_NAME_FOR_TEST)

    @classmethod
    def tearDownClass(cls):
        kill_child_process(cls.process.pid)

    def run_chat_completion(self):
        client = openai.Client(api_key=self.api_key, base_url=self.base_url)
        response = client.chat.completions.create(
            model=self.model,
            messages=[
                {"role": "system", "content": "You are a helpful AI assistant"},
                {
                    "role": "user",
                    "content": "Please repeat the word 'hello' 10000 times.",
                },
            ],
            temperature=0,
        )
        return response

    def test_chat_completion(self):
        num_requests = 4

        futures = []
        with ThreadPoolExecutor(16) as executor:
            for i in range(num_requests):
                futures.append(executor.submit(self.run_chat_completion))

            # While the requests are in flight, scan the server log until it
            # reports all of them running at the same time.
            all_requests_running = False
            for line in iter(self.process.stderr.readline, ""):
                line = str(line)
                print(line, end="")
                if f"#running-req: {num_requests}" in line:
                    all_requests_running = True
                    break

            assert all_requests_running


if __name__ == "__main__":
    unittest.main()
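Because the file ends in unittest.main(), it can be run directly with python3 test/srt/test_large_max_new_tokens.py. The test only asserts on the scheduler log; a hypothetical extension (not part of this commit) could also check the clip on the response itself:

response = self.run_chat_completion()
# With SGLANG_CLIP_MAX_NEW_TOKENS=256, generation should stop at the cap:
assert response.choices[0].finish_reason == "length"
assert response.usage.completion_tokens <= 256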