Enhance the test case for chunked prefill (#1785)

Author: Lianmin Zheng
Date: 2024-10-24 21:23:09 -07:00
Committed by: GitHub
Parent: 384d85ba35
Commit: 1701b0db31
6 changed files with 162 additions and 107 deletions

test/srt/run_suite.py

@@ -15,7 +15,7 @@ suites = {
         "test_embedding_openai_server.py",
         "test_eval_accuracy_mini.py",
         "test_json_constrained.py",
-        # "test_large_max_new_tokens.py", # This test hangs on CI due to unknown reasons
+        "test_large_max_new_tokens.py",
         "test_openai_server.py",
         "test_overlap_schedule.py",
         "test_pytorch_sampling_backend.py",

test/srt/test_chunked_prefill.py

@@ -1,65 +1,31 @@
-import unittest
-from types import SimpleNamespace
+"""
+python3 -m unittest test_chunked_prefill.TestChunkedPrefill.test_mixed_chunked_prefill_without_radix_cache
+"""
+
+import unittest
 
-from sglang.srt.utils import kill_child_process
-from sglang.test.run_eval import run_eval
 from sglang.test.test_utils import (
-    DEFAULT_MODEL_NAME_FOR_TEST,
-    DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
-    DEFAULT_URL_FOR_TEST,
-    popen_launch_server,
+    run_bench_serving,
+    run_mmlu_test,
 )
 
 
 class TestChunkedPrefill(unittest.TestCase):
-    def run_mmlu(
-        self, disable_radix_cache, enable_mixed_chunk, chunked_prefill_size=32
-    ):
-        other_args = ["--chunked-prefill-size", str(chunked_prefill_size)]
-        if disable_radix_cache:
-            other_args += ["--disable-radix-cache"]
-        if enable_mixed_chunk:
-            other_args += ["--enable-mixed-chunk"]
-
-        model = DEFAULT_MODEL_NAME_FOR_TEST
-        base_url = DEFAULT_URL_FOR_TEST
-        process = popen_launch_server(
-            model,
-            base_url,
-            timeout=DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
-            other_args=other_args,
-        )
-
-        args = SimpleNamespace(
-            base_url=base_url,
-            model=model,
-            eval_name="mmlu",
-            num_examples=64,
-            num_threads=32,
-        )
-
-        try:
-            metrics = run_eval(args)
-            assert metrics["score"] >= 0.65
-        finally:
-            kill_child_process(process.pid)
-
     def test_chunked_prefill(self):
-        self.run_mmlu(disable_radix_cache=False, enable_mixed_chunk=False)
+        run_mmlu_test(disable_radix_cache=False, enable_mixed_chunk=False)
 
     def test_mixed_chunked_prefill(self):
-        self.run_mmlu(disable_radix_cache=False, enable_mixed_chunk=True)
+        run_mmlu_test(disable_radix_cache=False, enable_mixed_chunk=True)
 
     def test_chunked_prefill_without_radix_cache(self):
-        self.run_mmlu(disable_radix_cache=True, enable_mixed_chunk=False)
+        run_mmlu_test(disable_radix_cache=True, enable_mixed_chunk=False)
 
     def test_mixed_chunked_prefill_without_radix_cache(self):
-        self.run_mmlu(disable_radix_cache=True, enable_mixed_chunk=True)
+        run_mmlu_test(disable_radix_cache=True, enable_mixed_chunk=True)
 
     def test_no_chunked_prefill(self):
-        self.run_mmlu(
+        run_mmlu_test(
             disable_radix_cache=False, enable_mixed_chunk=False, chunked_prefill_size=-1
         )
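The five tests now delegate to a shared run_mmlu_test helper in sglang.test.test_utils instead of each file carrying its own copy of the launch-eval-kill sequence. Below is a rough sketch of what that helper plausibly does, condensed from the inline run_mmlu() deleted above; the real run_mmlu_test may differ in signature, defaults, and threshold.

from types import SimpleNamespace

from sglang.srt.utils import kill_child_process
from sglang.test.run_eval import run_eval
from sglang.test.test_utils import (
    DEFAULT_MODEL_NAME_FOR_TEST,
    DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
    DEFAULT_URL_FOR_TEST,
    popen_launch_server,
)


def run_mmlu_test(
    disable_radix_cache=False, enable_mixed_chunk=False, chunked_prefill_size=32
):
    # Translate test parameters into server CLI flags.
    other_args = ["--chunked-prefill-size", str(chunked_prefill_size)]
    if disable_radix_cache:
        other_args += ["--disable-radix-cache"]
    if enable_mixed_chunk:
        other_args += ["--enable-mixed-chunk"]

    # Launch a fresh server, run a small MMLU eval against it, and
    # always tear the server process tree down afterwards.
    process = popen_launch_server(
        DEFAULT_MODEL_NAME_FOR_TEST,
        DEFAULT_URL_FOR_TEST,
        timeout=DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
        other_args=other_args,
    )
    args = SimpleNamespace(
        base_url=DEFAULT_URL_FOR_TEST,
        model=DEFAULT_MODEL_NAME_FOR_TEST,
        eval_name="mmlu",
        num_examples=64,
        num_threads=32,
    )
    try:
        metrics = run_eval(args)
        assert metrics["score"] >= 0.65  # loose accuracy floor from the old test
    finally:
        kill_child_process(process.pid)  # clean up even if the eval fails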

test/srt/test_large_max_new_tokens.py

@@ -1,3 +1,7 @@
+"""
+python3 -m unittest test_large_max_new_tokens.TestLargeMaxNewTokens.test_chat_completion
+"""
+
 import os
 import unittest
 from concurrent.futures import ThreadPoolExecutor
@@ -20,6 +24,10 @@ class TestLargeMaxNewTokens(unittest.TestCase):
         cls.model = DEFAULT_MODEL_NAME_FOR_TEST
         cls.base_url = DEFAULT_URL_FOR_TEST
         cls.api_key = "sk-123456"
+
+        cls.stdout = open("stdout.txt", "w")
+        cls.stderr = open("stderr.txt", "w")
+
         cls.process = popen_launch_server(
             cls.model,
             cls.base_url,
@@ -27,7 +35,7 @@ class TestLargeMaxNewTokens(unittest.TestCase):
             api_key=cls.api_key,
             other_args=("--max-total-token", "1024", "--context-len", "8192"),
             env={"SGLANG_CLIP_MAX_NEW_TOKENS": "256", **os.environ},
-            return_stdout_stderr=True,
+            return_stdout_stderr=(cls.stdout, cls.stderr),
         )
         cls.base_url += "/v1"
         cls.tokenizer = get_tokenizer(DEFAULT_MODEL_NAME_FOR_TEST)
@@ -35,6 +43,10 @@ class TestLargeMaxNewTokens(unittest.TestCase):
     @classmethod
     def tearDownClass(cls):
         kill_child_process(cls.process.pid)
+        cls.stdout.close()
+        cls.stderr.close()
+        os.remove("stdout.txt")
+        os.remove("stderr.txt")
 
     def run_chat_completion(self):
         client = openai.Client(api_key=self.api_key, base_url=self.base_url)
@@ -56,16 +68,21 @@ class TestLargeMaxNewTokens(unittest.TestCase):
         futures = []
 
         with ThreadPoolExecutor(num_requests) as executor:
             # Send multiple requests
             for i in range(num_requests):
                 futures.append(executor.submit(self.run_chat_completion))
 
             all_requests_running = False
-            for line in iter(self.process.stderr.readline, ""):
-                line = str(line)
-                print(line, end="")
-                if f"#running-req: {num_requests}" in line:
-                    all_requests_running = True
-                    break
+            # Ensure that they are running concurrently
+            pt = 0
+            while pt >= 0:
+                lines = open("stderr.txt").readlines()
+                for line in lines[pt:]:
+                    print(line, end="", flush=True)
+                    if f"#running-req: {num_requests}" in line:
+                        all_requests_running = True
+                        pt = -1
+                        break
+                    pt += 1
 
             assert all_requests_running
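The concurrency check previously read the server's stderr pipe directly via self.process.stderr; the rewrite redirects the child's stdout/stderr to files and polls stderr.txt instead. Reading a pipe couples the two processes: if the test ever stops draining it, the OS pipe buffer fills and the server blocks on write, which is a plausible cause of the CI hang that kept this test disabled in run_suite.py. Writing to a file removes that coupling. As a standalone sketch of the same polling pattern, with an added timeout so it cannot spin forever; the helper name and parameters are illustrative, not part of the test:

import time


def wait_for_log_line(path, needle, timeout=60.0, poll_interval=0.5):
    """Poll a log file until a line containing `needle` appears."""
    scanned = 0  # lines already examined, playing the role of `pt` above
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        # Re-read the whole file each pass; cheap for short test logs.
        with open(path) as f:
            lines = f.readlines()
        for line in lines[scanned:]:
            if needle in line:
                return True
        scanned = len(lines)
        time.sleep(poll_interval)
    return False  # timed out without seeing the expected line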

test/srt/test_overlap_schedule.py

@@ -1,65 +1,27 @@
 """
 Usage:
-SGLANG_IS_IN_CI=true python3 -m unittest test_overlap_schedule.TestOverlapSchedule.test_radix_attention_chunked_prefill
-SGLANG_IS_IN_CI=true python3 test_overlap_schedule.py
+python3 -m unittest test_overlap_schedule.TestOverlapSchedule.test_radix_attention_chunked_prefill
+python3 test_overlap_schedule.py
 """
 
 import unittest
-from types import SimpleNamespace
 
-from sglang.srt.utils import kill_child_process
-from sglang.test.run_eval import run_eval
-from sglang.test.test_utils import (
-    DEFAULT_MODEL_NAME_FOR_TEST,
-    DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
-    DEFAULT_URL_FOR_TEST,
-    popen_launch_server,
-)
+from sglang.test.test_utils import run_mmlu_test
 
 
 class TestOverlapSchedule(unittest.TestCase):
-    def run_mmlu(self, disable_radix_cache, chunked_prefill_size=32):
-        other_args = ["--chunked-prefill-size", str(chunked_prefill_size)]
-        if disable_radix_cache:
-            other_args += ["--disable-radix-cache"]
-        other_args += ["--enable-overlap-schedule"]
-
-        model = DEFAULT_MODEL_NAME_FOR_TEST
-        base_url = DEFAULT_URL_FOR_TEST
-        process = popen_launch_server(
-            model,
-            base_url,
-            timeout=DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
-            other_args=other_args,
-        )
-
-        args = SimpleNamespace(
-            base_url=base_url,
-            model=model,
-            eval_name="mmlu",
-            num_examples=64,
-            num_threads=32,
-        )
-
-        try:
-            metrics = run_eval(args)
-            assert metrics["score"] >= 0.65
-        finally:
-            kill_child_process(process.pid)
-
     def test_no_radix_attention_chunked_prefill(self):
-        self.run_mmlu(disable_radix_cache=True, chunked_prefill_size=32)
+        run_mmlu_test(disable_radix_cache=True, chunked_prefill_size=32)
 
     def test_no_radix_attention_no_chunked_prefill(self):
-        self.run_mmlu(disable_radix_cache=True, chunked_prefill_size=-1)
+        run_mmlu_test(disable_radix_cache=True, chunked_prefill_size=-1)
 
     def test_radix_attention_chunked_prefill(self):
-        self.run_mmlu(disable_radix_cache=False, chunked_prefill_size=32)
+        run_mmlu_test(disable_radix_cache=False, chunked_prefill_size=32)
 
     def test_radix_attention_no_chunked_prefill(self):
-        self.run_mmlu(disable_radix_cache=False, chunked_prefill_size=-1)
+        run_mmlu_test(disable_radix_cache=False, chunked_prefill_size=-1)
 
 
 if __name__ == "__main__":
     unittest.main()
-
-# @unittest.skip("did not support")
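One detail worth noting in this file: the deleted inline helper always appended "--enable-overlap-schedule" to the server flags, while the new run_mmlu_test calls shown here pass only radix-cache and chunk-size options. Presumably the shared helper exposes some way to forward extra server flags; the snippet below sketches that idea with a hypothetical other_args parameter, which is an assumption rather than the confirmed test_utils signature.

def build_server_flags(disable_radix_cache, chunked_prefill_size=32, other_args=()):
    # Hypothetical flag builder mirroring the deleted run_mmlu() logic.
    flags = ["--chunked-prefill-size", str(chunked_prefill_size)]
    if disable_radix_cache:
        flags += ["--disable-radix-cache"]
    flags += list(other_args)  # e.g. ["--enable-overlap-schedule"]
    return flags


# Reproduces the flag list the removed helper built for this file's tests:
print(build_server_flags(True, 32, other_args=["--enable-overlap-schedule"]))
# ['--chunked-prefill-size', '32', '--disable-radix-cache',
#  '--enable-overlap-schedule']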