Update CI threshold & Improve code style (#2159)

This commit is contained in:
Lianmin Zheng
2024-11-24 06:29:38 -08:00
committed by GitHub
parent e3938b2f9c
commit 5652c56535
8 changed files with 126 additions and 41 deletions

View File

@@ -20,7 +20,7 @@ class TestBenchServing(unittest.TestCase):
)
if is_in_ci():
self.assertGreater(res["output_throughput"], 2850)
self.assertGreater(res["output_throughput"], 3350)
def test_offline_throughput_non_stream_small_batch_size(self):
res = run_bench_serving(
@@ -47,7 +47,7 @@ class TestBenchServing(unittest.TestCase):
)
if is_in_ci():
self.assertGreater(res["output_throughput"], 2900)
self.assertGreater(res["output_throughput"], 3350)
def test_offline_throughput_without_chunked_prefill(self):
res = run_bench_serving(
@@ -74,7 +74,7 @@ class TestBenchServing(unittest.TestCase):
)
if is_in_ci():
self.assertGreater(res["output_throughput"], 2950)
self.assertGreater(res["output_throughput"], 3450)
def test_offline_throughput_default_fp8(self):
res = run_bench_serving(
@@ -85,7 +85,7 @@ class TestBenchServing(unittest.TestCase):
)
if is_in_ci():
self.assertGreater(res["output_throughput"], 3200)
self.assertGreater(res["output_throughput"], 3850)
def test_online_latency_default(self):
res = run_bench_serving(
@@ -109,7 +109,7 @@ class TestBenchServing(unittest.TestCase):
)
if is_in_ci():
self.assertGreater(res["output_throughput"], 1900)
self.assertGreater(res["output_throughput"], 2150)
def test_moe_offline_throughput_without_radix_cache(self):
res = run_bench_serving(
@@ -120,7 +120,7 @@ class TestBenchServing(unittest.TestCase):
)
if is_in_ci():
self.assertGreater(res["output_throughput"], 1950)
self.assertGreater(res["output_throughput"], 2150)
if __name__ == "__main__":

View File

@@ -6,6 +6,7 @@ python3 -m unittest test_srt_endpoint.TestSRTEndpoint.test_logprob_with_chunked_
import json
import unittest
import numpy as np
import requests
from sglang.srt.utils import kill_child_process
@@ -132,6 +133,7 @@ class TestSRTEndpoint(unittest.TestCase):
)
def test_logprob_with_chunked_prefill(self):
"""Test a long prompt that requests output logprobs will not hit OOM."""
new_tokens = 4
prompts = "I have a very good idea on this. " * 8000
@@ -154,6 +156,63 @@ class TestSRTEndpoint(unittest.TestCase):
self.assertEqual(res["meta_info"]["completion_tokens"], new_tokens)
self.assertEqual(len(res["meta_info"]["output_token_logprobs"]), new_tokens)
def test_logprob_match(self):
    """Test the output logprobs are close to the input logprobs if we run a prefill again."""

    def query_generate(
        prompt, return_logprob=False, max_new_tokens=512, logprob_start_len=-1
    ):
        # Accept either raw text or a pre-tokenized id list as the prompt.
        if isinstance(prompt, str):
            payload = {"text": prompt}
        else:
            payload = {"input_ids": prompt}
        payload.update(
            {
                "sampling_params": {
                    "temperature": 1.0,
                    "max_new_tokens": max_new_tokens,
                    "ignore_eos": True,
                },
                "return_logprob": return_logprob,
                "return_text_in_logprobs": True,
                "logprob_start_len": logprob_start_len,
            }
        )
        resp = requests.post(self.base_url + "/generate", json=payload)
        return resp.json()

    # First pass: free-running generation, recording per-token output logprobs.
    gen = query_generate(
        "I have a very good idea on how to", return_logprob=True, logprob_start_len=0
    )
    meta = gen["meta_info"]
    output_logprobs = np.array([entry[0] for entry in meta["output_token_logprobs"]])
    num_prompt_tokens = meta["prompt_tokens"]

    # Second pass: prefill the full prompt + generated token sequence
    # (max_new_tokens=0) and read back the logprobs the model assigns to
    # the generated portion.
    full_token_ids = [entry[1] for entry in meta["input_token_logprobs"]] + [
        entry[1] for entry in meta["output_token_logprobs"]
    ]
    score = query_generate(
        full_token_ids, return_logprob=True, logprob_start_len=0, max_new_tokens=0
    )
    output_logprobs_score = np.array(
        [
            entry[0]
            for entry in score["meta_info"]["input_token_logprobs"][num_prompt_tokens:]
        ]
    )

    print(f"{output_logprobs[-10:]=}")
    print(f"{output_logprobs_score[-10:]=}")

    # The two passes should agree closely, token by token.
    max_diff = np.max(np.abs(output_logprobs - output_logprobs_score))
    self.assertLess(max_diff, 0.2)
def test_get_server_info(self):
response = requests.get(self.base_url + "/get_server_info")
response_json = response.json()