[CI] Fix ci tests (#5769)

This commit is contained in:
Lianmin Zheng
2025-04-27 07:18:10 -07:00
committed by GitHub
parent 35ca04d2fa
commit 621e96bf9b
18 changed files with 126 additions and 295 deletions

View File

@@ -3,8 +3,8 @@ import unittest
from sglang.test.test_utils import (
DEFAULT_EAGLE_DRAFT_MODEL_FOR_TEST,
DEFAULT_EAGLE_TARGET_MODEL_FOR_TEST,
-    DEFAULT_FP8_MODEL_NAME_FOR_TEST,
     DEFAULT_MODEL_NAME_FOR_TEST,
+    DEFAULT_MODEL_NAME_FOR_TEST_FP8,
DEFAULT_MOE_MODEL_NAME_FOR_TEST,
CustomTestCase,
is_in_ci,
@@ -28,7 +28,7 @@ class TestBenchServing(CustomTestCase):
f"### test_offline_throughput_default\n"
f'Output throughput: {res["output_throughput"]:.2f} token/s\n'
)
-        self.assertGreater(res["output_throughput"], 3350)
+        self.assertGreater(res["output_throughput"], 3800)
def test_offline_throughput_non_stream_small_batch_size(self):
res = run_bench_serving(
@@ -48,9 +48,7 @@ class TestBenchServing(CustomTestCase):
f"### test_offline_throughput_non_stream_small_batch_size\n"
f'Output throughput: {res["output_throughput"]:.2f} token/s\n'
)
-        # There is a regression with torch 2.5
-        # This number was 950 for torch 2.4
-        self.assertGreater(res["output_throughput"], 1000)
+        self.assertGreater(res["output_throughput"], 1050)
def test_offline_throughput_without_radix_cache(self):
res = run_bench_serving(
@@ -65,7 +63,7 @@ class TestBenchServing(CustomTestCase):
f"### test_offline_throughput_without_radix_cache\n"
f'Output throughput: {res["output_throughput"]:.2f} token/s\n'
)
-        self.assertGreater(res["output_throughput"], 3350)
+        self.assertGreater(res["output_throughput"], 3800)
def test_offline_throughput_without_chunked_prefill(self):
res = run_bench_serving(
@@ -100,11 +98,11 @@ class TestBenchServing(CustomTestCase):
f"### test_offline_throughput_with_triton_attention_backend\n"
f'Output throughput: {res["output_throughput"]:.2f} token/s\n'
)
-        self.assertGreater(res["output_throughput"], 3450)
+        self.assertGreater(res["output_throughput"], 3600)
def test_offline_throughput_default_fp8(self):
res = run_bench_serving(
-            model=DEFAULT_FP8_MODEL_NAME_FOR_TEST,
+            model=DEFAULT_MODEL_NAME_FOR_TEST_FP8,
num_prompts=500,
request_rate=float("inf"),
other_server_args=[],
@@ -115,7 +113,7 @@ class TestBenchServing(CustomTestCase):
f"### test_offline_throughput_default_fp8\n"
f'Output throughput: {res["output_throughput"]:.2f} token/s\n'
)
-        self.assertGreater(res["output_throughput"], 3900)
+        self.assertGreater(res["output_throughput"], 4200)
def test_online_latency_default(self):
res = run_bench_serving(
@@ -166,8 +164,8 @@ class TestBenchServing(CustomTestCase):
f'median_e2e_latency_ms : {res["median_e2e_latency_ms"]:.2f} ms\n'
f'accept_length : {res["accept_length"]:.2f} \n'
)
-        self.assertLess(res["median_e2e_latency_ms"], 900)
-        self.assertGreater(res["accept_length"], 2.99)
+        self.assertLess(res["median_e2e_latency_ms"], 800)
+        self.assertGreater(res["accept_length"], 3.0)
def test_moe_offline_throughput_default(self):
res = run_bench_serving(