[CI] test chunked prefill more (#5798)

Author: Lianmin Zheng
Date: 2025-04-28 10:57:17 -07:00
Committed by: GitHub
Parent: d73ddeb196
Commit: 849c83a0c0
15 changed files with 212 additions and 97 deletions
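
For orientation: the extra chunked-prefill coverage in this commit comes from passing `--chunked-prefill-size` to the test servers. Below is a minimal sketch of that launch pattern, assuming the `popen_launch_server` helper and constants from `sglang.test.test_utils` that the diffs use; the model and flag values are illustrative, not part of the commit.

# Sketch only: launch a test server that prefills in chunks of at most 256 tokens,
# mirroring the flags added in the diffs below.
from sglang.test.test_utils import (
    DEFAULT_MODEL_NAME_FOR_TEST,
    DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
    DEFAULT_URL_FOR_TEST,
    popen_launch_server,
)

process = popen_launch_server(
    DEFAULT_MODEL_NAME_FOR_TEST,
    DEFAULT_URL_FOR_TEST,
    timeout=DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
    other_args=["--chunked-prefill-size", "256", "--cuda-graph-max-bs", "2"],
)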


@@ -26,7 +26,7 @@ class TestDummyGrok1(CustomTestCase):
)
if is_in_ci():
assert output_throughput > 0, f"{output_throughput=}"
self.assertGreater(output_throughput, 0)
if __name__ == "__main__":


@@ -64,7 +64,7 @@ class TestVLMModels(CustomTestCase):
model = "openai_compatible"
tp = 1
tasks = "mmmu_val"
batch_size = 1
batch_size = 2
log_suffix = "openai_compatible"
os.makedirs(output_path, exist_ok=True)
@@ -125,6 +125,9 @@ class TestVLMModels(CustomTestCase):
"--chat-template",
model.chat_template,
"--trust-remote-code",
"--cuda-graph-max-bs",
"32",
"--enable-multimodal",
"--mem-fraction-static",
str(self.parsed_args.mem_fraction_static), # Use class variable
],
@@ -171,7 +174,7 @@ if __name__ == "__main__":
"--mem-fraction-static",
type=float,
help="Static memory fraction for the model",
default=0.6,
default=0.8,
)
# Parse args intended for unittest


@@ -3,16 +3,28 @@ import unittest
from sglang.test.test_utils import (
DEFAULT_MODEL_NAME_FOR_TEST,
DEFAULT_MOE_MODEL_NAME_FOR_TEST,
DEFAULT_SMALL_MODEL_NAME_FOR_TEST,
CustomTestCase,
is_in_ci,
run_bench_offline_throughput,
run_bench_one_batch,
write_github_step_summary,
)
# We use `run_bench_offline_throughput` instead of `run_bench_one_batch` for most cases
# because `run_bench_offline_throughput` runs with the overlap scheduler.
class TestBenchOneBatch(CustomTestCase):
def test_bs1_default(self):
def test_bs1_small(self):
output_throughput = run_bench_one_batch(
DEFAULT_SMALL_MODEL_NAME_FOR_TEST, ["--cuda-graph-max-bs", "2"]
)
self.assertGreater(output_throughput, 50)
def test_bs1_default(self):
output_throughput = run_bench_offline_throughput(
DEFAULT_MODEL_NAME_FOR_TEST, ["--cuda-graph-max-bs", "2"]
)
@@ -24,26 +36,26 @@ class TestBenchOneBatch(CustomTestCase):
self.assertGreater(output_throughput, 135)
def test_moe_tp2_bs1(self):
output_throughput = run_bench_one_batch(
output_throughput = run_bench_offline_throughput(
DEFAULT_MOE_MODEL_NAME_FOR_TEST, ["--tp", "2", "--cuda-graph-max-bs", "2"]
)
if is_in_ci():
write_github_step_summary(
f"### test_moe_tp2_bs1\n"
f"### test_moe_tp2_bs1 (Mixtral-8x7B)\n"
f"output_throughput: {output_throughput:.2f} token/s\n"
)
self.assertGreater(output_throughput, 125)
def test_torch_compile_tp2_bs1(self):
output_throughput = run_bench_one_batch(
output_throughput = run_bench_offline_throughput(
DEFAULT_MODEL_NAME_FOR_TEST,
["--tp", "2", "--enable-torch-compile", "--cuda-graph-max-bs", "2"],
)
if is_in_ci():
write_github_step_summary(
f"### test_torch_compile_tp2_bs1\n"
f"### test_torch_compile_tp2_bs1 (Mixtral-8x7B)\n"
f"output_throughput: {output_throughput:.2f} token/s\n"
)
self.assertGreater(output_throughput, 220)
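
As the comment at the top of this file notes, `run_bench_offline_throughput` is preferred over `run_bench_one_batch` because it runs with the overlap scheduler, so the measured throughput is closer to serving behavior. A minimal sketch of the call pattern these tests use, with the signature inferred from this diff (model name plus a list of extra server arguments, returning output throughput in token/s):

# Sketch only: mirrors how the tests in this file call the helper.
from sglang.test.test_utils import (
    DEFAULT_MODEL_NAME_FOR_TEST,
    run_bench_offline_throughput,
)

output_throughput = run_bench_offline_throughput(
    DEFAULT_MODEL_NAME_FOR_TEST, ["--cuda-graph-max-bs", "2"]
)
assert output_throughput > 0, f"{output_throughput=}"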


@@ -5,13 +5,13 @@ import requests
from sglang.srt.utils import kill_process_tree
from sglang.test.few_shot_gsm8k import run_eval as run_eval_few_shot_gsm8k
from sglang.test.send_one import BenchArgs, send_one_prompt
from sglang.test.test_utils import (
DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
DEFAULT_URL_FOR_TEST,
CustomTestCase,
is_in_ci,
popen_launch_server,
run_bench_one_batch,
write_github_step_summary,
)
@@ -48,22 +48,23 @@ class TestDeepseekV3(CustomTestCase):
metrics = run_eval_few_shot_gsm8k(args)
print(f"{metrics=}")
self.assertGreater(metrics["accuracy"], 0.935)
if is_in_ci():
write_github_step_summary(
f"### test_gsm8k (deepseek-v3)\n" f'{metrics["accuracy"]=:.3f}\n'
)
self.assertGreater(metrics["accuracy"], 0.935)
def test_bs_1_speed(self):
args = BenchArgs(port=int(self.base_url.split(":")[-1]), max_new_tokens=2048)
acc_length, speed = send_one_prompt(args)
class TestBenchOneBatch(CustomTestCase):
def test_bs1(self):
output_throughput = run_bench_one_batch(
FULL_DEEPSEEK_V3_MODEL_PATH,
["--trust-remote-code", "--tp", "8", "--cuda-graph-max-bs", "2"],
)
print(f"{output_throughput=:.2f} token/s")
print(f"{speed=:.2f}")
if is_in_ci():
write_github_step_summary(
f"### test_bs1 (deepseek-v3)\n" f"{output_throughput=:.2f} token/s\n"
f"### test_bs_1_speed (deepseek-v3)\n" f"{speed=:.2f} token/s\n"
)
self.assertGreater(output_throughput, 70)
self.assertGreater(speed, 75)
class TestDeepseekV3MTP(CustomTestCase):
@@ -80,13 +81,13 @@ class TestDeepseekV3MTP(CustomTestCase):
"--speculative-draft",
"lmsys/DeepSeek-V3-0324-NextN",
"--speculative-num-steps",
"5",
"3",
"--speculative-eagle-topk",
"4",
"2",
"--speculative-num-draft-tokens",
"8",
"4",
"--mem-fraction-static",
"0.6",
"0.7",
]
cls.process = popen_launch_server(
cls.model,
@@ -113,19 +114,34 @@ class TestDeepseekV3MTP(CustomTestCase):
)
metrics = run_eval_few_shot_gsm8k(args)
print(f"{metrics=}")
self.assertGreater(metrics["accuracy"], 0.94)
server_info = requests.get(self.base_url + "/get_server_info")
avg_spec_accept_length = server_info.json()["avg_spec_accept_length"]
print(f"{avg_spec_accept_length=}")
self.assertGreater(avg_spec_accept_length, 3.2)
if is_in_ci():
write_github_step_summary(
f"### test_gsm8k (deepseek-v3)\n"
f"### test_gsm8k (deepseek-v3 mtp)\n"
f'{metrics["accuracy"]=:.3f}\n'
f"{avg_spec_accept_length=:.2f}\n"
)
self.assertGreater(metrics["accuracy"], 0.935)
self.assertGreater(avg_spec_accept_length, 2.9)
def test_bs_1_speed(self):
args = BenchArgs(port=int(self.base_url.split(":")[-1]), max_new_tokens=2048)
acc_length, speed = send_one_prompt(args)
print(f"{acc_length=:.2f} {speed=:.2f}")
if is_in_ci():
write_github_step_summary(
f"### test_bs_1_speed (deepseek-v3 mtp)\n"
f"{acc_length=:.2f}\n"
f"{speed=:.2f} token/s\n"
)
self.assertGreater(acc_length, 2.9)
self.assertGreater(speed, 105)
if __name__ == "__main__":
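
The new bs=1 speed tests above measure single-request decode speed with `send_one_prompt` against the already-running server rather than a separate one-batch benchmark. A minimal sketch of that pattern, following the calls shown in this diff; the port value is illustrative, and `send_one_prompt` returns the acceptance length and the decode speed:

# Sketch only: measure single-request decode speed against a running server.
from sglang.test.send_one import BenchArgs, send_one_prompt

args = BenchArgs(port=30000, max_new_tokens=2048)  # port is illustrative
acc_length, speed = send_one_prompt(args)  # speed is in token/s
print(f"{acc_length=:.2f} {speed=:.2f}")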


@@ -26,6 +26,8 @@ class TestMLA(CustomTestCase):
"--enable-torch-compile",
"--cuda-graph-max-bs",
"2",
"--chunked-prefill-size",
"256",
],
)


@@ -19,7 +19,7 @@ class TestMLADeepseekV3(CustomTestCase):
def setUpClass(cls):
cls.model = "lmsys/sglang-ci-dsv3-test"
cls.base_url = DEFAULT_URL_FOR_TEST
other_args = ["--trust-remote-code"]
other_args = ["--trust-remote-code", "--chunked-prefill-size", "256"]
if torch.cuda.is_available() and torch.version.cuda:
other_args.extend(["--enable-torch-compile", "--cuda-graph-max-bs", "2"])
cls.process = popen_launch_server(


@@ -13,23 +13,11 @@ from sglang.test.test_utils import (
DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
DEFAULT_URL_FOR_TEST,
CustomTestCase,
is_in_ci,
popen_launch_server,
run_bench_one_batch,
)
class TestTorchNativeAttnBackend(CustomTestCase):
def test_latency(self):
output_throughput = run_bench_one_batch(
DEFAULT_MODEL_NAME_FOR_TEST,
["--attention-backend", "torch_native"],
)
if is_in_ci():
# Torch native backend is expected to be slower
self.assertGreater(output_throughput, 40)
def test_mmlu(self):
model = DEFAULT_MODEL_NAME_FOR_TEST
base_url = DEFAULT_URL_FOR_TEST


@@ -1,23 +1,29 @@
import unittest
from sglang.test.test_utils import CustomTestCase, is_in_ci, run_bench_one_batch
from sglang.test.test_utils import (
DEFAULT_SMALL_MODEL_NAME_FOR_TEST,
CustomTestCase,
is_in_ci,
run_bench_offline_throughput,
)
class TestTorchTP(CustomTestCase):
def test_torch_native_llama(self):
output_throughput = run_bench_one_batch(
"meta-llama/Meta-Llama-3-8B",
output_throughput = run_bench_offline_throughput(
DEFAULT_SMALL_MODEL_NAME_FOR_TEST,
[
"--tp",
"2",
"--json-model-override-args",
'{"architectures": ["TorchNativeLlamaForCausalLM"]}',
# This cannot run anymore with the new torch version.
# "--json-model-override-args",
# '{"architectures": ["TorchNativeLlamaForCausalLM"]}',
"--disable-cuda-graph",
],
)
if is_in_ci():
assert output_throughput > 0, f"{output_throughput=}"
self.assertGreater(output_throughput, 0)
if __name__ == "__main__":


@@ -15,13 +15,13 @@ from sglang.test.test_utils import (
CustomTestCase,
is_in_ci,
popen_launch_server,
run_bench_one_batch,
run_bench_offline_throughput,
)
class TestTritonAttnBackend(CustomTestCase):
def test_latency(self):
output_throughput = run_bench_one_batch(
output_throughput = run_bench_offline_throughput(
DEFAULT_MODEL_NAME_FOR_TEST,
[
"--attention-backend",
@@ -32,6 +32,8 @@ class TestTritonAttnBackend(CustomTestCase):
],
)
print(f"{output_throughput=}")
if is_in_ci():
self.assertGreater(output_throughput, 153)