ci: refactor nightly test (#10495)

Mick committed 2025-09-27 06:24:30 +08:00 (committed by GitHub)
parent 05a3526654
commit 777eb53897
16 changed files with 1656 additions and 187 deletions

View File

@@ -165,9 +165,6 @@ suites = {
"per-commit-8-gpu-h20": [
TestFile("quant/test_w4a8_deepseek_v3.py", 371),
],
"nightly": [
TestFile("test_nightly_gsm8k_eval.py"),
],
"vllm_dependency_test": [
TestFile("quant/test_awq.py", 163),
TestFile("test_bnb.py", 5),

View File

@@ -15,8 +15,10 @@ from sglang.test.test_utils import (
DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
DEFAULT_URL_FOR_TEST,
is_in_ci,
parse_models,
popen_launch_server,
write_github_step_summary,
write_results_to_json,
)
MODEL_SCORE_THRESHOLDS = {
@@ -73,10 +75,6 @@ TRITON_MOE_MODELS = {
}
def parse_models(model_string):
return [model.strip() for model in model_string.split(",") if model.strip()]
def popen_launch_server_wrapper(base_url, model, is_tp2):
other_args = ["--log-level-http", "warning", "--trust-remote-code"]
if is_tp2:
@@ -91,31 +89,6 @@ def popen_launch_server_wrapper(base_url, model, is_tp2):
return process
def write_results_to_json(model, metrics, mode="a"):
result = {
"timestamp": datetime.now().isoformat(),
"model": model,
"metrics": metrics,
"score": metrics["score"],
}
existing_results = []
if mode == "a" and os.path.exists("results.json"):
try:
with open("results.json", "r") as f:
existing_results = json.load(f)
except json.JSONDecodeError:
existing_results = []
if isinstance(existing_results, list):
existing_results.append(result)
else:
existing_results = [result]
with open("results.json", "w") as f:
json.dump(existing_results, f, indent=2)
def check_model_scores(results):
failed_models = []
summary = " | model | score | threshold |\n"

View File

@@ -1,8 +1,6 @@
import json
import os
import unittest
import warnings
from datetime import datetime
from types import SimpleNamespace
from sglang.srt.utils import kill_process_tree
@@ -14,9 +12,10 @@ from sglang.test.test_utils import (
DEFAULT_MODEL_NAME_FOR_NIGHTLY_EVAL_TP2,
DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
DEFAULT_URL_FOR_TEST,
is_in_ci,
check_evaluation_test_results,
parse_models,
popen_launch_server,
write_github_step_summary,
write_results_to_json,
)
MODEL_SCORE_THRESHOLDS = {
@@ -25,11 +24,11 @@ MODEL_SCORE_THRESHOLDS = {
"deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct": 0.85,
"google/gemma-2-27b-it": 0.91,
"meta-llama/Llama-3.1-70B-Instruct": 0.95,
"mistralai/Mixtral-8x7B-Instruct-v0.1": 0.64,
"mistralai/Mixtral-8x7B-Instruct-v0.1": 0.62,
"Qwen/Qwen2-57B-A14B-Instruct": 0.86,
"neuralmagic/Meta-Llama-3.1-8B-Instruct-FP8": 0.83,
"neuralmagic/Mistral-7B-Instruct-v0.3-FP8": 0.54,
"neuralmagic/DeepSeek-Coder-V2-Lite-Instruct-FP8": 0.84,
"neuralmagic/DeepSeek-Coder-V2-Lite-Instruct-FP8": 0.835,
"zai-org/GLM-4.5-Air-FP8": 0.75,
# The threshold for neuralmagic/gemma-2-2b-it-FP8 should be 0.6, but this model currently has an accuracy regression.
# The fix is tracked at https://github.com/sgl-project/sglang/issues/4324; we set it to 0.50 for now to keep CI green.
@@ -41,78 +40,6 @@ MODEL_SCORE_THRESHOLDS = {
}
def parse_models(model_string):
return [model.strip() for model in model_string.split(",") if model.strip()]
def popen_launch_server_wrapper(base_url, model, is_tp2):
other_args = ["--log-level-http", "warning", "--trust-remote-code"]
if is_tp2:
other_args.extend(["--tp", "2"])
process = popen_launch_server(
model,
base_url,
timeout=DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
other_args=other_args,
)
return process
def write_results_to_json(model, metrics, mode="a"):
result = {
"timestamp": datetime.now().isoformat(),
"model": model,
"metrics": metrics,
"score": metrics["score"],
}
existing_results = []
if mode == "a" and os.path.exists("results.json"):
try:
with open("results.json", "r") as f:
existing_results = json.load(f)
except json.JSONDecodeError:
existing_results = []
if isinstance(existing_results, list):
existing_results.append(result)
else:
existing_results = [result]
with open("results.json", "w") as f:
json.dump(existing_results, f, indent=2)
def check_model_scores(results):
failed_models = []
summary = " | model | score | threshold |\n"
summary += "| ----- | ----- | --------- |\n"
for model, score in results:
threshold = MODEL_SCORE_THRESHOLDS.get(model)
if threshold is None:
print(f"Warning: No threshold defined for model {model}")
continue
if score < threshold:
failed_models.append(
f"\nScore Check Failed: {model}\n"
f"Model {model} score ({score:.4f}) is below threshold ({threshold:.4f})"
)
line = f"| {model} | {score} | {threshold} |\n"
summary += line
print(summary)
if is_in_ci():
write_github_step_summary(f"### TestNightlyGsm8KEval\n{summary}")
if failed_models:
raise AssertionError("\n".join(failed_models))
# Do not use `CustomTestCase` since `test_mgsm_en_all_models` should not be retried
class TestNightlyGsm8KEval(unittest.TestCase):
@classmethod
@@ -131,11 +58,17 @@ class TestNightlyGsm8KEval(unittest.TestCase):
)
is_first = True
all_results = []
model_count = 0
for model_group, is_fp8, is_tp2 in self.model_groups:
for model in model_group:
model_count += 1
with self.subTest(model=model):
process = popen_launch_server_wrapper(self.base_url, model, is_tp2)
process = popen_launch_server(
model=model,
base_url=self.base_url,
other_args=["--tp", "2"] if is_tp2 else [],
timeout=DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
)
args = SimpleNamespace(
base_url=self.base_url,
@@ -153,7 +86,8 @@ class TestNightlyGsm8KEval(unittest.TestCase):
write_results_to_json(model, metrics, "w" if is_first else "a")
is_first = False
all_results.append((model, metrics["score"]))
# 0.0 is a placeholder for latency, which this test does not measure
all_results.append((model, metrics["score"], 0.0))
kill_process_tree(process.pid)
try:
@@ -164,7 +98,12 @@ class TestNightlyGsm8KEval(unittest.TestCase):
print(f"Error reading results.json: {e}")
# Check all scores after collecting all results
check_model_scores(all_results)
check_evaluation_test_results(
all_results,
self.__class__.__name__,
model_accuracy_thresholds=MODEL_SCORE_THRESHOLDS,
model_count=model_count,
)
if __name__ == "__main__":
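Both nightly eval tests now delegate threshold checking to the shared check_evaluation_test_results helper in sglang.test.test_utils, whose implementation is not part of this diff. Below is a minimal sketch of what it presumably does, inferred from the removed check_model_scores and the new call sites (results are (model, score, latency) tuples; latency thresholds and model_count are optional); the real helper may differ.

# Hypothetical sketch; the actual helper lives in sglang.test.test_utils,
# where is_in_ci and write_github_step_summary are defined in the same module.
def check_evaluation_test_results(
    results,                         # list of (model, score, latency) tuples
    test_name,
    model_accuracy_thresholds,
    model_latency_thresholds=None,
    model_count=None,
):
    failed = []
    summary = "| model | score | score threshold | latency | latency threshold |\n"
    summary += "| ----- | ----- | --------------- | ------- | ----------------- |\n"
    for model, score, latency in results:
        acc_threshold = model_accuracy_thresholds.get(model)
        lat_threshold = (model_latency_thresholds or {}).get(model)
        if acc_threshold is not None and score < acc_threshold:
            failed.append(
                f"{model}: score {score:.4f} is below threshold {acc_threshold:.4f}"
            )
        if lat_threshold is not None and latency > lat_threshold:
            failed.append(
                f"{model}: latency {latency:.1f}s exceeds threshold {lat_threshold:.1f}s"
            )
        summary += f"| {model} | {score} | {acc_threshold} | {latency} | {lat_threshold} |\n"
    print(summary)
    if is_in_ci():
        write_github_step_summary(f"### {test_name}\n{summary}")
    # Guard against models being silently skipped (e.g. a server launch failure).
    if model_count is not None and len(results) != model_count:
        failed.append(f"Expected results for {model_count} models, got {len(results)}")
    if failed:
        raise AssertionError("\n".join(failed))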

View File

@@ -0,0 +1,135 @@
import os
import subprocess
import time
import unittest
from sglang.bench_one_batch_server import BenchmarkResult
from sglang.srt.utils import kill_process_tree
from sglang.test.test_utils import (
DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
DEFAULT_URL_FOR_TEST,
_parse_int_list_env,
is_in_ci,
parse_models,
popen_launch_server,
write_github_step_summary,
)
PROFILE_DIR = "performance_profiles_text_models"
class TestNightlyTextModelsPerformance(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.model_groups = [
(parse_models("meta-llama/Llama-3.1-8B-Instruct"), False, False),
(parse_models("Qwen/Qwen2-57B-A14B-Instruct"), False, True),
# (parse_models(DEFAULT_MODEL_NAME_FOR_NIGHTLY_EVAL_TP1), False, False),
# (parse_models(DEFAULT_MODEL_NAME_FOR_NIGHTLY_EVAL_TP2), False, True),
# (parse_models(DEFAULT_MODEL_NAME_FOR_NIGHTLY_EVAL_FP8_TP1), True, False),
# (parse_models(DEFAULT_MODEL_NAME_FOR_NIGHTLY_EVAL_FP8_TP2), True, True),
]
cls.base_url = DEFAULT_URL_FOR_TEST
cls.batch_sizes = [1, 1, 8, 16, 64]
cls.input_lens = tuple(_parse_int_list_env("NIGHTLY_INPUT_LENS", "4096"))
cls.output_lens = tuple(_parse_int_list_env("NIGHTLY_OUTPUT_LENS", "512"))
os.makedirs(PROFILE_DIR, exist_ok=True)
cls.full_report = f"## {cls.__name__}\n" + BenchmarkResult.help_str()
def test_bench_one_batch(self):
all_benchmark_results = []
for model_group, is_fp8, is_tp2 in self.model_groups:
for model in model_group:
benchmark_results = []
with self.subTest(model=model):
process = popen_launch_server(
model=model,
base_url=self.base_url,
other_args=["--tp", "2"] if is_tp2 else [],
timeout=DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
)
try:
profile_filename = (
f"{model.replace('/', '_')}_{int(time.time())}"
)
profile_path_prefix = os.path.join(
PROFILE_DIR, profile_filename
)
json_output_file = (
f"results_{model.replace('/', '_')}_{int(time.time())}.json"
)
command = [
"python3",
"-m",
"sglang.bench_one_batch_server",
"--model",
model,
"--base-url",
self.base_url,
"--batch-size",
*[str(x) for x in self.batch_sizes],
"--input-len",
*[str(x) for x in self.input_lens],
"--output-len",
*[str(x) for x in self.output_lens],
"--show-report",
"--profile",
"--profile-by-stage",
"--profile-filename-prefix",
profile_path_prefix,
f"--output-path={json_output_file}",
"--no-append-to-github-summary",
]
print(f"Running command: {' '.join(command)}")
result = subprocess.run(command, capture_output=True, text=True)
if result.returncode != 0:
print(f"Error running benchmark for {model}:")
print(result.stderr)
# Continue to the next model even if one fails
continue
# Load and deserialize JSON results
if os.path.exists(json_output_file):
import json
with open(json_output_file, "r") as f:
json_data = json.load(f)
# Convert JSON data to BenchmarkResult objects
for data in json_data:
benchmark_result = BenchmarkResult(**data)
all_benchmark_results.append(benchmark_result)
benchmark_results.append(benchmark_result)
print(
f"Loaded {len(benchmark_results)} benchmark results from {json_output_file}"
)
# Clean up JSON file
os.remove(json_output_file)
else:
print(
f"Warning: JSON output file {json_output_file} not found"
)
finally:
kill_process_tree(process.pid)
report_part = BenchmarkResult.generate_markdown_report(
PROFILE_DIR, benchmark_results
)
self.full_report += report_part + "\n"
if is_in_ci():
write_github_step_summary(self.full_report)
if __name__ == "__main__":
unittest.main()
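The performance tests read their sweep parameters through _parse_int_list_env, which is imported from sglang.test.test_utils but not defined in this diff. A minimal sketch, assuming it simply parses a comma-separated integer list from an environment variable with a fallback default:

import os

# Hypothetical sketch; the actual helper is defined in sglang.test.test_utils.
def _parse_int_list_env(name, default):
    # Read a comma-separated list of integers from the environment,
    # falling back to the given default string when the variable is unset.
    value = os.environ.get(name, default)
    return [int(x) for x in value.split(",") if x.strip()]

# Example: NIGHTLY_INPUT_LENS="2048,4096" yields [2048, 4096];
# with the variable unset, the default "4096" yields [4096].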

View File

@@ -0,0 +1,117 @@
import json
import unittest
import warnings
from types import SimpleNamespace
from sglang.srt.utils import kill_process_tree
from sglang.test.run_eval import run_eval
from sglang.test.test_utils import (
DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
DEFAULT_URL_FOR_TEST,
ModelDeploySetup,
ModelEvalMetrics,
check_evaluation_test_results,
popen_launch_server,
write_results_to_json,
)
MODEL_THRESHOLDS = {
# Conservative thresholds based on 100 MMMU samples; the latency thresholds are especially conservative
ModelDeploySetup("deepseek-ai/deepseek-vl2-small"): ModelEvalMetrics(0.330, 56.1),
ModelDeploySetup("deepseek-ai/Janus-Pro-7B"): ModelEvalMetrics(0.285, 39.9),
ModelDeploySetup("Efficient-Large-Model/NVILA-Lite-2B-hf-0626"): ModelEvalMetrics(
0.305, 23.8
),
ModelDeploySetup("google/gemma-3-4b-it"): ModelEvalMetrics(0.360, 10.9),
ModelDeploySetup("google/gemma-3n-E4B-it"): ModelEvalMetrics(0.360, 15.3),
ModelDeploySetup("mistral-community/pixtral-12b"): ModelEvalMetrics(0.360, 14.5),
ModelDeploySetup("moonshotai/Kimi-VL-A3B-Instruct"): ModelEvalMetrics(0.330, 22.3),
ModelDeploySetup("openbmb/MiniCPM-o-2_6"): ModelEvalMetrics(0.330, 29.3),
ModelDeploySetup("openbmb/MiniCPM-v-2_6"): ModelEvalMetrics(0.270, 24.5),
ModelDeploySetup("OpenGVLab/InternVL2_5-2B"): ModelEvalMetrics(0.300, 14.0),
ModelDeploySetup("Qwen/Qwen2-VL-7B-Instruct"): ModelEvalMetrics(0.310, 83.3),
ModelDeploySetup("Qwen/Qwen2.5-VL-7B-Instruct"): ModelEvalMetrics(0.340, 31.9),
ModelDeploySetup("unsloth/Mistral-Small-3.1-24B-Instruct-2503"): ModelEvalMetrics(
0.310, 16.7
),
ModelDeploySetup("XiaomiMiMo/MiMo-VL-7B-RL"): ModelEvalMetrics(0.28, 32.0),
ModelDeploySetup("zai-org/GLM-4.1V-9B-Thinking"): ModelEvalMetrics(0.280, 30.4),
}
class TestNightlyVLMMmmuEval(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.models = list(MODEL_THRESHOLDS.keys())
cls.base_url = DEFAULT_URL_FOR_TEST
def test_mmmu_vlm_models(self):
warnings.filterwarnings(
"ignore", category=ResourceWarning, message="unclosed.*socket"
)
is_first = True
all_results = []
for model in self.models:
model_path = model.model_path
with self.subTest(model=model_path):
process = popen_launch_server(
model=model_path,
base_url=self.base_url,
other_args=model.extra_args,
timeout=DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
)
try:
args = SimpleNamespace(
base_url=self.base_url,
model=model_path,
eval_name="mmmu",
num_examples=100,
num_threads=64,
max_tokens=30,
)
args.return_latency = True
metrics, latency = run_eval(args)
metrics["score"] = round(metrics["score"], 4)
metrics["latency"] = round(latency, 4)
print(
f"{'=' * 42}\n{model_path} - metrics={metrics} score={metrics['score']}\n{'=' * 42}\n"
)
write_results_to_json(model_path, metrics, "w" if is_first else "a")
is_first = False
all_results.append(
(model_path, metrics["score"], metrics["latency"])
)
finally:
kill_process_tree(process.pid)
try:
with open("results.json", "r") as f:
print("\nFinal Results from results.json:")
print(json.dumps(json.load(f), indent=2))
except Exception as e:
print(f"Error reading results: {e}")
model_accuracy_thresholds = {
model.model_path: threshold.accuracy
for model, threshold in MODEL_THRESHOLDS.items()
}
model_latency_thresholds = {
model.model_path: threshold.eval_time
for model, threshold in MODEL_THRESHOLDS.items()
}
check_evaluation_test_results(
all_results,
self.__class__.__name__,
model_accuracy_thresholds=model_accuracy_thresholds,
model_latency_thresholds=model_latency_thresholds,
)
if __name__ == "__main__":
unittest.main()
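ModelDeploySetup and ModelEvalMetrics also come from sglang.test.test_utils and are not shown in this diff. Judging from their usage above (hashable dict keys exposing .model_path and .extra_args; values built as ModelEvalMetrics(accuracy, eval_time)), a plausible sketch is the following; the real definitions may differ.

from dataclasses import dataclass

# Hypothetical sketches of the shared types in sglang.test.test_utils.
@dataclass(frozen=True)          # frozen so instances can serve as dict keys
class ModelDeploySetup:
    model_path: str              # model id passed to popen_launch_server
    extra_args: tuple = ()       # extra server launch flags, empty by default

@dataclass(frozen=True)
class ModelEvalMetrics:
    accuracy: float              # minimum acceptable MMMU score
    eval_time: float             # maximum acceptable evaluation latency, in seconds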

View File

@@ -0,0 +1,135 @@
import os
import subprocess
import unittest
import warnings
from sglang.bench_one_batch_server import BenchmarkResult
from sglang.srt.utils import kill_process_tree
from sglang.test.test_utils import (
DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
DEFAULT_URL_FOR_TEST,
_parse_int_list_env,
is_in_ci,
parse_models,
popen_launch_server,
write_github_step_summary,
)
PROFILE_DIR = "performance_profiles_vlms"
MODEL_DEFAULTS = [
# Keep conservative defaults. Can be overridden by env NIGHTLY_VLM_MODELS
"Qwen/Qwen2.5-VL-7B-Instruct",
"google/gemma-3-27b-it",
# "OpenGVLab/InternVL2_5-2B",
# buggy in official transformers impl
# "openbmb/MiniCPM-V-2_6",
]
class TestNightlyVLMModelsPerformance(unittest.TestCase):
@classmethod
def setUpClass(cls):
warnings.filterwarnings(
"ignore", category=ResourceWarning, message="unclosed.*socket"
)
cls.models = parse_models(
os.environ.get("NIGHTLY_VLM_MODELS", ",".join(MODEL_DEFAULTS))
)
cls.base_url = DEFAULT_URL_FOR_TEST
cls.batch_sizes = _parse_int_list_env("NIGHTLY_VLM_BATCH_SIZES", "1,1,2,8,16")
cls.input_lens = tuple(_parse_int_list_env("NIGHTLY_VLM_INPUT_LENS", "4096"))
cls.output_lens = tuple(_parse_int_list_env("NIGHTLY_VLM_OUTPUT_LENS", "512"))
cls.full_report = f"## {cls.__name__}\n" + BenchmarkResult.help_str()
def test_bench_one_batch(self):
all_benchmark_results = []
for model in self.models:
benchmark_results = []
with self.subTest(model=model):
process = popen_launch_server(
model=model,
base_url=self.base_url,
other_args=["--mem-fraction-static=0.7"],
timeout=DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
)
try:
# Run bench_one_batch_server against the launched server
profile_filename = f"{model.replace('/', '_')}"
# profile path prefix for this run
profile_path_prefix = os.path.join(PROFILE_DIR, profile_filename)
# JSON output file for this model
json_output_file = f"results_{model.replace('/', '_')}.json"
command = [
"python3",
"-m",
"sglang.bench_one_batch_server",
f"--model={model}",
"--base-url",
self.base_url,
"--batch-size",
*[str(x) for x in self.batch_sizes],
"--input-len",
*[str(x) for x in self.input_lens],
"--output-len",
*[str(x) for x in self.output_lens],
"--trust-remote-code",
"--dataset-name=mmmu",
"--profile",
"--profile-by-stage",
f"--profile-filename-prefix={profile_path_prefix}",
"--show-report",
f"--output-path={json_output_file}",
"--no-append-to-github-summary",
]
print(f"Running command: {' '.join(command)}")
result = subprocess.run(command, capture_output=True, text=True)
if result.returncode != 0:
print(f"Error running benchmark for {model} with batch size:")
print(result.stderr)
# Continue to next batch size even if one fails
continue
print(f"Output for {model} with batch size:")
print(result.stdout)
# Load and deserialize JSON results
if os.path.exists(json_output_file):
import json
with open(json_output_file, "r") as f:
json_data = json.load(f)
# Convert JSON data to BenchmarkResult objects
for data in json_data:
benchmark_result = BenchmarkResult(**data)
all_benchmark_results.append(benchmark_result)
benchmark_results.append(benchmark_result)
print(
f"Loaded {len(benchmark_results)} benchmark results from {json_output_file}"
)
else:
print(f"Warning: JSON output file {json_output_file} not found")
finally:
kill_process_tree(process.pid)
report_part = BenchmarkResult.generate_markdown_report(
PROFILE_DIR, benchmark_results
)
self.full_report += report_part + "\n"
if is_in_ci():
write_github_step_summary(self.full_report)
if __name__ == "__main__":
unittest.main()
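Because the model list, batch sizes, and sequence lengths are all read from environment variables, a local run can narrow the sweep without editing the test. A hypothetical example follows; the test module name is an assumption, since the diff does not show file paths.

import os
import unittest

# Hypothetical local run that narrows the sweep; the env var names match the
# ones read in setUpClass above and must be set before the test runs.
os.environ["NIGHTLY_VLM_MODELS"] = "Qwen/Qwen2.5-VL-7B-Instruct"
os.environ["NIGHTLY_VLM_BATCH_SIZES"] = "1,8"
os.environ["NIGHTLY_VLM_INPUT_LENS"] = "2048"
os.environ["NIGHTLY_VLM_OUTPUT_LENS"] = "256"

# Run only this class; the module name is an assumption for illustration.
suite = unittest.defaultTestLoader.loadTestsFromName(
    "test_nightly_vlms_perf.TestNightlyVLMModelsPerformance"
)
unittest.TextTestRunner().run(suite)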

View File

@@ -14,6 +14,7 @@ from sglang.test.test_utils import (
is_in_ci,
popen_launch_server,
write_github_step_summary,
write_results_to_json,
)
MODEL_SCORE_THRESHOLDS = {
@@ -52,31 +53,6 @@ def popen_launch_server_wrapper(base_url, model, is_fp8, is_tp2):
return process
def write_results_to_json(model, metrics, mode="a"):
result = {
"timestamp": datetime.now().isoformat(),
"model": model,
"metrics": metrics,
"score": metrics["score"],
}
existing_results = []
if mode == "a" and os.path.exists("results.json"):
try:
with open("results.json", "r") as f:
existing_results = json.load(f)
except json.JSONDecodeError:
existing_results = []
if isinstance(existing_results, list):
existing_results.append(result)
else:
existing_results = [result]
with open("results.json", "w") as f:
json.dump(existing_results, f, indent=2)
def check_model_scores(results):
failed_models = []
summary = " | model | score | threshold |\n"