Support server based rollout in Verlengine (#4848)

Co-authored-by: Jin Pan <jpan236@wisc.edu>
Co-authored-by: Chayenne <zhaochen20@outlook.com>
Co-authored-by: Jinn <47354855+jhinpan@users.noreply.github.com>
This commit is contained in:
tianlian yi
2025-04-13 01:07:52 +08:00
committed by GitHub
parent 3e4794aad8
commit bc92107b03
10 changed files with 720 additions and 29 deletions

View File

@@ -25,7 +25,12 @@ from sglang.bench_serving import run_benchmark
from sglang.global_config import global_config
from sglang.lang.backend.openai import OpenAI
from sglang.lang.backend.runtime_endpoint import RuntimeEndpoint
from sglang.srt.utils import get_bool_env_var, kill_process_tree, retry
from sglang.srt.utils import (
get_bool_env_var,
is_port_available,
kill_process_tree,
retry,
)
from sglang.test.run_eval import run_eval
from sglang.utils import get_exception_traceback
@@ -98,6 +103,17 @@ def call_generate_lightllm(prompt, temperature, max_tokens, stop=None, url=None)
return pred
def find_available_port(base_port: int):
    """Return a TCP port that is currently free.

    Starts probing at a random offset (100-1000) above *base_port* to
    reduce collisions between concurrent test runs, then walks the port
    space until ``is_port_available`` reports a free port. Note: loops
    indefinitely if no port ever becomes available.
    """
    candidate = base_port + random.randint(100, 1000)
    while not is_port_available(candidate):
        # Drift upward in steps of 42 while below 60000, otherwise step
        # back down by 43; the unequal step sizes make the probe sequence
        # sweep through the range instead of revisiting the same ports.
        candidate = candidate + 42 if candidate < 60000 else candidate - 43
    return candidate
def call_generate_vllm(prompt, temperature, max_tokens, stop=None, n=1, url=None):
assert url is not None