Improve benchmark (#1140)
@@ -65,10 +65,9 @@ def main(args):
    def get_one_answer(i):
        answer = call_generate(
            prompt=few_shot_examples + questions[i],
            # prompt="System: " + few_shot_examples + "<|separator|>\n\n" + questions[i],
            temperature=0,
            max_tokens=256,
            stop="Question",
            stop=["Question", "Assistant:", "<|separator|>"],
        )
        states[i] = answer
@@ -1,105 +0,0 @@
# Benchmark Latency and Throughput

## SGLang

### Launch a server
```
python -m sglang.launch_server --model-path meta-llama/Llama-2-7b-chat-hf --port 30000
```

### Benchmark one batch

```
python3 bench_one.py
python3 bench_one.py --batch-size 64
```

### Benchmark online serving with many requests

```
python3 bench_serving.py --backend srt --port 30000 --tokenizer meta-llama/Llama-2-7b-chat-hf --num-prompt 1000 --request-rate 100 --input-len 1024 --output-len 256
```

### Benchmark online serving on the ShareGPT dataset

#### Download data
```
wget https://huggingface.co/datasets/anon8231489123/ShareGPT_Vicuna_unfiltered/resolve/main/ShareGPT_V3_unfiltered_cleaned_split.json
```

#### Run ShareGPT
```
python3 bench_serving.py --backend srt --port 30000 --tokenizer meta-llama/Llama-2-7b-chat-hf --dataset ShareGPT_V3_unfiltered_cleaned_split.json --num-prompts 10 --request-rate 10
```

### Profile with Nsight
0. Prerequisite
```bash
# install nsys
# https://docs.nvidia.com/nsight-systems/InstallationGuide/index.html
apt update
apt install -y --no-install-recommends gnupg
echo "deb http://developer.download.nvidia.com/devtools/repos/ubuntu$(source /etc/lsb-release; echo "$DISTRIB_RELEASE" | tr -d .)/$(dpkg --print-architecture) /" | tee /etc/apt/sources.list.d/nvidia-devtools.list
apt-key adv --fetch-keys http://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/7fa2af80.pub
apt update
apt install nsight-systems-cli
```

1. To profile a single batch, use `nsys profile --cuda-graph-trace=node python3 -m sglang.bench_latency --model meta-llama/Meta-Llama-3-8B --batch-size 64 --input-len 512`

2. To profile a server, e.g.

```bash
# server
# set the delay and duration times according to needs
nsys profile --trace-fork-before-exec=true --cuda-graph-trace=node -o sglang.out --delay 60 --duration 70 python3 -m sglang.launch_server --model-path meta-llama/Meta-Llama-3.1-8B-Instruct --disable-radix-cache

# client
python3 -m sglang.bench_serving --backend sglang --num-prompts 6000 --dataset-name random --random-input 4096 --random-output 2048
```

3. Use NVTX, e.g.

```bash
# install nvtx
pip install nvtx

# code snippets
import nvtx
with nvtx.annotate("description", color="color"):
    # some critical code
```


## Other baselines

### vLLM
```
python3 -m vllm.entrypoints.api_server --model meta-llama/Llama-2-7b-chat-hf --tensor-parallel 1 --disable-log-requests --swap-space 16 --port 21000
```

```
# run synthetic
python3 bench_serving.py --backend vllm --port 30000 --tokenizer meta-llama/Llama-2-7b-chat-hf --num-prompt 1000 --request-rate 100 --input-len 1024 --output-len 256
```

```
# run ShareGPT
python3 bench_serving.py --backend vllm --port 21000 --tokenizer meta-llama/Llama-2-7b-chat-hf --dataset ShareGPT_V3_unfiltered_cleaned_split.json --num-prompts 10 --request-rate 10
```

```
# run one batch
python3 -m vllm.entrypoints.openai.api_server --model meta-llama/Meta-Llama-3-70B --tensor 8 --disable-log-requests --max-num-seqs 1024 --quantization fp8

python3 bench_one.py --input-len 1024 --batch-size 1 1 2 4 8 16 32 64 128 256 512 768 1024 --port 8000 --backend vllm
```

### LightLLM
```
python -m lightllm.server.api_server --model_dir ~/model_weights/Llama-2-7b-chat-hf --max_total_token_num 15600 --tokenizer_mode auto --port 22000
```

```
python3 bench_serving.py --backend lightllm --port 22000 --tokenizer meta-llama/Llama-2-7b-chat-hf --dataset ShareGPT_V3_unfiltered_cleaned_split.json --num-prompts 10 --request-rate 10
```
@@ -1,147 +0,0 @@
"""
Usage:
python3 bench_one.py --input-len 2048 --batch-size 1 2 4 8 16 32 64 128 256 512
"""

import argparse
import json
import time

import numpy as np
import requests


def run_one_batch_size(bs):
    url = f"{args.host}:{args.port}"
    max_new_tokens = args.max_tokens

    if args.input_len:
        input_ids = [
            [int(x) for x in np.random.randint(0, high=16384, size=(args.input_len,))]
            for _ in range(bs)
        ]
    else:
        text = [f"{i, }" for i in range(bs)]

    tic = time.time()
    if args.backend == "srt":
        if args.input_len:
            inputs = {"input_ids": input_ids}
        else:
            inputs = {"text": text}

        response = requests.post(
            url + "/generate",
            json={
                "sampling_params": {
                    "temperature": 0,
                    "max_new_tokens": max_new_tokens,
                    "ignore_eos": True,
                },
                **inputs,
            },
        )
    elif args.backend == "lightllm":
        response = requests.post(
            url + "/generate",
            json={
                "inputs": text[0],
                "parameters": {
                    "temperature": 0,
                    "max_new_tokens": max_new_tokens,
                    "ignore_eos": True,
                },
            },
        )
    elif args.backend == "vllm":
        if args.input_len:
            inputs = {"prompt": input_ids}
        else:
            inputs = {"prompt": text}

        response = requests.post(
            url + "/v1/completions",
            json={
                "model": args.vllm_model_name,
                "temperature": 0,
                "max_tokens": max_new_tokens,
                "ignore_eos": True,
                **inputs,
            },
        )
    elif args.backend == "ginfer":
        import grpc
        from ginfer import sampler_pb2, sampler_pb2_grpc

        sampler_channel = grpc.insecure_channel(url.replace("http://", ""))
        sampler = sampler_pb2_grpc.SamplerStub(sampler_channel)

        tic = time.time()
        sample_request = sampler_pb2.SampleTextRequest(
            prompt=text[0],
            settings=sampler_pb2.SampleSettings(
                max_len=max_new_tokens,
                rng_seed=0,
                temperature=0,
                nucleus_p=1,
            ),
        )
        stream = sampler.SampleText(sample_request)
        response = "".join([x.text for x in stream])
    latency = time.time() - tic

    if isinstance(response, str):
        ret = response
    else:
        ret = response.json()
    print(ret)

    input_len = args.input_len if args.input_len else 1
    output_len = max_new_tokens

    output_throughput = bs * max_new_tokens / latency
    overall_throughput = bs * (input_len + output_len) / latency
    print(f"latency: {latency:.2f} s")
    print(f"output throughput: {output_throughput:.2f} token/s")
    print(f"(input + output) throughput: {overall_throughput:.2f} token/s")

    with open("results.jsonl", "a") as fout:
        res = {
            "backend": args.backend,
            "input_len": args.input_len,
            "output_len": args.max_tokens,
            "batch_size": bs,
            "latency": latency,
            "output_throughput": output_throughput,
            "overall_throughput": overall_throughput,
        }
        fout.write(json.dumps(res) + "\n")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--host", type=str, default="http://127.0.0.1")
    parser.add_argument("--port", type=int, default=None)
    parser.add_argument("--backend", type=str, default="srt")
    parser.add_argument("--input-len", type=int, default=None)
    parser.add_argument("--batch-size", type=int, nargs="*", default=[1])
    parser.add_argument("--max-tokens", type=int, default=256)
    parser.add_argument(
        "--vllm-model-name", type=str, default="meta-llama/Meta-Llama-3-70B"
    )
    args = parser.parse_args()

    if args.port is None:
        if args.backend == "srt":
            args.port = 30000
        elif args.backend == "vllm":
            args.port = 21000
        elif args.backend == "lightllm":
            args.port = 22000
        elif args.backend == "ginfer":
            args.port = 9988
        else:
            raise ValueError(f"Invalid backend: {args.backend}")

    for bs in args.batch_size:
        run_one_batch_size(bs)
@@ -1,374 +0,0 @@
"""Benchmark online serving throughput.

On the server side, run one of the following commands:
    (vLLM backend)
    python -m vllm.entrypoints.api_server \
        --model <your_model> --swap-space 16 \
        --disable-log-requests

    (TGI backend)
    ./launch_hf_server.sh <your_model>

On the client side, run:
    python benchmarks/benchmark_serving.py \
        --backend <backend> \
        --tokenizer <your_model> --dataset <target_dataset> \
        --request-rate <request_rate>
"""

import argparse
import asyncio
import json
import os
import random
import time
from typing import AsyncGenerator, List, Tuple

import aiohttp
import numpy as np
from tqdm.asyncio import tqdm_asyncio
from transformers import AutoTokenizer

# (prompt len, output len, latency)
REQUEST_LATENCY: List[Tuple[int, int, float]] = []


def sample_requests(
    dataset_path: str,
    num_requests: int,
    tokenizer: AutoTokenizer,
) -> List[Tuple[str, int, int]]:
    def load_dataset():
        with open(dataset_path, encoding="utf-8") as f:
            dataset = json.load(f)
        # Filter out the conversations with less than 2 turns.
        dataset = [data for data in dataset if len(data["conversations"]) >= 2]
        # Only keep the first two turns of each conversation.
        dataset = [
            (data["conversations"][0]["value"], data["conversations"][1]["value"])
            for data in dataset
        ]

        # Tokenize the prompts and completions.
        prompts = [prompt for prompt, _ in dataset]
        prompt_token_ids = tokenizer(prompts).input_ids
        completions = [completion for _, completion in dataset]
        completion_token_ids = tokenizer(completions).input_ids
        tokenized_dataset = []
        for i in range(len(dataset)):
            output_len = len(completion_token_ids[i])
            tokenized_dataset.append((prompts[i], prompt_token_ids[i], output_len))

        # Filter out too long sequences.
        filtered_dataset: List[Tuple[str, int, int]] = []
        for prompt, prompt_token_ids, output_len in tokenized_dataset:
            prompt_len = len(prompt_token_ids)
            if prompt_len < 4 or output_len < 4:
                # Prune too short sequences.
                # This is because TGI causes errors when the input or output length
                # is too short.
                continue
            if prompt_len > 1024 or prompt_len + output_len > 2048:
                # Prune too long sequences.
                continue
            filtered_dataset.append((prompt, prompt_len, output_len))

        return filtered_dataset

    try:
        from diskcache import Cache

        home_dir = os.path.expanduser("~")
        cache = Cache(f"{home_dir}/.cache/sglang")
        with Cache(cache.directory) as reference:
            reference_key = f"{dataset_path}_{tokenizer.name_or_path}"
            if reference_key in reference:
                print("Reading dataset from cache...")
                dataset = reference[reference_key]
            else:
                dataset = load_dataset()
                reference[reference_key] = dataset
    except ImportError:
        dataset = load_dataset()

    # Sample the requests.
    sampled_requests = random.sample(dataset, num_requests)
    return sampled_requests


async def get_request(
    input_requests: List[Tuple[str, int, int]],
    request_rate: float,
) -> AsyncGenerator[Tuple[str, int, int], None]:
    input_requests = iter(input_requests)
    for request in input_requests:
        yield request

        if request_rate == float("inf"):
            # If the request rate is infinity, then we don't need to wait.
            continue
        # Sample the request interval from the exponential distribution.
        interval = np.random.exponential(1.0 / request_rate)
        # The next request will be sent after the interval.
        await asyncio.sleep(interval)


async def send_request(
    backend: str,
    api_url: str,
    prompt: str,
    prompt_len: int,
    output_len: int,
    best_of: int,
    use_beam_search: bool,
) -> None:
    request_start_time = time.perf_counter()

    headers = {"User-Agent": "Benchmark Client"}
    if backend == "vllm":
        pload = {
            "prompt": prompt,
            "n": 1,
            "best_of": best_of,
            "use_beam_search": use_beam_search,
            "temperature": 0.0 if use_beam_search else 1.0,
            "top_p": 1.0,
            "max_tokens": output_len,
            "ignore_eos": True,
            "stream": False,
        }
    elif backend == "tgi":
        assert not use_beam_search
        params = {
            "best_of": best_of,
            "max_new_tokens": output_len,
            "do_sample": True,
        }
        pload = {
            "inputs": prompt,
            "parameters": params,
        }
    elif backend == "srt":
        assert not use_beam_search
        params = {
            "ignore_eos": True,
            "max_new_tokens": output_len,
        }
        pload = {
            "text": prompt,
            "sampling_params": params,
        }
    elif backend == "lightllm":
        assert not use_beam_search
        params = {
            "ignore_eos": True,
            "max_new_tokens": output_len,
        }
        pload = {
            "inputs": prompt,
            "parameters": params,
        }
    elif backend == "ginfer":
        pass
    else:
        raise ValueError(f"Unknown backend: {backend}")

    if backend != "ginfer":
        timeout = aiohttp.ClientTimeout(total=3 * 3600)
        async with aiohttp.ClientSession(timeout=timeout) as session:
            while True:
                async with session.post(
                    api_url, headers=headers, json=pload
                ) as response:
                    chunks = []
                    async for chunk, _ in response.content.iter_chunks():
                        chunks.append(chunk)
                output = b"".join(chunks).decode("utf-8")
                output = json.loads(output)

                # Re-send the request if it failed.
                if "error" not in output:
                    break
                else:
                    print(output)
    else:
        import grpc
        from ginfer import sampler_pb2, sampler_pb2_grpc

        api_url = api_url.replace("http://", "").replace("/generate", "")
        sampler_channel = grpc.aio.insecure_channel(api_url)
        sampler = sampler_pb2_grpc.SamplerStub(sampler_channel)

        request_end_time = time.perf_counter()
        sample_request = sampler_pb2.SampleTextRequest(
            prompt=prompt,
            settings=sampler_pb2.SampleSettings(
                max_len=output_len,
                rng_seed=0,
                temperature=0,
                nucleus_p=1,
            ),
        )
        stream = sampler.SampleText(sample_request)
        response = "".join([x.text async for x in stream])

    request_end_time = time.perf_counter()
    request_latency = request_end_time - request_start_time
    REQUEST_LATENCY.append((prompt_len, output_len, request_latency))


async def benchmark(
    backend: str,
    api_url: str,
    input_requests: List[Tuple[str, int, int]],
    best_of: int,
    use_beam_search: bool,
    request_rate: float,
) -> None:
    tasks: List[asyncio.Task] = []
    async for request in get_request(input_requests, request_rate):
        prompt, prompt_len, output_len = request
        task = asyncio.create_task(
            send_request(
                backend,
                api_url,
                prompt,
                prompt_len,
                output_len,
                best_of,
                use_beam_search,
            )
        )
        tasks.append(task)
    await tqdm_asyncio.gather(*tasks)


def main(args: argparse.Namespace):
    print(args)
    random.seed(args.seed)
    np.random.seed(args.seed)

    api_url = f"{args.host}:{args.port}/generate"
    if args.tokenizer.endswith(".json") or args.tokenizer.endswith(".model"):
        from sglang.srt.hf_transformers_utils import get_tokenizer

        tokenizer = get_tokenizer(args.tokenizer)
    else:
        tokenizer = AutoTokenizer.from_pretrained(
            args.tokenizer, trust_remote_code=args.trust_remote_code
        )

    if args.dataset:
        input_requests = sample_requests(args.dataset, args.num_prompts, tokenizer)
    else:
        input_lens = np.random.randint(
            int(args.input_len * args.range_ratio),
            args.input_len + 1,
            size=args.num_prompts,
        )
        output_lens = np.random.randint(
            int(args.output_len * args.range_ratio),
            args.output_len + 1,
            size=args.num_prompts,
        )
        offsets = np.random.randint(0, tokenizer.vocab_size, size=args.num_prompts)
        input_requests = []
        for i in range(args.num_prompts):
            prompt = tokenizer.decode(
                [
                    (offsets[i] + i + j) % (tokenizer.vocab_size - 129) + 128
                    for j in range(input_lens[i])
                ]
            )
            input_requests.append((prompt, int(input_lens[i]), int(output_lens[i])))

    benchmark_start_time = time.perf_counter()
    asyncio.run(
        benchmark(
            args.backend,
            api_url,
            input_requests,
            args.best_of,
            args.use_beam_search,
            args.request_rate,
        )
    )
    benchmark_end_time = time.perf_counter()
    benchmark_time = benchmark_end_time - benchmark_start_time

    # Compute the statistics.
    latencies = [latency for _, _, latency in REQUEST_LATENCY]
    avg_latency = np.mean(latencies)
    avg_per_token_latency = np.mean(
        [
            latency / (prompt_len + output_len)
            for prompt_len, output_len, latency in REQUEST_LATENCY
        ]
    )
    avg_per_output_token_latency = np.mean(
        [latency / output_len for _, output_len, latency in REQUEST_LATENCY]
    )
    decoding_throughput = (
        np.sum([output_len for _, output_len, _ in REQUEST_LATENCY]) / benchmark_time
    )

    # latencies = [round(latency, 2) for _, _, latency in REQUEST_LATENCY]
    # print(latencies)

    print(f"Total time: {benchmark_time:.2f} s")
    print(f"Request throughput: {args.num_prompts / benchmark_time:.2f} requests/s")
    print(f"Decoding throughput: {decoding_throughput:.2f} token/s")
    print(f"Average latency: {avg_latency:.2f} s")
    print(f"Average latency per token: {avg_per_token_latency:.2f} s")
    print(f"Average latency per output token: {avg_per_output_token_latency:.2f} s")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Benchmark the online serving throughput."
    )
    parser.add_argument(
        "--backend",
        type=str,
        default="srt",
        choices=["vllm", "tgi", "srt", "lightllm", "ginfer"],
    )
    parser.add_argument("--host", type=str, default="http://localhost")
    parser.add_argument("--port", type=int, default=30000)
    parser.add_argument("--dataset", type=str, help="Path to the dataset.")
    parser.add_argument("--input-len", type=int, default=2048)
    parser.add_argument("--output-len", type=int, default=256)
    parser.add_argument("--range-ratio", type=float, default=1.0)
    parser.add_argument(
        "--tokenizer",
        type=str,
        default="NousResearch/Meta-Llama-3-8B",
        help="Name or path of the tokenizer.",
    )
    parser.add_argument(
        "--best-of",
        type=int,
        default=1,
        help="Generates `best_of` sequences per prompt and " "returns the best one.",
    )
    parser.add_argument("--use-beam-search", action="store_true")
    parser.add_argument(
        "--num-prompts", type=int, default=1000, help="Number of prompts to process."
    )
    parser.add_argument(
        "--request-rate",
        type=float,
        default=float("inf"),
        help="Number of requests per second. If this is inf, "
        "then all the requests are sent at time 0. "
        "Otherwise, we use Poisson process to synthesize "
        "the request arrival times.",
    )
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument(
        "--trust-remote-code",
        action="store_true",
        help="trust remote code from huggingface",
    )
    args = parser.parse_args()
    main(args)
docs/en/benchmark_and_profiling.md (new file, 49 lines)
@@ -0,0 +1,49 @@
# Benchmark and Profiling

## Benchmark
- Benchmark a single static batch by running the following command without launching a server. The arguments are the same as for `launch_server.py`. Note that this is not a dynamic batching server, so it may run out of memory for a batch size that a real server can handle. A real server truncates the prefill into several batches, while this unit test does not. For accurate large batch testing, consider using `sglang.bench_serving`.
```
python -m sglang.bench_latency --model-path meta-llama/Meta-Llama-3-8B-Instruct --batch 32 --input-len 256 --output-len 32
```
- Benchmark online serving. Launch a server first and run the following command.
```
python3 -m sglang.bench_serving --backend sglang --num-prompt 10
```
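
The serving benchmark above assumes a server is already up. A launch command consistent with the other examples in this commit would look like the following (the model path and port are illustrative and can be replaced with your own):
```
python -m sglang.launch_server --model-path meta-llama/Meta-Llama-3-8B-Instruct --port 30000
```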

## Profile with Nsight
0. Prerequisite
```bash
# install nsys
# https://docs.nvidia.com/nsight-systems/InstallationGuide/index.html
apt update
apt install -y --no-install-recommends gnupg
echo "deb http://developer.download.nvidia.com/devtools/repos/ubuntu$(source /etc/lsb-release; echo "$DISTRIB_RELEASE" | tr -d .)/$(dpkg --print-architecture) /" | tee /etc/apt/sources.list.d/nvidia-devtools.list
apt-key adv --fetch-keys http://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/7fa2af80.pub
apt update
apt install nsight-systems-cli
```

1. To profile a single batch, use `nsys profile --trace-fork-before-exec=true --cuda-graph-trace=node python3 -m sglang.bench_latency --model meta-llama/Meta-Llama-3-8B --batch-size 64 --input-len 512`

2. To profile a server, e.g.

```bash
# server
# set the delay and duration times according to needs
nsys profile --trace-fork-before-exec=true --cuda-graph-trace=node -o sglang.out --delay 60 --duration 70 python3 -m sglang.launch_server --model-path meta-llama/Meta-Llama-3.1-8B-Instruct --disable-radix-cache

# client
python3 -m sglang.bench_serving --backend sglang --num-prompts 6000 --dataset-name random --random-input 4096 --random-output 2048
```
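
Once the capture window ends, Nsight Systems writes a report file; with `-o sglang.out` as above the output is typically named `sglang.out.nsys-rep` (the exact name depends on your nsys version). The report can be opened in the Nsight Systems GUI or summarized on the command line, for example:

```bash
# print summary tables (CUDA kernel, memory, and NVTX statistics) from the captured report
nsys stats sglang.out.nsys-rep
```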

3. Use NVTX, e.g.

```bash
# install nvtx
pip install nvtx

# code snippets
import nvtx
with nvtx.annotate("description", color="color"):
    # some critical code
```
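
To make the snippet above concrete, here is a minimal, self-contained sketch (the range names, the color, and the `fake_forward_pass` helper are placeholders, not SGLang APIs); when the script is run under `nsys profile`, each annotated region appears as a named range on the timeline:

```python
import time

import nvtx


def fake_forward_pass():
    # stand-in for the critical code you want to see on the timeline
    time.sleep(0.1)


for step in range(3):
    # each iteration shows up as a separate named range in the Nsight timeline
    with nvtx.annotate(f"decode step {step}", color="green"):
        fake_forward_pass()
```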
@@ -149,10 +149,12 @@ async def async_request_openai_completions(
        "completions"
    ), "OpenAI Completions API URL must end with 'completions'."

    prompt = request_func_input.prompt

    async with aiohttp.ClientSession(timeout=AIOHTTP_TIMEOUT) as session:
        payload = {
            "model": request_func_input.model,
            "prompt": request_func_input.prompt,
            "prompt": prompt,
            "temperature": 0.0,
            "best_of": 1,
            "max_tokens": request_func_input.output_len,
@@ -220,6 +222,13 @@ async def async_request_openai_completions(
    return output


async def async_request_ginfer(
    request_func_input: RequestFuncInput,
    pbar: Optional[tqdm] = None,
) -> RequestFuncOutput:
    raise NotImplementedError()


def get_model(pretrained_model_name_or_path: str) -> str:
    if os.getenv("SGLANG_USE_MODELSCOPE", "False").lower() == "true":
        import huggingface_hub.constants
@@ -238,6 +247,13 @@ def get_model(pretrained_model_name_or_path: str) -> str:
def get_tokenizer(
    pretrained_model_name_or_path: str,
) -> Union[PreTrainedTokenizer, PreTrainedTokenizerFast]:
    if pretrained_model_name_or_path.endswith(
        ".json"
    ) or pretrained_model_name_or_path.endswith(".model"):
        from sglang.srt.hf_transformers_utils import get_tokenizer

        return get_tokenizer(pretrained_model_name_or_path)

    if pretrained_model_name_or_path is not None and not os.path.exists(
        pretrained_model_name_or_path
    ):
@@ -252,6 +268,7 @@ ASYNC_REQUEST_FUNCS = {
    "vllm": async_request_openai_completions,
    "lmdeploy": async_request_openai_completions,
    "trt": async_request_trt_llm,
    "ginfer": async_request_ginfer,
}
@@ -351,9 +368,9 @@ def sample_sharegpt_requests(

        # Tokenize the prompts and completions.
        prompt = dataset[i][0]
        prompt_token_ids = tokenizer(prompt).input_ids
        prompt_token_ids = tokenizer.encode(prompt)
        completion = dataset[i][1]
        completion_token_ids = tokenizer(completion).input_ids
        completion_token_ids = tokenizer.encode(completion)
        prompt_len = len(prompt_token_ids)
        output_len = (
            len(completion_token_ids) if fixed_output_len is None else fixed_output_len
@@ -361,7 +378,9 @@ def sample_sharegpt_requests(
        if prompt_len < 4 or output_len < 4:
            # Prune too short sequences.
            continue
        if prompt_len > 1024 or prompt_len + output_len > 2048:
        if prompt_len > 1024 or (
            prompt_len + output_len > 2048 and fixed_output_len is None
        ):
            # Prune too long sequences.
            continue
        filtered_dataset.append((prompt, prompt_len, output_len))
@@ -422,7 +441,7 @@ def sample_random_requests(
        for i in range(num_prompts):
            # Tokenize the prompts and completions.
            prompt = dataset[i][0]
            prompt_token_ids = tokenizer(prompt).input_ids
            prompt_token_ids = tokenizer.encode(prompt)
            prompt_len = len(prompt_token_ids)

            if prompt_len > input_lens[i]:
@@ -488,7 +507,7 @@ def calculate_metrics(
            output_len = outputs[i].output_len
            output_lens.append(output_len)
            retokenized_output_len = len(
                tokenizer(outputs[i].generated_text, add_special_tokens=False).input_ids
                tokenizer.encode(outputs[i].generated_text, add_special_tokens=False)
            )
            retokenized_output_lens.append(retokenized_output_len)
            total_input += input_requests[i][1]
@@ -547,7 +566,6 @@ async def benchmark(
    input_requests: List[Tuple[str, int, int]],
    request_rate: float,
    disable_tqdm: bool,
    enable_multi: bool,
    extra_request_body: Dict[str, Any],
):
    if backend in ASYNC_REQUEST_FUNCS:
@@ -756,6 +774,7 @@ def run_benchmark(args_: argparse.Namespace):
    global args
    args = args_

    # Set global environments
    set_ulimit()
    random.seed(args.seed)
    np.random.seed(args.seed)
@@ -764,12 +783,14 @@ def run_benchmark(args_: argparse.Namespace):
    if args.extra_request_body:
        extra_request_body = json.loads(args.extra_request_body)

    # Set url
    if args.port is None:
        args.port = {
            "sglang": 30000,
            "lmdeploy": 23333,
            "vllm": 8000,
            "trt": 8000,
            "ginfer": 9988,
        }.get(args.backend, 30000)

    api_url = (
@@ -792,7 +813,11 @@ def run_benchmark(args_: argparse.Namespace):
        if args.model is None:
            print("Please provide a model using `--model` when using `trt` backend.")
            sys.exit(1)
    elif args.backend == "ginfer":
        api_url = args.base_url if args.base_url else f"{args.host}:{args.port}"
        args.model = args.model or "default"

    # Get model name
    if args.model is None:
        try:
            response = requests.get(model_url)
@@ -817,6 +842,7 @@ def run_benchmark(args_: argparse.Namespace):

    print(f"{args}\n")

    # Read dataset
    backend = args.backend
    model_id = args.model
    tokenizer_id = args.tokenizer if args.tokenizer is not None else args.model
@@ -842,7 +868,21 @@ def run_benchmark(args_: argparse.Namespace):
    else:
        raise ValueError(f"Unknown dataset: {args.dataset_name}")

    if args.multi:
    if not args.multi:
        return asyncio.run(
            benchmark(
                backend=backend,
                api_url=api_url,
                model_id=model_id,
                tokenizer=tokenizer,
                input_requests=input_requests,
                request_rate=args.request_rate,
                disable_tqdm=args.disable_tqdm,
                extra_request_body=extra_request_body,
            )
        )
    else:
        # Benchmark multiple rps. TODO: use a fixed duration to compute num_prompts
        request_rates = parse_request_rate_range(args.request_rate_range)

        for rate in request_rates:
@@ -855,27 +895,11 @@ def run_benchmark(args_: argparse.Namespace):
                    input_requests=input_requests,
                    request_rate=rate,
                    disable_tqdm=args.disable_tqdm,
                    enable_multi=args.multi,
                    extra_request_body=extra_request_body,
                )
            )
    else:
        return asyncio.run(
            benchmark(
                backend=backend,
                api_url=api_url,
                model_id=model_id,
                tokenizer=tokenizer,
                input_requests=input_requests,
                request_rate=args.request_rate,
                disable_tqdm=args.disable_tqdm,
                enable_multi=args.multi,
                extra_request_body=extra_request_body,
            )
        )


# to avoid relying on SGLang's components
def set_ulimit(target_soft_limit=65535):
    resource_type = resource.RLIMIT_NOFILE
    current_soft, current_hard = resource.getrlimit(resource_type)
@@ -968,7 +992,7 @@ if __name__ == "__main__":
        help="Number of requests per second. If this is inf, then all the requests are sent at time 0. "
        "Otherwise, we use Poisson process to synthesize the request arrival times. Default is 128.0.",
    )
    parser.add_argument("--seed", type=int, default=0, help="Default is 0.")
    parser.add_argument("--seed", type=int, default=1, help="The random seed.")
    parser.add_argument(
        "--multi",
        action="store_true",
@@ -30,7 +30,17 @@ from transformers import (
    PreTrainedTokenizer,
    PreTrainedTokenizerFast,
)
from vllm.transformers_utils.configs import ChatGLMConfig, DbrxConfig

try:
    from vllm.transformers_utils.configs import ChatGLMConfig, DbrxConfig

    _CONFIG_REGISTRY: Dict[str, Type[PretrainedConfig]] = {
        ChatGLMConfig.model_type: ChatGLMConfig,
        DbrxConfig.model_type: DbrxConfig,
    }
except ImportError:
    # We want this file to run without vllm dependency
    _CONFIG_REGISTRY: Dict[str, Type[PretrainedConfig]] = {}

from sglang.srt.utils import is_multimodal_model
@@ -113,30 +113,7 @@ def call_generate_srt_raw(prompt, temperature, max_tokens, stop=None, url=None):


def call_generate_ginfer(prompt, temperature, max_tokens, stop=None, url=None):
    import grpc
    from ginfer import sampler_pb2, sampler_pb2_grpc

    sampler_channel = grpc.insecure_channel(url.replace("http://", ""))
    sampler = sampler_pb2_grpc.SamplerStub(sampler_channel)

    if stop is None:
        stop_strings = None
    else:
        stop_strings = [stop]

    sample_request = sampler_pb2.SampleTextRequest(
        prompt=prompt,
        settings=sampler_pb2.SampleSettings(
            max_len=max_tokens,
            rng_seed=0,
            temperature=max(temperature, 1e-7),
            nucleus_p=1,
            stop_strings=stop_strings,
        ),
    )
    stream = sampler.SampleText(sample_request)
    response = "".join([x.text for x in stream])
    return response
    raise NotImplementedError()


def call_generate_guidance(