"""Benchmark offline inference throughput."""
import argparse
import json
import random
import time
from typing import List, Tuple, Union

import torch
from tqdm import tqdm
from transformers import AutoModelForCausalLM, PreTrainedTokenizerBase

from xtrt_llm.vllm import LLM, SamplingParams
from xtrt_llm.vllm.transformers_utils.tokenizer import get_tokenizer
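
# Illustrative invocations (the script name, dataset path, and engine path below are
# placeholders, not taken from this repo; the flags are the ones defined in __main__):
#
#   # ShareGPT-style dataset through the vLLM backend:
#   python benchmark_throughput.py --backend vllm \
#       --dataset /path/to/sharegpt.json --model facebook/opt-125m --num-prompts 1000
#
#   # Synthetic prompts built from a repeated token id:
#   python benchmark_throughput.py --backend vllm --dummy-dataset --dummy-tokenid 2 \
#       --dummy-input-len 1024 --dummy-output-len 1024 --num-prompts 256 \
#       --engine_dir /path/to/trt_engine
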
def dummy_sample_requests(
    tokenizer: PreTrainedTokenizerBase,
    prompt: Union[str, List[str]],
    tokenid: int,
    output_len: Union[int, List[int]],
    input_len: Union[int, List[int]],
    max_model_len: int,
    num_requests: Union[int, List[int]],
) -> List[Tuple[List[int], int, int]]:
    """Build synthetic requests from a prompt string/list or a repeated token id."""
    if prompt is not None:
        if isinstance(prompt, str):
            assert isinstance(input_len, int) \
                and isinstance(output_len, int) and isinstance(num_requests, int)
            prompt_token_ids_list = [tokenizer(prompt).input_ids]
            input_len = [input_len]
            output_len = [output_len]
            num_requests = [num_requests]
        else:
            assert isinstance(input_len, list) \
                and isinstance(output_len, list) and isinstance(num_requests, list)
            prompt_token_ids_list = [tokenizer(x).input_ids for x in prompt]
    if tokenid is not None:
        if isinstance(input_len, int):
            assert isinstance(output_len, int) and isinstance(num_requests, int)
            prompt_token_ids_list = [[tokenid] * input_len]
            input_len = [input_len]
            output_len = [output_len]
            num_requests = [num_requests]
        else:
            assert isinstance(output_len, list) and isinstance(num_requests, list)
            prompt_token_ids_list = [[tokenid] * x for x in input_len]

    sampled_requests: List[Tuple[List[int], int, int]] = []
    for i, prompt_token_ids in enumerate(prompt_token_ids_list):
        for _ in range(num_requests[i]):
            token_ids = list(prompt_token_ids)
            if len(token_ids) < input_len[i]:
                # Pad with the first token until the prompt reaches input_len.
                token_ids.extend([token_ids[0]] * (input_len[i] - len(token_ids)))
            elif len(token_ids) > input_len[i]:
                # Truncate the prompt to input_len.
                token_ids = token_ids[:input_len[i]]
            sampled_requests.append(
                (token_ids, input_len[i],
                 min(output_len[i], max_model_len - input_len[i])))

    random.shuffle(sampled_requests)
    return sampled_requests

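# Illustrative call (not executed here; the argument values are arbitrary examples):
#
#   dummy_sample_requests(tokenizer, prompt=None, tokenid=42,
#                         output_len=[128, 256], input_len=[512, 1024],
#                         max_model_len=2048, num_requests=[100, 100])
#
# yields 200 shuffled (prompt_token_ids, input_len, output_len) tuples, 100 per
# configured length, with each output_len clamped to max_model_len - input_len.
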
def sample_requests(
    dataset_path: str,
    num_requests: int,
    tokenizer: PreTrainedTokenizerBase,
) -> List[Tuple[str, int, int]]:
    # Load the dataset.
    with open(dataset_path) as f:
        dataset = json.load(f)
    # Filter out the conversations with fewer than 2 turns.
    dataset = [data for data in dataset if len(data["conversations"]) >= 2]
    # Only keep the first two turns of each conversation.
    dataset = [(data["conversations"][0]["value"],
                data["conversations"][1]["value"]) for data in dataset]

    # Tokenize the prompts and completions.
    prompts = [prompt for prompt, _ in dataset]
    prompt_token_ids = tokenizer(prompts).input_ids
    completions = [completion for _, completion in dataset]
    completion_token_ids = tokenizer(completions).input_ids
    tokenized_dataset = []
    for i in range(len(dataset)):
        output_len = len(completion_token_ids[i])
        tokenized_dataset.append((prompts[i], prompt_token_ids[i], output_len))

    # Filter out too long sequences.
    filtered_dataset: List[Tuple[str, int, int]] = []
    for prompt, prompt_token_ids, output_len in tokenized_dataset:
        prompt_len = len(prompt_token_ids)
        if prompt_len < 4 or output_len < 4:
            # Prune too short sequences.
            continue
        if prompt_len > 1024 or prompt_len + output_len > 2048:
            # Prune too long sequences.
            continue
        filtered_dataset.append((prompt, prompt_len, output_len))

    # Sample the requests.
    sampled_requests = random.sample(filtered_dataset, num_requests)
    return sampled_requests

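# The dataset consumed by sample_requests above is a JSON list of records with a
# "conversations" field whose entries each carry a "value" string; only these two
# keys are read, any other fields are ignored. Illustrative record:
#
#   [
#     {"conversations": [{"value": "<prompt text>"},
#                        {"value": "<reference completion>"}]},
#     ...
#   ]
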
def dummy_run_vllm(
    requests: List[Tuple[List[int], int, int]],
    model: str,
    tokenizer: str,
    tensor_parallel_size: int,
    seed: int,
    n: int,
    use_beam_search: bool,
    trust_remote_code: bool,
    max_model_len: int,
    engine_dir: str,
    max_num_seqs: int,
    max_num_batched_tokens: int,
) -> float:
    llm = LLM(
        model=model,
        tokenizer=tokenizer,
        tensor_parallel_size=tensor_parallel_size,
        seed=seed,
        trust_remote_code=trust_remote_code,
        disable_log_stats=False,
        max_model_len=max_model_len,
        engine_dir=engine_dir,
        max_num_seqs=max_num_seqs,
        max_num_batched_tokens=max_num_batched_tokens,
    )
    start = time.time()
    # Add the requests to the engine.
    for prompt_token_ids, _, output_len in requests:
        sampling_params = SamplingParams(
            n=n,
            temperature=0.0 if use_beam_search else 1.0,
            top_p=1.0,
            use_beam_search=use_beam_search,
            ignore_eos=True,
            max_tokens=output_len,
        )
        # FIXME(woosuk): Do not use internal method.
        llm._add_request(
            # model_type="llama2",
            prompt=None,
            prompt_token_ids=prompt_token_ids,
            sampling_params=sampling_params,
        )

    # FIXME(woosuk): Do not use internal method.
    llm._run_engine(use_tqdm=True)
    end = time.time()
    return end - start

def run_vllm(
    requests: List[Tuple[str, int, int]],
    model: str,
    tokenizer: str,
    tensor_parallel_size: int,
    seed: int,
    n: int,
    use_beam_search: bool,
    trust_remote_code: bool,
) -> float:
    llm = LLM(
        model=model,
        tokenizer=tokenizer,
        tensor_parallel_size=tensor_parallel_size,
        seed=seed,
        trust_remote_code=trust_remote_code,
    )

    # Add the requests to the engine.
    for prompt, _, output_len in requests:
        sampling_params = SamplingParams(
            n=n,
            temperature=0.0 if use_beam_search else 1.0,
            top_p=1.0,
            use_beam_search=use_beam_search,
            ignore_eos=True,
            max_tokens=output_len,
        )
        # FIXME(woosuk): Do not use internal method.
        llm._add_request(
            model_type="llama2",
            prompt=prompt,
            prompt_token_ids=None,
            sampling_params=sampling_params,
        )

    start = time.time()
    # FIXME(woosuk): Do not use internal method.
    llm._run_engine(use_tqdm=True)
    end = time.time()
    return end - start

def run_hf(
    requests: List[Tuple[str, int, int]],
    model: str,
    tokenizer: PreTrainedTokenizerBase,
    n: int,
    use_beam_search: bool,
    max_batch_size: int,
    trust_remote_code: bool,
) -> float:
    assert not use_beam_search
    llm = AutoModelForCausalLM.from_pretrained(
        model, torch_dtype=torch.float16, trust_remote_code=trust_remote_code)
    if llm.config.model_type == "llama":
        # To enable padding in the HF backend.
        tokenizer.pad_token = tokenizer.eos_token
    llm = llm.cuda()

    pbar = tqdm(total=len(requests))
    start = time.time()
    batch: List[str] = []
    max_prompt_len = 0
    max_output_len = 0
    for i in range(len(requests)):
        prompt, prompt_len, output_len = requests[i]
        # Add the prompt to the batch.
        batch.append(prompt)
        max_prompt_len = max(max_prompt_len, prompt_len)
        max_output_len = max(max_output_len, output_len)
        if len(batch) < max_batch_size and i != len(requests) - 1:
            # Check if we can add more requests to the batch.
            _, next_prompt_len, next_output_len = requests[i + 1]
            if (max(max_prompt_len, next_prompt_len) +
                    max(max_output_len, next_output_len)) <= 2048:
                # We can add more requests to the batch.
                continue

        # Generate the sequences.
        input_ids = tokenizer(batch, return_tensors="pt",
                              padding=True).input_ids
        llm_outputs = llm.generate(
            input_ids=input_ids.cuda(),
            do_sample=not use_beam_search,
            num_return_sequences=n,
            temperature=1.0,
            top_p=1.0,
            use_cache=True,
            max_new_tokens=max_output_len,
        )
        # Include the decoding time.
        tokenizer.batch_decode(llm_outputs, skip_special_tokens=True)
        pbar.update(len(batch))

        # Clear the batch.
        batch = []
        max_prompt_len = 0
        max_output_len = 0
    end = time.time()
    return end - start

def main(args: argparse.Namespace):
    print(args)
    random.seed(args.seed)

    # Sample the requests.
    tokenizer = get_tokenizer(args.tokenizer,
                              trust_remote_code=args.trust_remote_code)
    if args.dummy_dataset:
        requests = dummy_sample_requests(tokenizer, args.dummy_prompt,
                                         args.dummy_tokenid,
                                         args.dummy_output_len,
                                         args.dummy_input_len,
                                         args.max_model_len, args.num_prompts)

        if args.backend == "vllm":
            elapsed_time = dummy_run_vllm(
                requests, args.model, args.tokenizer, args.tensor_parallel_size,
                args.seed, args.n, args.use_beam_search, args.trust_remote_code,
                args.max_model_len, args.engine_dir, args.max_num_seqs,
                args.max_num_batched_tokens)
        else:
            raise ValueError(f"Unknown backend: {args.backend}")
        total_num_tokens = sum(output_len
                               for _, _, output_len in requests)
        print(f"Throughput: {len(requests) / elapsed_time:.2f} requests/s, "
              f"{total_num_tokens / elapsed_time:.2f} tokens/s")
    else:
        requests = sample_requests(args.dataset, args.num_prompts, tokenizer)

        if args.backend == "vllm":
            elapsed_time = run_vllm(requests, args.model, args.tokenizer,
                                    args.tensor_parallel_size, args.seed,
                                    args.n, args.use_beam_search,
                                    args.trust_remote_code)
        elif args.backend == "hf":
            assert args.tensor_parallel_size == 1
            elapsed_time = run_hf(requests, args.model, tokenizer, args.n,
                                  args.use_beam_search, args.hf_max_batch_size,
                                  args.trust_remote_code)
        else:
            raise ValueError(f"Unknown backend: {args.backend}")
        total_num_tokens = sum(prompt_len + output_len
                               for _, prompt_len, output_len in requests)
        print(f"Throughput: {len(requests) / elapsed_time:.2f} requests/s, "
              f"{total_num_tokens / elapsed_time:.2f} tokens/s")

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Benchmark the throughput.")
    parser.add_argument("--backend",
                        type=str,
                        choices=["vllm", "hf"],
                        default="vllm")
    parser.add_argument("--dataset", type=str, help="Path to the dataset.")
    parser.add_argument("--model", type=str, default="facebook/opt-125m")
    parser.add_argument("--tokenizer", type=str, default=None)
    parser.add_argument("--tensor-parallel-size", "-tp", type=int, default=1)
    parser.add_argument("--n",
                        type=int,
                        default=1,
                        help="Number of generated sequences per prompt.")
    parser.add_argument("--use-beam-search", action="store_true")
    parser.add_argument("--num-prompts",
                        nargs='+',
                        type=int,
                        default=1000,
                        help="Number of prompts to process.")
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--hf-max-batch-size",
                        type=int,
                        default=None,
                        help="Maximum batch size for HF backend.")
    parser.add_argument('--trust-remote-code',
                        action='store_true',
                        help='Trust remote code from Hugging Face.')
    parser.add_argument('--max-model-len', type=int, default=2048)
    parser.add_argument('--max-num-batched-tokens', type=int, default=2048)
    parser.add_argument('--max-num-seqs', type=int, default=128)
    parser.add_argument('--dummy-dataset',
                        action='store_true',
                        help='Use dummy data for testing.')
    parser.add_argument('--dummy-prompt', nargs='+', type=str, default=None)
    parser.add_argument('--dummy-tokenid', type=int, default=None)
    parser.add_argument('--dummy-input-len', nargs='+', type=int, default=1024)
    parser.add_argument('--dummy-output-len', nargs='+', type=int, default=1024)
    parser.add_argument("--engine_dir", type=str, help="Path to the engine.")
    args = parser.parse_args()

    if args.backend == "vllm":
        if args.hf_max_batch_size is not None:
            raise ValueError("HF max batch size is only for HF backend.")
    elif args.backend == "hf":
        if args.hf_max_batch_size is None:
            raise ValueError("HF max batch size is required for HF backend.")
    if args.dummy_dataset:
        if args.dummy_prompt is None and args.dummy_tokenid is None:
            raise ValueError(
                "--dummy-dataset requires either --dummy-prompt or "
                "--dummy-tokenid to be set.")
    if args.tokenizer is None:
        args.tokenizer = args.model
    main(args)