release initial code
Co-authored-by: Ying Sheng <sqy1415@gmail.com>
Co-authored-by: Liangsheng Yin <hnyls2002@gmail.com>
Co-authored-by: Zhiqiang Xie <xiezhq@stanford.edu>
Co-authored-by: parasol-aser <3848358+parasol-aser@users.noreply.github.com>
Co-authored-by: LiviaSun <33578456+ChuyueSun@users.noreply.github.com>
Co-authored-by: Cody Yu <hao.yu.cody@gmail.com>
benchmark/long_json_decode/README.md (new file)
@@ -0,0 +1,33 @@
## Run benchmark

### Benchmark sglang

```
python3 -m sglang.launch_server --model-path codellama/CodeLlama-7b-instruct-hf --port 30000
```

```
python3 bench_sglang.py --num-questions 5 --parallel 1
```

### Benchmark vllm

```
python3 -m vllm.entrypoints.api_server --tokenizer-mode auto --model codellama/CodeLlama-7b-instruct-hf --disable-log-requests --port 21000 --gpu 0.97
```

```
python3 bench_other.py --backend vllm --num-questions 5
```

### Benchmark guidance

```
python3 bench_other.py --backend guidance --num-questions 5 --parallel 1
```

### Build dataset

```
pip install wikipedia
python3 build_dataset.py
```
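For reference, both benchmark scripts prompt the model to fill in a JSON object with the following fields (a sketch reconstructed from the prompt template in `bench_sglang.py`; the bracketed values are placeholders, not real output):

```
{
  "name": "<city name>",
  "country": "<country>",
  "air port code": "<airport code>",
  "top 3 landmarks": "<comma-separated landmarks>"
}
```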
benchmark/long_json_decode/bench_other.py (new file)
@@ -0,0 +1,104 @@
import argparse
import asyncio
from concurrent.futures import ThreadPoolExecutor
from functools import partial
import json
import time

from tqdm import tqdm
import numpy as np

from sglang.test.test_utils import add_common_other_args_and_parse, call_generate_lightllm, call_generate_vllm, call_generate_srt_raw
from sglang.utils import read_jsonl, dump_state_text


def json_decode(document, generate):
    # Build the prompt incrementally: each generate() call continues the
    # partially written JSON object and stops at the closing quote.
    s = "Please extract the information of a city from the following wikipedia page.\n"
    s += "Page begin.\n" + document + "Page end.\n"
    s += "Here is the name, country, and symbol of the city in JSON format.\n"
    s += '{\n'
    s += '  "name": "'
    s += generate(s, max_tokens=8, stop='"') + '",\n'
    s += '  "country": "'
    s += generate(s, max_tokens=8, stop='"') + '",\n'
    s += '  "air port code": "'
    s += generate(s, max_tokens=8, stop='"') + '",\n'
    s += '  "top 3 landmarks": "'
    s += generate(s, max_tokens=24, stop='"') + '",\n'
    s += '}\n'
    return s


def main(args):
    lines = read_jsonl(args.data_path)
    arguments = []
    for i in range(len(lines[:args.num_questions])):
        arguments.append({
            "document": lines[i]["document"],
        })
    states = [None] * len(arguments)

    # Select backend
    if args.backend == "lightllm":
        url = f"{args.host}:{args.port}/generate"
        generate = partial(call_generate_lightllm, url=url, temperature=0)
    elif args.backend == "vllm":
        url = f"{args.host}:{args.port}/generate"
        generate = partial(call_generate_vllm, url=url, temperature=0)
    elif args.backend == "srt-raw":
        url = f"{args.host}:{args.port}/generate"
        generate = partial(call_generate_srt_raw, url=url, temperature=0)
    elif args.backend == "guidance":
        from guidance import models, gen

        model = models.LlamaCpp("/home/ubuntu/model_weights/CodeLlama-7b-instruct-hf.gguf", n_gpu_layers=-1, n_ctx=11000)

        def generate(prompt, max_tokens, stop):
            out = model + prompt + gen(name="answer",
                                       max_tokens=max_tokens, temperature=0, stop=stop)
            return out["answer"]

        # warmup
        generate("Hello!", max_tokens=8, stop=None)
    else:
        raise ValueError(f"Invalid backend: {args.backend}")

    # Run requests
    def get_one_answer(i):
        states[i] = json_decode(generate=generate, **arguments[i])

    tic = time.time()
    if args.parallel == 1:
        for i in tqdm(range(len(arguments))):
            get_one_answer(i)
    else:
        with ThreadPoolExecutor(args.parallel) as executor:
            executor.map(get_one_answer, list(range(len(arguments))))
    latency = time.time() - tic

    # Print latency
    print(f"Latency: {latency:.3f}")

    # Write results
    dump_state_text(f"tmp_output_{args.backend}.txt", states)

    with open(args.result_file, "a") as fout:
        value = {
            "task": "long_json_decode",
            "backend": args.backend,
            "num_gpus": 1,
            "latency": round(latency, 3),
            "num_requests": args.num_questions,
            "other": {
                "num_questions": args.num_questions,
                "parallel": args.parallel,
            }
        }
        fout.write(json.dumps(value) + "\n")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--data-path", type=str, default="questions.jsonl")
    parser.add_argument("--num-questions", type=int, default=100)
    args = add_common_other_args_and_parse(parser)
    main(args)
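A minimal sketch of how the `json_decode` helper above can be exercised without any server, assuming it is in scope (e.g., imported from bench_other.py); the stub backend below is hypothetical and only illustrates the expected callable signature:

```python
# Hypothetical stub: only demonstrates the (prompt, max_tokens, stop)
# signature that json_decode expects from a backend `generate` callable.
def fake_generate(prompt, max_tokens, stop):
    return "placeholder"

# Prints the fully assembled prompt with the stub completions filled in.
print(json_decode(document="A short page about an example city.\n", generate=fake_generate))
```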
benchmark/long_json_decode/bench_sglang.py (new file)
@@ -0,0 +1,68 @@
import argparse
import json
import time

import numpy as np

import sglang as sgl
from sglang.test.test_utils import add_common_sglang_args_and_parse, select_sglang_backend
from sglang.utils import read_jsonl, dump_state_text


@sgl.function
def json_decode(s, document):
    # `s` is the sglang prompt state; each sgl.gen() fills in one JSON field
    # and stops at the closing quote.
    s += "Please extract the information of a city from the following wikipedia page.\n"
    s += "Page begin.\n" + document + "Page end.\n"
    s += "Here is the name, country, and symbol of the city in JSON format.\n"
    s += '{\n'
    s += '  "name": "' + sgl.gen("name", max_tokens=8, stop='"') + '",\n'
    s += '  "country": "' + sgl.gen("country", max_tokens=8, stop='"') + '",\n'
    s += '  "air port code": "' + sgl.gen("air port code", max_tokens=8, stop='"') + '",\n'
    s += '  "top 3 landmarks": "' + sgl.gen("landmarks", max_tokens=24, stop='"') + '",\n'
    s += '}\n'


def main(args):
    lines = read_jsonl(args.data_path)
    arguments = []
    for i in range(len(lines[:args.num_questions])):
        arguments.append({
            "document": lines[i]["document"],
        })

    # Select backend
    backend = select_sglang_backend(args)
    sgl.set_default_backend(backend)

    # Run requests
    tic = time.time()
    states = json_decode.run_batch(
        arguments, temperature=0, num_threads=args.parallel)
    latency = time.time() - tic

    # Print latency
    print(f"Latency: {latency:.3f}")

    # Write results
    dump_state_text(f"tmp_output_{args.backend}.txt", states)

    with open(args.result_file, "a") as fout:
        value = {
            "task": "long_json_decode",
            "backend": args.backend,
            "num_gpus": 1,
            "latency": round(latency, 3),
            "num_requests": args.num_questions,
            "other": {
                "num_questions": args.num_questions,
                "parallel": args.parallel,
            }
        }
        fout.write(json.dumps(value) + "\n")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--data-path", type=str, default="questions.jsonl")
    parser.add_argument("--num-questions", type=int, default=10)
    args = add_common_sglang_args_and_parse(parser)
    main(args)
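Each run of either script appends one JSON line to the result file, roughly of the following shape (the backend name and numbers here are illustrative placeholders, not measured results):

```
{"task": "long_json_decode", "backend": "srt", "num_gpus": 1, "latency": 12.345, "num_requests": 10, "other": {"num_questions": 10, "parallel": 1}}
```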
benchmark/long_json_decode/build_dataset.py (new file)
@@ -0,0 +1,26 @@
import json

import transformers
import wikipedia


name = "meta-llama/Llama-2-7b-chat-hf"
t = transformers.AutoTokenizer.from_pretrained(name)
city_names = ["los angeles", "london", "tokyo", "beijing", "singapore"]


for city_name in city_names:
    content = str(wikipedia.page(city_name).content)
    content = content.replace("\n\n", "\n")

    tokens = t.encode(content)

    # Truncate each page to roughly 10,000 tokens by scaling the character
    # length with the ratio of the token budget to the actual token count.
    truncate_len = int((10000 / len(tokens)) * len(content))
    truncate_content = content[:truncate_len]
    truncate_tokens = t.encode(truncate_content)

    # Count tokens
    print(f"city_name: {city_name}, #tokens: {len(tokens)}, #truncate tokens: {len(truncate_tokens)}")

    with open("questions.jsonl", "a") as fout:
        fout.write(json.dumps({"document": truncate_content}) + "\n")
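A minimal sketch of reading the generated dataset back, mirroring how the two bench scripts consume it:

```python
# Sketch: load questions.jsonl the same way bench_sglang.py / bench_other.py do.
from sglang.utils import read_jsonl

lines = read_jsonl("questions.jsonl")
for line in lines:
    # Each line holds one truncated Wikipedia page under the "document" key.
    print(len(line["document"]))
```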