sglangv0.5.2 & support Qwen3-Next-80B-A3B-Instruct
This commit is contained in:
33
benchmark/long_json_decode/README.md
Normal file
33
benchmark/long_json_decode/README.md
Normal file
@@ -0,0 +1,33 @@
## Run benchmark

### Benchmark sglang
```
python3 -m sglang.launch_server --model-path codellama/CodeLlama-7b-instruct-hf --port 30000
```

```
python3 bench_sglang.py --num-questions 5 --parallel 1
```

### Benchmark vllm
```
python3 -m vllm.entrypoints.api_server --tokenizer-mode auto --model codellama/CodeLlama-7b-instruct-hf --disable-log-requests --port 21000 --gpu 0.97
```

```
python3 bench_other.py --backend vllm --num-questions 5
```

### Benchmark guidance
```
python3 bench_other.py --backend guidance --num-questions 5 --parallel 1 --n-ctx 11000 --model-path path/to/code-llama/gguf
```

### Build dataset
```
pip install wikipedia
python3 build_dataset.py
```
89
benchmark/long_json_decode/bench_other.py
Normal file
89
benchmark/long_json_decode/bench_other.py
Normal file
@@ -0,0 +1,89 @@
|
||||
import argparse
|
||||
import json
|
||||
import time
|
||||
from concurrent.futures import ThreadPoolExecutor
|
||||
from functools import partial
|
||||
|
||||
from tqdm import tqdm
|
||||
|
||||
from sglang.test.test_utils import add_common_other_args_and_parse, get_call_generate
|
||||
from sglang.utils import dump_state_text, read_jsonl
|
||||
|
||||
|
||||
def json_decode(document, generate):
    """Build a city-extraction prompt and fill a JSON-shaped template.

    Args:
        document: Wikipedia page text inserted between the page markers.
        generate: Backend callable ``generate(prompt, max_tokens=..., stop=...)``
            returning the completion string; it receives the full prompt
            accumulated so far on every call.

    Returns:
        The whole transcript (prompt plus all generated field values).
    """
    prompt = "Please extract the information of a city from the following wikipedia page.\n"
    prompt += "Page begin.\n" + document + "Page end.\n"
    prompt += "Here is the name, country, and symbol of the city in JSON format.\n"
    prompt += "{\n"
    # Each field gets a small token budget and stops at the closing quote.
    for field, budget in (
        ("name", 8),
        ("country", 8),
        ("air port code", 8),
        ("top 3 landmarks", 24),
    ):
        prompt += f' "{field}": "'
        prompt += generate(prompt, max_tokens=budget, stop='"') + '",\n'
    prompt += "}\n"
    return prompt
|
||||
|
||||
|
||||
def main(args):
    """Run the long-JSON-decode benchmark against a non-sglang backend.

    Reads documents from ``args.data_path``, runs ``json_decode`` over the
    first ``args.num_questions`` of them (serially or with a thread pool),
    prints the wall-clock latency, and appends a result record to
    ``args.result_file``.
    """
    docs = read_jsonl(args.data_path)[: args.num_questions]
    arguments = [{"document": entry["document"]} for entry in docs]
    states = [None] * len(arguments)

    # Select backend (temperature pinned to 0 for determinism).
    call_generate = partial(get_call_generate(args), temperature=0)

    # Run requests; results land in `states` by index.
    def get_one_answer(idx):
        states[idx] = json_decode(generate=call_generate, **arguments[idx])

    tic = time.perf_counter()
    if args.parallel == 1:
        for idx in tqdm(range(len(arguments))):
            get_one_answer(idx)
    else:
        with ThreadPoolExecutor(args.parallel) as pool:
            # Wrap in list() so the tqdm-wrapped iterator is fully consumed.
            list(
                tqdm(
                    pool.map(get_one_answer, range(len(arguments))),
                    total=len(arguments),
                )
            )
    latency = time.perf_counter() - tic

    print(f"Latency: {latency:.3f}")

    # Write results
    dump_state_text(f"tmp_output_{args.backend}.txt", states)

    with open(args.result_file, "a") as fout:
        record = {
            "task": "long_json_decode",
            "backend": args.backend,
            "num_gpus": 1,
            "latency": round(latency, 3),
            "num_requests": args.num_questions,
            "other": {
                "num_questions": args.num_questions,
                "parallel": args.parallel,
            },
        }
        fout.write(json.dumps(record) + "\n")
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Script-specific flags; the shared backend/parallelism flags are added
    # (and parsed) by add_common_other_args_and_parse.
    parser = argparse.ArgumentParser()
    parser.add_argument("--data-path", type=str, default="questions.jsonl")
    parser.add_argument("--num-questions", type=int, default=100)
    main(add_common_other_args_and_parse(parser))
|
||||
81
benchmark/long_json_decode/bench_sglang.py
Normal file
81
benchmark/long_json_decode/bench_sglang.py
Normal file
@@ -0,0 +1,81 @@
|
||||
import argparse
|
||||
import json
|
||||
import time
|
||||
|
||||
import sglang as sgl
|
||||
from sglang.test.test_utils import (
|
||||
add_common_sglang_args_and_parse,
|
||||
select_sglang_backend,
|
||||
)
|
||||
from sglang.utils import dump_state_text, read_jsonl
|
||||
|
||||
|
||||
@sgl.function
def json_decode(s, document):
    """sglang program: show *document*, then generate four city fields
    ("name", "country", "air port code", "landmarks") inside a JSON-shaped
    template, each with a small token budget and stopping at the closing
    quote."""
    s += "Please extract the information of a city from the following wikipedia page.\n"
    s += "Page begin.\n" + document + "Page end.\n"
    s += "Here is the name, country, and symbol of the city in JSON format.\n"
    s += "{\n"
    # Each sgl.gen call conditions on everything appended to `s` so far.
    s += ' "name": "' + sgl.gen("name", max_tokens=8, stop='"') + '",\n'
    s += ' "country": "' + sgl.gen("country", max_tokens=8, stop='"') + '",\n'
    s += (
        ' "air port code": "'
        + sgl.gen("air port code", max_tokens=8, stop='"')
        + '",\n'
    )
    s += (
        ' "top 3 landmarks": "'
        + sgl.gen("landmarks", max_tokens=24, stop='"')
        + '",\n'
    )
    # NOTE(review): the trailing comma before "}" makes the emitted text
    # technically invalid JSON; the benchmark only measures decode latency,
    # so this is left as-is (bench_other.py emits the same shape).
    s += "}\n"
|
||||
|
||||
|
||||
def main(args):
    """Run the long-JSON-decode benchmark on an sglang backend.

    Reads documents from ``args.data_path``, runs the ``json_decode`` sglang
    program as a batch over the first ``args.num_questions`` of them, prints
    the wall-clock latency, and appends a result record to
    ``args.result_file``.
    """
    docs = read_jsonl(args.data_path)[: args.num_questions]
    arguments = [{"document": entry["document"]} for entry in docs]

    # Select backend
    backend = select_sglang_backend(args)
    sgl.set_default_backend(backend)

    # Run requests as one batch; run_batch handles threading internally.
    tic = time.perf_counter()
    states = json_decode.run_batch(
        arguments, temperature=0, num_threads=args.parallel, progress_bar=True
    )
    latency = time.perf_counter() - tic

    print(f"Latency: {latency:.3f}")

    # Write results
    dump_state_text(f"tmp_output_{args.backend}.txt", states)

    with open(args.result_file, "a") as fout:
        record = {
            "task": "long_json_decode",
            "backend": args.backend,
            "num_gpus": 1,
            "latency": round(latency, 3),
            "num_requests": args.num_questions,
            "other": {
                "num_questions": args.num_questions,
                "parallel": args.parallel,
            },
        }
        fout.write(json.dumps(record) + "\n")
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Script-specific flags; the shared sglang backend flags are added
    # (and parsed) by add_common_sglang_args_and_parse.
    parser = argparse.ArgumentParser()
    parser.add_argument("--data-path", type=str, default="questions.jsonl")
    parser.add_argument("--num-questions", type=int, default=10)
    main(add_common_sglang_args_and_parse(parser))
|
||||
27
benchmark/long_json_decode/build_dataset.py
Normal file
27
benchmark/long_json_decode/build_dataset.py
Normal file
@@ -0,0 +1,27 @@
|
||||
import json
|
||||
|
||||
import transformers
|
||||
import wikipedia
|
||||
|
||||
# Build questions.jsonl: fetch each city's Wikipedia page, truncate it to
# roughly 10k Llama-2 tokens, and append one {"document": ...} record per city.
name = "meta-llama/Llama-2-7b-chat-hf"
t = transformers.AutoTokenizer.from_pretrained(name)
# Fix: "los angles" was misspelled; use the correct page title so the
# wikipedia lookup resolves directly instead of via search suggestions.
city_names = ["los angeles", "london", "tokyo", "beijing", "singapore"]


for city_name in city_names:
    content = str(wikipedia.page(city_name).content)
    content = content.replace("\n\n", "\n")

    tokens = t.encode(content)

    # Approximate a 10k-token prefix by assuming a uniform chars-per-token
    # ratio across the page, then re-tokenize to report the actual count.
    truncate_len = int((10000 / len(tokens)) * len(content))
    truncate_content = content[:truncate_len]
    truncate_tokens = t.encode(truncate_content)

    # Count token
    print(
        f"city_name: {city_name}, #tokens: {len(tokens)}, #truncate tokens: {len(truncate_tokens)}"
    )

    # Append mode: delete questions.jsonl before re-running to avoid duplicates.
    with open("questions.jsonl", "a") as fout:
        fout.write(json.dumps({"document": truncate_content}) + "\n")
|
||||
Reference in New Issue
Block a user