sglangv0.5.2 & support Qwen3-Next-80B-A3B-Instruct
This commit is contained in:
47
benchmark/hellaswag/README.md
Normal file
47
benchmark/hellaswag/README.md
Normal file
@@ -0,0 +1,47 @@
|
||||
## Run benchmark
|
||||
|
||||
### Benchmark sglang
|
||||
```
|
||||
python -m sglang.launch_server --model-path meta-llama/Llama-2-7b-chat-hf --port 30000
|
||||
```
|
||||
|
||||
```
|
||||
python3 bench_sglang.py --num-questions 200
|
||||
```
|
||||
|
||||
|
||||
### Benchmark vllm
|
||||
```
|
||||
python3 -m vllm.entrypoints.api_server --tokenizer-mode auto --model meta-llama/Llama-2-7b-chat-hf --disable-log-requests --port 21000
|
||||
```
|
||||
|
||||
```
|
||||
python3 bench_other.py --num-questions 200 --backend vllm
|
||||
```
|
||||
|
||||
|
||||
### Benchmark lightllm
|
||||
```
|
||||
# A10G
|
||||
python -m lightllm.server.api_server --tokenizer_mode auto --model_dir ~/model_weights/llama-2-7b-chat-hf --max_total_token_num 16000 --port 22000
|
||||
```
|
||||
|
||||
```
|
||||
python3 bench_other.py --num-questions 200 --backend lightllm
|
||||
```
|
||||
|
||||
|
||||
### Benchmark guidance
|
||||
```
|
||||
CUDA_VISIBLE_DEVICES=0,1 python3 bench_other.py --num-questions 200 --backend guidance --parallel 1 --n-ctx 4096 --model-path path/to/gguf
|
||||
```
|
||||
|
||||
|
||||
### Benchmark lmql
|
||||
```
|
||||
lmql serve-model meta-llama/Llama-2-7b-chat-hf --cuda --port 23000
|
||||
```
|
||||
|
||||
```
|
||||
python3 bench_other.py --num-questions 200 --backend lmql --port 23000 --parallel 1
|
||||
```
|
||||
118
benchmark/hellaswag/bench_other.py
Normal file
118
benchmark/hellaswag/bench_other.py
Normal file
@@ -0,0 +1,118 @@
|
||||
import argparse
import asyncio
import json
import os
import time
from concurrent.futures import ThreadPoolExecutor

import numpy as np
from tqdm import tqdm

from sglang.test.test_utils import add_common_other_args_and_parse, get_call_select
from sglang.utils import download_and_cache_file, read_jsonl
|
||||
|
||||
|
||||
def get_one_example(lines, i, include_answer):
    """Format the i-th HellaSwag record as a prompt string.

    The prompt is "<activity_label>: <ctx> ", optionally followed by the
    gold ending (used when building few-shot demonstrations).
    """
    record = lines[i]
    prompt = f"{record['activity_label']}: {record['ctx']} "
    if include_answer:
        prompt += record["endings"][record["label"]]
    return prompt
|
||||
|
||||
|
||||
def get_few_shot_examples(lines, k):
    """Concatenate the first k records, with their gold endings, as a
    few-shot prefix (each example separated by a blank line)."""
    return "".join(get_one_example(lines, i, True) + "\n\n" for i in range(k))
|
||||
|
||||
|
||||
def main(args):
    """Benchmark HellaSwag accuracy and latency against a non-sglang backend.

    Builds few-shot prompts from the validation set, asks the backend to
    select one of the candidate endings for each question, prints latency
    and accuracy, and appends a JSON record to ``args.result_file``.
    """
    # Select backend: returns a callable performing a "select" query
    # (choose one completion among `choices` given `context`).
    call_select = get_call_select(args)

    # Read data. Fix: honor --data-path (previously declared but ignored —
    # the script always re-downloaded), falling back to the cached download
    # only when the local file is absent; this matches bench_sglang.py.
    data_path = args.data_path
    url = "https://raw.githubusercontent.com/rowanz/hellaswag/master/data/hellaswag_val.jsonl"
    if not os.path.isfile(data_path):
        data_path = download_and_cache_file(url)
    lines = list(read_jsonl(data_path))

    # Construct prompts
    num_questions = args.num_questions
    num_shots = args.num_shots
    few_shot_examples = get_few_shot_examples(lines, num_shots)

    questions = []
    choices = []
    labels = []
    for i in range(len(lines[:num_questions])):
        questions.append(get_one_example(lines, i, False))
        choices.append(lines[i]["endings"])
        labels.append(lines[i]["label"])

    preds = [None] * len(labels)

    # Run requests
    if args.backend != "lmql":
        # Use a thread pool; each worker writes only its own slot in
        # `preds`, so no locking is needed.
        def get_one_answer(i):
            preds[i] = call_select(
                context=few_shot_examples + questions[i], choices=choices[i]
            )

        tic = time.perf_counter()
        if args.parallel == 1:
            for i in tqdm(range(len(questions))):
                get_one_answer(i)
        else:
            with ThreadPoolExecutor(args.parallel) as executor:
                list(
                    tqdm(
                        executor.map(get_one_answer, list(range(len(questions)))),
                        total=len(questions),
                    )
                )
    else:
        # lmql's call_select is a coroutine: gather it in batches of
        # `args.parallel` via asyncio instead of threads.
        async def batched_call(batch_size):
            for i in range(0, len(questions), batch_size):
                tasks = []
                for q, c in zip(
                    questions[i : i + batch_size], choices[i : i + batch_size]
                ):
                    tasks.append(call_select(context=few_shot_examples + q, choices=c))
                rets = await asyncio.gather(*tasks)
                for j in range(len(rets)):
                    preds[i + j] = rets[j]

        tic = time.perf_counter()
        asyncio.run(batched_call(batch_size=args.parallel))

    latency = time.perf_counter() - tic

    # Compute accuracy: preds are chosen-ending indices, compared against
    # the dataset's integer labels.
    acc = np.mean(np.array(preds) == np.array(labels))
    print(f"Latency: {latency:.3f}")
    print(f"Accuracy: {acc:.3f}")

    # Write results: append one JSON line so repeated runs accumulate.
    with open(args.result_file, "a") as fout:
        value = {
            "task": "hellaswag",
            "backend": args.backend,
            "num_gpus": 1,
            "latency": round(latency, 3),
            "accuracy": round(acc, 3),
            "num_requests": args.num_questions,
            "other": {
                "num_questions": args.num_questions,
                "parallel": args.parallel,
            },
        }
        fout.write(json.dumps(value) + "\n")
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # CLI entry point: benchmark-specific flags here; the common backend /
    # parallelism flags come from add_common_other_args_and_parse.
    parser = argparse.ArgumentParser()
    parser.add_argument("--num-shots", type=int, default=20)
    parser.add_argument("--data-path", type=str, default="hellaswag_val.jsonl")
    parser.add_argument("--num-questions", type=int, default=200)
    main(add_common_other_args_and_parse(parser))
|
||||
109
benchmark/hellaswag/bench_sglang.py
Normal file
109
benchmark/hellaswag/bench_sglang.py
Normal file
@@ -0,0 +1,109 @@
|
||||
import argparse
|
||||
import json
|
||||
import os
|
||||
import time
|
||||
|
||||
import numpy as np
|
||||
|
||||
from sglang.lang.api import set_default_backend
|
||||
from sglang.test.test_utils import (
|
||||
add_common_sglang_args_and_parse,
|
||||
select_sglang_backend,
|
||||
)
|
||||
from sglang.utils import download_and_cache_file, read_jsonl
|
||||
|
||||
|
||||
def get_one_example(lines, i, include_answer):
    """Render HellaSwag record ``lines[i]`` as a prompt.

    Produces "<activity_label>: <ctx> "; when ``include_answer`` is true the
    gold ending is appended (for few-shot demonstration examples).
    """
    item = lines[i]
    text = item["activity_label"] + ": " + item["ctx"] + " "
    if not include_answer:
        return text
    return text + item["endings"][item["label"]]
|
||||
|
||||
|
||||
def get_few_shot_examples(lines, k):
    """Build a few-shot prefix from the first ``k`` answered examples."""
    parts = [get_one_example(lines, i, True) + "\n\n" for i in range(k)]
    return "".join(parts)
|
||||
|
||||
|
||||
def main(args):
    """Benchmark HellaSwag with an sglang backend.

    Builds few-shot selection prompts from the validation set, runs them as
    a batch through an sgl.select program, then prints latency/accuracy and
    appends a JSON record to ``args.result_file``.
    """
    # Select backend and install it as the process-wide default so the
    # sgl program below does not need an explicit backend argument.
    set_default_backend(select_sglang_backend(args))

    # Read data: prefer a local copy at --data-path, otherwise download
    # (and cache) the official validation split.
    data_path = args.data_path
    url = "https://raw.githubusercontent.com/rowanz/hellaswag/master/data/hellaswag_val.jsonl"
    if not os.path.isfile(data_path):
        data_path = download_and_cache_file(url)
    lines = list(read_jsonl(data_path))

    # Construct prompts
    num_questions = args.num_questions
    num_shots = args.num_shots
    few_shot_examples = get_few_shot_examples(lines, num_shots)

    questions = []
    choices = []   # candidate endings per question
    labels = []    # gold ending index per question
    for i in range(len(lines[:num_questions])):
        questions.append(get_one_example(lines, i, False))
        choices.append(lines[i]["endings"])
        labels.append(lines[i]["label"])
    arguments = [{"question": q, "choices": c} for q, c in zip(questions, choices)]

    #####################################
    ######### SGL Program Begin #########
    #####################################

    import sglang as sgl

    # Defined inside main so it can close over `few_shot_examples`.
    @sgl.function
    def few_shot_hellaswag(s, question, choices):
        s += few_shot_examples + question
        s += sgl.select("answer", choices=choices)

    #####################################
    ########## SGL Program End ##########
    #####################################

    # Run requests; latency covers the whole batch plus the answer→index
    # mapping below.
    tic = time.perf_counter()
    rets = few_shot_hellaswag.run_batch(
        arguments,
        temperature=0,
        num_threads=args.parallel,
        progress_bar=True,
    )
    # Map each selected ending text back to its index within the choices.
    preds = [choices[i].index(rets[i]["answer"]) for i in range(len(rets))]
    latency = time.perf_counter() - tic

    # Compute accuracy
    acc = np.mean(np.array(preds) == np.array(labels))
    print(f"Latency: {latency:.3f}")
    print(f"Accuracy: {acc:.3f}")

    # Write results: append one JSON line so repeated runs accumulate.
    with open(args.result_file, "a") as fout:
        value = {
            "task": "hellaswag",
            "backend": args.backend,
            "num_gpus": 1,
            "latency": round(latency, 3),
            "accuracy": round(acc, 3),
            "num_requests": args.num_questions,
            "other": {
                "num_questions": args.num_questions,
                "parallel": args.parallel,
            },
        }
        fout.write(json.dumps(value) + "\n")
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # CLI entry point: benchmark-specific flags here; the common sglang
    # flags (backend, parallelism, ...) come from add_common_sglang_args_and_parse.
    parser = argparse.ArgumentParser()
    parser.add_argument("--num-shots", type=int, default=20)
    parser.add_argument("--data-path", type=str, default="hellaswag_val.jsonl")
    parser.add_argument("--num-questions", type=int, default=200)
    main(add_common_sglang_args_and_parse(parser))
|
||||
Reference in New Issue
Block a user