Organize Benchmark (#381)
@@ -5,16 +5,11 @@ import json
 import re
 import time
 from concurrent.futures import ThreadPoolExecutor
-from functools import partial
 
 import numpy as np
+from tqdm import tqdm
 
-from sglang.test.test_utils import (
-    add_common_other_args_and_parse,
-    call_generate_lightllm,
-    call_generate_srt_raw,
-    call_generate_vllm,
-)
+from sglang.test.test_utils import add_common_other_args_and_parse, get_call_generate
 from sglang.utils import dump_state_text, read_jsonl
 
 INVALID = -9999999
@@ -67,6 +62,32 @@ def multi_chain_gsm8k(question, num_chains, call_generate):
     return s
 
 
+async def multi_chain_gsm8k_async(question, num_chains, call_generate):
+    s = "Question: " + question + "\n"
+    # s += call_generate(s + "Answer: " + prompt_lib[0], max_tokens=256,
+    #                    stop="Question", temperature=0)
+    # return s
+
+    comps = []
+    for i in range(num_chains):
+        comps.append(
+            await call_generate(
+                s + "Answer: " + prompt_lib[i % num_chains],
+                max_tokens=256,
+                temperature=0.3,
+                stop="Question",
+            )
+        )
+
+    s += "Answer: To answer this question, here are some possible solutions. "
+    s += "After considering all of them, I will do a majority vote.\n\n"
+    for i in range(num_chains):
+        s += f"Solution {i+1}: " + comps[i].strip() + "\n\n"
+    s += "\nBy considering the above solutions and doing a majority vote, I think the final answer (a single integer number) is "
+    s += await call_generate(s, max_tokens=16, temperature=0, stop=None)
+    return s
+
+
 def main(args):
     lines = read_jsonl(args.data_path)
 
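For a quick smoke test of the added coroutine, a stub backend is enough. The sketch below is illustrative only (it is not part of the commit) and assumes it runs where multi_chain_gsm8k_async and prompt_lib are in scope, e.g. appended to the benchmark file:

import asyncio

async def fake_call_generate(prompt, max_tokens, temperature, stop=None):
    # Stand-in for a real backend: return a canned answer after a short delay.
    await asyncio.sleep(0.01)
    return "The answer is 42."

async def smoke_test():
    out = await multi_chain_gsm8k_async(
        "What is 6 times 7?", num_chains=2, call_generate=fake_call_generate
    )
    print(out)

asyncio.run(smoke_test())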
@@ -83,71 +104,7 @@ def main(args):
     states = [None] * len(labels)
 
     # Select backend
-    if args.backend == "lightllm":
-        url = f"{args.host}:{args.port}/generate"
-        call_generate = partial(call_generate_lightllm, url=url)
-    elif args.backend == "vllm":
-        url = f"{args.host}:{args.port}/generate"
-        call_generate = partial(call_generate_vllm, url=url)
-    elif args.backend == "srt-raw":
-        url = f"{args.host}:{args.port}/generate"
-        call_generate = partial(call_generate_srt_raw, url=url)
-    elif args.backend == "guidance":
-        from guidance import gen, models
-
-        model = models.LlamaCpp(
-            "/home/ubuntu/model_weights/Llama-2-7b-chat.gguf",
-            n_gpu_layers=-1,
-            n_ctx=4096,
-        )
-
-        def call_generate(prompt, temperature, max_tokens, stop):
-            out = (
-                model
-                + prompt
-                + gen(
-                    name="answer",
-                    max_tokens=max_tokens,
-                    temperature=temperature,
-                    stop=stop,
-                )
-            )
-            return out["answer"]
-
-        # def multi_chain_gsm8k(question, num_chains, call_generate):
-        #     s = model + "Question: " + question + "\n"
-
-        #     comps = []
-        #     for i in range(num_chains):
-        #         comps.append(call_generate(s + "Answer: " + prompt_lib[i % num_chains],
-        #             max_tokens=256, temperature=0.3, stop="Question"))
-
-        #     s += "Answer: To answer this question, here are some possible solutions. "
-        #     s += "After considering all of them, I will do a majority vote.\n\n"
-        #     for i in range(num_chains):
-        #         s += f"Solution {i+1}: " + comps[i].strip() + "\n\n"
-        #     s += f"\nBy considering the above solutions and doing a majority vote, I think the final answer (a single integer number) is "
-        #     return call_generate(s, max_tokens=16, temperature=0, stop=None)
-
-    elif args.backend == "lmql":
-        import lmql
-
-        model = lmql.model(
-            "meta-llama/Llama-2-7b-chat-hf", endpoint=f"{args.host}:{args.port}"
-        )
-
-        @lmql.query(model=model)
-        async def program(question):
-            '''lmql
-            """{question}[ANSWER]""" where len(TOKENS(ANSWER)) < 257 and STOPS_AT(ANSWER, "Question")
-            return ANSWER
-            '''
-
-        async def call_generate(prompt, temperature, max_tokens, stop):
-            return await program(question=prompt, temperature=0)
-
-    else:
-        raise ValueError(f"Invalid backend: {args.backend}")
+    call_generate = get_call_generate(args)
 
     # Run requests
     if args.backend != "lmql":
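The removed per-backend branches now live behind get_call_generate. A minimal sketch of that dispatch pattern, with a hypothetical HTTP caller standing in for the ones sglang.test.test_utils actually ships; the payload shape here is invented for illustration:

from functools import partial

import requests

def call_generate_http(prompt, temperature, max_tokens, stop, url):
    # Hypothetical caller: POST the prompt to `url` and return the completion.
    resp = requests.post(url, json={"prompt": prompt, "temperature": temperature,
                                    "max_tokens": max_tokens, "stop": stop})
    return resp.json()["text"]

def get_call_generate_sketch(args):
    # Bind the chosen backend's caller to its server URL once, up front.
    if args.backend in ("lightllm", "vllm", "srt-raw"):
        url = f"{args.host}:{args.port}/generate"
        return partial(call_generate_http, url=url)
    raise ValueError(f"Invalid backend: {args.backend}")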
@@ -158,31 +115,35 @@ def main(args):
 
         tic = time.time()
         if args.parallel == 1:
-            for i in range(len(questions)):
+            for i in tqdm(range(len(questions))):
                 get_one_answer(i)
         else:
             with ThreadPoolExecutor(args.parallel) as executor:
-                executor.map(get_one_answer, list(range(len(questions))))
+                list(
+                    tqdm(
+                        executor.map(get_one_answer, list(range(len(questions)))),
+                        total=len(questions),
+                    )
+                )
 
     else:
         # Use asyncio
-        async def batched_call(batch_size):
-            for i in range(0, len(questions), batch_size):
-                tasks = []
-                for q in questions[i : i + batch_size]:
-                    tasks.append(
-                        call_generate(
-                            few_shot_examples + q,
-                            temperature=0,
-                            max_tokens=256,
-                            stop="Question",
-                        )
-                    )
-                rets = await asyncio.gather(*tasks)
-                for j in range(len(rets)):
-                    states[i + j] = get_answer_value(rets[j])
+        async def get_one_answer_asyncio(i):
+            answer = await multi_chain_gsm8k_async(
+                questions[i], args.num_chains, call_generate
+            )
+            states[i] = answer
 
         tic = time.time()
-        asyncio.run(batched_call(batch_size=args.parallel))
+        loop = asyncio.get_event_loop()
+        batches = [
+            list(range(i, min(i + args.parallel, len(questions))))
+            for i in range(0, len(questions), args.parallel)
+        ]
+        for bt in tqdm(batches):
+            tasks = [get_one_answer_asyncio(k) for k in bt]
+            loop.run_until_complete(asyncio.gather(*tasks))
 
     latency = time.time() - tic
 
     preds = []
 
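Both execution paths added here share one idea: keep states indexed by question while showing progress. A self-contained toy (work and awork are stand-ins for a benchmark request, not part of the commit) showing why the thread-pool path wraps executor.map in list(tqdm(...)) — the map is lazy, so tqdm drives it and renders progress — and how the asyncio path gathers fixed-size batches:

import asyncio
import time
from concurrent.futures import ThreadPoolExecutor

from tqdm import tqdm

items = list(range(100))

def work(i):
    time.sleep(0.01)  # stand-in for one synchronous benchmark request
    return i * i

# Thread-pool path: executor.map returns a lazy iterator, so list(...)
# forces it while tqdm(..., total=...) renders a progress bar.
with ThreadPoolExecutor(8) as executor:
    results = list(tqdm(executor.map(work, items), total=len(items)))

async def awork(i):
    await asyncio.sleep(0.01)  # stand-in for one async benchmark request
    return i * i

# Asyncio path: one gather per fixed-size batch, tqdm over the batches.
async def run_batches(parallel=8):
    out = [None] * len(items)
    batches = [
        list(range(i, min(i + parallel, len(items))))
        for i in range(0, len(items), parallel)
    ]
    for bt in tqdm(batches):
        rets = await asyncio.gather(*[awork(k) for k in bt])
        for k, r in zip(bt, rets):
            out[k] = r
    return out

asyncio.run(run_batches())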