Format Benchmark Code (#399)
@@ -1,15 +1,18 @@
 import argparse
 import asyncio
-from concurrent.futures import ThreadPoolExecutor
-from functools import partial
 import json
 import time
+from concurrent.futures import ThreadPoolExecutor
+from functools import partial

 from tqdm import tqdm
 import numpy as np
-from sglang.test.test_utils import add_common_other_args_and_parse, call_generate_lightllm, call_generate_vllm, call_generate_srt_raw
-from sglang.utils import read_jsonl, dump_state_text
+
+from sglang.test.test_utils import (
+    add_common_other_args_and_parse,
+    call_generate_lightllm,
+    call_generate_srt_raw,
+    call_generate_vllm,
+)
+from sglang.utils import dump_state_text, read_jsonl

 USER_PREFIX = "[INST] "
 USER_SUFFIX = " [/INST]"
@@ -25,7 +28,11 @@ def multi_document_qa(docs, question, generate):
     s += "".join(docs)

     s += "\nDocuments end."
-    s += ("\n\nBased on the above documents, please answer this question:\n" + question + "\nAnswer in three words or fewer.")
+    s += (
+        "\n\nBased on the above documents, please answer this question:\n"
+        + question
+        + "\nAnswer in three words or fewer."
+    )
     s += USER_SUFFIX
     s += ASSISTANT_PREFIX
     answer = generate(s, max_tokens=16, stop=None)
@@ -42,11 +49,13 @@ def main(args):
     if args.backend == "guidance":
         num_docs = 7  # due to OOM

-    for i in range(len(l["questions"][:args.num_questions])):
-        arguments.append({
-            "docs": l["documents"][:num_docs],
-            "question": l["questions"][i],
-        })
+    for i in range(len(l["questions"][: args.num_questions])):
+        arguments.append(
+            {
+                "docs": l["documents"][:num_docs],
+                "question": l["questions"][i],
+            }
+        )
         labels.append(l["answers"][i])
     states = [None] * len(arguments)

@@ -61,13 +70,20 @@ def main(args):
         url = f"{args.host}:{args.port}/generate"
         generate = partial(call_generate_srt_raw, url=url, temperature=0)
     elif args.backend == "guidance":
-        from guidance import models, gen
+        from guidance import gen, models

-        model = models.LlamaCpp("/home/ubuntu/model_weights/CodeLlama-7b-instruct-hf.gguf", n_gpu_layers=-1, n_ctx=11000)
+        model = models.LlamaCpp(
+            "/home/ubuntu/model_weights/CodeLlama-7b-instruct-hf.gguf",
+            n_gpu_layers=-1,
+            n_ctx=11000,
+        )

         def generate(prompt, max_tokens, stop):
-            out = model + prompt + gen(name="answer",
-                                       max_tokens=max_tokens, temperature=0, stop=stop)
+            out = (
+                model
+                + prompt
+                + gen(name="answer", max_tokens=max_tokens, temperature=0, stop=stop)
+            )
             return out["answer"]

         # warmup
@@ -113,7 +129,7 @@ def main(args):
             "other": {
                 "num_questions": args.num_questions,
                 "parallel": args.parallel,
-            }
+            },
         }
         fout.write(json.dumps(value) + "\n")

@@ -2,10 +2,12 @@ import argparse
 import json
 import time

 import numpy as np
 import sglang as sgl
-from sglang.test.test_utils import add_common_sglang_args_and_parse, select_sglang_backend
-from sglang.utils import read_jsonl, dump_state_text
+from sglang.test.test_utils import (
+    add_common_sglang_args_and_parse,
+    select_sglang_backend,
+)
+from sglang.utils import dump_state_text, read_jsonl


 @sgl.function
@@ -19,7 +21,11 @@ def multi_document_qa(s, docs, question):
     forks.join("concate_and_append")

     s += "\nDocuments end."
-    s += ("\n\nBased on the above documents, please answer this question:\n" + question + "\nAnswer in three words or fewer.")
+    s += (
+        "\n\nBased on the above documents, please answer this question:\n"
+        + question
+        + "\nAnswer in three words or fewer."
+    )
     s += sgl.user_end()
     s += sgl.assistant(sgl.gen("answer", max_tokens=16))

@@ -29,11 +35,13 @@ def main(args):
     l = lines[0]
     arguments = []
     labels = []
-    for i in range(len(l["questions"][:args.num_questions])):
-        arguments.append({
-            "docs": l["documents"][:10],
-            "question": l["questions"][i],
-        })
+    for i in range(len(l["questions"][: args.num_questions])):
+        arguments.append(
+            {
+                "docs": l["documents"][:10],
+                "question": l["questions"][i],
+            }
+        )
         labels.append(l["answers"][i])

     # Select backend
@@ -43,10 +51,11 @@ def main(args):
     # Run requests
     tic = time.time()
     states = multi_document_qa.run_batch(
-        arguments, temperature=0, num_threads=args.parallel, progress_bar=True)
+        arguments, temperature=0, num_threads=args.parallel, progress_bar=True
+    )
     latency = time.time() - tic

     # Compute accuracy
     print([s["answer"] for s in states])
     correct = 0
     for s, label in zip(states, labels):
@@ -71,7 +80,7 @@ def main(args):
             "other": {
                 "num_questions": args.num_questions,
                 "parallel": args.parallel,
-            }
+            },
         }
         fout.write(json.dumps(value) + "\n")

@@ -3,7 +3,8 @@ import json
 import transformers

 content = "\n".join(
-    open("llama2.txt", 'r', encoding='utf-8', errors='ignore').readlines())
+    open("llama2.txt", "r", encoding="utf-8", errors="ignore").readlines()
+)
 content = content.replace("\n\n", "\n")

 # Count token
@@ -35,30 +36,35 @@ for i, s in enumerate(segments):

 # Dump
 with open("questions.jsonl", "w") as fout:
-    fout.write(json.dumps({
-        "documents": segments[:30],
-        "questions": [
-            "What is the name of the fine-tuned LLMs?",
-            "Which figure shows the helpfulness human evaluation results for Llama 2-Chat?",
-            "What is the number of parameters in the largest Llama 2 model?",
-            "What is the batch size of fine-tuning?",
-            "Where can we find the details of potential data contamination?",
-            "What is the full name of MPT?",
-            "What is the power consumption of RSC in Watt?",
-            "How many tokens of data do they train on?",
-            "Which model's release is delayed due to a lack of time to sufficiently red team?",
-            "Which activation function is used in Llama?"
-        ],
-        "answers": [
-            "Llama 2 Chat",
-            "1",
-            "70 B",
-            "64",
-            "A 6",
-            "MosaicML",
-            "400",
-            "2 trillion",
-            "34 B",
-            "SwiGLU",
-        ],
-    }) + "\n")
+    fout.write(
+        json.dumps(
+            {
+                "documents": segments[:30],
+                "questions": [
+                    "What is the name of the fine-tuned LLMs?",
+                    "Which figure shows the helpfulness human evaluation results for Llama 2-Chat?",
+                    "What is the number of parameters in the largest Llama 2 model?",
+                    "What is the batch size of fine-tuning?",
+                    "Where can we find the details of potential data contamination?",
+                    "What is the full name of MPT?",
+                    "What is the power consumption of RSC in Watt?",
+                    "How many tokens of data do they train on?",
+                    "Which model's release is delayed due to a lack of time to sufficiently red team?",
+                    "Which activation function is used in Llama?",
+                ],
+                "answers": [
+                    "Llama 2 Chat",
+                    "1",
+                    "70 B",
+                    "64",
+                    "A 6",
+                    "MosaicML",
+                    "400",
+                    "2 trillion",
+                    "34 B",
+                    "SwiGLU",
+                ],
+            }
+        )
+        + "\n"
+    )