Format Benchmark Code (#399)
@@ -1,29 +1,29 @@
 import argparse
-from functools import partial
 import json
 import time
+from functools import partial
 from pathlib import Path
 
+from agent_functions import (
+    action_location_object_prompt,
+    action_location_sector_prompt,
+    generate_event_triple_prompt,
+    generate_pronunciatio_prompt,
+    poignancy_event_prompt,
+)
 from tqdm import tqdm
 
 from sglang.test.test_utils import (
     add_common_other_args_and_parse,
     call_generate_lightllm,
-    call_generate_vllm,
     call_generate_srt_raw,
+    call_generate_vllm,
 )
-from sglang.utils import read_jsonl, dump_state_text
-
-from agent_functions import (
-    poignancy_event_prompt,
-    generate_event_triple_prompt,
-    generate_pronunciatio_prompt,
-    action_location_sector_prompt,
-    action_location_object_prompt,
-)
+from sglang.utils import dump_state_text, read_jsonl
 
+
 def main(args):
-    lines = read_jsonl(args.data_path)[:args.num_events]
+    lines = read_jsonl(args.data_path)[: args.num_events]
     mapping = {
         "poignancy_event": poignancy_event_prompt,
         "generate_event_triple": generate_event_triple_prompt,
@@ -46,7 +46,7 @@ def main(args):
         url = f"{args.host}:{args.port}/generate"
         call_generate = partial(call_generate_srt_raw, url=url)
     elif args.backend == "guidance":
-        from guidance import models, gen
+        from guidance import gen, models
 
         model = models.LlamaCpp(
             str(Path.home()) + "/model_weights/Llama-2-7b-chat.gguf",
@@ -55,11 +55,15 @@ def main(args):
         )
 
         def call_generate(prompt, temperature, max_tokens, stop):
-            out = model + prompt + gen(
-                name="result",
-                max_tokens=max_tokens,
-                temperature=temperature,
-                stop=stop,
+            out = (
+                model
+                + prompt
+                + gen(
+                    name="result",
+                    max_tokens=max_tokens,
+                    temperature=temperature,
+                    stop=stop,
+                )
             )
             return out["result"]
 
@@ -87,7 +91,7 @@ def main(args):
         "backend": args.backend,
         "num_gpus": 1,
         "latency": round(latency, 3),
-        # to pack weighted functions as a single agent
+        # to pack weighted functions as a single agent
         "num_requests": len(arguments) / len(mapping),
         "other": {
             "parallel": args.parallel,
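
A note for readers of this diff: each backend branch in main() is normalized to a single callable with the signature call_generate(prompt, temperature, max_tokens, stop), either by binding backend-specific arguments with functools.partial (as in the srt-raw branch above) or by defining a closure (as in the guidance branch). Below is a minimal sketch of that dispatch pattern; call_generate_echo and its URL are hypothetical stand-ins for illustration, not code from this commit.

    from functools import partial


    def call_generate_echo(prompt, temperature, max_tokens, stop, url=None):
        # Hypothetical stand-in for call_generate_vllm / call_generate_srt_raw:
        # a real backend would POST the sampling parameters to `url` and
        # return the completion text.
        return f"(echo via {url}) {prompt[:24]}"


    # Backend-specific arguments are bound once, so the benchmark loop only
    # ever passes (prompt, temperature, max_tokens, stop).
    call_generate = partial(call_generate_echo, url="http://127.0.0.1:30000/generate")
    print(call_generate("hello", temperature=0, max_tokens=16, stop=None))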
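
The repeated comment in the last hunk ("to pack weighted functions as a single agent") is likely a whitespace-only change, and it explains the division on the line after it: num_requests counts agent steps, where one step runs every prompt function in mapping once, rather than counting raw calls. A toy check of that arithmetic, assuming one mapping key per imported *_prompt function (only the first two keys appear verbatim in this diff; the remaining three are inferred from the import names):

    # Assumed: five prompt functions, each invoked once per event.
    mapping_keys = [
        "poignancy_event",
        "generate_event_triple",
        "generate_pronunciatio",
        "action_location_sector",
        "action_location_object",
    ]
    num_events = 100
    # One entry in `arguments` per individual prompt call.
    arguments = [(key, i) for i in range(num_events) for key in mapping_keys]

    num_requests = len(arguments) / len(mapping_keys)  # 500 / 5
    assert num_requests == num_events == 100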