Support oai in benchmark/mmlu (#323)
This commit is contained in:
@@ -15,6 +15,10 @@ python -m sglang.launch_server --model-path meta-llama/Llama-2-7b-chat-hf --port
 python3 bench_sglang.py --nsub 10
 ```
 
+```
+# OpenAI models
+python3 bench_sglang.py --backend gpt-3.5-turbo --parallel 8
+```
 
 ### Benchmark vllm
 ```
@@ -64,10 +64,16 @@ def evaluate(args, subject, dev_df, test_df):
     #####################################
 
     import sglang as sgl
 
-    @sgl.function
-    def few_shot_mmlu(s, examples, question):
-        s += examples + question + sgl.gen("answer")
+    if args.backend.startswith("gpt-"):
+        @sgl.function
+        def few_shot_mmlu(s, examples, question):
+            s += sgl.user(examples + question)
+            s += sgl.assistant(sgl.gen("answer"))
+    else:
+        @sgl.function
+        def few_shot_mmlu(s, examples, question):
+            s += examples + question + sgl.gen("answer")
 
     #####################################
     ########## SGL Program End ##########
@@ -155,7 +155,7 @@ def select_sglang_backend(args):
         global_config.enable_parallel_decoding = False
         global_config.enable_parallel_encoding = False
         backend = RuntimeEndpoint(f"{args.host}:{args.port}")
-    elif args.backend.startswith("gpt"):
+    elif args.backend.startswith("gpt-"):
         backend = OpenAI(args.backend)
     else:
         raise ValueError(f"Invalid backend: {args.backend}")
Reference in New Issue
Block a user