release initial code
Co-authored-by: Ying Sheng <sqy1415@gmail.com> Co-authored-by: Liangsheng Yin <hnyls2002@gmail.com> Co-authored-by: Zhiqiang Xie <xiezhq@stanford.edu> Co-authored-by: parasol-aser <3848358+parasol-aser@users.noreply.github.com> Co-authored-by: LiviaSun <33578456+ChuyueSun@users.noreply.github.com> Co-authored-by: Cody Yu <hao.yu.cody@gmail.com>
This commit is contained in:
60
benchmark/llava_bench/README.md
Normal file
60
benchmark/llava_bench/README.md
Normal file
@@ -0,0 +1,60 @@
|
||||
## Download benchmark images
|
||||
|
||||
```
|
||||
python3 download_images.py
|
||||
```
|
||||
|
||||
image benchmark source: https://huggingface.co/datasets/liuhaotian/llava-bench-in-the-wild
|
||||
|
||||
### Other Dependencies
|
||||
```
|
||||
pip3 install "torch>=2.1.2" "transformers>=4.36" pillow
|
||||
```
|
||||
|
||||
## Run benchmark
|
||||
|
||||
### Benchmark sglang
|
||||
Launch a server
|
||||
```
|
||||
python3 -m sglang.launch_server --model-path liuhaotian/llava-v1.5-7b --tokenizer-path llava-hf/llava-1.5-7b-hf --port 30000
|
||||
```
|
||||
|
||||
Run benchmark
|
||||
```
|
||||
# Run with local models
|
||||
python3 bench_sglang.py --num-questions 60
|
||||
|
||||
# Run with OpenAI models
|
||||
python3 bench_sglang.py --num-questions 60 --backend gpt-4-vision-preview
|
||||
```
|
||||
|
||||
### Bench LLaVA original code
|
||||
```
|
||||
git clone git@github.com:haotian-liu/LLaVA.git
|
||||
cd LLaVA
|
||||
git reset --hard 9a26bd1435b4ac42c282757f2c16d34226575e96
|
||||
pip3 install -e .
|
||||
|
||||
cd ~/sglang/benchmark/llava_bench
|
||||
CUDA_VISIBLE_DEVICES=0 bash bench_hf_llava_bench.sh
|
||||
```
|
||||
|
||||
|
||||
### Benchmark llama.cpp
|
||||
|
||||
```
|
||||
# Install
|
||||
CMAKE_ARGS="-DLLAMA_CUBLAS=on" pip install llama-cpp-python
|
||||
pip install sse_starlette starlette_context pydantic_settings
|
||||
|
||||
# Download weights
|
||||
mkdir -p ~/model_weights/llava-v1.5-7b/
|
||||
wget https://huggingface.co/mys/ggml_llava-v1.5-7b/resolve/main/ggml-model-f16.gguf -O ~/model_weights/llava-v1.5-7b/ggml-model-f16.gguf
|
||||
wget https://huggingface.co/mys/ggml_llava-v1.5-7b/resolve/main/mmproj-model-f16.gguf -O ~/model_weights/llava-v1.5-7b/mmproj-model-f16.gguf
|
||||
```
|
||||
|
||||
```
|
||||
python3 -m llama_cpp.server --model ~/model_weights/llava-v1.5-7b/ggml-model-f16.gguf --clip_model_path ~/model_weights/llava-v1.5-7b/mmproj-model-f16.gguf --chat_format llava-1-5 --port 23000
|
||||
|
||||
OPENAI_BASE_URL=http://localhost:23000/v1 python3 bench_sglang.py --backend gpt-4-vision-preview --num-q 1
|
||||
```
|
||||
9
benchmark/llava_bench/bench_hf_llava_bench.sh
Normal file
9
benchmark/llava_bench/bench_hf_llava_bench.sh
Normal file
@@ -0,0 +1,9 @@
|
||||
#!/bin/bash
# Benchmark the original LLaVA (Hugging Face) evaluation code on the
# llava-bench questions. Answers are written to ./answers_hf.jsonl so they
# can be compared against the sglang backend's output.

python -m llava.eval.model_vqa \
    --model-path liuhaotian/llava-v1.5-7b \
    --question-file ./questions.jsonl \
    --image-folder ./images \
    --answers-file ./answers_hf.jsonl \
    --temperature 0 \
    --conv-mode vicuna_v1
|
||||
9
benchmark/llava_bench/bench_hf_mme.sh
Normal file
9
benchmark/llava_bench/bench_hf_mme.sh
Normal file
@@ -0,0 +1,9 @@
|
||||
#!/bin/bash
# Benchmark the original LLaVA (Hugging Face) evaluation code on the MME
# benchmark pack (expects ./mme_pack to exist; see bench_sglang_mme.sh).
# Answers are written to ./answers_hf_mme.jsonl.

python -m llava.eval.model_vqa_loader \
    --model-path liuhaotian/llava-v1.5-7b \
    --question-file ./mme_pack/llava_mme_bench_replace.jsonl \
    --image-folder ./mme_pack/MME_Benchmark_release_version \
    --answers-file ./answers_hf_mme.jsonl \
    --temperature 0 \
    --conv-mode vicuna_v1
|
||||
96
benchmark/llava_bench/bench_sglang.py
Normal file
96
benchmark/llava_bench/bench_sglang.py
Normal file
@@ -0,0 +1,96 @@
|
||||
import argparse
|
||||
import json
|
||||
import time
|
||||
import os
|
||||
|
||||
import sglang as sgl
|
||||
import tqdm
|
||||
from sglang.test.test_utils import add_common_sglang_args_and_parse, select_sglang_backend
|
||||
from sglang.utils import read_jsonl, dump_state_text
|
||||
from PIL import Image
|
||||
|
||||
|
||||
@sgl.function
def image_qa(s, image_file, question):
    """Single-turn visual question answering: user sends an image plus a
    question, the assistant generates an answer into the "answer" slot.

    NOTE(review): reads the module-level ``args`` for max_tokens, so this
    function must only run after argument parsing in ``__main__``.
    """
    s += sgl.user(sgl.image(image_file) + question)
    s += sgl.assistant(sgl.gen("answer", max_tokens=args.max_tokens))
|
||||
|
||||
|
||||
def main(args):
    """Run the LLaVA-bench image-QA benchmark against the selected backend.

    Reads up to ``args.num_questions`` questions from ``args.question_file``
    (jsonl), runs ``image_qa`` for each one — serially when
    ``args.parallel == 1``, otherwise in a thread pool — then writes:
      * per-question answers (jsonl) to ``args.answer_file``,
      * raw program states to ``tmp_output_<backend>.txt``,
      * an aggregate latency record appended to ``args.result_file``.
    """
    lines = read_jsonl(args.question_file)[: args.num_questions]
    arguments = [
        {
            # Absolute path so the backend can open the file regardless of cwd.
            "image_file": os.path.abspath(os.path.join(args.image_folder, l["image"])),
            "question": l["text"],
        }
        for l in lines
    ]

    states = [None] * len(lines)

    # Select backend
    backend = select_sglang_backend(args)
    sgl.set_default_backend(backend)

    # Run requests (temperature 0 for deterministic, comparable outputs).
    tic = time.time()
    if args.parallel == 1:
        for i in tqdm.tqdm(range(len(lines))):
            states[i] = image_qa.run(
                image_file=arguments[i]["image_file"],
                question=arguments[i]["question"],
                temperature=0,
            )
    else:
        states = image_qa.run_batch(
            arguments,
            temperature=0,
            num_threads=args.parallel,
            progress_bar=True,
        )
    latency = time.time() - tic

    print(f"Latency: {latency:.3f}")

    # Write results
    dump_state_text(f"tmp_output_{args.backend}.txt", states)

    print(f"Write output to {args.answer_file}")
    with open(args.answer_file, "w") as fout:
        for i in range(len(lines)):
            value = {
                "question_id": lines[i]["question_id"],
                "prompt": lines[i]["text"],
                "text": states[i]["answer"].strip(),
                "model_id": backend.model_info["model_path"],
                "answer_id": i,
                "metadata": {},
            }
            fout.write(json.dumps(value) + "\n")

    # Append one summary record per run (file accumulates across runs).
    with open(args.result_file, "a") as fout:
        value = {
            "task": "llava_bench",
            "backend": args.backend,
            "num_gpus": 1,
            "latency": round(latency, 3),
            "num_requests": len(lines),
            "parallel": args.parallel,
        }
        fout.write(json.dumps(value) + "\n")
|
||||
|
||||
|
||||
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Benchmark inputs: questions jsonl file and the folder holding the images.
    parser.add_argument("--question-file", type=str, default="questions.jsonl")
    parser.add_argument("--answer-file", type=str, default="answers.jsonl")
    parser.add_argument("--image-folder", type=str, default="./images")
    parser.add_argument("--temperature", type=float, default=0.0)
    # None means "run every question in the file".
    parser.add_argument("--num-questions", type=int, default=None)
    parser.add_argument("--max-tokens", type=int, default=768)
    # Adds the shared sglang options (backend, parallel, result-file, ...)
    # and returns the parsed namespace.
    args = add_common_sglang_args_and_parse(parser)
    main(args)
|
||||
2
benchmark/llava_bench/bench_sglang_mme.sh
Normal file
2
benchmark/llava_bench/bench_sglang_mme.sh
Normal file
@@ -0,0 +1,2 @@
|
||||
# Run the sglang benchmark over the MME benchmark pack.
# --max-tokens 4 because MME expects short yes/no style answers.
MME_FOLDER=./mme_pack
python3 bench_sglang.py --num-questions 5000 --question-file $MME_FOLDER/llava_mme_bench_replace.jsonl --answer-file answer_mme.jsonl --image-folder $MME_FOLDER/MME_Benchmark_release_version --max-tokens 4
|
||||
20
benchmark/llava_bench/download_images.py
Normal file
20
benchmark/llava_bench/download_images.py
Normal file
@@ -0,0 +1,20 @@
|
||||
import os
import urllib.request

# Base URL of the llava-bench-in-the-wild image set.
BASE_URL = "https://huggingface.co/datasets/liuhaotian/llava-bench-in-the-wild/resolve/main/images/"


def image_name(i):
    """Return the dataset file name for image number *i*, e.g. 1 -> '001.jpg'."""
    return str(i).zfill(3) + ".jpg"


def download_images(out_dir="images", count=24):
    """Download the benchmark images into *out_dir*.

    Uses urllib instead of shelling out to wget, so the script works without
    external tools and without interpolating paths into a shell command.
    """
    # Create the output directory if it doesn't exist.
    os.makedirs(out_dir, exist_ok=True)

    # Images are numbered 001.jpg .. <count>.jpg with leading zeros.
    for i in range(1, count + 1):
        name = image_name(i)
        url = BASE_URL + name
        path = os.path.join(out_dir, name)
        urllib.request.urlretrieve(url, path)

    print("Download complete.")


if __name__ == "__main__":
    download_images()
|
||||
Reference in New Issue
Block a user