# Source: sglang/examples/quick_start/srt_example_llava.py
"""
Usage: python3 srt_example_llava.py
"""
import sglang as sgl
@sgl.function
def image_qa(s, image_path, question):
    """Single-turn visual QA: show the image, ask the question, generate an answer.

    The generated text is stored in the state under the key "answer".
    """
    prompt = sgl.image(image_path) + question
    s += sgl.user(prompt)
    s += sgl.assistant(sgl.gen("answer"))
2024-01-30 04:29:32 -08:00
def single():
    """Run a single image-QA request and print the generated answer."""
    state = image_qa.run(
        image_path="images/cat.jpeg", question="What is this?", max_new_tokens=128
    )
    print(state["answer"], "\n")
def stream():
    """Run one request with streaming enabled, printing tokens as they arrive."""
    state = image_qa.run(
        image_path="images/cat.jpeg",
        question="What is this?",
        max_new_tokens=64,
        stream=True,
    )
    # text_iter yields incremental chunks of the "answer" generation.
    for out in state.text_iter("answer"):
        print(out, end="", flush=True)
    print()
def batch():
    """Run two image-QA requests as a batch and print each answer."""
    states = image_qa.run_batch(
        [
            {"image_path": "images/cat.jpeg", "question": "What is this?"},
            {"image_path": "images/dog.jpeg", "question": "What is this?"},
        ],
        max_new_tokens=128,
    )
    for s in states:
        print(s["answer"], "\n")
if __name__ == "__main__":
    # Launch a local runtime. LLaVA v1.6 reuses the llava-1.5 tokenizer repo,
    # hence the separate tokenizer_path.
    runtime = sgl.Runtime(
        model_path="liuhaotian/llava-v1.6-vicuna-7b",
        tokenizer_path="llava-hf/llava-1.5-7b-hf",
    )
    sgl.set_default_backend(runtime)
    print(f"chat template: {runtime.endpoint.chat_template.name}")

    # Or you can use API models
    # sgl.set_default_backend(sgl.OpenAI("gpt-4-vision-preview"))
    # sgl.set_default_backend(sgl.VertexAI("gemini-pro-vision"))

    # Ensure the runtime is shut down even if one of the demos raises.
    try:
        # Run a single request
        print("\n========== single ==========\n")
        single()

        # Stream output
        print("\n========== stream ==========\n")
        stream()

        # Run a batch of requests
        print("\n========== batch ==========\n")
        batch()
    finally:
        runtime.shutdown()