# sglang/examples/frontend_language/quick_start/local_example_llava_next.py
"""
Usage: python3 local_example_llava_next.py
2024-01-24 11:44:07 +00:00
"""
2024-07-18 04:55:39 +10:00
2024-01-18 13:46:38 -08:00
import sglang as sgl
from sglang.lang.chat_template import get_chat_template
@sgl.function
def image_qa(s, image_path, question):
    """SGLang program: ask *question* about the image at *image_path*.

    Appends a user turn containing the image plus the question, then an
    assistant turn whose generation is captured under the key "answer".
    """
    user_turn = sgl.image(image_path) + question
    s += sgl.user(user_turn)
    s += sgl.assistant(sgl.gen("answer"))
def single():
    """Run one image-QA request and print the generated answer."""
    state = image_qa.run(
        image_path="images/cat.jpeg",
        question="What is this?",
        max_new_tokens=128,
    )
    print(state["answer"], "\n")
def stream():
    """Run one image-QA request with streaming, printing tokens as they arrive."""
    state = image_qa.run(
        image_path="images/cat.jpeg",
        question="What is this?",
        max_new_tokens=64,
        stream=True,
    )
    # text_iter yields incremental chunks of the "answer" generation.
    for chunk in state.text_iter("answer"):
        print(chunk, end="", flush=True)
    print()
def batch():
    """Run two image-QA requests as a batch and print each answer."""
    requests = [
        {"image_path": "images/cat.jpeg", "question": "What is this?"},
        {"image_path": "images/dog.jpeg", "question": "What is this?"},
    ]
    states = image_qa.run_batch(requests, max_new_tokens=128)
    for state in states:
        print(state["answer"], "\n")
if __name__ == "__main__":
import multiprocessing as mp
mp.set_start_method("spawn", force=True)
runtime = sgl.Runtime(model_path="lmms-lab/llama3-llava-next-8b")
2024-11-03 12:25:39 -08:00
runtime.endpoint.chat_template = get_chat_template("llama-3-instruct-llava")
# Or you can use the 72B model
# runtime = sgl.Runtime(model_path="lmms-lab/llava-next-72b", tp_size=8)
# runtime.endpoint.chat_template = get_chat_template("chatml-llava")
2024-01-30 04:29:32 -08:00
sgl.set_default_backend(runtime)
2024-02-11 06:43:45 -08:00
print(f"chat template: {runtime.endpoint.chat_template.name}")
# Or you can use API models
# sgl.set_default_backend(sgl.OpenAI("gpt-4-vision-preview"))
# sgl.set_default_backend(sgl.VertexAI("gemini-pro-vision"))
2024-01-30 04:29:32 -08:00
# Run a single request
print("\n========== single ==========\n")
single()
# Stream output
print("\n========== stream ==========\n")
stream()
# Run a batch of requests
print("\n========== batch ==========\n")
batch()
runtime.shutdown()