Cleanup readme, llava examples, usage examples and nccl init (#1194)
@@ -1,45 +0,0 @@
"""
Usage:
python3 async_io.py
"""

import asyncio

from sglang import Runtime


async def generate(
    engine,
    prompt,
    sampling_params,
):
    tokenizer = engine.get_tokenizer()

    messages = [
        {
            "role": "system",
            "content": "You will be given question answer tasks.",
        },
        {"role": "user", "content": prompt},
    ]

    prompt = tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )

    stream = engine.add_request(prompt, sampling_params)

    async for output in stream:
        print(output, end="", flush=True)
    print()


if __name__ == "__main__":
    runtime = Runtime(model_path="meta-llama/Llama-2-7b-chat-hf")
    print("--- runtime ready ---\n")

    prompt = "Who is Alan Turing?"
    sampling_params = {"max_new_tokens": 128}
    asyncio.run(generate(runtime, prompt, sampling_params))

    runtime.shutdown()
@@ -1,53 +0,0 @@
import sglang as sgl

character_regex = (
    r"""\{\n"""
    + r""" "姓名": "[^"]{1,32}",\n"""
    + r""" "学院": "(格兰芬多|赫奇帕奇|拉文克劳|斯莱特林)",\n"""
    + r""" "血型": "(纯血|混血|麻瓜)",\n"""
    + r""" "职业": "(学生|教师|傲罗|魔法部|食死徒|凤凰社成员)",\n"""
    + r""" "魔杖": \{\n"""
    + r""" "材质": "[^"]{1,32}",\n"""
    + r""" "杖芯": "[^"]{1,32}",\n"""
    + r""" "长度": [0-9]{1,2}\.[0-9]{0,2}\n"""
    + r""" \},\n"""
    + r""" "存活": "(存活|死亡)",\n"""
    + r""" "守护神": "[^"]{1,32}",\n"""
    + r""" "博格特": "[^"]{1,32}"\n"""
    + r"""\}"""
)


@sgl.function
def character_gen(s, name):
    s += name + " 是一名哈利波特系列小说中的角色。请填写以下关于这个角色的信息。"
    s += """\
这是一个例子
{
 "姓名": "哈利波特",
 "学院": "格兰芬多",
 "血型": "混血",
 "职业": "学生",
 "魔杖": {
  "材质": "冬青木",
  "杖芯": "凤凰尾羽",
  "长度": 11.0
 },
 "存活": "存活",
 "守护神": "麋鹿",
 "博格特": "摄魂怪"
}
"""
    s += f"现在请你填写{name}的信息:\n"
    s += sgl.gen("json_output", max_tokens=256, regex=character_regex)


def main():
    backend = sgl.RuntimeEndpoint("http://localhost:30000")
    sgl.set_default_backend(backend)
    ret = character_gen.run(name="赫敏格兰杰", temperature=0)
    print(ret.text())


if __name__ == "__main__":
    main()
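Running this Chinese structured-decoding example presumably requires a local server on port 30000 first, just like the English json_decode.py example later in this commit; a typical launch command (any chat model served on that port should work, the model path here is only the one used elsewhere in these examples) is:

python -m sglang.launch_server --model-path meta-llama/Llama-2-7b-chat-hf --port 30000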
@@ -1,44 +0,0 @@
"""
Usage:
python -m sglang.launch_server --model-path meta-llama/Llama-2-7b-chat-hf --port 30000
python choices_logprob.py
"""

import sglang as sgl


@sgl.function
def tool_use(s, question):
    s += "To answer this question: " + question + ", "
    s += "I need to use a " + sgl.gen("tool", choices=["calculator", "search engine"])


def main():
    # Run one case
    question = "What is 5 + 5?"
    state = tool_use.run(question)
    print("question:", question)
    print("choice:", state["tool"])
    meta_info = state.get_meta_info("tool")
    print("logprobs of choice 1", meta_info["input_token_logprobs"][0])
    print("logprobs of choice 2", meta_info["input_token_logprobs"][1])
    print("-" * 50)

    # Run a batch
    questions = [
        "What is 5 + 6?",
        "Who is Michael Jordan?",
    ]
    states = tool_use.run_batch([{"question": q} for q in questions])
    for question, state in zip(questions, states):
        print("question:", question)
        print("choice:", state["tool"])
        meta_info = state.get_meta_info("tool")
        print("logprobs of choice 1", meta_info["input_token_logprobs"][0])
        print("logprobs of choice 2", meta_info["input_token_logprobs"][1])
        print("-" * 50)


if __name__ == "__main__":
    sgl.set_default_backend(sgl.RuntimeEndpoint("http://localhost:30000"))
    main()
@@ -1,115 +0,0 @@
from math import exp
from pprint import pformat

import sglang as sgl

YELLOW = "\033[1;33m"
GREEN = "\033[1;32m"
BLUE = "\033[1;34m"
CLEAR = "\033[1;0m"


@sgl.function
def cot_decoding(s, question, get_top_k, is_chat_model, verbose):
    """CoT Decoding: http://arxiv.org/abs/2402.10200"""

    if is_chat_model:
        s += sgl.user("Question: " + question + "\nAnswer:")
        s += sgl.assistant_begin()
    else:
        s += "Question: " + question + "\nAnswer:"

    step_0 = s.fork(1)[0]
    forks = s.fork(get_top_k)
    answer_forks = s.fork(get_top_k)

    # decoding step 0
    step_0 += sgl.gen(
        "get_top_k",
        max_tokens=0,
        return_logprob=True,
        top_logprobs_num=get_top_k,
        return_text_in_logprobs=True,
    )
    logprobs = step_0.get_meta_info("get_top_k")["output_top_logprobs"][0]

    print("Decoding step 0:", ", ".join(pformat(token[2]) for token in logprobs))
    for idx, (f, token) in enumerate(zip(forks, logprobs)):
        logprob, token_id, text = token
        f += text

        if text == "<|end_of_text|>":
            print(
                f"{YELLOW}Path #{idx} {pformat(text)}[{exp(logprob):.3f}] (score=nan, answer=nan){CLEAR}"
            )
            continue

        # continue greedy decoding
        f += sgl.gen(
            "answer",
            temperature=0,
            max_tokens=1024,
            return_logprob=True,
            top_logprobs_num=2,
            return_text_in_logprobs=True,
        )

        # calculate probability disparity between the top and secondary tokens
        x1s = [exp(xt[0][0]) for xt in f.get_meta_info("answer")["output_top_logprobs"]]
        x2s = [exp(xt[1][0]) for xt in f.get_meta_info("answer")["output_top_logprobs"]]
        tokens = [xt[0][2] for xt in f.get_meta_info("answer")["output_top_logprobs"]]
        delta = (sum(x1s) - sum(x2s)) / len(x1s)

        # extract the answer span (without the '<|end_of_text|>' token)
        answer_forks[idx] += text + f["answer"] + "\nSo the answer is"
        answer_forks[idx] += sgl.gen(
            "answer_span",
            temperature=0,
            max_tokens=64,
            return_logprob=True,
            top_logprobs_num=2,
            return_text_in_logprobs=True,
        )
        answer = answer_forks[idx]["answer_span"].replace("\n", " ").strip(":")
        print(
            f"{YELLOW}Path #{idx} {pformat(text)}[{exp(logprob):.3f}] (score={delta}, answer={answer}){CLEAR}"
        )
        generated_text = str(answer_forks[idx])[len("ProgramState(") : -1]
        print(f"{BLUE}{pformat(generated_text)}{CLEAR}")

        if verbose:
            answer_tokens = [
                xt[0][2]
                for xt in answer_forks[idx].get_meta_info("answer_span")[
                    "output_top_logprobs"
                ]
            ]
            answer_x1s = [
                exp(xt[0][0])
                for xt in answer_forks[idx].get_meta_info("answer_span")[
                    "output_top_logprobs"
                ]
            ]
            answer_x2s = [
                exp(xt[1][0])
                for xt in answer_forks[idx].get_meta_info("answer_span")[
                    "output_top_logprobs"
                ]
            ]

            for token, x1, x2 in zip(tokens, x1s, x2s):
                print(f" {GREEN}{pformat(token)}{CLEAR}({x1:.3f}-{x2:.3f})", end="")
            print("\n===========")
            for token, x1, x2 in zip(answer_tokens, answer_x1s, answer_x2s):
                print(f" {GREEN}{pformat(token)}{CLEAR}({x1:.3f}-{x2:.3f})", end="")
            print()


sgl.set_default_backend(sgl.RuntimeEndpoint("http://localhost:30000"))

state = cot_decoding.run(
    question=r"Claire makes a 3 egg omelet every morning for breakfast. How many dozens of eggs will she eat in 4 weeks?",
    get_top_k=10,
    is_chat_model=True,
    verbose=False,
)
@@ -1,83 +0,0 @@
"""
Usage:
python -m sglang.launch_server --model-path meta-llama/Llama-2-7b-chat-hf --port 30000
python json_decode.py
"""

from enum import Enum

from pydantic import BaseModel

import sglang as sgl
from sglang.srt.constrained import build_regex_from_object

character_regex = (
    r"""\{\n"""
    + r""" "name": "[\w\d\s]{1,16}",\n"""
    + r""" "house": "(Gryffindor|Slytherin|Ravenclaw|Hufflepuff)",\n"""
    + r""" "blood status": "(Pure-blood|Half-blood|Muggle-born)",\n"""
    + r""" "occupation": "(student|teacher|auror|ministry of magic|death eater|order of the phoenix)",\n"""
    + r""" "wand": \{\n"""
    + r""" "wood": "[\w\d\s]{1,16}",\n"""
    + r""" "core": "[\w\d\s]{1,16}",\n"""
    + r""" "length": [0-9]{1,2}\.[0-9]{0,2}\n"""
    + r""" \},\n"""
    + r""" "alive": "(Alive|Deceased)",\n"""
    + r""" "patronus": "[\w\d\s]{1,16}",\n"""
    + r""" "bogart": "[\w\d\s]{1,16}"\n"""
    + r"""\}"""
)


@sgl.function
def character_gen(s, name):
    s += (
        name
        + " is a character in Harry Potter. Please fill in the following information about this character.\n"
    )
    s += "The constrained regex is:\n"
    s += character_regex + "\n"
    s += "The JSON output is:\n"
    s += sgl.gen("json_output", max_tokens=256, regex=character_regex)


def driver_character_gen():
    state = character_gen.run(name="Hermione Granger")
    print(state.text())


class Weapon(str, Enum):
    sword = "sword"
    axe = "axe"
    mace = "mace"
    spear = "spear"
    bow = "bow"
    crossbow = "crossbow"


class Wizard(BaseModel):
    name: str
    age: int
    weapon: Weapon


@sgl.function
def pydantic_wizard_gen(s):
    s += "Give me a description about a wizard in the JSON format.\n"
    s += sgl.gen(
        "character",
        max_tokens=128,
        temperature=0,
        regex=build_regex_from_object(Wizard),  # Requires pydantic >= 2.0
    )


def driver_pydantic_wizard_gen():
    state = pydantic_wizard_gen.run()
    print(state.text())


if __name__ == "__main__":
    sgl.set_default_backend(sgl.RuntimeEndpoint("http://localhost:30000"))
    driver_character_gen()
    # driver_pydantic_wizard_gen()
@@ -1,104 +0,0 @@
# NOTE: Currently this can only be run through HTTP requests.
import json
from concurrent.futures import ThreadPoolExecutor

from json_decode import character_regex

from sglang.utils import http_request

character_names = ["Hermione Granger", "Ron Weasley", "Harry Potter"]

base_url = "http://localhost:30000"

prompt = "is a character in Harry Potter. Please fill in the following information about this character.\n"


def openai_api_request(name):
    data = {
        "model": "",
        "prompt": name + prompt,
        "temperature": 0,
        "max_tokens": 128,
        "regex": character_regex,
        "logprobs": 3,
    }
    res = http_request(base_url + "/v1/completions", json=data).json()

    # with open(f"json_logprobs_{name.replace(' ', '_')}_tmp.json", "w") as fout:
    #     fout.write(json.dumps(res, indent=4))

    logprobs = res["choices"][0]["logprobs"]
    usage = res["usage"]
    assert len(logprobs["token_logprobs"]) == len(logprobs["tokens"])
    assert len(logprobs["token_logprobs"]) == len(logprobs["top_logprobs"])
    assert len(logprobs["token_logprobs"]) == usage["completion_tokens"] - 1

    return res


def srt_api_request(name):
    data = {
        "text": name + prompt,
        "sampling_params": {
            "temperature": 0,
            "max_new_tokens": 128,
            "regex": character_regex,
        },
        "return_logprob": True,
        "logprob_start_len": 0,
        "top_logprobs_num": 3,
        "return_text_in_logprobs": True,
    }

    res = http_request(base_url + "/generate", json=data).json()

    # with open(f"json_logprobs_{name.replace(' ', '_')}_tmp.json", "w") as fout:
    #     fout.write(json.dumps(res, indent=4))

    meta_info = res["meta_info"]
    assert len(meta_info["input_token_logprobs"]) == len(
        meta_info["input_top_logprobs"]
    )
    assert len(meta_info["output_token_logprobs"]) == len(
        meta_info["output_top_logprobs"]
    )
    assert len(meta_info["input_token_logprobs"]) == meta_info["prompt_tokens"]
    assert len(meta_info["output_token_logprobs"]) == meta_info["completion_tokens"] - 1

    return res


def pretty_print(res):
    meta_info = res["meta_info"]

    print("\n\n", "=" * 30, "Prefill", "=" * 30)
    for i in range(len(meta_info["input_token_logprobs"])):
        print(f"{str(meta_info['input_token_logprobs'][i][2].encode()): <20}", end="")
        top_ks = (
            [str(t[2].encode()) for t in meta_info["input_top_logprobs"][i]]
            if meta_info["input_top_logprobs"][i]
            else []
        )
        for top_k in top_ks:
            print(f"{top_k: <15}", end="")
        print()

    print("\n\n", "=" * 30, "Decode", "=" * 30)
    for i in range(len(meta_info["output_token_logprobs"])):
        print(f"{str(meta_info['output_token_logprobs'][i][2].encode()): <20}", end="")
        top_ks = [str(t[2].encode()) for t in meta_info["output_top_logprobs"][i]]
        for top_k in top_ks:
            print(f"{top_k: <15}", end="")
        print()

    print(res["text"])


if __name__ == "__main__":
    with ThreadPoolExecutor() as executor:
        ress = executor.map(srt_api_request, character_names)

    for res in ress:
        pretty_print(res)

    openai_api_request("Hermione Granger")
@@ -1,112 +0,0 @@
"""
Usage:
# Installing latest llava-next: pip install git+https://github.com/LLaVA-VL/LLaVA-NeXT.git
# Installing latest sglang.

# Endpoint Service CLI:
# python -m sglang.launch_server --model-path lmms-lab/llama3-llava-next-8b --tokenizer-path lmms-lab/llama3-llava-next-8b-tokenizer --port=30000 --host="127.0.0.1" --tp-size=4

python3 http_llama3_llava_test.py

Output:
"Friends posing for a fun photo with a life-sized teddy bear, creating a playful and memorable moment."
"""

import argparse
import asyncio
import copy
import json
import time

import aiohttp
import requests
from llava.conversation import conv_llava_llama_3


async def send_request(url, data, delay=0):
    await asyncio.sleep(delay)
    async with aiohttp.ClientSession() as session:
        async with session.post(url, json=data) as resp:
            output = await resp.json()
    return output


async def test_concurrent(args):
    url = f"{args.host}:{args.port}"

    prompt = "<image>\nPlease generate caption towards this image."
    conv_template = copy.deepcopy(conv_llava_llama_3)
    conv_template.append_message(role=conv_template.roles[0], message=prompt)
    conv_template.append_message(role=conv_template.roles[1], message=None)
    prompt_with_template = conv_template.get_prompt()
    response = []
    for i in range(1):
        response.append(
            send_request(
                url + "/generate",
                {
                    "text": prompt_with_template,
                    "image_data": "https://farm4.staticflickr.com/3175/2653711032_804ff86d81_z.jpg",
                    "sampling_params": {
                        "max_new_tokens": 1024,
                        "temperature": 0,
                        "top_p": 1.0,
                        "presence_penalty": 2,
                        "frequency_penalty": 2,
                        "stop": "<|eot_id|>",
                    },
                },
            )
        )

    rets = await asyncio.gather(*response)
    for ret in rets:
        print(ret["text"])


def test_streaming(args):
    url = f"{args.host}:{args.port}"
    prompt = "<image>\nPlease generate caption towards this image."
    conv_template = copy.deepcopy(conv_llava_llama_3)
    conv_template.append_message(role=conv_template.roles[0], message=prompt)
    conv_template.append_message(role=conv_template.roles[1], message=None)
    prompt_with_template = conv_template.get_prompt()
    pload = {
        "text": prompt_with_template,
        "sampling_params": {
            "max_new_tokens": 1024,
            "temperature": 0,
            "top_p": 1.0,
            "presence_penalty": 2,
            "frequency_penalty": 2,
            "stop": "<|eot_id|>",
        },
        "image_data": "https://farm4.staticflickr.com/3175/2653711032_804ff86d81_z.jpg",
        "stream": True,
    }
    response = requests.post(
        url + "/generate",
        json=pload,
        stream=True,
    )

    prev = 0
    for chunk in response.iter_lines(decode_unicode=False):
        chunk = chunk.decode("utf-8")
        if chunk and chunk.startswith("data:"):
            if chunk == "data: [DONE]":
                break
            data = json.loads(chunk[5:].strip("\n"))
            output = data["text"].strip()
            print(output[prev:], end="", flush=True)
            prev = len(output)
    print("")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--host", type=str, default="http://127.0.0.1")
    parser.add_argument("--port", type=int, default=30000)
    args = parser.parse_args()
    asyncio.run(test_concurrent(args))
    test_streaming(args)
@@ -1,211 +0,0 @@
import base64
import io
import os
import sys
import time

import numpy as np
import openai
import requests
from decord import VideoReader, cpu
from PIL import Image

# pip install httpx==0.23.3
# pip install decord
# pip install protobuf==3.20.0


def download_video(url, cache_dir):
    file_path = os.path.join(cache_dir, "jobs.mp4")
    os.makedirs(cache_dir, exist_ok=True)

    response = requests.get(url)
    response.raise_for_status()

    with open(file_path, "wb") as f:
        f.write(response.content)

    print(f"File downloaded and saved to: {file_path}")
    return file_path


def create_openai_client(base_url):
    return openai.Client(api_key="EMPTY", base_url=base_url)


def image_stream_request_test(client):
    print("----------------------Image Stream Request Test----------------------")
    stream_request = client.chat.completions.create(
        model="default",
        messages=[
            {
                "role": "user",
                "content": [
                    {
                        "type": "image_url",
                        "image_url": {
                            "url": "https://raw.githubusercontent.com/sgl-project/sglang/main/assets/logo.png"
                        },
                    },
                    {
                        "type": "text",
                        "text": "Please describe this image. Please list the benchmarks and the models.",
                    },
                ],
            },
        ],
        temperature=0.7,
        max_tokens=1024,
        stream=True,
    )
    stream_response = ""

    for chunk in stream_request:
        if chunk.choices[0].delta.content is not None:
            content = chunk.choices[0].delta.content
            stream_response += content
            sys.stdout.write(content)
            sys.stdout.flush()

    print("-" * 30)


def video_stream_request_test(client, video_path):
    print("------------------------Video Stream Request Test----------------------")
    messages = prepare_video_messages(video_path)

    start_time = time.time()
    video_request = client.chat.completions.create(
        model="default",
        messages=messages,
        temperature=0,
        max_tokens=1024,
        stream=True,
    )
    print("-" * 30)
    video_response = ""

    for chunk in video_request:
        if chunk.choices[0].delta.content is not None:
            content = chunk.choices[0].delta.content
            video_response += content
            sys.stdout.write(content)
            sys.stdout.flush()
    print("-" * 30)


def image_speed_test(client):
    print("----------------------Image Speed Test----------------------")
    start_time = time.time()
    request = client.chat.completions.create(
        model="default",
        messages=[
            {
                "role": "user",
                "content": [
                    {
                        "type": "image_url",
                        "image_url": {
                            "url": "https://raw.githubusercontent.com/sgl-project/sglang/main/assets/logo.png"
                        },
                    },
                    {
                        "type": "text",
                        "text": "Please describe this image. Please list the benchmarks and the models.",
                    },
                ],
            },
        ],
        temperature=0,
        max_tokens=1024,
    )
    end_time = time.time()
    response = request.choices[0].message.content
    print(response)
    print("-" * 30)
    print_speed_test_results(request, start_time, end_time)


def video_speed_test(client, video_path):
    print("------------------------Video Speed Test------------------------")
    messages = prepare_video_messages(video_path)

    start_time = time.time()
    video_request = client.chat.completions.create(
        model="default",
        messages=messages,
        temperature=0,
        max_tokens=1024,
    )
    end_time = time.time()
    video_response = video_request.choices[0].message.content
    print(video_response)
    print("-" * 30)
    print_speed_test_results(video_request, start_time, end_time)


def prepare_video_messages(video_path):
    max_frames_num = 32
    vr = VideoReader(video_path, ctx=cpu(0))
    total_frame_num = len(vr)
    uniform_sampled_frames = np.linspace(
        0, total_frame_num - 1, max_frames_num, dtype=int
    )
    frame_idx = uniform_sampled_frames.tolist()
    frames = vr.get_batch(frame_idx).asnumpy()

    base64_frames = []
    for frame in frames:
        pil_img = Image.fromarray(frame)
        buff = io.BytesIO()
        pil_img.save(buff, format="JPEG")
        base64_str = base64.b64encode(buff.getvalue()).decode("utf-8")
        base64_frames.append(base64_str)

    messages = [{"role": "user", "content": []}]
    for base64_frame in base64_frames:
        # Build a fresh dict per frame; reusing one template dict and shallow-copying
        # it would leave every entry pointing at the same nested "image_url" dict,
        # so all frames would end up with the last frame's data URL.
        frame_format = {
            "type": "image_url",
            "image_url": {"url": "data:image/jpeg;base64,{}".format(base64_frame)},
        }
        messages[0]["content"].append(frame_format)

    prompt = {"type": "text", "text": "Please describe the video in detail."}
    messages[0]["content"].append(prompt)

    return messages


def print_speed_test_results(request, start_time, end_time):
    total_tokens = request.usage.total_tokens
    completion_tokens = request.usage.completion_tokens
    prompt_tokens = request.usage.prompt_tokens

    print(f"Total tokens: {total_tokens}")
    print(f"Completion tokens: {completion_tokens}")
    print(f"Prompt tokens: {prompt_tokens}")
    print(f"Time taken: {end_time - start_time} seconds")
    print(f"Token per second: {total_tokens / (end_time - start_time)}")
    print(f"Completion token per second: {completion_tokens / (end_time - start_time)}")
    print(f"Prompt token per second: {prompt_tokens / (end_time - start_time)}")


def main():
    url = "https://raw.githubusercontent.com/EvolvingLMMs-Lab/sglang/dev/onevision_local/assets/jobs.mp4"
    cache_dir = os.path.expanduser("~/.cache")
    video_path = download_video(url, cache_dir)

    client = create_openai_client("http://127.0.0.1:30000/v1")

    image_stream_request_test(client)
    video_stream_request_test(client, video_path)
    image_speed_test(client)
    video_speed_test(client, video_path)


if __name__ == "__main__":
    main()
@@ -1,112 +0,0 @@
"""
Usage:
# Installing latest llava-next: pip install git+https://github.com/LLaVA-VL/LLaVA-NeXT.git
# Installing latest sglang.

# Endpoint Service CLI:
# python -m sglang.launch_server --model-path lmms-lab/llava-next-72b --tokenizer-path lmms-lab/llavanext-qwen-tokenizer --port=30000 --host="127.0.0.1" --tp-size=4

python3 http_qwen_llava_test.py

Output:
"Two children pose with a large teddy bear, one holding a smaller stuffed bear, in a room with an American flag and potted plants."
"""

import argparse
import asyncio
import copy
import json
import time

import aiohttp
import requests
from llava.conversation import conv_qwen


async def send_request(url, data, delay=0):
    await asyncio.sleep(delay)
    async with aiohttp.ClientSession() as session:
        async with session.post(url, json=data) as resp:
            output = await resp.json()
    return output


async def test_concurrent(args):
    url = f"{args.host}:{args.port}"

    prompt = "<image>\nPlease generate caption towards this image."
    conv_template = copy.deepcopy(conv_qwen)
    conv_template.append_message(role=conv_template.roles[0], message=prompt)
    conv_template.append_message(role=conv_template.roles[1], message=None)
    prompt_with_template = conv_template.get_prompt()
    response = []
    for i in range(1):
        response.append(
            send_request(
                url + "/generate",
                {
                    "text": prompt_with_template,
                    "image_data": "https://farm4.staticflickr.com/3175/2653711032_804ff86d81_z.jpg",
                    "sampling_params": {
                        "max_new_tokens": 1024,
                        "temperature": 0,
                        "top_p": 1.0,
                        "presence_penalty": 2,
                        "frequency_penalty": 2,
                        "stop": "<|im_end|>",
                    },
                },
            )
        )

    rets = await asyncio.gather(*response)
    for ret in rets:
        print(ret["text"])


def test_streaming(args):
    url = f"{args.host}:{args.port}"
    prompt = "<image>\nPlease generate caption towards this image."
    conv_template = copy.deepcopy(conv_qwen)
    conv_template.append_message(role=conv_template.roles[0], message=prompt)
    conv_template.append_message(role=conv_template.roles[1], message=None)
    prompt_with_template = conv_template.get_prompt()
    pload = {
        "text": prompt_with_template,
        "sampling_params": {
            "max_new_tokens": 1024,
            "temperature": 0,
            "top_p": 1.0,
            "presence_penalty": 2,
            "frequency_penalty": 2,
            "stop": "<|im_end|>",
        },
        "image_data": "https://farm4.staticflickr.com/3175/2653711032_804ff86d81_z.jpg",
        "stream": True,
    }
    response = requests.post(
        url + "/generate",
        json=pload,
        stream=True,
    )

    prev = 0
    for chunk in response.iter_lines(decode_unicode=False):
        chunk = chunk.decode("utf-8")
        if chunk and chunk.startswith("data:"):
            if chunk == "data: [DONE]":
                break
            data = json.loads(chunk[5:].strip("\n"))
            output = data["text"].strip()
            print(output[prev:], end="", flush=True)
            prev = len(output)
    print("")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--host", type=str, default="http://127.0.0.1")
    parser.add_argument("--port", type=int, default=30000)
    args = parser.parse_args()
    asyncio.run(test_concurrent(args))
    test_streaming(args)
@@ -1,90 +0,0 @@
"""
Usage: python3 srt_example_llava.py
"""

from PIL import ImageFile

import sglang as sgl
from sglang.lang.chat_template import get_chat_template
from sglang.srt.utils import load_image

ImageFile.LOAD_TRUNCATED_IMAGES = True  # Allow loading of truncated images


@sgl.function
def image_qa(s, image, question):
    s += sgl.user(sgl.image(image) + question)
    s += sgl.assistant(sgl.gen("answer"))


def single():
    image_url = "https://farm4.staticflickr.com/3175/2653711032_804ff86d81_z.jpg"
    pil_image, _ = load_image(image_url)
    state = image_qa.run(image=pil_image, question="What is this?", max_new_tokens=512)
    print(state["answer"], "\n")


def stream():
    image_url = "https://farm4.staticflickr.com/3175/2653711032_804ff86d81_z.jpg"
    pil_image, _ = load_image(image_url)
    state = image_qa.run(
        image=pil_image,
        question="Please generate short caption for this image.",
        max_new_tokens=512,
        temperature=0,
        stream=True,
    )

    for out in state.text_iter("answer"):
        print(out, end="", flush=True)
    print()


def batch():
    image_url = "https://farm4.staticflickr.com/3175/2653711032_804ff86d81_z.jpg"
    pil_image, _ = load_image(image_url)
    states = image_qa.run_batch(
        [
            {"image": pil_image, "question": "What is this?"},
            {"image": pil_image, "question": "What is this?"},
        ],
        max_new_tokens=512,
    )
    for s in states:
        print(s["answer"], "\n")


if __name__ == "__main__":
    import multiprocessing as mp

    mp.set_start_method("spawn", force=True)
    runtime = sgl.Runtime(
        model_path="lmms-lab/llama3-llava-next-8b",
        tokenizer_path="lmms-lab/llama3-llava-next-8b-tokenizer",
    )
    runtime.endpoint.chat_template = get_chat_template("llama-3-instruct")
    # runtime = sgl.Runtime(
    #     model_path="lmms-lab/llava-next-72b",
    #     tokenizer_path="lmms-lab/llavanext-qwen-tokenizer",
    # )
    # runtime.endpoint.chat_template = get_chat_template("chatml-llava")
    sgl.set_default_backend(runtime)
    print(f"chat template: {runtime.endpoint.chat_template.name}")

    # Or you can use API models
    # sgl.set_default_backend(sgl.OpenAI("gpt-4-vision-preview"))
    # sgl.set_default_backend(sgl.VertexAI("gemini-pro-vision"))

    # Run a single request
    print("\n========== single ==========\n")
    single()

    # Stream output
    print("\n========== stream ==========\n")
    stream()

    # Run a batch of requests
    print("\n========== batch ==========\n")
    batch()

    runtime.shutdown()
@@ -1,266 +0,0 @@
"""
Usage:
pip install opencv-python-headless
python3 srt_example_llava_v.py
"""

import argparse
import csv
import os
import time

import requests

import sglang as sgl


@sgl.function
def video_qa(s, num_frames, video_path, question):
    s += sgl.user(sgl.video(video_path, num_frames) + question)
    s += sgl.assistant(sgl.gen("answer"))


def single(path, num_frames=16):
    state = video_qa.run(
        num_frames=num_frames,
        video_path=path,
        question="Please provide a detailed description of the video, focusing on the main subjects, their actions, the background scenes",
        temperature=0.0,
        max_new_tokens=1024,
    )
    print(state["answer"], "\n")


def split_into_chunks(lst, num_chunks):
    """Split a list into a specified number of chunks."""
    # Calculate the chunk size using integer division. Note that this may drop some items if not evenly divisible.
    chunk_size = len(lst) // num_chunks

    if chunk_size == 0:
        chunk_size = len(lst)
    # Use list comprehension to generate chunks. The last chunk will take any remainder if the list size isn't evenly divisible.
    chunks = [lst[i : i + chunk_size] for i in range(0, len(lst), chunk_size)]
    # Ensure we have exactly num_chunks chunks, even if some are empty
    chunks.extend([[] for _ in range(num_chunks - len(chunks))])
    return chunks


def save_batch_results(batch_video_files, states, cur_chunk, batch_idx, save_dir):
    csv_filename = f"{save_dir}/chunk_{cur_chunk}_batch_{batch_idx}.csv"
    with open(csv_filename, "w", newline="") as csvfile:
        writer = csv.writer(csvfile)
        writer.writerow(["video_name", "answer"])
        for video_path, state in zip(batch_video_files, states):
            video_name = os.path.basename(video_path)
            writer.writerow([video_name, state["answer"]])


def compile_and_cleanup_final_results(cur_chunk, num_batches, save_dir):
    final_csv_filename = f"{save_dir}/final_results_chunk_{cur_chunk}.csv"
    with open(final_csv_filename, "w", newline="") as final_csvfile:
        writer = csv.writer(final_csvfile)
        writer.writerow(["video_name", "answer"])
        for batch_idx in range(num_batches):
            batch_csv_filename = f"{save_dir}/chunk_{cur_chunk}_batch_{batch_idx}.csv"
            with open(batch_csv_filename, "r") as batch_csvfile:
                reader = csv.reader(batch_csvfile)
                next(reader)  # Skip header row
                for row in reader:
                    writer.writerow(row)
            os.remove(batch_csv_filename)


def find_video_files(video_dir):
    # Check if the video_dir is actually a file
    if os.path.isfile(video_dir):
        # If it's a file, return it as a single-element list
        return [video_dir]

    # Original logic to find video files in a directory
    video_files = []
    for root, dirs, files in os.walk(video_dir):
        for file in files:
            if file.endswith((".mp4", ".avi", ".mov")):
                video_files.append(os.path.join(root, file))
    return video_files


def batch(video_dir, save_dir, cur_chunk, num_chunks, num_frames=16, batch_size=64):
    video_files = find_video_files(video_dir)
    chunked_video_files = split_into_chunks(video_files, num_chunks)[cur_chunk]
    num_batches = 0

    for i in range(0, len(chunked_video_files), batch_size):
        batch_video_files = chunked_video_files[i : i + batch_size]
        print(f"Processing batch of {len(batch_video_files)} video(s)...")

        if not batch_video_files:
            print("No video files found in the specified directory.")
            return

        batch_input = [
            {
                "num_frames": num_frames,
                "video_path": video_path,
                "question": "Please provide a detailed description of the video, focusing on the main subjects, their actions, the background scenes.",
            }
            for video_path in batch_video_files
        ]

        start_time = time.time()
        states = video_qa.run_batch(batch_input, max_new_tokens=512, temperature=0.2)
        total_time = time.time() - start_time
        average_time = total_time / len(batch_video_files)
        print(
            f"Number of videos in batch: {len(batch_video_files)}. Average processing time per video: {average_time:.2f} seconds. Total time for this batch: {total_time:.2f} seconds"
        )

        save_batch_results(batch_video_files, states, cur_chunk, num_batches, save_dir)
        num_batches += 1

    compile_and_cleanup_final_results(cur_chunk, num_batches, save_dir)


if __name__ == "__main__":

    url = "https://raw.githubusercontent.com/EvolvingLMMs-Lab/sglang/dev/onevision_local/assets/jobs.mp4"

    cache_dir = os.path.expanduser("~/.cache")
    file_path = os.path.join(cache_dir, "jobs.mp4")

    os.makedirs(cache_dir, exist_ok=True)

    response = requests.get(url)
    response.raise_for_status()  # Raise an exception for bad responses

    with open(file_path, "wb") as f:
        f.write(response.content)

    print(f"File downloaded and saved to: {file_path}")
    # Create the parser
    parser = argparse.ArgumentParser(
        description="Run video processing with specified port."
    )

    # Add an argument for the port
    parser.add_argument(
        "--port",
        type=int,
        default=30000,
        help="The master port for distributed serving.",
    )
    parser.add_argument(
        "--chunk-idx", type=int, default=0, help="The index of the chunk to process."
    )
    parser.add_argument(
        "--num-chunks", type=int, default=8, help="The number of chunks to process."
    )
    parser.add_argument(
        "--save-dir",
        type=str,
        default="./work_dirs/llava_video",
        help="The directory to save the processed video files.",
    )
    parser.add_argument(
        "--video-dir",
        type=str,
        default=os.path.expanduser("~/.cache/jobs.mp4"),
        help="The directory or path for the processed video files.",
    )
    parser.add_argument(
        "--model-path",
        type=str,
        default="lmms-lab/LLaVA-NeXT-Video-7B",
        help="The model path for the video processing.",
    )
    parser.add_argument(
        "--num-frames",
        type=int,
        default=16,
        help="The number of frames to process in each video.",
    )
    parser.add_argument("--mm_spatial_pool_stride", type=int, default=2)

    # Parse the arguments
    args = parser.parse_args()

    cur_port = args.port

    cur_chunk = args.chunk_idx

    num_chunks = args.num_chunks

    num_frames = args.num_frames

    if "34b" in args.model_path.lower():
        tokenizer_path = "liuhaotian/llava-v1.6-34b-tokenizer"
    elif "7b" in args.model_path.lower():
        tokenizer_path = "llava-hf/llava-1.5-7b-hf"
    else:
        print("Invalid model path. Please specify a valid model path.")
        exit()

    model_overide_args = {}

    model_overide_args["mm_spatial_pool_stride"] = args.mm_spatial_pool_stride
    model_overide_args["architectures"] = ["LlavaVidForCausalLM"]
    model_overide_args["num_frames"] = args.num_frames
    model_overide_args["model_type"] = "llava"

    if "34b" in args.model_path.lower():
        model_overide_args["image_token_index"] = 64002

    if args.num_frames == 32:
        model_overide_args["rope_scaling"] = {"factor": 2.0, "type": "linear"}
        model_overide_args["max_sequence_length"] = 4096 * 2
        model_overide_args["tokenizer_model_max_length"] = 4096 * 2
    elif args.num_frames < 32:
        pass
    else:
        print(
            "The maximum number of frames to process is 32. Please specify a valid number of frames."
        )
        exit()

    runtime = sgl.Runtime(
        model_path=args.model_path,  # "liuhaotian/llava-v1.6-vicuna-7b",
        tokenizer_path=tokenizer_path,
        port=cur_port,
        additional_ports=[cur_port + 1, cur_port + 2, cur_port + 3, cur_port + 4],
        model_overide_args=model_overide_args,
        tp_size=1,
    )
    sgl.set_default_backend(runtime)
    print(f"chat template: {runtime.endpoint.chat_template.name}")

    # Run a single request
    # try:
    print("\n========== single ==========\n")
    root = args.video_dir
    if os.path.isfile(root):
        video_files = [root]
    else:
        video_files = [
            os.path.join(root, f)
            for f in os.listdir(root)
            if f.endswith((".mp4", ".avi", ".mov"))
        ]  # Add more extensions if needed
    start_time = time.time()  # Start time for processing a single video
    for cur_video in video_files[:1]:
        print(cur_video)
        single(cur_video, num_frames)
    end_time = time.time()  # End time for processing a single video
    total_time = end_time - start_time
    average_time = total_time / len(
        video_files
    )  # Calculate the average processing time
    print(f"Average processing time per video: {average_time:.2f} seconds")
    runtime.shutdown()
    # except Exception as e:
    #     print(e)
    runtime.shutdown()

    # # # Run a batch of requests
    # print("\n========== batch ==========\n")
    # if not os.path.exists(args.save_dir):
    #     os.makedirs(args.save_dir)
    # batch(args.video_dir,args.save_dir,cur_chunk, num_chunks, num_frames, num_chunks)
    # runtime.shutdown()
@@ -1,131 +0,0 @@
#!/bin/bash

##### USAGE #####
# - First node:
# ```sh
# bash examples/usage/llava_video/srt_example_llava_v.sh K 0 YOUR_VIDEO_PATH YOUR_MODEL_PATH FRAMES_PER_VIDEO
# ```
# - Second node:
# ```sh
# bash examples/usage/llava_video/srt_example_llava_v.sh K 1 YOUR_VIDEO_PATH YOUR_MODEL_PATH FRAMES_PER_VIDEO
# ```
# - The K-th node:
# ```sh
# bash examples/usage/llava_video/srt_example_llava_v.sh K K-1 YOUR_VIDEO_PATH YOUR_MODEL_PATH FRAMES_PER_VIDEO
# ```


# Replace `K`, `YOUR_VIDEO_PATH`, `YOUR_MODEL_PATH`, and `FRAMES_PER_VIDEO` with your specific details.
# CURRENT_ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
CURRENT_ROOT=$(dirname "$0")

echo ${CURRENT_ROOT}

cd ${CURRENT_ROOT}

export PYTHONWARNINGS=ignore

START_TIME=$(date +%s) # Capture start time

NUM_NODES=$1

CUR_NODES_IDX=$2

VIDEO_DIR=$3

MODEL_PATH=$4

NUM_FRAMES=$5


# FRAME_FORMAT=$6

# FRAME_FORMAT=$(echo $FRAME_FORMAT | tr '[:lower:]' '[:upper:]')

# # Check if FRAME_FORMAT is either JPEG or PNG
# if [[ "$FRAME_FORMAT" != "JPEG" && "$FRAME_FORMAT" != "PNG" ]]; then
#     echo "Error: FRAME_FORMAT must be either JPEG or PNG."
#     exit 1
# fi

# export TARGET_FRAMES=$TARGET_FRAMES

echo "$NUM_FRAMES frames will be sampled from each video"

# export FRAME_FORMAT=$FRAME_FORMAT

# echo "The frame format is $FRAME_FORMAT"

# Assuming GPULIST is a bash array containing your GPUs
GPULIST=(0 1 2 3 4 5 6 7)
LOCAL_CHUNKS=${#GPULIST[@]}

echo "Number of GPUs in GPULIST: $LOCAL_CHUNKS"

ALL_CHUNKS=$((NUM_NODES * LOCAL_CHUNKS))

# Calculate GPUs per chunk
GPUS_PER_CHUNK=1

echo $GPUS_PER_CHUNK

for IDX in $(seq 1 $LOCAL_CHUNKS); do
    (
        START=$(((IDX-1) * GPUS_PER_CHUNK))
        LENGTH=$GPUS_PER_CHUNK # Length for slicing, not the end index

        CHUNK_GPUS=(${GPULIST[@]:$START:$LENGTH})

        # Convert the chunk GPUs array to a comma-separated string
        CHUNK_GPUS_STR=$(IFS=,; echo "${CHUNK_GPUS[*]}")

        LOCAL_IDX=$((CUR_NODES_IDX * LOCAL_CHUNKS + IDX))

        echo "Chunk $(($LOCAL_IDX - 1)) will run on GPUs $CHUNK_GPUS_STR"

        # Pick a random port for this chunk's server.
        PORT=$((10000 + RANDOM % 55536))

        MAX_RETRIES=10
        RETRY_COUNT=0
        COMMAND_STATUS=1 # Initialize as failed

        while [ $RETRY_COUNT -lt $MAX_RETRIES ] && [ $COMMAND_STATUS -ne 0 ]; do
            echo "Running chunk $(($LOCAL_IDX - 1)) on GPUs $CHUNK_GPUS_STR with port $PORT. Attempt $(($RETRY_COUNT + 1))"

            CUDA_VISIBLE_DEVICES=$CHUNK_GPUS_STR python3 srt_example_llava_v.py \
                --port $PORT \
                --num-chunks $ALL_CHUNKS \
                --chunk-idx $(($LOCAL_IDX - 1)) \
                --save-dir work_dirs/llava_next_video_inference_results \
                --video-dir $VIDEO_DIR \
                --model-path $MODEL_PATH \
                --num-frames $NUM_FRAMES #&

            wait $! # Wait for the process to finish and capture its exit status
            COMMAND_STATUS=$?

            if [ $COMMAND_STATUS -ne 0 ]; then
                echo "Execution failed for chunk $(($LOCAL_IDX - 1)), attempt $(($RETRY_COUNT + 1)). Retrying..."
                RETRY_COUNT=$(($RETRY_COUNT + 1))
                sleep 180 # Wait a bit before retrying
            else
                echo "Execution succeeded for chunk $(($LOCAL_IDX - 1))."
            fi
        done

        if [ $COMMAND_STATUS -ne 0 ]; then
            echo "Execution failed for chunk $(($LOCAL_IDX - 1)) after $MAX_RETRIES attempts."
        fi
    ) #&
    sleep 2 # Slight delay to stagger the start times
done

wait

cat work_dirs/llava_next_video_inference_results/final_results_chunk_*.csv > work_dirs/llava_next_video_inference_results/final_results_node_${CUR_NODES_IDX}.csv

END_TIME=$(date +%s) # Capture end time
ELAPSED_TIME=$(($END_TIME - $START_TIME))
echo "Total execution time: $ELAPSED_TIME seconds."
@@ -1,96 +0,0 @@
"""
Usage:
python -m sglang.launch_server --model-path meta-llama/Llama-2-7b-chat-hf --port 30000
python openai_batch_chat.py
Note: Before running this script,
you should create the input.jsonl file with the following content:
{"custom_id": "request-1", "method": "POST", "url": "/v1/chat/completions", "body": {"model": "gpt-3.5-turbo-0125", "messages": [{"role": "system", "content": "You are a helpful assistant."},{"role": "user", "content": "Hello world! List 3 NBA players and tell a story"}],"max_tokens": 300}}
{"custom_id": "request-2", "method": "POST", "url": "/v1/chat/completions", "body": {"model": "gpt-3.5-turbo-0125", "messages": [{"role": "system", "content": "You are an assistant. "},{"role": "user", "content": "Hello world! List three capital and tell a story"}],"max_tokens": 500}}
"""

import json
import os
import time

import openai
from openai import OpenAI


class OpenAIBatchProcessor:
    def __init__(self, api_key):
        # client = OpenAI(api_key=api_key)
        client = openai.Client(base_url="http://127.0.0.1:30000/v1", api_key="EMPTY")

        self.client = client

    def process_batch(self, input_file_path, endpoint, completion_window):

        # Upload the input file
        with open(input_file_path, "rb") as file:
            uploaded_file = self.client.files.create(file=file, purpose="batch")

        # Create the batch job
        batch_job = self.client.batches.create(
            input_file_id=uploaded_file.id,
            endpoint=endpoint,
            completion_window=completion_window,
        )

        # Monitor the batch job status
        while batch_job.status not in ["completed", "failed", "cancelled"]:
            time.sleep(3)  # Wait for 3 seconds before checking the status again
            print(
                f"Batch job status: {batch_job.status}...trying again in 3 seconds..."
            )
            batch_job = self.client.batches.retrieve(batch_job.id)

        # Check the batch job status and errors
        if batch_job.status == "failed":
            print(f"Batch job failed with status: {batch_job.status}")
            print(f"Batch job errors: {batch_job.errors}")
            return None

        # If the batch job is completed, process the results
        if batch_job.status == "completed":

            # print result of batch job
            print("batch", batch_job.request_counts)

            result_file_id = batch_job.output_file_id
            # Retrieve the file content from the server
            file_response = self.client.files.content(result_file_id)
            result_content = file_response.read()  # Read the content of the file

            # Save the content to a local file
            result_file_name = "batch_job_chat_results.jsonl"
            with open(result_file_name, "wb") as file:
                file.write(result_content)  # Write the binary content to the file
            # Load data from the saved JSONL file
            results = []
            with open(result_file_name, "r", encoding="utf-8") as file:
                for line in file:
                    json_object = json.loads(
                        line.strip()
                    )  # Parse each line as a JSON object
                    results.append(json_object)

            return results
        else:
            print(f"Batch job failed with status: {batch_job.status}")
            return None


# Initialize the OpenAIBatchProcessor
api_key = os.environ.get("OPENAI_API_KEY")
processor = OpenAIBatchProcessor(api_key)

# Process the batch job
input_file_path = "input.jsonl"
endpoint = "/v1/chat/completions"
completion_window = "24h"

# Process the batch job
results = processor.process_batch(input_file_path, endpoint, completion_window)

# Print the results
print(results)
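Before running, the input.jsonl file has to exist. A minimal sketch for producing it programmatically, using exactly the two example requests from the docstring above (the file name and request bodies come from this script; the writer code itself is just an assumed standard-library approach):

import json

example_requests = [
    {
        "custom_id": "request-1",
        "method": "POST",
        "url": "/v1/chat/completions",
        "body": {
            "model": "gpt-3.5-turbo-0125",
            "messages": [
                {"role": "system", "content": "You are a helpful assistant."},
                {"role": "user", "content": "Hello world! List 3 NBA players and tell a story"},
            ],
            "max_tokens": 300,
        },
    },
    {
        "custom_id": "request-2",
        "method": "POST",
        "url": "/v1/chat/completions",
        "body": {
            "model": "gpt-3.5-turbo-0125",
            "messages": [
                {"role": "system", "content": "You are an assistant. "},
                {"role": "user", "content": "Hello world! List three capital and tell a story"},
            ],
            "max_tokens": 500,
        },
    },
]

# Write one JSON object per line, as the OpenAI batch API expects.
with open("input.jsonl", "w", encoding="utf-8") as f:
    for item in example_requests:
        f.write(json.dumps(item) + "\n")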
@@ -1,97 +0,0 @@
"""
Usage:
python -m sglang.launch_server --model-path meta-llama/Llama-2-7b-chat-hf --port 30000
python openai_batch_complete.py
Note: Before running this script,
you should create the input.jsonl file with the following content:
{"custom_id": "request-1", "method": "POST", "url": "/v1/completions", "body": {"model": "gpt-3.5-turbo-instruct", "prompt": "List 3 names of famous soccer player: ", "max_tokens": 200}}
{"custom_id": "request-2", "method": "POST", "url": "/v1/completions", "body": {"model": "gpt-3.5-turbo-instruct", "prompt": "List 6 names of famous basketball player: ", "max_tokens": 400}}
{"custom_id": "request-3", "method": "POST", "url": "/v1/completions", "body": {"model": "gpt-3.5-turbo-instruct", "prompt": "List 6 names of famous basketball player: ", "max_tokens": 400}}
"""

import json
import os
import time

import openai
from openai import OpenAI


class OpenAIBatchProcessor:
    def __init__(self, api_key):
        # client = OpenAI(api_key=api_key)
        client = openai.Client(base_url="http://127.0.0.1:30000/v1", api_key="EMPTY")

        self.client = client

    def process_batch(self, input_file_path, endpoint, completion_window):

        # Upload the input file
        with open(input_file_path, "rb") as file:
            uploaded_file = self.client.files.create(file=file, purpose="batch")

        # Create the batch job
        batch_job = self.client.batches.create(
            input_file_id=uploaded_file.id,
            endpoint=endpoint,
            completion_window=completion_window,
        )

        # Monitor the batch job status
        while batch_job.status not in ["completed", "failed", "cancelled"]:
            time.sleep(3)  # Wait for 3 seconds before checking the status again
            print(
                f"Batch job status: {batch_job.status}...trying again in 3 seconds..."
            )
            batch_job = self.client.batches.retrieve(batch_job.id)

        # Check the batch job status and errors
        if batch_job.status == "failed":
            print(f"Batch job failed with status: {batch_job.status}")
            print(f"Batch job errors: {batch_job.errors}")
            return None

        # If the batch job is completed, process the results
        if batch_job.status == "completed":

            # print result of batch job
            print("batch", batch_job.request_counts)

            result_file_id = batch_job.output_file_id
            # Retrieve the file content from the server
            file_response = self.client.files.content(result_file_id)
            result_content = file_response.read()  # Read the content of the file

            # Save the content to a local file
            result_file_name = "batch_job_complete_results.jsonl"
            with open(result_file_name, "wb") as file:
                file.write(result_content)  # Write the binary content to the file
            # Load data from the saved JSONL file
            results = []
            with open(result_file_name, "r", encoding="utf-8") as file:
                for line in file:
                    json_object = json.loads(
                        line.strip()
                    )  # Parse each line as a JSON object
                    results.append(json_object)

            return results
        else:
            print(f"Batch job failed with status: {batch_job.status}")
            return None


# Initialize the OpenAIBatchProcessor
api_key = os.environ.get("OPENAI_API_KEY")
processor = OpenAIBatchProcessor(api_key)

# Process the batch job
input_file_path = "input_complete.jsonl"
endpoint = "/v1/completions"
completion_window = "24h"

# Process the batch job
results = processor.process_batch(input_file_path, endpoint, completion_window)

# Print the results
print(results)
@@ -1,155 +0,0 @@
"""
Usage:
***Note: for speculative execution to work, the user must put all "gen" calls inside "assistant".
Show the desired answer format in "assistant". Each "gen" term should have a stop token.
Stream mode is not supported in speculative execution.

E.g.
correct:
sgl.assistant("\nName:" + sgl.gen("name", stop="\n") + "\nBirthday:" + sgl.gen("birthday", stop="\n") + "\nJob:" + sgl.gen("job", stop="\n"))
incorrect:
s += sgl.assistant("\nName:" + sgl.gen("name", stop="\n"))
s += sgl.assistant("\nBirthday:" + sgl.gen("birthday", stop="\n"))
s += sgl.assistant("\nJob:" + sgl.gen("job", stop="\n"))

export OPENAI_API_KEY=sk-******
python3 openai_chat_speculative.py
"""

import sglang as sgl
from sglang import OpenAI, function, set_default_backend


@function(num_api_spec_tokens=256)
def gen_character_spec(s):
    s += sgl.system("You are a helpful assistant.")
    s += sgl.user("Construct a character within the following format:")
    s += sgl.assistant(
        "Name: Steve Jobs.\nBirthday: February 24, 1955.\nJob: Apple CEO.\n"
    )
    s += sgl.user("Please generate new Name, Birthday and Job.\n")
    s += sgl.assistant(
        "Name:"
        + sgl.gen("name", stop="\n")
        + "\nBirthday:"
        + sgl.gen("birthday", stop="\n")
        + "\nJob:"
        + sgl.gen("job", stop="\n")
    )


@function(num_api_spec_tokens=256)
def gen_character_spec_no_few_shot(s):
    s += sgl.user("Construct a character. For each field stop with a newline\n")
    s += sgl.assistant(
        "Name:"
        + sgl.gen("name", stop="\n")
        + "\nAge:"
        + sgl.gen("age", stop="\n")
        + "\nJob:"
        + sgl.gen("job", stop="\n")
    )


@function
def gen_character_normal(s):
    s += sgl.system("You are a helpful assistant.")
    s += sgl.user("What's the answer of 23 + 8?")
    s += sgl.assistant(sgl.gen("answer", max_tokens=64))


@function(num_api_spec_tokens=1024)
def multi_turn_question(s, question_1, question_2):
    s += sgl.system("You are a helpful assistant.")
    s += sgl.user("Answer questions in the following format:")
    s += sgl.user(
        "Question 1: What is the capital of France?\nQuestion 2: What is the population of this city?\n"
    )
    s += sgl.assistant(
        "Answer 1: The capital of France is Paris.\nAnswer 2: The population of Paris in 2024 is estimated to be around 2.1 million for the city proper.\n"
    )
    s += sgl.user("Question 1: " + question_1 + "\nQuestion 2: " + question_2)
    s += sgl.assistant(
        "Answer 1: "
        + sgl.gen("answer_1", stop="\n")
        + "\nAnswer 2: "
        + sgl.gen("answer_2", stop="\n")
    )


def test_spec_single_turn():
    backend.token_usage.reset()

    state = gen_character_spec.run()
    for m in state.messages():
        print(m["role"], ":", m["content"])

    print("\n-- name:", state["name"])
    print("-- birthday:", state["birthday"])
    print("-- job:", state["job"])
    print(backend.token_usage)


def test_inaccurate_spec_single_turn():
    state = gen_character_spec_no_few_shot.run()
    for m in state.messages():
        print(m["role"], ":", m["content"])

    print("\n-- name:", state["name"])
    print("\n-- age:", state["age"])
    print("\n-- job:", state["job"])


def test_normal_single_turn():
    state = gen_character_normal.run()
    for m in state.messages():
        print(m["role"], ":", m["content"])


def test_spec_multi_turn():
    state = multi_turn_question.run(
        question_1="What is the capital of the United States?",
        question_2="List two local attractions in the capital of the United States.",
    )

    for m in state.messages():
        print(m["role"], ":", m["content"])

    print("\n-- answer_1 --\n", state["answer_1"])
    print("\n-- answer_2 --\n", state["answer_2"])


def test_spec_multi_turn_stream():
    state = multi_turn_question.run(
        question_1="What is the capital of the United States?",
        question_2="List two local attractions.",
        stream=True,
    )

    for out in state.text_iter():
        print(out, end="", flush=True)


if __name__ == "__main__":
    backend = OpenAI("gpt-4-turbo")
    set_default_backend(backend)

    print("\n========== test spec single turn ==========\n")
    # expect reasonable answer for each field
    test_spec_single_turn()

    print("\n========== test inaccurate spec single turn ==========\n")
    # expect incomplete or unreasonable answers
    test_inaccurate_spec_single_turn()

    print("\n========== test normal single turn ==========\n")
    # expect reasonable answer
    test_normal_single_turn()

    print("\n========== test spec multi turn ==========\n")
    # expect answer with same format as in the few shot
    test_spec_multi_turn()

    print("\n========== test spec multi turn stream ==========\n")
    # expect error in stream_executor: stream is not supported...
    test_spec_multi_turn_stream()
@@ -1,153 +0,0 @@
|
||||
import openai
|
||||
|
||||
client = openai.Client(base_url="http://127.0.0.1:30000/v1", api_key="EMPTY")
|
||||
|
||||
# Text completion
|
||||
response = client.completions.create(
|
||||
model="default",
|
||||
prompt="I am a robot and I want to study like humans. Now let's tell a story. Once upon a time, there was a little",
|
||||
n=1,
|
||||
temperature=0.8,
|
||||
max_tokens=32,
|
||||
)
|
||||
print(response)
|
||||
|
||||
|
||||
# Text completion with multiple samples (n=5)
|
||||
response = client.completions.create(
|
||||
model="default",
|
||||
prompt="I am a robot and I want to study like humans. Now let's tell a story. Once upon a time, there was a little",
|
||||
n=5,
|
||||
temperature=0.8,
|
||||
max_tokens=320,
|
||||
)
|
||||
print(response)
|
||||
|
||||
|
||||
# Text completion with multiple samples (n=3)
|
||||
response = client.completions.create(
|
||||
model="default",
|
||||
prompt="I am a robot and I want to study like humans. Now let's tell a story. Once upon a time, there was a little",
|
||||
n=3,
|
||||
temperature=0.8,
|
||||
max_tokens=32,
|
||||
)
|
||||
print(response)
|
||||
|
||||
|
||||
# Text completion
|
||||
response = client.completions.create(
|
||||
model="default",
|
||||
prompt=["The name of the famous soccer player is"],
|
||||
n=1,
|
||||
temperature=0.8,
|
||||
max_tokens=128,
|
||||
)
|
||||
print(response)
|
||||
|
||||
|
||||
# Text completion with a batch of two prompts
|
||||
response = client.completions.create(
|
||||
model="default",
|
||||
prompt=["The name of the famous soccer player is ", "The capital of US is"],
|
||||
n=1,
|
||||
temperature=0.8,
|
||||
max_tokens=32,
|
||||
)
|
||||
print(response)
|
||||
|
||||
|
||||
# Text completion with a batch of two prompts and multiple samples (n=3)
|
||||
response = client.completions.create(
|
||||
model="default",
|
||||
prompt=["The name of the famous soccer player is ", "The capital of US is"],
|
||||
n=3,
|
||||
temperature=0.8,
|
||||
max_tokens=32,
|
||||
)
|
||||
print(response)
|
||||
|
||||
|
||||
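# Text completion with a batch of five prompts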
response = client.completions.create(
|
||||
model="default",
|
||||
prompt=[
|
||||
"prompt1: I am a robot and I want to learn like humans. Now let's begin a tale. Once upon a time, there was a small",
|
||||
"prompt2: As a robot, my goal is to understand human learning. Let's start a story. In a faraway land, there lived a tiny",
|
||||
"prompt3: Being a robot, I aspire to study like people. Let's share a story. Long ago, there was a little",
|
||||
"prompt4: I am a robot aiming to learn like humans. Let's narrate a story. Once, in a distant kingdom, there was a young",
|
||||
"prompt5: As a robot, I seek to learn in human ways. Let's tell a story. Once upon a time, in a small village, there was a young",
|
||||
],
|
||||
n=1,
|
||||
temperature=0.8,
|
||||
max_tokens=320,
|
||||
)
|
||||
print(response)
|
||||
|
||||
|
||||
# Text completion with a batch of three prompts and multiple samples (n=3)
|
||||
response = client.completions.create(
|
||||
model="default",
|
||||
prompt=[
|
||||
"The capital of France is",
|
||||
"The capital of Germany is",
|
||||
"The capital of US is",
|
||||
],
|
||||
n=3,
|
||||
temperature=0.8,
|
||||
max_tokens=32,
|
||||
)
|
||||
print(response)
|
||||
|
||||
# Chat completion with logprobs
|
||||
response = client.chat.completions.create(
|
||||
model="default",
|
||||
messages=[
|
||||
{"role": "system", "content": "You are a helpful AI assistant"},
|
||||
{"role": "user", "content": "List 3 countries and their capitals."},
|
||||
],
|
||||
temperature=0.8,
|
||||
max_tokens=1,
|
||||
logprobs=True,
|
||||
top_logprobs=3,
|
||||
)
|
||||
print(response)
|
||||
|
||||
# Chat completion
|
||||
response = client.chat.completions.create(
|
||||
model="default",
|
||||
messages=[
|
||||
{"role": "system", "content": "You are a helpful AI assistant"},
|
||||
{"role": "user", "content": "List 3 countries and their capitals."},
|
||||
],
|
||||
temperature=0.8,
|
||||
max_tokens=1,
|
||||
n=1,
|
||||
)
|
||||
print(response)
|
||||
|
||||
# Chat completion
|
||||
response = client.chat.completions.create(
|
||||
model="default",
|
||||
messages=[
|
||||
{"role": "system", "content": "You are a helpful AI assistant"},
|
||||
{"role": "user", "content": "List 3 countries and their capitals."},
|
||||
],
|
||||
temperature=0.8,
|
||||
max_tokens=1,
|
||||
logprobs=True,
|
||||
top_logprobs=3,
|
||||
)
|
||||
print(response)
|
||||
|
||||
# Chat completion with multiple samples (n=4)
|
||||
response = client.chat.completions.create(
|
||||
model="default",
|
||||
messages=[
|
||||
{"role": "system", "content": "You are a helpful AI assistant"},
|
||||
{"role": "user", "content": "List 3 countries and their capitals."},
|
||||
],
|
||||
temperature=0.8,
|
||||
max_tokens=1,
|
||||
n=4,
|
||||
)
|
||||
print(response)
|
||||
@@ -1,54 +0,0 @@
|
||||
"""
|
||||
Usage:
|
||||
python3 openai_speculative.py
|
||||
"""
|
||||
|
||||
from sglang import OpenAI, function, gen, set_default_backend
|
||||
|
||||
|
||||
@function(num_api_spec_tokens=64)
|
||||
def gen_character_spec(s):
|
||||
s += "Construct a character within the following format:\n"
|
||||
s += "Name: Steve Jobs.\nBirthday: February 24, 1955.\nJob: Apple CEO.\n"
|
||||
s += "\nPlease generate new Name, Birthday and Job.\n"
|
||||
s += "Name:" + gen("name", stop="\n") + "\nBirthday:" + gen("birthday", stop="\n")
|
||||
s += "\nJob:" + gen("job", stop="\n") + "\n"
|
||||
|
||||
|
||||
@function
|
||||
def gen_character_no_spec(s):
|
||||
s += "Construct a character within the following format:\n"
|
||||
s += "Name: Steve Jobs.\nBirthday: February 24, 1955.\nJob: Apple CEO.\n"
|
||||
s += "\nPlease generate new Name, Birthday and Job.\n"
|
||||
s += "Name:" + gen("name", stop="\n") + "\nBirthday:" + gen("birthday", stop="\n")
|
||||
s += "\nJob:" + gen("job", stop="\n") + "\n"
|
||||
|
||||
|
||||
@function(num_api_spec_tokens=64)
|
||||
def gen_character_spec_no_few_shot(s):
|
||||
# s += "Construct a character with name, birthday, and job:\n"
|
||||
s += "Construct a character:\n"
|
||||
s += "Name:" + gen("name", stop="\n") + "\nBirthday:" + gen("birthday", stop="\n")
|
||||
s += "\nJob:" + gen("job", stop="\n") + "\n"
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
backend = OpenAI("gpt-3.5-turbo-instruct")
|
||||
set_default_backend(backend)
|
||||
|
||||
for func in [
|
||||
gen_character_spec,
|
||||
gen_character_no_spec,
|
||||
gen_character_spec_no_few_shot,
|
||||
]:
|
||||
backend.token_usage.reset()
|
||||
|
||||
print(f"function: {func.func.__name__}")
|
||||
|
||||
state = func.run()
|
||||
|
||||
print("...name:", state["name"])
|
||||
print("...birthday:", state["birthday"])
|
||||
print("...job:", state["job"])
|
||||
print(backend.token_usage)
|
||||
print()
|
||||
@@ -1,40 +0,0 @@
|
||||
"""
|
||||
Usage:
|
||||
python3 parallel_sample.py
|
||||
"""
|
||||
|
||||
import sglang as sgl
|
||||
|
||||
|
||||
@sgl.function
|
||||
def parallel_sample(s, question, n):
|
||||
s += (
|
||||
"Question: Compute 1 + 2 + 3\n"
|
||||
"Reasoning: I need to use a calculator.\n"
|
||||
"Tool: calculator\n"
|
||||
"Answer: 6\n"
|
||||
"Question: Compute 3 + 2 + 2\n"
|
||||
"Reasoning: I will try a calculator.\n"
|
||||
"Tool: calculator\n"
|
||||
"Answer: 7\n"
|
||||
)
|
||||
s += "Question: " + question + "\n"
|
||||
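# Fork the state into n parallel branches that share the prompt built so far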
forks = s.fork(n)
|
||||
forks += "Reasoning:" + sgl.gen("reasoning", stop="\n") + "\n"
|
||||
forks += "Tool:" + sgl.gen("tool", choices=["calculator", "browser"]) + "\n"
|
||||
forks += "Answer:" + sgl.gen("answer", stop="\n") + "\n"
|
||||
forks.join()
|
||||
|
||||
|
||||
sgl.set_default_backend(sgl.OpenAI("gpt-3.5-turbo-instruct"))
|
||||
# sgl.set_default_backend(sgl.RuntimeEndpoint("http://localhost:30000"))
|
||||
|
||||
state = parallel_sample.run(question="Compute 5 + 2 + 4.", n=5, temperature=1.0)
|
||||
|
||||
for i in range(5):
|
||||
obj = {
|
||||
"reasoning": state["reasoning"][i],
|
||||
"tool": state["tool"][i],
|
||||
"answer": state["answer"][i],
|
||||
}
|
||||
print(f"[{i}], {obj}")
|
||||
Binary file not shown. (Before: 132 KiB)
@@ -1,527 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"# RAG Powered by SGLang & Chroma Evaluated using Parea\n",
|
||||
"\n",
|
||||
"In this notebook, we will build a simple RAG pipeline using SGLang to execute our LLM calls, Chroma as vector database for retrieval and [Parea](https://www.parea.ai) for tracing and evaluation. We will then evaluate the performance of our RAG pipeline. The dataset we will use was created by [Virat](https://twitter.com/virattt) and contains 100 questions, contexts and answers from the Airbnb 2023 10k filing.\n",
|
||||
"\n",
|
||||
"The RAG pipeline consists of two steps:\n",
|
||||
"1. Retrieval: Given a question, we retrieve the relevant context from all provided contexts.\n",
|
||||
"2. Generation: Given the question and the retrieved context, we generate an answer.\n",
|
||||
"\n",
|
||||
"ℹ️ This notebook requires an OpenAI API key.\n",
|
||||
"\n",
|
||||
"ℹ️ This notebook requires a Parea API key, which can be created [here](https://docs.parea.ai/api-reference/authentication#parea-api-key)."
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"## Setting up the environment\n",
|
||||
"\n",
|
||||
"We will first install the necessary packages: `sglang`, `parea-ai` and `chromadb`."
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Note: on a Mac with an M1 chip, you might need to install grpcio 1.59.0 first so that chromadb installs correctly\n",
|
||||
"# !pip install grpcio==1.59.0\n",
|
||||
"\n",
|
||||
"!pip install sglang[openai] parea-ai chromadb"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Create a Parea API key as outlined [here](https://docs.parea.ai/api-reference/authentication#parea-api-key) and save it in a `.env` file as `PAREA_API_KEY=your-api-key`."
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"## Indexing the data\n",
|
||||
"\n",
|
||||
"Now it's time to download the data & index it! For that, we create a collection called `contexts` in Chroma and add the contexts as documents."
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import json\n",
|
||||
"import os\n",
|
||||
"from typing import List\n",
|
||||
"\n",
|
||||
"import chromadb\n",
|
||||
"\n",
|
||||
"path_qca = \"airbnb-2023-10k-qca.json\"\n",
|
||||
"\n",
|
||||
"if not os.path.exists(path_qca):\n",
|
||||
" !wget https://virattt.github.io/datasets/abnb-2023-10k.json -O airbnb-2023-10k-qca.json\n",
|
||||
"\n",
|
||||
"with open(path_qca, 'r') as f:\n",
|
||||
" question_context_answers = json.load(f)\n",
|
||||
"\n",
|
||||
"chroma_client = chromadb.PersistentClient()\n",
|
||||
"collection = chroma_client.get_or_create_collection(name=\"contexts\")\n",
|
||||
"if collection.count() == 0:\n",
|
||||
" collection.add(\n",
|
||||
" documents=[qca[\"context\"] for qca in question_context_answers],\n",
|
||||
" ids=[str(i) for i in range(len(question_context_answers))]\n",
|
||||
" )"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"## Defining the RAG pipeline\n",
|
||||
"\n",
|
||||
"We will start by importing the necessary packages, setting up tracing of OpenAI calls via Parea, and setting OpenAI as the default backend for SGLang."
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"import time\n",
|
||||
"\n",
|
||||
"from dotenv import load_dotenv\n",
|
||||
"\n",
|
||||
"from sglang import function, user, assistant, gen, set_default_backend, OpenAI\n",
|
||||
"from sglang.lang.interpreter import ProgramState\n",
|
||||
"from parea import Parea, trace\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"load_dotenv()\n",
|
||||
"\n",
|
||||
"os.environ['TOKENIZERS_PARALLELISM'] = \"false\"\n",
|
||||
"\n",
|
||||
"p = Parea(api_key=os.getenv(\"PAREA_API_KEY\"), project_name=\"rag_sglang\")\n",
|
||||
"p.integrate_with_sglang()\n",
|
||||
"\n",
|
||||
"set_default_backend(OpenAI(\"gpt-3.5-turbo\"))"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Now we can define our retrieval step, shown below. Notice the `trace` decorator, which will automatically trace the inputs, output, latency, etc. of that call."
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"@trace\n",
|
||||
"def retrieval(question: str) -> List[str]:\n",
|
||||
" return collection.query(\n",
|
||||
" query_texts=[question],\n",
|
||||
" n_results=1\n",
|
||||
" )['documents'][0]"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Next we will define the generation step which uses SGLang to execute the LLM call."
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"@function\n",
|
||||
"def generation_sglang(s, question: str, *context: str):\n",
|
||||
" context = \"\\n\".join(context)\n",
|
||||
" s += user(f'Given this question:\\n{question}\\n\\nAnd this context:\\n{context}\\n\\nAnswer the question.')\n",
|
||||
" s += assistant(gen(\"answer\"))\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@trace\n",
|
||||
"def generation(question: str, *context):\n",
|
||||
" state: ProgramState = generation_sglang.run(question, *context)\n",
|
||||
" while not state.stream_executor.is_finished:\n",
|
||||
" time.sleep(1)\n",
|
||||
" return state.stream_executor.variables[\"answer\"]"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Finally, we can tie it together and execute a sample query."
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {
|
||||
"collapsed": true
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": "'The World Health Organization formally declared an end to the COVID-19 global health emergency'"
|
||||
},
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"@trace\n",
|
||||
"def rag_pipeline(question: str) -> str:\n",
|
||||
" contexts = retrieval(question)\n",
|
||||
" return generation(question, *contexts)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"rag_pipeline(\"When did the World Health Organization formally declare an end to the COVID-19 global health emergency?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"## Debug Trace\n",
|
||||
"\n",
|
||||
"The output is unfortunately wrong! Using the traced pipeline, we can see that\n",
|
||||
"\n",
|
||||
"- the context is relevant to the question and contains the correct information\n",
|
||||
"- but the generation step is cut off because max tokens is set to 16\n",
|
||||
"\n",
|
||||
"When opening the generation step in the playground and rerunning the prompt with max tokens set to 1000, the correct answer is produced.\n",
|
||||
"\n",
|
||||
""
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"## Evaluating RAG Pipelines\n",
|
||||
"\n",
|
||||
"Before we apply the fix above, let's dive into evaluating RAG pipelines.\n",
|
||||
"\n",
|
||||
"RAG pipelines consist of a retrieval step to fetch relevant information and a generation step to generate a response to a user's question. A RAG pipeline can fail at either step. E.g., the retrieval step can fail to find relevant information, which makes generating the correct answer impossible. Another failure mode is that the generation step doesn't leverage the retrieved information correctly. We will apply the following evaluation metrics to understand different failure modes:\n",
|
||||
"\n",
|
||||
"- `context_relevancy`: measures how relevant the context is given the question\n",
|
||||
"- `percent_target_supported_by_context`: measures how much of the target answer is supported by the context; this gives an upper bound on how well the generation step can perform\n",
|
||||
"- `answer_context_faithfulness`: measures how much the generated answer utilizes the context\n",
|
||||
"- `answer_matches_target`: measures how well the generated answer matches the target answer, as judged by an LLM, and gives a sense of the accuracy of our entire pipeline\n",
|
||||
"\n",
|
||||
"To use these evaluation metrics, we can import them from `parea.evals.rag` and `parea.evals.general` and apply them to a function by specifying in the `trace` decorator which evaluation metrics to use. The `@trace` decorator will automatically log the results of the evaluation metrics to the Parea dashboard.\n",
|
||||
"\n",
|
||||
"Applying them to the retrieval step:"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from parea.evals.rag import context_query_relevancy_factory, percent_target_supported_by_context_factory\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"context_relevancy_eval = context_query_relevancy_factory()\n",
|
||||
"percent_target_supported_by_context = percent_target_supported_by_context_factory()\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@trace(eval_funcs=[context_relevancy_eval, percent_target_supported_by_context])\n",
|
||||
"def retrieval(question: str) -> List[str]:\n",
|
||||
" return collection.query(\n",
|
||||
" query_texts=[question],\n",
|
||||
" n_results=1\n",
|
||||
" )['documents'][0]"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Now we can apply `answer_context_faithfulness` and `answer_matches_target` to the generation step."
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from parea.evals.general import answer_matches_target_llm_grader_factory\n",
|
||||
"from parea.evals.rag import answer_context_faithfulness_statement_level_factory\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"answer_context_faithfulness = answer_context_faithfulness_statement_level_factory()\n",
|
||||
"answer_matches_target_llm_grader = answer_matches_target_llm_grader_factory()\n",
|
||||
"\n",
|
||||
"@function\n",
|
||||
"def generation_sglang(s, question: str, *context: str):\n",
|
||||
" context = \"\\n\".join(context)\n",
|
||||
" s += user(f'Given this question:\\n{question}\\n\\nAnd this context:\\n{context}\\n\\nAnswer the question.')\n",
|
||||
" s += assistant(gen(\"answer\", max_tokens=1_000))\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@trace(eval_funcs=[answer_context_faithfulness, answer_matches_target_llm_grader])\n",
|
||||
"def generation(question: str, *context):\n",
|
||||
" state: ProgramState = generation_sglang.run(question, *context)\n",
|
||||
" while not state.stream_executor.is_finished:\n",
|
||||
" time.sleep(1)\n",
|
||||
" return state.stream_executor.variables[\"answer\"]"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Finally, we tie them together & execute the original sample query."
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": "'The World Health Organization formally declared an end to the COVID-19 global health emergency in May 2023.'"
|
||||
},
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"@trace\n",
|
||||
"def rag_pipeline(question: str) -> str:\n",
|
||||
" contexts = retrieval(question)\n",
|
||||
" return generation(question, *contexts)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"rag_pipeline(\"When did the World Health Organization formally declare an end to the COVID-19 global health emergency?\")"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Great, the answer is correct! Can you spot the line where we fixed the output truncation issue?\n",
|
||||
"\n",
|
||||
"The evaluation scores appear in the bottom right of the logs (screenshot below). Note that there is no score for `answer_matches_target_llm_grader` and `percent_target_supported_by_context`, as these evals are automatically skipped if the target answer is not provided.\n",
|
||||
"\n",
|
||||
""
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"## Running an experiment\n",
|
||||
"\n",
|
||||
"Now we are (almost) ready to evaluate the performance of our RAG pipeline on the entire dataset. First, we will need to apply the `nest_asyncio` package to avoid issues with the Jupyter notebook event loop."
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Requirement already satisfied: nest-asyncio in /Users/joschkabraun/miniconda3/envs/sglang/lib/python3.10/site-packages (1.6.0)\r\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"!pip install nest-asyncio\n",
|
||||
"import nest_asyncio\n",
|
||||
"nest_asyncio.apply()"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Running the actual experiment is straightforward. For that, we use `p.experiment` to initialize the experiment with a name, the data (a list of key-value pairs fed into our entry function), and the entry function. We then call `run` on the experiment to execute it. Note that `target` is a reserved key in the data dictionary and will be used as the target answer for evaluation."
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Run name set to: sneak-weal, since a name was not provided.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"100%|██████████| 100/100 [00:27<00:00, 3.63it/s]\n",
|
||||
"Waiting for evaluations to finish: 100%|██████████| 19/19 [00:10<00:00, 1.89it/s]\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Experiment RAG Run sneak-weal stats:\n",
|
||||
"{\n",
|
||||
" \"latency\": \"2.69\",\n",
|
||||
" \"input_tokens\": \"61.26\",\n",
|
||||
" \"output_tokens\": \"75.88\",\n",
|
||||
" \"total_tokens\": \"137.14\",\n",
|
||||
" \"cost\": \"0.00\",\n",
|
||||
" \"answer_context_faithfulness_statement_level\": \"0.26\",\n",
|
||||
" \"answer_matches_target_llm_grader\": \"0.22\",\n",
|
||||
" \"context_query_relevancy\": \"0.27\",\n",
|
||||
" \"percent_target_supported_by_context\": \"0.40\"\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"View experiment & traces at: https://app.parea.ai/experiments/RAG/30f0244a-d56c-44ff-bdfb-8f47626304b6\n",
|
||||
"\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"e = p.experiment(\n",
|
||||
" 'RAG',\n",
|
||||
" data=[\n",
|
||||
" {\n",
|
||||
" \"question\": qca[\"question\"],\n",
|
||||
" \"target\": qca[\"answer\"],\n",
|
||||
" }\n",
|
||||
" for qca in question_context_answers\n",
|
||||
" ],\n",
|
||||
" func=rag_pipeline\n",
|
||||
").run()"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"## Analyzing the results\n",
|
||||
"\n",
|
||||
"When opening above experiment, we will see an overview of the experiment as shown below. The upper half shows a summary of the statistics on the left and charts to investigate the distribution and relationships of scores on the right. The lower half is a table with the individual traces which we can use to debug individual samples.\n",
|
||||
"\n",
|
||||
"When looking at the statistics, we can see that the accuracy of our RAG pipeline is 22% as measured by `answer_matches_target_llm_grader`. However, when checking the quality of our retrieval step (`context_query_relevancy`), we can see that our retrieval step fetches relevant information in only 27% of all samples. As shown in the GIF, we investigate the relationship between the two and see that the two scores have 95% agreement. This confirms that the retrieval step is a major bottleneck for our RAG pipeline. So now it's your turn to improve the retrieval step!\n",
|
||||
"\n",
|
||||
"Note: the link above isn't publicly accessible, but the experiment can be accessed [here](https://app.parea.ai/public-experiments/parea/rag_sglang/30f0244a-d56c-44ff-bdfb-8f47626304b6).\n",
|
||||
"\n",
|
||||
""
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"outputs": [],
|
||||
"source": [],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
}
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 2
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython2",
|
||||
"version": "2.7.6"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 0
|
||||
}
|
||||
@@ -1,109 +0,0 @@
|
||||
"""
|
||||
Usage:
|
||||
python -m sglang.launch_server --model-path meta-llama/Llama-2-7b-chat-hf --port 30000
|
||||
python readme_examples.py
|
||||
"""
|
||||
|
||||
import sglang as sgl
|
||||
|
||||
|
||||
@sgl.function
|
||||
def tool_use(s, question):
|
||||
s += "To answer this question: " + question + ". "
|
||||
s += (
|
||||
"I need to use a "
|
||||
+ sgl.gen("tool", choices=["calculator", "search engine"])
|
||||
+ ". "
|
||||
)
|
||||
|
||||
if s["tool"] == "calculator":
|
||||
s += "The math expression is" + sgl.gen("expression")
|
||||
elif s["tool"] == "search engine":
|
||||
s += "The key word to search is" + sgl.gen("word")
|
||||
|
||||
|
||||
@sgl.function
|
||||
def tip_suggestion(s):
|
||||
s += (
|
||||
"Here are two tips for staying healthy: "
|
||||
"1. Balanced Diet. 2. Regular Exercise.\n\n"
|
||||
)
|
||||
|
||||
forks = s.fork(2)
|
||||
for i, f in enumerate(forks):
|
||||
f += f"Now, expand tip {i+1} into a paragraph:\n"
|
||||
f += sgl.gen("detailed_tip", max_tokens=256, stop="\n\n")
|
||||
|
||||
s += "Tip 1:" + forks[0]["detailed_tip"] + "\n"
|
||||
s += "Tip 2:" + forks[1]["detailed_tip"] + "\n"
|
||||
s += "In summary" + sgl.gen("summary")
|
||||
|
||||
|
||||
@sgl.function
|
||||
def regular_expression_gen(s):
|
||||
s += "Q: What is the IP address of the Google DNS servers?\n"
|
||||
s += "A: " + sgl.gen(
|
||||
"answer",
|
||||
temperature=0,
|
||||
regex=r"((25[0-5]|2[0-4]\d|[01]?\d\d?)\.){3}(25[0-5]|2[0-4]\d|[01]?\d\d?)",
|
||||
)
|
||||
|
||||
|
||||
@sgl.function
|
||||
def text_qa(s, question):
|
||||
s += "Q: " + question + "\n"
|
||||
s += "A:" + sgl.gen("answer", stop="\n")
|
||||
|
||||
|
||||
def driver_tool_use():
|
||||
state = tool_use.run(question="What is the capital of the United States?")
|
||||
print(state.text())
|
||||
print("\n")
|
||||
|
||||
|
||||
def driver_tip_suggestion():
|
||||
state = tip_suggestion.run()
|
||||
print(state.text())
|
||||
print("\n")
|
||||
|
||||
|
||||
def driver_regex():
|
||||
state = regular_expression_gen.run()
|
||||
print(state.text())
|
||||
print("\n")
|
||||
|
||||
|
||||
def driver_batching():
|
||||
states = text_qa.run_batch(
|
||||
[
|
||||
{"question": "What is the capital of the United Kingdom?"},
|
||||
{"question": "What is the capital of France?"},
|
||||
{"question": "What is the capital of Japan?"},
|
||||
],
|
||||
progress_bar=True,
|
||||
)
|
||||
|
||||
for s in states:
|
||||
print(s.text())
|
||||
print("\n")
|
||||
|
||||
|
||||
def driver_stream():
|
||||
state = text_qa.run(
|
||||
question="What is the capital of France?", temperature=0.1, stream=True
|
||||
)
|
||||
|
||||
for out in state.text_iter():
|
||||
print(out, end="", flush=True)
|
||||
print("\n")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
# sgl.set_default_backend(sgl.OpenAI("gpt-3.5-turbo-instruct"))
|
||||
sgl.set_default_backend(sgl.RuntimeEndpoint("http://localhost:30000"))
|
||||
|
||||
driver_tool_use()
|
||||
driver_tip_suggestion()
|
||||
driver_regex()
|
||||
driver_batching()
|
||||
driver_stream()
|
||||
@@ -1,49 +0,0 @@
|
||||
"""
|
||||
Usage:
|
||||
python3 streaming.py
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
|
||||
import sglang as sgl
|
||||
|
||||
|
||||
@sgl.function
|
||||
def multi_turn_question(s, question_1, question_2):
|
||||
s += sgl.system("You are a helpful assistant.")
|
||||
s += sgl.user(question_1)
|
||||
s += sgl.assistant(sgl.gen("answer_1", max_tokens=256))
|
||||
s += sgl.user(question_2)
|
||||
s += sgl.assistant(sgl.gen("answer_2", max_tokens=256))
|
||||
|
||||
|
||||
sgl.set_default_backend(sgl.OpenAI("gpt-3.5-turbo"))
|
||||
|
||||
|
||||
def stream_a_variable():
|
||||
state = multi_turn_question.run(
|
||||
question_1="What is the capital of the United States?",
|
||||
question_2="List two local attractions.",
|
||||
stream=True,
|
||||
)
|
||||
|
||||
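# Stream only the text generated for the "answer_2" variable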
for out in state.text_iter(var_name="answer_2"):
|
||||
print(out, end="", flush=True)
|
||||
print("\n")
|
||||
|
||||
|
||||
async def async_stream():
|
||||
state = multi_turn_question.run(
|
||||
question_1="What is the capital of the United States?",
|
||||
question_2="List two local attractions.",
|
||||
stream=True,
|
||||
)
|
||||
|
||||
async for out in state.text_async_iter(var_name="answer_2"):
|
||||
print(out, end="", flush=True)
|
||||
print("\n")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
stream_a_variable()
|
||||
asyncio.run(async_stream())
|
||||
@@ -1,10 +0,0 @@
|
||||
FROM nvcr.io/nvidia/tritonserver:24.01-py3
|
||||
|
||||
WORKDIR /opt
|
||||
|
||||
RUN git clone https://github.com/sgl-project/sglang.git
|
||||
|
||||
WORKDIR /opt/sglang
|
||||
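# Install SGLang from source with all extras, plus the datasets package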
RUN pip install --upgrade pip && \
|
||||
pip install -e "python[all]" && \
|
||||
pip install datasets
|
||||
@@ -1,35 +0,0 @@
|
||||
# sglang_triton
|
||||
|
||||
Build the docker image:
|
||||
```
|
||||
docker build -t sglang-triton .
|
||||
```
|
||||
|
||||
Then start the container:
|
||||
```
|
||||
docker run -ti --gpus=all --network=host --name sglang-triton -v ./models:/mnt/models sglang-triton
|
||||
```
|
||||
|
||||
Inside the docker container, launch the SGLang server:
|
||||
```
|
||||
cd sglang
|
||||
python3 -m sglang.launch_server --model-path mistralai/Mistral-7B-Instruct-v0.2 --port 30000 --mem-fraction-static 0.9
|
||||
```
|
||||
|
||||
In another shell inside the docker container, start the Triton server:
|
||||
```
|
||||
docker exec -ti sglang-triton /bin/bash
|
||||
cd /mnt
|
||||
tritonserver --model-repository=/mnt/models
|
||||
```
|
||||
|
||||
|
||||
Send a request to the server:
|
||||
```
|
||||
curl -X POST http://localhost:8000/v2/models/character_generation/generate \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"INPUT_TEXT": ["harry"]
|
||||
}'
|
||||
|
||||
```
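The same request can also be sent from Python. A minimal sketch (assuming the server is reachable on localhost:8000 and the `requests` package is installed):
```
import requests

# Call Triton's generate endpoint for the character_generation model,
# mirroring the curl request above.
response = requests.post(
    "http://localhost:8000/v2/models/character_generation/generate",
    json={"INPUT_TEXT": ["harry"]},
)
print(response.json())
```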
|
||||
@@ -1,55 +0,0 @@
|
||||
import numpy
|
||||
import triton_python_backend_utils as pb_utils
|
||||
from pydantic import BaseModel
|
||||
|
||||
import sglang as sgl
|
||||
from sglang import function, set_default_backend
|
||||
from sglang.srt.constrained import build_regex_from_object
|
||||
|
||||
sgl.set_default_backend(sgl.RuntimeEndpoint("http://localhost:30000"))
|
||||
|
||||
|
||||
class Character(BaseModel):
|
||||
name: str
|
||||
eye_color: str
|
||||
house: str
|
||||
|
||||
|
||||
@function
|
||||
def character_gen(s, name):
|
||||
s += (
|
||||
name
|
||||
+ " is a character in Harry Potter. Please fill in the following information about this character.\n"
|
||||
)
|
||||
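# Constrain the generated JSON to the Character schema via a regex derived from the Pydantic model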
s += sgl.gen(
|
||||
"json_output", max_tokens=256, regex=build_regex_from_object(Character)
|
||||
)
|
||||
|
||||
|
||||
class TritonPythonModel:
|
||||
def initialize(self, args):
|
||||
print("Initialized.")
|
||||
|
||||
def execute(self, requests):
|
||||
responses = []
|
||||
for request in requests:
|
||||
tensor_in = pb_utils.get_input_tensor_by_name(request, "INPUT_TEXT")
|
||||
if tensor_in is None:
|
||||
return pb_utils.InferenceResponse(output_tensors=[])
|
||||
|
||||
input_list_names = [
|
||||
i.decode("utf-8") if isinstance(i, bytes) else i
|
||||
for i in tensor_in.as_numpy().tolist()
|
||||
]
|
||||
|
||||
input_list_dicts = [{"name": i} for i in input_list_names]
|
||||
|
||||
states = character_gen.run_batch(input_list_dicts)
|
||||
character_strs = [state.text() for state in states]
|
||||
|
||||
tensor_out = pb_utils.Tensor(
|
||||
"OUTPUT_TEXT", numpy.array(character_strs, dtype=object)
|
||||
)
|
||||
|
||||
responses.append(pb_utils.InferenceResponse(output_tensors=[tensor_out]))
|
||||
return responses
|
||||
@@ -1,23 +0,0 @@
|
||||
name: "character_generation"
|
||||
backend: "python"
|
||||
input [
|
||||
{
|
||||
name: "INPUT_TEXT"
|
||||
data_type: TYPE_STRING
|
||||
dims: [ -1 ]
|
||||
}
|
||||
]
|
||||
output [
|
||||
{
|
||||
name: "OUTPUT_TEXT"
|
||||
data_type: TYPE_STRING
|
||||
dims: [ -1 ]
|
||||
}
|
||||
]
|
||||
instance_group [
|
||||
{
|
||||
count: 1
|
||||
kind: KIND_GPU
|
||||
gpus: [ 0 ]
|
||||
}
|
||||
]
|
||||