Improve Readme (#10)

This commit is contained in:
Lianmin Zheng
2024-01-15 21:37:11 -08:00
parent 70359bf31a
commit 46b7ea7c85
5 changed files with 167 additions and 7 deletions

View File

@@ -0,0 +1,79 @@
import sglang as sgl
@sgl.function
def tool_use(s, question):
    """Demo: let the model choose a tool, then generate that tool's argument.

    The generated choice is stored in the state under "tool"; depending on
    the choice, a follow-up variable ("expression" or "url") is generated.
    """
    s += "To answer this question: " + question + ", "
    s += "I need to use a " + sgl.gen("tool", choices=["calculator", "web browser"]) + ". "
    chosen_tool = s["tool"]
    if chosen_tool == "calculator":
        s += "The math expression is" + sgl.gen("expression")
    elif chosen_tool == "web browser":
        s += "The website url is" + sgl.gen("url")
@sgl.function
def tip_suggestion(s):
    """Demo: fork the state to expand two tips in parallel, then summarize.

    Each fork generates its own "detailed_tip" variable independently; both
    results are read back from the forks and joined into the main state
    before the final summary is generated.
    """
    s += (
        "Here are two tips for staying healthy: "
        "1. Balanced Diet. 2. Regular Exercise.\n\n"
    )
    forks = s.fork(2)
    for i, f in enumerate(forks):
        f += f"Now, expand tip {i+1} into a paragraph:\n"
        # Plain string literal: the original used an f-string with no
        # placeholders (extraneous f-prefix, Ruff F541) — value is identical.
        f += sgl.gen("detailed_tip", max_tokens=256, stop="\n\n")
    s += "Tip 1:" + forks[0]["detailed_tip"] + "\n"
    s += "Tip 2:" + forks[1]["detailed_tip"] + "\n"
    s += "In summary" + sgl.gen("summary")
@sgl.function
def text_qa(s, question):
    """Demo: single-turn Q/A completion; the answer is stored as "answer"."""
    s += f"Q: {question}\n"
    s += "A:" + sgl.gen("answer", stop="\n")
def driver_tool_use():
    """Run the tool_use program once and print the full generated text."""
    result = tool_use.run(question="What is the capital of the United States?")
    print(result.text())
    print("\n")
def driver_tip_suggestion():
    """Run the forked tip_suggestion program and print the full text."""
    result = tip_suggestion.run()
    print(result.text())
    print("\n")
def driver_batching():
    """Run text_qa over a batch of questions and print each completion."""
    questions = [
        "What is the capital of the United Kingdom?",
        "What is the capital of France?",
        "What is the capital of Japan?",
    ]
    states = text_qa.run_batch([{"question": q} for q in questions])
    for state in states:
        print(state.text())
    print("\n")
def driver_stream():
    """Stream the text of a text_qa run chunk-by-chunk to stdout."""
    state = text_qa.run(
        question="What is the capital of France?",
        temperature=0.1,
    )
    for chunk in state.text_iter():
        print(chunk, end="", flush=True)
    print("\n")
if __name__ == "__main__":
    # All demo programs below rely on this shared default backend.
    sgl.set_default_backend(sgl.OpenAI("gpt-3.5-turbo-instruct"))
    # Same four drivers, same order as before.
    for driver in (driver_tool_use, driver_tip_suggestion, driver_batching, driver_stream):
        driver()

View File

@@ -0,0 +1,43 @@
import asyncio
import sglang as sgl
@sgl.function
def multi_turn_question(s, question_1, question_2):
    """Demo: a two-turn chat; each answer is stored under its own variable."""
    s += sgl.system("You are a helpful assistant.")
    # Ask both questions in order; answers land in "answer_1" / "answer_2".
    for question, slot in ((question_1, "answer_1"), (question_2, "answer_2")):
        s += sgl.user(question)
        s += sgl.assistant(sgl.gen(slot, max_tokens=256))
# NOTE(review): module-level side effect — the backend is configured at import
# time so both entry points below can rely on it without further setup.
sgl.set_default_backend(sgl.OpenAI("gpt-3.5-turbo"))
def stream_a_variable():
    """Stream only the "answer_2" variable of a streamed run to stdout."""
    state = multi_turn_question.run(
        question_1="What is the capital of the United States?",
        question_2="List two local attractions.",
        stream=True,
    )
    for piece in state.text_iter(var_name="answer_2"):
        print(piece, end="", flush=True)
    print("\n")
async def async_stream():
    """Asynchronously stream the "answer_2" variable of a streamed run."""
    state = multi_turn_question.run(
        question_1="What is the capital of the United States?",
        question_2="List two local attractions.",
        stream=True,
    )
    async for piece in state.text_async_iter(var_name="answer_2"):
        print(piece, end="", flush=True)
    print("\n")
if __name__ == "__main__":
    # Run the synchronous demo first, then the asyncio-based one.
    stream_a_variable()
    asyncio.run(async_stream())