adapt to sglang v0.5.2rc1 on dcu

This commit is contained in:
maxiao
2025-09-04 15:56:33 +08:00
commit 909abb58f5
2320 changed files with 489411 additions and 0 deletions

View File

@@ -0,0 +1,92 @@
{% if not add_generation_prompt is defined %}
{% set add_generation_prompt = false %}
{% endif %}
{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='', is_first_sp=true, is_last_user=false) %}
{%- for message in messages %}
{%- if message['role'] == 'system' %}
{%- if ns.is_first_sp %}
{% set ns.system_prompt = ns.system_prompt + message['content'] %}
{% set ns.is_first_sp = false %}
{%- else %}
{% set ns.system_prompt = ns.system_prompt + '\n\n' + message['content'] %}
{%- endif %}
{%- endif %}
{%- endfor %}
{# --- Append tool descriptions if tools are defined --- #}
{% if tools is defined and tools is not none %}
{% set tool_ns = namespace(text='You are a helpful assistant with tool calling capabilities. '
'When a tool call is needed, you MUST use the following format to issue the call:\n'
'<tool▁calls▁begin><tool▁call▁begin>function<tool▁sep>FUNCTION_NAME\n'
'```json\n{"param1": "value1", "param2": "value2"}\n```<tool▁call▁end><tool▁calls▁end>\n\n'
'Make sure the JSON is valid.'
'## Tools\n\n### Function\n\nYou have the following functions available:\n\n') %}
{% for tool in tools %}
{% set tool_ns.text = tool_ns.text + '- `' + tool['name'] + '`:\n```json\n' + (tool | tojson) + '\n```\n' %}
{% endfor %}
{% set ns.system_prompt = ns.system_prompt + '\n\n' + tool_ns.text %}
{% endif %}
{{ bos_token }}
{{ ns.system_prompt }}
{%- for message in messages %}
{% set content = message['content'] %}
{%- if message['role'] == 'user' %}
{%- set ns.is_tool = false -%}
{%- set ns.is_first = false -%}
{%- set ns.is_last_user = true -%}
{{'<User>' + content + '<Assistant>'}}
{%- endif %}
{%- if message['role'] == 'assistant' %}
{% if '</think>' in content %}
{% set content = content.split('</think>')[-1] %}
{% endif %}
{% endif %}
{%- if message['role'] == 'assistant' and message['tool_calls'] is defined and message['tool_calls'] is not none %}
{%- set ns.is_last_user = false -%}
{%- if ns.is_tool %}
{{'<tool▁outputs▁end>'}}
{%- endif %}
{%- set ns.is_first = false %}
{%- set ns.is_tool = false -%}
{%- set ns.is_output_first = true %}
{%- for tool in message['tool_calls'] %}
{%- if not ns.is_first %}
{%- if content is none %}
{{'<tool▁calls▁begin><tool▁call▁begin>' + tool['type'] + '<tool▁sep>' + tool['function']['name'] + '\n' + '```json' + '\n' + tool['function']['arguments'] + '\n' + '```' + '<tool▁call▁end>'}}
{%- else %}
{{content + '<tool▁calls▁begin><tool▁call▁begin>' + tool['type'] + '<tool▁sep>' + tool['function']['name'] + '\n' + '```json' + '\n' + tool['function']['arguments'] + '\n' + '```' + '<tool▁call▁end>'}}
{%- endif %}
{%- set ns.is_first = true -%}
{%- else %}
{{'\n' + '<tool▁call▁begin>' + tool['type'] + '<tool▁sep>' + tool['function']['name'] + '\n' + '```json' + '\n' + tool['function']['arguments'] + '\n' + '```' + '<tool▁call▁end>'}}
{%- endif %}
{%- endfor %}
{{'<tool▁calls▁end><end▁of▁sentence>'}}
{%- endif %}
{%- if message['role'] == 'assistant' and (message['tool_calls'] is not defined or message['tool_calls'] is none)%}
{%- set ns.is_last_user = false -%}
{%- if ns.is_tool %}
{{'<tool▁outputs▁end>' + content + '<end▁of▁sentence>'}}
{%- set ns.is_tool = false -%}
{%- else %}
{{content + '<end▁of▁sentence>'}}
{%- endif %}
{%- endif %}
{%- if message['role'] == 'tool' %}
{%- set ns.is_last_user = false -%}
{%- set ns.is_tool = true -%}
{%- if ns.is_output_first %}
{{'<tool▁outputs▁begin><tool▁output▁begin>' + content + '<tool▁output▁end>'}}
{%- set ns.is_output_first = false %}
{%- else %}
{{'\n<tool▁output▁begin>' + content + '<tool▁output▁end>'}}
{%- endif %}
{%- endif %}
{%- endfor -%}
{% if ns.is_tool %}
{{'<tool▁outputs▁end>'}}
{% endif %}
{% if add_generation_prompt and not ns.is_last_user and not ns.is_tool %}
{{'<Assistant>'}}
{% endif %}
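Not part of this commit — a minimal sketch of how a tool-calling conversation could be rendered through the template above with Hugging Face transformers. The model name, template path, and tool definition below are placeholders, not values taken from this diff.

# Hedged sketch: render a tool-calling chat through the template above.
# "deepseek-ai/DeepSeek-V3" and "tool_chat_template_deepseek.jinja" are assumptions.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("deepseek-ai/DeepSeek-V3", trust_remote_code=True)
chat_template = open("tool_chat_template_deepseek.jinja").read()

tools = [{
    "name": "get_weather",
    "description": "Get the current weather for a city.",
    "parameters": {
        "type": "object",
        "properties": {"city": {"type": "string"}},
        "required": ["city"],
    },
}]
messages = [
    {"role": "system", "content": "You are a concise assistant."},
    {"role": "user", "content": "What's the weather in Boston?"},
]

prompt = tokenizer.apply_chat_template(
    messages,
    tools=tools,                  # appended to the system prompt by the template
    chat_template=chat_template,  # override the tokenizer's built-in template
    tokenize=False,
    add_generation_prompt=True,
)
print(prompt)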

View File

@@ -0,0 +1,91 @@
{% if not add_generation_prompt is defined %}
{% set add_generation_prompt = false %}
{% endif %}
{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='', is_first_sp=true, is_last_user=false) %}
{%- for message in messages %}
{%- if message['role'] == 'system' %}
{%- if ns.is_first_sp %}
{% set ns.system_prompt = ns.system_prompt + message['content'] %}
{% set ns.is_first_sp = false %}
{%- else %}
{% set ns.system_prompt = ns.system_prompt + '\n\n' + message['content'] %}
{%- endif %}
{%- endif %}
{%- endfor -%}
{# --- Append tool descriptions if tools are defined --- #}
{% if tools is defined and tools is not none %}
{% set tool_ns = namespace(text='You are a helpful assistant with tool calling capabilities. '
'When a tool call is needed, you MUST use the following format to issue the call:\n'
'<tool▁calls▁begin><tool▁call▁begin>function<tool▁sep>FUNCTION_NAME\n'
'```json\n{"param1": "value1", "param2": "value2"}\n```<tool▁call▁end><tool▁calls▁end>\n\n'
'Make sure the JSON is valid.'
'## Tools\n\n### Function\n\nYou have the following functions available:\n\n') %}
{% for tool in tools %}
{% set tool_ns.text = tool_ns.text + '\n```json\n' + (tool | tojson) + '\n```\n' %}
{% endfor %}
{% set ns.system_prompt = ns.system_prompt + '\n\n' + tool_ns.text %}
{% endif %}
{{- bos_token }}
{{- ns.system_prompt }}
{%- for message in messages %}
{%- if message['role'] == 'user' %}
{%- set ns.is_tool = false -%}
{%- set ns.is_first = false -%}
{%- set ns.is_last_user = true -%}
{{'<User>' + message['content'] + '<Assistant>'}}
{%- endif %}
{%- if message['role'] == 'assistant' and message['tool_calls'] is defined and message['tool_calls'] is not none %}
{%- set ns.is_last_user = false -%}
{%- if ns.is_tool %}
{{- '<tool▁outputs▁end>'}}
{%- endif %}
{%- set ns.is_first = false %}
{%- set ns.is_tool = false -%}
{%- set ns.is_output_first = true %}
{%- for tool in message['tool_calls'] %}
{%- if not ns.is_first %}
{%- if message['content'] is none %}
{{- '<tool▁calls▁begin><tool▁call▁begin>' + tool['type'] + '<tool▁sep>' + tool['function']['name'] + '\n' + '```json' + '\n' + tool['function']['arguments']|tojson + '\n' + '```' + '<tool▁call▁end>'}}
{%- else %}
{{- message['content'] + '<tool▁calls▁begin><tool▁call▁begin>' + tool['type'] + '<tool▁sep>' + tool['function']['name'] + '\n' + '```json' + '\n' + tool['function']['arguments']|tojson + '\n' + '```' + '<tool▁call▁end>'}}
{%- endif %}
{%- set ns.is_first = true -%}
{%- else %}
{{- '\n' + '<tool▁call▁begin>' + tool['type'] + '<tool▁sep>' + tool['function']['name'] + '\n' + '```json' + '\n' + tool['function']['arguments'] + '\n' + '```' + '<tool▁call▁end>'}}
{%- endif %}
{%- endfor %}
{{- '<tool▁calls▁end><end▁of▁sentence>'}}
{%- endif %}
{%- if message['role'] == 'assistant' and (message['tool_calls'] is not defined or message['tool_calls'] is none)%}
{%- set ns.is_last_user = false -%}
{%- if ns.is_tool %}
{{- '<tool▁outputs▁end>' + message['content'] + '<end▁of▁sentence>'}}
{%- set ns.is_tool = false -%}
{%- else %}
{% set content = message['content'] %}
{{- content + '<end▁of▁sentence>'}}
{%- endif %}
{%- endif %}
{%- if message['role'] == 'tool' %}
{%- set ns.is_last_user = false -%}
{%- set ns.is_tool = true -%}
{%- if ns.is_output_first %}
{{- 'Use the results below to formulate an answer to the user question unless additional information is needed.' }}
{{- '<tool▁outputs▁begin><tool▁output▁begin>' + message['content'] + '<tool▁output▁end>'}}
{%- set ns.is_output_first = false %}
{%- else %}
{{- '\n<tool▁output▁begin>' + message['content'] + '<tool▁output▁end>'}}
{%- endif %}
{%- endif %}
{%- endfor -%}
{% if ns.is_tool %}
{{- '<tool▁outputs▁end>'}}
{% endif %}
{% if add_generation_prompt and not ns.is_last_user and not ns.is_tool %}
{{- '<Assistant>'}}
{% endif %}

View File

@@ -0,0 +1,91 @@
{% if not add_generation_prompt is defined %}
{% set add_generation_prompt = false %}
{% endif %}
{% if not thinking is defined %}
{% set thinking = false %}
{% endif %}
{% set ns = namespace(is_first=false, is_tool=false, system_prompt='', is_first_sp=true, is_last_user=false) %}
{%- for message in messages %}
{%- if message['role'] == 'system' %}
{%- if ns.is_first_sp %}
{% set ns.system_prompt = ns.system_prompt + message['content'] %}
{% set ns.is_first_sp = false %}
{%- else %}
{% set ns.system_prompt = ns.system_prompt + '\n\n' + message['content'] %}
{%- endif %}
{%- endif %}
{%- endfor %}
{% if tools is defined and tools is not none %}
{% set tool_ns = namespace(text='## Tools\nYou have access to the following tools:\n') %}
{% for tool in tools %}
{% set tool_ns.text = tool_ns.text + '\n### ' + tool.function.name + '\nDescription: ' + tool.function.description + '\n\nParameters: ' + (tool.function.parameters | tojson) + '\n' %}
{% endfor %}
{% set tool_ns.text = tool_ns.text + "\nIMPORTANT: ALWAYS adhere to this exact format for tool use:\n<tool▁calls▁begin><tool▁call▁begin>tool_call_name<tool▁sep>tool_call_arguments<tool▁call▁end>{{additional_tool_calls}}<tool▁calls▁end>\n\nWhere:\n\n- `tool_call_name` must be an exact match to one of the available tools\n- `tool_call_arguments` must be valid JSON that strictly follows the tool's Parameters Schema\n- For multiple tool calls, chain them directly without separators or spaces\n" %}
{% set ns.system_prompt = ns.system_prompt + '\n\n' + tool_ns.text %}
{% endif %}
{{ bos_token }}{{ ns.system_prompt }}
{%- for message in messages %}
{%- if message['role'] == 'user' %}
{%- set ns.is_tool = false -%}
{%- set ns.is_first = false -%}
{%- set ns.is_last_user = true -%}
{{'<User>' + message['content']}}
{%- endif %}
{%- if message['role'] == 'assistant' and message['tool_calls'] is defined and message['tool_calls'] is not none %}
{%- if ns.is_last_user %}
{{'<Assistant></think>'}}
{%- endif %}
{%- set ns.is_last_user = false -%}
{%- set ns.is_first = false %}
{%- set ns.is_tool = false -%}
{%- for tool in message['tool_calls'] %}
{%- if not ns.is_first %}
{%- if message['content'] is none %}
{{'<tool▁calls▁begin><tool▁call▁begin>'+ tool['function']['name'] + '<tool▁sep>' + tool['function']['arguments']|tojson + '<tool▁call▁end>'}}
{%- else %}
{{message['content'] + '<tool▁calls▁begin><tool▁call▁begin>' + tool['function']['name'] + '<tool▁sep>' + tool['function']['arguments']|tojson + '<tool▁call▁end>'}}
{%- endif %}
{%- set ns.is_first = true -%}
{%- else %}
{{'<tool▁call▁begin>'+ tool['function']['name'] + '<tool▁sep>' + tool['function']['arguments']|tojson + '<tool▁call▁end>'}}
{%- endif %}
{%- endfor %}
{{'<tool▁calls▁end><end▁of▁sentence>'}}
{%- endif %}
{%- if message['role'] == 'assistant' and (message['tool_calls'] is not defined or message['tool_calls'] is none) %}
{%- if ns.is_last_user %}
{{'<Assistant>'}}
{%- if message['prefix'] is defined and message['prefix'] and thinking %}
{{'<think>'}}
{%- else %}
{{'</think>'}}
{%- endif %}
{%- endif %}
{%- set ns.is_last_user = false -%}
{%- if ns.is_tool %}
{{message['content'] + '<end▁of▁sentence>'}}
{%- set ns.is_tool = false -%}
{%- else %}
{%- set content = message['content'] -%}
{%- if '</think>' in content %}
{%- set content = content.split('</think>', 1)[1] -%}
{%- endif %}
{{content + '<end▁of▁sentence>'}}
{%- endif %}
{%- endif %}
{%- if message['role'] == 'tool' %}
{%- set ns.is_last_user = false -%}
{%- set ns.is_tool = true -%}
{{'<tool▁output▁begin>' + message['content'] + '<tool▁output▁end>'}}
{%- endif %}
{%- endfor -%}
{%- if add_generation_prompt and ns.is_last_user and not ns.is_tool %}
{{'<Assistant>'}}
{%- if not thinking %}
{{'</think>'}}
{%- else %}
{{'<think>'}}
{%- endif %}
{% endif %}
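Not part of this commit — a short sketch of driving the `thinking` switch used by the template above. apply_chat_template forwards extra keyword arguments into the Jinja context; the model name and template path are placeholders.

# Hedged sketch: with thinking=False the generation prompt ends in '<Assistant></think>',
# with thinking=True it ends in '<Assistant><think>'.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("deepseek-ai/DeepSeek-V3.1", trust_remote_code=True)  # assumption
messages = [{"role": "user", "content": "Explain KV-cache reuse in one paragraph."}]

for thinking in (False, True):
    prompt = tokenizer.apply_chat_template(
        messages,
        chat_template=open("deepseek_v31_thinking.jinja").read(),  # hypothetical path
        tokenize=False,
        add_generation_prompt=True,
        thinking=thinking,  # extra kwargs are visible to the template
    )
    print(repr(prompt[-30:]))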

View File

@@ -0,0 +1,112 @@
{# Copied from https://github.com/wukaixingxp/vllm/blob/8a32e2a6e452a03c0e8222e3876ad6086cbf581f/examples/tool_chat_template_llama4_pythonic.jinja to enable better model responses. #}
{{- bos_token }}
{%- if custom_tools is defined and custom_tools %}
{%- set tools = custom_tools %}
{%- endif %}
{%- if tools is defined and tools %}
{%- set tool_definition = tool_definition ~ (tools | tojson(indent=4)) %}
{%- else %}
{%- set tools = none %}
{%- endif %}
{#- This block extracts the system message, so we can slot it into the right place. #}
{%- if messages[0]['role'] == 'system' %}
{%- set user_provided_system_message = true %}
{%- if messages[0]['content'] is string %}
{%- set system_message = messages[0]['content']|trim %}
{%- else %}
{%- set system_message = messages[0]['content'][0]['text']|trim %}
{%- endif %}
{%- set messages = messages[1:] %}
{%- else %}
{%- if tools is not none %}
{#- Since no system_message was provided by the user, if tools are provided, system_message now falls back to the default tool system message #}
{#- This system message is from llama website:https://www.llama.com/docs/model-cards-and-prompt-formats/llama4/ #}
{%- set system_message = "You are a helpful assistant and an expert in function composition. You can answer general questions using your internal knowledge OR invoke functions when necessary. Follow these strict guidelines:\n\n1. FUNCTION CALLS:\n- ONLY use functions that are EXPLICITLY listed in the function list below\n- If NO functions are listed (empty function list []), respond ONLY with internal knowledge or \"I don't have access to [Unavailable service] information\"\n- If a function is not in the list, respond ONLY with internal knowledge or \"I don't have access to [Unavailable service] information\"\n- If ALL required parameters are present AND the query EXACTLY matches a listed function's purpose: output ONLY the function call(s)\n- Use exact format: [func_name1(param1=value1, param2=value2), func_name2(...)]\nExamples:\nCORRECT: [get_weather(location=\"Vancouver\"), calculate_route(start=\"Boston\", end=\"New York\")] <- Only if get_weather and calculate_route are in function list\nINCORRECT: get_weather(location=\"New York\")\nINCORRECT: Let me check the weather: [get_weather(location=\"New York\")]\nINCORRECT: [get_events(location=\"Singapore\")] <- If function not in list\n\n2. RESPONSE RULES:\n- For pure function requests matching a listed function: ONLY output the function call(s)\n- For knowledge questions: ONLY output text\n- For missing parameters: ONLY request the specific missing parameters\n- For unavailable services (not in function list): output ONLY with internal knowledge or \"I don't have access to [Unavailable service] information\". Do NOT execute a function call.\n- If the query asks for information beyond what a listed function provides: output ONLY with internal knowledge about your limitations\n- NEVER combine text and function calls in the same response\n- NEVER suggest alternative functions when the requested service is unavailable\n- NEVER create or invent new functions not listed below\n\n3. STRICT BOUNDARIES:\n- ONLY use functions from the list below - no exceptions\n- NEVER use a function as an alternative to unavailable information\n- NEVER call functions not present in the function list\n- NEVER add explanatory text to function calls\n- NEVER respond with empty brackets\n- Use proper Python/JSON syntax for function calls\n- Check the function list carefully before responding\n\n4. TOOL RESPONSE HANDLING:\n- When receiving tool responses: provide concise, natural language responses\n- Don't repeat tool response verbatim\n- Don't add supplementary information\n\nHere is a list of functions in JSON format that you can invoke:\n" %}
{%- else %}
{%- set system_message = "" %}
{%- endif %}
{%- endif %}
{#- Now write the system message: use the user-provided system message if user_provided_system_message, else the default tool system message if tools are present #}
{%- if system_message %}
{#- always use user provided system message to override default tool system message #}
{{- "<|header_start|>system<|header_end|>\n\n" }}
{{- system_message }}
{%- if user_provided_system_message and tools %}
{{- "\nHere is a list of functions in JSON format that you can invoke. Use exact format: [func_name1(param1=value1, param2=value2), func_name2(...)]\n" }}
{{- tool_definition -}}
{%- elif tool_definition %}
{{- tool_definition -}}
{%- endif %}
{{- "<|eot|>" }}
{%- endif %}
{#- Now deal with all other messages #}
{%- for message in messages %}
{#- Base case: messages that are not from the tool role and have an empty tool_calls list #}
{%- if not (message.role == 'ipython' or message.role == 'tool' or ('tool_calls' in message and message.tool_calls|length != 0 )) %}
{{- '<|header_start|>' + message['role'] + '<|header_end|>\n\n' }}
{%- if message['content'] is string %}
{{- message['content'] }}
{%- else %}
{%- for content in message['content'] %}
{%- if content['type'] == 'image' %}
{{- '<|image|>' }}
{%- elif content['type'] == 'text' %}
{{- content['text'] | trim }}
{%- endif %}
{%- endfor %}
{%- endif %}
{{- "<|eot|>" }}
{#- Tool case: message has a non-empty tool_calls list and must be from the assistant #}
{%- elif 'tool_calls' in message %}
{#- assume tool_calls always come from the assistant #}
{%- if message.role == 'assistant' %}
{{- '<|header_start|>assistant<|header_end|>\n\n' -}}
{%- if message['content'] is string %}
{{- message['content'] }}
{%- else %}
{%- for content in message['content'] %}
{%- if content['type'] == 'image' %}
{{- '<|image|>' }}
{%- elif content['type'] == 'text' %}
{{- content['text'] }}
{%- endif %}
{%- endfor %}
{%- endif %}
{{- "[" }}
{%- for tool_call in message.tool_calls %}
{%- if tool_call.function is defined %}
{%- set tool_call = tool_call.function %}
{%- endif %}
{{- tool_call.name + '(' -}}
{%- for param in tool_call.arguments %}
{{- param + '="' -}}
{{- "%s" | format(tool_call.arguments[param]) -}}
{{- '"' -}}
{% if not loop.last %}, {% endif %}
{%- endfor %}
{{- ')' -}}
{% if not loop.last %}, {% endif %}
{%- endfor %}
{{- "]<|eot|>" }}
{%- endif %}
{#- Tool response case: messages coming back from tool execution #}
{%- elif message.role == "tool" or message.role == "ipython" %}
{{- "<|header_start|>ipython<|header_end|>\n\n" }}
{%- if message.content is string %}
{{- message.content | tojson }}
{%- else %}
{%- for content in message['content'] %}
{%- if content['type'] == 'text' %}
{{- content['text'] | tojson }}
{%- endif %}
{%- endfor %}
{%- endif %}
{{- "<|eot|>" }}
{%- endif %}
{%- endfor %}
{%- if add_generation_prompt %}
{{- '<|header_start|>assistant<|header_end|>\n\n' }}
{%- endif %}
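Not part of this commit — a hedged sketch of one way to parse the pythonic call format this template instructs the model to emit, e.g. [get_weather(location="Vancouver")], using Python's ast module. The function and argument names are only illustrative.

# Hedged sketch: turn '[f(a=1), g(b="x")]' into a list of (name, kwargs) pairs.
import ast

def parse_pythonic_tool_calls(text: str):
    tree = ast.parse(text.strip(), mode="eval")
    if not isinstance(tree.body, ast.List):
        raise ValueError("expected a bracketed list of calls")
    calls = []
    for node in tree.body.elts:
        if not isinstance(node, ast.Call) or not isinstance(node.func, ast.Name):
            raise ValueError("expected simple keyword-argument calls")
        kwargs = {kw.arg: ast.literal_eval(kw.value) for kw in node.keywords}
        calls.append((node.func.id, kwargs))
    return calls

print(parse_pythonic_tool_calls(
    '[get_weather(location="Vancouver"), calculate_route(start="Boston", end="New York")]'
))
# -> [('get_weather', {'location': 'Vancouver'}),
#     ('calculate_route', {'start': 'Boston', 'end': 'New York'})]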

View File

@@ -0,0 +1,73 @@
"""
Usage:
export ANTHROPIC_API_KEY=sk-******
python3 anthropic_example_chat.py
"""
import sglang as sgl
@sgl.function
def multi_turn_question(s, question_1, question_2):
s += sgl.user(question_1)
s += sgl.assistant(sgl.gen("answer_1", max_tokens=256))
s += sgl.user(question_2)
s += sgl.assistant(sgl.gen("answer_2", max_tokens=256))
def single():
state = multi_turn_question.run(
question_1="What is the capital of the United States?",
question_2="List two local attractions.",
)
for m in state.messages():
print(m["role"], ":", m["content"])
print("\n-- answer_1 --\n", state["answer_1"])
def stream():
state = multi_turn_question.run(
question_1="What is the capital of the United States?",
question_2="List two local attractions.",
stream=True,
)
for out in state.text_iter():
print(out, end="", flush=True)
print()
def batch():
states = multi_turn_question.run_batch(
[
{
"question_1": "What is the capital of the United States?",
"question_2": "List two local attractions.",
},
{
"question_1": "What is the capital of France?",
"question_2": "What is the population of this city?",
},
]
)
for s in states:
print(s.messages())
if __name__ == "__main__":
sgl.set_default_backend(sgl.Anthropic("claude-3-haiku-20240307"))
# Run a single request
print("\n========== single ==========\n")
single()
# Stream output
print("\n========== stream ==========\n")
stream()
# Run a batch of requests
print("\n========== batch ==========\n")
batch()

View File

@@ -0,0 +1,68 @@
"""
Usage:
export ANTHROPIC_API_KEY=sk-******
python3 anthropic_example_complete.py
"""
import sglang as sgl
@sgl.function
def few_shot_qa(s, question):
s += """
\n\nHuman: What is the capital of France?
\n\nAssistant: Paris
\n\nHuman: What is the capital of Germany?
\n\nAssistant: Berlin
\n\nHuman: What is the capital of Italy?
\n\nAssistant: Rome
"""
s += "\n\nHuman: " + question + "\n"
s += "\n\nAssistant:" + sgl.gen("answer", temperature=0)
def single():
state = few_shot_qa.run(question="What is the capital of the United States?")
answer = state["answer"].strip().lower()
assert "washington" in answer, f"answer: {state['answer']}"
print(state.text())
def stream():
state = few_shot_qa.run(
question="What is the capital of the United States?", stream=True
)
for out in state.text_iter("answer"):
print(out, end="", flush=True)
print()
def batch():
states = few_shot_qa.run_batch(
[
{"question": "What is the capital of the United States?"},
{"question": "What is the capital of China?"},
]
)
for s in states:
print(s["answer"])
if __name__ == "__main__":
sgl.set_default_backend(sgl.Anthropic("claude-3-haiku-20240307"))
# Run a single request
print("\n========== single ==========\n")
single()
# Stream output
print("\n========== stream ==========\n")
stream()
# Run a batch of requests
print("\n========== batch ==========\n")
batch()

View File

@@ -0,0 +1,83 @@
"""
Usage:
export AZURE_OPENAI_API_KEY=sk-******
python3 openai_example_chat.py
"""
import os
import sglang as sgl
@sgl.function
def multi_turn_question(s, question_1, question_2):
s += sgl.system("You are a helpful assistant.")
s += sgl.user(question_1)
s += sgl.assistant(sgl.gen("answer_1", max_tokens=256))
s += sgl.user(question_2)
s += sgl.assistant(sgl.gen("answer_2", max_tokens=256))
def single():
state = multi_turn_question.run(
question_1="What is the capital of the United States?",
question_2="List two local attractions.",
)
for m in state.messages():
print(m["role"], ":", m["content"])
print("\n-- answer_1 --\n", state["answer_1"])
def stream():
state = multi_turn_question.run(
question_1="What is the capital of the United States?",
question_2="List two local attractions.",
stream=True,
)
for out in state.text_iter():
print(out, end="", flush=True)
print()
def batch():
states = multi_turn_question.run_batch(
[
{
"question_1": "What is the capital of the United States?",
"question_2": "List two local attractions.",
},
{
"question_1": "What is the capital of France?",
"question_2": "What is the population of this city?",
},
]
)
for s in states:
print(s.messages())
if __name__ == "__main__":
backend = sgl.OpenAI(
model_name="azure-gpt-4",
api_version="2023-07-01-preview",
azure_endpoint="https://oai-arena-sweden.openai.azure.com/",
api_key=os.environ["AZURE_OPENAI_API_KEY"],
is_azure=True,
)
sgl.set_default_backend(backend)
# Run a single request
print("\n========== single ==========\n")
single()
# Stream output
print("\n========== stream ==========\n")
stream()
# Run a batch of requests
print("\n========== batch ==========\n")
batch()

View File

@@ -0,0 +1,73 @@
"""
Usage:
export GCP_PROJECT_ID=******
python3 gemini_example_chat.py
"""
import sglang as sgl
@sgl.function
def multi_turn_question(s, question_1, question_2):
s += sgl.user(question_1)
s += sgl.assistant(sgl.gen("answer_1", max_tokens=256))
s += sgl.user(question_2)
s += sgl.assistant(sgl.gen("answer_2", max_tokens=256))
def single():
state = multi_turn_question.run(
question_1="What is the capital of the United States?",
question_2="List two local attractions.",
)
for m in state.messages():
print(m["role"], ":", m["content"])
print("\n-- answer_1 --\n", state["answer_1"])
def stream():
state = multi_turn_question.run(
question_1="What is the capital of the United States?",
question_2="List two local attractions.",
stream=True,
)
for out in state.text_iter():
print(out, end="", flush=True)
print()
def batch():
states = multi_turn_question.run_batch(
[
{
"question_1": "What is the capital of the United States?",
"question_2": "List two local attractions.",
},
{
"question_1": "What is the capital of France?",
"question_2": "What is the population of this city?",
},
]
)
for s in states:
print(s.messages())
if __name__ == "__main__":
sgl.set_default_backend(sgl.VertexAI("gemini-pro"))
# Run a single request
print("\n========== single ==========\n")
single()
# Stream output
print("\n========== stream ==========\n")
stream()
# Run a batch of requests
print("\n========== batch ==========\n")
batch()

View File

@@ -0,0 +1,68 @@
"""
Usage:
export GCP_PROJECT_ID=******
python3 gemini_example_complete.py
"""
import sglang as sgl
@sgl.function
def few_shot_qa(s, question):
s += """The following are questions with answers.
Q: What is the capital of France?
A: Paris
Q: What is the capital of Germany?
A: Berlin
Q: What is the capital of Italy?
A: Rome
"""
s += "Q: " + question + "\n"
s += "A:" + sgl.gen("answer", stop="\n", temperature=0)
def single():
state = few_shot_qa.run(question="What is the capital of the United States?")
answer = state["answer"].strip().lower()
assert "washington" in answer, f"answer: {state['answer']}"
print(state.text())
def stream():
state = few_shot_qa.run(
question="What is the capital of the United States?", stream=True
)
for out in state.text_iter("answer"):
print(out, end="", flush=True)
print()
def batch():
states = few_shot_qa.run_batch(
[
{"question": "What is the capital of the United States?"},
{"question": "What is the capital of China?"},
]
)
for s in states:
print(s["answer"])
if __name__ == "__main__":
sgl.set_default_backend(sgl.VertexAI("gemini-pro"))
# Run a single request
print("\n========== single ==========\n")
single()
# Stream output
print("\n========== stream ==========\n")
stream()
# Run a batch of requests
print("\n========== batch ==========\n")
batch()

View File

@@ -0,0 +1,30 @@
"""
Usage:
export GCP_PROJECT_ID=******
python3 gemini_example_multimodal_chat.py
"""
import sglang as sgl
@sgl.function
def image_qa(s, image_file1, image_file2, question):
s += sgl.user(sgl.image(image_file1) + sgl.image(image_file2) + question)
s += sgl.assistant(sgl.gen("answer", max_tokens=256))
if __name__ == "__main__":
sgl.set_default_backend(sgl.VertexAI("gemini-pro-vision"))
state = image_qa.run(
image_file1="./images/cat.jpeg",
image_file2="./images/dog.jpeg",
question="Describe difference of the two images in one sentence.",
stream=True,
)
for out in state.text_iter("answer"):
print(out, end="", flush=True)
print()
print(state["answer"])

Binary file not shown (new image, 337 KiB).

Binary file not shown (new image, 407 KiB).

View File

@@ -0,0 +1,75 @@
"""
Usage:
python3 local_example_chat.py
"""
import sglang as sgl
@sgl.function
def multi_turn_question(s, question_1, question_2):
s += sgl.user(question_1)
s += sgl.assistant(sgl.gen("answer_1", max_tokens=256))
s += sgl.user(question_2)
s += sgl.assistant(sgl.gen("answer_2", max_tokens=256))
def single():
state = multi_turn_question.run(
question_1="What is the capital of the United States?",
question_2="List two local attractions.",
)
for m in state.messages():
print(m["role"], ":", m["content"])
print("\n-- answer_1 --\n", state["answer_1"])
def stream():
state = multi_turn_question.run(
question_1="What is the capital of the United States?",
question_2="List two local attractions.",
stream=True,
)
for out in state.text_iter():
print(out, end="", flush=True)
print()
def batch():
states = multi_turn_question.run_batch(
[
{
"question_1": "What is the capital of the United States?",
"question_2": "List two local attractions.",
},
{
"question_1": "What is the capital of France?",
"question_2": "What is the population of this city?",
},
]
)
for s in states:
print(s.messages())
if __name__ == "__main__":
runtime = sgl.Runtime(model_path="meta-llama/Llama-2-7b-chat-hf")
sgl.set_default_backend(runtime)
# Run a single request
print("\n========== single ==========\n")
single()
# Stream output
print("\n========== stream ==========\n")
stream()
# Run a batch of requests
print("\n========== batch ==========\n")
batch()
runtime.shutdown()

View File

@@ -0,0 +1,70 @@
"""
Usage:
python3 local_example_complete.py
"""
import sglang as sgl
@sgl.function
def few_shot_qa(s, question):
s += """The following are questions with answers.
Q: What is the capital of France?
A: Paris
Q: What is the capital of Germany?
A: Berlin
Q: What is the capital of Italy?
A: Rome
"""
s += "Q: " + question + "\n"
s += "A:" + sgl.gen("answer", stop="\n", temperature=0)
def single():
state = few_shot_qa.run(question="What is the capital of the United States?")
answer = state["answer"].strip().lower()
assert "washington" in answer, f"answer: {state['answer']}"
print(state.text())
def stream():
state = few_shot_qa.run(
question="What is the capital of the United States?", stream=True
)
for out in state.text_iter("answer"):
print(out, end="", flush=True)
print()
def batch():
states = few_shot_qa.run_batch(
[
{"question": "What is the capital of the United States?"},
{"question": "What is the capital of China?"},
]
)
for s in states:
print(s["answer"])
if __name__ == "__main__":
runtime = sgl.Runtime(model_path="meta-llama/Llama-2-7b-chat-hf")
sgl.set_default_backend(runtime)
# Run a single request
print("\n========== single ==========\n")
single()
# Stream output
print("\n========== stream ==========\n")
stream()
# Run a batch of requests
print("\n========== batch ==========\n")
batch()
runtime.shutdown()

View File

@@ -0,0 +1,78 @@
"""
Usage: python3 local_example_llava_next.py
"""
import sglang as sgl
from sglang.lang.chat_template import get_chat_template
@sgl.function
def image_qa(s, image_path, question):
s += sgl.user(sgl.image(image_path) + question)
s += sgl.assistant(sgl.gen("answer"))
def single():
state = image_qa.run(
image_path="images/cat.jpeg", question="What is this?", max_new_tokens=128
)
print(state["answer"], "\n")
def stream():
state = image_qa.run(
image_path="images/cat.jpeg",
question="What is this?",
max_new_tokens=64,
stream=True,
)
for out in state.text_iter("answer"):
print(out, end="", flush=True)
print()
def batch():
states = image_qa.run_batch(
[
{"image_path": "images/cat.jpeg", "question": "What is this?"},
{"image_path": "images/dog.jpeg", "question": "What is this?"},
],
max_new_tokens=128,
)
for s in states:
print(s["answer"], "\n")
if __name__ == "__main__":
import multiprocessing as mp
mp.set_start_method("spawn", force=True)
runtime = sgl.Runtime(model_path="lmms-lab/llama3-llava-next-8b")
runtime.endpoint.chat_template = get_chat_template("llama-3-instruct-llava")
# Or you can use the 72B model
# runtime = sgl.Runtime(model_path="lmms-lab/llava-next-72b", tp_size=8)
# runtime.endpoint.chat_template = get_chat_template("chatml-llava")
sgl.set_default_backend(runtime)
print(f"chat template: {runtime.endpoint.chat_template.name}")
# Or you can use API models
# sgl.set_default_backend(sgl.OpenAI("gpt-4-vision-preview"))
# sgl.set_default_backend(sgl.VertexAI("gemini-pro-vision"))
# Run a single request
print("\n========== single ==========\n")
single()
# Stream output
print("\n========== stream ==========\n")
stream()
# Run a batch of requests
print("\n========== batch ==========\n")
batch()
runtime.shutdown()

View File

@@ -0,0 +1,74 @@
"""
Usage:
export OPENAI_API_KEY=sk-******
python3 openai_example_chat.py
"""
import sglang as sgl
@sgl.function
def multi_turn_question(s, question_1, question_2):
s += sgl.system("You are a helpful assistant.")
s += sgl.user(question_1)
s += sgl.assistant(sgl.gen("answer_1", max_tokens=256))
s += sgl.user(question_2)
s += sgl.assistant(sgl.gen("answer_2", max_tokens=256))
def single():
state = multi_turn_question.run(
question_1="What is the capital of the United States?",
question_2="List two local attractions.",
)
for m in state.messages():
print(m["role"], ":", m["content"])
print("\n-- answer_1 --\n", state["answer_1"])
def stream():
state = multi_turn_question.run(
question_1="What is the capital of the United States?",
question_2="List two local attractions.",
stream=True,
)
for out in state.text_iter():
print(out, end="", flush=True)
print()
def batch():
states = multi_turn_question.run_batch(
[
{
"question_1": "What is the capital of the United States?",
"question_2": "List two local attractions.",
},
{
"question_1": "What is the capital of France?",
"question_2": "What is the population of this city?",
},
]
)
for s in states:
print(s.messages())
if __name__ == "__main__":
sgl.set_default_backend(sgl.OpenAI("gpt-3.5-turbo"))
# Run a single request
print("\n========== single ==========\n")
single()
# Stream output
print("\n========== stream ==========\n")
stream()
# Run a batch of requests
print("\n========== batch ==========\n")
batch()

View File

@@ -0,0 +1,68 @@
"""
Usage:
export OPENAI_API_KEY=sk-******
python3 openai_example_complete.py
"""
import sglang as sgl
@sgl.function
def few_shot_qa(s, question):
s += """The following are questions with answers.
Q: What is the capital of France?
A: Paris
Q: What is the capital of Germany?
A: Berlin
Q: What is the capital of Italy?
A: Rome
"""
s += "Q: " + question + "\n"
s += "A:" + sgl.gen("answer", stop="\n", temperature=0)
def single():
state = few_shot_qa.run(question="What is the capital of the United States?")
answer = state["answer"].strip().lower()
assert "washington" in answer, f"answer: {state['answer']}"
print(state.text())
def stream():
state = few_shot_qa.run(
question="What is the capital of the United States?", stream=True
)
for out in state.text_iter("answer"):
print(out, end="", flush=True)
print()
def batch():
states = few_shot_qa.run_batch(
[
{"question": "What is the capital of the United States?"},
{"question": "What is the capital of China?"},
]
)
for s in states:
print(s["answer"])
if __name__ == "__main__":
sgl.set_default_backend(sgl.OpenAI("gpt-3.5-turbo-instruct"))
# Run a single request
print("\n========== single ==========\n")
single()
# Stream output
print("\n========== stream ==========\n")
stream()
# Run a batch of requests
print("\n========== batch ==========\n")
batch()

View File

@@ -0,0 +1,71 @@
"""
Usage:
export OPENAI_API_KEY=sk-******
python3 openai_example_chat.py
"""
import sglang as sgl
@sgl.function
def multi_turn_question(s, question_1, question_2):
s += sgl.system("You are a helpful assistant.")
s += sgl.user(question_1)
s += sgl.assistant(sgl.gen("answer_1", max_tokens=1024, n=2))
s += sgl.user(question_2)
s += sgl.assistant(
sgl.gen(
"answer_2",
max_tokens=1024,
)
)
def single():
state = multi_turn_question.run(
question_1="What is the capital of the United States?",
question_2="List two local attractions.",
)
for m in state.messages():
print(m["role"], ":", m["content"])
print("\n-- answer_1 --\n", state["answer_1"])
print("\n-- answer_2 --\n", state["answer_2"])
assert isinstance(state["answer_1"], list)
assert len(state["answer_1"]) == 2
assert isinstance(state["answer_2"], str)
def batch():
states = multi_turn_question.run_batch(
[
{
"question_1": "What is the capital of the United States?",
"question_2": "List two local attractions.",
},
{
"question_1": "What is the capital of France?",
"question_2": "What is the population of this city?",
},
]
)
for s in states:
print(s.messages())
print("\n-- answer_1 --\n", s["answer_1"])
print("\n-- answer_2 --\n", s["answer_2"])
assert isinstance(s["answer_1"], list)
assert len(s["answer_1"]) == 2
assert isinstance(s["answer_2"], str)
if __name__ == "__main__":
sgl.set_default_backend(sgl.OpenAI("o1"))
# Run a single request
print("\n========== single ==========\n")
single()
# Run a batch of requests
print("\n========== batch ==========\n")
batch()

View File

@@ -0,0 +1,57 @@
"""
Usage:
export OPENAI_API_KEY=sk-******
python3 openai_example_chat.py
"""
import sglang as sgl
@sgl.function
def multi_turn_question(s, question_1, question_2):
s += sgl.system("You are a helpful assistant.")
s += sgl.user(question_1)
s += sgl.assistant(sgl.gen("answer_1", max_tokens=100))
s += sgl.user(question_2)
s += sgl.assistant(sgl.gen("answer_2"))
def single():
state = multi_turn_question.run(
question_1="What is the capital of the United States?",
question_2="List two local attractions.",
)
for m in state.messages():
print(m["role"], ":", m["content"])
print("\n-- answer_1 --\n", state["answer_1"])
def batch():
states = multi_turn_question.run_batch(
[
{
"question_1": "What is the capital of the United States?",
"question_2": "List two local attractions.",
},
{
"question_1": "What is the capital of France?",
"question_2": "What is the population of this city?",
},
]
)
for s in states:
print(s.messages())
if __name__ == "__main__":
sgl.set_default_backend(sgl.OpenAI("o1"))
# Run a single request
print("\n========== single ==========\n")
single()
# Run a batch of requests
print("\n========== batch ==========\n")
batch()

View File

@@ -0,0 +1,81 @@
"""
Usage:
export OPENROUTER_API_KEY=sk-******
python3 openrouter_example_chat.py
"""
import os
import sglang as sgl
@sgl.function
def multi_turn_question(s, question_1, question_2):
s += sgl.system("You are a helpful assistant.")
s += sgl.user(question_1)
s += sgl.assistant(sgl.gen("answer_1", max_tokens=256))
s += sgl.user(question_2)
s += sgl.assistant(sgl.gen("answer_2", max_tokens=256))
def single():
state = multi_turn_question.run(
question_1="What is the capital of the United States?",
question_2="List two local attractions.",
)
for m in state.messages():
print(m["role"], ":", m["content"])
print("\n-- answer_1 --\n", state["answer_1"])
def stream():
state = multi_turn_question.run(
question_1="What is the capital of the United States?",
question_2="List two local attractions.",
stream=True,
)
for out in state.text_iter():
print(out, end="", flush=True)
print()
def batch():
states = multi_turn_question.run_batch(
[
{
"question_1": "What is the capital of the United States?",
"question_2": "List two local attractions.",
},
{
"question_1": "What is the capital of France?",
"question_2": "What is the population of this city?",
},
]
)
for s in states:
print(s.messages())
if __name__ == "__main__":
backend = sgl.OpenAI(
model_name="google/gemma-7b-it:free",
base_url="https://openrouter.ai/api/v1",
api_key=os.environ.get("OPENROUTER_API_KEY"),
)
sgl.set_default_backend(backend)
# Run a single request
print("\n========== single ==========\n")
single()
# Stream output
print("\n========== stream ==========\n")
stream()
# Run a batch of requests
print("\n========== batch ==========\n")
batch()

View File

@@ -0,0 +1,81 @@
"""
Usage:
export TOGETHER_API_KEY=sk-******
python3 together_example_chat.py
"""
import os
import sglang as sgl
@sgl.function
def multi_turn_question(s, question_1, question_2):
s += sgl.system("You are a helpful assistant.")
s += sgl.user(question_1)
s += sgl.assistant(sgl.gen("answer_1", max_tokens=256))
s += sgl.user(question_2)
s += sgl.assistant(sgl.gen("answer_2", max_tokens=256))
def single():
state = multi_turn_question.run(
question_1="What is the capital of the United States?",
question_2="List two local attractions.",
)
for m in state.messages():
print(m["role"], ":", m["content"])
print("\n-- answer_1 --\n", state["answer_1"])
def stream():
state = multi_turn_question.run(
question_1="What is the capital of the United States?",
question_2="List two local attractions.",
stream=True,
)
for out in state.text_iter():
print(out, end="", flush=True)
print()
def batch():
states = multi_turn_question.run_batch(
[
{
"question_1": "What is the capital of the United States?",
"question_2": "List two local attractions.",
},
{
"question_1": "What is the capital of France?",
"question_2": "What is the population of this city?",
},
]
)
for s in states:
print(s.messages())
if __name__ == "__main__":
backend = sgl.OpenAI(
model_name="mistralai/Mixtral-8x7B-Instruct-v0.1",
base_url="https://api.together.xyz/v1",
api_key=os.environ.get("TOGETHER_API_KEY"),
)
sgl.set_default_backend(backend)
# Run a single request
print("\n========== single ==========\n")
single()
# Stream output
print("\n========== stream ==========\n")
stream()
# Run a batch of requests
print("\n========== batch ==========\n")
batch()

View File

@@ -0,0 +1,76 @@
"""
Usage:
export TOGETHER_API_KEY=sk-******
python3 together_example_complete.py
"""
import os
import sglang as sgl
@sgl.function
def few_shot_qa(s, question):
s += """The following are questions with answers.
Q: What is the capital of France?
A: Paris
Q: What is the capital of Germany?
A: Berlin
Q: What is the capital of Italy?
A: Rome
"""
s += "Q: " + question + "\n"
s += "A:" + sgl.gen("answer", stop="\n", temperature=0)
def single():
state = few_shot_qa.run(question="What is the capital of the United States?")
answer = state["answer"].strip().lower()
assert "washington" in answer, f"answer: {state['answer']}"
print(state.text())
def stream():
state = few_shot_qa.run(
question="What is the capital of the United States?", stream=True
)
for out in state.text_iter("answer"):
print(out, end="", flush=True)
print()
def batch():
states = few_shot_qa.run_batch(
[
{"question": "What is the capital of the United States?"},
{"question": "What is the capital of China?"},
]
)
for s in states:
print(s["answer"])
if __name__ == "__main__":
backend = sgl.OpenAI(
model_name="mistralai/Mixtral-8x7B-Instruct-v0.1",
is_chat_model=False,
base_url="https://api.together.xyz/v1",
api_key=os.environ.get("TOGETHER_API_KEY"),
)
sgl.set_default_backend(backend)
# Run a single request
print("\n========== single ==========\n")
single()
# Stream output
print("\n========== stream ==========\n")
stream()
# Run a batch of requests
print("\n========== batch ==========\n")
batch()

View File

@@ -0,0 +1,53 @@
import sglang as sgl
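# Chinese-language variant of the regex-constrained JSON decoding example: the
# regex below forces the output into a Harry Potter character card whose keys are
# 姓名 (name), 学院 (house), 血型 (blood status), 职业 (occupation),
# 魔杖 (wand: 材质 wood, 杖芯 core, 长度 length), 存活 (alive),
# 守护神 (patronus) and 博格特 (boggart).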
character_regex = (
r"""\{\n"""
+ r""" "姓名": "[^"]{1,32}",\n"""
+ r""" "学院": "(格兰芬多|赫奇帕奇|拉文克劳|斯莱特林)",\n"""
+ r""" "血型": "(纯血|混血|麻瓜)",\n"""
+ r""" "职业": "(学生|教师|傲罗|魔法部|食死徒|凤凰社成员)",\n"""
+ r""" "魔杖": \{\n"""
+ r""" "材质": "[^"]{1,32}",\n"""
+ r""" "杖芯": "[^"]{1,32}",\n"""
+ r""" "长度": [0-9]{1,2}\.[0-9]{0,2}\n"""
+ r""" \},\n"""
+ r""" "存活": "(存活|死亡)",\n"""
+ r""" "守护神": "[^"]{1,32}",\n"""
+ r""" "博格特": "[^"]{1,32}"\n"""
+ r"""\}"""
)
@sgl.function
def character_gen(s, name):
s += name + " 是一名哈利波特系列小说中的角色。请填写以下关于这个角色的信息。"
s += """\
这是一个例子
{
"姓名": "哈利波特",
"学院": "格兰芬多",
"血型": "混血",
"职业": "学生",
"魔杖": {
"材质": "冬青木",
"杖芯": "凤凰尾羽",
"长度": 11.0
},
"存活": "存活",
"守护神": "麋鹿",
"博格特": "摄魂怪"
}
"""
s += f"现在请你填写{name}的信息:\n"
s += sgl.gen("json_output", max_tokens=256, regex=character_regex)
def main():
backend = sgl.RuntimeEndpoint("http://localhost:30000")
sgl.set_default_backend(backend)
ret = character_gen.run(name="赫敏格兰杰", temperature=0)
print(ret.text())
if __name__ == "__main__":
main()

View File

@@ -0,0 +1,44 @@
"""
Usage:
python -m sglang.launch_server --model-path meta-llama/Llama-2-7b-chat-hf --port 30000
python choices_logprob.py
"""
import sglang as sgl
@sgl.function
def tool_use(s, question):
s += "To answer this question: " + question + ", "
s += "I need to use a " + sgl.gen("tool", choices=["calculator", "search engine"])
def main():
# Run one case
question = "What is 5 + 5?"
state = tool_use.run(question)
print("questions:", question)
print("choice:", state["tool"])
meta_info = state.get_meta_info("tool")
print("logprobs of choice 1", meta_info["input_token_logprobs"][0])
print("logprobs of choice 2", meta_info["input_token_logprobs"][1])
print("-" * 50)
# Run a batch
questions = [
"What is 5 + 6?",
"Who is Michael Jordan?",
]
states = tool_use.run_batch([{"question": q} for q in questions])
for question, state in zip(questions, states):
print("questions:", question)
print("choice:", state["tool"])
meta_info = state.get_meta_info("tool")
print("logprobs of choice 1", meta_info["input_token_logprobs"][0])
print("logprobs of choice 2", meta_info["input_token_logprobs"][1])
print("-" * 50)
if __name__ == "__main__":
sgl.set_default_backend(sgl.RuntimeEndpoint("http://localhost:30000"))
main()

View File

@@ -0,0 +1,115 @@
from math import exp
from pprint import pformat
import sglang as sgl
YELLOW = "\033[1;33m"
GREEN = "\033[1;32m"
BLUE = "\033[1;34m"
CLEAR = "\033[1;0m"
@sgl.function
def cot_decoding(s, question, get_top_k, is_chat_model, verbose):
"""CoT Decoding: http://arxiv.org/abs/2402.10200"""
if is_chat_model:
s += sgl.user("Question: " + question + "\nAnswer:")
s += sgl.assistant_begin()
else:
s += "Question: " + question + "\nAnswer:"
step_0 = s.fork(1)[0]
forks = s.fork(get_top_k)
answer_forks = s.fork(get_top_k)
# decoding step 0
step_0 += sgl.gen(
"get_top_k",
max_tokens=0,
return_logprob=True,
top_logprobs_num=get_top_k,
return_text_in_logprobs=True,
)
logprobs = step_0.get_meta_info("get_top_k")["output_top_logprobs"][0]
print("Decoding step 0:", ", ".join(pformat(token[2]) for token in logprobs))
for idx, (f, token) in enumerate(zip(forks, logprobs)):
logprob, token_id, text = token
f += text
if text == "<|end_of_text|>":
print(
f"{YELLOW}Path #{idx} {pformat(text)}[{exp(logprob):.3f}] (score=nan, answer=nan){CLEAR}"
)
continue
# continue greedy decoding
f += sgl.gen(
"answer",
temperature=0,
max_tokens=1024,
return_logprob=True,
top_logprobs_num=2,
return_text_in_logprobs=True,
)
# calculate probability disparity between the top and secondary tokens
x1s = [exp(xt[0][0]) for xt in f.get_meta_info("answer")["output_top_logprobs"]]
x2s = [exp(xt[1][0]) for xt in f.get_meta_info("answer")["output_top_logprobs"]]
tokens = [xt[0][2] for xt in f.get_meta_info("answer")["output_top_logprobs"]]
delta = (sum(x1s) - sum(x2s)) / len(x1s)
# extract the answer span (without the '<|end_of_text|>' token)
answer_forks[idx] += text + f["answer"] + "\nSo the answer is"
answer_forks[idx] += sgl.gen(
"answer_span",
temperature=0,
max_tokens=64,
return_logprob=True,
top_logprobs_num=2,
return_text_in_logprobs=True,
)
answer = answer_forks[idx]["answer_span"].replace("\n", " ").strip(":")
print(
f"{YELLOW}Path #{idx} {pformat(text)}[{exp(logprob):.3f}] (score={delta}, answer={answer}){CLEAR}"
)
generated_text = str(answer_forks[idx])[len("ProgramState(") : -1]
print(f"{BLUE}{pformat(generated_text)}{CLEAR}")
if verbose:
answer_tokens = [
xt[0][2]
for xt in answer_forks[idx].get_meta_info("answer_span")[
"output_top_logprobs"
]
]
answer_x1s = [
exp(xt[0][0])
for xt in answer_forks[idx].get_meta_info("answer_span")[
"output_top_logprobs"
]
]
answer_x2s = [
exp(xt[1][0])
for xt in answer_forks[idx].get_meta_info("answer_span")[
"output_top_logprobs"
]
]
for token, x1, x2 in zip(tokens, x1s, x2s):
print(f" {GREEN}{pformat(token)}{CLEAR}({x1:.3f}-{x2:.3f})", end="")
print("\n===========")
for token, x1, x2 in zip(answer_tokens, answer_x1s, answer_x2s):
print(f" {GREEN}{pformat(token)}{CLEAR}({x1:.3f}-{x2:.3f})", end="")
print()
sgl.set_default_backend(sgl.RuntimeEndpoint("http://localhost:30000"))
state = cot_decoding.run(
question=r"Claire makes a 3 egg omelet every morning for breakfast. How many dozens of eggs will she eat in 4 weeks?",
get_top_k=10,
is_chat_model=True,
verbose=False,
)

View File

@@ -0,0 +1,83 @@
"""
Usage:
python -m sglang.launch_server --model-path meta-llama/Llama-2-7b-chat-hf --port 30000
python json_decode.py
"""
from enum import Enum
from pydantic import BaseModel
import sglang as sgl
from sglang.srt.constrained.outlines_backend import build_regex_from_object
character_regex = (
r"""\{\n"""
+ r""" "name": "[\w\d\s]{1,16}",\n"""
+ r""" "house": "(Gryffindor|Slytherin|Ravenclaw|Hufflepuff)",\n"""
+ r""" "blood status": "(Pure-blood|Half-blood|Muggle-born)",\n"""
+ r""" "occupation": "(student|teacher|auror|ministry of magic|death eater|order of the phoenix)",\n"""
+ r""" "wand": \{\n"""
+ r""" "wood": "[\w\d\s]{1,16}",\n"""
+ r""" "core": "[\w\d\s]{1,16}",\n"""
+ r""" "length": [0-9]{1,2}\.[0-9]{0,2}\n"""
+ r""" \},\n"""
+ r""" "alive": "(Alive|Deceased)",\n"""
+ r""" "patronus": "[\w\d\s]{1,16}",\n"""
+ r""" "bogart": "[\w\d\s]{1,16}"\n"""
+ r"""\}"""
)
@sgl.function
def character_gen(s, name):
s += (
name
+ " is a character in Harry Potter. Please fill in the following information about this character.\n"
)
s += "The constrained regex is:\n"
s += character_regex + "\n"
s += "The JSON output is:\n"
s += sgl.gen("json_output", max_tokens=256, regex=character_regex)
def driver_character_gen():
state = character_gen.run(name="Hermione Granger")
print(state.text())
class Weapon(str, Enum):
sword = "sword"
axe = "axe"
mace = "mace"
spear = "spear"
bow = "bow"
crossbow = "crossbow"
class Wizard(BaseModel):
name: str
age: int
weapon: Weapon
@sgl.function
def pydantic_wizard_gen(s):
s += "Give me a description about a wizard in the JSON format.\n"
s += sgl.gen(
"character",
max_tokens=128,
temperature=0,
regex=build_regex_from_object(Wizard), # Requires pydantic >= 2.0
)
def driver_pydantic_wizard_gen():
state = pydantic_wizard_gen.run()
print(state.text())
if __name__ == "__main__":
sgl.set_default_backend(sgl.RuntimeEndpoint("http://localhost:30000"))
driver_character_gen()
# driver_pydantic_wizard_gen()

View File

@@ -0,0 +1,103 @@
# NOTE: Currently this can only be run through HTTP requests.
from concurrent.futures import ThreadPoolExecutor
from json_decode import character_regex
from sglang.utils import http_request
character_names = ["Hermione Granger", "Ron Weasley", "Harry Potter"]
base_url = "http://localhost:30000"
prompt = "is a character in Harry Potter. Please fill in the following information about this character.\n"
def openai_api_request(name):
data = {
"model": "",
"prompt": name + prompt,
"temperature": 0,
"max_tokens": 128,
"regex": character_regex,
"logprobs": 3,
}
res = http_request(base_url + "/v1/completions", json=data).json()
# with open(f"json_logprobs_{name.replace(' ', '_')}_tmp.json", "w") as fout:
# fout.write(json.dumps(res, indent=4))
logprobs = res["choices"][0]["logprobs"]
usage = res["usage"]
assert len(logprobs["token_logprobs"]) == len(logprobs["tokens"])
assert len(logprobs["token_logprobs"]) == len(logprobs["top_logprobs"])
assert len(logprobs["token_logprobs"]) == usage["completion_tokens"] - 1
return res
def srt_api_request(name):
data = {
"text": name + prompt,
"sampling_params": {
"temperature": 0,
"max_new_tokens": 128,
"regex": character_regex,
},
"return_logprob": True,
"logprob_start_len": 0,
"top_logprobs_num": 3,
"return_text_in_logprobs": True,
}
res = http_request(base_url + "/generate", json=data).json()
# with open(f"json_logprobs_{name.replace(' ', '_')}_tmp.json", "w") as fout:
# fout.write(json.dumps(res, indent=4))
meta_info = res["meta_info"]
assert len(meta_info["input_token_logprobs"]) == len(
meta_info["input_top_logprobs"]
)
assert len(meta_info["output_token_logprobs"]) == len(
meta_info["output_top_logprobs"]
)
assert len(meta_info["input_token_logprobs"]) == meta_info["prompt_tokens"]
assert len(meta_info["output_token_logprobs"]) == meta_info["completion_tokens"] - 1
return res
def pretty_print(res):
meta_info = res["meta_info"]
print("\n\n", "=" * 30, "Prefill", "=" * 30)
for i in range(len(meta_info["input_token_logprobs"])):
print(f"{str(meta_info['input_token_logprobs'][i][2].encode()): <20}", end="")
top_ks = (
[str(t[2].encode()) for t in meta_info["input_top_logprobs"][i]]
if meta_info["input_top_logprobs"][i]
else []
)
for top_k in top_ks:
print(f"{top_k: <15}", end="")
print()
print("\n\n", "=" * 30, "Decode", "=" * 30)
for i in range(len(meta_info["output_token_logprobs"])):
print(f"{str(meta_info['output_token_logprobs'][i][2].encode()): <20}", end="")
top_ks = [str(t[2].encode()) for t in meta_info["output_top_logprobs"][i]]
for top_k in top_ks:
print(f"{top_k: <15}", end="")
print()
print(res["text"])
if __name__ == "__main__":
with ThreadPoolExecutor() as executor:
ress = executor.map(srt_api_request, character_names)
for res in ress:
pretty_print(res)
openai_api_request("Hermione Granger")

View File

@@ -0,0 +1,260 @@
"""
Usage:
pip install opencv-python-headless
python3 srt_example_llava_v.py
"""
import argparse
import csv
import json
import os
import time
import requests
import sglang as sgl
@sgl.function
def video_qa(s, num_frames, video_path, question):
s += sgl.user(sgl.video(video_path, num_frames) + question)
s += sgl.assistant(sgl.gen("answer"))
def single(path, num_frames=16):
state = video_qa.run(
num_frames=num_frames,
video_path=path,
question="Please provide a detailed description of the video, focusing on the main subjects, their actions, the background scenes",
temperature=0.0,
max_new_tokens=1024,
)
print(state["answer"], "\n")
def split_into_chunks(lst, num_chunks):
"""Split a list into a specified number of chunks."""
# Calculate the chunk size using integer division. Note that this may drop some items if not evenly divisible.
chunk_size = len(lst) // num_chunks
if chunk_size == 0:
chunk_size = len(lst)
# Use list comprehension to generate chunks. The last chunk will take any remainder if the list size isn't evenly divisible.
chunks = [lst[i : i + chunk_size] for i in range(0, len(lst), chunk_size)]
# Ensure we have exactly num_chunks chunks, even if some are empty
chunks.extend([[] for _ in range(num_chunks - len(chunks))])
return chunks
def save_batch_results(batch_video_files, states, cur_chunk, batch_idx, save_dir):
csv_filename = f"{save_dir}/chunk_{cur_chunk}_batch_{batch_idx}.csv"
with open(csv_filename, "w", newline="") as csvfile:
writer = csv.writer(csvfile)
writer.writerow(["video_name", "answer"])
for video_path, state in zip(batch_video_files, states):
video_name = os.path.basename(video_path)
writer.writerow([video_name, state["answer"]])
def compile_and_cleanup_final_results(cur_chunk, num_batches, save_dir):
final_csv_filename = f"{save_dir}/final_results_chunk_{cur_chunk}.csv"
with open(final_csv_filename, "w", newline="") as final_csvfile:
writer = csv.writer(final_csvfile)
writer.writerow(["video_name", "answer"])
for batch_idx in range(num_batches):
batch_csv_filename = f"{save_dir}/chunk_{cur_chunk}_batch_{batch_idx}.csv"
with open(batch_csv_filename, "r") as batch_csvfile:
reader = csv.reader(batch_csvfile)
next(reader) # Skip header row
for row in reader:
writer.writerow(row)
os.remove(batch_csv_filename)
def find_video_files(video_dir):
# Check if the video_dir is actually a file
if os.path.isfile(video_dir):
# If it's a file, return it as a single-element list
return [video_dir]
# Original logic to find video files in a directory
video_files = []
for root, dirs, files in os.walk(video_dir):
for file in files:
if file.endswith((".mp4", ".avi", ".mov")):
video_files.append(os.path.join(root, file))
return video_files
def batch(video_dir, save_dir, cur_chunk, num_chunks, num_frames=16, batch_size=64):
video_files = find_video_files(video_dir)
chunked_video_files = split_into_chunks(video_files, num_chunks)[cur_chunk]
num_batches = 0
for i in range(0, len(chunked_video_files), batch_size):
batch_video_files = chunked_video_files[i : i + batch_size]
print(f"Processing batch of {len(batch_video_files)} video(s)...")
if not batch_video_files:
print("No video files found in the specified directory.")
return
batch_input = [
{
"num_frames": num_frames,
"video_path": video_path,
"question": "Please provide a detailed description of the video, focusing on the main subjects, their actions, the background scenes.",
}
for video_path in batch_video_files
]
start_time = time.perf_counter()
states = video_qa.run_batch(batch_input, max_new_tokens=512, temperature=0.2)
total_time = time.perf_counter() - start_time
average_time = total_time / len(batch_video_files)
print(
f"Number of videos in batch: {len(batch_video_files)}. Average processing time per video: {average_time:.2f} seconds. Total time for this batch: {total_time:.2f} seconds"
)
save_batch_results(batch_video_files, states, cur_chunk, num_batches, save_dir)
num_batches += 1
compile_and_cleanup_final_results(cur_chunk, num_batches, save_dir)
if __name__ == "__main__":
url = "https://raw.githubusercontent.com/EvolvingLMMs-Lab/sglang/dev/onevision_local/assets/jobs.mp4"
cache_dir = os.path.expanduser("~/.cache")
file_path = os.path.join(cache_dir, "jobs.mp4")
os.makedirs(cache_dir, exist_ok=True)
response = requests.get(url)
response.raise_for_status() # Raise an exception for bad responses
with open(file_path, "wb") as f:
f.write(response.content)
print(f"File downloaded and saved to: {file_path}")
# Create the parser
parser = argparse.ArgumentParser(
description="Run video processing with specified port."
)
# Add an argument for the port
parser.add_argument(
"--port",
type=int,
default=30000,
help="The master port for distributed serving.",
)
parser.add_argument(
"--chunk-idx", type=int, default=0, help="The index of the chunk to process."
)
parser.add_argument(
"--num-chunks", type=int, default=8, help="The number of chunks to process."
)
parser.add_argument(
"--save-dir",
type=str,
default="./work_dirs/llava_video",
help="The directory to save the processed video files.",
)
parser.add_argument(
"--video-dir",
type=str,
default=os.path.expanduser("~/.cache/jobs.mp4"),
help="The directory or path for the processed video files.",
)
parser.add_argument(
"--model-path",
type=str,
default="lmms-lab/LLaVA-NeXT-Video-7B",
help="The model path for the video processing.",
)
parser.add_argument(
"--num-frames",
type=int,
default=16,
help="The number of frames to process in each video.",
)
parser.add_argument("--mm_spatial_pool_stride", type=int, default=2)
# Parse the arguments
args = parser.parse_args()
cur_port = args.port
cur_chunk = args.chunk_idx
num_chunks = args.num_chunks
num_frames = args.num_frames
if "34b" in args.model_path.lower():
tokenizer_path = "liuhaotian/llava-v1.6-34b-tokenizer"
elif "7b" in args.model_path.lower():
tokenizer_path = "llava-hf/llava-1.5-7b-hf"
else:
print("Invalid model path. Please specify a valid model path.")
exit()
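    # Build the model override args passed to the SGLang runtime below
    # (spatial pooling stride, architecture, frame count, model type).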
model_override_args = {}
model_override_args["mm_spatial_pool_stride"] = args.mm_spatial_pool_stride
model_override_args["architectures"] = ["LlavaVidForCausalLM"]
model_override_args["num_frames"] = args.num_frames
model_override_args["model_type"] = "llava"
if "34b" in args.model_path.lower():
model_override_args["image_token_index"] = 64002
if args.num_frames == 32:
model_override_args["rope_scaling"] = {"factor": 2.0, "rope_type": "linear"}
model_override_args["max_sequence_length"] = 4096 * 2
model_override_args["tokenizer_model_max_length"] = 4096 * 2
elif args.num_frames < 32:
pass
else:
print(
"The maximum number of frames to process is 32. Please specify a valid number of frames."
)
exit()
runtime = sgl.Runtime(
model_path=args.model_path, # "liuhaotian/llava-v1.6-vicuna-7b",
tokenizer_path=tokenizer_path,
port=cur_port,
json_model_override_args=json.dumps(model_override_args),
tp_size=1,
)
sgl.set_default_backend(runtime)
print(f"chat template: {runtime.endpoint.chat_template.name}")
# Run a single request
print("\n========== single ==========\n")
root = args.video_dir
if os.path.isfile(root):
video_files = [root]
else:
video_files = [
os.path.join(root, f)
for f in os.listdir(root)
if f.endswith((".mp4", ".avi", ".mov"))
] # Add more extensions if needed
start_time = time.perf_counter() # Start time for processing a single video
for cur_video in video_files[:1]:
print(cur_video)
single(cur_video, num_frames)
end_time = time.perf_counter() # End time for processing a single video
total_time = end_time - start_time
    average_time = total_time / len(
        video_files[:1]
    )  # Average processing time over the videos actually processed above
print(f"Average processing time per video: {average_time:.2f} seconds")
runtime.shutdown()
# # Run a batch of requests
# print("\n========== batch ==========\n")
# if not os.path.exists(args.save_dir):
# os.makedirs(args.save_dir)
# batch(args.video_dir, args.save_dir, cur_chunk, num_chunks, num_frames, num_chunks)
# runtime.shutdown()

View File

@@ -0,0 +1,131 @@
#!/bin/bash
##### USAGE #####
# - First node:
# ```sh
# bash examples/usage/llava_video/srt_example_llava_v.sh K 0 YOUR_VIDEO_PATH YOUR_MODEL_PATH FRAMES_PER_VIDEO
# ```
# - Second node:
# ```sh
# bash examples/usage/llava_video/srt_example_llava_v.sh K 1 YOUR_VIDEO_PATH YOUR_MODEL_PATH FRAMES_PER_VIDEO
# ```
# - The K-th node:
# ```sh
# bash examples/usage/llava_video/srt_example_llava_v.sh K K-1 YOUR_VIDEO_PATH YOUR_MODEL_PATH FRAMES_PER_VIDEO
# ```
# Replace `K`, `YOUR_VIDEO_PATH`, `YOUR_MODEL_PATH`, and `FRAMES_PER_VIDEO` with your specific details.
# CURRENT_ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
CURRENT_ROOT=$(dirname "$0")
echo ${CURRENT_ROOT}
cd ${CURRENT_ROOT}
export PYTHONWARNINGS=ignore
START_TIME=$(date +%s) # Capture start time
NUM_NODES=$1
CUR_NODES_IDX=$2
VIDEO_DIR=$3
MODEL_PATH=$4
NUM_FRAMES=$5
# FRAME_FORMAT=$6
# FRAME_FORMAT=$(echo $FRAME_FORMAT | tr '[:lower:]' '[:upper:]')
# # Check if FRAME_FORMAT is either JPEG or PNG
# if [[ "$FRAME_FORMAT" != "JPEG" && "$FRAME_FORMAT" != "PNG" ]]; then
# echo "Error: FRAME_FORMAT must be either JPEG or PNG."
# exit 1
# fi
# export TARGET_FRAMES=$TARGET_FRAMES
echo "Each video you will sample $NUM_FRAMES frames"
# export FRAME_FORMAT=$FRAME_FORMAT
# echo "The frame format is $FRAME_FORMAT"
# Assuming GPULIST is a bash array containing your GPUs
GPULIST=(0 1 2 3 4 5 6 7)
LOCAL_CHUNKS=${#GPULIST[@]}
echo "Number of GPUs in GPULIST: $LOCAL_CHUNKS"
ALL_CHUNKS=$((NUM_NODES * LOCAL_CHUNKS))
# Calculate GPUs per chunk
GPUS_PER_CHUNK=1
echo $GPUS_PER_CHUNK
for IDX in $(seq 1 $LOCAL_CHUNKS); do
(
START=$(((IDX-1) * GPUS_PER_CHUNK))
LENGTH=$GPUS_PER_CHUNK # Length for slicing, not the end index
CHUNK_GPUS=(${GPULIST[@]:$START:$LENGTH})
# Convert the chunk GPUs array to a comma-separated string
CHUNK_GPUS_STR=$(IFS=,; echo "${CHUNK_GPUS[*]}")
LOCAL_IDX=$((CUR_NODES_IDX * LOCAL_CHUNKS + IDX))
echo "Chunk $(($LOCAL_IDX - 1)) will run on GPUs $CHUNK_GPUS_STR"
        # Pick a random port for this chunk to avoid collisions between chunks.
PORT=$((10000 + RANDOM % 55536))
MAX_RETRIES=10
RETRY_COUNT=0
COMMAND_STATUS=1 # Initialize as failed
while [ $RETRY_COUNT -lt $MAX_RETRIES ] && [ $COMMAND_STATUS -ne 0 ]; do
echo "Running chunk $(($LOCAL_IDX - 1)) on GPUs $CHUNK_GPUS_STR with port $PORT. Attempt $(($RETRY_COUNT + 1))"
CUDA_VISIBLE_DEVICES=$CHUNK_GPUS_STR python3 srt_example_llava_v.py \
--port $PORT \
--num-chunks $ALL_CHUNKS \
--chunk-idx $(($LOCAL_IDX - 1)) \
--save-dir work_dirs/llava_next_video_inference_results \
--video-dir $VIDEO_DIR \
--model-path $MODEL_PATH \
            --num-frames $NUM_FRAMES
        COMMAND_STATUS=$? # Capture the exit status of the python command
if [ $COMMAND_STATUS -ne 0 ]; then
echo "Execution failed for chunk $(($LOCAL_IDX - 1)), attempt $(($RETRY_COUNT + 1)). Retrying..."
RETRY_COUNT=$(($RETRY_COUNT + 1))
sleep 180 # Wait a bit before retrying
else
echo "Execution succeeded for chunk $(($LOCAL_IDX - 1))."
fi
done
if [ $COMMAND_STATUS -ne 0 ]; then
echo "Execution failed for chunk $(($LOCAL_IDX - 1)) after $MAX_RETRIES attempts."
fi
) #&
sleep 2 # Slight delay to stagger the start times
done
wait
cat work_dirs/llava_next_video_inference_results/final_results_chunk_*.csv > work_dirs/llava_next_video_inference_results/final_results_node_${CUR_NODES_IDX}.csv
END_TIME=$(date +%s) # Capture end time
ELAPSED_TIME=$(($END_TIME - $START_TIME))
echo "Total execution time: $ELAPSED_TIME seconds."

View File

@@ -0,0 +1,155 @@
"""
Usage:
***Note: for speculative execution to work, the user must put all "gen" calls inside "assistant".
Show the desired answer format in "assistant". Each "gen" term should have a stop token.
Stream mode is not supported in speculative execution.
E.g.
correct:
sgl.assistant("\nName:" + sgl.gen("name", stop="\n") + "\nBirthday:" + sgl.gen("birthday", stop="\n") + "\nJob:" + sgl.gen("job", stop="\n"))
incorrect:
s += sgl.assistant("\nName:" + sgl.gen("name", stop="\n"))
s += sgl.assistant("\nBirthday:" + sgl.gen("birthday", stop="\n"))
s += sgl.assistant("\nJob:" + sgl.gen("job", stop="\n"))
export OPENAI_API_KEY=sk-******
python3 openai_chat_speculative.py
"""
import sglang as sgl
from sglang import OpenAI, function, set_default_backend
@function(num_api_spec_tokens=256)
def gen_character_spec(s):
s += sgl.system("You are a helpful assistant.")
s += sgl.user("Construct a character within the following format:")
s += sgl.assistant(
"Name: Steve Jobs.\nBirthday: February 24, 1955.\nJob: Apple CEO.\n"
)
s += sgl.user("Please generate new Name, Birthday and Job.\n")
s += sgl.assistant(
"Name:"
+ sgl.gen("name", stop="\n")
+ "\nBirthday:"
+ sgl.gen("birthday", stop="\n")
+ "\nJob:"
+ sgl.gen("job", stop="\n")
)
@function(num_api_spec_tokens=256)
def gen_character_spec_no_few_shot(s):
s += sgl.user("Construct a character. For each field stop with a newline\n")
s += sgl.assistant(
"Name:"
+ sgl.gen("name", stop="\n")
+ "\nAge:"
+ sgl.gen("age", stop="\n")
+ "\nJob:"
+ sgl.gen("job", stop="\n")
)
@function
def gen_character_normal(s):
s += sgl.system("You are a helpful assistant.")
s += sgl.user("What's the answer of 23 + 8?")
s += sgl.assistant(sgl.gen("answer", max_tokens=64))
@function(num_api_spec_tokens=1024)
def multi_turn_question(s, question_1, question_2):
s += sgl.system("You are a helpful assistant.")
s += sgl.user("Answer questions in the following format:")
s += sgl.user(
"Question 1: What is the capital of France?\nQuestion 2: What is the population of this city?\n"
)
s += sgl.assistant(
"Answer 1: The capital of France is Paris.\nAnswer 2: The population of Paris in 2024 is estimated to be around 2.1 million for the city proper.\n"
)
s += sgl.user("Question 1: " + question_1 + "\nQuestion 2: " + question_2)
s += sgl.assistant(
"Answer 1: "
+ sgl.gen("answer_1", stop="\n")
+ "\nAnswer 2: "
+ sgl.gen("answer_2", stop="\n")
)
def test_spec_single_turn():
backend.token_usage.reset()
state = gen_character_spec.run()
for m in state.messages():
print(m["role"], ":", m["content"])
print("\n-- name:", state["name"])
print("-- birthday:", state["birthday"])
print("-- job:", state["job"])
print(backend.token_usage)
def test_inaccurate_spec_single_turn():
state = gen_character_spec_no_few_shot.run()
for m in state.messages():
print(m["role"], ":", m["content"])
print("\n-- name:", state["name"])
print("\n-- age:", state["age"])
print("\n-- job:", state["job"])
def test_normal_single_turn():
state = gen_character_normal.run()
for m in state.messages():
print(m["role"], ":", m["content"])
def test_spec_multi_turn():
state = multi_turn_question.run(
question_1="What is the capital of the United States?",
question_2="List two local attractions in the capital of the United States.",
)
for m in state.messages():
print(m["role"], ":", m["content"])
print("\n-- answer_1 --\n", state["answer_1"])
print("\n-- answer_2 --\n", state["answer_2"])
def test_spec_multi_turn_stream():
state = multi_turn_question.run(
question_1="What is the capital of the United States?",
question_2="List two local attractions.",
stream=True,
)
for out in state.text_iter():
print(out, end="", flush=True)
if __name__ == "__main__":
backend = OpenAI("gpt-4-turbo")
set_default_backend(backend)
print("\n========== test spec single turn ==========\n")
# expect reasonable answer for each field
test_spec_single_turn()
print("\n========== test inaccurate spec single turn ==========\n")
# expect incomplete or unreasonable answers
test_inaccurate_spec_single_turn()
print("\n========== test normal single turn ==========\n")
# expect reasonable answer
test_normal_single_turn()
print("\n========== test spec multi turn ==========\n")
# expect answer with same format as in the few shot
test_spec_multi_turn()
print("\n========== test spec multi turn stream ==========\n")
# expect error in stream_executor: stream is not supported...
test_spec_multi_turn_stream()

View File

@@ -0,0 +1,54 @@
"""
Usage:
python3 openai_speculative.py
"""
from sglang import OpenAI, function, gen, set_default_backend
@function(num_api_spec_tokens=64)
def gen_character_spec(s):
s += "Construct a character within the following format:\n"
s += "Name: Steve Jobs.\nBirthday: February 24, 1955.\nJob: Apple CEO.\n"
s += "\nPlease generate new Name, Birthday and Job.\n"
s += "Name:" + gen("name", stop="\n") + "\nBirthday:" + gen("birthday", stop="\n")
s += "\nJob:" + gen("job", stop="\n") + "\n"
@function
def gen_character_no_spec(s):
s += "Construct a character within the following format:\n"
s += "Name: Steve Jobs.\nBirthday: February 24, 1955.\nJob: Apple CEO.\n"
s += "\nPlease generate new Name, Birthday and Job.\n"
s += "Name:" + gen("name", stop="\n") + "\nBirthday:" + gen("birthday", stop="\n")
s += "\nJob:" + gen("job", stop="\n") + "\n"
@function(num_api_spec_tokens=64)
def gen_character_spec_no_few_shot(s):
# s += "Construct a character with name, birthday, and job:\n"
s += "Construct a character:\n"
s += "Name:" + gen("name", stop="\n") + "\nBirthday:" + gen("birthday", stop="\n")
s += "\nJob:" + gen("job", stop="\n") + "\n"
if __name__ == "__main__":
backend = OpenAI("gpt-3.5-turbo-instruct")
set_default_backend(backend)
for function in [
gen_character_spec,
gen_character_no_spec,
gen_character_spec_no_few_shot,
]:
backend.token_usage.reset()
print(f"function: {function.func.__name__}")
state = function.run()
print("...name:", state["name"])
print("...birthday:", state["birthday"])
print("...job:", state["job"])
print(backend.token_usage)
print()

View File

@@ -0,0 +1,40 @@
"""
Usage:
python3 parallel_sample.py
"""
import sglang as sgl
@sgl.function
def parallel_sample(s, question, n):
s += (
"Question: Compute 1 + 2 + 3\n"
"Reasoning: I need to use a calculator.\n"
"Tool: calculator\n"
"Answer: 6\n"
"Question: Compute 3 + 2 + 2\n"
"Reasoning: I will try a calculator.\n"
"Tool: calculator\n"
"Answer: 7\n"
)
s += "Question: " + question + "\n"
forks = s.fork(n)
forks += "Reasoning:" + sgl.gen("reasoning", stop="\n") + "\n"
forks += "Tool:" + sgl.gen("tool", choices=["calculator", "browser"]) + "\n"
forks += "Answer:" + sgl.gen("answer", stop="\n") + "\n"
forks.join()
sgl.set_default_backend(sgl.OpenAI("gpt-3.5-turbo-instruct"))
# sgl.set_default_backend(sgl.RuntimeEndpoint("http://localhost:30000"))
state = parallel_sample.run(question="Compute 5 + 2 + 4.", n=5, temperature=1.0)
for i in range(5):
obj = {
"reasoning": state["reasoning"][i],
"tool": state["tool"][i],
"answer": state["answer"][i],
}
print(f"[{i}], {obj}")

View File

@@ -0,0 +1,408 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# RAG Powered by SGLang & Chroma Evaluated using Parea\n",
"\n",
"In this notebook, we will build a simple RAG pipeline using SGLang to execute our LLM calls, Chroma as vector database for retrieval and [Parea](https://www.parea.ai) for tracing and evaluation. We will then evaluate the performance of our RAG pipeline. The dataset we will use was created by [Virat](https://twitter.com/virattt) and contains 100 questions, contexts and answers from the Airbnb 2023 10k filing.\n",
"\n",
"The RAG pipeline consists of two steps:\n",
"1. Retrieval: Given a question, we retrieve the relevant context from all provided contexts.\n",
"2. Generation: Given the question and the retrieved context, we generate an answer.\n",
"\n",
" This notebook requires an OpenAI API key.\n",
"\n",
" This notebook requires a Parea API key, which can be created [here](https://docs.parea.ai/api-reference/authentication#parea-api-key)."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Setting up the environment\n",
"\n",
"We will first install the necessary packages: `sglang`, `parea-ai` and `chromadb`."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# note, if you use a Mac M1 chip, you might need to install grpcio 1.59.0 first such that installing chromadb works\n",
"# !pip install grpcio==1.59.0\n",
"\n",
"!pip install sglang[openai] parea-ai chromadb"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Create a Parea API key as outlined [here](https://docs.parea.ai/api-reference/authentication#parea-api-key) and save it in a `.env` file as `PAREA_API_KEY=your-api-key`."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Indexing the data\n",
"\n",
"Now it's time to download the data & index it! For that, we create a collection called `contexts` in Chroma and add the contexts as documents."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import json\n",
"import os\n",
"from typing import List\n",
"\n",
"import chromadb\n",
"\n",
"path_qca = \"airbnb-2023-10k-qca.json\"\n",
"\n",
"if not os.path.exists(path_qca):\n",
" !wget https://virattt.github.io/datasets/abnb-2023-10k.json -O airbnb-2023-10k-qca.json\n",
"\n",
"with open(path_qca, \"r\") as f:\n",
" question_context_answers = json.load(f)\n",
"\n",
"chroma_client = chromadb.PersistentClient()\n",
"collection = chroma_client.get_or_create_collection(name=\"contexts\")\n",
"if collection.count() == 0:\n",
" collection.add(\n",
" documents=[qca[\"context\"] for qca in question_context_answers],\n",
" ids=[str(i) for i in range(len(question_context_answers))],\n",
" )"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Defining the RAG pipeline\n",
"\n",
"We will start with importing the necessary packages, setting up tracing of OpenAI calls via Parea and setting OpenAI as the default backend for SGLang."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import time\n",
"\n",
"from dotenv import load_dotenv\n",
"\n",
"from sglang import function, user, assistant, gen, set_default_backend, OpenAI\n",
"from sglang.lang.interpreter import ProgramState\n",
"from parea import Parea, trace\n",
"\n",
"\n",
"load_dotenv()\n",
"\n",
"os.environ[\"TOKENIZERS_PARALLELISM\"] = \"false\"\n",
"\n",
"p = Parea(api_key=os.getenv(\"PAREA_API_KEY\"), project_name=\"rag_sglang\")\n",
"p.integrate_with_sglang()\n",
"\n",
"set_default_backend(OpenAI(\"gpt-3.5-turbo\"))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Now we can define our retrieval step shown below. Notice, the `trace` decorator which will automatically trace inputs, output, latency, etc. of that call."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"@trace\n",
"def retrieval(question: str) -> List[str]:\n",
" return collection.query(query_texts=[question], n_results=1)[\"documents\"][0]"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Next we will define the generation step which uses SGLang to execute the LLM call."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"@function\n",
"def generation_sglang(s, question: str, *context: str):\n",
" context = \"\\n\".join(context)\n",
" s += user(\n",
" f\"Given this question:\\n{question}\\n\\nAnd this context:\\n{context}\\n\\nAnswer the question.\"\n",
" )\n",
" s += assistant(gen(\"answer\"))\n",
"\n",
"\n",
"@trace\n",
"def generation(question: str, *context):\n",
" state: ProgramState = generation_sglang.run(question, *context)\n",
" while not state.stream_executor.is_finished:\n",
" time.sleep(1)\n",
" return state.stream_executor.variables[\"answer\"]"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Finally, we can tie it together and execute a sample query."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"@trace\n",
"def rag_pipeline(question: str) -> str:\n",
" contexts = retrieval(question)\n",
" return generation(question, *contexts)\n",
"\n",
"\n",
"rag_pipeline(\n",
" \"When did the World Health Organization formally declare an end to the COVID-19 global health emergency?\"\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Debug Trace\n",
"\n",
"The output is unfortunately wrong! Using the traced pipeline, we can see that\n",
"\n",
"- the context is relevant to the question and contains the correct information\n",
"- but, the generation step is cut off as max tokens is set to 16\n",
"\n",
"When opening the generation step in the playground and rerunning the prompt with max. tokens set to 1000, the correct answer is produced.\n",
"\n",
"![RAG Trace](https://drive.google.com/uc?id=1QI243ogGjzbO01tUrR72g9rFoGzUJqVH)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Evaluating RAG Pipelines\n",
"\n",
"Before we apply above's fix, let's dive into evaluating RAG pipelines.\n",
"\n",
"RAG pipelines consist of a retrieval step to fetch relevant information and a generation step to generate a response to a users question. A RAG pipeline can fail at either step. E.g. the retrieval step can fail to find relevant information which makes generating the correct impossible. Another failure mode is that the generation step doesn't leverage the retrieved information correctly. We will apply the following evaluation metrics to understand different failure modes:\n",
"\n",
"- `context_relevancy`: measures how relevant the context is given the question\n",
"- `percent_target_supported_by_context`: measures how much of the target answer is supported by the context; this will give an upper ceiling of how well the generation step can perform\n",
"- `answer_context_faithfulness`: measures how much the generated answer utilizes the context\n",
"- `answer_matches_target`: measures how well the generated answer matches the target answer judged by a LLM and gives a sense of accuracy of our entire pipeline\n",
"\n",
"To use these evaluation metrics, we can import them from `parea.evals.rag` and `parea.evals.general` and apply them to a function by specifying in the `trace` decorator which evaluation metrics to use. The `@trace` decorator will automatically log the results of the evaluation metrics to the Parea dashboard.\n",
"\n",
"Applying them to the retrieval step:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from parea.evals.rag import (\n",
" context_query_relevancy_factory,\n",
" percent_target_supported_by_context_factory,\n",
")\n",
"\n",
"\n",
"context_relevancy_eval = context_query_relevancy_factory()\n",
"percent_target_supported_by_context = percent_target_supported_by_context_factory()\n",
"\n",
"\n",
"@trace(eval_funcs=[context_relevancy_eval, percent_target_supported_by_context])\n",
"def retrieval(question: str) -> List[str]:\n",
" return collection.query(query_texts=[question], n_results=1)[\"documents\"][0]"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Now we can apply `answer_context_faithfulness` and `answer_matches_target` to the generation step."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from parea.evals.general import answer_matches_target_llm_grader_factory\n",
"from parea.evals.rag import answer_context_faithfulness_statement_level_factory\n",
"\n",
"\n",
"answer_context_faithfulness = answer_context_faithfulness_statement_level_factory()\n",
"answer_matches_target_llm_grader = answer_matches_target_llm_grader_factory()\n",
"\n",
"\n",
"@function\n",
"def generation_sglang(s, question: str, *context: str):\n",
" context = \"\\n\".join(context)\n",
" s += user(\n",
" f\"Given this question:\\n{question}\\n\\nAnd this context:\\n{context}\\n\\nAnswer the question.\"\n",
" )\n",
" s += assistant(gen(\"answer\", max_tokens=1_000))\n",
"\n",
"\n",
"@trace(eval_funcs=[answer_context_faithfulness, answer_matches_target_llm_grader])\n",
"def generation(question: str, *context):\n",
" state: ProgramState = generation_sglang.run(question, *context)\n",
" while not state.stream_executor.is_finished:\n",
" time.sleep(1)\n",
" return state.stream_executor.variables[\"answer\"]"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Finally, we tie them together & execute the original sample query."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"@trace\n",
"def rag_pipeline(question: str) -> str:\n",
" contexts = retrieval(question)\n",
" return generation(question, *contexts)\n",
"\n",
"\n",
"rag_pipeline(\n",
" \"When did the World Health Organization formally declare an end to the COVID-19 global health emergency?\"\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Great, the answer is correct! Can you spot the line where we fixed the output truncation issue?\n",
"\n",
"The evaluation scores appear in the bottom right of the logs (screenshot below). Note, that there is no score for `answer_matches_target_llm_grader` and `percent_target_supported_by_context` as these evals are automatically skipped if the target answer is not provided.\n",
"\n",
"![Fixed Max. Tokens](max-tokens-fixed-rag-trace.png)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Running an experiment\n",
"\n",
"Now we are (almost) ready to evaluate the performance of our RAG pipeline on the entire dataset. First, we will need to apply the `nest_asyncio` package to avoid issues with the Jupyter notebook event loop."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"!pip install nest-asyncio\n",
"import nest_asyncio\n",
"\n",
"nest_asyncio.apply()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Running the actual experiment is straight-forward. For that we use `p.experiment` to initialize the experiment with a name, the data (list of key-value pairs fed into our entry function) and the entry function. We then call `run` on the experiment to execute it. Note, that `target` is a reserved key in the data dictionary and will be used as the target answer for evaluation."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"e = p.experiment(\n",
" \"RAG\",\n",
" data=[\n",
" {\n",
" \"question\": qca[\"question\"],\n",
" \"target\": qca[\"answer\"],\n",
" }\n",
" for qca in question_context_answers\n",
" ],\n",
" func=rag_pipeline,\n",
").run()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Analyzing the results\n",
"\n",
"When opening above experiment, we will see an overview of the experiment as shown below. The upper half shows a summary of the statistics on the left and charts to investigate the distribution and relationships of scores on the right. The lower half is a table with the individual traces which we can use to debug individual samples.\n",
"\n",
"When looking at the statistics, we can see that the accuracy of our RAG pipeline is 22% as measured by `answer_matches_target_llm_grader`. Though when checking the quality of our retrieval step (`context_query_relevancy`), we can see that our retrieval step is fetching relevant information in only 27% of all samples. As shown in the GIF, we investigate the relationship between the two and see the two scores have 95% agreement. This confirms that the retrieval step is a major bottleneck for our RAG pipeline. So, now it's your turn to improve the retrieval step!\n",
"\n",
"Note, above link isn't publicly accessible but the experiment can be accessed through [here](https://app.parea.ai/public-experiments/parea/rag_sglang/30f0244a-d56c-44ff-bdfb-8f47626304b6).\n",
"\n",
"![Experiment Results](https://drive.google.com/uc?id=1KMtJBU47nPB02Pvv3SPPTK7RnHRh5YdA)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 2
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython2"
}
},
"nbformat": 4,
"nbformat_minor": 0
}

View File

@@ -0,0 +1,109 @@
"""
Usage:
python -m sglang.launch_server --model-path meta-llama/Llama-2-7b-chat-hf --port 30000
python readme_examples.py
"""
import sglang as sgl
@sgl.function
def tool_use(s, question):
s += "To answer this question: " + question + ". "
s += (
"I need to use a "
+ sgl.gen("tool", choices=["calculator", "search engine"])
+ ". "
)
if s["tool"] == "calculator":
s += "The math expression is" + sgl.gen("expression")
elif s["tool"] == "search engine":
s += "The key word to search is" + sgl.gen("word")
@sgl.function
def tip_suggestion(s):
s += (
"Here are two tips for staying healthy: "
"1. Balanced Diet. 2. Regular Exercise.\n\n"
)
forks = s.fork(2)
for i, f in enumerate(forks):
f += f"Now, expand tip {i+1} into a paragraph:\n"
f += sgl.gen(f"detailed_tip", max_tokens=256, stop="\n\n")
s += "Tip 1:" + forks[0]["detailed_tip"] + "\n"
s += "Tip 2:" + forks[1]["detailed_tip"] + "\n"
s += "In summary" + sgl.gen("summary")
@sgl.function
def regular_expression_gen(s):
s += "Q: What is the IP address of the Google DNS servers?\n"
s += "A: " + sgl.gen(
"answer",
temperature=0,
regex=r"((25[0-5]|2[0-4]\d|[01]?\d\d?).){3}(25[0-5]|2[0-4]\d|[01]?\d\d?)",
)
@sgl.function
def text_qa(s, question):
s += "Q: " + question + "\n"
s += "A:" + sgl.gen("answer", stop="\n")
def driver_tool_use():
state = tool_use.run(question="What is the capital of the United States?")
print(state.text())
print("\n")
def driver_tip_suggestion():
state = tip_suggestion.run()
print(state.text())
print("\n")
def driver_regex():
state = regular_expression_gen.run()
print(state.text())
print("\n")
def driver_batching():
states = text_qa.run_batch(
[
{"question": "What is the capital of the United Kingdom?"},
{"question": "What is the capital of France?"},
{"question": "What is the capital of Japan?"},
],
progress_bar=True,
)
for s in states:
print(s.text())
print("\n")
def driver_stream():
state = text_qa.run(
question="What is the capital of France?", temperature=0.1, stream=True
)
for out in state.text_iter():
print(out, end="", flush=True)
print("\n")
if __name__ == "__main__":
# sgl.set_default_backend(sgl.OpenAI("gpt-3.5-turbo-instruct"))
sgl.set_default_backend(sgl.RuntimeEndpoint("http://localhost:30000"))
driver_tool_use()
driver_tip_suggestion()
driver_regex()
driver_batching()
driver_stream()

View File

@@ -0,0 +1,35 @@
"""
This example demonstrates how to use `min_tokens` to enforce sgl.gen to generate a longer sequence
Usage:
python3 sgl_gen_min_tokens.py
"""
import sglang as sgl
@sgl.function
def long_answer(s):
s += sgl.user("What is the capital of the United States?")
s += sgl.assistant(sgl.gen("answer", min_tokens=64, max_tokens=128))
@sgl.function
def short_answer(s):
s += sgl.user("What is the capital of the United States?")
s += sgl.assistant(sgl.gen("answer"))
if __name__ == "__main__":
runtime = sgl.Runtime(model_path="meta-llama/Meta-Llama-3.1-8B-Instruct")
sgl.set_default_backend(runtime)
state = long_answer.run()
print("=" * 20)
print("Longer Answer", state["answer"])
state = short_answer.run()
print("=" * 20)
print("Short Answer", state["answer"])
runtime.shutdown()

View File

@@ -0,0 +1,49 @@
"""
Usage:
python3 streaming.py
"""
import asyncio
import sglang as sgl
@sgl.function
def multi_turn_question(s, question_1, question_2):
s += sgl.system("You are a helpful assistant.")
s += sgl.user(question_1)
s += sgl.assistant(sgl.gen("answer_1", max_tokens=256))
s += sgl.user(question_2)
s += sgl.assistant(sgl.gen("answer_2", max_tokens=256))
sgl.set_default_backend(sgl.OpenAI("gpt-3.5-turbo"))
def stream_a_variable():
state = multi_turn_question.run(
question_1="What is the capital of the United States?",
question_2="List two local attractions.",
stream=True,
)
for out in state.text_iter(var_name="answer_2"):
print(out, end="", flush=True)
print("\n")
async def async_stream():
state = multi_turn_question.run(
question_1="What is the capital of the United States?",
question_2="List two local attractions.",
stream=True,
)
async for out in state.text_async_iter(var_name="answer_2"):
print(out, end="", flush=True)
print("\n")
if __name__ == "__main__":
stream_a_variable()
asyncio.run(async_stream())

View File

@@ -0,0 +1,10 @@
FROM nvcr.io/nvidia/tritonserver:24.01-py3
WORKDIR /opt
RUN git clone https://github.com/sgl-project/sglang.git
WORKDIR /opt/sglang
RUN pip install --upgrade pip && \
pip install -e "python[all]" && \
pip install datasets

View File

@@ -0,0 +1,35 @@
# sglang_triton
Build the docker image:
```
docker build -t sglang-triton .
```
Then do:
```
docker run -ti --gpus=all --network=host --name sglang-triton -v ./models:/mnt/models sglang-triton
```
inside the docker container:
```
cd sglang
python3 -m sglang.launch_server --model-path mistralai/Mistral-7B-Instruct-v0.2 --port 30000 --mem-fraction-static 0.9
```
with another shell, inside the docker container:
```
docker exec -ti sglang-triton /bin/bash
cd /mnt
tritonserver --model-repository=/mnt/models
```
Send request to the server:
```
curl -X POST http://localhost:8000/v2/models/character_generation/generate \
-H "Content-Type: application/json" \
-d '{
"INPUT_TEXT": ["harry"]
}'
```
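The same request can also be sent from Python. A minimal sketch mirroring the curl call above, using the `requests` library:
```python
import requests

# Mirror of the curl request above, sent to Triton's generate endpoint.
resp = requests.post(
    "http://localhost:8000/v2/models/character_generation/generate",
    json={"INPUT_TEXT": ["harry"]},
)
resp.raise_for_status()
print(resp.json())
```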

View File

@@ -0,0 +1,55 @@
import numpy
import triton_python_backend_utils as pb_utils
from pydantic import BaseModel
import sglang as sgl
from sglang import function
from sglang.srt.constrained.outlines_backend import build_regex_from_object
sgl.set_default_backend(sgl.RuntimeEndpoint("http://localhost:30000"))
class Character(BaseModel):
name: str
eye_color: str
house: str
@function
def character_gen(s, name):
s += (
name
+ " is a character in Harry Potter. Please fill in the following information about this character.\n"
)
s += sgl.gen(
"json_output", max_tokens=256, regex=build_regex_from_object(Character)
)
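# Triton Python backend entry point: converts each request's INPUT_TEXT tensor
# into a batch of SGLang character_gen calls and returns the generated text as
# the OUTPUT_TEXT tensor.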
class TritonPythonModel:
def initialize(self, args):
print("Initialized.")
def execute(self, requests):
responses = []
for request in requests:
tensor_in = pb_utils.get_input_tensor_by_name(request, "INPUT_TEXT")
            if tensor_in is None:
                # Return an empty response for this request and keep processing the rest.
                responses.append(pb_utils.InferenceResponse(output_tensors=[]))
                continue
input_list_names = [
i.decode("utf-8") if isinstance(i, bytes) else i
for i in tensor_in.as_numpy().tolist()
]
input_list_dicts = [{"name": i} for i in input_list_names]
states = character_gen.run_batch(input_list_dicts)
character_strs = [state.text() for state in states]
tensor_out = pb_utils.Tensor(
"OUTPUT_TEXT", numpy.array(character_strs, dtype=object)
)
responses.append(pb_utils.InferenceResponse(output_tensors=[tensor_out]))
return responses

View File

@@ -0,0 +1,23 @@
name: "character_generation"
backend: "python"
input [
{
name: "INPUT_TEXT"
data_type: TYPE_STRING
dims: [ -1 ]
}
]
output [
{
name: "OUTPUT_TEXT"
data_type: TYPE_STRING
dims: [ -1 ]
}
]
instance_group [
{
count: 1
kind: KIND_GPU
gpus: [ 0 ]
}
]

View File

@@ -0,0 +1,76 @@
# SGLang Monitoring Setup
This directory contains a ready-to-use monitoring setup for SGLang using Prometheus and Grafana.
## Prerequisites
- Docker and Docker Compose installed
- SGLang server running with metrics enabled
## Usage
1. Start your SGLang server with metrics enabled:
```bash
python -m sglang.launch_server --model-path meta-llama/Meta-Llama-3.1-8B-Instruct --port 30000 --enable-metrics
```
By default, the metrics server will run on `127.0.0.1:30000`.
2. Start the monitoring stack:
```bash
cd examples/monitoring
docker compose up
```
3. Access the monitoring interfaces:
- Grafana: [http://localhost:3000](http://localhost:3000)
- Prometheus: [http://localhost:9090](http://localhost:9090)
Default Grafana login credentials:
- Username: `admin`
- Password: `admin`
You'll be prompted to change the password on first login.
4. The SGLang dashboard will be automatically available in the "SGLang Monitoring" folder.
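Before wiring up Prometheus, you can optionally confirm that the SGLang server is exposing metrics. A minimal sketch, assuming the metrics are served at the default `/metrics` path on the port used above:
```python
import requests

# Fetch the Prometheus metrics exposed by the SGLang server started above.
resp = requests.get("http://127.0.0.1:30000/metrics")
resp.raise_for_status()

# Print the SGLang-specific metric lines as a sanity check.
for line in resp.text.splitlines():
    if line.startswith("sglang:"):
        print(line)
```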
## Troubleshooting
### Port Conflicts
If you see errors like "port is already allocated":
1. Check if you already have Prometheus or Grafana running:
```bash
docker ps | grep -E 'prometheus|grafana'
```
2. Stop any conflicting containers:
```bash
docker stop <container_id>
```
3. Ensure no other services are using ports 9090 and 3000:
```bash
lsof -i :9090
lsof -i :3000
```
### Connection Issues
If Grafana cannot connect to Prometheus:
1. Check that both services are running
2. Verify the datasource configuration in Grafana
3. Check that your SGLang server is properly exposing metrics
## Configuration
- Prometheus configuration: `prometheus.yaml`
- Docker Compose configuration: `docker-compose.yaml`
- Grafana datasource: `grafana/datasources/datasource.yaml`
- Grafana dashboard configuration: `grafana/dashboards/config/dashboard.yaml`
- SGLang dashboard JSON: `grafana/dashboards/json/sglang-dashboard.json`
## Customization
You can customize the monitoring setup by modifying the configuration files as needed.

View File

@@ -0,0 +1,28 @@
version: '3'
services:
prometheus:
image: prom/prometheus:latest
container_name: prometheus
network_mode: host
volumes:
- ./prometheus.yaml:/etc/prometheus/prometheus.yml
command:
- '--config.file=/etc/prometheus/prometheus.yml'
- '--storage.tsdb.path=/prometheus'
grafana:
image: grafana/grafana:latest
container_name: grafana
network_mode: host
volumes:
- ./grafana/datasources:/etc/grafana/provisioning/datasources
- ./grafana/dashboards/config:/etc/grafana/provisioning/dashboards
- ./grafana/dashboards/json:/var/lib/grafana/dashboards
environment:
- GF_AUTH_ANONYMOUS_ENABLED=true
- GF_AUTH_ANONYMOUS_ORG_ROLE=Viewer
- GF_AUTH_BASIC_ENABLED=false
- GF_USERS_ALLOW_SIGN_UP=false
- GF_DASHBOARDS_DEFAULT_HOME_DASHBOARD_PATH=/var/lib/grafana/dashboards/sglang-dashboard.json
depends_on:
- prometheus

View File

@@ -0,0 +1,11 @@
apiVersion: 1
providers:
- name: 'SGLang'
orgId: 1
folder: 'SGLang Monitoring'
type: file
disableDeletion: false
updateIntervalSeconds: 10
allowUiUpdates: false
options:
path: /var/lib/grafana/dashboards

View File

@@ -0,0 +1,984 @@
{
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": {
"type": "grafana",
"uid": "-- Grafana --"
},
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations & Alerts",
"type": "dashboard"
}
]
},
"editable": true,
"fiscalYearStartMonth": 0,
"graphTooltip": 0,
"id": 8,
"links": [],
"panels": [
{
"datasource": {
"default": true,
"type": "prometheus"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green"
},
{
"color": "red",
"value": 80
}
]
}
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
"y": 0
},
"id": 14,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"hideZeros": false,
"mode": "single",
"sort": "none"
}
},
"pluginVersion": "11.6.0",
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "ddyfngn31dg5cf"
},
"disableTextWrap": false,
"editorMode": "code",
"expr": "histogram_quantile(0.99, sum by (le) (rate(sglang:e2e_request_latency_seconds_bucket[$__rate_interval])))\r\n",
"fullMetaSearch": false,
"includeNullMetadata": true,
"instant": false,
"legendFormat": "P99",
"range": true,
"refId": "A",
"useBackend": false
},
{
"datasource": {
"type": "prometheus",
"uid": "ddyfngn31dg5cf"
},
"disableTextWrap": false,
"editorMode": "code",
"expr": "histogram_quantile(0.9, sum by (le) (rate(sglang:e2e_request_latency_seconds_bucket[$__rate_interval])))\r\n",
"fullMetaSearch": false,
"hide": false,
"includeNullMetadata": true,
"instant": false,
"legendFormat": "P90",
"range": true,
"refId": "B",
"useBackend": false
},
{
"datasource": {
"type": "prometheus",
"uid": "ddyfngn31dg5cf"
},
"disableTextWrap": false,
"editorMode": "code",
"expr": "histogram_quantile(0.5, sum by (le) (rate(sglang:e2e_request_latency_seconds_bucket[$__rate_interval])))\r\n",
"fullMetaSearch": false,
"hide": false,
"includeNullMetadata": true,
"instant": false,
"legendFormat": "P50",
"range": true,
"refId": "C",
"useBackend": false
},
{
"datasource": {
"type": "prometheus",
"uid": "ddyfngn31dg5cf"
},
"disableTextWrap": false,
"editorMode": "code",
"expr": "avg(rate(sglang:e2e_request_latency_seconds_sum[$__rate_interval]) / rate(sglang:e2e_request_latency_seconds_count[$__rate_interval]))\r\n",
"fullMetaSearch": false,
"hide": false,
"includeNullMetadata": true,
"instant": false,
"legendFormat": "Avg",
"range": true,
"refId": "D",
"useBackend": false
}
],
"title": "End-to-End Request Latency",
"type": "timeseries"
},
{
"datasource": {
"default": true,
"type": "prometheus"
},
"fieldConfig": {
"defaults": {
"custom": {
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"scaleDistribution": {
"type": "linear"
}
}
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 0
},
"id": 17,
"maxDataPoints": 30,
"options": {
"calculate": false,
"calculation": {
"yBuckets": {
"scale": {
"type": "linear"
}
}
},
"cellGap": 1,
"cellValues": {},
"color": {
"exponent": 0.5,
"fill": "dark-orange",
"mode": "scheme",
"reverse": false,
"scale": "exponential",
"scheme": "Spectral",
"steps": 64
},
"exemplars": {
"color": "rgba(255,0,255,0.7)"
},
"filterValues": {
"le": 1e-9
},
"legend": {
"show": true
},
"rowsFrame": {
"layout": "auto"
},
"tooltip": {
"mode": "single",
"showColorScale": true,
"yHistogram": false
},
"yAxis": {
"axisPlacement": "left",
"reverse": false,
"unit": "secs"
}
},
"pluginVersion": "11.6.0",
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "ddyfngn31dg5cf"
},
"disableTextWrap": false,
"editorMode": "builder",
"expr": "sum(increase(sglang:e2e_request_latency_seconds_bucket{model_name=~\"$model_name\"}[$__rate_interval])) by (le)\r\n",
"format": "heatmap",
"fullMetaSearch": false,
"includeNullMetadata": true,
"instant": false,
"legendFormat": "{{le}}",
"range": true,
"refId": "A",
"useBackend": false
}
],
"title": "End-to-End Request Latency(s) Heatmap",
"type": "heatmap"
},
{
"datasource": {
"default": true,
"type": "prometheus"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green"
},
{
"color": "red",
"value": 80
}
]
}
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
"y": 8
},
"id": 20,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"hideZeros": false,
"mode": "single",
"sort": "none"
}
},
"pluginVersion": "11.6.0",
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "ddyfngn31dg5cf"
},
"disableTextWrap": false,
"editorMode": "code",
"expr": "histogram_quantile(0.99, sum by (le) (rate(sglang:time_to_first_token_seconds_bucket[$__rate_interval])))\r\n",
"fullMetaSearch": false,
"includeNullMetadata": true,
"instant": false,
"legendFormat": "P99",
"range": true,
"refId": "A",
"useBackend": false
},
{
"datasource": {
"type": "prometheus",
"uid": "ddyfngn31dg5cf"
},
"disableTextWrap": false,
"editorMode": "code",
"expr": "histogram_quantile(0.9, sum by (le) (rate(sglang:time_to_first_token_seconds_bucket[$__rate_interval])))\r\n",
"fullMetaSearch": false,
"hide": false,
"includeNullMetadata": true,
"instant": false,
"legendFormat": "P90",
"range": true,
"refId": "B",
"useBackend": false
},
{
"datasource": {
"type": "prometheus",
"uid": "ddyfngn31dg5cf"
},
"disableTextWrap": false,
"editorMode": "code",
"expr": "histogram_quantile(0.5, sum by (le) (rate(sglang:time_to_first_token_seconds_bucket[$__rate_interval])))\r\n",
"fullMetaSearch": false,
"hide": false,
"includeNullMetadata": true,
"instant": false,
"legendFormat": "P50",
"range": true,
"refId": "C",
"useBackend": false
},
{
"datasource": {
"type": "prometheus",
"uid": "ddyfngn31dg5cf"
},
"disableTextWrap": false,
"editorMode": "code",
"expr": "avg(rate(sglang:time_to_first_token_seconds_sum[$__rate_interval]) / rate(sglang:time_to_first_token_seconds_count[$__rate_interval]))\r\n",
"fullMetaSearch": false,
"hide": false,
"includeNullMetadata": true,
"instant": false,
"legendFormat": "Avg",
"range": true,
"refId": "D",
"useBackend": false
}
],
"title": "Time-To-First-Token Latency",
"type": "timeseries"
},
{
"datasource": {
"default": true,
"type": "prometheus"
},
"fieldConfig": {
"defaults": {
"custom": {
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"scaleDistribution": {
"type": "linear"
}
}
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 8
},
"id": 19,
"maxDataPoints": 30,
"options": {
"calculate": false,
"calculation": {
"xBuckets": {
"value": ""
},
"yBuckets": {
"mode": "size",
"scale": {
"type": "linear"
},
"value": ""
}
},
"cellGap": 1,
"color": {
"exponent": 0.5,
"fill": "dark-orange",
"mode": "scheme",
"reverse": false,
"scale": "exponential",
"scheme": "Spectral",
"steps": 64
},
"exemplars": {
"color": "rgba(255,0,255,0.7)"
},
"filterValues": {
"le": 1e-9
},
"legend": {
"show": true
},
"rowsFrame": {
"layout": "auto"
},
"tooltip": {
"mode": "single",
"showColorScale": true,
"yHistogram": false
},
"yAxis": {
"axisPlacement": "left",
"reverse": false
}
},
"pluginVersion": "11.6.0",
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "ddyfngn31dg5cf"
},
"disableTextWrap": false,
"editorMode": "builder",
"exemplar": false,
"expr": "sum by(le) (increase(sglang:time_to_first_token_seconds_bucket{model_name=~\"$model_name\"}[$__rate_interval]))",
"format": "heatmap",
"fullMetaSearch": false,
"includeNullMetadata": true,
"instant": false,
"interval": "",
"legendFormat": "{{le}}",
"range": true,
"refId": "A",
"useBackend": false
}
],
"title": "Time-To-First-Token Seconds Heatmap",
"type": "heatmap"
},
{
"datasource": {
"default": true,
"type": "prometheus"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green"
},
{
"color": "red",
"value": 80
}
]
}
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
"y": 16
},
"id": 7,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"hideZeros": false,
"mode": "single",
"sort": "none"
}
},
"pluginVersion": "11.6.0",
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "ddyfngn31dg5cf"
},
"disableTextWrap": false,
"editorMode": "code",
"expr": "sglang:num_running_reqs",
"fullMetaSearch": false,
"includeNullMetadata": true,
"instant": false,
"interval": "",
"legendFormat": "{{instance}}",
"range": true,
"refId": "A",
"useBackend": false
}
],
"title": "Num Running Requests",
"type": "timeseries"
},
{
"datasource": {
"default": true,
"type": "prometheus"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green"
},
{
"color": "red",
"value": 80
}
]
}
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 16
},
"id": 18,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"hideZeros": false,
"mode": "single",
"sort": "none"
}
},
"pluginVersion": "11.6.0",
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "ddyfngn31dg5cf"
},
"editorMode": "code",
"expr": "sglang:gen_throughput",
"instant": false,
"legendFormat": "{{instance}}",
"range": true,
"refId": "A"
}
],
"title": "Token Generation Throughput (Tokens / S)",
"type": "timeseries"
},
{
"datasource": {
"default": true,
"type": "prometheus"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green"
},
{
"color": "red",
"value": 80
}
]
}
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
"y": 24
},
"id": 11,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"hideZeros": false,
"mode": "single",
"sort": "none"
}
},
"pluginVersion": "11.6.0",
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "ddyfngn31dg5cf"
},
"disableTextWrap": false,
"editorMode": "code",
"expr": "sglang:cache_hit_rate",
"fullMetaSearch": false,
"includeNullMetadata": true,
"instant": false,
"legendFormat": "{{instance}}",
"range": true,
"refId": "A",
"useBackend": false
}
],
"title": "Cache Hit Rate",
"type": "timeseries"
},
{
"datasource": {
"default": true,
"type": "prometheus"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green"
},
{
"color": "red",
"value": 80
}
]
}
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 24
},
"id": 8,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"hideZeros": false,
"mode": "single",
"sort": "none"
}
},
"pluginVersion": "11.6.0",
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "ddyfngn31dg5cf"
},
"disableTextWrap": false,
"editorMode": "code",
"expr": "sglang:num_queue_reqs",
"fullMetaSearch": false,
"includeNullMetadata": true,
"instant": false,
"legendFormat": "{{instance}}",
"range": true,
"refId": "A",
"useBackend": false
}
],
"title": "Number Queued Requests",
"type": "timeseries"
}
],
"preload": false,
"refresh": "5s",
"schemaVersion": 41,
"tags": [],
"templating": {
"list": [
{
"current": {
"text": "127.0.0.1:30000",
"value": "127.0.0.1:30000"
},
"datasource": {
"type": "prometheus"
},
"definition": "label_values(instance)",
"includeAll": false,
"label": "instance",
"name": "instance",
"options": [],
"query": {
"qryType": 1,
"query": "label_values(instance)",
"refId": "PrometheusVariableQueryEditor-VariableQuery"
},
"refresh": 1,
"regex": "",
"type": "query"
},
{
"current": {
"text": "meta-llama/Llama-3.1-8B-Instruct",
"value": "meta-llama/Llama-3.1-8B-Instruct"
},
"datasource": {
"type": "prometheus"
},
"definition": "label_values(model_name)",
"includeAll": false,
"label": "model name",
"name": "model_name",
"options": [],
"query": {
"qryType": 1,
"query": "label_values(model_name)",
"refId": "PrometheusVariableQueryEditor-VariableQuery"
},
"refresh": 1,
"regex": "",
"type": "query"
}
]
},
"time": {
"from": "now-30m",
"to": "now"
},
"timepicker": {},
"timezone": "browser",
"title": "SGLang Dashboard",
"uid": "sglang-dashboard",
"version": 11
}

View File

@@ -0,0 +1,8 @@
apiVersion: 1
datasources:
- name: Prometheus
type: prometheus
access: proxy
url: http://localhost:9090
isDefault: true
editable: false

View File

@@ -0,0 +1,10 @@
# prometheus.yaml
global:
scrape_interval: 5s
evaluation_interval: 30s
scrape_configs:
- job_name: sglang
static_configs:
- targets:
- '127.0.0.1:30000'

View File

@@ -0,0 +1,176 @@
# gputrc2graph.py
This script processes NVIDIA Nsight Systems (`nsys`) GPU trace files
(`.nsys-rep`) collected with `-t cuda` tracing enabled, and generates
kernel-level summaries and visualizations of GPU and non-GPU time. It is
useful for profiling and analyzing `nsys` profile output.
## Usage
### Command-line Arguments
- `--in_file`
**(required)**
List of input files and their metadata. Each entry should be in the format:
`<nsys-rep>,<engine>,<model>,<elapsed_nonprofiled_sec>`
- `nsys-rep`: Path to the `.nsys-rep` file.
- `engine`: Engine name (e.g., `sglang`).
- `model`: Model name (e.g., `llama`, `gpt-oss`, `ds`).
- `elapsed_nonprofiled_sec`: Wall-clock runtime (in seconds) without
profiling. Specify `0` to use the elapsed time from the nsys-rep file
(this may inflate non-GPU time if actual runtime without profiling is
less). Multiple entries can be provided, separated by spaces.
- `--out_dir`
Output directory for the generated CSV and HTML files.
If not specified, results are saved in the current directory.
- `--title`
Title for the HTML chart/visualization.
- `--nsys_cmd`
Path to the `nsys` command.
Default: `nsys` (assumes it is in your PATH).
Use this if `nsys` is not in your system PATH.
## Notes
- Make sure you have pandas installed. Any version is fine.
- Make sure [nsys](https://developer.nvidia.com/nsight-systems/get-started) is
installed, and specify the path to the `nsys` command with `--nsys_cmd` if it
is not in your PATH. The nsys version must be >= the nsys profile version that
was used to collect the traces when profiling the server, so that nsys can
process the nsys-rep that was generated.
- For more details on available engines and models, see the help string in
the script or run:
```bash
python3 gputrc2graph.py --help
```
## Example 1: analyze a single profile
To analyze the GPU cycles of, for example, a llama-3.1-8B model served with sglang:
1. Run the following command to collect an nsys profile of the sglang server:
```bash
nsys profile -t cuda -o nsys_res -f true --trace-fork-before-exec=true \
--cuda-graph-trace=node --delay <DELAY> --duration <DURATION> \
python3 -m sglang.launch_server --model meta-llama/Llama-3.1-8B ...
```
where:
- DELAY: how many seconds to delay nsys before it starts collecting profiles,
needed so that profiles aren't captured until the sglang server has come up
and load generation starts.
- DURATION: how many seconds the nsys profile runs before generating the
profile. This should be greater than the duration of the run.
2. After the server starts, run the client load-generation command. Once the
test completes, after DURATION seconds, nsys profile will generate an
nsys_res.nsys-rep file and shut down the server.
3. Run step #1 again, this time starting up the server without collecting the
profile.
4. Run step #2 again, and record the total time to complete the test in
seconds. This value will be used by the script to calculate the
CPU (non-GPU) seconds for the analysis.
5. Say the run elapsed time from step #4 is 132 seconds. Run script to
analyze:
```bash
python3 gputrc2graph.py \
--in_file run1.nsys-rep,sglang,llama,132
```
The command will produce 2 files for analysis:
- result.html: groups the kernel time by category and displays it as a
stacked bar chart.
- result.csv: shows how the kernel names are mapped to the different
categories.
### HTML visualization with result.html
The html file shows the number of elapsed seconds spent in the different GPU
substages, or categories. In this example, attention kernels are the biggest
category at 63 seconds, followed by "gemm" kernels. This lets the user
prioritize the kernels to focus on for performance optimizations.
There's also a data table appended underneath the bar chart for copying out to
other post-processing tools.
### Kernel to category mapping with result.csv
Suppose the user would like to focus on improving triton kernels. At .01 sec
they are not the biggest consumer of cycles, but perhaps they haven't been
optimized. The next step is to use result.csv to dive into which kernels
compose the triton-kernel GPU cycles.
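As a quick way to do that, the csv can be loaded with pandas and filtered to a
single category. The snippet below is a minimal sketch; the exact column names
("Name", "Category", "Elapsed Time (sec)", "Instances") are assumptions based
on the description above and may differ slightly in the generated file.
```python
import pandas as pd

# Load the per-kernel summary produced by gputrc2graph.py
# (column names here are assumed; check the header of your result.csv).
df = pd.read_csv("result.csv")

# Keep only the kernels that were mapped to the "triton" category.
triton = df[df["Category"] == "triton"]

# List the most expensive triton kernels first.
print(
    triton.sort_values("Elapsed Time (sec)", ascending=False)[
        ["Name", "Elapsed Time (sec)", "Instances"]
    ].head(10)
)
```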
## Example 2: analyze multiple profiles
Suppose the user has multiple nsys trace files, captured for different models,
say llama and gpt-oss in this case, and wishes to compare their GPU/non-GPU
time. Something like the following command can be used:
```bash
python3 gputrc2graph.py \
--in_file run1.nsys-rep,sglang,llama,100 run2.nsys-rep,sglang,gpt-oss,102 \
--out_dir results
```
The analysis process is similar to example 1, but now there are multiple
stacked bar charts that can be compared. The categories for the different
kernels remain the same, so it's easy to compare the GPU cycles for the same
categories.
Once a category is shown to have more cycles for one configuration than the
other, the next step is to use the csv file to see which kernels are mapped
into that category and which of them take the most time, since those drive the
difference in the overall category.
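A minimal sketch of such a side-by-side comparison, assuming results/result.csv
from the command above:
```python
import pandas as pd

df = pd.read_csv("results/result.csv")
# total elapsed seconds per category, one column per Model_Engine
pivot = df.pivot_table(
    values="Elapsed Time (sec)",
    index="Category",
    columns="Model_Engine",
    aggfunc="sum",
)
print(pivot.round(2))
```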
## Example 3: add new classification for a new model
To create a new engine DEF with model ABC, add another json file in the same
directory as gputrc2graph.py, using the same format as the existing json files.
The script automatically picks up all json files in that directory as
engine/model specifications.
For this new model, suppose there are 4 kernels to be classified into "gemm"
and "attn", where the gemm kernel names contain "H" or "I" and the attn kernel
names contain "J" or "K". The json file would look like the following:
```json
{
"DEF": {
"ABC": {
"H|I": "gemm",
"J|K": "attn",
"CUDA mem": "non-gpu-H_D_memops",
".*": "misc"
}
}
}
```
Each entry in the dictionary consists of:
- key: a regex used to classify the kernels
- value: the category to classify the kernels into.
The last 2 entries are common to all engine/models: CUDA memory operations and
a 'misc' catch-all for anything left over that can't be classified.
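As a rough illustration of how the script applies such a mapping (the first
matching regex wins, so keep the `.*` catch-all last):
```python
import re

# hypothetical mapping, matching the DEF/ABC example above
mapping = {"H|I": "gemm", "J|K": "attn", "CUDA mem": "non-gpu-H_D_memops", ".*": "misc"}

def categorize(kernel_name):
    # the first pattern that matches determines the category
    for pattern, category in mapping.items():
        if re.search(pattern, kernel_name):
            return category

print(categorize("kernel_H_fp16"))      # -> gemm
print(categorize("some_other_kernel"))  # -> misc
```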
When invoking gputrc2graph.py, specify a trace file with this new model/engine
like the following:
```bash
--in_file new.nsys-rep,DEF,ABC,<runtime>
```
If the engine_DEF.json file already exists, just add the model as a new node in
the existing engine file, after the other models.
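For instance, a hypothetical engine_DEF.json that already describes another
model could be extended like this (the OTHER_MODEL block is purely
illustrative):
```json
{
  "DEF": {
    "OTHER_MODEL": {
      "gemm|nvjet": "gemm",
      "CUDA mem": "non-gpu-H_D_memops",
      ".*": "misc"
    },
    "ABC": {
      "H|I": "gemm",
      "J|K": "attn",
      "CUDA mem": "non-gpu-H_D_memops",
      ".*": "misc"
    }
  }
}
```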

View File

@@ -0,0 +1,344 @@
"""
This generates gpu kernel analysis output from nsys rep. Will call nsys
stats -r cuda_gpu_kern_trace, get non-overlapped gpu cycles, then generate
csv and html output for analysis
"""
import argparse
import logging
import os
import regex as re
logger = logging.getLogger(__name__)
# helper for loading the engine/model kernel-annotation specs from json files
def load_engine_model():
"""returns engine_model built from all json files in the current dir"""
import glob
import json
engine_model = {}
json_files = glob.glob(os.path.join(os.path.dirname(__file__) or ".", "*.json"))
for fname in json_files:
with open(fname, encoding="utf-8") as f:
engine_model.update(json.load(f))
return engine_model
class GPUTrace2Graph:
"""
Parses output of nsys report, generates csv and bar chart output
"""
def __init__(self):
import pandas as pd # avoid importing till needed
self.pd = pd
self.pd.options.mode.copy_on_write = True
# helper functions for generating trace->summary csvs
def gen_nonoverlapped_sum_from_gputrace(self, in_file, out_file):
logger.info("loading %s", in_file)
df = self.pd.read_csv(
in_file, usecols=["Start (ns)", "Duration (ns)", "Device", "Strm", "Name"]
)
df["End (ns)"] = df["Start (ns)"] + df["Duration (ns)"]
df = self.sum_non_overlapping_intervals(df)
# get ready to print table with elapsed times per kernel
df["Instances"] = 1
df_sum = df.groupby("Name", as_index=False).agg(
{"Elapsed Time (ns)": "sum", "Duration (ns)": "sum", "Instances": "size"}
)
# generate csv
df_sum["Total Time (sec)"] = df_sum["Duration (ns)"] / 1e9
df_sum["Elapsed Time (sec)"] = df_sum["Elapsed Time (ns)"] / 1e9
df_sum = df_sum.sort_values(by="Elapsed Time (sec)", ascending=False)
df_sum[["Elapsed Time (sec)", "Total Time (sec)", "Instances", "Name"]].to_csv(
out_file, index=False
)
def sum_non_overlapping_intervals(self, df):
"""
returns new sorted df with Elapsed Time (ns) column using
vectorized operations
"""
logger.info("sorting %s trace records by start time", str(df.shape))
# Sort by start time and reset index
df = df.sort_values(by="Start (ns)").reset_index(drop=True)
# Initialize elapsed time as duration
df["Elapsed Time (ns)"] = df["Duration (ns)"]
# Get numpy arrays for faster operations
starts = df["Start (ns)"].values
ends = df["End (ns)"].values
# Keep track of current interval end
current_end = ends[0]
display_units = max(1, int(len(df) / 100))
# Update current_end for overlapping intervals
for i in range(1, len(df)):
if i % display_units == 0:
print(f"processing trace: {int(i/len(df) * 100)} %", end="\r")
if starts[i] <= current_end:
if ends[i] > current_end:
# Partial overlap
df.iloc[i, df.columns.get_loc("Elapsed Time (ns)")] = (
ends[i] - current_end
)
current_end = ends[i]
else:
# Complete overlap
df.iloc[i, df.columns.get_loc("Elapsed Time (ns)")] = 0
else:
# No overlap
current_end = ends[i]
return df
# functions for generating html files
def make_html(self, df, output_dir, title):
"""make html graph from df"""
import plotly.express as px
if df.empty:
return
output_name = os.path.join(output_dir, "result")
if not title:
title = "Model_Engine"
x = "Model_Engine"
y = "Elapsed Time (sec)"
color = "Category"
""" generate kernel mapping table """
# Sort Model_Engine categories by last field after underscore
df["Model_Engine"] = self.pd.Categorical(
df["Model_Engine"],
sorted(df["Model_Engine"].unique(), key=lambda x: x.split("_")[-1]),
)
df[["Model_Engine", color, "Instances", "Name", y]].sort_values(
by=color
).to_csv(f"{output_name}.csv", index=False)
graph = px.histogram(
df.round(2),
x=x,
y=y,
title=(f"{y} for {title}"),
color=color,
text_auto=True,
)
# wrap x axis labels
graph.update_xaxes(automargin=True)
graph.write_html(f"{output_name}.html")
"""
Generate data table with columns per Model_Engine into result.html
"""
pivot_df = df.pivot_table(
values="Elapsed Time (sec)",
index="Category",
columns="Model_Engine",
aggfunc="sum",
observed=False,
).round(2)
# Add sum row at bottom
pivot_df.loc["total_elapsed_sec"] = pivot_df.sum()
pivot_df.fillna("").to_html("temp.html")
with (
open(f"{output_name}.html", "a", encoding="utf-8") as outfile,
open("temp.html", encoding="utf-8") as infile,
):
outfile.write(infile.read())
os.remove("temp.html")
print(
f"Finished generating: \n"
f" {output_name}.html for stack bar chart \n"
f" {output_name}.csv for Kernel-Category mapping"
)
def anno_gpu_kernname(self, df, mapping):
"""add "Category" column"""
def anno_gpu_kernname_helper(name):
for kern_name, val in mapping.items():
if re.search(kern_name, name):
return val
df["Category"] = df["Name"].apply(anno_gpu_kernname_helper)
def make_nongpu_row(self, df, nongpu_sec):
"""this will append non-gpu time entry at end of df"""
nongpu_row = self.pd.DataFrame([df.iloc[-1]])
nongpu_row["Category"] = nongpu_row["Name"] = "CPU(non-GPU)"
nongpu_row["Instances"] = 1
nongpu_row["Elapsed Time (sec)"] = nongpu_sec
return nongpu_row
def is_valid_file(self, base_file):
"""asserts if base_file is non-existent or is empty"""
assert (
os.path.isfile(base_file) and os.path.getsize(base_file) > 0
), f"{base_file} doesn't exist or is empty"
def should_gen_file(self, new_file, base_file):
"""figure out if new file should be generated from base_file"""
self.is_valid_file(base_file)
if (
os.path.exists(new_file)
and (os.path.getmtime(new_file) > os.path.getmtime(base_file))
and (os.path.getsize(base_file) > 0)
):
logger.info("reusing %s", new_file)
return False
else:
logger.info("generating %s", new_file)
return True
def gen_sum_file(self, file, nsys_cmd):
"""
generates sum file from nsys trace with times per kernel and
returns the name of the sum file
"""
import subprocess
file_dir = os.path.dirname(file)
file_name = os.path.basename(file)
if not file_dir:
file_dir = "."
# Walk through trace and get the total non-overlapped time
nsys_stats_file = os.path.join(file_dir, f"{file_name}_cuda_gpu_trace.csv")
sum_file = os.path.join(file_dir, f"{file_name}_cuda_gpu_kernel_tracesum.csv")
if self.should_gen_file(nsys_stats_file, file):
cmd = [
nsys_cmd,
"stats",
"-r",
"cuda_gpu_trace",
file,
"-o",
f"{file_dir}/{file_name}",
]
cmd_str = " ".join(cmd)
logger.info("+ %s", cmd_str)
# estimate time based on calibrated 240M/min
file_size_mb = os.path.getsize(file) / 1e6
logger.info(
"nsys stats for %.2f MB file expected to take %.2f min",
file_size_mb,
file_size_mb / 240,
)
try:
subprocess.run(cmd, check=True)
except (FileNotFoundError, subprocess.CalledProcessError) as e:
logger.error(
"'%s' failed: %s. Use --nsys_cmd to specify nsys path", cmd_str, e
)
exit(1)
logger.info("generating non-overalapped sum %s", sum_file)
self.gen_nonoverlapped_sum_from_gputrace(nsys_stats_file, sum_file)
self.is_valid_file(sum_file)
logger.info("Finished generating %s", sum_file)
return sum_file
def gen_graph(self, in_file, out_dir, title, nsys_cmd, engine_model):
"""generates graph and csv file from in_file into out_dir"""
# Initialize an empty DataFrame to store combined data
combined_df = self.pd.DataFrame()
for idx, (file, engine, model, total_sec) in enumerate(in_file):
file_dir = os.path.dirname(file)
file_name = os.path.basename(file)
if not file_dir:
file_dir = "."
sum_file = self.gen_sum_file(file, nsys_cmd)
# read kernel summary file
df = self.pd.read_csv(sum_file)
# annotate kernel to their categories
assert engine_model.get(engine), f"engine {engine} unknown"
assert engine_model[engine].get(model), f"model {model} unknown"
# remove nsys-rep from file_name for shorter x-label
file_name = file_name.replace(".nsys-rep", "")
df["Model_Engine"] = f"{model}_{engine}_{file_name}_{idx}"
self.anno_gpu_kernname(df, engine_model[engine][model])
# patch in non-gpu time
gpu_sec = round(df["Elapsed Time (sec)"].sum(), 1)
total_sec = round(float(total_sec), 1)
if total_sec < gpu_sec:
logger.warning(
"Elapsed sec %.2f < GPU sec %.2f resetting Elapsed sec ",
total_sec,
gpu_sec,
)
total_sec = gpu_sec
nongpu_row = self.make_nongpu_row(df, total_sec - gpu_sec)
df = self.pd.concat([df, nongpu_row], ignore_index=True)
combined_df = self.pd.concat([combined_df, df], ignore_index=True)
if out_dir is None:
out_dir = "."
else:
os.makedirs(out_dir, exist_ok=True)
# generate html file
self.make_html(combined_df, out_dir, title)
def parse_tuple(s):
return tuple(s.split(","))
def main():
logging.basicConfig(
format=("%(asctime)s - %(levelname)s - %(message)s"), level=logging.INFO
)
parser = argparse.ArgumentParser(
description=(
"Process nsys rep and generate kernel non-overlapped cycles. \n"
"Example:\n"
"gputrc2graph.py --in_file d1.nsys-rep,sglang,llama,100 \n"
"d2.nsys-rep,sglang,gpt-oss,102 "
'--out_dir results/ --title "Model=gpt-oss SGLANG chart"'
),
formatter_class=argparse.RawDescriptionHelpFormatter,
)
# load supported engine_model
engine_model_supported = load_engine_model()
# Get a string representation of supported engine/model combinations
engine_model_supported_str = ", ".join(
f"{engine}:[{', '.join(models.keys())}]"
for engine, models in engine_model_supported.items()
)
parser.add_argument(
"--in_file",
type=parse_tuple,
nargs="+",
help=(
"list of (nsys-rep, engine, model, elapsed_nonprofiled_sec) "
"separated by space. Elapsed_nonprofiled_sec is runtime without "
"profiling used to calculate non-gpu time. Specify 0 to use "
"elapsed time from nsys-rep but that might inflate non-gpu time. "
f"Available engine:[model] are: {engine_model_supported_str} "
f"Example: --infile d1.nsys-rep,sglan,llama,100 "
"d2.nsys-rep,sglang,gpt-oss,102"
),
required=True,
)
parser.add_argument("--out_dir", help=("output dir for result.csv/html"))
parser.add_argument("--title", help=("title for html chart"))
parser.add_argument(
"--nsys_cmd",
help=("nsys cmd, e.g. /usr/bin/nsys, Default: nsys"),
default="nsys",
)
args = parser.parse_args()
gputrace = GPUTrace2Graph()
gputrace.gen_graph(
args.in_file, args.out_dir, args.title, args.nsys_cmd, engine_model_supported
)
if __name__ == "__main__":
main()

View File

@@ -0,0 +1,61 @@
{
"sglang": {
"llama": {
"gemm|nvjet": "gemm",
"fused_moe_kernel|GroupProblemShape|group_gemm_starts|bmm_|GemmUniversal": "moe_gemm",
"moe|sigmoid": "moe",
"CatArrayBatched|prepare_inputs": "prepare_next",
"ncclDevKernel|cross_device_reduce": "nccl_and_custom_ar",
"_norm_|Norm": "norm",
"topk": "topk",
"act_and_mul_": "activation",
"Rotary": "rope",
"SoftMax": "softmax",
"flash|fmha": "attn",
"elementwise": "elementwise",
"fp8_quant|cvt_|quantize": "quantize",
"reduce_kernel": "reduce",
"triton": "triton_kernel",
"CUDA mem": "non-gpu-H_D_memops",
".*": "misc"
},
"ds": {
"block_fp8_matmul": "block_fp8_gemm",
"gemm|matmul|nvjet": "gemm",
"fused_moe_kernel": "moe_gemm",
"moe|expert|sigmoid": "moe",
"CatArrayBatched|write_req_to": "prepare_next",
"ncclDevKernel|cross_device_reduce|all_gather": "nccl_and_custom_ar",
"Norm": "norm",
"topk": "topk",
"activation|act_and_mul": "activation",
"compute_position_kernel": "rope",
"elementwise": "elementwise",
"fp8_quant|quant_fp8|quantize": "quantize",
"SoftMax": "softmax",
"reduce": "reduce",
"_fwd_|create_flash|::mla::|KVCache": "attn",
"CUDA mem": "non-gpu-H_D_memops",
".*": "misc"
},
"gpt-oss": {
"gemm|nvjet": "gemm",
"fused_moe_kernel|_group_gemm|GroupProblemShape|GemmUniversal|bmm_|matmul_ogs_|_topk_forward|_combined_routing|_sum_bitmatrix_rows|_compute_writeback_idx": "moe_gemm",
"moe|sigmoid": "moe",
"CatArrayBatched|prepare_inputs": "prepare_next",
"_norm_|Norm": "norm",
"ncclDevKernel|cross_device_reduce|allreduce": "nccl_and_custom_ar",
"topk|TopK": "topk",
"act_and_mul_": "activation",
"Rotary": "rope",
"SoftMax": "softmax",
"flash|fmha": "attn",
"elementwise": "elementwise",
"fp8_quant|cvt_|quantize": "quantize",
"reduce_kernel": "reduce",
"triton": "triton_kernel",
"CUDA mem": "non-gpu-H_D_memops",
".*": "misc"
}
}
}

View File

@@ -0,0 +1,45 @@
# Runtime examples
Most of the examples below require you to start a server in a separate terminal before you can run them. Please see the code for detailed instructions.
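For example, a typical way to start such a server (the model path here is just an illustration) is:
```bash
python -m sglang.launch_server --model-path meta-llama/Llama-3.1-8B-Instruct --port 30000
```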
## Native API
* `lora.py`: An example of how to use LoRA adapters.
* `multimodal_embedding.py`: An example of how to perform [multi-modal embedding](Alibaba-NLP/gme-Qwen2-VL-2B-Instruct).
* `openai_batch_chat.py`: An example of how to process batch requests for chat completions.
* `openai_batch_complete.py`: An example of how to process batch requests for text completions.
* **`openai_chat_with_response_prefill.py`**:
An example that demonstrates how to [prefill a response](https://eugeneyan.com/writing/prompting/#prefill-claudes-responses) using the OpenAI API by enabling the `continue_final_message` parameter.
When enabled, the final (partial) assistant message is removed and its content is used as a prefill so that the model continues that message rather than starting a new turn. See [Anthropic's prefill example](https://docs.anthropic.com/en/docs/build-with-claude/prompt-engineering/prefill-claudes-response#example-structured-data-extraction-with-prefilling) for more context.
* `reward_model.py`: An example of how to extract scores from a reward model.
* `vertex_predict.py`: An example of how to deploy a model to [Vertex AI](https://cloud.google.com/vertex-ai?hl=en).
## Engine
The `engine` folder contains examples that show how to use the [Offline Engine API](https://docs.sglang.ai/backend/offline_engine_api.html#Offline-Engine-API) for common workflows.
* `custom_server.py`: An example of how to deploy a custom server.
* `embedding.py`: An example of how to extract embeddings.
* `launch_engine.py`: An example of how to launch the Engine.
* `offline_batch_inference_eagle.py`: An example of how to perform speculative decoding using [EAGLE](https://docs.sglang.ai/backend/speculative_decoding.html).
* `offline_batch_inference_torchrun.py`: An example of how to perform inference using [torchrun](https://pytorch.org/docs/stable/elastic/run.html).
* `offline_batch_inference_vlm.py`: An example of how to use VLMs with the engine.
* `offline_batch_inference.py`: An example of how to use the engine to perform inference on a batch of examples.
## Hidden States
The `hidden_states` folder contains examples of how to extract hidden states using SGLang. Please note that this might degrade throughput due to CUDA graph rebuilding.
* `hidden_states_engine.py`: An example of how to extract hidden states using the Engine API.
* `hidden_states_server.py`: An example of how to extract hidden states using the Server API.
## Multimodal
SGLang supports multimodal inputs for various model architectures. The `multimodal` folder contains examples showing how to use urls, files or encoded data to make requests to multimodal models. Examples include querying the [Llava-OneVision](multimodal/llava_onevision_server.py) model (image, multi-image, video), Llava-backed [Qwen-Llava](multimodal/qwen_llava_server.py) and [Llama3-Llava](multimodal/llama3_llava_server.py) models (image, multi-image), and Mistral AI's [Pixtral](multimodal/pixtral_server.py) (image, multi-image).
## Token In, Token Out
The folder `token_in_token_out` shows how to perform inference where we provide tokens as input and receive tokens as the response.
* `token_in_token_out_{llm|vlm}_{engine|server}.py`: Shows how to perform token in, token out workflow for llm/vlm using either the engine or native API.

View File

@@ -0,0 +1,53 @@
from sanic import Sanic, text
from sanic.response import json
import sglang as sgl
engine = None
# Create an instance of the Sanic app
app = Sanic("sanic-server")
# Define an asynchronous route handler
@app.route("/generate", methods=["POST"])
async def generate(request):
prompt = request.json.get("prompt")
if not prompt:
return json({"error": "Prompt is required"}, status=400)
# async_generate returns a dict
result = await engine.async_generate(prompt)
return text(result["text"])
@app.route("/generate_stream", methods=["POST"])
async def generate_stream(request):
prompt = request.json.get("prompt")
if not prompt:
return json({"error": "Prompt is required"}, status=400)
# async_generate returns a dict
result = await engine.async_generate(prompt, stream=True)
# https://sanic.dev/en/guide/advanced/streaming.md#streaming
# init the response
response = await request.respond()
# result is an async generator
async for chunk in result:
await response.send(chunk["text"])
await response.eof()
def run_server():
global engine
engine = sgl.Engine(model_path="meta-llama/Meta-Llama-3.1-8B-Instruct")
app.run(host="0.0.0.0", port=8000, single_process=True)
if __name__ == "__main__":
run_server()

View File

@@ -0,0 +1,27 @@
import sglang as sgl
def main():
# Sample prompts.
prompts = [
"Hello, my name is",
"The president of the United States is",
"The capital of France is",
"The future of AI is",
]
# Create an LLM.
llm = sgl.Engine(
model_path="Alibaba-NLP/gte-Qwen2-1.5B-instruct", is_embedding=True
)
outputs = llm.encode(prompts)
# Print the outputs (embedding vectors)
for prompt, output in zip(prompts, outputs):
print("===============================")
print(f"Prompt: {prompt}\nEmbedding vector: {output['embedding']}")
# The __main__ condition is necessary here because we use "spawn" to create subprocesses
# Spawn starts a fresh program every time; without the __main__ guard, it would fall into an infinite loop of spawning processes from sgl.Engine.
if __name__ == "__main__":
main()

View File

@@ -0,0 +1,189 @@
"""
FastAPI server example for text generation using SGLang Engine and demonstrating client usage.
Starts the server, sends requests to it, and prints responses.
Usage:
python fastapi_engine_inference.py --model-path Qwen/Qwen2.5-0.5B-Instruct --tp_size 1 --host 127.0.0.1 --port 8000
"""
import os
import subprocess
import time
from contextlib import asynccontextmanager
import requests
from fastapi import FastAPI, Request
from fastapi.responses import JSONResponse
import sglang as sgl
from sglang.utils import terminate_process
engine = None
# Use FastAPI's lifespan manager to initialize/shutdown the engine
@asynccontextmanager
async def lifespan(app: FastAPI):
"""Manages SGLang engine initialization during server startup."""
global engine
# Initialize the SGLang engine when the server starts
# Adjust model_path and other engine arguments as needed
print("Loading SGLang engine...")
engine = sgl.Engine(
model_path=os.getenv("MODEL_PATH"), tp_size=int(os.getenv("TP_SIZE"))
)
print("SGLang engine loaded.")
yield
# Clean up engine resources when the server stops (optional, depends on engine needs)
print("Shutting down SGLang engine...")
# engine.shutdown() # Or other cleanup if available/necessary
print("SGLang engine shutdown.")
app = FastAPI(lifespan=lifespan)
@app.post("/generate")
async def generate_text(request: Request):
"""FastAPI endpoint to handle text generation requests."""
global engine
    if not engine:
        # FastAPI ignores Flask-style (body, status) tuples, so return a JSONResponse
        return JSONResponse({"error": "Engine not initialized"}, status_code=503)
try:
data = await request.json()
prompt = data.get("prompt")
max_new_tokens = data.get("max_new_tokens", 128)
temperature = data.get("temperature", 0.7)
        if not prompt:
            return JSONResponse({"error": "Prompt is required"}, status_code=400)
# Use async_generate for non-blocking generation
state = await engine.async_generate(
prompt,
sampling_params={
"max_new_tokens": max_new_tokens,
"temperature": temperature,
},
# Add other parameters like stop, top_p etc. as needed
)
return {"generated_text": state["text"]}
    except Exception as e:
        return JSONResponse({"error": str(e)}, status_code=500)
# Helper function to start the server
def start_server(args, timeout=60):
"""Starts the Uvicorn server as a subprocess and waits for it to be ready."""
base_url = f"http://{args.host}:{args.port}"
command = [
"python",
"-m",
"uvicorn",
"fastapi_engine_inference:app",
f"--host={args.host}",
f"--port={args.port}",
]
process = subprocess.Popen(command, stdout=None, stderr=None)
start_time = time.perf_counter()
with requests.Session() as session:
while time.perf_counter() - start_time < timeout:
try:
# Check the /docs endpoint which FastAPI provides by default
response = session.get(
f"{base_url}/docs", timeout=5
) # Add a request timeout
if response.status_code == 200:
print(f"Server {base_url} is ready (responded on /docs)")
return process
except requests.ConnectionError:
# Specific exception for connection refused/DNS error etc.
pass
except requests.Timeout:
# Specific exception for request timeout
print(f"Health check to {base_url}/docs timed out, retrying...")
pass
except requests.RequestException as e:
# Catch other request exceptions
print(f"Health check request error: {e}, retrying...")
pass
# Use a shorter sleep interval for faster startup detection
time.sleep(1)
# If loop finishes, raise the timeout error
# Attempt to terminate the failed process before raising
if process:
print(
"Server failed to start within timeout, attempting to terminate process..."
)
terminate_process(process) # Use the imported terminate_process
raise TimeoutError(
f"Server failed to start at {base_url} within the timeout period."
)
def send_requests(server_url, prompts, max_new_tokens, temperature):
"""Sends generation requests to the running server for a list of prompts."""
# Iterate through prompts and send requests
for i, prompt in enumerate(prompts):
print(f"\n[{i+1}/{len(prompts)}] Sending prompt: '{prompt}'")
payload = {
"prompt": prompt,
"max_new_tokens": max_new_tokens,
"temperature": temperature,
}
try:
response = requests.post(f"{server_url}/generate", json=payload, timeout=60)
result = response.json()
print(f"Prompt: {prompt}\nResponse: {result['generated_text']}")
except requests.exceptions.Timeout:
print(f" Error: Request timed out for prompt '{prompt}'")
except requests.exceptions.RequestException as e:
print(f" Error sending request for prompt '{prompt}': {e}")
if __name__ == "__main__":
"""Main entry point for the script."""
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--host", type=str, default="127.0.0.1")
parser.add_argument("--port", type=int, default=8000)
parser.add_argument("--model-path", type=str, default="Qwen/Qwen2.5-0.5B-Instruct")
parser.add_argument("--tp_size", type=int, default=1)
args = parser.parse_args()
# Pass the model to the child uvicorn process via an env var
os.environ["MODEL_PATH"] = args.model_path
os.environ["TP_SIZE"] = str(args.tp_size)
# Start the server
process = start_server(args)
# Define the prompts and sampling parameters
prompts = [
"Hello, my name is",
"The president of the United States is",
"The capital of France is",
"The future of AI is",
]
max_new_tokens = 64
temperature = 0.1
# Define server url
server_url = f"http://{args.host}:{args.port}"
# Send requests to the server
send_requests(server_url, prompts, max_new_tokens, temperature)
# Terminate the server process
terminate_process(process)

View File

@@ -0,0 +1,17 @@
"""
This example demonstrates how to launch the offline engine.
"""
import sglang as sgl
def main():
llm = sgl.Engine(model_path="meta-llama/Meta-Llama-3.1-8B-Instruct")
llm.generate("What is the capital of France?")
llm.shutdown()
# The __main__ condition is necessary here because we use "spawn" to create subprocesses
# Spawn starts a fresh program every time; without the __main__ guard, it would fall into an infinite loop of spawning processes from sgl.Engine.
if __name__ == "__main__":
main()

View File

@@ -0,0 +1,43 @@
"""
Usage:
python3 offline_batch_inference.py --model meta-llama/Llama-3.1-8B-Instruct
"""
import argparse
import dataclasses
import sglang as sgl
from sglang.srt.server_args import ServerArgs
def main(
server_args: ServerArgs,
):
# Sample prompts.
prompts = [
"Hello, my name is",
"The president of the United States is",
"The capital of France is",
"The future of AI is",
]
# Create a sampling params object.
sampling_params = {"temperature": 0.8, "top_p": 0.95}
# Create an LLM.
llm = sgl.Engine(**dataclasses.asdict(server_args))
outputs = llm.generate(prompts, sampling_params)
# Print the outputs.
for prompt, output in zip(prompts, outputs):
print("===============================")
print(f"Prompt: {prompt}\nGenerated text: {output['text']}")
# The __main__ condition is necessary here because we use "spawn" to create subprocesses
# Spawn starts a fresh program every time; without the __main__ guard, it would fall into an infinite loop of spawning processes from sgl.Engine.
if __name__ == "__main__":
parser = argparse.ArgumentParser()
ServerArgs.add_cli_args(parser)
args = parser.parse_args()
server_args = ServerArgs.from_cli_args(args)
main(server_args)

View File

@@ -0,0 +1,65 @@
"""
Usage:
python offline_batch_inference_async.py --model-path Qwen/Qwen2-VL-7B-Instruct
Note:
This demo shows the usage of async generation,
which is useful to implement an online-like generation with batched inference.
"""
import argparse
import asyncio
import dataclasses
import time
import sglang as sgl
from sglang.srt.server_args import ServerArgs
class InferenceEngine:
def __init__(self, **kwargs):
self.engine = sgl.Engine(**kwargs)
async def generate(self, prompt, sampling_params):
result = await self.engine.async_generate(prompt, sampling_params)
return result
async def run_server(server_args):
inference = InferenceEngine(**dataclasses.asdict(server_args))
# Sample prompts.
prompts = [
"Hello, my name is",
"The president of the United States is",
"The capital of France is",
"The future of AI is",
] * 100
# Create a sampling params object.
sampling_params = {"temperature": 0.8, "top_p": 0.95}
# Run the generation tasks concurrently in async mode.
tasks = []
for prompt in prompts:
task = asyncio.create_task(inference.generate(prompt, sampling_params))
tasks.append(task)
    # Get and print the results; awaiting each task waits for its completion,
    # so the result can be read directly.
    for task in tasks:
        result = await task
        print(f"Generated text: {result['text']}")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
ServerArgs.add_cli_args(parser)
args = parser.parse_args()
server_args = ServerArgs.from_cli_args(args)
asyncio.run(run_server(server_args))

View File

@@ -0,0 +1,38 @@
import sglang as sgl
def main():
# Sample prompts.
prompts = [
"Hello, my name is",
"The president of the United States is",
"The capital of France is",
"The future of AI is",
]
# Create a sampling params object.
sampling_params = {"temperature": 0, "max_new_tokens": 30}
# Create an LLM.
llm = sgl.Engine(
model_path="meta-llama/Llama-2-7b-chat-hf",
speculative_algorithm="EAGLE",
speculative_draft_model_path="lmsys/sglang-EAGLE-llama2-chat-7B",
speculative_num_steps=3,
speculative_eagle_topk=4,
speculative_num_draft_tokens=16,
cuda_graph_max_bs=8,
)
outputs = llm.generate(prompts, sampling_params)
# Print the outputs.
for prompt, output in zip(prompts, outputs):
print("===============================")
print(f"Prompt: {prompt}\nGenerated text: {output['text']}")
# The __main__ condition is necessary here because we use "spawn" to create subprocesses
# Spawn starts a fresh program every time; without the __main__ guard, it would fall into an infinite loop of spawning processes from sgl.Engine.
if __name__ == "__main__":
main()

View File

@@ -0,0 +1,74 @@
"""
Usage:
python3 offline_batch_inference.py
"""
from urllib.request import urlopen
import sglang as sgl
def load_prompt() -> str:
# Test cases with various lengths can be found at:
#
# https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2.5-1M/test-data/64k.txt
# https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2.5-1M/test-data/200k.txt
# https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2.5-1M/test-data/600k.txt
# https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2.5-1M/test-data/1m.txt
with urlopen(
"https://qianwen-res.oss-cn-beijing.aliyuncs.com"
"/Qwen2.5-1M/test-data/64k.txt",
timeout=5,
) as response:
prompt = response.read().decode("utf-8")
return prompt
# Processing the prompt.
def process_requests(llm: sgl.Engine, prompts: list[str]) -> None:
# Create a sampling params object.
sampling_params = {
"temperature": 0.7,
"top_p": 0.8,
"top_k": 20,
"repetition_penalty": 1.05,
"max_new_tokens": 256,
}
# Generate texts from the prompts.
outputs = llm.generate(prompts, sampling_params)
# Print the outputs.
for output in outputs:
prompt_token_ids = output["meta_info"]["prompt_tokens"]
generated_text = output["text"]
print(
f"Prompt length: {prompt_token_ids}, " f"Generated text: {generated_text!r}"
)
# Create an LLM.
def initialize_engine() -> sgl.Engine:
llm = sgl.Engine(
model_path="Qwen/Qwen2.5-7B-Instruct-1M",
context_length=1048576,
page_size=256,
attention_backend="dual_chunk_flash_attn",
tp_size=4,
disable_radix_cache=True,
enable_mixed_chunk=False,
enable_torch_compile=False,
chunked_prefill_size=131072,
mem_fraction_static=0.6,
log_level="DEBUG",
)
return llm
def main():
llm = initialize_engine()
prompt = load_prompt()
process_requests(llm, [prompt])
if __name__ == "__main__":
main()

View File

@@ -0,0 +1,52 @@
"""
Usage:
python offline_batch_inference_vlm.py --model-path Qwen/Qwen2-VL-7B-Instruct
"""
import argparse
import dataclasses
import sglang as sgl
from sglang.srt.parser.conversation import chat_templates
from sglang.srt.server_args import ServerArgs
def main(
server_args: ServerArgs,
):
vlm = sgl.Engine(**dataclasses.asdict(server_args))
conv = chat_templates[server_args.chat_template].copy()
image_token = conv.image_token
image_url = "https://github.com/sgl-project/sglang/blob/main/test/lang/example_image.png?raw=true"
prompt = f"What's in this image?\n{image_token}"
sampling_params = {
"temperature": 0.001,
"max_new_tokens": 30,
}
output = vlm.generate(
prompt=prompt,
image_data=image_url,
sampling_params=sampling_params,
)
print("===============================")
print(f"Prompt: {prompt}")
print(f"Generated text: {output['text']}")
vlm.shutdown()
# The __main__ condition is necessary here because we use "spawn" to create subprocesses
# Spawn starts a fresh program every time; without the __main__ guard, it would fall into an infinite loop of spawning processes from sgl.Engine.
if __name__ == "__main__":
parser = argparse.ArgumentParser()
ServerArgs.add_cli_args(parser)
args = parser.parse_args()
server_args = ServerArgs.from_cli_args(args)
main(server_args)

View File

@@ -0,0 +1,54 @@
# SGLang Engine
SGLang provides a direct inference engine without the need for an HTTP server. There are generally these use cases:
- [Offline Batch Inference](#offline-batch-inference)
- [Embedding Generation](#embedding-generation)
- [Custom Server](#custom-server)
- [Token-In-Token-Out for RLHF](#token-in-token-out-for-rlhf)
- [Inference Using FastAPI](#inference-using-fastapi)
## Examples
### [Offline Batch Inference](./offline_batch_inference.py)
In this example, we launch an SGLang engine and feed it a batch of inputs for inference. If you provide a very large batch, the engine will intelligently schedule the requests so they are processed efficiently without OOM (Out of Memory) errors.
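A minimal sketch of this workflow, condensed from `offline_batch_inference.py` in this folder (the model path is illustrative):
```python
import sglang as sgl


def main():
    llm = sgl.Engine(model_path="meta-llama/Meta-Llama-3.1-8B-Instruct")
    prompts = ["Hello, my name is", "The capital of France is"]
    outputs = llm.generate(prompts, {"temperature": 0.8, "top_p": 0.95})
    for prompt, output in zip(prompts, outputs):
        print(f"Prompt: {prompt}\nGenerated text: {output['text']}")
    llm.shutdown()


# The __main__ guard is required because the engine spawns subprocesses.
if __name__ == "__main__":
    main()
```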
### [Embedding Generation](./embedding.py)
In this example, we launch an SGLang engine and feed a batch of inputs for embedding generation.
### [Custom Server](./custom_server.py)
This example demonstrates how to create a custom server on top of the SGLang Engine. We use [Sanic](https://sanic.dev/en/) as an example. The server supports both non-streaming and streaming endpoints.
#### Steps
1. Install Sanic:
```bash
pip install sanic
```
2. Run the server:
```bash
python custom_server.py
```
3. Send requests:
```bash
curl -X POST http://localhost:8000/generate -H "Content-Type: application/json" -d '{"prompt": "The Transformer architecture is..."}'
curl -X POST http://localhost:8000/generate_stream -H "Content-Type: application/json" -d '{"prompt": "The Transformer architecture is..."}' --no-buffer
```
This will send both non-streaming and streaming requests to the server.
### [Token-In-Token-Out for RLHF](../token_in_token_out)
In this example, we launch an SGLang engine, feed tokens as input and generate tokens as output.
### [Inference Using FastAPI](fastapi_engine_inference.py)
This example demonstrates how to create a FastAPI server that uses the SGLang engine for text generation.

View File

@@ -0,0 +1,59 @@
# SPDX-License-Identifier: Apache-2.0
"""
Saves each worker's model state dict directly to a checkpoint, which enables a
fast load path for large tensor-parallel models where each worker only needs to
read its own shard rather than the entire checkpoint.
Example usage:
python save_remote_state.py \
--model-path /path/to/load \
--tensor-parallel-size 8 \
--remote-model-save-url [protocol]://[host]:[port]/[model_name] \
Then, the model can be loaded with
llm = Engine(
model_path="[protocol]://[host]:[port]/[model_name]",
tensor_parallel_size=8,
)
"""
import dataclasses
from argparse import ArgumentParser
from pathlib import Path
from sglang import Engine, ServerArgs
parser = ArgumentParser()
ServerArgs.add_cli_args(parser)
parser.add_argument(
"--remote-model-save-url",
required=True,
type=str,
help="remote address to store model weights",
)
parser.add_argument(
"--remote-draft-model-save-url",
default=None,
type=str,
help="remote address to store draft model weights",
)
def main(args):
engine_args = ServerArgs.from_cli_args(args)
model_path = engine_args.model_path
if not Path(model_path).is_dir():
raise ValueError("model path must be a local directory")
# Create LLM instance from arguments
llm = Engine(**dataclasses.asdict(engine_args))
llm.save_remote_model(
url=args.remote_model_save_url, draft_url=args.remote_draft_model_save_url
)
print("save remote (draft) model successfully")
if __name__ == "__main__":
args = parser.parse_args()
main(args)

View File

@@ -0,0 +1,74 @@
# SPDX-License-Identifier: Apache-2.0
"""
Saves each worker's model state dict directly to a checkpoint, which enables a
fast load path for large tensor-parallel models where each worker only needs to
read its own shard rather than the entire checkpoint.
Example usage:
python save_sharded_state.py \
--model-path /path/to/load \
--quantization deepspeedfp \
--tensor-parallel-size 8 \
--output /path/to/save
Then, the model can be loaded with
llm = Engine(
model_path="/path/to/save",
load_format="sharded_state",
quantization="deepspeedfp",
tensor_parallel_size=8,
)
"""
import dataclasses
import os
import shutil
from argparse import ArgumentParser
from pathlib import Path
from sglang import Engine, ServerArgs
parser = ArgumentParser()
ServerArgs.add_cli_args(parser)
parser.add_argument(
"--output", "-o", required=True, type=str, help="path to output checkpoint"
)
parser.add_argument(
"--file-pattern", type=str, help="string pattern of saved filenames"
)
parser.add_argument(
"--max-file-size",
type=str,
default=5 * 1024**3,
help="max size (in bytes) of each safetensors file",
)
def main(args):
engine_args = ServerArgs.from_cli_args(args)
model_path = engine_args.model_path
if not Path(model_path).is_dir():
raise ValueError("model path must be a local directory")
# Create LLM instance from arguments
llm = Engine(**dataclasses.asdict(engine_args))
Path(args.output).mkdir(exist_ok=True)
llm.save_sharded_model(
path=args.output, pattern=args.file_pattern, max_size=args.max_file_size
)
# Copy metadata files to output directory
for file in os.listdir(model_path):
if os.path.splitext(file)[1] not in (".bin", ".pt", ".safetensors"):
if os.path.isdir(os.path.join(model_path, file)):
shutil.copytree(
os.path.join(model_path, file), os.path.join(args.output, file)
)
else:
shutil.copy(os.path.join(model_path, file), args.output)
if __name__ == "__main__":
args = parser.parse_args()
main(args)

View File

@@ -0,0 +1,66 @@
"""
Usage:
python hidden_states.py
Note that each time you change the `return_hidden_states` parameter,
the cuda graph will be recaptured, which might lead to a performance hit.
So avoid getting hidden states and completions alternately.
"""
import torch
import sglang as sgl
def main():
prompts = [
"Hello, my name is",
"The president of the United States is",
"The capital of France is",
"The future of AI is",
]
# Create an LLM.
llm = sgl.Engine(
model_path="Alibaba-NLP/gte-Qwen2-1.5B-instruct",
enable_return_hidden_states=True,
)
sampling_params = {
"temperature": 0.8,
"top_p": 0.95,
"max_new_tokens": 10,
}
outputs = llm.generate(
prompts, sampling_params=sampling_params, return_hidden_states=True
)
llm.shutdown()
for prompt, output in zip(prompts, outputs):
for i in range(len(output["meta_info"]["hidden_states"])):
output["meta_info"]["hidden_states"][i] = torch.tensor(
output["meta_info"]["hidden_states"][i], dtype=torch.bfloat16
)
print("===============================")
print(
f"Prompt: {prompt}\n"
f"Generated text: {output['text']}\n"
f"Prompt_Tokens: {output['meta_info']['prompt_tokens']}\t"
f"Completion_tokens: {output['meta_info']['completion_tokens']}"
)
print("Hidden states: ")
hidden_states = torch.cat(
[
i.unsqueeze(0) if len(i.shape) == 1 else i
for i in output["meta_info"]["hidden_states"]
]
)
print(hidden_states)
print()
# The __main__ condition is necessary here because we use "spawn" to create subprocesses
# Spawn starts a fresh program every time; without the __main__ guard, it would fall into an infinite loop of spawning processes from sgl.Engine.
if __name__ == "__main__":
main()

View File

@@ -0,0 +1,81 @@
"""
Usage:
python hidden_states_server.py
Note that each time you change the `return_hidden_states` parameter,
the cuda graph will be recaptured, which might lead to a performance hit.
So avoid getting hidden states and completions alternately.
"""
import requests
import torch
from sglang.test.test_utils import is_in_ci
from sglang.utils import terminate_process, wait_for_server
if is_in_ci():
from docs.backend.patch import launch_server_cmd
else:
from sglang.utils import launch_server_cmd
def main():
# Launch the server
server_process, port = launch_server_cmd(
"python -m sglang.launch_server --model-path Alibaba-NLP/gte-Qwen2-1.5B-instruct --enable-return-hidden-states --host 0.0.0.0"
)
wait_for_server(f"http://localhost:{port}")
prompts = [
"Hello, my name is",
"The president of the United States is",
"The capital of France is",
"The future of AI is",
]
sampling_params = {
"temperature": 0.8,
"top_p": 0.95,
"max_new_tokens": 10,
}
json_data = {
"text": prompts,
"sampling_params": sampling_params,
"return_hidden_states": True,
}
response = requests.post(
f"http://localhost:{port}/generate",
json=json_data,
)
terminate_process(server_process)
outputs = response.json()
for prompt, output in zip(prompts, outputs):
for i in range(len(output["meta_info"]["hidden_states"])):
output["meta_info"]["hidden_states"][i] = torch.tensor(
output["meta_info"]["hidden_states"][i], dtype=torch.bfloat16
)
print("===============================")
print(
f"Prompt: {prompt}\n"
f"Generated text: {output['text']}\n"
f"Prompt_Tokens: {output['meta_info']['prompt_tokens']}\t"
f"Completion_tokens: {output['meta_info']['completion_tokens']}"
)
print("Hidden states: ")
hidden_states = torch.cat(
[
i.unsqueeze(0) if len(i.shape) == 1 else i
for i in output["meta_info"]["hidden_states"]
]
)
print(hidden_states)
print()
if __name__ == "__main__":
main()

examples/runtime/lora.py
View File

@@ -0,0 +1,37 @@
# launch server
# python -m sglang.launch_server --model mistralai/Mistral-7B-Instruct-v0.3 --lora-paths /home/ying/test_lora lora1=/home/ying/test_lora_1 lora2=/home/ying/test_lora_2 --disable-radix --disable-cuda-graph --max-loras-per-batch 4
# send requests
# lora_path[i] specifies the LoRA used for text[i], so make sure they have the same length
# use None to specify a base-only prompt, e.g. "lora_path": [None, "/home/ying/test_lora"]
import json
import requests
url = "http://127.0.0.1:30000"
json_data = {
"text": [
"prompt 1",
"prompt 2",
"prompt 3",
"prompt 4",
"prompt 5",
"prompt 6",
"prompt 7",
],
"sampling_params": {"max_new_tokens": 32},
"lora_path": [
"/home/ying/test_lora",
"lora1",
"lora2",
"lora1",
"lora2",
None,
None,
],
}
response = requests.post(
url + "/generate",
json=json_data,
)
print(json.dumps(response.json()))

View File

@@ -0,0 +1,111 @@
"""
Usage:
# Installing latest llava-next: pip install git+https://github.com/LLaVA-VL/LLaVA-NeXT.git
# Installing latest sglang.
# Endpoint Service CLI:
python -m sglang.launch_server --model-path lmms-lab/llama3-llava-next-8b --port=30000
python3 llama3_llava_server.py
Output:
"Friends posing for a fun photo with a life-sized teddy bear, creating a playful and memorable moment."
"""
import argparse
import asyncio
import copy
import json
import aiohttp
import requests
from llava.conversation import conv_llava_llama_3
async def send_request(url, data, delay=0):
await asyncio.sleep(delay)
async with aiohttp.ClientSession() as session:
async with session.post(url, json=data) as resp:
output = await resp.json()
return output
async def test_concurrent(args):
url = f"{args.host}:{args.port}"
prompt = "<image>\nPlease generate caption towards this image."
conv_template = copy.deepcopy(conv_llava_llama_3)
conv_template.append_message(role=conv_template.roles[0], message=prompt)
conv_template.append_message(role=conv_template.roles[1], message=None)
prompt_with_template = conv_template.get_prompt()
response = []
for i in range(1):
response.append(
send_request(
url + "/generate",
{
"text": prompt_with_template,
"image_data": "https://farm4.staticflickr.com/3175/2653711032_804ff86d81_z.jpg",
"sampling_params": {
"max_new_tokens": 1024,
"temperature": 0,
"top_p": 1.0,
"presence_penalty": 2,
"frequency_penalty": 2,
"stop": "<|eot_id|>",
},
},
)
)
rets = await asyncio.gather(*response)
for ret in rets:
print(ret["text"])
def test_streaming(args):
url = f"{args.host}:{args.port}"
prompt = "<image>\nPlease generate caption towards this image."
conv_template = copy.deepcopy(conv_llava_llama_3)
conv_template.append_message(role=conv_template.roles[0], message=prompt)
conv_template.append_message(role=conv_template.roles[1], message=None)
prompt_with_template = conv_template.get_prompt()
pload = {
"text": prompt_with_template,
"sampling_params": {
"max_new_tokens": 1024,
"temperature": 0,
"top_p": 1.0,
"presence_penalty": 2,
"frequency_penalty": 2,
"stop": "<|eot_id|>",
},
"image_data": "https://farm4.staticflickr.com/3175/2653711032_804ff86d81_z.jpg",
"stream": True,
}
response = requests.post(
url + "/generate",
json=pload,
stream=True,
)
prev = 0
for chunk in response.iter_lines(decode_unicode=False):
chunk = chunk.decode("utf-8")
if chunk and chunk.startswith("data:"):
if chunk == "data: [DONE]":
break
data = json.loads(chunk[5:].strip("\n"))
output = data["text"].strip()
print(output[prev:], end="", flush=True)
prev = len(output)
print("")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--host", type=str, default="http://127.0.0.1")
parser.add_argument("--port", type=int, default=30000)
args = parser.parse_args()
asyncio.run(test_concurrent(args))
test_streaming(args)

View File

@@ -0,0 +1,264 @@
"""
Usage:
python3 -m sglang.launch_server --model-path lmms-lab/llava-onevision-qwen2-72b-ov --port=30000 --tp-size=8
python3 llava_onevision_server.py
"""
import base64
import io
import os
import sys
import time
import numpy as np
import openai
import requests
from decord import VideoReader, cpu
from PIL import Image
# pip install httpx==0.23.3
# pip install decord
# pip install protobuf==3.20.0
def download_video(url, cache_dir):
file_path = os.path.join(cache_dir, "jobs.mp4")
os.makedirs(cache_dir, exist_ok=True)
response = requests.get(url)
response.raise_for_status()
with open(file_path, "wb") as f:
f.write(response.content)
print(f"File downloaded and saved to: {file_path}")
return file_path
def create_openai_client(base_url):
return openai.Client(api_key="EMPTY", base_url=base_url)
def image_stream_request_test(client):
print("----------------------Image Stream Request Test----------------------")
stream_request = client.chat.completions.create(
model="default",
messages=[
{
"role": "user",
"content": [
{
"type": "image_url",
"image_url": {
"url": "https://raw.githubusercontent.com/sgl-project/sglang/main/assets/logo.png"
},
},
{
"type": "text",
"text": "Please describe this image. Please list the benchmarks and the models.",
},
],
},
],
temperature=0.7,
max_tokens=1024,
stream=True,
)
stream_response = ""
for chunk in stream_request:
if chunk.choices[0].delta.content is not None:
content = chunk.choices[0].delta.content
stream_response += content
sys.stdout.write(content)
sys.stdout.flush()
print("-" * 30)
def multi_image_stream_request_test(client):
print(
"----------------------Multi-Images Stream Request Test----------------------"
)
stream_request = client.chat.completions.create(
model="default",
messages=[
{
"role": "user",
"content": [
{
"type": "image_url",
"image_url": {
"url": "https://raw.githubusercontent.com/sgl-project/sglang/main/assets/logo.png"
},
"modalities": "multi-images",
},
{
"type": "image_url",
"image_url": {
"url": "https://raw.githubusercontent.com/sgl-project/sglang/main/test/lang/example_image.png"
},
"modalities": "multi-images",
},
{
"type": "text",
"text": "I have shown you two images. Please describe the two images to me.",
},
],
},
],
temperature=0.7,
max_tokens=1024,
stream=True,
)
stream_response = ""
for chunk in stream_request:
if chunk.choices[0].delta.content is not None:
content = chunk.choices[0].delta.content
stream_response += content
sys.stdout.write(content)
sys.stdout.flush()
print("-" * 30)
def video_stream_request_test(client, video_path):
print("------------------------Video Stream Request Test----------------------")
messages = prepare_video_messages(video_path)
video_request = client.chat.completions.create(
model="default",
messages=messages,
temperature=0,
max_tokens=1024,
stream=True,
)
print("-" * 30)
video_response = ""
for chunk in video_request:
if chunk.choices[0].delta.content is not None:
content = chunk.choices[0].delta.content
video_response += content
sys.stdout.write(content)
sys.stdout.flush()
print("-" * 30)
def image_speed_test(client):
print("----------------------Image Speed Test----------------------")
start_time = time.perf_counter()
request = client.chat.completions.create(
model="default",
messages=[
{
"role": "user",
"content": [
{
"type": "image_url",
"image_url": {
"url": "https://raw.githubusercontent.com/sgl-project/sglang/main/assets/logo.png"
},
},
{
"type": "text",
"text": "Please describe this image. Please list the benchmarks and the models.",
},
],
},
],
temperature=0,
max_tokens=1024,
)
end_time = time.perf_counter()
response = request.choices[0].message.content
print(response)
print("-" * 30)
print_speed_test_results(request, start_time, end_time)
def video_speed_test(client, video_path):
print("------------------------Video Speed Test------------------------")
messages = prepare_video_messages(video_path)
start_time = time.perf_counter()
video_request = client.chat.completions.create(
model="default",
messages=messages,
temperature=0,
max_tokens=1024,
)
end_time = time.perf_counter()
video_response = video_request.choices[0].message.content
print(video_response)
print("-" * 30)
print_speed_test_results(video_request, start_time, end_time)
def prepare_video_messages(video_path):
max_frames_num = 32
vr = VideoReader(video_path, ctx=cpu(0))
total_frame_num = len(vr)
uniform_sampled_frames = np.linspace(
0, total_frame_num - 1, max_frames_num, dtype=int
)
frame_idx = uniform_sampled_frames.tolist()
frames = vr.get_batch(frame_idx).asnumpy()
base64_frames = []
for frame in frames:
pil_img = Image.fromarray(frame)
buff = io.BytesIO()
pil_img.save(buff, format="JPEG")
base64_str = base64.b64encode(buff.getvalue()).decode("utf-8")
base64_frames.append(base64_str)
messages = [{"role": "user", "content": []}]
for base64_frame in base64_frames:
frame_format = {
"type": "image_url",
"image_url": {"url": f"data:image/jpeg;base64,{base64_frame}"},
"modalities": "video",
}
messages[0]["content"].append(frame_format)
prompt = {"type": "text", "text": "Please describe the video in detail."}
messages[0]["content"].append(prompt)
return messages
def print_speed_test_results(request, start_time, end_time):
total_tokens = request.usage.total_tokens
completion_tokens = request.usage.completion_tokens
prompt_tokens = request.usage.prompt_tokens
print(f"Total tokens: {total_tokens}")
print(f"Completion tokens: {completion_tokens}")
print(f"Prompt tokens: {prompt_tokens}")
print(f"Time taken: {end_time - start_time} seconds")
print(f"Token per second: {total_tokens / (end_time - start_time)}")
print(f"Completion token per second: {completion_tokens / (end_time - start_time)}")
print(f"Prompt token per second: {prompt_tokens / (end_time - start_time)}")
def main():
url = "https://raw.githubusercontent.com/EvolvingLMMs-Lab/sglang/dev/onevision_local/assets/jobs.mp4"
cache_dir = os.path.expanduser("~/.cache")
video_path = download_video(url, cache_dir)
client = create_openai_client("http://127.0.0.1:30000/v1")
image_stream_request_test(client)
multi_image_stream_request_test(client)
video_stream_request_test(client, video_path)
image_speed_test(client)
video_speed_test(client, video_path)
if __name__ == "__main__":
main()

View File

@@ -0,0 +1,127 @@
"""
Usage:
# Run a Pixtral model with SGLang:
# HuggingFace:
python -m sglang.launch_server --model-path mistral-community/pixtral-12b --port=30000
# ModelScope:
python -m sglang.launch_server --model-path AI-ModelScope/pixtral-12b --port=30000
# Then test it with:
python pixtral_server.py
This script tests Pixtral model with both single and multiple images.
"""
import argparse
import asyncio
import json
import aiohttp
import requests
IMAGE_TOKEN_SEP = "\n[IMG]"
ROUTE = "/generate"
async def send_request(url, data, delay=0):
await asyncio.sleep(delay)
async with aiohttp.ClientSession() as session:
async with session.post(url, json=data) as resp:
output = await resp.json()
return output
async def test_concurrent(args):
url = f"{args.host}:{args.port}{ROUTE}"
# Single image test
if args.single_image:
prompt = f"<s>[INST]Describe this image in detail.{IMAGE_TOKEN_SEP}[/INST]"
image_url = "https://picsum.photos/id/237/400/300"
modality = ["image"]
# Multiple images test
else:
image_urls = [
"https://picsum.photos/id/237/400/300",
"https://picsum.photos/id/27/500/500",
]
prompt = f"<s>[INST]How many photos are there? Describe each in a very short sentence.{IMAGE_TOKEN_SEP * len(image_urls)}[/INST]"
image_url = image_urls
modality = ["multi-images"]
response = await send_request(
url,
{
"text": prompt,
"image_data": image_url,
"sampling_params": {
"max_new_tokens": 100,
"temperature": 0.7,
"top_p": 0.9,
},
"modalities": modality,
},
)
print(f"Response: {response}")
if "text" in response:
print("\nOutput text:", response["text"])
def test_streaming(args):
url = f"{args.host}:{args.port}/generate"
# Single image test
if args.single_image:
prompt = f"<s>[INST]Describe this image in detail.{IMAGE_TOKEN_SEP}[/INST]"
image_data = "https://picsum.photos/id/237/400/300"
modality = ["image"]
# Multiple images test
else:
image_urls = [
"https://picsum.photos/id/237/400/300",
"https://picsum.photos/id/27/500/500",
]
prompt = f"<s>[INST]How many photos are there? Describe each in a very short sentence.{IMAGE_TOKEN_SEP * len(image_urls)}[/INST]"
image_data = image_urls
modality = ["multi-images"]
pload = {
"text": prompt,
"image_data": image_data,
"sampling_params": {"max_new_tokens": 100, "temperature": 0.7, "top_p": 0.9},
"modalities": modality,
"stream": True,
}
response = requests.post(url, json=pload, stream=True)
print("Streaming response:")
prev = 0
for chunk in response.iter_lines(decode_unicode=False):
chunk = chunk.decode("utf-8")
if chunk and chunk.startswith("data:"):
if chunk == "data: [DONE]":
break
data = json.loads(chunk[5:].strip("\n"))
output = data["text"].strip()
print(output[prev:], end="", flush=True)
prev = len(output)
print("\n")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--host", type=str, default="http://127.0.0.1")
parser.add_argument("--port", type=int, default=30000)
parser.add_argument(
"--single-image",
action="store_true",
help="Test with single image instead of multiple images",
)
parser.add_argument("--no-stream", action="store_true", help="Don't test streaming")
args = parser.parse_args()
asyncio.run(test_concurrent(args))
if not args.no_stream:
test_streaming(args)

View File

@@ -0,0 +1,111 @@
"""
Usage:
# Installing latest llava-next: pip install git+https://github.com/LLaVA-VL/LLaVA-NeXT.git
# Installing latest sglang.
# Endpoint Service CLI:
python -m sglang.launch_server --model-path lmms-lab/llava-next-72b --port=30000 --tp-size=8
python3 qwen_llava_server.py
Output:
"Two children pose with a large teddy bear, one holding a smaller stuffed bear, in a room with an American flag and potted plants."
"""
import argparse
import asyncio
import copy
import json
import aiohttp
import requests
from llava.conversation import conv_qwen
async def send_request(url, data, delay=0):
await asyncio.sleep(delay)
async with aiohttp.ClientSession() as session:
async with session.post(url, json=data) as resp:
output = await resp.json()
return output
async def test_concurrent(args):
url = f"{args.host}:{args.port}"
prompt = "<image>\nPlease generate caption towards this image."
conv_template = copy.deepcopy(conv_qwen)
conv_template.append_message(role=conv_template.roles[0], message=prompt)
conv_template.append_message(role=conv_template.roles[1], message=None)
prompt_with_template = conv_template.get_prompt()
response = []
for i in range(1):
response.append(
send_request(
url + "/generate",
{
"text": prompt_with_template,
"image_data": "https://farm4.staticflickr.com/3175/2653711032_804ff86d81_z.jpg",
"sampling_params": {
"max_new_tokens": 1024,
"temperature": 0,
"top_p": 1.0,
"presence_penalty": 2,
"frequency_penalty": 2,
"stop": "<|im_end|>",
},
},
)
)
rets = await asyncio.gather(*response)
for ret in rets:
print(ret["text"])
def test_streaming(args):
url = f"{args.host}:{args.port}"
prompt = "<image>\nPlease generate caption towards this image."
conv_template = copy.deepcopy(conv_qwen)
conv_template.append_message(role=conv_template.roles[0], message=prompt)
conv_template.append_message(role=conv_template.roles[1], message=None)
prompt_with_template = conv_template.get_prompt()
pload = {
"text": prompt_with_template,
"sampling_params": {
"max_new_tokens": 1024,
"temperature": 0,
"top_p": 1.0,
"presence_penalty": 2,
"frequency_penalty": 2,
"stop": "<|im_end|>",
},
"image_data": "https://farm4.staticflickr.com/3175/2653711032_804ff86d81_z.jpg",
"stream": True,
}
response = requests.post(
url + "/generate",
json=pload,
stream=True,
)
prev = 0
for chunk in response.iter_lines(decode_unicode=False):
chunk = chunk.decode("utf-8")
if chunk and chunk.startswith("data:"):
if chunk == "data: [DONE]":
break
data = json.loads(chunk[5:].strip("\n"))
output = data["text"].strip()
print(output[prev:], end="", flush=True)
prev = len(output)
print("")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--host", type=str, default="http://127.0.0.1")
parser.add_argument("--port", type=int, default=30000)
args = parser.parse_args()
asyncio.run(test_concurrent(args))
test_streaming(args)

View File

@@ -0,0 +1,18 @@
# launch server
# python -m sglang.launch_server --model-path Alibaba-NLP/gme-Qwen2-VL-2B-Instruct --is-embedding
import requests
url = "http://127.0.0.1:30000"
text_input = "Represent this image in embedding space."
image_path = "https://huggingface.co/datasets/liuhaotian/llava-bench-in-the-wild/resolve/main/images/023.jpg"
payload = {
"model": "gme-qwen2-vl",
"input": [{"text": text_input}, {"image": image_path}],
}
response = requests.post(url + "/v1/embeddings", json=payload).json()
print("Embeddings:", [x.get("embedding") for x in response.get("data", [])])

View File

@@ -0,0 +1,53 @@
"""
Usage:
1) Launch the server in one terminal:
python -m sglang.launch_server --model-path meta-llama/Llama-3.1-8B-Instruct --port 30000
2) Run this script in another terminal:
python openai_chat_with_response_prefill.py
This example demonstrates two chat completion calls:
- One with continue_final_message enabled (the final assistant message is used as a prefill).
- One without continue_final_message (the final assistant message remains, starting a new turn).
"""
import openai
client = openai.Client(base_url="http://127.0.0.1:30000/v1", api_key="EMPTY")
messages = [
{"role": "system", "content": "You are a helpful AI assistant."},
{
"role": "user",
"content": """
Extract the name, size, price, and color from this product description as a JSON object:
<description>
The SmartHome Mini is a compact smart home assistant available in black or white for only $49.99.
At just 5 inches wide, it lets you control lights, thermostats, and other connected devices via voice or app—
no matter where you place it in your home.
This affordable little hub brings convenient hands-free control to your smart devices.
</description>
""",
},
{"role": "assistant", "content": "{\n"},
]
# Calling the API with continue_final_message enabled.
print("=== Prefill with continue_final_messagem ===")
response_with = client.chat.completions.create(
model="meta-llama/Llama-3.1-8B-Instruct",
messages=messages,
temperature=0,
extra_body={"continue_final_message": True},
)
print(response_with.choices[0].message.content)
# Calling the API without continue_final_message (using default behavior).
print("\n=== Prefill without continue_final_message ===")
response_without = client.chat.completions.create(
model="meta-llama/Llama-3.1-8B-Instruct",
messages=messages,
temperature=0,
)
print(response_without.choices[0].message.content)
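# Optional sanity check (a sketch, not part of the original example): with the
# prefill, the combined text should form a parseable JSON object. Depending on the
# server version, the returned content may or may not repeat the "{\n" prefill,
# so try both forms.
import json
completion = response_with.choices[0].message.content
for candidate in (completion, "{\n" + completion):
    try:
        print("Parsed product:", json.loads(candidate))
        break
    except json.JSONDecodeError:
        continue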

View File

@@ -0,0 +1,32 @@
# launch server
# python -m sglang.launch_server --model LxzGordon/URM-LLaMa-3.1-8B --is-embedding
import requests
url = "http://127.0.0.1:30000"
PROMPT = (
"What is the range of the numeric output of a sigmoid node in a neural network?"
)
RESPONSE1 = "The output of a sigmoid node is bounded between -1 and 1."
RESPONSE2 = "The output of a sigmoid node is bounded between 0 and 1."
json_data = {
"conv": [
[
{"role": "user", "content": PROMPT},
{"role": "assistant", "content": RESPONSE1},
],
[
{"role": "user", "content": PROMPT},
{"role": "assistant", "content": RESPONSE2},
],
],
}
response = requests.post(
url + "/classify",
json=json_data,
).json()
print(response)
print("scores:", [x["embedding"] for x in response])

View File

@@ -0,0 +1,43 @@
"""
This example demonstrates how to provide tokenized ids to the LLM as input instead of a text prompt, i.e. a token-in-token-out workflow.
"""
import sglang as sgl
from sglang.srt.hf_transformers_utils import get_tokenizer
MODEL_PATH = "meta-llama/Llama-3.1-8B-Instruct"
def main():
# Sample prompts.
prompts = [
"Hello, my name is",
"The president of the United States is",
"The capital of France is",
"The future of AI is",
]
# Create a sampling params object.
sampling_params = {"temperature": 0.8, "top_p": 0.95}
# Tokenize inputs
tokenizer = get_tokenizer(MODEL_PATH)
token_ids_list = [tokenizer.encode(prompt) for prompt in prompts]
# Create an LLM.
llm = sgl.Engine(model_path=MODEL_PATH, skip_tokenizer_init=True)
outputs = llm.generate(input_ids=token_ids_list, sampling_params=sampling_params)
# Print the outputs.
for prompt, output in zip(prompts, outputs):
decode_output = tokenizer.decode(output["output_ids"])
print("===============================")
print(
f"Prompt: {prompt}\nGenerated token ids: {output['output_ids']}\nGenerated text: {decode_output}"
)
print()
# The __main__ guard is necessary here because we use "spawn" to create subprocesses.
# Spawn starts a fresh program every time; without the guard, the workers created by
# sgl.Engine would re-execute this script and keep spawning new processes indefinitely.
if __name__ == "__main__":
main()

View File

@@ -0,0 +1,68 @@
"""
Usage:
python token_in_token_out_llm_server.py
"""
import requests
from sglang.srt.hf_transformers_utils import get_tokenizer
from sglang.test.test_utils import is_in_ci
from sglang.utils import terminate_process, wait_for_server
if is_in_ci():
from docs.backend.patch import launch_server_cmd
else:
from sglang.utils import launch_server_cmd
MODEL_PATH = "meta-llama/Llama-3.1-8B-Instruct"
def main():
# Launch the server
server_process, port = launch_server_cmd(
f"python -m sglang.launch_server --model-path {MODEL_PATH} --skip-tokenizer-init --host 0.0.0.0"
)
wait_for_server(f"http://localhost:{port}")
# Sample prompts.
prompts = [
"Hello, my name is",
"The president of the United States is",
"The capital of France is",
"The future of AI is",
]
# Create a sampling params object.
sampling_params = {"temperature": 0.8, "top_p": 0.95}
# Tokenize inputs
tokenizer = get_tokenizer(MODEL_PATH)
token_ids_list = [tokenizer.encode(prompt) for prompt in prompts]
json_data = {
"input_ids": token_ids_list,
"sampling_params": sampling_params,
}
response = requests.post(
f"http://localhost:{port}/generate",
json=json_data,
)
outputs = response.json()
for prompt, output in zip(prompts, outputs):
print("===============================")
decode_output = tokenizer.decode(output["output_ids"])
print(
f"Prompt: {prompt}\nGenerated token ids: {output['output_ids']}\nGenerated text: {decode_output}"
)
print()
terminate_process(server_process)
if __name__ == "__main__":
main()

View File

@@ -0,0 +1,74 @@
import argparse
import dataclasses
from typing import Tuple
from transformers import AutoProcessor
from sglang import Engine
from sglang.lang.chat_template import get_chat_template_by_model_path
from sglang.srt.configs.model_config import ModelConfig
from sglang.srt.server_args import ServerArgs
from sglang.test.test_utils import DEFAULT_IMAGE_URL
def get_input_ids(
server_args: ServerArgs, model_config: ModelConfig
) -> Tuple[list[int], list]:
chat_template = get_chat_template_by_model_path(model_config.model_path)
text = f"{chat_template.image_token}What is in this picture?"
image_data = [DEFAULT_IMAGE_URL]
processor = AutoProcessor.from_pretrained(
model_config.model_path, trust_remote_code=server_args.trust_remote_code
)
input_ids = (
processor.tokenizer(
text=[text],
return_tensors="pt",
)
.input_ids[0]
.tolist()
)
return input_ids, image_data
def token_in_out_example(
server_args: ServerArgs,
):
input_ids, image_data = get_input_ids(
server_args,
ModelConfig(
server_args.model_path,
trust_remote_code=server_args.trust_remote_code,
model_override_args=server_args.json_model_override_args,
),
)
backend = Engine(**dataclasses.asdict(server_args))
output = backend.generate(
input_ids=input_ids,
image_data=image_data,
sampling_params={
"temperature": 0.8,
"max_new_tokens": 32,
},
)
print("===============================")
print(f"Output token ids: ", output["output_ids"])
backend.shutdown()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
ServerArgs.add_cli_args(parser)
args = [
"--model-path=Qwen/Qwen2-VL-2B",
]
args = parser.parse_args(args=args)
server_args = ServerArgs.from_cli_args(args)
server_args.skip_tokenizer_init = True
token_in_out_example(server_args)
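# Since skip_tokenizer_init is set, the engine returns only token ids. A hedged
# sketch of how one might turn them back into text inside token_in_out_example
# (assumes the model's processor bundles a compatible tokenizer):
#
#     processor = AutoProcessor.from_pretrained(server_args.model_path)
#     print(processor.tokenizer.decode(output["output_ids"]))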

View File

@@ -0,0 +1,78 @@
"""
Usage:
python token_in_token_out_vlm_server.py
"""
from typing import Tuple
import requests
from transformers import AutoProcessor
from sglang.lang.chat_template import get_chat_template_by_model_path
from sglang.test.test_utils import DEFAULT_IMAGE_URL, is_in_ci
from sglang.utils import terminate_process, wait_for_server
if is_in_ci():
from docs.backend.patch import launch_server_cmd
else:
from sglang.utils import launch_server_cmd
MODEL_PATH = "Qwen/Qwen2-VL-2B"
def get_input_ids() -> Tuple[list[int], list]:
chat_template = get_chat_template_by_model_path(MODEL_PATH)
text = f"{chat_template.image_token}What is in this picture?"
image_data = [DEFAULT_IMAGE_URL]
processor = AutoProcessor.from_pretrained(MODEL_PATH)
input_ids = (
processor.tokenizer(
text=[text],
return_tensors="pt",
)
.input_ids[0]
.tolist()
)
return input_ids, image_data
def main():
# Launch the server
server_process, port = launch_server_cmd(
f"python -m sglang.launch_server --model-path {MODEL_PATH} --skip-tokenizer-init --host 0.0.0.0"
)
wait_for_server(f"http://localhost:{port}")
input_ids, image_data = get_input_ids()
sampling_params = {
"temperature": 0.8,
"max_new_tokens": 32,
}
json_data = {
"input_ids": input_ids,
"image_data": image_data,
"sampling_params": sampling_params,
}
response = requests.post(
f"http://localhost:{port}/generate",
json=json_data,
)
output = response.json()
print("===============================")
print(f"Output token ids: ", output["output_ids"])
terminate_process(server_process)
if __name__ == "__main__":
main()

View File

@@ -0,0 +1,66 @@
"""
Usage:
python -m sglang.launch_server --model meta-llama/Llama-2-7b-hf --port 30000
python vertex_predict.py
This example shows the request and response formats of the prediction route for
Google Cloud Vertex AI Online Predictions.
The Vertex AI SDK for Python is recommended for deploying models to Vertex AI
instead of a local server. After deploying the model to a Vertex AI Online
Prediction Endpoint, send requests via the Python SDK:
response = endpoint.predict(
instances=[
{"text": "The capital of France is"},
{"text": "What is a car?"},
],
parameters={"sampling_params": {"max_new_tokens": 16}},
)
print(response.predictions)
More details about getting online predictions from Vertex AI can be found at
https://cloud.google.com/vertex-ai/docs/predictions/get-online-predictions.
"""
from dataclasses import dataclass
from typing import List, Optional
import requests
@dataclass
class VertexPrediction:
predictions: List
class LocalVertexEndpoint:
def __init__(self) -> None:
self.base_url = "http://127.0.0.1:30000"
def predict(self, instances: List[dict], parameters: Optional[dict] = None):
response = requests.post(
self.base_url + "/vertex_generate",
json={
"instances": instances,
"parameters": parameters,
},
)
return VertexPrediction(predictions=response.json()["predictions"])
endpoint = LocalVertexEndpoint()
# Predict with a single prompt.
response = endpoint.predict(instances=[{"text": "The capital of France is"}])
print(response.predictions)
# Predict with multiple prompts and parameters.
response = endpoint.predict(
instances=[
{"text": "The capital of France is"},
{"text": "What is a car?"},
],
parameters={"sampling_params": {"max_new_tokens": 16}},
)
print(response.predictions)