Docs: Implemented frontend docs (#3791)

Co-authored-by: Chayenne <zhaochen20@outlook.com>
Authored by simveit on 2025-02-27 00:30:05 +01:00, committed by GitHub
parent 7c1692aa90
commit acd1a15921
8 changed files with 599 additions and 328 deletions


@@ -0,0 +1,38 @@
"""
Usage:
python hidden_states.py
Note that we are actively working on moving return_hidden_states to the sampling_params.
"""
import sglang as sgl
def main():
prompts = [
"Hello, my name is",
"The president of the United States is",
"The capital of France is",
"The future of AI is",
]
# Create an LLM.
llm = sgl.Engine(
model_path="Alibaba-NLP/gte-Qwen2-1.5B-instruct",
return_hidden_states=True,
)
sampling_params = {"temperature": 0.8, "top_p": 0.95, "max_new_tokens": 10}
outputs = llm.generate(prompts, sampling_params=sampling_params)
for prompt, output in zip(prompts, outputs):
print("===============================")
print(
f"Prompt: {prompt}\nGenerated text: {output['text']}\nPrompt_Tokens: {output['meta_info']['prompt_tokens']}\tCompletion_tokens: {output['meta_info']['completion_tokens']}\nHidden states: {[i.shape for i in output['meta_info']['hidden_states']]}"
)
print()

# The __main__ condition is necessary here because we use "spawn" to create subprocesses.
# Spawn starts a fresh program every time; without the __main__ guard, sgl.Engine would keep
# spawning new processes in an infinite loop.
if __name__ == "__main__":
    main()
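
If you want to do more with the hidden states than print their shapes, a minimal post-processing sketch follows. It assumes each entry of output['meta_info']['hidden_states'] is a tensor-like array of shape (num_tokens, hidden_size), which is consistent with the .shape access in the example above; the pooling choice and the helper name last_token_embedding are illustrative, not part of the documented API.

import torch


def last_token_embedding(output):
    # `output` is one element returned by llm.generate(...) above. The hidden states are
    # assumed to arrive as a list with one array/tensor per forward pass, each of shape
    # (num_tokens, hidden_size), matching the `.shape` access in the example.
    steps = [torch.as_tensor(h) for h in output["meta_info"]["hidden_states"]]
    # Use the representation of the last token of the last step as a simple pooled
    # embedding (an illustrative choice, not something the example prescribes).
    return steps[-1][-1]


# Hypothetical usage, reusing `outputs` from the example above:
# embeddings = torch.stack([last_token_embedding(o) for o in outputs])
# print(embeddings.shape)  # (num_prompts, hidden_size)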


@@ -5,56 +5,45 @@ python offline_batch_inference_vlm.py --model-path Qwen/Qwen2-VL-7B-Instruct --c
import argparse
import dataclasses
- import io
- import os
- from transformers import AutoProcessor
- import requests
- from PIL import Image

import sglang as sgl
- from sglang.srt.openai_api.adapter import v1_chat_generate_request
- from sglang.srt.openai_api.protocol import ChatCompletionRequest
from sglang.srt.conversation import chat_templates
from sglang.srt.server_args import ServerArgs


def main(
    server_args: ServerArgs,
):
    # Create an LLM.
    vlm = sgl.Engine(**dataclasses.asdict(server_args))

    # prepare prompts.
-     messages = [
-         {
-             "role": "user",
-             "content": [
-                 {"type": "text", "text": "Whats in this image?"},
-                 {
-                     "type": "image_url",
-                     "image_url": {
-                         "url": "https://github.com/sgl-project/sglang/blob/main/test/lang/example_image.png?raw=true",
-                     },
-                 },
-             ],
-         }
-     ]
-     chat_request = ChatCompletionRequest(
-         messages=messages,
-         model=server_args.model_path,
-         temperature=0.8,
-         top_p=0.95,
-     )
-     gen_request, _ = v1_chat_generate_request(
-         [chat_request],
-         vlm.tokenizer_manager,
-     )
    conv = chat_templates[server_args.chat_template].copy()
    image_token = conv.image_token
-     outputs = vlm.generate(
-         input_ids=gen_request.input_ids,
-         image_data=gen_request.image_data,
-         sampling_params=gen_request.sampling_params,
+     image_url = "https://github.com/sgl-project/sglang/blob/main/test/lang/example_image.png?raw=true"
+     prompt = f"What's in this image?\n{image_token}"
+     sampling_params = {
+         "temperature": 0.001,
+         "max_new_tokens": 30,
+     }
+     output = vlm.generate(
+         prompt=prompt,
+         image_data=image_url,
+         sampling_params=sampling_params,
    )
    print("===============================")
-     print(f"Prompt: {messages[0]['content'][0]['text']}")
-     print(f"Generated text: {outputs['text']}")
+     print(f"Prompt: {prompt}")
+     print(f"Generated text: {output['text']}")

    vlm.shutdown()


# The __main__ condition is necessary here because we use "spawn" to create subprocesses
@@ -63,5 +52,6 @@ if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    ServerArgs.add_cli_args(parser)
    args = parser.parse_args()
    server_args = ServerArgs.from_cli_args(args)
    main(server_args)
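
To reuse the same engine for several questions about an image, the new prompt-based example above can be extended with a simple loop. The sketch below is a minimal variation under the same assumptions as that example (the chat_templates image token and the prompt/image_data call signature); the list of questions is made up for illustration, and nothing here is a documented batching API.

import argparse
import dataclasses

import sglang as sgl
from sglang.srt.conversation import chat_templates
from sglang.srt.server_args import ServerArgs


def main(server_args: ServerArgs):
    # Create the engine once and reuse it for every question.
    vlm = sgl.Engine(**dataclasses.asdict(server_args))
    image_token = chat_templates[server_args.chat_template].copy().image_token

    image_url = "https://github.com/sgl-project/sglang/blob/main/test/lang/example_image.png?raw=true"
    # Hypothetical questions, only for illustration.
    questions = ["What's in this image?", "Describe the colors in this image."]

    for question in questions:
        # One request per question, using the same call signature as the example above.
        output = vlm.generate(
            prompt=f"{question}\n{image_token}",
            image_data=image_url,
            sampling_params={"temperature": 0.001, "max_new_tokens": 30},
        )
        print(f"Q: {question}\nA: {output['text']}\n")

    vlm.shutdown()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    ServerArgs.add_cli_args(parser)
    args = parser.parse_args()
    main(ServerArgs.from_cli_args(args))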