From 4725e3f65218b441789b685c84dda810fcb62a79 Mon Sep 17 00:00:00 2001
From: Qiaolin Yu
Date: Tue, 4 Mar 2025 22:31:50 -0500
Subject: [PATCH] Add examples for returning hidden states when using the
 server (#4074)

---
 docs/backend/offline_engine_api.ipynb              |  2 +-
 docs/backend/sampling_params.md                    |  2 +-
 .../hidden_states_engine.py}                       |  0
 .../hidden_states/hidden_states_server.py          | 69 +++++++++++++++++++
 4 files changed, 71 insertions(+), 2 deletions(-)
 rename examples/runtime/{engine/hidden_states.py => hidden_states/hidden_states_engine.py} (100%)
 create mode 100644 examples/runtime/hidden_states/hidden_states_server.py

diff --git a/docs/backend/offline_engine_api.ipynb b/docs/backend/offline_engine_api.ipynb
index 1ee45d4f4..f3de53b37 100644
--- a/docs/backend/offline_engine_api.ipynb
+++ b/docs/backend/offline_engine_api.ipynb
@@ -33,7 +33,7 @@
    "source": [
     "## Advanced Usage\n",
     "\n",
-    "The engine supports [vlm inference](https://github.com/sgl-project/sglang/blob/main/examples/runtime/engine/offline_batch_inference_vlm.py) as well as [extracting hidden states](https://github.com/sgl-project/sglang/blob/main/examples/runtime/engine/hidden_states.py). \n",
+    "The engine supports [vlm inference](https://github.com/sgl-project/sglang/blob/main/examples/runtime/engine/offline_batch_inference_vlm.py) as well as [extracting hidden states](https://github.com/sgl-project/sglang/blob/main/examples/runtime/hidden_states). \n",
     "\n",
     "Please see [the examples](https://github.com/sgl-project/sglang/tree/main/examples/runtime/engine) for further use cases."
    ]
diff --git a/docs/backend/sampling_params.md b/docs/backend/sampling_params.md
index 01a9d61bd..5f967b986 100644
--- a/docs/backend/sampling_params.md
+++ b/docs/backend/sampling_params.md
@@ -17,7 +17,7 @@ The `/generate` endpoint accepts the following parameters in JSON format. For in
 * `stream: bool = False` Whether to stream the output.
 * `lora_path: Optional[Union[List[Optional[str]], Optional[str]]] = None` Path to LoRA weights.
 * `custom_logit_processor: Optional[Union[List[Optional[str]], str]] = None` Custom logit processor for advanced sampling control. For usage see below.
-* `return_hidden_states: bool = False` Whether to return hidden states of the model. Note that each time it changes, the cuda graph will be recaptured, which might lead to a performance hit. See the [examples](https://github.com/sgl-project/sglang/blob/main/examples/runtime/engine/hidden_states.py) for more information.
+* `return_hidden_states: bool = False` Whether to return hidden states of the model. Note that each time it changes, the cuda graph will be recaptured, which might lead to a performance hit. See the [examples](https://github.com/sgl-project/sglang/blob/main/examples/runtime/hidden_states) for more information.
 
 ## Sampling params
 
diff --git a/examples/runtime/engine/hidden_states.py b/examples/runtime/hidden_states/hidden_states_engine.py
similarity index 100%
rename from examples/runtime/engine/hidden_states.py
rename to examples/runtime/hidden_states/hidden_states_engine.py
diff --git a/examples/runtime/hidden_states/hidden_states_server.py b/examples/runtime/hidden_states/hidden_states_server.py
new file mode 100644
index 000000000..a198c7c23
--- /dev/null
+++ b/examples/runtime/hidden_states/hidden_states_server.py
@@ -0,0 +1,69 @@
+"""
+Usage:
+
+python hidden_states_server.py
+
+Note that each time you change the `return_hidden_states` parameter,
+the CUDA graph will be recaptured, which might lead to a performance hit.
+So avoid alternating between requests that do and do not return hidden states.
+"""
+
+import requests
+
+from sglang.test.test_utils import is_in_ci
+from sglang.utils import terminate_process, wait_for_server
+
+if is_in_ci():
+    from docs.backend.patch import launch_server_cmd
+else:
+    from sglang.utils import launch_server_cmd
+
+
+def main():
+    # Launch the server
+    server_process, port = launch_server_cmd(
+        "python -m sglang.launch_server --model-path Alibaba-NLP/gte-Qwen2-1.5B-instruct --host 0.0.0.0"
+    )
+    wait_for_server(f"http://localhost:{port}")
+
+    prompts = [
+        "Hello, my name is",
+        "The president of the United States is",
+        "The capital of France is",
+        "The future of AI is",
+    ]
+
+    sampling_params = {
+        "temperature": 0.8,
+        "top_p": 0.95,
+        "max_new_tokens": 10,
+    }
+
+    json_data = {
+        "text": prompts,
+        "sampling_params": sampling_params,
+        "return_hidden_states": True,
+    }
+
+    response = requests.post(
+        f"http://localhost:{port}/generate",
+        json=json_data,
+    )
+
+    outputs = response.json()
+    for prompt, output in zip(prompts, outputs):
+        print("===============================")
+        print(
+            f"Prompt: {prompt}\n"
+            f"Generated text: {output['text']}\n"
+            f"Prompt tokens: {output['meta_info']['prompt_tokens']}\t"
+            f"Completion tokens: {output['meta_info']['completion_tokens']}\n"
+            f"Hidden states: {output['meta_info']['hidden_states']}"
+        )
+        print()
+
+    terminate_process(server_process)
+
+
+if __name__ == "__main__":
+    main()
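
Note on consuming the output: once a server like the one added above is running, the hidden
states come back as plain JSON under meta_info["hidden_states"]. Below is a minimal client-side
sketch, not part of the patch, showing one way to turn them into a tensor for downstream use
(e.g. as an embedding). It assumes torch is installed, that the server is reachable on a known
port (30000 is a placeholder; launch_server_cmd in the example picks the port dynamically), and
that the hidden states arrive as nested lists of per-token vectors, which may differ across
sglang versions.

# Client-side sketch (assumptions: torch installed, server on port 30000,
# meta_info["hidden_states"] is a nested list of per-token vectors).
import requests
import torch

port = 30000  # placeholder; the example above gets the port from launch_server_cmd

response = requests.post(
    f"http://localhost:{port}/generate",
    json={
        "text": "The capital of France is",
        "sampling_params": {"temperature": 0.0, "max_new_tokens": 8},
        "return_hidden_states": True,
    },
)
output = response.json()

# Stack the per-token vectors into a (num_tokens, hidden_size) tensor and
# use the final token's vector as a single embedding for the sequence.
hidden_states = torch.tensor(output["meta_info"]["hidden_states"])
embedding = hidden_states[-1]
print(hidden_states.shape, embedding.shape)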