Add examples for returning hidden states when using the server (#4074)
This commit is contained in:
@@ -33,7 +33,7 @@
|
||||
"source": [
|
||||
"## Advanced Usage\n",
|
||||
"\n",
|
||||
"The engine supports [vlm inference](https://github.com/sgl-project/sglang/blob/main/examples/runtime/engine/offline_batch_inference_vlm.py) as well as [extracting hidden states](https://github.com/sgl-project/sglang/blob/main/examples/runtime/engine/hidden_states.py). \n",
|
||||
"The engine supports [vlm inference](https://github.com/sgl-project/sglang/blob/main/examples/runtime/engine/offline_batch_inference_vlm.py) as well as [extracting hidden states](https://github.com/sgl-project/sglang/blob/main/examples/runtime/hidden_states). \n",
|
||||
"\n",
|
||||
"Please see [the examples](https://github.com/sgl-project/sglang/tree/main/examples/runtime/engine) for further use cases."
|
||||
]
|
||||
|
||||
@@ -17,7 +17,7 @@ The `/generate` endpoint accepts the following parameters in JSON format. For in
|
||||
* `stream: bool = False` Whether to stream the output.
|
||||
* `lora_path: Optional[Union[List[Optional[str]], Optional[str]]] = None` Path to LoRA weights.
|
||||
* `custom_logit_processor: Optional[Union[List[Optional[str]], str]] = None` Custom logit processor for advanced sampling control. For usage see below.
|
||||
* `return_hidden_states: bool = False` Whether to return hidden states of the model. Note that each time it changes, the cuda graph will be recaptured, which might lead to a performance hit. See the [examples](https://github.com/sgl-project/sglang/blob/main/examples/runtime/engine/hidden_states.py) for more information.
|
||||
* `return_hidden_states: bool = False` Whether to return hidden states of the model. Note that each time it changes, the cuda graph will be recaptured, which might lead to a performance hit. See the [examples](https://github.com/sgl-project/sglang/blob/main/examples/runtime/hidden_states) for more information.
|
||||
|
||||
## Sampling params
|
||||
|
||||
|
||||
69
examples/runtime/hidden_states/hidden_states_server.py
Normal file
69
examples/runtime/hidden_states/hidden_states_server.py
Normal file
@@ -0,0 +1,69 @@
|
||||
"""
|
||||
Usage:
|
||||
|
||||
python hidden_states_server.py
|
||||
|
||||
Note that each time you change the `return_hidden_states` parameter,
|
||||
the CUDA graph will be recaptured, which might lead to a performance hit.
|
||||
So avoid getting hidden states and completions alternately.
|
||||
"""
|
||||
|
||||
import requests
|
||||
|
||||
from sglang.test.test_utils import is_in_ci
|
||||
from sglang.utils import print_highlight, terminate_process, wait_for_server
|
||||
|
||||
if is_in_ci():
|
||||
from docs.backend.patch import launch_server_cmd
|
||||
else:
|
||||
from sglang.utils import launch_server_cmd
|
||||
|
||||
|
||||
def main():
    """Launch an sglang server and fetch generations with hidden states.

    Sends a batched ``/generate`` request with ``return_hidden_states=True``
    and prints each prompt's completion, token counts, and hidden states.

    Note: each time ``return_hidden_states`` changes, the CUDA graph is
    recaptured, which can hurt performance — avoid alternating hidden-state
    and plain-completion requests.
    """
    # Launch the server subprocess; the helper picks and returns the port.
    server_process, port = launch_server_cmd(
        "python -m sglang.launch_server --model-path Alibaba-NLP/gte-Qwen2-1.5B-instruct --host 0.0.0.0"
    )
    try:
        wait_for_server(f"http://localhost:{port}")

        prompts = [
            "Hello, my name is",
            "The president of the United States is",
            "The capital of France is",
            "The future of AI is",
        ]

        sampling_params = {
            "temperature": 0.8,
            "top_p": 0.95,
            "max_new_tokens": 10,
        }

        json_data = {
            "text": prompts,
            "sampling_params": sampling_params,
            # Ask the server to include hidden states in each meta_info.
            "return_hidden_states": True,
        }

        response = requests.post(
            f"http://localhost:{port}/generate",
            json=json_data,
        )
        # Fail with a clear HTTP error instead of crashing on a non-JSON
        # or error-payload body below.
        response.raise_for_status()

        # A batched request returns one output dict per prompt, in order.
        outputs = response.json()
        for prompt, output in zip(prompts, outputs):
            print("===============================")
            print(
                f"Prompt: {prompt}\n"
                f"Generated text: {output['text']}\n"
                f"Prompt_Tokens: {output['meta_info']['prompt_tokens']}\t"
                f"Completion_tokens: {output['meta_info']['completion_tokens']}\n"
                f"Hidden states: {output['meta_info']['hidden_states']}"
            )
            print()
    finally:
        # Always clean up the server subprocess, even if a request fails —
        # the original leaked it on any exception after launch.
        terminate_process(server_process)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
Reference in New Issue
Block a user