From 8c3b420eec03ea94e4ccce04681891558ca892ca Mon Sep 17 00:00:00 2001
From: Lianmin Zheng
Date: Sun, 29 Dec 2024 23:57:16 -0800
Subject: [PATCH] [Docs] clean up structured outputs docs (#2654)

---
 .github/workflows/pr-test.yml                 |  2 +-
 benchmark/deepseek_v3/README.md               | 14 +++------
 docs/backend/backend.md                       |  4 +--
 docs/backend/openai_api_completions.ipynb     |  8 ++---
 ...ecoding.ipynb => structured_outputs.ipynb} | 31 +++++++------------
 docs/references/sampling_params.md            | 31 +++++++++----------
 python/sglang/srt/openai_api/protocol.py      |  6 ++--
 python/sglang/srt/sampling/sampling_params.py | 10 +++++-
 python/sglang/srt/server.py                   |  2 ++
 python/sglang/srt/server_args.py              | 24 +++++++-------
 10 files changed, 62 insertions(+), 70 deletions(-)
 rename docs/backend/{constrained_decoding.ipynb => structured_outputs.ipynb} (90%)

diff --git a/.github/workflows/pr-test.yml b/.github/workflows/pr-test.yml
index 36b53baa3..d474d7425 100644
--- a/.github/workflows/pr-test.yml
+++ b/.github/workflows/pr-test.yml
@@ -52,7 +52,7 @@ jobs:
     runs-on: 1-gpu-runner
     strategy:
       matrix:
-        range: [0-6, 6-15, 15-23, 23-30, 30-100]
+        range: [0-6, 6-16, 16-23, 23-30, 30-100]
     steps:
       - name: Checkout code
         uses: actions/checkout@v3
diff --git a/benchmark/deepseek_v3/README.md b/benchmark/deepseek_v3/README.md
index 8bd8fe974..9c61af88f 100644
--- a/benchmark/deepseek_v3/README.md
+++ b/benchmark/deepseek_v3/README.md
@@ -1,13 +1,13 @@
 # DeepSeek V3 Support
 
-The SGLang and DeepSeek teams worked together to get DeepSeek V3 FP8 running on NVIDIA and AMD GPUs **from day one**. SGLang also has supported [MLA optimization](https://lmsys.org/blog/2024-09-04-sglang-v0-3/#deepseek-multi-head-latent-attention-mla-throughput-optimizations) and [DP attention](https://lmsys.org/blog/2024-12-04-sglang-v0-4/#data-parallelism-attention-for-deepseek-models), making SGLang one of the best open-source LLM engines for running DeepSeek models.
+The SGLang and DeepSeek teams collaborated to get DeepSeek V3 FP8 running on NVIDIA and AMD GPUs **from day one**. SGLang also supports [MLA optimization](https://lmsys.org/blog/2024-09-04-sglang-v0-3/#deepseek-multi-head-latent-attention-mla-throughput-optimizations) and [DP attention](https://lmsys.org/blog/2024-12-04-sglang-v0-4/#data-parallelism-attention-for-deepseek-models), making SGLang one of the best open-source LLM engines for running DeepSeek models.
 
 SGLang is the inference engine recommended by the official [DeepSeek team](https://github.com/deepseek-ai/DeepSeek-V3/tree/main?tab=readme-ov-file#62-inference-with-sglang-recommended).
 
 Special thanks to Meituan's Search & Recommend Platform Team and Baseten's Model Performance Team for implementing the model, and DataCrunch for providing GPU resources.
 
 ## Hardware Recommendation
 - 8 x NVIDIA H200 GPUs
 
-If you do not have GPUs with large enough memory, please try multi-node tensor parallelism ([help 1](https://github.com/sgl-project/sglang/blob/637de9e8ce91fd3e92755eb2a842860925954ab1/docs/backend/backend.md?plain=1#L88-L95) [help 2](https://github.com/sgl-project/sglang/blob/637de9e8ce91fd3e92755eb2a842860925954ab1/docs/backend/backend.md?plain=1#L152-L168)). Here is an example serving with [2 H20 node](https://github.com/sgl-project/sglang/tree/main/benchmark/deepseek_v3#example-serving-with-2-h208)
+If you do not have GPUs with large enough memory, please try multi-node tensor parallelism. There is an example of serving with [2 H20 nodes](https://github.com/sgl-project/sglang/tree/main/benchmark/deepseek_v3#example-serving-with-2-h208) below.
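+
+As a rough sanity check (assuming the published ~671B total parameters at one byte per FP8 weight), the weights alone occupy on the order of 700 GB. A single 8 x H200 node (8 x 141 GB = 1128 GB) holds them with room left for activations and the KV cache, while lower-memory GPUs need the weights sharded across two nodes with `--tp 16`.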
 ## Installation & Launch
@@ -61,10 +61,10 @@ For example, there are two H20 nodes, each with 8 GPUs. The first node's IP is `
 ```bash
 # node 1
-GLOO_SOCKET_IFNAME=eth0 python -m sglang.launch_server --model-path deepseek-ai/DeepSeek-V3 --tp 16 --nccl-init 10.0.0.1:5000 --nnodes 2 --node-rank 0 --trust-remote-code
+python -m sglang.launch_server --model-path deepseek-ai/DeepSeek-V3 --tp 16 --nccl-init 10.0.0.1:5000 --nnodes 2 --node-rank 0 --trust-remote-code
 
 # node 2
-GLOO_SOCKET_IFNAME=eth0 python -m sglang.launch_server --model-path deepseek-ai/DeepSeek-V3 --tp 16 --nccl-init 10.0.0.1:5000 --nnodes 2 --node-rank 1 --trust-remote-code
+python -m sglang.launch_server --model-path deepseek-ai/DeepSeek-V3 --tp 16 --nccl-init 10.0.0.1:5000 --nnodes 2 --node-rank 1 --trust-remote-code
 ```
 
 If you have two H100 nodes, the usage is similar to the aforementioned H20.
 
@@ -72,9 +72,3 @@ If you have two H100 nodes, the usage is similar to the aforementioned H20.
 ## DeepSeek V3 Optimization Plan
 
 https://github.com/sgl-project/sglang/issues/2591
-
-## Appendix
-
-SGLang is the inference engine officially recommended by the DeepSeek team.
-
-https://github.com/deepseek-ai/DeepSeek-V3/tree/main?tab=readme-ov-file#62-inference-with-sglang-recommended
diff --git a/docs/backend/backend.md b/docs/backend/backend.md
index 79d17b809..4f014f21e 100644
--- a/docs/backend/backend.md
+++ b/docs/backend/backend.md
@@ -159,10 +159,10 @@ python -m sglang.launch_server --model-path meta-llama/Meta-Llama-3.1-405B-Instr
 
 # Run 405B (fp16) on two nodes
 ## on the first node, replace the `172.16.4.52:20000` with your own first node ip address and port
-GLOO_SOCKET_IFNAME=eth0 python3 -m sglang.launch_server --model-path meta-llama/Meta-Llama-3.1-405B-Instruct --tp 16 --nccl-init-addr 172.16.4.52:20000 --nnodes 2 --node-rank 0 --disable-cuda-graph
+python3 -m sglang.launch_server --model-path meta-llama/Meta-Llama-3.1-405B-Instruct --tp 16 --nccl-init-addr 172.16.4.52:20000 --nnodes 2 --node-rank 0
 
-## on the first node, replace the `172.16.4.52:20000` with your own first node ip address and port
-GLOO_SOCKET_IFNAME=eth0 python3 -m sglang.launch_server --model-path meta-llama/Meta-Llama-3.1-405B-Instruct --tp 16 --nccl-init-addr 172.16.4.52:20000 --nnodes 2 --node-rank 1 --disable-cuda-graph
+## on the second node, replace the `172.16.4.52:20000` with your own first node ip address and port
+python3 -m sglang.launch_server --model-path meta-llama/Meta-Llama-3.1-405B-Instruct --tp 16 --nccl-init-addr 172.16.4.52:20000 --nnodes 2 --node-rank 1
 ```
diff --git a/docs/backend/openai_api_completions.ipynb b/docs/backend/openai_api_completions.ipynb
index a7e3cacff..0f88fd964 100644
--- a/docs/backend/openai_api_completions.ipynb
+++ b/docs/backend/openai_api_completions.ipynb
@@ -221,17 +221,15 @@
    "metadata": {},
    "source": [
     "## Structured Outputs (JSON, Regex, EBNF)\n",
-    "You can specify a JSON schema, Regular Expression or [EBNF](https://en.wikipedia.org/wiki/Extended_Backus%E2%80%93Naur_form) to constrain the model output. The model output will be guaranteed to follow the given constraints. \n",
+    "You can specify a JSON schema, [regular expression](https://en.wikipedia.org/wiki/Regular_expression) or [EBNF](https://en.wikipedia.org/wiki/Extended_Backus%E2%80%93Naur_form) to constrain the model output. The model output will be guaranteed to follow the given constraints. Only one constraint parameter (`json_schema`, `regex`, or `ebnf`) can be specified for a request.\n",
     "\n",
     "SGLang supports two grammar backends:\n",
     "\n",
-    "- [Outlines](https://github.com/dottxt-ai/outlines) (default): Supports JSON schema and Regular Expression constraints.\n",
+    "- [Outlines](https://github.com/dottxt-ai/outlines) (default): Supports JSON schema and regular expression constraints.\n",
     "- [XGrammar](https://github.com/mlc-ai/xgrammar): Supports JSON schema and EBNF constraints.\n",
     "  - XGrammar currently uses the [GGML BNF format](https://github.com/ggerganov/llama.cpp/blob/master/grammars/README.md)\n",
     "\n",
-    "> 🔔 Only one constraint parameter (`json_schema`, `regex`, or `ebnf`) can be specified at a time.\n",
-    "\n",
-    "Initialise xgrammar backend using `--grammar-backend xgrammar` flag\n",
+    "Initialize the XGrammar backend using the `--grammar-backend xgrammar` flag:\n",
     "```bash\n",
     "python -m sglang.launch_server --model-path meta-llama/Meta-Llama-3.1-8B-Instruct \\\n",
     "--port 30000 --host 0.0.0.0 --grammar-backend [xgrammar|outlines] # xgrammar or outlines (default: outlines)\n",
diff --git a/docs/backend/constrained_decoding.ipynb b/docs/backend/structured_outputs.ipynb
similarity index 90%
rename from docs/backend/constrained_decoding.ipynb
rename to docs/backend/structured_outputs.ipynb
index 88cf4a717..93c21e739 100644
--- a/docs/backend/constrained_decoding.ipynb
+++ b/docs/backend/structured_outputs.ipynb
@@ -11,20 +11,22 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "With SGLang, You can define a JSON schema, EBNF or regular expression to constrain the model's output.\n",
+    "## Structured Outputs (JSON, Regex, EBNF)\n",
+    "You can specify a JSON schema, [regular expression](https://en.wikipedia.org/wiki/Regular_expression) or [EBNF](https://en.wikipedia.org/wiki/Extended_Backus%E2%80%93Naur_form) to constrain the model output. The model output will be guaranteed to follow the given constraints. Only one constraint parameter (`json_schema`, `regex`, or `ebnf`) can be specified for a request.\n",
     "\n",
-    "[JSON Schema](https://json-schema.org/): Formats output into structured JSON objects with validation rules.\n",
+    "SGLang supports two grammar backends:\n",
     "\n",
-    "[EBNF (Extended Backus-Naur Form)](https://en.wikipedia.org/wiki/Extended_Backus%E2%80%93Naur_form): Defines complex syntax rules, especially for recursive patterns like nested structures.\n",
+    "- [Outlines](https://github.com/dottxt-ai/outlines) (default): Supports JSON schema and regular expression constraints.\n",
+    "- [XGrammar](https://github.com/mlc-ai/xgrammar): Supports JSON schema and EBNF constraints.\n",
+    "  - XGrammar currently uses the [GGML BNF format](https://github.com/ggerganov/llama.cpp/blob/master/grammars/README.md)\n",
     "\n",
-    "[Regular Expressions](https://en.wikipedia.org/wiki/Regular_expression): Matches text patterns for simple validation and formatting.\n",
+    "Initialize the XGrammar backend using the `--grammar-backend xgrammar` flag:\n",
+    "```bash\n",
+    "python -m sglang.launch_server --model-path meta-llama/Meta-Llama-3.1-8B-Instruct \\\n",
+    "--port 30000 --host 0.0.0.0 --grammar-backend [xgrammar|outlines] # xgrammar or outlines (default: outlines)\n",
+    "```\n",
     "\n",
-    "## Grammar Backend\n",
-    "\n",
-    "SGLang has two backends: [Outlines](https://github.com/dottxt-ai/outlines) (default) and [XGrammar](https://blog.mlc.ai/2024/11/22/achieving-efficient-flexible-portable-structured-generation-with-xgrammar). We suggest using XGrammar whenever possible for its better performance. For more details, see [XGrammar technical overview](https://blog.mlc.ai/2024/11/22/achieving-efficient-flexible-portable-structured-generation-with-xgrammar).\n",
     "\n",
-    "* Xgrammar Backend: JSON and EBNF\n",
-    "* Outlines Backend: JSON and regular expressions"
+    "We suggest using XGrammar whenever possible for its better performance. For more details, see [XGrammar technical overview](https://blog.mlc.ai/2024/11/22/achieving-efficient-flexible-portable-structured-generation-with-xgrammar)."
   ]
  },
 {
@@ -208,15 +210,6 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from sglang.utils import (\n",
-    "    execute_shell_command,\n",
-    "    wait_for_server,\n",
-    "    terminate_process,\n",
-    "    print_highlight,\n",
-    ")\n",
-    "\n",
-    "import requests\n",
-    "\n",
     "server_process = execute_shell_command(\n",
     "    \"\"\"\n",
     "python3 -m sglang.launch_server --model-path meta-llama/Llama-3.2-1B-Instruct --port=30010 --grammar-backend xgrammar\n",
diff --git a/docs/references/sampling_params.md b/docs/references/sampling_params.md
index a796667ec..5dad3fd12 100644
--- a/docs/references/sampling_params.md
+++ b/docs/references/sampling_params.md
@@ -39,10 +39,9 @@ The `sampling_params` follows this format
 ```python
 # The maximum number of output tokens
 max_new_tokens: int = 128,
-# Stop when hitting any of the strings in this list.
+# Stop when hitting any of the strings in this list
 stop: Optional[Union[str, List[str]]] = None,
-# Stop when hitting any of the token_ids in this list. Could be useful when mixed with
-# `min_new_tokens`.
+# Stop when hitting any of the token_ids in this list
 stop_token_ids: Optional[List[int]] = [],
 # Sampling temperature
 temperature: float = 1.0,
@@ -52,26 +51,26 @@ top_p: float = 1.0,
 top_k: int = -1,
 # Min-p sampling
 min_p: float = 0.0,
-# Whether to ignore EOS token.
+# Whether to ignore EOS token
 ignore_eos: bool = False,
-# Whether to skip the special tokens during detokenization.
+# Whether to skip the special tokens during detokenization
 skip_special_tokens: bool = True,
-# Whether to add spaces between special tokens during detokenization.
+# Whether to add spaces between special tokens during detokenization
 spaces_between_special_tokens: bool = True,
 # Do parallel sampling and return `n` outputs.
 n: int = 1,
 
 ## Structured Outputs
-# Only one of the below three can be set at a time:
+# Only one of the three parameters below can be set for a request.
 
-# Constrains the output to follow a given regular expression.
-regex: Optional[str] = None,
-# Constrains the output to follow a given JSON schema.
+# Constrain the output to follow a given JSON schema.
 json_schema: Optional[str] = None,
-# Constrains the output to follow a given EBNF Grammar.
+# Constrain the output to follow a given regular expression.
+regex: Optional[str] = None,
+# Constrain the output to follow a given EBNF grammar.
 ebnf: Optional[str] = None,
 
-## Penalties. See [Performance Implications on Penalties] section below for more informations.
+## Penalties.
 
 # Float that penalizes new tokens based on their frequency in the generated text so far.
 # Values > 0 encourage the model to use new tokens, while values < 0 encourage the model to
@@ -185,17 +184,15 @@ The `image_data` can be a file name, a URL, or a base64 encoded string. See also
 
 Streaming is supported in a similar manner as [above](#streaming).
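+
+A minimal sketch of consuming the streamed server-sent events from the native `/generate` endpoint (assuming a server running locally on port 30000; the prompt and parameter values are illustrative):
+
+```python
+import json
+
+import requests
+
+response = requests.post(
+    "http://localhost:30000/generate",
+    json={
+        "text": "The capital of France is",
+        "sampling_params": {"max_new_tokens": 32, "temperature": 0},
+        "stream": True,
+    },
+    stream=True,
+)
+prev = 0
+for chunk in response.iter_lines(decode_unicode=True):
+    # Each event looks like "data: {...}"; the stream ends with "data: [DONE]".
+    if chunk and chunk.startswith("data:"):
+        if chunk == "data: [DONE]":
+            break
+        data = json.loads(chunk[len("data:"):])
+        # The returned `text` is cumulative, so print only the new suffix.
+        print(data["text"][prev:], end="", flush=True)
+        prev = len(data["text"])
+print()
+```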
 ### Structured Outputs (JSON, Regex, EBNF)
-You can specify a JSON schema, Regular Expression or [EBNF](https://en.wikipedia.org/wiki/Extended_Backus%E2%80%93Naur_form) to constrain the model output. The model output will be guaranteed to follow the given constraints.
+You can specify a JSON schema, regular expression or [EBNF](https://en.wikipedia.org/wiki/Extended_Backus%E2%80%93Naur_form) to constrain the model output. The model output will be guaranteed to follow the given constraints. Only one constraint parameter (`json_schema`, `regex`, or `ebnf`) can be specified for a request.
 
 SGLang supports two grammar backends:
 
-- [Outlines](https://github.com/dottxt-ai/outlines) (default): Supports JSON schema and Regular Expression constraints.
+- [Outlines](https://github.com/dottxt-ai/outlines) (default): Supports JSON schema and regular expression constraints.
 - [XGrammar](https://github.com/mlc-ai/xgrammar): Supports JSON schema and EBNF constraints.
   - XGrammar currently uses the [GGML BNF format](https://github.com/ggerganov/llama.cpp/blob/master/grammars/README.md)
 
-> 🔔 Only one constraint parameter (`json_schema`, `regex`, or `ebnf`) can be specified at a time.
-
-Initialise xgrammar backend using `--grammar-backend xgrammar` flag
+Initialize the XGrammar backend using the `--grammar-backend xgrammar` flag:
 ```bash
 python -m sglang.launch_server --model-path meta-llama/Meta-Llama-3.1-8B-Instruct \
 --port 30000 --host 0.0.0.0 --grammar-backend [xgrammar|outlines] # xgrammar or outlines (default: outlines)
diff --git a/python/sglang/srt/openai_api/protocol.py b/python/sglang/srt/openai_api/protocol.py
index 2599cea3d..4fbe20846 100644
--- a/python/sglang/srt/openai_api/protocol.py
+++ b/python/sglang/srt/openai_api/protocol.py
@@ -171,15 +171,15 @@ class CompletionRequest(BaseModel):
     top_k: int = -1
     min_p: float = 0.0
     min_tokens: int = 0
-    regex: Optional[str] = None
     json_schema: Optional[str] = None
+    regex: Optional[str] = None
+    ebnf: Optional[str] = None
     repetition_penalty: float = 1.0
     stop_token_ids: Optional[List[int]] = None
     no_stop_trim: bool = False
     ignore_eos: bool = False
     skip_special_tokens: bool = True
     lora_path: Optional[Union[List[Optional[str]], Optional[str]]] = None
-    ebnf: Optional[str] = None
 
 
 class CompletionResponseChoice(BaseModel):
@@ -315,13 +315,13 @@ class ChatCompletionRequest(BaseModel):
     min_p: float = 0.0
     min_tokens: int = 0
     regex: Optional[str] = None
+    ebnf: Optional[str] = None
     repetition_penalty: float = 1.0
     stop_token_ids: Optional[List[int]] = None
     no_stop_trim: bool = False
     ignore_eos: bool = False
     skip_special_tokens: bool = True
     lora_path: Optional[Union[List[Optional[str]], Optional[str]]] = None
-    ebnf: Optional[str] = None
 
 
 class FunctionResponse(BaseModel):
diff --git a/python/sglang/srt/sampling/sampling_params.py b/python/sglang/srt/sampling/sampling_params.py
index 55a2c910d..2c3817e1b 100644
--- a/python/sglang/srt/sampling/sampling_params.py
+++ b/python/sglang/srt/sampling/sampling_params.py
@@ -19,6 +19,14 @@ _SAMPLING_EPS = 1e-6
 
 
 class SamplingParams:
+    """
+    The sampling parameters.
+
+    See docs/references/sampling_params.md or
+    https://sgl-project.github.io/references/sampling_params.html
+    for the documentation.
+    """
+
     def __init__(
         self,
         max_new_tokens: int = 128,
@@ -33,9 +41,9 @@ class SamplingParams:
         repetition_penalty: float = 1.0,
         min_new_tokens: int = 0,
         spaces_between_special_tokens: bool = True,
-        regex: Optional[str] = None,
         n: int = 1,
         json_schema: Optional[str] = None,
+        regex: Optional[str] = None,
         ebnf: Optional[str] = None,
         no_stop_trim: bool = False,
         ignore_eos: bool = False,
diff --git a/python/sglang/srt/server.py b/python/sglang/srt/server.py
index ebf153e17..d95ce5931 100644
--- a/python/sglang/srt/server.py
+++ b/python/sglang/srt/server.py
@@ -578,6 +578,8 @@ def _set_envs_and_config(server_args: ServerArgs):
     os.environ["NCCL_NVLS_ENABLE"] = "0"
     os.environ["TORCH_NCCL_AVOID_RECORD_STREAMS"] = "1"
     os.environ["CUDA_DEVICE_MAX_CONNECTIONS"] = "4"
+    if "GLOO_SOCKET_IFNAME" not in os.environ:
+        os.environ["GLOO_SOCKET_IFNAME"] = "eth0"
 
     # Set prometheus env vars
     if server_args.enable_metrics:
diff --git a/python/sglang/srt/server_args.py b/python/sglang/srt/server_args.py
index f7177c2d9..58a6a6a82 100644
--- a/python/sglang/srt/server_args.py
+++ b/python/sglang/srt/server_args.py
@@ -42,7 +42,6 @@ class ServerArgs:
     model_path: str
     tokenizer_path: Optional[str] = None
     tokenizer_mode: str = "auto"
-    skip_tokenizer_init: bool = False
     load_format: str = "auto"
     trust_remote_code: bool = True
     dtype: str = "auto"
@@ -54,6 +53,7 @@ class ServerArgs:
     chat_template: Optional[str] = None
     is_embedding: bool = False
     revision: Optional[str] = None
+    skip_tokenizer_init: bool = False
     return_token_ids: bool = False
 
     # Port for the HTTP server
@@ -276,17 +276,6 @@
             "tokenizer if available, and 'slow' will "
             "always use the slow tokenizer.",
         )
-        parser.add_argument(
-            "--skip-tokenizer-init",
-            action="store_true",
-            help="If set, skip init tokenizer and pass input_ids in generate request",
-        )
-        parser.add_argument(
-            "--return-token-ids",
-            action="store_true",
-            default=ServerArgs.return_token_ids,
-            help="Whether to return token IDs in the output, this may introduce additional overhead.",
-        )
         parser.add_argument(
             "--load-format",
             type=str,
@@ -394,6 +383,17 @@
             "name, a tag name, or a commit id. If unspecified, will use "
             "the default version.",
         )
+        parser.add_argument(
+            "--skip-tokenizer-init",
+            action="store_true",
+            help="If set, skip initializing the tokenizer and pass input_ids in generate requests",
+        )
+        parser.add_argument(
+            "--return-token-ids",
+            action="store_true",
+            default=ServerArgs.return_token_ids,
+            help="Whether to return token IDs in the output; this may introduce additional overhead.",
+        )
 
         # Memory and scheduling
         parser.add_argument(
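
A quick end-to-end check of the constraint parameters documented above, via the native `/generate` endpoint (a sketch, assuming a server running locally on port 30000; the prompt and schema are illustrative, and remember that only one of `json_schema`, `regex`, and `ebnf` may be set per request):

```python
import json

import requests

# Schemas are passed as JSON strings, matching `json_schema: Optional[str]` above.
schema = {
    "type": "object",
    "properties": {
        "name": {"type": "string"},
        "population": {"type": "integer"},
    },
    "required": ["name", "population"],
}

response = requests.post(
    "http://localhost:30000/generate",
    json={
        "text": "Give the information of the capital of France in JSON format.\n",
        "sampling_params": {
            "temperature": 0,
            "max_new_tokens": 64,
            "json_schema": json.dumps(schema),
        },
    },
)
print(response.json()["text"])
```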