diff --git a/docs/backend/openai_api_completions.ipynb b/docs/backend/openai_api_completions.ipynb
index 29def290b..4ccb2197c 100644
--- a/docs/backend/openai_api_completions.ipynb
+++ b/docs/backend/openai_api_completions.ipynb
@@ -97,14 +97,23 @@
"\n",
"#### Enabling Model Thinking/Reasoning\n",
"\n",
- "You can use `chat_template_kwargs` to enable or disable the model's internal thinking or reasoning process output. Set `\"enable_thinking\": True` within `chat_template_kwargs` to include the reasoning steps in the response. This requires launching the server with a compatible reasoning parser (e.g., `--reasoning-parser qwen3` for Qwen3 models).\n",
+ "You can use `chat_template_kwargs` to enable or disable the model's internal thinking or reasoning process output. Set `\"enable_thinking\": True` within `chat_template_kwargs` to include the reasoning steps in the response. This requires launching the server with a compatible reasoning parser.\n",
+ "\n",
+ "**Reasoning Parser Options:**\n",
+ "- `--reasoning-parser deepseek-r1`: For DeepSeek-R1 family models (R1, R1-0528, R1-Distill)\n",
+ "- `--reasoning-parser qwen3`: For standard Qwen3 models that support `enable_thinking` parameter\n",
+ "- `--reasoning-parser qwen3-thinking`: For Qwen3-Thinking models (e.g., Qwen/Qwen3-235B-A22B-Thinking-2507) that always generate thinking content\n",
+ "- `--reasoning-parser kimi`: For Kimi thinking models\n",
"\n",
"Here's an example demonstrating how to enable thinking and retrieve the reasoning content separately (using `separate_reasoning: True`):\n",
"\n",
"```python\n",
- "# Ensure the server is launched with a compatible reasoning parser, e.g.:\n",
+ "# For standard Qwen3 models with enable_thinking support:\n",
"# python3 -m sglang.launch_server --model-path QwQ/Qwen3-32B-250415 --reasoning-parser qwen3 ...\n",
"\n",
+ "# For Qwen3-Thinking models that always think:\n",
+ "# python3 -m sglang.launch_server --model-path Qwen/Qwen3-235B-A22B-Thinking-2507 --reasoning-parser qwen3-thinking ...\n",
+ "\n",
"from openai import OpenAI\n",
"\n",
"# Modify OpenAI's API key and API base to use SGLang's API server.\n",
@@ -123,7 +132,7 @@
" model=model,\n",
" messages=messages,\n",
" extra_body={\n",
- " \"chat_template_kwargs\": {\"enable_thinking\": True},\n",
+ " \"chat_template_kwargs\": {\"enable_thinking\": True}, # Only for standard Qwen3 models\n",
" \"separate_reasoning\": True\n",
" }\n",
")\n",
@@ -149,6 +158,8 @@
"\n",
"Setting `\"enable_thinking\": False` (or omitting it) will result in `reasoning_content` being `None`.\n",
"\n",
+ "**Note for Qwen3-Thinking models:** These models always generate thinking content and do not support the `enable_thinking` parameter. When using `--reasoning-parser qwen3-thinking`, the model will always produce reasoning content regardless of the `enable_thinking` setting.\n",
+ "\n",
"Here is an example of a detailed chat completion request using standard OpenAI parameters:"
]
},
diff --git a/docs/backend/separate_reasoning.ipynb b/docs/backend/separate_reasoning.ipynb
index 50a91b897..cd0ab23c4 100644
--- a/docs/backend/separate_reasoning.ipynb
+++ b/docs/backend/separate_reasoning.ipynb
@@ -6,14 +6,27 @@
"source": [
"# Reasoning Parser\n",
"\n",
- "SGLang supports parsing reasoning content our from \"normal\" content for reasoning models such as [DeepSeek R1](https://huggingface.co/deepseek-ai/DeepSeek-R1).\n",
+ "SGLang supports parsing reasoning content out from \"normal\" content for reasoning models such as [DeepSeek R1](https://huggingface.co/deepseek-ai/DeepSeek-R1).\n",
"\n",
"## Supported Models & Parsers\n",
"\n",
- "| Model | Reasoning tags | Parser |\n",
- "|---------|-----------------------------|------------------|\n",
- "| [DeepSeek‑R1 series](https://huggingface.co/collections/deepseek-ai/deepseek-r1-678e1e131c0169c0bc89728d) | `` … `` | `deepseek-r1` |\n",
- "| [Qwen3 and QwQ series](https://huggingface.co/collections/Qwen/qwen3-67dd247413f0e2e4f653967f) | `` … `` | `qwen3` |"
+ "| Model | Reasoning tags | Parser | Notes |\n",
+ "|---------|-----------------------------|------------------|-------|\n",
+ "| [DeepSeek‑R1 series](https://huggingface.co/collections/deepseek-ai/deepseek-r1-678e1e131c0169c0bc89728d) | `` … `` | `deepseek-r1` | Supports all variants (R1, R1-0528, R1-Distill) |\n",
+ "| [Standard Qwen3 models](https://huggingface.co/collections/Qwen/qwen3-67dd247413f0e2e4f653967f) | `` … `` | `qwen3` | Supports `enable_thinking` parameter |\n",
+ "| [Qwen3-Thinking models](https://huggingface.co/Qwen/Qwen3-235B-A22B-Thinking-2507) | `` … `` | `qwen3-thinking` | Always generates thinking content |\n",
+ "| [Kimi models](https://huggingface.co/collections/MoonshotAI/kimi-675e30c072b7ba7e79833be7) | `◁think▷` … `◁/think▷` | `kimi` | Uses special thinking delimiters |\n",
+ "\n",
+ "### Model-Specific Behaviors\n",
+ "\n",
+ "**DeepSeek-R1 Family:**\n",
+ "- DeepSeek-R1: No `` start tag, jumps directly to thinking content\n",
+ "- DeepSeek-R1-0528: Generates both `` start and `` end tags\n",
+ "- Both are handled by the same `deepseek-r1` parser\n",
+ "\n",
+ "**Qwen3 Family:**\n",
+ "- Standard Qwen3 (e.g., Qwen3-2507): Use `qwen3` parser, supports `enable_thinking` in chat templates\n",
+ "- Qwen3-Thinking (e.g., Qwen3-235B-A22B-Thinking-2507): Use `qwen3-thinking` parser, always thinks"
]
},
{
@@ -353,36 +366,61 @@
"```python\n",
"class DeepSeekR1Detector(BaseReasoningFormatDetector):\n",
" \"\"\"\n",
- " Detector for DeepSeek-R1 model.\n",
- " Assumes reasoning format:\n",
- " ()*(.*)\n",
- " Returns all the text before the tag as `reasoning_text`\n",
- " and the rest of the text as `normal_text`.\n",
- "\n",
- " Args:\n",
- " stream_reasoning (bool): If False, accumulates reasoning content until the end tag.\n",
- " If True, streams reasoning content as it arrives.\n",
+ " Detector for DeepSeek-R1 family models.\n",
+ " \n",
+ " Supported models:\n",
+ " - DeepSeek-R1: Always generates thinking content without start tag\n",
+ " - DeepSeek-R1-0528: Generates thinking content with start tag\n",
+ " \n",
+ " This detector handles both patterns automatically.\n",
" \"\"\"\n",
"\n",
- " def __init__(self, stream_reasoning: bool = False):\n",
- " # DeepSeek-R1 is assumed to be reasoning until `` token\n",
- " super().__init__(\"\", \"\", True, stream_reasoning=stream_reasoning)\n",
- " # https://github.com/sgl-project/sglang/pull/3202#discussion_r1950153599\n",
+ " def __init__(self, stream_reasoning: bool = True):\n",
+ " super().__init__(\"\", \"\", force_reasoning=True, stream_reasoning=stream_reasoning)\n",
+ "\n",
+ "\n",
+ "class Qwen3Detector(BaseReasoningFormatDetector):\n",
+ " \"\"\"\n",
+ " Detector for standard Qwen3 models that support enable_thinking parameter.\n",
+ " \n",
+ " These models can switch between thinking and non-thinking modes:\n",
+ " - enable_thinking=True: Generates ... tags\n",
+ " - enable_thinking=False: No thinking content generated\n",
+ " \"\"\"\n",
+ "\n",
+ " def __init__(self, stream_reasoning: bool = True):\n",
+ " super().__init__(\"\", \"\", force_reasoning=False, stream_reasoning=stream_reasoning)\n",
+ "\n",
+ "\n",
+ "class Qwen3ThinkingDetector(BaseReasoningFormatDetector):\n",
+ " \"\"\"\n",
+ " Detector for Qwen3-Thinking models (e.g., Qwen3-235B-A22B-Thinking-2507).\n",
+ " \n",
+ " These models always generate thinking content without start tag.\n",
+ " They do not support the enable_thinking parameter.\n",
+ " \"\"\"\n",
+ "\n",
+ " def __init__(self, stream_reasoning: bool = True):\n",
+ " super().__init__(\"\", \"\", force_reasoning=True, stream_reasoning=stream_reasoning)\n",
"\n",
"\n",
"class ReasoningParser:\n",
" \"\"\"\n",
- " Parser that handles both streaming and non-streaming scenarios for extracting\n",
- " reasoning content from model outputs.\n",
- "\n",
- " Args:\n",
- " model_type (str): Type of model to parse reasoning from\n",
- " stream_reasoning (bool): If False, accumulates reasoning content until complete.\n",
- " If True, streams reasoning content as it arrives.\n",
+ " Parser that handles both streaming and non-streaming scenarios.\n",
+ " \n",
+ " Usage:\n",
+ " # For standard Qwen3 models with enable_thinking support\n",
+ " parser = ReasoningParser(\"qwen3\")\n",
+ " \n",
+ " # For Qwen3-Thinking models that always think\n",
+ " parser = ReasoningParser(\"qwen3-thinking\")\n",
" \"\"\"\n",
"\n",
- " DetectorMap: Dict[str, BaseReasoningFormatDetector] = {\n",
- " \"deepseek-r1\": DeepSeekR1Detector\n",
+ " DetectorMap: Dict[str, Type[BaseReasoningFormatDetector]] = {\n",
+ " \"deepseek-r1\": DeepSeekR1Detector,\n",
+ " \"qwen3\": Qwen3Detector,\n",
+ " \"qwen3-thinking\": Qwen3ThinkingDetector,\n",
+ " \"kimi\": KimiDetector,\n",
" }\n",
"\n",
" def __init__(self, model_type: str = None, stream_reasoning: bool = True):\n",
@@ -395,13 +433,13 @@
"\n",
" self.detector = detector_class(stream_reasoning=stream_reasoning)\n",
"\n",
- " def parse_non_stream(self, full_text: str) -> StreamingParseResult:\n",
- " \"\"\"Non-streaming call: one-time parsing\"\"\"\n",
+ " def parse_non_stream(self, full_text: str) -> Tuple[str, str]:\n",
+ " \"\"\"Returns (reasoning_text, normal_text)\"\"\"\n",
" ret = self.detector.detect_and_parse(full_text)\n",
" return ret.reasoning_text, ret.normal_text\n",
"\n",
- " def parse_stream_chunk(self, chunk_text: str) -> StreamingParseResult:\n",
- " \"\"\"Streaming call: incremental parsing\"\"\"\n",
+ " def parse_stream_chunk(self, chunk_text: str) -> Tuple[str, str]:\n",
+ " \"\"\"Returns (reasoning_text, normal_text) for the current chunk\"\"\"\n",
" ret = self.detector.parse_streaming_increment(chunk_text)\n",
" return ret.reasoning_text, ret.normal_text\n",
"```"
diff --git a/python/sglang/srt/reasoning_parser.py b/python/sglang/srt/reasoning_parser.py
index 9e18554f1..87915c541 100644
--- a/python/sglang/srt/reasoning_parser.py
+++ b/python/sglang/srt/reasoning_parser.py
@@ -118,6 +118,14 @@ class DeepSeekR1Detector(BaseReasoningFormatDetector):
     Returns all the text before the </think> tag as `reasoning_text`
and the rest of the text as `normal_text`.
+ Supported models:
+ - DeepSeek-R1: Always generates thinking content without start tag
+ - DeepSeek-R1-0528: Generates thinking content with start tag
+
+ Format patterns:
+ - DeepSeek-R1: "I need to think about this...The answer is 42."
+ - DeepSeek-R1-0528: "I need to think about this...The answer is 42."
+
Args:
stream_reasoning (bool): If False, accumulates reasoning content until the end tag.
If True, streams reasoning content as it arrives.
@@ -136,11 +144,20 @@ class DeepSeekR1Detector(BaseReasoningFormatDetector):
class Qwen3Detector(BaseReasoningFormatDetector):
"""
- Detector for Qwen3 model.
+ Detector for standard Qwen3 models (e.g., Qwen/Qwen3-235B-A22B).
Assumes reasoning format:
     (<think>)*(.*)</think>
- Returns all the text before the tag as `reasoning_text`
- and the rest of the text as `normal_text`.
+
+    Qwen3 models released before 07/2025 support switching between thinking mode and normal
+    mode using the `enable_thinking` parameter in the request.
+ - enable_thinking=True: "reasoning contentThe answer is 42."
+ - enable_thinking=False: "The answer is 42." (no thinking tokens)
+
+ This detector handles both cases.
+
+ NOTE: Do NOT use this detector for Qwen3-Thinking models (e.g., Qwen3-Thinking-2507).
+ Those models always generate thinking content without start tags.
+ Use "qwen3-thinking" parser type for those models instead.
Args:
stream_reasoning (bool): If False, accumulates reasoning content until the end tag.
@@ -148,7 +165,6 @@ class Qwen3Detector(BaseReasoningFormatDetector):
"""
def __init__(self, stream_reasoning: bool = True):
- # Qwen3 won't be in reasoning mode when user passes `enable_thinking=False`
super().__init__(
"",
"",
@@ -157,6 +173,31 @@ class Qwen3Detector(BaseReasoningFormatDetector):
)
+class Qwen3ThinkingDetector(BaseReasoningFormatDetector):
+ """
+ Detector for Qwen3-Thinking models (e.g., Qwen3-Thinking-2507).
+ Assumes reasoning format:
+ *(.*)
+
+ These models always generate thinking content without start tag.
+ They do not support the enable_thinking parameter and always think.
+
+ Format: "I need to think about this...The answer is 42."
+
+ Args:
+ stream_reasoning (bool): If False, accumulates reasoning content until the end tag.
+ If True, streams reasoning content as it arrives.
+ """
+
+ def __init__(self, stream_reasoning: bool = True):
+ super().__init__(
+ "",
+ "",
+ force_reasoning=True,
+ stream_reasoning=stream_reasoning,
+ )
+
+
class KimiDetector(BaseReasoningFormatDetector):
"""
Detector for Kimi Thinking model.
@@ -189,6 +230,7 @@ class ReasoningParser:
DetectorMap: Dict[str, Type[BaseReasoningFormatDetector]] = {
"deepseek-r1": DeepSeekR1Detector,
"qwen3": Qwen3Detector,
+ "qwen3-thinking": Qwen3ThinkingDetector,
"kimi": KimiDetector,
}
diff --git a/test/srt/test_reasoning_parser.py b/test/srt/test_reasoning_parser.py
index e4200ed23..7f3359144 100644
--- a/test/srt/test_reasoning_parser.py
+++ b/test/srt/test_reasoning_parser.py
@@ -5,6 +5,7 @@ from sglang.srt.reasoning_parser import (
DeepSeekR1Detector,
KimiDetector,
Qwen3Detector,
+ Qwen3ThinkingDetector,
ReasoningParser,
StreamingParseResult,
)
@@ -180,6 +181,14 @@ class TestDeepSeekR1Detector(CustomTestCase):
self.assertEqual(result.reasoning_text, "I think this is the answer")
self.assertEqual(result.normal_text, "The final answer is 42.")
+ def test_detect_and_parse_with_start_token(self):
+ """Test parsing deepseek-ai/DeepSeek-R1-0528 format, which generates the token."""
+ text = "I need to think about this.The answer is 42."
+ result = self.detector.detect_and_parse(text)
+ # Should be treated as reasoning because force_reasoning=True
+ self.assertEqual(result.reasoning_text, "I need to think about this.")
+ self.assertEqual(result.normal_text, "The answer is 42.")
+
class TestQwen3Detector(CustomTestCase):
def setUp(self):
@@ -207,6 +216,52 @@ class TestQwen3Detector(CustomTestCase):
self.assertEqual(result.reasoning_text, "")
+class TestQwen3ThinkingDetector(CustomTestCase):
+ def setUp(self):
+ self.detector = Qwen3ThinkingDetector()
+
+ def test_init(self):
+ """Test Qwen3ThinkingDetector initialization."""
+        self.assertEqual(self.detector.think_start_token, "<think>")
+        self.assertEqual(self.detector.think_end_token, "</think>")
+ self.assertTrue(self.detector._in_reasoning) # force_reasoning=True
+ self.assertTrue(self.detector.stream_reasoning)
+
+ def test_detect_and_parse_qwen3_thinking_format(self):
+ """Test parsing Qwen3-Thinking format (no start tag)."""
+ text = "I need to think about this step by step.The answer is 42."
+ result = self.detector.detect_and_parse(text)
+ self.assertEqual(
+ result.reasoning_text, "I need to think about this step by step."
+ )
+ self.assertEqual(result.normal_text, "The answer is 42.")
+
+ def test_detect_and_parse_with_start_token(self):
+ """Test parsing Qwen3-Thinking with optional start tag."""
+ text = "I need to think about this.The answer is 42."
+ result = self.detector.detect_and_parse(text)
+ # Should work because base class logic handles both force_reasoning=True OR start token
+ self.assertEqual(result.reasoning_text, "I need to think about this.")
+ self.assertEqual(result.normal_text, "The answer is 42.")
+
+ def test_streaming_qwen3_thinking_format(self):
+ """Test streaming parse of Qwen3-Thinking format."""
+ # First chunk without start
+ result = self.detector.parse_streaming_increment("I need to")
+ self.assertEqual(result.reasoning_text, "I need to")
+ self.assertEqual(result.normal_text, "")
+
+ # More reasoning content
+ result = self.detector.parse_streaming_increment(" think about this.")
+ self.assertEqual(result.reasoning_text, " think about this.")
+ self.assertEqual(result.normal_text, "")
+
+ # End token with normal text
+        result = self.detector.parse_streaming_increment("</think>The answer is 42.")
+ self.assertEqual(result.reasoning_text, "") # Buffer cleared
+ self.assertEqual(result.normal_text, "The answer is 42.")
+
+
class TestKimiDetector(CustomTestCase):
def setUp(self):
self.detector = KimiDetector()
@@ -265,6 +320,9 @@ class TestReasoningParser(CustomTestCase):
parser = ReasoningParser("qwen3")
self.assertIsInstance(parser.detector, Qwen3Detector)
+ parser = ReasoningParser("qwen3-thinking")
+ self.assertIsInstance(parser.detector, Qwen3ThinkingDetector)
+
parser = ReasoningParser("kimi")
self.assertIsInstance(parser.detector, KimiDetector)
@@ -312,11 +370,13 @@ class TestReasoningParser(CustomTestCase):
"""Test case insensitive model type matching."""
parser1 = ReasoningParser("DeepSeek-R1")
parser2 = ReasoningParser("QWEN3")
- parser3 = ReasoningParser("Kimi")
+ parser3 = ReasoningParser("QWEN3-THINKING")
+ parser4 = ReasoningParser("Kimi")
self.assertIsInstance(parser1.detector, DeepSeekR1Detector)
self.assertIsInstance(parser2.detector, Qwen3Detector)
- self.assertIsInstance(parser3.detector, KimiDetector)
+ self.assertIsInstance(parser3.detector, Qwen3ThinkingDetector)
+ self.assertIsInstance(parser4.detector, KimiDetector)
def test_stream_reasoning_parameter(self):
"""Test stream_reasoning parameter is passed correctly."""
@@ -398,6 +458,40 @@ class TestIntegrationScenarios(CustomTestCase):
self.assertEqual(reasoning, "")
self.assertEqual(normal, "Just the answer.")
+ def test_qwen3_thinking_complete_response(self):
+ """Test complete Qwen3-Thinking response parsing."""
+ parser = ReasoningParser("qwen3-thinking")
+ text = "Let me solve this step by step. The equation is x + 2 = 5. Subtracting 2 from both sides gives x = 3.The solution is x = 3."
+
+ reasoning, normal = parser.parse_non_stream(text)
+ self.assertIn("step by step", reasoning)
+ self.assertIn("x = 3", reasoning)
+ self.assertEqual(normal, "The solution is x = 3.")
+
+ def test_qwen3_thinking_streaming_scenario(self):
+ """Test Qwen3-Thinking streaming scenario."""
+ parser = ReasoningParser("qwen3-thinking")
+
+ chunks = [
+ "I need to analyze",
+ " this problem carefully.",
+ " Let me break it down.",
+ "",
+ "The final answer is 42.",
+ ]
+
+ all_reasoning = ""
+ all_normal = ""
+
+ for chunk in chunks:
+ reasoning, normal = parser.parse_stream_chunk(chunk)
+ all_reasoning += reasoning
+ all_normal += normal
+
+ self.assertIn("analyze", all_reasoning)
+ self.assertIn("break it down", all_reasoning)
+ self.assertIn("final answer", all_normal)
+
if __name__ == "__main__":
unittest.main()