bugfix: Fix multiple finish_reason chunks and tool_calls finish reason check (#8417)

Chang Su
2025-07-27 13:31:06 -07:00
committed by GitHub
parent e983d66680
commit b47eda3316
4 changed files with 500 additions and 235 deletions
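The diffs below address two streaming bugs: a single choice could receive more than one chunk carrying a finish_reason, and the terminal finish_reason was not reconciled with emitted tool calls. As a minimal sketch of the exactly-once invariant the new assertions pin down (hypothetical names, not the actual serving code):

finish_emitted = set()  # choice indices that already received their finish_reason

def build_chunk_choice(index, delta, finish_reason):
    # Sketch only: clients must see exactly one terminating chunk per choice,
    # so a later finish_reason for an already-finished index is nulled out.
    if finish_reason is not None:
        if index in finish_emitted:
            finish_reason = None
        else:
            finish_emitted.add(index)
    return {"index": index, "delta": delta, "finish_reason": finish_reason}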

View File

@@ -233,6 +233,7 @@ class TestOpenAIServer(CustomTestCase):
        is_firsts = {}
        is_finished = {}
        finish_reason_counts = {}
        for response in generator:
            usage = response.usage
            if usage is not None:
@@ -245,6 +246,7 @@ class TestOpenAIServer(CustomTestCase):
            finish_reason = response.choices[0].finish_reason
            if finish_reason is not None:
                is_finished[index] = True
                finish_reason_counts[index] = finish_reason_counts.get(index, 0) + 1
            data = response.choices[0].delta
@@ -284,6 +286,15 @@ class TestOpenAIServer(CustomTestCase):
                index, True
            ), f"index {index} is not found in the response"

        # Verify that each choice gets exactly one finish_reason chunk
        for index in range(parallel_sample_num):
            assert (
                index in finish_reason_counts
            ), f"No finish_reason found for index {index}"
            assert (
                finish_reason_counts[index] == 1
            ), f"Expected 1 finish_reason chunk for index {index}, got {finish_reason_counts[index]}"

    def test_completion(self):
        for echo in [False, True]:
            for logprobs in [None, 5]:
@@ -420,91 +431,6 @@ The SmartHome Mini is a compact smart home assistant available in black or white
            client.models.retrieve("non-existent-model")


# -------------------------------------------------------------------------
# EBNF Test Class: TestOpenAIServerEBNF
# Launches the server with xgrammar, has only EBNF tests
# -------------------------------------------------------------------------
class TestOpenAIServerEBNF(CustomTestCase):
    @classmethod
    def setUpClass(cls):
        cls.model = DEFAULT_SMALL_MODEL_NAME_FOR_TEST
        cls.base_url = DEFAULT_URL_FOR_TEST
        cls.api_key = "sk-123456"
        # passing xgrammar specifically
        other_args = ["--grammar-backend", "xgrammar"]
        cls.process = popen_launch_server(
            cls.model,
            cls.base_url,
            timeout=DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
            api_key=cls.api_key,
            other_args=other_args,
        )
        cls.base_url += "/v1"
        cls.tokenizer = get_tokenizer(DEFAULT_SMALL_MODEL_NAME_FOR_TEST)

    @classmethod
    def tearDownClass(cls):
        kill_process_tree(cls.process.pid)

    def test_ebnf(self):
        """
        Ensure we can pass `ebnf` to the local openai server
        and that it enforces the grammar.
        """
        client = openai.Client(api_key=self.api_key, base_url=self.base_url)
        ebnf_grammar = r"""
        root ::= "Hello" | "Hi" | "Hey"
        """
        pattern = re.compile(r"^(Hello|Hi|Hey)[.!?]*\s*$")
        response = client.chat.completions.create(
            model=self.model,
            messages=[
                {"role": "system", "content": "You are a helpful EBNF test bot."},
                {"role": "user", "content": "Say a greeting (Hello, Hi, or Hey)."},
            ],
            temperature=0,
            max_tokens=32,
            extra_body={"ebnf": ebnf_grammar},
        )
        text = response.choices[0].message.content.strip()
        self.assertTrue(len(text) > 0, "Got empty text from EBNF generation")
        self.assertRegex(text, pattern, f"Text '{text}' doesn't match EBNF choices")

    def test_ebnf_strict_json(self):
        """
        A stricter EBNF that produces exactly {"name":"Alice"} format
        with no trailing punctuation or extra fields.
        """
        client = openai.Client(api_key=self.api_key, base_url=self.base_url)
        ebnf_grammar = r"""
        root ::= "{" pair "}"
        pair ::= "\"name\"" ":" string
        string ::= "\"" [A-Za-z]+ "\""
        """
        pattern = re.compile(r'^\{"name":"[A-Za-z]+"\}$')
        response = client.chat.completions.create(
            model=self.model,
            messages=[
                {"role": "system", "content": "EBNF mini-JSON generator."},
                {
                    "role": "user",
                    "content": "Generate single key JSON with only letters.",
                },
            ],
            temperature=0,
            max_tokens=64,
            extra_body={"ebnf": ebnf_grammar},
        )
        text = response.choices[0].message.content.strip()
        self.assertTrue(len(text) > 0, "Got empty text from EBNF strict JSON test")
        self.assertRegex(
            text, pattern, f"Text '{text}' not matching the EBNF strict JSON shape"
        )


class TestOpenAIV1Rerank(CustomTestCase):
    @classmethod
    def setUpClass(cls):

View File

@@ -197,6 +197,134 @@ class ServingChatTestCase(unittest.TestCase):
        self.assertEqual(params["min_new_tokens"], 5)
        self.assertEqual(params["stop"], ["</s>"])

    async def test_unstreamed_tool_args_completion(self):
        """Test that remaining tool call arguments are sent when generation finishes."""
        # Mock FunctionCallParser with detector that has partial tool call data
        mock_parser = Mock()
        mock_detector = Mock()
        # Simulate a tool call that was partially streamed
        mock_detector.prev_tool_call_arr = [
            {
                "name": "get_weather",
                "arguments": {"location": "San Francisco", "unit": "celsius"},
            }
        ]
        mock_detector.streamed_args_for_tool = [
            '{"location": "San Francisco"'  # Partial arguments streamed so far
        ]
        mock_parser.detector = mock_detector

        content = {
            "meta_info": {
                "id": "chatcmpl-test123",
            }
        }
        request = ChatCompletionRequest(
            model="test",
            messages=[{"role": "user", "content": "What's the weather?"}],
            tools=[{"type": "function", "function": {"name": "get_weather"}}],
        )

        # Test the completion method
        result = self.chat._check_for_unstreamed_tool_args(
            parser=mock_parser,
            content=content,
            request=request,
            finish_reason_type="stop",
            index=0,
        )

        # Should return a chunk with remaining arguments
        self.assertIsNotNone(result, "Should return chunk with remaining arguments")
        self.assertIn('"arguments":', result, "Should contain arguments field")
        self.assertIn(
            ', "unit": "celsius"}', result, "Should contain remaining arguments"
        )
        self.assertIn(
            '"finish_reason":null',
            result,
            "Should not include finish_reason in completion chunk",
        )
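The expected remainder ', "unit": "celsius"}' in the assertion above is just the JSON-serialized full arguments with the already-streamed prefix cut off. A minimal sketch of that prefix subtraction (hypothetical helper name; the real _check_for_unstreamed_tool_args wraps the result in a streaming chunk and may compute it differently):

import json

def remaining_tool_args(full_args, streamed_so_far):
    # Suffix of the serialized arguments not yet sent to the client, or None.
    full_json = json.dumps(full_args)
    if full_json.startswith(streamed_so_far) and len(full_json) > len(streamed_so_far):
        return full_json[len(streamed_so_far):]
    return None  # everything was already streamed, nothing to flush

# Mirrors the fixture above:
assert remaining_tool_args(
    {"location": "San Francisco", "unit": "celsius"},
    '{"location": "San Francisco"',
) == ', "unit": "celsius"}'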
    async def test_unstreamed_tool_args_no_completion_needed(self):
        """Test that no completion chunk is sent when all arguments were already streamed."""
        # Mock FunctionCallParser with detector that has complete tool call data
        mock_parser = Mock()
        mock_detector = Mock()
        # Simulate a tool call that was completely streamed
        mock_detector.prev_tool_call_arr = [
            {"name": "get_weather", "arguments": {"location": "San Francisco"}}
        ]
        mock_detector.streamed_args_for_tool = [
            '{"location": "San Francisco"}'  # All arguments already streamed
        ]
        mock_parser.detector = mock_detector

        content = {
            "meta_info": {
                "id": "chatcmpl-test123",
            }
        }
        request = ChatCompletionRequest(
            model="test",
            messages=[{"role": "user", "content": "What's the weather?"}],
            tools=[{"type": "function", "function": {"name": "get_weather"}}],
        )

        # Test the completion method
        result = self.chat._check_for_unstreamed_tool_args(
            parser=mock_parser,
            content=content,
            request=request,
            finish_reason_type="stop",
            index=0,
        )

        # Should return None since no completion is needed
        self.assertIsNone(result, "Should return None when no completion is needed")

    async def test_unstreamed_tool_args_no_parser_data(self):
        """Test that no completion chunk is sent when parser has no tool call data."""
        # Mock FunctionCallParser with empty detector
        mock_parser = Mock()
        mock_detector = Mock()
        mock_detector.prev_tool_call_arr = []
        mock_detector.streamed_args_for_tool = []
        mock_parser.detector = mock_detector

        content = {
            "meta_info": {
                "id": "chatcmpl-test123",
            }
        }
        request = ChatCompletionRequest(
            model="test",
            messages=[{"role": "user", "content": "What's the weather?"}],
            tools=[{"type": "function", "function": {"name": "get_weather"}}],
        )

        # Test the completion method
        result = self.chat._check_for_unstreamed_tool_args(
            parser=mock_parser,
            content=content,
            request=request,
            finish_reason_type="stop",
            index=0,
        )

        # Should return None since there's no parser data
        self.assertIsNone(
            result, "Should return None when parser has no tool call data"
        )


if __name__ == "__main__":
    unittest.main(verbosity=2)
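On the second half of the commit title, the tool_calls finish-reason check: under the OpenAI chat completions contract, a streamed response that emitted tool calls should terminate with finish_reason "tool_calls" rather than "stop". A hedged sketch of that mapping (assumed shape, not the literal fix in serving_chat):

def final_finish_reason(raw_reason, tool_calls_emitted):
    # Rewrite a plain "stop" to "tool_calls" when tool calls were streamed,
    # so clients can dispatch on the documented finish reason.
    if raw_reason == "stop" and tool_calls_emitted:
        return "tool_calls"
    return raw_reason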