feat: append more comprehensive fields in messages instead of merely role and content (#5996)

Author: mlmz
Date: 2025-05-06 02:43:34 +08:00
Committed by: GitHub
Parent: 82653f6622
Commit: a68ed76682
3 changed files with 66 additions and 59 deletions
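
In short, the tool-calling example now carries the complete message objects through the chat history instead of a hand-built dict with only role and content. A condensed before/after sketch of the change (`full_arguments`, `result`, and `response_non_stream` are defined in the surrounding notebook cells):

```python
# Before (removed): a hand-built dict that only mimicked an assistant tool call.
messages.append(
    {
        "role": "user",
        "content": "",
        "tool_calls": {"name": "get_current_weather", "arguments": full_arguments},
    }
)

# After (added): append the assistant message returned by the API as-is, then
# report the tool result with an explicit tool_call_id referencing that call.
messages.append(response_non_stream.choices[0].message)
tool_call = messages[-1].tool_calls[0]
messages.append(
    {
        "role": "tool",
        "tool_call_id": tool_call.id,
        "content": str(result),
        "name": tool_call.function.name,
    }
)
```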


@@ -38,7 +38,9 @@
" from patch import launch_server_cmd\n",
"else:\n",
" from sglang.utils import launch_server_cmd\n",
" import nest_asyncio\n",
"\n",
" nest_asyncio.apply()\n",
"\n",
"server_process, port = launch_server_cmd(\n",
" \"python3 -m sglang.launch_server --model-path Qwen/Qwen2.5-7B-Instruct --tool-call-parser qwen25 --host 0.0.0.0\" # qwen25\n",
@@ -164,7 +166,7 @@
"response_non_stream = client.chat.completions.create(\n",
" model=model_name,\n",
" messages=messages,\n",
" temperature=0.1,\n",
" temperature=0,\n",
" top_p=0.95,\n",
" max_tokens=1024,\n",
" stream=False, # Non-streaming\n",
@@ -219,7 +221,7 @@
"response_stream = client.chat.completions.create(\n",
" model=model_name,\n",
" messages=messages,\n",
" temperature=0.1,\n",
" temperature=0,\n",
" top_p=0.95,\n",
" max_tokens=1024,\n",
" stream=True, # Enable streaming\n",
@@ -309,22 +311,23 @@
"metadata": {},
"outputs": [],
"source": [
"call_data = json.loads(full_arguments)\n",
"\n",
"messages.append(\n",
" {\n",
" \"role\": \"user\",\n",
" \"content\": \"\",\n",
" \"tool_calls\": {\"name\": \"get_current_weather\", \"arguments\": full_arguments},\n",
" }\n",
")\n",
"messages.append(response_non_stream.choices[0].message)\n",
"\n",
"# Call the corresponding tool function\n",
"tool_name = messages[-1][\"tool_calls\"][\"name\"]\n",
"tool_call = messages[-1].tool_calls[0]\n",
"tool_name = tool_call.function.name\n",
"tool_to_call = available_tools[tool_name]\n",
"result = tool_to_call(**call_data)\n",
"result = tool_to_call(**(json.loads(tool_call.function.arguments)))\n",
"print_highlight(f\"Function call result: {result}\")\n",
"messages.append({\"role\": \"tool\", \"content\": result, \"name\": tool_name})\n",
"# messages.append({\"role\": \"tool\", \"content\": result, \"name\": tool_name})\n",
"messages.append(\n",
" {\n",
" \"role\": \"tool\",\n",
" \"tool_call_id\": tool_call.id,\n",
" \"content\": str(result),\n",
" \"name\": tool_name,\n",
" }\n",
")\n",
"\n",
"print_highlight(f\"Updated message history: {messages}\")"
]
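
For reference, the object appended by `messages.append(response_non_stream.choices[0].message)` above is the OpenAI SDK's `ChatCompletionMessage`; a small sketch of the fields the updated cell relies on (printed values are illustrative only):

```python
# Sketch: the attributes the updated cell reads from the appended assistant message.
assistant_msg = response_non_stream.choices[0].message  # ChatCompletionMessage
tool_call = assistant_msg.tool_calls[0]                 # first requested tool call

print(tool_call.id)                  # reused as "tool_call_id" in the tool message
print(tool_call.type)                # "function"
print(tool_call.function.name)       # e.g. "get_current_weather"
print(tool_call.function.arguments)  # JSON string, parsed with json.loads(...)
```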
@@ -345,7 +348,7 @@
"final_response = client.chat.completions.create(\n",
" model=model_name,\n",
" messages=messages,\n",
" temperature=0.1,\n",
" temperature=0,\n",
" top_p=0.95,\n",
" stream=False,\n",
" tools=tools,\n",
@@ -391,7 +394,7 @@
" \"sampling_params\": {\n",
" \"skip_special_tokens\": False,\n",
" \"max_new_tokens\": 1024,\n",
" \"temperature\": 0.1,\n",
" \"temperature\": 0,\n",
" \"top_p\": 0.95,\n",
" },\n",
"}\n",
@@ -452,7 +455,7 @@
"\n",
"sampling_params = {\n",
" \"max_new_tokens\": 1024,\n",
" \"temperature\": 0.1,\n",
" \"temperature\": 0,\n",
" \"top_p\": 0.95,\n",
" \"skip_special_tokens\": False,\n",
"}\n",
@@ -540,14 +543,6 @@
"outputs": [],
"source": [
"import openai\n",
"from sglang.utils import wait_for_server, print_highlight, terminate_process\n",
"from sglang.test.test_utils import is_in_ci\n",
"\n",
"\n",
"if is_in_ci():\n",
" from patch import launch_server_cmd\n",
"else:\n",
" from sglang.utils import launch_server_cmd\n",
"\n",
"server_process, port = launch_server_cmd(\n",
" \" python3 -m sglang.launch_server --model-path meta-llama/Llama-3.2-1B-Instruct --tool-call-parser pythonic --tp 1\" # llama-3.2-1b-instruct\n",
@@ -624,8 +619,8 @@
"response_non_stream = client.chat.completions.create(\n",
" model=model_name,\n",
" messages=messages,\n",
" temperature=0.8,\n",
" top_p=0.8,\n",
" temperature=0,\n",
" top_p=0.9,\n",
" stream=False, # Non-streaming\n",
" tools=tools,\n",
")\n",
@@ -635,8 +630,8 @@
"response_stream = client.chat.completions.create(\n",
" model=model_name,\n",
" messages=messages,\n",
" temperature=0.8,\n",
" top_p=0.8,\n",
" temperature=0,\n",
" top_p=0.9,\n",
" stream=True,\n",
" tools=tools,\n",
")\n",