[CI] Improve Docs CI Efficiency (#3587)

Co-authored-by: zhaochenyang20 <zhaochen20@outlook.com>

commit 7443197a63 (parent 862dd76c76)
Author: Shi Shuai
Date: 2025-02-15 03:57:00 +00:00
Committed via GitHub

19 changed files with 366 additions and 231 deletions

@@ -34,21 +34,23 @@
"metadata": {},
"outputs": [],
"source": [
"from sglang.utils import (\n",
" execute_shell_command,\n",
" wait_for_server,\n",
" terminate_process,\n",
" print_highlight,\n",
")\n",
"from sglang.test.test_utils import is_in_ci\n",
"\n",
"embedding_process = execute_shell_command(\n",
"if is_in_ci():\n",
" from patch import launch_server_cmd\n",
"else:\n",
" from sglang.utils import launch_server_cmd\n",
"\n",
"from sglang.utils import wait_for_server, print_highlight, terminate_process\n",
"\n",
"embedding_process, port = launch_server_cmd(\n",
" \"\"\"\n",
"python3 -m sglang.launch_server --model-path meta-llama/Llama-3.2-11B-Vision-Instruct \\\n",
" --port=30000 --chat-template=llama_3_vision\n",
" --chat-template=llama_3_vision\n",
"\"\"\"\n",
")\n",
"\n",
"wait_for_server(\"http://localhost:30000\")"
"wait_for_server(f\"http://localhost:{port}\")"
]
},
{
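
The hunk above swaps the hard-coded --port=30000 launch for launch_server_cmd, which assigns the port at launch time and returns it alongside the server process; under CI the notebook imports a patched variant from a local patch module instead. Neither helper's body appears in this diff, so the following is only a minimal sketch of the pattern, under a hypothetical name:

    # Minimal sketch only -- not the actual sglang.utils implementation.
    import socket
    import subprocess

    def sketch_launch_server_cmd(command: str):
        # Ask the OS for a currently unused TCP port (illustrative; subject
        # to races in real concurrent use).
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
            s.bind(("", 0))
            port = s.getsockname()[1]
        # Inject the port so the notebook never hard-codes one, then return
        # the process handle together with the port for requests and teardown.
        process = subprocess.Popen(f"{command} --port {port}", shell=True)
        return process, port
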
@@ -68,32 +70,36 @@
"source": [
"import subprocess\n",
"\n",
"curl_command = \"\"\"\n",
"curl -s http://localhost:30000/v1/chat/completions \\\n",
" -d '{\n",
"curl_command = f\"\"\"\n",
"curl -s http://localhost:{port}/v1/chat/completions \\\\\n",
" -d '{{\n",
" \"model\": \"meta-llama/Llama-3.2-11B-Vision-Instruct\",\n",
" \"messages\": [\n",
" {\n",
" {{\n",
" \"role\": \"user\",\n",
" \"content\": [\n",
" {\n",
" {{\n",
" \"type\": \"text\",\n",
" \"text\": \"Whats in this image?\"\n",
" },\n",
" {\n",
" }},\n",
" {{\n",
" \"type\": \"image_url\",\n",
" \"image_url\": {\n",
" \"image_url\": {{\n",
" \"url\": \"https://github.com/sgl-project/sglang/blob/main/test/lang/example_image.png?raw=true\"\n",
" }\n",
" }\n",
" }}\n",
" }}\n",
" ]\n",
" }\n",
" }}\n",
" ],\n",
" \"max_tokens\": 300\n",
" }'\n",
" }}'\n",
"\"\"\"\n",
"\n",
"response = subprocess.check_output(curl_command, shell=True).decode()\n",
"print_highlight(response)\n",
"\n",
"\n",
"response = subprocess.check_output(curl_command, shell=True).decode()\n",
"print_highlight(response)"
]
},
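
Since curl_command is now an f-string, every literal brace in the JSON payload must be doubled ({ becomes {{, } becomes }}) so that only {port} is interpolated; that is all the brace churn in this hunk amounts to. A two-line illustration:

    port = 30000  # example value
    payload = f'{{"max_tokens": 300, "port": {port}}}'
    # payload == '{"max_tokens": 300, "port": 30000}'
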
@@ -112,7 +118,7 @@
"source": [
"import requests\n",
"\n",
"url = \"http://localhost:30000/v1/chat/completions\"\n",
"url = f\"http://localhost:{port}/v1/chat/completions\"\n",
"\n",
"data = {\n",
" \"model\": \"meta-llama/Llama-3.2-11B-Vision-Instruct\",\n",
@@ -152,7 +158,7 @@
"source": [
"from openai import OpenAI\n",
"\n",
"client = OpenAI(base_url=\"http://localhost:30000/v1\", api_key=\"None\")\n",
"client = OpenAI(base_url=f\"http://localhost:{port}/v1\", api_key=\"None\")\n",
"\n",
"response = client.chat.completions.create(\n",
" model=\"meta-llama/Llama-3.2-11B-Vision-Instruct\",\n",
@@ -196,7 +202,7 @@
"source": [
"from openai import OpenAI\n",
"\n",
"client = OpenAI(base_url=\"http://localhost:30000/v1\", api_key=\"None\")\n",
"client = OpenAI(base_url=f\"http://localhost:{port}/v1\", api_key=\"None\")\n",
"\n",
"response = client.chat.completions.create(\n",
" model=\"meta-llama/Llama-3.2-11B-Vision-Instruct\",\n",
@@ -236,7 +242,7 @@
"metadata": {},
"outputs": [],
"source": [
"terminate_process(embedding_process)"
"terminate_process(embedding_process, port)"
]
},
{
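
terminate_process now receives the dynamically assigned port as well as the process, which fits the commit's goal of letting docs notebooks run side by side in CI without port collisions. The helper's body is not part of this diff; a plausible sketch of the contract:

    # Plausible sketch only -- the real terminate_process lives in sglang.utils.
    _reserved_ports: set[int] = set()  # ports claimed within this Python process

    def terminate_process(process, port=None):
        # Stop the server process...
        process.terminate()
        process.wait()
        # ...and drop any in-process bookkeeping tied to its port so a
        # later notebook in the same run can reuse it.
        if port is not None:
            _reserved_ports.discard(port)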