diff --git a/.github/workflows/deploy-docs.yml b/.github/workflows/deploy-docs.yml
new file mode 100644
index 000000000..6aa908c0c
--- /dev/null
+++ b/.github/workflows/deploy-docs.yml
@@ -0,0 +1,107 @@
+name: Build Documentation
+
+on:
+  push:
+    branches: [ main ]
+  pull_request:
+    branches: [ main ]
+  workflow_dispatch:
+
+jobs:
+  execute-notebooks:
+    runs-on: 1-gpu-runner
+    if: github.event_name == 'pull_request' || github.event_name == 'workflow_dispatch'
+    steps:
+      - uses: actions/checkout@v3
+        with:
+          fetch-depth: 0
+
+      - name: Set up Python
+        uses: actions/setup-python@v4
+        with:
+          python-version: '3.9'
+
+      - name: Install dependencies
+        run: |
+          pip install --upgrade pip
+          pip install -e "python[all]"
+          pip install -r docs/requirements.txt
+          pip install nbconvert jupyter_client ipykernel ipywidgets matplotlib
+          pip install transformers==4.45.2
+          pip install flashinfer -i https://flashinfer.ai/whl/cu121/torch2.4/ --force-reinstall
+
+      - name: Setup Jupyter Kernel
+        run: |
+          python -m ipykernel install --user --name python3 --display-name "Python 3"
+
+      - name: Execute notebooks
+        env:
+          HF_HOME: /hf_home
+          SGLANG_IS_IN_CI: "true"
+          CUDA_VISIBLE_DEVICES: "0"
+        run: |
+          cd docs/en
+          for nb in *.ipynb; do
+            if [ -f "$nb" ]; then
+              echo "Executing $nb"
+              jupyter nbconvert --to notebook --execute --inplace "$nb" \
+                --ExecutePreprocessor.timeout=600 \
+                --ExecutePreprocessor.kernel_name=python3
+            fi
+          done
+
+  build-and-deploy:
+    if: github.event_name == 'push' && github.ref == 'refs/heads/main'
+    runs-on: 1-gpu-runner
+    steps:
+      - uses: actions/checkout@v3
+        with:
+          fetch-depth: 0
+
+      - name: Set up Python
+        uses: actions/setup-python@v4
+        with:
+          python-version: '3.9'
+
+      - name: Cache Python dependencies
+        uses: actions/cache@v3
+        with:
+          path: ~/.cache/pip
+          key: ${{ runner.os }}-pip-${{ hashFiles('**/requirements.txt') }}
+          restore-keys: |
+            ${{ runner.os }}-pip-
+
+      - name: Install dependencies
+        run: |
+          pip install --upgrade pip
+          pip install -e "python[all]"
+          pip install -r docs/requirements.txt
+          pip install nbconvert jupyter_client ipykernel ipywidgets matplotlib
+          pip install transformers==4.45.2
+          pip install flashinfer -i https://flashinfer.ai/whl/cu121/torch2.4/ --force-reinstall
+
+      - name: Install Pandoc
+        run: |
+          apt-get update
+          apt-get install -y pandoc
+
+      - name: Build documentation
+        run: |
+          cd docs/en
+          make html
+
+      - name: Push to sgl-project.github.io
+        env:
+          GITHUB_TOKEN: ${{ secrets.PAT_TOKEN }}
+        run: |
+          cd docs/en/_build/html
+          git clone https://$GITHUB_TOKEN@github.com/sgl-project/sgl-project.github.io.git ../sgl-project.github.io
+          cp -r * ../sgl-project.github.io
+          cd ../sgl-project.github.io
+          git config user.name "zhaochenyang20"
+          git config user.email "zhaochenyang20@gmail.com"
+          git add .
+          git commit -m "$(date +'%Y-%m-%d %H:%M:%S') - Update documentation" || echo "No changes to commit"
+          git push https://$GITHUB_TOKEN@github.com/sgl-project/sgl-project.github.io.git main
+          cd ..
+          rm -rf sgl-project.github.io
diff --git a/.gitignore b/.gitignore
index 14f5212ec..dfb0b79a1 100644
--- a/.gitignore
+++ b/.gitignore
@@ -186,3 +186,6 @@ work_dirs/
 
 *.csv
 !logo.png
+
+# docs
+/docs/en/_build
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 7489004bd..62dcd455a 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -7,11 +7,11 @@ repos:
     hooks:
       - id: isort
   - repo: https://github.com/psf/black
-    rev: 24.4.2
+    rev: 24.10.0
     hooks:
       - id: black
 
   - repo: https://github.com/pre-commit/pre-commit-hooks
-    rev: v4.6.0
+    rev: v5.0.0
     hooks:
       - id: no-commit-to-branch
diff --git a/docs/en/conf.py b/docs/en/conf.py
index 5a7ed2dbf..5e01613ec 100644
--- a/docs/en/conf.py
+++ b/docs/en/conf.py
@@ -26,6 +26,9 @@ extensions = [
     "myst_parser",
     "sphinx_copybutton",
     "sphinxcontrib.mermaid",
+    "nbsphinx",
+    "sphinx.ext.mathjax",
+    "sphinx.ext.autodoc",
 ]
 
 autosectionlabel_prefix_document = True
@@ -123,3 +126,5 @@ intersphinx_mapping = {
     "numpy": ("https://numpy.org/doc/stable", None),
     "torch": ("https://pytorch.org/docs/stable", None),
 }
+
+html_theme = "sphinx_book_theme"
diff --git a/docs/en/embedding_model.ipynb b/docs/en/embedding_model.ipynb
new file mode 100644
index 000000000..f2e155b02
--- /dev/null
+++ b/docs/en/embedding_model.ipynb
@@ -0,0 +1,170 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Embedding Model"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Launch A Server"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Embedding server is ready. Proceeding with the next steps.\n"
+     ]
+    }
+   ],
+   "source": [
+    "import subprocess\n",
+    "import time\n",
+    "import requests\n",
+    "\n",
+    "# Equivalent to running this in the shell:\n",
+    "# python -m sglang.launch_server --model-path Alibaba-NLP/gte-Qwen2-7B-instruct --port 30010 --host 0.0.0.0 --is-embedding --log-level error\n",
+    "embedding_process = subprocess.Popen(\n",
+    "    [\n",
+    "        \"python\",\n",
+    "        \"-m\",\n",
+    "        \"sglang.launch_server\",\n",
+    "        \"--model-path\",\n",
+    "        \"Alibaba-NLP/gte-Qwen2-7B-instruct\",\n",
+    "        \"--port\",\n",
+    "        \"30010\",\n",
+    "        \"--host\",\n",
+    "        \"0.0.0.0\",\n",
+    "        \"--is-embedding\",\n",
+    "        \"--log-level\",\n",
+    "        \"error\",\n",
+    "    ],\n",
+    "    text=True,\n",
+    "    stdout=subprocess.DEVNULL,\n",
+    "    stderr=subprocess.DEVNULL,\n",
+    ")\n",
+    "\n",
+    "while True:\n",
+    "    try:\n",
+    "        response = requests.get(\n",
+    "            \"http://localhost:30010/v1/models\",\n",
+    "            headers={\"Authorization\": \"Bearer None\"},\n",
+    "        )\n",
+    "        if response.status_code == 200:\n",
+    "            break\n",
+    "    except requests.exceptions.RequestException:\n",
+    "        time.sleep(1)\n",
+    "\n",
+    "print(\"Embedding server is ready. Proceeding with the next steps.\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Use Curl"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "[0.0083160400390625, 0.0006804466247558594, -0.00809478759765625, -0.0006995201110839844, 0.0143890380859375, -0.0090179443359375, 0.01238250732421875, 0.00209808349609375, 0.0062103271484375, -0.003047943115234375]\n"
+     ]
+    }
+   ],
+   "source": [
+    "# Get the first 10 elements of the embedding\n",
+    "\n",
+    "! curl -s http://localhost:30010/v1/embeddings \\\n",
+    "  -H \"Content-Type: application/json\" \\\n",
+    "  -H \"Authorization: Bearer None\" \\\n",
+    "  -d '{\"model\": \"Alibaba-NLP/gte-Qwen2-7B-instruct\", \"input\": \"Once upon a time\"}' \\\n",
+    "  | python3 -c \"import sys, json; print(json.load(sys.stdin)['data'][0]['embedding'][:10])\""
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Using OpenAI Compatible API"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "[0.00603485107421875, -0.0190582275390625, -0.01273345947265625, 0.01552581787109375, 0.0066680908203125, -0.0135955810546875, 0.01131439208984375, 0.0013713836669921875, -0.0089874267578125, 0.021759033203125]\n"
+     ]
+    }
+   ],
+   "source": [
+    "import openai\n",
+    "\n",
+    "client = openai.Client(\n",
+    "    base_url=\"http://127.0.0.1:30010/v1\", api_key=\"None\"\n",
+    ")\n",
+    "\n",
+    "# Text embedding example\n",
+    "response = client.embeddings.create(\n",
+    "    model=\"Alibaba-NLP/gte-Qwen2-7B-instruct\",\n",
+    "    input=\"How are you today\",\n",
+    ")\n",
+    "\n",
+    "embedding = response.data[0].embedding[:10]\n",
+    "print(embedding)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Shut down the server so later notebooks can use the GPU and ports.\n",
+    "embedding_process.terminate()"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "AlphaMeemory",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.11.7"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/docs/en/index.rst b/docs/en/index.rst
index 73dee5b5b..2f28ad87c 100644
--- a/docs/en/index.rst
+++ b/docs/en/index.rst
@@ -15,7 +15,19 @@ The core features include:
    :caption: Getting Started
 
    install.md
+   send_request.ipynb
+
+.. toctree::
+   :maxdepth: 1
+   :caption: Backend Tutorial
+
    backend.md
+
+
+.. toctree::
+   :maxdepth: 1
+   :caption: Frontend Tutorial
+
    frontend.md
 
 .. toctree::
@@ -29,3 +41,4 @@ The core features include:
    choices_methods.md
    benchmark_and_profiling.md
    troubleshooting.md
+   embedding_model.ipynb
diff --git a/docs/en/send_request.ipynb b/docs/en/send_request.ipynb
new file mode 100644
index 000000000..a305ccfb8
--- /dev/null
+++ b/docs/en/send_request.ipynb
@@ -0,0 +1,214 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Quick Start"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Launch a server\n",
+    "\n",
+    "This code uses `subprocess.Popen` to start an SGLang server process, equivalent to executing \n",
+    "\n",
+    "```bash\n",
+    "python -m sglang.launch_server --model-path meta-llama/Meta-Llama-3.1-8B-Instruct \\\n",
+    "--port 30000 --host 0.0.0.0 --log-level warning\n",
+    "```\n",
+    "in your command line and wait for the server to be ready."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Server is ready. Proceeding with the next steps.\n"
+     ]
+    }
+   ],
+   "source": [
+    "import subprocess\n",
+    "import time\n",
+    "import requests\n",
+    "import os\n",
+    "\n",
+    "server_process = subprocess.Popen(\n",
+    "    [\n",
+    "        \"python\",\n",
+    "        \"-m\",\n",
+    "        \"sglang.launch_server\",\n",
+    "        \"--model-path\",\n",
+    "        \"meta-llama/Meta-Llama-3.1-8B-Instruct\",\n",
+    "        \"--port\",\n",
+    "        \"30000\",\n",
+    "        \"--host\",\n",
+    "        \"0.0.0.0\",\n",
+    "        \"--log-level\",\n",
+    "        \"error\",\n",
+    "    ],\n",
+    "    text=True,\n",
+    "    stdout=subprocess.DEVNULL,\n",
+    "    stderr=subprocess.DEVNULL,\n",
+    ")\n",
+    "\n",
+    "while True:\n",
+    "    try:\n",
+    "        response = requests.get(\n",
+    "            \"http://localhost:30000/v1/models\",\n",
+    "            headers={\"Authorization\": \"Bearer None\"},\n",
+    "        )\n",
+    "        if response.status_code == 200:\n",
+    "            break\n",
+    "    except requests.exceptions.RequestException:\n",
+    "        time.sleep(1)\n",
+    "\n",
+    "print(\"Server is ready. Proceeding with the next steps.\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Send a Request\n",
+    "\n",
+    "Once the server is running, you can send test requests using curl."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "{\"id\":\"1449c9c20d4448299431a57facc68d7a\",\"object\":\"chat.completion\",\"created\":1729816891,\"model\":\"meta-llama/Meta-Llama-3.1-8B-Instruct\",\"choices\":[{\"index\":0,\"message\":{\"role\":\"assistant\",\"content\":\"LLM stands for Large Language Model. It's a type of artificial intelligence (AI) designed to process and generate human-like language. LLMs are trained on vast amounts of text data, which enables them to learn patterns, relationships, and nuances of language.\\n\\nLarge Language Models are typically trained using a technique called deep learning, where multiple layers of artificial neural networks are used to analyze and understand the input data. This training process involves feeding the model massive amounts of text data, which it uses to learn and improve its language understanding and generation capabilities.\\n\\nSome key characteristics of LLMs include:\\n\\n1. **Language understanding**: LLMs can comprehend natural language, including its syntax, semantics, and context.\\n2. **Language generation**: LLMs can generate text, including responses to user input, articles, stories, and more.\\n3. **Contextual understanding**: LLMs can understand the context in which language is being used, including the topic, tone, and intent.\\n4. **Self-supervised learning**: LLMs can learn from large datasets without explicit supervision or labeling.\\n\\nLLMs have a wide range of applications, including:\\n\\n1. **Virtual assistants**: LLMs power virtual assistants like Siri, Alexa, and Google Assistant.\\n2. **Language translation**: LLMs can translate text from one language to another.\\n3. **Text summarization**: LLMs can summarize long pieces of text into shorter, more digestible versions.\\n4. **Content generation**: LLMs can generate content, such as news articles, product descriptions, and social media posts.\\n5. **Chatbots**: LLMs can power chatbots that can have human-like conversations with users.\\n\\nThe Large Language Model I am, is a type of LLM that has been trained on a massive dataset of text and can answer a wide range of questions and engage in conversation.\"},\"logprobs\":null,\"finish_reason\":\"stop\",\"matched_stop\":128009}],\"usage\":{\"prompt_tokens\":47,\"total_tokens\":426,\"completion_tokens\":379,\"prompt_tokens_details\":null}}"
+     ]
+    }
+   ],
+   "source": [
+    "!curl http://localhost:30000/v1/chat/completions \\\n",
+    "  -H \"Content-Type: application/json\" \\\n",
+    "  -H \"Authorization: Bearer None\" \\\n",
+    "  -d '{\"model\": \"meta-llama/Meta-Llama-3.1-8B-Instruct\", \"messages\": [{\"role\": \"system\", \"content\": \"You are a helpful assistant.\"}, {\"role\": \"user\", \"content\": \"What is a LLM?\"}]}'"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Using OpenAI Compatible API\n",
+    "\n",
+    "SGLang supports OpenAI-compatible APIs. Here are Python examples:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "ChatCompletion(id='16757c3dd6e14a6e9bafd1122f84e4c5', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content='Here are 3 countries and their capitals:\\n\\n1. **Country:** Japan\\n**Capital:** Tokyo\\n\\n2. **Country:** Australia\\n**Capital:** Canberra\\n\\n3. **Country:** Brazil\\n**Capital:** Brasília', refusal=None, role='assistant', function_call=None, tool_calls=None), matched_stop=128009)], created=1729816893, model='meta-llama/Meta-Llama-3.1-8B-Instruct', object='chat.completion', service_tier=None, system_fingerprint=None, usage=CompletionUsage(completion_tokens=46, prompt_tokens=49, total_tokens=95, prompt_tokens_details=None))\n"
+     ]
+    }
+   ],
+   "source": [
+    "import openai\n",
+    "\n",
+    "# Always assign an api_key, even if not specified during server initialization.\n",
+    "# Setting an API key during server initialization is strongly recommended.\n",
+    "\n",
+    "client = openai.Client(\n",
+    "    base_url=\"http://127.0.0.1:30000/v1\", api_key=\"None\"\n",
+    ")\n",
+    "\n",
+    "# Chat completion example\n",
+    "\n",
+    "response = client.chat.completions.create(\n",
+    "    model=\"meta-llama/Meta-Llama-3.1-8B-Instruct\",\n",
+    "    messages=[\n",
+    "        {\"role\": \"system\", \"content\": \"You are a helpful AI assistant\"},\n",
+    "        {\"role\": \"user\", \"content\": \"List 3 countries and their capitals.\"},\n",
+    "    ],\n",
+    "    temperature=0,\n",
+    "    max_tokens=64,\n",
+    ")\n",
+    "print(response)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import signal\n",
+    "import gc\n",
+    "import torch\n",
+    "\n",
+    "def terminate_process(process):\n",
+    "    try:\n",
+    "        process.terminate()\n",
+    "        try:\n",
+    "            process.wait(timeout=5)\n",
+    "        except subprocess.TimeoutExpired:\n",
+    "            if os.name != 'nt':\n",
+    "                try:\n",
+    "                    pgid = os.getpgid(process.pid)\n",
+    "                    os.killpg(pgid, signal.SIGTERM)\n",
+    "                    time.sleep(1)\n",
+    "                    if process.poll() is None:\n",
+    "                        os.killpg(pgid, signal.SIGKILL)\n",
+    "                except ProcessLookupError:\n",
+    "                    pass\n",
+    "            else:\n",
+    "                process.kill()\n",
+    "                process.wait()\n",
+    "    except Exception as e:\n",
+    "        print(f\"Warning: {e}\")\n",
+    "    finally:\n",
+    "        gc.collect()\n",
+    "        if torch.cuda.is_available():\n",
+    "            torch.cuda.empty_cache()\n",
+    "            torch.cuda.ipc_collect()\n",
+    "\n",
+    "terminate_process(server_process)\n",
+    "time.sleep(2)"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "AlphaMeemory",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.11.7"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/docs/requirements.txt b/docs/requirements.txt
index 2f86ac997..03510959b 100644
--- a/docs/requirements.txt
+++ b/docs/requirements.txt
@@ -8,3 +8,5 @@ sphinxcontrib-mermaid
 pillow
 pydantic
 urllib3<2.0.0
+nbsphinx
+pandoc