add native api docs (#1883)
Co-authored-by: Chayenne <zhaochenyang@g.ucla.edu>
@@ -30,7 +30,7 @@
 },
 {
 "cell_type": "code",
- "execution_count": 1,
+ "execution_count": null,
 "metadata": {
 "execution": {
 "iopub.execute_input": "2024-11-01T02:47:32.337369Z",
@@ -39,59 +39,7 @@
 "shell.execute_reply": "2024-11-01T02:47:59.539861Z"
 }
 },
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "/home/chenyang/miniconda3/envs/AlphaMeemory/lib/python3.11/site-packages/transformers/utils/hub.py:128: FutureWarning: Using `TRANSFORMERS_CACHE` is deprecated and will be removed in v5 of Transformers. Use `HF_HOME` instead.\n",
- " warnings.warn(\n",
- "[2024-10-31 22:40:37] server_args=ServerArgs(model_path='Alibaba-NLP/gte-Qwen2-7B-instruct', tokenizer_path='Alibaba-NLP/gte-Qwen2-7B-instruct', tokenizer_mode='auto', skip_tokenizer_init=False, load_format='auto', trust_remote_code=False, dtype='auto', kv_cache_dtype='auto', quantization=None, context_length=None, device='cuda', served_model_name='Alibaba-NLP/gte-Qwen2-7B-instruct', chat_template=None, is_embedding=True, host='0.0.0.0', port=30010, mem_fraction_static=0.88, max_running_requests=None, max_total_tokens=None, chunked_prefill_size=8192, max_prefill_tokens=16384, schedule_policy='lpm', schedule_conservativeness=1.0, tp_size=1, stream_interval=1, random_seed=309155486, constrained_json_whitespace_pattern=None, decode_log_interval=40, log_level='info', log_level_http=None, log_requests=False, show_time_cost=False, api_key=None, file_storage_pth='SGLang_storage', enable_cache_report=False, watchdog_timeout=600, dp_size=1, load_balance_method='round_robin', dist_init_addr=None, nnodes=1, node_rank=0, json_model_override_args='{}', enable_double_sparsity=False, ds_channel_config_path=None, ds_heavy_channel_num=32, ds_heavy_token_num=256, ds_heavy_channel_type='qk', ds_sparse_decode_threshold=4096, lora_paths=None, max_loras_per_batch=8, attention_backend='flashinfer', sampling_backend='flashinfer', grammar_backend='outlines', disable_flashinfer=False, disable_flashinfer_sampling=False, disable_radix_cache=False, disable_regex_jump_forward=False, disable_cuda_graph=False, disable_cuda_graph_padding=False, disable_disk_cache=False, disable_custom_all_reduce=False, disable_mla=False, disable_penalizer=False, disable_nan_detection=False, enable_overlap_schedule=False, enable_mixed_chunk=False, enable_torch_compile=False, torch_compile_max_bs=32, cuda_graph_max_bs=160, torchao_config='', enable_p2p_check=False, triton_attention_reduce_in_fp32=False, num_continuous_decode_steps=1)\n",
- "/home/chenyang/miniconda3/envs/AlphaMeemory/lib/python3.11/site-packages/transformers/utils/hub.py:128: FutureWarning: Using `TRANSFORMERS_CACHE` is deprecated and will be removed in v5 of Transformers. Use `HF_HOME` instead.\n",
- " warnings.warn(\n",
- "/home/chenyang/miniconda3/envs/AlphaMeemory/lib/python3.11/site-packages/transformers/utils/hub.py:128: FutureWarning: Using `TRANSFORMERS_CACHE` is deprecated and will be removed in v5 of Transformers. Use `HF_HOME` instead.\n",
- " warnings.warn(\n",
- "[2024-10-31 22:40:42 TP0] Init torch distributed begin.\n",
- "[2024-10-31 22:40:43 TP0] Load weight begin. avail mem=47.27 GB\n",
- "[2024-10-31 22:40:43 TP0] lm_eval is not installed, GPTQ may not be usable\n",
- "INFO 10-31 22:40:44 weight_utils.py:243] Using model weights format ['*.safetensors']\n",
- "Loading safetensors checkpoint shards: 0% Completed | 0/7 [00:00<?, ?it/s]\n",
- "Loading safetensors checkpoint shards: 14% Completed | 1/7 [00:00<00:03, 1.97it/s]\n",
- "Loading safetensors checkpoint shards: 29% Completed | 2/7 [00:01<00:03, 1.40it/s]\n",
- "Loading safetensors checkpoint shards: 43% Completed | 3/7 [00:02<00:03, 1.11it/s]\n",
- "Loading safetensors checkpoint shards: 57% Completed | 4/7 [00:03<00:03, 1.00s/it]\n",
- "Loading safetensors checkpoint shards: 71% Completed | 5/7 [00:04<00:02, 1.07s/it]\n",
- "Loading safetensors checkpoint shards: 86% Completed | 6/7 [00:05<00:01, 1.10s/it]\n",
- "Loading safetensors checkpoint shards: 100% Completed | 7/7 [00:07<00:00, 1.12s/it]\n",
- "Loading safetensors checkpoint shards: 100% Completed | 7/7 [00:07<00:00, 1.02s/it]\n",
- "\n",
- "[2024-10-31 22:40:51 TP0] Load weight end. type=Qwen2ForCausalLM, dtype=torch.float16, avail mem=32.91 GB\n",
- "[2024-10-31 22:40:51 TP0] Memory pool end. avail mem=4.56 GB\n",
- "[2024-10-31 22:40:52 TP0] max_total_num_tokens=509971, max_prefill_tokens=16384, max_running_requests=2049, context_len=131072\n",
- "[2024-10-31 22:40:52] INFO: Started server process [1752367]\n",
- "[2024-10-31 22:40:52] INFO: Waiting for application startup.\n",
- "[2024-10-31 22:40:52] INFO: Application startup complete.\n",
- "[2024-10-31 22:40:52] INFO: Uvicorn running on http://0.0.0.0:30010 (Press CTRL+C to quit)\n",
- "[2024-10-31 22:40:52] INFO: 127.0.0.1:41676 - \"GET /v1/models HTTP/1.1\" 200 OK\n",
- "[2024-10-31 22:40:53] INFO: 127.0.0.1:41678 - \"GET /get_model_info HTTP/1.1\" 200 OK\n",
- "[2024-10-31 22:40:53 TP0] Prefill batch. #new-seq: 1, #new-token: 6, #cached-token: 0, cache hit rate: 0.00%, token usage: 0.00, #running-req: 0, #queue-req: 0\n",
- "[2024-10-31 22:40:54] INFO: 127.0.0.1:41684 - \"POST /encode HTTP/1.1\" 200 OK\n",
- "[2024-10-31 22:40:54] The server is fired up and ready to roll!\n"
- ]
- },
- {
- "data": {
- "text/html": [
- "<strong style='color: #00008B;'><br><br> NOTE: Typically, the server runs in a separate terminal.<br> In this notebook, we run the server and notebook code together, so their outputs are combined.<br> To improve clarity, the server logs are displayed in the original black color, while the notebook outputs are highlighted in blue.<br> </strong>"
- ],
- "text/plain": [
- "<IPython.core.display.HTML object>"
- ]
- },
- "metadata": {},
- "output_type": "display_data"
- }
- ],
+ "outputs": [],
 "source": [
 "from sglang.utils import (\n",
 " execute_shell_command,\n",
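For context on the hunk above: the cleared outputs came from the cell that launches SGLang in embedding mode (Alibaba-NLP/gte-Qwen2-7B-instruct on port 30010 with is_embedding=True, per the logged server_args). Below is a minimal sketch of what such a launch cell typically looks like; the CLI flags are inferred from that log rather than copied from the notebook, and `wait_for_server` is assumed to be among the truncated `sglang.utils` imports.

```python
# Sketch only: flags inferred from the server_args shown in the cleared log output.
from sglang.utils import execute_shell_command, wait_for_server  # wait_for_server is an assumed import

server_process = execute_shell_command(
    "python -m sglang.launch_server --model-path Alibaba-NLP/gte-Qwen2-7B-instruct "
    "--is-embedding --host 0.0.0.0 --port 30010"
)

# Block until the server answers; the cleared log shows GET /v1/models returning 200 OK on this port.
wait_for_server("http://localhost:30010")
```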
@@ -119,7 +67,7 @@
 },
 {
 "cell_type": "code",
- "execution_count": 2,
+ "execution_count": null,
 "metadata": {
 "execution": {
 "iopub.execute_input": "2024-11-01T02:47:59.543958Z",
@@ -128,28 +76,7 @@
 "shell.execute_reply": "2024-11-01T02:47:59.590809Z"
 }
 },
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "[2024-10-31 22:40:57 TP0] Prefill batch. #new-seq: 1, #new-token: 4, #cached-token: 0, cache hit rate: 0.00%, token usage: 0.00, #running-req: 0, #queue-req: 0\n",
- "[2024-10-31 22:40:57] INFO: 127.0.0.1:51746 - \"POST /v1/embeddings HTTP/1.1\" 200 OK\n"
- ]
- },
- {
- "data": {
- "text/html": [
- "<strong style='color: #00008B;'>Text embedding (first 10): [0.0083160400390625, 0.0006804466247558594, -0.00809478759765625, -0.0006995201110839844, 0.0143890380859375, -0.0090179443359375, 0.01238250732421875, 0.00209808349609375, 0.0062103271484375, -0.003047943115234375]</strong>"
- ],
- "text/plain": [
- "<IPython.core.display.HTML object>"
- ]
- },
- "metadata": {},
- "output_type": "display_data"
- }
- ],
+ "outputs": [],
 "source": [
 "import subprocess, json\n",
 "\n",
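The cell cleared in this hunk imports `subprocess` and `json` and, per the log, calls the OpenAI-compatible `/v1/embeddings` endpoint. A hedged sketch of such a request follows; the prompt text and the exact curl invocation are assumptions, not the notebook's code.

```python
# Sketch only: request shape follows the OpenAI-compatible /v1/embeddings API
# that the cleared log output shows answering 200 OK.
import subprocess, json

text = "Once upon a time"  # placeholder prompt; the notebook's actual text is not visible in this diff
payload = json.dumps({"model": "Alibaba-NLP/gte-Qwen2-7B-instruct", "input": text})

# POST the payload with curl in a subprocess, mirroring the cell's imports.
result = subprocess.run(
    ["curl", "-s", "http://localhost:30010/v1/embeddings",
     "-H", "Content-Type: application/json", "-d", payload],
    capture_output=True, text=True,
)
embedding = json.loads(result.stdout)["data"][0]["embedding"]
print(f"Text embedding (first 10): {embedding[:10]}")
```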
@@ -176,7 +103,7 @@
 },
 {
 "cell_type": "code",
- "execution_count": 3,
+ "execution_count": null,
 "metadata": {
 "execution": {
 "iopub.execute_input": "2024-11-01T02:47:59.594229Z",
@@ -185,28 +112,7 @@
 "shell.execute_reply": "2024-11-01T02:48:00.005255Z"
 }
 },
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "[2024-10-31 22:40:58 TP0] Prefill batch. #new-seq: 1, #new-token: 1, #cached-token: 3, cache hit rate: 21.43%, token usage: 0.00, #running-req: 0, #queue-req: 0\n",
- "[2024-10-31 22:40:58] INFO: 127.0.0.1:51750 - \"POST /v1/embeddings HTTP/1.1\" 200 OK\n"
- ]
- },
- {
- "data": {
- "text/html": [
- "<strong style='color: #00008B;'>Text embedding (first 10): [0.00829315185546875, 0.0007004737854003906, -0.00809478759765625, -0.0006799697875976562, 0.01438140869140625, -0.00897979736328125, 0.0123748779296875, 0.0020923614501953125, 0.006195068359375, -0.0030498504638671875]</strong>"
- ],
- "text/plain": [
- "<IPython.core.display.HTML object>"
- ]
- },
- "metadata": {},
- "output_type": "display_data"
- }
- ],
+ "outputs": [],
 "source": [
 "import openai\n",
 "\n",
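This hunk clears the cell that imports `openai`; the log again shows `POST /v1/embeddings` returning 200 OK. A minimal sketch using the standard openai>=1.0 client against the local server; the prompt text is a placeholder, not the notebook's original input.

```python
# Sketch only: standard openai client pointed at the local SGLang server;
# the endpoint, port, and model name come from the cleared log output above.
import openai

client = openai.Client(base_url="http://localhost:30010/v1", api_key="None")

response = client.embeddings.create(
    model="Alibaba-NLP/gte-Qwen2-7B-instruct",
    input="Once upon a time",  # placeholder text
)

embedding = response.data[0].embedding
print(f"Text embedding (first 10): {embedding[:10]}")
```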
@@ -233,7 +139,7 @@
 },
 {
 "cell_type": "code",
- "execution_count": 4,
+ "execution_count": null,
 "metadata": {
 "execution": {
 "iopub.execute_input": "2024-11-01T02:48:00.008858Z",
@@ -242,36 +148,7 @@
 "shell.execute_reply": "2024-11-01T02:48:01.871573Z"
 }
 },
- "outputs": [
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "/home/chenyang/miniconda3/envs/AlphaMeemory/lib/python3.11/site-packages/transformers/utils/hub.py:128: FutureWarning: Using `TRANSFORMERS_CACHE` is deprecated and will be removed in v5 of Transformers. Use `HF_HOME` instead.\n",
- " warnings.warn(\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "[2024-10-31 22:41:00 TP0] Prefill batch. #new-seq: 1, #new-token: 1, #cached-token: 3, cache hit rate: 33.33%, token usage: 0.00, #running-req: 0, #queue-req: 0\n",
- "[2024-10-31 22:41:00] INFO: 127.0.0.1:51762 - \"POST /v1/embeddings HTTP/1.1\" 200 OK\n"
- ]
- },
- {
- "data": {
- "text/html": [
- "<strong style='color: #00008B;'>Input IDs embedding (first 10): [0.00829315185546875, 0.0007004737854003906, -0.00809478759765625, -0.0006799697875976562, 0.01438140869140625, -0.00897979736328125, 0.0123748779296875, 0.0020923614501953125, 0.006195068359375, -0.0030498504638671875]</strong>"
- ],
- "text/plain": [
- "<IPython.core.display.HTML object>"
- ]
- },
- "metadata": {},
- "output_type": "display_data"
- }
- ],
+ "outputs": [],
 "source": [
 "import json\n",
 "import os\n",