Improve docs (#662)

This commit is contained in:
Ying Sheng
2024-07-19 10:58:03 -07:00
committed by GitHub
parent 630479c3a6
commit e87c7fd501
6 changed files with 75 additions and 41 deletions

View File

@@ -4,7 +4,8 @@
- `srt`: The backend engine for running local models. (SRT = SGLang Runtime).
- `test`: Test utilities.
- `api.py`: Public API.
- `bench_latency.py`: Benchmark utilities.
- `bench_latency.py`: Benchmark a single static batch.
- `bench_serving.py`: Benchmark online serving with dynamic requests.
- `global_config.py`: The global configs and constants.
- `launch_server.py`: The entry point for launching the local server.
- `utils.py`: Common utilities.

View File

@@ -1,3 +1,5 @@
"""Check environment configurations and dependency versions."""
import importlib
import os
import resource

View File

@@ -13,25 +13,26 @@ from sglang.srt.sampling_params import SamplingParams
@dataclass
class GenerateReqInput:
# The input prompt
text: Optional[Union[List[str], str]] = None
# The token ids for text; one can either specify text or input_ids
# The input prompt. It can be a single prompt or a batch of prompts.
text: Union[List[str], str]
# The token ids for text; one can either specify text or input_ids.
input_ids: Optional[Union[List[List[int]], List[int]]] = None
# The image input
# The image input. It can be a file name, a url, or base64 encoded string.
# See also python/sglang/srt/utils.py:load_image.
image_data: Optional[Union[List[str], str]] = None
# The sampling_params
# The sampling_params.
sampling_params: Union[List[Dict], Dict] = None
# The request id
# The request id.
rid: Optional[Union[List[str], str]] = None
# Whether to return logprobs
# Whether to return logprobs.
return_logprob: Optional[Union[List[bool], bool]] = None
# The start location of the prompt for return_logprob
# The start location of the prompt for return_logprob.
logprob_start_len: Optional[Union[List[int], int]] = None
# The number of top logprobs to return
# The number of top logprobs to return.
top_logprobs_num: Optional[Union[List[int], int]] = None
# Whether to detokenize tokens in logprobs
# Whether to detokenize tokens in logprobs.
return_text_in_logprobs: bool = False
# Whether to stream output
# Whether to stream output.
stream: bool = False
def post_init(self):

View File

@@ -74,21 +74,6 @@ async def health() -> Response:
return Response(status_code=200)
def get_model_list():
    """Available models."""
    # The server serves exactly one model: the one loaded by the
    # tokenizer manager (a module-level global in this file).
    return [tokenizer_manager.model_path]
@app.get("/v1/models")
def available_models():
"""Show available models."""
model_cards = []
for model_name in get_model_list():
model_cards.append(ModelCard(id=model_name, root=model_name))
return ModelList(data=model_cards)
@app.get("/get_model_info")
async def get_model_info():
result = {
@@ -154,6 +139,16 @@ async def openai_v1_chat_completions(raw_request: Request):
return await v1_chat_completions(tokenizer_manager, raw_request)
@app.get("/v1/models")
def available_models():
"""Show available models."""
model_names = [tokenizer_manager.model_path]
model_cards = []
for model_name in model_names:
model_cards.append(ModelCard(id=model_name, root=model_name))
return ModelList(data=model_cards)
def _set_global_server_args(server_args: ServerArgs):
global global_server_args_dict
global_server_args_dict = {