Fix the default argument of OpenAI Chat completion (#605)
This commit is contained in:
@@ -42,7 +42,6 @@ pip install flashinfer -i https://flashinfer.ai/whl/cu121/torch2.3/
git clone https://github.com/sgl-project/sglang.git
cd sglang

pip install --upgrade pip
pip install -e "python[all]"

# Install FlashInfer CUDA kernels
@@ -134,7 +134,7 @@ class ChatCompletionRequest(BaseModel):
     logit_bias: Optional[Dict[str, float]] = None
     logprobs: Optional[bool] = False
     top_logprobs: Optional[int] = None
-    max_tokens: Optional[int] = None
+    max_tokens: Optional[int] = 16
     n: Optional[int] = 1
     presence_penalty: Optional[float] = 0.0
     response_format: Optional[ResponseFormat] = None
Reference in New Issue
Block a user