Include context length in /v1/models response. (#4809)

This commit is contained in:
Jon Durbin
2025-03-27 23:23:18 -04:00
committed by GitHub
parent e84f4ba0ab
commit 04eb6062e4
3 changed files with 14 additions and 1 deletion

View File

@@ -561,7 +561,13 @@ def available_models():
served_model_names = [_global_state.tokenizer_manager.served_model_name]
model_cards = []
for served_model_name in served_model_names:
-        model_cards.append(ModelCard(id=served_model_name, root=served_model_name))
+        model_cards.append(
+            ModelCard(
+                id=served_model_name,
+                root=served_model_name,
+                max_model_len=_global_state.tokenizer_manager.model_config.context_len,
+            )
+        )
return ModelList(data=model_cards)

View File

@@ -28,6 +28,7 @@ class ModelCard(BaseModel):
created: int = Field(default_factory=lambda: int(time.time()))
owned_by: str = "sglang"
root: Optional[str] = None
+    max_model_len: Optional[int] = None
class ModelList(BaseModel):