release v0.1.10

This commit is contained in:
Lianmin Zheng
2024-01-30 15:37:43 +00:00
parent 873d0e8537
commit a49dc52bfa
3 changed files with 3 additions and 2 deletions

View File

@@ -351,6 +351,7 @@ python -m sglang.launch_server --model-path meta-llama/Llama-2-7b-chat-hf --port
```
python -m sglang.launch_server --model-path meta-llama/Llama-2-7b-chat-hf --port 30000 --mem-fraction-static 0.7
```
- You can turn on [flashinfer](docs/flashinfer.md) to accelerate the inference by using highly optimized CUDA kernels.
### Supported Models
- Llama

View File

@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
[project]
name = "sglang"
version = "0.1.9"
version = "0.1.10"
description = "A structured generation language for LLMs."
readme = "README.md"
requires-python = ">=3.8"

View File

@@ -1,4 +1,4 @@
__version__ = "0.1.9"
__version__ = "0.1.10"
from sglang.api import *
from sglang.global_config import global_config