Fix flush cache (#627)
@@ -1,9 +1,8 @@
# Benchmark Latency and Throughput

## SGLang

### Launch server
### Launch a server
```
python -m sglang.launch_server --model-path meta-llama/Llama-2-7b-chat-hf --port 30000
```
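Before running the benchmark, it can help to confirm the server answers requests. Below is a minimal sanity-check sketch that posts one request to the runtime's `/generate` endpoint on the port used above; the prompt text and sampling parameters are placeholders, not part of the original instructions.

```
import requests

# Minimal sanity check for the server launched above (assumes port 30000).
# The prompt and sampling parameters are placeholders; adjust as needed.
response = requests.post(
    "http://localhost:30000/generate",
    json={
        "text": "Once upon a time,",
        "sampling_params": {"max_new_tokens": 16, "temperature": 0},
    },
)
print(response.json())
```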
@@ -33,6 +32,11 @@ wget https://huggingface.co/datasets/anon8231489123/ShareGPT_Vicuna_unfiltered/r
python3 bench_serving.py --backend srt --port 30000 --tokenizer meta-llama/Llama-2-7b-chat-hf --dataset ShareGPT_V3_unfiltered_cleaned_split.json --num-prompts 10 --request-rate 10
```
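The `--num-prompts` and `--request-rate` flags control how many prompts are sampled from the dataset and how quickly they are sent. Serving benchmarks of this kind usually space requests with exponential inter-arrival delays (a Poisson process); the sketch below only illustrates that pacing under this assumption and is not taken from `bench_serving.py` itself.

```
import numpy as np

def poisson_arrival_delays(num_prompts: int, request_rate: float, seed: int = 0):
    """Illustrative only: inter-arrival delays (s) for a Poisson process at request_rate req/s."""
    if request_rate == float("inf"):
        # An infinite rate means every request is fired immediately.
        return [0.0] * num_prompts
    rng = np.random.default_rng(seed)
    return rng.exponential(1.0 / request_rate, size=num_prompts).tolist()

# Matching the command above: 10 prompts at 10 requests/s,
# i.e. roughly 0.1 s between consecutive requests on average.
print(poisson_arrival_delays(10, 10.0))
```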
### Profile with Nsight
1. To profile a single batch, use `nsys profile --cuda-graph-trace=node python3 -m sglang.bench_latency --model meta-llama/Meta-Llama-3-8B --batch-size 64 --input-len 512`.
2. To profile a server, use `nsys profile --cuda-graph-trace=node python3 -m sglang.launch_server --model meta-llama/Meta-Llama-3-8B`.

## Other baselines

### vLLM
@@ -64,4 +68,4 @@ python -m lightllm.server.api_server --model_dir ~/model_weights/Llama-2-7b-chat

```
python3 bench_serving.py --backend lightllm --port 22000 --tokenizer meta-llama/Llama-2-7b-chat-hf --dataset ShareGPT_V3_unfiltered_cleaned_split.json --num-prompts 10 --request-rate 10
```
@@ -102,8 +102,8 @@ def run_one_batch_size(bs):
    output_throughput = bs * max_new_tokens / latency
    overall_throughput = bs * (input_len + output_len) / latency
    print(f"latency: {latency:.2f} s")
    print(f"decode throughput: {output_throughput:.2f} token/s")
    print(f"overall throughput: {overall_throughput:.2f} token/s")
    print(f"output throughput: {output_throughput:.2f} token/s")
    print(f"(input + output) throughput: {overall_throughput:.2f} token/s")

    with open("results.jsonl", "a") as fout:
        res = {
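For reference, the arithmetic behind the renamed throughput lines is simple enough to show in isolation. The helper below is a self-contained sketch of that math and of the append to `results.jsonl`; its name, arguments, and the record's field names are illustrative, since the diff truncates the surrounding function.

```
import json

def summarize_batch(bs, input_len, output_len, max_new_tokens, latency):
    # Generated tokens per second across the whole batch.
    output_throughput = bs * max_new_tokens / latency
    # Prompt plus generated tokens per second across the whole batch.
    overall_throughput = bs * (input_len + output_len) / latency
    res = {
        "batch_size": bs,
        "latency": latency,
        "output_throughput": output_throughput,
        "overall_throughput": overall_throughput,
    }
    # Append one JSON record per run, as the snippet above does.
    with open("results.jsonl", "a") as fout:
        fout.write(json.dumps(res) + "\n")
    return res

# Example: batch of 64, 512 input tokens, 256 output tokens, measured in 8.0 s.
print(summarize_batch(64, 512, 256, 256, 8.0))
```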