docs: update doc (#716)
@@ -37,7 +37,7 @@ The core features include:
### Method 1: With pip

```
-pip install --upgrade pip
+pip install --upgrade pip setuptools wheel
pip install "sglang[all]"

# Install FlashInfer CUDA kernels
```
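The hunk context ends at the FlashInfer comment; the command that follows it is presumably the same one shown in the source-install hunk further down this diff:

```
pip install flashinfer -i https://flashinfer.ai/whl/cu121/torch2.3/
```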
benchmark/blog_v0_2/405b_sglang.sh (Normal file, 28 lines)
@@ -0,0 +1,28 @@
# Create ~/llama-3.1-405b-fp8-dummy and populate it with config.json and the tokenizer files:
# config.json from https://gist.github.com/zhyncs/748597c44d47b45fa15866a4ae2c2b29?permalink_comment_id=5128893
# wget -O tokenizer.json "https://huggingface.co/neuralmagic/Meta-Llama-3.1-8B-Instruct-quantized.w8a8/resolve/main/tokenizer.json?download=true"
# wget -O tokenizer_config.json "https://huggingface.co/neuralmagic/Meta-Llama-3.1-8B-Instruct-quantized.w8a8/resolve/main/tokenizer_config.json?download=true"

# Launch sglang
# python -m sglang.launch_server --model ~/llama-3.1-405b-fp8-dummy/ --load-format dummy --tp 8 --quant fp8 --disable-radix --mem-frac 0.88

# offline
python3 -m sglang.bench_serving --backend sglang --dataset-name random --num-prompt 2500 --random-input 1024 --random-output 1024 --random-range-ratio 0.5 > sglang/log11
python3 -m sglang.bench_serving --backend sglang --dataset-name random --num-prompt 2500 --random-input 4096 --random-output 1024 --random-range-ratio 0.5 > sglang/log12
python3 -m sglang.bench_serving --backend sglang --dataset-name random --num-prompt 2500 --random-input 1024 --random-output 512 --random-range-ratio 0.5 > sglang/log13
python3 -m sglang.bench_serving --backend sglang --dataset-name random --num-prompt 2500 --random-input 4096 --random-output 512 --random-range-ratio 0.5 > sglang/log14
python3 -m sglang.bench_serving --backend sglang --dataset-name sharegpt --num-prompt 2500 > sglang/log21

# online
python3 -m sglang.bench_serving --backend sglang --dataset-name random --num-prompt 300 --request-rate 1 --random-input 4096 --random-output 1024 --random-range-ratio 0.125 > sglang/log31
python3 -m sglang.bench_serving --backend sglang --dataset-name random --num-prompt 600 --request-rate 2 --random-input 4096 --random-output 1024 --random-range-ratio 0.125 > sglang/log32
python3 -m sglang.bench_serving --backend sglang --dataset-name random --num-prompt 1200 --request-rate 4 --random-input 4096 --random-output 1024 --random-range-ratio 0.125 > sglang/log33
python3 -m sglang.bench_serving --backend sglang --dataset-name random --num-prompt 2400 --request-rate 8 --random-input 4096 --random-output 1024 --random-range-ratio 0.125 > sglang/log34
python3 -m sglang.bench_serving --backend sglang --dataset-name random --num-prompt 3200 --request-rate 16 --random-input 4096 --random-output 1024 --random-range-ratio 0.125 > sglang/log35
# python3 -m sglang.bench_serving --backend sglang --dataset-name random --num-prompt 1000 --request-rate 32 --random-input 4096 --random-output 1024 --random-range-ratio 0.125 > sglang/log36
# python3 -m sglang.bench_serving --backend sglang --dataset-name sharegpt --num-prompt 1000 --request-rate 1 > sglang/log41
# python3 -m sglang.bench_serving --backend sglang --dataset-name sharegpt --num-prompt 1000 --request-rate 2 > sglang/log42
# python3 -m sglang.bench_serving --backend sglang --dataset-name sharegpt --num-prompt 1000 --request-rate 4 > sglang/log43
# python3 -m sglang.bench_serving --backend sglang --dataset-name sharegpt --num-prompt 1000 --request-rate 8 > sglang/log44
# python3 -m sglang.bench_serving --backend sglang --dataset-name sharegpt --num-prompt 1000 --request-rate 16 > sglang/log45
# python3 -m sglang.bench_serving --backend sglang --dataset-name sharegpt --num-prompt 1000 --request-rate 32 > sglang/log46
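The five online runs above scale `--num-prompt` with `--request-rate`: 300 prompts per unit of request rate, capped at 3200. A minimal loop form of the same runs, as a sketch (flags and log names unchanged from the script):

```
i=31
for rate in 1 2 4 8 16; do
  np=$((300 * rate))
  [ "$np" -gt 3200 ] && np=3200
  python3 -m sglang.bench_serving --backend sglang --dataset-name random \
    --num-prompt "$np" --request-rate "$rate" \
    --random-input 4096 --random-output 1024 --random-range-ratio 0.125 \
    > "sglang/log$i"
  i=$((i + 1))
done
```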
benchmark/blog_v0_2/405b_trt.sh (Normal file, 23 lines)
@@ -0,0 +1,23 @@
# Launch trtllm
# https://gist.github.com/zhyncs/748597c44d47b45fa15866a4ae2c2b29?permalink_comment_id=5129302

# offline
python3 ../../python/sglang/bench_serving.py --backend trt --dataset-name random --num-prompt 2500 --random-input 1024 --random-output 1024 --random-range-ratio 0.5 --model meta-llama/Meta-Llama-3-8B-Instruct > trtllm/log11
python3 ../../python/sglang/bench_serving.py --backend trt --dataset-name random --num-prompt 2500 --random-input 4096 --random-output 1024 --random-range-ratio 0.5 --model meta-llama/Meta-Llama-3-8B-Instruct > trtllm/log12
python3 ../../python/sglang/bench_serving.py --backend trt --dataset-name random --num-prompt 2500 --random-input 1024 --random-output 512 --random-range-ratio 0.5 --model meta-llama/Meta-Llama-3-8B-Instruct > trtllm/log13
python3 ../../python/sglang/bench_serving.py --backend trt --dataset-name random --num-prompt 2500 --random-input 4096 --random-output 512 --random-range-ratio 0.5 --model meta-llama/Meta-Llama-3-8B-Instruct > trtllm/log14
python3 ../../python/sglang/bench_serving.py --backend trt --dataset-name sharegpt --num-prompt 2500 --model meta-llama/Meta-Llama-3-8B-Instruct > trtllm/log21

# online
python3 ../../python/sglang/bench_serving.py --backend trt --dataset-name random --num-prompt 300 --request-rate 1 --random-input 4096 --random-output 1024 --random-range-ratio 0.125 --model meta-llama/Meta-Llama-3-8B-Instruct > trtllm/log31
python3 ../../python/sglang/bench_serving.py --backend trt --dataset-name random --num-prompt 600 --request-rate 2 --random-input 4096 --random-output 1024 --random-range-ratio 0.125 --model meta-llama/Meta-Llama-3-8B-Instruct > trtllm/log32
python3 ../../python/sglang/bench_serving.py --backend trt --dataset-name random --num-prompt 1200 --request-rate 4 --random-input 4096 --random-output 1024 --random-range-ratio 0.125 --model meta-llama/Meta-Llama-3-8B-Instruct > trtllm/log33
python3 ../../python/sglang/bench_serving.py --backend trt --dataset-name random --num-prompt 2400 --request-rate 8 --random-input 4096 --random-output 1024 --random-range-ratio 0.125 --model meta-llama/Meta-Llama-3-8B-Instruct > trtllm/log34
python3 ../../python/sglang/bench_serving.py --backend trt --dataset-name random --num-prompt 3200 --request-rate 16 --random-input 4096 --random-output 1024 --random-range-ratio 0.125 --model meta-llama/Meta-Llama-3-8B-Instruct > trtllm/log35
# python3 ../../python/sglang/bench_serving.py --backend trt --dataset-name random --num-prompt 1000 --request-rate 32 --random-input 4096 --random-output 1024 --random-range-ratio 0.125 --model meta-llama/Meta-Llama-3-8B-Instruct > trtllm/log36
# python3 ../../python/sglang/bench_serving.py --backend trt --dataset-name sharegpt --num-prompt 1000 --request-rate 1 --model meta-llama/Meta-Llama-3-8B-Instruct > trtllm/log41
# python3 ../../python/sglang/bench_serving.py --backend trt --dataset-name sharegpt --num-prompt 1000 --request-rate 2 --model meta-llama/Meta-Llama-3-8B-Instruct > trtllm/log42
# python3 ../../python/sglang/bench_serving.py --backend trt --dataset-name sharegpt --num-prompt 1000 --request-rate 4 --model meta-llama/Meta-Llama-3-8B-Instruct > trtllm/log43
# python3 ../../python/sglang/bench_serving.py --backend trt --dataset-name sharegpt --num-prompt 1000 --request-rate 8 --model meta-llama/Meta-Llama-3-8B-Instruct > trtllm/log44
# python3 ../../python/sglang/bench_serving.py --backend trt --dataset-name sharegpt --num-prompt 1000 --request-rate 16 --model meta-llama/Meta-Llama-3-8B-Instruct > trtllm/log45
# python3 ../../python/sglang/bench_serving.py --backend trt --dataset-name sharegpt --num-prompt 1000 --request-rate 32 --model meta-llama/Meta-Llama-3-8B-Instruct > trtllm/log46
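Note that each of these scripts redirects output into a per-backend log directory (`sglang/`, `trtllm/`, `vllm/`) that the script itself never creates, so the redirections fail unless it exists. A one-line prerequisite, assuming the scripts are run from `benchmark/blog_v0_2/`:

```
mkdir -p sglang trtllm vllm
```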
benchmark/blog_v0_2/405b_vllm.sh (Normal file, 29 lines)
@@ -0,0 +1,29 @@
# Create ~/llama-3.1-405b-fp8-dummy and populate it with config.json and the tokenizer files:
# config.json from https://gist.github.com/zhyncs/748597c44d47b45fa15866a4ae2c2b29?permalink_comment_id=5128893
# (remove the new llama3 rope_scaling entry to run with vLLM 0.5.2)
# wget -O tokenizer.json "https://huggingface.co/neuralmagic/Meta-Llama-3.1-8B-Instruct-quantized.w8a8/resolve/main/tokenizer.json?download=true"
# wget -O tokenizer_config.json "https://huggingface.co/neuralmagic/Meta-Llama-3.1-8B-Instruct-quantized.w8a8/resolve/main/tokenizer_config.json?download=true"

# Launch vllm
# python3 -m vllm.entrypoints.openai.api_server --model ~/llama-3.1-405b-fp8-dummy/ --load-format dummy --disable-log-requests --tensor-parallel-size 8 --max-model-len 10000

# offline
python3 ../../python/sglang/bench_serving.py --backend vllm --dataset-name random --num-prompt 2500 --random-input 1024 --random-output 1024 --random-range-ratio 0.5 > vllm/log11
python3 ../../python/sglang/bench_serving.py --backend vllm --dataset-name random --num-prompt 2500 --random-input 4096 --random-output 1024 --random-range-ratio 0.5 > vllm/log12
python3 ../../python/sglang/bench_serving.py --backend vllm --dataset-name random --num-prompt 2500 --random-input 1024 --random-output 512 --random-range-ratio 0.5 > vllm/log13
python3 ../../python/sglang/bench_serving.py --backend vllm --dataset-name random --num-prompt 2500 --random-input 4096 --random-output 512 --random-range-ratio 0.5 > vllm/log14
python3 ../../python/sglang/bench_serving.py --backend vllm --dataset-name sharegpt --num-prompt 2500 > vllm/log21

# online
python3 ../../python/sglang/bench_serving.py --backend vllm --dataset-name random --num-prompt 300 --request-rate 1 --random-input 4096 --random-output 1024 --random-range-ratio 0.125 > vllm/log31
python3 ../../python/sglang/bench_serving.py --backend vllm --dataset-name random --num-prompt 600 --request-rate 2 --random-input 4096 --random-output 1024 --random-range-ratio 0.125 > vllm/log32
python3 ../../python/sglang/bench_serving.py --backend vllm --dataset-name random --num-prompt 1200 --request-rate 4 --random-input 4096 --random-output 1024 --random-range-ratio 0.125 > vllm/log33
python3 ../../python/sglang/bench_serving.py --backend vllm --dataset-name random --num-prompt 2400 --request-rate 8 --random-input 4096 --random-output 1024 --random-range-ratio 0.125 > vllm/log34
python3 ../../python/sglang/bench_serving.py --backend vllm --dataset-name random --num-prompt 3200 --request-rate 16 --random-input 4096 --random-output 1024 --random-range-ratio 0.125 > vllm/log35
# python3 ../../python/sglang/bench_serving.py --backend vllm --dataset-name random --num-prompt 1000 --request-rate 32 --random-input 4096 --random-output 1024 --random-range-ratio 0.125 > vllm/log36
# python3 ../../python/sglang/bench_serving.py --backend vllm --dataset-name sharegpt --num-prompt 1000 --request-rate 1 > vllm/log41
# python3 ../../python/sglang/bench_serving.py --backend vllm --dataset-name sharegpt --num-prompt 1000 --request-rate 2 > vllm/log42
# python3 ../../python/sglang/bench_serving.py --backend vllm --dataset-name sharegpt --num-prompt 1000 --request-rate 4 > vllm/log43
# python3 ../../python/sglang/bench_serving.py --backend vllm --dataset-name sharegpt --num-prompt 1000 --request-rate 8 > vllm/log44
# python3 ../../python/sglang/bench_serving.py --backend vllm --dataset-name sharegpt --num-prompt 1000 --request-rate 16 > vllm/log45
# python3 ../../python/sglang/bench_serving.py --backend vllm --dataset-name sharegpt --num-prompt 1000 --request-rate 32 > vllm/log46
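Once the runs in all three scripts finish, the captured summaries can be skimmed across backends in one pass; a minimal sketch, assuming bench_serving's summary lines contain the word "throughput":

```
grep -iH "throughput" sglang/log* trtllm/log* vllm/log*
```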
@@ -8,7 +8,7 @@
git clone https://github.com/sgl-project/sglang.git
cd sglang

-pip install --upgrade pip
+pip install --upgrade pip setuptools wheel
pip install -e "python[all]"

pip install flashinfer -i https://flashinfer.ai/whl/cu121/torch2.3/
@@ -91,7 +91,7 @@ cat sglang_online_benchmark.jsonl | cut -d':' -f9 | cut -d',' -f1
## Other

-We tried using vLLM 0.5.3.post1, but it often crashes under high loads, so we are using the older version, vLLM 0.5.2.
+We tried vLLM 0.5.3.post1, but it often crashes under high load and, in our partial benchmarks, shows similar or worse performance than vLLM 0.5.2, so we use the older version, vLLM 0.5.2.

For TensorRT-LLM preparation, refer to https://github.com/sgl-project/tensorrt-demo. Specifically, we used a batch size of 512, a max input length of 8192, and a max number of tokens of 8192; the instance count for preprocessing and postprocessing in Triton Server is 16.
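A hedged sketch of where that instance count lands: `instance_group` is standard Triton config.pbtxt syntax, but the `model_repo` layout and the `KIND_CPU` choice are assumptions about the tensorrt-demo setup, which may configure this differently.

```
# Sketch only: append an instance_group with 16 instances to the
# preprocessing and postprocessing model configs (paths are assumed).
for m in preprocessing postprocessing; do
  cat >> "model_repo/$m/config.pbtxt" <<'EOF'
instance_group [
  { count: 16, kind: KIND_CPU }
]
EOF
done
```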