misc: update e2e test benchmark config (#825)

Author: Yineng Zhang
Date: 2024-07-30 19:19:23 +10:00
Committed by: GitHub
parent b579ecf028
commit f52eda35ea
2 changed files with 8 additions and 7 deletions


@@ -1,10 +1,10 @@
 name: PR E2E Test

 on:
-  push:
-    branches: [ main ]
   pull_request:
     branches: [ main ]
+    paths:
+      - "python/sglang/*"
   workflow_dispatch:

 jobs:
@@ -26,22 +26,22 @@ jobs:
       - name: Launch server and run benchmark
         run: |
-          python3 -m sglang.launch_server --model /home/lmzheng/zhyncs/Meta-Llama-3.1-8B-Instruct --port 8413 &
+          python3 -m sglang.launch_server --model /home/lmzheng/zhyncs/Meta-Llama-3.1-8B-Instruct --port 8413 --disable-radix-cache &
           echo "Waiting for server to start..."
-          for i in {1..60}; do
+          for i in {1..120}; do
             if curl -s http://127.0.0.1:8413/health; then
               echo "Server is up!"
               break
             fi
-            if [ $i -eq 60 ]; then
-              echo "Server failed to start within 60 seconds"
+            if [ $i -eq 120 ]; then
+              echo "Server failed to start within 120 seconds"
               exit 1
             fi
             sleep 1
           done
-          python3 -m sglang.bench_serving --backend sglang --port 8413
+          cd /home/lmzheng/zhyncs && python3 -m sglang.bench_serving --backend sglang --port 8413 --dataset-name random --num-prompts 3000 --random-input 256 --random-output 512
           echo "Stopping server..."
           kill -9 $(ps aux | grep sglang | grep Meta-Llama-3.1-8B-Instruct | grep -v grep | awk '{print $2}')
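For reference, the step above can be reproduced outside CI. The sketch below is a rough Python equivalent of the updated benchmark flow and is not part of this commit: launch the server with the radix cache disabled, poll the health endpoint for up to 120 seconds, run sglang.bench_serving with the random-dataset settings, then stop the server. The model path is a placeholder, and a local sglang installation is assumed.

# Rough local equivalent of the CI step above (sketch, not part of the commit).
# MODEL is a placeholder; point it at a local copy of the model.
import subprocess
import time
import urllib.request

PORT = 8413
MODEL = "/path/to/Meta-Llama-3.1-8B-Instruct"  # placeholder path

# Launch the server with the radix cache disabled, as the updated workflow does.
server = subprocess.Popen(
    ["python3", "-m", "sglang.launch_server",
     "--model", MODEL, "--port", str(PORT), "--disable-radix-cache"]
)

# Poll the health endpoint for up to 120 seconds (mirrors the curl loop).
for _ in range(120):
    try:
        with urllib.request.urlopen(f"http://127.0.0.1:{PORT}/health", timeout=1):
            print("Server is up!")
            break
    except Exception:
        time.sleep(1)
else:
    server.kill()
    raise RuntimeError("Server failed to start within 120 seconds")

try:
    # Run the benchmark with the same random-dataset settings as the workflow.
    subprocess.run(
        ["python3", "-m", "sglang.bench_serving", "--backend", "sglang",
         "--port", str(PORT), "--dataset-name", "random",
         "--num-prompts", "3000", "--random-input", "256", "--random-output", "512"],
        check=True,
    )
finally:
    print("Stopping server...")
    server.kill()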


@@ -1,5 +1,6 @@
 # Adapted from https://github.com/vllm-project/vllm/blob/6366efc67b0aedd2c1721c14385370e50b297fb3/benchmarks/backend_request_func.py
 # Adapted from https://github.com/vllm-project/vllm/blob/6366efc67b0aedd2c1721c14385370e50b297fb3/benchmarks/benchmark_serving.py
 """
 Benchmark online serving.