minor: add random use case (#2408)
This commit changes:
- .github/workflows/experiment-runner.yml (vendored) — 8 changed lines
- test/srt/configs/random_config.yaml (new file) — 25 added lines
@@ -2,6 +2,10 @@ name: Experiment Runner
 
 on:
   workflow_dispatch:
+    inputs:
+      script:
+        description: "Experiment Runner Script"
+        default: "configs/sharegpt_config.yaml"
 
 concurrency:
   group: experiment-runner-${{ github.ref }}
@@ -20,7 +24,7 @@ jobs:
           bash scripts/ci_install_dependency.sh
 
       - name: Test experiment runner
-        timeout-minutes: 10
+        timeout-minutes: 120
         run: |
           cd test/srt
-          python3 experiment_runner.py --config configs/sharegpt_config.yaml
+          python3 experiment_runner.py --config ${{ inputs.script }}
test/srt/configs/random_config.yaml (new file) — 25 lines
@@ -0,0 +1,25 @@
tasks:
  - name: sglang-128-4
    server_cmd: python3 -m sglang.launch_server --model-path meta-llama/Llama-3.1-8B-Instruct --disable-radix-cache
    client_cmd: python3 -m sglang.bench_serving --backend sglang --dataset-name random --random-input 128 --random-output 4 --request-rate 24 --num-prompt 1440
  - name: vllm-128-4
    server_cmd: python3 -m vllm.entrypoints.openai.api_server --model meta-llama/Llama-3.1-8B-Instruct --disable-log-requests
    client_cmd: python3 -m sglang.bench_serving --backend vllm --dataset-name random --random-input 128 --random-output 4 --request-rate 24 --num-prompt 1440
  - name: sglang-2000-100
    server_cmd: python3 -m sglang.launch_server --model-path meta-llama/Llama-3.1-8B-Instruct --disable-radix-cache
    client_cmd: python3 -m sglang.bench_serving --backend sglang --dataset-name random --random-input 2000 --random-output 100 --request-rate 2 --num-prompt 120
  - name: vllm-2000-100
    server_cmd: python3 -m vllm.entrypoints.openai.api_server --model meta-llama/Llama-3.1-8B-Instruct --disable-log-requests
    client_cmd: python3 -m sglang.bench_serving --backend vllm --dataset-name random --random-input 2000 --random-output 100 --request-rate 2 --num-prompt 120
  - name: sglang-4000-200
    server_cmd: python3 -m sglang.launch_server --model-path meta-llama/Llama-3.1-8B-Instruct --disable-radix-cache
    client_cmd: python3 -m sglang.bench_serving --backend sglang --dataset-name random --random-input 4000 --random-output 200 --request-rate 8 --num-prompt 480
  - name: vllm-4000-200
    server_cmd: python3 -m vllm.entrypoints.openai.api_server --model meta-llama/Llama-3.1-8B-Instruct --disable-log-requests
    client_cmd: python3 -m sglang.bench_serving --backend vllm --dataset-name random --random-input 4000 --random-output 200 --request-rate 8 --num-prompt 480
  - name: sglang-32000-100
    server_cmd: python3 -m sglang.launch_server --model-path meta-llama/Llama-3.1-8B-Instruct --disable-radix-cache
    client_cmd: python3 -m sglang.bench_serving --backend sglang --dataset-name random --random-input 32000 --random-output 100 --request-rate 1 --num-prompt 60
  - name: vllm-32000-100
    server_cmd: python3 -m vllm.entrypoints.openai.api_server --model meta-llama/Llama-3.1-8B-Instruct --disable-log-requests
    client_cmd: python3 -m sglang.bench_serving --backend vllm --dataset-name random --random-input 32000 --random-output 100 --request-rate 1 --num-prompt 60
Reference in New Issue
Block a user