[CI] Add benchmark workflows (#1014)

### What this PR does / why we need it?

Add CI benchmark workflows. The runner script becomes more robust: it now runs under `set -e` with an ERR trap that removes partial results, probes both the completions and chat-completions endpoints while waiting for the server, stamps each result JSON with its model name, and clones vllm's benchmark scripts over HTTPS instead of SSH. The latency, serving, and throughput test matrices switch from Llama-3.1-8B / Qwen2.5-7B to Qwen3-8B and Qwen2.5-VL-7B, with the VL model driven through chat-style backends on the lmarena vision-arena dataset.

### Does this PR introduce _any_ user-facing change?
No

### How was this patch tested?
Tested by running the benchmarks locally.

---------

Signed-off-by: wangli <wangli858794774@gmail.com>
Commit d9fb027068 (parent 5a1689fc64)
Author: Li Wang
Date: 2025-05-30 22:42:44 +08:00
Committed by: GitHub
5 changed files with 217 additions and 31 deletions

File: NPU benchmark runner script (bash)

@@ -1,5 +1,6 @@
 #!/bin/bash
+set -e

 check_npus() {
   # shellcheck disable=SC2155
@@ -48,7 +49,7 @@ wait_for_server() {
   # wait for vllm server to start
   # return 1 if vllm server crashes
   timeout 1200 bash -c '
-    until curl -X POST localhost:8000/v1/completions; do
+    until curl -s -X POST localhost:8000/v1/completions || curl -s -X POST localhost:8000/v1/chat/completions; do
       sleep 1
     done' && return 0 || return 1
 }
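The probe now silences curl's progress output with `-s` and also accepts the chat-completions endpoint, so chat-only deployments (such as the new Qwen2.5-VL serving test) no longer stall the loop. A hypothetical usage sketch, assuming a `vllm serve` launch on port 8000 (the repo's runner builds its own server command):

```bash
# Illustrative only: block until the server answers on either endpoint.
vllm serve Qwen/Qwen3-8B --port 8000 &
server_pid=$!

if wait_for_server; then
  echo "server is up, starting the benchmark client"
else
  echo "server did not come up within the 1200 s timeout" >&2
  kill "$server_pid"
  exit 1
fi
```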
@@ -67,6 +68,16 @@ kill_npu_processes() {
 }

+update_json_field() {
+  local json_file="$1"
+  local field_name="$2"
+  local field_value="$3"
+  jq --arg value "$field_value" \
+     --arg key "$field_name" \
+     '.[$key] = $value' "$json_file" > "${json_file}.tmp" && \
+  mv "${json_file}.tmp" "$json_file"
+}
+
 run_latency_tests() {
   # run latency tests using `benchmark_latency.py`
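The helper writes through a temp file, so a failed jq invocation can never truncate the original result JSON. A usage sketch (file and field values illustrative):

```bash
# Illustrative only: stamp a result file with its model name.
echo '{"avg_latency": 1.23}' > example_result.json
update_json_field example_result.json model_name "Qwen/Qwen3-8B"
cat example_result.json   # now contains model_name alongside avg_latency
```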
@@ -103,7 +114,9 @@ run_latency_tests() {
     # run the benchmark
     eval "$latency_command"

+    # echo model_name to result file
+    model_name=$(echo "$latency_params" | jq -r '.model')
+    update_json_field "$RESULTS_FOLDER/${test_name}.json" "model_name" "$model_name"
     kill_npu_processes
   done
@@ -144,7 +157,9 @@ run_throughput_tests() {
     # run the benchmark
     eval "$throughput_command"

+    # echo model_name to result file
+    model_name=$(echo "$throughput_params" | jq -r '.model')
+    update_json_field "$RESULTS_FOLDER/${test_name}.json" "model_name" "$model_name"
     kill_npu_processes
   done
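Both loops use the same two-step annotation: pull the model id out of the per-test parameter blob with jq, then write it into the result file via the helper above. In isolation (the params literal is illustrative; it normally comes from the test matrix JSON):

```bash
# Stand-alone sketch of the annotation step.
latency_params='{"model": "Qwen/Qwen3-8B", "tensor_parallel_size": 1}'
model_name=$(echo "$latency_params" | jq -r '.model')
echo "$model_name"   # prints: Qwen/Qwen3-8B
```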
@@ -242,8 +257,13 @@ cleanup() {
   rm -rf ./vllm_benchmarks
 }

+cleanup_on_error() {
+  echo "An error occurred. Cleaning up results folder..."
+  rm -rf $RESULTS_FOLDER
+}
+
 get_benchmarks_scripts() {
-  git clone -b main --depth=1 git@github.com:vllm-project/vllm.git && \
+  git clone -b main --depth=1 https://github.com/vllm-project/vllm.git && \
   mv vllm/benchmarks vllm_benchmarks
   rm -rf ./vllm
 }
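Switching the clone from SSH to HTTPS removes the need for deploy keys on CI runners. If download size ever matters, a sparse checkout is a possible refinement (a sketch of an alternative, not what this patch does; requires git >= 2.25):

```bash
# Assumed alternative: fetch only the benchmarks/ directory.
git clone -b main --depth=1 --filter=blob:none --sparse \
  https://github.com/vllm-project/vllm.git
cd vllm
git sparse-checkout set benchmarks
```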
@@ -263,9 +283,8 @@ main() {
   export VLLM_HOST_IP=$(hostname -I | awk '{print $1}')
   # turn off the reporting of the status of each request, to clean up the terminal output
   export VLLM_LOG_LEVEL="WARNING"
-  # set env
   export VLLM_USE_MODELSCOPE="True"
   export HF_ENDPOINT="https://hf-mirror.com"

   # prepare for benchmarking
@@ -278,6 +297,7 @@ main() {
   declare -g RESULTS_FOLDER=results
   mkdir -p $RESULTS_FOLDER
+  trap cleanup_on_error ERR

   ensure_sharegpt_downloaded

   # benchmarks
   run_serving_tests $QUICK_BENCHMARK_ROOT/tests/serving-tests.json
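Because the script runs under `set -e`, any failing command now fires the ERR trap, which deletes the partially written results folder before the shell exits, so a broken run never publishes stale numbers. A minimal demonstration of the same wiring (not part of the patch):

```bash
#!/bin/bash
# Demo of the error-cleanup pattern added above.
set -e
RESULTS_FOLDER=results

cleanup_on_error() {
  echo "An error occurred. Cleaning up results folder..."
  rm -rf "$RESULTS_FOLDER"
}
trap cleanup_on_error ERR

mkdir -p "$RESULTS_FOLDER"
false   # any failure here removes ./results before the shell exits
```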

File: latency test matrix (JSON)

@@ -1,20 +1,21 @@
 [
   {
-    "test_name": "latency_llama8B_tp1",
+    "test_name": "latency_qwen2_5vl_7B_tp1",
     "parameters": {
-      "model": "LLM-Research/Meta-Llama-3.1-8B-Instruct",
+      "model": "Qwen/Qwen2.5-VL-7B-Instruct",
       "tensor_parallel_size": 1,
       "load_format": "dummy",
+      "max_model_len": 16384,
       "num_iters_warmup": 5,
       "num_iters": 15
     }
   },
   {
-    "test_name": "latency_qwen2_5_7B_tp1",
+    "test_name": "latency_qwen3_8B_tp1",
     "parameters": {
-      "model": "Qwen/Qwen2.5-7B-Instruct",
+      "model": "Qwen/Qwen3-8B",
       "tensor_parallel_size": 1,
       "load_format": "dummy",
       "max_model_len": 16384,
       "num_iters_warmup": 5,
       "num_iters": 15
     }
File: serving test matrix (tests/serving-tests.json)

@@ -1,6 +1,6 @@
 [
   {
-    "test_name": "serving_llama8B_tp1",
+    "test_name": "serving_qwen2_5vl_7B_tp1",
     "qps_list": [
       1,
       4,
@@ -8,23 +8,26 @@
       "inf"
     ],
     "server_parameters": {
-      "model": "LLM-Research/Meta-Llama-3.1-8B-Instruct",
+      "model": "Qwen/Qwen2.5-VL-7B-Instruct",
       "tensor_parallel_size": 1,
       "swap_space": 16,
       "disable_log_stats": "",
       "disable_log_requests": "",
-      "load_format": "dummy"
+      "trust_remote_code": "",
+      "max_model_len": 16384
     },
     "client_parameters": {
-      "model": "LLM-Research/Meta-Llama-3.1-8B-Instruct",
-      "backend": "vllm",
-      "dataset_name": "sharegpt",
-      "dataset_path": "./ShareGPT_V3_unfiltered_cleaned_split.json",
+      "model": "Qwen/Qwen2.5-VL-7B-Instruct",
+      "backend": "openai-chat",
+      "dataset_name": "hf",
+      "hf_split": "train",
+      "endpoint": "/v1/chat/completions",
+      "dataset_path": "lmarena-ai/vision-arena-bench-v0.1",
       "num_prompts": 200
     }
   },
   {
-    "test_name": "serving_qwen2_5_7B_tp1",
+    "test_name": "serving_qwen3_8B_tp1",
@@ -32,7 +35,7 @@
       "inf"
     ],
     "server_parameters": {
-      "model": "Qwen/Qwen2.5-7B-Instruct",
+      "model": "Qwen/Qwen3-8B",
       "tensor_parallel_size": 1,
       "swap_space": 16,
       "disable_log_stats": "",
@@ -40,10 +43,10 @@
       "load_format": "dummy"
     },
     "client_parameters": {
-      "model": "Qwen/Qwen2.5-7B-Instruct",
+      "model": "Qwen/Qwen3-8B",
       "backend": "vllm",
       "dataset_name": "sharegpt",
-      "dataset_path": "./ShareGPT_V3_unfiltered_cleaned_split.json",
+      "dataset_path": "/root/.cache/datasets/sharegpt/ShareGPT_V3_unfiltered_cleaned_split.json",
       "num_prompts": 200
     }
   }
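For orientation, the qwen3_8B client block roughly expands to a `benchmark_serving.py` invocation like the following; this is a hedged sketch (the runner builds the real command, and the flag spellings are assumed from vllm's benchmark script conventions):

```bash
# Approximate expansion of serving_qwen3_8B_tp1 client_parameters.
python3 vllm_benchmarks/benchmark_serving.py \
  --backend vllm \
  --model Qwen/Qwen3-8B \
  --dataset-name sharegpt \
  --dataset-path /root/.cache/datasets/sharegpt/ShareGPT_V3_unfiltered_cleaned_split.json \
  --num-prompts 200
```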

File: throughput test matrix (JSON)

@@ -1,24 +1,26 @@
 [
   {
-    "test_name": "throughput_llama8B_tp1",
+    "test_name": "throughput_qwen3_8B_tp1",
     "parameters": {
-      "model": "LLM-Research/Meta-Llama-3.1-8B-Instruct",
+      "model": "Qwen/Qwen3-8B",
       "tensor_parallel_size": 1,
       "load_format": "dummy",
-      "dataset_path": "./ShareGPT_V3_unfiltered_cleaned_split.json",
+      "dataset_path": "/root/.cache/datasets/sharegpt/ShareGPT_V3_unfiltered_cleaned_split.json",
       "num_prompts": 200,
       "backend": "vllm"
     }
   },
   {
-    "test_name": "throughput_qwen2_5_7B_tp1",
+    "test_name": "throughput_qwen2_5vl_7B_tp1",
     "parameters": {
-      "model": "Qwen/Qwen2.5-7B-Instruct",
+      "model": "Qwen/Qwen2.5-VL-7B-Instruct",
       "tensor_parallel_size": 1,
       "load_format": "dummy",
-      "dataset_path": "./ShareGPT_V3_unfiltered_cleaned_split.json",
-      "num_prompts": 200,
-      "backend": "vllm"
+      "backend": "vllm-chat",
+      "dataset_name": "hf",
+      "hf_split": "train",
+      "max_model_len": 16384,
+      "dataset_path": "lmarena-ai/vision-arena-bench-v0.1",
+      "num_prompts": 200
     }
   }
 ]
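Likewise, the first throughput entry roughly corresponds to a `benchmark_throughput.py` run like the one below; again a hedged sketch with assumed flag names, since the runner script assembles the actual command:

```bash
# Approximate expansion of throughput_qwen3_8B_tp1.
python3 vllm_benchmarks/benchmark_throughput.py \
  --model Qwen/Qwen3-8B \
  --tensor-parallel-size 1 \
  --load-format dummy \
  --dataset-path /root/.cache/datasets/sharegpt/ShareGPT_V3_unfiltered_cleaned_split.json \
  --num-prompts 200 \
  --backend vllm
```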