### What this PR does / why we need it?
vLLM now names its processes with a `VLLM` prefix after
https://github.com/vllm-project/vllm/pull/21445, so we must kill processes by
the correct name after each benchmark iteration to avoid OOM issues.
### Does this PR introduce _any_ user-facing change?
### How was this patch tested?
- vLLM version: v0.10.1.1
- vLLM main:
e599e2c65e
---------
Signed-off-by: wangli <wangli858794774@gmail.com>
[
  {
    "test_name": "serving_qwen2_5vl_7B_tp1",
    "qps_list": [
      1,
      4,
      16,
      "inf"
    ],
    "server_parameters": {
      "model": "Qwen/Qwen2.5-VL-7B-Instruct",
      "tensor_parallel_size": 1,
      "swap_space": 16,
      "disable_log_stats": "",
      "disable_log_requests": "",
      "trust_remote_code": "",
      "max_model_len": 16384
    },
    "client_parameters": {
      "model": "Qwen/Qwen2.5-VL-7B-Instruct",
      "endpoint_type": "openai-chat",
      "dataset_name": "hf",
      "hf_split": "train",
      "endpoint": "/v1/chat/completions",
      "dataset_path": "lmarena-ai/vision-arena-bench-v0.1",
      "num_prompts": 200,
      "no_stream": ""
    }
  },
  {
    "test_name": "serving_qwen3_8B_tp1",
    "qps_list": [
      1,
      4,
      16,
      "inf"
    ],
    "server_parameters": {
      "model": "Qwen/Qwen3-8B",
      "tensor_parallel_size": 1,
      "swap_space": 16,
      "disable_log_stats": "",
      "disable_log_requests": "",
      "load_format": "dummy"
    },
    "client_parameters": {
      "model": "Qwen/Qwen3-8B",
      "endpoint_type": "vllm",
      "dataset_name": "sharegpt",
      "dataset_path": "/github/home/.cache/datasets/ShareGPT_V3_unfiltered_cleaned_split.json",
      "num_prompts": 200
    }
  },
  {
    "test_name": "serving_qwen2_5_7B_tp1",
    "qps_list": [
      1,
      4,
      16,
      "inf"
    ],
    "server_parameters": {
      "model": "Qwen/Qwen2.5-7B-Instruct",
      "tensor_parallel_size": 1,
      "swap_space": 16,
      "disable_log_stats": "",
      "disable_log_requests": "",
      "load_format": "dummy"
    },
    "client_parameters": {
      "model": "Qwen/Qwen2.5-7B-Instruct",
      "endpoint_type": "vllm",
      "dataset_name": "sharegpt",
      "dataset_path": "/github/home/.cache/datasets/ShareGPT_V3_unfiltered_cleaned_split.json",
      "num_prompts": 200
    }
  }
]