### What this PR does / why we need it?

- Add qwen2.5-7b test
- Optimize the documentation to be more developer-friendly

Signed-off-by: xuedinge233 <damow890@gmail.com>
Co-authored-by: xuedinge233 <damow890@gmail.com>
26 lines
628 B
JSON
[
  {
    "test_name": "throughput_llama8B_tp1",
    "parameters": {
      "model": "LLM-Research/Meta-Llama-3.1-8B-Instruct",
      "tensor_parallel_size": 1,
      "load_format": "dummy",
      "dataset_path": "./ShareGPT_V3_unfiltered_cleaned_split.json",
      "num_prompts": 200,
      "backend": "vllm"
    }
  },
  {
    "test_name": "throughput_qwen2_5_7B_tp1",
    "parameters": {
      "model": "Qwen/Qwen2.5-7B-Instruct",
      "tensor_parallel_size": 1,
      "load_format": "dummy",
      "dataset_path": "./ShareGPT_V3_unfiltered_cleaned_split.json",
      "num_prompts": 200,
      "backend": "vllm"
    }
  }
]