### What this PR does / why we need it? This PR aims to add multi-node tests; as a first step, it adds a `deepseek-v3` DP+TP+EP test. ### Does this PR introduce _any_ user-facing change? ### How was this patch tested? - vLLM version: v0.11.0rc3 - vLLM main: https://github.com/vllm-project/vllm/commit/v0.11.0 --------- Signed-off-by: wangli <wangli858794774@gmail.com>
42 lines · 1.3 KiB · JSON
[
  {
    "test_name": "test_deepseek_v3",
    "disaggregate_prefill": false,
    "enable_multithread_load": false,
    "num_nodes": 2,
    "server_parameters": {
      "leader_config": {
        "model": "vllm-ascend/DeepSeek-V3-W8A8",
        "additional_config": {
          "ascend_scheduler_config": {
            "enabled": true
          },
          "torchair_graph_config": {
            "enabled": true
          }
        }
      },
      "worker_config": {
        "model": "vllm-ascend/DeepSeek-V3-W8A8",
        "additional_config": {
          "ascend_scheduler_config": {
            "enabled": true
          },
          "torchair_graph_config": {
            "enabled": true
          }
        }
      }
    },
    "client_parameters": {
      "model": "vllm-ascend/DeepSeek-V3-W8A8",
      "backend": "vllm",
      "dataset_name": "sharegpt",
      "dataset_path": "/root/.cache/datasets/ShareGPT_V3_unfiltered_cleaned_split.json",
      "num_prompts": 200,
      "request_rate": 1
    },
    "accuracy_parameters": {}
  }
]