From 917324862e223b0835abb8e145cbd31d3d176558 Mon Sep 17 00:00:00 2001
From: JieXin Liang
Date: Wed, 23 Apr 2025 02:08:45 +0800
Subject: [PATCH] [fix] reduce dp capture bs (#5634)

Co-authored-by: alcanerian
---
 python/sglang/srt/model_executor/cuda_graph_runner.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/python/sglang/srt/model_executor/cuda_graph_runner.py b/python/sglang/srt/model_executor/cuda_graph_runner.py
index 92cf0388e..8d3f63e15 100644
--- a/python/sglang/srt/model_executor/cuda_graph_runner.py
+++ b/python/sglang/srt/model_executor/cuda_graph_runner.py
@@ -134,7 +134,8 @@ def get_batch_sizes_to_capture(model_runner: ModelRunner):
         )

     gpu_mem = get_device_memory_capacity()
-    if gpu_mem is not None and gpu_mem > 81920:
+    # Batch size of each rank will not become so large when DP is on
+    if gpu_mem is not None and gpu_mem > 81920 and server_args.dp_size == 1:
         capture_bs += list(range(160, 257, 8))

     if max(capture_bs) > model_runner.req_to_token_pool.size: