From 971a0dfa32f7521c77c2eeb1180cc9a4fa0100aa Mon Sep 17 00:00:00 2001
From: Baizhou Zhang
Date: Sun, 8 Jun 2025 05:13:22 -0700
Subject: [PATCH] Extend cuda graph capture bs for B200 (#6937)

---
 python/sglang/srt/model_executor/cuda_graph_runner.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/python/sglang/srt/model_executor/cuda_graph_runner.py b/python/sglang/srt/model_executor/cuda_graph_runner.py
index 36d3a1b25..5583a0884 100644
--- a/python/sglang/srt/model_executor/cuda_graph_runner.py
+++ b/python/sglang/srt/model_executor/cuda_graph_runner.py
@@ -139,6 +139,8 @@ def get_batch_sizes_to_capture(model_runner: ModelRunner):
     gpu_mem = get_device_memory_capacity()
     if gpu_mem is not None and gpu_mem > 96 * 1024:
         capture_bs += list(range(160, 257, 8))
+    if gpu_mem is not None and gpu_mem > 180 * 1000:
+        capture_bs += list(range(256, 513, 16))
     if max(capture_bs) > model_runner.req_to_token_pool.size:
         # In some cases (e.g., with a small GPU or --max-running-requests), the
         # #max-running-requests