Support cuda graph for DP attention (#2061)

This commit is contained in:
Ke Bao
2024-11-18 08:29:20 +08:00
committed by GitHub
parent 11f881d173
commit 62832bb272
9 changed files with 88 additions and 26 deletions

View File

@@ -191,11 +191,12 @@ class ServerArgs:
         if self.enable_dp_attention:
             self.dp_size = self.tp_size
             self.chunked_prefill_size = self.chunked_prefill_size // 2
-            self.disable_cuda_graph = True
+            self.cuda_graph_max_bs = min(self.cuda_graph_max_bs, 96)
             self.enable_overlap_schedule = False
             logger.warning(
                 f"DP attention is enabled. The chunked prefill size is adjusted to {self.chunked_prefill_size} to avoid MoE workload issue. "
-                "The CUDA graph is disabled. Data parallel size is adjust to be the same as tensor parallel size."
+                f"The CUDA graph max batch size is adjusted to {self.cuda_graph_max_bs}. "
+                "Data parallel size is adjusted to be the same as tensor parallel size."
             )
         if self.enable_overlap_schedule: