From 69af3ec35fd18b2c8caef1d1840c8e7d54ec9fac Mon Sep 17 00:00:00 2001 From: JieXin Liang Date: Tue, 20 May 2025 12:40:21 +0800 Subject: [PATCH] [doc] add note for get_num_kv_splits in triton_backend (#6444) --- python/sglang/srt/layers/attention/triton_backend.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/python/sglang/srt/layers/attention/triton_backend.py b/python/sglang/srt/layers/attention/triton_backend.py index 0aa3a695e..2bedcf077 100644 --- a/python/sglang/srt/layers/attention/triton_backend.py +++ b/python/sglang/srt/layers/attention/triton_backend.py @@ -155,6 +155,9 @@ class TritonAttnBackend(AttentionBackend): seq_lens: torch.Tensor, ): num_token, num_seq = num_kv_splits.shape[0], seq_lens.shape[0] + # NOTE(alcanderian): Considering speculative_decoding, + # num_kv_splits.shape[0] will be topk * real_num_token. + # And the real_num_token is num_seq in the decoding phase. num_group = num_token // num_seq assert (