diff --git a/python/sglang/srt/layers/attention/flashattention_backend.py b/python/sglang/srt/layers/attention/flashattention_backend.py
index b0615be3c..740b46b6b 100644
--- a/python/sglang/srt/layers/attention/flashattention_backend.py
+++ b/python/sglang/srt/layers/attention/flashattention_backend.py
@@ -1617,7 +1617,7 @@ class FlashAttentionBackend(AttentionBackend):
                 metadata.max_seq_len_k + self.page_size - 1
             ) // self.page_size
 
-            normal_decode_set_medadata(
+            normal_decode_set_metadata(
                 metadata.cache_seqlens_int32,
                 metadata.cu_seqlens_k,
                 metadata.page_table,
@@ -1666,7 +1666,7 @@ class FlashAttentionBackend(AttentionBackend):
                 max_seq_pages = (max_len + self.page_size - 1) // self.page_size
                 metadata.max_seq_len_k = max_len
 
-                normal_decode_set_medadata(
+                normal_decode_set_metadata(
                     metadata.cache_seqlens_int32,
                     metadata.cu_seqlens_k,
                     metadata.page_table,
@@ -2089,7 +2089,7 @@ class FlashAttentionMultiStepBackend:
 # @torch.compile(dynamic=True, backend=get_compiler_backend())
 # TODO: fuse these kernels
 # NOTE: torch.compile makes it slower in speculative decoding
-def normal_decode_set_medadata(
+def normal_decode_set_metadata(
     cache_seqlens_int32: torch.Tensor,
     cu_seqlens_k: torch.Tensor,
     page_table: torch.Tensor,
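
For reference, not part of the patch: the diff only renames the helper (fixing the `medadata` typo) and the hunks truncate its signature after the third parameter. Below is a minimal sketch of what a decode-metadata setter of this shape typically computes, under stated assumptions; `req_to_page`, `seq_lens`, and `page_size` are hypothetical stand-ins for the parameters elided by the hunk, and the body is illustrative, not the function's actual implementation.

```python
import torch


def normal_decode_set_metadata(
    cache_seqlens_int32: torch.Tensor,  # [batch] int32, per-request KV lengths (written in place)
    cu_seqlens_k: torch.Tensor,         # [batch + 1] int32, cumulative KV lengths (written in place)
    page_table: torch.Tensor,           # [batch, max_pages] destination page-table slice
    req_to_page: torch.Tensor,          # hypothetical source table mapping requests to KV pages
    seq_lens: torch.Tensor,             # hypothetical [batch] current sequence lengths
    page_size: int,                     # hypothetical KV-cache page size
) -> None:
    # Per-request KV-cache lengths as int32.
    cache_seqlens_int32.copy_(seq_lens.to(torch.int32))
    # Prefix sum for varlen attention; cu_seqlens_k[0] stays 0.
    cu_seqlens_k[1:].copy_(
        torch.cumsum(cache_seqlens_int32, dim=0, dtype=torch.int32)
    )
    # Copy only the pages each request can currently occupy.
    max_pages = (int(seq_lens.max().item()) + page_size - 1) // page_size
    page_table[:, :max_pages].copy_(req_to_page[:, :max_pages])


if __name__ == "__main__":
    batch, page_size, max_pages = 3, 16, 8
    seq_lens = torch.tensor([17, 40, 5])
    cache = torch.zeros(batch, dtype=torch.int32)
    cu = torch.zeros(batch + 1, dtype=torch.int32)
    table = torch.zeros(batch, max_pages, dtype=torch.int32)
    src = torch.arange(batch * max_pages, dtype=torch.int32).reshape(batch, max_pages)
    normal_decode_set_metadata(cache, cu, table, src, seq_lens, page_size)
    print(cache, cu, table, sep="\n")
```

The in-place `copy_` calls in a sketch like this keep the metadata buffers at stable addresses, which is presumably why the real helper mutates preallocated tensors rather than returning new ones: the surrounding backend replays CUDA graphs, and captured graphs require the tensors they reference to stay at fixed memory locations.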