[Fix] Fix a bug for flashmla to run R1 model (#5875)

Co-authored-by: pengcuo <dgpengcuo@gmail.com>
This commit is contained in:
pengcuo
2025-04-29 16:03:13 +08:00
committed by GitHub
parent 8465f035d1
commit 8e5a6d3441

View File

@@ -241,6 +241,9 @@ class FlashMLABackend(FlashInferMLAAttnBackend):
seq_lens_cpu,
)
def get_cuda_graph_seq_len_fill_value(self):
    """Return the constant sequence-length fill value used for CUDA-graph padding.

    NOTE(review): presumably this pads seq-len buffers when capturing/replaying
    CUDA graphs (the value 1024 looks backend-specific) — confirm against the
    FlashMLA attention backend's graph-capture path.
    """
    fill_value = 1024
    return fill_value
def forward_decode(
self,
q: torch.Tensor,