[1/2] Speed up trtllm_mla attention backend (>10% e2e) (#10473)

This commit is contained in:
fzyzcjy
2025-09-16 02:53:21 +08:00
committed by GitHub
parent 5c08d7d21d
commit 3b25dc127a
6 changed files with 119 additions and 3 deletions

View File

@@ -172,6 +172,7 @@ void downcast_fp8(
// Forward declarations of custom CUDA kernel entry points; implementations live elsewhere (not visible here).
// Copies `input` into `output` on device. NOTE(review): "no_ce" presumably means "no copy engine"
// (i.e. avoids the DMA copy engine) — TODO confirm against the kernel implementation.
void copy_to_gpu_no_ce(const at::Tensor& input, at::Tensor& output);
// Assembles the MLA key tensor `k` from its `k_nope` (non-positional) and `k_rope` (rotary-embedded)
// parts — assumed from the parameter names; verify against the kernel implementation.
void concat_mla_k(torch::Tensor k, torch::Tensor k_nope, torch::Tensor k_rope);
// Concatenates `a` and `b` into `out` for the MLA absorbed-query path — assumed from the name; verify.
void concat_mla_absorb_q(at::Tensor a, at::Tensor b, at::Tensor out);
#ifdef USE_ROCM
void gelu_quick(at::Tensor& out, const at::Tensor& input);