Reduce hardcoded logic of kernel usage (#707)

Mingyi
2024-07-23 16:42:21 -07:00
committed by GitHub
parent 9f94728f5a
commit a523a3c13a
2 changed files with 23 additions and 13 deletions


@@ -85,9 +85,9 @@ class RadixAttention(nn.Module):
         return o
 
     def extend_forward_flashinfer(self, q, k, v, input_metadata: InputMetadata):
-        self.store_kv_cache(k, v, input_metadata)
+        if not input_metadata.use_ragged:
+            self.store_kv_cache(k, v, input_metadata)
 
-        if input_metadata.total_num_tokens <= 4096:
             o = input_metadata.flashinfer_prefill_wrapper_paged.forward(
                 q.contiguous().view(-1, self.tp_q_head_num, self.head_dim),
                 input_metadata.token_to_kv_pool.get_kv_buffer(self.layer_id),
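
For context, this hunk stops deciding the prefill path inside the attention layer with a hardcoded token count (4096) and instead consumes a use_ragged flag prepared upstream. A minimal sketch of that pattern follows; AttentionConfig, ragged_prefill_token_threshold, and should_use_ragged are illustrative names, not identifiers from this commit:

from dataclasses import dataclass


@dataclass
class AttentionConfig:
    # Assumed knob: the token count above which the ragged prefill path is
    # preferred. The point of the commit is that this lives in configuration
    # rather than being hardcoded inside RadixAttention.
    ragged_prefill_token_threshold: int = 4096


def should_use_ragged(total_num_tokens: int, cfg: AttentionConfig) -> bool:
    # Decide once, outside the kernel call site; the attention layer then only
    # branches on the precomputed input_metadata.use_ragged flag, as in the
    # hunk above.
    return total_num_tokens > cfg.ragged_prefill_token_threshold
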
@@ -122,6 +122,8 @@ class RadixAttention(nn.Module):
                 o, _ = merge_state(o1, s1, o2, s2)
             self.store_kv_cache(k, v, input_metadata)
+        if input_metadata.total_num_tokens >= global_config.layer_sync_threshold:
+            torch.cuda.synchronize()
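
The second hunk adds an occasional device synchronization for very large batches, gated by global_config.layer_sync_threshold rather than a constant. A rough sketch of the same idea; the default value and the _GlobalConfig and maybe_sync_after_layer names are assumptions for illustration, not code from this commit:

import torch


class _GlobalConfig:
    # Assumed default; the diff references layer_sync_threshold, but its
    # definition is in the other changed file, which is not shown here.
    layer_sync_threshold = 8192


global_config = _GlobalConfig()


def maybe_sync_after_layer(total_num_tokens: int) -> None:
    # Synchronize after a layer only when the batch is large, keeping the host
    # from queueing too far ahead of the GPU while leaving small batches free
    # of the extra launch-latency cost.
    if torch.cuda.is_available() and total_num_tokens >= global_config.layer_sync_threshold:
        torch.cuda.synchronize()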