Double vision prefill throughput by defaulting to optimal vision attention backend (#8484)

Co-authored-by: Xiang (Kevin) Li <lik@nvidia.com>
Author: Kevin Xiang Li
Date: 2025-08-13 02:08:30 -07:00
Committed by: GitHub
Parent: 35e6bc92e3
Commit: 3b3b3baf9f
3 changed files with 20 additions and 7 deletions


@@ -245,6 +245,8 @@ class VisionTritonAttention(nn.Module):
         k: torch.Tensor,
         v: torch.Tensor,
         cu_seqlens: Optional[torch.Tensor],
+        bsz: int,
+        seq_len: int,
         **kwargs,
     ) -> torch.Tensor:
         r"""
@@ -253,6 +255,8 @@
         Returns:
             [b * s, h, head_size]
         """
+        if cu_seqlens is None:
+            cu_seqlens = _get_cu_seqlens_for_shape(bsz, seq_len, device=q.device)
         # [b * s, head, head_size]
         output = torch.empty_like(q)
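
The diff does not show _get_cu_seqlens_for_shape itself; as a hedged sketch, the fallback is assumed to build the cumulative sequence boundaries for a dense [bsz, seq_len] batch, i.e. [0, seq_len, 2*seq_len, ..., bsz*seq_len], which is the layout varlen attention kernels expect. The helper name and exact signature come from the call site above; everything else here is illustrative.

import torch

def cu_seqlens_for_shape_sketch(bsz: int, seq_len: int, device) -> torch.Tensor:
    # One boundary per sequence: 0, seq_len, 2*seq_len, ..., bsz*seq_len.
    return torch.arange(
        0, (bsz + 1) * seq_len, step=seq_len, dtype=torch.int32, device=device
    )

print(cu_seqlens_for_shape_sketch(3, 4, device="cpu"))  # tensor([ 0,  4,  8, 12], dtype=torch.int32)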
@@ -401,7 +405,11 @@ class VisionAttention(nn.Module):
         # priority: server_args > passed qkv_backend > sdpa
         if global_server_args_dict["mm_attention_backend"] is None:
             if qkv_backend is None:
-                qkv_backend = "sdpa"
+                if is_cuda():
+                    # Double prefill throughput by setting attn backend to Triton on CUDA
+                    qkv_backend = "triton_attn"
+                else:
+                    qkv_backend = "sdpa"
             print_info_once(f"Multimodal attention backend not set. Use {qkv_backend}.")
         else:
             qkv_backend = global_server_args_dict["mm_attention_backend"]
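
For clarity, a minimal sketch (not the actual sglang code) of the resolution order this hunk produces: the server argument wins, then an explicitly passed qkv_backend, and only then the new platform default of triton_attn on CUDA, with sdpa everywhere else.

from typing import Optional

def resolve_mm_attention_backend(
    server_arg: Optional[str],   # global_server_args_dict["mm_attention_backend"]
    qkv_backend: Optional[str],  # backend passed to VisionAttention, if any
    cuda_available: bool,        # stand-in for is_cuda()
) -> str:
    if server_arg is not None:
        return server_arg
    if qkv_backend is not None:
        return qkv_backend
    # New default: Triton attention on CUDA (reported ~2x vision prefill throughput),
    # SDPA otherwise.
    return "triton_attn" if cuda_available else "sdpa"

assert resolve_mm_attention_backend(None, None, cuda_available=True) == "triton_attn"
assert resolve_mm_attention_backend(None, None, cuda_available=False) == "sdpa"
assert resolve_mm_attention_backend("sdpa", None, cuda_available=True) == "sdpa"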


@@ -114,7 +114,7 @@ class Qwen2_5_VisionBlock(nn.Module):
         num_heads: int,
         hidden_act="silu",
         norm_layer: Type[nn.Module] = None,
-        attn_implementation: Optional[str] = "sdpa",
+        attn_implementation: Optional[str] = None,
         quant_config: Optional[QuantizationConfig] = None,
         prefix: str = "",
     ) -> None:
@@ -123,7 +123,12 @@ class Qwen2_5_VisionBlock(nn.Module):
             norm_layer = partial(nn.LayerNorm, eps=1e-6)
         self.norm1 = Qwen2RMSNorm(dim, eps=1e-6)
         self.norm2 = Qwen2RMSNorm(dim, eps=1e-6)
-        if attn_implementation == "sdpa":
+        if attn_implementation is None:
+            softmax_in_single_precision = False
+            qkv_backend = None
+            flatten_batch = True
+        elif attn_implementation == "sdpa":
             softmax_in_single_precision = False
             qkv_backend = "sdpa"
             flatten_batch = True
@@ -268,7 +273,6 @@ class Qwen2_5_VisionTransformer(nn.Module):
                     num_heads=num_heads,
                     hidden_act=vision_config.hidden_act,
                     norm_layer=norm_layer,
-                    attn_implementation="sdpa",
                     quant_config=quant_config,
                     prefix=add_prefix(f"blocks.{i}", prefix),
                 )
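
Net effect for Qwen2.5-VL: the vision blocks no longer hardcode SDPA, so the shared default above applies. A hedged sketch, covering only the branches visible in this diff, of how attn_implementation now maps to the VisionAttention arguments:

from typing import Optional, Tuple

def map_attn_implementation(attn_implementation: Optional[str]) -> Tuple[bool, Optional[str], bool]:
    # Returns (softmax_in_single_precision, qkv_backend, flatten_batch).
    if attn_implementation is None:
        # Defer the backend choice to VisionAttention (triton_attn on CUDA, sdpa otherwise).
        return False, None, True
    if attn_implementation == "sdpa":
        return False, "sdpa", True
    # Other implementations exist in the full file but are unchanged by this commit.
    raise NotImplementedError(attn_implementation)

The backend can still be pinned globally through the mm_attention_backend server argument, which keeps the highest priority in the resolution order shown earlier.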