Reduce overhead for fa by not calling heavy CUDA property check (#7375)
This commit is contained in:
@@ -25,10 +25,10 @@ def is_fa3_supported(device=None) -> bool:
     # https://docs.nvidia.com/cuda/cuda-c-programming-guide/#shared-memory-8-x
     # And for sgl-kernel right now, we can build fa3 on sm80/sm86/sm89/sm90a.
     # That means if you use A100/A*0/L20/L40/L40s/4090 you can use fa3.
-    return (
+    return (torch.version.cuda >= "12.3") and (
         torch.cuda.get_device_capability(device)[0] == 9
         or torch.cuda.get_device_capability(device)[0] == 8
-    ) and (torch.version.cuda >= "12.3")
+    )

 DISABLE_BACKWARD = True

Reference in New Issue
Block a user