[qwen3-omni] Add Qwen3-Omni MoE thinker

This commit is contained in:
2025-10-09 17:51:14 +08:00
parent bc57e2ef60
commit 24fab12b2f
8 changed files with 1543 additions and 37 deletions

View File

@@ -54,6 +54,14 @@ def check_xformers_availability():
return USE_XFORMERS_OPS
def check_upstream_fa_availability(dtype: torch.dtype):
    """Report whether upstream flash-attn 2 can serve attention for *dtype*.

    Requires a half-precision dtype, a CUDA platform with compute
    capability >= 8.0 (Ampere or newer), and an importable flash-attn 2
    per ``transformers``' own availability probe.
    """
    # Flash-attn only supports half precision; bail out early otherwise.
    if dtype not in (torch.float16, torch.bfloat16):
        return False
    # Needs CUDA hardware of at least SM80.
    if not current_platform.is_cuda():
        return False
    if not current_platform.has_device_capability(80):
        return False
    # Deferred import: transformers' probe is only needed on this path.
    from transformers.utils import is_flash_attn_2_available
    return is_flash_attn_2_available()
class Attention(nn.Module):
"""Attention layer.