forked from EngineX-Cambricon/enginex-mlu370-vllm
add qwen3
This commit is contained in:
17 lines added
vllm-v0.6.2/vllm/attention/__init__.py (new file)
@@ -0,0 +1,17 @@
|
||||
"""Public API of the ``vllm.attention`` package.

Re-exports the attention abstractions (backend, metadata, state) together
with the concrete :class:`Attention` layer and the backend-selection helper,
so callers can simply ``from vllm.attention import Attention, ...``.
"""

from vllm.attention.backends.abstract import (AttentionBackend,
                                              AttentionMetadata,
                                              AttentionMetadataBuilder,
                                              AttentionState, AttentionType)
from vllm.attention.layer import Attention
from vllm.attention.selector import get_attn_backend

# Explicit public API. NOTE: the original listed "Attention" twice; the
# duplicate is removed here — `from vllm.attention import *` exports the
# exact same set of names either way, so this is behavior-preserving.
__all__ = [
    "Attention",
    "AttentionBackend",
    "AttentionMetadata",
    "AttentionType",
    "AttentionMetadataBuilder",
    "AttentionState",
    "get_attn_backend",
]
|
||||
Reference in New Issue
Block a user