Fix log for chunked prefix cache (#11624)
@@ -174,6 +174,15 @@ MLA_ATTENTION_BACKENDS = [
     "nsa",
 ]
 
+CHUNKED_PREFIX_CACHE_SUPPORTED_ATTENTION_BACKENDS = [
+    "flashinfer",
+    "fa3",
+    "fa4",
+    "flashmla",
+    "cutlass_mla",
+    "trtllm_mla",
+]
+
 
 def add_mla_attention_backend(backend_name):
     if backend_name not in MLA_ATTENTION_BACKENDS:
@@ -604,7 +613,11 @@ class ModelRunner:
                 f"{self.model_config.hf_config.model_type}"
             )
 
-        if not self.use_mla_backend:
+        if (
+            not self.use_mla_backend
+            or server_args.attention_backend
+            not in CHUNKED_PREFIX_CACHE_SUPPORTED_ATTENTION_BACKENDS
+        ):
             server_args.disable_chunked_prefix_cache = True
 
         if not server_args.disable_chunked_prefix_cache:
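For readers skimming the diff: a minimal, self-contained sketch of the gating logic the second hunk introduces. This is not part of the commit; should_disable_chunked_prefix_cache is a hypothetical helper standing in for the inline check on ModelRunner, with use_mla_backend and attention_backend passed as plain arguments instead of being read off self and server_args.

# Hypothetical standalone version of the check (for illustration only).
CHUNKED_PREFIX_CACHE_SUPPORTED_ATTENTION_BACKENDS = [
    "flashinfer", "fa3", "fa4", "flashmla", "cutlass_mla", "trtllm_mla",
]

def should_disable_chunked_prefix_cache(use_mla_backend: bool, attention_backend: str) -> bool:
    # Chunked prefix caching stays enabled only when an MLA backend is in use
    # AND that backend is on the supported list; in every other case the
    # commit forces disable_chunked_prefix_cache = True.
    return (
        not use_mla_backend
        or attention_backend not in CHUNKED_PREFIX_CACHE_SUPPORTED_ATTENTION_BACKENDS
    )

assert should_disable_chunked_prefix_cache(True, "fa3") is False   # supported MLA backend
assert should_disable_chunked_prefix_cache(True, "triton") is True # MLA, but unsupported backend
assert should_disable_chunked_prefix_cache(False, "fa3") is True   # no MLA backend at all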