Fix log for chunked prefix cache (#11624)

Author: Baizhou Zhang
Date: 2025-10-14 11:49:33 -07:00
Committed by: GitHub
Parent: 49345a68cf
Commit: c224a4c6cc


@@ -174,6 +174,15 @@ MLA_ATTENTION_BACKENDS = [
     "nsa",
 ]
 
+CHUNKED_PREFIX_CACHE_SUPPORTED_ATTENTION_BACKENDS = [
+    "flashinfer",
+    "fa3",
+    "fa4",
+    "flashmla",
+    "cutlass_mla",
+    "trtllm_mla",
+]
+
 
 def add_mla_attention_backend(backend_name):
     if backend_name not in MLA_ATTENTION_BACKENDS:
@@ -604,7 +613,11 @@ class ModelRunner:
                 f"{self.model_config.hf_config.model_type}"
             )
-        if not self.use_mla_backend:
+        if (
+            not self.use_mla_backend
+            or server_args.attention_backend
+            not in CHUNKED_PREFIX_CACHE_SUPPORTED_ATTENTION_BACKENDS
+        ):
             server_args.disable_chunked_prefix_cache = True
         if not server_args.disable_chunked_prefix_cache:
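
Below is a minimal, self-contained sketch of the gating behavior this diff introduces. Only CHUNKED_PREFIX_CACHE_SUPPORTED_ATTENTION_BACKENDS and the condition itself come from the change; resolve_chunked_prefix_cache is a hypothetical helper that stands in for the surrounding ModelRunner logic, not SGLang API.

# Hedged sketch: the supported-backend list is copied from the diff above;
# the helper function and its signature are illustrative only.
CHUNKED_PREFIX_CACHE_SUPPORTED_ATTENTION_BACKENDS = [
    "flashinfer",
    "fa3",
    "fa4",
    "flashmla",
    "cutlass_mla",
    "trtllm_mla",
]


def resolve_chunked_prefix_cache(
    use_mla_backend: bool,
    attention_backend: str,
    disable_chunked_prefix_cache: bool,
) -> bool:
    """Return the effective disable flag after the backend check."""
    # Chunked prefix cache needs both an MLA backend and an attention
    # backend from the supported list; anything else force-disables it.
    if (
        not use_mla_backend
        or attention_backend
        not in CHUNKED_PREFIX_CACHE_SUPPORTED_ATTENTION_BACKENDS
    ):
        return True
    return disable_chunked_prefix_cache


# "triton" is not in the supported list, so the cache is disabled even
# though the user never set the disable flag; "fa3" keeps it enabled.
assert resolve_chunked_prefix_cache(True, "triton", False) is True
assert resolve_chunked_prefix_cache(True, "fa3", False) is False

Before this change, the flag was forced off only when the MLA backend was unused, so an unsupported attention backend could still reach the branch guarded by the final context line; tightening the condition is presumably what fixes the log behavior the commit title refers to.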