Revert "Replace enable_flashinfer_mla argument with attention_backend" (#5048)

This commit is contained in:
Lianmin Zheng
2025-04-03 13:30:56 -07:00
committed by GitHub
parent b8b6008f47
commit 74885a848b
8 changed files with 20 additions and 21 deletions

View File

@@ -76,6 +76,7 @@ global_server_args_dict = {
     "device": ServerArgs.device,
     "speculative_accept_threshold_single": ServerArgs.speculative_accept_threshold_single,
     "speculative_accept_threshold_acc": ServerArgs.speculative_accept_threshold_acc,
+    "enable_flashinfer_mla": ServerArgs.enable_flashinfer_mla,
     "enable_flashmla": ServerArgs.enable_flashmla,
     "disable_radix_cache": ServerArgs.disable_radix_cache,
     "flashinfer_mla_disable_ragged": ServerArgs.flashinfer_mla_disable_ragged,
@@ -1434,7 +1435,7 @@ class ScheduleBatch(ScheduleBatchDisaggregationDecodeMixin):
         # Create seq_lens_cpu when needed
         if (
-            global_server_args_dict["attention_backend"] == "flashinfer_mla"
+            global_server_args_dict["enable_flashinfer_mla"]
             or global_server_args_dict["enable_flashmla"]
             or global_server_args_dict["attention_backend"] == "fa3"
         ):