Add Support for Page Size greater than 1 for Flashinfer MLA Backend (#8593)

Signed-off-by: Pavani Majety <pmajety@nvidia.com>
This commit was authored by Pavani Majety on 2025-08-21 18:15:06 -07:00 and committed via GitHub.
parent 0b3a5b1151
commit 3cc3d9b950
5 changed files with 292 additions and 105 deletions

View File

@@ -120,5 +120,49 @@ class TestFlashinferMLAMTP(CustomTestCase):
self.assertGreater(avg_spec_accept_length, 2.5)
class TestFlashinferMLAPageSize16(CustomTestCase):
    """End-to-end GSM8K accuracy check for the FlashInfer MLA attention
    backend launched with a page size of 16.

    Spins up a server process once for the class, runs a 5-shot GSM8K
    evaluation against it, and tears the server down afterwards.
    """

    @classmethod
    def setUpClass(cls):
        # Launch the model server once for every test in this class.
        cls.model = "deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct"
        cls.base_url = DEFAULT_URL_FOR_TEST
        launch_args = ["--trust-remote-code"]
        # The FlashInfer backend / page-size flags only apply on CUDA builds;
        # elsewhere the server starts with default attention settings.
        if torch.cuda.is_available() and torch.version.cuda:
            launch_args += [
                "--cuda-graph-max-bs",
                "4",
                "--attention-backend",
                "flashinfer",
                "--page-size",
                "16",
            ]
        cls.process = popen_launch_server(
            cls.model,
            cls.base_url,
            timeout=DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
            other_args=launch_args,
        )

    @classmethod
    def tearDownClass(cls):
        # Terminate the server and any worker processes it spawned.
        kill_process_tree(cls.process.pid)

    def test_gsm8k(self):
        """Run 5-shot GSM8K (200 questions) and require accuracy > 0.615."""
        eval_args = SimpleNamespace(
            num_shots=5,
            data_path=None,
            num_questions=200,
            max_new_tokens=512,
            parallel=128,
            host="http://127.0.0.1",
            port=int(self.base_url.split(":")[-1]),
        )
        metrics = run_eval_few_shot_gsm8k(eval_args)
        print(metrics)
        self.assertGreater(metrics["accuracy"], 0.615)
# Allow invoking this test module directly (e.g. `python test_file.py`).
if __name__ == "__main__":
    unittest.main()