Split local attention test from fa3 test (#5774)

Author: Baizhou Zhang
Date:   2025-04-27 01:03:31 -07:00 (committed by GitHub)
Parent: 981a2619d5
Commit: a45a4b239d

3 changed files with 74 additions and 18 deletions

@@ -10,7 +10,6 @@ from sglang.test.few_shot_gsm8k import run_eval as run_eval_few_shot_gsm8k
 from sglang.test.test_utils import (
     DEFAULT_MODEL_NAME_FOR_TEST,
     DEFAULT_MODEL_NAME_FOR_TEST_EAGLE3,
-    DEFAULT_MODEL_NAME_FOR_TEST_LOCAL_ATTENTION,
     DEFAULT_MODEL_NAME_FOR_TEST_MLA,
     DEFAULT_MODEL_NAME_FOR_TEST_MLA_NEXTN,
     DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
@@ -127,22 +126,6 @@ class TestFlashAttention3MLA(BaseFlashAttentionTest):
         return DEFAULT_SERVER_ARGS
 
 
-class TestFlashAttention3LocalAttn(BaseFlashAttentionTest):
-    """Test FlashAttention3 with Model with local attention, e.g. Llama 4."""
-
-    accuracy_threshold = 0.70
-    model = DEFAULT_MODEL_NAME_FOR_TEST_LOCAL_ATTENTION
-
-    @classmethod
-    def get_server_args(cls):
-        cloned_args = DEFAULT_SERVER_ARGS.copy()
-        # remove --enable-torch-compile from cloned_args since llama4 does not support it for now
-        cloned_args.remove("--enable-torch-compile")
-        # we cannot use scout's 10m context due to this bug: https://github.com/sgl-project/sglang/issues/5755
-        cloned_args.extend(["--tp", "4", "--context-length", "1000000"])
-        return cloned_args
-
-
 class TestFlashAttention3SpeculativeDecode(BaseFlashAttentionTest):
     """Test FlashAttention3 with speculative decode enabled with Llama 3.1 8B and its eagle3 model"""