Drop ascend scheduler (#4623)

It's safe to drop the ascend scheduler now. The related tests and docs have
already been removed.


- vLLM version: v0.12.0
- vLLM main:
ad32e3e19c

Signed-off-by: wangxiyuan <wangxiyuan1007@gmail.com>
This commit is contained in:
wangxiyuan
2025-12-05 09:03:45 +08:00
committed by GitHub
parent 00b4fb80de
commit ea54388e19
12 changed files with 34 additions and 767 deletions

View File

@@ -99,7 +99,7 @@ class TestAscendRowParallelLinear(BaseLinearTest):
ascend_config._ASCEND_CONFIG = MagicMock()
ascend_config._ASCEND_CONFIG.oproj_tensor_parallel_size = 2
ascend_config._ASCEND_CONFIG.ascend_scheduler_config.enabled = False
ascend_config._ASCEND_CONFIG.recompute_scheduler_enable = False
linear = AscendRowParallelLinear(
input_size=16,

View File

@@ -209,12 +209,7 @@ class TestAscendLogitsProcessor(unittest.TestCase):
return_value=torch.randn(1, self.vocab_size)),
patch(
"vllm_ascend.ops.vocab_parallel_embedding.get_lmhead_tp_group.all_gather",
return_value=torch.randn(1, self.vocab_size)),
patch(
"vllm_ascend.core.schedule_config.AscendSchedulerConfig.initialize_from_config",
return_value=MagicMock(max_num_batched_tokens=1000,
max_model_len=512,
enable_chunked_prefill=False))
return_value=torch.randn(1, self.vocab_size))
]
for p in self.patches:

View File

@@ -32,7 +32,6 @@ class TestNPUPlatform(TestBase):
def mock_vllm_ascend_config():
mock_ascend_config = MagicMock()
mock_ascend_config.torchair_graph_config.enabled = False
mock_ascend_config.ascend_scheduler_config.enabled = False
mock_ascend_config.enable_shared_expert_dp = False
return mock_ascend_config