init v0.11.0rc0

2025-10-14 10:38:28 +08:00
parent 67afd0ea78
commit 66dc16f966
278 changed files with 28130 additions and 11708 deletions


@@ -27,7 +27,6 @@ class TestAscendSchedulerConfig(TestBase):
             max_model_len=8192,
             is_multimodal_model=False,
             send_delta_data=False,
-            scheduler_delay_factor=0,
         )
 
     def test_initialize_from_config_with_default(self):
@@ -36,7 +35,6 @@ class TestAscendSchedulerConfig(TestBase):
             self.basic_scheduler_config, {})
         self.assertEqual(ascend_config.enable_chunked_prefill, False)
         self.assertEqual(ascend_config.policy, "fcfs")
-        self.assertEqual(ascend_config.num_scheduler_steps, 1)
         self.assertEqual(ascend_config.scheduler_cls,
                          "vllm_ascend.core.scheduler.AscendScheduler")
         self.assertEqual(ascend_config.max_num_encoder_input_tokens, 8192)
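
For context, the pattern these tests exercise is sketched below. Only the `scheduler_cls` string `vllm_ascend.core.scheduler.AscendScheduler` appears verbatim in the diff; the import paths and the exact `SchedulerConfig` fields are assumptions based on the surrounding test code.

```python
# Minimal sketch of the pattern under test; import paths are assumed,
# not taken from this diff.
from vllm.config import SchedulerConfig  # assumed location
from vllm_ascend.core.schedule_config import AscendSchedulerConfig  # assumed location

# Mirrors the trimmed basic_scheduler_config from setUp: the
# scheduler_delay_factor field is gone as of this commit.
base = SchedulerConfig(
    max_num_batched_tokens=8192,
    max_model_len=8192,
    is_multimodal_model=False,
    send_delta_data=False,
)

# Defaults flow through when no override config is supplied.
ascend_config = AscendSchedulerConfig.initialize_from_config(base, {})
assert ascend_config.policy == "fcfs"
assert ascend_config.scheduler_cls == "vllm_ascend.core.scheduler.AscendScheduler"
```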
@@ -49,19 +47,21 @@ class TestAscendSchedulerConfig(TestBase):
             AscendSchedulerConfig(
                 enable_chunked_prefill=False,
                 policy="fcfs",
-                num_scheduler_steps=1,
                 scheduler_cls="vllm_ascend.core.scheduler.AscendScheduler",
                 max_num_batched_tokens=2048,
                 max_model_len=2048,
+                max_long_partial_prefills=1,
+                long_prefill_token_threshold=512,
             ),
         )
         self.assertEqual(ascend_config.enable_chunked_prefill, False)
         self.assertEqual(ascend_config.policy, "fcfs")
-        self.assertEqual(ascend_config.num_scheduler_steps, 1)
         self.assertEqual(ascend_config.scheduler_cls,
                          "vllm_ascend.core.scheduler.AscendScheduler")
         self.assertEqual(ascend_config.max_num_batched_tokens, 2048)
         self.assertEqual(ascend_config.encoder_cache_size, 2048)
+        self.assertEqual(ascend_config.max_long_partial_prefills, 1)
+        self.assertEqual(ascend_config.long_prefill_token_threshold, 512)
 
     def test_not_implemented_policy(self):
         with self.assertRaises(NotImplementedError) as context:
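
The two fields added in this hunk look like long-prefill throttles in the style of vLLM's chunked-prefill controls. A hedged sketch of overriding them, reusing `base` and the assumed imports from the sketch above; the comment semantics are inferred from the field names, not stated in the diff:

```python
# Overriding the two new knobs introduced by this hunk.
override = AscendSchedulerConfig(
    enable_chunked_prefill=False,
    policy="fcfs",
    scheduler_cls="vllm_ascend.core.scheduler.AscendScheduler",
    max_num_batched_tokens=2048,
    max_model_len=2048,
    max_long_partial_prefills=1,       # presumably: at most one long prefill in flight
    long_prefill_token_threshold=512,  # presumably: prompts past 512 tokens count as long
)
ascend_config = AscendSchedulerConfig.initialize_from_config(base, override)
assert ascend_config.max_long_partial_prefills == 1
assert ascend_config.long_prefill_token_threshold == 512
```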
@@ -78,28 +78,6 @@ class TestAscendSchedulerConfig(TestBase):
             str(context.exception),
         )
 
-    def test_not_implemented_multimodal(self):
-        with self.assertRaises(NotImplementedError) as context:
-            AscendSchedulerConfig.initialize_from_config(
-                SchedulerConfig(is_multimodal_model=True), {})
-        self.assertIn("currently AscendScheduler only supports LLM models",
-                      str(context.exception))
-
-    def test_not_implemented_multi_step(self):
-        with self.assertRaises(NotImplementedError) as context:
-            AscendSchedulerConfig.initialize_from_config(
-                self.basic_scheduler_config,
-                AscendSchedulerConfig(
-                    num_scheduler_steps=2,
-                    max_num_batched_tokens=2048,
-                    max_model_len=2048,
-                ),
-            )
-        self.assertIn(
-            "currently AscendScheduler doesn't support multi-step",
-            str(context.exception),
-        )
-
     def test_not_implemented_send_delta_data(self):
         with self.assertRaises(NotImplementedError) as context:
             AscendSchedulerConfig.initialize_from_config(
@@ -115,27 +93,17 @@ class TestAscendSchedulerConfig(TestBase):
             str(context.exception),
         )
 
-    def test_not_implemented_delay_factor(self):
-        with self.assertRaises(NotImplementedError) as context:
-            AscendSchedulerConfig.initialize_from_config(
-                self.basic_scheduler_config,
-                AscendSchedulerConfig(
-                    delay_factor=1,
-                    max_num_batched_tokens=2048,
-                    max_model_len=2048,
-                ),
-            )
-        self.assertIn(
-            "currently AscendScheduler doesn't support scheduler_delay_factor",
-            str(context.exception),
-        )
-
     def test_no_override(self):
         ascend_config = AscendSchedulerConfig.initialize_from_config(
             self.basic_scheduler_config, {})
         self.assertEqual(ascend_config.max_num_encoder_input_tokens, 8192)
         self.assertEqual(ascend_config.encoder_cache_size, 8192)
 
+    def test_valid_config_with_multimodal(self):
+        config = AscendSchedulerConfig.initialize_from_config(
+            SchedulerConfig(is_multimodal_model=True), {})
+        self.assertTrue(config.is_multimodal_model)
+
     def test_valid_config_with_chunked_prefill(self):
         ascend_config = AscendSchedulerConfig.initialize_from_config(
             self.basic_scheduler_config,
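
Together with the removals in the previous hunk, the behavioral flip here is that a multimodal SchedulerConfig now initializes cleanly instead of raising NotImplementedError. A short sketch under the same assumed imports:

```python
# Multimodal configs are now accepted rather than rejected.
mm_config = AscendSchedulerConfig.initialize_from_config(
    SchedulerConfig(is_multimodal_model=True), {})
assert mm_config.is_multimodal_model
```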
@@ -165,3 +133,16 @@ class TestAscendSchedulerConfig(TestBase):
         )
         self.assertIn("max_num_batched_tokens (2048)", str(context.exception))
         self.assertIn("max_model_len (4096)", str(context.exception))
+
+    def test_initialize_from_config_with_pd_transfer(self):
+        ascend_config = AscendSchedulerConfig.initialize_from_config(
+            self.basic_scheduler_config,
+            AscendSchedulerConfig(
+                enable_pd_transfer=True,
+                decode_max_num_seqs=48,
+                max_num_batched_tokens=4096,
+                max_model_len=4096,
+            ),
+        )
+        self.assertEqual(ascend_config.enable_pd_transfer, True)
+        self.assertEqual(ascend_config.decode_max_num_seqs, 48)
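
A sketch of the new prefill/decode-transfer options the final hunk introduces, reusing `base` and the assumed imports from the first sketch; the comment semantics are guesses from the field names and are not stated anywhere in the diff:

```python
# New PD-transfer fields; semantics in comments are assumptions.
pd_config = AscendSchedulerConfig.initialize_from_config(
    base,
    AscendSchedulerConfig(
        enable_pd_transfer=True,   # presumably: enable prefill-to-decode phase transfer
        decode_max_num_seqs=48,    # presumably: sequence cap during the decode phase
        max_num_batched_tokens=4096,
        max_model_len=4096,
    ),
)
assert pd_config.enable_pd_transfer is True
assert pd_config.decode_max_num_seqs == 48
```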