Bump vLLM version to v0.11.2

What's broken and changed by vLLM:
1. structured_output is broken by https://github.com/vllm-project/vllm/pull/26866
2. get_mrope_input_positions is broken by https://github.com/vllm-project/vllm/pull/28399
3. Graph mode is broken by https://github.com/vllm-project/vllm/pull/25110. We'll upgrade torch to 2.8 later to fix this.
4. Embedding is broken by https://github.com/vllm-project/vllm/pull/27583
5. `get_attn_backend_cls` and the attention backend are broken by https://github.com/vllm-project/vllm/pull/28534
6. Spec decode is broken by https://github.com/vllm-project/vllm/pull/28771
7. The sp feature is broken by https://github.com/vllm-project/vllm/pull/27126
8. MTP is broken by https://github.com/vllm-project/vllm/pull/27922
9. LoRA is broken by https://github.com/vllm-project/vllm/pull/21068
10. execute_model is broken by https://github.com/vllm-project/vllm/pull/26866
11. The `VLLM_DISABLE_SHARED_EXPERTS_STREAM` env is broken by https://github.com/vllm-project/vllm/pull/28159
12. KV cache is broken by https://github.com/vllm-project/vllm/pull/27753
13. DP is broken by https://github.com/vllm-project/vllm/pull/25110

What's broken and changed on our side:
1. Qwen VL is broken by https://github.com/vllm-project/vllm/pull/28455. We'll remove the model files in the future to avoid this kind of error.
2. Engine core is broken by https://github.com/vllm-project/vllm/pull/23691. We'll remove the patch file in the future.
3. The Ascend scheduler is broken by https://github.com/vllm-project/vllm/pull/28733. We'll remove the Ascend scheduler later.
4. Qwen3-Next is broken by https://github.com/vllm-project/vllm/pull/28083. We'll remove the model files in the future to avoid this kind of error.
5. Qwen VL is broken by https://github.com/vllm-project/vllm/pull/27764. We'll remove the model files in the future.

Known issues:
1. Ray doesn't work.
2. The accuracy of Qwen3-Next is not correct.
3. Qwen3-VL is broken.
4. Prefix cache + Ascend scheduler + DeepSeek-V2-Lite is broken.

- vLLM version: v0.11.2

---------

Signed-off-by: wangxiyuan <wangxiyuan1007@gmail.com>
Signed-off-by: MengqingCao <cmq0113@163.com>
Signed-off-by: hfadzxy <starmoon_zhang@163.com>
Signed-off-by: leo-pony <nengjunma@outlook.com>
Co-authored-by: MengqingCao <cmq0113@163.com>
Co-authored-by: hfadzxy <starmoon_zhang@163.com>
Co-authored-by: leo-pony <nengjunma@outlook.com>
Co-authored-by: 22dimensions <waitingwind@foxmail.com>
Co-authored-by: shen-shanshan <467638484@qq.com>
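Note: the Ascend scheduler called out above (broken-and-changed item 3, known issue 4) is the component exercised by the test file below. For reference, a minimal sketch of how it is typically switched on; the `additional_config` key follows vllm-ascend's documented `ascend_scheduler_config` knob, but treat the exact schema as an assumption that may shift between versions:

```python
from vllm import LLM

# Hedged sketch: "ascend_scheduler_config" under additional_config is the
# vllm-ascend switch for the Ascend scheduler (schema may vary by version).
# Per known issue 4 above, combining it with prefix caching on
# DeepSeek-V2-Lite is currently broken, so keep prefix caching off there.
llm = LLM(
    model="deepseek-ai/DeepSeek-V2-Lite",
    enable_prefix_caching=False,
    additional_config={"ascend_scheduler_config": {"enabled": True}},
)
```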
```python
#
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from vllm.config import SchedulerConfig

from tests.ut.base import TestBase
from vllm_ascend.core.schedule_config import AscendSchedulerConfig


class TestAscendSchedulerConfig(TestBase):

    def setUp(self):
        self.basic_scheduler_config = SchedulerConfig(
            max_num_batched_tokens=8192,
            max_model_len=8192,
            is_multimodal_model=False,
            send_delta_data=False,
        )

    def test_initialize_from_config_with_default(self):
        # No additional config given, check the default values here.
        ascend_config = AscendSchedulerConfig.initialize_from_config(
            self.basic_scheduler_config, {})
        self.assertEqual(ascend_config.enable_chunked_prefill, False)
        self.assertEqual(ascend_config.policy, "fcfs")
        self.assertEqual(ascend_config.scheduler_cls,
                         "vllm_ascend.core.scheduler.AscendScheduler")
        self.assertEqual(ascend_config.max_num_encoder_input_tokens, 8192)
        self.assertEqual(ascend_config.encoder_cache_size, 8192)

    def test_initialize_from_config_with_override(self):
        # Explicit overrides should take precedence over the base config.
        ascend_config = AscendSchedulerConfig.initialize_from_config(
            self.basic_scheduler_config,
            AscendSchedulerConfig(
                enable_chunked_prefill=False,
                policy="fcfs",
                scheduler_cls="vllm_ascend.core.scheduler.AscendScheduler",
                max_num_batched_tokens=8192,
                max_model_len=2048,
                max_long_partial_prefills=1,
                long_prefill_token_threshold=512,
            ),
        )
        self.assertEqual(ascend_config.enable_chunked_prefill, False)
        self.assertEqual(ascend_config.policy, "fcfs")
        self.assertEqual(ascend_config.scheduler_cls,
                         "vllm_ascend.core.scheduler.AscendScheduler")
        self.assertEqual(ascend_config.max_num_batched_tokens, 8192)
        self.assertEqual(ascend_config.encoder_cache_size, 8192)
        self.assertEqual(ascend_config.max_long_partial_prefills, 1)
        self.assertEqual(ascend_config.long_prefill_token_threshold, 512)

    def test_not_implemented_policy(self):
        # Only fcfs is supported; any other policy must raise.
        with self.assertRaises(NotImplementedError) as context:
            AscendSchedulerConfig.initialize_from_config(
                self.basic_scheduler_config,
                AscendSchedulerConfig(
                    policy="custom_policy",
                    max_num_batched_tokens=8192,
                    max_model_len=2048,
                ),
            )
        self.assertIn(
            "currently AscendScheduler only supports fcfs policy",
            str(context.exception),
        )

    def test_no_override(self):
        ascend_config = AscendSchedulerConfig.initialize_from_config(
            self.basic_scheduler_config, {})
        self.assertEqual(ascend_config.max_num_encoder_input_tokens, 8192)
        self.assertEqual(ascend_config.encoder_cache_size, 8192)

    def test_valid_config_with_multimodal(self):
        config = AscendSchedulerConfig.initialize_from_config(
            SchedulerConfig(is_multimodal_model=True,
                            max_num_batched_tokens=8192), {})
        self.assertTrue(config.is_multimodal_model)

    def test_valid_config_with_chunked_prefill(self):
        ascend_config = AscendSchedulerConfig.initialize_from_config(
            self.basic_scheduler_config,
            AscendSchedulerConfig(
                enable_chunked_prefill=True,
                max_num_batched_tokens=8192,
                max_model_len=8192,
            ),
        )
        self.assertEqual(ascend_config.max_num_batched_tokens, 8192)
        self.assertEqual(ascend_config.max_model_len, 8192)
        self.assertTrue(ascend_config.enable_chunked_prefill)

    def test_invalid_config_without_chunked_prefill(self):
        # Without chunked prefill, max_num_batched_tokens must cover
        # max_model_len, so 2048 < 8192 is rejected.
        with self.assertRaises(ValueError) as context:
            AscendSchedulerConfig.initialize_from_config(
                self.basic_scheduler_config,
                AscendSchedulerConfig(
                    enable_chunked_prefill=False,
                    max_num_batched_tokens=2048,
                    max_model_len=8192,
                ),
            )
        self.assertIn(
            "Ascend scheduler is enabled without chunked prefill feature",
            str(context.exception),
        )
        self.assertIn("max_num_batched_tokens (2048)", str(context.exception))
        self.assertIn("max_model_len (8192)", str(context.exception))

    def test_initialize_from_config_with_pd_transfer(self):
        ascend_config = AscendSchedulerConfig.initialize_from_config(
            self.basic_scheduler_config,
            AscendSchedulerConfig(
                enable_pd_transfer=True,
                decode_max_num_seqs=48,
                max_num_batched_tokens=8192,
                max_model_len=4096,
            ),
        )
        self.assertEqual(ascend_config.enable_pd_transfer, True)
        self.assertEqual(ascend_config.decode_max_num_seqs, 48)
```
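Taken together, these tests pin down the `AscendSchedulerConfig` surface: `initialize_from_config` merges a base vLLM `SchedulerConfig` with optional Ascend overrides and validates the result. A minimal standalone sketch of that flow, using only names exercised by the tests above (treat it as illustrative, not as the module's documented API):

```python
from vllm.config import SchedulerConfig

from vllm_ascend.core.schedule_config import AscendSchedulerConfig

# Base scheduler config, as vLLM would construct it for an engine.
base = SchedulerConfig(max_num_batched_tokens=8192, max_model_len=8192)

# With no overrides, the Ascend defaults apply: fcfs policy,
# chunked prefill disabled, AscendScheduler as the scheduler class.
cfg = AscendSchedulerConfig.initialize_from_config(base, {})
assert cfg.policy == "fcfs"
assert not cfg.enable_chunked_prefill

# Overrides are themselves an AscendSchedulerConfig. Invalid combinations
# are rejected at init time: without chunked prefill,
# max_num_batched_tokens must be able to cover max_model_len.
try:
    AscendSchedulerConfig.initialize_from_config(
        base,
        AscendSchedulerConfig(
            enable_chunked_prefill=False,
            max_num_batched_tokens=2048,
            max_model_len=8192,
        ),
    )
except ValueError as err:
    # Message cites max_num_batched_tokens (2048) vs max_model_len (8192).
    print(err)
```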