diff --git a/tests/ut/test_platform.py b/tests/ut/test_platform.py
index 0f91ca4d..914cd7f4 100644
--- a/tests/ut/test_platform.py
+++ b/tests/ut/test_platform.py
@@ -307,6 +307,26 @@ class TestNPUPlatform(TestBase):
 
         self.assertEqual(vllm_config.cache_config.block_size, 128)
 
+    def test_update_block_size_for_backend_preserves_hybrid_block_size(self):
+        vllm_config = TestNPUPlatform.mock_vllm_config()
+        vllm_config.model_config.is_hybrid = True
+        vllm_config.cache_config.block_size = 1024
+        vllm_config.cache_config.user_specified_block_size = False
+
+        self.platform.update_block_size_for_backend(vllm_config)
+
+        self.assertEqual(vllm_config.cache_config.block_size, 1024)
+
+    def test_update_block_size_for_backend_preserves_user_block_size(self):
+        vllm_config = TestNPUPlatform.mock_vllm_config()
+        vllm_config.model_config.is_hybrid = False
+        vllm_config.cache_config.block_size = 512
+        vllm_config.cache_config.user_specified_block_size = True
+
+        self.platform.update_block_size_for_backend(vllm_config)
+
+        self.assertEqual(vllm_config.cache_config.block_size, 512)
+
     @patch("vllm_ascend.quantization.utils.maybe_auto_detect_quantization")
     @patch("vllm_ascend.utils.get_ascend_device_type", return_value=AscendDeviceType.A3)
     @patch("vllm_ascend.ascend_config.init_ascend_config")
diff --git a/vllm_ascend/platform.py b/vllm_ascend/platform.py
index 55e1408c..0dda6be2 100644
--- a/vllm_ascend/platform.py
+++ b/vllm_ascend/platform.py
@@ -172,6 +172,20 @@ class NPUPlatform(Platform):
     def inference_mode(cls):
         return torch.inference_mode()
 
+    @classmethod
+    def update_block_size_for_backend(cls, vllm_config: VllmConfig) -> None:
+        cache_config = vllm_config.cache_config
+        if cache_config.user_specified_block_size:
+            # User specified --block-size; keep it.
+            return
+        model_config = vllm_config.model_config
+        if model_config is not None and model_config.is_hybrid:
+            # Hybrid attention+mamba models rely on the model-specific sizing
+            # logic rather than the generic platform default.
+            return
+
+        super().update_block_size_for_backend(vllm_config)
+
     @classmethod
     def set_device(cls, device: torch.device):
         torch.npu.set_device(device)