From 9d0b7c8e98032086204c0af22c342a845ab7f9c0 Mon Sep 17 00:00:00 2001
From: Qi Mao
Date: Sun, 22 Mar 2026 11:21:49 +0800
Subject: [PATCH] [Platform][BugFix] Preserve hybrid block size on Ascend
 (#7528)

### What this PR does / why we need it

This PR fixes a startup regression for Ascend hybrid attention + mamba
models after upgrading to vLLM `0.18.0`. For these models, the hybrid
model-specific config logic already computes the required block size, but
worker initialization in vLLM `0.18.0` still calls the generic platform
hook, which overwrites that value with the generic fallback:

- `current_platform.update_block_size_for_backend(vllm_config)`

### How this PR fixes it

This PR keeps the fix strictly inside `vllm-ascend`. It adds an Ascend
override for `NPUPlatform.update_block_size_for_backend()`:

- if the user explicitly passed `--block-size`, keep the user-specified
  value
- for hybrid models, do not run the generic upstream block-size fallback;
  preserve the block size that was already computed by the hybrid
  model-specific config logic
- otherwise, keep the original upstream behavior unchanged

- vLLM version: v0.18.0
- vLLM main: https://github.com/vllm-project/vllm/commit/8b6325758cce5f9c36d38f2462edbd368b97a07c

---------

Signed-off-by: maoxx241
Signed-off-by: Mengqing Cao
Co-authored-by: Mengqing Cao
---
 tests/ut/test_platform.py | 20 ++++++++++++++++++++
 vllm_ascend/platform.py   | 14 ++++++++++++++
 2 files changed, 34 insertions(+)

diff --git a/tests/ut/test_platform.py b/tests/ut/test_platform.py
index 0f91ca4d..914cd7f4 100644
--- a/tests/ut/test_platform.py
+++ b/tests/ut/test_platform.py
@@ -307,6 +307,26 @@ class TestNPUPlatform(TestBase):
 
         self.assertEqual(vllm_config.cache_config.block_size, 128)
 
+    def test_update_block_size_for_backend_preserves_hybrid_block_size(self):
+        vllm_config = TestNPUPlatform.mock_vllm_config()
+        vllm_config.model_config.is_hybrid = True
+        vllm_config.cache_config.block_size = 1024
+        vllm_config.cache_config.user_specified_block_size = False
+
+        self.platform.update_block_size_for_backend(vllm_config)
+
+        self.assertEqual(vllm_config.cache_config.block_size, 1024)
+
+    def test_update_block_size_for_backend_preserves_user_block_size(self):
+        vllm_config = TestNPUPlatform.mock_vllm_config()
+        vllm_config.model_config.is_hybrid = False
+        vllm_config.cache_config.block_size = 512
+        vllm_config.cache_config.user_specified_block_size = True
+
+        self.platform.update_block_size_for_backend(vllm_config)
+
+        self.assertEqual(vllm_config.cache_config.block_size, 512)
+
     @patch("vllm_ascend.quantization.utils.maybe_auto_detect_quantization")
     @patch("vllm_ascend.utils.get_ascend_device_type", return_value=AscendDeviceType.A3)
     @patch("vllm_ascend.ascend_config.init_ascend_config")

diff --git a/vllm_ascend/platform.py b/vllm_ascend/platform.py
index 55e1408c..0dda6be2 100644
--- a/vllm_ascend/platform.py
+++ b/vllm_ascend/platform.py
@@ -172,6 +172,20 @@ class NPUPlatform(Platform):
     def inference_mode(cls):
         return torch.inference_mode()
 
+    @classmethod
+    def update_block_size_for_backend(cls, vllm_config: VllmConfig) -> None:
+        cache_config = vllm_config.cache_config
+        if cache_config.user_specified_block_size:
+            # User specified --block-size; keep it.
+            return
+        model_config = vllm_config.model_config
+        if model_config is not None and model_config.is_hybrid:
+            # Hybrid attention+mamba models rely on the model-specific sizing
+            # logic rather than the generic platform default.
+            return
+
+        super().update_block_size_for_backend(vllm_config)
+
     @classmethod
     def set_device(cls, device: torch.device):
         torch.npu.set_device(device)
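
Reviewer note: for anyone who wants to poke at the branch logic without a full
vLLM install, here is a standalone sketch of the override's control flow.
`update_block_size_for_backend` below is a free function standing in for the
`NPUPlatform` method, `SimpleNamespace` objects stand in for the real
`VllmConfig`, and `upstream_fallback` is a hypothetical parameter replacing
the `super()` call; none of these stand-ins are part of vllm or vllm-ascend.

```python
from types import SimpleNamespace

def update_block_size_for_backend(vllm_config, upstream_fallback):
    # Mirrors the NPUPlatform override: return early whenever the current
    # block size must be preserved; otherwise defer to the generic logic.
    cache_config = vllm_config.cache_config
    if cache_config.user_specified_block_size:
        return  # user passed --block-size; keep it
    model_config = vllm_config.model_config
    if model_config is not None and model_config.is_hybrid:
        return  # hybrid model-specific sizing already ran
    upstream_fallback(vllm_config)

def make_config(block_size, user_specified, is_hybrid):
    return SimpleNamespace(
        cache_config=SimpleNamespace(block_size=block_size,
                                     user_specified_block_size=user_specified),
        model_config=SimpleNamespace(is_hybrid=is_hybrid),
    )

# Stand-in for the upstream fallback, which would rewrite the block size.
fallback = lambda cfg: setattr(cfg.cache_config, "block_size", 128)

hybrid = make_config(1024, user_specified=False, is_hybrid=True)
update_block_size_for_backend(hybrid, fallback)
assert hybrid.cache_config.block_size == 1024  # hybrid value preserved

user = make_config(512, user_specified=True, is_hybrid=False)
update_block_size_for_backend(user, fallback)
assert user.cache_config.block_size == 512  # --block-size preserved

plain = make_config(1024, user_specified=False, is_hybrid=False)
update_block_size_for_backend(plain, fallback)
assert plain.cache_config.block_size == 128  # generic fallback applies
```

The new unit tests exercise the first two branches against the real platform
class and can be run in isolation with, e.g.,
`pytest tests/ut/test_platform.py -k update_block_size_for_backend`.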