From 70f076331f0e1454847906cbae234c4413943b15 Mon Sep 17 00:00:00 2001
From: Shanshan Shen <467638484@qq.com>
Date: Fri, 21 Nov 2025 15:04:35 +0800
Subject: [PATCH] [MM][Bugfix] Add error log for VL models when enabling
 FLASHCOMM (#4222)

### What this PR does / why we need it?
Add error log for VL models when enabling `VLLM_ASCEND_ENABLE_FLASHCOMM1=1` or `VLLM_ASCEND_ENABLE_FLASHCOMM=1` (for backward compatibility).

This is a temporary fix for https://github.com/vllm-project/vllm-ascend/issues/4132.

### Does this PR introduce _any_ user-facing change?

### How was this patch tested?

Signed-off-by: shen-shanshan <467638484@qq.com>
---
 vllm_ascend/platform.py | 10 +++++++++-
 vllm_ascend/utils.py    | 10 ++++++++++
 2 files changed, 19 insertions(+), 1 deletion(-)

diff --git a/vllm_ascend/platform.py b/vllm_ascend/platform.py
index 6bcad2c..4a2b099 100644
--- a/vllm_ascend/platform.py
+++ b/vllm_ascend/platform.py
@@ -32,7 +32,7 @@ from vllm_ascend.ascend_config import (check_ascend_config, get_ascend_config,
 from vllm_ascend.torchair.utils import (check_torchair_cache_exist,
                                         delete_torchair_cache_file)
 from vllm_ascend.utils import (ASCEND_QUANTIZATION_METHOD, enable_sp, is_310p,
-                               update_aclgraph_sizes,
+                               is_vl_model, update_aclgraph_sizes,
                                update_default_aclgraph_sizes)
 
 if TYPE_CHECKING:
@@ -303,6 +303,14 @@ class NPUPlatform(Platform):
                 vllm_config.scheduler_config)
             vllm_config.scheduler_config = recompute_scheduler_config
 
+        if is_vl_model(vllm_config):
+            if bool(int(os.getenv("VLLM_ASCEND_ENABLE_FLASHCOMM", '0'))) or \
+                    bool(int(os.getenv("VLLM_ASCEND_ENABLE_FLASHCOMM1", '0'))):
+                raise ValueError(
+                    "Currently, VL models doesn't support "
+                    "FLASHCOMM in vllm-ascend. We will fix this in the future. "
+                    "Please set VLLM_ASCEND_ENABLE_FLASHCOMM1=0.")
+
     @classmethod
     def get_attn_backend_cls(
         cls,
diff --git a/vllm_ascend/utils.py b/vllm_ascend/utils.py
index 8f21ef7..52a88ec 100644
--- a/vllm_ascend/utils.py
+++ b/vllm_ascend/utils.py
@@ -57,6 +57,7 @@ _ASCEND_CUSTOMOP_IS_REIGISTERED = False
 _DEFAULT_BUFFER_SIZE = 200
 _MIN_DP_BUFFER_SIZE = 50
 _IS_MOE_MODEL = None
+_IS_VL_MODEL = None
 _ENABLE_SP = None
 _HAS_LAYER_IDX = None
 _ENABLE_NZ = None
@@ -696,6 +697,15 @@ def _is_contain_expert(config: Any):
     return False
 
 
+def is_vl_model(vllm_config: VllmConfig):
+    """Checks if the model is a VL model by config"""
+    global _IS_VL_MODEL
+    if _IS_VL_MODEL is None:
+        model_configs = vllm_config.model_config.hf_config.to_dict()
+        _IS_VL_MODEL = "VL" in model_configs["architectures"][0]
+    return _IS_VL_MODEL
+
+
 def weak_ref_tensor(tensor: Any) -> Any:
     """
     Create a weak reference to a tensor.