diff --git a/vllm_ascend/platform.py b/vllm_ascend/platform.py
index 6bcad2c..4a2b099 100644
--- a/vllm_ascend/platform.py
+++ b/vllm_ascend/platform.py
@@ -32,7 +32,7 @@ from vllm_ascend.ascend_config import (check_ascend_config, get_ascend_config,
 from vllm_ascend.torchair.utils import (check_torchair_cache_exist,
                                         delete_torchair_cache_file)
 from vllm_ascend.utils import (ASCEND_QUANTIZATION_METHOD, enable_sp, is_310p,
-                               update_aclgraph_sizes,
+                               is_vl_model, update_aclgraph_sizes,
                                update_default_aclgraph_sizes)
 
 if TYPE_CHECKING:
@@ -303,6 +303,15 @@ class NPUPlatform(Platform):
                 vllm_config.scheduler_config)
             vllm_config.scheduler_config = recompute_scheduler_config
 
+        # FLASHCOMM (v1 and v2) is not yet supported for VL models on
+        # vllm-ascend; fail fast with an actionable message instead of
+        # misbehaving at runtime. Both env toggles are checked, so the
+        # message tells the user to disable both.
+        if is_vl_model(vllm_config):
+            if bool(int(os.getenv("VLLM_ASCEND_ENABLE_FLASHCOMM", '0'))) or \
+                bool(int(os.getenv("VLLM_ASCEND_ENABLE_FLASHCOMM1", '0'))):
+                raise ValueError(
+                    "Currently, VL models don't support FLASHCOMM in "
+                    "vllm-ascend. We will fix this in the future. Please set "
+                    "VLLM_ASCEND_ENABLE_FLASHCOMM=0 and "
+                    "VLLM_ASCEND_ENABLE_FLASHCOMM1=0.")
+
     @classmethod
     def get_attn_backend_cls(
         cls,
diff --git a/vllm_ascend/utils.py b/vllm_ascend/utils.py
index 8f21ef7..52a88ec 100644
--- a/vllm_ascend/utils.py
+++ b/vllm_ascend/utils.py
@@ -57,6 +57,7 @@ _ASCEND_CUSTOMOP_IS_REIGISTERED = False
 _DEFAULT_BUFFER_SIZE = 200
 _MIN_DP_BUFFER_SIZE = 50
 _IS_MOE_MODEL = None
+_IS_VL_MODEL = None
 _ENABLE_SP = None
 _HAS_LAYER_IDX = None
 _ENABLE_NZ = None
@@ -696,6 +697,15 @@ def _is_contain_expert(config: Any):
     return False
 
 
+def is_vl_model(vllm_config: VllmConfig) -> bool:
+    """Check whether the served model is a vision-language (VL) model.
+
+    Detected by looking for "VL" in any entry of the HF config's
+    ``architectures`` list; the result is cached process-wide, mirroring
+    the ``_IS_MOE_MODEL`` pattern above. Missing/empty ``architectures``
+    safely yields False instead of raising KeyError/IndexError.
+    """
+    global _IS_VL_MODEL
+    if _IS_VL_MODEL is None:
+        hf_cfg = vllm_config.model_config.hf_config.to_dict()
+        _IS_VL_MODEL = any(
+            "VL" in arch for arch in hf_cfg.get("architectures", []))
+    return _IS_VL_MODEL
+
+
 def weak_ref_tensor(tensor: Any) -> Any:
     """
     Create a weak reference to a tensor.