diff --git a/vllm_ascend/platform.py b/vllm_ascend/platform.py
index 5559df8c..873862bb 100644
--- a/vllm_ascend/platform.py
+++ b/vllm_ascend/platform.py
@@ -31,7 +31,7 @@ from vllm_ascend.ascend_config import (check_ascend_config, get_ascend_config,
 from vllm_ascend.torchair.utils import (check_torchair_cache_exist,
                                         delete_torchair_cache_file)
 from vllm_ascend.utils import (ASCEND_QUANTIZATION_METHOD, enable_sp, is_310p,
-                               prefill_context_parallel_enable,
+                               is_vl_model, prefill_context_parallel_enable,
                                update_aclgraph_sizes,
                                update_cudagraph_capture_sizes,
                                update_default_aclgraph_sizes, vllm_version_is)
@@ -380,6 +380,16 @@ class NPUPlatform(Platform):
                 "needs to be equal if use cp or dcp > 1 in P/D disaggregate scenario."
             )
 
+        # FLASHCOMM is not supported for VL (vision-language) models on
+        # Ascend yet; fail fast with an actionable message at config time.
+        if is_vl_model(vllm_config):
+            if bool(int(os.getenv("VLLM_ASCEND_ENABLE_FLASHCOMM", '0'))) or \
+                bool(int(os.getenv("VLLM_ASCEND_ENABLE_FLASHCOMM1", '0'))):
+                raise ValueError(
+                    "Currently, VL models don't support FLASHCOMM in "
+                    "vllm-ascend. We will fix this in the future. Please set "
+                    "VLLM_ASCEND_ENABLE_FLASHCOMM=0 and VLLM_ASCEND_ENABLE_FLASHCOMM1=0.")
+
     @classmethod
     def import_kernels(cls) -> None:
         # Directly importing vllm_ascend_C prevents ASCEND_RT_VISIBLE_DEVICES
diff --git a/vllm_ascend/utils.py b/vllm_ascend/utils.py
index 7fd73826..8dc60911 100644
--- a/vllm_ascend/utils.py
+++ b/vllm_ascend/utils.py
@@ -57,6 +57,7 @@ _ASCEND_CUSTOMOP_IS_REIGISTERED = False
 _DEFAULT_BUFFER_SIZE = 200
 _MIN_DP_BUFFER_SIZE = 50
 _IS_MOE_MODEL = None
+_IS_VL_MODEL = None
 _ENABLE_SP = None
 _HAS_LAYER_IDX = None
 _SUBSCRIBED_COMPUTE_STREAMS = set()
@@ -829,6 +830,20 @@ def _is_contain_expert(config: Any):
     return False
 
 
+def is_vl_model(vllm_config: VllmConfig):
+    """Check whether the served model is a VL (vision-language) model.
+
+    Looks for the substring "VL" in any entry of the HF config's
+    ``architectures`` list; the result is cached module-wide.
+    """
+    global _IS_VL_MODEL
+    if _IS_VL_MODEL is None:
+        architectures = vllm_config.model_config.hf_config.to_dict().get(
+            "architectures", [])
+        _IS_VL_MODEL = any("VL" in arch for arch in architectures)
+    return _IS_VL_MODEL
+
+
 def weak_ref_tensor(tensor: Any) -> Any:
     """
     Create a weak reference to a tensor.