[MM][Bugfix] Add error log for VL models when enabling FLASHCOMM (#4272)

### What this PR does / why we need it?
Add error log for VL models when enabling
`VLLM_ASCEND_ENABLE_FLASHCOMM1=1` or `VLLM_ASCEND_ENABLE_FLASHCOMM=1`
(for backward compatibility).

This is a temporary fix for
https://github.com/vllm-project/vllm-ascend/issues/4132.

### Does this PR introduce _any_ user-facing change?

### How was this patch tested?

- vLLM version: v0.11.0
- vLLM main:
2918c1b49c

Signed-off-by: shen-shanshan <467638484@qq.com>
This commit is contained in:
Shanshan Shen
2025-11-21 15:04:18 +08:00
committed by GitHub
parent 4573c855b7
commit 8e3b834bf7
2 changed files with 19 additions and 1 deletions

View File

@@ -57,6 +57,7 @@ _ASCEND_CUSTOMOP_IS_REIGISTERED = False
_DEFAULT_BUFFER_SIZE = 200
_MIN_DP_BUFFER_SIZE = 50
_IS_MOE_MODEL = None
_IS_VL_MODEL = None
_ENABLE_SP = None
_HAS_LAYER_IDX = None
_SUBSCRIBED_COMPUTE_STREAMS = set()
@@ -829,6 +830,15 @@ def _is_contain_expert(config: Any):
return False
def is_vl_model(vllm_config: VllmConfig) -> bool:
    """Check whether the served model is a vision-language (VL) model.

    The answer is derived from the HF config's ``architectures`` list and
    cached in the module-level ``_IS_VL_MODEL`` so repeated calls are cheap.

    Args:
        vllm_config: The vLLM config whose ``model_config.hf_config``
            carries the model architecture name(s).

    Returns:
        True if any architecture name contains the substring ``"VL"``,
        False otherwise (including when no architectures are listed).
    """
    global _IS_VL_MODEL
    if _IS_VL_MODEL is None:
        hf_config = vllm_config.model_config.hf_config.to_dict()
        # Guard against configs with a missing or empty "architectures"
        # entry, which previously raised KeyError/IndexError. Checking
        # every listed architecture (not just the first) is equivalent
        # for the common single-architecture case and safer otherwise.
        architectures = hf_config.get("architectures") or []
        _IS_VL_MODEL = any("VL" in arch for arch in architectures)
    return _IS_VL_MODEL
def weak_ref_tensor(tensor: Any) -> Any:
"""
Create a weak reference to a tensor.