[Bugfix] Add defensive check for multimodal_config (#6230)

### What this PR does / why we need it?

In vLLM-Omni, the `ModelConfig` may be empty (`None`). We need to add a
defensive check before accessing any sub-field of `model_config`.

### Does this PR introduce _any_ user-facing change?

No

### How was this patch tested?

Will be checked by CI.

- vLLM version: v0.14.0
- vLLM main:
d68209402d

Signed-off-by: gcanlin <canlinguosdu@gmail.com>
This commit is contained in:
Canlin Guo
2026-01-25 17:39:19 +08:00
committed by GitHub
parent 2928ae2af5
commit b45bd92c2b

View File

@@ -653,14 +653,15 @@ class NPUPlatform(Platform):
         If GPU-specific or currently unsupported parameters are set by the user,
         log a warning and reset them to safe values.
         """
+        model_config = vllm_config.model_config
         # ==================== 1. Model Config ====================
-        if vllm_config.model_config:
+        if model_config:
             # Disable Cascade Attention (GPU feature)
-            if getattr(vllm_config.model_config, "disable_cascade_attn", False):
+            if getattr(model_config, "disable_cascade_attn", False):
                 logger.warning(
                     "Parameter '--disable-cascade-attn' is a GPU-specific feature. Resetting to False for Ascend."
                 )
-                vllm_config.model_config.disable_cascade_attn = False
+                model_config.disable_cascade_attn = False

         # ==================== 2. Parallel Config ====================
         if vllm_config.parallel_config:
@@ -684,14 +685,15 @@ class NPUPlatform(Platform):
vllm_config.cache_config.cpu_kvcache_space_bytes = None vllm_config.cache_config.cpu_kvcache_space_bytes = None
# ==================== 4. MultiModal Config ==================== # ==================== 4. MultiModal Config ====================
if vllm_config.model_config.multimodal_config: multimodal_config = getattr(model_config, "multimodal_config", None) if model_config else None
if multimodal_config:
# Ascend uses a different mechanism for Multi-Modal attention # Ascend uses a different mechanism for Multi-Modal attention
if getattr(vllm_config.model_config.multimodal_config, "mm_encoder_attn_backend", None) is not None: if getattr(multimodal_config, "mm_encoder_attn_backend", None) is not None:
logger.warning( logger.warning(
"Parameter '--mm-encoder-attn-backend' is set but Ascend uses " "Parameter '--mm-encoder-attn-backend' is set but Ascend uses "
"a plugin mechanism for multi-modal attention. Resetting to None." "a plugin mechanism for multi-modal attention. Resetting to None."
) )
vllm_config.model_config.multimodal_config.mm_encoder_attn_backend = None multimodal_config.mm_encoder_attn_backend = None
# ==================== 5. Observability Config ==================== # ==================== 5. Observability Config ====================
if vllm_config.observability_config: if vllm_config.observability_config: