From 1d70f93cfc4dd5b5bf1b365c1882864187e2d846 Mon Sep 17 00:00:00 2001
From: Chranos <826995883@qq.com>
Date: Mon, 9 Feb 2026 15:24:55 +0800
Subject: [PATCH] registry: log TransformersForCausalLM fallback once per
 architecture

---
 .../vllm/model_executor/models/registry.py | 16 +++++++++++-----
 1 file changed, 11 insertions(+), 5 deletions(-)

diff --git a/vllm-v0.6.2/vllm/model_executor/models/registry.py b/vllm-v0.6.2/vllm/model_executor/models/registry.py
index 2fd3a37..5ce9ad8 100644
--- a/vllm-v0.6.2/vllm/model_executor/models/registry.py
+++ b/vllm-v0.6.2/vllm/model_executor/models/registry.py
@@ -28,6 +28,9 @@ from .interfaces_base import is_embedding_model, is_text_generation_model
 
 logger = init_logger(__name__)
 
+# Cache for architectures that have already been logged
+_logged_transformers_architectures: set = set()
+
 # yapf: disable
 _TEXT_GENERATION_MODELS = {
     # [Decoder-only]
@@ -403,11 +406,14 @@ class _ModelRegistry:
         model_module = getattr(transformers, architecture, None)
         if model_module is not None:
             # Model exists in transformers, can use TransformersForCausalLM wrapper
-            logger.info(
-                "Architecture %s found in transformers library, "
-                "using TransformersForCausalLM wrapper",
-                architecture
-            )
+            # Only log once per architecture to avoid spam
+            if architecture not in _logged_transformers_architectures:
+                _logged_transformers_architectures.add(architecture)
+                logger.info(
+                    "Architecture %s found in transformers library, "
+                    "using TransformersForCausalLM wrapper",
+                    architecture
+                )
             return "TransformersForCausalLM"
 
         # Get auto_map from hf_config
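
---
Reviewer note (not part of the patch): below is a minimal, standalone sketch of
the "log once per key" pattern this change introduces, runnable outside vLLM.
It assumes the stdlib logging module in place of vLLM's init_logger, and the
helper name resolve_architecture plus the demo architecture string are
hypothetical, for illustration only.

import logging

logger = logging.getLogger(__name__)

# Module-level cache of architecture names that have already been logged,
# mirroring _logged_transformers_architectures in the patch.
_logged_architectures: set = set()


def resolve_architecture(architecture: str) -> str:
    # Log the fallback decision only the first time this architecture is
    # seen; repeated lookups of the same name stay silent.
    if architecture not in _logged_architectures:
        _logged_architectures.add(architecture)
        logger.info(
            "Architecture %s found in transformers library, "
            "using TransformersForCausalLM wrapper", architecture)
    return "TransformersForCausalLM"


if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    for _ in range(3):
        # Only the first call emits the INFO line; the other two are silent.
        resolve_architecture("LlamaForCausalLM")

A module-level set is process-wide and grows only with the number of distinct
architecture names, so memory stays bounded in practice. An alternative with
the same once-per-key behavior would be wrapping a tiny logging helper in
functools.lru_cache, at the cost of being slightly less explicit.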