[Misc] Fix logger which does not take effect in patches (#7402)
### What this PR does / why we need it?
This PR fixes the logger initialization in patches so that the log info
can be displayed as expected.
### Does this PR introduce _any_ user-facing change?
No.
- vLLM version: v0.17.0
- vLLM main:
4497431df6
---------
Signed-off-by: Angazenn <supperccell@163.com>
This commit is contained in:
@@ -8,7 +8,7 @@ import vllm
 from vllm.config import ParallelConfig
 from vllm.distributed.ec_transfer.ec_connector.base import ECConnectorMetadata
 from vllm.distributed.kv_transfer.kv_connector.v1.base import KVConnectorMetadata
-from vllm.logger import init_logger
+from vllm.logger import logger
 from vllm.multimodal import MULTIMODAL_REGISTRY, MultiModalRegistry
 from vllm.transformers_utils.config import maybe_register_config_serialize_by_value
 from vllm.utils.system_utils import decorate_logs, set_process_title
@@ -23,8 +23,6 @@ from vllm.v1.request import Request, RequestStatus
 from vllm.v1.structured_output import StructuredOutputManager
 from vllm.v1.utils import record_function_or_nullcontext
 
-logger = init_logger(__name__)
-
 
 class BalanceScheduler(Scheduler):
     def __init__(
@@ -2,7 +2,7 @@
 import math
 
 import vllm.model_executor.models.config
-from vllm.logger import init_logger
+from vllm.logger import logger
 from vllm.model_executor.models import ModelRegistry
 from vllm.model_executor.models.config import MambaModelConfig
 from vllm.utils.math_utils import cdiv
@@ -21,7 +21,6 @@ def verify_and_update_config(cls, vllm_config) -> None:
         Args:
             vllm_config: vLLM Config
         """
-        logger = init_logger(__name__)
         # Enable FULL_AND_PIECEWISE by default
         MambaModelConfig.verify_and_update_config(vllm_config)
 
@@ -22,11 +22,9 @@
 import os
 
 from vllm.config.model import ModelConfig
-from vllm.logger import init_logger
+from vllm.logger import logger
 from vllm.platforms import current_platform
 
-logger = init_logger(__name__)
-
 _original_verify_quantization = getattr(ModelConfig, "_verify_quantization", None)
 _original_verify_cuda_graph = getattr(ModelConfig, "_verify_cuda_graph", None)
 
@@ -18,11 +18,8 @@
 import torch
 import torch.nn as nn
 import torch.nn.functional as F
-from vllm.logger import init_logger
 from vllm.model_executor.models.kimi_k25_vit import Learnable2DInterpPosEmbDivided_fixed, get_rope_shape_decorate
 
-logger = init_logger(__name__)
-
 
 @get_rope_shape_decorate
 def get_rope_shape(org, interpolation_mode, shape):
@@ -1,11 +1,9 @@
 import sys
 from typing import Any
 
-from vllm.logger import init_logger
+from vllm.logger import logger
 from vllm.model_executor.model_loader.weight_utils import maybe_remap_kv_scale_name
 
-logger = init_logger(__name__)
-
 
 class ImportPatchDecorator:
     """Import patch decorator"""
Reference in New Issue
Block a user