[CI] Add pre-commit check for patch logger (#7446)

### What this PR does / why we need it?
See https://github.com/vllm-project/vllm-ascend/pull/7402; the pre-commit
hook will forbid `init_logger(__name__)` in vllm_ascend patch modules.

- vLLM version: v0.17.0
- vLLM main:
8a680463fa

---------

Signed-off-by: wangli <wangli858794774@gmail.com>
This commit is contained in:
Li Wang
2026-03-19 16:53:20 +08:00
committed by GitHub
parent 38e637eef5
commit 83a4065b4b
14 changed files with 99 additions and 49 deletions

View File

@@ -21,7 +21,7 @@ from typing import Any, Optional, cast
import torch
from compressed_tensors.quantization import QuantizationArgs, QuantizationStrategy, QuantizationType
from vllm.logger import init_logger
from vllm.logger import logger
from vllm.model_executor.layers.fused_moe import FusedMoE
from vllm.model_executor.layers.linear import LinearBase, UnquantizedLinearMethod
from vllm.model_executor.layers.quantization import QUANTIZATION_METHODS, register_quantization_config
@@ -37,8 +37,6 @@ from vllm_ascend.utils import COMPRESSED_TENSORS_METHOD
from .methods import AscendLinearScheme, AscendMoEScheme
logger = init_logger(__name__)
# Remove the original compressed_tensors method to replace with our implementation
def _remove_quantization_method():

View File

@@ -31,7 +31,7 @@ from typing import Any, Optional
import torch
from vllm.config import get_current_vllm_config
from vllm.logger import init_logger
from vllm.logger import logger
from vllm.model_executor.layers.attention_layer_base import AttentionLayerBase
from vllm.model_executor.layers.fused_moe import FusedMoE
from vllm.model_executor.layers.linear import LinearBase
@@ -47,8 +47,6 @@ from .methods import get_scheme_class
# The config filename that ModelSlim generates after quantizing a model.
MODELSLIM_CONFIG_FILENAME = "quant_model_description.json"
logger = init_logger(__name__)
# key: model_type
# value: vLLM prefix -> HF prefix mapping (used to convert vLLM layer names to HF format
# for looking up keys in quant_model_description.json)

View File

@@ -19,12 +19,10 @@ import json
from pathlib import Path
from vllm import envs
from vllm.logger import init_logger
from vllm.logger import logger
from vllm_ascend.utils import ASCEND_QUANTIZATION_METHOD, COMPRESSED_TENSORS_METHOD
logger = init_logger(__name__)
def get_model_file(
model: str | Path,