[Misc] Fix logger bug (#2024)

1. Remove the now-unneeded per-module `init_logger` calls.
2. Fix the logger import bug — the same problem as
https://github.com/vllm-project/vllm-ascend/pull/515

- vLLM version: v0.10.0
- vLLM main:
18cc33dd60

Signed-off-by: wangxiyuan <wangxiyuan1007@gmail.com>
This commit is contained in:
wangxiyuan
2025-07-28 15:59:09 +08:00
committed by GitHub
parent 3ad582c9a9
commit 34cfdf5520
6 changed files with 4 additions and 19 deletions

View File

@@ -22,13 +22,11 @@ import llm_datadist # type: ignore
import torch
from vllm.distributed.kv_transfer.kv_lookup_buffer.base import \
KVLookupBufferBase
from vllm.logger import init_logger
from vllm.logger import logger
from vllm_ascend.distributed.kv_transfer.simple_pipe import SimplePipe
from vllm_ascend.distributed.kv_transfer.utils import TORCH_DTYPE_TO_NPU_DTYPE
logger = init_logger(__name__)
# Hash a string into a int32 value.
def int32_hash(data):

View File

@@ -26,14 +26,12 @@ import torch_npu
import torchair # type: ignore
import zmq # type: ignore
from vllm.distributed.kv_transfer.kv_pipe.base import KVPipeBase
from vllm.logger import init_logger
from vllm.logger import logger
from vllm.utils import get_ip
import vllm_ascend.envs as envs
from vllm_ascend.distributed.kv_transfer.utils import NPU_DTYPE_TO_TORCH_DTYPE
logger = init_logger(__name__)
class SimplePipe(KVPipeBase):

View File

@@ -33,7 +33,7 @@ from vllm.distributed import (divide, get_pp_group,
from vllm.distributed.parallel_state import (get_dp_group, get_ep_group,
get_tp_group, get_world_group)
from vllm.forward_context import get_forward_context
from vllm.logger import init_logger
from vllm.logger import logger
from vllm.model_executor.layers.activation import SiluAndMul
from vllm.model_executor.layers.fused_moe import FusedMoE
from vllm.model_executor.layers.layernorm import RMSNorm
@@ -60,8 +60,6 @@ from vllm.sequence import IntermediateTensors
from vllm_ascend.ascend_config import get_ascend_config
from vllm_ascend.utils import ACL_FORMAT_FRACTAL_NZ, is_310p
logger = init_logger(__name__)
_ROUTER_SCALE = None

View File

@@ -1,10 +1,6 @@
from vllm.logger import init_logger
from .context import (get_multistream_layer_context,
get_multistream_microbatch_context)
logger = init_logger(__name__)
# vllm v1 use get_forward_context to get the attn_metadata,
# we can use this decorator to update the attn metadata

View File

@@ -4,14 +4,11 @@ from typing import Optional
import torch
import torch.nn as nn
import vllm.v1.sample.rejection_sampler as rs
from vllm.logger import init_logger
from vllm.v1.sample.metadata import SamplingMetadata
from vllm.v1.sample.rejection_sampler import (RejectionSampler, compute_probs,
generate_uniform_probs)
from vllm.v1.spec_decode.metadata import SpecDecodeMetadata
logger = init_logger(__name__)
PLACEHOLDER_TOKEN_ID = -1
GREEDY_TEMPERATURE = -1
# Maximum number of speculative draft tokens allowed per request in a single

View File

@@ -7,7 +7,7 @@ from vllm.attention.layer import Attention
from vllm.config import (CompilationLevel, VllmConfig,
get_layers_from_vllm_config)
from vllm.distributed.parallel_state import get_pp_group
from vllm.logger import init_logger
from vllm.logger import logger
from vllm.model_executor.model_loader import get_model
from vllm.model_executor.models import supports_multimodal
from vllm.model_executor.models.llama_eagle3 import Eagle3LlamaForCausalLM
@@ -17,8 +17,6 @@ from vllm_ascend.ascend_forward_context import set_ascend_forward_context
from vllm_ascend.attention.attention_mask import AttentionMaskBuilder
from vllm_ascend.attention.attention_v1 import AscendAttentionState
logger = init_logger(__name__)
PADDING_SLOT_ID = -1