[MISC] fix logger (#515)
The logger in vllm-ascend doesn't work. This PR fixes the issue. Fixes: https://github.com/vllm-project/vllm-ascend/issues/431 Signed-off-by: wangxiyuan <wangxiyuan1007@gmail.com>
This commit is contained in:
@@ -35,7 +35,7 @@ from vllm.core.scheduler import SchedulerOutputs
|
||||
from vllm.distributed import get_kv_transfer_group, get_pp_group
|
||||
from vllm.forward_context import set_forward_context
|
||||
from vllm.inputs import INPUT_REGISTRY, InputRegistry
|
||||
from vllm.logger import init_logger
|
||||
from vllm.logger import logger
|
||||
from vllm.lora.layers import LoRAMapping
|
||||
from vllm.lora.request import LoRARequest
|
||||
from vllm.model_executor import SamplingMetadata, SamplingMetadataCache
|
||||
@@ -63,8 +63,6 @@ from vllm.worker.model_runner_base import (
|
||||
if TYPE_CHECKING:
|
||||
from vllm.attention.backends.abstract import AttentionBackend
|
||||
|
||||
logger = init_logger(__name__)
|
||||
|
||||
TModelInputForNPU = TypeVar('TModelInputForNPU', bound="ModelInputForNPU")
|
||||
|
||||
|
||||
|
||||
@@ -32,7 +32,7 @@ from vllm.config import VllmConfig
|
||||
from vllm.distributed.parallel_state import get_pp_group
|
||||
from vllm.forward_context import set_forward_context
|
||||
from vllm.inputs import INPUT_REGISTRY
|
||||
from vllm.logger import init_logger
|
||||
from vllm.logger import logger
|
||||
from vllm.model_executor.layers.fused_moe import FusedMoE
|
||||
from vllm.model_executor.model_loader import get_model
|
||||
from vllm.multimodal import MULTIMODAL_REGISTRY, MultiModalKwargs
|
||||
@@ -56,8 +56,6 @@ if TYPE_CHECKING:
|
||||
|
||||
NPU_PAGED_ATTENTION_MASK_VALUE = -10000
|
||||
|
||||
logger = init_logger(__name__)
|
||||
|
||||
|
||||
class NPUModelRunner:
|
||||
|
||||
|
||||
@@ -7,7 +7,6 @@ from typing import (TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple,
|
||||
import torch
|
||||
from torch import nn
|
||||
from vllm.distributed import get_pp_group
|
||||
from vllm.logger import init_logger
|
||||
from vllm.model_executor.layers.sampler import (PromptLogprobs, SampleLogprobs,
|
||||
SamplerOutput,
|
||||
SamplingMetadata, get_logprobs,
|
||||
@@ -30,8 +29,6 @@ from vllm_ascend.worker.model_runner import (
|
||||
if TYPE_CHECKING:
|
||||
from vllm.attention.backends.abstract import AttentionBackend
|
||||
|
||||
logger = init_logger(__name__)
|
||||
|
||||
|
||||
@dataclass(frozen=False)
|
||||
class StatefulModelInputForNPU(StatefulModelInput):
|
||||
|
||||
@@ -28,7 +28,7 @@ from vllm.config import ParallelConfig, VllmConfig
|
||||
from vllm.distributed import (ensure_model_parallel_initialized,
|
||||
init_distributed_environment,
|
||||
set_custom_all_reduce)
|
||||
from vllm.logger import init_logger
|
||||
from vllm.logger import logger
|
||||
from vllm.lora.request import LoRARequest
|
||||
from vllm.model_executor import set_random_seed
|
||||
from vllm.model_executor.layers.sampler import SamplerOutput
|
||||
@@ -48,8 +48,6 @@ from vllm_ascend.utils import try_register_lib
|
||||
from vllm_ascend.worker.model_runner import NPUModelRunner
|
||||
from vllm_ascend.worker.pooling_model_runner import NPUPoolingModelRunner
|
||||
|
||||
logger = init_logger(__name__)
|
||||
|
||||
|
||||
class NPUWorker(LocalOrDistributedWorkerBase):
|
||||
"""A worker class that executes (a partition of) the model on a NPU.
|
||||
|
||||
@@ -29,7 +29,7 @@ from vllm.config import ParallelConfig, VllmConfig
|
||||
from vllm.distributed import (ensure_model_parallel_initialized,
|
||||
init_distributed_environment,
|
||||
set_custom_all_reduce)
|
||||
from vllm.logger import init_logger
|
||||
from vllm.logger import logger
|
||||
from vllm.model_executor import set_random_seed
|
||||
from vllm.platforms import current_platform
|
||||
from vllm.utils import STR_DTYPE_TO_TORCH_DTYPE
|
||||
@@ -42,8 +42,6 @@ from vllm.v1.worker.worker_base import WorkerBase
|
||||
|
||||
from vllm_ascend.worker.model_runner_v1 import NPUModelRunner
|
||||
|
||||
logger = init_logger(__name__)
|
||||
|
||||
|
||||
class NPUWorker(WorkerBase):
|
||||
|
||||
|
||||
Reference in New Issue
Block a user