[MISC] fix logger (#515)

The logger in vllm-ascend doesn't work. This PR fixes the issue.

Fix: https://github.com/vllm-project/vllm-ascend/issues/431

Signed-off-by: wangxiyuan <wangxiyuan1007@gmail.com>
This commit is contained in:
wangxiyuan
2025-04-15 10:18:05 +08:00
committed by GitHub
parent 5c6d79687c
commit f6af1d2471
9 changed files with 6 additions and 27 deletions

View File

@@ -29,7 +29,6 @@ from vllm.config import TaskOption
from vllm.distributed.parallel_state import (destroy_distributed_environment,
destroy_model_parallel)
from vllm.inputs import ExplicitEncoderDecoderPrompt, TextPrompt, TokensPrompt
from vllm.logger import init_logger
from vllm.outputs import RequestOutput
from vllm.sampling_params import BeamSearchParams
from vllm.utils import is_list_of
@@ -37,8 +36,6 @@ from vllm.utils import is_list_of
from tests.model_utils import (TokensTextLogprobs,
TokensTextLogprobsPromptLogprobs)
logger = init_logger(__name__)
_M = TypeVar("_M")
_PromptMultiModalInput = Union[List[_M], List[List[_M]]]

View File

@@ -22,7 +22,7 @@ from typing import TYPE_CHECKING, Optional, Tuple
import torch
import torch_npu # noqa: F401
import vllm.envs as envs
from vllm.logger import init_logger
from vllm.logger import logger
try:
# register custom ops into torch_library here
@@ -48,8 +48,6 @@ else:
os.environ["RAY_EXPERIMENTAL_NOSET_ASCEND_RT_VISIBLE_DEVICES"] = "1"
logger = init_logger(__name__)
class NPUPlatform(Platform):

View File

@@ -21,7 +21,6 @@ from typing import Any, Callable, Dict, List, Mapping, Optional
import torch
import torch_npu # noqa: F401
from vllm.distributed import get_tensor_model_parallel_rank
from vllm.logger import init_logger
from vllm.model_executor.layers.fused_moe import (FusedMoE, FusedMoEMethodBase,
FusedMoeWeightScaleSupported)
from vllm.model_executor.layers.fused_moe.layer import \
@@ -41,8 +40,6 @@ from vllm.model_executor.utils import set_weight_attrs
from .quantizer import AscendQuantizer
logger = init_logger(__name__)
@register_quantization_config("ascend")
class AscendQuantConfig(QuantizationConfig):

View File

@@ -17,9 +17,7 @@
# limitations under the License.
#
import torch
from vllm.logger import init_logger
logger = init_logger(__name__)
from vllm.logger import logger
def try_register_lib(lib_name: str, lib_info: str = ""):

View File

@@ -35,7 +35,7 @@ from vllm.core.scheduler import SchedulerOutputs
from vllm.distributed import get_kv_transfer_group, get_pp_group
from vllm.forward_context import set_forward_context
from vllm.inputs import INPUT_REGISTRY, InputRegistry
from vllm.logger import init_logger
from vllm.logger import logger
from vllm.lora.layers import LoRAMapping
from vllm.lora.request import LoRARequest
from vllm.model_executor import SamplingMetadata, SamplingMetadataCache
@@ -63,8 +63,6 @@ from vllm.worker.model_runner_base import (
if TYPE_CHECKING:
from vllm.attention.backends.abstract import AttentionBackend
logger = init_logger(__name__)
TModelInputForNPU = TypeVar('TModelInputForNPU', bound="ModelInputForNPU")

View File

@@ -32,7 +32,7 @@ from vllm.config import VllmConfig
from vllm.distributed.parallel_state import get_pp_group
from vllm.forward_context import set_forward_context
from vllm.inputs import INPUT_REGISTRY
from vllm.logger import init_logger
from vllm.logger import logger
from vllm.model_executor.layers.fused_moe import FusedMoE
from vllm.model_executor.model_loader import get_model
from vllm.multimodal import MULTIMODAL_REGISTRY, MultiModalKwargs
@@ -56,8 +56,6 @@ if TYPE_CHECKING:
NPU_PAGED_ATTENTION_MASK_VALUE = -10000
logger = init_logger(__name__)
class NPUModelRunner:

View File

@@ -7,7 +7,6 @@ from typing import (TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple,
import torch
from torch import nn
from vllm.distributed import get_pp_group
from vllm.logger import init_logger
from vllm.model_executor.layers.sampler import (PromptLogprobs, SampleLogprobs,
SamplerOutput,
SamplingMetadata, get_logprobs,
@@ -30,8 +29,6 @@ from vllm_ascend.worker.model_runner import (
if TYPE_CHECKING:
from vllm.attention.backends.abstract import AttentionBackend
logger = init_logger(__name__)
@dataclass(frozen=False)
class StatefulModelInputForNPU(StatefulModelInput):

View File

@@ -28,7 +28,7 @@ from vllm.config import ParallelConfig, VllmConfig
from vllm.distributed import (ensure_model_parallel_initialized,
init_distributed_environment,
set_custom_all_reduce)
from vllm.logger import init_logger
from vllm.logger import logger
from vllm.lora.request import LoRARequest
from vllm.model_executor import set_random_seed
from vllm.model_executor.layers.sampler import SamplerOutput
@@ -48,8 +48,6 @@ from vllm_ascend.utils import try_register_lib
from vllm_ascend.worker.model_runner import NPUModelRunner
from vllm_ascend.worker.pooling_model_runner import NPUPoolingModelRunner
logger = init_logger(__name__)
class NPUWorker(LocalOrDistributedWorkerBase):
"""A worker class that executes (a partition of) the model on a NPU.

View File

@@ -29,7 +29,7 @@ from vllm.config import ParallelConfig, VllmConfig
from vllm.distributed import (ensure_model_parallel_initialized,
init_distributed_environment,
set_custom_all_reduce)
from vllm.logger import init_logger
from vllm.logger import logger
from vllm.model_executor import set_random_seed
from vllm.platforms import current_platform
from vllm.utils import STR_DTYPE_TO_TORCH_DTYPE
@@ -42,8 +42,6 @@ from vllm.v1.worker.worker_base import WorkerBase
from vllm_ascend.worker.model_runner_v1 import NPUModelRunner
logger = init_logger(__name__)
class NPUWorker(WorkerBase):