diff --git a/.github/workflows/vllm_ascend_test.yaml b/.github/workflows/vllm_ascend_test.yaml index fce99fa..b7ceeb8 100644 --- a/.github/workflows/vllm_ascend_test.yaml +++ b/.github/workflows/vllm_ascend_test.yaml @@ -48,7 +48,7 @@ jobs: max-parallel: 2 matrix: os: [linux-arm64-npu-1, linux-arm64-npu-4] - vllm_verison: [main, v0.8.4] + vllm_verison: [main, v0.8.5] concurrency: group: > ${{ diff --git a/Dockerfile b/Dockerfile index 021ed59..1c8b87e 100644 --- a/Dockerfile +++ b/Dockerfile @@ -37,7 +37,7 @@ RUN pip config set global.index-url ${PIP_INDEX_URL} # Install vLLM ARG VLLM_REPO=https://github.com/vllm-project/vllm.git -ARG VLLM_TAG=v0.8.4 +ARG VLLM_TAG=v0.8.5 RUN git clone --depth 1 $VLLM_REPO --branch $VLLM_TAG /workspace/vllm # In x86, triton will be installed by vllm. But in Ascend, triton doesn't work correctly. we need to uninstall it. RUN VLLM_TARGET_DEVICE="empty" python3 -m pip install -v -e /workspace/vllm/ --extra-index https://download.pytorch.org/whl/cpu/ && \ diff --git a/Dockerfile.openEuler b/Dockerfile.openEuler index 538d533..2525b36 100644 --- a/Dockerfile.openEuler +++ b/Dockerfile.openEuler @@ -34,7 +34,7 @@ COPY . /workspace/vllm-ascend/ # Install vLLM ARG VLLM_REPO=https://github.com/vllm-project/vllm.git -ARG VLLM_TAG=v0.8.4 +ARG VLLM_TAG=v0.8.5 RUN git clone --depth 1 $VLLM_REPO --branch $VLLM_TAG /workspace/vllm # In x86, triton will be installed by vllm. But in Ascend, triton doesn't work correctly. we need to uninstall it. diff --git a/vllm_ascend/__init__.py b/vllm_ascend/__init__.py index c6b162f..7588e70 100644 --- a/vllm_ascend/__init__.py +++ b/vllm_ascend/__init__.py @@ -23,9 +23,5 @@ def register(): def register_model(): - # TODO: fixme when TritonPlaceholder fixed - from vllm_ascend.utils import vllm_version_is - if vllm_version_is("0.8.4"): - import vllm_ascend.patch.worker.patch_0_8_4.patch_tritonplaceholder # noqa from .models import register_model register_model() diff --git a/vllm_ascend/patch/__init__.py b/vllm_ascend/patch/__init__.py index e73e66a..445a167 100644 --- a/vllm_ascend/patch/__init__.py +++ b/vllm_ascend/patch/__init__.py @@ -24,9 +24,9 @@ # each worker's `__init__` function. # # Then in each kind of patch, there are three folders: -# - patch_0_8_4: contains the patches applied when vllm version is 0.8.4. +# - patch_0_8_5: contains the patches applied when vllm version is 0.8.5. # - patch_main: contains the patches applied when vllm version is main branch. -# - patch_common: contains the patches applied in both 0.8.4 and main branch. +# - patch_common: contains the patches applied in both 0.8.5 and main branch. # # In the future, with the vllm version upgrade, the new patch folder such as # patch_0_8_5, patch_0_8_6, etc. will be added to manage the patch for different @@ -42,18 +42,6 @@ # -------------------------------- # * Platform Patch: # ================= -# ** File: platform/patch_0_8_4/patch_config.py** -# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -# 1. `vllm.config.ModelConfig.__init__()` -# Why: -# It is hard coded for sleep mode to support cuda platform only -# How: -# Using a new method to check if sleep mode is available -# Related PR (if no, explain why): 1. refused by vllm. 2. vllm doesn't support 3. prepare to submit.... -# https://github.com/vllm-project/vllm/pull/16562 -# Future Plan: -# This patch is only used for 084 and can't be revert. just keep as it is. -# # ** File: platform/patch_common/patch_distributed.py** # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # 1. 
`vllm.distributed.parallel_state.destroy_model_parallel()` @@ -100,33 +88,6 @@ # # * Worker Patch: # =============== -# ** File: worker/patch_0_8_4/patch_metrics.py ** -# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -# 1. `vllm.spec_decode.metrics.AsyncMetricsCollector.init_tensors` and -# `vllm.spec_decode.metrics.AsyncMetricsCollector._copy_rejsample_metrics_async` -# Why: -# There are cuda hard code (torch.cuda.Stream) in `AsyncMetricsCollector.init_tensors` and -# `AsyncMetricsCollector._copy_rejsample_metrics_async` -# How: -# Replace it with the corresponding npu method -# Related PR (if no, explain why): 1. refused by vllm. 2. vllm doesn't support 3. prepare to submit.... -# https://github.com/vllm-project/vllm/pull/14411 -# Future Plan: -# Revert it when the related pr is merged in vllm. -# -# ** File: worker/patch_0_8_4/patch_spec_decode_worker.py ** -# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -# 1. `vllm.spec_decode.spec_decode_worker.SpecDecodeWorker._configure_model_sampler_for_spec_decode` -# Why: -# vLLM `Remove Sampler from Model Code` so vllm-ascend needs a patch to run in v0.8.4. -# How: -# Use vLLM 0.8.4 method tp patch it. -# Related PR (if no, explain why): 1. refused by vllm. 2. vllm doesn't support 3. prepare to submit.... -# - https://github.com/vllm-project/vllm/pull/17084 -# - https://github.com/vllm-project/vllm-ascend/pull/636 -# Future Plan: -# Follow v0.8.4 version strategy. -# # ** File: worker/patch_common/patch_metrics.py ** # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # 1. `vllm.spec_decode.metrics.AsyncMetricsCollector.maybe_collect_rejsample_metrics` @@ -197,15 +158,4 @@ # - https://github.com/vllm-project/vllm-ascend/pull/395 # Future Plan: # Revert it when the related pr is merged in vllm and vllm-ascend. -# -# ** File: worker/patch_0_8_4/patch_tritonplaceholder.py ** -# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -# 1. `triton` Module -# Why: -# Triton is not supported on npu currently, importing triton will break vllm-ascend -# How: -# ditto -# Related PR (if no, explain why): 1. refused by vllm. 2. vllm doesn't support 3. prepare to submit.... -# TritonPlaceholder is only available in vllm>0.8.4 -# Future Plan: -# Revert it when branch main doesn't maintain v0.8.4. +# \ No newline at end of file diff --git a/vllm_ascend/patch/platform/__init__.py b/vllm_ascend/patch/platform/__init__.py index 3bf1f68..dc217f9 100644 --- a/vllm_ascend/patch/platform/__init__.py +++ b/vllm_ascend/patch/platform/__init__.py @@ -17,8 +17,8 @@ from vllm_ascend.utils import vllm_version_is # Import specific patches for different versions -if vllm_version_is("0.8.4"): - from vllm_ascend.patch.platform import patch_0_8_4 # noqa: F401 +if vllm_version_is("0.8.5"): + from vllm_ascend.patch.platform import patch_0_8_5 # noqa: F401 from vllm_ascend.patch.platform import patch_common # noqa: F401 else: from vllm_ascend.patch.platform import patch_common # noqa: F401 diff --git a/vllm_ascend/patch/platform/patch_0_8_4/patch_config.py b/vllm_ascend/patch/platform/patch_0_8_4/patch_config.py deleted file mode 100644 index 4a30aaa..0000000 --- a/vllm_ascend/patch/platform/patch_0_8_4/patch_config.py +++ /dev/null @@ -1,243 +0,0 @@ -# -# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved. -# This file is a part of the vllm-ascend project. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -import json -import warnings -from importlib.util import find_spec -from typing import Any, Final, Literal, Mapping, Optional, Union - -import torch -import vllm.envs as envs -from vllm.config import (HfOverrides, ModelConfig, ModelImpl, PoolerConfig, - TaskOption, _get_and_verify_dtype, - _get_and_verify_max_len, get_min_sliding_window, - get_served_model_name, logger) -from vllm.transformers_utils.config import (ConfigFormat, get_config, - get_hf_image_processor_config, - get_hf_text_config) -from vllm.transformers_utils.utils import maybe_model_redirect - - -def new_init( - self, - model: str, - task: Union[TaskOption, Literal["draft"]], - tokenizer: str, - tokenizer_mode: str, - trust_remote_code: bool, - dtype: Union[str, torch.dtype], - seed: int, - hf_config_path: Optional[str] = None, - allowed_local_media_path: str = "", - revision: Optional[str] = None, - code_revision: Optional[str] = None, - rope_scaling: Optional[dict[str, Any]] = None, - rope_theta: Optional[float] = None, - tokenizer_revision: Optional[str] = None, - max_model_len: Optional[int] = None, - spec_target_max_model_len: Optional[int] = None, - quantization: Optional[str] = None, - enforce_eager: Optional[bool] = None, - max_seq_len_to_capture: Optional[int] = None, - max_logprobs: int = 20, - disable_sliding_window: bool = False, - disable_cascade_attn: bool = False, - skip_tokenizer_init: bool = False, - served_model_name: Optional[Union[str, list[str]]] = None, - limit_mm_per_prompt: Optional[Mapping[str, int]] = None, - use_async_output_proc: bool = True, - config_format: ConfigFormat = ConfigFormat.AUTO, - hf_token: Optional[Union[bool, str]] = None, - hf_overrides: Optional[HfOverrides] = None, - mm_processor_kwargs: Optional[dict[str, Any]] = None, - disable_mm_preprocessor_cache: bool = False, - override_neuron_config: Optional[dict[str, Any]] = None, - override_pooler_config: Optional["PoolerConfig"] = None, - logits_processor_pattern: Optional[str] = None, - generation_config: str = "auto", - enable_sleep_mode: bool = False, - override_generation_config: Optional[dict[str, Any]] = None, - model_impl: Union[str, ModelImpl] = ModelImpl.AUTO, -) -> None: - self.model = maybe_model_redirect(model) - self.tokenizer = maybe_model_redirect(tokenizer) - - self.hf_config_path = hf_config_path - if isinstance(hf_config_path, str): - self.hf_config_path = maybe_model_redirect(hf_config_path) - - self.tokenizer_mode = tokenizer_mode - self.trust_remote_code = trust_remote_code - self.allowed_local_media_path = allowed_local_media_path - self.seed = seed - self.revision = revision - self.code_revision = code_revision - self.rope_scaling = rope_scaling - self.rope_theta = rope_theta - self.model_impl = model_impl - - if hf_overrides is None: - hf_overrides = {} - - if callable(hf_overrides): - hf_overrides_kw: dict[str, Any] = {} - hf_overrides_fn = hf_overrides - else: - hf_overrides_kw = hf_overrides - hf_overrides_fn = None - - if rope_scaling is not None: - hf_override: dict[str, Any] = {"rope_scaling": rope_scaling} - hf_overrides_kw.update(hf_override) - hf_overrides_str = json.dumps(hf_overrides) 
- msg = ("`--rope-scaling` will be removed in a future release. " - f"'Please instead use `--hf-overrides '{hf_overrides_str}'`") - warnings.warn(DeprecationWarning(msg), stacklevel=2) - if rope_theta is not None: - hf_override = {"rope_theta": rope_theta} - hf_overrides_kw.update(hf_override) - hf_overrides_str = json.dumps(hf_overrides) - msg = ("`--rope-theta` will be removed in a future release. " - f"'Please instead use `--hf-overrides '{hf_overrides_str}'`") - warnings.warn(DeprecationWarning(msg), stacklevel=2) - - self.maybe_pull_model_tokenizer_for_s3(model, tokenizer) - - if (backend := envs.VLLM_ATTENTION_BACKEND - ) and backend == "FLASHINFER" and find_spec("flashinfer") is None: - raise ValueError( - "VLLM_ATTENTION_BACKEND is set to FLASHINFER, but flashinfer " - "module was not found. See " - "https://github.com/vllm-project/vllm/blob/main/docker/Dockerfile " # noqa: E501 - "for instructions on how to install it.") - - # The tokenizer version is consistent with the model version by default. - if tokenizer_revision is None: - self.tokenizer_revision = revision - else: - self.tokenizer_revision = tokenizer_revision - self.quantization = quantization - self.enforce_eager = enforce_eager - self.max_seq_len_to_capture = max_seq_len_to_capture - self.max_logprobs = max_logprobs - self.disable_sliding_window = disable_sliding_window - self.disable_cascade_attn = disable_cascade_attn - self.skip_tokenizer_init = skip_tokenizer_init - self.enable_sleep_mode = enable_sleep_mode - - from vllm.platforms import current_platform - - hf_config = get_config(self.hf_config_path or self.model, - trust_remote_code, revision, code_revision, - config_format) - - if hf_overrides_kw: - logger.info("Overriding HF config with %s", hf_overrides_kw) - hf_config.update(hf_overrides_kw) - if hf_overrides_fn: - logger.info("Overriding HF config with %s", hf_overrides_fn) - hf_config = hf_overrides_fn(hf_config) - - self.hf_config = hf_config - - self.hf_text_config = get_hf_text_config(self.hf_config) - self.attention_chunk_size = getattr(self.hf_text_config, - "attention_chunk_size", None) - self.encoder_config = self._get_encoder_config() - self.hf_image_processor_config = get_hf_image_processor_config( - self.model, hf_token=hf_token, revision=revision) - self.dtype = _get_and_verify_dtype(self.hf_config, dtype) - self.use_async_output_proc = use_async_output_proc - self.mm_processor_kwargs = mm_processor_kwargs - self.disable_mm_preprocessor_cache = disable_mm_preprocessor_cache - - # Set enforce_eager to False if the value is unset. - if self.enforce_eager is None: - self.enforce_eager = False - - interleaved_attn_models = ["gemma2", "gemma3_text", "cohere2"] - sliding_window = getattr(self.hf_text_config, "sliding_window", None) - has_interleaved_attention = (sliding_window is not None) and ( - isinstance(sliding_window, list) or - (self.hf_text_config.model_type in interleaved_attn_models)) - - if (not self.disable_sliding_window and has_interleaved_attention): - if (backend := - envs.VLLM_ATTENTION_BACKEND) in ("XFORMERS", "FLASHINFER"): - sliding_window_len_min = get_min_sliding_window( - self.hf_text_config.sliding_window) - - logger.warning_once( - f"{self.hf_text_config.model_type} has interleaved " - "attention, which is currently not supported by the " - f"{backend} backend. 
Disabling sliding window and capping " - "the max length to the sliding window size " - f"({sliding_window_len_min}).") - self.disable_sliding_window = True - else: - # for a model with interleaved attention, - # the scheduler and the model treat it as full attention - # (i.e., not dropping any tokens outside the window). - # only the attention layer itself is aware of the sliding - # window, and use the window size to compute the attention. - self.hf_text_config.interleaved_sliding_window = sliding_window - delattr(self.hf_text_config, "sliding_window") - sliding_window = None - - self.max_model_len = _get_and_verify_max_len( - hf_config=self.hf_text_config, - max_model_len=max_model_len, - disable_sliding_window=self.disable_sliding_window, - sliding_window_len=self.get_hf_config_sliding_window(), - spec_target_max_model_len=spec_target_max_model_len, - encoder_config=self.encoder_config) - self.served_model_name = get_served_model_name(model, served_model_name) - self.multimodal_config = self._init_multimodal_config(limit_mm_per_prompt) - if not self.skip_tokenizer_init: - self._verify_tokenizer_mode() - - self.is_attention_free = self._init_attention_free() - self.is_hybrid = self._init_is_hybrid() - self.has_noops = self._init_has_noops() - self.has_inner_state = self._init_has_inner_state() - - if current_platform.is_neuron(): - self.override_neuron_config = override_neuron_config - else: - self.override_neuron_config = None - - supported_tasks, task = self._resolve_task(task) - self.supported_tasks = supported_tasks - self.task: Final = task # type: ignore - if self.task in ("draft", "generate"): - self.truncation_side = "left" - else: - self.truncation_side = "right" - - self.pooler_config = self._init_pooler_config(override_pooler_config) - self.logits_processor_pattern = logits_processor_pattern - - self.generation_config = generation_config - self.override_generation_config = override_generation_config or {} - - self._verify_quantization() - self._verify_cuda_graph() - self._verify_bnb_config() - - -# The platform assertion is deleted to support the npu platform. -ModelConfig.__init__ = new_init diff --git a/vllm_ascend/patch/platform/patch_0_8_4/__init__.py b/vllm_ascend/patch/platform/patch_0_8_5/__init__.py similarity index 90% rename from vllm_ascend/patch/platform/patch_0_8_4/__init__.py rename to vllm_ascend/patch/platform/patch_0_8_5/__init__.py index a058380..116c73c 100644 --- a/vllm_ascend/patch/platform/patch_0_8_4/__init__.py +++ b/vllm_ascend/patch/platform/patch_0_8_5/__init__.py @@ -14,5 +14,3 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# - -import vllm_ascend.patch.platform.patch_0_8_4.patch_config # noqa diff --git a/vllm_ascend/patch/worker/__init__.py b/vllm_ascend/patch/worker/__init__.py index 4b6b83d..3c9c0b8 100644 --- a/vllm_ascend/patch/worker/__init__.py +++ b/vllm_ascend/patch/worker/__init__.py @@ -18,8 +18,8 @@ from vllm_ascend.utils import vllm_version_is # Import specific patches for different versions -if vllm_version_is("0.8.4"): - from vllm_ascend.patch.worker import patch_0_8_4 # noqa: F401 +if vllm_version_is("0.8.5"): + from vllm_ascend.patch.worker import patch_0_8_5 # noqa: F401 from vllm_ascend.patch.worker import patch_common # noqa: F401 else: from vllm_ascend.patch.worker import patch_common # noqa: F401 diff --git a/vllm_ascend/patch/worker/patch_0_8_4/patch_metrics.py b/vllm_ascend/patch/worker/patch_0_8_4/patch_metrics.py deleted file mode 100644 index b3c98fc..0000000 --- a/vllm_ascend/patch/worker/patch_0_8_4/patch_metrics.py +++ /dev/null @@ -1,59 +0,0 @@ -# -# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved. -# This file is a part of the vllm-ascend project. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -from typing import Callable, Optional, Union - -import torch -from vllm.spec_decode.metrics import (AsyncMetricsCollector, - SpecDecodeWorkerMetrics) - -Timer = Callable[[], float] - -# TODO: revert this patch when the cuda hard code is removed in vllm -# init_tensors: Modified the hard-coded cuda judgment logic to npu; -# maybe_collect_rejsample_metrics: Removed the check for current_platform.is_cuda_alike() - - -def init_tensors(self, - rank: int, - device_type: Union[torch.device, str] = 'npu') -> None: - self._rank = rank - if isinstance(device_type, torch.device): - device_type = device_type.type - if device_type == 'npu': - self._copy_stream = torch.npu.Stream() - - -def maybe_collect_rejsample_metrics( - self, k: int) -> Optional[SpecDecodeWorkerMetrics]: - - # If a copy was initiated in the previous call, collect and return. - if self._in_flight_copy is not None: - ready_event = self._in_flight_copy - self._in_flight_copy = None - return self._collect_rejsample_metrics(k, ready_event) - - # Otherwise, check if we should start a new copy. - if self._should_collect_rejsample_metrics(self._timer()): - assert self._in_flight_copy is None - self._in_flight_copy = self._copy_rejsample_metrics_async() - - return None - - -AsyncMetricsCollector.init_tensors = init_tensors -AsyncMetricsCollector.maybe_collect_rejsample_metrics = maybe_collect_rejsample_metrics diff --git a/vllm_ascend/patch/worker/patch_0_8_4/patch_spec_decode_worker.py b/vllm_ascend/patch/worker/patch_0_8_4/patch_spec_decode_worker.py deleted file mode 100644 index 710894e..0000000 --- a/vllm_ascend/patch/worker/patch_0_8_4/patch_spec_decode_worker.py +++ /dev/null @@ -1,30 +0,0 @@ -# -# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved. -# This file is a part of the vllm-ascend project. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -from vllm.spec_decode.spec_decode_worker import SpecDecodeWorker - - -def _configure_model_sampler_for_spec_decode(self): - (self.scorer_worker.model_runner.model.sampler.include_gpu_probs_tensor - ) = True - (self.scorer_worker.model_runner.model.sampler. - should_modify_greedy_probs_inplace) = True - self.proposer_worker.set_include_gpu_probs_tensor() - self.proposer_worker.set_should_modify_greedy_probs_inplace() - - -SpecDecodeWorker._configure_model_sampler_for_spec_decode = _configure_model_sampler_for_spec_decode diff --git a/vllm_ascend/patch/worker/patch_0_8_4/patch_tritonplaceholder.py b/vllm_ascend/patch/worker/patch_0_8_4/patch_tritonplaceholder.py deleted file mode 100644 index 6271753..0000000 --- a/vllm_ascend/patch/worker/patch_0_8_4/patch_tritonplaceholder.py +++ /dev/null @@ -1,71 +0,0 @@ -# -# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved. -# Copyright 2023 The vLLM team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# This file is a part of the vllm-ascend project. -# Adapted from vllm/triton_utils/importing.py -# - -import importlib -import sys -import types -from importlib.util import find_spec - -from vllm.logger import logger - -HAS_TRITON = ( - find_spec("triton") is not None - or find_spec("pytorch-triton-xpu") is not None # Not compatible -) - -if not HAS_TRITON: - logger.info("Triton not installed or not compatible; certain GPU-related" - " functions will not be available.") - - class TritonPlaceholder(types.ModuleType): - - def __init__(self): - super().__init__("triton") - self.jit = self._dummy_decorator("jit") - self.autotune = self._dummy_decorator("autotune") - self.heuristics = self._dummy_decorator("heuristics") - self.language = TritonLanguagePlaceholder() - self.__spec__ = importlib.machinery.ModuleSpec( - name="triton", loader=None, origin="placeholder") - logger.warning_once( - "Triton is not installed. Using dummy decorators. 
" - "Install it via `pip install triton` to enable kernel" - " compilation.") - - def _dummy_decorator(self, name): - - def decorator(func=None, **kwargs): - if func is None: - return lambda f: f - return func - - return decorator - - class TritonLanguagePlaceholder(types.ModuleType): - - def __init__(self): - super().__init__("triton.language") - self.constexpr = None - self.dtype = None - - sys.modules['triton'] = TritonPlaceholder() - sys.modules['triton.language'] = TritonLanguagePlaceholder() - -if 'triton' in sys.modules: - logger.info("Triton module has been replaced with a placeholder.") diff --git a/vllm_ascend/patch/worker/patch_0_8_4/__init__.py b/vllm_ascend/patch/worker/patch_0_8_5/__init__.py similarity index 82% rename from vllm_ascend/patch/worker/patch_0_8_4/__init__.py rename to vllm_ascend/patch/worker/patch_0_8_5/__init__.py index c822006..116c73c 100644 --- a/vllm_ascend/patch/worker/patch_0_8_4/__init__.py +++ b/vllm_ascend/patch/worker/patch_0_8_5/__init__.py @@ -14,6 +14,3 @@ # See the License for the specific language governing permissions and # limitations under the License. # - -import vllm_ascend.patch.worker.patch_0_8_4.patch_metrics # noqa -import vllm_ascend.patch.worker.patch_0_8_4.patch_tritonplaceholder # noqa diff --git a/vllm_ascend/patch/worker/patch_common/patch_multi_step_worker.py b/vllm_ascend/patch/worker/patch_common/patch_multi_step_worker.py index 2ae6cab..ca87729 100644 --- a/vllm_ascend/patch/worker/patch_common/patch_multi_step_worker.py +++ b/vllm_ascend/patch/worker/patch_common/patch_multi_step_worker.py @@ -22,7 +22,6 @@ from vllm.model_executor.layers.sampler import SamplerOutput from vllm.sequence import ExecuteModelRequest from vllm.spec_decode.multi_step_worker import MultiStepWorker -from vllm_ascend.utils import vllm_version_is from vllm_ascend.worker.draft_model_runner import TP1DraftModelRunner @@ -93,16 +92,14 @@ def set_include_gpu_probs_tensor(self) -> None: # Need include_gpu_probs_tensor for MultiSteoWorker if hasattr(self.model_runner.model, "sampler"): self.model_runner.model.sampler.include_gpu_probs_tensor = True - if not vllm_version_is("0.8.4"): - self.model_runner.sampler.include_gpu_probs_tensor = True + self.model_runner.sampler.include_gpu_probs_tensor = True def set_should_modify_greedy_probs_inplace(self) -> None: if hasattr(self.model_runner.model, "sampler"): self.model_runner.model.sampler.should_modify_greedy_probs_inplace = ( True) - if not vllm_version_is("0.8.4"): - self.model_runner.sampler.should_modify_greedy_probs_inplace = True + self.model_runner.sampler.should_modify_greedy_probs_inplace = True MultiStepWorker.sampler_output = torch.inference_mode()(sampler_output) diff --git a/vllm_ascend/worker/draft_model_runner.py b/vllm_ascend/worker/draft_model_runner.py index 7122569..1306b1e 100644 --- a/vllm_ascend/worker/draft_model_runner.py +++ b/vllm_ascend/worker/draft_model_runner.py @@ -28,7 +28,6 @@ from vllm.worker.model_runner_base import (ModelRunnerBase, ModelRunnerWrapperBase) from vllm_ascend.attention.attention import AscendMetadata -from vllm_ascend.utils import vllm_version_is # A flag to enable debug prints for the updated input tensors # before each step. @@ -287,17 +286,11 @@ class TP1DraftModelRunner(ModelRunnerWrapperBase): if not self.is_driver_worker: return [] # Sample the next token. 
- if vllm_version_is("0.8.4"): - output = self.model.sample( - logits=logits, - sampling_metadata=model_input.sampling_metadata, - ) - else: - assert self.model_runner.sampler is not None - output = self.model_runner.sampler( - logits=logits, - sampling_metadata=model_input.sampling_metadata, - ) + assert self.model_runner.sampler is not None + output = self.model_runner.sampler( + logits=logits, + sampling_metadata=model_input.sampling_metadata, + ) outputs.append(output) if model_input.attn_metadata.num_prefills == 0 \ diff --git a/vllm_ascend/worker/model_runner.py b/vllm_ascend/worker/model_runner.py index e08fd08..b208ed1 100644 --- a/vllm_ascend/worker/model_runner.py +++ b/vllm_ascend/worker/model_runner.py @@ -34,6 +34,7 @@ from vllm.attention.backends.utils import CommonAttentionState from vllm.config import VllmConfig from vllm.core.scheduler import SchedulerOutputs from vllm.distributed import get_pp_group +from vllm.distributed.kv_transfer import get_kv_transfer_group from vllm.forward_context import set_forward_context from vllm.inputs import INPUT_REGISTRY, InputRegistry from vllm.logger import logger @@ -42,7 +43,7 @@ from vllm.lora.request import LoRARequest from vllm.lora.worker_manager import LRUCacheWorkerLoRAManager from vllm.model_executor import SamplingMetadata, SamplingMetadataCache from vllm.model_executor.layers.rotary_embedding import MRotaryEmbedding -from vllm.model_executor.layers.sampler import SamplerOutput +from vllm.model_executor.layers.sampler import SamplerOutput, get_sampler from vllm.model_executor.model_loader import get_model from vllm.model_executor.model_loader.tensorizer import TensorizerConfig from vllm.model_executor.models import supports_lora, supports_multimodal @@ -63,13 +64,6 @@ from vllm.worker.model_runner_base import ( _init_attn_metadata_from_tensor_dict, _init_sampling_metadata_from_tensor_dict) -from vllm_ascend.utils import vllm_version_is - -if vllm_version_is("0.8.4"): - from vllm.distributed import get_kv_transfer_group -else: - from vllm.distributed.kv_transfer import get_kv_transfer_group - if TYPE_CHECKING: from vllm.attention.backends.abstract import AttentionBackend @@ -935,12 +929,7 @@ class NPUModelRunnerBase(ModelRunnerBase[TModelInputForNPU]): self.sampling_metadata_cache: SamplingMetadataCache = \ SamplingMetadataCache() \ if self.parallel_config.pipeline_parallel_size == 1 else None - - if vllm_version_is("0.8.4"): - self.sampler = None - else: - from vllm.model_executor.layers.sampler import get_sampler - self.sampler = get_sampler() + self.sampler = get_sampler() def get_model(self) -> nn.Module: return self.model @@ -1409,17 +1398,10 @@ class NPUModelRunner(NPUModelRunnerBase[ModelInputForNPUWithSamplingMetadata]): model_input.async_callback() # Sample the next token. 
- if vllm_version_is("0.8.4"): - output = self.model.sample( - logits=logits, - sampling_metadata=model_input.sampling_metadata, - ) - else: - assert self.sampler is not None - output = self.sampler( - logits=logits, - sampling_metadata=model_input.sampling_metadata, - ) + output = self.sampler( + logits=logits, + sampling_metadata=model_input.sampling_metadata, + ) if (self.observability_config is not None and self.observability_config.collect_model_forward_time and output is not None): diff --git a/vllm_ascend/worker/model_runner_v1.py b/vllm_ascend/worker/model_runner_v1.py index 5e5c003..bd508a4 100644 --- a/vllm_ascend/worker/model_runner_v1.py +++ b/vllm_ascend/worker/model_runner_v1.py @@ -47,13 +47,13 @@ from vllm.v1.core.encoder_cache_manager import compute_encoder_budget from vllm.v1.kv_cache_interface import (FullAttentionSpec, KVCacheConfig, KVCacheSpec) from vllm.v1.outputs import EMPTY_MODEL_RUNNER_OUTPUT, ModelRunnerOutput +from vllm.v1.sample.sampler import Sampler from vllm.v1.utils import bind_kv_cache from vllm.v1.worker.gpu_input_batch import CachedRequestState, InputBatch from vllm_ascend.attention.attention import AttentionMaskBuilder from vllm_ascend.attention.attention_v1 import AscendAttentionState from vllm_ascend.platform import NPUPlatform -from vllm_ascend.utils import vllm_version_is if TYPE_CHECKING: import xgrammar as xgr # type: ignore[import-untyped] @@ -291,11 +291,7 @@ class NPUModelRunner: self.attn_mask_builder = AttentionMaskBuilder.initialize_from_len( self.attn_mask_len, self.dtype) - if vllm_version_is("0.8.4"): - self.sampler = None - else: - from vllm.v1.sample.sampler import Sampler - self.sampler = Sampler() + self.sampler = Sampler() def _update_states(self, scheduler_output: "SchedulerOutput") -> None: """Update the cached states and the persistent batch with the scheduler @@ -346,34 +342,19 @@ class NPUModelRunner: generator.manual_seed(sampling_params.seed) else: generator = None - if vllm_version_is("0.8.4"): - self.requests[req_id] = CachedRequestState( - req_id=req_id, - prompt_token_ids=new_req_data.prompt_token_ids, - prompt=new_req_data.prompt, - mm_inputs=new_req_data.mm_inputs, - mm_positions=new_req_data.mm_positions, - sampling_params=sampling_params, - generator=generator, - block_ids=new_req_data.block_ids, - num_computed_tokens=new_req_data.num_computed_tokens, - output_token_ids=[], - lora_request=new_req_data.lora_request, - ) - else: - # the prompt removed by: https://github.com/vllm-project/vllm/pull/17214 - self.requests[req_id] = CachedRequestState( - req_id=req_id, - prompt_token_ids=new_req_data.prompt_token_ids, - mm_inputs=new_req_data.mm_inputs, - mm_positions=new_req_data.mm_positions, - sampling_params=sampling_params, - generator=generator, - block_ids=new_req_data.block_ids, - num_computed_tokens=new_req_data.num_computed_tokens, - output_token_ids=[], - lora_request=new_req_data.lora_request, - ) + + self.requests[req_id] = CachedRequestState( + req_id=req_id, + prompt_token_ids=new_req_data.prompt_token_ids, + mm_inputs=new_req_data.mm_inputs, + mm_positions=new_req_data.mm_positions, + sampling_params=sampling_params, + generator=generator, + block_ids=new_req_data.block_ids, + num_computed_tokens=new_req_data.num_computed_tokens, + output_token_ids=[], + lora_request=new_req_data.lora_request, + ) req_ids_to_add.append(req_id) @@ -666,17 +647,10 @@ class NPUModelRunner: # Sample the next token and get logprobs if needed. 
sampling_metadata = self.input_batch.sampling_metadata - if vllm_version_is("0.8.4"): - sampler_output = self.model.sample( - logits=logits, - sampling_metadata=sampling_metadata, - ) - else: - assert self.sampler is not None - sampler_output = self.sampler( - logits=logits, - sampling_metadata=sampling_metadata, - ) + sampler_output = self.sampler( + logits=logits, + sampling_metadata=sampling_metadata, + ) # TODO(woosuk): The following loop can be slow since it iterates over # the requests one by one. Optimize. diff --git a/vllm_ascend/worker/multi_step_runner.py b/vllm_ascend/worker/multi_step_runner.py index 7753604..ac2b685 100644 --- a/vllm_ascend/worker/multi_step_runner.py +++ b/vllm_ascend/worker/multi_step_runner.py @@ -23,7 +23,6 @@ from vllm.worker.multi_step_model_runner import (ModelOutput, PythonizationCache, StatefulModelInput) -from vllm_ascend.utils import vllm_version_is from vllm_ascend.worker.model_runner import ( ModelInputForNPUWithSamplingMetadata, NPUModelRunnerBase) @@ -318,13 +317,7 @@ class MultiStepModelNPURunner(NPUModelRunnerBase[StatefulModelInputForNPU]): dtype=torch.long, device="cpu", pin_memory=True) - - if vllm_version_is("0.8.4"): - self._base_model_runner.model.sampler.include_gpu_probs_tensor = ( - True) - else: - assert self._base_model_runner.sampler is not None - self._base_model_runner.sampler.include_gpu_probs_tensor = True + self._base_model_runner.sampler.include_gpu_probs_tensor = True if frozen_model_input.sampling_metadata: frozen_model_input.sampling_metadata.skip_sampler_cpu_output = ( True) diff --git a/vllm_ascend/worker/worker.py b/vllm_ascend/worker/worker.py index 6cfa9ca..63912d6 100644 --- a/vllm_ascend/worker/worker.py +++ b/vllm_ascend/worker/worker.py @@ -28,6 +28,7 @@ from vllm.config import VllmConfig, set_current_vllm_config from vllm.distributed import (ensure_model_parallel_initialized, init_distributed_environment, set_custom_all_reduce) +from vllm.distributed.kv_transfer import ensure_kv_transfer_initialized from vllm.logger import logger from vllm.lora.request import LoRARequest from vllm.model_executor import set_random_seed @@ -46,15 +47,10 @@ from vllm.worker.worker_base import (LocalOrDistributedWorkerBase, WorkerBase, from vllm_ascend.device_allocator.camem import CaMemAllocator from vllm_ascend.distributed.parallel_state import init_ascend_model_parallel from vllm_ascend.platform import NPUPlatform -from vllm_ascend.utils import try_register_lib, vllm_version_is +from vllm_ascend.utils import try_register_lib from vllm_ascend.worker.model_runner import NPUModelRunner from vllm_ascend.worker.pooling_model_runner import NPUPoolingModelRunner -if vllm_version_is("0.8.4"): - from vllm.distributed import ensure_kv_transfer_initialized -else: - from vllm.distributed.kv_transfer import ensure_kv_transfer_initialized - class NPUWorker(LocalOrDistributedWorkerBase): """A worker class that executes (a partition of) the model on a NPU. 
diff --git a/vllm_ascend/worker/worker_v1.py b/vllm_ascend/worker/worker_v1.py index 3dacd13..b31c8f1 100644 --- a/vllm_ascend/worker/worker_v1.py +++ b/vllm_ascend/worker/worker_v1.py @@ -29,6 +29,7 @@ from vllm.config import VllmConfig from vllm.distributed import (ensure_model_parallel_initialized, init_distributed_environment, set_custom_all_reduce) +from vllm.distributed.kv_transfer import ensure_kv_transfer_initialized from vllm.logger import logger from vllm.model_executor import set_random_seed from vllm.utils import STR_DTYPE_TO_TORCH_DTYPE @@ -41,14 +42,9 @@ from vllm.v1.worker.worker_base import WorkerBase from vllm_ascend.distributed.parallel_state import init_ascend_model_parallel from vllm_ascend.platform import NPUPlatform -from vllm_ascend.utils import try_register_lib, vllm_version_is +from vllm_ascend.utils import try_register_lib from vllm_ascend.worker.model_runner_v1 import NPUModelRunner -if vllm_version_is("0.8.4"): - from vllm.distributed import ensure_kv_transfer_initialized -else: - from vllm.distributed.kv_transfer import ensure_kv_transfer_initialized - class NPUWorker(WorkerBase):
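
Note on the version-gating helper (not part of the diff above): every version branch this change removes or renames is driven by vllm_version_is() from vllm_ascend.utils, whose implementation is not shown in this patch. As a rough sketch -- assuming the helper simply compares the installed vLLM release string against a target, which may differ from the real utility (for example, it may read an override from env vars or use packaging.version) -- the gating pattern used in vllm_ascend/patch/platform/__init__.py and vllm_ascend/patch/worker/__init__.py looks like this:

    # Illustrative sketch only; the actual helper lives in vllm_ascend.utils
    # and may normalize or override the version differently.
    import vllm


    def vllm_version_is(target: str) -> bool:
        """Return True when the installed vLLM release matches `target`."""
        return vllm.__version__ == target


    # Usage mirroring the patch __init__ modules after this change:
    #   if vllm_version_is("0.8.5"):
    #       import the 0.8.5-specific patch package, then patch_common
    #   else:
    #       import only patch_common (main branch)

With the 0.8.4 branches gone, the runners and workers above call the upstream 0.8.5 APIs unconditionally (get_sampler(), vllm.v1.sample.sampler.Sampler, and the vllm.distributed.kv_transfer imports), and only patch_0_8_5/patch_common remain gated by this helper.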