[Main2Main][Deps][Misc] Upgrade vLLM to v0.15.0 (#6470)
### What this PR does / why we need it?
This PR upgrades the vLLM dependency from `v0.14.1` to `v0.15.0`. This
involves:
- Updating the `VLLM_TAG` in every `Dockerfile`.
- Updating the vLLM version in `docs/source/conf.py`.
- Removing conditional code paths specific to `v0.14.1` across the
codebase, which simplifies maintenance (a sketch of the removed pattern
is shown below).
- Fixing `TypeError: MMEncoderAttention.__init__() got an unexpected
keyword argument 'multimodal_config'` caused by
https://github.com/vllm-project/vllm/pull/31972.
- Fixing `_shared_experts: 'NoneType' object is not callable` caused by
https://github.com/vllm-project/vllm/pull/32082, via
https://github.com/vllm-project/vllm-ascend/pull/6335.
- Fixing `ReshapeAndCacheOperation setup failed!` caused by
https://github.com/vllm-project/vllm/pull/25954 by overriding the
attention metadata slot mapping.
This upgrade is necessary to keep the project aligned with the latest
features, bug fixes, and API changes in the vLLM project.
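
For reference, a minimal sketch of the version-gated import pattern this upgrade removes (illustrative only; the actual call sites and the `MLACommonMetadataBuilder` import paths are taken from the diff below):

```python
# Sketch of the vllm_version_is("0.14.1") gate removed by this PR
# (illustrative; the real call sites appear in the diff below).
from vllm_ascend.utils import vllm_version_is

if vllm_version_is("0.14.1"):
    # Old import location, kept only while vLLM v0.14.1 was supported.
    from vllm.v1.attention.backends.mla.common import MLACommonMetadataBuilder  # type: ignore
else:
    # Import location on vLLM v0.15.0 / main.
    from vllm.model_executor.layers.attention.mla_attention import MLACommonMetadataBuilder
```

With v0.15.0 as the minimum supported version, only the second import path is kept and `vllm_version_is` is dropped from the affected modules.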
### Does this PR introduce _any_ user-facing change?
No, this is an internal dependency update and does not introduce any
user-facing changes.
### How was this patch tested?
CI is expected to pass with these changes, ensuring that all existing
tests are successful with the new vLLM version.
- vLLM version: v0.14.1
- vLLM main: dc917cceb8
Co-authored-by: shen-shanshan <467638484@qq.com>
---------
Signed-off-by: wangxiyuan <wangxiyuan1007@gmail.com>
@@ -19,7 +19,6 @@ from vllm_ascend.utils import (
     is_drafter_moe_model,
     is_moe_model,
     speculative_enable_dispatch_gmm_combine_decode,
-    vllm_version_is,
 )

@@ -57,11 +56,9 @@ def set_ascend_forward_context(
         "num_tokens_across_dp": num_tokens_across_dp,
         "cudagraph_runtime_mode": aclgraph_runtime_mode,
         "batch_descriptor": batch_descriptor,
+        "skip_compiled": skip_compiled,
     }
 
-    if not vllm_version_is("0.14.1"):
-        forward_context_kwargs["skip_compiled"] = skip_compiled
-
     with set_forward_context(**forward_context_kwargs):
         forward_context = get_forward_context()

@@ -278,6 +278,8 @@ class AscendAttentionMetadataBuilder(AttentionMetadataBuilder[AscendMetadata]):
         seq_lens = common_attn_metadata.seq_lens_cpu[:num_reqs]
 
         slot_mapping = common_attn_metadata.slot_mapping[:num_actual_tokens]
+        # This slot_mapping override does not take effect yet, since vLLM overrides it again; it should be fixed in vLLM.
+        # See: https://github.com/vllm-project/vllm/blob/ce88756b967c2c5006746a424c15dd59a284ed8c/vllm/model_executor/layers/attention/cross_attention.py#L117
         if isinstance(self.kv_cache_spec, CrossAttentionSpec):
             seq_lens = common_attn_metadata.seq_lens
             slot_mapping = common_attn_metadata.slot_mapping.to(torch.int32)

@@ -873,7 +875,9 @@ class AscendAttentionBackendImpl(AttentionImpl):
             value=value[: attn_metadata.num_actual_tokens] if not encoder_decoder else value,
             key_cache=self.key_cache,
             value_cache=self.value_cache,
-            slot_mapping=slots[: attn_metadata.num_actual_tokens] if not encoder_decoder else slots,
+            # quick fix to make sure slots is int32 for the cross attention case.
+            # see: https://github.com/vllm-project/vllm/blob/ce88756b967c2c5006746a424c15dd59a284ed8c/vllm/model_executor/layers/attention/cross_attention.py#L117
+            slot_mapping=slots[: attn_metadata.num_actual_tokens] if not encoder_decoder else slots.to(torch.int32),
         )
         if self.is_kv_producer:
             attn_metadata.reshape_cache_event.record()

@@ -8,6 +8,7 @@ import vllm.envs as envs_vllm
 from vllm.config import VllmConfig, get_current_vllm_config
 from vllm.forward_context import ForwardContext, get_forward_context
 from vllm.logger import logger
+from vllm.model_executor.layers.attention.mla_attention import MLACommonMetadataBuilder
 from vllm.model_executor.layers.linear import UnquantizedLinearMethod
 from vllm.utils.math_utils import cdiv, round_down
 from vllm.v1.attention.backend import AttentionBackend, AttentionCGSupport, MLAAttentionImpl  # type: ignore

@@ -44,18 +45,12 @@ from vllm_ascend.ops.layer_shard_linear import (
 from vllm_ascend.ops.rotary_embedding import get_cos_and_sin_mla
 from vllm_ascend.ops.weight_prefetch import maybe_npu_prefetch
 from vllm_ascend.quantization.methods import AscendW8A8LinearMethod
-from vllm_ascend.utils import ACL_FORMAT_FRACTAL_ND, maybe_trans_nz, vllm_version_is, weak_ref_tensors
+from vllm_ascend.utils import ACL_FORMAT_FRACTAL_ND, maybe_trans_nz, weak_ref_tensors
 from vllm_ascend.worker.npu_input_batch import NPUInputBatch
 
 if TYPE_CHECKING:
     from vllm.v1.core.sched.output import SchedulerOutput
 
-# isort: off
-if vllm_version_is("0.14.1"):
-    from vllm.v1.attention.backends.mla.common import MLACommonMetadataBuilder  # type: ignore
-else:
-    from vllm.model_executor.layers.attention.mla_attention import MLACommonMetadataBuilder
-# isort: on
-
 MAX_O_PROJ_PREFETCH_SIZE = 16 * 1024 * 1024
 BUILD_METADATA_STEP_PREFILL = 0

@@ -9,6 +9,7 @@ from vllm.config import VllmConfig, get_current_vllm_config
 from vllm.distributed import get_tensor_model_parallel_world_size, get_tp_group
 from vllm.forward_context import get_forward_context
 from vllm.logger import logger
+from vllm.model_executor.layers.attention.mla_attention import MLACommonMetadataBuilder
 from vllm.model_executor.layers.linear import UnquantizedLinearMethod
 from vllm.triton_utils import HAS_TRITON
 from vllm.v1.attention.backend import AttentionBackend, AttentionCGSupport, MLAAttentionImpl  # type: ignore

@@ -45,17 +46,11 @@ from vllm_ascend.utils import (
     enable_dsa_cp,
     enable_dsa_cp_with_layer_shard,
     maybe_trans_nz,
-    vllm_version_is,
 )
 from vllm_ascend.worker.npu_input_batch import NPUInputBatch
 
 if TYPE_CHECKING:
     from vllm.v1.core.sched.output import SchedulerOutput
-if vllm_version_is("0.14.1"):
-    from vllm.v1.attention.backends.mla.common import MLACommonMetadataBuilder  # type: ignore
-else:
-    from vllm.model_executor.layers.attention.mla_attention import MLACommonMetadataBuilder
-# isort: on
 
 # token count limits within bmm_transpose operator
 BMM_TRANS_MAX_SUPPORTED_TOKENS = 1024

@@ -512,6 +512,14 @@ class AscendSharedFusedMoE(SharedFusedMoE, AscendFusedMoE):
         hidden_states: torch.Tensor,
         router_logits: torch.Tensor,
     ) -> tuple[torch.Tensor, torch.Tensor]:
+        if self._shared_experts is None:
+            fused_out = AscendFusedMoE.forward(
+                self,
+                hidden_states=hidden_states,
+                router_logits=router_logits,
+            )
+            shared_out = None
+            return shared_out, fused_out
         shared_out, fused_out = AscendFusedMoE.forward(
             self,
             hidden_states=hidden_states,

@@ -571,6 +579,9 @@ class AscendSharedFusedMoE(SharedFusedMoE, AscendFusedMoE):
         )
         routed_out = fused_moe_results.routed_out
 
+        if self._shared_experts is None:
+            return routed_out
+
         if self.multistream_overlap_gate:
             fc3_context = get_flash_common3_context()
             assert fc3_context is not None

@@ -38,7 +38,6 @@ class AscendMMEncoderAttention(MMEncoderAttention):
         scale: float | None = None,
         num_kv_heads: int | None = None,
         prefix: str = "",
-        multimodal_config: MultiModalConfig | None = None,
     ) -> None:
         """
         Args:

@@ -56,7 +55,6 @@ class AscendMMEncoderAttention(MMEncoderAttention):
             scale=scale,
             num_kv_heads=num_kv_heads,
             prefix=prefix,
-            multimodal_config=multimodal_config,
         )
 
     def reshape_qkv_to_3d(

@@ -25,5 +25,5 @@ from vllm_ascend.utils import vllm_version_is
 if os.getenv("DYNAMIC_EPLB", "false").lower() in ("true", "1") or os.getenv("EXPERT_MAP_RECORD", "false") == "true":
     import vllm_ascend.patch.platform.patch_multiproc_executor  # noqa
 
-if envs.VLLM_ASCEND_BALANCE_SCHEDULING and vllm_version_is("0.14.0"):
+if envs.VLLM_ASCEND_BALANCE_SCHEDULING and vllm_version_is("0.15.0"):
     import vllm_ascend.patch.platform.patch_balance_schedule  # noqa

@@ -19,8 +19,6 @@ from vllm.v1.executor.multiproc_executor import (
     set_multiprocessing_worker_envs,
 )
 
-from vllm_ascend.utils import vllm_version_is
-
 
 class AscendMultiprocExecutor(MultiprocExecutor):
     def _init_executor(self) -> None:

@@ -177,9 +175,8 @@ class AscendWorkerProc(WorkerProc):
             "ready_pipe": (reader, writer),
             "death_pipe": death_reader,
             "shared_worker_lock": shared_worker_lock,
+            "is_driver_worker": is_driver_worker,
         }
-        if not vllm_version_is("0.14.1"):
-            process_kwargs["is_driver_worker"] = is_driver_worker
         # Run EngineCore busy loop in background process.
         proc = context.Process(
             target=WorkerProc.worker_main,

@@ -41,7 +41,7 @@ from vllm_ascend.ops.rotary_embedding import update_cos_sin
 from vllm_ascend.ops.triton.spec_decode.utils import \
     prepare_inputs_padded_kernel
 from vllm_ascend.ops.triton.triton_utils import get_vectorcore_num
-from vllm_ascend.utils import enable_sp, shared_expert_dp_enabled, vllm_version_is
+from vllm_ascend.utils import enable_sp, shared_expert_dp_enabled
 
 # Currently we will fix block size to a small one since `num_reqs` can't be too large
 _PREPARE_INPUTS_BLOCK_SIZE = 4

@@ -456,11 +456,8 @@ class EagleProposer(VllmEagleProposer):
         self.input_ids[last_token_indices] = next_token_ids
         if self.use_cuda_graph and \
                 num_tokens <= self.runner.cudagraph_batch_sizes[-1]:
-            if vllm_version_is('0.14.1'):
-                num_input_tokens = self.vllm_config.pad_for_cudagraph(num_tokens)
-            else:
-                num_input_tokens = self.runner.cudagraph_dispatcher._bs_to_padded_graph_size[
-                    num_tokens]
+            num_input_tokens = self.runner.cudagraph_dispatcher._bs_to_padded_graph_size[
+                num_tokens]
         else:
             num_input_tokens = num_tokens

@@ -18,7 +18,7 @@ from vllm_ascend.attention.utils import AscendCommonAttentionMetadata
 from vllm_ascend.compilation.acl_graph import ACLGraphWrapper
 from vllm_ascend.ops.rotary_embedding import get_cos_and_sin_mla
 from vllm_ascend.spec_decode.eagle_proposer import EagleProposer
-from vllm_ascend.utils import lmhead_tp_enable, vllm_version_is
+from vllm_ascend.utils import lmhead_tp_enable
 
 
 class MtpProposer(EagleProposer):

@@ -245,12 +245,8 @@ class MtpProposer(EagleProposer):
         # Note(qcs): We may need to refactor these check logics.
         if self.use_cuda_graph and num_scheduled_tokens <= self.runner.cudagraph_batch_sizes[
                 -1]:
-            if vllm_version_is('0.14.1'):
-                num_input_tokens = self.vllm_config.pad_for_cudagraph(
-                    num_scheduled_tokens)
-            else:
-                num_input_tokens = self.runner.cudagraph_dispatcher._bs_to_padded_graph_size[
-                    num_scheduled_tokens]
+            num_input_tokens = self.runner.cudagraph_dispatcher._bs_to_padded_graph_size[
+                num_scheduled_tokens]
         else:
             # Eager mode, no padding needed
             num_input_tokens = num_tokens

@@ -28,14 +28,11 @@ from vllm.v1.worker.gpu.cudagraph_utils import CudaGraphManager
 from vllm.v1.worker.gpu.cudagraph_utils import \
     prepare_inputs_to_capture as prepare_inputs_to_capture_gpu
 from vllm.v1.worker.gpu.input_batch import InputBuffers
+from vllm.v1.attention.backend import AttentionMetadataBuilder
 
 from vllm_ascend.worker.v2.utils import torch_cuda_wrapper
-from vllm_ascend.utils import vllm_version_is
-
-if vllm_version_is('0.14.1'):
-    from vllm.v1.attention.backends.utils import AttentionMetadataBuilder
-else:
-    from vllm.v1.attention.backend import AttentionMetadataBuilder
 
 
 class AclGraphManager(CudaGraphManager):

@@ -24,17 +24,13 @@ import numpy as np
 import torch
 from vllm.config import VllmConfig
 from vllm.v1.kv_cache_interface import EncoderOnlyAttentionSpec, KVCacheConfig
+from vllm.v1.attention.backend import AttentionMetadataBuilder
 
 from vllm_ascend.attention.attention_mask import AttentionMaskBuilder
 from vllm_ascend.attention.attention_v1 import AscendAttentionState
 from vllm_ascend.attention.utils import (AscendCommonAttentionMetadata,
                                          AscendPrefillContextParallelMetadata)
-from vllm_ascend.utils import vllm_version_is
-
-if vllm_version_is('0.14.1'):
-    from vllm.v1.attention.backends.utils import AttentionMetadataBuilder
-else:
-    from vllm.v1.attention.backend import AttentionMetadataBuilder
 
 _ATTENTION_MASK_BUILDER = None