### What this PR does / why we need it?
1. ✅ Upgrade vLLM commit to 0115
(8471b27df97c3eb79f891802fc0e858f8f7ac6a0).
Modify import paths due to the refactors in
https://github.com/vllm-project/vllm/pull/32245 and
https://github.com/vllm-project/vllm/pull/32060.
Test result:
https://github.com/vllm-project/vllm-ascend/actions/runs/21034239336/job/60490156965?pr=5913
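
For illustration, a minimal sketch of the version-gated import pattern this kind of adaptation uses (it mirrors the `vllm_version_is` gate in the new `aclgraph_utils.py` below; the gated symbol and version string are only examples, not the exact imports moved by the upstream refactors):

```python
# Sketch only: guard an upstream import-path move behind a version check so the
# same vllm-ascend code works before and after the refactor.
from vllm_ascend.utils import vllm_version_is

if vllm_version_is("0.14.1"):
    # old location, kept for the released vLLM version
    from vllm.v1.attention.backends.utils import AttentionMetadataBuilder
else:
    # new location on vLLM main after the refactor
    from vllm.v1.attention.backend import AttentionMetadataBuilder
```
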
2. ✅ Upgrade vLLM commit to 0119
(9a1f16da1e423ede2c2f52a9850cbfbb39cefe96).
Fix `WorkerProc.__init__() missing 1 required positional argument:
'is_driver_worker'`, caused by
https://github.com/vllm-project/vllm/pull/28506.
Test result:
https://github.com/vllm-project/vllm-ascend/actions/runs/21156263050/job/60841668755?5569
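
A hedged sketch of one way to absorb a newly required constructor argument across vLLM versions (the helper and its callers are hypothetical; only the `is_driver_worker` argument comes from the error above, and this is not necessarily how the PR fixes it):

```python
# Sketch only: pass `is_driver_worker` to a WorkerProc-like class only when the
# installed vLLM version actually declares that parameter.
import inspect


def construct_worker_proc(worker_proc_cls, *args, is_driver_worker=False, **kwargs):
    params = inspect.signature(worker_proc_cls.__init__).parameters
    if "is_driver_worker" in params:
        # newer vLLM (after vllm-project/vllm#28506) requires the argument
        return worker_proc_cls(*args, is_driver_worker=is_driver_worker, **kwargs)
    return worker_proc_cls(*args, **kwargs)
```
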
3. ✅ Upgrade vLLM commit to 0120
(148117ea2e689cd43df4be6892671a17cdae5833).
   1. Add the `skip_compiled` param to `set_forward_context` due to
   https://github.com/vllm-project/vllm/pull/30385
   2. Modify `tests/ut/spec_decode/test_eagle_proposer.py` due to
   https://github.com/vllm-project/vllm/pull/24322, which changed the proposer to use
   `self.max_num_tokens = vllm_config.scheduler_config.max_num_batched_tokens + max_batch_size`
   3. Modify UT import paths due to the refactor
   https://github.com/vllm-project/vllm/pull/32060
Test result:
https://github.com/vllm-project/vllm-ascend/actions/runs/21204851770/job/60999046946
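
An illustrative sketch of keeping a `set_forward_context` call working across the `skip_compiled` addition (the call site, argument values, and version gate are assumptions, not the exact patch in this PR):

```python
# Sketch only: forward the new `skip_compiled` keyword to set_forward_context
# only on vLLM versions that accept it (added in vllm-project/vllm#30385).
from vllm.forward_context import set_forward_context
from vllm_ascend.utils import vllm_version_is


def run_with_forward_context(model_fn, attn_metadata, vllm_config, num_tokens):
    extra_kwargs = {}
    if not vllm_version_is("0.14.0"):
        # newer vLLM main accepts (and here explicitly disables) skip_compiled
        extra_kwargs["skip_compiled"] = False
    with set_forward_context(attn_metadata, vllm_config, num_tokens=num_tokens,
                             **extra_kwargs):
        return model_fn()
```
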
4. ✅ Upgrade vLLM commit to 0121
(f23fb5a7c1b61350c5c40ca1115d3bf8cf2b8cc9).
   1. vLLM switched `uses_mrope` from the target to the draft model config, making
   `positions`/`mrope_positions` mutually exclusive. This broke vllm-ascend's
   direct `self.positions` access, and the tests were missing
   `draft_model_config.uses_mrope`.
   https://github.com/vllm-project/vllm/pull/32048
   2. Move `bs_to_padded_graph_size` from `CompilationConfig` to
   `CudagraphDispatcher` due to the refactor
   https://github.com/vllm-project/vllm/pull/30143
   3. Remove the unused `maybe_setup_kv_connector` due to
   https://github.com/vllm-project/vllm/pull/32077
Test result:
https://github.com/vllm-project/vllm-ascend/actions/runs/21217728738/job/61043738834
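
A hedged sketch of how code can tolerate the `uses_mrope` move (the attribute layout and fallback order are assumptions about one possible adaptation, not the exact vllm-ascend change):

```python
# Sketch only: after vllm-project/vllm#32048, read uses_mrope from the draft
# model config when speculative decoding is configured, otherwise fall back to
# the target model config.
def model_uses_mrope(vllm_config) -> bool:
    spec_cfg = vllm_config.speculative_config
    draft_cfg = getattr(spec_cfg, "draft_model_config", None) if spec_cfg else None
    if draft_cfg is not None:
        return draft_cfg.uses_mrope
    return vllm_config.model_config.uses_mrope
```
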
5. ✅ Upgrade vLLM commit to 0122
(8ebf271bb6d1e7e9b1a55be73d755ef1a57dbbe5).
Update `FusedMoEParallelConfig` (which gained `enable_eplb`) and `FusedMoEConfig`
due to https://github.com/vllm-project/vllm/pull/32414.
Test result:
https://github.com/vllm-project/vllm-ascend/actions/runs/21249922546/job/61148613054
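
An illustrative sketch of version-tolerant construction of the updated config (assuming, as upstream, that the config class is a dataclass; the helper and its arguments are hypothetical):

```python
# Sketch only: pass enable_eplb to a FusedMoEParallelConfig-style dataclass only
# when the installed vLLM version defines that field (vllm-project/vllm#32414).
import dataclasses


def build_parallel_config(config_cls, base_kwargs: dict, enable_eplb: bool):
    field_names = {f.name for f in dataclasses.fields(config_cls)}
    kwargs = dict(base_kwargs)
    if "enable_eplb" in field_names:
        kwargs["enable_eplb"] = enable_eplb
    return config_cls(**kwargs)
```
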
6. ✅ Upgrade vLLM commit to 0123
(dc917cceb877dfd13f98c538c4c96158047d98bd).
Set `temperature=0.0` explicitly, since the default temperature value was removed
in https://github.com/vllm-project/vllm/pull/32723.
Test result:
https://github.com/vllm-project/vllm-ascend/actions/runs/21280796875
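
For example, call sites that relied on the old default now spell out the temperature (the `max_tokens` value below is a placeholder):

```python
# Sketch only: after vllm-project/vllm#32723 there is no implicit default, so
# greedy sampling passes temperature=0.0 explicitly.
from vllm import SamplingParams

greedy = SamplingParams(temperature=0.0, max_tokens=32)
```
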
### Does this PR introduce _any_ user-facing change?
### How was this patch tested?
- vLLM version: v0.14.0
- vLLM main: d68209402d
---------
Signed-off-by: wjunLu <wjunlu217@gmail.com>
Signed-off-by: Meihan-chen <jcccx.cmh@gmail.com>
Co-authored-by: wjunLu <wjunlu217@gmail.com>
File contents (Python, 93 lines, 3.2 KiB):
# Adapted from https://github.com/vllm-project/vllm/blob/main/vllm/v1/worker/gpu/aclgraph_utils.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
#

from contextlib import contextmanager
from typing import Any

import torch
import torch.nn as nn
from vllm.config import VllmConfig
from vllm.v1.kv_cache_interface import KVCacheConfig
from vllm.v1.worker.gpu.block_table import BlockTables
from vllm.v1.worker.gpu.cudagraph_utils import CudaGraphManager
from vllm.v1.worker.gpu.cudagraph_utils import \
    prepare_inputs_to_capture as prepare_inputs_to_capture_gpu
from vllm.v1.worker.gpu.input_batch import InputBuffers

from vllm_ascend.worker.v2.utils import torch_cuda_wrapper
from vllm_ascend.utils import vllm_version_is

if vllm_version_is('0.14.1'):
    from vllm.v1.attention.backends.utils import AttentionMetadataBuilder
else:
    from vllm.v1.attention.backend import AttentionMetadataBuilder


class AclGraphManager(CudaGraphManager):
    """ACL Graph Manager for Ascend NPUs."""

    def __init__(self, vllm_config: VllmConfig, device: torch.device):
        with torch_cuda_wrapper():
            super().__init__(vllm_config, device)

    def capture_graph(
        self,
        num_tokens: int,
        model: nn.Module,
        input_buffers: InputBuffers,
        block_tables: BlockTables,
        attn_metadata_builders: list[AttentionMetadataBuilder],
        kv_cache_config: KVCacheConfig,
    ) -> None:
        with (torch_cuda_wrapper(), prepare_capture_inputs_wrapper()):
            super().capture_graph(
                num_tokens,
                model,
                input_buffers,
                block_tables,
                attn_metadata_builders,
                kv_cache_config,
            )


@contextmanager
def prepare_capture_inputs_wrapper():
    """Context manager to override input preparation for NPU graph capture."""
    # TODO(Ronald1995): make prepare_inputs_to_capture a static method
    # in CudaGraphManager.
    global prepare_inputs_to_capture_gpu
    try:
        ori_func = prepare_inputs_to_capture_gpu
        prepare_inputs_to_capture_gpu = prepare_inputs_to_capture
        yield
    finally:
        prepare_inputs_to_capture_gpu = ori_func


def prepare_inputs_to_capture(
    num_reqs: int,
    num_tokens: int,
    input_buffers: InputBuffers,
    block_tables: BlockTables,
    attn_metadata_builders: list[AttentionMetadataBuilder],
    max_model_len: int,
    kv_cache_config: KVCacheConfig,
) -> dict[str, Any]:
    # TODO(Ronald1995): Implement NPU-specific input preparation.
    return {}