xc-llm-ascend/vllm_ascend/worker/v2/input_batch.py
Ronald c980e68d40 [Feature] support aclgraph for model runner v2 (#7110)
### What this PR does / why we need it?
This PR adds aclgraph support for model runner v2; see RFC
#5208. The PR contains these modifications:
- adapt to the newest commit of the vLLM main branch.
- supply a unified extra-forward-context interface for both model
runner v1 and model runner v2 (a sketch of the idea follows this list).
- implement graph mode for the main model.
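
As an illustration of the second bullet, here is a minimal sketch of the shape such a unified interface could take. All names here (`set_extra_forward_context`, `get_extra_forward_context`, the context fields) are hypothetical and are not the PR's actual API:

```python
from contextlib import contextmanager
from contextvars import ContextVar

# Hypothetical names: illustrative only, not the PR's actual API.
_EXTRA_FORWARD_CONTEXT: ContextVar[dict | None] = ContextVar(
    "extra_forward_context", default=None)


@contextmanager
def set_extra_forward_context(attn_state=None, num_tokens: int = 0):
    """One entry point that both model runner v1 and v2 enter before a
    forward pass, stashing extra Ascend-specific state for its duration."""
    token = _EXTRA_FORWARD_CONTEXT.set(
        {"attn_state": attn_state, "num_tokens": num_tokens})
    try:
        yield
    finally:
        _EXTRA_FORWARD_CONTEXT.reset(token)


def get_extra_forward_context() -> dict | None:
    """Read the extra context from anywhere inside the forward pass."""
    return _EXTRA_FORWARD_CONTEXT.get()
```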

### Does this PR introduce _any_ user-facing change?
no

### How was this patch tested?

- vLLM version: v0.16.0
- vLLM main:
4034c3d32e

---------

Signed-off-by: Ronald1995 <ronaldautomobile@163.com>
2026-03-13 09:11:46 +08:00


# Adapted from https://github.com/vllm-project/vllm/blob/main/vllm/v1/worker/gpu/input_batch.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
#
from dataclasses import asdict, dataclass

import numpy as np
import torch
from vllm.v1.worker.gpu.input_batch import InputBatch, InputBuffers

from vllm_ascend.attention.attention_v1 import AscendAttentionState

class AscendInputBuffers(InputBuffers):
    """Input buffers for Ascend NPUs."""

    def __init__(
        self,
        max_num_reqs: int,
        max_num_tokens: int,
        device: torch.device,
    ):
        super().__init__(
            max_num_reqs,
            max_num_tokens,
            device,
        )
        del self.query_start_loc
        # NOTE: For FULL graph mode, we change +1 to +2 to reserve extra
        # space for padding. See _pad_query_start_loc_for_fia.
        self.query_start_loc: torch.Tensor = torch.zeros(
            max_num_reqs + 2,
            dtype=torch.int32,
            device=device,
        )
        # Create seq_lens_cpu and seq_lens_np.
        # The NPU attention backend still needs seq_lens on the CPU side.
        self.seq_lens_cpu: torch.Tensor = torch.zeros(
            max_num_reqs,
            dtype=torch.int32,
            device="cpu",
        )
        # seq_lens_np and seq_lens_cpu share the same memory;
        # seq_lens_np is defined for easier calculation with numpy.
        self.seq_lens_np: np.ndarray = self.seq_lens_cpu.numpy()
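
# ---------------------------------------------------------------------------
# Illustrative note (not part of the original file): seq_lens_np above is a
# zero-copy view, because Tensor.numpy() on a CPU tensor shares storage with
# the tensor, so numpy writes are immediately visible through seq_lens_cpu.
def _demo_seq_lens_shared_memory() -> None:
    seq_lens_cpu = torch.zeros(4, dtype=torch.int32, device="cpu")
    seq_lens_np = seq_lens_cpu.numpy()
    seq_lens_np[:3] = 7
    assert seq_lens_cpu.tolist() == [7, 7, 7, 0]
# ---------------------------------------------------------------------------
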
@dataclass
class AscendInputBatch(InputBatch):
    """Input batch for Ascend NPUs."""

    # Create seq_lens_np.
    # The NPU attention backend still needs seq_lens on the CPU side.
    seq_lens_np: np.ndarray
    # attn_state is used to build the attention metadata.
    attn_state: AscendAttentionState | None = None

    @classmethod
    def make_dummy(
        cls,
        num_reqs: int,
        num_tokens: int,
        input_buffers: AscendInputBuffers,
        device: torch.device,
    ) -> "AscendInputBatch":
        """Override make_dummy to also calculate seq_lens_np."""
        input_batch = InputBatch.make_dummy(
            num_reqs,
            num_tokens,
            input_buffers,
            device,
        )
        # For a dummy batch, seq_len equals query_len: spread the tokens
        # evenly across the requests and give the remainder to the last one.
        input_buffers.seq_lens_np[:num_reqs] = num_tokens // num_reqs
        input_buffers.seq_lens_np[num_reqs - 1] += num_tokens % num_reqs
        # Zero-pad the tail for full graph mode.
        input_buffers.seq_lens_np[num_reqs:] = 0
        seq_lens_np = input_buffers.seq_lens_np[:num_reqs]
        # A dummy run is used either for dp or for memory profiling.
        # For a dp dummy run, num_tokens is set to 1, so attn_state is set
        # to DecodeOnly. For a memory-profiling dummy run, attention
        # metadata isn't needed, so DecodeOnly is safe there as well.
        # NOTE: attn_state is passed to cls() directly because asdict() only
        # serializes declared dataclass fields; assigning it on the base
        # InputBatch instance would be silently dropped.
        return cls(
            **asdict(input_batch),
            seq_lens_np=seq_lens_np,
            attn_state=AscendAttentionState.DecodeOnly,
        )
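
# ---------------------------------------------------------------------------
# Hypothetical usage sketch (not part of the original file). Assumes torch_npu
# is installed and an Ascend NPU is visible; builds dummy inputs the way the
# model runner would for memory profiling or a data-parallel dummy run.
def _demo_make_dummy() -> None:
    device = torch.device("npu:0")  # assumption: an Ascend NPU is present
    buffers = AscendInputBuffers(max_num_reqs=8,
                                 max_num_tokens=1024,
                                 device=device)
    batch = AscendInputBatch.make_dummy(
        num_reqs=4,
        num_tokens=1024,
        input_buffers=buffers,
        device=device,
    )
    # 1024 tokens over 4 requests: 256 each; any remainder goes to the last.
    assert int(batch.seq_lens_np.sum()) == 1024
    assert batch.attn_state == AscendAttentionState.DecodeOnly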