# Adapted from https://github.com/vllm-project/vllm/blob/main/vllm/v1/worker/gpu/block_table.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
#

import torch

from vllm.v1.worker.gpu.block_table import BlockTables


class AscendBlockTables(BlockTables):
    """Block table for Ascend NPUs."""

    def __init__(
        self,
        block_sizes: list[int],
        max_num_reqs: int,
        max_num_batched_tokens: int,
        max_model_len: int,
        device: torch.device,
        cp_size: int = 1,
        cp_rank: int = 0,
        cp_interleave: int = 1,
    ):
        super().__init__(
            block_sizes,
            max_num_reqs,
            max_num_batched_tokens,
            max_model_len,
            device,
            cp_size,
            cp_rank,
            cp_interleave,
        )
        # We override this attribute below, so delete the parent's tensor
        # first to ensure it is collected by the Python GC immediately.
        del self.slot_mappings

        # vllm-ascend's reshape_and_cache function requires slot_mappings to
        # be int32, so redefine slot_mappings with dtype int32.
        self.slot_mappings: torch.Tensor = torch.zeros(
            self.num_kv_cache_groups,
            self.max_num_batched_tokens,
            dtype=torch.int32,
            device=self.device,
        )
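

# A minimal usage sketch for illustration only. The parameter values and the
# "npu" device string below are assumptions, not values used by vllm-ascend;
# the only behavioral difference from the parent class that this file
# introduces is the int32 dtype of slot_mappings.
#
#     block_tables = AscendBlockTables(
#         block_sizes=[128],
#         max_num_reqs=256,
#         max_num_batched_tokens=8192,
#         max_model_len=4096,
#         device=torch.device("npu"),
#     )
#     assert block_tables.slot_mappings.dtype == torch.int32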