### What this PR does / why we need it?
Add two custom kernels (bgmv_shrink and bgmv_expand) to improve the
performance of LoRA.
### Does this PR introduce _any_ user-facing change?
no user-facing change
### How was this patch tested?
We add unit test files to test the custom AscendC kernels. See
vllm-ascend/tests/e2e/singlecard/ops/test_bgmv_expand.py and
vllm-ascend/tests/e2e/singlecard/ops/test_bgmv_shrink.py
Based on an actual test of the Qwen2.5 7B model using vllm-ascend
version v0.9.2.rc1, the TTFT, TPOT and throughput have improved by
about 70%.
- vLLM version: v0.9.2
- vLLM main:
40d86ee412
---------
Signed-off-by: taoxudonghaha <justsheldon@163.com>
113 lines · 3.9 KiB · Python
#
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
|
import torch
|
|
|
|
|
|
def bgmv_shrink(inputs: torch.Tensor,
                lora_a_weights: torch.Tensor,
                output_tensor: torch.Tensor,
                lora_indices_tensor: torch.Tensor,
                scaling: float = 1.0):
    """Dispatch a batched LoRA "shrink" to the custom ``_C.bgmv_shrink`` op.

    Thin pass-through wrapper; note the kernel takes the index tensor
    *before* the output tensor, while this wrapper's signature places the
    output first (to mirror the other ops in this module).

    Args:
        inputs: per-token input activations.
        lora_a_weights: stacked LoRA "A" weight matrices.
        output_tensor: destination tensor written by the kernel.
        lora_indices_tensor: per-token LoRA adapter indices.
        scaling: scalar multiplier forwarded to the kernel.
    """
    return torch.ops._C.bgmv_shrink(inputs, lora_a_weights,
                                    lora_indices_tensor, output_tensor,
                                    scaling)
|
|
|
|
|
|
def bgmv_expand(inputs: torch.Tensor,
                lora_b_weights: torch.Tensor,
                output_tensor: torch.Tensor,
                lora_indices_tensor: torch.Tensor,
                add_inputs: bool = True):
    """Dispatch a batched LoRA "expand" to the custom ``_C.bgmv_expand`` op,
    covering the full output width (slice offset 0, slice size = dim 1 of
    the output).

    NOTE(review): ``add_inputs`` is accepted for API compatibility but is
    not forwarded to the kernel -- confirm the kernel's accumulate
    behaviour matches the intended default.

    Args:
        inputs: per-token intermediate activations.
        lora_b_weights: stacked LoRA "B" weight matrices.
        output_tensor: destination tensor written by the kernel.
        lora_indices_tensor: per-token LoRA adapter indices.
        add_inputs: see NOTE above; currently unused by this wrapper.
    """
    full_width = output_tensor.size(1)
    return torch.ops._C.bgmv_expand(inputs, lora_b_weights,
                                    lora_indices_tensor, output_tensor, 0,
                                    full_width)
|
|
|
|
|
|
def bgmv_expand_slice(inputs: torch.Tensor,
                      lora_b_weights: torch.Tensor,
                      output_tensor: torch.Tensor,
                      lora_indices_tensor: torch.Tensor,
                      slice_offset: int,
                      slice_size: int,
                      add_inputs: bool = True):
    """Dispatch a batched LoRA "expand" into a column slice of the output
    via the custom ``_C.bgmv_expand`` op.

    NOTE(review): ``add_inputs`` is accepted for API compatibility but is
    not forwarded to the kernel -- confirm the kernel's accumulate
    behaviour matches the intended default.

    Args:
        inputs: per-token intermediate activations.
        lora_b_weights: stacked LoRA "B" weight matrices.
        output_tensor: destination tensor written by the kernel.
        lora_indices_tensor: per-token LoRA adapter indices.
        slice_offset: starting column of the target slice in the output.
        slice_size: width of the target slice.
        add_inputs: see NOTE above; currently unused by this wrapper.
    """
    return torch.ops._C.bgmv_expand(inputs, lora_b_weights,
                                    lora_indices_tensor, output_tensor,
                                    slice_offset, slice_size)
|
|
|
|
|
|
def sgmv_shrink(
    inputs: torch.Tensor,
    lora_a_weights: torch.Tensor,
    output_tensor: torch.Tensor,
    b_seq_start_loc: torch.Tensor,
    seq_len_tensor: torch.Tensor,
    lora_indices_tensor: torch.Tensor,
    batches: int,
    max_seq_length: int,
    token_nums: int,
    scaling: float,
):
    """Sequence-grouped LoRA "shrink", implemented on top of ``bgmv_shrink``.

    Each sequence's adapter index is repeated once per token in that
    sequence, turning the grouped problem into a flat batched one.

    ``b_seq_start_loc``, ``batches``, ``max_seq_length`` and ``token_nums``
    are unused here; they are kept to match the sgmv interface expected by
    callers.
    """
    # One adapter index per token: index i of lora_indices_tensor is
    # repeated seq_len_tensor[i] times.
    per_token_indices = torch.repeat_interleave(lora_indices_tensor,
                                                seq_len_tensor)
    bgmv_shrink(inputs, lora_a_weights, output_tensor, per_token_indices,
                scaling)
|
|
|
|
|
|
def sgmv_expand(inputs: torch.Tensor,
                lora_b_weights: torch.Tensor,
                output_tensor: torch.Tensor,
                b_seq_start_loc: torch.Tensor,
                seq_len_tensor: torch.Tensor,
                lora_indices_tensor: torch.Tensor,
                batches: int,
                max_seq_length: int,
                token_nums: int,
                add_inputs: bool = False):
    """Sequence-grouped LoRA "expand", implemented on top of ``bgmv_expand``.

    Each sequence's adapter index is repeated once per token in that
    sequence, turning the grouped problem into a flat batched one.

    ``b_seq_start_loc``, ``batches``, ``max_seq_length`` and ``token_nums``
    are unused here; they are kept to match the sgmv interface expected by
    callers.
    """
    # One adapter index per token: index i of lora_indices_tensor is
    # repeated seq_len_tensor[i] times.
    per_token_indices = torch.repeat_interleave(lora_indices_tensor,
                                                seq_len_tensor)
    bgmv_expand(inputs, lora_b_weights, output_tensor, per_token_indices,
                add_inputs)
|
|
|
|
|
|
def sgmv_expand_slice(inputs: torch.Tensor,
                      lora_b_weights: torch.Tensor,
                      output_tensor: torch.Tensor,
                      b_seq_start_loc: torch.Tensor,
                      seq_len_tensor: torch.Tensor,
                      lora_indices_tensor: torch.Tensor,
                      batches: int,
                      max_seq_length: int,
                      token_nums: int,
                      slice_offset: int,
                      slice_size: int,
                      add_inputs: bool = False):
    """Sequence-grouped LoRA "expand" into a column slice of the output,
    implemented on top of ``bgmv_expand_slice``.

    Each sequence's adapter index is repeated once per token in that
    sequence, turning the grouped problem into a flat batched one.

    ``b_seq_start_loc``, ``batches``, ``max_seq_length`` and ``token_nums``
    are unused here; they are kept to match the sgmv interface expected by
    callers.
    """
    # One adapter index per token: index i of lora_indices_tensor is
    # repeated seq_len_tensor[i] times.
    per_token_indices = torch.repeat_interleave(lora_indices_tensor,
                                                seq_len_tensor)
    bgmv_expand_slice(inputs, lora_b_weights, output_tensor,
                      per_token_indices, slice_offset, slice_size,
                      add_inputs)
|