Refactor the ops PyTorch adapter, cleanup for csrc/torch_binding.cpp (#6732)
### What this PR does / why we need it?
Refactor the ops PyTorch adapter and clean up csrc/torch_binding.cpp.
For more details, see
https://github.com/vllm-project/vllm-ascend/issues/6486
### Does this PR introduce _any_ user-facing change?
No
### How was this patch tested?
Installed the new package to exercise the modified ops; here is the
result:
- vLLM version: v0.15.0
- vLLM main:
9562912cea
---------
Signed-off-by: liziyu <liziyu16@huawei.com>
Signed-off-by: wangxiaoteng <wangxiaoteng@huawei.com>
Signed-off-by: luomin2005 <luomin2005@huawei.com>
Co-authored-by: liziyu <56102866+liziyu179@users.noreply.github.com>
Co-authored-by: wangxiaoteng <wangxiaoteng@huawei.com>
@@ -0,0 +1,64 @@
```cpp
/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2026. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef SPARSE_FLASH_ATTENTION_TORCH_ADPT_H
#define SPARSE_FLASH_ATTENTION_TORCH_ADPT_H

namespace vllm_ascend {

at::Tensor npu_sparse_flash_attention(
    const at::Tensor &query, const at::Tensor &key, const at::Tensor &value,
    const at::Tensor &sparse_indices, double scale_value, int64_t sparse_block_size,
    const c10::optional<at::Tensor> &block_table,
    const c10::optional<at::Tensor> &actual_seq_lengths_query,
    const c10::optional<at::Tensor> &actual_seq_lengths_kv,
    const c10::optional<at::Tensor> &query_rope,
    const c10::optional<at::Tensor> &key_rope, c10::string_view layout_query,
    c10::string_view layout_kv, int64_t sparse_mode)
{
    std::string layout_query_str = std::string(layout_query);
    std::string layout_kv_str = std::string(layout_kv);

    // Reject degenerate shapes before launching the kernel.
    for (size_t i = 0; i < query.sizes().size(); i++) {
        TORCH_CHECK(query.size(i) > 0, "All values within query's shape should be greater "
                                       "than 0, but shape[", i, "] is ", query.size(i));
    }
    // Construct the output tensor with the same shape and dtype as query.
    at::Tensor output = at::empty(query.sizes(), query.options().dtype(query.dtype()));
    // Convert the layout strings to the char* form the aclnn API expects.
    char *layout_query_ptr = const_cast<char *>(layout_query_str.c_str());
    char *layout_kv_ptr = const_cast<char *>(layout_kv_str.c_str());

    // Dispatch the sparse flash-attention kernel through the aclnn entry point.
    EXEC_NPU_CMD(aclnnSparseFlashAttention, query, key, value, sparse_indices,
                 block_table, actual_seq_lengths_query, actual_seq_lengths_kv,
                 query_rope, key_rope, scale_value, sparse_block_size,
                 layout_query_ptr, layout_kv_ptr, sparse_mode, output);
    return output;
}

} // namespace vllm_ascend

#endif // SPARSE_FLASH_ATTENTION_TORCH_ADPT_H
```
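For readers following the refactor, below is a minimal sketch of how an adapter like this is typically registered as a Torch custom op from csrc/torch_binding.cpp. The `_C` namespace, the exact schema string, and the `PrivateUse1` dispatch key are assumptions for illustration (torch_npu exposes Ascend devices through `PrivateUse1`), not the literal contents of this PR.

```cpp
// Hypothetical registration sketch, not copied from this PR: binds the
// adapter above to a Torch custom-op schema so Python can dispatch to it.
#include <torch/library.h>

TORCH_LIBRARY_FRAGMENT(_C, m) {
    // Schema types mirror the C++ signature: double -> float, int64_t -> int,
    // c10::optional<at::Tensor> -> Tensor?, c10::string_view -> str.
    m.def(
        "npu_sparse_flash_attention(Tensor query, Tensor key, Tensor value, "
        "Tensor sparse_indices, float scale_value, int sparse_block_size, "
        "Tensor? block_table, Tensor? actual_seq_lengths_query, "
        "Tensor? actual_seq_lengths_kv, Tensor? query_rope, Tensor? key_rope, "
        "str layout_query, str layout_kv, int sparse_mode) -> Tensor");
}

TORCH_LIBRARY_IMPL(_C, PrivateUse1, m) {
    // Route NPU tensors to the adapter defined in the header above.
    m.impl("npu_sparse_flash_attention", &vllm_ascend::npu_sparse_flash_attention);
}
```

With a registration along these lines, the op is reachable from Python as `torch.ops._C.npu_sparse_flash_attention(...)`, which lets the Python-side adapter stay a thin wrapper around the C++ binding.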