[Performance]: Custom AscendC Kernel of Multi-Step Prepare Input (#814)

### What this PR does / why we need it?

- As proposed in https://github.com/vllm-project/vllm-ascend/issues/807,
this PR adds a custom AscendC kernel for multi-step prepare input.
- It also fixes a bug in multi_step_runner.py that we found when using
multi-step scheduling on the V0 engine.


### Does this PR introduce _any_ user-facing change?

No user-facing change.


### How was this patch tested?
We added a unit test and an offline inference example covering the custom
AscendC kernel; see test/ops/test_multi_step.py and
examples/offline_multi_step.py.

---------

Signed-off-by: wan_danfeng <wonderful199082@126.com>
Wan_Danfeng committed 2025-05-20 09:31:30 +08:00 (via GitHub)
parent 00e0243561, commit 5cf9ff18e9
11 changed files with 629 additions and 35 deletions


@@ -98,6 +98,87 @@ std::tuple<at::Tensor, at::Tensor> rotary_embedding(at::Tensor &positions, at::T
  cmd.Run();
  return {query_dst, key_dst};
}

// Check a tensor's shape, contiguity and dtype before its raw pointer is used;
// a size of -1 means "any extent is accepted for this dimension".
void verify_tensor(std::string const& name, at::Tensor const& t,
                   int64_t const size_0, int64_t const size_1,
                   c10::ScalarType const type) {
  bool size_0_cond = true;
  if (size_0 != -1) {
    size_0_cond = t.size(0) == size_0;
  }
  bool size_1_cond = true;
  if (size_1 != -1) {
    size_1_cond = t.size(1) == size_1;
  }
  bool is_contiguous = t.is_contiguous();
  bool same_type = t.dtype() == type;
  bool pass = size_0_cond && size_1_cond && is_contiguous && same_type;
  if (!pass) {
    TORCH_CHECK(false, "tensor: name = ", name, ", shape = ", t.sizes(),
                " is_cont = ", t.is_contiguous(), ", type = ", t.dtype(),
                " is not as expected: shape = [", size_0, ", ", size_1,
                "], type = ", type);
  }
}
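// Illustrative only, not part of the diff: -1 acts as a wildcard dimension in
// verify_tensor, so a contiguous [4, k] int32 tensor passes for any k. The
// helper name below is hypothetical.
static void verify_tensor_example() {
  at::Tensor seq_lens = at::zeros({4, 7}, at::kInt);
  verify_tensor("seq_lens", seq_lens, /*size_0=*/4, /*size_1=*/-1, at::kInt);  // passes: dim 1 unchecked
  verify_tensor("seq_lens", seq_lens, /*size_0=*/4, /*size_1=*/7, at::kInt);   // passes: exact match
  // verify_tensor("seq_lens", seq_lens, 4, 8, at::kInt);  // would fail via TORCH_CHECK
}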
// Advance the decode inputs by one step: validate every buffer, then hand the
// raw pointers to the AscendC kernel on the current NPU stream.
void advance_step_flashattn_ascendc(
    int64_t num_seqs, int64_t num_queries, int64_t block_size,
    at::Tensor& input_tokens,
    at::Tensor& sampled_token_ids,
    at::Tensor& input_positions,
    at::Tensor& seq_lens,
    at::Tensor& slot_mapping,
    at::Tensor& block_tables) {
  // Verify all tensors before touching their raw pointers.
  verify_tensor("input_tokens", input_tokens, num_seqs, -1, at::kLong);
  verify_tensor("sampled_token_ids", sampled_token_ids, num_queries, 1, at::kLong);
  verify_tensor("input_positions", input_positions, num_seqs, -1, at::kLong);
  verify_tensor("seq_lens", seq_lens, num_seqs, -1, at::kInt);
  verify_tensor("slot_mapping", slot_mapping, num_seqs, -1, at::kInt);
  verify_tensor("block_tables", block_tables, num_seqs, -1, at::kInt);

  int64_t* input_tokens_ptr = input_tokens.data_ptr<int64_t>();
  int64_t* sampled_token_ids_ptr = sampled_token_ids.data_ptr<int64_t>();
  int64_t* input_positions_ptr = input_positions.data_ptr<int64_t>();
  int32_t* seq_lens_ptr = seq_lens.data_ptr<int32_t>();
  int32_t* slot_mapping_ptr = slot_mapping.data_ptr<int32_t>();
  int32_t* block_tables_ptr = block_tables.data_ptr<int32_t>();

  // Launch on the current NPU stream of the active device.
  int32_t device_id;
  aclrtGetDevice(&device_id);
  auto npu_stream = c10_npu::getCurrentNPUStream(device_id);
  aclrtStream stream = npu_stream.stream();

  at_npu::native::OpCommand cmd;
  cmd.Name("advance_step_flashattn_ascendc");
  cmd.SetCustomHandler([stream, num_seqs, num_queries,
                        block_size, input_tokens_ptr, sampled_token_ids_ptr,
                        input_positions_ptr, seq_lens_ptr, slot_mapping_ptr,
                        block_tables_ptr, block_tables]() -> int {
    launch_advance_step_flashattn(stream,
                                  num_seqs,
                                  num_queries,
                                  block_size,
                                  input_tokens_ptr,
                                  sampled_token_ids_ptr,
                                  input_positions_ptr,
                                  seq_lens_ptr,
                                  slot_mapping_ptr,
                                  block_tables_ptr,
                                  block_tables.stride(0));
    return 0;
  });
  cmd.Run();
}
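// For reference, a host-side sketch of the per-sequence update the device
// kernel is expected to perform, given as a hypothetical helper. This is an
// assumption based on vLLM's advance_step semantics; the real work happens
// on-device inside launch_advance_step_flashattn.
static void advance_step_reference(
    int64_t num_queries, int64_t block_size,
    int64_t* input_tokens_ptr, int64_t const* sampled_token_ids_ptr,
    int64_t* input_positions_ptr, int32_t* seq_lens_ptr,
    int32_t* slot_mapping_ptr, int32_t const* block_tables_ptr,
    int64_t block_tables_stride) {
  for (int64_t i = 0; i < num_queries; ++i) {
    int64_t const next_pos = input_positions_ptr[i] + 1;   // advance position by one
    input_tokens_ptr[i] = sampled_token_ids_ptr[i];        // feed back the sampled token
    input_positions_ptr[i] = next_pos;
    seq_lens_ptr[i] = static_cast<int32_t>(next_pos) + 1;  // positions are 0-based
    // Map the new position to its KV-cache slot through the block table.
    int32_t const* row = block_tables_ptr + i * block_tables_stride;
    slot_mapping_ptr[i] = static_cast<int32_t>(
        row[next_pos / block_size] * block_size + next_pos % block_size);
  }
}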
} // namespace vllm_ascend
TORCH_LIBRARY_EXPAND(_C, ops)
@@ -113,6 +194,11 @@ TORCH_LIBRARY_EXPAND(_C, ops)
" Tensor! key, int head_size,"
" Tensor cos_sin_cache, bool is_neox) -> (Tensor query, Tensor key)");
ops.impl("rotary_embedding", torch::kPrivateUse1, &vllm_ascend::rotary_embedding);
ops.def(
"advance_step_flashattn_ascendc(int num_seqs, int num_queries, int block_size,"
" Tensor! input_tokens, Tensor! sampled_token_ids, Tensor! input_positions,"
" Tensor! seq_lens, Tensor! slot_mapping, Tensor! block_tables) -> ()");
ops.impl("advance_step_flashattn_ascendc", torch::kPrivateUse1, &vllm_ascend::advance_step_flashattn_ascendc);
}
REGISTER_EXTENSION(_C)
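
Once the extension library is loaded, the registered op is reachable through the
PyTorch dispatcher (from Python as torch.ops._C.advance_step_flashattn_ascendc).
Below is a minimal C++ call-site sketch using the stock dispatcher API;
call_advance_step is a hypothetical helper and tensor setup is omitted:

#include <ATen/ATen.h>
#include <ATen/core/dispatch/Dispatcher.h>

void call_advance_step(int64_t num_seqs, int64_t num_queries, int64_t block_size,
                       at::Tensor& input_tokens, at::Tensor& sampled_token_ids,
                       at::Tensor& input_positions, at::Tensor& seq_lens,
                       at::Tensor& slot_mapping, at::Tensor& block_tables) {
  // Resolve the schema registered above and dispatch to the NPU backend.
  static auto op = c10::Dispatcher::singleton()
      .findSchemaOrThrow("_C::advance_step_flashattn_ascendc", "")
      .typed<void(int64_t, int64_t, int64_t, at::Tensor&, at::Tensor&,
                  at::Tensor&, at::Tensor&, at::Tensor&, at::Tensor&)>();
  op.call(num_seqs, num_queries, block_size, input_tokens, sampled_token_ids,
          input_positions, seq_lens, slot_mapping, block_tables);
}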