[Misc][V0 Deprecation] Remove V0 Related Custom Ops (#1871)

### What this PR does / why we need it?
This PR is part of
https://github.com/vllm-project/vllm-ascend/issues/1620. It removes custom
ops that were only used by the now-deprecated V0 engine; the hunks below
delete the `advance_step_flashattn_ascendc` op, its `verify_tensor` helper,
and the corresponding `_C` library registration.

- vLLM version: v0.9.2
- vLLM main:
ca4eb82bcb

---------

Signed-off-by: shen-shanshan <467638484@qq.com>
Author: Shanshan Shen
Date: 2025-07-18 23:06:03 +08:00 (committed via GitHub)
Parent: 3e39d7234c
Commit: 8a91e6e59c
5 changed files with 0 additions and 591 deletions

@@ -204,87 +204,6 @@ std::tuple<at::Tensor, at::Tensor> get_masked_input_and_mask(
  cmd.Run();
  return {masked_input, mask};
}
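
// Shape/dtype guard for advance_step_flashattn_ascendc below: a size of -1
// skips that dimension's check; contiguity and dtype are always enforced.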
void verify_tensor(std::string const& name, at::Tensor const& t,
                   int64_t const size_0, int64_t const size_1,
                   c10::ScalarType const type) {
  bool size_0_cond = true;
  if (size_0 != -1) {
    size_0_cond = t.size(0) == size_0;
  }
  bool size_1_cond = true;
  if (size_1 != -1) {
    size_1_cond = t.size(1) == size_1;
  }
  bool is_contiguous = t.is_contiguous();
  bool same_type = t.dtype() == type;
  bool pass = size_0_cond && size_1_cond && is_contiguous && same_type;
  if (!pass) {
    TORCH_CHECK(false, "tensor: name = ", name, ", shape = ", t.sizes(),
                " is_cont = ", t.is_contiguous(), ", type = ", t.dtype(),
                " is not as expected: shape = [", size_0, ", ", size_1,
                "], type = ", type);
  }
}
void advance_step_flashattn_ascendc(
    int64_t num_seqs, int64_t num_queries, int64_t block_size,
    at::Tensor& input_tokens,
    at::Tensor& sampled_token_ids,
    at::Tensor& input_positions,
    at::Tensor& seq_lens,
    at::Tensor& slot_mapping,
    at::Tensor& block_tables) {
  // Verify all tensors
  verify_tensor("input_tokens", input_tokens, num_seqs, -1, at::kLong);
  verify_tensor("sampled_token_ids", sampled_token_ids, num_queries, 1, at::kLong);
  verify_tensor("input_positions", input_positions, num_seqs, -1, at::kLong);
  verify_tensor("seq_lens", seq_lens, num_seqs, -1, at::kInt);
  verify_tensor("slot_mapping", slot_mapping, num_seqs, -1, at::kInt);
  verify_tensor("block_tables", block_tables, num_seqs, -1, at::kInt);

  int64_t* input_tokens_ptr = input_tokens.data_ptr<int64_t>();
  int64_t* sampled_token_ids_ptr = sampled_token_ids.data_ptr<int64_t>();
  int64_t* input_positions_ptr = input_positions.data_ptr<int64_t>();
  int32_t* seq_lens_ptr = seq_lens.data_ptr<int32_t>();
  int32_t* slot_mapping_ptr = slot_mapping.data_ptr<int32_t>();
  int32_t* block_tables_ptr = block_tables.data_ptr<int32_t>();

  int32_t device_id;
  aclrtGetDevice(&device_id);
  auto npu_stream = c10_npu::getCurrentNPUStream(device_id);
  aclrtStream stream = npu_stream.stream();
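
  // Hand the kernel launch to OpCommand as a custom handler so it is enqueued
  // on the current NPU stream; block_tables is captured by value so its row
  // stride can be read inside the handler.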
  at_npu::native::OpCommand cmd;
  cmd.Name("advance_step_flashattn_ascendc");
  cmd.SetCustomHandler([stream, num_seqs, num_queries,
                        block_size, input_tokens_ptr, sampled_token_ids_ptr,
                        input_positions_ptr, seq_lens_ptr, slot_mapping_ptr,
                        block_tables_ptr, block_tables]() -> int {
    launch_advance_step_flashattn(stream,
                                  num_seqs,
                                  num_queries,
                                  block_size,
                                  input_tokens_ptr,
                                  sampled_token_ids_ptr,
                                  input_positions_ptr,
                                  seq_lens_ptr,
                                  slot_mapping_ptr,
                                  block_tables_ptr,
                                  block_tables.stride(0));
    return 0;
  });
  cmd.Run();
  return;
}
} // namespace vllm_ascend
TORCH_LIBRARY_EXPAND(_C, ops)
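
The hunk above removes only the host-side wrapper; the device kernel entry
point, `launch_advance_step_flashattn`, is presumably deleted in one of the
other changed files not shown here. For readers tracking what the op did,
below is a plain-PyTorch sketch of the per-query bookkeeping, modeled on the
analogous CUDA `advance_step` kernel in upstream vLLM. The function name and
the loop are illustrative assumptions, not this repo's API.

```python
import torch

def advance_step_reference(num_queries: int, block_size: int,
                           input_tokens: torch.Tensor,       # [num_seqs], int64
                           sampled_token_ids: torch.Tensor,  # [num_queries, 1], int64
                           input_positions: torch.Tensor,    # [num_seqs], int64
                           seq_lens: torch.Tensor,           # [num_seqs], int32
                           slot_mapping: torch.Tensor,       # [num_seqs], int32
                           block_tables: torch.Tensor) -> None:  # [num_seqs, max_blocks], int32
    # Assumption: mirrors upstream vLLM's advance_step semantics.
    for i in range(num_queries):
        # Feed the previously sampled token back in as the next input token.
        input_tokens[i] = sampled_token_ids[i, 0]
        # The next position equals the current sequence length; then grow it.
        next_pos = int(seq_lens[i])
        input_positions[i] = next_pos
        seq_lens[i] = next_pos + 1
        # Map the new position to a KV-cache slot through the block table.
        block_idx, block_off = divmod(next_pos, block_size)
        slot_mapping[i] = int(block_tables[i, block_idx]) * block_size + block_off
```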
@@ -309,12 +228,6 @@ TORCH_LIBRARY_EXPAND(_C, ops)
      " int added_vocab_start_index, "
      " int added_vocab_end_index) -> (Tensor masked_input, Tensor mask)");
  ops.impl("get_masked_input_and_mask", torch::kPrivateUse1,
           &vllm_ascend::get_masked_input_and_mask);

  ops.def(
      "advance_step_flashattn_ascendc(int num_seqs, int num_queries, int block_size,"
      " Tensor! input_tokens, Tensor! sampled_token_ids, Tensor! input_positions,"
      " Tensor! seq_lens, Tensor! slot_mapping, Tensor! block_tables) -> ()");
  ops.impl("advance_step_flashattn_ascendc", torch::kPrivateUse1,
           &vllm_ascend::advance_step_flashattn_ascendc);
}
REGISTER_EXTENSION(_C)
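
For context, before this PR the schema above made the op reachable from
Python through the dispatcher. A minimal call-site sketch, assuming
`torch_npu` is available and the `_C` extension is loaded; shapes and dtypes
follow the `verify_tensor` checks, and every name other than the op itself is
illustrative:

```python
import torch
import torch_npu  # noqa: F401  (assumption: provides the "npu" device backend)

num_seqs, num_queries, block_size = 4, 4, 16
max_blocks = 8  # illustrative block-table width

input_tokens = torch.zeros(num_seqs, dtype=torch.long, device="npu")
sampled_token_ids = torch.zeros(num_queries, 1, dtype=torch.long, device="npu")
input_positions = torch.zeros(num_seqs, dtype=torch.long, device="npu")
seq_lens = torch.ones(num_seqs, dtype=torch.int32, device="npu")
slot_mapping = torch.zeros(num_seqs, dtype=torch.int32, device="npu")
block_tables = torch.zeros(num_seqs, max_blocks, dtype=torch.int32, device="npu")

# The op mutated its Tensor! arguments in place and returned nothing.
torch.ops._C.advance_step_flashattn_ascendc(
    num_seqs, num_queries, block_size,
    input_tokens, sampled_token_ids, input_positions,
    seq_lens, slot_mapping, block_tables)
```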