fix moe_align_block_size (#2615)

Author: HandH1998 (committed by GitHub)
Date: 2024-12-27 23:32:53 +08:00
parent 70dc2fbe2d
commit 77d1210b36
4 changed files with 24 additions and 18 deletions


@@ -118,31 +118,19 @@ __global__ void moe_align_block_size_kernel(scalar_t* __restrict__ topk_ids, int
 }
 
 void moe_align_block_size(torch::Tensor topk_ids, int64_t num_experts, int64_t block_size,
-                          torch::Tensor sorted_token_ids, torch::Tensor experts_ids,
-                          torch::Tensor num_tokens_post_pad) {
+                          torch::Tensor sorted_token_ids, torch::Tensor experts_ids, torch::Tensor num_tokens_post_pad,
+                          torch::Tensor token_cnts_buffer, torch::Tensor cumsum_buffer) {
   const cudaStream_t stream = at::cuda::getCurrentCUDAStream();
   DISPATCH_INTEGRAL_TYPES(topk_ids.scalar_type(), "moe_align_block_size_kernel", [&] {
-    // calc needed amount of shared mem for `tokens_cnts` and `cumsum`
-    // tensors
     const int32_t num_thread = max((int32_t)num_experts, WARP_SIZE);
-    const int32_t mem_tokens_cnts = ((num_experts + 1) * num_experts) * sizeof(int32_t);
-    const int32_t mem_cumsum = (num_experts + 1) * sizeof(int32_t);
-
-    // allocate global memory
-    int32_t* tokens_cnts;
-    int32_t* cumsum;
-    cudaMalloc(&tokens_cnts, mem_tokens_cnts);
-    cudaMalloc(&cumsum, mem_cumsum);
-
-    // set dynamic shared mem
     auto kernel = moe_align_block_size_kernel<scalar_t>;
     kernel<<<1, num_thread, 0, stream>>>(topk_ids.data_ptr<scalar_t>(), sorted_token_ids.data_ptr<int32_t>(),
                                          experts_ids.data_ptr<int32_t>(), num_tokens_post_pad.data_ptr<int32_t>(),
-                                         num_experts, block_size, topk_ids.numel(), tokens_cnts, cumsum);
-
-    cudaFree(tokens_cnts);
-    cudaFree(cumsum);
+                                         num_experts, block_size, topk_ids.numel(),
+                                         token_cnts_buffer.data_ptr<int32_t>(), cumsum_buffer.data_ptr<int32_t>());
  });
}
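Before this change, every call paid for a cudaMalloc/cudaFree pair to create and destroy the `tokens_cnts` and `cumsum` scratch space. The fix moves ownership of that scratch space to the caller, which can allocate it once and reuse it across calls. Below is a minimal caller-side sketch that mirrors the sizes the removed cudaMalloc calls used; the helper name `alloc_moe_align_buffers` is hypothetical and not part of the commit.

import torch

def alloc_moe_align_buffers(num_experts: int, device: str = "cuda"):
    # tokens_cnts scratch: (num_experts + 1) * num_experts int32 counters,
    # matching the removed mem_tokens_cnts allocation
    token_cnts_buffer = torch.empty(
        (num_experts + 1) * num_experts, dtype=torch.int32, device=device
    )
    # cumsum scratch: (num_experts + 1) int32 entries,
    # matching the removed mem_cumsum allocation
    cumsum_buffer = torch.empty(num_experts + 1, dtype=torch.int32, device=device)
    return token_cnts_buffer, cumsum_buffer

Like the old cudaMalloc path, the buffers are left uninitialized here; the kernel's initialization behavior is unchanged by the commit.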


@@ -8,6 +8,8 @@ def moe_align_block_size(
     sorted_token_ids,
     experts_ids,
     num_tokens_post_pad,
+    token_cnts_buffer,
+    cumsum_buffer,
 ):
     _moe_align_block_size(
         topk_ids,
@@ -16,4 +18,6 @@ def moe_align_block_size(
         sorted_token_ids,
         experts_ids,
         num_tokens_post_pad,
+        token_cnts_buffer,
+        cumsum_buffer,
 )
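With the updated wrapper, a call site passes the two extra buffers explicitly. A hedged sketch, reusing the hypothetical helper above and assuming `topk_ids` and the three output tensors are set up as before:

# Hypothetical call site: the first six arguments are unchanged from the
# old signature; only the two trailing scratch buffers are new.
token_cnts_buffer, cumsum_buffer = alloc_moe_align_buffers(num_experts)
moe_align_block_size(
    topk_ids,
    num_experts,
    block_size,
    sorted_token_ids,
    experts_ids,
    num_tokens_post_pad,
    token_cnts_buffer,
    cumsum_buffer,
)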