[chore] Remove unused ep_moe cuda kernels (#9956)

Author: hlu1
Date: 2025-09-06 01:35:50 -07:00
Committed by: GitHub
Parent: 039cef76aa
Commit: 5f1eb20484
13 changed files with 4 additions and 1110 deletions


@@ -209,18 +209,6 @@ TORCH_LIBRARY_FRAGMENT(sgl_kernel, m) {
"num_fused_shared_experts, float routed_scaling_factor, bool apply_routed_scaling_factor_on_output) -> "
"(Tensor[])");
m.impl("moe_fused_gate", torch::kCUDA, &moe_fused_gate);
m.def(
"ep_moe_pre_reorder(Tensor input, Tensor gateup_input, Tensor src2dst, Tensor topk_ids, Tensor "
"a1_scales, int start_expert_id, int end_expert_id, int topk, bool use_per_token_if_dynamic) -> ()");
m.impl("ep_moe_pre_reorder", torch::kCUDA, &ep_moe_pre_reorder);
m.def(
"ep_moe_silu_and_mul(Tensor gateup_output, Tensor down_input, Tensor reorder_topk_ids, Tensor scales, int "
"start_expert_id, int end_expert_id) -> ()");
m.impl("ep_moe_silu_and_mul", torch::kCUDA, &ep_moe_silu_and_mul);
m.def(
"ep_moe_post_reorder(Tensor down_output, Tensor output, Tensor src2dst, Tensor topk_ids, Tensor "
"topk_weights, int start_expert_id, int end_expert_id, int topk) -> ()");
m.impl("ep_moe_post_reorder", torch::kCUDA, &ep_moe_post_reorder);
m.def(
"fp8_blockwise_scaled_grouped_mm(Tensor output, Tensor a_ptrs, Tensor b_ptrs, Tensor out_ptrs, Tensor "
"a_scales_ptrs, Tensor b_scales_ptrs, Tensor a, Tensor b, Tensor scales_a, Tensor scales_b, Tensor "


@@ -1,181 +0,0 @@
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <c10/cuda/CUDAGuard.h>
#include <THC/THCAtomics.cuh>
#include <flashinfer/vec_dtypes.cuh>
#include "utils.h"

template <typename scalar_t>
__global__ void ep_pre_reorder_cuda_kernel(
const scalar_t* __restrict__ input_ptr,
scalar_t* __restrict__ gateup_input_ptr,
const int* __restrict__ src2dst_ptr,
const int* __restrict__ topk_ids_ptr,
const float* __restrict__ a1_scales_ptr,
int start_expert_id,
int end_expert_id,
int topk,
int hidden_size,
bool use_per_token_if_dynamic) {
int token_idx = blockIdx.x;
int tid = threadIdx.x;
const scalar_t* src_ptr = input_ptr + int64_t(token_idx) * hidden_size;
const int* token_src2dst = src2dst_ptr + token_idx * topk;
const int* token_topk_ids = topk_ids_ptr + token_idx * topk;
float scale = 1.0f;
if (a1_scales_ptr != nullptr and use_per_token_if_dynamic) {
scale = 1.0f / a1_scales_ptr[token_idx];
}
for (int k = 0; k < topk; ++k) {
int expert_id = token_topk_ids[k];
if (expert_id < start_expert_id || expert_id > end_expert_id) continue;
if (a1_scales_ptr != nullptr) {
if (!use_per_token_if_dynamic) {
scale = 1.0f / a1_scales_ptr[expert_id - start_expert_id];
}
}
int dst_idx = token_src2dst[k];
scalar_t* dst_ptr = gateup_input_ptr + int64_t(dst_idx) * hidden_size;
constexpr uint32_t vec_size = 16 / sizeof(scalar_t);
using vec_t = flashinfer::vec_t<scalar_t, vec_size>;
int vec_elements = (hidden_size / vec_size) * vec_size;
for (int idx = tid; idx < hidden_size / vec_size; idx += blockDim.x) {
vec_t input_vec, output_vec;
input_vec.cast_load(src_ptr + idx * vec_size);
#pragma unroll
for (uint32_t i = 0; i < vec_size; ++i) {
float val = static_cast<float>(input_vec[i]);
output_vec[i] = static_cast<scalar_t>(val * scale);
}
output_vec.cast_store(dst_ptr + idx * vec_size);
}
for (int idx = vec_elements + tid; idx < hidden_size; idx += blockDim.x) {
float val = static_cast<float>(src_ptr[idx]);
dst_ptr[idx] = static_cast<scalar_t>(val * scale);
}
}
}

template <typename scalar_t>
__global__ void ep_post_reorder_cuda_kernel(
const scalar_t* __restrict__ down_output_ptr,
scalar_t* __restrict__ output_ptr,
const int* __restrict__ src2dst_ptr,
const int* __restrict__ topk_ids_ptr,
const scalar_t* __restrict__ topk_weights_ptr,
int start_expert_id,
int end_expert_id,
int topk,
int hidden_size) {
const int token_idx = blockIdx.x;
const int tid = threadIdx.x;
const int* token_src2dst = src2dst_ptr + token_idx * topk;
const int* token_topk_ids = topk_ids_ptr + token_idx * topk;
const scalar_t* token_topk_weights = topk_weights_ptr + token_idx * topk;
scalar_t* dst_ptr = output_ptr + static_cast<int64_t>(token_idx) * hidden_size;
constexpr uint32_t vec_size = 16 / sizeof(scalar_t);
using vec_t = flashinfer::vec_t<scalar_t, vec_size>;
const int vec_iters = hidden_size / vec_size;
for (int idx = tid; idx < vec_iters; idx += blockDim.x) {
float acc[vec_size] = {0};
for (int k = 0; k < topk; ++k) {
const int expert_id = token_topk_ids[k];
if (expert_id < start_expert_id || expert_id > end_expert_id) continue;
const int src_row = token_src2dst[k];
const scalar_t* src_ptr = down_output_ptr + static_cast<int64_t>(src_row) * hidden_size;
const float weight = static_cast<float>(token_topk_weights[k]);
vec_t src_vec;
src_vec.cast_load(src_ptr + idx * vec_size);
#pragma unroll
for (uint32_t i = 0; i < vec_size; ++i) {
acc[i] += static_cast<float>(src_vec[i]) * weight;
}
}
vec_t out_vec;
#pragma unroll
for (uint32_t i = 0; i < vec_size; ++i)
out_vec[i] = static_cast<scalar_t>(acc[i]);
out_vec.cast_store(dst_ptr + idx * vec_size);
}
}

void ep_moe_pre_reorder(
torch::Tensor input,
torch::Tensor gateup_input,
torch::Tensor src2dst,
torch::Tensor topk_ids,
torch::Tensor a1_scales,
int64_t start_expert_id,
int64_t end_expert_id,
int64_t topk,
bool use_per_token_if_dynamic) {
const int total_blocks = input.size(0);
const int block_size = 512;
dim3 grid(total_blocks);
dim3 block(block_size);
int hidden_size = input.size(1);
DISPATCH_PYTORCH_DTYPE_TO_CTYPE_FLOAT_FP16(input.scalar_type(), scalar_t, [&] {
ep_pre_reorder_cuda_kernel<scalar_t><<<grid, block>>>(
static_cast<scalar_t*>(input.data_ptr()),
static_cast<scalar_t*>(gateup_input.data_ptr()),
src2dst.data_ptr<int>(),
topk_ids.data_ptr<int>(),
a1_scales.defined() ? a1_scales.data_ptr<float>() : nullptr,
start_expert_id,
end_expert_id,
topk,
hidden_size,
use_per_token_if_dynamic);
return true;
});
}

void ep_moe_post_reorder(
torch::Tensor down_output,
torch::Tensor output,
torch::Tensor src2dst,
torch::Tensor topk_ids,
torch::Tensor topk_weights,
int64_t start_expert_id,
int64_t end_expert_id,
int64_t topk) {
const int total_tokens = output.size(0);
const int block_size = 512;
dim3 grid(total_tokens);
dim3 block(block_size);
const int hidden_size = output.size(1);
DISPATCH_PYTORCH_DTYPE_TO_CTYPE_FLOAT_FP16(down_output.scalar_type(), scalar_t, [&] {
ep_post_reorder_cuda_kernel<scalar_t><<<grid, block>>>(
static_cast<scalar_t*>(down_output.data_ptr()),
static_cast<scalar_t*>(output.data_ptr()),
src2dst.data_ptr<int>(),
topk_ids.data_ptr<int>(),
static_cast<scalar_t*>(topk_weights.data_ptr()),
static_cast<int>(start_expert_id),
static_cast<int>(end_expert_id),
static_cast<int>(topk),
hidden_size);
return true;
});
}
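
To make the removed semantics easy to audit: ep_pre_reorder scatters each token's row to per-expert destination rows (optionally scaled by 1/a1_scales), and ep_post_reorder gathers them back with the top-k weights. A hedged plain-float CPU reference for the gather (names and flattened row-major layout are illustrative, not from the codebase):

// Hedged CPU sketch, not from the codebase: float reference for the gather
// that ep_post_reorder_cuda_kernel performed.
#include <cstdint>
#include <vector>

void ep_post_reorder_reference(
    const std::vector<float>& down_output,   // [num_reordered_rows * hidden_size]
    std::vector<float>& output,              // [num_tokens * hidden_size]
    const std::vector<int>& src2dst,         // [num_tokens * topk]
    const std::vector<int>& topk_ids,        // [num_tokens * topk]
    const std::vector<float>& topk_weights,  // [num_tokens * topk]
    int start_expert_id,
    int end_expert_id,
    int topk,
    int hidden_size,
    int num_tokens) {
  for (int t = 0; t < num_tokens; ++t) {
    for (int j = 0; j < hidden_size; ++j) {
      float acc = 0.f;  // tokens with no expert in [start, end] stay zero
      for (int k = 0; k < topk; ++k) {
        const int expert_id = topk_ids[t * topk + k];
        if (expert_id < start_expert_id || expert_id > end_expert_id) continue;
        const int src_row = src2dst[t * topk + k];
        acc += down_output[static_cast<int64_t>(src_row) * hidden_size + j] *
               topk_weights[t * topk + k];
      }
      output[static_cast<int64_t>(t) * hidden_size + j] = acc;
    }
  }
}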


@@ -1,115 +0,0 @@
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <c10/cuda/CUDAGuard.h>
#include <cuda_bf16.h>
#include <cuda_fp16.h>
#include <THC/THCAtomics.cuh>
#include <algorithm>
#include <flashinfer/vec_dtypes.cuh>
#include "utils.h"
using namespace flashinfer;
template <typename scalar_t>
__device__ inline scalar_t silu_quantize(float x);

template <>
__device__ inline float silu_quantize<float>(float x) {
  float y = x / (1.f + __expf(-x));
  return y;
}

template <>
__device__ inline __half silu_quantize<__half>(float x) {
  float y = x / (1.f + __expf(-x));
  return __float2half_rn(y);
}

template <>
__device__ inline __nv_bfloat16 silu_quantize<__nv_bfloat16>(float x) {
  float y = x / (1.f + __expf(-x));
  return __float2bfloat16_rn(y);
}

template <typename scalar_t>
__global__ void ep_moe_act_and_mul_cuda_kernel(
const scalar_t* __restrict__ gateup_output,
scalar_t* __restrict__ down_input,
const int* __restrict__ reorder_topk_ids,
const float* __restrict__ scales,
int start_expert_id,
int end_expert_id,
int hidden_size) {
constexpr uint32_t vec_size = 16 / sizeof(scalar_t);
using vec_t = flashinfer::vec_t<scalar_t, vec_size>;
const int64_t token_idx = blockIdx.x;
const int64_t thread_idx = threadIdx.x;
const int64_t stride = blockDim.x;
const int half_hidden_size = hidden_size >> 1;
const int expert_id = reorder_topk_ids[token_idx];
if (expert_id < start_expert_id || expert_id > end_expert_id) return;
const scalar_t* gate_output_ptr = gateup_output + static_cast<int64_t>(token_idx) * hidden_size;
const scalar_t* up_output_ptr = gate_output_ptr + half_hidden_size;
scalar_t* dst_ptr = down_input + static_cast<int64_t>(token_idx) * half_hidden_size;
scalar_t scale_q = static_cast<scalar_t>(scales ? (1.f / scales[expert_id - start_expert_id]) : 1.f);
const uint32_t vec_elements = half_hidden_size / vec_size;
#pragma unroll 1
for (uint32_t idx = thread_idx; idx < vec_elements; idx += stride) {
vec_t gate_vec, up_vec, out_vec;
gate_vec.load(gate_output_ptr + idx * vec_size);
up_vec.load(up_output_ptr + idx * vec_size);
#pragma unroll
for (uint32_t i = 0; i < vec_size; ++i) {
float gate_f = static_cast<float>(gate_vec[i]);
scalar_t gate_q = silu_quantize<scalar_t>(gate_f);
scalar_t prod = gate_q * up_vec[i] * scale_q;
out_vec[i] = prod;
}
out_vec.store(dst_ptr + idx * vec_size);
}
const int64_t scalar_start = static_cast<int64_t>(vec_elements) * vec_size + thread_idx;
#pragma unroll 1
for (int64_t idx = scalar_start; idx < half_hidden_size; idx += stride) {
float gate_f = static_cast<float>(gate_output_ptr[idx]);
scalar_t gate_q = silu_quantize<scalar_t>(gate_f);
dst_ptr[idx] = gate_q * up_output_ptr[idx] * scale_q;
}
}

void ep_moe_silu_and_mul(
torch::Tensor gateup_output,
torch::Tensor down_input,
torch::Tensor reorder_topk_ids,
torch::Tensor scales,
int64_t start_expert_id,
int64_t end_expert_id) {
const int total_tokens = gateup_output.size(0);
const int hidden_size = gateup_output.size(1);
DISPATCH_PYTORCH_DTYPE_TO_CTYPE_FLOAT_FP16(gateup_output.scalar_type(), scalar_t, [&] {
dim3 grid(total_tokens);
constexpr uint32_t vec_size = 16 / sizeof(scalar_t);
const int half_hidden_size = hidden_size >> 1;
uint32_t threads = (half_hidden_size + vec_size - 1) / vec_size;
threads = std::max<uint32_t>(threads, 256);
threads = ((threads + 31) & ~31U);
dim3 block(std::min(threads, 1024U));
ep_moe_act_and_mul_cuda_kernel<scalar_t><<<grid, block>>>(
static_cast<scalar_t*>(gateup_output.data_ptr()),
static_cast<scalar_t*>(down_input.data_ptr()),
reorder_topk_ids.data_ptr<int>(),
scales.defined() ? scales.data_ptr<float>() : nullptr,
static_cast<int>(start_expert_id),
static_cast<int>(end_expert_id),
hidden_size);
return true;
});
}
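
The removed ep_moe_silu_and_mul reduces to SiLU(gate) * up with an optional per-expert dequant scale, where the gate and up projections occupy the two halves of each gateup_output row. A hedged float-only CPU sketch (it omits the kernel's intermediate cast of silu(gate) back to the storage dtype before the multiply; names and layout are illustrative):

// Hedged CPU sketch, not from the codebase: float reference for
// ep_moe_act_and_mul_cuda_kernel.
#include <cmath>
#include <cstdint>
#include <vector>

void ep_silu_and_mul_reference(
    const std::vector<float>& gateup_output,   // [num_rows * hidden_size]
    std::vector<float>& down_input,            // [num_rows * (hidden_size / 2)]
    const std::vector<int>& reorder_topk_ids,  // [num_rows]
    const float* scales,                       // [num_local_experts] or nullptr
    int start_expert_id,
    int end_expert_id,
    int hidden_size,
    int num_rows) {
  const int half = hidden_size / 2;
  for (int r = 0; r < num_rows; ++r) {
    const int expert_id = reorder_topk_ids[r];
    if (expert_id < start_expert_id || expert_id > end_expert_id) continue;
    const float scale = scales ? 1.f / scales[expert_id - start_expert_id] : 1.f;
    const float* gate = gateup_output.data() + static_cast<int64_t>(r) * hidden_size;
    const float* up = gate + half;  // up projection is the second half of the row
    for (int j = 0; j < half; ++j) {
      const float silu = gate[j] / (1.f + std::exp(-gate[j]));  // x * sigmoid(x)
      down_input[static_cast<int64_t>(r) * half + j] = silu * up[j] * scale;
    }
  }
}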