[Kernel] Add moe_gating_top_k operator support for Ascend NPU (#5579)

### What this PR does / why we need it?

1. Replace `moe_gating_top_k` from torch_npu with a custom op.
2. Enable the renorm function of `moe_gating_top_k` in the softmax scenario (see the sketch below).
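
With renorm enabled, the top-k softmax weights are typically renormalized to sum to 1 per token; that standard MoE semantics is stated here as an assumption about the kernel's behavior. For context, a minimal sketch of calling the new custom op directly from C++ — the argument values and the `norm_type`/`renorm` encodings below follow the torch_npu / `aclnnMoeGatingTopK` convention and are assumptions, not taken from this diff:

```cpp
#include <ATen/ATen.h>
// Assumes the header declaring vllm_ascend::moe_gating_top_k is included and
// the build targets Ascend, where at::kPrivateUse1 maps to the NPU device.

void gating_example() {
    // Hypothetical gating logits: 8 tokens routed over 64 experts.
    at::Tensor x = at::randn({8, 64}, at::device(at::kPrivateUse1).dtype(at::kHalf));
    auto [y, expert_idx, out] = vllm_ascend::moe_gating_top_k(
        x,                 // [rows, expert_num] gating logits
        /*k=*/4,
        /*k_group=*/1,
        /*group_count=*/1,
        /*group_select_mode=*/0,
        /*renorm=*/1,      // renorm is now honored in the softmax path
        /*norm_type=*/0,   // assumed encoding: 0 = softmax, 1 = sigmoid
        /*out_flag=*/false,
        /*routed_scaling_factor=*/1.0,
        /*eps=*/1e-20,
        /*bias_opt=*/c10::nullopt);
    // y: [8, 4] top-k weights; expert_idx: [8, 4] int32; out: [8, 64] float32.
}
```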

### Does this PR introduce _any_ user-facing change?
No

### How was this patch tested?
No test needed.

- vLLM version: v0.13.0
- vLLM main: 7157596103

---------

Signed-off-by: ZCG12345 <2097562023@qq.com>
ZCG12345 authored on 2026-01-07 21:42:31 +08:00, committed by GitHub
parent 1165b2c863, commit 3be8e33fe9
32 changed files with 4667 additions and 13 deletions


@@ -1219,10 +1219,65 @@ std::tuple<at::Tensor, at::Tensor, at::Tensor, at::Tensor> npu_moe_init_routing_
    return std::tie(expanded_x, expanded_row_idx, expert_tokens_count_or_cumsum, expanded_scale);
}

std::tuple<at::Tensor, at::Tensor, at::Tensor> moe_gating_top_k(
    const at::Tensor& x,
    int64_t k,
    int64_t k_group,
    int64_t group_count,
    int64_t group_select_mode,
    int64_t renorm,
    int64_t norm_type,
    bool out_flag,
    double routed_scaling_factor,
    double eps,
    const c10::optional<at::Tensor>& bias_opt)
{
    TORCH_CHECK(x.dim() == 2, "x should be 2D");
    TORCH_CHECK(
        x.scalar_type() == at::kHalf || x.scalar_type() == at::kFloat || x.scalar_type() == at::kBFloat16,
        "float16, float32 or bfloat16 tensor expected but got a tensor with dtype: ",
        x.scalar_type());
    auto x_size = x.sizes();
    auto rows = x_size[0];
    auto expert_num = x_size[1];
    const at::Tensor& bias = c10::value_or_else(bias_opt, [] { return at::Tensor(); });
    if (bias.defined()) {
        TORCH_CHECK(x.scalar_type() == bias.scalar_type(), "The dtype of x and bias should be the same");
        TORCH_CHECK(bias.dim() == 1, "bias should be 1D");
        auto bias_size = bias.sizes();
        TORCH_CHECK(bias_size[0] == expert_num, "The first dim of bias should equal the second dim of x");
    }
    // Outputs: per-row top-k gating weights, the selected expert indices, and
    // the full normalized gating matrix (populated when out_flag is set).
    at::Tensor y = at::empty({rows, k}, x.options());
    at::Tensor expert_idx = at::empty({rows, k}, x.options().dtype(at::kInt));
    at::Tensor out = at::empty({rows, expert_num}, x.options().dtype(at::kFloat));
    EXEC_NPU_CMD(aclnnMoeGatingTopK,
        x,
        bias,
        k,
        k_group,
        group_count,
        group_select_mode,
        renorm,
        norm_type,
        out_flag,
        routed_scaling_factor,
        eps,
        y,
        expert_idx,
        out);
    return std::tuple<at::Tensor, at::Tensor, at::Tensor>(y, expert_idx, out);
}
} // namespace vllm_ascend

TORCH_LIBRARY_EXPAND(CONCAT(_C, _ascend), ops)
{
    // vLLM-Ascend custom ops
    ops.def("weak_ref_tensor(Tensor input) -> Tensor");
    ops.impl("weak_ref_tensor", torch::kPrivateUse1, &vllm_ascend::weak_ref_tensor);
@@ -1358,6 +1413,7 @@ TORCH_LIBRARY_EXPAND(CONCAT(_C, _ascend), ops)
"num_ranks) -> Tensor");
ops.impl("combine_prefill", torch::kPrivateUse1,
&vllm_ascend::combine_prefill);
ops.def(
"npu_moe_init_routing_custom(Tensor x, Tensor expert_idx, *, Tensor? scale=None, Tensor? offset=None, int active_num=-1, "
" int expert_capacity=-1, int expert_num=-1, int drop_pad_mode=0, int expert_tokens_num_type=0, "
@@ -1365,4 +1421,21 @@ TORCH_LIBRARY_EXPAND(CONCAT(_C, _ascend), ops)
" int row_idx_type=0) -> (Tensor, Tensor, Tensor, Tensor)"
);
ops.impl("npu_moe_init_routing_custom", torch::kPrivateUse1, &vllm_ascend::npu_moe_init_routing_custom);
    // moe_gating_top_k: fused top-k gating for MoE expert routing
    ops.def(
        "moe_gating_top_k(Tensor x, "
        "int k, "
        "int k_group, "
        "int group_count, "
        "int group_select_mode, "
        "int renorm, "
        "int norm_type, "
        "bool out_flag, "
        "float routed_scaling_factor, "
        "float eps, "
        "Tensor? bias_opt=None) "
        "-> (Tensor y, Tensor expert_idx, Tensor out)"
    );
    ops.impl("moe_gating_top_k", torch::kPrivateUse1, &vllm_ascend::moe_gating_top_k);
}
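
Once registered, the schema lives in the `_C_ascend` library, so Python reaches the op as `torch.ops._C_ascend.moe_gating_top_k`. A sketch of the equivalent dispatcher route from C++ follows; the argument values are illustrative assumptions, not defaults from this PR:

```cpp
#include <ATen/ATen.h>
#include <ATen/core/dispatch/Dispatcher.h>
#include <tuple>

// Sketch: resolving the registered op through the PyTorch dispatcher — the
// same path torch.ops._C_ascend.moe_gating_top_k takes from Python.
std::tuple<at::Tensor, at::Tensor, at::Tensor> call_moe_gating_top_k(const at::Tensor& x) {
    static auto op = c10::Dispatcher::singleton()
        .findSchemaOrThrow("_C_ascend::moe_gating_top_k", "")
        .typed<std::tuple<at::Tensor, at::Tensor, at::Tensor>(
            const at::Tensor&, int64_t, int64_t, int64_t, int64_t, int64_t,
            int64_t, bool, double, double, const c10::optional<at::Tensor>&)>();
    // k=4, a single expert group, softmax normalization with renorm enabled
    // (illustrative values only).
    return op.call(x, 4, 1, 1, 0, 1, 0, false, 1.0, 1e-20, c10::nullopt);
}
```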