Revert "[refactor]support gatingtopk operator generalization (#4356)" (#4873)

This reverts commit c4a11a745a.

The npu_gating_top_k operator caused a precision problem with Qwen3-30B, so revert it.

Signed-off-by: 1092626063 <1092626063@qq.com>
This commit is contained in:
1092626063
2025-12-10 15:45:20 +08:00
committed by GitHub
parent 9a144bc7be
commit ceadc2788d
4 changed files with 56 additions and 92 deletions

View File

@@ -28,8 +28,7 @@ import torch
import torch_npu
from vllm.model_executor.layers.activation import SiluAndMul
from vllm_ascend.ops.moe.experts_selector import (check_npu_moe_gating_top_k,
select_experts)
from vllm_ascend.ops.moe.experts_selector import select_experts
from vllm_ascend.ops.moe.moe_mlp import unified_apply_mlp
from vllm_ascend.ops.moe.token_dispatcher import TokenDispatcherWithAllGather
@@ -297,10 +296,7 @@ def test_select_experts(
e_score_correction_bias=e_score_correction_bias,
)
call_moe_gatingtopk = check_npu_moe_gating_top_k(
hidden_states, topk, topk_group, num_expert_group, scoring_func,
custom_routing_function)
if not call_moe_gatingtopk and use_grouped_topk:
if use_grouped_topk:
mock_native_grouped_topk.assert_called_once()
else:
mock_native_grouped_topk.assert_not_called()