### What this PR does / why we need it?
1. Add the implementation of normal Aclnn operators: MoeCombineNormal,
MoeDispatchNormal, NotifyDispatch, and DispatchLayout.
- MoeCombineNormal: Implements the combine logic within MoE operations.
- MoeDispatchNormal: Implements the dispatch logic within MoE
operations.
- NotifyDispatch: Exchanges topk_idx information among different ranks
to calculate the device memory required for the dispatch stage.
- DispatchLayout: Used to calculate information related to the device
memory layout for the dispatch stage.
2. Provide PyTorch interfaces for the normal operators — get_dispatch_layout,
dispatch_prefill, and combine_prefill — to be used for MoE communication
during the prefill stage in vLLM.
- get_dispatch_layout: Calculates information related to the device
memory layout for the dispatch operator, and is called before
dispatch_prefill.
- dispatch_prefill: Initiates the dispatch operation.
- combine_prefill: Initiates the combine operation.
### Does this PR introduce _any_ user-facing change?
No
### How was this patch tested?
The functionality has already been validated using the local Qwen model.
Test cases will be added after support for multi-NPU use cases in the CI
pipeline is finalized.
- vLLM version: v0.12.0
- vLLM main:
ad32e3e19c
Signed-off-by: shiro-zzzz <zhangdianhao@huawei.com>
52 lines · 1.7 KiB · C++
#include "register/op_def_registry.h"
|
|
|
|
namespace ops {
|
|
class DispatchLayout : public OpDef {
|
|
public:
|
|
explicit DispatchLayout(const char *name) : OpDef(name)
|
|
{
|
|
this->Input("topkIdx")
|
|
.ParamType(REQUIRED)
|
|
.DataType({ge::DT_INT64})
|
|
.Format({ge::FORMAT_ND})
|
|
.UnknownShapeFormat({ge::FORMAT_ND});
|
|
|
|
this->Attr("num_tokens").Int();
|
|
this->Attr("num_ranks").Int();
|
|
this->Attr("num_experts").Int();
|
|
this->Attr("num_topk").Int();
|
|
|
|
this->Output("numTokensPerRank")
|
|
.ParamType(REQUIRED)
|
|
.DataType({ge::DT_INT32})
|
|
.Format({ge::FORMAT_ND})
|
|
.UnknownShapeFormat({ge::FORMAT_ND});
|
|
this->Output("numTokensPerExpert")
|
|
.ParamType(REQUIRED)
|
|
.DataType({ge::DT_INT32})
|
|
.Format({ge::FORMAT_ND})
|
|
.UnknownShapeFormat({ge::FORMAT_ND});
|
|
this->Output("isTokenInRank")
|
|
.ParamType(REQUIRED)
|
|
.DataType({ge::DT_INT32})
|
|
.Format({ge::FORMAT_ND})
|
|
.UnknownShapeFormat({ge::FORMAT_ND});
|
|
|
|
OpAICoreConfig aicore_config;
|
|
aicore_config.DynamicCompileStaticFlag(true)
|
|
.DynamicFormatFlag(true)
|
|
.DynamicRankSupportFlag(true)
|
|
.DynamicShapeSupportFlag(true)
|
|
.NeedCheckSupportFlag(false)
|
|
.PrecisionReduceFlag(true)
|
|
.ExtendCfgInfo("aclnnSupport.value", "support_aclnn")
|
|
.ExtendCfgInfo("jitCompile.flag", "static_true")
|
|
.ExtendCfgInfo("multiKernelSupportDynamicGraph.value", "multi_kernel");
|
|
|
|
this->AICore().AddConfig("ascend910_93", aicore_config);
|
|
}
|
|
};
|
|
|
|
OP_ADD(DispatchLayout);
|
|
} // namespace ops
|