add dispatch_gmm_combine kernel (#3532)
### What this PR does / why we need it? This PR introduces the Ascend implementation of the `dispatch_ffn_combine` kernel and wires it into the vLLM-Ascend runtime, together with follow‑up fixes to ensure the kernel builds and runs correctly in CI. - Add full host and device implementation of the `dispatch_ffn_combine` kernel under `csrc/dispatch_ffn_combine`, including tiling logic, MOE routing helpers, and kernel utilities for quantized FFN dispatch. - Integrate the new kernel with the PyTorch binding (csrc/torch_binding.cpp, csrc/torch_binding_meta.cpp) and the Ascend runtime (vllm_ascend/ascend_forward_context.py, vllm_ascend/worker/model_runner_v1.py). - Extend fused MoE communication and token dispatch support in `vllm_ascend/ops/fused_moe`, adding methods/utilities needed by the new dispatch path. - Update quantization logic in vllm_ascend/quantization/w8a8_dynamic.py to support the new FFN dispatch flow. - Fix kernel build issues by adjusting `csrc/build_aclnn.sh`, CMake configuration, and include/namespace usage in the new kernel files. - Add an end‑to‑end nightly test `tests/e2e/nightly/ops/test_dispatch_ffn_combine.py` and helper utilities in `vllm_ascend/utils.py` to validate the new kernel. ### Does this PR introduce _any_ user-facing change? ### How was this patch tested? - vLLM version: v0.12.0 - vLLM main: https://github.com/vllm-project/vllm/commit/v0.12.0 --------- Signed-off-by: mojave2 <chenchen145@huawei.com> Co-authored-by: wangxiyuan <wangxiyuan1007@gmail.com>
This commit is contained in:
66
csrc/dispatch_ffn_combine/op_host/CMakeLists.txt
Normal file
66
csrc/dispatch_ffn_combine/op_host/CMakeLists.txt
Normal file
@@ -0,0 +1,66 @@
|
||||
# Copyright (c) 2025 Huawei Technologies Co., Ltd.
# This file is a part of the CANN Open Software.
# Licensed under CANN Open Software License Agreement Version 1.0 (the "License").
# Please refer to the License for details. You may not use this file except in compliance with the License.
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE.
# See LICENSE in the root of the software repository for the full text of the License.
# ======================================================================================================================

# Locate the AscendC headers shipped with the CANN toolkit. Toolkit layouts
# differ between packages (aarch64-linux vs. arm64-linux), so probe the known
# layouts first and fall back to the host processor name.
set(_DISPATCH_FFN_INC_OPTS)
if(EXISTS "${ASCEND_CANN_PACKAGE_PATH}/aarch64-linux/ascendc/include")
    list(APPEND _DISPATCH_FFN_INC_OPTS -I${ASCEND_CANN_PACKAGE_PATH}/aarch64-linux/ascendc/include)
elseif(EXISTS "${ASCEND_CANN_PACKAGE_PATH}/arm64-linux/ascendc/include")
    list(APPEND _DISPATCH_FFN_INC_OPTS -I${ASCEND_CANN_PACKAGE_PATH}/arm64-linux/ascendc/include)
elseif(EXISTS "${ASCEND_CANN_PACKAGE_PATH}/${CMAKE_SYSTEM_PROCESSOR}-linux/ascendc/include")
    list(APPEND _DISPATCH_FFN_INC_OPTS -I${ASCEND_CANN_PACKAGE_PATH}/${CMAKE_SYSTEM_PROCESSOR}-linux/ascendc/include)
endif()

# The kernel also uses the vendored CATLASS headers when present.
if(EXISTS "${CMAKE_SOURCE_DIR}/third_party/catlass/include")
    list(APPEND _DISPATCH_FFN_INC_OPTS -I${CMAKE_SOURCE_DIR}/third_party/catlass/include)
endif()

# Device-compile options for the DispatchFFNCombine kernel.
# NOTE(review): -Werror is hardcoded here; consider guarding it behind an
# option if toolchain upgrades ever surface new warnings in vendored headers.
add_ops_compile_options(
    OP_NAME DispatchFFNCombine
    OPTIONS --cce-auto-sync=on
            -Wno-deprecated-declarations
            -Werror
            -DHCCL_COMM
            ${_DISPATCH_FFN_INC_OPTS}
)

# Host-side op definition (IR registration).
target_sources(op_host_aclnnInner PRIVATE
    dispatch_ffn_combine_def.cpp
)

# Public aclnn entry points.
target_sources(opapi PRIVATE
    aclnn_dispatch_ffn_combine.cpp
)

if(NOT BUILD_OPEN_PROJECT)
    target_sources(aclnn_ops_train PRIVATE
        aclnn_dispatch_ffn_combine.cpp
    )

    target_sources(aclnn_ops_infer PRIVATE
        aclnn_dispatch_ffn_combine.cpp
    )
endif()

# Tiling implementation; it needs the op_kernel headers for shared tiling structs.
target_sources(optiling PRIVATE
    dispatch_ffn_combine_tiling.cpp
)

target_include_directories(optiling PRIVATE
    ${CMAKE_CURRENT_SOURCE_DIR}
    ${CMAKE_CURRENT_SOURCE_DIR}/../op_kernel
)

# Shape/dtype inference registration.
target_sources(opsproto PRIVATE
    dispatch_ffn_combine_proto.cpp
)

# Install the public aclnn header.
# Fix: variable was previously named _GMM_Aclnn_header — a copy-paste leftover
# from the GMM op; renamed for consistency with this op.
file(GLOB _DISPATCH_FFN_ACLNN_HEADER "${CMAKE_CURRENT_SOURCE_DIR}/aclnn_dispatch_ffn_combine.h")

install(FILES ${_DISPATCH_FFN_ACLNN_HEADER}
    DESTINATION ${ACLNN_INC_INSTALL_DIR} OPTIONAL
)
|
||||
@@ -0,0 +1,84 @@
|
||||
/**
 * Copyright (c) 2025 Huawei Technologies Co., Ltd.
 * This file is a part of the CANN Open Software.
 * Licensed under CANN Open Software License Agreement Version 1.0 (the "License").
 * Please refer to the License for details. You may not use this file except in compliance with the License.
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED,
 * INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE.
 * See LICENSE in the root of the software repository for the full text of the License.
 */
#include "aclnn_dispatch_ffn_combine.h"
#include <algorithm>
// #include "aclnn_kernels/common/op_error_check.h"
// #include "opdev/op_log.h"
// #include "opdev/common_types.h"
// #include "opdev/platform.h"
// #include "ophost/matmul_util.h"
#include <unistd.h>
#include <vector>
#include <string>
#include <iostream>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/file.h>
#include <climits>
#include "../op_host/error_log.h"
// using namespace op;

// using namespace op;

#ifdef __cplusplus
extern "C" {
#endif

// NOTE(review): the four constants below are not referenced anywhere in this
// file; they look like leftovers from a template op and could be removed.
static constexpr size_t TWO_DIMS = 2;
static constexpr int64_t KVALUE_MIN = 256;
static constexpr int64_t KVALUE_MAX = 65535;
static constexpr size_t HCCL_GROUP_NAME_MAX = 128U;
// Selects which engine services the op's HCCL communication requests.
enum NnopbaseHcclServerType {
    NNOPBASE_HCCL_SERVER_TYPE_AICPU = 0,
    NNOPBASE_HCCL_SERVER_TYPE_MTE,
    NNOPBASE_HCCL_SERVER_TYPE_END
};

// Inner (generated) two-phase entry points; implemented by the aclnnInner
// binding built from dispatch_ffn_combine_def.cpp.
extern aclnnStatus aclnnInnerDispatchFFNCombineGetWorkspaceSize(const aclTensor* x, const aclTensor* weight1, const aclTensor* weight2,
                                                                const aclTensor* expertId, const aclTensor* scale1, const aclTensor* scale2,
                                                                const aclTensor* probs,
                                                                const char* group, int64_t maxOutputSize,
                                                                bool transB, bool weightNz,
                                                                const aclTensor* out,
                                                                uint64_t* workspaceSize, aclOpExecutor** executor);
extern aclnnStatus aclnnInnerDispatchFFNCombine(void *workspace, uint64_t workspaceSize,
                                                aclOpExecutor *executor, aclrtStream stream);
// Declared weak: resolves to null when the linked nnopbase runtime does not
// provide HCCL server-type selection; callers must null-check before use.
extern "C" void __attribute__((weak)) NnopbaseSetHcclServerType(void *executor, NnopbaseHcclServerType sType);
||||
/**
 * Public first-phase entry: computes the workspace size for DispatchFFNCombine.
 * Thin wrapper over the inner implementation; the public API pins
 * transB=false and weightNz=true, so callers cannot vary the weight layout.
 */
aclnnStatus aclnnDispatchFFNCombineGetWorkspaceSize(const aclTensor* x, const aclTensor* weight1, const aclTensor* weight2,
                                                    const aclTensor* expertId, const aclTensor* scale1, const aclTensor* scale2,
                                                    const aclTensor* probs,
                                                    const char* group, int64_t maxOutputSize,
                                                    const aclTensor* out,
                                                    uint64_t* workspaceSize, aclOpExecutor** executor)
{
    // Weights are expected pre-converted to NZ format and not transposed.
    constexpr bool kTransB = false;
    constexpr bool kWeightNz = true;
    return aclnnInnerDispatchFFNCombineGetWorkspaceSize(x, weight1, weight2, expertId, scale1, scale2, probs,
                                                        group, maxOutputSize, kTransB, kWeightNz,
                                                        out, workspaceSize, executor);
}
|
||||
|
||||
/**
 * Public second-phase entry: launches the DispatchFFNCombine computation.
 * Routes the op's HCCL traffic through the MTE server when the weak
 * NnopbaseSetHcclServerType symbol was resolved at link time.
 */
aclnnStatus aclnnDispatchFFNCombine(void* workspace, uint64_t workspaceSize, aclOpExecutor *executor, aclrtStream stream)
{
    // NnopbaseSetHcclServerType is declared weak; skip it when unresolved.
    if (NnopbaseSetHcclServerType != nullptr) {
        NnopbaseSetHcclServerType(executor, NNOPBASE_HCCL_SERVER_TYPE_MTE);
    }
    return aclnnInnerDispatchFFNCombine(workspace, workspaceSize, executor, stream);
}
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
@@ -0,0 +1,61 @@
|
||||
/**
 * Copyright (c) 2025 Huawei Technologies Co., Ltd.
 * This file is a part of the CANN Open Software.
 * Licensed under CANN Open Software License Agreement Version 1.0 (the "License").
 * Please refer to the License for details. You may not use this file except in compliance with the License.
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED,
 * INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE.
 * See LICENSE in the root of the software repository for the full text of the License.
 */

#ifndef OP_API_INC_DISPATCH_FFN_COMBINE_
#define OP_API_INC_DISPATCH_FFN_COMBINE_

#include <string>

#include "aclnn/aclnn_base.h"
#include "hccl/hccl.h"
#include "hccl/hccl_types.h"

#ifdef __cplusplus
extern "C" {
#endif

/**
 * Op function: fuses the full distributed MoE pipeline from InitRouting
 * through Unpermute (dispatch + quantized FFN + combine) into one kernel.
 * @brief First-phase entry of aclnnDispatchFFNCombine: computes the required
 *        workspace size for the given computation.
 * @domain aclnn_ops_infer
 * (Fix: the previous parameter docs described a/b/bias/worldsize/epRankId
 *  from a different op; they are replaced with this function's actual
 *  parameters.)
 * @param [in] x: input activations; dtype float16 or bf16 per op registration.
 * @param [in] weight1: first FFN weight; int8 (ND or FRACTAL_NZ).
 * @param [in] weight2: second FFN weight; int8 (ND or FRACTAL_NZ).
 * @param [in] expertId: per-token routed expert indices; int32.
 * @param [in] scale1: quantization scales for weight1; int64.
 * @param [in] scale2: quantization scales for weight2; int64.
 * @param [in] probs: routing probabilities; float32.
 * @param [in] group: name of the HCCL communication group.
 * @param [in] maxOutputSize: upper bound used for workspace sizing
 *             (presumably max tokens dispatched per rank — confirm with tiling).
 * @param [out] out: combined result; same dtype as x.
 * @param [out] workspaceSize: required NPU device workspace size in bytes.
 * @param [out] executor: returned op executor describing the compute flow.
 * @return aclnnStatus: status code.
 */
__attribute__((visibility("default"))) aclnnStatus aclnnDispatchFFNCombineGetWorkspaceSize(const aclTensor* x, const aclTensor* weight1, const aclTensor* weight2,
                                                                                           const aclTensor* expertId, const aclTensor* scale1, const aclTensor* scale2,
                                                                                           const aclTensor* probs,
                                                                                           const char* group, int64_t maxOutputSize,
                                                                                           const aclTensor* out,
                                                                                           uint64_t* workspaceSize, aclOpExecutor** executor);

/**
 * @brief Second-phase entry of aclnnDispatchFFNCombine: performs the
 *        computation. (Fix: previously documented as aclnnDispatchGmmCombine.)
 * @param [in] workspace: start address of the device workspace.
 * @param [in] workspaceSize: workspace size obtained from the first-phase
 *             entry aclnnDispatchFFNCombineGetWorkspaceSize.
 * @param [in] executor: op executor describing the compute flow.
 * @param [in] stream: acl stream to launch on.
 * @return aclnnStatus: status code.
 */
__attribute__((visibility("default"))) aclnnStatus aclnnDispatchFFNCombine(void* workspace, uint64_t workspaceSize, aclOpExecutor* executor,
                                                                           aclrtStream stream);

#ifdef __cplusplus
}
#endif

#endif  // OP_API_INC_DISPATCH_FFN_COMBINE_
|
||||
@@ -0,0 +1,88 @@
|
||||
/**
 * Copyright (c) 2025 Huawei Technologies Co., Ltd.
 * This file is a part of the CANN Open Software.
 * Licensed under CANN Open Software License Agreement Version 1.0 (the "License").
 * Please refer to the License for details. You may not use this file except in compliance with the License.
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED,
 * INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE.
 * See LICENSE in the root of the software repository for the full text of the License.
 */

/*!
 * \file dispatch_ffn_combine_def.cpp
 * \brief IR registration for DispatchFFNCombine: declares inputs, outputs,
 *        attributes, and the AI Core configurations the op is built for.
 */
#include "register/op_def_registry.h"

namespace ops {
// Each DataType/Format list below declares one entry per supported type
// combination (three combinations; the third uses FRACTAL_NZ weights).
class DispatchFFNCombine : public OpDef {
public:
    explicit DispatchFFNCombine(const char *name) : OpDef(name) {
        // Input activations: fp16 or bf16, ND layout.
        this->Input("a")
            .ParamType(REQUIRED)
            .DataType({ge::DT_FLOAT16, ge::DT_BF16, ge::DT_BF16})
            .Format({ge::FORMAT_ND, ge::FORMAT_ND, ge::FORMAT_ND})
            .UnknownShapeFormat({ge::FORMAT_ND, ge::FORMAT_ND, ge::FORMAT_ND});
        // First FFN weight: int8, optionally pre-converted to FRACTAL_NZ.
        this->Input("w1")
            .ParamType(REQUIRED)
            .DataType({ge::DT_INT8, ge::DT_INT8, ge::DT_INT8})
            .Format({ge::FORMAT_ND, ge::FORMAT_ND, ge::FORMAT_FRACTAL_NZ})
            .UnknownShapeFormat({ge::FORMAT_ND, ge::FORMAT_ND, ge::FORMAT_FRACTAL_NZ})
            .IgnoreContiguous();
        // Second FFN weight: same type/format combinations as w1.
        this->Input("w2")
            .ParamType(REQUIRED)
            .DataType({ge::DT_INT8, ge::DT_INT8, ge::DT_INT8})
            .Format({ge::FORMAT_ND, ge::FORMAT_ND, ge::FORMAT_FRACTAL_NZ})
            .UnknownShapeFormat({ge::FORMAT_ND, ge::FORMAT_ND, ge::FORMAT_FRACTAL_NZ})
            .IgnoreContiguous();
        // Routed expert indices per token: int32.
        this->Input("expertIdx")
            .ParamType(REQUIRED)
            .DataType({ge::DT_INT32, ge::DT_INT32, ge::DT_INT32})
            .Format({ge::FORMAT_ND, ge::FORMAT_ND, ge::FORMAT_ND})
            .UnknownShapeFormat({ge::FORMAT_ND, ge::FORMAT_ND, ge::FORMAT_ND});
        // Quantization scales for w1: int64.
        this->Input("scale1")
            .ParamType(REQUIRED)
            .DataType({ge::DT_INT64, ge::DT_INT64, ge::DT_INT64})
            .Format({ge::FORMAT_ND, ge::FORMAT_ND, ge::FORMAT_ND})
            .UnknownShapeFormat({ge::FORMAT_ND, ge::FORMAT_ND, ge::FORMAT_ND});
        // Quantization scales for w2: int64.
        this->Input("scale2")
            .ParamType(REQUIRED)
            .DataType({ge::DT_INT64, ge::DT_INT64, ge::DT_INT64})
            .Format({ge::FORMAT_ND, ge::FORMAT_ND, ge::FORMAT_ND})
            .UnknownShapeFormat({ge::FORMAT_ND, ge::FORMAT_ND, ge::FORMAT_ND});
        // Routing probabilities: float32.
        this->Input("probs")
            .ParamType(REQUIRED)
            .DataType({ge::DT_FLOAT, ge::DT_FLOAT, ge::DT_FLOAT})
            .Format({ge::FORMAT_ND, ge::FORMAT_ND, ge::FORMAT_ND})
            .UnknownShapeFormat({ge::FORMAT_ND, ge::FORMAT_ND, ge::FORMAT_ND});

        // Output: combined result, same dtype as the activations input.
        this->Output("out")
            .ParamType(REQUIRED)
            .DataType({ge::DT_FLOAT16, ge::DT_BF16, ge::DT_BF16})
            .Format({ge::FORMAT_ND, ge::FORMAT_ND, ge::FORMAT_ND})
            .UnknownShapeFormat({ge::FORMAT_ND, ge::FORMAT_ND,ge::FORMAT_ND});

        // Attributes: HCCL group name is mandatory; the rest are optional.
        // NOTE(review): "weightNz" defaults to false here, while the public
        // aclnn wrapper always passes weightNz=true — confirm intended default.
        this->Attr("group").AttrType(REQUIRED).String();
        this->Attr("M").AttrType(OPTIONAL).Int();
        this->Attr("transB").AttrType(OPTIONAL).Bool(false);
        this->Attr("weightNz").AttrType(OPTIONAL).Bool(false);

        OpAICoreConfig aicore_config;
        aicore_config.DynamicCompileStaticFlag(true)
            .DynamicFormatFlag(true)
            .DynamicRankSupportFlag(true)
            .DynamicShapeSupportFlag(true)
            .NeedCheckSupportFlag(false)
            .PrecisionReduceFlag(true)
            .ExtendCfgInfo("aclnnSupport.value", "support_aclnn")
            .ExtendCfgInfo("jitCompile.flag", "static_false")
            .ExtendCfgInfo("multiKernelSupportDynamicGraph.value", "multi_kernel");
        this->AICore().AddConfig("ascend910_93", aicore_config);
        this->AICore().AddConfig("ascend910b", aicore_config);
        // MC2 op: the "group" attribute carries the HCCL communication group.
        this->MC2().HcclGroup("group");
    }
};

OP_ADD(DispatchFFNCombine);
}  // namespace ops
|
||||
@@ -0,0 +1,40 @@
|
||||
/**
 * Copyright (c) 2025 Huawei Technologies Co., Ltd.
 * This file is a part of the CANN Open Software.
 * Licensed under CANN Open Software License Agreement Version 1.0 (the "License").
 * Please refer to the License for details. You may not use this file except in compliance with the License.
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED,
 * INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE.
 * See LICENSE in the root of the software repository for the full text of the License.
 */

/*!
 * \file dispatch_ffn_combine_proto.cpp
 * \brief Shape and data-type inference registration for DispatchFFNCombine.
 *        (Comment previously named the wrong file, dispatch_ffn_proto.cpp.)
 */
#include <graph/utils/type_utils.h>
#include <register/op_impl_registry.h>
// #include "../../common/ophost/op_util.h"
// #include "../../common/ophost/hcom_topo_info.h"
// #include "log/ops_log.h"

using namespace ge;
namespace ops {
// NOTE(review): the three constants below are not referenced in this file.
const size_t ATTR_GROUP = 0;
const size_t ATTR_RANK_SIZE = 1;
const size_t SUPPORT_DIM_SIZE = 2;

// Shape-inference stub: reports success without writing an output shape.
// Presumably the output shape is supplied by the aclnn caller — TODO confirm.
static ge::graphStatus InferShapeDispatchFFNCombine(gert::InferShapeContext* context) {
    return ge::GRAPH_SUCCESS;
}

// Dtype-inference stub. The commented-out lines show the intended behavior
// (output dtype = input dtype) but it is not applied — verify downstream
// consumers do not rely on inferred output dtype.
static ge::graphStatus InferDataTypeDispatchFFNCombine(gert::InferDataTypeContext* context) {
    // auto d_type = context->GetInputDataType(0);
    // context->SetOutputDataType(0, d_type);
    return ge::GRAPH_SUCCESS;
}

IMPL_OP_INFERSHAPE(DispatchFFNCombine)
    .InferShape(InferShapeDispatchFFNCombine)
    .InferDataType(InferDataTypeDispatchFFNCombine);
}  // namespace ops
|
||||
@@ -0,0 +1,265 @@
|
||||
/**
 * Copyright (c) 2025 Huawei Technologies Co., Ltd.
 * This file is a part of the CANN Open Software.
 * Licensed under CANN Open Software License Agreement Version 1.0 (the "License").
 * Please refer to the License for details. You may not use this file except in compliance with the License.
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED,
 * INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE.
 * See LICENSE in the root of the software repository for the full text of the License.
 */
/*!
 * \file dispatch_ffn_combine_tiling.cpp
 * \brief Host tiling for the DispatchFFNCombine op.
 *        (Comment previously named the wrong file, dispatch_ffn_tiling.cpp.)
 */
// Fix: the original included both `#include "vector"` and `#include <vector>`;
// the quoted duplicate was removed and the standard headers grouped first.
#include <algorithm>
#include <map>
#include <vector>

#include "register/tilingdata_base.h"
#include "tiling/tiling_api.h"
#include "error_log.h"
#include "hcom_topo_info.h"
#include "register/op_def_registry.h"
#include "dispatch_ffn_combine_tiling.h"
#include "moe_init_routing_quant_v2/moe_init_routing_quant_v2_tiling.h"

using namespace AscendC;
using namespace ge;

namespace {
// Constant definitions.
const char *K_INNER_DEBUG = "DispatchFFNCombine Tiling Debug";
// Attribute indices (must match the Attr order in dispatch_ffn_combine_def.cpp:
// group, M, transB, weightNz).
constexpr uint32_t ATTR_GROUP_INDEX = 0;
constexpr uint32_t ATTR_MAX_OUTPUT_SIZE_INDEX = 1;
constexpr uint32_t ATTR_IS_TRANS_B = 2;
constexpr uint32_t ATTR_WEIGHT_NZ = 3;
// Tiling-key composition: base key plus additive flags.
constexpr uint64_t INIT_TILINGKEY = 1000000;
constexpr uint64_t TILINGKEY_TRANS_B = 1U;
constexpr uint64_t TILINGKEY_WEIGHT_NZ = 10;
// Input indices (must match the Input order in the op definition).
constexpr uint32_t X_INDEX = 0;
constexpr uint32_t WEIGHT_INDEX = 1;
constexpr uint32_t WEIGHT2_INDEX = 2;
constexpr uint32_t EXPERTID_INDEX = 3;
constexpr uint32_t BLOCK_NUM = 20;
// Baseline workspace reserved for the runtime/system.
constexpr uint32_t SYSTEM_NEED_WORKSPACE = 16 * 1024 * 1024;
}
|
||||
|
||||
namespace optiling {
|
||||
|
||||
// Ceiling division for non-negative operands: smallest q with q * div >= num.
// Returns 0 when div == 0 so callers can never trigger a divide-by-zero trap.
static int32_t CeilDev(int32_t num, int32_t div)
{
    return (div == 0) ? 0 : (num + div - 1) / div;
}
|
||||
|
||||
// 解析并校验 rankId, group, worldSize, isTransB 属性值
|
||||
static ge::graphStatus DispatchFFNCombineCheckAttrAndSetTiling(gert::TilingContext *context, DispatchFFNCombineInfo& info)
|
||||
{
|
||||
auto attrs = context->GetAttrs();
|
||||
OP_TILING_CHECK(attrs == nullptr, OP_LOGE(K_INNER_DEBUG, "attrs is null."), return ge::GRAPH_FAILED);
|
||||
|
||||
// todo:Attr相关tilingdata的设置、校验、打印
|
||||
auto groupPtr = attrs->GetAttrPointer<char>(static_cast<int>(ATTR_GROUP_INDEX));
|
||||
auto maxOutputSizePtr = attrs->GetAttrPointer<int>(ATTR_MAX_OUTPUT_SIZE_INDEX);
|
||||
auto is_trans_b = attrs->GetAttrPointer<bool>(ATTR_IS_TRANS_B);
|
||||
auto weight_nz = attrs->GetAttrPointer<bool>(ATTR_WEIGHT_NZ);
|
||||
OP_TILING_CHECK(groupPtr == nullptr || strlen(groupPtr) == 0,
|
||||
OP_LOGE(K_INNER_DEBUG, "group is invalid."), return GRAPH_FAILED);
|
||||
|
||||
OP_TILING_CHECK(is_trans_b == nullptr,
|
||||
OP_LOGE(K_INNER_DEBUG, "is_trans_b is invalid."), return GRAPH_FAILED);
|
||||
OP_TILING_CHECK(weight_nz == nullptr,
|
||||
OP_LOGE(K_INNER_DEBUG, "weight_nz is invalid."), return GRAPH_FAILED);
|
||||
|
||||
info.maxOutputSize = *maxOutputSizePtr;
|
||||
info.isTransposeB = *is_trans_b;
|
||||
info.isWeightNz = *weight_nz;
|
||||
|
||||
int64_t rankSize;
|
||||
(void)ge::HcomTopoInfo::Instance().GetGroupRankSize(groupPtr, rankSize);
|
||||
info.worldSize = rankSize;
|
||||
|
||||
OP_LOGD(K_INNER_DEBUG, "maxOutputSize=%d ", info.maxOutputSize);
|
||||
OP_LOGD(K_INNER_DEBUG, "rankSize=%d ", info.worldSize);
|
||||
|
||||
return ge::GRAPH_SUCCESS;
|
||||
}
|
||||
|
||||
// 提取输入张量 A 和 B 的形状,计算出 M、K、N 值
|
||||
static ge::graphStatus DispatchFFNCombineCheckShapeAndSetTiling(gert::TilingContext *context, DispatchFFNCombineInfo &info)
|
||||
{
|
||||
const char *nodeName = context->GetNodeName();
|
||||
// OPS_LOG_I(nodeName, "DispatchFFnCombine DispatchFFNCombineCheckShapeAndSetTiling.");
|
||||
|
||||
const gert::StorageShape *aStorageShape = context->GetInputShape(X_INDEX);
|
||||
const gert::StorageShape *bStorageShape = context->GetInputShape(WEIGHT_INDEX);
|
||||
const gert::StorageShape *expertIdxShape = context->GetInputShape(EXPERTID_INDEX);
|
||||
uint32_t M = aStorageShape->GetStorageShape().GetDim(0);
|
||||
uint32_t K = aStorageShape->GetStorageShape().GetDim(1);
|
||||
uint32_t expertPerRank = bStorageShape->GetStorageShape().GetDim(0);
|
||||
uint32_t N = bStorageShape->GetStorageShape().GetDim(2);
|
||||
uint32_t topK = expertIdxShape->GetStorageShape().GetDim(1);
|
||||
|
||||
info.M = M;
|
||||
info.N = N;
|
||||
info.K = K;
|
||||
info.expertPerRank = expertPerRank;
|
||||
info.topK = topK;
|
||||
OP_LOGD(K_INNER_DEBUG, "M=%d ", info.M);
|
||||
OP_LOGD(K_INNER_DEBUG, "K=%d ", info.K);
|
||||
OP_LOGD(K_INNER_DEBUG, "N=%d ", info.N);
|
||||
OP_LOGD(K_INNER_DEBUG, "expertPerRank=%d ", info.expertPerRank);
|
||||
OP_LOGD(K_INNER_DEBUG, "topK=%d ", info.topK);
|
||||
|
||||
return ge::GRAPH_SUCCESS;
|
||||
}
|
||||
|
||||
// Queries hardware information for the current chip platform (AI vector core
// count and UB memory capacity) and records it in the tiling info.
static ge::graphStatus DispatchFFNCombineGetPlatformInfoAndSetTiling(gert::TilingContext *context, DispatchFFNCombineInfo& info)
{
    auto ascendcPlatform = platform_ascendc::PlatformAscendC(context->GetPlatformInfo());
    uint32_t aivNum = ascendcPlatform.GetCoreNumAiv();
    uint64_t ubSize = 0U;
    ascendcPlatform.GetCoreMemSize(platform_ascendc::CoreMemType::UB, ubSize);
    info.aivNum = aivNum;
    info.totalUbSize = ubSize;

    OP_LOGD(K_INNER_DEBUG, "aivNum=%d", info.aivNum);
    OP_LOGD(K_INNER_DEBUG, "ubSize=%lu", info.totalUbSize);

    return ge::GRAPH_SUCCESS;
}
|
||||
|
||||
// Fills the CoC (compute-on-communication) matmul tiling with fixed block
// sizes. NOTE(review): m0/k0/n0, swizzle and ubMoveNum look hand-tuned for
// this kernel — confirm against the device kernel before changing any value.
void SetTilingData(CoCTiling &cocTilingData, DispatchFFNCombineInfo &info)
{
    cocTilingData.m0 = 128;
    cocTilingData.k0 = 256;
    cocTilingData.n0 = 256;
    cocTilingData.swizzleDirect = 1;
    cocTilingData.swizzleOffset = 7;
    cocTilingData.ubMoveNum = 16 * 1024;
    cocTilingData.pValue = 1;
    // One communication split per rank in the EP group.
    cocTilingData.commNpuSplit = info.worldSize;
    cocTilingData.commDataSplit = 1;
    cocTilingData.lenPerLoop = cocTilingData.m0 * cocTilingData.n0 / 2;
}
|
||||
|
||||
// Main tiling driver:
//   get tilingData -> validate attrs -> validate shapes -> query platform info
//   -> SetTilingData (per rank count) -> set blockDim -> set tilingKey
//   -> set workspace size -> configure communication parameters.
static ge::graphStatus DispatchFFNCombineTilingFuncImpl(gert::TilingContext *context)
{
    const char *nodeName = context->GetNodeName();
    OP_LOGI(nodeName, "Enter DispatchFFNCombine tiling func.");

    // 1. tilingData
    DispatchFFNCombineTilingData *tilingData = context->GetTilingData<DispatchFFNCombineTilingData>();
    OP_TILING_CHECK(tilingData == nullptr, OP_LOGE(nodeName, "tilingData is nullptr."),
                    return ge::GRAPH_FAILED);
    OP_LOGI(nodeName, "DispatchFFNCombine get tilingData.");
    DispatchFFNCombineInfo& info = tilingData->dispatchFFNCombineInfo;
    OP_LOGI(nodeName, "DispatchFFNCombine get tilingData info.");

    OP_TILING_CHECK(DispatchFFNCombineCheckAttrAndSetTiling(context, info) != ge::GRAPH_SUCCESS,
                    OP_LOGE(context->GetNodeName(), "DispatchFFNCombine CheckAttrAndSetTiling Failed"),
                    return ge::GRAPH_FAILED);
    OP_TILING_CHECK(DispatchFFNCombineCheckShapeAndSetTiling(context, info) != ge::GRAPH_SUCCESS,
                    OP_LOGE(context->GetNodeName(), "DispatchFFNCombine CheckShapeAndSetTiling Failed"),
                    return ge::GRAPH_FAILED);
    OP_TILING_CHECK(DispatchFFNCombineGetPlatformInfoAndSetTiling(context, info) != ge::GRAPH_SUCCESS,
                    OP_LOGE(context->GetNodeName(), "DispatchFFNCombine GetPlatformInfoAndSetTiling Failed"),
                    return ge::GRAPH_FAILED);

    SetTilingData(tilingData->cocTiling, info);

    // 2. set blockDim
    uint32_t blockDim = 1U;
    auto ascendcPlatform = platform_ascendc::PlatformAscendC(context->GetPlatformInfo());
    auto aicNum = ascendcPlatform.GetCoreNumAic();
    auto aivNum = ascendcPlatform.GetCoreNumAiv();
    blockDim = ascendcPlatform.CalcTschBlockDim(aivNum, aicNum, aivNum);
    context->SetBlockDim(blockDim);

    // 3. set tilingKey: base key plus additive flags for transposed-B and
    // NZ-format weights (decimal digits keep the combinations distinct).
    uint64_t tilingKey = INIT_TILINGKEY;
    tilingKey += info.isTransposeB ? TILINGKEY_TRANS_B : 0;
    tilingKey += info.isWeightNz ? TILINGKEY_WEIGHT_NZ : 0;
    context->SetTilingKey(tilingKey);

    // NOTE(review): "%d" with a uint64_t argument is a printf-style format
    // mismatch (undefined behavior if OP_LOGD ever expands to printf);
    // this should be "%lu". Left unchanged here — flagging for follow-up.
    OP_LOGD(K_INNER_DEBUG, "tilingKey=%d", tilingKey);

    // Delegate tiling of the fused MoeInitRoutingQuantV2 stage.
    optiling::MoeInitRoutingQuantV2TilingBase moeInitRoutingQuantV2TilingBase;
    int64_t inuptXDtypeSize = sizeof(int16_t);  // fp16/bf16 element size
    int64_t scaleDim0 = 0;
    // NOTE(review): hard-coded UB size; the platform UB size queried above
    // (info.totalUbSize) is not used here — confirm this is intentional.
    int64_t ubSize = 196352;
    int64_t expertCapacity = 0;
    int64_t expertNum = info.expertPerRank * info.worldSize;
    int64_t activeNum = 0;
    int64_t dropPadMode = 0;
    int64_t expertTokensCountOrCumsumFlag = 2;
    bool expertTokensBeforeCapacityFlag = false;
    int64_t quantMode = 1;
    uint32_t aivNumInitRouting = 2 * BLOCK_NUM;
    moeInitRoutingQuantV2TilingBase.DoTiling(info.M, info.K, info.topK, expertCapacity, expertNum, activeNum, dropPadMode,
        expertTokensCountOrCumsumFlag, expertTokensBeforeCapacityFlag, inuptXDtypeSize, quantMode, scaleDim0, aivNumInitRouting, ubSize);
    uint64_t initRoutingQuantTilingKey = moeInitRoutingQuantV2TilingBase.tilingKey_;
    size_t initRoutingWorkspace = moeInitRoutingQuantV2TilingBase.workspaceSize_;

    // Copy the sub-op tiling into our tiling blob. NOTE(review): the member
    // assignments below are redundant with the whole-struct copy on the first
    // line (same source and destination fields) — presumably defensive.
    tilingData->cocTiling.moeInitRoutingQuantV2TilingData = moeInitRoutingQuantV2TilingBase.quantTilingData;
    tilingData->cocTiling.moeInitRoutingQuantV2TilingData.vbsComputeParamsOp = moeInitRoutingQuantV2TilingBase.quantTilingData.vbsComputeParamsOp;
    tilingData->cocTiling.moeInitRoutingQuantV2TilingData.vmsMiddleComputeParamsOp = moeInitRoutingQuantV2TilingBase.quantTilingData.vmsMiddleComputeParamsOp;
    tilingData->cocTiling.moeInitRoutingQuantV2TilingData.sortOutComputeParamsOp = moeInitRoutingQuantV2TilingBase.quantTilingData.sortOutComputeParamsOp;
    tilingData->cocTiling.moeInitRoutingQuantV2TilingData.srcToDstComputeParamsOp = moeInitRoutingQuantV2TilingBase.quantTilingData.srcToDstComputeParamsOp;
    tilingData->cocTiling.moeInitRoutingQuantV2TilingData.srcToDstCapacityComputeParamsOp = moeInitRoutingQuantV2TilingBase.quantTilingData.srcToDstCapacityComputeParamsOp;
    tilingData->cocTiling.moeInitRoutingQuantV2TilingData.gatherOutComputeParamsOp = moeInitRoutingQuantV2TilingBase.quantTilingData.gatherOutComputeParamsOp;
    tilingData->cocTiling.initRoutingQuantTilingKey = initRoutingQuantTilingKey;

    // 4. workspace
    size_t *workSpaces = context->GetWorkspaceSizes(1);
    OP_TILING_CHECK(workSpaces == nullptr, OP_LOGE(nodeName, "workSpaces is nullptr."),
                    return ge::GRAPH_FAILED);

    // Second-matmul dimensions: w2 maps N/2 back to K.
    uint32_t n2 = info.K;
    uint32_t k2 = info.N / 2;

    // Workspace layout: routing indices (M rounded up to 256, times topK),
    // per-rank expert counters (x3), combine scales (x2), and the larger of
    // the two intermediate activation buffers for each matmul stage.
    uint64_t cocWorkspace = (info.M + 256 - 1) / 256 * 256 * info.topK *sizeof(int32_t) +
        info.worldSize * info.worldSize * info.expertPerRank * sizeof(int32_t) * 3 +
        info.maxOutputSize * sizeof(float) * 2 +
        std::max(info.maxOutputSize * info.N * sizeof(int16_t), info.maxOutputSize * n2 * sizeof(int16_t)) +
        std::max(info.maxOutputSize * info.K * sizeof(int8_t), info.maxOutputSize * k2 * sizeof(int8_t));

    workSpaces[0] = SYSTEM_NEED_WORKSPACE + std::max(cocWorkspace, initRoutingWorkspace);


    // 5. communication: MC2 tiling with AlltoAll (fullmesh intra-level,
    // pairwise inter-level).
    auto attrs = context->GetAttrs();
    auto group = attrs->GetAttrPointer<char>(static_cast<int>(ATTR_GROUP_INDEX));
    uint32_t opType = 8U;
    std::string algConfig = "AlltoAll=level0:fullmesh;level1:pairwise";
    AscendC::Mc2CcTilingConfig mc2CcTilingConfig(group, opType, algConfig);
    mc2CcTilingConfig.GetTiling(tilingData->mc2InitTiling);
    mc2CcTilingConfig.GetTiling(tilingData->mc2CcTiling);

    OP_LOGI(nodeName, "Leave DispatchFFNCombine tiling func.");
    return ge::GRAPH_SUCCESS;
}
|
||||
|
||||
// Registered tiling entry point; forwards to the implementation above.
static ge::graphStatus DispatchFFNCombineTilingFunc(gert::TilingContext* context)
{
    return DispatchFFNCombineTilingFuncImpl(context);
}
|
||||
|
||||
// Compile info is unused by this op; an empty struct satisfies the registry.
struct DispatchFFNCombineCompileInfo {};
// Tiling-parse hook: nothing to parse for this op.
ge::graphStatus TilingParseForDispatchFFNCombine(gert::TilingParseContext *context)
{
    (void)context;
    return ge::GRAPH_SUCCESS;
}

IMPL_OP_OPTILING(DispatchFFNCombine)
    .Tiling(DispatchFFNCombineTilingFunc)
    .TilingParse<DispatchFFNCombineCompileInfo>(TilingParseForDispatchFFNCombine);
}  // namespace optiling
|
||||
47
csrc/dispatch_ffn_combine/op_host/error_log.h
Normal file
47
csrc/dispatch_ffn_combine/op_host/error_log.h
Normal file
@@ -0,0 +1,47 @@
|
||||
#ifndef OPS_BUILT_IN_OP_TILING_ERROR_LOG_H_
#define OPS_BUILT_IN_OP_TILING_ERROR_LOG_H_

// Minimal printf-based logging/check macros for the op tiling code.
#include <cstdio>   // fix: the macros below expand to printf, but <cstdio> was never included
#include <string>
#include "toolchain/slog.h"

// Info and debug logs compile to nothing in this build.
#define OP_LOGI(opname, ...)
#define OP_LOGW(opname, ...) \
    do { \
        printf("[WARN][%s] ", (opname)); \
        printf(__VA_ARGS__); \
        printf("\n"); \
    } while (0)

// Error log that does not report to the framework's error collector.
#define OP_LOGE_WITHOUT_REPORT(opname, ...) \
    do { \
        printf("[ERRORx][%s] ", (opname)); \
        printf(__VA_ARGS__); \
        printf("\n"); \
    } while (0)

#define OP_LOGE(opname, ...) \
    do { \
        printf("[ERROR][%s] ", (opname)); \
        printf(__VA_ARGS__); \
        printf("\n"); \
    } while (0)

#define OP_LOGD(opname, ...)

namespace optiling {

#define VECTOR_INNER_ERR_REPORT_TILIING(op_name, err_msg, ...) \
    do { \
        OP_LOGE_WITHOUT_REPORT(op_name, err_msg, ##__VA_ARGS__); \
    } while (0)

// If `cond` holds, run `log_func` then execute `expr` (typically a `return`).
#define OP_TILING_CHECK(cond, log_func, expr) \
    do { \
        if (cond) { \
            log_func; \
            expr; \
        } \
    } while (0)
}  // namespace optiling

#endif  // OPS_BUILT_IN_OP_TILING_ERROR_LOG_H_
|
||||
72
csrc/dispatch_ffn_combine/op_host/hcom_topo_info.h
Normal file
72
csrc/dispatch_ffn_combine/op_host/hcom_topo_info.h
Normal file
@@ -0,0 +1,72 @@
|
||||
/* Copyright (c) 2025 Huawei Technologies Co., Ltd.
|
||||
* This file is a part of the CANN Open Software.
|
||||
* Licensed under CANN Open Software License Agreement Version 1.0 (the "License").
|
||||
* Please refer to the License for details. You may not use this file except in compliance with the License.
|
||||
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED,
|
||||
* INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE.
|
||||
* See LICENSE in the root of the software repository for the full text of the License.
|
||||
* ===================================================================================================================*/
|
||||
|
||||
#ifndef METADEF_CXX_INC_EXTERNAL_HCOM_HCOM_TOPO_INFO_H_
|
||||
#define METADEF_CXX_INC_EXTERNAL_HCOM_HCOM_TOPO_INFO_H_
|
||||
|
||||
#include <unordered_map>
|
||||
#include <mutex>
|
||||
|
||||
using Status = int32_t;
|
||||
|
||||
namespace ge {
|
||||
static constexpr uint32_t COMM_MESH = 0b1U;
|
||||
static constexpr uint32_t COMM_SWITCH = (COMM_MESH << 1U);
|
||||
static constexpr uint32_t COMM_RING = (COMM_MESH << 2U);
|
||||
static constexpr uint32_t COMM_PAIRWISE = (COMM_MESH << 3U);
|
||||
class HcomTopoInfo {
|
||||
public:
|
||||
enum class TopoLevel {
|
||||
L0 = 0,
|
||||
L1,
|
||||
MAX,
|
||||
};
|
||||
struct TopoLevelDesc {
|
||||
uint32_t comm_sets;
|
||||
uint32_t rank_size;
|
||||
};
|
||||
using TopoDescs = TopoLevelDesc[static_cast<int32_t>(TopoLevel::MAX)];
|
||||
struct TopoInfo {
|
||||
int64_t rank_size;
|
||||
void *notify_handle;
|
||||
TopoDescs topo_level_descs;
|
||||
};
|
||||
static HcomTopoInfo &Instance();
|
||||
bool TopoInfoHasBeenSet(const char_t *group);
|
||||
bool TryGetGroupTopoInfo(const char_t *group, TopoInfo &info);
|
||||
Status SetGroupTopoInfo(const char_t *group, const TopoInfo &info);
|
||||
Status GetGroupRankSize(const char_t *group, int64_t &rank_size);
|
||||
TopoDescs *GetGroupTopoDesc(const char_t *group);
|
||||
Status GetGroupNotifyHandle(const char_t *group, void *¬ify_handle);
|
||||
void UnsetGroupTopoInfo(const char_t *group) {
|
||||
const std::lock_guard<std::mutex> lock(mutex_);
|
||||
(void) rank_info_.erase(group);
|
||||
}
|
||||
|
||||
Status SetGroupOrderedStream(const char_t *group, void *stream);
|
||||
Status GetGroupOrderedStream(const char_t *group, void *&stream);
|
||||
void UnsetGroupOrderedStream(const char_t *group) {
|
||||
const std::lock_guard<std::mutex> lock(mutex_);
|
||||
(void) group_to_ordered_stream_.erase(group);
|
||||
};
|
||||
|
||||
Status SetGroupOrderedStream(const int32_t device_id, const char_t *group, void *stream);
|
||||
Status GetGroupOrderedStream(const int32_t device_id, const char_t *group, void *&stream);
|
||||
void UnsetGroupOrderedStream(const int32_t device_id, const char_t *group);
|
||||
private:
|
||||
HcomTopoInfo() = default;
|
||||
~HcomTopoInfo() = default;
|
||||
std::unordered_map<std::string, TopoInfo> rank_info_;
|
||||
std::mutex mutex_;
|
||||
std::unordered_map<std::string, void*> group_to_ordered_stream_; // 通信域保序流
|
||||
std::unordered_map<int32_t, std::unordered_map<std::string, void*>> device_id_to_group_to_ordered_stream_; // 通信域保序流
|
||||
};
|
||||
}
|
||||
|
||||
#endif // METADEF_CXX_INC_EXTERNAL_HCOM_HCOM_TOPO_INFO_H_
|
||||
9
csrc/dispatch_ffn_combine/op_host/tiling_args.h
Normal file
9
csrc/dispatch_ffn_combine/op_host/tiling_args.h
Normal file
@@ -0,0 +1,9 @@
|
||||
#ifndef TILING_ARGS_H
#define TILING_ARGS_H
#include <cstdint>

namespace Moe {
// Byte offsets into the HCCL window used for software synchronization
// between the dispatch and combine phases. NOTE(review): these values must
// match the device kernel's expectations — confirm against the op_kernel
// sources before changing either constant.
constexpr uint64_t COMBINE_STATE_WIN_OFFSET = 3U * 1024UL * 1024UL;
constexpr uint64_t NOTIFY_DISPATCH_WIN_OFFSET = 204U * 1024UL * 1024UL;
} // namespace Moe
#endif // TILING_ARGS_H
|
||||
Reference in New Issue
Block a user