add dispatch_ffn_combine_bf16 (#5866)
### What this PR does / why we need it?
add dispatch_ffn_combine_bf16
- vLLM version: v0.13.0
- vLLM main:
bde38c11df
---------
Signed-off-by: guanguan0308 <1546542263@qq.com>
This commit is contained in:
66
csrc/dispatch_ffn_combine_bf16/op_host/CMakeLists.txt
Normal file
66
csrc/dispatch_ffn_combine_bf16/op_host/CMakeLists.txt
Normal file
@@ -0,0 +1,66 @@
|
||||
# Copyright (c) 2025 Huawei Technologies Co., Ltd.
# This file is a part of the CANN Open Software.
# Licensed under CANN Open Software License Agreement Version 1.0 (the "License").
# Please refer to the License for details. You may not use this file except in compliance with the License.
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE.
# See LICENSE in the root of the software repository for the full text of the License.
# ======================================================================================================================

# Extra -I options for the AscendC kernel compiler.  The CANN package layout
# differs between toolkit versions/architectures, so probe the known include
# locations in order of preference and use the first one that exists.
set(_DISPATCH_FFN_INC_OPTS)
if(EXISTS ${ASCEND_CANN_PACKAGE_PATH}/aarch64-linux/ascendc/include)
    list(APPEND _DISPATCH_FFN_INC_OPTS -I${ASCEND_CANN_PACKAGE_PATH}/aarch64-linux/ascendc/include)
elseif(EXISTS ${ASCEND_CANN_PACKAGE_PATH}/arm64-linux/ascendc/include)
    list(APPEND _DISPATCH_FFN_INC_OPTS -I${ASCEND_CANN_PACKAGE_PATH}/arm64-linux/ascendc/include)
elseif(EXISTS ${ASCEND_CANN_PACKAGE_PATH}/${CMAKE_SYSTEM_PROCESSOR}-linux/ascendc/include)
    list(APPEND _DISPATCH_FFN_INC_OPTS -I${ASCEND_CANN_PACKAGE_PATH}/${CMAKE_SYSTEM_PROCESSOR}-linux/ascendc/include)
endif()
# Optional vendored CATLASS template headers.
if(EXISTS ${CMAKE_SOURCE_DIR}/third_party/catlass/include)
    list(APPEND _DISPATCH_FFN_INC_OPTS -I${CMAKE_SOURCE_DIR}/third_party/catlass/include)
endif()

# Kernel compile options for this op.
# NOTE(review): -Werror is hardcoded unconditionally; consider guarding it
# behind an option() if third-party toolchains emit extra warnings.
add_ops_compile_options(
    OP_NAME DispatchFFNCombineBF16
    OPTIONS --cce-auto-sync=on
            -Wno-deprecated-declarations
            -Werror
            -DHCCL_COMM
            ${_DISPATCH_FFN_INC_OPTS}
)

# Host-side sources: op definition, aclnn wrapper, tiling and proto.
target_sources(op_host_aclnnInner PRIVATE
    dispatch_ffn_combine_bf16_def.cpp
)

target_sources(opapi PRIVATE
    aclnn_dispatch_ffn_combine_bf16.cpp
)

if(NOT BUILD_OPEN_PROJECT)
    target_sources(aclnn_ops_train PRIVATE
        aclnn_dispatch_ffn_combine_bf16.cpp
    )

    target_sources(aclnn_ops_infer PRIVATE
        aclnn_dispatch_ffn_combine_bf16.cpp
    )
endif()

target_sources(optiling PRIVATE
    dispatch_ffn_combine_bf16_tiling.cpp
)

target_include_directories(optiling PRIVATE
    ${CMAKE_CURRENT_SOURCE_DIR}
    ${CMAKE_CURRENT_SOURCE_DIR}/../op_kernel
)

target_sources(opsproto PRIVATE
    dispatch_ffn_combine_bf16_proto.cpp
)

# Install the public aclnn header.  Variable renamed from _GMM_Aclnn_header
# (a copy-paste from the GMM op) for consistency with _DISPATCH_FFN_INC_OPTS.
file(GLOB _DISPATCH_FFN_ACLNN_HEADER "${CMAKE_CURRENT_SOURCE_DIR}/aclnn_dispatch_ffn_combine_bf16.h")

install(FILES ${_DISPATCH_FFN_ACLNN_HEADER}
    DESTINATION ${ACLNN_INC_INSTALL_DIR} OPTIONAL
)
|
||||
@@ -0,0 +1,84 @@
|
||||
/**
|
||||
* Copyright (c) 2025 Huawei Technologies Co., Ltd.
|
||||
* This file is a part of the CANN Open Software.
|
||||
* Licensed under CANN Open Software License Agreement Version 1.0 (the "License").
|
||||
* Please refer to the License for details. You may not use this file except in compliance with the License.
|
||||
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED,
|
||||
* INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE.
|
||||
* See LICENSE in the root of the software repository for the full text of the License.
|
||||
*/
|
||||
#include "aclnn_dispatch_ffn_combine_bf16.h"
|
||||
#include <algorithm>
|
||||
// #include "aclnn_kernels/common/op_error_check.h"
|
||||
// #include "opdev/op_log.h"
|
||||
// #include "opdev/common_types.h"
|
||||
// #include "opdev/platform.h"
|
||||
// #include "ophost/matmul_util.h"
|
||||
#include <unistd.h>
|
||||
#include <vector>
|
||||
#include <string>
|
||||
#include <iostream>
|
||||
#include <fcntl.h>
|
||||
#include <sys/types.h>
|
||||
#include <sys/stat.h>
|
||||
#include <sys/file.h>
|
||||
#include <climits>
|
||||
#include "../op_host/error_log.h"
|
||||
// using namespace op;
|
||||
|
||||
// using namespace op;
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
// Limits kept for parity with other MC2 aclnn adapters; not all are used in
// this translation unit.
static constexpr size_t TWO_DIMS = 2;
static constexpr int64_t KVALUE_MIN = 256;
static constexpr int64_t KVALUE_MAX = 65535;
static constexpr size_t HCCL_GROUP_NAME_MAX = 128U;
// Which engine services the HCCL communication issued by this op.
enum NnopbaseHcclServerType {
    NNOPBASE_HCCL_SERVER_TYPE_AICPU = 0,  // communication driven by AI CPU
    NNOPBASE_HCCL_SERVER_TYPE_MTE,        // communication driven by the MTE engine
    NNOPBASE_HCCL_SERVER_TYPE_END
};

// Inner entry points for DispatchFFNCombineBF16, implemented by the generated
// aclnnInner layer.  Note the two extra parameters (transB, weightNz) that the
// public wrappers below fix to constants.
extern aclnnStatus aclnnInnerDispatchFFNCombineBF16GetWorkspaceSize(const aclTensor* x, const aclTensorList* weight1, const aclTensorList* weight2,
                                                                    const aclTensor* expertId, const aclTensorList* scale1, const aclTensorList* scale2,
                                                                    const aclTensor* probs,
                                                                    const char* group, int64_t maxOutputSize,
                                                                    bool transB, bool weightNz,
                                                                    const aclTensor* out,
                                                                    uint64_t* workspaceSize, aclOpExecutor** executor);
extern aclnnStatus aclnnInnerDispatchFFNCombineBF16(void *workspace, uint64_t workspaceSize,
                                                    aclOpExecutor *executor, aclrtStream stream);
// Weak symbol: only present when the nnopbase runtime supports selecting the
// HCCL server type; callers must null-check it before invoking.
extern "C" void __attribute__((weak)) NnopbaseSetHcclServerType(void *executor, NnopbaseHcclServerType sType);
|
||||
|
||||
|
||||
|
||||
// Public aclnn two-phase entry: computes workspace requirements and builds the
// executor for DispatchFFNCombineBF16.  Thin adapter over the generated inner
// call with the weight layout fixed: no transpose, NZ (FRACTAL_NZ) weights.
aclnnStatus aclnnDispatchFFNCombineBF16GetWorkspaceSize(const aclTensor* x, const aclTensorList* weight1, const aclTensorList* weight2,
                                                        const aclTensor* expertId, const aclTensorList* scale1, const aclTensorList* scale2,
                                                        const aclTensor* probs,
                                                        const char* group, int64_t maxOutputSize,
                                                        const aclTensor* out,
                                                        uint64_t* workspaceSize, aclOpExecutor** executor)
{
    const bool innerTransB = false;   // weights are consumed as-is (no transpose)
    const bool innerWeightNz = true;  // weights expected in FRACTAL_NZ layout
    return aclnnInnerDispatchFFNCombineBF16GetWorkspaceSize(x, weight1, weight2, expertId, scale1, scale2, probs, group,
                                                            maxOutputSize, innerTransB, innerWeightNz,
                                                            out, workspaceSize, executor);
}
|
||||
|
||||
// Public aclnn execute entry.  If the runtime exposes the weak symbol
// NnopbaseSetHcclServerType, route this op's HCCL traffic through the MTE
// server before launching the inner kernel on the given stream.
aclnnStatus aclnnDispatchFFNCombineBF16(void* workspace, uint64_t workspaceSize, aclOpExecutor *executor, aclrtStream stream)
{
    if (NnopbaseSetHcclServerType != nullptr) {
        NnopbaseSetHcclServerType(executor, NNOPBASE_HCCL_SERVER_TYPE_MTE);
    }
    return aclnnInnerDispatchFFNCombineBF16(workspace, workspaceSize, executor, stream);
}
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
@@ -0,0 +1,39 @@
|
||||
/**
 * Copyright (c) 2025 Huawei Technologies Co., Ltd.
 * This file is a part of the CANN Open Software.
 * Licensed under CANN Open Software License Agreement Version 1.0 (the "License").
 * Please refer to the License for details. You may not use this file except in compliance with the License.
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED,
 * INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE.
 * See LICENSE in the root of the software repository for the full text of the License.
 */

#ifndef OP_API_INC_DISPATCH_FFN_COMBINE_BF16_
#define OP_API_INC_DISPATCH_FFN_COMBINE_BF16_

#include <string>

#include "aclnn/aclnn_base.h"
#include "hccl/hccl.h"
#include "hccl/hccl_types.h"

#ifdef __cplusplus
extern "C" {
#endif

/**
 * @brief First phase of the two-phase aclnn call for DispatchFFNCombineBF16:
 * computes the required workspace size and builds the op executor.
 *
 * @param x             input activation tensor
 * @param weight1       first FFN weight tensor list (per local expert)
 * @param weight2       second FFN weight tensor list (per local expert)
 * @param expertId      per-token expert index tensor
 * @param scale1        quantization scales for the first matmul (may be empty)
 * @param scale2        quantization scales for the second matmul (may be empty)
 * @param probs         router probability tensor
 * @param group         HCCL communication group name
 * @param maxOutputSize upper bound on dispatched token count used for
 *                      workspace sizing (see tiling)
 * @param out           output tensor
 * @param workspaceSize [out] required device workspace in bytes
 * @param executor      [out] executor consumed by the execute phase
 */
__attribute__((visibility("default"))) aclnnStatus aclnnDispatchFFNCombineBF16GetWorkspaceSize(const aclTensor* x, const aclTensorList* weight1, const aclTensorList* weight2,
                                                                                               const aclTensor* expertId, const aclTensorList* scale1, const aclTensorList* scale2,
                                                                                               const aclTensor* probs,
                                                                                               const char* group, int64_t maxOutputSize,
                                                                                               const aclTensor* out,
                                                                                               uint64_t* workspaceSize, aclOpExecutor** executor);

/**
 * @brief Second phase: launches DispatchFFNCombineBF16 on the given stream
 * using the workspace and executor produced by the first phase.
 */
__attribute__((visibility("default"))) aclnnStatus aclnnDispatchFFNCombineBF16(void* workspace, uint64_t workspaceSize, aclOpExecutor* executor,
                                                                               aclrtStream stream);

#ifdef __cplusplus
}
#endif

#endif  // OP_API_INC_DISPATCH_FFN_COMBINE_BF16_
|
||||
@@ -0,0 +1,88 @@
|
||||
/**
|
||||
* Copyright (c) 2025 Huawei Technologies Co., Ltd.
|
||||
* This file is a part of the CANN Open Software.
|
||||
* Licensed under CANN Open Software License Agreement Version 1.0 (the "License").
|
||||
* Please refer to the License for details. You may not use this file except in compliance with the License.
|
||||
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED,
|
||||
* INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE.
|
||||
* See LICENSE in the root of the software repository for the full text of the License.
|
||||
*/
|
||||
|
||||
/*!
|
||||
* \file dispatch_ffn_combine_bf16_def.cpp
|
||||
* \brief
|
||||
*/
|
||||
#include "register/op_def_registry.h"
|
||||
|
||||
namespace ops {
// Graph-engine registration for the DispatchFFNCombineBF16 op.
// Each DataType/Format list below enumerates four supported combinations:
//   {fp16 + ND weights, bf16 + ND weights, fp16 + NZ weights, bf16 + NZ weights}.
class DispatchFFNCombineBF16 : public OpDef {
public:
    explicit DispatchFFNCombineBF16(const char *name) : OpDef(name) {
        // Input activations.
        this->Input("a")
            .ParamType(REQUIRED)
            .DataType({ge::DT_FLOAT16, ge::DT_BF16, ge::DT_FLOAT16, ge::DT_BF16})
            .Format({ge::FORMAT_ND, ge::FORMAT_ND, ge::FORMAT_ND, ge::FORMAT_ND})
            .UnknownShapeFormat({ge::FORMAT_ND, ge::FORMAT_ND, ge::FORMAT_ND, ge::FORMAT_ND});
        // First FFN weight list (dynamic: one tensor per local expert, or a
        // single packed tensor — tiling derives expertPerRank from the list length).
        this->Input("w1")
            .ParamType(DYNAMIC)
            .DataType({ge::DT_FLOAT16, ge::DT_BF16, ge::DT_FLOAT16, ge::DT_BF16})
            .Format({ge::FORMAT_ND, ge::FORMAT_ND, ge::FORMAT_FRACTAL_NZ, ge::FORMAT_FRACTAL_NZ})
            .UnknownShapeFormat({ge::FORMAT_ND, ge::FORMAT_ND, ge::FORMAT_FRACTAL_NZ, ge::FORMAT_FRACTAL_NZ})
            .IgnoreContiguous();
        // Second FFN weight list.
        this->Input("w2")
            .ParamType(DYNAMIC)
            .DataType({ge::DT_FLOAT16, ge::DT_BF16, ge::DT_FLOAT16, ge::DT_BF16})
            .Format({ge::FORMAT_ND, ge::FORMAT_ND, ge::FORMAT_FRACTAL_NZ, ge::FORMAT_FRACTAL_NZ})
            .UnknownShapeFormat({ge::FORMAT_ND, ge::FORMAT_ND, ge::FORMAT_FRACTAL_NZ, ge::FORMAT_FRACTAL_NZ})
            .IgnoreContiguous();
        // Per-token expert indices; tiling reads topK from dim 1.
        this->Input("expertIdx")
            .ParamType(REQUIRED)
            .DataType({ge::DT_INT32, ge::DT_INT32, ge::DT_INT32, ge::DT_INT32})
            .Format({ge::FORMAT_ND, ge::FORMAT_ND, ge::FORMAT_ND, ge::FORMAT_ND})
            .UnknownShapeFormat({ge::FORMAT_ND, ge::FORMAT_ND, ge::FORMAT_ND, ge::FORMAT_ND});
        // Quantization scales for the two matmuls (optional dynamic inputs).
        this->Input("scale1")
            .ParamType(DYNAMIC)
            .DataType({ge::DT_INT64, ge::DT_INT64, ge::DT_INT64, ge::DT_INT64})
            .Format({ge::FORMAT_ND, ge::FORMAT_ND, ge::FORMAT_ND, ge::FORMAT_ND})
            .UnknownShapeFormat({ge::FORMAT_ND, ge::FORMAT_ND, ge::FORMAT_ND, ge::FORMAT_ND});
        this->Input("scale2")
            .ParamType(DYNAMIC)
            .DataType({ge::DT_INT64, ge::DT_INT64, ge::DT_INT64, ge::DT_INT64})
            .Format({ge::FORMAT_ND, ge::FORMAT_ND, ge::FORMAT_ND, ge::FORMAT_ND})
            .UnknownShapeFormat({ge::FORMAT_ND, ge::FORMAT_ND, ge::FORMAT_ND, ge::FORMAT_ND});
        // Router probabilities (float32); presumably weights the combine step —
        // TODO(review): confirm against the kernel.
        this->Input("probs")
            .ParamType(REQUIRED)
            .DataType({ge::DT_FLOAT, ge::DT_FLOAT, ge::DT_FLOAT, ge::DT_FLOAT})
            .Format({ge::FORMAT_ND, ge::FORMAT_ND, ge::FORMAT_ND, ge::FORMAT_ND})
            .UnknownShapeFormat({ge::FORMAT_ND, ge::FORMAT_ND, ge::FORMAT_ND, ge::FORMAT_ND});

        // Output
        this->Output("out")
            .ParamType(REQUIRED)
            .DataType({ge::DT_FLOAT16, ge::DT_BF16, ge::DT_FLOAT16, ge::DT_BF16})
            .Format({ ge::FORMAT_ND, ge::FORMAT_ND, ge::FORMAT_ND, ge::FORMAT_ND})
            .UnknownShapeFormat({ge::FORMAT_ND, ge::FORMAT_ND, ge::FORMAT_ND, ge::FORMAT_ND});

        // HCCL communication group name.
        this->Attr("group").AttrType(REQUIRED).String();
        // NOTE(review): attr index 1 ("M") is read by tiling as maxOutputSize
        // and the aclnn adapter passes maxOutputSize here — confirm the name.
        this->Attr("M").AttrType(OPTIONAL).Int();
        this->Attr("transB").AttrType(OPTIONAL).Bool(false);
        // NOTE(review): default is false, but the aclnn adapter always passes
        // weightNz = true — confirm the intended default.
        this->Attr("weightNz").AttrType(OPTIONAL).Bool(false);

        OpAICoreConfig aicore_config;
        aicore_config.DynamicCompileStaticFlag(true)
            .DynamicFormatFlag(true)
            .DynamicRankSupportFlag(true)
            .DynamicShapeSupportFlag(true)
            .NeedCheckSupportFlag(false)
            .PrecisionReduceFlag(true)
            .ExtendCfgInfo("aclnnSupport.value", "support_aclnn")
            .ExtendCfgInfo("jitCompile.flag", "static_false")
            .ExtendCfgInfo("multiKernelSupportDynamicGraph.value", "multi_kernel");
        // Currently registered only for ascend910_93.
        this->AICore().AddConfig("ascend910_93", aicore_config);
        // this->AICore().AddConfig("ascend910b", aicore_config);
        // Bind the MC2 communication domain to the "group" attribute.
        this->MC2().HcclGroup("group");
    }
};

OP_ADD(DispatchFFNCombineBF16);
}  // namespace ops
|
||||
@@ -0,0 +1,40 @@
|
||||
/**
|
||||
* Copyright (c) 2025 Huawei Technologies Co., Ltd.
|
||||
* This file is a part of the CANN Open Software.
|
||||
* Licensed under CANN Open Software License Agreement Version 1.0 (the "License").
|
||||
* Please refer to the License for details. You may not use this file except in compliance with the License.
|
||||
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED,
|
||||
* INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE.
|
||||
* See LICENSE in the root of the software repository for the full text of the License.
|
||||
*/
|
||||
|
||||
/*!
|
||||
* \file dispatch_ffn_combine_bf16_proto.cpp
|
||||
* \brief
|
||||
*/
|
||||
#include <graph/utils/type_utils.h>
|
||||
#include <register/op_impl_registry.h>
|
||||
// #include "../../common/ophost/op_util.h"
|
||||
// #include "../../common/ophost/hcom_topo_info.h"
|
||||
// #include "log/ops_log.h"
|
||||
|
||||
using namespace ge;
namespace ops {
// Attribute / dim-count constants; currently unused by the stub
// implementations below but kept for the full infer implementation.
const size_t ATTR_GROUP = 0;
const size_t ATTR_RANK_SIZE = 1;
const size_t SUPPORT_DIM_SIZE = 2;

// Shape-inference stub: leaves output shapes untouched and reports success.
// NOTE(review): a real implementation should derive the out shape from the
// inputs — confirm whether graph mode depends on this.
static ge::graphStatus InferShapeDispatchFFNCombineBF16(gert::InferShapeContext* context) {
    return ge::GRAPH_SUCCESS;
}

// Data-type inference stub: output dtype propagation is disabled (see the
// commented-out lines); reports success unconditionally.
static ge::graphStatus InferDataTypeDispatchFFNCombineBF16(gert::InferDataTypeContext* context) {
    // auto d_type = context->GetInputDataType(0);
    // context->SetOutputDataType(0, d_type);
    return ge::GRAPH_SUCCESS;
}

IMPL_OP_INFERSHAPE(DispatchFFNCombineBF16)
    .InferShape(InferShapeDispatchFFNCombineBF16)
    .InferDataType(InferDataTypeDispatchFFNCombineBF16);
}  // namespace ops
|
||||
@@ -0,0 +1,278 @@
|
||||
/**
|
||||
* Copyright (c) 2025 Huawei Technologies Co., Ltd.
|
||||
* This file is a part of the CANN Open Software.
|
||||
* Licensed under CANN Open Software License Agreement Version 1.0 (the "License").
|
||||
* Please refer to the License for details. You may not use this file except in compliance with the License.
|
||||
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED,
|
||||
* INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE.
|
||||
* See LICENSE in the root of the software repository for the full text of the License.
|
||||
*/
|
||||
/*!
|
||||
* \file dispatch_ffn_combine_bf16_tiling.cpp
|
||||
* \brief
|
||||
*/
|
||||
#include "vector"
|
||||
#include "register/tilingdata_base.h"
|
||||
#include "tiling/tiling_api.h"
|
||||
#include "error_log.h"
|
||||
#include "hcom_topo_info.h"
|
||||
#include "register/op_def_registry.h"
|
||||
#include "dispatch_ffn_combine_bf16_tiling.h"
|
||||
#include <vector>
|
||||
#include <map>
|
||||
#include <algorithm>
|
||||
#include "moe_init_routing_v2/moe_init_routing_v2_tiling.h"
|
||||
|
||||
using namespace AscendC;
|
||||
using namespace ge;
|
||||
|
||||
namespace {
// Log tag used by the OP_LOG* macros in this translation unit.
const char *K_INNER_DEBUG = "DispatchFFNCombineBF16 Tiling Debug";
// Attribute order as registered in dispatch_ffn_combine_bf16_def.cpp:
// "group", "M" (read as maxOutputSize), "transB", "weightNz".
constexpr uint32_t ATTR_GROUP_INDEX = 0;
constexpr uint32_t ATTR_MAX_OUTPUT_SIZE_INDEX = 1;
constexpr uint32_t ATTR_IS_TRANS_B = 2;
constexpr uint32_t ATTR_WEIGHT_NZ = 3;
// Tiling key = INIT_TILINGKEY + 1 if transB + 10 if weightNz.
constexpr uint64_t INIT_TILINGKEY = 1000000;
constexpr uint64_t TILINGKEY_TRANS_B = 1U;
constexpr uint64_t TILINGKEY_WEIGHT_NZ = 10;
// Input indices as registered in the op definition (a, w1, w2, expertIdx).
constexpr uint32_t X_INDEX = 0;
constexpr uint32_t WEIGHT_INDEX = 1;
constexpr uint32_t WEIGHT2_INDEX = 2;
constexpr uint32_t EXPERTID_INDEX = 3;
constexpr uint32_t BLOCK_NUM = 20;
// Reserved system workspace (16 MiB) always added to the op workspace.
constexpr uint32_t SYSTEM_NEED_WORKSPACE = 16 * 1024 * 1024;
}
|
||||
|
||||
namespace optiling {
|
||||
|
||||
// Integer ceiling division: smallest q with q * div >= num for positive
// inputs.  Returns 0 when div == 0 to avoid a divide-by-zero trap.
static int32_t CeilDev(int32_t num, int32_t div)
{
    return (div == 0) ? 0 : (num + div - 1) / div;
}
|
||||
|
||||
// Reads the op attributes (group, maxOutputSize, transB, weightNz) into `info`
// and resolves the communication world size from the HCCL topo registry.
// Returns GRAPH_FAILED when a required attribute is missing or empty.
static ge::graphStatus DispatchFFNCombineBF16CheckAttrAndSetTiling(gert::TilingContext *context, DispatchFFNCombineBF16Info& info)
{
    auto attrs = context->GetAttrs();
    OP_TILING_CHECK(attrs == nullptr, OP_LOGE(K_INNER_DEBUG, "attrs is null."), return ge::GRAPH_FAILED);

    auto groupPtr = attrs->GetAttrPointer<char>(static_cast<int>(ATTR_GROUP_INDEX));
    auto maxOutputSizePtr = attrs->GetAttrPointer<int>(ATTR_MAX_OUTPUT_SIZE_INDEX);
    auto is_trans_b = attrs->GetAttrPointer<bool>(ATTR_IS_TRANS_B);
    auto weight_nz = attrs->GetAttrPointer<bool>(ATTR_WEIGHT_NZ);
    OP_TILING_CHECK(groupPtr == nullptr || strlen(groupPtr) == 0,
                    OP_LOGE(K_INNER_DEBUG, "group is invalid."), return GRAPH_FAILED);
    // Fix: maxOutputSizePtr was dereferenced below without a null check while
    // the neighbouring attribute pointers were checked; a missing optional
    // attribute would have crashed tiling.
    OP_TILING_CHECK(maxOutputSizePtr == nullptr,
                    OP_LOGE(K_INNER_DEBUG, "maxOutputSize is invalid."), return GRAPH_FAILED);
    OP_TILING_CHECK(is_trans_b == nullptr,
                    OP_LOGE(K_INNER_DEBUG, "is_trans_b is invalid."), return GRAPH_FAILED);
    OP_TILING_CHECK(weight_nz == nullptr,
                    OP_LOGE(K_INNER_DEBUG, "weight_nz is invalid."), return GRAPH_FAILED);

    info.maxOutputSize = *maxOutputSizePtr;
    info.isTransposeB = *is_trans_b;
    info.isWeightNz = *weight_nz;

    // Best-effort lookup (return value deliberately ignored).  Fix: initialize
    // rankSize so a failed lookup does not propagate an indeterminate value.
    int64_t rankSize = 0;
    (void)ge::HcomTopoInfo::Instance().GetGroupRankSize(groupPtr, rankSize);
    info.worldSize = rankSize;

    OP_LOGD(K_INNER_DEBUG, "maxOutputSize=%d ", info.maxOutputSize);
    OP_LOGD(K_INNER_DEBUG, "rankSize=%d ", info.worldSize);

    return ge::GRAPH_SUCCESS;
}
|
||||
|
||||
// Derives the problem dimensions from the input shapes and records them in
// `info`: M/K from x, N from the last dim of the first weight tensor, topK
// from expertIdx dim 1, and the per-rank expert count from the weight list.
static ge::graphStatus DispatchFFNCombineBF16CheckShapeAndSetTiling(gert::TilingContext *context, DispatchFFNCombineBF16Info &info)
{
    const gert::StorageShape *aStorageShape = context->GetInputShape(X_INDEX);
    auto expertIdxTensor = context->GetDynamicInputTensor(EXPERTID_INDEX, 0);
    auto wTensor = context->GetDynamicInputTensor(WEIGHT_INDEX, 0);
    // Fix: these three pointers were dereferenced without null checks (unlike
    // the attribute path); a malformed graph would have crashed tiling.
    OP_TILING_CHECK(aStorageShape == nullptr,
                    OP_LOGE(K_INNER_DEBUG, "x shape is null."), return ge::GRAPH_FAILED);
    OP_TILING_CHECK(expertIdxTensor == nullptr,
                    OP_LOGE(K_INNER_DEBUG, "expertIdx tensor is null."), return ge::GRAPH_FAILED);
    OP_TILING_CHECK(wTensor == nullptr,
                    OP_LOGE(K_INNER_DEBUG, "weight tensor is null."), return ge::GRAPH_FAILED);

    uint32_t M = aStorageShape->GetStorageShape().GetDim(0);
    uint32_t K = aStorageShape->GetStorageShape().GetDim(1);

    uint32_t wTensorDims = wTensor->GetOriginShape().GetDimNum();
    uint32_t N = wTensor->GetStorageShape().GetDim(wTensorDims - 1);

    uint32_t topK = expertIdxTensor->GetStorageShape().GetDim(1);

    // Count how many tensors the dynamic w1 list carries (index 0 exists; probe
    // upwards until the first missing index).
    uint32_t listLen = 0;
    while (context->GetDynamicInputTensor(WEIGHT_INDEX, ++listLen) != nullptr) {
    }

    // A single weight tensor packs all local experts along dim 0; otherwise
    // the list length itself is the per-rank expert count.
    uint32_t expertPerRank;
    if (listLen == 1) {
        expertPerRank = wTensor->GetStorageShape().GetDim(0);
    } else {
        expertPerRank = listLen;
    }

    info.M = M;
    info.N = N;
    info.K = K;
    info.expertPerRank = expertPerRank;
    info.topK = topK;
    info.listLen = listLen;
    OP_LOGD(K_INNER_DEBUG, "M=%d ", info.M);
    OP_LOGD(K_INNER_DEBUG, "K=%d ", info.K);
    OP_LOGD(K_INNER_DEBUG, "N=%d ", info.N);
    OP_LOGD(K_INNER_DEBUG, "expertPerRank=%d ", info.expertPerRank);
    OP_LOGD(K_INNER_DEBUG, "topK=%d ", info.topK);
    OP_LOGD(K_INNER_DEBUG, "listLen=%d ", info.listLen);

    return ge::GRAPH_SUCCESS;
}
|
||||
|
||||
// Queries the AIV core count and UB memory size from the AscendC platform and
// records them in `info`.  Always succeeds.
static ge::graphStatus DispatchFFNCombineBF16GetPlatformInfoAndSetTiling(gert::TilingContext *context, DispatchFFNCombineBF16Info& info)
{
    auto platform = platform_ascendc::PlatformAscendC(context->GetPlatformInfo());
    uint64_t ubBytes = 0U;
    platform.GetCoreMemSize(platform_ascendc::CoreMemType::UB, ubBytes);
    info.aivNum = platform.GetCoreNumAiv();
    info.totalUbSize = ubBytes;

    OP_LOGD(K_INNER_DEBUG, "aivNum=%d", info.aivNum);
    OP_LOGD(K_INNER_DEBUG, "ubSize=%lu", info.totalUbSize);

    return ge::GRAPH_SUCCESS;
}
|
||||
|
||||
// Fills the CoC (fused compute-communication matmul) tiling with fixed,
// hand-tuned values; only the NPU communication split depends on world size.
// NOTE(review): constants below appear tuned for this kernel — confirm against
// the op_kernel expectations before changing.
void SetTilingData(CoCTiling &cocTilingData, DispatchFFNCombineBF16Info &info)
{
    cocTilingData.m0 = 128;            // base tile rows
    cocTilingData.k0 = 256;            // base tile depth
    cocTilingData.n0 = 256;            // base tile cols
    cocTilingData.swizzleDirect = 1;
    cocTilingData.swizzleOffset = 7;
    cocTilingData.ubMoveNum = 16 * 1024;
    cocTilingData.pValue = 1;
    cocTilingData.commNpuSplit = info.worldSize;  // one communication split per rank
    cocTilingData.commDataSplit = 1;
    cocTilingData.lenPerLoop = cocTilingData.m0 * cocTilingData.n0 / 2;  // half a tile per loop
}
|
||||
|
||||
// Main tiling routine: fills DispatchFFNCombineBF16TilingData (attrs, shapes,
// platform info, CoC tiling, MoeInitRoutingV2 sub-tiling, MC2 communication
// tiling), sets blockDim / tilingKey and computes the workspace size.
static ge::graphStatus DispatchFFNCombineBF16TilingFuncImpl(gert::TilingContext *context)
{
    const char *nodeName = context->GetNodeName();
    OP_LOGI(nodeName, "Enter DispatchFFNCombineBF16 tiling func.");

    // 1. tilingData: gather attributes, shapes and platform info into `info`.
    DispatchFFNCombineBF16TilingData *tilingData = context->GetTilingData<DispatchFFNCombineBF16TilingData>();
    OP_TILING_CHECK(tilingData == nullptr, OP_LOGE(nodeName, "tilingData is nullptr."),
                    return ge::GRAPH_FAILED);
    OP_LOGI(nodeName, "DispatchFFNCombineBF16 get tilingData.");
    DispatchFFNCombineBF16Info& info = tilingData->dispatchFFNCombineBF16Info;
    OP_LOGI(nodeName, "DispatchFFNCombineBF16 get tilingData info.");

    OP_TILING_CHECK(DispatchFFNCombineBF16CheckAttrAndSetTiling(context, info) != ge::GRAPH_SUCCESS,
                    OP_LOGE(context->GetNodeName(), "DispatchFFNCombineBF16 CheckAttrAndSetTiling Failed"),
                    return ge::GRAPH_FAILED);
    OP_TILING_CHECK(DispatchFFNCombineBF16CheckShapeAndSetTiling(context, info) != ge::GRAPH_SUCCESS,
                    OP_LOGE(context->GetNodeName(), "DispatchFFNCombineBF16 CheckShapeAndSetTiling Failed"),
                    return ge::GRAPH_FAILED);
    OP_TILING_CHECK(DispatchFFNCombineBF16GetPlatformInfoAndSetTiling(context, info) != ge::GRAPH_SUCCESS,
                    OP_LOGE(context->GetNodeName(), "DispatchFFNCombineBF16 GetPlatformInfoAndSetTiling Failed"),
                    return ge::GRAPH_FAILED);

    SetTilingData(tilingData->cocTiling, info);

    // 2. set blockDim from the physical AIC/AIV core counts.
    uint32_t blockDim = 1U;
    auto ascendcPlatform = platform_ascendc::PlatformAscendC(context->GetPlatformInfo());
    auto aicNum = ascendcPlatform.GetCoreNumAic();
    auto aivNum = ascendcPlatform.GetCoreNumAiv();
    blockDim = ascendcPlatform.CalcTschBlockDim(aivNum, aicNum, aivNum);
    context->SetBlockDim(blockDim);

    // 3. set tilingKey: base key plus +1 for transB, +10 for NZ weights.
    uint64_t tilingKey = INIT_TILINGKEY;
    tilingKey += info.isTransposeB ? TILINGKEY_TRANS_B : 0;
    tilingKey += info.isWeightNz ? TILINGKEY_WEIGHT_NZ : 0;
    context->SetTilingKey(tilingKey);

    OP_LOGD(K_INNER_DEBUG, "tilingKey=%d", tilingKey);

    // Sub-tiling for the embedded MoeInitRoutingV2 (token sort/routing) stage.
    // NOTE(review): ubSize is hard-coded to 196352 rather than taken from
    // info.totalUbSize, and aivNumInitRouting from BLOCK_NUM rather than the
    // queried core count — confirm these are intentional.
    optiling::MoeInitRoutingV2TilingBase moeInitRoutingQuantV2TilingBase;
    int64_t inuptXDtypeSize = sizeof(int16_t);  // fp16/bf16 element size
    int64_t scaleDim0 = 0;
    int64_t ubSize = 196352;
    int64_t expertCapacity = 0;
    int64_t expertNum = info.expertPerRank * info.worldSize;
    int64_t activeNum = info.M * info.topK;
    int64_t dropPadMode = 0;
    int64_t expertTokensCountOrCumsumFlag = 2;
    bool expertTokensBeforeCapacityFlag = false;
    int64_t quantMode = 1;
    uint32_t aivNumInitRouting = 2 * BLOCK_NUM;
    moeInitRoutingQuantV2TilingBase.DoTiling(info.M, info.K, info.topK, expertCapacity, expertNum, activeNum, dropPadMode,
                                             expertTokensCountOrCumsumFlag, expertTokensBeforeCapacityFlag, inuptXDtypeSize, quantMode, scaleDim0, aivNumInitRouting, ubSize);
    uint64_t initRoutingQuantTilingKey = moeInitRoutingQuantV2TilingBase.tilingKey_;
    size_t initRoutingWorkspace = moeInitRoutingQuantV2TilingBase.workspaceSize_;

    // Copy the routing sub-tiling into the kernel-visible tiling struct.
    tilingData->cocTiling.moeInitRoutingQuantV2TilingData = moeInitRoutingQuantV2TilingBase.moeInitRoutingTilingData;
    tilingData->cocTiling.moeInitRoutingQuantV2TilingData.vbsComputeParamsOp = moeInitRoutingQuantV2TilingBase.moeInitRoutingTilingData.vbsComputeParamsOp;
    tilingData->cocTiling.moeInitRoutingQuantV2TilingData.vmsMiddleComputeParamsOp = moeInitRoutingQuantV2TilingBase.moeInitRoutingTilingData.vmsMiddleComputeParamsOp;
    tilingData->cocTiling.moeInitRoutingQuantV2TilingData.sortOutComputeParamsOp = moeInitRoutingQuantV2TilingBase.moeInitRoutingTilingData.sortOutComputeParamsOp;
    tilingData->cocTiling.moeInitRoutingQuantV2TilingData.srcToDstComputeParamsOp = moeInitRoutingQuantV2TilingBase.moeInitRoutingTilingData.srcToDstComputeParamsOp;
    tilingData->cocTiling.moeInitRoutingQuantV2TilingData.srcToDstCapacityComputeParamsOp = moeInitRoutingQuantV2TilingBase.moeInitRoutingTilingData.srcToDstCapacityComputeParamsOp;
    tilingData->cocTiling.moeInitRoutingQuantV2TilingData.gatherOutComputeParamsOp = moeInitRoutingQuantV2TilingBase.moeInitRoutingTilingData.gatherOutComputeParamsOp;
    tilingData->cocTiling.initRoutingQuantTilingKey = initRoutingQuantTilingKey;
    // OP_LOGE(initRoutingTilingKey, " initRoutingTilingKey.");
    OP_LOGD(K_INNER_DEBUG, "tilingKey=%ld", initRoutingQuantTilingKey);

    // 4. workspace: the larger of the CoC scratch estimate and the routing
    // stage requirement, plus the fixed system reservation.
    size_t *workSpaces = context->GetWorkspaceSizes(1);
    OP_TILING_CHECK(workSpaces == nullptr, OP_LOGE(nodeName, "workSpaces is nullptr."),
                    return ge::GRAPH_FAILED);

    uint32_t n2 = info.K;      // second matmul output width
    uint32_t k2 = info.N / 2;  // second matmul depth (after gated activation halving)

    uint64_t cocWorkspace = (info.M + 256 - 1) / 256 * 256 * info.topK *sizeof(int32_t) +
                            info.worldSize * info.worldSize * info.expertPerRank * sizeof(int32_t) * 3 +
                            info.maxOutputSize * sizeof(float) * 2 +
                            info.maxOutputSize * info.N * sizeof(int16_t) +
                            info.maxOutputSize * n2 * sizeof(int16_t) +
                            info.maxOutputSize * info.K * sizeof(int16_t) +
                            info.maxOutputSize * k2 * sizeof(int16_t) +
                            info.worldSize * sizeof(int32_t) * 16;
    // std::max(info.maxOutputSize * info.N * sizeof(int16_t), info.maxOutputSize * n2 * sizeof(int16_t)) +
    // std::max(info.maxOutputSize * info.K * sizeof(int8_t), info.maxOutputSize * k2 * sizeof(int8_t));

    workSpaces[0] = SYSTEM_NEED_WORKSPACE + std::max(cocWorkspace, initRoutingWorkspace);

    // 5. communication: MC2 tiling for AlltoAll over the HCCL group.
    auto attrs = context->GetAttrs();
    auto group = attrs->GetAttrPointer<char>(static_cast<int>(ATTR_GROUP_INDEX));
    uint32_t opType = 8U;  // communication op type id expected by Mc2CcTilingConfig
    std::string algConfig = "AlltoAll=level0:fullmesh;level1:pairwise";
    AscendC::Mc2CcTilingConfig mc2CcTilingConfig(group, opType, algConfig);
    mc2CcTilingConfig.GetTiling(tilingData->mc2InitTiling);
    mc2CcTilingConfig.GetTiling(tilingData->mc2CcTiling);

    OP_LOGI(nodeName, "Leave DispatchFFNCombineBF16 tiling func.");
    return ge::GRAPH_SUCCESS;
}
|
||||
|
||||
// Registered tiling entry point; delegates to the implementation above.
static ge::graphStatus DispatchFFNCombineBF16TilingFunc(gert::TilingContext* context)
{
    return DispatchFFNCombineBF16TilingFuncImpl(context);
}
|
||||
|
||||
// No compile-time information is required by this op's tiling.
struct DispatchFFNCombineBF16CompileInfo {};
// Tiling-parse stub: nothing to extract from the compiled model.
ge::graphStatus TilingParseForDispatchFFNCombineBF16(gert::TilingParseContext *context)
{
    (void)context;
    return ge::GRAPH_SUCCESS;
}

IMPL_OP_OPTILING(DispatchFFNCombineBF16)
    .Tiling(DispatchFFNCombineBF16TilingFunc)
    .TilingParse<DispatchFFNCombineBF16CompileInfo>(TilingParseForDispatchFFNCombineBF16);
}  // namespace optiling
|
||||
47
csrc/dispatch_ffn_combine_bf16/op_host/error_log.h
Normal file
47
csrc/dispatch_ffn_combine_bf16/op_host/error_log.h
Normal file
@@ -0,0 +1,47 @@
|
||||
// Minimal logging / check macros for the standalone tiling build.
// Info (OP_LOGI) and debug (OP_LOGD) logs compile to nothing; warnings and
// errors print to stdout with a level tag and the op name.
#ifndef OPS_BUILT_IN_OP_TILING_ERROR_LOG_H_
#define OPS_BUILT_IN_OP_TILING_ERROR_LOG_H_

#include <string>
#include "toolchain/slog.h"

// Compiled out: no-op.
#define OP_LOGI(opname, ...)
#define OP_LOGW(opname, ...) \
do { \
printf("[WARN][%s] ", (opname)); \
printf(__VA_ARGS__); \
printf("\n"); \
} while (0)

// NOTE(review): the "[ERRORx]" tag below looks like a typo for "[ERROR]" —
// kept as-is because log consumers may already match it; confirm intent.
#define OP_LOGE_WITHOUT_REPORT(opname, ...) \
do { \
printf("[ERRORx][%s] ", (opname)); \
printf(__VA_ARGS__); \
printf("\n"); \
} while (0)

#define OP_LOGE(opname, ...) \
do { \
printf("[ERROR][%s] ", (opname)); \
printf(__VA_ARGS__); \
printf("\n"); \
} while (0)

// Compiled out: no-op.
#define OP_LOGD(opname, ...)

namespace optiling {

// Error-report shim: in this standalone build it only logs (no error manager).
#define VECTOR_INNER_ERR_REPORT_TILIING(op_name, err_msg, ...) \
do { \
OP_LOGE_WITHOUT_REPORT(op_name, err_msg, ##__VA_ARGS__); \
} while (0)

// If `cond` holds: run `log_func`, then execute `expr` (typically a return).
#define OP_TILING_CHECK(cond, log_func, expr) \
do { \
if (cond) { \
log_func; \
expr; \
} \
} while (0)
}  // namespace optiling

#endif  // OPS_BUILT_IN_OP_TILING_ERROR_LOG_H_
|
||||
72
csrc/dispatch_ffn_combine_bf16/op_host/hcom_topo_info.h
Normal file
72
csrc/dispatch_ffn_combine_bf16/op_host/hcom_topo_info.h
Normal file
@@ -0,0 +1,72 @@
|
||||
/* Copyright (c) 2025 Huawei Technologies Co., Ltd.
|
||||
* This file is a part of the CANN Open Software.
|
||||
* Licensed under CANN Open Software License Agreement Version 1.0 (the "License").
|
||||
* Please refer to the License for details. You may not use this file except in compliance with the License.
|
||||
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED,
|
||||
* INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE.
|
||||
* See LICENSE in the root of the software repository for the full text of the License.
|
||||
* ===================================================================================================================*/
|
||||
|
||||
#ifndef METADEF_CXX_INC_EXTERNAL_HCOM_HCOM_TOPO_INFO_H_
|
||||
#define METADEF_CXX_INC_EXTERNAL_HCOM_HCOM_TOPO_INFO_H_
|
||||
|
||||
#include <unordered_map>
|
||||
#include <mutex>
|
||||
|
||||
using Status = int32_t;
|
||||
|
||||
namespace ge {
// Capability bits describing which communication patterns a topo level supports.
static constexpr uint32_t COMM_MESH = 0b1U;
static constexpr uint32_t COMM_SWITCH = (COMM_MESH << 1U);
static constexpr uint32_t COMM_RING = (COMM_MESH << 2U);
static constexpr uint32_t COMM_PAIRWISE = (COMM_MESH << 3U);
// Process-wide registry mapping HCCL group names to topology information
// (rank size, notify handle, per-level descriptors) and per-group ordered
// streams.  Declaration-only mirror of the metadef header — implementations
// (Instance, Get/Set*) are provided by the framework.
class HcomTopoInfo {
public:
    // Topology hierarchy levels.  NOTE(review): assumed L0 = intra-node and
    // L1 = inter-node — confirm against the metadef documentation.
    enum class TopoLevel {
        L0 = 0,
        L1,
        MAX,
    };
    struct TopoLevelDesc {
        uint32_t comm_sets;  // bitmask of COMM_* capabilities
        uint32_t rank_size;  // ranks participating at this level
    };
    using TopoDescs = TopoLevelDesc[static_cast<int32_t>(TopoLevel::MAX)];
    struct TopoInfo {
        int64_t rank_size;       // total ranks in the group
        void *notify_handle;     // opaque notify resource handle
        TopoDescs topo_level_descs;
    };
    // Singleton accessor.
    static HcomTopoInfo &Instance();
    bool TopoInfoHasBeenSet(const char_t *group);
    bool TryGetGroupTopoInfo(const char_t *group, TopoInfo &info);
    Status SetGroupTopoInfo(const char_t *group, const TopoInfo &info);
    // Writes the group's rank count into rank_size; used by op tiling.
    Status GetGroupRankSize(const char_t *group, int64_t &rank_size);
    TopoDescs *GetGroupTopoDesc(const char_t *group);
    Status GetGroupNotifyHandle(const char_t *group, void *&notify_handle);
    // Removes a group's topo record (guarded by mutex_).
    void UnsetGroupTopoInfo(const char_t *group) {
        const std::lock_guard<std::mutex> lock(mutex_);
        (void) rank_info_.erase(group);
    }

    Status SetGroupOrderedStream(const char_t *group, void *stream);
    Status GetGroupOrderedStream(const char_t *group, void *&stream);
    // Removes a group's ordered stream (guarded by mutex_).
    void UnsetGroupOrderedStream(const char_t *group) {
        const std::lock_guard<std::mutex> lock(mutex_);
        (void) group_to_ordered_stream_.erase(group);
    };

    // Device-scoped variants of the ordered-stream accessors.
    Status SetGroupOrderedStream(const int32_t device_id, const char_t *group, void *stream);
    Status GetGroupOrderedStream(const int32_t device_id, const char_t *group, void *&stream);
    void UnsetGroupOrderedStream(const int32_t device_id, const char_t *group);
private:
    HcomTopoInfo() = default;
    ~HcomTopoInfo() = default;
    std::unordered_map<std::string, TopoInfo> rank_info_;
    std::mutex mutex_;
    std::unordered_map<std::string, void*> group_to_ordered_stream_; // Ordered stream for the communication domain
    std::unordered_map<int32_t, std::unordered_map<std::string, void*>> device_id_to_group_to_ordered_stream_; // Ordered stream for the communication domain
};
}
|
||||
|
||||
#endif // METADEF_CXX_INC_EXTERNAL_HCOM_HCOM_TOPO_INFO_H_
|
||||
9
csrc/dispatch_ffn_combine_bf16/op_host/tiling_args.h
Normal file
9
csrc/dispatch_ffn_combine_bf16/op_host/tiling_args.h
Normal file
@@ -0,0 +1,9 @@
|
||||
// Byte offsets into the HCCL window buffer used by the dispatch/combine
// kernels for cross-rank state exchange.
#ifndef TILING_ARGS_H
#define TILING_ARGS_H
#include <cstdint>

namespace Moe {
// Combine-phase state flags live 3 MiB into the window.
constexpr uint64_t COMBINE_STATE_WIN_OFFSET = 3U * 1024UL * 1024UL;
// Dispatch notification area starts 204 MiB into the window.
// NOTE(review): assumes the HCCL window is at least this large — confirm.
constexpr uint64_t NOTIFY_DISPATCH_WIN_OFFSET = 204U * 1024UL * 1024UL;
}  // namespace Moe
#endif  // TILING_ARGS_H
|
||||
Reference in New Issue
Block a user