add mla_preprocess kernel (#3226)
### What this PR does / why we need it?
- Adds the `mla_preprocess` custom kernel to provide an optimized pre-processing operator for Multi-head Latent Attention (MLA) on Ascend NPUs.
- Wires the new kernel into the C++ extension pipeline so vLLM can invoke it directly, cutting the Python-side tensor shuffling and memory copies that previously bottlenecked the MLA pre-processing path (see the illustrative binding sketch after this description).

### Does this PR introduce any user-facing change?
- No. The change only introduces a low-level kernel; public APIs and inference behavior remain unchanged.

### How was this patch tested?
- Dedicated Ascend kernels are not covered by our CI yet, so no extra automated tests were added. Future MLA-focused regression runs will exercise this path.

- vLLM version: v0.11.0

Signed-off-by: Chen Chen <0109chenchen@gmail.com>
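For illustration, a minimal sketch of how a custom operator like this is typically registered with the PyTorch dispatcher from a C++ extension. The namespace, schema, and argument list are assumptions for illustration only; the actual binding added by this PR may differ.

```cpp
// Hypothetical registration sketch; everything except the operator name
// "mla_preprocess" is an assumption, not the PR's actual schema.
#include <torch/library.h>

TORCH_LIBRARY_FRAGMENT(_C, m) {
    // Assumed schema: the real operator takes MLA-specific tensors and params.
    m.def("mla_preprocess(Tensor hidden_states, Tensor weight) -> Tensor");
}
```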
csrc/mla_preprocess/op_kernel/kernel/set_fpc.h (new file, 38 lines)
@@ -0,0 +1,38 @@
/* Adapted from
 * https://gitee.com/ascend/ascend-transformer-boost.git
 *
 * Copyright (c) 2024 Huawei Technologies Co., Ltd.
 * This file is a part of the CANN Open Software.
 * Licensed under CANN Open Software License Agreement Version 1.0 (the "License").
 * Please refer to the License for details. You may not use this file except in compliance with the License.
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED,
 * INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE.
 * See LICENSE in the root of the software repository for the full text of the License.
 */
#ifndef INCLUDE_SET_FPC_H
#define INCLUDE_SET_FPC_H

#include "hardware.h"
#include "kernel_tensor.h"

/////////////////////////////////////////////////////
// SetQuantPreAddr
/////////////////////////////////////////////////////
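// Primary template: a no-op fallback for architectures without a dedicated specialization.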
template <ArchType ArchTag, typename DataType>
struct SetQuantPreAddr {
    __aicore__ SetQuantPreAddr(AscendC::LocalTensor<DataType> quantPreTensor) {};
};

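// ASCEND_V220 specialization: registers the quant-pre tensor with the
// fix-pipe configuration (FPC) via AscendC::SetFixPipeConfigImpl.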
template <typename DataType>
struct SetQuantPreAddr<ArchType::ASCEND_V220, DataType> {
    static constexpr uint32_t QUANT_PRE_ADDR_MASK = 0xffff;
    static constexpr uint32_t USELESS_BIT_NUM = 7;
    static constexpr uint32_t QUANT_PRE_BIT_POS_IN_FPC = 8;

    __aicore__ SetQuantPreAddr(AscendC::LocalTensor<DataType> quantPreTensor)
    {
        uint64_t quantPreAddr = (uint64_t)(__fbuf__ uint64_t *)quantPreTensor.GetPhyAddr();
        AscendC::SetFixPipeConfigImpl(quantPreTensor);
    };
};
#endif
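For orientation, a minimal, hypothetical usage sketch follows. The wrapper function `ConfigureQuantPre` is invented for illustration; only `SetQuantPreAddr`, `ArchType::ASCEND_V220`, and the three constants come from the file above. Note that the constructor computes `quantPreAddr` but leaves it unused, delegating the actual register write to `AscendC::SetFixPipeConfigImpl`; the constants only hint at the intended bit packing.

```cpp
// Hypothetical usage sketch, not part of the PR.
#include "set_fpc.h"

template <typename DataType>
__aicore__ inline void ConfigureQuantPre(AscendC::LocalTensor<DataType> quantPreTensor)
{
    // Constructing the specialization programs the fix-pipe config (FPC)
    // from the quant-pre tensor as a side effect.
    SetQuantPreAddr<ArchType::ASCEND_V220, DataType> setter(quantPreTensor);

    // The constants suggest the intended FPC field layout (an assumption,
    // since the shown constructor does not use them directly): drop the low
    // 7 alignment bits of the address, keep 16 bits, and place them at bit 8
    // of the FPC register, roughly:
    //   fpcField = ((addr >> USELESS_BIT_NUM) & QUANT_PRE_ADDR_MASK)
    //              << QUANT_PRE_BIT_POS_IN_FPC;
}
```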