1. File Structure
Add
|-- input // directory for the input data generated by the script
|-- output // directory for the operator's output data and the golden data
|-- CMakeLists.txt -> ../kernel_template/CMakeLists.txt // build project file
|-- add_custom.cpp // operator kernel implementation
|-- add_custom.py // script that generates the input data and the golden data
|-- cmake -> ../kernel_template/cmake // build project files
|-- data_utils.h -> ../kernel_template/data_utils.h // data read/write helpers
|-- main.cpp // main function; the host application that invokes the operator on both the CPU side and the NPU side
|-- run.sh -> ../kernel_template/run.sh // script to build and run the operator
Code download: samples: CANN Samples - Gitee.com
After downloading, you get the directory structure shown above.
Among these files, we are mainly concerned with the kernel function and the implementation of the Add operator.
2. Operator Analysis
First, analyze the operator. The Add operator implements
z = x + y
Accordingly, the analysis proceeds in three steps:
1. Clarify the inputs and outputs.
- The Add operator has two inputs, x and y, and one output, z. In this sample the supported input data type is half (float16), and the output data type is the same as the input data type. The supported input shape is (8, 2048), and the output shape is the same as the input shape. The supported input format is ND.
2. Determine the kernel function name and parameters.
- You can choose the kernel function name yourself; in this sample it is named add_custom. From the analysis of the inputs and outputs, the kernel function takes 3 parameters: x, y, and z. x and y are the Global Memory addresses of the inputs, and z is the Global Memory address of the output.
3. Determine the interfaces needed to implement the operator.
- The implementation involves moving data between external and internal storage. Checking the data-movement interfaces in the Ascend C API reference, DataCopy is the interface to use. This sample only involves a vector addition, so from the vector-compute interfaces in the Ascend C API reference, a first analysis shows that the two-operand Add instruction can implement x + y. The Tensor data structures used in the computation are managed with Queue objects, which involves interfaces such as EnQue and DeQue.
3. Kernel Function Definition and Implementation
3.1 Following the kernel function declaration rules introduced earlier, the kernel is declared as:
extern "C" __global__ __aicore__ void add_custom(GM_ADDR x, GM_ADDR y, GM_ADDR z)
{
// implementation goes here
}
3.2 Fill in the implementation:
extern "C" __global__ __aicore__ void add_custom(GM_ADDR x, GM_ADDR y, GM_ADDR z)
{
KernelAdd op; // create an instance of the operator class
op.Init(x, y, z); // call Init()
op.Process(); // call Process()
}
3.3 Wrap the call. The #ifndef __CCE_KT_TEST__ guard means this wrapper function is only used when building and running the operator on the NPU side; when building and running on the CPU side, add_custom can be called directly.
#ifndef __CCE_KT_TEST__
// call of kernel function
void add_custom_do(uint32_t blockDim, void* l2ctrl, void* stream, uint8_t* x, uint8_t* y, uint8_t* z)
{
add_custom<<<blockDim, l2ctrl, stream>>>(x, y, z);
}
#endif
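For context, here is a condensed, illustrative sketch of how main.cpp drives these two call paths. It assumes the ICPU_RUN_KF macro from tikicpulib for the CPU-side debug build, the CHECK_ACL helper from data_utils.h, and the ACL runtime stream APIs for the NPU side; the helper name LaunchAddCustom and the omitted buffer handling are illustrative, not the sample's exact code.
// assumed includes: "data_utils.h" (CHECK_ACL), "acl/acl.h" (NPU build),
// "tikicpulib.h" (CPU debug build)
// Illustrative launcher (hypothetical helper, not in the sample): the real
// main.cpp also allocates buffers, copies data, and releases resources.
void LaunchAddCustom(uint32_t blockDim, uint8_t* x, uint8_t* y, uint8_t* z)
{
#ifdef __CCE_KT_TEST__
    // CPU side: run the kernel in-process for debugging
    ICPU_RUN_KF(add_custom, blockDim, x, y, z);
#else
    // NPU side: launch asynchronously on an ACL stream via the wrapper above
    aclrtStream stream = nullptr;
    CHECK_ACL(aclrtCreateStream(&stream));
    add_custom_do(blockDim, nullptr, stream, x, y, z);
    CHECK_ACL(aclrtSynchronizeStream(stream)); // wait for the kernel to finish
    CHECK_ACL(aclrtDestroyStream(stream));
#endif
}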
4.算子类实现
根据上一节介绍,核函数中会调用算子类的Init和Process函数,本节具体讲解如何基于编程范式实现算子类。
根据矢量编程范式对Add算子的实现流程进行设计的思路如下,设计完成后得到的Add算子实现流程图参见下图
- Add算子的实现流程分为3个基本任务:CopyIn,Compute,CopyOut。CopyIn任务负责将Global Memory上的输入Tensor xGm和yGm搬运至Local Memory,分别存储在xLocal,yLocal,Compute任务负责对xLocal,yLocal执行加法操作,计算结果存储在zLocal中,CopyOut任务负责将输出数据从zLocal搬运至Global Memory上的输出Tensor zGm中。
- CopyIn,Compute任务间通过VECIN队列inQueueX,inQueueY进行通信和同步,Compute,CopyOut任务间通过VECOUT队列outQueueZ进行通信和同步。
- 任务间交互使用到的内存、临时变量使用到的内存统一使用pipe内存管理对象进行管理。
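The corresponding member declarations from the complete sample (section 5) show how the queues and the pipe object map onto this design:
TPipe pipe; // unified manager for the memory used by queues and temporaries
// input queues; the queue depth equals BUFFER_NUM (double buffering)
TQue<QuePosition::VECIN, BUFFER_NUM> inQueueX, inQueueY;
// output queue
TQue<QuePosition::VECOUT, BUFFER_NUM> outQueueZ;
// views of the operator's Global Memory buffers
GlobalTensor<half> xGm, yGm, zGm;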
The tiling constants and the Init() code are as follows:
constexpr int32_t TOTAL_LENGTH = 8 * 2048; // total length of the data
constexpr int32_t USE_CORE_NUM = 8; // number of cores used
constexpr int32_t BLOCK_LENGTH = TOTAL_LENGTH / USE_CORE_NUM; // length computed by each core
constexpr int32_t TILE_NUM = 8; // split the data into 8 tiles on each core
constexpr int32_t BUFFER_NUM = 2; // number of tensors per queue
constexpr int32_t TILE_LENGTH = BLOCK_LENGTH / TILE_NUM / BUFFER_NUM; // each tile is separated into 2 parts, due to double buffering
__aicore__ inline void Init(GM_ADDR x, GM_ADDR y, GM_ADDR z)
{
// get start index for current core, core parallel
xGm.SetGlobalBuffer((__gm__ half*)x + BLOCK_LENGTH * GetBlockIdx(), BLOCK_LENGTH);
yGm.SetGlobalBuffer((__gm__ half*)y + BLOCK_LENGTH * GetBlockIdx(), BLOCK_LENGTH);
zGm.SetGlobalBuffer((__gm__ half*)z + BLOCK_LENGTH * GetBlockIdx(), BLOCK_LENGTH);
// pipe alloc memory to queue, the unit is Bytes
pipe.InitBuffer(inQueueX, BUFFER_NUM, TILE_LENGTH * sizeof(half));
pipe.InitBuffer(inQueueY, BUFFER_NUM, TILE_LENGTH * sizeof(half));
pipe.InitBuffer(outQueueZ, BUFFER_NUM, TILE_LENGTH * sizeof(half));
}
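With these constants, each core owns BLOCK_LENGTH = 16384 / 8 = 2048 half elements, and each loop iteration processes TILE_LENGTH = 2048 / 8 / 2 = 128 elements (256 bytes). As a sketch that is not part of the sample, the divisibility assumptions behind this tiling can be made explicit with static_asserts placed after the constants:
// Hypothetical sanity checks (not in the sample): the tiling above only
// works when every split divides evenly.
static_assert(TOTAL_LENGTH % USE_CORE_NUM == 0, "data must split evenly across cores");
static_assert(BLOCK_LENGTH % (TILE_NUM * BUFFER_NUM) == 0, "each core's block must split evenly into double-buffered tiles");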
Based on the vector programming paradigm, the kernel implementation is split into 3 basic tasks: CopyIn, Compute, and CopyOut. The Process function calls these three functions as shown below. Because each queue holds BUFFER_NUM = 2 tensors, the CopyIn of one iteration can overlap with the Compute and CopyOut of the previous one; this is the pipeline parallelism the comments refer to.
The Process() code is as follows:
__aicore__ inline void Process()
{
// the loop count needs to be doubled, due to double buffering
constexpr int32_t loopCount = TILE_NUM * BUFFER_NUM;
// tiling strategy, pipeline parallel
for (int32_t i = 0; i < loopCount; i++) {
CopyIn(i);
Compute(i);
CopyOut(i);
}
}
Stage 1: implement the CopyIn function.
- Use the DataCopy interface to copy data from the GlobalTensor to the LocalTensor.
- Use EnQue to push the LocalTensor into the VECIN queue.
__aicore__ inline void CopyIn(int32_t progress)
{
// alloc tensor from queue memory
LocalTensor<half> xLocal = inQueueX.AllocTensor<half>();
LocalTensor<half> yLocal = inQueueY.AllocTensor<half>();
// copy progress_th tile from global tensor to local tensor
DataCopy(xLocal, xGm[progress * TILE_LENGTH], TILE_LENGTH);
DataCopy(yLocal, yGm[progress * TILE_LENGTH], TILE_LENGTH);
// enque input tensors to VECIN queue
inQueueX.EnQue(xLocal);
inQueueY.EnQue(yLocal);
}
Stage 2: implement the Compute function.
- Use DeQue to take the LocalTensors out of the VECIN queues.
- Use the Ascend C Add interface to perform the vector computation.
- Use EnQue to push the result LocalTensor into the VECOUT queue.
- Use FreeTensor to release the LocalTensors that are no longer needed.
__aicore__ inline void Compute(int32_t progress)
{
// deque input tensors from VECIN queue
LocalTensor<half> xLocal = inQueueX.DeQue<half>();
LocalTensor<half> yLocal = inQueueY.DeQue<half>();
LocalTensor<half> zLocal = outQueueZ.AllocTensor<half>();
// call Add instr for computation
Add(zLocal, xLocal, yLocal, TILE_LENGTH);
// enque the output tensor to VECOUT queue
outQueueZ.EnQue<half>(zLocal);
// free input tensors for reuse
inQueueX.FreeTensor(xLocal);
inQueueY.FreeTensor(yLocal);
}
Stage 3: implement the CopyOut function.
- Use the DeQue interface to take the LocalTensor out of the VECOUT queue.
- Use the DataCopy interface to copy the LocalTensor to the GlobalTensor.
- Use FreeTensor to recycle the LocalTensor that is no longer needed.
__aicore__ inline void CopyOut(int32_t progress)
{
// deque output tensor from VECOUT queue
LocalTensor<half> zLocal = outQueueZ.DeQue<half>();
// copy progress_th tile from local tensor to global tensor
DataCopy(zGm[progress * TILE_LENGTH], zLocal, TILE_LENGTH);
// free output tensor for reuse
outQueueZ.FreeTensor(zLocal);
}
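Putting the three stages together: the stages execute on different hardware pipelines, the queues provide the synchronization between them, and double buffering lets successive tiles overlap. A simplified, illustrative view of the steady state (2 buffers per queue):
// CopyIn : tile0 tile1 tile2 tile3 ...
// Compute:       tile0 tile1 tile2 ...
// CopyOut:             tile0 tile1 ...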
5. Complete Sample
/*
* Copyright (c) Huawei Technologies Co., Ltd. 2022-2023. All rights reserved.
*
* Function : z = x + y
 * This sample is a very basic sample that implements vector add on the Ascend platform.
* In this sample:
* Length of x / y / z is 8*2048.
* Num of vector core used in sample is 8.
* Length for each core to compute is 2048.
 * Each core is split into 8 tiles; with double buffering, each of the 16 loop
 * iterations adds 2048/8/2 = 128 elements.
 *
 * This is just a tiling strategy for demonstration; in fact, at most 128*255
 * elements can be computed in one call for the b16 type.
*/
#include "kernel_operator.h"
using namespace AscendC;
constexpr int32_t TOTAL_LENGTH = 8 * 2048; // total length of the data
constexpr int32_t USE_CORE_NUM = 8; // number of cores used
constexpr int32_t BLOCK_LENGTH = TOTAL_LENGTH / USE_CORE_NUM; // length computed by each core
constexpr int32_t TILE_NUM = 8; // split the data into 8 tiles on each core
constexpr int32_t BUFFER_NUM = 2; // number of tensors per queue
constexpr int32_t TILE_LENGTH = BLOCK_LENGTH / TILE_NUM / BUFFER_NUM; // each tile is separated into 2 parts, due to double buffering
class KernelAdd {
public:
__aicore__ inline KernelAdd() {}
__aicore__ inline void Init(GM_ADDR x, GM_ADDR y, GM_ADDR z)
{
// get start index for current core, core parallel
xGm.SetGlobalBuffer((__gm__ half*)x + BLOCK_LENGTH * GetBlockIdx(), BLOCK_LENGTH);
yGm.SetGlobalBuffer((__gm__ half*)y + BLOCK_LENGTH * GetBlockIdx(), BLOCK_LENGTH);
zGm.SetGlobalBuffer((__gm__ half*)z + BLOCK_LENGTH * GetBlockIdx(), BLOCK_LENGTH);
// pipe alloc memory to queue, the unit is Bytes
pipe.InitBuffer(inQueueX, BUFFER_NUM, TILE_LENGTH * sizeof(half));
pipe.InitBuffer(inQueueY, BUFFER_NUM, TILE_LENGTH * sizeof(half));
pipe.InitBuffer(outQueueZ, BUFFER_NUM, TILE_LENGTH * sizeof(half));
}
__aicore__ inline void Process()
{
// the loop count needs to be doubled, due to double buffering
constexpr int32_t loopCount = TILE_NUM * BUFFER_NUM;
// tiling strategy, pipeline parallel
for (int32_t i = 0; i < loopCount; i++) {
CopyIn(i);
Compute(i);
CopyOut(i);
}
}
private:
__aicore__ inline void CopyIn(int32_t progress)
{
// alloc tensor from queue memory
LocalTensor<half> xLocal = inQueueX.AllocTensor<half>();
LocalTensor<half> yLocal = inQueueY.AllocTensor<half>();
// copy progress_th tile from global tensor to local tensor
DataCopy(xLocal, xGm[progress * TILE_LENGTH], TILE_LENGTH);
DataCopy(yLocal, yGm[progress * TILE_LENGTH], TILE_LENGTH);
// enque input tensors to VECIN queue
inQueueX.EnQue(xLocal);
inQueueY.EnQue(yLocal);
}
__aicore__ inline void Compute(int32_t progress)
{
// deque input tensors from VECIN queue
LocalTensor<half> xLocal = inQueueX.DeQue<half>();
LocalTensor<half> yLocal = inQueueY.DeQue<half>();
LocalTensor<half> zLocal = outQueueZ.AllocTensor<half>();
// call Add instr for computation
Add(zLocal, xLocal, yLocal, TILE_LENGTH);
// enque the output tensor to VECOUT queue
outQueueZ.EnQue<half>(zLocal);
// free input tensors for reuse
inQueueX.FreeTensor(xLocal);
inQueueY.FreeTensor(yLocal);
}
__aicore__ inline void CopyOut(int32_t progress)
{
// deque output tensor from VECOUT queue
LocalTensor<half> zLocal = outQueueZ.DeQue<half>();
// copy progress_th tile from local tensor to global tensor
DataCopy(zGm[progress * TILE_LENGTH], zLocal, TILE_LENGTH);
// free output tensor for reuse
outQueueZ.FreeTensor(zLocal);
}
private:
TPipe pipe;
// create queues for input, in this case depth is equal to buffer num
TQue<QuePosition::VECIN, BUFFER_NUM> inQueueX, inQueueY;
// create queue for output, in this case depth is equal to buffer num
TQue<QuePosition::VECOUT, BUFFER_NUM> outQueueZ;
GlobalTensor<half> xGm, yGm, zGm;
};
// implementation of kernel function
extern "C" __global__ __aicore__ void add_custom(GM_ADDR x, GM_ADDR y, GM_ADDR z)
{
KernelAdd op;
op.Init(x, y, z);
op.Process();
}
#ifndef __CCE_KT_TEST__
// call of kernel function
void add_custom_do(uint32_t blockDim, void* l2ctrl, void* stream, uint8_t* x, uint8_t* y, uint8_t* z)
{
add_custom<<<blockDim, l2ctrl, stream>>>(x, y, z);
}
#endif
The run results are as follows: the two results in the red boxes (the operator output and the golden data) should be identical.
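The comparison between the operator output and the golden data can be done, for example, by checksumming the two .bin files or by checking them element by element. A minimal sketch of the latter, assuming output_z.bin and golden.bin have already been loaded into host memory as raw fp16 words (this helper is hypothetical and not part of the sample):
#include <cstddef>
#include <cstdint>
#include <cstdio>
// Hypothetical verification helper, not part of the sample. fp16 values are
// compared bitwise: identical inputs through identical fp16 adds should
// produce bit-identical outputs.
bool VerifyAgainstGolden(const uint16_t* out, const uint16_t* golden, size_t count)
{
    for (size_t i = 0; i < count; ++i) {
        if (out[i] != golden[i]) {
            std::printf("mismatch at element %zu\n", i);
            return false;
        }
    }
    return true;
}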