This article walks through the Ascend C operator development flow, based on kernel-direct-call (Kernel Launch) operator development.

1 The basic Ascend C operator development flow

Use Ascend C to implement the Add operator's kernel function; use the ICPU_RUN_KF debug macro to verify the kernel function on the CPU side; use the kernel invocation operator <<<...>>> to verify the kernel function on the NPU side. Before formal development begins, environment preparation and operator analysis are also required. The basic flow of developing an Ascend C operator is shown in the figure below.
2 Kernel function development
This article uses add_custom.cpp as the reference sample. The corresponding project and complete code are on Gitee: operator/AddCustomSample/KernelLaunch/AddKernelInvocationNeo · Ascend/samples (gitee.com).
2.1 Kernel function definition
First, define the kernel function following the rules in the kernel function section of the programming model in the Ascend C operator development guide (CANN community edition 8.0.RC3.alpha003 documentation, hiascend.com), and call the operator class's Init and Process functions inside it.
// called on the CPU side
extern "C" __global__ __aicore__ void add_custom(GM_ADDR x, GM_ADDR y, GM_ADDR z)
{
    KernelAdd op;
    op.Init(x, y, z);
    op.Process();
}

// called on the NPU side
#ifndef ASCENDC_CPU_DEBUG
void add_custom_do(uint32_t blockDim, void *stream, uint8_t *x, uint8_t *y, uint8_t *z)
{
    add_custom<<<blockDim, nullptr, stream>>>(x, y, z);
}
#endif

2.2 Operator class definition
Implement the operator class following the vector programming paradigm. This sample defines the KernelAdd operator class, whose members are as follows:
class KernelAdd {
public:
    __aicore__ inline KernelAdd() {}
    // initialization: sets up memory-related state
    __aicore__ inline void Init(GM_ADDR x, GM_ADDR y, GM_ADDR z) {}
    // core processing: implements the operator logic by calling the private member
    // functions CopyIn, Compute and CopyOut to form the three-stage vector pipeline
    __aicore__ inline void Process() {}

private:
    // copy-in: handles the CopyIn stage, called by Process
    __aicore__ inline void CopyIn(int32_t progress) {}
    // compute: handles the Compute stage, called by Process
    __aicore__ inline void Compute(int32_t progress) {}
    // copy-out: handles the CopyOut stage, called by Process
    __aicore__ inline void CopyOut(int32_t progress) {}

private:
    AscendC::TPipe pipe; // pipe memory manager
    AscendC::TQue<AscendC::QuePosition::VECIN, BUFFER_NUM> inQueueX, inQueueY; // input queues, QuePosition VECIN
    AscendC::TQue<AscendC::QuePosition::VECOUT, BUFFER_NUM> outQueueZ; // output queue, QuePosition VECOUT
    AscendC::GlobalTensor<half> xGm; // global-memory tensors: xGm and yGm are inputs, zGm is the output
    AscendC::GlobalTensor<half> yGm;
    AscendC::GlobalTensor<half> zGm;
};

Kernel function call-relationship diagram
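The class and the functions that follow also rely on several tiling constants (BLOCK_LENGTH, TILE_NUM, TILE_LENGTH, BUFFER_NUM) that this article does not list. A sketch of how they can be defined, with values assumed from the Gitee sample and consistent with blockDim = 8 and the 8 * 2048 half-element buffers used in main.cpp below:

// Tiling constants assumed from the Gitee sample, not shown in this article.
constexpr int32_t TOTAL_LENGTH = 8 * 2048;                            // total number of elements
constexpr int32_t USE_CORE_NUM = 8;                                   // number of cores used
constexpr int32_t BLOCK_LENGTH = TOTAL_LENGTH / USE_CORE_NUM;         // elements computed per core
constexpr int32_t TILE_NUM = 8;                                       // tiles per core
constexpr int32_t BUFFER_NUM = 2;                                     // tensors per queue (double buffering)
constexpr int32_t TILE_LENGTH = BLOCK_LENGTH / TILE_NUM / BUFFER_NUM; // elements per tile, halved for double buffering

With these values each core handles 2048 elements, split into 16 pipeline iterations of 128 elements each, which matches the loopCount = TILE_NUM * BUFFER_NUM loop in Process.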
2.3 Implement the four key functions: Init, CopyIn, Compute and CopyOut
The Init function initializes the input resources:
__aicore__ inline void Init(GM_ADDR x, GM_ADDR y, GM_ADDR z)
{
    xGm.SetGlobalBuffer((__gm__ half *)x + BLOCK_LENGTH * AscendC::GetBlockIdx(), BLOCK_LENGTH);
    yGm.SetGlobalBuffer((__gm__ half *)y + BLOCK_LENGTH * AscendC::GetBlockIdx(), BLOCK_LENGTH);
    zGm.SetGlobalBuffer((__gm__ half *)z + BLOCK_LENGTH * AscendC::GetBlockIdx(), BLOCK_LENGTH);
    pipe.InitBuffer(inQueueX, BUFFER_NUM, TILE_LENGTH * sizeof(half));
    pipe.InitBuffer(inQueueY, BUFFER_NUM, TILE_LENGTH * sizeof(half));
    pipe.InitBuffer(outQueueZ, BUFFER_NUM, TILE_LENGTH * sizeof(half));
}
The Process function invokes the three stage functions as follows:

__aicore__ inline void Process()
{
    // loop count needs to be doubled, due to double buffering
    constexpr int32_t loopCount = TILE_NUM * BUFFER_NUM;
    // tiling strategy, pipeline parallel
    for (int32_t i = 0; i < loopCount; i++) {
        CopyIn(i);
        Compute(i);
        CopyOut(i);
    }
}

The CopyIn function:
1. Uses the DataCopy interface to copy data from the GlobalTensor to a LocalTensor.
2. Uses EnQue to push the LocalTensor into the VECIN queue.
__aicore__ inline void CopyIn(int32_t progress)
{
    // alloc tensor from queue memory
    AscendC::LocalTensor<half> xLocal = inQueueX.AllocTensor<half>();
    AscendC::LocalTensor<half> yLocal = inQueueY.AllocTensor<half>();
    // copy progress_th tile from global tensor to local tensor
    AscendC::DataCopy(xLocal, xGm[progress * TILE_LENGTH], TILE_LENGTH);
    AscendC::DataCopy(yLocal, yGm[progress * TILE_LENGTH], TILE_LENGTH);
    // enque input tensors to VECIN queue
    inQueueX.EnQue(xLocal);
    inQueueY.EnQue(yLocal);
}

The Compute function:
1. Uses DeQue to pop the LocalTensors from the VECIN queue.
2. Uses the Ascend C Add interface to perform the vector computation.
3. Uses EnQue to push the result LocalTensor into the VECOUT queue.
4. Uses FreeTensor to release the LocalTensors that are no longer needed.
__aicore__ inline void Compute(int32_t progress)
{
    // deque input tensors from VECIN queue
    AscendC::LocalTensor<half> xLocal = inQueueX.DeQue<half>();
    AscendC::LocalTensor<half> yLocal = inQueueY.DeQue<half>();
    AscendC::LocalTensor<half> zLocal = outQueueZ.AllocTensor<half>();
    // call Add instr for computation
    AscendC::Add(zLocal, xLocal, yLocal, TILE_LENGTH);
    // enque the output tensor to VECOUT queue
    outQueueZ.EnQue<half>(zLocal);
    // free input tensors for reuse
    inQueueX.FreeTensor(xLocal);
    inQueueY.FreeTensor(yLocal);
}

The CopyOut function:
1. Uses the DeQue interface to pop the LocalTensor from the VECOUT queue.
2. Uses the DataCopy interface to copy the LocalTensor back to the GlobalTensor.
3. Uses FreeTensor to recycle the LocalTensor that is no longer in use.

__aicore__ inline void CopyOut(int32_t progress)
{
    // deque output tensor from VECOUT queue
    AscendC::LocalTensor<half> zLocal = outQueueZ.DeQue<half>();
    // copy progress_th tile from local tensor to global tensor
    AscendC::DataCopy(zGm[progress * TILE_LENGTH], zLocal, TILE_LENGTH);
    // free output tensor for reuse
    outQueueZ.FreeTensor(zLocal);
}

3 Verifying the kernel function
In a heterogeneous compute architecture, the NPU (kernel side) and the CPU (host side) work together. With the kernel-side function complete, you can write the host-side invocation program that calls the operator from a host application to perform the computation.
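The NPU-side program below wraps every AscendCL call in a CHECK_ACL macro. That macro comes from the sample's data_utils.h, not from the ACL library itself; a minimal sketch of such a macro, assuming it only needs to log failures, could look like this:

// Minimal sketch of a CHECK_ACL-style macro (the real one lives in data_utils.h).
// It evaluates the ACL call once and logs file/line on a non-success return code.
#define CHECK_ACL(x)                                                      \
    do {                                                                  \
        aclError ret = (x);                                               \
        if (ret != ACL_SUCCESS) {                                         \
            std::printf("%s:%d aclError: %d\n", __FILE__, __LINE__, ret); \
        }                                                                 \
    } while (0)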
3.1 Write the CPU-side invocation program

// allocate shared memory with GmAlloc and initialize the input data
uint8_t *x = (uint8_t *)AscendC::GmAlloc(inputByteSize);
uint8_t *y = (uint8_t *)AscendC::GmAlloc(inputByteSize);
uint8_t *z = (uint8_t *)AscendC::GmAlloc(outputByteSize);
ReadFile("./input/input_x.bin", inputByteSize, x, inputByteSize);
ReadFile("./input/input_y.bin", inputByteSize, y, inputByteSize);
// call the ICPU_RUN_KF debug macro to invoke the kernel on the CPU side
AscendC::SetKernelMode(KernelMode::AIV_MODE);
ICPU_RUN_KF(add_custom, blockDim, x, y, z); // use this macro for cpu debug
// write out the output data
WriteFile("./output/output_z.bin", z, outputByteSize);
// release the allocated resources with GmFree
AscendC::GmFree((void *)x);
AscendC::GmFree((void *)y);
AscendC::GmFree((void *)z);

3.2 Write the NPU-side invocation program

// initialize AscendCL
CHECK_ACL(aclInit(nullptr));
// claim runtime resources
int32_t deviceId = 0;
CHECK_ACL(aclrtSetDevice(deviceId));
aclrtStream stream = nullptr;
CHECK_ACL(aclrtCreateStream(&stream));
// allocate host memory
uint8_t *xHost, *yHost, *zHost;
uint8_t *xDevice, *yDevice, *zDevice;
CHECK_ACL(aclrtMallocHost((void **)(&xHost), inputByteSize));
CHECK_ACL(aclrtMallocHost((void **)(&yHost), inputByteSize));
CHECK_ACL(aclrtMallocHost((void **)(&zHost), outputByteSize));
// allocate device memory
CHECK_ACL(aclrtMalloc((void **)&xDevice, inputByteSize, ACL_MEM_MALLOC_HUGE_FIRST));
CHECK_ACL(aclrtMalloc((void **)&yDevice, inputByteSize, ACL_MEM_MALLOC_HUGE_FIRST));
CHECK_ACL(aclrtMalloc((void **)&zDevice, outputByteSize, ACL_MEM_MALLOC_HUGE_FIRST));
// initialize host memory
ReadFile("./input/input_x.bin", inputByteSize, xHost, inputByteSize);
ReadFile("./input/input_y.bin", inputByteSize, yHost, inputByteSize);
CHECK_ACL(aclrtMemcpy(xDevice, inputByteSize, xHost, inputByteSize, ACL_MEMCPY_HOST_TO_DEVICE));
CHECK_ACL(aclrtMemcpy(yDevice, inputByteSize, yHost, inputByteSize, ACL_MEMCPY_HOST_TO_DEVICE));
// launch the kernel with the <<<...>>> invocation operator; add_custom_do wraps that call
add_custom_do(blockDim, stream, xDevice, yDevice, zDevice);
CHECK_ACL(aclrtSynchronizeStream(stream));
// copy the result from the device back to the host
CHECK_ACL(aclrtMemcpy(zHost, outputByteSize, zDevice, outputByteSize, ACL_MEMCPY_DEVICE_TO_HOST));
WriteFile("./output/output_z.bin", zHost, outputByteSize);
// release the claimed resources
CHECK_ACL(aclrtFree(xDevice));
CHECK_ACL(aclrtFree(yDevice));
CHECK_ACL(aclrtFree(zDevice));
CHECK_ACL(aclrtFreeHost(xHost));
CHECK_ACL(aclrtFreeHost(yHost));
CHECK_ACL(aclrtFreeHost(zHost));
// de-initialize AscendCL
CHECK_ACL(aclrtDestroyStream(stream));
CHECK_ACL(aclrtResetDevice(deviceId));
CHECK_ACL(aclFinalize());

3.3 Complete main.cpp
/**
 * @file main.cpp
 *
 * Copyright (C) 2024. Huawei Technologies Co., Ltd. All rights reserved.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 */
#include "data_utils.h"
#ifndef ASCENDC_CPU_DEBUG
#include "acl/acl.h"
extern void add_custom_do(uint32_t blockDim, void *stream, uint8_t *x, uint8_t *y, uint8_t *z);
#else
#include "tikicpulib.h"
extern "C" __global__ __aicore__ void add_custom(GM_ADDR x, GM_ADDR y, GM_ADDR z);
#endif

int32_t main(int32_t argc, char *argv[])
{
    uint32_t blockDim = 8;
    size_t inputByteSize = 8 * 2048 * sizeof(uint16_t);
    size_t outputByteSize = 8 * 2048 * sizeof(uint16_t);

#ifdef ASCENDC_CPU_DEBUG
    uint8_t *x = (uint8_t *)AscendC::GmAlloc(inputByteSize);
    uint8_t *y = (uint8_t *)AscendC::GmAlloc(inputByteSize);
    uint8_t *z = (uint8_t *)AscendC::GmAlloc(outputByteSize);
    ReadFile("./input/input_x.bin", inputByteSize, x, inputByteSize);
    ReadFile("./input/input_y.bin", inputByteSize, y, inputByteSize);
    AscendC::SetKernelMode(KernelMode::AIV_MODE);
    ICPU_RUN_KF(add_custom, blockDim, x, y, z); // use this macro for cpu debug
    WriteFile("./output/output_z.bin", z, outputByteSize);
    AscendC::GmFree((void *)x);
    AscendC::GmFree((void *)y);
    AscendC::GmFree((void *)z);
#else
    CHECK_ACL(aclInit(nullptr));
    int32_t deviceId = 0;
    CHECK_ACL(aclrtSetDevice(deviceId));
    aclrtStream stream = nullptr;
    CHECK_ACL(aclrtCreateStream(&stream));

    uint8_t *xHost, *yHost, *zHost;
    uint8_t *xDevice, *yDevice, *zDevice;
    CHECK_ACL(aclrtMallocHost((void **)(&xHost), inputByteSize));
    CHECK_ACL(aclrtMallocHost((void **)(&yHost), inputByteSize));
    CHECK_ACL(aclrtMallocHost((void **)(&zHost), outputByteSize));
    CHECK_ACL(aclrtMalloc((void **)&xDevice, inputByteSize, ACL_MEM_MALLOC_HUGE_FIRST));
    CHECK_ACL(aclrtMalloc((void **)&yDevice, inputByteSize, ACL_MEM_MALLOC_HUGE_FIRST));
    CHECK_ACL(aclrtMalloc((void **)&zDevice, outputByteSize, ACL_MEM_MALLOC_HUGE_FIRST));

    ReadFile("./input/input_x.bin", inputByteSize, xHost, inputByteSize);
    ReadFile("./input/input_y.bin", inputByteSize, yHost, inputByteSize);
    CHECK_ACL(aclrtMemcpy(xDevice, inputByteSize, xHost, inputByteSize, ACL_MEMCPY_HOST_TO_DEVICE));
    CHECK_ACL(aclrtMemcpy(yDevice, inputByteSize, yHost, inputByteSize, ACL_MEMCPY_HOST_TO_DEVICE));

    add_custom_do(blockDim, stream, xDevice, yDevice, zDevice);
    CHECK_ACL(aclrtSynchronizeStream(stream));

    CHECK_ACL(aclrtMemcpy(zHost, outputByteSize, zDevice, outputByteSize, ACL_MEMCPY_DEVICE_TO_HOST));
    WriteFile("./output/output_z.bin", zHost, outputByteSize);

    CHECK_ACL(aclrtFree(xDevice));
    CHECK_ACL(aclrtFree(yDevice));
    CHECK_ACL(aclrtFree(zDevice));
    CHECK_ACL(aclrtFreeHost(xHost));
    CHECK_ACL(aclrtFreeHost(yHost));
    CHECK_ACL(aclrtFreeHost(zHost));

    CHECK_ACL(aclrtDestroyStream(stream));
    CHECK_ACL(aclrtResetDevice(deviceId));
    CHECK_ACL(aclFinalize());
#endif
    return 0;
}

To build and run the complete sample end to end, see operator/AddCustomSample/KernelLaunch/AddKernelInvocationNeo · Ascend/samples on Gitee (gitee.com).
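The sample repository ships its own scripts for generating input data and verifying output_z.bin; as a rough, hypothetical illustration of what such a check does, the standalone checker below recomputes x + y in float from the half-precision input files and compares against the kernel's output (the file names and the 8 * 2048 size come from this article; everything else is assumed):

#include <cmath>
#include <cstdint>
#include <cstdio>
#include <vector>

// Convert an IEEE-754 binary16 bit pattern to float (NaN/Inf handling omitted for brevity).
static float HalfToFloat(uint16_t h)
{
    uint32_t sign = (h >> 15) & 0x1;
    uint32_t exp = (h >> 10) & 0x1F;
    uint32_t frac = h & 0x3FF;
    float s = sign ? -1.0f : 1.0f;
    if (exp == 0) {
        return s * std::ldexp((float)frac, -24); // subnormal: frac * 2^-24
    }
    return s * std::ldexp((float)(frac | 0x400), (int)exp - 25); // (1024+frac) * 2^(exp-25)
}

// Read `count` binary16 values from a raw .bin file.
static std::vector<uint16_t> ReadBin(const char *path, size_t count)
{
    std::vector<uint16_t> buf(count);
    FILE *fp = std::fopen(path, "rb");
    if (fp != nullptr) {
        (void)std::fread(buf.data(), sizeof(uint16_t), count, fp);
        std::fclose(fp);
    }
    return buf;
}

int main()
{
    const size_t count = 8 * 2048; // matches the buffer sizes in main.cpp
    auto x = ReadBin("./input/input_x.bin", count);
    auto y = ReadBin("./input/input_y.bin", count);
    auto z = ReadBin("./output/output_z.bin", count);
    size_t bad = 0;
    for (size_t i = 0; i < count; i++) {
        float expect = HalfToFloat(x[i]) + HalfToFloat(y[i]);
        // loose relative + absolute tolerance to absorb half-precision rounding
        if (std::fabs(HalfToFloat(z[i]) - expect) > 1e-3f * std::fabs(expect) + 1e-3f) {
            bad++;
        }
    }
    std::printf("%zu / %zu elements mismatched\n", bad, count);
    return bad == 0 ? 0 : 1;
}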