OpenCL Reduction 1

▶ Following the code in the book, I wrote a few one-step reduction kernels. Each performs only a single pass, reducing the original array to no more than 1024 partial sums (one per work-group).
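As a quick check of the arithmetic with the sizes used below (BLOCK_SIZE = 256 and DATA_SIZE = BLOCK_SIZE × 1024, as defined in main.c): reduce01 uses one work-item per element and therefore leaves DATA_SIZE / BLOCK_SIZE = 1024 partial sums, while reduce02 loads two elements per work-item and leaves 512.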
● Code
// kernel.cl
__kernel void reduce01(__global uint* input, __global uint* output, __local uint* sdata)
{
    const unsigned int tid = get_local_id(0), blockSize = get_local_size(0);
    unsigned int s;
    sdata[tid] = input[get_global_id(0)];
    barrier(CLK_LOCAL_MEM_FENCE);
    // Three variants follow; keep only one and comment out the others.
    // 1. Modulo method. Problem: the % operation is slow.
    for (s = 1; s < blockSize; s <<= 1)
    {
        if (tid % (2 * s) == 0)
            sdata[tid] += sdata[tid + s];
        barrier(CLK_LOCAL_MEM_FENCE);
    }
    // 2. Shrinking-stride method. Problem: only half of the work-items are used in the
    //    first iteration, and the number of active work-items keeps dropping after that.
    for (s = blockSize / 2; s > 0; s >>= 1)
    {
        if (tid < s)
            sdata[tid] += sdata[tid + s];
        barrier(CLK_LOCAL_MEM_FENCE);
    }
    // 3. Growing-stride method. Problem: certain stride values cause local-memory bank conflicts.
    unsigned int index;
    for (s = 1; s < blockSize; s <<= 1)
    {
        if ((index = 2 * s * tid) < blockSize)
            sdata[index] += sdata[index + s];
        barrier(CLK_LOCAL_MEM_FENCE);
    }
    if (tid == 0)
        output[get_group_id(0)] = sdata[0];
}

__kernel void reduce02(__global uint* input, __global uint* output, __local uint* sdata)
{
    const unsigned int tid = get_local_id(0), bid = get_group_id(0), blockSize = get_local_size(0);
    const unsigned int index = bid * (blockSize * 2) + tid;
    unsigned int s;
    sdata[tid] = input[index] + input[index + blockSize]; // do one addition already while loading into local memory
    barrier(CLK_LOCAL_MEM_FENCE);
    // Two variants follow; keep only one.
    // 1. No manual unrolling; idle work-items are still wasted.
    for (s = blockSize / 2; s > 0; s >>= 1)
    {
        if (tid < s)
            sdata[tid] += sdata[tid + s];
        barrier(CLK_LOCAL_MEM_FENCE);
    }
    // 2. Manually unroll the final iterations.
    for (s = blockSize / 2; s > 32; s >>= 1) // BUG: if the manual part is started from 64 instead, exactly one work-item computes 640 = 512 + 128 on this line, while the other lines are fine
    {
        if (tid < s)
            sdata[tid] += sdata[tid + s];
        barrier(CLK_LOCAL_MEM_FENCE);
    }
    if (tid < 32) // manually unrolled tail; note the barriers: the book's source code has no synchronization here and its results are wrong
    {
        if (blockSize >= 64)
            sdata[tid] += sdata[tid + 32];
        barrier(CLK_LOCAL_MEM_FENCE);
        if (blockSize >= 32)
            sdata[tid] += sdata[tid + 16];
        barrier(CLK_LOCAL_MEM_FENCE);
        if (blockSize >= 16)
            sdata[tid] += sdata[tid + 8];
        barrier(CLK_LOCAL_MEM_FENCE);
        if (blockSize >= 8)
            sdata[tid] += sdata[tid + 4];
        barrier(CLK_LOCAL_MEM_FENCE);
        if (blockSize >= 4)
            sdata[tid] += sdata[tid + 2];
        barrier(CLK_LOCAL_MEM_FENCE);
        if (blockSize >= 2)
            sdata[tid] += sdata[tid + 1];
        barrier(CLK_LOCAL_MEM_FENCE);
    }
    if (tid == 0)
        output[bid] = sdata[0];
}
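Before moving on to the host code, here is a minimal serial sketch (my own illustration, not from the book) that replays the shrinking-stride scheme of variant 2 on an 8-element array standing in for one work-group's local memory, so the access pattern is easy to trace without a device:

#include <stdio.h>

#define N 8 /* stands in for blockSize */

int main(void)
{
    unsigned int sdata[N] = { 1, 2, 3, 4, 5, 6, 7, 8 }; /* expected sum: 36 */
    unsigned int s, tid;
    for (s = N / 2; s > 0; s >>= 1)          /* same loop shape as variant 2: the stride halves each pass */
    {
        for (tid = 0; tid < s; ++tid)        /* only the first s "work-items" are active */
            sdata[tid] += sdata[tid + s];
        printf("stride %u: %u additions, sdata[0] = %u\n", s, s, sdata[0]);
    }
    printf("result = %u\n", sdata[0]);       /* prints 36 */
    return 0;
}

The printed counts (4, 2, 1 additions) show why half the work-items idle from the very first pass: the total work is only N - 1 additions spread over log2(N) barrier-separated steps.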
// main.c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <cl.h>

#define BLOCK_SIZE 256                 // at most 1024 work-items per work-group
#define DATA_SIZE (BLOCK_SIZE * 1024)  // at most 1024 work-groups in one dimension

const char *sourceText = "D:/Code/OpenCL/OpenCLProjectTemp/OpenCLProjectTemp/kernel.cl";

// Read a text file into *pcode and return the length of the buffer.
int readText(const char* kernelPath, char **pcode)
{
    FILE *fp;
    int size;
    //printf("<readText> File: %s\n", kernelPath);
    fopen_s(&fp, kernelPath, "rb");
    if (!fp)
    {
        printf("Open kernel file failed\n");
        getchar();
        exit(-1);
    }
    if (fseek(fp, 0, SEEK_END) != 0)
    {
        printf("Seek end of file failed\n");
        getchar();
        exit(-1);
    }
    if ((size = ftell(fp)) < 0)
    {
        printf("Get file position failed\n");
        getchar();
        exit(-1);
    }
    rewind(fp);
    if ((*pcode = (char *)malloc(size + 1)) == NULL)
    {
        printf("Allocate space failed\n");
        getchar();
        exit(-1);
    }
    fread(*pcode, 1, size, fp);
    (*pcode)[size] = '\0';
    fclose(fp);
    return size + 1;
}

int main()
{
    cl_int status;
    cl_uint nPlatform;
    clGetPlatformIDs(0, NULL, &nPlatform);
    cl_platform_id *listPlatform = (cl_platform_id*)malloc(nPlatform * sizeof(cl_platform_id));
    clGetPlatformIDs(nPlatform, listPlatform, NULL);
    cl_uint nDevice;
    clGetDeviceIDs(listPlatform[0], CL_DEVICE_TYPE_ALL, 0, NULL, &nDevice);
    cl_device_id *listDevice = (cl_device_id*)malloc(nDevice * sizeof(cl_device_id));
    clGetDeviceIDs(listPlatform[0], CL_DEVICE_TYPE_ALL, nDevice, listDevice, NULL);
    cl_context context = clCreateContext(NULL, nDevice, listDevice, NULL, NULL, &status);
    cl_command_queue queue = clCreateCommandQueue(context, listDevice[0], CL_QUEUE_PROFILING_ENABLE, &status);

    //const unsigned int nGroup = DATA_SIZE / BLOCK_SIZE;     // for reduce01
    const unsigned int nGroup = DATA_SIZE / BLOCK_SIZE / 2;   // for reduce02
    int *hostA = (cl_int*)malloc(sizeof(cl_int) * DATA_SIZE);
    int *hostB = (cl_int*)malloc(sizeof(cl_int) * nGroup);
    int i;
    unsigned long refSum;
    srand(97); // arbitrary seed; the rand() call below is commented out
    for (i = 0, refSum = 0L; i < DATA_SIZE; refSum += (hostA[i++] = 1));// rand()));
    memset(hostB, 0, sizeof(int) * nGroup);
    cl_mem deviceA = clCreateBuffer(context, CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR, sizeof(cl_int) * DATA_SIZE, hostA, &status);
    cl_mem deviceB = clCreateBuffer(context, CL_MEM_WRITE_ONLY, sizeof(cl_int) * nGroup, NULL, &status);

    char *code;
    size_t codeLength = readText(sourceText, &code);
    cl_program program = clCreateProgramWithSource(context, 1, (const char**)&code, &codeLength, &status);
    status = clBuildProgram(program, nDevice, listDevice, NULL, NULL, NULL);
    if (status)
    {
        char info[4096];
        clGetProgramBuildInfo(program, listDevice[0], CL_PROGRAM_BUILD_LOG, sizeof(info), info, NULL);
        printf("\n%s\n", info);
    }
    //cl_kernel kernel = clCreateKernel(program, "reduce01", &status);
    cl_kernel kernel = clCreateKernel(program, "reduce02", &status);

    clSetKernelArg(kernel, 0, sizeof(cl_mem), (void*)&deviceA);
    clSetKernelArg(kernel, 1, sizeof(cl_mem), (void*)&deviceB);
    clSetKernelArg(kernel, 2, BLOCK_SIZE * sizeof(cl_int), NULL);

    //size_t globalSize = DATA_SIZE, localSize = BLOCK_SIZE;      // for reduce01
    size_t globalSize = DATA_SIZE / 2, localSize = BLOCK_SIZE;    // for reduce02: each work-item loads two elements
    cl_event ev;
    //cl_ulong startTime, endTime;
    status = clEnqueueNDRangeKernel(queue, kernel, 1, NULL, &globalSize, &localSize, 0, NULL, &ev);
    clFinish(queue);
    // Timing is disabled because a single reduction pass is too short to measure meaningfully.
    //clGetEventProfilingInfo(ev, CL_PROFILING_COMMAND_START, sizeof(cl_ulong), &startTime, NULL);
    //clGetEventProfilingInfo(ev, CL_PROFILING_COMMAND_END, sizeof(cl_ulong), &endTime, NULL);
    //printf("Time:%lu.%lu\n", (endTime - startTime) / 1000000000, (endTime - startTime) % 1000000000);

    clEnqueueReadBuffer(queue, deviceB, CL_TRUE, 0, sizeof(cl_int) * nGroup, hostB, 0, NULL, NULL);
    for (i = 0; i < nGroup; refSum -= hostB[i++]);
    printf("Result %s.\n", (refSum == 0) ? "correct" : "error");

    free(hostA);
    free(hostB);
    free(code);
    free(listPlatform);
    free(listDevice);
    clReleaseContext(context);
    clReleaseCommandQueue(queue);
    clReleaseProgram(program);
    clReleaseKernel(kernel);
    clReleaseEvent(ev);
    clReleaseMemObject(deviceA);
    clReleaseMemObject(deviceB);
    getchar();
    return 0;
}
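One detail worth noting in the host code: the build log is read into a fixed-size buffer, which truncates long logs. A sketch of a more robust variant (my addition, not what the book does) queries the required size first and allocates exactly that much; it assumes the same program and listDevice[0] as in main():

size_t logSize = 0;
clGetProgramBuildInfo(program, listDevice[0], CL_PROGRAM_BUILD_LOG, 0, NULL, &logSize);  /* query the size only */
char *log = (char *)malloc(logSize + 1);
clGetProgramBuildInfo(program, listDevice[0], CL_PROGRAM_BUILD_LOG, logSize, log, NULL); /* fetch the full log */
log[logSize] = '\0';                                                                     /* defensive terminator */
printf("\n%s\n", log);
free(log);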
● Output
Result correct.
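The kernels above stop after a single pass, so a complete sum still requires the per-group partial results to be combined; the verification loop in main() does that on the CPU. If the final combination is also wanted on the device, a second enqueue of the same kernel would do it. The sketch below is only an assumption of how that could look: deviceC is a hypothetical one-element buffer, and deviceB would have to be created with CL_MEM_READ_WRITE instead of CL_MEM_WRITE_ONLY, since the second pass reads from it. With nGroup = 512 and BLOCK_SIZE = 256, a single work-group of reduce02 covers all partial sums.

cl_mem deviceC = clCreateBuffer(context, CL_MEM_WRITE_ONLY, sizeof(cl_int), NULL, &status);
clSetKernelArg(kernel, 0, sizeof(cl_mem), (void*)&deviceB);  /* the partial sums become the new input */
clSetKernelArg(kernel, 1, sizeof(cl_mem), (void*)&deviceC);
size_t globalSize2 = nGroup / 2, localSize2 = nGroup / 2;    /* 256 work-items, one work-group, two loads each */
clEnqueueNDRangeKernel(queue, kernel, 1, NULL, &globalSize2, &localSize2, 0, NULL, NULL);
cl_int total;
clEnqueueReadBuffer(queue, deviceC, CL_TRUE, 0, sizeof(cl_int), &total, 0, NULL, NULL);  /* total now holds the whole sum */
clReleaseMemObject(deviceC);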