First, here is the code for the "Reduction" lab from the Heterogeneous Parallel Programming class:

myReduction.c

// MP Reduction
// Given a list (lst) of length n
// Output its sum = lst[0] + lst[1] + ... + lst[n-1];

#include <wb.h>

#define BLOCK_SIZE 512 //@@ You can change this

#define wbCheck(stmt) do {                                                \
        cudaError_t err = stmt;                                           \
        if (err != cudaSuccess) {                                         \
            wbLog(ERROR, "Failed to run stmt ", #stmt);                   \
            wbLog(ERROR, "Got CUDA error ... ", cudaGetErrorString(err)); \
            return -1;                                                    \
        }                                                                 \
    } while (0)

__global__ void reduction(float *g_idata, float *g_odata, unsigned int n) {
    __shared__ float sdata[BLOCK_SIZE];

    // load shared mem
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    sdata[tid] = (i < n) ? g_idata[i] : 0;
    __syncthreads();

    // do reduction in shared mem; the stride is halved each iteration
    for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
        //__syncthreads();
        if (tid < s) {
            sdata[tid] += sdata[tid + s];
        }
        __syncthreads();
    }

    // write the result for this block to global mem
    if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}

__global__ void total(float *input, float *output, int len) {
    //@@ Load a segment of the input vector into shared memory
    __shared__ float partialSum[2 * BLOCK_SIZE]; // blockDim.x is not okay here (compile fails): the size must be a compile-time constant
    unsigned int t = threadIdx.x;
    unsigned int start = 2 * blockIdx.x * blockDim.x;
    if (start + t < len)
        partialSum[t] = input[start + t];
    else
        partialSum[t] = 0;
    if (start + blockDim.x + t < len)
        partialSum[blockDim.x + t] = input[start + blockDim.x + t];
    else
        partialSum[blockDim.x + t] = 0;

    //@@ Traverse the reduction tree
    for (unsigned int stride = blockDim.x; stride >= 1; stride >>= 1) {
        __syncthreads();
        if (t < stride)
            partialSum[t] += partialSum[t + stride];
    }

    //@@ Write the computed sum of the block to the output vector at the
    //@@ correct index
    if (t == 0)
        output[blockIdx.x] = partialSum[0];
}

int main(int argc, char **argv) {
    int ii;
    wbArg_t args;
    float *hostInput;   // The input 1D list
    float *hostOutput;  // The output list
    float *deviceInput;
    float *deviceOutput;
    int numInputElements;   // number of elements in the input list
    int numOutputElements;  // number of elements in the output list

    args = wbArg_read(argc, argv);

    wbTime_start(Generic, "Importing data and creating memory on host");
    hostInput = (float *) wbImport(wbArg_getInputFile(args, 0), &numInputElements);

    numOutputElements = numInputElements / BLOCK_SIZE;
    if (numInputElements % BLOCK_SIZE) {
        numOutputElements++;
    }

    // This is for kernel total (each block reduces 2 * BLOCK_SIZE elements)
    /* numOutputElements = numInputElements / (BLOCK_SIZE << 1);
    if (numInputElements % (BLOCK_SIZE << 1)) {
        numOutputElements++;
    } */

    hostOutput = (float *) malloc(numOutputElements * sizeof(float));
    wbTime_stop(Generic, "Importing data and creating memory on host");

    wbLog(TRACE, "The number of input elements in the input is ", numInputElements);
    wbLog(TRACE, "The number of output elements in the input is ", numOutputElements);

    wbTime_start(GPU, "Allocating GPU memory.");
    //@@ Allocate GPU memory here
    cudaMalloc((void **) &deviceInput, numInputElements * sizeof(float));
    cudaMalloc((void **) &deviceOutput, numOutputElements * sizeof(float));
    wbTime_stop(GPU, "Allocating GPU memory.");

    wbTime_start(GPU, "Copying input memory to the GPU.");
    //@@ Copy memory to the GPU here
    cudaMemcpy(deviceInput, hostInput, numInputElements * sizeof(float),
               cudaMemcpyHostToDevice);
    wbTime_stop(GPU, "Copying input memory to the GPU.");

    //@@ Initialize the grid and block dimensions here
    dim3 dimGrid(numOutputElements, 1, 1);
    dim3 dimBlock(BLOCK_SIZE, 1, 1);

    wbTime_start(Compute, "Performing CUDA computation");
    //@@ Launch the GPU Kernel here
    reduction<<<dimGrid, dimBlock>>>(deviceInput, deviceOutput, numInputElements);
    //total<<<dimGrid, dimBlock>>>(deviceInput, deviceOutput, numInputElements);
    cudaDeviceSynchronize();
    wbTime_stop(Compute, "Performing CUDA computation");

    wbTime_start(Copy, "Copying output memory to the CPU");
    //@@ Copy the GPU memory back to the CPU here
    cudaMemcpy(hostOutput, deviceOutput, sizeof(float) * numOutputElements,
               cudaMemcpyDeviceToHost);
    wbTime_stop(Copy, "Copying output memory to the CPU");

    /********************************************************************
     * Reduce output vector on the host
     * NOTE: One could also perform the reduction of the output vector
     * recursively and support any size input. For simplicity, we do not
     * require that for this lab.
     ********************************************************************/
    for (ii = 1; ii < numOutputElements; ii++) {
        hostOutput[0] += hostOutput[ii];
    }

    wbTime_start(GPU, "Freeing GPU Memory");
    //@@ Free the GPU memory here
    cudaFree(deviceInput);
    cudaFree(deviceOutput);
    wbTime_stop(GPU, "Freeing GPU Memory");

    wbSolution(args, hostOutput, 1);

    free(hostInput);
    free(hostOutput);

    return 0;
}
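The listing launches the simple reduction kernel; the commented-out lines show the alternative total kernel, in which each block first loads and sums 2 * BLOCK_SIZE elements, so the grid needs only half as many blocks. A minimal sketch of the changes needed to switch over, based on the commented-out lines above (same variable names as the listing; only the output-element count and the kernel launch change):

// Switching to the total kernel: each block now covers 2 * BLOCK_SIZE inputs,
// so the number of output elements is computed against BLOCK_SIZE << 1.
numOutputElements = numInputElements / (BLOCK_SIZE << 1);
if (numInputElements % (BLOCK_SIZE << 1)) {
    numOutputElements++;
}

dim3 dimGrid(numOutputElements, 1, 1);  // half as many blocks as before
dim3 dimBlock(BLOCK_SIZE, 1, 1);        // block size is unchanged

total<<<dimGrid, dimBlock>>>(deviceInput, deviceOutput, numInputElements);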

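The NOTE in main also mentions that the per-block partial sums could be reduced recursively on the device instead of summed on the host. A minimal sketch of that idea (not part of the lab solution), assuming the same reduction kernel and ping-ponging the two existing device buffers, which works here because the element count shrinks by a factor of BLOCK_SIZE on every pass:

// Hypothetical recursive reduction on the device:
// keep launching the kernel until a single partial sum remains.
int n = numInputElements;
float *in  = deviceInput;
float *out = deviceOutput;
while (n > 1) {
    int blocks = (n + BLOCK_SIZE - 1) / BLOCK_SIZE;  // ceil(n / BLOCK_SIZE)
    reduction<<<blocks, BLOCK_SIZE>>>(in, out, n);
    cudaDeviceSynchronize();
    n = blocks;
    float *tmp = in; in = out; out = tmp;  // ping-pong the buffers
}
// The final sum is now in in[0] on the device; copy one float back.
float sum;
cudaMemcpy(&sum, in, sizeof(float), cudaMemcpyDeviceToHost);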