cuda并行计算的几种模式
// CUDA runtime API and kernel launch-parameter definitions.
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <time.h>
#include <stdlib.h>

// Inclusive range for the random input values: [MIN, MAX].
#define MAX 120
#define MIN 0

// Adds a[] and b[] into c[] (size elements each), launching one kernel per
// element on its own CUDA stream. *etime receives the elapsed seconds.
cudaError_t addWithCudaStream(int *c, const int *a, const int *b, size_t size,
        float* etime);
// Adds a[] and b[] into c[] with a single kernel launch. type selects the
// block-parallel kernel (0) or the thread-parallel kernel (non-zero).
// *etime receives the elapsed seconds.
cudaError_t addWithCuda(int *c, const int *a, const int *b, size_t size,
        float* etime, int type);
// Block-parallel add: expects launch configuration <<<size, 1>>> so that
// each block handles exactly one element. There is no bounds guard, so the
// grid dimension MUST equal the array length.
__global__ void addKernel(int *c, const int *a, const int *b) {
    int i = blockIdx.x;
    c[i] = a[i] + b[i];
}

// Thread-parallel add: expects launch configuration <<<1, size>>> so that
// each thread handles exactly one element. There is no bounds guard, so the
// block dimension MUST equal the array length.
__global__ void addKernelThread(int *c, const int *a, const int *b) {
    int i = threadIdx.x;
    c[i] = a[i] + b[i];
}
// Prints the three n-element vectors as "{a...} + {b...} = {c...}".
static void printVectors(const int *a, const int *b, const int *c, int n) {
    const int *vecs[3] = { a, b, c };
    const char *tail[3] = { " + ", " = ", "\n" };
    for (int v = 0; v < 3; v++) {
        printf("{");
        for (int i = 0; i < n; i++) {
            printf(i ? ",%d" : "%d", vecs[v][i]);
        }
        printf("}%s", tail[v]);
    }
}

/*
 * Demo driver: fills two random int vectors, then adds them on the GPU
 * several times using three launch strategies (stream-parallel,
 * block-parallel, thread-parallel), printing each result and its timing.
 *
 * NOTE(review): numeric literals in this listing were stripped by the web
 * scrape; the values below restore the evident intent (5-element arrays to
 * match the printed output, device 0, return 1 on failure).
 */
int main() {
    const int arraySize = 5;
    srand((unsigned) time(NULL));

    int a[arraySize];
    int b[arraySize];
    for (int i = 0; i < arraySize; i++) {
        a[i] = rand() % (MAX + 1 - MIN) + MIN;
        b[i] = rand() % (MAX + 1 - MIN) + MIN;
    }
    int c[arraySize] = { 0 };

    cudaError_t cudaStatus;
    int num = 0;
    cudaDeviceProp prop;
    cudaStatus = cudaGetDeviceCount(&num);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaGetDeviceCount failed!");
        return 1;
    }
    // Query each device's properties (inspect in a debugger if needed).
    for (int i = 0; i < num; i++) {
        cudaGetDeviceProperties(&prop, i);
    }

    float time;

    // 1) Stream-parallel add. (The blog's output shows this FIRST stream
    // run producing wrong sums; the repeats below come out correct.)
    cudaStatus = addWithCudaStream(c, a, b, arraySize, &time);
    printf("Elapsed time of stream is : %f \n", time);
    printVectors(a, b, c, arraySize);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "addWithCudaStream failed!");
        return 1;
    }

    // 2) Block-parallel add: type 0 -> addKernel<<<size, 1>>>.
    cudaStatus = addWithCuda(c, a, b, arraySize, &time, 0);
    printf("Elapsed time of Block is : %f \n", time);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "addWithCuda (block) failed!");
        return 1;
    }
    printVectors(a, b, c, arraySize);

    // 3) Thread-parallel add: type 1 -> addKernelThread<<<1, size>>>.
    cudaStatus = addWithCuda(c, a, b, arraySize, &time, 1);
    printf("Elapsed time of thread is : %f \n", time);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "addWithCuda (thread) failed!");
        return 1;
    }
    printVectors(a, b, c, arraySize);

    // 4) Stream-parallel add again, for comparison with run 1.
    cudaStatus = addWithCudaStream(c, a, b, arraySize, &time);
    printf("Elapsed time of stream is : %f \n", time);
    printVectors(a, b, c, arraySize);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "addWithCudaStream failed!");
        return 1;
    }

    // cudaDeviceReset must be called before exiting so profiling/tracing
    // tools such as Nsight and Visual Profiler show complete traces.
    // (Replaces the deprecated cudaThreadExit of the original.)
    cudaStatus = cudaDeviceReset();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceReset failed!");
        return 1;
    }
    return 0;
}
// Helper function: adds a[] and b[] into c[] on the GPU by launching one
// single-thread kernel per element, each on its own CUDA stream.
// c/a/b: host arrays of `size` ints. *etime: elapsed seconds measured
// around the final synchronization (clock()-based, coarse resolution).
// Returns cudaSuccess or the first failing CUDA status.
// NOTE(review): the stream count is hard-coded to 5, so this demo assumes
// size == 5; generalizing would require a heap-allocated stream array.
cudaError_t addWithCudaStream(int *c, const int *a, const int *b, size_t size,
        float* etime) {
    const int nStreams = 5;
    int *dev_a = 0;
    int *dev_b = 0;
    int *dev_c = 0;
    cudaStream_t stream[5];
    int created = 0;  // streams actually created; bounds the cleanup loop
    clock_t start, stop;
    cudaError_t cudaStatus;

    // Choose which GPU to run on, change this on a multi-GPU system.
    cudaStatus = cudaSetDevice(0);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr,
                "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
        goto Error;
    }

    // Allocate GPU buffers for three vectors (two input, one output).
    cudaStatus = cudaMalloc((void**) &dev_c, size * sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    cudaStatus = cudaMalloc((void**) &dev_a, size * sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    cudaStatus = cudaMalloc((void**) &dev_b, size * sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }

    // Copy input vectors from host memory to GPU buffers.
    cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int),
            cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
    cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int),
            cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }

    // Create the streams, checking each return code. Tracking `created`
    // means the Error path never destroys an uninitialized handle (the
    // original destroyed all 5 unconditionally, which is undefined
    // behavior when an earlier `goto Error` skipped creation).
    for (created = 0; created < nStreams; created++) {
        cudaStatus = cudaStreamCreate(&stream[created]);
        if (cudaStatus != cudaSuccess) {
            fprintf(stderr, "cudaStreamCreate failed!");
            goto Error;
        }
    }

    // Launch one single-thread kernel per element, each on its own stream.
    for (int i = 0; i < nStreams; i++) {
        addKernel<<<1, 1, 0, stream[i]>>>(dev_c + i, dev_a + i, dev_b + i);
    }

    // Launches above are asynchronous; wait for every stream to finish.
    // NOTE(review): clock() only brackets this synchronization and has
    // coarse resolution — treat the timing as indicative, not precise.
    // (The original followed this with a redundant, deprecated
    // cudaThreadSynchronize; one device-wide sync suffices. The blog also
    // reports wrong sums on the FIRST stream run — worth re-verifying
    // after these cleanup fixes.)
    start = clock();
    cudaStatus = cudaDeviceSynchronize();
    stop = clock();
    *etime = (float) (stop - start) / CLOCKS_PER_SEC;
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr,
                "cudaDeviceSynchronize returned error code %d after launching addKernel!\n",
                cudaStatus);
        goto Error;
    }

    // Copy output vector from GPU buffer to host memory.
    cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int),
            cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
    }

Error:
    // Destroy only the streams that were actually created.
    for (int i = 0; i < created; i++) {
        cudaStreamDestroy(stream[i]);
    }
    cudaFree(dev_c);
    cudaFree(dev_a);
    cudaFree(dev_b);
    return cudaStatus;
}
// Helper function: adds a[] and b[] into c[] on the GPU with one launch.
// c/a/b: host arrays of `size` ints.
// type == 0: block-parallel, addKernel<<<size, 1>>> (one block/element).
// type != 0: thread-parallel, addKernelThread<<<1, size>>> (one
//            thread/element; size must not exceed the device's max block
//            dimension).
// *etime: elapsed seconds for launch + execution (clock()-based, coarse).
// Returns cudaSuccess or the first failing CUDA status.
cudaError_t addWithCuda(int *c, const int *a, const int *b, size_t size,
        float * etime, int type) {
    int *dev_a = 0;
    int *dev_b = 0;
    int *dev_c = 0;
    clock_t start, stop;
    cudaError_t cudaStatus;

    // Choose which GPU to run on, change this on a multi-GPU system.
    cudaStatus = cudaSetDevice(0);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr,
                "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
        goto Error;
    }

    // Allocate GPU buffers for three vectors (two input, one output).
    cudaStatus = cudaMalloc((void**) &dev_c, size * sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    cudaStatus = cudaMalloc((void**) &dev_a, size * sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    cudaStatus = cudaMalloc((void**) &dev_b, size * sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }

    // Copy input vectors from host memory to GPU buffers.
    cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int),
            cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
    cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int),
            cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }

    // Launch the selected kernel with one thread (or block) per element.
    start = clock();
    if (type == 0) {
        addKernel<<<(unsigned int) size, 1>>>(dev_c, dev_a, dev_b);
    } else {
        addKernelThread<<<1, (unsigned int) size>>>(dev_c, dev_a, dev_b);
    }

    // Kernel launches are asynchronous: synchronize BEFORE stopping the
    // clock, otherwise only the launch overhead is measured. (The original
    // took `stop` immediately after the launch, and used the deprecated
    // cudaThreadSynchronize; cudaDeviceSynchronize also surfaces any
    // errors encountered during execution.)
    cudaStatus = cudaDeviceSynchronize();
    stop = clock();
    *etime = (float) (stop - start) / CLOCKS_PER_SEC;
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr,
                "cudaDeviceSynchronize returned error code %d after launching addKernel!\n",
                cudaStatus);
        goto Error;
    }

    // Copy output vector from GPU buffer to host memory.
    cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int),
            cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }

Error:
    cudaFree(dev_c);
    cudaFree(dev_a);
    cudaFree(dev_b);
    return cudaStatus;
}
如上文的实现程序，使用了thread并行、block并行、stream并行三种模式，用这三种方法进行了五次计算，发现stream第一次计算时结果出错，而调用的子程序没有任何变化，没有搞懂原因？
Elasped time of stream is : 0.000006
{47,86,67,35,16} + {114,39,110,20,101} = {158,123,92,107,127}
Elasped time of Block is : 0.000006
{47,86,67,35,16} + {114,39,110,20,101} = {161,125,177,55,117}
Elasped time of stream is : 0.000008
{47,86,67,35,16} + {114,39,110,20,101} = {161,125,177,55,117}
Elasped time of thread is : 0.000004
{47,86,67,35,16} + {114,39,110,20,101} = {161,125,177,55,117}
Elasped time of stream is : 0.000007
{47,86,67,35,16} + {114,39,110,20,101} = {161,125,177,55,117}
cuda并行计算的几种模式的更多相关文章
- 对称加密和分组加密中的四种模式(ECB、CBC、CFB、OFB)
一. AES对称加密: AES加密 分组 二. 分组密码的填充 分组密码的填充 e.g.: PKCS#5填充方式 三. 流密码: 四. 分组密码加密中的四种模式: 3.1 ECB模式 优点: 1. ...
- win7 64位下自行编译OpenCV2.4.10+CUDA toolkit 5.5的整个过程以及需要注意的问题(opencv+cuda并行计算元素的使用)
首先说明的是,这个帖子是成功的编译了dll,但是这个dll使用的时候还是很容易出现各种问题的. 发现错误可能是由于系统安装了太多版本的opencv,环境变量的设置混乱,造成dll版本加载 ...
- Spark On Yarn的两种模式yarn-cluster和yarn-client深度剖析
Spark On Yarn的优势 每个Spark executor作为一个YARN容器(container)运行.Spark可以使得多个Tasks在同一个容器(container)里面运行 1. Sp ...
- AES加密的四种模式详解
对称加密和分组加密中的四种模式(ECB.CBC.CFB.OFB) 一. AES对称加密: A ...
- Hadoop hadoop的介绍和几种模式
Hadoop简介 Hadoop软件库是一个开源框架,允许使用简单的编程模型跨计算机集群分布式处理大型数据集.它旨在从单个服务器扩展到数千台计算机,每台计算机都提供本地计算和存储.库本身不是依靠硬件来提 ...
- hadoop(1)---hadoop的介绍和几种模式。
一.什么是hadoop? Hadoop软件库是一个开源框架,允许使用简单的编程模型跨计算机集群分布式处理大型数据集.它旨在从单个服务器扩展到数千台计算机,每台计算机都提供本地计算和存储.库本身不是依靠 ...
- javascript 创建对象的7种模式
使用字面量方式创建一个 student 对象: var student = function (){ name : "redjoy", age : 21, sex: women, ...
- javascript面向对象系列第二篇——创建对象的5种模式
× 目录 [1]字面量 [2]工厂模式 [3]构造函数[4]原型模式[5]组合模式 前面的话 如何创建对象,或者说如何更优雅的创建对象,一直是一个津津乐道的话题.本文将从最简单的创建对象的方式入手,逐 ...
- javascript创建对象的几种模式
在js中有几种模式可以创建对象,通过对象操作所包含的属性与方法. 一般来说,构造函数名称的第一个字母为大写字母,非构造函数名称的第一个字母为小写字母,当然,构造函数与一般函数唯一的区别只是调用的方式不 ...
随机推荐
- git和svn
git 分布式管理工具 svn 集中式管理工具 1. Git是分布式的,SVN是集中式的,好处是跟其他同事不会有太多的冲突,自己写的代码放在自己电脑上,一段时间后再提交.合并,也可以不用联网在本地提交 ...
- chrome developer tool—— 断点调试篇
断点,调试器的功能之一,可以让程序中断在需要的地方,从而方便其分析.也可以在一次调试中设置断点,下一次只需让程序自动运行到设置断点位置,便可在上次设置断点的位置中断下来,极大的方便了操作,同时节省了时 ...
- iOS 代理协议
代理,又称委托代理(delegate),是iOS中常用的设计一种模式.顾名思义,它是把某个对象要做的事情委托给别的对象去做.那么别的对象就是这个对象的代理,代替它来打理要做的事.反映到程序中, 首先要 ...
- Swift安装
Server1 .Update sudo apt-get update sudo apt-get upgrade . sudo apt-get install bridge-utils .IP 3.1 ...
- 渗透测试常规思路分析-FREEBUF
最基础但练得好最后也非常厉害 1. 主要由于服务器配置等原因造成的信息泄露 常用google ,bing等搜索工具,轻量级的搜索出一些遗留后门,不想被发现的后台入口,中量级的搜索出一些用户信息泄露, ...
- SQLLDR 教程
)- 总览 http://blog.csdn.net/dbanote/article/details/9153895 )- 命令行参数 http://blog.csdn.net/dbano ...
- 巧用用layer-list做一个卡片背景
<?xml version="1.0" encoding="utf-8"?> <layer-list xmlns:android=" ...
- IOS中限制TextField中输入的类型以及长度
-(BOOL)textField:(UITextField *)textField shouldChangeCharactersInRange:(NSRange)range replacementSt ...
- Android TextView走马灯效果
布局: <TextView android:id="@+id/myTextView" android:layout_width="match_parent" ...
- Swift开发第十二篇——protocol组合&static和class
本篇分为两部分: 一.Swift 中 protocol 组合的使用 二.Swfit 中 static和class 的使用 一.Swift 中 protocol 组合的使用 在 Swift 中我们可以使 ...