Matrix multiplication using square two-dimensional thread blocks and shared memory, shown under both static compilation and runtime compilation (NVRTC).

▶ Source code: static compilation

 #include <stdio.h>
#include <assert.h>
#include <cuda_runtime.h>
#include "device_launch_parameters.h"
#include <helper_functions.h>
#include <helper_cuda.h>

template <int BLOCK_SIZE> __global__ void matrixMulCUDA(float *C, float *A, float *B, int wA, int wB)
{
    int bx = blockIdx.x;
    int by = blockIdx.y;
    int tx = threadIdx.x;
    int ty = threadIdx.y;

    int aBegin = wA * BLOCK_SIZE * by;  // start of A's strip for this block
    int aEnd = aBegin + wA - 1;         // end of A's strip
    int aStep = BLOCK_SIZE;             // step through A (each block walks a strip BLOCK_SIZE wide; each thread handles one element)
    int bBegin = BLOCK_SIZE * bx;       // start of B's strip for this block
    int bStep = BLOCK_SIZE * wB;        // step through B (each block walks a strip BLOCK_SIZE tall; each thread handles one element)
    float Csub = 0;

    for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep)
    {
        __shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
        __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
        As[ty][tx] = A[a + wA * ty + tx];
        Bs[ty][tx] = B[b + wB * ty + tx];
        __syncthreads();

#pragma unroll  // unrolled into BLOCK_SIZE assignment statements for efficiency
        for (int k = 0; k < BLOCK_SIZE; ++k)
            Csub += As[ty][k] * Bs[k][tx];
        __syncthreads();
    }

    int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
    C[c + wB * ty + tx] = Csub;
}

void constantInit(float *data, int size, float val)
{
    for (int i = 0; i < size; ++i)
        data[i] = val;
}

int matrixMultiply(int argc, char **argv, int block_size, dim3 &dimsA, dim3 &dimsB)
{
    unsigned int size_A = dimsA.x * dimsA.y;
    unsigned int mem_size_A = sizeof(float) * size_A;
    float *h_A = (float *)malloc(mem_size_A);
    unsigned int size_B = dimsB.x * dimsB.y;
    unsigned int mem_size_B = sizeof(float) * size_B;
    float *h_B = (float *)malloc(mem_size_B);
    const float valB = 0.01f;
    constantInit(h_A, size_A, 1.0f);
    constantInit(h_B, size_B, valB);
    dim3 dimsC(dimsB.x, dimsA.y, 1);
    unsigned int mem_size_C = dimsC.x * dimsC.y * sizeof(float);
    float *h_C = (float *) malloc(mem_size_C);
    float *d_A, *d_B, *d_C;
    cudaMalloc((void **) &d_A, mem_size_A);
    cudaMalloc((void **) &d_B, mem_size_B);
    cudaMalloc((void **) &d_C, mem_size_C);
    cudaMemcpy(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice);

    // warm-up run
    dim3 threads(block_size, block_size);
    dim3 grid(dimsB.x / threads.x, dimsA.y / threads.y);
    printf("Computing result using CUDA Kernel...\n");
    if (block_size == 16)
        matrixMulCUDA<16><<< grid, threads >>>(d_C, d_A, d_B, dimsA.x, dimsB.x);
    else
        matrixMulCUDA<32><<< grid, threads >>>(d_C, d_A, d_B, dimsA.x, dimsB.x);
printf("done\n");
cudaDeviceSynchronize(); printf("Computing result using CUDA Kernel...\n");
cudaEvent_t start;
cudaEventCreate(&start);
cudaEvent_t stop;
cudaEventCreate(&stop);
cudaEventRecord(start, NULL); int nIter = ;
for (int j = ; j < nIter; j++)
{
if (block_size == )
matrixMulCUDA<><<< grid, threads >>>(d_C, d_A, d_B, dimsA.x, dimsB.x);
else
matrixMulCUDA<><<< grid, threads >>>(d_C, d_A, d_B, dimsA.x, dimsB.x);
}
cudaEventRecord(stop, NULL);
cudaEventSynchronize(stop); float msecTotal = 0.0f;
cudaEventElapsedTime(&msecTotal, start, stop);
float msecPerMatrixMul = msecTotal / nIter;
double flopsPerMatrixMul = 2.0 * (double)dimsA.x * (double)dimsA.y * (double)dimsB.x;
double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul / 1000.0f);
printf("Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops, WorkgroupSize= %u threads/block\n",
gigaFlops, msecPerMatrixMul, flopsPerMatrixMul, threads.x * threads.y);
    cudaMemcpy(h_C, d_C, mem_size_C, cudaMemcpyDeviceToHost);

    // check the result, requiring relative error |<x, y>_cpu - <x,y>_gpu| / <|x|, |y|> < eps
    printf("Checking computed result for correctness: ");
    bool correct = true;
    double eps = 1.e-6; // machine zero
    for (int i = 0; i < (int)(dimsC.x * dimsC.y); i++)
    {
        double abs_err = fabs(h_C[i] - (dimsA.x * valB));
        double dot_length = dimsA.x;
        double abs_val = fabs(h_C[i]);
        double rel_err = abs_err / abs_val / dot_length;
        if (rel_err > eps)
        {
            printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n", i, h_C[i], dimsA.x*valB, eps);
            correct = false;
        }
    }
    printf("%s\n", correct ? "Result = PASS" : "Result = FAIL");

    free(h_A);
    free(h_B);
    free(h_C);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    printf("\nNOTE: The CUDA Samples are not meant for performance measurements. Results may vary when GPU Boost is enabled.\n");
    if (correct)
        return EXIT_SUCCESS;
    else
        return EXIT_FAILURE;
}

int main(int argc, char **argv)
{
printf("[Matrix Multiply Using CUDA] - Starting...\n"); if (checkCmdLineFlag(argc, (const char **)argv, "help") || checkCmdLineFlag(argc, (const char **)argv, "?"))
{
printf("Usage -device=n (n >= 0 for deviceID)\n");
printf(" -wA=WidthA -hA=HeightA (Width x Height of Matrix A)\n");
printf(" -wB=WidthB -hB=HeightB (Width x Height of Matrix B)\n");
printf(" Note: Outer matrix dimensions of A & B matrices must be equal.\n");
exit(EXIT_SUCCESS);
} int devID = ;// 指定设备,默认用0号设备
if (checkCmdLineFlag(argc, (const char **)argv, "device"))
{
devID = getCmdLineArgumentInt(argc, (const char **)argv, "device");
cudaSetDevice(devID);
}
cudaDeviceProp deviceProp;
cudaGetDevice(&devID);
cudaGetDeviceProperties(&deviceProp, devID); if (deviceProp.computeMode == cudaComputeModeProhibited)
{
fprintf(stderr, "Error: device is running in <Compute Mode Prohibited>, no threads can use ::cudaSetDevice().\n");
exit(EXIT_SUCCESS);
} int block_size = (deviceProp.major < ) ? : ; dim3 dimsA(**block_size, **block_size, );
dim3 dimsB(**block_size, **block_size, ); // 使用命令行指定的A、B的维度参数
if (checkCmdLineFlag(argc, (const char **)argv, "wA"))
dimsA.x = getCmdLineArgumentInt(argc, (const char **)argv, "wA");
if (checkCmdLineFlag(argc, (const char **)argv, "hA"))
dimsA.y = getCmdLineArgumentInt(argc, (const char **)argv, "hA");
if (checkCmdLineFlag(argc, (const char **)argv, "wB"))
dimsB.x = getCmdLineArgumentInt(argc, (const char **)argv, "wB");
if (checkCmdLineFlag(argc, (const char **)argv, "hB"))
dimsB.y = getCmdLineArgumentInt(argc, (const char **)argv, "hB");
if (dimsA.x != dimsB.y)
{
printf("Error: outer matrix dimensions must be equal. (%d != %d)\n",
dimsA.x, dimsB.y);
exit(EXIT_FAILURE);
}
printf("MatrixA(%d,%d), MatrixB(%d,%d)\n", dimsA.x, dimsA.y, dimsB.x, dimsB.y); int matrix_result = matrixMultiply(argc, argv, block_size, dimsA, dimsB); getchar();
exit(matrix_result);
}

▶ Source code: runtime compilation (NVRTC)

 /*matrixMul_kernel.cu*/
template <int BLOCK_SIZE> __device__ void matrixMulCUDA(float *C, float *A, float *B, int wA, int wB)
{
    int bx = blockIdx.x;
    int by = blockIdx.y;
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    int aBegin = wA * BLOCK_SIZE * by;
    int aEnd = aBegin + wA - 1;
    int aStep = BLOCK_SIZE;
    int bBegin = BLOCK_SIZE * bx;
    int bStep = BLOCK_SIZE * wB;
    float Csub = 0;
    for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep)
    {
        __shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
        __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
        As[ty][tx] = A[a + wA * ty + tx];
        Bs[ty][tx] = B[b + wB * ty + tx];
        __syncthreads();
#pragma unroll
        for (int k = 0; k < BLOCK_SIZE; ++k)
            Csub += As[ty][k] * Bs[k][tx];
        __syncthreads();
    }
    int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
    C[c + wB * ty + tx] = Csub;
}

extern "C" __global__ void matrixMulCUDA_block16(float *C, float *A, float *B, int wA, int wB)
{
    matrixMulCUDA<16>(C, A, B, wA, wB);
}

extern "C" __global__ void matrixMulCUDA_block32(float *C, float *A, float *B, int wA, int wB)
{
    matrixMulCUDA<32>(C, A, B, wA, wB);
}
 /*matrixMul.cpp*/
#include <stdio.h>
#include <assert.h>
#include <cuda_runtime.h>
#include "device_launch_parameters.h"
#include "nvrtc_helper.h"
#include <helper_functions.h>

void constantInit(float *data, int size, float val)
{
    for (int i = 0; i < size; ++i)
        data[i] = val;
}

int matrixMultiply(int argc, char **argv, int block_size, dim3 &dimsA, dim3 &dimsB)
{
    // Allocate host memory for matrices A and B
    unsigned int size_A = dimsA.x * dimsA.y;
    unsigned int mem_size_A = sizeof(float) * size_A;
    float *h_A = (float *)malloc(mem_size_A);
    unsigned int size_B = dimsB.x * dimsB.y;
    unsigned int mem_size_B = sizeof(float) * size_B;
    float *h_B = (float *)malloc(mem_size_B);
    const float valB = 0.01f;
    constantInit(h_A, size_A, 1.0f);
    constantInit(h_B, size_B, valB);

    CUdeviceptr d_A, d_B, d_C;
    char *ptx, *kernel_file;
    size_t ptxSize;
    kernel_file = sdkFindFilePath("matrixMul_kernel.cu", argv[0]);
    compileFileToPTX(kernel_file, 0, NULL, &ptx, &ptxSize);
    CUmodule module = loadPTX(ptx, argc, argv);

    dim3 dimsC(dimsB.x, dimsA.y, 1);
    unsigned int mem_size_C = dimsC.x * dimsC.y * sizeof(float);
    float *h_C = (float *) malloc(mem_size_C);
    cuMemAlloc(&d_A, mem_size_A);
    cuMemAlloc(&d_B, mem_size_B);
    cuMemAlloc(&d_C, mem_size_C);
    cuMemcpyHtoD(d_A, h_A, mem_size_A);
    cuMemcpyHtoD(d_B, h_B, mem_size_B);

    dim3 threads(block_size, block_size);
    dim3 grid(dimsB.x / threads.x, dimsA.y / threads.y);
    printf("Computing result using CUDA Kernel...\n");

    CUfunction kernel_addr;
    if (block_size == 16)
        cuModuleGetFunction(&kernel_addr, module, "matrixMulCUDA_block16");
    else
        cuModuleGetFunction(&kernel_addr, module, "matrixMulCUDA_block32");

    void *arr[] = { (void *)&d_C, (void *)&d_A, (void *)&d_B, (void *)&dimsA.x, (void *)&dimsB.x };

    // Execute the kernel
    int nIter = 300;
    for (int j = 0; j < nIter; j++)
    {
        cuLaunchKernel(kernel_addr,
                       grid.x, grid.y, grid.z,
                       threads.x, threads.y, threads.z,
                       0, 0, &arr[0], 0);
        cuCtxSynchronize();
    }
    cuMemcpyDtoH(h_C, d_C, mem_size_C);

    printf("Checking computed result for correctness: ");
    bool correct = true;
    double eps = 1.e-6;
    for (int i = 0; i < (int)(dimsC.x * dimsC.y); i++)
    {
        double abs_err = fabs(h_C[i] - (dimsA.x * valB));
        double dot_length = dimsA.x;
        double abs_val = fabs(h_C[i]);
        double rel_err = abs_err / abs_val / dot_length;
        if (rel_err > eps)
        {
            printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n", i, h_C[i], dimsA.x*valB, eps);
            correct = false;
        }
    }
    printf("%s\n", correct ? "Result = PASS" : "Result = FAIL");

    printf("\nNOTE: The CUDA Samples are not meant for performance measurements. Results may vary when GPU Boost is enabled.\n");
free(h_A);
free(h_B);
free(h_C);
cuMemFree(d_A);
cuMemFree(d_B);
cuMemFree(d_C);
if (correct)
return EXIT_SUCCESS;
else
return EXIT_FAILURE;
} int main(int argc, char **argv)
{
printf("[Matrix Multiply Using CUDA] - Starting...\n"); if (checkCmdLineFlag(argc, (const char **)argv, "help") || checkCmdLineFlag(argc, (const char **)argv, "?"))
{
printf("Usage -device=n (n >= 0 for deviceID)\n");
printf(" -wA=WidthA -hA=HeightA (Width x Height of Matrix A)\n");
printf(" -wB=WidthB -hB=HeightB (Width x Height of Matrix B)\n");
printf(" Note: Outer matrix dimensions of A & B matrices must be equal.\n");
exit(EXIT_SUCCESS);
} int block_size = ;
dim3 dimsA(**block_size, **block_size, );
dim3 dimsB(**block_size, **block_size, ); if (checkCmdLineFlag(argc, (const char **)argv, "wA"))
dimsA.x = getCmdLineArgumentInt(argc, (const char **)argv, "wA");
if (checkCmdLineFlag(argc, (const char **)argv, "hA"))
dimsA.y = getCmdLineArgumentInt(argc, (const char **)argv, "hA");
if (checkCmdLineFlag(argc, (const char **)argv, "wB"))
dimsB.x = getCmdLineArgumentInt(argc, (const char **)argv, "wB");
if (checkCmdLineFlag(argc, (const char **)argv, "hB"))
dimsB.y = getCmdLineArgumentInt(argc, (const char **)argv, "hB");
if (dimsA.x != dimsB.y)
{
printf("Error: outer matrix dimensions must be equal. (%d != %d)\n", dimsA.x, dimsB.y);
} exit(EXIT_FAILURE);
printf("MatrixA(%d,%d), MatrixB(%d,%d)\n", dimsA.x, dimsA.y, dimsB.x, dimsB.y); int matrix_result = matrixMultiply(argc, argv, block_size, dimsA, dimsB); getchar();
exit(matrix_result);
}

▶ Output:

[Matrix Multiply Using CUDA] - Starting...
GPU Device 0: "GeForce GTX 1070" with compute capability 6.1

MatrixA(320,320), MatrixB(640,320)
Computing result using CUDA Kernel...
done
Performance= 22.95 GFlop/s, Time= 5.712 msec, Size= 131072000 Ops, WorkgroupSize= 1024 threads/block
Checking computed result for correctness: Result = PASS

NOTE: The CUDA Samples are not meant for performance measurements. Results may vary when GPU Boost is enabled.

▶ Takeaways:

● The program is poorly organized: declarations and initialization are jumbled together throughout.

● A function that returns a human-readable description for a given CUDA error code:

extern __host__ __cudart_builtin__ const char* CUDARTAPI cudaGetErrorString(cudaError_t error);
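
A minimal sketch of how one might wrap runtime calls with it — note that the CHECK_CUDA macro name is our own convenience, not part of the CUDA API:

 #include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>

// Hypothetical convenience macro: on failure, print the message
// from cudaGetErrorString with file/line context, then exit.
#define CHECK_CUDA(call)                                           \
    do {                                                           \
        cudaError_t err = (call);                                  \
        if (err != cudaSuccess) {                                  \
            fprintf(stderr, "CUDA error at %s:%d: %s\n",           \
                    __FILE__, __LINE__, cudaGetErrorString(err));  \
            exit(EXIT_FAILURE);                                    \
        }                                                          \
    } while (0)

// Usage: CHECK_CUDA(cudaMalloc((void **)&d_A, mem_size_A));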

● Unrolling loops with a compiler directive

 #pragma unroll
for (int i = 0; i < m; i++)
    c[i] = a[i] + b[i];

is equivalent to

 c[0] = a[0] + b[0];
c[1] = a[1] + b[1];
c[2] = a[2] + b[2];
...
c[m-1] = a[m-1] + b[m-1];

#pragma unroll may be followed by a number giving the unroll factor, i.e. how many iterations to unroll at a time, e.g. #pragma unroll 4.
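
For instance, a sketch of partial unrolling on a grid-stride loop (vecAdd here is a made-up example kernel, not from the sample):

 // Hypothetical kernel demonstrating a partial unroll factor.
__global__ void vecAdd(float *c, const float *a, const float *b, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = gridDim.x * blockDim.x;
    // Ask the compiler to unroll this loop 4 iterations at a time.
    #pragma unroll 4
    for (; i < n; i += stride)
        c[i] = a[i] + b[i];
}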

● Generic (templated) kernels. A compile-time constant can be passed as a template argument at the kernel call site, which in effect lets you size shared-memory and similar arrays "dynamically" — per instantiation — even though a runtime variable is not allowed there.

 template <int BLOCK_SIZE> __global__ void functionName(void)
{
    __shared__ int shareArray[BLOCK_SIZE];
    ...
}

functionName<16><<<blocksize, threadsize>>>();
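
Because the template argument must be known at compile time, every block size used needs its own instantiation. That is exactly why the host code above dispatches through an if/else on block_size (matrixMulCUDA<16> vs. matrixMulCUDA<32>), and why the NVRTC version exports a separate extern "C" wrapper per block size: a runtime value cannot be passed as a template argument.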

● Warm-up: run the computation once before the timed repetitions. This helps with caches (and clock ramp-up) and noticeably reduces the variance of the measured run times.
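
Reduced to a sketch (dummy is a stand-in kernel and d_x an assumed device buffer; error checks omitted), the warm-up-then-time pattern from the sample looks like this:

 #include <stdio.h>
#include <cuda_runtime.h>

__global__ void dummy(float *x) { x[threadIdx.x] += 1.0f; }  // stand-in workload

void timeKernel(float *d_x, int nIter)
{
    dummy<<<1, 256>>>(d_x);          // warm-up launch: primes caches and clocks
    cudaDeviceSynchronize();

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, NULL);
    for (int j = 0; j < nIter; j++)  // only these launches are timed
        dummy<<<1, 256>>>(d_x);
    cudaEventRecord(stop, NULL);
    cudaEventSynchronize(stop);      // wait for the timed work to finish

    float msecTotal = 0.0f;
    cudaEventElapsedTime(&msecTotal, start, stop);
    printf("average %.3f ms per launch\n", msecTotal / nIter);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
}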
