0_Simple__vectorAdd + 0_Simple__vectorAdd_nvrtc + 0_Simple__vectorAddDrv
▶ 使用 CUDA Runtime API,运行时编译,Driver API 三种接口计算向量加法
▶ 源代码,CUDA Runtime API
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda_runtime.h>
#include "device_launch_parameters.h"
#include <helper_cuda.h>

// Number of vector elements.
// NOTE(review): every numeric literal was stripped from this listing by the
// scrape; the values below are restored from the NVIDIA "vectorAdd" sample —
// confirm against the upstream source.
#define ELEMENT 50000

// Element-wise vector addition: C[i] = A[i] + B[i] for i in [0, size).
// Expects a 1-D grid of 1-D blocks providing at least `size` threads total.
__global__ void vectorAdd(const float *A, const float *B, float *C, int size)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < size)               // guard the grid tail: the grid may overshoot `size`
        C[i] = A[i] + B[i];
}

int main()
{
    printf("\tStart.\n");
    size_t size = ELEMENT * sizeof(float);

    // Host buffers.
    float *h_A = (float *)malloc(size);
    float *h_B = (float *)malloc(size);
    float *h_C = (float *)malloc(size);

    // Device buffers.
    float *d_A = NULL;
    float *d_B = NULL;
    float *d_C = NULL;
    cudaMalloc((void **)&d_A, size);
    cudaMalloc((void **)&d_B, size);
    cudaMalloc((void **)&d_C, size);

    // Fill inputs with random values in [0, 1].
    for (int i = 0; i < ELEMENT; ++i)
    {
        h_A[i] = rand() / (float)RAND_MAX;
        h_B[i] = rand() / (float)RAND_MAX;
    }
    cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);

    // Launch configuration: ceil-division so the last partial block is covered.
    int threadsPerBlock = 256;
    int blocksPerGrid = (ELEMENT + threadsPerBlock - 1) / threadsPerBlock;
    vectorAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, ELEMENT);

    // Blocking device-to-host copy also synchronizes with the kernel.
    cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);

    // Verify against the host-computed sum.
    for (int i = 0; i < ELEMENT; ++i)
    {
        if (fabs(h_A[i] + h_B[i] - h_C[i]) > 1e-5)
        {
            printf("\n\tResult error at i = %d, h_A[i] = %f, h_B[i] = %f, h_C[i] = %f\n", i, h_A[i], h_B[i], h_C[i]);
            getchar();
            return 1;
        }
    }

    free(h_A);
    free(h_B);
    free(h_C);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    printf("\n\tFinish.\n");
    getchar();
    return 0;
}
● 输出结果:
Start.
Finish.
▶ 源代码,运行时编译
// vectorAdd_kernel.cu
// Element-wise vector addition, compiled at runtime via NVRTC. The extern "C"
// linkage keeps the symbol unmangled so cuModuleGetFunction("vectorAdd")
// can look it up by name.
extern "C" __global__ void vectorAdd(const float *A, const float *B, float *C, int size)
{
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx < size)                 // the grid may provide more threads than elements
    {
        C[idx] = A[idx] + B[idx];
    }
}
// vectorAdd.cpp
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda_runtime.h>
#include "device_launch_parameters.h"
#include <cuda.h>
#include <nvrtc_helper.h>

// Number of vector elements.
// NOTE(review): numeric literals were stripped from this listing; the values
// below are restored from the vectorAdd_nvrtc sample — confirm upstream.
#define ELEMENT 50000

int main()
{
    printf("\n\tStart.\n");

    // Runtime-compile the kernel source to PTX, load it as a module, and look
    // up the kernel function by its extern "C" name.
    char *ptx, *kernel_file;
    size_t ptxSize;
    kernel_file = (char *)"D:\\Program\\CUDA9.0\\Samples\\0_Simple\\vectorAdd_nvrtc\\vectorAdd_kernel.cu";
    // Arguments 2/3 are argc/argv for extra compile options — none here.
    // TODO(review): confirm argument values against this nvrtc_helper.h version.
    compileFileToPTX(kernel_file, 0, NULL, &ptx, &ptxSize, 0);
    CUmodule module = loadPTX(ptx, 0, NULL);
    CUfunction kernel_addr;
    cuModuleGetFunction(&kernel_addr, module, "vectorAdd");

    size_t size = ELEMENT * sizeof(float);

    // Host buffers.
    float *h_A = (float *)malloc(size);
    float *h_B = (float *)malloc(size);
    float *h_C = (float *)malloc(size);

    // Device buffers (driver API handles).
    CUdeviceptr d_A, d_B, d_C;
    cuMemAlloc(&d_A, size);
    cuMemAlloc(&d_B, size);
    cuMemAlloc(&d_C, size);

    // Fill inputs with random values in [0, 1].
    for (int i = 0; i < ELEMENT; ++i)
    {
        h_A[i] = rand() / (float)RAND_MAX;
        h_B[i] = rand() / (float)RAND_MAX;
    }
    cuMemcpyHtoD(d_A, h_A, size);
    cuMemcpyHtoD(d_B, h_B, size);

    // Launch configuration: ceil-division covers the last partial block.
    int threadsPerBlock = 256;
    dim3 cudaBlockSize(threadsPerBlock, 1, 1);
    dim3 cudaGridSize((ELEMENT + threadsPerBlock - 1) / threadsPerBlock, 1, 1);
    int element = ELEMENT;

    // kernelParams form: an array of pointers, one per kernel argument.
    void *arr[] = { (void *)&d_A, (void *)&d_B, (void *)&d_C, (void *)&element };
    cuLaunchKernel(kernel_addr, cudaGridSize.x, cudaGridSize.y, cudaGridSize.z,
                   cudaBlockSize.x, cudaBlockSize.y, cudaBlockSize.z,
                   0, 0, &arr[0], 0);  // sharedMemBytes = 0, default stream, no extra
    cuCtxSynchronize();                // driver-API launches are asynchronous
    cuMemcpyDtoH(h_C, d_C, size);

    // Verify against the host-computed sum.
    for (int i = 0; i < ELEMENT; ++i)
    {
        if (fabs(h_A[i] + h_B[i] - h_C[i]) > 1e-5)
        {
            printf("\n\tResult error at i = %d, h_A[i] = %f, h_B[i] = %f, h_C[i] = %f\n", i, h_A[i], h_B[i], h_C[i]);
            getchar();
            return 1;
        }
    }

    free(h_A);
    free(h_B);
    free(h_C);
    cuMemFree(d_A);
    cuMemFree(d_B);
    cuMemFree(d_C);
    printf("\n\tFinish.\n");
    getchar();
    return 0;
}
● 输出结果:
Start.
> Using CUDA Device [0]: GeForce GTX
> GPU Device has SM 6.1 compute capability
Finish.
▶ 源代码,Driver API,也需要上面的 vectorAdd_kernel.cu,调用核函数有三种方式,中间那种有点问题,结果不对
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string>
#include <helper_cuda.h>
#include <cuda.h>
#include <drvapi_error_string.h>

// NOTE(review): numeric literals were stripped from this listing; the values
// below are restored from the NVIDIA "vectorAddDrv" sample — confirm upstream.
#define ELEMENT 50000
#define PATH "C:\\ProgramData\\NVIDIA Corporation\\CUDA Samples\\v9.1\\0_Simple\\vectorAddDrv\\data\\"

#if defined(_WIN64) || defined(__LP64__)
#define PTX_FILE "vectorAdd_kernel64.ptx"
#else
#define PTX_FILE "vectorAdd_kernel32.ptx"
#endif

using namespace std;

// Fill data[0..n) with random values in [0, 1].
void RandomInit(float *data, int n)
{
    for (int i = 0; i < n; ++i)
        data[i] = rand() / (float)RAND_MAX;
}

int main(int argc, char **argv)
{
    printf("\n\tStart.\n");

    // The driver API needs explicit initialization and a context — the rough
    // equivalent of cudaSetDevice(0) in the runtime API.
    cuInit(0);
    CUcontext cuContext;
    cuCtxCreate(&cuContext, 0, 0);  // flags = 0, device ordinal 0

    // Read the whole PTX file into a string.
    string module_path, ptx_source;
    module_path = PATH"vectorAdd_kernel64.ptx";
    FILE *fp = fopen(module_path.c_str(), "rb");
    fseek(fp, 0, SEEK_END);
    int file_size = ftell(fp);
    char *buf = new char[file_size + 1];
    fseek(fp, 0, SEEK_SET);
    fread(buf, sizeof(char), file_size, fp);
    fclose(fp);
    buf[file_size] = '\0';
    ptx_source = buf;
    delete[] buf;

    CUmodule cuModule;
    if (module_path.rfind("ptx") != string::npos)  // .ptx input: JIT-compile at load time
    {
        // JIT options: names go in jitOptions, matching values in jitOptVals.
        const unsigned int jitNumOptions = 3;
        CUjit_option *jitOptions = new CUjit_option[jitNumOptions];
        void **jitOptVals = new void *[jitNumOptions];
        // Option 0: size of the info-log buffer.
        jitOptions[0] = CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES;
        int jitLogBufferSize = 1024;
        jitOptVals[0] = (void *)(size_t)jitLogBufferSize;
        // Option 1: the info-log buffer itself.
        jitOptions[1] = CU_JIT_INFO_LOG_BUFFER;
        char *jitLogBuffer = new char[jitLogBufferSize];
        jitOptVals[1] = jitLogBuffer;
        // Option 2: cap the number of registers per thread.
        jitOptions[2] = CU_JIT_MAX_REGISTERS;
        int jitRegCount = 32;
        jitOptVals[2] = (void *)(size_t)jitRegCount;
        // Compile and load the module.
        cuModuleLoadDataEx(&cuModule, ptx_source.c_str(), jitNumOptions, jitOptions, (void **)jitOptVals);
        //printf("> PTX JIT log:\n%s\n", jitLogBuffer);  // dump the JIT log
        delete[] jitLogBuffer;
        delete[] jitOptVals;
        delete[] jitOptions;
    }
    else  // .cubin input: load directly, no JIT (branch not taken in this example)
        cuModuleLoad(&cuModule, module_path.c_str());

    CUfunction vecAdd_kernel;
    cuModuleGetFunction(&vecAdd_kernel, cuModule, "VecAdd_kernel");  // look up the compiled kernel

    // Allocate buffers and run.
    int element = ELEMENT;
    size_t size = ELEMENT * sizeof(float);
    float *h_A, *h_B, *h_C;
    CUdeviceptr d_A, d_B, d_C;
    h_A = (float *)malloc(size);
    h_B = (float *)malloc(size);
    h_C = (float *)malloc(size);
    RandomInit(h_A, ELEMENT);
    RandomInit(h_B, ELEMENT);
    cuMemAlloc(&d_A, size);
    cuMemAlloc(&d_B, size);
    cuMemAlloc(&d_C, size);
    cuMemcpyHtoD(d_A, h_A, size);
    cuMemcpyHtoD(d_B, h_B, size);

    int threadsPerBlock = 256;
    int blocksPerGrid = (ELEMENT + threadsPerBlock - 1) / threadsPerBlock;

    if (1)  // three ways to pass arguments through the driver API; way 1 is used
    {
        // Way 1: kernelParams — an array of pointers, one per kernel argument.
        void *args[] = { &d_A, &d_B, &d_C, &element };
        cuLaunchKernel(vecAdd_kernel, blocksPerGrid, 1, 1, threadsPerBlock, 1, 1, 0, NULL, args, NULL);
    }
    else if (0)  // Way 2: broken — kept as in the original post for illustration
    {
        // BUG (preserved intentionally): `offset` advances in BYTES but indexes a
        // void* ARRAY (stride sizeof(void*)), so writes land at the wrong slots
        // and run past the end; a raw buffer is also not a valid `extra` list
        // (it needs CU_LAUNCH_PARAM_* markers, as in way 3). Do not use.
        int offset = 0;
        void *argBuffer[16];
        *((CUdeviceptr *)&argBuffer[offset]) = d_A;
        offset += sizeof(d_A);
        *((CUdeviceptr *)&argBuffer[offset]) = d_B;
        offset += sizeof(d_B);
        *((CUdeviceptr *)&argBuffer[offset]) = d_C;
        offset += sizeof(d_C);
        *((int *)&argBuffer[offset]) = element;
        offset += sizeof(element);
        cuLaunchKernel(vecAdd_kernel, blocksPerGrid, 1, 1, threadsPerBlock, 1, 1, 0, NULL, NULL, argBuffer);
    }
    else  // Way 3: pack arguments byte-wise into one buffer, pass via `extra`
    {
        int offset = 0;
        char argBuffer[256];
        *((CUdeviceptr *)&argBuffer[offset]) = d_A;
        offset += sizeof(d_A);
        *((CUdeviceptr *)&argBuffer[offset]) = d_B;
        offset += sizeof(d_B);
        *((CUdeviceptr *)&argBuffer[offset]) = d_C;
        offset += sizeof(d_C);
        *((int *)&argBuffer[offset]) = element;
        offset += sizeof(element);
        // CU_LAUNCH_PARAM_BUFFER_SIZE must point to a size_t, not an int
        // (fixed: the original passed &offset, which misreads on 64-bit).
        size_t argBufferSize = (size_t)offset;
        void *kernel_launch_config[] =
        {
            CU_LAUNCH_PARAM_BUFFER_POINTER, argBuffer,
            CU_LAUNCH_PARAM_BUFFER_SIZE, &argBufferSize,
            CU_LAUNCH_PARAM_END
        };
        cuLaunchKernel(vecAdd_kernel, blocksPerGrid, 1, 1, threadsPerBlock, 1, 1, 0, NULL, NULL, (void **)&kernel_launch_config);
    }

    cuCtxSynchronize();            // driver-API launches are asynchronous
    cuMemcpyDtoH(h_C, d_C, size);

    // Verify against the host-computed sum; i survives the loop so the final
    // Pass/Fail check below can tell whether the loop ran to completion.
    int i;
    for (i = 0; i < ELEMENT; ++i)
    {
        float sum = h_A[i] + h_B[i];
        if (fabs(h_C[i] - sum) > 1e-7f)
        {
            printf("Error at i == %d, h_C[i] == %f, sum == %f", i, h_C[i], sum);
            break;
        }
    }
    printf("\n\tFinish: %s\n", (i == ELEMENT) ? "Pass" : "Fail");
    getchar();
    return 0;
}
● 输出结果
Start.
Finish.
▶ 涨姿势:
● 从源代码中删减了的部分
CUresult CleanupNoFailure() //检查内存错误的函数
{
CUresult error;
// Free device memory
if (d_A)
error = cuMemFree(d_A);
if (d_B)
error = cuMemFree(d_B);
if (d_C)
error = cuMemFree(d_C);
// Free host memory
if (h_A)
free(h_A);
if (h_B)
free(h_B);
if (h_C)
free(h_C);
error = cuCtxDestroy(cuContext);
return error;
} void Cleanup(bool noError) // 报告错误
{
CUresult error = CleanupNoFailure();
if (!noError || error != CUDA_SUCCESS)
{
printf("Function call failed\nFAILED\n");
exit(EXIT_FAILURE);
}
if (!noprompt)
{
printf("\nPress ENTER to exit...\n");
fflush(stdout);
fflush(stderr);
getchar();
}
} if (error != CUDA_SUCCESS) // 外部调用 cleanup
Cleanup(false); if (argc > ) // 主函数中使用参数 -device=n 指定设备号
{
bool bFound = false;
for (int param = ; param < argc; param++) // 逐个检查参数
{
int string_start = ;
while (argv[param][string_start] == '-') // 跳过 "-" 号
string_start++;
char *string_argv = &argv[param][string_start];
if (!strncmp(string_argv, "device", )) // 看参数是否是 device
{
int len = (int)strlen(string_argv);
while (string_argv[len] != '=')
len--;
devID = atoi(&string_argv[++len]);
bFound = true;
}
if (bFound)
break;
}
}
0_Simple__vectorAdd + 0_Simple__vectorAdd_nvrtc + 0_Simple__vectorAddDrv的更多相关文章
随机推荐
- AFN不支持 "text/html" 的数据的问题:unacceptable content-type: text/html
使用AFN若遇到这个错误信息 Request failed: unacceptable content-type: text/html bug原因: 这不是AFNetworking的问题 这是做Ser ...
- Spring Boot(5) 集成Hibernate 日志配置
https://blog.csdn.net/ZNG_XP/article/details/78131809 https://blog.csdn.net/u011998835/article/detai ...
- graphql 数据导入工具
graphql 是比 比较方便的工具,但是数据导入也是一个比较重要的工具类似prisma 包含一个seed 的功能,类似docker我们使用mysql 数据库的initdb.d,但是那个基本上就 是添 ...
- Linux下的Nginx部署禅道
基本思路:先安装好nginx和mysql和php,上传禅道的源码.把禅道的源码包扔到 nginx/apache 的工程路径内或者nginx/apache内的配置文件指向nginx的路径,然后将ngin ...
- 启用Win8/10(中文版/核心版/家庭版)中被阉割的远程桌面服务端
Windows 8/8.1/10 标准版(中文版/核心版/家庭版)中取消了远程桌面服务端,想通过远程连接到自己的电脑就很麻烦了,第三方远程桌面速度又不理想(如TeamViewer).通过以下方法可让系 ...
- FineUI4.0以后如何调用JS事件
F.ready(function() { // 你的代码 }); F.ready(function () { var searchClientID = '<%= TextB ...
- CF 914F Substrings in a String——bitset处理匹配
题目:http://codeforces.com/contest/914/problem/F 可以对原字符串的每种字母开一个 bitset .第 i 位的 1 表示这种字母在第 i 位出现了. 考虑能 ...
- Tomcat 去除项目名称
再tomcat的conf下server.xml 里, 再<host>...</host>的标签之间添加 <Context path="" docBa ...
- qt 中文乱码
首先呢,声明一下,QString 是不存在中文支持问题的,很多人遇到问题,并不是本身 QString 的问题,而是没有将自己希望的字符串正确赋给QString. 很简单的问题,"我是中文&q ...
- windows下使用vscode编写运行以及调试Python
更新于2018年10月: 首先去python官网下载python3 地址:https://www.python.org/downloads/windows/ 下载好后直接安装 记得勾选添加环境变量 ...