Use the Compressed Sparse Row (CSR) format to represent the sparse matrix.
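In CSR, the matrix is stored as three flat arrays: data holds the nonzero values row by row, indices holds the column index of each nonzero, and ptr holds, for each row, the offset of that row's first nonzero in data, plus one trailing entry equal to the total number of nonzeros, so row i occupies the half-open range [ptr[i], ptr[i+1]). A minimal sketch of the layout, using an illustrative 4x4 matrix (the same example values the program below is populated with):

/* Illustrative 4x4 matrix:
 *     | 1 7 0 0 |
 * A = | 0 2 8 0 |
 *     | 5 0 3 9 |
 *     | 0 6 0 4 |
 */
double data[]    = {1, 7, 2, 8, 5, 3, 9, 6, 4};   // nonzero values, row by row
int    indices[] = {0, 1, 1, 2, 0, 2, 3, 1, 3};   // column of each nonzero
int    ptr[]     = {0, 2, 4, 7, 9};               // row i spans data[ptr[i] .. ptr[i+1])

// For y = A*x, each row reduces to:
//   y[i] = sum over jj in [ptr[i], ptr[i+1]) of data[jj] * x[indices[jj]]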

 #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "gputimer.h"
#include<stdio.h>
#include<stdlib.h>
#include<string.h>
#define WARP_SIZE 32 __global__ void
spmv_csr_vector_kernel(const int num_rows,
                       const int *ptr,
                       const int *indices,
                       const double *data,
                       const double *x,
                       double *y)
{
    __shared__ double vals[WARP_SIZE];
    int thread_id = blockDim.x * blockIdx.x + threadIdx.x; // global thread index
    int warp_id = thread_id / WARP_SIZE;                   // global warp index
    int lane = thread_id & (WARP_SIZE - 1);                // thread index within the warp

    // one warp per row
    int row = warp_id;
    if (row < num_rows)
    {
        int row_start = ptr[row];
        int row_end = ptr[row + 1];

        // compute running sum per thread
        vals[threadIdx.x] = 0;
        for (int jj = row_start + lane; jj < row_end; jj += WARP_SIZE)
            vals[threadIdx.x] += data[jj] * x[indices[jj]];

        // parallel reduction in shared memory
        // (relies on the threads of a warp executing in lockstep)
        if (lane < 16) vals[threadIdx.x] += vals[threadIdx.x + 16];
        if (lane <  8) vals[threadIdx.x] += vals[threadIdx.x +  8];
        if (lane <  4) vals[threadIdx.x] += vals[threadIdx.x +  4];
        if (lane <  2) vals[threadIdx.x] += vals[threadIdx.x +  2];
        if (lane <  1) vals[threadIdx.x] += vals[threadIdx.x +  1];

        // first thread writes the result
        if (lane == 0)
            y[row] += vals[threadIdx.x];
    }
}

__global__ void
spmv_csr_scalar_kernel(const int num_rows,
                       const int *ptr,
                       const int *indices,
                       const double *data,
                       const double *x,
                       double *y)
{
    // one thread per row
    int row = blockDim.x * blockIdx.x + threadIdx.x;
    if (row < num_rows)
    {
        double dot = 0;
        int row_start = ptr[row];
        int row_end = ptr[row + 1];
        for (int jj = row_start; jj < row_end; jj++)
            dot += data[jj] * x[indices[jj]];
        y[row] += dot;
    }
}

int main(int argc, char **argv)
{
    // Example 4x4 sparse matrix in CSR form (illustrative values):
    //     | 1 7 0 0 |
    // A = | 0 2 8 0 |
    //     | 5 0 3 9 |
    //     | 0 6 0 4 |
    double h_data[] = {1, 7, 2, 8, 5, 3, 9, 6, 4};
    int h_col[] = {0, 1, 1, 2, 0, 2, 3, 1, 3};
    int h_ptr[] = {0, 2, 4, 7, 9};
    double h_x[] = {1, 2, 3, 4};
    double h_y[] = {0, 0, 0, 0};
    int num_rows = 4;

    double *d_data;
    int *d_col;
    int *d_ptr;
    double *d_x;
    double *d_y;

    cudaMalloc((void **)&d_data, sizeof(h_data));
    cudaMalloc((void **)&d_col, sizeof(h_col));
    cudaMalloc((void **)&d_ptr, sizeof(h_ptr));
    cudaMalloc((void **)&d_x, sizeof(h_x));
    cudaMalloc((void **)&d_y, sizeof(h_y));

    cudaMemcpy((void *)d_data, (void *)h_data, sizeof(h_data), cudaMemcpyHostToDevice);
    cudaMemcpy((void *)d_col, (void *)h_col, sizeof(h_col), cudaMemcpyHostToDevice);
    cudaMemcpy((void *)d_ptr, (void *)h_ptr, sizeof(h_ptr), cudaMemcpyHostToDevice);
    cudaMemcpy((void *)d_x, (void *)h_x, sizeof(h_x), cudaMemcpyHostToDevice);
    cudaMemcpy((void *)d_y, (void *)h_y, sizeof(h_y), cudaMemcpyHostToDevice);

    GpuTimer timer;
    timer.Start();
    // one block per row, one warp (WARP_SIZE threads) per block
    spmv_csr_vector_kernel<<<num_rows, WARP_SIZE>>>(num_rows, d_ptr, d_col, d_data, d_x, d_y);
    //spmv_csr_scalar_kernel<<<1, 32>>>(num_rows, d_ptr, d_col, d_data, d_x, d_y);
    timer.Stop();
    printf("Duration: %g ms\n", timer.Elapsed());

    cudaMemcpy((void *)h_y, (void *)d_y, sizeof(h_y), cudaMemcpyDeviceToHost);
    for (int i = 0; i < num_rows; i++)
        printf("%.5f ", h_y[i]);
    printf("\n");

    cudaFree(d_data);
    cudaFree(d_col);
    cudaFree(d_ptr);
    cudaFree(d_x);
    cudaFree(d_y);
    return 0;
}
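For sanity-checking the kernel output, the same product can be computed on the host; this is a minimal sketch (the helper name spmv_csr_host is mine, not part of the original program):

// Host-side reference: y[row] += sum of data[jj] * x[indices[jj]] over the row's nonzeros.
void spmv_csr_host(int num_rows, const int *ptr, const int *indices,
                   const double *data, const double *x, double *y)
{
    for (int row = 0; row < num_rows; row++)
    {
        double dot = 0;
        for (int jj = ptr[row]; jj < ptr[row + 1]; jj++)   // nonzeros of this row
            dot += data[jj] * x[indices[jj]];
        y[row] += dot;
    }
}

With the illustrative data above (x = {1, 2, 3, 4}, y initialized to zero), both the host loop and the GPU kernels should produce y = {15, 28, 50, 28}. Building the program with nvcc as a single .cu file assumes gputimer.h (the small GpuTimer helper providing Start/Stop/Elapsed) is on the include path.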

ref: N. Bell and M. Garland, "Efficient Sparse Matrix-Vector Multiplication on CUDA", NVIDIA Technical Report NVR-2008-004, ch. 4.3: http://www.nvidia.com/docs/IO/66889/nvr-2008-004.pdf
