Use the Compressed Sparse Row (CSR) format to represent a sparse matrix.

 #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "gputimer.h"
#include<stdio.h>
#include<stdlib.h>
#include<string.h>
#define WARP_SIZE 32

// Sparse matrix-vector product y += A*x with A in CSR format, "vector" kernel:
// one 32-thread warp cooperates on each matrix row.
//
// Launch preconditions: blockDim.x == WARP_SIZE (vals[] is sized and indexed
// for exactly one warp per block) and gridDim.x >= num_rows.
// Requires CUDA 9+ for __syncwarp().
//
// num_rows  - number of matrix rows
// ptr       - CSR row offsets, num_rows+1 entries; row r spans [ptr[r], ptr[r+1])
// indices   - column index of each stored nonzero
// data      - value of each stored nonzero
// x         - dense input vector
// y         - output vector; the row result is ACCUMULATED into y[row]
__global__ void
spmv_csr_vector_kernel ( const int num_rows ,
const int * ptr ,
const int * indices ,
const double * data ,
const double * x ,
double * y)
{
__shared__ double vals [WARP_SIZE];
int thread_id = blockDim.x * blockIdx.x + threadIdx.x ; // global thread index
int warp_id = thread_id / WARP_SIZE; // global warp index
int lane = thread_id & (WARP_SIZE - 1); // thread index within the warp
// one warp per row
int row = warp_id ;
if ( row < num_rows )
{
int row_start = ptr [ row ];
int row_end = ptr [ row + 1 ];
// each lane computes a running partial sum over a strided slice of the row
vals [ threadIdx.x ] = 0;
for ( int jj = row_start + lane ; jj < row_end ; jj += WARP_SIZE)
vals [ threadIdx.x ] += data [ jj ] * x [ indices [ jj ]];
// parallel tree reduction in shared memory; __syncwarp() is required
// between steps because implicit warp lockstep no longer holds on Volta+
__syncwarp();
if ( lane < 16 ) vals [ threadIdx.x ] += vals [ threadIdx.x + 16 ];
__syncwarp();
if ( lane < 8 ) vals [ threadIdx.x ] += vals [ threadIdx.x + 8 ];
__syncwarp();
if ( lane < 4 ) vals [ threadIdx.x ] += vals [ threadIdx.x + 4 ];
__syncwarp();
if ( lane < 2 ) vals [ threadIdx.x ] += vals [ threadIdx.x + 2 ];
__syncwarp();
if ( lane < 1 ) vals [ threadIdx.x ] += vals [ threadIdx.x + 1 ];
__syncwarp();
// first lane of the warp writes the row result
if ( lane == 0 )
y[ row ] += vals [ threadIdx.x ];
}
}

__global__ void
// Sparse matrix-vector product y += A*x with A in CSR format, "scalar" kernel:
// one thread per matrix row. Simple, but adjacent threads read data[]/indices[]
// from different rows, so global loads are uncoalesced (see NVR-2008-004).
//
// num_rows  - number of matrix rows
// ptr       - CSR row offsets, num_rows+1 entries; row r spans [ptr[r], ptr[r+1])
// indices   - column index of each stored nonzero
// data      - value of each stored nonzero
// x         - dense input vector
// y         - output vector; the row result is ACCUMULATED into y[row]
spmv_csr_scalar_kernel ( const int num_rows ,
const int * ptr ,
const int * indices ,
const double * data ,
const double * x ,
double * y)
{
int row = blockDim.x * blockIdx.x + threadIdx.x ;
if( row < num_rows )
{
double dot = 0;
int row_start = ptr [ row ];
int row_end = ptr [ row + 1 ];
for (int jj = row_start ; jj < row_end ; jj ++)
dot += data [ jj ] * x[ indices [ jj ]];
y[ row ] += dot ;
}
}

int main(int argc,char **argv)
{
    // 4x4 example matrix from the reference paper (NVR-2008-004):
    //     [1 7 0 0]
    // A = [0 2 8 0]
    //     [5 0 3 9]
    //     [0 6 0 4]
    // NOTE(review): the original numeric literals were lost in this copy; the
    // values below are the paper's standard CSR example, reconstructed to
    // match the original array lengths (9 nonzeros, 5 row offsets, 4 rows).
    double h_data[] = {1, 7, 2, 8, 5, 3, 9, 6, 4}; // nonzero values
    int h_col[]     = {0, 1, 1, 2, 0, 2, 3, 1, 3}; // column of each nonzero
    int h_ptr[]     = {0, 2, 4, 7, 9};             // row offsets into h_data
    double h_x[]    = {1, 1, 1, 1};                // input vector
    double h_y[]    = {0, 0, 0, 0};                // output; kernels do y += A*x
    int num_rows = 4;
    int num_nonzeros = 9;

    double *d_data;
    int *d_col;
    int *d_ptr;
    double *d_x;
    double *d_y;
    cudaMalloc((void**) &d_data, sizeof(double) * num_nonzeros);
    cudaMalloc((void**) &d_col,  sizeof(int)    * num_nonzeros);
    cudaMalloc((void**) &d_ptr,  sizeof(int)    * (num_rows + 1));
    cudaMalloc((void**) &d_x,    sizeof(double) * num_rows);
    cudaMalloc((void**) &d_y,    sizeof(double) * num_rows);
    cudaMemcpy((void*)d_data, (void*)h_data, sizeof(double) * num_nonzeros, cudaMemcpyHostToDevice);
    cudaMemcpy((void*)d_col,  (void*)h_col,  sizeof(int)    * num_nonzeros, cudaMemcpyHostToDevice);
    cudaMemcpy((void*)d_ptr,  (void*)h_ptr,  sizeof(int)    * (num_rows + 1), cudaMemcpyHostToDevice);
    cudaMemcpy((void*)d_x,    (void*)h_x,    sizeof(double) * num_rows, cudaMemcpyHostToDevice);
    cudaMemcpy((void*)d_y,    (void*)h_y,    sizeof(double) * num_rows, cudaMemcpyHostToDevice);

    GpuTimer timer;
    timer.Start();
    // vector kernel wants one 32-thread warp (= one block here) per row
    spmv_csr_vector_kernel<<<num_rows, WARP_SIZE>>>(num_rows, d_ptr, d_col, d_data, d_x, d_y);
    //spmv_csr_scalar_kernel<<<1,32>>>(num_rows,d_ptr,d_col,d_data,d_x,d_y);
    timer.Stop();
    printf("Duration: %g ms\n", timer.Elapsed());

    // blocking copy also synchronizes with the kernel before we read h_y
    cudaMemcpy((void*)h_y, (void*)d_y, sizeof(double) * num_rows, cudaMemcpyDeviceToHost);
    for (int i = 0; i < num_rows; i++)
        printf("%.5f ", h_y[i]); // expected with x = ones: 8 10 17 10
    printf("\n");

    // release device memory (leaked in the original)
    cudaFree(d_data);
    cudaFree(d_col);
    cudaFree(d_ptr);
    cudaFree(d_x);
    cudaFree(d_y);
    return 0;
}

Reference:

Bell & Garland, "Efficient Sparse Matrix-Vector Multiplication on CUDA",
NVIDIA Technical Report NVR-2008-004, section 4.3:
http://www.nvidia.com/docs/IO/66889/nvr-2008-004.pdf

CUDA[4] sample program: matrix-vector multiplication的更多相关文章

  1. ACM学习历程——UVA442 Matrix Chain Multiplication(栈)

    Description   Matrix Chain Multiplication  Matrix Chain Multiplication  Suppose you have to evaluate ...

  2. Matrix Chain Multiplication[HDU1082]

    Matrix Chain Multiplication Time Limit: 2000/1000 MS (Java/Others)    Memory Limit: 65536/32768 K (J ...

  3. UVA 442 二十 Matrix Chain Multiplication

    Matrix Chain Multiplication Time Limit:3000MS     Memory Limit:0KB     64bit IO Format:%lld & %l ...

  4. UVa 442 Matrix Chain Multiplication(矩阵链,模拟栈)

    意甲冠军  由于矩阵乘法计算链表达的数量,需要的计算  后的电流等于行的矩阵的矩阵的列数  他们乘足够的人才  非法输出error 输入是严格合法的  即使仅仅有两个相乘也会用括号括起来  并且括号中 ...

  5. Matrix Chain Multiplication(表达式求值用栈操作)

    题目链接:http://acm.hdu.edu.cn/showproblem.php?pid=1082 Matrix Chain Multiplication Time Limit: 2000/100 ...

  6. UVA——442 Matrix Chain Multiplication

    442 Matrix Chain MultiplicationSuppose you have to evaluate an expression like A*B*C*D*E where A,B,C ...

  7. 例题6-3 Matrix Chain Multiplication ,Uva 442

    这个题思路没有任何问题,但还是做了近三个小时,其中2个多小时调试 得到的经验有以下几点: 一定学会调试,掌握输出中间量的技巧,加强gdb调试的学习 有时候代码不对,得到的结果却是对的(之后总结以下常见 ...

  8. UVa442 Matrix Chain Multiplication

    // UVa442 Matrix Chain Multiplication // 题意:输入n个矩阵的维度和一些矩阵链乘表达式,输出乘法的次数.假定A和m*n的,B是n*p的,那么AB是m*p的,乘法 ...

  9. uva-442 Matrix Chain Multiplication

    Suppose you have to evaluate an expression like A*B*C*D*E where A,B,C,D and E are matrices. Since ma ...

随机推荐

  1. 深度森林DeepForest

    级联森林(Cascade Forest) 级联森林结构的图示.级联的每个级别包括两个随机森林(蓝色字体标出)和两个完全随机树木森林(黑色). 假设有三个类要预测,因此,每个森林将输出三维类向量,然后将 ...

  2. eclipse中配置MAVEN并使用阿里云代理

    一.下载MAVENhttp://maven.apache.org/download.cgi 二.配置MAVEN环境变量名:M2_HOME变量值:F:\maven\apache-maven-3.0.3找 ...

  3. ORACLE重装之后恢复数据库,相当于sqlserver的附加数据库

    在开发机器上经常会遇到重装系统的问题,重装之前如果ORACLE没有及时备份的话重装之后就纠结了,数据还原很头疼. 各种娘中只能找到一些ORACLE安装与重装系统前目录相同的解决办法,目录不同就没招了. ...

  4. !!!常用SVG代码

    http://www.w3school.com.cn/svg/svg_examples.asp svg实例 http://www.w3school.com.cn/svg/svg_reference.a ...

  5. docker报错

    用docker搭建环境时可能会遇到错误:No releases available for package "xxxx" No releases available for pac ...

  6. CORSFilter 跨域资源访问

    CORS 定义 Cross-Origin Resource Sharing(CORS)跨来源资源共享是一份浏览器技术的规范,提供了 Web 服务从不同域传来沙盒脚本的方法,以避开浏览器的同源策略,是 ...

  7. Binary Space Partitioning

    [Binary Space Partitioning] BSP was discovered by John Carmack used BSP trees in Doom and Quake. Alt ...

  8. Jenkins之定时任务

    H的用法: H 10 * * *  ,这里H不是小时的意思,符号H(代表“Hash”,后面用“散列”代替) 符号H 在一定范围内可被认为是一个随机值,但实际上它是任务名称的一个散列而不是随机函数,每个 ...

  9. 命名空间 extern的用法 static全局变量

    std是标准库中的命名空间: 关于extern的用法可以参考文献http://blog.163.com/sunjinxia%40126/blog/static/94984879201312145021 ...

  10. 11. Container With Most Water (JAVA)

    Given n non-negative integers a1, a2, ..., an , where each represents a point at coordinate (i, ai). ...