0_Simple__simpleCooperativeGroups
▶ 协作组,CUDA9.0 的新特性
▶ 源代码,如何获得协作组的编号?
#include <stdio.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cooperative_groups.h>

#define THREAD_PER_BLOCK 64

using namespace cooperative_groups; // everything lives in the cooperative_groups namespace

// Tree reduction over all threads of group g.
// x must point to shared memory large enough for g.size() ints (one slot per participant);
// val is this thread's contribution.
// Returns the total on thread 0 of the group, -1 on every other thread.
__device__ int sumReduction(thread_group g, int *x, int val)
{
    int lane = g.thread_rank(); // this thread's rank (lane ID) within the group
    for (int i = g.size() / 2; i > 0; i /= 2)
    {
        x[lane] = val;          // publish the partial sum (first pass: the raw input)
        g.sync();               // group barrier: make every write visible before reading
        if (lane < i)
            val += x[lane + i]; // fold the upper half into the lower half, kept in a register
        g.sync();               // barrier again before the slots are overwritten next pass
    }
    if (g.thread_rank() == 0)   // only thread 0 holds the complete sum
        return val;
    else
        return -1;
}

__global__ void cgkernel()
{
    // Dynamic shared memory, sized by the launch (THREAD_PER_BLOCK ints).
    extern __shared__ int workspace[];

    thread_block group = this_thread_block();  // wrap the whole thread block as one cooperative group
    int groupSize = group.size();              // number of threads in the group
    int input = group.thread_rank();           // each thread contributes its own rank
    int output = sumReduction(group, workspace, input);    // reduce, using shared memory as workspace
    int expectedOutput = (groupSize - 1) * groupSize / 2;  // 0 + 1 + ... + 63 = 2016

    if (group.thread_rank() == 0) // thread 0 reports, then announces the 4-tile phase
    {
        printf("\n\tSum of thread 0 ~ %d in group is %d (expected %d)\n", group.size() - 1, output, expectedOutput);
        printf("\n\tNow creating %d groups, each of size 16 threads:\n", group.size() / 16);
    }
    group.sync(); // block-wide barrier before repartitioning

    // Split the block into tiles of 16 threads (tile size must be a power of two, <= 32).
    thread_block_tile<16> group16 = tiled_partition<16>(group);
    // Offset of this tile's slice of the shared workspace.
    int offset = group.thread_rank() - group16.thread_rank();
    printf("%d -> thread_rank = %d, group16.thread_rank = %d, offset = %d\n", threadIdx.x, group.thread_rank(), group16.thread_rank(), offset);
    // NOTE: group.group_index() prints as (0, 0, 0) here, and group.thread_index()
    // matches group.thread_rank() for this 1-D launch.

    input = group16.thread_rank(); // rank inside the tile is the new input
    output = sumReduction(group16, workspace + offset, input); // reduce each tile in its own workspace slice
    expectedOutput = 15 * 16 / 2;  // 0 + 1 + ... + 15 = 120

    if (group16.thread_rank() == 0) // rank-0 thread of each tile reports
        printf("\n\tSum of all ranks 0..15 in group16 is %d (expected %d)\n", output, expectedOutput);
    return;
}

int main()
{
    printf("\n\tStart with %d threads.\n", THREAD_PER_BLOCK);

    // One block of 64 threads; dynamic shared memory holds one int per thread.
    cgkernel <<<1, THREAD_PER_BLOCK, THREAD_PER_BLOCK * sizeof(int)>>> ();

    cudaError_t err = cudaGetLastError(); // catch launch-configuration errors
    if (err != cudaSuccess)
        fprintf(stderr, "Kernel launch failed: %s\n", cudaGetErrorString(err));

    cudaDeviceSynchronize(); // wait for the kernel (and its printf output) to complete
    printf("\n\tFinish.\n");
    getchar(); // keep the console window open
    return 0;
}
● 输出结果
Start with 64 threads.
Sum of thread 0 ~ 63 in group is 2016 (expected 2016)
Now creating 4 groups, each of size 16 threads:
-> thread_rank = , group16.thread_rank = , offset =
-> thread_rank = , group16.thread_rank = , offset =
-> thread_rank = , group16.thread_rank = , offset =
-> thread_rank = , group16.thread_rank = , offset =
-> thread_rank = , group16.thread_rank = , offset =
-> thread_rank = , group16.thread_rank = , offset =
-> thread_rank = , group16.thread_rank = , offset =
-> thread_rank = , group16.thread_rank = , offset =
-> thread_rank = , group16.thread_rank = , offset =
-> thread_rank = , group16.thread_rank = , offset =
-> thread_rank = , group16.thread_rank = , offset =
-> thread_rank = , group16.thread_rank = , offset =
-> thread_rank = , group16.thread_rank = , offset =
-> thread_rank = , group16.thread_rank = , offset =
-> thread_rank = , group16.thread_rank = , offset =
-> thread_rank = , group16.thread_rank = , offset =
-> thread_rank = , group16.thread_rank = , offset =
-> thread_rank = , group16.thread_rank = , offset =
-> thread_rank = , group16.thread_rank = , offset =
-> thread_rank = , group16.thread_rank = , offset =
-> thread_rank = , group16.thread_rank = , offset =
-> thread_rank = , group16.thread_rank = , offset =
-> thread_rank = , group16.thread_rank = , offset =
-> thread_rank = , group16.thread_rank = , offset =
-> thread_rank = , group16.thread_rank = , offset =
-> thread_rank = , group16.thread_rank = , offset =
-> thread_rank = , group16.thread_rank = , offset =
-> thread_rank = , group16.thread_rank = , offset =
-> thread_rank = , group16.thread_rank = , offset =
-> thread_rank = , group16.thread_rank = , offset =
-> thread_rank = , group16.thread_rank = , offset =
-> thread_rank = , group16.thread_rank = , offset =
-> thread_rank = , group16.thread_rank = , offset =
-> thread_rank = , group16.thread_rank = , offset =
-> thread_rank = , group16.thread_rank = , offset =
-> thread_rank = , group16.thread_rank = , offset =
-> thread_rank = , group16.thread_rank = , offset =
-> thread_rank = , group16.thread_rank = , offset =
-> thread_rank = , group16.thread_rank = , offset =
-> thread_rank = , group16.thread_rank = , offset =
-> thread_rank = , group16.thread_rank = , offset =
-> thread_rank = , group16.thread_rank = , offset =
-> thread_rank = , group16.thread_rank = , offset =
-> thread_rank = , group16.thread_rank = , offset =
-> thread_rank = , group16.thread_rank = , offset =
-> thread_rank = , group16.thread_rank = , offset =
-> thread_rank = , group16.thread_rank = , offset =
-> thread_rank = , group16.thread_rank = , offset =
-> thread_rank = , group16.thread_rank = , offset =
-> thread_rank = , group16.thread_rank = , offset =
-> thread_rank = , group16.thread_rank = , offset =
-> thread_rank = , group16.thread_rank = , offset =
-> thread_rank = , group16.thread_rank = , offset =
-> thread_rank = , group16.thread_rank = , offset =
-> thread_rank = , group16.thread_rank = , offset =
-> thread_rank = , group16.thread_rank = , offset =
-> thread_rank = , group16.thread_rank = , offset =
-> thread_rank = , group16.thread_rank = , offset =
-> thread_rank = , group16.thread_rank = , offset =
-> thread_rank = , group16.thread_rank = , offset =
-> thread_rank = , group16.thread_rank = , offset =
-> thread_rank = , group16.thread_rank = , offset =
-> thread_rank = , group16.thread_rank = , offset =
-> thread_rank = , group16.thread_rank = , offset =
Sum of all ranks 0..15 in group16 is 120 (expected 120)
Sum of all ranks 0..15 in group16 is 120 (expected 120)
Sum of all ranks 0..15 in group16 is 120 (expected 120)
Sum of all ranks 0..15 in group16 is 120 (expected 120)
Finish.
▶ 涨姿势:
● 相关定义
// cooperative_groups_helper.h
// Excerpt from cooperative_groups_helper.h (CUDA 9.0); "..." marks elided lines.
# if !defined(_CG_QUALIFIER)
# define _CG_QUALIFIER __forceinline__ __device__
# endif

# define die() assert(0);

// Excerpt from cooperative_groups.h (declarations reordered for readability).

// Generic thread-group handle: a tagged union recording which threads belong to the group.
class thread_group
{
    friend _CG_QUALIFIER thread_group this_thread();
    friend _CG_QUALIFIER thread_group tiled_partition(const thread_group& parent, unsigned int tilesz);
    friend class thread_block;

protected:
    union __align__(8)
    {
        unsigned int type : 8;   // group kind tag
        struct
        {
            unsigned int type : 8;
            unsigned int size : 24;
            unsigned int mask;   // warp lane mask of the tile's members
        } coalesced;
        struct
        {
            void* ptr[2];
        } buffer;
    } _data;

    _CG_QUALIFIER thread_group operator=(const thread_group& src);

    _CG_QUALIFIER thread_group(__internal::groupType type)
    {
        _data.type = type;
    }

#if __cplusplus >= 201103L
    static_assert(sizeof(_data) == 16, "Failed size check");
#endif

public:
    _CG_QUALIFIER unsigned int size() const;
    _CG_QUALIFIER unsigned int thread_rank() const;
    _CG_QUALIFIER void sync() const;
};

// Group covering an entire thread block.
class thread_block : public thread_group
{
    friend _CG_QUALIFIER thread_block this_thread_block();
    friend _CG_QUALIFIER thread_group tiled_partition(const thread_group& parent, unsigned int tilesz);
    friend _CG_QUALIFIER thread_group tiled_partition(const thread_block& parent, unsigned int tilesz);

    _CG_QUALIFIER thread_block() : thread_group(__internal::ThreadBlock) {}

    // Build a coalesced tile of tilesz threads that contains the calling thread.
    _CG_QUALIFIER thread_group _get_tiled_threads(unsigned int tilesz) const
    {
        const bool pow2_tilesz = ((tilesz & (tilesz - 1)) == 0);

        // Tile size must be a non-zero power of two no larger than a warp.
        if (tilesz == 0 || (tilesz > 32) || !pow2_tilesz)
        {
            die();
            return (thread_block());
        }

        unsigned int mask;
        unsigned int base_offset = thread_rank() & (~(tilesz - 1));
        unsigned int masklength = min(size() - base_offset, tilesz);

        mask = (unsigned int)(-1) >> (32 - masklength);
        mask <<= (__internal::laneid() & ~(tilesz - 1));
        thread_group tile = thread_group(__internal::CoalescedTile);
        tile._data.coalesced.mask = mask;
        tile._data.coalesced.size = __popc(mask);
        return (tile);
    }

public:
    _CG_QUALIFIER void sync() const { __internal::cta::sync(); }
    _CG_QUALIFIER unsigned int size() const { return (__internal::cta::size()); }
    _CG_QUALIFIER unsigned int thread_rank() const { return (__internal::cta::thread_rank()); }
    _CG_QUALIFIER dim3 group_index() const { return (__internal::cta::group_index()); }
    _CG_QUALIFIER dim3 thread_index() const { return (__internal::cta::thread_index()); }
};

// Used by the sample code above: simply invokes the thread_block constructor.
_CG_QUALIFIER thread_block this_thread_block()
{
    return (thread_block());
}

template <unsigned int Size>
class thread_block_tile;
// Only power-of-two tile sizes up to a warp are provided.
template <> class thread_block_tile<32> : public __thread_block_tile_base<32> { };
template <> class thread_block_tile<16> : public __thread_block_tile_base<16> { };
template <> class thread_block_tile<8> : public __thread_block_tile_base<8> { };
template <> class thread_block_tile<4> : public __thread_block_tile_base<4> { };
template <> class thread_block_tile<2> : public __thread_block_tile_base<2> { };
template <> class thread_block_tile<1> : public __thread_block_tile_base<1> { };

// Shared implementation for all tile sizes (Size is a power of two <= 32).
template <unsigned int Size>
class __thread_block_tile_base : public thread_group
{
    static const unsigned int numThreads = Size;

    // Lane mask of the warp lanes that belong to this tile.
    _CG_QUALIFIER unsigned int build_mask() const
    {
        unsigned int mask;
        if (numThreads == 32)
            mask = 0xFFFFFFFF; // full warp
        else
        {
            mask = (unsigned int)(-1) >> (32 - numThreads);
            mask <<= (__internal::laneid() & (~(numThreads - 1)));
        }
        return (mask);
    }

protected:
    _CG_QUALIFIER __thread_block_tile_base() : thread_group(__internal::CoalescedTile)
    {
        _data.coalesced.mask = build_mask();
        _data.coalesced.size = numThreads;
    }

public:
    _CG_QUALIFIER void sync() const { __syncwarp(build_mask()); }
    _CG_QUALIFIER unsigned int thread_rank() const { return (threadIdx.x & (numThreads - 1)); }
    _CG_QUALIFIER unsigned int size() const { return (numThreads); }

    // PTX supported collectives
    _CG_QUALIFIER int shfl(int var, int srcRank) const { return (__shfl_sync(build_mask(), var, srcRank, numThreads)); }
    ...

#ifdef _CG_HAS_FP16_COLLECTIVE
    _CG_QUALIFIER __half shfl(__half var, int srcRank) const { return (__shfl_sync(build_mask(), var, srcRank, numThreads)); }
    ...
#endif

#ifdef _CG_HAS_MATCH_COLLECTIVE
    _CG_QUALIFIER unsigned int match_any(int val) const
    {
        unsigned int lane_match = build_mask() & __match_any_sync(build_mask(), val);
        return (lane_match >> (__internal::laneid() & (~(numThreads - 1))));
    }
    ...
#endif
};
● 用到的线程协作相关函数
thread_block threadBlockGroup = this_thread_block();                            // wrap the current thread block as a cooperative group
thread_block_tile<16> tiledPartition16 = tiled_partition<16>(threadBlockGroup); // partition the group into 16-thread tiles
int in = tiledPartition16.thread_rank();                                        // rank of this thread within its tile
tiledPartition16.sync();                                                        // synchronize the tile
0_Simple__simpleCooperativeGroups的更多相关文章
随机推荐
- HPU 1166: 阶乘问题(一)
1166: 阶乘问题(一) [数学] 时间限制: 1 Sec 内存限制: 128 MB提交: 58 解决: 24 统计 题目描述 小H对阶乘!很感兴趣.现在他想知道N!N!的位数,由于NN太大了,所以 ...
- WinFrom调试时,弹出你正在调试发布的版本
把下图这里改成DEBUG就好 还有项目属性里面也要改一下 问题解决!
- synchronized (lock) 买票demo 线程安全
加锁防止多个线程执行同一段代码! /** http://blog.51cto.com/wyait/1916898 * @author * @since 11/10/2018 * 某电影院目前正在上映贺 ...
- 【CQOI2008】中位数
题不难,但是思路有意思,这个是我自己想出来的OvO 原题: 给出1~n的一个排列,统计该排列有多少个长度为奇数的连续子序列的中位数是b.中位数是指把所有元素从小到大排列后,位于中间的数. n<= ...
- BZOJ4713 迷失的字符串
分析 首先考虑只有一个串时的做法,可以进行背包dp,记\(f(i,j)\)表示从\(i\)的子树中某点出发到\(i\)能否匹配字符串的\(1 \dots j\)位且\(i\)与\(j\)匹配.同时记\ ...
- WebSocket(二)-WebSocket、Socket、TCP、HTTP区别
原文地址:Socket 与 WebSocket 1. 概述 WebSocket 是为了满足基于 Web 的日益增长的实时通信需求而产生的.在传统的 Web 中,要实现实时通信,通用的方式是采用 HTT ...
- Sencha Touch+PhoneGap打造超级奶爸之喂养记(一) 源码免费提供(转)
起源 非常高兴我的宝宝健康平安的出生了.对于初次做奶爸的我,喜悦过后,面临着各中担心,担心宝宝各项指标是否正常.最初几天都是在医院待着,从出生那一天开始,护士妹妹隔一段时间就会来问宝宝的喂奶,大小便, ...
- java基本数据类型和引用类型
这些基本的数据类型是点不出东西来的 3种引用类型 类class 接口interface 数组array 第一个 : 类 Integer Long Boolean Byte Characte ...
- MySQL--忘记MYSQL管理员密码
如root用户密码,可以按照以下方式来修改: STEP1: 停止MySQL服务 ps -ef | grep -v 'grep' | grep 'mysqld' | awk '{print $2}' | ...
- 通过torodb && hasura graphql 让mongodb 快速支持graphql api
torodb 可以方便的将mongo 数据实时同步到pg,hasura graphql 可以方便的将pg 数据暴露为graphql api,集成在一起真的很方便 环境准备 docker-compose ...