Compile with mpicc and launch with mpirun; -np sets the number of MPI processes. A program that uses <math.h> (such as cos.c) additionally needs -lm:

[root@localhost mpi]# mpicc -c base.c
[root@localhost mpi]# mpicc -o base base.o
[root@localhost mpi]# mpirun -np 4 ./base
[root@localhost mpi]# mpicc cos.c -o cos -lm

I. Example programs
1. Hello world

#include <stdio.h>
#include "mpi.h"

int main(int argc, char **argv){
    MPI_Init(&argc, &argv);
    printf("Hello world.\n");
    MPI_Finalize();
    return 0;
}

2. Each process prints its own rank

#include <stdio.h>
#include "mpi.h"

int main(int argc, char **argv){
    MPI_Comm comm = MPI_COMM_WORLD;
    int size, rank;
    MPI_Init(&argc, &argv);
    MPI_Comm_size(comm, &size);
    MPI_Comm_rank(comm, &rank);
    printf("This is process %d of %d processes.\n", rank, size);
    MPI_Finalize();
    return 0;
}
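
A possible run with four processes (binary name illustrative; the line order varies between runs, since the processes print independently):

[root@localhost mpi]# mpirun -np 4 ./rank
This is process 1 of 4 processes.
This is process 0 of 4 processes.
This is process 3 of 4 processes.
This is process 2 of 4 processes.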

3. Point-to-point: process 0 sends a message to process 1, which receives it

#include <stdio.h>
#include <mpi.h>
#include <string.h>

int main(int argc, char **argv){
    MPI_Comm comm = MPI_COMM_WORLD;
    MPI_Status status;
    int size, rank;
    char str[100];
    MPI_Init(&argc, &argv);
    MPI_Comm_size(comm, &size);
    MPI_Comm_rank(comm, &rank);
    if (rank == 0) {
        strcpy(str, "hello world");
        printf("Process 0 sends \"%s\" to process 1.\n", str);
        MPI_Send(str, strlen(str) + 1, MPI_CHAR, 1, 99, comm);
    }
    else if (rank == 1) {
        MPI_Recv(str, 100, MPI_CHAR, 0, 99, comm, &status);
        printf("Process 1 receives message \"%s\".\n", str);
    }
    MPI_Finalize();
    return 0;
}
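
The status argument records information about the received message and can be queried for the actual message size. A minimal sketch, assuming it is placed right after the MPI_Recv call above:

/* Ask how many MPI_CHAR elements actually arrived
   (here strlen("hello world") + 1 = 12). */
int nrecv;
MPI_Get_count(&status, MPI_CHAR, &nrecv);
printf("Process 1 received %d chars.\n", nrecv);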

4. Point-to-point: two processes send a counter back and forth until it reaches a limit
#include <stdio.h>
#include <mpi.h>

int main(int argc, char **argv){
    const int limit = 10;
    int rank, count = 0;
    MPI_Status status;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    /* Assumes exactly two processes: ranks 0 and 1 ping-pong the counter. */
    while (count < limit){
        if (rank == 0){
            count++;
            MPI_Send(&count, 1, MPI_INT, 1, 10, MPI_COMM_WORLD);
            printf("0 sent %d to 1\n", count);
            MPI_Recv(&count, 1, MPI_INT, 1, 20, MPI_COMM_WORLD, &status);
            printf("0 received %d from 1\n", count);
        }
        else {
            MPI_Recv(&count, 1, MPI_INT, 0, 10, MPI_COMM_WORLD, &status);
            printf("1 received %d from 0\n", count);
            count++;
            MPI_Send(&count, 1, MPI_INT, 0, 20, MPI_COMM_WORLD);
            printf("1 sent %d to 0\n", count);
        }
    }
    MPI_Finalize();
    return 0;
}

5. Point-to-point: pass a token around a ring of processes, 0 -> 1 -> 2 -> 0 (assuming three processes are started)

#include <stdio.h>
#include <mpi.h>

int main(int argc, char **argv){
    int rank, size, token, source, dest;
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    source = rank == 0 ? size - 1 : rank - 1;
    dest = (rank + 1) % size;
    token = 100;
    if (rank == 0){
        /* Rank 0 starts the ring: send first, then wait for the token to return. */
        MPI_Ssend(&token, 1, MPI_INT, dest, 1, MPI_COMM_WORLD);
        printf("Process %d sends token to %d.\n", rank, dest);
        MPI_Recv(&token, 1, MPI_INT, source, 1, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        printf("Process %d receives token from %d.\n", rank, source);
    }
    else {
        MPI_Recv(&token, 1, MPI_INT, source, 1, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        printf("Process %d receives token from %d.\n", rank, source);
        MPI_Ssend(&token, 1, MPI_INT, dest, 1, MPI_COMM_WORLD);
        printf("Process %d sends token to %d.\n", rank, dest);
    }
    MPI_Finalize();
    return 0;
}
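
A possible run with three processes (binary name illustrative; interleaving may vary):

[root@localhost mpi]# mpirun -np 3 ./ring
Process 0 sends token to 1.
Process 1 receives token from 0.
Process 1 sends token to 2.
Process 2 receives token from 1.
Process 2 sends token to 0.
Process 0 receives token from 2.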

6. Initialize an array on process 0, then broadcast it; every process ends up with the same values as process 0

#include <stdio.h>
#include <mpi.h>

int main(int argc, char **argv){
    int arr[3], i, rank;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    if (rank == 0){
        for (i = 0; i < 3; i++)
            arr[i] = i + 1;
    }
    MPI_Bcast(arr, 3, MPI_INT, 0, MPI_COMM_WORLD);
    printf("Process %d receives:", rank);
    for (i = 0; i < 3; i++)
        printf("%d ", arr[i]);
    putchar('\n');
    MPI_Finalize();
    return 0;
}
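
A possible run with three processes (binary name illustrative; line order varies):

[root@localhost mpi]# mpirun -np 3 ./bcast
Process 0 receives:1 2 3
Process 1 receives:1 2 3
Process 2 receives:1 2 3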

7. Process 0 gathers values from all processes
#include <stdio.h>
#include <mpi.h>
#include <stdlib.h>

int main(int argc, char **argv){
    int rank, size, sbuf[3], *rbuf = NULL, i;
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    for (i = 0; i < 3; i++)
        sbuf[i] = rank * 10 + i;
    /* The receive buffer is significant only at the root. */
    if (rank == 0)
        rbuf = (int*)malloc(sizeof(int) * 3 * size);
    MPI_Gather(sbuf, 3, MPI_INT, rbuf, 3, MPI_INT, 0, MPI_COMM_WORLD);
    if (rank == 0){
        printf("Process 0 receives:");
        for (i = 0; i < size * 3; i++)
            printf("%d ", rbuf[i]);
        putchar('\n');
        free(rbuf);
    }
    MPI_Finalize();
    return 0;
}
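
A possible run with two processes (binary name illustrative):

[root@localhost mpi]# mpirun -np 2 ./gather
Process 0 receives:0 1 2 10 11 12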

8. Scatter process 0's values to all processes; each process receives a different piece
#include <stdio.h>
#include <mpi.h>
#include <stdlib.h>

int main(int argc, char **argv){
    int rank, size, *sbuf = NULL, rbuf[3], i;
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    /* The send buffer is significant only at the root. */
    if (rank == 0){
        sbuf = (int *) malloc(sizeof(int) * 3 * size);
        for (i = 0; i < size * 3; i++)
            sbuf[i] = i + 1;
    }
    MPI_Scatter(sbuf, 3, MPI_INT, rbuf, 3, MPI_INT, 0, MPI_COMM_WORLD);
    printf("Process %d receives: ", rank);
    for (i = 0; i < 3; i++)
        printf("%d ", rbuf[i]);
    putchar('\n');
    if (rank == 0)
        free(sbuf);
    MPI_Finalize();
    return 0;
}
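
A possible run with two processes (binary name illustrative; line order varies):

[root@localhost mpi]# mpirun -np 2 ./scatter
Process 0 receives: 1 2 3
Process 1 receives: 4 5 6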

9. Unlike gather, every process collects the messages from all processes
#include <stdio.h>
#include <mpi.h>
#include <stdlib.h>

int main(int argc, char **argv){
    int rank, size, sbuf[3], *rbuf, i;
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    for (i = 0; i < 3; i++)
        sbuf[i] = rank * 10 + i;
    rbuf = (int*)malloc(sizeof(int) * 3 * size);
    MPI_Allgather(sbuf, 3, MPI_INT, rbuf, 3, MPI_INT, MPI_COMM_WORLD);
    printf("Process %d receives:", rank);
    for (i = 0; i < size * 3; i++)
        printf("%d ", rbuf[i]);
    putchar('\n');
    free(rbuf);
    MPI_Finalize();
    return 0;
}

10. There are three tasks; use all-to-all to transform

    task1  task2  task3
      0      1      2
      3      4      5
      6      7      8
      9     10     11

into:

    rank0  rank1  rank2  rank3
      0      3      6      9
      1      4      7     10
      2      5      8     11
#include <stdio.h>
#include <mpi.h>
#include <stdlib.h>

int main(int argc, char **argv){
    int rank, size, *sbuf, *rbuf, i;
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    sbuf = (int*)malloc(size * 3 * sizeof(int));
    rbuf = (int*)malloc(size * 3 * sizeof(int));
    for (i = 0; i < size * 3; i++)
        sbuf[i] = rank * 10 + i;
    printf("Before exchange, process %d has ", rank);
    for (i = 0; i < size * 3; i++)
        printf("%d ", sbuf[i]);
    putchar('\n');
    /* Block j of sbuf (3 ints) goes to process j; block i of rbuf comes from process i. */
    MPI_Alltoall(sbuf, 3, MPI_INT, rbuf, 3, MPI_INT, MPI_COMM_WORLD);
    printf("After exchange, process %d has ", rank);
    for (i = 0; i < size * 3; i++)
        printf("%d ", rbuf[i]);
    putchar('\n');
    free(sbuf);
    free(rbuf);
    MPI_Finalize();
    return 0;
}
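
A possible run with two processes (binary name illustrative; line order varies). The 3-element blocks are transposed across processes:

[root@localhost mpi]# mpirun -np 2 ./alltoall
Before exchange, process 0 has 0 1 2 3 4 5
Before exchange, process 1 has 10 11 12 13 14 15
After exchange, process 0 has 0 1 2 10 11 12
After exchange, process 1 has 3 4 5 13 14 15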

11. Sum each task (each column) across processes; process 0 receives the result
#include <stdio.h>
#include <mpi.h>

int main(int argc, char **argv) {
    int size, rank, sbuf[3], rbuf[3], i;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    for (i = 0; i < 3; i++) sbuf[i] = rank * 10 + i;
    printf("Process %d has: ", rank);
    for (i = 0; i < 3; i++) printf("%d ", sbuf[i]);
    putchar('\n');
    MPI_Reduce(sbuf, rbuf, 3, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);
    if (rank == 0) {
        printf("Total sum = ");
        for (i = 0; i < 3; i++) printf("%d ", rbuf[i]);
        putchar('\n');
    }
    MPI_Finalize();
    return 0;
}
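
A possible run with two processes (binary name illustrative; line order varies). Each column is summed element-wise across processes:

[root@localhost mpi]# mpirun -np 2 ./reduce
Process 0 has: 0 1 2
Process 1 has: 10 11 12
Total sum = 10 12 14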

12. For each column, find the maximum value and the rank that holds it; process 0 prints the result
#include <stdio.h>
#include <mpi.h>
#include <stdlib.h>
#include <time.h>

/* Matches the layout MPI_2INT expects: a value followed by its location. */
typedef struct { int val; int rank; } DATATYPE;

int main(int argc, char **argv){
    int size, rank, i;
    DATATYPE sbuf[3], rbuf[3];
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    srand(time(NULL) + rank);
    printf("Process %d has ", rank);
    for (i = 0; i < 3; i++){
        sbuf[i].val = rand() % 100;
        sbuf[i].rank = rank;
        printf("%d ", sbuf[i].val);
    }
    putchar('\n');
    MPI_Reduce(sbuf, rbuf, 3, MPI_2INT, MPI_MAXLOC, 0, MPI_COMM_WORLD);
    if (rank == 0){
        printf("max value and location are:\n");
        for (i = 0; i < 3; i++)
            printf("value = %d, location = %d\n", rbuf[i].val, rbuf[i].rank);
    }
    MPI_Finalize();
    return 0;
}
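
One possible run with two processes (binary name illustrative; the inputs are random, so the numbers differ on every run):

[root@localhost mpi]# mpirun -np 2 ./maxloc
Process 0 has 23 7 91
Process 1 has 55 68 12
max value and location are:
value = 55, location = 1
value = 68, location = 1
value = 91, location = 0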

13. Like example 11, but with MPI_Allreduce: every process obtains the column sums and prints them
#include <stdio.h>
#include <mpi.h>

int main(int argc, char **argv) {
    int size, rank, sbuf[3], rbuf[3], i;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    for (i = 0; i < 3; i++) sbuf[i] = rank * 10 + i;
    printf("Process %d has: ", rank);
    for (i = 0; i < 3; i++) printf("%d ", sbuf[i]);
    putchar('\n');
    MPI_Allreduce(sbuf, rbuf, 3, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
    printf("Total sum of process %d = ", rank);
    for (i = 0; i < 3; i++) printf("%d ", rbuf[i]);
    putchar('\n');
    MPI_Finalize();
    return 0;
}
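
A possible run with two processes (binary name illustrative; line order varies). Unlike example 11, every process holds the final sums:

[root@localhost mpi]# mpirun -np 2 ./allreduce
Process 0 has: 0 1 2
Process 1 has: 10 11 12
Total sum of process 0 = 10 12 14
Total sum of process 1 = 10 12 14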

14. Prefix reduction: each process gets the column-wise sums of its own data and that of all lower-ranked processes
#include <stdio.h>
#include <mpi.h>

int main(int argc, char **argv) {
    int size, rank, sbuf[3], rbuf[3], i;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    for (i = 0; i < 3; i++) sbuf[i] = rank * 10 + i;
    printf("Process %d has: ", rank);
    for (i = 0; i < 3; i++) printf("%d ", sbuf[i]);
    putchar('\n');
    MPI_Scan(sbuf, rbuf, 3, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
    printf("Process %d has results ", rank);
    for (i = 0; i < 3; i++) printf("%d ", rbuf[i]);
    putchar('\n');
    MPI_Finalize();
    return 0;
}
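
A possible run with three processes (binary name illustrative; line order varies). Rank r receives the element-wise sums over ranks 0..r:

[root@localhost mpi]# mpirun -np 3 ./scan
Process 0 has: 0 1 2
Process 1 has: 10 11 12
Process 2 has: 20 21 22
Process 0 has results 0 1 2
Process 1 has results 10 12 14
Process 2 has results 30 33 36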

II. Functions
1. Send

int MPI_Send(
    void* data,            // starting address of the data to be sent
    int count,             // number of elements to send (not bytes)
    MPI_Datatype datatype, // MPI datatype of each element
    int destination,       // rank of the destination process
    int tag,               // message identifier (set by user)
    MPI_Comm comm)         // MPI communicator of the processes involved

2. Recv

int MPI_Recv(
    void* data,            // starting address of the buffer to store the message
    int count,             // maximum number of elements to receive (not bytes)
    MPI_Datatype datatype, // MPI datatype of each element
    int source,            // rank of the source process
    int tag,               // message identifier (set by user)
    MPI_Comm comm,         // MPI communicator of the processes involved
    MPI_Status* status)    // structure holding information about the received message

3. Broadcast: send the contents of one process's buffer to all processes

int MPI_Bcast(
    void* buffer,          // starting address of buffer
    int count,             // number of entries in buffer
    MPI_Datatype datatype, // data type of buffer
    int root,              // rank of broadcast root
    MPI_Comm comm)         // communicator

4. Gather: the root process collects messages from all processes

int MPI_Gather(
    const void* sendbuf,   // starting address of send buffer
    int sendcount,         // number of elements in send buffer
    MPI_Datatype sendtype, // data type of send buffer elements
    void* recvbuf,         // address of receive buffer (significant only at root)
    int recvcount,         // number of elements for any single receive (significant only at root)
    MPI_Datatype recvtype, // data type of recv buffer elements (significant only at root)
    int root,              // rank of receiving process
    MPI_Comm comm)         // communicator

5. Scatter: distribute the root process's values to all processes; each process receives a different piece

int MPI_Scatter(
    void* sendbuf,         // pointer to send buffer (significant only at root)
    int sendcount,         // items to send per process
    MPI_Datatype sendtype, // type of send buffer data
    void* recvbuf,         // pointer to receive buffer
    int recvcount,         // number of items to receive
    MPI_Datatype recvtype, // type of receive buffer data
    int root,              // rank of sending process
    MPI_Comm comm)         // MPI communicator to use

6. Allgather: unlike gather, not only the root but every process collects the messages from all processes

int MPI_Allgather(
    void* sendbuf,         // pointer to send buffer
    int sendcount,         // number of items to send
    MPI_Datatype sendtype, // type of send buffer data
    void* recvbuf,         // pointer to receive buffer
    int recvcount,         // items to receive per process
    MPI_Datatype recvtype, // type of receive buffer data
    MPI_Comm comm)         // MPI communicator to use

7. Alltoall: every process sends a distinct block of data to every other process

int MPI_Alltoall(
    const void* sendbuf,   // address of send buffer
    int sendcount,         // number of elements to send to each process
    MPI_Datatype sendtype, // data type of send buffer elements
    void* recvbuf,         // address of receive buffer
    int recvcount,         // number of elements received from any process
    MPI_Datatype recvtype, // data type of receive buffer elements
    MPI_Comm comm)         // communicator

8. Reduce

int MPI_Reduce(
    void* send_data,       // address of send buffer
    void* recv_data,       // address of receive buffer (significant only at root)
    int count,             // number of elements in send buffer
    MPI_Datatype datatype, // data type of elements of send buffer
    MPI_Op op,             // reduce operation (e.g. MPI_SUM, MPI_MAX, MPI_MAXLOC)
    int root,              // rank of root process
    MPI_Comm communicator) // communicator

9. Allreduce

int MPI_Allreduce(
    void* send_data,       // address of send buffer
    void* recv_data,       // address of receive buffer
    int count,             // number of elements in send buffer
    MPI_Datatype datatype, // data type of elements of send buffer
    MPI_Op op,             // reduce operation
    MPI_Comm communicator) // communicator

10. Scan

int MPI_Scan(
    const void* sendbuf,   // address of send buffer
    void* recvbuf,         // address of receive buffer
    int count,             // number of elements in send buffer
    MPI_Datatype datatype, // data type of elements of send buffer
    MPI_Op op,             // reduce operation
    MPI_Comm comm)         // communicator
