/* File: hello.c — minimal MPI program: every process prints one greeting. */
#include "mpi.h"
#include <stdio.h>

int main(int argc, char **argv) {
    MPI_Init(&argc, &argv);            /* parallel section begins */
    printf("hello parallel world!\n");
    MPI_Finalize();                    /* parallel section ends */
    return 0;                          /* explicit success status */
}
============================
/* File: who.c — each MPI process reports its rank, the world size and the
 * name of the host it runs on. */
#include "mpi.h"
#include <stdio.h>

int main(int argc, char **argv) {
    int rank;
    int world_size;
    int name_len;
    char host[MPI_MAX_PROCESSOR_NAME];

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);        /* id of this process */
    MPI_Comm_size(MPI_COMM_WORLD, &world_size);  /* total number of processes */
    MPI_Get_processor_name(host, &name_len);     /* host name of this process */
    printf("Hello World! Process %d of %d on %s\n", rank, world_size, host);
    MPI_Finalize();
}
==================================
/* File: message.c — every non-root process sends a greeting string to
 * rank 0, which receives and prints one message per sender. */
#include <stdio.h>
#include <string.h>  /* strcpy/strlen were previously used without a prototype */
#include "mpi.h"

int main(int argc, char **argv) {
    int myid, numprocs, source;
    MPI_Status status;
    char message[100];

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &myid);
    MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
    if (myid != 0) {
        /* Workers: build the payload and send it to rank 0.
         * The length is strlen+1 so the terminating '\0' travels too. */
        strcpy(message, "Hello World!");
        MPI_Send(message, strlen(message) + 1, MPI_CHAR, 0, 99, MPI_COMM_WORLD);
    }
    else {
        /* Rank 0 receives one string from every other process (1..numprocs-1). */
        for (source = 1; source < numprocs; source++) {
            MPI_Recv(message, 100, MPI_CHAR, source, 99, MPI_COMM_WORLD, &status);
            printf("I am process %d. I recv string '%s' from process %d.\n",
                   myid, message, source);
        }
    }
    MPI_Finalize();
    return 0;
}
==================================
/* File: mtpi.c — Monte Carlo estimate of pi.  Every process throws `count`
 * random points at the unit square and counts hits inside the inscribed
 * circle; rank 0 gathers the per-process hit counts and prints the
 * combined estimate. */
#include "mpi.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>    /* time() is used to seed the RNG */

int main(int argc, char **argv) {
    int myid, numprocs;
    int namelen, source;
    long count = 1000000;                        /* samples per process */
    char processor_name[MPI_MAX_PROCESSOR_NAME];
    MPI_Status status;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &myid);        /* rank of this process */
    MPI_Comm_size(MPI_COMM_WORLD, &numprocs);    /* total process count */
    MPI_Get_processor_name(processor_name, &namelen); /* host name */
    srand((unsigned)time(NULL));                 /* per-process RNG seed */

    double x, y;
    long m = 0, m1 = 0, i = 0, p = 0;
    double n = 0.0;
    for (i = 0; i < count; i++) {
        x = (double)rand() / (double)RAND_MAX;   /* random x in [0,1] */
        y = (double)rand() / (double)RAND_MAX;   /* random y in [0,1] */
        /* Is the point inside the circle of radius 0.5 centred at (0.5,0.5)? */
        if ((x - 0.5) * (x - 0.5) + (y - 0.5) * (y - 0.5) < 0.25)
            m++;
    }
    /* Local estimate: 4 * hits / samples (was hard-coded as 1000000). */
    n = 4.0 * m / count;
    printf("Process %d of %d on %s pi= %f\n", myid, numprocs, processor_name, n);

    if (myid != 0) {
        /* Workers send their hit count to rank 0.  `m` is a long, so the
         * datatype must be MPI_LONG — MPI_DOUBLE here corrupted the data. */
        MPI_Send(&m, 1, MPI_LONG, 0, 1, MPI_COMM_WORLD);
    }
    else {
        p = m;  /* start from rank 0's own hits */
        /* Accumulate the hit counts received from every worker. */
        for (source = 1; source < numprocs; source++) {
            MPI_Recv(&m1, 1, MPI_LONG, source, 1, MPI_COMM_WORLD, &status);
            p += m1;
        }
        /* Combined estimate over count*numprocs total samples. */
        printf("pi= %f\n", 4.0 * p / (count * numprocs));
    }
    MPI_Finalize();
    return 0;
}
========================================
/* File: inte.c — parallel Monte Carlo integration of f(x) = x*x over [0,10].
 * The N sample points are shared round-robin among the processes and the
 * partial sums are combined with MPI_Reduce on rank 0. */
#define N 100000000
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include "mpi.h"

int main(int argc, char **argv) {
    int myid, numprocs;
    int i;
    double local = 0.0;          /* this process's share of the sum */
    double inte, tmp = 0.0, x;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &myid);
    MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
    srand((unsigned)time(NULL)); /* per-process RNG seed */

    /* Mean-value Monte Carlo: integral ≈ (b-a)/N * sum f(x_i) with a=0, b=10.
     * Each process evaluates the samples i = myid, myid+numprocs, ... so all
     * processes together draw exactly N samples. */
    for (i = myid; i < N; i = i + numprocs) {
        x = 10.0 * rand() / (RAND_MAX + 1.0);  /* uniform sample in [0,10) */
        tmp = 10.0 * x * x / N;  /* (b-a)*f(x)/N; the interval width 10 was missing */
        local = tmp + local;
    }
    /* Sum all partial results onto rank 0 to obtain the integral. */
    MPI_Reduce(&local, &inte, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
    if (myid == 0) {
        printf("The integral of x*x=%16.15f\n", inte);
    }
    MPI_Finalize();
    return 0;
}
====================================
/* File: myreduce.c — the same Monte Carlo integration of x*x over [0,10] as
 * inte.c, but the global sum is formed by a hand-written reduction
 * (Myreduce, defined below) instead of MPI_Reduce. */
#define N 100000000
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include "mpi.h"

/* Hand-rolled sum-reduction of `count` doubles onto rank `root`. */
void Myreduce(double *sendbuf, double *recvbuf, int count, int root);

int main(int argc, char **argv) {
    int myid, numprocs;
    int i;
    double local = 0.0;          /* this process's share of the sum */
    double inte, tmp = 0.0, x;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &myid);
    MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
    srand((unsigned)time(NULL));

    /* Mean-value Monte Carlo: integral ≈ (b-a)/N * sum f(x_i), a=0, b=10.
     * (Samples are drawn from [0,10), not [1,10] as the old comment said.) */
    for (i = myid; i < N; i = i + numprocs) {
        x = 10.0 * rand() / (RAND_MAX + 1.0);  /* uniform sample in [0,10) */
        tmp = 10.0 * x * x / N;  /* (b-a)*f(x)/N; the interval width 10 was missing */
        local = tmp + local;
    }
    Myreduce(&local, &inte, 1, 0);  /* sum all partial results onto rank 0 */
    if (myid == 0) {
        printf("The integral of x*x=%16.15f\n", inte);
    }
    MPI_Finalize();
    return 0;
}
/*自定义的归约函数,sendbuf为发送缓冲区,recvbuf为接收缓冲区,count为数据个数,root为指定根节点*/
/*该函数实现归约求和的功能*/
void Myreduce(double *sendbuf,double *recvbuf,int count,int root) {
MPI_Status status;
int i;
int myid,numprocs;
*recvbuf=0.0;
MPI_Comm_rank(MPI_COMM_WORLD, &myid);
MPI_Comm_size(MPI_COMM_WORLD,&numprocs);
double *tmp;
//非root节点向root节点发送数据
if(myid!=root) {
MPI_Send(sendbuf,count,MPI_DOUBLE,root,99,MPI_COMM_WORLD);
}
//root节点接收数据并对数据求和,完成规约操作
if(myid==root) {
*recvbuf=*sendbuf;
for(i=0;i<numprocs;i++) {
if(i!=root) {
MPI_Recv(tmp,count,MPI_DOUBLE,i,99,MPI_COMM_WORLD,&status);
*recvbuf=*recvbuf+*tmp;
}
}
}