C++ 在MPI中拆分和传递数组块
我是MPI新手,试图通过编写一个简单的C程序来理解它的含义。我只想拆分一个数组并将块发送到N个处理器。因此,每个处理器将在其块中找到本地分钟。然后程序(在根目录或其他地方)找到全局最小值 我研究了C++ 在MPI中拆分和传递数组块,c++,c,arrays,parallel-processing,mpi,C++,C,Arrays,Parallel Processing,Mpi,我是MPI新手,试图通过编写一个简单的C程序来理解它的含义。我只想拆分一个数组并将块发送到N个处理器。因此,每个处理器将在其块中找到本地分钟。然后程序(在根目录或其他地方)找到全局最小值 我研究了MPI\u Send、MPI Isend或MPI\u Bcast函数,但对在何处使用一个函数而不是另一个函数有点困惑。我需要一些关于我的课程总体结构的提示: #include <stdio.h> #include <stdlib.h> #include <mpi.h>
MPI_Send、MPI_Isend 或 MPI_Bcast
函数,但对在何处使用一个函数而不是另一个函数有点困惑。我需要一些关于我的课程总体结构的提示:
#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>

#define N 9 // array size

int A[N] = {0,2,1,5,4,3,7,6,8}; // dummy array (only the data on rank 0 matters)

/* Return the smallest element of arr[0..len-1]; assumes len > 0. */
static int findMin(const int *arr, int len) {
    int min = arr[0];
    for (int i = 1; i < len; i++) {
        if (arr[i] < min)
            min = arr[i];
    }
    return min;
}

int main(int argc, char *argv[]) {
    int size, rank;
    const int tag = 1234;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size); // think size = 4 for this example
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    /* One chunk per worker rank (ranks 1..size-1).
       NOTE: this simple scheme requires (size-1) to divide N evenly. */
    int count = N / (size - 1);
    int localMin;
    int globalMin; // valid only on rank 0 after the reduction

    if (rank == 0) {
        /* Point-to-point MPI_Send is the right tool here: each worker gets
           a DIFFERENT chunk. MPI_Bcast would send the SAME data to all. */
        for (int dest = 1; dest < size; dest++) {
            /* Original bug: loop ran `i < size` stepping by count, so with
               size=4 only two of the three chunks were ever sent. */
            MPI_Send(&A[(dest - 1) * count], count, MPI_INT, dest, tag,
                     MPI_COMM_WORLD);
            printf("P0 sent a %d elements to P%d.\n", count, dest);
        }
        /* Rank 0 holds no chunk; contribute a real array element so it
           cannot win the MIN reduction incorrectly. */
        localMin = A[0];
    } else {
        /* Each worker receives exactly ONE message -- no receive loop. */
        int *tempArray = malloc(count * sizeof *tempArray);
        if (tempArray == NULL) {
            MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
        }
        MPI_Recv(tempArray, count, MPI_INT, 0, tag, MPI_COMM_WORLD,
                 MPI_STATUS_IGNORE);
        localMin = findMin(tempArray, count);
        printf("Min for P%d is %d.\n", rank, localMin);
        free(tempArray); // was leaked in the original
    }

    /* Collective reduction: every rank contributes localMin, rank 0
       receives the global minimum. This replaces the original's broken
       post-Finalize findMin over a partially filled localMins array. */
    MPI_Reduce(&localMin, &globalMin, 1, MPI_INT, MPI_MIN, 0, MPI_COMM_WORLD);

    if (rank == 0) {
        printf("Global min: %d\n", globalMin);
    }

    /* Finalize is the LAST MPI call; no MPI traffic may follow it. */
    MPI_Finalize();
    return 0;
}
#包括
#包括
#包括
#定义n9//数组大小
int A[N]={0,2,1,5,4,3,7,6,8};//这是一个虚拟数组
int main(int argc,char*argv[]){
int i,k=0,大小,等级,来源=0,目的地=1,计数;
int tag=1234;
MPI_Init(&argc,&argv);
MPI_通信大小(MPI_通信世界和大小);
MPI通信等级(MPI通信世界和等级);
count=N/(size-1);//对于这个例子,考虑size=4
int*tempArray=malloc(count*sizeof(int));
int*localMins=malloc((大小-1)*sizeof(int));
如果(秩==0){
对于(i=0;i,您的代码有几个问题(正如您已经指出的),并且正如一些评论员已经提到的,有其他方法可以使用MPI调用执行您正在尝试执行的操作
然而,我将重新调整您的代码的用途,并尝试不做太多更改,以便向您展示正在发生的事情
#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>

#define N 9 // array size

int A[N] = {0,2,1,5,4,3,7,6,8}; // this is a dummy array that should only be initialized on rank == ROOT

/* Return the smallest element of arr[0..len-1]; assumes len > 0.
   (Was called but never defined in the original answer.) */
static int findMin(const int *arr, int len) {
    int min = arr[0];
    for (int i = 1; i < len; i++) {
        if (arr[i] < min)
            min = arr[i];
    }
    return min;
}

int main(int argc, char *argv[]) {
    int size;
    int rank;
    const int VERY_LARGE_INT = 999999;
    const int ROOT = 0; // the master rank that holds A to begin with
    int tag = 1234;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size); // think size = 4 for this example
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    /*
      How many numbers you send from ROOT to each other rank.
      Note that for this implementation to work, (size-1) must divide N.
    */
    int count = N / (size - 1);
    int *localArray = malloc(count * sizeof *localArray); // no cast needed in C
    int localMin;  // minimum computed on this rank
    int globalMin; // will only be valid on rank == ROOT

    if (localArray == NULL) {
        MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE); // unchecked in the original
    }

    /* rank == ROOT sends portion of A to every other rank */
    if (rank == ROOT) {
        for (int dest = 1; dest < size; ++dest) {
            // One rank to another: MPI_Send or MPI_Isend.
            // One rank to ALL others: every rank must call MPI_Bcast
            // (a collective, like MPI_Reduce below).
            MPI_Send(&A[(dest-1)*count], count, MPI_INT, dest, tag, MPI_COMM_WORLD);
            printf("P0 sent a %d elements to P%d.\n", count, dest);
        }
        localMin = VERY_LARGE_INT; // identity-like value so ROOT can't win the MIN reduction
    }
    /* Every other rank receives exactly one message: from ROOT into localArray */
    else {
        MPI_Recv(localArray, count, MPI_INT, ROOT, tag, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        localMin = findMin(localArray, count);
        printf("Min for P%d is %d.\n", rank, localMin);
    }

    /*
      At this point, every rank holds a valid localMin. MPI_Reduce (a
      collective -- all ranks must call it) combines them with MPI_MIN
      and stores the single result on rank == ROOT.
    */
    MPI_Reduce(&localMin, &globalMin, 1, MPI_INT, MPI_MIN, ROOT, MPI_COMM_WORLD);

    if (rank == ROOT)
        printf("Global min: %d\n", globalMin);

    free(localArray); // was leaked in the original answer

    /* The last thing you do is Finalize MPI. Nothing should come after. */
    MPI_Finalize();
    return 0;
}
#包括
#包括
#包括
#定义n9//数组大小
int A[N]={0,2,1,5,4,3,7,6,8};//这是一个伪数组,只应在秩==根上初始化
int main(int argc,char*argv[]){
整数大小;
整数秩;
常数int非常大int=999999;
const int ROOT=0;//包含一个开头的主列组
int tag=1234;
MPI_Init(&argc,&argv);
MPI_Comm_size(MPI_Comm_WORLD,&size);//对于本例,考虑size=4
MPI通信等级(MPI通信世界和等级);
/*
您从根目录向彼此发送多少个数字。
请注意,要使此实现正常工作,(size-1)必须除以N。
*/
整数计数=N/(大小-1);
int*localArray=(int*)malloc(count*sizeof(int));
int localMin;//在秩i上计算的最小值
int globalMin;//仅在秩==根上有效
/*rank==根将A的一部分发送到每一个其他列组*/
if(秩==根){
对于(int dest=1;dest
充分披露:我还没有测试过这段代码,但除了一些小的打字错误,它应该可以工作
查看这段代码,看看你是否能理解为什么我移动了你的 MPI_Send
和 MPI_Recv
调用。要理解这一点,请注意每个列组都在读取你给出的每一行代码。因此,在你的else
语句中,不应该有一个for
接收循环
此外,通信器中的每个进程(rank)都必须调用 MPI 集合操作(如 MPI_Reduce
和 MPI_Bcast
),这些调用的“源”和“目标”列组是函数输入参数的一部分,或由集合本身暗示
最后,为您做一点家庭作业:您能理解为什么这不是一个很好的查找数组a
全局最小值的实现吗?提示:在完成 MPI_Send 后,rank == ROOT
做什么?您如何更好地分解这个问题,使每个列组都能更均匀地执行工作?有一个问题您的代码有几个问题(正如您已经指出的),正如一些评论员已经提到的,有其他方法可以使用MPI调用执行您正在尝试执行的操作
然而,我将重新调整您的代码的用途,并尝试不做太多更改,以便向您展示正在发生的事情
#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>

#define N 9 // array size

int A[N] = {0,2,1,5,4,3,7,6,8}; // this is a dummy array that should only be initialized on rank == ROOT

/* Return the smallest element of arr[0..len-1]; assumes len > 0.
   (Was called but never defined in the original answer.) */
static int findMin(const int *arr, int len) {
    int min = arr[0];
    for (int i = 1; i < len; i++) {
        if (arr[i] < min)
            min = arr[i];
    }
    return min;
}

int main(int argc, char *argv[]) {
    int size;
    int rank;
    const int VERY_LARGE_INT = 999999;
    const int ROOT = 0; // the master rank that holds A to begin with
    int tag = 1234;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size); // think size = 4 for this example
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    /*
      How many numbers you send from ROOT to each other rank.
      Note that for this implementation to work, (size-1) must divide N.
    */
    int count = N / (size - 1);
    int *localArray = malloc(count * sizeof *localArray); // no cast needed in C
    int localMin;  // minimum computed on this rank
    int globalMin; // will only be valid on rank == ROOT

    if (localArray == NULL) {
        MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE); // unchecked in the original
    }

    /* rank == ROOT sends portion of A to every other rank */
    if (rank == ROOT) {
        for (int dest = 1; dest < size; ++dest) {
            // One rank to another: MPI_Send or MPI_Isend.
            // One rank to ALL others: every rank must call MPI_Bcast
            // (a collective, like MPI_Reduce below).
            MPI_Send(&A[(dest-1)*count], count, MPI_INT, dest, tag, MPI_COMM_WORLD);
            printf("P0 sent a %d elements to P%d.\n", count, dest);
        }
        localMin = VERY_LARGE_INT; // identity-like value so ROOT can't win the MIN reduction
    }
    /* Every other rank receives exactly one message: from ROOT into localArray */
    else {
        MPI_Recv(localArray, count, MPI_INT, ROOT, tag, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        localMin = findMin(localArray, count);
        printf("Min for P%d is %d.\n", rank, localMin);
    }

    /*
      At this point, every rank holds a valid localMin. MPI_Reduce (a
      collective -- all ranks must call it) combines them with MPI_MIN
      and stores the single result on rank == ROOT.
    */
    MPI_Reduce(&localMin, &globalMin, 1, MPI_INT, MPI_MIN, ROOT, MPI_COMM_WORLD);

    if (rank == ROOT)
        printf("Global min: %d\n", globalMin);

    free(localArray); // was leaked in the original answer

    /* The last thing you do is Finalize MPI. Nothing should come after. */
    MPI_Finalize();
    return 0;
}
#包括
#包括
#包括
#定义n9//数组大小
int A[N]={0,2,1,5,4,3,7,6,8};//这是一个伪数组,只应在秩==根上初始化
int main(int argc,char*argv[]){
整数大小;
整数秩;
常数int非常大int=999999;
const int ROOT=0;//包含一个开头的主列组
int tag=1234;
MPI_Init(&argc,&argv);
MPI_Comm_size(MPI_Comm_WORLD,&size);//对于本例,考虑size=4
MPI通信等级(MPI通信世界和等级);
/*
您从根目录向彼此发送多少个数字。
请注意,要使此实现正常工作,(size-1)必须除以N。
*/
整数计数=N/(大小-1);
int*localArray=(int*)malloc(count*sizeof(int));
int localMin;//在秩i上计算的最小值
int globalMin;//仅在秩==根上有效
/*秩==根发送一个