MPI编译错误
我对MPI编程非常陌生,我正在尝试运行一个应该并行排序的程序,例如8个或16个或更多的数字,当我编译并运行该程序时,我遇到了分段错误。下面是我想要编译的代码。如有任何建议,将不胜感激MPI编译错误,mpi,Mpi,我对MPI编程非常陌生,我正在尝试运行一个应该并行排序的程序,例如8个或16个或更多的数字,当我编译并运行该程序时,我遇到了分段错误。下面是我想要编译的代码。如有任何建议,将不胜感激 #include <stdio.h> #include <stdlib.h> #include <mpi.h> /* The IncOrder function that is called by qsort is defined as follows */ in
#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>
/* The IncOrder function that is called by qsort is defined as follows */
int IncOrder(const void *e1, const void *e2)
{
return (*((int *)e1) - *((int *)e2));
}
int main(int argc, char *argv[]){
int n; /* The total number of elements to be sorted */
int npes; /* The total number of processes */
int myrank; /* The rank of the calling process */
int nlocal; /* The local num of elements, and d array that stores */
int *elmnts; /* The array that stores the local elements */
int *relmnts; /* The array that stores the received elements */
int oddrank; /* The rank of d process during odd-phase comm */
int evenrank; /* The rank of the process during even-phase com */
int *wspace; /* Working space during the compare-split operation */
int i;
MPI_Status status;
/* Initialize MPI and get system information */
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &npes);
MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
n = atoi(argv[1]);
nlocal = n/npes; /* Compute the number of elements to be stored locally. */
/* Allocate memory for the various arrays */
elmnts = (int *)malloc(nlocal*sizeof(int));
relmnts = (int *)malloc(nlocal*sizeof(int));
wspace = (int *)malloc(nlocal*sizeof(int));
/* Fill-in the elmnts array with random elements */
srandom(myrank);
for (i=0; i<nlocal; i++)
elmnts[i] = random();
/* Sort the local elements using the built-in quicksort routine */
qsort(elmnts, nlocal, sizeof(int), IncOrder);
/* Determine the rank of the processors that myrank needs to com during
* ics/ccc.gifthe */
/* odd and even phases of the algorithm */
if (myrank%2 == 0) {
oddrank = myrank-1;
evenrank = myrank+1;
} else {
oddrank = myrank+1;
evenrank = myrank-1;
}
/* Set the ranks of the processors at the end of the linear */
if (oddrank == -1 || oddrank == npes)
oddrank = MPI_PROC_NULL;
if (evenrank == -1 || evenrank == npes)
evenrank = MPI_PROC_NULL;
/* Get into the main loop of the odd-even sorting algorithm */
for (i=0; i<npes-1; i++) {
if (i%2 == 1) /* Odd phase */
MPI_Sendrecv(elmnts, nlocal, MPI_INT, oddrank, 1, relmnts,
nlocal, MPI_INT, oddrank, 1, MPI_COMM_WORLD, &status);
else /* Even phase */
MPI_Sendrecv(elmnts, nlocal, MPI_INT, evenrank, 1, relmnts,
nlocal, MPI_INT, evenrank, 1, MPI_COMM_WORLD, &status);
CompareSplit(nlocal, elmnts, relmnts, wspace, myrank < status.MPI_SOURCE);
}
MPI_Gather(elmnts,nlocal,MPI_INT,relmnts,n,MPI_INT,0,MPI_COMM_WORLD);
/* The master host display the sorted array */
int len = sizeof(elmnts)/sizeof(int);
if(myrank == 0) {
printf("\nSorted array :\n");
for (i=0;i<len;i++) {
printf("%d; ",relmnts[i]);
}
printf("\n");
}
free(elmnts); free(relmnts); free(wspace);
MPI_Finalize();
}
/* This is the CompareSplit function */
CompareSplit(int nlocal, int *elmnts, int *relmnts, int *wspace, int keepsmall){
int i, j, k;
for (i=0; i<nlocal; i++)
wspace[i] = elmnts[i]; /* Copy the elmnts array into the wspace array */
if (keepsmall) { /* Keep the nlocal smaller elements */
for (i=j=k=0; k<nlocal; k++) {
if (j == nlocal || (i < nlocal && wspace[i] < relmnts[j]))
elmnts[k] = wspace[i++];
else
elmnts[k] = relmnts[j++];
}
} else { /* Keep the nlocal larger elements */
for (i=k=nlocal-1, j=nlocal-1; k>=0; k--) {
if (j == 0 || (i >= 0 && wspace[i] >= relmnts[j]))
elmnts[k] = wspace[i--];
else
elmnts[k] = relmnts[j--];
}
}
}
#包括
#包括
#包括
/*qsort调用的IncOrder函数定义如下*/
整数输入(常数无效*e1,常数无效*e2)
{
返回(*(int*)e1)-*((int*)e2));
}
intmain(intargc,char*argv[]){
int n;/*要排序的元素总数*/
int npes;/*进程总数*/
int myrank;/*调用进程的级别*/
int nlocal;/*元素的本地数量,以及存储*/
int*elmnts;/*存储本地元素的数组*/
int*relmnts;/*存储接收元素的数组*/
int oddrank;/*奇数相位通信期间d进程的秩*/
int evenrank;/*进程在偶数阶段com*期间的排名
int*wspace;/*比较拆分操作期间的工作空间*/
int i;
MPI_状态;
/*初始化MPI并获取系统信息*/
MPI_Init(&argc,&argv);
MPI通信大小(MPI通信世界和NPE);
MPI_Comm_rank(MPI_Comm_WORLD和myrank);
n=atoi(argv[1]);
nlocal=n/npes;/*计算本地存储的元素数。*/
/*为各种阵列分配内存*/
elmnts=(int*)malloc(nlocal*sizeof(int));
relmnts=(int*)malloc(nlocal*sizeof(int));
wspace=(int*)malloc(nlocal*sizeof(int));
/*用随机元素填充elmnts数组*/
srandom(myrank);
对于(i=0;i您的分段错误来自于这一行
n = atoi(argv[1]);
由于argv[1]包含字符串(null),n得到一个零值。将n更改为自定义整数,如:n=5,一切都会正常工作。感谢现在代码正在运行,我想从所有处理器中收集所有排序的数字,为清晰起见,我已使用MPI\u Gather调用更新了主代码。