C 程序在MPI_发送时停止

C 程序在MPI_发送时停止,c,parallel-processing,mpi,C,Parallel Processing,Mpi,当我使用多个处理器执行程序时,程序停止工作。 它首先停止MPI\u发送 我做错了什么 #include "mpi.h" #include <stdio.h> #include <stdlib.h> #include <time.h> #define SIZE 200000 #define SIZE2 256 #define VYVOD 1 int main(int argc, char *argv[]) { int NX, NT; doub

当我使用多个处理器执行程序时,程序停止工作。 它首先停止
MPI\u发送
我做错了什么

#include "mpi.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define SIZE 200000
#define SIZE2 256
#define VYVOD 1

int main(int argc, char *argv[])
{
    int NX, NT;
    double TK, UM, DX, DY, DT;
    double starttime, endtime;
    int numnode, rank, delta=0, ierr, NXnode;
    double **U;
    double **U1;
    double *sosed1;
    double *sosed2;
    int i, j, k;
    MPI_Status stats;
    NX = 1*(SIZE2+1);
    TK = 20.00;
    UM = 10.0;
    DX = 0.1;
    DY = DX;
    DT = 0.1;
    NT = (TK/DT);

    MPI_Init(&argc,&argv);
    MPI_Comm_size(MPI_COMM_WORLD,&numnode);
    MPI_Comm_rank(MPI_COMM_WORLD,&rank);

    if(rank == 0)
        printf("\nTotal nodes: %d\n", numnode);

    NX = NX - 2;
    NXnode = (NX-(NX%numnode))/numnode;
    if (rank < (NX%numnode))
    {
        delta = rank * NXnode + rank + 1;
        NXnode++;
    }
    else
    {
        delta = rank * NXnode + (NX%numnode) + 1;
    }

    if(rank == 0){
        printf("Order counting complete, NXnode = %d\n", NXnode);
    }



    U = (double**)malloc(NXnode*sizeof(double*));
    U1 = (double**)malloc(NXnode*sizeof(double*));
    sosed1 = (double*)malloc(SIZE*sizeof(double));
    sosed2 = (double*)malloc(SIZE*sizeof(double));

    for (i=0; i < NXnode; i++)
    {
        U[i] = (double*)malloc(SIZE*sizeof(double));
        U[i][0]=0;
        U[i][SIZE-1]=0;
        U1[i] = (double*)malloc(SIZE*sizeof(double));
        U1[i][0]=0;
        U1[i][SIZE-1]=0;
        if (U[i]==NULL || U1[i]==NULL)
        {
            printf("Error at memory allocation!");
            return 1;
        }
    }

    MPI_Barrier(MPI_COMM_WORLD);
    if(rank == 0){
        starttime = MPI_Wtime();
        printf("Array allocation complete\n");
    }

    for (i = 0; i < NXnode; i++)
    {
        for (j = 1; j < SIZE-1; j++)
        {
            if ((delta)<=(NXnode/2))
            {
                U1[i][j]=2*(UM/NXnode)*(delta+i);
            }
            else
            {
                U1[i][j]=-2*(UM/NXnode) + 2*UM;
            }
        }
    }

    printf("Array init 1 complete, rank %d\n", rank);

    MPI_Barrier(MPI_COMM_WORLD);

    if (rank > 0)
    {
        MPI_Send(&(U1[0][0]), SIZE, MPI_DOUBLE , rank-1, 0, MPI_COMM_WORLD);
        MPI_Recv(&(sosed1[0]), SIZE, MPI_DOUBLE , rank-1, 1, MPI_COMM_WORLD, &stats);
    }
    else
    {
        int initInd = 0;
        for (initInd = 0; initInd < SIZE; initInd++)
        {
            sosed1[initInd]=0;
        }
    }

    if (rank < (numnode-1))
    {
        MPI_Send(&(U1[NXnode-1][0]), SIZE, MPI_DOUBLE , rank+1, 1, MPI_COMM_WORLD);
        MPI_Recv(&(sosed2[0]), SIZE, MPI_DOUBLE , rank+1, 0, MPI_COMM_WORLD, &stats);
    }
    else
    {
        int initInd = 0;
        for (initInd = 0; initInd < SIZE; initInd++)
        {
            sosed2[initInd]=0;
        }
    }

    printf("Send complete, rank %d\n", rank);

    MPI_Barrier(MPI_COMM_WORLD);
    printf("Array init complete, rank %d\n", rank);

    for (k = 1; k <= NT; k++)
    {
        int cycle = 0;
        for (cycle=1; cycle < SIZE-1; cycle++)
        {
            U[0][cycle] = U1[0][cycle] + DT/(DX*DX)*(U1[1][cycle]-2*U1[0][cycle]+sosed1[cycle])+DT/(DY*DY)*(U1[0][cycle+1]+U1[0][cycle-1]-(U1[0][cycle]*2));
        }
        for (i=1; i<NXnode-1; i++)
        {
            for(j=1; j<SIZE-1; j++)
            {
                U[i][j] = U1[i][j] + DT/(DX*DX)*(U1[i+1][j]-2*U1[i][j]+U[i-1][j])+DT/(DY*DY)*(U1[i][j+1]+U1[i][j-1]-(U1[i][j]*2));
            }
        }
        for (cycle=1; cycle < SIZE-1; cycle++)
        {
            U[NXnode-1][cycle]=U1[NXnode-1][cycle]+DT/(DX*DX)*(sosed2[cycle]-2*U1[NXnode-1][cycle]+U1[NXnode-2][cycle])+DT/(DY*DY)*(U1[NXnode-1][cycle+1]+U1[NXnode-1][cycle-1]-(U1[NXnode-1][cycle]*2));
        }

        /*U[0] = U1[0]+DT/(DX*DX)*(U1[0+1]-2*U1[0]+sosed1);
        for (j = 0; j<NXnode; j++)
        {
            U[j]=U1[j]+DT/(DX*DX)*(U1[j+1]-2*U1[j]+U1[j-1]);
        }
        U[NXnode-1]=U1[NXnode-1]+DT/(DX*DX)*(sosed2-2*U1[NXnode-1]+U1[(NXnode-1)-1]);*/

        if (rank > 0)
        {
            MPI_Send(&(U[0][0]), SIZE, MPI_DOUBLE , rank-1, 0, MPI_COMM_WORLD);
        }
        if (rank < (numnode-1))
        {
            MPI_Send(&(U[NXnode-1][0]), SIZE, MPI_DOUBLE , rank+1, 0, MPI_COMM_WORLD);
        }

        if (rank > 0)
        {
            MPI_Recv(&(sosed1[0]), SIZE, MPI_DOUBLE , rank-1, 0, MPI_COMM_WORLD, &stats);
        }
        if (rank < (numnode-1))
        {
            MPI_Recv(&(sosed2[0]), SIZE, MPI_DOUBLE , rank+1, 0, MPI_COMM_WORLD, &stats);
        }
        for (i = 0; i<NXnode; i++)
        {
            for (j=0; j<SIZE; j++)
            {
                U1[i][j]=U[i][j];
            }
        }
    }

    MPI_Barrier(MPI_COMM_WORLD);
        printf("Array count complete, rank %d\n", rank);

    if (rank == 0)
    {
        endtime=MPI_Wtime();
        printf("\n## TIME: %f\n", endtime-starttime);
    }

     MPI_Finalize();
}
#包括“mpi.h”
#包括
#包括
#包括
#定义大小200000
#定义大小2 256
#定义VYVOD 1
int main(int argc,char*argv[])
{
int-NX,NT;
双TK,UM,DX,DY,DT;
双开始时间,结束时间;
int numnode,rank,delta=0,ierr,NXnode;
双**U;
双**U1;
双*sosed1;
双*sosed2;
int i,j,k;
MPI_状态统计;
NX=1*(尺寸2+1);
TK=20.00;
UM=10.0;
DX=0.1;
DY=DX;
DT=0.1;
NT=(TK/DT);
MPI_Init(&argc,&argv);
MPI_Comm_大小(MPI_Comm_WORLD和numnode);
MPI通信等级(MPI通信世界和等级);
如果(秩==0)
printf(“\n总节点:%d\n”,numnode);
NX=NX-2;
NXnode=(NX-(NX%numnode))/numnode;
if(秩<(NX%numnode))
{
delta=rank*NXnode+rank+1;
NXnode++;
}
其他的
{
delta=rank*NXnode+(NX%numnode)+1;
}
如果(秩==0){
printf(“订单计数完成,NXnode=%d\n”,NXnode);
}
U=(双**)malloc(NXnode*sizeof(双*);
U1=(双**)malloc(NXnode*sizeof(双*);
sosed1=(双*)malloc(大小*尺寸(双));
sosed2=(双*)malloc(尺寸*尺寸(双));
对于(i=0;ifor(k=1;k
MPI\u Send
正在阻止执行,直到调用相应的
MPI\u Recv
(可能在另一个进程中)

在您的程序中,除了rank=0之外的所有进程都在第一个障碍之后立即调用
MPI\u Send
,并且没有人准备好
Recv
消息,因此
MPI\u Send
无限阻塞。本质上,每个进程都在等待其消息被具有较低级别的进程接受(秩2等待秩1,秩1等待秩0),秩0根本不接受任何消息(它转到下一个代码块,然后调用
MPI_Send
),因此所有消息都挂起

看起来您缺少秩=0的进程的通信部分(它应该执行类似于
MPI_Recv(从秩1);…;MPI_Send(到秩1);


另一件事是,您使用带有标记
1
MPI\u Send
,但使用标记
0
调用
MPI\u Recv
。这不会耦合。您需要使用相同的标记,或者在接收操作中指定任何
MPI\u标记。
请查看更新。现在我注意到您使用的是不同的标记对于发送和接收。为了使操作相互匹配,您需要使用相同的标记。我更新了答案。
MPI_Barrier(MPI_COMM_WORLD);

if (rank == 0 && numnode > 1)
{
    MPI_Recv(&(sosed2[0]), SIZE, MPI_DOUBLE , rank+1, 0, MPI_COMM_WORLD, &stats);
    MPI_Send(&(U1[NXnode-1][0]), SIZE, MPI_DOUBLE , rank+1, 1, MPI_COMM_WORLD);
    int initInd = 0;
    for (initInd = 0; initInd < SIZE; initInd++)
    {
        sosed1[initInd]=0;
    }
}
else if (rank == 0)
{
    int initInd = 0;
    for (initInd = 0; initInd < SIZE; initInd++)
    {
        sosed2[initInd]=0;
        sosed1[initInd]=0;
    }
}
else if (rank < (numnode-1))
{
    MPI_Send(&(U1[0][0]), SIZE, MPI_DOUBLE , rank-1, 1, MPI_COMM_WORLD);
    MPI_Recv(&(sosed1[0]), SIZE, MPI_DOUBLE , rank-1, 0, MPI_COMM_WORLD, &stats);
    MPI_Recv(&(sosed2[0]), SIZE, MPI_DOUBLE , rank+1, 0, MPI_COMM_WORLD, &stats);
    MPI_Send(&(U1[NXnode-1][0]), SIZE, MPI_DOUBLE , rank+1, 1, MPI_COMM_WORLD);
}
else if (rank == (numnode - 1))
{
    MPI_Send(&(U1[0][0]), SIZE, MPI_DOUBLE , rank-1, 1, MPI_COMM_WORLD);
    MPI_Recv(&(sosed1[0]), SIZE, MPI_DOUBLE , rank-1, 0, MPI_COMM_WORLD, &stats);
    int initInd = 0;
    for (initInd = 0; initInd < SIZE; initInd++)
    {
        sosed2[initInd]=0;
    }
}