MPI_Scatterv doesn't work well

Tags: c, parallel-processing, mpi

I want to scatter the rows of a random boolean array among 4 nodes, but only node 0 works correctly when I run the code below. Please help me. Regards.

Welcome to Stack Overflow! There is another way to allocate the 2D array indivisual that simplifies the use of MPI_Scatterv(). Indeed, in the code you posted (shown in full below), the array is not contiguous in memory, since the rows are allocated one by one. That makes the computation of the displacements less obvious… Take a look at … or … . (A sketch of the contiguous layout follows the code below.)

Thanks, my problem is solved.
int main(int argc, char *argv[]) {


    int size, rank, divided_pop_size, sum = 0, root = 0,procgridsize;
    const int num_vertices = 3;
    int **indivisual, **recbuf;
    int *sendcounts;     //specifying the number of elements to send to each processor
    int *displs;         //Entry i specifies the displacement   
    MPI_Status status;
    int offset,rows;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    divided_pop_size = (n_initial_pop / size)*num_vertices;
    rows = n_initial_pop / size;
    //~~~~~~~~~~~~~~~~~~~~~~~declaration of receive buffer
    recbuf = new int*[n_initial_pop / size];

    for (int i = 0; i < n_initial_pop / size; i++)
        recbuf[i] = new int[num_vertices]; 


    if (rank == root)
    {

        indivisual = new int*[n_initial_pop];

        for (int i = 0; i < n_initial_pop; i++)
            indivisual[i] = new int[num_vertices];

        for (int i = 0; i < n_initial_pop; i++)
        {
            for (int j = 0; j < num_vertices; j++)
            {
                indivisual[i][j] = rand() % 2;
            }
        }
        printf("indivisual array is:\n");
        for (int i = 0; i < n_initial_pop; i++)
        {
            printf("\n");
            for (int j = 0; j < num_vertices; j++)
                printf("  %d", indivisual[i][j]);
        }
    }

    sendcounts = (int*)malloc(sizeof(int)*size);
    displs = (int*)malloc(sizeof(int)*size);

    if (rank == 0) {
        for (int i = 0; i<size; i++) {
            sendcounts[i] = divided_pop_size;

            displs[i] = sum;
            sum += sendcounts[i];

        }
    }
    if (rank == 0) {
        for (int i = 0; i < size; i++) {
            printf("sendcounts[%d] = %d\tdispls[%d] = %d\n", i, sendcounts[i], i, displs[i]);
        }
    }
    MPI_Scatterv(indivisual, sendcounts, displs, MPI_INT, recbuf,
        sendcounts[rank], MPI_INT,
        0, MPI_COMM_WORLD);

    for (int p = 0; p<size; p++) {
        //printf("\nrank : %d", rank);
        if (rank == p) {
            printf("Local process on rank %d is:\n", rank);
            for (int i = 0; i<n_initial_pop / size; i++) {
                putchar('|');
                for (int j = 0; j<num_vertices; j++) {
                    printf("%d ", recbuf[i][j]);
                }
                printf("|\n");
            }
        }
        MPI_Barrier(MPI_COMM_WORLD);
    }



    MPI_Finalize();

    return 0;
}
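
Below is a minimal sketch of the contiguous-allocation approach the answer describes; it is not the answerer's original code. A single flat block backs all the rows, so &indivisual[0][0] and &recbuf[0][0] can be passed directly to MPI_Scatterv(), and each displacement is simply a multiple of rows * num_vertices. The value n_initial_pop = 8 is assumed purely for illustration (the question does not show its definition), and n_initial_pop is assumed to be divisible by the number of processes, as in the original code.

#include <mpi.h>
#include <cstdio>
#include <cstdlib>

// Allocate a rows x cols array backed by one contiguous block,
// so that &array[0][0] addresses all rows * cols elements in order.
static int **alloc_contiguous(int rows, int cols)
{
    int *data = new int[rows * cols];
    int **array = new int *[rows];
    for (int i = 0; i < rows; i++)
        array[i] = &data[i * cols];
    return array;
}

static void free_contiguous(int **array)
{
    delete[] array[0];   // the flat data block
    delete[] array;      // the row pointers
}

int main(int argc, char *argv[])
{
    const int n_initial_pop = 8;   // assumed value, for illustration only
    const int num_vertices = 3;

    int size, rank;
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    int rows = n_initial_pop / size;           // rows per process
    int **recbuf = alloc_contiguous(rows, num_vertices);
    int **indivisual = NULL;

    // Counts and displacements are in units of MPI_INT.
    int *sendcounts = (int *)malloc(sizeof(int) * size);
    int *displs = (int *)malloc(sizeof(int) * size);
    int sum = 0;
    for (int i = 0; i < size; i++) {
        sendcounts[i] = rows * num_vertices;
        displs[i] = sum;
        sum += sendcounts[i];
    }

    if (rank == 0) {
        indivisual = alloc_contiguous(n_initial_pop, num_vertices);
        for (int i = 0; i < n_initial_pop; i++)
            for (int j = 0; j < num_vertices; j++)
                indivisual[i][j] = rand() % 2;
    }

    // Both buffers are contiguous, so passing the address of the first
    // element gives MPI_Scatterv the flat memory layout it expects.
    MPI_Scatterv(rank == 0 ? &indivisual[0][0] : NULL, sendcounts, displs, MPI_INT,
                 &recbuf[0][0], sendcounts[rank], MPI_INT,
                 0, MPI_COMM_WORLD);

    for (int p = 0; p < size; p++) {
        if (rank == p) {
            printf("Local rows on rank %d:\n", rank);
            for (int i = 0; i < rows; i++) {
                for (int j = 0; j < num_vertices; j++)
                    printf("%d ", recbuf[i][j]);
                printf("\n");
            }
        }
        MPI_Barrier(MPI_COMM_WORLD);
    }

    free(sendcounts);
    free(displs);
    free_contiguous(recbuf);
    if (rank == 0)
        free_contiguous(indivisual);

    MPI_Finalize();
    return 0;
}

Compiled with mpicxx and launched with, e.g., mpirun -np 4, each rank should print its own block of rows rather than only rank 0 receiving valid data.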