Warning: file_get_contents(/data/phpspider/zhask/data//catemap/4/c/70.json): failed to open stream: No such file or directory in /data/phpspider/zhask/libs/function.php on line 167

Warning: Invalid argument supplied for foreach() in /data/phpspider/zhask/libs/tag.function.php on line 1116

Notice: Undefined index: in /data/phpspider/zhask/libs/function.php on line 180

Warning: array_chunk() expects parameter 1 to be array, null given in /data/phpspider/zhask/libs/function.php on line 181
我无法弄清楚MPI_Scatterv和MPI_Gatherv_C_Parallel Processing_Mpi - Fatal编程技术网

我无法弄清楚MPI_Scatterv和MPI_Gatherv

我无法弄清楚MPI_Scatterv和MPI_Gatherv,c,parallel-processing,mpi,C,Parallel Processing,Mpi,我试图将一个一维n*n数组分散到p处理器上,然后使用“聚集”将数据获取回来。但问题是,当我运行程序时,它会显示垃圾输出。 我使用的是一个n*n的数组A和一个大小为n的数组b // num_rows is the number of rows each process will be receiving,base is the base index and last is the last index . // curr_index_proce is the index of the proces

我试图将一个一维n*n数组分散到p处理器上,然后使用“聚集”将数据获取回来。但问题是,当我运行程序时,它会显示垃圾输出。 我使用的是一个n*n的数组A和一个大小为n的数组b

// num_rows is the number of rows each process will be receiving,base is the base index and last is the last index .
// curr_index_proce is the index of the processor which contains the index i of Array A. 

#include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include <mpi.h>

#define NUM_EQN 10

/* Block row-distribution of an n-row matrix over p ranks. */
#define NUM_ROWS(i,n,p)  ((((i)+1)*(n)/(p)) - ((i)*(n)/(p)))    /* rows owned by rank i */
#define BASE(i,n,p)     (((i)*(n)/(p)))                         /* first row index of rank i */
#define LAST(i,n,p) ((((i)+1)*(n)/(p)) - 1)                     /* last row index of rank i */
#define CUR_INDEX_PROC(i,n,p) ( ((p) * ( (i) + 1 ) - 1 )/(n))   /* rank that owns row i */

/* Fixed: original was MIN(a,n) yet referenced an undeclared b and
 * used '>' — i.e. it computed the MAX and could not expand correctly. */
#define MIN(a,b) ((a) < (b) ? (a) : (b))

void gather(float *A,float *b,int n);
/* Print the n x n matrix A, one row per line ("%6.2f " per element),
 * followed by three blank lines. The vector b is accepted for interface
 * compatibility but is not printed. */
void print_array(float *A,float *b,int n){
        (void)b;  /* unused */
        for (int row = 0; row < n; row++) {
                for (int col = 0; col < n; col++)
                        printf("%6.2f " , A[row * n + col]);
                printf("\n");
        }
        printf("\n\n\n");
}



/* Elements of A to allocate per rank: n columns times ceil(n/p) rows,
 * i.e. the largest block any single rank can receive. */
int SIZE_A(int n,int p ){
    int max_rows = (n % p == 0) ? n / p : n / p + 1;
    return n * max_rows;
}

/* Entries of b to allocate per rank: ceil(n/p). */
int SIZE_B(int n,int p){
    /* Ceiling division — identical result to the if/else branch form
     * for the positive n, p used here. */
    return (n + p - 1) / p;
}
/* Read the NUM_EQN x NUM_EQN system matrix from file "A" and the RHS
 * vector from file "b", run the scatter/gather demonstration, and report
 * the elapsed time on rank 0. Returns 0 on success. */
int main(int argc,char **argv){
        MPI_Init(&argc,&argv);
        int size,rank;
        MPI_Comm_rank(  MPI_COMM_WORLD,&rank);
        MPI_Comm_size(MPI_COMM_WORLD,&size);

        float *A = malloc(sizeof(float)*NUM_EQN*NUM_EQN);
        float *b = malloc(sizeof(float)*NUM_EQN);
        if (A == NULL || b == NULL){
                fprintf(stderr, "allocation failed\n");
                MPI_Abort(MPI_COMM_WORLD, 1);
        }

        /* The filenames are hardcoded; the original printed argv[1]/argv[2]
         * (possibly unset) in the error messages, and called MPI_Finalize()
         * AFTER exit(0), which never ran. */
        FILE *fp = fopen("A","r");
        if (fp == NULL ){
                printf("Error A Not Found\n");
                MPI_Finalize();
                exit(0);
        }
        for (int i = 0 ; i < NUM_EQN;i++)
                for(int j = 0 ; j < NUM_EQN;j++)
                        if (fscanf(fp,"%f",&A[i*NUM_EQN+j]) != 1){
                                printf("Error reading A\n");
                                MPI_Finalize();
                                exit(0);
                        }
        fclose(fp);

        FILE *fp2 = fopen("b","r");
        if (fp2 == NULL ){
                printf("Error b Not Found\n");
                MPI_Finalize();
                exit(0);
        }
        for (int i = 0 ; i < NUM_EQN;i++)
                if (fscanf(fp2,"%f",&b[i]) != 1){
                        printf("Error reading b\n");
                        MPI_Finalize();
                        exit(0);
                }
        fclose(fp2);

        double time_start = MPI_Wtime();
        gather(A,b,NUM_EQN);
        double time_finish = MPI_Wtime();
        if (rank == 0)
                printf("Elapsed time: %f s\n", time_finish - time_start);

        free(A);
        free(b);
        MPI_Finalize();
        return 0;
}

/* Scatter the rows of the n x n matrix A and the entries of b across all
 * ranks, print each rank's slice, then gather both back into A and b on
 * rank 0. Row distribution follows NUM_ROWS (block distribution). */
void gather(float *A,float *b,int n){
        int size,rank;
        MPI_Comm_rank(  MPI_COMM_WORLD,&rank);
        MPI_Comm_size(MPI_COMM_WORLD,&size);

        /* SIZE_A/SIZE_B are the worst-case (ceiling) slice sizes, so the
         * buffers fit every rank's share. */
        float *A_recv = malloc(sizeof(float)*(SIZE_A(n,size)));
        float *b_recv = malloc(sizeof(float)*(SIZE_B(n,size)));
        if (A_recv == NULL || b_recv == NULL){
                fprintf(stderr, "gather: allocation failed\n");
                MPI_Abort(MPI_COMM_WORLD, 1);
        }
        printf("%d %d \n",SIZE_A(n,size),SIZE_B(n,size));
        if ( rank == 0 ){
                print_array(A,b,n);
        }

        int send_count[size];    /* elements of A per rank (rows * n) */
        int disp[size];          /* element offsets into A */
        int send_count_b[size];  /* entries of b per rank (rows) */
        int disp_b[size];
        for (int i = 0 ; i < size ; i++ ){
                send_count[i] = n*NUM_ROWS(i,n,size);
                disp[i] = (i == 0) ? 0 : disp[i-1] + send_count[i-1];
                send_count_b[i] = NUM_ROWS(i,n,size);
                disp_b[i] = (i == 0) ? 0 : disp_b[i-1] + send_count_b[i-1];
        }

        /* recvcount is this rank's exact element count, not the SIZE_A
         * upper bound — the counts must match what rank 0 sends. */
        MPI_Scatterv(A, send_count, disp,MPI_FLOAT, A_recv,send_count[rank],MPI_FLOAT,0, MPI_COMM_WORLD);
        MPI_Scatterv(b, send_count_b, disp_b,MPI_FLOAT, b_recv,send_count_b[rank], MPI_FLOAT, 0, MPI_COMM_WORLD);

        /* Iterate over ROWS received. The original looped to
         * send_count[rank] (an ELEMENT count) and read far past the end
         * of A_recv — the source of the garbage output. */
        int local_rows = NUM_ROWS(rank,n,size);
        for (int i = 0 ; i < local_rows; i++ ){
                for (int j = 0 ; j < n ; j++)
                        printf("%3.2f : ",A_recv[i*n+j]);
                printf("\n\n");
        }

        /* Send back exactly what was received so every rank's sendcount
         * matches rank 0's recvcounts. */
        MPI_Gatherv(A_recv,send_count[rank],MPI_FLOAT,A,send_count,disp,MPI_FLOAT,0,MPI_COMM_WORLD);
        MPI_Gatherv(b_recv,send_count_b[rank],MPI_FLOAT,b,send_count_b,disp_b,MPI_FLOAT,0,MPI_COMM_WORLD);

        free(A_recv);
        free(b_recv);
}
//num_rows是每个进程将接收的行数,base是基本索引,last是最后一个索引。
//curr_index_proce是包含数组A的索引i的处理器的索引。
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <mpi.h>
#定义数值方程10
#定义行数(i,n,p)(((i)+1)*(n)/(p))-((i)*(n)/(p)))
#定义基数(i,n,p)(((i)*(n)/(p)))
#定义最后一个(i,n,p)(((i)+1)*(n)/(p))-1)
#定义CUR_INDEX_PROC(i,n,p)((p)*(i)+1)-1/(n))
#定义最小值(a,n)((a)>(b)?(a):(b))
无效聚集(浮点*A、浮点*b、整数n);
无效打印_数组(浮点*A,浮点*b,整数n){
int i,j;
对于(i=0;i

请帮帮我。

一件让我感到奇怪的事情是,在另一个函数的主体中放置了scatter_数据函数原型。这很不寻常

其次,查看完整的代码可能会有所帮助。有完整的代码要编译将有助于我们进行一些家庭调试,因为MPI代码通常很难在屏幕上调试

编辑::

我确实清理了您的代码,并添加了一些注释和检查。scatterv和gatherv似乎都工作得很好-我添加了另一个用于存储收集数据的数组(C),而不是原始的数组,该数组包含两个MPI调用前后的相同数据。我还修改了用于分散数据的本地临时数组的大小

有一件事可能会让您感到困惑,那就是用于分散数据的接收缓冲区的大小。原始代码占用了太多的空间,当我增加接收到的数据时,给了我seg错误。例如,对于4x4 a矩阵,原始接收缓冲区大小为8x4,但假设有两个进程,则应为2x4。您的
SIZE_B(n, size)
函数实际上返回了正确的数字

#define NUM_EQN 4

/* Driver for the cleaned-up demo: fill A (NUM_EQN x NUM_EQN) and b with
 * sequential values, run the scatter/gather demonstration, free the
 * buffers, and finalize MPI. */
int main(int argc,char **argv)
{
    MPI_Init(&argc,&argv);
    int size,rank;
    MPI_Comm_rank(  MPI_COMM_WORLD,&rank);
    MPI_Comm_size(MPI_COMM_WORLD,&size);

    float *A = malloc(sizeof(float)*NUM_EQN*NUM_EQN);
    float *b = malloc(sizeof(float)*NUM_EQN);
    if (A == NULL || b == NULL)
    {
        fprintf(stderr, "allocation failed\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    int i;
    for (i = 0; i < NUM_EQN * NUM_EQN; i++) A[i] = (float)i;
    for (i = 0; i < NUM_EQN; i++) b[i] = (float)i;

    gather(A,b,NUM_EQN);

    /* Release the dynamically allocated buffers before finalizing. */
    free(A);
    free(b);

    MPI_Finalize();
    printf("End of process %d\n",rank);
    return 0;
}
另外,我在这里找到了一个很好的MPI_Scatterv示例:

另一个很好的例子是:

还有一个建议:
调用 free() 释放动态分配的内存是一个好习惯
/* Demonstration: scatter the rows of A across all ranks, add 10 to every
 * received element, then gather the modified rows into a SEPARATE array C
 * on rank 0 (printed before and after the gather to show it changed). */
void gather(float *A,float *b,int n){

    int size,rank;
    MPI_Comm_rank(  MPI_COMM_WORLD,&rank);
    MPI_Comm_size(MPI_COMM_WORLD,&size);

    /* Per-rank receive buffers; SIZE_A/SIZE_B give the worst-case
     * (ceiling) slice size so every rank's share fits. */
    float *A_recv = malloc(sizeof(float)*(SIZE_A(n,size)));
    float *b_recv = malloc(sizeof(float)*(SIZE_B(n,size)));
    /* calloc so the "before gather" printout reads defined zeros instead
     * of indeterminate heap memory (reading that is UB). */
    float *C = calloc((size_t)n * n, sizeof(float));
    if (A_recv == NULL || b_recv == NULL || C == NULL)
    {
        fprintf(stderr, "gather: allocation failed\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    printf("n: %d %d %d \n",n,SIZE_A(n,size),SIZE_B(n,size));

    if ( rank == 0 )
    {
        print_array(A,b,n);
    }

    int send_count[size];    /* elements of A sent to each rank */
    int disp[size];          /* element offsets into A */
    int send_count_b[size];  /* entries of b sent to each rank */
    int disp_b[size];
    for ( int i = 0 ; i < size ; i++ )
    {
        send_count[i] = n*NUM_ROWS(i,n,size);
        disp[i] = (i == 0) ? 0 : disp[i-1] + send_count[i-1];
        send_count_b[i] = NUM_ROWS(i,n,size);
        disp_b[i] = (i == 0) ? 0 : disp_b[i-1] + send_count_b[i-1];

        printf("%d Displacement[%d] : %d Sendcount : %d \n", rank, i, disp[i], send_count[i]);
    }

    /* recvcount is this rank's exact element count; it must match what
     * rank 0 sends for this rank. */
    MPI_Scatterv(A, send_count, disp ,MPI_FLOAT, A_recv,send_count[rank],MPI_FLOAT,0, MPI_COMM_WORLD);

    /* Only this rank's actual row count — SIZE_B is a ceiling and can
     * exceed NUM_ROWS on trailing ranks when n % size != 0. */
    int local_rows = NUM_ROWS(rank,n,size);
    printf("%d Receive data : \n",rank);
    for ( int i = 0 ; i < local_rows; i++ )
    {
        for ( int j = 0 ; j < n ; j++)
        {
            A_recv[i*n+j] = A_recv[i*n+j] + 10.0;
            printf("%d %3.2f : ",rank, A_recv[i*n+j]);
        }
        printf("\n");
    }

    if (rank == 0)
    {
        printf("%d Gather data : \n",rank);
        for ( int i = 0 ; i < n; i++ )
        {
            for ( int j = 0 ; j < n ; j++)
                printf("%d %3.2f : ",rank, C[i*n+j]);
            printf("\n");
        }
    }

    /* Send back exactly the elements this rank owns so sendcount matches
     * rank 0's recvcounts (SIZE_A is only an upper bound). */
    MPI_Gatherv(A_recv,send_count[rank],MPI_FLOAT,C,send_count,disp,MPI_FLOAT,0,MPI_COMM_WORLD);

    if (rank == 0)
    {
        printf("%d Gather data : \n",rank);
        for ( int i = 0 ; i < n; i++ )
        {
            printf("%d", rank);
            for ( int j = 0 ; j < n ; j++)
                printf(" %3.2f : ",C[i*n+j]);
            printf("\n");
        }
    }

    free(A_recv);
    free(b_recv);
    free(C);
}
0 Gather data BEFORE MPI CALL: 
0 -0.00 : 0 -0.00 : 0 0.00 : 0 0.00 : 
0 0.00 : 0 0.00 : 0 0.00 : 0 0.00 : 
0 0.00 : 0 0.00 : 0 0.00 : 0 0.00 : 
0 0.00 : 0 0.00 : 0 0.00 : 0 0.00 : 
0 Gather data AFTER MPI CALL: 
0 10.00 :  11.00 :  12.00 :  13.00 : 
0 14.00 :  15.00 :  16.00 :  17.00 : 
0 18.00 :  19.00 :  20.00 :  21.00 : 
0 22.00 :  23.00 :  24.00 :  25.00 :