MPI_Send/Recv of a dynamically allocated array in C

I am trying to write a finite volume solver in C using MPI, but I can't seem to pass arrays correctly with MPI_Send and MPI_Recv. I need all of the workers to do some computation on their own portion of the array and then send the sub-arrays back to the master, which stitches the sub-arrays back together and compares the approximation against the known solution. The FVM solver and the structure of the code are correct; I have checked the serial code against the known solution. Below is the code where I try to pass the sub-arrays back to the master and receive them there. I have built Valgrind with MPI support, and the memcheck tool does not like the allocation in the send_output_MPI function. That is consistent with what happens when I try to run the program: mpiexec aborts with signal code 6.

The complete example code is below. The array passing happens in the recv_output_MPI and send_output_MPI functions, where I try to pass the sub-arrays back to the master.

#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <mpi.h>
#include <math.h>
#include <time.h>
#include <string.h>
#include "fvm.h"
#include "lab_mpi.h"

int main( int argc, char *argv[] )
{
    double tt0, tt1;

//  MPI variables
    int ierr; // MPI error flag
    int nProc, nWrs, myID, rc;

//-------------Start MPI------------------
    rc = MPI_Init( &argc, &argv );
    if ( rc != MPI_SUCCESS ) {
        printf( "Error starting MPI program. Terminating.\n" );
        MPI_Abort(MPI_COMM_WORLD, rc);
     }
    ierr = MPI_Comm_size( MPI_COMM_WORLD, &nProc );
    assert( !ierr );
    ierr = MPI_Comm_rank( MPI_COMM_WORLD, &myID );
    assert( !ierr );
    nWrs = nProc - 1;
    assert( nWrs == nProc - 1 );

    if ( myID == MASTER ) {
        tt0 = MPI_Wtime( );
        master( nWrs, myID );
        tt1 = MPI_Wtime();
        printf("  Main MR timing = %lf sec on %i workers.\n", tt1 - tt0, nWrs );
        fflush(stdout);
    } else {
        ierr = worker( nWrs, myID );
        printf("  Worker %i exiting; ierr = %i\n", myID, ierr );
        fflush(stdout);
        if ( ierr != 0 )
            printf("  Worker %i exiting with ierr = %i\n", myID, ierr );
        fflush(stdout);
    }

//---------------End MPI-------------------------
    ierr = MPI_Finalize( );
    assert( !ierr );
    exit( EXIT_SUCCESS );
}

/* master MPI function
 * reads input file, packs ints and doubles into arrays, then
 * broadcasts
 * receives output from workers at t = tout
 */
int master( int nWrs, int master )
{
    unsigned int i = 0;
    int Mx; // M/nWrks gives domain decomposition
    double t = 0.0;
    char buffer[50];
    int M, MM;
    unsigned int N_max;
    double t0, t_end, dt_out, a, b, factor, D, dx, dt, t_out, dt_expl, mu;
    double* x_vals;
    double* U;

    //----MPI variables-----
    int numInts = 2;
    int numDbls = 6;
    int ierr, nProc, myID, rc;
    int *intParams;
    double *dblParams;

    fgets(buffer, sizeof(buffer), stdin);
    fscanf(stdin, "%lf %lf %lf %lf %lf %lf %lf %i",
           &t0, &t_end, &dt_out, &a, &b, &factor, &D, &MM);
    printf("t0: %lf t end: %lf  Number of CVs: %i  Factor: %3.2lf\n",
           t0, t_end, MM, factor);
        fflush(stdout);

    M = (b - a) * MM;
    dx = (double)(b - a)/M;
    dt_expl = (dx * dx)/(2 * D);
    dt = factor * dt_expl;
    N_max = (unsigned int)((t_end - t0)/dt + 1);
    t_out = max(dt_out, dt);
    mu = dt/dx;

    x_vals = ( double* ) calloc((M + 2), sizeof(double));
    U = ( double* ) calloc((M + 2), sizeof(double));
    xMesh( a, b, M, x_vals );

    intParams = ( int* ) malloc(( numInts ) * sizeof( int ));
    dblParams = ( double* ) malloc(( numDbls ) * sizeof( double ));

   // Pack arrays of variables to broadcast
    intParams[0] = N_max;
    intParams[1] = M;

    dblParams[0] = D;
    dblParams[1] = t_out;
    dblParams[2] = dt_out;
    dblParams[3] = dt;
    dblParams[4] = a;
    dblParams[5] = b;

    ierr = MPI_Bcast( intParams, numInts, MPI_INT, MASTER, MPI_COMM_WORLD );
    assert( !ierr );
    ierr = MPI_Bcast( dblParams, numDbls, MPI_DOUBLE, MASTER, MPI_COMM_WORLD );
    assert( !ierr );

    // begin timestepping
    for (i = 1; i <= N_max; i++) {
//         sent to workers
//         flux( F, U, M, dx, D, t);
//         pde( F, U, M, D, mu, t, b );
        ierr = MPI_Barrier( MPI_COMM_WORLD );
        assert( !ierr );
        t = i * dt;
        if ( t >= t_out ) {
            recv_output_MPI( nWrs, M, U ); 
            printf( "\nProfile at time: %lf, N-step: %u\n", t, i );
            fflush(stdout);
            compare( U, x_vals, M, D, t );
            t_out += dt_out;
        }
    }

    printf( "\nDone at time: %.6lf and Nsteps: %u\n\n", t, i );
    free( intParams );
    free( dblParams );
    free( U );
    free( x_vals );
    ierr = MPI_Barrier( MPI_COMM_WORLD );
    return ierr;
}
/* Tasks of WORKER:
1. Unpack initial iparms and parms arrays, local Mz = Mz / nWRs
2. Exchange "boundary" values with neighbors
3. Do timestepping computation
4. Send output to MR every dtout
*/
int worker( int nWrs, int Me )
{
    double t = 0.0;
    unsigned int i;
    int ierr;
    int nodeLHS, nodeRHS;
    int numInts = 2;
    int numDbls = 6;
    int* intParams;
    double* dblParams;

    int N_max, M;
    double D, tout, dt_out, dt, a, b, mu, dx;
    double* U;
    double* F;
    double* x_vals;

    intParams = ( int* ) malloc(( numInts ) * sizeof( int ));
    dblParams = ( double* ) malloc(( numDbls ) * sizeof( double ));

    ierr = MPI_Bcast( intParams, numInts, MPI_INT, MASTER, MPI_COMM_WORLD );
    assert( !ierr );
    ierr = MPI_Bcast( dblParams, numDbls, MPI_DOUBLE, MASTER, MPI_COMM_WORLD );
    assert( !ierr );

    N_max = intParams[0];
    M = intParams[1];

    D = dblParams[0];
    tout = dblParams[1];
    dt_out = dblParams[2];
    dt = dblParams[3];
    a = dblParams[4];
    b = dblParams[5];
    mu = (M * dt)/(b - a);
    dx = (double)(b - a)/M;

    x_vals = calloc((M + 2), sizeof(double));
    U = calloc((M + 2), sizeof(double));
    F = calloc((M + 2), sizeof(double));

    xMesh( a, b, M, x_vals );
    init( M, U ); //set u(a) = 1, u(b) = 0

    for ( i = 1; i <=N_max; i++ ) {
        ierr = MPI_Barrier( MPI_COMM_WORLD ); 
        assert( !ierr );
        t = i * dt;
//          flux( nWrs, Me, F, U, M, dx, D, t );
//          pde( nWrs, Me, F, U, M, D, mu, t, b );
        if (t >= tout) {
            send_output_MPI( nWrs, Me, M, U );
            tout += dt_out;
        }
    }

    free ( intParams );
    free ( dblParams );
    deleteMem ( U, F, x_vals );
    fflush(stdout);
    ierr = MPI_Barrier( MPI_COMM_WORLD );
    return ierr;
}
// only done by master
void recv_output_MPI( int nWrs, int M, double* U )
{
    int Ime;
    unsigned int i, source;
    unsigned int msgtag = 1000;
    int ierr, offset;
    int chunkSize = (M + 2)/nWrs;
    unsigned int end;
    double* tmp;

    MPI_Status status;
    MPI_Datatype Mytype;

    ierr = MPI_Type_contiguous( chunkSize, MPI_DOUBLE, &Mytype );
    assert ( !ierr );
    ierr = MPI_Type_commit( &Mytype );
    assert ( !ierr );

    for ( i = 1; i <= nWrs; i++ ) {
        source = i;
        msgtag = i * msgtag;
        offset = (i - 1) * chunkSize;
        end = i * chunkSize;
        tmp = ( double* ) malloc(( chunkSize ) * sizeof( double ));
        ierr = MPI_Recv( &offset, 1, MPI_INT, source, msgtag, MPI_COMM_WORLD, &status ); 
        ierr = MPI_Recv( tmp, chunkSize, MPI_DOUBLE, source, msgtag+1, MPI_COMM_WORLD, &status );
        assert ( !ierr );
        for ( i = offset; i < end; i++ ){
            U[i] = tmp[i];
        }
    }
    ierr = MPI_Type_free( &Mytype );
    assert ( !ierr );
    free( tmp );
    return;
}
void send_output_MPI( int nWrs, int Me, int M, double* U )//TODO:fix
{
    int ierr, msgtag, i;
    int start = (Me - 1) * (M/nWrs)+1;
    int chunkSize = (M + 2)/nWrs;
    unsigned int offset = ( Me - 1 ) * chunkSize;
    unsigned int end = Me * chunkSize;
    double* sendVals  = calloc( chunkSize, sizeof( double ));
    assert( sendVals != NULL );
    MPI_Datatype Mytype;

    ierr = MPI_Type_contiguous( chunkSize, MPI_DOUBLE, &Mytype );
    assert ( !ierr );
    ierr = MPI_Type_commit( &Mytype );
    assert ( !ierr );

    msgtag = Me * 1000;
    ierr = MPI_Send( &offset, 1, MPI_INT, MASTER, msgtag, MPI_COMM_WORLD ); 

//     memcpy( &sendVals[0], &U[offset], chunkSize * sizeof( double ));
    // send part of the U array
    ierr = MPI_Send( sendVals, chunkSize, Mytype, MASTER, msgtag+1, MPI_COMM_WORLD );
    assert( !ierr );
    ierr = MPI_Type_free( &Mytype );
    assert ( !ierr );
    free( sendVals );
    return;
}
/*  produces nodal values for x-array
    takes interval endpoints [a,b] as double and
    M as number of nodal values
    returns pointer to x-array
    precondition: x-array allocated in main,
    postcondition: x-array populated in main
*/
void xMesh( double a, double b, int M, double* x )
{
    unsigned int i;
    double dx = (b - a)/M;
    x[0] = a;
    for (i = 1; i < M+1; i++)
        x[i] = a + (i - 0.5)*dx;
    x[M + 1] = b;
    return;
}
/* function: init
 * takes: endpoints, a and b, x-array and pde struct
 * returns: nothing
 * precondition: U and x array initialized, x-array defined
 * postcondition: u_init set in U array
 */
void init( int M, double* U )
{
    U[0] = 1.0;
    U[M+1] = 0.0;
}

void deleteMem( double* F, double* U, double* x )
{
    free( F );
    free( U );
    free( x );
    return;
}
/* function: compare - compares numerical approximation against the
 *           closed-form solution
 * params: U - pointer to u array
           x - pointer to x (spatial) array
           M - array size
           D - diffusion coefficient
           t - time
   precondition: arrays initialized and containing values
   postcondition: file 'plot.out' appended with values for plotting
*/
void compare( double* U, double* x, int M, double D, double t )
{
    unsigned int j;
    double error, u_exact;
    double err_max = 0.0;
    double val = 2.0 * sqrt( D * t );

    for ( j = 0; j < M+2; j++ ) {
        u_exact = erfc( x[j]/val );
        error = fabs( u_exact - U[j] );
        fprintf( stdout, "%10.8lf\t%18.16lf\t%18.16lf\n", x[j], U[j], u_exact );
        fflush(stdout);
        err_max = max( error, err_max );
    }
    return;
}
The mismatch is in send_output_MPI: Mytype is created with MPI_Type_contiguous as chunkSize doubles, so MPI_Send( sendVals, chunkSize, Mytype, ... ) tries to send chunkSize * chunkSize doubles out of a buffer that only holds chunkSize of them, while the master only receives chunkSize MPI_DOUBLEs. Either of these matches the receive:

ierr = MPI_Send(sendVals, chunkSize, MPI_DOUBLE, MASTER, msgtag+1, MPI_COMM_WORLD);
ierr = MPI_Send(sendVals, 1, Mytype, MASTER, msgtag+1, MPI_COMM_WORLD);

Simpler still, drop the derived datatype and the temporary buffer entirely (the memcpy into sendVals is commented out, so it only ever sends zeros) and send straight out of the U array, receiving straight into the master's copy of U:
void send_output_MPI( int nWrs, int Me, int M, double* U )
{
    int ierr;
    int chunkSize = (M + 2)/nWrs;
    unsigned int offset = ( Me - 1 ) * chunkSize;
    int msgtag = Me * 1000;

    // send this worker's part of the U array; count and datatype now match the receive
    ierr = MPI_Send( &U[offset], chunkSize, MPI_DOUBLE, MASTER, msgtag+1, MPI_COMM_WORLD );
    return;
}

// only done by master
void recv_output_MPI( int nWrs, int M, double* U )
{
    unsigned int i, source;
    unsigned int msgtag = 1000;
    int ierr, offset;
    int chunkSize = (M + 2)/nWrs;
    MPI_Status status;

    for ( i = 1; i <= nWrs; i++ ) {
        source = i;
        msgtag = i * 1000;   // matches the worker's Me * 1000 tag (i * msgtag would compound across iterations)
        offset = (i - 1)*chunkSize;
        ierr = MPI_Recv( &U[offset], chunkSize, MPI_DOUBLE, source, msgtag+1, MPI_COMM_WORLD, &status );

    }
    return;
}
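
For reference, here is a minimal, self-contained sketch of the same pattern outside the solver (not the poster's code): rank 0 acts as MASTER, each worker fills its chunk of a dynamically allocated array and sends it straight from &U[offset] with count chunkSize and type MPI_DOUBLE, so the send matches the receive exactly. It assumes MASTER is rank 0, that (M + 2) divides evenly by the number of workers, and that at least two ranks are launched.

#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>

#define MASTER 0

int main( int argc, char *argv[] )
{
    int nProc, myID;
    int M = 10;                         // U holds M + 2 values, as in the solver

    MPI_Init( &argc, &argv );
    MPI_Comm_size( MPI_COMM_WORLD, &nProc );
    MPI_Comm_rank( MPI_COMM_WORLD, &myID );

    int nWrs = nProc - 1;               // ranks 1..nWrs are workers
    int chunkSize = (M + 2) / nWrs;     // assumes an even split
    double* U = calloc( M + 2, sizeof(double) );

    if ( myID == MASTER ) {
        MPI_Status status;
        for ( int i = 1; i <= nWrs; i++ ) {
            int offset = (i - 1) * chunkSize;
            // receive worker i's chunk directly into its slice of U
            MPI_Recv( &U[offset], chunkSize, MPI_DOUBLE, i, i * 1000 + 1,
                      MPI_COMM_WORLD, &status );
        }
        for ( int j = 0; j < M + 2; j++ )
            printf( "U[%d] = %f\n", j, U[j] );
    } else {
        int offset = (myID - 1) * chunkSize;
        for ( int j = offset; j < offset + chunkSize; j++ )
            U[j] = (double) myID;       // stand-in for the FVM update
        // count and datatype match the receive: chunkSize MPI_DOUBLEs
        MPI_Send( &U[offset], chunkSize, MPI_DOUBLE, MASTER, myID * 1000 + 1,
                  MPI_COMM_WORLD );
    }

    free( U );
    MPI_Finalize( );
    return EXIT_SUCCESS;
}

Something like mpiexec -n 4 ./chunk_demo exercises it with three workers (the binary name is just an example).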