
Mapping a vector in Fortran with MPI

Tags: fortran, mpi, mpi-rma

I am trying to map a vector A(m) onto B(m) using map(m), that is, to transfer the elements of A into B on the appropriate process using one-sided communication. There is an example of this in the MPI standard. I wrote a small main program around the subroutine and copied the example subroutine. Assuming I have only 2 processes, I try to transfer all elements from process 0 to process 1 and vice versa. However, I get a segmentation fault with the code below.
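For reference, the comment block inside MAPVALS (kept in the code below) states that entry i at the calling process is received from location k at process (j-1), where map(i) = (j-1)*m + (k-1), with j = 1..p and k = 1..m. The standalone snippet here is only my illustration of that decoding, not part of the failing program:

    ! Illustration only: decode one map entry under the convention
    ! map(i) = (j-1)*m + (k-1) used by the MAPVALS example.
    program decode_map
        implicit none
        integer, parameter :: m = 10
        integer :: mapval, j, k
        mapval = 13                ! an example global index
        j = mapval/m + 1           ! 1-based source process index
        k = mod(mapval, m) + 1     ! 1-based location on that process
        print *, 'fetch element', k, 'of A on process', j-1   ! element 4 on process 1
    end program decode_map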

Both the Intel and gfortran compilers show the same problem. This is how I compile:

mpif90 -O0 -debug -traceback -check -ftrapuv mpimapp.f90

And run with: mpirun -np 2 ./a.out

    module mpi
        include "mpif.h"
    end module mpi

    program mpimap

        use mpi
        implicit none
        integer, parameter :: m=10
        REAL A(m), B(m) 
        integer map(m)
        integer rank, nproc, ierror, tag, status(MPI_STATUS_SIZE),i

        call MPI_INIT(ierror)
        call MPI_COMM_SIZE(MPI_COMM_WORLD, nproc, ierror)
        call MPI_COMM_RANK(MPI_COMM_WORLD, rank, ierror)

        print *, 'node', rank, ': Hello world',nproc

        A(:)=(rank) * .10

        do i=1, m
            if (rank==0) then
                map(i)=1
            else
                map(i)=0
            endif
            print*, 'node', rank, A(m), map(i)
        enddo

        call MPI_BARRIER(MPI_COMM_WORLD, ierror)

        call MAPVALS(A, B, map, m, MPI_COMM_WORLD, nproc)

        call MPI_FINALIZE(ierror)

    end program mpimap

    SUBROUTINE MAPVALS(A, B, map, m, comm, p) 
    USE MPI 
    INTEGER m, map(m), comm, p 
    REAL A(m), B(m) 
    integer rank

    INTEGER otype(p), oindex(m),   & ! used to construct origin datatypes  
         ttype(p), tindex(m),      & ! used to construct target datatypes 
         count(p), total(p),       & 
         win, ierr 
    INTEGER (KIND=MPI_ADDRESS_KIND) lowerbound, sizeofreal 


     call MPI_COMM_RANK(MPI_COMM_WORLD, rank, ierr)


    ! This part does the work that depends on the locations of B. 
    ! Can be reused while this does not change 

    CALL MPI_TYPE_GET_EXTENT(MPI_REAL, lowerbound, sizeofreal, ierr) 
    CALL MPI_WIN_CREATE(B, m*sizeofreal, sizeofreal, MPI_INFO_NULL,   & 
                         comm, win, ierr) 

    ! This part does the work that depends on the value of map and 
    ! the locations of the arrays. 
    ! Can be reused while these do not change 

    ! Compute number of entries to be received from each process 

    DO i=1,p 
      count(i) = 0 
    END DO 

    DO i=1,m 
      j = map(i)/m+1 
      count(j) = count(j)+1 
    END DO 

    total(1) = 0 
    DO i=2,p 
      total(i) = total(i-1) + count(i-1) 
    END DO 

    DO i=1,p 
      count(i) = 0 
    END DO 

    ! compute origin and target indices of entries. 
    ! entry i at current process is received from location 
    ! k at process (j-1), where map(i) = (j-1)*m + (k-1), 
    ! j = 1..p and k = 1..m 

    DO i=1,m 
      j = map(i)/m+1 
      k = MOD(map(i),m)+1 
      count(j) = count(j)+1 
      oindex(total(j) + count(j)) = i 
      tindex(total(j) + count(j)) = k 
    END DO 



    ! create origin and target datatypes for each get operation 
    DO i=1,p 
      CALL MPI_TYPE_CREATE_INDEXED_BLOCK(count(i), 1, oindex(total(i)+1), & 
                                         MPI_REAL, otype(i), ierr) 
      CALL MPI_TYPE_COMMIT(otype(i), ierr) 
      CALL MPI_TYPE_CREATE_INDEXED_BLOCK(count(i), 1, tindex(total(i)+1), & 
                                         MPI_REAL, ttype(i), ierr) 
      CALL MPI_TYPE_COMMIT(ttype(i), ierr) 
    END DO 

    ! this part does the assignment itself 
    CALL MPI_WIN_FENCE(0, win, ierr) 
    DO i=1,p 
      CALL MPI_GET(A, 1, otype(i), i-1, 0, 1, ttype(i), win, ierr) 
    END DO 
    CALL MPI_WIN_FENCE(0, win, ierr) 

    CALL MPI_WIN_FREE(win, ierr) 
    DO i=1,p 
      CALL MPI_TYPE_FREE(otype(i), ierr) 
      CALL MPI_TYPE_FREE(ttype(i), ierr) 
    END DO 
    RETURN 
    END SUBROUTINE MAPVALS

Where exactly does the segfault occur? Does it print anything before crashing?

I cannot reproduce the segfault; it compiles and runs fine with gfortran 4.4.3 on Ubuntu Linux with OpenMPI 1.4.1 (mpif90 -O2 test.f90 -o test).

Actually, I have now checked with Intel, and it tells me "the value 11 of subscript 1 of array OINDEX is greater than the upper bound of 10", which happens at the call CALL MPI_TYPE_CREATE_INDEXED_BLOCK(count(i), 1, oindex(total(i)+1), MPI_REAL, otype(i), ierr).

@HosseinTalebi is absolutely right. I just tried enabling -fbounds-check and got the same runtime error.

I am using gfortran 4.6 or Intel 12.1 on Ubuntu 11.1. Maybe I typed something wrong in my main program?
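That out-of-bounds reference is consistent with the map values set in the main program: map(i) is set to 0 or 1, which looks like a rank rather than a global index, so every entry decodes to process 0 (j = map(i)/m + 1 = 1), count for the other process stays zero, total(2) becomes m, and oindex(total(2)+1) = oindex(11) is referenced. A sketch of how the map could instead be built as global indices under the MAPVALS convention for the two-process swap described in the question (the subroutine name build_swap_map is mine, and this is an untested illustration, not a confirmed fix):

    ! Sketch only: build map as global indices, map(i) = (j-1)*m + (k-1),
    ! so that each of the two ranks fetches all m elements from the other rank.
    subroutine build_swap_map(rank, m, map)
        implicit none
        integer, intent(in)  :: rank, m
        integer, intent(out) :: map(m)
        integer :: i
        do i = 1, m
            if (rank == 0) then
                map(i) = 1*m + (i-1)   ! element i of A on process 1
            else
                map(i) = 0*m + (i-1)   ! element i of A on process 0
            end if
        end do
    end subroutine build_swap_map

The main program would then call build_swap_map(rank, m, map) in place of the original map loop. Even with a valid map, a process from which nothing is fetched still passes oindex(total(i)+1) to MPI_TYPE_CREATE_INDEXED_BLOCK, which a bounds checker may flag whenever total(i) happens to equal m; guarding that call with IF (count(i) > 0) is a defensive option.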