
MPI_Type_Create_Hindexed_Block generates a wrong extent for a derived datatype


Using Fortran, I am trying to build a derived datatype for a dynamically allocated structure, but the new type gets a wrong extent. The code is as follows:

PROGRAM MAIN
IMPLICIT NONE
INCLUDE 'mpif.h'
INTEGER :: I
INTEGER :: MYID,NUMPROCS,IError
INTEGER :: Extent,Size,Disp(2)
INTEGER :: Status(MPI_STATUS_SIZE)
INTEGER :: New_Type, Blocks(3), Types(3), Offsets(3), POS(2)
INTEGER :: POS_(4)
INTEGER :: ElmOffset(3),Send_Type
INTEGER :: M

TYPE Struct    
    INTEGER :: N
    REAL :: A
    REAL :: B(2)
END TYPE Struct
TYPE(Struct),ALLOCATABLE :: Structs(:)

    M=9

    CALL MPI_INIT( IError )
    CALL MPI_COMM_SIZE( MPI_COMM_WORLD, NUMPROCS, IError )
    CALL MPI_COMM_RANK( MPI_COMM_WORLD, MYID,     IError )

    ALLOCATE( Structs(M) )
    DO I=1,M
        Structs(I)%N = I*1000 + MYID
        Structs(I)%A = 250.0_8 + MYID*1.0
        Structs(I)%B(1) = 10.0_8 + MYID*1.0
        Structs(I)%B(2) = 20.0_8 + MYID*1.0
    END DO

    CALL MPI_GET_ADDRESS( Structs(1)%N,    POS_(1), IError )
    CALL MPI_GET_ADDRESS( Structs(1)%A,    POS_(2), IError )
    CALL MPI_GET_ADDRESS( Structs(1)%B(1), POS_(3), IError )
    CALL MPI_GET_ADDRESS( Structs(1)%B(2), POS_(4), IError )
    POS_=POS_ - POS_(1)
    IF (MYID.EQ.0) THEN
        WRITE(*,*) MYID, POS_
    END IF

    Types(1) = MPI_INTEGER
    Types(2) = MPI_DOUBLE_PRECISION
    Types(3) = MPI_DOUBLE_PRECISION

    Offsets(1) = 0
    CALL MPI_GET_ADDRESS( Structs(1)%N, Disp(1), IError )
    CALL MPI_GET_ADDRESS( Structs(1)%A, Disp(2), IError )
    Offsets(2) = Offsets(1) + Blocks(1)*( Disp(2)-Disp(1) )
    Disp(1) = Disp(2)
    CALL MPI_GET_ADDRESS( Structs(1)%B(1), Disp(2), IError )
    Offsets(3) = Offsets(2) + Blocks(2)*( Disp(2)-Disp(1) )

    CALL MPI_TYPE_STRUCT( 3, Blocks, Offsets, Types, New_Type, IError )
    CALL MPI_TYPE_COMMIT( New_Type, IError )

    CALL MPI_TYPE_EXTENT(New_Type, Extent, IError)
    CALL MPI_TYPE_SIZE(New_Type, Size, IError)
    IF (MYID.EQ.0) THEN
        WRITE(*,*) 'New_Type extents = ', Extent
        WRITE(*,*) 'New_Type size = ', Size
    END IF

    CALL MPI_GET_ADDRESS( Structs(1)%N, ElmOffset(1), IError )
    CALL MPI_GET_ADDRESS( Structs(2)%N, ElmOffset(2), IError )
    CALL MPI_GET_ADDRESS( Structs(3)%N, ElmOffset(3), IError )
    ElmOffset=ElmOffset - ElmOffset(1)

    IF (MYID.EQ.0) THEN
        WRITE(*,*) MYID,ElmOffset
    END IF

    CALL MPI_TYPE_CREATE_HINDEXED_BLOCK( 3, 1, ElmOffset, New_Type, Send_Type, IError )
    CALL MPI_TYPE_COMMIT( Send_Type, IError )

    CALL MPI_TYPE_EXTENT( Send_Type, Extent, IError )
    CALL MPI_TYPE_SIZE( Send_Type, Size, IError )

    IF (MYID.EQ.0) THEN
        WRITE(*,*) 'Send_Type extents = ', Extent
        WRITE(*,*) 'Send_Type size = ', Size
    END IF

    CALL MPI_TYPE_FREE(Send_Type,IError)
    CALL MPI_TYPE_FREE(New_Type,IError)
    CALL MPI_FINALIZE(IError)

END PROGRAM MAIN
The results are as follows:

            POS_ : 0  8  16  24
New_Type Extents : 32
   New_Type Size : 28
The results above show no problem, but then:

      ElemOffsets :  0  32  64
Send_Type Extents : -32             <= Problem is here !!! It should be 96
   Send_Type Size :  84
However, a runtime error occurs: Program Exception - Access Violation.

I do not know what is wrong... but the Send_Type must not have been created correctly.


How can I solve this problem?

The problem is that on 64-bit operating systems the size of an address is larger than that of a 32-bit integer. Hence the function int MPI_Get_address(const void *location, MPI_Aint *address) outputs an MPI_Aint, which is large enough to hold an address; indeed, an MPI_Aint can be larger than an MPI_INT. In Fortran, the MPI_Aint is written INTEGER (KIND=MPI_ADDRESS_KIND). See also Section 2.5.6 on page 48 of the MPI standard.

Consequently, the datatype INTEGER (KIND=MPI_ADDRESS_KIND) must be used whenever addresses are involved (for POS_, Disp, Offsets, Extent and ElmOffset).

Here is a modified sample code based on yours, to be compiled by mpif90 main.f90 -o main -Wall and run by mpirun -np 2 main:

PROGRAM MAIN
IMPLICIT NONE
INCLUDE 'mpif.h'
INTEGER :: I
INTEGER :: MYID,NUMPROCS,IError
INTEGER :: Size
INTEGER :: Status(MPI_STATUS_SIZE)
INTEGER :: New_Type, Blocks(3), Types(3)
INTEGER :: Send_Type
INTEGER :: M
INTEGER (KIND=MPI_ADDRESS_KIND):: Offsets(3),POS_(4), ElmOffset(3), Disp(2),Extent

TYPE Struct    
    INTEGER :: N
    REAL*8 :: A
    REAL*8 :: B(2)
END TYPE Struct
TYPE(Struct),ALLOCATABLE :: Structs(:)
    WRITE(*,*) 'Size of Integer = ',SIZEOF(M)
    WRITE(*,*) 'Size of Integer (KIND=MPI_ADDRESS_KIND)= ',SIZEOF(Extent)
    M=9

    CALL MPI_INIT( IError )
    CALL MPI_COMM_SIZE( MPI_COMM_WORLD, NUMPROCS, IError )
    CALL MPI_COMM_RANK( MPI_COMM_WORLD, MYID,     IError )

    ALLOCATE( Structs(M) )
    DO I=1,M
        Structs(I)%N = I*1000 + MYID
        Structs(I)%A = 250.0_8 + MYID*1.0
        Structs(I)%B(1) = 10.0_8 + MYID*1.0
        Structs(I)%B(2) = 20.0_8 + MYID*1.0
    END DO

    Blocks(1)=1
    Blocks(2)=1
    Blocks(3)=2

    CALL MPI_GET_ADDRESS( Structs(1)%N,    POS_(1), IError )
    CALL MPI_GET_ADDRESS( Structs(1)%A,    POS_(2), IError )
    CALL MPI_GET_ADDRESS( Structs(1)%B(1), POS_(3), IError )
    CALL MPI_GET_ADDRESS( Structs(1)%B(2), POS_(4), IError )
    POS_=POS_ - POS_(1)
    IF (MYID.EQ.0) THEN
        WRITE(*,*) MYID, POS_
    END IF

    Types(1) = MPI_INTEGER
    Types(2) = MPI_DOUBLE_PRECISION
    Types(3) = MPI_DOUBLE_PRECISION

    Offsets(1) = 0
    CALL MPI_GET_ADDRESS( Structs(1)%N, Disp(1), IError )
    CALL MPI_GET_ADDRESS( Structs(1)%A, Disp(2), IError )
    !Offsets(2) = Offsets(1) + Blocks(1)*( Disp(2)-Disp(1) )
    Offsets(2) = Offsets(1) + ( Disp(2)-Disp(1) )
    Disp(1) = Disp(2)
    CALL MPI_GET_ADDRESS( Structs(1)%B(1), Disp(2), IError )
    !Offsets(3) = Offsets(2) + Blocks(2)*( Disp(2)-Disp(1) )
    Offsets(3) = Offsets(2) + ( Disp(2)-Disp(1) )

    CALL MPI_TYPE_CREATE_STRUCT( 3, Blocks, Offsets, Types, New_Type, IError )
    CALL MPI_TYPE_COMMIT( New_Type, IError )

    CALL MPI_TYPE_GET_EXTENT(New_Type, Extent, IError)
    CALL MPI_TYPE_SIZE(New_Type, Size, IError)
    IF (MYID.EQ.0) THEN
        WRITE(*,*) 'New_Type extents = ', Extent
        WRITE(*,*) 'New_Type size = ', Size
    END IF

    CALL MPI_GET_ADDRESS( Structs(1)%N, ElmOffset(1), IError )
    CALL MPI_GET_ADDRESS( Structs(2)%N, ElmOffset(2), IError )
    CALL MPI_GET_ADDRESS( Structs(3)%N, ElmOffset(3), IError )
    ElmOffset=ElmOffset - ElmOffset(1)

    IF (MYID.EQ.0) THEN
        WRITE(*,*) MYID,ElmOffset
    END IF

    CALL MPI_TYPE_CREATE_HINDEXED_BLOCK( 3, 1, ElmOffset, New_Type, Send_Type, IError )
    CALL MPI_TYPE_COMMIT( Send_Type, IError )

    CALL MPI_TYPE_GET_EXTENT( Send_Type, Extent, IError )
    CALL MPI_TYPE_SIZE( Send_Type, Size, IError )

    IF (MYID.EQ.0) THEN
        WRITE(*,*) 'Send_Type extents = ', Extent
        WRITE(*,*) 'Send_Type size = ', Size
    END IF


    IF (MYID.EQ.0) THEN
        DO I=1,(NUMPROCS-1)
            CALL MPI_SEND( Structs(1)%N, 1, Send_Type, I, 0, MPI_COMM_WORLD, IError)
        END DO       
    ELSE
        CALL MPI_RECV( Structs(1)%N, 1, Send_Type, 0, 0, MPI_COMM_WORLD, Status, IError)

    END IF

    WRITE( (MYID+10),*) Structs(1)%N, Structs(1)%A
    WRITE( (MYID+10),*) Structs(1)%B(1), Structs(1)%B(2)

    WRITE( (MYID+100),*) Structs(3)%N, Structs(3)%A
    WRITE( (MYID+100),*) Structs(3)%B(1), Structs(3)%B(2)

    CALL MPI_TYPE_FREE(Send_Type,IError)
    CALL MPI_TYPE_FREE(New_Type,IError)
    CALL MPI_FINALIZE(IError)

END PROGRAM MAIN

I changed REAL :: A to REAL*8 :: A to remove the warning about conversion to double precision in the line Structs(I)%A = 250.0_8 + MYID*1.0. As Hristo Iliev noticed, this is consistent with the new datatype, which uses MPI_DOUBLE_PRECISION.
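As a side note (a minimal illustrative sketch, not part of the original answer), the safest way to avoid such a mismatch is to declare the Fortran component with an explicit kind and describe it with the matching MPI datatype:

! Sketch assumption: REAL*8 is an 8-byte IEEE double on this platform,
! so MPI_DOUBLE_PRECISION (or the size-specific MPI_REAL8) describes it
REAL*8 :: A                      ! the Fortran object occupies 8 bytes
Types(2) = MPI_DOUBLE_PRECISION  ! the MPI description also covers 8 bytes
! Describing a 4-byte default REAL as MPI_DOUBLE_PRECISION would instead
! make MPI read past the end of the component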

The correct way to achieve what you want is the following.

1) Create a structured datatype that represents one record:

CALL MPI_GET_ADDRESS(Structs(1)%N,    POS_(1), IError)
CALL MPI_GET_ADDRESS(Structs(1)%A,    POS_(2), IError)
CALL MPI_GET_ADDRESS(Structs(1)%B(1), POS_(3), IError)
Offsets = POS_ - POS_(1)

Types(1) = MPI_INTEGER
Types(2) = MPI_REAL
Types(3) = MPI_REAL

Blocks(1) = 1
Blocks(2) = 1
Blocks(3) = 2

CALL MPI_TYPE_CREATE_STRUCT(3, Blocks, Offsets, Types, Elem_Type, IError)
This datatype can now be used to send one record of that structure:

CALL MPI_TYPE_COMMIT(Elem_Type, IError)
CALL MPI_SEND(Structs(1), 1, Elem_Type, ...)
2) To send more than one record, first resize the new datatype (i.e. force its extent to have a certain size) to match the true size of the structure. This accounts for any padding that the compiler might insert at the end of a record:

CALL MPI_TYPE_GET_EXTENT(Elem_Type, Lb, Extent, IError)
CALL MPI_GET_ADDRESS(Structs(1)%N, POS_(1), IError)
CALL MPI_GET_ADDRESS(Structs(2)%N, POS_(2), IError)
Extent = POS_(2) - POS_(1)
CALL MPI_TYPE_CREATE_RESIZED(Elem_Type, Lb, Extent, ElemSized_Type, IError)
3) Now you can use the new datatype to send multiple records of the structure:

CALL MPI_TYPE_COMMIT(ElemSized_Type, IError)
CALL MPI_SEND(Structs(1), 3, ElemSized_Type, ...)
Alternatively, you can create a contiguous datatype that covers three elements at once:

CALL MPI_TYPE_CONTIGUOUS(3, ElemSized_Type, BunchOfElements_Type, IError)
CALL MPI_TYPE_COMMIT(BunchOfElements_Type, IError)
CALL MPI_SEND(Structs(1), 1, BunchOfElements_Type, ...)

Note: It is not necessary to commit datatypes that are not used in communication or I/O operations.
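For instance (a minimal sketch, not from the original answer, reusing the names introduced above), when Elem_Type only serves as a building block for ElemSized_Type, only the latter needs to be committed:

CALL MPI_TYPE_CREATE_STRUCT(3, Blocks, Offsets, Types, Elem_Type, IError)
CALL MPI_TYPE_CREATE_RESIZED(Elem_Type, Lb, Extent, ElemSized_Type, IError)
! Elem_Type is never passed to a communication call, so it needs no commit
CALL MPI_TYPE_COMMIT(ElemSized_Type, IError)
CALL MPI_SEND(Structs(1), 3, ElemSized_Type, 1, 0, MPI_COMM_WORLD, IError)
CALL MPI_TYPE_FREE(ElemSized_Type, IError)
CALL MPI_TYPE_FREE(Elem_Type, IError)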

Thank you for your explanation! The data is sent successfully! But the output confuses me: New_Type extent = 160, New_Type size = 208. Why is the type size larger than its extent?

That is because I made a mistake when setting the Blocks... It should be Blocks(1) = 1 rather than Blocks(1) = SIZEOF(Structs(1)%N), which is a block size in bytes instead of the number of items in each block. Sorry! I will correct it tonight.

I finally corrected it! The functions MPI_TYPE_STRUCT and MPI_TYPE_EXTENT are both deprecated; I should use MPI_TYPE_CREATE_STRUCT and MPI_TYPE_GET_EXTENT instead.

Changing REAL to REAL*8 not only removes the warning but also makes the program less error-prone, because the datatypes in the type constructor are specified as MPI_DOUBLE_PRECISION. Besides, MPI_TYPE_STRUCT and MPI_TYPE_EXTENT were removed in MPI-3.0.

Actually, the default real kind is set to 8. @HristoIliev, thank you for your advice! But there is one more problem...
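To make the point in these comments concrete, here is a minimal sketch (not from the original thread, reusing the question's variable names) of the difference between an element count and a byte count in Blocks:

! Correct: Blocks holds the number of items of the corresponding type per
! block -- one INTEGER, one REAL*8 and two REAL*8 per record
Blocks(1) = 1
Blocks(2) = 1
Blocks(3) = 2

! Wrong: SIZEOF returns a size in bytes, so (with a 4-byte default INTEGER)
! the first block would describe 4 integers and the resulting type size
! would exceed the extent of one record
! Blocks(1) = SIZEOF(Structs(1)%N)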