R + snow + Fortran + MPI crashes


This is a follow-up.

I created an R package that uses an MPI Fortran module. This is the module:

Module Fortranpi
USE MPI
IMPLICIT NONE
contains
subroutine dboard(darts, dartsscore)
  integer, intent(in)                    :: darts
  double precision, intent(out)          :: dartsscore
  double precision                       :: x_coord, y_coord
  integer                                :: score, n

score = 0
do n = 1, darts
  call random_number(x_coord)
  call random_number(y_coord)

  if ((x_coord**2 + y_coord**2) <= 1.0d0) then
  score = score + 1
  end if
end do

dartsscore = 4.0d0*score/darts

end subroutine dboard

subroutine pi(avepi, DARTS, ROUNDS) bind(C, name="pi_")
  use, intrinsic                         :: iso_c_binding, only : c_double, c_int
  real(c_double), intent(out)            ::  avepi
  integer(c_int), intent(in)             ::  DARTS, ROUNDS
  integer                                ::  MASTER, rank, i, n
  integer, allocatable                   ::  seed(:)
  double precision                       ::  pi_est, homepi, pirecv, pisum

! we set it to zero in the sequential run
rank = 0
! initialize the random number generator
! we make sure the seed is different for each task
call random_seed()
call random_seed(size = n)
allocate(seed(n))
seed = 12 + rank*11
call random_seed(put=seed(1:n))
deallocate(seed)

avepi = 0
do i = 0, ROUNDS-1
  call dboard(darts, pi_est)
  ! calculate the average value of pi over all iterations
  avepi = ((avepi*i) + pi_est)/(i + 1)
end do
end subroutine pi


subroutine MPIpi(avepi, DARTS, ROUNDS) bind(C, name="pi2_")
use, intrinsic                         :: iso_c_binding, only : c_double, c_int
real(c_double), intent(out)            :: avepi
integer(c_int), intent(in)             :: DARTS, ROUNDS
integer                                :: i, n, mynpts, ierr, numprocs, proc_num
integer, allocatable                   :: seed(:)
double precision                       :: pi_est, y, sumpi

  call mpi_init(ierr)
  call mpi_comm_size(MPI_COMM_WORLD, numprocs, ierr)
  call mpi_comm_rank(MPI_COMM_WORLD, proc_num, ierr)

  ! rank 0 takes the remainder so that all ROUNDS rounds are distributed
  if (proc_num .eq. 0) then
    mynpts = ROUNDS - (numprocs-1)*(ROUNDS/numprocs)
  else
    mynpts = ROUNDS/numprocs
  endif

  ! initialize the random number generator
  ! we make sure the seed is different for each task
  call random_seed()
  call random_seed(size = n)
  allocate(seed(n))
  seed = 12 + proc_num*11
  call random_seed(put=seed(1:n))
  deallocate(seed)

  y=0.0d0
    do i = 1, mynpts
    call dboard(darts, pi_est)
    y = y + pi_est
  end do

  call mpi_reduce(y, sumpi, 1, mpi_double_precision, mpi_sum, 0, &
                  mpi_comm_world, ierr)
  if (proc_num==0) avepi = sumpi/ROUNDS
  call mpi_finalize(ierr)
end subroutine MPIpi

end module Fortranpi
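
The R wrappers that the MyPi package exports are not shown in the question. A minimal sketch of what they might look like, assuming the shared library is loaded via useDynLib(MyPi) and that the MPI wrapper is the MyPi::FMPIpi called further down (the serial wrapper name Fpi is purely illustrative):

# Hypothetical R wrappers for the bind(C) entry points "pi_" and "pi2_".
# Assumes the compiled module is made available to R, e.g. via useDynLib(MyPi).
Fpi <- function(DARTS, ROUNDS) {
  res <- .C("pi_", avepi = as.double(0),
            DARTS = as.integer(DARTS), ROUNDS = as.integer(ROUNDS))
  res$avepi
}

FMPIpi <- function(DARTS, ROUNDS) {
  res <- .C("pi2_", avepi = as.double(0),
            DARTS = as.integer(DARTS), ROUNDS = as.integer(ROUNDS))
  res$avepi
}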
test.f90

subroutine test(id, ierr)
use mpi
implicit none
integer*4 id, ierr
call MPI_Comm_rank(MPI_COMM_WORLD, id, ierr)
end subroutine test
hello.R

hello <- function() {
  r <- .Fortran("test", as.integer(0), as.integer(0))
  return(r)
}
From the comments:

MyPi:: should be the same (I think). I added library(MyPi), and it made no difference :(

Then do something simple until it works. I have already used the (non-Fortran) example in my (old) "Intro to HPC with R" slides; see my website.

I'll take a look. Thanks! I got this to work with snow, but I have a follow-up question. Now I'm using:

NAMESPACE

  useDynLib(HelloFMPI)
  exportPattern("^[[:alpha:]]+")
This works:

library(HelloFMPI)
library(snow)
cl <- makeCluster(2, type = "MPI")
clusterEvalQ(cl, HelloFMPI::hello())
stopCluster(cl)
But this crashes:

library(snow)
cl <- makeCluster(2, type = "MPI")
clusterEvalQ(cl, MyPi::FMPIpi(DARTS = 5000, ROUNDS = 100))
stopCluster(cl)
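
In the spirit of the "do something simple until it works" advice from the comments, one way to narrow the crash down is to check the non-MPI pieces on the workers first. This is only a sketch: Fpi is the hypothetical serial wrapper from above, and it assumes MyPi is installed where the workers run.

# Check that each snow worker can load the package and run the serial entry
# point before involving the MPI routine at all.
library(snow)
cl <- makeCluster(2, type = "MPI")
clusterEvalQ(cl, library(MyPi))                          # package loads on workers?
clusterEvalQ(cl, MyPi::Fpi(DARTS = 5000, ROUNDS = 100))  # serial pi_, no MPI calls
stopCluster(cl)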