I am using OpenMPI 1.0.1 compiled with g95 on OS X (the same problem occurs on Debian Linux with g95; I have not tested other compilers yet).

mpif90 spawn.f90 -o spawn
In file spawn.f90:35

    MPI_COMM_WORLD, slavecomm, MPI_ERRCODES_IGNORE, ierr )
                                                          1
Error: Generic subroutine 'mpi_comm_spawn' at (1) is not consistent with a specific subroutine interface
make: *** [spawn] Error 1

I can't see the problem with the following call; all the arguments match the information presented in the book "Using MPI-2", page 236:

call MPI_Comm_spawn('subprocess', MPI_ARGV_NULL, universe_size-1, MPI_INFO_NULL, 0, &
    MPI_COMM_WORLD, slavecomm, MPI_ERRCODES_IGNORE, ierr )
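
For reference, here is the Fortran binding for MPI_COMM_SPAWN as given in the MPI-2 standard (transcribed by hand, so treat it as a reference rather than gospel):

    MPI_COMM_SPAWN(COMMAND, ARGV, MAXPROCS, INFO, ROOT, COMM, INTERCOMM,
                   ARRAY_OF_ERRCODES, IERROR)
        CHARACTER*(*) COMMAND, ARGV(*)
        INTEGER INFO, MAXPROCS, ROOT, COMM, INTERCOMM,
                ARRAY_OF_ERRCODES(*), IERROR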

The entire test program follows:

program main
  USE MPI
  implicit none
  integer :: ierr,size,rank,slavecomm
  integer(kind=MPI_ADDRESS_KIND) :: universe_size
  integer :: status(MPI_STATUS_SIZE)
  logical :: flag
  integer :: ans
  integer :: k

  call MPI_INIT(ierr)
  call MPI_COMM_RANK(MPI_COMM_WORLD,rank,ierr)
  call MPI_COMM_SIZE(MPI_COMM_WORLD,size,ierr)

  if ( size /= 1 ) then
    if ( rank == 0 ) then
      write(*,*) 'Only one master process permitted'
      write(*,*) 'Terminating all but root process'
    else
      call MPI_FINALIZE(ierr)
      stop
    end if
  end if

  call MPI_Comm_get_attr(MPI_COMM_WORLD, MPI_UNIVERSE_SIZE, universe_size, flag, ierr)
  if ( .not. flag ) then
    write(*,*) 'This MPI does not support UNIVERSE_SIZE.'
    write(*,*) 'How many processes total?'
    read(*,*) universe_size
  else if ( universe_size < 2 ) then
    write(*,*) 'How many processes total?'
    read(*,*) universe_size
  end if
  call MPI_Comm_spawn('subprocess', MPI_ARGV_NULL, universe_size-1, MPI_INFO_NULL, 0, &
       MPI_COMM_WORLD, slavecomm, MPI_ERRCODES_IGNORE, ierr )

  do k = 1, universe_size-1
    write(*,*) 'master receiving'
    call MPI_RECV( ans, 1, MPI_INTEGER, MPI_ANY_SOURCE, MPI_ANY_TAG, slavecomm, status, ierr )
    write(*,*) 'answer=',ans,' from alpha',k
  end do

  call MPI_COMM_FREE(slavecomm,ierr)

  call MPI_FINALIZE(ierr)
end program main
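
The spawned 'subprocess' is not included above; it boils down to something like the following sketch (simplified child side of the exchange: get the parent intercommunicator and send one default INTEGER back to rank 0 of the master):

program subprocess
  USE MPI
  implicit none
  integer :: ierr, rank, parentcomm
  integer :: ans

  call MPI_INIT(ierr)
  ! intercommunicator back to the process that called MPI_Comm_spawn
  call MPI_COMM_GET_PARENT(parentcomm, ierr)
  call MPI_COMM_RANK(MPI_COMM_WORLD, rank, ierr)
  ans = rank + 1                       ! placeholder "answer"
  ! the master posts one MPI_RECV of a single MPI_INTEGER per child
  call MPI_SEND(ans, 1, MPI_INTEGER, 0, 0, parentcomm, ierr)
  call MPI_FINALIZE(ierr)
end program subprocess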
