Hello,

When I run this code:

program testcase

    use mpi
    implicit none

    integer :: rank, lsize, rsize, code
    integer :: intercomm

    call MPI_INIT(code)

    call MPI_COMM_GET_PARENT(intercomm, code)
    if (intercomm == MPI_COMM_NULL) then
        ! no parent: this is an originally launched (parent) process
        call MPI_COMM_SPAWN("./testcase", MPI_ARGV_NULL, 1, MPI_INFO_NULL, &
             0, MPI_COMM_WORLD, intercomm, MPI_ERRCODES_IGNORE, code)
        call MPI_COMM_RANK(MPI_COMM_WORLD, rank, code)
        call MPI_COMM_SIZE(MPI_COMM_WORLD, lsize, code)
        call MPI_COMM_SIZE(intercomm, rsize, code)
        if (rank == 0) then
            print *, 'from parent: local size is ', lsize
            print *, 'from parent: remote size is ', rsize
        end if
    else
        ! this branch runs in the spawned child
        call MPI_COMM_SIZE(MPI_COMM_WORLD, lsize, code)
        call MPI_COMM_SIZE(intercomm, rsize, code)
        print *, 'from child: local size is ', lsize
        print *, 'from child: remote size is ', rsize
    end if

    call MPI_FINALIZE (code)

end program testcase
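
For reference, I build and launch it roughly like this (assuming the
standard Open MPI compiler wrapper; exact names may differ on your
install):

mpif90 -o testcase testcase.f90
mpirun -np 2 ./testcase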

I get the following results with Open MPI 1.4.1 and two processes:

 from parent: local size is            2
 from parent: remote size is            2
 from child: local size is            1
 from child: remote size is            1


I would have expected:

 from parent: local size is            2
 from parent: remote size is            1
 from child: local size is            1
 from child: remote size is            2


Could anyone tell me what's going on? It's not a Fortran issue: I can
replicate the same behaviour with mpi4py. It is probably related to the
universe size, but I haven't found a way to pass that to mpirun.
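
In case it helps, the mpi4py version looks roughly like this (a sketch;
it assumes the script is saved as testcase.py):

import sys
from mpi4py import MPI

intercomm = MPI.Comm.Get_parent()
if intercomm == MPI.COMM_NULL:
    # parent: spawn a single child running this same script
    intercomm = MPI.COMM_WORLD.Spawn(sys.executable,
                                     args=['testcase.py'],
                                     maxprocs=1, root=0)
    if MPI.COMM_WORLD.Get_rank() == 0:
        print('from parent: local size is %d' % MPI.COMM_WORLD.Get_size())
        print('from parent: remote size is %d' % intercomm.Get_size())
else:
    # child: report the sizes seen through the intercommunicator
    print('from child: local size is %d' % MPI.COMM_WORLD.Get_size())
    print('from child: remote size is %d' % intercomm.Get_size())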

Cheers,
Pierre
