Win Than Aung wrote:
Thanks for your reply, Jeff.

So I tried the following:



#include <stdio.h>
#include <mpi.h>

int main(int argc, char **argv) {
  int np, me, sbuf = -1, rbuf = -2, mbuf = 1000;
  int data[2];

  MPI_Init(&argc,&argv);
  MPI_Comm_size(MPI_COMM_WORLD,&np);
  MPI_Comm_rank(MPI_COMM_WORLD,&me);
  if ( np < 2 ) MPI_Abort(MPI_COMM_WORLD,-1);

  if ( me == 1 ) MPI_Send(&sbuf,1,MPI_INT,0,344,MPI_COMM_WORLD);
  if ( me == 2 ) MPI_Send(&mbuf,1,MPI_INT,0,344,MPI_COMM_WORLD);
  if ( me == 0 ) {
    MPI_Recv(data,2,MPI_INT,MPI_ANY_SOURCE,344,MPI_COMM_WORLD,MPI_STATUS_IGNORE);
  }

  MPI_Finalize();

  return 0;
}


It can successfully receive the message sent from processor 1 (me == 1), but it fails to receive the one sent from processor 2 (me == 2) when run as:

mpirun -np 3 hello
There is only one receive, so it receives only one message.  When you specify the element count for the receive, you're only specifying the capacity of the buffer into which the message will be received; it is an upper bound on one message's size, not the number of messages to wait for.  Only after a message has been received can you inquire how big it actually was.
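
If you want to keep the MPI_ANY_SOURCE style from your program, the usual fix is to post one receive per expected message and pass an MPI_Status so you can ask afterward (via MPI_Get_count) how big each message was and who sent it.  A rough sketch along those lines, reusing your tag 344 and sentinel values (untested; it assumes exactly ranks 1 and 2 each send one int):

#include <stdio.h>
#include <mpi.h>

int main(int argc, char **argv) {
  int np, me, i, rbuf, count;
  MPI_Status status;

  MPI_Init(&argc,&argv);
  MPI_Comm_size(MPI_COMM_WORLD,&np);
  MPI_Comm_rank(MPI_COMM_WORLD,&me);
  if ( np < 3 ) MPI_Abort(MPI_COMM_WORLD,-1);

  if ( me == 1 ) { int sbuf = -1;   MPI_Send(&sbuf,1,MPI_INT,0,344,MPI_COMM_WORLD); }
  if ( me == 2 ) { int mbuf = 1000; MPI_Send(&mbuf,1,MPI_INT,0,344,MPI_COMM_WORLD); }
  if ( me == 0 ) {
    /* two messages are coming, so post two receives */
    for ( i = 0; i < 2; i++ ) {
      MPI_Recv(&rbuf,1,MPI_INT,MPI_ANY_SOURCE,344,MPI_COMM_WORLD,&status);
      /* the status says who sent it and (via MPI_Get_count) how many ints arrived */
      MPI_Get_count(&status, MPI_INT, &count);
      printf("got %d int(s) from rank %d: %d\n", count, status.MPI_SOURCE, rbuf);
    }
  }

  MPI_Finalize();
  return 0;
}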

Here is a complete example, with the root receiving once from each peer:

% cat a.c
#include <stdio.h>
#include <mpi.h>

int main(int argc, char **argv) {
  int np, me, peer, value;

  MPI_Init(&argc,&argv);
  MPI_Comm_size(MPI_COMM_WORLD,&np);
  MPI_Comm_rank(MPI_COMM_WORLD,&me);

  value = me * me + 1;
  if ( me == 0 ) {
    for ( peer = 0; peer < np; peer++ ) {
      if ( peer != 0 ) MPI_Recv(&value,1,MPI_INT,peer,343,MPI_COMM_WORLD,MPI_STATUS_IGNORE);
      printf("peer %d had value %d\n", peer, value);
    }
  }
  else MPI_Send(&value,1,MPI_INT,0,343,MPI_COMM_WORLD);

  MPI_Finalize();

  return 0;
}
% mpirun -np 3 a.out
peer 0 had value 1
peer 1 had value 2
peer 2 had value 5
%

Alternatively,

#include <stdio.h>
#include <mpi.h>

#define MAXNP 1024
int main(int argc, char **argv) {
  int np, me, peer, value, values[MAXNP];

  MPI_Init(&argc,&argv);
  MPI_Comm_size(MPI_COMM_WORLD,&np);
  if ( np > MAXNP ) MPI_Abort(MPI_COMM_WORLD,-1);
  MPI_Comm_rank(MPI_COMM_WORLD,&me);
  value = me * me + 1;

  MPI_Gather(&value, 1, MPI_INT,
             values, 1, MPI_INT, 0, MPI_COMM_WORLD);

  if ( me == 0 )
    for ( peer = 0; peer < np; peer++ )
      printf("peer %d had value %d\n", peer, values[peer]);

  MPI_Finalize();
  return 0;
}
% mpirun -np 3 a.out
peer 0 had value 1
peer 1 had value 2
peer 2 had value 5
%

Which is better?  Up to you.  The collective routines (like MPI_Gather) do offer MPI implementors (like people developing Open MPI) the opportunity to perform special optimizations (e.g., gather using a binary tree instead of having the root process perform so many receives).
