Hi,

I'm having a problem with MPI_Isend, MPI_Recv and MPI_Test on Linux Mint 16
Petra. The source is attached.

Open MPI 1.10.2 is configured with
./configure --enable-debug --prefix=/home/<me>/Tool/openmpi-1.10.2-debug

The source is built with
~/Tool/openmpi-1.10.2-debug/bin/mpiCC a5.cpp

and run on a single node with
~/Tool/openmpi-1.10.2-debug/bin/mpirun -n 2 ./a.out

The output is at the end. What puzzles me is why MPI_Test is called so many
times and why it takes so long (about four seconds) to send the message. Am I
doing something wrong? This is a simplified version of a more complicated
program: rank 0 Isends data to rank 1, computes (the usleep here), and calls
MPI_Test to check whether the data have been sent; rank 1 Recvs the data and
then computes. The core of the sending side is sketched just below.
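
For clarity, the sending side of the attached a5.cpp boils down to roughly
this (a trimmed-down sketch; the full source with the timestamping follows
the log):

  MPI_Request req;
  MPI_Status status;
  // post the non-blocking send of n ints to rank 1
  MPI_Isend(&vec[0][0], n, MPI_INT, 1, 0, MPI_COMM_WORLD, &req);

  int done = 0;
  while (done == 0)
  {
    usleep(100000);                  // simulate 0.1 s of computation
    MPI_Test(&req, &done, &status);  // has the send completed yet?
  }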

Thanks in advance.


Best regards,
Zhen

MPI 0: Isend of 0 started at 20:32:35.
MPI 1: Recv of 0 started at 20:32:35.
MPI 0: MPI_Test of 0 at 20:32:35.
MPI 0: MPI_Test of 0 at 20:32:35.
MPI 0: MPI_Test of 0 at 20:32:35.
MPI 0: MPI_Test of 0 at 20:32:35.
MPI 0: MPI_Test of 0 at 20:32:35.
MPI 0: MPI_Test of 0 at 20:32:35.
MPI 0: MPI_Test of 0 at 20:32:36.
MPI 0: MPI_Test of 0 at 20:32:36.
MPI 0: MPI_Test of 0 at 20:32:36.
MPI 0: MPI_Test of 0 at 20:32:36.
MPI 0: MPI_Test of 0 at 20:32:36.
MPI 0: MPI_Test of 0 at 20:32:36.
MPI 0: MPI_Test of 0 at 20:32:36.
MPI 0: MPI_Test of 0 at 20:32:36.
MPI 0: MPI_Test of 0 at 20:32:36.
MPI 0: MPI_Test of 0 at 20:32:37.
MPI 0: MPI_Test of 0 at 20:32:37.
MPI 0: MPI_Test of 0 at 20:32:37.
MPI 0: MPI_Test of 0 at 20:32:37.
MPI 0: MPI_Test of 0 at 20:32:37.
MPI 0: MPI_Test of 0 at 20:32:37.
MPI 0: MPI_Test of 0 at 20:32:37.
MPI 0: MPI_Test of 0 at 20:32:37.
MPI 0: MPI_Test of 0 at 20:32:37.
MPI 0: MPI_Test of 0 at 20:32:37.
MPI 0: MPI_Test of 0 at 20:32:38.
MPI 0: MPI_Test of 0 at 20:32:38.
MPI 0: MPI_Test of 0 at 20:32:38.
MPI 0: MPI_Test of 0 at 20:32:38.
MPI 0: MPI_Test of 0 at 20:32:38.
MPI 0: MPI_Test of 0 at 20:32:38.
MPI 0: MPI_Test of 0 at 20:32:38.
MPI 0: MPI_Test of 0 at 20:32:38.
MPI 0: MPI_Test of 0 at 20:32:38.
MPI 0: MPI_Test of 0 at 20:32:38.
MPI 0: MPI_Test of 0 at 20:32:39.
MPI 0: MPI_Test of 0 at 20:32:39.
MPI 0: MPI_Test of 0 at 20:32:39.
MPI 0: MPI_Test of 0 at 20:32:39.
MPI 0: MPI_Test of 0 at 20:32:39.
MPI 0: MPI_Test of 0 at 20:32:39.
MPI 1: Recv of 0 finished at 20:32:39.
MPI 0: MPI_Test of 0 at 20:32:39.
MPI 0: Isend of 0 finished at 20:32:39.
#include "mpi.h"
#include <unistd.h>
#include <stdio.h>
#include <vector>
#include <time.h>

int main(int argc, char* argv[])
{
  MPI_Init(&argc, &argv);

  int rank;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  int n = 999999;
  const int m = 1;
  std::vector<std::vector<int> > vec(m);
  for (int i = 0; i < m; i++)
  {
    vec[i].resize(n);
  }
  MPI_Request mpiRequest[m];
  MPI_Status mpiStatus[m];
  char tt[99] = {0};

  MPI_Barrier(MPI_COMM_WORLD);

  if (rank == 0)
  {
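    // Rank 0: post a non-blocking send for each message, then poll for
    // completion with MPI_Test in between usleep() "compute" phases.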
    for (int i = 0; i < m; i++)
    {
      MPI_Isend(&vec[i][0], n, MPI_INT, 1, i, MPI_COMM_WORLD, &mpiRequest[i]);
      time_t t = time(0);
      strftime(tt, 9, "%H:%M:%S", localtime(&t));
      printf("MPI %d: Isend of %d started at %s.\n", rank, i, tt);
    }

    for (int i = 0; i < m; i++)
    {
      int done = 0;
      while (done == 0)
      {
        usleep(100000);
        time_t t = time(0);
        strftime(tt, 9, "%H:%M:%S", localtime(&t));
        printf("MPI %d: MPI_Test of %d at %s.\n", rank, i, tt);
        MPI_Test(&mpiRequest[i], &done, &mpiStatus[i]);
//        printf("MPI %d: MPI_Wait of %d at %s.\n", rank, i, tt);
//        MPI_Wait(&mpiRequest[i], &mpiStatus[i]);
      }

      time_t t = time(0);
      strftime(tt, 9, "%H:%M:%S", localtime(&t));
      printf("MPI %d: Isend of %d finished at %s.\n", rank, i, tt);
    }
  }
  else
  {
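    // Rank 1: blocking MPI_Recv for each message, with timestamps before and after.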
    for (int i = 0; i < m; i++)
    {
      time_t t = time(0);
      strftime(tt, 9, "%H:%M:%S", localtime(&t));
      printf("MPI %d: Recv of %d started at %s.\n", rank, i, tt);

      MPI_Recv(&vec[i][0], n, MPI_INT, 0, i, MPI_COMM_WORLD, &mpiStatus[i]);

      t = time(0);
      strftime(tt, 9, "%H:%M:%S", localtime(&t));
      printf("MPI %d: Recv of %d finished at %s.\n", rank, i, tt);
    }
  }

  MPI_Finalize();

  return 0;
}
