That's it; here is what I've done: the worker program that gets spawned and the
two versions of the manager that perform the spawning.
If you find something wrong, please let me know.

Thank you,
   Federico



2009/8/18 Ralph Castain <r...@open-mpi.org>

> ????
>
> Only the root process needs to provide the info keys for spawning anything.
> If that isn't correct, then we have a bug.
>
> Could you send us a code snippet that shows what you were doing?
>
> Thanks
> Ralph
>
>
> 2009/8/18 Federico Golfrè Andreasi <federico.gol...@gmail.com>
>
>> I think I've solved my problem:
>>
>> in the previous code the arguments of MPI_Comm_spawn_multiple were filled
>> only by the "root" process, not by all the processes in the group. Now all
>> the ranks have all that information and the spawn is done correctly.
>> But I read on http://www.mpi-forum.org/docs/mpi21-report/node203.htm that
>> that information is significant only at the root.
>> In any case, it works now.
>>
>> Thanks,
>> Federico
>>
>>
>>
>> 2009/8/18 Jeff Squyres <jsquy...@cisco.com>
>>
>>> On Aug 18, 2009, at 5:12 AM, Federico Golfrè Andreasi wrote:
>>>
>>>> In the info object I only set the "host" key (after creating the object
>>>> with MPI_Info_create).
>>>>
>>>> I've modified my code to leave out that request and created the array of
>>>> Info objects as an array of MPI_INFO_NULL, but the problem is still the same.
>>>> The error is thrown only when running with more than one process.
>>>>
>>>> Shall I send you a short program for testing it?
>>>>
>>>
>>>
>>> If you have a short program that can replicate the problem, yes, that
>>> would be great.
>>>
>>> --
>>> Jeff Squyres
>>> jsquy...@cisco.com
>>>
>>>
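For reference, here is a minimal sketch of the pattern Ralph describes: only
the root rank creates and fills the info object, and the other ranks can pass
MPI_INFO_NULL, since the standard says those arguments are significant only at
the root. The "host" value "node01" below is just a placeholder, and
MPI_Comm_spawn is used instead of MPI_Comm_spawn_multiple only to keep the
sketch short.

/* Minimal root-only spawn sketch (hypothetical host name "node01") */
#include <cstdlib>
#include "mpi.h"

int main ( int argc, char* argv[] ) {
	MPI_Init(&argc,&argv);
	int rank;
	MPI_Comm_rank(MPI_COMM_WORLD,&rank);

	// Only the root fills the spawn arguments; they are ignored elsewhere
	char     *command = NULL;
	MPI_Info  info    = MPI_INFO_NULL;
	if (rank==0) {
		command = argv[1];                                // slave executable, as in the tests below
		MPI_Info_create(&info);
		MPI_Info_set(info,(char*)"host",(char*)"node01"); // placeholder host name
	}

	MPI_Comm intercomm;
	MPI_Comm_spawn(command,MPI_ARGV_NULL,1,info,0,
	               MPI_COMM_WORLD,&intercomm,MPI_ERRCODES_IGNORE);

	if (rank==0) MPI_Info_free(&info);
	MPI_Finalize();
	return EXIT_SUCCESS;
}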
/*
 *
 * TEST PROGRAM
 * for MPI_COMM_SPAWN_MULTIPLE
 *
 * The first argument of the master program is the executable filename of the slave
 *
 * program MASTER
 * non-working version: the only change is that the argument-filling loop is
 * wrapped in an if (manager_rank==0) block, so the arrays are set on rank 0 only
 *
 * Author:  Federico Golfrè Andreasi
 * Created: 18/08/2009
 *
 */


#include <iostream>
#include <cstdlib>
#include "mpi.h"
#define   MAX_HOST_NAME MPI_MAX_PROCESSOR_NAME

using namespace std;


int main ( int argc, char* argv[] ) {


	int			manager_rank,manager_size;
	char		local_host[MAX_HOST_NAME];
	int			local_host_len;
	MPI_Comm	intercomm;


	// *** MPI SESSION ***

	//  Initialization of MPI session
	MPI_Init(&argc,&argv);


	// *** GET INFORMATION ABOUT THE WORLD COMMUNICATOR ***

	// Get the size and the rank within the comm
	MPI_Comm_rank(MPI_COMM_WORLD,&manager_rank);
	MPI_Comm_size(MPI_COMM_WORLD,&manager_size);
	if (manager_rank==0) cout<<"\n***** STARTING THE SPAWN TEST PROGRAM ****\n";
	// Get the name of the host
	MPI_Get_processor_name(local_host,&local_host_len);
	cout<<" Rank "<<manager_rank<<" runs on host: "<<local_host<<"\n";


	// *** EXECUTING THE SLAVE PROGRAM ***

	// Set the executable to launch, the number of processes for each slave program, and the argv
	char 	*commands[manager_size];
	int 	procs[manager_size];
	char	**argvs[manager_size];	// filled below but unused: MPI_ARGVS_NULL is passed to the spawn call
	// Create the array of Info objects as MPI_INFO_NULL
	MPI_Info infos[manager_size];
	if (manager_rank==0) {
		for (int i=0;i<manager_size;i++) {
			commands[i]=argv[1];
			procs[i]=1;
			argvs[i]=argv;
			infos[i]=MPI_INFO_NULL;
		}
	}
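	// NOTE: on ranks other than 0 the arrays above are deliberately left
	// uninitialized; per the standard they should be ignored at non-root
	// ranks, which is exactly what this version tests.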
	// Array to collect the per-command spawn error codes
	int		spawn_errors[manager_size];
	// Launching the slaves
	MPI_Barrier(MPI_COMM_WORLD);
	if (manager_rank==0) cout<<"\t***\n Now we are executing: "<<argv[1]<<" program (argc="<<argc<<")\n";
	MPI_Comm_spawn_multiple(manager_size,commands,MPI_ARGVS_NULL,procs,infos,0,MPI_COMM_WORLD,&intercomm,spawn_errors);
	// Check for some errors
	for ( int i=0;i<manager_size;i++ ) {
		if ( spawn_errors[i]!=MPI_SUCCESS ) { cout<<" Error with spawning process "<<i<<"\n"; return EXIT_FAILURE; }
	}

	// *** END OF THE PROGRAM AND MPI SESSION ***

	if (manager_rank==0) cout<<"**** THE MASTER PROGRAM IS EXITING ****\n";
	MPI_Finalize();
	return EXIT_SUCCESS;

}
/*
 *
 * TEST PROGRAM
 * for MPI_COMM_SPAWN_MULTIPLE
 *
 * The first argument of the master program is the executable filename of the slave
 *
 * program MASTER
 *
 * Author:  Federico Golfrè Andreasi
 * Created: 18/08/2009
 *
 */


#include <iostream>
#include <cstdlib>
#include "mpi.h"
#define   MAX_HOST_NAME MPI_MAX_PROCESSOR_NAME

using namespace std;


int main ( int argc, char* argv[] ) {


	int			manager_rank,manager_size;
	char		local_host[MAX_HOST_NAME];
	int			local_host_len;
	MPI_Comm	intercomm;


	// *** MPI SESSION ***

	//  Initialization of MPI session
	MPI_Init(&argc,&argv);


	// *** GET INFORMATION ABOUT THE WORLD COMMUNICATOR ***

	// Get the size and the rank within the comm
	MPI_Comm_rank(MPI_COMM_WORLD,&manager_rank);
	MPI_Comm_size(MPI_COMM_WORLD,&manager_size);
	if (manager_rank==0) cout<<"\n***** STARTING THE SPAWN TEST PROGRAM ****\n";
	// Get the name of the host
	MPI_Get_processor_name(local_host,&local_host_len);
	cout<<" Rank "<<manager_rank<<" runs on host: "<<local_host<<"\n";


	// *** EXECUTING THE SLAVE PROGRAM ***

	// Set the executable to launch, the number of processes for each slave program, and the argv
	char 	*commands[manager_size];
	int 	procs[manager_size];
	char	**argvs[manager_size];	// filled below but unused: MPI_ARGVS_NULL is passed to the spawn call
	// Create the array of Info objects as MPI_INFO_NULL
	MPI_Info infos[manager_size];
	for (int i=0;i<manager_size;i++) {
		commands[i]=argv[1];
		procs[i]=1;
		argvs[i]=argv;
		infos[i]=MPI_INFO_NULL;
	}
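	// WORKAROUND: every rank fills the arrays, even though the standard says
	// they are significant only at the root; with this change the spawn succeeds.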
	// Array to collect the per-command spawn error codes
	int		spawn_errors[manager_size];
	// Launching the slaves
	MPI_Barrier(MPI_COMM_WORLD);
	if (manager_rank==0) cout<<"\t***\n Now we are executing: "<<argv[1]<<" program (argc="<<argc<<")\n";
	MPI_Comm_spawn_multiple(manager_size,commands,MPI_ARGVS_NULL,procs,infos,0,MPI_COMM_WORLD,&intercomm,spawn_errors);
	// Check for some errors
	for ( int i=0;i<manager_size;i++ ) {
		if ( spawn_errors[i]!=MPI_SUCCESS ) { cout<<" Error with spawning process "<<i<<"\n"; return EXIT_FAILURE; }
	}

	// *** END OF THE PROGRAM AND MPI SESSION ***

	if (manager_rank==0) cout<<"**** THE MASTER PROGRAM IS EXITING ****\n";
	MPI_Finalize();
	return EXIT_SUCCESS;

}
/*
 *
 * TEST PROGRAM
 * for MPI_COMM_SPAWN_MULTIPLE
 *
 * program SLAVE
 *
 * Author:  Federico Golfrè Andreasi
 * Created: 18/08/2009
 *
 */


#include <iostream>
#include <cstdlib>
#include "mpi.h"

using namespace std;
#define   MAX_PROCESSOR_NAME MPI_MAX_PROCESSOR_NAME


int main (int argc, char *argv[]) {


	int			worker_rank,worker_size;
	char		local_host[MAX_PROCESSOR_NAME];
	int			local_host_len;


	// *** MPI SESSION ***

	//  Initialization of MPI session
	MPI_Init(&argc,&argv);


	// *** GET INFORMATION ABOUT THE WORKER WORLD COMMUNICATOR ***

	// Get the size and the rank within the worker comm
	MPI_Comm_rank(MPI_COMM_WORLD,&worker_rank);
	MPI_Comm_size(MPI_COMM_WORLD,&worker_size);
	if (worker_rank==0) cout<<"\n***** WORKER PROGRAM ****\n";
	// Get the name of the host
	MPI_Get_processor_name(local_host,&local_host_len);
	cout<<" Rank "<<worker_rank<<" runs on host: "<<local_host<<" (argc="<<argc<<")\n";
	MPI_Barrier(MPI_COMM_WORLD);


	// *** END OF MPI SESSION ***

	if (worker_rank==0) cout<<"**** WORKER PROGRAM EXITS ****\n\n";
	MPI_Finalize();
	return EXIT_SUCCESS;

}
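For completeness, the three programs can be built and run with the usual Open
MPI wrappers, e.g. (assuming the sources are saved as manager.cc,
manager_bad.cc and worker.cc):

	mpic++ manager.cc -o manager
	mpic++ manager_bad.cc -o manager_bad
	mpic++ worker.cc -o worker
	mpirun -np 2 ./manager ./worker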
