There shouldn't be any issue with using g95 instead of gfortran.

Can you double-check that you aren't accidentally mixing two installations somehow (e.g., compiling with one MPI implementation's mpicc but launching with another's mpirun)?
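
For example, you can check that mpicc and mpirun resolve to the same Open MPI installation (a minimal sketch; the paths on your system will differ):

  which mpicc
  which mpirun
  mpicc --showme      # print the underlying compiler command the wrapper invokes
  ompi_info | head    # report the version/prefix of the Open MPI found first in PATH

If the two "which" results point at different prefixes, try launching with the full path (e.g., /path/to/openmpi/bin/mpirun -np 2 ./a.out): mixing an mpirun from one MPI with an executable compiled against another typically produces N separate singletons that all report rank 0, which matches your output below.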


On Apr 6, 2011, at 7:52 AM, nicolas cordier wrote:

> Hi,
> 
> I need to use Open MPI with g95 on Debian Linux lenny 5.0 (x86_64).
> I built it with FC=g95 F77=g95 and tested it with my example.c file,
> but with the g95 build, mpirun only runs process 0, never process 1.
> Perhaps my build options are wrong?
> 
> I want mpirun to run both process 0 and process 1.
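> 
> A minimal sketch of that build, assuming a from-source configure (the prefix is illustrative, not my exact path):
> 
>   ./configure FC=g95 F77=g95 --prefix=$HOME/openmpi-g95
>   make all install
>   export PATH=$HOME/openmpi-g95/bin:$PATH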
> 
> hostname paola12
> 
> With g95 + Open MPI:
> 
> mpicc example.c
> mpirun -np 2 a.out
> C Process 0 on paola12
> 0 [1 3 ]
> 0 [1.000000 3.000000 ]
> C Process 0 on paola12
> 0 [1 3 ]
> 0 [1.000000 3.000000 ]
> 
> 
> With gfortran (4.3.2) + Open MPI:
> mpirun -np 2 a.out
> C Process 0 on paola12
> C Process 1 on paola12
> 0 [2 9 ]
> 1 [2 9 ]
> 0 [3.000000 6.000000 ]
> 1 [3.000000 6.000000 ]
> 
> example.c
> 
> #include <stdlib.h>
> #include <stdio.h>
> #include <math.h>
> #include <mpi.h>
> 
> int main(int argc, char** argv) {
>     MPI_Init(&argc, &argv);
>     int rank;
>     int namelen;
>     char processor_name[MPI_MAX_PROCESSOR_NAME];
>     MPI_Comm_rank(MPI_COMM_WORLD, &rank);
>     MPI_Get_processor_name(processor_name, &namelen);
>     printf("C Process %d on %s \n", rank, processor_name);
>     MPI_Barrier(MPI_COMM_WORLD);
> 
>     int size = 2;
>     int *array, *reducedValues;
>     array = (int *) malloc((size) * sizeof (int));
>     reducedValues = (int *) malloc((size) * sizeof (int));
>     array[0] = rank+1;
>     array[1] = 3;
>     /* MPI_INT is the C integer datatype (MPI_INTEGER is its Fortran counterpart) */
>     MPI_Allreduce(array, reducedValues, size, MPI_INT, MPI_PROD,
>                   MPI_COMM_WORLD);
>     int i;
>     printf("%d [", rank);
>     for (i = 0; i < size; i++) {
>         printf("%d ", reducedValues[i]);
>     }
>     printf("]\n");
>     free(reducedValues);
>     free(array);
> 
>     /* Trivial check with a single integer (OK):
>     size=1;
>     int *array1, *reducedValues1;
>     array1 = (int *) malloc((size) * sizeof (int));
>     reducedValues1 = (int *) malloc((size) * sizeof (int));
>     array1[0] = rank+1;
>     MPI_Allreduce(array1, reducedValues1, size, MPI_INT, MPI_PROD,
>                   MPI_COMM_WORLD);
>     printf(" C scalar %d \n", reducedValues1[0]);
>     free(reducedValues1);
>     free(array1);
>     */
> 
>     /* Check for doubles */
>     size=2;
>     double *Darray, *DreducedValues;
>     Darray = (double *) malloc((size) * sizeof (double));
>     DreducedValues = (double *) malloc((size) * sizeof (double));
>     Darray[0] = (rank+1)*1.0;
>     Darray[1] = 3.0;
>     MPI_Allreduce(Darray, DreducedValues, size, MPI_DOUBLE, MPI_SUM,
>                   MPI_COMM_WORLD);
>     printf("%d [", rank);
>     for (i = 0; i < size; i++) {
>         printf("%f ", DreducedValues[i]);
>     }
>     printf("]\n");
>     free(DreducedValues);
>     free(Darray);
>     MPI_Finalize();
>     return 0;
> }
> 
> 
> Greetings,
> 
> Nicolas Cordier


-- 
Jeff Squyres
jsquy...@cisco.com
For corporate legal information go to:
http://www.cisco.com/web/about/doing_business/legal/cri/

