#include <mpi.h>
#include <math.h>
#include <stdio.h>

float fct(float x)
{
  return cos(x);
}

/* Prototype */
float integral(float ai, float h, int n);

int main(int argc, char* argv[])
{
/*##############################################################################
#
#  This is an MPI example on parallel integration to demonstrate the use of:
#
#  * MPI_Init, MPI_Comm_rank, MPI_Comm_size, MPI_Finalize
#  * MPI_Reduce
#  * MPI_SUM
#
#  Dr. Kadin Tseng
#  Scientific Computing and Visualization
#  Boston University
#  1998
#
##############################################################################*/

  int n, p, myid, tag, proc, ierr;
  float h, integral_sum, a, b, ai, pi, my_int;
  char line[10];
  int master = 0;                 /* processor performing total sum */
  MPI_Comm comm;

  comm = MPI_COMM_WORLD;
  ierr = MPI_Init(&argc, &argv);  /* starts MPI */
  MPI_Comm_rank(comm, &myid);     /* get current process id */
  MPI_Comm_size(comm, &p);        /* get number of processes */

  pi = acos(-1.0);                /* = 3.14159... */
  a = 0.;                         /* lower limit of integration */
  b = pi*1./2.;                   /* upper limit of integration */

  if (myid == master) {
    printf("The requested number of processors is %d\n", p);
    printf("Enter number of increments within each process\n");
    (void) fgets(line, sizeof(line), stdin);
    (void) sscanf(line, "%d", &n);
  }

  /* Broadcast "n" to all processes */
  MPI_Bcast(&n, 1, MPI_INT, master, comm);

  h = (b-a)/n/p;                  /* length of increment */
  ai = a + myid*n*h;              /* lower limit of integration for partition myid */
  my_int = integral(ai, h, n);    /* 0 <= myid <= p-1 */

  printf("Process %d has the partial result of %f\n", myid, my_int);

  /* Sum the partial integrals of all processes onto the master */
  MPI_Reduce(&my_int, &integral_sum, 1, MPI_FLOAT, MPI_SUM, master, comm);

  if (myid == master) {
    printf("The result =%f\n", integral_sum);
  }

  MPI_Finalize();                 /* let MPI finish up ... */
  return 0;
}

float integral(float ai, float h, int n)
{
  int j;
  float aij, integ;

  integ = 0.0;                    /* initialize */
  for (j = 0; j < n; j++) {       /* sum over the n increments of this partition */
    aij = ai + (j + 0.5)*h;       /* mid-point of increment j */
    integ += fct(aij)*h;          /* mid-point rule contribution */
  }
  return integ;
}
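/* A minimal build-and-run sketch, assuming an MPI installation that provides
   the usual mpicc compiler wrapper and mpirun launcher; the file name
   "integral.c" and the process count of 4 are illustrative assumptions, not
   part of the example above:

     mpicc -o integral integral.c -lm
     mpirun -np 4 ./integral
*/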