#include <mpi.h>
#include <math.h>
#include <stdio.h>

/* Prototype */
float integral(float ai, float h, int n);

int main(int argc, char* argv[])
/*#############################################################################
#                                                                             #
# This is an MPI example on parallel integration to demonstrate the use of:  #
#                                                                             #
#   * MPI_Init, MPI_Comm_rank, MPI_Comm_size, MPI_Finalize                   #
#   * MPI_Gather                                                              #
#                                                                             #
# Dr. Kadin Tseng                                                             #
# Scientific Computing and Visualization                                      #
# Boston University                                                           #
# 1998                                                                        #
#                                                                             #
#############################################################################*/
{
    int n, p, myid, tag, proc, ierr, i;
    float h, integral_sum, a, b, ai, pi, my_int, buf[50];
    int master = 0;                 /* processor performing total sum */

    MPI_Comm comm;
    comm = MPI_COMM_WORLD;

    ierr = MPI_Init(&argc, &argv);  /* starts MPI */
    MPI_Comm_rank(comm, &myid);     /* get current process id */
    MPI_Comm_size(comm, &p);        /* get number of processes */

    pi = acos(-1.0);                /* = 3.14159... */
    a = 0.;                         /* lower limit of integration */
    b = pi*1./2.;                   /* upper limit of integration */
    n = 500;                        /* number of increments within each process */
    h = (b-a)/n/p;                  /* length of increment */

    ai = a + myid*n*h;              /* lower limit of integration for partition myid */
    my_int = integral(ai, h, n);    /* partial sum over this process's partition */

    printf("Process %d has the partial sum of %f\n", myid, my_int);

    MPI_Gather(&my_int, 1, MPI_FLOAT,
               buf, 1, MPI_FLOAT,
               master, comm);       /* collect all partial sums on the master */

    if (myid == master) {
        integral_sum = 0.0;
        for (i = 0; i < p; i++) {   /* sum the gathered partial sums */
            integral_sum += buf[i];
        }
        printf("The integral sum = %f\n", integral_sum);
    }

    MPI_Finalize();                 /* let MPI finish up */
    return 0;
}

/* Midpoint-rule integration over n increments of width h starting at ai.
   The integrand is assumed to be cos(x), consistent with the integration
   limits 0 to pi/2 used in main. */
float integral(float ai, float h, int n)
{
    int j;
    float aij, integ;

    integ = 0.0;                    /* initialize the partial sum */
    for (j = 0; j < n; j++) {
        aij = ai + (j + 0.5) * h;   /* midpoint of increment j */
        integ += cos(aij) * h;
    }
    return integ;
}
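/* A typical way to build and run this example, assuming an MPI installation
   that provides the standard mpicc compiler wrapper and mpirun launcher
   (the source file name "integrate.c" is only illustrative):

       mpicc -o integrate integrate.c -lm
       mpirun -np 4 ./integrate

   Each of the 4 processes prints its partial sum, and the master (rank 0)
   prints the total, which should be close to 1.0 for cos(x) on [0, pi/2]. */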