! paralleladder.f90
! © Copyright 2003-4 Dauger Research, Inc.
!
! Our lawyers made us say this:
! DISCLAIMER: We provide the following on an "AS IS" basis. Use it at your own risk.

module adderlib

! MPI library
include 'mpif.h'

contains

! A routine performing elemental work.
! This can be replaced with a much larger routine.
function kernelroutine(input)
   implicit none
   real :: kernelroutine, input

   kernelroutine = (input)*(input)

end function

subroutine computeloop(theSum, theArray, HighestIndex, idproc, nproc)
   implicit none
   real :: theSum, mySum, totalSum ! local copies of the data to be output
   real, dimension(:) :: theArray
   integer :: loopEnd, index, HighestIndex, idproc, nproc, offset, ierr
   real, dimension(:), allocatable :: myArray

   ! limit of the loop
   loopEnd = (HighestIndex+nproc-1)/nproc ! just this proc's piece
   ! this processor's index offset
   offset = idproc*loopEnd

   ! without stat=, a failed allocation terminates the program,
   ! so myArray is always valid below
   allocate(myArray(loopEnd))

   mySum = 0.

   ! loop over indices
   do index = 1, loopEnd
      ! call the kernel routine for each index, and save into the array
      myArray(index) = kernelroutine(real(index+offset))
      ! sum as we go, skipping any padding indices past HighestIndex
      if (index+offset .le. HighestIndex) mySum = mySum + myArray(index)
   end do

   ! proc 0 needs to hold the entire array
   ! gathers the data from the other arrays ...
   call MPI_GATHER(myArray, loopEnd, MPI_REAL, theArray, loopEnd, MPI_REAL, &
                   0, MPI_COMM_WORLD, ierr) ! ... to proc 0
   deallocate(myArray)

   ! performs a parallel sum across processors and saves the result ...
   ! ... at proc 0; MPI forbids using the same buffer as both source and
   ! destination, so the total lands in a separate totalSum, preset to the
   ! local partial sum (which the non-root processors keep)
   totalSum = mySum
   call MPI_REDUCE(mySum, totalSum, 1, MPI_REAL, MPI_SUM, 0, MPI_COMM_WORLD, ierr)

   ! return the sum and the array
   theSum = totalSum

end subroutine

function ppinit(idproc, nproc)
   implicit none
   ! this function initializes parallel processing
   integer :: idproc ! = processor id
   integer :: nproc ! = number of real or virtual processors obtained
   integer :: ierr, ppinit

   nproc = 0

   ! initialize the MPI execution environment
   call MPI_INIT(ierr)
   if (ierr.eq.0) then
      ! determine the rank of the calling process in the communicator
      call MPI_COMM_RANK(MPI_COMM_WORLD, idproc, ierr)
      ! determine the size of the group associated with a communicator
      call MPI_COMM_SIZE(MPI_COMM_WORLD, nproc, ierr)
   end if

   ppinit = ierr

end function

subroutine ppexit()
   implicit none
   ! this subroutine terminates parallel processing
   integer :: ierr

   ! terminate the MPI execution environment
   call MPI_Finalize(ierr)

end subroutine

end module adderlib

program paralleladder
   use adderlib
   implicit none

   ! HighestIndex specifies the highest index to sample
   integer, parameter :: HighestIndex = 10000

   ! main copies of the sum and the array; the array is allocated with
   ! padding so MPI_GATHER can deposit nproc equal-sized pieces even when
   ! HighestIndex does not divide evenly by nproc
   real :: theSum
   real, dimension(:), allocatable :: theArray

   integer :: idproc, nproc, ierr, ios

   ! initialize parallel processing
   ierr = ppinit(idproc, nproc)
   if (ierr.ne.0) stop ! stop right there if there's a problem

   allocate(theArray(((HighestIndex+nproc-1)/nproc)*nproc))

   print *, "I'm processor #", idproc, " in a ", nproc, "-processor cluster."

   print *, "Beginning computation..."

   call computeloop(theSum, theArray, HighestIndex, idproc, nproc)

   if (theSum.gt.0) then ! error checking
      if (idproc.eq.0) then ! only processor 0
         ! save the array into a data file
         open(unit = 2, file = "output", status = "replace", iostat = ios, &
              form = "unformatted")
         if (ios .eq. 0) then
            print *, "writing array..."
            write(unit = 2) theArray(1:HighestIndex)
            endfile 2
            close(unit = 2)
         end if
      end if
      print *, "the sum is ", theSum
   end if

   deallocate(theArray)

   call ppexit()
   stop
end program
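
! A typical way to build and run this example, assuming an MPI
! installation that supplies the common mpif90 compiler wrapper and the
! mpirun launcher (exact command names and flags vary between MPI
! distributions):
!
!   mpif90 -o paralleladder paralleladder.f90
!   mpirun -np 4 ./paralleladder
!
! Each process announces its rank; processor 0 also writes the gathered
! array to the unformatted file "output" and prints the total, the sum
! of the squares of 1 through 10000 (about 3.33e11, up to
! single-precision rounding).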