Commit ebb9959e authored by Simon Brass

Basic MPI examples in modern Fortran

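! Example 1: initialize MPI, report the number of processes and this rank, then finalize.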
program main
  use mpi_f08
  implicit none
  integer :: ierr, nprocs, myrank
  call MPI_init(ierr)
  if (ierr /= MPI_SUCCESS) then
    print *, "MPI_init error"
  end if
  call MPI_Comm_size(MPI_Comm_world, nprocs, ierr)
  if (ierr /= MPI_SUCCESS) then
    print *, "MPI_Comm_size error"
  end if
  call MPI_Comm_rank(MPI_Comm_world, myrank, ierr)
  if (ierr /= MPI_SUCCESS) then
    print *, "MPI_Comm_rank error"
  end if
  print *, "nprocs=", nprocs, " myrank=", myrank
  call MPI_finalize(ierr)
  if (ierr /= MPI_SUCCESS) then
    print *, "MPI_finalize error"
  end if
end program main
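! Example 2: broadcast an integer array from rank 0 to all ranks with MPI_Bcast.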
program main
  use mpi_f08
  implicit none
  integer :: i, ierr, nprocs, myrank
  integer, dimension(5) :: imsg
  ! --------------------------------------------------
  call MPI_init(ierr)
  if (ierr /= MPI_SUCCESS) then
    print *, "MPI_init error"
  end if
  call MPI_Comm_size(MPI_Comm_world, nprocs, ierr)
  if (ierr /= MPI_SUCCESS) then
    print *, "MPI_Comm_size error"
  end if
  call MPI_Comm_rank(MPI_Comm_world, myrank, ierr)
  if (ierr /= MPI_SUCCESS) then
    print *, "MPI_Comm_rank error"
  end if
  print *, "nprocs=", nprocs, " myrank=", myrank
  ! --------------------------------------------------
  if (myrank == 0) then
    imsg = [(i, i=1, 5)]
  else
    imsg = 0
  end if
  print *, "Before: ", imsg
  call MPI_bcast(imsg, 5, MPI_integer, 0, MPI_Comm_world, ierr)
  if (ierr /= MPI_SUCCESS) then
    print *, "MPI_bcast: ", ierr
  end if
  print *, "After: ", imsg
  ! --------------------------------------------------
  call MPI_finalize(ierr)
  if (ierr /= MPI_SUCCESS) then
    print *, "MPI_finalize error"
  end if
end program main
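! Example 3: rank 0 distributes blocks of an array to the other ranks with nonblocking MPI_Isend/MPI_Irecv.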
program main
  use mpi_f08
  implicit none
  integer :: i, ierr, nprocs, myrank
  integer, parameter :: dim = 10
  type(MPI_Status) :: status
  type(MPI_Request) :: request
  integer, dimension(:), allocatable :: data
  integer :: istart, iend
  ! --------------------------------------------------
  call MPI_init(ierr)
  if (ierr /= MPI_SUCCESS) then
    print *, "MPI_init error"
  end if
  call MPI_Comm_size(MPI_Comm_world, nprocs, ierr)
  if (ierr /= MPI_SUCCESS) then
    print *, "MPI_Comm_size error"
  end if
  call MPI_Comm_rank(MPI_Comm_world, myrank, ierr)
  if (ierr /= MPI_SUCCESS) then
    print *, "MPI_Comm_rank error"
  end if
  ! print *, "nprocs=", nprocs, " myrank=", myrank
  ! --------------------------------------------------
  request = MPI_REQUEST_NULL
  if (myrank == 0) then
    allocate(data(dim))
    data = [(i, i=1, dim)]
    ! Distribute data
    do i = 1, nprocs - 1
      call block_distribute_range(1, dim, nprocs, i, istart, iend)
      call MPI_isend(data(istart), iend - istart + 1, MPI_integer, i, 1, MPI_Comm_world, request, ierr)
      if (ierr /= MPI_SUCCESS) then
        print *, "MPI_isend: ", ierr
      end if
      ! Only one request handle is kept, so complete each send before posting the next.
      call MPI_wait(request, status, ierr)
    end do
  else
    call block_distribute_range(1, dim, nprocs, myrank, istart, iend)
    allocate(data(iend - istart + 1))
    call MPI_irecv(data, iend - istart + 1, MPI_integer, 0, 1, MPI_Comm_world, request, ierr)
    if (ierr /= MPI_SUCCESS) then
      print *, "MPI_irecv: ", ierr
    end if
  end if
  call MPI_wait(request, status, ierr)
  if (ierr /= MPI_SUCCESS) then
    print *, "MPI_wait: ", ierr
  end if
  print *, myrank, " : ", data
  ! --------------------------------------------------
  call MPI_finalize(ierr)
  if (ierr /= MPI_SUCCESS) then
    print *, "MPI_finalize error"
  end if
contains
  ! nstart, nend : range to distribute
  ! nprocs, rank : number of processes and own rank
  ! istart, iend : start and end of this rank's block
  subroutine block_distribute_range(nstart, nend, nprocs, rank, istart, iend)
    integer, intent(in) :: nstart, nend, nprocs, rank
    integer, intent(out) :: istart, iend
    integer :: q, r
    associate (iterations => nend - nstart + 1)
      ! Each rank gets q elements; the first r ranks get one extra element.
      q = iterations / nprocs
      r = mod(iterations, nprocs)
      istart = rank * q + nstart + min(rank, r)
      iend = istart + q - 1
      if (r > rank) then
        iend = iend + 1
      end if
    end associate
  end subroutine block_distribute_range
end program main
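! Example 4: as example 3, but with a user-defined error handler attached to MPI_Comm_world; the receive is posted on an uninitialized communicator to provoke an MPI error.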
program main
  use mpi_f08
  implicit none
  integer :: i, ierr, nprocs, myrank
  integer, parameter :: dim = 10
  type(MPI_Status) :: status
  type(MPI_Request) :: request
  type(MPI_Errhandler) :: errhandler
  ! false_comm is deliberately left uninitialized; using it below should raise an MPI error.
  type(MPI_Comm) :: false_comm
  integer, dimension(:), allocatable :: data
  integer :: istart, iend
  ! --------------------------------------------------
  call MPI_init(ierr)
  call MPI_Comm_size(MPI_Comm_world, nprocs, ierr)
  call MPI_Comm_rank(MPI_Comm_world, myrank, ierr)
  ! Install a user-defined error handler on MPI_Comm_world instead of checking ierr after each call.
  call MPI_Comm_create_errhandler(mpi_error_function, errhandler)
  call MPI_Comm_set_errhandler(MPI_Comm_world, errhandler)
  ! print *, "nprocs=", nprocs, " myrank=", myrank
  ! --------------------------------------------------
  request = MPI_REQUEST_NULL
  if (myrank == 0) then
    allocate(data(dim))
    data = [(i, i=1, dim)]
    ! Distribute data
    do i = 1, nprocs - 1
      call block_distribute_range(1, dim, nprocs, i, istart, iend)
      call MPI_isend(data(istart), iend - istart + 1, MPI_integer, i, 1, MPI_Comm_world, request, ierr)
      ! Only one request handle is kept, so complete each send before posting the next.
      call MPI_wait(request, status, ierr)
    end do
  else
    call block_distribute_range(1, dim, nprocs, myrank, istart, iend)
    allocate(data(iend - istart + 1))
    call MPI_irecv(data, iend - istart + 1, MPI_integer, 0, 1, false_comm, request, ierr)
  end if
  call MPI_wait(request, status, ierr)
  print *, myrank, " : ", data
  ! --------------------------------------------------
  call MPI_finalize(ierr)
contains
  ! nstart, nend : range to distribute
  ! nprocs, rank : number of processes and own rank
  ! istart, iend : start and end of this rank's block
  subroutine block_distribute_range(nstart, nend, nprocs, rank, istart, iend)
    integer, intent(in) :: nstart, nend, nprocs, rank
    integer, intent(out) :: istart, iend
    integer :: q, r
    associate (iterations => nend - nstart + 1)
      ! Each rank gets q elements; the first r ranks get one extra element.
      q = iterations / nprocs
      r = mod(iterations, nprocs)
      istart = rank * q + nstart + min(rank, r)
      iend = istart + q - 1
      if (r > rank) then
        iend = iend + 1
      end if
    end associate
  end subroutine block_distribute_range
  ! Error handler called by MPI for errors on MPI_Comm_world: print the error string and abort.
  subroutine mpi_error_function(comm, error_code)
    type(MPI_Comm) :: comm
    integer :: error_code
    character(len=MPI_MAX_ERROR_STRING) :: error_msg
    integer :: error_msg_len
    write(*, "(A)") "mpi_error_function: entry"
    write(*, "(A, I0)") "mpi_error_function: ", error_code
    call MPI_error_string(error_code, error_msg, error_msg_len)
    print *, "MPI Error: ", error_code
    print *, "MPI Error Message: ", error_msg(1:error_msg_len)
    if (error_code /= MPI_SUCCESS) then
      call MPI_abort(MPI_Comm_world, error_code)
    end if
  end subroutine mpi_error_function
end program main
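! Example 5: gather one integer from every rank onto rank 0 with MPI_Gather.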
program main
  use mpi_f08
  implicit none
  integer :: ierr, nprocs, myrank, isend
  integer, dimension(:), allocatable :: irecv
  ! --------------------------------------------------
  call MPI_init(ierr)
  if (ierr /= MPI_SUCCESS) then
    print *, "MPI_init error"
  end if
  call MPI_Comm_size(MPI_Comm_world, nprocs, ierr)
  if (ierr /= MPI_SUCCESS) then
    print *, "MPI_Comm_size error"
  end if
  call MPI_Comm_rank(MPI_Comm_world, myrank, ierr)
  if (ierr /= MPI_SUCCESS) then
    print *, "MPI_Comm_rank error"
  end if
  print *, "nprocs=", nprocs, " myrank=", myrank
  ! --------------------------------------------------
  allocate(irecv(nprocs))
  isend = myrank + 1
  call MPI_gather(isend, 1, MPI_integer, irecv, 1, MPI_integer, 0, MPI_Comm_world, ierr)
  if (ierr /= MPI_SUCCESS) then
    print *, "MPI_gather: ", ierr
  end if
  if (myrank == 0) then
    print *, "irecv=", irecv
  end if
  ! --------------------------------------------------
  call MPI_finalize(ierr)
  if (ierr /= MPI_SUCCESS) then
    print *, "MPI_finalize error"
  end if
end program main