Tom,

Regardless of the (lack of a) memory model in Fortran, there is an error in testmpi3.f90.

shar_mem is declared as an integer, and hence is not in shared memory.

I attached my version of testmpi3.f90, which behaves just like the C version,

at least when compiled with -g -O0 and with Open MPI master.

/* I replaced shar_mem with fptr_mem */
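To illustrate (a minimal sketch, not the actual diff -- see the attached
file for the complete change): the baseptr argument of
MPI_Win_allocate_shared has to be received into a TYPE(C_PTR) and then
mapped to a Fortran pointer, roughly

    type(c_ptr) :: shar_cptr
    integer, pointer :: fptr_mem(:)

    call mpi_win_allocate_shared(isizew,idisp,info,shmcomm,shar_cptr,iwin,ierr)
    call c_f_pointer(shar_cptr,fptr_mem,(/1/)) ! fptr_mem(1) lives in the window

An ordinary 'integer :: shar_mem' variable, by contrast, lives in the
process's own memory and cannot alias the window.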


Cheers,


Gilles




On 10/26/2016 3:29 AM, Tom Rosmond wrote:
All:

I am trying to understand the use of the shared memory features of MPI-3 that allow direct sharing of the memory space of on-node processes. Attached are 2 small test programs, one written in C (testmpi3.c), the other in F95 (testmpi3.f90). They solve the same 'halo' exchange problem. 'testmpi3.c' is a simplified version of an example program from a presentation by Mark Lubin of Intel. I wrote 'testmpi3.f90' to mimic the C version.

Also attached are 2 text files showing the compilation, execution, and output of the respective programs:

CC_testmpi3.txt
F95_testmpi3.txt

Note: All 4 files are contained in the attached 'testmpi3.tar.gz'.

Comparing the outputs of each version, it is clear that the shared memory copies in 'testmpi3.c' are working correctly, but not in 'testmpi3.f90'. As far as I can tell, the 2 programs are equivalent up to line 134 of 'testmpi3.c' and lines 97-101 of 'testmpi3.f90'. I thought the calls to 'c_f_pointer' would produce Fortran pointers that would access the correct shared memory addresses as the C pointers do in 'testmpi3.c', but clearly that isn't happening. Can anyone explain why not, and what is needed to make it happen? Any suggestions are welcome.
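For reference, the idiom I expected to work (sketched schematically here; the variable names are just placeholders) is:

    type(c_ptr) :: cptr
    integer, pointer :: fptr(:)
    call mpi_win_shared_query(iwin,partner_rank,wsize,dispunit,cptr,ierr)
    call c_f_pointer(cptr,fptr,(/1/)) ! fptr should now alias the partner's data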

My environment:
 Scientific Linux 6.8
 INTEL FORTRAN and ICC version 15.0.2.164
 OPEN-MPI 2.0.1


T. Rosmond



      program test
!
! This program does 'halo' exchange

      use mpi_f08
      USE, INTRINSIC ::  ISO_C_BINDING, ONLY : C_PTR, C_F_POINTER

      implicit none

      integer ierr,nproc,nprocsh,lenname
      integer partners(0:1)
      integer maps_shm(0:1)
      integer irank,ir
      integer icolor,iranksh,idisp,j,jr,ii,numnodes,nx

      integer off_node_partners
      integer on_node_partners
      integer halo_left(0:0)
      integer halo_right(0:0)

      integer(kind=MPI_ADDRESS_KIND) :: isizew
      type(mpi_status):: istat
      type(c_ptr) ::  shar_cptr
      type(mpi_comm) :: mpicomm,shmcomm
      type(mpi_win) :: iwin
      type(mpi_info) :: info
      type(mpi_group) :: world_group,share_group

      integer, pointer :: fptr_left(:)
      integer, pointer :: fptr_right(:)
      integer, pointer :: fptr_mem(:)
      integer, allocatable :: shape(:)

      mpicomm= mpi_comm_world
      info= mpi_info_null

      call MPI_Init(ierr)
      call MPI_Comm_rank(mpicomm,irank,ierr)
      call MPI_Comm_size(mpicomm,nproc,ierr)
      ir= irank+1
!
! Define halo partners ring with periodic BC
!
      partners(0)= irank-1
      partners(1)= irank+1
      if(irank.eq.0) partners(0)= nproc-1
      if(irank.eq.nproc-1) partners(1)= 0
!
! I am running on a fully NUMA system, so I split mpicomm explicitly to mimic
! cluster nodes.  Here I split into 4 equally sized nodes

      numnodes= 4

       icolor= (irank*numnodes+1)/nproc
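!  e.g. nproc=16, numnodes=4: ranks 0-3 get color 0, ranks 4-7 color 1, etc.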
       call mpi_comm_split(mpicomm,icolor,0,shmcomm,ierr)
!
       call mpi_comm_group(mpicomm,world_group,ierr)
       call mpi_comm_group(shmcomm,share_group,ierr)
!
       call mpi_group_translate_ranks(world_group,2,partners,share_group, &
                                      maps_shm,ierr)
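!
! maps_shm(j) is partner j's rank within shmcomm, or MPI_UNDEFINED if
! that partner fell on a different "node" (a different shmcomm)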
!
      off_node_partners=0
      on_node_partners=0
      do j=0,1
       if(maps_shm(j).eq.MPI_UNDEFINED) then
         off_node_partners= off_node_partners+1
       else
         on_node_partners= on_node_partners+1
       endif
      enddo

! window holds one default integer (4 bytes) per rank; displacement unit 1
      isizew=4
      idisp=1

      allocate(shape(1))
      shape=(/1/)
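! each rank exposes a single integer, hence the pointer shape (/1/)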

! allocate shared memory windows

      if(on_node_partners.ne.0) then

      call mpi_win_allocate_shared(isizew,idisp,info,shmcomm,shar_cptr, &
                                   iwin,ierr)
! map this rank's own segment of the window to a Fortran pointer
      call c_f_pointer(shar_cptr,fptr_mem,shape)

      do j=0,1

       if(maps_shm(j).ne.MPI_UNDEFINED) then
        call mpi_win_shared_query(iwin,maps_shm(j),isizew,idisp,shar_cptr,ierr)
! map the pointer only when the query actually ran, so a stale
! shar_cptr from the previous iteration is never reused
        if(j.eq.0) then
          call c_f_pointer(shar_cptr,fptr_left,shape)
        else
          call c_f_pointer(shar_cptr,fptr_right,shape)
        endif
       endif

      enddo
      endif
!
      call mpi_win_fence(0,iwin,ierr)
!
! fill shared memory with data

      fptr_mem(1) = (irank+1)*100

      call mpi_win_fence(0,iwin,ierr)
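! the two fences bracket the stores above: once this fence returns,
! every rank's value is visible and partners' segments can be read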

      do j=0,1
       jr= 1-j
       if(maps_shm(j) .ne. MPI_UNDEFINED) then ! partner j is on the same node 
        if(j.eq.0) then
          halo_left= fptr_left      !  load from MPI-3/SHM ops
        else
          halo_right= fptr_right      !  load from MPI-3/SHM ops
        endif
        if(j .eq. 0) then
         print '(a,i3,a,i12)', 'from on-node leftside partner to rank=', &
               irank,' : value=',halo_left
        else
         print '(a,i3,a,i12)', 'from on-node rightside partner to rank=', &
               irank,' : value=',halo_right
        endif
       else ! MPI exchange with off-node partner
        call mpi_send(fptr_mem,1,mpi_integer,partners(j),1,mpicomm,ierr)
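! (sending to both partners before receiving relies on eager delivery
!  of these single-integer messages; acceptable for a small test)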
        if(j.eq.0) then
         call mpi_recv(halo_left,1,mpi_integer,partners(j),1,mpicomm,istat,ierr)
        else
         call mpi_recv(halo_right,1,mpi_integer,partners(j),1,mpicomm, &
                       istat,ierr)
        endif

       endif
      enddo

      if(off_node_partners.gt.0) then

        do j=0,1
         if(maps_shm(j) .eq. MPI_UNDEFINED) then ! partner j is off node 
          if(j .eq. 0) then
           print '(a,i3,a,i5)', 'from off-node leftside partner to rank=', &
                 irank,' : value=',halo_left
          else
           print '(a,i3,a,i5)', 'from off-node rightside partner to rank=', &
                 irank,' : value=',halo_right
          endif
         endif
        enddo

      endif
!
      print '(a,i2,a,i12,a,i5,a,i12)', ' on rank=',irank, &
            ' : leftside=',halo_left,' : midpt=',fptr_mem(1), &
            ' : rightside=', halo_right

      call mpi_win_free(iwin,ierr)

      call mpi_finalize(ierr)
      end program test
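! Build/run sketch (assuming the Open MPI wrapper compilers; use a rank
! count that is a multiple of numnodes, with at least 2 ranks per "node"):
!   mpifort -g -O0 testmpi3.f90 -o testmpi3
!   mpiexec -np 8 ./testmpi3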