C -*- Mode: Fortran; -*-
c
c-----------------------------------------------------------------
c       Ravi Samtaney
c       Copyright 2004
c       Princeton Plasma Physics Laboratory
c       All Rights Reserved
c-----------------------------------------------------------------
c       $Log: bdryexchange.F,v $
c       Revision 1.2  2005/10/11 22:06:45  samtaney
c       Fixed it for eta periodic. It is hard coded for phi to be periodic.
c
c       Revision 1.1.1.1  2005/09/12 17:28:10  samtaney
c       Original source.
c
c       Revision 1.2  2004/12/09 14:35:07  samtaney
c       Changed ii = 1-NGHOST, NXLOCAL + NGHOST to ii = IXLO, IXHI
c       which is more correct for this code.
c
c       Revision 1.1.1.1  2004/05/28 19:33:27  samtaney
c       Original source - copied from 3D Unsplit code.
c
c       Revision 1.1.1.1  2004/04/09 14:58:44  samtaney
c       Original unimesh Unsplit MHD solver.
c
c-----------------------------------------------------------------
C
      integer function iprocnum(ipx, ipy, ipz)
C
C     precondition:
C        ipx, ipy, ipz are the coordinates of the process in the
C        process grid.
C     postcondition:
C        return value is a single integer denoting the process
C        -- unique for each process.
C
      use mesh_parms
      integer ipx, ipy, ipz
      iprocnum = (ipz-1)*XPROCS*YPROCS + (ipy-1)*XPROCS + ipx
      return
      end
C ----------------------------------------------------------------
      subroutine FluidProcCoord(fluidprocid)
C
      use mesh_common
#ifdef PARALLEL
      use mpistuff
#endif
      integer:: fluidprocid
c      integer:: ijkp(3)
c      ijkp(1)=iprocx; ijkp(2)=iprocy; ijkp(3)=iprocz;
      fluidprocid=iproc_idx-1
      return
      end
C ----------------------------------------------------------------
      subroutine pxpypz(iproc, ipx, ipy, ipz)
C
C     precondition:
C        iproc is a single integer denoting the process
C        -- unique for each process.
C     postcondition:
C        ipx, ipy, ipz are the coordinates of the process in the
C        process grid.
C        (i.e., this subroutine is the inverse of function
C        iprocnum above.)
C
      use mesh_parms
#ifdef PARALLEL
      use mpistuff
      integer NDIMENSIONS
      PARAMETER(NDIMENSIONS=3)
      integer coords(NDIMENSIONS)
#endif
      integer ipx, ipy, ipz, iproc
C     compute ipx, ipy, ipz
c02/21/98 for reorder
#ifdef PARALLEL
      call MPI_Cart_Coords(comm3D, iproc-1, NDIMENSIONS, coords, ierr)
      Call ErrorHandler(ierr,ERROR_CARTCOORDS)
      ipx = coords(1) + 1
      ipy = coords(2) + 1
      ipz = coords(3) + 1
      call MPI_Cart_Shift( comm3D, 0, 1, left, right, ierr) ! Neighbors
      Call ErrorHandler(ierr,ERROR_CARTSHIFT)
      call MPI_Cart_Shift( comm3D, 1, 1, bottom, top, ierr)
      Call ErrorHandler(ierr,ERROR_CARTSHIFT)
      call MPI_Cart_Shift( comm3D, 2, 1, behind, forward, ierr)
      Call ErrorHandler(ierr,ERROR_CARTSHIFT)
c$$$      write(6,*) 'NEIGHBORS', ipx, ipy, ipz
c$$$      write(6,*) 'X N', left, right
c$$$      write(6,*) 'Y N', top, bottom
c$$$      write(6,*) 'Z N', forward, behind
#else
      ipx = mod(iproc-1, XPROCS)+1
      ipy = mod( (iproc - ipx) / XPROCS, YPROCS)+1
      ipz = mod( (iproc - ipx - (ipy-1)*XPROCS) / XPROCS/YPROCS,
     1     ZPROCS)+1
#endif
      return
      end
C ================================================================
C
C     subroutines to map from global to local indices
C
C
      subroutine ilocal(ii, ipx, iiloc)
C
C     precondition:
C        ii is a global X index.
C     postcondition:
C        ipx, iiloc are the corresponding X index into the
C        process grid and local X index.
C
      use mesh_parms
      use mesh_common
      integer ii, ipx, iiloc
      ipx = (ii-1) / NXlsize + 1
      iiloc = mod((ii-1), NXlsize) + 1
      return
      end
C ----------------------------------------------------------------
C
      subroutine jlocal(jj, ipy, jjloc)
C
C     precondition:
C        jj is a global Y index.
C     postcondition:
C        ipy, jjloc are the corresponding Y index into the
C        process grid and local Y index.
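C     illustrative example (not from the original source; assumes a
C     hypothetical block size NYlsize = 8):
C        jj = 11  maps to  ipy   = (11-1)/8 + 1     = 2
C                          jjloc = mod(11-1,8) + 1  = 3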
C
      use mesh_parms
      use mesh_common
      integer jj, ipy, jjloc
      ipy = (jj-1) / NYlsize + 1
      jjloc = mod((jj-1), NYlsize) + 1
      return
      end
C ----------------------------------------------------------------
      subroutine klocal(kk, ipz, kkloc)
C
C     precondition:
C        kk is a global Z index.
C     postcondition:
C        ipz, kkloc are the corresponding Z index into the
C        process grid and local Z index.
C
      use mesh_parms
      use mesh_common
      integer kk, ipz, kkloc
c      ipz = (kk-1) / NZlsize + 1
c      kkloc = mod((kk-1), NZlsize) + 1
      return
      end
C ================================================================
C
C     subroutines to map from local to global indices
C
C ================================================================
      subroutine iglobal(ipx, iiloc, ii)
C
C     precondition:
C        ipx, iiloc are an X index into the process grid and
C        a local X index.
C     postcondition:
C        ii is the corresponding global X index.
C
      use mesh_parms
      use mesh_common
      integer ipx, iiloc, ii
      ii = (ipx-1)*NXlsize + iiloc
      return
      end
C ----------------------------------------------------------------
      subroutine jglobal(ipy, jjloc, jj)
C
C     precondition:
C        ipy, jjloc are a Y index into the process grid and
C        a local Y index.
C     postcondition:
C        jj is the corresponding global Y index.
C
      use mesh_parms
      use mesh_common
      integer ipy, jjloc, jj
      jj = (ipy-1)*NYlsize + jjloc
      return
      end
C ----------------------------------------------------------------
      subroutine kglobal(ipz, kkloc, kk)
C
C     precondition:
C        ipz, kkloc are a Z index into the process grid and
C        a local Z index.
C     postcondition:
C        kk is the corresponding global Z index.
C
      use mesh_parms
      use mesh_common
      integer ipz, kkloc, kk
c      kk = (ipz-1)*NZlsize + kkloc
      return
      end
C ================================================================
c     Hard coded for phi periodic
#ifdef ETAPERIODIC
      subroutine Exchange(grid_array,nfields)
      use mesh
      use mesh_common
#ifdef PARALLEL
      use mpistuff
#endif
      integer nfields
c     grid_array is indexed (i,j,k,field) below, so it is declared
c     with the ghosted k-range as well
      double precision:: grid_array(IXLO:IXHI,
     &     IYLO:IYHI,IZLO:IZHI,nfields)
#ifdef PARALLEL
      double precision::
     &     xbuffer_send(NGHOST,IYLO:IYHI,IZLO:IZHI,nfields)
      double precision::
     &     xbuffer_recv(NGHOST,IYLO:IYHI,IZLO:IZHI,nfields)
      double precision::
     &     ybuffer_send(IXLO:IXHI,NGHOST,IZLO:IZHI,nfields)
      double precision::
     &     ybuffer_recv(IXLO:IXHI,NGHOST,IZLO:IZHI,nfields)
      double precision::
     &     zbuffer_send(IXLO:IXHI,IYLO:IYHI,NGHOST,nfields)
      double precision::
     &     zbuffer_recv(IXLO:IXHI,IYLO:IYHI,NGHOST,nfields)
      integer:: XBUFFSIZE, YBUFFSIZE, ZBUFFSIZE
      integer:: ii,jj,mm,kk,l,idest
      integer:: iprocnum
      integer:: msg_id_send_x_low
      integer:: msg_id_send_x_hi
      integer:: msg_id_recv_x_low
      integer:: msg_id_recv_x_hi
      integer:: msg_id_send_y_low
      integer:: msg_id_send_y_hi
      integer:: msg_id_recv_y_low
      integer:: msg_id_recv_y_hi
      integer:: msg_id_send_z_low
      integer:: msg_id_send_z_hi
      integer:: msg_id_recv_z_low
      integer:: msg_id_recv_z_hi
      integer:: ybufsize, zbufsize
c
C     -------X DIRECTION COMMUNICATION
c     Update x-low boundaries
      XBUFFSIZE=nfields*(NGHOST)*(IYHI-IYLO+1)*(IZHI-IZLO+1)
      YBUFFSIZE=nfields*(IXHI-IXLO+1)*(NGHOST)*(IZHI-IZLO+1)
      ZBUFFSIZE=nfields*(IXHI-IXLO+1)*(IYHI-IYLO+1)*(NGHOST)
      if (iprocx .gt. 1) then
c         isource = iprocnum (iprocx-1, iprocy, iprocz)-1
         call MPI_Irecv(xbuffer_recv, XBUFFSIZE, MPI_DOUBLE_PRECISION,
     1        left, MSG_XCH_XHI_TAG, comm3D, msg_id_recv_x_hi, ierr
     2        )
         Call ErrorHandler(ierr,ERROR_RECV)
      endif
      if (iprocx .lt. XPROCS) then
         do l=1,nfields,1
            do kk=IZLO,IZHI
               do jj = IYLO,IYHI
                  do mm = 1, NGHOST
                     xbuffer_send(mm,jj,kk,l) = grid_array(NXlsize+1
     1                    -mm,jj,kk,l)
                  enddo
               enddo
            enddo
         enddo
c         write(6,*) 'Send to', right, iprocx-1
         call MPI_Isend(xbuffer_send, XBUFFSIZE, MPI_DOUBLE_PRECISION,
     1        right, MSG_XCH_XHI_TAG, comm3D, msg_id_send_x_hi, ierr)
         Call ErrorHandler(ierr,ERROR_SEND)
      endif
      if (iprocx .gt. 1) then
         call MPI_Wait(msg_id_recv_x_hi, status, ierr)
         Call ErrorHandler(ierr,ERROR_WAIT)
         do l=1,nfields,1
            do kk=IZLO,IZHI
               do jj = IYLO,IYHI
                  do mm = 1, NGHOST
                     grid_array(1-mm,jj,kk,l) = xbuffer_recv(mm,jj,kk
     1                    ,l)
                  enddo
               enddo
            enddo
         enddo
      endif
      if (iprocx .lt. XPROCS) then
         call MPI_Wait(msg_id_send_x_hi, status, ierr)
         Call ErrorHandler(ierr,ERROR_WAIT)
      endif
C     update x-high boundaries
      if (iprocx .lt. XPROCS) then
c         isource = iprocnum (iprocx+1, iprocy, iprocz)-1
c         write(6,*) 'Recv from', right, iprocx-1
         call MPI_Irecv(xbuffer_recv, XBUFFSIZE, MPI_DOUBLE_PRECISION,
     1        right, MSG_XCH_XLOW_TAG, comm3D, msg_id_recv_x_low,
     2        ierr)
         Call ErrorHandler(ierr,ERROR_RECV)
      endif
      if (iprocx .gt. 1) then
         do l=1,nfields,1
            do kk=IZLO,IZHI
               do jj = IYLO,IYHI
                  do mm = 1, NGHOST
                     xbuffer_send(mm,jj,kk,l) = grid_array(mm,jj,kk,l)
                  enddo
               enddo
            enddo
         enddo
c         write(6,*) 'Send to', left, iprocx-1
         call MPI_Isend(xbuffer_send, XBUFFSIZE, MPI_DOUBLE_PRECISION,
     1        left, MSG_XCH_XLOW_TAG, comm3D, msg_id_send_x_low, ierr
     2        )
         Call ErrorHandler(ierr,ERROR_SEND)
      endif
      if (iprocx .lt. XPROCS) then
         call MPI_Wait(msg_id_recv_x_low, status, ierr)
         Call ErrorHandler(ierr,ERROR_WAIT)
         do l=1,nfields,1
            do kk=IZLO,IZHI
               do jj = IYLO,IYHI
                  do mm = 1, NGHOST
                     grid_array(NXlsize+mm,jj,kk,l) = xbuffer_recv(mm
     1                    ,jj,kk,l)
                  enddo
               enddo
            enddo
         enddo
      endif
      if (iprocx .gt. 1) then
         call MPI_Wait(msg_id_send_x_low, status, ierr)
         Call ErrorHandler(ierr,ERROR_WAIT)
      endif
C     -------Y DIRECTION COMMUNICATION
C     update y-low boundaries
c     ybufsize=(ihi-ilo+1)*(IZHI-IZLO+1)*REAL_SIZE*nfields*NGHOST
c      write(6,*) 'YRecv from', bottom, iprocy-1
      call MPI_Irecv(ybuffer_recv, YBUFFSIZE, MPI_DOUBLE_PRECISION
     1     , bottom, MSG_XCH_YHI_TAG, comm3D, msg_id_recv_y_hi,
     2     ierr)
      Call ErrorHandler(ierr,ERROR_RECV)
      do l=1,nfields,1
         do kk=IZLO,IZHI
            do mm = 1, NGHOST
               do ii = IXLO,IXHI
                  ybuffer_send(ii,mm,kk,l) = grid_array(ii,NYlsize
     1                 +1-mm,kk,l)
               enddo
            enddo
         enddo
      enddo
c      write(6,*) 'YSend to', top, iprocy-1
      call MPI_Isend(ybuffer_send, YBUFFSIZE, MPI_DOUBLE_PRECISION
     1     , top, MSG_XCH_YHI_TAG, comm3D, msg_id_send_y_hi, ierr)
      Call ErrorHandler(ierr,ERROR_SEND)
      call MPI_Wait(msg_id_recv_y_hi, status, ierr)
      Call ErrorHandler(ierr,ERROR_WAIT)
      do l=1,nfields,1
         do kk=IZLO,IZHI
            do mm = 1, NGHOST
               do ii = IXLO,IXHI
                  grid_array(ii,1-mm,kk,l) = ybuffer_recv(ii,mm,kk
     1                 ,l)
               enddo
            enddo
         enddo
      enddo
      call MPI_Wait(msg_id_send_y_hi, status, ierr)
      Call ErrorHandler(ierr,ERROR_WAIT)
C     update y-high boundaries
c      write(6,*) 'YRecv from ', top, iprocy-1
      call MPI_Irecv(ybuffer_recv, YBUFFSIZE, MPI_DOUBLE_PRECISION,
     1     top, MSG_XCH_YLOW_TAG, comm3D, msg_id_recv_y_low,
     2     ierr)
      Call ErrorHandler(ierr,ERROR_RECV)
      do l=1,nfields,1
         do kk=IZLO,IZHI
            do mm = 1, NGHOST
               do ii = IXLO,IXHI
                  ybuffer_send(ii,mm,kk,l) = grid_array(ii,mm,kk,l)
               enddo
            enddo
         enddo
      enddo
c      write(6,*) 'YSend to ', bottom, iprocy-1
      call MPI_Isend(ybuffer_send, YBUFFSIZE, MPI_DOUBLE_PRECISION,
     1     bottom, MSG_XCH_YLOW_TAG, comm3D, msg_id_send_y_low,
     2     ierr)
      Call ErrorHandler(ierr,ERROR_SEND)
      call MPI_Wait(msg_id_recv_y_low, status, ierr)
      Call ErrorHandler(ierr,ERROR_WAIT)
      do l=1,nfields,1
         do kk=IZLO,IZHI
            do mm = 1, NGHOST
               do ii = IXLO,IXHI
                  grid_array(ii,NYlsize+mm,kk,l) = ybuffer_recv(ii
     1                 ,mm,kk,l)
               enddo
            enddo
         enddo
      enddo
      call MPI_Wait(msg_id_send_y_low, status, ierr)
      Call ErrorHandler(ierr,ERROR_WAIT)
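C     (note added for clarity: each direction follows the same
C     non-blocking pattern -- post MPI_Irecv for the incoming ghost
C     planes, pack NGHOST planes into a send buffer, MPI_Isend to the
C     neighbor, MPI_Wait on the receive before unpacking, and
C     MPI_Wait on the send before the buffer is reused.)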
C     -------Z DIRECTION COMMUNICATION
C     update z-low boundaries
c     zbufsize=(ihi-ilo+1)*(IYHI-IYLO+1)*REAL_SIZE*nfields*NGHOST
c      write(6,*) 'ZRecv from ', behind, iprocz-1
      call MPI_Irecv(zbuffer_recv, ZBUFFSIZE, MPI_DOUBLE_PRECISION,
     1     behind, MSG_XCH_ZHI_TAG, comm3D, msg_id_recv_z_hi, ierr
     2     )
      Call ErrorHandler(ierr,ERROR_RECV)
      do l=1,nfields,1
         do mm = 1, NGHOST
            do jj=IYLO,IYHI
               do ii = IXLO,IXHI
                  zbuffer_send(ii,jj,mm,l) = grid_array(ii,jj
     1                 ,NZlsize+1-mm,l)
               enddo
            enddo
         enddo
      enddo
c      write(6,*) 'ZSend to ', forward, iprocz-1
      call MPI_Isend(zbuffer_send, ZBUFFSIZE, MPI_DOUBLE_PRECISION,
     1     forward, MSG_XCH_ZHI_TAG, comm3D, msg_id_send_z_hi,
     2     ierr)
      Call ErrorHandler(ierr,ERROR_SEND)
      call MPI_Wait(msg_id_recv_z_hi, status, ierr)
      Call ErrorHandler(ierr,ERROR_WAIT)
      do l=1,nfields,1
         do mm = 1, NGHOST
            do jj=IYLO,IYHI
               do ii = IXLO,IXHI
                  grid_array(ii,jj,1-mm,l) = zbuffer_recv(ii,jj,mm
     1                 ,l)
               enddo
            enddo
         enddo
      enddo
      call MPI_Wait(msg_id_send_z_hi, status, ierr)
      Call ErrorHandler(ierr,ERROR_WAIT)
C     update z-high boundaries
c      write(6,*) 'ZRecv from ', forward, iprocz-1
      call MPI_Irecv(zbuffer_recv, ZBUFFSIZE, MPI_DOUBLE_PRECISION,
     1     forward, MSG_XCH_ZLOW_TAG, comm3D, msg_id_recv_z_low,
     2     ierr)
      Call ErrorHandler(ierr,ERROR_RECV)
      do l=1,nfields,1
         do mm = 1, NGHOST
            do jj=IYLO,IYHI
               do ii = IXLO,IXHI
                  zbuffer_send(ii,jj,mm,l) = grid_array(ii,jj,mm,l)
               enddo
            enddo
         enddo
      enddo
c      write(6,*) 'Zsend to ', behind, iprocz-1
      call MPI_Isend(zbuffer_send, ZBUFFSIZE, MPI_DOUBLE_PRECISION,
     1     behind, MSG_XCH_ZLOW_TAG, comm3D, msg_id_send_z_low,
     2     ierr)
      Call ErrorHandler(ierr,ERROR_SEND)
      call MPI_Wait(msg_id_recv_z_low, status, ierr)
      Call ErrorHandler(ierr,ERROR_WAIT)
      do l=1,nfields,1
         do mm = 1, NGHOST
            do jj=IYLO,IYHI
               do ii = IXLO,IXHI
                  grid_array(ii,jj,NZlsize+mm,l) = zbuffer_recv(ii
     1                 ,jj,mm,l)
               enddo
            enddo
         enddo
      enddo
      call MPI_Wait(msg_id_send_z_low, status, ierr)
      Call ErrorHandler(ierr,ERROR_WAIT)
#endif
      return
      end
c
#else
c-----------------------------------------------------------------------
c     subroutine mesh_update_bdry_async(grid_array,nfields)
      subroutine Exchange(grid_array,nfields)
      use mesh
      use mesh_common
#ifdef PARALLEL
      use mpistuff
#endif
      integer:: nfields
c     grid_array is indexed (i,j,k,field) below, so it is declared
c     with the ghosted k-range as well
      double precision:: grid_array(IXLO:IXHI,IYLO:IYHI,
     &     IZLO:IZHI,nfields)
#ifdef PARALLEL
      double precision:: xbuffer_send(NGHOST,IYLO:IYHI,
     &     IZLO:IZHI,nfields)
      double precision:: xbuffer_recv(NGHOST,IYLO:IYHI,
     &     IZLO:IZHI,nfields)
      double precision:: ybuffer_send(IXLO:IXHI,NGHOST,
     &     IZLO:IZHI,nfields)
      double precision:: ybuffer_recv(IXLO:IXHI,NGHOST,
     &     IZLO:IZHI,nfields)
      double precision:: zbuffer_send(IXLO:IXHI,IYLO:IYHI,
     &     NGHOST,nfields)
      double precision:: zbuffer_recv(IXLO:IXHI,IYLO:IYHI,
     &     NGHOST,nfields)
      integer:: XBUFFSIZE, YBUFFSIZE, ZBUFFSIZE
      integer:: ii,jj,mm,kk,l,idest
      integer:: iprocnum
      integer:: msg_id_send_x_low
      integer:: msg_id_send_x_hi
      integer:: msg_id_recv_x_low
      integer:: msg_id_recv_x_hi
      integer:: msg_id_send_y_low
      integer:: msg_id_send_y_hi
      integer:: msg_id_recv_y_low
      integer:: msg_id_recv_y_hi
      integer:: msg_id_send_z_low
      integer:: msg_id_send_z_hi
      integer:: msg_id_recv_z_low
      integer:: msg_id_recv_z_hi
      integer:: ybufsize, zbufsize
c
C     -------X DIRECTION COMMUNICATION
c     Update x-low boundaries
      XBUFFSIZE=nfields*(NGHOST)*(IYHI-IYLO+1)*(IZHI-IZLO+1)
      YBUFFSIZE=nfields*(IXHI-IXLO+1)*(NGHOST)*(IZHI-IZLO+1)
      ZBUFFSIZE=nfields*(IXHI-IXLO+1)*(IYHI-IYLO+1)*(NGHOST)
      if (iprocx .gt. 1) then
c         isource = iprocnum (iprocx-1, iprocy, iprocz)-1
         call MPI_Irecv(xbuffer_recv, XBUFFSIZE, MPI_DOUBLE_PRECISION,
     1        left, MSG_XCH_XHI_TAG, comm3D, msg_id_recv_x_hi, ierr
     2        )
         Call ErrorHandler(ierr,ERROR_RECV)
      endif
      if (iprocx .lt. XPROCS) then
         do l=1,nfields,1
            do kk=IZLO,IZHI
               do jj = IYLO,IYHI
                  do mm = 1, NGHOST
                     xbuffer_send(mm,jj,kk,l) = grid_array(NXlsize+1
     1                    -mm,jj,kk,l)
                  enddo
               enddo
            enddo
         enddo
         call MPI_Isend(xbuffer_send, XBUFFSIZE, MPI_DOUBLE_PRECISION,
     1        right, MSG_XCH_XHI_TAG, comm3D, msg_id_send_x_hi, ierr)
         Call ErrorHandler(ierr,ERROR_SEND)
      endif
      if (iprocx .gt. 1) then
         call MPI_Wait(msg_id_recv_x_hi, status, ierr)
         Call ErrorHandler(ierr,ERROR_WAIT)
         do l=1,nfields,1
            do kk=IZLO,IZHI
               do jj = IYLO,IYHI
                  do mm = 1, NGHOST
                     grid_array(1-mm,jj,kk,l) = xbuffer_recv(mm,jj,kk
     1                    ,l)
                  enddo
               enddo
            enddo
         enddo
      endif
      if (iprocx .lt. XPROCS) then
         call MPI_Wait(msg_id_send_x_hi, status, ierr)
         Call ErrorHandler(ierr,ERROR_WAIT)
      endif
C     update x-high boundaries
      if (iprocx .lt. XPROCS) then
c         isource = iprocnum (iprocx+1, iprocy, iprocz)-1
         call MPI_Irecv(xbuffer_recv, XBUFFSIZE, MPI_DOUBLE_PRECISION,
     1        right, MSG_XCH_XLOW_TAG, comm3D, msg_id_recv_x_low,
     2        ierr)
         Call ErrorHandler(ierr,ERROR_RECV)
      endif
      if (iprocx .gt. 1) then
         do l=1,nfields,1
            do kk=IZLO,IZHI
               do jj = IYLO,IYHI
                  do mm = 1, NGHOST
                     xbuffer_send(mm,jj,kk,l) = grid_array(mm,jj,kk,l)
                  enddo
               enddo
            enddo
         enddo
         call MPI_Isend(xbuffer_send, XBUFFSIZE, MPI_DOUBLE_PRECISION,
     1        left, MSG_XCH_XLOW_TAG, comm3D, msg_id_send_x_low, ierr
     2        )
         Call ErrorHandler(ierr,ERROR_SEND)
      endif
      if (iprocx .lt. XPROCS) then
         call MPI_Wait(msg_id_recv_x_low, status, ierr)
         Call ErrorHandler(ierr,ERROR_WAIT)
         do l=1,nfields,1
            do kk=IZLO,IZHI
               do jj = IYLO,IYHI
                  do mm = 1, NGHOST
                     grid_array(NXlsize+mm,jj,kk,l) = xbuffer_recv(mm
     1                    ,jj,kk,l)
                  enddo
               enddo
            enddo
         enddo
      endif
      if (iprocx .gt. 1) then
         call MPI_Wait(msg_id_send_x_low, status, ierr)
         Call ErrorHandler(ierr,ERROR_WAIT)
      endif
C     -------Y DIRECTION COMMUNICATION
C     update y-low boundaries
c     ybufsize=(ihi-ilo+1)*(IZHI-IZLO+1)*REAL_SIZE*nfields*NGHOST
      if (iprocy .gt. 1) then
         call MPI_Irecv(ybuffer_recv, YBUFFSIZE, MPI_DOUBLE_PRECISION
     1        , bottom, MSG_XCH_YHI_TAG, comm3D, msg_id_recv_y_hi,
     2        ierr)
         Call ErrorHandler(ierr,ERROR_RECV)
      endif
      if (iprocy .lt. YPROCS) then
         do l=1,nfields,1
            do kk=IZLO,IZHI
               do mm = 1, NGHOST
                  do ii = IXLO,IXHI
                     ybuffer_send(ii,mm,kk,l) = grid_array(ii,NYlsize
     1                    +1-mm,kk,l)
                  enddo
               enddo
            enddo
         enddo
         call MPI_Isend(ybuffer_send, YBUFFSIZE, MPI_DOUBLE_PRECISION
     1        , top, MSG_XCH_YHI_TAG, comm3D, msg_id_send_y_hi, ierr)
         Call ErrorHandler(ierr,ERROR_SEND)
      endif
      if (iprocy .gt. 1) then
         call MPI_Wait(msg_id_recv_y_hi, status, ierr)
         Call ErrorHandler(ierr,ERROR_WAIT)
         do l=1,nfields,1
            do kk=IZLO,IZHI
               do mm = 1, NGHOST
                  do ii = IXLO,IXHI
                     grid_array(ii,1-mm,kk,l) = ybuffer_recv(ii,mm,kk
     1                    ,l)
                  enddo
               enddo
            enddo
         enddo
      endif
      if (iprocy .lt. YPROCS) then
         call MPI_Wait(msg_id_send_y_hi, status, ierr)
         Call ErrorHandler(ierr,ERROR_WAIT)
      endif
C     update y-high boundaries
      if (iprocy .lt. YPROCS) then
         call MPI_Irecv(ybuffer_recv, YBUFFSIZE, MPI_DOUBLE_PRECISION,
     1        top, MSG_XCH_YLOW_TAG, comm3D, msg_id_recv_y_low,
     2        ierr)
         Call ErrorHandler(ierr,ERROR_RECV)
      endif
      if (iprocy .gt. 1) then
         do l=1,nfields,1
            do kk=IZLO,IZHI
               do mm = 1, NGHOST
                  do ii = IXLO,IXHI
                     ybuffer_send(ii,mm,kk,l) = grid_array(ii,mm,kk,l)
                  enddo
               enddo
            enddo
         enddo
         call MPI_Isend(ybuffer_send, YBUFFSIZE, MPI_DOUBLE_PRECISION,
     1        bottom, MSG_XCH_YLOW_TAG, comm3D, msg_id_send_y_low,
     2        ierr)
         Call ErrorHandler(ierr,ERROR_SEND)
      endif
      if (iprocy .lt. YPROCS) then
         call MPI_Wait(msg_id_recv_y_low, status, ierr)
         Call ErrorHandler(ierr,ERROR_WAIT)
         do l=1,nfields,1
            do kk=IZLO,IZHI
               do mm = 1, NGHOST
                  do ii = IXLO,IXHI
                     grid_array(ii,NYlsize+mm,kk,l) = ybuffer_recv(ii
     1                    ,mm,kk,l)
                  enddo
               enddo
            enddo
         enddo
      endif
      if (iprocy .gt. 1) then
         call MPI_Wait(msg_id_send_y_low, status, ierr)
         Call ErrorHandler(ierr,ERROR_WAIT)
      endif
C     -------Z DIRECTION COMMUNICATION
C     update z-low boundaries
c     zbufsize=(ihi-ilo+1)*(IYHI-IYLO+1)*REAL_SIZE*nfields*NGHOST
      call MPI_Irecv(zbuffer_recv, ZBUFFSIZE, MPI_DOUBLE_PRECISION,
     1     behind, MSG_XCH_ZHI_TAG, comm3D, msg_id_recv_z_hi, ierr
     2     )
      Call ErrorHandler(ierr,ERROR_RECV)
      do l=1,nfields,1
         do mm = 1, NGHOST
            do jj=IYLO,IYHI
               do ii = IXLO,IXHI
                  zbuffer_send(ii,jj,mm,l) = grid_array(ii,jj
     1                 ,NZlsize+1-mm,l)
               enddo
            enddo
         enddo
      enddo
      call MPI_Isend(zbuffer_send, ZBUFFSIZE, MPI_DOUBLE_PRECISION,
     1     forward, MSG_XCH_ZHI_TAG, comm3D, msg_id_send_z_hi,
     2     ierr)
      Call ErrorHandler(ierr,ERROR_SEND)
      call MPI_Wait(msg_id_recv_z_hi, status, ierr)
      Call ErrorHandler(ierr,ERROR_WAIT)
      do l=1,nfields,1
         do mm = 1, NGHOST
            do jj=IYLO,IYHI
               do ii = IXLO,IXHI
                  grid_array(ii,jj,1-mm,l) = zbuffer_recv(ii,jj,mm
     1                 ,l)
               enddo
            enddo
         enddo
      enddo
      call MPI_Wait(msg_id_send_z_hi, status, ierr)
      Call ErrorHandler(ierr,ERROR_WAIT)
C     update z-high boundaries
      call MPI_Irecv(zbuffer_recv, ZBUFFSIZE, MPI_DOUBLE_PRECISION,
     1     forward, MSG_XCH_ZLOW_TAG, comm3D, msg_id_recv_z_low,
     2     ierr)
      Call ErrorHandler(ierr,ERROR_RECV)
      do l=1,nfields,1
         do mm = 1, NGHOST
            do jj=IYLO,IYHI
               do ii = IXLO,IXHI
                  zbuffer_send(ii,jj,mm,l) = grid_array(ii,jj,mm,l)
               enddo
            enddo
         enddo
      enddo
      call MPI_Isend(zbuffer_send, ZBUFFSIZE, MPI_DOUBLE_PRECISION,
     1     behind, MSG_XCH_ZLOW_TAG, comm3D, msg_id_send_z_low,
     2     ierr)
      Call ErrorHandler(ierr,ERROR_SEND)
      call MPI_Wait(msg_id_recv_z_low, status, ierr)
      Call ErrorHandler(ierr,ERROR_WAIT)
      do l=1,nfields,1
         do mm = 1, NGHOST
            do jj=IYLO,IYHI
               do ii = IXLO,IXHI
                  grid_array(ii,jj,NZlsize+mm,l) = zbuffer_recv(ii
     1                 ,jj,mm,l)
               enddo
            enddo
         enddo
      enddo
      call MPI_Wait(msg_id_send_z_low, status, ierr)
      Call ErrorHandler(ierr,ERROR_WAIT)
#endif
      return
      end
#endif
c-----------------------------------------------------------------------
      subroutine ErrorHandler(mpierr,errortype)
      use mesh_common
#ifdef PARALLEL
      use mpistuff
#endif
      integer:: mpierr,errortype
#ifdef PARALLEL
      if(mpierr.ne.MPI_SUCCESS) then
         write(0,*) 'FLUID: MPI RETURN VALUE',iproc_idx,mpierr,
     1        errortype
      endif
#endif
      return
      end
c-----------------------------------------------------------------------
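c     Usage sketch (illustrative only; the array name u and field
c     count nvar below are assumptions, not identifiers from this
c     file): a rank-local solution array dimensioned over the ghosted
c     index ranges has its NGHOST-deep ghost layers refreshed by a
c     single call to Exchange after each update, e.g.
c$$$      double precision:: u(IXLO:IXHI,IYLO:IYHI,IZLO:IZHI,nvar)
c$$$      call Exchange(u,nvar)
c-----------------------------------------------------------------------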