ADflow v1.0
ADflow is a finite volume RANS solver tailored for gradient-based aerodynamic design optimization.
determineDonors.F90
Go to the documentation of this file.
1 ! This subroutine takes a list of all local fringes, sorts it,
2 ! then communicates the required fringes back to their donor
3 ! procs, and flags all required cells as donors. This routine
4 ! does double duty: If the flag useWall is set, it flags the
5 ! donor cells as wallDonors, otherwise it flags the cells as
6 ! regular donors. We do this since the communication structure
7 ! for these two operations are identical
8 
! This subroutine takes a list of all local fringes, sorts it,
! then communicates the required fringes back to their donor
! procs, and flags all required cells as donors. This routine
! does double duty: if the flag useWall is set, it flags the
! donor cells as wallDonors, otherwise it flags the cells as
! regular donors. We do this since the communication structure
! for these two operations is identical.

subroutine determinedonors(level, sps, fringeList, nFringe, useWall)

    use constants
    use block, only: fringetype, flowdoms
    use utils, only: echk, setpointers
    ! NOTE(review): myid, nproc, adflow_integer, adflow_comm_world,
    ! sendRequests and recvRequests are module entities brought in by
    ! use statements omitted from this excerpt (presumably the
    ! communication module) -- confirm against the full source.
    implicit none

    ! Input Params
    ! level      : multigrid level the fringes refer to
    ! sps        : spectral (time) instance index
    ! nFringe    : number of entries in fringeList
    ! fringeList : local fringes; sorted in place by donor processor
    ! useWall    : .true.  -> flag the single identified donor cell as
    !                         a wall donor
    !              .false. -> flag the full 2x2x2 donor stencil as
    !                         regular donors
    integer(kind=intType), intent(in) :: level, sps, nFringe
    type(fringetype), intent(inout), dimension(nFringe) :: fringeList
    logical, intent(in) :: useWall

    ! Working
    integer(kind=intType), dimension(:), allocatable :: fringeProc, cumFringeProc
    integer(kind=intType), dimension(:), allocatable :: tmpInt
    integer(kind=intType), dimension(:), allocatable :: recvSizes
    integer(kind=intType), dimension(:), allocatable :: intSendBuf, intRecvBuf
    integer(kind=intType) :: i, j, k, ii, jj, kk, iii, jjj, kkk, nn, index
    integer(kind=intType) :: il, jl, kl, dIndex
    integer(kind=intType) :: iStart, iEnd, iProc, iSize, nFringeProc
    integer(kind=intType) :: sendCount, recvCount, ierr, totalRecvSize
    integer :: mpiStatus(MPI_STATUS_SIZE)

    ! First sort the fringes such that they are grouped by destination
    ! processor.
    call qsortfringetype(fringeList, nFringe, sortByDonor)

    !-----------------------------------------------------------------
    ! Step 15: Now we can scan through the fringeList which is
    ! guaranteed to be sorted by processor. We scan through the
    ! (sorted) list sequentially, detecting where the processor splits
    ! are. Then we fire that off to the processor that needs it, again
    ! using the dynamic sparse data exchange. For the section that is
    ! on-processor, we can do that donor flagging overlapped with the
    ! communication.
    ! -----------------------------------------------------------------

    allocate (fringeProc(nproc), cumFringeProc(1:nproc + 1))
    call computefringeprocarray(fringeList, nFringe, &
                                fringeProc, cumFringeProc, nFringeProc)

    ! nFringeProc is the total number of donor processors from
    ! fringeList. fringeProc(1:nFringeProc) are the donor processors
    ! for the fringes; cumFringeProc(1:nFringeProc) are the cumulative
    ! offsets into the local fringe array. Note that we have
    ! over-estimated their size as nProc.

    ! Determine how many integers we must send to each (off-processor)
    ! donor: 2 per fringe (donorBlock and dIndex).
    allocate (tmpInt(0:nproc - 1), recvSizes(0:nproc - 1))
    tmpInt = 0
    do j = 1, nFringeProc
        iProc = fringeProc(j)
        if (iProc /= myid) then
            tmpInt(iProc) = (cumFringeProc(j + 1) - cumFringeProc(j)) * 2
        end if
    end do

    ! Sum how much data we must receive from each processor.
    call mpi_alltoall(tmpInt, 1, adflow_integer, recvSizes, 1, adflow_integer, &
                      adflow_comm_world, ierr)
    call echk(ierr, __FILE__, __LINE__)

    ! We will need to send 2 integers to the donor processor:
    ! donorBlock, dIndex

    ! Allocate space for the sending and receiving buffers
    totalRecvSize = sum(recvSizes)
    allocate (intSendBuf(2 * nFringe), intRecvBuf(totalRecvSize))

    ! Pack the full send buffer with (donorBlock, dIndex) pairs.
    do j = 1, nFringe
        intSendBuf(2 * j - 1) = fringeList(j)%donorBlock
        intSendBuf(2 * j) = fringeList(j)%dIndex
    end do

    ! Send the donors back to their own processors. The message tag is
    ! the sender's rank (myid), matched by tag=iProc on the receive side.
    sendCount = 0
    do j = 1, nFringeProc

        iProc = fringeProc(j)
        iStart = (cumFringeProc(j) - 1) * 2 + 1
        iSize = (cumFringeProc(j + 1) - cumFringeProc(j)) * 2

        if (iProc /= myid) then
            sendCount = sendCount + 1
            call mpi_isend(intSendBuf(iStart), iSize, adflow_integer, iProc, myid, &
                           adflow_comm_world, sendRequests(sendCount), ierr)
            call echk(ierr, __FILE__, __LINE__)
        end if
    end do

    ! Non-blocking receives: one per processor that has data for us,
    ! landing contiguously in intRecvBuf ordered by sender rank.
    recvCount = 0
    ii = 1
    do iProc = 0, nproc - 1

        if (recvSizes(iProc) > 0) then
            recvCount = recvCount + 1
            call mpi_irecv(intRecvBuf(ii), recvSizes(iProc), adflow_integer, &
                           iProc, iProc, adflow_comm_world, recvRequests(recvCount), ierr)
            call echk(ierr, __FILE__, __LINE__)

            ii = ii + recvSizes(iProc)
        end if
    end do

    ! Local work to do while we wait for the data to send/recv: flag
    ! the donors whose fringes live on this processor.
    do j = 1, nFringeProc

        iProc = fringeProc(j)
        iStart = cumFringeProc(j)
        iEnd = cumFringeProc(j + 1) - 1

        if (iProc == myid) then
            do i = iStart, iEnd
                nn = fringeList(i)%donorBlock
                il = flowdoms(nn, level, sps)%il
                jl = flowdoms(nn, level, sps)%jl
                kl = flowdoms(nn, level, sps)%kl
                dIndex = fringeList(i)%dIndex
                ! Recover the (i,j,k) donor indices from the linear index.
                call unwindindex(dIndex, il, jl, kl, iii, jjj, kkk)

                ! For the wall donors, we just flag the 1 cell that was
                ! identified in the fringe search based on the octant.
                if (useWall) then
                    call setiswalldonor(flowdoms(nn, level, sps)%status(iii, jjj, kkk), .true.)
                else
                    ! Regular donors: flag the full 2x2x2 interpolation
                    ! stencil anchored at (iii, jjj, kkk).
                    do kk = 0, 1
                        do jj = 0, 1
                            do ii = 0, 1
                                call setisdonor( &
                                    flowdoms(nn, level, sps)%status(iii + ii, jjj + jj, kkk + kk), .true.)
                            end do
                        end do
                    end do
                end if
            end do
        end if
    end do

    ! Complete all the sends/receives. We could do overlapping here
    ! like the first comm for the fringes/blocks.
    do i = 1, recvCount
        call mpi_waitany(recvCount, recvRequests, index, mpiStatus, ierr)
        call echk(ierr, __FILE__, __LINE__)
    end do

    do i = 1, sendCount
        call mpi_waitany(sendCount, sendRequests, index, mpiStatus, ierr)
        call echk(ierr, __FILE__, __LINE__)
    end do

    ! Loop over the full receive buffer, which should now be full, and
    ! flag the donors requested by the other processors.
    do j = 1, totalRecvSize / 2

        nn = intRecvBuf(2 * j - 1)
        il = flowdoms(nn, level, sps)%il
        jl = flowdoms(nn, level, sps)%jl
        kl = flowdoms(nn, level, sps)%kl

        dIndex = intRecvBuf(2 * j)
        call unwindindex(dIndex, il, jl, kl, iii, jjj, kkk)

        if (useWall) then
            call setiswalldonor(flowdoms(nn, level, sps)%status(iii, jjj, kkk), .true.)
        else
            do kk = 0, 1
                do jj = 0, 1
                    do ii = 0, 1
                        call setisdonor(flowdoms(nn, level, sps)%status(iii + ii, jjj + jj, kkk + kk), .true.)
                    end do
                end do
            end do
        end if
    end do

    ! Finished with the buffers and allocatable arrays
    deallocate (intSendBuf, intRecvBuf, fringeProc, cumFringeProc, tmpInt, recvSizes)

end subroutine determinedonors
subroutine determinedonors(level, sps, fringeList, nFringe, useWall)
Definition: block.F90:1
type(blocktype), dimension(:, :, :), allocatable, target flowdoms
Definition: block.F90:771
integer, dimension(:), allocatable recvrequests
integer, dimension(:), allocatable sendrequests
integer adflow_comm_world
integer(kind=inttype), parameter sortbydonor
Definition: constants.F90:324
integer(kind=inttype) nclusters
Definition: overset.F90:366
integer(kind=inttype) ndomtotal
Definition: overset.F90:365
integer(kind=inttype), dimension(:), allocatable clusters
Definition: overset.F90:367
subroutine setisdonor(i, flag)
subroutine unwindindex(index, il, jl, kl, i, j, k)
subroutine qsortfringetype(arr, nn, sortType)
subroutine computefringeprocarray(fringes, n, fringeProc, cumFringeProc, nFringeProc)
subroutine setiswalldonor(i, flag)
Definition: utils.F90:1
subroutine echk(errorcode, file, line)
Definition: utils.F90:5722
subroutine setpointers(nn, mm, ll)
Definition: utils.F90:3237