ADflow  v1.0
ADflow is a finite volume RANS solver tailored for gradient-based aerodynamic design optimization.
oversetCommUtilites.F90
Go to the documentation of this file.
2 
3 contains
4 
5  subroutine getcommpattern(oMat, sendList, nSend, recvList, nRecv)
 ! getCommPattern scans the overlap matrix oMat and fills in, for this
 ! processor, the (proc, global iDom) pairs that must be sent
 ! (sendList, count nSend) and received (recvList, count nRecv).
 ! sendList/recvList are caller-allocated assumed-shape arrays.
 ! NOTE(review): nDomTotal, nDomProc and cumDomProc are used below but
 ! their 'use' line is not visible in this excerpt (original line 8 is
 ! missing) — presumably they come from an overset/partitioning module.
6 
7  use constants
9  use blockpointers, only: ndom
10  use communication, only: nproc, myid
11  use sorting, only: unique
12  implicit none
13 
14  ! Input/output
15  type(csrmatrix), intent(in) :: oMat
16  integer(kind=intType), intent(out) :: sendList(:, :), recvList(:, :)
17  integer(kind=intType), intent(out) :: nSend, nRecv
18 
19  ! Working:
20  integer(kind=intType) :: nn, iDom, nnRow, i, jj, ii, nUniqueProc, iProc
21  integer(kind=intType), dimension(:), allocatable :: procsForThisRow, inverse, blkProc
22  logical :: added
23 
24  ! Generic routine to determine what I need to send/recv based on the
25  ! data provided in the overlap matrix. The '2' in the send and
26  ! receive lists will record the processor and the global 'idom'
27  ! index, which is sufficient to use for the subsequent communication
28  ! structure.
29 
30  nsend = 0
31  nrecv = 0
32 
33  ! These variables are used to compact the sending of
34  ! blocks/fringes. The logic is as follows: A different
35  ! processor may need a block/fringe for more than 1 search. This
36  ! is manifested by having two or more entries in the rows (or
37  ! columns) I own. It would be inefficient to send the same data
38  ! to the same processor more than once, so we "uniquify" the
39  ! processors before we send. There is also another salient
40  ! reason: If we were to send the same data twice, and the other
41  ! processor started using the data, we could get a race condition
42  ! as it modified the received fringes (during a search) while
43  ! the same fringes were being overwritten by the receive operation.
44 
45  allocate (procsforthisrow(ndomtotal), inverse(ndomtotal), blkproc(ndomtotal))
46 
 ! Build blkProc: blkProc(iDom) is the rank that owns global block iDom,
 ! derived from the per-processor domain counts.
47  ii = 0
48  do iproc = 0, nproc - 1
49  do i = 1, ndomproc(iproc)
50  ii = ii + 1
51  blkproc(ii) = iproc
52  end do
53  end do
54 
55  ! Loop over the owned rows of the normal matrix
56  do nn = 1, ndom
57  idom = cumdomproc(myid) + nn
58  nnrow = omat%rowPtr(idom + 1) - omat%rowPtr(idom)
59  procsforthisrow(1:nnrow) = omat%assignedProc(omat%rowPtr(idom):omat%rowPtr(idom + 1) - 1)
 ! Remove duplicate processors so each remote rank is sent to at most once.
60  call unique(procsforthisrow, nnrow, nuniqueproc, inverse)
61 
62  do jj = 1, nuniqueproc
63  if (procsforthisrow(jj) /= myid) then
64  ! This intersection requires a row quantity from me
65  nsend = nsend + 1
66  sendlist(1, nsend) = procsforthisrow(jj)
67  sendlist(2, nsend) = idom
68  end if
69  end do
70  end do
71 
72  ! Now we loop back through the whole matrix looking at what I have
73  ! to do. If there is a row I don't own, I will have to receive it:
74 
75  do idom = 1, omat%nRow
76  added = .false.
77  rowloop: do jj = omat%rowPtr(idom), omat%rowPtr(idom + 1) - 1
78 
79  ! I have to do this intersection
80  if (omat%assignedProc(jj) == myid) then
81 
82  ! But I don't know the row entry
83  if (.not. (idom > cumdomproc(myid) .and. idom <= cumdomproc(myid + 1))) then
84 
85  ! Need to back out what proc the iDom corresponds to:
86  nrecv = nrecv + 1
87  recvlist(1, nrecv) = blkproc(idom)
88  recvlist(2, nrecv) = idom
89  added = .true.
90  end if
91  end if
92 
93  ! Just move on to the next row since we only need to receive it once.
94  if (added) then
95  exit rowloop
96  end if
97  end do rowloop
98  end do
99 
100  deallocate (procsforthisrow, inverse, blkproc)
101  end subroutine getcommpattern
102 
103  subroutine getosurfcommpattern(oMat, oMatT, sendList, nSend, &
104  recvList, nRecv, rBufSize)
105 
106  ! This subroutine gets the comm pattern to send the oWall types.
 ! Unlike getCommPattern it considers both the overlap matrix and its
 ! transpose (rows AND columns touching this proc), and skips any
 ! global block whose real-buffer size rBufSize is zero (empty oWall).
 ! NOTE(review): nDomTotal/nDomProc/cumDomProc and the csrmatrix type
 ! come from a 'use' line not visible in this excerpt (original line
 ! 109 is missing).
107  use constants
108  use blockpointers, only: ndom
110  use communication, only: myid, nproc
111  use sorting, only: unique
112  implicit none
113 
114  ! Input/output
115  type(csrmatrix), intent(in) :: oMat, oMatT
116  integer(kind=intType), intent(out) :: sendList(:, :), recvList(:, :)
117  integer(kind=intType), intent(out) :: nSend, nRecv
118  integer(kind=intType), intent(in) :: rBufSize(nDomTotal)
119 
120  ! Working:
121  integer(kind=intType) :: nn, iDom, jDom, nnRow, nnRowT, i, jj, ii, nUniqueProc, iProc
122  integer(kind=intType), dimension(:), allocatable :: blkProc, toRecv
123  integer(kind=intType), dimension(:), allocatable :: procsForThisRow, inverse
124 
125  nsend = 0
126  nrecv = 0
127 
 ! Scratch arrays are 2*nDomTotal long because a row's entries and the
 ! transposed row's entries are concatenated before uniquifying.
128  allocate (procsforthisrow(2 * ndomtotal), inverse(2 * ndomtotal), blkproc(ndomtotal))
129 
 ! blkProc(iDom) gives the rank that owns global block iDom.
130  ii = 0
131  do iproc = 0, nproc - 1
132  do i = 1, ndomproc(iproc)
133  ii = ii + 1
134  blkproc(ii) = iproc
135  end do
136  end do
137 
138  ! Loop over the owned rows of the regular matrix and the rows
139  ! transposed matrix (ie columns of the regular matrix)
140  do nn = 1, ndom
141 
142  idom = cumdomproc(myid) + nn
143 
144  ! Only deal with this block if the rbuffer size for the oWall is
145  ! greater than zero. If it is zero, it is empty we don't need to
146  ! deal with it.
147  if (rbufsize(idom) > 0) then
148 
149  nnrow = omat%rowPtr(idom + 1) - omat%rowPtr(idom)
150  procsforthisrow(1:nnrow) = omat%assignedProc(omat%rowPtr(idom):omat%rowPtr(idom + 1) - 1)
151 
152  nnrowt = omatt%rowPtr(idom + 1) - omatt%rowPtr(idom)
153  procsforthisrow(nnrow + 1:nnrow + nnrowt) = &
154  omatt%assignedProc(omatt%rowPtr(idom):omatt%rowPtr(idom + 1) - 1)
155 
 ! Collapse duplicates so each remote rank appears only once.
156  call unique(procsforthisrow, nnrow + nnrowt, nuniqueproc, inverse)
157 
158  do jj = 1, nuniqueproc
159  if (procsforthisrow(jj) /= myid) then
160  ! This intersection requires a row quantity from me
161  nsend = nsend + 1
162  sendlist(1, nsend) = procsforthisrow(jj)
163  sendlist(2, nsend) = idom
164  end if
165  end do
166  end if
167  end do
168 
169  ! Now we loop back through the whole matrix looking at what I have
170  ! to do. If there is a row or column I don't own, I will have to receive it:
 ! toRecv is a 0/1 flag per global block so each block is received once.
171  allocate (torecv(ndomtotal))
172  torecv = 0
173  do idom = 1, ndomtotal
174  do jj = omat%rowPtr(idom), omat%rowPtr(idom + 1) - 1
175  jdom = omat%colInd(jj)
176  if (omat%assignedProc(jj) == myid) then
177  ! I don't have the row entry:
178  if (.not. (idom > cumdomproc(myid) .and. idom <= cumdomproc(myid + 1))) then
179  torecv(idom) = 1
180  end if
181  ! Don't have the column entry:
182  if (.not. (jdom > cumdomproc(myid) .and. jdom <= cumdomproc(myid + 1))) then
183  torecv(jdom) = 1
184  end if
185  end if
186  end do
187  end do
188 
189  ! Now loop back through and set my recvList. Only add if the
190  ! rBufferSize is larger than zero.
191  do idom = 1, ndomtotal
192  if (torecv(idom) == 1 .and. rbufsize(idom) > 0) then
193  nrecv = nrecv + 1
194  recvlist(1, nrecv) = blkproc(idom)
195  recvlist(2, nrecv) = idom
196  end if
197  end do
198 
199  deallocate (procsforthisrow, inverse, blkproc, torecv)
200  end subroutine getosurfcommpattern
201 
subroutine sendoblock(oBlock, iDom, iProc, tagOffset, sendCount)

    ! Post the pair of non-blocking sends (real buffer, then integer
    ! buffer) that ship an oversetBlock to rank iProc. Both messages
    ! share the tag iDom + tagOffset; sendCount is advanced once per
    ! posted request so the caller can wait on them later.
    ! NOTE(review): sendRequests comes from a module 'use' line not
    ! visible in this excerpt.

    use constants
    use oversetdata, only: oversetblock
    use utils, only: echk
    implicit none

    ! Input/Output
    type(oversetblock), intent(inout) :: oBlock
    integer(kind=intType), intent(in) :: iProc, iDom, tagOffset
    integer(kind=intType), intent(inout) :: sendCount

    ! Working
    integer(kind=intType) :: msgTag, ierr

    msgTag = iDom + tagOffset

    ! Real-valued buffer first.
    sendCount = sendCount + 1
    call mpi_isend(oBlock%rBuffer, size(oBlock%rBuffer), adflow_real, &
                   iProc, msgTag, adflow_comm_world, sendRequests(sendCount), ierr)
    call echk(ierr, __FILE__, __LINE__)

    ! Then the integer buffer with the same tag.
    sendCount = sendCount + 1
    call mpi_isend(oBlock%iBuffer, size(oBlock%iBuffer), adflow_integer, &
                   iProc, msgTag, adflow_comm_world, sendRequests(sendCount), ierr)
    call echk(ierr, __FILE__, __LINE__)

end subroutine sendoblock
230 
subroutine sendofringe(oFringe, iDom, iProc, tagOffset, sendCount)

    ! Post the pair of non-blocking sends (real buffer, then integer
    ! buffer) that ship an oversetFringe to rank iProc. Both messages
    ! use the tag iDom + tagOffset; sendCount is advanced once per
    ! posted request.
    ! NOTE(review): sendRequests comes from a module 'use' line not
    ! visible in this excerpt.

    use constants
    use oversetdata, only: oversetfringe
    use utils, only: echk
    implicit none

    ! Input/Output
    type(oversetfringe), intent(inout) :: oFringe
    integer(kind=intType), intent(in) :: iProc, iDom, tagOffset
    integer(kind=intType), intent(inout) :: sendCount

    ! Working
    integer(kind=intType) :: msgTag, ierr

    msgTag = iDom + tagOffset

    ! Real-valued buffer first.
    sendCount = sendCount + 1
    call mpi_isend(oFringe%rBuffer, size(oFringe%rBuffer), adflow_real, &
                   iProc, msgTag, adflow_comm_world, sendRequests(sendCount), ierr)
    call echk(ierr, __FILE__, __LINE__)

    ! Then the integer buffer with the same tag.
    sendCount = sendCount + 1
    call mpi_isend(oFringe%iBuffer, size(oFringe%iBuffer), adflow_integer, &
                   iProc, msgTag, adflow_comm_world, sendRequests(sendCount), ierr)
    call echk(ierr, __FILE__, __LINE__)

end subroutine sendofringe
259 
subroutine sendosurf(oWall, iDom, iProc, tagOffset, sendCount)

    ! Post the pair of non-blocking sends (real buffer, then integer
    ! buffer) that ship an oversetWall surface to rank iProc. Both
    ! messages use the tag iDom + tagOffset; sendCount is advanced
    ! once per posted request.
    ! NOTE(review): sendRequests comes from a module 'use' line not
    ! visible in this excerpt.

    use constants
    use oversetdata, only: oversetwall
    use utils, only: echk
    implicit none

    ! Input/Output
    type(oversetwall), intent(inout) :: oWall
    integer(kind=intType), intent(in) :: iProc, iDom, tagOffset
    integer(kind=intType), intent(inout) :: sendCount

    ! Working
    integer(kind=intType) :: msgTag, ierr

    msgTag = iDom + tagOffset

    ! Real-valued buffer first.
    sendCount = sendCount + 1
    call mpi_isend(oWall%rBuffer, size(oWall%rBuffer), adflow_real, &
                   iProc, msgTag, adflow_comm_world, sendRequests(sendCount), ierr)
    call echk(ierr, __FILE__, __LINE__)

    ! Then the integer buffer with the same tag.
    sendCount = sendCount + 1
    call mpi_isend(oWall%iBuffer, size(oWall%iBuffer), adflow_integer, &
                   iProc, msgTag, adflow_comm_world, sendRequests(sendCount), ierr)
    call echk(ierr, __FILE__, __LINE__)

end subroutine sendosurf
288 
subroutine recvoblock(oBlock, iDom, iProc, tagOffset, iSize, rSize, &
                      recvCount, recvInfo)

    ! Allocate the receive buffers of an oversetBlock and post the
    ! matching pair of non-blocking receives (real, then integer)
    ! from rank iProc. Each posted request is logged in recvInfo as
    ! (iDom, code) — code 1 for the real buffer, 2 for the integer
    ! buffer — so the completion loop knows what arrived.
    ! NOTE(review): recvRequests comes from a module 'use' line not
    ! visible in this excerpt.

    use constants
    use oversetdata, only: oversetblock
    use utils, only: echk
    implicit none

    ! Input/Output
    type(oversetblock), intent(inout) :: oBlock
    integer(kind=intType), intent(in) :: iDom, iProc, tagOffset, rSize, iSize
    integer(kind=intType), intent(inout) :: recvCount
    integer(kind=intType), intent(inout) :: recvInfo(2, recvCount + 2)

    ! Working
    integer(kind=intType) :: msgTag, ierr

    msgTag = iDom + tagOffset

    ! Fresh buffers sized by the sender-supplied counts.
    allocate (oBlock%rBuffer(rSize), oBlock%iBuffer(iSize))

    recvCount = recvCount + 1
    call mpi_irecv(oBlock%rBuffer, rSize, adflow_real, &
                   iProc, msgTag, adflow_comm_world, recvRequests(recvCount), ierr)
    call echk(ierr, __FILE__, __LINE__)
    recvInfo(:, recvCount) = [iDom, 1]

    recvCount = recvCount + 1
    call mpi_irecv(oBlock%iBuffer, iSize, adflow_integer, &
                   iProc, msgTag, adflow_comm_world, recvRequests(recvCount), ierr)
    call echk(ierr, __FILE__, __LINE__)
    recvInfo(:, recvCount) = [iDom, 2]

end subroutine recvoblock
323 
subroutine recvofringe(oFringe, iDom, iProc, tagOffset, iSize, rSize, &
                       recvCount, recvInfo)

    ! Allocate the receive buffers of an oversetFringe and post the
    ! matching pair of non-blocking receives (real, then integer)
    ! from rank iProc. Each posted request is logged in recvInfo as
    ! (iDom, code) — code 3 for the real buffer, 4 for the integer
    ! buffer.
    ! NOTE(review): recvRequests comes from a module 'use' line not
    ! visible in this excerpt.

    use constants
    use oversetdata, only: oversetfringe
    use utils, only: echk
    implicit none

    ! Input/Output
    type(oversetfringe), intent(inout) :: oFringe
    integer(kind=intType), intent(in) :: iDom, iProc, tagOffset, rSize, iSize
    integer(kind=intType), intent(inout) :: recvCount
    integer(kind=intType), intent(inout) :: recvInfo(2, recvCount + 2)

    ! Working
    integer(kind=intType) :: msgTag, ierr

    msgTag = iDom + tagOffset

    ! Fresh buffers sized by the sender-supplied counts.
    allocate (oFringe%rBuffer(rSize), oFringe%iBuffer(iSize))

    recvCount = recvCount + 1
    call mpi_irecv(oFringe%rBuffer, rSize, adflow_real, &
                   iProc, msgTag, adflow_comm_world, recvRequests(recvCount), ierr)
    call echk(ierr, __FILE__, __LINE__)
    recvInfo(:, recvCount) = [iDom, 3]

    recvCount = recvCount + 1
    call mpi_irecv(oFringe%iBuffer, iSize, adflow_integer, &
                   iProc, msgTag, adflow_comm_world, recvRequests(recvCount), ierr)
    call echk(ierr, __FILE__, __LINE__)
    recvInfo(:, recvCount) = [iDom, 4]

end subroutine recvofringe
358 
subroutine recvosurf(oWall, iDom, iProc, tagOffset, iSize, rSize, &
                     recvCount, recvInfo)

    ! Allocate the receive buffers of an oversetWall and post the
    ! matching pair of non-blocking receives (real, then integer)
    ! from rank iProc. Each posted request is logged in recvInfo as
    ! (iDom, code) — code 5 for the real buffer, 6 for the integer
    ! buffer.
    ! NOTE(review): recvRequests comes from a module 'use' line not
    ! visible in this excerpt.

    use constants
    use oversetdata, only: oversetwall
    use utils, only: echk
    implicit none

    ! Input/Output
    type(oversetwall), intent(inout) :: oWall
    integer(kind=intType), intent(in) :: iDom, iProc, tagOffset, rSize, iSize
    integer(kind=intType), intent(inout) :: recvCount
    integer(kind=intType), intent(inout) :: recvInfo(2, recvCount + 2)

    ! Working
    integer(kind=intType) :: msgTag, ierr

    msgTag = iDom + tagOffset

    ! Fresh buffers sized by the sender-supplied counts.
    allocate (oWall%rBuffer(rSize), oWall%iBuffer(iSize))

    recvCount = recvCount + 1
    call mpi_irecv(oWall%rBuffer, rSize, adflow_real, &
                   iProc, msgTag, adflow_comm_world, recvRequests(recvCount), ierr)
    call echk(ierr, __FILE__, __LINE__)
    recvInfo(:, recvCount) = [iDom, 5]

    recvCount = recvCount + 1
    call mpi_irecv(oWall%iBuffer, iSize, adflow_integer, &
                   iProc, msgTag, adflow_comm_world, recvRequests(recvCount), ierr)
    call echk(ierr, __FILE__, __LINE__)
    recvInfo(:, recvCount) = [iDom, 6]

end subroutine recvosurf
394 
395  subroutine getfringereturnsizes(oFringeSendList, oFringeRecvList, &
396  nOFringeSend, nOfringeRecv, oFringes, &
397  fringeRecvSizes, cumFringeRecv)
398 
399  ! For this data exchange we use the exact *reverse* of fringe
400  ! communication pattern. This communication simply determines the
401  ! number of fringes that must be returned to the owning process.
 ! On exit fringeRecvSizes(1:nOFringeSend) holds the per-exchange
 ! counts and cumFringeRecv is its cumulative (1-based) prefix sum;
 ! both are allocated here.
 ! NOTE(review): sendRequests/recvRequests come from a module 'use'
 ! line not visible in this excerpt (original line 404 is missing).
402 
403  use constants
405  use utils, only: echk
406  use oversetdata, onlY: oversetfringe
407  implicit none
408 
409  ! Input/output
410  type(oversetfringe), dimension(:) :: oFringes
411  integer(kind=intType), dimension(:, :) :: oFringeSendList, oFringeRecvList
412  integer(kind=intType), dimension(:), allocatable :: cumFringeRecv, fringeRecvSizes
413  integer(kind=intType) :: nOFringeSend, nOfringeRecv
414  ! Working
415  integer(kind=intType) :: sendCount, recvCount
416  integer(kind=intType) :: iDom, iProc, jj, ierr, index, i
417  integer mpiStatus(MPI_STATUS_SIZE)
418 
419  ! Post all the fringe iSends
 ! Reversed pattern: we *send* along our receive list (tag = iDom).
420  sendcount = 0
421  do jj = 1, nofringerecv
422 
423  iproc = ofringerecvlist(1, jj)
424  idom = ofringerecvlist(2, jj)
425  sendcount = sendcount + 1
426  call mpi_isend(ofringes(idom)%fringeReturnSize, 1, adflow_integer, &
427  iproc, idom, adflow_comm_world, sendrequests(sendcount), ierr)
428  call echk(ierr, __file__, __line__)
429  end do
430 
431  allocate (fringerecvsizes(nofringesend))
432 
433  ! Non-blocking receives
 ! ...and receive along our send list.
434  recvcount = 0
435  do jj = 1, nofringesend
436 
437  iproc = ofringesendlist(1, jj)
438  idom = ofringesendlist(2, jj)
439  recvcount = recvcount + 1
440 
441  call mpi_irecv(fringerecvsizes(jj), 1, adflow_integer, &
442  iproc, idom, adflow_comm_world, recvrequests(recvcount), ierr)
443  call echk(ierr, __file__, __line__)
444  end do
445 
446  ! Last thing to do: wait for all the sends and receives to finish
447  do i = 1, sendcount
448  call mpi_waitany(sendcount, sendrequests, index, mpistatus, ierr)
449  call echk(ierr, __file__, __line__)
450  end do
451 
452  do i = 1, recvcount
453  call mpi_waitany(recvcount, recvrequests, index, mpistatus, ierr)
454  call echk(ierr, __file__, __line__)
455  end do
456 
457  ! Compute the cumulative form of the fringeRecvSizes
458 
459  allocate (cumfringerecv(1:nofringesend + 1))
460  cumfringerecv(1) = 1
461  do jj = 1, nofringesend ! These are the fringes we *sent*
462  ! originally, now are going to receive them
463  ! back
464  cumfringerecv(jj + 1) = cumfringerecv(jj) + fringerecvsizes(jj)
465  end do
466 
467  end subroutine getfringereturnsizes
468 
469  !
470  ! oversetLoadBalance determines the distribution of donor and
471  ! receiver blocks that will result in approximate even load
472  ! balancing. The sparse matrix structure of the overlap is
473  ! provided. This computation runs on all processors.
 ! Each nonzero entry of overlap%data is a search cost; the greedy
 ! sweep below assigns entries to processors (overlap%assignedProc)
 ! so each rank's accumulated cost approaches the even share.
474 
475  subroutine oversetloadbalance(overlap)
476 
477  use constants
478  use communication, only: nproc
479  use oversetdata, only: csrmatrix
480  implicit none
481 
482  ! Input/Output
483  type(csrmatrix), intent(inout) :: overlap
484 
485  ! Working parameters
486  integer(kind=intType) :: curRow, jj, jj1, iProc, iRow
487  real(kind=realtype) :: evencost, potentialsum, targetcost
488  real(Kind=realtype) :: totalsearch, totalbuild
489 
490  real(kind=realtype), dimension(0:nProc - 1) :: proccosts
491  real(kind=realtype), dimension(0:nProc) :: cumproccosts
492  real(kind=realtype), dimension(overlap%nRow) :: buildcost
493  real(kind=realtype), parameter :: tol = 0.1_realtype
494  ! real(kind=realType), parameter :: K=10_realType
495  logical, dimension(overlap%nnz) :: blockTaken
496  logical :: increment
 ! NOTE(review): iRow, totalBuild and buildCost are declared but never
 ! referenced in this excerpt.
497 
498  ! Pointers to make code a little easier to read
499  integer(kind=intType), pointer, dimension(:) :: rowPtr, assignedProc
500  real(kind=realtype), pointer, dimension(:) :: data
501 
502  ! Set the couple of pointers
503  rowptr => overlap%rowPtr
504  assignedproc => overlap%assignedProc
505  data => overlap%data
506 
507  ! Determine the total search cost:
508  totalsearch = sum(overlap%data)
509 
510  ! Target amount of work for each processor
511  evencost = totalsearch / nproc
512 
513  ! Initialize the taken processor to False
514  blocktaken = .false.
515 
516  ! Initialize assignedProc to -1 since there could be entries we can
517  ! ignore.
518  assignedproc(:) = -1
519  proccosts = zero
520  cumproccosts(0) = zero
521 
522  ! Initialize the starting point
523  jj = 1
524  iproc = 0
525 
526  ! Find the first row with non-zeros
527  currow = 1
528  do while (rowptr(currow + 1) - rowptr(currow) == 0)
529  currow = currow + 1
530  end do
531 
 ! NOTE(review): the condition allows iProc == nProc while procCosts is
 ! dimensioned (0:nProc-1); confirm the loop always exits via curRow
 ! before procCosts(nProc) could be referenced.
532  masterloop: do while (currow <= overlap%nRow .and. iproc <= nproc)
533 
534  ! Normally we increment
535  increment = .true.
536 
537  ! This is our current target cost.
538  targetcost = evencost * (iproc + 1)
539 
540  ! It is still possible that data(jj) is zero. That's ok...we'll
541  ! explicitly ignore them.
542  if (data(jj) /= zero .and. .not. (blocktaken(jj))) then
543 
544  if (proccosts(iproc) == 0 .or. iproc == nproc - 1) then
545  ! Must be added
546  proccosts(iproc) = proccosts(iproc) + data(jj)
547  blocktaken(jj) = .true.
548  assignedproc(jj) = iproc
549 
550  else
551 
552  ! There is already something in there. See what the
553  ! potential sum will be:
554  potentialsum = cumproccosts(iproc) + proccosts(iproc) + data(jj)
555 
556  if (potentialsum < targetcost - tol * evencost) then
557  ! We are not close to our limit yet so just add it normally
558  proccosts(iproc) = proccosts(iproc) + data(jj)
559  blocktaken(jj) = .true.
560  assignedproc(jj) = iproc
561 
562  else if (potentialsum >= targetcost - tol * evencost .and. &
563  potentialsum <= targetcost + tol * evencost) then
564 
565  ! This one looks perfect. Call it a day...add it and
566  ! move on to the next proc
567 
568  proccosts(iproc) = proccosts(iproc) + data(jj)
569  blocktaken(jj) = .true.
570  assignedproc(jj) = iproc
571 
572  ! Processor can be incremented
573  cumproccosts(iproc + 1) = cumproccosts(iproc) + proccosts(iproc)
574  iproc = iproc + 1
575  else
576  ! This means potentialSum > targetCost + tol*evenCost
577 
578  ! This is somewhat bad news...this may be *horrendously*
579  ! load balanced. The algorithm dictates we *MUST*
580  ! finish this proc no matter what before we go back to
581  ! the outer loop. Essentially we know jj is bad,
582  ! instead scan over the rest of the row and see if we
583  ! can add something else that is decent.
584  increment = .false.
585 
586  restofrow: do jj1 = jj + 1, rowptr(currow + 1) - 1
587 
588  potentialsum = cumproccosts(iproc) + proccosts(iproc) + data(jj1)
589 
590  if (data(jj1) /= zero .and. .not. (blocktaken(jj1))) then
591 
592  if (potentialsum < targetcost - tol * evencost) then
593  !Huh...that one fit in without going
594  ! over....add it and keep going in the loop
595 
596  proccosts(iproc) = proccosts(iproc) + data(jj1)
597  blocktaken(jj1) = .true.
598  assignedproc(jj1) = iproc
599 
600  else if (potentialsum >= targetcost - tol * evencost .and. &
601  potentialsum <= targetcost + tol * evencost) then
602 
603  ! This one fit in perfectly.
604  proccosts(iproc) = proccosts(iproc) + data(jj1)
605  blocktaken(jj1) = .true.
606  assignedproc(jj1) = iproc
607 
608  ! No need to keep going
609  exit restofrow
610 
611  end if
612  end if
613  end do restofrow
614 
615  ! Well, the loop finished, we may or may not have
616  ! added something. If so great...if not, oh well. We
617  ! just keep going to the next proc. That's the greedy
618  ! algorithm for you.
619 
620  ! Processor can be incremented
621  cumproccosts(iproc + 1) = cumproccosts(iproc) + proccosts(iproc)
622  iproc = iproc + 1
623  end if
624  end if
625  end if
626 
627  ! Move 1 in jj, until we reach the end and wrap around.
628  if (increment) then
629  jj = jj + 1
630 
631  ! Switch to the next row:
632  if (jj == rowptr(currow + 1)) then
633 
634  ! This is really tricky...we know we're at the end of the
635  ! row, but we have to SKIP OVER THE EMPTY rows, or else the
636  ! algorithm will crap out. Keep incrementing the curRow
637  ! until we get a row with something in it. Make sure we
638  ! don't go out the end, so check against nRow
639 
640  findnextnonzerorow: do while (jj == rowptr(currow + 1))
641  currow = currow + 1
642  if (currow > overlap%nRow) then
643  exit findnextnonzerorow
644  end if
645  end do findnextnonzerorow
646  end if
647  end if
648  end do masterloop
649 
650  end subroutine oversetloadbalance
651 
652  subroutine exchangefringes(level, sps, commPattern, internal)
653  !
654  ! ExchangeFringes exchanges the donorInformation of the fringes:
655  ! donorProc, donorBlock, dIndex and donorFrac. It does this using
656  ! the 1:1 halos for the given level and spectral instance. Since
657  ! we have real values and integer values we will do all the ints
658  ! first and then the reals.
659  !
 ! NOTE(review): original lines 663-665 are missing from this excerpt;
 ! they presumably 'use' the module(s) providing commtype,
 ! internalcommtype, sendRequests/recvRequests, sendBuffer/recvBuffer,
 ! windIndex and addToFringeList, all of which are referenced below.
660  use constants
661  use block, only: fringetype
662  use blockpointers, only: flowdoms
666  implicit none
667  !
668  ! Subroutine arguments.
669  !
670  integer(kind=intType), intent(in) :: level, sps
671 
672  type(commtype), dimension(*), intent(in) :: commPattern
673  type(internalcommtype), dimension(*), intent(in) :: internal
674  !
675  ! Local variables.
676  !
677  integer :: size, procId, ierr, index
678  integer, dimension(mpi_status_size) :: mpiStatus
679 
680  integer(kind=intType) :: i, j, ii, jj, nVar, iFringe, jFringe
681  integer(kind=intType) :: d1, i1, j1, k1, d2, i2, j2, k2
682  integer(kind=intType) :: il, jl, kl, myIndex
683  type(fringetype) :: fringe
684  integer(kind=intType), dimension(:), allocatable :: sendBufInt
685  integer(kind=intType), dimension(:), allocatable :: recvBufInt
686 
687  ! Allocate the memory for the sending and receiving buffers.
 ! nVar = 3 integers per fringe: donorProc, donorBlock, dIndex.
688  nvar = 3
689  ii = commpattern(level)%nProcSend
690  ii = commpattern(level)%nsendCum(ii)
691  jj = commpattern(level)%nProcRecv
692  jj = commpattern(level)%nrecvCum(jj)
693 
694  allocate (sendbufint(ii * nvar), recvbufint(jj * nvar), stat=ierr)
695 
696  ! Send the variables. The data is first copied into
697  ! the send buffer after which the buffer is sent asap.
698 
699  ii = 1
700  intsends: do i = 1, commpattern(level)%nProcSend
701 
702  ! Store the processor id and the size of the message
703  ! a bit easier.
704 
705  procid = commpattern(level)%sendProc(i)
706  size = nvar * commpattern(level)%nsend(i)
707 
708  ! Copy the data in the correct part of the send buffer.
709 
710  jj = ii
711  do j = 1, commpattern(level)%nsend(i)
712 
713  ! Store the block id and the indices of the donor
714  ! a bit easier.
715 
716  d1 = commpattern(level)%sendList(i)%block(j)
717  i1 = commpattern(level)%sendList(i)%indices(j, 1)
718  j1 = commpattern(level)%sendList(i)%indices(j, 2)
719  k1 = commpattern(level)%sendList(i)%indices(j, 3)
720 
721  ! Copy integer values to buffer
722  ifringe = flowdoms(d1, level, sps)%fringePtr(1, i1, j1, k1)
723  if (ifringe > 0) then
724  sendbufint(jj) = flowdoms(d1, level, sps)%fringes(ifringe)%donorProc
725  sendbufint(jj + 1) = flowdoms(d1, level, sps)%fringes(ifringe)%donorBlock
726  sendbufint(jj + 2) = flowdoms(d1, level, sps)%fringes(ifringe)%dIndex
727  else
 ! No fringe at this donor cell: donorProc = -1 marks "no fringe"
 ! for the receiving side.
728  sendbufint(jj) = -1
729  sendbufint(jj + 1) = 0
730  sendbufint(jj + 2) = 0
731  end if
732 
733  jj = jj + nvar
734 
735  end do
736 
737  ! Send the data.
738 
739  call mpi_isend(sendbufint(ii), size, adflow_integer, procid, &
740  procid, adflow_comm_world, sendrequests(i), &
741  ierr)
742 
743  ! Set ii to jj for the next processor.
744 
745  ii = jj
746 
747  end do intsends
748 
749  ! Post the nonblocking receives.
750 
751  ii = 1
752  intreceives: do i = 1, commpattern(level)%nProcRecv
753 
754  ! Store the processor id and the size of the message
755  ! a bit easier.
756 
757  procid = commpattern(level)%recvProc(i)
758  size = nvar * commpattern(level)%nrecv(i)
759 
760  ! Post the receive.
761 
762  call mpi_irecv(recvbufint(ii), size, adflow_integer, procid, &
 ! NOTE(review): the continuation line of this mpi_irecv (tag,
 ! communicator, request, ierr arguments; original line 763) is
 ! missing from this excerpt.
764 
765  ! And update ii.
766 
767  ii = ii + size
768 
769  end do intreceives
770 
771  ! Copy the local data.
772 
773  intlocalcopy: do i = 1, internal(level)%ncopy
774 
775  ! Store the block and the indices of the donor a bit easier.
776 
777  d1 = internal(level)%donorBlock(i)
778  i1 = internal(level)%donorIndices(i, 1)
779  j1 = internal(level)%donorIndices(i, 2)
780  k1 = internal(level)%donorIndices(i, 3)
781 
782  ! Idem for the halo's.
783 
784  d2 = internal(level)%haloBlock(i)
785  i2 = internal(level)%haloIndices(i, 1)
786  j2 = internal(level)%haloIndices(i, 2)
787  k2 = internal(level)%haloIndices(i, 3)
788 
789  ifringe = flowdoms(d1, level, sps)%fringePtr(1, i1, j1, k1)
790  if (ifringe > 0) then
791  ! The sender has an actual fringe. Now check if the
792  ! receiver has somewhere already to put it:
793 
794  jfringe = flowdoms(d2, level, sps)%fringePtr(1, i2, j2, k2)
795 
796  ! Setup the new fringe:
797  fringe%myBlock = d2
798 
799  il = flowdoms(d2, level, sps)%il
800  jl = flowdoms(d2, level, sps)%jl
801  kl = flowdoms(d2, level, sps)%kl
802  fringe%myIndex = windindex(i2, j2, k2, il, jl, kl)
803 
804  fringe%donorProc = flowdoms(d1, level, sps)%fringes(ifringe)%donorProc
805  fringe%donorBlock = flowdoms(d1, level, sps)%fringes(ifringe)%donorBlock
806  fringe%dIndex = flowdoms(d1, level, sps)%fringes(ifringe)%dIndex
807  fringe%donorFrac = flowdoms(d1, level, sps)%fringes(ifringe)%donorFrac
808 
809  if (jfringe > 0) then
810  ! Just copy the fringe into the slot. No need to update
811  ! the pointer since it is already correct.
812  flowdoms(d2, level, sps)%fringes(jfringe) = fringe
813  else
814 
815  ! There is no slot available yet. Tack the fringe onto
816  ! the end of the d2 fringe list and set the pointers
817  ! accordingly.
818 
819  call addtofringelist(flowdoms(d2, level, sps)%fringes, &
820  flowdoms(d2, level, sps)%nDonors, fringe)
821 
822  ! Note that all three values (pointer, start and end) are
823  ! all the same.
824  flowdoms(d2, level, sps)%fringePtr(:, i2, j2, k2) = &
825  flowdoms(d2, level, sps)%nDonors
826  end if
827  else
828  ! The donor isn't a receiver so make sure the halo isn't
829  ! either. Just set the fringePtr to 0
830 
831  flowdoms(d2, level, sps)%fringePtr(1, i2, j2, k2) = 0
832  end if
833 
834  end do intlocalcopy
835 
836  ! Complete the nonblocking receives in an arbitrary sequence and
837  ! copy the variables from the buffer into the halo's.
838 
839  size = commpattern(level)%nProcRecv
840  intcompleterecvs: do i = 1, commpattern(level)%nProcRecv
841 
842  ! Complete any of the requests.
843 
844  call mpi_waitany(size, recvrequests, index, mpistatus, ierr)
845 
846  ! Copy the data just arrived in the halo's.
847 
848  ii = index
849  jj = nvar * commpattern(level)%nrecvCum(ii - 1)
850  do j = 1, commpattern(level)%nrecv(ii)
851 
852  ! Store the block and the indices of the halo a bit easier.
853 
854  d2 = commpattern(level)%recvList(ii)%block(j)
855  i2 = commpattern(level)%recvList(ii)%indices(j, 1)
856  j2 = commpattern(level)%recvList(ii)%indices(j, 2)
857  k2 = commpattern(level)%recvList(ii)%indices(j, 3)
858 
859  fringe%myBlock = d2
860  ! Recompute my Index:
861  il = flowdoms(d2, level, sps)%il
862  jl = flowdoms(d2, level, sps)%jl
863  kl = flowdoms(d2, level, sps)%kl
864  fringe%myIndex = windindex(i2, j2, k2, il, jl, kl)
865 
866  fringe%donorProc = recvbufint(jj + 1)
867  fringe%donorBlock = recvbufint(jj + 2)
868  fringe%dIndex = recvbufint(jj + 3)
869 
870  ifringe = flowdoms(d2, level, sps)%fringePtr(1, i2, j2, k2)
871  if (ifringe > 0) then
872  ! We have somewhere to put the data already:
873  flowdoms(d2, level, sps)%fringes(ifringe) = fringe
874  else
875  ! We don't have somewhere to put the fringe; add to the list:
876  call addtofringelist(flowdoms(d2, level, sps)%fringes, &
877  flowdoms(d2, level, sps)%nDonors, fringe)
878 
879  ! Note that all three values (pointer, start and end) are
880  ! all the same.
881  flowdoms(d2, level, sps)%fringePtr(:, i2, j2, k2) = &
882  flowdoms(d2, level, sps)%nDonors
883  end if
884  jj = jj + nvar
885  end do
886 
887  end do intcompleterecvs
888 
889  ! Complete the nonblocking sends.
890 
891  size = commpattern(level)%nProcSend
892  do i = 1, commpattern(level)%nProcSend
893  call mpi_waitany(size, sendrequests, index, mpistatus, ierr)
894  end do
895 
896  ! Done with the integer memory.
897 
898  deallocate (sendbufint, recvbufint)
899 
900  ! Now do the real exchange. We can use the regular real buffers here
901  ! since they are large enough
902 
903  ! ================================================================================
904 
905  ! Allocate the memory for the sending and receiving buffers.
 ! nVar = 3 reals per fringe: the three donorFrac components.
906  nvar = 3
907 
908  ! Send the variables. The data is first copied into
909  ! the send buffer after which the buffer is sent asap.
910 
911  ii = 1
912  sends: do i = 1, commpattern(level)%nProcSend
913 
914  ! Store the processor id and the size of the message
915  ! a bit easier.
916 
917  procid = commpattern(level)%sendProc(i)
918  size = nvar * commpattern(level)%nsend(i)
919 
920  ! Copy the data in the correct part of the send buffer.
921 
922  jj = ii
923  do j = 1, commpattern(level)%nsend(i)
924 
925  ! Store the block id and the indices of the donor
926  ! a bit easier.
927 
928  d1 = commpattern(level)%sendList(i)%block(j)
929  i1 = commpattern(level)%sendList(i)%indices(j, 1)
930  j1 = commpattern(level)%sendList(i)%indices(j, 2)
931  k1 = commpattern(level)%sendList(i)%indices(j, 3)
932 
933  ! Copy real values to buffer
934  ifringe = flowdoms(d1, level, sps)%fringePtr(1, i1, j1, k1)
935  if (ifringe > 0) then
936  sendbuffer(jj:jj + 2) = flowdoms(d1, level, sps)%fringes(ifringe)%donorFrac
937  else
938  sendbuffer(jj:jj + 2) = zero
939  end if
940  jj = jj + nvar
941 
942  end do
943 
944  ! Send the data.
945 
946  call mpi_isend(sendbuffer(ii), size, adflow_real, procid, &
947  procid, adflow_comm_world, sendrequests(i), &
948  ierr)
949 
950  ! Set ii to jj for the next processor.
951 
952  ii = jj
953 
954  end do sends
955 
956  ! Post the nonblocking receives.
957 
958  ii = 1
959  receives: do i = 1, commpattern(level)%nProcRecv
960 
961  ! Store the processor id and the size of the message
962  ! a bit easier.
963 
964  procid = commpattern(level)%recvProc(i)
965  size = nvar * commpattern(level)%nrecv(i)
966 
967  ! Post the receive.
968 
969  call mpi_irecv(recvbuffer(ii), size, adflow_real, procid, &
 ! NOTE(review): the continuation line of this mpi_irecv (tag,
 ! communicator, request, ierr arguments; original line 970) is
 ! missing from this excerpt.
971 
972  ! And update ii.
973 
974  ii = ii + size
975 
976  end do receives
977 
978  ! ***********************************************************
979  ! No local copy since we copied the fringes directly and the
980  ! donorFrac info is already there.
981  ! ***********************************************************
982 
983  ! Complete the nonblocking receives in an arbitrary sequence and
984  ! copy the variables from the buffer into the halo's.
985 
986  size = commpattern(level)%nProcRecv
987  completerecvs: do i = 1, commpattern(level)%nProcRecv
988 
989  ! Complete any of the requests.
990 
991  call mpi_waitany(size, recvrequests, index, mpistatus, ierr)
992 
993  ! Copy the data just arrived in the halo's.
994 
995  ii = index
996  jj = nvar * commpattern(level)%nrecvCum(ii - 1)
997  do j = 1, commpattern(level)%nrecv(ii)
998 
999  ! Store the block and the indices of the halo a bit easier.
1000 
1001  d2 = commpattern(level)%recvList(ii)%block(j)
1002  i2 = commpattern(level)%recvList(ii)%indices(j, 1)
1003  j2 = commpattern(level)%recvList(ii)%indices(j, 2)
1004  k2 = commpattern(level)%recvList(ii)%indices(j, 3)
1005 
1006  ! Now, there should already be a spot available for the
1007  ! donorFrac since it was created if necessary in the integer exchange.
1008  ifringe = flowdoms(d2, level, sps)%fringePtr(1, i2, j2, k2)
1009  flowdoms(d2, level, sps)%fringes(ifringe)%donorFrac = recvbuffer(jj + 1:jj + 3)
1010 
1011  jj = jj + nvar
1012  end do
1013 
1014  end do completerecvs
1015 
1016  ! Complete the nonblocking sends.
1017 
1018  size = commpattern(level)%nProcSend
1019  do i = 1, commpattern(level)%nProcSend
1020  call mpi_waitany(size, sendrequests, index, mpistatus, ierr)
1021  end do
1022 
1023  end subroutine exchangefringes
1024 
 1025  subroutine exchangestatus(level, sps, commPattern, internal)
 1026  !
 1027  ! exchangeStatus exchanges the cell 'status' flag across the 1-to-1
 1028  ! block connectivity for the given multigrid level and sps instance.
 1029  !
 1030  use constants
 1031  use blockpointers, only: ndom, ib, jb, kb, flowdoms
 1033  use utils, only: setpointers
 1035 
 1036  implicit none
 1037  !
 1038  ! Subroutine arguments.
 1039  !
 ! level, sps : multigrid level and spectral instance to exchange on.
 1040  integer(kind=intType), intent(in) :: level, sps
 1041 
 ! 1-to-1 communication pattern and on-processor copy info, indexed by level.
 1042  type(commtype), dimension(*), intent(in) :: commPattern
 1043  type(internalcommtype), dimension(*), intent(in) :: internal
 1044  integer(kind=intType) :: nn
 1045 
 ! Point the generic integer comm variable of every local domain at its
 ! status array; the generic exchange below operates through these pointers.
 1046  domainloop: do nn = 1, ndom
 1047  flowdoms(nn, level, sps)%intCommVars(1)%var => flowdoms(nn, level, sps)%status(:, :, :)
 1048  end do domainloop
 1049 
 1050  ! Run the generic integer exchange (1 variable per cell).
 1051  call whalo1to1intgeneric(1, level, sps, commpattern, internal)
 1052 
 1053  end subroutine exchangestatus
1054 
 1055  subroutine exchangestatustranspose(level, sps, commPattern, internal)
 1056 
 1057  ! exchangeStatusTranspose performs the *TRANSPOSE* of the normal
 1058  ! halo exchange. That means it takes information *in the halo cells*
 1059  ! and accumulates it into the *owned cells*. In this particular case,
 1060  ! we are transmitting the isDonor and isWallDonor information from
 1061  ! the halos to the owned cells. The "accumulate" operation will be
 1062  ! an MPI_LOR. Note that this actually has the same comm structure
 1063  ! as 'whalo1to1_b'.
 1064 
 1065  use constants
 1066  use blockpointers, only: flowdoms
 1070  implicit none
 1071  !
 1072  ! Subroutine arguments.
 1073  !
 ! level, sps : multigrid level and spectral instance to exchange on.
 1074  integer(kind=intType), intent(in) :: level, sps
 ! 1-to-1 communication pattern and on-processor copy info, indexed by level.
 1075  type(commtype), dimension(*), intent(in) :: commPattern
 1076  type(internalcommtype), dimension(*), intent(in) :: internal
 1077  !
 1078  ! Local variables.
 1079  !
 1080  integer :: size, procID, ierr, index
 1081  integer, dimension(mpi_status_size) :: mpiStatus
 1082 
 1083  integer(kind=intType) :: mm
 1084  integer(kind=intType) :: i, j, k, ii, jj
 1085  integer(kind=intType) :: d1, i1, j1, k1, d2, i2, j2, k2
 1086  integer(kind=intType), dimension(:), allocatable :: sendBuf, recvBuf
 ! C* = status bits of the local (accumulating) cell, D* = bits of the
 ! incoming halo value being OR-ed in.
 1087  logical :: CisDonor, CisHole, CisCompute, CisFloodSeed, CisFlooded, CisWallDonor, CisReceiver
 1088  logical :: DisDonor, DisHole, DisCompute, DisFloodSeed, DisFlooded, DisWallDonor, DisReceiver
 1089  integer(kind=intType) :: cellStatus, donorStatus
 1090 
 ! Total buffer sizes from the cumulative counts: the comm roles are
 ! swapped relative to a forward exchange, so sendBuf is sized by the
 ! send-side totals but will be *received into*, and vice versa.
 1091  ii = commpattern(level)%nProcSend
 1092  ii = commpattern(level)%nsendCum(ii)
 1093  jj = commpattern(level)%nProcRecv
 1094  jj = commpattern(level)%nrecvCum(jj)
 1095 
 1096  ! We are exchanging 1 piece of information
 1097  allocate (sendbuf(ii), recvbuf(jj), stat=ierr)
 1098 
 1099  ! Gather up the seeds into the *recv* buffer. Note we loop
 1100  ! over nProcRECV here! After the buffer is assembled it is
 1101  ! sent off.
 1102 
 1103  jj = 1
 1104  ii = 1
 1105  recvs: do i = 1, commpattern(level)%nProcRecv
 1106 
 1107  ! Store the processor id and the size of the message
 1108  ! a bit easier.
 1109 
 1110  procid = commpattern(level)%recvProc(i)
 1111  size = commpattern(level)%nrecv(i)
 1112 
 1113  ! Copy the data into the buffer
 1114 
 1115  do j = 1, commpattern(level)%nrecv(i)
 1116 
 1117  ! Store the block and the indices of the halo a bit easier.
 1118 
 1119  d2 = commpattern(level)%recvList(i)%block(j)
 1120  i2 = commpattern(level)%recvList(i)%indices(j, 1)
 1121  j2 = commpattern(level)%recvList(i)%indices(j, 2)
 1122  k2 = commpattern(level)%recvList(i)%indices(j, 3)
 1123 
 1124  recvbuf(jj) = flowdoms(d2, level, sps)%status(i2, j2, k2)
 1125  jj = jj + 1
 1126 
 1127  end do
 1128 
 1129  ! Send the data (transpose: the recv lists are the data sources here).
 1130  call mpi_isend(recvbuf(ii), size, adflow_integer, procid, &
 1131  procid, adflow_comm_world, sendrequests(i), &
 1132  ierr)
 1133 
 1134  ! Set ii to jj for the next processor.
 1135 
 1136  ii = jj
 1137 
 1138  end do recvs
 1139 
 1140  ! Post the nonblocking receives.
 1141 
 1142  ii = 1
 1143  sends: do i = 1, commpattern(level)%nProcSend
 1144 
 1145  ! Store the processor id and the size of the message
 1146  ! a bit easier.
 1147 
 1148  procid = commpattern(level)%sendProc(i)
 1149  size = commpattern(level)%nsend(i)
 1150 
 1151  ! Post the receive (into the send-side buffer; roles are transposed).
 1152 
 1153  call mpi_irecv(sendbuf(ii), size, adflow_integer, procid, &
 1154  myid, adflow_comm_world, recvrequests(i), ierr)
 1155 
 1156  ! And update ii.
 1157 
 1158  ii = ii + size
 1159 
 1160  end do sends
 1161 
 1162  ! Copy the local data.
 1163 
 1164  localcopy: do i = 1, internal(level)%ncopy
 1165 
 1166  ! Store the block and the indices of the donor a bit easier.
 1167 
 1168  d1 = internal(level)%donorBlock(i)
 1169  i1 = internal(level)%donorIndices(i, 1)
 1170  j1 = internal(level)%donorIndices(i, 2)
 1171  k1 = internal(level)%donorIndices(i, 3)
 1172 
 1173  ! Idem for the halo's.
 1174 
 1175  d2 = internal(level)%haloBlock(i)
 1176  i2 = internal(level)%haloIndices(i, 1)
 1177  j2 = internal(level)%haloIndices(i, 2)
 1178  k2 = internal(level)%haloIndices(i, 3)
 1179 
 1180  ! OR operation. Note we modify the '1' values ie. the 'donors'
 1181  ! which are now receivers because of the transpose operation.
 1182  cellstatus = flowdoms(d1, level, sps)%status(i1, j1, k1)
 1183  call getstatus(cellstatus, cisdonor, cishole, ciscompute, &
 1184  cisfloodseed, cisflooded, ciswalldonor, cisreceiver)
 1185 
 1186  donorstatus = flowdoms(d2, level, sps)%status(i2, j2, k2)
 1187  call getstatus(donorstatus, disdonor, dishole, discompute, &
 1188  disfloodseed, disflooded, diswalldonor, disreceiver)
 1189 
 ! Only the isDonor and isWallDonor bits are accumulated; the other
 ! status bits of the owned cell are left untouched.
 1190  call setisdonor(flowdoms(d1, level, sps)%status(i1, j1, k1), &
 1191  cisdonor .or. disdonor)
 1192 
 1193  call setiswalldonor(flowdoms(d1, level, sps)%status(i1, j1, k1), &
 1194  ciswalldonor .or. diswalldonor)
 1195 
 1196  end do localcopy
 1197 
 1198  ! Complete the nonblocking receives in an arbitrary sequence and
 1199  ! copy the variables from the buffer into the halo's.
 1200 
 1201  size = commpattern(level)%nProcSend
 1202  completesends: do i = 1, commpattern(level)%nProcSend
 1203 
 1204  ! Complete any of the requests.
 1205 
 1206  call mpi_waitany(size, recvrequests, index, mpistatus, ierr)
 1207 
 1208  ! Copy the data just arrived in the halo's.
 1209 
 1210  ii = index
 1211 
 ! Offset into sendBuf for the message that just completed; nsendCum(0)
 ! is assumed to be 0 so index 1 starts at the buffer head.
 1212  jj = commpattern(level)%nsendCum(ii - 1)
 1213 
 1214  do j = 1, commpattern(level)%nsend(ii)
 1215 
 1216  ! Store the block and the indices of the halo a bit easier.
 1217 
 1218  d1 = commpattern(level)%sendList(ii)%block(j)
 1219  i1 = commpattern(level)%sendList(ii)%indices(j, 1)
 1220  j1 = commpattern(level)%sendList(ii)%indices(j, 2)
 1221  k1 = commpattern(level)%sendList(ii)%indices(j, 3)
 1222 
 1223  cellstatus = flowdoms(d1, level, sps)%status(i1, j1, k1)
 1224  call getstatus(cellstatus, cisdonor, cishole, ciscompute, &
 1225  cisfloodseed, cisflooded, ciswalldonor, cisreceiver)
 1226  jj = jj + 1
 1227  donorstatus = sendbuf(jj)
 1228  call getstatus(donorstatus, disdonor, dishole, discompute, &
 1229  disfloodseed, disflooded, diswalldonor, disreceiver)
 1230 
 ! Accumulate (logical OR) the remote halo bits into the owned cell.
 1231  call setisdonor(flowdoms(d1, level, sps)%status(i1, j1, k1), &
 1232  cisdonor .or. disdonor)
 1233 
 1234  call setiswalldonor(flowdoms(d1, level, sps)%status(i1, j1, k1), &
 1235  ciswalldonor .or. diswalldonor)
 1236  end do
 1237  end do completesends
 1238 
 1239  ! Complete the nonblocking sends.
 1240 
 1241  size = commpattern(level)%nProcRecv
 1242  do i = 1, commpattern(level)%nProcRecv
 1243  call mpi_waitany(size, sendrequests, index, mpistatus, ierr)
 1244  end do
 1245 
 1246  deallocate (recvbuf, sendbuf)
 1247 
 1248  end subroutine exchangestatustranspose
1249 
 1250  subroutine setupfringeglobalind(level, sps)
 1251 
 1252  use constants
 1253  use blockpointers
 1254  use communication
 1255  use utils, only: echk
 1256  implicit none
 1257 
 1258  ! This subroutine is used to record the global index of each of
 1259  ! the donors for overset fringes. It has the same comm structure
 1260  ! as wOverset and flagInvalidDonors. For every receiver cell the 8
 1261  ! globalCell indices of its donor stencil are gathered into gInd.
 
 1262  !
 1263  ! Subroutine arguments.
 1264  !
 ! level, sps : multigrid level and spectral instance to operate on.
 1265  integer(kind=intType), intent(in) :: level, sps
 1266 
 1267  !
 1268  ! Local variables.
 1269  !
 1270  integer :: size, procId, ierr, index
 1271  integer, dimension(mpi_status_size) :: mpiStatus
 1272 
 1273  integer(kind=intType) :: nVar
 1274  integer(kind=intType) :: i, j, k, ii, jj, iii, jjj, kkk, iFringe
 1275  integer(kind=intType) :: d1, i1, j1, k1, d2, i2, j2, k2, ind
 1276  integer(kind=intType), dimension(:), allocatable :: sendBufInt
 1277  integer(kind=intType), dimension(:), allocatable :: recvBufInt
 1278  logical :: invalid
 1279  type(commtype), pointer :: commPattern
 1280  type(internalcommtype), pointer :: internal
 1281 
 ! Use the dedicated overset communication pattern for this level/sps.
 1282  commpattern => commpatternoverset(level, sps)
 1283  internal => internaloverset(level, sps)
 1284 
 ! Buffer sizes: total items from the cumulative send/recv counts,
 ! times nVar = 8 (the 2x2x2 donor stencil corners per receiver).
 1285  ii = commpattern%nProcSend
 1286  ii = commpattern%nsendCum(ii)
 1287  jj = commpattern%nProcRecv
 1288  jj = commpattern%nrecvCum(jj)
 1289  nvar = 8
 1290  allocate (sendbufint(ii * nvar), recvbufint(jj * nvar), stat=ierr)
 1291 
 1292  ! Send the variables. The data is first copied into
 1293  ! the send buffer after which the buffer is sent asap.
 1294 
 1295  ii = 1
 1296  sends: do i = 1, commpattern%nProcSend
 1297 
 1298  ! Store the processor id and the size of the message
 1299  ! a bit easier.
 1300 
 1301  procid = commpattern%sendProc(i)
 1302  size = nvar * commpattern%nsend(i)
 1303 
 1304  ! Copy the data in the correct part of the send buffer.
 1305 
 1306  jj = ii
 1307  do j = 1, commpattern%nsend(i)
 1308 
 1309  ! Store the block id and the indices of the donor
 1310  ! a bit easier.
 1311 
 1312  d1 = commpattern%sendList(i)%block(j)
 1313  i1 = commpattern%sendList(i)%indices(j, 1)
 1314  j1 = commpattern%sendList(i)%indices(j, 2)
 1315  k1 = commpattern%sendList(i)%indices(j, 3)
 1316 
 1317  ! Loop over the 8 donors (the 2x2x2 stencil anchored at i1,j1,k1):
 1318  do kkk = k1, k1 + 1
 1319  do jjj = j1, j1 + 1
 1320  do iii = i1, i1 + 1
 1321  sendbufint(jj) = flowdoms(d1, level, sps)%globalCell(iii, jjj, kkk)
 1322  jj = jj + 1
 1323  end do
 1324  end do
 1325  end do
 1326  end do
 1327 
 1328  ! Send the data.
 1329 
 1330  call mpi_isend(sendbufint(ii), size, adflow_integer, procid, &
 1331  procid, adflow_comm_world, sendrequests(i), &
 1332  ierr)
 ! NOTE(review): __file__/__line__ are expected to be the uppercase CPP
 ! macros __FILE__/__LINE__ in the real source; the listing appears to
 ! have lowercased all identifiers -- confirm against the repository.
 1333  call echk(ierr, __file__, __line__)
 1334 
 1335  ! Set ii to jj for the next processor.
 1336 
 1337  ii = jj
 1338 
 1339  end do sends
 1340 
 1341  ! Post the nonblocking receives.
 1342 
 1343  ii = 1
 1344  receives: do i = 1, commpattern%nProcRecv
 1345 
 1346  ! Store the processor id and the size of the message
 1347  ! a bit easier.
 1348 
 1349  procid = commpattern%recvProc(i)
 1350  size = nvar * commpattern%nrecv(i)
 1351 
 1352  ! Post the receive.
 1353 
 1354  call mpi_irecv(recvbufint(ii), size, adflow_integer, procid, &
 1355  myid, adflow_comm_world, recvrequests(i), ierr)
 1356  call echk(ierr, __file__, __line__)
 1357 
 1358  ! And update ii.
 1359 
 1360  ii = ii + size
 1361 
 1362  end do receives
 1363 
 1364  ! Do the local interpolation.
 1365  localinterp: do i = 1, internal%ncopy
 1366 
 1367  ! Store the block and the indices of the donor a bit easier.
 1368 
 1369  d1 = internal%donorBlock(i)
 1370  i1 = internal%donorIndices(i, 1)
 1371  j1 = internal%donorIndices(i, 2)
 1372  k1 = internal%donorIndices(i, 3)
 1373 
 1374  ! Idem for the halo's.
 1375 
 1376  d2 = internal%haloBlock(i)
 1377  i2 = internal%haloIndices(i, 1)
 1378  j2 = internal%haloIndices(i, 2)
 1379  k2 = internal%haloIndices(i, 3)
 1380 
 1381  ! Loop over the 8 donors:
 1382  ind = 0
 1383  do kkk = k1, k1 + 1
 1384  do jjj = j1, j1 + 1
 1385  do iii = i1, i1 + 1
 1386  ind = ind + 1
 1387  flowdoms(d2, level, sps)%gInd(ind, i2, j2, k2) = &
 1388  flowdoms(d1, level, sps)%globalCell(iii, jjj, kkk)
 1389  end do
 1390  end do
 1391  end do
 1392  end do localinterp
 1393 
 1394  ! Complete the nonblocking receives in an arbitrary sequence and
 1395  ! copy the variables from the buffer into the halo's.
 1396 
 1397  size = commpattern%nProcRecv
 1398  completerecvs: do i = 1, commpattern%nProcRecv
 1399 
 1400  ! Complete any of the requests.
 1401 
 1402  call mpi_waitany(size, recvrequests, index, mpistatus, ierr)
 1403  call echk(ierr, __file__, __line__)
 1404 
 1405  ! Copy the data just arrived in the halo's.
 1406 
 ! 'index' identifies which posted receive finished; offset into the
 ! buffer from the cumulative recv counts (nrecvCum(0) assumed 0).
 1407  ii = index
 1408  jj = nvar * commpattern%nrecvCum(ii - 1)
 1409  do j = 1, commpattern%nrecv(ii)
 1410 
 1411  ! Store the block and the indices of the halo a bit easier.
 1412 
 1413  d2 = commpattern%recvList(ii)%block(j)
 1414  i2 = commpattern%recvList(ii)%indices(j, 1)
 1415  j2 = commpattern%recvList(ii)%indices(j, 2)
 1416  k2 = commpattern%recvList(ii)%indices(j, 3)
 1417 
 1418  ! Just set the 8 values
 1419  do ind = 1, 8
 1420  flowdoms(d2, level, sps)%gInd(ind, i2, j2, k2) = &
 1421  recvbufint(jj + ind)
 1422  end do
 1423 
 1424  jj = jj + 8
 1425  end do
 1426  end do completerecvs
 1427 
 1428  ! Complete the nonblocking sends.
 1429 
 1430  size = commpattern%nProcSend
 1431  do i = 1, commpattern%nProcSend
 1432  call mpi_waitany(size, sendrequests, index, mpistatus, ierr)
 1433  call echk(ierr, __file__, __line__)
 1434  end do
 1435  deallocate (sendbufint, recvbufint)
 1436 
 1437  end subroutine setupfringeglobalind
1438 
1439  subroutine exchangesurfacedelta(zipperFamList, level, sps, commPattern, internal)
1440  !
1441  ! ExchangeSurfaceDelta exchanges surface delta to fill up halo
1442  ! surface cells from adjacent blocks.
1443  !
1444  use constants
1445  use blockpointers, onlY: ndom, flowdoms, nbocos, bctype, bcfaceid, bcdata, &
1446  ib, il, jb, jl, kb, kl
1449  use utils, only: setpointers
1451  use sorting, only: faminlist
1452  implicit none
1453  !
1454  ! Subroutine arguments.
1455  !
1456  integer(kind=intType), intent(in), dimension(:) :: zipperFamList
1457  integer(kind=intType), intent(in) :: level, sps
1458 
1459  type(commtype), dimension(*), intent(in) :: commPattern
1460  type(internalcommtype), dimension(*), intent(in) :: internal
1461 
1462  ! Local
1463  integer(kind=intType) :: i, j, k, ii, nn, mm
1464  real(kind=realtype), dimension(:), allocatable :: psave
1465  real(kind=realtype), dimension(:, :), pointer :: deltaptr
1466 
1467  ! Just cheat by exchangint pressure. saving Pressure, dumping deltaPtr into the pressure,
1468  ! exchanging that and then restoring the pressure
1469 
1470  do nn = 1, ndom
1471  call setpointers(nn, level, sps)
1472 
1473  ! Allocate pointer space for the integer flag communication
1474  allocate (flowdoms(nn, level, sps)%realCommVars(1)%var(1:ib + 1, 1:jb + 1, 1:kb + 1))
1475 
1476  ! Push the surface iblank back to the generic volume variable rVar1
1477  bocoloop: do mm = 1, nbocos
1478  faminclude: if (faminlist(bcdata(mm)%famID, zipperfamlist)) then
1479 
1480  select case (bcfaceid(mm))
1481  case (imin)
1482  deltaptr => flowdoms(nn, level, sps)%realCommVars(1)%var(2 + 1, :, :)
1483  case (imax)
1484  deltaptr => flowdoms(nn, level, sps)%realCommVars(1)%var(il + 1, :, :)
1485  case (jmin)
1486  deltaptr => flowdoms(nn, level, sps)%realCommVars(1)%var(:, 2 + 1, :)
1487  case (jmax)
1488  deltaptr => flowdoms(nn, level, sps)%realCommVars(1)%var(:, jl + 1, :)
1489  case (kmin)
1490  deltaptr => flowdoms(nn, level, sps)%realCommVars(1)%var(:, :, 2 + 1)
1491  case (kmax)
1492  deltaptr => flowdoms(nn, level, sps)%realCommVars(1)%var(:, :, kl + 1)
1493  end select
1494 
1495  ! NO HALOS!
1496  do j = bcdata(mm)%jnBeg + 1, bcdata(mm)%jnEnd
1497  do i = bcdata(mm)%inBeg + 1, bcdata(mm)%inEnd
1498 
1499  ! Remember to account for the pointer offset since
1500  ! the iblank starts at zero
1501  deltaptr(i + 1, j + 1) = bcdata(mm)%delta(i, j)
1502  end do
1503  end do
1504  end if faminclude
1505  end do bocoloop
1506  end do
1507 
1508  ! Exchange the variane
1510 
1511  ! Copy back out
1512  ii = 0
1513  do nn = 1, ndom
1514  call setpointers(nn, level, sps)
1515 
1516  ! Extract the surface iblank from the volume.
1517  bocoloop2: do mm = 1, nbocos
1518  faminclude2: if (faminlist(bcdata(mm)%famID, zipperfamlist)) then
1519 
1520  select case (bcfaceid(mm))
1521  case (imin)
1522  deltaptr => flowdoms(nn, level, sps)%realCommVars(1)%var(2 + 1, :, :)
1523  case (imax)
1524  deltaptr => flowdoms(nn, level, sps)%realCommVars(1)%var(il + 1, :, :)
1525  case (jmin)
1526  deltaptr => flowdoms(nn, level, sps)%realCommVars(1)%var(:, 2 + 1, :)
1527  case (jmax)
1528  deltaptr => flowdoms(nn, level, sps)%realCommVars(1)%var(:, jl + 1, :)
1529  case (kmin)
1530  deltaptr => flowdoms(nn, level, sps)%realCommVars(1)%var(:, :, 2 + 1)
1531  case (kmax)
1532  deltaptr => flowdoms(nn, level, sps)%realCommVars(1)%var(:, :, jl + 1)
1533  end select
1534 
1535  ! INCLUDE THE HALOS!
1536  do j = bcdata(mm)%jcBeg, bcdata(mm)%jcEnd
1537  do i = bcdata(mm)%icBeg, bcdata(mm)%icEnd
1538 
1539  ! Remember to account for the pointer offset since
1540  ! the iblank starts at zero
1541  bcdata(mm)%delta(i, j) = deltaptr(i + 1, j + 1)
1542  end do
1543  end do
1544  end if faminclude2
1545  end do bocoloop2
1546 
1547  ! Now deallocate this pointer
1548  deallocate (flowdoms(nn, level, sps)%realCommVars(1)%var)
1549  end do
1550  end subroutine exchangesurfacedelta
1551 
 1552  subroutine exchangesurfaceiblanks(zipperFamList, level, sps, commPattern, internal)
 1553  !
 1554  ! exchangeSurfaceIblanks exchanges the surface iBlank values across
 1555  ! the 1-to-1 halos for the given level and sps instance, by routing
 1556  ! them through the volume iblank array and the generic int exchange.
 1557  use constants
 1558  use blockpointers, only: ndom, ib, jb, kb, iblank, bcdata, &
 1559  il, jl, kl, bcfaceid, nbocos, flowdoms, bctype
 1561  use utils, only: setpointers
 1563  use sorting, only: faminlist
 1564  implicit none
 1565  !
 1566  ! Subroutine arguments.
 1567  !
 ! level, sps    : multigrid level and spectral instance.
 ! zipperFamList : family IDs participating in the zipper mesh.
 1568  integer(kind=intType), intent(in) :: level, sps
 1569  integer(kind=intType), intent(in), dimension(:) :: zipperFamList
 1570 
 1571  type(commtype), dimension(*), intent(in) :: commPattern
 1572  type(internalcommtype), dimension(*), intent(in) :: internal
 1573 
 1574  ! Local
 1575  integer(kind=intType) :: i, j, k, ii, nn, mm
 1576  integer(kind=intType), dimension(:), allocatable :: iBlankSave
 1577  integer(kind=intType), dimension(:, :), pointer :: ibp
 1578 
 ! Save the volume iBlank arrays so they can be reused as scratch
 ! communication storage and restored afterwards. First size the
 ! save buffer over all local domains.
 1580  ii = 0
 1581  do nn = 1, ndom
 1582  call setpointers(nn, level, sps)
 1583  ii = ii + (ib + 1) * (jb + 1) * (kb + 1)
 1584  end do
 1585 
 1586  allocate (iblanksave(ii))
 1587  ii = 0
 1588  do nn = 1, ndom
 1589  call setpointers(nn, level, sps)
 1590  do k = 0, kb
 1591  do j = 0, jb
 1592  do i = 0, ib
 1593  ii = ii + 1
 1594  iblanksave(ii) = iblank(i, j, k)
 1595  ! The following algorithm uses the volume iblank array to communicate surface blocking
 1596  ! across block boundaries. However, for h-topology meshes, for example at a sharp
 1597  ! trailing edge, this breaks, because the surfaces on the top and bottom of the shape are
 1598  ! not topologically adjacent in the block structure. If we leave the existing volume
 1599  ! blanking in place the correct values are communicated; if we zero it as done originally
 1600  ! the connection is broken. Therefore the following line is commented out.
 1601  !iblank(i,j,k) = 0 !commented out to fix issue with h-topology blocks on the zipper
 1602 
 1603  end do
 1604  end do
 1605  end do
 1606 
 1607  ! Push the surface iblank back to the volume:
 1608  bocoloop: do mm = 1, nbocos
 1609  faminclude: if (faminlist(bcdata(mm)%famID, zipperfamlist)) then
 1610 
 ! Point at the first interior cell plane of this boundary face.
 1611  select case (bcfaceid(mm))
 1612  case (imin)
 1613  ibp => iblank(2, :, :)
 1614  case (imax)
 1615  ibp => iblank(il, :, :)
 1616  case (jmin)
 1617  ibp => iblank(:, 2, :)
 1618  case (jmax)
 1619  ibp => iblank(:, jl, :)
 1620  case (kmin)
 1621  ibp => iblank(:, :, 2)
 1622  case (kmax)
 1623  ibp => iblank(:, :, kl)
 1624  end select
 1625 
 1626  ! NO HALOS!
 1627  do j = bcdata(mm)%jnBeg + 1, bcdata(mm)%jnEnd
 1628  do i = bcdata(mm)%inBeg + 1, bcdata(mm)%inEnd
 1629 
 1630  ! Remember to account for the pointer offset since
 1631  ! the iblank starts at zero
 1632  ibp(i + 1, j + 1) = bcdata(mm)%iBlank(i, j)
 1633  end do
 1634  end do
 1635  end if faminclude
 1636  end do bocoloop
 1637  end do
 1638 
 1639  ! Exchange iblanks through the generic integer comm variable.
 1640  domainloop: do nn = 1, ndom
 1641  flowdoms(nn, level, sps)%intCommVars(1)%var => &
 1642  flowdoms(nn, level, sps)%iblank(:, :, :)
 1643  end do domainloop
 1644 
 1645  ! Run the generic integer exchange
 1646  call whalo1to1intgeneric(1, level, sps, commpattern, internal)
 1647 
 ! Reset the running offset; reused below when restoring iBlankSave.
 1648  ii = 0
 1649  do nn = 1, ndom
 1650  call setpointers(nn, level, sps)
 1651 
 1652  ! Extract the surface iblank from the volume.
 1653  bocoloop2: do mm = 1, nbocos
 1654  faminclude2: if (faminlist(bcdata(mm)%famID, zipperfamlist)) then
 1655 
 1656  select case (bcfaceid(mm))
 1657  case (imin)
 1658  ibp => iblank(2, :, :)
 1659  case (imax)
 1660  ibp => iblank(il, :, :)
 1661  case (jmin)
 1662  ibp => iblank(:, 2, :)
 1663  case (jmax)
 1664  ibp => iblank(:, jl, :)
 1665  case (kmin)
 1666  ibp => iblank(:, :, 2)
 1667  case (kmax)
 1668  ibp => iblank(:, :, kl)
 1669  end select
 1670 
 1671  ! INCLUDE THE HALOS!
 1672  do j = bcdata(mm)%jnBeg, bcdata(mm)%jnEnd + 1
 1673  do i = bcdata(mm)%inBeg, bcdata(mm)%inEnd + 1
 1674  ! Remember to account for the pointer offset since
 1675  ! the iblank starts at zero
 1676  bcdata(mm)%iBlank(i, j) = ibp(i + 1, j + 1)
 1677  end do
 1678  end do
 1679  end if faminclude2
 1680  end do bocoloop2
 1681 
 1682  ! Restore the saved array
 1683  do k = 0, kb
 1684  do j = 0, jb
 1685  do i = 0, ib
 1686  ii = ii + 1
 1687  iblank(i, j, k) = iblanksave(ii)
 1688  end do
 1689  end do
 1690  end do
 1691  end do
 1692  deallocate (iblanksave)
 1693  end subroutine exchangesurfaceiblanks
1694 
1695  subroutine emptyoversetcomm(level, sps)
1696 
1697  ! Short cut function to make empty overset comm structure for
1698  ! problems that do not use overset meshes.
1699 
1700  use constants
1702  implicit none
1703 
1704  ! Function
1705  integer(kind=intType), intent(in) :: level, sps
1706 
1707  ! Working
1708  integer(Kind=intType) :: nn, mm, ierr
1709 
1710  commpatternoverset(level, sps)%nProcRecv = 0
1711  allocate (commpatternoverset(level, sps)%recvProc(0))
1712  allocate (commpatternoverset(level, sps)%nRecv(0))
1713  allocate (commpatternoverset(level, sps)%recvList(0))
1714 
1715  commpatternoverset(level, sps)%nProcSend = 0
1716  allocate (commpatternoverset(level, sps)%sendProc(0))
1717  allocate (commpatternoverset(level, sps)%nSend(0))
1718  allocate (commpatternoverset(level, sps)%sendList(0))
1719 
1720  internaloverset(level, sps)%nCopy = 0
1721  allocate (internaloverset(level, sps)%donorBlock(0))
1722  allocate (internaloverset(level, sps)%donorIndices(0, 3))
1723  allocate (internaloverset(level, sps)%donorInterp(0, 8))
1724  allocate (internaloverset(level, sps)%haloBlock(0))
1725  allocate (internaloverset(level, sps)%haloIndices(0, 3))
1726 
1727  end subroutine emptyoversetcomm
1728 
 1729  subroutine updateoversetconnectivity(level, sps)
 1730 
 1731  ! This subroutine updates the overset connectivity for a perturbed
 1732  ! mesh. It does *not* completely redo the connectivity. Rather, a
 1733  ! newton search on the existing donors is performed using the
 1734  ! updated coordinates. This type of update is only applicable if the
 1735  ! entire volume mesh is warped as one like with USMesh in IDWarp. This
 1736  ! actually ends up being a fairly small correction most of the time,
 1737  ! however, despite looks to the contrary, it is actually quite fast to
 1738  ! run.
 1739 
 1740  use constants
 1741  use communication
 1742  use blockpointers, only: ndom, il, jl, kl, xseed, flowdoms, x, ib, jb, kb, &
 1743  ie, je, ke, fringes, scratch
 1746  use utils, only: setpointers
 1747 
 1748  implicit none
 1749 
 1750  ! Input
 ! level, sps : multigrid level and spectral instance to update.
 1751  integer(kind=intType), intent(in) :: level, sps
 1752  type(commtype), pointer :: commPattern
 1753  type(internalcommtype), pointer :: internal
 1754 
 1755  ! Working
 1756  integer(kind=intType) :: nn, ii, jj, ierr, i, j, k, d1, i1, j1, k1, d2, i2, j2, k2
 1757  integer(kind=intType) :: size, procID, index, iii, jjj
 1758  integer, dimension(mpi_status_size) :: mpiStatus
 1759  real(kind=realtype) :: frac(3), frac0(3), xcen(3)
 1760  integer(kind=intType), dimension(8), parameter :: indices = (/1, 2, 4, 3, 5, 6, 8, 7/)
 1761 
 1762  ! Set a tolerance for checking whether fractions are between 0 and 1
 ! NOTE(review): the default-real literal 1e-4 should probably be
 ! 1e-4_realType, and initializing at declaration gives this local the
 ! (harmless here, since never modified) implicit save attribute.
 1763  real(kind=realtype) :: fractol = 1e-4
 1764 
 1765  ! Pointers to the overset comms to make it easier to read
 1766  commpattern => commpatternoverset(level, sps)
 1767  internal => internaloverset(level, sps)
 1768 
 1769  ! Step 1: Since we need to update donors for all cells including the
 1770  ! donors for double halos, we must know the new cell center
 1771  ! locations for the all of these receivers. Unfortunately, the
 1772  ! double halos don't have coordinates, so we must first perform a
 1773  ! (forward) block-to-block halo exchange to populate the xSeed
 1774  ! values for all cells, including double halos.
 1775 
 1776  do nn = 1, ndom
 1777  call setpointers(nn, level, sps)
 1778 
 1779  if (.not. associated(flowdoms(nn, level, sps)%xSeed)) then
 1780  allocate (flowdoms(nn, level, sps)%XSeed(0:ib, 0:jb, 0:kb, 3))
 1781  end if
 1782  xseed => flowdoms(nn, level, sps)%xSeed
 1783  xseed = zero
 ! Cell centers of owned cells: average of the 8 surrounding nodes.
 1784  do k = 2, kl
 1785  do j = 2, jl
 1786  do i = 2, il
 1787  xseed(i, j, k, :) = eighth * ( &
 1788  x(i - 1, j - 1, k - 1, :) + &
 1789  x(i, j - 1, k - 1, :) + &
 1790  x(i - 1, j, k - 1, :) + &
 1791  x(i, j, k - 1, :) + &
 1792  x(i - 1, j - 1, k, :) + &
 1793  x(i, j - 1, k, :) + &
 1794  x(i - 1, j, k, :) + &
 1795  x(i, j, k, :)) !+ fringes(i,j,k)%offset
 1796  end do
 1797  end do
 1798  end do
 1799  end do
 1800 
 1801  ! Exchange the xSeeds.
 1802  do nn = 1, ndom
 1803  flowdoms(nn, level, sps)%realCommVars(1)%var => flowdoms(nn, level, sps)%xSeed(:, :, :, 1)
 1804  flowdoms(nn, level, sps)%realCommVars(2)%var => flowdoms(nn, level, sps)%xSeed(:, :, :, 2)
 1805  flowdoms(nn, level, sps)%realCommVars(3)%var => flowdoms(nn, level, sps)%xSeed(:, :, :, 3)
 1806  end do
 1807 
 1808  ! Run the (forward) generic halo exchange.
 ! NOTE(review): the call to the generic real halo exchange (3 vars)
 ! belongs here; this listing appears to have dropped that line --
 ! confirm against the full source.
 1810 
 1811  ! Step 2: Next we need to communicate the xSeeds to their donor
 1812  ! procs. This means running the overset exchange in REVERSE (ie from
 1813  ! receiver to donor). Most of this code will look like
 1814  ! wOverset_b. We will run the newtonUpdate code (below) on the fly
 1815  ! as we receive the data, which should hide some of the comm time.
 1816 
 1817  ! Gather up the seeds into the *recv* buffer. Note we loop over
 1818  ! nProcRECV here! After the buffer is assembled it is sent off.
 1819 
 1820  jj = 1
 1821  ii = 1
 1822  recvs: do i = 1, commpattern%nProcRecv
 1823 
 1824  ! Store the processor id and the size of the message
 1825  ! a bit easier.
 1826 
 1827  procid = commpattern%recvProc(i)
 1828  size = 3 * commpattern%nrecv(i)
 1829 
 1830  ! Copy the data into the buffer
 1831 
 1832  do j = 1, commpattern%nrecv(i)
 1833 
 1834  ! Store the block and the indices to make code a bit easier to read
 1835 
 1836  d2 = commpattern%recvList(i)%block(j)
 1837  i2 = commpattern%recvList(i)%indices(j, 1)
 1838  j2 = commpattern%recvList(i)%indices(j, 2)
 1839  k2 = commpattern%recvList(i)%indices(j, 3)
 1840 
 1841  ! Copy the xSeed
 1842  recvbuffer(jj) = flowdoms(d2, level, sps)%xSeed(i2, j2, k2, 1)
 1843  recvbuffer(jj + 1) = flowdoms(d2, level, sps)%xSeed(i2, j2, k2, 2)
 1844  recvbuffer(jj + 2) = flowdoms(d2, level, sps)%xSeed(i2, j2, k2, 3)
 1845  jj = jj + 3
 1846  end do
 1847 
 1848  ! Send the data (reverse exchange: recv lists are the sources).
 1849  call mpi_isend(recvbuffer(ii), size, adflow_real, procid, &
 1850  procid, adflow_comm_world, recvrequests(i), &
 1851  ierr)
 1852 
 1853  ! Set ii to jj for the next processor.
 1854 
 1855  ii = jj
 1856 
 1857  end do recvs
 1858 
 1859  ! Post the nonblocking receives.
 1860 
 1861  ii = 1
 1862  sends: do i = 1, commpattern%nProcSend
 1863 
 1864  ! Store the processor id and the size of the message
 1865  ! a bit easier.
 1866 
 1867  procid = commpattern%sendProc(i)
 1868  size = 3 * commpattern%nsend(i)
 1869 
 1870  ! Post the receive (into the send-side buffer; roles are reversed).
 1871 
 1872  call mpi_irecv(sendbuffer(ii), size, adflow_real, procid, &
 1873  myid, adflow_comm_world, sendrequests(i), ierr)
 1874 
 1875  ! And update ii.
 1876 
 1877  ii = ii + size
 1878 
 1879  end do sends
 1880 
 1881  ! Do the local interpolation.
 1882 
 1883  localinterp: do i = 1, internal%ncopy
 1884 
 1885  ! Store the block and the indices of the donor a bit easier.
 1886 
 1887  d1 = internal%donorBlock(i)
 1888  i1 = internal%donorIndices(i, 1)
 1889  j1 = internal%donorIndices(i, 2)
 1890  k1 = internal%donorIndices(i, 3)
 1891 
 1892  ! Idem for the halo's.
 1893 
 1894  d2 = internal%haloBlock(i)
 1895  i2 = internal%haloIndices(i, 1)
 1896  j2 = internal%haloIndices(i, 2)
 1897  k2 = internal%haloIndices(i, 3)
 1898 
 1899  ! xCen is the '2'. This was the receiver, but since this is
 1900  ! reverse, it's now the "input"
 1901  xcen = flowdoms(d2, level, sps)%xSeed(i2, j2, k2, :)
 1902 
 1903  ! Store in the comm structure. We need it for the derivatives.
 1904  internal%xCen(i, :) = xcen
 1905 
 ! Newton search starting from the stencil center (1/2,1/2,1/2),
 ! over the 3x3 node neighborhood of the donor cell.
 1906  ! Do newton update
 1907  frac0 = (/half, half, half/)
 1908  call newtonupdate(xcen, &
 1909  flowdoms(d1, level, sps)%x(i1 - 1:i1 + 1, j1 - 1:j1 + 1, k1 - 1:k1 + 1, :), frac0, frac)
 1910 
 1911  ! Check if the fractions are between 0 and 1
 1912  if (maxval(frac) > one + fractol .or. minval(frac) < zero - fractol) then
 1913  print *, "Invalid overset connectivity update. Use 'frozen' or 'full' oversetUpdateMode instead."
 1914  error stop
 1915  end if
 1916 
 1917  ! Set the new weights
 1918  call fractoweights(frac, internal%donorInterp(i, :))
 1919  end do localinterp
 1920 
 1921  ! Complete the nonblocking receives in an arbitrary sequence and
 1922  ! copy the variables from the buffer into the halo's.
 1923 
 1924  size = commpattern%nProcSend
 1925  completesends: do i = 1, commpattern%nProcSend
 1926 
 1927  ! Complete any of the requests.
 1928 
 1929  call mpi_waitany(size, sendrequests, index, mpistatus, ierr)
 1930 
 ! 'index' identifies which posted receive finished; offset into the
 ! buffer from the cumulative send counts (nsendCum(0) assumed 0).
 1931  ii = index
 1932 
 1933  jj = 3 * commpattern%nsendCum(ii - 1)
 1934  do j = 1, commpattern%nsend(ii)
 1935 
 1936  ! Store the block and the indices of the halo a bit easier.
 1937 
 1938  d2 = commpattern%sendList(ii)%block(j)
 1939  i2 = commpattern%sendList(ii)%indices(j, 1)
 1940  j2 = commpattern%sendList(ii)%indices(j, 2)
 1941  k2 = commpattern%sendList(ii)%indices(j, 3)
 1942 
 1943  xcen = sendbuffer(jj + 1:jj + 3)
 1944  jj = jj + 3
 1945 
 1946  ! Store in the comm structure. We need it for derivatives.
 1947  commpattern%sendList(ii)%xCen(j, :) = xcen
 1948 
 1949  ! Compute new fraction
 1950  frac0 = (/half, half, half/)
 1951  call newtonupdate(xcen, &
 1952  flowdoms(d2, level, sps)%x(i2 - 1:i2 + 1, j2 - 1:j2 + 1, k2 - 1:k2 + 1, :), &
 1953  frac0, frac)
 1954 
 1955  ! Check if the fractions are between zero and one
 1956  if (maxval(frac) > one + fractol .or. minval(frac) < zero - fractol) then
 1957  print *, "Invalid overset connectivity update. Use 'frozen' or 'full' oversetUpdateMode instead."
 1958  error stop
 1959  end if
 1960 
 1961  ! Set the new weights
 1962  call fractoweights(frac, commpattern%sendList(ii)%interp(j, :))
 1963  end do
 1964 
 1965  end do completesends
 1966 
 1967  ! Complete the nonblocking sends.
 1968 
 1969  size = commpattern%nProcRecv
 1970  do i = 1, commpattern%nProcRecv
 1971  call mpi_waitany(size, recvrequests, index, mpistatus, ierr)
 1972  end do
 1973 
 1974  end subroutine updateoversetconnectivity
1975 #ifndef USE_COMPLEX
    subroutine updateoversetconnectivity_d(level, sps)

        ! Forward mode linearization of updateOversetConnectivity.
        !
        ! Recomputes the overset donor interpolation weights and, in tandem,
        ! their forward-mode derivatives (stored in internal%donorInterpd and
        ! commPattern%sendList(:)%interpd) after the mesh coordinates x and
        ! their seeds xd have changed. The communication runs in REVERSE of
        ! the normal overset exchange (receiver -> donor), so the roles of
        ! the send/recv buffers and request arrays are deliberately swapped
        ! relative to a forward exchange.
        !
        ! Parameters:
        !   level - multigrid level to operate on
        !   sps   - spectral (time) instance
        !
        ! NOTE(review): two 'use' lines were lost in extraction here
        ! (doxygen lines 1984-1985); presumably they are the overset
        ! data/utility modules that provide commpatternoverset,
        ! internaloverset, newtonupdate_d and fractoweights_d — confirm
        ! against the full source.

        use constants
        use communication
        use blockpointers, only: ndom, il, jl, kl, xseed, flowdoms, x, ib, jb, kb, &
                                 ie, je, ke, fringes, scratch, flowdomsd, xd
        use utils, only: setpointers_d

        implicit none

        ! Input
        integer(kind=intType), intent(in) :: level, sps
        type(commtype), pointer :: commPattern
        type(internalcommtype), pointer :: internal

        ! Working
        ! NOTE(review): the local 'size' shadows the intrinsic of the same
        ! name inside this routine.
        integer(kind=intType) :: nn, ii, jj, ierr, i, j, k, d1, i1, j1, k1, d2, i2, j2, k2
        integer(kind=intType) :: size, procID, index, iii, jjj
        integer, dimension(mpi_status_size) :: mpiStatus
        real(kind=realtype) :: frac(3), fracd(3), frac0(3), xcen(3), xcend(3), weight(8)
        integer(kind=intType), dimension(8), parameter :: indices = (/1, 2, 4, 3, 5, 6, 8, 7/)

        ! Pointers to the overset comms to make it easier to read
        commpattern => commpatternoverset(level, sps)
        internal => internaloverset(level, sps)

        ! Step 1: Since we need to update donors for all cells including the
        ! donors for double halos, we must know the new cell center
        ! locations for all of these receivers. Unfortunately, the
        ! double halos don't have coordinates, so we must first perform a
        ! (forward) block-to-block halo exchange to populate the xSeed
        ! values for all cells, including double halos.

        do nn = 1, ndom
            call setpointers_d(nn, level, sps)

            ! Lazily allocate xSeed the first time this block is visited.
            if (.not. associated(flowdoms(nn, level, sps)%xSeed)) then
                allocate (flowdoms(nn, level, sps)%XSeed(0:ib, 0:jb, 0:kb, 3))
            end if
            xseed => flowdoms(nn, level, sps)%xSeed
            xseed = zero

            ! Cell centers as the average of the 8 surrounding nodes,
            ! over owned cells only (halos are filled by the exchange below).
            do k = 2, kl
                do j = 2, jl
                    do i = 2, il
                        xseed(i, j, k, :) = eighth * ( &
                                            x(i - 1, j - 1, k - 1, :) + &
                                            x(i, j - 1, k - 1, :) + &
                                            x(i - 1, j, k - 1, :) + &
                                            x(i, j, k - 1, :) + &
                                            x(i - 1, j - 1, k, :) + &
                                            x(i, j - 1, k, :) + &
                                            x(i - 1, j, k, :) + &
                                            x(i, j, k, :)) !+ fringes(i,j,k)%offset

                        ! Offset is not active so the xSeed_d just has the x
                        ! part. Just dump the values into scratch so we don't
                        ! allocate any additional memory.
                        scratch(i, j, k, 1:3) = eighth * ( &
                                                xd(i - 1, j - 1, k - 1, :) + &
                                                xd(i, j - 1, k - 1, :) + &
                                                xd(i - 1, j, k - 1, :) + &
                                                xd(i, j, k - 1, :) + &
                                                xd(i - 1, j - 1, k, :) + &
                                                xd(i, j - 1, k, :) + &
                                                xd(i - 1, j, k, :) + &
                                                xd(i, j, k, :))
                    end do
                end do
            end do
        end do

        ! Exchange the xSeeds (vars 1-3) and their derivatives (vars 4-6,
        ! living in scratch) through the generic 1-to-1 halo exchange.
        do nn = 1, ndom
            flowdoms(nn, level, sps)%realCommVars(1)%var => flowdoms(nn, level, sps)%xSeed(:, :, :, 1)
            flowdoms(nn, level, sps)%realCommVars(2)%var => flowdoms(nn, level, sps)%xSeed(:, :, :, 2)
            flowdoms(nn, level, sps)%realCommVars(3)%var => flowdoms(nn, level, sps)%xSeed(:, :, :, 3)
            flowdoms(nn, level, sps)%realCommVars(4)%var => flowdoms(nn, level, sps)%scratch(:, :, :, 1)
            flowdoms(nn, level, sps)%realCommVars(5)%var => flowdoms(nn, level, sps)%scratch(:, :, :, 2)
            flowdoms(nn, level, sps)%realCommVars(6)%var => flowdoms(nn, level, sps)%scratch(:, :, :, 3)
        end do

        ! Run the (forward) generic halo exchange.
        ! NOTE(review): the actual call (doxygen line 2062, presumably
        ! whalo1to1realgeneric with 6 variables) was dropped during
        ! extraction — restore from the full source.

        ! Step 2: Next we need to communicate the xSeeds to their donor
        ! procs. This means running the overset exchange in REVERSE (ie from
        ! receiver to donor). Most of this code will look like
        ! wOverset_b. We will run the newtonUpdate code (below) on the fly
        ! as we receive the data, which should hide some of the comm time.

        ! Gather up the seeds into the *recv* buffer. Note we loop over
        ! nProcRECV here! After the buffer is assembled it is sent off.

        jj = 1
        ii = 1
        recvs: do i = 1, commpattern%nProcRecv

            ! Store the processor id and the size of the message
            ! a bit easier. 6 reals per point: 3 for xSeed, 3 for its seed.

            procid = commpattern%recvProc(i)
            size = 6 * commpattern%nrecv(i)

            ! Copy the data into the buffer

            do j = 1, commpattern%nrecv(i)

                ! Store the block and the indices to make code a bit easier to read

                d2 = commpattern%recvList(i)%block(j)
                i2 = commpattern%recvList(i)%indices(j, 1)
                j2 = commpattern%recvList(i)%indices(j, 2)
                k2 = commpattern%recvList(i)%indices(j, 3)

                ! Copy the xSeed and its derivative (kept in scratch).
                recvbuffer(jj) = flowdoms(d2, level, sps)%xSeed(i2, j2, k2, 1)
                recvbuffer(jj + 1) = flowdoms(d2, level, sps)%xSeed(i2, j2, k2, 2)
                recvbuffer(jj + 2) = flowdoms(d2, level, sps)%xSeed(i2, j2, k2, 3)
                recvbuffer(jj + 3) = flowdoms(d2, level, sps)%scratch(i2, j2, k2, 1)
                recvbuffer(jj + 4) = flowdoms(d2, level, sps)%scratch(i2, j2, k2, 2)
                recvbuffer(jj + 5) = flowdoms(d2, level, sps)%scratch(i2, j2, k2, 3)

                jj = jj + 6
            end do

            ! Send the data. Tag convention: the tag is the DESTINATION
            ! rank (procid), matched by 'myid' on the receiving side below.
            call mpi_isend(recvbuffer(ii), size, adflow_real, procid, &
                           procid, adflow_comm_world, recvrequests(i), &
                           ierr)

            ! Set ii to jj for the next processor.

            ii = jj

        end do recvs

        ! Post the nonblocking receives (into the *send* buffer, since the
        ! exchange direction is reversed).

        ii = 1
        sends: do i = 1, commpattern%nProcSend

            ! Store the processor id and the size of the message
            ! a bit easier.

            procid = commpattern%sendProc(i)
            size = 6 * commpattern%nsend(i)

            ! Post the receive.

            call mpi_irecv(sendbuffer(ii), size, adflow_real, procid, &
                           myid, adflow_comm_world, sendrequests(i), ierr)

            ! And update ii.

            ii = ii + size

        end do sends

        ! Do the local interpolation while the messages are in flight.

        localinterp: do i = 1, internal%ncopy

            ! Store the block and the indices of the donor a bit easier.

            d1 = internal%donorBlock(i)
            i1 = internal%donorIndices(i, 1)
            j1 = internal%donorIndices(i, 2)
            k1 = internal%donorIndices(i, 3)

            ! Idem for the halo's.

            d2 = internal%haloBlock(i)
            i2 = internal%haloIndices(i, 1)
            j2 = internal%haloIndices(i, 2)
            k2 = internal%haloIndices(i, 3)

            ! Receiver cell center and its derivative; frac0 = cell center
            ! is the initial guess for the Newton search.
            xcen = flowdoms(d2, level, sps)%xSeed(i2, j2, k2, :)
            xcend = flowdoms(d2, level, sps)%scratch(i2, j2, k2, 1:3)
            frac0 = (/half, half, half/)
            call newtonupdate_d(xcen, xcend, &
                                flowdoms(d1, level, sps)%x(i1 - 1:i1 + 1, j1 - 1:j1 + 1, k1 - 1:k1 + 1, :), &
                                flowdomsd(d1, level, sps)%x(i1 - 1:i1 + 1, j1 - 1:j1 + 1, k1 - 1:k1 + 1, :), &
                                frac0, frac, fracd)

            ! Set the new weights (derivative part only is stored here).
            call fractoweights_d(frac, fracd, weight, internal%donorInterpd(i, :))

        end do localinterp

        ! Complete the nonblocking receives in an arbitrary sequence and
        ! copy the variables from the buffer into the halo's.

        size = commpattern%nProcSend
        completesends: do i = 1, commpattern%nProcSend

            ! Complete any of the requests.

            call mpi_waitany(size, sendrequests, index, mpistatus, ierr)

            ii = index

            ! nsendCum(ii-1) points to the start of processor ii's chunk in
            ! the buffer; 6 reals per point.
            jj = 6 * commpattern%nsendCum(ii - 1)
            do j = 1, commpattern%nsend(ii)

                ! Store the block and the indices of the halo a bit easier.

                d2 = commpattern%sendList(ii)%block(j)
                i2 = commpattern%sendList(ii)%indices(j, 1)
                j2 = commpattern%sendList(ii)%indices(j, 2)
                k2 = commpattern%sendList(ii)%indices(j, 3)

                xcen = sendbuffer(jj + 1:jj + 3)
                xcend = sendbuffer(jj + 4:jj + 6)
                jj = jj + 6

                ! Compute new fraction and its derivative.
                frac0 = (/half, half, half/)
                call newtonupdate_d(xcen, xcend, &
                                    flowdoms(d2, level, sps)%x(i2 - 1:i2 + 1, j2 - 1:j2 + 1, k2 - 1:k2 + 1, :), &
                                    flowdomsd(d2, level, sps)%x(i2 - 1:i2 + 1, j2 - 1:j2 + 1, k2 - 1:k2 + 1, :), &
                                    frac0, frac, fracd)

                ! Set the new weights
                call fractoweights_d(frac, fracd, weight, &
                                     commpattern%sendList(ii)%interpd(j, :))
            end do

        end do completesends

        ! Complete the nonblocking sends.

        size = commpattern%nProcRecv
        do i = 1, commpattern%nProcRecv
            call mpi_waitany(size, recvrequests, index, mpistatus, ierr)
        end do

    end subroutine updateoversetconnectivity_d
2217 
    subroutine updateoversetconnectivity_b(level, sps)

        ! Reverse mode linearization of updateOversetConnectivity.
        !
        ! Propagates sensitivities of the overset interpolation weights
        ! (interpd / donorInterpd) back to the nodal coordinate seeds xd.
        ! The forward Newton search is recomputed locally (from the stored
        ! xCen values) before each reverse sweep. The communication here
        ! runs in the FORWARD overset direction (donor -> receiver), i.e.
        ! opposite to updateoversetconnectivity_d.
        !
        ! Parameters:
        !   level - multigrid level to operate on
        !   sps   - spectral (time) instance
        !
        ! NOTE(review): two 'use' lines were lost in extraction here
        ! (doxygen lines 2226-2227); presumably the overset data/utility
        ! modules providing commpatternoverset, internaloverset,
        ! newtonupdate_b and fractoweights_b — confirm against the full
        ! source.

        use constants
        use communication
        use blockpointers, only: ndom, il, jl, kl, xseed, flowdoms, x, ib, jb, kb, &
                                 ie, je, ke, fringes, scratch, flowdomsd, xd
        use utils, only: setpointers_b

        implicit none

        ! Input
        integer(kind=intType), intent(in) :: level, sps
        type(commtype), pointer :: commPattern
        type(internalcommtype), pointer :: internal

        ! Working
        integer(kind=intType) :: nn, ii, jj, kk, ierr, i, j, k, d1, i1, j1, k1, d2, i2, j2, k2
        integer(kind=intType) :: size, procID, index, iii, jjj
        integer, dimension(mpi_status_size) :: mpiStatus
        real(kind=realtype) :: frac(3), fracd(3), frac0(3), xcen(3), xcend(3), weight(8), add(3)
        integer(kind=intType), dimension(8), parameter :: indices = (/1, 2, 4, 3, 5, 6, 8, 7/)

        ! Pointers to the overset comms to make it easier to read
        commpattern => commpatternoverset(level, sps)
        internal => internaloverset(level, sps)

        ! Zero out xSeedd (in scratch).
        ! NOTE(review): this indexes multigrid level 1 explicitly while the
        ! rest of this routine uses 'level' — presumably scratch only exists
        ! on the finest level, but confirm; otherwise this is a bug.
        do nn = 1, ndom
            flowdoms(nn, 1, sps)%scratch(:, :, :, 1:3) = zero
        end do

        ! In reverse the first thing we must do is compute the
        ! sensitivities of xCen and send them to the receiving
        ! processor. This comm pattern is the same as wOverset forward.

        ii = 1
        sends: do i = 1, commpattern%nProcSend

            ! Store the processor id and the size of the message
            ! a bit easier. 3 reals (xCen sensitivity) per point.

            procid = commpattern%sendProc(i)
            size = 3 * commpattern%nsend(i)

            ! Copy the data in the correct part of the send buffer.

            jj = ii
            do j = 1, commpattern%nsend(i)

                ! Store the block id and the indices of the donor
                ! a bit easier.
                d1 = commpattern%sendList(i)%block(j)
                i1 = commpattern%sendList(i)%indices(j, 1)
                j1 = commpattern%sendList(i)%indices(j, 2)
                k1 = commpattern%sendList(i)%indices(j, 3)

                ! -------- Recompute forward pass -------------
                ! xCen was stored during the forward sweep precisely so the
                ! Newton search can be replayed here.
                xcen = commpattern%sendList(i)%xCen(j, :)

                ! Do newton update
                frac0 = (/half, half, half/)
                call newtonupdate(xcen, &
                                  flowdoms(d1, level, sps)%x(i1 - 1:i1 + 1, j1 - 1:j1 + 1, k1 - 1:k1 + 1, :), &
                                  frac0, frac)

                ! Set the new weights
                call fractoweights(frac, weight)

                ! ------------- Reverse pass -----------

                ! Transfer the weight sensitivities back to the frac
                call fractoweights_b(frac, fracd, weight, commpattern%sendList(i)%interpd(j, :))

                ! Run the reverse newton update. Note that we are
                ! accumulating into the local xd here in the newton_b call.
                frac0 = (/half, half, half/)
                call newtonupdate_b(xcen, xcend, &
                                    flowdoms(d1, level, sps)%x(i1 - 1:i1 + 1, j1 - 1:j1 + 1, k1 - 1:k1 + 1, :), &
                                    flowdomsd(d1, level, sps)%x(i1 - 1:i1 + 1, j1 - 1:j1 + 1, k1 - 1:k1 + 1, :), &
                                    frac0, frac, fracd)

                ! -------------------------------------

                ! We want to send xCend to the receiver
                sendbuffer(jj:jj + 2) = xcend
                jj = jj + 3

            end do
            ! Send the data. Tag is the destination rank, matched by 'myid'
            ! in the mpi_irecv below.

            call mpi_isend(sendbuffer(ii), size, adflow_real, procid, &
                           procid, adflow_comm_world, sendrequests(i), &
                           ierr)

            ! Set ii to jj for the next processor.

            ii = jj

        end do sends

        ! Post the nonblocking receives.

        ii = 1
        receives: do i = 1, commpattern%nProcRecv

            ! Store the processor id and the size of the message
            ! a bit easier.

            procid = commpattern%recvProc(i)
            size = 3 * commpattern%nrecv(i)

            ! Post the receive.

            call mpi_irecv(recvbuffer(ii), size, adflow_real, procid, &
                           myid, adflow_comm_world, recvrequests(i), ierr)

            ! And update ii.

            ii = ii + size

        end do receives

        ! Do the local stuff while we're waiting

        localinterp: do i = 1, internal%ncopy

            ! Store the block and the indices of the donor a bit easier.
            d1 = internal%donorBlock(i)
            i1 = internal%donorIndices(i, 1)
            j1 = internal%donorIndices(i, 2)
            k1 = internal%donorIndices(i, 3)

            d2 = internal%haloBlock(i)
            i2 = internal%haloIndices(i, 1)
            j2 = internal%haloIndices(i, 2)
            k2 = internal%haloIndices(i, 3)

            ! -------- Recompute forward pass -------------
            xcen = internal%XCen(i, :)

            ! Do newton update
            frac0 = (/half, half, half/)
            call newtonupdate(xcen, &
                              flowdoms(d1, level, sps)%x(i1 - 1:i1 + 1, j1 - 1:j1 + 1, k1 - 1:k1 + 1, :), frac0, frac)

            ! Set the new weights
            call fractoweights(frac, weight)

            ! ------------- Reverse pass -----------

            ! Transfer the weight sensitivities back to the frac
            call fractoweights_b(frac, fracd, weight, internal%Donorinterpd(i, :))

            ! Run the reverse newton update. Note that we are
            ! accumulating into the local xd here in the newton_b call.
            frac0 = (/half, half, half/)
            call newtonupdate_b(xcen, xcend, &
                                flowdoms(d1, level, sps)%x(i1 - 1:i1 + 1, j1 - 1:j1 + 1, k1 - 1:k1 + 1, :), &
                                flowdomsd(d1, level, sps)%x(i1 - 1:i1 + 1, j1 - 1:j1 + 1, k1 - 1:k1 + 1, :), &
                                frac0, frac, fracd)

            ! Accumulate in to the xSeedd (which we're using scratch for)
            flowdoms(d2, level, sps)%scratch(i2, j2, k2, 1:3) = &
                flowdoms(d2, level, sps)%scratch(i2, j2, k2, 1:3) + xcend

        end do localinterp

        ! Complete the nonblocking receives in an arbitrary sequence and
        ! accumulate the arrived sensitivities into scratch.

        size = commpattern%nProcRecv
        completerecvs: do i = 1, commpattern%nProcRecv

            ! Complete any of the requests.

            call mpi_waitany(size, recvrequests, index, mpistatus, ierr)

            ! Copy the data just arrived in the halo's.

            ii = index
            ! nrecvCum(ii-1) locates processor ii's chunk in the buffer;
            ! 3 reals per point.
            jj = 3 * commpattern%nrecvCum(ii - 1)
            do j = 1, commpattern%nrecv(ii)

                ! Store the block and the indices of the halo a bit easier.

                d2 = commpattern%recvList(ii)%block(j)
                i2 = commpattern%recvList(ii)%indices(j, 1)
                j2 = commpattern%recvList(ii)%indices(j, 2)
                k2 = commpattern%recvList(ii)%indices(j, 3)

                ! Accumulate (not overwrite): several donors may target the
                ! same receiver cell.
                flowdoms(d2, level, sps)%scratch(i2, j2, k2, 1:3) = &
                    flowdoms(d2, level, sps)%scratch(i2, j2, k2, 1:3) + recvbuffer(jj + 1:jj + 3)

                jj = jj + 3
            end do
        end do completerecvs

        ! Complete the nonblocking sends.

        size = commpattern%nProcSend
        do i = 1, commpattern%nProcSend
            call mpi_waitany(size, sendrequests, index, mpistatus, ierr)
        end do

        ! Now we have accumulated back as far as xSeedd (stored in
        ! scratch). We can now do the whalo1to1_b

        ! Exchange the xSeeds in reverse.
        do nn = 1, ndom
            flowdoms(nn, level, sps)%realCommVars(1)%var => flowdoms(nn, level, sps)%scratch(:, :, :, 1)
            flowdoms(nn, level, sps)%realCommVars(2)%var => flowdoms(nn, level, sps)%scratch(:, :, :, 2)
            flowdoms(nn, level, sps)%realCommVars(3)%var => flowdoms(nn, level, sps)%scratch(:, :, :, 3)
        end do

        ! NOTE(review): the reverse halo-exchange call (doxygen line 2436,
        ! presumably whalo1to1realgeneric_b with 3 variables) was dropped
        ! during extraction — restore from the full source.

        ! Finally we can push back to the local x: each cell-center seed is
        ! the average of its 8 corner nodes, so each of the cell's corner
        ! nodes receives eighth of the accumulated seed.
        do nn = 1, ndom
            call setpointers_b(nn, level, sps)

            do k = 2, kl
                do j = 2, jl
                    do i = 2, il
                        ! Add is the accumulated seed for xSeed (stored in scratch)
                        add = eighth * scratch(i, j, k, 1:3)
                        do kk = k - 1, k
                            do jj = j - 1, j
                                do ii = i - 1, i
                                    xd(ii, jj, kk, :) = xd(ii, jj, kk, :) + add
                                end do
                            end do
                        end do
                    end do
                end do
            end do
        end do

    end subroutine updateoversetconnectivity_b
2460 #endif
2461 end module oversetcommutilities
Definition: BCData.F90:1
Definition: block.F90:1
type(fringetype), dimension(:), pointer fringes
integer(kind=inttype) jl
integer(kind=inttype) ie
real(kind=realtype), dimension(:, :, :, :), pointer scratch
integer(kind=inttype), dimension(:, :, :), pointer iblank
integer(kind=inttype) jb
integer(kind=inttype) kb
integer(kind=inttype), dimension(:), pointer bcfaceid
integer(kind=inttype) nbocos
integer(kind=inttype), dimension(:), pointer bctype
integer(kind=inttype) ib
real(kind=realtype), dimension(:, :, :, :), pointer xd
integer(kind=inttype) je
integer(kind=inttype) ke
real(kind=realtype), dimension(:, :, :, :), pointer x
real(kind=realtype), dimension(:, :, :, :), pointer xseed
integer(kind=inttype) kl
integer(kind=inttype) il
real(kind=realtype), dimension(:), allocatable recvbuffer
integer, dimension(:), allocatable recvrequests
real(kind=realtype), dimension(:), allocatable sendbuffer
type(internalcommtype), dimension(:, :), allocatable, target internaloverset
type(internalcommtype), dimension(:), allocatable internalcell_1st
type(internalcommtype), dimension(:), allocatable internalcell_2nd
integer, dimension(:), allocatable sendrequests
type(commtype), dimension(:, :), allocatable, target commpatternoverset
type(commtype), dimension(:), allocatable commpatterncell_1st
type(commtype), dimension(:), allocatable commpatterncell_2nd
integer adflow_comm_world
real(kind=realtype), parameter zero
Definition: constants.F90:71
integer(kind=inttype), parameter imax
Definition: constants.F90:293
integer(kind=inttype), parameter kmin
Definition: constants.F90:296
integer(kind=inttype), parameter jmax
Definition: constants.F90:295
real(kind=realtype), parameter eighth
Definition: constants.F90:84
real(kind=realtype), parameter one
Definition: constants.F90:72
real(kind=realtype), parameter half
Definition: constants.F90:80
integer(kind=inttype), parameter imin
Definition: constants.F90:292
integer(kind=inttype), parameter kmax
Definition: constants.F90:297
integer(kind=inttype), parameter jmin
Definition: constants.F90:294
subroutine whalo1to1realgeneric(nVar, level, sps, commPattern, internal)
subroutine whalo1to1intgeneric(nVar, level, sps, commPattern, internal)
subroutine whalo1to1realgeneric_b(nVar, level, sps, commPattern, internal)
subroutine emptyoversetcomm(level, sps)
subroutine exchangestatus(level, sps, commPattern, internal)
subroutine updateoversetconnectivity(level, sps)
subroutine recvofringe(oFringe, iDom, iProc, tagOffset, iSize, rSize, recvCount, recvInfo)
subroutine updateoversetconnectivity_b(level, sps)
subroutine setupfringeglobalind(level, sps)
subroutine getfringereturnsizes(oFringeSendList, oFringeRecvList, nOFringeSend, nOfringeRecv, oFringes, fringeRecvSizes, cumFringeRecv)
subroutine getosurfcommpattern(oMat, oMatT, sendList, nSend, recvList, nRecv, rBufSize)
subroutine sendofringe(oFringe, iDom, iProc, tagOffset, sendCount)
subroutine recvoblock(oBlock, iDom, iProc, tagOffset, iSize, rSize, recvCount, recvInfo)
subroutine exchangesurfacedelta(zipperFamList, level, sps, commPattern, internal)
subroutine oversetloadbalance(overlap)
subroutine getcommpattern(oMat, sendList, nSend, recvList, nRecv)
subroutine updateoversetconnectivity_d(level, sps)
subroutine exchangestatustranspose(level, sps, commPattern, internal)
subroutine recvosurf(oWall, iDom, iProc, tagOffset, iSize, rSize, recvCount, recvInfo)
subroutine sendosurf(oWall, iDom, iProc, tagOffset, sendCount)
subroutine exchangesurfaceiblanks(zipperFamList, level, sps, commPattern, internal)
subroutine exchangefringes(level, sps, commPattern, internal)
subroutine sendoblock(oBlock, iDom, iProc, tagOffset, sendCount)
integer(kind=inttype), dimension(:), allocatable ndomproc
Definition: overset.F90:364
integer(kind=inttype) ndomtotal
Definition: overset.F90:365
integer(kind=inttype), dimension(:), allocatable cumdomproc
Definition: overset.F90:364
subroutine fractoweights(frac, weights)
subroutine fractoweights_b(frac, fracd, weights, weightsd)
subroutine newtonupdate(xcen, blk, frac0, frac)
subroutine newtonupdate_b(xcen, xcend, blk, blkd, frac0, frac, fracd)
subroutine newtonupdate_d(xcen, xcend, blk, blkd, frac0, frac, fracd)
subroutine fractoweights_d(frac, fracd, weights, weightsd)
subroutine setisdonor(i, flag)
subroutine fractoweights(frac, weights)
subroutine addtofringelist(fringeList, n, fringe)
subroutine newtonupdate(xCen, blk, frac0, frac)
subroutine setiswalldonor(i, flag)
subroutine getstatus(i, isDonor, isHole, isCompute, isFloodSeed, isFlooded, isWallDonor, isReceiver)
integer(kind=inttype) function windindex(i, j, k, il, jl, kl)
logical function faminlist(famID, famList)
Definition: sorting.F90:7
subroutine unique(arr, nn, n_unique, inverse)
Definition: sorting.F90:899
Definition: utils.F90:1
subroutine setpointers_d(nn, level, sps)
Definition: utils.F90:3564
subroutine echk(errorcode, file, line)
Definition: utils.F90:5722
subroutine setpointers_b(nn, level, sps)
Definition: utils.F90:3553
subroutine setpointers(nn, mm, ll)
Definition: utils.F90:3237