! ADflow v1.0
! ADflow is a finite volume RANS solver tailored for gradient-based
! aerodynamic design optimization.
! File: haloExchange.F90 -- halo-exchange communication routines.
2 
3 contains
4 
subroutine whalo1(level, start, end, commPressure, commGamma, &
                  commViscous)
    !
    ! whalo1 exchanges all the 1st level internal halo's for the
    ! cell centered variables.
    !
    ! level        - grid level on which the exchange takes place.
    ! start, end   - range of w-variables to communicate.
    ! commPressure - whether the pressure must be communicated.
    ! commGamma    - whether gamma must be communicated (only done
    !                for the cp temperature curve fit model).
    ! commViscous  - whether the viscosities must be communicated.
    !
    use constants
    use blockpointers
    use communication
    use flowvarrefstate
    use inputphysics
    use inputtimespectral
    use iteration
    use utils, only: setpointers, getcorrectfork
    use flowutils, only: computeetotblock
    implicit none
    !
    ! Subroutine arguments.
    !
    integer(kind=intType), intent(in) :: level, start, end
    logical, intent(in) :: commPressure, commGamma, commViscous
    !
    ! Local variables.
    !
    integer(kind=intType) :: nn, mm, ll

    logical :: correctForK, commLamVis, commEddyVis, commVarGamma

    ! Set the logicals whether or not to communicate the viscosities.
    ! The laminar viscosity is only exchanged for viscous problems,
    ! the eddy viscosity only when an eddy-viscosity model is active.

    commlamvis = .false.
    if (viscous .and. commviscous) commlamvis = .true.

    commeddyvis = .false.
    if (eddymodel .and. commviscous) commeddyvis = .true.

    ! Set the logical whether or not to communicate gamma. Gamma is
    ! only a field variable for the cp temperature curve fit model.

    commvargamma = .false.
    if (commgamma .and. (cpmodel == cptempcurvefits)) &
        commvargamma = .true.

    ! Exchange the 1 to 1 matching 1st level cell halo's.

    call whalo1to1(level, start, end, commPressure, commVarGamma, &
                   commlamvis, commeddyvis, commpatterncell_1st, &
                   internalcell_1st)

    ! Exchange the overset cells.

    call woverset(level, start, end, commPressure, commVarGamma, &
                  commlamvis, commeddyvis, commpatternoverset, internaloverset)

    ! Average any overset orphans, for every spectral solution and
    ! every local domain.

    do ll = 1, ntimeintervalsspectral
        do nn = 1, ndom
            call setpointers(nn, level, ll)
            call orphanaverage(start, end, commPressure, commGamma, &
                               commlamvis, commeddyvis)
        end do
    end do

    ! If both the pressure and the total energy have been communicated
    ! compute the energy again. The reason is that both values are
    ! interpolated and consequently the values are not consistent.
    ! The energy depends quadratically on the velocity.

    bothpande: if (commpressure .and. start <= irhoe .and. &
                   end >= irhoE) then

        ! First determine whether or not the total energy must be
        ! corrected for the presence of the turbulent kinetic energy.

        correctfork = getcorrectfork()

        ! Loop over the blocks to find the sliding mesh subfaces.
        ! Use is made of the fact that the boundary conditions are
        ! identical for all spectral solutions, so that loop can be
        ! inside the test for the sliding mesh subface.

        domains: do nn = 1, ndom
            do mm = 1, flowdoms(nn, level, 1)%nBocos
                if (flowdoms(nn, level, 1)%BCType(mm) == slidinginterface) then

                    ! Loop over the number of spectral solutions.

                    do ll = 1, ntimeintervalsspectral

                        ! Set the pointers for this block and compute
                        ! the energy for the halo cells of this sliding
                        ! interface subface.

                        call setpointers(nn, level, ll)
                        call computeetotblock(icbeg(mm), icend(mm), &
                                              jcbeg(mm), jcend(mm), &
                                              kcbeg(mm), kcend(mm), correctfork)
                    end do
                end if
            end do

        end do domains

    end if bothpande

end subroutine whalo1
108 
subroutine whalo2(level, start, end, commPressure, commGamma, &
                  commViscous)
    !
    ! whalo2 exchanges all the 2nd level internal halo's for the
    ! cell centered variables.
    !
    ! level        - grid level on which the exchange takes place.
    ! start, end   - range of w-variables to communicate.
    ! commPressure - whether the pressure must be communicated.
    ! commGamma    - whether gamma must be communicated (only done
    !                for the cp temperature curve fit model).
    ! commViscous  - whether the viscosities must be communicated.
    !
    use constants
    use blockpointers
    use communication
    use flowvarrefstate
    use inputphysics
    use inputtimespectral
    use iteration
    use utils, only: setpointers, getcorrectfork
    use flowutils, only: computeetotblock
    implicit none
    !
    ! Subroutine arguments.
    !
    integer(kind=intType), intent(in) :: level, start, end
    logical, intent(in) :: commPressure, commGamma, commViscous
    !
    ! Local variables.
    !
    integer(kind=intType) :: nn, ll

    logical :: correctForK, commLamVis, commEddyVis, commVarGamma

    ! Set the logicals whether or not to communicate the viscosities.

    commlamvis = .false.
    if (viscous .and. commviscous) commlamvis = .true.

    commeddyvis = .false.
    if (eddymodel .and. commviscous) commeddyvis = .true.

    ! Set the logical whether or not to communicate gamma. Gamma is
    ! only a field variable for the cp temperature curve fit model.

    commvargamma = .false.
    if (commgamma .and. (cpmodel == cptempcurvefits)) &
        commvargamma = .true.

    ! Exchange the 1 to 1 matching 2nd level cell halo's.

    call whalo1to1(level, start, end, commPressure, commVarGamma, &
                   commlamvis, commeddyvis, commpatterncell_2nd, &
                   internalcell_2nd)

    ! Exchange the overset cells.

    call woverset(level, start, end, commPressure, commVarGamma, &
                  commlamvis, commeddyvis, commpatternoverset, internaloverset)

    ! Average any overset orphans, for every spectral solution and
    ! every local domain.

    do ll = 1, ntimeintervalsspectral
        do nn = 1, ndom
            call setpointers(nn, level, ll)
            call orphanaverage(start, end, commPressure, commGamma, &
                               commlamvis, commeddyvis)
        end do
    end do

    ! If both the pressure and the total energy have been communicated
    ! compute the energy again. The reason is that both values are
    ! interpolated and consequently the values are not consistent.
    ! The energy depends quadratically on the velocity.

    bothpande: if (commpressure .and. start <= irhoe .and. &
                   end >= irhoE) then

        ! First determine whether or not the total energy must be
        ! corrected for the presence of the turbulent kinetic energy.

        correctfork = getcorrectfork()

        domains: do nn = 1, ndom

            ! Treat the overset blocks. Since we don't have the logic
            ! setup here correctly to only update the overset cells,
            ! just do the whole block, for every block.

            do ll = 1, ntimeintervalsspectral
                call setpointers(nn, level, ll)
                call computeetotblock(2, il, 2, jl, 2, kl, correctfork)
            end do

        end do domains

    end if bothpande

end subroutine whalo2
200 
subroutine orphanaverage(wstart, wend, calcPressure, calcGamma, &
                         calcLamVis, calcEddyVis)
    !
    ! orphanAverage sets the flow state of every overset orphan cell
    ! of the current block to a simple average of its immediate
    ! (face) neighbors. The routine operates on the block given by
    ! the block pointers, so it is assumed they are set.
    !
    ! wstart, wend - range of w-variables to treat.
    ! calcPressure - whether the pressure must be treated.
    ! calcGamma    - whether gamma must be treated.
    ! calcLamVis   - whether the laminar viscosity must be treated.
    ! calcEddyVis  - whether the eddy viscosity must be treated.
    !
    use constants
    use blockpointers
    use flowvarrefstate
    use inputphysics
    implicit none
    !
    ! Subroutine arguments.
    !
    integer(kind=intType), intent(in) :: wstart, wend

    logical, intent(in) :: calcPressure, calcGamma, calcLamVis
    logical, intent(in) :: calcEddyVis
    !
    ! Local variables.
    !
    integer(kind=intType) :: io, jo, ko, ia, ja, ka
    integer(kind=intType) :: istep, idir, ivar, iorphan, ncontrib

    integer(kind=intType), dimension(3) :: offset

    real(kind=realtype) :: ncontribreal

    ! Nothing to do when this block has no orphans.

    if (norphans == 0) return

    orphanloop: do iorphan = 1, norphans

        ! Indices of the orphan cell.

        io = orphans(1, iorphan)
        jo = orphans(2, iorphan)
        ko = orphans(3, iorphan)

        ! Reset the contribution counter and zero every variable that
        ! will be averaged, so the sums can be accumulated below.

        ncontrib = 0

        do ivar = wstart, wend
            w(io, jo, ko, ivar) = zero
        end do

        if (calcpressure) p(io, jo, ko) = zero
        if (calcgamma) gamma(io, jo, ko) = zero
        if (calclamvis) rlv(io, jo, ko) = zero
        if (calceddyvis) rev(io, jo, ko) = zero

        ! Visit the 6 face neighbors: for each coordinate direction
        ! take one step in the negative and the positive direction.

        directionloop: do idir = 1, 3
            plusminusloop: do istep = -1, 1, 2

                offset = 0
                offset(idir) = istep

                ! Neighbor indices; skip neighbors that fall outside
                ! the (halo-extended) bounds of the block.

                ia = io + offset(1)
                ja = jo + offset(2)
                ka = ko + offset(3)

                if (ia < 0 .or. ia > ib .or. &
                    ja < 0 .or. ja > jb .or. &
                    ka < 0 .or. ka > kb) cycle

                ! Only neighbors whose iblank value equals 1 (part of
                ! the field, fringe, or boundary condition cells)
                ! contribute to the average.

                if (iblank(ia, ja, ka) == 1) then

                    ncontrib = ncontrib + 1

                    do ivar = wstart, wend
                        w(io, jo, ko, ivar) = w(io, jo, ko, ivar) + w(ia, ja, ka, ivar)
                    end do

                    ! Optional scalar fields.

                    if (calcpressure) &
                        p(io, jo, ko) = p(io, jo, ko) + p(ia, ja, ka)
                    if (calcgamma) &
                        gamma(io, jo, ko) = gamma(io, jo, ko) + gamma(ia, ja, ka)
                    if (calclamvis) &
                        rlv(io, jo, ko) = rlv(io, jo, ko) + rlv(ia, ja, ka)
                    if (calceddyvis) &
                        rev(io, jo, ko) = rev(io, jo, ko) + rev(ia, ja, ka)

                end if

            end do plusminusloop
        end do directionloop

        ! Turn the accumulated sums into averages, provided at least
        ! one suitable neighbor was found.

        checknoneighbors: if (ncontrib > 0) then

            ncontribreal = real(ncontrib, realtype)

            do ivar = wstart, wend
                w(io, jo, ko, ivar) = w(io, jo, ko, ivar) / ncontribreal
            end do

            if (calcpressure) p(io, jo, ko) = p(io, jo, ko) / ncontribreal
            if (calcgamma) gamma(io, jo, ko) = gamma(io, jo, ko) / ncontribreal
            if (calclamvis) rlv(io, jo, ko) = rlv(io, jo, ko) / ncontribreal
            if (calceddyvis) rev(io, jo, ko) = rev(io, jo, ko) / ncontribreal

        else checknoneighbors

            ! No suitable neighbors were found to compute an average;
            ! fall back to the free-stream state.

            do ivar = wstart, wend
                w(io, jo, ko, ivar) = winf(ivar)
            end do

            if (calcpressure) p(io, jo, ko) = pinfcorr
            if (calcgamma) gamma(io, jo, ko) = gammainf
            if (calclamvis) rlv(io, jo, ko) = muinf
            if (calceddyvis) rev(io, jo, ko) = eddyvisinfratio * muinf

        end if checknoneighbors

    end do orphanloop

end subroutine orphanaverage
355 
subroutine setcommpointers(start, end, commPressure, commVarGamma, commLamVis, &
                           commEddyVis, level, sps, derivPointers, nVar, varOffset)

    ! Generic routine that associates the realCommVars pointers of
    ! every local domain with the variables to be communicated: the
    ! w-variables in the range start..end, optionally followed by the
    ! pressure, gamma, the laminar viscosity and the eddy viscosity.
    ! When derivPointers is true the pointers target the derivative
    ! blocks (flowdomsd) instead of the regular flow blocks. On exit
    ! nVar holds the number of variables hooked up, the varOffset
    ! having been subtracted again.

    use constants
    use block, only: flowdoms, blocktype, flowdomsd, ndom
    implicit none

    ! Input
    integer(kind=intType), intent(in) :: start, end, level, sps
    logical, intent(in) :: commPressure, commVarGamma, commLamVis, commEddyVis
    logical, intent(in) :: derivPointers
    integer(kind=intType), intent(in) :: varOffset

    ! Output
    integer(kind=intType), intent(out) :: nVar

    ! Working:
    integer(kind=intType) :: dom, ivar
    type(blocktype), pointer :: dest, srclevel, srcfine

    domainloop: do dom = 1, ndom

        ! Restart the counter at the offset for every domain; the
        ! variable count is identical for all of them.

        nvar = varoffset

        ! The pointers always live on the regular flow block.

        dest => flowdoms(dom, level, sps)

        ! Choose regular or derivative data as the pointer target.

        if (derivpointers) then
            srclevel => flowdomsd(dom, level, sps)
            srcfine => flowdomsd(dom, 1, sps)
        else
            srclevel => flowdoms(dom, level, sps)
            srcfine => flowdoms(dom, 1, sps)
        end if

        ! The requested range of w-variables.

        do ivar = start, end
            nvar = nvar + 1
            dest%realCommVars(nvar)%var => srclevel%w(:, :, :, ivar)
        end do

        ! Optional scalar fields. Note that gamma and the laminar
        ! viscosity are taken from the finest grid level -- presumably
        ! they are only stored there; confirm against the block module.

        if (commpressure) then
            nvar = nvar + 1
            dest%realCommVars(nvar)%var => srclevel%P(:, :, :)
        end if

        if (commvargamma) then
            nvar = nvar + 1
            dest%realCommVars(nvar)%var => srcfine%gamma(:, :, :)
        end if

        if (commlamvis) then
            nvar = nvar + 1
            dest%realCommVars(nvar)%var => srcfine%rlv(:, :, :)
        end if

        if (commeddyvis) then
            nvar = nvar + 1
            dest%realCommVars(nvar)%var => srclevel%rev(:, :, :)
        end if

    end do domainloop

    ! Report the number of communicated variables without the offset.

    nvar = nvar - varoffset
end subroutine setcommpointers
420 
subroutine whalo1to1(level, start, end, commPressure, &
                     commVarGamma, commLamVis, commEddyVis, &
                     commPattern, internal)
    !
    ! whalo1to1 exchanges the 1 to 1 internal halo's for the cell
    ! centered variables for the given communication pattern. It
    ! is possible to send a range of variables and not the entire
    ! set, e.g. only the flow variables or only the turbulent
    ! variables. This is controlled by the arguments start, end,
    ! commPressure, commVarGamma, commLamVis and commEddyVis. The
    ! exchange takes place for the given grid level.
    !
    use constants
    use block
    use communication
    use inputtimespectral
    implicit none
    !
    ! Subroutine arguments.
    !
    integer(kind=intType), intent(in) :: level, start, end
    logical, intent(in) :: commPressure, commVarGamma
    logical, intent(in) :: commLamVis, commEddyVis

    type(commtype), dimension(*), intent(in) :: commPattern
    type(internalcommtype), dimension(*), intent(in) :: internal
    !
    ! Local variables.
    !
    integer(kind=intType) :: nVar, sps

    logical :: correctPeriodic

    ! Set the logical correctPeriodic. Only if a momentum variable
    ! is communicated is it needed to apply the periodic
    ! transformations to the velocity afterwards.

    correctperiodic = .false.
    if (start <= ivx .and. end >= ivz) correctPeriodic = .true.

    spectralmodes: do sps = 1, ntimeintervalsspectral

        ! Hook up the communication pointers for this spectral mode.

        call setcommpointers(start, end, commPressure, commVarGamma, &
                             commlamvis, commeddyvis, level, sps, .false., nvar, 0)

        ! Nothing to communicate. The count is identical for every
        ! spectral mode, so we can return immediately.

        if (nvar == 0) then
            return
        end if

        ! Run the generic exchange.

        call whalo1to1realgeneric(nvar, level, sps, commpattern, internal)

        ! Apply the periodic rotation to the velocity of the halo's
        ! that were obtained via a periodic transformation, both for
        ! the internal (local) and the distributed communication.

        if (correctperiodic) then
            if (internal(level)%nPeriodic > 0) then
                call correctperiodicvelocity(level, sps, &
                                             internal(level)%nPeriodic, internal(level)%periodicData)
            end if

            if (commpattern(level)%nPeriodic > 0) then
                call correctperiodicvelocity(level, sps, &
                                             commpattern(level)%nPeriodic, commpattern(level)%periodicData)
            end if
        end if

    end do spectralmodes

end subroutine whalo1to1
486 
subroutine correctperiodicvelocity(level, sp, nPeriodic, &
                                   periodicData)
    !
    ! correctPeriodicVelocity applies the periodic transformation
    ! to the velocity of the cell halo's in periodicData.
    !
    ! level        - grid level of the halo cells.
    ! sp           - spectral solution index.
    ! nPeriodic    - number of periodic transformations.
    ! periodicData - per transformation: the rotation matrix and the
    !                list of halo cells (block id plus i,j,k indices)
    !                it must be applied to.
    !
    use constants
    use block
    use communication
    implicit none
    !
    ! Subroutine arguments.
    !
    integer(kind=intType), intent(in) :: level, sp, nPeriodic
    type(periodicdatatype), dimension(:), pointer :: periodicData
    !
    ! Local variables.
    !
    integer(kind=intType) :: nn, mm, ii, i, j, k
    real(kind=realtype) :: vx, vy, vz

    real(kind=realtype), dimension(3, 3) :: rotmatrix

    ! Loop over the number of periodic transformations.

    do nn = 1, nperiodic

        ! Store the rotation matrix a bit easier.

        rotmatrix = periodicdata(nn)%rotMatrix

        ! Loop over the number of halo cells for this transformation.
        !DIR$ NOVECTOR
        do ii = 1, periodicdata(nn)%nhalos

            ! Store the block and the indices a bit easier.

            mm = periodicdata(nn)%block(ii)
            i = periodicdata(nn)%indices(ii, 1)
            j = periodicdata(nn)%indices(ii, 2)
            k = periodicdata(nn)%indices(ii, 3)

            ! Store the original velocities in vx, vy, vz; they are
            ! needed unmodified for all three rows of the product.

            vx = flowdoms(mm, level, sp)%w(i, j, k, ivx)
            vy = flowdoms(mm, level, sp)%w(i, j, k, ivy)
            vz = flowdoms(mm, level, sp)%w(i, j, k, ivz)

            ! Compute the rotated velocity vector.

            flowdoms(mm, level, sp)%w(i, j, k, ivx) = rotmatrix(1, 1) * vx &
                                                      + rotmatrix(1, 2) * vy &
                                                      + rotmatrix(1, 3) * vz
            flowdoms(mm, level, sp)%w(i, j, k, ivy) = rotmatrix(2, 1) * vx &
                                                      + rotmatrix(2, 2) * vy &
                                                      + rotmatrix(2, 3) * vz
            flowdoms(mm, level, sp)%w(i, j, k, ivz) = rotmatrix(3, 1) * vx &
                                                      + rotmatrix(3, 2) * vy &
                                                      + rotmatrix(3, 3) * vz
        end do

    end do

end subroutine correctperiodicvelocity
552 
subroutine whalo1to1realgeneric(nVar, level, sps, commPattern, internal)
    !
    ! whalo1to1RealGeneric exchanges the 1 to 1 internal halo's for
    ! the cell centered real variables for the given communication
    ! pattern. Pointers must have been set for the nVar
    ! communication variables (realCommVars), e.g. via
    ! setcommpointers.
    !
    use constants
    use block
    use communication
    use utils, only: echk
    implicit none
    !
    ! Subroutine arguments.
    !
    integer(kind=intType), intent(in) :: nVar, level, sps

    type(commtype), dimension(*), intent(in) :: commPattern
    type(internalcommtype), dimension(*), intent(in) :: internal
    !
    ! Local variables.
    !
    ! msgSize replaces the original name 'size', which shadowed the
    ! intrinsic of the same name.
    integer :: msgSize, procID, ierr, index
    integer, dimension(mpi_status_size) :: mpiStatus

    integer(kind=intType) :: i, j, k, ii, jj
    integer(kind=intType) :: d1, i1, j1, k1, d2, i2, j2, k2

    ! Send the variables. The data is first copied into
    ! the send buffer after which the buffer is sent asap.

    ii = 1
    sends: do i = 1, commpattern(level)%nProcSend

        ! Store the processor id and the size of the message
        ! a bit easier.

        procid = commpattern(level)%sendProc(i)
        msgsize = nvar * commpattern(level)%nsend(i)

        ! Copy the data in the correct part of the send buffer.

        jj = ii
        !DIR$ NOVECTOR
        do j = 1, commpattern(level)%nsend(i)

            ! Store the block id and the indices of the donor
            ! a bit easier.

            d1 = commpattern(level)%sendList(i)%block(j)
            i1 = commpattern(level)%sendList(i)%indices(j, 1) + 1
            j1 = commpattern(level)%sendList(i)%indices(j, 2) + 1
            k1 = commpattern(level)%sendList(i)%indices(j, 3) + 1

            ! Copy the given range of the working variables for
            ! this cell in the buffer. Update the counter jj.
            !DIR$ NOVECTOR
            do k = 1, nvar
                sendbuffer(jj) = flowdoms(d1, level, sps)%realCommVars(k)%var(i1, j1, k1)
                jj = jj + 1
            end do
        end do

        ! Send the data. The message tag is the rank of the
        ! destination processor.

        call mpi_isend(sendbuffer(ii), msgsize, adflow_real, procid, &
                       procid, adflow_comm_world, sendrequests(i), &
                       ierr)
        call echk(ierr, __FILE__, __LINE__)

        ! Set ii to jj for the next processor.

        ii = jj

    end do sends

    ! Post the nonblocking receives. The expected tag is this
    ! processor's own rank (myid), matching the isend above.

    ii = 1
    receives: do i = 1, commpattern(level)%nProcRecv

        ! Store the processor id and the size of the message
        ! a bit easier.

        procid = commpattern(level)%recvProc(i)
        msgsize = nvar * commpattern(level)%nrecv(i)

        ! Post the receive.

        call mpi_irecv(recvbuffer(ii), msgsize, adflow_real, procid, &
                       myid, adflow_comm_world, recvrequests(i), &
                       ierr)
        call echk(ierr, __FILE__, __LINE__)

        ! And update ii.

        ii = ii + msgsize

    end do receives

    ! Copy the local (on-processor) data while the messages are in
    ! flight.

    !DIR$ NOVECTOR
    localcopy: do i = 1, internal(level)%ncopy

        ! Store the block and the indices of the donor a bit easier.

        d1 = internal(level)%donorBlock(i)
        i1 = internal(level)%donorIndices(i, 1) + 1
        j1 = internal(level)%donorIndices(i, 2) + 1
        k1 = internal(level)%donorIndices(i, 3) + 1

        ! Idem for the halo's.

        d2 = internal(level)%haloBlock(i)
        i2 = internal(level)%haloIndices(i, 1) + 1
        j2 = internal(level)%haloIndices(i, 2) + 1
        k2 = internal(level)%haloIndices(i, 3) + 1

        do k = 1, nvar
            flowdoms(d2, level, sps)%realCommVars(k)%var(i2, j2, k2) = &
                flowdoms(d1, level, sps)%realCommVars(k)%var(i1, j1, k1)
        end do

    end do localcopy

    ! Complete the nonblocking receives in an arbitrary sequence and
    ! copy the variables from the buffer into the halo's. msgSize is
    ! reused here as the number of pending requests.

    msgsize = commpattern(level)%nProcRecv
    completerecvs: do i = 1, commpattern(level)%nProcRecv

        ! Complete any of the requests.

        call mpi_waitany(msgsize, recvrequests, index, mpistatus, ierr)
        call echk(ierr, __FILE__, __LINE__)

        ! Copy the data just arrived in the halo's. The cumulative
        ! count gives the offset of this processor's chunk.

        ii = index
        jj = nvar * commpattern(level)%nrecvCum(ii - 1)
        !DIR$ NOVECTOR
        do j = 1, commpattern(level)%nrecv(ii)

            ! Store the block and the indices of the halo a bit easier.

            d2 = commpattern(level)%recvList(ii)%block(j)
            i2 = commpattern(level)%recvList(ii)%indices(j, 1) + 1
            j2 = commpattern(level)%recvList(ii)%indices(j, 2) + 1
            k2 = commpattern(level)%recvList(ii)%indices(j, 3) + 1

            do k = 1, nvar
                jj = jj + 1
                flowdoms(d2, level, sps)%realCommVars(k)%var(i2, j2, k2) = recvbuffer(jj)
            end do
        end do
    end do completerecvs

    ! Complete the nonblocking sends.

    msgsize = commpattern(level)%nProcSend
    do i = 1, commpattern(level)%nProcSend
        call mpi_waitany(msgsize, sendrequests, index, mpistatus, ierr)
        call echk(ierr, __FILE__, __LINE__)
    end do

end subroutine whalo1to1realgeneric
720 
subroutine whalo1to1realgeneric_b(nVar, level, sps, commPattern, internal)
    !
    ! whalo1to1RealGeneric_b is a generic implementation of the
    ! reverse (adjoint) mode of whalo1to1RealGeneric: the seeds in
    ! the halo cells are sent back to and summed into their donor
    ! cells, and the halo seeds are zeroed. Pointers must be set for
    ! the nVar communication variables (realCommVars).
    !
    use constants
    use block
    use communication
    use utils, only: echk
    implicit none
    !
    ! Subroutine arguments.
    !
    integer(kind=intType), intent(in) :: nVar, level, sps

    type(commtype), dimension(*), intent(in) :: commPattern
    type(internalcommtype), dimension(*), intent(in) :: internal
    !
    ! Local variables.
    !
    ! msgSize replaces the original name 'size', which shadowed the
    ! intrinsic of the same name.
    integer :: msgSize, procID, ierr, index
    integer, dimension(mpi_status_size) :: mpiStatus

    integer(kind=intType) :: i, j, k, ii, jj
    integer(kind=intType) :: d1, i1, j1, k1, d2, i2, j2, k2

    ! Gather up the seeds into the *recv* buffer. Note we loop
    ! over nProcRECV here! After the buffer is assembled it is
    ! sent off. The halo seeds are zeroed once copied.

    jj = 1
    ii = 1
    recvs: do i = 1, commpattern(level)%nProcRecv

        ! Store the processor id and the size of the message
        ! a bit easier.

        procid = commpattern(level)%recvProc(i)
        msgsize = nvar * commpattern(level)%nrecv(i)

        ! Copy the data into the buffer.
        !DIR$ NOVECTOR
        do j = 1, commpattern(level)%nrecv(i)

            ! Store the block and the indices of the halo a bit easier.

            d2 = commpattern(level)%recvList(i)%block(j)
            i2 = commpattern(level)%recvList(i)%indices(j, 1) + 1
            j2 = commpattern(level)%recvList(i)%indices(j, 2) + 1
            k2 = commpattern(level)%recvList(i)%indices(j, 3) + 1

            do k = 1, nvar
                recvbuffer(jj) = flowdoms(d2, level, sps)%realCommVars(k)%var(i2, j2, k2)
                flowdoms(d2, level, sps)%realCommVars(k)%var(i2, j2, k2) = zero
                jj = jj + 1
            end do
        end do

        ! Send the data. The message tag is the rank of the
        ! destination processor.

        call mpi_isend(recvbuffer(ii), msgsize, adflow_real, procid, &
                       procid, adflow_comm_world, recvrequests(i), &
                       ierr)
        call echk(ierr, __FILE__, __LINE__)

        ! Set ii to jj for the next processor.

        ii = jj

    end do recvs

    ! Post the nonblocking receives, looping over the processors this
    ! rank normally sends to. The expected tag is this processor's
    ! own rank (myid), matching the isend above.

    ii = 1
    sends: do i = 1, commpattern(level)%nProcSend

        ! Store the processor id and the size of the message
        ! a bit easier.

        procid = commpattern(level)%sendProc(i)
        msgsize = nvar * commpattern(level)%nsend(i)

        ! Post the receive.

        call mpi_irecv(sendbuffer(ii), msgsize, adflow_real, procid, &
                       myid, adflow_comm_world, sendrequests(i), &
                       ierr)
        call echk(ierr, __FILE__, __LINE__)

        ! And update ii.

        ii = ii + msgsize

    end do sends

    ! Treat the local (on-processor) copies: sum the halo seeds into
    ! the donor seeds and zero the halo seeds.
    !DIR$ NOVECTOR
    localcopy: do i = 1, internal(level)%ncopy

        ! Store the block and the indices of the donor a bit easier.

        d1 = internal(level)%donorBlock(i)
        i1 = internal(level)%donorIndices(i, 1) + 1
        j1 = internal(level)%donorIndices(i, 2) + 1
        k1 = internal(level)%donorIndices(i, 3) + 1

        ! Idem for the halo's.

        d2 = internal(level)%haloBlock(i)
        i2 = internal(level)%haloIndices(i, 1) + 1
        j2 = internal(level)%haloIndices(i, 2) + 1
        k2 = internal(level)%haloIndices(i, 3) + 1

        ! Sum into the '1' values (donors) from the '2' values (halos).
        do k = 1, nvar
            flowdoms(d1, level, sps)%realCommVars(k)%var(i1, j1, k1) = &
                flowdoms(d1, level, sps)%realCommVars(k)%var(i1, j1, k1) + &
                flowdoms(d2, level, sps)%realCommVars(k)%var(i2, j2, k2)
            flowdoms(d2, level, sps)%realCommVars(k)%var(i2, j2, k2) = zero
        end do

    end do localcopy

    ! Complete the nonblocking receives in an arbitrary sequence and
    ! accumulate the received seeds into the donor cells. msgSize is
    ! reused here as the number of pending requests.

    msgsize = commpattern(level)%nProcSend
    completesends: do i = 1, commpattern(level)%nProcSend

        ! Complete any of the requests.

        call mpi_waitany(msgsize, sendrequests, index, mpistatus, ierr)
        call echk(ierr, __FILE__, __LINE__)

        ! Accumulate the data just arrived into the donors. The
        ! cumulative count gives the offset of this processor's chunk.

        ii = index
        jj = nvar * commpattern(level)%nsendCum(ii - 1)
        !DIR$ NOVECTOR
        do j = 1, commpattern(level)%nsend(ii)

            ! Store the block and the indices of the donor a bit easier.

            d2 = commpattern(level)%sendList(ii)%block(j)
            i2 = commpattern(level)%sendList(ii)%indices(j, 1) + 1
            j2 = commpattern(level)%sendList(ii)%indices(j, 2) + 1
            k2 = commpattern(level)%sendList(ii)%indices(j, 3) + 1

            do k = 1, nvar
                jj = jj + 1
                flowdoms(d2, level, sps)%realCommVars(k)%var(i2, j2, k2) = &
                    flowdoms(d2, level, sps)%realCommVars(k)%var(i2, j2, k2) + sendbuffer(jj)
            end do
        end do

    end do completesends

    ! Complete the nonblocking sends.

    msgsize = commpattern(level)%nProcRecv
    do i = 1, commpattern(level)%nProcRecv
        call mpi_waitany(msgsize, recvrequests, index, mpistatus, ierr)
        call echk(ierr, __FILE__, __LINE__)
    end do

end subroutine whalo1to1realgeneric_b
890 
subroutine whalo1to1intgeneric(nVar, level, sps, commPattern, internal)
    !
    ! whalo1to1IntGeneric exchanges the 1 to 1 internal halo's for
    ! the cell centered integer variables for the given communication
    ! pattern. Pointers must be set for the nVar communication
    ! variables (intCommVars).
    !
    use constants
    use block
    use communication
    use utils, only: echk
    implicit none
    !
    ! Subroutine arguments.
    !
    integer(kind=intType), intent(in) :: nVar, level, sps

    type(commtype), dimension(*), intent(in) :: commPattern
    type(internalcommtype), dimension(*), intent(in) :: internal
    !
    ! Local variables.
    !
    ! msgSize replaces the original name 'size', which shadowed the
    ! intrinsic of the same name.
    integer :: msgSize, procID, ierr, index
    integer, dimension(mpi_status_size) :: mpiStatus

    integer(kind=intType) :: i, j, k, ii, jj
    integer(kind=intType) :: d1, i1, j1, k1, d2, i2, j2, k2

    integer(kind=intType), dimension(:), allocatable :: sendBufInt
    integer(kind=intType), dimension(:), allocatable :: recvBufInt

    ! Allocate the integer communication buffers; the cumulative
    ! send/recv counts give the total number of cells exchanged.
    ! NOTE(review): the allocation status is captured but not acted
    ! upon, matching the original code -- consider handling stat /= 0.

    ii = commpattern(level)%nProcSend
    ii = commpattern(level)%nsendCum(ii)
    jj = commpattern(level)%nProcRecv
    jj = commpattern(level)%nrecvCum(jj)

    allocate (sendbufint(ii * nvar), recvbufint(jj * nvar), stat=ierr)

    ! Send the variables. The data is first copied into
    ! the send buffer after which the buffer is sent asap.

    ii = 1
    sends: do i = 1, commpattern(level)%nProcSend

        ! Store the processor id and the size of the message
        ! a bit easier.

        procid = commpattern(level)%sendProc(i)
        msgsize = nvar * commpattern(level)%nsend(i)

        ! Copy the data in the correct part of the send buffer.

        jj = ii
        !DIR$ NOVECTOR
        do j = 1, commpattern(level)%nsend(i)

            ! Store the block id and the indices of the donor
            ! a bit easier.

            d1 = commpattern(level)%sendList(i)%block(j)
            i1 = commpattern(level)%sendList(i)%indices(j, 1) + 1
            j1 = commpattern(level)%sendList(i)%indices(j, 2) + 1
            k1 = commpattern(level)%sendList(i)%indices(j, 3) + 1

            ! Copy the given range of the working variables for
            ! this cell in the buffer. Update the counter jj.
            !DIR$ NOVECTOR
            do k = 1, nvar
                sendbufint(jj) = flowdoms(d1, level, sps)%intCommVars(k)%var(i1, j1, k1)
                jj = jj + 1
            end do
        end do

        ! Send the data. The message tag is the rank of the
        ! destination processor.

        call mpi_isend(sendbufint(ii), msgsize, adflow_integer, procid, &
                       procid, adflow_comm_world, sendrequests(i), &
                       ierr)
        call echk(ierr, __FILE__, __LINE__)

        ! Set ii to jj for the next processor.

        ii = jj

    end do sends

    ! Post the nonblocking receives. The expected tag is this
    ! processor's own rank (myid), matching the isend above.

    ii = 1
    receives: do i = 1, commpattern(level)%nProcRecv

        ! Store the processor id and the size of the message
        ! a bit easier.

        procid = commpattern(level)%recvProc(i)
        msgsize = nvar * commpattern(level)%nrecv(i)

        ! Post the receive.

        call mpi_irecv(recvbufint(ii), msgsize, adflow_integer, procid, &
                       myid, adflow_comm_world, recvrequests(i), &
                       ierr)
        call echk(ierr, __FILE__, __LINE__)

        ! And update ii.

        ii = ii + msgsize

    end do receives

    ! Copy the local (on-processor) data.
    !DIR$ NOVECTOR
    localcopy: do i = 1, internal(level)%ncopy

        ! Store the block and the indices of the donor a bit easier.

        d1 = internal(level)%donorBlock(i)
        i1 = internal(level)%donorIndices(i, 1) + 1
        j1 = internal(level)%donorIndices(i, 2) + 1
        k1 = internal(level)%donorIndices(i, 3) + 1

        ! Idem for the halo's.

        d2 = internal(level)%haloBlock(i)
        i2 = internal(level)%haloIndices(i, 1) + 1
        j2 = internal(level)%haloIndices(i, 2) + 1
        k2 = internal(level)%haloIndices(i, 3) + 1

        do k = 1, nvar
            flowdoms(d2, level, sps)%intCommVars(k)%var(i2, j2, k2) = &
                flowdoms(d1, level, sps)%intCommVars(k)%var(i1, j1, k1)
        end do

    end do localcopy

    ! Complete the nonblocking receives in an arbitrary sequence and
    ! copy the variables from the buffer into the halo's. msgSize is
    ! reused here as the number of pending requests.

    msgsize = commpattern(level)%nProcRecv
    completerecvs: do i = 1, commpattern(level)%nProcRecv

        ! Complete any of the requests.

        call mpi_waitany(msgsize, recvrequests, index, mpistatus, ierr)
        call echk(ierr, __FILE__, __LINE__)

        ! Copy the data just arrived in the halo's. The cumulative
        ! count gives the offset of this processor's chunk.

        ii = index
        jj = nvar * commpattern(level)%nrecvCum(ii - 1)
        !DIR$ NOVECTOR
        do j = 1, commpattern(level)%nrecv(ii)

            ! Store the block and the indices of the halo a bit easier.

            d2 = commpattern(level)%recvList(ii)%block(j)
            i2 = commpattern(level)%recvList(ii)%indices(j, 1) + 1
            j2 = commpattern(level)%recvList(ii)%indices(j, 2) + 1
            k2 = commpattern(level)%recvList(ii)%indices(j, 3) + 1

            do k = 1, nvar
                jj = jj + 1
                flowdoms(d2, level, sps)%intCommVars(k)%var(i2, j2, k2) = recvbufint(jj)
            end do
        end do
    end do completerecvs

    ! Complete the nonblocking sends.

    msgsize = commpattern(level)%nProcSend
    do i = 1, commpattern(level)%nProcSend
        call mpi_waitany(msgsize, sendrequests, index, mpistatus, ierr)
        call echk(ierr, __FILE__, __LINE__)
    end do

    ! Release the integer buffers.

    deallocate (recvbufint, sendbufint)

end subroutine whalo1to1intgeneric
1070 
    subroutine whalo1to1intgeneric_b(nVar, level, sps, commPattern, internal)
       !
       ! whalo1to1IntGeneric_b is a generic implementation of the
       ! reverse mode of whalo1to1IntGeneric. Integers are summed
       ! together in reverse.
       !
       ! The forward communication pattern is run backwards: the halo
       ! seeds are gathered and sent from the *receive* side of the
       ! pattern, received on the *send* side, and accumulated (summed)
       ! into the donor cells. Every halo location is zeroed once its
       ! seed has been pushed back, so repeated calls do not double
       ! count.
       !
       ! nVar        - number of integer variables per cell, accessed
       !               through flowdoms(..)%intCommVars(1:nVar)
       ! level       - multigrid level on which to communicate
       ! sps         - spectral solution index
       ! commPattern - 1-to-1 communication pattern, indexed by level
       ! internal    - local (same-processor) copy pattern, per level
       !
       use constants
       use block
       use communication
       use inputtimespectral
       use utils, only: echk
       implicit none
       !
       ! Subroutine arguments.
       !
       integer(kind=intType), intent(in) :: level, sps

       type(commtype), dimension(*), intent(in) :: commPattern
       type(internalcommtype), dimension(*), intent(in) :: internal
       !
       ! Local variables.
       !

       integer :: size, procID, ierr, index
       integer, dimension(mpi_status_size) :: mpiStatus

       ! NOTE(review): nVar is a dummy argument but is declared here,
       ! among the locals, without an intent attribute; mm appears to
       ! be unused in this routine.
       integer(kind=intType) :: nVar, mm
       integer(kind=intType) :: i, j, k, ii, jj
       integer(kind=intType) :: d1, i1, j1, k1, d2, i2, j2, k2
       integer(kind=intType), dimension(:), allocatable :: sendBufInt
       integer(kind=intType), dimension(:), allocatable :: recvBufInt

       ! Total number of cells this processor sends/receives in the
       ! forward pattern; the *Cum arrays hold running totals over the
       ! communicating processors.
       ii = commpattern(level)%nProcSend
       ii = commpattern(level)%nsendCum(ii)
       jj = commpattern(level)%nProcRecv
       jj = commpattern(level)%nrecvCum(jj)

       ! NOTE(review): the allocation status returned in ierr is never
       ! checked after this allocate.
       allocate (sendbufint(ii * nvar), recvbufint(jj * nvar), stat=ierr)

       ! Gather up the seeds into the *recv* buffer. Note we loop
       ! over nProcRECV here! After the buffer is assembled it is
       ! sent off.

       jj = 1
       ii = 1
       recvs: do i = 1, commpattern(level)%nProcRecv

          ! Store the processor id and the size of the message
          ! a bit easier.

          procid = commpattern(level)%recvProc(i)
          size = nvar * commpattern(level)%nrecv(i)

          ! Copy the data into the buffer
          !DIR$ NOVECTOR
          do j = 1, commpattern(level)%nrecv(i)

             ! Store the block and the indices of the halo a bit easier.
             ! The stored indices are zero-based, hence the +1.

             d2 = commpattern(level)%recvList(i)%block(j)
             i2 = commpattern(level)%recvList(i)%indices(j, 1) + 1
             j2 = commpattern(level)%recvList(i)%indices(j, 2) + 1
             k2 = commpattern(level)%recvList(i)%indices(j, 3) + 1
             !DIR$ NOVECTOR
             do k = 1, nvar
                ! Move the halo seed into the buffer and clear it so it
                ! is not accumulated a second time.
                recvbufint(jj) = flowdoms(d2, level, sps)%intCommVars(k)%var(i2, j2, k2)
                flowdoms(d2, level, sps)%intCommVars(k)%var(i2, j2, k2) = 0
                jj = jj + 1
             end do
          end do

          ! Send the data.
          call mpi_isend(recvbufint(ii), size, adflow_integer, procid, &
                         procid, adflow_comm_world, recvrequests(i), &
                         ierr)
          call echk(ierr, __file__, __line__)

          ! Set ii to jj for the next processor.

          ii = jj

       end do recvs

       ! Post the nonblocking receives. In reverse mode the data comes
       ! back on the *send* side of the pattern, into sendBufInt.

       ii = 1
       sends: do i = 1, commpattern(level)%nProcSend

          ! Store the processor id and the size of the message
          ! a bit easier.

          procid = commpattern(level)%sendProc(i)
          size = nvar * commpattern(level)%nsend(i)

          ! Post the receive.

          call mpi_irecv(sendbufint(ii), size, adflow_integer, procid, &
                         myid, adflow_comm_world, sendrequests(i), ierr)
          call echk(ierr, __file__, __line__)

          ! And update ii.

          ii = ii + size

       end do sends

       ! Accumulate the local (same-processor) halo seeds into their
       ! donor cells while the messages are in flight.
       !DIR$ NOVECTOR
       localcopy: do i = 1, internal(level)%ncopy

          ! Store the block and the indices of the donor a bit easier.

          d1 = internal(level)%donorBlock(i)
          i1 = internal(level)%donorIndices(i, 1) + 1
          j1 = internal(level)%donorIndices(i, 2) + 1
          k1 = internal(level)%donorIndices(i, 3) + 1

          ! Idem for the halo's.

          d2 = internal(level)%haloBlock(i)
          i2 = internal(level)%haloIndices(i, 1) + 1
          j2 = internal(level)%haloIndices(i, 2) + 1
          k2 = internal(level)%haloIndices(i, 3) + 1

          ! Sum into the '1' values from the '2' values (halos).
          !DIR$ NOVECTOR
          do k = 1, nvar
             flowdoms(d1, level, sps)%intCommVars(k)%var(i1, j1, k1) = &
                 flowdoms(d1, level, sps)%intCommVars(k)%var(i1, j1, k1) + &
                 flowdoms(d2, level, sps)%intCommVars(k)%var(i2, j2, k2)
             flowdoms(d2, level, sps)%intCommVars(k)%var(i2, j2, k2) = 0
          end do

       end do localcopy

       ! Complete the receives posted on the send side in an arbitrary
       ! sequence and sum the arrived seeds into the donor cells.

       size = commpattern(level)%nProcSend
       completesends: do i = 1, commpattern(level)%nProcSend

          ! Complete any of the requests.

          call mpi_waitany(size, sendrequests, index, mpistatus, ierr)
          call echk(ierr, __file__, __line__)

          ! index identifies which message completed; jj is the offset
          ! of that processor's chunk inside the buffer.

          ii = index
          jj = nvar * commpattern(level)%nsendCum(ii - 1)
          !DIR$ NOVECTOR
          do j = 1, commpattern(level)%nsend(ii)

             ! Store the block and the indices of the halo a bit easier.

             d2 = commpattern(level)%sendList(ii)%block(j)
             i2 = commpattern(level)%sendList(ii)%indices(j, 1) + 1
             j2 = commpattern(level)%sendList(ii)%indices(j, 2) + 1
             k2 = commpattern(level)%sendList(ii)%indices(j, 3) + 1

             ! Accumulate: reverse mode sums, it does not overwrite.
             !DIR$ NOVECTOR
             do k = 1, nvar
                jj = jj + 1
                flowdoms(d2, level, sps)%intCommVars(k)%var(i2, j2, k2) = &
                    flowdoms(d2, level, sps)%intCommVars(k)%var(i2, j2, k2) + sendbufint(jj)
             end do
          end do

       end do completesends

       ! Complete the nonblocking sends.

       size = commpattern(level)%nProcRecv
       do i = 1, commpattern(level)%nProcRecv
          call mpi_waitany(size, recvrequests, index, mpistatus, ierr)
          call echk(ierr, __file__, __line__)
       end do

       deallocate (recvbufint, sendbufint)

    end subroutine whalo1to1intgeneric_b
1253 
1254  subroutine whalo1to1_d(level, start, end, commPressure, &
1255  commVarGamma, commLamVis, commEddyVis, &
1256  commPattern, internal)
1257  !
1258  ! whalo1to1 exchanges the 1 to 1 internal halo's derivatives
1259  !
1260  use constants
1263  implicit none
1264  !
1265  ! Subroutine arguments.
1266  !
1267  integer(kind=intType), intent(in) :: level, start, end
1268  logical, intent(in) :: commPressure, commVarGamma
1269  logical, intent(in) :: commLamVis, commEddyVis
1270 
1271  type(commtype), dimension(*), intent(in) :: commPattern
1272  type(internalcommtype), dimension(*), intent(in) :: internal
1273 
1274  integer(kind=intType) :: nVar, nn, k, sps
1275 
1276  spectralmodes: do sps = 1, ntimeintervalsspectral
1277 
1278  call setcommpointers(start, end, commPressure, commVarGamma, &
1279  commlamvis, commeddyvis, level, sps, .true., nvar, 0)
1280 
1281  if (nvar == 0) then
1282  return
1283  end if
1284 
1285  ! Run the generic exchange
1286  call whalo1to1realgeneric(nvar, level, sps, commpattern, internal)
1287 
1288  end do spectralmodes
1289 
1290  end subroutine whalo1to1_d
1291 
1292  subroutine whalo1to1_b(level, start, end, commPressure, &
1293  commVarGamma, commLamVis, commEddyVis, &
1294  commPattern, internal)
1295  !
1296  ! whalo1to1 exchanges the 1 to 1 internal halo's derivatives
1297  !
1298  use constants
1301  implicit none
1302  !
1303  ! Subroutine arguments.
1304  !
1305  integer(kind=intType), intent(in) :: level, start, end
1306  logical, intent(in) :: commPressure, commVarGamma
1307  logical, intent(in) :: commLamVis, commEddyVis
1308 
1309  type(commtype), dimension(*), intent(in) :: commPattern
1310  type(internalcommtype), dimension(*), intent(in) :: internal
1311 
1312  integer(kind=intType) :: nVar, nn, k, sps
1313 
1314  spectralmodes: do sps = 1, ntimeintervalsspectral
1315 
1316  call setcommpointers(start, end, commPressure, commVarGamma, &
1317  commlamvis, commeddyvis, level, sps, .true., nvar, 0)
1318 
1319  if (nvar == 0) then
1320  return
1321  end if
1322 
1323  ! Run the generic exchange
1324  call whalo1to1realgeneric_b(nvar, level, sps, commpattern, internal)
1325 
1326  end do spectralmodes
1327 
1328  end subroutine whalo1to1_b
1329 
1330  subroutine woverset(level, start, end, commPressure, &
1331  commVarGamma, commLamVis, commEddyVis, &
1332  commPattern, internal)
1333  !
1334  ! wOverset controls the communication between overset halos
1335  ! for the cell-centered variables by interpolating the solution
1336  ! from other blocks consistent with the chimera approach. A tri-
1337  ! linear interpolation is used as per the input from cgns. It
1338  ! is possible to send a range of variables and not the entire
1339  ! set, e.g. only the flow variables or only the turbulent
1340  ! variables. This is controlled by the arguments start, end,
1341  ! commPressure and commViscous. The exchange takes place for
1342  ! the given grid level.
1343  !
1344  use constants
1347  implicit none
1348  !
1349  ! Subroutine arguments.
1350  !
1351  integer(kind=intType), intent(in) :: level, start, end
1352  logical, intent(in) :: commPressure, commVarGamma
1353  logical, intent(in) :: commLamVis, commEddyVis
1354 
1355  type(commtype), dimension(:, :), intent(in) :: commPattern
1356  type(internalcommtype), dimension(:, :), intent(in) :: internal
1357 
1358  ! Local variables.
1359  integer(kind=intType) :: nVar, sps
1360 
1361  spectralmodes: do sps = 1, ntimeintervalsspectral
1362 
1363  call setcommpointers(start, end, commPressure, commVarGamma, &
1364  commlamvis, commeddyvis, level, sps, .false., nvar, 0)
1365 
1366  if (nvar == 0) then
1367  return
1368  end if
1369 
1370  ! Run the generic exchange
1371  call woversetgeneric(nvar, level, sps, commpattern, internal)
1372  end do spectralmodes
1373 
1374  end subroutine woverset
1375 
1376  subroutine woverset_d(level, start, end, commPressure, &
1377  commVarGamma, commLamVis, commEddyVis, &
1378  commPattern, internal)
1379  !
1380  ! wOverset controls the communication between overset halos
1381  ! for the cell-centered variables by interpolating the solution
1382  ! from other blocks consistent with the chimera approach. A tri-
1383  ! linear interpolation is used as per the input from cgns. It
1384  ! is possible to send a range of variables and not the entire
1385  ! set, e.g. only the flow variables or only the turbulent
1386  ! variables. This is controlled by the arguments start, end,
1387  ! commPressure and commViscous. The exchange takes place for
1388  ! the given grid level.
1389  !
1390  use constants
1393  implicit none
1394  !
1395  ! Subroutine arguments.
1396  !
1397  integer(kind=intType), intent(in) :: level, start, end
1398  logical, intent(in) :: commPressure, commVarGamma
1399  logical, intent(in) :: commLamVis, commEddyVis
1400 
1401  type(commtype), dimension(:, :), intent(in) :: commPattern
1402  type(internalcommtype), dimension(:, :), intent(in) :: internal
1403  integer(kind=intType) :: nVar, sps, offset
1404 
1405  spectralmodes: do sps = 1, ntimeintervalsspectral
1406 
1407  ! this one is tricker: We have to set BOTH the real values and
1408  ! the the derivative values. Set the derivative values first:
1409  call setcommpointers(start, end, commPressure, commVarGamma, &
1410  commlamvis, commeddyvis, level, sps, .true., nvar, 0)
1411 
1412  ! And then the original real values
1413  offset = nvar
1414  call setcommpointers(start, end, commPressure, commVarGamma, &
1415  commlamvis, commeddyvis, level, sps, .false., nvar, offset)
1416 
1417  if (nvar == 0) then
1418  return
1419  end if
1420 
1421  ! Run the generic exchange
1422  call woversetgeneric_d(nvar, level, sps, commpattern, internal)
1423  end do spectralmodes
1424  end subroutine woverset_d
1425 
1426  subroutine woverset_b(level, start, end, commPressure, &
1427  commVarGamma, commLamVis, commEddyVis, &
1428  commPattern, internal)
1429  !
1430  ! wOverset_b performs the *TRANSPOSE* operation of wOverset
1431  ! It is used for adjoint/reverse mode residual evaluations.
1432  ! * See wOverset for more information.
1433  !
1434  use constants
1437  implicit none
1438  !
1439  ! Subroutine arguments.
1440  !
1441  integer(kind=intType), intent(in) :: level, start, end
1442  logical, intent(in) :: commPressure, commVarGamma
1443  logical, intent(in) :: commLamVis, commEddyVis
1444 
1445  type(commtype), dimension(:, :), intent(in) :: commPattern
1446  type(internalcommtype), dimension(:, :), intent(in) :: internal
1447  integer(kind=intType) :: nVar, sps, offset
1448 
1449  spectralmodes: do sps = 1, ntimeintervalsspectral
1450 
1451  ! this one is tricker: We have to set BOTH the real values and
1452  ! the the derivative values. Set the derivative values first:
1453  call setcommpointers(start, end, commPressure, commVarGamma, &
1454  commlamvis, commeddyvis, level, sps, .true., nvar, 0)
1455 
1456  ! And then the original real values
1457  offset = nvar
1458  call setcommpointers(start, end, commPressure, commVarGamma, &
1459  commlamvis, commeddyvis, level, sps, .false., nvar, offset)
1460 
1461  if (nvar == 0) then
1462  return
1463  end if
1464 
1465  ! Run the generic exchange
1466  call woversetgeneric_b(nvar, level, sps, commpattern, internal)
1467  end do spectralmodes
1468 
1469  end subroutine woverset_b
1470 
    subroutine woversetgeneric(nVar, level, sps, commPattern, Internal)
       !
       ! wOversetGeneric is the generic halo exchange code for the
       ! overset halos. Every overset halo cell is filled with a
       ! trilinear interpolation of 8 donor cells: the stored donor
       ! index (i1,j1,k1) is the base corner of a 2x2x2 stencil and
       ! the 8 entries of interp/donorInterp are the interpolation
       ! weights.
       !
       ! nVar        - number of variables per cell to exchange; the
       !               data is accessed via flowdoms(..)%realCommVars(k)
       ! level       - multigrid level on which to communicate
       ! sps         - spectral solution index
       ! commPattern - overset communication pattern, indexed (level, sps)
       ! internal    - local (same-processor) interpolation pattern
       !
       use constants
       use block, only: flowdoms
       use communication
       use utils, only: echk
       implicit none
       !
       ! Subroutine arguments.
       !
       integer(kind=intType), intent(in) :: level, sps

       type(commtype), dimension(:, :), intent(in) :: commPattern
       type(internalcommtype), dimension(:, :), intent(in) :: internal
       !
       ! Local variables.
       !
       integer :: size, procId, ierr, index
       integer, dimension(mpi_status_size) :: mpiStatus

       ! NOTE(review): nVar is a dummy argument but is declared here,
       ! among the locals, without an intent attribute.
       integer(kind=intType) :: nVar
       integer(kind=intType) :: i, j, k, ii, jj
       integer(kind=intType) :: d1, i1, j1, k1, d2, i2, j2, k2
       real(kind=realtype), dimension(:), pointer :: weight

       ! Send the variables. The data is first copied into
       ! the send buffer after which the buffer is sent asap.

       ii = 1
       sends: do i = 1, commpattern(level, sps)%nProcSend

          ! Store the processor id and the size of the message
          ! a bit easier.

          procid = commpattern(level, sps)%sendProc(i)
          size = nvar * commpattern(level, sps)%nsend(i)

          ! Copy the data in the correct part of the send buffer.

          jj = ii
          !DIR$ NOVECTOR
          do j = 1, commpattern(level, sps)%nsend(i)

             ! Store the block id and the indices of the donor
             ! a bit easier. Stored indices are zero-based, hence +1.

             d1 = commpattern(level, sps)%sendList(i)%block(j)
             i1 = commpattern(level, sps)%sendList(i)%indices(j, 1) + 1
             j1 = commpattern(level, sps)%sendList(i)%indices(j, 2) + 1
             k1 = commpattern(level, sps)%sendList(i)%indices(j, 3) + 1
             weight => commpattern(level, sps)%sendList(i)%interp(j, :)

             ! Interpolate the given range of the working variables for
             ! this cell into the buffer. Update the counter jj.
             !DIR$ NOVECTOR
             do k = 1, nvar
                sendbuffer(jj) = &
                    weight(1) * flowdoms(d1, level, sps)%realCommVars(k)%var(i1, j1, k1) + &
                    weight(2) * flowdoms(d1, level, sps)%realCommVars(k)%var(i1 + 1, j1, k1) + &
                    weight(3) * flowdoms(d1, level, sps)%realCommVars(k)%var(i1, j1 + 1, k1) + &
                    weight(4) * flowdoms(d1, level, sps)%realCommVars(k)%var(i1 + 1, j1 + 1, k1) + &
                    weight(5) * flowdoms(d1, level, sps)%realCommVars(k)%var(i1, j1, k1 + 1) + &
                    weight(6) * flowdoms(d1, level, sps)%realCommVars(k)%var(i1 + 1, j1, k1 + 1) + &
                    weight(7) * flowdoms(d1, level, sps)%realCommVars(k)%var(i1, j1 + 1, k1 + 1) + &
                    weight(8) * flowdoms(d1, level, sps)%realCommVars(k)%var(i1 + 1, j1 + 1, k1 + 1)
                jj = jj + 1
             end do
          end do

          ! Send the data.

          call mpi_isend(sendbuffer(ii), size, adflow_real, procid, &
                         procid, adflow_comm_world, sendrequests(i), &
                         ierr)
          call echk(ierr, __file__, __line__)

          ! Set ii to jj for the next processor.

          ii = jj

       end do sends

       ! Post the nonblocking receives.

       ii = 1
       receives: do i = 1, commpattern(level, sps)%nProcRecv

          ! Store the processor id and the size of the message
          ! a bit easier.

          procid = commpattern(level, sps)%recvProc(i)
          size = nvar * commpattern(level, sps)%nrecv(i)

          ! Post the receive.

          call mpi_irecv(recvbuffer(ii), size, adflow_real, procid, &
                         myid, adflow_comm_world, recvrequests(i), ierr)
          call echk(ierr, __file__, __line__)

          ! And update ii.

          ii = ii + size

       end do receives

       ! Do the local (same-processor) interpolation while the
       ! messages are in flight.
       !DIR$ NOVECTOR
       localinterp: do i = 1, internal(level, sps)%ncopy

          ! Store the block and the indices of the donor a bit easier.

          d1 = internal(level, sps)%donorBlock(i)
          i1 = internal(level, sps)%donorIndices(i, 1) + 1
          j1 = internal(level, sps)%donorIndices(i, 2) + 1
          k1 = internal(level, sps)%donorIndices(i, 3) + 1

          weight => internal(level, sps)%donorInterp(i, :)

          ! Idem for the halo's.

          d2 = internal(level, sps)%haloBlock(i)
          i2 = internal(level, sps)%haloIndices(i, 1) + 1
          j2 = internal(level, sps)%haloIndices(i, 2) + 1
          k2 = internal(level, sps)%haloIndices(i, 3) + 1

          ! Interpolate the given range of working variables into the
          ! halo cell.
          !DIR$ NOVECTOR
          do k = 1, nvar
             flowdoms(d2, level, sps)%realCommVars(k)%var(i2, j2, k2) = &
                 weight(1) * flowdoms(d1, level, sps)%realCommVars(k)%var(i1, j1, k1) + &
                 weight(2) * flowdoms(d1, level, sps)%realCommVars(k)%var(i1 + 1, j1, k1) + &
                 weight(3) * flowdoms(d1, level, sps)%realCommVars(k)%var(i1, j1 + 1, k1) + &
                 weight(4) * flowdoms(d1, level, sps)%realCommVars(k)%var(i1 + 1, j1 + 1, k1) + &
                 weight(5) * flowdoms(d1, level, sps)%realCommVars(k)%var(i1, j1, k1 + 1) + &
                 weight(6) * flowdoms(d1, level, sps)%realCommVars(k)%var(i1 + 1, j1, k1 + 1) + &
                 weight(7) * flowdoms(d1, level, sps)%realCommVars(k)%var(i1, j1 + 1, k1 + 1) + &
                 weight(8) * flowdoms(d1, level, sps)%realCommVars(k)%var(i1 + 1, j1 + 1, k1 + 1)
          end do
       end do localinterp

       ! Complete the nonblocking receives in an arbitrary sequence and
       ! copy the variables from the buffer into the halo's.

       size = commpattern(level, sps)%nProcRecv
       completerecvs: do i = 1, commpattern(level, sps)%nProcRecv

          ! Complete any of the requests.

          call mpi_waitany(size, recvrequests, index, mpistatus, ierr)
          call echk(ierr, __file__, __line__)

          ! index identifies which message completed; jj is the offset
          ! of that processor's chunk inside the receive buffer.

          ii = index
          jj = nvar * commpattern(level, sps)%nrecvCum(ii - 1)
          !DIR$ NOVECTOR
          do j = 1, commpattern(level, sps)%nrecv(ii)

             ! Store the block and the indices of the halo a bit easier.

             d2 = commpattern(level, sps)%recvList(ii)%block(j)
             i2 = commpattern(level, sps)%recvList(ii)%indices(j, 1) + 1
             j2 = commpattern(level, sps)%recvList(ii)%indices(j, 2) + 1
             k2 = commpattern(level, sps)%recvList(ii)%indices(j, 3) + 1
             !DIR$ NOVECTOR
             do k = 1, nvar
                jj = jj + 1
                flowdoms(d2, level, sps)%realCommVars(k)%var(i2, j2, k2) = recvbuffer(jj)
             end do
          end do
       end do completerecvs

       ! Complete the nonblocking sends.

       size = commpattern(level, sps)%nProcSend
       do i = 1, commpattern(level, sps)%nProcSend
          call mpi_waitany(size, sendrequests, index, mpistatus, ierr)
          call echk(ierr, __file__, __line__)
       end do

    end subroutine woversetgeneric
1655 
    subroutine woversetgeneric_d(nVar, level, sps, commPattern, Internal)
       !
       ! wOversetGeneric_d is the generic forward-mode linearized halo
       ! exchange code for overset halos. Each halo receives the
       ! product-rule differential of the trilinear interpolation:
       !
       !    d(w . v) = w . dv + dw . v
       !
       ! where realCommVars(1:nVar) hold the derivative values and
       ! realCommVars(nVar+1:2*nVar) the corresponding real values
       ! (set up that way by the caller, see wOverset_d), while
       ! interp/interpd hold the weights and their derivatives.
       !
       ! nVar        - number of variables per cell to exchange
       ! level       - multigrid level on which to communicate
       ! sps         - spectral solution index
       ! commPattern - overset communication pattern, indexed (level, sps)
       ! internal    - local (same-processor) interpolation pattern
       !
       use constants
       use block, only: flowdoms
       use communication
       use utils, only: echk
       implicit none
       !
       ! Subroutine arguments.
       !
       integer(kind=intType), intent(in) :: level, sps

       type(commtype), dimension(:, :), intent(in) :: commPattern
       type(internalcommtype), dimension(:, :), intent(in) :: internal
       !
       ! Local variables.
       !
       integer :: size, procId, ierr, index
       integer, dimension(mpi_status_size) :: mpiStatus

       ! NOTE(review): nVar is a dummy argument but is declared here,
       ! among the locals, without an intent attribute.
       integer(kind=intType) :: nVar
       integer(kind=intType) :: i, j, k, ii, jj
       integer(kind=intType) :: d1, i1, j1, k1, d2, i2, j2, k2
       real(kind=realtype), dimension(:), pointer :: weight, weightd

       ! Send the variables. The data is first copied into
       ! the send buffer after which the buffer is sent asap.

       ii = 1
       sends: do i = 1, commpattern(level, sps)%nProcSend

          ! Store the processor id and the size of the message
          ! a bit easier.

          procid = commpattern(level, sps)%sendProc(i)
          size = nvar * commpattern(level, sps)%nsend(i)

          ! Copy the data in the correct part of the send buffer.

          jj = ii
          !DIR$ NOVECTOR
          do j = 1, commpattern(level, sps)%nsend(i)

             ! Store the block id and the indices of the donor
             ! a bit easier. Stored indices are zero-based, hence +1.

             d1 = commpattern(level, sps)%sendList(i)%block(j)
             i1 = commpattern(level, sps)%sendList(i)%indices(j, 1) + 1
             j1 = commpattern(level, sps)%sendList(i)%indices(j, 2) + 1
             k1 = commpattern(level, sps)%sendList(i)%indices(j, 3) + 1
             weight => commpattern(level, sps)%sendList(i)%interp(j, :)
             weightd => commpattern(level, sps)%sendList(i)%interpd(j, :)

             ! Differentiated interpolation for this cell:
             ! weight * (derivative values, index k) plus
             ! weightd * (real values, index k + nVar).
             !DIR$ NOVECTOR
             do k = 1, nvar
                sendbuffer(jj) = &
                    weight(1) * flowdoms(d1, level, sps)%realCommVars(k)%var(i1, j1, k1) + &
                    weight(2) * flowdoms(d1, level, sps)%realCommVars(k)%var(i1 + 1, j1, k1) + &
                    weight(3) * flowdoms(d1, level, sps)%realCommVars(k)%var(i1, j1 + 1, k1) + &
                    weight(4) * flowdoms(d1, level, sps)%realCommVars(k)%var(i1 + 1, j1 + 1, k1) + &
                    weight(5) * flowdoms(d1, level, sps)%realCommVars(k)%var(i1, j1, k1 + 1) + &
                    weight(6) * flowdoms(d1, level, sps)%realCommVars(k)%var(i1 + 1, j1, k1 + 1) + &
                    weight(7) * flowdoms(d1, level, sps)%realCommVars(k)%var(i1, j1 + 1, k1 + 1) + &
                    weight(8) * flowdoms(d1, level, sps)%realCommVars(k)%var(i1 + 1, j1 + 1, k1 + 1) + &
                    weightd(1) * flowdoms(d1, level, sps)%realCommVars(k + nvar)%var(i1, j1, k1) + &
                    weightd(2) * flowdoms(d1, level, sps)%realCommVars(k + nvar)%var(i1 + 1, j1, k1) + &
                    weightd(3) * flowdoms(d1, level, sps)%realCommVars(k + nvar)%var(i1, j1 + 1, k1) + &
                    weightd(4) * flowdoms(d1, level, sps)%realCommVars(k + nvar)%var(i1 + 1, j1 + 1, k1) + &
                    weightd(5) * flowdoms(d1, level, sps)%realCommVars(k + nvar)%var(i1, j1, k1 + 1) + &
                    weightd(6) * flowdoms(d1, level, sps)%realCommVars(k + nvar)%var(i1 + 1, j1, k1 + 1) + &
                    weightd(7) * flowdoms(d1, level, sps)%realCommVars(k + nvar)%var(i1, j1 + 1, k1 + 1) + &
                    weightd(8) * flowdoms(d1, level, sps)%realCommVars(k + nvar)%var(i1 + 1, j1 + 1, k1 + 1)

                jj = jj + 1
             end do
          end do

          ! Send the data.

          call mpi_isend(sendbuffer(ii), size, adflow_real, procid, &
                         procid, adflow_comm_world, sendrequests(i), &
                         ierr)
          call echk(ierr, __file__, __line__)

          ! Set ii to jj for the next processor.

          ii = jj

       end do sends

       ! Post the nonblocking receives.

       ii = 1
       receives: do i = 1, commpattern(level, sps)%nProcRecv

          ! Store the processor id and the size of the message
          ! a bit easier.

          procid = commpattern(level, sps)%recvProc(i)
          size = nvar * commpattern(level, sps)%nrecv(i)

          ! Post the receive.

          call mpi_irecv(recvbuffer(ii), size, adflow_real, procid, &
                         myid, adflow_comm_world, recvrequests(i), ierr)
          call echk(ierr, __file__, __line__)

          ! And update ii.

          ii = ii + size

       end do receives

       ! Do the local (same-processor) differentiated interpolation
       ! while the messages are in flight.
       !DIR$ NOVECTOR
       localinterp: do i = 1, internal(level, sps)%ncopy

          ! Store the block and the indices of the donor a bit easier.

          d1 = internal(level, sps)%donorBlock(i)
          i1 = internal(level, sps)%donorIndices(i, 1) + 1
          j1 = internal(level, sps)%donorIndices(i, 2) + 1
          k1 = internal(level, sps)%donorIndices(i, 3) + 1

          weight => internal(level, sps)%donorInterp(i, :)
          weightd => internal(level, sps)%donorInterpd(i, :)

          ! Idem for the halo's.

          d2 = internal(level, sps)%haloBlock(i)
          i2 = internal(level, sps)%haloIndices(i, 1) + 1
          j2 = internal(level, sps)%haloIndices(i, 2) + 1
          k2 = internal(level, sps)%haloIndices(i, 3) + 1

          ! Same product-rule expression as the send loop above.
          !DIR$ NOVECTOR
          do k = 1, nvar
             flowdoms(d2, level, sps)%realCommVars(k)%var(i2, j2, k2) = &
                 weight(1) * flowdoms(d1, level, sps)%realCommVars(k)%var(i1, j1, k1) + &
                 weight(2) * flowdoms(d1, level, sps)%realCommVars(k)%var(i1 + 1, j1, k1) + &
                 weight(3) * flowdoms(d1, level, sps)%realCommVars(k)%var(i1, j1 + 1, k1) + &
                 weight(4) * flowdoms(d1, level, sps)%realCommVars(k)%var(i1 + 1, j1 + 1, k1) + &
                 weight(5) * flowdoms(d1, level, sps)%realCommVars(k)%var(i1, j1, k1 + 1) + &
                 weight(6) * flowdoms(d1, level, sps)%realCommVars(k)%var(i1 + 1, j1, k1 + 1) + &
                 weight(7) * flowdoms(d1, level, sps)%realCommVars(k)%var(i1, j1 + 1, k1 + 1) + &
                 weight(8) * flowdoms(d1, level, sps)%realCommVars(k)%var(i1 + 1, j1 + 1, k1 + 1) + &
                 weightd(1) * flowdoms(d1, level, sps)%realCommVars(k + nvar)%var(i1, j1, k1) + &
                 weightd(2) * flowdoms(d1, level, sps)%realCommVars(k + nvar)%var(i1 + 1, j1, k1) + &
                 weightd(3) * flowdoms(d1, level, sps)%realCommVars(k + nvar)%var(i1, j1 + 1, k1) + &
                 weightd(4) * flowdoms(d1, level, sps)%realCommVars(k + nvar)%var(i1 + 1, j1 + 1, k1) + &
                 weightd(5) * flowdoms(d1, level, sps)%realCommVars(k + nvar)%var(i1, j1, k1 + 1) + &
                 weightd(6) * flowdoms(d1, level, sps)%realCommVars(k + nvar)%var(i1 + 1, j1, k1 + 1) + &
                 weightd(7) * flowdoms(d1, level, sps)%realCommVars(k + nvar)%var(i1, j1 + 1, k1 + 1) + &
                 weightd(8) * flowdoms(d1, level, sps)%realCommVars(k + nvar)%var(i1 + 1, j1 + 1, k1 + 1)

          end do
       end do localinterp

       ! Complete the nonblocking receives in an arbitrary sequence and
       ! copy the variables from the buffer into the halo's.

       size = commpattern(level, sps)%nProcRecv
       completerecvs: do i = 1, commpattern(level, sps)%nProcRecv

          ! Complete any of the requests.

          call mpi_waitany(size, recvrequests, index, mpistatus, ierr)
          call echk(ierr, __file__, __line__)

          ! index identifies which message completed; jj is the offset
          ! of that processor's chunk inside the receive buffer.

          ii = index
          jj = nvar * commpattern(level, sps)%nrecvCum(ii - 1)
          !DIR$ NOVECTOR
          do j = 1, commpattern(level, sps)%nrecv(ii)

             ! Store the block and the indices of the halo a bit easier.

             d2 = commpattern(level, sps)%recvList(ii)%block(j)
             i2 = commpattern(level, sps)%recvList(ii)%indices(j, 1) + 1
             j2 = commpattern(level, sps)%recvList(ii)%indices(j, 2) + 1
             k2 = commpattern(level, sps)%recvList(ii)%indices(j, 3) + 1

             do k = 1, nvar
                jj = jj + 1
                flowdoms(d2, level, sps)%realCommVars(k)%var(i2, j2, k2) = recvbuffer(jj)
             end do
          end do
       end do completerecvs

       ! Complete the nonblocking sends.

       size = commpattern(level, sps)%nProcSend
       do i = 1, commpattern(level, sps)%nProcSend
          call mpi_waitany(size, sendrequests, index, mpistatus, ierr)
          call echk(ierr, __file__, __line__)
       end do

    end subroutine woversetgeneric_d
1860 
1861  subroutine woversetgeneric_b(nVar, level, sps, commPattern, Internal)
1862  !
1863  ! wOversetGeneric_b is the generic reverse mode linearized
1864  ! code for overset halos.
1865  !
1866  use constants
1867  use block, only: flowdoms
1868  use communication
1869  use utils, only: echk
1870  implicit none
1871  !
1872  ! Subroutine arguments.
1873  !
1874  integer(kind=intType), intent(in) :: level, sps
1875 
1876  type(commtype), dimension(:, :), intent(in) :: commPattern
1877  type(internalcommtype), dimension(:, :), intent(in) :: internal
1878  !
1879  ! Local variables.
1880  !
1881  integer :: size, procId, ierr, index
1882  integer, dimension(mpi_status_size) :: mpiStatus
1883 
1884  integer(kind=intType) :: nVar
1885  integer(kind=intType) :: i, j, k, ii, jj, kk
1886  integer(kind=intType) :: d1, i1, j1, k1, d2, i2, j2, k2, iii, jjj, kkk
1887  real(kind=realtype), dimension(:), pointer :: weight, weightd
1888  real(kind=realtype) :: vard
1889  ! Gather up the seeds into the *recv* buffer. Note we loop over
1890  ! nProcRECV here! After the buffer is assembled it is send off.
1891 
1892  jj = 1
1893  ii = 1
1894  recvs: do i = 1, commpattern(level, sps)%nProcRecv
1895 
1896  ! Store the processor id and the size of the message
1897  ! a bit easier.
1898 
1899  procid = commpattern(level, sps)%recvProc(i)
1900  size = nvar * commpattern(level, sps)%nrecv(i)
1901 
1902  ! Copy the data into the buffer
1903  !DIR$ NOVECTOR
1904  do j = 1, commpattern(level, sps)%nrecv(i)
1905 
1906  ! Store the block and the indices to make code a bit easier to read
1907 
1908  d2 = commpattern(level, sps)%recvList(i)%block(j)
1909  i2 = commpattern(level, sps)%recvList(i)%indices(j, 1) + 1
1910  j2 = commpattern(level, sps)%recvList(i)%indices(j, 2) + 1
1911  k2 = commpattern(level, sps)%recvList(i)%indices(j, 3) + 1
1912  !DIR$ NOVECTOR
1913  do k = 1, nvar
1914  recvbuffer(jj) = flowdoms(d2, level, sps)%realCommVars(k)%var(i2, j2, k2)
1915  jj = jj + 1
1916  flowdoms(d2, level, sps)%realCommVars(k)%var(i2, j2, k2) = zero
1917  end do
1918  end do
1919 
1920  ! Send the data.
1921  call mpi_isend(recvbuffer(ii), size, adflow_real, procid, &
1922  procid, adflow_comm_world, recvrequests(i), &
1923  ierr)
1924  call echk(ierr, __file__, __line__)
1925 
1926  ! Set ii to jj for the next processor.
1927 
1928  ii = jj
1929 
1930  end do recvs
1931 
1932  ! Post the nonblocking receives.
1933 
1934  ii = 1
1935  sends: do i = 1, commpattern(level, sps)%nProcSend
1936 
1937  ! Store the processor id and the size of the message
1938  ! a bit easier.
1939 
1940  procid = commpattern(level, sps)%sendProc(i)
1941  size = nvar * commpattern(level, sps)%nsend(i)
1942 
1943  ! Post the receive.
1944 
1945  call mpi_irecv(sendbuffer(ii), size, adflow_real, procid, &
1946  myid, adflow_comm_world, sendrequests(i), ierr)
1947  call echk(ierr, __file__, __line__)
1948 
1949  ! And update ii.
1950 
1951  ii = ii + size
1952 
1953  end do sends
1954 
1955  ! Do the local interpolation.
1956  !DIR$ NOVECTOR
1957  localinterp: do i = 1, internal(level, sps)%ncopy
1958 
1959  ! Store the block and the indices of the donor a bit easier.
1960 
1961  d1 = internal(level, sps)%donorBlock(i)
1962  i1 = internal(level, sps)%donorIndices(i, 1) + 1
1963  j1 = internal(level, sps)%donorIndices(i, 2) + 1
1964  k1 = internal(level, sps)%donorIndices(i, 3) + 1
1965 
1966  weight => internal(level, sps)%donorInterp(i, :)
1967  weightd => internal(level, sps)%donorInterpd(i, :)
1968 
1969  ! Idem for the halo's.
1970 
1971  d2 = internal(level, sps)%haloBlock(i)
1972  i2 = internal(level, sps)%haloIndices(i, 1) + 1
1973  j2 = internal(level, sps)%haloIndices(i, 2) + 1
1974  k2 = internal(level, sps)%haloIndices(i, 3) + 1
1975 
1976  ! Sum into the '1' values from the '2' values accouting for the weights
1977  !DIR$ NOVECTOR
1978  do k = 1, nvar
1979  vard = flowdoms(d2, level, sps)%realCommVars(k)%var(i2, j2, k2)
1980  kk = 0
1981  do kkk = k1, k1 + 1
1982  do jjj = j1, j1 + 1
1983  do iii = i1, i1 + 1
1984  kk = kk + 1
1985  flowdoms(d1, level, sps)%realCommVars(k)%var(iii, jjj, kkk) = &
1986  flowdoms(d1, level, sps)%realCommVars(k)%var(iii, jjj, kkk) + &
1987  weight(kk) * vard
1988 
1989  weightd(kk) = weightd(kk) + &
1990  flowdoms(d1, level, sps)%realCommVars(k + nvar)%var(iii, jjj, kkk) * vard
1991 
1992  end do
1993  end do
1994  end do
1995  flowdoms(d2, level, sps)%realCommVars(k)%var(i2, j2, k2) = zero
1996  end do
1997  end do localinterp
1998 
1999  ! Complete the nonblocking receives in an arbitrary sequence and
2000  ! copy the variables from the buffer into the halo's.
2001 
2002  size = commpattern(level, sps)%nProcSend
2003  completesends: do i = 1, commpattern(level, sps)%nProcSend
2004 
2005  ! Complete any of the requests.
2006 
2007  call mpi_waitany(size, sendrequests, index, mpistatus, ierr)
2008  call echk(ierr, __file__, __line__)
2009 
2010  ! Copy the data just arrived in the halo's.
2011 
2012  ii = index
2013 
2014  jj = nvar * commpattern(level, sps)%nsendCum(ii - 1)
2015  !DIR$ NOVECTOR
2016  do j = 1, commpattern(level, sps)%nsend(ii)
2017 
2018  ! Store the block and the indices of the halo a bit easier.
2019 
2020  d2 = commpattern(level, sps)%sendList(ii)%block(j)
2021  i2 = commpattern(level, sps)%sendList(ii)%indices(j, 1) + 1
2022  j2 = commpattern(level, sps)%sendList(ii)%indices(j, 2) + 1
2023  k2 = commpattern(level, sps)%sendList(ii)%indices(j, 3) + 1
2024 
2025  weight => commpattern(level, sps)%sendList(ii)%interp(j, :)
2026  weightd => commpattern(level, sps)%sendList(ii)%interpd(j, :)
2027  !DIR$ NOVECTOR
2028  do k = 1, nvar
2029  jj = jj + 1
2030  vard = sendbuffer(jj)
2031  kk = 0
2032  do kkk = k2, k2 + 1
2033  do jjj = j2, j2 + 1
2034  do iii = i2, i2 + 1
2035 
2036  kk = kk + 1
2037  flowdoms(d2, level, sps)%realCommVars(k)%var(iii, jjj, kkk) = &
2038  flowdoms(d2, level, sps)%realCommVars(k)%var(iii, jjj, kkk) + &
2039  weight(kk) * vard
2040 
2041  weightd(kk) = weightd(kk) + &
2042  flowdoms(d2, level, sps)%realCommVars(k + nvar)%var(iii, jjj, kkk) * vard
2043 
2044  end do
2045  end do
2046  end do
2047  end do
2048  end do
2049 
2050  end do completesends
2051 
2052  ! Complete the nonblocking sends.
2053 
2054  size = commpattern(level, sps)%nProcRecv
2055  do i = 1, commpattern(level, sps)%nProcRecv
2056  call mpi_waitany(size, recvrequests, index, mpistatus, ierr)
2057  call echk(ierr, __file__, __line__)
2058  end do
2059 
2060  end subroutine woversetgeneric_b
2061 
2062 #ifndef USE_COMPLEX
2063  subroutine whalo2_b(level, start, end, commPressure, commGamma, &
2064  commViscous)
2065  !
2066  ! whalo2_b exchanges all the 2nd level internal halo's for the
2067  ! cell centered variables IN REVERSE MODE
2068  !
2069  use constants
2070  use blockpointers
2071  use communication
2072  use flowvarrefstate
2073  use inputphysics
2074  use inputtimespectral
2075  use iteration
2076  use flowutils_b, only: computeetotblock_b
2077  use utils, only: setpointers_b, getcorrectfork
2078  implicit none
2079  !
2080  ! Subroutine arguments.
2081  !
2082  integer(kind=intType), intent(in) :: level, start, end
2083  logical, intent(in) :: commPressure, commGamma, commViscous
2084  !
2085  ! Local variables.
2086  !
2087  integer(kind=intType) :: nn, mm, ll
2088  integer(kind=intType) :: iBeg, iEnd, jBeg, jEnd, kBeg, kEnd
2089 
2090  logical :: correctForK, commLamVis, commEddyVis, commVarGamma
2091 
2092  ! Set the logicals whether or not to communicate the viscosities.
2093 
2094  commlamvis = .false.
2095  if (viscous .and. commviscous) commlamvis = .true.
2096 
2097  commeddyvis = .false.
2098  if (eddymodel .and. commviscous) commeddyvis = .true.
2099 
2100  ! Set the logical whether or not to communicate gamma.
2101 
2102  commvargamma = .false.
2103  if (commgamma .and. (cpmodel == cptempcurvefits)) &
2104  commvargamma = .true.
2105 
2106  bothpande: if (commpressure .and. start <= irhoe .and. &
2107  end >= irhoE) then
2108 
2109  ! First determine whether or not the total energy must be
2110  ! corrected for the presence of the turbulent kinetic energy.
2111 
2112  correctfork = getcorrectfork()
2113 
2114  domains: do nn = 1, ndom
2115 
2116  ! Treat the overset blocks. Since we don't have the logic
2117  ! setup here correctly to only update the overset cells,
2118  ! just do the whole block, for every block
2119  do ll = 1, ntimeintervalsspectral
2120  call setpointers_b(nn, level, ll)
2121  call computeetotblock_b(2, il, 2, jl, 2, kl, correctfork)
2122  end do
2123  end do domains
2124  end if bothpande
2125 
2126  call woverset_b(level, start, end, commPressure, commVarGamma, &
2127  commlamvis, commeddyvis, commpatternoverset, internaloverset)
2128 
2129  ! Exchange the 1 to 1 matching 2nd level cell halo's.
2130  call whalo1to1_b(level, start, end, commPressure, commVarGamma, &
2131  commlamvis, commeddyvis, commpatterncell_2nd, &
2132  internalcell_2nd)
2133 
2134  ! NOTE: Only the 1to1 halo exchange and overset is done. whalosliding,
2135  ! whalomixing, orphanAverage and PandE corrections
2136  ! calculation are NOT implementent.
2137 
2138  end subroutine whalo2_b
2139 
2140  subroutine whalo2_d(level, start, end, commPressure, commGamma, &
2141  commViscous)
2142  !
2143  ! whalo2_b exchanges all the 2nd level internal halo's for the
2144  ! cell centered variables IN FORWARD MODE
2145  !
2146  use constants
2147  use blockpointers
2148  use communication
2149  use flowvarrefstate
2150  use inputphysics
2151  use inputtimespectral
2152  use iteration
2153  use flowutils_d, only: computeetotblock_d
2154  use utils, only: setpointers_d, getcorrectfork
2155  implicit none
2156  !
2157  ! Subroutine arguments.
2158  !
2159  integer(kind=intType), intent(in) :: level, start, end
2160  logical, intent(in) :: commPressure, commGamma, commViscous
2161  !
2162  ! Local variables.
2163  !
2164  integer(kind=intType) :: nn, mm, ll
2165  integer(kind=intType) :: iBeg, iEnd, jBeg, jEnd, kBeg, kEnd
2166 
2167  logical :: correctForK, commLamVis, commEddyVis, commVarGamma
2168 
2169  ! Set the logicals whether or not to communicate the viscosities.
2170 
2171  commlamvis = .false.
2172  if (viscous .and. commviscous) commlamvis = .true.
2173 
2174  commeddyvis = .false.
2175  if (eddymodel .and. commviscous) commeddyvis = .true.
2176 
2177  ! Set the logical whether or not to communicate gamma.
2178 
2179  commvargamma = .false.
2180  if (commgamma .and. (cpmodel == cptempcurvefits)) &
2181  commvargamma = .true.
2182 
2183  ! Exchange the 1 to 1 matching 2nd level cell halo's.
2184 
2185  call whalo1to1_d(level, start, end, commPressure, commVarGamma, &
2186  commlamvis, commeddyvis, commpatterncell_2nd, &
2187  internalcell_2nd)
2188 
2189  ! Exchange the overset cells
2190  call woverset_d(level, start, end, commPressure, commVarGamma, &
2191  commlamvis, commeddyvis, commpatternoverset, internaloverset)
2192 
2193  ! NOTE: Only the 1to1 halo and wOverset exchange is done. whalosliding,
2194  ! whalomixing, orphanAverage and PandE corrections
2195  ! calculation are NOT implementent.
2196 
2197  ! If both the pressure and the total energy has been communicated
2198  ! compute the energy again. The reason is that both values are
2199  ! interpolated and consequently the values are not consistent.
2200  ! The energy depends quadratically on the velocity.
2201 
2202  bothpande: if (commpressure .and. start <= irhoe .and. &
2203  end >= irhoE) then
2204 
2205  ! First determine whether or not the total energy must be
2206  ! corrected for the presence of the turbulent kinetic energy.
2207 
2208  correctfork = getcorrectfork()
2209 
2210  domains: do nn = 1, ndom
2211 
2212  ! Treat the overset blocks. Since we don't have the logic
2213  ! setup here correctly to only update the overset cells,
2214  ! just do the whole block, for every block
2215  do ll = 1, ntimeintervalsspectral
2216  call setpointers_d(nn, level, ll)
2217  call computeetotblock_d(2, il, 2, jl, 2, kl, correctfork)
2218  end do
2219  end do domains
2220 
2221  end if bothpande
2222  end subroutine whalo2_d
2223 #endif
2224 
2225  subroutine reshalo1(level, start, end)
2226  !
2227  ! resHalo1 determines the residuals in the 1st layer of halo
2228  ! cells by applying both the boundary conditions and the
2229  ! exchange. The halo values are needed for post processing
2230  ! reasons.
2231  !
2232  use constants
2233  use blockpointers
2234  use communication
2235  use inputtimespectral
2236  use utils, only: setpointers, echk
2237  implicit none
2238  !
2239  ! Subroutine arguments.
2240  !
2241  integer(kind=intType) :: level, start, end
2242  !
2243  ! Local variables.
2244  !
2245  integer :: size, procID, ierr, index
2246  integer, dimension(mpi_status_size) :: mpiStatus
2247 
2248  integer(kind=intType) :: nVar, sps
2249  integer(kind=intType) :: ii, jj, mm, nn, i, j, k, l
2250  integer(kind=intType) :: dd1, ii1, jj1, kk1, dd2, ii2, jj2, kk2
2251 
2252  real(kind=realtype), pointer, dimension(:, :, :) :: ddw1, ddw2
2253 
2254  ! Determine the number of variables per cell to be sent.
2255 
2256  nvar = max(0_inttype, (end - start + 1))
2257  if (nvar == 0) return
2258 
2259  ! Loop over the spectral solutions and local blocks to apply
2260  ! the boundary conditions for the residual.
2261 
2262  spectralloop: do sps = 1, ntimeintervalsspectral
2263  domains: do mm = 1, ndom
2264 
2265  ! Set the pointers for this block.
2266 
2267  call setpointers(mm, level, sps)
2268 
2269  ! Loop over the boundary condition subfaces of this block
2270  ! and apply a neumann boundary condition. I know that this is
2271  ! not entirely correct for some boundary conditions, symmetry,
2272  ! solid wall, but this is not so important.
2273 
2274  bocos: do nn = 1, nbocos
2275 
2276  ! Set the pointer for ddw1 and ddw2, depending on the block
2277  ! face on which the subface is located.
2278 
2279  select case (bcfaceid(nn))
2280  case (imin)
2281  ddw1 => dw(1, 1:, 1:, :); ddw2 => dw(2, 1:, 1:, :)
2282  case (imax)
2283  ddw1 => dw(ie, 1:, 1:, :); ddw2 => dw(il, 1:, 1:, :)
2284  case (jmin)
2285  ddw1 => dw(1:, 1, 1:, :); ddw2 => dw(1:, 2, 1:, :)
2286  case (jmax)
2287  ddw1 => dw(1:, je, 1:, :); ddw2 => dw(1:, jl, 1:, :)
2288  case (kmin)
2289  ddw1 => dw(1:, 1:, 1, :); ddw2 => dw(1:, 1:, 2, :)
2290  case (kmax)
2291  ddw1 => dw(1:, 1:, ke, :); ddw2 => dw(1:, 1:, kl, :)
2292  end select
2293 
2294  ! Loop over the cell range of the subface.
2295  !DIR$ NOVECTOR
2296  do j = bcdata(nn)%jcBeg, bcdata(nn)%jcEnd
2297  !DIR$ NOVECTOR
2298  do i = bcdata(nn)%icBeg, bcdata(nn)%icEnd
2299  !DIR$ NOVECTOR
2300  do l = start, end
2301  ddw1(i, j, l) = ddw2(i, j, l)
2302  end do
2303  end do
2304  end do
2305 
2306  end do bocos
2307  end do domains
2308 
2309  ! Send the variables. The data is first copied into
2310  ! the send buffer after which the buffer is sent asap.
2311 
2312  ii = 1
2313  sends: do i = 1, commpatterncell_1st(level)%nProcSend
2314 
2315  ! Store the processor id and the size of the message
2316  ! a bit easier.
2317 
2318  procid = commpatterncell_1st(level)%sendProc(i)
2319  size = nvar * commpatterncell_1st(level)%nsend(i)
2320 
2321  ! Copy the data in the correct part of the send buffer.
2322 
2323  jj = ii
2324  !DIR$ NOVECTOR
2325  do j = 1, commpatterncell_1st(level)%nsend(i)
2326 
2327  ! Store the block id and the indices of the donor
2328  ! a bit easier.
2329 
2330  dd1 = commpatterncell_1st(level)%sendList(i)%block(j)
2331  ii1 = commpatterncell_1st(level)%sendList(i)%indices(j, 1)
2332  jj1 = commpatterncell_1st(level)%sendList(i)%indices(j, 2)
2333  kk1 = commpatterncell_1st(level)%sendList(i)%indices(j, 3)
2334 
2335  ! Copy the given range of the residuals for this cell
2336  ! in the buffer. Update the counter jj accordingly.
2337  !DIR$ NOVECTOR
2338  do k = start, end
2339  sendbuffer(jj) = flowdoms(dd1, level, sps)%dw(ii1, jj1, kk1, k)
2340  jj = jj + 1
2341  end do
2342 
2343  end do
2344 
2345  ! Send the data.
2346 
2347  call mpi_isend(sendbuffer(ii), size, adflow_real, procid, &
2348  procid, adflow_comm_world, sendrequests(i), &
2349  ierr)
2350  call echk(ierr, __file__, __line__)
2351 
2352  ! Set ii to jj for the next processor.
2353 
2354  ii = jj
2355 
2356  end do sends
2357 
2358  ! Post the nonblocking receives.
2359 
2360  ii = 1
2361  receives: do i = 1, commpatterncell_1st(level)%nProcRecv
2362 
2363  ! Store the processor id and the size of the message
2364  ! a bit easier.
2365 
2366  procid = commpatterncell_1st(level)%recvProc(i)
2367  size = nvar * commpatterncell_1st(level)%nrecv(i)
2368 
2369  ! Post the receive.
2370 
2371  call mpi_irecv(recvbuffer(ii), size, adflow_real, procid, &
2372  myid, adflow_comm_world, recvrequests(i), ierr)
2373  call echk(ierr, __file__, __line__)
2374 
2375  ! And update ii.
2376 
2377  ii = ii + size
2378 
2379  end do receives
2380 
2381  ! Copy the local data.
2382  !DIR$ NOVECTOR
2383  localcopy: do i = 1, internalcell_1st(level)%ncopy
2384 
2385  ! Store the block and the indices of the donor a bit easier.
2386 
2387  dd1 = internalcell_1st(level)%donorBlock(i)
2388  ii1 = internalcell_1st(level)%donorIndices(i, 1)
2389  jj1 = internalcell_1st(level)%donorIndices(i, 2)
2390  kk1 = internalcell_1st(level)%donorIndices(i, 3)
2391 
2392  ! Idem for the halo's.
2393 
2394  dd2 = internalcell_1st(level)%haloBlock(i)
2395  ii2 = internalcell_1st(level)%haloIndices(i, 1)
2396  jj2 = internalcell_1st(level)%haloIndices(i, 2)
2397  kk2 = internalcell_1st(level)%haloIndices(i, 3)
2398 
2399  ! Copy the given range of residuals.
2400  !DIR$ NOVECTOR
2401  do k = start, end
2402  flowdoms(dd2, level, sps)%dw(ii2, jj2, kk2, k) = &
2403  flowdoms(dd1, level, sps)%dw(ii1, jj1, kk1, k)
2404  end do
2405 
2406  end do localcopy
2407 
2408  ! Complete the nonblocking receives in an arbitrary sequence and
2409  ! copy the variables from the buffer into the halo's.
2410 
2411  size = commpatterncell_1st(level)%nProcRecv
2412  completerecvs: do i = 1, commpatterncell_1st(level)%nProcRecv
2413 
2414  ! Complete any of the requests.
2415 
2416  call mpi_waitany(size, recvrequests, index, mpistatus, ierr)
2417  call echk(ierr, __file__, __line__)
2418 
2419  ! Copy the data just arrived in the halo's.
2420 
2421  ii = index
2422  jj = nvar * commpatterncell_1st(level)%nrecvCum(ii - 1) + 1
2423  !DIR$ NOVECTOR
2424  do j = 1, commpatterncell_1st(level)%nrecv(ii)
2425 
2426  ! Store the block and the indices of the halo a bit easier.
2427 
2428  dd2 = commpatterncell_1st(level)%recvList(ii)%block(j)
2429  ii2 = commpatterncell_1st(level)%recvList(ii)%indices(j, 1)
2430  jj2 = commpatterncell_1st(level)%recvList(ii)%indices(j, 2)
2431  kk2 = commpatterncell_1st(level)%recvList(ii)%indices(j, 3)
2432 
2433  ! Copy the residuals.
2434  !DIR$ NOVECTOR
2435  do k = start, end
2436  flowdoms(dd2, level, sps)%dw(ii2, jj2, kk2, k) = recvbuffer(jj)
2437  jj = jj + 1
2438  end do
2439 
2440  end do
2441 
2442  end do completerecvs
2443 
2444  ! Complete the nonblocking sends.
2445 
2446  size = commpatterncell_1st(level)%nProcSend
2447  do i = 1, commpatterncell_1st(level)%nProcSend
2448  call mpi_waitany(size, sendrequests, index, mpistatus, ierr)
2449  call echk(ierr, __file__, __line__)
2450  end do
2451 
2452  end do spectralloop
2453 
2454  end subroutine reshalo1
2455 
2456  subroutine exchangecoor(level)
2457  !
2458  ! ExchangeCoor exchanges the coordinates of the given grid
2459  ! level.
2460  !
2461  use block
2462  use communication
2463  use inputtimespectral
2464  use utils, only: echk
2465  !
2466  ! Subroutine arguments.
2467  !
2468  integer(kind=intType), intent(in) :: level
2469  !
2470  ! Local variables.
2471  !
2472  integer :: size, procID, ierr, index
2473  integer, dimension(mpi_status_size) :: mpiStatus
2474 
2475  integer(kind=intType) :: i, j, ii, jj, mm
2476  integer(kind=intType) :: d1, i1, j1, k1, d2, i2, j2, k2
2477 
2478  ! Loop over the number of spectral solutions.
2479 
2480  spectralloop: do mm = 1, ntimeintervalsspectral
2481 
2482  ! Send the coordinates i have to send. The data is first copied
2483  ! into the send buffer and this buffer is sent.
2484 
2485  ii = 1
2486  sends: do i = 1, commpatternnode_1st(level)%nProcSend
2487 
2488  ! Store the processor id and the size of the message
2489  ! a bit easier.
2490 
2491  procid = commpatternnode_1st(level)%sendProc(i)
2492  size = 3 * commpatternnode_1st(level)%nSend(i)
2493 
2494  ! Copy the data in the correct part of the send buffer.
2495 
2496  jj = ii
2497  !DIR$ NOVECTOR
2498  do j = 1, commpatternnode_1st(level)%nSend(i)
2499 
2500  ! Store the block id and the indices of the donor
2501  ! a bit easier.
2502 
2503  d1 = commpatternnode_1st(level)%sendList(i)%block(j)
2504  i1 = commpatternnode_1st(level)%sendList(i)%indices(j, 1)
2505  j1 = commpatternnode_1st(level)%sendList(i)%indices(j, 2)
2506  k1 = commpatternnode_1st(level)%sendList(i)%indices(j, 3)
2507 
2508  ! Copy the coordinates of this point in the buffer.
2509  ! Update the counter jj accordingly.
2510 
2511  sendbuffer(jj) = flowdoms(d1, level, mm)%x(i1, j1, k1, 1)
2512  sendbuffer(jj + 1) = flowdoms(d1, level, mm)%x(i1, j1, k1, 2)
2513  sendbuffer(jj + 2) = flowdoms(d1, level, mm)%x(i1, j1, k1, 3)
2514  jj = jj + 3
2515 
2516  end do
2517 
2518  ! Send the data.
2519 
2520  call mpi_isend(sendbuffer(ii), size, adflow_real, procid, &
2521  procid, adflow_comm_world, sendrequests(i), &
2522  ierr)
2523  call echk(ierr, __file__, __line__)
2524 
2525  ! Set ii to jj for the next processor.
2526 
2527  ii = jj
2528 
2529  end do sends
2530 
2531  ! Post the nonblocking receives.
2532 
2533  ii = 1
2534  receives: do i = 1, commpatternnode_1st(level)%nProcRecv
2535 
2536  ! Store the processor id and the size of the message
2537  ! a bit easier.
2538 
2539  procid = commpatternnode_1st(level)%recvProc(i)
2540  size = 3 * commpatternnode_1st(level)%nRecv(i)
2541 
2542  ! Post the receive.
2543 
2544  call mpi_irecv(recvbuffer(ii), size, adflow_real, procid, &
2545  myid, adflow_comm_world, recvrequests(i), ierr)
2546  call echk(ierr, __file__, __line__)
2547 
2548  ! And update ii.
2549 
2550  ii = ii + size
2551 
2552  end do receives
2553 
2554  ! Copy the local data.
2555  !DIR$ NOVECTOR
2556  localcopy: do i = 1, internalnode_1st(level)%nCopy
2557 
2558  ! Store the block and the indices of the donor a bit easier.
2559 
2560  d1 = internalnode_1st(level)%donorBlock(i)
2561  i1 = internalnode_1st(level)%donorIndices(i, 1)
2562  j1 = internalnode_1st(level)%donorIndices(i, 2)
2563  k1 = internalnode_1st(level)%donorIndices(i, 3)
2564  ! Idem for the halo's.
2565 
2566  d2 = internalnode_1st(level)%haloBlock(i)
2567  i2 = internalnode_1st(level)%haloIndices(i, 1)
2568  j2 = internalnode_1st(level)%haloIndices(i, 2)
2569  k2 = internalnode_1st(level)%haloIndices(i, 3)
2570  ! Copy the coordinates.
2571  flowdoms(d2, level, mm)%x(i2, j2, k2, 1) = &
2572  flowdoms(d1, level, mm)%x(i1, j1, k1, 1)
2573  flowdoms(d2, level, mm)%x(i2, j2, k2, 2) = &
2574  flowdoms(d1, level, mm)%x(i1, j1, k1, 2)
2575  flowdoms(d2, level, mm)%x(i2, j2, k2, 3) = &
2576  flowdoms(d1, level, mm)%x(i1, j1, k1, 3)
2577 
2578  end do localcopy
2579 
2580  ! Correct the periodic halos of the internal communication
2581  ! pattern
2582 
2583  call correctperiodiccoor(level, mm, &
2584  internalnode_1st(level)%nPeriodic, &
2585  internalnode_1st(level)%periodicData)
2586 
2587  ! Complete the nonblocking receives in an arbitrary sequence and
2588  ! copy the coordinates from the buffer into the halo's.
2589 
2590  size = commpatternnode_1st(level)%nProcRecv
2591  completerecvs: do i = 1, commpatternnode_1st(level)%nProcRecv
2592 
2593  ! Complete any of the requests.
2594 
2595  call mpi_waitany(size, recvrequests, index, mpistatus, ierr)
2596  call echk(ierr, __file__, __line__)
2597 
2598  ! Copy the data just arrived in the halo's.
2599 
2600  ii = index
2601  jj = 3 * commpatternnode_1st(level)%nRecvCum(ii - 1) + 1
2602  !DIR$ NOVECTOR
2603  do j = 1, commpatternnode_1st(level)%nRecv(ii)
2604 
2605  ! Store the block and the indices of the halo a bit easier.
2606 
2607  d2 = commpatternnode_1st(level)%recvList(ii)%block(j)
2608  i2 = commpatternnode_1st(level)%recvList(ii)%indices(j, 1)
2609  j2 = commpatternnode_1st(level)%recvList(ii)%indices(j, 2)
2610  k2 = commpatternnode_1st(level)%recvList(ii)%indices(j, 3)
2611 
2612  ! Copy the data.
2613 
2614  flowdoms(d2, level, mm)%x(i2, j2, k2, 1) = recvbuffer(jj)
2615  flowdoms(d2, level, mm)%x(i2, j2, k2, 2) = recvbuffer(jj + 1)
2616  flowdoms(d2, level, mm)%x(i2, j2, k2, 3) = recvbuffer(jj + 2)
2617  jj = jj + 3
2618 
2619  end do
2620 
2621  end do completerecvs
2622 
2623  ! Correct the periodic halos of the external communication
2624  ! pattern.
2625 
2626  call correctperiodiccoor(level, mm, &
2627  commpatternnode_1st(level)%nPeriodic, &
2628  commpatternnode_1st(level)%periodicData)
2629 
2630  ! Complete the nonblocking sends.
2631 
2632  size = commpatternnode_1st(level)%nProcSend
2633  do i = 1, commpatternnode_1st(level)%nProcSend
2634  call mpi_waitany(size, sendrequests, index, mpistatus, ierr)
2635  call echk(ierr, __file__, __line__)
2636  end do
2637 
2638  end do spectralloop
2639 
2640  end subroutine exchangecoor
2641 
2642  ! ==================================================================
2643 
2644  subroutine correctperiodiccoor(level, sp, nPeriodic, periodicData)
2645  !
2646  ! correctPeriodicCoor applies the periodic transformation to
2647  ! the coordinates of the nodal halo's in periodicData.
2648  !
2649  use block
2650  use communication
2651  implicit none
2652  !
2653  ! Subroutine arguments
2654  !
2655  integer(kind=intType), intent(in) :: level, sp, nPeriodic
2656  type(periodicdatatype), dimension(:), pointer :: periodicData
2657  !
2658  ! Local variables.
2659  !
2660  integer(kind=intType) :: nn, mm, ii, i, j, k
2661  real(kind=realtype) :: dx, dy, dz
2662 
2663  real(kind=realtype), dimension(3, 3) :: rotmatrix
2664  real(kind=realtype), dimension(3) :: rotcenter, translation
2665 
2666  ! Loop over the number of periodic transformations.
2667 
2668  do nn = 1, nperiodic
2669 
2670  ! Store the rotation matrix, rotation center and translation
2671  ! vector a bit easier.
2672 
2673  rotmatrix = periodicdata(nn)%rotMatrix
2674  rotcenter = periodicdata(nn)%rotCenter
2675  translation = periodicdata(nn)%translation + rotcenter
2676 
2677  ! Loop over the number of halo nodes for this transformation.
2678  !DIR$ NOVECTOR
2679  do ii = 1, periodicdata(nn)%nHalos
2680 
2681  ! Store the block and the indices a bit easier.
2682 
2683  mm = periodicdata(nn)%block(ii)
2684  i = periodicdata(nn)%indices(ii, 1)
2685  j = periodicdata(nn)%indices(ii, 2)
2686  k = periodicdata(nn)%indices(ii, 3)
2687 
2688  ! Determine the vector from the center of rotation to the
2689  ! uncorrected halo value.
2690 
2691  dx = flowdoms(mm, level, sp)%x(i, j, k, 1) - rotcenter(1)
2692  dy = flowdoms(mm, level, sp)%x(i, j, k, 2) - rotcenter(2)
2693  dz = flowdoms(mm, level, sp)%x(i, j, k, 3) - rotcenter(3)
2694 
2695  ! Compute the corrected coordinates.
2696 
2697  flowdoms(mm, level, sp)%x(i, j, k, 1) = rotmatrix(1, 1) * dx &
2698  + rotmatrix(1, 2) * dy &
2699  + rotmatrix(1, 3) * dz &
2700  + translation(1)
2701  flowdoms(mm, level, sp)%x(i, j, k, 2) = rotmatrix(2, 1) * dx &
2702  + rotmatrix(2, 2) * dy &
2703  + rotmatrix(2, 3) * dz &
2704  + translation(2)
2705  flowdoms(mm, level, sp)%x(i, j, k, 3) = rotmatrix(3, 1) * dx &
2706  + rotmatrix(3, 2) * dy &
2707  + rotmatrix(3, 3) * dz &
2708  + translation(3)
2709  end do
2710  end do
2711 
2712  end subroutine correctperiodiccoor
    subroutine exchangecoor_b(level)
        !
        ! ExchangeCoor_b exchanges the *derivatives* of the given grid
        ! level IN REVERSE MODE. The communication pattern is the
        ! transpose of exchangecoor: seeds stored in halo nodes are sent
        ! back to their donor nodes and summed there, and each halo seed
        ! is zeroed once it has been transferred. Note that the recv*
        ! buffers/requests are therefore used for sending and the send*
        ! buffers/requests for receiving.
        !
        use constants
        use block
        use communication
        use inputtimespectral
        use utils, only: echk
        implicit none
        !
        ! Subroutine arguments.
        !
        ! level: multigrid level whose coordinate derivatives are exchanged.
        integer(kind=intType), intent(in) :: level
        !
        ! Local variables.
        !
        integer :: size, procID, ierr, index
        integer, dimension(mpi_status_size) :: mpiStatus

        integer(kind=intType) :: i, j, ii, jj, mm, idim
        integer(kind=intType) :: d1, i1, j1, k1, d2, i2, j2, k2

        ! Loop over the number of spectral solutions.

        spectralloop: do mm = 1, ntimeintervalsspectral

            ! Reverse of the forward receives: gather the seeds of the
            ! halo nodes this processor normally receives, send them back
            ! to the donor processor, and zero the halo seeds.

            ii = 1
            jj = 1
            recvs: do i = 1, commpatternnode_1st(level)%nProcRecv

                ! Store the processor id and the size of the message
                ! a bit easier.

                procid = commpatternnode_1st(level)%recvProc(i)
                size = 3 * commpatternnode_1st(level)%nRecv(i)

                ! Copy the data in the correct part of the send buffer.
                !DIR$ NOVECTOR
                do j = 1, commpatternnode_1st(level)%nRecv(i)

                    ! Store the block id and the indices of the halo
                    ! a bit easier.

                    d1 = commpatternnode_1st(level)%recvList(i)%block(j)
                    i1 = commpatternnode_1st(level)%recvList(i)%indices(j, 1)
                    j1 = commpatternnode_1st(level)%recvList(i)%indices(j, 2)
                    k1 = commpatternnode_1st(level)%recvList(i)%indices(j, 3)

                    ! Copy the derivatives of this point in the buffer.
                    ! Update the counter jj accordingly.

                    recvbuffer(jj) = flowdomsd(d1, level, mm)%x(i1, j1, k1, 1)
                    recvbuffer(jj + 1) = flowdomsd(d1, level, mm)%x(i1, j1, k1, 2)
                    recvbuffer(jj + 2) = flowdomsd(d1, level, mm)%x(i1, j1, k1, 3)
                    jj = jj + 3
                    ! The halo seed has been transferred; reset it to zero.
                    flowdomsd(d1, level, mm)%x(i1, j1, k1, :) = zero
                end do

                ! Send the data.

                call mpi_isend(recvbuffer(ii), size, adflow_real, procid, &
                               procid, adflow_comm_world, recvrequests(i), &
                               ierr)
                call echk(ierr, __file__, __line__)

                ! Set ii to jj for the next processor.

                ii = jj

            end do recvs

            ! Post the nonblocking receives (into the send buffer; the
            ! reverse of the forward sends).

            ii = 1
            send: do i = 1, commpatternnode_1st(level)%nProcSend

                ! Store the processor id and the size of the message
                ! a bit easier.

                procid = commpatternnode_1st(level)%sendProc(i)
                size = 3 * commpatternnode_1st(level)%nSend(i)

                ! Post the receive.

                call mpi_irecv(sendbuffer(ii), size, adflow_real, procid, &
                               myid, adflow_comm_world, sendrequests(i), ierr)
                call echk(ierr, __file__, __line__)

                ! And update ii.

                ii = ii + size

            end do send

            ! Local accumulation: add each halo seed into its donor node
            ! and clear the halo seed afterwards.
            !DIR$ NOVECTOR
            localcopy: do i = 1, internalnode_1st(level)%nCopy

                ! Store the block and the indices of the donor a bit easier.

                d1 = internalnode_1st(level)%donorBlock(i)
                i1 = internalnode_1st(level)%donorIndices(i, 1)
                j1 = internalnode_1st(level)%donorIndices(i, 2)
                k1 = internalnode_1st(level)%donorIndices(i, 3)
                ! Idem for the halo's.

                d2 = internalnode_1st(level)%haloBlock(i)
                i2 = internalnode_1st(level)%haloIndices(i, 1)
                j2 = internalnode_1st(level)%haloIndices(i, 2)
                k2 = internalnode_1st(level)%haloIndices(i, 3)

                ! Sum into the '1' values from the '2' values
                do idim = 1, 3
                    flowdomsd(d1, level, mm)%x(i1, j1, k1, idim) = flowdomsd(d1, level, mm)%x(i1, j1, k1, idim) + &
                                                                   flowdomsd(d2, level, mm)%x(i2, j2, k2, idim)
                    flowdomsd(d2, level, mm)%x(i2, j2, k2, idim) = zero
                end do
            end do localcopy

            ! Correct the periodic halos of the internal communication
            ! pattern

            ! NOT IMPLEMENTED
            ! call correctPeriodicCoor(level, mm, &
            !                          internalNode_1st(level)%nPeriodic, &
            !                          internalNode_1st(level)%periodicData)

            ! Complete the nonblocking receives in an arbitrary sequence and
            ! accumulate the arrived seeds into the corresponding donor nodes.

            size = commpatternnode_1st(level)%nProcSend
            completesends: do i = 1, commpatternnode_1st(level)%nProcSend

                ! Complete any of the requests.

                call mpi_waitany(size, sendrequests, index, mpistatus, ierr)
                call echk(ierr, __file__, __line__)

                ! Copy the data just arrived in the halo's. The cumulative
                ! send count gives a 0-based offset into sendbuffer; the
                ! "+ idim" in the inner loop makes the access 1-based.

                ii = index
                jj = 3 * commpatternnode_1st(level)%nSendCum(ii - 1)
                !DIR$ NOVECTOR
                do j = 1, commpatternnode_1st(level)%nSend(ii)

                    ! Store the block and the indices of the halo a bit easier.

                    d2 = commpatternnode_1st(level)%sendList(ii)%block(j)
                    i2 = commpatternnode_1st(level)%sendList(ii)%indices(j, 1)
                    j2 = commpatternnode_1st(level)%sendList(ii)%indices(j, 2)
                    k2 = commpatternnode_1st(level)%sendList(ii)%indices(j, 3)

                    ! Sum into the '2' values from the recv buffer
                    do idim = 1, 3
                        flowdomsd(d2, level, mm)%x(i2, j2, k2, idim) = flowdomsd(d2, level, mm)%x(i2, j2, k2, idim) + &
                                                                       sendbuffer(jj + idim)
                    end do
                    jj = jj + 3

                end do

            end do completesends

            ! Correct the periodic halos of the external communication
            ! pattern.
            ! NOT IMPLEMENTED
            ! call correctPeriodicCoor(level, mm, &
            !                          commPatternNode_1st(level)%nPeriodic, &
            !                          commPatternNode_1st(level)%periodicData)

            ! Complete the nonblocking sends.

            size = commpatternnode_1st(level)%nProcRecv
            do i = 1, commpatternnode_1st(level)%nProcRecv
                call mpi_waitany(size, recvrequests, index, mpistatus, ierr)
                call echk(ierr, __file__, __line__)
            end do

        end do spectralloop

    end subroutine exchangecoor_b
2899  subroutine exchangecoor_d(level)
2900  !
2901  ! ExchangeCoor_d exchanges the *derivatives* of the given grid
2902  ! level.
2903  !
 ! NOTE(review): forward-mode (tangent) counterpart of exchangeCoor.
 ! It communicates the coordinate seeds flowdomsd%x across the 1st
 ! level node-based communication pattern (commpatternnode_1st /
 ! internalnode_1st), once per time-spectral instance.
2904  use block
2905  use communication
2906  use inputtimespectral
2907  use utils, only: echk
2908  implicit none
2909  !
2910  ! Subroutine arguments.
2911  !
 ! level: multigrid level whose halo node derivatives are exchanged.
2912  integer(kind=intType), intent(in) :: level
2913  !
2914  ! Local variables.
2915  !
2916  integer :: size, procID, ierr, index
2917  integer, dimension(mpi_status_size) :: mpiStatus
2918 
2919  integer(kind=intType) :: i, j, ii, jj, mm
2920  integer(kind=intType) :: d1, i1, j1, k1, d2, i2, j2, k2
2921 
2922  ! Loop over the number of spectral solutions.
2923 
2924  spectralloop: do mm = 1, ntimeintervalsspectral
2925 
2926  ! Send the coordinates i have to send. The data is first copied
2927  ! into the send buffer and this buffer is sent.
2928 
2929  ii = 1
2930  sends: do i = 1, commpatternnode_1st(level)%nProcSend
2931 
2932  ! Store the processor id and the size of the message
2933  ! a bit easier.
2934 
2935  procid = commpatternnode_1st(level)%sendProc(i)
 ! Three reals (x, y, z seeds) per node.
2936  size = 3 * commpatternnode_1st(level)%nSend(i)
2937 
2938  ! Copy the data in the correct part of the send buffer.
2939 
2940  jj = ii
2941  !DIR$ NOVECTOR
2942  do j = 1, commpatternnode_1st(level)%nSend(i)
2943 
2944  ! Store the block id and the indices of the donor
2945  ! a bit easier.
2946 
2947  d1 = commpatternnode_1st(level)%sendList(i)%block(j)
2948  i1 = commpatternnode_1st(level)%sendList(i)%indices(j, 1)
2949  j1 = commpatternnode_1st(level)%sendList(i)%indices(j, 2)
2950  k1 = commpatternnode_1st(level)%sendList(i)%indices(j, 3)
2951 
2952  ! Copy the coordinates of this point in the buffer.
2953  ! Update the counter jj accordingly.
2954 
2955  sendbuffer(jj) = flowdomsd(d1, level, mm)%x(i1, j1, k1, 1)
2956  sendbuffer(jj + 1) = flowdomsd(d1, level, mm)%x(i1, j1, k1, 2)
2957  sendbuffer(jj + 2) = flowdomsd(d1, level, mm)%x(i1, j1, k1, 3)
2958  jj = jj + 3
2959 
2960  end do
2961 
2962  ! Send the data.
 ! (Messages are tagged with the receiving rank, matching the
 ! myid tag used by the irecv below.)
2963 
2964  call mpi_isend(sendbuffer(ii), size, adflow_real, procid, &
2965  procid, adflow_comm_world, sendrequests(i), &
2966  ierr)
2967  call echk(ierr, __file__, __line__)
2968 
2969  ! Set ii to jj for the next processor.
2970 
2971  ii = jj
2972 
2973  end do sends
2974 
2975  ! Post the nonblocking receives.
2976 
2977  ii = 1
2978  receives: do i = 1, commpatternnode_1st(level)%nProcRecv
2979 
2980  ! Store the processor id and the size of the message
2981  ! a bit easier.
2982 
2983  procid = commpatternnode_1st(level)%recvProc(i)
2984  size = 3 * commpatternnode_1st(level)%nRecv(i)
2985 
2986  ! Post the receive.
2987 
2988  call mpi_irecv(recvbuffer(ii), size, adflow_real, procid, &
2989  myid, adflow_comm_world, recvrequests(i), ierr)
2990  call echk(ierr, __file__, __line__)
2991 
2992  ! And update ii.
2993 
2994  ii = ii + size
2995 
2996  end do receives
2997 
2998  ! Copy the local data.
2999  !DIR$ NOVECTOR
3000  localcopy: do i = 1, internalnode_1st(level)%nCopy
3001 
3002  ! Store the block and the indices of the donor a bit easier.
3003 
3004  d1 = internalnode_1st(level)%donorBlock(i)
3005  i1 = internalnode_1st(level)%donorIndices(i, 1)
3006  j1 = internalnode_1st(level)%donorIndices(i, 2)
3007  k1 = internalnode_1st(level)%donorIndices(i, 3)
3008  ! Idem for the halo's.
3009 
3010  d2 = internalnode_1st(level)%haloBlock(i)
3011  i2 = internalnode_1st(level)%haloIndices(i, 1)
3012  j2 = internalnode_1st(level)%haloIndices(i, 2)
3013  k2 = internalnode_1st(level)%haloIndices(i, 3)
3014  ! Copy the coordinates.
3015  flowdomsd(d2, level, mm)%x(i2, j2, k2, 1) = &
3016  flowdomsd(d1, level, mm)%x(i1, j1, k1, 1)
3017  flowdomsd(d2, level, mm)%x(i2, j2, k2, 2) = &
3018  flowdomsd(d1, level, mm)%x(i1, j1, k1, 2)
3019  flowdomsd(d2, level, mm)%x(i2, j2, k2, 3) = &
3020  flowdomsd(d1, level, mm)%x(i1, j1, k1, 3)
3021 
3022  end do localcopy
3023 
3024  ! Correct the periodic halos of the internal communication
3025  ! pattern
3026 
3027  ! NOT IMPLEMENTED
3028  ! call correctPeriodicCoor(level, mm, &
3029  ! internalNode_1st(level)%nPeriodic, &
3030  ! internalNode_1st(level)%periodicData)
3031 
3032  ! Complete the nonblocking receives in an arbitrary sequence and
3033  ! copy the coordinates from the buffer into the halo's.
3034 
3035  size = commpatternnode_1st(level)%nProcRecv
3036  completerecvs: do i = 1, commpatternnode_1st(level)%nProcRecv
3037 
3038  ! Complete any of the requests.
3039 
3040  call mpi_waitany(size, recvrequests, index, mpistatus, ierr)
3041  call echk(ierr, __file__, __line__)
3042 
3043  ! Copy the data just arrived in the halo's.
3044 
 ! (mpi_waitany returns a 1-based request index in Fortran; the
 ! cumulative counts nRecvCum give the receive-buffer offset.)
3045  ii = index
3046  jj = 3 * commpatternnode_1st(level)%nRecvCum(ii - 1) + 1
3047  !DIR$ NOVECTOR
3048  do j = 1, commpatternnode_1st(level)%nRecv(ii)
3049 
3050  ! Store the block and the indices of the halo a bit easier.
3051 
3052  d2 = commpatternnode_1st(level)%recvList(ii)%block(j)
3053  i2 = commpatternnode_1st(level)%recvList(ii)%indices(j, 1)
3054  j2 = commpatternnode_1st(level)%recvList(ii)%indices(j, 2)
3055  k2 = commpatternnode_1st(level)%recvList(ii)%indices(j, 3)
3056 
3057  ! Copy the data.
3058 
3059  flowdomsd(d2, level, mm)%x(i2, j2, k2, 1) = recvbuffer(jj)
3060  flowdomsd(d2, level, mm)%x(i2, j2, k2, 2) = recvbuffer(jj + 1)
3061  flowdomsd(d2, level, mm)%x(i2, j2, k2, 3) = recvbuffer(jj + 2)
3062  jj = jj + 3
3063 
3064  end do
3065 
3066  end do completerecvs
3067 
3068  ! Correct the periodic halos of the external communication
3069  ! pattern.
3070  ! NOT IMPLEMENTED
3071  ! call correctPeriodicCoor(level, mm, &
3072  ! commPatternNode_1st(level)%nPeriodic, &
3073  ! commPatternNode_1st(level)%periodicData)
3074 
3075  ! Complete the nonblocking sends.
3076 
3077  size = commpatternnode_1st(level)%nProcSend
3078  do i = 1, commpatternnode_1st(level)%nProcSend
3079  call mpi_waitany(size, sendrequests, index, mpistatus, ierr)
3080  call echk(ierr, __file__, __line__)
3081  end do
3082 
3083  end do spectralloop
3084 
3085  end subroutine exchangecoor_d
3086 
3087  ! -----------------------------------------------------------------
3088  ! Comm routines for zippers
3089  ! -----------------------------------------------------------------
3090 
3091  subroutine flowintegrationzippercomm(isInflow, vars, sps)
3092 
3093  ! This routine could technically be inside of the
3094  ! flowIntegrationZipper subroutine but we split it out because it
3095  ! has petsc comm stuff that will differentiate manually that
3096  ! tapenade doesn't need to see.
 !
 ! Gathers node-averaged flow quantities (state, p, gamma, sface,
 ! coordinates) on inflow or outflow BCs into a PETSc vector and
 ! scatters them onto the zipper mesh via zipper%scatter.
 ! isInflow selects the inflow (true) or outflow (false) BC group;
 ! vars(:, iVar) receives the scattered values for spectral instance sps.
3097 
3098  use constants
 ! NOTE(review): source lines 3099 and 3102 (additional use
 ! statements, presumably blockpointers and the module providing
 ! bcfamexchange/familyexchange) were lost in this extraction —
 ! restore from version control.
3100  use bcpointers, only: sface, ww1, ww2, pp1, pp2, gamma1, gamma2, xx
3101  use oversetdata, only: zippermeshes, zippermesh
3103  use utils, only: setpointers, setbcpointers, echk
3104 
3105 #include <petsc/finclude/petsc.h>
3106  use petsc
3107  implicit none
3108 
3109  ! Input variables
3110  logical, intent(in) :: isInflow
3111  real(kind=realtype), dimension(:, :) :: vars
3112  integer(kind=intType) :: sps
3113 
3114  ! Working variables
3115  integer(kind=intType) :: ii, iVar, i, j, iBeg, iEnd, jBeg, jEnd, ierr, nn, mm
3116  real(kind=realtype), dimension(:), pointer :: localptr
3117  type(zippermesh), pointer :: zipper
3118  type(familyexchange), pointer :: exch
3119 
3120  ! Set the zipper pointer to the zipper for inflow/outflow conditions
3121  if (isinflow) then
3122  zipper => zippermeshes(ibcgroupinflow)
3123  exch => bcfamexchange(ibcgroupinflow, sps)
3124  else
3125  zipper => zippermeshes(ibcgroupoutflow)
3126  exch => bcfamexchange(ibcgroupoutflow, sps)
3127  end if
3128 
3129  ! Note that we can generate all the nodal values we need locally
3130  ! using simple arithmetic averaging since they are all flow
3131  ! properties. There is no need to use the cellToNodeScatter stuff
3132  ! here like for the forces.
3133  varloop: do ivar = 1, nzippflowcomm
3134  call vecgetarrayf90(exch%nodeValLocal, localptr, ierr)
3135  call echk(ierr, __file__, __line__)
3136 
3137  ii = 0
3138  domainloop: do nn = 1, ndom
3139  call setpointers(nn, 1, sps)
3140  bocoloop: do mm = 1, nbocos
3141 
3142  if (((bctype(mm) == subsonicinflow .or. &
3143  bctype(mm) == supersonicinflow) .and. (isinflow)) &
3144  .or. &
3145  (bctype(mm) == subsonicoutflow .or. &
3146  bctype(mm) == supersonicoutflow) .and. (.not. isinflow)) then
3147 
3148  call setbcpointers(mm, .true.)
3149  ibeg = bcdata(mm)%inBeg; iend = bcdata(mm)%inEnd
3150  jbeg = bcdata(mm)%jnBeg; jend = bcdata(mm)%jnEnd
3151  do j = jbeg, jend
3152  do i = ibeg, iend
3153  ii = ii + 1
3154  select case (ivar)
3155  case (irho, ivx, ivy, ivz)
 ! Node value = average of the 8 surrounding cell values
 ! (two halo layers, four cells each).
3156  localptr(ii) = eighth * ( &
3157  ww1(i, j, ivar) + ww1(i + 1, j, ivar) + &
3158  ww1(i, j + 1, ivar) + ww1(i + 1, j + 1, ivar) + &
3159  ww2(i, j, ivar) + ww2(i + 1, j, ivar) + &
3160  ww2(i, j + 1, ivar) + ww2(i + 1, j + 1, ivar))
3161  case (izippflowp)
3162  localptr(ii) = eighth * ( &
3163  pp1(i, j) + pp1(i + 1, j) + &
3164  pp1(i, j + 1) + pp1(i + 1, j + 1) + &
3165  pp2(i, j) + pp2(i + 1, j) + &
3166  pp2(i, j + 1) + pp2(i + 1, j + 1))
3167  case (izippflowgamma)
3168  localptr(ii) = eighth * ( &
3169  gamma1(i, j) + gamma1(i + 1, j) + &
3170  gamma1(i, j + 1) + gamma1(i + 1, j + 1) + &
3171  gamma2(i, j) + gamma2(i + 1, j) + &
3172  gamma2(i, j + 1) + gamma2(i + 1, j + 1))
3173  case (izippflowsface)
3174  if (addgridvelocities) then
3175  localptr(ii) = fourth * ( &
3176  sface(i, j) + sface(i + 1, j) + &
3177  sface(i, j + 1) + sface(i + 1, j + 1))
3178  else
3179  localptr(ii) = zero
3180  end if
 ! NOTE(review): source line 3181 — the case selector for the
 ! coordinate variables (izippflowx/y/z) — was lost in this
 ! extraction; the assignment below belongs to that case.
3182  localptr(ii) = xx(i + 1, j + 1, ivar - izippflowx + 1)
3183  end select
3184  end do
3185  end do
3186  end if
3187  end do bocoloop
3188  end do domainloop
3189 
3190  ! Return pointer to nodeValLocal
3191  call vecrestorearrayf90(exch%nodeValLocal, localptr, ierr)
3192  call echk(ierr, __file__, __line__)
3193 
 ! Wrap the caller's vars column in the zipper's local PETSc vector
 ! (zero-copy) so the scatter writes directly into vars.
3194  call vecplacearray(zipper%localVal, vars(:, ivar), ierr)
3195  call echk(ierr, __file__, __line__)
3196 
3197  ! Send these values to the root using the zipper scatter.
3198  call vecscatterbegin(zipper%scatter, exch%nodeValLocal, &
3199  zipper%localVal, insert_values, scatter_forward, ierr)
3200  call echk(ierr, __file__, __line__)
3201 
3202  call vecscatterend(zipper%scatter, exch%nodeValLocal, &
3203  zipper%localVal, insert_values, scatter_forward, ierr)
3204  call echk(ierr, __file__, __line__)
3205 
3206  ! Reset the original petsc vector.
3207  call vecresetarray(zipper%localVal, ierr)
3208  call echk(ierr, __file__, __line__)
3209  end do varloop
3210 
3211  end subroutine flowintegrationzippercomm
3212 
3213  subroutine flowintegrationzippercomm_d(isInflow, vars, varsd, sps)
3214 
3215  ! Forward mode linearization of flowIntegrationZipperComm.
 ! Computes the primal values (via the primal routine) and then
 ! scatters the node-averaged seeds (ww1d/ww2d, pp1d/pp2d, sfaced,
 ! xxd) into varsd(:, iVar). Gamma seeds are zero (inactive).
3216 
3217  use constants
 ! NOTE(review): source lines 3218 and 3221-3222 (additional use
 ! statements, presumably blockpointers and the modules providing
 ! bcfamexchange/familyexchange and the *_d pointer setters) were
 ! lost in this extraction — restore from version control.
3219  use bcpointers, only: sfaced, ww1d, ww2d, pp1d, pp2d, xxd
3220  use oversetdata, only: zippermeshes, zippermesh
3223 #include <petsc/finclude/petsc.h>
3224  use petsc
3225  implicit none
3226  ! Input variables
3227  logical, intent(in) :: isInflow
3228  real(kind=realtype), dimension(:, :) :: vars, varsd
3229  integer(kind=intType) :: sps
3230 
3231  ! Working variables
3232  integer(kind=intType) :: ii, iVar, i, j, iBeg, iEnd, jBeg, jEnd, ierr, nn, mm
3233  real(kind=realtype), dimension(:), pointer :: localptr
3234  type(zippermesh), pointer :: zipper
3235  type(familyexchange), pointer :: exch
3236 
3237  ! Set the zipper pointer to the zipper for inflow/outflow conditions
3238  if (isinflow) then
3239  ! Need to generate the vars themselves.
3240  call flowintegrationzippercomm(.true., vars, sps)
3241  zipper => zippermeshes(ibcgroupinflow)
3242  exch => bcfamexchange(ibcgroupinflow, sps)
3243  else
3244  ! Need to generate the vars themselves.
3245  call flowintegrationzippercomm(.false., vars, sps)
3246  zipper => zippermeshes(ibcgroupoutflow)
3247  exch => bcfamexchange(ibcgroupoutflow, sps)
3248  end if
3249 
3250  ! Note that we can generate all the nodal values we need locally
3251  ! using simple arithmetic averaging since they are all flow
3252  ! properties. There is no need to use the cellToNodeScatter stuff
3253  ! here like for the forces.
3254  varloop: do ivar = 1, nzippflowcomm
3255  call vecgetarrayf90(exch%nodeValLocal, localptr, ierr)
3256  call echk(ierr, __file__, __line__)
3257 
3258  ii = 0
3259  domainloop: do nn = 1, ndom
3260  call setpointers_d(nn, 1, sps)
3261  bocoloop: do mm = 1, nbocos
3262  if (((bctype(mm) == subsonicinflow .or. &
3263  bctype(mm) == supersonicinflow) .and. isinflow) &
3264  .or. &
3265  (bctype(mm) == subsonicoutflow .or. &
3266  bctype(mm) == supersonicoutflow) .and. (.not. isinflow)) then
3267 
3268  call setbcpointers_d(mm, .true.)
3269  ibeg = bcdata(mm)%inBeg; iend = bcdata(mm)%inEnd
3270  jbeg = bcdata(mm)%jnBeg; jend = bcdata(mm)%jnEnd
3271  do j = jbeg, jend
3272  do i = ibeg, iend
3273  ii = ii + 1
3274  select case (ivar)
3275  case (irho, ivx, ivy, ivz)
 ! Same 8-cell average as the primal routine, applied to seeds.
3276  localptr(ii) = eighth * ( &
3277  ww1d(i, j, ivar) + ww1d(i + 1, j, ivar) + &
3278  ww1d(i, j + 1, ivar) + ww1d(i + 1, j + 1, ivar) + &
3279  ww2d(i, j, ivar) + ww2d(i + 1, j, ivar) + &
3280  ww2d(i, j + 1, ivar) + ww2d(i + 1, j + 1, ivar))
3281  case (izippflowp)
3282  localptr(ii) = eighth * ( &
3283  pp1d(i, j) + pp1d(i + 1, j) + &
3284  pp1d(i, j + 1) + pp1d(i + 1, j + 1) + &
3285  pp2d(i, j) + pp2d(i + 1, j) + &
3286  pp2d(i, j + 1) + pp2d(i + 1, j + 1))
3287  case (izippflowgamma)
3288  ! Gamma is not currently an active variable
3289  localptr(ii) = zero
3290  case (izippflowsface)
3291  if (addgridvelocities) then
3292  localptr(ii) = fourth * ( &
3293  sfaced(i, j) + sfaced(i + 1, j) + &
3294  sfaced(i, j + 1) + sfaced(i + 1, j + 1))
3295  else
3296  localptr(ii) = zero
3297  end if
 ! NOTE(review): source line 3298 — the case selector for the
 ! coordinate seeds (izippflowx/y/z) — was lost in this
 ! extraction; the assignment below belongs to that case.
3299  localptr(ii) = xxd(i + 1, j + 1, ivar - izippflowx + 1)
3300  end select
3301  end do
3302  end do
3303  end if
3304  end do bocoloop
3305  end do domainloop
3306 
3307  ! Return pointer to nodeValLocal
3308  call vecrestorearrayf90(exch%nodeValLocal, localptr, ierr)
3309  call echk(ierr, __file__, __line__)
3310 
 ! Wrap the varsd column (zero-copy) so the scatter fills it directly.
3311  call vecplacearray(zipper%localVal, varsd(:, ivar), ierr)
3312  call echk(ierr, __file__, __line__)
3313 
3314  ! Send these values to the root using the zipper scatter.
3315  call vecscatterbegin(zipper%scatter, exch%nodeValLocal, &
3316  zipper%localVal, insert_values, scatter_forward, ierr)
3317  call echk(ierr, __file__, __line__)
3318 
3319  call vecscatterend(zipper%scatter, exch%nodeValLocal, &
3320  zipper%localVal, insert_values, scatter_forward, ierr)
3321  call echk(ierr, __file__, __line__)
3322 
3323  ! Reset the original petsc vector.
3324  call vecresetarray(zipper%localVal, ierr)
3325  call echk(ierr, __file__, __line__)
3326  end do varloop
3327 
3328  end subroutine flowintegrationzippercomm_d
3329 
3330  subroutine flowintegrationzippercomm_b(isInflow, vars, varsd, sps)
3331 
3332  ! Reverse mode linearization of the flowIntegrationZipperComm routine
 ! Scatters the seeds in varsd(:, iVar) back (reverse scatter with
 ! add_values), zeroes varsd, and accumulates the resulting nodal
 ! seeds into the BC adjoint pointers (ww1d/ww2d, pp1d/pp2d, sfaced,
 ! xxd) — the transpose of the forward 8-cell / 4-node averaging.
3333 
3334  use constants
 ! NOTE(review): source lines 3335 and 3338-3339 (additional use
 ! statements, presumably blockpointers and the modules providing
 ! bcfamexchange/familyexchange and the pointer setters) were lost
 ! in this extraction — restore from version control.
3336  use bcpointers, only: sfaced, ww1d, ww2d, pp1d, pp2d, xxd
3337  use oversetdata, only: zippermeshes, zippermesh
3340 #include <petsc/finclude/petsc.h>
3341  use petsc
3342  implicit none
3343 
3344  ! Input variables
3345  logical, intent(in) :: isInflow
3346  real(kind=realtype), dimension(:, :) :: vars, varsd
3347  integer(kind=intType) :: sps
3348 
3349  ! Working variables
3350  integer(kind=intType) :: ii, iVar, i, j, iBeg, iEnd, jBeg, jEnd, ierr, nn, mm
3351  real(kind=realtype), dimension(:), pointer :: localptr
3352  real(kind=realtype) :: tmp
3353  type(zippermesh), pointer :: zipper
3354  type(familyexchange), pointer :: exch
3355 
3356  ! Set the zipper pointer to the zipper for inflow/outflow conditions
3357  if (isinflow) then
3358  zipper => zippermeshes(ibcgroupinflow)
3359  exch => bcfamexchange(ibcgroupinflow, sps)
3360  else
3361  zipper => zippermeshes(ibcgroupoutflow)
3362  exch => bcfamexchange(ibcgroupoutflow, sps)
3363  end if
3364 
3365  ! Run the var exchange loop backwards:
3366  varloop: do ivar = 1, nzippflowcomm
3367 
3368  ! Zero the vector we are scattering into:
3369  call vecset(exch%nodeValLocal, zero, ierr)
3370  call echk(ierr, __file__, __line__)
3371 
3372  call vecplacearray(zipper%localVal, varsd(:, ivar), ierr)
3373  call echk(ierr, __file__, __line__)
3374 
3375  ! Send these values to the root using the zipper scatter.
 ! (Reverse scatter: seeds flow from the zipper back to the
 ! local node vector, accumulating with add_values.)
3376  call vecscatterbegin(zipper%scatter, zipper%localVal, &
3377  exch%nodeValLocal, add_values, scatter_reverse, ierr)
3378  call echk(ierr, __file__, __line__)
3379 
3380  call vecscatterend(zipper%scatter, zipper%localVal, &
3381  exch%nodeValLocal, add_values, scatter_reverse, ierr)
3382  call echk(ierr, __file__, __line__)
3383 
3384  ! Reset the original petsc vector.
3385  call vecresetarray(zipper%localVal, ierr)
3386  call echk(ierr, __file__, __line__)
3387 
3388  ! To be consistent, varsd must be zeroed since it is "used"
3389  varsd(:, ivar) = zero
3390 
3391  ! Now finish the scattering back to the actual BCs pointers (and
3392  ! thus the state variables).
3393 
3394  call vecgetarrayf90(exch%nodeValLocal, localptr, ierr)
3395  call echk(ierr, __file__, __line__)
3396 
3397  ii = 0
3398  domainloop: do nn = 1, ndom
3399  call setpointers_b(nn, 1, sps)
3400  bocoloop: do mm = 1, nbocos
3401  if (((bctype(mm) == subsonicinflow .or. &
3402  bctype(mm) == supersonicinflow) .and. isinflow) &
3403  .or. &
3404  (bctype(mm) == subsonicoutflow .or. &
3405  bctype(mm) == supersonicoutflow) .and. (.not. isinflow)) then
3406 
 ! NOTE(review): reverse mode pairs setpointers_b with
 ! setbcpointers_d — presumably the _d BC-pointer association
 ! is valid for both AD modes; confirm against setBCPointers_d.
3407  call setbcpointers_d(mm, .true.)
3408  ibeg = bcdata(mm)%inBeg; iend = bcdata(mm)%inEnd
3409  jbeg = bcdata(mm)%jnBeg; jend = bcdata(mm)%jnEnd
3410  do j = jbeg, jend
3411  do i = ibeg, iend
3412  ii = ii + 1
3413  select case (ivar)
3414  case (irho, ivx, ivy, ivz)
 ! Transpose of the forward 8-cell average: distribute
 ! one eighth of the node seed to each surrounding cell.
3415  tmp = eighth * localptr(ii)
3416  ww1d(i, j, ivar) = ww1d(i, j, ivar) + tmp
3417  ww1d(i + 1, j, ivar) = ww1d(i + 1, j, ivar) + tmp
3418  ww1d(i, j + 1, ivar) = ww1d(i, j + 1, ivar) + tmp
3419  ww1d(i + 1, j + 1, ivar) = ww1d(i + 1, j + 1, ivar) + tmp
3420 
3421  ww2d(i, j, ivar) = ww2d(i, j, ivar) + tmp
3422  ww2d(i + 1, j, ivar) = ww2d(i + 1, j, ivar) + tmp
3423  ww2d(i, j + 1, ivar) = ww2d(i, j + 1, ivar) + tmp
3424  ww2d(i + 1, j + 1, ivar) = ww2d(i + 1, j + 1, ivar) + tmp
3425  case (izippflowp)
3426  tmp = eighth * localptr(ii)
3427  pp1d(i, j) = pp1d(i, j) + tmp
3428  pp1d(i + 1, j) = pp1d(i + 1, j) + tmp
3429  pp1d(i, j + 1) = pp1d(i, j + 1) + tmp
3430  pp1d(i + 1, j + 1) = pp1d(i + 1, j + 1) + tmp
3431 
3432  pp2d(i, j) = pp2d(i, j) + tmp
3433  pp2d(i + 1, j) = pp2d(i + 1, j) + tmp
3434  pp2d(i, j + 1) = pp2d(i, j + 1) + tmp
3435  pp2d(i + 1, j + 1) = pp2d(i + 1, j + 1) + tmp
3436 
3437  case (izippflowgamma)
3438  ! gamma is not currently active
3439 
3440  case (izippflowsface)
3441  if (addgridvelocities) then
3442  tmp = fourth * localptr(ii)
3443  sfaced(i, j) = sfaced(i, j) + tmp
3444  sfaced(i + 1, j) = sfaced(i + 1, j) + tmp
3445  sfaced(i, j + 1) = sfaced(i, j + 1) + tmp
3446  sfaced(i + 1, j + 1) = sfaced(i + 1, j + 1) + tmp
3447  else
3448  localptr(ii) = zero
3449  end if
 ! NOTE(review): source line 3450 — the case selector for the
 ! coordinate seeds (izippflowx/y/z) — was lost in this
 ! extraction; the accumulation below belongs to that case.
3451  xxd(i + 1, j + 1, ivar - izippflowx + 1) = &
3452  xxd(i + 1, j + 1, ivar - izippflowx + 1) + localptr(ii)
3453  end select
3454  end do
3455  end do
3456  end if
3457  end do bocoloop
3458  end do domainloop
3459 
3460  ! Return pointer to nodeValLocal
3461  call vecrestorearrayf90(exch%nodeValLocal, localptr, ierr)
3462  call echk(ierr, __file__, __line__)
3463 
3464  end do varloop
3465 
3466  end subroutine flowintegrationzippercomm_b
3467 
3468  subroutine wallintegrationzippercomm(vars, sps)
3469 
3470  ! This routine could technically be inside of the
3471  ! flowIntegrationZipper subroutine but we split it out because it
3472  ! has petsc comm stuff that will differentiate manually that
3473  ! tapenade doesn't need to see.
 !
 ! Gathers nodal wall tractions (Tp, Tv) and surface coordinates on
 ! all wall BCs and scatters them onto the wall zipper mesh into
 ! vars(:, iVar) for spectral instance sps.
3474 
3475  use constants
3476  use blockpointers, only: bcdata, ndom, bctype, nbocos
3477  use bcpointers, only: xx
3478  use oversetdata, only: zippermeshes, zippermesh
 ! NOTE(review): source lines 3479-3480 (additional use statements,
 ! presumably the modules providing bcfamexchange/familyexchange,
 ! setpointers/setbcpointers/echk/iswalltype) were lost in this
 ! extraction — restore from version control.
3481 #include <petsc/finclude/petsc.h>
3482  use petsc
3483  implicit none
3484  ! Input variables
3485  real(kind=realtype), dimension(:, :) :: vars
3486  integer(kind=intType) :: sps
3487 
3488  ! Working variables
3489  integer(kind=intType) :: ii, iVar, i, j, iBeg, iEnd, jBeg, jEnd, ierr, nn, mm
3490  real(kind=realtype), dimension(:), pointer :: localptr
3491  type(zippermesh), pointer :: zipper
3492  type(familyexchange), pointer :: exch
3493 
3494  ! Set the zipper pointer to the zipper for the wall boundary group
3495  zipper => zippermeshes(ibcgroupwalls)
3496  exch => bcfamexchange(ibcgroupwalls, sps)
3497 
3498  ! Make sure the nodal tractions are computed
3499  call computenodaltractions(sps)
3500 
3501  varloop: do ivar = 1, nzippwallcomm
3502  call vecgetarrayf90(exch%nodeValLocal, localptr, ierr)
3503  call echk(ierr, __file__, __line__)
3504 
3505  ii = 0
3506  domainloop: do nn = 1, ndom
3507  call setpointers(nn, 1, sps)
3508  bocoloop: do mm = 1, nbocos
3509  if (iswalltype(bctype(mm))) then
3510  call setbcpointers(mm, .true.)
3511  ibeg = bcdata(mm)%inBeg; iend = bcdata(mm)%inEnd
3512  jbeg = bcdata(mm)%jnBeg; jend = bcdata(mm)%jnEnd
3513  do j = jbeg, jend
3514  do i = ibeg, iend
3515  ii = ii + 1
3516  select case (ivar)
3517 
 ! NOTE(review): source line 3518 (case selector for the
 ! pressure tractions, izippwalltpx/y/z) was lost in this
 ! extraction; the assignment below belongs to that case.
3519 
3520  localptr(ii) = bcdata(mm)%Tp(i, j, ivar)
3521 
 ! NOTE(review): source line 3522 (case selector for the
 ! viscous tractions, izippwalltvx/y/z) was lost in this
 ! extraction; the assignment below belongs to that case.
3523 
3524  localptr(ii) = bcdata(mm)%Tv(i, j, ivar - izippwalltvx + 1)
3525 
 ! NOTE(review): source line 3526 (case selector for the
 ! surface coordinates, izippwallx/y/z) was lost in this
 ! extraction; the assignment below belongs to that case.
3527 
3528  ! The +1 is due to pointer offset
3529  localptr(ii) = xx(i + 1, j + 1, ivar - izippwallx + 1)
3530 
3531  end select
3532  end do
3533  end do
3534  end if
3535  end do bocoloop
3536  end do domainloop
3537 
3538  ! Return pointer to nodeValLocal
3539  call vecrestorearrayf90(exch%nodeValLocal, localptr, ierr)
3540  call echk(ierr, __file__, __line__)
3541 
 ! Wrap the caller's vars column (zero-copy) as the scatter target.
3542  call vecplacearray(zipper%localVal, vars(:, ivar), ierr)
3543  call echk(ierr, __file__, __line__)
3544 
3545  ! Send these values to the root using the zipper scatter.
3546  call vecscatterbegin(zipper%scatter, exch%nodeValLocal, &
3547  zipper%localVal, insert_values, scatter_forward, ierr)
3548  call echk(ierr, __file__, __line__)
3549 
3550  call vecscatterend(zipper%scatter, exch%nodeValLocal, &
3551  zipper%localVal, insert_values, scatter_forward, ierr)
3552  call echk(ierr, __file__, __line__)
3553 
3554  ! Reset the original petsc vector.
3555  call vecresetarray(zipper%localVal, ierr)
3556  call echk(ierr, __file__, __line__)
3557  end do varloop
3558 
3559  end subroutine wallintegrationzippercomm
3560 
3561  subroutine wallintegrationzippercomm_d(vars, varsd, sps)
3562 
3563  ! Forward mode linearization of the wallIntegrationZipperComm
 ! Computes the primal values (via the primal routine) and the
 ! traction derivatives, then scatters the nodal seeds (bcdatad%Tp,
 ! bcdatad%Tv, xxd) into varsd(:, iVar).
3564 
3565  use constants
3566  use blockpointers, only: bcdatad, bcdata, nbocos, ndom, bctype
3567  use bcpointers, only: xxd
3568  use oversetdata, only: zippermeshes, zippermesh
 ! NOTE(review): source lines 3569-3570 (additional use statements,
 ! presumably the modules providing bcfamexchange/familyexchange and
 ! setpointers_d/setbcpointers_d/echk/iswalltype) were lost in this
 ! extraction — restore from version control.
3571 #include <petsc/finclude/petsc.h>
3572  use petsc
3573  implicit none
3574 
3575  ! Input variables
3576  real(kind=realtype), dimension(:, :) :: vars, varsd
3577  integer(kind=intType) :: sps
3578 
3579  ! Working variables
3580  integer(kind=intType) :: ii, iVar, i, j, iBeg, iEnd, jBeg, jEnd, ierr, nn, mm
3581  real(kind=realtype), dimension(:), pointer :: localptr
3582  type(zippermesh), pointer :: zipper
3583  type(familyexchange), pointer :: exch
3584 
3585  ! Need to set the actual variables first.
3586  call wallintegrationzippercomm(vars, sps)
3587 
3588  ! Compute the derivative of the nodal tractions
3589  call computenodaltractions_d(sps)
3590 
3591  ! Set the zipper pointer to the zipper for the wall boundary group
3592  zipper => zippermeshes(ibcgroupwalls)
3593  exch => bcfamexchange(ibcgroupwalls, sps)
3594 
3595  varloop: do ivar = 1, nzippwallcomm
3596  call vecgetarrayf90(exch%nodeValLocal, localptr, ierr)
3597  call echk(ierr, __file__, __line__)
3598 
3599  ii = 0
3600  domainloop: do nn = 1, ndom
3601  call setpointers_d(nn, 1, sps)
3602  bocoloop: do mm = 1, nbocos
3603  if (iswalltype(bctype(mm))) then
3604  call setbcpointers_d(mm, .true.)
3605  ibeg = bcdata(mm)%inBeg; iend = bcdata(mm)%inEnd
3606  jbeg = bcdata(mm)%jnBeg; jend = bcdata(mm)%jnEnd
3607  do j = jbeg, jend
3608  do i = ibeg, iend
3609  ii = ii + 1
3610  select case (ivar)
3611 
 ! NOTE(review): source line 3612 (case selector for the
 ! pressure tractions, izippwalltpx/y/z) was lost in this
 ! extraction; the assignment below belongs to that case.
3613 
3614  localptr(ii) = bcdatad(mm)%Tp(i, j, ivar)
3615 
 ! NOTE(review): source line 3616 (case selector for the
 ! viscous tractions, izippwalltvx/y/z) was lost in this
 ! extraction; the assignment below belongs to that case.
3617 
3618  localptr(ii) = bcdatad(mm)%Tv(i, j, ivar - izippwalltvx + 1)
3619 
 ! NOTE(review): source line 3620 (case selector for the
 ! surface coordinates, izippwallx/y/z) was lost in this
 ! extraction; the assignment below belongs to that case.
3621 
3622  ! The +1 is due to pointer offset
3623  localptr(ii) = xxd(i + 1, j + 1, ivar - izippwallx + 1)
3624 
3625  end select
3626  end do
3627  end do
3628  end if
3629  end do bocoloop
3630  end do domainloop
3631 
3632  ! Return pointer to nodeValLocal
3633  call vecrestorearrayf90(exch%nodeValLocal, localptr, ierr)
3634  call echk(ierr, __file__, __line__)
3635 
 ! Wrap the varsd column (zero-copy) as the scatter target.
3636  call vecplacearray(zipper%localVal, varsd(:, ivar), ierr)
3637  call echk(ierr, __file__, __line__)
3638 
3639  ! Send these values to the root using the zipper scatter.
3640  call vecscatterbegin(zipper%scatter, exch%nodeValLocal, &
3641  zipper%localVal, insert_values, scatter_forward, ierr)
3642  call echk(ierr, __file__, __line__)
3643 
3644  call vecscatterend(zipper%scatter, exch%nodeValLocal, &
3645  zipper%localVal, insert_values, scatter_forward, ierr)
3646  call echk(ierr, __file__, __line__)
3647 
3648  ! Reset the original petsc vector.
3649  call vecresetarray(zipper%localVal, ierr)
3650  call echk(ierr, __file__, __line__)
3651  end do varloop
3652 
3653  end subroutine wallintegrationzippercomm_d
3654 
3655  subroutine wallintegrationzippercomm_b(vars, varsd, sps)
3656 
3657  ! Reverse mode linearization of the wallIntegrationZipperComm
 ! Scatters the seeds in varsd(:, iVar) back to the wall nodes
 ! (reverse scatter with add_values), zeroes varsd, writes the
 ! nodal seeds into bcdatad%Tp/%Tv and accumulates into xxd, and
 ! finally propagates the traction seeds via
 ! computenodaltractions_b.
3658 
3659  use constants
3660  use blockpointers, only: bcdatad, bcdata, nbocos, ndom, bctype
3661  use bcpointers, only: xxd
3662  use oversetdata, only: zippermeshes, zippermesh
 ! NOTE(review): source lines 3663-3664 (additional use statements,
 ! presumably the modules providing bcfamexchange/familyexchange and
 ! setpointers_b/setbcpointers_d/echk/iswalltype) were lost in this
 ! extraction — restore from version control.
3665 #include <petsc/finclude/petsc.h>
3666  use petsc
3667  implicit none
3668 
3669  ! Input variables
3670  real(kind=realtype), dimension(:, :) :: vars, varsd
3671  integer(kind=intType) :: sps
3672 
3673  ! Working variables
3674  integer(kind=intType) :: ii, iVar, i, j, iBeg, iEnd, jBeg, jEnd, ierr, nn, mm
3675  real(kind=realtype), dimension(:), pointer :: localptr
3676  type(zippermesh), pointer :: zipper
3677  type(familyexchange), pointer :: exch
3678 
3679  ! Set the zipper pointer to the zipper for the wall boundary group
3680  zipper => zippermeshes(ibcgroupwalls)
3681  exch => bcfamexchange(ibcgroupwalls, sps)
3682 
3683  ! Run the var exchange loop backwards:
3684  varloop: do ivar = 1, nzippwallcomm
3685 
3686  ! Zero the vector we are scattering into:
3687  call vecset(exch%nodeValLocal, zero, ierr)
3688  call echk(ierr, __file__, __line__)
3689 
3690  call vecplacearray(zipper%localVal, varsd(:, ivar), ierr)
3691  call echk(ierr, __file__, __line__)
3692 
3693  ! Send these values to the root using the zipper scatter.
 ! (Reverse scatter: seeds flow from the zipper back to the
 ! local node vector, accumulating with add_values.)
3694  call vecscatterbegin(zipper%scatter, zipper%localVal, &
3695  exch%nodeValLocal, add_values, scatter_reverse, ierr)
3696  call echk(ierr, __file__, __line__)
3697 
3698  call vecscatterend(zipper%scatter, zipper%localVal, &
3699  exch%nodeValLocal, add_values, scatter_reverse, ierr)
3700  call echk(ierr, __file__, __line__)
3701 
3702  ! Reset the original petsc vector.
3703  call vecresetarray(zipper%localVal, ierr)
3704  call echk(ierr, __file__, __line__)
3705 
3706  ! To be consistent, varsd must be zeroed since it is "used"
3707  varsd(:, ivar) = zero
3708 
3709  ! Now finish the scattering back to the actual BCs pointers (and
3710  ! thus the state variables).
3711 
3712  call vecgetarrayf90(exch%nodeValLocal, localptr, ierr)
3713  call echk(ierr, __file__, __line__)
3714 
3715  ii = 0
3716  domainloop: do nn = 1, ndom
3717  call setpointers_b(nn, 1, sps)
3718  bocoloop: do mm = 1, nbocos
3719  if (iswalltype(bctype(mm))) then
 ! NOTE(review): reverse mode pairs setpointers_b with
 ! setbcpointers_d — presumably the _d BC-pointer association
 ! is valid for both AD modes; confirm against setBCPointers_d.
3720  call setbcpointers_d(mm, .true.)
3721  ibeg = bcdata(mm)%inBeg; iend = bcdata(mm)%inEnd
3722  jbeg = bcdata(mm)%jnBeg; jend = bcdata(mm)%jnEnd
3723  do j = jbeg, jend
3724  do i = ibeg, iend
3725  ii = ii + 1
3726  select case (ivar)
 ! NOTE(review): source line 3727 (case selector for the
 ! pressure tractions, izippwalltpx/y/z) was lost in this
 ! extraction; the assignment below belongs to that case.
3728 
3729  bcdatad(mm)%Tp(i, j, ivar) = localptr(ii)
3730 
 ! NOTE(review): source line 3731 (case selector for the
 ! viscous tractions, izippwalltvx/y/z) was lost in this
 ! extraction; the assignment below belongs to that case.
3732 
3733  bcdatad(mm)%Tv(i, j, ivar - izippwalltvx + 1) = localptr(ii)
3734 
 ! NOTE(review): source line 3735 (case selector for the
 ! surface coordinates, izippwallx/y/z) was lost in this
 ! extraction; the accumulation below belongs to that case.
3736 
3737  ! The +1 is due to pointer offset
3738  xxd(i + 1, j + 1, ivar - izippwallx + 1) = &
3739  xxd(i + 1, j + 1, ivar - izippwallx + 1) + localptr(ii)
3740 
3741  end select
3742  end do
3743  end do
3744  end if
3745  end do bocoloop
3746  end do domainloop
3747 
3748  ! Return pointer to nodeValLocal
3749  call vecrestorearrayf90(exch%nodeValLocal, localptr, ierr)
3750  call echk(ierr, __file__, __line__)
3751 
3752  end do varloop
3753 
3754  ! Compute the derivatives of the nodal tractions. These will
3755  ! accumulate the seeds onto bcDatad%Fp, bcDatad%Fv and bcDatad%area
 ! NOTE(review): the original comment listed bcDatad%Fv twice; the
 ! first entry is presumably bcDatad%Fp — confirm in getForces.F90.
3756 
3757  call computenodaltractions_b(sps)
3758 
3759  end subroutine wallintegrationzippercomm_b
3760 end module haloexchange
subroutine computenodaltractions(sps)
Definition: getForces.F90:558
subroutine computenodaltractions_b(sps)
Definition: getForces.F90:889
subroutine computenodaltractions_d(sps)
Definition: getForces.F90:614
Definition: BCData.F90:1
real(kind=realtype), dimension(:, :), pointer gamma2
Definition: BCPointers.F90:14
real(kind=realtype), dimension(:, :), pointer pp1d
Definition: BCPointers.F90:25
real(kind=realtype), dimension(:, :, :), pointer ww1d
Definition: BCPointers.F90:24
real(kind=realtype), dimension(:, :, :), pointer ww2d
Definition: BCPointers.F90:24
real(kind=realtype), dimension(:, :), pointer pp2d
Definition: BCPointers.F90:25
real(kind=realtype), dimension(:, :), pointer pp1
Definition: BCPointers.F90:11
real(kind=realtype), dimension(:, :), pointer sfaced
Definition: BCPointers.F90:31
real(kind=realtype), dimension(:, :), pointer sface
Definition: BCPointers.F90:17
real(kind=realtype), dimension(:, :, :), pointer ww1
Definition: BCPointers.F90:10
real(kind=realtype), dimension(:, :), pointer pp2
Definition: BCPointers.F90:11
real(kind=realtype), dimension(:, :, :), pointer xx
Definition: BCPointers.F90:16
real(kind=realtype), dimension(:, :), pointer gamma1
Definition: BCPointers.F90:14
real(kind=realtype), dimension(:, :, :), pointer ww2
Definition: BCPointers.F90:10
real(kind=realtype), dimension(:, :, :), pointer xxd
Definition: BCPointers.F90:29
Definition: block.F90:1
integer(kind=inttype) ndom
Definition: block.F90:761
type(blocktype), dimension(:, :, :), allocatable, target flowdomsd
Definition: block.F90:772
type(blocktype), dimension(:, :, :), allocatable, target flowdoms
Definition: block.F90:771
integer(kind=inttype), dimension(:, :), pointer orphans
real(kind=realtype), dimension(:, :, :), pointer gamma
logical addgridvelocities
integer(kind=inttype) norphans
real(kind=realtype), dimension(:, :, :), pointer p
real(kind=realtype), dimension(:, :, :, :), pointer w
type(bcdatatype), dimension(:), pointer bcdatad
integer(kind=inttype), dimension(:, :, :), pointer iblank
integer(kind=inttype) jb
integer(kind=inttype) kb
real(kind=realtype), dimension(:, :, :), pointer rlv
integer(kind=inttype), dimension(:), pointer bcfaceid
integer(kind=inttype) nbocos
real(kind=realtype), dimension(:, :, :), pointer rev
integer(kind=inttype), dimension(:), pointer bctype
integer(kind=inttype) ib
real(kind=realtype), dimension(:), allocatable recvbuffer
integer, dimension(:), allocatable recvrequests
real(kind=realtype), dimension(:), allocatable sendbuffer
integer, dimension(:), allocatable sendrequests
type(commtype), dimension(:), allocatable commpatternnode_1st
type(internalcommtype), dimension(:), allocatable internalnode_1st
integer adflow_comm_world
integer(kind=inttype), parameter cptempcurvefits
Definition: constants.F90:124
real(kind=realtype), parameter zero
Definition: constants.F90:71
integer(kind=inttype), parameter izippflowy
Definition: constants.F90:508
integer(kind=inttype), parameter supersonicinflow
Definition: constants.F90:264
integer(kind=inttype), parameter izippwalltpy
Definition: constants.F90:515
integer(kind=inttype), parameter supersonicoutflow
Definition: constants.F90:266
real(kind=realtype), parameter eighth
Definition: constants.F90:84
integer(kind=inttype), parameter izippwallz
Definition: constants.F90:523
integer, parameter irho
Definition: constants.F90:34
integer(kind=inttype), parameter izippflowx
Definition: constants.F90:507
integer, parameter ivx
Definition: constants.F90:35
integer(kind=inttype), parameter izippwalltvy
Definition: constants.F90:518
integer, parameter irhoe
Definition: constants.F90:38
integer(kind=inttype), parameter subsonicoutflow
Definition: constants.F90:267
integer(kind=inttype), parameter ibcgroupoutflow
Definition: constants.F90:311
integer(kind=inttype), parameter izippflowsface
Definition: constants.F90:506
integer(kind=inttype), parameter izippwally
Definition: constants.F90:522
integer(kind=inttype), parameter izippwalltpz
Definition: constants.F90:516
integer, parameter ivz
Definition: constants.F90:37
integer(kind=inttype), parameter ibcgroupinflow
Definition: constants.F90:310
real(kind=realtype), parameter fourth
Definition: constants.F90:82
integer(kind=inttype), parameter izippwalltvx
Definition: constants.F90:517
integer(kind=inttype), parameter izippflowgamma
Definition: constants.F90:505
integer(kind=inttype), parameter izippwalltvz
Definition: constants.F90:519
integer(kind=inttype), parameter subsonicinflow
Definition: constants.F90:265
integer(kind=inttype), parameter nzippflowcomm
Definition: constants.F90:502
integer(kind=inttype), parameter ibcgroupwalls
Definition: constants.F90:309
integer(kind=inttype), parameter nzippwallcomm
Definition: constants.F90:512
integer, parameter ivy
Definition: constants.F90:36
integer(kind=inttype), parameter izippwalltpx
Definition: constants.F90:514
integer(kind=inttype), parameter izippflowp
Definition: constants.F90:504
integer(kind=inttype), parameter izippwallx
Definition: constants.F90:521
integer(kind=inttype), parameter izippflowz
Definition: constants.F90:509
subroutine computeetotblock_b(istart, iend, jstart, jend, kstart, kend, correctfork)
subroutine computeetotblock_d(istart, iend, jstart, jend, kstart, kend, correctfork)
subroutine computeetotblock(iStart, iEnd, jStart, jEnd, kStart, kEnd, correctForK)
Definition: flowUtils.F90:553
real(kind=realtype) gammainf
real(kind=realtype) pinfcorr
real(kind=realtype), dimension(:), allocatable winf
real(kind=realtype) muinf
subroutine wallintegrationzippercomm(vars, sps)
subroutine wallintegrationzippercomm_b(vars, varsd, sps)
subroutine woverset_b(level, start, end, commPressure, commVarGamma, commLamVis, commEddyVis, commPattern, internal)
subroutine whalo2(level, start, end, commPressure, commGamma, commViscous)
subroutine correctperiodiccoor(level, sp, nPeriodic, periodicData)
subroutine setcommpointers(start, end, commPressure, commVarGamma, commLamVis, commEddyVis, level, sps, derivPointers, nVar, varOffset)
subroutine woverset(level, start, end, commPressure, commVarGamma, commLamVis, commEddyVis, commPattern, internal)
subroutine whalo2_d(level, start, end, commPressure, commGamma, commViscous)
subroutine woversetgeneric_d(nVar, level, sps, commPattern, Internal)
subroutine flowintegrationzippercomm_b(isInflow, vars, varsd, sps)
subroutine whalo1to1intgeneric_b(nVar, level, sps, commPattern, internal)
subroutine whalo1to1realgeneric(nVar, level, sps, commPattern, internal)
subroutine orphanaverage(wstart, wend, calcPressure, calcGamma, calcLamVis, calcEddyVis)
subroutine whalo1to1intgeneric(nVar, level, sps, commPattern, internal)
subroutine woverset_d(level, start, end, commPressure, commVarGamma, commLamVis, commEddyVis, commPattern, internal)
subroutine whalo1to1_d(level, start, end, commPressure, commVarGamma, commLamVis, commEddyVis, commPattern, internal)
subroutine wallintegrationzippercomm_d(vars, varsd, sps)
subroutine whalo1to1_b(level, start, end, commPressure, commVarGamma, commLamVis, commEddyVis, commPattern, internal)
subroutine whalo1(level, start, end, commPressure, commGamma, commViscous)
Definition: haloExchange.F90:7
subroutine whalo1to1realgeneric_b(nVar, level, sps, commPattern, internal)
subroutine woversetgeneric(nVar, level, sps, commPattern, Internal)
subroutine flowintegrationzippercomm_d(isInflow, vars, varsd, sps)
subroutine correctperiodicvelocity(level, sp, nPeriodic, periodicData)
subroutine woversetgeneric_b(nVar, level, sps, commPattern, Internal)
subroutine exchangecoor_d(level)
subroutine flowintegrationzippercomm(isInflow, vars, sps)
subroutine whalo2_b(level, start, end, commPressure, commGamma, commViscous)
subroutine exchangecoor(level)
subroutine reshalo1(level, start, end)
subroutine whalo1to1(level, start, end, commPressure, commVarGamma, commLamVis, commEddyVis, commPattern, internal)
subroutine exchangecoor_b(level)
real(kind=realtype) eddyvisinfratio
Definition: inputParam.F90:597
integer(kind=inttype) cpmodel
Definition: inputParam.F90:584
integer(kind=inttype) ntimeintervalsspectral
Definition: inputParam.F90:645
type(zippermesh), dimension(nfamexchange), target zippermeshes
Definition: overset.F90:376
type(familyexchange), dimension(:, :), allocatable, target bcfamexchange
Definition: utils.F90:1
logical function iswalltype(bType)
Definition: utils.F90:1705
subroutine setbcpointers(nn, spatialPointers)
Definition: utils.F90:882
subroutine setpointers_d(nn, level, sps)
Definition: utils.F90:3564
subroutine setbcpointers_d(nn, spatialpointers)
Definition: utils.F90:2049
subroutine echk(errorcode, file, line)
Definition: utils.F90:5722
subroutine setpointers_b(nn, level, sps)
Definition: utils.F90:3553
logical function getcorrectfork()
Definition: utils.F90:487
subroutine setpointers(nn, mm, ll)
Definition: utils.F90:3237