ADflow  v1.0
ADflow is a finite volume RANS solver tailored for gradient-based aerodynamic design optimization.
userSurfaceIntegrations.F90
Go to the documentation of this file.
2 
3  use constants
6 contains
7 
8  subroutine integrateusersurfaces(localValues, famList, sps)
 ! Integrate flow quantities over every user-supplied integration
 ! surface whose family ID is in famList, accumulating into
 ! localValues. Surface data are gathered to the root processor
 ! (myid == 0), which alone performs the zipper integration.
 !
 ! localValues (inout): nLocalValues running integrated quantities,
 !                      updated in place on the root processor.
 ! famList     (in)   : family IDs to include in the integration.
 ! sps         (in)   : spectral (time) instance index.
 !
 ! NOTE(review): this listing was recovered from a doxygen page that
 ! dropped original lines 12-13, 15 and 17 (additional use
 ! statements) -- confirm against the repository source.
9 
10  use constants
11  use block, onlY: flowdoms, ndom
14  use utils, only: echk, mynorm2
16  use sorting, only: faminlist
18  use utils, only: terminate
19  implicit none
20 
21  ! Input Parameters
22  real(kind=realtype), dimension(nLocalValues), intent(inout) :: localvalues
23  integer(kind=intType), dimension(:), intent(in) :: famList
24  integer(kind=intType), intent(in) :: sps
25 
26  ! Working parameters
27  integer(kind=intType) :: iSurf, i, j, k, jj, ierr, nn, iDim, nPts
28  real(kind=realtype), dimension(:), allocatable :: recvbuffer1, recvbuffer2
29  real(kind=realtype), dimension(:, :), allocatable :: vars
30  integer(kind=intType), dimension(:), allocatable :: fams
31  logical, dimension(:), allocatable :: ptValid
32  type(userintsurf), pointer :: surf
33 
 ! Early out: nothing registered, nothing to integrate.
34  if (nuserintsurfs == 0) then
35  return ! Nothing to do
36  end if
37 
38  ! Set the pointers for the required communication variables
39  domainloop: do nn = 1, ndom
40  if (flowdoms(nn, 1, sps)%addGridVelocities) then
 ! NOTE(review): the two adjacent character literals below are not
 ! joined by '//' and lack a separating space; as shown the message
 ! would read "...integrationson with...". The scrape likely dropped
 ! a '// &' -- confirm against the original source.
41  call terminate("userSurfaceIntegrations", "Cannot use user-supplied surface integrations"&
42  &"on with moving grids")
43  end if
44 
 ! Expose the state (w), pressure, gamma and coordinates through the
 ! generic realCommVars pointer slots used by the comm routines.
45  flowdoms(nn, 1, sps)%realCommVars(irho)%var => flowdoms(nn, 1, sps)%w(:, :, :, irho)
46  flowdoms(nn, 1, sps)%realCommVars(ivx)%var => flowdoms(nn, 1, sps)%w(:, :, :, ivx)
47  flowdoms(nn, 1, sps)%realCommVars(ivy)%var => flowdoms(nn, 1, sps)%w(:, :, :, ivy)
48  flowdoms(nn, 1, sps)%realCommVars(ivz)%var => flowdoms(nn, 1, sps)%w(:, :, :, ivz)
49  flowdoms(nn, 1, sps)%realCommVars(izippflowp)%var => flowdoms(nn, 1, sps)%P(:, :, :)
50  flowdoms(nn, 1, sps)%realCommVars(izippflowgamma)%var => flowdoms(nn, 1, sps)%gamma(:, :, :)
51  ! flowDoms(nn, 1, sps)%realCommVars(iZippFlowSface)%var => Not Implemented
52 
53  flowdoms(nn, 1, sps)%realCommVars(izippflowx)%var => flowdoms(nn, 1, sps)%x(:, :, :, 1)
54  flowdoms(nn, 1, sps)%realCommVars(izippflowy)%var => flowdoms(nn, 1, sps)%x(:, :, :, 2)
55  flowdoms(nn, 1, sps)%realCommVars(izippflowz)%var => flowdoms(nn, 1, sps)%x(:, :, :, 3)
56  end do domainloop
57 
58  masterloop: do isurf = 1, nuserintsurfs
59 
60  ! Pointer for easier reading
61  surf => userintsurfs(isurf)
62 
63  ! We will make a short-cut here: By definition user supplied
64  ! surfaces have a fixed family, we won't do anything if we
65  ! are not told to deal with this surface.
66 
67  faminclude: if (faminlist(surf%famID, famlist)) then
68  npts = size(surf%pts, 2)
69 
70  ! Communicate the face values and the nodal values
 ! Buffer layout (from the slicing below): recvbuffer1 holds 6 flow
 ! variables per point (irho..izippflowgamma), recvbuffer2 holds 3
 ! coordinates per point. Non-root procs pass zero-sized buffers;
 ! presumably only the root receives data -- verify against
 ! commuserintegrationsurfacevars.
71  if (myid == 0) then
72  allocate (recvbuffer1(6 * npts), recvbuffer2(3 * npts))
73  else
74  allocate (recvbuffer1(0), recvbuffer2(0))
75  end if
76 
77  call commuserintegrationsurfacevars(recvbuffer1, irho, izippflowgamma, surf%flowComm)
78  call commuserintegrationsurfacevars(recvbuffer2, izippflowx, izippflowz, surf%nodeComm)
79 
80  ! *Finally* we can do the actual integrations
81  if (myid == 0) then
82 
83  ! Allocate some temporary data needed to supply to the
84  ! zipper integration routine.
85  allocate (ptvalid(npts), vars(npts, nzippflowcomm), fams(size(surf%conn, 2)))
86 
87  ! Initialize ptValid to True. If we find that it isn't,
88  ! we'll permanently set that point to false. This could
89  ! come from either the node or the flow comms.
90 
91  ptvalid = .true.
92  ! Prepare for the "zipper" integration call. We have to
93  ! re-order the data according to the "inv" array in each
94  ! of the two comms.
95  do i = 1, npts
96 
97  ! Flow Variables
98  j = surf%flowComm%inv(i)
99  vars(j, irho:izippflowgamma) = recvbuffer1(6 * (i - 1) + irho:6 * (i - 1) + izippflowgamma)
100 
101  if (.not. surf%flowComm%valid(i)) then
102  ptvalid(j) = .false.
103  end if
104 
105  ! Sface is not implemented. To correctly do this,
106  ! interpolate the three components of 's', do the dot
107  ! product with the local normal to get the sFace value.
108  vars(j, izippflowsface) = zero
109 
110  ! Node Comm Values
111  j = surf%nodeComm%inv(i)
112  vars(j, izippflowx:izippflowz) = recvbuffer2(3 * i - 2:3 * i)
113 
114  ! The additional pt-valid array
115  if (.not. surf%nodeComm%valid(i)) then
116  ptvalid(j) = .false.
117  end if
118  end do
119 
120  ! The family array is all the same value:
121  fams = surf%famID
122  ! Perform the actual integration
123  call flowintegrationzipper(surf%isInflow, surf%conn, fams, vars, localvalues, famlist, sps, ptvalid)
124  deallocate (ptvalid, vars, fams)
125  end if
126  deallocate (recvbuffer1, recvbuffer2)
127  end if faminclude
128  end do masterloop
129  end subroutine integrateusersurfaces
130 #ifndef USE_COMPLEX
131  subroutine integrateusersurfaces_d(localValues, localValuesd, famList, sps)
 ! Forward-mode (tangent) differentiated variant of
 ! integrateusersurfaces: the same integration is performed while the
 ! derivative seeds are carried alongside in the *d arrays
 ! (flowdomsd, recvbuffer*d, varsd) and accumulated into localValuesd
 ! via flowintegrationzipper_d.
 !
 ! localValues  (inout): primal integrated quantities.
 ! localValuesd (inout): corresponding derivative quantities.
 ! famList      (in)   : family IDs to include.
 ! sps          (in)   : spectral (time) instance index.
 !
 ! NOTE(review): the doxygen scrape dropped original lines 135-136
 ! and 140 (additional use statements) -- confirm against the repo.
132 
133  use constants
134  use block, onlY: flowdoms, flowdomsd, ndom
137  use utils, only: echk, mynorm2
138  use flowutils, only: computeptot, computettot
139  use sorting, only: faminlist
141  use utils, only: terminate
142  implicit none
143 
144  ! Input Parameters
145  real(kind=realtype), dimension(nLocalValues), intent(inout) :: localvalues, localvaluesd
146  integer(kind=intType), dimension(:), intent(in) :: famList
147  integer(kind=intType), intent(in) :: sps
148 
149  ! Working parameters
150  integer(kind=intType) :: iSurf, i, j, k, jj, ierr, nn, iDim, nPts
151  real(kind=realtype), dimension(:), allocatable :: recvbuffer1, recvbuffer2
152  real(kind=realtype), dimension(:), allocatable :: recvbuffer1d, recvbuffer2d
153  real(kind=realtype), dimension(:, :), allocatable :: vars, varsd
154  integer(kind=intType), dimension(:), allocatable :: fams
155  logical, dimension(:), allocatable :: ptValid
156  type(userintsurf), pointer :: surf
157 
158  if (nuserintsurfs == 0) then
159  return ! Nothing to do
160  end if
161 
162  ! Set the pointers for the required communication variables
163  domainloop: do nn = 1, ndom
164  if (flowdoms(nn, 1, sps)%addGridVelocities) then
 ! NOTE(review): adjacent string literals below appear to be missing
 ! a '//' and a separating space (see the primal routine) -- confirm.
165  call terminate("userSurfaceIntegrations", "Cannot use user-supplied surface integrations"&
166  &"on with moving grids")
167  end if
168 
 ! Primal variables in slots 1..nzippflowcomm...
169  flowdoms(nn, 1, sps)%realCommVars(irho)%var => flowdoms(nn, 1, sps)%w(:, :, :, irho)
170  flowdoms(nn, 1, sps)%realCommVars(ivx)%var => flowdoms(nn, 1, sps)%w(:, :, :, ivx)
171  flowdoms(nn, 1, sps)%realCommVars(ivy)%var => flowdoms(nn, 1, sps)%w(:, :, :, ivy)
172  flowdoms(nn, 1, sps)%realCommVars(ivz)%var => flowdoms(nn, 1, sps)%w(:, :, :, ivz)
173  flowdoms(nn, 1, sps)%realCommVars(izippflowp)%var => flowdoms(nn, 1, sps)%P(:, :, :)
174  flowdoms(nn, 1, sps)%realCommVars(izippflowgamma)%var => flowdoms(nn, 1, sps)%gamma(:, :, :)
175  ! flowDoms(nn, 1, sps)%realCommVars(iZippFlowSface)%var => Not Implemented
176 
177  flowdoms(nn, 1, sps)%realCommVars(izippflowx)%var => flowdoms(nn, 1, sps)%x(:, :, :, 1)
178  flowdoms(nn, 1, sps)%realCommVars(izippflowy)%var => flowdoms(nn, 1, sps)%x(:, :, :, 2)
179  flowdoms(nn, 1, sps)%realCommVars(izippflowz)%var => flowdoms(nn, 1, sps)%x(:, :, :, 3)
180 
 ! ...and the matching derivative seeds, offset by nzippflowcomm,
 ! taken from the differentiated domain data flowdomsd.
181  flowdoms(nn, 1, sps)%realCommVars(irho + nzippflowcomm)%var => flowdomsd(nn, 1, sps)%w(:, :, :, irho)
182  flowdoms(nn, 1, sps)%realCommVars(ivx + nzippflowcomm)%var => flowdomsd(nn, 1, sps)%w(:, :, :, ivx)
183  flowdoms(nn, 1, sps)%realCommVars(ivy + nzippflowcomm)%var => flowdomsd(nn, 1, sps)%w(:, :, :, ivy)
184  flowdoms(nn, 1, sps)%realCommVars(ivz + nzippflowcomm)%var => flowdomsd(nn, 1, sps)%w(:, :, :, ivz)
185  flowdoms(nn, 1, sps)%realCommVars(izippflowp + nzippflowcomm)%var => flowdomsd(nn, 1, sps)%P(:, :, :)
186  flowdoms(nn, 1, sps)%realCommVars(izippflowgamma + nzippflowcomm)%var => &
187  flowdomsd(nn, 1, sps)%gamma(:, :, :)
188  ! flowDoms(nn, 1, sps)%realCommVars(iZippFlowSface+nZippFlowComm)%var => Not Implemented
189 
190  flowdoms(nn, 1, sps)%realCommVars(izippflowx + nzippflowcomm)%var => flowdomsd(nn, 1, sps)%x(:, :, :, 1)
191  flowdoms(nn, 1, sps)%realCommVars(izippflowy + nzippflowcomm)%var => flowdomsd(nn, 1, sps)%x(:, :, :, 2)
192  flowdoms(nn, 1, sps)%realCommVars(izippflowz + nzippflowcomm)%var => flowdomsd(nn, 1, sps)%x(:, :, :, 3)
193 
194  end do domainloop
195 
196  masterloop: do isurf = 1, nuserintsurfs
197 
198  ! Pointer for easier reading
199  surf => userintsurfs(isurf)
200 
201  ! We will make a short-cut here: By definition user supplied
202  ! surfaces have a fixed family, we won't do anything if we
203  ! are not told to deal with this surface.
204 
205  faminclude: if (faminlist(surf%famID, famlist)) then
206  npts = size(surf%pts, 2)
207 
208  ! Communicate the face values and the nodal values
 ! 6 flow variables + 6 seeds per point in buffer 1; 3 coordinates
 ! + 3 seeds per point in buffer 2. Non-root procs get size-0 buffers.
209  if (myid == 0) then
210  allocate (recvbuffer1(6 * npts), recvbuffer2(3 * npts))
211  allocate (recvbuffer1d(6 * npts), recvbuffer2d(3 * npts))
212  else
213  allocate (recvbuffer1(0), recvbuffer2(0))
214  allocate (recvbuffer1d(0), recvbuffer2d(0))
215  end if
216 
217  call commuserintegrationsurfacevars_d(recvbuffer1, recvbuffer1d, irho, izippflowgamma, surf%flowComm)
218  call commuserintegrationsurfacevars_d(recvbuffer2, recvbuffer2d, izippflowx, izippflowz, surf%nodeComm)
219 
220  ! *Finally* we can do the actual integrations
221  if (myid == 0) then
222 
223  ! Allocate some temporary data needed to supply to the
224  ! zipper integration routine.
225  allocate (ptvalid(npts), vars(npts, nzippflowcomm), &
226  varsd(npts, nzippflowcomm), fams(size(surf%conn, 2)))
227 
228  ! Initialize ptValid to True. If we find that it isn't,
229  ! we'll permanently set that point to false. This could
230  ! come from either the node or the flow comms.
231 
232  ptvalid = .true.
233 
234  ! Prepare for the "zipper" integration call. We have to
235  ! re-order the data according to the "inv" array in each
236  ! of the two comms.
237  do i = 1, npts
238 
239  ! Flow Variables
240  j = surf%flowComm%inv(i)
241 
242  vars(j, irho:izippflowgamma) = recvbuffer1(6 * (i - 1) + irho:6 * (i - 1) + izippflowgamma)
243  varsd(j, irho:izippflowgamma) = recvbuffer1d(6 * (i - 1) + irho:6 * (i - 1) + izippflowgamma)
244 
245  if (.not. surf%flowComm%valid(i)) then
246  ptvalid(j) = .false.
247  end if
248 
249  ! Sface is not implemented. To correctly do this,
250  ! interpolate the three components of 's', do the dot
251  ! product with the local normal to get the sFace value.
252  vars(j, izippflowsface) = zero
253  varsd(j, izippflowsface) = zero
254 
255  ! Node Comm Values
256  j = surf%nodeComm%inv(i)
257  vars(j, izippflowx:izippflowz) = recvbuffer2(3 * i - 2:3 * i)
258  varsd(j, izippflowx:izippflowz) = recvbuffer2d(3 * i - 2:3 * i)
259 
260  ! The additional pt-valid array
261  if (.not. surf%nodeComm%valid(i)) then
262  ptvalid(j) = .false.
263  end if
264  end do
265 
266  ! The family array is all the same value:
267  fams = surf%famID
268 
269  ! Perform the actual integration
270  call flowintegrationzipper_d(surf%isInflow, surf%conn, fams, vars, &
271  varsd, localvalues, localvaluesd, &
272  famlist, sps, ptvalid)
273  deallocate (ptvalid, vars, varsd, fams)
274  end if
275  deallocate (recvbuffer1, recvbuffer2, recvbuffer1d, recvbuffer2d)
276  end if faminclude
277  end do masterloop
278  end subroutine integrateusersurfaces_d
279 
280  subroutine integrateusersurfaces_b(localValues, localValuesd, famList, sps)
 ! Reverse-mode (adjoint) differentiated variant of
 ! integrateusersurfaces. It first re-runs the primal pass, then calls
 ! flowintegrationzipper_b on the root processor to obtain the seeds
 ! varsd, accumulates them back into the receive buffers, and finally
 ! reverse-scatters those buffers to the owning processors through
 ! commuserintegrationsurfacevars_b.
 !
 ! localValues  (inout): primal integrated quantities.
 ! localValuesd (inout): incoming adjoint seeds of the outputs.
 ! famList      (in)   : family IDs to include.
 ! sps          (in)   : spectral (time) instance index.
 !
 ! NOTE(review): the doxygen scrape dropped original lines 284-285
 ! and 289 (additional use statements) -- confirm against the repo.
281 
282  use constants
283  use block, onlY: flowdoms, flowdomsd, ndom
286  use utils, only: echk, mynorm2
287  use flowutils, only: computeptot, computettot
288  use sorting, only: faminlist
290  use utils, only: terminate
291  implicit none
292 
293  ! Input Parameters
294  real(kind=realtype), dimension(nLocalValues), intent(inout) :: localvalues, localvaluesd
295  integer(kind=intType), dimension(:), intent(in) :: famList
296  integer(kind=intType), intent(in) :: sps
297 
298  ! Working parameters
299  integer(kind=intType) :: iSurf, i, j, k, jj, ierr, nn, iDim, nPts
300  real(kind=realtype), dimension(:), allocatable :: recvbuffer1, recvbuffer2
301  real(kind=realtype), dimension(:), allocatable :: recvbuffer1d, recvbuffer2d
302  real(kind=realtype), dimension(:, :), allocatable :: vars, varsd
303  integer(kind=intType), dimension(:), allocatable :: fams
304  logical, dimension(:), allocatable :: ptValid
305  type(userintsurf), pointer :: surf
306 
307  if (nuserintsurfs == 0) then
308  return ! Nothing to do
309  end if
310 
311  ! Run the forward mode code pass:
312  call integrateusersurfaces(localvalues, famlist, sps)
313 
314  ! Set the pointers for the required communication variables
315  domainloop: do nn = 1, ndom
316  if (flowdoms(nn, 1, sps)%addGridVelocities) then
 ! NOTE(review): adjacent string literals below appear to be missing
 ! a '//' and a separating space (see the primal routine) -- confirm.
317  call terminate("userSurfaceIntegrations", "Cannot use user-supplied surface integrations"&
318  &"on with moving grids")
319  end if
320 
 ! Primal variables in slots 1..nzippflowcomm; the seed pointers
 ! (offset by nzippflowcomm) target flowdomsd, so the reverse scatter
 ! accumulates directly into the differentiated domain data.
321  flowdoms(nn, 1, sps)%realCommVars(irho)%var => flowdoms(nn, 1, sps)%w(:, :, :, irho)
322  flowdoms(nn, 1, sps)%realCommVars(ivx)%var => flowdoms(nn, 1, sps)%w(:, :, :, ivx)
323  flowdoms(nn, 1, sps)%realCommVars(ivy)%var => flowdoms(nn, 1, sps)%w(:, :, :, ivy)
324  flowdoms(nn, 1, sps)%realCommVars(ivz)%var => flowdoms(nn, 1, sps)%w(:, :, :, ivz)
325  flowdoms(nn, 1, sps)%realCommVars(izippflowp)%var => flowdoms(nn, 1, sps)%P(:, :, :)
326  flowdoms(nn, 1, sps)%realCommVars(izippflowgamma)%var => flowdoms(nn, 1, sps)%gamma(:, :, :)
327  ! flowDoms(nn, 1, sps)%realCommVars(iZippFlowSface)%var => Not Implemented
328 
329  flowdoms(nn, 1, sps)%realCommVars(izippflowx)%var => flowdoms(nn, 1, sps)%x(:, :, :, 1)
330  flowdoms(nn, 1, sps)%realCommVars(izippflowy)%var => flowdoms(nn, 1, sps)%x(:, :, :, 2)
331  flowdoms(nn, 1, sps)%realCommVars(izippflowz)%var => flowdoms(nn, 1, sps)%x(:, :, :, 3)
332 
333  flowdoms(nn, 1, sps)%realCommVars(irho + nzippflowcomm)%var => flowdomsd(nn, 1, sps)%w(:, :, :, irho)
334  flowdoms(nn, 1, sps)%realCommVars(ivx + nzippflowcomm)%var => flowdomsd(nn, 1, sps)%w(:, :, :, ivx)
335  flowdoms(nn, 1, sps)%realCommVars(ivy + nzippflowcomm)%var => flowdomsd(nn, 1, sps)%w(:, :, :, ivy)
336  flowdoms(nn, 1, sps)%realCommVars(ivz + nzippflowcomm)%var => flowdomsd(nn, 1, sps)%w(:, :, :, ivz)
337  flowdoms(nn, 1, sps)%realCommVars(izippflowp + nzippflowcomm)%var => flowdomsd(nn, 1, sps)%P(:, :, :)
338  flowdoms(nn, 1, sps)%realCommVars(izippflowgamma + nzippflowcomm)%var => &
339  flowdomsd(nn, 1, sps)%gamma(:, :, :)
340  ! flowDoms(nn, 1, sps)%realCommVars(iZippFlowSface+nZippFlowComm)%var => Not Implemented
341 
342  flowdoms(nn, 1, sps)%realCommVars(izippflowx + nzippflowcomm)%var => flowdomsd(nn, 1, sps)%x(:, :, :, 1)
343  flowdoms(nn, 1, sps)%realCommVars(izippflowy + nzippflowcomm)%var => flowdomsd(nn, 1, sps)%x(:, :, :, 2)
344  flowdoms(nn, 1, sps)%realCommVars(izippflowz + nzippflowcomm)%var => flowdomsd(nn, 1, sps)%x(:, :, :, 3)
345 
346  end do domainloop
347 
348  masterloop: do isurf = 1, nuserintsurfs
349 
350  ! Pointer for easier reading
351  surf => userintsurfs(isurf)
352 
353  ! We will make a short-cut here: By definition user supplied
354  ! surfaces have a fixed family, we won't do anything if we
355  ! are not told to deal with this surface.
356 
357  faminclude: if (faminlist(surf%famID, famlist)) then
358  npts = size(surf%pts, 2)
359 
360  ! Communicate the face values and the nodal values
361  if (myid == 0) then
362  allocate (recvbuffer1(6 * npts), recvbuffer2(3 * npts))
363  allocate (recvbuffer1d(6 * npts), recvbuffer2d(3 * npts))
364  else
365  allocate (recvbuffer1(0), recvbuffer2(0))
366  allocate (recvbuffer1d(0), recvbuffer2d(0))
367  end if
368 
 ! Forward gather of the primal values only; the seed buffers are
 ! filled later by the reverse integration.
369  call commuserintegrationsurfacevars(recvbuffer1, irho, izippflowgamma, surf%flowComm)
370  call commuserintegrationsurfacevars(recvbuffer2, izippflowx, izippflowz, surf%nodeComm)
371 
372  ! *Finally* we can do the actual integrations
373  if (myid == 0) then
374 
375  ! Allocate some temporary data needed to supply to the
376  ! zipper integration routine.
377  allocate (ptvalid(npts), vars(npts, nzippflowcomm), &
378  varsd(npts, nzippflowcomm), fams(size(surf%conn, 2)))
379 
380  ! Initialize ptValid to True. If we find that it isn't,
381  ! we'll permanently set that point to false. This could
382  ! come from either the node or the flow comms.
383 
384  ptvalid = .true.
385 
 ! Clear the seed array before the reverse call accumulates into it.
386  varsd = zero
387  ! Prepare for the "zipper" integration call. We have to
388  ! re-order the data according to the "inv" array in each
389  ! of the two comms.
390  do i = 1, npts
391 
392  ! Flow Variables
393  j = surf%flowComm%inv(i)
394  vars(j, irho:izippflowgamma) = recvbuffer1(6 * (i - 1) + irho:6 * (i - 1) + izippflowgamma)
395 
396  if (.not. surf%flowComm%valid(i)) then
397  ptvalid(j) = .false.
398  end if
399 
400  ! Sface is not implemented. To correctly do this,
401  ! interpolate the three components of 's', do the dot
402  ! product with the local normal to get the sFace value.
403  vars(j, izippflowsface) = zero
404 
405  ! Node Comm Values
406  j = surf%nodeComm%inv(i)
407  vars(j, izippflowx:izippflowz) = recvbuffer2(3 * i - 2:3 * i)
408 
409  ! The additional pt-valid array
410  if (.not. surf%nodeComm%valid(i)) then
411  ptvalid(j) = .false.
412  end if
413  end do
414 
415  ! The family array is all the same value:
416  fams = surf%famID
417 
418  ! Perform the actual (reverse) integration
419  call flowintegrationzipper_b(surf%isInflow, surf%conn, fams, vars, &
420  varsd, localvalues, localvaluesd, &
421  famlist, sps, ptvalid)
422 
423  ! Accumulate into the receive buffers
424  recvbuffer1d = zero
425  recvbuffer2d = zero
426  do i = 1, npts
427 
428  ! Accumulate back to the buffer
429  j = surf%flowComm%inv(i)
430 
431  recvbuffer1d(6 * (i - 1) + irho:6 * (i - 1) + izippflowgamma) = &
432  recvbuffer1d(6 * (i - 1) + irho:6 * (i - 1) + izippflowgamma) + &
433  varsd(j, irho:izippflowgamma)
434 
435  ! Sface is not implemented. No reverse seed. Just zero
436 
437  ! Node Comm Values
438  j = surf%nodeComm%inv(i)
439  recvbuffer2d(3 * i - 2:3 * i) = recvbuffer2d(3 * i - 2:3 * i) + varsd(j, izippflowx:izippflowz)
440  end do
441 
442  deallocate (ptvalid, vars, varsd, fams)
443  end if
444 
445  ! Finish the reverse scatter
 ! Called on all procs (collective); non-root procs contribute their
 ! zero-sized buffers.
446  call commuserintegrationsurfacevars_b(recvbuffer1, recvbuffer1d, irho, izippflowgamma, surf%flowComm)
447  call commuserintegrationsurfacevars_b(recvbuffer2, recvbuffer2d, izippflowx, izippflowz, surf%nodeComm)
448 
449  deallocate (recvbuffer1, recvbuffer2, recvbuffer1d, recvbuffer2d)
450  end if faminclude
451  end do masterloop
452  end subroutine integrateusersurfaces_b
453 
454 #endif
455 
456  subroutine addintegrationsurface(pts, conn, famName, famID, isInflow, nPts, nConn)
457  ! Add a user-supplied integration surface.
 !
 ! Registers a triangulated surface (pts/conn) under famName/famID in
 ! the module-level userintsurfs array for later integration. The
 ! geometry itself is stored only on the root processor; the family
 ! metadata is stored on every processor.
 !
 ! NOTE(review): the doxygen scrape this was recovered from is
 ! missing original line 475, which by context is almost certainly
 ! "nuserintsurfs = nuserintsurfs + 1" -- without it the overflow
 ! check and the userintsurfs(nuserintsurfs) access below make no
 ! sense. Confirm against the repository source.
 ! NOTE(review): famName has no intent attribute; it is only read, so
 ! it should be intent(in).
458 
459  use communication, only: myid
460  use constants
461 
462  implicit none
463 
464  ! Input variables
465  integer(kind=intType), intent(in) :: nPts, nConn, famID
466  real(kind=realtype), dimension(3, nPts), intent(in) :: pts
467  integer(kind=intType), dimension(3, nConn), intent(in) :: conn
468  logical, intent(in) :: isInflow
469  character(len=*) :: famName
470  type(userintsurf), pointer :: surf
471 
472  ! Not really much to do here...we just have to save the data
473  ! into the data structure until we actually have to do the
474  ! search.
476  if (nuserintsurfs > nuserintsurfsmax) then
 ! NOTE(review): adjacent string literals; same suspected missing
 ! '//' as in the terminate() calls above -- confirm.
477  print *, "Error: Exceeded the maximum number of user-supplied "&
478  &"integration slices. Increase nUserIntSurfsMax"
479  stop
480  end if
481 
482  surf => userintsurfs(nuserintsurfs)
 ! Only the root processor keeps a copy of the actual geometry.
483  if (myid == 0) then
484  allocate (surf%pts(3, npts), surf%conn(3, nconn))
485  surf%pts = pts
486  surf%conn = conn
487  end if
488  surf%famName = famname
489  surf%famID = famid
490  surf%isInflow = isinflow
491  end subroutine addintegrationsurface
492 
493  subroutine buildvolumeadts(oBlocks, useDual)
494 
495  ! This builds volume ADTs for the owned blocks. It will build
496  ! either the dual mesh or the primal mesh depending on the flag
497  ! useDual.
 !
 ! oBlocks (inout): one oversetblock per owned domain; this routine
 !     fills xADT, hexaConn, qualDonor, il/jl/kl and builds the ADT.
 ! useDual (in)   : .true.  -> ADT nodes are cell centers (dual mesh);
 !                  .false. -> ADT nodes are primal nodes incl. halos.
 !
 ! NOTE(review): the doxygen scrape dropped original line 502, the
 ! continuation of the "use blockpointers" statement below (it must
 ! supply at least nbocos, bcdata, bctype and bcfaceid, which are
 ! referenced here) -- confirm against the repository source.
498 
499  use constants
500  use oversetdata, only: oversetblock
501  use blockpointers, only: ndom, x, ie, je, ke, il, jl, kl, vol, ib, jb, kb, &
503  use adtbuild, only: buildserialhex
504  use utils, only: setpointers, echk
505  implicit none
506 
507  ! Input/Output Parameters
508  type(oversetblock), dimension(:), target :: oBlocks
509  logical :: useDual
510 
511  ! Working Parameters
512  integer(kind=intType) :: nInterpol, nn, i, j, k, iii, jjj, kkk
513  integer(kind=intType) :: iStart, jStart, kStart, iEnd, jEnd, kEnd
514  integer(kind=intType) :: ii, jj, kk, mm, nADT, nHexa, planeOffset
515  type(oversetblock), pointer :: oBlock
516 
517  ninterpol = 1 ! we get the ADT to compute the interpolated volume for us.
518 
519  domainloop: do nn = 1, ndom
520 
521  call setpointers(nn, 1, 1)
522  oblock => oblocks(nn)
523 
524  primalordual: if (usedual) then
525 
526  ! Now setup the data for the ADT
 ! Dual mesh: one ADT node per cell center (ie*je*ke of them), one
 ! hexahedron per interior node (il*jl*kl).
527  nhexa = il * jl * kl
528  nadt = ie * je * ke
529  oblock%il = il
530  oblock%jl = jl
531  oblock%kl = kl
532 
533  allocate (oblock%xADT(3, nadt), oblock%hexaConn(8, nhexa), &
534  oblock%qualDonor(1, nadt))
535  ! Fill up the xADT using cell centers (dual mesh)
 ! Cell center = average of the 8 surrounding primal nodes; the
 ! donor quality is the cell volume (smaller volume = better donor).
536  mm = 0
537  do k = 1, ke
538  do j = 1, je
539  do i = 1, ie
540  mm = mm + 1
541  oblock%xADT(:, mm) = eighth * ( &
542  x(i - 1, j - 1, k - 1, :) + &
543  x(i, j - 1, k - 1, :) + &
544  x(i - 1, j, k - 1, :) + &
545  x(i, j, k - 1, :) + &
546  x(i - 1, j - 1, k, :) + &
547  x(i, j - 1, k, :) + &
548  x(i - 1, j, k, :) + &
549  x(i, j, k, :))
550  oblock%qualDonor(1, mm) = vol(i, j, k)
551  end do
552  end do
553  end do
554 
555  do mm = 1, nbocos
556  ! We need to make sure that the interpolation does not
557  ! use a halo behind an overset outer bound. This could
558  ! happen because the last cell is interpolated (-1) and
559  ! the iblank on the halos are still 1. Just set the
560  ! quality very high so it is not accepted.
561 
562  select case (bcfaceid(mm))
563  case (imin)
564  istart = 1; iend = 1;
565  jstart = bcdata(mm)%icBeg; jend = bcdata(mm)%icEnd
566  kstart = bcdata(mm)%jcBeg; kend = bcdata(mm)%jcEnd
567  case (imax)
568  istart = ie; iend = ie;
569  jstart = bcdata(mm)%icBeg; jend = bcdata(mm)%icEnd
570  kstart = bcdata(mm)%jcBeg; kend = bcdata(mm)%jcEnd
571  case (jmin)
572  istart = bcdata(mm)%icBeg; iend = bcdata(mm)%icEnd
573  jstart = 1; jend = 1
574  kstart = bcdata(mm)%jcBeg; kend = bcdata(mm)%jcEnd
575  case (jmax)
576  istart = bcdata(mm)%icBeg; iend = bcdata(mm)%icEnd
577  jstart = je; jend = je;
578  kstart = bcdata(mm)%jcBeg; kend = bcdata(mm)%jcEnd
579  case (kmin)
580  istart = bcdata(mm)%icBeg; iend = bcdata(mm)%icEnd
581  jstart = bcdata(mm)%jcBeg; jend = bcdata(mm)%jcEnd
582  kstart = 1; kend = 1;
583  case (kmax)
584  istart = bcdata(mm)%icBeg; iend = bcdata(mm)%icEnd
585  jstart = bcdata(mm)%jcBeg; jend = bcdata(mm)%jcEnd
586  kstart = ke; kend = ke;
587  end select
588 
589  if (bctype(mm) == oversetouterbound) then
590  do k = kstart, kend
591  do j = jstart, jend
592  do i = istart, iend
593  ! recompute the index
 ! Linear index into qualDonor for the (i,j,k) cell, consistent
 ! with the fill order of the loop above.
594  kk = (k - 1) * ie * je + (j - 1) * ie + i
595  oblock%qualDonor(1, kk) = large
596  end do
597  end do
598  end do
599  end if
600  end do
601 
602  mm = 0
603  ! These are the 'elements' of the dual mesh.
604  planeoffset = ie * je
605  do k = 2, ke
606  do j = 2, je
607  do i = 2, ie
608  mm = mm + 1
 ! Node 1 is the lowest-index corner; 2-4 walk around the bottom
 ! face, 5-8 are the same corners one k-plane up.
609  oblock%hexaConn(1, mm) = (k - 2) * planeoffset + (j - 2) * ie + (i - 2) + 1
610  oblock%hexaConn(2, mm) = oblock%hexaConn(1, mm) + 1
611  oblock%hexaConn(3, mm) = oblock%hexaConn(2, mm) + ie
612  oblock%hexaConn(4, mm) = oblock%hexaConn(3, mm) - 1
613 
614  oblock%hexaConn(5, mm) = oblock%hexaConn(1, mm) + planeoffset
615  oblock%hexaConn(6, mm) = oblock%hexaConn(2, mm) + planeoffset
616  oblock%hexaConn(7, mm) = oblock%hexaConn(3, mm) + planeoffset
617  oblock%hexaConn(8, mm) = oblock%hexaConn(4, mm) + planeoffset
618  end do
619  end do
620  end do
621  else
622  ! Note that we will be including the halo primal cells. This
623  ! should slightly increase robustness for viscous off-wall
624  ! spacing. This means the primal mesh has 1 MORE node/cell
625  ! in each direction.
626 
627  ! Now setup the data for the ADT
628  nhexa = ie * je * ke
629  nadt = ib * jb * kb
630  oblock%il = ie
631  oblock%jl = je
632  oblock%kl = ke
633 
634  allocate (oblock%xADT(3, nadt), oblock%hexaConn(8, nhexa), &
635  oblock%qualDonor(1, nadt))
636 
637  oblock%qualDonor = zero
638  ! Fill up the xADT using the primal nodes
 ! Node quality = (clipped) average of the 8 adjacent cell volumes.
639  mm = 0
640  do k = 0, ke
641  do j = 0, je
642  do i = 0, ie
643  mm = mm + 1
644  oblock%xADT(:, mm) = x(i, j, k, :)
645 
646  ! Since we don't have all 8 volumes surrounding the
647  ! halo nodes, clip the volumes to be between 0 and ib etc.
648  do iii = 0, 1
649  do jjj = 0, 1
650  do kkk = 0, 1
651  ii = min(max(0, iii + i), ib)
652  jj = min(max(0, jjj + j), jb)
653  kk = min(max(0, kkk + k), kb)
654 
655  oblock%qualDonor(1, mm) = oblock%qualDonor(1, mm) + &
656  vol(ii, jj, kk)
657  end do
658  end do
659  end do
660 
661  ! Dividing by 8 isn't strictly necessary but we'll
662  ! do it anyway.
663  oblock%qualDonor(1, mm) = oblock%qualDonor(1, mm) * eighth
664  end do
665  end do
666  end do
667 
668  mm = 0
669  ! These are the 'elements' of the primal mesh.
670  planeoffset = ib * jb
671  do k = 1, ke
672  do j = 1, je
673  do i = 1, ie
674  mm = mm + 1
675  oblock%hexaConn(1, mm) = (k - 1) * planeoffset + (j - 1) * ib + (i - 1) + 1
676  oblock%hexaConn(2, mm) = oblock%hexaConn(1, mm) + 1
677  oblock%hexaConn(3, mm) = oblock%hexaConn(2, mm) + ib
678  oblock%hexaConn(4, mm) = oblock%hexaConn(3, mm) - 1
679 
680  oblock%hexaConn(5, mm) = oblock%hexaConn(1, mm) + planeoffset
681  oblock%hexaConn(6, mm) = oblock%hexaConn(2, mm) + planeoffset
682  oblock%hexaConn(7, mm) = oblock%hexaConn(3, mm) + planeoffset
683  oblock%hexaConn(8, mm) = oblock%hexaConn(4, mm) + planeoffset
684  end do
685  end do
686  end do
687  end if primalordual
688 
689  ! Call the custom build routine -- Serial only, only Hexa volumes,
690  ! we supply our own ADT Type
691 
692  call buildserialhex(nhexa, nadt, oblock%xADT, oblock%hexaConn, oblock%ADT)
693  end do domainloop
694 
695  end subroutine buildvolumeadts
696 
697  subroutine performinterpolation(pts, oBlocks, useDual, comm)
698 
699  ! This routine performs the actual searches for the slices. It is
700  ! generic in the sense that it will search an arbtitrary set of
701  ! points on either the primal or dual meshes. The final required
702  ! communication data is then written into the supplied comm.
703 
704  use constants
705  use block, only: interppttype
707  use oversetdata, only: oversetblock
708  use blockpointers, only: ndom, x, ie, je, ke, il, jl, kl, x, iblank, vol
711  use adtutils, only: stack
712  use adtdata, only: adtbboxtargettype
713  use utils, only: setpointers, mynorm2, echk
714  use inputoverset, only: oversetprojtol
716 
717  implicit none
718 
719  ! Input parameters:
720  real(kind=realtype), dimension(:, :), intent(in) :: pts
721  type(userintsurf) :: surf
722  type(oversetblock), dimension(:), target, intent(in) :: oBlocks
723  logical, intent(in) :: useDual
724  !type(oversetSurf), dimension(:), intent(in) :: oSurfs
725  type(usersurfcommtype) :: comm
726 
727  ! Working parameters
728  type(oversetblock), pointer :: oBlock
729  type(interppttype), dimension(:), allocatable :: surfFringes
730 
731  integer(Kind=intType) :: i, j, k, ii, jj, kk, iii, jjj, kkk, nn, mm
732  integer(kind=intType) :: iSurf, ierr, nInterpol, iProc
733  integer(kind=intType) :: nHexa, nAdt, planeOffset, elemID, nPts
734  real(kind=realtype) :: xc(4), weight(8)
735  integer mpiStatus(MPI_STATUS_SIZE)
736 
737  real(kind=realtype) :: uvw(5), uvw2(5), donorqual, xcheck(3)
738  integer(kind=intType) :: intInfo(3), intInfo2(3)
739  logical :: failed, invalid
740 
741  integer(kind=intType), dimension(:, :), allocatable :: donorInfo, intSend
742  integer(kind=intType), dimension(:), allocatable :: procSizes
743  real(kind=realtype), dimension(:, :), allocatable :: donorfrac, realsend
744 
745  ! Variables we have to pass the ADT search routine
746  integer(kind=intType), dimension(:), pointer :: BB
747  type(adtbboxtargettype), dimension(:), pointer :: BB2
748  integer(kind=intType), dimension(:), pointer :: frontLeaves
749  integer(kind=intType), dimension(:), pointer :: frontLeavesNew
750 
751  ! Data for the search
752  allocate (bb(20), frontleaves(25), frontleavesnew(25), stack(100), bb2(20))
753 
754  npts = size(pts, 2)
755 
756  ! Allocate donor information arrays
757  allocate (donorfrac(4, npts), donorinfo(5, npts))
758  donorinfo = -1
759  donorfrac(4, :) = large
760  ninterpol = 1
761  domainsearch: do nn = 1, ndom
762  oblock => oblocks(nn)
763  call setpointers(nn, 1, 1)
764 
765  ! Search the supplied pts one at a time
766  elemloop: do i = 1, npts
767 
768  xc(1:3) = pts(:, i)
769 
770  ! Call the standard tree search
771  call containmenttreesearchsinglepoint(oblock%ADT, xc, intinfo, uvw, &
772  oblock%qualDonor, ninterpol, bb, &
773  frontleaves, frontleavesnew, failed)
774 
775  ! Make sure this point is not garbage.
776  if (intinfo(1) >= 0) then
777  call fractoweights2(uvw(1:3), weight)
778  xcheck = zero
779  do j = 1, 8
780  xcheck = xcheck + weight(j) * oblock%xADT(:, oblock%hexaConn(j, intinfo(3)))
781  end do
782 
783  if (mynorm2(xcheck - xc(1:3)) > oversetprojtol) then
784  failed = .true.
785  end if
786  end if
787 
788  if (intinfo(1) >= 0 .and. failed) then
789  ! we "found" a point but it is garbage. Do the failsafe search
790  xc(4) = large
791  call mindistancetreesearchsinglepoint(oblock%ADT, xc, intinfo, uvw, &
792  oblock%qualDonor, ninterpol, bb2, frontleaves, frontleavesnew)
793 
794  ! Check this one:
795  call fractoweights2(uvw(1:3), weight)
796  xcheck = zero
797  do j = 1, 8
798  xcheck = xcheck + weight(j) * oblock%xADT(:, oblock%hexaConn(j, intinfo(3)))
799  end do
800 
801  ! Since this is the last line of defence, relax the tolerance a bit
802  if (mynorm2(xcheck - xc(1:3)) > 100 * oversetprojtol) then
803  ! This fringe has not found a donor
804  intinfo(1) = -1
805  else
806  ! This one has now passed.
807 
808  ! Important! uvw(4) is the distance squared for this search
809  ! not interpolated value
810  uvw(4) = uvw(5)
811  end if
812  end if
813 
814  elemfound: if (intinfo(1) >= 0) then
815 
816  ! Donor and block and index information for this donor.
817  donorqual = uvw(4)
818  elemid = intinfo(3) - 1 ! Make it zero based
819 
820  ! The dual mesh needs an offset of 1 becuase it only used
821  ! 1:ie values. This is not necessary for the primal.
822  if (usedual) then
823  ii = mod(elemid, oblock%il) + 1
824  jj = mod(elemid / oblock%il, oblock%jl) + 1
825  kk = elemid / (oblock%il * oblock%jl) + 1
826  else
827  ii = mod(elemid, oblock%il)
828  jj = mod(elemid / oblock%il, oblock%jl)
829  kk = elemid / (oblock%il * oblock%jl)
830  end if
831  ! Rememebr donorFrac(4, i) is the current best quality
832  if (donorqual < donorfrac(4, i)) then
833 
834  invalid = .false.
835 
836  ! For the dual mesh search, we have to make sure the
837  ! potential donors are valid. Such a check is not
838  ! necessary for the primal search since all nodes are
839  ! considered valid.
840 
841  if (usedual) then
842  ! Check if the point is invalid. We can do this
843  ! with i-blank array. We can only accept compute
844  ! cells (iblank=1) or interpolated
845  ! cells(iblank=-1).
846  do kkk = 0, 1
847  do jjj = 0, 1
848  do iii = 0, 1
849  if (.not. (iblank(ii + iii, jj + jjj, kk + kkk) == 1 .or. &
850  iblank(ii + iii, jj + jjj, kk + kkk) == -1)) then
851  invalid = .true.
852  end if
853  end do
854  end do
855  end do
856  end if
857 
858  if (.not. invalid) then
859 
860  ! Set the quality of the donor to the one we
861  ! just found. Save the rest of the necessary
862  ! information.
863  donorinfo(1, i) = myid
864  donorinfo(2, i) = nn
865  donorinfo(3, i) = ii
866  donorinfo(4, i) = jj
867  donorinfo(5, i) = kk
868  donorfrac(1:3, i) = uvw(1:3)
869  donorfrac(4, i) = donorqual
870  end if
871  end if
872  end if elemfound
873  end do elemloop
874  end do domainsearch
875 
876  ! Next count up the number of valid donors we've found and compact
877  ! the info back to that length.
878  if (myid /= 0) then
879  j = 0
880  do i = 1, npts
881  if (donorinfo(1, i) /= -1) then
882  j = j + 1
883  end if
884  end do
885  allocate (intsend(6, j), realsend(4, i))
886  if (j > 0) then
887  j = 0
888  do i = 1, npts
889  if (donorinfo(1, i) /= -1) then
890  j = j + 1
891  intsend(1:5, j) = donorinfo(:, i)
892  intsend(6, j) = i
893  realsend(:, j) = donorfrac(:, i)
894  end if
895  end do
896  end if
897  else
898  ! On the root proc, use intSend and realSend as the receiver
899  ! buffer. These can be at most nPts sized.
900  allocate (intsend(6, npts), realsend(4, npts))
901  end if
902 
903  ! Gather up the sizes (j) to the root processor so he know who to
904  ! expect data from.
905  allocate (procsizes(0:nproc - 1))
906 
907  call mpi_gather(j, 1, adflow_integer, procsizes, 1, &
908  adflow_integer, 0, adflow_comm_world, ierr)
909  call echk(ierr, __file__, __line__)
910 
911  ! Next all the procs need to send all the information back to the
912  ! root processor where we will determine the proper donors for
913  ! each of the cells
914 
915  ! All procs except root fire off their data.
916  if (myid >= 1) then
917  if (j > 0) then
918  call mpi_send(intsend, j * 6, adflow_integer, 0, myid, &
919  adflow_comm_world, ierr)
920  call echk(ierr, __file__, __line__)
921 
922  call mpi_send(realsend, j * 4, adflow_real, 0, myid, &
923  adflow_comm_world, ierr)
924  call echk(ierr, __file__, __line__)
925  end if
926  end if
927 
 928  ! And the root processor receives it...
929  if (myid == 0) then
930  do iproc = 1, nproc - 1
931  ! Determine if this proc has sent anything:
932  if (procsizes(iproc) /= 0) then
933 
934  call mpi_recv(intsend, 6 * npts, adflow_integer, iproc, iproc, &
935  adflow_comm_world, mpistatus, ierr)
936  call echk(ierr, __file__, __line__)
937 
938  call mpi_recv(realsend, 4 * npts, adflow_real, iproc, iproc, &
939  adflow_comm_world, mpistatus, ierr)
940  call echk(ierr, __file__, __line__)
941 
942  ! Now process the data (intSend and realSend) that we
943  ! just received. We don't need to check the status for
 944  ! the sizes because we already know the sizes from the
945  ! initial gather we did.
946 
947  do i = 1, procsizes(iproc)
948  ii = intsend(6, i)
949 
950  if (realsend(4, i) < donorfrac(4, ii)) then
951  ! The incoming quality is better. Accept it.
952  donorinfo(1:5, ii) = intsend(1:5, i)
953  donorfrac(:, ii) = realsend(:, i)
954  end if
955  end do
956  end if
957  end do
958 
959  ! To make this easier, convert the information we have to a
960  ! 'fringeType' array so we can use the pre-existing sorting
961  ! routine.
962  allocate (surffringes(npts))
963  do i = 1, npts
964  surffringes(i)%donorProc = donorinfo(1, i)
965  surffringes(i)%donorBlock = donorinfo(2, i)
966  surffringes(i)%dI = donorinfo(3, i)
967  surffringes(i)%dJ = donorinfo(4, i)
968  surffringes(i)%dK = donorinfo(5, i)
969  surffringes(i)%donorFrac = donorfrac(1:3, i)
970  ! Use the myBlock attribute to keep track of the original
971  ! index. When we sort the fringes, they will no longer be
972  ! in the same order
973  surffringes(i)%myBlock = i
974  end do
975 
976  ! Perform the actual sort.
977  call qsortinterppttype(surffringes, npts)
978 
979  ! We will reuse-proc sizes to now mean the number of elements
980  ! that the processor *actually* has to send. We will include
 981  ! the root proc itself in the calc because that will tell us
982  ! the size of the internal comm structure.
983 
984  procsizes = 0
985  allocate (comm%valid(npts))
986  comm%valid = .true.
987  do i = 1, npts
988  if (surffringes(i)%donorProc < 0) then
989  ! We dont have a donor. Flag this point as invalid
990  comm%valid(i) = .false.
991  end if
992 
993  ! Dump the points without donors on the root proc by making
994  ! sure j is at least 0 for the root proc. These will just
995  ! simply be ignored during the comm.
996  j = max(surffringes(i)%donorProc, 0)
997  procsizes(j) = procsizes(j) + 1
998  end do
999  end if
1000 
1001  ! Simply broadcast out the the proc sizes back to everyone so all
1002  ! processors know if they are to receive anything back.
1003  call mpi_bcast(procsizes, nproc, adflow_integer, 0, adflow_comm_world, ierr)
1004  call echk(ierr, __file__, __line__)
1005 
1006  ! We can now save some of the final comm data required on the
1007  ! root proc for this surf.
1008  allocate (comm%procSizes(0:nproc - 1), comm%procDisps(0:nproc))
1009 
1010  ! Copy over procSizes and generate the cumulative form of
1011  ! the size array, procDisps
1012  comm%procSizes = procsizes
1013  call getcumulativeform(comm%procSizes, nproc, comm%procDisps)
1014 
1015  ! Record the elemInverse which is necessary to index into
1016  ! the original conn array.
1017  if (myid == 0) then
1018  allocate (comm%Inv(npts))
1019  do i = 1, npts
1020  comm%Inv(i) = surffringes(i)%myBlock
1021  end do
1022  end if
1023 
1024  ! Now we can send out the final donor information to the
1025  ! processors that must supply it.
1026  comm%nDonor = procsizes(myid)
1027  allocate (comm%frac(3, comm%nDonor), comm%donorInfo(4, comm%nDonor))
1028 
1029  if (myid >= 1) then
1030  if (comm%nDonor > 0) then
1031  ! We are responsible for at least 1 donor. We have to make
1032  ! use of the intSend and realSend buffers again (which
1033  ! are guaranteed to be big enough). The reason we can't
1034  ! dump the data in directly is that intSend and realSend
1035  ! have a different leading index than we need on the
1036  ! final data structure.
1037 
1038  call mpi_recv(intsend, 6 * comm%nDonor, adflow_integer, 0, myid, &
1039  adflow_comm_world, mpistatus, ierr)
1040  call echk(ierr, __file__, __line__)
1041 
1042  call mpi_recv(realsend, 4 * comm%nDonor, adflow_real, 0, myid, &
1043  adflow_comm_world, mpistatus, ierr)
1044  call echk(ierr, __file__, __line__)
1045 
1046  ! Copy into final structure
1047  do i = 1, comm%nDonor
1048  comm%donorInfo(:, i) = intsend(1:4, i)
1049  comm%frac(:, i) = realsend(1:3, i)
1050  end do
1051  end if
1052  else
1053  ! We are the root processor.
1054  if (comm%nDonor > 0) then
1055  ! We need to copy out our donor info on the root proc if we have any
1056  do i = comm%procDisps(myid) + 1, comm%procDisps(myid + 1)
1057  comm%donorInfo(1, i) = surffringes(i)%donorBlock
1058  comm%donorInfo(2, i) = surffringes(i)%dI
1059  comm%donorInfo(3, i) = surffringes(i)%dJ
1060  comm%donorInfo(4, i) = surffringes(i)%dK
1061  comm%frac(1:3, i) = surffringes(i)%donorFrac
1062  end do
1063  end if
1064 
1065  ! Now loop over the rest of the procs and send out the info we
1066  ! need. We have to temporarily copy the data back out of
1067  ! fringes to the intSend and realSend arrays
1068  do iproc = 1, nproc - 1
1069 
1070  if (comm%procSizes(iproc) > 0) then
1071  ! Have something to send here:
1072  j = 0
1073  do i = comm%procDisps(iproc) + 1, comm%procDisps(iproc + 1)
1074  j = j + 1
1075 
1076  intsend(1, j) = surffringes(i)%donorBlock
1077  intsend(2, j) = surffringes(i)%dI
1078  intsend(3, j) = surffringes(i)%dJ
1079  intsend(4, j) = surffringes(i)%dK
1080  realsend(1:3, j) = surffringes(i)%donorFrac
1081  end do
1082 
1083  call mpi_send(intsend, j * 6, adflow_integer, iproc, iproc, &
1084  adflow_comm_world, ierr)
1085  call echk(ierr, __file__, __line__)
1086 
1087  call mpi_send(realsend, j * 4, adflow_real, iproc, iproc, &
1088  adflow_comm_world, ierr)
1089  call echk(ierr, __file__, __line__)
1090  end if
1091  end do
1092 
1093  ! Deallocate data allocated only on root proc
1094  deallocate (surffringes)
1095  end if
1096 
1097  ! Nuke rest of allocated on all procs
1098  deallocate (intsend, realsend, procsizes, donorinfo, donorfrac)
1099  deallocate (bb, frontleaves, frontleavesnew, stack, bb2)
1100  end subroutine performinterpolation
1101 
1103 
1104  ! This routine performs the actual searches for the slices. We
1105  ! reuse much of the same machinery as is used in the overset code.
1106 
1107  use constants
1109  use oversetdata, only: oversetblock
1110  use blockpointers, only: ndom, ie, je, ke, il, jl, kl
1112  use utils, only: setpointers, echk
1113 
1114  implicit none
1115 
1116  ! Working parameters
1117  type(oversetblock), dimension(nDom), target :: oBlocks
1118  type(userintsurf), pointer :: surf
1119 
1120  integer(Kind=intType) :: iSurf, ii, i, nn, nPts, ierr
1121  real(kind=realtype), dimension(:, :), allocatable :: pts
1122  logical :: useDual
1123  if (nuserintsurfs == 0) then
1124  return
1125  end if
1126  primaldualloop: do ii = 1, 2
1127  if (ii == 1) then
1128  usedual = .true.
1129  else
1130  usedual = .false.
1131  end if
1132 
1133  call buildvolumeadts(oblocks, usedual)
1134 
1135  masterloop: do isurf = 1, nuserintsurfs
1136 
1137  surf => userintsurfs(isurf)
1138 
1139  if (myid == 0) then
1140 
1141  ! We are interpolating the nodal values for both the
1142  ! nodes and the solution variables.
1143  npts = size(surf%pts, 2)
1144  allocate (pts(3, npts))
1145  nodeloop: do i = 1, npts
1146  pts(:, i) = surf%pts(:, i)
1147  end do nodeloop
1148  end if
1149 
1150  ! Send the number of points back to all procs:
1151  call mpi_bcast(npts, 1, adflow_integer, 0, adflow_comm_world, ierr)
1152  call echk(ierr, __file__, __line__)
1153 
1154  ! All other procs except the root allocate space and receive
1155  ! the pt array.
1156  if (myid /= 0) then
1157  allocate (pts(3, npts))
1158  end if
1159 
1160  call mpi_bcast(pts, 3 * npts, adflow_real, 0, adflow_comm_world, ierr)
1161  call echk(ierr, __file__, __line__)
1162 
1163  ! Call the actual interpolation routine
1164  if (ii == 1) then
1165  call performinterpolation(pts, oblocks, .true., surf%flowComm)
1166  else
1167  call performinterpolation(pts, oblocks, .false., surf%nodeComm)
1168  end if
1169 
1170  deallocate (pts)
1171  end do masterloop
1172 
1173  ! Destroy the ADT Data and allocated values
1174  do nn = 1, ndom
1175  call destroyserialhex(oblocks(nn)%ADT)
1176  deallocate (oblocks(nn)%xADT, oblocks(nn)%hexaConn, oblocks(nn)%qualDonor)
1177  end do
1178  end do primaldualloop
1179  end subroutine interpolateintegrationsurfaces
1180 
1181  subroutine commuserintegrationsurfacevars(recvBuffer, varStart, varEnd, comm)
1182 
1183  use constants
1184  use block, onlY: flowdoms, ndom
1186  use utils, only: echk
1187  use oversetutilities, only: fractoweights
1188 
1189  implicit none
1190 
1191  ! Input/Output
1192  real(kind=realtype), dimension(:) :: recvbuffer
1193  integer(kind=intType), intent(in) :: varStart, varEnd
1194  type(usersurfcommtype) :: comm
1195 
1196  ! Working
1197  real(kind=realtype), dimension(:), allocatable :: sendbuffer
1198  integer(Kind=intType) :: d1, i1, j1, k1, jj, k, nvar, i, ierr
1199  real(kind=realtype), dimension(8) :: weight
1200 
1201  ! The number of variables we are transferring:
1202  nvar = varend - varstart + 1
1203 
1204  ! We assume that the pointers to the realCommVars have already been set.
1205 
1206  allocate (sendbuffer(nvar * comm%nDonor))
1207  ! initialize the sendBuffer to an arbitrary value. If the
1208  ! integration tries to use this value, you know something is
1209  ! wrong.
1210  sendbuffer = -99999
1211  ! First generate the interpolated data necessary
1212  jj = 0
1213  donorloop: do i = 1, comm%nDonor
1214  ! Convert the frac to weights
1215  call fractoweights(comm%frac(:, i), weight)
1216 
1217  ! Block and indices for easier reading. The +1 is due to the
1218  ! pointer offset on realCommVars.
1219 
1220  d1 = comm%donorInfo(1, i) ! Block Index
1221  i1 = comm%donorInfo(2, i) + 1 ! donor I index
1222  j1 = comm%donorInfo(3, i) + 1 ! donor J index
1223  k1 = comm%donorInfo(4, i) + 1 ! donor K index
1224 
1225  ! We are interpolating nVar variables
1226  do k = varstart, varend
1227  jj = jj + 1
1228  if (d1 > 0) then ! Is this pt valid?
1229  sendbuffer(jj) = &
1230  weight(1) * flowdoms(d1, 1, 1)%realCommVars(k)%var(i1, j1, k1) + &
1231  weight(2) * flowdoms(d1, 1, 1)%realCommVars(k)%var(i1 + 1, j1, k1) + &
1232  weight(3) * flowdoms(d1, 1, 1)%realCommVars(k)%var(i1, j1 + 1, k1) + &
1233  weight(4) * flowdoms(d1, 1, 1)%realCommVars(k)%var(i1 + 1, j1 + 1, k1) + &
1234  weight(5) * flowdoms(d1, 1, 1)%realCommVars(k)%var(i1, j1, k1 + 1) + &
1235  weight(6) * flowdoms(d1, 1, 1)%realCommVars(k)%var(i1 + 1, j1, k1 + 1) + &
1236  weight(7) * flowdoms(d1, 1, 1)%realCommVars(k)%var(i1, j1 + 1, k1 + 1) + &
1237  weight(8) * flowdoms(d1, 1, 1)%realCommVars(k)%var(i1 + 1, j1 + 1, k1 + 1)
1238  end if
1239  end do
1240  end do donorloop
1241 
1242  ! Now we can do an mpi_gatherv to the root proc:
1243  call mpi_gatherv(sendbuffer, nvar * comm%nDonor, adflow_real, recvbuffer, &
1244  nvar * comm%procSizes, nvar * comm%procDisps, adflow_real, 0, adflow_comm_world, ierr)
1245  call echk(ierr, __file__, __line__)
1246  deallocate (sendbuffer)
1247 
1248  end subroutine commuserintegrationsurfacevars
1249 
1250  subroutine commuserintegrationsurfacevars_d(recvBuffer, recvBufferd, varStart, varEnd, comm)
1251 
1252  use constants
1253  use block, onlY: flowdoms, ndom
1255  use utils, only: echk
1256  use oversetutilities, only: fractoweights
1257 
1258  implicit none
1259 
1260  ! Input/Output
1261  real(kind=realtype), dimension(:) :: recvbuffer, recvbufferd
1262  integer(kind=intType), intent(in) :: varStart, varEnd
1263  type(usersurfcommtype) :: comm
1264 
1265  ! Working
1266  real(kind=realtype), dimension(:), allocatable :: sendbuffer, sendbufferd
1267  integer(Kind=intType) :: d1, i1, j1, k1, jj, k, nvar, i, ierr
1268  real(kind=realtype), dimension(8) :: weight
1269 
1270  ! The number of variables we are transferring:
1271  nvar = varend - varstart + 1
1272 
1273  ! We assume that the pointers to the realCommVars have already been set.
1274 
1275  allocate (sendbuffer(nvar * comm%nDonor), &
1276  sendbufferd(nvar * comm%nDonor))
1277 
1278  ! First generate the interpolated data necessary
1279  jj = 0
1280  donorloop: do i = 1, comm%nDonor
1281  ! Convert the frac to weights
1282  call fractoweights(comm%frac(:, i), weight)
1283 
1284  ! Block and indices for easier reading. The +1 is due to the
1285  ! pointer offset on realCommVars.
1286 
1287  d1 = comm%donorInfo(1, i) ! Block Index
1288  i1 = comm%donorInfo(2, i) + 1 ! donor I index
1289  j1 = comm%donorInfo(3, i) + 1 ! donor J index
1290  k1 = comm%donorInfo(4, i) + 1 ! donor K index
1291 
1292  ! We are interpolating nVar variables
1293  do k = varstart, varend
1294  jj = jj + 1
1295  if (d1 > 0) then ! Is this pt valid?
1296  sendbuffer(jj) = &
1297  weight(1) * flowdoms(d1, 1, 1)%realCommVars(k)%var(i1, j1, k1) + &
1298  weight(2) * flowdoms(d1, 1, 1)%realCommVars(k)%var(i1 + 1, j1, k1) + &
1299  weight(3) * flowdoms(d1, 1, 1)%realCommVars(k)%var(i1, j1 + 1, k1) + &
1300  weight(4) * flowdoms(d1, 1, 1)%realCommVars(k)%var(i1 + 1, j1 + 1, k1) + &
1301  weight(5) * flowdoms(d1, 1, 1)%realCommVars(k)%var(i1, j1, k1 + 1) + &
1302  weight(6) * flowdoms(d1, 1, 1)%realCommVars(k)%var(i1 + 1, j1, k1 + 1) + &
1303  weight(7) * flowdoms(d1, 1, 1)%realCommVars(k)%var(i1, j1 + 1, k1 + 1) + &
1304  weight(8) * flowdoms(d1, 1, 1)%realCommVars(k)%var(i1 + 1, j1 + 1, k1 + 1)
1305  sendbufferd(jj) = &
1306  weight(1) * flowdoms(d1, 1, 1)%realCommVars(k + nzippflowcomm)%var(i1, j1, k1) + &
1307  weight(2) * flowdoms(d1, 1, 1)%realCommVars(k + nzippflowcomm)%var(i1 + 1, j1, k1) + &
1308  weight(3) * flowdoms(d1, 1, 1)%realCommVars(k + nzippflowcomm)%var(i1, j1 + 1, k1) + &
1309  weight(4) * flowdoms(d1, 1, 1)%realCommVars(k + nzippflowcomm)%var(i1 + 1, j1 + 1, k1) + &
1310  weight(5) * flowdoms(d1, 1, 1)%realCommVars(k + nzippflowcomm)%var(i1, j1, k1 + 1) + &
1311  weight(6) * flowdoms(d1, 1, 1)%realCommVars(k + nzippflowcomm)%var(i1 + 1, j1, k1 + 1) + &
1312  weight(7) * flowdoms(d1, 1, 1)%realCommVars(k + nzippflowcomm)%var(i1, j1 + 1, k1 + 1) + &
1313  weight(8) * flowdoms(d1, 1, 1)%realCommVars(k + nzippflowcomm)%var(i1 + 1, j1 + 1, k1 + 1)
1314  end if
1315  end do
1316  end do donorloop
1317 
1318  ! Now we can do an mpi_gatherv to the root proc:
1319  call mpi_gatherv(sendbuffer, nvar * comm%nDonor, adflow_real, recvbuffer, &
1320  nvar * comm%procSizes, nvar * comm%procDisps, adflow_real, 0, adflow_comm_world, ierr)
1321  call echk(ierr, __file__, __line__)
1322 
1323  call mpi_gatherv(sendbufferd, nvar * comm%nDonor, adflow_real, recvbufferd, &
1324  nvar * comm%procSizes, nvar * comm%procDisps, adflow_real, 0, adflow_comm_world, ierr)
1325  call echk(ierr, __file__, __line__)
1326 
1327  deallocate (sendbuffer, sendbufferd)
1328 
1329  end subroutine commuserintegrationsurfacevars_d
1330 
    subroutine commuserintegrationsurfacevars_b(recvBuffer, recvBufferd, varStart, varEnd, comm)

        ! Reverse-mode (adjoint) version of commUserIntegrationSurfaceVars.
        ! The adjoint of the forward gatherv is a scatterv: the seeds in
        ! recvBufferd are scattered from the root back to the processors
        ! that own each donor, and each seed is then accumulated onto the
        ! derivative communication variables (realCommVars slots offset by
        ! nZippFlowComm) using the same trilinear weights as the forward
        ! interpolation.
        !
        ! recvBuffer  - Not referenced here; kept for signature symmetry
        !               with the forward routine.
        ! recvBufferd - Adjoint seeds; significant on the root proc.
        ! varStart    - (in) First realCommVars slot.
        ! varEnd      - (in) Last realCommVars slot.
        ! comm        - Communication structure built during the surface search.

        use constants
        use block, onlY: flowdoms, ndom
        use utils, only: echk
        use oversetutilities, only: fractoweights

        implicit none

        ! Input/Output
        real(kind=realtype), dimension(:) :: recvbuffer, recvbufferd
        integer(kind=intType), intent(in) :: varStart, varEnd
        type(usersurfcommtype) :: comm

        ! Working
        ! Note: sendbuffer is declared but never used in this adjoint routine.
        real(kind=realtype), dimension(:), allocatable :: sendbuffer, sendbufferd
        integer(Kind=intType) :: d1, i1, j1, k1, jj, k, nvar, i, ierr
        real(kind=realtype), dimension(8) :: weight

        ! The number of variables we are transferring:
        nvar = varend - varstart + 1

        allocate (sendbufferd(nvar * comm%nDonor))

        ! Adjoint of a gatherv is a scatterv. Flip the send/recv relative
        ! to the forward call.
        call mpi_scatterv(recvbufferd, nvar * comm%procSizes, nvar * comm%procDisps, adflow_real, &
                          sendbufferd, nvar * comm%nDonor, adflow_real, 0, adflow_comm_world, ierr)
        call echk(ierr, __FILE__, __LINE__)

        ! Scatter each received seed back onto the 8 hex corners of its
        ! donor cell, weighted by the forward interpolation weights.
        jj = 0
        donorloop: do i = 1, comm%nDonor
            ! Convert the frac to weights
            call fractoweights(comm%frac(:, i), weight)

            ! Block and indices for easier reading. The +1 is due to the
            ! pointer offset on realCommVars.

            d1 = comm%donorInfo(1, i) ! Block Index
            i1 = comm%donorInfo(2, i) + 1 ! donor I index
            j1 = comm%donorInfo(3, i) + 1 ! donor J index
            k1 = comm%donorInfo(4, i) + 1 ! donor K index

            ! We are interpolating nVar variables
            do k = varstart, varend
                jj = jj + 1
                if (d1 > 0) then ! Is this pt valid?

                    ! Accumulate (never overwrite) onto the derivative
                    ! variables: multiple donors may touch the same cell.
                    flowdoms(d1, 1, 1)%realCommVars(k + nzippflowcomm)%var(i1, j1, k1) = &
                        flowdoms(d1, 1, 1)%realCommVars(k + nzippflowcomm)%var(i1, j1, k1) + &
                        weight(1) * sendbufferd(jj)

                    flowdoms(d1, 1, 1)%realCommVars(k + nzippflowcomm)%var(i1 + 1, j1, k1) = &
                        flowdoms(d1, 1, 1)%realCommVars(k + nzippflowcomm)%var(i1 + 1, j1, k1) + &
                        weight(2) * sendbufferd(jj)

                    flowdoms(d1, 1, 1)%realCommVars(k + nzippflowcomm)%var(i1, j1 + 1, k1) = &
                        flowdoms(d1, 1, 1)%realCommVars(k + nzippflowcomm)%var(i1, j1 + 1, k1) + &
                        weight(3) * sendbufferd(jj)

                    flowdoms(d1, 1, 1)%realCommVars(k + nzippflowcomm)%var(i1 + 1, j1 + 1, k1) = &
                        flowdoms(d1, 1, 1)%realCommVars(k + nzippflowcomm)%var(i1 + 1, j1 + 1, k1) + &
                        weight(4) * sendbufferd(jj)

                    flowdoms(d1, 1, 1)%realCommVars(k + nzippflowcomm)%var(i1, j1, k1 + 1) = &
                        flowdoms(d1, 1, 1)%realCommVars(k + nzippflowcomm)%var(i1, j1, k1 + 1) + &
                        weight(5) * sendbufferd(jj)

                    flowdoms(d1, 1, 1)%realCommVars(k + nzippflowcomm)%var(i1 + 1, j1, k1 + 1) = &
                        flowdoms(d1, 1, 1)%realCommVars(k + nzippflowcomm)%var(i1 + 1, j1, k1 + 1) + &
                        weight(6) * sendbufferd(jj)

                    flowdoms(d1, 1, 1)%realCommVars(k + nzippflowcomm)%var(i1, j1 + 1, k1 + 1) = &
                        flowdoms(d1, 1, 1)%realCommVars(k + nzippflowcomm)%var(i1, j1 + 1, k1 + 1) + &
                        weight(7) * sendbufferd(jj)

                    flowdoms(d1, 1, 1)%realCommVars(k + nzippflowcomm)%var(i1 + 1, j1 + 1, k1 + 1) = &
                        flowdoms(d1, 1, 1)%realCommVars(k + nzippflowcomm)%var(i1 + 1, j1 + 1, k1 + 1) + &
                        weight(8) * sendbufferd(jj)
                end if
            end do
        end do donorloop

        deallocate (sendbufferd)

    end subroutine commuserintegrationsurfacevars_b
1420 
1421  subroutine qsortinterppttype(arr, nn)
1422 
1423  use constants
1424  use block ! Cannot use-only becuase of <= operator
1425  use utils, only: terminate
1426  implicit none
1427  !
1428  ! Subroutine arguments.
1429  !
1430  integer(kind=intType), intent(in) :: nn
1431 
1432  type(interppttype), dimension(*), intent(inout) :: arr
1433  !
1434  ! Local variables.
1435  !
1436  integer(kind=intType), parameter :: m = 7
1437 
1438  integer(kind=intType) :: nStack
1439  integer(kind=intType) :: i, j, k, r, l, jStack, ii
1440 
1441  integer :: ierr
1442 
1443  type(interppttype) :: a, tmp
1444 
1445  integer(kind=intType), allocatable, dimension(:) :: stack
1446  integer(kind=intType), allocatable, dimension(:) :: tmpStack
1447 
1448  ! Allocate the memory for stack.
1449 
1450  nstack = 100
1451  allocate (stack(nstack), stat=ierr)
1452  if (ierr /= 0) &
1453  call terminate("qsortinterpPt", &
1454  "Memory allocation failure for stack")
1455 
1456  ! Initialize the variables that control the sorting.
1457 
1458  jstack = 0
1459  l = 1
1460  r = nn
1461 
1462  ! Start of the algorithm
1463 
1464  do
1465 
1466  ! Check for the size of the subarray.
1467 
1468  if ((r - l) < m) then
1469 
1470  ! Perform insertion sort
1471 
1472  do j = l + 1, r
1473  a = arr(j)
1474  do i = (j - 1), l, -1
1475  if (arr(i) <= a) exit
1476  arr(i + 1) = arr(i)
1477  end do
1478  arr(i + 1) = a
1479  end do
1480 
1481  ! In case there are no more elements on the stack, exit from
1482  ! the outermost do-loop. Algorithm has finished.
1483 
1484  if (jstack == 0) exit
1485 
1486  ! Pop stack and begin a new round of partitioning.
1487 
1488  r = stack(jstack)
1489  l = stack(jstack - 1)
1490  jstack = jstack - 2
1491 
1492  else
1493 
1494  ! Subarray is larger than the threshold for a linear sort.
1495  ! Choose median of left, center and right elements as
1496  ! partitioning element a.
1497  ! Also rearrange so that (l) <= (l+1) <= (r).
1498 
1499  k = (l + r) / 2
1500  tmp = arr(k) ! Swap the elements
1501  arr(k) = arr(l + 1) ! k and l+1.
1502  arr(l + 1) = tmp
1503 
1504  if (arr(r) < arr(l)) then
1505  tmp = arr(l) ! Swap the elements
1506  arr(l) = arr(r) ! r and l.
1507  arr(r) = tmp
1508  end if
1509 
1510  if (arr(r) < arr(l + 1)) then
1511  tmp = arr(l + 1) ! Swap the elements
1512  arr(l + 1) = arr(r) ! r and l+1.
1513  arr(r) = tmp
1514  end if
1515 
1516  if (arr(l + 1) < arr(l)) then
1517  tmp = arr(l + 1) ! Swap the elements
1518  arr(l + 1) = arr(l) ! l and l+1.
1519  arr(l) = tmp
1520  end if
1521 
1522  ! Initialize the pointers for partitioning.
1523 
1524  i = l + 1
1525  j = r
1526  a = arr(l + 1)
1527 
1528  ! The innermost loop
1529 
1530  do
1531 
1532  ! Scan up to find element >= a.
1533  do
1534  i = i + 1
1535  if (a <= arr(i)) exit
1536  end do
1537 
1538  ! Scan down to find element <= a.
1539  do
1540  j = j - 1
1541  if (arr(j) <= a) exit
1542  end do
1543 
1544  ! Exit the loop in case the pointers i and j crossed.
1545 
1546  if (j < i) exit
1547 
1548  ! Swap the element i and j.
1549 
1550  tmp = arr(i)
1551  arr(i) = arr(j)
1552  arr(j) = tmp
1553  end do
1554 
1555  ! Swap the entries j and l+1. Remember that a equals
1556  ! arr(l+1).
1557 
1558  arr(l + 1) = arr(j)
1559  arr(j) = a
1560 
1561  ! Push pointers to larger subarray on stack,
1562  ! process smaller subarray immediately.
1563 
1564  jstack = jstack + 2
1565  if (jstack > nstack) then
1566 
1567  ! Storage of the stack is too small. Reallocate.
1568 
1569  allocate (tmpstack(nstack), stat=ierr)
1570  if (ierr /= 0) &
1571  call terminate("qsortinterpPt", &
1572  "Memory allocation error for tmpStack")
1573  tmpstack = stack
1574 
1575  ! Free the memory of stack, store the old value of nStack
1576  ! in tmp and increase nStack.
1577 
1578  deallocate (stack, stat=ierr)
1579  if (ierr /= 0) &
1580  call terminate("qsortinterpPt", &
1581  "Deallocation error for stack")
1582  ii = nstack
1583  nstack = nstack + 100
1584 
1585  ! Allocate the memory for stack and copy the old values
1586  ! from tmpStack.
1587 
1588  allocate (stack(nstack), stat=ierr)
1589  if (ierr /= 0) &
1590  call terminate("qsortinterpPt", &
1591  "Memory reallocation error for stack")
1592  stack(1:ii) = tmpstack(1:ii)
1593 
1594  ! And finally release the memory of tmpStack.
1595 
1596  deallocate (tmpstack, stat=ierr)
1597  if (ierr /= 0) &
1598  call terminate("qsortinterpPt", &
1599  "Deallocation error for tmpStack")
1600  end if
1601 
1602  if ((r - i + 1) >= (j - l)) then
1603  stack(jstack) = r
1604  r = j - 1
1605  stack(jstack - 1) = j
1606  else
1607  stack(jstack) = j - 1
1608  stack(jstack - 1) = l
1609  l = j
1610  end if
1611 
1612  end if
1613  end do
1614 
1615  ! Release the memory of stack.
1616 
1617  deallocate (stack, stat=ierr)
1618  if (ierr /= 0) &
1619  call terminate("qsortinterpPt", &
1620  "Deallocation error for stack")
1621 
1622  ! Check in debug mode whether the array is really sorted.
1623 
1624  if (debug) then
1625  do i = 1, (nn - 1)
1626  if (arr(i + 1) < arr(i)) &
1627  call terminate("qsortinterpPt", &
1628  "Array is not sorted correctly")
1629  end do
1630  end if
1631 
1632  end subroutine qsortinterppttype
1633 
1634 end module usersurfaceintegrations
subroutine destroyserialhex(ADT)
Definition: adtBuild.F90:1266
subroutine buildserialhex(nHexa, nNodes, coor, hexaConn, ADT)
Definition: adtBuild.F90:1141
subroutine containmenttreesearchsinglepoint(ADT, coor, intInfo, uvw, arrDonor, nInterpol, BB, frontLeaves, frontLeavesNew, failed)
subroutine mindistancetreesearchsinglepoint(ADT, coor, intInfo, uvw, arrDonor, nInterpol, BB, frontLeaves, frontLeavesNew)
integer(kind=inttype), dimension(:), pointer stack
Definition: adtUtils.F90:20
Definition: BCData.F90:1
Definition: block.F90:1
integer(kind=inttype) ndom
Definition: block.F90:761
type(blocktype), dimension(:, :, :), allocatable, target flowdomsd
Definition: block.F90:772
type(blocktype), dimension(:, :, :), allocatable, target flowdoms
Definition: block.F90:771
integer(kind=inttype) jl
integer(kind=inttype) ie
integer(kind=inttype), dimension(:, :, :), pointer iblank
integer(kind=inttype) jb
integer(kind=inttype) kb
integer(kind=inttype), dimension(:), pointer bcfaceid
integer(kind=inttype) nbocos
integer(kind=inttype), dimension(:), pointer bctype
real(kind=realtype), dimension(:, :, :), pointer vol
integer(kind=inttype) ib
integer(kind=inttype) je
integer(kind=inttype) ke
real(kind=realtype), dimension(:, :, :, :), pointer x
integer(kind=inttype) kl
integer(kind=inttype) il
integer adflow_comm_world
integer(kind=inttype), parameter oversetouterbound
Definition: constants.F90:276
real(kind=realtype), parameter zero
Definition: constants.F90:71
integer(kind=inttype), parameter imax
Definition: constants.F90:293
integer(kind=inttype), parameter izippflowy
Definition: constants.F90:508
integer(kind=inttype), parameter kmin
Definition: constants.F90:296
integer(kind=inttype), parameter jmax
Definition: constants.F90:295
real(kind=realtype), parameter eighth
Definition: constants.F90:84
integer, parameter irho
Definition: constants.F90:34
integer(kind=inttype), parameter izippflowx
Definition: constants.F90:507
integer, parameter ivx
Definition: constants.F90:35
integer(kind=inttype), parameter izippflowsface
Definition: constants.F90:506
integer(kind=inttype), parameter imin
Definition: constants.F90:292
integer, parameter ivz
Definition: constants.F90:37
integer(kind=inttype), parameter izippflowgamma
Definition: constants.F90:505
integer(kind=inttype), parameter nzippflowcomm
Definition: constants.F90:502
real(kind=realtype), parameter large
Definition: constants.F90:24
integer(kind=inttype), parameter kmax
Definition: constants.F90:297
integer, parameter ivy
Definition: constants.F90:36
integer(kind=inttype), parameter izippflowp
Definition: constants.F90:504
integer(kind=inttype), parameter jmin
Definition: constants.F90:294
integer(kind=inttype), parameter izippflowz
Definition: constants.F90:509
subroutine computettot(rho, u, v, w, p, Ttot)
Definition: flowUtils.F90:5
subroutine computeptot(rho, u, v, w, p, ptot)
Definition: flowUtils.F90:171
real(kind=realtype) rhoref
real(kind=realtype) tref
real(kind=realtype) lref
real(kind=realtype) pref
real(kind=realtype) timeref
real(kind=realtype) oversetprojtol
Definition: inputParam.F90:884
subroutine fractoweights(frac, weights)
subroutine getcumulativeform(sizeArray, n, cumArray)
subroutine fractoweights2(frac, weights)
logical function faminlist(famID, famList)
Definition: sorting.F90:7
integer(kind=inttype), parameter nuserintsurfsmax
type(userintsurf), dimension(nuserintsurfsmax), target userintsurfs
subroutine integrateusersurfaces_d(localValues, localValuesd, famList, sps)
subroutine commuserintegrationsurfacevars_d(recvBuffer, recvBufferd, varStart, varEnd, comm)
subroutine integrateusersurfaces(localValues, famList, sps)
subroutine addintegrationsurface(pts, conn, famName, famID, isInflow, nPts, nConn)
subroutine commuserintegrationsurfacevars(recvBuffer, varStart, varEnd, comm)
subroutine integrateusersurfaces_b(localValues, localValuesd, famList, sps)
subroutine commuserintegrationsurfacevars_b(recvBuffer, recvBufferd, varStart, varEnd, comm)
subroutine performinterpolation(pts, oBlocks, useDual, comm)
subroutine buildvolumeadts(oBlocks, useDual)
Definition: utils.F90:1
real(kind=realtype) function mynorm2(x)
Definition: utils.F90:1697
subroutine echk(errorcode, file, line)
Definition: utils.F90:5722
subroutine setpointers(nn, mm, ll)
Definition: utils.F90:3237
subroutine terminate(routineName, errorMessage)
Definition: utils.F90:502
subroutine flowintegrationzipper_b(isinflow, conn, fams, vars, varsd, localvalues, localvaluesd, famlist, sps, ptvalid)
subroutine flowintegrationzipper_d(isinflow, conn, fams, vars, varsd, localvalues, localvaluesd, famlist, sps, ptvalid)
subroutine flowintegrationzipper(isInflow, conn, fams, vars, localValues, famList, sps, ptValid)