ADflow  v1.0
ADflow is a finite volume RANS solver tailored for gradient-based aerodynamic design optimization.
makeBoundaryStrings.F90
Go to the documentation of this file.
2 contains
 ! makegapboundarystrings builds the "gap boundary" strings: directed
 ! edge segments along wall-patch edges that separate a compute cell
 ! (iBlank == 1) from a blanked/interpolated cell (iBlank <= 0). The
 ! strings are assembled per cluster on each processor, gathered onto
 ! the root processor, reduced, and merged into the single 'master'
 ! string. A combined 'fullWall' surface (all cluster walls) with its
 ! ADT is also built for later zipper containment searches.
 !
 ! Parameters:
 !   zipperFamList - intent(in), family IDs of the wall patches to include
 !   level, sps    - intent(in), multigrid level and spectral instance
 !   master        - merged gap string; only populated on the root proc.
 !                   NOTE(review): declared without an intent attribute -
 !                   presumably intent(out); confirm against callers.
3  subroutine makegapboundarystrings(zipperFamList, level, sps, master)
4 
5  use constants
6  use adtbuild, only: buildserialquad
8  il, jl, kl, ndom, righthanded, bcfaceid, bctype
11  use stringops
14  use sorting, only: faminlist
15  implicit none
16 
17  ! Input Params
18  integer(kind=intType), intent(in), dimension(:) :: zipperFamLIst
19  integer(kind=intType), intent(in) :: level, sps
20 
21  ! Working
22  integer(kind=intType) :: i, j, k, nn, mm, ii, jj, kk, c, e, idx
23  integer(kind=intType) :: i1, i2, j1, j2, iBeg, iEnd, jBeg, jEnd
24  integer(kind=intType) :: i3, i4, j3, j4
25  integer(kind=intType) :: iStart, iSize, ierr, iProc, firstElem, curElem
26  integer(kind=intType) :: below, above, left, right, nNodes, nElems
27  integer(kind=intType) :: patchNodeCounter, nZipped, gc
28  integer(kind=intType), dimension(:), allocatable :: nElemsProc, nNodesProc
29  integer(kind=intType), dimension(:, :), pointer :: gcp
30  real(kind=realtype), dimension(:, :, :), pointer :: xx
31  real(kind=realtype), dimension(3) :: s1, s2, s3, s4, v1, v2, v3, v4, x0
32  real(kind=realtype) :: fact, timea, minnorm
33 
34  real(kind=realtype), dimension(:, :, :), allocatable :: patchnormals
35  real(kind=realtype), dimension(:, :), allocatable :: patchh
36  integer(kind=intType), dimension(:), allocatable :: epc, surfaceSeeds, inverse
37  logical, dimension(:), allocatable :: badString
38  type(oversetstring), dimension(:), allocatable, target :: localStrings
39  type(oversetstring), dimension(:), allocatable, target :: globalStrings
40  type(oversetstring) :: master, pocketMaster
41  type(oversetstring), pointer :: stringsLL, str
42  type(oversetstring), dimension(:), allocatable, target :: strings
43  integer(kind=intType) :: nFullStrings, nUnique, famID
44  logical :: regularOrdering
45  integer mpiStatus(MPI_STATUS_SIZE)
46 
47  ! Wall search related
48  integer(kind=intType) :: ncells
49  type(oversetwall), dimension(:), allocatable, target :: walls
50  type(oversetwall), target :: fullWall
51  character(80) :: fileName
52 
53  ! Set small number to avoid division by zero when computing normal vectors
54  minnorm = 1.0e-14
55 
56  ! Loop over the wall faces counting up the edges that straddle a
57  ! compute cell and a blanked (or interpolated) cell.
58 
59  allocate (epc(nclusters)) ! epc = elementsPerCluster
60  epc = 0
61 
62  ! Get a (very) large overestimate of the total number of edges in a
63  ! cluster: as twice the number of nodes.
64  domainloop: do nn = 1, ndom
65  call setpointers(nn, level, sps)
66  call getwallsize(zipperfamlist, nnodes, nelems, .false.)
67  c = clusters(cumdomproc(myid) + nn)
68  epc(c) = epc(c) + 2 * nnodes
69  end do domainloop
70 
71  ! Allocate the space we need in the local strings.
72  allocate (localstrings(nclusters))
73  do c = 1, nclusters
74  call nullifystring(localstrings(c))
75  end do
76 
77  do c = 1, nclusters
78  allocate ( &
79  localstrings(c)%conn(2, epc(c)), localstrings(c)%nodeData(10, 2 * epc(c)), &
80  localstrings(c)%intNodeData(3, 2 * epc(c)))
81  localstrings(c)%nodeData = zero
82  localstrings(c)%nNodes = 0
83  localstrings(c)%nElems = 0
84 
85  ! Assign string pointers immediately after allocation
86  call setstringpointers(localstrings(c))
87 
88  end do
89  deallocate (epc)
90  ! And now loop back through the walls and add in the
91  ! elems/nodes/normals/indices for each edge.
92 
93  ! Reset the elems per cluster. We will count up the actual number
94  ! now.
95 
96  patchnodecounter = 0
97  domainloop2: do nn = 1, ndom
98  call setpointers(nn, level, sps)
99  ! The current cluster is 'c'
100  c = clusters(cumdomproc(myid) + nn)
101 
102  bocoloop: do mm = 1, nbocos
103  famid = bcdata(mm)%famID
104  if (faminlist(famid, zipperfamlist)) then
105 
 ! Point xx at the coordinate plane of this block face and gcp at
 ! the globalCell plane of the adjacent compute cells. 'fact' is a
 ! +/-1 sign applied to the averaged node normal below, and
 ! 'regularOrdering' selects the directed-edge node order so all
 ! faces keep a consistent orientation.
106  select case (bcfaceid(mm))
107  case (imin)
108  xx => x(1, :, :, :)
109  gcp => globalcell(2, :, :)
110  fact = one
111  regularordering = .true.
112  case (imax)
113  xx => x(il, :, :, :)
114  gcp => globalcell(il, :, :)
115  fact = -one
116  regularordering = .false.
117  case (jmin)
118  xx => x(:, 1, :, :)
119  gcp => globalcell(:, 2, :)
120  fact = -one
121  regularordering = .false.
122  case (jmax)
123  xx => x(:, jl, :, :)
124  gcp => globalcell(:, jl, :)
125  fact = one
126  regularordering = .true.
127  case (kmin)
128  xx => x(:, :, 1, :)
129  gcp => globalcell(:, :, 2)
130  fact = one
131  regularordering = .true.
132  case (kmax)
133  xx => x(:, :, kl, :)
134  gcp => globalcell(:, :, kl)
135  fact = -one
136  regularordering = .false.
137  end select
138 
139  ! Need to reverse once more for a left-handed block
140  if (.not. righthanded) then
141  fact = -fact
142  regularordering = .not. (regularordering)
143  end if
144 
145  ! Before we go through and find the actual elems,
146  ! precompute the patch numbering and node-based averaged
147  ! unit normals
148  jbeg = bcdata(mm)%jnBeg; jend = bcdata(mm)%jnEnd
149  ibeg = bcdata(mm)%inBeg; iend = bcdata(mm)%inEnd
150 
151  allocate (patchnormals(3, ibeg:iend, jbeg:jend), &
152  patchh(ibeg:iend, jbeg:jend))
153 
 ! Node loop over the patch. Note: xx indices are offset by +1
 ! relative to the patch node indices (i, j) - see the "pointer
 ! offset" notes further below.
154  do j = jbeg, jend
155  do i = ibeg, iend
156  patchnodecounter = patchnodecounter + 1
157  x0 = xx(i + 1, j + 1, :)
158 
159  ! Normalized normals for each surrounding face.
160  v1 = xx(i + 2, j + 1, :) - x0
161  v2 = xx(i + 1, j + 2, :) - x0
162  v3 = xx(i, j + 1, :) - x0
163  v4 = xx(i + 1, j, :) - x0
164 
165  call cross_prod(v1, v2, s1)
166  call cross_prod(v2, v3, s2)
167  call cross_prod(v3, v4, s3)
168  call cross_prod(v4, v1, s4)
169 
170  ! When we have an O-grid node, two of the v vectors will be the same.
171  ! Therefore, one of the s vectors will be zero. So we define minNorm
172  ! to avoid a division by zero. This will not affect the averaged normal
173  ! vector, since we will normalize it anyway in the end.
174 
175  s1 = s1 / max(minnorm, mynorm2(s1))
176  s2 = s2 / max(minnorm, mynorm2(s2))
177  s3 = s3 / max(minnorm, mynorm2(s3))
178  s4 = s4 / max(minnorm, mynorm2(s4))
179 
180  ! Average and do final normalization including
181  ! correcting for inward normals.
182  s1 = fourth * (s1 + s2 + s3 + s4)
183  patchnormals(:, i, j) = s1 / mynorm2(s1) * fact
184 
185  ! Get the maximum edge length for this node. Use the
186  ! 4 diagonal nodes:
187  v1 = xx(i + 2, j + 2, :) - x0
188  v2 = xx(i, j + 2, :) - x0
189  v3 = xx(i, j, :) - x0
190  v4 = xx(i + 2, j, :) - x0
191 
192  patchh(i, j) = max(mynorm2(v1), mynorm2(v2), mynorm2(v3), mynorm2(v4))
193 
194  end do
195  end do
196 
197  ! ------------------
198  ! Check the i-edges
199  ! ------------------
200  do j = jbeg, jend ! <------- Node loop
201  do i = ibeg + 1, iend ! <------- Face Loop
202  if (gcp(i + 1, j + 1) >= 0 .and. gcp(i + 1, j + 2) >= 0) then
203  below = max(bcdata(mm)%iBlank(i, j), 0)
204  above = max(bcdata(mm)%iBlank(i, j + 1), 0)
205 
206  if ((below == 0 .and. above == 1) .or. (below == 1 .and. above == 0)) then
207  localstrings(c)%nNodes = localstrings(c)%nNodes + 2
208  localstrings(c)%nElems = localstrings(c)%nElems + 1
209  e = localstrings(c)%nElems
210 
211  ! Make sure the real cell is on the LEFT
212  ! of the directed edge
213 
214  if (below == 0) then
215  i1 = i - 1; j1 = j
216  i2 = i; j2 = j
217 
218  i3 = i1; j3 = j + 1
219  i4 = i2; j4 = j + 1
220  else
221  i1 = i; j1 = j
222  i2 = i - 1; j2 = j
223 
224  i3 = i1; j3 = j - 1
225  i4 = i2; j4 = j - 1
226  end if
227 
228  ! Don't forget pointer offset for xx
229  if (regularordering) then
230  localstrings(c)%nodeData(1:3, 2 * e - 1) = xx(i1 + 1, j1 + 1, :)
231  localstrings(c)%nodeData(1:3, 2 * e) = xx(i2 + 1, j2 + 1, :)
232 
233  ! Global index of node on reduced global surface.
234  localstrings(c)%intNodeData(1, 2 * e - 1) = bcdata(mm)%surfIndex(i1, j1)
235  localstrings(c)%intNodeData(1, 2 * e) = bcdata(mm)%surfIndex(i2, j2)
236  else
237  localstrings(c)%nodeData(1:3, 2 * e) = xx(i1 + 1, j1 + 1, :)
238  localstrings(c)%nodeData(1:3, 2 * e - 1) = xx(i2 + 1, j2 + 1, :)
239 
240  localstrings(c)%intNodeData(1, 2 * e) = bcdata(mm)%surfIndex(i1, j1)
241  localstrings(c)%intNodeData(1, 2 * e - 1) = bcdata(mm)%surfIndex(i2, j2)
242 
243  end if
244  v1 = xx(i1 + 1, j1 + 1, :) - xx(i3 + 1, j3 + 1, :)
245  v1 = v1 / mynorm2(v1)
246 
247  v2 = xx(i2 + 1, j2 + 1, :) - xx(i4 + 1, j4 + 1, :)
248  v2 = v2 / mynorm2(v2)
249 
250  ! Perpendicular vector
251  localstrings(c)%nodeData(7:9, 2 * e - 1) = v1
252  localstrings(c)%nodeData(7:9, 2 * e) = v2
253 
254  ! Averaged node normal
255  localstrings(c)%nodeData(4:6, 2 * e - 1) = patchnormals(:, i1, j1)
256  localstrings(c)%nodeData(4:6, 2 * e) = patchnormals(:, i2, j2)
257 
258  ! Surface deviation estimation
259  localstrings(c)%nodeData(10, 2 * e - 1) = patchh(i1, j1)
260  localstrings(c)%nodeData(10, 2 * e) = patchh(i2, j2)
261 
262  ! Cluster of the node
263  localstrings(c)%intNodeData(2, 2 * e - 1) = c
264  localstrings(c)%intNodeData(2, 2 * e) = c
265 
266  ! Family ID of node. Not implemented yet.
267  localstrings(c)%intNodeData(3, 2 * e - 1) = famid
268  localstrings(c)%intNodeData(3, 2 * e) = famid
269 
270  ! Connectivity
271  localstrings(c)%conn(:, e) = (/2 * e - 1, 2 * e/)
272  end if
273  end if
274  end do
275  end do
276 
277  ! -----------------
278  ! Check the j-edges
279  ! -----------------
280  do j = jbeg + 1, jend ! <------- Face loop
281  do i = ibeg, iend ! <------- Node Loop
282  if (gcp(i + 1, j + 1) >= 0 .and. gcp(i + 2, j + 1) >= 0) then
283  left = max(bcdata(mm)%iBlank(i, j), 0)
284  right = max(bcdata(mm)%iBlank(i + 1, j), 0)
285 
286  if ((left == 0 .and. right == 1) .or. (left == 1 .and. right == 0)) then
287  localstrings(c)%nNodes = localstrings(c)%nNodes + 2
288  localstrings(c)%nElems = localstrings(c)%nElems + 1
289 
290  e = localstrings(c)%nElems
291 
292  ! Again, make sure the real cell is on the LEFT
293  ! of the directed edge
294  if (left == 0) then
295  i1 = i; j1 = j
296  i2 = i; j2 = j - 1
297 
298  i3 = i1 + 1; j3 = j1
299  i4 = i2 + 1; j4 = j2
300 
301  else
302  i1 = i; j1 = j - 1
303  i2 = i; j2 = j
304 
305  i3 = i1 - 1; j3 = j1
306  i4 = i2 - 1; j4 = j2
307  end if
308 
309  ! Don't forget pointer offset xx
310  if (regularordering) then
311  localstrings(c)%nodeData(1:3, 2 * e - 1) = xx(i1 + 1, j1 + 1, :)
312  localstrings(c)%nodeData(1:3, 2 * e) = xx(i2 + 1, j2 + 1, :)
313 
314  ! Index of global node
315  localstrings(c)%intNodeData(1, 2 * e - 1) = bcdata(mm)%surfIndex(i1, j1)
316  localstrings(c)%intNodeData(1, 2 * e) = bcdata(mm)%surfIndex(i2, j2)
317 
318  else
319  localstrings(c)%nodeData(1:3, 2 * e) = xx(i1 + 1, j1 + 1, :)
320  localstrings(c)%nodeData(1:3, 2 * e - 1) = xx(i2 + 1, j2 + 1, :)
321 
322  ! Index of global node
323  localstrings(c)%intNodeData(1, 2 * e) = bcdata(mm)%surfIndex(i1, j1)
324  localstrings(c)%intNodeData(1, 2 * e - 1) = bcdata(mm)%surfIndex(i2, j2)
325 
326  end if
327 
328  v1 = xx(i1 + 1, j1 + 1, :) - xx(i3 + 1, j3 + 1, :)
329  v1 = v1 / mynorm2(v1)
330 
331  v2 = xx(i2 + 1, j2 + 1, :) - xx(i4 + 1, j4 + 1, :)
332  v2 = v2 / mynorm2(v2)
333 
334  ! Perpendicular vector
335  localstrings(c)%nodeData(7:9, 2 * e - 1) = v1
336  localstrings(c)%nodeData(7:9, 2 * e) = v2
337 
338  ! Averaged node normal
339  localstrings(c)%nodeData(4:6, 2 * e - 1) = patchnormals(:, i1, j1)
340  localstrings(c)%nodeData(4:6, 2 * e) = patchnormals(:, i2, j2)
341 
342  ! Surface deviation estimation
343  localstrings(c)%nodeData(10, 2 * e - 1) = patchh(i1, j1)
344  localstrings(c)%nodeData(10, 2 * e) = patchh(i2, j2)
345 
346  ! Cluster of the node
347  localstrings(c)%intNodeData(2, 2 * e - 1) = c
348  localstrings(c)%intNodeData(2, 2 * e) = c
349 
350  ! Family ID of node. Not implemented yet.
351  localstrings(c)%intNodeData(3, 2 * e - 1) = famid
352  localstrings(c)%intNodeData(3, 2 * e) = famid
353 
354  ! Connectivity
355  localstrings(c)%conn(:, e) = (/2 * e - 1, 2 * e/)
356  end if
357  end if
358  end do
359  end do
360  deallocate (patchnormals, patchh)
361  end if
362  end do bocoloop
363  end do domainloop2
364 
365  ! Before we send the gap strings to the root proc, reduce them so
366  ! the root proc has a little less work to do.
367  do c = 1, nclusters
368  call reducegapstring(localstrings(c))
369  end do
370 
371  ! Allocate the global list of strings on the root proc
372  if (myid == 0) then
373  allocate (globalstrings(nclusters))
374  do c = 1, nclusters
375  call nullifystring(globalstrings(c))
376  end do
377  end if
378 
379  ! Next for each cluster, gather to the root the gap boundary strings
380 
381  allocate (nnodesproc(0:nproc), nelemsproc(0:nproc))
382 
383  do c = 1, nclusters
384  ! Now let the root processor know how many nodes/elements my
385  ! processor will be sending:
386  nelemsproc(0) = 0
387  nnodesproc(0) = 0
388 
389  call mpi_gather(localstrings(c)%nElems, 1, adflow_integer, nelemsproc(1:nproc), 1, adflow_integer, 0, &
390  adflow_comm_world, ierr)
391  call echk(ierr, __file__, __line__)
392 
393  call mpi_gather(localstrings(c)%nNodes, 1, adflow_integer, nnodesproc(1:nproc), 1, adflow_integer, 0, &
394  adflow_comm_world, ierr)
395  call echk(ierr, __file__, __line__)
396 
397  if (myid == 0) then
398 
399  ! Before we can receive stuff, we need to determine the node
400  ! off-sets such that the conn from the strings on each processor
401  ! don't overlap.
402 
 ! Convert the per-proc counts to a cumulative (prefix-sum) form:
 ! nnodesproc(i)/nelemsproc(i) become the offsets up to and
 ! including proc i-1's contribution.
403  do i = 2, nproc
404  ! The 0 and 1st entry of the nEdgeProc and nNodeProc arrays are already correct:
405  nnodesproc(i) = nnodesproc(i) + nnodesproc(i - 1)
406  nelemsproc(i) = nelemsproc(i) + nelemsproc(i - 1)
407  end do
408 
409  allocate (globalstrings(c)%nodeData(10, nnodesproc(nproc)), &
410  globalstrings(c)%intNodeData(3, nnodesproc(nproc)), &
411  globalstrings(c)%conn(2, nelemsproc(nproc)))
412 
413  ! Always set the pointers immediately after allocation
414  call setstringpointers(globalstrings(c))
415 
416  ! Put proc 0's own nodes/normals/indices in the global list if we have any
417  do i = 1, localstrings(c)%nNodes
418  globalstrings(c)%nodeData(:, i) = localstrings(c)%nodeData(:, i)
419  globalstrings(c)%intNodeData(:, i) = localstrings(c)%intNodeData(:, i)
420  end do
421 
422  ! Put proc 0's own elements in the global list if we have any
423  do i = 1, localstrings(c)%nElems
424  globalstrings(c)%conn(:, i) = localstrings(c)%conn(:, i)
425  end do
426 
427  ! Set my total sizes
428  globalstrings(c)%nNodes = nnodesproc(nproc)
429  globalstrings(c)%nElems = nelemsproc(nproc)
430 
431  ! Now receive from each of the other procs.
432  do iproc = 1, nproc - 1
433  ! Check if this proc actually has anything to send:
434  if ((nelemsproc(iproc + 1) - nelemsproc(iproc)) > 0) then
435  istart = nnodesproc(iproc) + 1
436  iend = nnodesproc(iproc + 1)
437  isize = iend - istart + 1
438 
439  ! ----------- Node sized arrays -------------
440  call mpi_recv(globalstrings(c)%nodeData(:, istart:iend), isize * 10, &
441  adflow_real, iproc, iproc, &
442  adflow_comm_world, mpistatus, ierr)
443  call echk(ierr, __file__, __line__)
444 
445  call mpi_recv(globalstrings(c)%intNodeData(:, istart:iend), isize * 3, &
446  adflow_integer, iproc, iproc, &
447  adflow_comm_world, mpistatus, ierr)
448  call echk(ierr, __file__, __line__)
449 
450  ! ----------- Element sized arrays -------------
451  istart = nelemsproc(iproc) + 1
452  iend = nelemsproc(iproc + 1)
453  isize = iend - istart + 1
454  call mpi_recv(globalstrings(c)%conn(:, istart:iend), isize * 2, adflow_integer, iproc, iproc, &
455  adflow_comm_world, mpistatus, ierr)
456  call echk(ierr, __file__, __line__)
457 
458  ! Increment the conn we just received by the node offset:
459  do i = istart, iend
460  globalstrings(c)%conn(:, i) = globalstrings(c)%conn(:, i) + nnodesproc(iproc)
461  end do
462  end if
463  end do
464  else
465  ! Not root proc so send my stuff if we have anything:
466  if (localstrings(c)%nElems > 0) then
467 
468  ! ----------- Node sized arrays -------------
469  call mpi_send(localstrings(c)%nodeData, 10 * localstrings(c)%nNodes, adflow_real, 0, myid, &
470  adflow_comm_world, ierr)
471  call echk(ierr, __file__, __line__)
472 
473  call mpi_send(localstrings(c)%intNodeData, 3 * localstrings(c)%nNodes, adflow_integer, 0, myid, &
474  adflow_comm_world, ierr)
475  call echk(ierr, __file__, __line__)
476 
477  ! ----------- Element sized arrays -------------
478  call mpi_send(localstrings(c)%conn, 2 * localstrings(c)%nElems, adflow_integer, 0, myid, &
479  adflow_comm_world, ierr)
480  call echk(ierr, __file__, __line__)
481 
482  end if
483  end if
484  end do
485 
486  ! Everyone is now done with the local strings
487  do c = 1, nclusters
488  call deallocatestring(localstrings(c))
489  end do
490  deallocate (localstrings)
491 
492  ! Before we perform serial code implementations, surface wall info
493  ! need to be communicated to root proc (zero) too. This will be used
494  ! later to identify zipper triangle containment search for identifying
495  ! quad surface cell info for force integration. Search will be
496  ! performed on dual surface cells.
497 
498  ! ---------- Begin wall data accumulation -------------
499  allocate (walls(nclusters))
500  ! Build primal quad walls ADT
501  call buildclusterwalls(level, sps, .false., walls, zipperfamlist, size(zipperfamlist))
502 
503  ! Finally build up a "full wall" that is made up of all the cluster
504  ! walls.
505 
 ! First pass: total node/cell counts over all clusters, for sizing.
506  nnodes = 0
507  ncells = 0
508  do i = 1, nclusters
509  nnodes = nnodes + walls(i)%nNodes
510  ncells = ncells + walls(i)%nCells
511  end do
512 
513  allocate (fullwall%x(3, nnodes))
514  allocate (fullwall%conn(4, ncells))
515  allocate (fullwall%ind(nnodes))
516  allocate (fullwall%indCell(ncells))
517 
 ! Second pass: copy each cluster's nodes/cells, shifting the cell
 ! connectivity by the running node offset 'ii'.
518  nnodes = 0
519  ncells = 0
520  ii = 0
521  do i = 1, nclusters
522 
523  ! Add in the nodes/elements from this cluster
524 
525  do j = 1, walls(i)%nNodes
526  nnodes = nnodes + 1
527  fullwall%x(:, nnodes) = walls(i)%x(:, j)
528  fullwall%ind(nnodes) = walls(i)%ind(j)
529  end do
530 
531  do j = 1, walls(i)%nCells
532  ncells = ncells + 1
533  fullwall%conn(:, ncells) = walls(i)%conn(:, j) + ii
534  fullwall%indCell(ncells) = walls(i)%indCell(j)
535  end do
536 
537  ! Increment the node offset
538  ii = ii + walls(i)%nNodes
539  end do
540 
541  ! Finish the setup of the full wall.
542  fullwall%nCells = ncells
543  fullwall%nNodes = nnodes
544  call buildserialquad(ncells, nnodes, fullwall%x, fullwall%conn, fullwall%ADT)
545 
546  ! Now all the procs have fullWall info. Note: this is overkill,
547  ! since only proc 0 needs them for zipper triangle containment
548  ! search.
549 
550  ! =================================================================
551  ! Serial code from here on out
552  ! =================================================================
553 
554  if (myid == 0) then
555  timea = mpi_wtime()
556 
557  ! First thing we do is reduce each of the global cluster gap
558  ! strings
559  do c = 1, nclusters
560  call reducegapstring(globalstrings(c))
561  end do
562 
563  ! Combine all global strings together into a masterString. First
564  ! count up the sizes
565  nelems = 0
566  nnodes = 0
567  do c = 1, nclusters
568  nelems = nelems + globalstrings(c)%nElems
569  nnodes = nnodes + globalstrings(c)%nNodes
570  end do
571 
572  call nullifystring(master)
573  master%nNodes = nnodes
574  master%nElems = nelems
575  allocate (master%nodeData(10, nnodes), master%conn(2, nelems), &
576  master%intNodeData(3, nnodes))
577 
578  ! Set the string pointers to the individual arrays
579  call setstringpointers(master)
580 
581  nnodes = 0 ! This is our running counter for offseting nodes
582  ii = 0
583  jj = 0
584 
585  do c = 1, nclusters
586  do i = 1, globalstrings(c)%nNodes
587  ii = ii + 1
588  master%nodeData(:, ii) = globalstrings(c)%nodeData(:, i)
589  master%intNodeData(:, ii) = globalstrings(c)%intNodeData(:, i)
590  end do
591 
592  do i = 1, globalstrings(c)%nElems
593  jj = jj + 1
594  master%conn(:, jj) = globalstrings(c)%conn(:, i) + nnodes
595  end do
596  nnodes = ii
597  end do
598 
599  ! Now the root is done with the global strings so deallocate that
600  ! too.
601  do c = 1, nclusters
602  call deallocatestring(globalstrings(c))
603  end do
604  deallocate (globalstrings)
605  end if
606 
607  end subroutine makegapboundarystrings
608 end module gapboundaries
subroutine buildclusterwalls(level, sps, useDual, walls, famList, nFamList)
subroutine buildserialquad(nQuad, nNodes, coor, quadsConn, ADT)
Definition: adtBuild.F90:1278
Definition: BCData.F90:1
integer(kind=inttype) jl
logical righthanded
integer(kind=inttype), dimension(:, :, :), pointer globalcell
integer(kind=inttype), dimension(:), pointer bcfaceid
integer(kind=inttype) nbocos
integer(kind=inttype), dimension(:, :, :), pointer globalnode
integer(kind=inttype), dimension(:), pointer bctype
real(kind=realtype), dimension(:, :, :, :), pointer x
integer(kind=inttype) kl
integer(kind=inttype) il
integer adflow_comm_world
real(kind=realtype), parameter zero
Definition: constants.F90:71
integer(kind=inttype), parameter imax
Definition: constants.F90:293
integer(kind=inttype), parameter kmin
Definition: constants.F90:296
integer(kind=inttype), parameter jmax
Definition: constants.F90:295
real(kind=realtype), parameter one
Definition: constants.F90:72
integer(kind=inttype), parameter imin
Definition: constants.F90:292
real(kind=realtype), parameter fourth
Definition: constants.F90:82
integer(kind=inttype), parameter kmax
Definition: constants.F90:297
integer(kind=inttype), parameter jmin
Definition: constants.F90:294
subroutine makegapboundarystrings(zipperFamList, level, sps, master)
integer(kind=inttype) nclusters
Definition: overset.F90:366
integer(kind=inttype), dimension(:), allocatable cumdomproc
Definition: overset.F90:364
integer(kind=inttype), dimension(:), allocatable clusters
Definition: overset.F90:367
subroutine getwallsize(famList, nNodes, nCells, dualMesh)
logical function faminlist(famID, famList)
Definition: sorting.F90:7
subroutine reducegapstring(string)
Definition: stringOps.F90:259
subroutine nullifystring(string)
Definition: stringOps.F90:8
subroutine deallocatestring(string)
Definition: stringOps.F90:38
subroutine setstringpointers(string)
Definition: stringOps.F90:89
Definition: utils.F90:1
subroutine cross_prod(a, b, c)
Definition: utils.F90:1721
real(kind=realtype) function mynorm2(x)
Definition: utils.F90:1697
subroutine echk(errorcode, file, line)
Definition: utils.F90:5722
subroutine setpointers(nn, mm, ll)
Definition: utils.F90:3237