ADflow  v1.0
ADflow is a finite volume RANS solver tailored for gradient-based aerodynamic design optimization.
buildClusterWalls.F90
Go to the documentation of this file.
1 subroutine buildclusterwalls(level, sps, useDual, walls, famList, nFamList)
2 
3  ! This routine will build a global reduced surface mesh and ADT
4  ! for each cluster. It can build using either the primal mesh or the
5  ! dual mesh depending on the useDual option.
 !
 ! Parameters (NOTE(review): semantics inferred from usage; confirm
 ! against the oversetData/blockPointers module docs):
 !   level    - grid level index used to select flowdoms(nn, level, sps);
 !              CGNS node indices are only generated when level == 1.
 !   sps      - spectral/time instance index for flowdoms/setpointers.
 !   useDual  - if .true., build the surface from the dual (cell-center)
 !              mesh; otherwise use the primal (node-based) mesh.
 !   walls    - one oversetwall per cluster; filled with the gathered,
 !              duplicate-free surface (x, conn, ind, indCell, ADT).
 !   famList  - family IDs whose boundary conditions are included.
 !   nFamList - number of entries in famList.
6 
7  use adtbuild, only: buildserialquad
8  use blockpointers
9  use communication
10  use inputphysics
12  use oversetdata
13  use inputoverset
14  use utils, only: setpointers, echk, pointreduce
15  use warping, only: getcgnsmeshindices
16  use sorting, only: faminlist
17  implicit none
18 
19  ! Input Variables
20  integer(kind=intType), intent(in) :: level, sps, nFamList
21  logical, intent(in) :: useDual
22  type(oversetwall), intent(inout), dimension(nClusters), target :: walls
23  integer(Kind=intType), intent(in) :: famList(nFamList)
24 
25  ! Local Variables
26  integer(kind=intType) :: i, j, k, l, ii, jj, kk, nn, mm, iNode, iCell, c
27  integer(kind=intType) :: iBeg, iEnd, jBeg, jEnd, ni, nj, nUnique, cellID, cellID2
28  integer(kind=intType) :: ierr, iDim, lj
29 
30  ! Data for local surface
31  integer(kind=intType) :: nNodes, nCells
32  integer(kind=intType) :: nNodesLocal, nCellsLocal
33  integer(kind=intType), dimension(:, :), allocatable :: connLocal
34  integer(kind=intType), dimension(:), allocatable :: clusterNodeLocal
35  integer(kind=intType), dimension(:), allocatable :: clusterCellLocal
36  real(kind=realtype), dimension(:, :), allocatable :: nodeslocal
37  real(kind=realtype), dimension(:, :, :), pointer :: xx, xx1, xx2, xx3, xx4 => null()
38  integer(kind=intType), dimension(:, :, :), pointer :: globalCGNSNode => null()
39  integer(kind=intType), dimension(:, :), pointer :: ind, indCGNS => null()
40  integer(kind=intType), dimension(:, :), pointer :: indCell => null()
41  logical :: regularOrdering
42 
43  ! Data for global surface
44  integer(kind=intTYpe) :: nNodesGlobal, nCellsGlobal
45  integer(kind=intType), dimension(:, :), allocatable, target :: connGlobal
46  real(kind=realtype), dimension(:, :), allocatable, target :: nodesglobal
47  integer(kind=intType), dimension(:), allocatable, target :: nodeIndicesGlobal
48  integer(kind=intType), dimension(:), allocatable, target :: nodeIndicesCGNSGlobal
49  integer(kind=intType), dimension(:), allocatable, target :: cellIndicesGlobal
50 
51  integer(kind=intType), dimension(:), allocatable :: nodesPerCluster, cellsPerCluster, cnc, ccc
52  integer(kind=intType), dimension(:), allocatable :: clusterNodeGlobal
53  integer(kind=intType), dimension(:), allocatable :: clusterCellGlobal
54  integer(kind=intType), dimension(:), allocatable :: localNodeNums
55  integer(kind=intType), dimension(:), allocatable :: nodeIndicesLocal
56  integer(kind=intType), dimension(:), allocatable :: nodeIndicesCGNSLocal
57  integer(kind=intType), dimension(:), allocatable :: cellIndicesLocal
58  integer(kind=intType), dimension(:), allocatable :: cgnsIndices, curCGNSNode
59 
60  integer(kind=intType), dimension(:), allocatable :: nCellProc, cumCellProc
61  integer(kind=intType), dimension(:), allocatable :: nNodeProc, cumNodeProc
62  real(kind=realtype), dimension(:, :), allocatable :: uniquenodes
63  integer(kind=intType), dimension(:), allocatable :: link
64  real(kind=realtype), parameter :: tol = 1e-12
65 
66  ! Pointers for easier readability
67  integer(kind=intType), dimension(:, :), pointer :: conn
68  integer(kind=intType), dimension(:), pointer :: tmpInd
69 
70  ! The first thing we do is gather all the surface nodes to
71  ! each processor such that every processor can make its own copy of
72  ! the complete surface mesh to use to search. Note that this
73  ! procedure *DOES NOT SCALE IN MEMORY*...ie eventually the surface
74  ! mesh will become too large to store on a single processor,
75  ! although this will probably not happen until the sizes get up in
76  ! the hundreds of millions of cells.
77 
78  nnodeslocal = 0
79  ncellslocal = 0
80 
81  ! Before we start, generate the local node indices for the globalCGNS index
82  ii = 0
83  do nn = 1, ndom
84  call setpointers(nn, level, sps)
85  allocate (flowdoms(nn, level, sps)%globalCGNSNode(1:il, 1:jl, 1:kl))
86  flowdoms(nn, level, sps)%globalCGNSNode = 0
87  ii = ii + il * jl * kl
88  end do
89 
90  if (level == 1) then
91  allocate (cgnsindices(3 * ii))
92  call getcgnsmeshindices(size(cgnsindices), cgnsindices)
93  ii = 0
94  do nn = 1, ndom
95  call setpointers(nn, level, sps)
96  do k = 1, kl
97  do j = 1, jl
98  do i = 1, il
99  ii = ii + 3
100  ! The reason for the +3 in the counter and the /3 is
101  ! that the CGNSMesh indices include all DOF, so it
102  ! is the total number of mesh nodes *3. Here We only
103  ! care about nodes themselves so it is sufficient to
104  ! use basic ordering.
105  flowdoms(nn, level, sps)%globalCGNSNode(i, j, k) = cgnsindices(ii) / 3
106  end do
107  end do
108  end do
109  end do
110  deallocate (cgnsindices)
111  end if
112 
 ! First loop: just count the local surface nodes/cells that belong to
 ! the requested families, so we can size the local arrays.
113  do nn = 1, ndom
114  call setpointers(nn, level, sps)
115 
116  do mm = 1, nbocos
117  if (faminlist(bcdata(mm)%famID, famlist)) then
118  ibeg = bcdata(mm)%inBeg
119  iend = bcdata(mm)%inEnd
120  jbeg = bcdata(mm)%jnBeg
121  jend = bcdata(mm)%jnEnd
122  if (usedual) then
123  nnodeslocal = nnodeslocal + &
124  (iend - ibeg + 2) * (jend - jbeg + 2)
125  ncellslocal = ncellslocal + &
126  (iend - ibeg + 1) * (jend - jbeg + 1)
127  else
128  nnodeslocal = nnodeslocal + &
129  (iend - ibeg + 1) * (jend - jbeg + 1)
130  ncellslocal = ncellslocal + &
131  (iend - ibeg) * (jend - jbeg)
132  end if
133  end if
134  end do
135  end do
136 
137  ! Now communicate these sizes with everyone
138  allocate (ncellproc(nproc), cumcellproc(0:nproc), &
139  nnodeproc(nproc), cumnodeproc(0:nproc))
140 
141  call mpi_allgather(ncellslocal, 1, adflow_integer, ncellproc, 1, adflow_integer, &
142  adflow_comm_world, ierr)
143  call echk(ierr, __file__, __line__)
144 
145  call mpi_allgather(nnodeslocal, 1, adflow_integer, nnodeproc, 1, adflow_integer, &
146  adflow_comm_world, ierr)
147  call echk(ierr, __file__, __line__)
148 
149  ! Now make cumulative versions of these
150  cumcellproc(0) = 0
151  cumnodeproc(0) = 0
152  do i = 1, nproc
153  cumcellproc(i) = cumcellproc(i - 1) + ncellproc(i)
154  cumnodeproc(i) = cumnodeproc(i - 1) + nnodeproc(i)
155  end do
156 
157  ! And save the total number of nodes and cells for reference
158  ncellsglobal = cumcellproc(nproc)
159  nnodesglobal = cumnodeproc(nproc)
160 
161  ! Allocate the space for the local nodes and element connectivity
162  allocate (nodeslocal(3, nnodeslocal), connlocal(4, ncellslocal), &
163  clustercelllocal(ncellslocal), clusternodelocal(nnodeslocal), &
164  nodeindiceslocal(nnodeslocal), nodeindicescgnslocal(nnodeslocal), &
165  cellindiceslocal(ncellslocal))
166 
167  icell = 0
168  inode = 0
169  ! Second loop over the local walls
170  do nn = 1, ndom
171  call setpointers(nn, level, sps)
172  c = clusters(cumdomproc(myid) + nn)
173  globalcgnsnode => flowdoms(nn, level, sps)%globalCGNSNode
174  do mm = 1, nbocos
175  if (faminlist(bcdata(mm)%famID, famlist)) then
176 
177  jbeg = bcdata(mm)%jnBeg - 1; jend = bcdata(mm)%jnEnd
178  ibeg = bcdata(mm)%inBeg - 1; iend = bcdata(mm)%inEnd
179 
180  if (usedual) then
181  ! For the dual we have to allocate the pointer, xx.
 ! The four xx1..xx4 pointers are offset views of the face
 ! coordinates; averaging them below yields the
 ! cell-center (dual) node locations.
182  select case (bcfaceid(mm))
183  case (imin)
184  xx1 => x(1, 0:jl, 0:kl, :)
185  xx2 => x(1, 1:je, 0:kl, :)
186  xx3 => x(1, 0:jl, 1:ke, :)
187  xx4 => x(1, 1:je, 1:ke, :)
188  ind => globalcell(2, 1:je, 1:ke)
189  case (imax)
190  xx1 => x(il, 0:jl, 0:kl, :)
191  xx2 => x(il, 1:je, 0:kl, :)
192  xx3 => x(il, 0:jl, 1:ke, :)
193  xx4 => x(il, 1:je, 1:ke, :)
194  ind => globalcell(il, 1:je, 1:ke)
195 
196  case (jmin)
197  xx1 => x(0:il, 1, 0:kl, :)
198  xx2 => x(1:ie, 1, 0:kl, :)
199  xx3 => x(0:il, 1, 1:ke, :)
200  xx4 => x(1:ie, 1, 1:ke, :)
201  ind => globalcell(1:ie, 2, 1:ke)
202 
203  case (jmax)
204  xx1 => x(0:il, jl, 0:kl, :)
205  xx2 => x(1:ie, jl, 0:kl, :)
206  xx3 => x(0:il, jl, 1:ke, :)
207  xx4 => x(1:ie, jl, 1:ke, :)
208  ind => globalcell(1:ie, jl, 1:ke)
209 
210  case (kmin)
211  xx1 => x(0:il, 0:jl, 1, :)
212  xx2 => x(1:ie, 0:jl, 1, :)
213  xx3 => x(0:il, 1:je, 1, :)
214  xx4 => x(1:ie, 1:je, 1, :)
215  ind => globalcell(1:ie, 1:je, 2)
216 
217  case (kmax)
218  xx1 => x(0:il, 0:jl, kl, :)
219  xx2 => x(1:ie, 0:jl, kl, :)
220  xx3 => x(0:il, 1:je, kl, :)
221  xx4 => x(1:ie, 1:je, kl, :)
222  ind => globalcell(1:ie, 1:je, kl)
223  end select
224 
225  else
226  select case (bcfaceid(mm))
227  case (imin)
228  xx => x(1, :, :, :)
229  ind => globalnode(1, :, :)
230  indcgns => globalcgnsnode(1, :, :)
231  ! Pointer to owned global cell indices
232  indcell => globalcell(2, :, :)
233 
234  case (imax)
235  xx => x(il, :, :, :)
236  ind => globalnode(il, :, :)
237  indcgns => globalcgnsnode(il, :, :)
238 
239  ! Pointer to owned global cell indices
240  indcell => globalcell(il, :, :)
241 
242  case (jmin)
243  xx => x(:, 1, :, :)
244  ind => globalnode(:, 1, :)
245  indcgns => globalcgnsnode(:, 1, :)
246  ! Pointer to owned global cell indices
247  indcell => globalcell(:, 2, :)
248 
249  case (jmax)
250  xx => x(:, jl, :, :)
251  ind => globalnode(:, jl, :)
252  indcgns => globalcgnsnode(:, jl, :)
253  ! Pointer to owned global cell indices
254  indcell => globalcell(:, jl, :)
255 
256  case (kmin)
257  xx => x(:, :, 1, :)
258  ind => globalnode(:, :, 1)
259  indcgns => globalcgnsnode(:, :, 1)
260  ! Pointer to owned global cell indices
261  indcell => globalcell(:, :, 2)
262 
263  case (kmax)
264  xx => x(:, :, kl, :)
265  ind => globalnode(:, :, kl)
266  indcgns => globalcgnsnode(:, :, kl)
267 
268  ! Pointer to owned global cell indices
269  indcell => globalcell(:, :, kl)
270 
271  end select
272 
273  ! Just set the 4 other pointers to xx so we can use the
274  ! same quarter-summation code below:
275  xx1 => xx
276  xx2 => xx
277  xx3 => xx
278  xx4 => xx
279 
280  end if
281 
282  ! We want to ensure that all the normals of the faces are
283  ! consistent. To ensure this, we enforce that all normals
284  ! are "into" the domain. Therefore we must treat different
285  ! faces of a block differently. For example for an iLow
286  ! face, when looping over j-k in the regular way, results
287  ! in a domain inward pointing normal for iLow but
288  ! outward pointing normal for iHigh. The same is true for
289  ! kMin and kMax. However, it is reverse for the J-faces:
290  ! This is because the way the pointers are extracted i then
291  ! k is the reverse of what "should" be for consistency. The
292  ! other two, the pointers are cyclic consistent: i,j->k,
293  ! j,k (wrap) ->i, but for the j-direction it is i,k->j when
294  ! to be consistent with the others it should be
295  ! k,i->j. Hope that made sense.
296 
297  select case (bcfaceid(mm))
298  case (imin, jmax, kmin)
299  regularordering = .true.
300  case default
301  regularordering = .false.
302  end select
303 
304  ! Now this can be reversed *again* if we have a block that
305  ! is left handed.
306  if (.not. righthanded) then
307  regularordering = .not. (regularordering)
308  end if
309 
310  if (usedual) then
311  ! Start and end bounds for NODES
312  jbeg = bcdata(mm)%jnBeg - 1; jend = bcdata(mm)%jnEnd
313  ibeg = bcdata(mm)%inBeg - 1; iend = bcdata(mm)%inEnd
314  else
315  ! Start and end bounds for NODES
316  jbeg = bcdata(mm)%jnBeg; jend = bcdata(mm)%jnEnd
317  ibeg = bcdata(mm)%inBeg; iend = bcdata(mm)%inEnd
318  end if
319 
320  ! ni, nj are the number of NODES
321  ni = iend - ibeg + 1
322  nj = jend - jbeg + 1
323 
324  ! Loop over the faces....this is the node sizes - 1
 ! Connectivity entries are *global* node numbers: the
 ! cumnodeproc(myid) offset shifts our local node counter
 ! into the allgathered global numbering used below.
325  if (regularordering) then
326  do j = 1, nj - 1
327  do i = 1, ni - 1
328  icell = icell + 1
329  connlocal(1, icell) = cumnodeproc(myid) + inode + (j - 1) * ni + i
330  connlocal(2, icell) = cumnodeproc(myid) + inode + (j - 1) * ni + i + 1
331  connlocal(3, icell) = cumnodeproc(myid) + inode + (j) * ni + i + 1
332  connlocal(4, icell) = cumnodeproc(myid) + inode + (j) * ni + i
333  ! Set the cluster
334  clustercelllocal(icell) = c
335 
336  ! Save the global cell index
337  if (usedual) then
338  cellindiceslocal(icell) = 0
339  else
340  ! Valid only when using primary nodes
341  cellindiceslocal(icell) = indcell(ibeg + i + 1, jbeg + j + 1)
342  end if
343  end do
344  end do
345  else
346  ! Do the reverse ordering
347  do j = 1, nj - 1
348  do i = 1, ni - 1
349  icell = icell + 1
350  connlocal(1, icell) = cumnodeproc(myid) + inode + (j - 1) * ni + i
351  connlocal(2, icell) = cumnodeproc(myid) + inode + (j) * ni + i
352  connlocal(3, icell) = cumnodeproc(myid) + inode + (j) * ni + i + 1
353  connlocal(4, icell) = cumnodeproc(myid) + inode + (j - 1) * ni + i + 1
354 
355  ! Set the cluster
356  clustercelllocal(icell) = c
357 
358  ! Save the global cell index
359  if (usedual) then
360  cellindiceslocal(icell) = 0
361  else
362  ! Valid only when using primary nodes
363  cellindiceslocal(icell) = indcell(ibeg + i + 1, jbeg + j + 1)
364  end if
365  end do
366  end do
367  end if
368 
369  ! Loop over the nodes
370  do j = jbeg, jend
371  do i = ibeg, iend
372  inode = inode + 1
373  ! The plus one is for the pointer offset
 ! For the primal mesh xx1..xx4 all alias xx, so the
 ! quarter-sum is just the node coordinate itself; for
 ! the dual mesh it averages the 4 surrounding nodes.
374  nodeslocal(:, inode) = fourth * ( &
375  xx1(i + 1, j + 1, :) + xx2(i + 1, j + 1, :) + &
376  xx3(i + 1, j + 1, :) + xx4(i + 1, j + 1, :))
377 
378  clusternodelocal(inode) = c
379  nodeindiceslocal(inode) = ind(i + 1, j + 1) ! +1 for pointer offset
380  if (.not. usedual) then
381  nodeindicescgnslocal(inode) = indcgns(i, j) ! No pointer offset
382  else
383  nodeindicescgnslocal(inode) = 0
384  end if
385  end do
386  end do
387  end if
388  end do
389  end do
390 
391  ! Allocate space for the global reduced surface
392  allocate (nodesglobal(3, nnodesglobal), connglobal(4, ncellsglobal), &
393  clustercellglobal(ncellsglobal), clusternodeglobal(nnodesglobal), &
394  nodeindicesglobal(nnodesglobal), nodeindicescgnsglobal(nnodesglobal), &
395  cellindicesglobal(ncellsglobal))
396 
397  ! Communicate the nodes, connectivity and cluster information to everyone
398  call mpi_allgatherv(nodeslocal, 3 * nnodeslocal, adflow_real, &
399  nodesglobal, nnodeproc * 3, cumnodeproc * 3, adflow_real, &
400  adflow_comm_world, ierr)
401  call echk(ierr, __file__, __line__)
402 
403  call mpi_allgatherv(clusternodelocal, nnodeslocal, adflow_integer, &
404  clusternodeglobal, nnodeproc, cumnodeproc, adflow_integer, &
405  adflow_comm_world, ierr)
406  call echk(ierr, __file__, __line__)
407 
408  call mpi_allgatherv(nodeindiceslocal, nnodeslocal, adflow_integer, &
409  nodeindicesglobal, nnodeproc, cumnodeproc, adflow_integer, &
410  adflow_comm_world, ierr)
411  call echk(ierr, __file__, __line__)
412 
413  call mpi_allgatherv(nodeindicescgnslocal, nnodeslocal, adflow_integer, &
414  nodeindicescgnsglobal, nnodeproc, cumnodeproc, adflow_integer, &
415  adflow_comm_world, ierr)
416  call echk(ierr, __file__, __line__)
417 
418  call mpi_allgatherv(connlocal, 4 * ncellslocal, adflow_integer, &
419  connglobal, ncellproc * 4, cumcellproc * 4, adflow_integer, &
420  adflow_comm_world, ierr)
421  call echk(ierr, __file__, __line__)
422 
423  call mpi_allgatherv(clustercelllocal, ncellslocal, adflow_integer, &
424  clustercellglobal, ncellproc, cumcellproc, adflow_integer, &
425  adflow_comm_world, ierr)
426  call echk(ierr, __file__, __line__)
427 
428  call mpi_allgatherv(cellindiceslocal, ncellslocal, adflow_integer, &
429  cellindicesglobal, ncellproc, cumcellproc, adflow_integer, &
430  adflow_comm_world, ierr)
431  call echk(ierr, __file__, __line__)
432 
433  ! Free the local data we do not need anymore
434  deallocate (nodeslocal, connlocal, clustercelllocal, clusternodelocal, &
435  ncellproc, cumcellproc, nnodeproc, cumnodeproc, nodeindiceslocal, &
436  nodeindicescgnslocal, cellindiceslocal)
437 
438  ! We will now build separate trees for each cluster.
439  allocate (nodespercluster(nclusters), cellspercluster(nclusters), &
440  cnc(nclusters), ccc(nclusters))
441  nodespercluster = 0
442  cellspercluster = 0
443 
444  ! Count up the total number of elements and nodes on each cluster
445  do i = 1, ncellsglobal
446  cellspercluster(clustercellglobal(i)) = cellspercluster(clustercellglobal(i)) + 1
447  end do
448 
449  do i = 1, nnodesglobal
450  nodespercluster(clusternodeglobal(i)) = nodespercluster(clusternodeglobal(i)) + 1
451  end do
452 
453  ! Create the list of the walls. We are reusing the overset wall derived type here.
454  allocate (localnodenums(nnodesglobal))
455 
456  ! Allocate the memory for each of the cluster nodes
457  do i = 1, nclusters
458  nnodes = nodespercluster(i)
459  ncells = cellspercluster(i)
460  walls(i)%nCells = ncells
461  walls(i)%nNodes = nnodes
462 
463  allocate (walls(i)%x(3, nnodes), walls(i)%conn(4, ncells), &
464  walls(i)%ind(nnodes))
465  allocate (walls(i)%indCell(ncells))
466  end do
467 
468  ! We now loop through the master list of nodes and elements and
469  ! "push" them back to where they should go. We also keep track of
470  ! the local node numbers so that the cluster cells can update
471  ! their own conn.
472  localnodenums = 0
473  cnc = 0
474  do i = 1, nnodesglobal
475  c = clusternodeglobal(i) ! Cluster this node belongs to
476  cnc(c) = cnc(c) + 1 ! "cluster node count:" the 'nth' node for this cluster
477 
478  walls(c)%x(:, cnc(c)) = nodesglobal(:, i)
479  walls(c)%ind(cnc(c)) = nodeindicesglobal(i)
480  localnodenums(i) = cnc(c)
481  end do
482 
483  ccc = 0
484  do i = 1, ncellsglobal
485  c = clustercellglobal(i)
486  ccc(c) = ccc(c) + 1 ! "Cluster cell count" the 'nth' cell for this cluster
487  walls(c)%conn(:, ccc(c)) = connglobal(:, i)
488 
489  walls(c)%indCell(ccc(c)) = cellindicesglobal(i)
490  end do
491 
492  do i = 1, nclusters
493 
494  ncells = walls(i)%nCells
495  nnodes = walls(i)%nNodes
496 
497  ! Firstly we need to update the conn to use our local node ordering.
498  do j = 1, ncells
499  do k = 1, 4
500  walls(i)%conn(k, j) = localnodenums(walls(i)%conn(k, j))
501  end do
502  end do
503 
504  ! Allocate temporary space for doing the point reduction.
 ! pointreduce collapses coincident nodes (within tol) that were
 ! duplicated across block/processor boundaries; 'link' maps each
 ! original node to its unique representative.
505  allocate (uniquenodes(3, nnodes), link(nnodes))
506 
507  call pointreduce(walls(i)%x, nnodes, tol, uniquenodes, link, nunique)
508 
509  ! Update the global indices. Use the returned link
510  tmpind => walls(i)%ind
511  allocate (walls(i)%ind(nunique))
512  allocate (curcgnsnode(nunique))
513  walls(i)%ind = -1
514  curcgnsnode = -1
515  do j = 1, walls(i)%nNodes
516  ! Instead of blindly setting the index, we use the
517  ! nodeIndicesCGNSGlobal to only set the globalNode with
518  ! the smallest CGNS index. This guarantees that the same node
519  ! ID is always selected independent of the block
520  ! partitioning/splitting. Note that this will work even for
521  ! the coarse levels when nodeIndicesCGNSGLobal are all 0's. In
522  ! that case the first time wall(i)%ind(link(j)) is touched,
523  ! that index is taken.
524  lj = link(j)
525  if (walls(i)%ind(lj) == -1) then
526  ! Not set yet
527  walls(i)%ind(lj) = tmpind(j)
528  curcgnsnode(lj) = nodeindicescgnsglobal(j)
529  else
530  if (nodeindicescgnsglobal(j) < curcgnsnode(lj)) then
531  ! Or the potential global CGNS node index is LOWER
532  ! than the one I already have
533  walls(i)%ind(lj) = tmpind(j)
534  curcgnsnode(lj) = nodeindicescgnsglobal(j)
535  end if
536  end if
537  end do
538  deallocate (tmpind, curcgnsnode)
539 
540  ! Reset the number of nodes to be number of unique nodes
541  nnodes = nunique
542  walls(i)%nNodes = nnodes
543 
544  ! Update the nodes with the unique ones.
545  do j = 1, nunique
546  walls(i)%x(:, j) = uniquenodes(:, j)
547  end do
548 
549  ! Update conn using the link:
550  do j = 1, ncells
551  do k = 1, 4
552  walls(i)%conn(k, j) = link(walls(i)%conn(k, j))
553  end do
554  end do
555 
556  ! Unique nodes and link are no longer needed
557  deallocate (link, uniquenodes)
558 
 ! Build the serial ADT search tree for this cluster's wall surface.
559  call buildserialquad(ncells, nnodes, walls(i)%x, walls(i)%conn, walls(i)%ADT)
560  end do
561 
562  ! Clean up memory
563  deallocate (nodesglobal, connglobal, clustercellglobal, &
564  clusternodeglobal, localnodenums, nodeindicesglobal, &
565  nodeindicescgnsglobal)
566 
567  do nn = 1, ndom
568  deallocate (flowdoms(nn, level, sps)%globalCGNSNode)
569  end do
570 
571 end subroutine buildclusterwalls
subroutine buildclusterwalls(level, sps, useDual, walls, famList, nFamList)
subroutine buildserialquad(nQuad, nNodes, coor, quadsConn, ADT)
Definition: adtBuild.F90:1278
Definition: BCData.F90:1
integer(kind=inttype) jl
logical righthanded
integer(kind=inttype) ie
integer(kind=inttype), dimension(:, :, :), pointer globalcell
integer(kind=inttype), dimension(:), pointer bcfaceid
integer(kind=inttype) nbocos
integer(kind=inttype), dimension(:, :, :), pointer globalnode
integer(kind=inttype) je
integer(kind=inttype) ke
real(kind=realtype), dimension(:, :, :, :), pointer x
integer(kind=inttype) kl
integer(kind=inttype) il
integer adflow_comm_world
integer(kind=inttype) nclusters
Definition: overset.F90:366
integer(kind=inttype), dimension(:), allocatable cumdomproc
Definition: overset.F90:364
integer(kind=inttype), dimension(:), allocatable clusters
Definition: overset.F90:367
logical function faminlist(famID, famList)
Definition: sorting.F90:7
Definition: utils.F90:1
subroutine pointreduce(pts, N, tol, uniquePts, link, nUnique)
Definition: utils.F90:4170
subroutine echk(errorcode, file, line)
Definition: utils.F90:5722
subroutine setpointers(nn, mm, ll)
Definition: utils.F90:3237
subroutine getcgnsmeshindices(ndof, indices)
Definition: warping.F90:9