ADflow  v1.0
ADflow is a finite volume RANS solver tailored for gradient-based aerodynamic design optimization.
tecplotIO.F90
Go to the documentation of this file.
1 module tecplotio
! Module-level state for ADflow's tecplot output: user-defined slices,
! lift distributions, and the counters/parameters that size them.
2 
3  use constants, only: realtype, inttype, maxstringlen, maxcgnsnamelen
4  implicit none
5  save
6 
! Fortran format descriptor used for every real value written to the
! ascii tecplot files (see writeliftdistributions).
7  character(len=maxStringLen) :: sci6 = "(ES14.6)"
8 
9  type slice
10 
11  ! nNodes : Number of nodes for this slice
12  ! ind(2, nNodes) : Indices of the nodes in the global node list on either
13  ! side of node
14  ! w(2, nNodes) : Weights used to multiply the two global nodes defined in
15  ! ind to get compute nodal values (positions, forces etc)
16  ! pL, vL, pD, vD, pM, vM : Pressure and viscous components of lift, drag, and moment
17  ! CLp, CLv, CDp, CDv, CMp, CMv : Coefficients of pressure and viscous lift, drag, and moment
18  ! chord: chord of section
19  ! pt, normal: The point and the normal that defines the slicing plane
20  ! dir_vec: a direction vector that we use to filter sliced line elements.
21  ! use_dir: flag to determine if we use the dir vec or not. if we are doing
22  ! a regular slice, e.g. wing section, we dont want to use the dir
23  ! and want to use the full slice. if we are doing a cylindrical slice
24  ! we then want to pick which direction since we would get 2 slices
25  ! on something like a nacelle in this case.
26 
27  character(len=maxStringLen) :: slicename
28  integer(kind=intType) :: sps
29  integer(kind=intType), dimension(:, :), allocatable :: ind, conn
30  real(kind=realtype), dimension(:, :), allocatable :: w, vars
31  integer(kind=intType) :: nnodes
32  real(kind=realtype) :: pl, vl, pd, vd, pm, vm, clp, clv, cdp, cdv, cmp, cmv
33  real(kind=realtype) :: chord, twist, thickness
! le, te: leading/trailing edge points of the sliced section
34  real(kind=realtype), dimension(3) :: le, te
35  real(kind=realtype), dimension(3) :: pt, normal, dir_vec
36  logical :: use_dir
37  integer(kind=intType), allocatable, dimension(:) :: famlist
38  ! here we declare that 'slice'-types also now have fx,fy and fz:
39  real(kind=realtype) :: fx, fy, fz
40  end type slice
41 
42  type liftdist
43  ! nSegments: Number of nodes to use for distribution
44  ! normal: Slice direction (normal of the plane)
45  ! normal_ind: Index of direction..1 for x, 2 for y, 3 for z
46  ! distName: Name of lift distribution
47  ! slices: The list of slices this distribution will use
48  ! delta: The current delta spacing for the distribution
49  ! slicePoints: The list of points where the slices are taken
50  character(len=maxStringLen) :: distname
51  integer(kind=intType) :: nsegments, normal_ind
52  integer(kind=intType), dimension(:), allocatable :: famlist
53  real(kind=realtype) :: normal(3)
54  real(kind=realtype) :: delta
55  real(kind=realtype), dimension(:, :), allocatable :: slicepts
56  end type liftdist
57 
! NOTE(review): liftdistinitialized and the mscon1/mscon2 tables are not
! referenced in the code visible here — presumably marching-squares
! connectivity tables initialized elsewhere in the module; confirm
! against the full source.
58  logical :: liftdistinitialized = .false.
59  integer(kind=intType) :: mscon1(16, 5), mscon2(4, 2)
60 
61  ! Data for the user supplied slices:
! nslicemax bounds both the parametric and absolute slice lists; the
! paraslices/absslices arrays are indexed (slice, spectral instance).
62  integer(kind=intType), parameter :: nslicemax = 1000
63  integer(kind=intType) :: nparaslices = 0
64  integer(kind=intType) :: nabsslices = 0
65  type(slice), dimension(:, :), allocatable :: paraslices, absslices
66 
67  ! Data for the user supplied lift distributions
68  integer(kind=intType), parameter :: nliftdistmax = 100
69  integer(kind=intType) :: nliftdists = 0
70  type(liftdist), dimension(nLiftDistMax), target :: liftdists
71 
72  ! Tecplot Variable names of the data in the lift distribution data file:
! nliftdistvar must match the number of names assigned in
! writeliftdistributions (currently 26: eta, x/y/z, forces, coeffs, etc.)
73  character(len=maxCGNSNameLen), dimension(:), allocatable :: liftdistname
74  integer(kind=intType), parameter :: nliftdistvar = 26
75 
76 contains
77  subroutine addparaslice(sliceName, pt, normal, dir_vec, use_dir, famList, n)
78  !
79  ! This subroutine is intended to be called from python.
80  ! This routine will add a parametric slice to the list of user
81  ! supplied slices.
!
! A parametric slice is re-created from the current (possibly deformed)
! surface each time it is written. One slice entry is added for every
! spectral instance.
82  use constants
83  use communication
84  use surfacefamilies
85  use surfaceutils
! NOTE(review): extraction gap — doxygen line 86 is missing here
! (presumably another 'use' statement); confirm against the original.
87  implicit none
88 
89  ! Input parameters
90  character(len=*), intent(in) :: sliceName
91  real(kind=realtype), dimension(3), intent(in) :: pt, normal, dir_vec
92  logical, intent(in) :: use_dir
93  integer(kind=intType), intent(in) :: n, famList(n)
94 
95  ! Working
96  integer(kind=intType) :: sps, sizeNode, sizeCell
97  integer(kind=intType), dimension(:), pointer :: wallList
98  real(kind=realtype), dimension(:, :), allocatable :: pts
99  integer(kind=intType), dimension(:, :), allocatable :: conn
100  integer(kind=intType), dimension(:), allocatable :: elemFam, cgnsBlockID
101 
102  if (.not. allocated(paraslices)) then
! NOTE(review): extraction gap — doxygen line 103 is missing; presumably
! the first-use allocation of 'paraslices(nslicemax, ntimeintervalsspectral)'.
! Confirm against the original source.
104  end if
105 
106  ! We have to add a slice for each spectral instance.
107  do sps = 1, ntimeintervalsspectral
! NOTE(review): extraction gap — doxygen line 108 is missing; presumably
! 'nparaslices = nparaslices + 1' (mirrors addabsslice below). Confirm.
109 
110  if (nparaslices > nslicemax) then
111  print *, 'Error: Exceeded the maximum number of slices. Increase nSliceMax'
112  stop
113  end if
114 
115  ! Slices are created on walls and walls only. Retrieve the
116  ! points, connectivity and familyID of all the walls.
117  walllist => bcfamgroups(ibcgroupwalls)%famList
118  call getsurfacesize(sizenode, sizecell, walllist, size(walllist), .true.)
119  allocate (pts(3, sizenode), conn(4, sizecell), elemfam(sizecell), cgnsblockid(sizecell))
120  call getsurfaceconnectivity(conn, cgnsblockid, sizecell, walllist, size(walllist), .true.)
121  call getsurfacepoints(pts, sizenode, sps, walllist, size(walllist), .true.)
122  call getsurfacefamily(elemfam, sizecell, walllist, size(walllist), .true.)
123 
124  ! Create actual slice
125  call createslice(pts, conn, elemfam, paraslices(nparaslices, sps), pt, normal, dir_vec, &
126  use_dir, slicename, famlist)
127 
128  ! Clean up memory.
! NOTE(review): 'cgnsblockid' is not included in this deallocate, so the
! allocate above would fail on the second sps iteration when
! ntimeintervalsspectral > 1 — likely it should be deallocated here too.
129  deallocate (pts, conn, elemfam)
130  end do
131 
132  end subroutine addparaslice
133 
134  subroutine addabsslice(sliceName, pt, normal, dir_vec, use_dir, famList, n)
135  !
136  ! This subroutine is intended to be called from python.
137  ! This routine will add an absolute slice to the list of user
138  ! supplied slices.
!
! Unlike a parametric slice, an absolute slice is pinned to a fixed
! plane in space (pt, normal). One entry is added per spectral instance.
139  use constants
140  use communication
141  use surfacefamilies
142  use surfaceutils
! NOTE(review): extraction gap — doxygen line 143 is missing here
! (presumably another 'use' statement); confirm against the original.
144  implicit none
145 
146  ! Input parameters
147  character(len=*), intent(in) :: sliceName
148  real(kind=realtype), dimension(3), intent(in) :: pt, normal, dir_vec
149  logical, intent(in) :: use_dir
150  integer(kind=intType), intent(in) :: n, famList(n)
151 
152  ! Working
153  integer(kind=intType) :: sps, sizeNode, sizeCell
154  integer(kind=intType), dimension(:), pointer :: wallList
155  real(kind=realtype), dimension(:, :), allocatable :: pts
156  integer(kind=intType), dimension(:, :), allocatable :: conn
157  integer(kind=intType), dimension(:), allocatable :: elemFam, cgnsBlockID
158 
159  if (.not. allocated(absslices)) then
! NOTE(review): extraction gap — doxygen line 160 is missing; presumably
! the first-use allocation of 'absslices'. Confirm against the original.
161  end if
162 
163  do sps = 1, ntimeintervalsspectral
! NOTE(review): nabsslices is incremented once per spectral instance,
! not once per user-added slice — verify this matches how
! absslices(nabsslices, sps) is intended to be indexed.
164  nabsslices = nabsslices + 1
165 
166  if (nabsslices > nslicemax) then
167  print *, 'Error: Exceeded the maximum number of slices. Increase nSliceMax'
168  stop
169  end if
170 
! Retrieve points, connectivity and family of all wall surfaces for
! this spectral instance; slices are created on walls only.
171  walllist => bcfamgroups(ibcgroupwalls)%famList
172  call getsurfacesize(sizenode, sizecell, walllist, size(walllist), .true.)
173  allocate (pts(3, sizenode), conn(4, sizecell), elemfam(sizecell), cgnsblockid(sizecell))
174  call getsurfaceconnectivity(conn, cgnsblockid, sizecell, walllist, size(walllist), .true.)
175  call getsurfacepoints(pts, sizenode, sps, walllist, size(walllist), .true.)
176  call getsurfacefamily(elemfam, sizecell, walllist, size(walllist), .true.)
177  call createslice(pts, conn, elemfam, absslices(nabsslices, sps), pt, normal, dir_vec, &
178  use_dir, slicename, famlist)
179 
180  ! Clean up memory.
! NOTE(review): 'cgnsblockid' is missing from this deallocate — the
! allocate above would fail on the next sps iteration when
! ntimeintervalsspectral > 1; likely it should be deallocated here too.
181  deallocate (pts, conn, elemfam)
182  end do
183 
184  end subroutine addabsslice
185 
subroutine addliftdistribution(nSegments, normal, normal_ind, distName, famList, n)
    !
    ! This subroutine is intended to be called from python.
    ! This routine will add the description of a lift distribution to
    ! the module-level 'liftdists' list. No slicing happens here; the
    ! stored description is consumed later by writeliftdistributions.
    !
    ! Parameters
    ! ----------
    ! nSegments  : number of spanwise stations (nodes) in the distribution
    ! normal     : normal of the slicing planes
    ! normal_ind : index of the slice direction (1 for x, 2 for y, 3 for z)
    ! distName   : name used for the tecplot zone of this distribution
    ! famList(n) : family IDs the distribution integrates over

    use constants
    use communication
    use surfacefamilies
    implicit none

    ! Input parameters
    character(len=*), intent(in) :: distName
    integer(kind=intType), intent(in) :: nSegments
    ! Fix: 'normal' previously had no intent attribute even though it is
    ! only read here; declare it intent(in) like the other inputs.
    real(kind=realtype), dimension(3), intent(in) :: normal
    integer(kind=intType), intent(in) :: normal_ind
    integer(kind=intType), intent(in) :: n, famList(n)

    ! Bump the module-level counter and guard against overflowing the
    ! fixed-size 'liftdists' array.
    nliftdists = nliftdists + 1
    if (nliftdists > nliftdistmax) then
        print *, 'Error: Exceeded the maximum number of lift distributions. &
             &Increase nLiftDistMax'
        stop
    end if

    ! Record the distribution description.
    liftdists(nliftdists)%nSegments = nsegments
    liftdists(nliftdists)%normal = normal
    liftdists(nliftdists)%normal_ind = normal_ind
    liftdists(nliftdists)%distName = distname

    ! Store a private copy of the family list.
    allocate (liftdists(nliftdists)%famList(n))
    liftdists(nliftdists)%famList(:) = famlist

end subroutine addliftdistribution
219 
220  subroutine writetecplot(sliceFile, writeSlices, liftFile, writeLift, &
221  surfFile, writeSurf, famList, nFamList)
222  !
223  ! This is the master routine for writing tecplot data from adflow.
224  ! This routine will write the slice, lift and surface files
225  ! depending on the flags writeSlices, writeLift and writeSurf.
226  ! The reason for the combined routine is that we can safely only
227  ! perform the nodal averaging once which is required for all
228  ! three output files.
229  use constants
! NOTE(review): extraction gap — doxygen lines 230-231 are missing here
! (presumably 'use communication' and 'use surfacefamilies' or similar,
! since myid/bcfamgroups-style symbols are referenced below); confirm.
232  use surfaceutils, only: getsurfacesize
233  use oversetdata, only: zippermeshes
! NOTE(review): doxygen line 234 is also missing (likely another 'use').
235  implicit none
236 
237  ! Input Params
238  character(len=*), intent(in) :: sliceFile, liftFile, surfFile
239  logical, intent(in) :: writeSlices, writeLift, writeSurf
240  integer(kind=intType), intent(in) :: nFamList
241  integer(kind=intType), intent(in), dimension(nFamList) :: famList
! nodalvalues(node, variable, sps): nodally-averaged surface data shared
! by the slice and lift-distribution writers.
242  real(kind=realtype), dimension(:, :, :), allocatable :: nodalvalues
243  ! Working
244  integer(kind=intType) :: sps, nSolVar, sizeNOde, sizeCell
245  integer(kind=intType), dimension(:), pointer :: wallLIST
246 
247  ! Determine the number of surface variables we have
248  call numberofsurfsolvariables(nsolvar)
249  walllist => bcfamgroups(ibcgroupwalls)%famList
250  call getsurfacesize(sizenode, sizecell, walllist, size(walllist), .true.)
251 
252  ! Allocate and compute the wall-based surface data for the slices
253  ! and lift distributions.
! The "+ 6 + 3" extra columns beyond nsolvar hold additional nodal data
! (e.g. forces/coordinates) — TODO confirm the exact layout against the
! routine that fills nodalvalues.
254  allocate (nodalvalues(max(sizenode, 1), nsolvar + 6 + 3, ntimeintervalsspectral))
255 
256  do sps = 1, ntimeintervalsspectral
! NOTE(review): extraction gap — doxygen line 257 (the start of the call
! that computes the nodal values, taking zippermeshes and filling
! nodalvalues) is missing; the line below is its continuation. Confirm
! the call name against the original source.
258  zippermeshes(ibcgroupwalls), .true., nodalvalues(:, :, sps))
259  end do
260 
261  if (writeslices) then
262  call writeslicesfile(slicefile, nodalvalues)
263  end if
264 
265  if (writelift) then
266  call writeliftdistributionfile(liftfile, nodalvalues)
267  end if
268 
! nodalvalues is only needed for slices and lift distributions; the
! surface writer recomputes its own data.
269  deallocate (nodalvalues)
270 
271  if (writesurf) then
272  call writetecplotsurfacefile(surffile, famlist)
273  end if
274 
275  end subroutine writetecplot
276 
277  subroutine writeslicesfile(fileName, nodalValues)
278  !
279  ! This subroutine is intended to be called from python. This
280  ! routine will write the user defined slices to the (ascii)
281  ! tecplot file fileName. ASCII files are used for simplicity since
282  ! very little information is actually written.
!
! One file is written per spectral instance (suffix "SpectralN" in time
! spectral mode). Slice integration runs on all procs; only the root
! proc opens and writes the file.
283  use constants
284  use communication
285  use outputmod
! NOTE(review): extraction gap — doxygen line 286 is missing here
! (presumably another 'use' statement); confirm against the original.
287  use inputphysics
288  use inputiteration
289  use inputio
290  use surfacefamilies
291  use surfaceutils
292  use utils, only: echk
293  implicit none
294 
295  ! Input Params
296  character(len=*), intent(in) :: fileName
297  real(kind=realtype), intent(inout), dimension(:, :, :) :: nodalvalues
298 
299  ! Working parameters
300  integer(kind=intType) :: file, i, sps, nSolVar, ierr
301  character(len=maxStringLen) :: fname
302  character(len=7) :: intString
303  character(len=maxCGNSNameLen), dimension(:), allocatable :: solNames
304  integer(kind=intType), allocatable, dimension(:) :: famList
305  type(slice) :: globalSlice
306  integer(kind=intType) :: sizeNode, sizeCell
307  integer(kind=intType), dimension(:), pointer :: wallList
308  real(kind=realtype), dimension(:, :), allocatable :: pts
309  integer(kind=intType), dimension(:, :), allocatable :: conn
310  integer(kind=intType), dimension(:), allocatable :: elemFam, cgnsBlockID
311 
312  ! Only write if we actually have slices defined
313  testwriteslices: if (nparaslices + nabsslices > 0) then
314 
315  if (myid == 0 .and. printiterations) then
316  print "(a)", "#"
317  print "(a)", "# Writing slices file(s): "
318  end if
319 
320  do sps = 1, ntimeintervalsspectral
321 
322  ! If it is time spectral we need to augment the filename
323  if (equationmode == timespectral) then
324  write (intstring, "(i7)") sps
325  intstring = adjustl(intstring)
326  fname = trim(filename)//"Spectral"//trim(intstring)
327  else
328  fname = filename
329  end if
330 
! Hard-coded file unit used by the root proc for the ascii output.
331  file = 11
332  ! Open file on root proc:
333  if (myid == 0) then
334  ! Print the filename to stdout
335  print "(a,4x,a)", "#", trim(fname)
336  open (unit=file, file=trim(fname))
337 
338  ! Write Header Information
339  write (file, *) "Title = ""ADflow Slice Data"""
340  write (file, "(a)", advance="no") "Variables = "
341  write (file, "(a)", advance="no") " ""CoordinateX"" "
342  write (file, "(a)", advance="no") " ""CoordinateY"" "
343  write (file, "(a)", advance="no") " ""CoordinateZ"" "
344  write (file, "(a)", advance="no") " ""XoC"" "
345  write (file, "(a)", advance="no") " ""YoC"" "
346  write (file, "(a)", advance="no") " ""ZoC"" "
347 
348  ! Number of additional variables on slices
349  call numberofsurfsolvariables(nsolvar)
350  allocate (solnames(nsolvar))
351  call surfsolnames(solnames)
352 
353  ! Write the rest of the variables
354  do i = 1, nsolvar
355  write (file, "(a,a,a)", advance="no") """", trim(solnames(i)), """ "
356  end do
357 
358  write (file, "(1x)")
359  deallocate (solnames)
360  end if
! nsolvar was only computed on the root proc above; broadcast it so all
! procs can participate in the slice integration below.
361  call mpi_bcast(nsolvar, 1, adflow_integer, 0, adflow_comm_world, ierr)
362  call echk(ierr, __file__, __line__)
363 
364  ! Slices are created on walls and walls only. Retrieve the
365  ! points, connectivity and familyID of all the walls.
! NOTE(review): pts/conn/elemfam/cgnsblockid are allocated inside the
! sps loop but never deallocated before the next iteration — this would
! fail when ntimeintervalsspectral > 1; confirm against the original.
366  walllist => bcfamgroups(ibcgroupwalls)%famList
367  call getsurfacesize(sizenode, sizecell, walllist, size(walllist), .true.)
368  allocate (pts(3, sizenode), conn(4, sizecell), elemfam(sizecell), cgnsblockid(sizecell))
369  call getsurfaceconnectivity(conn, cgnsblockid, sizecell, walllist, size(walllist), .true.)
370  call getsurfacepoints(pts, sizenode, sps, walllist, size(walllist), .true.)
371  call getsurfacefamily(elemfam, sizecell, walllist, size(walllist), .true.)
372 
373  ! Integration is performed in parallel
374  do i = 1, nparaslices
375  call integrateslice(paraslices(i, sps), globalslice, &
376  nodalvalues(:, :, sps), nsolvar, .true.)
377  if (myid == 0) then
378  call writeslice(globalslice, file, nsolvar)
379  end if
380  call destroyslice(globalslice)
381  end do
382 
! Absolute slices are re-created from the current surface so the cut
! stays at the fixed plane even if the geometry has deformed.
383  do i = 1, nabsslices
384  ! 'Destroy' the slice...just dealloc the allocated data.
385  ! before we do, save the family list
386  allocate (famlist(size(absslices(i, sps)%famList)))
387  famlist = absslices(i, sps)%famList
388  call destroyslice(absslices(i, sps))
389 
390  ! Make new one in the same location
391  call createslice(pts, conn, elemfam, absslices(i, sps), &
392  absslices(i, sps)%pt, absslices(i, sps)%normal, absslices(i, sps)%dir_vec, &
393  absslices(i, sps)%use_dir, absslices(i, sps)%sliceName, famlist)
394 
395  call integrateslice(absslices(i, sps), globalslice, &
396  nodalvalues(:, :, sps), nsolvar, .true.)
397  if (myid == 0) then
398  call writeslice(globalslice, file, nsolvar)
399  end if
400  call destroyslice(globalslice)
401  deallocate (famlist)
402  end do
403 
404  !Close file on root proc
405  if (myid == 0) then
406  close (file)
407  end if
408  end do
409 
410  if (myid == 0 .and. printiterations) then
411  print "(a)", "# Slices file(s) written"
412  print "(a)", "#"
413  end if
414  end if testwriteslices
415  end subroutine writeslicesfile
416 
417  subroutine writeliftdistributionfile(fileName, nodalValues)
418  !
419  !
420  ! This subroutine is intended to be called from python.
421  ! This routine will write the added lift distributions
422  ! to the (ascii) tecplot file fileName. ASCII files are
423  ! used for simplicity since very little information is actually
424  ! written.
!
! One file per spectral instance; the root proc owns the file handle
! while writeliftdistributions does the actual slicing/integration.
425  use constants
426  use communication
427  use outputmod
428  use inputphysics
! NOTE(review): extraction gap — doxygen line 429 is missing here
! (presumably another 'use' statement); confirm against the original.
430  use inputiteration
431  use surfacefamilies
432  implicit none
433 
434  ! Input Params
435  character(len=*), intent(in) :: fileName
! NOTE(review): nodalvalues is declared allocatable with no intent —
! presumably it should be intent(in); verify before changing since an
! allocatable dummy without intent permits (re)allocation by callees.
436  real(kind=realtype), dimension(:, :, :), allocatable :: nodalvalues
437 
438  ! Working parameters
439  integer(kind=intType) :: file, sps
440  character(len=maxStringLen) :: fname
441  character(len=7) :: intString
442 
443  ! Only write if we actually have lift distributions
444  testwriteliftdists: if (nliftdists > 0) then
445 
446  if (myid == 0 .and. printiterations) then
447  print "(a)", "#"
448  print "(a)", "# Writing lift distribution file(s):"
449  end if
450 
451  do sps = 1, ntimeintervalsspectral
452 
453  ! If it is time spectral we need to augment the filename
454  if (equationmode == timespectral) then
455  write (intstring, "(i7)") sps
456  intstring = adjustl(intstring)
457  fname = trim(filename)//"Spectral"//trim(intstring)
458  else
459  fname = filename
460  end if
461 
! Hard-coded unit number used by the root proc.
462  file = 11
463  ! Open file on root proc:
464  if (myid == 0) then
465  ! Print the filename to stdout
466  print "(a,4x,a)", "#", trim(fname)
467 
468  open (unit=file, file=trim(fname))
469  end if
470 
! Collective call: every proc participates in the integration even
! though only the root writes to 'file'.
471  call writeliftdistributions(sps, file, nodalvalues(:, :, sps))
472 
473  ! Close file on root proc
474  if (myid == 0) then
475  close (file)
476  end if
477  end do
478 
479  if (myid == 0 .and. printiterations) then
480  print "(a)", "# Lift distribution file(s) written"
481  print "(a)", "#"
482  end if
483 
484  end if testwriteliftdists
485  end subroutine writeliftdistributionfile
486 
487  subroutine writeliftdistributions(sps, fileID, nodalValues)
488  !
489  ! This subroutine writes the liftdistribution for the specified
490  ! spectral instance. It is assumed that the required file handles
491  ! are already open and can be written to
!
! Collective over all procs: slicing/integration is parallel, but only
! the root proc (myid == 0) writes to fileID.
492  use constants
493  use communication
494  use outputmod
495  use su_cgns
496  use cgnsnames
! NOTE(review): extraction gap — doxygen line 497 is missing here
! (presumably another 'use' statement); confirm against the original.
498  use surfaceutils
499  use utils, only: echk
500  use sorting, only: faminlist
501  implicit none
502 
503  ! Input parameters
504  integer(kind=intType), intent(in) :: sps, fileID
505  real(kind=realtype), dimension(:, :), intent(in) :: nodalvalues
506 
! Working variables
507  real(kind=realtype), dimension(3) :: xmin, xmax, xmin_local, xmax_local
! tol nudges the first/last slice planes off the exact bounding box so
! the cuts do not degenerate at the geometry extremes.
508  real(kind=realtype), parameter :: tol = 1e-8
509  type(liftdist), pointer :: d
510  integer(kind=intType) :: i, j, ii, jj, idist, ierr
511  real(kind=realtype), dimension(:, :), allocatable :: values
512  character(len=maxCGNSNameLen), dimension(:), allocatable :: liftdistnames
513  real(kind=realtype) :: dmin, dmax, suml, sumd, summ, span, delta, xcur(3)
514  type(slice) :: localslice, globalslice
515  integer(kind=intType) :: sizenode, sizecell
516  integer(kind=intType), dimension(:), pointer :: walllist
517  real(kind=realtype), dimension(:, :), allocatable :: pts
518  integer(kind=intType), dimension(:, :), allocatable :: conn
519  integer(kind=intType), dimension(:), allocatable :: elemfam, cgnsblockid
520 
521  ! Slices are created on walls and walls only. Retrieve the
522  ! points, connectivity and familyID of all the walls.
523  walllist => bcfamgroups(ibcgroupwalls)%famList
524  call getsurfacesize(sizenode, sizecell, walllist, size(walllist), .true.)
525  allocate (pts(3, sizenode), conn(4, sizecell), elemfam(sizecell), cgnsblockid(sizecell))
526  call getsurfaceconnectivity(conn, cgnsblockid, sizecell, walllist, size(walllist), .true.)
527  call getsurfacepoints(pts, sizenode, sps, walllist, size(walllist), .true.)
528  call getsurfacefamily(elemfam, sizecell, walllist, size(walllist), .true.)
529 
530  do idist = 1, nliftdists
531 
532  d => liftdists(idist)
533  xmin_local = huge(real(zero))
534  xmax_local = -huge(real(zero))
535 
536  ! Get the bounding box for the entire geometry we have been slicing.
537  elemloop: do i = 1, size(conn, 2)
! Only elements belonging to this distribution's families count.
538  if (faminlist(elemfam(i), d%FamList)) then
539 
540  ! Extract each of the 4 nodes on this quad:
541  do jj = 1, 4
542  xcur = pts(:, conn(jj, i))
543  ! Check the max/min on each index
544  do ii = 1, 3
545  xmin_local(ii) = min(xmin_local(ii), xcur(ii))
546  xmax_local(ii) = max(xmax_local(ii), xcur(ii))
547  end do
548  end do
549  end if
550  end do elemloop
551 
552  ! Globalize all min/max values.
553  call mpi_allreduce(xmin_local, xmin, 3, adflow_real, mpi_min, &
554  adflow_comm_world, ierr)
555  call echk(ierr, __file__, __line__)
556 
557  call mpi_allreduce(xmax_local, xmax, 3, adflow_real, mpi_max, &
558  adflow_comm_world, ierr)
559  call echk(ierr, __file__, __line__)
560 
! Uniform spacing between the nSegments stations along the slice
! direction.
561  d%delta = (xmax(d%normal_ind) - xmin(d%normal_ind)) / dble((d%nSegments - 1))
562  allocate (d%slicePts(3, d%nSegments))
563 
564  ! Zero out all segments
565  d%slicePts = zero
566 
567  ! These are the variable names for the lift distribution:
! Indices here must stay consistent with the 'values(:, k)' assignments
! below and with the module parameter nliftdistvar (= 26).
568  allocate (liftdistnames(nliftdistvar))
569  ! Set the names here
570  liftdistnames(1) = "eta"
571  liftdistnames(2) = cgnscoorx
572  liftdistnames(3) = cgnscoory
573  liftdistnames(4) = cgnscoorz
574  liftdistnames(5) = "Lift"
575  liftdistnames(6) = "Drag"
576  liftdistnames(7) = "Moment"
577  liftdistnames(8) = "Normalized Lift"
578  liftdistnames(9) = "Normalized Drag"
579  liftdistnames(10) = "Normalized Moment"
580  liftdistnames(11) = "CL"
581  liftdistnames(12) = "CD"
582  liftdistnames(13) = "CM"
583  liftdistnames(14) = "CLp"
584  liftdistnames(15) = "CDp"
585  liftdistnames(16) = "CMp"
586  liftdistnames(17) = "CLv"
587  liftdistnames(18) = "CDv"
588  liftdistnames(19) = "CMv"
589  liftdistnames(20) = "Elliptical"
590  liftdistnames(21) = "thickness"
591  liftdistnames(22) = "twist"
592  liftdistnames(23) = "chord"
593  liftdistnames(24) = "fx"
594  liftdistnames(25) = "fy"
595  liftdistnames(26) = "fz"
596 
597  ! Only write header info for first distribution only
598  if (myid == 0) then
599  if (idist == 1) then
600  write (fileid, *) "Title= ""ADflow Lift Distribution Data"""
601  write (fileid, "(a)", advance="no") "Variables = "
602  do i = 1, nliftdistvar ! here we just write var-names in the header
603  write (fileid, "(a,a,a)", advance="no") """", trim(liftdistnames(i)), """ "
604  end do
605  write (fileid, "(1x)")
606  end if
607 
! Each distribution becomes one ordered (I-indexed) tecplot zone in
! block packing.
608  write (fileid, "(a,a,a)") "Zone T= """, trim(d%distName), """"
609  write (fileid, *) "I= ", d%nSegments
610  write (fileid, *) "DATAPACKING=BLOCK"
611  end if
612 
613  allocate (values(d%nSegments, nliftdistvar))
614  values = zero
615 
! Lay out the slice stations: endpoints are pulled inward by 'tol' so
! the first and last cuts stay strictly inside the bounding box.
616  do i = 1, d%nSegments
617  if (i == 1) then
618  d%slicePts(d%normal_ind, i) = xmin(d%normal_ind) + tol
619  else if (i == d%nSegments) then
620  d%slicePts(d%normal_ind, i) = xmin(d%normal_ind) + (i - 1) * d%delta - tol
621  else
622  d%slicePts(d%normal_ind, i) = xmin(d%normal_ind) + (i - 1) * d%delta
623  end if
624  end do
625 
626  ! Scaled Eta values
627  dmin = minval(d%slicePts(d%normal_ind, :))
628  dmax = maxval(d%slicePts(d%normal_ind, :))
629 
630  ! next line we save ETA as the first column. This corresponds to
631  ! when we open a lift file, e.g. fc_000_lift.dat where we find
632  ! ETA as the first entry... (also visible through tec360)
633  values(:, 1) = (d%slicePts(d%normal_ind, :) - dmin) / (dmax - dmin)
634  ! Coordinate Variables
! Only the coordinate along the slice direction is filled in; the other
! two coordinate columns stay zero.
635  if (d%normal_ind == 1) then! X slices
636  values(:, 2) = d%slicePts(1, :)
637  else if (d%normal_ind == 2) then ! Y slices
638  values(:, 3) = d%slicePts(2, :)
639  else if (d%normal_ind == 3) then ! Z slices
640  values(:, 4) = d%slicePts(3, :)
641  end if
642 
643  ! as mentioned above nSegments are number of nodes in the
644  ! distribution, e.g. maybe you have asked for 150 points out along
645  ! your wing where you want the lift. Ok, we now for each of these
646  ! 150 points have to create a slice and integrate our metrics around
647  ! said slice, save the integrated metrics (e.g. lift, drag) for the
648  ! current point. We then move on to the next point, make a new slice
649  ! get the new point's metrics and save them ... we do this 150 times
650  do i = 1, d%nSegments
651  ! Make new one in the same location
652  ! we just pass the normal again as the direction vector because its not used
653  call createslice(pts, conn, elemfam, localslice, d%slicePts(:, i), &
654  d%normal, d%normal, .false., "does_not_matter", d%famList)
655  call integrateslice(localslice, globalslice, nodalvalues, 0, .false.)
656 
657  ! Total lift, drag, and moment
658  values(i, 5) = globalslice%pL + globalslice%vL
659  values(i, 6) = globalslice%pD + globalslice%vD
660  values(i, 7) = globalslice%pM + globalslice%vM
661 
662  ! Total CL, CD, and CM
663  values(i, 11) = globalslice%CLp + globalslice%CLv
664  values(i, 12) = globalslice%CDp + globalslice%CDv
665  values(i, 13) = globalslice%CMp + globalslice%CMv
666 
667  ! Pressure lift, drag, and moment coefficients
668  values(i, 14) = globalslice%CLp
669  values(i, 15) = globalslice%CDp
670  values(i, 16) = globalslice%CMp
671 
672  ! Viscous lift, drag, and moment coefficients
673  values(i, 17) = globalslice%CLv
674  values(i, 18) = globalslice%CDv
675  values(i, 19) = globalslice%CMv
676 
677  ! t/c, twist, chord
678  values(i, 21) = globalslice%thickness
679  values(i, 22) = globalslice%twist
680  values(i, 23) = globalslice%chord
681 
682  ! here we now save our new, added values that we have desired to use
683  values(i, 24) = globalslice%fx
684  values(i, 25) = globalslice%fy
685  values(i, 26) = globalslice%fz
686 
687  call destroyslice(localslice)
688  call destroyslice(globalslice)
689 
690  end do
691 
692  ! Sum up the lift, drag, and moment values from the slices:
! Trapezoidal-style accumulation over adjacent stations.
693  suml = zero
694  sumd = zero
695  summ = zero
696  do i = 1, d%nSegments - 1
697  suml = suml + half * (values(i, 5) + values(i + 1, 5))
698  sumd = sumd + half * (values(i, 6) + values(i + 1, 6))
699  summ = summ + half * (values(i, 7) + values(i + 1, 7))
700  end do
701 
702  ! Now compute the normalized lift, drag and elliptical since
703  ! we know the sum. Note delta is non-dimensional!
704  delta = values(2, 1) - values(1, 1)
705 
706  ! This is the "nondimensional" span...it basically takes into account if you have
707  ! a wing not at the sym plane
708  span = maxval(values(:, 1)) - minval(values(:, 1))
709  dmin = minval(values(:, 1))
710  suml = suml * delta
711  sumd = sumd * delta
712  summ = summ * delta
713 
! NOTE(review): if the integrated totals are zero (e.g. zero-lift case)
! these divisions produce Inf/NaN in the normalized columns; verify this
! is acceptable for the downstream consumer.
714  do i = 1, d%nSegments
715 
716  ! Normalized Lift, Drag, and Moment
717  values(i, 8) = values(i, 5) / suml
718  values(i, 9) = values(i, 6) / sumd
719  values(i, 10) = values(i, 7) / abs(summ)
720 
721  ! elliptical value
722  values(i, 20) = four / pi / span * sqrt(one - (values(i, 1) - dmin)**2 / span**2)
723  end do
724 
725  ! Write all variables in block format
! Block packing: all nSegments values of variable 1, then variable 2,
! etc., using the module-level sci6 format.
726  if (myid == 0) then
727  do j = 1, nliftdistvar
728  do i = 1, d%nSegments
729  write (fileid, sci6) values(i, j)
730  end do
731  end do
732  end if
733 
734  ! Deallocate slice list and point list
735  deallocate (d%slicePts)
736 
737  ! Destroy temp variables
738  deallocate (liftdistnames, values)
739  end do
740 
741  end subroutine writeliftdistributions
742 
743  subroutine writetecplotsurfacefile(fileName, famList)
744  use constants
747  use inputphysics, only: equationmode
752  use utils, only: echk, setpointers, setbcpointers
753  use bcpointers, only: xx
754  use sorting, only: faminlist
755  use extraoutput, only: surfwriteblank
757  use surfaceutils
758 #include <petsc/finclude/petsc.h>
759  use petsc
760  implicit none
761 
762  ! Input Params
763  character(len=*), intent(in) :: filename
764  integer(kind=intType), intent(in), dimension(:) :: famlist
765 
766  ! Working parameters
767  integer(kind=intType) :: i, j, nn, mm, fileid, ivar, ii, ierr, isize
768  integer(Kind=intType) :: nsolvar, ibeg, iend, jbeg, jend, sps, sizenode, sizecell
769  integer(kind=intType) :: ibcgroup, ifam, iproc, ncells, nnodes, ncellstowrite, izone, lastzonesharing
770  character(len=maxStringLen) :: fname
771  character(len=7) :: intstring
772  integer(kind=intType), dimension(:), allocatable :: nodesizes, nodedisps
773  integer(kind=intType), dimension(:), allocatable :: cellsizes, celldisps
774  character(len=maxCGNSNameLen), dimension(:), allocatable :: solnames
775  real(kind=realtype), dimension(:, :), allocatable :: nodalvalues
776  integer(kind=intType), dimension(:, :), allocatable :: conn, localconn
777  real(kind=realtype), dimension(:, :), allocatable :: vars
778  integer(kind=intType), dimension(:), allocatable :: mask, elemfam, localelemfam, cgnsblockid
779  logical :: blanksave, bcgroupneeded, datawritten
780  type(zippermesh), pointer :: zipper
781  type(familyexchange), pointer :: exch
782  if (myid == 0 .and. printiterations) then
783  print "(a)", "#"
784  print "(a)", "# Writing tecplot surface file(s):"
785  end if
786 
787  ! Number of surface variables. Note that we *explictly*
788  ! remove the potential for writing the surface blanks as
789  ! these are not necessary for the tecplot IO as we write the
790  ! zipper mesh directly. We must save and restore the
791  ! variable in case the CGNS otuput still wants to write it.
792  blanksave = surfwriteblank
793  surfwriteblank = .false.
794  call numberofsurfsolvariables(nsolvar)
795  allocate (solnames(nsolvar))
796  call surfsolnames(solnames)
797 
798  spectralloop: do sps = 1, ntimeintervalsspectral
799 
800  ! If it is time spectral we need to agument the filename
801  if (equationmode == timespectral) then
802  write (intstring, "(i7)") sps
803  intstring = adjustl(intstring)
804  fname = trim(filename)//"Spectral"//trim(intstring)
805  else
806  fname = filename
807  end if
808 
809  fileid = 11
810  ! Open file on root proc:
811 
812  if (myid == 0) then
813  ! Print the filename to stdout
814  print "(a,4x,a)", "#", trim(fname)
815 
816  open (unit=fileid, file=trim(fname), form='UNFORMATTED', access='stream', status='replace')
817 
818  ! Tecplot magic number
819  write (fileid) "#!TDV112"
820 
821  ! Integer value of 1 (4 bytes)
822  call writeinteger(1)
823 
824  ! Integer for FileType: 0 = Full, 1= Grid, 2 = Solution
825  call writeinteger(0)
826 
827  ! Write the title of the file
828  call writestring("ADflow Surface Solution Data")
829 
830  ! Write the number of variable names
831  call writeinteger(3 + nsolvar)
832 
833  ! Write the variable names
834  call writestring("CoordinateX")
835  call writestring("CoordinateY")
836  call writestring("CoordinateZ")
837 
838  ! Write the rest of the variables
839  do i = 1, nsolvar
840  call writestring(trim(solnames(i)))
841  end do
842 
843  deallocate (solnames)
844 
845  end if
846 
847  ! First pass through to generate and write header information
848  masterbcloop1: do ibcgroup = 1, nfamexchange
849 
850  ! Pointers for easier reading
851  exch => bcfamexchange(ibcgroup, sps)
852  zipper => zippermeshes(ibcgroup)
853 
854  ! First thing we do is figure out if we actually need to do
855  ! anything with this BCgroup at all. If none the requested
856  ! families are in this BCExcahnge we don't have to do
857  ! anything.
858 
859  bcgroupneeded = .false.
860  do i = 1, size(famlist)
861  if (faminlist(famlist(i), exch%famList)) then
862  bcgroupneeded = .true.
863  end if
864  end do
865 
866  ! Keep going if we don't need this.
867  if (.not. bcgroupneeded) then
868  cycle
869  end if
870 
871  ! Get the sizes of this BCGroup
872  call getsurfacesize(sizenode, sizecell, exch%famList, size(exch%famList), .true.)
873  call mpi_reduce(sizenode, nnodes, 1, adflow_integer, mpi_sum, 0, adflow_comm_world, ierr)
874  call echk(ierr, __file__, __line__)
875 
876  ! Now gather up the cell family info.
877  allocate (celldisps(0:nproc), cellsizes(nproc))
878 
879  call mpi_gather(sizecell, 1, adflow_integer, &
880  cellsizes, 1, adflow_integer, 0, adflow_comm_world, ierr)
881  call echk(ierr, __file__, __line__)
882 
883  allocate (localelemfam(sizecell))
884  call getsurfacefamily(localelemfam, sizecell, exch%famList, size(exch%famList), .true.)
885 
886  if (myid == 0) then
887  celldisps(0) = 0
888  do iproc = 1, nproc
889  celldisps(iproc) = celldisps(iproc - 1) + cellsizes(iproc)
890  end do
891  ncells = sum(cellsizes)
892  allocate (elemfam(ncells))
893  end if
894 
895  call mpi_gatherv(localelemfam, &
896  size(localelemfam), adflow_integer, elemfam, &
897  cellsizes, celldisps, adflow_integer, 0, adflow_comm_world, ierr)
898  call echk(ierr, __file__, __line__)
899 
900  ! Deallocate temp memory
901  deallocate (localelemfam, cellsizes, celldisps)
902 
903  rootproc: if (myid == 0 .and. ncells > 0) then
904  do ifam = 1, size(exch%famList)
905 
906  ! Check if we have to write this one:
907  faminclude: if (faminlist(exch%famList(ifam), famlist)) then
908  ncellstowrite = 0
909  do i = 1, ncells
910  ! Check if this elem is to be included
911  if (elemfam(i) == exch%famList(ifam)) then
912  ncellstowrite = ncellstowrite + 1
913  end if
914  end do
915 
916  if (ncellstowrite > 0) then
917  call writefloat(zonemarker) ! Zone Marker
918  call writestring(trim(famnames(exch%famList(ifam)))) ! Zone Name
919  call writeinteger(-1) ! Parent Zone (-1 for None)
920  call writeinteger(-1) ! Strand ID (-2 for tecplot assignment)
921  call writedouble(zero) ! Solution Time
922  call writeinteger(-1)! Zone Color (Not used anymore) (-1)
923  call writeinteger(3) ! Zone Type (3 for FEQuadrilateral)
924  call writeinteger(0)! Data Packing (0 for block)
925  call writeinteger(0)! Specify Var Location (0=don't specify, all at nodes)
926  call writeinteger(0) ! Are raw 1-to-1 face neighbours supplied (0 for false)
927  call writeinteger(nnodes) ! Number of nodes in FE Zone
928  call writeinteger(ncellstowrite) ! Number of elements in FE Zone
929  call writeinteger(0) ! ICellDim, jCellDim, kCellDim (for future use, set to 0)
930  call writeinteger(0)
931  call writeinteger(0)
932  call writeinteger(0) ! Aux data specified (0 for no)
933  end if
934  end if faminclude
935  end do
936  end if rootproc
937  if (myid == 0) then
938  deallocate (elemfam)
939  end if
940  end do masterbcloop1
941 
942  if (myid == 0) then
943  call writefloat(datasectionmarker) ! Eohmarker to mark difference between header and data section
944  end if
945 
946  ! Now do everything again but for real.
947  masterbcloop: do ibcgroup = 1, nfamexchange
948 
949  ! Pointers for easier reading
950  exch => bcfamexchange(ibcgroup, sps)
951  zipper => zippermeshes(ibcgroup)
952 
953  ! First thing we do is figure out if we actually need to do
954  ! anything with this BCgroup at all. If none the requested
955  ! families are in this BCExcahnge we don't have to do
956  ! anything.
957 
958  bcgroupneeded = .false.
959  do i = 1, size(famlist)
960  if (faminlist(famlist(i), exch%famList)) then
961  bcgroupneeded = .true.
962  end if
963  end do
964 
965  ! Keep going if we don't need this.
966  if (.not. bcgroupneeded) then
967  cycle
968  end if
969 
970  ! Get the sizes of this BCGroup
971  call getsurfacesize(sizenode, sizecell, exch%famList, size(exch%famList), .true.)
972  allocate (nodalvalues(max(sizenode, 1), nsolvar + 3 + 6))
973  ! Compute the nodal data
974 
975  call computesurfaceoutputnodaldata(bcfamexchange(ibcgroup, sps), &
976  zippermeshes(ibcgroup), .false., nodalvalues(:, :))
977 
978  ! Gather up the number of nodes to be set to the root proc:
979  allocate (nodesizes(nproc), nodedisps(0:nproc))
980  nodesizes = 0
981  nodedisps = 0
982 
983  call mpi_allgather(sizenode, 1, adflow_integer, nodesizes, 1, adflow_integer, &
984  adflow_comm_world, ierr)
985  call echk(ierr, __file__, __line__)
986  nodedisps(0) = 0
987  do iproc = 1, nproc
988  nodedisps(iproc) = nodedisps(iproc - 1) + nodesizes(iproc)
989  end do
990 
991  isize = 3 + 6 + nsolvar
992  if (myid == 0) then
993  nnodes = sum(nodesizes)
994  else
995  nnodes = 1
996  end if
997 
998  ! Only root proc actually has any space allocated
999  allocate (vars(nnodes, isize))
1000 
1001  ! Gather values to the root proc.
1002  do i = 1, isize
1003  call mpi_gatherv(nodalvalues(:, i), sizenode, &
1004  adflow_real, vars(:, i), nodesizes, nodedisps, &
1005  adflow_real, 0, adflow_comm_world, ierr)
1006  call echk(ierr, __file__, __line__)
1007  end do
1008  deallocate (nodalvalues)
1009 
1010  ! Now gather up the connectivity
1011  allocate (celldisps(0:nproc), cellsizes(nproc))
1012 
1013  call mpi_gather(sizecell, 1, adflow_integer, &
1014  cellsizes, 1, adflow_integer, 0, adflow_comm_world, ierr)
1015  call echk(ierr, __file__, __line__)
1016 
1017  if (allocated(cgnsblockid)) then
1018  deallocate (cgnsblockid)
1019  end if
1020 
1021  allocate (localconn(4, sizecell), localelemfam(sizecell), cgnsblockid(sizecell))
1022  call getsurfaceconnectivity(localconn, cgnsblockid, sizecell, exch%famList, size(exch%famList), .true.)
1023  call getsurfacefamily(localelemfam, sizecell, exch%famList, size(exch%famList), .true.)
1024 
1025  if (myid == 0) then
1026  celldisps(0) = 0
1027  do iproc = 1, nproc
1028  celldisps(iproc) = celldisps(iproc - 1) + cellsizes(iproc)
1029  end do
1030  ncells = sum(cellsizes)
1031  allocate (conn(4, ncells))
1032  allocate (elemfam(ncells))
1033  end if
1034 
1035  ! We offset the conn array by nodeDisps(iProc) which
1036  ! automagically adjusts the connectivity to account for the
1037  ! number of nodes from different processors
1038 
1039  call mpi_gatherv(localconn + nodedisps(myid), &
1040  4 * size(localconn, 2), adflow_integer, conn, &
1041  cellsizes * 4, celldisps * 4, adflow_integer, 0, adflow_comm_world, ierr)
1042  call echk(ierr, __file__, __line__)
1043 
1044  call mpi_gatherv(localelemfam, &
1045  size(localelemfam), adflow_integer, elemfam, &
1046  cellsizes, celldisps, adflow_integer, 0, adflow_comm_world, ierr)
1047  call echk(ierr, __file__, __line__)
1048 
1049  ! Local values are finished
1050  deallocate (localconn, localelemfam)
1051  izone = 0
1052  rootproc2: if (myid == 0 .and. ncells > 0) then
1053 
1054  ! Need zero based for binary output.
1055  conn = conn - 1
1056 
1057  allocate (mask(ncells))
1058  datawritten = .false.
1059  do ifam = 1, size(exch%famList)
1060 
1061  ! Check if we have to write this one:
1062  faminclude2: if (faminlist(exch%famList(ifam), famlist)) then
1063 
1064  ! Create a temporary mask
1065  mask = 0
1066  ncellstowrite = 0
1067  do i = 1, ncells
1068  ! Check if this elem is to be included
1069  if (elemfam(i) == exch%famList(ifam)) then
1070  mask(i) = 1
1071  ncellstowrite = ncellstowrite + 1
1072  end if
1073  end do
1074 
1075  actualwrite2: if (ncellstowrite > 0) then
1076 
1077  ! Write Zone Data
1078  call writefloat(zonemarker)
1079 
1080  ! Data Type for each variable (1 for float, 2 for double)
1081  do i = 1, 3
1082  if (precisionsurfgrid == precisionsingle) then
1083  call writeinteger(1)
1084  else if (precisionsurfgrid == precisiondouble) then
1085  call writeinteger(2)
1086  end if
1087  end do
1088 
1089  do i = 1, nsolvar
1090  if (precisionsurfsol == precisionsingle) then
1091  call writeinteger(1)
1092  else if (precisionsurfsol == precisiondouble) then
1093  call writeinteger(2)
1094  end if
1095  end do
1096 
1097  call writeinteger(0) ! Has passive variables (0 for no)
1098 
1099  if (.not. datawritten) then
1100 
1101  ! Save the current 0-based zone so we know which to share with.
1102  lastzonesharing = izone
1103 
1104  call writeinteger(0) ! Has variable sharing (0 for no)
1105  call writeinteger(-1) ! Zone based zone number to share connectivity (-1 for no sharing)
1106 
1107  ! Min/Max Value for coordinates
1108  do j = 1, 3
1109  call writedouble(minval(vars(:, j)))
1110  call writedouble(maxval(vars(:, j)))
1111  end do
1112 
1113  ! Min/Max Value for solution variables
1114  do j = 1, nsolvar
1115  call writedouble(minval(vars(:, j + 9)))
1116  call writedouble(maxval(vars(:, j + 9)))
1117  end do
1118 
1119  ! Dump the coordinates
1120  do j = 1, 3
1121  if (precisionsurfgrid == precisionsingle) then
1122  call writefloats(vars(1:nnodes, j))
1123  else if (precisionsurfsol == precisiondouble) then
1124  call writedoubles(vars(1:nnodes, j))
1125  end if
1126  end do
1127 
1128  ! Dump the solution variables
1129  do j = 1, nsolvar
1130  if (precisionsurfsol == precisionsingle) then
1131  call writefloats(vars(1:nnodes, j + 9))
1132  else if (precisionsurfsol == precisiondouble) then
1133  call writedoubles(vars(1:nnodes, j + 9))
1134  end if
1135  end do
1136 
1137  else
1138  ! This will be sharing data from another zone.
1139  call writeinteger(1) ! Has variable sharing (0 for no)
1140 
1141  ! Write out the zone number for sharing the data.
1142  do j = 1, 3 + nsolvar
1143  call writeinteger(lastzonesharing)
1144  end do
1145 
1146  call writeinteger(-1) ! Zone based zone number to share connectivity (-1 for no sharing
1147  end if
1148 
1149  ! Dump the connectivity
1150  j = 0
1151  do i = 1, ncells
1152  ! Check if this elem is to be included
1153  if (mask(i) == 1) then
1154  call writeintegers(conn(:, i))
1155  j = j + 1
1156  end if
1157  end do
1158 
1159  izone = izone + 1
1160  end if actualwrite2
1161  end if faminclude2
1162  end do
1163  deallocate (mask)
1164  end if rootproc2
1165  if (myid == 0) then
1166  deallocate (conn, elemfam)
1167  end if
1168  deallocate (cellsizes, celldisps, nodesizes, nodedisps, vars)
1169  end do masterbcloop
1170 
1171  if (myid == 0) then
1172  close (fileid)
1173  end if
1174 
1175  end do spectralloop
1176 
1177  ! Restore the modified option
1178  surfwriteblank = blanksave
1179  if (myid == 0 .and. printiterations) then
1180  print "(a)", "# Tecplot surface file(s) written"
1181  print "(a)", "#"
1182  end if
1183 
1184  contains
1185 
1186  subroutine writefloat(adflowRealVal)
1187  use iso_fortran_env, only: real32
1188  implicit none
1189  real(kind=realtype) :: adflowrealval
1190  real(kind=real32) :: float
1191  float = adflowrealval
1192  write (fileid) float
1193  end subroutine writefloat
1194 
1195  subroutine writedouble(adflowRealVal)
1196  use iso_fortran_env, only: real64
1197  implicit none
1198  real(kind=realtype) :: adflowrealval
1199  real(kind=real64) :: dble
1200  dble = adflowrealval
1201  write (fileid) dble
1202  end subroutine writedouble
1203 
1204  subroutine writefloats(adflowRealVals)
1205  use iso_fortran_env, only: real32
1206  implicit none
1207  real(kind=realtype) :: adflowrealvals(:)
1208  real(kind=real32) :: floats(size(adflowrealvals))
1209  integer :: i
1210  floats = adflowrealvals
1211  write (fileid) floats
1212 
1213  end subroutine writefloats
1214 
1215  subroutine writedoubles(adflowRealVals)
1216  use iso_fortran_env, only: real64
1217  implicit none
1218  real(kind=realtype) :: adflowrealvals(:)
1219  real(kind=real64) :: dbles(size(adflowrealvals))
1220  integer :: i
1221  dbles = adflowrealvals
1222  write (fileid) dbles
1223 
1224  end subroutine writedoubles
1225 
1226  subroutine writeinteger(adflowIntegerVal)
1227  use iso_fortran_env, only: int32
1228  implicit none
1229  integer(kind=intType) :: adflowIntegerVal
1230  integer(kind=int32) :: int
1231 
1232  int = adflowintegerval
1233  write (fileid) int
1234  end subroutine writeinteger
1235 
1236  subroutine writeintegers(adflowIntegerVals)
1237  use iso_fortran_env, only: int32
1238  implicit none
1239  integer(kind=intType) :: adflowIntegerVals(:), i
1240  integer(kind=int32) :: ints(size(adflowintegervals))
1241  ints = adflowintegervals
1242  write (fileid) ints
1243 
1244  end subroutine writeintegers
1245 
1246  subroutine writestring(str)
1247 
1248  implicit none
1249 
1250  character(len=*) :: str
1251  integer(kind=intType) :: i
1252 
1253  do i = 1, len(str)
1254  write (fileid) iachar(str(i:i))
1255  end do
1256  write (fileid) 0
1257 
1258  end subroutine writestring
1259 
1260  end subroutine writetecplotsurfacefile
1261 
        use constants
        use communication
        use blockpointers
        use inputphysics
        use outputmod
        use inputtimespectral
        use surfacefamilies
        implicit none

        ! Working Variables
        ! NOTE(review): these locals are not referenced in the visible
        ! body — confirm against the full routine before removing.
        integer(kind=intType) :: nPts, nCells, sps

        ! One-time initialization guard: the tables below are module
        ! state, so they only need to be filled on the first call.
        if (liftdistinitialized) then
            return
        else

            ! Data for the marching squares method: Which edges are cut by
            ! the contour. We haven't dealt with the case of an ambiguous
            ! contour which is case 6 and 11. Since most of the slices we are
            ! doing are planes this won't matter.
            !
            ! Row index = 1 + bitmask of corners with a negative signed
            ! distance (bit 1 -> corner 1, bit 2 -> corner 2, bit 4 ->
            ! corner 3, bit 8 -> corner 4), matching how indexsquare is
            ! built in createslice. Each row lists the cut-edge numbers,
            ! zero-terminated.
            mscon1(1, :) = (/0, 0, 0, 0, 0/)
            mscon1(2, :) = (/1, 4, 0, 0, 0/)
            mscon1(3, :) = (/1, 2, 0, 0, 0/)
            mscon1(4, :) = (/4, 2, 0, 0, 0/)
            mscon1(5, :) = (/2, 3, 0, 0, 0/)
            mscon1(6, :) = (/2, 3, 0, 0, 0/) ! Should be 2, 3, 1, 4
            mscon1(7, :) = (/1, 3, 0, 0, 0/)
            mscon1(8, :) = (/4, 3, 0, 0, 0/)
            mscon1(9, :) = (/4, 3, 0, 0, 0/)
            mscon1(10, :) = (/1, 3, 0, 0, 0/)
            mscon1(11, :) = (/2, 3, 0, 0, 0/) ! Should be 2, 3, 1, 4
            mscon1(12, :) = (/2, 3, 0, 0, 0/)
            mscon1(13, :) = (/4, 2, 0, 0, 0/)
            mscon1(14, :) = (/1, 2, 0, 0, 0/)
            mscon1(15, :) = (/1, 4, 0, 0, 0/)
            mscon1(16, :) = (/0, 0, 0, 0, 0/)

            ! Edge number -> the pair of quad corner nodes it joins
            ! (edge 1 is between corners 1 and 2, etc.).
            mscon2(1, :) = (/1, 2/)
            mscon2(2, :) = (/2, 3/)
            mscon2(3, :) = (/3, 4/)
            mscon2(4, :) = (/4, 1/)
            liftdistinitialized = .true.
        end if

    end subroutine initializeliftdistributiondata
1308 
    subroutine computesurfaceoutputnodaldata(exch, zipper, includeTractions, nodalValues)
        !
        ! Compute all nodal output values for the surface families in
        ! 'exch'. On exit, nodalValues is laid out column-wise as:
        !   columns 1:3   nodal coordinates
        !   columns 4:6   pressure tractions (only when includeTractions)
        !   columns 7:9   viscous tractions  (only when includeTractions)
        !   columns 10:   the nSolVar surface solution variables
        ! Cell-centered solution data is averaged to the nodes with an
        ! equal-weight average, globalized via the exchange's PETSc
        ! scatters so nodes shared between processors agree. When the
        ! zipper mesh is allocated, the zipper-node values are appended
        ! after the first exch%nNodes entries on the root processor.
        !
        use constants
        use communication
        use inputphysics
        use blockpointers
        ! NOTE(review): the line below is a continuation fragment of a
        ! use statement whose opening line is not visible in this view.
        surfsolnames
        use surfaceutils
        use utils, only: setpointers, echk
        use sorting, only: faminlist
        use oversetdata, only: zippermesh
#include <petsc/finclude/petsc.h>
        use petsc
        implicit none
        ! Input Param
        type(familyexchange) :: exch
        logical :: includeTractions
        real(kind=realtype), dimension(:, :), intent(inout) :: nodalvalues
        type(zippermesh) :: zipper
        ! Working params
        integer(kind=intType) :: i, j, ii, jj, kk, nn, mm, isol, ierr, npts, ncells
        integer(kind=intType) :: nfields, nsolvar, ibeg, iend, jbeg, jend, ind(4), ni, nj
        integer(kind=intType) :: sizeNode, sizeCell, iDim
        integer(kind=intType), dimension(3, 2) :: cellRangeCGNS
        character(len=maxCGNSNameLen), dimension(:), allocatable :: solNames
        real(kind=realtype), dimension(:), allocatable :: buffer
        real(kind=realtype), dimension(:), pointer :: weightptr, localptr
        real(kind=realtype), dimension(:, :), allocatable :: tmp
        logical :: viscousSubFace

        nodalvalues = zero

        ! Query how many surface solution variables are requested and
        ! fetch their names.
        call numberofsurfsolvariables(nsolvar)
        allocate (solnames(nsolvar))
        call surfsolnames(solnames)

        ! The tractions have a special routine so call that first before we
        ! mess with the family information.

        if (includetractions) then

            ! And put the traction where it needs to go if necessary. Note that
            ! this computation will only work correctly when the wallExchange
            ! which include all wallgroups is used.

            call computenodaltractions(exch%sps)
            ii = 0
            do nn = 1, ndom
                call setpointers(nn, 1_inttype, exch%sps)
                do mm = 1, nbocos
                    ibeg = bcdata(mm)%inBeg; iend = bcdata(mm)%inEnd
                    jbeg = bcdata(mm)%jnBeg; jend = bcdata(mm)%jnEnd

                    if (faminlist(bcdata(mm)%famID, exch%famList)) then
                        ! Copy the pressure (Tp) and viscous (Tv) nodal
                        ! tractions into columns 4:6 and 7:9, walking the
                        ! subface nodes in the same order the exchange uses.
                        do j = jbeg, jend
                            do i = ibeg, iend
                                ii = ii + 1
                                nodalvalues(ii, 4) = bcdata(mm)%Tp(i, j, 1)
                                nodalvalues(ii, 5) = bcdata(mm)%Tp(i, j, 2)
                                nodalvalues(ii, 6) = bcdata(mm)%Tp(i, j, 3)

                                nodalvalues(ii, 7) = bcdata(mm)%Tv(i, j, 1)
                                nodalvalues(ii, 8) = bcdata(mm)%Tv(i, j, 2)
                                nodalvalues(ii, 9) = bcdata(mm)%Tv(i, j, 3)
                            end do
                        end do
                    end if
                end do
            end do

            ! Not quite done yet with the nodal tractions; we need to send
            ! the nodal tractions that the duplices the *zipper* mesh needs
            ! to the root proc. We have a special scatter for this.

            if (zipper%allocated) then

                ! Loop over the 6 tractions
                do idim = 1, 6

                    ! Copy the values into localPtr
                    call vecgetarrayf90(exch%nodeValLocal, localptr, ierr)
                    call echk(ierr, __file__, __line__)

                    do i = 1, exch%nNodes
                        localptr(i) = nodalvalues(i, idim + 3)
                    end do

                    call vecrestorearrayf90(exch%nodeValLocal, localptr, ierr)
                    call echk(ierr, __file__, __line__)

                    ! Now use the *zipper* scatter
                    call vecscatterbegin(zipper%scatter, exch%nodeValLocal, &
                                         zipper%localVal, insert_values, scatter_forward, ierr)
                    call echk(ierr, __file__, __line__)

                    call vecscatterend(zipper%scatter, exch%nodeValLocal, &
                                       zipper%localVal, insert_values, scatter_forward, ierr)
                    call echk(ierr, __file__, __line__)

                    ! Copy the zipper values out on the root proc
                    if (myid == 0) then
                        call vecgetarrayf90(zipper%localVal, localptr, ierr)
                        call echk(ierr, __file__, __line__)

                        ! Zipper values are appended after the exch%nNodes
                        ! locally-owned entries.
                        do i = 1, size(localptr)
                            nodalvalues(exch%nNodes + i, idim + 3) = localptr(i)
                        end do

                        call vecrestorearrayf90(zipper%localVal, localptr, ierr)
                        call echk(ierr, __file__, __line__)
                    end if
                end do
            end if
        end if

        ! Get the current set of surface points for the family we just set.
        call getsurfacesize(sizenode, sizecell, exch%famList, size(exch%famlist), .true.)
        allocate (tmp(3, sizenode))
        call getsurfacepoints(tmp, sizenode, exch%sps, exch%famList, size(exch%famList), .true.)

        ! Coordinates go into columns 1:3.
        do i = 1, sizenode
            nodalvalues(i, 1:3) = tmp(1:3, i)
        end do
        deallocate (tmp)
        ! For the remainder of the variables, use arithmetic averaging.
        ! First build the averaging weights: count, per node, how many
        ! surface cells touch it (each cell scatters a 1 to its 4 nodes).
        call vecgetarrayf90(exch%nodeValLocal, localptr, ierr)
        call echk(ierr, __file__, __line__)

        localptr = zero
        ! ii is the index into the pointer array
        ii = 0
        do nn = 1, ndom
            call setpointers(nn, 1_inttype, exch%sps)
            do mm = 1, nbocos
                ibeg = bcdata(mm)%inBeg; iend = bcdata(mm)%inEnd
                jbeg = bcdata(mm)%jnBeg; jend = bcdata(mm)%jnEnd
                ni = iend - ibeg + 1
                nj = jend - jbeg + 1
                if (faminlist(bcdata(mm)%famID, exch%FamList)) then
                    do j = 0, nj - 2
                        do i = 0, ni - 2

                            ! Scatter 1 to each node.
                            ind(1) = ii + (j) * ni + i + 1
                            ind(2) = ii + (j) * ni + i + 2
                            ind(3) = ii + (j + 1) * ni + i + 2
                            ind(4) = ii + (j + 1) * ni + i + 1
                            do jj = 1, 4
                                localptr(ind(jj)) = localptr(ind(jj)) + one
                            end do
                        end do
                    end do
                    ii = ii + ni * nj
                end if
            end do
        end do

        call vecrestorearrayf90(exch%nodeValLocal, localptr, ierr)
        call echk(ierr, __file__, __line__)

        ! Globalize the area
        call vecset(exch%sumGlobal, zero, ierr)
        call echk(ierr, __file__, __line__)

        call vecscatterbegin(exch%scatter, exch%nodeValLocal, &
                             exch%sumGlobal, add_values, scatter_forward, ierr)
        call echk(ierr, __file__, __line__)

        call vecscatterend(exch%scatter, exch%nodeValLocal, &
                           exch%sumGlobal, add_values, scatter_forward, ierr)
        call echk(ierr, __file__, __line__)

        ! Now compute the inverse of the weighting so that we can multiply
        ! instead of dividing.

        call vecgetarrayf90(exch%sumGlobal, localptr, ierr)
        call echk(ierr, __file__, __line__)

        localptr = one / localptr

        call vecrestorearrayf90(exch%sumGlobal, localptr, ierr)
        call echk(ierr, __file__, __line__)

        varloop: do isol = 1, nsolvar

            ! Extract the pointer to the local array
            call vecgetarrayf90(exch%nodeValLocal, localptr, ierr)
            call echk(ierr, __file__, __line__)

            ! ii is the continous running element pointer
            ii = 0
            localptr = zero
            ! Do each variable separately.
            domainloop: do nn = 1, ndom
                call setpointers(nn, 1, exch%sps)

                bocoloop: do mm = 1, nbocos
                    if (faminlist(bcdata(mm)%famID, exch%famList)) then

                        ! storeSurfSolInBuffer needs to know if the subface is
                        ! viscous or not.
                        viscoussubface = .false.
                        if (bctype(mm) == nswalladiabatic .or. &
                            bctype(mm) == nswallisothermal) then
                            viscoussubface = .true.
                        end if

                        ! Determine the cell range *in the original cgns
                        ! ordering*. The reason for this is the storeSurfSolInBufer
                        ! is normally used for writing CGNS and thus it is that
                        ! ordering that is important. However, that isn't too hard to
                        ! deal with.

                        jbeg = bcdata(mm)%jnBeg + 1
                        jend = bcdata(mm)%jnEnd
                        ibeg = bcdata(mm)%inBeg + 1
                        iend = bcdata(mm)%inEnd

                        ! Dummy value of 1 for the face values not set.
                        cellrangecgns = 1
                        select case (bcfaceid(mm))
                        case (imin, imax)
                            ! I range meaningless
                            cellrangecgns(2, 1) = ibeg + jbegor - 1
                            cellrangecgns(2, 2) = iend + jbegor - 1

                            cellrangecgns(3, 1) = jbeg + kbegor - 1
                            cellrangecgns(3, 2) = jend + kbegor - 1

                        case (jmin, jmax)
                            ! J range meaningless
                            cellrangecgns(1, 1) = ibeg + ibegor - 1
                            cellrangecgns(1, 2) = iend + ibegor - 1

                            cellrangecgns(3, 1) = jbeg + kbegor - 1
                            cellrangecgns(3, 2) = jend + kbegor - 1

                        case (kmin, kmax)
                            ! K range meaningless
                            cellrangecgns(1, 1) = ibeg + ibegor - 1
                            cellrangecgns(1, 2) = iend + ibegor - 1

                            cellrangecgns(2, 1) = jbeg + jbegor - 1
                            cellrangecgns(2, 2) = jend + jbegor - 1
                        end select

                        ! Allocate enough space for the 1D buffer
                        allocate (buffer((iend - ibeg + 1) * (jend - jbeg + 1)))
                        kk = 0
                        call storesurfsolinbuffer(exch%sps, buffer, kk, nn, bcfaceid(mm), &
                                                  cellrangecgns, solnames(isol), viscoussubface, .false.)

                        ! Now since the storeSurfSol just put things in a flat
                        ! array and are face based, here we take the 1D face data
                        ! and scatter to the nodes.

                        ibeg = bcdata(mm)%inBeg; iend = bcdata(mm)%inEnd
                        jbeg = bcdata(mm)%jnBeg; jend = bcdata(mm)%jnEnd
                        ni = iend - ibeg + 1
                        nj = jend - jbeg + 1
                        jj = 0
                        do j = 0, nj - 2
                            do i = 0, ni - 2
                                jj = jj + 1
                                ! Scatter value to each node
                                ind(1) = ii + (j) * ni + i + 1
                                ind(2) = ii + (j) * ni + i + 2
                                ind(3) = ii + (j + 1) * ni + i + 2
                                ind(4) = ii + (j + 1) * ni + i + 1
                                do kk = 1, 4
                                    localptr(ind(kk)) = localptr(ind(kk)) + buffer(jj)
                                end do
                            end do
                        end do

                        ii = ii + ni * nj
                        deallocate (buffer)
                    end if
                end do bocoloop
            end do domainloop

            ! Return our pointer
            call vecrestorearrayf90(exch%nodeValLocal, localptr, ierr)
            call echk(ierr, __file__, __line__)

            ! Globalize the function value
            call vecset(exch%nodeValGlobal, zero, ierr)
            call echk(ierr, __file__, __line__)

            call vecscatterbegin(exch%scatter, exch%nodeValLocal, &
                                 exch%nodeValGlobal, add_values, scatter_forward, ierr)
            call echk(ierr, __file__, __line__)

            call vecscatterend(exch%scatter, exch%nodeValLocal, &
                               exch%nodeValGlobal, add_values, scatter_forward, ierr)
            call echk(ierr, __file__, __line__)

            ! Now divide by the weighting sum. We can do this with a
            ! vecpointwisemult since we already inverted the weight above.
            call vecpointwisemult(exch%nodeValGlobal, exch%nodeValGlobal, &
                                  exch%sumGlobal, ierr)
            call echk(ierr, __file__, __line__)

            ! Push back to the local values
            call vecscatterbegin(exch%scatter, exch%nodeValGlobal, &
                                 exch%nodeValLocal, insert_values, scatter_reverse, ierr)
            call echk(ierr, __file__, __line__)

            call vecscatterend(exch%scatter, exch%nodeValGlobal, &
                               exch%nodeValLocal, insert_values, scatter_reverse, ierr)
            call echk(ierr, __file__, __line__)

            ! Copy the values into nodalValues
            call vecgetarrayf90(exch%nodeValLocal, localptr, ierr)
            call echk(ierr, __file__, __line__)

            ! Solution variable isol lands in column 9 + isol.
            do i = 1, size(localptr)
                nodalvalues(i, isol + 9) = localptr(i)
            end do

            call vecrestorearrayf90(exch%nodeValLocal, localptr, ierr)
            call echk(ierr, __file__, __line__)

            if (zipper%allocated) then

                ! Now use the *zipper* scatter
                call vecscatterbegin(zipper%scatter, exch%nodeValLocal, &
                                     zipper%localVal, insert_values, scatter_forward, ierr)
                call echk(ierr, __file__, __line__)

                call vecscatterend(zipper%scatter, exch%nodeValLocal, &
                                   zipper%localVal, insert_values, scatter_forward, ierr)
                call echk(ierr, __file__, __line__)

                ! Copy the zipper values out on the root proc
                if (myid == 0) then
                    call vecgetarrayf90(zipper%localVal, localptr, ierr)
                    call echk(ierr, __file__, __line__)

                    do i = 1, size(localptr)
                        nodalvalues(exch%nNodes + i, 9 + isol) = localptr(i)
                    end do

                    call vecrestorearrayf90(zipper%localVal, localptr, ierr)
                    call echk(ierr, __file__, __line__)
                end if
            end if
        end do varloop
        deallocate (solnames)

    end subroutine computesurfaceoutputnodaldata
1664 
    subroutine createslice(pts, conn, elemFam, slc, pt, normal, dir_vec, use_dir, sliceName, famList)
        !
        ! This subroutine creates a slice on a plane defined by pt and
        ! normal, using a marching-squares contour over the quad surface
        ! mesh (pts, conn). It only uses the families specified in the
        ! famList. On exit, slc holds the interpolation weights (w),
        ! parent-node indices (ind), and line-segment connectivity (conn)
        ! for the slice, with duplicate nodes merged.
        !
        use constants
        use sorting, only: faminlist
        implicit none

        ! Input param
        ! pts(3, :)   : surface node coordinates
        ! conn(4, :)  : quad connectivity into pts
        ! elemFam(:)  : family ID of each quad
        ! slc         : slice to populate (weights/indices/conn allocated here)
        ! pt, normal  : point and normal defining the slicing plane
        ! dir_vec     : direction used to reject elements when use_dir is true
        ! use_dir     : if true, only keep elements whose centroid lies on the
        !               dir_vec side of pt (e.g. to pick one of the two cuts
        !               of a cylindrical slice through a nacelle)
        ! sliceName   : name stored on the slice
        ! famList     : families to include in the slice
        real(kind=realtype), dimension(:, :), intent(in) :: pts
        integer(kind=intType), dimension(:, :), intent(in) :: conn
        integer(kind=intType), dimension(:), intent(in) :: elemFam
        type(slice), intent(inout) :: slc
        real(kind=realtype), dimension(3), intent(in) :: pt, dir_vec, normal
        logical, intent(in) :: use_dir
        character(len=*), intent(in) :: sliceName
        integer(kind=intType), dimension(:), intent(in) :: famList

        ! Working param
        integer(kind=intType) :: i, nMax, nUnique, oldInd, newInd
        integer(kind=intType) :: patchIndices(4), indexSquare, jj, kk, icon, iCoor, num1, num2
        real(kind=realtype) :: f(4), d, ovrdnom, tol, len_vec
        logical :: logic1, foundFam
        real(kind=realtype), dimension(:, :), pointer :: tmpweight, dummy, tmpnodes
        integer(kind=intType), dimension(:, :), pointer :: tmpInd
        integer(kind=intType), dimension(:), allocatable :: link
        real(kind=realtype), dimension(:), allocatable :: fc
        real(kind=realtype), dimension(3) :: elemc, vec

        ! Allocate the family list this slice is to use:
        slc%sliceName = slicename
        ! Set the info for the slice:
        slc%pt = pt
        slc%normal = normal
        slc%dir_vec = dir_vec
        slc%use_dir = use_dir
        slc%nNodes = 0
        allocate (slc%famList(size(famlist)))
        slc%famList = famlist
        ! First step is to compute the 'function' value that will be used
        ! for the contour.

        ! Equation of plane: ax + by + cz + d = 0
        d = -pt(1) * normal(1) - pt(2) * normal(2) - pt(3) * normal(3)
        ovrdnom = one / sqrt(normal(1)**2 + normal(2)**2 + normal(3)**2)

        ! Compute the distance function on all possible surfaces on this
        ! processor.
        allocate (fc(size(pts, 2)))
        do i = 1, size(pts, 2)
            ! Now compute the signed distance
            fc(i) = (normal(1) * pts(1, i) + normal(2) * pts(2, i) + normal(3) * pts(3, i) + d) * ovrdnom
        end do

        ! Estimate size of slice by the sqrt of the number of nodes in the
        ! mesh. Exact size doesn't matter as we realloc if necessary.
        nmax = int(sqrt(dble(size(pts, 2))))
        allocate (tmpweight(2, nmax), tmpind(2, nmax), tmpnodes(3, nmax))

        ! icoor counts the intersection points generated so far.
        icoor = 0
        oldind = 1

        ! Loop over all elements
        elemloop: do i = 1, size(conn, 2)

            faminclude: if (faminlist(elemfam(i), famlist)) then

                ! zero out the centroid
                elemc = zero

                ! Extract the indices and function values at each corner
                do jj = 1, 4
                    patchindices(jj) = conn(jj, i)
                    f(jj) = fc(patchindices(jj))
                    elemc = elemc + 0.25_realtype * pts(:, patchindices(jj))
                end do

                ! check if we are using the direction to pick sliced elements
                if (use_dir) then
                    ! check if the centroid of this element is in the correct direction
                    vec = elemc - pt
                    ! normalize vector
                    len_vec = sqrt(vec(1) * vec(1) + vec(2) * vec(2) + vec(3) * vec(3))
                    vec = vec / len_vec
                    if ((vec(1) * dir_vec(1) + vec(2) * dir_vec(2) + vec(3) * dir_vec(3)) .lt. zero) then
                        ! we reject this element; just set all signed distances to 1.0
                        ! so the marching-squares lookup below yields "no cut".
                        do jj = 1, 4
                            f(jj) = one
                        end do
                    end if
                end if

                ! Based on the values at each corner, determine which
                ! type contour we have. indexsquare = 1 + bitmask of the
                ! corners with negative signed distance; this is the row
                ! index into the mscon1 table set up in
                ! initializeliftdistributiondata.
                indexsquare = 1

                if (f(1) .lt. zero) indexsquare = indexsquare + 1
                if (f(2) .lt. zero) indexsquare = indexsquare + 2
                if (f(3) .lt. zero) indexsquare = indexsquare + 4
                if (f(4) .lt. zero) indexsquare = indexsquare + 8

                logic1 = .true.

                ! Walk the zero-terminated edge list for this case.
                kk = 1
                do while (logic1)
                    ! This is the edge
                    icon = mscon1(indexsquare, kk)

                    if (icon == 0) then
                        logic1 = .false.
                    else

                        ! num1, num2 are node indices
                        num1 = mscon2(icon, 1)
                        num2 = mscon2(icon, 2)

                        icoor = icoor + 1
                        if (icoor > nmax) then
                            ! Need to reallocate the arrays. Make it double the size
                            call reallocatereal2(tmpweight, 2, 2 * nmax, 2, nmax, .true.)
                            call reallocatereal2(tmpnodes, 3, 2 * nmax, 3, nmax, .true.)
                            call reallocateinteger2(tmpind, 2, 2 * nmax, 2, nmax, .true.)
                            nmax = nmax * 2
                        end if

                        ! Weight factors: linear interpolation of the zero
                        ! crossing along the cut edge.
                        tmpweight(2, icoor) = (zero - f(num1)) / (f(num2) - f(num1))
                        tmpweight(1, icoor) = one - tmpweight(2, icoor)

                        ! Store the weight factors
                        tmpind(:, icoor) = (/patchindices(num1), patchindices(num2)/)

                        ! Store the physical nodes so we know how to reduce
                        tmpnodes(:, icoor) = &
                            tmpweight(1, icoor) * pts(:, tmpind(1, icoor)) + &
                            tmpweight(2, icoor) * pts(:, tmpind(2, icoor))
                        kk = kk + 1
                    end if
                end do
            end if faminclude
        end do elemloop

        ! To save disk space, we can compact out the doubly defined nodes that
        ! were created during the slicing process. Then we can allocate the
        ! final weight array and index array to be the exact correct
        ! length

        allocate (dummy(3, icoor), link(icoor))
        tol = 1e-12
        call pointreduce(tmpnodes, icoor, tol, dummy, link, nunique)
        allocate (slc%w(2, nunique), slc%ind(2, nunique), slc%conn(2, icoor / 2))
        slc%nNodes = nunique

        ! Modify the data accordingly: link(i) maps raw point i to its
        ! unique index, so duplicates simply overwrite with equal data.
        do i = 1, icoor
            slc%w(:, link(i)) = tmpweight(:, i)
            slc%ind(:, link(i)) = tmpind(:, i)
        end do

        ! The connectivity is actually link reshaped since the original conn
        ! would have been (1,2), (3,4), (5,6) etc.
        do i = 1, icoor / 2
            slc%conn(1, i) = link(2 * i - 1)
            slc%conn(2, i) = link(2 * i)
        end do

        deallocate (tmpnodes, tmpweight, tmpind, dummy, link, fc)
    end subroutine createslice
1836 
1837  subroutine destroyslice(slc)
1838  !
1839  ! This subroutine destroys a slice created by the createSlice
1840  ! routine
1841  !
1842  use constants
1843  implicit none
1844 
1845  ! Input param
1846  type(slice), intent(inout) :: slc
1847 
1848  ! Deallocate weights and indices if they are already allocated
1849  if (allocated(slc%w)) then
1850  deallocate (slc%w)
1851  end if
1852 
1853  if (allocated(slc%ind)) then
1854  deallocate (slc%ind)
1855  end if
1856 
1857  if (allocated(slc%conn)) then
1858  deallocate (slc%conn)
1859  end if
1860 
1861  if (allocated(slc%famList)) then
1862  deallocate (slc%famList)
1863  end if
1864 
1865  if (allocated(slc%vars)) then
1866  deallocate (slc%vars)
1867  end if
1868 
1869  end subroutine destroyslice
1870 
    subroutine integrateslice(lSlc, gSlc, nodalValues, nFields, doConnectivity)
        !
        ! This subroutine integrates the forces on the local slice lSlc and
        ! computes the integrated quantities for lift, drag, cl and cd with
        ! contributions from both the pressure and viscous forces, storing
        ! the global (MPI-reduced) results in gSlc on the root processor.
        ! It optionally interpolates solution variables as well and, when
        ! doConnectivity is true, gathers the line-segment connectivity to
        ! the root processor so the slice can later be written to a file.
        !
        ! NOTE(review): this routine contains MPI collectives
        ! (allgather/gatherv/gather/bcast/reduce) that every rank must enter
        ! in the same order — do not reorder the communication statements.
        !
        use constants
        use inputphysics
        use flowvarrefstate
        use communication
        use utils, only: echk
        implicit none

        ! Input Variables
        ! lSlc           : local (per-processor) slice; its pL/pD/pM/vL/vD/vM
        !                  and fx/fy/fz members are set as a side effect
        ! gSlc           : global slice; fully populated on the root proc only
        ! nodalValues    : (node, field) array of surface nodal data; fields
        !                  are 3 coordinates, 3 pressure tractions, 3 viscous
        !                  tractions, then nFields extra solution variables
        ! nFields        : number of extra solution fields to interpolate
        ! doConnectivity : gather element connectivity to root when .true.
        type(slice) :: lSlc, gSlc
        integer(kind=intType), intent(in) :: nFields
        logical, intent(in) :: doConnectivity
        real(kind=realtype), dimension(:, :), intent(in) :: nodalvalues
        ! Working variables
        ! NOTE(review): 'len' shadows the intrinsic of the same name inside
        ! this routine; it holds the length of one line segment.
        integer(kind=intType) :: i, j, i1, i2
        real(kind=realtype), dimension(3) :: x1, x2, pt1, pt2, vt1, vt2, pf, vf, pf_elem, vf_elem
        real(kind=realtype) :: len, dmax, dmin, dist, fact, m(3, 3), tmp(6)
        real(kind=realtype) :: r(3), r_new(3), hyp, te(3), le(3), theta, w1, w2
        integer(kind=intType) :: bestPair(2), normal_ind, iProc, ierr, iSize
        real(kind=realtype), dimension(:, :), allocatable :: tempcoords
        real(kind=realtype), dimension(:, :), allocatable :: localvals
        integer(kind=intType), dimension(:), allocatable :: sliceNodeSizes, sliceCellSizes
        integer(kind=intType), dimension(:), allocatable :: nodeDisps, cellDisps
        real(kind=realtype), dimension(3) :: refpoint, pm, vm
        real(kind=realtype) :: xc, yc, zc
        ! Need another tmp to reduce the fx, fy and fz values
        real(kind=realtype) :: tmp_(3)

        ! Copy the info related to slice
        gslc%sliceName = trim(lslc%sliceName)
        gslc%pt = lslc%pt
        gslc%normal = lslc%normal

        ! Back out what is the main index of the slice, x, y or z based on
        ! the direction. Not the best approach, but that's ok
        ! TODO this can be improved since we are now doing arbitrary slice directions
        normal_ind = maxloc(abs(gslc%normal), 1)

        ! Zero the force and moment accumulators for this processor.
        pf = zero
        vf = zero
        pm = zero ! pressure moment
        vm = zero ! viscous moment

        ! Per-node field count: 3 coordinates + 6 tractions + extra fields.
        isize = 3 + 6 + nfields
        allocate (localvals(isize, lslc%nNodes))

        ! Interpolate the required values: each slice node is a weighted
        ! combination of the two surface nodes stored in lslc%ind.
        do i = 1, lslc%nNodes
            i1 = lslc%ind(1, i)
            i2 = lslc%ind(2, i)
            w1 = lslc%w(1, i)
            w2 = lslc%w(2, i)
            localvals(1:isize, i) = w1 * nodalvalues(i1, 1:isize) + w2 * nodalvalues(i2, 1:isize)
        end do

        ! first communicate the coordinates and connectivities to get the chord and the LE/TE
        ! we need this information to get the quarter-chord location,
        ! which is needed for the moment computations

        ! Gather up the number of nodes to be sent to the root proc:
        allocate (slicenodesizes(nproc), nodedisps(0:nproc))
        slicenodesizes = 0
        nodedisps = 0
        call mpi_allgather(lslc%nNodes, 1, adflow_integer, slicenodesizes, 1, adflow_integer, &
                           adflow_comm_world, ierr)
        call echk(ierr, __file__, __line__)

        ! Displacements are in units of reals (node count * isize fields).
        nodedisps(0) = 0
        do iproc = 1, nproc
            nodedisps(iproc) = nodedisps(iproc - 1) + slicenodesizes(iproc) * isize
        end do

        ! Only root needs the assembled global nodal data.
        if (myid == 0) then
            gslc%nNodes = sum(slicenodesizes)
            allocate (gslc%vars(isize, gslc%nNodes))
        end if

        call mpi_gatherv(localvals, isize * lslc%nNodes, adflow_real, gslc%vars, slicenodesizes * isize, &
                         nodedisps, adflow_real, 0, adflow_comm_world, ierr)
        call echk(ierr, __file__, __line__)

        ! We may also need to gather the connectivity if the slice will have
        ! to be written to a file.
        if (doconnectivity) then
            ! Local element count for this processor.
            i = size(lslc%conn, 2)
            allocate (celldisps(0:nproc), slicecellsizes(nproc))

            call mpi_gather(i, 1, adflow_integer, slicecellsizes, 1, adflow_integer, &
                            0, adflow_comm_world, ierr)
            call echk(ierr, __file__, __line__)

            ! Root builds the receive displacements (2 node indices per
            ! element) and allocates the global connectivity.
            if (myid == 0) then
                celldisps(0) = 0
                do iproc = 1, nproc
                    celldisps(iproc) = celldisps(iproc - 1) + slicecellsizes(iproc) * 2
                end do
                allocate (gslc%conn(2, sum(slicecellsizes)))
            end if

            ! We offset the conn array by nodeDisps(iProc)/isize (the node
            ! offset of this proc in the global node list) which
            ! automagically adjusts the connectivity to account for the
            ! number of nodes from different processors.
            call mpi_gatherv(lslc%conn + nodedisps(myid) / isize, 2 * size(lslc%conn, 2), adflow_integer, gslc%conn, &
                             slicecellsizes * 2, celldisps, adflow_integer, 0, adflow_comm_world, ierr)
            call echk(ierr, __file__, __line__)

            ! Not quite finished yet since we will have gathered nodes from
            ! multiple procs we have to adjust the connectivity

            deallocate (slicecellsizes, celldisps)
        end if

        deallocate (slicenodesizes, nodedisps)
        ! done communicating the geometry info

        ! process the geometry on the root processor to figure out the LE, TE, and the chord
        if (myid == 0) then
            ! TODO the comments below can be used for a _slightly_ more accurate LE/TE detection.
            ! this does not really show any difference other than the twist computation,
            ! but depending on the application, getting the twist distribution from ADflow data might be critical.
            ! so we leave the comments below for a better implementation.
            ! For now, we just work with the 2 max-distance nodes

            ! begin template:
            ! loop over the elements and find the TE bends
            ! if we have 2 TE bends, then take the mid-TE point as the TE
            ! find the largest distance node
            ! if we dont have 2 bends, just get the max distance between any 2 nodes, thats our chord line
            ! end template.

            ! Compute the chord as the max length between any two nodes...this
            ! is n^2, so should be changed in the future
            dmax = zero
            bestpair = (/1, 1/)
            do i = 1, size(gslc%vars, 2)
                ! extract node:
                x1 = gslc%vars(1:3, i)

                do j = i + 1, size(gslc%vars, 2)
                    ! extract node:
                    x2 = gslc%vars(1:3, j)

                    dist = sqrt((x1(1) - x2(1))**2 + (x1(2) - x2(2))**2 + (x1(3) - x2(3))**2)

                    if (dist > dmax) then
                        dmax = dist
                        bestpair = (/i, j/)
                    end if
                end do
            end do

            ! Set chord, protected from zero.
            ! NOTE(review): 1e-12 is a default-real literal mixed with a
            ! real(kind=realType) argument in max(); consider 1e-12_realType.
            gslc%chord = max(dmax, 1e-12)

            ! figure out which node is the LE and the TE: the node with the
            ! smaller x coordinate is assumed to be the LE
            if (gslc%vars(1, bestpair(1)) < gslc%vars(1, bestpair(2))) then
                ! first entry in the "bestPair" is the LE node
                x1 = gslc%vars(1:3, bestpair(1))
                x2 = gslc%vars(1:3, bestpair(2))
            else
                ! second entry in the "bestPair" is the LE node
                x1 = gslc%vars(1:3, bestpair(2))
                x2 = gslc%vars(1:3, bestpair(1))
            end if

            ! using the LE and TE coordinates, compute the quarter chord point
            ! we will use this as reference for the moment computation
            refpoint(1) = 0.75_realtype * x1(1) + 0.25_realtype * x2(1)
            refpoint(2) = 0.75_realtype * x1(2) + 0.25_realtype * x2(2)
            refpoint(3) = 0.75_realtype * x1(3) + 0.25_realtype * x2(3)
        end if

        ! communicate the reference point coordinates across processors.
        call mpi_bcast(refpoint, 3, adflow_real, 0, adflow_comm_world, ierr)
        call echk(ierr, __file__, __line__)

        ! now we are done with the global geometric operations. go back to integrating the slices locally on each proc

        ! loop over elements (line segments) owned by this processor
        do i = 1, size(lslc%conn, 2)
            ! Compute all the local variables we need.

            ! extract nodes:
            i1 = lslc%conn(1, i)
            i2 = lslc%conn(2, i)

            x1 = localvals(1:3, i1)
            x2 = localvals(1:3, i2)

            ! extract pressure tractions
            pt1 = localvals(4:6, i1)
            pt2 = localvals(4:6, i2)

            ! extract viscous tractions
            vt1 = localvals(7:9, i1)
            vt2 = localvals(7:9, i2)

            ! Length of this segment
            len = sqrt((x1(1) - x2(1))**2 + (x1(2) - x2(2))**2 + (x1(3) - x2(3))**2)

            ! compute the pressure and viscous forces on this element by
            ! trapezoidal integration: average traction times segment length
            pf_elem = half * (pt1 + pt2) * len
            vf_elem = half * (vt1 + vt2) * len

            ! Integrate the pressure and viscous forces separately
            pf = pf + pf_elem
            vf = vf + vf_elem

            ! compute moment arm: segment midpoint relative to the global
            ! quarter-chord reference point
            xc = half * (x1(1) + x2(1)) - refpoint(1)
            yc = half * (x1(2) + x2(2)) - refpoint(2)
            zc = half * (x1(3) + x2(3)) - refpoint(3)

            ! pressure components of the moment (r x F)
            pm(1) = pm(1) + yc * pf_elem(3) - zc * pf_elem(2)
            pm(2) = pm(2) + zc * pf_elem(1) - xc * pf_elem(3)
            pm(3) = pm(3) + xc * pf_elem(2) - yc * pf_elem(1)

            ! viscous components of the moment (r x F)
            vm(1) = vm(1) + yc * vf_elem(3) - zc * vf_elem(2)
            vm(2) = vm(2) + zc * vf_elem(1) - xc * vf_elem(3)
            vm(3) = vm(3) + xc * vf_elem(2) - yc * vf_elem(1)

        end do

        ! That is as far as we can go in parallel. We now have to gather up
        ! pL, pD, pM, vL, vD, vM as well as the nodes to the root proc.

        ! Set the local values we can in the slice: project the force vectors
        ! onto the lift and drag directions
        lslc%pL = liftdirection(1) * pf(1) + liftdirection(2) * pf(2) + liftdirection(3) * pf(3)
        lslc%pD = dragdirection(1) * pf(1) + dragdirection(2) * pf(2) + dragdirection(3) * pf(3)
        lslc%vL = liftdirection(1) * vf(1) + liftdirection(2) * vf(2) + liftdirection(3) * vf(3)
        lslc%vD = dragdirection(1) * vf(1) + dragdirection(2) * vf(2) + dragdirection(3) * vf(3)

        ! the moments are a bit different than lift and drag. we keep the 3 components of the moment
        ! in pM in this routine but then the slc%pM variable only has the component of the moment
        ! we are interested in. we use the direction index to get this value out and set it in the slice
        lslc%pM = pm(normal_ind)
        lslc%vM = vm(normal_ind)

        ! save the x,y,z-forces into the appropriate real-container
        ! from their type(slice) definition (see the top of this file)
        lslc%fx = pf(1) + vf(1)
        lslc%fy = pf(2) + vf(2)
        lslc%fz = pf(3) + vf(3)

        ! Reduce the lift/drag/moment values to root
        call mpi_reduce((/lslc%pL, lslc%pD, lslc%pM, lslc%vL, lslc%vD, lslc%vM/), tmp, 6, adflow_real, mpi_sum, &
                        0, adflow_comm_world, ierr)
        call echk(ierr, __file__, __line__)

        ! Reduce the fx, fy and fz using THE OTHER tmp, i.e. tmp_
        call mpi_reduce((/lslc%fx, lslc%fy, lslc%fz/), tmp_, 3, adflow_real, mpi_sum, &
                        0, adflow_comm_world, ierr)
        call echk(ierr, __file__, __line__)

        ! Everything below runs on the root processor only.
        if (myid == 0) then
            gslc%pL = tmp(1)
            gslc%pD = tmp(2)
            gslc%pM = tmp(3)
            gslc%vL = tmp(4)
            gslc%vD = tmp(5)
            gslc%vM = tmp(6)

            ! we now also must remember to store the MPIreduced fx, fy and fz in the
            ! global slice type:
            gslc%fx = tmp_(1)
            gslc%fy = tmp_(2)
            gslc%fz = tmp_(3)

            ! Compute factor to get coefficient (nondimensionalization by
            ! the freestream dynamic pressure)
            fact = two / (gammainf * pinf * machcoef * machcoef * pref)

            ! Take dmax as chord and compute coefficients
            gslc%CLp = gslc%pL / gslc%chord * fact
            gslc%CDp = gslc%pD / gslc%chord * fact
            gslc%CLv = gslc%vL / gslc%chord * fact
            gslc%CDv = gslc%vD / gslc%chord * fact

            ! Moment factor has an extra lengthRef
            fact = fact / (lengthref * lref)

            ! moments
            gslc%CMp = gslc%pM / gslc%chord * fact
            gslc%CMv = gslc%vM / gslc%chord * fact

            ! Default values
            gslc%twist = zero
            gslc%thickness = zero

            ! Nothing more to compute for an empty slice. Note this early
            ! return is safe: no collective MPI calls follow this point.
            if (gslc%nNodes == 0) then
                return
            end if

            ! Lastly we need the twist and the thickness
            i1 = bestpair(1)
            i2 = bestpair(2)
            x1 = gslc%vars(1:3, i1)
            x2 = gslc%vars(1:3, i2)

            ! The node with the larger x is taken as the trailing edge.
            if (x1(1) > x2(1)) then
                te = x1
                le = x2
            else
                te = x2
                le = x1
            end if

            ! Save the leading and trailing edges so we can do scaled output
            ! later
            gslc%le = le
            gslc%te = te

            ! Finally we need to get the thickness. For this, compute temporary
            ! section nodes and rotate them by the twist values we just computed
            ! and take the max and min

            ! Length of hypotenuse (the chord line) is the same
            hyp = sqrt((x1(1) - x2(1))**2 + (x1(2) - x2(2))**2 + (x1(3) - x2(3))**2)

            ! Twist angle from the LE/TE drop, converted to degrees.
            if (normal_ind == 1) then
                ! Xslice...we don't know what to do here..could be y or z. Don't
                ! do anything.
                gslc%twist = zero
            else if (normal_ind == 2) then
                ! Yslice
                theta = asin((le(3) - te(3)) / hyp)
                gslc%twist = theta * 180.0 / pi
            else
                ! Zslice
                theta = asin((le(2) - te(2)) / hyp)
                gslc%twist = theta * 180.0 / pi
            end if

            ! Build the de-twisting rotation matrix about the slice normal.
            if (normal_ind == 1) then
                ! Identity: no rotation defined for an X slice
                m(1, 1) = one; m(1, 2) = zero; m(1, 3) = zero;
                m(2, 1) = zero; m(2, 2) = one; m(2, 3) = zero;
                m(3, 1) = zero; m(3, 2) = zero; m(3, 3) = one;
            else if (normal_ind == 2) then
                ! Y-rotation matrix
                m(1, 1) = cos(-theta); m(1, 2) = zero; m(1, 3) = sin(-theta);
                m(2, 1) = zero; m(2, 2) = one; m(2, 3) = zero;
                m(3, 1) = -sin(-theta); m(3, 2) = zero; m(3, 3) = cos(-theta);
            else
                ! Z rotation Matrix
                m(1, 1) = cos(theta); m(1, 2) = -sin(theta); m(1, 3) = zero;
                m(2, 1) = sin(theta); m(2, 2) = cos(theta); m(2, 3) = zero;
                m(3, 1) = zero; m(3, 2) = zero; m(3, 3) = one;
            end if

            ! Rotate all section nodes about the TE to de-twist the section.
            allocate (tempcoords(3, size(gslc%vars, 2)))
            do i = 1, size(gslc%vars, 2)
                ! extract node:
                r = gslc%vars(1:3, i) - te
                r_new = matmul(m, r)
                tempcoords(:, i) = r_new + te
            end do

            ! Now get the max and the min and divide by the chord for t/c
            if (normal_ind == 1) then
                gslc%thickness = 0 ! Again, don't know what to do here
            else if (normal_ind == 2) then
                dmax = maxval(tempcoords(3, :))
                dmin = minval(tempcoords(3, :))
                gslc%thickness = (dmax - dmin) / hyp
            else if (normal_ind == 3) then
                dmax = maxval(tempcoords(2, :))
                dmin = minval(tempcoords(2, :))
                gslc%thickness = (dmax - dmin) / hyp
            end if
            deallocate (tempcoords)
        end if
    end subroutine integrateslice
2249 
2250  subroutine writeslice(slc, fileID, nFields)
2251  ! Write the data in slice 'slc' to openfile ID fileID
2252  use constants
2253  use inputio
2254  use commonformats, only: int5
2255  implicit none
2256 
2257  ! Input Parameters
2258  type(slice), intent(in) :: slc
2259  integer(kind=intType), intent(in) :: fileid, nfields
2260 
2261  ! Working Variables
2262  integer(kind=intType) :: i, j
2263  real(kind=realtype) :: tmp, tx, ty, tz
2264 
2265  write (fileid, "(a,a,a)") "Zone T= """, trim(slc%sliceName), """"
2266 
2267  ! IF we have nodes actually write:
2268  if (slc%nNodes > 0) then
2269  write (fileid, *) "Nodes = ", slc%nNodes, " Elements= ", size(slc%conn, 2), " ZONETYPE=FELINESEG"
2270  write (fileid, *) "DATAPACKING=POINT"
2271 
2272  do i = 1, slc%nNodes
2273  ! Write the coordinates
2274  do j = 1, 3
2275  write (fileid, sci6, advance='no') slc%vars(j, i)
2276  end do
2277 
2278  ! Write the scaled coordiantes with the LE at (0,0,0)
2279  do j = 1, 3
2280  tmp = slc%vars(j, i)
2281  write (fileid, sci6, advance='no') (tmp - slc%le(j)) / slc%chord
2282  end do
2283 
2284  ! Write field data. Starts at 9 (after 3 coordindates and the 6 tractions)
2285  do j = 1, nfields
2286  write (fileid, sci6, advance='no') slc%vars(9 + j, i)
2287  end do
2288 
2289  write (fileid, "(1x)")
2290  end do
2291 
2292  do i = 1, size(slc%conn, 2)
2293  write (fileid, int5) slc%conn(1, i), slc%conn(2, i)
2294  end do
2295  else ! Write dummy data so the number of zones are the same
2296 
2297  write (fileid, *) "Nodes = ", 2, " Elements= ", 1, " ZONETYPE=FELINESEG"
2298  write (fileid, *) "DATAPACKING=POINT"
2299  do i = 1, 2
2300  do j = 1, 6
2301  write (fileid, sci6, advance='no') zero
2302  end do
2303 
2304  do j = 1, nfields
2305  write (fileid, sci6, advance='no') zero
2306  end do
2307 
2308  write (fileid, "(1x)")
2309  end do
2310  write (fileid, int5) 1, 2
2311  end if
2312  end subroutine writeslice
2313 
2314 end module tecplotio
subroutine computenodaltractions(sps)
Definition: getForces.F90:558
Definition: BCData.F90:1
real(kind=realtype), dimension(:, :, :), pointer xx
Definition: BCPointers.F90:16
integer(kind=inttype) kbegor
integer(kind=inttype), dimension(:), pointer bcfaceid
integer(kind=inttype) ibegor
integer(kind=inttype) nbocos
integer(kind=inttype) jbegor
integer(kind=inttype), dimension(:), pointer bctype
character(len=maxcgnsnamelen), parameter cgnscoorz
Definition: cgnsNames.f90:20
character(len=maxcgnsnamelen), parameter cgnscoory
Definition: cgnsNames.f90:18
character(len=maxcgnsnamelen), parameter cgnscoorx
Definition: cgnsNames.f90:16
character(len=maxstringlen) int5
integer adflow_comm_world
real(kind=realtype), parameter zero
Definition: constants.F90:71
integer(kind=inttype), parameter imax
Definition: constants.F90:293
integer(kind=inttype), parameter precisiondouble
Definition: constants.F90:184
integer(kind=inttype), parameter kmin
Definition: constants.F90:296
real(kind=realtype), parameter four
Definition: constants.F90:75
real(kind=realtype), parameter pi
Definition: constants.F90:22
integer(kind=inttype), parameter jmax
Definition: constants.F90:295
real(kind=realtype) datasectionmarker
Definition: constants.F90:321
integer(kind=inttype), parameter nfamexchange
Definition: constants.F90:317
integer(kind=inttype), parameter nswalladiabatic
Definition: constants.F90:260
integer(kind=inttype), parameter timespectral
Definition: constants.F90:115
integer(kind=inttype), parameter precisionsingle
Definition: constants.F90:184
real(kind=realtype), parameter one
Definition: constants.F90:72
integer, parameter maxstringlen
Definition: constants.F90:16
real(kind=realtype), parameter half
Definition: constants.F90:80
integer, parameter maxcgnsnamelen
Definition: constants.F90:17
integer(kind=inttype), parameter imin
Definition: constants.F90:292
real(kind=realtype), parameter two
Definition: constants.F90:73
real(kind=realtype) zonemarker
Definition: constants.F90:320
integer(kind=inttype), parameter nswallisothermal
Definition: constants.F90:261
integer(kind=inttype), parameter ibcgroupwalls
Definition: constants.F90:309
integer(kind=inttype), parameter kmax
Definition: constants.F90:297
integer(kind=inttype), parameter jmin
Definition: constants.F90:294
logical surfwriteblank
Definition: extraOutput.f90:22
real(kind=realtype) gammainf
real(kind=realtype) pinf
real(kind=realtype) lref
real(kind=realtype) pref
integer(kind=inttype) precisionsurfsol
Definition: inputParam.F90:157
integer(kind=inttype) precisionsurfgrid
Definition: inputParam.F90:157
logical printiterations
Definition: inputParam.F90:288
integer(kind=inttype) equationmode
Definition: inputParam.F90:583
real(kind=realtype), dimension(3) dragdirection
Definition: inputParam.F90:601
real(kind=realtype) lengthref
Definition: inputParam.F90:598
real(kind=realtype) machcoef
Definition: inputParam.F90:593
real(kind=realtype), dimension(3) liftdirection
Definition: inputParam.F90:600
integer(kind=inttype) ntimeintervalsspectral
Definition: inputParam.F90:645
subroutine surfsolnames(solNames)
Definition: outputMod.F90:577
subroutine numberofsurfsolvariables(nSolVar)
Definition: outputMod.F90:129
subroutine storesurfsolinbuffer(sps, buffer, nn, blockID, faceID, cellRange, solName, viscousSubface, useRindLayer, iBeg, iEnd, jBeg, jEnd)
Definition: outputMod.F90:1399
type(zippermesh), dimension(nfamexchange), target zippermeshes
Definition: overset.F90:376
logical function faminlist(famID, famList)
Definition: sorting.F90:7
type(bcgrouptype), dimension(nfamexchange) bcfamgroups
character(len=maxcgnsnamelen), dimension(:), allocatable famnames
type(familyexchange), dimension(:, :), allocatable, target bcfamexchange
subroutine getsurfacesize(sizeNode, sizeCell, famList, n, includeZipper)
Definition: surfaceUtils.F90:6
subroutine getsurfacefamily(elemFam, ncell, famList, nFamList, includeZipper)
subroutine getsurfacepoints(points, npts, sps_in, famList, nFamList, includeZipper)
subroutine getsurfaceconnectivity(conn, cgnsBlockID, ncell, famList, nFamList, includeZipper)
subroutine addparaslice(sliceName, pt, normal, dir_vec, use_dir, famList, n)
Definition: tecplotIO.F90:78
subroutine integrateslice(lSlc, gSlc, nodalValues, nFields, doConnectivity)
Definition: tecplotIO.F90:1872
integer(kind=inttype), parameter nliftdistvar
Definition: tecplotIO.F90:74
integer(kind=inttype), parameter nslicemax
Definition: tecplotIO.F90:62
type(slice), dimension(:, :), allocatable paraslices
Definition: tecplotIO.F90:65
integer(kind=inttype) nabsslices
Definition: tecplotIO.F90:64
character(len=maxstringlen) sci6
Definition: tecplotIO.F90:7
type(liftdist), dimension(nliftdistmax), target liftdists
Definition: tecplotIO.F90:70
subroutine writeliftdistributions(sps, fileID, nodalValues)
Definition: tecplotIO.F90:488
integer(kind=inttype) nliftdists
Definition: tecplotIO.F90:69
subroutine writeliftdistributionfile(fileName, nodalValues)
Definition: tecplotIO.F90:418
subroutine addliftdistribution(nSegments, normal, normal_ind, distName, famList, n)
Definition: tecplotIO.F90:187
subroutine computesurfaceoutputnodaldata(exch, zipper, includeTractions, nodalValues)
Definition: tecplotIO.F90:1310
logical liftdistinitialized
Definition: tecplotIO.F90:58
subroutine initializeliftdistributiondata
Definition: tecplotIO.F90:1263
integer(kind=inttype), dimension(4, 2) mscon2
Definition: tecplotIO.F90:59
subroutine writetecplot(sliceFile, writeSlices, liftFile, writeLift, surfFile, writeSurf, famList, nFamList)
Definition: tecplotIO.F90:222
subroutine addabsslice(sliceName, pt, normal, dir_vec, use_dir, famList, n)
Definition: tecplotIO.F90:135
subroutine writeslicesfile(fileName, nodalValues)
Definition: tecplotIO.F90:278
subroutine destroyslice(slc)
Definition: tecplotIO.F90:1838
type(slice), dimension(:, :), allocatable absslices
Definition: tecplotIO.F90:65
character(len=maxcgnsnamelen), dimension(:), allocatable liftdistname
Definition: tecplotIO.F90:73
integer(kind=inttype), parameter nliftdistmax
Definition: tecplotIO.F90:68
subroutine writeslice(slc, fileID, nFields)
Definition: tecplotIO.F90:2251
subroutine createslice(pts, conn, elemFam, slc, pt, normal, dir_vec, use_dir, sliceName, famList)
Definition: tecplotIO.F90:1666
subroutine writetecplotsurfacefile(fileName, famList)
Definition: tecplotIO.F90:744
integer(kind=inttype) nparaslices
Definition: tecplotIO.F90:63
integer(kind=inttype), dimension(16, 5) mscon1
Definition: tecplotIO.F90:59
Definition: utils.F90:1
subroutine reallocateinteger2(intArray, newSize1, newSize2, oldSize1, oldSize2, alwaysFreeMem)
Definition: utils.F90:2913
subroutine setbcpointers(nn, spatialPointers)
Definition: utils.F90:882
subroutine reallocatereal2(realArray, newSize1, newSize2, oldSize1, oldSize2, alwaysFreeMem)
Definition: utils.F90:3049
subroutine pointreduce(pts, N, tol, uniquePts, link, nUnique)
Definition: utils.F90:4170
subroutine echk(errorcode, file, line)
Definition: utils.F90:5722
subroutine setpointers(nn, mm, ll)
Definition: utils.F90:3237
Definition: vf.F90:1
subroutine writefloats(adflowRealVals)
Definition: tecplotIO.F90:1205
subroutine writestring(str)
Definition: tecplotIO.F90:1247
subroutine writedoubles(adflowRealVals)
Definition: tecplotIO.F90:1216
subroutine writeintegers(adflowIntegerVals)
Definition: tecplotIO.F90:1237
subroutine writedouble(adflowRealVal)
Definition: tecplotIO.F90:1196
subroutine writefloat(adflowRealVal)
Definition: tecplotIO.F90:1187
subroutine writeinteger(adflowIntegerVal)
Definition: tecplotIO.F90:1227