ADflow  v1.0
ADflow is a finite volume RANS solver tailored for gradient-based aerodynamic design optimization.
solvers.F90
Go to the documentation of this file.
1 module solvers
2 contains
3 
4  subroutine solver
5  !
6  ! solver is the main subroutine of the solver library.
7  ! It controls the full multigrid and the kill signals.
8  !
 ! Driver for the full-multigrid startup: walks the grid levels from
 ! mgStartLevel down to the finest level (groundLevel = 1), solves the
 ! steady / time-spectral or unsteady equations on each level, and
 ! interpolates the solution to the next finer mesh in between.
 !
 ! NOTE(review): this listing was recovered from generated documentation;
 ! the embedded line numbers show that several original source lines are
 ! missing (gaps are flagged inline below). Confirm against solvers.F90.
9  use constants
10  use communication, only: myid
13  use inputphysics, only: equationmode
15  use killsignals, only: localsignal, nosignal
18  use monitor, only: timeunsteady
19  use section, only: nsections
20  use utils, only: eulerwallspresent
21  use multigrid, only: transfertofinegrid
23  use commonformats, only: stringint1
24  implicit none
25  !
26  ! Local variables.
27  !
 ! Per-section time step used to advance the mesh coordinates below.
28  real(kind=realtype), dimension(nSections) :: dtadvance
29 
30  ! If the normal momentum equation should be used to determine
31  ! the pressure in the halo for inviscid walls, find out if there
32  ! actually are inviscid walls. If so, set the logical
33  ! exchangePressureEarly to .true.; otherwise set it to .false.
34 
 ! NOTE(review): original lines 35-36 (the "if (eulerwallspresent(...))"
 ! branch that presumably sets exchangepressureearly = .true.) are missing
 ! from this listing — confirm against the original file.
37  else
38  exchangepressureearly = .false.
39  end if
40 
41  ! Connect the kill signals with the appropriate functions.
42  ! Initialize localSignal for safety.
43  ! Only if signalling is supported.
44 
45 #ifndef USE_NO_SIGNALS
 ! NOTE(review): original line 46 (presumably "localsignal = nosignal",
 ! matching the use of nosignal above) is missing here.
47  call connect_signals
48 #endif
49 
50  ! Determine the reference time for the solver.
51 
52  t0solver = mpi_wtime()
53 
54  ! Set timeUnsteady to zero; this is amount of time simulated
55  ! in unsteady mode.
56 
 ! NOTE(review): original line 57 (the assignment announced by the comment
 ! above, presumably "timeunsteady = zero") is missing from this listing.
58 
59  ! Loop over the number of grid levels in the current computation.
60  ! Note that the counter variable, groundLevel, is defined in
61  ! the module iteration.
62 
63  do groundlevel = mgstartlevel, 1, -1
64 
65  ! Solve either the steady or the unsteady equations for this
66  ! grid level. The time spectral method can be considered as
67  ! a special kind of steady mode.
68  select case (equationmode)
69  case (steady, timespectral)
70  call solvestate
71  case (unsteady)
72  select case (timeintegrationscheme)
73  case (explicitrk)
 ! NOTE(review): original line 74 (the call for the explicit RK branch,
 ! presumably solverunsteadyexplicitrk) is missing from this listing.
75  end select
76  end select
77 
78  ! If this is not the finest grid level, interpolate the
79  ! solution to the next finer mesh and write a message that
80  ! this is happening. Only processor 0 performs this latter
81  ! task.
82 
83  if (groundlevel > 1) then
84 
 ! NOTE(review): original line 85 is missing; currentlevel is printed
 ! below but the statement that updates it is not visible here.
86 
87  if (myid == 0) then
88  if (printiterations) then
89  print "(a)", "#"
90  print stringint1, "# Going down to grid level ", currentlevel
91  print "(a)", "#"
92  end if
93  end if
94 
95  call transfertofinegrid(.false.)
96 
97  ! Move the coordinates of the new fine grid level into the
98  ! correct position. Only for unsteady problems with changing
99  ! meshes. Note that the first argument of updateCoorFineMesh
100  ! is an array with the time step for section.
101 
102  if (equationmode == unsteady .and. changing_grid) then
103  dtadvance = timeunsteady
104  call updatecoorfinemesh(dtadvance, 1_inttype)
105  end if
106 
107  ! Reset nOldSolAvail to 1, such that the unsteady
108  ! computation on the finer mesh starts with a lower
109  ! order scheme.
110 
111  noldsolavail = 1
112  end if
113 
114  end do
115 
116  ! Explicitly set groundlevel to 1
117  groundlevel = 1
118 
119  end subroutine solver
120 
121  ! ================================================
122  ! Utilities for unsteady simulation
123  ! ================================================
125  !
126  ! Initialize variables related to unsteady simulation.
127  ! Some are the same as those in the *solver* subroutine,
128  ! while others are specific for unsteady problems.
129  !
 ! NOTE(review): the "subroutine solverunsteadyinit" statement (original
 ! line ~124) is missing from this listing; the closing end statement
 ! below confirms the routine name. Confirm against solvers.F90.
130  use aleutils, only: fillcoor, setlevelale
131  use constants
134  use killsignals, only: localsignal, nosignal
136  use utils, only: eulerwallspresent
137  implicit none
138 
139  ! BC treatment for normal momentum equation
 ! NOTE(review): original lines 140-141 (the "if (eulerwallspresent(...))"
 ! branch presumably setting exchangepressureearly = .true.) are missing.
142  else
143  exchangepressureearly = .false.
144  end if
145 
146  ! Connect the kill signals
147 #ifndef USE_NO_SIGNALS
 ! NOTE(review): original line 148 (presumably "localsignal = nosignal")
 ! is missing here.
149  call connect_signals
150 #endif
151 
152  ! Determine the reference time for the solver.
153  t0solver = mpi_wtime()
154 
155  ! Set time to zero
 ! NOTE(review): original line 156 (presumably "timeunsteady = zero") is
 ! missing; only the integer step counter reset survives below.
157  timestepunsteady = 0
158 
159  ! Fill up old, xold and volold
160  call fillcoor
161 
162  ! Set all ALE levels by initial configuration
 ! -1 selects the "initialize all levels from the current state" mode of
 ! setlevelale (contrast with the per-level calls in the ALE loop later).
163  call setlevelale(-1_inttype)
164  end subroutine solverunsteadyinit
165 
167  !
168  ! Update quantities related to geometry due to modification of mesh
169  ! That could happen when
170  ! - Steady mode with mesh modification
171  ! - Unsteady mode with non-moving mesh but with prescribed mesh motion
172  ! - Unsteady mode with warping and/or rigidly moving mesh
173  ! - Unsteady mode coupled with an external solver
174  !
 ! NOTE(review): the "subroutine updateunsteadygeometry" statement
 ! (original line ~166) and most of the use-association list (original
 ! lines 175-190) are missing from this listing; the end statement below
 ! confirms the routine name. Confirm against solvers.F90.
178  use constants
186  use section, only: nsections
191  implicit none
192  !
193  ! Local variables.
194  !
195  integer(kind=intType) :: nn
 ! Per-section new time level and time step passed to the velocity routines.
196  real(kind=realtype), dimension(nSections) :: tnewsec, deltatsec
 ! ALE mesh-level loop counter.
197  integer(kind=intType) :: lale
198 
 ! Everything below only runs when the grid actually moves or deforms.
199  testchanging: if (changing_grid .or. gridmotionspecified) then
200 
201  ! Set the new time for all sections
202  do nn = 1, nsections
203  tnewsec(nn) = timeunsteady + timeunsteadyrestart
204  deltatsec(nn) = deltat
205  end do
206 
207  ! For prescribed motion only
208  if (gridmotionspecified) &
209  call updatecoorfinemesh(deltatsec, 1_inttype)
210 
211  ! Adapt the geometric info on all grid levels needed for the
212  ! current ground level and multigrid cycle.
213  ! The wall distance only needs to be recomputed when the
214  ! grid is changing; not when a rigid body motion is
215  ! specified. Furthermore, the user can choose not to update
216  ! the wall distance, because he may know a priori that the
217  ! changes in geometry happen quite far away from the boundary
218  ! layer. This is accomplished via updateWallDistanceUnsteady.
219 
 ! NOTE(review): original lines 220-223 (the metric / wall-distance
 ! update calls announced by the comment above) are missing here.
224 
225  ! Update the rotation matrices of the faces. Only needed
226  ! on the finest grid level.
227  ! For prescribed motion only
228 
229  if (gridmotionspecified) &
230  call facerotationmatrices(currentlevel, .false.)
231 
232  if (useale) then
233  ! Update the velocities using ALE scheme if moving mesh is present
234 
235  ! First update cell and surface velocity, both are vectors
236  ! Only quantities in blocks are updated, and they will not
237  ! be interpolated
238 
239  call gridvelocitiesfinelevelpart1(deforming_grid, tnewsec, 1_inttype)
240 
241  ! Secondly store x to a temporary variable xALE
242 
243  call storecoor
244 
245  ! Thirdly update surface normal and normal velocity
246 
247  aleloop: do lale = 1, nalemeshes
248  ! Interpolate mesh over latest time step for all ALE Meshes
249  call interpcoor(lale)
250 
251  ! Update s[I,J,K], norm
252  call metric(groundlevel)
253 
254  ! Update sFace[I,J,K]
255  call gridvelocitiesfinelevelpart2(deforming_grid, tnewsec, 1_inttype)
256 
257  ! Update uSlip
258  call slipvelocitiesfinelevel_ale(deforming_grid, tnewsec, 1_inttype)
259 
260  ! Update coarse level quantities to make sure multigrid is working
261  call gridvelocitiescoarselevels(1_inttype)
262  call slipvelocitiescoarselevels(1_inttype)
263 
264  ! Update rFace
265  call normalvelocitiesalllevels(1_inttype)
266 
267  ! Store data to *lale* ALE level
268  call setlevelale(lale)
269  end do aleloop
270 
271  ! Lastly recover x from temporary variable
272  ! Then compute data for current level
273 
274  call recovercoor
275 
276  ! Finish the rest of the update
277  call metric(groundlevel)
278  call gridvelocitiesfinelevelpart2(deforming_grid, tnewsec, 1_inttype)
279  call slipvelocitiesfinelevel_ale(deforming_grid, tnewsec, 1_inttype)
280 
281  else
282  ! Otherwise update the velocities naively
283 
284  ! Determine the velocities of the cell centers and faces
285  ! for the current ground level. Note that the spectral mode
286  ! is always 1 for unsteady mode.
287 
288  call gridvelocitiesfinelevel(deforming_grid, tnewsec, 1_inttype)
289 
290  ! Determine the new slip velocities on the viscous walls.
291 
292  call slipvelocitiesfinelevel(deforming_grid, tnewsec, 1_inttype)
293  end if
294 
295  ! After velocity computations on finest level are done,
296  ! Update those on coarser levels
297 
298  call gridvelocitiescoarselevels(1_inttype)
299  call slipvelocitiescoarselevels(1_inttype)
300 
301  ! Compute the normal velocities of the boundaries, if
302  ! needed for the corresponding boundary condition.
303 
304  call normalvelocitiesalllevels(1_inttype)
305 
306  ! Determine the prescribed boundary condition data for the
307  ! boundary subfaces. First for the (currently) finest grid
308  ! and then iteratively for the coarser grid levels.
309 
310  call setbcdatafinegrid(.false.)
 ! NOTE(review): original line 311 (presumably the coarse-grid BC data
 ! call announced by the comment above) is missing from this listing.
312 
313  end if testchanging
314 
315  end subroutine updateunsteadygeometry
316 
318  !
319  ! Solve for next time step in unsteady simulation
320  !
 ! NOTE(review): the "subroutine solverunsteadystep" statement (original
 ! line ~317) is missing from this listing; the end statement below
 ! confirms the routine name.
321  use iteration, only: noldsolavail
322  use solverutils, only: shiftsolution
323  use utils, only: setcoeftimeintegrator
325  implicit none
326 
327  ! Shift the old solution for the new time step.
328 
329  call shiftsolution
330 
331  ! Set the coefficients for the time integrator.
332 
 ! NOTE(review): original line 333 (presumably
 ! "call setcoeftimeintegrator", imported above and announced by the
 ! comment) is missing from this listing.
334 
335  ! Solve the state for the current time step and
336  ! update nOldSolAvail.
337  call solvestate
 ! NOTE(review): original line 338 (the nOldSolAvail update announced by
 ! the comment above) is missing from this listing.
339 
340  end subroutine solverunsteadystep
341 
342  ! ================================================
343  ! The following are not interfaced with Python
344  ! ================================================
345 
347  !
348  ! checkWriteUnsteadyInLoop checks if a solution must be
349  ! written inside the time loop and if so write it.
350  !
 ! NOTE(review): the "subroutine checkwriteunsteadyinloop" statement
 ! (original line ~346) and several use statements (original lines
 ! 351, 353-360) are missing from this listing.
352  use constants
361  use surfacefamilies, only: fullfamlist
362  implicit none
363  !
364  ! Local variables.
365  !
 ! MPI error code for the allreduce below.
366  integer :: ierr
367 
368  integer(kind=intType) :: nn
 ! Buffer for formatting the time-step index into file names.
369  character(len=7) :: intString
370 
371  ! Determine the global kill parameter if signals are supported.
372 
373 #ifndef USE_NO_SIGNALS
 ! Max-reduction: any rank that received a signal propagates it to all.
374  call mpi_allreduce(localsignal, globalsignal, 1, adflow_integer, &
375  mpi_max, adflow_comm_world, ierr)
376 #endif
377 
378  ! Initialize the logicals for the writing to .false.
379 
380  writevolume = .false.
381  writesurface = .false.
382  writegrid = .false.
383 
384  ! Check whether a solution file, either volume or surface, must
385  ! be written. Only on the finest grid level in stand alone mode.
386 
387  !if(standAloneMode .and. groundLevel == 1) then
388  if (groundlevel == 1) then
389  if (mod(timestepunsteady, nsavevolume) == 0) &
390  writevolume = .true.
391  if (mod(timestepunsteady, nsavesurface) == 0) &
392  writesurface = .true.
393 
 ! NOTE(review): the continuation line of this "if" (original line 395,
 ! the second operand of the .or. and the "then") is missing from this
 ! listing — the condition below is syntactically incomplete as shown.
394  if (globalsignal == signalwrite .or. &
396  writevolume = .true.
397  writesurface = .true.
398  end if
399 
400  ! Determine whether or not a grid file must be written.
401 
 ! NOTE(review): original lines 402-403 (the writegrid determination
 ! announced by the comment above) are missing from this listing.
404 
405  ! Write the solution.
406 
407  if (writegrid .or. writevolume .or. writesurface) &
408  call writesol(fullfamlist, size(fullfamlist))
409 
410  ! Write the slice files for this timestep if they have been specified. TEMPORARY
411  ! Write lift distribution TEMPORARY
412  write (intstring, "(i4.4)") timestepunsteady + ntimestepsrestart
413  intstring = adjustl(intstring)
414  !call writeSlicesFile(trim(slicesolfile)//"_Timestep"//trim(intString)//".dat", .True.)
415  !call writeLiftDistributionFile(trim(liftDistributionFile)//"_Timestep"//trim(intString)//".dat", .True.)
416 
417  end if
418 
419  ! Update the variable oldSolWritten.
420 
 ! Shift the written-flags one slot back in time (newest entry first).
421  do nn = (noldlevels - 1), 2, -1
422  oldsolwritten(nn) = oldsolwritten(nn - 1)
423  end do
424 
 ! NOTE(review): original line 425 (presumably the assignment of
 ! oldsolwritten(1) for the current step) is missing from this listing.
426 
427  end subroutine checkwriteunsteadyinloop
428 
429  ! ==================================================================
430 
432  !
433  ! checkWriteUnsteadyEndLoop checks if a solution must be
434  ! written at the end of the time loop and if so write it.
435  !
 ! NOTE(review): the "subroutine checkwriteunsteadyendloop" statement
 ! (original line ~431) and several use statements (original lines
 ! 437-440) are missing from this listing.
436  use constants
441  use surfacefamilies, only: fullfamlist
442  implicit none
443  !
444  ! Local variables.
445  !
446  integer(kind=intType) :: nn
447 
448  ! Write a solution. Only on the finest grid in stand alone mode
449  ! and if some time steps have been performed since the last write.
450 
451  if (standalonemode .and. groundlevel == 1) then
452 
 ! Invert the in-loop flags: a file is still owed exactly when the last
 ! in-loop check did NOT write one.
453  writevolume = .not. writevolume
454  writesurface = .not. writesurface
455 
456  if (writevolume .or. writesurface) then
457 
458  ! Determine whether or not a grid file must be written.
459 
460  writegrid = .false.
 ! NOTE(review): original lines 461-462 (the condition that can set
 ! writegrid = .true.) are missing from this listing.
463 
464  ! Write the solution.
465 
466  call writesol(fullfamlist, size(fullfamlist))
467 
468  ! Update the variable oldSolWritten.
469 
470  do nn = (noldlevels - 1), 2, -1
471  oldsolwritten(nn) = oldsolwritten(nn - 1)
472  end do
473 
 ! NOTE(review): original line 474 (presumably the assignment of
 ! oldsolwritten(1)) is missing from this listing.
475 
476  end if
477 
478  end if
479 
480  end subroutine checkwriteunsteadyendloop
481 
482  ! ================================================
483  ! Utilities for explicit RK solver
484  ! ================================================
485 
487  !
488  ! solverUnsteadyExplicitRK solves the unsteady equations using
489  ! the explicit Runge-Kutta schemes for the multigrid level
490  ! groundLevel.
491  !
 ! NOTE(review): the "subroutine solverunsteadyexplicitrk" statement
 ! (original line ~486) is missing from this listing; the end statement
 ! below confirms the routine name. Further gaps are flagged inline.
492  use constants
493  use blockpointers, only: ndom, il, jl, kl, vol, dw, w, dwoldrk, p
494  use communication
495  use flowvarrefstate
496  use inputphysics
497  use inputunsteady
498  use iteration
499  use killsignals
500  use monitor
501  use utils, only: setpointers
502  use flowutils, only: computepressure
503  use haloexchange, only: whalo1, whalo2
505  use turbapi, only: turbresidual
506  use turbbcroutines, only: applyallturbbc
507  use utils, only: convergenceheader
509  use flowutils, only: computelamviscosity
510  use bcroutines, only: applyallbc
512  implicit none
513  !
514  ! Local parameter.
515  !
 ! Re-print the convergence table header every this many time steps.
516  integer(kind=intType), parameter :: nWriteConvHeader = 50
517  !
518  ! Local variables.
519  !
520  integer(kind=intType) :: iter, nTimeSteps, stage
521  integer(kind=intType) :: i, j, k, l, m, nn
522 
523  real(kind=realtype) :: tmp, timeunsteadyold
524 
525  character(len=7) :: numberString
526 
527  ! Set rkStage to 0. This variable is only relevant if Runge-Kutta
528  ! smoothers are used as an iterative algorithm, but rkStage needs
529  ! to be initialized.
530 
531  rkstage = 0
532 
533  ! Initializations of the write and signal parameters.
534 
535  writevolume = .false.
536  writesurface = .false.
537  writegrid = .false.
 ! NOTE(review): original lines 538-539 (presumably the localsignal /
 ! globalsignal initializations announced above) are missing here.
540 
541  ! Initialize timeStepUnsteady to 0 and set the number of
542  ! time steps depending on the grid level.
543 
544  timestepunsteady = 0
545 
546  ntimesteps = ntimestepscoarse
547  if (groundlevel == 1) ntimesteps = ntimestepsfine
548 
549  ! Write a message to stdout about how many time steps will
550  ! be taken on the current grid. Only processor 0 does this.
551 
552  if (myid == 0) then
553  write (numberstring, "(i6)") ntimesteps
554  numberstring = adjustl(numberstring)
555 
556  print "(a)", "#"
557  print "(A, I1, 3(A))", "# Grid ", groundlevel, ": Performing ", trim(numberstring), &
558  " explicit Runge Kutta time steps."
559  print "(a)", "#"
560 
561  ! Also write the convergence header. Technically this is
562  ! not really a convergence history for explicit RK, but
563  ! the routine is called this way.
564 
565  call convergenceheader
566  end if
567 
568  ! Determine and write the initial convergence info. Again not
569  ! really convergence for explicit RK.
570 
571  call convergenceinfo
572  !
573  ! Loop over the number of time steps to be computed.
574  !
575  ! Initialize the unsteady time.
576 
 ! The physical time is advanced to the first RK stage time before the
 ! loop; gammaRKUnsteady(s) is the stage time fraction of deltaT.
577  timeunsteadyold = timeunsteady
578  timeunsteady = timeunsteadyold + gammarkunsteady(1) * deltat
579 
580  timeloop: do iter = 1, ntimesteps
581 
582  ! Rewrite the convergence header after a certain number of
583  ! iterations. Only processor 0 does this.
584 
585  if (myid == 0 .and. mod(iter, nwriteconvheader) == 0) &
586  call convergenceheader
587 
588  ! Loop over the number of RK stages.
589 
590  stageloop: do stage = 1, nrkstagesunsteady
591 
592  ! Compute the residual. Note that the turbulent residual must
593  ! be computed first, because it uses the array of the flow
594  ! field residuals as temporary buffers.
595 
596  if (equations == ransequations) then
597  call initres(nt1, nt2)
598  call turbresidual
599  end if
600 
601  call initres(1_inttype, nwf)
602  call sourceterms()
603  call residual
604 
605  ! Loop over the number of domains.
606 
607  domainloop: do nn = 1, ndom
608 
609  ! Set the pointers to this domain. Note that there is only
610  ! one time instance for a time accurate computation.
611 
612  call setpointers(nn, currentlevel, 1)
613 
614  ! Step 1: Store in dw the conservative update for the
615  ! flow field variables. This means a multiplication
616  ! with deltaT/vol. Also store in w the conservative
617  ! flow field variables.
618 
619  do k = 2, kl
620  do j = 2, jl
621  do i = 2, il
622  tmp = deltat / vol(i, j, k)
623  do l = 1, nw
624  dw(i, j, k, l) = tmp * dw(i, j, k, l)
625  end do
626 
 ! Velocities -> momenta (conservative form) for the update below.
627  w(i, j, k, ivx) = w(i, j, k, ivx) * w(i, j, k, irho)
628  w(i, j, k, ivy) = w(i, j, k, ivy) * w(i, j, k, irho)
629  w(i, j, k, ivz) = w(i, j, k, ivz) * w(i, j, k, irho)
630 
631  end do
632  end do
633  end do
634 
635  ! Step 2: Compute the new conservative flow field variables
636  ! and primitive turbulence variables.
637 
 ! Accumulate the contributions of all previous stages ...
638  do m = 1, (stage - 1)
639  tmp = betarkunsteady(stage, m)
640  if (tmp /= zero) then
641  do l = 1, nw
642  do k = 2, kl
643  do j = 2, jl
644  do i = 2, il
645  w(i, j, k, l) = w(i, j, k, l) - tmp * dwoldrk(m, i, j, k, l)
646  end do
647  end do
648  end do
649  end do
650  end if
651  end do
652 
 ! ... plus the contribution of the current stage's residual.
653  tmp = betarkunsteady(stage, stage)
654  do l = 1, nw
655  do k = 2, kl
656  do j = 2, jl
657  do i = 2, il
658  w(i, j, k, l) = w(i, j, k, l) - tmp * dw(i, j, k, l)
659  end do
660  end do
661  end do
662  end do
663 
664  ! Step 3. Convert the conservative variables back to
665  ! primitive variables for this block. Compute the
666  ! laminar and eddy viscosities as well.
667 
668  ! Convert the momentum into velocities.
669  ! Also store the total energy in the pressure, such that
670  ! the routine computePressure can be used.
671 
672  do k = 2, kl
673  do j = 2, jl
674  do i = 2, il
675  tmp = one / w(i, j, k, irho)
676  w(i, j, k, ivx) = w(i, j, k, ivx) * tmp
677  w(i, j, k, ivy) = w(i, j, k, ivy) * tmp
678  w(i, j, k, ivz) = w(i, j, k, ivz) * tmp
679  p(i, j, k) = w(i, j, k, irhoe)
680  end do
681  end do
682  end do
683 
684  ! Compute the pressure.
685 
686  call computepressure(2_inttype, il, 2_inttype, jl, &
687  2_inttype, kl, 0_inttype)
688 
689  ! Swap the pressure and total energy, because
690  ! computePressure stores the pressure in the position of
691  ! the total energy.
692 
693  do k = 2, kl
694  do j = 2, jl
695  do i = 2, il
696  tmp = p(i, j, k)
697  p(i, j, k) = w(i, j, k, irhoe)
698  w(i, j, k, irhoe) = tmp
699  end do
700  end do
701  end do
702 
703  ! Compute the laminar and eddy viscosities.
704 
705  call computelamviscosity(.false.)
706  call computeeddyviscosity(.false.)
707 
708  ! Step 4. Store dw in dwOldRK if this is not the last
709  ! RK stage.
710 
711  if (stage < nrkstagesunsteady) then
712  do l = 1, nw
713  do k = 2, kl
714  do j = 2, jl
715  do i = 2, il
716  dwoldrk(stage, i, j, k, l) = dw(i, j, k, l)
717  end do
718  end do
719  end do
720  end do
721  end if
722 
723  end do domainloop
724 
725  ! Exchange the pressure if the pressure must be exchanged
726  ! early. Only the first halo's are needed, thus whalo1 is
727  ! called.
728 
729  call whalo1(currentlevel, 1_inttype, 0_inttype, .true., &
730  .false., .false.)
731 
732  ! Apply the boundary conditions to all blocks.
733 
734  call applyallbc(.true.)
735  if (equations == ransequations) call applyallturbbc(.true.)
736 
737  ! Exchange the halo data. As we are on the fine grid
738  ! the second halo is needed.
739 
740  call whalo2(currentlevel, 1_inttype, nw, .true., &
741  .true., .true.)
742 
743  ! Determine the time and initialize the geometrical and
744  ! boundary info for next stage, if needed.
745 
746  if (stage < nrkstagesunsteady) then
747  timeunsteady = timeunsteadyold &
748  + gammarkunsteady(stage + 1) * deltat
749 
750  call initstagerk(stage + 1)
751  end if
752 
753  end do stageloop
754 
755  ! Increment timeStepUnsteady and update
756  ! timeUnsteady with the current time step.
757 
 ! NOTE(review): original line 758 (the timestepunsteady increment
 ! announced by the comment above) is missing from this listing.
759  timeunsteady = timeunsteadyold + deltat
760 
761  ! Write the convergence info.
762 
763  call convergenceinfo
764 
765  ! Determine the time and initialize the geometrical and
766  ! boundary info for the next time step, such that the writing
767  ! of grid files with moving geometries is done correctly.
768 
769  timeunsteadyold = timeunsteady
770  timeunsteady = timeunsteadyold + gammarkunsteady(1) * deltat
771 
772  call initstagerk(1_inttype)
773 
774  ! Determine whether or not solution files must be written.
775 
 ! NOTE(review): original line 776 (presumably the call to
 ! checkwriteunsteadyinloop) is missing from this listing.
777 
778  ! Exit the loop if the corresponding kill signal
779  ! has been received.
780 
781  if (globalsignal == signalwritequit) exit
782 
783  end do timeloop
784 
785  ! Determine whether or not the final solution must be written.
786 
 ! NOTE(review): original line 787 (presumably the call to
 ! checkwriteunsteadyendloop) is missing from this listing.
788 
789  end subroutine solverunsteadyexplicitrk
790 
791  !=================================================================
792 
793  subroutine initstagerk(stage)
794  !
795  ! initStageRK performs the initialization tasks for the
796  ! Runge-Kutta schemes in unsteady mode.
797  !
 ! Advances the mesh to the time level of the given RK stage (when the
 ! grid moves) and refreshes the geometry-dependent data and boundary
 ! condition data for that stage.
 !
 ! NOTE(review): some use statements (original lines 804, 807-809) are
 ! missing from this documentation-recovered listing.
798  use inputmotion
799  use inputunsteady
800  use iteration
801  use monitor
802  use section
803  use bcdata, only: setbcdatafinegrid
805  use solverutils
806  use aleutils
810  implicit none
811  !
812  ! Subroutine arguments.
813  !
 ! RK stage (1-based) to initialize for; stage 1 is the start of a new
 ! time step, handled specially in the fact computation below.
814  integer(kind=intType), intent(in) :: stage
815  !
816  ! Local variables.
817  !
818  integer(kind=intType) :: nn
 ! Time increment from the previous stage to this stage.
819  real(kind=realtype) :: fact
820 
821  real(kind=realtype), dimension(nSections) :: tnewsec, deltatsec
822 
823  ! If the grid is changing a whole lot of geometric
824  ! info must be adapted.
825 
826  testchanging: if (changing_grid .or. gridmotionspecified) then
827 
828  ! Determine the time step relative to the previous stage.
829 
 ! Stage 1 follows the last stage of the previous time step, hence the
 ! (one - gamma(last)) fraction; otherwise it is the gamma difference.
830  if (stage == 1) then
831  fact = deltat * (one - gammarkunsteady(nrkstagesunsteady))
832  else
833  fact = deltat * (gammarkunsteady(stage) &
834  - gammarkunsteady(stage - 1))
835  end if
836 
837  ! Set the time for all sections; also store their
838  ! time step. They are the same for all sections, but all
839  ! of them should be updated because of consistency.
840 
841  do nn = 1, nsections
842  tnewsec(nn) = timeunsteady + timeunsteadyrestart
843  deltatsec(nn) = fact
844  end do
845 
846  ! Advance the coordinates 1 time step.
847 
848  call updatecoorfinemesh(deltatsec, 1_inttype)
849 
850  ! Adapt the geometric info on all grid levels needed for the
851  ! current ground level and multigrid cycle.
852  ! The wall distance only needs to be recomputed when the
853  ! grid is changing; not when a rigid body motion is
854  ! specified. Furthermore, the user can choose not to update
855  ! the wall distance, because he may know a priori that the
856  ! changes in geometry happen quite far away from the boundary
857  ! layer. This is accomplished via updateWallDistanceUnsteady.
858 
 ! NOTE(review): original lines 859-861 and 863 (the metric /
 ! wall-distance update calls announced by the comment above) are
 ! missing from this listing.
862 
864 
865  ! Update the rotation matrices of the faces. Only needed
866  ! on the finest grid level.
867 
868  call facerotationmatrices(currentlevel, .false.)
869 
870  ! Determine the velocities of the cell centers and faces
871  ! for the current ground level. Note that the spectral mode
872  ! is always 1 for unsteady mode.
873 
874  call gridvelocitiesfinelevel(deforming_grid, tnewsec, 1_inttype)
875 
876  ! Determine the new slip velocities on the viscous walls.
877 
878  call slipvelocitiesfinelevel(deforming_grid, tnewsec, 1_inttype)
879 
880  end if testchanging
881 
882  ! Determine the prescribed boundary condition data for the
883  ! boundary subfaces for the (currently) finest grid.
884 
885  call setbcdatafinegrid(.false.)
886 
887  end subroutine initstagerk
888 
889  ! ================================================
890  ! Internal utilities
891  ! ================================================
892  subroutine solvestate
893  !
894  ! solveState computes either the steady or unsteady state
895  ! vector for the multigrid level groundLevel, which can be
896  ! found in the module iteration.
897  !
 ! Nonlinear driver: evaluates the initial residual, then iterates up to
 ! nMGCycles times, choosing per iteration between the multigrid RK/DADI
 ! smoother, the approximate-Newton (ANK) step and the Newton-Krylov (NK)
 ! step based on how far totalR has dropped relative to totalR0. Also
 ! handles the wall-clock time limit, kill signals and mid-run solution
 ! writing.
 !
 ! NOTE(review): this listing was recovered from generated documentation;
 ! most of the use-association list (original lines 899-914, 916, 919)
 ! and several statements are missing. Gaps are flagged inline.
898  use constants
915  use tecplotio, only: writetecplot
917  use surfacefamilies, only: fullfamlist
918  use flowvarrefstate, only: nwf
920  use solverutils, only: timestep
921  implicit none
922  !
923  ! Local parameter
924  !
 ! Re-print the convergence table header every this many iterations.
925  integer(kind=intType), parameter :: nWriteConvHeader = 50
926  !
927  ! Local variables.
928  !
929  integer :: ierr
930  integer(kind=intType) :: nMGCycles
931  character(len=7) :: numberString
932  logical :: absConv, relConv, firstNK, firstANK, old_writeGrid
933  real(kind=realtype) :: nk_switchtol_save, curtime, ordersconvergedold
934  character(len=maxStringLen) :: iterFormat
935 
936  ! Allocate the memory for cycling.
937  if (allocated(cycling)) then
938  deallocate (cycling)
939  end if
940  allocate (cycling(nmgsteps), stat=ierr)
941 
942  ! Some initializations.
943 
944  converged = .false.
 ! NOTE(review): original lines 945-946 are missing here (presumably
 ! further flag initializations, e.g. routinefailed).
947  itertot = 0
948 
949  ! Determine the cycling strategy for this level.
950 
951  call setcyclestrategy
952 
953  ! Determine the number of multigrid cycles, which depends
954  ! on the current multigrid level.
955 
956  nmgcycles = ncycles
957  if (groundlevel > 1) nmgcycles = ncyclescoarse
958 
959  ! Allocate (or reallocate) the convergence array for this solveState.
960  call allocconvarrays(nmgcycles)
961 
962  ! Allocate space for storing history of function evaluations for NK
963  ! solver with non-monotone line search if necessary
964  if (currentlevel == 1 .and. usenksolver .and. nk_ls == nonmonotonelinesearch) then
965  allocate (nklsfuncevals(nmgcycles))
966  end if
967 
968  ! Only compute the free stream residual once for efficiency on the
969  ! fine grid only.
970  if (groundlevel == 1) then
971  if (.not. freestreamresset) then
 ! NOTE(review): original line 972 (the free-stream residual computation
 ! announced by the comment above) is missing from this listing.
973  freestreamresset = .true.
974  end if
975  end if
976  ! Write a message. Only done by processor 0.
977 
978  if (myid == 0) then
979 
980  ! Write a message about the number of multigrid iterations
981  ! to be performed.
982 
 ! Complex-step builds print two numbers (real and imaginary parts).
983 #ifndef USE_COMPLEX
984  iterformat = "(A, I1, 3(A), I6, A, ES10.2)"
985 #else
986  iterformat = "(A, I1, 3(A), I6, A, 2ES10.2)"
987 #endif
988 
989  write (numberstring, "(i6)") nmgcycles
990  numberstring = adjustl(numberstring)
991  numberstring = trim(numberstring)
992  if (printiterations) then
993  print "(a)", "#"
994  print iterformat, "# Grid ", groundlevel, ": Performing ", trim(numberstring), &
995  " iterations, unless converged earlier. Minimum required iteration before NK switch: ", &
996  miniternum, ". Switch to NK at totalR of: ", (nk_switchtol * totalr0)
997  print "(a)", "#"
998  end if
999 
1000  if (printiterations) then
1001  call convergenceheader
1002  end if
1003  end if
1004 
1005  ! Initialize the approximate iteration count. We won't count this
1006  ! first residual evaluation. This way on the first convergence info
1007  ! call, "Iter" and "Iter Total" will both display 0.
1008  approxtotalits = 0
1009 
1010  ! we need to re-set the orders converged to 16 as it might have been modified in the previous iteration
1011  ordersconverged = 16.0_realtype
1012 
1013  ! Evaluate the initial residual
1014  call computeresidualnk
1015 
1016  ! Need to run the time step here since the RK/DADI is expecting
1017  ! the rad{i,j,k} to be computed.
1018  call timestep(.false.)
1019 
1020  ! Extract the rhoResStart and totalRStart
 ! NOTE(review): original line 1021 (the extraction announced by the
 ! comment above) is missing from this listing.
1022 
1023  ! No iteration type for first residual evaluation
1024  itertype = " None"
1025  ! also no CFL, step size, or linear residual for this iteration
1026  cflmonitor = -1
1027  stepmonitor = -1
1028  linresmonitor = -1
1029 
1030  ! Determine and write the initial convergence info.
1031  call convergenceinfo
1032 
1033  ! Loop over the maximum number of nonlinear iterations
1034  firstank = .true.
1035  firstnk = .true.
1036 
1037  ! Save the NKSwitch tol since it may be modified in the loop
1038  nk_switchtol_save = nk_switchtol
1039 
1040  ! Set the converged order factor now:
 ! NOTE(review): original line 1041 is missing from this listing.
1042  ordersconvergedold = ordersconverged
1043 
1044  nonlineariteration: do while (approxtotalits < nmgcycles)
1045 
1046  ! Update iterTot
1047  itertot = itertot + 1
1048 
1049  if (mod(itertot, nwriteconvheader) == 0 .and. &
1050  myid == 0 .and. printiterations) then
1051  call convergenceheader
1052  end if
1053 
1054  ! Defaults for the monitor
1055  cflmonitor = cfl
1056  stepmonitor = 1.0
1057  linresmonitor = -1
1058 
1059  ! Determine what type of update to do:
1060  if (currentlevel > 1) then
1061 
1062  ! Coarse grids do RK/DADI always
1063  call executemgcycle
 ! NOTE(review): original line 1064 is missing from this listing.
1065  else
 ! Fine grid: pick RK/DADI startup, ANK, or NK depending on which
 ! solvers are enabled and how far totalR has dropped below totalR0.
1066  if (.not. useanksolver .and. .not. usenksolver .or. (itertot <= miniternum .and. rkreset)) then
1067 
1068  ! Always RK/DADI or a RK startup. Run the MG Cycle
1069 
1070  call executemgcycle
1071 
1072  else if (useanksolver .and. .not. usenksolver) then
1073 
1074  ! Approx solver, no NKSolver
1075 
1076  if (totalr > ank_switchtol * totalr0) then
1077 
1078  call executemgcycle
1079 
1080  else
1081  call ankstep(firstank)
1082  firstank = .false.
 ! NOTE(review): original line 1083 is missing from this listing.
1084 
1085  end if
1086 
1087  else if (.not. useanksolver .and. usenksolver) then
1088 
1089  ! NK Solver no approx solver
1090 
1091  if (totalr > nk_switchtol * totalr0) then
1092 
1093  call executemgcycle
1094 
1095  else
1096 
1097  call nkstep(firstnk)
1098  firstnk = .false.
1099  cflmonitor = -1
1100 
1101  end if
1102 
1103  else if (useanksolver .and. usenksolver) then
1104 
1105  ! Both approximate and NK solver.
1106 
1107  if (totalr > ank_switchtol * totalr0) then
1108 
1109  call executemgcycle
1110 
1111  else if (totalr <= ank_switchtol * totalr0 .and. &
1112  totalr > nk_switchtol * totalr0) then
1113 
1114  call ankstep(firstank)
1115  firstank = .false.
1116  firstnk = .true.
 ! NOTE(review): original line 1117 is missing from this listing.
1118 
1119  else
1120 
1121  ! We free the memory for the ANK solver here because ANK solver
1122  ! module depends on the NK solver module and we cannot call
1123  ! destroyANKSolver within NKSolver itself.
1124  if (firstnk) then
1125  call destroyanksolver()
1126  end if
1127 
1128  call nkstep(firstnk)
1129  firstnk = .false.
1130  firstank = .true.
1131  cflmonitor = -1
1132 
1133  end if
1134  end if
1135  end if
1136 
1137  if (timelimit > zero) then
1138  ! Check if we ran out of time but only if we are required to use the timeLimit
 ! Only rank 0 measures the elapsed time; it is then broadcast so all
 ! ranks take the same exit decision.
1139  if (myid == 0) then
1140  curtime = mpi_wtime() - t0solver
1141  end if
1142 
1143  call mpi_bcast(curtime, 1, adflow_real, 0, adflow_comm_world, ierr)
1144 
1145  if (curtime > timelimit) then
1146  ! Set the iterTot to the limit directly so that convergence
1147  ! info thinks we are just out of cycles
1148  approxtotalits = nmgcycles
1149  end if
1150  end if
1151 
1152  ! Determine and write the convergence info.
1153  call convergenceinfo
1154 
 ! NOTE(review): original line 1155 is missing from this listing
 ! (presumably the approxtotalits update for the loop condition).
1156 
1157  ! Update how far we are converged:
1158  ordersconverged = max(log10(totalr0 / totalr), ordersconvergedold)
1159  ordersconvergedold = ordersconverged
1160 
1161  ! Check for divergence or nan here
1162  if (routinefailed) then
1163  exit nonlineariteration
1164  end if
1165 
1166  ! Exit the loop if we are converged
1167  if (converged) then
1168  exit nonlineariteration
1169  end if
1170 
1171  ! Check if the bleed boundary conditions must be updated and
1172  ! do so if needed.
1173 
1174  ! Check if we've received a signal:
1175 #ifndef USE_NO_SIGNALS
1176  call mpi_allreduce(localsignal, globalsignal, 1, &
1177  adflow_integer, mpi_max, adflow_comm_world, &
1178  ierr)
1179 #endif
1180 
1181  if (globalsignal == signalwrite .or. writesoleachiter) then
1182  ! We have been told to write the solution even though we are not done iterating
1183 
1184  ! The grid must be written along with the volume solution
1185  if (writevolume) then
1186  ! temporary change the writeGrid option
1187  old_writegrid = writegrid
1188  writegrid = .true.
1189  end if
1190 
1191  if (writesoleachiter) then
1192  write (numberstring, "(i7)") itertot
1193  numberstring = adjustl(numberstring)
1194  numberstring = trim(numberstring)
1195  surfacesolfile = trim(convsolfilebasename)//"_"//trim(numberstring)//"_surf.cgns"
1196  newgridfile = trim(convsolfilebasename)//"_"//trim(numberstring)//"_vol.cgns"
1197  solfile = trim(convsolfilebasename)//"_"//trim(numberstring)//"_vol.cgns"
1198  else
 ! NOTE(review): original lines 1199-1201 (the else-branch file-name
 ! assignments) are missing from this listing.
1202  end if
1203 
1204  call writesol(fullfamlist, size(fullfamlist))
1205 
1206  ! Also write potential tecplot files. Note that we are not
1207  ! writing the tecplot surface file so pass in an empty file
1208  ! name and a nonsense family list.
1209  call writetecplot(forcedslicefile, .true., forcedliftfile, .true., &
1210  "", .false., [0], 1)
1211 
1212  if (writevolume) then
1213  ! change the writeGrid option back
1214  writegrid = old_writegrid
1215  end if
1216 
1217  ! Reset the signal
 ! NOTE(review): original line 1218 (the signal reset announced by the
 ! comment above, presumably "localsignal = nosignal") is missing here.
1219  end if
1220 
1221  if (globalsignal == signalwritequit) then
1222  exit nonlineariteration
1223  end if
1224 
1225  end do nonlineariteration
1226 
1227  ! Restore the switch tol in case it was changed
1228  nk_switchtol = nk_switchtol_save
1229 
1230  ! deallocate space for storing history of function evaluations for NK
1231  ! solver with non-monotone line search if necessary
1232  if (currentlevel == 1 .and. usenksolver .and. nk_ls == nonmonotonelinesearch) then
1233  deallocate (nklsfuncevals)
1234  end if
1235 
1236  end subroutine solvestate
1237 
1238  subroutine convergenceinfo
1239  !
1240  ! convergenceInfo computes and writes the convergence info to
1241  ! standard output. In spectral mode a convergence history for
1242  ! every mode is written.
1243  !
 ! NOTE(review): this listing appears to be extracted from generated
 ! documentation; several source lines (use statements, write/case
 ! continuations) were dropped by the extraction. Each such spot is
 ! flagged with a NOTE(review) below — verify against the original
 ! solvers.F90 before editing.
1244  use constants
1245  use cgnsnames
1246  use block, only: ncellglobal
1247  use blockpointers, only: ndom
 ! NOTE(review): embedded numbering jumps 1247 -> 1253; several use
 ! statements (e.g. for monitor/iteration/inputparam symbols referenced
 ! below) are missing from this extraction.
1253  use flowvarrefstate, only: pref, lref, gammainf
1254  use inputio, only: storeconvinneriter
1264  use oversetdata, only: oversetpresent
1266  use genericisnan, only: myisnan
1269  use surfacefamilies, only: fullfamlist
1270  implicit none
1271  !
1272  ! Local variables.
1273  !
1274  integer :: ierr, iConvStdout
1275 
1276  integer(kind=intType) :: sps, nn, mm, iConv
1277 
1278  real(kind=realtype) :: hdiffmax, machmax
1279  real(kind=realtype) :: eddyvismax, yplusmax, sepsensor, cavitation, axismoment
1280  real(kind=realtype) :: sepsensoravg(3)
1281 
1282  real(kind=realtype) :: l2convthislevel, fact
1283  real(kind=realtype), dimension(3) :: cfp, cfv, cmp, cmv
1284  real(kind=realtype) :: cmpaxis, cmvaxis
1285  logical :: nanOccurred, writeIterations
1286  logical :: absConv, relConv
1287  real(kind=realtype) :: localvalues(nlocalvalues)
1288  real(kind=realtype) :: funcvalues(ncostfunction)
1289  ! Determine whether or not the iterations must be written.
1290 
1291  writeiterations = .true.
1292  if (equationmode == unsteady .and. &
1293  timeintegrationscheme == explicitrk) writeiterations = .false.
1294 
1295  ! Initializations
1296  converged = .false.
1297  nanoccurred = .false.
1298 
1299  ! Set the L2 norm for convergence for this level.
1300 
1301  l2convthislevel = l2convcoarse
1302  if (groundlevel == 1) l2convthislevel = l2conv
1303 
1304  ! Loop over the number of spectral solutions.
1305 
1306  spectralloop: do sps = 1, ntimeintervalsspectral
1307 
1308  ! Initialize the local monitoring variables to zero.
1309 
1310  monloc = zero
1311 
1312  ! Loop over the blocks.
1313 
1314  domains: do nn = 1, ndom
1315 
1316  ! Set the pointers for this block.
1317 
1318  call setpointers(nn, groundlevel, sps)
1319 
1320  ! Compute the forces and moments for this block. Note that
1321  ! we zero localValues before each call because we are
1322  ! summing into momLocal.
1323  localvalues = zero
1324  call integratesurfaces(localvalues, fullfamlist)
1325 
1326  ! Convert to coefficients for monitoring:
1327  fact = two / (gammainf * machcoef * machcoef &
1328  * surfaceref * lref * lref * pref)
1329  cfp = fact * localvalues(ifp:ifp + 2)
1330  cfv = fact * localvalues(ifv:ifv + 2)
1331  fact = fact / (lengthref * lref)
1332  cmp = fact * localvalues(imp:imp + 2)
1333  cmv = fact * localvalues(imv:imv + 2)
1334  yplusmax = localvalues(iyplus)
1335  ! Determine the maximum values of the monitoring variables
1336  ! of this block.
1337 
1338  call maxhdiffmach(hdiffmax, machmax)
1339  call maxeddyv(eddyvismax)
1340 
1341  ! Loop over the number of monitoring variables.
1342  nmonitoringvar: do mm = 1, nmon
1343 
1344  ! Determine the monitoring variable and act accordingly.
1345 
1346  select case (monnames(mm))
1347 
1348  case ('totalR')
1349  call sumallresiduals(mm)
1350 
1351  case (cgnsl2resrho)
1352  call sumresiduals(irho, mm)
1353 
1354  case (cgnsl2resmomx)
1355  call sumresiduals(imx, mm)
1356 
1357  case (cgnsl2resmomy)
1358  call sumresiduals(imy, mm)
1359 
1360  case (cgnsl2resmomz)
1361  call sumresiduals(imz, mm)
1362 
1363  case (cgnsl2resrhoe)
1364  call sumresiduals(irhoe, mm)
1365 
1366  case (cgnsl2resnu, cgnsl2resk)
1367  call sumresiduals(itu1, mm)
1368 
 ! NOTE(review): the case selector line (embedded line 1369) for this
 ! call is missing — presumably cgnsL2resOmega/cgnsL2resTau (itu2);
 ! confirm against the original source.
1370  call sumresiduals(itu2, mm)
1371 
1372  case (cgnsl2resv2)
1373  call sumresiduals(itu3, mm)
1374 
1375  case (cgnsl2resf)
1376  call sumresiduals(itu4, mm)
1377 
1378  case (cgnscl)
1379  monloc(mm) = monloc(mm) &
1380  + (cfp(1) + cfv(1)) * liftdirection(1) &
1381  + (cfp(2) + cfv(2)) * liftdirection(2) &
1382  + (cfp(3) + cfv(3)) * liftdirection(3)
1383 
1384  case (cgnsclp)
1385  monloc(mm) = monloc(mm) + cfp(1) * liftdirection(1) &
1386  + cfp(2) * liftdirection(2) &
1387  + cfp(3) * liftdirection(3)
1388 
1389  case (cgnsclv)
1390  monloc(mm) = monloc(mm) + cfv(1) * liftdirection(1) &
1391  + cfv(2) * liftdirection(2) &
1392  + cfv(3) * liftdirection(3)
1393 
1394  case (cgnscd)
1395  monloc(mm) = monloc(mm) &
1396  + (cfp(1) + cfv(1)) * dragdirection(1) &
1397  + (cfp(2) + cfv(2)) * dragdirection(2) &
1398  + (cfp(3) + cfv(3)) * dragdirection(3)
1399 
1400  case (cgnscdp)
1401  monloc(mm) = monloc(mm) + cfp(1) * dragdirection(1) &
1402  + cfp(2) * dragdirection(2) &
1403  + cfp(3) * dragdirection(3)
1404 
1405  case (cgnscdv)
1406  monloc(mm) = monloc(mm) + cfv(1) * dragdirection(1) &
1407  + cfv(2) * dragdirection(2) &
1408  + cfv(3) * dragdirection(3)
1409 
1410  case (cgnscfx)
1411  monloc(mm) = monloc(mm) + cfp(1) + cfv(1)
1412 
1413  case (cgnscfy)
1414  monloc(mm) = monloc(mm) + cfp(2) + cfv(2)
1415 
1416  case (cgnscfz)
1417  monloc(mm) = monloc(mm) + cfp(3) + cfv(3)
1418 
1419  case (cgnscmx)
1420  monloc(mm) = monloc(mm) + cmp(1) + cmv(1)
1421 
1422  case (cgnscmy)
1423  monloc(mm) = monloc(mm) + cmp(2) + cmv(2)
1424 
1425  case (cgnscmz)
1426  monloc(mm) = monloc(mm) + cmp(3) + cmv(3)
1427 
1428  case (cgnshdiffmax)
1429  monloc(mm) = max(monloc(mm), hdiffmax)
1430 
1431  case (cgnsmachmax)
1432  monloc(mm) = max(monloc(mm), machmax)
1433 
1434  case (cgnsyplusmax)
1435  monloc(mm) = max(monloc(mm), yplusmax)
1436 
1437  case (cgnseddymax)
1438  monloc(mm) = max(monloc(mm), eddyvismax)
1439 
1440  case (cgnssepsensor)
1441  monloc(mm) = monloc(mm) + localvalues(isepsensor)
1442 
1443  case (cgnssepsensorksarea)
1444  monloc(mm) = monloc(mm) + localvalues(isepsensorksarea)
1445 
1446  case (cgnscavitation)
1447  monloc(mm) = monloc(mm) + localvalues(icavitation)
1448 
1449  case (cgnsaxismoment)
1450  monloc(mm) = monloc(mm) + localvalues(iaxismoment)
1451 
1452  end select
1453 
1454  end do nmonitoringvar
1455  end do domains
1456 
1457  ! Add the corrections from zipper meshes from proc 0
1458  if (oversetpresent) then
1459  localvalues = zero
1460  call integratezippers(localvalues, fullfamlist, sps)
1461 
1462  fact = two / (gammainf * machcoef * machcoef &
1463  * surfaceref * lref * lref * pref)
1464  cfp = localvalues(ifp:ifp + 2) * fact
1465  cfv = localvalues(ifv:ifv + 2) * fact
1466  fact = fact / (lengthref * lref)
1467  cmp = localvalues(imp:imp + 2) * fact
1468  cmv = localvalues(imv:imv + 2) * fact
1469 
1470  !Loop over the number of monitoring variables and just modify
1471  !the ones that need to be updated with the zipper forces we just
1472  !computed.
1473  nmonitoringvarzip: do mm = 1, nmon
1474 
1475  ! Determine the monitoring variable and act accordingly.
1476 
1477  select case (monnames(mm))
1478 
1479  case (cgnscl)
1480  monloc(mm) = monloc(mm) &
1481  + (cfp(1) + cfv(1)) * liftdirection(1) &
1482  + (cfp(2) + cfv(2)) * liftdirection(2) &
1483  + (cfp(3) + cfv(3)) * liftdirection(3)
1484 
1485  case (cgnsclp)
1486  monloc(mm) = monloc(mm) + cfp(1) * liftdirection(1) &
1487  + cfp(2) * liftdirection(2) &
1488  + cfp(3) * liftdirection(3)
1489 
1490  case (cgnsclv)
1491  monloc(mm) = monloc(mm) + cfv(1) * liftdirection(1) &
1492  + cfv(2) * liftdirection(2) &
1493  + cfv(3) * liftdirection(3)
1494 
1495  case (cgnscd)
1496  monloc(mm) = monloc(mm) &
1497  + (cfp(1) + cfv(1)) * dragdirection(1) &
1498  + (cfp(2) + cfv(2)) * dragdirection(2) &
1499  + (cfp(3) + cfv(3)) * dragdirection(3)
1500 
1501  case (cgnscdp)
1502  monloc(mm) = monloc(mm) + cfp(1) * dragdirection(1) &
1503  + cfp(2) * dragdirection(2) &
1504  + cfp(3) * dragdirection(3)
1505 
1506  case (cgnscdv)
1507  monloc(mm) = monloc(mm) + cfv(1) * dragdirection(1) &
1508  + cfv(2) * dragdirection(2) &
1509  + cfv(3) * dragdirection(3)
1510 
1511  case (cgnscfx)
1512  monloc(mm) = monloc(mm) + cfp(1) + cfv(1)
1513 
1514  case (cgnscfy)
1515  monloc(mm) = monloc(mm) + cfp(2) + cfv(2)
1516 
1517  case (cgnscfz)
1518  monloc(mm) = monloc(mm) + cfp(3) + cfv(3)
1519 
1520  case (cgnscmx)
1521  monloc(mm) = monloc(mm) + cmp(1) + cmv(1)
1522 
1523  case (cgnscmy)
1524  monloc(mm) = monloc(mm) + cmp(2) + cmv(2)
1525 
1526  case (cgnscmz)
1527  monloc(mm) = monloc(mm) + cmp(3) + cmv(3)
1528 
1529  end select
1530 
1531  end do nmonitoringvarzip
1532  end if
1533  ! Determine the global sum of the summation monitoring
1534  ! variables. This is an all reduce since every processor needs to
1535  ! know the residual to make the same decisions.
1536 
1537  if (nmonsum > 0) &
1538  call mpi_allreduce(monloc, monglob, nmonsum, adflow_real, &
1539  mpi_sum, adflow_comm_world, ierr)
1540 
1541  ! Idem for the maximum monitoring variables.
1542 #ifndef USE_COMPLEX
1543  if (nmonmax > 0) &
1544  call mpi_allreduce(monloc(nmonsum + 1), monglob(nmonsum + 1), &
1545  nmonmax, adflow_real, mpi_max, adflow_comm_world, ierr)
1546 #else
1547  if (nmonmax < 0) &
1548  monglob(nmonsum + 1) = zero
1549 #endif
1550 
1551  ! Write the convergence info; only processor 0 does this.
1552 
1553  testrootproc: if (myid == 0) then
1554 
1555  ! The variables which must always be written.
1556 
1557  if (printiterations) then
1558  write (*, "(1x,i6,2x)", advance="no") groundlevel
1559 
1560  if (equationmode == unsteady) then
1561 
1562  write (*, "(i6,1x)", advance="no") timestepunsteady + &
 ! NOTE(review): the continuation line (embedded line 1563) of this
 ! write statement is missing from the extraction.
1564  write (*, "(es12.5,1x)", advance="no") timeunsteady + &
 ! NOTE(review): the continuation line (embedded line 1565) of this
 ! write statement is missing from the extraction.
1566 
1567  else if (equationmode == timespectral) then
1568 
1569  write (*, "(i8,3x)", advance="no") sps
1570 
1571  end if
1572 
1573  if (writeiterations) then
1574  write (*, "(i6,1x)", advance="no") itertot
1575  write (*, "(i6,1x)", advance="no") approxtotalits
1576  write (*, "(a,1x)", advance="no") itertype
1577 
1578  if (storeconvinneriter) then
1579  solverdataarray(itertot, sps, 1) = approxtotalits
1580  solvertypearray(itertot, sps) = itertype
1581  end if
1582 
1583  if (cflmonitor < zero) then
1584  ! Print dashes if no cfl term is used, i.e. NK solver
1585  write (*, "(a,1x)", advance="no") " ---- "
1586  else
1587 #ifndef USE_COMPLEX
1588  write (*, "(es10.2,1x)", advance="no") cflmonitor
1589  if (storeconvinneriter) then
1590  solverdataarray(itertot, sps, 2) = cflmonitor
1591  end if
1592 #else
1593  write (*, "(es10.2,1x)", advance="no") real(cflmonitor)
1594 #endif
1595  end if
1596 
1597  if (stepmonitor < zero) then
1598  ! Print dashes in the first None iteration
1599  write (*, "(a,1x)", advance="no") " ---- "
1600  else
1601 #ifndef USE_COMPLEX
1602  write (*, "(f5.2,2x)", advance="no") stepmonitor
1603  if (storeconvinneriter) then
1604  solverdataarray(itertot, sps, 3) = stepmonitor
1605  end if
1606 #else
1607  write (*, "(f5.2,2x)", advance="no") real(stepmonitor)
1608 #endif
1609  end if
1610 
1611  if (linresmonitor < zero) then
1612  ! For RK/DADI just print dashes
1613  write (*, "(a,1x)", advance="no") " ----"
1614  else
1615 
1616 #ifndef USE_COMPLEX
1617  write (*, "(f5.3,1x)", advance="no") linresmonitor
1618  if (storeconvinneriter) then
1619  solverdataarray(itertot, sps, 4) = linresmonitor
1620  end if
1621 #else
1622  write (*, "(f5.3,1x)", advance="no") real(linresmonitor)
1623 #endif
1624  end if
1625 
1626  if (showcpu) then
1627 #ifndef USE_COMPLEX
1628  write (*, "(es12.5,1x)", advance="no") mpi_wtime() - t0solver
1629  if (storeconvinneriter) then
1630  solverdataarray(itertot, sps, 5) = mpi_wtime() - t0solver
1631  end if
1632 #else
1633  write (*, "(es12.5,1x)", advance="no") real(mpi_wtime() - t0solver)
1634 #endif
1635 
1636  end if
1637  end if
1638  end if
1639  end if testrootproc
1640 
1641  ! Loop over the number of monitoring values.
1642  variableloop: do mm = 1, nmon
1643 
1644  ! The residual variables must be corrected.
1645 
1646  select case (monnames(mm))
1647 
1648  case (cgnsl2resrho, cgnsl2resmomx, &
 ! NOTE(review): the continuation of this case list (embedded lines
 ! 1649-1653, presumably the remaining L2 residual names) is missing
 ! from the extraction.
1654 #ifndef USE_COMPLEX
1655  monglob(mm) = sqrt(monglob(mm) / ncellglobal(groundlevel))
1656 #else
1657  ! take the square roots separately in complex mode
1658  monglob(mm) = cmplx(sqrt(real(monglob(mm) / ncellglobal(groundlevel))), &
1659  sqrt(aimag(monglob(mm) / ncellglobal(groundlevel))))
1660 #endif
1661 
1662  if (monnames(mm) == cgnsl2resrho) then
1663  rhores = monglob(mm)
1664  end if
1665  case ('totalR')
1666 #ifndef USE_COMPLEX
1667  monglob(mm) = sqrt(monglob(mm))
1668 #else
1669  ! take the square roots separately in complex mode
1670  monglob(mm) = cmplx(sqrt(real(monglob(mm))), sqrt(aimag(monglob(mm))))
1671 #endif
1672  totalr = monglob(mm)
1673  end select
1674 
1675  if (myisnan(monglob(mm))) nanoccurred = .true.
1676 
1677  if (myid == 0 .and. printiterations) then
1678  ! Write the convergence info to stdout.
1679 #ifndef USE_COMPLEX
1680  write (*, "(es24.16,1x)", advance="no") monglob(mm)
1681 #else
1682  select case (monnames(mm))
1683 
1684  case (cgnsl2resrho, cgnsl2resmomx, &
 ! NOTE(review): the continuation of this case list (embedded lines
 ! 1685-1688) is missing from the extraction.
1689  cgnsl2resv2, cgnsl2resf, 'totalR')
1690 
1691  ! we can do a shorter print for residuals because only the leading few digits
1692  ! and the exponents are important anyways
1693  write (*, '(es16.8,SP,es17.8E3,"i")', advance="no") monglob(mm)
1694 
1695  case default
1696  ! we need to do the regular full print for functionals because they are useful
1697  write (*, '(es24.16,SP,es25.16E3,"i")', advance="no") monglob(mm)
1698  end select
1699 #endif
1700  end if
1701 
1702  ! Store the convergence info in convArray, if desired.
1703  if (storeconvinneriter) then
1704  convarray(itertot, sps, mm) = monglob(mm)
1705  end if
1706  end do variableloop
1707 
1708  if (myid == 0 .and. printiterations) then
1709  ! Write the carriage return.
1710  print "(1x)"
1711  end if
1712 
1713  ! Determine whether or not the solution is converged.
1714  ! A distinction must be made between unsteady mode and the
1715  ! other modes, because in unsteady mode the convergence of
1716  ! the inner iterations is not necessarily stored.
1717 
1718  select case (equationmode)
1719  case (steady, timespectral)
1720 
1721  ! Steady or time spectral mode. The convergence histories
1722  ! are stored and this info can be used. The logical
1723  ! converged is set to .false. if the density residual
1724  ! has not converged yet.
1725 
1726  absconv = .false.
1727  relconv = .false.
1728 
1729  ! We make a split here based on if we are operating on a
1730  ! coarse grid or the finest. On the coarse grid levels,
1731  ! l2convThisLevel refers to the relative convergence of
1732  ! rhoRes.
1733 
1734  if (currentlevel /= 1) then
1735  if (rhores < l2convthislevel * rhoresstart) then
1736  relconv = .true.
1737  end if
1738  else
1739  ! We are on the fine level. All checking is done using
1740  ! the total residual.
1741 
1742  ! Absolute convergence check
1743  if (totalr < l2convthislevel * totalr0) then
1744  absconv = .true.
1745  end if
1746 
1747  ! Relative check only done on finest level
1748  if (totalr < l2convrel * totalrstart) then
1749  relconv = .true.
1750  end if
1751 
1752  ! If the total number of iterations is less than the
1753  ! RKReset, don't check the residual
1754  if (itertot < miniternum .and. rkreset) then
1755  relconv = .false.
1756  absconv = .false.
1757  end if
1758  end if
1759 
1760  ! Combine the two flags.
1761  if (absconv .or. relconv) then
1762  converged = .true.
1763  end if
1764 
1765  !===========================================================
1766 
1767  case (unsteady)
1768 
1769  ! Unsteady mode. The array convArray may not be present
1770  ! and therefore something else must be done.
1771  ! First determine the position in the array timeDataArray
1772  ! that can be used. For the coarser grids this is 1,
1773  ! because the time evolution is overwritten. For the fine
1774  ! mesh the actual position is determined.
1775 
1776  nn = 1
1777  if (groundlevel == 1) &
 ! NOTE(review): the continuation line (embedded line 1778) of this
 ! if statement is missing from the extraction.
1779  nn = max(nn, 1_inttype)
1780 
1781  ! Make a distinction between the time integration
1782  ! schemes.
1783 
1784  select case (timeintegrationscheme)
1785 
1786  case (explicitrk)
1787 
1788  ! Explicit scheme. Simply store the data in the
1789  ! convergence arrays.
1790 
 ! NOTE(review): a statement (embedded line 1791) is missing here in
 ! the extraction.
1792 
1793  ! For explicit schemes the residuals are not
1794  ! monitored and therefore the monitoring variables
1795  ! can simply be copied.
1796 
1797  do mm = 1, nmon
1798  timedataarray(nn, mm) = monglob(mm)
1799  end do
1800 
1801  !=======================================================
1802 
1803  case (bdf, implicitrk)
1804 
1805  ! An implicit scheme is used and therefore an
1806  ! iterative algorithm within every time step.
1807  ! The array convArray may not be present and
1808  ! therefore
1809  ! something else must be done. First determine the
1810  ! position in the array timeDataArray that can be
1811  ! used.
1812  ! For the coarser grids this is 1, because the time
1813  ! evolution is overwritten. For the fine mesh the
1814  ! actual position is determined.
1815 
1816  ! Determine the situation we have here.
1817 
1818  testinitunsteady: if (itertot == 0) then
1819 
1820  ! This is the initialization phase for this time step.
1821  ! Simply copy monGlob into monRef, store the value
1822  ! of the physical time and set converged to .false..
1823 
1824  do mm = 1, nmon
1825  monref(mm) = monglob(mm)
1826  end do
1827 
 ! NOTE(review): a statement (embedded line 1828, presumably storing
 ! the physical time per the comment above) is missing here in the
 ! extraction.
1829  converged = .false.
1830 
1831  else testinitunsteady
1832 
1833  ! Iteration for this time step. Store the relative
1834  ! convergence for the residual compared to the start
1835  ! of this time step; an absolute norm does not give
1836  ! any information here. For all other monitoring
1837  ! variables the current value is stored.
1838 
1839  do mm = 1, nmon
1840  select case (monnames(mm))
1841 
1842  case (cgnsl2resrho, cgnsl2resmomx, &
 ! NOTE(review): the continuation of this case list (embedded lines
 ! 1843-1847) is missing from the extraction.
1848 
1849  timedataarray(nn, mm) = monglob(mm) &
1850  / max(monref(mm), eps)
1851 
1852  !===============================================
1853 
1854  case default
1855 
1856  timedataarray(nn, mm) = monglob(mm)
1857 
1858  end select
1859  end do
1860 
1861  ! Set the logical converged to .false. if the density
1862  ! residual has not converged yet.
1863 
1864  if (timedataarray(nn, 1) < l2convthislevel) &
1865  converged = .true.
1866 
1867  end if testinitunsteady
1868  end select ! temporal integration scheme
1869 
1870  end select ! unsteady
1871 
1872  end do spectralloop
1873 
1874  if (nanoccurred) then
1875 
1876  if (myid == 0) then
1877  print *, 'Nan occured in Convergence Info on proc:', myid
1878  end if
1879 
1880  routinefailed = .true.
1881 
1882  call returnfail("convergenceInfo", &
1883  "A NaN occurred during the computation.")
1884 
1885  ! in a normal computation, code will simply exit.
1886  ! in a python based computation, code will set
1887  ! routinedFailed to .True. and return to the
1888  ! python level...
1889  return
1890  end if
1891 
1892  ! ! If we are at the max iteration limit but the residual is
1893  ! ! *close*, ie within maxL2DeviationFactor we say that's fine
1894 
1895  if (frompython .and. groundlevel == 1 .and. approxtotalits >= ncycles) then
1896 
1897  !Check to see if residuals are diverging or stalled for python
1898  select case (equationmode)
1899 
1900  case (steady, timespectral)
1901 
1902  ! Steady or time spectral mode. The convergence histories
1903  ! are stored and this info can be used. If the residuals
1904  ! are diverging, the logical routineFailed in killSignals
1905  ! is set to true and the progress is halted.
1906  !only check on root processor
1907  if (myid == 0) then
1908 
1909  ! If we made it to ncycles, check to see if we're
1910  ! "close" to being converged.
1911  do sps = 1, ntimeintervalsspectral
1912  if (totalr > maxl2deviationfactor * totalr0 * l2convthislevel) then
1913  routinefailed = .true.
1914  end if
1915  end do
1916  end if
1917  call mpi_bcast(routinefailed, 1, mpi_logical, 0, adflow_comm_world, ierr)
1918 
1919  end select
1920  end if
1921 
1922  ! Determine whether or not the solution is considered
1923  ! converged. This info is only known at processor 0 and must
1924  ! therefore be broadcast to the other processors.
1925 
1926  call mpi_bcast(converged, 1, mpi_logical, 0, adflow_comm_world, ierr)
1927 
1928  end subroutine convergenceinfo
1929 
1930 end module solvers
void connect_signals(void)
subroutine storecoor
Definition: ALEUtils.F90:659
subroutine fillcoor
Definition: ALEUtils.F90:609
subroutine recovercoor
Definition: ALEUtils.F90:749
subroutine slipvelocitiesfinelevel_ale(useOldCoor, t, sps)
Definition: ALEUtils.F90:6
subroutine interpcoor(lale)
Definition: ALEUtils.F90:700
subroutine setlevelale(setType)
Definition: ALEUtils.F90:809
real(kind=realtype) ank_cfl
Definition: NKSolvers.F90:1692
subroutine destroyanksolver
Definition: NKSolvers.F90:2789
real(kind=realtype) ank_switchtol
Definition: NKSolvers.F90:1679
subroutine ankstep(firstCall)
Definition: NKSolvers.F90:3630
logical useanksolver
Definition: NKSolvers.F90:1662
Definition: BCData.F90:1
subroutine setbcdatacoarsegrid
Definition: BCData.F90:3091
subroutine setbcdatafinegrid(initializationPart)
Definition: BCData.F90:2623
subroutine applyallbc(secondHalo)
Definition: BCRoutines.F90:16
Definition: block.F90:1
integer(kind=inttype), dimension(:), allocatable ncellglobal
Definition: block.F90:781
integer(kind=inttype) jl
real(kind=realtype), dimension(:, :, :), pointer p
real(kind=realtype), dimension(:, :, :, :), pointer w
real(kind=realtype), dimension(:, :, :, :, :), pointer dwoldrk
real(kind=realtype), dimension(:, :, :, :), pointer dw
real(kind=realtype), dimension(:, :, :), pointer vol
integer(kind=inttype) kl
integer(kind=inttype) il
character(len=maxcgnsnamelen), parameter cgnscl
Definition: cgnsNames.f90:235
character(len=maxcgnsnamelen), parameter cgnsl2resrho
Definition: cgnsNames.f90:208
character(len=maxcgnsnamelen), parameter cgnsmachmax
Definition: cgnsNames.f90:265
character(len=maxcgnsnamelen), parameter cgnsl2resmomy
Definition: cgnsNames.f90:212
character(len=maxcgnsnamelen), parameter cgnscmy
Definition: cgnsNames.f90:256
character(len=maxcgnsnamelen), parameter cgnsyplusmax
Definition: cgnsNames.f90:267
character(len=maxcgnsnamelen), parameter cgnsl2resk
Definition: cgnsNames.f90:220
character(len=maxcgnsnamelen), parameter cgnscmx
Definition: cgnsNames.f90:254
character(len=maxcgnsnamelen), parameter cgnscfy
Definition: cgnsNames.f90:250
character(len=maxcgnsnamelen), parameter cgnssepsensorksarea
Definition: cgnsNames.f90:280
character(len=maxcgnsnamelen), parameter cgnsl2resrhoe
Definition: cgnsNames.f90:216
character(len=maxcgnsnamelen), parameter cgnsaxismoment
Definition: cgnsNames.f90:282
character(len=maxcgnsnamelen), parameter cgnsl2resv2
Definition: cgnsNames.f90:228
character(len=maxcgnsnamelen), parameter cgnsl2resmomx
Definition: cgnsNames.f90:210
character(len=maxcgnsnamelen), parameter cgnsl2resmomz
Definition: cgnsNames.f90:214
character(len=maxcgnsnamelen), parameter cgnsl2resomega
Definition: cgnsNames.f90:222
character(len=maxcgnsnamelen), parameter cgnsl2restau
Definition: cgnsNames.f90:224
character(len=maxcgnsnamelen), parameter cgnsclv
Definition: cgnsNames.f90:239
character(len=maxcgnsnamelen), parameter cgnssepsensor
Definition: cgnsNames.f90:278
character(len=maxcgnsnamelen), parameter cgnsclp
Definition: cgnsNames.f90:237
character(len=maxcgnsnamelen), parameter cgnscmz
Definition: cgnsNames.f90:258
character(len=maxcgnsnamelen), parameter cgnscavitation
Definition: cgnsNames.f90:281
character(len=maxcgnsnamelen), parameter cgnseddymax
Definition: cgnsNames.f90:269
character(len=maxcgnsnamelen), parameter cgnsl2resf
Definition: cgnsNames.f90:230
character(len=maxcgnsnamelen), parameter cgnscd
Definition: cgnsNames.f90:241
character(len=maxcgnsnamelen), parameter cgnscfz
Definition: cgnsNames.f90:252
character(len=maxcgnsnamelen), parameter cgnscdp
Definition: cgnsNames.f90:243
character(len=maxcgnsnamelen), parameter cgnsl2resepsilon
Definition: cgnsNames.f90:226
character(len=maxcgnsnamelen), parameter cgnshdiffmax
Definition: cgnsNames.f90:263
character(len=maxcgnsnamelen), parameter cgnsl2resnu
Definition: cgnsNames.f90:218
character(len=maxcgnsnamelen), parameter cgnscdv
Definition: cgnsNames.f90:245
character(len=maxcgnsnamelen), parameter cgnscfx
Definition: cgnsNames.f90:248
character(len=maxstringlen) stringint1
integer adflow_comm_world
real(kind=realtype), parameter zero
Definition: constants.F90:71
integer(kind=inttype), parameter nlocalvalues
Definition: constants.F90:454
integer, parameter itu3
Definition: constants.F90:43
integer, parameter itu2
Definition: constants.F90:42
real(kind=realtype), parameter eps
Definition: constants.F90:23
integer, parameter itu1
Definition: constants.F90:40
integer, parameter irho
Definition: constants.F90:34
integer, parameter imx
Definition: constants.F90:65
integer(kind=inttype), parameter nonmonotonelinesearch
Definition: constants.F90:196
integer(kind=inttype), parameter timespectral
Definition: constants.F90:115
integer(kind=inttype), parameter implicitrk
Definition: constants.F90:189
integer(kind=inttype), parameter imp
Definition: constants.F90:455
integer, parameter ivx
Definition: constants.F90:35
integer(kind=inttype), parameter unsteady
Definition: constants.F90:115
integer, parameter irhoe
Definition: constants.F90:38
integer(kind=inttype), parameter ifv
Definition: constants.F90:455
integer(kind=inttype), parameter iyplus
Definition: constants.F90:455
integer(kind=inttype), parameter bdf
Definition: constants.F90:189
real(kind=realtype), parameter one
Definition: constants.F90:72
integer(kind=inttype), parameter steady
Definition: constants.F90:115
integer, parameter ivz
Definition: constants.F90:37
integer(kind=inttype), parameter iaxismoment
Definition: constants.F90:455
real(kind=realtype), parameter two
Definition: constants.F90:73
integer(kind=inttype), parameter imv
Definition: constants.F90:455
integer, parameter itu4
Definition: constants.F90:44
integer(kind=inttype), parameter isepsensorksarea
Definition: constants.F90:455
integer(kind=inttype), parameter explicitrk
Definition: constants.F90:189
integer(kind=inttype), parameter isepsensor
Definition: constants.F90:455
integer, parameter imz
Definition: constants.F90:67
integer, parameter imy
Definition: constants.F90:66
integer(kind=inttype), parameter ncostfunction
Definition: constants.F90:347
integer, parameter ivy
Definition: constants.F90:36
integer(kind=inttype), parameter icavitation
Definition: constants.F90:455
integer(kind=inttype), parameter ransequations
Definition: constants.F90:110
integer(kind=inttype), parameter normalmomentum
Definition: constants.F90:170
integer(kind=inttype), parameter ifp
Definition: constants.F90:455
subroutine computepressure(iBeg, iEnd, jBeg, jEnd, kBeg, kEnd, pointerOffset)
Definition: flowUtils.F90:934
subroutine computelamviscosity(includeHalos)
Definition: flowUtils.F90:1202
real(kind=realtype) gammainf
integer(kind=inttype) nt1
integer(kind=inttype) nwf
integer(kind=inttype) nw
real(kind=realtype) lref
real(kind=realtype) pref
integer(kind=inttype) nt2
subroutine whalo2(level, start, end, commPressure, commGamma, commViscous)
subroutine whalo1(level, start, end, commPressure, commGamma, commViscous)
Definition: haloExchange.F90:7
integer(kind=inttype) eulerwallbctreatment
Definition: inputParam.F90:75
character(len=maxstringlen) forcedvolumefile
Definition: inputParam.F90:173
character(len=maxstringlen) forcedliftfile
Definition: inputParam.F90:174
logical storeconvinneriter
Definition: inputParam.F90:166
character(len=maxstringlen) solfile
Definition: inputParam.F90:160
character(len=maxstringlen) surfacesolfile
Definition: inputParam.F90:162
character(len=maxstringlen) forcedslicefile
Definition: inputParam.F90:174
character(len=maxstringlen) newgridfile
Definition: inputParam.F90:159
character(len=maxstringlen) forcedsurfacefile
Definition: inputParam.F90:173
character(len=maxstringlen) convsolfilebasename
Definition: inputParam.F90:175
character(len=maxstringlen) slicesolfile
Definition: inputParam.F90:162
character(len=maxstringlen) liftdistributionfile
Definition: inputParam.F90:162
integer(kind=inttype) ncycles
Definition: inputParam.F90:262
real(kind=realtype) cflcoarse
Definition: inputParam.F90:275
integer(kind=inttype) nmgsteps
Definition: inputParam.F90:271
integer(kind=inttype) mgstartlevel
Definition: inputParam.F90:270
real(kind=realtype) maxl2deviationfactor
Definition: inputParam.F90:279
integer(kind=inttype) nsavesurface
Definition: inputParam.F90:263
real(kind=realtype) l2conv
Definition: inputParam.F90:277
integer(kind=inttype) nupdatebleeds
Definition: inputParam.F90:266
real(kind=realtype) timelimit
Definition: inputParam.F90:272
integer(kind=inttype) miniternum
Definition: inputParam.F90:274
integer(kind=inttype) ncyclescoarse
Definition: inputParam.F90:262
integer(kind=inttype) nsavevolume
Definition: inputParam.F90:263
real(kind=realtype) l2convcoarse
Definition: inputParam.F90:277
logical printiterations
Definition: inputParam.F90:288
real(kind=realtype) cfl
Definition: inputParam.F90:275
real(kind=realtype) l2convrel
Definition: inputParam.F90:278
logical gridmotionspecified
Definition: inputParam.F90:481
integer(kind=inttype) equations
Definition: inputParam.F90:583
integer(kind=inttype) equationmode
Definition: inputParam.F90:583
real(kind=realtype), dimension(3) dragdirection
Definition: inputParam.F90:601
real(kind=realtype) lengthref
Definition: inputParam.F90:598
real(kind=realtype) machcoef
Definition: inputParam.F90:593
real(kind=realtype), dimension(3) liftdirection
Definition: inputParam.F90:600
real(kind=realtype) surfaceref
Definition: inputParam.F90:598
integer(kind=inttype) ntimeintervalsspectral
Definition: inputParam.F90:645
integer(kind=inttype) ntimestepscoarse
Definition: inputParam.F90:731
real(kind=realtype), dimension(:), allocatable gammarkunsteady
Definition: inputParam.F90:745
logical useale
Definition: inputParam.F90:754
integer(kind=inttype) nrkstagesunsteady
Definition: inputParam.F90:742
real(kind=realtype) deltat
Definition: inputParam.F90:733
integer(kind=inttype) timeintegrationscheme
Definition: inputParam.F90:719
logical updatewalldistanceunsteady
Definition: inputParam.F90:765
real(kind=realtype), dimension(:, :), allocatable betarkunsteady
Definition: inputParam.F90:744
integer(kind=inttype) ntimestepsfine
Definition: inputParam.F90:731
real(kind=realtype) totalr0
Definition: iteration.f90:126
integer(kind=inttype) noldlevels
Definition: iteration.f90:79
real(kind=realtype) totalrfinal
Definition: iteration.f90:126
integer(kind=inttype) currentlevel
Definition: iteration.f90:18
real(kind=realtype) t0solver
Definition: iteration.f90:40
character(len=maxitertypelen) itertype
Definition: iteration.f90:84
logical changing_grid
Definition: iteration.f90:69
real(kind=realtype) cflmonitor
Definition: iteration.f90:97
real(kind=realtype) totalr
Definition: iteration.f90:126
logical converged
Definition: iteration.f90:52
real(kind=realtype) rhores0
Definition: iteration.f90:127
integer(kind=inttype), dimension(:), allocatable cycling
Definition: iteration.f90:26
real(kind=realtype) totalrstart
Definition: iteration.f90:126
integer(kind=inttype) groundlevel
Definition: iteration.f90:18
real(kind=realtype) rhores
Definition: iteration.f90:127
integer(kind=inttype) nalemeshes
Definition: iteration.f90:107
integer(kind=inttype) rkstage
Definition: iteration.f90:19
logical standalonemode
Definition: iteration.f90:69
logical deforming_grid
Definition: iteration.f90:69
real(kind=alwaysrealtype) stepmonitor
Definition: iteration.f90:98
real(kind=realtype) rhoresstart
Definition: iteration.f90:127
logical, dimension(:), allocatable oldsolwritten
Definition: iteration.f90:123
logical exchangepressureearly
Definition: iteration.f90:53
integer(kind=inttype) approxtotalits
Definition: iteration.f90:93
integer(kind=inttype) noldsolavail
Definition: iteration.f90:79
real(kind=realtype) ordersconverged
Definition: iteration.f90:128
real(kind=alwaysrealtype) linresmonitor
Definition: iteration.f90:99
integer(kind=inttype) itertot
Definition: iteration.f90:31
integer(kind=inttype), parameter signalwrite
Definition: killSignals.f90:24
integer(kind=inttype) localsignal
Definition: killSignals.f90:30
integer(kind=inttype), parameter signalwritequit
Definition: killSignals.f90:25
integer(kind=inttype), parameter nosignal
Definition: killSignals.f90:23
logical frompython
Definition: killSignals.f90:33
logical routinefailed
Definition: killSignals.f90:36
integer(kind=inttype) globalsignal
Definition: killSignals.f90:30
logical writegrid
Definition: monitor.f90:116
integer(kind=inttype) timestepunsteady
Definition: monitor.f90:97
integer nmon
Definition: monitor.f90:30
real(kind=cgnsrealtype), dimension(:), allocatable timearray
Definition: monitor.f90:107
integer nmonmax
Definition: monitor.f90:30
logical writesoleachiter
Definition: monitor.f90:116
real(kind=realtype), dimension(:), allocatable monloc
Definition: monitor.f90:39
real(kind=realtype), dimension(:, :, :), allocatable solverdataarray
Definition: monitor.f90:77
integer nmonsum
Definition: monitor.f90:30
real(kind=cgnsrealtype), dimension(:, :, :), allocatable convarray
Definition: monitor.f90:71
logical showcpu
Definition: monitor.f90:56
character(len=maxcgnsnamelen), dimension(:), allocatable monnames
Definition: monitor.f90:46
real(kind=realtype) timeunsteady
Definition: monitor.f90:98
real(kind=realtype) timeunsteadyrestart
Definition: monitor.f90:98
integer(kind=inttype) ntimestepsrestart
Definition: monitor.f90:97
logical writevolume
Definition: monitor.f90:116
real(kind=realtype), dimension(:), allocatable monglob
Definition: monitor.f90:40
logical writesurface
Definition: monitor.f90:116
real(kind=realtype), dimension(:), allocatable monref
Definition: monitor.f90:41
real(kind=cgnsrealtype), dimension(:, :), allocatable timedataarray
Definition: monitor.f90:109
character(len=8), dimension(:, :), allocatable solvertypearray
Definition: monitor.f90:81
subroutine executemgcycle
Definition: multiGrid.F90:826
subroutine setcyclestrategy
Definition: multiGrid.F90:958
subroutine transfertofinegrid(corrections)
Definition: multiGrid.F90:327
subroutine getfreestreamresidual(rhoRes, totalRRes)
Definition: NKSolvers.F90:281
subroutine computeresidualnk(useUpdateIntermed)
Definition: NKSolvers.F90:1085
real(kind=realtype), dimension(:), allocatable nklsfuncevals
Definition: NKSolvers.F90:74
logical usenksolver
Definition: NKSolvers.F90:32
logical freestreamresset
Definition: NKSolvers.F90:70
real(kind=realtype) nk_switchtol
Definition: NKSolvers.F90:50
real(kind=realtype) nk_cfl
Definition: NKSolvers.F90:71
subroutine nkstep(firstCall)
Definition: NKSolvers.F90:513
integer(kind=inttype) nk_ls
Definition: NKSolvers.F90:44
subroutine getcurrentresidual(rhoRes, totalRRes)
Definition: NKSolvers.F90:336
logical oversetpresent
Definition: overset.F90:373
subroutine updatecoorfinemesh(dtAdvance, sps)
subroutine updatecoordinatesalllevels
subroutine shiftcoorandvolumes
subroutine metric(level)
subroutine facerotationmatrices(level, allocMem)
subroutine updatemetricsalllevels
subroutine sourceterms
Definition: residuals.F90:1004
subroutine residual
Definition: residuals.F90:1029
subroutine initres(varStart, varEnd)
Definition: residuals.F90:965
integer(kind=inttype) nsections
Definition: section.f90:44
subroutine checkwriteunsteadyinloop
Definition: solvers.F90:347
subroutine solverunsteadyexplicitrk
Definition: solvers.F90:487
subroutine solver
Definition: solvers.F90:5
subroutine solverunsteadyinit
Definition: solvers.F90:125
subroutine checkwriteunsteadyendloop
Definition: solvers.F90:432
subroutine updateunsteadygeometry
Definition: solvers.F90:167
subroutine convergenceinfo
Definition: solvers.F90:1239
subroutine initstagerk(stage)
Definition: solvers.F90:794
subroutine solverunsteadystep
Definition: solvers.F90:318
subroutine solvestate
Definition: solvers.F90:893
subroutine gridvelocitiescoarselevels(sps)
subroutine shiftsolution
subroutine slipvelocitiesfinelevel(useOldCoor, t, sps)
subroutine slipvelocitiescoarselevels(sps)
subroutine normalvelocitiesalllevels(sps)
subroutine gridvelocitiesfinelevelpart2(useOldCoor, t, sps)
subroutine gridvelocitiesfinelevelpart1(useOldCoor, t, sps)
subroutine gridvelocitiesfinelevel(useOldCoor, t, sps)
subroutine timestep(onlyRadii)
Definition: solverUtils.F90:5
integer(kind=inttype), dimension(:), allocatable fullfamlist
subroutine integratesurfaces(localValues, famList)
subroutine writeliftdistributionfile(fileName, nodalValues)
Definition: tecplotIO.F90:418
subroutine writetecplot(sliceFile, writeSlices, liftFile, writeLift, surfFile, writeSurf, famList, nFamList)
Definition: tecplotIO.F90:222
subroutine writeslicesfile(fileName, nodalValues)
Definition: tecplotIO.F90:278
subroutine turbresidual
Definition: turbAPI.F90:98
subroutine applyallturbbc(secondHalo)
subroutine computeeddyviscosity(includeHalos)
Definition: turbUtils.F90:581
Definition: utils.F90:1
subroutine setcoeftimeintegrator
Definition: utils.F90:1536
subroutine returnfail(routineName, errorMessage)
Definition: utils.F90:5606
subroutine sumresiduals(nn, mm)
Definition: utils.F90:6365
subroutine maxhdiffmach(hdiffMax, MachMax)
Definition: utils.F90:2467
subroutine sumallresiduals(mm)
Definition: utils.F90:6405
logical function eulerwallspresent()
Definition: utils.F90:5784
subroutine setpointers(nn, mm, ll)
Definition: utils.F90:3237
subroutine allocconvarrays(nIterTot)
Definition: utils.F90:5830
subroutine maxeddyv(eddyvisMax)
Definition: utils.F90:2423
subroutine convergenceheader
Definition: utils.F90:5984
subroutine updatewalldistancealllevels
subroutine integratezippers(localValues, famList, sps)
subroutine writesol(famList, nFamList)
Definition: writeSol.F90:3