ADflow  v1.0
ADflow is a finite volume RANS solver tailored for gradient-based aerodynamic design optimization.
amg.F90
Go to the documentation of this file.
1 ! This module implements an aggregation-based algebraic multigrid method.
2 ! See Sec. 6.1 in the following paper for a theoretical overview of similar methods:
3 ! K. Stuben. "A review of algebraic multigrid."
4 ! Journal of Computational and Applied Mathematics (2001)
5 ! https://doi.org/10.1016/S0377-0427(00)00516-1
6 module amg
7 
8  use constants
9  use utils, only: echk
10 #include <petsc/finclude/petsc.h>
11  use petsc
12  implicit none
13 
!  Note: the lowercase type names mat, ksp, vec, pc, is, vecscatter used
!  below are PETSc Fortran object types provided by the petsc include/module.
14  ! Structure used for storing the interpolation indices
15  type arr1int4
16  integer, dimension(:), allocatable :: arr
17  end type arr1int4
18 
19  ! Structure used for storing the 3-D coarse-level cell indices of a block
20  type arr3int4
21  integer, dimension(:, :, :), allocatable :: arr
22  end type arr3int4
23 
24  ! Structure used for storing the coarse-level overset donor indices
25  type arr4int4
26  integer, dimension(:, :, :, :), allocatable :: arr
27  end type arr4int4
28 
29  ! The number of levels
30  integer(kind=intType) amglevels
31 
32  ! The number of outer iterations
33  integer(kind=intType) amgouterits
34 
35  ! The number of smoothing iterations
36  integer(kind=intType) amgnsmooth
37 
38  ! The number of local ILU iterations
39  integer(kind=intType) amglocalpreconits
40  integer(kind=intType) amglocalpreconitsfine
41  integer(kind=intType) amglocalpreconitscoarse
42 
43  ! ASM overlap for the solver/smoother
44  integer(kind=intType) amgasmoverlap
45  integer(kind=intType) amgasmoverlapfine
46  integer(kind=intType) amgasmoverlapcoarse
47 
48  ! ILU fill for the solver/smoother
49  integer(kind=intType) amgfilllevel
50  integer(kind=intType) amgfilllevelfine
51  integer(kind=intType) amgfilllevelcoarse
52 
53  ! Ordering
54  character(len=maxStringLen) :: amgmatrixordering
55 
56  ! Arrays for matrices/vectors/solvers on each level.
!  a is allocated over 2:amglevels only; the finest-level operator is
!  referenced through the finemat pointer below (see setupamg).
57  mat, dimension(:), allocatable :: a
58  ksp, dimension(:), allocatable :: ksplevels
59  vec, dimension(:), allocatable :: res, rhs, sol, sol2
60  pc shellpc
61 
!  Pointer to the externally owned fine-level matrix, set in setupamg.
62  mat, pointer :: finemat
63 
64  ! The interpolation arrays
65  type(arr1int4), dimension(:), allocatable :: interps
66 
67  ! The coarse level indices
68  type(arr3int4), dimension(:, :), allocatable, target :: coarseindices
69  type(arr4int4), dimension(:, :), allocatable, target :: coarseoversetindices
70 
!  amgsetup guards against repeated setup; bs is the block size
!  (number of states per cell), both set in setupamg.
71  logical :: amgsetup = .false.
72  integer :: bs
73 contains
74 
!  Build all AMG data structures: per-level block sizes (coarsened by 2 in
!  each direction), the aggregation/interpolation maps, the global coarse
!  indices (including overset donors, exchanged across procs via a PETSc
!  scatter), and the per-level matrices, vectors, and KSP contexts.
!  inputMat  : fine-level matrix (module keeps a pointer to it, not a copy)
!  nCell     : number of owned cells on this proc
!  blockSize : number of states per cell (stored in module variable bs)
!  levels    : number of AMG levels (stored in amglevels)
!  nSmooth   : smoothing iterations per level (stored in amgnsmooth)
!  Idempotent: returns immediately if amgsetup is already .true.
!  NOTE(review): original source lines 80 and 378 are missing from this
!  extract (a use-statement and, per the surrounding comments/pointer setup,
!  the generic integer halo-exchange call) — consult the original amg.F90.
75  subroutine setupamg(inputMat, nCell, blockSize, levels, nSmooth)
76 
77  use blockpointers
78  use communication
79  use utils, only: setpointers
81  use adjointvars, only: ncellslocal
82  integer(kind=intType), intent(in) :: nCell, blockSize, levels, nSmooth
83  mat, target :: inputmat
84 
85  vecscatter :: scat
86  vec :: recvvec, indexvec
87  is :: is1
88 
89  integer(kind=intTYpe) :: nnx, nny, nnz, l, nn, i, j, k, ii, jj, kk, lvl, n, m
90  integer(kind=intType) :: idim, count, cursize, ierr, coarseIndex
91  integer(kind=intType) :: level, nVar, sps, nextLvl, ncoarse
92  integer(kind=intType), dimension(:, :, :), allocatable :: sizes
93  integer(kind=intType), dimension(:), allocatable :: offsets, procStarts, nnzOn, nnzOff
94  real(kind=realtype), dimension(:), pointer :: indptr
95  integer(kind=intType), dimension(:), allocatable :: indicesToGet
96 
!  Already set up: nothing to do.
97  if (amgsetup) then
98  return
99  end if
100 
101  ! Set some module variables
102  amglevels = levels
103  amgnsmooth = nsmooth
104 
105  ! Block size here refers to the number of states in each cell
106  bs = blocksize
107 
108  ! Set the pointer to the fine level the AMG will be working on
109  finemat => inputmat
110 
111  ! Allocate the list of the mats/vects/ksp
!  (a starts at level 2: the fine-level operator is finemat itself)
112  allocate ( &
113  a(2:amglevels), &
114  ksplevels(1:amglevels), &
115  res(1:amglevels), &
116  rhs(1:amglevels), &
117  sol(1:amglevels), &
118  sol2(1:amglevels))
119 
120  ! First allocate the coarse level indices.
121  allocate (coarseindices(ndom, 1:amglevels - 1))
122  allocate (coarseoversetindices(ndom, 1:amglevels - 1))
123  allocate (sizes(3, ndom, 1:amglevels))
124  allocate (offsets(0:nproc), procstarts(1:amglevels))
125  allocate (interps(1:amglevels - 1))
!  Determine per-block grid sizes on every level and each proc's global
!  starting index (prefix sum of owned-cell counts over all procs).
126  do lvl = 1, amglevels
127 
128  do nn = 1, ndom
129  call setpointers(nn, 1, 1)
130  if (lvl == 1) then
131 
132  ! Set the sizes for the finest level.
133  ! This is the number of owned cells.
134  sizes(1, nn, 1) = nx
135  sizes(2, nn, 1) = ny
136  sizes(3, nn, 1) = nz
137  end if
138  end do
139 
!  Coarser levels: halve each dimension, rounding up for odd sizes and
!  never coarsening a dimension of size 1.
140  if (lvl > 1) then
141  do nn = 1, ndom
142  do idim = 1, 3
143  cursize = sizes(idim, nn, lvl - 1)
144  if (cursize == 1) then
145  sizes(idim, nn, lvl) = cursize
146  else if (mod(cursize, 2) == 0) then
147  ! Evenly divides
148  sizes(idim, nn, lvl) = cursize / 2
149  else if (mod(cursize, 2) == 1) then
150  ! Odd, so have an extra
151  sizes(idim, nn, lvl) = (cursize - 1) / 2 + 1
152  end if
153  end do
154  end do
155  end if
156 
157  ! Get the number of cells on this proc:
158  i = 0
159  do nn = 1, ndom
160  i = i + sizes(1, nn, lvl) * sizes(2, nn, lvl) * sizes(3, nn, lvl)
161  end do
162 
163  call mpi_allgather(i, 1, mpi_integer, offsets(1:nproc), 1, mpi_integer, &
164  adflow_comm_world, ierr)
165  call echk(ierr, __file__, __line__)
166 
167  ! Prefix sum
168  offsets(0) = 0
169  do i = 1, nproc
170  offsets(i) = offsets(i - 1) + offsets(i)
171  end do
172 
173  ! Get my starting index.
174  procstarts(lvl) = offsets(myid)
175 
176  end do
177 
178  ! Now set up the interp and the coarse-level indices. Note that this
179  ! loop is the number of levels MINUS 1 since we are generating the
180  ! interpolations between levels and we already have the 1st level
181  ! of the coarseIndices
182 
183  call veccreatempi(adflow_comm_world, ncellslocal(1_inttype), &
184  petsc_determine, indexvec, ierr)
185  call echk(ierr, __file__, __line__)
186 
187  do lvl = 1, amglevels - 1
188 
!  -1 marks "no coarse index assigned" in both index arrays.
189  do nn = 1, ndom
190  call setpointers(nn, 1, 1)
191  allocate (coarseindices(nn, lvl)%arr(0:ib, 0:jb, 0:kb))
192  allocate (coarseoversetindices(nn, lvl)%arr(8, 0:ib, 0:jb, 0:kb))
193  coarseindices(nn, lvl)%arr = -1
194  coarseoversetindices(nn, lvl)%arr = -1
195  end do
196 
197  ! Allocate the linear algebra interpolation array for this level
198  ! (first count the number of nodes to be restricted on level lvl)
199  n = 0
200  do nn = 1, ndom
201  n = n + sizes(1, nn, lvl) * sizes(2, nn, lvl) * sizes(3, nn, lvl)
202  end do
203  allocate (interps(lvl)%arr(n))
204 
205  ! Interps uses the local ordering so we always start at 0.
206  n = 0
207  count = 0
208  ! Loop over the blocks
209  do nn = 1, ndom
210 
211  ! Sizes for next level
212  nnx = sizes(1, nn, lvl + 1)
213  nny = sizes(2, nn, lvl + 1)
214  nnz = sizes(3, nn, lvl + 1)
215 
216  ! Loop over the sizes of this level
217  do k = 1, sizes(3, nn, lvl)
218  do j = 1, sizes(2, nn, lvl)
219  do i = 1, sizes(1, nn, lvl)
220 
221  ! These are the indices on the next level
!  2x2x2 aggregation: fine cells (i,j,k) map to coarse ((i-1)/2+1, ...)
222  ii = (i - 1) / 2 + 1
223  jj = (j - 1) / 2 + 1
224  kk = (k - 1) / 2 + 1
225 
!  Lexicographic coarse index within this block, offset by the running
!  cell count of the previous blocks on this proc.
226  coarseindex = (kk - 1) * nnx * nny + (jj - 1) * nnx + ii + count
227 
228  ! Linear algebra info
229  n = n + 1
230  interps(lvl)%arr(n) = coarseindex
231 
232  end do
233  end do
234  end do
235  count = count + nnx * nny * nnz
236  end do
237 
238  ! We are not done yet; We need to fill in the block-based
239  ! coarse indices and then do a halo-exchange on it so procs
240  ! know where to put their off-proc entries on the coarser levels.
241 
242  ! Loop over the blocks.
243  n = 0
244  ii = 0
245 
246  call vecgetarrayf90(indexvec, indptr, ierr)
247  call echk(ierr, __file__, __line__)
248 
249  do nn = 1, ndom
250  call setpointers(nn, 1, 1)
251  ii = ii + (kb + 1) * (jb + 1) * (ib + 1) ! Count maximum double halos
252  ! Loop over the sizes of this level
253  do k = 2, kl
254  do j = 2, jl
255  do i = 2, il
256 
257  n = n + 1
258 
259  ! Coarse Index: This is the first coarse index for
260  ! the current finest level element.
261  coarseindex = interps(1)%arr(n)
262 
263  ! For levels higher than 2, we need to trace
264  ! through the subsequent levels to find what the
265  ! coarse index is for level lvl.
266  do nextlvl = 2, lvl
267  coarseindex = interps(nextlvl)%arr(coarseindex)
268  end do
269 
270  ! Now we set the coarse level index into the
271  ! coarseIndices array. Note that we make the index
272  ! global here by adding procStarts. We also
273  ! subtract 1 to make it zero-based for petsc.
274  coarseindex = coarseindex + procstarts(lvl + 1) - 1
275  coarseindices(nn, lvl)%arr(i, j, k) = coarseindex
276 
277  ! Now put that index into the array
!  (bit-copy: the 64-bit integer is stored unmodified in the
!  real-valued PETSc vector entry via transfer)
278  indptr(n) = transfer(int(coarseindex, 8), one)
279 
280  end do
281  end do
282  end do
283  end do
284  call vecrestorearrayf90(indexvec, indptr, ierr)
285  call echk(ierr, __file__, __line__)
286 
!  Collect the global donor indices needed for overset-interpolated cells
!  (iblank == -1); up to 8 donors per cell.
287  allocate (indicestoget(8 * ii))
288  ii = 0
289  do nn = 1, ndom
290  call setpointers(nn, 1, 1)
291  do k = 0, kb
292  do j = 0, jb
293  do i = 0, ib
294  ! If this cell is is an interpolated cell, record the indices we need to get
295  if (iblank(i, j, k) == -1) then
296  do m = 1, 8
297  if (flowdoms(nn, 1, 1)%gInd(m, i, j, k) >= 0) then
298  ii = ii + 1
299  indicestoget(ii) = flowdoms(nn, 1, 1)%gInd(m, i, j, k)
300  end if
301  end do
302  end if
303  end do
304  end do
305  end do
306  end do
307 
308  ! Now create the scatter to retrieve the "indicesToGet" from
309  ! the indexVec. Petsc is always annoying for this.
310 
311  call iscreategeneral(adflow_comm_world, ii, indicestoget(1:ii), petsc_copy_values, &
312  is1, ierr)
313  call echk(ierr, __file__, __line__)
314  deallocate (indicestoget)
315 
316  ! Create array to dump the result
317  call veccreatempi(adflow_comm_world, ii, petsc_determine, recvvec, ierr)
318  call echk(ierr, __file__, __line__)
319 
320  ! Create the scatter
321  call vecscattercreate(indexvec, is1, recvvec, petsc_null_is, scat, ierr)
322  call echk(ierr, __file__, __line__)
323 
324  ! Do the actual scatter
325  call vecscatterbegin(scat, indexvec, recvvec, insert_values, scatter_forward, ierr)
326  call echk(ierr, __file__, __line__)
327  call vecscatterend(scat, indexvec, recvvec, insert_values, scatter_forward, ierr)
328  call echk(ierr, __file__, __line__)
329 
330  call vecgetarrayf90(recvvec, indptr, ierr)
331  call echk(ierr, __file__, __line__)
332 
333  ! Loop back over at set the coarse indices
!  Same traversal order as the gather loop above, so ii lines up with
!  the scattered entries; undo the transfer() bit-copy on the way out.
334  ii = 0
335  do nn = 1, ndom
336  call setpointers(nn, 1, 1)
337  ! Loop over the sizes of this level
338  do k = 0, kb
339  do j = 0, jb
340  do i = 0, ib
341  if (iblank(i, j, k) == -1) then
342  do m = 1, 8
343  if (flowdoms(nn, 1, 1)%gInd(m, i, j, k) >= 0) then
344  ii = ii + 1
345  coarseoversetindices(nn, lvl)%arr(m, i, j, k) = &
346  int(transfer(indptr(ii), 1_8), inttype)
347  end if
348  end do
349  end if
350  end do
351  end do
352  end do
353  end do
354 
355  call vecrestorearrayf90(recvvec, indptr, ierr)
356  call echk(ierr, __file__, __line__)
357 
358  call vecscatterdestroy(scat, ierr)
359  call echk(ierr, __file__, __line__)
360 
361  call vecdestroy(recvvec, ierr)
362  call echk(ierr, __file__, __line__)
363 
364  call isdestroy(is1, ierr)
365  call echk(ierr, __file__, __line__)
366 
367  ! Now we need to exchange the coarse level indices. Set
368  ! pointers to the coarseIndices arrays and then call the
369  ! generic integer halo exchange.
370  level = 1
371  sps = 1
372  nvar = 1
373 
374  do nn = 1, ndom
375  flowdoms(nn, level, sps)%intCommVars(1)%var => coarseindices(nn, lvl)%arr(:, :, :)
376  end do
377 
!  NOTE(review): original line 378 is missing from this extract; per the
!  comment above it should be the generic integer halo-exchange call
!  (whalo1to1intgeneric) — confirm against the original source.
379 
380  end do
381 
382  call vecdestroy(indexvec, ierr)
383  call echk(ierr, __file__, __line__)
384 
385  ! Next we need to set up the matrices and vectors
386 
387  do lvl = 1, amglevels
388 
389  call kspcreate(adflow_comm_world, ksplevels(lvl), ierr)
390  call echk(ierr, __file__, __line__)
391 
392  ! Create the coarse levels
393  if (lvl >= 2) then
394  ncoarse = maxval(interps(lvl - 1)%arr)
395 
!  Preallocation estimates: 14 block-nonzeros on-diagonal, 7 off.
396  allocate (nnzon(1:ncoarse), nnzoff(1:ncoarse))
397  nnzon = 14
398  nnzoff = 7
399  call matcreatebaij(adflow_comm_world, bs, ncoarse * bs, ncoarse * bs, &
400  petsc_determine, petsc_determine, 0, nnzon, 0, nnzoff, &
401  a(lvl), ierr)
402  call echk(ierr, __file__, __line__)
403  deallocate (nnzon, nnzoff)
404 
405  call matsetoption(a(lvl), mat_row_oriented, petsc_false, ierr)
406  call echk(ierr, __file__, __line__)
407 
408  call matsetoption(a(lvl), mat_new_nonzero_allocation_err, petsc_false, ierr)
409  call echk(ierr, __file__, __line__)
410 
411  call matcreatevecs(a(lvl), res(lvl), sol(lvl), ierr)
412  call echk(ierr, __file__, __line__)
413 
414  else
!  Fine level: size the vectors from the owned-cell count directly.
415  call veccreatempi(adflow_comm_world, ncell * bs, petsc_determine, res(lvl), ierr)
416  call echk(ierr, __file__, __line__)
417 
418  call vecduplicate(res(lvl), sol(lvl), ierr)
419  call echk(ierr, __file__, __line__)
420 
421  end if
422  call vecduplicate(res(lvl), rhs(lvl), ierr)
423  call echk(ierr, __file__, __line__)
424 
425  call vecduplicate(res(lvl), sol2(lvl), ierr)
426  call echk(ierr, __file__, __line__)
427 
428  end do
429  amgsetup = .true.
430 
431  end subroutine setupamg
432 
!  Tear down everything created by setupamg: destroy the per-level PETSc
!  matrices (levels > 1 only; level 1 is the externally owned finemat),
!  vectors, and KSP contexts, then free the module-level arrays and clear
!  the amgsetup flag. Safe to call when setup never happened (no-op).
!  NOTE(review): original line 461 is missing from this extract — likely
!  additional deallocations (e.g. interps/coarseindices); confirm against
!  the original source.
433  subroutine destroyamg
434 
435  integer(kind=intType) :: lvl, ierr, i, j
436  if (amgsetup) then
437  do lvl = 1, amglevels
438  ! Destroy all of our vectors/matrices
439  if (lvl > 1) then
440  call matdestroy(a(lvl), ierr)
441  call echk(ierr, __file__, __line__)
442  end if
443 
444  call vecdestroy(res(lvl), ierr)
445  call echk(ierr, __file__, __line__)
446 
447  call vecdestroy(sol(lvl), ierr)
448  call echk(ierr, __file__, __line__)
449 
450  call vecdestroy(sol2(lvl), ierr)
451  call echk(ierr, __file__, __line__)
452 
453  call vecdestroy(rhs(lvl), ierr)
454  call echk(ierr, __file__, __line__)
455 
456  call kspdestroy(ksplevels(lvl), ierr)
457  call echk(ierr, __file__, __line__)
458  end do
459 
460  deallocate (a, res, rhs, sol, sol2, ksplevels)
462  amgsetup = .false.
463 
464  end if
465  end subroutine destroyamg
466 
subroutine applyshellpc(pc, x, y, ierr)

    ! PETSc shell-preconditioner apply callback: compute y, an approximate
    ! solution of M*y = x, using amgouterits passes of the multigrid cycle.
    ! With a single level configured this degenerates to one fine-level
    ! KSP solve (i.e. no multigrid at all).
    use communication

    ! Input/Output
    pc pc
    vec x, y
    integer(kind=intType) ierr

    ! Working
    integer(kind=intType) :: iter

    if (amglevels > 1) then

        ! x is read-only, so stage it in rhs(1)
        call veccopy(x, rhs(1), ierr)
        call echk(ierr, __FILE__, __LINE__)

        ! Start from a zero initial guess
        call vecset(y, zero, ierr)
        call echk(ierr, __FILE__, __LINE__)

        if (amgouterits == 1) then
            ! Single pass: let the cycle write the answer straight into y
            call mgprecon(rhs(1), y, 1)
        else
            do iter = 1, amgouterits

                ! One multigrid cycle; the update is produced in sol(1)
                call mgprecon(rhs(1), sol(1), 1)

                ! Accumulate the update: y = y + sol(1)
                call vecaypx(y, one, sol(1), ierr)
                call echk(ierr, __FILE__, __LINE__)

                ! Refresh the residual for every pass except the last
                if (iter < amgouterits) then
                    call matmult(finemat, y, rhs(1), ierr)
                    call echk(ierr, __FILE__, __LINE__)

                    ! rhs(1) = x - A*y
                    call vecaypx(rhs(1), -one, x, ierr)
                    call echk(ierr, __FILE__, __LINE__)
                end if

            end do
        end if
    else
        ! Single-level fallback: plain solve on the fine level
        call kspsolve(ksplevels(1), x, y, ierr)
        call echk(ierr, __FILE__, __LINE__)
    end if

end subroutine applyshellpc
518 
!  PETSc shell-preconditioner setup callback: configure every level's KSP
!  as a Richardson smoother (amgnsmooth iterations, no norm checks) with an
!  additive-Schwarz (ASM) preconditioner whose subdomain solves use ILU with
!  the configured fill level and matrix ordering. Subdomain KSPs run either
!  multiple Richardson iterations (amglocalpreconits > 1) or a single
!  preonly application.
!  NOTE(review): original lines 521, 538-541, and 545-549 are missing from
!  this extract — given the amg*fine/amg*coarse module variables, they
!  likely select the fine- vs coarse-level values of the overlap, fill, and
!  local-iteration parameters; confirm against the original source.
519  subroutine setupshellpc(pc, ierr)
520 
522  use inputadjoint
523 
524  ! Input/Output
525  pc pc
526  integer(kind=intTYpe) :: ierr
527 
528  ! Working
529  pc globalpc, subpc
530  ksp subksp
531  integer(kind=intType) :: lvl
532  integer(kind=intType) :: nlocal, first
533 
534  do lvl = 1, amglevels
!  Level 1 smooths with the externally owned fine matrix; coarser levels
!  use the module's Galerkin-style operators a(lvl).
535  if (lvl == 1) then
536  call kspsetoperators(ksplevels(lvl), finemat, finemat, ierr)
537  call echk(ierr, __file__, __line__)
538 
542  else
543  call kspsetoperators(ksplevels(lvl), a(lvl), a(lvl), ierr)
544  call echk(ierr, __file__, __line__)
545 
549  end if
550 
!  The smoother runs a fixed iteration count, so norm computation is
!  skipped entirely.
551  call kspsetnormtype(ksplevels(lvl), ksp_norm_none, ierr)
552  call echk(ierr, __file__, __line__)
553 
554  call kspsettype(ksplevels(lvl), 'richardson', ierr)
555  call echk(ierr, __file__, __line__)
556 
!  Only maxits (amgnsmooth) matters here; tolerances stay at defaults.
557  call kspsettolerances(ksplevels(lvl), petsc_default_real, &
558  petsc_default_real, petsc_default_real, &
559  amgnsmooth, ierr)
560  call echk(ierr, __file__, __line__)
561 
562  call kspgetpc(ksplevels(lvl), globalpc, ierr)
563  call echk(ierr, __file__, __line__)
564 
565  call pcsettype(globalpc, 'asm', ierr)
566  call echk(ierr, __file__, __line__)
567 
568  call pcasmsetoverlap(globalpc, amgasmoverlap, ierr)
569  call echk(ierr, __file__, __line__)
570 
571  ! Set up the main ksp context before extracting the subdomains
572  call kspsetup(ksplevels(lvl), ierr)
573  call echk(ierr, __file__, __line__)
574 
575  ! Extract the ksp objects for each subdomain
576  call pcasmgetsubksp(globalpc, nlocal, first, subksp, ierr)
577  call echk(ierr, __file__, __line__)
578 
579  ! Since there is an extra matMult required when using the Richardson preconditioner
580  ! with only 1 iteration, only use it when we need to do more than 1 iteration.
581  if (amglocalpreconits > 1) then
582  ! Set the subksp object to Richardson so we can do multiple iterations on the sub-domains
583  call kspsettype(subksp, 'richardson', ierr)
584  call echk(ierr, __file__, __line__)
585 
586  ! Set the number of iterations to do on local blocks. Tolerances are ignored.
587  call kspsettolerances(subksp, petsc_default_real, petsc_default_real, petsc_default_real, &
588  amglocalpreconits, ierr)
589  call echk(ierr, __file__, __line__)
590 
591  ! normtype is NONE because we don't want to check error
592  call kspsetnormtype(subksp, ksp_norm_none, ierr)
593  call echk(ierr, __file__, __line__)
594  else
595  ! Set the subksp object to preonly because we are only doing one iteration
596  call kspsettype(subksp, 'preonly', ierr)
597  call echk(ierr, __file__, __line__)
598  end if
599 
600  ! Extract the preconditioner for subksp object
601  call kspgetpc(subksp, subpc, ierr)
602  call echk(ierr, __file__, __line__)
603 
604  ! Set the subpc type to ILU
605  call pcsettype(subpc, 'ilu', ierr)
606  call echk(ierr, __file__, __line__)
607 
608  ! Set up the matrix ordering for the subpc object
609  call pcfactorsetmatorderingtype(subpc, amgmatrixordering, ierr)
610  call echk(ierr, __file__, __line__)
611 
612  ! Set the ILU parameters
613  call pcfactorsetlevels(subpc, amgfilllevel, ierr)
614  call echk(ierr, __file__, __line__)
615  end do
616 
617  end subroutine setupshellpc
618 
subroutine destroyshellpc(pc, ierr)

    ! PETSc shell-preconditioner destroy callback. All AMG objects are
    ! owned by this module and released in destroyamg, so there is nothing
    ! to free here; we only report success back to PETSc.
    !
    ! Fixes: ierr was previously left uninitialized, returning garbage to
    ! PETSc's error checking; the unused local 'lvl' is removed.

    ! Input/Output
    pc pc
    integer(kind=intType) :: ierr

    ierr = 0

end subroutine destroyshellpc
629 
subroutine restrictvec(x, y, interp)

    ! Restrict (aggregate) the fine-level vector x onto the coarse-level
    ! vector y: coarse block j receives the sum of every fine block i for
    ! which interp(i) == j.

    ! Input/Output
    vec x, y
    integer(kind=intType), dimension(:), intent(in) :: interp

    ! Working
    real(kind=realtype), pointer :: xptr(:), yptr(:)
    real(kind=realtype), pointer :: xblk(:, :), yblk(:, :)
    integer(kind=intType) :: ierr, ifine, icoarse

    call vecgetarrayf90(x, xptr, ierr)
    call echk(ierr, __FILE__, __LINE__)

    call vecgetarrayf90(y, yptr, ierr)
    call echk(ierr, __FILE__, __LINE__)

    ! View the flat arrays as (state, cell) blocks of size bs
    xblk(1:bs, 1:size(xptr) / bs) => xptr
    yblk(1:bs, 1:size(yptr) / bs) => yptr

    ! Clear the coarse array, then accumulate the fine contributions
    yptr = zero
    do ifine = 1, size(interp)
        icoarse = interp(ifine)
        yblk(:, icoarse) = yblk(:, icoarse) + xblk(:, ifine)
    end do

    call vecrestorearrayf90(x, xptr, ierr)
    call echk(ierr, __FILE__, __LINE__)

    call vecrestorearrayf90(y, yptr, ierr)
    call echk(ierr, __FILE__, __LINE__)

end subroutine restrictvec
671 
subroutine prolongvec(x, y, interp)

    ! Prolong (inject) the coarse-level vector x up to the fine-level
    ! vector y: fine block i is a straight copy of coarse block interp(i).

    ! Input/Output
    vec x, y
    integer(kind=intType), dimension(:), intent(in) :: interp

    ! Working
    real(kind=realtype), pointer :: xptr(:), yptr(:)
    real(kind=realtype), pointer :: xblk(:, :), yblk(:, :)
    integer(kind=intType) :: ierr, ifine, icoarse

    call vecgetarrayf90(x, xptr, ierr)
    call echk(ierr, __FILE__, __LINE__)

    call vecgetarrayf90(y, yptr, ierr)
    call echk(ierr, __FILE__, __LINE__)

    ! View the flat arrays as (state, cell) blocks of size bs
    xblk(1:bs, 1:size(xptr) / bs) => xptr
    yblk(1:bs, 1:size(yptr) / bs) => yptr

    ! Injection: every fine block copies its coarse parent
    do ifine = 1, size(interp)
        icoarse = interp(ifine)
        yblk(:, ifine) = xblk(:, icoarse)
    end do

    call vecrestorearrayf90(x, xptr, ierr)
    call echk(ierr, __FILE__, __LINE__)

    call vecrestorearrayf90(y, yptr, ierr)
    call echk(ierr, __FILE__, __LINE__)

end subroutine prolongvec
711 
recursive subroutine mgprecon(r, y, k)

    ! One recursive multigrid cycle on level k.
    !
    ! r : residual on level k (input)
    ! y : approximate solution/correction on level k (output)
    ! k : current level (1 = finest)
    !
    ! Steps: restrict r to level k+1; solve there (directly with the
    ! bottom-level KSP, or recursively); prolong the coarse solution back
    ! into y; recompute the level-k residual; smooth it with the level-k
    ! KSP and add the smoothed correction to y.
    !
    ! Fixes: removed the unused local variable 'i'.

    ! Input/Output
    vec y, r
    integer(kind=intType), intent(in) :: k

    ! Working
    integer(kind=intType) :: ierr

    ! Step 1: Restrict the residual
    call restrictvec(r, rhs(k + 1), interps(k)%arr)

    ! Step 2: Solve the coarse level directly or recursively
    if (k == amglevels - 1) then
        ! The next level down is the bottom; break the recursion by solving
        call kspsolve(ksplevels(k + 1), rhs(k + 1), sol(k + 1), ierr)
        call echk(ierr, __FILE__, __LINE__)
    else
        ! Call the next level down recursively
        call mgprecon(rhs(k + 1), sol(k + 1), k + 1)
    end if

    ! Step 3: Prolongate the solution
    call prolongvec(sol(k + 1), y, interps(k)%arr)

    ! Step 4: Compute the new residual (level 1 uses the external fine
    ! matrix; coarser levels use the module's operators)
    if (k == 1) then
        call matmult(finemat, y, res(k), ierr) ! res = A(k) * y
    else
        call matmult(a(k), y, res(k), ierr) ! res = A(k) * y
    end if
    call echk(ierr, __FILE__, __LINE__)

    call vecaypx(res(k), -one, r, ierr) ! res = r - res
    call echk(ierr, __FILE__, __LINE__)

    ! Step 5: Relax using the smoother and accumulate the correction
    call kspsolve(ksplevels(k), res(k), sol2(k), ierr)
    call echk(ierr, __FILE__, __LINE__)

    call vecaxpy(y, one, sol2(k), ierr) ! y = y + sol2(k)
    call echk(ierr, __FILE__, __LINE__)

end subroutine mgprecon
760 
761 end module amg
integer(kind=inttype), dimension(maxlevels) ncellslocal
Definition: ADjointVars.F90:40
Definition: amg.F90:6
type(arr3int4), dimension(:, :), allocatable, target coarseindices
Definition: amg.F90:68
integer(kind=inttype) amgouterits
Definition: amg.F90:33
integer(kind=inttype) amglevels
Definition: amg.F90:30
type(arr4int4), dimension(:, :), allocatable, target coarseoversetindices
Definition: amg.F90:69
subroutine setupshellpc(pc, ierr)
Definition: amg.F90:520
integer(kind=inttype) amgnsmooth
Definition: amg.F90:36
subroutine destroyamg
Definition: amg.F90:434
integer(kind=inttype) amgfilllevelfine
Definition: amg.F90:50
integer(kind=inttype) amglocalpreconitsfine
Definition: amg.F90:40
subroutine prolongvec(x, y, interp)
Definition: amg.F90:673
integer(kind=inttype) amgfilllevel
Definition: amg.F90:49
subroutine setupamg(inputMat, nCell, blockSize, levels, nSmooth)
Definition: amg.F90:76
integer(kind=inttype) amglocalpreconitscoarse
Definition: amg.F90:41
subroutine applyshellpc(pc, x, y, ierr)
Definition: amg.F90:468
subroutine restrictvec(x, y, interp)
Definition: amg.F90:631
type(arr1int4), dimension(:), allocatable interps
Definition: amg.F90:65
integer(kind=inttype) amgasmoverlapcoarse
Definition: amg.F90:46
character(len=maxstringlen) amgmatrixordering
Definition: amg.F90:54
logical amgsetup
Definition: amg.F90:71
integer(kind=inttype) amgasmoverlap
Definition: amg.F90:44
integer(kind=inttype) amgasmoverlapfine
Definition: amg.F90:45
integer bs
Definition: amg.F90:72
recursive subroutine mgprecon(r, y, k)
Definition: amg.F90:713
integer(kind=inttype) amgfilllevelcoarse
Definition: amg.F90:51
subroutine destroyshellpc(pc, ierr)
Definition: amg.F90:620
integer(kind=inttype) amglocalpreconits
Definition: amg.F90:39
integer(kind=inttype) jl
integer(kind=inttype) nx
integer(kind=inttype) ny
integer(kind=inttype), dimension(:, :, :), pointer iblank
integer(kind=inttype) jb
integer(kind=inttype) kb
integer(kind=inttype) ib
integer(kind=inttype) nz
integer(kind=inttype) kl
integer(kind=inttype) il
type(internalcommtype), dimension(:), allocatable internalcell_2nd
type(commtype), dimension(:), allocatable commpatterncell_2nd
integer adflow_comm_world
real(kind=realtype), parameter zero
Definition: constants.F90:71
real(kind=realtype), parameter one
Definition: constants.F90:72
subroutine whalo1to1intgeneric(nVar, level, sps, commPattern, internal)
Definition: utils.F90:1
subroutine echk(errorcode, file, line)
Definition: utils.F90:5722
subroutine setpointers(nn, mm, ll)
Definition: utils.F90:3237