Actual source code: ex5f.F90

petsc-master 2020-06-03
```
      !Solves two linear systems in parallel with KSP.  The code
      !illustrates repeated solution of linear systems with the same preconditioner
      !method but different matrices (having the same nonzero structure).  The code
      !also uses multiple profiling stages.  Input arguments are
      !  -m <size> : problem size
      !  -mat_nonsym : use nonsymmetric matrix (default is symmetric)

      !Concepts: KSP^repeatedly solving linear systems;
      !Concepts: PetscLog^profiling multiple stages of code;
      !Processors: n

program main
#include <petsc/finclude/petscksp.h>
      use petscksp

      implicit none
      KSP            :: myKsp            ! linear solver context
      Mat            :: C,Ctmp           ! matrices
      Vec            :: x,u,b            ! approx solution, exact solution, RHS
      PetscReal      :: norm             ! norm of solution error
      PetscScalar    :: v
      PetscScalar, parameter :: myNone = -1.0
      PetscInt       :: Ii,JJ,ldim,low,high,iglobal,Istart,Iend
      PetscErrorCode :: ierr
      PetscInt       :: i,j,its,n
      PetscInt       :: m = 3
      PetscMPIInt    :: mySize,myRank
      PetscBool :: &
        testnewC         = PETSC_FALSE, &
        testscaledMat    = PETSC_FALSE, &
        mat_nonsymmetric = PETSC_FALSE
      PetscBool      :: flg
      PetscRandom    :: rctx
      PetscLogStage,dimension(0:1) :: stages
      character(len=80)          :: outputString
      PetscInt,parameter :: one = 1

      call PetscInitialize(PETSC_NULL_CHARACTER,ierr)
      if (ierr /= 0) then
        write(6,*) 'Unable to initialize PETSc'
        stop
      endif

      call PetscOptionsGetInt(PETSC_NULL_OPTIONS,PETSC_NULL_CHARACTER,'-m',m,flg,ierr)
      CHKERRA(ierr)
      call MPI_Comm_rank(PETSC_COMM_WORLD,myRank,ierr)
      CHKERRA(ierr)
      call MPI_Comm_size(PETSC_COMM_WORLD,mySize,ierr)
      CHKERRA(ierr)
      n = 2*mySize
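      ! n is twice the number of MPI ranks, so in the second-stage assembly
      ! loop below each rank sets values for exactly the two grid columns
      ! j = 2*myRank and 2*myRank+1.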

      ! Set flag if we are doing a nonsymmetric problem; the default is symmetric.

      call PetscOptionsGetBool(PETSC_NULL_OPTIONS,PETSC_NULL_CHARACTER,"-mat_nonsym",mat_nonsymmetric,flg,ierr)
      CHKERRA(ierr)
      call PetscOptionsGetBool(PETSC_NULL_OPTIONS,PETSC_NULL_CHARACTER,"-test_scaledMat",testscaledMat,flg,ierr)
      CHKERRA(ierr)

      ! Register two stages for separate profiling of the two linear solves.
      ! Use the runtime option -log_view for a printout of performance
      ! statistics at the program's conclusion.

      call PetscLogStageRegister("Original Solve",stages(0),ierr)
      CHKERRA(ierr)
      call PetscLogStageRegister("Second Solve",stages(1),ierr)
      CHKERRA(ierr)

      ! -------------- Stage 0: Solve Original System ----------------------
      ! Indicate to PETSc profiling that we're beginning the first stage.

      call PetscLogStagePush(stages(0),ierr)
      CHKERRA(ierr)

      ! Create parallel matrix, specifying only its global dimensions.
      ! When using MatCreate(), the matrix format can be specified at
      ! runtime. Also, the parallel partitioning of the matrix is
      ! determined by PETSc at runtime.

      call MatCreate(PETSC_COMM_WORLD,C,ierr)
      CHKERRA(ierr)
      call MatSetSizes(C,PETSC_DECIDE,PETSC_DECIDE,m*n,m*n,ierr)
      CHKERRA(ierr)
      call MatSetFromOptions(C,ierr)
      CHKERRA(ierr)
      call MatSetUp(C,ierr)
      CHKERRA(ierr)

      ! Currently, all PETSc parallel matrix formats are partitioned by
      ! contiguous chunks of rows across the processors.  Determine which
      ! rows of the matrix are locally owned.

      call MatGetOwnershipRange(C,Istart,Iend,ierr)
      CHKERRA(ierr)

      ! Set matrix entries in parallel.
      ! - Each processor needs to insert only elements that it owns
      !   locally (but any non-local elements will be sent to the
      !   appropriate processor during matrix assembly).
      ! - Always specify global rows and columns of matrix entries.
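      ! Each global row Ii corresponds to grid point (i,j) = (Ii/n, Ii-n*(Ii/n))
      ! of an m x n grid.  The loop below assembles the standard 5-point
      ! Laplacian stencil: -1 for each existing neighbor, 4 on the diagonal.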

      initializeC: do Ii=Istart,Iend-1
          v = -1.0; i = Ii/n; j = Ii - i*n
          if (i>0) then
            JJ = Ii - n
            call MatSetValues(C,one,Ii,one,JJ,v,ADD_VALUES,ierr)
            CHKERRA(ierr)
          endif

          if (i<m-1) then
            JJ = Ii + n
            call MatSetValues(C,one,Ii,one,JJ,v,ADD_VALUES,ierr)
            CHKERRA(ierr)
          endif

          if (j>0) then
            JJ = Ii - 1
            call MatSetValues(C,one,Ii,one,JJ,v,ADD_VALUES,ierr)
            CHKERRA(ierr)
          endif

          if (j<n-1) then
            JJ = Ii + 1
            call MatSetValues(C,one,Ii,one,JJ,v,ADD_VALUES,ierr)
            CHKERRA(ierr)
          endif

          v = 4.0
          call MatSetValues(C,one,Ii,one,Ii,v,ADD_VALUES,ierr)
          CHKERRA(ierr)

      enddo initializeC

      ! Make the matrix nonsymmetric if desired
      if (mat_nonsymmetric) then
        do Ii=Istart,Iend-1
          v = -1.5; i = Ii/n
          if (i>1) then
            JJ = Ii - n - 1
            call MatSetValues(C,one,Ii,one,JJ,v,ADD_VALUES,ierr)
            CHKERRA(ierr)
          endif
        enddo
      else
        call MatSetOption(C,MAT_SYMMETRIC,PETSC_TRUE,ierr)
        CHKERRA(ierr)
        call MatSetOption(C,MAT_SYMMETRY_ETERNAL,PETSC_TRUE,ierr)
        CHKERRA(ierr)
      endif

      ! Assemble matrix, using the 2-step process:
      !   MatAssemblyBegin(), MatAssemblyEnd()
      ! Computations can be done while messages are in transition
      ! by placing code between these two statements.

      call MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY,ierr)
      CHKERRA(ierr)
      call MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY,ierr)
      CHKERRA(ierr)

      ! Create parallel vectors.
      ! - When using VecSetSizes(), we specify only the vector's global
      !   dimension; the parallel partitioning is determined at runtime.
      ! - Note: We form 1 vector from scratch and then duplicate as needed.

      call VecCreate(PETSC_COMM_WORLD,u,ierr)
      call VecSetSizes(u,PETSC_DECIDE,m*n,ierr)
      call VecSetFromOptions(u,ierr)
      call VecDuplicate(u,b,ierr)
      call VecDuplicate(b,x,ierr)

      ! Currently, all parallel PETSc vectors are partitioned by
      ! contiguous chunks across the processors.  Determine which
      ! range of entries is locally owned.

      call VecGetOwnershipRange(x,low,high,ierr)
      CHKERRA(ierr)

      ! Set elements within the exact solution vector in parallel.
      ! - Each processor needs to insert only elements that it owns
      !   locally (but any non-local entries will be sent to the
      !   appropriate processor during vector assembly).
      ! - Always specify global locations of vector entries.

      call VecGetLocalSize(x,ldim,ierr)
      CHKERRA(ierr)
      do i=0,ldim-1
        iglobal = i + low
        v = real(i + 100*myRank)
        call VecSetValues(u,one,iglobal,v,INSERT_VALUES,ierr)
        CHKERRA(ierr)
      enddo

      ! Assemble vector, using the 2-step process:
      !   VecAssemblyBegin(), VecAssemblyEnd()
      ! Computations can be done while messages are in transition,
      ! by placing code between these two statements.

      call VecAssemblyBegin(u,ierr)
      CHKERRA(ierr)
      call VecAssemblyEnd(u,ierr)
      CHKERRA(ierr)

      ! Compute right-hand-side vector

      call MatMult(C,u,b,ierr)
      CHKERRA(ierr)

      ! Create linear solver context

      call KSPCreate(PETSC_COMM_WORLD,myKsp,ierr)
      CHKERRA(ierr)

      ! Set operators. Here the matrix that defines the linear system
      ! also serves as the preconditioning matrix.

      call KSPSetOperators(myKsp,C,C,ierr)
      CHKERRA(ierr)

      ! Set runtime options (e.g., -ksp_type <type> -pc_type <type>)

      call KSPSetFromOptions(myKsp,ierr)
      CHKERRA(ierr)

      ! Solve linear system.  Here we explicitly call KSPSetUp() for more
      ! detailed performance monitoring of certain preconditioners, such
      ! as ICC and ILU.  This call is optional, as KSPSetUp() will
      ! automatically be called within KSPSolve() if it hasn't been
      ! called previously.

      call KSPSetUp(myKsp,ierr)
      CHKERRA(ierr)

      call KSPSolve(myKsp,b,x,ierr)
      CHKERRA(ierr)

      ! Check the error

      call VecAXPY(x,myNone,u,ierr)
      call VecNorm(x,NORM_2,norm,ierr)

      call KSPGetIterationNumber(myKsp,its,ierr)
      if (.not. testscaledMat .or. norm > 1.e-7) then
        write(outputString,'(a,f11.9,a,i2.2,a)') 'Norm of error ',norm,', Iterations ',its,'\n'
        call PetscPrintf(PETSC_COMM_WORLD,outputString,ierr)
      endif

      ! -------------- Stage 1: Solve Second System ----------------------

      ! Solve another linear system with the same method.  We reuse the KSP
      ! context, matrix and vector data structures, and hence save the
      ! overhead of creating new ones.

      ! Indicate to PETSc profiling that we're concluding the first
      ! stage with PetscLogStagePop(), and beginning the second stage with
      ! PetscLogStagePush().

      call PetscLogStagePop(ierr)
      CHKERRA(ierr)
      call PetscLogStagePush(stages(1),ierr)
      CHKERRA(ierr)

      ! Initialize all matrix entries to zero.  MatZeroEntries() retains the
      ! nonzero structure of the matrix for sparse formats.

      call MatZeroEntries(C,ierr)
      CHKERRA(ierr)

      ! Assemble matrix again.  Note that we retain the same matrix data
      ! structure and the same nonzero pattern; we just change the values
      ! of the matrix entries.

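      ! Same 5-point stencil as in stage 0, but with diagonal value 6.0
      ! instead of 4.0: the entries change while the sparsity pattern
      ! stays identical.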
      do i=0,m-1
        do j=2*myRank,2*myRank+1
          v = -1.0; Ii = j + n*i
          if (i>0) then
            JJ = Ii - n
            call MatSetValues(C,one,Ii,one,JJ,v,ADD_VALUES,ierr)
            CHKERRA(ierr)
          endif

          if (i<m-1) then
            JJ = Ii + n
            call MatSetValues(C,one,Ii,one,JJ,v,ADD_VALUES,ierr)
            CHKERRA(ierr)
          endif

          if (j>0) then
            JJ = Ii - 1
            call MatSetValues(C,one,Ii,one,JJ,v,ADD_VALUES,ierr)
            CHKERRA(ierr)
          endif

          if (j<n-1) then
            JJ = Ii + 1
            call MatSetValues(C,one,Ii,one,JJ,v,ADD_VALUES,ierr)
            CHKERRA(ierr)
          endif

          v = 6.0
          call MatSetValues(C,one,Ii,one,Ii,v,ADD_VALUES,ierr)
          CHKERRA(ierr)

        enddo
      enddo

      ! Make the matrix nonsymmetric if desired

      if (mat_nonsymmetric) then
        do Ii=Istart,Iend-1
          v = -1.5; i = Ii/n
          if (i>1) then
            JJ = Ii - n - 1
            call MatSetValues(C,one,Ii,one,JJ,v,ADD_VALUES,ierr)
            CHKERRA(ierr)
          endif
        enddo
      endif

      ! Assemble matrix, using the 2-step process:
      !   MatAssemblyBegin(), MatAssemblyEnd()
      ! Computations can be done while messages are in transition
      ! by placing code between these two statements.

      call MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY,ierr)
      CHKERRA(ierr)
      call MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY,ierr)
      CHKERRA(ierr)
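
      ! With -test_scaledMat, two diagonal entries are made much smaller,
      ! producing a badly scaled matrix; the superlu_dist tests below use
      ! this to exercise repeated factorizations on such systems.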

      if (testscaledMat) then
        ! Scale a(0,0) and a(M-1,M-1)

        if (myRank == 0) then
          v = 6.0*0.00001; Ii = 0; JJ = 0
          call MatSetValues(C,one,Ii,one,JJ,v,INSERT_VALUES,ierr)
          CHKERRA(ierr)
        elseif (myRank == mySize-1) then
          v = 6.0*0.00001; Ii = m*n-1; JJ = m*n-1
          call MatSetValues(C,one,Ii,one,JJ,v,INSERT_VALUES,ierr)
          CHKERRA(ierr)
        endif

        call MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY,ierr)
        call MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY,ierr)

        ! Compute a new right-hand-side vector

        call VecDestroy(u,ierr)
        call VecCreate(PETSC_COMM_WORLD,u,ierr)
        call VecSetSizes(u,PETSC_DECIDE,m*n,ierr)
        call VecSetFromOptions(u,ierr)

        call PetscRandomCreate(PETSC_COMM_WORLD,rctx,ierr)
        call PetscRandomSetFromOptions(rctx,ierr)
        call VecSetRandom(u,rctx,ierr)
        call PetscRandomDestroy(rctx,ierr)
        call VecAssemblyBegin(u,ierr)
        call VecAssemblyEnd(u,ierr)

      endif

      call PetscOptionsGetBool(PETSC_NULL_OPTIONS,PETSC_NULL_CHARACTER,"-test_newMat",testnewC,flg,ierr)
      CHKERRA(ierr)

      if (testnewC) then
        ! The user may supply a new matrix C with the same nonzero pattern, e.g.
        !   ex5 -ksp_monitor -mat_type sbaij -pc_type cholesky -pc_factor_mat_solver_type mumps -test_newMat

        call MatDuplicate(C,MAT_COPY_VALUES,Ctmp,ierr)
        call MatDestroy(C,ierr)
        call MatDuplicate(Ctmp,MAT_COPY_VALUES,C,ierr)
        call MatDestroy(Ctmp,ierr)
      endif

      call MatMult(C,u,b,ierr); CHKERRA(ierr)

      ! Set operators. Here the matrix that defines the linear system
      ! also serves as the preconditioning matrix.

      call KSPSetOperators(myKsp,C,C,ierr); CHKERRA(ierr)

      ! Solve linear system

      call KSPSetUp(myKsp,ierr); CHKERRA(ierr)
      call KSPSolve(myKsp,b,x,ierr); CHKERRA(ierr)

      ! Check the error

      call VecAXPY(x,myNone,u,ierr); CHKERRA(ierr)
      call VecNorm(x,NORM_2,norm,ierr); CHKERRA(ierr)
      call KSPGetIterationNumber(myKsp,its,ierr); CHKERRA(ierr)
      if (.not. testscaledMat .or. norm > 1.e-7) then
        write(outputString,'(a,f11.9,a,i2.2,a)') 'Norm of error ',norm,', Iterations ',its,'\n'
        call PetscPrintf(PETSC_COMM_WORLD,outputString,ierr)
      endif

      ! Free work space.  All PETSc objects should be destroyed when they
      ! are no longer needed.

      call KSPDestroy(myKsp,ierr); CHKERRA(ierr)
      call VecDestroy(u,ierr); CHKERRA(ierr)
      call VecDestroy(x,ierr); CHKERRA(ierr)
      call VecDestroy(b,ierr); CHKERRA(ierr)
      call MatDestroy(C,ierr); CHKERRA(ierr)

      ! Indicate to PETSc profiling that we're concluding the second stage

      call PetscLogStagePop(ierr)
      CHKERRA(ierr)

      call PetscFinalize(ierr)

!/*TEST
!
!   test:
!      args: -pc_type jacobi -ksp_monitor_short -ksp_gmres_cgs_refinement_type refine_always
!
!   test:
!      suffix: 2
!      nsize: 2
!      args: -pc_type jacobi -ksp_monitor_short -ksp_gmres_cgs_refinement_type refine_always -ksp_rtol .000001
!
!   test:
!      suffix: 5
!      nsize: 2
!      args: -ksp_gmres_cgs_refinement_type refine_always -ksp_monitor_lg_residualnorm -ksp_monitor_lg_true_residualnorm
!      output_file: output/ex5f_5.out
!
!   test:
!      suffix: asm
!      nsize: 4
!      args: -pc_type asm
!      output_file: output/ex5f_asm.out
!
!   test:
!      suffix: asm_baij
!      nsize: 4
!      args: -pc_type asm -mat_type baij
!      output_file: output/ex5f_asm.out
!
!   test:
!      suffix: redundant_0
!      args: -m 1000 -pc_type redundant -pc_redundant_number 1 -redundant_ksp_type gmres -redundant_pc_type jacobi
!
!   test:
!      suffix: redundant_1
!      nsize: 5
!      args: -pc_type redundant -pc_redundant_number 1 -redundant_ksp_type gmres -redundant_pc_type jacobi
!
!   test:
!      suffix: redundant_2
!      nsize: 5
!      args: -pc_type redundant -pc_redundant_number 3 -redundant_ksp_type gmres -redundant_pc_type jacobi
!
!   test:
!      suffix: redundant_3
!      nsize: 5
!      args: -pc_type redundant -pc_redundant_number 5 -redundant_ksp_type gmres -redundant_pc_type jacobi
!
!   test:
!      suffix: redundant_4
!      nsize: 5
!      args: -pc_type redundant -pc_redundant_number 3 -redundant_ksp_type gmres -redundant_pc_type jacobi -psubcomm_type interlaced
!
!   test:
!      suffix: superlu_dist
!      nsize: 15
!      requires: superlu_dist
!      args: -pc_type lu -pc_factor_mat_solver_type superlu_dist -mat_superlu_dist_equil false -m 150 -mat_superlu_dist_r 3 -mat_superlu_dist_c 5 -test_scaledMat
!
!   test:
!      suffix: superlu_dist_2
!      nsize: 15
!      requires: superlu_dist
!      args: -pc_type lu -pc_factor_mat_solver_type superlu_dist -mat_superlu_dist_equil false -m 150 -mat_superlu_dist_r 3 -mat_superlu_dist_c 5 -test_scaledMat -mat_superlu_dist_fact SamePattern_SameRowPerm
!      output_file: output/ex5f_superlu_dist.out
!
!   test:
!      suffix: superlu_dist_3
!      nsize: 15
!      requires: superlu_dist
!      args: -pc_type lu -pc_factor_mat_solver_type superlu_dist -mat_superlu_dist_equil false -m 500 -mat_superlu_dist_r 3 -mat_superlu_dist_c 5 -test_scaledMat -mat_superlu_dist_fact DOFACT
!      output_file: output/ex5f_superlu_dist.out
!
!   test:
!      suffix: superlu_dist_0
!      nsize: 1
!      requires: superlu_dist
!      args: -pc_type lu -pc_factor_mat_solver_type superlu_dist -test_scaledMat
!      output_file: output/ex5f_superlu_dist.out
!
!TEST*/

end program main
```
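
With a standard PETSc installation the example can be built and run along the lines of `make ex5f` followed by `mpiexec -n 2 ./ex5f -m 4 -mat_nonsym -log_view` (the make target and process count here are illustrative; any of the option combinations in the TEST block above can be used instead). With `-log_view`, the performance summary reports the "Original Solve" and "Second Solve" stages separately, which is what the two PetscLogStageRegister() calls are for.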