Actual source code: ex2.c

petsc-master 2019-09-15

static char help[] = "Solves a linear system in parallel with KSP.\n\
Input parameters include:\n\
  -view_exact_sol   : write exact solution vector to stdout\n\
  -m <mesh_x>       : number of mesh points in x-direction\n\
  -n <mesh_y>       : number of mesh points in y-direction\n\n";

/*T
   Concepts: KSP^basic parallel example;
   Concepts: KSP^Laplacian, 2d
   Concepts: Laplacian, 2d
   Processors: n
T*/

/*
  Include "petscksp.h" so that we can use KSP solvers.
*/
#include <petscksp.h>

int main(int argc,char **args)
{
  Vec            x,b,u;    /* approx solution, RHS, exact solution */
  Mat            A;        /* linear system matrix */
  KSP            ksp;      /* linear solver context */
  PetscReal      norm;     /* norm of solution error */
  PetscInt       i,j,Ii,J,Istart,Iend,m = 8,n = 7,its;
  PetscErrorCode ierr;
  PetscBool      flg;
  PetscScalar    v;

  ierr = PetscInitialize(&argc,&args,(char*)0,help);if (ierr) return ierr;
  PetscOptionsGetInt(NULL,NULL,"-m",&m,NULL);
  PetscOptionsGetInt(NULL,NULL,"-n",&n,NULL);
  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
         Compute the matrix and right-hand-side vector that define
         the linear system, Ax = b.
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  /*
     Create parallel matrix, specifying only its global dimensions.
     When using MatCreate(), the matrix format can be specified at
     runtime. Also, the parallel partitioning of the matrix is
     determined by PETSc at runtime.

     Performance tuning note: For problems of substantial size,
     preallocation of matrix memory is crucial for attaining good
     performance. See the matrix chapter of the users manual for details.
  */
  MatCreate(PETSC_COMM_WORLD,&A);
  MatSetSizes(A,PETSC_DECIDE,PETSC_DECIDE,m*n,m*n);
  MatSetFromOptions(A);
  MatMPIAIJSetPreallocation(A,5,NULL,5,NULL);
  MatSeqAIJSetPreallocation(A,5,NULL);
  MatSeqSBAIJSetPreallocation(A,1,5,NULL);
  MatMPISBAIJSetPreallocation(A,1,5,NULL,5,NULL);
  MatMPISELLSetPreallocation(A,5,NULL,5,NULL);
  MatSeqSELLSetPreallocation(A,5,NULL);
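
  /*
     Illustrative sketch (not part of the original example, hence left
     disabled): for this 5-point stencil the exact number of nonzeros in
     each row is known, so exact per-row counts could replace the uniform
     estimate of 5 used above.  Shown for the sequential (one-process)
     case only.
  */
#if 0
  PetscInt *nnz,row,ri,rj;
  PetscMalloc1(m*n,&nnz);
  for (row=0; row<m*n; row++) {
    ri = row/n; rj = row - ri*n;                          /* grid indices of this row */
    nnz[row] = 1 + (ri>0) + (ri<m-1) + (rj>0) + (rj<n-1); /* diagonal + neighbors present */
  }
  MatSeqAIJSetPreallocation(A,0,nnz);
  PetscFree(nnz);
#endif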

  /*
     Currently, all PETSc parallel matrix formats are partitioned by
     contiguous chunks of rows across the processors.  Determine which
     rows of the matrix are locally owned.
  */
  MatGetOwnershipRange(A,&Istart,&Iend);

  /*
     Set matrix elements for the 2-D, five-point stencil in parallel.
      - Each processor needs to insert only elements that it owns
        locally (but any non-local elements will be sent to the
        appropriate processor during matrix assembly).
      - Always specify global rows and columns of matrix entries.

     Note: this uses the less common natural ordering that orders first
     all the unknowns for x = h, then for x = 2h, etc.; hence you see
     J = Ii +- n instead of J = Ii +- m as you might expect. The more
     standard ordering would first do all variables for y = h, then
     y = 2h, etc.
   */
  for (Ii=Istart; Ii<Iend; Ii++) {
    v = -1.0; i = Ii/n; j = Ii - i*n;
    if (i>0)   {J = Ii - n; MatSetValues(A,1,&Ii,1,&J,&v,ADD_VALUES);}
    if (i<m-1) {J = Ii + n; MatSetValues(A,1,&Ii,1,&J,&v,ADD_VALUES);}
    if (j>0)   {J = Ii - 1; MatSetValues(A,1,&Ii,1,&J,&v,ADD_VALUES);}
    if (j<n-1) {J = Ii + 1; MatSetValues(A,1,&Ii,1,&J,&v,ADD_VALUES);}
    v = 4.0; MatSetValues(A,1,&Ii,1,&Ii,&v,ADD_VALUES);
  }
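
  /*
     For comparison, a sketch (disabled; not part of the original flow) of
     the same stencil under the standard ordering mentioned above, where x
     varies fastest (Ii = j*m + i) and vertical neighbors are J = Ii +- m:
  */
#if 0
  for (Ii=Istart; Ii<Iend; Ii++) {
    v = -1.0; j = Ii/m; i = Ii - j*m;
    if (j>0)   {J = Ii - m; MatSetValues(A,1,&Ii,1,&J,&v,ADD_VALUES);}
    if (j<n-1) {J = Ii + m; MatSetValues(A,1,&Ii,1,&J,&v,ADD_VALUES);}
    if (i>0)   {J = Ii - 1; MatSetValues(A,1,&Ii,1,&J,&v,ADD_VALUES);}
    if (i<m-1) {J = Ii + 1; MatSetValues(A,1,&Ii,1,&J,&v,ADD_VALUES);}
    v = 4.0; MatSetValues(A,1,&Ii,1,&Ii,&v,ADD_VALUES);
  }
#endif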

  /*
     Assemble matrix, using the 2-step process:
       MatAssemblyBegin(), MatAssemblyEnd()
     Computations can be done while messages are in transit
     by placing code between these two statements.
  */
  MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
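  /* (purely local work that does not touch A could be placed here to
     overlap with the communication started above) */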
  MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);

  /* A is symmetric. Set symmetric flag to enable ICC/Cholesky preconditioner */
  MatSetOption(A,MAT_SYMMETRIC,PETSC_TRUE);
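
  /*
     With the symmetric flag set, symmetric methods can be selected at
     runtime, e.g. -ksp_type cg -pc_type icc on one process, or (a
     suggestion, not from the original example) -pc_type bjacobi
     -sub_pc_type icc in parallel.
  */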

  /*
     Create parallel vectors.
      - We form 1 vector from scratch and then duplicate as needed.
      - When using VecCreate(), VecSetSizes() and VecSetFromOptions()
        in this example, we specify only the vector's global dimension;
        the parallel partitioning is determined at runtime.
      - When solving a linear system, the vectors and matrices MUST
        be partitioned accordingly.  PETSc automatically generates
        appropriately partitioned matrices and vectors when MatCreate()
        and VecCreate() are used with the same communicator.
      - The user can alternatively specify the local vector and matrix
        dimensions when more sophisticated partitioning is needed
        (replacing the PETSC_DECIDE argument in the VecSetSizes() statement
        below).
  */
  VecCreate(PETSC_COMM_WORLD,&u);
  VecSetSizes(u,PETSC_DECIDE,m*n);
  VecSetFromOptions(u);
  VecDuplicate(u,&b);
  VecDuplicate(b,&x);

  /*
     Set exact solution; then compute right-hand-side vector.
     By default we use an exact solution with all elements set to 1.0.
  */
  VecSet(u,1.0);
  MatMult(A,u,b);
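
  /*
     A sketch (disabled; not part of this example's flow) of using a random
     exact solution instead of the all-ones vector set above:
  */
#if 0
  PetscRandom rctx;
  PetscRandomCreate(PETSC_COMM_WORLD,&rctx);
  VecSetRandom(u,rctx);          /* replaces VecSet(u,1.0) above */
  PetscRandomDestroy(&rctx);
  MatMult(A,u,b);                /* recompute b for the new u */
#endif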

  /*
     View the exact solution vector if desired
  */
  flg  = PETSC_FALSE;
  PetscOptionsGetBool(NULL,NULL,"-view_exact_sol",&flg,NULL);
  if (flg) {VecView(u,PETSC_VIEWER_STDOUT_WORLD);}

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
                Create the linear solver and set various options
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  KSPCreate(PETSC_COMM_WORLD,&ksp);

  /*
     Set operators. Here the matrix that defines the linear system
     also serves as the preconditioning matrix.
  */
  KSPSetOperators(ksp,A,A);
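
  /*
     If a different matrix were used to build the preconditioner, it would
     be passed as the third argument instead (B here is hypothetical, not
     defined in this example):
  */
#if 0
  KSPSetOperators(ksp,A,B);   /* B: hypothetical preconditioning matrix */
#endif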

  /*
     Set linear solver defaults for this problem (optional).
     - By extracting the KSP and PC contexts from the KSP context,
       we can then directly call any KSP and PC routines to set
       various options.
     - The following statement is optional; all of these
       parameters could alternatively be specified at runtime via
       KSPSetFromOptions().  All of these defaults can be
       overridden at runtime, as indicated below.
  */
  KSPSetTolerances(ksp,1.e-2/((m+1)*(n+1)),1.e-50,PETSC_DEFAULT,PETSC_DEFAULT);
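
  /*
     A sketch (disabled) of the PC extraction mentioned in the comment
     above: pull the PC context out of the KSP and set options on it
     directly.
  */
#if 0
  PC pc;
  KSPGetPC(ksp,&pc);
  PCSetType(pc,PCJACOBI);
#endif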

  /*
    Set runtime options, e.g.,
        -ksp_type <type> -pc_type <type> -ksp_monitor -ksp_rtol <rtol>
    These options will override those specified above as long as
    KSPSetFromOptions() is called _after_ any other customization
    routines.
  */
  KSPSetFromOptions(ksp);
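
  /* For example:  mpiexec -n 4 ./ex2 -ksp_type cg -pc_type jacobi -ksp_monitor_short */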

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
                      Solve the linear system
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  KSPSolve(ksp,b,x);

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
                      Check the solution and clean up
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  VecAXPY(x,-1.0,u);             /* x <- x - u, the error in the computed solution */
  VecNorm(x,NORM_2,&norm);
  KSPGetIterationNumber(ksp,&its);
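
  /*
     Optionally (a disabled sketch), one can also query why the iteration
     stopped:
  */
#if 0
  KSPConvergedReason reason;
  KSPGetConvergedReason(ksp,&reason);
#endif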

  /*
     Print convergence information.  PetscPrintf() produces a single
     print statement from all processes that share a communicator.
     An alternative is PetscFPrintf(), which prints to a file.
  */
  PetscPrintf(PETSC_COMM_WORLD,"Norm of error %g iterations %D\n",(double)norm,its);

  /*
     Free work space.  All PETSc objects should be destroyed when they
     are no longer needed.
  */
  KSPDestroy(&ksp);
  VecDestroy(&u);  VecDestroy(&x);
  VecDestroy(&b);  MatDestroy(&A);

  /*
     Always call PetscFinalize() before exiting a program.  This routine
       - finalizes the PETSc libraries as well as MPI
       - provides summary and diagnostic information if certain runtime
         options are chosen (e.g., -log_view).
  */
  ierr = PetscFinalize();
  return ierr;
}

/*TEST

   build:
      requires: !complex !single

   test:
      suffix: chebyest_1
      args: -m 80 -n 80 -ksp_pc_side right -pc_type ksp -ksp_ksp_type chebyshev -ksp_ksp_max_it 5 -ksp_ksp_chebyshev_esteig 0.9,0,0,1.1 -ksp_monitor_short

   test:
      suffix: chebyest_2
      args: -m 80 -n 80 -ksp_pc_side right -pc_type ksp -ksp_ksp_type chebyshev -ksp_ksp_max_it 5 -ksp_ksp_chebyshev_esteig 0.9,0,0,1.1 -ksp_esteig_ksp_type cg -ksp_monitor_short

   test:
      args: -ksp_monitor_short -m 5 -n 5 -ksp_gmres_cgs_refinement_type refine_always

   test:
      suffix: 2
      nsize: 2
      args: -ksp_monitor_short -m 5 -n 5 -ksp_gmres_cgs_refinement_type refine_always

   test:
      suffix: 3
      args: -pc_type sor -pc_sor_symmetric -ksp_monitor_short -ksp_gmres_cgs_refinement_type refine_always

   test:
      suffix: 4
      args: -pc_type eisenstat -ksp_monitor_short -ksp_gmres_cgs_refinement_type refine_always

   test:
      suffix: 5
      nsize: 2
      args: -ksp_monitor_short -m 5 -n 5 -mat_view draw -ksp_gmres_cgs_refinement_type refine_always -nox
      output_file: output/ex2_2.out

   test:
      suffix: bjacobi
      nsize: 4
      args: -pc_type bjacobi -pc_bjacobi_blocks 1 -ksp_monitor_short -sub_pc_type jacobi -sub_ksp_type gmres

   test:
      suffix: bjacobi_2
      nsize: 4
      args: -pc_type bjacobi -pc_bjacobi_blocks 2 -ksp_monitor_short -sub_pc_type jacobi -sub_ksp_type gmres -ksp_view

   test:
      suffix: bjacobi_3
      nsize: 4
      args: -pc_type bjacobi -pc_bjacobi_blocks 4 -ksp_monitor_short -sub_pc_type jacobi -sub_ksp_type gmres

   test:
      suffix: fbcgs
      args: -ksp_type fbcgs -pc_type ilu

   test:
      suffix: fbcgs_2
      nsize: 3
      args: -ksp_type fbcgsr -pc_type bjacobi

   test:
      suffix: groppcg
      args: -ksp_monitor_short -ksp_type groppcg -m 9 -n 9

   test:
      suffix: mkl_pardiso_cholesky
      requires: mkl_pardiso
      args: -ksp_type preonly -pc_type cholesky -mat_type sbaij -pc_factor_mat_solver_type mkl_pardiso

   test:
      suffix: mkl_pardiso_lu
      requires: mkl_pardiso
      args: -ksp_type preonly -pc_type lu -pc_factor_mat_solver_type mkl_pardiso

   test:
      suffix: pipebcgs
      args: -ksp_monitor_short -ksp_type pipebcgs -m 9 -n 9

   test:
      suffix: pipecg
      args: -ksp_monitor_short -ksp_type pipecg -m 9 -n 9

   test:
      suffix: pipecgrr
      args: -ksp_monitor_short -ksp_type pipecgrr -m 9 -n 9

   test:
      suffix: pipecr
      args: -ksp_monitor_short -ksp_type pipecr -m 9 -n 9

   test:
      suffix: pipelcg
      args: -ksp_monitor_short -ksp_type pipelcg -m 9 -n 9 -pc_type none -ksp_pipelcg_pipel 2 -ksp_pipelcg_lmax 2
      filter: grep -v "sqrt breakdown in iteration"

   test:
      suffix: sell
      args: -ksp_monitor_short -ksp_gmres_cgs_refinement_type refine_always -m 9 -n 9 -mat_type sell

   test:
      requires: mumps
      suffix: sell_mumps
      args: -ksp_type preonly -m 9 -n 12 -mat_type sell -pc_type lu -pc_factor_mat_solver_type mumps -pc_factor_mat_ordering_type natural

   test:
      suffix: telescope
      nsize: 4
      args: -m 100 -n 100 -ksp_converged_reason -pc_type telescope -pc_telescope_reduction_factor 4 -telescope_pc_type bjacobi

   test:
      suffix: umfpack
      requires: suitesparse
      args: -ksp_type preonly -pc_type lu -pc_factor_mat_solver_type umfpack

   test:
      suffix: pc_symmetric
      args: -m 10 -n 9 -ksp_converged_reason -ksp_type gmres -ksp_pc_side symmetric -pc_type cholesky

TEST*/