Actual source code: ex9.c

petsc-3.4.5 2014-06-29
static char help[] = "The solution of 2 different linear systems with different linear solvers.\n\
Also, this example illustrates the repeated\n\
solution of linear systems, while reusing matrix, vector, and solver data\n\
structures throughout the process.  Note the various stages of event logging.\n\n";

/*T
   Concepts: KSP^repeatedly solving linear systems;
   Concepts: PetscLog^profiling multiple stages of code;
   Concepts: PetscLog^user-defined event profiling;
   Processors: n
T*/
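
/*
   Example runs (a sketch combining the options defined in this file,
   -m, -t, and -my_ksp_monitor, with standard PETSc runtime options;
   the solver and preconditioner choices are illustrative only):
     mpiexec -n 2 ./ex9 -t 2 -pc_type jacobi -ksp_monitor_short \
        -s2_ksp_type bcgs -s2_pc_type jacobi -s2_ksp_monitor_short
*/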

/*
  Include "petscksp.h" so that we can use KSP solvers.  Note that this file
  automatically includes:
     petscsys.h    - base PETSc routines      petscvec.h - vectors
     petscmat.h    - matrices
     petscis.h     - index sets               petscksp.h - Krylov subspace methods
     petscviewer.h - viewers                  petscpc.h  - preconditioners
*/
#include <petscksp.h>

/*
   Declare user-defined routines
*/
extern PetscErrorCode CheckError(Vec,Vec,Vec,PetscInt,PetscReal,PetscLogEvent);
extern PetscErrorCode MyKSPMonitor(KSP,PetscInt,PetscReal,void*);

int main(int argc,char **args)
{
  Vec            x1,b1,x2,b2;   /* solution and RHS vectors for systems #1 and #2 */
  Vec            u;             /* exact solution vector */
  Mat            C1,C2;         /* matrices for systems #1 and #2 */
  KSP            ksp1,ksp2;     /* KSP contexts for systems #1 and #2 */
  PetscInt       ntimes = 3;    /* number of times to solve the linear systems */
  PetscLogEvent  CHECK_ERROR;   /* event number for error checking */
  PetscInt       ldim,low,high,iglobal,Istart,Iend,Istart2,Iend2;
  PetscInt       Ii,J,i,j,m = 3,n = 2,its,t;
  PetscBool      flg = PETSC_FALSE;
  PetscScalar    v;
  PetscMPIInt    rank,size;
#if defined(PETSC_USE_LOG)
  PetscLogStage  stages[3];
#endif

  PetscInitialize(&argc,&args,(char*)0,help);
  PetscOptionsGetInt(NULL,"-m",&m,NULL);
  PetscOptionsGetInt(NULL,"-t",&ntimes,NULL);
  MPI_Comm_rank(PETSC_COMM_WORLD,&rank);
  MPI_Comm_size(PETSC_COMM_WORLD,&size);
  n    = 2*size;

  /*
     Register various stages for profiling
  */
  PetscLogStageRegister("Prelim setup",&stages[0]);
  PetscLogStageRegister("Linear System 1",&stages[1]);
  PetscLogStageRegister("Linear System 2",&stages[2]);
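
  /*
     Running with -log_summary (the standard PETSc profiling option)
     prints a per-stage and per-event breakdown of time and flops.
  */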

  /*
     Register a user-defined event for profiling (error checking).
  */
  CHECK_ERROR = 0;
  PetscLogEventRegister("Check Error",KSP_CLASSID,&CHECK_ERROR);
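
  /*
     The matching PetscLogEventBegin()/PetscLogEventEnd() calls for this
     event appear in CheckError() below.
  */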

  /* - - - - - - - - - - - - Stage 0: - - - - - - - - - - - - - -
                        Preliminary Setup
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  PetscLogStagePush(stages[0]);

  /*
     Create data structures for first linear system.
      - Create parallel matrix, specifying only its global dimensions.
        When using MatCreate(), the matrix format can be specified at
        runtime. Also, the parallel partitioning of the matrix is
        determined by PETSc at runtime.
      - Create parallel vectors.
        - When using VecSetSizes(), we specify only the vector's global
          dimension; the parallel partitioning is determined at runtime.
        - Note: We form 1 vector from scratch and then duplicate as needed.
  */
  MatCreate(PETSC_COMM_WORLD,&C1);
  MatSetSizes(C1,PETSC_DECIDE,PETSC_DECIDE,m*n,m*n);
  MatSetFromOptions(C1);
  MatSetUp(C1);
  MatGetOwnershipRange(C1,&Istart,&Iend);
  VecCreate(PETSC_COMM_WORLD,&u);
  VecSetSizes(u,PETSC_DECIDE,m*n);
  VecSetFromOptions(u);
  VecDuplicate(u,&b1);
  VecDuplicate(u,&x1);
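
  /*
     Because MatSetFromOptions() and VecSetFromOptions() are used above,
     the storage formats can also be chosen at runtime, e.g. (a sketch):
       -mat_type aij -vec_type mpi
  */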

  /*
     Create first linear solver context.
     Set runtime options (e.g., -pc_type <type>).
     Note that the first linear system uses the default option
     names, while the second linear system uses a different
     options prefix.
  */
  KSPCreate(PETSC_COMM_WORLD,&ksp1);
  KSPSetFromOptions(ksp1);

  /*
     Set user-defined monitoring routine for first linear system.
  */
  PetscOptionsGetBool(NULL,"-my_ksp_monitor",&flg,NULL);
  if (flg) {KSPMonitorSet(ksp1,MyKSPMonitor,NULL,0);}
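
  /*
     (The built-in monitor could instead be enabled at runtime with
      -ksp_monitor or -ksp_monitor_short, without any code changes.)
  */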

  /*
     Create data structures for second linear system.
  */
  MatCreate(PETSC_COMM_WORLD,&C2);
  MatSetSizes(C2,PETSC_DECIDE,PETSC_DECIDE,m*n,m*n);
  MatSetFromOptions(C2);
  MatSetUp(C2);
  MatGetOwnershipRange(C2,&Istart2,&Iend2);
  VecDuplicate(u,&b2);
  VecDuplicate(u,&x2);

  /*
     Create second linear solver context
  */
  KSPCreate(PETSC_COMM_WORLD,&ksp2);

  /*
     Set different options prefix for second linear system.
     Set runtime options (e.g., -s2_pc_type <type>)
  */
  KSPAppendOptionsPrefix(ksp2,"s2_");
  KSPSetFromOptions(ksp2);
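
  /*
     The "s2_" prefix keeps the second solver's runtime options separate
     from the first solver's, e.g. (illustrative solver choices):
       -ksp_type gmres      applies to ksp1 (default prefix)
       -s2_ksp_type bcgs    applies to ksp2
  */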

  /*
     Assemble exact solution vector in parallel.  Note that each
     processor needs to set only its local part of the vector.
  */
  VecGetLocalSize(u,&ldim);
  VecGetOwnershipRange(u,&low,&high);
  for (i=0; i<ldim; i++) {
    iglobal = i + low;
    v       = (PetscScalar)(i + 100*rank);
    VecSetValues(u,1,&iglobal,&v,ADD_VALUES);
  }
  VecAssemblyBegin(u);
  VecAssemblyEnd(u);

  /*
     Log the number of flops for computing vector entries
  */
  PetscLogFlops(2.0*ldim);
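
  /*
     (Each local entry above costs one multiply and one add, i.e., 2 flops,
      hence 2*ldim flops in total.)
  */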

  /*
     End current profiling stage
  */
  PetscLogStagePop();

  /* --------------------------------------------------------------
                        Linear solver loop:
      Solve 2 different linear systems several times in succession
     -------------------------------------------------------------- */

  for (t=0; t<ntimes; t++) {

    /* - - - - - - - - - - - - Stage 1: - - - - - - - - - - - - - -
                 Assemble and solve first linear system
       - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

    /*
       Begin profiling stage #1
    */
    PetscLogStagePush(stages[1]);

    /*
       Initialize all matrix entries to zero.  MatZeroEntries() retains
       the nonzero structure of the matrix for sparse formats.
    */
    if (t > 0) {MatZeroEntries(C1);}

    /*
       Set matrix entries in parallel.  Also, log the number of flops
       for computing matrix entries.
        - Each processor needs to insert only elements that it owns
          locally (but any non-local elements will be sent to the
          appropriate processor during matrix assembly).
        - Always specify global rows and columns of matrix entries.
    */
    for (Ii=Istart; Ii<Iend; Ii++) {
      v = -1.0; i = Ii/n; j = Ii - i*n;
      if (i>0)   {J = Ii - n; MatSetValues(C1,1,&Ii,1,&J,&v,ADD_VALUES);}
      if (i<m-1) {J = Ii + n; MatSetValues(C1,1,&Ii,1,&J,&v,ADD_VALUES);}
      if (j>0)   {J = Ii - 1; MatSetValues(C1,1,&Ii,1,&J,&v,ADD_VALUES);}
      if (j<n-1) {J = Ii + 1; MatSetValues(C1,1,&Ii,1,&J,&v,ADD_VALUES);}
      v = 4.0; MatSetValues(C1,1,&Ii,1,&Ii,&v,ADD_VALUES);
    }
    for (Ii=Istart; Ii<Iend; Ii++) { /* make the matrix nonsymmetric */
      v = -1.0*(t+0.5); i = Ii/n;
      if (i>0)   {J = Ii - n; MatSetValues(C1,1,&Ii,1,&J,&v,ADD_VALUES);}
    }
    PetscLogFlops(2.0*(Iend-Istart));

    /*
       Assemble matrix, using the 2-step process:
         MatAssemblyBegin(), MatAssemblyEnd()
       Computations can be done while messages are in transit
       by placing code between these two statements.
    */
    MatAssemblyBegin(C1,MAT_FINAL_ASSEMBLY);
    MatAssemblyEnd(C1,MAT_FINAL_ASSEMBLY);

    /*
       Keep allowing new nonzero locations when C1 is reassembled (this
       is the default; contrast with C2 below, where new nonzero
       locations are disallowed to assert an unchanged sparsity pattern)
    */
    MatSetOption(C1,MAT_NEW_NONZERO_LOCATIONS,PETSC_TRUE);

    /*
       Compute right-hand-side vector
    */
    MatMult(C1,u,b1);

    /*
       Set operators. Here the matrix that defines the linear system
       also serves as the preconditioning matrix.
        - The flag SAME_NONZERO_PATTERN indicates that the
          preconditioning matrix has the same nonzero structure
          as during the last linear solve (although the values of
          the entries have changed). Thus, we can save some
          work in setting up the preconditioner (e.g., no need to
          redo symbolic factorization for ILU/ICC preconditioners).
        - If the nonzero structure of the matrix is different during
          the second linear solve, then the flag DIFFERENT_NONZERO_PATTERN
          must be used instead.  If you are unsure whether the
          matrix structure has changed, use the flag
          DIFFERENT_NONZERO_PATTERN.
        - Caution:  If you specify SAME_NONZERO_PATTERN, PETSc
          believes your assertion and does not check the structure
          of the matrix.  If you erroneously claim that the structure
          is the same when it actually is not, the new preconditioner
          will not function correctly.  Thus, use this optimization
          feature with caution!
    */
    KSPSetOperators(ksp1,C1,C1,SAME_NONZERO_PATTERN);
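
    /*
       For instance, had the sparsity pattern of C1 changed between
       solves, the call above would instead be (a sketch):
         KSPSetOperators(ksp1,C1,C1,DIFFERENT_NONZERO_PATTERN);
    */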

    /*
       Use the previous solution of linear system #1 as the initial
       guess for the next solve of linear system #1.  The user MUST
       call KSPSetInitialGuessNonzero() to indicate use of an initial
       guess vector; otherwise, an initial guess of zero is used.
    */
    if (t>0) {
      KSPSetInitialGuessNonzero(ksp1,PETSC_TRUE);
    }
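
    /*
       (Equivalently, this behavior can be requested at runtime with the
        standard option -ksp_initial_guess_nonzero.)
    */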

    /*
       Solve the first linear system.  Here we explicitly call
       KSPSetUp() for more detailed performance monitoring of
       certain preconditioners, such as ICC and ILU.  This call
       is optional, as KSPSetUp() will automatically be called
       within KSPSolve() if it hasn't been called already.
    */
    KSPSetUp(ksp1);
    KSPSolve(ksp1,b1,x1);
    KSPGetIterationNumber(ksp1,&its);

    /*
       Check error of solution to first linear system
    */
    CheckError(u,x1,b1,its,1.e-4,CHECK_ERROR);

    /* - - - - - - - - - - - - Stage 2: - - - - - - - - - - - - - -
                 Assemble and solve second linear system
       - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

    /*
       Conclude profiling stage #1; begin profiling stage #2
    */
    PetscLogStagePop();
    PetscLogStagePush(stages[2]);

    /*
       Initialize all matrix entries to zero
    */
    if (t > 0) {MatZeroEntries(C2);}

    /*
       Assemble matrix in parallel. Also, log the number of flops
       for computing matrix entries.
        - To illustrate the features of parallel matrix assembly, we
          intentionally set the values differently from the way in
          which the matrix is distributed across the processors.  Each
          entry that is not owned locally will be sent to the appropriate
          processor during MatAssemblyBegin() and MatAssemblyEnd().
        - For best efficiency the user should strive to set as many
          entries locally as possible.
    */
    for (i=0; i<m; i++) {
      for (j=2*rank; j<2*rank+2; j++) {
        v = -1.0;  Ii = j + n*i;
        if (i>0)   {J = Ii - n; MatSetValues(C2,1,&Ii,1,&J,&v,ADD_VALUES);}
        if (i<m-1) {J = Ii + n; MatSetValues(C2,1,&Ii,1,&J,&v,ADD_VALUES);}
        if (j>0)   {J = Ii - 1; MatSetValues(C2,1,&Ii,1,&J,&v,ADD_VALUES);}
        if (j<n-1) {J = Ii + 1; MatSetValues(C2,1,&Ii,1,&J,&v,ADD_VALUES);}
        v = 6.0 + t*0.5; MatSetValues(C2,1,&Ii,1,&Ii,&v,ADD_VALUES);
      }
    }
    for (Ii=Istart2; Ii<Iend2; Ii++) { /* make the matrix nonsymmetric */
      v = -1.0*(t+0.5); i = Ii/n;
      if (i>0)   {J = Ii - n; MatSetValues(C2,1,&Ii,1,&J,&v,ADD_VALUES);}
    }
    MatAssemblyBegin(C2,MAT_FINAL_ASSEMBLY);
    MatAssemblyEnd(C2,MAT_FINAL_ASSEMBLY);
    PetscLogFlops(2.0*(Iend-Istart));

    /*
       Disallow new nonzero locations, asserting that successive linear
       system matrices share the same nonzero structure
    */
    MatSetOption(C2,MAT_NEW_NONZERO_LOCATIONS,PETSC_FALSE);

    /*
       Compute right-hand-side vector
    */
    MatMult(C2,u,b2);

    /*
       Set operators. Here the matrix that defines the linear system
       also serves as the preconditioning matrix.  Indicate the same
       nonzero structure of successive preconditioner matrices by
       setting the flag SAME_NONZERO_PATTERN.
    */
    KSPSetOperators(ksp2,C2,C2,SAME_NONZERO_PATTERN);

    /*
       Solve the second linear system
    */
    KSPSetUp(ksp2);
    KSPSolve(ksp2,b2,x2);
    KSPGetIterationNumber(ksp2,&its);

    /*
       Check error of solution to second linear system
    */
    CheckError(u,x2,b2,its,1.e-4,CHECK_ERROR);

    /*
       Conclude profiling stage #2
    */
    PetscLogStagePop();
  }
  /* --------------------------------------------------------------
                       End of linear solver loop
     -------------------------------------------------------------- */

  /*
     Free work space.  All PETSc objects should be destroyed when they
     are no longer needed.
  */
  KSPDestroy(&ksp1); KSPDestroy(&ksp2);
  VecDestroy(&x1);   VecDestroy(&x2);
  VecDestroy(&b1);   VecDestroy(&b2);
  MatDestroy(&C1);   MatDestroy(&C2);
  VecDestroy(&u);

  PetscFinalize();
  return 0;
}
/* ------------------------------------------------------------- */
/*
    CheckError - Checks the error of the solution.

    Input Parameters:
    u - exact solution
    x - approximate solution
    b - work vector
    its - number of iterations for convergence
    tol - tolerance
    CHECK_ERROR - the event number for error checking
                  (for use with profiling)

    Notes:
    In order to profile this section of code separately from the
    rest of the program, we register it as an "event" with
    PetscLogEventRegister() in the main program.  Then, we indicate
    the start and end of this event by respectively calling
        PetscLogEventBegin(CHECK_ERROR,u,x,b,0);
        PetscLogEventEnd(CHECK_ERROR,u,x,b,0);
    Here, we specify the objects most closely associated with
    the event (the vectors u,x,b).  Such information is optional;
    we could simply use 0 for all objects.
*/
PetscErrorCode CheckError(Vec u,Vec x,Vec b,PetscInt its,PetscReal tol,PetscLogEvent CHECK_ERROR)
{
  PetscScalar    none = -1.0;
  PetscReal      norm;

  PetscLogEventBegin(CHECK_ERROR,u,x,b,0);

  /*
     Compute error of the solution, using b as a work vector.
  */
  VecCopy(x,b);
  VecAXPY(b,none,u);
  VecNorm(b,NORM_2,&norm);
  if (norm > tol) {
    PetscPrintf(PETSC_COMM_WORLD,"Norm of error %G, Iterations %D\n",norm,its);
  }
  PetscLogEventEnd(CHECK_ERROR,u,x,b,0);
  return 0;
}
/* ------------------------------------------------------------- */
/*
   MyKSPMonitor - This is a user-defined routine for monitoring
   the KSP iterative solvers.

   Input Parameters:
     ksp   - iterative context
     n     - iteration number
     rnorm - 2-norm (preconditioned) residual value (may be estimated)
     dummy - optional user-defined monitor context (unused here)
*/
PetscErrorCode MyKSPMonitor(KSP ksp,PetscInt n,PetscReal rnorm,void *dummy)
{
  Vec            x;

  /*
     Build the solution vector
  */
  KSPBuildSolution(ksp,NULL,&x);

  /*
     Write the solution vector and residual norm to stdout.
      - PetscPrintf() handles output for multiprocessor jobs
        by printing from only one processor in the communicator.
      - The parallel viewer PETSC_VIEWER_STDOUT_WORLD handles
        data from multiple processors so that the output
        is not jumbled.
  */
  PetscPrintf(PETSC_COMM_WORLD,"iteration %D solution vector:\n",n);
  VecView(x,PETSC_VIEWER_STDOUT_WORLD);
  PetscPrintf(PETSC_COMM_WORLD,"iteration %D KSP Residual norm %14.12e \n",n,rnorm);
  return 0;
}