Actual source code: ex46.c

petsc-3.3-p7 2013-05-11
/* Program usage:  mpiexec -n <procs> ex46 [-help] [all PETSc options] */

static char help[] = "Solves a linear system in parallel with KSP and DM.\n\
Compare this to ex2 which solves the same problem without a DM.\n\n";

/*T
   Concepts: KSP^basic parallel example;
   Concepts: KSP^Laplacian, 2d
   Concepts: DM^using distributed arrays;
   Concepts: Laplacian, 2d
   Processors: n
T*/

/*
  Include "petscdmda.h" so that we can use distributed arrays (DMDAs).
  Include "petscksp.h" so that we can use KSP solvers.  Note that this file
  automatically includes:
     petscsys.h    - base PETSc routines     petscvec.h - vectors
     petscmat.h    - matrices
     petscis.h     - index sets              petscksp.h - Krylov subspace methods
     petscviewer.h - viewers                 petscpc.h  - preconditioners
*/
#include <petscdmda.h>
#include <petscksp.h>

int main(int argc,char **argv)
{
  DM             da;            /* distributed array */
  Vec            x,b,u;         /* approx solution, RHS, exact solution */
  Mat            A;             /* linear system matrix */
  KSP            ksp;           /* linear solver context */
  PetscRandom    rctx;          /* random number generator context */
  PetscReal      norm;          /* norm of solution error */
  PetscInt       i,j,its;
  PetscBool      flg = PETSC_FALSE;
  PetscLogStage  stage;
  DMDALocalInfo  info;

  PetscInitialize(&argc,&argv,(char*)0,help);

  /*
     Create distributed array to handle parallel distribution.
     The problem size will default to 8 by 7, but this can be
     changed using -da_grid_x M -da_grid_y N
  */
  DMDACreate2d(PETSC_COMM_WORLD,DMDA_BOUNDARY_NONE,DMDA_BOUNDARY_NONE,DMDA_STENCIL_STAR,-8,-7,PETSC_DECIDE,PETSC_DECIDE,1,1,PETSC_NULL,PETSC_NULL,&da);
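  /*
     The negative global sizes -8,-7 mark the defaults as user-changeable:
     running with -da_grid_x M -da_grid_y N replaces them.  A minimal sketch
     of querying the grid that was actually created, reusing the same
     DMDAGetLocalInfo() call the assembly loop below relies on (ginfo is
     only an illustrative local variable):

       DMDALocalInfo ginfo;
       DMDAGetLocalInfo(da,&ginfo);
       PetscPrintf(PETSC_COMM_WORLD,"global grid is %D x %D\n",ginfo.mx,ginfo.my);

     DMView(da,PETSC_VIEWER_STDOUT_WORLD) prints the full parallel layout.
  */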

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
         Compute the matrix and right-hand-side vector that define
         the linear system, Ax = b.
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  /*
     Create parallel matrix preallocated according to the DMDA, format AIJ by default.
     To use symmetric storage, run with -dm_mat_type sbaij -mat_ignore_lower_triangular
  */
  DMCreateMatrix(da,MATAIJ,&A);
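  /*
     The same choice could be made in the code rather than on the command
     line -- a sketch, assuming MATSBAIJ and the MAT_IGNORE_LOWER_TRIANGULAR
     option behave in this release as they do for SBAIJ matrices generally:

       DMCreateMatrix(da,MATSBAIJ,&A);
       MatSetOption(A,MAT_IGNORE_LOWER_TRIANGULAR,PETSC_TRUE);

     so that only the upper triangle of each stencil row is stored and the
     lower-triangular insertions in the loop below are silently dropped.
  */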

  /*
     Set matrix elements for the 2-D, five-point stencil in parallel.
      - Each processor needs to insert only elements that it owns
        locally (but any non-local elements will be sent to the
        appropriate processor during matrix assembly).
      - Rows and columns are specified by the stencil.
      - Entries are normalized for a domain [0,1]x[0,1]; a worked
        interior-row example follows the assembly loop below.
   */
  PetscLogStageRegister("Assembly",&stage);
  PetscLogStagePush(stage);
  DMDAGetLocalInfo(da,&info);
  for (j=info.ys; j<info.ys+info.ym; j++) {
    for (i=info.xs; i<info.xs+info.xm; i++) {
      PetscReal   hx = 1./info.mx,hy = 1./info.my;
      MatStencil  row = {0},col[5] = {{0}};
      PetscScalar v[5];
      PetscInt    ncols = 0;
      row.j = j; row.i = i;
      col[ncols].j = j; col[ncols].i = i; v[ncols++] = 2*(hx/hy + hy/hx);
      /* boundaries */
      if (i>0)         {col[ncols].j = j;   col[ncols].i = i-1; v[ncols++] = -hy/hx;}
      if (i<info.mx-1) {col[ncols].j = j;   col[ncols].i = i+1; v[ncols++] = -hy/hx;}
      if (j>0)         {col[ncols].j = j-1; col[ncols].i = i;   v[ncols++] = -hx/hy;}
      if (j<info.my-1) {col[ncols].j = j+1; col[ncols].i = i;   v[ncols++] = -hx/hy;}
      MatSetValuesStencil(A,1,&row,ncols,col,v,INSERT_VALUES);
    }
  }
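  /*
     Worked example for an interior row: the assembled entries are the
     standard 5-point finite-difference Laplacian scaled by hx*hy.  On a
     uniform square grid (hx = hy) the row reduces to

         4 on the diagonal,  -1 on each of the four neighbours,

     since 2*(hx/hy + hy/hx) = 4 and -hy/hx = -hx/hy = -1.  Running a small
     case, e.g. -da_grid_x 4 -da_grid_y 4 together with -mat_view, should
     print the assembled matrix so this can be checked directly.
  */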

  /*
     Assemble matrix, using the 2-step process:
       MatAssemblyBegin(), MatAssemblyEnd()
     Computations can be done while messages are in transit
     by placing code between these two statements.
  */
  MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
  MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
  PetscLogStagePop();
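  /*
     A sketch of how that overlap could be exploited (a hypothetical
     rearrangement of this example, not what the code above does):

       MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
       VecSet(u,1.);            independent work, overlaps the communication
       MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);

     This would require u to exist before this point; here the vectors are
     created just below instead.
  */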

  /*
     Create parallel vectors compatible with the DMDA.
  */
  DMCreateGlobalVector(da,&u);
  VecDuplicate(u,&b);
  VecDuplicate(u,&x);
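  /*
     A sketch (assuming the petsc-3.3 calling sequence) of how entries of a
     DMDA global vector can be addressed with natural (i,j) grid indices
     rather than a flat index:

       PetscScalar **uarray;
       DMDAVecGetArray(da,u,&uarray);
       for (j=info.ys; j<info.ys+info.ym; j++)
         for (i=info.xs; i<info.xs+info.xm; i++)
           uarray[j][i] = 1.0;
       DMDAVecRestoreArray(da,u,&uarray);

     This is not needed below; VecSet()/VecSetRandom() are sufficient here.
  */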

  /*
     Set exact solution; then compute right-hand-side vector.
     By default we use an exact solution vector with all elements
     equal to 1.0.  Alternatively, the runtime option -random_exact_sol
     forms a solution vector with random components.
  */
  PetscOptionsGetBool(PETSC_NULL,"-random_exact_sol",&flg,PETSC_NULL);
  if (flg) {
    PetscRandomCreate(PETSC_COMM_WORLD,&rctx);
    PetscRandomSetFromOptions(rctx);
    VecSetRandom(u,rctx);
    PetscRandomDestroy(&rctx);
  } else {
    VecSet(u,1.);
  }
  MatMult(A,u,b);
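  /*
     If reproducible random components are wanted, the generator could be
     seeded explicitly before VecSetRandom() -- a sketch, assuming the
     petsc-3.3 PetscRandom interface (the seed value is arbitrary):

       PetscRandomSetSeed(rctx,12345);
       PetscRandomSeed(rctx);

     In either case b is computed as b = A*u, so u is the exact solution of
     the discrete system regardless of how it was filled.
  */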

  /*
     View the exact solution vector if desired
  */
  flg  = PETSC_FALSE;
  PetscOptionsGetBool(PETSC_NULL,"-view_exact_sol",&flg,PETSC_NULL);
  if (flg) {VecView(u,PETSC_VIEWER_STDOUT_WORLD);}
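  /*
     A sketch of sending the same output to a file instead of stdout
     (the file name "exact_sol.out" is only an illustration):

       PetscViewer viewer;
       PetscViewerASCIIOpen(PETSC_COMM_WORLD,"exact_sol.out",&viewer);
       VecView(u,viewer);
       PetscViewerDestroy(&viewer);
  */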

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
                Create the linear solver and set various options
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  /*
     Create linear solver context
  */
  KSPCreate(PETSC_COMM_WORLD,&ksp);

  /*
     Set operators. Here the matrix that defines the linear system
     also serves as the preconditioning matrix.
  */
  KSPSetOperators(ksp,A,A,DIFFERENT_NONZERO_PATTERN);
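  /*
     Defaults could also be set programmatically before KSPSetFromOptions()
     is called.  A sketch in the spirit of ex2 (the tolerance value is only
     illustrative):

       PC pc;
       KSPGetPC(ksp,&pc);
       PCSetType(pc,PCJACOBI);
       KSPSetTolerances(ksp,1.e-7,PETSC_DEFAULT,PETSC_DEFAULT,PETSC_DEFAULT);
  */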

  /*
     Set runtime options, e.g.,
        -ksp_type <type> -pc_type <type> -ksp_monitor -ksp_rtol <rtol>
     These options will override those specified above as long as
     KSPSetFromOptions() is called _after_ any other customization
     routines.
  */
  KSPSetFromOptions(ksp);
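  /*
     For example, a run on four processes with a larger grid, conjugate
     gradients, and block Jacobi preconditioning might look like this
     (an illustrative command line, not taken from the PETSc test suite):

       mpiexec -n 4 ./ex46 -da_grid_x 100 -da_grid_y 100 \
               -ksp_type cg -pc_type bjacobi -ksp_monitor -ksp_rtol 1e-8
  */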

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
                      Solve the linear system
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  KSPSolve(ksp,b,x);
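  /*
     A sketch of checking whether the solve actually converged (converged
     reason codes are positive, diverged ones negative):

       KSPConvergedReason reason;
       KSPGetConvergedReason(ksp,&reason);
       if (reason < 0) PetscPrintf(PETSC_COMM_WORLD,"KSP diverged: reason %d\n",(int)reason);
  */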

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
                      Check solution and clean up
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  /*
     Check the error
  */
  VecAXPY(x,-1.,u);
  VecNorm(x,NORM_2,&norm);
  KSPGetIterationNumber(ksp,&its);
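  /*
     Note that VecAXPY() overwrites x with the error x - u, so x no longer
     holds the computed solution after this point.  A sketch of reporting a
     relative error instead of the absolute 2-norm (unorm is a hypothetical
     extra variable):

       PetscReal unorm;
       VecNorm(u,NORM_2,&unorm);
       PetscPrintf(PETSC_COMM_WORLD,"Relative error %G\n",norm/unorm);
  */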

  /*
     Print convergence information.  PetscPrintf() produces a single
     print statement from all processes that share a communicator.
     An alternative is PetscFPrintf(), which prints to a file.
  */
  PetscPrintf(PETSC_COMM_WORLD,"Norm of error %G iterations %D\n",norm,its);
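  /*
     If per-process output is wanted instead (e.g. to inspect the grid
     partitioning), a sketch using the synchronized printing routines:

       PetscMPIInt rank;
       MPI_Comm_rank(PETSC_COMM_WORLD,&rank);
       PetscSynchronizedPrintf(PETSC_COMM_WORLD,"[%d] owns %D x %D grid points\n",
                               rank,info.xm,info.ym);
       PetscSynchronizedFlush(PETSC_COMM_WORLD);
  */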

  /*
     Free work space.  All PETSc objects should be destroyed when they
     are no longer needed.
  */
  KSPDestroy(&ksp);
  VecDestroy(&u);
  VecDestroy(&x);
  VecDestroy(&b);
  MatDestroy(&A);
  DMDestroy(&da);

  /*
     Always call PetscFinalize() before exiting a program.  This routine
       - finalizes the PETSc libraries as well as MPI
       - provides summary and diagnostic information if certain runtime
         options are chosen (e.g., -log_summary).
  */
  PetscFinalize();
  return 0;
}