Actual source code: ex13.c

static char help[] = "Solves a variable Poisson problem with KSP.\n\n";

/*
  Include "petscksp.h" so that we can use KSP solvers.  Note that this file
  automatically includes:
     petscsys.h    - base PETSc routines     petscvec.h - vectors
     petscmat.h    - matrices
     petscis.h     - index sets              petscksp.h - Krylov subspace methods
     petscviewer.h - viewers                 petscpc.h  - preconditioners
*/
#include <petscksp.h>

/*
    User-defined context that contains all the data structures used
    in the linear solution process.
*/
typedef struct {
  Vec         x, b;     /* solution vector, right-hand-side vector */
  Mat         A;        /* sparse matrix */
  KSP         ksp;      /* linear solver context */
  PetscInt    m, n;     /* grid dimensions */
  PetscScalar hx2, hy2; /* 1/(hx*hx) = (m+1)*(m+1) and 1/(hy*hy) = (n+1)*(n+1) */
} UserCtx;

extern PetscErrorCode UserInitializeLinearSolver(PetscInt, PetscInt, UserCtx *);
extern PetscErrorCode UserFinalizeLinearSolver(UserCtx *);
extern PetscErrorCode UserDoLinearSolver(PetscScalar *rho, UserCtx *userctx, PetscScalar *b, PetscScalar *x);

int main(int argc, char **args)
{
  UserCtx      userctx;
  PetscInt     m = 6, n = 7, t, tmax = 2, i, Ii, j, N;
  PetscScalar *userx, *rho, *solution, *userb, hx, hy, x, y;
  PetscReal    enorm;

  /*
     Initialize the PETSc libraries
  */
  PetscFunctionBeginUser;
  PetscCall(PetscInitialize(&argc, &args, (char *)0, help));
  /*
     The next two lines are for testing only; these allow the user to
     decide the grid size at runtime.
  */
  PetscCall(PetscOptionsGetInt(NULL, NULL, "-m", &m, NULL));
  PetscCall(PetscOptionsGetInt(NULL, NULL, "-n", &n, NULL));
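
  /*
     For example (an illustrative invocation; the executable name depends
     on how this file is built):
         ./ex13 -m 19 -n 20
     solves on a 19 x 20 interior grid, matching the test at the bottom
     of this file.
  */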

  /*
     Create the empty sparse matrix and linear solver data structures
  */
  PetscCall(UserInitializeLinearSolver(m, n, &userctx));
  N = m * n;

  /*
     Allocate arrays to hold the solution to the linear system.  This is
     not normally done in PETSc programs, but here we pretend the routines
     are being called from a non-PETSc application, so in the context of a
     larger application these arrays would be provided by other (non-PETSc)
     parts of the application code.
  */
  PetscCall(PetscMalloc1(N, &userx));
  PetscCall(PetscMalloc1(N, &userb));
  PetscCall(PetscMalloc1(N, &solution));

  /*
     Allocate an array to hold the coefficients in the elliptic operator
  */
  PetscCall(PetscMalloc1(N, &rho));

  /*
     Fill the array rho[] with the function rho(x,y) = x; fill the
     right-hand side b and the solution with a known problem for testing.
  */
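  /*
     A worked check of the right-hand side used below: with rho(x,y) = x
     and the exact solution
         psi(x,y) = sin(2 pi x) sin(2 pi y),
     the operator gives
         -div(rho grad psi) = -d/dx(2 pi x cos(2 pi x) sin(2 pi y))
                              -d/dy(2 pi x sin(2 pi x) cos(2 pi y))
                            = -2 pi cos(2 pi x) sin(2 pi y)
                              + 8 pi^2 x sin(2 pi x) sin(2 pi y),
     which is exactly the expression assigned to userb[] below.
  */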
  hx = 1.0 / (m + 1);
  hy = 1.0 / (n + 1);
  y  = hy;
  Ii = 0;
  for (j = 0; j < n; j++) {
    x = hx;
    for (i = 0; i < m; i++) {
      rho[Ii]      = x;
      solution[Ii] = PetscSinScalar(2. * PETSC_PI * x) * PetscSinScalar(2. * PETSC_PI * y);
      userb[Ii]    = -2 * PETSC_PI * PetscCosScalar(2 * PETSC_PI * x) * PetscSinScalar(2 * PETSC_PI * y) + 8 * PETSC_PI * PETSC_PI * x * PetscSinScalar(2 * PETSC_PI * x) * PetscSinScalar(2 * PETSC_PI * y);
      x += hx;
      Ii++;
    }
    y += hy;
  }

  /*
     Loop over a number of timesteps, setting up and solving the linear
     system for each timestep.

     Note this is somewhat artificial. It is intended to demonstrate how
     one may reuse the linear solver objects in each timestep.
  */
  for (t = 0; t < tmax; t++) {
    PetscCall(UserDoLinearSolver(rho, &userctx, userb, userx));

    /*
        Compute error: Note that this could (and usually should) all be done
        using the PETSc vector operations. Here we demonstrate using more
        standard programming practices to show how they may be mixed with
        PETSc.
    */
    enorm = 0.0;
    for (i = 0; i < N; i++) enorm += PetscRealPart(PetscConj(solution[i] - userx[i]) * (solution[i] - userx[i]));
    enorm *= PetscRealPart(hx * hy);
    PetscCall(PetscPrintf(PETSC_COMM_WORLD, "m %" PetscInt_FMT " n %" PetscInt_FMT " error norm %g\n", m, n, (double)enorm));
  }
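
  /*
     As the comment above notes, the error could be computed with PETSc
     vector operations instead of a raw loop. A minimal sketch (the vector
     err and the variable nrm are illustrative names, not part of this
     example):

         Vec       err;
         PetscReal nrm;
         PetscCall(VecCreateSeq(PETSC_COMM_SELF, N, &err));
         PetscCall(VecPlaceArray(userctx.x, userx));    // view userx as a Vec
         PetscCall(VecCopy(userctx.x, err));
         PetscCall(VecResetArray(userctx.x));
         PetscCall(VecPlaceArray(userctx.x, solution)); // view solution as a Vec
         PetscCall(VecAXPY(err, -1.0, userctx.x));      // err <- userx - solution
         PetscCall(VecResetArray(userctx.x));
         PetscCall(VecNorm(err, NORM_2, &nrm));         // 2-norm of the error
         PetscCall(VecDestroy(&err));

     The enorm printed above is hx*hy times the square of this 2-norm.
  */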

  /*
     We are all finished solving linear systems, so we clean up the
     data structures.
  */
  PetscCall(PetscFree(rho));
  PetscCall(PetscFree(solution));
  PetscCall(PetscFree(userx));
  PetscCall(PetscFree(userb));
  PetscCall(UserFinalizeLinearSolver(&userctx));
  PetscCall(PetscFinalize());
  return 0;
}

/* ------------------------------------------------------------------------*/
PetscErrorCode UserInitializeLinearSolver(PetscInt m, PetscInt n, UserCtx *userctx)
{
  PetscInt N;

  PetscFunctionBeginUser;
  /*
     Here we assume use of a grid of size m x n, with all points on the
     interior of the domain, i.e., we do not include the points corresponding
     to homogeneous Dirichlet boundary conditions.  We assume that the domain
     is [0,1]x[0,1].
  */
  userctx->m   = m;
  userctx->n   = n;
  userctx->hx2 = (m + 1) * (m + 1);
  userctx->hy2 = (n + 1) * (n + 1);
  N            = m * n;
  /*
     Create the sparse matrix. Preallocate 5 nonzeros per row.
  */
  PetscCall(MatCreateSeqAIJ(PETSC_COMM_SELF, N, N, 5, NULL, &userctx->A));
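
  /*
     The preallocation of 5 nonzeros per row matches the 5-point stencil
     assembled in UserDoLinearSolver(): one diagonal entry plus up to four
     neighbor couplings per grid point.
  */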

  /*
     Create vectors. Here we create vectors with no memory allocated.
     This way, we can use the data structures already in the program
     by calling VecPlaceArray() at a later stage.
  */
  PetscCall(VecCreateSeqWithArray(PETSC_COMM_SELF, 1, N, NULL, &userctx->b));
  PetscCall(VecDuplicate(userctx->b, &userctx->x));

  /*
     Create linear solver context. This will be used repeatedly for all
     the linear solves needed.
  */
  PetscCall(KSPCreate(PETSC_COMM_SELF, &userctx->ksp));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*
   Solves -div (rho grad psi) = F using finite differences.
   rho is a 2-dimensional array of size m by n, stored in Fortran
   style by columns. userb is a standard one-dimensional array.
*/
/* ------------------------------------------------------------------------*/
PetscErrorCode UserDoLinearSolver(PetscScalar *rho, UserCtx *userctx, PetscScalar *userb, PetscScalar *userx)
{
  PetscInt    i, j, Ii, J, m = userctx->m, n = userctx->n;
  Mat         A = userctx->A;
  PC          pc;
  PetscScalar v, hx2 = userctx->hx2, hy2 = userctx->hy2;

  PetscFunctionBeginUser;
  /*
     This is not the most efficient way of generating the matrix,
     but let's not worry about it. We should have separate code for
     the four corners, each edge, and then the interior. Then we won't
     have the slow if-tests inside the loop.

     Computes the operator
             -div rho grad
     on an m by n grid with zero Dirichlet boundary conditions. rho
     is assumed to be given at the same grid points where the finite
     difference stencil is applied.  For a staggered grid, one would
     have to change things slightly.
  */
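
  /*
     Concretely, the loop below inserts, for each index Ii with grid
     coordinates (i,j), the row entries

         -(rho[Ii]+rho[Ii-m])/2 * hy2   in column Ii-m   (south neighbor)
         -(rho[Ii]+rho[Ii+m])/2 * hy2   in column Ii+m   (north neighbor)
         -(rho[Ii]+rho[Ii-1])/2 * hx2   in column Ii-1   (west neighbor)
         -(rho[Ii]+rho[Ii+1])/2 * hx2   in column Ii+1   (east neighbor)
          2*rho[Ii]*(hx2+hy2)           on the diagonal,

     i.e. a 5-point stencil with the coefficient averaged between adjacent
     grid points. Connections that would leave the grid are simply dropped,
     which imposes the homogeneous Dirichlet boundary conditions.
  */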
  Ii = 0;
  for (j = 0; j < n; j++) {
    for (i = 0; i < m; i++) {
      if (j > 0) {
        J = Ii - m;
        v = -.5 * (rho[Ii] + rho[J]) * hy2;
        PetscCall(MatSetValues(A, 1, &Ii, 1, &J, &v, INSERT_VALUES));
      }
      if (j < n - 1) {
        J = Ii + m;
        v = -.5 * (rho[Ii] + rho[J]) * hy2;
        PetscCall(MatSetValues(A, 1, &Ii, 1, &J, &v, INSERT_VALUES));
      }
      if (i > 0) {
        J = Ii - 1;
        v = -.5 * (rho[Ii] + rho[J]) * hx2;
        PetscCall(MatSetValues(A, 1, &Ii, 1, &J, &v, INSERT_VALUES));
      }
      if (i < m - 1) {
        J = Ii + 1;
        v = -.5 * (rho[Ii] + rho[J]) * hx2;
        PetscCall(MatSetValues(A, 1, &Ii, 1, &J, &v, INSERT_VALUES));
      }
      v = 2.0 * rho[Ii] * (hx2 + hy2);
      PetscCall(MatSetValues(A, 1, &Ii, 1, &Ii, &v, INSERT_VALUES));
      Ii++;
    }
  }

  /*
     Assemble matrix
  */
  PetscCall(MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY));
  PetscCall(MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY));

  /*
     Set operators. Here the matrix that defines the linear system
     also serves as the preconditioning matrix. Since the same matrix
     object, with the same nonzero pattern, is passed for every solve,
     the linear solvers can take advantage of this.
  */
  PetscCall(KSPSetOperators(userctx->ksp, A, A));

  /*
     Set linear solver defaults for this problem (optional).
     - Here we set it to use direct LU factorization for the solution
  */
  PetscCall(KSPGetPC(userctx->ksp, &pc));
  PetscCall(PCSetType(pc, PCLU));

  /*
     Set runtime options, e.g.,
        -ksp_type <type> -pc_type <type> -ksp_monitor -ksp_rtol <rtol>
     These options will override those specified above as long as
     KSPSetFromOptions() is called _after_ any other customization
     routines.

     Run the program with the option -help to see all the possible
     linear solver options.
  */
  PetscCall(KSPSetFromOptions(userctx->ksp));
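
  /*
     For instance, running with
         -ksp_type gmres -pc_type ilu -ksp_monitor
     (an illustrative combination, not required by this example) would
     replace the direct LU solve set above with ILU-preconditioned GMRES
     and print the residual norm at each iteration.
  */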

  /*
     This allows the PETSc linear solvers to compute the solution
     directly in the user's array rather than in the PETSc vector.

     This is essentially a hack and not highly recommended unless you
     are quite comfortable with using PETSc. In general, users should
     write their entire application using PETSc vectors rather than
     arrays.
  */
  PetscCall(VecPlaceArray(userctx->x, userx));
  PetscCall(VecPlaceArray(userctx->b, userb));
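
  /*
     Note: each VecPlaceArray() should be undone with a matching
     VecResetArray() (done below, right after the solve) before the user
     array is freed or the vector is destroyed.
  */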

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
                      Solve the linear system
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  PetscCall(KSPSolve(userctx->ksp, userctx->b, userctx->x));

  /*
    Put back the PETSc arrays that belong in the vectors userctx->x
    and userctx->b
  */
  PetscCall(VecResetArray(userctx->x));
  PetscCall(VecResetArray(userctx->b));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* ------------------------------------------------------------------------*/
PetscErrorCode UserFinalizeLinearSolver(UserCtx *userctx)
{
  /*
     We are all done and don't need to solve any more linear systems, so
     we free the work space.  All PETSc objects should be destroyed when
     they are no longer needed.
  */
  PetscFunctionBeginUser;
  PetscCall(KSPDestroy(&userctx->ksp));
  PetscCall(VecDestroy(&userctx->x));
  PetscCall(VecDestroy(&userctx->b));
  PetscCall(MatDestroy(&userctx->A));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*TEST

   test:
      args: -m 19 -n 20 -ksp_gmres_cgs_refinement_type refine_always

TEST*/