Actual source code: ex13.c

petsc-3.3-p7 2013-05-11
static char help[] = "Solves a variable Poisson problem with KSP.\n\n";

/*T
   Concepts: KSP^basic sequential example
   Concepts: KSP^Laplacian, 2d
   Concepts: Laplacian, 2d
   Processors: 1
T*/

/*
  Include "petscksp.h" so that we can use KSP solvers.  Note that this file
  automatically includes:
     petscsys.h       - base PETSc routines   petscvec.h - vectors
     petscmat.h - matrices
     petscis.h     - index sets            petscksp.h - Krylov subspace methods
     petscviewer.h - viewers               petscpc.h  - preconditioners
*/
#include <petscksp.h>

/*
    User-defined context that contains all the data structures used
    in the linear solution process.
*/
typedef struct {
   Vec         x,b;      /* solution vector, right-hand-side vector */
   Mat         A;        /* sparse matrix */
   KSP         ksp;      /* linear solver context */
   PetscInt    m,n;      /* grid dimensions */
   PetscScalar hx2,hy2;  /* 1/(hx*hx) = (m+1)^2 and 1/(hy*hy) = (n+1)^2 */
} UserCtx;

extern PetscErrorCode UserInitializeLinearSolver(PetscInt,PetscInt,UserCtx *);
extern PetscErrorCode UserFinalizeLinearSolver(UserCtx *);
extern PetscErrorCode UserDoLinearSolver(PetscScalar *,UserCtx *userctx,PetscScalar *b,PetscScalar *x);

int main(int argc,char **args)
{
  UserCtx        userctx;
  PetscInt       m = 6,n = 7,t,tmax = 2,i,Ii,j,N;
  PetscScalar    *userx,*rho,*solution,*userb,hx,hy,x,y;
  PetscReal      enorm;
  /*
     Initialize the PETSc libraries
  */
  PetscInitialize(&argc,&args,(char *)0,help);

  /*
     The next two lines are for testing only; these allow the user to
     decide the grid size at runtime.
  */
  PetscOptionsGetInt(PETSC_NULL,"-m",&m,PETSC_NULL);
  PetscOptionsGetInt(PETSC_NULL,"-n",&n,PETSC_NULL);

  /*
     Create the empty sparse matrix and linear solver data structures
  */
  UserInitializeLinearSolver(m,n,&userctx);
  N    = m*n;

  /*
     Allocate arrays to hold the solution to the linear system.
     This is not normally done in PETSc programs, but since these
     routines are meant to be called from a non-PETSc program, we
     want to reuse that program's existing data structures. In the
     context of a larger application, these arrays would be provided
     by other (non-PETSc) parts of the application code.
  */
  PetscMalloc(N*sizeof(PetscScalar),&userx);
  PetscMalloc(N*sizeof(PetscScalar),&userb);
  PetscMalloc(N*sizeof(PetscScalar),&solution);

  /*
      Allocate an array to hold the coefficients in the elliptic operator
  */
  PetscMalloc(N*sizeof(PetscScalar),&rho);

  /*
     Fill up the array rho[] with the function rho(x,y) = x; fill the
     right-hand-side b[] and the solution with a known problem for testing.
  */
  hx = 1.0/(m+1);
  hy = 1.0/(n+1);
  y  = hy;
  Ii = 0;
  for (j=0; j<n; j++) {
    x = hx;
    for (i=0; i<m; i++) {
      rho[Ii]      = x;
      solution[Ii] = PetscSinScalar(2.*PETSC_PI*x)*PetscSinScalar(2.*PETSC_PI*y);
      userb[Ii]    = -2*PETSC_PI*PetscCosScalar(2*PETSC_PI*x)*PetscSinScalar(2*PETSC_PI*y) +
                      8*PETSC_PI*PETSC_PI*x*PetscSinScalar(2*PETSC_PI*x)*PetscSinScalar(2*PETSC_PI*y);
      x += hx;
      Ii++;
    }
    y += hy;
  }
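
  /*
     A quick check of the manufactured solution: with rho(x,y) = x and
     psi(x,y) = sin(2*pi*x)*sin(2*pi*y),
         -div(rho grad psi) = -2*pi*cos(2*pi*x)*sin(2*pi*y)
                              + 8*pi^2*x*sin(2*pi*x)*sin(2*pi*y),
     which is exactly the right-hand side userb[] computed above, and psi
     vanishes on the boundary of [0,1]x[0,1], matching the homogeneous
     Dirichlet conditions assumed below.
  */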

  /*
     Loop over a number of timesteps, setting up and solving the linear
     system at each timestep.

     Note this is somewhat artificial. It is intended to demonstrate how
     one may reuse the linear solver data structures at each timestep.
  */
  for (t=0; t<tmax; t++) {
    UserDoLinearSolver(rho,&userctx,userb,userx);

    /*
        Compute error: Note that this could (and usually should) all be done
        using the PETSc vector operations. Here we demonstrate using more
        standard programming practices to show how they may be mixed with
        PETSc.
    */
    enorm = 0.0;
    for (i=0; i<N; i++) {
      enorm += PetscRealPart(PetscConj(solution[i]-userx[i])*(solution[i]-userx[i]));
    }
    enorm *= PetscRealPart(hx*hy);
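    /* enorm is now the squared discrete L2 norm of the error,
       hx*hy * sum_i |solution[i] - userx[i]|^2. */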
    PetscPrintf(PETSC_COMM_WORLD,"m %D n %D error norm %G\n",m,n,enorm);
  }

  /*
     We are all finished solving linear systems, so we clean up the
     data structures.
  */
  PetscFree(rho);
  PetscFree(solution);
  PetscFree(userx);
  PetscFree(userb);
  UserFinalizeLinearSolver(&userctx);
  PetscFinalize();

  return 0;
}

/* ------------------------------------------------------------------------*/
PetscErrorCode UserInitializeLinearSolver(PetscInt m,PetscInt n,UserCtx *userctx)
{
  PetscInt       N;

  /*
     Here we assume use of a grid of size m x n, with all points on the
     interior of the domain, i.e., we do not include the points corresponding
     to homogeneous Dirichlet boundary conditions.  We assume that the domain
     is [0,1]x[0,1].
  */
  userctx->m   = m;
  userctx->n   = n;
  userctx->hx2 = (m+1)*(m+1);
  userctx->hy2 = (n+1)*(n+1);
  N            = m*n;

  /*
     Create the sparse matrix. Preallocate 5 nonzeros per row.
  */
  MatCreateSeqAIJ(PETSC_COMM_SELF,N,N,5,0,&userctx->A);
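  /*
     Five nonzeros per row matches the 5-point stencil assembled in
     UserDoLinearSolver(): the diagonal plus (at most) the north, south,
     east, and west neighbours. Preallocating exactly avoids costly memory
     allocations during MatSetValues().
  */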

  /*
     Create vectors. Here we create vectors with no memory allocated.
     This way, we can use the data structures already in the program
     via VecPlaceArray() at a later stage.
  */
  VecCreateSeqWithArray(PETSC_COMM_SELF,1,N,PETSC_NULL,&userctx->b);
  VecDuplicate(userctx->b,&userctx->x);
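  /*
     Passing PETSC_NULL as the array means these vectors have no storage of
     their own yet; UserDoLinearSolver() attaches the caller's arrays with
     VecPlaceArray() before each solve and detaches them with VecResetArray()
     afterwards.
  */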

  /*
     Create linear solver context. This will be used repeatedly for all
     the linear solves needed.
  */
  KSPCreate(PETSC_COMM_SELF,&userctx->ksp);

  return 0;
}

/*
   Solves -div (rho grad psi) = F using finite differences.
   rho is a 2-dimensional array of size m by n, stored in Fortran
   style by columns. userb is a standard one-dimensional array.
*/
/* ------------------------------------------------------------------------*/
PetscErrorCode UserDoLinearSolver(PetscScalar *rho,UserCtx *userctx,PetscScalar *userb,PetscScalar *userx)
{
  PetscInt       i,j,Ii,J,m = userctx->m,n = userctx->n;
  Mat            A = userctx->A;
  PC             pc;
  PetscScalar    v,hx2 = userctx->hx2,hy2 = userctx->hy2;

  /*
     This is not the most efficient way of generating the matrix,
     but let's not worry about it. We should have separate code for
     the four corners, each edge, and then the interior; that way we
     would avoid the slow if-tests inside the loop.

     Computes the operator
             -div rho grad
     on an m by n grid with zero Dirichlet boundary conditions. rho is
     assumed to be given on the same grid on which the finite difference
     stencil is applied.  For a staggered grid, one would have to change
     things slightly.
  */
  Ii = 0;
  for (j=0; j<n; j++) {
    for (i=0; i<m; i++) {
      if (j>0)   {
        J    = Ii - m;
        v    = -.5*(rho[Ii] + rho[J])*hy2;
        MatSetValues(A,1,&Ii,1,&J,&v,INSERT_VALUES);
      }
      if (j<n-1) {
        J    = Ii + m;
        v    = -.5*(rho[Ii] + rho[J])*hy2;
        MatSetValues(A,1,&Ii,1,&J,&v,INSERT_VALUES);
      }
      if (i>0)   {
        J    = Ii - 1;
        v    = -.5*(rho[Ii] + rho[J])*hx2;
        MatSetValues(A,1,&Ii,1,&J,&v,INSERT_VALUES);
      }
      if (i<m-1) {
        J    = Ii + 1;
        v    = -.5*(rho[Ii] + rho[J])*hx2;
        MatSetValues(A,1,&Ii,1,&J,&v,INSERT_VALUES);
      }
      v    = 2.0*rho[Ii]*(hx2+hy2);
      MatSetValues(A,1,&Ii,1,&Ii,&v,INSERT_VALUES);
      Ii++;
    }
  }
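
  /*
     Each row Ii of A now holds the 5-point stencil

        2*rho[Ii]*(hx2+hy2)*u[Ii]
          - 0.5*(rho[Ii]+rho[Ii-1])*hx2*u[Ii-1] - 0.5*(rho[Ii]+rho[Ii+1])*hx2*u[Ii+1]
          - 0.5*(rho[Ii]+rho[Ii-m])*hy2*u[Ii-m] - 0.5*(rho[Ii]+rho[Ii+m])*hy2*u[Ii+m]

     with rho averaged between neighbouring grid points; terms whose
     neighbour lies outside the grid are simply dropped, which imposes the
     homogeneous Dirichlet conditions.
  */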

  /*
     Assemble matrix
  */
  MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
  MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);

  /*
     Set operators. Here the matrix that defines the linear system
     also serves as the preconditioning matrix. Since all the matrices
     here have the same nonzero pattern, we indicate this so that the
     linear solvers can take advantage of it.
  */
  KSPSetOperators(userctx->ksp,A,A,SAME_NONZERO_PATTERN);
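  /* With SAME_NONZERO_PATTERN, on the second and subsequent timesteps the
     preconditioner can keep its symbolic factorization and only redo the
     numeric factorization for the new matrix values. */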

  /*
     Set linear solver defaults for this problem (optional).
     - Here we set it to use direct LU factorization for the solution
  */
  KSPGetPC(userctx->ksp,&pc);
  PCSetType(pc,PCLU);

  /*
     Set runtime options, e.g.,
        -ksp_type <type> -pc_type <type> -ksp_monitor -ksp_rtol <rtol>
     These options will override those specified above as long as
     KSPSetFromOptions() is called _after_ any other customization
     routines.

     Run the program with the option -help to see all the possible
     linear solver options.
  */
  KSPSetFromOptions(userctx->ksp);
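
  /*
     For example, assuming the executable is built as ex13, a run such as

         ./ex13 -m 20 -n 20 -ksp_type gmres -pc_type ilu -ksp_monitor

     would override the LU default chosen above and print the residual norm
     at every Krylov iteration.
  */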

  /*
     This allows the PETSc linear solvers to compute the solution
     directly in the user's array rather than in the PETSc vector.

     This is essentially a hack and not highly recommended unless you
     are quite comfortable with using PETSc. In general, users should
     write their entire application using PETSc vectors rather than
     arrays.
  */
  VecPlaceArray(userctx->x,userx);
  VecPlaceArray(userctx->b,userb);
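  /* VecPlaceArray() makes the vectors use userx and userb directly as their
     storage, so no data is copied; VecResetArray() after the solve detaches
     the arrays again so they can be freed by the caller. */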

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
                      Solve the linear system
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  KSPSolve(userctx->ksp,userctx->b,userctx->x);

  /*
     Put back the PETSc arrays that belong in the vectors userctx->x
     and userctx->b.
  */
  VecResetArray(userctx->x);
  VecResetArray(userctx->b);

  return 0;
}

/* ------------------------------------------------------------------------*/
PetscErrorCode UserFinalizeLinearSolver(UserCtx *userctx)
{
  /*
     We are all done and don't need to solve any more linear systems, so
     we free the work space.  All PETSc objects should be destroyed when
     they are no longer needed.
  */
  KSPDestroy(&userctx->ksp);
  VecDestroy(&userctx->x);
  VecDestroy(&userctx->b);
  MatDestroy(&userctx->A);
  return 0;
}