Actual source code: redistribute.c  (petsc-3.3-p7, 2013-05-11)

/*
  This file defines a preconditioner that redistributes the rows of the problem
  for load balancing, dropping the rows that have only a diagonal entry, and then
  solves the resulting smaller system with an inner KSP.
*/
#include <petsc-private/pcimpl.h>     /*I "petscksp.h" I*/
#include <petscksp.h>

typedef struct {
  KSP          ksp;           /* inner solver applied to the redistributed system */
  Vec          x,b;           /* solution and right-hand side of the redistributed system */
  VecScatter   scatter;       /* maps the retained rows of the original vectors to x and b */
  IS           is;            /* global indices of the rows this process owns after redistribution */
  PetscInt     dcnt,*drows;   /* these are the local rows that have only a diagonal entry */
  PetscScalar  *diag;         /* inverses of the diagonal entries of those rows */
  Vec          work;          /* work vector with the layout of the original problem */
} PC_Redistribute;

static PetscErrorCode PCView_Redistribute(PC pc,PetscViewer viewer)
{
  PC_Redistribute *red = (PC_Redistribute*)pc->data;
  PetscErrorCode  ierr;
  PetscBool       iascii,isstring;
  PetscInt        ncnt,N;

  PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);
  PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERSTRING,&isstring);
  if (iascii) {
    MPI_Allreduce(&red->dcnt,&ncnt,1,MPIU_INT,MPI_SUM,((PetscObject)pc)->comm);
    MatGetSize(pc->pmat,&N,PETSC_NULL);
    PetscViewerASCIIPrintf(viewer,"    Number of rows eliminated %D, percentage of rows eliminated %g\n",ncnt,100.0*((PetscReal)ncnt)/((PetscReal)N));
    PetscViewerASCIIPrintf(viewer,"  Redistribute preconditioner: \n");
    KSPView(red->ksp,viewer);
  } else if (isstring) {
    PetscViewerStringSPrintf(viewer," Redistribute preconditioner");
    KSPView(red->ksp,viewer);
  } else SETERRQ1(((PetscObject)pc)->comm,PETSC_ERR_SUP,"Viewer type %s not supported for PC redistribute",((PetscObject)viewer)->type_name);
  return(0);
}

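/*
   Overview of PCSetUp_Redistribute() below (a descriptive summary of the code, not a
   specification): on the first setup it counts the locally owned rows that have more
   than a diagonal entry, builds a load-balanced PetscLayout for those rows, moves the
   row indices to their new owners with MPI_Isend()/MPI_Irecv(), and from the result
   creates red->is, the VecScatter, and the redistributed submatrix handed to the
   inner KSP.  On subsequent setups only the submatrix is refreshed.  In both cases
   the inverses of the purely diagonal rows are cached in red->diag.
*/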
static PetscErrorCode PCSetUp_Redistribute(PC pc)
{
  PC_Redistribute   *red = (PC_Redistribute*)pc->data;
  PetscErrorCode    ierr;
  MPI_Comm          comm;
  PetscInt          rstart,rend,i,nz,cnt,*rows,ncnt,dcnt,*drows;
  PetscLayout       map,nmap;
  PetscMPIInt       size,imdex,tag,n;
  PetscInt          *source = PETSC_NULL;
  PetscMPIInt       *nprocs = PETSC_NULL,nrecvs;
  PetscInt          j,nsends;
  PetscInt          *owner = PETSC_NULL,*starts = PETSC_NULL,count,slen;
  PetscInt          *rvalues,*svalues,recvtotal;
  PetscMPIInt       *onodes1,*olengths1;
  MPI_Request       *send_waits = PETSC_NULL,*recv_waits = PETSC_NULL;
  MPI_Status        recv_status,*send_status;
  Vec               tvec,diag;
  Mat               tmat;
  const PetscScalar *d;

  if (pc->setupcalled) {
    KSPGetOperators(red->ksp,PETSC_NULL,&tmat,PETSC_NULL);
    MatGetSubMatrix(pc->pmat,red->is,red->is,MAT_REUSE_MATRIX,&tmat);
    KSPSetOperators(red->ksp,tmat,tmat,SAME_NONZERO_PATTERN);
  } else {
    PetscInt NN;

    PetscObjectGetComm((PetscObject)pc,&comm);
    MPI_Comm_size(comm,&size);
    PetscObjectGetNewTag((PetscObject)pc,&tag);

    /* count non-diagonal rows on process */
    MatGetOwnershipRange(pc->mat,&rstart,&rend);
    cnt  = 0;
    for (i=rstart; i<rend; i++) {
      MatGetRow(pc->mat,i,&nz,PETSC_NULL,PETSC_NULL);
      if (nz > 1) cnt++;
      MatRestoreRow(pc->mat,i,&nz,PETSC_NULL,PETSC_NULL);
    }
    PetscMalloc(cnt*sizeof(PetscInt),&rows);
    PetscMalloc((rend - rstart - cnt)*sizeof(PetscInt),&drows);

    /* list non-diagonal rows on process */
    cnt  = 0; dcnt = 0;
    for (i=rstart; i<rend; i++) {
      MatGetRow(pc->mat,i,&nz,PETSC_NULL,PETSC_NULL);
      if (nz > 1) rows[cnt++] = i;
      else drows[dcnt++] = i - rstart;
      MatRestoreRow(pc->mat,i,&nz,PETSC_NULL,PETSC_NULL);
    }

    /* create PetscLayout for non-diagonal rows on each process */
    PetscLayoutCreate(comm,&map);
    PetscLayoutSetLocalSize(map,cnt);
    PetscLayoutSetBlockSize(map,1);
    PetscLayoutSetUp(map);
    rstart = map->rstart;
    rend   = map->rend;

    /* create PetscLayout for load-balanced non-diagonal rows on each process */
    PetscLayoutCreate(comm,&nmap);
    MPI_Allreduce(&cnt,&ncnt,1,MPIU_INT,MPI_SUM,comm);
    PetscLayoutSetSize(nmap,ncnt);
    PetscLayoutSetBlockSize(nmap,1);
    PetscLayoutSetUp(nmap);

    MatGetSize(pc->pmat,&NN,PETSC_NULL);
    PetscInfo2(pc,"Number of diagonal rows eliminated %d, percentage eliminated %g\n",NN-ncnt,100.0*((PetscReal)(NN-ncnt))/((PetscReal)NN));
    /*
        This code is taken from VecScatterCreate_PtoS();
        it determines which rows need to be moved where to
        load balance the non-diagonal rows.
    */
    /* count number of contributors to each processor */
    PetscMalloc2(size,PetscMPIInt,&nprocs,cnt,PetscInt,&owner);
    PetscMemzero(nprocs,size*sizeof(PetscMPIInt));
    j      = 0;
    nsends = 0;
    for (i=rstart; i<rend; i++) {
      if (i < nmap->range[j]) j = 0;
      for (; j<size; j++) {
        if (i < nmap->range[j+1]) {
          if (!nprocs[j]++) nsends++;
          owner[i-rstart] = j;
          break;
        }
      }
    }
    /* inform other processors of number of messages and max length */
    PetscGatherNumberOfMessages(comm,PETSC_NULL,nprocs,&nrecvs);
    PetscGatherMessageLengths(comm,nsends,nrecvs,nprocs,&onodes1,&olengths1);
    PetscSortMPIIntWithArray(nrecvs,onodes1,olengths1);
    recvtotal = 0; for (i=0; i<nrecvs; i++) recvtotal += olengths1[i];

    /* post receives: rvalues will hold the rows this process will own */
    PetscMalloc3(recvtotal,PetscInt,&rvalues,nrecvs,PetscInt,&source,nrecvs,MPI_Request,&recv_waits);
    count  = 0;
    for (i=0; i<nrecvs; i++) {
      MPI_Irecv((rvalues+count),olengths1[i],MPIU_INT,onodes1[i],tag,comm,recv_waits+i);
      count += olengths1[i];
    }

    /* do sends:
       1) starts[i] gives the starting index in svalues for stuff going to
       the ith processor
    */
    PetscMalloc3(cnt,PetscInt,&svalues,nsends,MPI_Request,&send_waits,size,PetscInt,&starts);
    starts[0]  = 0;
    for (i=1; i<size; i++) { starts[i] = starts[i-1] + nprocs[i-1];}
    for (i=0; i<cnt; i++) {
      svalues[starts[owner[i]]++] = rows[i];
    }
    for (i=0; i<cnt; i++) rows[i] = rows[i] - rstart;
    red->drows = drows;
    red->dcnt  = dcnt;
    PetscFree(rows);

    starts[0] = 0;
    for (i=1; i<size; i++) { starts[i] = starts[i-1] + nprocs[i-1];}
    count = 0;
    for (i=0; i<size; i++) {
      if (nprocs[i]) {
        MPI_Isend(svalues+starts[i],nprocs[i],MPIU_INT,i,tag,comm,send_waits+count++);
      }
    }

    /* wait on receives */
    count  = nrecvs;
    slen   = 0;
    while (count) {
      MPI_Waitany(nrecvs,recv_waits,&imdex,&recv_status);
      /* unpack receives into our local space */
      MPI_Get_count(&recv_status,MPIU_INT,&n);
      slen += n;
      count--;
    }
    if (slen != recvtotal) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Total message lengths %D not expected %D",slen,recvtotal);

    ISCreateGeneral(comm,slen,rvalues,PETSC_COPY_VALUES,&red->is);

    /* free up all work space */
    PetscFree(olengths1);
    PetscFree(onodes1);
    PetscFree3(rvalues,source,recv_waits);
    PetscFree2(nprocs,owner);
    if (nsends) {   /* wait on sends */
      PetscMalloc(nsends*sizeof(MPI_Status),&send_status);
      MPI_Waitall(nsends,send_waits,send_status);
      PetscFree(send_status);
    }
    PetscFree3(svalues,send_waits,starts);
    PetscLayoutDestroy(&map);
    PetscLayoutDestroy(&nmap);

    VecCreateMPI(comm,slen,PETSC_DETERMINE,&red->b);
    VecDuplicate(red->b,&red->x);
    MatGetVecs(pc->pmat,&tvec,PETSC_NULL);
    VecScatterCreate(tvec,red->is,red->b,PETSC_NULL,&red->scatter);
    VecDestroy(&tvec);
    MatGetSubMatrix(pc->pmat,red->is,red->is,MAT_INITIAL_MATRIX,&tmat);
    KSPSetOperators(red->ksp,tmat,tmat,SAME_NONZERO_PATTERN);
    MatDestroy(&tmat);
  }

  /* get diagonal portion of matrix */
  PetscMalloc(red->dcnt*sizeof(PetscScalar),&red->diag);
  MatGetVecs(pc->pmat,&diag,PETSC_NULL);
  MatGetDiagonal(pc->pmat,diag);
  VecGetArrayRead(diag,&d);
  for (i=0; i<red->dcnt; i++) {
    red->diag[i] = 1.0/d[red->drows[i]];
  }
  VecRestoreArrayRead(diag,&d);
  VecDestroy(&diag);
  KSPSetUp(red->ksp);
  return(0);
}

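/*
   Summary of PCApply_Redistribute() below (derived from the code, stated informally):
   with D the set of rows that have only a diagonal entry and R the remaining rows,

      x_D = diag(A)_D^{-1} b_D                  (solved directly, entry by entry)
      x_R = inner KSP applied to (b - A x)_R    (solved on the redistributed system)

   The scatter red->scatter moves the R part of the work vector into the load-balanced
   vector red->b and moves red->x back into x afterwards.
*/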
static PetscErrorCode PCApply_Redistribute(PC pc,Vec b,Vec x)
{
  PC_Redistribute   *red = (PC_Redistribute*)pc->data;
  PetscErrorCode    ierr;
  PetscInt          dcnt = red->dcnt,i;
  const PetscInt    *drows = red->drows;
  PetscScalar       *xwork;
  const PetscScalar *bwork,*diag = red->diag;

  if (!red->work) {
    VecDuplicate(b,&red->work);
  }
  /* compute the rows of the solution that have only a diagonal entry */
  VecSet(x,0.0);         /* x = diag(A)^{-1} b on those rows */
  VecGetArray(x,&xwork);
  VecGetArrayRead(b,&bwork);
  for (i=0; i<dcnt; i++) {
    xwork[drows[i]] = diag[i]*bwork[drows[i]];
  }
  PetscLogFlops(dcnt);
  VecRestoreArray(x,&xwork);   /* restore the array obtained from x above */
  VecRestoreArrayRead(b,&bwork);
  /* update the right-hand side for the reduced system with the diagonal rows (and corresponding columns) removed */
  MatMult(pc->pmat,x,red->work);
  VecAYPX(red->work,-1.0,b);   /* red->work = b - A x */

  VecScatterBegin(red->scatter,red->work,red->b,INSERT_VALUES,SCATTER_FORWARD);
  VecScatterEnd(red->scatter,red->work,red->b,INSERT_VALUES,SCATTER_FORWARD);
  KSPSolve(red->ksp,red->b,red->x);
  VecScatterBegin(red->scatter,red->x,x,INSERT_VALUES,SCATTER_REVERSE);
  VecScatterEnd(red->scatter,red->x,x,INSERT_VALUES,SCATTER_REVERSE);
  return(0);
}

static PetscErrorCode PCDestroy_Redistribute(PC pc)
{
  PC_Redistribute *red = (PC_Redistribute*)pc->data;
  PetscErrorCode  ierr;

  VecScatterDestroy(&red->scatter);
  ISDestroy(&red->is);
  VecDestroy(&red->b);
  VecDestroy(&red->x);
  KSPDestroy(&red->ksp);
  VecDestroy(&red->work);
  PetscFree(red->drows);
  PetscFree(red->diag);
  PetscFree(pc->data);
  return(0);
}

static PetscErrorCode PCSetFromOptions_Redistribute(PC pc)
{
  PetscErrorCode  ierr;
  PC_Redistribute *red = (PC_Redistribute*)pc->data;

  KSPSetFromOptions(red->ksp);
  return(0);
}

/*@
   PCRedistributeGetKSP - Gets the KSP created by the PCREDISTRIBUTE preconditioner

   Not Collective

   Input Parameter:
.  pc - the preconditioner context

   Output Parameter:
.  innerksp - the inner KSP

   Level: advanced

.keywords: PC, redistribute solve
@*/
PetscErrorCode  PCRedistributeGetKSP(PC pc,KSP *innerksp)
{
  PC_Redistribute *red = (PC_Redistribute*)pc->data;

  *innerksp = red->ksp;
  return(0);
}
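
/*
   A minimal usage sketch for PCRedistributeGetKSP() (illustration only; it assumes a
   PC of type PCREDISTRIBUTE has already been attached to an outer KSP named "ksp"):

       PC  pc;
       KSP innerksp;

       KSPGetPC(ksp,&pc);
       PCSetType(pc,PCREDISTRIBUTE);
       PCRedistributeGetKSP(pc,&innerksp);
       KSPSetType(innerksp,KSPCG);
       KSPSetTolerances(innerksp,1.e-8,PETSC_DEFAULT,PETSC_DEFAULT,PETSC_DEFAULT);

   This configures the inner solve programmatically instead of through the
   -redistribute_ prefixed options.
*/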

/* -------------------------------------------------------------------------------------*/
/*MC
     PCREDISTRIBUTE - Redistributes a matrix for load balancing, removing the rows that have only a diagonal entry, and then applies a KSP to the new, smaller matrix

     Options for the redistribute preconditioner's inner KSP and PC can be set with -redistribute_ksp_xxx <values> and -redistribute_pc_xxx <values>

     Notes: Usually run this with -ksp_type preonly; a usage sketch follows this manual page.

     If you have used MatZeroRows() to eliminate (for example, Dirichlet) boundary conditions for a symmetric problem then you can use, for example, -ksp_type preonly
     -pc_type redistribute -redistribute_ksp_type cg -redistribute_pc_type bjacobi -redistribute_sub_pc_type icc to take advantage of the symmetry.

     This does NOT call a partitioner to reorder rows to lower communication; the ordering of the rows in the original matrix and the redistributed matrix is the same.

     Developer Notes: Should add an option to this preconditioner to use a partitioner to redistribute the rows to lower communication.

   Level: intermediate

.seealso:  PCCreate(), PCSetType(), PCType (for list of available types), PCRedistributeGetKSP()
M*/
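
/*
   A minimal usage sketch (illustration only; it assumes an assembled Mat A and Vecs
   b,x already exist and mirrors the options described in the manual page above):

       KSP ksp;
       PC  pc;

       KSPCreate(PETSC_COMM_WORLD,&ksp);
       KSPSetOperators(ksp,A,A,SAME_NONZERO_PATTERN);
       KSPSetType(ksp,KSPPREONLY);
       KSPGetPC(ksp,&pc);
       PCSetType(pc,PCREDISTRIBUTE);
       KSPSetFromOptions(ksp);
       KSPSolve(ksp,b,x);

   Run, for example, with
       -redistribute_ksp_type cg -redistribute_pc_type bjacobi
   to control the solver applied to the redistributed system.
*/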

EXTERN_C_BEGIN
PetscErrorCode  PCCreate_Redistribute(PC pc)
{
  PetscErrorCode  ierr;
  PC_Redistribute *red;
  const char      *prefix;

  PetscNewLog(pc,PC_Redistribute,&red);
  pc->data = (void*)red;

  pc->ops->apply          = PCApply_Redistribute;
  pc->ops->applytranspose = 0;
  pc->ops->setup          = PCSetUp_Redistribute;
  pc->ops->destroy        = PCDestroy_Redistribute;
  pc->ops->setfromoptions = PCSetFromOptions_Redistribute;
  pc->ops->view           = PCView_Redistribute;

  KSPCreate(((PetscObject)pc)->comm,&red->ksp);
  PetscObjectIncrementTabLevel((PetscObject)red->ksp,(PetscObject)pc,1);
  PetscLogObjectParent(pc,red->ksp);
  PCGetOptionsPrefix(pc,&prefix);
  KSPSetOptionsPrefix(red->ksp,prefix);
  KSPAppendOptionsPrefix(red->ksp,"redistribute_");
  return(0);
}
EXTERN_C_END