Actual source code: mpiaij.c

petsc-3.11.1 2019-04-12

  3:  #include <../src/mat/impls/aij/mpi/mpiaij.h>
  4:  #include <petsc/private/vecimpl.h>
  5:  #include <petsc/private/vecscatterimpl.h>
  6:  #include <petsc/private/isimpl.h>
  7:  #include <petscblaslapack.h>
  8:  #include <petscsf.h>

 10: /*MC
 11:    MATAIJ - MATAIJ = "aij" - A matrix type to be used for sparse matrices.

 13:    This matrix type is identical to MATSEQAIJ when constructed with a single process communicator,
 14:    and MATMPIAIJ otherwise.  As a result, for single process communicators,
 15:   MatSeqAIJSetPreallocation() is supported, and similarly MatMPIAIJSetPreallocation() is supported
 16:   for communicators controlling multiple processes.  It is recommended that you call both of
 17:   the above preallocation routines for simplicity.
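
   Example usage, a minimal sketch (not taken from this file; the global sizes M and N and the
   preallocation counts are illustrative assumptions, and error checking is omitted):

      Mat A;
      MatCreate(PETSC_COMM_WORLD,&A);
      MatSetSizes(A,PETSC_DECIDE,PETSC_DECIDE,M,N);
      MatSetType(A,MATAIJ);
      MatSeqAIJSetPreallocation(A,5,NULL);
      MatMPIAIJSetPreallocation(A,5,NULL,2,NULL);

   Whichever preallocation call does not match the communicator simply has no effect, which is
   why calling both is safe.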

 19:    Options Database Keys:
 20: . -mat_type aij - sets the matrix type to "aij" during a call to MatSetFromOptions()

 22:   Developer Notes:
 23:     Subclasses include MATAIJCUSP, MATAIJCUSPARSE, MATAIJPERM, MATAIJSELL, MATAIJMKL, and MATAIJCRL; the AIJ type also automatically switches over to using inodes when
 24:    enough of them exist.

 26:   Level: beginner

 28: .seealso: MatCreateAIJ(), MatCreateSeqAIJ(), MATSEQAIJ, MATMPIAIJ
 29: M*/

 31: /*MC
 32:    MATAIJCRL - MATAIJCRL = "aijcrl" - A matrix type to be used for sparse matrices.

 34:    This matrix type is identical to MATSEQAIJCRL when constructed with a single process communicator,
 35:    and MATMPIAIJCRL otherwise.  As a result, for single process communicators,
 36:    MatSeqAIJSetPreallocation() is supported, and similarly MatMPIAIJSetPreallocation() is supported
 37:   for communicators controlling multiple processes.  It is recommended that you call both of
 38:   the above preallocation routines for simplicity.
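
   Example usage, a minimal sketch (the preallocation counts are illustrative assumptions; A is a
   Mat that has already been created and sized):

      MatSetType(A,MATAIJCRL);
      MatSeqAIJSetPreallocation(A,5,NULL);
      MatMPIAIJSetPreallocation(A,5,NULL,2,NULL);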

 40:    Options Database Keys:
 41: . -mat_type aijcrl - sets the matrix type to "aijcrl" during a call to MatSetFromOptions()

 43:   Level: beginner

 45: .seealso: MatCreateMPIAIJCRL(), MATSEQAIJCRL, MATMPIAIJCRL
 46: M*/

 48: PetscErrorCode MatSetBlockSizes_MPIAIJ(Mat M, PetscInt rbs, PetscInt cbs)
 49: {
 51:   Mat_MPIAIJ     *mat = (Mat_MPIAIJ*)M->data;

 54:   if (mat->A) {
 55:     MatSetBlockSizes(mat->A,rbs,cbs);
 56:     MatSetBlockSizes(mat->B,rbs,1);
 57:   }
 58:   return(0);
 59: }

 61: PetscErrorCode MatFindNonzeroRows_MPIAIJ(Mat M,IS *keptrows)
 62: {
 63:   PetscErrorCode  ierr;
 64:   Mat_MPIAIJ      *mat = (Mat_MPIAIJ*)M->data;
 65:   Mat_SeqAIJ      *a   = (Mat_SeqAIJ*)mat->A->data;
 66:   Mat_SeqAIJ      *b   = (Mat_SeqAIJ*)mat->B->data;
 67:   const PetscInt  *ia,*ib;
 68:   const MatScalar *aa,*bb;
 69:   PetscInt        na,nb,i,j,*rows,cnt=0,n0rows;
 70:   PetscInt        m = M->rmap->n,rstart = M->rmap->rstart;

 73:   *keptrows = 0;
 74:   ia        = a->i;
 75:   ib        = b->i;
 76:   for (i=0; i<m; i++) {
 77:     na = ia[i+1] - ia[i];
 78:     nb = ib[i+1] - ib[i];
 79:     if (!na && !nb) {
 80:       cnt++;
 81:       goto ok1;
 82:     }
 83:     aa = a->a + ia[i];
 84:     for (j=0; j<na; j++) {
 85:       if (aa[j] != 0.0) goto ok1;
 86:     }
 87:     bb = b->a + ib[i];
 88:     for (j=0; j <nb; j++) {
 89:       if (bb[j] != 0.0) goto ok1;
 90:     }
 91:     cnt++;
 92: ok1:;
 93:   }
 94:   MPIU_Allreduce(&cnt,&n0rows,1,MPIU_INT,MPI_SUM,PetscObjectComm((PetscObject)M));
 95:   if (!n0rows) return(0);
 96:   PetscMalloc1(M->rmap->n-cnt,&rows);
 97:   cnt  = 0;
 98:   for (i=0; i<m; i++) {
 99:     na = ia[i+1] - ia[i];
100:     nb = ib[i+1] - ib[i];
101:     if (!na && !nb) continue;
102:     aa = a->a + ia[i];
103:     for (j=0; j<na;j++) {
104:       if (aa[j] != 0.0) {
105:         rows[cnt++] = rstart + i;
106:         goto ok2;
107:       }
108:     }
109:     bb = b->a + ib[i];
110:     for (j=0; j<nb; j++) {
111:       if (bb[j] != 0.0) {
112:         rows[cnt++] = rstart + i;
113:         goto ok2;
114:       }
115:     }
116: ok2:;
117:   }
118:   ISCreateGeneral(PetscObjectComm((PetscObject)M),cnt,rows,PETSC_OWN_POINTER,keptrows);
119:   return(0);
120: }

122: PetscErrorCode  MatDiagonalSet_MPIAIJ(Mat Y,Vec D,InsertMode is)
123: {
124:   PetscErrorCode    ierr;
125:   Mat_MPIAIJ        *aij = (Mat_MPIAIJ*) Y->data;
126:   PetscBool         cong;

129:   MatHasCongruentLayouts(Y,&cong);
130:   if (Y->assembled && cong) {
131:     MatDiagonalSet(aij->A,D,is);
132:   } else {
133:     MatDiagonalSet_Default(Y,D,is);
134:   }
135:   return(0);
136: }

138: PetscErrorCode MatFindZeroDiagonals_MPIAIJ(Mat M,IS *zrows)
139: {
140:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)M->data;
142:   PetscInt       i,rstart,nrows,*rows;

145:   *zrows = NULL;
146:   MatFindZeroDiagonals_SeqAIJ_Private(aij->A,&nrows,&rows);
147:   MatGetOwnershipRange(M,&rstart,NULL);
148:   for (i=0; i<nrows; i++) rows[i] += rstart;
149:   ISCreateGeneral(PetscObjectComm((PetscObject)M),nrows,rows,PETSC_OWN_POINTER,zrows);
150:   return(0);
151: }

153: PetscErrorCode MatGetColumnNorms_MPIAIJ(Mat A,NormType type,PetscReal *norms)
154: {
156:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)A->data;
157:   PetscInt       i,n,*garray = aij->garray;
158:   Mat_SeqAIJ     *a_aij = (Mat_SeqAIJ*) aij->A->data;
159:   Mat_SeqAIJ     *b_aij = (Mat_SeqAIJ*) aij->B->data;
160:   PetscReal      *work;

163:   MatGetSize(A,NULL,&n);
164:   PetscCalloc1(n,&work);
165:   if (type == NORM_2) {
166:     for (i=0; i<a_aij->i[aij->A->rmap->n]; i++) {
167:       work[A->cmap->rstart + a_aij->j[i]] += PetscAbsScalar(a_aij->a[i]*a_aij->a[i]);
168:     }
169:     for (i=0; i<b_aij->i[aij->B->rmap->n]; i++) {
170:       work[garray[b_aij->j[i]]] += PetscAbsScalar(b_aij->a[i]*b_aij->a[i]);
171:     }
172:   } else if (type == NORM_1) {
173:     for (i=0; i<a_aij->i[aij->A->rmap->n]; i++) {
174:       work[A->cmap->rstart + a_aij->j[i]] += PetscAbsScalar(a_aij->a[i]);
175:     }
176:     for (i=0; i<b_aij->i[aij->B->rmap->n]; i++) {
177:       work[garray[b_aij->j[i]]] += PetscAbsScalar(b_aij->a[i]);
178:     }
179:   } else if (type == NORM_INFINITY) {
180:     for (i=0; i<a_aij->i[aij->A->rmap->n]; i++) {
181:       work[A->cmap->rstart + a_aij->j[i]] = PetscMax(PetscAbsScalar(a_aij->a[i]), work[A->cmap->rstart + a_aij->j[i]]);
182:     }
183:     for (i=0; i<b_aij->i[aij->B->rmap->n]; i++) {
184:       work[garray[b_aij->j[i]]] = PetscMax(PetscAbsScalar(b_aij->a[i]),work[garray[b_aij->j[i]]]);
185:     }

187:   } else SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONG,"Unknown NormType");
188:   if (type == NORM_INFINITY) {
189:     MPIU_Allreduce(work,norms,n,MPIU_REAL,MPIU_MAX,PetscObjectComm((PetscObject)A));
190:   } else {
191:     MPIU_Allreduce(work,norms,n,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)A));
192:   }
193:   PetscFree(work);
194:   if (type == NORM_2) {
195:     for (i=0; i<n; i++) norms[i] = PetscSqrtReal(norms[i]);
196:   }
197:   return(0);
198: }

200: PetscErrorCode MatFindOffBlockDiagonalEntries_MPIAIJ(Mat A,IS *is)
201: {
202:   Mat_MPIAIJ      *a  = (Mat_MPIAIJ*)A->data;
203:   IS              sis,gis;
204:   PetscErrorCode  ierr;
205:   const PetscInt  *isis,*igis;
206:   PetscInt        n,*iis,nsis,ngis,rstart,i;

209:   MatFindOffBlockDiagonalEntries(a->A,&sis);
210:   MatFindNonzeroRows(a->B,&gis);
211:   ISGetSize(gis,&ngis);
212:   ISGetSize(sis,&nsis);
213:   ISGetIndices(sis,&isis);
214:   ISGetIndices(gis,&igis);

216:   PetscMalloc1(ngis+nsis,&iis);
217:   PetscMemcpy(iis,igis,ngis*sizeof(PetscInt));
218:   PetscMemcpy(iis+ngis,isis,nsis*sizeof(PetscInt));
219:   n    = ngis + nsis;
220:   PetscSortRemoveDupsInt(&n,iis);
221:   MatGetOwnershipRange(A,&rstart,NULL);
222:   for (i=0; i<n; i++) iis[i] += rstart;
223:   ISCreateGeneral(PetscObjectComm((PetscObject)A),n,iis,PETSC_OWN_POINTER,is);

225:   ISRestoreIndices(sis,&isis);
226:   ISRestoreIndices(gis,&igis);
227:   ISDestroy(&sis);
228:   ISDestroy(&gis);
229:   return(0);
230: }

232: /*
233:     Distributes a SeqAIJ matrix across a set of processes. Code stolen from
234:     MatLoad_MPIAIJ(). Horrible lack of reuse. Should be a routine for each matrix type.

236:     Only for square matrices

238:     Used by a preconditioner, hence PETSC_EXTERN
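
    A calling sketch (illustrative only, not taken from an actual caller); the entries are read
    from gmat on process 0 of comm, and m is the number of rows this process is to own:

      Mat dmat;
      MatDistribute_MPIAIJ(comm,gmat,m,MAT_INITIAL_MATRIX,&dmat);
         ... use dmat ...
      MatDistribute_MPIAIJ(comm,gmat,m,MAT_REUSE_MATRIX,&dmat);

    With MAT_REUSE_MATRIX only the numerical values are moved over from process 0 again; the
    column indices already stored in dmat are kept.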
239: */
240: PETSC_EXTERN PetscErrorCode MatDistribute_MPIAIJ(MPI_Comm comm,Mat gmat,PetscInt m,MatReuse reuse,Mat *inmat)
241: {
242:   PetscMPIInt    rank,size;
243:   PetscInt       *rowners,*dlens,*olens,i,rstart,rend,j,jj,nz = 0,*gmataj,cnt,row,*ld,bses[2];
245:   Mat            mat;
246:   Mat_SeqAIJ     *gmata;
247:   PetscMPIInt    tag;
248:   MPI_Status     status;
249:   PetscBool      aij;
250:   MatScalar      *gmataa,*ao,*ad,*gmataarestore=0;

253:   MPI_Comm_rank(comm,&rank);
254:   MPI_Comm_size(comm,&size);
255:   if (!rank) {
256:     PetscObjectTypeCompare((PetscObject)gmat,MATSEQAIJ,&aij);
257:     if (!aij) SETERRQ1(PetscObjectComm((PetscObject)gmat),PETSC_ERR_SUP,"Currently no support for input matrix of type %s\n",((PetscObject)gmat)->type_name);
258:   }
259:   if (reuse == MAT_INITIAL_MATRIX) {
260:     MatCreate(comm,&mat);
261:     MatSetSizes(mat,m,m,PETSC_DETERMINE,PETSC_DETERMINE);
262:     MatGetBlockSizes(gmat,&bses[0],&bses[1]);
263:     MPI_Bcast(bses,2,MPIU_INT,0,comm);
264:     MatSetBlockSizes(mat,bses[0],bses[1]);
265:     MatSetType(mat,MATAIJ);
266:     PetscMalloc1(size+1,&rowners);
267:     PetscMalloc2(m,&dlens,m,&olens);
268:     MPI_Allgather(&m,1,MPIU_INT,rowners+1,1,MPIU_INT,comm);

270:     rowners[0] = 0;
271:     for (i=2; i<=size; i++) rowners[i] += rowners[i-1];
272:     rstart = rowners[rank];
273:     rend   = rowners[rank+1];
274:     PetscObjectGetNewTag((PetscObject)mat,&tag);
275:     if (!rank) {
276:       gmata = (Mat_SeqAIJ*) gmat->data;
277:       /* send row lengths to all processors */
278:       for (i=0; i<m; i++) dlens[i] = gmata->ilen[i];
279:       for (i=1; i<size; i++) {
280:         MPI_Send(gmata->ilen + rowners[i],rowners[i+1]-rowners[i],MPIU_INT,i,tag,comm);
281:       }
 282:       /* determine the number of diagonal and off-diagonal entries */
283:       PetscMemzero(olens,m*sizeof(PetscInt));
284:       PetscCalloc1(m,&ld);
285:       jj   = 0;
286:       for (i=0; i<m; i++) {
287:         for (j=0; j<dlens[i]; j++) {
288:           if (gmata->j[jj] < rstart) ld[i]++;
289:           if (gmata->j[jj] < rstart || gmata->j[jj] >= rend) olens[i]++;
290:           jj++;
291:         }
292:       }
293:       /* send column indices to other processes */
294:       for (i=1; i<size; i++) {
295:         nz   = gmata->i[rowners[i+1]]-gmata->i[rowners[i]];
296:         MPI_Send(&nz,1,MPIU_INT,i,tag,comm);
297:         MPI_Send(gmata->j + gmata->i[rowners[i]],nz,MPIU_INT,i,tag,comm);
298:       }

300:       /* send numerical values to other processes */
301:       for (i=1; i<size; i++) {
302:         nz   = gmata->i[rowners[i+1]]-gmata->i[rowners[i]];
303:         MPI_Send(gmata->a + gmata->i[rowners[i]],nz,MPIU_SCALAR,i,tag,comm);
304:       }
305:       gmataa = gmata->a;
306:       gmataj = gmata->j;

308:     } else {
309:       /* receive row lengths */
310:       MPI_Recv(dlens,m,MPIU_INT,0,tag,comm,&status);
311:       /* receive column indices */
312:       MPI_Recv(&nz,1,MPIU_INT,0,tag,comm,&status);
313:       PetscMalloc2(nz,&gmataa,nz,&gmataj);
314:       MPI_Recv(gmataj,nz,MPIU_INT,0,tag,comm,&status);
 315:       /* determine the number of diagonal and off-diagonal entries */
316:       PetscMemzero(olens,m*sizeof(PetscInt));
317:       PetscCalloc1(m,&ld);
318:       jj   = 0;
319:       for (i=0; i<m; i++) {
320:         for (j=0; j<dlens[i]; j++) {
321:           if (gmataj[jj] < rstart) ld[i]++;
322:           if (gmataj[jj] < rstart || gmataj[jj] >= rend) olens[i]++;
323:           jj++;
324:         }
325:       }
326:       /* receive numerical values */
327:       PetscMemzero(gmataa,nz*sizeof(PetscScalar));
328:       MPI_Recv(gmataa,nz,MPIU_SCALAR,0,tag,comm,&status);
329:     }
330:     /* set preallocation */
331:     for (i=0; i<m; i++) {
332:       dlens[i] -= olens[i];
333:     }
334:     MatSeqAIJSetPreallocation(mat,0,dlens);
335:     MatMPIAIJSetPreallocation(mat,0,dlens,0,olens);

337:     for (i=0; i<m; i++) {
338:       dlens[i] += olens[i];
339:     }
340:     cnt = 0;
341:     for (i=0; i<m; i++) {
342:       row  = rstart + i;
343:       MatSetValues(mat,1,&row,dlens[i],gmataj+cnt,gmataa+cnt,INSERT_VALUES);
344:       cnt += dlens[i];
345:     }
346:     if (rank) {
347:       PetscFree2(gmataa,gmataj);
348:     }
349:     PetscFree2(dlens,olens);
350:     PetscFree(rowners);

352:     ((Mat_MPIAIJ*)(mat->data))->ld = ld;

354:     *inmat = mat;
355:   } else {   /* column indices are already set; only need to move over numerical values from process 0 */
356:     Mat_SeqAIJ *Ad = (Mat_SeqAIJ*)((Mat_MPIAIJ*)((*inmat)->data))->A->data;
357:     Mat_SeqAIJ *Ao = (Mat_SeqAIJ*)((Mat_MPIAIJ*)((*inmat)->data))->B->data;
358:     mat  = *inmat;
359:     PetscObjectGetNewTag((PetscObject)mat,&tag);
360:     if (!rank) {
361:       /* send numerical values to other processes */
362:       gmata  = (Mat_SeqAIJ*) gmat->data;
363:       MatGetOwnershipRanges(mat,(const PetscInt**)&rowners);
364:       gmataa = gmata->a;
365:       for (i=1; i<size; i++) {
366:         nz   = gmata->i[rowners[i+1]]-gmata->i[rowners[i]];
367:         MPI_Send(gmataa + gmata->i[rowners[i]],nz,MPIU_SCALAR,i,tag,comm);
368:       }
369:       nz = gmata->i[rowners[1]]-gmata->i[rowners[0]];
370:     } else {
 371:       /* receive numerical values from process 0 */
372:       nz   = Ad->nz + Ao->nz;
373:       PetscMalloc1(nz,&gmataa); gmataarestore = gmataa;
374:       MPI_Recv(gmataa,nz,MPIU_SCALAR,0,tag,comm,&status);
375:     }
376:     /* transfer numerical values into the diagonal A and off diagonal B parts of mat */
377:     ld = ((Mat_MPIAIJ*)(mat->data))->ld;
378:     ad = Ad->a;
379:     ao = Ao->a;
380:     if (mat->rmap->n) {
381:       i  = 0;
382:       nz = ld[i];                                   PetscMemcpy(ao,gmataa,nz*sizeof(PetscScalar)); ao += nz; gmataa += nz;
383:       nz = Ad->i[i+1] - Ad->i[i];                   PetscMemcpy(ad,gmataa,nz*sizeof(PetscScalar)); ad += nz; gmataa += nz;
384:     }
385:     for (i=1; i<mat->rmap->n; i++) {
386:       nz = Ao->i[i] - Ao->i[i-1] - ld[i-1] + ld[i]; PetscMemcpy(ao,gmataa,nz*sizeof(PetscScalar)); ao += nz; gmataa += nz;
387:       nz = Ad->i[i+1] - Ad->i[i];                   PetscMemcpy(ad,gmataa,nz*sizeof(PetscScalar)); ad += nz; gmataa += nz;
388:     }
389:     i--;
390:     if (mat->rmap->n) {
391:       nz = Ao->i[i+1] - Ao->i[i] - ld[i];           PetscMemcpy(ao,gmataa,nz*sizeof(PetscScalar));
392:     }
393:     if (rank) {
394:       PetscFree(gmataarestore);
395:     }
396:   }
397:   MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY);
398:   MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY);
399:   return(0);
400: }

402: /*
403:   Local utility routine that creates a mapping from the global column
404: number to the local number in the off-diagonal part of the local
 405: storage of the matrix.  When PETSC_USE_CTABLE is used this is scalable at
 406: a slightly higher hash table cost; without it, it is not scalable (each process
 407: has an order-N integer array, but access is fast).
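
   For example (hypothetical data): if this process's off-diagonal block references the global
   columns garray = {3, 7, 12}, then without PETSC_USE_CTABLE the map is colmap[3] = 1,
   colmap[7] = 2, colmap[12] = 3 (local index plus one, so that a stored 0 means the column is
   not present); with PETSC_USE_CTABLE the keys are additionally shifted by one, i.e. the pairs
   (4,1), (8,2), (13,3) are stored in the table.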
408: */
409: PetscErrorCode MatCreateColmap_MPIAIJ_Private(Mat mat)
410: {
411:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
413:   PetscInt       n = aij->B->cmap->n,i;

416:   if (!aij->garray) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"MPIAIJ Matrix was assembled but is missing garray");
417: #if defined(PETSC_USE_CTABLE)
418:   PetscTableCreate(n,mat->cmap->N+1,&aij->colmap);
419:   for (i=0; i<n; i++) {
420:     PetscTableAdd(aij->colmap,aij->garray[i]+1,i+1,INSERT_VALUES);
421:   }
422: #else
423:   PetscCalloc1(mat->cmap->N+1,&aij->colmap);
424:   PetscLogObjectMemory((PetscObject)mat,(mat->cmap->N+1)*sizeof(PetscInt));
425:   for (i=0; i<n; i++) aij->colmap[aij->garray[i]] = i+1;
426: #endif
427:   return(0);
428: }

430: #define MatSetValues_SeqAIJ_A_Private(row,col,value,addv,orow,ocol)     \
431: { \
432:     if (col <= lastcol1)  low1 = 0;     \
433:     else                 high1 = nrow1; \
434:     lastcol1 = col;\
435:     while (high1-low1 > 5) { \
436:       t = (low1+high1)/2; \
437:       if (rp1[t] > col) high1 = t; \
438:       else              low1  = t; \
439:     } \
440:       for (_i=low1; _i<high1; _i++) { \
441:         if (rp1[_i] > col) break; \
442:         if (rp1[_i] == col) { \
443:           if (addv == ADD_VALUES) ap1[_i] += value;   \
444:           else                    ap1[_i] = value; \
445:           goto a_noinsert; \
446:         } \
447:       }  \
448:       if (value == 0.0 && ignorezeroentries && row != col) {low1 = 0; high1 = nrow1;goto a_noinsert;} \
449:       if (nonew == 1) {low1 = 0; high1 = nrow1; goto a_noinsert;}                \
450:       if (nonew == -1) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero at global row/column (%D, %D) into matrix", orow, ocol); \
451:       MatSeqXAIJReallocateAIJ(A,am,1,nrow1,row,col,rmax1,aa,ai,aj,rp1,ap1,aimax,nonew,MatScalar); \
452:       N = nrow1++ - 1; a->nz++; high1++; \
453:       /* shift up all the later entries in this row */ \
454:       for (ii=N; ii>=_i; ii--) { \
455:         rp1[ii+1] = rp1[ii]; \
456:         ap1[ii+1] = ap1[ii]; \
457:       } \
458:       rp1[_i] = col;  \
459:       ap1[_i] = value;  \
460:       A->nonzerostate++;\
461:       a_noinsert: ; \
462:       ailen[row] = nrow1; \
463: }

465: #define MatSetValues_SeqAIJ_B_Private(row,col,value,addv,orow,ocol) \
466:   { \
467:     if (col <= lastcol2) low2 = 0;                        \
468:     else high2 = nrow2;                                   \
469:     lastcol2 = col;                                       \
470:     while (high2-low2 > 5) {                              \
471:       t = (low2+high2)/2;                                 \
472:       if (rp2[t] > col) high2 = t;                        \
473:       else             low2  = t;                         \
474:     }                                                     \
475:     for (_i=low2; _i<high2; _i++) {                       \
476:       if (rp2[_i] > col) break;                           \
477:       if (rp2[_i] == col) {                               \
478:         if (addv == ADD_VALUES) ap2[_i] += value;         \
479:         else                    ap2[_i] = value;          \
480:         goto b_noinsert;                                  \
481:       }                                                   \
482:     }                                                     \
483:     if (value == 0.0 && ignorezeroentries) {low2 = 0; high2 = nrow2; goto b_noinsert;} \
484:     if (nonew == 1) {low2 = 0; high2 = nrow2; goto b_noinsert;}                        \
485:     if (nonew == -1) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero at global row/column (%D, %D) into matrix", orow, ocol); \
486:     MatSeqXAIJReallocateAIJ(B,bm,1,nrow2,row,col,rmax2,ba,bi,bj,rp2,ap2,bimax,nonew,MatScalar); \
487:     N = nrow2++ - 1; b->nz++; high2++;                    \
488:     /* shift up all the later entries in this row */      \
489:     for (ii=N; ii>=_i; ii--) {                            \
490:       rp2[ii+1] = rp2[ii];                                \
491:       ap2[ii+1] = ap2[ii];                                \
492:     }                                                     \
493:     rp2[_i] = col;                                        \
494:     ap2[_i] = value;                                      \
495:     B->nonzerostate++;                                    \
496:     b_noinsert: ;                                         \
497:     bilen[row] = nrow2;                                   \
498:   }

500: PetscErrorCode MatSetValuesRow_MPIAIJ(Mat A,PetscInt row,const PetscScalar v[])
501: {
502:   Mat_MPIAIJ     *mat = (Mat_MPIAIJ*)A->data;
503:   Mat_SeqAIJ     *a   = (Mat_SeqAIJ*)mat->A->data,*b = (Mat_SeqAIJ*)mat->B->data;
505:   PetscInt       l,*garray = mat->garray,diag;

508:   /* code only works for square matrices A */

510:   /* find size of row to the left of the diagonal part */
511:   MatGetOwnershipRange(A,&diag,0);
512:   row  = row - diag;
513:   for (l=0; l<b->i[row+1]-b->i[row]; l++) {
514:     if (garray[b->j[b->i[row]+l]] > diag) break;
515:   }
516:   PetscMemcpy(b->a+b->i[row],v,l*sizeof(PetscScalar));

518:   /* diagonal part */
519:   PetscMemcpy(a->a+a->i[row],v+l,(a->i[row+1]-a->i[row])*sizeof(PetscScalar));

521:   /* right of diagonal part */
522:   PetscMemcpy(b->a+b->i[row]+l,v+l+a->i[row+1]-a->i[row],(b->i[row+1]-b->i[row]-l)*sizeof(PetscScalar));
523:   return(0);
524: }

526: PetscErrorCode MatSetValues_MPIAIJ(Mat mat,PetscInt m,const PetscInt im[],PetscInt n,const PetscInt in[],const PetscScalar v[],InsertMode addv)
527: {
528:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
529:   PetscScalar    value;
531:   PetscInt       i,j,rstart  = mat->rmap->rstart,rend = mat->rmap->rend;
532:   PetscInt       cstart      = mat->cmap->rstart,cend = mat->cmap->rend,row,col;
533:   PetscBool      roworiented = aij->roworiented;

535:   /* Some Variables required in the macro */
536:   Mat        A                 = aij->A;
537:   Mat_SeqAIJ *a                = (Mat_SeqAIJ*)A->data;
538:   PetscInt   *aimax            = a->imax,*ai = a->i,*ailen = a->ilen,*aj = a->j;
539:   MatScalar  *aa               = a->a;
540:   PetscBool  ignorezeroentries = a->ignorezeroentries;
541:   Mat        B                 = aij->B;
542:   Mat_SeqAIJ *b                = (Mat_SeqAIJ*)B->data;
543:   PetscInt   *bimax            = b->imax,*bi = b->i,*bilen = b->ilen,*bj = b->j,bm = aij->B->rmap->n,am = aij->A->rmap->n;
544:   MatScalar  *ba               = b->a;

546:   PetscInt  *rp1,*rp2,ii,nrow1,nrow2,_i,rmax1,rmax2,N,low1,high1,low2,high2,t,lastcol1,lastcol2;
547:   PetscInt  nonew;
548:   MatScalar *ap1,*ap2;

551:   for (i=0; i<m; i++) {
552:     if (im[i] < 0) continue;
553: #if defined(PETSC_USE_DEBUG)
554:     if (im[i] >= mat->rmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Row too large: row %D max %D",im[i],mat->rmap->N-1);
555: #endif
556:     if (im[i] >= rstart && im[i] < rend) {
557:       row      = im[i] - rstart;
558:       lastcol1 = -1;
559:       rp1      = aj + ai[row];
560:       ap1      = aa + ai[row];
561:       rmax1    = aimax[row];
562:       nrow1    = ailen[row];
563:       low1     = 0;
564:       high1    = nrow1;
565:       lastcol2 = -1;
566:       rp2      = bj + bi[row];
567:       ap2      = ba + bi[row];
568:       rmax2    = bimax[row];
569:       nrow2    = bilen[row];
570:       low2     = 0;
571:       high2    = nrow2;

573:       for (j=0; j<n; j++) {
574:         if (roworiented) value = v[i*n+j];
575:         else             value = v[i+j*m];
576:         if (in[j] >= cstart && in[j] < cend) {
577:           col   = in[j] - cstart;
578:           nonew = a->nonew;
579:           if (ignorezeroentries && value == 0.0 && (addv == ADD_VALUES) && row != col) continue;
580:           MatSetValues_SeqAIJ_A_Private(row,col,value,addv,im[i],in[j]);
581:         } else if (in[j] < 0) continue;
582: #if defined(PETSC_USE_DEBUG)
583:         else if (in[j] >= mat->cmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Column too large: col %D max %D",in[j],mat->cmap->N-1);
584: #endif
585:         else {
586:           if (mat->was_assembled) {
587:             if (!aij->colmap) {
588:               MatCreateColmap_MPIAIJ_Private(mat);
589:             }
590: #if defined(PETSC_USE_CTABLE)
591:             PetscTableFind(aij->colmap,in[j]+1,&col);
592:             col--;
593: #else
594:             col = aij->colmap[in[j]] - 1;
595: #endif
596:             if (col < 0 && !((Mat_SeqAIJ*)(aij->B->data))->nonew) {
597:               MatDisAssemble_MPIAIJ(mat);
598:               col  =  in[j];
599:               /* Reinitialize the variables required by MatSetValues_SeqAIJ_B_Private() */
600:               B     = aij->B;
601:               b     = (Mat_SeqAIJ*)B->data;
602:               bimax = b->imax; bi = b->i; bilen = b->ilen; bj = b->j; ba = b->a;
603:               rp2   = bj + bi[row];
604:               ap2   = ba + bi[row];
605:               rmax2 = bimax[row];
606:               nrow2 = bilen[row];
607:               low2  = 0;
608:               high2 = nrow2;
609:               bm    = aij->B->rmap->n;
610:               ba    = b->a;
611:             } else if (col < 0) {
612:               if (1 == ((Mat_SeqAIJ*)(aij->B->data))->nonew) {
613:                 PetscInfo3(mat,"Skipping of insertion of new nonzero location in off-diagonal portion of matrix %g(%D,%D)\n",(double)PetscRealPart(value),im[i],in[j]);
614:               } else SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero at global row/column (%D, %D) into matrix", im[i], in[j]);
615:             }
616:           } else col = in[j];
617:           nonew = b->nonew;
618:           MatSetValues_SeqAIJ_B_Private(row,col,value,addv,im[i],in[j]);
619:         }
620:       }
621:     } else {
622:       if (mat->nooffprocentries) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Setting off process row %D even though MatSetOption(,MAT_NO_OFF_PROC_ENTRIES,PETSC_TRUE) was set",im[i]);
623:       if (!aij->donotstash) {
624:         mat->assembled = PETSC_FALSE;
625:         if (roworiented) {
626:           MatStashValuesRow_Private(&mat->stash,im[i],n,in,v+i*n,(PetscBool)(ignorezeroentries && (addv == ADD_VALUES)));
627:         } else {
628:           MatStashValuesCol_Private(&mat->stash,im[i],n,in,v+i,m,(PetscBool)(ignorezeroentries && (addv == ADD_VALUES)));
629:         }
630:       }
631:     }
632:   }
633:   return(0);
634: }

636: /*
637:     This function sets the j and ilen arrays (of the diagonal and off-diagonal part) of an MPIAIJ-matrix.
 638:     The row offsets in mat_i have to be non-decreasing and the column indices in mat_j have to be sorted within each row (CSR-like).
 639:     No off-processor parts of the matrix are allowed here and mat->was_assembled has to be PETSC_FALSE.
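
    A small illustration with hypothetical data: suppose this process owns the global columns
    [cstart,cend) = [4,8) and its two local rows are given by

      mat_i = {0, 3, 5}
      mat_j = {1, 5, 9, 4, 6}

    (three entries in row 0, two in row 1, columns sorted within each row).  The routine then
    fills the diagonal part with aj = {1, 0, 2} and ailen = {1, 2} (columns shifted by cstart)
    and the off-diagonal part with bj = {1, 9} and bilen = {2, 0} (columns kept global at this
    stage).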
640: */
641: PetscErrorCode MatSetValues_MPIAIJ_CopyFromCSRFormat_Symbolic(Mat mat,const PetscInt mat_j[],const PetscInt mat_i[])
642: {
643:   Mat_MPIAIJ     *aij        = (Mat_MPIAIJ*)mat->data;
644:   Mat            A           = aij->A; /* diagonal part of the matrix */
645:   Mat            B           = aij->B; /* offdiagonal part of the matrix */
646:   Mat_SeqAIJ     *a          = (Mat_SeqAIJ*)A->data;
647:   Mat_SeqAIJ     *b          = (Mat_SeqAIJ*)B->data;
648:   PetscInt       cstart      = mat->cmap->rstart,cend = mat->cmap->rend,col;
649:   PetscInt       *ailen      = a->ilen,*aj = a->j;
650:   PetscInt       *bilen      = b->ilen,*bj = b->j;
651:   PetscInt       am          = aij->A->rmap->n,j;
652:   PetscInt       diag_so_far = 0,dnz;
653:   PetscInt       offd_so_far = 0,onz;

656:   /* Iterate over all rows of the matrix */
657:   for (j=0; j<am; j++) {
658:     dnz = onz = 0;
659:     /*  Iterate over all non-zero columns of the current row */
660:     for (col=mat_i[j]; col<mat_i[j+1]; col++) {
661:       /* If column is in the diagonal */
662:       if (mat_j[col] >= cstart && mat_j[col] < cend) {
663:         aj[diag_so_far++] = mat_j[col] - cstart;
664:         dnz++;
665:       } else { /* off-diagonal entries */
666:         bj[offd_so_far++] = mat_j[col];
667:         onz++;
668:       }
669:     }
670:     ailen[j] = dnz;
671:     bilen[j] = onz;
672:   }
673:   return(0);
674: }

676: /*
677:     This function sets the local j, a and ilen arrays (of the diagonal and off-diagonal part) of an MPIAIJ-matrix.
 678:     The row offsets in mat_i have to be non-decreasing and the column indices in mat_j have to be sorted within each row (CSR-like).
 679:     No off-processor parts of the matrix are allowed here; they are set at a later point by MatSetValues_MPIAIJ().
 680:     Also, mat->was_assembled has to be PETSC_FALSE; otherwise the assignment aj[rowstart_diag+dnz_row] = mat_j[col] - cstart;
 681:     would not be correct and the more complex MatSetValues_MPIAIJ() has to be used.
682: */
683: PetscErrorCode MatSetValues_MPIAIJ_CopyFromCSRFormat(Mat mat,const PetscInt mat_j[],const PetscInt mat_i[],const PetscScalar mat_a[])
684: {
685:   Mat_MPIAIJ     *aij   = (Mat_MPIAIJ*)mat->data;
686:   Mat            A      = aij->A; /* diagonal part of the matrix */
687:   Mat            B      = aij->B; /* offdiagonal part of the matrix */
688:   Mat_SeqAIJ     *aijd  =(Mat_SeqAIJ*)(aij->A)->data,*aijo=(Mat_SeqAIJ*)(aij->B)->data;
689:   Mat_SeqAIJ     *a     = (Mat_SeqAIJ*)A->data;
690:   Mat_SeqAIJ     *b     = (Mat_SeqAIJ*)B->data;
691:   PetscInt       cstart = mat->cmap->rstart,cend = mat->cmap->rend;
692:   PetscInt       *ailen = a->ilen,*aj = a->j;
693:   PetscInt       *bilen = b->ilen,*bj = b->j;
694:   PetscInt       am     = aij->A->rmap->n,j;
695:   PetscInt       *full_diag_i=aijd->i,*full_offd_i=aijo->i; /* These variables can also include non-local elements, which are set at a later point. */
696:   PetscInt       col,dnz_row,onz_row,rowstart_diag,rowstart_offd;
697:   PetscScalar    *aa = a->a,*ba = b->a;

700:   /* Iterate over all rows of the matrix */
701:   for (j=0; j<am; j++) {
702:     dnz_row = onz_row = 0;
703:     rowstart_offd = full_offd_i[j];
704:     rowstart_diag = full_diag_i[j];
705:     /*  Iterate over all non-zero columns of the current row */
706:     for (col=mat_i[j]; col<mat_i[j+1]; col++) {
707:       /* If column is in the diagonal */
708:       if (mat_j[col] >= cstart && mat_j[col] < cend) {
709:         aj[rowstart_diag+dnz_row] = mat_j[col] - cstart;
710:         aa[rowstart_diag+dnz_row] = mat_a[col];
711:         dnz_row++;
712:       } else { /* off-diagonal entries */
713:         bj[rowstart_offd+onz_row] = mat_j[col];
714:         ba[rowstart_offd+onz_row] = mat_a[col];
715:         onz_row++;
716:       }
717:     }
718:     ailen[j] = dnz_row;
719:     bilen[j] = onz_row;
720:   }
721:   return(0);
722: }

724: PetscErrorCode MatGetValues_MPIAIJ(Mat mat,PetscInt m,const PetscInt idxm[],PetscInt n,const PetscInt idxn[],PetscScalar v[])
725: {
726:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
728:   PetscInt       i,j,rstart = mat->rmap->rstart,rend = mat->rmap->rend;
729:   PetscInt       cstart = mat->cmap->rstart,cend = mat->cmap->rend,row,col;

732:   for (i=0; i<m; i++) {
733:     if (idxm[i] < 0) continue; /* SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Negative row: %D",idxm[i]);*/
734:     if (idxm[i] >= mat->rmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Row too large: row %D max %D",idxm[i],mat->rmap->N-1);
735:     if (idxm[i] >= rstart && idxm[i] < rend) {
736:       row = idxm[i] - rstart;
737:       for (j=0; j<n; j++) {
738:         if (idxn[j] < 0) continue; /* SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Negative column: %D",idxn[j]); */
739:         if (idxn[j] >= mat->cmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Column too large: col %D max %D",idxn[j],mat->cmap->N-1);
740:         if (idxn[j] >= cstart && idxn[j] < cend) {
741:           col  = idxn[j] - cstart;
742:           MatGetValues(aij->A,1,&row,1,&col,v+i*n+j);
743:         } else {
744:           if (!aij->colmap) {
745:             MatCreateColmap_MPIAIJ_Private(mat);
746:           }
747: #if defined(PETSC_USE_CTABLE)
748:           PetscTableFind(aij->colmap,idxn[j]+1,&col);
749:           col--;
750: #else
751:           col = aij->colmap[idxn[j]] - 1;
752: #endif
753:           if ((col < 0) || (aij->garray[col] != idxn[j])) *(v+i*n+j) = 0.0;
754:           else {
755:             MatGetValues(aij->B,1,&row,1,&col,v+i*n+j);
756:           }
757:         }
758:       }
759:     } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Only local values currently supported");
760:   }
761:   return(0);
762: }

764: extern PetscErrorCode MatMultDiagonalBlock_MPIAIJ(Mat,Vec,Vec);

766: PetscErrorCode MatAssemblyBegin_MPIAIJ(Mat mat,MatAssemblyType mode)
767: {
768:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
770:   PetscInt       nstash,reallocs;

773:   if (aij->donotstash || mat->nooffprocentries) return(0);

775:   MatStashScatterBegin_Private(mat,&mat->stash,mat->rmap->range);
776:   MatStashGetInfo_Private(&mat->stash,&nstash,&reallocs);
777:   PetscInfo2(aij->A,"Stash has %D entries, uses %D mallocs.\n",nstash,reallocs);
778:   return(0);
779: }

781: PetscErrorCode MatAssemblyEnd_MPIAIJ(Mat mat,MatAssemblyType mode)
782: {
783:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
784:   Mat_SeqAIJ     *a   = (Mat_SeqAIJ*)aij->A->data;
786:   PetscMPIInt    n;
787:   PetscInt       i,j,rstart,ncols,flg;
788:   PetscInt       *row,*col;
789:   PetscBool      other_disassembled;
790:   PetscScalar    *val;

792:   /* do not use 'b = (Mat_SeqAIJ*)aij->B->data' as B can be reset in disassembly */

795:   if (!aij->donotstash && !mat->nooffprocentries) {
796:     while (1) {
797:       MatStashScatterGetMesg_Private(&mat->stash,&n,&row,&col,&val,&flg);
798:       if (!flg) break;

800:       for (i=0; i<n; ) {
801:         /* Now identify the consecutive vals belonging to the same row */
802:         for (j=i,rstart=row[j]; j<n; j++) {
803:           if (row[j] != rstart) break;
804:         }
805:         if (j < n) ncols = j-i;
806:         else       ncols = n-i;
807:         /* Now assemble all these values with a single function call */
808:         MatSetValues_MPIAIJ(mat,1,row+i,ncols,col+i,val+i,mat->insertmode);

810:         i = j;
811:       }
812:     }
813:     MatStashScatterEnd_Private(&mat->stash);
814:   }
815:   MatAssemblyBegin(aij->A,mode);
816:   MatAssemblyEnd(aij->A,mode);

 818:   /* determine if any processor has disassembled; if so, we must
 819:      also disassemble ourselves, so that we may reassemble. */
820:   /*
 821:      if the nonzero structure of submatrix B cannot change then we know that
 822:      no processor disassembled and thus we can skip this step
823:   */
824:   if (!((Mat_SeqAIJ*)aij->B->data)->nonew) {
825:     MPIU_Allreduce(&mat->was_assembled,&other_disassembled,1,MPIU_BOOL,MPI_PROD,PetscObjectComm((PetscObject)mat));
826:     if (mat->was_assembled && !other_disassembled) {
827:       MatDisAssemble_MPIAIJ(mat);
828:     }
829:   }
830:   if (!mat->was_assembled && mode == MAT_FINAL_ASSEMBLY) {
831:     MatSetUpMultiply_MPIAIJ(mat);
832:   }
833:   MatSetOption(aij->B,MAT_USE_INODES,PETSC_FALSE);
834:   MatAssemblyBegin(aij->B,mode);
835:   MatAssemblyEnd(aij->B,mode);

837:   PetscFree2(aij->rowvalues,aij->rowindices);

839:   aij->rowvalues = 0;

841:   VecDestroy(&aij->diag);
842:   if (a->inode.size) mat->ops->multdiagonalblock = MatMultDiagonalBlock_MPIAIJ;

 844:   /* if no new nonzero locations are allowed in the matrix then only set the matrix nonzero state the first time through */
845:   if ((!mat->was_assembled && mode == MAT_FINAL_ASSEMBLY) || !((Mat_SeqAIJ*)(aij->A->data))->nonew) {
846:     PetscObjectState state = aij->A->nonzerostate + aij->B->nonzerostate;
847:     MPIU_Allreduce(&state,&mat->nonzerostate,1,MPIU_INT64,MPI_SUM,PetscObjectComm((PetscObject)mat));
848:   }
849:   return(0);
850: }

852: PetscErrorCode MatZeroEntries_MPIAIJ(Mat A)
853: {
854:   Mat_MPIAIJ     *l = (Mat_MPIAIJ*)A->data;

858:   MatZeroEntries(l->A);
859:   MatZeroEntries(l->B);
860:   return(0);
861: }

863: PetscErrorCode MatZeroRows_MPIAIJ(Mat A,PetscInt N,const PetscInt rows[],PetscScalar diag,Vec x,Vec b)
864: {
865:   Mat_MPIAIJ    *mat    = (Mat_MPIAIJ *) A->data;
866:   PetscInt      *lrows;
867:   PetscInt       r, len;
868:   PetscBool      cong;

872:   /* get locally owned rows */
873:   MatZeroRowsMapLocal_Private(A,N,rows,&len,&lrows);
874:   /* fix right hand side if needed */
875:   if (x && b) {
876:     const PetscScalar *xx;
877:     PetscScalar       *bb;

879:     VecGetArrayRead(x, &xx);
880:     VecGetArray(b, &bb);
881:     for (r = 0; r < len; ++r) bb[lrows[r]] = diag*xx[lrows[r]];
882:     VecRestoreArrayRead(x, &xx);
883:     VecRestoreArray(b, &bb);
884:   }
 885:   /* Must zero mat->B before mat->A because the (diag != 0.0) case below may put values into mat->B */
886:   MatZeroRows(mat->B, len, lrows, 0.0, NULL, NULL);
887:   MatHasCongruentLayouts(A,&cong);
888:   if ((diag != 0.0) && cong) {
889:     MatZeroRows(mat->A, len, lrows, diag, NULL, NULL);
890:   } else if (diag != 0.0) {
891:     MatZeroRows(mat->A, len, lrows, 0.0, NULL, NULL);
892:     if (((Mat_SeqAIJ *) mat->A->data)->nonew) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_SUP, "MatZeroRows() on rectangular matrices cannot be used with the Mat options\nMAT_NEW_NONZERO_LOCATIONS,MAT_NEW_NONZERO_LOCATION_ERR,MAT_NEW_NONZERO_ALLOCATION_ERR");
893:     for (r = 0; r < len; ++r) {
894:       const PetscInt row = lrows[r] + A->rmap->rstart;
895:       MatSetValues(A, 1, &row, 1, &row, &diag, INSERT_VALUES);
896:     }
897:     MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY);
898:     MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY);
899:   } else {
900:     MatZeroRows(mat->A, len, lrows, 0.0, NULL, NULL);
901:   }
902:   PetscFree(lrows);

904:   /* only change matrix nonzero state if pattern was allowed to be changed */
905:   if (!((Mat_SeqAIJ*)(mat->A->data))->keepnonzeropattern) {
906:     PetscObjectState state = mat->A->nonzerostate + mat->B->nonzerostate;
907:     MPIU_Allreduce(&state,&A->nonzerostate,1,MPIU_INT64,MPI_SUM,PetscObjectComm((PetscObject)A));
908:   }
909:   return(0);
910: }

912: PetscErrorCode MatZeroRowsColumns_MPIAIJ(Mat A,PetscInt N,const PetscInt rows[],PetscScalar diag,Vec x,Vec b)
913: {
914:   Mat_MPIAIJ        *l = (Mat_MPIAIJ*)A->data;
915:   PetscErrorCode    ierr;
916:   PetscMPIInt       n = A->rmap->n;
917:   PetscInt          i,j,r,m,p = 0,len = 0;
918:   PetscInt          *lrows,*owners = A->rmap->range;
919:   PetscSFNode       *rrows;
920:   PetscSF           sf;
921:   const PetscScalar *xx;
922:   PetscScalar       *bb,*mask;
923:   Vec               xmask,lmask;
924:   Mat_SeqAIJ        *aij = (Mat_SeqAIJ*)l->B->data;
925:   const PetscInt    *aj, *ii,*ridx;
926:   PetscScalar       *aa;

929:   /* Create SF where leaves are input rows and roots are owned rows */
930:   PetscMalloc1(n, &lrows);
931:   for (r = 0; r < n; ++r) lrows[r] = -1;
932:   PetscMalloc1(N, &rrows);
933:   for (r = 0; r < N; ++r) {
934:     const PetscInt idx   = rows[r];
935:     if (idx < 0 || A->rmap->N <= idx) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Row %D out of range [0,%D)",idx,A->rmap->N);
936:     if (idx < owners[p] || owners[p+1] <= idx) { /* short-circuit the search if the last p owns this row too */
937:       PetscLayoutFindOwner(A->rmap,idx,&p);
938:     }
939:     rrows[r].rank  = p;
940:     rrows[r].index = rows[r] - owners[p];
941:   }
942:   PetscSFCreate(PetscObjectComm((PetscObject) A), &sf);
943:   PetscSFSetGraph(sf, n, N, NULL, PETSC_OWN_POINTER, rrows, PETSC_OWN_POINTER);
944:   /* Collect flags for rows to be zeroed */
945:   PetscSFReduceBegin(sf, MPIU_INT, (PetscInt *) rows, lrows, MPI_LOR);
946:   PetscSFReduceEnd(sf, MPIU_INT, (PetscInt *) rows, lrows, MPI_LOR);
947:   PetscSFDestroy(&sf);
948:   /* Compress and put in row numbers */
949:   for (r = 0; r < n; ++r) if (lrows[r] >= 0) lrows[len++] = r;
950:   /* zero diagonal part of matrix */
951:   MatZeroRowsColumns(l->A,len,lrows,diag,x,b);
952:   /* handle off diagonal part of matrix */
953:   MatCreateVecs(A,&xmask,NULL);
954:   VecDuplicate(l->lvec,&lmask);
955:   VecGetArray(xmask,&bb);
956:   for (i=0; i<len; i++) bb[lrows[i]] = 1;
957:   VecRestoreArray(xmask,&bb);
958:   VecScatterBegin(l->Mvctx,xmask,lmask,ADD_VALUES,SCATTER_FORWARD);
959:   VecScatterEnd(l->Mvctx,xmask,lmask,ADD_VALUES,SCATTER_FORWARD);
960:   VecDestroy(&xmask);
961:   if (x) {
962:     VecScatterBegin(l->Mvctx,x,l->lvec,INSERT_VALUES,SCATTER_FORWARD);
963:     VecScatterEnd(l->Mvctx,x,l->lvec,INSERT_VALUES,SCATTER_FORWARD);
964:     VecGetArrayRead(l->lvec,&xx);
965:     VecGetArray(b,&bb);
966:   }
967:   VecGetArray(lmask,&mask);
968:   /* remove zeroed rows of off diagonal matrix */
969:   ii = aij->i;
970:   for (i=0; i<len; i++) {
971:     PetscMemzero(aij->a + ii[lrows[i]],(ii[lrows[i]+1] - ii[lrows[i]])*sizeof(PetscScalar));
972:   }
 973:   /* loop over all elements of the off-process part of the matrix, zeroing removed columns */
974:   if (aij->compressedrow.use) {
975:     m    = aij->compressedrow.nrows;
976:     ii   = aij->compressedrow.i;
977:     ridx = aij->compressedrow.rindex;
978:     for (i=0; i<m; i++) {
979:       n  = ii[i+1] - ii[i];
980:       aj = aij->j + ii[i];
981:       aa = aij->a + ii[i];

983:       for (j=0; j<n; j++) {
984:         if (PetscAbsScalar(mask[*aj])) {
985:           if (b) bb[*ridx] -= *aa*xx[*aj];
986:           *aa = 0.0;
987:         }
988:         aa++;
989:         aj++;
990:       }
991:       ridx++;
992:     }
993:   } else { /* do not use compressed row format */
994:     m = l->B->rmap->n;
995:     for (i=0; i<m; i++) {
996:       n  = ii[i+1] - ii[i];
997:       aj = aij->j + ii[i];
998:       aa = aij->a + ii[i];
999:       for (j=0; j<n; j++) {
1000:         if (PetscAbsScalar(mask[*aj])) {
1001:           if (b) bb[i] -= *aa*xx[*aj];
1002:           *aa = 0.0;
1003:         }
1004:         aa++;
1005:         aj++;
1006:       }
1007:     }
1008:   }
1009:   if (x) {
1010:     VecRestoreArray(b,&bb);
1011:     VecRestoreArrayRead(l->lvec,&xx);
1012:   }
1013:   VecRestoreArray(lmask,&mask);
1014:   VecDestroy(&lmask);
1015:   PetscFree(lrows);

1017:   /* only change matrix nonzero state if pattern was allowed to be changed */
1018:   if (!((Mat_SeqAIJ*)(l->A->data))->keepnonzeropattern) {
1019:     PetscObjectState state = l->A->nonzerostate + l->B->nonzerostate;
1020:     MPIU_Allreduce(&state,&A->nonzerostate,1,MPIU_INT64,MPI_SUM,PetscObjectComm((PetscObject)A));
1021:   }
1022:   return(0);
1023: }

1025: PetscErrorCode MatMult_MPIAIJ(Mat A,Vec xx,Vec yy)
1026: {
1027:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
1029:   PetscInt       nt;
1030:   VecScatter     Mvctx = a->Mvctx;

1033:   VecGetLocalSize(xx,&nt);
1034:   if (nt != A->cmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Incompatible partition of A (%D) and xx (%D)",A->cmap->n,nt);

1036:   VecScatterBegin(Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
1037:   (*a->A->ops->mult)(a->A,xx,yy);
1038:   VecScatterEnd(Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
1039:   (*a->B->ops->multadd)(a->B,a->lvec,yy,yy);
1040:   return(0);
1041: }

1043: PetscErrorCode MatMultDiagonalBlock_MPIAIJ(Mat A,Vec bb,Vec xx)
1044: {
1045:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;

1049:   MatMultDiagonalBlock(a->A,bb,xx);
1050:   return(0);
1051: }

1053: PetscErrorCode MatMultAdd_MPIAIJ(Mat A,Vec xx,Vec yy,Vec zz)
1054: {
1055:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
1057:   VecScatter     Mvctx = a->Mvctx;

1060:   if (a->Mvctx_mpi1_flg) Mvctx = a->Mvctx_mpi1;
1061:   VecScatterBegin(Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
1062:   (*a->A->ops->multadd)(a->A,xx,yy,zz);
1063:   VecScatterEnd(Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
1064:   (*a->B->ops->multadd)(a->B,a->lvec,zz,zz);
1065:   return(0);
1066: }

1068: PetscErrorCode MatMultTranspose_MPIAIJ(Mat A,Vec xx,Vec yy)
1069: {
1070:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;

1074:   /* do nondiagonal part */
1075:   (*a->B->ops->multtranspose)(a->B,xx,a->lvec);
1076:   /* do local part */
1077:   (*a->A->ops->multtranspose)(a->A,xx,yy);
1078:   /* add partial results together */
1079:   VecScatterBegin(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);
1080:   VecScatterEnd(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);
1081:   return(0);
1082: }

1084: PetscErrorCode MatIsTranspose_MPIAIJ(Mat Amat,Mat Bmat,PetscReal tol,PetscBool  *f)
1085: {
1086:   MPI_Comm       comm;
1087:   Mat_MPIAIJ     *Aij = (Mat_MPIAIJ*) Amat->data, *Bij;
1088:   Mat            Adia = Aij->A, Bdia, Aoff,Boff,*Aoffs,*Boffs;
1089:   IS             Me,Notme;
1091:   PetscInt       M,N,first,last,*notme,i;
1092:   PetscBool      lf;
1093:   PetscMPIInt    size;

1096:   /* Easy test: symmetric diagonal block */
1097:   Bij  = (Mat_MPIAIJ*) Bmat->data; Bdia = Bij->A;
1098:   MatIsTranspose(Adia,Bdia,tol,&lf);
1099:   MPIU_Allreduce(&lf,f,1,MPIU_BOOL,MPI_LAND,PetscObjectComm((PetscObject)Amat));
1100:   if (!*f) return(0);
1101:   PetscObjectGetComm((PetscObject)Amat,&comm);
1102:   MPI_Comm_size(comm,&size);
1103:   if (size == 1) return(0);

1105:   /* Hard test: off-diagonal block. This takes a MatCreateSubMatrix. */
1106:   MatGetSize(Amat,&M,&N);
1107:   MatGetOwnershipRange(Amat,&first,&last);
1108:   PetscMalloc1(N-last+first,&notme);
1109:   for (i=0; i<first; i++) notme[i] = i;
1110:   for (i=last; i<M; i++) notme[i-last+first] = i;
1111:   ISCreateGeneral(MPI_COMM_SELF,N-last+first,notme,PETSC_COPY_VALUES,&Notme);
1112:   ISCreateStride(MPI_COMM_SELF,last-first,first,1,&Me);
1113:   MatCreateSubMatrices(Amat,1,&Me,&Notme,MAT_INITIAL_MATRIX,&Aoffs);
1114:   Aoff = Aoffs[0];
1115:   MatCreateSubMatrices(Bmat,1,&Notme,&Me,MAT_INITIAL_MATRIX,&Boffs);
1116:   Boff = Boffs[0];
1117:   MatIsTranspose(Aoff,Boff,tol,f);
1118:   MatDestroyMatrices(1,&Aoffs);
1119:   MatDestroyMatrices(1,&Boffs);
1120:   ISDestroy(&Me);
1121:   ISDestroy(&Notme);
1122:   PetscFree(notme);
1123:   return(0);
1124: }

1126: PetscErrorCode MatIsSymmetric_MPIAIJ(Mat A,PetscReal tol,PetscBool  *f)
1127: {

1131:   MatIsTranspose_MPIAIJ(A,A,tol,f);
1132:   return(0);
1133: }

1135: PetscErrorCode MatMultTransposeAdd_MPIAIJ(Mat A,Vec xx,Vec yy,Vec zz)
1136: {
1137:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;

1141:   /* do nondiagonal part */
1142:   (*a->B->ops->multtranspose)(a->B,xx,a->lvec);
1143:   /* do local part */
1144:   (*a->A->ops->multtransposeadd)(a->A,xx,yy,zz);
1145:   /* add partial results together */
1146:   VecScatterBegin(a->Mvctx,a->lvec,zz,ADD_VALUES,SCATTER_REVERSE);
1147:   VecScatterEnd(a->Mvctx,a->lvec,zz,ADD_VALUES,SCATTER_REVERSE);
1148:   return(0);
1149: }

1151: /*
1152:   This only works correctly for square matrices where the subblock A->A is the
1153:    diagonal block
1154: */
1155: PetscErrorCode MatGetDiagonal_MPIAIJ(Mat A,Vec v)
1156: {
1158:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;

1161:   if (A->rmap->N != A->cmap->N) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Supports only square matrix where A->A is diag block");
1162:   if (A->rmap->rstart != A->cmap->rstart || A->rmap->rend != A->cmap->rend) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"row partition must equal col partition");
1163:   MatGetDiagonal(a->A,v);
1164:   return(0);
1165: }

1167: PetscErrorCode MatScale_MPIAIJ(Mat A,PetscScalar aa)
1168: {
1169:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;

1173:   MatScale(a->A,aa);
1174:   MatScale(a->B,aa);
1175:   return(0);
1176: }

1178: PetscErrorCode MatDestroy_MPIAIJ(Mat mat)
1179: {
1180:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;

1184: #if defined(PETSC_USE_LOG)
1185:   PetscLogObjectState((PetscObject)mat,"Rows=%D, Cols=%D",mat->rmap->N,mat->cmap->N);
1186: #endif
1187:   MatStashDestroy_Private(&mat->stash);
1188:   VecDestroy(&aij->diag);
1189:   MatDestroy(&aij->A);
1190:   MatDestroy(&aij->B);
1191: #if defined(PETSC_USE_CTABLE)
1192:   PetscTableDestroy(&aij->colmap);
1193: #else
1194:   PetscFree(aij->colmap);
1195: #endif
1196:   PetscFree(aij->garray);
1197:   VecDestroy(&aij->lvec);
1198:   VecScatterDestroy(&aij->Mvctx);
1199:   if (aij->Mvctx_mpi1) {VecScatterDestroy(&aij->Mvctx_mpi1);}
1200:   PetscFree2(aij->rowvalues,aij->rowindices);
1201:   PetscFree(aij->ld);
1202:   PetscFree(mat->data);

1204:   PetscObjectChangeTypeName((PetscObject)mat,0);
1205:   PetscObjectComposeFunction((PetscObject)mat,"MatStoreValues_C",NULL);
1206:   PetscObjectComposeFunction((PetscObject)mat,"MatRetrieveValues_C",NULL);
1207:   PetscObjectComposeFunction((PetscObject)mat,"MatIsTranspose_C",NULL);
1208:   PetscObjectComposeFunction((PetscObject)mat,"MatMPIAIJSetPreallocation_C",NULL);
1209:   PetscObjectComposeFunction((PetscObject)mat,"MatResetPreallocation_C",NULL);
1210:   PetscObjectComposeFunction((PetscObject)mat,"MatMPIAIJSetPreallocationCSR_C",NULL);
1211:   PetscObjectComposeFunction((PetscObject)mat,"MatDiagonalScaleLocal_C",NULL);
1212:   PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpiaij_mpisbaij_C",NULL);
1213: #if defined(PETSC_HAVE_ELEMENTAL)
1214:   PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpiaij_elemental_C",NULL);
1215: #endif
1216: #if defined(PETSC_HAVE_HYPRE)
1217:   PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpiaij_hypre_C",NULL);
1218:   PetscObjectComposeFunction((PetscObject)mat,"MatMatMatMult_transpose_mpiaij_mpiaij_C",NULL);
1219: #endif
1220:   PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpiaij_is_C",NULL);
1221:   PetscObjectComposeFunction((PetscObject)mat,"MatPtAP_is_mpiaij_C",NULL);
1222:   return(0);
1223: }

1225: PetscErrorCode MatView_MPIAIJ_Binary(Mat mat,PetscViewer viewer)
1226: {
1227:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
1228:   Mat_SeqAIJ     *A   = (Mat_SeqAIJ*)aij->A->data;
1229:   Mat_SeqAIJ     *B   = (Mat_SeqAIJ*)aij->B->data;
1231:   PetscMPIInt    rank,size,tag = ((PetscObject)viewer)->tag;
1232:   int            fd;
1233:   PetscInt       nz,header[4],*row_lengths,*range=0,rlen,i;
1234:   PetscInt       nzmax,*column_indices,j,k,col,*garray = aij->garray,cnt,cstart = mat->cmap->rstart,rnz = 0;
1235:   PetscScalar    *column_values;
1236:   PetscInt       message_count,flowcontrolcount;
1237:   FILE           *file;

1240:   MPI_Comm_rank(PetscObjectComm((PetscObject)mat),&rank);
1241:   MPI_Comm_size(PetscObjectComm((PetscObject)mat),&size);
1242:   nz   = A->nz + B->nz;
1243:   PetscViewerBinaryGetDescriptor(viewer,&fd);
1244:   if (!rank) {
1245:     header[0] = MAT_FILE_CLASSID;
1246:     header[1] = mat->rmap->N;
1247:     header[2] = mat->cmap->N;

1249:     MPI_Reduce(&nz,&header[3],1,MPIU_INT,MPI_SUM,0,PetscObjectComm((PetscObject)mat));
1250:     PetscBinaryWrite(fd,header,4,PETSC_INT,PETSC_TRUE);
1251:     /* get largest number of rows any processor has */
1252:     rlen  = mat->rmap->n;
1253:     range = mat->rmap->range;
1254:     for (i=1; i<size; i++) rlen = PetscMax(rlen,range[i+1] - range[i]);
1255:   } else {
1256:     MPI_Reduce(&nz,0,1,MPIU_INT,MPI_SUM,0,PetscObjectComm((PetscObject)mat));
1257:     rlen = mat->rmap->n;
1258:   }

1260:   /* load up the local row counts */
1261:   PetscMalloc1(rlen+1,&row_lengths);
1262:   for (i=0; i<mat->rmap->n; i++) row_lengths[i] = A->i[i+1] - A->i[i] + B->i[i+1] - B->i[i];

1264:   /* store the row lengths to the file */
1265:   PetscViewerFlowControlStart(viewer,&message_count,&flowcontrolcount);
1266:   if (!rank) {
1267:     PetscBinaryWrite(fd,row_lengths,mat->rmap->n,PETSC_INT,PETSC_TRUE);
1268:     for (i=1; i<size; i++) {
1269:       PetscViewerFlowControlStepMaster(viewer,i,&message_count,flowcontrolcount);
1270:       rlen = range[i+1] - range[i];
1271:       MPIULong_Recv(row_lengths,rlen,MPIU_INT,i,tag,PetscObjectComm((PetscObject)mat));
1272:       PetscBinaryWrite(fd,row_lengths,rlen,PETSC_INT,PETSC_TRUE);
1273:     }
1274:     PetscViewerFlowControlEndMaster(viewer,&message_count);
1275:   } else {
1276:     PetscViewerFlowControlStepWorker(viewer,rank,&message_count);
1277:     MPIULong_Send(row_lengths,mat->rmap->n,MPIU_INT,0,tag,PetscObjectComm((PetscObject)mat));
1278:     PetscViewerFlowControlEndWorker(viewer,&message_count);
1279:   }
1280:   PetscFree(row_lengths);

1282:   /* load up the local column indices */
1283:   nzmax = nz; /* process 0 needs as much space as the largest processor needs */
1284:   MPI_Reduce(&nz,&nzmax,1,MPIU_INT,MPI_MAX,0,PetscObjectComm((PetscObject)mat));
1285:   PetscMalloc1(nzmax+1,&column_indices);
1286:   cnt   = 0;
1287:   for (i=0; i<mat->rmap->n; i++) {
1288:     for (j=B->i[i]; j<B->i[i+1]; j++) {
1289:       if ((col = garray[B->j[j]]) > cstart) break;
1290:       column_indices[cnt++] = col;
1291:     }
1292:     for (k=A->i[i]; k<A->i[i+1]; k++) column_indices[cnt++] = A->j[k] + cstart;
1293:     for (; j<B->i[i+1]; j++) column_indices[cnt++] = garray[B->j[j]];
1294:   }
1295:   if (cnt != A->nz + B->nz) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_LIB,"Internal PETSc error: cnt = %D nz = %D",cnt,A->nz+B->nz);

1297:   /* store the column indices to the file */
1298:   PetscViewerFlowControlStart(viewer,&message_count,&flowcontrolcount);
1299:   if (!rank) {
1300:     MPI_Status status;
1301:     PetscBinaryWrite(fd,column_indices,nz,PETSC_INT,PETSC_TRUE);
1302:     for (i=1; i<size; i++) {
1303:       PetscViewerFlowControlStepMaster(viewer,i,&message_count,flowcontrolcount);
1304:       MPI_Recv(&rnz,1,MPIU_INT,i,tag,PetscObjectComm((PetscObject)mat),&status);
1305:       if (rnz > nzmax) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_LIB,"Internal PETSc error: rnz = %D nzmax = %D",rnz,nzmax);
1306:       MPIULong_Recv(column_indices,rnz,MPIU_INT,i,tag,PetscObjectComm((PetscObject)mat));
1307:       PetscBinaryWrite(fd,column_indices,rnz,PETSC_INT,PETSC_TRUE);
1308:     }
1309:     PetscViewerFlowControlEndMaster(viewer,&message_count);
1310:   } else {
1311:     PetscViewerFlowControlStepWorker(viewer,rank,&message_count);
1312:     MPI_Send(&nz,1,MPIU_INT,0,tag,PetscObjectComm((PetscObject)mat));
1313:     MPIULong_Send(column_indices,nz,MPIU_INT,0,tag,PetscObjectComm((PetscObject)mat));
1314:     PetscViewerFlowControlEndWorker(viewer,&message_count);
1315:   }
1316:   PetscFree(column_indices);

1318:   /* load up the local column values */
1319:   PetscMalloc1(nzmax+1,&column_values);
1320:   cnt  = 0;
1321:   for (i=0; i<mat->rmap->n; i++) {
1322:     for (j=B->i[i]; j<B->i[i+1]; j++) {
1323:       if (garray[B->j[j]] > cstart) break;
1324:       column_values[cnt++] = B->a[j];
1325:     }
1326:     for (k=A->i[i]; k<A->i[i+1]; k++) column_values[cnt++] = A->a[k];
1327:     for (; j<B->i[i+1]; j++) column_values[cnt++] = B->a[j];
1328:   }
1329:   if (cnt != A->nz + B->nz) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Internal PETSc error: cnt = %D nz = %D",cnt,A->nz+B->nz);

1331:   /* store the column values to the file */
1332:   PetscViewerFlowControlStart(viewer,&message_count,&flowcontrolcount);
1333:   if (!rank) {
1334:     MPI_Status status;
1335:     PetscBinaryWrite(fd,column_values,nz,PETSC_SCALAR,PETSC_TRUE);
1336:     for (i=1; i<size; i++) {
1337:       PetscViewerFlowControlStepMaster(viewer,i,&message_count,flowcontrolcount);
1338:       MPI_Recv(&rnz,1,MPIU_INT,i,tag,PetscObjectComm((PetscObject)mat),&status);
1339:       if (rnz > nzmax) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_LIB,"Internal PETSc error: rnz = %D nzmax = %D",rnz,nzmax);
1340:       MPIULong_Recv(column_values,rnz,MPIU_SCALAR,i,tag,PetscObjectComm((PetscObject)mat));
1341:       PetscBinaryWrite(fd,column_values,rnz,PETSC_SCALAR,PETSC_TRUE);
1342:     }
1343:     PetscViewerFlowControlEndMaster(viewer,&message_count);
1344:   } else {
1345:     PetscViewerFlowControlStepWorker(viewer,rank,&message_count);
1346:     MPI_Send(&nz,1,MPIU_INT,0,tag,PetscObjectComm((PetscObject)mat));
1347:     MPIULong_Send(column_values,nz,MPIU_SCALAR,0,tag,PetscObjectComm((PetscObject)mat));
1348:     PetscViewerFlowControlEndWorker(viewer,&message_count);
1349:   }
1350:   PetscFree(column_values);

1352:   PetscViewerBinaryGetInfoPointer(viewer,&file);
1353:   if (file) fprintf(file,"-matload_block_size %d\n",(int)PetscAbs(mat->rmap->bs));
1354:   return(0);
1355: }

1357:  #include <petscdraw.h>
1358: PetscErrorCode MatView_MPIAIJ_ASCIIorDraworSocket(Mat mat,PetscViewer viewer)
1359: {
1360:   Mat_MPIAIJ        *aij = (Mat_MPIAIJ*)mat->data;
1361:   PetscErrorCode    ierr;
1362:   PetscMPIInt       rank = aij->rank,size = aij->size;
1363:   PetscBool         isdraw,iascii,isbinary;
1364:   PetscViewer       sviewer;
1365:   PetscViewerFormat format;

1368:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERDRAW,&isdraw);
1369:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);
1370:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERBINARY,&isbinary);
1371:   if (iascii) {
1372:     PetscViewerGetFormat(viewer,&format);
1373:     if (format == PETSC_VIEWER_LOAD_BALANCE) {
1374:       PetscInt i,nmax = 0,nmin = PETSC_MAX_INT,navg = 0,*nz,nzlocal = ((Mat_SeqAIJ*) (aij->A->data))->nz + ((Mat_SeqAIJ*) (aij->B->data))->nz;
1375:       PetscMalloc1(size,&nz);
1376:       MPI_Allgather(&nzlocal,1,MPIU_INT,nz,1,MPIU_INT,PetscObjectComm((PetscObject)mat));
1377:       for (i=0; i<(PetscInt)size; i++) {
1378:         nmax = PetscMax(nmax,nz[i]);
1379:         nmin = PetscMin(nmin,nz[i]);
1380:         navg += nz[i];
1381:       }
1382:       PetscFree(nz);
1383:       navg = navg/size;
1384:       PetscViewerASCIIPrintf(viewer,"Load Balance - Nonzeros: Min %D  avg %D  max %D\n",nmin,navg,nmax);
1385:       return(0);
1386:     }
1387:     PetscViewerGetFormat(viewer,&format);
1388:     if (format == PETSC_VIEWER_ASCII_INFO_DETAIL) {
1389:       MatInfo   info;
1390:       PetscBool inodes;

1392:       MPI_Comm_rank(PetscObjectComm((PetscObject)mat),&rank);
1393:       MatGetInfo(mat,MAT_LOCAL,&info);
1394:       MatInodeGetInodeSizes(aij->A,NULL,(PetscInt**)&inodes,NULL);
1395:       PetscViewerASCIIPushSynchronized(viewer);
1396:       if (!inodes) {
1397:         PetscViewerASCIISynchronizedPrintf(viewer,"[%d] Local rows %D nz %D nz alloced %D mem %g, not using I-node routines\n",
1398:                                                   rank,mat->rmap->n,(PetscInt)info.nz_used,(PetscInt)info.nz_allocated,(double)info.memory);
1399:       } else {
1400:         PetscViewerASCIISynchronizedPrintf(viewer,"[%d] Local rows %D nz %D nz alloced %D mem %g, using I-node routines\n",
1401:                                                   rank,mat->rmap->n,(PetscInt)info.nz_used,(PetscInt)info.nz_allocated,(double)info.memory);
1402:       }
1403:       MatGetInfo(aij->A,MAT_LOCAL,&info);
1404:       PetscViewerASCIISynchronizedPrintf(viewer,"[%d] on-diagonal part: nz %D \n",rank,(PetscInt)info.nz_used);
1405:       MatGetInfo(aij->B,MAT_LOCAL,&info);
1406:       PetscViewerASCIISynchronizedPrintf(viewer,"[%d] off-diagonal part: nz %D \n",rank,(PetscInt)info.nz_used);
1407:       PetscViewerFlush(viewer);
1408:       PetscViewerASCIIPopSynchronized(viewer);
1409:       PetscViewerASCIIPrintf(viewer,"Information on VecScatter used in matrix-vector product: \n");
1410:       VecScatterView(aij->Mvctx,viewer);
1411:       return(0);
1412:     } else if (format == PETSC_VIEWER_ASCII_INFO) {
1413:       PetscInt inodecount,inodelimit,*inodes;
1414:       MatInodeGetInodeSizes(aij->A,&inodecount,&inodes,&inodelimit);
1415:       if (inodes) {
1416:         PetscViewerASCIIPrintf(viewer,"using I-node (on process 0) routines: found %D nodes, limit used is %D\n",inodecount,inodelimit);
1417:       } else {
1418:         PetscViewerASCIIPrintf(viewer,"not using I-node (on process 0) routines\n");
1419:       }
1420:       return(0);
1421:     } else if (format == PETSC_VIEWER_ASCII_FACTOR_INFO) {
1422:       return(0);
1423:     }
1424:   } else if (isbinary) {
1425:     if (size == 1) {
1426:       PetscObjectSetName((PetscObject)aij->A,((PetscObject)mat)->name);
1427:       MatView(aij->A,viewer);
1428:     } else {
1429:       MatView_MPIAIJ_Binary(mat,viewer);
1430:     }
1431:     return(0);
1432:   } else if (iascii && size == 1) {
1433:     PetscObjectSetName((PetscObject)aij->A,((PetscObject)mat)->name);
1434:     MatView(aij->A,viewer);
1435:     return(0);
1436:   } else if (isdraw) {
1437:     PetscDraw draw;
1438:     PetscBool isnull;
1439:     PetscViewerDrawGetDraw(viewer,0,&draw);
1440:     PetscDrawIsNull(draw,&isnull);
1441:     if (isnull) return(0);
1442:   }

1444:   { /* assemble the entire matrix onto first processor */
1445:     Mat A = NULL, Av;
1446:     IS  isrow,iscol;

1448:     ISCreateStride(PetscObjectComm((PetscObject)mat),!rank ? mat->rmap->N : 0,0,1,&isrow);
1449:     ISCreateStride(PetscObjectComm((PetscObject)mat),!rank ? mat->cmap->N : 0,0,1,&iscol);
1450:     MatCreateSubMatrix(mat,isrow,iscol,MAT_INITIAL_MATRIX,&A);
1451:     MatMPIAIJGetSeqAIJ(A,&Av,NULL,NULL);
1452: /*  The commented code uses MatCreateSubMatrices instead */
1453: /*
1454:     Mat *AA, A = NULL, Av;
1455:     IS  isrow,iscol;

1457:     ISCreateStride(PetscObjectComm((PetscObject)mat),!rank ? mat->rmap->N : 0,0,1,&isrow);
1458:     ISCreateStride(PetscObjectComm((PetscObject)mat),!rank ? mat->cmap->N : 0,0,1,&iscol);
1459:     MatCreateSubMatrices(mat,1,&isrow,&iscol,MAT_INITIAL_MATRIX,&AA);
1460:     if (!rank) {
1461:        PetscObjectReference((PetscObject)AA[0]);
1462:        A    = AA[0];
1463:        Av   = AA[0];
1464:     }
1465:     MatDestroySubMatrices(1,&AA);
1466: */
1467:     ISDestroy(&iscol);
1468:     ISDestroy(&isrow);
1469:     /*
1470:        Every process has to make the call to draw the matrix since the graphics waits are
1471:        synchronized across all processes that share the PetscDraw object
1472:     */
1473:     PetscViewerGetSubViewer(viewer,PETSC_COMM_SELF,&sviewer);
1474:     if (!rank) {
1475:       if (((PetscObject)mat)->name) {
1476:         PetscObjectSetName((PetscObject)Av,((PetscObject)mat)->name);
1477:       }
1478:       MatView_SeqAIJ(Av,sviewer);
1479:     }
1480:     PetscViewerRestoreSubViewer(viewer,PETSC_COMM_SELF,&sviewer);
1481:     PetscViewerFlush(viewer);
1482:     MatDestroy(&A);
1483:   }
1484:   return(0);
1485: }

1487: PetscErrorCode MatView_MPIAIJ(Mat mat,PetscViewer viewer)
1488: {
1490:   PetscBool      iascii,isdraw,issocket,isbinary;

1493:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);
1494:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERDRAW,&isdraw);
1495:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERBINARY,&isbinary);
1496:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERSOCKET,&issocket);
1497:   if (iascii || isdraw || isbinary || issocket) {
1498:     MatView_MPIAIJ_ASCIIorDraworSocket(mat,viewer);
1499:   }
1500:   return(0);
1501: }
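
/*
   A minimal usage sketch for viewing a MATMPIAIJ matrix (assuming an assembled
   parallel matrix A created elsewhere):

      PetscViewer viewer = PETSC_VIEWER_STDOUT_WORLD;

      PetscViewerPushFormat(viewer,PETSC_VIEWER_ASCII_INFO_DETAIL);
      MatView(A,viewer);                          per-process rows, nonzeros and memory
      PetscViewerPopFormat(viewer);

      PetscViewerPushFormat(viewer,PETSC_VIEWER_LOAD_BALANCE);
      MatView(A,viewer);                          min/avg/max nonzeros over all processes
      PetscViewerPopFormat(viewer);

   The same output can be requested on the command line with
   -mat_view ::ascii_info_detail or -mat_view ::load_balance.
*/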

1503: PetscErrorCode MatSOR_MPIAIJ(Mat matin,Vec bb,PetscReal omega,MatSORType flag,PetscReal fshift,PetscInt its,PetscInt lits,Vec xx)
1504: {
1505:   Mat_MPIAIJ     *mat = (Mat_MPIAIJ*)matin->data;
1507:   Vec            bb1 = 0;
1508:   PetscBool      hasop;

1511:   if (flag == SOR_APPLY_UPPER) {
1512:     (*mat->A->ops->sor)(mat->A,bb,omega,flag,fshift,lits,1,xx);
1513:     return(0);
1514:   }

1516:   if (its > 1 || ~flag & SOR_ZERO_INITIAL_GUESS || flag & SOR_EISENSTAT) {
1517:     VecDuplicate(bb,&bb1);
1518:   }

1520:   if ((flag & SOR_LOCAL_SYMMETRIC_SWEEP) == SOR_LOCAL_SYMMETRIC_SWEEP) {
1521:     if (flag & SOR_ZERO_INITIAL_GUESS) {
1522:       (*mat->A->ops->sor)(mat->A,bb,omega,flag,fshift,lits,1,xx);
1523:       its--;
1524:     }

1526:     while (its--) {
1527:       VecScatterBegin(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
1528:       VecScatterEnd(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);

1530:       /* update rhs: bb1 = bb - B*x */
1531:       VecScale(mat->lvec,-1.0);
1532:       (*mat->B->ops->multadd)(mat->B,mat->lvec,bb,bb1);

1534:       /* local sweep */
1535:       (*mat->A->ops->sor)(mat->A,bb1,omega,SOR_SYMMETRIC_SWEEP,fshift,lits,1,xx);
1536:     }
1537:   } else if (flag & SOR_LOCAL_FORWARD_SWEEP) {
1538:     if (flag & SOR_ZERO_INITIAL_GUESS) {
1539:       (*mat->A->ops->sor)(mat->A,bb,omega,flag,fshift,lits,1,xx);
1540:       its--;
1541:     }
1542:     while (its--) {
1543:       VecScatterBegin(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
1544:       VecScatterEnd(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);

1546:       /* update rhs: bb1 = bb - B*x */
1547:       VecScale(mat->lvec,-1.0);
1548:       (*mat->B->ops->multadd)(mat->B,mat->lvec,bb,bb1);

1550:       /* local sweep */
1551:       (*mat->A->ops->sor)(mat->A,bb1,omega,SOR_FORWARD_SWEEP,fshift,lits,1,xx);
1552:     }
1553:   } else if (flag & SOR_LOCAL_BACKWARD_SWEEP) {
1554:     if (flag & SOR_ZERO_INITIAL_GUESS) {
1555:       (*mat->A->ops->sor)(mat->A,bb,omega,flag,fshift,lits,1,xx);
1556:       its--;
1557:     }
1558:     while (its--) {
1559:       VecScatterBegin(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
1560:       VecScatterEnd(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);

1562:       /* update rhs: bb1 = bb - B*x */
1563:       VecScale(mat->lvec,-1.0);
1564:       (*mat->B->ops->multadd)(mat->B,mat->lvec,bb,bb1);

1566:       /* local sweep */
1567:       (*mat->A->ops->sor)(mat->A,bb1,omega,SOR_BACKWARD_SWEEP,fshift,lits,1,xx);
1568:     }
1569:   } else if (flag & SOR_EISENSTAT) {
1570:     Vec xx1;

1572:     VecDuplicate(bb,&xx1);
1573:     (*mat->A->ops->sor)(mat->A,bb,omega,(MatSORType)(SOR_ZERO_INITIAL_GUESS | SOR_LOCAL_BACKWARD_SWEEP),fshift,lits,1,xx);

1575:     VecScatterBegin(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
1576:     VecScatterEnd(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
1577:     if (!mat->diag) {
1578:       MatCreateVecs(matin,&mat->diag,NULL);
1579:       MatGetDiagonal(matin,mat->diag);
1580:     }
1581:     MatHasOperation(matin,MATOP_MULT_DIAGONAL_BLOCK,&hasop);
1582:     if (hasop) {
1583:       MatMultDiagonalBlock(matin,xx,bb1);
1584:     } else {
1585:       VecPointwiseMult(bb1,mat->diag,xx);
1586:     }
1587:     VecAYPX(bb1,(omega-2.0)/omega,bb);

1589:     MatMultAdd(mat->B,mat->lvec,bb1,bb1);

1591:     /* local sweep */
1592:     (*mat->A->ops->sor)(mat->A,bb1,omega,(MatSORType)(SOR_ZERO_INITIAL_GUESS | SOR_LOCAL_FORWARD_SWEEP),fshift,lits,1,xx1);
1593:     VecAXPY(xx,1.0,xx1);
1594:     VecDestroy(&xx1);
1595:   } else SETERRQ(PetscObjectComm((PetscObject)matin),PETSC_ERR_SUP,"Parallel SOR not supported");

1597:   VecDestroy(&bb1);

1599:   matin->factorerrortype = mat->A->factorerrortype;
1600:   return(0);
1601: }
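
/*
   A minimal sketch of the usual way this kernel is reached, namely through PCSOR
   (assuming an assembled MATMPIAIJ matrix A and vectors b and x created elsewhere):

      KSP ksp;
      PC  pc;

      KSPCreate(PETSC_COMM_WORLD,&ksp);
      KSPSetOperators(ksp,A,A);
      KSPGetPC(ksp,&pc);
      PCSetType(pc,PCSOR);
      PCSORSetSymmetric(pc,SOR_LOCAL_SYMMETRIC_SWEEP);   processor-block SSOR sweeps
      KSPSetFromOptions(ksp);
      KSPSolve(ksp,b,x);
      KSPDestroy(&ksp);

   Only the SOR_LOCAL_* variants are supported in parallel; a true parallel SOR
   falls into the SETERRQ() branch above.
*/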

1603: PetscErrorCode MatPermute_MPIAIJ(Mat A,IS rowp,IS colp,Mat *B)
1604: {
1605:   Mat            aA,aB,Aperm;
1606:   const PetscInt *rwant,*cwant,*gcols,*ai,*bi,*aj,*bj;
1607:   PetscScalar    *aa,*ba;
1608:   PetscInt       i,j,m,n,ng,anz,bnz,*dnnz,*onnz,*tdnnz,*tonnz,*rdest,*cdest,*work,*gcdest;
1609:   PetscSF        rowsf,sf;
1610:   IS             parcolp = NULL;
1611:   PetscBool      done;

1615:   MatGetLocalSize(A,&m,&n);
1616:   ISGetIndices(rowp,&rwant);
1617:   ISGetIndices(colp,&cwant);
1618:   PetscMalloc3(PetscMax(m,n),&work,m,&rdest,n,&cdest);

1620:   /* Invert row permutation to find out where my rows should go */
1621:   PetscSFCreate(PetscObjectComm((PetscObject)A),&rowsf);
1622:   PetscSFSetGraphLayout(rowsf,A->rmap,A->rmap->n,NULL,PETSC_OWN_POINTER,rwant);
1623:   PetscSFSetFromOptions(rowsf);
1624:   for (i=0; i<m; i++) work[i] = A->rmap->rstart + i;
1625:   PetscSFReduceBegin(rowsf,MPIU_INT,work,rdest,MPIU_REPLACE);
1626:   PetscSFReduceEnd(rowsf,MPIU_INT,work,rdest,MPIU_REPLACE);

1628:   /* Invert column permutation to find out where my columns should go */
1629:   PetscSFCreate(PetscObjectComm((PetscObject)A),&sf);
1630:   PetscSFSetGraphLayout(sf,A->cmap,A->cmap->n,NULL,PETSC_OWN_POINTER,cwant);
1631:   PetscSFSetFromOptions(sf);
1632:   for (i=0; i<n; i++) work[i] = A->cmap->rstart + i;
1633:   PetscSFReduceBegin(sf,MPIU_INT,work,cdest,MPIU_REPLACE);
1634:   PetscSFReduceEnd(sf,MPIU_INT,work,cdest,MPIU_REPLACE);
1635:   PetscSFDestroy(&sf);

1637:   ISRestoreIndices(rowp,&rwant);
1638:   ISRestoreIndices(colp,&cwant);
1639:   MatMPIAIJGetSeqAIJ(A,&aA,&aB,&gcols);

1641:   /* Find out where my gcols should go */
1642:   MatGetSize(aB,NULL,&ng);
1643:   PetscMalloc1(ng,&gcdest);
1644:   PetscSFCreate(PetscObjectComm((PetscObject)A),&sf);
1645:   PetscSFSetGraphLayout(sf,A->cmap,ng,NULL,PETSC_OWN_POINTER,gcols);
1646:   PetscSFSetFromOptions(sf);
1647:   PetscSFBcastBegin(sf,MPIU_INT,cdest,gcdest);
1648:   PetscSFBcastEnd(sf,MPIU_INT,cdest,gcdest);
1649:   PetscSFDestroy(&sf);

1651:   PetscCalloc4(m,&dnnz,m,&onnz,m,&tdnnz,m,&tonnz);
1652:   MatGetRowIJ(aA,0,PETSC_FALSE,PETSC_FALSE,&anz,&ai,&aj,&done);
1653:   MatGetRowIJ(aB,0,PETSC_FALSE,PETSC_FALSE,&bnz,&bi,&bj,&done);
1654:   for (i=0; i<m; i++) {
1655:     PetscInt row = rdest[i],rowner;
1656:     PetscLayoutFindOwner(A->rmap,row,&rowner);
1657:     for (j=ai[i]; j<ai[i+1]; j++) {
1658:       PetscInt cowner,col = cdest[aj[j]];
1659:       PetscLayoutFindOwner(A->cmap,col,&cowner); /* Could build an index for the columns to eliminate this search */
1660:       if (rowner == cowner) dnnz[i]++;
1661:       else onnz[i]++;
1662:     }
1663:     for (j=bi[i]; j<bi[i+1]; j++) {
1664:       PetscInt cowner,col = gcdest[bj[j]];
1665:       PetscLayoutFindOwner(A->cmap,col,&cowner);
1666:       if (rowner == cowner) dnnz[i]++;
1667:       else onnz[i]++;
1668:     }
1669:   }
1670:   PetscSFBcastBegin(rowsf,MPIU_INT,dnnz,tdnnz);
1671:   PetscSFBcastEnd(rowsf,MPIU_INT,dnnz,tdnnz);
1672:   PetscSFBcastBegin(rowsf,MPIU_INT,onnz,tonnz);
1673:   PetscSFBcastEnd(rowsf,MPIU_INT,onnz,tonnz);
1674:   PetscSFDestroy(&rowsf);

1676:   MatCreateAIJ(PetscObjectComm((PetscObject)A),A->rmap->n,A->cmap->n,A->rmap->N,A->cmap->N,0,tdnnz,0,tonnz,&Aperm);
1677:   MatSeqAIJGetArray(aA,&aa);
1678:   MatSeqAIJGetArray(aB,&ba);
1679:   for (i=0; i<m; i++) {
1680:     PetscInt *acols = dnnz,*bcols = onnz; /* Repurpose now-unneeded arrays */
1681:     PetscInt j0,rowlen;
1682:     rowlen = ai[i+1] - ai[i];
1683:     for (j0=j=0; j<rowlen; j0=j) { /* rowlen could be larger than the number of local rows m (the scratch arrays hold only m entries), so insert in batches */
1684:       for ( ; j<PetscMin(rowlen,j0+m); j++) acols[j-j0] = cdest[aj[ai[i]+j]];
1685:       MatSetValues(Aperm,1,&rdest[i],j-j0,acols,aa+ai[i]+j0,INSERT_VALUES);
1686:     }
1687:     rowlen = bi[i+1] - bi[i];
1688:     for (j0=j=0; j<rowlen; j0=j) {
1689:       for ( ; j<PetscMin(rowlen,j0+m); j++) bcols[j-j0] = gcdest[bj[bi[i]+j]];
1690:       MatSetValues(Aperm,1,&rdest[i],j-j0,bcols,ba+bi[i]+j0,INSERT_VALUES);
1691:     }
1692:   }
1693:   MatAssemblyBegin(Aperm,MAT_FINAL_ASSEMBLY);
1694:   MatAssemblyEnd(Aperm,MAT_FINAL_ASSEMBLY);
1695:   MatRestoreRowIJ(aA,0,PETSC_FALSE,PETSC_FALSE,&anz,&ai,&aj,&done);
1696:   MatRestoreRowIJ(aB,0,PETSC_FALSE,PETSC_FALSE,&bnz,&bi,&bj,&done);
1697:   MatSeqAIJRestoreArray(aA,&aa);
1698:   MatSeqAIJRestoreArray(aB,&ba);
1699:   PetscFree4(dnnz,onnz,tdnnz,tonnz);
1700:   PetscFree3(work,rdest,cdest);
1701:   PetscFree(gcdest);
1702:   if (parcolp) {ISDestroy(&colp);}
1703:   *B = Aperm;
1704:   return(0);
1705: }
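
/*
   A minimal usage sketch (assuming index sets rowp and colp, with one entry per local
   row and column of A, giving the new global position of each row and column):

      Mat Aperm;

      MatPermute(A,rowp,colp,&Aperm);
      ... use Aperm ...
      MatDestroy(&Aperm);

   The permuted matrix keeps the parallel layout of A; only the global ordering of
   rows and columns changes.
*/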

1707: PetscErrorCode  MatGetGhosts_MPIAIJ(Mat mat,PetscInt *nghosts,const PetscInt *ghosts[])
1708: {
1709:   Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;

1713:   MatGetSize(aij->B,NULL,nghosts);
1714:   if (ghosts) *ghosts = aij->garray;
1715:   return(0);
1716: }

1718: PetscErrorCode MatGetInfo_MPIAIJ(Mat matin,MatInfoType flag,MatInfo *info)
1719: {
1720:   Mat_MPIAIJ     *mat = (Mat_MPIAIJ*)matin->data;
1721:   Mat            A    = mat->A,B = mat->B;
1723:   PetscReal      isend[5],irecv[5];

1726:   info->block_size = 1.0;
1727:   MatGetInfo(A,MAT_LOCAL,info);

1729:   isend[0] = info->nz_used; isend[1] = info->nz_allocated; isend[2] = info->nz_unneeded;
1730:   isend[3] = info->memory;  isend[4] = info->mallocs;

1732:   MatGetInfo(B,MAT_LOCAL,info);

1734:   isend[0] += info->nz_used; isend[1] += info->nz_allocated; isend[2] += info->nz_unneeded;
1735:   isend[3] += info->memory;  isend[4] += info->mallocs;
1736:   if (flag == MAT_LOCAL) {
1737:     info->nz_used      = isend[0];
1738:     info->nz_allocated = isend[1];
1739:     info->nz_unneeded  = isend[2];
1740:     info->memory       = isend[3];
1741:     info->mallocs      = isend[4];
1742:   } else if (flag == MAT_GLOBAL_MAX) {
1743:     MPIU_Allreduce(isend,irecv,5,MPIU_REAL,MPIU_MAX,PetscObjectComm((PetscObject)matin));

1745:     info->nz_used      = irecv[0];
1746:     info->nz_allocated = irecv[1];
1747:     info->nz_unneeded  = irecv[2];
1748:     info->memory       = irecv[3];
1749:     info->mallocs      = irecv[4];
1750:   } else if (flag == MAT_GLOBAL_SUM) {
1751:     MPIU_Allreduce(isend,irecv,5,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)matin));

1753:     info->nz_used      = irecv[0];
1754:     info->nz_allocated = irecv[1];
1755:     info->nz_unneeded  = irecv[2];
1756:     info->memory       = irecv[3];
1757:     info->mallocs      = irecv[4];
1758:   }
1759:   info->fill_ratio_given  = 0; /* no parallel LU/ILU/Cholesky */
1760:   info->fill_ratio_needed = 0;
1761:   info->factor_mallocs    = 0;
1762:   return(0);
1763: }
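
/*
   A minimal usage sketch (assuming an assembled MATMPIAIJ matrix A):

      MatInfo info;

      MatGetInfo(A,MAT_GLOBAL_SUM,&info);
      PetscPrintf(PETSC_COMM_WORLD,"nz used %g, nz allocated %g, mallocs %g\n",
                  info.nz_used,info.nz_allocated,info.mallocs);

   MAT_LOCAL reports the values for this process only, while MAT_GLOBAL_MAX and
   MAT_GLOBAL_SUM reduce them over the matrix's communicator as done above.
*/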

1765: PetscErrorCode MatSetOption_MPIAIJ(Mat A,MatOption op,PetscBool flg)
1766: {
1767:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;

1771:   switch (op) {
1772:   case MAT_NEW_NONZERO_LOCATIONS:
1773:   case MAT_NEW_NONZERO_ALLOCATION_ERR:
1774:   case MAT_UNUSED_NONZERO_LOCATION_ERR:
1775:   case MAT_KEEP_NONZERO_PATTERN:
1776:   case MAT_NEW_NONZERO_LOCATION_ERR:
1777:   case MAT_USE_INODES:
1778:   case MAT_IGNORE_ZERO_ENTRIES:
1779:     MatCheckPreallocated(A,1);
1780:     MatSetOption(a->A,op,flg);
1781:     MatSetOption(a->B,op,flg);
1782:     break;
1783:   case MAT_ROW_ORIENTED:
1784:     MatCheckPreallocated(A,1);
1785:     a->roworiented = flg;

1787:     MatSetOption(a->A,op,flg);
1788:     MatSetOption(a->B,op,flg);
1789:     break;
1790:   case MAT_NEW_DIAGONALS:
1791:     PetscInfo1(A,"Option %s ignored\n",MatOptions[op]);
1792:     break;
1793:   case MAT_IGNORE_OFF_PROC_ENTRIES:
1794:     a->donotstash = flg;
1795:     break;
1796:   /* Symmetry flags are handled directly by MatSetOption() and they don't affect preallocation */
1797:   case MAT_SPD:
1798:   case MAT_SYMMETRIC:
1799:   case MAT_STRUCTURALLY_SYMMETRIC:
1800:   case MAT_HERMITIAN:
1801:   case MAT_SYMMETRY_ETERNAL:
1802:     break;
1803:   case MAT_SUBMAT_SINGLEIS:
1804:     A->submat_singleis = flg;
1805:     break;
1806:   case MAT_STRUCTURE_ONLY:
1807:     /* The option is handled directly by MatSetOption() */
1808:     break;
1809:   default:
1810:     SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"unknown option %d",op);
1811:   }
1812:   return(0);
1813: }
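
/*
   A minimal usage sketch of options that are forwarded to both local blocks
   (assuming a preallocated MATMPIAIJ matrix A):

      MatSetOption(A,MAT_NEW_NONZERO_ALLOCATION_ERR,PETSC_TRUE);   error out if preallocation is exceeded
      MatSetOption(A,MAT_IGNORE_OFF_PROC_ENTRIES,PETSC_TRUE);      drop MatSetValues() entries destined for other processes
      MatSetOption(A,MAT_SYMMETRIC,PETSC_TRUE);                    symmetry flags are recorded by MatSetOption() itself
*/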

1815: PetscErrorCode MatGetRow_MPIAIJ(Mat matin,PetscInt row,PetscInt *nz,PetscInt **idx,PetscScalar **v)
1816: {
1817:   Mat_MPIAIJ     *mat = (Mat_MPIAIJ*)matin->data;
1818:   PetscScalar    *vworkA,*vworkB,**pvA,**pvB,*v_p;
1820:   PetscInt       i,*cworkA,*cworkB,**pcA,**pcB,cstart = matin->cmap->rstart;
1821:   PetscInt       nztot,nzA,nzB,lrow,rstart = matin->rmap->rstart,rend = matin->rmap->rend;
1822:   PetscInt       *cmap,*idx_p;

1825:   if (mat->getrowactive) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Already active");
1826:   mat->getrowactive = PETSC_TRUE;

1828:   if (!mat->rowvalues && (idx || v)) {
1829:     /*
1830:         allocate enough space to hold information from the longest row.
1831:     */
1832:     Mat_SeqAIJ *Aa = (Mat_SeqAIJ*)mat->A->data,*Ba = (Mat_SeqAIJ*)mat->B->data;
1833:     PetscInt   max = 1,tmp;
1834:     for (i=0; i<matin->rmap->n; i++) {
1835:       tmp = Aa->i[i+1] - Aa->i[i] + Ba->i[i+1] - Ba->i[i];
1836:       if (max < tmp) max = tmp;
1837:     }
1838:     PetscMalloc2(max,&mat->rowvalues,max,&mat->rowindices);
1839:   }

1841:   if (row < rstart || row >= rend) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Only local rows");
1842:   lrow = row - rstart;

1844:   pvA = &vworkA; pcA = &cworkA; pvB = &vworkB; pcB = &cworkB;
1845:   if (!v)   {pvA = 0; pvB = 0;}
1846:   if (!idx) {pcA = 0; if (!v) pcB = 0;}
1847:   (*mat->A->ops->getrow)(mat->A,lrow,&nzA,pcA,pvA);
1848:   (*mat->B->ops->getrow)(mat->B,lrow,&nzB,pcB,pvB);
1849:   nztot = nzA + nzB;

1851:   cmap = mat->garray;
1852:   if (v  || idx) {
1853:     if (nztot) {
1854:       /* Sort by increasing column numbers, assuming A and B already sorted */
1855:       PetscInt imark = -1;
1856:       if (v) {
1857:         *v = v_p = mat->rowvalues;
1858:         for (i=0; i<nzB; i++) {
1859:           if (cmap[cworkB[i]] < cstart) v_p[i] = vworkB[i];
1860:           else break;
1861:         }
1862:         imark = i;
1863:         for (i=0; i<nzA; i++)     v_p[imark+i] = vworkA[i];
1864:         for (i=imark; i<nzB; i++) v_p[nzA+i]   = vworkB[i];
1865:       }
1866:       if (idx) {
1867:         *idx = idx_p = mat->rowindices;
1868:         if (imark > -1) {
1869:           for (i=0; i<imark; i++) {
1870:             idx_p[i] = cmap[cworkB[i]];
1871:           }
1872:         } else {
1873:           for (i=0; i<nzB; i++) {
1874:             if (cmap[cworkB[i]] < cstart) idx_p[i] = cmap[cworkB[i]];
1875:             else break;
1876:           }
1877:           imark = i;
1878:         }
1879:         for (i=0; i<nzA; i++)     idx_p[imark+i] = cstart + cworkA[i];
1880:         for (i=imark; i<nzB; i++) idx_p[nzA+i]   = cmap[cworkB[i]];
1881:       }
1882:     } else {
1883:       if (idx) *idx = 0;
1884:       if (v)   *v   = 0;
1885:     }
1886:   }
1887:   *nz  = nztot;
1888:   (*mat->A->ops->restorerow)(mat->A,lrow,&nzA,pcA,pvA);
1889:   (*mat->B->ops->restorerow)(mat->B,lrow,&nzB,pcB,pvB);
1890:   return(0);
1891: }

1893: PetscErrorCode MatRestoreRow_MPIAIJ(Mat mat,PetscInt row,PetscInt *nz,PetscInt **idx,PetscScalar **v)
1894: {
1895:   Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;

1898:   if (!aij->getrowactive) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"MatGetRow() must be called first");
1899:   aij->getrowactive = PETSC_FALSE;
1900:   return(0);
1901: }
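
/*
   A minimal usage sketch: each process may only query the rows it owns, and every
   MatGetRow() must be paired with MatRestoreRow() before the next row is requested
   (assuming an assembled MATMPIAIJ matrix A):

      PetscInt          row,rstart,rend,ncols;
      const PetscInt    *cols;
      const PetscScalar *vals;

      MatGetOwnershipRange(A,&rstart,&rend);
      for (row=rstart; row<rend; row++) {
        MatGetRow(A,row,&ncols,&cols,&vals);
        ... inspect ncols, cols[], vals[] ...
        MatRestoreRow(A,row,&ncols,&cols,&vals);
      }
*/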

1903: PetscErrorCode MatNorm_MPIAIJ(Mat mat,NormType type,PetscReal *norm)
1904: {
1905:   Mat_MPIAIJ     *aij  = (Mat_MPIAIJ*)mat->data;
1906:   Mat_SeqAIJ     *amat = (Mat_SeqAIJ*)aij->A->data,*bmat = (Mat_SeqAIJ*)aij->B->data;
1908:   PetscInt       i,j,cstart = mat->cmap->rstart;
1909:   PetscReal      sum = 0.0;
1910:   MatScalar      *v;

1913:   if (aij->size == 1) {
1914:      MatNorm(aij->A,type,norm);
1915:   } else {
1916:     if (type == NORM_FROBENIUS) {
1917:       v = amat->a;
1918:       for (i=0; i<amat->nz; i++) {
1919:         sum += PetscRealPart(PetscConj(*v)*(*v)); v++;
1920:       }
1921:       v = bmat->a;
1922:       for (i=0; i<bmat->nz; i++) {
1923:         sum += PetscRealPart(PetscConj(*v)*(*v)); v++;
1924:       }
1925:       MPIU_Allreduce(&sum,norm,1,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)mat));
1926:       *norm = PetscSqrtReal(*norm);
1927:       PetscLogFlops(2*amat->nz+2*bmat->nz);
1928:     } else if (type == NORM_1) { /* max column norm */
1929:       PetscReal *tmp,*tmp2;
1930:       PetscInt  *jj,*garray = aij->garray;
1931:       PetscCalloc1(mat->cmap->N+1,&tmp);
1932:       PetscMalloc1(mat->cmap->N+1,&tmp2);
1933:       *norm = 0.0;
1934:       v     = amat->a; jj = amat->j;
1935:       for (j=0; j<amat->nz; j++) {
1936:         tmp[cstart + *jj++] += PetscAbsScalar(*v);  v++;
1937:       }
1938:       v = bmat->a; jj = bmat->j;
1939:       for (j=0; j<bmat->nz; j++) {
1940:         tmp[garray[*jj++]] += PetscAbsScalar(*v); v++;
1941:       }
1942:       MPIU_Allreduce(tmp,tmp2,mat->cmap->N,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)mat));
1943:       for (j=0; j<mat->cmap->N; j++) {
1944:         if (tmp2[j] > *norm) *norm = tmp2[j];
1945:       }
1946:       PetscFree(tmp);
1947:       PetscFree(tmp2);
1948:       PetscLogFlops(PetscMax(amat->nz+bmat->nz-1,0));
1949:     } else if (type == NORM_INFINITY) { /* max row norm */
1950:       PetscReal ntemp = 0.0;
1951:       for (j=0; j<aij->A->rmap->n; j++) {
1952:         v   = amat->a + amat->i[j];
1953:         sum = 0.0;
1954:         for (i=0; i<amat->i[j+1]-amat->i[j]; i++) {
1955:           sum += PetscAbsScalar(*v); v++;
1956:         }
1957:         v = bmat->a + bmat->i[j];
1958:         for (i=0; i<bmat->i[j+1]-bmat->i[j]; i++) {
1959:           sum += PetscAbsScalar(*v); v++;
1960:         }
1961:         if (sum > ntemp) ntemp = sum;
1962:       }
1963:       MPIU_Allreduce(&ntemp,norm,1,MPIU_REAL,MPIU_MAX,PetscObjectComm((PetscObject)mat));
1964:       PetscLogFlops(PetscMax(amat->nz+bmat->nz-1,0));
1965:     } else SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"No support for two norm");
1966:   }
1967:   return(0);
1968: }
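
/*
   A minimal usage sketch (assuming an assembled MATMPIAIJ matrix A):

      PetscReal nrm;

      MatNorm(A,NORM_FROBENIUS,&nrm);
      MatNorm(A,NORM_1,&nrm);            max column sum of absolute values
      MatNorm(A,NORM_INFINITY,&nrm);     max row sum of absolute values

   NORM_2 is not supported for this matrix type, as the SETERRQ() above indicates.
*/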

1970: PetscErrorCode MatTranspose_MPIAIJ(Mat A,MatReuse reuse,Mat *matout)
1971: {
1972:   Mat_MPIAIJ     *a    =(Mat_MPIAIJ*)A->data,*b;
1973:   Mat_SeqAIJ     *Aloc =(Mat_SeqAIJ*)a->A->data,*Bloc=(Mat_SeqAIJ*)a->B->data,*sub_B_diag;
1974:   PetscInt       M     = A->rmap->N,N=A->cmap->N,ma,na,mb,nb,*ai,*aj,*bi,*bj,row,*cols,*cols_tmp,*B_diag_ilen,*B_diag_i,i,ncol,A_diag_ncol;
1976:   Mat            B,A_diag,*B_diag;
1977:   MatScalar      *array;

1980:   ma = A->rmap->n; na = A->cmap->n; mb = a->B->rmap->n; nb = a->B->cmap->n;
1981:   ai = Aloc->i; aj = Aloc->j;
1982:   bi = Bloc->i; bj = Bloc->j;
1983:   if (reuse == MAT_INITIAL_MATRIX || *matout == A) {
1984:     PetscInt             *d_nnz,*g_nnz,*o_nnz;
1985:     PetscSFNode          *oloc;
1986:     PETSC_UNUSED PetscSF sf;

1988:     PetscMalloc4(na,&d_nnz,na,&o_nnz,nb,&g_nnz,nb,&oloc);
1989:     /* compute d_nnz for preallocation */
1990:     PetscMemzero(d_nnz,na*sizeof(PetscInt));
1991:     for (i=0; i<ai[ma]; i++) {
1992:       d_nnz[aj[i]]++;
1993:     }
1994:     /* compute local off-diagonal contributions */
1995:     PetscMemzero(g_nnz,nb*sizeof(PetscInt));
1996:     for (i=0; i<bi[ma]; i++) g_nnz[bj[i]]++;
1997:     /* map those to global */
1998:     PetscSFCreate(PetscObjectComm((PetscObject)A),&sf);
1999:     PetscSFSetGraphLayout(sf,A->cmap,nb,NULL,PETSC_USE_POINTER,a->garray);
2000:     PetscSFSetFromOptions(sf);
2001:     PetscMemzero(o_nnz,na*sizeof(PetscInt));
2002:     PetscSFReduceBegin(sf,MPIU_INT,g_nnz,o_nnz,MPIU_SUM);
2003:     PetscSFReduceEnd(sf,MPIU_INT,g_nnz,o_nnz,MPIU_SUM);
2004:     PetscSFDestroy(&sf);

2006:     MatCreate(PetscObjectComm((PetscObject)A),&B);
2007:     MatSetSizes(B,A->cmap->n,A->rmap->n,N,M);
2008:     MatSetBlockSizes(B,PetscAbs(A->cmap->bs),PetscAbs(A->rmap->bs));
2009:     MatSetType(B,((PetscObject)A)->type_name);
2010:     MatMPIAIJSetPreallocation(B,0,d_nnz,0,o_nnz);
2011:     PetscFree4(d_nnz,o_nnz,g_nnz,oloc);
2012:   } else {
2013:     B    = *matout;
2014:     MatSetOption(B,MAT_NEW_NONZERO_ALLOCATION_ERR,PETSC_TRUE);
2015:   }

2017:   b           = (Mat_MPIAIJ*)B->data;
2018:   A_diag      = a->A;
2019:   B_diag      = &b->A;
2020:   sub_B_diag  = (Mat_SeqAIJ*)(*B_diag)->data;
2021:   A_diag_ncol = A_diag->cmap->N;
2022:   B_diag_ilen = sub_B_diag->ilen;
2023:   B_diag_i    = sub_B_diag->i;

2025:   /* Set ilen for diagonal of B */
2026:   for (i=0; i<A_diag_ncol; i++) {
2027:     B_diag_ilen[i] = B_diag_i[i+1] - B_diag_i[i];
2028:   }

2030:   /* Transpose the diagonal part of the matrix. In contrast to the off-diagonal part, this can be done
2031:      very quickly (without using MatSetValues()) because all writes are local. */
2032:   MatTranspose(A_diag,MAT_REUSE_MATRIX,B_diag);

2034:   /* copy over the B part */
2035:   PetscCalloc1(bi[mb],&cols);
2036:   array = Bloc->a;
2037:   row   = A->rmap->rstart;
2038:   for (i=0; i<bi[mb]; i++) cols[i] = a->garray[bj[i]];
2039:   cols_tmp = cols;
2040:   for (i=0; i<mb; i++) {
2041:     ncol = bi[i+1]-bi[i];
2042:     MatSetValues(B,ncol,cols_tmp,1,&row,array,INSERT_VALUES);
2043:     row++;
2044:     array += ncol; cols_tmp += ncol;
2045:   }
2046:   PetscFree(cols);

2048:   MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);
2049:   MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);
2050:   if (reuse == MAT_INITIAL_MATRIX || reuse == MAT_REUSE_MATRIX) {
2051:     *matout = B;
2052:   } else {
2053:     MatHeaderMerge(A,&B);
2054:   }
2055:   return(0);
2056: }
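
/*
   A minimal usage sketch (assuming an assembled MATMPIAIJ matrix A):

      Mat At;

      MatTranspose(A,MAT_INITIAL_MATRIX,&At);   build the transpose the first time
      ... change numerical values of A, keeping its nonzero pattern ...
      MatTranspose(A,MAT_REUSE_MATRIX,&At);     refill the existing At
      MatDestroy(&At);

   MAT_INPLACE_MATRIX, i.e. MatTranspose(A,MAT_INPLACE_MATRIX,&A), replaces A by its
   transpose through the MatHeaderMerge() path above.
*/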

2058: PetscErrorCode MatDiagonalScale_MPIAIJ(Mat mat,Vec ll,Vec rr)
2059: {
2060:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
2061:   Mat            a    = aij->A,b = aij->B;
2063:   PetscInt       s1,s2,s3;

2066:   MatGetLocalSize(mat,&s2,&s3);
2067:   if (rr) {
2068:     VecGetLocalSize(rr,&s1);
2069:     if (s1!=s3) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"right vector non-conforming local size");
2070:     /* Overlap communication with computation. */
2071:     VecScatterBegin(aij->Mvctx,rr,aij->lvec,INSERT_VALUES,SCATTER_FORWARD);
2072:   }
2073:   if (ll) {
2074:     VecGetLocalSize(ll,&s1);
2075:     if (s1!=s2) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"left vector non-conforming local size");
2076:     (*b->ops->diagonalscale)(b,ll,0);
2077:   }
2078:   /* scale  the diagonal block */
2079:   (*a->ops->diagonalscale)(a,ll,rr);

2081:   if (rr) {
2082:     /* Do a scatter end and then right scale the off-diagonal block */
2083:     VecScatterEnd(aij->Mvctx,rr,aij->lvec,INSERT_VALUES,SCATTER_FORWARD);
2084:     (*b->ops->diagonalscale)(b,0,aij->lvec);
2085:   }
2086:   return(0);
2087: }
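
/*
   A minimal usage sketch computing A <- diag(l) A diag(r) (assuming an assembled
   MATMPIAIJ matrix A):

      Vec l,r;

      MatCreateVecs(A,&r,&l);        r matches the column layout, l the row layout
      VecSet(l,2.0);
      VecSet(r,0.5);
      MatDiagonalScale(A,l,r);
      MatDiagonalScale(A,NULL,r);    pass NULL to scale only the columns (or only the rows)
      VecDestroy(&l);
      VecDestroy(&r);
*/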

2089: PetscErrorCode MatSetUnfactored_MPIAIJ(Mat A)
2090: {
2091:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;

2095:   MatSetUnfactored(a->A);
2096:   return(0);
2097: }

2099: PetscErrorCode MatEqual_MPIAIJ(Mat A,Mat B,PetscBool  *flag)
2100: {
2101:   Mat_MPIAIJ     *matB = (Mat_MPIAIJ*)B->data,*matA = (Mat_MPIAIJ*)A->data;
2102:   Mat            a,b,c,d;
2103:   PetscBool      flg;

2107:   a = matA->A; b = matA->B;
2108:   c = matB->A; d = matB->B;

2110:   MatEqual(a,c,&flg);
2111:   if (flg) {
2112:     MatEqual(b,d,&flg);
2113:   }
2114:   MPIU_Allreduce(&flg,flag,1,MPIU_BOOL,MPI_LAND,PetscObjectComm((PetscObject)A));
2115:   return(0);
2116: }

2118: PetscErrorCode MatCopy_MPIAIJ(Mat A,Mat B,MatStructure str)
2119: {
2121:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
2122:   Mat_MPIAIJ     *b = (Mat_MPIAIJ*)B->data;

2125:   /* If the two matrices don't have the same copy implementation, they aren't compatible for fast copy. */
2126:   if ((str != SAME_NONZERO_PATTERN) || (A->ops->copy != B->ops->copy)) {
2127:     /* Because of the column compression in the off-processor part of the matrix a->B,
2128:        the number of columns in a->B and b->B may be different, hence we cannot call
2129:        MatCopy() directly on the two parts. If need be, we could provide a copy more
2130:        efficient than MatCopy_Basic() by first uncompressing the a->B matrices and
2131:        then copying the submatrices */
2132:     MatCopy_Basic(A,B,str);
2133:   } else {
2134:     MatCopy(a->A,b->A,str);
2135:     MatCopy(a->B,b->B,str);
2136:   }
2137:   PetscObjectStateIncrease((PetscObject)B);
2138:   return(0);
2139: }

2141: PetscErrorCode MatSetUp_MPIAIJ(Mat A)
2142: {

2146:   MatMPIAIJSetPreallocation(A,PETSC_DEFAULT,0,PETSC_DEFAULT,0);
2147:   return(0);
2148: }

2150: /*
2151:    Computes the number of nonzeros per row needed for preallocation when X and Y
2152:    have different nonzero structure.
2153: */
2154: PetscErrorCode MatAXPYGetPreallocation_MPIX_private(PetscInt m,const PetscInt *xi,const PetscInt *xj,const PetscInt *xltog,const PetscInt *yi,const PetscInt *yj,const PetscInt *yltog,PetscInt *nnz)
2155: {
2156:   PetscInt       i,j,k,nzx,nzy;

2159:   /* Set the number of nonzeros in the new matrix */
2160:   for (i=0; i<m; i++) {
2161:     const PetscInt *xjj = xj+xi[i],*yjj = yj+yi[i];
2162:     nzx = xi[i+1] - xi[i];
2163:     nzy = yi[i+1] - yi[i];
2164:     nnz[i] = 0;
2165:     for (j=0,k=0; j<nzx; j++) {                   /* Point in X */
2166:       for (; k<nzy && yltog[yjj[k]]<xltog[xjj[j]]; k++) nnz[i]++; /* Catch up to X */
2167:       if (k<nzy && yltog[yjj[k]]==xltog[xjj[j]]) k++;             /* Skip duplicate */
2168:       nnz[i]++;
2169:     }
2170:     for (; k<nzy; k++) nnz[i]++;
2171:   }
2172:   return(0);
2173: }

2175: /* This is the same as MatAXPYGetPreallocation_SeqAIJ, except that the local-to-global map is provided */
2176: static PetscErrorCode MatAXPYGetPreallocation_MPIAIJ(Mat Y,const PetscInt *yltog,Mat X,const PetscInt *xltog,PetscInt *nnz)
2177: {
2179:   PetscInt       m = Y->rmap->N;
2180:   Mat_SeqAIJ     *x = (Mat_SeqAIJ*)X->data;
2181:   Mat_SeqAIJ     *y = (Mat_SeqAIJ*)Y->data;

2184:   MatAXPYGetPreallocation_MPIX_private(m,x->i,x->j,xltog,y->i,y->j,yltog,nnz);
2185:   return(0);
2186: }

2188: PetscErrorCode MatAXPY_MPIAIJ(Mat Y,PetscScalar a,Mat X,MatStructure str)
2189: {
2191:   Mat_MPIAIJ     *xx = (Mat_MPIAIJ*)X->data,*yy = (Mat_MPIAIJ*)Y->data;
2192:   PetscBLASInt   bnz,one=1;
2193:   Mat_SeqAIJ     *x,*y;

2196:   if (str == SAME_NONZERO_PATTERN) {
2197:     PetscScalar alpha = a;
2198:     x    = (Mat_SeqAIJ*)xx->A->data;
2199:     PetscBLASIntCast(x->nz,&bnz);
2200:     y    = (Mat_SeqAIJ*)yy->A->data;
2201:     PetscStackCallBLAS("BLASaxpy",BLASaxpy_(&bnz,&alpha,x->a,&one,y->a,&one));
2202:     x    = (Mat_SeqAIJ*)xx->B->data;
2203:     y    = (Mat_SeqAIJ*)yy->B->data;
2204:     PetscBLASIntCast(x->nz,&bnz);
2205:     PetscStackCallBLAS("BLASaxpy",BLASaxpy_(&bnz,&alpha,x->a,&one,y->a,&one));
2206:     PetscObjectStateIncrease((PetscObject)Y);
2207:   } else if (str == SUBSET_NONZERO_PATTERN) { /* nonzeros of X is a subset of Y's */
2208:     MatAXPY_Basic(Y,a,X,str);
2209:   } else {
2210:     Mat      B;
2211:     PetscInt *nnz_d,*nnz_o;
2212:     PetscMalloc1(yy->A->rmap->N,&nnz_d);
2213:     PetscMalloc1(yy->B->rmap->N,&nnz_o);
2214:     MatCreate(PetscObjectComm((PetscObject)Y),&B);
2215:     PetscObjectSetName((PetscObject)B,((PetscObject)Y)->name);
2216:     MatSetSizes(B,Y->rmap->n,Y->cmap->n,Y->rmap->N,Y->cmap->N);
2217:     MatSetBlockSizesFromMats(B,Y,Y);
2218:     MatSetType(B,MATMPIAIJ);
2219:     MatAXPYGetPreallocation_SeqAIJ(yy->A,xx->A,nnz_d);
2220:     MatAXPYGetPreallocation_MPIAIJ(yy->B,yy->garray,xx->B,xx->garray,nnz_o);
2221:     MatMPIAIJSetPreallocation(B,0,nnz_d,0,nnz_o);
2222:     MatAXPY_BasicWithPreallocation(B,Y,a,X,str);
2223:     MatHeaderReplace(Y,&B);
2224:     PetscFree(nnz_d);
2225:     PetscFree(nnz_o);
2226:   }
2227:   return(0);
2228: }
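
/*
   A minimal usage sketch computing Y <- Y + a*X (assuming assembled MATMPIAIJ
   matrices X and Y with the same layout):

      MatAXPY(Y,2.0,X,SAME_NONZERO_PATTERN);        fastest: a BLAS axpy on both local blocks
      MatAXPY(Y,2.0,X,SUBSET_NONZERO_PATTERN);      nonzeros of X are a subset of those of Y
      MatAXPY(Y,2.0,X,DIFFERENT_NONZERO_PATTERN);   Y is rebuilt with merged preallocation
*/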

2230: extern PetscErrorCode  MatConjugate_SeqAIJ(Mat);

2232: PetscErrorCode  MatConjugate_MPIAIJ(Mat mat)
2233: {
2234: #if defined(PETSC_USE_COMPLEX)
2236:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;

2239:   MatConjugate_SeqAIJ(aij->A);
2240:   MatConjugate_SeqAIJ(aij->B);
2241: #else
2243: #endif
2244:   return(0);
2245: }

2247: PetscErrorCode MatRealPart_MPIAIJ(Mat A)
2248: {
2249:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;

2253:   MatRealPart(a->A);
2254:   MatRealPart(a->B);
2255:   return(0);
2256: }

2258: PetscErrorCode MatImaginaryPart_MPIAIJ(Mat A)
2259: {
2260:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;

2264:   MatImaginaryPart(a->A);
2265:   MatImaginaryPart(a->B);
2266:   return(0);
2267: }

2269: PetscErrorCode MatGetRowMaxAbs_MPIAIJ(Mat A, Vec v, PetscInt idx[])
2270: {
2271:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
2273:   PetscInt       i,*idxb = 0;
2274:   PetscScalar    *va,*vb;
2275:   Vec            vtmp;

2278:   MatGetRowMaxAbs(a->A,v,idx);
2279:   VecGetArray(v,&va);
2280:   if (idx) {
2281:     for (i=0; i<A->rmap->n; i++) {
2282:       if (PetscAbsScalar(va[i])) idx[i] += A->cmap->rstart;
2283:     }
2284:   }

2286:   VecCreateSeq(PETSC_COMM_SELF,A->rmap->n,&vtmp);
2287:   if (idx) {
2288:     PetscMalloc1(A->rmap->n,&idxb);
2289:   }
2290:   MatGetRowMaxAbs(a->B,vtmp,idxb);
2291:   VecGetArray(vtmp,&vb);

2293:   for (i=0; i<A->rmap->n; i++) {
2294:     if (PetscAbsScalar(va[i]) < PetscAbsScalar(vb[i])) {
2295:       va[i] = vb[i];
2296:       if (idx) idx[i] = a->garray[idxb[i]];
2297:     }
2298:   }

2300:   VecRestoreArray(v,&va);
2301:   VecRestoreArray(vtmp,&vb);
2302:   PetscFree(idxb);
2303:   VecDestroy(&vtmp);
2304:   return(0);
2305: }

2307: PetscErrorCode MatGetRowMinAbs_MPIAIJ(Mat A, Vec v, PetscInt idx[])
2308: {
2309:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
2311:   PetscInt       i,*idxb = 0;
2312:   PetscScalar    *va,*vb;
2313:   Vec            vtmp;

2316:   MatGetRowMinAbs(a->A,v,idx);
2317:   VecGetArray(v,&va);
2318:   if (idx) {
2319:     for (i=0; i<A->rmap->n; i++) {
2320:       if (PetscAbsScalar(va[i])) idx[i] += A->cmap->rstart;
2321:     }
2322:   }

2324:   VecCreateSeq(PETSC_COMM_SELF,A->rmap->n,&vtmp);
2325:   if (idx) {
2326:     PetscMalloc1(A->rmap->n,&idxb);
2327:   }
2328:   MatGetRowMinAbs(a->B,vtmp,idxb);
2329:   VecGetArray(vtmp,&vb);

2331:   for (i=0; i<A->rmap->n; i++) {
2332:     if (PetscAbsScalar(va[i]) > PetscAbsScalar(vb[i])) {
2333:       va[i] = vb[i];
2334:       if (idx) idx[i] = a->garray[idxb[i]];
2335:     }
2336:   }

2338:   VecRestoreArray(v,&va);
2339:   VecRestoreArray(vtmp,&vb);
2340:   PetscFree(idxb);
2341:   VecDestroy(&vtmp);
2342:   return(0);
2343: }

2345: PetscErrorCode MatGetRowMin_MPIAIJ(Mat A, Vec v, PetscInt idx[])
2346: {
2347:   Mat_MPIAIJ     *mat   = (Mat_MPIAIJ*) A->data;
2348:   PetscInt       n      = A->rmap->n;
2349:   PetscInt       cstart = A->cmap->rstart;
2350:   PetscInt       *cmap  = mat->garray;
2351:   PetscInt       *diagIdx, *offdiagIdx;
2352:   Vec            diagV, offdiagV;
2353:   PetscScalar    *a, *diagA, *offdiagA;
2354:   PetscInt       r;

2358:   PetscMalloc2(n,&diagIdx,n,&offdiagIdx);
2359:   VecCreateSeq(PETSC_COMM_SELF, n, &diagV);
2360:   VecCreateSeq(PETSC_COMM_SELF, n, &offdiagV);
2361:   MatGetRowMin(mat->A, diagV,    diagIdx);
2362:   MatGetRowMin(mat->B, offdiagV, offdiagIdx);
2363:   VecGetArray(v,        &a);
2364:   VecGetArray(diagV,    &diagA);
2365:   VecGetArray(offdiagV, &offdiagA);
2366:   for (r = 0; r < n; ++r) {
2367:     if (PetscAbsScalar(diagA[r]) <= PetscAbsScalar(offdiagA[r])) {
2368:       a[r]   = diagA[r];
2369:       idx[r] = cstart + diagIdx[r];
2370:     } else {
2371:       a[r]   = offdiagA[r];
2372:       idx[r] = cmap[offdiagIdx[r]];
2373:     }
2374:   }
2375:   VecRestoreArray(v,        &a);
2376:   VecRestoreArray(diagV,    &diagA);
2377:   VecRestoreArray(offdiagV, &offdiagA);
2378:   VecDestroy(&diagV);
2379:   VecDestroy(&offdiagV);
2380:   PetscFree2(diagIdx, offdiagIdx);
2381:   return(0);
2382: }

2384: PetscErrorCode MatGetRowMax_MPIAIJ(Mat A, Vec v, PetscInt idx[])
2385: {
2386:   Mat_MPIAIJ     *mat   = (Mat_MPIAIJ*) A->data;
2387:   PetscInt       n      = A->rmap->n;
2388:   PetscInt       cstart = A->cmap->rstart;
2389:   PetscInt       *cmap  = mat->garray;
2390:   PetscInt       *diagIdx, *offdiagIdx;
2391:   Vec            diagV, offdiagV;
2392:   PetscScalar    *a, *diagA, *offdiagA;
2393:   PetscInt       r;

2397:   PetscMalloc2(n,&diagIdx,n,&offdiagIdx);
2398:   VecCreateSeq(PETSC_COMM_SELF, n, &diagV);
2399:   VecCreateSeq(PETSC_COMM_SELF, n, &offdiagV);
2400:   MatGetRowMax(mat->A, diagV,    diagIdx);
2401:   MatGetRowMax(mat->B, offdiagV, offdiagIdx);
2402:   VecGetArray(v,        &a);
2403:   VecGetArray(diagV,    &diagA);
2404:   VecGetArray(offdiagV, &offdiagA);
2405:   for (r = 0; r < n; ++r) {
2406:     if (PetscAbsScalar(diagA[r]) >= PetscAbsScalar(offdiagA[r])) {
2407:       a[r]   = diagA[r];
2408:       idx[r] = cstart + diagIdx[r];
2409:     } else {
2410:       a[r]   = offdiagA[r];
2411:       idx[r] = cmap[offdiagIdx[r]];
2412:     }
2413:   }
2414:   VecRestoreArray(v,        &a);
2415:   VecRestoreArray(diagV,    &diagA);
2416:   VecRestoreArray(offdiagV, &offdiagA);
2417:   VecDestroy(&diagV);
2418:   VecDestroy(&offdiagV);
2419:   PetscFree2(diagIdx, offdiagIdx);
2420:   return(0);
2421: }
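
/*
   A minimal usage sketch for the row-wise reductions above (assuming an assembled
   MATMPIAIJ matrix A):

      Vec      rmax;
      PetscInt m,*loc;

      MatCreateVecs(A,NULL,&rmax);       vector with the row layout of A
      MatGetLocalSize(A,&m,NULL);
      PetscMalloc1(m,&loc);
      MatGetRowMaxAbs(A,rmax,loc);       loc[i] is the global column of the selected entry
      PetscFree(loc);
      VecDestroy(&rmax);

   MatGetRowMax(), MatGetRowMin() and MatGetRowMinAbs() follow the same calling sequence.
*/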

2423: PetscErrorCode MatGetSeqNonzeroStructure_MPIAIJ(Mat mat,Mat *newmat)
2424: {
2426:   Mat            *dummy;

2429:   MatCreateSubMatrix_MPIAIJ_All(mat,MAT_DO_NOT_GET_VALUES,MAT_INITIAL_MATRIX,&dummy);
2430:   *newmat = *dummy;
2431:   PetscFree(dummy);
2432:   return(0);
2433: }

2435: PetscErrorCode  MatInvertBlockDiagonal_MPIAIJ(Mat A,const PetscScalar **values)
2436: {
2437:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*) A->data;

2441:   MatInvertBlockDiagonal(a->A,values);
2442:   A->factorerrortype = a->A->factorerrortype;
2443:   return(0);
2444: }

2446: static PetscErrorCode  MatSetRandom_MPIAIJ(Mat x,PetscRandom rctx)
2447: {
2449:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)x->data;

2452:   MatSetRandom(aij->A,rctx);
2453:   MatSetRandom(aij->B,rctx);
2454:   MatAssemblyBegin(x,MAT_FINAL_ASSEMBLY);
2455:   MatAssemblyEnd(x,MAT_FINAL_ASSEMBLY);
2456:   return(0);
2457: }

2459: PetscErrorCode MatMPIAIJSetUseScalableIncreaseOverlap_MPIAIJ(Mat A,PetscBool sc)
2460: {
2462:   if (sc) A->ops->increaseoverlap = MatIncreaseOverlap_MPIAIJ_Scalable;
2463:   else A->ops->increaseoverlap    = MatIncreaseOverlap_MPIAIJ;
2464:   return(0);
2465: }

2467: /*@
2468:    MatMPIAIJSetUseScalableIncreaseOverlap - Set whether the matrix uses a scalable algorithm to compute the overlap in MatIncreaseOverlap()

2470:    Collective on Mat

2472:    Input Parameters:
2473: +    A - the matrix
2474: -    sc - PETSC_TRUE indicates use of the scalable algorithm (the default is PETSC_FALSE, i.e. the non-scalable algorithm)

2476:  Level: advanced

2478: @*/
2479: PetscErrorCode MatMPIAIJSetUseScalableIncreaseOverlap(Mat A,PetscBool sc)
2480: {
2481:   PetscErrorCode       ierr;

2484:   PetscTryMethod(A,"MatMPIAIJSetUseScalableIncreaseOverlap_C",(Mat,PetscBool),(A,sc));
2485:   return(0);
2486: }
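
/*
   A minimal usage sketch (assuming a MATMPIAIJ matrix A, e.g. before it is handed to
   PCASM, which calls MatIncreaseOverlap() internally):

      MatMPIAIJSetUseScalableIncreaseOverlap(A,PETSC_TRUE);

   The same switch is available on the command line as -mat_increase_overlap_scalable,
   handled by MatSetFromOptions_MPIAIJ() below.
*/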

2488: PetscErrorCode MatSetFromOptions_MPIAIJ(PetscOptionItems *PetscOptionsObject,Mat A)
2489: {
2490:   PetscErrorCode       ierr;
2491:   PetscBool            sc = PETSC_FALSE,flg;

2494:   PetscOptionsHead(PetscOptionsObject,"MPIAIJ options");
2495:   if (A->ops->increaseoverlap == MatIncreaseOverlap_MPIAIJ_Scalable) sc = PETSC_TRUE;
2496:   PetscOptionsBool("-mat_increase_overlap_scalable","Use a scalable algorithm to compute the overlap","MatIncreaseOverlap",sc,&sc,&flg);
2497:   if (flg) {
2498:     MatMPIAIJSetUseScalableIncreaseOverlap(A,sc);
2499:   }
2500:   PetscOptionsTail();
2501:   return(0);
2502: }

2504: PetscErrorCode MatShift_MPIAIJ(Mat Y,PetscScalar a)
2505: {
2507:   Mat_MPIAIJ     *maij = (Mat_MPIAIJ*)Y->data;
2508:   Mat_SeqAIJ     *aij = (Mat_SeqAIJ*)maij->A->data;

2511:   if (!Y->preallocated) {
2512:     MatMPIAIJSetPreallocation(Y,1,NULL,0,NULL);
2513:   } else if (!aij->nz) {
2514:     PetscInt nonew = aij->nonew;
2515:     MatSeqAIJSetPreallocation(maij->A,1,NULL);
2516:     aij->nonew = nonew;
2517:   }
2518:   MatShift_Basic(Y,a);
2519:   return(0);
2520: }

2522: PetscErrorCode MatMissingDiagonal_MPIAIJ(Mat A,PetscBool  *missing,PetscInt *d)
2523: {
2524:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;

2528:   if (A->rmap->n != A->cmap->n) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Only works for square matrices");
2529:   MatMissingDiagonal(a->A,missing,d);
2530:   if (d) {
2531:     PetscInt rstart;
2532:     MatGetOwnershipRange(A,&rstart,NULL);
2533:     *d += rstart;

2535:   }
2536:   return(0);
2537: }

2539: PetscErrorCode MatInvertVariableBlockDiagonal_MPIAIJ(Mat A,PetscInt nblocks,const PetscInt *bsizes,PetscScalar *diag)
2540: {
2541:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;

2545:   MatInvertVariableBlockDiagonal(a->A,nblocks,bsizes,diag);
2546:   return(0);
2547: }

2549: /* -------------------------------------------------------------------*/
2550: static struct _MatOps MatOps_Values = {MatSetValues_MPIAIJ,
2551:                                        MatGetRow_MPIAIJ,
2552:                                        MatRestoreRow_MPIAIJ,
2553:                                        MatMult_MPIAIJ,
2554:                                 /* 4*/ MatMultAdd_MPIAIJ,
2555:                                        MatMultTranspose_MPIAIJ,
2556:                                        MatMultTransposeAdd_MPIAIJ,
2557:                                        0,
2558:                                        0,
2559:                                        0,
2560:                                 /*10*/ 0,
2561:                                        0,
2562:                                        0,
2563:                                        MatSOR_MPIAIJ,
2564:                                        MatTranspose_MPIAIJ,
2565:                                 /*15*/ MatGetInfo_MPIAIJ,
2566:                                        MatEqual_MPIAIJ,
2567:                                        MatGetDiagonal_MPIAIJ,
2568:                                        MatDiagonalScale_MPIAIJ,
2569:                                        MatNorm_MPIAIJ,
2570:                                 /*20*/ MatAssemblyBegin_MPIAIJ,
2571:                                        MatAssemblyEnd_MPIAIJ,
2572:                                        MatSetOption_MPIAIJ,
2573:                                        MatZeroEntries_MPIAIJ,
2574:                                 /*24*/ MatZeroRows_MPIAIJ,
2575:                                        0,
2576:                                        0,
2577:                                        0,
2578:                                        0,
2579:                                 /*29*/ MatSetUp_MPIAIJ,
2580:                                        0,
2581:                                        0,
2582:                                        MatGetDiagonalBlock_MPIAIJ,
2583:                                        0,
2584:                                 /*34*/ MatDuplicate_MPIAIJ,
2585:                                        0,
2586:                                        0,
2587:                                        0,
2588:                                        0,
2589:                                 /*39*/ MatAXPY_MPIAIJ,
2590:                                        MatCreateSubMatrices_MPIAIJ,
2591:                                        MatIncreaseOverlap_MPIAIJ,
2592:                                        MatGetValues_MPIAIJ,
2593:                                        MatCopy_MPIAIJ,
2594:                                 /*44*/ MatGetRowMax_MPIAIJ,
2595:                                        MatScale_MPIAIJ,
2596:                                        MatShift_MPIAIJ,
2597:                                        MatDiagonalSet_MPIAIJ,
2598:                                        MatZeroRowsColumns_MPIAIJ,
2599:                                 /*49*/ MatSetRandom_MPIAIJ,
2600:                                        0,
2601:                                        0,
2602:                                        0,
2603:                                        0,
2604:                                 /*54*/ MatFDColoringCreate_MPIXAIJ,
2605:                                        0,
2606:                                        MatSetUnfactored_MPIAIJ,
2607:                                        MatPermute_MPIAIJ,
2608:                                        0,
2609:                                 /*59*/ MatCreateSubMatrix_MPIAIJ,
2610:                                        MatDestroy_MPIAIJ,
2611:                                        MatView_MPIAIJ,
2612:                                        0,
2613:                                        MatMatMatMult_MPIAIJ_MPIAIJ_MPIAIJ,
2614:                                 /*64*/ MatMatMatMultSymbolic_MPIAIJ_MPIAIJ_MPIAIJ,
2615:                                        MatMatMatMultNumeric_MPIAIJ_MPIAIJ_MPIAIJ,
2616:                                        0,
2617:                                        0,
2618:                                        0,
2619:                                 /*69*/ MatGetRowMaxAbs_MPIAIJ,
2620:                                        MatGetRowMinAbs_MPIAIJ,
2621:                                        0,
2622:                                        0,
2623:                                        0,
2624:                                        0,
2625:                                 /*75*/ MatFDColoringApply_AIJ,
2626:                                        MatSetFromOptions_MPIAIJ,
2627:                                        0,
2628:                                        0,
2629:                                        MatFindZeroDiagonals_MPIAIJ,
2630:                                 /*80*/ 0,
2631:                                        0,
2632:                                        0,
2633:                                 /*83*/ MatLoad_MPIAIJ,
2634:                                        MatIsSymmetric_MPIAIJ,
2635:                                        0,
2636:                                        0,
2637:                                        0,
2638:                                        0,
2639:                                 /*89*/ MatMatMult_MPIAIJ_MPIAIJ,
2640:                                        MatMatMultSymbolic_MPIAIJ_MPIAIJ,
2641:                                        MatMatMultNumeric_MPIAIJ_MPIAIJ,
2642:                                        MatPtAP_MPIAIJ_MPIAIJ,
2643:                                        MatPtAPSymbolic_MPIAIJ_MPIAIJ,
2644:                                 /*94*/ MatPtAPNumeric_MPIAIJ_MPIAIJ,
2645:                                        0,
2646:                                        0,
2647:                                        0,
2648:                                        0,
2649:                                 /*99*/ 0,
2650:                                        0,
2651:                                        0,
2652:                                        MatConjugate_MPIAIJ,
2653:                                        0,
2654:                                 /*104*/MatSetValuesRow_MPIAIJ,
2655:                                        MatRealPart_MPIAIJ,
2656:                                        MatImaginaryPart_MPIAIJ,
2657:                                        0,
2658:                                        0,
2659:                                 /*109*/0,
2660:                                        0,
2661:                                        MatGetRowMin_MPIAIJ,
2662:                                        0,
2663:                                        MatMissingDiagonal_MPIAIJ,
2664:                                 /*114*/MatGetSeqNonzeroStructure_MPIAIJ,
2665:                                        0,
2666:                                        MatGetGhosts_MPIAIJ,
2667:                                        0,
2668:                                        0,
2669:                                 /*119*/0,
2670:                                        0,
2671:                                        0,
2672:                                        0,
2673:                                        MatGetMultiProcBlock_MPIAIJ,
2674:                                 /*124*/MatFindNonzeroRows_MPIAIJ,
2675:                                        MatGetColumnNorms_MPIAIJ,
2676:                                        MatInvertBlockDiagonal_MPIAIJ,
2677:                                        MatInvertVariableBlockDiagonal_MPIAIJ,
2678:                                        MatCreateSubMatricesMPI_MPIAIJ,
2679:                                 /*129*/0,
2680:                                        MatTransposeMatMult_MPIAIJ_MPIAIJ,
2681:                                        MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ,
2682:                                        MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ,
2683:                                        0,
2684:                                 /*134*/0,
2685:                                        0,
2686:                                        MatRARt_MPIAIJ_MPIAIJ,
2687:                                        0,
2688:                                        0,
2689:                                 /*139*/MatSetBlockSizes_MPIAIJ,
2690:                                        0,
2691:                                        0,
2692:                                        MatFDColoringSetUp_MPIXAIJ,
2693:                                        MatFindOffBlockDiagonalEntries_MPIAIJ,
2694:                                 /*144*/MatCreateMPIMatConcatenateSeqMat_MPIAIJ
2695: };

2697: /* ----------------------------------------------------------------------------------------*/

2699: PetscErrorCode  MatStoreValues_MPIAIJ(Mat mat)
2700: {
2701:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;

2705:   MatStoreValues(aij->A);
2706:   MatStoreValues(aij->B);
2707:   return(0);
2708: }

2710: PetscErrorCode  MatRetrieveValues_MPIAIJ(Mat mat)
2711: {
2712:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;

2716:   MatRetrieveValues(aij->A);
2717:   MatRetrieveValues(aij->B);
2718:   return(0);
2719: }
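
/*
   A minimal usage sketch: freeze the nonzero pattern, save the numerical values, and
   restore them later (assuming an assembled MATMPIAIJ matrix A):

      MatSetOption(A,MAT_NEW_NONZERO_LOCATIONS,PETSC_FALSE);   required before MatStoreValues()
      MatStoreValues(A);
      MatZeroEntries(A);
      ... insert new values and reassemble A ...
      MatRetrieveValues(A);                                    brings back the stored values
*/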

2721: PetscErrorCode  MatMPIAIJSetPreallocation_MPIAIJ(Mat B,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[])
2722: {
2723:   Mat_MPIAIJ     *b;

2727:   PetscLayoutSetUp(B->rmap);
2728:   PetscLayoutSetUp(B->cmap);
2729:   b = (Mat_MPIAIJ*)B->data;

2731: #if defined(PETSC_USE_CTABLE)
2732:   PetscTableDestroy(&b->colmap);
2733: #else
2734:   PetscFree(b->colmap);
2735: #endif
2736:   PetscFree(b->garray);
2737:   VecDestroy(&b->lvec);
2738:   VecScatterDestroy(&b->Mvctx);

2740:   /* Because b->B may have been resized we simply destroy it and create a new one each time */
2741:   MatDestroy(&b->B);
2742:   MatCreate(PETSC_COMM_SELF,&b->B);
2743:   MatSetSizes(b->B,B->rmap->n,B->cmap->N,B->rmap->n,B->cmap->N);
2744:   MatSetBlockSizesFromMats(b->B,B,B);
2745:   MatSetType(b->B,MATSEQAIJ);
2746:   PetscLogObjectParent((PetscObject)B,(PetscObject)b->B);

2748:   if (!B->preallocated) {
2749:     MatCreate(PETSC_COMM_SELF,&b->A);
2750:     MatSetSizes(b->A,B->rmap->n,B->cmap->n,B->rmap->n,B->cmap->n);
2751:     MatSetBlockSizesFromMats(b->A,B,B);
2752:     MatSetType(b->A,MATSEQAIJ);
2753:     PetscLogObjectParent((PetscObject)B,(PetscObject)b->A);
2754:   }

2756:   MatSeqAIJSetPreallocation(b->A,d_nz,d_nnz);
2757:   MatSeqAIJSetPreallocation(b->B,o_nz,o_nnz);
2758:   B->preallocated  = PETSC_TRUE;
2759:   B->was_assembled = PETSC_FALSE;
2760:   B->assembled     = PETSC_FALSE;
2761:   return(0);
2762: }
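
/*
   A minimal usage sketch (M and N are global sizes chosen by the caller):

      Mat A;

      MatCreate(PETSC_COMM_WORLD,&A);
      MatSetSizes(A,PETSC_DECIDE,PETSC_DECIDE,M,N);
      MatSetType(A,MATMPIAIJ);
      MatMPIAIJSetPreallocation(A,5,NULL,2,NULL);   at most 5 nonzeros per row in the diagonal
                                                    block and 2 in the off-diagonal block
      ... MatSetValues(), MatAssemblyBegin(), MatAssemblyEnd() ...

   Passing arrays d_nnz and o_nnz instead of the scalar counts gives an exact per-row
   preallocation.
*/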

2764: PetscErrorCode MatResetPreallocation_MPIAIJ(Mat B)
2765: {
2766:   Mat_MPIAIJ     *b;

2771:   PetscLayoutSetUp(B->rmap);
2772:   PetscLayoutSetUp(B->cmap);
2773:   b = (Mat_MPIAIJ*)B->data;

2775: #if defined(PETSC_USE_CTABLE)
2776:   PetscTableDestroy(&b->colmap);
2777: #else
2778:   PetscFree(b->colmap);
2779: #endif
2780:   PetscFree(b->garray);
2781:   VecDestroy(&b->lvec);
2782:   VecScatterDestroy(&b->Mvctx);

2784:   MatResetPreallocation(b->A);
2785:   MatResetPreallocation(b->B);
2786:   B->preallocated  = PETSC_TRUE;
2787:   B->was_assembled = PETSC_FALSE;
2788:   B->assembled = PETSC_FALSE;
2789:   return(0);
2790: }

2792: PetscErrorCode MatDuplicate_MPIAIJ(Mat matin,MatDuplicateOption cpvalues,Mat *newmat)
2793: {
2794:   Mat            mat;
2795:   Mat_MPIAIJ     *a,*oldmat = (Mat_MPIAIJ*)matin->data;

2799:   *newmat = 0;
2800:   MatCreate(PetscObjectComm((PetscObject)matin),&mat);
2801:   MatSetSizes(mat,matin->rmap->n,matin->cmap->n,matin->rmap->N,matin->cmap->N);
2802:   MatSetBlockSizesFromMats(mat,matin,matin);
2803:   MatSetType(mat,((PetscObject)matin)->type_name);
2804:   a       = (Mat_MPIAIJ*)mat->data;

2806:   mat->factortype   = matin->factortype;
2807:   mat->assembled    = PETSC_TRUE;
2808:   mat->insertmode   = NOT_SET_VALUES;
2809:   mat->preallocated = PETSC_TRUE;

2811:   a->size         = oldmat->size;
2812:   a->rank         = oldmat->rank;
2813:   a->donotstash   = oldmat->donotstash;
2814:   a->roworiented  = oldmat->roworiented;
2815:   a->rowindices   = 0;
2816:   a->rowvalues    = 0;
2817:   a->getrowactive = PETSC_FALSE;

2819:   PetscLayoutReference(matin->rmap,&mat->rmap);
2820:   PetscLayoutReference(matin->cmap,&mat->cmap);

2822:   if (oldmat->colmap) {
2823: #if defined(PETSC_USE_CTABLE)
2824:     PetscTableCreateCopy(oldmat->colmap,&a->colmap);
2825: #else
2826:     PetscMalloc1(mat->cmap->N,&a->colmap);
2827:     PetscLogObjectMemory((PetscObject)mat,(mat->cmap->N)*sizeof(PetscInt));
2828:     PetscMemcpy(a->colmap,oldmat->colmap,(mat->cmap->N)*sizeof(PetscInt));
2829: #endif
2830:   } else a->colmap = 0;
2831:   if (oldmat->garray) {
2832:     PetscInt len;
2833:     len  = oldmat->B->cmap->n;
2834:     PetscMalloc1(len+1,&a->garray);
2835:     PetscLogObjectMemory((PetscObject)mat,len*sizeof(PetscInt));
2836:     if (len) { PetscMemcpy(a->garray,oldmat->garray,len*sizeof(PetscInt)); }
2837:   } else a->garray = 0;

2839:   VecDuplicate(oldmat->lvec,&a->lvec);
2840:   PetscLogObjectParent((PetscObject)mat,(PetscObject)a->lvec);
2841:   VecScatterCopy(oldmat->Mvctx,&a->Mvctx);
2842:   PetscLogObjectParent((PetscObject)mat,(PetscObject)a->Mvctx);

2844:   if (oldmat->Mvctx_mpi1) {
2845:     VecScatterCopy(oldmat->Mvctx_mpi1,&a->Mvctx_mpi1);
2846:     PetscLogObjectParent((PetscObject)mat,(PetscObject)a->Mvctx_mpi1);
2847:   }

2849:   MatDuplicate(oldmat->A,cpvalues,&a->A);
2850:   PetscLogObjectParent((PetscObject)mat,(PetscObject)a->A);
2851:   MatDuplicate(oldmat->B,cpvalues,&a->B);
2852:   PetscLogObjectParent((PetscObject)mat,(PetscObject)a->B);
2853:   PetscFunctionListDuplicate(((PetscObject)matin)->qlist,&((PetscObject)mat)->qlist);
2854:   *newmat = mat;
2855:   return(0);
2856: }

2858: PetscErrorCode MatLoad_MPIAIJ(Mat newMat, PetscViewer viewer)
2859: {
2860:   PetscBool      isbinary, ishdf5;

2866:   /* force binary viewer to load .info file if it has not yet done so */
2867:   PetscViewerSetUp(viewer);
2868:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERBINARY,&isbinary);
2869:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERHDF5,  &ishdf5);
2870:   if (isbinary) {
2871:     MatLoad_MPIAIJ_Binary(newMat,viewer);
2872:   } else if (ishdf5) {
2873: #if defined(PETSC_HAVE_HDF5)
2874:     MatLoad_AIJ_HDF5(newMat,viewer);
2875: #else
2876:     SETERRQ(PetscObjectComm((PetscObject)newMat),PETSC_ERR_SUP,"HDF5 not supported in this build.\nPlease reconfigure using --download-hdf5");
2877: #endif
2878:   } else {
2879:     SETERRQ2(PetscObjectComm((PetscObject)newMat),PETSC_ERR_SUP,"Viewer type %s not yet supported for reading %s matrices",((PetscObject)viewer)->type_name,((PetscObject)newMat)->type_name);
2880:   }
2881:   return(0);
2882: }
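
   MatLoad_MPIAIJ() is reached through the generic MatLoad() interface. A sketch of loading an
   MPIAIJ matrix from a PETSc binary file (the file name is illustrative):
.vb
      Mat         A;
      PetscViewer viewer;
      PetscViewerBinaryOpen(PETSC_COMM_WORLD,"matrix.dat",FILE_MODE_READ,&viewer);
      MatCreate(PETSC_COMM_WORLD,&A);
      MatSetType(A,MATAIJ);
      MatLoad(A,viewer);
      PetscViewerDestroy(&viewer);
.ve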

2884: PetscErrorCode MatLoad_MPIAIJ_Binary(Mat newMat, PetscViewer viewer)
2885: {
2886:   PetscScalar    *vals,*svals;
2887:   MPI_Comm       comm;
2889:   PetscMPIInt    rank,size,tag = ((PetscObject)viewer)->tag;
2890:   PetscInt       i,nz,j,rstart,rend,mmax,maxnz = 0;
2891:   PetscInt       header[4],*rowlengths = 0,M,N,m,*cols;
2892:   PetscInt       *ourlens = NULL,*procsnz = NULL,*offlens = NULL,jj,*mycols,*smycols;
2893:   PetscInt       cend,cstart,n,*rowners;
2894:   int            fd;
2895:   PetscInt       bs = newMat->rmap->bs;

2898:   PetscObjectGetComm((PetscObject)viewer,&comm);
2899:   MPI_Comm_size(comm,&size);
2900:   MPI_Comm_rank(comm,&rank);
2901:   PetscViewerBinaryGetDescriptor(viewer,&fd);
2902:   if (!rank) {
2903:     PetscBinaryRead(fd,(char*)header,4,PETSC_INT);
2904:     if (header[0] != MAT_FILE_CLASSID) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_FILE_UNEXPECTED,"not matrix object");
2905:     if (header[3] < 0) SETERRQ(PetscObjectComm((PetscObject)newMat),PETSC_ERR_FILE_UNEXPECTED,"Matrix stored in special format on disk, cannot load as MATMPIAIJ");
2906:   }

2908:   PetscOptionsBegin(comm,NULL,"Options for loading MATMPIAIJ matrix","Mat");
2909:   PetscOptionsInt("-matload_block_size","Set the blocksize used to store the matrix","MatLoad",bs,&bs,NULL);
2910:   PetscOptionsEnd();
2911:   if (bs < 0) bs = 1;

2913:   MPI_Bcast(header+1,3,MPIU_INT,0,comm);
2914:   M    = header[1]; N = header[2];

2916:   /* If global sizes are set, check if they are consistent with that given in the file */
2917:   if (newMat->rmap->N >= 0 && newMat->rmap->N != M) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_FILE_UNEXPECTED,"Inconsistent # of rows: Matrix in file has (%D) and input matrix has (%D)",newMat->rmap->N,M);
2918:   if (newMat->cmap->N >= 0 && newMat->cmap->N != N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_FILE_UNEXPECTED,"Inconsistent # of cols: Matrix in file has (%D) and input matrix has (%D)",newMat->cmap->N,N);

2920:   /* determine ownership of all (block) rows */
2921:   if (M%bs) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_FILE_UNEXPECTED, "Inconsistent # of rows (%D) and block size (%D)",M,bs);
2922:   if (newMat->rmap->n < 0) m = bs*((M/bs)/size + (((M/bs) % size) > rank));    /* PETSC_DECIDE */
2923:   else m = newMat->rmap->n; /* Set by user */

2925:   PetscMalloc1(size+1,&rowners);
2926:   MPI_Allgather(&m,1,MPIU_INT,rowners+1,1,MPIU_INT,comm);

2928:   /* First process needs enough room for process with most rows */
2929:   if (!rank) {
2930:     mmax = rowners[1];
2931:     for (i=2; i<=size; i++) {
2932:       mmax = PetscMax(mmax, rowners[i]);
2933:     }
2934:   } else mmax = -1;             /* unused, but compilers complain */

2936:   rowners[0] = 0;
2937:   for (i=2; i<=size; i++) {
2938:     rowners[i] += rowners[i-1];
2939:   }
2940:   rstart = rowners[rank];
2941:   rend   = rowners[rank+1];

2943:   /* distribute row lengths to all processors */
2944:   PetscMalloc2(m,&ourlens,m,&offlens);
2945:   if (!rank) {
2946:     PetscBinaryRead(fd,ourlens,m,PETSC_INT);
2947:     PetscMalloc1(mmax,&rowlengths);
2948:     PetscCalloc1(size,&procsnz);
2949:     for (j=0; j<m; j++) {
2950:       procsnz[0] += ourlens[j];
2951:     }
2952:     for (i=1; i<size; i++) {
2953:       PetscBinaryRead(fd,rowlengths,rowners[i+1]-rowners[i],PETSC_INT);
2954:       /* calculate the number of nonzeros on each processor */
2955:       for (j=0; j<rowners[i+1]-rowners[i]; j++) {
2956:         procsnz[i] += rowlengths[j];
2957:       }
2958:       MPIULong_Send(rowlengths,rowners[i+1]-rowners[i],MPIU_INT,i,tag,comm);
2959:     }
2960:     PetscFree(rowlengths);
2961:   } else {
2962:     MPIULong_Recv(ourlens,m,MPIU_INT,0,tag,comm);
2963:   }

2965:   if (!rank) {
2966:     /* determine max buffer needed and allocate it */
2967:     maxnz = 0;
2968:     for (i=0; i<size; i++) {
2969:       maxnz = PetscMax(maxnz,procsnz[i]);
2970:     }
2971:     PetscMalloc1(maxnz,&cols);

2973:     /* read in my part of the matrix column indices  */
2974:     nz   = procsnz[0];
2975:     PetscMalloc1(nz,&mycols);
2976:     PetscBinaryRead(fd,mycols,nz,PETSC_INT);

2978:     /* read in everyone else's part and ship it off */
2979:     for (i=1; i<size; i++) {
2980:       nz   = procsnz[i];
2981:       PetscBinaryRead(fd,cols,nz,PETSC_INT);
2982:       MPIULong_Send(cols,nz,MPIU_INT,i,tag,comm);
2983:     }
2984:     PetscFree(cols);
2985:   } else {
2986:     /* determine buffer space needed for message */
2987:     nz = 0;
2988:     for (i=0; i<m; i++) {
2989:       nz += ourlens[i];
2990:     }
2991:     PetscMalloc1(nz,&mycols);

2993:     /* receive message of column indices */
2994:     MPIULong_Recv(mycols,nz,MPIU_INT,0,tag,comm);
2995:   }

2997:   /* determine column ownership if matrix is not square */
2998:   if (N != M) {
2999:     if (newMat->cmap->n < 0) n = N/size + ((N % size) > rank);
3000:     else n = newMat->cmap->n;
3001:     MPI_Scan(&n,&cend,1,MPIU_INT,MPI_SUM,comm);
3002:     cstart = cend - n;
3003:   } else {
3004:     cstart = rstart;
3005:     cend   = rend;
3006:     n      = cend - cstart;
3007:   }

3009:   /* loop over local rows, determining number of off diagonal entries */
3010:   PetscMemzero(offlens,m*sizeof(PetscInt));
3011:   jj   = 0;
3012:   for (i=0; i<m; i++) {
3013:     for (j=0; j<ourlens[i]; j++) {
3014:       if (mycols[jj] < cstart || mycols[jj] >= cend) offlens[i]++;
3015:       jj++;
3016:     }
3017:   }

3019:   for (i=0; i<m; i++) {
3020:     ourlens[i] -= offlens[i];
3021:   }
3022:   MatSetSizes(newMat,m,n,M,N);

3024:   if (bs > 1) {MatSetBlockSize(newMat,bs);}

3026:   MatMPIAIJSetPreallocation(newMat,0,ourlens,0,offlens);

3028:   for (i=0; i<m; i++) {
3029:     ourlens[i] += offlens[i];
3030:   }

3032:   if (!rank) {
3033:     PetscMalloc1(maxnz+1,&vals);

3035:     /* read in my part of the matrix numerical values  */
3036:     nz   = procsnz[0];
3037:     PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);

3039:     /* insert into matrix */
3040:     jj      = rstart;
3041:     smycols = mycols;
3042:     svals   = vals;
3043:     for (i=0; i<m; i++) {
3044:       MatSetValues_MPIAIJ(newMat,1,&jj,ourlens[i],smycols,svals,INSERT_VALUES);
3045:       smycols += ourlens[i];
3046:       svals   += ourlens[i];
3047:       jj++;
3048:     }

3050:     /* read in the other processes' parts and ship them out */
3051:     for (i=1; i<size; i++) {
3052:       nz   = procsnz[i];
3053:       PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);
3054:       MPIULong_Send(vals,nz,MPIU_SCALAR,i,((PetscObject)newMat)->tag,comm);
3055:     }
3056:     PetscFree(procsnz);
3057:   } else {
3058:     /* receive numeric values */
3059:     PetscMalloc1(nz+1,&vals);

3061:     /* receive message of values */
3062:     MPIULong_Recv(vals,nz,MPIU_SCALAR,0,((PetscObject)newMat)->tag,comm);

3064:     /* insert into matrix */
3065:     jj      = rstart;
3066:     smycols = mycols;
3067:     svals   = vals;
3068:     for (i=0; i<m; i++) {
3069:       MatSetValues_MPIAIJ(newMat,1,&jj,ourlens[i],smycols,svals,INSERT_VALUES);
3070:       smycols += ourlens[i];
3071:       svals   += ourlens[i];
3072:       jj++;
3073:     }
3074:   }
3075:   PetscFree2(ourlens,offlens);
3076:   PetscFree(vals);
3077:   PetscFree(mycols);
3078:   PetscFree(rowners);
3079:   MatAssemblyBegin(newMat,MAT_FINAL_ASSEMBLY);
3080:   MatAssemblyEnd(newMat,MAT_FINAL_ASSEMBLY);
3081:   return(0);
3082: }
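
   A file in the format consumed by this loader is most easily produced by viewing an already
   assembled matrix with a binary viewer, e.g. (sketch; the file name is illustrative):
.vb
      PetscViewer viewer;
      PetscViewerBinaryOpen(PETSC_COMM_WORLD,"matrix.dat",FILE_MODE_WRITE,&viewer);
      MatView(A,viewer);   /* writes the header, row lengths, column indices and values */
      PetscViewerDestroy(&viewer);
.ve
   The block size used when loading can be selected with the option -matload_block_size.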

3084: /* Not scalable because of ISAllGather() unless getting all columns. */
3085: PetscErrorCode ISGetSeqIS_Private(Mat mat,IS iscol,IS *isseq)
3086: {
3088:   IS             iscol_local;
3089:   PetscBool      isstride;
3090:   PetscMPIInt    lisstride=0,gisstride;

3093:   /* check if we are grabbing all columns */
3094:   PetscObjectTypeCompare((PetscObject)iscol,ISSTRIDE,&isstride);

3096:   if (isstride) {
3097:     PetscInt  start,len,mstart,mlen;
3098:     ISStrideGetInfo(iscol,&start,NULL);
3099:     ISGetLocalSize(iscol,&len);
3100:     MatGetOwnershipRangeColumn(mat,&mstart,&mlen);
3101:     if (mstart == start && mlen-mstart == len) lisstride = 1;
3102:   }

3104:   MPIU_Allreduce(&lisstride,&gisstride,1,MPI_INT,MPI_MIN,PetscObjectComm((PetscObject)mat));
3105:   if (gisstride) {
3106:     PetscInt N;
3107:     MatGetSize(mat,NULL,&N);
3108:     ISCreateStride(PetscObjectComm((PetscObject)mat),N,0,1,&iscol_local);
3109:     ISSetIdentity(iscol_local);
3110:     PetscInfo(mat,"Optimizing for obtaining all columns of the matrix; skipping ISAllGather()\n");
3111:   } else {
3112:     PetscInt cbs;
3113:     ISGetBlockSize(iscol,&cbs);
3114:     ISAllGather(iscol,&iscol_local);
3115:     ISSetBlockSize(iscol_local,cbs);
3116:   }

3118:   *isseq = iscol_local;
3119:   return(0);
3120: }
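
   The stride test above recognizes the common case in which, collectively, iscol selects every
   column of mat with each process listing exactly its own column ownership range; only then is the
   non-scalable ISAllGather() skipped. A caller can hit this fast path with an iscol built as in the
   following sketch:
.vb
      PetscInt cstart,cend;
      IS       iscol;
      MatGetOwnershipRangeColumn(mat,&cstart,&cend);
      ISCreateStride(PetscObjectComm((PetscObject)mat),cend-cstart,cstart,1,&iscol);
.ve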

3122: /*
3123:  Used by MatCreateSubMatrix_MPIAIJ_SameRowColDist() to avoid ISAllGather() and global size of iscol_local
3124:  (see MatCreateSubMatrix_MPIAIJ_nonscalable)

3126:  Input Parameters:
3127:    mat - matrix
3128:    isrow - parallel row index set; its local indices are a subset of the local rows of mat,
3129:            i.e., mat->rstart <= isrow[i] < mat->rend
3130:    iscol - parallel column index set; its local indices are a subset of local columns of mat,
3131:            i.e., mat->cstart <= iscol[i] < mat->cend
3132:  Output Parameters:
3133:    isrow_d,iscol_d - sequential row and column index sets for retrieving mat->A
3134:    iscol_o - sequential column index set for retrieving mat->B
3135:    garray - column map; garray[i] indicates global location of iscol_o[i] in iscol
3136:  */
3137: PetscErrorCode ISGetSeqIS_SameColDist_Private(Mat mat,IS isrow,IS iscol,IS *isrow_d,IS *iscol_d,IS *iscol_o,const PetscInt *garray[])
3138: {
3140:   Vec            x,cmap;
3141:   const PetscInt *is_idx;
3142:   PetscScalar    *xarray,*cmaparray;
3143:   PetscInt       ncols,isstart,*idx,m,rstart,*cmap1,count;
3144:   Mat_MPIAIJ     *a=(Mat_MPIAIJ*)mat->data;
3145:   Mat            B=a->B;
3146:   Vec            lvec=a->lvec,lcmap;
3147:   PetscInt       i,cstart,cend,Bn=B->cmap->N;
3148:   MPI_Comm       comm;
3149:   VecScatter     Mvctx=a->Mvctx;

3152:   PetscObjectGetComm((PetscObject)mat,&comm);
3153:   ISGetLocalSize(iscol,&ncols);

3155:   /* (1) iscol is a sub-column vector of mat, pad it with '-1.' to form a full vector x */
3156:   MatCreateVecs(mat,&x,NULL);
3157:   VecSet(x,-1.0);
3158:   VecDuplicate(x,&cmap);
3159:   VecSet(cmap,-1.0);

3161:   /* Get start indices */
3162:   MPI_Scan(&ncols,&isstart,1,MPIU_INT,MPI_SUM,comm);
3163:   isstart -= ncols;
3164:   MatGetOwnershipRangeColumn(mat,&cstart,&cend);

3166:   ISGetIndices(iscol,&is_idx);
3167:   VecGetArray(x,&xarray);
3168:   VecGetArray(cmap,&cmaparray);
3169:   PetscMalloc1(ncols,&idx);
3170:   for (i=0; i<ncols; i++) {
3171:     xarray[is_idx[i]-cstart]    = (PetscScalar)is_idx[i];
3172:     cmaparray[is_idx[i]-cstart] = i + isstart;      /* global index of iscol[i] */
3173:     idx[i]                      = is_idx[i]-cstart; /* local index of iscol[i]  */
3174:   }
3175:   VecRestoreArray(x,&xarray);
3176:   VecRestoreArray(cmap,&cmaparray);
3177:   ISRestoreIndices(iscol,&is_idx);

3179:   /* Get iscol_d */
3180:   ISCreateGeneral(PETSC_COMM_SELF,ncols,idx,PETSC_OWN_POINTER,iscol_d);
3181:   ISGetBlockSize(iscol,&i);
3182:   ISSetBlockSize(*iscol_d,i);

3184:   /* Get isrow_d */
3185:   ISGetLocalSize(isrow,&m);
3186:   rstart = mat->rmap->rstart;
3187:   PetscMalloc1(m,&idx);
3188:   ISGetIndices(isrow,&is_idx);
3189:   for (i=0; i<m; i++) idx[i] = is_idx[i]-rstart;
3190:   ISRestoreIndices(isrow,&is_idx);

3192:   ISCreateGeneral(PETSC_COMM_SELF,m,idx,PETSC_OWN_POINTER,isrow_d);
3193:   ISGetBlockSize(isrow,&i);
3194:   ISSetBlockSize(*isrow_d,i);

3196:   /* (2) Scatter x and cmap using aij->Mvctx to get their off-process portions (see MatMult_MPIAIJ) */
3197:   VecScatterBegin(Mvctx,x,lvec,INSERT_VALUES,SCATTER_FORWARD);
3198:   VecScatterEnd(Mvctx,x,lvec,INSERT_VALUES,SCATTER_FORWARD);

3200:   VecDuplicate(lvec,&lcmap);

3202:   VecScatterBegin(Mvctx,cmap,lcmap,INSERT_VALUES,SCATTER_FORWARD);
3203:   VecScatterEnd(Mvctx,cmap,lcmap,INSERT_VALUES,SCATTER_FORWARD);

3205:   /* (3) create sequential iscol_o (a subset of iscol) and isgarray */
3206:   /* off-process column indices */
3207:   count = 0;
3208:   PetscMalloc1(Bn,&idx);
3209:   PetscMalloc1(Bn,&cmap1);

3211:   VecGetArray(lvec,&xarray);
3212:   VecGetArray(lcmap,&cmaparray);
3213:   for (i=0; i<Bn; i++) {
3214:     if (PetscRealPart(xarray[i]) > -1.0) {
3215:       idx[count]     = i;                   /* local column index in off-diagonal part B */
3216:       cmap1[count] = (PetscInt)PetscRealPart(cmaparray[i]);  /* column index in submat */
3217:       count++;
3218:     }
3219:   }
3220:   VecRestoreArray(lvec,&xarray);
3221:   VecRestoreArray(lcmap,&cmaparray);

3223:   ISCreateGeneral(PETSC_COMM_SELF,count,idx,PETSC_COPY_VALUES,iscol_o);
3224:   /* cannot ensure iscol_o has same blocksize as iscol! */

3226:   PetscFree(idx);
3227:   *garray = cmap1;

3229:   VecDestroy(&x);
3230:   VecDestroy(&cmap);
3231:   VecDestroy(&lcmap);
3232:   return(0);
3233: }

3235: /* isrow and iscol have same processor distribution as mat, output *submat is a submatrix of local mat */
3236: PetscErrorCode MatCreateSubMatrix_MPIAIJ_SameRowColDist(Mat mat,IS isrow,IS iscol,MatReuse call,Mat *submat)
3237: {
3239:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)mat->data,*asub;
3240:   Mat            M = NULL;
3241:   MPI_Comm       comm;
3242:   IS             iscol_d,isrow_d,iscol_o;
3243:   Mat            Asub = NULL,Bsub = NULL;
3244:   PetscInt       n;

3247:   PetscObjectGetComm((PetscObject)mat,&comm);

3249:   if (call == MAT_REUSE_MATRIX) {
3250:     /* Retrieve isrow_d, iscol_d and iscol_o from submat */
3251:     PetscObjectQuery((PetscObject)*submat,"isrow_d",(PetscObject*)&isrow_d);
3252:     if (!isrow_d) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"isrow_d passed in was not used before, cannot reuse");

3254:     PetscObjectQuery((PetscObject)*submat,"iscol_d",(PetscObject*)&iscol_d);
3255:     if (!iscol_d) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"iscol_d passed in was not used before, cannot reuse");

3257:     PetscObjectQuery((PetscObject)*submat,"iscol_o",(PetscObject*)&iscol_o);
3258:     if (!iscol_o) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"iscol_o passed in was not used before, cannot reuse");

3260:     /* Update diagonal and off-diagonal portions of submat */
3261:     asub = (Mat_MPIAIJ*)(*submat)->data;
3262:     MatCreateSubMatrix_SeqAIJ(a->A,isrow_d,iscol_d,PETSC_DECIDE,MAT_REUSE_MATRIX,&asub->A);
3263:     ISGetLocalSize(iscol_o,&n);
3264:     if (n) {
3265:       MatCreateSubMatrix_SeqAIJ(a->B,isrow_d,iscol_o,PETSC_DECIDE,MAT_REUSE_MATRIX,&asub->B);
3266:     }
3267:     MatAssemblyBegin(*submat,MAT_FINAL_ASSEMBLY);
3268:     MatAssemblyEnd(*submat,MAT_FINAL_ASSEMBLY);

3270:   } else { /* call == MAT_INITIAL_MATRIX */
3271:     const PetscInt *garray;
3272:     PetscInt        BsubN;

3274:     /* Create isrow_d, iscol_d, iscol_o and isgarray (replace isgarray with array?) */
3275:     ISGetSeqIS_SameColDist_Private(mat,isrow,iscol,&isrow_d,&iscol_d,&iscol_o,&garray);

3277:     /* Create local submatrices Asub and Bsub */
3278:     MatCreateSubMatrix_SeqAIJ(a->A,isrow_d,iscol_d,PETSC_DECIDE,MAT_INITIAL_MATRIX,&Asub);
3279:     MatCreateSubMatrix_SeqAIJ(a->B,isrow_d,iscol_o,PETSC_DECIDE,MAT_INITIAL_MATRIX,&Bsub);

3281:     /* Create submatrix M */
3282:     MatCreateMPIAIJWithSeqAIJ(comm,Asub,Bsub,garray,&M);

3284:     /* If Bsub has empty columns, compress iscol_o such that it will retrieve condensed Bsub from a->B during reuse */
3285:     asub = (Mat_MPIAIJ*)M->data;

3287:     ISGetLocalSize(iscol_o,&BsubN);
3288:     n = asub->B->cmap->N;
3289:     if (BsubN > n) {
3290:       /* This case can be tested using ~petsc/src/tao/bound/examples/tutorials/runplate2_3 */
3291:       const PetscInt *idx;
3292:       PetscInt       i,j,*idx_new,*subgarray = asub->garray;
3293:       PetscInfo2(M,"submatrix Bn %D != BsubN %D, update iscol_o\n",n,BsubN);

3295:       PetscMalloc1(n,&idx_new);
3296:       j = 0;
3297:       ISGetIndices(iscol_o,&idx);
3298:       for (i=0; i<n; i++) {
3299:         if (j >= BsubN) break;
3300:         while (subgarray[i] > garray[j]) j++;

3302:         if (subgarray[i] == garray[j]) {
3303:           idx_new[i] = idx[j++];
3304:         } else SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"subgarray[%D]=%D cannot be less than garray[%D]=%D",i,subgarray[i],j,garray[j]);
3305:       }
3306:       ISRestoreIndices(iscol_o,&idx);

3308:       ISDestroy(&iscol_o);
3309:       ISCreateGeneral(PETSC_COMM_SELF,n,idx_new,PETSC_OWN_POINTER,&iscol_o);

3311:     } else if (BsubN < n) {
3312:       SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Columns of Bsub (%D) cannot be smaller than B's (%D)",BsubN,asub->B->cmap->N);
3313:     }

3315:     PetscFree(garray);
3316:     *submat = M;

3318:     /* Save isrow_d, iscol_d and iscol_o used in processor for next request */
3319:     PetscObjectCompose((PetscObject)M,"isrow_d",(PetscObject)isrow_d);
3320:     ISDestroy(&isrow_d);

3322:     PetscObjectCompose((PetscObject)M,"iscol_d",(PetscObject)iscol_d);
3323:     ISDestroy(&iscol_d);

3325:     PetscObjectCompose((PetscObject)M,"iscol_o",(PetscObject)iscol_o);
3326:     ISDestroy(&iscol_o);
3327:   }
3328:   return(0);
3329: }

3331: PetscErrorCode MatCreateSubMatrix_MPIAIJ(Mat mat,IS isrow,IS iscol,MatReuse call,Mat *newmat)
3332: {
3334:   IS             iscol_local=NULL,isrow_d;
3335:   PetscInt       csize;
3336:   PetscInt       n,i,j,start,end;
3337:   PetscBool      sameRowDist=PETSC_FALSE,sameDist[2],tsameDist[2];
3338:   MPI_Comm       comm;

3341:   /* If isrow has same processor distribution as mat,
3342:      call MatCreateSubMatrix_MPIAIJ_SameRowDist() to avoid using a hash table with global size of iscol */
3343:   if (call == MAT_REUSE_MATRIX) {
3344:     PetscObjectQuery((PetscObject)*newmat,"isrow_d",(PetscObject*)&isrow_d);
3345:     if (isrow_d) {
3346:       sameRowDist  = PETSC_TRUE;
3347:       tsameDist[1] = PETSC_TRUE; /* sameColDist */
3348:     } else {
3349:       PetscObjectQuery((PetscObject)*newmat,"SubIScol",(PetscObject*)&iscol_local);
3350:       if (iscol_local) {
3351:         sameRowDist  = PETSC_TRUE;
3352:         tsameDist[1] = PETSC_FALSE; /* !sameColDist */
3353:       }
3354:     }
3355:   } else {
3356:     /* Check if isrow has same processor distribution as mat */
3357:     sameDist[0] = PETSC_FALSE;
3358:     ISGetLocalSize(isrow,&n);
3359:     if (!n) {
3360:       sameDist[0] = PETSC_TRUE;
3361:     } else {
3362:       ISGetMinMax(isrow,&i,&j);
3363:       MatGetOwnershipRange(mat,&start,&end);
3364:       if (i >= start && j < end) {
3365:         sameDist[0] = PETSC_TRUE;
3366:       }
3367:     }

3369:     /* Check if iscol has same processor distribution as mat */
3370:     sameDist[1] = PETSC_FALSE;
3371:     ISGetLocalSize(iscol,&n);
3372:     if (!n) {
3373:       sameDist[1] = PETSC_TRUE;
3374:     } else {
3375:       ISGetMinMax(iscol,&i,&j);
3376:       MatGetOwnershipRangeColumn(mat,&start,&end);
3377:       if (i >= start && j < end) sameDist[1] = PETSC_TRUE;
3378:     }

3380:     PetscObjectGetComm((PetscObject)mat,&comm);
3381:     MPIU_Allreduce(&sameDist,&tsameDist,2,MPIU_BOOL,MPI_LAND,comm);
3382:     sameRowDist = tsameDist[0];
3383:   }

3385:   if (sameRowDist) {
3386:     if (tsameDist[1]) { /* sameRowDist & sameColDist */
3387:       /* isrow and iscol have same processor distribution as mat */
3388:       MatCreateSubMatrix_MPIAIJ_SameRowColDist(mat,isrow,iscol,call,newmat);
3389:       return(0);
3390:     } else { /* sameRowDist */
3391:       /* isrow has same processor distribution as mat */
3392:       if (call == MAT_INITIAL_MATRIX) {
3393:         PetscBool sorted;
3394:         ISGetSeqIS_Private(mat,iscol,&iscol_local);
3395:         ISGetLocalSize(iscol_local,&n); /* local size of iscol_local = global columns of newmat */
3396:         ISGetSize(iscol,&i);
3397:         if (n != i) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"n %D != size of iscol %D",n,i);

3399:         ISSorted(iscol_local,&sorted);
3400:         if (sorted) {
3401:           /* MatCreateSubMatrix_MPIAIJ_SameRowDist() requires iscol_local be sorted; it can have duplicate indices */
3402:           MatCreateSubMatrix_MPIAIJ_SameRowDist(mat,isrow,iscol,iscol_local,MAT_INITIAL_MATRIX,newmat);
3403:           return(0);
3404:         }
3405:       } else { /* call == MAT_REUSE_MATRIX */
3406:         IS    iscol_sub;
3407:         PetscObjectQuery((PetscObject)*newmat,"SubIScol",(PetscObject*)&iscol_sub);
3408:         if (iscol_sub) {
3409:           MatCreateSubMatrix_MPIAIJ_SameRowDist(mat,isrow,iscol,NULL,call,newmat);
3410:           return(0);
3411:         }
3412:       }
3413:     }
3414:   }

3416:   /* General case: iscol -> iscol_local which has global size of iscol */
3417:   if (call == MAT_REUSE_MATRIX) {
3418:     PetscObjectQuery((PetscObject)*newmat,"ISAllGather",(PetscObject*)&iscol_local);
3419:     if (!iscol_local) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Submatrix passed in was not used before, cannot reuse");
3420:   } else {
3421:     if (!iscol_local) {
3422:       ISGetSeqIS_Private(mat,iscol,&iscol_local);
3423:     }
3424:   }

3426:   ISGetLocalSize(iscol,&csize);
3427:   MatCreateSubMatrix_MPIAIJ_nonscalable(mat,isrow,iscol_local,csize,call,newmat);

3429:   if (call == MAT_INITIAL_MATRIX) {
3430:     PetscObjectCompose((PetscObject)*newmat,"ISAllGather",(PetscObject)iscol_local);
3431:     ISDestroy(&iscol_local);
3432:   }
3433:   return(0);
3434: }
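
   This dispatch routine is reached through the type-independent interface MatCreateSubMatrix().
   The sketch below extracts the locally owned rows and columns, which takes the SameRowColDist
   branch above; a later call with MAT_REUSE_MATRIX reuses the index sets stashed on the submatrix
   (names are illustrative; A is an assembled MPIAIJ matrix on PETSC_COMM_WORLD):
.vb
      PetscInt rstart,rend,cstart,cend;
      IS       isrow,iscol;
      Mat      S;
      MatGetOwnershipRange(A,&rstart,&rend);
      MatGetOwnershipRangeColumn(A,&cstart,&cend);
      ISCreateStride(PETSC_COMM_WORLD,rend-rstart,rstart,1,&isrow);
      ISCreateStride(PETSC_COMM_WORLD,cend-cstart,cstart,1,&iscol);
      MatCreateSubMatrix(A,isrow,iscol,MAT_INITIAL_MATRIX,&S);
      /* ... change the values of A ... */
      MatCreateSubMatrix(A,isrow,iscol,MAT_REUSE_MATRIX,&S);
.ve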

3436: /*@C
3437:      MatCreateMPIAIJWithSeqAIJ - creates a MPIAIJ matrix using SeqAIJ matrices that contain the "diagonal"
3438:          and "off-diagonal" part of the matrix in CSR format.

3440:    Collective on MPI_Comm

3442:    Input Parameters:
3443: +  comm - MPI communicator
3444: .  A - "diagonal" portion of matrix
3445: .  B - "off-diagonal" portion of matrix, may have empty columns, will be destroyed by this routine
3446: -  garray - global index of B columns

3448:    Output Parameter:
3449: .   mat - the matrix, with input A as its local diagonal matrix
3450:    Level: advanced

3452:    Notes:
3453:        See MatCreateAIJ() for the definition of "diagonal" and "off-diagonal" portion of the matrix.
3454:        A becomes part of output mat, B is destroyed by this routine. The user cannot use A and B anymore.

3456: .seealso: MatCreateMPIAIJWithSplitArrays()
3457: @*/
3458: PetscErrorCode MatCreateMPIAIJWithSeqAIJ(MPI_Comm comm,Mat A,Mat B,const PetscInt garray[],Mat *mat)
3459: {
3461:   Mat_MPIAIJ     *maij;
3462:   Mat_SeqAIJ     *b=(Mat_SeqAIJ*)B->data,*bnew;
3463:   PetscInt       *oi=b->i,*oj=b->j,i,nz,col;
3464:   PetscScalar    *oa=b->a;
3465:   Mat            Bnew;
3466:   PetscInt       m,n,N;

3469:   MatCreate(comm,mat);
3470:   MatGetSize(A,&m,&n);
3471:   if (m != B->rmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Am %D != Bm %D",m,B->rmap->N);
3472:   if (A->rmap->bs != B->rmap->bs) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"A row bs %D != B row bs %D",A->rmap->bs,B->rmap->bs);
3473:   /* The check below is removed: when B is created using iscol_o from ISGetSeqIS_SameColDist_Private(), its bs may not be the same as A's */
3474:   /* if (A->cmap->bs != B->cmap->bs) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"A column bs %D != B column bs %D",A->cmap->bs,B->cmap->bs); */

3476:   /* Get global columns of mat */
3477:   MPIU_Allreduce(&n,&N,1,MPIU_INT,MPI_SUM,comm);

3479:   MatSetSizes(*mat,m,n,PETSC_DECIDE,N);
3480:   MatSetType(*mat,MATMPIAIJ);
3481:   MatSetBlockSizes(*mat,A->rmap->bs,A->cmap->bs);
3482:   maij = (Mat_MPIAIJ*)(*mat)->data;

3484:   (*mat)->preallocated = PETSC_TRUE;

3486:   PetscLayoutSetUp((*mat)->rmap);
3487:   PetscLayoutSetUp((*mat)->cmap);

3489:   /* Set A as diagonal portion of *mat */
3490:   maij->A = A;

3492:   nz = oi[m];
3493:   for (i=0; i<nz; i++) {
3494:     col   = oj[i];
3495:     oj[i] = garray[col];
3496:   }

3498:    /* Set Bnew as off-diagonal portion of *mat */
3499:   MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,m,N,oi,oj,oa,&Bnew);
3500:   bnew        = (Mat_SeqAIJ*)Bnew->data;
3501:   bnew->maxnz = b->maxnz; /* allocated nonzeros of B */
3502:   maij->B     = Bnew;

3504:   if (B->rmap->N != Bnew->rmap->N) SETERRQ2(PETSC_COMM_SELF,0,"BN %D != BnewN %D",B->rmap->N,Bnew->rmap->N);

3506:   b->singlemalloc = PETSC_FALSE; /* B arrays are shared by Bnew */
3507:   b->free_a       = PETSC_FALSE;
3508:   b->free_ij      = PETSC_FALSE;
3509:   MatDestroy(&B);

3511:   bnew->singlemalloc = PETSC_TRUE; /* arrays will be freed by MatDestroy(&Bnew) */
3512:   bnew->free_a       = PETSC_TRUE;
3513:   bnew->free_ij      = PETSC_TRUE;

3515:   /* condense columns of maij->B */
3516:   MatSetOption(*mat,MAT_NO_OFF_PROC_ENTRIES,PETSC_TRUE);
3517:   MatAssemblyBegin(*mat,MAT_FINAL_ASSEMBLY);
3518:   MatAssemblyEnd(*mat,MAT_FINAL_ASSEMBLY);
3519:   MatSetOption(*mat,MAT_NO_OFF_PROC_ENTRIES,PETSC_FALSE);
3520:   MatSetOption(*mat,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_TRUE);
3521:   return(0);
3522: }

3524: extern PetscErrorCode MatCreateSubMatrices_MPIAIJ_SingleIS_Local(Mat,PetscInt,const IS[],const IS[],MatReuse,PetscBool,Mat*);

3526: PetscErrorCode MatCreateSubMatrix_MPIAIJ_SameRowDist(Mat mat,IS isrow,IS iscol,IS iscol_local,MatReuse call,Mat *newmat)
3527: {
3529:   PetscInt       i,m,n,rstart,row,rend,nz,j,bs,cbs;
3530:   PetscInt       *ii,*jj,nlocal,*dlens,*olens,dlen,olen,jend,mglobal;
3531:   Mat_MPIAIJ     *a=(Mat_MPIAIJ*)mat->data;
3532:   Mat            M,Msub,B=a->B;
3533:   MatScalar      *aa;
3534:   Mat_SeqAIJ     *aij;
3535:   PetscInt       *garray = a->garray,*colsub,Ncols;
3536:   PetscInt       count,Bn=B->cmap->N,cstart=mat->cmap->rstart,cend=mat->cmap->rend;
3537:   IS             iscol_sub,iscmap;
3538:   const PetscInt *is_idx,*cmap;
3539:   PetscBool      allcolumns=PETSC_FALSE;
3540:   MPI_Comm       comm;

3543:   PetscObjectGetComm((PetscObject)mat,&comm);

3545:   if (call == MAT_REUSE_MATRIX) {
3546:     PetscObjectQuery((PetscObject)*newmat,"SubIScol",(PetscObject*)&iscol_sub);
3547:     if (!iscol_sub) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"SubIScol passed in was not used before, cannot reuse");
3548:     ISGetLocalSize(iscol_sub,&count);

3550:     PetscObjectQuery((PetscObject)*newmat,"Subcmap",(PetscObject*)&iscmap);
3551:     if (!iscmap) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Subcmap passed in was not used before, cannot reuse");

3553:     PetscObjectQuery((PetscObject)*newmat,"SubMatrix",(PetscObject*)&Msub);
3554:     if (!Msub) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Submatrix passed in was not used before, cannot reuse");

3556:     MatCreateSubMatrices_MPIAIJ_SingleIS_Local(mat,1,&isrow,&iscol_sub,MAT_REUSE_MATRIX,PETSC_FALSE,&Msub);

3558:   } else { /* call == MAT_INITIAL_MATRIX */
3559:     PetscBool flg;

3561:     ISGetLocalSize(iscol,&n);
3562:     ISGetSize(iscol,&Ncols);

3564:     /* (1) iscol -> nonscalable iscol_local */
3565:     /* Check for special case: each processor gets entire matrix columns */
3566:     ISIdentity(iscol_local,&flg);
3567:     if (flg && n == mat->cmap->N) allcolumns = PETSC_TRUE;
3568:     if (allcolumns) {
3569:       iscol_sub = iscol_local;
3570:       PetscObjectReference((PetscObject)iscol_local);
3571:       ISCreateStride(PETSC_COMM_SELF,n,0,1,&iscmap);

3573:     } else {
3574:       /* (2) iscol_local -> iscol_sub and iscmap. Implementation below requires iscol_local be sorted, it can have duplicate indices */
3575:       PetscInt *idx,*cmap1,k;
3576:       PetscMalloc1(Ncols,&idx);
3577:       PetscMalloc1(Ncols,&cmap1);
3578:       ISGetIndices(iscol_local,&is_idx);
3579:       count = 0;
3580:       k     = 0;
3581:       for (i=0; i<Ncols; i++) {
3582:         j = is_idx[i];
3583:         if (j >= cstart && j < cend) {
3584:           /* diagonal part of mat */
3585:           idx[count]     = j;
3586:           cmap1[count++] = i; /* column index in submat */
3587:         } else if (Bn) {
3588:           /* off-diagonal part of mat */
3589:           if (j == garray[k]) {
3590:             idx[count]     = j;
3591:             cmap1[count++] = i;  /* column index in submat */
3592:           } else if (j > garray[k]) {
3593:             while (j > garray[k] && k < Bn-1) k++;
3594:             if (j == garray[k]) {
3595:               idx[count]     = j;
3596:               cmap1[count++] = i; /* column index in submat */
3597:             }
3598:           }
3599:         }
3600:       }
3601:       ISRestoreIndices(iscol_local,&is_idx);

3603:       ISCreateGeneral(PETSC_COMM_SELF,count,idx,PETSC_OWN_POINTER,&iscol_sub);
3604:       ISGetBlockSize(iscol,&cbs);
3605:       ISSetBlockSize(iscol_sub,cbs);

3607:       ISCreateGeneral(PetscObjectComm((PetscObject)iscol_local),count,cmap1,PETSC_OWN_POINTER,&iscmap);
3608:     }

3610:     /* (3) Create sequential Msub */
3611:     MatCreateSubMatrices_MPIAIJ_SingleIS_Local(mat,1,&isrow,&iscol_sub,MAT_INITIAL_MATRIX,allcolumns,&Msub);
3612:   }

3614:   ISGetLocalSize(iscol_sub,&count);
3615:   aij  = (Mat_SeqAIJ*)(Msub)->data;
3616:   ii   = aij->i;
3617:   ISGetIndices(iscmap,&cmap);

3619:   /*
3620:       m - number of local rows
3621:       Ncols - number of columns (same on all processors)
3622:       rstart - first row in new global matrix generated
3623:   */
3624:   MatGetSize(Msub,&m,NULL);

3626:   if (call == MAT_INITIAL_MATRIX) {
3627:     /* (4) Create parallel newmat */
3628:     PetscMPIInt    rank,size;
3629:     PetscInt       csize;

3631:     MPI_Comm_size(comm,&size);
3632:     MPI_Comm_rank(comm,&rank);

3634:     /*
3635:         Determine the number of non-zeros in the diagonal and off-diagonal
3636:         portions of the matrix in order to do correct preallocation
3637:     */

3639:     /* first get start and end of "diagonal" columns */
3640:     ISGetLocalSize(iscol,&csize);
3641:     if (csize == PETSC_DECIDE) {
3642:       ISGetSize(isrow,&mglobal);
3643:       if (mglobal == Ncols) { /* square matrix */
3644:         nlocal = m;
3645:       } else {
3646:         nlocal = Ncols/size + ((Ncols % size) > rank);
3647:       }
3648:     } else {
3649:       nlocal = csize;
3650:     }
3651:     MPI_Scan(&nlocal,&rend,1,MPIU_INT,MPI_SUM,comm);
3652:     rstart = rend - nlocal;
3653:     if (rank == size - 1 && rend != Ncols) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Local column sizes %D do not add up to total number of columns %D",rend,Ncols);

3655:     /* next, compute all the lengths */
3656:     jj    = aij->j;
3657:     PetscMalloc1(2*m+1,&dlens);
3658:     olens = dlens + m;
3659:     for (i=0; i<m; i++) {
3660:       jend = ii[i+1] - ii[i];
3661:       olen = 0;
3662:       dlen = 0;
3663:       for (j=0; j<jend; j++) {
3664:         if (cmap[*jj] < rstart || cmap[*jj] >= rend) olen++;
3665:         else dlen++;
3666:         jj++;
3667:       }
3668:       olens[i] = olen;
3669:       dlens[i] = dlen;
3670:     }

3672:     ISGetBlockSize(isrow,&bs);
3673:     ISGetBlockSize(iscol,&cbs);

3675:     MatCreate(comm,&M);
3676:     MatSetSizes(M,m,nlocal,PETSC_DECIDE,Ncols);
3677:     MatSetBlockSizes(M,bs,cbs);
3678:     MatSetType(M,((PetscObject)mat)->type_name);
3679:     MatMPIAIJSetPreallocation(M,0,dlens,0,olens);
3680:     PetscFree(dlens);

3682:   } else { /* call == MAT_REUSE_MATRIX */
3683:     M    = *newmat;
3684:     MatGetLocalSize(M,&i,NULL);
3685:     if (i != m) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Previous matrix must be same size/layout as request");
3686:     MatZeroEntries(M);
3687:     /*
3688:          The next two lines are needed so we may call MatSetValues_MPIAIJ() below directly,
3689:        rather than the slower MatSetValues().
3690:     */
3691:     M->was_assembled = PETSC_TRUE;
3692:     M->assembled     = PETSC_FALSE;
3693:   }

3695:   /* (5) Set values of Msub to *newmat */
3696:   PetscMalloc1(count,&colsub);
3697:   MatGetOwnershipRange(M,&rstart,NULL);

3699:   jj   = aij->j;
3700:   aa   = aij->a;
3701:   for (i=0; i<m; i++) {
3702:     row = rstart + i;
3703:     nz  = ii[i+1] - ii[i];
3704:     for (j=0; j<nz; j++) colsub[j] = cmap[jj[j]];
3705:     MatSetValues_MPIAIJ(M,1,&row,nz,colsub,aa,INSERT_VALUES);
3706:     jj += nz; aa += nz;
3707:   }
3708:   ISRestoreIndices(iscmap,&cmap);

3710:   MatAssemblyBegin(M,MAT_FINAL_ASSEMBLY);
3711:   MatAssemblyEnd(M,MAT_FINAL_ASSEMBLY);

3713:   PetscFree(colsub);

3715:   /* save Msub, iscol_sub and iscmap used in processor for next request */
3716:   if (call ==  MAT_INITIAL_MATRIX) {
3717:     *newmat = M;
3718:     PetscObjectCompose((PetscObject)(*newmat),"SubMatrix",(PetscObject)Msub);
3719:     MatDestroy(&Msub);

3721:     PetscObjectCompose((PetscObject)(*newmat),"SubIScol",(PetscObject)iscol_sub);
3722:     ISDestroy(&iscol_sub);

3724:     PetscObjectCompose((PetscObject)(*newmat),"Subcmap",(PetscObject)iscmap);
3725:     ISDestroy(&iscmap);

3727:     if (iscol_local) {
3728:       PetscObjectCompose((PetscObject)(*newmat),"ISAllGather",(PetscObject)iscol_local);
3729:       ISDestroy(&iscol_local);
3730:     }
3731:   }
3732:   return(0);
3733: }

3735: /*
3736:     Not great since it makes two copies of the submatrix: first a sequential SeqAIJ
3737:   matrix on each process, and then the final result by concatenating the local matrices.
3738:   Writing it directly would be much like MatCreateSubMatrices_MPIAIJ().

3740:   Note: This requires a sequential iscol with all indices.
3741: */
3742: PetscErrorCode MatCreateSubMatrix_MPIAIJ_nonscalable(Mat mat,IS isrow,IS iscol,PetscInt csize,MatReuse call,Mat *newmat)
3743: {
3745:   PetscMPIInt    rank,size;
3746:   PetscInt       i,m,n,rstart,row,rend,nz,*cwork,j,bs,cbs;
3747:   PetscInt       *ii,*jj,nlocal,*dlens,*olens,dlen,olen,jend,mglobal;
3748:   Mat            M,Mreuse;
3749:   MatScalar      *aa,*vwork;
3750:   MPI_Comm       comm;
3751:   Mat_SeqAIJ     *aij;
3752:   PetscBool      colflag,allcolumns=PETSC_FALSE;

3755:   PetscObjectGetComm((PetscObject)mat,&comm);
3756:   MPI_Comm_rank(comm,&rank);
3757:   MPI_Comm_size(comm,&size);

3759:   /* Check for special case: each processor gets entire matrix columns */
3760:   ISIdentity(iscol,&colflag);
3761:   ISGetLocalSize(iscol,&n);
3762:   if (colflag && n == mat->cmap->N) allcolumns = PETSC_TRUE;

3764:   if (call ==  MAT_REUSE_MATRIX) {
3765:     PetscObjectQuery((PetscObject)*newmat,"SubMatrix",(PetscObject*)&Mreuse);
3766:     if (!Mreuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Submatrix passed in was not used before, cannot reuse");
3767:     MatCreateSubMatrices_MPIAIJ_SingleIS_Local(mat,1,&isrow,&iscol,MAT_REUSE_MATRIX,allcolumns,&Mreuse);
3768:   } else {
3769:     MatCreateSubMatrices_MPIAIJ_SingleIS_Local(mat,1,&isrow,&iscol,MAT_INITIAL_MATRIX,allcolumns,&Mreuse);
3770:   }

3772:   /*
3773:       m - number of local rows
3774:       n - number of columns (same on all processors)
3775:       rstart - first row in new global matrix generated
3776:   */
3777:   MatGetSize(Mreuse,&m,&n);
3778:   MatGetBlockSizes(Mreuse,&bs,&cbs);
3779:   if (call == MAT_INITIAL_MATRIX) {
3780:     aij = (Mat_SeqAIJ*)(Mreuse)->data;
3781:     ii  = aij->i;
3782:     jj  = aij->j;

3784:     /*
3785:         Determine the number of non-zeros in the diagonal and off-diagonal
3786:         portions of the matrix in order to do correct preallocation
3787:     */

3789:     /* first get start and end of "diagonal" columns */
3790:     if (csize == PETSC_DECIDE) {
3791:       ISGetSize(isrow,&mglobal);
3792:       if (mglobal == n) { /* square matrix */
3793:         nlocal = m;
3794:       } else {
3795:         nlocal = n/size + ((n % size) > rank);
3796:       }
3797:     } else {
3798:       nlocal = csize;
3799:     }
3800:     MPI_Scan(&nlocal,&rend,1,MPIU_INT,MPI_SUM,comm);
3801:     rstart = rend - nlocal;
3802:     if (rank == size - 1 && rend != n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Local column sizes %D do not add up to total number of columns %D",rend,n);

3804:     /* next, compute all the lengths */
3805:     PetscMalloc1(2*m+1,&dlens);
3806:     olens = dlens + m;
3807:     for (i=0; i<m; i++) {
3808:       jend = ii[i+1] - ii[i];
3809:       olen = 0;
3810:       dlen = 0;
3811:       for (j=0; j<jend; j++) {
3812:         if (*jj < rstart || *jj >= rend) olen++;
3813:         else dlen++;
3814:         jj++;
3815:       }
3816:       olens[i] = olen;
3817:       dlens[i] = dlen;
3818:     }
3819:     MatCreate(comm,&M);
3820:     MatSetSizes(M,m,nlocal,PETSC_DECIDE,n);
3821:     MatSetBlockSizes(M,bs,cbs);
3822:     MatSetType(M,((PetscObject)mat)->type_name);
3823:     MatMPIAIJSetPreallocation(M,0,dlens,0,olens);
3824:     PetscFree(dlens);
3825:   } else {
3826:     PetscInt ml,nl;

3828:     M    = *newmat;
3829:     MatGetLocalSize(M,&ml,&nl);
3830:     if (ml != m) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Previous matrix must be same size/layout as request");
3831:     MatZeroEntries(M);
3832:     /*
3833:          The next two lines are needed so we may call MatSetValues_MPIAIJ() below directly,
3834:        rather than the slower MatSetValues().
3835:     */
3836:     M->was_assembled = PETSC_TRUE;
3837:     M->assembled     = PETSC_FALSE;
3838:   }
3839:   MatGetOwnershipRange(M,&rstart,&rend);
3840:   aij  = (Mat_SeqAIJ*)(Mreuse)->data;
3841:   ii   = aij->i;
3842:   jj   = aij->j;
3843:   aa   = aij->a;
3844:   for (i=0; i<m; i++) {
3845:     row   = rstart + i;
3846:     nz    = ii[i+1] - ii[i];
3847:     cwork = jj;     jj += nz;
3848:     vwork = aa;     aa += nz;
3849:     MatSetValues_MPIAIJ(M,1,&row,nz,cwork,vwork,INSERT_VALUES);
3850:   }

3852:   MatAssemblyBegin(M,MAT_FINAL_ASSEMBLY);
3853:   MatAssemblyEnd(M,MAT_FINAL_ASSEMBLY);
3854:   *newmat = M;

3856:   /* save submatrix used in processor for next request */
3857:   if (call ==  MAT_INITIAL_MATRIX) {
3858:     PetscObjectCompose((PetscObject)M,"SubMatrix",(PetscObject)Mreuse);
3859:     MatDestroy(&Mreuse);
3860:   }
3861:   return(0);
3862: }

3864: PetscErrorCode MatMPIAIJSetPreallocationCSR_MPIAIJ(Mat B,const PetscInt Ii[],const PetscInt J[],const PetscScalar v[])
3865: {
3866:   PetscInt       m,cstart, cend,j,nnz,i,d;
3867:   PetscInt       *d_nnz,*o_nnz,nnz_max = 0,rstart,ii;
3868:   const PetscInt *JJ;
3869:   PetscScalar    *values;
3871:   PetscBool      nooffprocentries;

3874:   if (Ii && Ii[0]) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Ii[0] must be 0, it is %D",Ii[0]);

3876:   PetscLayoutSetUp(B->rmap);
3877:   PetscLayoutSetUp(B->cmap);
3878:   m      = B->rmap->n;
3879:   cstart = B->cmap->rstart;
3880:   cend   = B->cmap->rend;
3881:   rstart = B->rmap->rstart;

3883:   PetscCalloc2(m,&d_nnz,m,&o_nnz);

3885: #if defined(PETSC_USE_DEBUG)
3886:   for (i=0; i<m && Ii; i++) {
3887:     nnz = Ii[i+1]- Ii[i];
3888:     JJ  = J + Ii[i];
3889:     if (nnz < 0) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Local row %D has a negative %D number of columns",i,nnz);
3890:     if (nnz && (JJ[0] < 0)) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Row %D starts with negative column index %D",i,JJ[0]);
3891:     if (nnz && (JJ[nnz-1] >= B->cmap->N)) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Row %D ends with too large a column index %D (max allowed %D)",i,JJ[nnz-1],B->cmap->N);
3892:   }
3893: #endif

3895:   for (i=0; i<m && Ii; i++) {
3896:     nnz     = Ii[i+1]- Ii[i];
3897:     JJ      = J + Ii[i];
3898:     nnz_max = PetscMax(nnz_max,nnz);
3899:     d       = 0;
3900:     for (j=0; j<nnz; j++) {
3901:       if (cstart <= JJ[j] && JJ[j] < cend) d++;
3902:     }
3903:     d_nnz[i] = d;
3904:     o_nnz[i] = nnz - d;
3905:   }
3906:   MatMPIAIJSetPreallocation(B,0,d_nnz,0,o_nnz);
3907:   PetscFree2(d_nnz,o_nnz);

3909:   if (v) values = (PetscScalar*)v;
3910:   else {
3911:     PetscCalloc1(nnz_max+1,&values);
3912:   }

3914:   for (i=0; i<m && Ii; i++) {
3915:     ii   = i + rstart;
3916:     nnz  = Ii[i+1]- Ii[i];
3917:     MatSetValues_MPIAIJ(B,1,&ii,nnz,J+Ii[i],values+(v ? Ii[i] : 0),INSERT_VALUES);
3918:   }
3919:   nooffprocentries    = B->nooffprocentries;
3920:   B->nooffprocentries = PETSC_TRUE;
3921:   MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);
3922:   MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);
3923:   B->nooffprocentries = nooffprocentries;

3925:   if (!v) {
3926:     PetscFree(values);
3927:   }
3928:   MatSetOption(B,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_TRUE);
3929:   return(0);
3930: }

3932: /*@
3933:    MatMPIAIJSetPreallocationCSR - Allocates memory for a sparse parallel matrix in AIJ format
3934:    (the default parallel PETSc format).

3936:    Collective on MPI_Comm

3938:    Input Parameters:
3939: +  B - the matrix
3940: .  i - the indices into j for the start of each local row (starts with zero)
3941: .  j - the column indices for each local row (starts with zero)
3942: -  v - optional values in the matrix

3944:    Level: developer

3946:    Notes:
3947:        The i, j, and v arrays ARE copied by this routine into the internal format used by PETSc;
3948:      thus you CANNOT change the matrix entries by changing the values of v[] after you have
3949:      called this routine. Use MatCreateMPIAIJWithSplitArrays() to avoid needing to copy the arrays.

3951:        The i and j indices are 0 based, and the entries of i are offsets into the local j (and v) arrays.

3953:        The format used for the sparse matrix input is equivalent to a
3954:     row-major ordering, i.e., for the following matrix, the expected input data is
3955:     as shown:

3957: $        1 0 0
3958: $        2 0 3     P0
3959: $       -------
3960: $        4 5 6     P1
3961: $
3962: $     Process0 [P0]: rows_owned=[0,1]
3963: $        i =  {0,1,3}  [size = nrow+1  = 2+1]
3964: $        j =  {0,0,2}  [size = 3]
3965: $        v =  {1,2,3}  [size = 3]
3966: $
3967: $     Process1 [P1]: rows_owned=[2]
3968: $        i =  {0,3}    [size = nrow+1  = 1+1]
3969: $        j =  {0,1,2}  [size = 3]
3970: $        v =  {4,5,6}  [size = 3]

3972: .keywords: matrix, aij, compressed row, sparse, parallel

3974: .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatMPIAIJSetPreallocation(), MatCreateAIJ(), MATMPIAIJ,
3975:           MatCreateSeqAIJWithArrays(), MatCreateMPIAIJWithSplitArrays()
3976: @*/
3977: PetscErrorCode  MatMPIAIJSetPreallocationCSR(Mat B,const PetscInt i[],const PetscInt j[], const PetscScalar v[])
3978: {

3982:   PetscTryMethod(B,"MatMPIAIJSetPreallocationCSR_C",(Mat,const PetscInt[],const PetscInt[],const PetscScalar[]),(B,i,j,v));
3983:   return(0);
3984: }
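
   A two-process sketch matching the example above (P0 owns rows 0 and 1, P1 owns row 2 of the
   3x3 matrix; run on exactly two MPI processes). Note that this call both preallocates and inserts
   the values, so no further MatSetValues()/MatAssemblyBegin()/MatAssemblyEnd() calls are needed:
.vb
      Mat         A;
      PetscMPIInt rank;
      PetscInt    i0[] = {0,1,3}, j0[] = {0,0,2};   PetscScalar v0[] = {1,2,3};
      PetscInt    i1[] = {0,3},   j1[] = {0,1,2};   PetscScalar v1[] = {4,5,6};

      MPI_Comm_rank(PETSC_COMM_WORLD,&rank);
      MatCreate(PETSC_COMM_WORLD,&A);
      MatSetSizes(A,rank ? 1 : 2,PETSC_DECIDE,3,3);
      MatSetType(A,MATMPIAIJ);
      if (!rank) {MatMPIAIJSetPreallocationCSR(A,i0,j0,v0);}
      else       {MatMPIAIJSetPreallocationCSR(A,i1,j1,v1);}
.ve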

3986: /*@C
3987:    MatMPIAIJSetPreallocation - Preallocates memory for a sparse parallel matrix in AIJ format
3988:    (the default parallel PETSc format).  For good matrix assembly performance
3989:    the user should preallocate the matrix storage by setting the parameters
3990:    d_nz (or d_nnz) and o_nz (or o_nnz).  By setting these parameters accurately,
3991:    performance can be increased by more than a factor of 50.

3993:    Collective on MPI_Comm

3995:    Input Parameters:
3996: +  B - the matrix
3997: .  d_nz  - number of nonzeros per row in DIAGONAL portion of local submatrix
3998:            (same value is used for all local rows)
3999: .  d_nnz - array containing the number of nonzeros in the various rows of the
4000:            DIAGONAL portion of the local submatrix (possibly different for each row)
4001:            or NULL (PETSC_NULL_INTEGER in Fortran), if d_nz is used to specify the nonzero structure.
4002:            The size of this array is equal to the number of local rows, i.e 'm'.
4003:            For matrices that will be factored, you must leave room for (and set)
4004:            the diagonal entry even if it is zero.
4005: .  o_nz  - number of nonzeros per row in the OFF-DIAGONAL portion of local
4006:            submatrix (same value is used for all local rows).
4007: -  o_nnz - array containing the number of nonzeros in the various rows of the
4008:            OFF-DIAGONAL portion of the local submatrix (possibly different for
4009:            each row) or NULL (PETSC_NULL_INTEGER in Fortran), if o_nz is used to specify the nonzero
4010:            structure. The size of this array is equal to the number
4011:            of local rows, i.e 'm'.

4013:    If the *_nnz parameter is given then the *_nz parameter is ignored

4015:    The AIJ format (also called the Yale sparse matrix format or
4016:    compressed row storage (CSR)), is fully compatible with standard Fortran 77
4017:    storage.  The stored row and column indices begin with zero.
4018:    See Users-Manual: ch_mat for details.

4020:    The parallel matrix is partitioned such that the first m0 rows belong to
4021:    process 0, the next m1 rows belong to process 1, the next m2 rows belong
4022:    to process 2, etc., where m0,m1,m2,... are the input parameter 'm'.

4024:    The DIAGONAL portion of the local submatrix of a processor can be defined
4025:    as the submatrix which is obtained by extracting the part corresponding to
4026:    the rows r1-r2 and columns c1-c2 of the global matrix, where r1 is the
4027:    first row that belongs to the processor, r2 is the last row belonging to
4028:    this processor, and c1-c2 is the range of indices of the local part of a
4029:    vector suitable for applying the matrix to.  This is an mxn matrix.  In the
4030:    common case of a square matrix, the row and column ranges are the same and
4031:    the DIAGONAL part is also square. The remaining portion of the local
4032:    submatrix (mxN) constitutes the OFF-DIAGONAL portion.

4034:    If o_nnz, d_nnz are specified, then o_nz, and d_nz are ignored.

4036:    You can call MatGetInfo() to get information on how effective the preallocation was;
4037:    for example the fields mallocs,nz_allocated,nz_used,nz_unneeded;
4038:    You can also run with the option -info and look for messages with the string
4039:    malloc in them to see if additional memory allocation was needed.

4041:    Example usage:

4043:    Consider the following 8x8 matrix with 34 non-zero values, that is
4044:    assembled across 3 processors. Let's assume that proc0 owns 3 rows,
4045:    proc1 owns 3 rows, proc2 owns 2 rows. This division can be shown
4046:    as follows:

4048: .vb
4049:             1  2  0  |  0  3  0  |  0  4
4050:     Proc0   0  5  6  |  7  0  0  |  8  0
4051:             9  0 10  | 11  0  0  | 12  0
4052:     -------------------------------------
4053:            13  0 14  | 15 16 17  |  0  0
4054:     Proc1   0 18  0  | 19 20 21  |  0  0
4055:             0  0  0  | 22 23  0  | 24  0
4056:     -------------------------------------
4057:     Proc2  25 26 27  |  0  0 28  | 29  0
4058:            30  0  0  | 31 32 33  |  0 34
4059: .ve

4061:    This can be represented as a collection of submatrices as:

4063: .vb
4064:       A B C
4065:       D E F
4066:       G H I
4067: .ve

4069:    Where the submatrices A,B,C are owned by proc0, D,E,F are
4070:    owned by proc1, G,H,I are owned by proc2.

4072:    The 'm' parameters for proc0,proc1,proc2 are 3,3,2 respectively.
4073:    The 'n' parameters for proc0,proc1,proc2 are 3,3,2 respectively.
4074:    The 'M','N' parameters are 8,8, and have the same values on all procs.

4076:    The DIAGONAL submatrices corresponding to proc0,proc1,proc2 are
4077:    submatrices [A], [E], [I] respectively. The OFF-DIAGONAL submatrices
4078:    corresponding to proc0,proc1,proc2 are [BC], [DF], [GH] respectively.
4079:    Internally, each processor stores the DIAGONAL part, and the OFF-DIAGONAL
4080:    part as SeqAIJ matrices. For example, proc1 will store [E] as a SeqAIJ
4081:    matrix, and [DF] as another SeqAIJ matrix.

4083:    When d_nz, o_nz parameters are specified, d_nz storage elements are
4084:    allocated for every row of the local diagonal submatrix, and o_nz
4085:    storage locations are allocated for every row of the OFF-DIAGONAL submatrix.
4086:    One way to choose d_nz and o_nz is to use the maximum number of nonzeros per
4087:    local row of the local DIAGONAL and OFF-DIAGONAL submatrices, respectively.
4088:    In this case, the values of d_nz,o_nz are:
4089: .vb
4090:      proc0 : dnz = 2, o_nz = 2
4091:      proc1 : dnz = 3, o_nz = 2
4092:      proc2 : dnz = 1, o_nz = 4
4093: .ve
4094:    We are allocating m*(d_nz+o_nz) storage locations for every proc. This
4095:    translates to 3*(2+2)=12 for proc0, 3*(3+2)=15 for proc1, 2*(1+4)=10
4096:    for proc2, i.e., we are using 12+15+10=37 storage locations to store
4097:    34 values.

4099:    When d_nnz, o_nnz parameters are specified, the storage is specified
4100:    for every row, corresponding to both DIAGONAL and OFF-DIAGONAL submatrices.
4101:    In the above case the values for d_nnz,o_nnz are:
4102: .vb
4103:      proc0: d_nnz = [2,2,2] and o_nnz = [2,2,2]
4104:      proc1: d_nnz = [3,3,2] and o_nnz = [2,1,1]
4105:      proc2: d_nnz = [1,1]   and o_nnz = [4,4]
4106: .ve
4107:    Here the space allocated is the sum of all the above values, i.e., 34, and
4108:    hence the preallocation is perfect.

4110:    Level: intermediate

4112: .keywords: matrix, aij, compressed row, sparse, parallel

4114: .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatCreateAIJ(), MatMPIAIJSetPreallocationCSR(),
4115:           MATMPIAIJ, MatGetInfo(), PetscSplitOwnership()
4116: @*/
4117: PetscErrorCode MatMPIAIJSetPreallocation(Mat B,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[])
4118: {

4124:   PetscTryMethod(B,"MatMPIAIJSetPreallocation_C",(Mat,PetscInt,const PetscInt[],PetscInt,const PetscInt[]),(B,d_nz,d_nnz,o_nz,o_nnz));
4125:   return(0);
4126: }
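
   For the 8x8 example above, the preallocation call made on proc0 would look like the following
   sketch (the d_nz/o_nz scalar arguments are ignored because the per-row arrays are supplied);
   MatSetValues() and MatAssemblyBegin()/MatAssemblyEnd() still follow as usual:
.vb
      Mat      A;
      PetscInt d_nnz[] = {2,2,2}, o_nnz[] = {2,2,2};   /* proc0's rows of the 8x8 example */

      MatCreate(PETSC_COMM_WORLD,&A);
      MatSetSizes(A,3,3,8,8);
      MatSetType(A,MATMPIAIJ);
      MatMPIAIJSetPreallocation(A,0,d_nnz,0,o_nnz);
.ve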

4128: /*@
4129:      MatCreateMPIAIJWithArrays - creates an MPI AIJ matrix using arrays that contain the local rows
4130:          in standard CSR format.

4132:    Collective on MPI_Comm

4134:    Input Parameters:
4135: +  comm - MPI communicator
4136: .  m - number of local rows (Cannot be PETSC_DECIDE)
4137: .  n - This value should be the same as the local size used in creating the
4138:        x vector for the matrix-vector product y = Ax. (or PETSC_DECIDE to have
4139:        calculated if N is given) For square matrices n is almost always m.
4140: .  M - number of global rows (or PETSC_DETERMINE to have calculated if m is given)
4141: .  N - number of global columns (or PETSC_DETERMINE to have calculated if n is given)
4142: .   i - row indices; that is i[0] = 0, i[row] = i[row-1] + number of elements in that row of the matrix
4143: .   j - column indices
4144: -   a - matrix values

4146:    Output Parameter:
4147: .   mat - the matrix

4149:    Level: intermediate

4151:    Notes:
4152:        The i, j, and a arrays ARE copied by this routine into the internal format used by PETSc;
4153:      thus you CANNOT change the matrix entries by changing the values of a[] after you have
4154:      called this routine. Use MatCreateMPIAIJWithSplitArrays() to avoid needing to copy the arrays.

4156:        The i and j indices are 0 based, and the entries of i are offsets into the local j (and a) arrays.

4158:        The format used for the sparse matrix input is equivalent to a
4159:     row-major ordering, i.e., for the following matrix, the expected input data is
4160:     as shown:

4162: $        1 0 0
4163: $        2 0 3     P0
4164: $       -------
4165: $        4 5 6     P1
4166: $
4167: $     Process0 [P0]: rows_owned=[0,1]
4168: $        i =  {0,1,3}  [size = nrow+1  = 2+1]
4169: $        j =  {0,0,2}  [size = 3]
4170: $        v =  {1,2,3}  [size = 3]
4171: $
4172: $     Process1 [P1]: rows_owned=[2]
4173: $        i =  {0,3}    [size = nrow+1  = 1+1]
4174: $        j =  {0,1,2}  [size = 3]
4175: $        v =  {4,5,6}  [size = 3]

4177: .keywords: matrix, aij, compressed row, sparse, parallel

4179: .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatMPIAIJSetPreallocation(), MatMPIAIJSetPreallocationCSR(),
4180:           MATMPIAIJ, MatCreateAIJ(), MatCreateMPIAIJWithSplitArrays()
4181: @*/
4182: PetscErrorCode MatCreateMPIAIJWithArrays(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt M,PetscInt N,const PetscInt i[],const PetscInt j[],const PetscScalar a[],Mat *mat)
4183: {

4187:   if (i && i[0]) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"i (row indices) must start with 0");
4188:   if (m < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"local number of rows (m) cannot be PETSC_DECIDE, or negative");
4189:   MatCreate(comm,mat);
4190:   MatSetSizes(*mat,m,n,M,N);
4191:   /* MatSetBlockSizes(M,bs,cbs); */
4192:   MatSetType(*mat,MATMPIAIJ);
4193:   MatMPIAIJSetPreallocationCSR(*mat,i,j,a);
4194:   return(0);
4195: }
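
   A two-process sketch using the CSR data from the example above (run on exactly two MPI
   processes); since the arrays are copied, they may be modified or freed once the call returns:
.vb
      Mat         A;
      PetscMPIInt rank;
      PetscInt    i0[] = {0,1,3}, j0[] = {0,0,2};   PetscScalar v0[] = {1,2,3};
      PetscInt    i1[] = {0,3},   j1[] = {0,1,2};   PetscScalar v1[] = {4,5,6};

      MPI_Comm_rank(PETSC_COMM_WORLD,&rank);
      if (!rank) {MatCreateMPIAIJWithArrays(PETSC_COMM_WORLD,2,PETSC_DECIDE,3,3,i0,j0,v0,&A);}
      else       {MatCreateMPIAIJWithArrays(PETSC_COMM_WORLD,1,PETSC_DECIDE,3,3,i1,j1,v1,&A);}
.ve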

4197: /*@C
4198:    MatCreateAIJ - Creates a sparse parallel matrix in AIJ format
4199:    (the default parallel PETSc format).  For good matrix assembly performance
4200:    the user should preallocate the matrix storage by setting the parameters
4201:    d_nz (or d_nnz) and o_nz (or o_nnz).  By setting these parameters accurately,
4202:    performance can be increased by more than a factor of 50.

4204:    Collective on MPI_Comm

4206:    Input Parameters:
4207: +  comm - MPI communicator
4208: .  m - number of local rows (or PETSC_DECIDE to have calculated if M is given)
4209:            This value should be the same as the local size used in creating the
4210:            y vector for the matrix-vector product y = Ax.
4211: .  n - This value should be the same as the local size used in creating the
4212:        x vector for the matrix-vector product y = Ax. (or PETSC_DECIDE to have
4213:        calculated if N is given) For square matrices n is almost always m.
4214: .  M - number of global rows (or PETSC_DETERMINE to have calculated if m is given)
4215: .  N - number of global columns (or PETSC_DETERMINE to have calculated if n is given)
4216: .  d_nz  - number of nonzeros per row in DIAGONAL portion of local submatrix
4217:            (same value is used for all local rows)
4218: .  d_nnz - array containing the number of nonzeros in the various rows of the
4219:            DIAGONAL portion of the local submatrix (possibly different for each row)
4220:            or NULL, if d_nz is used to specify the nonzero structure.
4221:            The size of this array is equal to the number of local rows, i.e 'm'.
4222: .  o_nz  - number of nonzeros per row in the OFF-DIAGONAL portion of local
4223:            submatrix (same value is used for all local rows).
4224: -  o_nnz - array containing the number of nonzeros in the various rows of the
4225:            OFF-DIAGONAL portion of the local submatrix (possibly different for
4226:            each row) or NULL, if o_nz is used to specify the nonzero
4227:            structure. The size of this array is equal to the number
4228:            of local rows, i.e 'm'.

4230:    Output Parameter:
4231: .  A - the matrix

4233:    It is recommended that one use the MatCreate(), MatSetType() and/or MatSetFromOptions(),
4234:    MatXXXXSetPreallocation() paradigm instead of this routine directly.
4235:    [MatXXXXSetPreallocation() is, for example, MatSeqAIJSetPreallocation]

4237:    Notes:
4238:    If the *_nnz parameter is given then the *_nz parameter is ignored

4240:    m,n,M,N parameters specify the size of the matrix, and its partitioning across
4241:    processors, while d_nz,d_nnz,o_nz,o_nnz parameters specify the approximate
4242:    storage requirements for this matrix.

4244:    If PETSC_DECIDE or PETSC_DETERMINE is used for a particular argument on one
4245:    processor then it must be used on all processors that share the object for
4246:    that argument.

4248:    The user MUST specify either the local or global matrix dimensions
4249:    (possibly both).

4251:    The parallel matrix is partitioned across processors such that the
4252:    first m0 rows belong to process 0, the next m1 rows belong to
4253:    process 1, the next m2 rows belong to process 2, etc., where
4254:    m0,m1,m2,... are the input parameter 'm', i.e., each processor stores
4255:    values corresponding to an [m x N] submatrix.

4257:    The columns are logically partitioned with the n0 columns belonging
4258:    to the 0th partition, the next n1 columns belonging to the next
4259:    partition, etc., where n0,n1,n2,... are the input parameter 'n'.

4261:    The DIAGONAL portion of the local submatrix on any given processor
4262:    is the submatrix corresponding to the rows and columns m,n
4263:    owned by the given processor, i.e., the diagonal matrix on
4264:    process 0 is [m0 x n0], the diagonal matrix on process 1 is [m1 x n1],
4265:    etc. The remaining portion of the local submatrix [m x (N-n)]
4266:    constitutes the OFF-DIAGONAL portion. The example below better
4267:    illustrates this concept.

4269:    For a square global matrix we define each processor's diagonal portion
4270:    to be its local rows and the corresponding columns (a square submatrix);
4271:    each processor's off-diagonal portion encompasses the remainder of the
4272:    local matrix (a rectangular submatrix).

4274:    If o_nnz and d_nnz are specified, then o_nz and d_nz are ignored.

4276:    When calling this routine with a single process communicator, a matrix of
4277:    type SEQAIJ is returned.  If a matrix of type MPIAIJ is desired for this
4278:    type of communicator, use the construction mechanism
4279: .vb
4280:      MatCreate(...,&A); MatSetType(A,MATMPIAIJ); MatSetSizes(A, m,n,M,N); MatMPIAIJSetPreallocation(A,...);
4281: .ve


4288:    By default, this format uses inodes (identical nodes) when possible.
4289:    We search for consecutive rows with the same nonzero structure, thereby
4290:    reusing matrix information to achieve increased efficiency.

4292:    Options Database Keys:
4293: +  -mat_no_inode  - Do not use inodes
4294: -  -mat_inode_limit <limit> - Sets inode limit (max limit=5)
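
   As an illustration (a hypothetical run; the program name ./myprog is made up),
   these options could be supplied on the command line as
.vb
     ./myprog -mat_type aij -mat_no_inode
.ve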



4298:    Example usage:

4300:    Consider the following 8x8 matrix with 34 non-zero values that is
4301:    assembled across 3 processors. Let's assume that proc0 owns 3 rows,
4302:    proc1 owns 3 rows, and proc2 owns 2 rows. This division can be shown
4303:    as follows:

4305: .vb
4306:             1  2  0  |  0  3  0  |  0  4
4307:     Proc0   0  5  6  |  7  0  0  |  8  0
4308:             9  0 10  | 11  0  0  | 12  0
4309:     -------------------------------------
4310:            13  0 14  | 15 16 17  |  0  0
4311:     Proc1   0 18  0  | 19 20 21  |  0  0
4312:             0  0  0  | 22 23  0  | 24  0
4313:     -------------------------------------
4314:     Proc2  25 26 27  |  0  0 28  | 29  0
4315:            30  0  0  | 31 32 33  |  0 34
4316: .ve

4318:    This can be represented as a collection of submatrices as

4320: .vb
4321:       A B C
4322:       D E F
4323:       G H I
4324: .ve

4326:    Here the submatrices A,B,C are owned by proc0, D,E,F are
4327:    owned by proc1, and G,H,I are owned by proc2.

4329:    The 'm' parameters for proc0,proc1,proc2 are 3,3,2 respectively.
4330:    The 'n' parameters for proc0,proc1,proc2 are 3,3,2 respectively.
4331:    The 'M','N' parameters are 8,8, and have the same values on all procs.

4333:    The DIAGONAL submatrices corresponding to proc0,proc1,proc2 are
4334:    submatrices [A], [E], [I] respectively. The OFF-DIAGONAL submatrices
4335:    corresponding to proc0,proc1,proc2 are [BC], [DF], [GH] respectively.
4336:    Internally, each processor stores the DIAGONAL part and the OFF-DIAGONAL
4337:    part as SeqAIJ matrices. For example, proc1 will store [E] as a SeqAIJ
4338:    matrix, and [DF] as another SeqAIJ matrix.

4340:    When d_nz, o_nz parameters are specified, d_nz storage elements are
4341:    allocated for every row of the local diagonal submatrix, and o_nz
4342:    storage locations are allocated for every row of the OFF-DIAGONAL submatrix.
4343:    One way to choose d_nz and o_nz is to use the maximum number of nonzeros over
4344:    the local rows of the DIAGONAL and OFF-DIAGONAL submatrices, respectively.
4345:    In this case, the values of d_nz,o_nz are
4346: .vb
4347:      proc0 : d_nz = 2, o_nz = 2
4348:      proc1 : d_nz = 3, o_nz = 2
4349:      proc2 : d_nz = 1, o_nz = 4
4350: .ve
4351:    We are allocating m*(d_nz+o_nz) storage locations for every proc. This
4352:    translates to 3*(2+2)=12 for proc0, 3*(3+2)=15 for proc1, and 2*(1+4)=10
4353:    for proc2, i.e. we are using 12+15+10=37 storage locations to store
4354:    34 values.

4356:    When d_nnz, o_nnz parameters are specified, the storage is specified
4357:    for every row, corresponding to both DIAGONAL and OFF-DIAGONAL submatrices.
4358:    In the above case the values for d_nnz,o_nnz are
4359: .vb
4360:      proc0: d_nnz = [2,2,2] and o_nnz = [2,2,2]
4361:      proc1: d_nnz = [3,3,2] and o_nnz = [2,1,1]
4362:      proc2: d_nnz = [1,1]   and o_nnz = [4,4]
4363: .ve
4364:    Here the space allocated is the sum of all the above values, i.e. 34, and
4365:    hence the preallocation is exact.
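
   As a further illustration (a sketch only, with error checking and value insertion
   omitted), the process owning rows 3-5 of the example above (proc1) could create its
   part of the matrix with this exact preallocation as
.vb
     Mat      A;
     PetscInt d_nnz[3] = {3,3,2}, o_nnz[3] = {2,1,1};  /* the proc1 row counts listed above */

     /* d_nz and o_nz (here 0) are ignored because d_nnz and o_nnz are provided */
     MatCreateAIJ(PETSC_COMM_WORLD,3,3,8,8,0,d_nnz,0,o_nnz,&A);
     /* ... MatSetValues(), MatAssemblyBegin(), MatAssemblyEnd(), use A ... */
     MatDestroy(&A);
.ve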

4367:    Level: intermediate

4369: .keywords: matrix, aij, compressed row, sparse, parallel

4371: .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatMPIAIJSetPreallocation(), MatMPIAIJSetPreallocationCSR(),
4372:           MATMPIAIJ, MatCreateMPIAIJWithArrays()
4373: @*/
4374: PetscErrorCode  MatCreateAIJ(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt M,PetscInt N,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[],Mat *A)
4375: {
4377:   PetscMPIInt    size;

4380:   MatCreate(comm,A);
4381:   MatSetSizes(*A,m,n,M,N);
4382:   MPI_Comm_size(comm,&size);
4383:   if (size > 1) {
4384:     MatSetType(*A,MATMPIAIJ);
4385:     MatMPIAIJSetPreallocation(*A,d_nz,d_nnz,o_nz,o_nnz);
4386:   } else {
4387:     MatSetType(*A,MATSEQAIJ);
4388:     MatSeqAIJSetPreallocation(*A,d_nz,d_nnz);
4389:   }
4390:   return(0);
4391: }

4393: PetscErrorCode MatMPIAIJGetSeqAIJ(Mat A,Mat *Ad,Mat *Ao,const PetscInt *colmap[])
4394: {
4395:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
4396:   PetscBool      flg;

4400:   PetscStrbeginswith(((PetscObject)A)->type_name,MATMPIAIJ,&flg);
4401:   if (!flg) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"This function requires a MATMPIAIJ matrix as input");
4402:   if (Ad)     *Ad     = a->A;
4403:   if (Ao)     *Ao     = a->B;
4404:   if (colmap) *colmap = a->garray;
4405:   return(0);
4406: }

4408: PetscErrorCode MatCreateMPIMatConcatenateSeqMat_MPIAIJ(MPI_Comm comm,Mat inmat,PetscInt n,MatReuse scall,Mat *outmat)
4409: {
4411:   PetscInt       m,N,i,rstart,nnz,Ii;
4412:   PetscInt       *indx;
4413:   PetscScalar    *values;

4416:   MatGetSize(inmat,&m,&N);
4417:   if (scall == MAT_INITIAL_MATRIX) { /* symbolic phase */
4418:     PetscInt       *dnz,*onz,sum,bs,cbs;

4420:     if (n == PETSC_DECIDE) {
4421:       PetscSplitOwnership(comm,&n,&N);
4422:     }
4423:     /* Check sum(n) = N */
4424:     MPIU_Allreduce(&n,&sum,1,MPIU_INT,MPI_SUM,comm);
4425:     if (sum != N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Sum of local columns %D != global columns %D",sum,N);

4427:     MPI_Scan(&m, &rstart,1,MPIU_INT,MPI_SUM,comm);
4428:     rstart -= m;

4430:     MatPreallocateInitialize(comm,m,n,dnz,onz);
4431:     for (i=0; i<m; i++) {
4432:       MatGetRow_SeqAIJ(inmat,i,&nnz,&indx,NULL);
4433:       MatPreallocateSet(i+rstart,nnz,indx,dnz,onz);
4434:       MatRestoreRow_SeqAIJ(inmat,i,&nnz,&indx,NULL);
4435:     }

4437:     MatCreate(comm,outmat);
4438:     MatSetSizes(*outmat,m,n,PETSC_DETERMINE,PETSC_DETERMINE);
4439:     MatGetBlockSizes(inmat,&bs,&cbs);
4440:     MatSetBlockSizes(*outmat,bs,cbs);
4441:     MatSetType(*outmat,MATAIJ);
4442:     MatSeqAIJSetPreallocation(*outmat,0,dnz);
4443:     MatMPIAIJSetPreallocation(*outmat,0,dnz,0,onz);
4444:     MatPreallocateFinalize(dnz,onz);
4445:   }

4447:   /* numeric phase */
4448:   MatGetOwnershipRange(*outmat,&rstart,NULL);
4449:   for (i=0; i<m; i++) {
4450:     MatGetRow_SeqAIJ(inmat,i,&nnz,&indx,&values);
4451:     Ii   = i + rstart;
4452:     MatSetValues(*outmat,1,&Ii,nnz,indx,values,INSERT_VALUES);
4453:     MatRestoreRow_SeqAIJ(inmat,i,&nnz,&indx,&values);
4454:   }
4455:   MatAssemblyBegin(*outmat,MAT_FINAL_ASSEMBLY);
4456:   MatAssemblyEnd(*outmat,MAT_FINAL_ASSEMBLY);
4457:   return(0);
4458: }

4460: PetscErrorCode MatFileSplit(Mat A,char *outfile)
4461: {
4462:   PetscErrorCode    ierr;
4463:   PetscMPIInt       rank;
4464:   PetscInt          m,N,i,rstart,nnz;
4465:   size_t            len;
4466:   const PetscInt    *indx;
4467:   PetscViewer       out;
4468:   char              *name;
4469:   Mat               B;
4470:   const PetscScalar *values;

4473:   MatGetLocalSize(A,&m,0);
4474:   MatGetSize(A,0,&N);
4475:   /* Should this be the type of the diagonal block of A? */
4476:   MatCreate(PETSC_COMM_SELF,&B);
4477:   MatSetSizes(B,m,N,m,N);
4478:   MatSetBlockSizesFromMats(B,A,A);
4479:   MatSetType(B,MATSEQAIJ);
4480:   MatSeqAIJSetPreallocation(B,0,NULL);
4481:   MatGetOwnershipRange(A,&rstart,0);
4482:   for (i=0; i<m; i++) {
4483:     MatGetRow(A,i+rstart,&nnz,&indx,&values);
4484:     MatSetValues(B,1,&i,nnz,indx,values,INSERT_VALUES);
4485:     MatRestoreRow(A,i+rstart,&nnz,&indx,&values);
4486:   }
4487:   MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);
4488:   MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);

4490:   MPI_Comm_rank(PetscObjectComm((PetscObject)A),&rank);
4491:   PetscStrlen(outfile,&len);
4492:   PetscMalloc1(len+16,&name); /* leave room for the ".<rank>" suffix even for large ranks */
4493:   sprintf(name,"%s.%d",outfile,rank);
4494:   PetscViewerBinaryOpen(PETSC_COMM_SELF,name,FILE_MODE_APPEND,&out);
4495:   PetscFree(name);
4496:   MatView(B,out);
4497:   PetscViewerDestroy(&out);
4498:   MatDestroy(&B);
4499:   return(0);
4500: }

4502: PetscErrorCode MatDestroy_MPIAIJ_SeqsToMPI(Mat A)
4503: {
4504:   PetscErrorCode      ierr;
4505:   Mat_Merge_SeqsToMPI *merge;
4506:   PetscContainer      container;

4509:   PetscObjectQuery((PetscObject)A,"MatMergeSeqsToMPI",(PetscObject*)&container);
4510:   if (container) {
4511:     PetscContainerGetPointer(container,(void**)&merge);
4512:     PetscFree(merge->id_r);
4513:     PetscFree(merge->len_s);
4514:     PetscFree(merge->len_r);
4515:     PetscFree(merge->bi);
4516:     PetscFree(merge->bj);
4517:     PetscFree(merge->buf_ri[0]);
4518:     PetscFree(merge->buf_ri);
4519:     PetscFree(merge->buf_rj[0]);
4520:     PetscFree(merge->buf_rj);
4521:     PetscFree(merge->coi);
4522:     PetscFree(merge->coj);
4523:     PetscFree(merge->owners_co);
4524:     PetscLayoutDestroy(&merge->rowmap);
4525:     PetscFree(merge);
4526:     PetscObjectCompose((PetscObject)A,"MatMergeSeqsToMPI",0);
4527:   }
4528:   MatDestroy_MPIAIJ(A);
4529:   return(0);
4530: }

4532:  #include <../src/mat/utils/freespace.h>
4533:  #include <petscbt.h>

4535: PetscErrorCode MatCreateMPIAIJSumSeqAIJNumeric(Mat seqmat,Mat mpimat)
4536: {
4537:   PetscErrorCode      ierr;
4538:   MPI_Comm            comm;
4539:   Mat_SeqAIJ          *a  =(Mat_SeqAIJ*)seqmat->data;
4540:   PetscMPIInt         size,rank,taga,*len_s;
4541:   PetscInt            N=mpimat->cmap->N,i,j,*owners,*ai=a->i,*aj;
4542:   PetscInt            proc,m;
4543:   PetscInt            **buf_ri,**buf_rj;
4544:   PetscInt            k,anzi,*bj_i,*bi,*bj,arow,bnzi,nextaj;
4545:   PetscInt            nrows,**buf_ri_k,**nextrow,**nextai;
4546:   MPI_Request         *s_waits,*r_waits;
4547:   MPI_Status          *status;
4548:   MatScalar           *aa=a->a;
4549:   MatScalar           **abuf_r,*ba_i;
4550:   Mat_Merge_SeqsToMPI *merge;
4551:   PetscContainer      container;

4554:   PetscObjectGetComm((PetscObject)mpimat,&comm);
4555:   PetscLogEventBegin(MAT_Seqstompinum,seqmat,0,0,0);

4557:   MPI_Comm_size(comm,&size);
4558:   MPI_Comm_rank(comm,&rank);

4560:   PetscObjectQuery((PetscObject)mpimat,"MatMergeSeqsToMPI",(PetscObject*)&container);
4561:   PetscContainerGetPointer(container,(void**)&merge);

4563:   bi     = merge->bi;
4564:   bj     = merge->bj;
4565:   buf_ri = merge->buf_ri;
4566:   buf_rj = merge->buf_rj;

4568:   PetscMalloc1(size,&status);
4569:   owners = merge->rowmap->range;
4570:   len_s  = merge->len_s;

4572:   /* send and recv matrix values */
4573:   /*-----------------------------*/
4574:   PetscObjectGetNewTag((PetscObject)mpimat,&taga);
4575:   PetscPostIrecvScalar(comm,taga,merge->nrecv,merge->id_r,merge->len_r,&abuf_r,&r_waits);

4577:   PetscMalloc1(merge->nsend+1,&s_waits);
4578:   for (proc=0,k=0; proc<size; proc++) {
4579:     if (!len_s[proc]) continue;
4580:     i    = owners[proc];
4581:     MPI_Isend(aa+ai[i],len_s[proc],MPIU_MATSCALAR,proc,taga,comm,s_waits+k);
4582:     k++;
4583:   }

4585:   if (merge->nrecv) {MPI_Waitall(merge->nrecv,r_waits,status);}
4586:   if (merge->nsend) {MPI_Waitall(merge->nsend,s_waits,status);}
4587:   PetscFree(status);

4589:   PetscFree(s_waits);
4590:   PetscFree(r_waits);

4592:   /* insert mat values of mpimat */
4593:   /*----------------------------*/
4594:   PetscMalloc1(N,&ba_i);
4595:   PetscMalloc3(merge->nrecv,&buf_ri_k,merge->nrecv,&nextrow,merge->nrecv,&nextai);

4597:   for (k=0; k<merge->nrecv; k++) {
4598:     buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */
4599:     nrows       = *(buf_ri_k[k]);
4600:     nextrow[k]  = buf_ri_k[k]+1;  /* next row number of k-th recved i-structure */
4601:     nextai[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th recved i-structure  */
4602:   }

4604:   /* set values of ba */
4605:   m = merge->rowmap->n;
4606:   for (i=0; i<m; i++) {
4607:     arow = owners[rank] + i;
4608:     bj_i = bj+bi[i];  /* col indices of the i-th row of mpimat */
4609:     bnzi = bi[i+1] - bi[i];
4610:     PetscMemzero(ba_i,bnzi*sizeof(PetscScalar));

4612:     /* add local non-zero vals of this proc's seqmat into ba */
4613:     anzi   = ai[arow+1] - ai[arow];
4614:     aj     = a->j + ai[arow];
4615:     aa     = a->a + ai[arow];
4616:     nextaj = 0;
4617:     for (j=0; nextaj<anzi; j++) {
4618:       if (*(bj_i + j) == aj[nextaj]) { /* bcol == acol */
4619:         ba_i[j] += aa[nextaj++];
4620:       }
4621:     }

4623:     /* add received vals into ba */
4624:     for (k=0; k<merge->nrecv; k++) { /* k-th received message */
4625:       /* i-th row */
4626:       if (i == *nextrow[k]) {
4627:         anzi   = *(nextai[k]+1) - *nextai[k];
4628:         aj     = buf_rj[k] + *(nextai[k]);
4629:         aa     = abuf_r[k] + *(nextai[k]);
4630:         nextaj = 0;
4631:         for (j=0; nextaj<anzi; j++) {
4632:           if (*(bj_i + j) == aj[nextaj]) { /* bcol == acol */
4633:             ba_i[j] += aa[nextaj++];
4634:           }
4635:         }
4636:         nextrow[k]++; nextai[k]++;
4637:       }
4638:     }
4639:     MatSetValues(mpimat,1,&arow,bnzi,bj_i,ba_i,INSERT_VALUES);
4640:   }
4641:   MatAssemblyBegin(mpimat,MAT_FINAL_ASSEMBLY);
4642:   MatAssemblyEnd(mpimat,MAT_FINAL_ASSEMBLY);

4644:   PetscFree(abuf_r[0]);
4645:   PetscFree(abuf_r);
4646:   PetscFree(ba_i);
4647:   PetscFree3(buf_ri_k,nextrow,nextai);
4648:   PetscLogEventEnd(MAT_Seqstompinum,seqmat,0,0,0);
4649:   return(0);
4650: }

4652: PetscErrorCode  MatCreateMPIAIJSumSeqAIJSymbolic(MPI_Comm comm,Mat seqmat,PetscInt m,PetscInt n,Mat *mpimat)
4653: {
4654:   PetscErrorCode      ierr;
4655:   Mat                 B_mpi;
4656:   Mat_SeqAIJ          *a=(Mat_SeqAIJ*)seqmat->data;
4657:   PetscMPIInt         size,rank,tagi,tagj,*len_s,*len_si,*len_ri;
4658:   PetscInt            **buf_rj,**buf_ri,**buf_ri_k;
4659:   PetscInt            M=seqmat->rmap->n,N=seqmat->cmap->n,i,*owners,*ai=a->i,*aj=a->j;
4660:   PetscInt            len,proc,*dnz,*onz,bs,cbs;
4661:   PetscInt            k,anzi,*bi,*bj,*lnk,nlnk,arow,bnzi,nspacedouble=0;
4662:   PetscInt            nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextai;
4663:   MPI_Request         *si_waits,*sj_waits,*ri_waits,*rj_waits;
4664:   MPI_Status          *status;
4665:   PetscFreeSpaceList  free_space=NULL,current_space=NULL;
4666:   PetscBT             lnkbt;
4667:   Mat_Merge_SeqsToMPI *merge;
4668:   PetscContainer      container;

4671:   PetscLogEventBegin(MAT_Seqstompisym,seqmat,0,0,0);

4673:   /* make sure it is a PETSc comm */
4674:   PetscCommDuplicate(comm,&comm,NULL);
4675:   MPI_Comm_size(comm,&size);
4676:   MPI_Comm_rank(comm,&rank);

4678:   PetscNew(&merge);
4679:   PetscMalloc1(size,&status);

4681:   /* determine row ownership */
4682:   /*---------------------------------------------------------*/
4683:   PetscLayoutCreate(comm,&merge->rowmap);
4684:   PetscLayoutSetLocalSize(merge->rowmap,m);
4685:   PetscLayoutSetSize(merge->rowmap,M);
4686:   PetscLayoutSetBlockSize(merge->rowmap,1);
4687:   PetscLayoutSetUp(merge->rowmap);
4688:   PetscMalloc1(size,&len_si);
4689:   PetscMalloc1(size,&merge->len_s);

4691:   m      = merge->rowmap->n;
4692:   owners = merge->rowmap->range;

4694:   /* determine the number of messages to send, their lengths */
4695:   /*---------------------------------------------------------*/
4696:   len_s = merge->len_s;

4698:   len          = 0; /* length of buf_si[] */
4699:   merge->nsend = 0;
4700:   for (proc=0; proc<size; proc++) {
4701:     len_si[proc] = 0;
4702:     if (proc == rank) {
4703:       len_s[proc] = 0;
4704:     } else {
4705:       len_si[proc] = owners[proc+1] - owners[proc] + 1;
4706:       len_s[proc]  = ai[owners[proc+1]] - ai[owners[proc]]; /* num of rows to be sent to [proc] */
4707:     }
4708:     if (len_s[proc]) {
4709:       merge->nsend++;
4710:       nrows = 0;
4711:       for (i=owners[proc]; i<owners[proc+1]; i++) {
4712:         if (ai[i+1] > ai[i]) nrows++;
4713:       }
4714:       len_si[proc] = 2*(nrows+1);
4715:       len         += len_si[proc];
4716:     }
4717:   }

4719:   /* determine the number and length of messages to receive for ij-structure */
4720:   /*-------------------------------------------------------------------------*/
4721:   PetscGatherNumberOfMessages(comm,NULL,len_s,&merge->nrecv);
4722:   PetscGatherMessageLengths2(comm,merge->nsend,merge->nrecv,len_s,len_si,&merge->id_r,&merge->len_r,&len_ri);

4724:   /* post the Irecv of j-structure */
4725:   /*-------------------------------*/
4726:   PetscCommGetNewTag(comm,&tagj);
4727:   PetscPostIrecvInt(comm,tagj,merge->nrecv,merge->id_r,merge->len_r,&buf_rj,&rj_waits);

4729:   /* post the Isend of j-structure */
4730:   /*--------------------------------*/
4731:   PetscMalloc2(merge->nsend,&si_waits,merge->nsend,&sj_waits);

4733:   for (proc=0, k=0; proc<size; proc++) {
4734:     if (!len_s[proc]) continue;
4735:     i    = owners[proc];
4736:     MPI_Isend(aj+ai[i],len_s[proc],MPIU_INT,proc,tagj,comm,sj_waits+k);
4737:     k++;
4738:   }

4740:   /* receives and sends of j-structure are complete */
4741:   /*------------------------------------------------*/
4742:   if (merge->nrecv) {MPI_Waitall(merge->nrecv,rj_waits,status);}
4743:   if (merge->nsend) {MPI_Waitall(merge->nsend,sj_waits,status);}

4745:   /* send and recv i-structure */
4746:   /*---------------------------*/
4747:   PetscCommGetNewTag(comm,&tagi);
4748:   PetscPostIrecvInt(comm,tagi,merge->nrecv,merge->id_r,len_ri,&buf_ri,&ri_waits);

4750:   PetscMalloc1(len+1,&buf_s);
4751:   buf_si = buf_s;  /* points to the beginning of k-th msg to be sent */
4752:   for (proc=0,k=0; proc<size; proc++) {
4753:     if (!len_s[proc]) continue;
4754:     /* form outgoing message for i-structure:
4755:          buf_si[0]:                 nrows to be sent
4756:                [1:nrows]:           row index (global)
4757:                [nrows+1:2*nrows+1]: i-structure index
4758:     */
4759:     /*-------------------------------------------*/
4760:     nrows       = len_si[proc]/2 - 1;
4761:     buf_si_i    = buf_si + nrows+1;
4762:     buf_si[0]   = nrows;
4763:     buf_si_i[0] = 0;
4764:     nrows       = 0;
4765:     for (i=owners[proc]; i<owners[proc+1]; i++) {
4766:       anzi = ai[i+1] - ai[i];
4767:       if (anzi) {
4768:         buf_si_i[nrows+1] = buf_si_i[nrows] + anzi; /* i-structure */
4769:         buf_si[nrows+1]   = i-owners[proc]; /* local row index */
4770:         nrows++;
4771:       }
4772:     }
4773:     MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,si_waits+k);
4774:     k++;
4775:     buf_si += len_si[proc];
4776:   }

4778:   if (merge->nrecv) {MPI_Waitall(merge->nrecv,ri_waits,status);}
4779:   if (merge->nsend) {MPI_Waitall(merge->nsend,si_waits,status);}

4781:   PetscInfo2(seqmat,"nsend: %D, nrecv: %D\n",merge->nsend,merge->nrecv);
4782:   for (i=0; i<merge->nrecv; i++) {
4783:     PetscInfo3(seqmat,"recv len_ri=%D, len_rj=%D from [%D]\n",len_ri[i],merge->len_r[i],merge->id_r[i]);
4784:   }

4786:   PetscFree(len_si);
4787:   PetscFree(len_ri);
4788:   PetscFree(rj_waits);
4789:   PetscFree2(si_waits,sj_waits);
4790:   PetscFree(ri_waits);
4791:   PetscFree(buf_s);
4792:   PetscFree(status);

4794:   /* compute a local seq matrix in each processor */
4795:   /*----------------------------------------------*/
4796:   /* allocate bi array and free space for accumulating nonzero column info */
4797:   PetscMalloc1(m+1,&bi);
4798:   bi[0] = 0;

4800:   /* create and initialize a linked list */
4801:   nlnk = N+1;
4802:   PetscLLCreate(N,N,nlnk,lnk,lnkbt);

4804:   /* initial FreeSpace size is 2*(num of local nnz(seqmat)) */
4805:   len  = ai[owners[rank+1]] - ai[owners[rank]];
4806:   PetscFreeSpaceGet(PetscIntMultTruncate(2,len)+1,&free_space);

4808:   current_space = free_space;

4810:   /* determine symbolic info for each local row */
4811:   PetscMalloc3(merge->nrecv,&buf_ri_k,merge->nrecv,&nextrow,merge->nrecv,&nextai);

4813:   for (k=0; k<merge->nrecv; k++) {
4814:     buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */
4815:     nrows       = *buf_ri_k[k];
4816:     nextrow[k]  = buf_ri_k[k] + 1;  /* next row number of k-th recved i-structure */
4817:     nextai[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th recved i-structure  */
4818:   }

4820:   MatPreallocateInitialize(comm,m,n,dnz,onz);
4821:   len  = 0;
4822:   for (i=0; i<m; i++) {
4823:     bnzi = 0;
4824:     /* add local non-zero cols of this proc's seqmat into lnk */
4825:     arow  = owners[rank] + i;
4826:     anzi  = ai[arow+1] - ai[arow];
4827:     aj    = a->j + ai[arow];
4828:     PetscLLAddSorted(anzi,aj,N,nlnk,lnk,lnkbt);
4829:     bnzi += nlnk;
4830:     /* add received col data into lnk */
4831:     for (k=0; k<merge->nrecv; k++) { /* k-th received message */
4832:       if (i == *nextrow[k]) { /* i-th row */
4833:         anzi  = *(nextai[k]+1) - *nextai[k];
4834:         aj    = buf_rj[k] + *nextai[k];
4835:         PetscLLAddSorted(anzi,aj,N,nlnk,lnk,lnkbt);
4836:         bnzi += nlnk;
4837:         nextrow[k]++; nextai[k]++;
4838:       }
4839:     }
4840:     if (len < bnzi) len = bnzi;  /* =max(bnzi) */

4842:     /* if free space is not available, make more free space */
4843:     if (current_space->local_remaining<bnzi) {
4844:       PetscFreeSpaceGet(PetscIntSumTruncate(bnzi,current_space->total_array_size),&current_space);
4845:       nspacedouble++;
4846:     }
4847:     /* copy data into free space, then initialize lnk */
4848:     PetscLLClean(N,N,bnzi,lnk,current_space->array,lnkbt);
4849:     MatPreallocateSet(i+owners[rank],bnzi,current_space->array,dnz,onz);

4851:     current_space->array           += bnzi;
4852:     current_space->local_used      += bnzi;
4853:     current_space->local_remaining -= bnzi;

4855:     bi[i+1] = bi[i] + bnzi;
4856:   }

4858:   PetscFree3(buf_ri_k,nextrow,nextai);

4860:   PetscMalloc1(bi[m]+1,&bj);
4861:   PetscFreeSpaceContiguous(&free_space,bj);
4862:   PetscLLDestroy(lnk,lnkbt);

4864:   /* create symbolic parallel matrix B_mpi */
4865:   /*---------------------------------------*/
4866:   MatGetBlockSizes(seqmat,&bs,&cbs);
4867:   MatCreate(comm,&B_mpi);
4868:   if (n==PETSC_DECIDE) {
4869:     MatSetSizes(B_mpi,m,n,PETSC_DETERMINE,N);
4870:   } else {
4871:     MatSetSizes(B_mpi,m,n,PETSC_DETERMINE,PETSC_DETERMINE);
4872:   }
4873:   MatSetBlockSizes(B_mpi,bs,cbs);
4874:   MatSetType(B_mpi,MATMPIAIJ);
4875:   MatMPIAIJSetPreallocation(B_mpi,0,dnz,0,onz);
4876:   MatPreallocateFinalize(dnz,onz);
4877:   MatSetOption(B_mpi,MAT_NEW_NONZERO_ALLOCATION_ERR,PETSC_FALSE);

4879:   /* B_mpi is not ready for use - assembly will be done by MatCreateMPIAIJSumSeqAIJNumeric() */
4880:   B_mpi->assembled    = PETSC_FALSE;
4881:   B_mpi->ops->destroy = MatDestroy_MPIAIJ_SeqsToMPI;
4882:   merge->bi           = bi;
4883:   merge->bj           = bj;
4884:   merge->buf_ri       = buf_ri;
4885:   merge->buf_rj       = buf_rj;
4886:   merge->coi          = NULL;
4887:   merge->coj          = NULL;
4888:   merge->owners_co    = NULL;

4890:   PetscCommDestroy(&comm);

4892:   /* attach the supporting struct to B_mpi for reuse */
4893:   PetscContainerCreate(PETSC_COMM_SELF,&container);
4894:   PetscContainerSetPointer(container,merge);
4895:   PetscObjectCompose((PetscObject)B_mpi,"MatMergeSeqsToMPI",(PetscObject)container);
4896:   PetscContainerDestroy(&container);
4897:   *mpimat = B_mpi;

4899:   PetscLogEventEnd(MAT_Seqstompisym,seqmat,0,0,0);
4900:   return(0);
4901: }

4903: /*@C
4904:       MatCreateMPIAIJSumSeqAIJ - Creates a MATMPIAIJ matrix by adding sequential
4905:                  matrices from each processor

4907:     Collective on MPI_Comm

4909:    Input Parameters:
4910: +    comm - the communicator the parallel matrix will live on
4911: .    seqmat - the input sequential matrix on each process
4912: .    m - number of local rows (or PETSC_DECIDE)
4913: .    n - number of local columns (or PETSC_DECIDE)
4914: -    scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX

4916:    Output Parameter:
4917: .    mpimat - the parallel matrix generated

4919:     Level: advanced

4921:    Notes:
4922:      The dimensions of the sequential matrix in each processor MUST be the same.
4923:      The input seqmat is included in the container "Mat_Merge_SeqsToMPI", and will be
4924:      destroyed when mpimat is destroyed. Call PetscObjectQuery() to access seqmat.
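
     A typical calling sequence (a sketch; seqmat is assumed to hold this process's
     local contribution) is
.vb
     Mat C;
     MatCreateMPIAIJSumSeqAIJ(PETSC_COMM_WORLD,seqmat,PETSC_DECIDE,PETSC_DECIDE,MAT_INITIAL_MATRIX,&C);
     /* ... change the numerical values (but not the nonzero pattern) of seqmat ... */
     MatCreateMPIAIJSumSeqAIJ(PETSC_COMM_WORLD,seqmat,PETSC_DECIDE,PETSC_DECIDE,MAT_REUSE_MATRIX,&C);
.ve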
4925: @*/
4926: PetscErrorCode MatCreateMPIAIJSumSeqAIJ(MPI_Comm comm,Mat seqmat,PetscInt m,PetscInt n,MatReuse scall,Mat *mpimat)
4927: {
4929:   PetscMPIInt    size;

4932:   MPI_Comm_size(comm,&size);
4933:   if (size == 1) {
4934:     PetscLogEventBegin(MAT_Seqstompi,seqmat,0,0,0);
4935:     if (scall == MAT_INITIAL_MATRIX) {
4936:       MatDuplicate(seqmat,MAT_COPY_VALUES,mpimat);
4937:     } else {
4938:       MatCopy(seqmat,*mpimat,SAME_NONZERO_PATTERN);
4939:     }
4940:     PetscLogEventEnd(MAT_Seqstompi,seqmat,0,0,0);
4941:     return(0);
4942:   }
4943:   PetscLogEventBegin(MAT_Seqstompi,seqmat,0,0,0);
4944:   if (scall == MAT_INITIAL_MATRIX) {
4945:     MatCreateMPIAIJSumSeqAIJSymbolic(comm,seqmat,m,n,mpimat);
4946:   }
4947:   MatCreateMPIAIJSumSeqAIJNumeric(seqmat,*mpimat);
4948:   PetscLogEventEnd(MAT_Seqstompi,seqmat,0,0,0);
4949:   return(0);
4950: }

4952: /*@
4953:      MatMPIAIJGetLocalMat - Creates a SeqAIJ matrix from a MATMPIAIJ matrix by taking all its local rows and putting them into a sequential matrix with
4954:           mlocal rows and n columns, where mlocal is the local row count obtained with MatGetLocalSize() and n is the global column count obtained
4955:           with MatGetSize()

4957:     Not Collective

4959:    Input Parameters:
4960: +    A - the matrix
4961: -    scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX

4963:    Output Parameter:
4964: .    A_loc - the local sequential matrix generated
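
   Notes:
     A minimal usage sketch (assuming A is an assembled MATMPIAIJ matrix) is
.vb
     Mat A_loc;
     MatMPIAIJGetLocalMat(A,MAT_INITIAL_MATRIX,&A_loc);
     /* ... use A_loc, a SeqAIJ matrix with mlocal rows and N columns ... */
     MatMPIAIJGetLocalMat(A,MAT_REUSE_MATRIX,&A_loc);  /* refresh the values after A has changed */
     MatDestroy(&A_loc);
.ve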

4966:     Level: developer

4968: .seealso: MatGetOwnershipRange(), MatMPIAIJGetLocalMatCondensed()

4970: @*/
4971: PetscErrorCode MatMPIAIJGetLocalMat(Mat A,MatReuse scall,Mat *A_loc)
4972: {
4974:   Mat_MPIAIJ     *mpimat=(Mat_MPIAIJ*)A->data;
4975:   Mat_SeqAIJ     *mat,*a,*b;
4976:   PetscInt       *ai,*aj,*bi,*bj,*cmap=mpimat->garray;
4977:   MatScalar      *aa,*ba,*cam;
4978:   PetscScalar    *ca;
4979:   PetscInt       am=A->rmap->n,i,j,k,cstart=A->cmap->rstart;
4980:   PetscInt       *ci,*cj,col,ncols_d,ncols_o,jo;
4981:   PetscBool      match;
4982:   MPI_Comm       comm;
4983:   PetscMPIInt    size;

4986:   PetscStrbeginswith(((PetscObject)A)->type_name,MATMPIAIJ,&match);
4987:   if (!match) SETERRQ(PetscObjectComm((PetscObject)A), PETSC_ERR_SUP,"Requires MATMPIAIJ matrix as input");
4988:   PetscObjectGetComm((PetscObject)A,&comm);
4989:   MPI_Comm_size(comm,&size);
4990:   if (size == 1 && scall == MAT_REUSE_MATRIX) return(0);

4992:   PetscLogEventBegin(MAT_Getlocalmat,A,0,0,0);
4993:   a = (Mat_SeqAIJ*)(mpimat->A)->data;
4994:   b = (Mat_SeqAIJ*)(mpimat->B)->data;
4995:   ai = a->i; aj = a->j; bi = b->i; bj = b->j;
4996:   aa = a->a; ba = b->a;
4997:   if (scall == MAT_INITIAL_MATRIX) {
4998:     if (size == 1) {
4999:       MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,A->cmap->N,ai,aj,aa,A_loc);
5000:       return(0);
5001:     }

5003:     PetscMalloc1(1+am,&ci);
5004:     ci[0] = 0;
5005:     for (i=0; i<am; i++) {
5006:       ci[i+1] = ci[i] + (ai[i+1] - ai[i]) + (bi[i+1] - bi[i]);
5007:     }
5008:     PetscMalloc1(1+ci[am],&cj);
5009:     PetscMalloc1(1+ci[am],&ca);
5010:     k    = 0;
5011:     for (i=0; i<am; i++) {
5012:       ncols_o = bi[i+1] - bi[i];
5013:       ncols_d = ai[i+1] - ai[i];
5014:       /* off-diagonal portion of A */
5015:       for (jo=0; jo<ncols_o; jo++) {
5016:         col = cmap[*bj];
5017:         if (col >= cstart) break;
5018:         cj[k]   = col; bj++;
5019:         ca[k++] = *ba++;
5020:       }
5021:       /* diagonal portion of A */
5022:       for (j=0; j<ncols_d; j++) {
5023:         cj[k]   = cstart + *aj++;
5024:         ca[k++] = *aa++;
5025:       }
5026:       /* off-diagonal portion of A */
5027:       for (j=jo; j<ncols_o; j++) {
5028:         cj[k]   = cmap[*bj++];
5029:         ca[k++] = *ba++;
5030:       }
5031:     }
5032:     /* put together the new matrix */
5033:     MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,A->cmap->N,ci,cj,ca,A_loc);
5034:     /* MatCreateSeqAIJWithArrays flags matrix so PETSc doesn't free the user's arrays. */
5035:     /* Since these are PETSc arrays, change flags to free them as necessary. */
5036:     mat          = (Mat_SeqAIJ*)(*A_loc)->data;
5037:     mat->free_a  = PETSC_TRUE;
5038:     mat->free_ij = PETSC_TRUE;
5039:     mat->nonew   = 0;
5040:   } else if (scall == MAT_REUSE_MATRIX) {
5041:     mat=(Mat_SeqAIJ*)(*A_loc)->data;
5042:     ci = mat->i; cj = mat->j; cam = mat->a;
5043:     for (i=0; i<am; i++) {
5044:       /* off-diagonal portion of A */
5045:       ncols_o = bi[i+1] - bi[i];
5046:       for (jo=0; jo<ncols_o; jo++) {
5047:         col = cmap[*bj];
5048:         if (col >= cstart) break;
5049:         *cam++ = *ba++; bj++;
5050:       }
5051:       /* diagonal portion of A */
5052:       ncols_d = ai[i+1] - ai[i];
5053:       for (j=0; j<ncols_d; j++) *cam++ = *aa++;
5054:       /* off-diagonal portion of A */
5055:       for (j=jo; j<ncols_o; j++) {
5056:         *cam++ = *ba++; bj++;
5057:       }
5058:     }
5059:   } else SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Invalid MatReuse %d",(int)scall);
5060:   PetscLogEventEnd(MAT_Getlocalmat,A,0,0,0);
5061:   return(0);
5062: }

5064: /*@C
5065:      MatMPIAIJGetLocalMatCondensed - Creates a SeqAIJ matrix from a MATMPIAIJ matrix by taking all its local rows and NON-ZERO columns

5067:     Not Collective

5069:    Input Parameters:
5070: +    A - the matrix
5071: .    scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
5072: -    row, col - index sets of rows and columns to extract (or NULL)

5074:    Output Parameter:
5075: .    A_loc - the local sequential matrix generated
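
   Notes:
     A minimal usage sketch (assuming A is an assembled MATMPIAIJ matrix; passing NULL
     for row and col selects all local rows and all nonzero columns) is
.vb
     Mat A_loc;
     MatMPIAIJGetLocalMatCondensed(A,MAT_INITIAL_MATRIX,NULL,NULL,&A_loc);
     /* ... use A_loc ... */
     MatDestroy(&A_loc);
.ve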

5077:     Level: developer

5079: .seealso: MatGetOwnershipRange(), MatMPIAIJGetLocalMat()

5081: @*/
5082: PetscErrorCode MatMPIAIJGetLocalMatCondensed(Mat A,MatReuse scall,IS *row,IS *col,Mat *A_loc)
5083: {
5084:   Mat_MPIAIJ     *a=(Mat_MPIAIJ*)A->data;
5086:   PetscInt       i,start,end,ncols,nzA,nzB,*cmap,imark,*idx;
5087:   IS             isrowa,iscola;
5088:   Mat            *aloc;
5089:   PetscBool      match;

5092:   PetscObjectTypeCompare((PetscObject)A,MATMPIAIJ,&match);
5093:   if (!match) SETERRQ(PetscObjectComm((PetscObject)A), PETSC_ERR_SUP,"Requires MATMPIAIJ matrix as input");
5094:   PetscLogEventBegin(MAT_Getlocalmatcondensed,A,0,0,0);
5095:   if (!row) {
5096:     start = A->rmap->rstart; end = A->rmap->rend;
5097:     ISCreateStride(PETSC_COMM_SELF,end-start,start,1,&isrowa);
5098:   } else {
5099:     isrowa = *row;
5100:   }
5101:   if (!col) {
5102:     start = A->cmap->rstart;
5103:     cmap  = a->garray;
5104:     nzA   = a->A->cmap->n;
5105:     nzB   = a->B->cmap->n;
5106:     PetscMalloc1(nzA+nzB, &idx);
5107:     ncols = 0;
5108:     for (i=0; i<nzB; i++) {
5109:       if (cmap[i] < start) idx[ncols++] = cmap[i];
5110:       else break;
5111:     }
5112:     imark = i;
5113:     for (i=0; i<nzA; i++) idx[ncols++] = start + i;
5114:     for (i=imark; i<nzB; i++) idx[ncols++] = cmap[i];
5115:     ISCreateGeneral(PETSC_COMM_SELF,ncols,idx,PETSC_OWN_POINTER,&iscola);
5116:   } else {
5117:     iscola = *col;
5118:   }
5119:   if (scall != MAT_INITIAL_MATRIX) {
5120:     PetscMalloc1(1,&aloc);
5121:     aloc[0] = *A_loc;
5122:   }
5123:   MatCreateSubMatrices(A,1,&isrowa,&iscola,scall,&aloc);
5124:   if (!col) { /* attach global id of condensed columns */
5125:     PetscObjectCompose((PetscObject)aloc[0],"_petsc_GetLocalMatCondensed_iscol",(PetscObject)iscola);
5126:   }
5127:   *A_loc = aloc[0];
5128:   PetscFree(aloc);
5129:   if (!row) {
5130:     ISDestroy(&isrowa);
5131:   }
5132:   if (!col) {
5133:     ISDestroy(&iscola);
5134:   }
5135:   PetscLogEventEnd(MAT_Getlocalmatcondensed,A,0,0,0);
5136:   return(0);
5137: }

5139: /*@C
5140:     MatGetBrowsOfAcols - Creates a SeqAIJ matrix by taking the rows of B that correspond to nonzero columns of local A

5142:     Collective on Mat

5144:    Input Parameters:
5145: +    A,B - the matrices in mpiaij format
5146: .    scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
5147: -    rowb, colb - index sets of rows and columns of B to extract (or NULL)

5149:    Output Parameter:
5150: +    rowb, colb - index sets of rows and columns of B to extract
5151: -    B_seq - the sequential matrix generated
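
   Notes:
     A sketch of typical use (the index sets and the sequential matrix are created on the
     first call and can be reused when only the numerical values of B change) is
.vb
     IS  rowb = NULL,colb = NULL;
     Mat B_seq;
     MatGetBrowsOfAcols(A,B,MAT_INITIAL_MATRIX,&rowb,&colb,&B_seq);
     /* ... use B_seq ... */
     MatGetBrowsOfAcols(A,B,MAT_REUSE_MATRIX,&rowb,&colb,&B_seq);
     ISDestroy(&rowb); ISDestroy(&colb); MatDestroy(&B_seq);
.ve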

5153:     Level: developer

5155: @*/
5156: PetscErrorCode MatGetBrowsOfAcols(Mat A,Mat B,MatReuse scall,IS *rowb,IS *colb,Mat *B_seq)
5157: {
5158:   Mat_MPIAIJ     *a=(Mat_MPIAIJ*)A->data;
5160:   PetscInt       *idx,i,start,ncols,nzA,nzB,*cmap,imark;
5161:   IS             isrowb,iscolb;
5162:   Mat            *bseq=NULL;

5165:   if (A->cmap->rstart != B->rmap->rstart || A->cmap->rend != B->rmap->rend) {
5166:     SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, (%D, %D) != (%D,%D)",A->cmap->rstart,A->cmap->rend,B->rmap->rstart,B->rmap->rend);
5167:   }
5168:   PetscLogEventBegin(MAT_GetBrowsOfAcols,A,B,0,0);

5170:   if (scall == MAT_INITIAL_MATRIX) {
5171:     start = A->cmap->rstart;
5172:     cmap  = a->garray;
5173:     nzA   = a->A->cmap->n;
5174:     nzB   = a->B->cmap->n;
5175:     PetscMalloc1(nzA+nzB, &idx);
5176:     ncols = 0;
5177:     for (i=0; i<nzB; i++) {  /* row < local row index */
5178:       if (cmap[i] < start) idx[ncols++] = cmap[i];
5179:       else break;
5180:     }
5181:     imark = i;
5182:     for (i=0; i<nzA; i++) idx[ncols++] = start + i;  /* local rows */
5183:     for (i=imark; i<nzB; i++) idx[ncols++] = cmap[i]; /* row > local row index */
5184:     ISCreateGeneral(PETSC_COMM_SELF,ncols,idx,PETSC_OWN_POINTER,&isrowb);
5185:     ISCreateStride(PETSC_COMM_SELF,B->cmap->N,0,1,&iscolb);
5186:   } else {
5187:     if (!rowb || !colb) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"IS rowb and colb must be provided for MAT_REUSE_MATRIX");
5188:     isrowb  = *rowb; iscolb = *colb;
5189:     PetscMalloc1(1,&bseq);
5190:     bseq[0] = *B_seq;
5191:   }
5192:   MatCreateSubMatrices(B,1,&isrowb,&iscolb,scall,&bseq);
5193:   *B_seq = bseq[0];
5194:   PetscFree(bseq);
5195:   if (!rowb) {
5196:     ISDestroy(&isrowb);
5197:   } else {
5198:     *rowb = isrowb;
5199:   }
5200:   if (!colb) {
5201:     ISDestroy(&iscolb);
5202:   } else {
5203:     *colb = iscolb;
5204:   }
5205:   PetscLogEventEnd(MAT_GetBrowsOfAcols,A,B,0,0);
5206:   return(0);
5207: }

5209: /*
5210:     MatGetBrowsOfAoCols_MPIAIJ - Creates a SeqAIJ matrix by taking the rows of B that correspond to nonzero columns
5211:     of the OFF-DIAGONAL portion of local A

5213:     Collective on Mat

5215:    Input Parameters:
5216: +    A,B - the matrices in mpiaij format
5217: -    scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX

5219:    Output Parameter:
5220: +    startsj_s - starting point in B's sending j-arrays, saved for MAT_REUSE (or NULL)
5221: .    startsj_r - starting point in B's receiving j-arrays, saved for MAT_REUSE (or NULL)
5222: .    bufa_ptr - array for sending matrix values, saved for MAT_REUSE (or NULL)
5223: -    B_oth - the sequential matrix generated with size aBn=a->B->cmap->n by B->cmap->N

5225:     Developer Notes: This directly accesses information inside the VecScatter associated with the matrix-vector product
5226:      for this matrix. This is not desirable.

5228:     Level: developer

5230: */
5231: PetscErrorCode MatGetBrowsOfAoCols_MPIAIJ(Mat A,Mat B,MatReuse scall,PetscInt **startsj_s,PetscInt **startsj_r,MatScalar **bufa_ptr,Mat *B_oth)
5232: {
5233:   PetscErrorCode         ierr;
5234:   Mat_MPIAIJ             *a=(Mat_MPIAIJ*)A->data;
5235:   Mat_SeqAIJ             *b_oth;
5236:   VecScatter             ctx;
5237:   MPI_Comm               comm;
5238:   const PetscMPIInt      *rprocs,*sprocs;
5239:   const PetscInt         *srow,*rstarts,*sstarts;
5240:   PetscInt               *rowlen,*bufj,*bufJ,ncols,aBn=a->B->cmap->n,row,*b_othi,*b_othj,*rvalues=NULL,*svalues=NULL,*cols,sbs,rbs;
5241:   PetscInt               i,j,k=0,l,ll,nrecvs,nsends,nrows,*rstartsj = 0,*sstartsj,len;
5242:   PetscScalar              *b_otha,*bufa,*bufA,*vals;
5243:   MPI_Request            *rwaits = NULL,*swaits = NULL;
5244:   MPI_Status             rstatus;
5245:   PetscMPIInt            jj,size,tag,rank,nsends_mpi,nrecvs_mpi;

5248:   PetscObjectGetComm((PetscObject)A,&comm);
5249:   MPI_Comm_size(comm,&size);

5251:   if (A->cmap->rstart != B->rmap->rstart || A->cmap->rend != B->rmap->rend) {
5252:     SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, (%d, %d) != (%d,%d)",A->cmap->rstart,A->cmap->rend,B->rmap->rstart,B->rmap->rend);
5253:   }
5254:   PetscLogEventBegin(MAT_GetBrowsOfAocols,A,B,0,0);
5255:   MPI_Comm_rank(comm,&rank);

5257:   if (size == 1) {
5258:     startsj_s = NULL;
5259:     bufa_ptr  = NULL;
5260:     *B_oth    = NULL;
5261:     return(0);
5262:   }

5264:   ctx = a->Mvctx;
5265:   tag = ((PetscObject)ctx)->tag;

5267:   if (ctx->inuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE," Scatter ctx already in use");
5268:   VecScatterGetRemote_Private(ctx,PETSC_TRUE/*send*/,&nsends,&sstarts,&srow,&sprocs,&sbs);
5269:   /* rprocs[] must be ordered so that indices received from them are ordered in rvalues[], which is key to algorithms used in this subroutine */
5270:   VecScatterGetRemoteOrdered_Private(ctx,PETSC_FALSE/*recv*/,&nrecvs,&rstarts,NULL/*indices not needed*/,&rprocs,&rbs);
5271:   PetscMPIIntCast(nsends,&nsends_mpi);
5272:   PetscMPIIntCast(nrecvs,&nrecvs_mpi);
5273:   PetscMalloc2(nrecvs,&rwaits,nsends,&swaits);

5275:   if (!startsj_s || !bufa_ptr) scall = MAT_INITIAL_MATRIX;
5276:   if (scall == MAT_INITIAL_MATRIX) {
5277:     /* i-array */
5278:     /*---------*/
5279:     /*  post receives */
5280:     if (nrecvs) {PetscMalloc1(rbs*(rstarts[nrecvs] - rstarts[0]),&rvalues);} /* rstarts can be NULL when nrecvs=0 */
5281:     for (i=0; i<nrecvs; i++) {
5282:       rowlen = rvalues + rstarts[i]*rbs;
5283:       nrows  = (rstarts[i+1]-rstarts[i])*rbs; /* num of indices to be received */
5284:       MPI_Irecv(rowlen,nrows,MPIU_INT,rprocs[i],tag,comm,rwaits+i);
5285:     }

5287:     /* pack the outgoing message */
5288:     PetscMalloc2(nsends+1,&sstartsj,nrecvs+1,&rstartsj);

5290:     sstartsj[0] = 0;
5291:     rstartsj[0] = 0;
5292:     len         = 0; /* total length of j or a array to be sent */
5293:     if