Actual source code: mpiaij.c

petsc-master 2020-07-09
  1:  #include <../src/mat/impls/aij/mpi/mpiaij.h>
  2:  #include <petsc/private/vecimpl.h>
  3:  #include <petsc/private/vecscatterimpl.h>
  4:  #include <petsc/private/isimpl.h>
  5:  #include <petscblaslapack.h>
  6:  #include <petscsf.h>
  7:  #include <petsc/private/hashmapi.h>

  9: /*MC
 10:    MATAIJ - MATAIJ = "aij" - A matrix type to be used for sparse matrices.

 12:    This matrix type is identical to MATSEQAIJ when constructed with a single process communicator,
 13:    and MATMPIAIJ otherwise.  As a result, for single process communicators,
 14:    MatSeqAIJSetPreallocation() is supported, and similarly MatMPIAIJSetPreallocation() is supported
 15:    for communicators controlling multiple processes.  It is recommended that you call both of
 16:    the above preallocation routines for simplicity.

 18:    Options Database Keys:
 19: . -mat_type aij - sets the matrix type to "aij" during a call to MatSetFromOptions()

 21:   Developer Notes:
 22:     Subclasses include MATAIJCUSP, MATAIJCUSPARSE, MATAIJPERM, MATAIJSELL, MATAIJMKL, and MATAIJCRL.  The type also automatically
 23:     switches over to using inodes when enough exist.

 25:   Level: beginner

 27: .seealso: MatCreateAIJ(), MatCreateSeqAIJ(), MATSEQAIJ, MATMPIAIJ
 28: M*/
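/*
   A minimal usage sketch, assuming comm, m, n, M, and N stand for the caller's communicator and
   local/global sizes.  Calling both preallocation routines, as recommended above, lets the same
   code run with one or many processes, since the routine that does not match the matrix type is
   ignored:

     Mat A;
     MatCreate(comm,&A);
     MatSetSizes(A,m,n,M,N);
     MatSetType(A,MATAIJ);
     MatSeqAIJSetPreallocation(A,5,NULL);
     MatMPIAIJSetPreallocation(A,5,NULL,2,NULL);
*/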

 30: /*MC
 31:    MATAIJCRL - MATAIJCRL = "aijcrl" - A matrix type to be used for sparse matrices.

 33:    This matrix type is identical to MATSEQAIJCRL when constructed with a single process communicator,
 34:    and MATMPIAIJCRL otherwise.  As a result, for single process communicators,
 35:    MatSeqAIJSetPreallocation() is supported, and similarly MatMPIAIJSetPreallocation() is supported
 36:   for communicators controlling multiple processes.  It is recommended that you call both of
 37:   the above preallocation routines for simplicity.

 39:    Options Database Keys:
 40: . -mat_type aijcrl - sets the matrix type to "aijcrl" during a call to MatSetFromOptions()

 42:   Level: beginner

 44: .seealso: MatCreateMPIAIJCRL(), MATSEQAIJCRL, MATMPIAIJCRL
 45: M*/
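/*
   For example, assuming an application executable ./app that calls MatSetFromOptions() on its
   matrix, the CRL format can be selected at run time with

     mpiexec -n 4 ./app -mat_type aijcrl
*/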

 47: static PetscErrorCode MatBindToCPU_MPIAIJ(Mat A,PetscBool flg)
 48: {
 49:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;

 53: #if defined(PETSC_HAVE_CUDA) || defined(PETSC_HAVE_VIENNACL)
 54:   A->boundtocpu = flg;
 55: #endif
 56:   if (a->A) {
 57:     MatBindToCPU(a->A,flg);
 58:   }
 59:   if (a->B) {
 60:     MatBindToCPU(a->B,flg);
 61:   }
 62:   return(0);
 63: }


 66: PetscErrorCode MatSetBlockSizes_MPIAIJ(Mat M, PetscInt rbs, PetscInt cbs)
 67: {
 69:   Mat_MPIAIJ     *mat = (Mat_MPIAIJ*)M->data;

 72:   if (mat->A) {
 73:     MatSetBlockSizes(mat->A,rbs,cbs);
 74:     MatSetBlockSizes(mat->B,rbs,1);
 75:   }
 76:   return(0);
 77: }

 79: PetscErrorCode MatFindNonzeroRows_MPIAIJ(Mat M,IS *keptrows)
 80: {
 81:   PetscErrorCode  ierr;
 82:   Mat_MPIAIJ      *mat = (Mat_MPIAIJ*)M->data;
 83:   Mat_SeqAIJ      *a   = (Mat_SeqAIJ*)mat->A->data;
 84:   Mat_SeqAIJ      *b   = (Mat_SeqAIJ*)mat->B->data;
 85:   const PetscInt  *ia,*ib;
 86:   const MatScalar *aa,*bb;
 87:   PetscInt        na,nb,i,j,*rows,cnt=0,n0rows;
 88:   PetscInt        m = M->rmap->n,rstart = M->rmap->rstart;

 91:   *keptrows = 0;
 92:   ia        = a->i;
 93:   ib        = b->i;
 94:   for (i=0; i<m; i++) {
 95:     na = ia[i+1] - ia[i];
 96:     nb = ib[i+1] - ib[i];
 97:     if (!na && !nb) {
 98:       cnt++;
 99:       goto ok1;
100:     }
101:     aa = a->a + ia[i];
102:     for (j=0; j<na; j++) {
103:       if (aa[j] != 0.0) goto ok1;
104:     }
105:     bb = b->a + ib[i];
106:     for (j=0; j <nb; j++) {
107:       if (bb[j] != 0.0) goto ok1;
108:     }
109:     cnt++;
110: ok1:;
111:   }
112:   MPIU_Allreduce(&cnt,&n0rows,1,MPIU_INT,MPI_SUM,PetscObjectComm((PetscObject)M));
113:   if (!n0rows) return(0);
114:   PetscMalloc1(M->rmap->n-cnt,&rows);
115:   cnt  = 0;
116:   for (i=0; i<m; i++) {
117:     na = ia[i+1] - ia[i];
118:     nb = ib[i+1] - ib[i];
119:     if (!na && !nb) continue;
120:     aa = a->a + ia[i];
121:     for (j=0; j<na;j++) {
122:       if (aa[j] != 0.0) {
123:         rows[cnt++] = rstart + i;
124:         goto ok2;
125:       }
126:     }
127:     bb = b->a + ib[i];
128:     for (j=0; j<nb; j++) {
129:       if (bb[j] != 0.0) {
130:         rows[cnt++] = rstart + i;
131:         goto ok2;
132:       }
133:     }
134: ok2:;
135:   }
136:   ISCreateGeneral(PetscObjectComm((PetscObject)M),cnt,rows,PETSC_OWN_POINTER,keptrows);
137:   return(0);
138: }

140: PetscErrorCode  MatDiagonalSet_MPIAIJ(Mat Y,Vec D,InsertMode is)
141: {
142:   PetscErrorCode    ierr;
143:   Mat_MPIAIJ        *aij = (Mat_MPIAIJ*) Y->data;
144:   PetscBool         cong;

147:   MatHasCongruentLayouts(Y,&cong);
148:   if (Y->assembled && cong) {
149:     MatDiagonalSet(aij->A,D,is);
150:   } else {
151:     MatDiagonalSet_Default(Y,D,is);
152:   }
153:   return(0);
154: }

156: PetscErrorCode MatFindZeroDiagonals_MPIAIJ(Mat M,IS *zrows)
157: {
158:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)M->data;
160:   PetscInt       i,rstart,nrows,*rows;

163:   *zrows = NULL;
164:   MatFindZeroDiagonals_SeqAIJ_Private(aij->A,&nrows,&rows);
165:   MatGetOwnershipRange(M,&rstart,NULL);
166:   for (i=0; i<nrows; i++) rows[i] += rstart;
167:   ISCreateGeneral(PetscObjectComm((PetscObject)M),nrows,rows,PETSC_OWN_POINTER,zrows);
168:   return(0);
169: }

171: PetscErrorCode MatGetColumnNorms_MPIAIJ(Mat A,NormType type,PetscReal *norms)
172: {
174:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)A->data;
175:   PetscInt       i,n,*garray = aij->garray;
176:   Mat_SeqAIJ     *a_aij = (Mat_SeqAIJ*) aij->A->data;
177:   Mat_SeqAIJ     *b_aij = (Mat_SeqAIJ*) aij->B->data;
178:   PetscReal      *work;

181:   MatGetSize(A,NULL,&n);
182:   PetscCalloc1(n,&work);
183:   if (type == NORM_2) {
184:     for (i=0; i<a_aij->i[aij->A->rmap->n]; i++) {
185:       work[A->cmap->rstart + a_aij->j[i]] += PetscAbsScalar(a_aij->a[i]*a_aij->a[i]);
186:     }
187:     for (i=0; i<b_aij->i[aij->B->rmap->n]; i++) {
188:       work[garray[b_aij->j[i]]] += PetscAbsScalar(b_aij->a[i]*b_aij->a[i]);
189:     }
190:   } else if (type == NORM_1) {
191:     for (i=0; i<a_aij->i[aij->A->rmap->n]; i++) {
192:       work[A->cmap->rstart + a_aij->j[i]] += PetscAbsScalar(a_aij->a[i]);
193:     }
194:     for (i=0; i<b_aij->i[aij->B->rmap->n]; i++) {
195:       work[garray[b_aij->j[i]]] += PetscAbsScalar(b_aij->a[i]);
196:     }
197:   } else if (type == NORM_INFINITY) {
198:     for (i=0; i<a_aij->i[aij->A->rmap->n]; i++) {
199:       work[A->cmap->rstart + a_aij->j[i]] = PetscMax(PetscAbsScalar(a_aij->a[i]), work[A->cmap->rstart + a_aij->j[i]]);
200:     }
201:     for (i=0; i<b_aij->i[aij->B->rmap->n]; i++) {
202:       work[garray[b_aij->j[i]]] = PetscMax(PetscAbsScalar(b_aij->a[i]),work[garray[b_aij->j[i]]]);
203:     }

205:   } else SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONG,"Unknown NormType");
206:   if (type == NORM_INFINITY) {
207:     MPIU_Allreduce(work,norms,n,MPIU_REAL,MPIU_MAX,PetscObjectComm((PetscObject)A));
208:   } else {
209:     MPIU_Allreduce(work,norms,n,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)A));
210:   }
211:   PetscFree(work);
212:   if (type == NORM_2) {
213:     for (i=0; i<n; i++) norms[i] = PetscSqrtReal(norms[i]);
214:   }
215:   return(0);
216: }

218: PetscErrorCode MatFindOffBlockDiagonalEntries_MPIAIJ(Mat A,IS *is)
219: {
220:   Mat_MPIAIJ      *a  = (Mat_MPIAIJ*)A->data;
221:   IS              sis,gis;
222:   PetscErrorCode  ierr;
223:   const PetscInt  *isis,*igis;
224:   PetscInt        n,*iis,nsis,ngis,rstart,i;

227:   MatFindOffBlockDiagonalEntries(a->A,&sis);
228:   MatFindNonzeroRows(a->B,&gis);
229:   ISGetSize(gis,&ngis);
230:   ISGetSize(sis,&nsis);
231:   ISGetIndices(sis,&isis);
232:   ISGetIndices(gis,&igis);

234:   PetscMalloc1(ngis+nsis,&iis);
235:   PetscArraycpy(iis,igis,ngis);
236:   PetscArraycpy(iis+ngis,isis,nsis);
237:   n    = ngis + nsis;
238:   PetscSortRemoveDupsInt(&n,iis);
239:   MatGetOwnershipRange(A,&rstart,NULL);
240:   for (i=0; i<n; i++) iis[i] += rstart;
241:   ISCreateGeneral(PetscObjectComm((PetscObject)A),n,iis,PETSC_OWN_POINTER,is);

243:   ISRestoreIndices(sis,&isis);
244:   ISRestoreIndices(gis,&igis);
245:   ISDestroy(&sis);
246:   ISDestroy(&gis);
247:   return(0);
248: }

250: /*
251:     Distributes a SeqAIJ matrix across a set of processes. Code stolen from
252:     MatLoad_MPIAIJ(). Horrible lack of reuse. Should be a routine for each matrix type.

254:     Only for square matrices

256:     Used by a preconditioner, hence PETSC_EXTERN
257: */
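/*
   A minimal calling sketch, assuming gseq is a square MATSEQAIJ matrix held on rank 0 of comm and
   mlocal is the number of rows each rank should own; with MAT_REUSE_MATRIX only the numerical
   values are moved over from process 0:

     Mat dist;
     MatDistribute_MPIAIJ(comm,gseq,mlocal,MAT_INITIAL_MATRIX,&dist);
     ...
     MatDistribute_MPIAIJ(comm,gseq,mlocal,MAT_REUSE_MATRIX,&dist);
*/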
258: PETSC_EXTERN PetscErrorCode MatDistribute_MPIAIJ(MPI_Comm comm,Mat gmat,PetscInt m,MatReuse reuse,Mat *inmat)
259: {
260:   PetscMPIInt    rank,size;
261:   PetscInt       *rowners,*dlens,*olens,i,rstart,rend,j,jj,nz = 0,*gmataj,cnt,row,*ld,bses[2];
263:   Mat            mat;
264:   Mat_SeqAIJ     *gmata;
265:   PetscMPIInt    tag;
266:   MPI_Status     status;
267:   PetscBool      aij;
268:   MatScalar      *gmataa,*ao,*ad,*gmataarestore=0;

271:   MPI_Comm_rank(comm,&rank);
272:   MPI_Comm_size(comm,&size);
273:   if (!rank) {
274:     PetscObjectTypeCompare((PetscObject)gmat,MATSEQAIJ,&aij);
275:     if (!aij) SETERRQ1(PetscObjectComm((PetscObject)gmat),PETSC_ERR_SUP,"Currently no support for input matrix of type %s\n",((PetscObject)gmat)->type_name);
276:   }
277:   if (reuse == MAT_INITIAL_MATRIX) {
278:     MatCreate(comm,&mat);
279:     MatSetSizes(mat,m,m,PETSC_DETERMINE,PETSC_DETERMINE);
280:     MatGetBlockSizes(gmat,&bses[0],&bses[1]);
281:     MPI_Bcast(bses,2,MPIU_INT,0,comm);
282:     MatSetBlockSizes(mat,bses[0],bses[1]);
283:     MatSetType(mat,MATAIJ);
284:     PetscMalloc1(size+1,&rowners);
285:     PetscMalloc2(m,&dlens,m,&olens);
286:     MPI_Allgather(&m,1,MPIU_INT,rowners+1,1,MPIU_INT,comm);

288:     rowners[0] = 0;
289:     for (i=2; i<=size; i++) rowners[i] += rowners[i-1];
290:     rstart = rowners[rank];
291:     rend   = rowners[rank+1];
292:     PetscObjectGetNewTag((PetscObject)mat,&tag);
293:     if (!rank) {
294:       gmata = (Mat_SeqAIJ*) gmat->data;
 295:       /* send row lengths to the other processes */
296:       for (i=0; i<m; i++) dlens[i] = gmata->ilen[i];
297:       for (i=1; i<size; i++) {
298:         MPI_Send(gmata->ilen + rowners[i],rowners[i+1]-rowners[i],MPIU_INT,i,tag,comm);
299:       }
 300:       /* determine the number of diagonal and off-diagonal nonzeros */
301:       PetscArrayzero(olens,m);
302:       PetscCalloc1(m,&ld);
303:       jj   = 0;
304:       for (i=0; i<m; i++) {
305:         for (j=0; j<dlens[i]; j++) {
306:           if (gmata->j[jj] < rstart) ld[i]++;
307:           if (gmata->j[jj] < rstart || gmata->j[jj] >= rend) olens[i]++;
308:           jj++;
309:         }
310:       }
311:       /* send column indices to other processes */
312:       for (i=1; i<size; i++) {
313:         nz   = gmata->i[rowners[i+1]]-gmata->i[rowners[i]];
314:         MPI_Send(&nz,1,MPIU_INT,i,tag,comm);
315:         MPI_Send(gmata->j + gmata->i[rowners[i]],nz,MPIU_INT,i,tag,comm);
316:       }

318:       /* send numerical values to other processes */
319:       for (i=1; i<size; i++) {
320:         nz   = gmata->i[rowners[i+1]]-gmata->i[rowners[i]];
321:         MPI_Send(gmata->a + gmata->i[rowners[i]],nz,MPIU_SCALAR,i,tag,comm);
322:       }
323:       gmataa = gmata->a;
324:       gmataj = gmata->j;

326:     } else {
327:       /* receive row lengths */
328:       MPI_Recv(dlens,m,MPIU_INT,0,tag,comm,&status);
329:       /* receive column indices */
330:       MPI_Recv(&nz,1,MPIU_INT,0,tag,comm,&status);
331:       PetscMalloc2(nz,&gmataa,nz,&gmataj);
332:       MPI_Recv(gmataj,nz,MPIU_INT,0,tag,comm,&status);
 333:       /* determine the number of diagonal and off-diagonal nonzeros */
334:       PetscArrayzero(olens,m);
335:       PetscCalloc1(m,&ld);
336:       jj   = 0;
337:       for (i=0; i<m; i++) {
338:         for (j=0; j<dlens[i]; j++) {
339:           if (gmataj[jj] < rstart) ld[i]++;
340:           if (gmataj[jj] < rstart || gmataj[jj] >= rend) olens[i]++;
341:           jj++;
342:         }
343:       }
344:       /* receive numerical values */
345:       PetscArrayzero(gmataa,nz);
346:       MPI_Recv(gmataa,nz,MPIU_SCALAR,0,tag,comm,&status);
347:     }
348:     /* set preallocation */
349:     for (i=0; i<m; i++) {
350:       dlens[i] -= olens[i];
351:     }
352:     MatSeqAIJSetPreallocation(mat,0,dlens);
353:     MatMPIAIJSetPreallocation(mat,0,dlens,0,olens);

355:     for (i=0; i<m; i++) {
356:       dlens[i] += olens[i];
357:     }
358:     cnt = 0;
359:     for (i=0; i<m; i++) {
360:       row  = rstart + i;
361:       MatSetValues(mat,1,&row,dlens[i],gmataj+cnt,gmataa+cnt,INSERT_VALUES);
362:       cnt += dlens[i];
363:     }
364:     if (rank) {
365:       PetscFree2(gmataa,gmataj);
366:     }
367:     PetscFree2(dlens,olens);
368:     PetscFree(rowners);

370:     ((Mat_MPIAIJ*)(mat->data))->ld = ld;

372:     *inmat = mat;
373:   } else {   /* column indices are already set; only need to move over numerical values from process 0 */
374:     Mat_SeqAIJ *Ad = (Mat_SeqAIJ*)((Mat_MPIAIJ*)((*inmat)->data))->A->data;
375:     Mat_SeqAIJ *Ao = (Mat_SeqAIJ*)((Mat_MPIAIJ*)((*inmat)->data))->B->data;
376:     mat  = *inmat;
377:     PetscObjectGetNewTag((PetscObject)mat,&tag);
378:     if (!rank) {
379:       /* send numerical values to other processes */
380:       gmata  = (Mat_SeqAIJ*) gmat->data;
381:       MatGetOwnershipRanges(mat,(const PetscInt**)&rowners);
382:       gmataa = gmata->a;
383:       for (i=1; i<size; i++) {
384:         nz   = gmata->i[rowners[i+1]]-gmata->i[rowners[i]];
385:         MPI_Send(gmataa + gmata->i[rowners[i]],nz,MPIU_SCALAR,i,tag,comm);
386:       }
387:       nz = gmata->i[rowners[1]]-gmata->i[rowners[0]];
388:     } else {
 389:       /* receive numerical values from process 0 */
390:       nz   = Ad->nz + Ao->nz;
391:       PetscMalloc1(nz,&gmataa); gmataarestore = gmataa;
392:       MPI_Recv(gmataa,nz,MPIU_SCALAR,0,tag,comm,&status);
393:     }
394:     /* transfer numerical values into the diagonal A and off diagonal B parts of mat */
395:     ld = ((Mat_MPIAIJ*)(mat->data))->ld;
396:     ad = Ad->a;
397:     ao = Ao->a;
398:     if (mat->rmap->n) {
399:       i  = 0;
400:       nz = ld[i];                                   PetscArraycpy(ao,gmataa,nz); ao += nz; gmataa += nz;
401:       nz = Ad->i[i+1] - Ad->i[i];                   PetscArraycpy(ad,gmataa,nz); ad += nz; gmataa += nz;
402:     }
403:     for (i=1; i<mat->rmap->n; i++) {
404:       nz = Ao->i[i] - Ao->i[i-1] - ld[i-1] + ld[i]; PetscArraycpy(ao,gmataa,nz); ao += nz; gmataa += nz;
405:       nz = Ad->i[i+1] - Ad->i[i];                   PetscArraycpy(ad,gmataa,nz); ad += nz; gmataa += nz;
406:     }
407:     i--;
408:     if (mat->rmap->n) {
409:       nz = Ao->i[i+1] - Ao->i[i] - ld[i];           PetscArraycpy(ao,gmataa,nz);
410:     }
411:     if (rank) {
412:       PetscFree(gmataarestore);
413:     }
414:   }
415:   MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY);
416:   MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY);
417:   return(0);
418: }

420: /*
421:   Local utility routine that creates a mapping from the global column
422: number to the local number in the off-diagonal part of the local
 423: storage of the matrix.  When PETSC_USE_CTABLE is used this is scalable at
 424: a slightly higher hash table cost; without it, it is not scalable (each processor
 425: has an order N integer array) but is fast to access.
426: */
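/*
   A lookup sketch matching the two configurations described above (gcol is a global column index,
   lcol the resulting local index in the off-diagonal block; a result below zero means the column
   is not present):

   #if defined(PETSC_USE_CTABLE)
     PetscTableFind(aij->colmap,gcol+1,&lcol); lcol--;
   #else
     lcol = aij->colmap[gcol] - 1;
   #endif
*/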
427: PetscErrorCode MatCreateColmap_MPIAIJ_Private(Mat mat)
428: {
429:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
431:   PetscInt       n = aij->B->cmap->n,i;

434:   if (!aij->garray) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"MPIAIJ Matrix was assembled but is missing garray");
435: #if defined(PETSC_USE_CTABLE)
436:   PetscTableCreate(n,mat->cmap->N+1,&aij->colmap);
437:   for (i=0; i<n; i++) {
438:     PetscTableAdd(aij->colmap,aij->garray[i]+1,i+1,INSERT_VALUES);
439:   }
440: #else
441:   PetscCalloc1(mat->cmap->N+1,&aij->colmap);
442:   PetscLogObjectMemory((PetscObject)mat,(mat->cmap->N+1)*sizeof(PetscInt));
443:   for (i=0; i<n; i++) aij->colmap[aij->garray[i]] = i+1;
444: #endif
445:   return(0);
446: }

448: #define MatSetValues_SeqAIJ_A_Private(row,col,value,addv,orow,ocol)     \
449: { \
450:     if (col <= lastcol1)  low1 = 0;     \
451:     else                 high1 = nrow1; \
452:     lastcol1 = col;\
453:     while (high1-low1 > 5) { \
454:       t = (low1+high1)/2; \
455:       if (rp1[t] > col) high1 = t; \
456:       else              low1  = t; \
457:     } \
458:       for (_i=low1; _i<high1; _i++) { \
459:         if (rp1[_i] > col) break; \
460:         if (rp1[_i] == col) { \
461:           if (addv == ADD_VALUES) { \
462:             ap1[_i] += value;   \
 463:             /* Not sure whether PetscLogFlops() will slow down the code or not */ \
464:             (void)PetscLogFlops(1.0);   \
465:            } \
466:           else                    ap1[_i] = value; \
467:           inserted = PETSC_TRUE; \
468:           goto a_noinsert; \
469:         } \
470:       }  \
471:       if (value == 0.0 && ignorezeroentries && row != col) {low1 = 0; high1 = nrow1;goto a_noinsert;} \
472:       if (nonew == 1) {low1 = 0; high1 = nrow1; goto a_noinsert;}                \
473:       if (nonew == -1) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero at global row/column (%D, %D) into matrix", orow, ocol); \
474:       MatSeqXAIJReallocateAIJ(A,am,1,nrow1,row,col,rmax1,aa,ai,aj,rp1,ap1,aimax,nonew,MatScalar); \
475:       N = nrow1++ - 1; a->nz++; high1++; \
476:       /* shift up all the later entries in this row */ \
477:       PetscArraymove(rp1+_i+1,rp1+_i,N-_i+1);\
478:       PetscArraymove(ap1+_i+1,ap1+_i,N-_i+1);\
479:       rp1[_i] = col;  \
480:       ap1[_i] = value;  \
481:       A->nonzerostate++;\
482:       a_noinsert: ; \
483:       ailen[row] = nrow1; \
484: }

486: #define MatSetValues_SeqAIJ_B_Private(row,col,value,addv,orow,ocol) \
487:   { \
488:     if (col <= lastcol2) low2 = 0;                        \
489:     else high2 = nrow2;                                   \
490:     lastcol2 = col;                                       \
491:     while (high2-low2 > 5) {                              \
492:       t = (low2+high2)/2;                                 \
493:       if (rp2[t] > col) high2 = t;                        \
494:       else             low2  = t;                         \
495:     }                                                     \
496:     for (_i=low2; _i<high2; _i++) {                       \
497:       if (rp2[_i] > col) break;                           \
498:       if (rp2[_i] == col) {                               \
499:         if (addv == ADD_VALUES) {                         \
500:           ap2[_i] += value;                               \
501:           (void)PetscLogFlops(1.0);                       \
502:         }                                                 \
503:         else                    ap2[_i] = value;          \
504:         inserted = PETSC_TRUE;                            \
505:         goto b_noinsert;                                  \
506:       }                                                   \
507:     }                                                     \
508:     if (value == 0.0 && ignorezeroentries) {low2 = 0; high2 = nrow2; goto b_noinsert;} \
509:     if (nonew == 1) {low2 = 0; high2 = nrow2; goto b_noinsert;}                        \
510:     if (nonew == -1) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero at global row/column (%D, %D) into matrix", orow, ocol); \
511:     MatSeqXAIJReallocateAIJ(B,bm,1,nrow2,row,col,rmax2,ba,bi,bj,rp2,ap2,bimax,nonew,MatScalar); \
512:     N = nrow2++ - 1; b->nz++; high2++;                    \
513:     /* shift up all the later entries in this row */      \
514:     PetscArraymove(rp2+_i+1,rp2+_i,N-_i+1);\
515:     PetscArraymove(ap2+_i+1,ap2+_i,N-_i+1);\
516:     rp2[_i] = col;                                        \
517:     ap2[_i] = value;                                      \
518:     B->nonzerostate++;                                    \
519:     b_noinsert: ;                                         \
520:     bilen[row] = nrow2;                                   \
521:   }

523: PetscErrorCode MatSetValuesRow_MPIAIJ(Mat A,PetscInt row,const PetscScalar v[])
524: {
525:   Mat_MPIAIJ     *mat = (Mat_MPIAIJ*)A->data;
526:   Mat_SeqAIJ     *a   = (Mat_SeqAIJ*)mat->A->data,*b = (Mat_SeqAIJ*)mat->B->data;
528:   PetscInt       l,*garray = mat->garray,diag;

531:   /* code only works for square matrices A */

533:   /* find size of row to the left of the diagonal part */
534:   MatGetOwnershipRange(A,&diag,0);
535:   row  = row - diag;
536:   for (l=0; l<b->i[row+1]-b->i[row]; l++) {
537:     if (garray[b->j[b->i[row]+l]] > diag) break;
538:   }
539:   PetscArraycpy(b->a+b->i[row],v,l);

541:   /* diagonal part */
542:   PetscArraycpy(a->a+a->i[row],v+l,(a->i[row+1]-a->i[row]));

544:   /* right of diagonal part */
545:   PetscArraycpy(b->a+b->i[row]+l,v+l+a->i[row+1]-a->i[row],b->i[row+1]-b->i[row]-l);
546: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_CUDA)
547:   if (A->offloadmask != PETSC_OFFLOAD_UNALLOCATED && (l || (a->i[row+1]-a->i[row]) || (b->i[row+1]-b->i[row]-l))) A->offloadmask = PETSC_OFFLOAD_CPU;
548: #endif
549:   return(0);
550: }

552: PetscErrorCode MatSetValues_MPIAIJ(Mat mat,PetscInt m,const PetscInt im[],PetscInt n,const PetscInt in[],const PetscScalar v[],InsertMode addv)
553: {
554:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
555:   PetscScalar    value = 0.0;
557:   PetscInt       i,j,rstart  = mat->rmap->rstart,rend = mat->rmap->rend;
558:   PetscInt       cstart      = mat->cmap->rstart,cend = mat->cmap->rend,row,col;
559:   PetscBool      roworiented = aij->roworiented;

561:   /* Some Variables required in the macro */
562:   Mat        A                    = aij->A;
563:   Mat_SeqAIJ *a                   = (Mat_SeqAIJ*)A->data;
564:   PetscInt   *aimax               = a->imax,*ai = a->i,*ailen = a->ilen,*aj = a->j;
565:   MatScalar  *aa                  = a->a;
566:   PetscBool  ignorezeroentries    = a->ignorezeroentries;
567:   Mat        B                    = aij->B;
568:   Mat_SeqAIJ *b                   = (Mat_SeqAIJ*)B->data;
569:   PetscInt   *bimax               = b->imax,*bi = b->i,*bilen = b->ilen,*bj = b->j,bm = aij->B->rmap->n,am = aij->A->rmap->n;
570:   MatScalar  *ba                  = b->a;
571:   /* This variable below is only for the PETSC_HAVE_VIENNACL or PETSC_HAVE_CUDA cases, but we define it in all cases because we
572:    * cannot use "#if defined" inside a macro. */
573:   PETSC_UNUSED PetscBool inserted = PETSC_FALSE;

575:   PetscInt  *rp1,*rp2,ii,nrow1,nrow2,_i,rmax1,rmax2,N,low1,high1,low2,high2,t,lastcol1,lastcol2;
576:   PetscInt  nonew;
577:   MatScalar *ap1,*ap2;

580:   for (i=0; i<m; i++) {
581:     if (im[i] < 0) continue;
582:     if (PetscUnlikelyDebug(im[i] >= mat->rmap->N)) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Row too large: row %D max %D",im[i],mat->rmap->N-1);
583:     if (im[i] >= rstart && im[i] < rend) {
584:       row      = im[i] - rstart;
585:       lastcol1 = -1;
586:       rp1      = aj + ai[row];
587:       ap1      = aa + ai[row];
588:       rmax1    = aimax[row];
589:       nrow1    = ailen[row];
590:       low1     = 0;
591:       high1    = nrow1;
592:       lastcol2 = -1;
593:       rp2      = bj + bi[row];
594:       ap2      = ba + bi[row];
595:       rmax2    = bimax[row];
596:       nrow2    = bilen[row];
597:       low2     = 0;
598:       high2    = nrow2;

600:       for (j=0; j<n; j++) {
601:         if (v)  value = roworiented ? v[i*n+j] : v[i+j*m];
602:         if (ignorezeroentries && value == 0.0 && (addv == ADD_VALUES) && im[i] != in[j]) continue;
603:         if (in[j] >= cstart && in[j] < cend) {
604:           col   = in[j] - cstart;
605:           nonew = a->nonew;
606:           MatSetValues_SeqAIJ_A_Private(row,col,value,addv,im[i],in[j]);
607: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_CUDA)
608:           if (A->offloadmask != PETSC_OFFLOAD_UNALLOCATED && inserted) A->offloadmask = PETSC_OFFLOAD_CPU;
609: #endif
610:         } else if (in[j] < 0) continue;
611:         else if (PetscUnlikelyDebug(in[j] >= mat->cmap->N)) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Column too large: col %D max %D",in[j],mat->cmap->N-1);
612:         else {
613:           if (mat->was_assembled) {
614:             if (!aij->colmap) {
615:               MatCreateColmap_MPIAIJ_Private(mat);
616:             }
617: #if defined(PETSC_USE_CTABLE)
618:             PetscTableFind(aij->colmap,in[j]+1,&col);
619:             col--;
620: #else
621:             col = aij->colmap[in[j]] - 1;
622: #endif
623:             if (col < 0 && !((Mat_SeqAIJ*)(aij->B->data))->nonew) {
624:               MatDisAssemble_MPIAIJ(mat);
625:               col  =  in[j];
626:               /* Reinitialize the variables required by MatSetValues_SeqAIJ_B_Private() */
627:               B        = aij->B;
628:               b        = (Mat_SeqAIJ*)B->data;
629:               bimax    = b->imax; bi = b->i; bilen = b->ilen; bj = b->j; ba = b->a;
630:               rp2      = bj + bi[row];
631:               ap2      = ba + bi[row];
632:               rmax2    = bimax[row];
633:               nrow2    = bilen[row];
634:               low2     = 0;
635:               high2    = nrow2;
636:               bm       = aij->B->rmap->n;
637:               ba       = b->a;
638:               inserted = PETSC_FALSE;
639:             } else if (col < 0) {
640:               if (1 == ((Mat_SeqAIJ*)(aij->B->data))->nonew) {
 641:                 PetscInfo3(mat,"Skipping insertion of new nonzero location in off-diagonal portion of matrix %g(%D,%D)\n",(double)PetscRealPart(value),im[i],in[j]);
642:               } else SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero at global row/column (%D, %D) into matrix", im[i], in[j]);
643:             }
644:           } else col = in[j];
645:           nonew = b->nonew;
646:           MatSetValues_SeqAIJ_B_Private(row,col,value,addv,im[i],in[j]);
647: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_CUDA)
648:           if (B->offloadmask != PETSC_OFFLOAD_UNALLOCATED && inserted) B->offloadmask = PETSC_OFFLOAD_CPU;
649: #endif
650:         }
651:       }
652:     } else {
653:       if (mat->nooffprocentries) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Setting off process row %D even though MatSetOption(,MAT_NO_OFF_PROC_ENTRIES,PETSC_TRUE) was set",im[i]);
654:       if (!aij->donotstash) {
655:         mat->assembled = PETSC_FALSE;
656:         if (roworiented) {
657:           MatStashValuesRow_Private(&mat->stash,im[i],n,in,v+i*n,(PetscBool)(ignorezeroentries && (addv == ADD_VALUES)));
658:         } else {
659:           MatStashValuesCol_Private(&mat->stash,im[i],n,in,v+i,m,(PetscBool)(ignorezeroentries && (addv == ADD_VALUES)));
660:         }
661:       }
662:     }
663:   }
664:   return(0);
665: }

667: /*
668:     This function sets the j and ilen arrays (of the diagonal and off-diagonal part) of an MPIAIJ-matrix.
669:     The values in mat_i have to be sorted and the values in mat_j have to be sorted for each row (CSR-like).
 670:     No off-processor parts of the matrix are allowed here and mat->was_assembled has to be PETSC_FALSE.
671: */
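/*
   For illustration, a rank owning two rows with local entries a(0,0), a(0,2), a(1,1), a(1,3) of a
   4-column matrix (and cstart = 0) would pass the CSR arrays

     mat_i = {0, 2, 4}
     mat_j = {0, 2, 1, 3}

   with the column indices already sorted within each row, as required above.
*/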
672: PetscErrorCode MatSetValues_MPIAIJ_CopyFromCSRFormat_Symbolic(Mat mat,const PetscInt mat_j[],const PetscInt mat_i[])
673: {
674:   Mat_MPIAIJ     *aij        = (Mat_MPIAIJ*)mat->data;
675:   Mat            A           = aij->A; /* diagonal part of the matrix */
676:   Mat            B           = aij->B; /* offdiagonal part of the matrix */
677:   Mat_SeqAIJ     *a          = (Mat_SeqAIJ*)A->data;
678:   Mat_SeqAIJ     *b          = (Mat_SeqAIJ*)B->data;
679:   PetscInt       cstart      = mat->cmap->rstart,cend = mat->cmap->rend,col;
680:   PetscInt       *ailen      = a->ilen,*aj = a->j;
681:   PetscInt       *bilen      = b->ilen,*bj = b->j;
682:   PetscInt       am          = aij->A->rmap->n,j;
683:   PetscInt       diag_so_far = 0,dnz;
684:   PetscInt       offd_so_far = 0,onz;

687:   /* Iterate over all rows of the matrix */
688:   for (j=0; j<am; j++) {
689:     dnz = onz = 0;
690:     /*  Iterate over all non-zero columns of the current row */
691:     for (col=mat_i[j]; col<mat_i[j+1]; col++) {
692:       /* If column is in the diagonal */
693:       if (mat_j[col] >= cstart && mat_j[col] < cend) {
694:         aj[diag_so_far++] = mat_j[col] - cstart;
695:         dnz++;
696:       } else { /* off-diagonal entries */
697:         bj[offd_so_far++] = mat_j[col];
698:         onz++;
699:       }
700:     }
701:     ailen[j] = dnz;
702:     bilen[j] = onz;
703:   }
704:   return(0);
705: }

707: /*
708:     This function sets the local j, a and ilen arrays (of the diagonal and off-diagonal part) of an MPIAIJ-matrix.
709:     The values in mat_i have to be sorted and the values in mat_j have to be sorted for each row (CSR-like).
 710:     No off-processor parts of the matrix are allowed here; they are set at a later point by MatSetValues_MPIAIJ.
 711:     Also, mat->was_assembled has to be PETSC_FALSE; otherwise the statement aj[rowstart_diag+dnz_row] = mat_j[col] - cstart;
 712:     would not be valid and the more complex MatSetValues_MPIAIJ has to be used.
713: */
714: PetscErrorCode MatSetValues_MPIAIJ_CopyFromCSRFormat(Mat mat,const PetscInt mat_j[],const PetscInt mat_i[],const PetscScalar mat_a[])
715: {
716:   Mat_MPIAIJ     *aij   = (Mat_MPIAIJ*)mat->data;
717:   Mat            A      = aij->A; /* diagonal part of the matrix */
718:   Mat            B      = aij->B; /* offdiagonal part of the matrix */
719:   Mat_SeqAIJ     *aijd  =(Mat_SeqAIJ*)(aij->A)->data,*aijo=(Mat_SeqAIJ*)(aij->B)->data;
720:   Mat_SeqAIJ     *a     = (Mat_SeqAIJ*)A->data;
721:   Mat_SeqAIJ     *b     = (Mat_SeqAIJ*)B->data;
722:   PetscInt       cstart = mat->cmap->rstart,cend = mat->cmap->rend;
723:   PetscInt       *ailen = a->ilen,*aj = a->j;
724:   PetscInt       *bilen = b->ilen,*bj = b->j;
725:   PetscInt       am     = aij->A->rmap->n,j;
726:   PetscInt       *full_diag_i=aijd->i,*full_offd_i=aijo->i; /* These variables can also include non-local elements, which are set at a later point. */
727:   PetscInt       col,dnz_row,onz_row,rowstart_diag,rowstart_offd;
728:   PetscScalar    *aa = a->a,*ba = b->a;

731:   /* Iterate over all rows of the matrix */
732:   for (j=0; j<am; j++) {
733:     dnz_row = onz_row = 0;
734:     rowstart_offd = full_offd_i[j];
735:     rowstart_diag = full_diag_i[j];
736:     /*  Iterate over all non-zero columns of the current row */
737:     for (col=mat_i[j]; col<mat_i[j+1]; col++) {
738:       /* If column is in the diagonal */
739:       if (mat_j[col] >= cstart && mat_j[col] < cend) {
740:         aj[rowstart_diag+dnz_row] = mat_j[col] - cstart;
741:         aa[rowstart_diag+dnz_row] = mat_a[col];
742:         dnz_row++;
743:       } else { /* off-diagonal entries */
744:         bj[rowstart_offd+onz_row] = mat_j[col];
745:         ba[rowstart_offd+onz_row] = mat_a[col];
746:         onz_row++;
747:       }
748:     }
749:     ailen[j] = dnz_row;
750:     bilen[j] = onz_row;
751:   }
752:   return(0);
753: }

755: PetscErrorCode MatGetValues_MPIAIJ(Mat mat,PetscInt m,const PetscInt idxm[],PetscInt n,const PetscInt idxn[],PetscScalar v[])
756: {
757:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
759:   PetscInt       i,j,rstart = mat->rmap->rstart,rend = mat->rmap->rend;
760:   PetscInt       cstart = mat->cmap->rstart,cend = mat->cmap->rend,row,col;

763:   for (i=0; i<m; i++) {
764:     if (idxm[i] < 0) continue; /* SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Negative row: %D",idxm[i]);*/
765:     if (idxm[i] >= mat->rmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Row too large: row %D max %D",idxm[i],mat->rmap->N-1);
766:     if (idxm[i] >= rstart && idxm[i] < rend) {
767:       row = idxm[i] - rstart;
768:       for (j=0; j<n; j++) {
769:         if (idxn[j] < 0) continue; /* SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Negative column: %D",idxn[j]); */
770:         if (idxn[j] >= mat->cmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Column too large: col %D max %D",idxn[j],mat->cmap->N-1);
771:         if (idxn[j] >= cstart && idxn[j] < cend) {
772:           col  = idxn[j] - cstart;
773:           MatGetValues(aij->A,1,&row,1,&col,v+i*n+j);
774:         } else {
775:           if (!aij->colmap) {
776:             MatCreateColmap_MPIAIJ_Private(mat);
777:           }
778: #if defined(PETSC_USE_CTABLE)
779:           PetscTableFind(aij->colmap,idxn[j]+1,&col);
780:           col--;
781: #else
782:           col = aij->colmap[idxn[j]] - 1;
783: #endif
784:           if ((col < 0) || (aij->garray[col] != idxn[j])) *(v+i*n+j) = 0.0;
785:           else {
786:             MatGetValues(aij->B,1,&row,1,&col,v+i*n+j);
787:           }
788:         }
789:       }
790:     } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Only local values currently supported");
791:   }
792:   return(0);
793: }

795: extern PetscErrorCode MatMultDiagonalBlock_MPIAIJ(Mat,Vec,Vec);

797: PetscErrorCode MatAssemblyBegin_MPIAIJ(Mat mat,MatAssemblyType mode)
798: {
799:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
801:   PetscInt       nstash,reallocs;

804:   if (aij->donotstash || mat->nooffprocentries) return(0);

806:   MatStashScatterBegin_Private(mat,&mat->stash,mat->rmap->range);
807:   MatStashGetInfo_Private(&mat->stash,&nstash,&reallocs);
808:   PetscInfo2(aij->A,"Stash has %D entries, uses %D mallocs.\n",nstash,reallocs);
809:   return(0);
810: }

812: PetscErrorCode MatAssemblyEnd_MPIAIJ(Mat mat,MatAssemblyType mode)
813: {
814:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
815:   Mat_SeqAIJ     *a   = (Mat_SeqAIJ*)aij->A->data;
817:   PetscMPIInt    n;
818:   PetscInt       i,j,rstart,ncols,flg;
819:   PetscInt       *row,*col;
820:   PetscBool      other_disassembled;
821:   PetscScalar    *val;

823:   /* do not use 'b = (Mat_SeqAIJ*)aij->B->data' as B can be reset in disassembly */

826:   if (!aij->donotstash && !mat->nooffprocentries) {
827:     while (1) {
828:       MatStashScatterGetMesg_Private(&mat->stash,&n,&row,&col,&val,&flg);
829:       if (!flg) break;

831:       for (i=0; i<n; ) {
832:         /* Now identify the consecutive vals belonging to the same row */
833:         for (j=i,rstart=row[j]; j<n; j++) {
834:           if (row[j] != rstart) break;
835:         }
836:         if (j < n) ncols = j-i;
837:         else       ncols = n-i;
838:         /* Now assemble all these values with a single function call */
839:         MatSetValues_MPIAIJ(mat,1,row+i,ncols,col+i,val+i,mat->insertmode);
840:         i    = j;
841:       }
842:     }
843:     MatStashScatterEnd_Private(&mat->stash);
844:   }
845: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_CUDA)
846:   if (mat->offloadmask == PETSC_OFFLOAD_CPU) aij->A->offloadmask = PETSC_OFFLOAD_CPU;
847:   /* We call MatBindToCPU() on aij->A and aij->B here, because if MatBindToCPU_MPIAIJ() is called before assembly, it cannot bind these. */
848:   if (mat->boundtocpu) {
849:     MatBindToCPU(aij->A,PETSC_TRUE);
850:     MatBindToCPU(aij->B,PETSC_TRUE);
851:   }
852: #endif
853:   MatAssemblyBegin(aij->A,mode);
854:   MatAssemblyEnd(aij->A,mode);

 856:   /* determine if any processor has disassembled; if so, we must
 857:      also disassemble ourselves, in order that we may reassemble. */
 858:   /*
 859:      if the nonzero structure of submatrix B cannot change, then we know that
 860:      no processor disassembled and thus we can skip this step
861:   */
862:   if (!((Mat_SeqAIJ*)aij->B->data)->nonew) {
863:     MPIU_Allreduce(&mat->was_assembled,&other_disassembled,1,MPIU_BOOL,MPI_PROD,PetscObjectComm((PetscObject)mat));
864:     if (mat->was_assembled && !other_disassembled) {
865: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_CUDA)
866:       aij->B->offloadmask = PETSC_OFFLOAD_BOTH; /* do not copy on the GPU when assembling inside MatDisAssemble_MPIAIJ */
867: #endif
868:       MatDisAssemble_MPIAIJ(mat);
869:     }
870:   }
871:   if (!mat->was_assembled && mode == MAT_FINAL_ASSEMBLY) {
872:     MatSetUpMultiply_MPIAIJ(mat);
873:   }
874:   MatSetOption(aij->B,MAT_USE_INODES,PETSC_FALSE);
875: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_CUDA)
876:   if (mat->offloadmask == PETSC_OFFLOAD_CPU && aij->B->offloadmask != PETSC_OFFLOAD_UNALLOCATED) aij->B->offloadmask = PETSC_OFFLOAD_CPU;
877: #endif
878:   MatAssemblyBegin(aij->B,mode);
879:   MatAssemblyEnd(aij->B,mode);

881:   PetscFree2(aij->rowvalues,aij->rowindices);

883:   aij->rowvalues = 0;

885:   VecDestroy(&aij->diag);
886:   if (a->inode.size) mat->ops->multdiagonalblock = MatMultDiagonalBlock_MPIAIJ;

888:   /* if no new nonzero locations are allowed in matrix then only set the matrix state the first time through */
889:   if ((!mat->was_assembled && mode == MAT_FINAL_ASSEMBLY) || !((Mat_SeqAIJ*)(aij->A->data))->nonew) {
890:     PetscObjectState state = aij->A->nonzerostate + aij->B->nonzerostate;
891:     MPIU_Allreduce(&state,&mat->nonzerostate,1,MPIU_INT64,MPI_SUM,PetscObjectComm((PetscObject)mat));
892:   }
893: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_CUDA)
894:   mat->offloadmask = PETSC_OFFLOAD_BOTH;
895: #endif
896:   return(0);
897: }

899: PetscErrorCode MatZeroEntries_MPIAIJ(Mat A)
900: {
901:   Mat_MPIAIJ     *l = (Mat_MPIAIJ*)A->data;

905:   MatZeroEntries(l->A);
906:   MatZeroEntries(l->B);
907:   return(0);
908: }

910: PetscErrorCode MatZeroRows_MPIAIJ(Mat A,PetscInt N,const PetscInt rows[],PetscScalar diag,Vec x,Vec b)
911: {
912:   Mat_MPIAIJ      *mat = (Mat_MPIAIJ *) A->data;
913:   PetscObjectState sA, sB;
914:   PetscInt        *lrows;
915:   PetscInt         r, len;
916:   PetscBool        cong, lch, gch;
917:   PetscErrorCode   ierr;

920:   /* get locally owned rows */
921:   MatZeroRowsMapLocal_Private(A,N,rows,&len,&lrows);
922:   MatHasCongruentLayouts(A,&cong);
923:   /* fix right hand side if needed */
924:   if (x && b) {
925:     const PetscScalar *xx;
926:     PetscScalar       *bb;

928:     if (!cong) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Need matching row/col layout");
929:     VecGetArrayRead(x, &xx);
930:     VecGetArray(b, &bb);
931:     for (r = 0; r < len; ++r) bb[lrows[r]] = diag*xx[lrows[r]];
932:     VecRestoreArrayRead(x, &xx);
933:     VecRestoreArray(b, &bb);
934:   }

936:   sA = mat->A->nonzerostate;
937:   sB = mat->B->nonzerostate;

939:   if (diag != 0.0 && cong) {
940:     MatZeroRows(mat->A, len, lrows, diag, NULL, NULL);
941:     MatZeroRows(mat->B, len, lrows, 0.0, NULL, NULL);
942:   } else if (diag != 0.0) { /* non-square or non congruent layouts -> if keepnonzeropattern is false, we allow for new insertion */
943:     Mat_SeqAIJ *aijA = (Mat_SeqAIJ*)mat->A->data;
944:     Mat_SeqAIJ *aijB = (Mat_SeqAIJ*)mat->B->data;
945:     PetscInt   nnwA, nnwB;
946:     PetscBool  nnzA, nnzB;

948:     nnwA = aijA->nonew;
949:     nnwB = aijB->nonew;
950:     nnzA = aijA->keepnonzeropattern;
951:     nnzB = aijB->keepnonzeropattern;
952:     if (!nnzA) {
953:       PetscInfo(mat->A,"Requested to not keep the pattern and add a nonzero diagonal; may encounter reallocations on diagonal block.\n");
954:       aijA->nonew = 0;
955:     }
956:     if (!nnzB) {
957:       PetscInfo(mat->B,"Requested to not keep the pattern and add a nonzero diagonal; may encounter reallocations on off-diagonal block.\n");
958:       aijB->nonew = 0;
959:     }
960:     /* Must zero here before the next loop */
961:     MatZeroRows(mat->A, len, lrows, 0.0, NULL, NULL);
962:     MatZeroRows(mat->B, len, lrows, 0.0, NULL, NULL);
963:     for (r = 0; r < len; ++r) {
964:       const PetscInt row = lrows[r] + A->rmap->rstart;
965:       if (row >= A->cmap->N) continue;
966:       MatSetValues(A, 1, &row, 1, &row, &diag, INSERT_VALUES);
967:     }
968:     aijA->nonew = nnwA;
969:     aijB->nonew = nnwB;
970:   } else {
971:     MatZeroRows(mat->A, len, lrows, 0.0, NULL, NULL);
972:     MatZeroRows(mat->B, len, lrows, 0.0, NULL, NULL);
973:   }
974:   PetscFree(lrows);
975:   MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
976:   MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);

978:   /* reduce nonzerostate */
979:   lch = (PetscBool)(sA != mat->A->nonzerostate || sB != mat->B->nonzerostate);
980:   MPIU_Allreduce(&lch,&gch,1,MPIU_BOOL,MPI_LOR,PetscObjectComm((PetscObject)A));
981:   if (gch) A->nonzerostate++;
982:   return(0);
983: }

985: PetscErrorCode MatZeroRowsColumns_MPIAIJ(Mat A,PetscInt N,const PetscInt rows[],PetscScalar diag,Vec x,Vec b)
986: {
987:   Mat_MPIAIJ        *l = (Mat_MPIAIJ*)A->data;
988:   PetscErrorCode    ierr;
989:   PetscMPIInt       n = A->rmap->n;
990:   PetscInt          i,j,r,m,len = 0;
991:   PetscInt          *lrows,*owners = A->rmap->range;
992:   PetscMPIInt       p = 0;
993:   PetscSFNode       *rrows;
994:   PetscSF           sf;
995:   const PetscScalar *xx;
996:   PetscScalar       *bb,*mask;
997:   Vec               xmask,lmask;
998:   Mat_SeqAIJ        *aij = (Mat_SeqAIJ*)l->B->data;
999:   const PetscInt    *aj, *ii,*ridx;
1000:   PetscScalar       *aa;

1003:   /* Create SF where leaves are input rows and roots are owned rows */
1004:   PetscMalloc1(n, &lrows);
1005:   for (r = 0; r < n; ++r) lrows[r] = -1;
1006:   PetscMalloc1(N, &rrows);
1007:   for (r = 0; r < N; ++r) {
1008:     const PetscInt idx   = rows[r];
1009:     if (idx < 0 || A->rmap->N <= idx) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Row %D out of range [0,%D)",idx,A->rmap->N);
1010:     if (idx < owners[p] || owners[p+1] <= idx) { /* short-circuit the search if the last p owns this row too */
1011:       PetscLayoutFindOwner(A->rmap,idx,&p);
1012:     }
1013:     rrows[r].rank  = p;
1014:     rrows[r].index = rows[r] - owners[p];
1015:   }
1016:   PetscSFCreate(PetscObjectComm((PetscObject) A), &sf);
1017:   PetscSFSetGraph(sf, n, N, NULL, PETSC_OWN_POINTER, rrows, PETSC_OWN_POINTER);
1018:   /* Collect flags for rows to be zeroed */
1019:   PetscSFReduceBegin(sf, MPIU_INT, (PetscInt *) rows, lrows, MPI_LOR);
1020:   PetscSFReduceEnd(sf, MPIU_INT, (PetscInt *) rows, lrows, MPI_LOR);
1021:   PetscSFDestroy(&sf);
1022:   /* Compress and put in row numbers */
1023:   for (r = 0; r < n; ++r) if (lrows[r] >= 0) lrows[len++] = r;
1024:   /* zero diagonal part of matrix */
1025:   MatZeroRowsColumns(l->A,len,lrows,diag,x,b);
1026:   /* handle off diagonal part of matrix */
1027:   MatCreateVecs(A,&xmask,NULL);
1028:   VecDuplicate(l->lvec,&lmask);
1029:   VecGetArray(xmask,&bb);
1030:   for (i=0; i<len; i++) bb[lrows[i]] = 1;
1031:   VecRestoreArray(xmask,&bb);
1032:   VecScatterBegin(l->Mvctx,xmask,lmask,ADD_VALUES,SCATTER_FORWARD);
1033:   VecScatterEnd(l->Mvctx,xmask,lmask,ADD_VALUES,SCATTER_FORWARD);
1034:   VecDestroy(&xmask);
1035:   if (x && b) { /* this code is buggy when the row and column layout don't match */
1036:     PetscBool cong;

1038:     MatHasCongruentLayouts(A,&cong);
1039:     if (!cong) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Need matching row/col layout");
1040:     VecScatterBegin(l->Mvctx,x,l->lvec,INSERT_VALUES,SCATTER_FORWARD);
1041:     VecScatterEnd(l->Mvctx,x,l->lvec,INSERT_VALUES,SCATTER_FORWARD);
1042:     VecGetArrayRead(l->lvec,&xx);
1043:     VecGetArray(b,&bb);
1044:   }
1045:   VecGetArray(lmask,&mask);
1046:   /* remove zeroed rows of off diagonal matrix */
1047:   ii = aij->i;
1048:   for (i=0; i<len; i++) {
1049:     PetscArrayzero(aij->a + ii[lrows[i]],ii[lrows[i]+1] - ii[lrows[i]]);
1050:   }
1051:   /* loop over all elements of the off-process part of the matrix, zeroing removed columns */
1052:   if (aij->compressedrow.use) {
1053:     m    = aij->compressedrow.nrows;
1054:     ii   = aij->compressedrow.i;
1055:     ridx = aij->compressedrow.rindex;
1056:     for (i=0; i<m; i++) {
1057:       n  = ii[i+1] - ii[i];
1058:       aj = aij->j + ii[i];
1059:       aa = aij->a + ii[i];

1061:       for (j=0; j<n; j++) {
1062:         if (PetscAbsScalar(mask[*aj])) {
1063:           if (b) bb[*ridx] -= *aa*xx[*aj];
1064:           *aa = 0.0;
1065:         }
1066:         aa++;
1067:         aj++;
1068:       }
1069:       ridx++;
1070:     }
1071:   } else { /* do not use compressed row format */
1072:     m = l->B->rmap->n;
1073:     for (i=0; i<m; i++) {
1074:       n  = ii[i+1] - ii[i];
1075:       aj = aij->j + ii[i];
1076:       aa = aij->a + ii[i];
1077:       for (j=0; j<n; j++) {
1078:         if (PetscAbsScalar(mask[*aj])) {
1079:           if (b) bb[i] -= *aa*xx[*aj];
1080:           *aa = 0.0;
1081:         }
1082:         aa++;
1083:         aj++;
1084:       }
1085:     }
1086:   }
1087:   if (x && b) {
1088:     VecRestoreArray(b,&bb);
1089:     VecRestoreArrayRead(l->lvec,&xx);
1090:   }
1091:   VecRestoreArray(lmask,&mask);
1092:   VecDestroy(&lmask);
1093:   PetscFree(lrows);

1095:   /* only change matrix nonzero state if pattern was allowed to be changed */
1096:   if (!((Mat_SeqAIJ*)(l->A->data))->keepnonzeropattern) {
1097:     PetscObjectState state = l->A->nonzerostate + l->B->nonzerostate;
1098:     MPIU_Allreduce(&state,&A->nonzerostate,1,MPIU_INT64,MPI_SUM,PetscObjectComm((PetscObject)A));
1099:   }
1100:   return(0);
1101: }

1103: PetscErrorCode MatMult_MPIAIJ(Mat A,Vec xx,Vec yy)
1104: {
1105:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
1107:   PetscInt       nt;
1108:   VecScatter     Mvctx = a->Mvctx;

1111:   VecGetLocalSize(xx,&nt);
1112:   if (nt != A->cmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Incompatible partition of A (%D) and xx (%D)",A->cmap->n,nt);
1113:   VecScatterBegin(Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
1114:   (*a->A->ops->mult)(a->A,xx,yy);
1115:   VecScatterEnd(Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
1116:   (*a->B->ops->multadd)(a->B,a->lvec,yy,yy);
1117:   return(0);
1118: }

1120: PetscErrorCode MatMultDiagonalBlock_MPIAIJ(Mat A,Vec bb,Vec xx)
1121: {
1122:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;

1126:   MatMultDiagonalBlock(a->A,bb,xx);
1127:   return(0);
1128: }

1130: PetscErrorCode MatMultAdd_MPIAIJ(Mat A,Vec xx,Vec yy,Vec zz)
1131: {
1132:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
1134:   VecScatter     Mvctx = a->Mvctx;

1137:   if (a->Mvctx_mpi1_flg) Mvctx = a->Mvctx_mpi1;
1138:   VecScatterBegin(Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
1139:   (*a->A->ops->multadd)(a->A,xx,yy,zz);
1140:   VecScatterEnd(Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
1141:   (*a->B->ops->multadd)(a->B,a->lvec,zz,zz);
1142:   return(0);
1143: }

1145: PetscErrorCode MatMultTranspose_MPIAIJ(Mat A,Vec xx,Vec yy)
1146: {
1147:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;

1151:   /* do nondiagonal part */
1152:   (*a->B->ops->multtranspose)(a->B,xx,a->lvec);
1153:   /* do local part */
1154:   (*a->A->ops->multtranspose)(a->A,xx,yy);
1155:   /* add partial results together */
1156:   VecScatterBegin(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);
1157:   VecScatterEnd(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);
1158:   return(0);
1159: }

1161: PetscErrorCode MatIsTranspose_MPIAIJ(Mat Amat,Mat Bmat,PetscReal tol,PetscBool  *f)
1162: {
1163:   MPI_Comm       comm;
1164:   Mat_MPIAIJ     *Aij = (Mat_MPIAIJ*) Amat->data, *Bij;
1165:   Mat            Adia = Aij->A, Bdia, Aoff,Boff,*Aoffs,*Boffs;
1166:   IS             Me,Notme;
1168:   PetscInt       M,N,first,last,*notme,i;
1169:   PetscBool      lf;
1170:   PetscMPIInt    size;

1173:   /* Easy test: symmetric diagonal block */
1174:   Bij  = (Mat_MPIAIJ*) Bmat->data; Bdia = Bij->A;
1175:   MatIsTranspose(Adia,Bdia,tol,&lf);
1176:   MPIU_Allreduce(&lf,f,1,MPIU_BOOL,MPI_LAND,PetscObjectComm((PetscObject)Amat));
1177:   if (!*f) return(0);
1178:   PetscObjectGetComm((PetscObject)Amat,&comm);
1179:   MPI_Comm_size(comm,&size);
1180:   if (size == 1) return(0);

1182:   /* Hard test: off-diagonal block. This takes a MatCreateSubMatrix. */
1183:   MatGetSize(Amat,&M,&N);
1184:   MatGetOwnershipRange(Amat,&first,&last);
1185:   PetscMalloc1(N-last+first,&notme);
1186:   for (i=0; i<first; i++) notme[i] = i;
1187:   for (i=last; i<M; i++) notme[i-last+first] = i;
1188:   ISCreateGeneral(MPI_COMM_SELF,N-last+first,notme,PETSC_COPY_VALUES,&Notme);
1189:   ISCreateStride(MPI_COMM_SELF,last-first,first,1,&Me);
1190:   MatCreateSubMatrices(Amat,1,&Me,&Notme,MAT_INITIAL_MATRIX,&Aoffs);
1191:   Aoff = Aoffs[0];
1192:   MatCreateSubMatrices(Bmat,1,&Notme,&Me,MAT_INITIAL_MATRIX,&Boffs);
1193:   Boff = Boffs[0];
1194:   MatIsTranspose(Aoff,Boff,tol,f);
1195:   MatDestroyMatrices(1,&Aoffs);
1196:   MatDestroyMatrices(1,&Boffs);
1197:   ISDestroy(&Me);
1198:   ISDestroy(&Notme);
1199:   PetscFree(notme);
1200:   return(0);
1201: }

1203: PetscErrorCode MatIsSymmetric_MPIAIJ(Mat A,PetscReal tol,PetscBool  *f)
1204: {

1208:   MatIsTranspose_MPIAIJ(A,A,tol,f);
1209:   return(0);
1210: }

1212: PetscErrorCode MatMultTransposeAdd_MPIAIJ(Mat A,Vec xx,Vec yy,Vec zz)
1213: {
1214:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;

1218:   /* do nondiagonal part */
1219:   (*a->B->ops->multtranspose)(a->B,xx,a->lvec);
1220:   /* do local part */
1221:   (*a->A->ops->multtransposeadd)(a->A,xx,yy,zz);
1222:   /* add partial results together */
1223:   VecScatterBegin(a->Mvctx,a->lvec,zz,ADD_VALUES,SCATTER_REVERSE);
1224:   VecScatterEnd(a->Mvctx,a->lvec,zz,ADD_VALUES,SCATTER_REVERSE);
1225:   return(0);
1226: }

1228: /*
1229:   This only works correctly for square matrices where the subblock A->A is the
1230:    diagonal block
1231: */
1232: PetscErrorCode MatGetDiagonal_MPIAIJ(Mat A,Vec v)
1233: {
1235:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;

1238:   if (A->rmap->N != A->cmap->N) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Supports only square matrix where A->A is diag block");
1239:   if (A->rmap->rstart != A->cmap->rstart || A->rmap->rend != A->cmap->rend) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"row partition must equal col partition");
1240:   MatGetDiagonal(a->A,v);
1241:   return(0);
1242: }

1244: PetscErrorCode MatScale_MPIAIJ(Mat A,PetscScalar aa)
1245: {
1246:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;

1250:   MatScale(a->A,aa);
1251:   MatScale(a->B,aa);
1252:   return(0);
1253: }

1255: PetscErrorCode MatDestroy_MPIAIJ(Mat mat)
1256: {
1257:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;

1261: #if defined(PETSC_USE_LOG)
1262:   PetscLogObjectState((PetscObject)mat,"Rows=%D, Cols=%D",mat->rmap->N,mat->cmap->N);
1263: #endif
1264:   MatStashDestroy_Private(&mat->stash);
1265:   VecDestroy(&aij->diag);
1266:   MatDestroy(&aij->A);
1267:   MatDestroy(&aij->B);
1268: #if defined(PETSC_USE_CTABLE)
1269:   PetscTableDestroy(&aij->colmap);
1270: #else
1271:   PetscFree(aij->colmap);
1272: #endif
1273:   PetscFree(aij->garray);
1274:   VecDestroy(&aij->lvec);
1275:   VecScatterDestroy(&aij->Mvctx);
1276:   if (aij->Mvctx_mpi1) {VecScatterDestroy(&aij->Mvctx_mpi1);}
1277:   PetscFree2(aij->rowvalues,aij->rowindices);
1278:   PetscFree(aij->ld);
1279:   PetscFree(mat->data);

1281:   /* may be created by MatCreateMPIAIJSumSeqAIJSymbolic */
1282:   PetscObjectCompose((PetscObject)mat,"MatMergeSeqsToMPI",NULL);

1284:   PetscObjectChangeTypeName((PetscObject)mat,0);
1285:   PetscObjectComposeFunction((PetscObject)mat,"MatStoreValues_C",NULL);
1286:   PetscObjectComposeFunction((PetscObject)mat,"MatRetrieveValues_C",NULL);
1287:   PetscObjectComposeFunction((PetscObject)mat,"MatIsTranspose_C",NULL);
1288:   PetscObjectComposeFunction((PetscObject)mat,"MatMPIAIJSetPreallocation_C",NULL);
1289:   PetscObjectComposeFunction((PetscObject)mat,"MatResetPreallocation_C",NULL);
1290:   PetscObjectComposeFunction((PetscObject)mat,"MatMPIAIJSetPreallocationCSR_C",NULL);
1291:   PetscObjectComposeFunction((PetscObject)mat,"MatDiagonalScaleLocal_C",NULL);
1292:   PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpiaij_mpibaij_C",NULL);
1293:   PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpiaij_mpisbaij_C",NULL);
1294: #if defined(PETSC_HAVE_ELEMENTAL)
1295:   PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpiaij_elemental_C",NULL);
1296: #endif
1297: #if defined(PETSC_HAVE_SCALAPACK)
1298:   PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpiaij_scalapack_C",NULL);
1299: #endif
1300: #if defined(PETSC_HAVE_HYPRE)
1301:   PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpiaij_hypre_C",NULL);
1302:   PetscObjectComposeFunction((PetscObject)mat,"MatProductSetFromOptions_transpose_mpiaij_mpiaij_C",NULL);
1303: #endif
1304:   PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpiaij_is_C",NULL);
1305:   PetscObjectComposeFunction((PetscObject)mat,"MatProductSetFromOptions_is_mpiaij_C",NULL);
1306:   PetscObjectComposeFunction((PetscObject)mat,"MatProductSetFromOptions_mpiaij_mpiaij_C",NULL);
1307:   return(0);
1308: }

1310: PetscErrorCode MatView_MPIAIJ_Binary(Mat mat,PetscViewer viewer)
1311: {
1312:   Mat_MPIAIJ        *aij = (Mat_MPIAIJ*)mat->data;
1313:   Mat_SeqAIJ        *A   = (Mat_SeqAIJ*)aij->A->data;
1314:   Mat_SeqAIJ        *B   = (Mat_SeqAIJ*)aij->B->data;
1315:   const PetscInt    *garray = aij->garray;
1316:   PetscInt          header[4],M,N,m,rs,cs,nz,cnt,i,ja,jb;
1317:   PetscInt          *rowlens;
1318:   PetscInt          *colidxs;
1319:   PetscScalar       *matvals;
1320:   PetscErrorCode    ierr;

1323:   PetscViewerSetUp(viewer);

1325:   M  = mat->rmap->N;
1326:   N  = mat->cmap->N;
1327:   m  = mat->rmap->n;
1328:   rs = mat->rmap->rstart;
1329:   cs = mat->cmap->rstart;
1330:   nz = A->nz + B->nz;

1332:   /* write matrix header */
1333:   header[0] = MAT_FILE_CLASSID;
1334:   header[1] = M; header[2] = N; header[3] = nz;
1335:   MPI_Reduce(&nz,&header[3],1,MPIU_INT,MPI_SUM,0,PetscObjectComm((PetscObject)mat));
1336:   PetscViewerBinaryWrite(viewer,header,4,PETSC_INT);

1338:   /* fill in and store row lengths  */
1339:   PetscMalloc1(m,&rowlens);
1340:   for (i=0; i<m; i++) rowlens[i] = A->i[i+1] - A->i[i] + B->i[i+1] - B->i[i];
1341:   PetscViewerBinaryWriteAll(viewer,rowlens,m,rs,M,PETSC_INT);
1342:   PetscFree(rowlens);

1344:   /* fill in and store column indices */
1345:   PetscMalloc1(nz,&colidxs);
1346:   for (cnt=0, i=0; i<m; i++) {
1347:     for (jb=B->i[i]; jb<B->i[i+1]; jb++) {
1348:       if (garray[B->j[jb]] > cs) break;
1349:       colidxs[cnt++] = garray[B->j[jb]];
1350:     }
1351:     for (ja=A->i[i]; ja<A->i[i+1]; ja++)
1352:       colidxs[cnt++] = A->j[ja] + cs;
1353:     for (; jb<B->i[i+1]; jb++)
1354:       colidxs[cnt++] = garray[B->j[jb]];
1355:   }
1356:   if (cnt != nz) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_LIB,"Internal PETSc error: cnt = %D nz = %D",cnt,nz);
1357:   PetscViewerBinaryWriteAll(viewer,colidxs,nz,PETSC_DETERMINE,PETSC_DETERMINE,PETSC_INT);
1358:   PetscFree(colidxs);

1360:   /* fill in and store nonzero values */
1361:   PetscMalloc1(nz,&matvals);
1362:   for (cnt=0, i=0; i<m; i++) {
1363:     for (jb=B->i[i]; jb<B->i[i+1]; jb++) {
1364:       if (garray[B->j[jb]] > cs) break;
1365:       matvals[cnt++] = B->a[jb];
1366:     }
1367:     for (ja=A->i[i]; ja<A->i[i+1]; ja++)
1368:       matvals[cnt++] = A->a[ja];
1369:     for (; jb<B->i[i+1]; jb++)
1370:       matvals[cnt++] = B->a[jb];
1371:   }
1372:   if (cnt != nz) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_LIB,"Internal PETSc error: cnt = %D nz = %D",cnt,nz);
1373:   PetscViewerBinaryWriteAll(viewer,matvals,nz,PETSC_DETERMINE,PETSC_DETERMINE,PETSC_SCALAR);
1374:   PetscFree(matvals);

1376:   /* write block size option to the viewer's .info file */
1377:   MatView_Binary_BlockSizes(mat,viewer);
1378:   return(0);
1379: }

1381:  #include <petscdraw.h>
1382: PetscErrorCode MatView_MPIAIJ_ASCIIorDraworSocket(Mat mat,PetscViewer viewer)
1383: {
1384:   Mat_MPIAIJ        *aij = (Mat_MPIAIJ*)mat->data;
1385:   PetscErrorCode    ierr;
1386:   PetscMPIInt       rank = aij->rank,size = aij->size;
1387:   PetscBool         isdraw,iascii,isbinary;
1388:   PetscViewer       sviewer;
1389:   PetscViewerFormat format;

1392:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERDRAW,&isdraw);
1393:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);
1394:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERBINARY,&isbinary);
1395:   if (iascii) {
1396:     PetscViewerGetFormat(viewer,&format);
1397:     if (format == PETSC_VIEWER_LOAD_BALANCE) {
1398:       PetscInt i,nmax = 0,nmin = PETSC_MAX_INT,navg = 0,*nz,nzlocal = ((Mat_SeqAIJ*) (aij->A->data))->nz + ((Mat_SeqAIJ*) (aij->B->data))->nz;
1399:       PetscMalloc1(size,&nz);
1400:       MPI_Allgather(&nzlocal,1,MPIU_INT,nz,1,MPIU_INT,PetscObjectComm((PetscObject)mat));
1401:       for (i=0; i<(PetscInt)size; i++) {
1402:         nmax = PetscMax(nmax,nz[i]);
1403:         nmin = PetscMin(nmin,nz[i]);
1404:         navg += nz[i];
1405:       }
1406:       PetscFree(nz);
1407:       navg = navg/size;
1408:       PetscViewerASCIIPrintf(viewer,"Load Balance - Nonzeros: Min %D  avg %D  max %D\n",nmin,navg,nmax);
1409:       return(0);
1410:     }
1411:     PetscViewerGetFormat(viewer,&format);
1412:     if (format == PETSC_VIEWER_ASCII_INFO_DETAIL) {
1413:       MatInfo   info;
1414:       PetscBool inodes;

1416:       MPI_Comm_rank(PetscObjectComm((PetscObject)mat),&rank);
1417:       MatGetInfo(mat,MAT_LOCAL,&info);
1418:       MatInodeGetInodeSizes(aij->A,NULL,(PetscInt**)&inodes,NULL);
1419:       PetscViewerASCIIPushSynchronized(viewer);
1420:       if (!inodes) {
1421:         PetscViewerASCIISynchronizedPrintf(viewer,"[%d] Local rows %D nz %D nz alloced %D mem %g, not using I-node routines\n",
1422:                                                   rank,mat->rmap->n,(PetscInt)info.nz_used,(PetscInt)info.nz_allocated,(double)info.memory);
1423:       } else {
1424:         PetscViewerASCIISynchronizedPrintf(viewer,"[%d] Local rows %D nz %D nz alloced %D mem %g, using I-node routines\n",
1425:                                                   rank,mat->rmap->n,(PetscInt)info.nz_used,(PetscInt)info.nz_allocated,(double)info.memory);
1426:       }
1427:       MatGetInfo(aij->A,MAT_LOCAL,&info);
1428:       PetscViewerASCIISynchronizedPrintf(viewer,"[%d] on-diagonal part: nz %D \n",rank,(PetscInt)info.nz_used);
1429:       MatGetInfo(aij->B,MAT_LOCAL,&info);
1430:       PetscViewerASCIISynchronizedPrintf(viewer,"[%d] off-diagonal part: nz %D \n",rank,(PetscInt)info.nz_used);
1431:       PetscViewerFlush(viewer);
1432:       PetscViewerASCIIPopSynchronized(viewer);
1433:       PetscViewerASCIIPrintf(viewer,"Information on VecScatter used in matrix-vector product: \n");
1434:       VecScatterView(aij->Mvctx,viewer);
1435:       return(0);
1436:     } else if (format == PETSC_VIEWER_ASCII_INFO) {
1437:       PetscInt inodecount,inodelimit,*inodes;
1438:       MatInodeGetInodeSizes(aij->A,&inodecount,&inodes,&inodelimit);
1439:       if (inodes) {
1440:         PetscViewerASCIIPrintf(viewer,"using I-node (on process 0) routines: found %D nodes, limit used is %D\n",inodecount,inodelimit);
1441:       } else {
1442:         PetscViewerASCIIPrintf(viewer,"not using I-node (on process 0) routines\n");
1443:       }
1444:       return(0);
1445:     } else if (format == PETSC_VIEWER_ASCII_FACTOR_INFO) {
1446:       return(0);
1447:     }
1448:   } else if (isbinary) {
1449:     if (size == 1) {
1450:       PetscObjectSetName((PetscObject)aij->A,((PetscObject)mat)->name);
1451:       MatView(aij->A,viewer);
1452:     } else {
1453:       MatView_MPIAIJ_Binary(mat,viewer);
1454:     }
1455:     return(0);
1456:   } else if (iascii && size == 1) {
1457:     PetscObjectSetName((PetscObject)aij->A,((PetscObject)mat)->name);
1458:     MatView(aij->A,viewer);
1459:     return(0);
1460:   } else if (isdraw) {
1461:     PetscDraw draw;
1462:     PetscBool isnull;
1463:     PetscViewerDrawGetDraw(viewer,0,&draw);
1464:     PetscDrawIsNull(draw,&isnull);
1465:     if (isnull) return(0);
1466:   }

1468:   { /* assemble the entire matrix onto first processor */
1469:     Mat A = NULL, Av;
1470:     IS  isrow,iscol;

1472:     ISCreateStride(PetscObjectComm((PetscObject)mat),!rank ? mat->rmap->N : 0,0,1,&isrow);
1473:     ISCreateStride(PetscObjectComm((PetscObject)mat),!rank ? mat->cmap->N : 0,0,1,&iscol);
1474:     MatCreateSubMatrix(mat,isrow,iscol,MAT_INITIAL_MATRIX,&A);
1475:     MatMPIAIJGetSeqAIJ(A,&Av,NULL,NULL);
1476: /*  The commented-out code below uses MatCreateSubMatrices() instead */
1477: /*
1478:     Mat *AA, A = NULL, Av;
1479:     IS  isrow,iscol;

1481:     ISCreateStride(PetscObjectComm((PetscObject)mat),!rank ? mat->rmap->N : 0,0,1,&isrow);
1482:     ISCreateStride(PetscObjectComm((PetscObject)mat),!rank ? mat->cmap->N : 0,0,1,&iscol);
1483:     MatCreateSubMatrices(mat,1,&isrow,&iscol,MAT_INITIAL_MATRIX,&AA);
1484:     if (!rank) {
1485:        PetscObjectReference((PetscObject)AA[0]);
1486:        A    = AA[0];
1487:        Av   = AA[0];
1488:     }
1489:     MatDestroySubMatrices(1,&AA);
1490: */
1491:     ISDestroy(&iscol);
1492:     ISDestroy(&isrow);
1493:     /*
1494:        Every process has to participate in drawing the matrix since the graphics waits are
1495:        synchronized across all processes that share the PetscDraw object
1496:     */
1497:     PetscViewerGetSubViewer(viewer,PETSC_COMM_SELF,&sviewer);
1498:     if (!rank) {
1499:       if (((PetscObject)mat)->name) {
1500:         PetscObjectSetName((PetscObject)Av,((PetscObject)mat)->name);
1501:       }
1502:       MatView_SeqAIJ(Av,sviewer);
1503:     }
1504:     PetscViewerRestoreSubViewer(viewer,PETSC_COMM_SELF,&sviewer);
1505:     PetscViewerFlush(viewer);
1506:     MatDestroy(&A);
1507:   }
1508:   return(0);
1509: }

1511: PetscErrorCode MatView_MPIAIJ(Mat mat,PetscViewer viewer)
1512: {
1514:   PetscBool      iascii,isdraw,issocket,isbinary;

1517:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);
1518:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERDRAW,&isdraw);
1519:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERBINARY,&isbinary);
1520:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERSOCKET,&issocket);
1521:   if (iascii || isdraw || isbinary || issocket) {
1522:     MatView_MPIAIJ_ASCIIorDraworSocket(mat,viewer);
1523:   }
1524:   return(0);
1525: }

1527: PetscErrorCode MatSOR_MPIAIJ(Mat matin,Vec bb,PetscReal omega,MatSORType flag,PetscReal fshift,PetscInt its,PetscInt lits,Vec xx)
1528: {
1529:   Mat_MPIAIJ     *mat = (Mat_MPIAIJ*)matin->data;
1531:   Vec            bb1 = 0;
1532:   PetscBool      hasop;

1535:   if (flag == SOR_APPLY_UPPER) {
1536:     (*mat->A->ops->sor)(mat->A,bb,omega,flag,fshift,lits,1,xx);
1537:     return(0);
1538:   }

1540:   if (its > 1 || ~flag & SOR_ZERO_INITIAL_GUESS || flag & SOR_EISENSTAT) {
1541:     VecDuplicate(bb,&bb1);
1542:   }

1544:   if ((flag & SOR_LOCAL_SYMMETRIC_SWEEP) == SOR_LOCAL_SYMMETRIC_SWEEP) {
1545:     if (flag & SOR_ZERO_INITIAL_GUESS) {
1546:       (*mat->A->ops->sor)(mat->A,bb,omega,flag,fshift,lits,1,xx);
1547:       its--;
1548:     }

1550:     while (its--) {
1551:       VecScatterBegin(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
1552:       VecScatterEnd(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);

1554:       /* update rhs: bb1 = bb - B*x */
1555:       VecScale(mat->lvec,-1.0);
1556:       (*mat->B->ops->multadd)(mat->B,mat->lvec,bb,bb1);

1558:       /* local sweep */
1559:       (*mat->A->ops->sor)(mat->A,bb1,omega,SOR_SYMMETRIC_SWEEP,fshift,lits,1,xx);
1560:     }
1561:   } else if (flag & SOR_LOCAL_FORWARD_SWEEP) {
1562:     if (flag & SOR_ZERO_INITIAL_GUESS) {
1563:       (*mat->A->ops->sor)(mat->A,bb,omega,flag,fshift,lits,1,xx);
1564:       its--;
1565:     }
1566:     while (its--) {
1567:       VecScatterBegin(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
1568:       VecScatterEnd(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);

1570:       /* update rhs: bb1 = bb - B*x */
1571:       VecScale(mat->lvec,-1.0);
1572:       (*mat->B->ops->multadd)(mat->B,mat->lvec,bb,bb1);

1574:       /* local sweep */
1575:       (*mat->A->ops->sor)(mat->A,bb1,omega,SOR_FORWARD_SWEEP,fshift,lits,1,xx);
1576:     }
1577:   } else if (flag & SOR_LOCAL_BACKWARD_SWEEP) {
1578:     if (flag & SOR_ZERO_INITIAL_GUESS) {
1579:       (*mat->A->ops->sor)(mat->A,bb,omega,flag,fshift,lits,1,xx);
1580:       its--;
1581:     }
1582:     while (its--) {
1583:       VecScatterBegin(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
1584:       VecScatterEnd(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);

1586:       /* update rhs: bb1 = bb - B*x */
1587:       VecScale(mat->lvec,-1.0);
1588:       (*mat->B->ops->multadd)(mat->B,mat->lvec,bb,bb1);

1590:       /* local sweep */
1591:       (*mat->A->ops->sor)(mat->A,bb1,omega,SOR_BACKWARD_SWEEP,fshift,lits,1,xx);
1592:     }
1593:   } else if (flag & SOR_EISENSTAT) {
1594:     Vec xx1;

1596:     VecDuplicate(bb,&xx1);
1597:     (*mat->A->ops->sor)(mat->A,bb,omega,(MatSORType)(SOR_ZERO_INITIAL_GUESS | SOR_LOCAL_BACKWARD_SWEEP),fshift,lits,1,xx);

1599:     VecScatterBegin(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
1600:     VecScatterEnd(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
1601:     if (!mat->diag) {
1602:       MatCreateVecs(matin,&mat->diag,NULL);
1603:       MatGetDiagonal(matin,mat->diag);
1604:     }
1605:     MatHasOperation(matin,MATOP_MULT_DIAGONAL_BLOCK,&hasop);
1606:     if (hasop) {
1607:       MatMultDiagonalBlock(matin,xx,bb1);
1608:     } else {
1609:       VecPointwiseMult(bb1,mat->diag,xx);
1610:     }
1611:     VecAYPX(bb1,(omega-2.0)/omega,bb);

1613:     MatMultAdd(mat->B,mat->lvec,bb1,bb1);

1615:     /* local sweep */
1616:     (*mat->A->ops->sor)(mat->A,bb1,omega,(MatSORType)(SOR_ZERO_INITIAL_GUESS | SOR_LOCAL_FORWARD_SWEEP),fshift,lits,1,xx1);
1617:     VecAXPY(xx,1.0,xx1);
1618:     VecDestroy(&xx1);
1619:   } else SETERRQ(PetscObjectComm((PetscObject)matin),PETSC_ERR_SUP,"Parallel SOR not supported");

1621:   VecDestroy(&bb1);

1623:   matin->factorerrortype = mat->A->factorerrortype;
1624:   return(0);
1625: }
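/*
   Illustrative sketch (not part of this source): MatSOR_MPIAIJ performs only process-local
   sweeps; each outer iteration refreshes the ghost values in mat->lvec and forms
   bb1 = bb - B*x before sweeping the diagonal block. A minimal direct call, assuming an
   assembled MATMPIAIJ matrix A and conforming vectors b and x (placeholder names):

     MatSOR(A,b,1.0,(MatSORType)(SOR_LOCAL_SYMMETRIC_SWEEP | SOR_ZERO_INITIAL_GUESS),0.0,2,1,x);

   Only the SOR_LOCAL_* variants (plus SOR_EISENSTAT and SOR_APPLY_UPPER) are supported here;
   a true parallel SOR raises PETSC_ERR_SUP as coded above.
*/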

1627: PetscErrorCode MatPermute_MPIAIJ(Mat A,IS rowp,IS colp,Mat *B)
1628: {
1629:   Mat            aA,aB,Aperm;
1630:   const PetscInt *rwant,*cwant,*gcols,*ai,*bi,*aj,*bj;
1631:   PetscScalar    *aa,*ba;
1632:   PetscInt       i,j,m,n,ng,anz,bnz,*dnnz,*onnz,*tdnnz,*tonnz,*rdest,*cdest,*work,*gcdest;
1633:   PetscSF        rowsf,sf;
1634:   IS             parcolp = NULL;
1635:   PetscBool      done;

1639:   MatGetLocalSize(A,&m,&n);
1640:   ISGetIndices(rowp,&rwant);
1641:   ISGetIndices(colp,&cwant);
1642:   PetscMalloc3(PetscMax(m,n),&work,m,&rdest,n,&cdest);

1644:   /* Invert row permutation to find out where my rows should go */
1645:   PetscSFCreate(PetscObjectComm((PetscObject)A),&rowsf);
1646:   PetscSFSetGraphLayout(rowsf,A->rmap,A->rmap->n,NULL,PETSC_OWN_POINTER,rwant);
1647:   PetscSFSetFromOptions(rowsf);
1648:   for (i=0; i<m; i++) work[i] = A->rmap->rstart + i;
1649:   PetscSFReduceBegin(rowsf,MPIU_INT,work,rdest,MPIU_REPLACE);
1650:   PetscSFReduceEnd(rowsf,MPIU_INT,work,rdest,MPIU_REPLACE);

1652:   /* Invert column permutation to find out where my columns should go */
1653:   PetscSFCreate(PetscObjectComm((PetscObject)A),&sf);
1654:   PetscSFSetGraphLayout(sf,A->cmap,A->cmap->n,NULL,PETSC_OWN_POINTER,cwant);
1655:   PetscSFSetFromOptions(sf);
1656:   for (i=0; i<n; i++) work[i] = A->cmap->rstart + i;
1657:   PetscSFReduceBegin(sf,MPIU_INT,work,cdest,MPIU_REPLACE);
1658:   PetscSFReduceEnd(sf,MPIU_INT,work,cdest,MPIU_REPLACE);
1659:   PetscSFDestroy(&sf);

1661:   ISRestoreIndices(rowp,&rwant);
1662:   ISRestoreIndices(colp,&cwant);
1663:   MatMPIAIJGetSeqAIJ(A,&aA,&aB,&gcols);

1665:   /* Find out where my gcols should go */
1666:   MatGetSize(aB,NULL,&ng);
1667:   PetscMalloc1(ng,&gcdest);
1668:   PetscSFCreate(PetscObjectComm((PetscObject)A),&sf);
1669:   PetscSFSetGraphLayout(sf,A->cmap,ng,NULL,PETSC_OWN_POINTER,gcols);
1670:   PetscSFSetFromOptions(sf);
1671:   PetscSFBcastBegin(sf,MPIU_INT,cdest,gcdest);
1672:   PetscSFBcastEnd(sf,MPIU_INT,cdest,gcdest);
1673:   PetscSFDestroy(&sf);

1675:   PetscCalloc4(m,&dnnz,m,&onnz,m,&tdnnz,m,&tonnz);
1676:   MatGetRowIJ(aA,0,PETSC_FALSE,PETSC_FALSE,&anz,&ai,&aj,&done);
1677:   MatGetRowIJ(aB,0,PETSC_FALSE,PETSC_FALSE,&bnz,&bi,&bj,&done);
1678:   for (i=0; i<m; i++) {
1679:     PetscInt    row = rdest[i];
1680:     PetscMPIInt rowner;
1681:     PetscLayoutFindOwner(A->rmap,row,&rowner);
1682:     for (j=ai[i]; j<ai[i+1]; j++) {
1683:       PetscInt    col = cdest[aj[j]];
1684:       PetscMPIInt cowner;
1685:       PetscLayoutFindOwner(A->cmap,col,&cowner); /* Could build an index for the columns to eliminate this search */
1686:       if (rowner == cowner) dnnz[i]++;
1687:       else onnz[i]++;
1688:     }
1689:     for (j=bi[i]; j<bi[i+1]; j++) {
1690:       PetscInt    col = gcdest[bj[j]];
1691:       PetscMPIInt cowner;
1692:       PetscLayoutFindOwner(A->cmap,col,&cowner);
1693:       if (rowner == cowner) dnnz[i]++;
1694:       else onnz[i]++;
1695:     }
1696:   }
1697:   PetscSFBcastBegin(rowsf,MPIU_INT,dnnz,tdnnz);
1698:   PetscSFBcastEnd(rowsf,MPIU_INT,dnnz,tdnnz);
1699:   PetscSFBcastBegin(rowsf,MPIU_INT,onnz,tonnz);
1700:   PetscSFBcastEnd(rowsf,MPIU_INT,onnz,tonnz);
1701:   PetscSFDestroy(&rowsf);

1703:   MatCreateAIJ(PetscObjectComm((PetscObject)A),A->rmap->n,A->cmap->n,A->rmap->N,A->cmap->N,0,tdnnz,0,tonnz,&Aperm);
1704:   MatSeqAIJGetArray(aA,&aa);
1705:   MatSeqAIJGetArray(aB,&ba);
1706:   for (i=0; i<m; i++) {
1707:     PetscInt *acols = dnnz,*bcols = onnz; /* Repurpose now-unneeded arrays */
1708:     PetscInt j0,rowlen;
1709:     rowlen = ai[i+1] - ai[i];
1710:     for (j0=j=0; j<rowlen; j0=j) { /* rowlen could be larger than number of rows m, so sum in batches */
1711:       for ( ; j<PetscMin(rowlen,j0+m); j++) acols[j-j0] = cdest[aj[ai[i]+j]];
1712:       MatSetValues(Aperm,1,&rdest[i],j-j0,acols,aa+ai[i]+j0,INSERT_VALUES);
1713:     }
1714:     rowlen = bi[i+1] - bi[i];
1715:     for (j0=j=0; j<rowlen; j0=j) {
1716:       for ( ; j<PetscMin(rowlen,j0+m); j++) bcols[j-j0] = gcdest[bj[bi[i]+j]];
1717:       MatSetValues(Aperm,1,&rdest[i],j-j0,bcols,ba+bi[i]+j0,INSERT_VALUES);
1718:     }
1719:   }
1720:   MatAssemblyBegin(Aperm,MAT_FINAL_ASSEMBLY);
1721:   MatAssemblyEnd(Aperm,MAT_FINAL_ASSEMBLY);
1722:   MatRestoreRowIJ(aA,0,PETSC_FALSE,PETSC_FALSE,&anz,&ai,&aj,&done);
1723:   MatRestoreRowIJ(aB,0,PETSC_FALSE,PETSC_FALSE,&bnz,&bi,&bj,&done);
1724:   MatSeqAIJRestoreArray(aA,&aa);
1725:   MatSeqAIJRestoreArray(aB,&ba);
1726:   PetscFree4(dnnz,onnz,tdnnz,tonnz);
1727:   PetscFree3(work,rdest,cdest);
1728:   PetscFree(gcdest);
1729:   if (parcolp) {ISDestroy(&colp);}
1730:   *B = Aperm;
1731:   return(0);
1732: }
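/*
   Illustrative usage sketch (not part of this source): the routine above inverts the row and
   column permutations with PetscSFs, counts diagonal/off-diagonal nonzeros per destination
   row, and assembles the permuted matrix with MatSetValues(). Placeholder names rowperm and
   colperm are index sets giving the new global ordering, one IS per process:

     Mat B;
     MatPermute(A,rowperm,colperm,&B);
     ...
     MatDestroy(&B);
*/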

1734: PetscErrorCode  MatGetGhosts_MPIAIJ(Mat mat,PetscInt *nghosts,const PetscInt *ghosts[])
1735: {
1736:   Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;

1740:   MatGetSize(aij->B,NULL,nghosts);
1741:   if (ghosts) *ghosts = aij->garray;
1742:   return(0);
1743: }
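/*
   Illustrative sketch (not part of this source): the ghosts returned above are the global
   column indices of the off-diagonal block (aij->garray). Placeholder names mat, nghost, ghosts:

     PetscInt       nghost;
     const PetscInt *ghosts;
     MatGetGhosts(mat,&nghost,&ghosts);
     the array ghosts is owned by the matrix and must not be freed by the caller
*/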

1745: PetscErrorCode MatGetInfo_MPIAIJ(Mat matin,MatInfoType flag,MatInfo *info)
1746: {
1747:   Mat_MPIAIJ     *mat = (Mat_MPIAIJ*)matin->data;
1748:   Mat            A    = mat->A,B = mat->B;
1750:   PetscLogDouble isend[5],irecv[5];

1753:   info->block_size = 1.0;
1754:   MatGetInfo(A,MAT_LOCAL,info);

1756:   isend[0] = info->nz_used; isend[1] = info->nz_allocated; isend[2] = info->nz_unneeded;
1757:   isend[3] = info->memory;  isend[4] = info->mallocs;

1759:   MatGetInfo(B,MAT_LOCAL,info);

1761:   isend[0] += info->nz_used; isend[1] += info->nz_allocated; isend[2] += info->nz_unneeded;
1762:   isend[3] += info->memory;  isend[4] += info->mallocs;
1763:   if (flag == MAT_LOCAL) {
1764:     info->nz_used      = isend[0];
1765:     info->nz_allocated = isend[1];
1766:     info->nz_unneeded  = isend[2];
1767:     info->memory       = isend[3];
1768:     info->mallocs      = isend[4];
1769:   } else if (flag == MAT_GLOBAL_MAX) {
1770:     MPIU_Allreduce(isend,irecv,5,MPIU_PETSCLOGDOUBLE,MPI_MAX,PetscObjectComm((PetscObject)matin));

1772:     info->nz_used      = irecv[0];
1773:     info->nz_allocated = irecv[1];
1774:     info->nz_unneeded  = irecv[2];
1775:     info->memory       = irecv[3];
1776:     info->mallocs      = irecv[4];
1777:   } else if (flag == MAT_GLOBAL_SUM) {
1778:     MPIU_Allreduce(isend,irecv,5,MPIU_PETSCLOGDOUBLE,MPI_SUM,PetscObjectComm((PetscObject)matin));

1780:     info->nz_used      = irecv[0];
1781:     info->nz_allocated = irecv[1];
1782:     info->nz_unneeded  = irecv[2];
1783:     info->memory       = irecv[3];
1784:     info->mallocs      = irecv[4];
1785:   }
1786:   info->fill_ratio_given  = 0; /* no parallel LU/ILU/Cholesky */
1787:   info->fill_ratio_needed = 0;
1788:   info->factor_mallocs    = 0;
1789:   return(0);
1790: }
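/*
   Illustrative sketch (not part of this source): querying the storage statistics gathered by
   the routine above, reduced over the communicator with MAT_GLOBAL_SUM or MAT_GLOBAL_MAX.
   A is a placeholder for an assembled MATMPIAIJ matrix.

     MatInfo info;
     MatGetInfo(A,MAT_GLOBAL_SUM,&info);
     PetscPrintf(PETSC_COMM_WORLD,"nonzeros used %g allocated %g\n",info.nz_used,info.nz_allocated);
*/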

1792: PetscErrorCode MatSetOption_MPIAIJ(Mat A,MatOption op,PetscBool flg)
1793: {
1794:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;

1798:   switch (op) {
1799:   case MAT_NEW_NONZERO_LOCATIONS:
1800:   case MAT_NEW_NONZERO_ALLOCATION_ERR:
1801:   case MAT_UNUSED_NONZERO_LOCATION_ERR:
1802:   case MAT_KEEP_NONZERO_PATTERN:
1803:   case MAT_NEW_NONZERO_LOCATION_ERR:
1804:   case MAT_USE_INODES:
1805:   case MAT_IGNORE_ZERO_ENTRIES:
1806:     MatCheckPreallocated(A,1);
1807:     MatSetOption(a->A,op,flg);
1808:     MatSetOption(a->B,op,flg);
1809:     break;
1810:   case MAT_ROW_ORIENTED:
1811:     MatCheckPreallocated(A,1);
1812:     a->roworiented = flg;

1814:     MatSetOption(a->A,op,flg);
1815:     MatSetOption(a->B,op,flg);
1816:     break;
1817:   case MAT_NEW_DIAGONALS:
1818:   case MAT_SORTED_FULL:
1819:     PetscInfo1(A,"Option %s ignored\n",MatOptions[op]);
1820:     break;
1821:   case MAT_IGNORE_OFF_PROC_ENTRIES:
1822:     a->donotstash = flg;
1823:     break;
1824:   /* Symmetry flags are handled directly by MatSetOption() and they don't affect preallocation */
1825:   case MAT_SPD:
1826:   case MAT_SYMMETRIC:
1827:   case MAT_STRUCTURALLY_SYMMETRIC:
1828:   case MAT_HERMITIAN:
1829:   case MAT_SYMMETRY_ETERNAL:
1830:     break;
1831:   case MAT_SUBMAT_SINGLEIS:
1832:     A->submat_singleis = flg;
1833:     break;
1834:   case MAT_STRUCTURE_ONLY:
1835:     /* The option is handled directly by MatSetOption() */
1836:     break;
1837:   default:
1838:     SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"unknown option %d",op);
1839:   }
1840:   return(0);
1841: }

1843: PetscErrorCode MatGetRow_MPIAIJ(Mat matin,PetscInt row,PetscInt *nz,PetscInt **idx,PetscScalar **v)
1844: {
1845:   Mat_MPIAIJ     *mat = (Mat_MPIAIJ*)matin->data;
1846:   PetscScalar    *vworkA,*vworkB,**pvA,**pvB,*v_p;
1848:   PetscInt       i,*cworkA,*cworkB,**pcA,**pcB,cstart = matin->cmap->rstart;
1849:   PetscInt       nztot,nzA,nzB,lrow,rstart = matin->rmap->rstart,rend = matin->rmap->rend;
1850:   PetscInt       *cmap,*idx_p;

1853:   if (mat->getrowactive) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Already active");
1854:   mat->getrowactive = PETSC_TRUE;

1856:   if (!mat->rowvalues && (idx || v)) {
1857:     /*
1858:         allocate enough space to hold information from the longest row.
1859:     */
1860:     Mat_SeqAIJ *Aa = (Mat_SeqAIJ*)mat->A->data,*Ba = (Mat_SeqAIJ*)mat->B->data;
1861:     PetscInt   max = 1,tmp;
1862:     for (i=0; i<matin->rmap->n; i++) {
1863:       tmp = Aa->i[i+1] - Aa->i[i] + Ba->i[i+1] - Ba->i[i];
1864:       if (max < tmp) max = tmp;
1865:     }
1866:     PetscMalloc2(max,&mat->rowvalues,max,&mat->rowindices);
1867:   }

1869:   if (row < rstart || row >= rend) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Only local rows");
1870:   lrow = row - rstart;

1872:   pvA = &vworkA; pcA = &cworkA; pvB = &vworkB; pcB = &cworkB;
1873:   if (!v)   {pvA = 0; pvB = 0;}
1874:   if (!idx) {pcA = 0; if (!v) pcB = 0;}
1875:   (*mat->A->ops->getrow)(mat->A,lrow,&nzA,pcA,pvA);
1876:   (*mat->B->ops->getrow)(mat->B,lrow,&nzB,pcB,pvB);
1877:   nztot = nzA + nzB;

1879:   cmap = mat->garray;
1880:   if (v  || idx) {
1881:     if (nztot) {
1882:       /* Sort by increasing column numbers, assuming A and B already sorted */
1883:       PetscInt imark = -1;
1884:       if (v) {
1885:         *v = v_p = mat->rowvalues;
1886:         for (i=0; i<nzB; i++) {
1887:           if (cmap[cworkB[i]] < cstart) v_p[i] = vworkB[i];
1888:           else break;
1889:         }
1890:         imark = i;
1891:         for (i=0; i<nzA; i++)     v_p[imark+i] = vworkA[i];
1892:         for (i=imark; i<nzB; i++) v_p[nzA+i]   = vworkB[i];
1893:       }
1894:       if (idx) {
1895:         *idx = idx_p = mat->rowindices;
1896:         if (imark > -1) {
1897:           for (i=0; i<imark; i++) {
1898:             idx_p[i] = cmap[cworkB[i]];
1899:           }
1900:         } else {
1901:           for (i=0; i<nzB; i++) {
1902:             if (cmap[cworkB[i]] < cstart) idx_p[i] = cmap[cworkB[i]];
1903:             else break;
1904:           }
1905:           imark = i;
1906:         }
1907:         for (i=0; i<nzA; i++)     idx_p[imark+i] = cstart + cworkA[i];
1908:         for (i=imark; i<nzB; i++) idx_p[nzA+i]   = cmap[cworkB[i]];
1909:       }
1910:     } else {
1911:       if (idx) *idx = 0;
1912:       if (v)   *v   = 0;
1913:     }
1914:   }
1915:   *nz  = nztot;
1916:   (*mat->A->ops->restorerow)(mat->A,lrow,&nzA,pcA,pvA);
1917:   (*mat->B->ops->restorerow)(mat->B,lrow,&nzB,pcB,pvB);
1918:   return(0);
1919: }

1921: PetscErrorCode MatRestoreRow_MPIAIJ(Mat mat,PetscInt row,PetscInt *nz,PetscInt **idx,PetscScalar **v)
1922: {
1923:   Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;

1926:   if (!aij->getrowactive) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"MatGetRow() must be called first");
1927:   aij->getrowactive = PETSC_FALSE;
1928:   return(0);
1929: }
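/*
   Illustrative sketch (not part of this source): iterating over the locally owned rows with
   the routines above; MatGetRow() and MatRestoreRow() must be paired and only local rows may
   be requested, as the error checks above enforce. A is a placeholder assembled matrix.

     PetscInt          r,rstart,rend,ncols;
     const PetscInt    *cols;
     const PetscScalar *vals;
     MatGetOwnershipRange(A,&rstart,&rend);
     for (r=rstart; r<rend; r++) {
       MatGetRow(A,r,&ncols,&cols,&vals);
       ... use cols[] and vals[] ...
       MatRestoreRow(A,r,&ncols,&cols,&vals);
     }
*/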

1931: PetscErrorCode MatNorm_MPIAIJ(Mat mat,NormType type,PetscReal *norm)
1932: {
1933:   Mat_MPIAIJ     *aij  = (Mat_MPIAIJ*)mat->data;
1934:   Mat_SeqAIJ     *amat = (Mat_SeqAIJ*)aij->A->data,*bmat = (Mat_SeqAIJ*)aij->B->data;
1936:   PetscInt       i,j,cstart = mat->cmap->rstart;
1937:   PetscReal      sum = 0.0;
1938:   MatScalar      *v;

1941:   if (aij->size == 1) {
1942:      MatNorm(aij->A,type,norm);
1943:   } else {
1944:     if (type == NORM_FROBENIUS) {
1945:       v = amat->a;
1946:       for (i=0; i<amat->nz; i++) {
1947:         sum += PetscRealPart(PetscConj(*v)*(*v)); v++;
1948:       }
1949:       v = bmat->a;
1950:       for (i=0; i<bmat->nz; i++) {
1951:         sum += PetscRealPart(PetscConj(*v)*(*v)); v++;
1952:       }
1953:       MPIU_Allreduce(&sum,norm,1,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)mat));
1954:       *norm = PetscSqrtReal(*norm);
1955:       PetscLogFlops(2*amat->nz+2*bmat->nz);
1956:     } else if (type == NORM_1) { /* max column norm */
1957:       PetscReal *tmp,*tmp2;
1958:       PetscInt  *jj,*garray = aij->garray;
1959:       PetscCalloc1(mat->cmap->N+1,&tmp);
1960:       PetscMalloc1(mat->cmap->N+1,&tmp2);
1961:       *norm = 0.0;
1962:       v     = amat->a; jj = amat->j;
1963:       for (j=0; j<amat->nz; j++) {
1964:         tmp[cstart + *jj++] += PetscAbsScalar(*v);  v++;
1965:       }
1966:       v = bmat->a; jj = bmat->j;
1967:       for (j=0; j<bmat->nz; j++) {
1968:         tmp[garray[*jj++]] += PetscAbsScalar(*v); v++;
1969:       }
1970:       MPIU_Allreduce(tmp,tmp2,mat->cmap->N,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)mat));
1971:       for (j=0; j<mat->cmap->N; j++) {
1972:         if (tmp2[j] > *norm) *norm = tmp2[j];
1973:       }
1974:       PetscFree(tmp);
1975:       PetscFree(tmp2);
1976:       PetscLogFlops(PetscMax(amat->nz+bmat->nz-1,0));
1977:     } else if (type == NORM_INFINITY) { /* max row norm */
1978:       PetscReal ntemp = 0.0;
1979:       for (j=0; j<aij->A->rmap->n; j++) {
1980:         v   = amat->a + amat->i[j];
1981:         sum = 0.0;
1982:         for (i=0; i<amat->i[j+1]-amat->i[j]; i++) {
1983:           sum += PetscAbsScalar(*v); v++;
1984:         }
1985:         v = bmat->a + bmat->i[j];
1986:         for (i=0; i<bmat->i[j+1]-bmat->i[j]; i++) {
1987:           sum += PetscAbsScalar(*v); v++;
1988:         }
1989:         if (sum > ntemp) ntemp = sum;
1990:       }
1991:       MPIU_Allreduce(&ntemp,norm,1,MPIU_REAL,MPIU_MAX,PetscObjectComm((PetscObject)mat));
1992:       PetscLogFlops(PetscMax(amat->nz+bmat->nz-1,0));
1993:     } else SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"No support for two norm");
1994:   }
1995:   return(0);
1996: }
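/*
   Illustrative sketch (not part of this source): the norms handled above are NORM_FROBENIUS
   (sum of squares of all local entries, then a global sum), NORM_1 (maximum column sum, hence
   the allreduce over a full column-length array), and NORM_INFINITY (maximum row sum).
   A is a placeholder assembled matrix.

     PetscReal nrm;
     MatNorm(A,NORM_FROBENIUS,&nrm);
     MatNorm(A,NORM_INFINITY,&nrm);
     NORM_2 is not supported for this type, as the error above indicates.
*/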

1998: PetscErrorCode MatTranspose_MPIAIJ(Mat A,MatReuse reuse,Mat *matout)
1999: {
2000:   Mat_MPIAIJ      *a    =(Mat_MPIAIJ*)A->data,*b;
2001:   Mat_SeqAIJ      *Aloc =(Mat_SeqAIJ*)a->A->data,*Bloc=(Mat_SeqAIJ*)a->B->data,*sub_B_diag;
2002:   PetscInt        M     = A->rmap->N,N=A->cmap->N,ma,na,mb,nb,row,*cols,*cols_tmp,*B_diag_ilen,i,ncol,A_diag_ncol;
2003:   const PetscInt  *ai,*aj,*bi,*bj,*B_diag_i;
2004:   PetscErrorCode  ierr;
2005:   Mat             B,A_diag,*B_diag;
2006:   const MatScalar *array;

2009:   ma = A->rmap->n; na = A->cmap->n; mb = a->B->rmap->n; nb = a->B->cmap->n;
2010:   ai = Aloc->i; aj = Aloc->j;
2011:   bi = Bloc->i; bj = Bloc->j;
2012:   if (reuse == MAT_INITIAL_MATRIX || *matout == A) {
2013:     PetscInt             *d_nnz,*g_nnz,*o_nnz;
2014:     PetscSFNode          *oloc;
2015:     PETSC_UNUSED PetscSF sf;

2017:     PetscMalloc4(na,&d_nnz,na,&o_nnz,nb,&g_nnz,nb,&oloc);
2018:     /* compute d_nnz for preallocation */
2019:     PetscArrayzero(d_nnz,na);
2020:     for (i=0; i<ai[ma]; i++) {
2021:       d_nnz[aj[i]]++;
2022:     }
2023:     /* compute local off-diagonal contributions */
2024:     PetscArrayzero(g_nnz,nb);
2025:     for (i=0; i<bi[ma]; i++) g_nnz[bj[i]]++;
2026:     /* map those to global */
2027:     PetscSFCreate(PetscObjectComm((PetscObject)A),&sf);
2028:     PetscSFSetGraphLayout(sf,A->cmap,nb,NULL,PETSC_USE_POINTER,a->garray);
2029:     PetscSFSetFromOptions(sf);
2030:     PetscArrayzero(o_nnz,na);
2031:     PetscSFReduceBegin(sf,MPIU_INT,g_nnz,o_nnz,MPIU_SUM);
2032:     PetscSFReduceEnd(sf,MPIU_INT,g_nnz,o_nnz,MPIU_SUM);
2033:     PetscSFDestroy(&sf);

2035:     MatCreate(PetscObjectComm((PetscObject)A),&B);
2036:     MatSetSizes(B,A->cmap->n,A->rmap->n,N,M);
2037:     MatSetBlockSizes(B,PetscAbs(A->cmap->bs),PetscAbs(A->rmap->bs));
2038:     MatSetType(B,((PetscObject)A)->type_name);
2039:     MatMPIAIJSetPreallocation(B,0,d_nnz,0,o_nnz);
2040:     PetscFree4(d_nnz,o_nnz,g_nnz,oloc);
2041:   } else {
2042:     B    = *matout;
2043:     MatSetOption(B,MAT_NEW_NONZERO_ALLOCATION_ERR,PETSC_TRUE);
2044:   }

2046:   b           = (Mat_MPIAIJ*)B->data;
2047:   A_diag      = a->A;
2048:   B_diag      = &b->A;
2049:   sub_B_diag  = (Mat_SeqAIJ*)(*B_diag)->data;
2050:   A_diag_ncol = A_diag->cmap->N;
2051:   B_diag_ilen = sub_B_diag->ilen;
2052:   B_diag_i    = sub_B_diag->i;

2054:   /* Set ilen for diagonal of B */
2055:   for (i=0; i<A_diag_ncol; i++) {
2056:     B_diag_ilen[i] = B_diag_i[i+1] - B_diag_i[i];
2057:   }

2059:   /* Transpose the diagonal part of the matrix. In contrast to the off-diagonal part, this can be done
2060:   very quickly (i.e., without using MatSetValues), because all writes are local. */
2061:   MatTranspose(A_diag,MAT_REUSE_MATRIX,B_diag);

2063:   /* copy over the B part */
2064:   PetscMalloc1(bi[mb],&cols);
2065:   array = Bloc->a;
2066:   row   = A->rmap->rstart;
2067:   for (i=0; i<bi[mb]; i++) cols[i] = a->garray[bj[i]];
2068:   cols_tmp = cols;
2069:   for (i=0; i<mb; i++) {
2070:     ncol = bi[i+1]-bi[i];
2071:     MatSetValues(B,ncol,cols_tmp,1,&row,array,INSERT_VALUES);
2072:     row++;
2073:     array += ncol; cols_tmp += ncol;
2074:   }
2075:   PetscFree(cols);

2077:   MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);
2078:   MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);
2079:   if (reuse == MAT_INITIAL_MATRIX || reuse == MAT_REUSE_MATRIX) {
2080:     *matout = B;
2081:   } else {
2082:     MatHeaderMerge(A,&B);
2083:   }
2084:   return(0);
2085: }
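/*
   Illustrative sketch (not part of this source): the routine above supports creating a new
   transpose, reusing a previously created transpose with the same nonzero pattern, and the
   in-place variant. A and At are placeholder names.

     Mat At;
     MatTranspose(A,MAT_INITIAL_MATRIX,&At);    first call: allocates At
     MatTranspose(A,MAT_REUSE_MATRIX,&At);      later calls: reuses the pattern of At
     MatTranspose(A,MAT_INPLACE_MATRIX,&A);     in-place, handled via MatHeaderMerge() above
*/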

2087: PetscErrorCode MatDiagonalScale_MPIAIJ(Mat mat,Vec ll,Vec rr)
2088: {
2089:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
2090:   Mat            a    = aij->A,b = aij->B;
2092:   PetscInt       s1,s2,s3;

2095:   MatGetLocalSize(mat,&s2,&s3);
2096:   if (rr) {
2097:     VecGetLocalSize(rr,&s1);
2098:     if (s1!=s3) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"right vector non-conforming local size");
2099:     /* Overlap communication with computation. */
2100:     VecScatterBegin(aij->Mvctx,rr,aij->lvec,INSERT_VALUES,SCATTER_FORWARD);
2101:   }
2102:   if (ll) {
2103:     VecGetLocalSize(ll,&s1);
2104:     if (s1!=s2) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"left vector non-conforming local size");
2105:     (*b->ops->diagonalscale)(b,ll,0);
2106:   }
2107:   /* scale  the diagonal block */
2108:   (*a->ops->diagonalscale)(a,ll,rr);

2110:   if (rr) {
2111:     /* Do a scatter end and then right scale the off-diagonal block */
2112:     VecScatterEnd(aij->Mvctx,rr,aij->lvec,INSERT_VALUES,SCATTER_FORWARD);
2113:     (*b->ops->diagonalscale)(b,0,aij->lvec);
2114:   }
2115:   return(0);
2116: }

2118: PetscErrorCode MatSetUnfactored_MPIAIJ(Mat A)
2119: {
2120:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;

2124:   MatSetUnfactored(a->A);
2125:   return(0);
2126: }

2128: PetscErrorCode MatEqual_MPIAIJ(Mat A,Mat B,PetscBool  *flag)
2129: {
2130:   Mat_MPIAIJ     *matB = (Mat_MPIAIJ*)B->data,*matA = (Mat_MPIAIJ*)A->data;
2131:   Mat            a,b,c,d;
2132:   PetscBool      flg;

2136:   a = matA->A; b = matA->B;
2137:   c = matB->A; d = matB->B;

2139:   MatEqual(a,c,&flg);
2140:   if (flg) {
2141:     MatEqual(b,d,&flg);
2142:   }
2143:   MPIU_Allreduce(&flg,flag,1,MPIU_BOOL,MPI_LAND,PetscObjectComm((PetscObject)A));
2144:   return(0);
2145: }

2147: PetscErrorCode MatCopy_MPIAIJ(Mat A,Mat B,MatStructure str)
2148: {
2150:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
2151:   Mat_MPIAIJ     *b = (Mat_MPIAIJ*)B->data;

2154:   /* If the two matrices don't have the same copy implementation, they aren't compatible for fast copy. */
2155:   if ((str != SAME_NONZERO_PATTERN) || (A->ops->copy != B->ops->copy)) {
2156:     /* because of the column compression in the off-processor part of the matrix a->B,
2157:        the number of columns in a->B and b->B may be different, hence we cannot call
2158:        MatCopy() directly on the two parts. If need be, we could provide a more
2159:        efficient copy than MatCopy_Basic() by first uncompressing the a->B matrices and
2160:        then copying the submatrices */
2161:     MatCopy_Basic(A,B,str);
2162:   } else {
2163:     MatCopy(a->A,b->A,str);
2164:     MatCopy(a->B,b->B,str);
2165:   }
2166:   PetscObjectStateIncrease((PetscObject)B);
2167:   return(0);
2168: }

2170: PetscErrorCode MatSetUp_MPIAIJ(Mat A)
2171: {

2175:   MatMPIAIJSetPreallocation(A,PETSC_DEFAULT,0,PETSC_DEFAULT,0);
2176:   return(0);
2177: }

2179: /*
2180:    Computes the number of nonzeros per row needed for preallocation when X and Y
2181:    have different nonzero structure.
2182: */
2183: PetscErrorCode MatAXPYGetPreallocation_MPIX_private(PetscInt m,const PetscInt *xi,const PetscInt *xj,const PetscInt *xltog,const PetscInt *yi,const PetscInt *yj,const PetscInt *yltog,PetscInt *nnz)
2184: {
2185:   PetscInt       i,j,k,nzx,nzy;

2188:   /* Set the number of nonzeros in the new matrix */
2189:   for (i=0; i<m; i++) {
2190:     const PetscInt *xjj = xj+xi[i],*yjj = yj+yi[i];
2191:     nzx = xi[i+1] - xi[i];
2192:     nzy = yi[i+1] - yi[i];
2193:     nnz[i] = 0;
2194:     for (j=0,k=0; j<nzx; j++) {                   /* Point in X */
2195:       for (; k<nzy && yltog[yjj[k]]<xltog[xjj[j]]; k++) nnz[i]++; /* Catch up to X */
2196:       if (k<nzy && yltog[yjj[k]]==xltog[xjj[j]]) k++;             /* Skip duplicate */
2197:       nnz[i]++;
2198:     }
2199:     for (; k<nzy; k++) nnz[i]++;
2200:   }
2201:   return(0);
2202: }
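/*
   Worked example for the merge count above (illustrative, not part of this source): if in
   some row X has global columns {1,4,7} and Y has global columns {2,4,9}, the loop counts
   column 1, then 2 and 4 (the duplicate 4 from Y is skipped), then 7, and finally the
   trailing 9, giving nnz = 5 = |{1,2,4,7,9}| for that row of X+Y.
*/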

2204: /* This is the same as MatAXPYGetPreallocation_SeqAIJ, except that the local-to-global map is provided */
2205: static PetscErrorCode MatAXPYGetPreallocation_MPIAIJ(Mat Y,const PetscInt *yltog,Mat X,const PetscInt *xltog,PetscInt *nnz)
2206: {
2208:   PetscInt       m = Y->rmap->N;
2209:   Mat_SeqAIJ     *x = (Mat_SeqAIJ*)X->data;
2210:   Mat_SeqAIJ     *y = (Mat_SeqAIJ*)Y->data;

2213:   MatAXPYGetPreallocation_MPIX_private(m,x->i,x->j,xltog,y->i,y->j,yltog,nnz);
2214:   return(0);
2215: }

2217: PetscErrorCode MatAXPY_MPIAIJ(Mat Y,PetscScalar a,Mat X,MatStructure str)
2218: {
2220:   Mat_MPIAIJ     *xx = (Mat_MPIAIJ*)X->data,*yy = (Mat_MPIAIJ*)Y->data;
2221:   PetscBLASInt   bnz,one=1;
2222:   Mat_SeqAIJ     *x,*y;

2225:   if (str == SAME_NONZERO_PATTERN) {
2226:     PetscScalar alpha = a;
2227:     x    = (Mat_SeqAIJ*)xx->A->data;
2228:     PetscBLASIntCast(x->nz,&bnz);
2229:     y    = (Mat_SeqAIJ*)yy->A->data;
2230:     PetscStackCallBLAS("BLASaxpy",BLASaxpy_(&bnz,&alpha,x->a,&one,y->a,&one));
2231:     x    = (Mat_SeqAIJ*)xx->B->data;
2232:     y    = (Mat_SeqAIJ*)yy->B->data;
2233:     PetscBLASIntCast(x->nz,&bnz);
2234:     PetscStackCallBLAS("BLASaxpy",BLASaxpy_(&bnz,&alpha,x->a,&one,y->a,&one));
2235:     PetscObjectStateIncrease((PetscObject)Y);
2236:     /* the MatAXPY_Basic* subroutines call MatAssembly, so the matrix on the GPU
2237:        will be updated */
2238: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_CUDA)
2239:     if (Y->offloadmask != PETSC_OFFLOAD_UNALLOCATED) {
2240:       Y->offloadmask = PETSC_OFFLOAD_CPU;
2241:     }
2242: #endif
2243:   } else if (str == SUBSET_NONZERO_PATTERN) { /* nonzeros of X is a subset of Y's */
2244:     MatAXPY_Basic(Y,a,X,str);
2245:   } else {
2246:     Mat      B;
2247:     PetscInt *nnz_d,*nnz_o;
2248:     PetscMalloc1(yy->A->rmap->N,&nnz_d);
2249:     PetscMalloc1(yy->B->rmap->N,&nnz_o);
2250:     MatCreate(PetscObjectComm((PetscObject)Y),&B);
2251:     PetscObjectSetName((PetscObject)B,((PetscObject)Y)->name);
2252:     MatSetSizes(B,Y->rmap->n,Y->cmap->n,Y->rmap->N,Y->cmap->N);
2253:     MatSetBlockSizesFromMats(B,Y,Y);
2254:     MatSetType(B,MATMPIAIJ);
2255:     MatAXPYGetPreallocation_SeqAIJ(yy->A,xx->A,nnz_d);
2256:     MatAXPYGetPreallocation_MPIAIJ(yy->B,yy->garray,xx->B,xx->garray,nnz_o);
2257:     MatMPIAIJSetPreallocation(B,0,nnz_d,0,nnz_o);
2258:     MatAXPY_BasicWithPreallocation(B,Y,a,X,str);
2259:     MatHeaderReplace(Y,&B);
2260:     PetscFree(nnz_d);
2261:     PetscFree(nnz_o);
2262:   }
2263:   return(0);
2264: }
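/*
   Illustrative sketch (not part of this source): Y <- a*X + Y with the three structure hints
   handled above. X and Y are placeholder assembled MATMPIAIJ matrices.

     MatAXPY(Y,2.0,X,SAME_NONZERO_PATTERN);      fast path, plain BLAS axpy on both blocks
     MatAXPY(Y,2.0,X,SUBSET_NONZERO_PATTERN);    X's nonzeros are a subset of Y's
     MatAXPY(Y,2.0,X,DIFFERENT_NONZERO_PATTERN); Y is rebuilt with merged preallocation
*/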

2266: extern PetscErrorCode  MatConjugate_SeqAIJ(Mat);

2268: PetscErrorCode  MatConjugate_MPIAIJ(Mat mat)
2269: {
2270: #if defined(PETSC_USE_COMPLEX)
2272:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;

2275:   MatConjugate_SeqAIJ(aij->A);
2276:   MatConjugate_SeqAIJ(aij->B);
2277: #else
2279: #endif
2280:   return(0);
2281: }

2283: PetscErrorCode MatRealPart_MPIAIJ(Mat A)
2284: {
2285:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;

2289:   MatRealPart(a->A);
2290:   MatRealPart(a->B);
2291:   return(0);
2292: }

2294: PetscErrorCode MatImaginaryPart_MPIAIJ(Mat A)
2295: {
2296:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;

2300:   MatImaginaryPart(a->A);
2301:   MatImaginaryPart(a->B);
2302:   return(0);
2303: }

2305: PetscErrorCode MatGetRowMaxAbs_MPIAIJ(Mat A, Vec v, PetscInt idx[])
2306: {
2307:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
2309:   PetscInt       i,*idxb = 0;
2310:   PetscScalar    *va,*vb;
2311:   Vec            vtmp;

2314:   MatGetRowMaxAbs(a->A,v,idx);
2315:   VecGetArray(v,&va);
2316:   if (idx) {
2317:     for (i=0; i<A->rmap->n; i++) {
2318:       if (PetscAbsScalar(va[i])) idx[i] += A->cmap->rstart;
2319:     }
2320:   }

2322:   VecCreateSeq(PETSC_COMM_SELF,A->rmap->n,&vtmp);
2323:   if (idx) {
2324:     PetscMalloc1(A->rmap->n,&idxb);
2325:   }
2326:   MatGetRowMaxAbs(a->B,vtmp,idxb);
2327:   VecGetArray(vtmp,&vb);

2329:   for (i=0; i<A->rmap->n; i++) {
2330:     if (PetscAbsScalar(va[i]) < PetscAbsScalar(vb[i])) {
2331:       va[i] = vb[i];
2332:       if (idx) idx[i] = a->garray[idxb[i]];
2333:     }
2334:   }

2336:   VecRestoreArray(v,&va);
2337:   VecRestoreArray(vtmp,&vb);
2338:   PetscFree(idxb);
2339:   VecDestroy(&vtmp);
2340:   return(0);
2341: }

2343: PetscErrorCode MatGetRowMinAbs_MPIAIJ(Mat A, Vec v, PetscInt idx[])
2344: {
2345:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
2347:   PetscInt       i,*idxb = 0;
2348:   PetscScalar    *va,*vb;
2349:   Vec            vtmp;

2352:   MatGetRowMinAbs(a->A,v,idx);
2353:   VecGetArray(v,&va);
2354:   if (idx) {
2355:     for (i=0; i<A->rmap->n; i++) {
2356:       if (PetscAbsScalar(va[i])) idx[i] += A->cmap->rstart;
2357:     }
2358:   }

2360:   VecCreateSeq(PETSC_COMM_SELF,A->rmap->n,&vtmp);
2361:   if (idx) {
2362:     PetscMalloc1(A->rmap->n,&idxb);
2363:   }
2364:   MatGetRowMinAbs(a->B,vtmp,idxb);
2365:   VecGetArray(vtmp,&vb);

2367:   for (i=0; i<A->rmap->n; i++) {
2368:     if (PetscAbsScalar(va[i]) > PetscAbsScalar(vb[i])) {
2369:       va[i] = vb[i];
2370:       if (idx) idx[i] = a->garray[idxb[i]];
2371:     }
2372:   }

2374:   VecRestoreArray(v,&va);
2375:   VecRestoreArray(vtmp,&vb);
2376:   PetscFree(idxb);
2377:   VecDestroy(&vtmp);
2378:   return(0);
2379: }

2381: PetscErrorCode MatGetRowMin_MPIAIJ(Mat A, Vec v, PetscInt idx[])
2382: {
2383:   Mat_MPIAIJ     *mat   = (Mat_MPIAIJ*) A->data;
2384:   PetscInt       n      = A->rmap->n;
2385:   PetscInt       cstart = A->cmap->rstart;
2386:   PetscInt       *cmap  = mat->garray;
2387:   PetscInt       *diagIdx, *offdiagIdx;
2388:   Vec            diagV, offdiagV;
2389:   PetscScalar    *a, *diagA, *offdiagA;
2390:   PetscInt       r;

2394:   PetscMalloc2(n,&diagIdx,n,&offdiagIdx);
2395:   VecCreateSeq(PETSC_COMM_SELF, n, &diagV);
2396:   VecCreateSeq(PETSC_COMM_SELF, n, &offdiagV);
2397:   MatGetRowMin(mat->A, diagV,    diagIdx);
2398:   MatGetRowMin(mat->B, offdiagV, offdiagIdx);
2399:   VecGetArray(v,        &a);
2400:   VecGetArray(diagV,    &diagA);
2401:   VecGetArray(offdiagV, &offdiagA);
2402:   for (r = 0; r < n; ++r) {
2403:     if (PetscAbsScalar(diagA[r]) <= PetscAbsScalar(offdiagA[r])) {
2404:       a[r]   = diagA[r];
2405:       idx[r] = cstart + diagIdx[r];
2406:     } else {
2407:       a[r]   = offdiagA[r];
2408:       idx[r] = cmap[offdiagIdx[r]];
2409:     }
2410:   }
2411:   VecRestoreArray(v,        &a);
2412:   VecRestoreArray(diagV,    &diagA);
2413:   VecRestoreArray(offdiagV, &offdiagA);
2414:   VecDestroy(&diagV);
2415:   VecDestroy(&offdiagV);
2416:   PetscFree2(diagIdx, offdiagIdx);
2417:   return(0);
2418: }

2420: PetscErrorCode MatGetRowMax_MPIAIJ(Mat A, Vec v, PetscInt idx[])
2421: {
2422:   Mat_MPIAIJ     *mat   = (Mat_MPIAIJ*) A->data;
2423:   PetscInt       n      = A->rmap->n;
2424:   PetscInt       cstart = A->cmap->rstart;
2425:   PetscInt       *cmap  = mat->garray;
2426:   PetscInt       *diagIdx, *offdiagIdx;
2427:   Vec            diagV, offdiagV;
2428:   PetscScalar    *a, *diagA, *offdiagA;
2429:   PetscInt       r;

2433:   PetscMalloc2(n,&diagIdx,n,&offdiagIdx);
2434:   VecCreateSeq(PETSC_COMM_SELF, n, &diagV);
2435:   VecCreateSeq(PETSC_COMM_SELF, n, &offdiagV);
2436:   MatGetRowMax(mat->A, diagV,    diagIdx);
2437:   MatGetRowMax(mat->B, offdiagV, offdiagIdx);
2438:   VecGetArray(v,        &a);
2439:   VecGetArray(diagV,    &diagA);
2440:   VecGetArray(offdiagV, &offdiagA);
2441:   for (r = 0; r < n; ++r) {
2442:     if (PetscAbsScalar(diagA[r]) >= PetscAbsScalar(offdiagA[r])) {
2443:       a[r]   = diagA[r];
2444:       idx[r] = cstart + diagIdx[r];
2445:     } else {
2446:       a[r]   = offdiagA[r];
2447:       idx[r] = cmap[offdiagIdx[r]];
2448:     }
2449:   }
2450:   VecRestoreArray(v,        &a);
2451:   VecRestoreArray(diagV,    &diagA);
2452:   VecRestoreArray(offdiagV, &offdiagA);
2453:   VecDestroy(&diagV);
2454:   VecDestroy(&offdiagV);
2455:   PetscFree2(diagIdx, offdiagIdx);
2456:   return(0);
2457: }

2459: PetscErrorCode MatGetSeqNonzeroStructure_MPIAIJ(Mat mat,Mat *newmat)
2460: {
2462:   Mat            *dummy;

2465:   MatCreateSubMatrix_MPIAIJ_All(mat,MAT_DO_NOT_GET_VALUES,MAT_INITIAL_MATRIX,&dummy);
2466:   *newmat = *dummy;
2467:   PetscFree(dummy);
2468:   return(0);
2469: }

2471: PetscErrorCode  MatInvertBlockDiagonal_MPIAIJ(Mat A,const PetscScalar **values)
2472: {
2473:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*) A->data;

2477:   MatInvertBlockDiagonal(a->A,values);
2478:   A->factorerrortype = a->A->factorerrortype;
2479:   return(0);
2480: }

2482: static PetscErrorCode  MatSetRandom_MPIAIJ(Mat x,PetscRandom rctx)
2483: {
2485:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)x->data;

2488:   if (!x->assembled && !x->preallocated) SETERRQ(PetscObjectComm((PetscObject)x), PETSC_ERR_ARG_WRONGSTATE, "MatSetRandom on an unassembled and unpreallocated MATMPIAIJ is not allowed");
2489:   MatSetRandom(aij->A,rctx);
2490:   if (x->assembled) {
2491:     MatSetRandom(aij->B,rctx);
2492:   } else {
2493:     MatSetRandomSkipColumnRange_SeqAIJ_Private(aij->B,x->cmap->rstart,x->cmap->rend,rctx);
2494:   }
2495:   MatAssemblyBegin(x,MAT_FINAL_ASSEMBLY);
2496:   MatAssemblyEnd(x,MAT_FINAL_ASSEMBLY);
2497:   return(0);
2498: }

2500: PetscErrorCode MatMPIAIJSetUseScalableIncreaseOverlap_MPIAIJ(Mat A,PetscBool sc)
2501: {
2503:   if (sc) A->ops->increaseoverlap = MatIncreaseOverlap_MPIAIJ_Scalable;
2504:   else A->ops->increaseoverlap    = MatIncreaseOverlap_MPIAIJ;
2505:   return(0);
2506: }

2508: /*@
2509:    MatMPIAIJSetUseScalableIncreaseOverlap - Specify whether the matrix uses a scalable algorithm to compute the overlap

2511:    Collective on Mat

2513:    Input Parameters:
2514: +    A - the matrix
2515: -    sc - PETSC_TRUE indicates that the scalable algorithm should be used (the default is not to use it)

2517:    Level: advanced

2519: @*/
2520: PetscErrorCode MatMPIAIJSetUseScalableIncreaseOverlap(Mat A,PetscBool sc)
2521: {
2522:   PetscErrorCode       ierr;

2525:   PetscTryMethod(A,"MatMPIAIJSetUseScalableIncreaseOverlap_C",(Mat,PetscBool),(A,sc));
2526:   return(0);
2527: }
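/*
   Illustrative sketch (not part of this source): selecting the scalable overlap algorithm
   before calling MatIncreaseOverlap(); equivalently use -mat_increase_overlap_scalable at the
   command line, registered in MatSetFromOptions_MPIAIJ() below. A and is are placeholders.

     MatMPIAIJSetUseScalableIncreaseOverlap(A,PETSC_TRUE);
     MatIncreaseOverlap(A,1,&is,2);
*/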

2529: PetscErrorCode MatSetFromOptions_MPIAIJ(PetscOptionItems *PetscOptionsObject,Mat A)
2530: {
2531:   PetscErrorCode       ierr;
2532:   PetscBool            sc = PETSC_FALSE,flg;

2535:   PetscOptionsHead(PetscOptionsObject,"MPIAIJ options");
2536:   if (A->ops->increaseoverlap == MatIncreaseOverlap_MPIAIJ_Scalable) sc = PETSC_TRUE;
2537:   PetscOptionsBool("-mat_increase_overlap_scalable","Use a scalable algorithm to compute the overlap","MatIncreaseOverlap",sc,&sc,&flg);
2538:   if (flg) {
2539:     MatMPIAIJSetUseScalableIncreaseOverlap(A,sc);
2540:   }
2541:   PetscOptionsTail();
2542:   return(0);
2543: }

2545: PetscErrorCode MatShift_MPIAIJ(Mat Y,PetscScalar a)
2546: {
2548:   Mat_MPIAIJ     *maij = (Mat_MPIAIJ*)Y->data;
2549:   Mat_SeqAIJ     *aij = (Mat_SeqAIJ*)maij->A->data;

2552:   if (!Y->preallocated) {
2553:     MatMPIAIJSetPreallocation(Y,1,NULL,0,NULL);
2554:   } else if (!aij->nz) {
2555:     PetscInt nonew = aij->nonew;
2556:     MatSeqAIJSetPreallocation(maij->A,1,NULL);
2557:     aij->nonew = nonew;
2558:   }
2559:   MatShift_Basic(Y,a);
2560:   return(0);
2561: }

2563: PetscErrorCode MatMissingDiagonal_MPIAIJ(Mat A,PetscBool  *missing,PetscInt *d)
2564: {
2565:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;

2569:   if (A->rmap->n != A->cmap->n) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Only works for square matrices");
2570:   MatMissingDiagonal(a->A,missing,d);
2571:   if (d) {
2572:     PetscInt rstart;
2573:     MatGetOwnershipRange(A,&rstart,NULL);
2574:     *d += rstart;

2576:   }
2577:   return(0);
2578: }

2580: PetscErrorCode MatInvertVariableBlockDiagonal_MPIAIJ(Mat A,PetscInt nblocks,const PetscInt *bsizes,PetscScalar *diag)
2581: {
2582:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;

2586:   MatInvertVariableBlockDiagonal(a->A,nblocks,bsizes,diag);
2587:   return(0);
2588: }

2590: /* -------------------------------------------------------------------*/
2591: static struct _MatOps MatOps_Values = {MatSetValues_MPIAIJ,
2592:                                        MatGetRow_MPIAIJ,
2593:                                        MatRestoreRow_MPIAIJ,
2594:                                        MatMult_MPIAIJ,
2595:                                 /* 4*/ MatMultAdd_MPIAIJ,
2596:                                        MatMultTranspose_MPIAIJ,
2597:                                        MatMultTransposeAdd_MPIAIJ,
2598:                                        0,
2599:                                        0,
2600:                                        0,
2601:                                 /*10*/ 0,
2602:                                        0,
2603:                                        0,
2604:                                        MatSOR_MPIAIJ,
2605:                                        MatTranspose_MPIAIJ,
2606:                                 /*15*/ MatGetInfo_MPIAIJ,
2607:                                        MatEqual_MPIAIJ,
2608:                                        MatGetDiagonal_MPIAIJ,
2609:                                        MatDiagonalScale_MPIAIJ,
2610:                                        MatNorm_MPIAIJ,
2611:                                 /*20*/ MatAssemblyBegin_MPIAIJ,
2612:                                        MatAssemblyEnd_MPIAIJ,
2613:                                        MatSetOption_MPIAIJ,
2614:                                        MatZeroEntries_MPIAIJ,
2615:                                 /*24*/ MatZeroRows_MPIAIJ,
2616:                                        0,
2617:                                        0,
2618:                                        0,
2619:                                        0,
2620:                                 /*29*/ MatSetUp_MPIAIJ,
2621:                                        0,
2622:                                        0,
2623:                                        MatGetDiagonalBlock_MPIAIJ,
2624:                                        0,
2625:                                 /*34*/ MatDuplicate_MPIAIJ,
2626:                                        0,
2627:                                        0,
2628:                                        0,
2629:                                        0,
2630:                                 /*39*/ MatAXPY_MPIAIJ,
2631:                                        MatCreateSubMatrices_MPIAIJ,
2632:                                        MatIncreaseOverlap_MPIAIJ,
2633:                                        MatGetValues_MPIAIJ,
2634:                                        MatCopy_MPIAIJ,
2635:                                 /*44*/ MatGetRowMax_MPIAIJ,
2636:                                        MatScale_MPIAIJ,
2637:                                        MatShift_MPIAIJ,
2638:                                        MatDiagonalSet_MPIAIJ,
2639:                                        MatZeroRowsColumns_MPIAIJ,
2640:                                 /*49*/ MatSetRandom_MPIAIJ,
2641:                                        0,
2642:                                        0,
2643:                                        0,
2644:                                        0,
2645:                                 /*54*/ MatFDColoringCreate_MPIXAIJ,
2646:                                        0,
2647:                                        MatSetUnfactored_MPIAIJ,
2648:                                        MatPermute_MPIAIJ,
2649:                                        0,
2650:                                 /*59*/ MatCreateSubMatrix_MPIAIJ,
2651:                                        MatDestroy_MPIAIJ,
2652:                                        MatView_MPIAIJ,
2653:                                        0,
2654:                                        0,
2655:                                 /*64*/ 0,
2656:                                        MatMatMatMultNumeric_MPIAIJ_MPIAIJ_MPIAIJ,
2657:                                        0,
2658:                                        0,
2659:                                        0,
2660:                                 /*69*/ MatGetRowMaxAbs_MPIAIJ,
2661:                                        MatGetRowMinAbs_MPIAIJ,
2662:                                        0,
2663:                                        0,
2664:                                        0,
2665:                                        0,
2666:                                 /*75*/ MatFDColoringApply_AIJ,
2667:                                        MatSetFromOptions_MPIAIJ,
2668:                                        0,
2669:                                        0,
2670:                                        MatFindZeroDiagonals_MPIAIJ,
2671:                                 /*80*/ 0,
2672:                                        0,
2673:                                        0,
2674:                                 /*83*/ MatLoad_MPIAIJ,
2675:                                        MatIsSymmetric_MPIAIJ,
2676:                                        0,
2677:                                        0,
2678:                                        0,
2679:                                        0,
2680:                                 /*89*/ 0,
2681:                                        0,
2682:                                        MatMatMultNumeric_MPIAIJ_MPIAIJ,
2683:                                        0,
2684:                                        0,
2685:                                 /*94*/ MatPtAPNumeric_MPIAIJ_MPIAIJ,
2686:                                        0,
2687:                                        0,
2688:                                        0,
2689:                                        MatBindToCPU_MPIAIJ,
2690:                                 /*99*/ MatProductSetFromOptions_MPIAIJ,
2691:                                        0,
2692:                                        0,
2693:                                        MatConjugate_MPIAIJ,
2694:                                        0,
2695:                                 /*104*/MatSetValuesRow_MPIAIJ,
2696:                                        MatRealPart_MPIAIJ,
2697:                                        MatImaginaryPart_MPIAIJ,
2698:                                        0,
2699:                                        0,
2700:                                 /*109*/0,
2701:                                        0,
2702:                                        MatGetRowMin_MPIAIJ,
2703:                                        0,
2704:                                        MatMissingDiagonal_MPIAIJ,
2705:                                 /*114*/MatGetSeqNonzeroStructure_MPIAIJ,
2706:                                        0,
2707:                                        MatGetGhosts_MPIAIJ,
2708:                                        0,
2709:                                        0,
2710:                                 /*119*/0,
2711:                                        0,
2712:                                        0,
2713:                                        0,
2714:                                        MatGetMultiProcBlock_MPIAIJ,
2715:                                 /*124*/MatFindNonzeroRows_MPIAIJ,
2716:                                        MatGetColumnNorms_MPIAIJ,
2717:                                        MatInvertBlockDiagonal_MPIAIJ,
2718:                                        MatInvertVariableBlockDiagonal_MPIAIJ,
2719:                                        MatCreateSubMatricesMPI_MPIAIJ,
2720:                                 /*129*/0,
2721:                                        0,
2722:                                        0,
2723:                                        MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ,
2724:                                        0,
2725:                                 /*134*/0,
2726:                                        0,
2727:                                        0,
2728:                                        0,
2729:                                        0,
2730:                                 /*139*/MatSetBlockSizes_MPIAIJ,
2731:                                        0,
2732:                                        0,
2733:                                        MatFDColoringSetUp_MPIXAIJ,
2734:                                        MatFindOffBlockDiagonalEntries_MPIAIJ,
2735:                                        MatCreateMPIMatConcatenateSeqMat_MPIAIJ,
2736:                                 /*145*/0,
2737:                                        0,
2738:                                        0
2739: };

2741: /* ----------------------------------------------------------------------------------------*/

2743: PetscErrorCode  MatStoreValues_MPIAIJ(Mat mat)
2744: {
2745:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;

2749:   MatStoreValues(aij->A);
2750:   MatStoreValues(aij->B);
2751:   return(0);
2752: }

2754: PetscErrorCode  MatRetrieveValues_MPIAIJ(Mat mat)
2755: {
2756:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;

2760:   MatRetrieveValues(aij->A);
2761:   MatRetrieveValues(aij->B);
2762:   return(0);
2763: }

2765: PetscErrorCode  MatMPIAIJSetPreallocation_MPIAIJ(Mat B,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[])
2766: {
2767:   Mat_MPIAIJ     *b;
2769:   PetscMPIInt    size;

2772:   PetscLayoutSetUp(B->rmap);
2773:   PetscLayoutSetUp(B->cmap);
2774:   b = (Mat_MPIAIJ*)B->data;

2776: #if defined(PETSC_USE_CTABLE)
2777:   PetscTableDestroy(&b->colmap);
2778: #else
2779:   PetscFree(b->colmap);
2780: #endif
2781:   PetscFree(b->garray);
2782:   VecDestroy(&b->lvec);
2783:   VecScatterDestroy(&b->Mvctx);

2785:   /* Because B may have been resized we simply destroy it and create a new one each time */
2786:   MPI_Comm_size(PetscObjectComm((PetscObject)B),&size);
2787:   MatDestroy(&b->B);
2788:   MatCreate(PETSC_COMM_SELF,&b->B);
2789:   MatSetSizes(b->B,B->rmap->n,size > 1 ? B->cmap->N : 0,B->rmap->n,size > 1 ? B->cmap->N : 0);
2790:   MatSetBlockSizesFromMats(b->B,B,B);
2791:   MatSetType(b->B,MATSEQAIJ);
2792:   PetscLogObjectParent((PetscObject)B,(PetscObject)b->B);

2794:   if (!B->preallocated) {
2795:     MatCreate(PETSC_COMM_SELF,&b->A);
2796:     MatSetSizes(b->A,B->rmap->n,B->cmap->n,B->rmap->n,B->cmap->n);
2797:     MatSetBlockSizesFromMats(b->A,B,B);
2798:     MatSetType(b->A,MATSEQAIJ);
2799:     PetscLogObjectParent((PetscObject)B,(PetscObject)b->A);
2800:   }

2802:   MatSeqAIJSetPreallocation(b->A,d_nz,d_nnz);
2803:   MatSeqAIJSetPreallocation(b->B,o_nz,o_nnz);
2804:   B->preallocated  = PETSC_TRUE;
2805:   B->was_assembled = PETSC_FALSE;
2806:   B->assembled     = PETSC_FALSE;
2807:   return(0);
2808: }
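/*
   Illustrative sketch (not part of this source): typical preallocation of an MPIAIJ matrix
   before setting values, which ends up in the routine above. The global sizes M and N and the
   per-row counts 5 (diagonal block) and 2 (off-diagonal block) are placeholder values.

     Mat A;
     MatCreate(PETSC_COMM_WORLD,&A);
     MatSetSizes(A,PETSC_DECIDE,PETSC_DECIDE,M,N);
     MatSetType(A,MATAIJ);
     MatMPIAIJSetPreallocation(A,5,NULL,2,NULL);
     MatSeqAIJSetPreallocation(A,5,NULL);    covers the one-process case
*/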

2810: PetscErrorCode MatResetPreallocation_MPIAIJ(Mat B)
2811: {
2812:   Mat_MPIAIJ     *b;

2817:   PetscLayoutSetUp(B->rmap);
2818:   PetscLayoutSetUp(B->cmap);
2819:   b = (Mat_MPIAIJ*)B->data;

2821: #if defined(PETSC_USE_CTABLE)
2822:   PetscTableDestroy(&b->colmap);
2823: #else
2824:   PetscFree(b->colmap);
2825: #endif
2826:   PetscFree(b->garray);
2827:   VecDestroy(&b->lvec);
2828:   VecScatterDestroy(&b->Mvctx);

2830:   MatResetPreallocation(b->A);
2831:   MatResetPreallocation(b->B);
2832:   B->preallocated  = PETSC_TRUE;
2833:   B->was_assembled = PETSC_FALSE;
2834:   B->assembled = PETSC_FALSE;
2835:   return(0);
2836: }

2838: PetscErrorCode MatDuplicate_MPIAIJ(Mat matin,MatDuplicateOption cpvalues,Mat *newmat)
2839: {
2840:   Mat            mat;
2841:   Mat_MPIAIJ     *a,*oldmat = (Mat_MPIAIJ*)matin->data;

2845:   *newmat = 0;
2846:   MatCreate(PetscObjectComm((PetscObject)matin),&mat);
2847:   MatSetSizes(mat,matin->rmap->n,matin->cmap->n,matin->rmap->N,matin->cmap->N);
2848:   MatSetBlockSizesFromMats(mat,matin,matin);
2849:   MatSetType(mat,((PetscObject)matin)->type_name);
2850:   a       = (Mat_MPIAIJ*)mat->data;

2852:   mat->factortype   = matin->factortype;
2853:   mat->assembled    = matin->assembled;
2854:   mat->insertmode   = NOT_SET_VALUES;
2855:   mat->preallocated = matin->preallocated;

2857:   a->size         = oldmat->size;
2858:   a->rank         = oldmat->rank;
2859:   a->donotstash   = oldmat->donotstash;
2860:   a->roworiented  = oldmat->roworiented;
2861:   a->rowindices   = NULL;
2862:   a->rowvalues    = NULL;
2863:   a->getrowactive = PETSC_FALSE;

2865:   PetscLayoutReference(matin->rmap,&mat->rmap);
2866:   PetscLayoutReference(matin->cmap,&mat->cmap);

2868:   if (oldmat->colmap) {
2869: #if defined(PETSC_USE_CTABLE)
2870:     PetscTableCreateCopy(oldmat->colmap,&a->colmap);
2871: #else
2872:     PetscMalloc1(mat->cmap->N,&a->colmap);
2873:     PetscLogObjectMemory((PetscObject)mat,(mat->cmap->N)*sizeof(PetscInt));
2874:     PetscArraycpy(a->colmap,oldmat->colmap,mat->cmap->N);
2875: #endif
2876:   } else a->colmap = NULL;
2877:   if (oldmat->garray) {
2878:     PetscInt len;
2879:     len  = oldmat->B->cmap->n;
2880:     PetscMalloc1(len+1,&a->garray);
2881:     PetscLogObjectMemory((PetscObject)mat,len*sizeof(PetscInt));
2882:     if (len) { PetscArraycpy(a->garray,oldmat->garray,len); }
2883:   } else a->garray = NULL;

2885:   /* It may happen that MatDuplicate() is called with a non-assembled matrix;
2886:      in fact, MatDuplicate() only requires the matrix to be preallocated.
2887:      This may happen, for example, inside DMCreateMatrix_Shell() */
2888:   if (oldmat->lvec) {
2889:     VecDuplicate(oldmat->lvec,&a->lvec);
2890:     PetscLogObjectParent((PetscObject)mat,(PetscObject)a->lvec);
2891:   }
2892:   if (oldmat->Mvctx) {
2893:     VecScatterCopy(oldmat->Mvctx,&a->Mvctx);
2894:     PetscLogObjectParent((PetscObject)mat,(PetscObject)a->Mvctx);
2895:   }
2896:   if (oldmat->Mvctx_mpi1) {
2897:     VecScatterCopy(oldmat->Mvctx_mpi1,&a->Mvctx_mpi1);
2898:     PetscLogObjectParent((PetscObject)mat,(PetscObject)a->Mvctx_mpi1);
2899:   }

2901:   MatDuplicate(oldmat->A,cpvalues,&a->A);
2902:   PetscLogObjectParent((PetscObject)mat,(PetscObject)a->A);
2903:   MatDuplicate(oldmat->B,cpvalues,&a->B);
2904:   PetscLogObjectParent((PetscObject)mat,(PetscObject)a->B);
2905:   PetscFunctionListDuplicate(((PetscObject)matin)->qlist,&((PetscObject)mat)->qlist);
2906:   *newmat = mat;
2907:   return(0);
2908: }

2910: PetscErrorCode MatLoad_MPIAIJ(Mat newMat, PetscViewer viewer)
2911: {
2912:   PetscBool      isbinary, ishdf5;

2918:   /* force binary viewer to load .info file if it has not yet done so */
2919:   PetscViewerSetUp(viewer);
2920:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERBINARY,&isbinary);
2921:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERHDF5,  &ishdf5);
2922:   if (isbinary) {
2923:     MatLoad_MPIAIJ_Binary(newMat,viewer);
2924:   } else if (ishdf5) {
2925: #if defined(PETSC_HAVE_HDF5)
2926:     MatLoad_AIJ_HDF5(newMat,viewer);
2927: #else
2928:     SETERRQ(PetscObjectComm((PetscObject)newMat),PETSC_ERR_SUP,"HDF5 not supported in this build.\nPlease reconfigure using --download-hdf5");
2929: #endif
2930:   } else {
2931:     SETERRQ2(PetscObjectComm((PetscObject)newMat),PETSC_ERR_SUP,"Viewer type %s not yet supported for reading %s matrices",((PetscObject)viewer)->type_name,((PetscObject)newMat)->type_name);
2932:   }
2933:   return(0);
2934: }
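
/*
   A minimal caller-side sketch of loading an MPIAIJ matrix with MatLoad(), assuming
   a PETSc binary file named "matrix.dat" (the file name is an illustrative assumption):

     PetscViewer viewer;
     Mat         A;
     PetscViewerBinaryOpen(PETSC_COMM_WORLD,"matrix.dat",FILE_MODE_READ,&viewer);
     MatCreate(PETSC_COMM_WORLD,&A);
     MatSetType(A,MATMPIAIJ);
     MatLoad(A,viewer);
     PetscViewerDestroy(&viewer);
*/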

2936: PetscErrorCode MatLoad_MPIAIJ_Binary(Mat mat, PetscViewer viewer)
2937: {
2938:   PetscInt       header[4],M,N,m,nz,rows,cols,sum,i;
2939:   PetscInt       *rowidxs,*colidxs;
2940:   PetscScalar    *matvals;

2944:   PetscViewerSetUp(viewer);
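  /* On disk the matrix is stored as: header[4] = {MAT_FILE_CLASSID, M, N, nz},
     followed by the M row lengths, the nz column indices, and the nz values */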

2946:   /* read in matrix header */
2947:   PetscViewerBinaryRead(viewer,header,4,NULL,PETSC_INT);
2948:   if (header[0] != MAT_FILE_CLASSID) SETERRQ(PetscObjectComm((PetscObject)viewer),PETSC_ERR_FILE_UNEXPECTED,"Not a matrix object in file");
2949:   M  = header[1]; N = header[2]; nz = header[3];
2950:   if (M < 0) SETERRQ1(PetscObjectComm((PetscObject)viewer),PETSC_ERR_FILE_UNEXPECTED,"Matrix row size (%D) in file is negative",M);
2951:   if (N < 0) SETERRQ1(PetscObjectComm((PetscObject)viewer),PETSC_ERR_FILE_UNEXPECTED,"Matrix column size (%D) in file is negative",N);
2952:   if (nz < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_FILE_UNEXPECTED,"Matrix stored in special format on disk, cannot load as MPIAIJ");

2954:   /* set block sizes from the viewer's .info file */
2955:   MatLoad_Binary_BlockSizes(mat,viewer);
2956:   /* set global sizes if not set already */
2957:   if (mat->rmap->N < 0) mat->rmap->N = M;
2958:   if (mat->cmap->N < 0) mat->cmap->N = N;
2959:   PetscLayoutSetUp(mat->rmap);
2960:   PetscLayoutSetUp(mat->cmap);

2962:   /* check if the matrix sizes are correct */
2963:   MatGetSize(mat,&rows,&cols);
2964:   if (M != rows || N != cols) SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_FILE_UNEXPECTED, "Matrix in file of different sizes (%D, %D) than the input matrix (%D, %D)",M,N,rows,cols);

2966:   /* read in row lengths and build row indices */
2967:   MatGetLocalSize(mat,&m,NULL);
2968:   PetscMalloc1(m+1,&rowidxs);
2969:   PetscViewerBinaryReadAll(viewer,rowidxs+1,m,PETSC_DECIDE,M,PETSC_INT);
2970:   rowidxs[0] = 0; for (i=0; i<m; i++) rowidxs[i+1] += rowidxs[i];
2971:   MPIU_Allreduce(&rowidxs[m],&sum,1,MPIU_INT,MPI_SUM,PetscObjectComm((PetscObject)viewer));
2972:   if (sum != nz) SETERRQ2(PetscObjectComm((PetscObject)viewer),PETSC_ERR_FILE_UNEXPECTED,"Inconsistent matrix data in file: nonzeros = %D, sum-row-lengths = %D",nz,sum);
2973:   /* read in column indices and matrix values */
2974:   PetscMalloc2(rowidxs[m],&colidxs,rowidxs[m],&matvals);
2975:   PetscViewerBinaryReadAll(viewer,colidxs,rowidxs[m],PETSC_DETERMINE,PETSC_DETERMINE,PETSC_INT);
2976:   PetscViewerBinaryReadAll(viewer,matvals,rowidxs[m],PETSC_DETERMINE,PETSC_DETERMINE,PETSC_SCALAR);
2977:   /* store matrix indices and values */
2978:   MatMPIAIJSetPreallocationCSR(mat,rowidxs,colidxs,matvals);
2979:   PetscFree(rowidxs);
2980:   PetscFree2(colidxs,matvals);
2981:   return(0);
2982: }

2984: /* Not scalable because of ISAllGather() unless getting all columns. */
2985: PetscErrorCode ISGetSeqIS_Private(Mat mat,IS iscol,IS *isseq)
2986: {
2988:   IS             iscol_local;
2989:   PetscBool      isstride;
2990:   PetscMPIInt    lisstride=0,gisstride;

2993:   /* check if we are grabbing all columns*/
2994:   PetscObjectTypeCompare((PetscObject)iscol,ISSTRIDE,&isstride);

2996:   if (isstride) {
2997:     PetscInt  start,len,mstart,mlen;
2998:     ISStrideGetInfo(iscol,&start,NULL);
2999:     ISGetLocalSize(iscol,&len);
3000:     MatGetOwnershipRangeColumn(mat,&mstart,&mlen);
3001:     if (mstart == start && mlen-mstart == len) lisstride = 1;
3002:   }

3004:   MPIU_Allreduce(&lisstride,&gisstride,1,MPI_INT,MPI_MIN,PetscObjectComm((PetscObject)mat));
3005:   if (gisstride) {
3006:     PetscInt N;
3007:     MatGetSize(mat,NULL,&N);
3008:     ISCreateStride(PETSC_COMM_SELF,N,0,1,&iscol_local);
3009:     ISSetIdentity(iscol_local);
3010:     PetscInfo(mat,"Optimizing for obtaining all columns of the matrix; skipping ISAllGather()\n");
3011:   } else {
3012:     PetscInt cbs;
3013:     ISGetBlockSize(iscol,&cbs);
3014:     ISAllGather(iscol,&iscol_local);
3015:     ISSetBlockSize(iscol_local,cbs);
3016:   }

3018:   *isseq = iscol_local;
3019:   return(0);
3020: }

3022: /*
3023:  Used by MatCreateSubMatrix_MPIAIJ_SameRowColDist() to avoid ISAllGather() and global size of iscol_local
3024:  (see MatCreateSubMatrix_MPIAIJ_nonscalable)

3026:  Input Parameters:
3027:    mat - matrix
3028:    isrow - parallel row index set; its local indices are a subset of local rows of mat,
3029:            i.e., mat->rstart <= isrow[i] < mat->rend
3030:    iscol - parallel column index set; its local indices are a subset of local columns of mat,
3031:            i.e., mat->cstart <= iscol[i] < mat->cend
3032:  Output Parameter:
3033:    isrow_d,iscol_d - sequential row and column index sets for retrieving mat->A
3034:    iscol_o - sequential column index set for retrieving mat->B
3035:    garray - column map; garray[i] indicates global location of iscol_o[i] in iscol
3036:  */
3037: PetscErrorCode ISGetSeqIS_SameColDist_Private(Mat mat,IS isrow,IS iscol,IS *isrow_d,IS *iscol_d,IS *iscol_o,const PetscInt *garray[])
3038: {
3040:   Vec            x,cmap;
3041:   const PetscInt *is_idx;
3042:   PetscScalar    *xarray,*cmaparray;
3043:   PetscInt       ncols,isstart,*idx,m,rstart,*cmap1,count;
3044:   Mat_MPIAIJ     *a=(Mat_MPIAIJ*)mat->data;
3045:   Mat            B=a->B;
3046:   Vec            lvec=a->lvec,lcmap;
3047:   PetscInt       i,cstart,cend,Bn=B->cmap->N;
3048:   MPI_Comm       comm;
3049:   VecScatter     Mvctx=a->Mvctx;

3052:   PetscObjectGetComm((PetscObject)mat,&comm);
3053:   ISGetLocalSize(iscol,&ncols);

3055:   /* (1) iscol is a sub-column vector of mat, pad it with '-1.' to form a full vector x */
3056:   MatCreateVecs(mat,&x,NULL);
3057:   VecSet(x,-1.0);
3058:   VecDuplicate(x,&cmap);
3059:   VecSet(cmap,-1.0);

3061:   /* Get start indices */
3062:   MPI_Scan(&ncols,&isstart,1,MPIU_INT,MPI_SUM,comm);
3063:   isstart -= ncols;
3064:   MatGetOwnershipRangeColumn(mat,&cstart,&cend);

3066:   ISGetIndices(iscol,&is_idx);
3067:   VecGetArray(x,&xarray);
3068:   VecGetArray(cmap,&cmaparray);
3069:   PetscMalloc1(ncols,&idx);
3070:   for (i=0; i<ncols; i++) {
3071:     xarray[is_idx[i]-cstart]    = (PetscScalar)is_idx[i];
3072:     cmaparray[is_idx[i]-cstart] = i + isstart;      /* global index of iscol[i] */
3073:     idx[i]                      = is_idx[i]-cstart; /* local index of iscol[i]  */
3074:   }
3075:   VecRestoreArray(x,&xarray);
3076:   VecRestoreArray(cmap,&cmaparray);
3077:   ISRestoreIndices(iscol,&is_idx);

3079:   /* Get iscol_d */
3080:   ISCreateGeneral(PETSC_COMM_SELF,ncols,idx,PETSC_OWN_POINTER,iscol_d);
3081:   ISGetBlockSize(iscol,&i);
3082:   ISSetBlockSize(*iscol_d,i);

3084:   /* Get isrow_d */
3085:   ISGetLocalSize(isrow,&m);
3086:   rstart = mat->rmap->rstart;
3087:   PetscMalloc1(m,&idx);
3088:   ISGetIndices(isrow,&is_idx);
3089:   for (i=0; i<m; i++) idx[i] = is_idx[i]-rstart;
3090:   ISRestoreIndices(isrow,&is_idx);

3092:   ISCreateGeneral(PETSC_COMM_SELF,m,idx,PETSC_OWN_POINTER,isrow_d);
3093:   ISGetBlockSize(isrow,&i);
3094:   ISSetBlockSize(*isrow_d,i);

3096:   /* (2) Scatter x and cmap using aij->Mvctx to get their off-process portions (see MatMult_MPIAIJ) */
3097:   VecScatterBegin(Mvctx,x,lvec,INSERT_VALUES,SCATTER_FORWARD);
3098:   VecScatterEnd(Mvctx,x,lvec,INSERT_VALUES,SCATTER_FORWARD);

3100:   VecDuplicate(lvec,&lcmap);

3102:   VecScatterBegin(Mvctx,cmap,lcmap,INSERT_VALUES,SCATTER_FORWARD);
3103:   VecScatterEnd(Mvctx,cmap,lcmap,INSERT_VALUES,SCATTER_FORWARD);

3105:   /* (3) create sequential iscol_o (a subset of iscol) and isgarray */
3106:   /* off-process column indices */
3107:   count = 0;
3108:   PetscMalloc1(Bn,&idx);
3109:   PetscMalloc1(Bn,&cmap1);

3111:   VecGetArray(lvec,&xarray);
3112:   VecGetArray(lcmap,&cmaparray);
3113:   for (i=0; i<Bn; i++) {
3114:     if (PetscRealPart(xarray[i]) > -1.0) {
3115:       idx[count]     = i;                   /* local column index in off-diagonal part B */
3116:       cmap1[count] = (PetscInt)PetscRealPart(cmaparray[i]);  /* column index in submat */
3117:       count++;
3118:     }
3119:   }
3120:   VecRestoreArray(lvec,&xarray);
3121:   VecRestoreArray(lcmap,&cmaparray);

3123:   ISCreateGeneral(PETSC_COMM_SELF,count,idx,PETSC_COPY_VALUES,iscol_o);
3124:   /* cannot ensure iscol_o has same blocksize as iscol! */

3126:   PetscFree(idx);
3127:   *garray = cmap1;

3129:   VecDestroy(&x);
3130:   VecDestroy(&cmap);
3131:   VecDestroy(&lcmap);
3132:   return(0);
3133: }

3135: /* isrow and iscol have same processor distribution as mat, output *submat is a submatrix of local mat */
3136: PetscErrorCode MatCreateSubMatrix_MPIAIJ_SameRowColDist(Mat mat,IS isrow,IS iscol,MatReuse call,Mat *submat)
3137: {
3139:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)mat->data,*asub;
3140:   Mat            M = NULL;
3141:   MPI_Comm       comm;
3142:   IS             iscol_d,isrow_d,iscol_o;
3143:   Mat            Asub = NULL,Bsub = NULL;
3144:   PetscInt       n;

3147:   PetscObjectGetComm((PetscObject)mat,&comm);

3149:   if (call == MAT_REUSE_MATRIX) {
3150:     /* Retrieve isrow_d, iscol_d and iscol_o from submat */
3151:     PetscObjectQuery((PetscObject)*submat,"isrow_d",(PetscObject*)&isrow_d);
3152:     if (!isrow_d) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"isrow_d passed in was not used before, cannot reuse");

3154:     PetscObjectQuery((PetscObject)*submat,"iscol_d",(PetscObject*)&iscol_d);
3155:     if (!iscol_d) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"iscol_d passed in was not used before, cannot reuse");

3157:     PetscObjectQuery((PetscObject)*submat,"iscol_o",(PetscObject*)&iscol_o);
3158:     if (!iscol_o) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"iscol_o passed in was not used before, cannot reuse");

3160:     /* Update diagonal and off-diagonal portions of submat */
3161:     asub = (Mat_MPIAIJ*)(*submat)->data;
3162:     MatCreateSubMatrix_SeqAIJ(a->A,isrow_d,iscol_d,PETSC_DECIDE,MAT_REUSE_MATRIX,&asub->A);
3163:     ISGetLocalSize(iscol_o,&n);
3164:     if (n) {
3165:       MatCreateSubMatrix_SeqAIJ(a->B,isrow_d,iscol_o,PETSC_DECIDE,MAT_REUSE_MATRIX,&asub->B);
3166:     }
3167:     MatAssemblyBegin(*submat,MAT_FINAL_ASSEMBLY);
3168:     MatAssemblyEnd(*submat,MAT_FINAL_ASSEMBLY);

3170:   } else { /* call == MAT_INITIAL_MATRIX) */
3171:     const PetscInt *garray;
3172:     PetscInt        BsubN;

3174:     /* Create isrow_d, iscol_d, iscol_o and isgarray (replace isgarray with array?) */
3175:     ISGetSeqIS_SameColDist_Private(mat,isrow,iscol,&isrow_d,&iscol_d,&iscol_o,&garray);

3177:     /* Create local submatrices Asub and Bsub */
3178:     MatCreateSubMatrix_SeqAIJ(a->A,isrow_d,iscol_d,PETSC_DECIDE,MAT_INITIAL_MATRIX,&Asub);
3179:     MatCreateSubMatrix_SeqAIJ(a->B,isrow_d,iscol_o,PETSC_DECIDE,MAT_INITIAL_MATRIX,&Bsub);

3181:     /* Create submatrix M */
3182:     MatCreateMPIAIJWithSeqAIJ(comm,Asub,Bsub,garray,&M);

3184:     /* If Bsub has empty columns, compress iscol_o such that it will retrieve condensed Bsub from a->B during reuse */
3185:     asub = (Mat_MPIAIJ*)M->data;

3187:     ISGetLocalSize(iscol_o,&BsubN);
3188:     n = asub->B->cmap->N;
3189:     if (BsubN > n) {
3190:       /* This case can be tested using ~petsc/src/tao/bound/tutorials/runplate2_3 */
3191:       const PetscInt *idx;
3192:       PetscInt       i,j,*idx_new,*subgarray = asub->garray;
3193:       PetscInfo2(M,"submatrix Bn %D != BsubN %D, update iscol_o\n",n,BsubN);

3195:       PetscMalloc1(n,&idx_new);
3196:       j = 0;
3197:       ISGetIndices(iscol_o,&idx);
3198:       for (i=0; i<n; i++) {
3199:         if (j >= BsubN) break;
3200:         while (subgarray[i] > garray[j]) j++;

3202:         if (subgarray[i] == garray[j]) {
3203:           idx_new[i] = idx[j++];
3204:         } else SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"subgarray[%D]=%D cannot be smaller than garray[%D]=%D",i,subgarray[i],j,garray[j]);
3205:       }
3206:       ISRestoreIndices(iscol_o,&idx);

3208:       ISDestroy(&iscol_o);
3209:       ISCreateGeneral(PETSC_COMM_SELF,n,idx_new,PETSC_OWN_POINTER,&iscol_o);

3211:     } else if (BsubN < n) {
3212:       SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Columns of Bsub %D cannot be smaller than B's %D",BsubN,asub->B->cmap->N);
3213:     }

3215:     PetscFree(garray);
3216:     *submat = M;

3218:     /* Save isrow_d, iscol_d and iscol_o used in processor for next request */
3219:     PetscObjectCompose((PetscObject)M,"isrow_d",(PetscObject)isrow_d);
3220:     ISDestroy(&isrow_d);

3222:     PetscObjectCompose((PetscObject)M,"iscol_d",(PetscObject)iscol_d);
3223:     ISDestroy(&iscol_d);

3225:     PetscObjectCompose((PetscObject)M,"iscol_o",(PetscObject)iscol_o);
3226:     ISDestroy(&iscol_o);
3227:   }
3228:   return(0);
3229: }

3231: PetscErrorCode MatCreateSubMatrix_MPIAIJ(Mat mat,IS isrow,IS iscol,MatReuse call,Mat *newmat)
3232: {
3234:   IS             iscol_local=NULL,isrow_d;
3235:   PetscInt       csize;
3236:   PetscInt       n,i,j,start,end;
3237:   PetscBool      sameRowDist=PETSC_FALSE,sameDist[2],tsameDist[2];
3238:   MPI_Comm       comm;

3241:   /* If isrow has same processor distribution as mat,
3242:      call MatCreateSubMatrix_MPIAIJ_SameRowDist() to avoid using a hash table with global size of iscol */
3243:   if (call == MAT_REUSE_MATRIX) {
3244:     PetscObjectQuery((PetscObject)*newmat,"isrow_d",(PetscObject*)&isrow_d);
3245:     if (isrow_d) {
3246:       sameRowDist  = PETSC_TRUE;
3247:       tsameDist[1] = PETSC_TRUE; /* sameColDist */
3248:     } else {
3249:       PetscObjectQuery((PetscObject)*newmat,"SubIScol",(PetscObject*)&iscol_local);
3250:       if (iscol_local) {
3251:         sameRowDist  = PETSC_TRUE;
3252:         tsameDist[1] = PETSC_FALSE; /* !sameColDist */
3253:       }
3254:     }
3255:   } else {
3256:     /* Check if isrow has same processor distribution as mat */
3257:     sameDist[0] = PETSC_FALSE;
3258:     ISGetLocalSize(isrow,&n);
3259:     if (!n) {
3260:       sameDist[0] = PETSC_TRUE;
3261:     } else {
3262:       ISGetMinMax(isrow,&i,&j);
3263:       MatGetOwnershipRange(mat,&start,&end);
3264:       if (i >= start && j < end) {
3265:         sameDist[0] = PETSC_TRUE;
3266:       }
3267:     }

3269:     /* Check if iscol has same processor distribution as mat */
3270:     sameDist[1] = PETSC_FALSE;
3271:     ISGetLocalSize(iscol,&n);
3272:     if (!n) {
3273:       sameDist[1] = PETSC_TRUE;
3274:     } else {
3275:       ISGetMinMax(iscol,&i,&j);
3276:       MatGetOwnershipRangeColumn(mat,&start,&end);
3277:       if (i >= start && j < end) sameDist[1] = PETSC_TRUE;
3278:     }

3280:     PetscObjectGetComm((PetscObject)mat,&comm);
3281:     MPIU_Allreduce(&sameDist,&tsameDist,2,MPIU_BOOL,MPI_LAND,comm);
3282:     sameRowDist = tsameDist[0];
3283:   }

3285:   if (sameRowDist) {
3286:     if (tsameDist[1]) { /* sameRowDist & sameColDist */
3287:       /* isrow and iscol have same processor distribution as mat */
3288:       MatCreateSubMatrix_MPIAIJ_SameRowColDist(mat,isrow,iscol,call,newmat);
3289:       return(0);
3290:     } else { /* sameRowDist */
3291:       /* isrow has same processor distribution as mat */
3292:       if (call == MAT_INITIAL_MATRIX) {
3293:         PetscBool sorted;
3294:         ISGetSeqIS_Private(mat,iscol,&iscol_local);
3295:         ISGetLocalSize(iscol_local,&n); /* local size of iscol_local = global columns of newmat */
3296:         ISGetSize(iscol,&i);
3297:         if (n != i) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"n %D != size of iscol %D",n,i);

3299:         ISSorted(iscol_local,&sorted);
3300:         if (sorted) {
3301:           /* MatCreateSubMatrix_MPIAIJ_SameRowDist() requires iscol_local be sorted; it can have duplicate indices */
3302:           MatCreateSubMatrix_MPIAIJ_SameRowDist(mat,isrow,iscol,iscol_local,MAT_INITIAL_MATRIX,newmat);
3303:           return(0);
3304:         }
3305:       } else { /* call == MAT_REUSE_MATRIX */
3306:         IS    iscol_sub;
3307:         PetscObjectQuery((PetscObject)*newmat,"SubIScol",(PetscObject*)&iscol_sub);
3308:         if (iscol_sub) {
3309:           MatCreateSubMatrix_MPIAIJ_SameRowDist(mat,isrow,iscol,NULL,call,newmat);
3310:           return(0);
3311:         }
3312:       }
3313:     }
3314:   }

3316:   /* General case: iscol -> iscol_local which has global size of iscol */
3317:   if (call == MAT_REUSE_MATRIX) {
3318:     PetscObjectQuery((PetscObject)*newmat,"ISAllGather",(PetscObject*)&iscol_local);
3319:     if (!iscol_local) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Submatrix passed in was not used before, cannot reuse");
3320:   } else {
3321:     if (!iscol_local) {
3322:       ISGetSeqIS_Private(mat,iscol,&iscol_local);
3323:     }
3324:   }

3326:   ISGetLocalSize(iscol,&csize);
3327:   MatCreateSubMatrix_MPIAIJ_nonscalable(mat,isrow,iscol_local,csize,call,newmat);

3329:   if (call == MAT_INITIAL_MATRIX) {
3330:     PetscObjectCompose((PetscObject)*newmat,"ISAllGather",(PetscObject)iscol_local);
3331:     ISDestroy(&iscol_local);
3332:   }
3333:   return(0);
3334: }
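
/*
   A minimal caller-side sketch of the reuse pattern handled above, assuming an assembled
   MPIAIJ matrix A: the first call composes the needed index sets (isrow_d/iscol_d/iscol_o
   or ISAllGather) on the submatrix so the second call can reuse them.

     IS       isrow,iscol;
     Mat      sub;
     PetscInt rstart,rend,cstart,cend;
     MatGetOwnershipRange(A,&rstart,&rend);
     MatGetOwnershipRangeColumn(A,&cstart,&cend);
     ISCreateStride(PETSC_COMM_WORLD,rend-rstart,rstart,1,&isrow);
     ISCreateStride(PETSC_COMM_WORLD,cend-cstart,cstart,1,&iscol);
     MatCreateSubMatrix(A,isrow,iscol,MAT_INITIAL_MATRIX,&sub);
     ... change the numerical values of A ...
     MatCreateSubMatrix(A,isrow,iscol,MAT_REUSE_MATRIX,&sub);
*/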

3336: /*@C
3337:      MatCreateMPIAIJWithSeqAIJ - creates an MPIAIJ matrix using SeqAIJ matrices that contain the "diagonal"
3338:          and "off-diagonal" part of the matrix in CSR format.

3340:    Collective

3342:    Input Parameters:
3343: +  comm - MPI communicator
3344: .  A - "diagonal" portion of matrix
3345: .  B - "off-diagonal" portion of matrix, may have empty columns, will be destroyed by this routine
3346: -  garray - global index of B columns

3348:    Output Parameter:
3349: .   mat - the matrix, with input A as its local diagonal matrix
3350:    Level: advanced

3352:    Notes:
3353:        See MatCreateAIJ() for the definition of "diagonal" and "off-diagonal" portion of the matrix.
3354:        A becomes part of the output mat and B is destroyed by this routine; the user must not use A or B afterwards.
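
       A hedged usage sketch (the sizes and the garray entry below are illustrative assumptions;
    each rank assembles its own A, B and garray values before the call):
.vb
      Mat            A,B,C;
      const PetscInt garray[1] = {3};  /* assumed global column represented by B's single compact column */
      MatCreateSeqAIJ(PETSC_COMM_SELF,2,2,2,NULL,&A);  /* local "diagonal" block */
      MatCreateSeqAIJ(PETSC_COMM_SELF,2,1,1,NULL,&B);  /* local "off-diagonal" block with compact columns */
      /* ... MatSetValues() and MatAssemblyBegin()/MatAssemblyEnd() on A and B ... */
      MatCreateMPIAIJWithSeqAIJ(PETSC_COMM_WORLD,A,B,garray,&C);  /* C takes over A; B is destroyed */
.ve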

3356: .seealso: MatCreateMPIAIJWithSplitArrays()
3357: @*/
3358: PetscErrorCode MatCreateMPIAIJWithSeqAIJ(MPI_Comm comm,Mat A,Mat B,const PetscInt garray[],Mat *mat)
3359: {
3361:   Mat_MPIAIJ     *maij;
3362:   Mat_SeqAIJ     *b=(Mat_SeqAIJ*)B->data,*bnew;
3363:   PetscInt       *oi=b->i,*oj=b->j,i,nz,col;
3364:   PetscScalar    *oa=b->a;
3365:   Mat            Bnew;
3366:   PetscInt       m,n,N;

3369:   MatCreate(comm,mat);
3370:   MatGetSize(A,&m,&n);
3371:   if (m != B->rmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Am %D != Bm %D",m,B->rmap->N);
3372:   if (A->rmap->bs != B->rmap->bs) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"A row bs %D != B row bs %D",A->rmap->bs,B->rmap->bs);
3373:   /* remove check below; When B is created using iscol_o from ISGetSeqIS_SameColDist_Private(), its bs may not be same as A */
3374:   /* if (A->cmap->bs != B->cmap->bs) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"A column bs %D != B column bs %D",A->cmap->bs,B->cmap->bs); */

3376:   /* Get global columns of mat */
3377:   MPIU_Allreduce(&n,&N,1,MPIU_INT,MPI_SUM,comm);

3379:   MatSetSizes(*mat,m,n,PETSC_DECIDE,N);
3380:   MatSetType(*mat,MATMPIAIJ);
3381:   MatSetBlockSizes(*mat,A->rmap->bs,A->cmap->bs);
3382:   maij = (Mat_MPIAIJ*)(*mat)->data;

3384:   (*mat)->preallocated = PETSC_TRUE;

3386:   PetscLayoutSetUp((*mat)->rmap);
3387:   PetscLayoutSetUp((*mat)->cmap);

3389:   /* Set A as diagonal portion of *mat */
3390:   maij->A = A;

3392:   nz = oi[m];
3393:   for (i=0; i<nz; i++) {
3394:     col   = oj[i];
3395:     oj[i] = garray[col];
3396:   }

3398:    /* Set Bnew as off-diagonal portion of *mat */
3399:   MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,m,N,oi,oj,oa,&Bnew);
3400:   bnew        = (Mat_SeqAIJ*)Bnew->data;
3401:   bnew->maxnz = b->maxnz; /* allocated nonzeros of B */
3402:   maij->B     = Bnew;

3404:   if (B->rmap->N != Bnew->rmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"BN %D != BnewN %D",B->rmap->N,Bnew->rmap->N);

3406:   b->singlemalloc = PETSC_FALSE; /* B arrays are shared by Bnew */
3407:   b->free_a       = PETSC_FALSE;
3408:   b->free_ij      = PETSC_FALSE;
3409:   MatDestroy(&B);

3411:   bnew->singlemalloc = PETSC_TRUE; /* arrays will be freed by MatDestroy(&Bnew) */
3412:   bnew->free_a       = PETSC_TRUE;
3413:   bnew->free_ij      = PETSC_TRUE;

3415:   /* condense columns of maij->B */
3416:   MatSetOption(*mat,MAT_NO_OFF_PROC_ENTRIES,PETSC_TRUE);
3417:   MatAssemblyBegin(*mat,MAT_FINAL_ASSEMBLY);
3418:   MatAssemblyEnd(*mat,MAT_FINAL_ASSEMBLY);
3419:   MatSetOption(*mat,MAT_NO_OFF_PROC_ENTRIES,PETSC_FALSE);
3420:   MatSetOption(*mat,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_TRUE);
3421:   return(0);
3422: }

3424: extern PetscErrorCode MatCreateSubMatrices_MPIAIJ_SingleIS_Local(Mat,PetscInt,const IS[],const IS[],MatReuse,PetscBool,Mat*);

3426: PetscErrorCode MatCreateSubMatrix_MPIAIJ_SameRowDist(Mat mat,IS isrow,IS iscol,IS iscol_local,MatReuse call,Mat *newmat)
3427: {
3429:   PetscInt       i,m,n,rstart,row,rend,nz,j,bs,cbs;
3430:   PetscInt       *ii,*jj,nlocal,*dlens,*olens,dlen,olen,jend,mglobal;
3431:   Mat_MPIAIJ     *a=(Mat_MPIAIJ*)mat->data;
3432:   Mat            M,Msub,B=a->B;
3433:   MatScalar      *aa;
3434:   Mat_SeqAIJ     *aij;
3435:   PetscInt       *garray = a->garray,*colsub,Ncols;
3436:   PetscInt       count,Bn=B->cmap->N,cstart=mat->cmap->rstart,cend=mat->cmap->rend;
3437:   IS             iscol_sub,iscmap;
3438:   const PetscInt *is_idx,*cmap;
3439:   PetscBool      allcolumns=PETSC_FALSE;
3440:   MPI_Comm       comm;

3443:   PetscObjectGetComm((PetscObject)mat,&comm);

3445:   if (call == MAT_REUSE_MATRIX) {
3446:     PetscObjectQuery((PetscObject)*newmat,"SubIScol",(PetscObject*)&iscol_sub);
3447:     if (!iscol_sub) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"SubIScol passed in was not used before, cannot reuse");
3448:     ISGetLocalSize(iscol_sub,&count);

3450:     PetscObjectQuery((PetscObject)*newmat,"Subcmap",(PetscObject*)&iscmap);
3451:     if (!iscmap) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Subcmap passed in was not used before, cannot reuse");

3453:     PetscObjectQuery((PetscObject)*newmat,"SubMatrix",(PetscObject*)&Msub);
3454:     if (!Msub) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Submatrix passed in was not used before, cannot reuse");

3456:     MatCreateSubMatrices_MPIAIJ_SingleIS_Local(mat,1,&isrow,&iscol_sub,MAT_REUSE_MATRIX,PETSC_FALSE,&Msub);

3458:   } else { /* call == MAT_INITIAL_MATRIX) */
3459:     PetscBool flg;

3461:     ISGetLocalSize(iscol,&n);
3462:     ISGetSize(iscol,&Ncols);

3464:     /* (1) iscol -> nonscalable iscol_local */
3465:     /* Check for special case: each processor gets entire matrix columns */
3466:     ISIdentity(iscol_local,&flg);
3467:     if (flg && n == mat->cmap->N) allcolumns = PETSC_TRUE;
3468:     MPIU_Allreduce(MPI_IN_PLACE,&allcolumns,1,MPIU_BOOL,MPI_LAND,PetscObjectComm((PetscObject)mat));
3469:     if (allcolumns) {
3470:       iscol_sub = iscol_local;
3471:       PetscObjectReference((PetscObject)iscol_local);
3472:       ISCreateStride(PETSC_COMM_SELF,n,0,1,&iscmap);

3474:     } else {
3475:       /* (2) iscol_local -> iscol_sub and iscmap. Implementation below requires iscol_local be sorted, it can have duplicate indices */
3476:       PetscInt *idx,*cmap1,k;
3477:       PetscMalloc1(Ncols,&idx);
3478:       PetscMalloc1(Ncols,&cmap1);
3479:       ISGetIndices(iscol_local,&is_idx);
3480:       count = 0;
3481:       k     = 0;
3482:       for (i=0; i<Ncols; i++) {
3483:         j = is_idx[i];
3484:         if (j >= cstart && j < cend) {
3485:           /* diagonal part of mat */
3486:           idx[count]     = j;
3487:           cmap1[count++] = i; /* column index in submat */
3488:         } else if (Bn) {
3489:           /* off-diagonal part of mat */
3490:           if (j == garray[k]) {
3491:             idx[count]     = j;
3492:             cmap1[count++] = i;  /* column index in submat */
3493:           } else if (j > garray[k]) {
3494:             while (j > garray[k] && k < Bn-1) k++;
3495:             if (j == garray[k]) {
3496:               idx[count]     = j;
3497:               cmap1[count++] = i; /* column index in submat */
3498:             }
3499:           }
3500:         }
3501:       }
3502:       ISRestoreIndices(iscol_local,&is_idx);

3504:       ISCreateGeneral(PETSC_COMM_SELF,count,idx,PETSC_OWN_POINTER,&iscol_sub);
3505:       ISGetBlockSize(iscol,&cbs);
3506:       ISSetBlockSize(iscol_sub,cbs);

3508:       ISCreateGeneral(PetscObjectComm((PetscObject)iscol_local),count,cmap1,PETSC_OWN_POINTER,&iscmap);
3509:     }

3511:     /* (3) Create sequential Msub */
3512:     MatCreateSubMatrices_MPIAIJ_SingleIS_Local(mat,1,&isrow,&iscol_sub,MAT_INITIAL_MATRIX,allcolumns,&Msub);
3513:   }

3515:   ISGetLocalSize(iscol_sub,&count);
3516:   aij  = (Mat_SeqAIJ*)(Msub)->data;
3517:   ii   = aij->i;
3518:   ISGetIndices(iscmap,&cmap);

3520:   /*
3521:       m - number of local rows
3522:       Ncols - number of columns (same on all processors)
3523:       rstart - first row in new global matrix generated
3524:   */
3525:   MatGetSize(Msub,&m,NULL);

3527:   if (call == MAT_INITIAL_MATRIX) {
3528:     /* (4) Create parallel newmat */
3529:     PetscMPIInt    rank,size;
3530:     PetscInt       csize;

3532:     MPI_Comm_size(comm,&size);
3533:     MPI_Comm_rank(comm,&rank);

3535:     /*
3536:         Determine the number of non-zeros in the diagonal and off-diagonal
3537:         portions of the matrix in order to do correct preallocation
3538:     */

3540:     /* first get start and end of "diagonal" columns */
3541:     ISGetLocalSize(iscol,&csize);
3542:     if (csize == PETSC_DECIDE) {
3543:       ISGetSize(isrow,&mglobal);
3544:       if (mglobal == Ncols) { /* square matrix */
3545:         nlocal = m;
3546:       } else {
3547:         nlocal = Ncols/size + ((Ncols % size) > rank);
3548:       }
3549:     } else {
3550:       nlocal = csize;
3551:     }
3552:     MPI_Scan(&nlocal,&rend,1,MPIU_INT,MPI_SUM,comm);
3553:     rstart = rend - nlocal;
3554:     if (rank == size - 1 && rend != Ncols) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Local column sizes %D do not add up to total number of columns %D",rend,Ncols);

3556:     /* next, compute all the lengths */
3557:     jj    = aij->j;
3558:     PetscMalloc1(2*m+1,&dlens);
3559:     olens = dlens + m;
3560:     for (i=0; i<m; i++) {
3561:       jend = ii[i+1] - ii[i];
3562:       olen = 0;
3563:       dlen = 0;
3564:       for (j=0; j<jend; j++) {
3565:         if (cmap[*jj] < rstart || cmap[*jj] >= rend) olen++;
3566:         else dlen++;
3567:         jj++;
3568:       }
3569:       olens[i] = olen;
3570:       dlens[i] = dlen;
3571:     }

3573:     ISGetBlockSize(isrow,&bs);
3574:     ISGetBlockSize(iscol,&cbs);

3576:     MatCreate(comm,&M);
3577:     MatSetSizes(M,m,nlocal,PETSC_DECIDE,Ncols);
3578:     MatSetBlockSizes(M,bs,cbs);
3579:     MatSetType(M,((PetscObject)mat)->type_name);
3580:     MatMPIAIJSetPreallocation(M,0,dlens,0,olens);
3581:     PetscFree(dlens);

3583:   } else { /* call == MAT_REUSE_MATRIX */
3584:     M    = *newmat;
3585:     MatGetLocalSize(M,&i,NULL);
3586:     if (i != m) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Previous matrix must be same size/layout as request");
3587:     MatZeroEntries(M);
3588:     /*
3589:          The next two lines are needed so we may call MatSetValues_MPIAIJ() below directly,
3590:        rather than the slower MatSetValues().
3591:     */
3592:     M->was_assembled = PETSC_TRUE;
3593:     M->assembled     = PETSC_FALSE;
3594:   }

3596:   /* (5) Set values of Msub to *newmat */
3597:   PetscMalloc1(count,&colsub);
3598:   MatGetOwnershipRange(M,&rstart,NULL);

3600:   jj   = aij->j;
3601:   aa   = aij->a;
3602:   for (i=0; i<m; i++) {
3603:     row = rstart + i;
3604:     nz  = ii[i+1] - ii[i];
3605:     for (j=0; j<nz; j++) colsub[j] = cmap[jj[j]];
3606:     MatSetValues_MPIAIJ(M,1,&row,nz,colsub,aa,INSERT_VALUES);
3607:     jj += nz; aa += nz;
3608:   }
3609:   ISRestoreIndices(iscmap,&cmap);

3611:   MatAssemblyBegin(M,MAT_FINAL_ASSEMBLY);
3612:   MatAssemblyEnd(M,MAT_FINAL_ASSEMBLY);

3614:   PetscFree(colsub);

3616:   /* save Msub, iscol_sub and iscmap used in processor for next request */
3617:   if (call ==  MAT_INITIAL_MATRIX) {
3618:     *newmat = M;
3619:     PetscObjectCompose((PetscObject)(*newmat),"SubMatrix",(PetscObject)Msub);
3620:     MatDestroy(&Msub);

3622:     PetscObjectCompose((PetscObject)(*newmat),"SubIScol",(PetscObject)iscol_sub);
3623:     ISDestroy(&iscol_sub);

3625:     PetscObjectCompose((PetscObject)(*newmat),"Subcmap",(PetscObject)iscmap);
3626:     ISDestroy(&iscmap);

3628:     if (iscol_local) {
3629:       PetscObjectCompose((PetscObject)(*newmat),"ISAllGather",(PetscObject)iscol_local);
3630:       ISDestroy(&iscol_local);
3631:     }
3632:   }
3633:   return(0);
3634: }

3636: /*
3637:     Not great since it makes two copies of the submatrix, first an SeqAIJ
3638:   in local and then by concatenating the local matrices the end result.
3639:   Writing it directly would be much like MatCreateSubMatrices_MPIAIJ()

3641:   Note: This requires a sequential iscol with all indices.
3642: */
3643: PetscErrorCode MatCreateSubMatrix_MPIAIJ_nonscalable(Mat mat,IS isrow,IS iscol,PetscInt csize,MatReuse call,Mat *newmat)
3644: {
3646:   PetscMPIInt    rank,size;
3647:   PetscInt       i,m,n,rstart,row,rend,nz,*cwork,j,bs,cbs;
3648:   PetscInt       *ii,*jj,nlocal,*dlens,*olens,dlen,olen,jend,mglobal;
3649:   Mat            M,Mreuse;
3650:   MatScalar      *aa,*vwork;
3651:   MPI_Comm       comm;
3652:   Mat_SeqAIJ     *aij;
3653:   PetscBool      colflag,allcolumns=PETSC_FALSE;

3656:   PetscObjectGetComm((PetscObject)mat,&comm);
3657:   MPI_Comm_rank(comm,&rank);
3658:   MPI_Comm_size(comm,&size);

3660:   /* Check for special case: each processor gets entire matrix columns */
3661:   ISIdentity(iscol,&colflag);
3662:   ISGetLocalSize(iscol,&n);
3663:   if (colflag && n == mat->cmap->N) allcolumns = PETSC_TRUE;
3664:   MPIU_Allreduce(MPI_IN_PLACE,&allcolumns,1,MPIU_BOOL,MPI_LAND,PetscObjectComm((PetscObject)mat));

3666:   if (call ==  MAT_REUSE_MATRIX) {
3667:     PetscObjectQuery((PetscObject)*newmat,"SubMatrix",(PetscObject*)&Mreuse);
3668:     if (!Mreuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Submatrix passed in was not used before, cannot reuse");
3669:     MatCreateSubMatrices_MPIAIJ_SingleIS_Local(mat,1,&isrow,&iscol,MAT_REUSE_MATRIX,allcolumns,&Mreuse);
3670:   } else {
3671:     MatCreateSubMatrices_MPIAIJ_SingleIS_Local(mat,1,&isrow,&iscol,MAT_INITIAL_MATRIX,allcolumns,&Mreuse);
3672:   }

3674:   /*
3675:       m - number of local rows
3676:       n - number of columns (same on all processors)
3677:       rstart - first row in new global matrix generated
3678:   */
3679:   MatGetSize(Mreuse,&m,&n);
3680:   MatGetBlockSizes(Mreuse,&bs,&cbs);
3681:   if (call == MAT_INITIAL_MATRIX) {
3682:     aij = (Mat_SeqAIJ*)(Mreuse)->data;
3683:     ii  = aij->i;
3684:     jj  = aij->j;

3686:     /*
3687:         Determine the number of non-zeros in the diagonal and off-diagonal
3688:         portions of the matrix in order to do correct preallocation
3689:     */

3691:     /* first get start and end of "diagonal" columns */
3692:     if (csize == PETSC_DECIDE) {
3693:       ISGetSize(isrow,&mglobal);
3694:       if (mglobal == n) { /* square matrix */
3695:         nlocal = m;
3696:       } else {
3697:         nlocal = n/size + ((n % size) > rank);
3698:       }
3699:     } else {
3700:       nlocal = csize;
3701:     }
3702:     MPI_Scan(&nlocal,&rend,1,MPIU_INT,MPI_SUM,comm);
3703:     rstart = rend - nlocal;
3704:     if (rank == size - 1 && rend != n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Local column sizes %D do not add up to total number of columns %D",rend,n);

3706:     /* next, compute all the lengths */
3707:     PetscMalloc1(2*m+1,&dlens);
3708:     olens = dlens + m;
3709:     for (i=0; i<m; i++) {
3710:       jend = ii[i+1] - ii[i];
3711:       olen = 0;
3712:       dlen = 0;
3713:       for (j=0; j<jend; j++) {
3714:         if (*jj < rstart || *jj >= rend) olen++;
3715:         else dlen++;
3716:         jj++;
3717:       }
3718:       olens[i] = olen;
3719:       dlens[i] = dlen;
3720:     }
3721:     MatCreate(comm,&M);
3722:     MatSetSizes(M,m,nlocal,PETSC_DECIDE,n);
3723:     MatSetBlockSizes(M,bs,cbs);
3724:     MatSetType(M,((PetscObject)mat)->type_name);
3725:     MatMPIAIJSetPreallocation(M,0,dlens,0,olens);
3726:     PetscFree(dlens);
3727:   } else {
3728:     PetscInt ml,nl;

3730:     M    = *newmat;
3731:     MatGetLocalSize(M,&ml,&nl);
3732:     if (ml != m) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Previous matrix must be same size/layout as request");
3733:     MatZeroEntries(M);
3734:     /*
3735:          The next two lines are needed so we may call MatSetValues_MPIAIJ() below directly,
3736:        rather than the slower MatSetValues().
3737:     */
3738:     M->was_assembled = PETSC_TRUE;
3739:     M->assembled     = PETSC_FALSE;
3740:   }
3741:   MatGetOwnershipRange(M,&rstart,&rend);
3742:   aij  = (Mat_SeqAIJ*)(Mreuse)->data;
3743:   ii   = aij->i;
3744:   jj   = aij->j;
3745:   aa   = aij->a;
3746:   for (i=0; i<m; i++) {
3747:     row   = rstart + i;
3748:     nz    = ii[i+1] - ii[i];
3749:     cwork = jj;     jj += nz;
3750:     vwork = aa;     aa += nz;
3751:     MatSetValues_MPIAIJ(M,1,&row,nz,cwork,vwork,INSERT_VALUES);
3752:   }

3754:   MatAssemblyBegin(M,MAT_FINAL_ASSEMBLY);
3755:   MatAssemblyEnd(M,MAT_FINAL_ASSEMBLY);
3756:   *newmat = M;

3758:   /* save submatrix used in processor for next request */
3759:   if (call ==  MAT_INITIAL_MATRIX) {
3760:     PetscObjectCompose((PetscObject)M,"SubMatrix",(PetscObject)Mreuse);
3761:     MatDestroy(&Mreuse);
3762:   }
3763:   return(0);
3764: }

3766: PetscErrorCode MatMPIAIJSetPreallocationCSR_MPIAIJ(Mat B,const PetscInt Ii[],const PetscInt J[],const PetscScalar v[])
3767: {
3768:   PetscInt       m,cstart, cend,j,nnz,i,d;
3769:   PetscInt       *d_nnz,*o_nnz,nnz_max = 0,rstart,ii;
3770:   const PetscInt *JJ;
3772:   PetscBool      nooffprocentries;

3775:   if (Ii[0]) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Ii[0] must be 0, it is %D",Ii[0]);

3777:   PetscLayoutSetUp(B->rmap);
3778:   PetscLayoutSetUp(B->cmap);
3779:   m      = B->rmap->n;
3780:   cstart = B->cmap->rstart;
3781:   cend   = B->cmap->rend;
3782:   rstart = B->rmap->rstart;

3784:   PetscCalloc2(m,&d_nnz,m,&o_nnz);

3786:   if (PetscDefined(USE_DEBUG)) {
3787:     for (i=0; i<m; i++) {
3788:       nnz = Ii[i+1]- Ii[i];
3789:       JJ  = J + Ii[i];
3790:       if (nnz < 0) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Local row %D has a negative number of columns %D",i,nnz);
3791:       if (nnz && (JJ[0] < 0)) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Row %D starts with negative column index %D",i,JJ[0]);
3792:       if (nnz && (JJ[nnz-1] >= B->cmap->N)) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Row %D ends with too large a column index %D (max allowed %D)",i,JJ[nnz-1],B->cmap->N);
3793:     }
3794:   }

3796:   for (i=0; i<m; i++) {
3797:     nnz     = Ii[i+1]- Ii[i];
3798:     JJ      = J + Ii[i];
3799:     nnz_max = PetscMax(nnz_max,nnz);
3800:     d       = 0;
3801:     for (j=0; j<nnz; j++) {
3802:       if (cstart <= JJ[j] && JJ[j] < cend) d++;
3803:     }
3804:     d_nnz[i] = d;
3805:     o_nnz[i] = nnz - d;
3806:   }
3807:   MatMPIAIJSetPreallocation(B,0,d_nnz,0,o_nnz);
3808:   PetscFree2(d_nnz,o_nnz);

3810:   for (i=0; i<m; i++) {
3811:     ii   = i + rstart;
3812:     MatSetValues_MPIAIJ(B,1,&ii,Ii[i+1] - Ii[i],J+Ii[i], v ? v + Ii[i] : NULL,INSERT_VALUES);
3813:   }
3814:   nooffprocentries    = B->nooffprocentries;
3815:   B->nooffprocentries = PETSC_TRUE;
3816:   MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);
3817:   MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);
3818:   B->nooffprocentries = nooffprocentries;

3820:   MatSetOption(B,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_TRUE);
3821:   return(0);
3822: }

3824: /*@
3825:    MatMPIAIJSetPreallocationCSR - Allocates memory for a sparse parallel matrix in AIJ format
3826:    (the default parallel PETSc format).

3828:    Collective

3830:    Input Parameters:
3831: +  B - the matrix
3832: .  i - the indices into j for the start of each local row (starts with zero)
3833: .  j - the column indices for each local row (starts with zero)
3834: -  v - optional values in the matrix

3836:    Level: developer

3838:    Notes:
3839:        The i, j, and v arrays ARE copied by this routine into the internal format used by PETSc;
3840:      thus you CANNOT change the matrix entries by changing the values of v[] after you have
3841:      called this routine. Use MatCreateMPIAIJWithSplitArrays() to avoid needing to copy the arrays.

3843:        The i and j indices are 0 based, and i indices are indices corresponding to the local j array.

3845:        The format used for the sparse matrix input is equivalent to a
3846:     row-major ordering, i.e., for the following matrix the expected input data is
3847:     as shown:

3849: $        1 0 0
3850: $        2 0 3     P0
3851: $       -------
3852: $        4 5 6     P1
3853: $
3854: $     Process0 [P0]: rows_owned=[0,1]
3855: $        i =  {0,1,3}  [size = nrow+1  = 2+1]
3856: $        j =  {0,0,2}  [size = 3]
3857: $        v =  {1,2,3}  [size = 3]
3858: $
3859: $     Process1 [P1]: rows_owned=[2]
3860: $        i =  {0,3}    [size = nrow+1  = 1+1]
3861: $        j =  {0,1,2}  [size = 3]
3862: $        v =  {4,5,6}  [size = 3]
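
       A hedged sketch of the call made on Process0 for the layout above (assuming B already
    has type MATMPIAIJ and two local rows):
.vb
      PetscInt    i[] = {0,1,3};
      PetscInt    j[] = {0,0,2};
      PetscScalar v[] = {1.0,2.0,3.0};
      MatMPIAIJSetPreallocationCSR(B,i,j,v);
.ve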

3864: .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatMPIAIJSetPreallocation(), MatCreateAIJ(), MATMPIAIJ,
3865:           MatCreateSeqAIJWithArrays(), MatCreateMPIAIJWithSplitArrays()
3866: @*/
3867: PetscErrorCode  MatMPIAIJSetPreallocationCSR(Mat B,const PetscInt i[],const PetscInt j[], const PetscScalar v[])
3868: {

3872:   PetscTryMethod(B,"MatMPIAIJSetPreallocationCSR_C",(Mat,const PetscInt[],const PetscInt[],const PetscScalar[]),(B,i,j,v));
3873:   return(0);
3874: }

3876: /*@C
3877:    MatMPIAIJSetPreallocation - Preallocates memory for a sparse parallel matrix in AIJ format
3878:    (the default parallel PETSc format).  For good matrix assembly performance
3879:    the user should preallocate the matrix storage by setting the parameters
3880:    d_nz (or d_nnz) and o_nz (or o_nnz).  By setting these parameters accurately,
3881:    performance can be increased by more than a factor of 50.

3883:    Collective

3885:    Input Parameters:
3886: +  B - the matrix
3887: .  d_nz  - number of nonzeros per row in DIAGONAL portion of local submatrix
3888:            (same value is used for all local rows)
3889: .  d_nnz - array containing the number of nonzeros in the various rows of the
3890:            DIAGONAL portion of the local submatrix (possibly different for each row)
3891:            or NULL (PETSC_NULL_INTEGER in Fortran), if d_nz is used to specify the nonzero structure.
3892:            The size of this array is equal to the number of local rows, i.e 'm'.
3893:            For matrices that will be factored, you must leave room for (and set)
3894:            the diagonal entry even if it is zero.
3895: .  o_nz  - number of nonzeros per row in the OFF-DIAGONAL portion of local
3896:            submatrix (same value is used for all local rows).
3897: -  o_nnz - array containing the number of nonzeros in the various rows of the
3898:            OFF-DIAGONAL portion of the local submatrix (possibly different for
3899:            each row) or NULL (PETSC_NULL_INTEGER in Fortran), if o_nz is used to specify the nonzero
3900:            structure. The size of this array is equal to the number
3901:            of local rows, i.e 'm'.

3903:    If the *_nnz parameter is given then the *_nz parameter is ignored

3905:    The AIJ format (also called the Yale sparse matrix format or
3906:    compressed row storage (CSR)), is fully compatible with standard Fortran 77
3907:    storage.  The stored row and column indices begin with zero.
3908:    See Users-Manual: ch_mat for details.

3910:    The parallel matrix is partitioned such that the first m0 rows belong to
3911:    process 0, the next m1 rows belong to process 1, the next m2 rows belong
3912:    to process 2, etc., where m0,m1,m2,... are the input parameter 'm'.

3914:    The DIAGONAL portion of the local submatrix of a processor can be defined
3915:    as the submatrix which is obtained by extracting the part corresponding to
3916:    the rows r1-r2 and columns c1-c2 of the global matrix, where r1 is the
3917:    first row that belongs to the processor, r2 is the last row belonging to
3918:    this processor, and c1-c2 is the range of indices of the local part of a
3919:    vector suitable for applying the matrix to.  This is an m x n matrix.  In the
3920:    common case of a square matrix, the row and column ranges are the same and
3921:    the DIAGONAL part is also square. The remaining portion of the local
3922:    submatrix (m x (N-n)) constitutes the OFF-DIAGONAL portion.

3924:    If o_nnz, d_nnz are specified, then o_nz, and d_nz are ignored.

3926:    You can call MatGetInfo() to get information on how effective the preallocation was;
3927:    for example the fields mallocs,nz_allocated,nz_used,nz_unneeded;
3928:    You can also run with the option -info and look for messages with the string
3929:    malloc in them to see if additional memory allocation was needed.

3931:    Example usage:

3933:    Consider the following 8x8 matrix with 34 non-zero values, that is
3934:    assembled across 3 processors. Lets assume that proc0 owns 3 rows,
3935:    proc1 owns 3 rows, proc2 owns 2 rows. This division can be shown
3936:    as follows:

3938: .vb
3939:             1  2  0  |  0  3  0  |  0  4
3940:     Proc0   0  5  6  |  7  0  0  |  8  0
3941:             9  0 10  | 11  0  0  | 12  0
3942:     -------------------------------------
3943:            13  0 14  | 15 16 17  |  0  0
3944:     Proc1   0 18  0  | 19 20 21  |  0  0
3945:             0  0  0  | 22 23  0  | 24  0
3946:     -------------------------------------
3947:     Proc2  25 26 27  |  0  0 28  | 29  0
3948:            30  0  0  | 31 32 33  |  0 34
3949: .ve

3951:    This can be represented as a collection of submatrices as:

3953: .vb
3954:       A B C
3955:       D E F
3956:       G H I
3957: .ve

3959:    Where the submatrices A,B,C are owned by proc0, D,E,F are
3960:    owned by proc1, G,H,I are owned by proc2.

3962:    The 'm' parameters for proc0,proc1,proc2 are 3,3,2 respectively.
3963:    The 'n' parameters for proc0,proc1,proc2 are 3,3,2 respectively.
3964:    The 'M','N' parameters are 8,8, and have the same values on all procs.

3966:    The DIAGONAL submatrices corresponding to proc0,proc1,proc2 are
3967:    submatrices [A], [E], [I] respectively. The OFF-DIAGONAL submatrices
3968:    corresponding to proc0,proc1,proc2 are [BC], [DF], [GH] respectively.
3969:    Internally, each processor stores the DIAGONAL part, and the OFF-DIAGONAL
3970:    part as SeqAIJ matrices; e.g., proc1 will store [E] as a SeqAIJ
3971:    matrix, and [DF] as another SeqAIJ matrix.

3973:    When d_nz, o_nz parameters are specified, d_nz storage elements are
3974:    allocated for every row of the local diagonal submatrix, and o_nz
3975:    storage locations are allocated for every row of the OFF-DIAGONAL submat.
3976:    One way to choose d_nz and o_nz is to use the maximum number of nonzeros per
3977:    local row for each of the local DIAGONAL and OFF-DIAGONAL submatrices.
3978:    In this case, the values of d_nz,o_nz are:
3979: .vb
3980:      proc0 : dnz = 2, o_nz = 2
3981:      proc1 : dnz = 3, o_nz = 2
3982:      proc2 : dnz = 1, o_nz = 4
3983: .ve
3984:    We are allocating m*(d_nz+o_nz) storage locations for every proc. This
3985:    translates to 3*(2+2)=12 for proc0, 3*(3+2)=15 for proc1, 2*(1+4)=10
3986:    for proc2, i.e., we are using 12+15+10=37 storage locations to store
3987:    34 values.

3989:    When d_nnz, o_nnz parameters are specified, the storage is specified
3990:    for every row, corresponding to both DIAGONAL and OFF-DIAGONAL submatrices.
3991:    In the above case the values for d_nnz,o_nnz are:
3992: .vb
3993:      proc0: d_nnz = [2,2,2] and o_nnz = [2,2,2]
3994:      proc1: d_nnz = [3,3,2] and o_nnz = [2,1,1]
3995:      proc2: d_nnz = [1,1]   and o_nnz = [4,4]
3996: .ve
3997:    Here the space allocated is the sum of all the above values, i.e., 34, and
3998:    hence the preallocation is perfect.
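
   A hedged sketch of the corresponding calls on proc0 for the 8x8 matrix above:
.vb
     Mat      B;
     PetscInt d_nnz[] = {2,2,2},o_nnz[] = {2,2,2};   /* proc0 values from the table above */
     MatCreate(PETSC_COMM_WORLD,&B);
     MatSetSizes(B,3,3,8,8);
     MatSetType(B,MATMPIAIJ);
     MatMPIAIJSetPreallocation(B,0,d_nnz,0,o_nnz);
.ve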

4000:    Level: intermediate

4002: .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatCreateAIJ(), MatMPIAIJSetPreallocationCSR(),
4003:           MATMPIAIJ, MatGetInfo(), PetscSplitOwnership()
4004: @*/
4005: PetscErrorCode MatMPIAIJSetPreallocation(Mat B,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[])
4006: {

4012:   PetscTryMethod(B,"MatMPIAIJSetPreallocation_C",(Mat,PetscInt,const PetscInt[],PetscInt,const PetscInt[]),(B,d_nz,d_nnz,o_nz,o_nnz));
4013:   return(0);
4014: }

4016: /*@
4017:      MatCreateMPIAIJWithArrays - creates an MPI AIJ matrix using arrays that contain
4018:          the local rows in standard CSR format.

4020:    Collective

4022:    Input Parameters:
4023: +  comm - MPI communicator
4024: .  m - number of local rows (Cannot be PETSC_DECIDE)
4025: .  n - This value should be the same as the local size used in creating the
4026:        x vector for the matrix-vector product y = Ax (or PETSC_DECIDE to have it
4027:        calculated if N is given). For square matrices n is almost always m.
4028: .  M - number of global rows (or PETSC_DETERMINE to have calculated if m is given)
4029: .  N - number of global columns (or PETSC_DETERMINE to have calculated if n is given)
4030: .   i - row indices; that is i[0] = 0, i[row] = i[row-1] + number of elements in that row of the matrix
4031: .   j - column indices
4032: -   a - matrix values

4034:    Output Parameter:
4035: .   mat - the matrix

4037:    Level: intermediate

4039:    Notes:
4040:        The i, j, and a arrays ARE copied by this routine into the internal format used by PETSc;
4041:      thus you CANNOT change the matrix entries by changing the values of a[] after you have
4042:      called this routine. Use MatCreateMPIAIJWithSplitArrays() to avoid needing to copy the arrays.

4044:        The i and j indices are 0 based, and i indices are indices corresponding to the local j array.

4046:        The format used for the sparse matrix input is equivalent to a
4047:     row-major ordering, i.e., for the following matrix the expected input data is
4048:     as shown:

4050:        Once you have created the matrix you can update it with new numerical values using MatUpdateMPIAIJWithArrays().

4052: $        1 0 0
4053: $        2 0 3     P0
4054: $       -------
4055: $        4 5 6     P1
4056: $
4057: $     Process0 [P0]: rows_owned=[0,1]
4058: $        i =  {0,1,3}  [size = nrow+1  = 2+1]
4059: $        j =  {0,0,2}  [size = 3]
4060: $        v =  {1,2,3}  [size = 3]
4061: $
4062: $     Process1 [P1]: rows_owned=[2]
4063: $        i =  {0,3}    [size = nrow+1  = 1+1]
4064: $        j =  {0,1,2}  [size = 3]
4065: $        v =  {4,5,6}  [size = 3]
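
   A hedged sketch of the call made by Process0 in the example above (Process1 makes the
   analogous call with its own arrays):
.vb
     Mat         A;
     PetscInt    i[] = {0,1,3},j[] = {0,0,2};
     PetscScalar v[] = {1.0,2.0,3.0};
     MatCreateMPIAIJWithArrays(PETSC_COMM_WORLD,2,PETSC_DECIDE,PETSC_DETERMINE,3,i,j,v,&A);
.ve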

4067: .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatMPIAIJSetPreallocation(), MatMPIAIJSetPreallocationCSR(),
4068:           MATMPIAIJ, MatCreateAIJ(), MatCreateMPIAIJWithSplitArrays(), MatUpdateMPIAIJWithArrays()
4069: @*/
4070: PetscErrorCode MatCreateMPIAIJWithArrays(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt M,PetscInt N,const PetscInt i[],const PetscInt j[],const PetscScalar a[],Mat *mat)
4071: {

4075:   if (i && i[0]) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"i (row indices) must start with 0");
4076:   if (m < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"local number of rows (m) cannot be PETSC_DECIDE, or negative");
4077:   MatCreate(comm,mat);
4078:   MatSetSizes(*mat,m,n,M,N);
4079:   /* MatSetBlockSizes(M,bs,cbs); */
4080:   MatSetType(*mat,MATMPIAIJ);
4081:   MatMPIAIJSetPreallocationCSR(*mat,i,j,a);
4082:   return(0);
4083: }

4085: /*@
4086:      MatUpdateMPIAIJWithArrays - updates an MPI AIJ matrix using arrays that contain
4087:          the local rows in standard CSR format. Only the numerical values are updated; the other arrays must be identical to those used to create the matrix.

4089:    Collective

4091:    Input Parameters:
4092: +  mat - the matrix
4093: .  m - number of local rows (Cannot be PETSC_DECIDE)
4094: .  n - This value should be the same as the local size used in creating the
4095:        x vector for the matrix-vector product y = Ax (or PETSC_DECIDE to have it
4096:        calculated if N is given). For square matrices n is almost always m.
4097: .  M - number of global rows (or PETSC_DETERMINE to have calculated if m is given)
4098: .  N - number of global columns (or PETSC_DETERMINE to have calculated if n is given)
4099: .  Ii - row indices; that is Ii[0] = 0, Ii[row] = Ii[row-1] + number of elements in that row of the matrix
4100: .  J - column indices
4101: -  v - matrix values
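
   A hedged usage sketch (assuming the matrix was created with MatCreateMPIAIJWithArrays()
   and only the entries of v have changed; Ii and J must be the same arrays as before):
.vb
     MatUpdateMPIAIJWithArrays(mat,m,n,M,N,Ii,J,v);
.ve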

4103:    Level: intermediate

4105: .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatMPIAIJSetPreallocation(), MatMPIAIJSetPreallocationCSR(),
4106:           MATMPIAIJ, MatCreateAIJ(), MatCreateMPIAIJWithSplitArrays(), MatUpdateMPIAIJWithArrays()
4107: @*/
4108: PetscErrorCode MatUpdateMPIAIJWithArrays(Mat mat,PetscInt m,PetscInt n,PetscInt M,PetscInt N,const PetscInt Ii[],const PetscInt J[],const PetscScalar v[])
4109: {
4111:   PetscInt       cstart,nnz,i,j;
4112:   PetscInt       *ld;
4113:   PetscBool      nooffprocentries;
4114:   Mat_MPIAIJ     *Aij = (Mat_MPIAIJ*)mat->data;
4115:   Mat_SeqAIJ     *Ad  = (Mat_SeqAIJ*)Aij->A->data, *Ao  = (Mat_SeqAIJ*)Aij->B->data;
4116:   PetscScalar    *ad = Ad->a, *ao = Ao->a;
4117:   const PetscInt *Adi = Ad->i;
4118:   PetscInt       ldi,Iii,md;

4121:   if (Ii[0]) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"i (row indices) must start with 0");
4122:   if (m < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"local number of rows (m) cannot be PETSC_DECIDE, or negative");
4123:   if (m != mat->rmap->n) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Local number of rows cannot change between calls to MatUpdateMPIAIJWithArrays()");
4124:   if (n != mat->cmap->n) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Local number of columns cannot change between calls to MatUpdateMPIAIJWithArrays()");

4126:   cstart = mat->cmap->rstart;
4127:   if (!Aij->ld) {
4128:     /* count number of entries below block diagonal */
4129:     PetscCalloc1(m,&ld);
4130:     Aij->ld = ld;
4131:     for (i=0; i<m; i++) {
4132:       nnz  = Ii[i+1]- Ii[i];
4133:       j     = 0;
4134:       while (j < nnz && J[j] < cstart) {j++;}
4135:       J    += nnz;
4136:       ld[i] = j;
4137:     }
4138:   } else {
4139:     ld = Aij->ld;
4140:   }

4142:   for (i=0; i<m; i++) {
4143:     nnz  = Ii[i+1]- Ii[i];
4144:     Iii  = Ii[i];
4145:     ldi  = ld[i];
4146:     md   = Adi[i+1]-Adi[i];
4147:     PetscArraycpy(ao,v + Iii,ldi);
4148:     PetscArraycpy(ad,v + Iii + ldi,md);
4149:     PetscArraycpy(ao + ldi,v + Iii + ldi + md,nnz - ldi - md);
4150:     ad  += md;
4151:     ao  += nnz - md;
4152:   }
4153:   nooffprocentries      = mat->nooffprocentries;
4154:   mat->nooffprocentries = PETSC_TRUE;
4155:   PetscObjectStateIncrease((PetscObject)Aij->A);
4156:   PetscObjectStateIncrease((PetscObject)Aij->B);
4157:   PetscObjectStateIncrease((PetscObject)mat);
4158:   MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY);
4159:   MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY);
4160:   mat->nooffprocentries = nooffprocentries;
4161:   return(0);
4162: }

4164: /*@C
4165:    MatCreateAIJ - Creates a sparse parallel matrix in AIJ format
4166:    (the default parallel PETSc format).  For good matrix assembly performance
4167:    the user should preallocate the matrix storage by setting the parameters
4168:    d_nz (or d_nnz) and o_nz (or o_nnz).  By setting these parameters accurately,
4169:    performance can be increased by more than a factor of 50.

4171:    Collective

4173:    Input Parameters:
4174: +  comm - MPI communicator
4175: .  m - number of local rows (or PETSC_DECIDE to have calculated if M is given)
4176:            This value should be the same as the local size used in creating the
4177:            y vector for the matrix-vector product y = Ax.
4178: .  n - This value should be the same as the local size used in creating the
4179:        x vector for the matrix-vector product y = Ax (or PETSC_DECIDE to have it
4180:        calculated if N is given). For square matrices n is almost always m.
4181: .  M - number of global rows (or PETSC_DETERMINE to have calculated if m is given)
4182: .  N - number of global columns (or PETSC_DETERMINE to have calculated if n is given)
4183: .  d_nz  - number of nonzeros per row in DIAGONAL portion of local submatrix
4184:            (same value is used for all local rows)
4185: .  d_nnz - array containing the number of nonzeros in the various rows of the
4186:            DIAGONAL portion of the local submatrix (possibly different for each row)
4187:            or NULL, if d_nz is used to specify the nonzero structure.
4188:            The size of this array is equal to the number of local rows, i.e 'm'.
4189: .  o_nz  - number of nonzeros per row in the OFF-DIAGONAL portion of local
4190:            submatrix (same value is used for all local rows).
4191: -  o_nnz - array containing the number of nonzeros in the various rows of the
4192:            OFF-DIAGONAL portion of the local submatrix (possibly different for
4193:            each row) or NULL, if o_nz is used to specify the nonzero
4194:            structure. The size of this array is equal to the number
4195:            of local rows, i.e 'm'.

4197:    Output Parameter:
4198: .  A - the matrix

4200:    It is recommended that one use the MatCreate(), MatSetType() and/or MatSetFromOptions(),
4201:    MatXXXXSetPreallocation() paradigm instead of this routine directly.
4202:    [MatXXXXSetPreallocation() is, for example, MatSeqAIJSetPreallocation]

4204:    Notes:
4205:    If the *_nnz parameter is given then the *_nz parameter is ignored

4207:    m,n,M,N parameters specify the size of the matrix, and its partitioning across
4208:    processors, while d_nz,d_nnz,o_nz,o_nnz parameters specify the approximate
4209:    storage requirements for this matrix.

4211:    If PETSC_DECIDE or  PETSC_DETERMINE is used for a particular argument on one
4212:    processor than it must be used on all processors that share the object for
4213:    that argument.

4215:    The user MUST specify either the local or global matrix dimensions
4216:    (possibly both).

4218:    The parallel matrix is partitioned across processors such that the
4219:    first m0 rows belong to process 0, the next m1 rows belong to
4220:    process 1, the next m2 rows belong to process 2, etc., where
4221:    m0,m1,m2,... are the input parameter 'm', i.e., each processor stores
4222:    values corresponding to an [m x N] submatrix.

4224:    The columns are logically partitioned with the n0 columns belonging
4225:    to the 0th partition, the next n1 columns belonging to the next
4226:    partition, etc., where n0,n1,n2,... are the input parameter 'n'.

4228:    The DIAGONAL portion of the local submatrix on any given processor
4229:    is the submatrix corresponding to the rows and columns m,n
4230:    owned by that processor, i.e., the diagonal matrix on
4231:    process 0 is [m0 x n0], the diagonal matrix on process 1 is [m1 x n1],
4232:    etc. The remaining portion of the local submatrix [m x (N-n)]
4233:    constitutes the OFF-DIAGONAL portion. The example below better
4234:    illustrates this concept.

4236:    For a square global matrix we define each processor's diagonal portion
4237:    to be its local rows and the corresponding columns (a square submatrix);
4238:    each processor's off-diagonal portion encompasses the remainder of the
4239:    local matrix (a rectangular submatrix).

4241:    If o_nnz, d_nnz are specified, then o_nz and d_nz are ignored.

4243:    When calling this routine with a single process communicator, a matrix of
4244:    type SEQAIJ is returned.  If a matrix of type MPIAIJ is desired for this
4245:    type of communicator, use the construction mechanism
4246: .vb
4247:      MatCreate(...,&A); MatSetType(A,MATMPIAIJ); MatSetSizes(A, m,n,M,N); MatMPIAIJSetPreallocation(A,...);
4248: .ve


4255:    By default, this format uses inodes (identical nodes) when possible.
4256:    We search for consecutive rows with the same nonzero structure, thereby
4257:    reusing matrix information to achieve increased efficiency.

4259:    Options Database Keys:
4260: +  -mat_no_inode  - Do not use inodes
4261: -  -mat_inode_limit <limit> - Sets inode limit (max limit=5)



4265:    Example usage:

4267:    Consider the following 8x8 matrix with 34 non-zero values that is
4268:    assembled across 3 processors. Let's assume that proc0 owns 3 rows,
4269:    proc1 owns 3 rows, and proc2 owns 2 rows. This division can be shown
4270:    as follows

4272: .vb
4273:             1  2  0  |  0  3  0  |  0  4
4274:     Proc0   0  5  6  |  7  0  0  |  8  0
4275:             9  0 10  | 11  0  0  | 12  0
4276:     -------------------------------------
4277:            13  0 14  | 15 16 17  |  0  0
4278:     Proc1   0 18  0  | 19 20 21  |  0  0
4279:             0  0  0  | 22 23  0  | 24  0
4280:     -------------------------------------
4281:     Proc2  25 26 27  |  0  0 28  | 29  0
4282:            30  0  0  | 31 32 33  |  0 34
4283: .ve

4285:    This can be represented as a collection of submatrices as

4287: .vb
4288:       A B C
4289:       D E F
4290:       G H I
4291: .ve

4293:    Where the submatrices A,B,C are owned by proc0, D,E,F are
4294:    owned by proc1, G,H,I are owned by proc2.

4296:    The 'm' parameters for proc0,proc1,proc2 are 3,3,2 respectively.
4297:    The 'n' parameters for proc0,proc1,proc2 are 3,3,2 respectively.
4298:    The 'M','N' parameters are 8,8, and have the same values on all procs.

4300:    The DIAGONAL submatrices corresponding to proc0,proc1,proc2 are
4301:    submatrices [A], [E], [I] respectively. The OFF-DIAGONAL submatrices
4302:    corresponding to proc0,proc1,proc2 are [BC], [DF], [GH] respectively.
4303:    Internally, each processor stores the DIAGONAL part and the OFF-DIAGONAL
4304:    part as SeqAIJ matrices. For example, proc1 will store [E] as a SeqAIJ
4305:    matrix, and [DF] as another SeqAIJ matrix.

4307:    When the d_nz, o_nz parameters are specified, d_nz storage elements are
4308:    allocated for every row of the local diagonal submatrix, and o_nz
4309:    storage locations are allocated for every row of the OFF-DIAGONAL submatrix.
4310:    One way to choose d_nz and o_nz is to use the maximum number of nonzeros per
4311:    local row in the local DIAGONAL and OFF-DIAGONAL submatrices, respectively.
4312:    In this case, the values of d_nz,o_nz are
4313: .vb
4314:      proc0 : d_nz = 2, o_nz = 2
4315:      proc1 : d_nz = 3, o_nz = 2
4316:      proc2 : d_nz = 1, o_nz = 4
4317: .ve
4318:    We are allocating m*(d_nz+o_nz) storage locations for every proc. This
4319:    translates to 3*(2+2)=12 for proc0, 3*(3+2)=15 for proc1, and 2*(1+4)=10
4320:    for proc2, i.e. we are using 12+15+10=37 storage locations to store
4321:    34 values.

4323:    When the d_nnz, o_nnz parameters are specified, the storage is specified
4324:    for every row, corresponding to both the DIAGONAL and OFF-DIAGONAL submatrices.
4325:    In the above case the values for d_nnz,o_nnz are
4326: .vb
4327:      proc0: d_nnz = [2,2,2] and o_nnz = [2,2,2]
4328:      proc1: d_nnz = [3,3,2] and o_nnz = [2,1,1]
4329:      proc2: d_nnz = [1,1]   and o_nnz = [4,4]
4330: .ve
4331:    Here the space allocated is the sum of all the above values, i.e. 34, and
4332:    hence the pre-allocation is perfect.
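
   The following is a minimal calling sketch for the example above; it is illustrative
   only, and each process would pass its own local sizes and its own d_nnz/o_nnz arrays
   (the values shown are those of proc0).
.vb
     Mat      A;
     PetscInt d_nnz[3] = {2,2,2}, o_nnz[3] = {2,2,2};  /* proc0 values from the example above */
     MatCreateAIJ(PETSC_COMM_WORLD,3,3,8,8,0,d_nnz,0,o_nnz,&A);
     /* ... MatSetValues(), MatAssemblyBegin(), MatAssemblyEnd() ... */
     MatDestroy(&A);
.ve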

4334:    Level: intermediate

4336: .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatMPIAIJSetPreallocation(), MatMPIAIJSetPreallocationCSR(),
4337:           MATMPIAIJ, MatCreateMPIAIJWithArrays()
4338: @*/
4339: PetscErrorCode  MatCreateAIJ(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt M,PetscInt N,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[],Mat *A)
4340: {
4342:   PetscMPIInt    size;

4345:   MatCreate(comm,A);
4346:   MatSetSizes(*A,m,n,M,N);
4347:   MPI_Comm_size(comm,&size);
4348:   if (size > 1) {
4349:     MatSetType(*A,MATMPIAIJ);
4350:     MatMPIAIJSetPreallocation(*A,d_nz,d_nnz,o_nz,o_nnz);
4351:   } else {
4352:     MatSetType(*A,MATSEQAIJ);
4353:     MatSeqAIJSetPreallocation(*A,d_nz,d_nnz);
4354:   }
4355:   return(0);
4356: }

4358: /*@C
4359:   MatMPIAIJGetSeqAIJ - Returns the local piece of this distributed matrix
4360:   
4361:   Not collective
4362:   
4363:   Input Parameter:
4364: . A - The MPIAIJ matrix

4366:   Output Parameters:
4367: + Ad - The local diagonal block as a SeqAIJ matrix
4368: . Ao - The local off-diagonal block as a SeqAIJ matrix
4369: - colmap - An array mapping local column numbers of Ao to global column numbers of the parallel matrix

4371:   Note: The rows in Ad and Ao are in [0, Nr), where Nr is the number of local rows on this process. The columns
4372:   in Ad are in [0, Nc), where Nc is the number of local columns. The columns of Ao are in [0, Nco), where Nco is
4373:   the number of nonzero columns in the local off-diagonal piece of the matrix A. The array colmap maps these
4374:   local column numbers to global column numbers in the original matrix.
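
  A minimal usage sketch (illustrative; A is assumed to be a MATMPIAIJ matrix):
.vb
    Mat            Ad,Ao;
    const PetscInt *colmap;
    MatMPIAIJGetSeqAIJ(A,&Ad,&Ao,&colmap);
    /* local column j of Ao corresponds to global column colmap[j] of A */
.ve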

4376:   Level: intermediate

4378: .seealso: MatMPIAIJGetLocalMat(), MatMPIAIJGetLocalMatCondensed(), MatCreateAIJ(), MATMPIAIJ, MATSEQAIJ
4379: @*/
4380: PetscErrorCode MatMPIAIJGetSeqAIJ(Mat A,Mat *Ad,Mat *Ao,const PetscInt *colmap[])
4381: {
4382:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
4383:   PetscBool      flg;

4387:   PetscStrbeginswith(((PetscObject)A)->type_name,MATMPIAIJ,&flg);
4388:   if (!flg) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"This function requires a MATMPIAIJ matrix as input");
4389:   if (Ad)     *Ad     = a->A;
4390:   if (Ao)     *Ao     = a->B;
4391:   if (colmap) *colmap = a->garray;
4392:   return(0);
4393: }

4395: PetscErrorCode MatCreateMPIMatConcatenateSeqMat_MPIAIJ(MPI_Comm comm,Mat inmat,PetscInt n,MatReuse scall,Mat *outmat)
4396: {
4398:   PetscInt       m,N,i,rstart,nnz,Ii;
4399:   PetscInt       *indx;
4400:   PetscScalar    *values;

4403:   MatGetSize(inmat,&m,&N);
4404:   if (scall == MAT_INITIAL_MATRIX) { /* symbolic phase */
4405:     PetscInt       *dnz,*onz,sum,bs,cbs;

4407:     if (n == PETSC_DECIDE) {
4408:       PetscSplitOwnership(comm,&n,&N);
4409:     }
4410:     /* Check sum(n) = N */
4411:     MPIU_Allreduce(&n,&sum,1,MPIU_INT,MPI_SUM,comm);
4412:     if (sum != N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Sum of local columns %D != global columns %D",sum,N);

4414:     MPI_Scan(&m, &rstart,1,MPIU_INT,MPI_SUM,comm);
4415:     rstart -= m;

4417:     MatPreallocateInitialize(comm,m,n,dnz,onz);
4418:     for (i=0; i<m; i++) {
4419:       MatGetRow_SeqAIJ(inmat,i,&nnz,&indx,NULL);
4420:       MatPreallocateSet(i+rstart,nnz,indx,dnz,onz);
4421:       MatRestoreRow_SeqAIJ(inmat,i,&nnz,&indx,NULL);
4422:     }

4424:     MatCreate(comm,outmat);
4425:     MatSetSizes(*outmat,m,n,PETSC_DETERMINE,PETSC_DETERMINE);
4426:     MatGetBlockSizes(inmat,&bs,&cbs);
4427:     MatSetBlockSizes(*outmat,bs,cbs);
4428:     MatSetType(*outmat,MATAIJ);
4429:     MatSeqAIJSetPreallocation(*outmat,0,dnz);
4430:     MatMPIAIJSetPreallocation(*outmat,0,dnz,0,onz);
4431:     MatPreallocateFinalize(dnz,onz);
4432:   }

4434:   /* numeric phase */
4435:   MatGetOwnershipRange(*outmat,&rstart,NULL);
4436:   for (i=0; i<m; i++) {
4437:     MatGetRow_SeqAIJ(inmat,i,&nnz,&indx,&values);
4438:     Ii   = i + rstart;
4439:     MatSetValues(*outmat,1,&Ii,nnz,indx,values,INSERT_VALUES);
4440:     MatRestoreRow_SeqAIJ(inmat,i,&nnz,&indx,&values);
4441:   }
4442:   MatAssemblyBegin(*outmat,MAT_FINAL_ASSEMBLY);
4443:   MatAssemblyEnd(*outmat,MAT_FINAL_ASSEMBLY);
4444:   return(0);
4445: }

4447: PetscErrorCode MatFileSplit(Mat A,char *outfile)
4448: {
4449:   PetscErrorCode    ierr;
4450:   PetscMPIInt       rank;
4451:   PetscInt          m,N,i,rstart,nnz;
4452:   size_t            len;
4453:   const PetscInt    *indx;
4454:   PetscViewer       out;
4455:   char              *name;
4456:   Mat               B;
4457:   const PetscScalar *values;

4460:   MatGetLocalSize(A,&m,0);
4461:   MatGetSize(A,0,&N);
4462:   /* Should this be the type of the diagonal block of A? */
4463:   MatCreate(PETSC_COMM_SELF,&B);
4464:   MatSetSizes(B,m,N,m,N);
4465:   MatSetBlockSizesFromMats(B,A,A);
4466:   MatSetType(B,MATSEQAIJ);
4467:   MatSeqAIJSetPreallocation(B,0,NULL);
4468:   MatGetOwnershipRange(A,&rstart,0);
4469:   for (i=0; i<m; i++) {
4470:     MatGetRow(A,i+rstart,&nnz,&indx,&values);
4471:     MatSetValues(B,1,&i,nnz,indx,values,INSERT_VALUES);
4472:     MatRestoreRow(A,i+rstart,&nnz,&indx,&values);
4473:   }
4474:   MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);
4475:   MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);

4477:   MPI_Comm_rank(PetscObjectComm((PetscObject)A),&rank);
4478:   PetscStrlen(outfile,&len);
4479:   PetscMalloc1(len+6,&name);
4480:   PetscSNPrintf(name,len+6,"%s.%d",outfile,rank);
4481:   PetscViewerBinaryOpen(PETSC_COMM_SELF,name,FILE_MODE_APPEND,&out);
4482:   PetscFree(name);
4483:   MatView(B,out);
4484:   PetscViewerDestroy(&out);
4485:   MatDestroy(&B);
4486:   return(0);
4487: }

4489: static PetscErrorCode MatDestroy_MPIAIJ_SeqsToMPI(void *data)
4490: {
4491:   PetscErrorCode      ierr;
4492:   Mat_Merge_SeqsToMPI *merge = (Mat_Merge_SeqsToMPI *)data;

4495:   if (!merge) return(0);
4496:   PetscFree(merge->id_r);
4497:   PetscFree(merge->len_s);
4498:   PetscFree(merge->len_r);
4499:   PetscFree(merge->bi);
4500:   PetscFree(merge->bj);
4501:   PetscFree(merge->buf_ri[0]);
4502:   PetscFree(merge->buf_ri);
4503:   PetscFree(merge->buf_rj[0]);
4504:   PetscFree(merge->buf_rj);
4505:   PetscFree(merge->coi);
4506:   PetscFree(merge->coj);
4507:   PetscFree(merge->owners_co);
4508:   PetscLayoutDestroy(&merge->rowmap);
4509:   PetscFree(merge);
4510:   return(0);
4511: }

4513:  #include <../src/mat/utils/freespace.h>
4514:  #include <petscbt.h>

4516: PetscErrorCode MatCreateMPIAIJSumSeqAIJNumeric(Mat seqmat,Mat mpimat)
4517: {
4518:   PetscErrorCode      ierr;
4519:   MPI_Comm            comm;
4520:   Mat_SeqAIJ          *a  =(Mat_SeqAIJ*)seqmat->data;
4521:   PetscMPIInt         size,rank,taga,*len_s;
4522:   PetscInt            N=mpimat->cmap->N,i,j,*owners,*ai=a->i,*aj;
4523:   PetscInt            proc,m;
4524:   PetscInt            **buf_ri,**buf_rj;
4525:   PetscInt            k,anzi,*bj_i,*bi,*bj,arow,bnzi,nextaj;
4526:   PetscInt            nrows,**buf_ri_k,**nextrow,**nextai;
4527:   MPI_Request         *s_waits,*r_waits;
4528:   MPI_Status          *status;
4529:   MatScalar           *aa=a->a;
4530:   MatScalar           **abuf_r,*ba_i;
4531:   Mat_Merge_SeqsToMPI *merge;
4532:   PetscContainer      container;

4535:   PetscObjectGetComm((PetscObject)mpimat,&comm);
4536:   PetscLogEventBegin(MAT_Seqstompinum,seqmat,0,0,0);

4538:   MPI_Comm_size(comm,&size);
4539:   MPI_Comm_rank(comm,&rank);

4541:   PetscObjectQuery((PetscObject)mpimat,"MatMergeSeqsToMPI",(PetscObject*)&container);
4542:   if (!container) SETERRQ(PetscObjectComm((PetscObject)mpimat),PETSC_ERR_PLIB,"Mat not created from MatCreateMPIAIJSumSeqAIJSymbolic");
4543:   PetscContainerGetPointer(container,(void**)&merge);

4545:   bi     = merge->bi;
4546:   bj     = merge->bj;
4547:   buf_ri = merge->buf_ri;
4548:   buf_rj = merge->buf_rj;

4550:   PetscMalloc1(size,&status);
4551:   owners = merge->rowmap->range;
4552:   len_s  = merge->len_s;

4554:   /* send and recv matrix values */
4555:   /*-----------------------------*/
4556:   PetscObjectGetNewTag((PetscObject)mpimat,&taga);
4557:   PetscPostIrecvScalar(comm,taga,merge->nrecv,merge->id_r,merge->len_r,&abuf_r,&r_waits);

4559:   PetscMalloc1(merge->nsend+1,&s_waits);
4560:   for (proc=0,k=0; proc<size; proc++) {
4561:     if (!len_s[proc]) continue;
4562:     i    = owners[proc];
4563:     MPI_Isend(aa+ai[i],len_s[proc],MPIU_MATSCALAR,proc,taga,comm,s_waits+k);
4564:     k++;
4565:   }

4567:   if (merge->nrecv) {MPI_Waitall(merge->nrecv,r_waits,status);}
4568:   if (merge->nsend) {MPI_Waitall(merge->nsend,s_waits,status);}
4569:   PetscFree(status);

4571:   PetscFree(s_waits);
4572:   PetscFree(r_waits);

4574:   /* insert mat values of mpimat */
4575:   /*----------------------------*/
4576:   PetscMalloc1(N,&ba_i);
4577:   PetscMalloc3(merge->nrecv,&buf_ri_k,merge->nrecv,&nextrow,merge->nrecv,&nextai);

4579:   for (k=0; k<merge->nrecv; k++) {
4580:     buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */
4581:     nrows       = *(buf_ri_k[k]);
4582:     nextrow[k]  = buf_ri_k[k]+1;  /* next row number of k-th recved i-structure */
4583:     nextai[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th recved i-structure */
4584:   }

4586:   /* set values of ba */
4587:   m = merge->rowmap->n;
4588:   for (i=0; i<m; i++) {
4589:     arow = owners[rank] + i;
4590:     bj_i = bj+bi[i];  /* col indices of the i-th row of mpimat */
4591:     bnzi = bi[i+1] - bi[i];
4592:     PetscArrayzero(ba_i,bnzi);

4594:     /* add local non-zero vals of this proc's seqmat into ba */
4595:     anzi   = ai[arow+1] - ai[arow];
4596:     aj     = a->j + ai[arow];
4597:     aa     = a->a + ai[arow];
4598:     nextaj = 0;
4599:     for (j=0; nextaj<anzi; j++) {
4600:       if (*(bj_i + j) == aj[nextaj]) { /* bcol == acol */
4601:         ba_i[j] += aa[nextaj++];
4602:       }
4603:     }

4605:     /* add received vals into ba */
4606:     for (k=0; k<merge->nrecv; k++) { /* k-th received message */
4607:       /* i-th row */
4608:       if (i == *nextrow[k]) {
4609:         anzi   = *(nextai[k]+1) - *nextai[k];
4610:         aj     = buf_rj[k] + *(nextai[k]);
4611:         aa     = abuf_r[k] + *(nextai[k]);
4612:         nextaj = 0;
4613:         for (j=0; nextaj<anzi; j++) {
4614:           if (*(bj_i + j) == aj[nextaj]) { /* bcol == acol */
4615:             ba_i[j] += aa[nextaj++];
4616:           }
4617:         }
4618:         nextrow[k]++; nextai[k]++;
4619:       }
4620:     }
4621:     MatSetValues(mpimat,1,&arow,bnzi,bj_i,ba_i,INSERT_VALUES);
4622:   }
4623:   MatAssemblyBegin(mpimat,MAT_FINAL_ASSEMBLY);
4624:   MatAssemblyEnd(mpimat,MAT_FINAL_ASSEMBLY);

4626:   PetscFree(abuf_r[0]);
4627:   PetscFree(abuf_r);
4628:   PetscFree(ba_i);
4629:   PetscFree3(buf_ri_k,nextrow,nextai);
4630:   PetscLogEventEnd(MAT_Seqstompinum,seqmat,0,0,0);
4631:   return(0);
4632: }

4634: PetscErrorCode  MatCreateMPIAIJSumSeqAIJSymbolic(MPI_Comm comm,Mat seqmat,PetscInt m,PetscInt n,Mat *mpimat)
4635: {
4636:   PetscErrorCode      ierr;
4637:   Mat                 B_mpi;
4638:   Mat_SeqAIJ          *a=(Mat_SeqAIJ*)seqmat->data;
4639:   PetscMPIInt         size,rank,tagi,tagj,*len_s,*len_si,*len_ri;
4640:   PetscInt            **buf_rj,**buf_ri,**buf_ri_k;
4641:   PetscInt            M=seqmat->rmap->n,N=seqmat->cmap->n,i,*owners,*ai=a->i,*aj=a->j;
4642:   PetscInt            len,proc,*dnz,*onz,bs,cbs;
4643:   PetscInt            k,anzi,*bi,*bj,*lnk,nlnk,arow,bnzi,nspacedouble=0;
4644:   PetscInt            nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextai;
4645:   MPI_Request         *si_waits,*sj_waits,*ri_waits,*rj_waits;
4646:   MPI_Status          *status;
4647:   PetscFreeSpaceList  free_space=NULL,current_space=NULL;
4648:   PetscBT             lnkbt;
4649:   Mat_Merge_SeqsToMPI *merge;
4650:   PetscContainer      container;

4653:   PetscLogEventBegin(MAT_Seqstompisym,seqmat,0,0,0);

4655:   /* make sure it is a PETSc comm */
4656:   PetscCommDuplicate(comm,&comm,NULL);
4657:   MPI_Comm_size(comm,&size);
4658:   MPI_Comm_rank(comm,&rank);

4660:   PetscNew(&merge);
4661:   PetscMalloc1(size,&status);

4663:   /* determine row ownership */
4664:   /*---------------------------------------------------------*/
4665:   PetscLayoutCreate(comm,&merge->rowmap);
4666:   PetscLayoutSetLocalSize(merge->rowmap,m);
4667:   PetscLayoutSetSize(merge->rowmap,M);
4668:   PetscLayoutSetBlockSize(merge->rowmap,1);
4669:   PetscLayoutSetUp(merge->rowmap);
4670:   PetscMalloc1(size,&len_si);
4671:   PetscMalloc1(size,&merge->len_s);

4673:   m      = merge->rowmap->n;
4674:   owners = merge->rowmap->range;

4676:   /* determine the number of messages to send, their lengths */
4677:   /*---------------------------------------------------------*/
4678:   len_s = merge->len_s;

4680:   len          = 0; /* length of buf_si[] */
4681:   merge->nsend = 0;
4682:   for (proc=0; proc<size; proc++) {
4683:     len_si[proc] = 0;
4684:     if (proc == rank) {
4685:       len_s[proc] = 0;
4686:     } else {
4687:       len_si[proc] = owners[proc+1] - owners[proc] + 1;
4688:       len_s[proc]  = ai[owners[proc+1]] - ai[owners[proc]]; /* num of nonzeros to be sent to [proc] */
4689:     }
4690:     if (len_s[proc]) {
4691:       merge->nsend++;
4692:       nrows = 0;
4693:       for (i=owners[proc]; i<owners[proc+1]; i++) {
4694:         if (ai[i+1] > ai[i]) nrows++;
4695:       }
4696:       len_si[proc] = 2*(nrows+1);
4697:       len         += len_si[proc];
4698:     }
4699:   }

4701:   /* determine the number and length of messages to receive for ij-structure */
4702:   /*-------------------------------------------------------------------------*/
4703:   PetscGatherNumberOfMessages(comm,NULL,len_s,&merge->nrecv);
4704:   PetscGatherMessageLengths2(comm,merge->nsend,merge->nrecv,len_s,len_si,&merge->id_r,&merge->len_r,&len_ri);

4706:   /* post the Irecv of j-structure */
4707:   /*-------------------------------*/
4708:   PetscCommGetNewTag(comm,&tagj);
4709:   PetscPostIrecvInt(comm,tagj,merge->nrecv,merge->id_r,merge->len_r,&buf_rj,&rj_waits);

4711:   /* post the Isend of j-structure */
4712:   /*--------------------------------*/
4713:   PetscMalloc2(merge->nsend,&si_waits,merge->nsend,&sj_waits);

4715:   for (proc=0, k=0; proc<size; proc++) {
4716:     if (!len_s[proc]) continue;
4717:     i    = owners[proc];
4718:     MPI_Isend(aj+ai[i],len_s[proc],MPIU_INT,proc,tagj,comm,sj_waits+k);
4719:     k++;
4720:   }

4722:   /* receives and sends of j-structure are complete */
4723:   /*------------------------------------------------*/
4724:   if (merge->nrecv) {MPI_Waitall(merge->nrecv,rj_waits,status);}
4725:   if (merge->nsend) {MPI_Waitall(merge->nsend,sj_waits,status);}

4727:   /* send and recv i-structure */
4728:   /*---------------------------*/
4729:   PetscCommGetNewTag(comm,&tagi);
4730:   PetscPostIrecvInt(comm,tagi,merge->nrecv,merge->id_r,len_ri,&buf_ri,&ri_waits);

4732:   PetscMalloc1(len+1,&buf_s);
4733:   buf_si = buf_s;  /* points to the beginning of k-th msg to be sent */
4734:   for (proc=0,k=0; proc<size; proc++) {
4735:     if (!len_s[proc]) continue;
4736:     /* form outgoing message for i-structure:
4737:          buf_si[0]:                 nrows to be sent
4738:                [1:nrows]:           row index (global)
4739:                [nrows+1:2*nrows+1]: i-structure index
4740:     */
4741:     /*-------------------------------------------*/
4742:     nrows       = len_si[proc]/2 - 1;
4743:     buf_si_i    = buf_si + nrows+1;
4744:     buf_si[0]   = nrows;
4745:     buf_si_i[0] = 0;
4746:     nrows       = 0;
4747:     for (i=owners[proc]; i<owners[proc+1]; i++) {
4748:       anzi = ai[i+1] - ai[i];
4749:       if (anzi) {
4750:         buf_si_i[nrows+1] = buf_si_i[nrows] + anzi; /* i-structure */
4751:         buf_si[nrows+1]   = i-owners[proc]; /* local row index */
4752:         nrows++;
4753:       }
4754:     }
4755:     MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,si_waits+k);
4756:     k++;
4757:     buf_si += len_si[proc];
4758:   }

4760:   if (merge->nrecv) {MPI_Waitall(merge->nrecv,ri_waits,status);}
4761:   if (merge->nsend) {MPI_Waitall(merge->nsend,si_waits,status);}

4763:   PetscInfo2(seqmat,"nsend: %D, nrecv: %D\n",merge->nsend,merge->nrecv);
4764:   for (i=0; i<merge->nrecv; i++) {
4765:     PetscInfo3(seqmat,"recv len_ri=%D, len_rj=%D from [%D]\n",len_ri[i],merge->len_r[i],merge->id_r[i]);
4766:   }

4768:   PetscFree(len_si);
4769:   PetscFree(len_ri);
4770:   PetscFree(rj_waits);
4771:   PetscFree2(si_waits,sj_waits);
4772:   PetscFree(ri_waits);
4773:   PetscFree(buf_s);
4774:   PetscFree(status);

4776:   /* compute a local seq matrix in each processor */
4777:   /*----------------------------------------------*/
4778:   /* allocate bi array and free space for accumulating nonzero column info */
4779:   PetscMalloc1(m+1,&bi);
4780:   bi[0] = 0;

4782:   /* create and initialize a linked list */
4783:   nlnk = N+1;
4784:   PetscLLCreate(N,N,nlnk,lnk,lnkbt);

4786:   /* initial FreeSpace size is 2*(num of local nnz(seqmat)) */
4787:   len  = ai[owners[rank+1]] - ai[owners[rank]];
4788:   PetscFreeSpaceGet(PetscIntMultTruncate(2,len)+1,&free_space);

4790:   current_space = free_space;

4792:   /* determine symbolic info for each local row */
4793:   PetscMalloc3(merge->nrecv,&buf_ri_k,merge->nrecv,&nextrow,merge->nrecv,&nextai);

4795:   for (k=0; k<merge->nrecv; k++) {
4796:     buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */
4797:     nrows       = *buf_ri_k[k];
4798:     nextrow[k]  = buf_ri_k[k] + 1;  /* next row number of k-th recved i-structure */
4799:     nextai[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th recved i-structure */
4800:   }

4802:   MatPreallocateInitialize(comm,m,n,dnz,onz);
4803:   len  = 0;
4804:   for (i=0; i<m; i++) {
4805:     bnzi = 0;
4806:     /* add local non-zero cols of this proc's seqmat into lnk */
4807:     arow  = owners[rank] + i;
4808:     anzi  = ai[arow+1] - ai[arow];
4809:     aj    = a->j + ai[arow];
4810:     PetscLLAddSorted(anzi,aj,N,nlnk,lnk,lnkbt);
4811:     bnzi += nlnk;
4812:     /* add received col data into lnk */
4813:     for (k=0; k<merge->nrecv; k++) { /* k-th received message */
4814:       if (i == *nextrow[k]) { /* i-th row */
4815:         anzi  = *(nextai[k]+1) - *nextai[k];
4816:         aj    = buf_rj[k] + *nextai[k];
4817:         PetscLLAddSorted(anzi,aj,N,nlnk,lnk,lnkbt);
4818:         bnzi += nlnk;
4819:         nextrow[k]++; nextai[k]++;
4820:       }
4821:     }
4822:     if (len < bnzi) len = bnzi;  /* =max(bnzi) */

4824:     /* if free space is not available, make more free space */
4825:     if (current_space->local_remaining<bnzi) {
4826:       PetscFreeSpaceGet(PetscIntSumTruncate(bnzi,current_space->total_array_size),&current_space);
4827:       nspacedouble++;
4828:     }
4829:     /* copy data into free space, then initialize lnk */
4830:     PetscLLClean(N,N,bnzi,lnk,current_space->array,lnkbt);
4831:     MatPreallocateSet(i+owners[rank],bnzi,current_space->array,dnz,onz);

4833:     current_space->array           += bnzi;
4834:     current_space->local_used      += bnzi;
4835:     current_space->local_remaining -= bnzi;

4837:     bi[i+1] = bi[i] + bnzi;
4838:   }

4840:   PetscFree3(buf_ri_k,nextrow,nextai);

4842:   PetscMalloc1(bi[m]+1,&bj);
4843:   PetscFreeSpaceContiguous(&free_space,bj);
4844:   PetscLLDestroy(lnk,lnkbt);

4846:   /* create symbolic parallel matrix B_mpi */
4847:   /*---------------------------------------*/
4848:   MatGetBlockSizes(seqmat,&bs,&cbs);
4849:   MatCreate(comm,&B_mpi);
4850:   if (n==PETSC_DECIDE) {
4851:     MatSetSizes(B_mpi,m,n,PETSC_DETERMINE,N);
4852:   } else {
4853:     MatSetSizes(B_mpi,m,n,PETSC_DETERMINE,PETSC_DETERMINE);
4854:   }
4855:   MatSetBlockSizes(B_mpi,bs,cbs);
4856:   MatSetType(B_mpi,MATMPIAIJ);
4857:   MatMPIAIJSetPreallocation(B_mpi,0,dnz,0,onz);
4858:   MatPreallocateFinalize(dnz,onz);
4859:   MatSetOption(B_mpi,MAT_NEW_NONZERO_ALLOCATION_ERR,PETSC_FALSE);

4861:   /* B_mpi is not ready for use - assembly will be done by MatCreateMPIAIJSumSeqAIJNumeric() */
4862:   B_mpi->assembled  = PETSC_FALSE;
4863:   merge->bi         = bi;
4864:   merge->bj         = bj;
4865:   merge->buf_ri     = buf_ri;
4866:   merge->buf_rj     = buf_rj;
4867:   merge->coi        = NULL;
4868:   merge->coj        = NULL;
4869:   merge->owners_co  = NULL;

4871:   PetscCommDestroy(&comm);

4873:   /* attach the supporting struct to B_mpi for reuse */
4874:   PetscContainerCreate(PETSC_COMM_SELF,&container);
4875:   PetscContainerSetPointer(container,merge);
4876:   PetscContainerSetUserDestroy(container,MatDestroy_MPIAIJ_SeqsToMPI);
4877:   PetscObjectCompose((PetscObject)B_mpi,"MatMergeSeqsToMPI",(PetscObject)container);
4878:   PetscContainerDestroy(&container);
4879:   *mpimat = B_mpi;

4881:   PetscLogEventEnd(MAT_Seqstompisym,seqmat,0,0,0);
4882:   return(0);
4883: }

4885: /*@C
4886:       MatCreateMPIAIJSumSeqAIJ - Creates a MATMPIAIJ matrix by adding sequential
4887:                  matrices from each processor

4889:     Collective

4891:    Input Parameters:
4892: +    comm - the communicator the parallel matrix will live on
4893: .    seqmat - the input sequential matrix on each processor
4894: .    m - number of local rows (or PETSC_DECIDE)
4895: .    n - number of local columns (or PETSC_DECIDE)
4896: -    scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX

4898:    Output Parameter:
4899: .    mpimat - the parallel matrix generated

4901:     Level: advanced

4903:    Notes:
4904:      The dimensions of the sequential matrix in each processor MUST be the same.
4905:      The input seqmat is included in the container "Mat_Merge_SeqsToMPI", and will be
4906:      destroyed when mpimat is destroyed. Call PetscObjectQuery() to access seqmat.
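
     A minimal usage sketch (illustrative; seqmat is assumed to be a SEQAIJ matrix of the same dimensions on every process):
.vb
       Mat C;
       MatCreateMPIAIJSumSeqAIJ(PETSC_COMM_WORLD,seqmat,PETSC_DECIDE,PETSC_DECIDE,MAT_INITIAL_MATRIX,&C);
       /* ... change the numerical values of seqmat, keeping its nonzero pattern ... */
       MatCreateMPIAIJSumSeqAIJ(PETSC_COMM_WORLD,seqmat,PETSC_DECIDE,PETSC_DECIDE,MAT_REUSE_MATRIX,&C);
.ve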
4907: @*/
4908: PetscErrorCode MatCreateMPIAIJSumSeqAIJ(MPI_Comm comm,Mat seqmat,PetscInt m,PetscInt n,MatReuse scall,Mat *mpimat)
4909: {
4911:   PetscMPIInt    size;

4914:   MPI_Comm_size(comm,&size);
4915:   if (size == 1) {
4916:     PetscLogEventBegin(MAT_Seqstompi,seqmat,0,0,0);
4917:     if (scall == MAT_INITIAL_MATRIX) {
4918:       MatDuplicate(seqmat,MAT_COPY_VALUES,mpimat);
4919:     } else {
4920:       MatCopy(seqmat,*mpimat,SAME_NONZERO_PATTERN);
4921:     }
4922:     PetscLogEventEnd(MAT_Seqstompi,seqmat,0,0,0);
4923:     return(0);
4924:   }
4925:   PetscLogEventBegin(MAT_Seqstompi,seqmat,0,0,0);
4926:   if (scall == MAT_INITIAL_MATRIX) {
4927:     MatCreateMPIAIJSumSeqAIJSymbolic(comm,seqmat,m,n,mpimat);
4928:   }
4929:   MatCreateMPIAIJSumSeqAIJNumeric(seqmat,*mpimat);
4930:   PetscLogEventEnd(MAT_Seqstompi,seqmat,0,0,0);
4931:   return(0);
4932: }

4934: /*@
4935:      MatMPIAIJGetLocalMat - Creates a SeqAIJ matrix from a MATMPIAIJ matrix by taking all its local rows and putting them into a sequential matrix with
4936:           mlocal rows and n columns, where mlocal is the row count obtained with MatGetLocalSize() and n is the global column count obtained
4937:           with MatGetSize()

4939:     Not Collective

4941:    Input Parameters:
4942: +    A - the matrix
4943: -    scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX

4945:    Output Parameter:
4946: .    A_loc - the local sequential matrix generated

4948:     Level: developer

4950:    Notes:
4951:      When the communicator associated with A has size 1 and MAT_INITIAL_MATRIX is requested, the matrix returned is the diagonal part of A.
4952:      If MAT_REUSE_MATRIX is requested with comm size 1, MatCopy(Adiag,*A_loc,SAME_NONZERO_PATTERN) is called.
4953:      This means that one can preallocate the proper sequential matrix first and then call this routine with MAT_REUSE_MATRIX to safely
4954:      modify the values of the returned A_loc.
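
     A minimal usage sketch (illustrative; A is assumed to be a MATMPIAIJ matrix):
.vb
       Mat A_loc;
       MatMPIAIJGetLocalMat(A,MAT_INITIAL_MATRIX,&A_loc);
       /* ... use A_loc; once the values of A have changed ... */
       MatMPIAIJGetLocalMat(A,MAT_REUSE_MATRIX,&A_loc);
       MatDestroy(&A_loc);
.ve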

4956: .seealso: MatGetOwnershipRange(), MatMPIAIJGetLocalMatCondensed()

4958: @*/
4959: PetscErrorCode MatMPIAIJGetLocalMat(Mat A,MatReuse scall,Mat *A_loc)
4960: {
4962:   Mat_MPIAIJ     *mpimat=(Mat_MPIAIJ*)A->data;
4963:   Mat_SeqAIJ     *mat,*a,*b;
4964:   PetscInt       *ai,*aj,*bi,*bj,*cmap=mpimat->garray;
4965:   MatScalar      *aa,*ba,*cam;
4966:   PetscScalar    *ca;
4967:   PetscMPIInt    size;
4968:   PetscInt       am=A->rmap->n,i,j,k,cstart=A->cmap->rstart;
4969:   PetscInt       *ci,*cj,col,ncols_d,ncols_o,jo;
4970:   PetscBool      match;

4973:   PetscStrbeginswith(((PetscObject)A)->type_name,MATMPIAIJ,&match);
4974:   if (!match) SETERRQ(PetscObjectComm((PetscObject)A), PETSC_ERR_SUP,"Requires MATMPIAIJ matrix as input");
4975:   MPI_Comm_size(PetscObjectComm((PetscObject)A),&size);
4976:   if (size == 1) {
4977:     if (scall == MAT_INITIAL_MATRIX) {
4978:       PetscObjectReference((PetscObject)mpimat->A);
4979:       *A_loc = mpimat->A;
4980:     } else if (scall == MAT_REUSE_MATRIX) {
4981:       MatCopy(mpimat->A,*A_loc,SAME_NONZERO_PATTERN);
4982:     }
4983:     return(0);
4984:   }

4986:   PetscLogEventBegin(MAT_Getlocalmat,A,0,0,0);
4987:   a = (Mat_SeqAIJ*)(mpimat->A)->data;
4988:   b = (Mat_SeqAIJ*)(mpimat->B)->data;
4989:   ai = a->i; aj = a->j; bi = b->i; bj = b->j;
4990:   aa = a->a; ba = b->a;
4991:   if (scall == MAT_INITIAL_MATRIX) {
4992:     PetscMalloc1(1+am,&ci);
4993:     ci[0] = 0;
4994:     for (i=0; i<am; i++) {
4995:       ci[i+1] = ci[i] + (ai[i+1] - ai[i]) + (bi[i+1] - bi[i]);
4996:     }
4997:     PetscMalloc1(1+ci[am],&cj);
4998:     PetscMalloc1(1+ci[am],&ca);
4999:     k    = 0;
5000:     for (i=0; i<am; i++) {
5001:       ncols_o = bi[i+1] - bi[i];
5002:       ncols_d = ai[i+1] - ai[i];
5003:       /* off-diagonal portion of A */
5004:       for (jo=0; jo<ncols_o; jo++) {
5005:         col = cmap[*bj];
5006:         if (col >= cstart) break;
5007:         cj[k]   = col; bj++;
5008:         ca[k++] = *ba++;
5009:       }
5010:       /* diagonal portion of A */
5011:       for (j=0; j<ncols_d; j++) {
5012:         cj[k]   = cstart + *aj++;
5013:         ca[k++] = *aa++;
5014:       }
5015:       /* off-diagonal portion of A */
5016:       for (j=jo; j<ncols_o; j++) {
5017:         cj[k]   = cmap[*bj++];
5018:         ca[k++] = *ba++;
5019:       }
5020:     }
5021:     /* put together the new matrix */
5022:     MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,A->cmap->N,ci,cj,ca,A_loc);
5023:     /* MatCreateSeqAIJWithArrays flags matrix so PETSc doesn't free the user's arrays. */
5024:     /* Since these are PETSc arrays, change flags to free them as necessary. */
5025:     mat          = (Mat_SeqAIJ*)(*A_loc)->data;
5026:     mat->free_a  = PETSC_TRUE;
5027:     mat->free_ij = PETSC_TRUE;
5028:     mat->nonew   = 0;
5029:   } else if (scall == MAT_REUSE_MATRIX) {
5030:     mat=(Mat_SeqAIJ*)(*A_loc)->data;
5031:     ci = mat->i; cj = mat->j; cam = mat->a;
5032:     for (i=0; i<am; i++) {
5033:       /* off-diagonal portion of A */
5034:       ncols_o = bi[i+1] - bi[i];
5035:       for (jo=0; jo<ncols_o; jo++) {
5036:         col = cmap[*bj];
5037:         if (col >= cstart) break;
5038:         *cam++ = *ba++; bj++;
5039:       }
5040:       /* diagonal portion of A */
5041:       ncols_d = ai[i+1] - ai[i];
5042:       for (j=0; j<ncols_d; j++) *cam++ = *aa++;
5043:       /* off-diagonal portion of A */
5044:       for (j=jo; j<ncols_o; j++) {
5045:         *cam++ = *ba++; bj++;
5046:       }
5047:     }
5048:   } else SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Invalid MatReuse %d",(int)scall);
5049:   PetscLogEventEnd(MAT_Getlocalmat,A,0,0,0);
5050:   return(0);
5051: }

5053: /*@C
5054:      MatMPIAIJGetLocalMatCondensed - Creates a SeqAIJ matrix from a MATMPIAIJ matrix by taking all its local rows and NON-ZERO columns

5056:     Not Collective

5058:    Input Parameters:
5059: +    A - the matrix
5060: .    scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
5061: -    row, col - index sets of rows and columns to extract (or NULL)

5063:    Output Parameter:
5064: .    A_loc - the local sequential matrix generated
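
    A minimal usage sketch (illustrative; passing NULL for row and col selects all local rows and all nonzero columns):
.vb
      Mat A_loc;
      MatMPIAIJGetLocalMatCondensed(A,MAT_INITIAL_MATRIX,NULL,NULL,&A_loc);
      /* ... use A_loc ... */
      MatDestroy(&A_loc);
.ve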

5066:     Level: developer

5068: .seealso: MatGetOwnershipRange(), MatMPIAIJGetLocalMat()

5070: @*/
5071: PetscErrorCode MatMPIAIJGetLocalMatCondensed(Mat A,MatReuse scall,IS *row,IS *col,Mat *A_loc)
5072: {
5073:   Mat_MPIAIJ     *a=(Mat_MPIAIJ*)A->data;
5075:   PetscInt       i,start,end,ncols,nzA,nzB,*cmap,imark,*idx;
5076:   IS             isrowa,iscola;
5077:   Mat            *aloc;
5078:   PetscBool      match;

5081:   PetscObjectTypeCompare((PetscObject)A,MATMPIAIJ,&match);
5082:   if (!match) SETERRQ(PetscObjectComm((PetscObject)A), PETSC_ERR_SUP,"Requires MATMPIAIJ matrix as input");
5083:   PetscLogEventBegin(MAT_Getlocalmatcondensed,A,0,0,0);
5084:   if (!row) {
5085:     start = A->rmap->rstart; end = A->rmap->rend;
5086:     ISCreateStride(PETSC_COMM_SELF,end-start,start,1,&isrowa);
5087:   } else {
5088:     isrowa = *row;
5089:   }
5090:   if (!col) {
5091:     start = A->cmap->rstart;
5092:     cmap  = a->garray;
5093:     nzA   = a->A->cmap->n;
5094:     nzB   = a->B->cmap->n;
5095:     PetscMalloc1(nzA+nzB, &idx);
5096:     ncols = 0;
5097:     for (i=0; i<nzB; i++) {
5098:       if (cmap[i] < start) idx[ncols++] = cmap[i];
5099:       else break;
5100:     }
5101:     imark = i;
5102:     for (i=0; i<nzA; i++) idx[ncols++] = start + i;
5103:     for (i=imark; i<nzB; i++) idx[ncols++] = cmap[i];
5104:     ISCreateGeneral(PETSC_COMM_SELF,ncols,idx,PETSC_OWN_POINTER,&iscola);
5105:   } else {
5106:     iscola = *col;
5107:   }
5108:   if (scall != MAT_INITIAL_MATRIX) {
5109:     PetscMalloc1(1,&aloc);
5110:     aloc[0] = *A_loc;
5111:   }
5112:   MatCreateSubMatrices(A,1,&isrowa,&iscola,scall,&aloc);
5113:   if (!col) { /* attach global id of condensed columns */
5114:     PetscObjectCompose((PetscObject)aloc[0],"_petsc_GetLocalMatCondensed_iscol",(PetscObject)iscola);
5115:   }
5116:   *A_loc = aloc[0];
5117:   PetscFree(aloc);
5118:   if (!row) {
5119:     ISDestroy(&isrowa);
5120:   }
5121:   if (!col) {
5122:     ISDestroy(&iscola);
5123:   }
5124:   PetscLogEventEnd(MAT_Getlocalmatcondensed,A,0,0,0);
5125:   return(0);
5126: }

5128: /*
5129:  * Create a sequential AIJ matrix based on row indices; all columns of a row are extracted once the row is matched.
5130:  * Rows can be local or remote. The routine is designed to be scalable in memory so that nothing is based
5131:  * on a global size.
5132:  * */
5133: PetscErrorCode MatCreateSeqSubMatrixWithRows_Private(Mat P,IS rows,Mat *P_oth)
5134: {
5135:   Mat_MPIAIJ               *p=(Mat_MPIAIJ*)P->data;
5136:   Mat_SeqAIJ               *pd=(Mat_SeqAIJ*)(p->A)->data,*po=(Mat_SeqAIJ*)(p->B)->data,*p_oth;
5137:   PetscInt                 plocalsize,nrows,*ilocal,*oilocal,i,lidx,*nrcols,*nlcols,ncol;
5138:   PetscMPIInt              owner;
5139:   PetscSFNode              *iremote,*oiremote;
5140:   const PetscInt           *lrowindices;
5141:   PetscErrorCode           ierr;
5142:   PetscSF                  sf,osf;
5143:   PetscInt                 pcstart,*roffsets,*loffsets,*pnnz,j;
5144:   PetscInt                 ontotalcols,dntotalcols,ntotalcols,nout;
5145:   MPI_Comm                 comm;
5146:   ISLocalToGlobalMapping   mapping;

5149:   PetscObjectGetComm((PetscObject)P,&comm);
5150:   /* plocalsize is the number of roots
5151:    * nrows is the number of leaves
5152:    * */
5153:   MatGetLocalSize(P,&plocalsize,NULL);
5154:   ISGetLocalSize(rows,&nrows);
5155:   PetscCalloc1(nrows,&iremote);
5156:   ISGetIndices(rows,&lrowindices);
5157:   for (i=0;i<nrows;i++) {
5158:     /* Find a remote index and an owner for a row
5159:      * The row could be local or remote
5160:      * */
5161:     owner = 0;
5162:     lidx  = 0;
5163:     PetscLayoutFindOwnerIndex(P->rmap,lrowindices[i],&owner,&lidx);
5164:     iremote[i].index = lidx;
5165:     iremote[i].rank  = owner;
5166:   }
5167:   /* Create SF to communicate how many nonzero columns for each row */
5168:   PetscSFCreate(comm,&sf);
5169:    * SF will figure out the number of nonzero columns for each row, and their
5170:    * offsets
5171:    * */
5172:   PetscSFSetGraph(sf,plocalsize,nrows,NULL,PETSC_OWN_POINTER,iremote,PETSC_OWN_POINTER);
5173:   PetscSFSetFromOptions(sf);
5174:   PetscSFSetUp(sf);

5176:   PetscCalloc1(2*(plocalsize+1),&roffsets);
5177:   PetscCalloc1(2*plocalsize,&nrcols);
5178:   PetscCalloc1(nrows,&pnnz);
5179:   roffsets[0] = 0;
5180:   roffsets[1] = 0;
5181:   for (i=0;i<plocalsize;i++) {
5182:     /* diag */
5183:     nrcols[i*2+0] = pd->i[i+1] - pd->i[i];
5184:     /* off diag */
5185:     nrcols[i*2+1] = po->i[i+1] - po->i[i];
5186:     /* compute offsets so that we know the relative location of each row */
5187:     roffsets[(i+1)*2+0] = roffsets[i*2+0] + nrcols[i*2+0];
5188:     roffsets[(i+1)*2+1] = roffsets[i*2+1] + nrcols[i*2+1];
5189:   }
5190:   PetscCalloc1(2*nrows,&nlcols);
5191:   PetscCalloc1(2*nrows,&loffsets);
5192:   /* 'r' means root, and 'l' means leaf */
5193:   PetscSFBcastBegin(sf,MPIU_2INT,nrcols,nlcols);
5194:   PetscSFBcastBegin(sf,MPIU_2INT,roffsets,loffsets);
5195:   PetscSFBcastEnd(sf,MPIU_2INT,nrcols,nlcols);
5196:   PetscSFBcastEnd(sf,MPIU_2INT,roffsets,loffsets);
5197:   PetscSFDestroy(&sf);
5198:   PetscFree(roffsets);
5199:   PetscFree(nrcols);
5200:   dntotalcols = 0;
5201:   ontotalcols = 0;
5202:   ncol = 0;
5203:   for (i=0;i<nrows;i++) {
5204:     pnnz[i] = nlcols[i*2+0] + nlcols[i*2+1];
5205:     ncol = PetscMax(pnnz[i],ncol);
5206:     /* diag */
5207:     dntotalcols += nlcols[i*2+0];
5208:     /* off diag */
5209:     ontotalcols += nlcols[i*2+1];
5210:   }
5211:   /* We do not need to figure out the right number of columns
5212:    * since all the calculations will be done by going through the raw data
5213:    * */
5214:   MatCreateSeqAIJ(PETSC_COMM_SELF,nrows,ncol,0,pnnz,P_oth);
5215:   MatSetUp(*P_oth);
5216:   PetscFree(pnnz);
5217:   p_oth = (Mat_SeqAIJ*) (*P_oth)->data;
5218:   /* diag */
5219:   PetscCalloc1(dntotalcols,&iremote);
5220:   /* off diag */
5221:   PetscCalloc1(ontotalcols,&oiremote);
5222:   /* diag */
5223:   PetscCalloc1(dntotalcols,&ilocal);
5224:   /* off diag */
5225:   PetscCalloc1(ontotalcols,&oilocal);
5226:   dntotalcols = 0;
5227:   ontotalcols = 0;
5228:   ntotalcols  = 0;
5229:   for (i=0;i<nrows;i++) {
5230:     owner = 0;
5231:     PetscLayoutFindOwnerIndex(P->rmap,lrowindices[i],&owner,NULL);
5232:     /* Set iremote for diag matrix */
5233:     for (j=0;j<nlcols[i*2+0];j++) {
5234:       iremote[dntotalcols].index   = loffsets[i*2+0] + j;
5235:       iremote[dntotalcols].rank    = owner;
5236:       /* P_oth is SeqAIJ, so ilocal needs to point to the first part of memory */
5237:       ilocal[dntotalcols++]        = ntotalcols++;
5238:     }
5239:     /* off diag */
5240:     for (j=0;j<nlcols[i*2+1];j++) {
5241:       oiremote[ontotalcols].index   = loffsets[i*2+1] + j;
5242:       oiremote[ontotalcols].rank    = owner;
5243:       oilocal[ontotalcols++]        = ntotalcols++;
5244:     }
5245:   }
5246:   ISRestoreIndices(rows,&lrowindices);
5247:   PetscFree(loffsets);
5248:   PetscFree(nlcols);
5249:   PetscSFCreate(comm,&sf);
5250:   /* P serves as roots and P_oth is leaves
5251:    * Diag matrix
5252:    * */
5253:   PetscSFSetGraph(sf,pd->i[plocalsize],dntotalcols,ilocal,PETSC_OWN_POINTER,iremote,PETSC_OWN_POINTER);
5254:   PetscSFSetFromOptions(sf);
5255:   PetscSFSetUp(sf);

5257:   PetscSFCreate(comm,&osf);
5258:   /* Off diag */
5259:   PetscSFSetGraph(osf,po->i[plocalsize],ontotalcols,oilocal,PETSC_OWN_POINTER,oiremote,PETSC_OWN_POINTER);
5260:   PetscSFSetFromOptions(osf);
5261:   PetscSFSetUp(osf);
5262:   /* We operate on the matrix internal data to save memory */
5263:   PetscSFBcastBegin(sf,MPIU_SCALAR,pd->a,p_oth->a);
5264:   PetscSFBcastBegin(osf,MPIU_SCALAR,po->a,p_oth->a);
5265:   MatGetOwnershipRangeColumn(P,&pcstart,NULL);
5266:   /* Convert to global indices for diag matrix */
5267:   for (i=0;i<pd->i[plocalsize];i++) pd->j[i] += pcstart;
5268:   PetscSFBcastBegin(sf,MPIU_INT,pd->j,p_oth->j);
5269:   /* We want P_oth to store global indices */
5270:   ISLocalToGlobalMappingCreate(comm,1,p->B->cmap->n,p->garray,PETSC_COPY_VALUES,&mapping);
5271:   /* Use memory scalable approach */
5272:   ISLocalToGlobalMappingSetType(mapping,ISLOCALTOGLOBALMAPPINGHASH);
5273:   ISLocalToGlobalMappingApply(mapping,po->i[plocalsize],po->j,po->j);
5274:   PetscSFBcastBegin(osf,MPIU_INT,po->j,p_oth->j);
5275:   PetscSFBcastEnd(sf,MPIU_INT,pd->j,p_oth->j);
5276:   /* Convert back to local indices */
5277:   for (i=0;i<pd->i[plocalsize];i++) pd->j[i] -= pcstart;
5278:   PetscSFBcastEnd(osf,MPIU_INT,po->j,p_oth->j);
5279:   nout = 0;
5280:   ISGlobalToLocalMappingApply(mapping,IS_GTOLM_DROP,po->i[plocalsize],po->j,&nout,po->j);
5281:   if (nout != po->i[plocalsize]) SETERRQ2(comm,PETSC_ERR_ARG_INCOMP,"n %D does not equal to nout %D \n",po->i[plocalsize],nout);
5282:   ISLocalToGlobalMappingDestroy(&mapping);
5283:   /* Exchange values */
5284:   PetscSFBcastEnd(sf,MPIU_SCALAR,pd->a,p_oth->a);
5285:   PetscSFBcastEnd(osf,MPIU_SCALAR,po->a,p_oth->a);
5286:   /* Stop PETSc from shrinking memory */
5287:   for (i=0;i<nrows;i++) p_oth->ilen[i] = p_oth->imax[i];
5288:   MatAssemblyBegin(*P_oth,MAT_FINAL_ASSEMBLY);
5289:   MatAssemblyEnd(*P_oth,MAT_FINAL_ASSEMBLY);
5290:   /* Attach PetscSF objects to P_oth so that we can reuse it later */
5291:   PetscObjectCompose((PetscObject)*P_oth,"diagsf",(PetscObject)sf);
5292:   PetscObjectCompose((PetscObject)*P_oth,"offdiagsf",(PetscObject)osf);
5293:   PetscSFDestroy(&sf);
5294:   PetscSFDestroy(&osf);
5295:   return(0);
5296: }

5298: /*
5299:  * Creates a SeqAIJ matrix by taking the rows of B that correspond to nonzero columns of local A
5300:  * This supports MPIAIJ and MAIJ
5301:  * */
5302: PetscErrorCode MatGetBrowsOfAcols_MPIXAIJ(Mat A,Mat P,PetscInt dof,MatReuse reuse,Mat *P_oth)
5303: {
5304:   Mat_MPIAIJ            *a=(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data;
5305:   Mat_SeqAIJ            *p_oth;
5306:   Mat_SeqAIJ            *pd=(Mat_SeqAIJ*)(p->A)->data,*po=(Mat_SeqAIJ*)(p->B)->data;
5307:   IS                    rows,map;
5308:   PetscHMapI            hamp;
5309:   PetscInt              i,htsize,*rowindices,off,*mapping,key,count;
5310:   MPI_Comm              comm;
5311:   PetscSF               sf,osf;
5312:   PetscBool             has;
5313:   PetscErrorCode        ierr;

5316:   PetscObjectGetComm((PetscObject)A,&comm);
5317:   PetscLogEventBegin(MAT_GetBrowsOfAocols,A,P,0,0);
5318:   /* If it is the first time, create an index set of off-diag nonzero columns of A,
5319:    *  and then create a submatrix (that often is an overlapping matrix)
5320:    * */
5321:   if (reuse == MAT_INITIAL_MATRIX) {
5322:     /* Use a hash table to figure out unique keys */
5323:     PetscHMapICreate(&hamp);
5324:     PetscHMapIResize(hamp,a->B->cmap->n);
5325:     PetscCalloc1(a->B->cmap->n,&mapping);
5326:     count = 0;
5327:     /* Assume that a->garray is sorted; otherwise the following does not make sense */
5328:     for (i=0;i<a->B->cmap->n;i++) {
5329:       key  = a->garray[i]/dof;
5330:       PetscHMapIHas(hamp,key,&has);
5331:       if (!has) {
5332:         mapping[i] = count;
5333:         PetscHMapISet(hamp,key,count++);
5334:       } else {
5335:         /* Current 'i' has the same value as the previous step */
5336:         mapping[i] = count-1;
5337:       }
5338:     }
5339:     ISCreateGeneral(comm,a->B->cmap->n,mapping,PETSC_OWN_POINTER,&map);
5340:     PetscHMapIGetSize(hamp,&htsize);
5341:     if (htsize!=count) SETERRQ2(comm,PETSC_ERR_ARG_INCOMP," Size of hash map %D is inconsistent with count %D \n",htsize,count);
5342:     PetscCalloc1(htsize,&rowindices);
5343:     off = 0;
5344:     PetscHMapIGetKeys(hamp,&off,rowindices);
5345:     PetscHMapIDestroy(&hamp);
5346:     PetscSortInt(htsize,rowindices);
5347:     ISCreateGeneral(comm,htsize,rowindices,PETSC_OWN_POINTER,&rows);
5348:     /* In case the matrix was already created and the user wants to recreate it */
5349:     MatDestroy(P_oth);
5350:     MatCreateSeqSubMatrixWithRows_Private(P,rows,P_oth);
5351:     PetscObjectCompose((PetscObject)*P_oth,"aoffdiagtopothmapping",(PetscObject)map);
5352:     ISDestroy(&map);
5353:     ISDestroy(&rows);
5354:   } else if (reuse == MAT_REUSE_MATRIX) {
5355:     /* If the matrix was already created, we simply update the values using the SF objects
5356:      * that were attached to the matrix earlier.
5357:      *  */
5358:     PetscObjectQuery((PetscObject)*P_oth,"diagsf",(PetscObject*)&sf);
5359:     PetscObjectQuery((PetscObject)*P_oth,"offdiagsf",(PetscObject*)&osf);
5360:     if (!sf || !osf) SETERRQ(comm,PETSC_ERR_ARG_NULL,"Matrix is not initialized yet");
5361:     p_oth = (Mat_SeqAIJ*) (*P_oth)->data;
5362:     /* Update values in place */
5363:     PetscSFBcastBegin(sf,MPIU_SCALAR,pd->a,p_oth->a);
5364:     PetscSFBcastBegin(osf,MPIU_SCALAR,po->a,p_oth->a);
5365:     PetscSFBcastEnd(sf,MPIU_SCALAR,pd->a,p_oth->a);
5366:     PetscSFBcastEnd(osf,MPIU_SCALAR,po->a,p_oth->a);
5367:   } else SETERRQ(comm,PETSC_ERR_ARG_UNKNOWN_TYPE,"Unknown reuse type");
5368:   PetscLogEventEnd(MAT_GetBrowsOfAocols,A,P,0,0);
5369:   return(0);
5370: }

5372: /*@C
5373:     MatGetBrowsOfAcols - Creates a SeqAIJ matrix by taking rows of B that correspond to nonzero columns of local A

5375:     Collective on Mat

5377:    Input Parameters:
5378: +    A,B - the matrices in mpiaij format
5379: .    scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
5380: -    rowb, colb - index sets of rows and columns of B to extract (or NULL)

5382:    Output Parameter:
5383: +    rowb, colb - index sets of rows and columns of B to extract
5384: -    B_seq - the sequential matrix generated

5386:     Level: developer

5388: @*/
5389: PetscErrorCode MatGetBrowsOfAcols(Mat A,Mat B,MatReuse scall,IS *rowb,IS *colb,Mat *B_seq)
5390: {
5391:   Mat_MPIAIJ     *a=(Mat_MPIAIJ*)A->data;
5393:   PetscInt       *idx,i,start,ncols,nzA,nzB,*cmap,imark;
5394:   IS             isrowb,iscolb;
5395:   Mat            *bseq=NULL;

5398:   if (A->cmap->rstart != B->rmap->rstart || A->cmap->rend != B->rmap->rend) {
5399:     SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, (%D, %D) != (%D,%D)",A->cmap->rstart,A->cmap->rend,B->rmap->rstart,B->rmap->rend);
5400:   }
5401:   PetscLogEventBegin(MAT_GetBrowsOfAcols,A,B,0,0);

5403:   if (scall == MAT_INITIAL_MATRIX) {
5404:     start = A->cmap->rstart;
5405:     cmap  = a->garray;
5406:     nzA   = a->A->cmap->n;
5407:     nzB   = a->B->cmap->n;
5408:     PetscMalloc1(nzA+nzB, &idx);
5409:     ncols = 0;
5410:     for (i=0; i<nzB; i++) {  /* row < local row index */
5411:       if (cmap[i] < start) idx[ncols++] = cmap[i];
5412:       else break;
5413:     }
5414:     imark = i;
5415:     for (i=0; i<nzA; i++) idx[ncols++] = start + i;  /* local rows */
5416:     for (i=imark; i<nzB; i++) idx[ncols++] = cmap[i]; /* row > local row index */
5417:     ISCreateGeneral(PETSC_COMM_SELF,ncols,idx,PETSC_OWN_POINTER,&isrowb);
5418:     ISCreateStride(PETSC_COMM_SELF,B->cmap->N,0,1,&iscolb);
5419:   } else {
5420:     if (!rowb || !colb) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"IS rowb and colb must be provided for MAT_REUSE_MATRIX");
5421:     isrowb  = *rowb; iscolb = *colb;
5422:     PetscMalloc1(1,&bseq);
5423:     bseq[0] = *B_seq;
5424:   }
5425:   MatCreateSubMatrices(B,1,&isrowb,&iscolb,scall,&bseq);
5426:   *B_seq = bseq[0];
5427:   PetscFree(bseq);
5428:   if (!rowb) {
5429:     ISDestroy(&isrowb);
5430:   } else {
5431:     *rowb = isrowb;
5432:   }
5433:   if (!colb) {
5434:     ISDestroy(&iscolb);
5435:   } else {
5436:     *colb = iscolb;
5437:   }
5438:   PetscLogEventEnd(MAT_GetBrowsOfAcols,A,B,0,0);
5439:   return(0);
5440: }

5442: /*
5443:     MatGetBrowsOfAoCols_MPIAIJ - Creates a SeqAIJ matrix by taking rows of B that correspond to nonzero columns
5444:     of the OFF-DIAGONAL portion of local A

5446:     Collective on Mat

5448:    Input Parameters:
5449: +    A,B - the matrices in mpiaij format
5450: -    scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX

5452:    Output Parameter:
5453: +    startsj_s - starting point in B's sending j-arrays, saved for MAT_REUSE (or NULL)
5454: .    startsj_r - starting point in B's receiving j-arrays, saved for MAT_REUSE (or NULL)
5455: .    bufa_ptr - array for sending matrix values, saved for MAT_REUSE (or NULL)
5456: -    B_oth - the sequential matrix generated with size aBn=a->B->cmap->n by B->cmap->N

5458:     Developer Notes: This directly accesses information inside the VecScatter associated with the matrix-vector product
5459:      for this matrix. This is not desirable.

5461:     Level: developer

5463: */
5464: PetscErrorCode MatGetBrowsOfAoCols_MPIAIJ(Mat A,Mat B,MatReuse scall,PetscInt **startsj_s,PetscInt **startsj_r,MatScalar **bufa_ptr,Mat *B_oth)
5465: {
5466:   PetscErrorCode         ierr;
5467:   Mat_MPIAIJ             *a=(Mat_MPIAIJ*)A->data;
5468:   Mat_SeqAIJ             *b_oth;
5469:   VecScatter             ctx;
5470:   MPI_Comm               comm;
5471:   const PetscMPIInt      *rprocs,*sprocs;
5472:   const PetscInt         *srow,*rstarts,*sstarts;
5473:   PetscInt               *rowlen,*bufj,*bufJ,ncols = 0,aBn=a->B->cmap->n,row,*b_othi,*b_othj,*rvalues=NULL,*svalues=NULL,*cols,sbs,rbs;
5474:   PetscInt               i,j,k=0,l,ll,nrecvs,nsends,nrows,*rstartsj = 0,*sstartsj,len;
5475:   PetscScalar            *b_otha,*bufa,*bufA,*vals = NULL;
5476:   MPI_Request            *rwaits = NULL,*swaits = NULL;
5477:   MPI_Status             rstatus;
5478:   PetscMPIInt            jj,size,tag,rank,nsends_mpi,nrecvs_mpi;

5481:   PetscObjectGetComm((PetscObject)A,&comm);
5482:   MPI_Comm_size(comm,&size);

5484:   if (A->cmap->rstart != B->rmap->rstart || A->cmap->rend != B->rmap->rend) {
5485:     SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, (%d, %d) != (%d,%d)",A->cmap->rstart,A->cmap->rend,B->rmap->rstart,B->rmap->rend);
5486:   }
5487:   PetscLogEventBegin(MAT_GetBrowsOfAocols,A,B,0,0);
5488:   MPI_Comm_rank(comm,&rank);

5490:   if (size == 1) {
5491:     startsj_s = NULL;
5492:     bufa_ptr  = NULL;
5493:     *B_oth    = NULL;
5494:     return(0);
5495:   }

5497:   ctx = a->Mvctx;
5498:   tag = ((PetscObject)ctx)->tag;

5500:   if (ctx->inuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE," Scatter ctx already in use");
5501:   VecScatterGetRemote_Private(ctx,PETSC_TRUE/*send*/,&nsends,&sstarts,&srow,&sprocs,&sbs);
5502:   /* rprocs[] must be ordered so that indices received from them are ordered in rvalues[], which is key to algorithms used in this subroutine */
5503:   VecScatterGetRemoteOrdered_Private(ctx,PETSC_FALSE/*recv*/,&nrecvs,&rstarts,NULL/*indices not needed*/,&rprocs,&rbs);
5504:   PetscMPIIntCast(nsends,&nsends_mpi);
5505:   PetscMPIIntCast(nrecvs,&nrecvs_mpi);
5506:   PetscMalloc2(nrecvs,&rwaits,nsends,&swaits);

5508:   if (!startsj_s || !bufa_ptr) scall = MAT_INITIAL_MATRIX;
5509:   if (scall == MAT_INITIAL_MATRIX) {
5510:     /* i-array */
5511:     /*---------*/
5512:     /*  post receives */
5513:     if (nrecvs) {PetscMalloc1(rbs*(rstarts[nrecvs] - rstarts[0]),&rvalues);} /* rstarts can be NULL when nrecvs=0 */
5514:     for (i=0; i<nrecvs; i++) {
5515:       rowlen = rvalues + rstarts[i]*rbs;
5516:       nrows  = (rstarts[i+1]-rstarts[i])*rbs; /* num of indices to be received */
5517:       MPI_Irecv(rowlen,nrows,MPIU_INT,rprocs[i],tag,comm,rwaits+i);
5518:     }

5520:     /* pack the outgoing message */
5521:     PetscMalloc2(nsends+1,&sstartsj,nrecvs+1,&rstartsj);

5523:     sstartsj[0] = 0;
5524:     rstartsj[0] = 0;
5525:     len         = 0; /* total length of j or a array to be sent */
5526:     if (nsends) {
5527:       k    = sstarts[0]; /* ATTENTION: sstarts[0] and rstarts[0] are not necessarily zero */
5528:       PetscMalloc1(sbs*(sstarts[nsends]-sstarts[0]),&svalues);
5529:     }
5530:     for (i=0; i<nsends; i++) {
5531:       rowlen = svalues + (sstarts[i]-sstarts[0])*sbs;
5532:       nrows  = sstarts[i+1]-sstarts[i]; /* num of block rows */
5533:       for (j=0; j<nrows; j++) {
5534:         row = srow[k] + B->rmap->range[rank]; /* global row idx */
5535:         for (l=0; l<sbs; l++) {
5536:           MatGetRow_MPIAIJ(B,row+l,&ncols,NULL,NULL); /* rowlength */

5538:           rowlen[j*sbs+l] = ncols;

5540:           len += ncols;
5541:           MatRestoreRow_MPIAIJ(B,row+l,&ncols,NULL,NULL);
5542:         }
5543:         k++;
5544:       }
5545:       MPI_Isend(rowlen,nrows*sbs,MPIU_INT,sprocs[i],tag,comm,swaits+i);

5547:       sstartsj[i+1] = len;  /* starting point of (i+1)-th outgoing msg in bufj and bufa */
5548:     }
5549:     /* recvs and sends of i-array are completed */
5550:     i = nrecvs;
5551:     while (i--) {
5552:       MPI_Waitany(nrecvs_mpi,rwaits,&jj,&rstatus);
5553:     }
5554:     if (nsends) {MPI_Waitall(nsends_mpi,swaits,MPI_STATUSES_IGNORE);}
5555:     PetscFree(svalues);

5557:     /* allocate buffers for sending j and a arrays */
5558:     PetscMalloc1(len+1,&bufj);
5559:     PetscMalloc1(len+1,&bufa);

5561:     /* create i-array of B_oth */
5562:     PetscMalloc1(aBn+2,&b_othi);

5564:     b_othi[0] = 0;
5565:     len       = 0; /* total length of j or a array to be received */
5566:     k         = 0;
5567:     for (i=0; i<nrecvs; i++) {
5568:       rowlen = rvalues + (rstarts[i]-rstarts[0])*rbs;
5569:       nrows  = (rstarts[i+1]-rstarts[i])*rbs; /* num of rows to be received */
5570:       for (j=0; j<nrows; j++) {
5571:         b_othi[k+1] = b_othi[k] + rowlen[j];
5572:         PetscIntSumError(rowlen[j],len,&len);
5573:         k++;
5574:       }
5575:       rstartsj[i+1] = len; /* starting point of (i+1)-th incoming msg in bufj and bufa */
5576:     }
5577:     PetscFree(rvalues);

5579:     /* allocate space for the j and a arrays of B_oth */
5580:     PetscMalloc1(b_othi[aBn]+1,&b_othj);
5581:     PetscMalloc1(b_othi[aBn]+1,&b_otha);

5583:     /* j-array */
5584:     /*---------*/
5585:     /*  post receives of j-array */
5586:     for (i=0; i<nrecvs; i++) {
5587:       nrows = rstartsj[i+1]-rstartsj[i]; /* length of the msg received */
5588:       MPI_Irecv(b_othj+rstartsj[i],nrows,MPIU_INT,rprocs[i],tag,comm,rwaits+i);
5589:     }

5591:     /* pack the outgoing message j-array */
5592:     if (nsends) k = sstarts[0];
5593:     for (i=0; i<nsends; i++) {
5594:       nrows = sstarts[i+1]-sstarts[i]; /* num of block rows */
5595:       bufJ  = bufj+sstartsj[i];
5596:       for (j=0; j<nrows; j++) {
5597:         row = srow[k++] + B->rmap->range[rank];  /* global row idx */
5598:         for (ll=0; ll<sbs; ll++) {
5599:           MatGetRow_MPIAIJ(B,row+ll,&ncols,&cols,NULL);
5600:           for (l=0; l<ncols; l++) {
5601:             *bufJ++ = cols[l];
5602:           }
5603:           MatRestoreRow_MPIAIJ(B,row+ll,&ncols,&cols,NULL);
5604:         }
5605:       }
5606:       MPI_Isend(bufj+sstartsj[i],sstartsj[i+1]-sstartsj[i],MPIU_INT,sprocs[i],tag,comm,swaits+i);
5607:     }

5609:     /* recvs and sends of j-array are completed */
5610:     i = nrecvs;
5611:     while (i--) {
5612:       MPI_Waitany(nrecvs_mpi,rwaits,&jj,&rstatus);
5613:     }
5614:     if (nsends) {MPI_Waitall(nsends_mpi,swaits,MPI_STATUSES_IGNORE);}
5615:   } else if (scall == MAT_REUSE_MATRIX) {
5616:     sstartsj = *startsj_s;
5617:     rstartsj = *startsj_r;
5618:     bufa     = *bufa_ptr;
5619:     b_oth    = (Mat_SeqAIJ*)(*B_oth)->data;
5620:     b_otha   = b_oth->a;
5621:   } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE, "Matrix P does not possess an object container");

5623:   /* a-array */
5624:   /*---------*/
5625:   /*  post receives of a-array */
5626:   for (i=0; i<nrecvs; i++) {
5627:     nrows = rstartsj[i+1]-rstartsj[i]; /* length of the msg received */
5628:     MPI_Irecv(b_otha+rstartsj[i],nrows,MPIU_SCALAR,rprocs[i],tag,comm,rwaits+i);
5629:   }

5631:   /* pack the outgoing message a-array */
5632:   if (nsends) k = sstarts[0];
5633:   for (i=0; i<nsends; i++) {
5634:     nrows = sstarts[i+1]-sstarts[i]; /* num of block rows */
5635:     bufA  = bufa+sstartsj[i];
5636:     for (j=0; j<nrows; j++) {
5637:       row = srow[k++] + B->rmap->range[rank];  /* global row idx */
5638:       for (ll=0; ll<sbs; ll++) {
5639:         MatGetRow_MPIAIJ(B,row+ll,&ncols,NULL,&vals);
5640:         for (l=0; l<ncols; l++) {
5641:           *bufA++ = vals[l];
5642:         }
5643:         MatRestoreRow_MPIAIJ(B,row+ll,&ncols,NULL,&vals);
5644:       }
5645:     }
5646:     MPI_Isend(bufa+sstartsj[i],sstartsj[i+1]-sstartsj[i],MPIU_SCALAR,sprocs[i],tag,comm,swaits+i);
5647:   }
5648:   /* recvs and sends of a-array are completed */
5649:   i = nrecvs;
5650:   while (i--) {
5651:     MPI_Waitany(nrecvs_mpi,rwaits,&jj,&rstatus);
5652:   }
5653:   if (nsends) {MPI_Waitall(nsends_mpi,swaits,MPI_STATUSES_IGNORE);}
5654:   PetscFree2(rwaits,swaits);

5656:   if (scall == MAT_INITIAL_MATRIX) {
5657:     /* put together the new matrix */
5658:     MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,aBn,B->cmap->N,b_othi,b_othj,b_otha,B_oth);

5660:     /* MatCreateSeqAIJWithArrays flags matrix so PETSc doesn't free the user's arrays. */
5661:     /* Since these are PETSc arrays, change flags to free them as necessary. */
5662:     b_oth          = (Mat_SeqAIJ*)(*B_oth)->data;
5663:     b_oth->free_a  = PETSC_TRUE;
5664:     b_oth->free_ij = PETSC_TRUE;
5665:     b_oth->nonew   = 0;

5667:     PetscFree(bufj);
5668:     if (!startsj_s || !bufa_ptr) {
5669:       PetscFree2(sstartsj,rstartsj);
5670:       PetscFree(bufa);
5671:     } else {
5672:       *startsj_s = sstartsj;
5673:       *startsj_r = rstartsj;
5674:       *bufa_ptr  = bufa;
5675:     }
5676:   }

5678:   VecScatterRestoreRemote_Private(ctx,PETSC_TRUE,&nsends,&sstarts,&srow,&sprocs,&sbs);
5679:   VecScatterRestoreRemoteOrdered_Private(ctx,PETSC_FALSE,&nrecvs,&rstarts,NULL,&rprocs,&rbs);
5680:   PetscLogEventEnd(MAT_GetBrowsOfAocols,A,B,0,0);
5681:   return(0);
5682: }
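
/*
   Typical call pattern for the routine above (a minimal sketch; the prototype of
   MatGetBrowsOfAoCols_MPIAIJ is assumed from the argument names used in its body):

     Mat       B_oth = NULL;
     PetscInt  *startsj_s = NULL,*startsj_r = NULL;
     MatScalar *bufa = NULL;

     MatGetBrowsOfAoCols_MPIAIJ(A,B,MAT_INITIAL_MATRIX,&startsj_s,&startsj_r,&bufa,&B_oth); // builds B_oth and the buffers
     MatGetBrowsOfAoCols_MPIAIJ(A,B,MAT_REUSE_MATRIX,&startsj_s,&startsj_r,&bufa,&B_oth);   // reuses them, refreshing only the values
*/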

5684: /*@C
5685:   MatGetCommunicationStructs - Provides access to the communication structures used in matrix-vector multiplication.

5687:   Not Collective

5689:   Input Parameter:
5690: . A - The matrix in mpiaij format

5692:   Output Parameters:
5693: + lvec - The local vector holding off-process values from the argument to a matrix-vector product
5694: . colmap - A map from global column index to local index into lvec
5695: - multScatter - A scatter from the argument of a matrix-vector product to lvec

5697:   Level: developer

5699: @*/
5700: #if defined(PETSC_USE_CTABLE)
5701: PetscErrorCode MatGetCommunicationStructs(Mat A, Vec *lvec, PetscTable *colmap, VecScatter *multScatter)
5702: #else
5703: PetscErrorCode MatGetCommunicationStructs(Mat A, Vec *lvec, PetscInt *colmap[], VecScatter *multScatter)
5704: #endif
5705: {
5706:   Mat_MPIAIJ *a;

5713:   a = (Mat_MPIAIJ*) A->data;
5714:   if (lvec) *lvec = a->lvec;
5715:   if (colmap) *colmap = a->colmap;
5716:   if (multScatter) *multScatter = a->Mvctx;
5717:   return(0);
5718: }
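
/*
   Example usage of MatGetCommunicationStructs() above (a minimal sketch; x is a
   hypothetical global vector compatible with the matrix columns, and the returned
   objects are the matrix's own internals, so the caller must not destroy them):

     Vec        lvec;
     VecScatter Mvctx;
   #if defined(PETSC_USE_CTABLE)
     PetscTable colmap;
   #else
     PetscInt   *colmap;
   #endif

     MatGetCommunicationStructs(A,&lvec,&colmap,&Mvctx);
     VecScatterBegin(Mvctx,x,lvec,INSERT_VALUES,SCATTER_FORWARD);  // gather off-process entries of x into lvec
     VecScatterEnd(Mvctx,x,lvec,INSERT_VALUES,SCATTER_FORWARD);
*/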

5720: PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPIAIJCRL(Mat,MatType,MatReuse,Mat*);
5721: PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPIAIJPERM(Mat,MatType,MatReuse,Mat*);
5722: PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPIAIJSELL(Mat,MatType,MatReuse,Mat*);
5723: #if defined(PETSC_HAVE_MKL_SPARSE)
5724: PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPIAIJMKL(Mat,MatType,MatReuse,Mat*);
5725: #endif
5726: PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPIBAIJ(Mat,MatType,MatReuse,Mat*);
5727: PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPISBAIJ(Mat,MatType,MatReuse,Mat*);
5728: #if defined(PETSC_HAVE_ELEMENTAL)
5729: PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_Elemental(Mat,MatType,MatReuse,Mat*);
5730: #endif
5731: #if defined(PETSC_HAVE_SCALAPACK)
5732: PETSC_INTERN PetscErrorCode MatConvert_AIJ_ScaLAPACK(Mat,MatType,MatReuse,Mat*);
5733: #endif
5734: #if defined(PETSC_HAVE_HYPRE)
5735: PETSC_INTERN PetscErrorCode MatConvert_AIJ_HYPRE(Mat,MatType,MatReuse,Mat*);
5736: #endif
5737: #if defined(PETSC_HAVE_CUDA)
5738: PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPIAIJCUSPARSE(Mat,MatType,MatReuse,Mat*);
5739: #endif
5740: PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPISELL(Mat,MatType,MatReuse,Mat*);
5741: PETSC_INTERN PetscErrorCode MatConvert_XAIJ_IS(Mat,MatType,MatReuse,Mat*);
5742: PETSC_INTERN PetscErrorCode MatProductSetFromOptions_IS_XAIJ(Mat);

5744: /*
5745:     Computes (B'*A')' since computing A*B directly is untenable

5747:                n                       p                          p
5748:         (              )       (              )         (                  )
5749:       m (      A       )  *  n (       B      )   =   m (         C        )
5750:         (              )       (              )         (                  )

5752: */
5753: static PetscErrorCode MatMatMultNumeric_MPIDense_MPIAIJ(Mat A,Mat B,Mat C)
5754: {
5756:   Mat            At,Bt,Ct;

5759:   MatTranspose(A,MAT_INITIAL_MATRIX,&At);
5760:   MatTranspose(B,MAT_INITIAL_MATRIX,&Bt);
5761:   MatMatMult(Bt,At,MAT_INITIAL_MATRIX,PETSC_DEFAULT,&Ct);
5762:   MatDestroy(&At);
5763:   MatDestroy(&Bt);
5764:   MatTranspose(Ct,MAT_REUSE_MATRIX,&C);
5765:   MatDestroy(&Ct);
5766:   return(0);
5767: }
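
/*
   The routine above relies on the identity (B'*A')' = A*B: transposing both operands
   turns the unsupported dense*sparse product into a sparse*dense product (Bt is AIJ,
   At is dense), which PETSc can form directly.  A user reaches this path through the
   ordinary interface (sketch, assuming A is MPIDENSE and B is MPIAIJ with compatible
   sizes):

     Mat C;
     MatMatMult(A,B,MAT_INITIAL_MATRIX,PETSC_DEFAULT,&C);
*/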

5769: static PetscErrorCode MatMatMultSymbolic_MPIDense_MPIAIJ(Mat A,Mat B,PetscReal fill,Mat C)
5770: {
5772:   PetscBool      cisdense;

5775:   if (A->cmap->n != B->rmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"A->cmap->n %D != B->rmap->n %D",A->cmap->n,B->rmap->n);
5776:   MatSetSizes(C,A->rmap->n,B->cmap->n,A->rmap->N,B->cmap->N);
5777:   MatSetBlockSizesFromMats(C,A,B);
5778:   PetscObjectTypeCompareAny((PetscObject)C,&cisdense,MATMPIDENSE,MATMPIDENSECUDA,"");
5779:   if (!cisdense) {
5780:     MatSetType(C,((PetscObject)A)->type_name);
5781:   }
5782:   MatSetUp(C);

5784:   C->ops->matmultnumeric = MatMatMultNumeric_MPIDense_MPIAIJ;
5785:   return(0);
5786: }

5788: /* ----------------------------------------------------------------*/
5789: static PetscErrorCode MatProductSetFromOptions_MPIDense_MPIAIJ_AB(Mat C)
5790: {
5791:   Mat_Product *product = C->product;
5792:   Mat         A = product->A,B=product->B;

5795:   if (A->cmap->rstart != B->rmap->rstart || A->cmap->rend != B->rmap->rend)
5796:     SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, (%D, %D) != (%D,%D)",A->cmap->rstart,A->cmap->rend,B->rmap->rstart,B->rmap->rend);

5798:   C->ops->matmultsymbolic = MatMatMultSymbolic_MPIDense_MPIAIJ;
5799:   C->ops->productsymbolic = MatProductSymbolic_AB;
5800:   return(0);
5801: }

5803: PETSC_INTERN PetscErrorCode MatProductSetFromOptions_MPIDense_MPIAIJ(Mat C)
5804: {
5806:   Mat_Product    *product = C->product;

5809:   if (product->type == MATPRODUCT_AB) {
5810:     MatProductSetFromOptions_MPIDense_MPIAIJ_AB(C);
5811:   }
5812:   return(0);
5813: }
5814: /* ----------------------------------------------------------------*/
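
/*
   The hooks above are what MatProductSetFromOptions() selects when the first operand
   is MPIDENSE and the second is MPIAIJ.  A minimal sketch of the generic sequence
   that ends up calling them (A and B are placeholders for existing matrices):

     Mat C;
     MatProductCreate(A,B,NULL,&C);
     MatProductSetType(C,MATPRODUCT_AB);
     MatProductSetFromOptions(C);   // picks the symbolic/numeric routines above
     MatProductSymbolic(C);
     MatProductNumeric(C);
*/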

5816: /*MC
5817:    MATMPIAIJ - MATMPIAIJ = "mpiaij" - A matrix type to be used for parallel sparse matrices.

5819:    Options Database Keys:
5820: . -mat_type mpiaij - sets the matrix type to "mpiaij" during a call to MatSetFromOptions()

5822:    Level: beginner

5824:    Notes:
5825:     MatSetValues() may be called for this matrix type with a NULL argument for the numerical values;
5826:     in this case the values associated with the rows and columns one passes in are set to zero
5827:     in the matrix.

5829:     MatSetOption(A,MAT_STRUCTURE_ONLY,PETSC_TRUE) may be called for this matrix type. In this case no
5830:     space is allocated for the nonzero entries and any entries passed with MatSetValues() are ignored.

5832: .seealso: MatCreateAIJ()
5833: M*/
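
/*
   Sketch of the two behaviors described in the Notes above (hypothetical 2x2 example;
   indices are global and A is a preallocated MATMPIAIJ matrix):

     PetscInt    rows[1] = {0},cols[2] = {0,1};
     PetscScalar vals[2] = {1.0,2.0};

     MatSetValues(A,1,rows,2,cols,NULL,INSERT_VALUES);   // NULL values: the named locations are set to zero

     MatSetOption(A,MAT_STRUCTURE_ONLY,PETSC_TRUE);      // structure-only mode: no space is allocated for values
     MatSetValues(A,1,rows,2,cols,vals,INSERT_VALUES);   // the values passed here are ignored
*/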

5835: PETSC_EXTERN PetscErrorCode MatCreate_MPIAIJ(Mat B)
5836: {
5837:   Mat_MPIAIJ     *b;
5839:   PetscMPIInt    size;

5842:   MPI_Comm_size(PetscObjectComm((PetscObject)B),&size);

5844:   PetscNewLog(B,&b);
5845:   B->data       = (void*)b;
5846:   PetscMemcpy(B->ops,&MatOps_Values,sizeof(struct _MatOps));
5847:   B->assembled  = PETSC_FALSE;
5848:   B->insertmode = NOT_SET_VALUES;
5849:   b->size       = size;

5851:   MPI_Comm_rank(PetscObjectComm((PetscObject)B),&b->rank);

5853:   /* build cache for off array entries formed */
5854:   MatStashCreate_Private(PetscObjectComm((PetscObject)B),1,&B->stash);

5856:   b->donotstash  = PETSC_FALSE;
5857:   b->colmap      = 0;
5858:   b->garray      = 0;
5859:   b->roworiented = PETSC_TRUE;

5861:   /* stuff used for matrix vector multiply */
5862:   b->lvec  = NULL;
5863:   b->Mvctx = NULL;

5865:   /* stuff for MatGetRow() */
5866:   b->rowindices   = 0;
5867:   b->rowvalues    = 0;
5868:   b->getrowactive = PETSC_FALSE;

5870:   /* flexible pointer used in CUSP/CUSPARSE classes */
5871:   b->spptr = NULL;

5873:   PetscObjectComposeFunction((PetscObject)B,"MatMPIAIJSetUseScalableIncreaseOverlap_C",MatMPIAIJSetUseScalableIncreaseOverlap_MPIAIJ);
5874:   PetscObjectComposeFunction((PetscObject)B,"MatStoreValues_C",MatStoreValues_MPIAIJ);
5875:   PetscObjectComposeFunction((PetscObject)B,"MatRetrieveValues_C",MatRetrieveValues_MPIAIJ);
5876:   PetscObjectComposeFunction((PetscObject)B,"MatIsTranspose_C",MatIsTranspose_MPIAIJ);
5877:   PetscObjectComposeFunction((PetscObject)B,"MatMPIAIJSetPreallocation_C",MatMPIAIJSetPreallocation_MPIAIJ);
5878:   PetscObjectComposeFunction((PetscObject)B,"MatResetPreallocation_C",MatResetPreallocation_MPIAIJ);
5879:   PetscObjectComposeFunction((PetscObject)B,"MatMPIAIJSetPreallocationCSR_C",MatMPIAIJSetPreallocationCSR_MPIAIJ);
5880:   PetscObjectComposeFunction((PetscObject)B,"MatDiagonalScaleLocal_C",MatDiagonalScaleLocal_MPIAIJ);
5881:   PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_mpiaijperm_C",MatConvert_MPIAIJ_MPIAIJPERM);
5882:   PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_mpiaijsell_C",MatConvert_MPIAIJ_MPIAIJSELL);
5883: #if defined(PETSC_HAVE_MKL_SPARSE)
5884:   PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_mpiaijmkl_C",MatConvert_MPIAIJ_MPIAIJMKL);
5885: #endif
5886:   PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_mpiaijcrl_C",MatConvert_MPIAIJ_MPIAIJCRL);
5887:   PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_mpibaij_C",MatConvert_MPIAIJ_MPIBAIJ);
5888:   PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_mpisbaij_C",MatConvert_MPIAIJ_MPISBAIJ);
5889: #if defined(PETSC_HAVE_ELEMENTAL)
5890:   PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_elemental_C",MatConvert_MPIAIJ_Elemental);
5891: #endif
5892: #if defined(PETSC_HAVE_SCALAPACK)
5893:   PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_scalapack_C",MatConvert_AIJ_ScaLAPACK);
5894: #endif
5895:   PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_is_C",MatConvert_XAIJ_IS);
5896:   PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_mpisell_C",MatConvert_MPIAIJ_MPISELL);
5897: #if defined(PETSC_HAVE_HYPRE)
5898:   PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_hypre_C",MatConvert_AIJ_HYPRE);
5899:   PetscObjectComposeFunction((PetscObject)B,"MatProductSetFromOptions_transpose_mpiaij_mpiaij_C",MatProductSetFromOptions_Transpose_AIJ_AIJ);
5900: #endif
5901:   PetscObjectComposeFunction((PetscObject)B,"MatProductSetFromOptions_is_mpiaij_C",MatProductSetFromOptions_IS_XAIJ);
5902:   PetscObjectComposeFunction((PetscObject)B,"MatProductSetFromOptions_mpiaij_mpiaij_C",MatProductSetFromOptions_MPIAIJ);
5903:   PetscObjectChangeTypeName((PetscObject)B,MATMPIAIJ);
5904:   return(0);
5905: }
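
/*
   The PetscObjectComposeFunction() calls above register conversion and product routines
   under conventional names such as "MatConvert_mpiaij_mpibaij_C"; MatConvert() and
   MatProductSetFromOptions() look those names up at run time.  A minimal sketch
   (A is an assembled MATMPIAIJ matrix):

     Mat B;
     MatConvert(A,MATMPIBAIJ,MAT_INITIAL_MATRIX,&B);   // dispatches to MatConvert_MPIAIJ_MPIBAIJ()
*/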

5907: /*@C
5908:      MatCreateMPIAIJWithSplitArrays - creates an MPI AIJ matrix using arrays that contain the "diagonal"
5909:          and "off-diagonal" part of the matrix in CSR format.

5911:    Collective

5913:    Input Parameters:
5914: +  comm - MPI communicator
5915: .  m - number of local rows (Cannot be PETSC_DECIDE)
5916: .  n - This value should be the same as the local size used in creating the
5917:        x vector for the matrix-vector product y = Ax (or PETSC_DECIDE to have it
5918:        calculated if N is given). For square matrices n is almost always m.
5919: .  M - number of global rows (or PETSC_DETERMINE to have it calculated if m is given)
5920: .  N - number of global columns (or PETSC_DETERMINE to have it calculated if n is given)
5921: .   i - row indices for "diagonal" portion of matrix; that is i[0] = 0, i[row] = i[row-1] + number of elements in that row of the matrix
5922: .   j - column indices
5923: .   a - matrix values
5924: .   oi - row indices for "off-diagonal" portion of matrix; that is oi[0] = 0, oi[row] = oi[row-1] + number of elements in that row of the matrix
5925: .   oj - column indices
5926: -   oa - matrix values

5928:    Output Parameter:
5929: .   mat - the matrix

5931:    Level: advanced

5933:    Notes:
5934:        The i, j, a, oi, oj, and oa arrays ARE NOT copied by this routine into the internal format used by PETSc. The user
5935:        must free these arrays once the matrix has been destroyed and not before.

5937:        The i and j indices are 0 based

5939:        See MatCreateAIJ() for the definition of "diagonal" and "off-diagonal" portion of the matrix

5941:        This sets local rows and cannot be used to set off-processor values.

5943:        Use of this routine is discouraged because it is inflexible and cumbersome to use. It is extremely rare that a
5944:        legacy application natively assembles into exactly this split format. The code to do so is nontrivial and does
5945:        not easily support in-place reassembly. It is recommended to use MatSetValues() (or a variant thereof) because
5946:        the resulting assembly is easier to implement, will work with any matrix format, and the user does not have to
5947:        keep track of the underlying array. Use MatSetOption(A,MAT_NO_OFF_PROC_ENTRIES,PETSC_TRUE) to disable all
5948:        communication if it is known that only local entries will be set.

5950: .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatMPIAIJSetPreallocation(), MatMPIAIJSetPreallocationCSR(),
5951:           MATMPIAIJ, MatCreateAIJ(), MatCreateMPIAIJWithArrays()
5952: @*/
5953: PetscErrorCode MatCreateMPIAIJWithSplitArrays(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt M,PetscInt N,PetscInt i[],PetscInt j[],PetscScalar a[],PetscInt oi[], PetscInt oj[],PetscScalar oa[],Mat *mat)
5954: {
5956:   Mat_MPIAIJ     *maij;

5959:   if (m < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"local number of rows (m) cannot be PETSC_DECIDE or negative");
5960:   if (i[0]) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"i (row indices) must start with 0");
5961:   if (oi[0]) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"oi (row indices) must start with 0");
5962:   MatCreate(comm,mat);
5963:   MatSetSizes(*mat,m,n,M,N);
5964:   MatSetType(*mat,MATMPIAIJ);
5965:   maij = (Mat_MPIAIJ*) (*mat)->data;

5967:   (*mat)->preallocated = PETSC_TRUE;

5969:   PetscLayoutSetUp((*mat)->rmap);
5970:   PetscLayoutSetUp((*mat)->cmap);

5972:   MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,m,n,i,j,a,&maij->A);
5973:   MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,m,(*mat)->cmap->N,oi,oj,oa,&maij->B);

5975:   MatAssemblyBegin(maij->A,MAT_FINAL_ASSEMBLY);
5976:   MatAssemblyEnd(maij->A,MAT_FINAL_ASSEMBLY);
5977:   MatAssemblyBegin(maij->B,MAT_FINAL_ASSEMBLY);
5978:   MatAssemblyEnd(maij->B,MAT_FINAL_ASSEMBLY);

5980:   MatSetOption(*mat,MAT_NO_OFF_PROC_ENTRIES,PETSC_TRUE);
5981:   MatAssemblyBegin(*mat,MAT_FINAL_ASSEMBLY);
5982:   MatAssemblyEnd(*mat,MAT_FINAL_ASSEMBLY);
5983:   MatSetOption(*mat,MAT_NO_OFF_PROC_ENTRIES,PETSC_FALSE);
5984:   MatSetOption(*mat,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_TRUE);
5985:   return(0);
5986: }
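
/*
   Example usage of MatCreateMPIAIJWithSplitArrays() (a minimal sketch with made-up
   data, each process owning two rows with an empty off-diagonal block; in a real code
   the arrays come from the application and must stay allocated until the matrix is
   destroyed):

     PetscInt    i[3]  = {0,1,2},j[2] = {0,1};   // "diagonal" block: one entry per row
     PetscScalar a[2]  = {1.0,2.0};
     PetscInt    oi[3] = {0,0,0},*oj  = NULL;    // empty "off-diagonal" block
     PetscScalar *oa   = NULL;
     Mat         A;

     MatCreateMPIAIJWithSplitArrays(PETSC_COMM_WORLD,2,2,PETSC_DETERMINE,PETSC_DETERMINE,i,j,a,oi,oj,oa,&A);
*/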

5988: /*
5989:     Special version for direct calls from Fortran
5990: */
5991:  #include <petsc/private/fortranimpl.h>

5993: /* Change these macros so can be used in void function */
5994: #undef CHKERRQ
5995: #define CHKERRQ(ierr) CHKERRABORT(PETSC_COMM_WORLD,ierr)
5996: #undef SETERRQ2
5997: #define SETERRQ2(comm,ierr,b,c,d) CHKERRABORT(comm,ierr)
5998: #undef SETERRQ3
5999: #define SETERRQ3(comm,ierr,b,c,d,e) CHKERRABORT(comm,ierr)
6000: #undef SETERRQ
6001: #define SETERRQ(c,ierr,b) CHKERRABORT(c,ierr)

6003: #if defined(PETSC_HAVE_FORTRAN_CAPS)
6004: #define matsetvaluesmpiaij_ MATSETVALUESMPIAIJ
6005: #elif !defined(PETSC_HAVE_FORTRAN_UNDERSCORE)
6006: #define matsetvaluesmpiaij_ matsetvaluesmpiaij
6007: #else
6008: #endif
6009: PETSC_EXTERN void matsetvaluesmpiaij_(Mat *mmat,PetscInt *mm,const PetscInt im[],PetscInt *mn,const PetscInt in[],const PetscScalar v[],InsertMode *maddv,PetscErrorCode *_ierr)
6010: {
6011:   Mat            mat  = *mmat;
6012:   PetscInt       m    = *mm, n = *mn;
6013:   InsertMode     addv = *maddv;
6014:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
6015:   PetscScalar    value;

6018:   MatCheckPreallocated(mat,1);
6019:   if (mat->insertmode == NOT_SET_VALUES) mat->insertmode = addv;
6020:   else if (mat->insertmode != addv) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Cannot mix add values and insert values");
6021:   {
6022:     PetscInt  i,j,rstart  = mat->rmap->rstart,rend = mat->rmap->rend;
6023:     PetscInt  cstart      = mat->cmap->rstart,cend = mat->cmap->rend,row,col;
6024:     PetscBool roworiented = aij->roworiented;

6026:     /* Some Variables required in the macro */
6027:     Mat        A                    = aij->A;
6028:     Mat_SeqAIJ *a                   = (Mat_SeqAIJ*)A->data;
6029:     PetscInt   *aimax               = a->imax,*ai = a->i,*ailen = a->ilen,*aj = a->j;
6030:     MatScalar  *aa                  = a->a;
6031:     PetscBool  ignorezeroentries    = (((a->ignorezeroentries)&&(addv==ADD_VALUES)) ? PETSC_TRUE : PETSC_FALSE);
6032:     Mat        B                    = aij->B;
6033:     Mat_SeqAIJ *b                   = (Mat_SeqAIJ*)B->data;
6034:     PetscInt   *bimax               = b->imax,*bi = b->i,*bilen = b->ilen,*bj = b->j,bm = aij->B->rmap->n,am = aij->A->rmap->n;
6035:     MatScalar  *ba                  = b->a;
6036:     /* This variable below is only for the PETSC_HAVE_VIENNACL or PETSC_HAVE_CUDA cases, but we define it in all cases because we
6037:      * cannot use "#if defined" inside a macro. */
6038:     PETSC_UNUSED PetscBool inserted = PETSC_FALSE;

6040:     PetscInt  *rp1,*rp2,ii,nrow1,nrow2,_i,rmax1,rmax2,N,low1,high1,low2,high2,t,lastcol1,lastcol2;
6041:     PetscInt  nonew = a->nonew;
6042:     MatScalar *ap1,*ap2;

6045:     for (i=0; i<m; i++) {
6046:       if (im[i] < 0) continue;
6047:       if (PetscUnlikelyDebug(im[i] >= mat->rmap->N)) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Row too large: row %D max %D",im[i],mat->rmap->N-1);
6048:       if (im[i] >= rstart && im[i] < rend) {
6049:         row      = im[i] - rstart;
6050:         lastcol1 = -1;
6051:         rp1      = aj + ai[row];
6052:         ap1      = aa + ai[row];
6053:         rmax1    = aimax[row];
6054:         nrow1    = ailen[row];
6055:         low1     = 0;
6056:         high1    = nrow1;
6057:         lastcol2 = -1;
6058:         rp2      = bj + bi[row];
6059:         ap2      = ba + bi[row];
6060:         rmax2    = bimax[row];
6061:         nrow2    = bilen[row];
6062:         low2     = 0;
6063:         high2    = nrow2;

6065:         for (j=0; j<n; j++) {
6066:           if (roworiented) value = v[i*n+j];
6067:           else value = v[i+j*m];
6068:           if (ignorezeroentries && value == 0.0 && (addv == ADD_VALUES) && im[i] != in[j]) continue;
6069:           if (in[j] >= cstart && in[j] < cend) {
6070:             col = in[j] - cstart;
6071:             MatSetValues_SeqAIJ_A_Private(row,col,value,addv,im[i],in[j]);
6072: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_CUDA)
6073:             if (A->offloadmask != PETSC_OFFLOAD_UNALLOCATED && inserted) A->offloadmask = PETSC_OFFLOAD_CPU;
6074: #endif
6075:           } else if (in[j] < 0) continue;
6076:           else if (PetscUnlikelyDebug(in[j] >= mat->cmap->N)) {
6077:             /* extra brace on SETERRQ2() is required for --with-errorchecking=0 - due to the next 'else' clause */
6078:             SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Column too large: col %D max %D",in[j],mat->cmap->N-1);
6079:           } else {
6080:             if (mat->was_assembled) {
6081:               if (!aij->colmap) {
6082:                 MatCreateColmap_MPIAIJ_Private(mat);
6083:               }
6084: #if defined(PETSC_USE_CTABLE)
6085:               PetscTableFind(aij->colmap,in[j]+1,&col);
6086:               col--;
6087: #else
6088:               col = aij->colmap[in[j]] - 1;
6089: #endif
6090:               if (col < 0 && !((Mat_SeqAIJ*)(aij->A->data))->nonew) {
6091:                 MatDisAssemble_MPIAIJ(mat);
6092:                 col  =  in[j];
6093:                 /* Reinitialize the variables required by MatSetValues_SeqAIJ_B_Private() */
6094:                 B        = aij->B;
6095:                 b        = (Mat_SeqAIJ*)B->data;
6096:                 bimax    = b->imax; bi = b->i; bilen = b->ilen; bj = b->j;
6097:                 rp2      = bj + bi[row];
6098:                 ap2      = ba + bi[row];
6099:                 rmax2    = bimax[row];
6100:                 nrow2    = bilen[row];
6101:                 low2     = 0;
6102:                 high2    = nrow2;
6103:                 bm       = aij->B->rmap->n;
6104:                 ba       = b->a;
6105:                 inserted = PETSC_FALSE;
6106:               }
6107:             } else col = in[j];
6108:             MatSetValues_SeqAIJ_B_Private(row,col,value,addv,im[i],in[j]);
6109: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_CUDA)
6110:             if (B->offloadmask != PETSC_OFFLOAD_UNALLOCATED && inserted) B->offloadmask = PETSC_OFFLOAD_CPU;
6111: #endif
6112:           }
6113:         }
6114:       } else if (!aij->donotstash) {
6115:         if (roworiented) {
6116:           MatStashValuesRow_Private(&mat->stash,im[i],n,in,v+i*n,(PetscBool)(ignorezeroentries && (addv == ADD_VALUES)));
6117:         } else {
6118:           MatStashValuesCol_Private(&mat->stash,im[i],n,in,v+i,m,(PetscBool)(ignorezeroentries && (addv == ADD_VALUES)));
6119:         }
6120:       }
6121:     }
6122:   }
6123:   PetscFunctionReturnVoid();
6124: }