Actual source code: mpiaij.c

petsc-3.12.0 2019-09-29
  1:  #include <../src/mat/impls/aij/mpi/mpiaij.h>
  2:  #include <petsc/private/vecimpl.h>
  3:  #include <petsc/private/vecscatterimpl.h>
  4:  #include <petsc/private/isimpl.h>
  5:  #include <petscblaslapack.h>
  6:  #include <petscsf.h>
  7:  #include <petsc/private/hashmapi.h>

  9: /*MC
 10:    MATAIJ - MATAIJ = "aij" - A matrix type to be used for sparse matrices.

 12:    This matrix type is identical to MATSEQAIJ when constructed with a single process communicator,
 13:    and MATMPIAIJ otherwise.  As a result, for single process communicators,
 14:   MatSeqAIJSetPreallocation() is supported, and similarly MatMPIAIJSetPreallocation() is supported
 15:   for communicators controlling multiple processes.  It is recommended that you call both of
 16:   the above preallocation routines for simplicity.

 18:    Options Database Keys:
 19: . -mat_type aij - sets the matrix type to "aij" during a call to MatSetFromOptions()

 21:   Developer Notes:
 22:     Subclasses include MATAIJCUSP, MATAIJCUSPARSE, MATAIJPERM, MATAIJSELL, MATAIJMKL, and MATAIJCRL. The AIJ type also
 23:    automatically switches over to use inodes when enough of them exist.

 25:   Level: beginner

 27: .seealso: MatCreateAIJ(), MatCreateSeqAIJ(), MATSEQAIJ, MATMPIAIJ
 28: M*/
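A minimal usage sketch (not part of this source file) following the recommendation above to call both preallocation routines; the global sizes and the per-row nonzero estimates (5 diagonal, 2 off-diagonal) are placeholders:

    Mat      A;
    PetscInt M = 100, N = 100;                      /* hypothetical global sizes */

    MatCreate(PETSC_COMM_WORLD,&A);
    MatSetSizes(A,PETSC_DECIDE,PETSC_DECIDE,M,N);
    MatSetType(A,MATAIJ);
    MatSeqAIJSetPreallocation(A,5,NULL);            /* takes effect on a one-process communicator   */
    MatMPIAIJSetPreallocation(A,5,NULL,2,NULL);     /* takes effect on a multi-process communicator */
    /* ... MatSetValues() calls ... */
    MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
    MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
    MatDestroy(&A);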

 30: /*MC
 31:    MATAIJCRL - MATAIJCRL = "aijcrl" - A matrix type to be used for sparse matrices.

 33:    This matrix type is identical to MATSEQAIJCRL when constructed with a single process communicator,
 34:    and MATMPIAIJCRL otherwise.  As a result, for single process communicators,
 35:    MatSeqAIJSetPreallocation() is supported, and similarly MatMPIAIJSetPreallocation() is supported
 36:   for communicators controlling multiple processes.  It is recommended that you call both of
 37:   the above preallocation routines for simplicity.

 39:    Options Database Keys:
 40: . -mat_type aijcrl - sets the matrix type to "aijcrl" during a call to MatSetFromOptions()

 42:   Level: beginner

 44: .seealso: MatCreateMPIAIJCRL(), MATSEQAIJCRL, MATMPIAIJCRL
 45: M*/
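As the option above indicates, the CRL variant is usually selected at run time rather than hard-coded; a small sketch (same placeholder declarations as in the previous example):

    MatCreate(PETSC_COMM_WORLD,&A);
    MatSetSizes(A,PETSC_DECIDE,PETSC_DECIDE,M,N);
    MatSetFromOptions(A);    /* picks up -mat_type aijcrl from the options database */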

 47: static PetscErrorCode MatPinToCPU_MPIAIJ(Mat A,PetscBool flg)
 48: {
 49:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;

 53: #if defined(PETSC_HAVE_CUDA) || defined(PETSC_HAVE_VIENNACL)
 54:   A->pinnedtocpu = flg;
 55: #endif
 56:   if (a->A) {
 57:     MatPinToCPU(a->A,flg);
 58:   }
 59:   if (a->B) {
 60:     MatPinToCPU(a->B,flg);
 61:   }
 62:   return(0);
 63: }


 66: PetscErrorCode MatSetBlockSizes_MPIAIJ(Mat M, PetscInt rbs, PetscInt cbs)
 67: {
 69:   Mat_MPIAIJ     *mat = (Mat_MPIAIJ*)M->data;

 72:   if (mat->A) {
 73:     MatSetBlockSizes(mat->A,rbs,cbs);
 74:     MatSetBlockSizes(mat->B,rbs,1);
 75:   }
 76:   return(0);
 77: }

 79: PetscErrorCode MatFindNonzeroRows_MPIAIJ(Mat M,IS *keptrows)
 80: {
 81:   PetscErrorCode  ierr;
 82:   Mat_MPIAIJ      *mat = (Mat_MPIAIJ*)M->data;
 83:   Mat_SeqAIJ      *a   = (Mat_SeqAIJ*)mat->A->data;
 84:   Mat_SeqAIJ      *b   = (Mat_SeqAIJ*)mat->B->data;
 85:   const PetscInt  *ia,*ib;
 86:   const MatScalar *aa,*bb;
 87:   PetscInt        na,nb,i,j,*rows,cnt=0,n0rows;
 88:   PetscInt        m = M->rmap->n,rstart = M->rmap->rstart;

 91:   *keptrows = 0;
 92:   ia        = a->i;
 93:   ib        = b->i;
 94:   for (i=0; i<m; i++) {
 95:     na = ia[i+1] - ia[i];
 96:     nb = ib[i+1] - ib[i];
 97:     if (!na && !nb) {
 98:       cnt++;
 99:       goto ok1;
100:     }
101:     aa = a->a + ia[i];
102:     for (j=0; j<na; j++) {
103:       if (aa[j] != 0.0) goto ok1;
104:     }
105:     bb = b->a + ib[i];
106:     for (j=0; j <nb; j++) {
107:       if (bb[j] != 0.0) goto ok1;
108:     }
109:     cnt++;
110: ok1:;
111:   }
112:   MPIU_Allreduce(&cnt,&n0rows,1,MPIU_INT,MPI_SUM,PetscObjectComm((PetscObject)M));
113:   if (!n0rows) return(0);
114:   PetscMalloc1(M->rmap->n-cnt,&rows);
115:   cnt  = 0;
116:   for (i=0; i<m; i++) {
117:     na = ia[i+1] - ia[i];
118:     nb = ib[i+1] - ib[i];
119:     if (!na && !nb) continue;
120:     aa = a->a + ia[i];
121:     for (j=0; j<na;j++) {
122:       if (aa[j] != 0.0) {
123:         rows[cnt++] = rstart + i;
124:         goto ok2;
125:       }
126:     }
127:     bb = b->a + ib[i];
128:     for (j=0; j<nb; j++) {
129:       if (bb[j] != 0.0) {
130:         rows[cnt++] = rstart + i;
131:         goto ok2;
132:       }
133:     }
134: ok2:;
135:   }
136:   ISCreateGeneral(PetscObjectComm((PetscObject)M),cnt,rows,PETSC_OWN_POINTER,keptrows);
137:   return(0);
138: }

140: PetscErrorCode  MatDiagonalSet_MPIAIJ(Mat Y,Vec D,InsertMode is)
141: {
142:   PetscErrorCode    ierr;
143:   Mat_MPIAIJ        *aij = (Mat_MPIAIJ*) Y->data;
144:   PetscBool         cong;

147:   MatHasCongruentLayouts(Y,&cong);
148:   if (Y->assembled && cong) {
149:     MatDiagonalSet(aij->A,D,is);
150:   } else {
151:     MatDiagonalSet_Default(Y,D,is);
152:   }
153:   return(0);
154: }

156: PetscErrorCode MatFindZeroDiagonals_MPIAIJ(Mat M,IS *zrows)
157: {
158:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)M->data;
160:   PetscInt       i,rstart,nrows,*rows;

163:   *zrows = NULL;
164:   MatFindZeroDiagonals_SeqAIJ_Private(aij->A,&nrows,&rows);
165:   MatGetOwnershipRange(M,&rstart,NULL);
166:   for (i=0; i<nrows; i++) rows[i] += rstart;
167:   ISCreateGeneral(PetscObjectComm((PetscObject)M),nrows,rows,PETSC_OWN_POINTER,zrows);
168:   return(0);
169: }

171: PetscErrorCode MatGetColumnNorms_MPIAIJ(Mat A,NormType type,PetscReal *norms)
172: {
174:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)A->data;
175:   PetscInt       i,n,*garray = aij->garray;
176:   Mat_SeqAIJ     *a_aij = (Mat_SeqAIJ*) aij->A->data;
177:   Mat_SeqAIJ     *b_aij = (Mat_SeqAIJ*) aij->B->data;
178:   PetscReal      *work;

181:   MatGetSize(A,NULL,&n);
182:   PetscCalloc1(n,&work);
183:   if (type == NORM_2) {
184:     for (i=0; i<a_aij->i[aij->A->rmap->n]; i++) {
185:       work[A->cmap->rstart + a_aij->j[i]] += PetscAbsScalar(a_aij->a[i]*a_aij->a[i]);
186:     }
187:     for (i=0; i<b_aij->i[aij->B->rmap->n]; i++) {
188:       work[garray[b_aij->j[i]]] += PetscAbsScalar(b_aij->a[i]*b_aij->a[i]);
189:     }
190:   } else if (type == NORM_1) {
191:     for (i=0; i<a_aij->i[aij->A->rmap->n]; i++) {
192:       work[A->cmap->rstart + a_aij->j[i]] += PetscAbsScalar(a_aij->a[i]);
193:     }
194:     for (i=0; i<b_aij->i[aij->B->rmap->n]; i++) {
195:       work[garray[b_aij->j[i]]] += PetscAbsScalar(b_aij->a[i]);
196:     }
197:   } else if (type == NORM_INFINITY) {
198:     for (i=0; i<a_aij->i[aij->A->rmap->n]; i++) {
199:       work[A->cmap->rstart + a_aij->j[i]] = PetscMax(PetscAbsScalar(a_aij->a[i]), work[A->cmap->rstart + a_aij->j[i]]);
200:     }
201:     for (i=0; i<b_aij->i[aij->B->rmap->n]; i++) {
202:       work[garray[b_aij->j[i]]] = PetscMax(PetscAbsScalar(b_aij->a[i]),work[garray[b_aij->j[i]]]);
203:     }

205:   } else SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONG,"Unknown NormType");
206:   if (type == NORM_INFINITY) {
207:     MPIU_Allreduce(work,norms,n,MPIU_REAL,MPIU_MAX,PetscObjectComm((PetscObject)A));
208:   } else {
209:     MPIU_Allreduce(work,norms,n,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)A));
210:   }
211:   PetscFree(work);
212:   if (type == NORM_2) {
213:     for (i=0; i<n; i++) norms[i] = PetscSqrtReal(norms[i]);
214:   }
215:   return(0);
216: }

218: PetscErrorCode MatFindOffBlockDiagonalEntries_MPIAIJ(Mat A,IS *is)
219: {
220:   Mat_MPIAIJ      *a  = (Mat_MPIAIJ*)A->data;
221:   IS              sis,gis;
222:   PetscErrorCode  ierr;
223:   const PetscInt  *isis,*igis;
224:   PetscInt        n,*iis,nsis,ngis,rstart,i;

227:   MatFindOffBlockDiagonalEntries(a->A,&sis);
228:   MatFindNonzeroRows(a->B,&gis);
229:   ISGetSize(gis,&ngis);
230:   ISGetSize(sis,&nsis);
231:   ISGetIndices(sis,&isis);
232:   ISGetIndices(gis,&igis);

234:   PetscMalloc1(ngis+nsis,&iis);
235:   PetscArraycpy(iis,igis,ngis);
236:   PetscArraycpy(iis+ngis,isis,nsis);
237:   n    = ngis + nsis;
238:   PetscSortRemoveDupsInt(&n,iis);
239:   MatGetOwnershipRange(A,&rstart,NULL);
240:   for (i=0; i<n; i++) iis[i] += rstart;
241:   ISCreateGeneral(PetscObjectComm((PetscObject)A),n,iis,PETSC_OWN_POINTER,is);

243:   ISRestoreIndices(sis,&isis);
244:   ISRestoreIndices(gis,&igis);
245:   ISDestroy(&sis);
246:   ISDestroy(&gis);
247:   return(0);
248: }

250: /*
251:     Distributes a SeqAIJ matrix across a set of processes. Code stolen from
252:     MatLoad_MPIAIJ(). Horrible lack of reuse. Should be a routine for each matrix type.

254:     Only for square matrices

256:     Used by a preconditioner, hence PETSC_EXTERN
257: */
258: PETSC_EXTERN PetscErrorCode MatDistribute_MPIAIJ(MPI_Comm comm,Mat gmat,PetscInt m,MatReuse reuse,Mat *inmat)
259: {
260:   PetscMPIInt    rank,size;
261:   PetscInt       *rowners,*dlens,*olens,i,rstart,rend,j,jj,nz = 0,*gmataj,cnt,row,*ld,bses[2];
263:   Mat            mat;
264:   Mat_SeqAIJ     *gmata;
265:   PetscMPIInt    tag;
266:   MPI_Status     status;
267:   PetscBool      aij;
268:   MatScalar      *gmataa,*ao,*ad,*gmataarestore=0;

271:   MPI_Comm_rank(comm,&rank);
272:   MPI_Comm_size(comm,&size);
273:   if (!rank) {
274:     PetscObjectTypeCompare((PetscObject)gmat,MATSEQAIJ,&aij);
275:     if (!aij) SETERRQ1(PetscObjectComm((PetscObject)gmat),PETSC_ERR_SUP,"Currently no support for input matrix of type %s\n",((PetscObject)gmat)->type_name);
276:   }
277:   if (reuse == MAT_INITIAL_MATRIX) {
278:     MatCreate(comm,&mat);
279:     MatSetSizes(mat,m,m,PETSC_DETERMINE,PETSC_DETERMINE);
280:     MatGetBlockSizes(gmat,&bses[0],&bses[1]);
281:     MPI_Bcast(bses,2,MPIU_INT,0,comm);
282:     MatSetBlockSizes(mat,bses[0],bses[1]);
283:     MatSetType(mat,MATAIJ);
284:     PetscMalloc1(size+1,&rowners);
285:     PetscMalloc2(m,&dlens,m,&olens);
286:     MPI_Allgather(&m,1,MPIU_INT,rowners+1,1,MPIU_INT,comm);

288:     rowners[0] = 0;
289:     for (i=2; i<=size; i++) rowners[i] += rowners[i-1];
290:     rstart = rowners[rank];
291:     rend   = rowners[rank+1];
292:     PetscObjectGetNewTag((PetscObject)mat,&tag);
293:     if (!rank) {
294:       gmata = (Mat_SeqAIJ*) gmat->data;
295:       /* send row lengths to all processors */
296:       for (i=0; i<m; i++) dlens[i] = gmata->ilen[i];
297:       for (i=1; i<size; i++) {
298:         MPI_Send(gmata->ilen + rowners[i],rowners[i+1]-rowners[i],MPIU_INT,i,tag,comm);
299:       }
 300:       /* determine the number of diagonal and off-diagonal entries in each row */
301:       PetscArrayzero(olens,m);
302:       PetscCalloc1(m,&ld);
303:       jj   = 0;
304:       for (i=0; i<m; i++) {
305:         for (j=0; j<dlens[i]; j++) {
306:           if (gmata->j[jj] < rstart) ld[i]++;
307:           if (gmata->j[jj] < rstart || gmata->j[jj] >= rend) olens[i]++;
308:           jj++;
309:         }
310:       }
311:       /* send column indices to other processes */
312:       for (i=1; i<size; i++) {
313:         nz   = gmata->i[rowners[i+1]]-gmata->i[rowners[i]];
314:         MPI_Send(&nz,1,MPIU_INT,i,tag,comm);
315:         MPI_Send(gmata->j + gmata->i[rowners[i]],nz,MPIU_INT,i,tag,comm);
316:       }

318:       /* send numerical values to other processes */
319:       for (i=1; i<size; i++) {
320:         nz   = gmata->i[rowners[i+1]]-gmata->i[rowners[i]];
321:         MPI_Send(gmata->a + gmata->i[rowners[i]],nz,MPIU_SCALAR,i,tag,comm);
322:       }
323:       gmataa = gmata->a;
324:       gmataj = gmata->j;

326:     } else {
327:       /* receive row lengths */
328:       MPI_Recv(dlens,m,MPIU_INT,0,tag,comm,&status);
329:       /* receive column indices */
330:       MPI_Recv(&nz,1,MPIU_INT,0,tag,comm,&status);
331:       PetscMalloc2(nz,&gmataa,nz,&gmataj);
332:       MPI_Recv(gmataj,nz,MPIU_INT,0,tag,comm,&status);
 333:       /* determine the number of diagonal and off-diagonal entries in each row */
334:       PetscArrayzero(olens,m);
335:       PetscCalloc1(m,&ld);
336:       jj   = 0;
337:       for (i=0; i<m; i++) {
338:         for (j=0; j<dlens[i]; j++) {
339:           if (gmataj[jj] < rstart) ld[i]++;
340:           if (gmataj[jj] < rstart || gmataj[jj] >= rend) olens[i]++;
341:           jj++;
342:         }
343:       }
344:       /* receive numerical values */
345:       PetscArrayzero(gmataa,nz);
346:       MPI_Recv(gmataa,nz,MPIU_SCALAR,0,tag,comm,&status);
347:     }
348:     /* set preallocation */
349:     for (i=0; i<m; i++) {
350:       dlens[i] -= olens[i];
351:     }
352:     MatSeqAIJSetPreallocation(mat,0,dlens);
353:     MatMPIAIJSetPreallocation(mat,0,dlens,0,olens);

355:     for (i=0; i<m; i++) {
356:       dlens[i] += olens[i];
357:     }
358:     cnt = 0;
359:     for (i=0; i<m; i++) {
360:       row  = rstart + i;
361:       MatSetValues(mat,1,&row,dlens[i],gmataj+cnt,gmataa+cnt,INSERT_VALUES);
362:       cnt += dlens[i];
363:     }
364:     if (rank) {
365:       PetscFree2(gmataa,gmataj);
366:     }
367:     PetscFree2(dlens,olens);
368:     PetscFree(rowners);

370:     ((Mat_MPIAIJ*)(mat->data))->ld = ld;

372:     *inmat = mat;
373:   } else {   /* column indices are already set; only need to move over numerical values from process 0 */
374:     Mat_SeqAIJ *Ad = (Mat_SeqAIJ*)((Mat_MPIAIJ*)((*inmat)->data))->A->data;
375:     Mat_SeqAIJ *Ao = (Mat_SeqAIJ*)((Mat_MPIAIJ*)((*inmat)->data))->B->data;
376:     mat  = *inmat;
377:     PetscObjectGetNewTag((PetscObject)mat,&tag);
378:     if (!rank) {
379:       /* send numerical values to other processes */
380:       gmata  = (Mat_SeqAIJ*) gmat->data;
381:       MatGetOwnershipRanges(mat,(const PetscInt**)&rowners);
382:       gmataa = gmata->a;
383:       for (i=1; i<size; i++) {
384:         nz   = gmata->i[rowners[i+1]]-gmata->i[rowners[i]];
385:         MPI_Send(gmataa + gmata->i[rowners[i]],nz,MPIU_SCALAR,i,tag,comm);
386:       }
387:       nz = gmata->i[rowners[1]]-gmata->i[rowners[0]];
388:     } else {
 389:       /* receive numerical values from process 0 */
390:       nz   = Ad->nz + Ao->nz;
391:       PetscMalloc1(nz,&gmataa); gmataarestore = gmataa;
392:       MPI_Recv(gmataa,nz,MPIU_SCALAR,0,tag,comm,&status);
393:     }
394:     /* transfer numerical values into the diagonal A and off diagonal B parts of mat */
395:     ld = ((Mat_MPIAIJ*)(mat->data))->ld;
396:     ad = Ad->a;
397:     ao = Ao->a;
398:     if (mat->rmap->n) {
399:       i  = 0;
400:       nz = ld[i];                                   PetscArraycpy(ao,gmataa,nz); ao += nz; gmataa += nz;
401:       nz = Ad->i[i+1] - Ad->i[i];                   PetscArraycpy(ad,gmataa,nz); ad += nz; gmataa += nz;
402:     }
403:     for (i=1; i<mat->rmap->n; i++) {
404:       nz = Ao->i[i] - Ao->i[i-1] - ld[i-1] + ld[i]; PetscArraycpy(ao,gmataa,nz); ao += nz; gmataa += nz;
405:       nz = Ad->i[i+1] - Ad->i[i];                   PetscArraycpy(ad,gmataa,nz); ad += nz; gmataa += nz;
406:     }
407:     i--;
408:     if (mat->rmap->n) {
409:       nz = Ao->i[i+1] - Ao->i[i] - ld[i];           PetscArraycpy(ao,gmataa,nz);
410:     }
411:     if (rank) {
412:       PetscFree(gmataarestore);
413:     }
414:   }
415:   MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY);
416:   MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY);
417:   return(0);
418: }
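To make the ownership bookkeeping at the top of MatDistribute_MPIAIJ() concrete (hypothetical numbers): with three processes contributing local row counts m = 2, 3 and 2, MPI_Allgather() fills rowners with {0, 2, 3, 2}, the running sum in the loop turns this into {0, 2, 5, 7}, and rank 1 therefore owns rows [2, 5), i.e. rstart = 2 and rend = 5.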

420: /*
421:   Local utility routine that creates a mapping from the global column
422: number to the local number in the off-diagonal part of the local
423: storage of the matrix.  When PETSC_USE_CTABLE is used this is scalable at
 424: a slightly higher hash table cost; without it, it is not scalable (each processor
 425: has an order N integer array, but access is fast).
426: */
427: PetscErrorCode MatCreateColmap_MPIAIJ_Private(Mat mat)
428: {
429:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
431:   PetscInt       n = aij->B->cmap->n,i;

434:   if (!aij->garray) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"MPIAIJ Matrix was assembled but is missing garray");
435: #if defined(PETSC_USE_CTABLE)
436:   PetscTableCreate(n,mat->cmap->N+1,&aij->colmap);
437:   for (i=0; i<n; i++) {
438:     PetscTableAdd(aij->colmap,aij->garray[i]+1,i+1,INSERT_VALUES);
439:   }
440: #else
441:   PetscCalloc1(mat->cmap->N+1,&aij->colmap);
442:   PetscLogObjectMemory((PetscObject)mat,(mat->cmap->N+1)*sizeof(PetscInt));
443:   for (i=0; i<n; i++) aij->colmap[aij->garray[i]] = i+1;
444: #endif
445:   return(0);
446: }
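For illustration (not part of this source file), this is how routines later in this file, e.g. MatSetValues_MPIAIJ() and MatGetValues_MPIAIJ(), use the colmap built above to translate a given global column index gcol into a local column of the off-diagonal block; the stored values are shifted by one so that 0 can mean "not present":

    PetscInt lcol;
  #if defined(PETSC_USE_CTABLE)
    PetscTableFind(aij->colmap,gcol+1,&lcol);
    lcol--;                          /* table stores local index + 1              */
  #else
    lcol = aij->colmap[gcol] - 1;    /* same convention for the flat array        */
  #endif
    if (lcol < 0) { /* gcol does not occur in the off-diagonal block on this process */ }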

448: #define MatSetValues_SeqAIJ_A_Private(row,col,value,addv,orow,ocol)     \
449: { \
450:     if (col <= lastcol1)  low1 = 0;     \
451:     else                 high1 = nrow1; \
452:     lastcol1 = col;\
453:     while (high1-low1 > 5) { \
454:       t = (low1+high1)/2; \
455:       if (rp1[t] > col) high1 = t; \
456:       else              low1  = t; \
457:     } \
458:       for (_i=low1; _i<high1; _i++) { \
459:         if (rp1[_i] > col) break; \
460:         if (rp1[_i] == col) { \
461:           if (addv == ADD_VALUES) { \
462:             ap1[_i] += value;   \
 463:             /* Not sure whether LogFlops will slow down the code or not */ \
464:             (void)PetscLogFlops(1.0);   \
465:            } \
466:           else                    ap1[_i] = value; \
467:           goto a_noinsert; \
468:         } \
469:       }  \
470:       if (value == 0.0 && ignorezeroentries && row != col) {low1 = 0; high1 = nrow1;goto a_noinsert;} \
471:       if (nonew == 1) {low1 = 0; high1 = nrow1; goto a_noinsert;}                \
472:       if (nonew == -1) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero at global row/column (%D, %D) into matrix", orow, ocol); \
473:       MatSeqXAIJReallocateAIJ(A,am,1,nrow1,row,col,rmax1,aa,ai,aj,rp1,ap1,aimax,nonew,MatScalar); \
474:       N = nrow1++ - 1; a->nz++; high1++; \
475:       /* shift up all the later entries in this row */ \
476:       PetscArraymove(rp1+_i+1,rp1+_i,N-_i+1);\
477:       PetscArraymove(ap1+_i+1,ap1+_i,N-_i+1);\
478:       rp1[_i] = col;  \
479:       ap1[_i] = value;  \
480:       A->nonzerostate++;\
481:       a_noinsert: ; \
482:       ailen[row] = nrow1; \
483: }

485: #define MatSetValues_SeqAIJ_B_Private(row,col,value,addv,orow,ocol) \
486:   { \
487:     if (col <= lastcol2) low2 = 0;                        \
488:     else high2 = nrow2;                                   \
489:     lastcol2 = col;                                       \
490:     while (high2-low2 > 5) {                              \
491:       t = (low2+high2)/2;                                 \
492:       if (rp2[t] > col) high2 = t;                        \
493:       else             low2  = t;                         \
494:     }                                                     \
495:     for (_i=low2; _i<high2; _i++) {                       \
496:       if (rp2[_i] > col) break;                           \
497:       if (rp2[_i] == col) {                               \
498:         if (addv == ADD_VALUES) {                         \
499:           ap2[_i] += value;                               \
500:           (void)PetscLogFlops(1.0);                       \
501:         }                                                 \
502:         else                    ap2[_i] = value;          \
503:         goto b_noinsert;                                  \
504:       }                                                   \
505:     }                                                     \
506:     if (value == 0.0 && ignorezeroentries) {low2 = 0; high2 = nrow2; goto b_noinsert;} \
507:     if (nonew == 1) {low2 = 0; high2 = nrow2; goto b_noinsert;}                        \
508:     if (nonew == -1) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero at global row/column (%D, %D) into matrix", orow, ocol); \
509:     MatSeqXAIJReallocateAIJ(B,bm,1,nrow2,row,col,rmax2,ba,bi,bj,rp2,ap2,bimax,nonew,MatScalar); \
510:     N = nrow2++ - 1; b->nz++; high2++;                    \
511:     /* shift up all the later entries in this row */      \
512:     PetscArraymove(rp2+_i+1,rp2+_i,N-_i+1);\
513:     PetscArraymove(ap2+_i+1,ap2+_i,N-_i+1);\
514:     rp2[_i] = col;                                        \
515:     ap2[_i] = value;                                      \
516:     B->nonzerostate++;                                    \
517:     b_noinsert: ;                                         \
518:     bilen[row] = nrow2;                                   \
519:   }

521: PetscErrorCode MatSetValuesRow_MPIAIJ(Mat A,PetscInt row,const PetscScalar v[])
522: {
523:   Mat_MPIAIJ     *mat = (Mat_MPIAIJ*)A->data;
524:   Mat_SeqAIJ     *a   = (Mat_SeqAIJ*)mat->A->data,*b = (Mat_SeqAIJ*)mat->B->data;
526:   PetscInt       l,*garray = mat->garray,diag;

529:   /* code only works for square matrices A */

531:   /* find size of row to the left of the diagonal part */
532:   MatGetOwnershipRange(A,&diag,0);
533:   row  = row - diag;
534:   for (l=0; l<b->i[row+1]-b->i[row]; l++) {
535:     if (garray[b->j[b->i[row]+l]] > diag) break;
536:   }
537:   PetscArraycpy(b->a+b->i[row],v,l);

539:   /* diagonal part */
540:   PetscArraycpy(a->a+a->i[row],v+l,(a->i[row+1]-a->i[row]));

542:   /* right of diagonal part */
543:   PetscArraycpy(b->a+b->i[row]+l,v+l+a->i[row+1]-a->i[row],b->i[row+1]-b->i[row]-l);
544:   return(0);
545: }
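A concrete (hypothetical) picture of the three copies above: suppose the square matrix's local block owns rows and columns [5, 10) and this local row has nonzeros at global columns {1, 7, 8, 12}. Then v must list the values in that ascending global column order, l comes out as 1, the first value goes to the left part of the off-diagonal block B, the next two values (columns 7 and 8) go to the diagonal block A, and the last value (column 12) goes to the right part of B.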

547: PetscErrorCode MatSetValues_MPIAIJ(Mat mat,PetscInt m,const PetscInt im[],PetscInt n,const PetscInt in[],const PetscScalar v[],InsertMode addv)
548: {
549:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
550:   PetscScalar    value = 0.0;
552:   PetscInt       i,j,rstart  = mat->rmap->rstart,rend = mat->rmap->rend;
553:   PetscInt       cstart      = mat->cmap->rstart,cend = mat->cmap->rend,row,col;
554:   PetscBool      roworiented = aij->roworiented;

556:   /* Some Variables required in the macro */
557:   Mat        A                 = aij->A;
558:   Mat_SeqAIJ *a                = (Mat_SeqAIJ*)A->data;
559:   PetscInt   *aimax            = a->imax,*ai = a->i,*ailen = a->ilen,*aj = a->j;
560:   MatScalar  *aa               = a->a;
561:   PetscBool  ignorezeroentries = a->ignorezeroentries;
562:   Mat        B                 = aij->B;
563:   Mat_SeqAIJ *b                = (Mat_SeqAIJ*)B->data;
564:   PetscInt   *bimax            = b->imax,*bi = b->i,*bilen = b->ilen,*bj = b->j,bm = aij->B->rmap->n,am = aij->A->rmap->n;
565:   MatScalar  *ba               = b->a;

567:   PetscInt  *rp1,*rp2,ii,nrow1,nrow2,_i,rmax1,rmax2,N,low1,high1,low2,high2,t,lastcol1,lastcol2;
568:   PetscInt  nonew;
569:   MatScalar *ap1,*ap2;

572:   for (i=0; i<m; i++) {
573:     if (im[i] < 0) continue;
574: #if defined(PETSC_USE_DEBUG)
575:     if (im[i] >= mat->rmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Row too large: row %D max %D",im[i],mat->rmap->N-1);
576: #endif
577:     if (im[i] >= rstart && im[i] < rend) {
578:       row      = im[i] - rstart;
579:       lastcol1 = -1;
580:       rp1      = aj + ai[row];
581:       ap1      = aa + ai[row];
582:       rmax1    = aimax[row];
583:       nrow1    = ailen[row];
584:       low1     = 0;
585:       high1    = nrow1;
586:       lastcol2 = -1;
587:       rp2      = bj + bi[row];
588:       ap2      = ba + bi[row];
589:       rmax2    = bimax[row];
590:       nrow2    = bilen[row];
591:       low2     = 0;
592:       high2    = nrow2;

594:       for (j=0; j<n; j++) {
595:         if (v)  value = roworiented ? v[i*n+j] : v[i+j*m];
596:         if (in[j] >= cstart && in[j] < cend) {
597:           col   = in[j] - cstart;
598:           nonew = a->nonew;
599:           if (ignorezeroentries && value == 0.0 && (addv == ADD_VALUES) && row != col) continue;
600:           MatSetValues_SeqAIJ_A_Private(row,col,value,addv,im[i],in[j]);
601:         } else if (in[j] < 0) continue;
602: #if defined(PETSC_USE_DEBUG)
603:         else if (in[j] >= mat->cmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Column too large: col %D max %D",in[j],mat->cmap->N-1);
604: #endif
605:         else {
606:           if (mat->was_assembled) {
607:             if (!aij->colmap) {
608:               MatCreateColmap_MPIAIJ_Private(mat);
609:             }
610: #if defined(PETSC_USE_CTABLE)
611:             PetscTableFind(aij->colmap,in[j]+1,&col);
612:             col--;
613: #else
614:             col = aij->colmap[in[j]] - 1;
615: #endif
616:             if (col < 0 && !((Mat_SeqAIJ*)(aij->B->data))->nonew) {
617:               MatDisAssemble_MPIAIJ(mat);
618:               col  =  in[j];
619:               /* Reinitialize the variables required by MatSetValues_SeqAIJ_B_Private() */
620:               B     = aij->B;
621:               b     = (Mat_SeqAIJ*)B->data;
622:               bimax = b->imax; bi = b->i; bilen = b->ilen; bj = b->j; ba = b->a;
623:               rp2   = bj + bi[row];
624:               ap2   = ba + bi[row];
625:               rmax2 = bimax[row];
626:               nrow2 = bilen[row];
627:               low2  = 0;
628:               high2 = nrow2;
629:               bm    = aij->B->rmap->n;
630:               ba    = b->a;
631:             } else if (col < 0) {
632:               if (1 == ((Mat_SeqAIJ*)(aij->B->data))->nonew) {
633:                 PetscInfo3(mat,"Skipping of insertion of new nonzero location in off-diagonal portion of matrix %g(%D,%D)\n",(double)PetscRealPart(value),im[i],in[j]);
634:               } else SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero at global row/column (%D, %D) into matrix", im[i], in[j]);
635:             }
636:           } else col = in[j];
637:           nonew = b->nonew;
638:           MatSetValues_SeqAIJ_B_Private(row,col,value,addv,im[i],in[j]);
639:         }
640:       }
641:     } else {
642:       if (mat->nooffprocentries) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Setting off process row %D even though MatSetOption(,MAT_NO_OFF_PROC_ENTRIES,PETSC_TRUE) was set",im[i]);
643:       if (!aij->donotstash) {
644:         mat->assembled = PETSC_FALSE;
645:         if (roworiented) {
646:           MatStashValuesRow_Private(&mat->stash,im[i],n,in,v+i*n,(PetscBool)(ignorezeroentries && (addv == ADD_VALUES)));
647:         } else {
648:           MatStashValuesCol_Private(&mat->stash,im[i],n,in,v+i,m,(PetscBool)(ignorezeroentries && (addv == ADD_VALUES)));
649:         }
650:       }
651:     }
652:   }
653:   return(0);
654: }

656: /*
657:     This function sets the j and ilen arrays (of the diagonal and off-diagonal part) of an MPIAIJ-matrix.
658:     The values in mat_i have to be sorted and the values in mat_j have to be sorted for each row (CSR-like).
 659:     No off-processor parts of the matrix are allowed here, and mat->was_assembled has to be PETSC_FALSE.
660: */
661: PetscErrorCode MatSetValues_MPIAIJ_CopyFromCSRFormat_Symbolic(Mat mat,const PetscInt mat_j[],const PetscInt mat_i[])
662: {
663:   Mat_MPIAIJ     *aij        = (Mat_MPIAIJ*)mat->data;
664:   Mat            A           = aij->A; /* diagonal part of the matrix */
665:   Mat            B           = aij->B; /* offdiagonal part of the matrix */
666:   Mat_SeqAIJ     *a          = (Mat_SeqAIJ*)A->data;
667:   Mat_SeqAIJ     *b          = (Mat_SeqAIJ*)B->data;
668:   PetscInt       cstart      = mat->cmap->rstart,cend = mat->cmap->rend,col;
669:   PetscInt       *ailen      = a->ilen,*aj = a->j;
670:   PetscInt       *bilen      = b->ilen,*bj = b->j;
671:   PetscInt       am          = aij->A->rmap->n,j;
672:   PetscInt       diag_so_far = 0,dnz;
673:   PetscInt       offd_so_far = 0,onz;

676:   /* Iterate over all rows of the matrix */
677:   for (j=0; j<am; j++) {
678:     dnz = onz = 0;
679:     /*  Iterate over all non-zero columns of the current row */
680:     for (col=mat_i[j]; col<mat_i[j+1]; col++) {
681:       /* If column is in the diagonal */
682:       if (mat_j[col] >= cstart && mat_j[col] < cend) {
683:         aj[diag_so_far++] = mat_j[col] - cstart;
684:         dnz++;
685:       } else { /* off-diagonal entries */
686:         bj[offd_so_far++] = mat_j[col];
687:         onz++;
688:       }
689:     }
690:     ailen[j] = dnz;
691:     bilen[j] = onz;
692:   }
693:   return(0);
694: }

696: /*
697:     This function sets the local j, a and ilen arrays (of the diagonal and off-diagonal part) of an MPIAIJ-matrix.
698:     The values in mat_i have to be sorted and the values in mat_j have to be sorted for each row (CSR-like).
 699:     No off-processor parts of the matrix are allowed here; they are set at a later point by MatSetValues_MPIAIJ().
 700:     Also, mat->was_assembled has to be false, otherwise the assignment aj[rowstart_diag+dnz_row] = mat_j[col] - cstart;
 701:     would not be correct and the more complex MatSetValues_MPIAIJ() has to be used.
702: */
703: PetscErrorCode MatSetValues_MPIAIJ_CopyFromCSRFormat(Mat mat,const PetscInt mat_j[],const PetscInt mat_i[],const PetscScalar mat_a[])
704: {
705:   Mat_MPIAIJ     *aij   = (Mat_MPIAIJ*)mat->data;
706:   Mat            A      = aij->A; /* diagonal part of the matrix */
707:   Mat            B      = aij->B; /* offdiagonal part of the matrix */
708:   Mat_SeqAIJ     *aijd  =(Mat_SeqAIJ*)(aij->A)->data,*aijo=(Mat_SeqAIJ*)(aij->B)->data;
709:   Mat_SeqAIJ     *a     = (Mat_SeqAIJ*)A->data;
710:   Mat_SeqAIJ     *b     = (Mat_SeqAIJ*)B->data;
711:   PetscInt       cstart = mat->cmap->rstart,cend = mat->cmap->rend;
712:   PetscInt       *ailen = a->ilen,*aj = a->j;
713:   PetscInt       *bilen = b->ilen,*bj = b->j;
714:   PetscInt       am     = aij->A->rmap->n,j;
715:   PetscInt       *full_diag_i=aijd->i,*full_offd_i=aijo->i; /* These variables can also include non-local elements, which are set at a later point. */
716:   PetscInt       col,dnz_row,onz_row,rowstart_diag,rowstart_offd;
717:   PetscScalar    *aa = a->a,*ba = b->a;

720:   /* Iterate over all rows of the matrix */
721:   for (j=0; j<am; j++) {
722:     dnz_row = onz_row = 0;
723:     rowstart_offd = full_offd_i[j];
724:     rowstart_diag = full_diag_i[j];
725:     /*  Iterate over all non-zero columns of the current row */
726:     for (col=mat_i[j]; col<mat_i[j+1]; col++) {
727:       /* If column is in the diagonal */
728:       if (mat_j[col] >= cstart && mat_j[col] < cend) {
729:         aj[rowstart_diag+dnz_row] = mat_j[col] - cstart;
730:         aa[rowstart_diag+dnz_row] = mat_a[col];
731:         dnz_row++;
732:       } else { /* off-diagonal entries */
733:         bj[rowstart_offd+onz_row] = mat_j[col];
734:         ba[rowstart_offd+onz_row] = mat_a[col];
735:         onz_row++;
736:       }
737:     }
738:     ailen[j] = dnz_row;
739:     bilen[j] = onz_row;
740:   }
741:   return(0);
742: }
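To illustrate the column split performed by the two CopyFromCSRFormat routines above (hypothetical numbers): if this process owns global columns [4, 8), i.e. cstart = 4 and cend = 8, and a local CSR row lists global columns {1, 5, 9} with values {a, b, c}, then the diagonal block A receives local column 5 - 4 = 1 with value b, while the off-diagonal block B receives global columns 1 and 9 with values a and c; B's global column indices are compacted to local ones later, when MatSetUpMultiply_MPIAIJ() is called during MatAssemblyEnd_MPIAIJ().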

744: PetscErrorCode MatGetValues_MPIAIJ(Mat mat,PetscInt m,const PetscInt idxm[],PetscInt n,const PetscInt idxn[],PetscScalar v[])
745: {
746:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
748:   PetscInt       i,j,rstart = mat->rmap->rstart,rend = mat->rmap->rend;
749:   PetscInt       cstart = mat->cmap->rstart,cend = mat->cmap->rend,row,col;

752:   for (i=0; i<m; i++) {
753:     if (idxm[i] < 0) continue; /* SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Negative row: %D",idxm[i]);*/
754:     if (idxm[i] >= mat->rmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Row too large: row %D max %D",idxm[i],mat->rmap->N-1);
755:     if (idxm[i] >= rstart && idxm[i] < rend) {
756:       row = idxm[i] - rstart;
757:       for (j=0; j<n; j++) {
758:         if (idxn[j] < 0) continue; /* SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Negative column: %D",idxn[j]); */
759:         if (idxn[j] >= mat->cmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Column too large: col %D max %D",idxn[j],mat->cmap->N-1);
760:         if (idxn[j] >= cstart && idxn[j] < cend) {
761:           col  = idxn[j] - cstart;
762:           MatGetValues(aij->A,1,&row,1,&col,v+i*n+j);
763:         } else {
764:           if (!aij->colmap) {
765:             MatCreateColmap_MPIAIJ_Private(mat);
766:           }
767: #if defined(PETSC_USE_CTABLE)
768:           PetscTableFind(aij->colmap,idxn[j]+1,&col);
769:           col--;
770: #else
771:           col = aij->colmap[idxn[j]] - 1;
772: #endif
773:           if ((col < 0) || (aij->garray[col] != idxn[j])) *(v+i*n+j) = 0.0;
774:           else {
775:             MatGetValues(aij->B,1,&row,1,&col,v+i*n+j);
776:           }
777:         }
778:       }
779:     } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Only local values currently supported");
780:   }
781:   return(0);
782: }

784: extern PetscErrorCode MatMultDiagonalBlock_MPIAIJ(Mat,Vec,Vec);

786: PetscErrorCode MatAssemblyBegin_MPIAIJ(Mat mat,MatAssemblyType mode)
787: {
788:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
790:   PetscInt       nstash,reallocs;

793:   if (aij->donotstash || mat->nooffprocentries) return(0);

795:   MatStashScatterBegin_Private(mat,&mat->stash,mat->rmap->range);
796:   MatStashGetInfo_Private(&mat->stash,&nstash,&reallocs);
797:   PetscInfo2(aij->A,"Stash has %D entries, uses %D mallocs.\n",nstash,reallocs);
798:   return(0);
799: }

801: PetscErrorCode MatAssemblyEnd_MPIAIJ(Mat mat,MatAssemblyType mode)
802: {
803:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
804:   Mat_SeqAIJ     *a   = (Mat_SeqAIJ*)aij->A->data;
806:   PetscMPIInt    n;
807:   PetscInt       i,j,rstart,ncols,flg;
808:   PetscInt       *row,*col;
809:   PetscBool      other_disassembled;
810:   PetscScalar    *val;

812:   /* do not use 'b = (Mat_SeqAIJ*)aij->B->data' as B can be reset in disassembly */

815:   if (!aij->donotstash && !mat->nooffprocentries) {
816:     while (1) {
817:       MatStashScatterGetMesg_Private(&mat->stash,&n,&row,&col,&val,&flg);
818:       if (!flg) break;

820:       for (i=0; i<n; ) {
821:         /* Now identify the consecutive vals belonging to the same row */
822:         for (j=i,rstart=row[j]; j<n; j++) {
823:           if (row[j] != rstart) break;
824:         }
825:         if (j < n) ncols = j-i;
826:         else       ncols = n-i;
827:         /* Now assemble all these values with a single function call */
828:         MatSetValues_MPIAIJ(mat,1,row+i,ncols,col+i,val+i,mat->insertmode);

830:         i = j;
831:       }
832:     }
833:     MatStashScatterEnd_Private(&mat->stash);
834:   }
835: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_CUDA)
836:   if (mat->valid_GPU_matrix == PETSC_OFFLOAD_CPU) aij->A->valid_GPU_matrix = PETSC_OFFLOAD_CPU;
837: #endif
838:   MatAssemblyBegin(aij->A,mode);
839:   MatAssemblyEnd(aij->A,mode);

 841:   /* determine if any processor has disassembled; if so, we must
 842:      also disassemble ourselves so that we may reassemble. */
843:   /*
844:      if nonzero structure of submatrix B cannot change then we know that
845:      no processor disassembled thus we can skip this stuff
846:   */
847:   if (!((Mat_SeqAIJ*)aij->B->data)->nonew) {
848:     MPIU_Allreduce(&mat->was_assembled,&other_disassembled,1,MPIU_BOOL,MPI_PROD,PetscObjectComm((PetscObject)mat));
849:     if (mat->was_assembled && !other_disassembled) {
850: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_CUDA)
851:       aij->B->valid_GPU_matrix = PETSC_OFFLOAD_BOTH; /* do not copy on the GPU when assembling inside MatDisAssemble_MPIAIJ */
852: #endif
853:       MatDisAssemble_MPIAIJ(mat);
854:     }
855:   }
856:   if (!mat->was_assembled && mode == MAT_FINAL_ASSEMBLY) {
857:     MatSetUpMultiply_MPIAIJ(mat);
858:   }
859:   MatSetOption(aij->B,MAT_USE_INODES,PETSC_FALSE);
860: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_CUDA)
861:   if (mat->valid_GPU_matrix == PETSC_OFFLOAD_CPU && aij->B->valid_GPU_matrix != PETSC_OFFLOAD_UNALLOCATED) aij->B->valid_GPU_matrix = PETSC_OFFLOAD_CPU;
862: #endif
863:   MatAssemblyBegin(aij->B,mode);
864:   MatAssemblyEnd(aij->B,mode);

866:   PetscFree2(aij->rowvalues,aij->rowindices);

868:   aij->rowvalues = 0;

870:   VecDestroy(&aij->diag);
871:   if (a->inode.size) mat->ops->multdiagonalblock = MatMultDiagonalBlock_MPIAIJ;

 873:   /* if no new nonzero locations are allowed in the matrix then only set the matrix state the first time through */
874:   if ((!mat->was_assembled && mode == MAT_FINAL_ASSEMBLY) || !((Mat_SeqAIJ*)(aij->A->data))->nonew) {
875:     PetscObjectState state = aij->A->nonzerostate + aij->B->nonzerostate;
876:     MPIU_Allreduce(&state,&mat->nonzerostate,1,MPIU_INT64,MPI_SUM,PetscObjectComm((PetscObject)mat));
877:   }
878: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_CUDA)
879:   mat->valid_GPU_matrix = PETSC_OFFLOAD_BOTH;
880: #endif
881:   return(0);
882: }

884: PetscErrorCode MatZeroEntries_MPIAIJ(Mat A)
885: {
886:   Mat_MPIAIJ     *l = (Mat_MPIAIJ*)A->data;

890:   MatZeroEntries(l->A);
891:   MatZeroEntries(l->B);
892:   return(0);
893: }

895: PetscErrorCode MatZeroRows_MPIAIJ(Mat A,PetscInt N,const PetscInt rows[],PetscScalar diag,Vec x,Vec b)
896: {
897:   Mat_MPIAIJ      *mat = (Mat_MPIAIJ *) A->data;
898:   PetscObjectState sA, sB;
899:   PetscInt        *lrows;
900:   PetscInt         r, len;
901:   PetscBool        cong, lch, gch;
902:   PetscErrorCode   ierr;

905:   /* get locally owned rows */
906:   MatZeroRowsMapLocal_Private(A,N,rows,&len,&lrows);
907:   MatHasCongruentLayouts(A,&cong);
908:   /* fix right hand side if needed */
909:   if (x && b) {
910:     const PetscScalar *xx;
911:     PetscScalar       *bb;

913:     if (!cong) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Need matching row/col layout");
914:     VecGetArrayRead(x, &xx);
915:     VecGetArray(b, &bb);
916:     for (r = 0; r < len; ++r) bb[lrows[r]] = diag*xx[lrows[r]];
917:     VecRestoreArrayRead(x, &xx);
918:     VecRestoreArray(b, &bb);
919:   }

921:   sA = mat->A->nonzerostate;
922:   sB = mat->B->nonzerostate;

924:   if (diag != 0.0 && cong) {
925:     MatZeroRows(mat->A, len, lrows, diag, NULL, NULL);
926:     MatZeroRows(mat->B, len, lrows, 0.0, NULL, NULL);
927:   } else if (diag != 0.0) { /* non-square or non congruent layouts -> if keepnonzeropattern is false, we allow for new insertion */
928:     Mat_SeqAIJ *aijA = (Mat_SeqAIJ*)mat->A->data;
929:     Mat_SeqAIJ *aijB = (Mat_SeqAIJ*)mat->B->data;
930:     PetscInt   nnwA, nnwB;
931:     PetscBool  nnzA, nnzB;

933:     nnwA = aijA->nonew;
934:     nnwB = aijB->nonew;
935:     nnzA = aijA->keepnonzeropattern;
936:     nnzB = aijB->keepnonzeropattern;
937:     if (!nnzA) {
938:       PetscInfo(mat->A,"Requested to not keep the pattern and add a nonzero diagonal; may encounter reallocations on diagonal block.\n");
939:       aijA->nonew = 0;
940:     }
941:     if (!nnzB) {
942:       PetscInfo(mat->B,"Requested to not keep the pattern and add a nonzero diagonal; may encounter reallocations on off-diagonal block.\n");
943:       aijB->nonew = 0;
944:     }
945:     /* Must zero here before the next loop */
946:     MatZeroRows(mat->A, len, lrows, 0.0, NULL, NULL);
947:     MatZeroRows(mat->B, len, lrows, 0.0, NULL, NULL);
948:     for (r = 0; r < len; ++r) {
949:       const PetscInt row = lrows[r] + A->rmap->rstart;
950:       if (row >= A->cmap->N) continue;
951:       MatSetValues(A, 1, &row, 1, &row, &diag, INSERT_VALUES);
952:     }
953:     aijA->nonew = nnwA;
954:     aijB->nonew = nnwB;
955:   } else {
956:     MatZeroRows(mat->A, len, lrows, 0.0, NULL, NULL);
957:     MatZeroRows(mat->B, len, lrows, 0.0, NULL, NULL);
958:   }
959:   PetscFree(lrows);
960:   MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
961:   MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);

963:   /* reduce nonzerostate */
964:   lch = (PetscBool)(sA != mat->A->nonzerostate || sB != mat->B->nonzerostate);
965:   MPIU_Allreduce(&lch,&gch,1,MPIU_BOOL,MPI_LOR,PetscObjectComm((PetscObject)A));
966:   if (gch) A->nonzerostate++;
967:   return(0);
968: }

970: PetscErrorCode MatZeroRowsColumns_MPIAIJ(Mat A,PetscInt N,const PetscInt rows[],PetscScalar diag,Vec x,Vec b)
971: {
972:   Mat_MPIAIJ        *l = (Mat_MPIAIJ*)A->data;
973:   PetscErrorCode    ierr;
974:   PetscMPIInt       n = A->rmap->n;
975:   PetscInt          i,j,r,m,p = 0,len = 0;
976:   PetscInt          *lrows,*owners = A->rmap->range;
977:   PetscSFNode       *rrows;
978:   PetscSF           sf;
979:   const PetscScalar *xx;
980:   PetscScalar       *bb,*mask;
981:   Vec               xmask,lmask;
982:   Mat_SeqAIJ        *aij = (Mat_SeqAIJ*)l->B->data;
983:   const PetscInt    *aj, *ii,*ridx;
984:   PetscScalar       *aa;

987:   /* Create SF where leaves are input rows and roots are owned rows */
988:   PetscMalloc1(n, &lrows);
989:   for (r = 0; r < n; ++r) lrows[r] = -1;
990:   PetscMalloc1(N, &rrows);
991:   for (r = 0; r < N; ++r) {
992:     const PetscInt idx   = rows[r];
993:     if (idx < 0 || A->rmap->N <= idx) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Row %D out of range [0,%D)",idx,A->rmap->N);
994:     if (idx < owners[p] || owners[p+1] <= idx) { /* short-circuit the search if the last p owns this row too */
995:       PetscLayoutFindOwner(A->rmap,idx,&p);
996:     }
997:     rrows[r].rank  = p;
998:     rrows[r].index = rows[r] - owners[p];
999:   }
1000:   PetscSFCreate(PetscObjectComm((PetscObject) A), &sf);
1001:   PetscSFSetGraph(sf, n, N, NULL, PETSC_OWN_POINTER, rrows, PETSC_OWN_POINTER);
1002:   /* Collect flags for rows to be zeroed */
1003:   PetscSFReduceBegin(sf, MPIU_INT, (PetscInt *) rows, lrows, MPI_LOR);
1004:   PetscSFReduceEnd(sf, MPIU_INT, (PetscInt *) rows, lrows, MPI_LOR);
1005:   PetscSFDestroy(&sf);
1006:   /* Compress and put in row numbers */
1007:   for (r = 0; r < n; ++r) if (lrows[r] >= 0) lrows[len++] = r;
1008:   /* zero diagonal part of matrix */
1009:   MatZeroRowsColumns(l->A,len,lrows,diag,x,b);
1010:   /* handle off diagonal part of matrix */
1011:   MatCreateVecs(A,&xmask,NULL);
1012:   VecDuplicate(l->lvec,&lmask);
1013:   VecGetArray(xmask,&bb);
1014:   for (i=0; i<len; i++) bb[lrows[i]] = 1;
1015:   VecRestoreArray(xmask,&bb);
1016:   VecScatterBegin(l->Mvctx,xmask,lmask,ADD_VALUES,SCATTER_FORWARD);
1017:   VecScatterEnd(l->Mvctx,xmask,lmask,ADD_VALUES,SCATTER_FORWARD);
1018:   VecDestroy(&xmask);
1019:   if (x && b) { /* this code is buggy when the row and column layout don't match */
1020:     PetscBool cong;

1022:     MatHasCongruentLayouts(A,&cong);
1023:     if (!cong) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Need matching row/col layout");
1024:     VecScatterBegin(l->Mvctx,x,l->lvec,INSERT_VALUES,SCATTER_FORWARD);
1025:     VecScatterEnd(l->Mvctx,x,l->lvec,INSERT_VALUES,SCATTER_FORWARD);
1026:     VecGetArrayRead(l->lvec,&xx);
1027:     VecGetArray(b,&bb);
1028:   }
1029:   VecGetArray(lmask,&mask);
1030:   /* remove zeroed rows of off diagonal matrix */
1031:   ii = aij->i;
1032:   for (i=0; i<len; i++) {
1033:     PetscArrayzero(aij->a + ii[lrows[i]],ii[lrows[i]+1] - ii[lrows[i]]);
1034:   }
1035:   /* loop over all elements of the off-process part of the matrix, zeroing removed columns */
1036:   if (aij->compressedrow.use) {
1037:     m    = aij->compressedrow.nrows;
1038:     ii   = aij->compressedrow.i;
1039:     ridx = aij->compressedrow.rindex;
1040:     for (i=0; i<m; i++) {
1041:       n  = ii[i+1] - ii[i];
1042:       aj = aij->j + ii[i];
1043:       aa = aij->a + ii[i];

1045:       for (j=0; j<n; j++) {
1046:         if (PetscAbsScalar(mask[*aj])) {
1047:           if (b) bb[*ridx] -= *aa*xx[*aj];
1048:           *aa = 0.0;
1049:         }
1050:         aa++;
1051:         aj++;
1052:       }
1053:       ridx++;
1054:     }
1055:   } else { /* do not use compressed row format */
1056:     m = l->B->rmap->n;
1057:     for (i=0; i<m; i++) {
1058:       n  = ii[i+1] - ii[i];
1059:       aj = aij->j + ii[i];
1060:       aa = aij->a + ii[i];
1061:       for (j=0; j<n; j++) {
1062:         if (PetscAbsScalar(mask[*aj])) {
1063:           if (b) bb[i] -= *aa*xx[*aj];
1064:           *aa = 0.0;
1065:         }
1066:         aa++;
1067:         aj++;
1068:       }
1069:     }
1070:   }
1071:   if (x && b) {
1072:     VecRestoreArray(b,&bb);
1073:     VecRestoreArrayRead(l->lvec,&xx);
1074:   }
1075:   VecRestoreArray(lmask,&mask);
1076:   VecDestroy(&lmask);
1077:   PetscFree(lrows);

1079:   /* only change matrix nonzero state if pattern was allowed to be changed */
1080:   if (!((Mat_SeqAIJ*)(l->A->data))->keepnonzeropattern) {
1081:     PetscObjectState state = l->A->nonzerostate + l->B->nonzerostate;
1082:     MPIU_Allreduce(&state,&A->nonzerostate,1,MPIU_INT64,MPI_SUM,PetscObjectComm((PetscObject)A));
1083:   }
1084:   return(0);
1085: }

1087: PetscErrorCode MatMult_MPIAIJ(Mat A,Vec xx,Vec yy)
1088: {
1089:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
1091:   PetscInt       nt;
1092:   VecScatter     Mvctx = a->Mvctx;

1095:   VecGetLocalSize(xx,&nt);
1096:   if (nt != A->cmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Incompatible partition of A (%D) and xx (%D)",A->cmap->n,nt);

1098:   VecScatterBegin(Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
1099:   (*a->A->ops->mult)(a->A,xx,yy);
1100:   VecScatterEnd(Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
1101:   (*a->B->ops->multadd)(a->B,a->lvec,yy,yy);
1102:   return(0);
1103: }

1105: PetscErrorCode MatMultDiagonalBlock_MPIAIJ(Mat A,Vec bb,Vec xx)
1106: {
1107:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;

1111:   MatMultDiagonalBlock(a->A,bb,xx);
1112:   return(0);
1113: }

1115: PetscErrorCode MatMultAdd_MPIAIJ(Mat A,Vec xx,Vec yy,Vec zz)
1116: {
1117:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
1119:   VecScatter     Mvctx = a->Mvctx;

1122:   if (a->Mvctx_mpi1_flg) Mvctx = a->Mvctx_mpi1;
1123:   VecScatterBegin(Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
1124:   (*a->A->ops->multadd)(a->A,xx,yy,zz);
1125:   VecScatterEnd(Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
1126:   (*a->B->ops->multadd)(a->B,a->lvec,zz,zz);
1127:   return(0);
1128: }

1130: PetscErrorCode MatMultTranspose_MPIAIJ(Mat A,Vec xx,Vec yy)
1131: {
1132:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;

1136:   /* do nondiagonal part */
1137:   (*a->B->ops->multtranspose)(a->B,xx,a->lvec);
1138:   /* do local part */
1139:   (*a->A->ops->multtranspose)(a->A,xx,yy);
1140:   /* add partial results together */
1141:   VecScatterBegin(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);
1142:   VecScatterEnd(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);
1143:   return(0);
1144: }

1146: PetscErrorCode MatIsTranspose_MPIAIJ(Mat Amat,Mat Bmat,PetscReal tol,PetscBool  *f)
1147: {
1148:   MPI_Comm       comm;
1149:   Mat_MPIAIJ     *Aij = (Mat_MPIAIJ*) Amat->data, *Bij;
1150:   Mat            Adia = Aij->A, Bdia, Aoff,Boff,*Aoffs,*Boffs;
1151:   IS             Me,Notme;
1153:   PetscInt       M,N,first,last,*notme,i;
1154:   PetscBool      lf;
1155:   PetscMPIInt    size;

1158:   /* Easy test: symmetric diagonal block */
1159:   Bij  = (Mat_MPIAIJ*) Bmat->data; Bdia = Bij->A;
1160:   MatIsTranspose(Adia,Bdia,tol,&lf);
1161:   MPIU_Allreduce(&lf,f,1,MPIU_BOOL,MPI_LAND,PetscObjectComm((PetscObject)Amat));
1162:   if (!*f) return(0);
1163:   PetscObjectGetComm((PetscObject)Amat,&comm);
1164:   MPI_Comm_size(comm,&size);
1165:   if (size == 1) return(0);

1167:   /* Hard test: off-diagonal block. This takes a MatCreateSubMatrix. */
1168:   MatGetSize(Amat,&M,&N);
1169:   MatGetOwnershipRange(Amat,&first,&last);
1170:   PetscMalloc1(N-last+first,&notme);
1171:   for (i=0; i<first; i++) notme[i] = i;
1172:   for (i=last; i<M; i++) notme[i-last+first] = i;
1173:   ISCreateGeneral(MPI_COMM_SELF,N-last+first,notme,PETSC_COPY_VALUES,&Notme);
1174:   ISCreateStride(MPI_COMM_SELF,last-first,first,1,&Me);
1175:   MatCreateSubMatrices(Amat,1,&Me,&Notme,MAT_INITIAL_MATRIX,&Aoffs);
1176:   Aoff = Aoffs[0];
1177:   MatCreateSubMatrices(Bmat,1,&Notme,&Me,MAT_INITIAL_MATRIX,&Boffs);
1178:   Boff = Boffs[0];
1179:   MatIsTranspose(Aoff,Boff,tol,f);
1180:   MatDestroyMatrices(1,&Aoffs);
1181:   MatDestroyMatrices(1,&Boffs);
1182:   ISDestroy(&Me);
1183:   ISDestroy(&Notme);
1184:   PetscFree(notme);
1185:   return(0);
1186: }

1188: PetscErrorCode MatIsSymmetric_MPIAIJ(Mat A,PetscReal tol,PetscBool  *f)
1189: {

1193:   MatIsTranspose_MPIAIJ(A,A,tol,f);
1194:   return(0);
1195: }

1197: PetscErrorCode MatMultTransposeAdd_MPIAIJ(Mat A,Vec xx,Vec yy,Vec zz)
1198: {
1199:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;

1203:   /* do nondiagonal part */
1204:   (*a->B->ops->multtranspose)(a->B,xx,a->lvec);
1205:   /* do local part */
1206:   (*a->A->ops->multtransposeadd)(a->A,xx,yy,zz);
1207:   /* add partial results together */
1208:   VecScatterBegin(a->Mvctx,a->lvec,zz,ADD_VALUES,SCATTER_REVERSE);
1209:   VecScatterEnd(a->Mvctx,a->lvec,zz,ADD_VALUES,SCATTER_REVERSE);
1210:   return(0);
1211: }

1213: /*
1214:   This only works correctly for square matrices where the subblock A->A is the
1215:    diagonal block
1216: */
1217: PetscErrorCode MatGetDiagonal_MPIAIJ(Mat A,Vec v)
1218: {
1220:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;

1223:   if (A->rmap->N != A->cmap->N) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Supports only square matrix where A->A is diag block");
1224:   if (A->rmap->rstart != A->cmap->rstart || A->rmap->rend != A->cmap->rend) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"row partition must equal col partition");
1225:   MatGetDiagonal(a->A,v);
1226:   return(0);
1227: }

1229: PetscErrorCode MatScale_MPIAIJ(Mat A,PetscScalar aa)
1230: {
1231:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;

1235:   MatScale(a->A,aa);
1236:   MatScale(a->B,aa);
1237:   return(0);
1238: }

1240: PetscErrorCode MatDestroy_MPIAIJ(Mat mat)
1241: {
1242:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;

1246: #if defined(PETSC_USE_LOG)
1247:   PetscLogObjectState((PetscObject)mat,"Rows=%D, Cols=%D",mat->rmap->N,mat->cmap->N);
1248: #endif
1249:   MatStashDestroy_Private(&mat->stash);
1250:   VecDestroy(&aij->diag);
1251:   MatDestroy(&aij->A);
1252:   MatDestroy(&aij->B);
1253: #if defined(PETSC_USE_CTABLE)
1254:   PetscTableDestroy(&aij->colmap);
1255: #else
1256:   PetscFree(aij->colmap);
1257: #endif
1258:   PetscFree(aij->garray);
1259:   VecDestroy(&aij->lvec);
1260:   VecScatterDestroy(&aij->Mvctx);
1261:   if (aij->Mvctx_mpi1) {VecScatterDestroy(&aij->Mvctx_mpi1);}
1262:   PetscFree2(aij->rowvalues,aij->rowindices);
1263:   PetscFree(aij->ld);
1264:   PetscFree(mat->data);

1266:   PetscObjectChangeTypeName((PetscObject)mat,0);
1267:   PetscObjectComposeFunction((PetscObject)mat,"MatStoreValues_C",NULL);
1268:   PetscObjectComposeFunction((PetscObject)mat,"MatRetrieveValues_C",NULL);
1269:   PetscObjectComposeFunction((PetscObject)mat,"MatIsTranspose_C",NULL);
1270:   PetscObjectComposeFunction((PetscObject)mat,"MatMPIAIJSetPreallocation_C",NULL);
1271:   PetscObjectComposeFunction((PetscObject)mat,"MatResetPreallocation_C",NULL);
1272:   PetscObjectComposeFunction((PetscObject)mat,"MatMPIAIJSetPreallocationCSR_C",NULL);
1273:   PetscObjectComposeFunction((PetscObject)mat,"MatDiagonalScaleLocal_C",NULL);
1274:   PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpiaij_mpisbaij_C",NULL);
1275: #if defined(PETSC_HAVE_ELEMENTAL)
1276:   PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpiaij_elemental_C",NULL);
1277: #endif
1278: #if defined(PETSC_HAVE_HYPRE)
1279:   PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpiaij_hypre_C",NULL);
1280:   PetscObjectComposeFunction((PetscObject)mat,"MatMatMatMult_transpose_mpiaij_mpiaij_C",NULL);
1281: #endif
1282:   PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpiaij_is_C",NULL);
1283:   PetscObjectComposeFunction((PetscObject)mat,"MatPtAP_is_mpiaij_C",NULL);
1284:   return(0);
1285: }

1287: PetscErrorCode MatView_MPIAIJ_Binary(Mat mat,PetscViewer viewer)
1288: {
1289:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
1290:   Mat_SeqAIJ     *A   = (Mat_SeqAIJ*)aij->A->data;
1291:   Mat_SeqAIJ     *B   = (Mat_SeqAIJ*)aij->B->data;
1293:   PetscMPIInt    rank,size,tag = ((PetscObject)viewer)->tag;
1294:   int            fd;
1295:   PetscInt       nz,header[4],*row_lengths,*range=0,rlen,i;
1296:   PetscInt       nzmax,*column_indices,j,k,col,*garray = aij->garray,cnt,cstart = mat->cmap->rstart,rnz = 0;
1297:   PetscScalar    *column_values;
1298:   PetscInt       message_count,flowcontrolcount;
1299:   FILE           *file;

1302:   MPI_Comm_rank(PetscObjectComm((PetscObject)mat),&rank);
1303:   MPI_Comm_size(PetscObjectComm((PetscObject)mat),&size);
1304:   nz   = A->nz + B->nz;
1305:   PetscViewerBinaryGetDescriptor(viewer,&fd);
1306:   if (!rank) {
1307:     header[0] = MAT_FILE_CLASSID;
1308:     header[1] = mat->rmap->N;
1309:     header[2] = mat->cmap->N;

1311:     MPI_Reduce(&nz,&header[3],1,MPIU_INT,MPI_SUM,0,PetscObjectComm((PetscObject)mat));
1312:     PetscBinaryWrite(fd,header,4,PETSC_INT,PETSC_TRUE);
1313:     /* get largest number of rows any processor has */
1314:     rlen  = mat->rmap->n;
1315:     range = mat->rmap->range;
1316:     for (i=1; i<size; i++) rlen = PetscMax(rlen,range[i+1] - range[i]);
1317:   } else {
1318:     MPI_Reduce(&nz,0,1,MPIU_INT,MPI_SUM,0,PetscObjectComm((PetscObject)mat));
1319:     rlen = mat->rmap->n;
1320:   }

1322:   /* load up the local row counts */
1323:   PetscMalloc1(rlen+1,&row_lengths);
1324:   for (i=0; i<mat->rmap->n; i++) row_lengths[i] = A->i[i+1] - A->i[i] + B->i[i+1] - B->i[i];

1326:   /* store the row lengths to the file */
1327:   PetscViewerFlowControlStart(viewer,&message_count,&flowcontrolcount);
1328:   if (!rank) {
1329:     PetscBinaryWrite(fd,row_lengths,mat->rmap->n,PETSC_INT,PETSC_TRUE);
1330:     for (i=1; i<size; i++) {
1331:       PetscViewerFlowControlStepMaster(viewer,i,&message_count,flowcontrolcount);
1332:       rlen = range[i+1] - range[i];
1333:       MPIULong_Recv(row_lengths,rlen,MPIU_INT,i,tag,PetscObjectComm((PetscObject)mat));
1334:       PetscBinaryWrite(fd,row_lengths,rlen,PETSC_INT,PETSC_TRUE);
1335:     }
1336:     PetscViewerFlowControlEndMaster(viewer,&message_count);
1337:   } else {
1338:     PetscViewerFlowControlStepWorker(viewer,rank,&message_count);
1339:     MPIULong_Send(row_lengths,mat->rmap->n,MPIU_INT,0,tag,PetscObjectComm((PetscObject)mat));
1340:     PetscViewerFlowControlEndWorker(viewer,&message_count);
1341:   }
1342:   PetscFree(row_lengths);

1344:   /* load up the local column indices */
1345:   nzmax = nz; /* the root process needs as much space as the largest number of nonzeros on any process */
1346:   MPI_Reduce(&nz,&nzmax,1,MPIU_INT,MPI_MAX,0,PetscObjectComm((PetscObject)mat));
1347:   PetscMalloc1(nzmax+1,&column_indices);
1348:   cnt   = 0;
1349:   for (i=0; i<mat->rmap->n; i++) {
1350:     for (j=B->i[i]; j<B->i[i+1]; j++) {
1351:       if ((col = garray[B->j[j]]) > cstart) break;
1352:       column_indices[cnt++] = col;
1353:     }
1354:     for (k=A->i[i]; k<A->i[i+1]; k++) column_indices[cnt++] = A->j[k] + cstart;
1355:     for (; j<B->i[i+1]; j++) column_indices[cnt++] = garray[B->j[j]];
1356:   }
1357:   if (cnt != A->nz + B->nz) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_LIB,"Internal PETSc error: cnt = %D nz = %D",cnt,A->nz+B->nz);

1359:   /* store the column indices to the file */
1360:   PetscViewerFlowControlStart(viewer,&message_count,&flowcontrolcount);
1361:   if (!rank) {
1362:     MPI_Status status;
1363:     PetscBinaryWrite(fd,column_indices,nz,PETSC_INT,PETSC_TRUE);
1364:     for (i=1; i<size; i++) {
1365:       PetscViewerFlowControlStepMaster(viewer,i,&message_count,flowcontrolcount);
1366:       MPI_Recv(&rnz,1,MPIU_INT,i,tag,PetscObjectComm((PetscObject)mat),&status);
1367:       if (rnz > nzmax) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_LIB,"Internal PETSc error: rnz = %D nzmax = %D",rnz,nzmax);
1368:       MPIULong_Recv(column_indices,rnz,MPIU_INT,i,tag,PetscObjectComm((PetscObject)mat));
1369:       PetscBinaryWrite(fd,column_indices,rnz,PETSC_INT,PETSC_TRUE);
1370:     }
1371:     PetscViewerFlowControlEndMaster(viewer,&message_count);
1372:   } else {
1373:     PetscViewerFlowControlStepWorker(viewer,rank,&message_count);
1374:     MPI_Send(&nz,1,MPIU_INT,0,tag,PetscObjectComm((PetscObject)mat));
1375:     MPIULong_Send(column_indices,nz,MPIU_INT,0,tag,PetscObjectComm((PetscObject)mat));
1376:     PetscViewerFlowControlEndWorker(viewer,&message_count);
1377:   }
1378:   PetscFree(column_indices);

1380:   /* load up the local column values */
1381:   PetscMalloc1(nzmax+1,&column_values);
1382:   cnt  = 0;
1383:   for (i=0; i<mat->rmap->n; i++) {
1384:     for (j=B->i[i]; j<B->i[i+1]; j++) {
1385:       if (garray[B->j[j]] > cstart) break;
1386:       column_values[cnt++] = B->a[j];
1387:     }
1388:     for (k=A->i[i]; k<A->i[i+1]; k++) column_values[cnt++] = A->a[k];
1389:     for (; j<B->i[i+1]; j++) column_values[cnt++] = B->a[j];
1390:   }
1391:   if (cnt != A->nz + B->nz) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Internal PETSc error: cnt = %D nz = %D",cnt,A->nz+B->nz);

1393:   /* store the column values to the file */
1394:   PetscViewerFlowControlStart(viewer,&message_count,&flowcontrolcount);
1395:   if (!rank) {
1396:     MPI_Status status;
1397:     PetscBinaryWrite(fd,column_values,nz,PETSC_SCALAR,PETSC_TRUE);
1398:     for (i=1; i<size; i++) {
1399:       PetscViewerFlowControlStepMaster(viewer,i,&message_count,flowcontrolcount);
1400:       MPI_Recv(&rnz,1,MPIU_INT,i,tag,PetscObjectComm((PetscObject)mat),&status);
1401:       if (rnz > nzmax) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_LIB,"Internal PETSc error: rnz = %D nzmax = %D",rnz,nzmax);
1402:       MPIULong_Recv(column_values,rnz,MPIU_SCALAR,i,tag,PetscObjectComm((PetscObject)mat));
1403:       PetscBinaryWrite(fd,column_values,rnz,PETSC_SCALAR,PETSC_TRUE);
1404:     }
1405:     PetscViewerFlowControlEndMaster(viewer,&message_count);
1406:   } else {
1407:     PetscViewerFlowControlStepWorker(viewer,rank,&message_count);
1408:     MPI_Send(&nz,1,MPIU_INT,0,tag,PetscObjectComm((PetscObject)mat));
1409:     MPIULong_Send(column_values,nz,MPIU_SCALAR,0,tag,PetscObjectComm((PetscObject)mat));
1410:     PetscViewerFlowControlEndWorker(viewer,&message_count);
1411:   }
1412:   PetscFree(column_values);

1414:   PetscViewerBinaryGetInfoPointer(viewer,&file);
1415:   if (file) fprintf(file,"-matload_block_size %d\n",(int)PetscAbs(mat->rmap->bs));
1416:   return(0);
1417: }
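/*
   Illustrative sketch (added for exposition, not part of the PETSc source): one way a
   caller might reach MatView_MPIAIJ_Binary() above, by viewing an assembled MPIAIJ
   matrix with a binary viewer.  The file name "matrix.dat" is an assumption made only
   for this example.
*/
static PetscErrorCode ExampleSaveMPIAIJToBinary(Mat A)
{
  PetscViewer viewer;

  /* collective open of a binary file on the matrix's communicator */
  PetscViewerBinaryOpen(PetscObjectComm((PetscObject)A),"matrix.dat",FILE_MODE_WRITE,&viewer);
  /* on more than one process this dispatches to the binary code above */
  MatView(A,viewer);
  PetscViewerDestroy(&viewer);
  return(0);
}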

1419:  #include <petscdraw.h>
1420: PetscErrorCode MatView_MPIAIJ_ASCIIorDraworSocket(Mat mat,PetscViewer viewer)
1421: {
1422:   Mat_MPIAIJ        *aij = (Mat_MPIAIJ*)mat->data;
1423:   PetscErrorCode    ierr;
1424:   PetscMPIInt       rank = aij->rank,size = aij->size;
1425:   PetscBool         isdraw,iascii,isbinary;
1426:   PetscViewer       sviewer;
1427:   PetscViewerFormat format;

1430:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERDRAW,&isdraw);
1431:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);
1432:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERBINARY,&isbinary);
1433:   if (iascii) {
1434:     PetscViewerGetFormat(viewer,&format);
1435:     if (format == PETSC_VIEWER_LOAD_BALANCE) {
1436:       PetscInt i,nmax = 0,nmin = PETSC_MAX_INT,navg = 0,*nz,nzlocal = ((Mat_SeqAIJ*) (aij->A->data))->nz + ((Mat_SeqAIJ*) (aij->B->data))->nz;
1437:       PetscMalloc1(size,&nz);
1438:       MPI_Allgather(&nzlocal,1,MPIU_INT,nz,1,MPIU_INT,PetscObjectComm((PetscObject)mat));
1439:       for (i=0; i<(PetscInt)size; i++) {
1440:         nmax = PetscMax(nmax,nz[i]);
1441:         nmin = PetscMin(nmin,nz[i]);
1442:         navg += nz[i];
1443:       }
1444:       PetscFree(nz);
1445:       navg = navg/size;
1446:       PetscViewerASCIIPrintf(viewer,"Load Balance - Nonzeros: Min %D  avg %D  max %D\n",nmin,navg,nmax);
1447:       return(0);
1448:     }
1449:     PetscViewerGetFormat(viewer,&format);
1450:     if (format == PETSC_VIEWER_ASCII_INFO_DETAIL) {
1451:       MatInfo   info;
1452:       PetscBool inodes;

1454:       MPI_Comm_rank(PetscObjectComm((PetscObject)mat),&rank);
1455:       MatGetInfo(mat,MAT_LOCAL,&info);
1456:       MatInodeGetInodeSizes(aij->A,NULL,(PetscInt**)&inodes,NULL);
1457:       PetscViewerASCIIPushSynchronized(viewer);
1458:       if (!inodes) {
1459:         PetscViewerASCIISynchronizedPrintf(viewer,"[%d] Local rows %D nz %D nz alloced %D mem %g, not using I-node routines\n",
1460:                                                   rank,mat->rmap->n,(PetscInt)info.nz_used,(PetscInt)info.nz_allocated,(double)info.memory);
1461:       } else {
1462:         PetscViewerASCIISynchronizedPrintf(viewer,"[%d] Local rows %D nz %D nz alloced %D mem %g, using I-node routines\n",
1463:                                                   rank,mat->rmap->n,(PetscInt)info.nz_used,(PetscInt)info.nz_allocated,(double)info.memory);
1464:       }
1465:       MatGetInfo(aij->A,MAT_LOCAL,&info);
1466:       PetscViewerASCIISynchronizedPrintf(viewer,"[%d] on-diagonal part: nz %D \n",rank,(PetscInt)info.nz_used);
1467:       MatGetInfo(aij->B,MAT_LOCAL,&info);
1468:       PetscViewerASCIISynchronizedPrintf(viewer,"[%d] off-diagonal part: nz %D \n",rank,(PetscInt)info.nz_used);
1469:       PetscViewerFlush(viewer);
1470:       PetscViewerASCIIPopSynchronized(viewer);
1471:       PetscViewerASCIIPrintf(viewer,"Information on VecScatter used in matrix-vector product: \n");
1472:       VecScatterView(aij->Mvctx,viewer);
1473:       return(0);
1474:     } else if (format == PETSC_VIEWER_ASCII_INFO) {
1475:       PetscInt inodecount,inodelimit,*inodes;
1476:       MatInodeGetInodeSizes(aij->A,&inodecount,&inodes,&inodelimit);
1477:       if (inodes) {
1478:         PetscViewerASCIIPrintf(viewer,"using I-node (on process 0) routines: found %D nodes, limit used is %D\n",inodecount,inodelimit);
1479:       } else {
1480:         PetscViewerASCIIPrintf(viewer,"not using I-node (on process 0) routines\n");
1481:       }
1482:       return(0);
1483:     } else if (format == PETSC_VIEWER_ASCII_FACTOR_INFO) {
1484:       return(0);
1485:     }
1486:   } else if (isbinary) {
1487:     if (size == 1) {
1488:       PetscObjectSetName((PetscObject)aij->A,((PetscObject)mat)->name);
1489:       MatView(aij->A,viewer);
1490:     } else {
1491:       MatView_MPIAIJ_Binary(mat,viewer);
1492:     }
1493:     return(0);
1494:   } else if (iascii && size == 1) {
1495:     PetscObjectSetName((PetscObject)aij->A,((PetscObject)mat)->name);
1496:     MatView(aij->A,viewer);
1497:     return(0);
1498:   } else if (isdraw) {
1499:     PetscDraw draw;
1500:     PetscBool isnull;
1501:     PetscViewerDrawGetDraw(viewer,0,&draw);
1502:     PetscDrawIsNull(draw,&isnull);
1503:     if (isnull) return(0);
1504:   }

1506:   { /* assemble the entire matrix onto first processor */
1507:     Mat A = NULL, Av;
1508:     IS  isrow,iscol;

1510:     ISCreateStride(PetscObjectComm((PetscObject)mat),!rank ? mat->rmap->N : 0,0,1,&isrow);
1511:     ISCreateStride(PetscObjectComm((PetscObject)mat),!rank ? mat->cmap->N : 0,0,1,&iscol);
1512:     MatCreateSubMatrix(mat,isrow,iscol,MAT_INITIAL_MATRIX,&A);
1513:     MatMPIAIJGetSeqAIJ(A,&Av,NULL,NULL);
1514: /*  The commented code uses MatCreateSubMatrices instead */
1515: /*
1516:     Mat *AA, A = NULL, Av;
1517:     IS  isrow,iscol;

1519:     ISCreateStride(PetscObjectComm((PetscObject)mat),!rank ? mat->rmap->N : 0,0,1,&isrow);
1520:     ISCreateStride(PetscObjectComm((PetscObject)mat),!rank ? mat->cmap->N : 0,0,1,&iscol);
1521:     MatCreateSubMatrices(mat,1,&isrow,&iscol,MAT_INITIAL_MATRIX,&AA);
1522:     if (!rank) {
1523:        PetscObjectReference((PetscObject)AA[0]);
1524:        A    = AA[0];
1525:        Av   = AA[0];
1526:     }
1527:     MatDestroySubMatrices(1,&AA);
1528: */
1529:     ISDestroy(&iscol);
1530:     ISDestroy(&isrow);
1531:     /*
1532:        Every process has to participate in drawing the matrix since the graphics waits are
1533:        synchronized across all processes that share the PetscDraw object
1534:     */
1535:     PetscViewerGetSubViewer(viewer,PETSC_COMM_SELF,&sviewer);
1536:     if (!rank) {
1537:       if (((PetscObject)mat)->name) {
1538:         PetscObjectSetName((PetscObject)Av,((PetscObject)mat)->name);
1539:       }
1540:       MatView_SeqAIJ(Av,sviewer);
1541:     }
1542:     PetscViewerRestoreSubViewer(viewer,PETSC_COMM_SELF,&sviewer);
1543:     PetscViewerFlush(viewer);
1544:     MatDestroy(&A);
1545:   }
1546:   return(0);
1547: }

1549: PetscErrorCode MatView_MPIAIJ(Mat mat,PetscViewer viewer)
1550: {
1552:   PetscBool      iascii,isdraw,issocket,isbinary;

1555:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);
1556:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERDRAW,&isdraw);
1557:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERBINARY,&isbinary);
1558:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERSOCKET,&issocket);
1559:   if (iascii || isdraw || isbinary || issocket) {
1560:     MatView_MPIAIJ_ASCIIorDraworSocket(mat,viewer);
1561:   }
1562:   return(0);
1563: }
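/*
   Illustrative sketch (added for exposition, not part of the PETSc source): requesting
   the PETSC_VIEWER_ASCII_INFO output produced by MatView_MPIAIJ_ASCIIorDraworSocket()
   above.  Using PETSC_VIEWER_STDOUT_WORLD assumes the matrix lives on PETSC_COMM_WORLD.
*/
static PetscErrorCode ExampleViewMPIAIJInfo(Mat A)
{
  PetscViewerPushFormat(PETSC_VIEWER_STDOUT_WORLD,PETSC_VIEWER_ASCII_INFO);
  MatView(A,PETSC_VIEWER_STDOUT_WORLD);
  PetscViewerPopFormat(PETSC_VIEWER_STDOUT_WORLD);
  return(0);
}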

1565: PetscErrorCode MatSOR_MPIAIJ(Mat matin,Vec bb,PetscReal omega,MatSORType flag,PetscReal fshift,PetscInt its,PetscInt lits,Vec xx)
1566: {
1567:   Mat_MPIAIJ     *mat = (Mat_MPIAIJ*)matin->data;
1569:   Vec            bb1 = 0;
1570:   PetscBool      hasop;

1573:   if (flag == SOR_APPLY_UPPER) {
1574:     (*mat->A->ops->sor)(mat->A,bb,omega,flag,fshift,lits,1,xx);
1575:     return(0);
1576:   }

1578:   if (its > 1 || ~flag & SOR_ZERO_INITIAL_GUESS || flag & SOR_EISENSTAT) {
1579:     VecDuplicate(bb,&bb1);
1580:   }

1582:   if ((flag & SOR_LOCAL_SYMMETRIC_SWEEP) == SOR_LOCAL_SYMMETRIC_SWEEP) {
1583:     if (flag & SOR_ZERO_INITIAL_GUESS) {
1584:       (*mat->A->ops->sor)(mat->A,bb,omega,flag,fshift,lits,1,xx);
1585:       its--;
1586:     }

1588:     while (its--) {
1589:       VecScatterBegin(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
1590:       VecScatterEnd(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);

1592:       /* update rhs: bb1 = bb - B*x */
1593:       VecScale(mat->lvec,-1.0);
1594:       (*mat->B->ops->multadd)(mat->B,mat->lvec,bb,bb1);

1596:       /* local sweep */
1597:       (*mat->A->ops->sor)(mat->A,bb1,omega,SOR_SYMMETRIC_SWEEP,fshift,lits,1,xx);
1598:     }
1599:   } else if (flag & SOR_LOCAL_FORWARD_SWEEP) {
1600:     if (flag & SOR_ZERO_INITIAL_GUESS) {
1601:       (*mat->A->ops->sor)(mat->A,bb,omega,flag,fshift,lits,1,xx);
1602:       its--;
1603:     }
1604:     while (its--) {
1605:       VecScatterBegin(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
1606:       VecScatterEnd(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);

1608:       /* update rhs: bb1 = bb - B*x */
1609:       VecScale(mat->lvec,-1.0);
1610:       (*mat->B->ops->multadd)(mat->B,mat->lvec,bb,bb1);

1612:       /* local sweep */
1613:       (*mat->A->ops->sor)(mat->A,bb1,omega,SOR_FORWARD_SWEEP,fshift,lits,1,xx);
1614:     }
1615:   } else if (flag & SOR_LOCAL_BACKWARD_SWEEP) {
1616:     if (flag & SOR_ZERO_INITIAL_GUESS) {
1617:       (*mat->A->ops->sor)(mat->A,bb,omega,flag,fshift,lits,1,xx);
1618:       its--;
1619:     }
1620:     while (its--) {
1621:       VecScatterBegin(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
1622:       VecScatterEnd(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);

1624:       /* update rhs: bb1 = bb - B*x */
1625:       VecScale(mat->lvec,-1.0);
1626:       (*mat->B->ops->multadd)(mat->B,mat->lvec,bb,bb1);

1628:       /* local sweep */
1629:       (*mat->A->ops->sor)(mat->A,bb1,omega,SOR_BACKWARD_SWEEP,fshift,lits,1,xx);
1630:     }
1631:   } else if (flag & SOR_EISENSTAT) {
1632:     Vec xx1;

1634:     VecDuplicate(bb,&xx1);
1635:     (*mat->A->ops->sor)(mat->A,bb,omega,(MatSORType)(SOR_ZERO_INITIAL_GUESS | SOR_LOCAL_BACKWARD_SWEEP),fshift,lits,1,xx);

1637:     VecScatterBegin(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
1638:     VecScatterEnd(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
1639:     if (!mat->diag) {
1640:       MatCreateVecs(matin,&mat->diag,NULL);
1641:       MatGetDiagonal(matin,mat->diag);
1642:     }
1643:     MatHasOperation(matin,MATOP_MULT_DIAGONAL_BLOCK,&hasop);
1644:     if (hasop) {
1645:       MatMultDiagonalBlock(matin,xx,bb1);
1646:     } else {
1647:       VecPointwiseMult(bb1,mat->diag,xx);
1648:     }
1649:     VecAYPX(bb1,(omega-2.0)/omega,bb);

1651:     MatMultAdd(mat->B,mat->lvec,bb1,bb1);

1653:     /* local sweep */
1654:     (*mat->A->ops->sor)(mat->A,bb1,omega,(MatSORType)(SOR_ZERO_INITIAL_GUESS | SOR_LOCAL_FORWARD_SWEEP),fshift,lits,1,xx1);
1655:     VecAXPY(xx,1.0,xx1);
1656:     VecDestroy(&xx1);
1657:   } else SETERRQ(PetscObjectComm((PetscObject)matin),PETSC_ERR_SUP,"Parallel SOR not supported");

1659:   VecDestroy(&bb1);

1661:   matin->factorerrortype = mat->A->factorerrortype;
1662:   return(0);
1663: }
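/*
   Illustrative sketch (added for exposition, not part of the PETSc source): a direct
   MatSOR() call that exercises the SOR_LOCAL_SYMMETRIC_SWEEP branch of MatSOR_MPIAIJ()
   above; omega = 1.0 and its = lits = 1 are example values only.
*/
static PetscErrorCode ExampleLocalSSORSweep(Mat A,Vec b,Vec x)
{
  MatSOR(A,b,1.0,(MatSORType)(SOR_LOCAL_SYMMETRIC_SWEEP | SOR_ZERO_INITIAL_GUESS),0.0,1,1,x);
  return(0);
}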

1665: PetscErrorCode MatPermute_MPIAIJ(Mat A,IS rowp,IS colp,Mat *B)
1666: {
1667:   Mat            aA,aB,Aperm;
1668:   const PetscInt *rwant,*cwant,*gcols,*ai,*bi,*aj,*bj;
1669:   PetscScalar    *aa,*ba;
1670:   PetscInt       i,j,m,n,ng,anz,bnz,*dnnz,*onnz,*tdnnz,*tonnz,*rdest,*cdest,*work,*gcdest;
1671:   PetscSF        rowsf,sf;
1672:   IS             parcolp = NULL;
1673:   PetscBool      done;

1677:   MatGetLocalSize(A,&m,&n);
1678:   ISGetIndices(rowp,&rwant);
1679:   ISGetIndices(colp,&cwant);
1680:   PetscMalloc3(PetscMax(m,n),&work,m,&rdest,n,&cdest);

1682:   /* Invert row permutation to find out where my rows should go */
1683:   PetscSFCreate(PetscObjectComm((PetscObject)A),&rowsf);
1684:   PetscSFSetGraphLayout(rowsf,A->rmap,A->rmap->n,NULL,PETSC_OWN_POINTER,rwant);
1685:   PetscSFSetFromOptions(rowsf);
1686:   for (i=0; i<m; i++) work[i] = A->rmap->rstart + i;
1687:   PetscSFReduceBegin(rowsf,MPIU_INT,work,rdest,MPIU_REPLACE);
1688:   PetscSFReduceEnd(rowsf,MPIU_INT,work,rdest,MPIU_REPLACE);

1690:   /* Invert column permutation to find out where my columns should go */
1691:   PetscSFCreate(PetscObjectComm((PetscObject)A),&sf);
1692:   PetscSFSetGraphLayout(sf,A->cmap,A->cmap->n,NULL,PETSC_OWN_POINTER,cwant);
1693:   PetscSFSetFromOptions(sf);
1694:   for (i=0; i<n; i++) work[i] = A->cmap->rstart + i;
1695:   PetscSFReduceBegin(sf,MPIU_INT,work,cdest,MPIU_REPLACE);
1696:   PetscSFReduceEnd(sf,MPIU_INT,work,cdest,MPIU_REPLACE);
1697:   PetscSFDestroy(&sf);

1699:   ISRestoreIndices(rowp,&rwant);
1700:   ISRestoreIndices(colp,&cwant);
1701:   MatMPIAIJGetSeqAIJ(A,&aA,&aB,&gcols);

1703:   /* Find out where my gcols should go */
1704:   MatGetSize(aB,NULL,&ng);
1705:   PetscMalloc1(ng,&gcdest);
1706:   PetscSFCreate(PetscObjectComm((PetscObject)A),&sf);
1707:   PetscSFSetGraphLayout(sf,A->cmap,ng,NULL,PETSC_OWN_POINTER,gcols);
1708:   PetscSFSetFromOptions(sf);
1709:   PetscSFBcastBegin(sf,MPIU_INT,cdest,gcdest);
1710:   PetscSFBcastEnd(sf,MPIU_INT,cdest,gcdest);
1711:   PetscSFDestroy(&sf);

1713:   PetscCalloc4(m,&dnnz,m,&onnz,m,&tdnnz,m,&tonnz);
1714:   MatGetRowIJ(aA,0,PETSC_FALSE,PETSC_FALSE,&anz,&ai,&aj,&done);
1715:   MatGetRowIJ(aB,0,PETSC_FALSE,PETSC_FALSE,&bnz,&bi,&bj,&done);
1716:   for (i=0; i<m; i++) {
1717:     PetscInt row = rdest[i],rowner;
1718:     PetscLayoutFindOwner(A->rmap,row,&rowner);
1719:     for (j=ai[i]; j<ai[i+1]; j++) {
1720:       PetscInt cowner,col = cdest[aj[j]];
1721:       PetscLayoutFindOwner(A->cmap,col,&cowner); /* Could build an index for the columns to eliminate this search */
1722:       if (rowner == cowner) dnnz[i]++;
1723:       else onnz[i]++;
1724:     }
1725:     for (j=bi[i]; j<bi[i+1]; j++) {
1726:       PetscInt cowner,col = gcdest[bj[j]];
1727:       PetscLayoutFindOwner(A->cmap,col,&cowner);
1728:       if (rowner == cowner) dnnz[i]++;
1729:       else onnz[i]++;
1730:     }
1731:   }
1732:   PetscSFBcastBegin(rowsf,MPIU_INT,dnnz,tdnnz);
1733:   PetscSFBcastEnd(rowsf,MPIU_INT,dnnz,tdnnz);
1734:   PetscSFBcastBegin(rowsf,MPIU_INT,onnz,tonnz);
1735:   PetscSFBcastEnd(rowsf,MPIU_INT,onnz,tonnz);
1736:   PetscSFDestroy(&rowsf);

1738:   MatCreateAIJ(PetscObjectComm((PetscObject)A),A->rmap->n,A->cmap->n,A->rmap->N,A->cmap->N,0,tdnnz,0,tonnz,&Aperm);
1739:   MatSeqAIJGetArray(aA,&aa);
1740:   MatSeqAIJGetArray(aB,&ba);
1741:   for (i=0; i<m; i++) {
1742:     PetscInt *acols = dnnz,*bcols = onnz; /* Repurpose now-unneeded arrays */
1743:     PetscInt j0,rowlen;
1744:     rowlen = ai[i+1] - ai[i];
1745:     for (j0=j=0; j<rowlen; j0=j) { /* rowlen can be larger than the scratch length m, so set the row values in batches */
1746:       for ( ; j<PetscMin(rowlen,j0+m); j++) acols[j-j0] = cdest[aj[ai[i]+j]];
1747:       MatSetValues(Aperm,1,&rdest[i],j-j0,acols,aa+ai[i]+j0,INSERT_VALUES);
1748:     }
1749:     rowlen = bi[i+1] - bi[i];
1750:     for (j0=j=0; j<rowlen; j0=j) {
1751:       for ( ; j<PetscMin(rowlen,j0+m); j++) bcols[j-j0] = gcdest[bj[bi[i]+j]];
1752:       MatSetValues(Aperm,1,&rdest[i],j-j0,bcols,ba+bi[i]+j0,INSERT_VALUES);
1753:     }
1754:   }
1755:   MatAssemblyBegin(Aperm,MAT_FINAL_ASSEMBLY);
1756:   MatAssemblyEnd(Aperm,MAT_FINAL_ASSEMBLY);
1757:   MatRestoreRowIJ(aA,0,PETSC_FALSE,PETSC_FALSE,&anz,&ai,&aj,&done);
1758:   MatRestoreRowIJ(aB,0,PETSC_FALSE,PETSC_FALSE,&bnz,&bi,&bj,&done);
1759:   MatSeqAIJRestoreArray(aA,&aa);
1760:   MatSeqAIJRestoreArray(aB,&ba);
1761:   PetscFree4(dnnz,onnz,tdnnz,tonnz);
1762:   PetscFree3(work,rdest,cdest);
1763:   PetscFree(gcdest);
1764:   if (parcolp) {ISDestroy(&colp);}
1765:   *B = Aperm;
1766:   return(0);
1767: }
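/*
   Illustrative sketch (added for exposition, not part of the PETSc source): calling
   MatPermute() with identity permutations, which routes through MatPermute_MPIAIJ()
   above.  A real application would build rowp/colp from an ordering instead.
*/
static PetscErrorCode ExamplePermuteIdentity(Mat A,Mat *Aperm)
{
  IS       rowp,colp;
  PetscInt mlocal,nlocal,rstart,cstart;

  MatGetLocalSize(A,&mlocal,&nlocal);
  MatGetOwnershipRange(A,&rstart,NULL);
  MatGetOwnershipRangeColumn(A,&cstart,NULL);
  /* identity permutations: every process keeps the rows/columns it already owns */
  ISCreateStride(PetscObjectComm((PetscObject)A),mlocal,rstart,1,&rowp);
  ISCreateStride(PetscObjectComm((PetscObject)A),nlocal,cstart,1,&colp);
  MatPermute(A,rowp,colp,Aperm);
  ISDestroy(&rowp);
  ISDestroy(&colp);
  return(0);
}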

1769: PetscErrorCode  MatGetGhosts_MPIAIJ(Mat mat,PetscInt *nghosts,const PetscInt *ghosts[])
1770: {
1771:   Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;

1775:   MatGetSize(aij->B,NULL,nghosts);
1776:   if (ghosts) *ghosts = aij->garray;
1777:   return(0);
1778: }

1780: PetscErrorCode MatGetInfo_MPIAIJ(Mat matin,MatInfoType flag,MatInfo *info)
1781: {
1782:   Mat_MPIAIJ     *mat = (Mat_MPIAIJ*)matin->data;
1783:   Mat            A    = mat->A,B = mat->B;
1785:   PetscReal      isend[5],irecv[5];

1788:   info->block_size = 1.0;
1789:   MatGetInfo(A,MAT_LOCAL,info);

1791:   isend[0] = info->nz_used; isend[1] = info->nz_allocated; isend[2] = info->nz_unneeded;
1792:   isend[3] = info->memory;  isend[4] = info->mallocs;

1794:   MatGetInfo(B,MAT_LOCAL,info);

1796:   isend[0] += info->nz_used; isend[1] += info->nz_allocated; isend[2] += info->nz_unneeded;
1797:   isend[3] += info->memory;  isend[4] += info->mallocs;
1798:   if (flag == MAT_LOCAL) {
1799:     info->nz_used      = isend[0];
1800:     info->nz_allocated = isend[1];
1801:     info->nz_unneeded  = isend[2];
1802:     info->memory       = isend[3];
1803:     info->mallocs      = isend[4];
1804:   } else if (flag == MAT_GLOBAL_MAX) {
1805:     MPIU_Allreduce(isend,irecv,5,MPIU_REAL,MPIU_MAX,PetscObjectComm((PetscObject)matin));

1807:     info->nz_used      = irecv[0];
1808:     info->nz_allocated = irecv[1];
1809:     info->nz_unneeded  = irecv[2];
1810:     info->memory       = irecv[3];
1811:     info->mallocs      = irecv[4];
1812:   } else if (flag == MAT_GLOBAL_SUM) {
1813:     MPIU_Allreduce(isend,irecv,5,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)matin));

1815:     info->nz_used      = irecv[0];
1816:     info->nz_allocated = irecv[1];
1817:     info->nz_unneeded  = irecv[2];
1818:     info->memory       = irecv[3];
1819:     info->mallocs      = irecv[4];
1820:   }
1821:   info->fill_ratio_given  = 0; /* no parallel LU/ILU/Cholesky */
1822:   info->fill_ratio_needed = 0;
1823:   info->factor_mallocs    = 0;
1824:   return(0);
1825: }
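/*
   Illustrative sketch (added for exposition, not part of the PETSc source): querying
   the data assembled by MatGetInfo_MPIAIJ() above, here summed over all processes with
   MAT_GLOBAL_SUM.
*/
static PetscErrorCode ExampleReportGlobalNonzeros(Mat A)
{
  MatInfo info;

  MatGetInfo(A,MAT_GLOBAL_SUM,&info);
  PetscPrintf(PetscObjectComm((PetscObject)A),"nonzeros used %D allocated %D\n",(PetscInt)info.nz_used,(PetscInt)info.nz_allocated);
  return(0);
}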

1827: PetscErrorCode MatSetOption_MPIAIJ(Mat A,MatOption op,PetscBool flg)
1828: {
1829:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;

1833:   switch (op) {
1834:   case MAT_NEW_NONZERO_LOCATIONS:
1835:   case MAT_NEW_NONZERO_ALLOCATION_ERR:
1836:   case MAT_UNUSED_NONZERO_LOCATION_ERR:
1837:   case MAT_KEEP_NONZERO_PATTERN:
1838:   case MAT_NEW_NONZERO_LOCATION_ERR:
1839:   case MAT_USE_INODES:
1840:   case MAT_IGNORE_ZERO_ENTRIES:
1841:     MatCheckPreallocated(A,1);
1842:     MatSetOption(a->A,op,flg);
1843:     MatSetOption(a->B,op,flg);
1844:     break;
1845:   case MAT_ROW_ORIENTED:
1846:     MatCheckPreallocated(A,1);
1847:     a->roworiented = flg;

1849:     MatSetOption(a->A,op,flg);
1850:     MatSetOption(a->B,op,flg);
1851:     break;
1852:   case MAT_NEW_DIAGONALS:
1853:   case MAT_SORTED_FULL:
1854:     PetscInfo1(A,"Option %s ignored\n",MatOptions[op]);
1855:     break;
1856:   case MAT_IGNORE_OFF_PROC_ENTRIES:
1857:     a->donotstash = flg;
1858:     break;
1859:   /* Symmetry flags are handled directly by MatSetOption() and they don't affect preallocation */
1860:   case MAT_SPD:
1861:   case MAT_SYMMETRIC:
1862:   case MAT_STRUCTURALLY_SYMMETRIC:
1863:   case MAT_HERMITIAN:
1864:   case MAT_SYMMETRY_ETERNAL:
1865:     break;
1866:   case MAT_SUBMAT_SINGLEIS:
1867:     A->submat_singleis = flg;
1868:     break;
1869:   case MAT_STRUCTURE_ONLY:
1870:     /* The option is handled directly by MatSetOption() */
1871:     break;
1872:   default:
1873:     SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"unknown option %d",op);
1874:   }
1875:   return(0);
1876: }

1878: PetscErrorCode MatGetRow_MPIAIJ(Mat matin,PetscInt row,PetscInt *nz,PetscInt **idx,PetscScalar **v)
1879: {
1880:   Mat_MPIAIJ     *mat = (Mat_MPIAIJ*)matin->data;
1881:   PetscScalar    *vworkA,*vworkB,**pvA,**pvB,*v_p;
1883:   PetscInt       i,*cworkA,*cworkB,**pcA,**pcB,cstart = matin->cmap->rstart;
1884:   PetscInt       nztot,nzA,nzB,lrow,rstart = matin->rmap->rstart,rend = matin->rmap->rend;
1885:   PetscInt       *cmap,*idx_p;

1888:   if (mat->getrowactive) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Already active");
1889:   mat->getrowactive = PETSC_TRUE;

1891:   if (!mat->rowvalues && (idx || v)) {
1892:     /*
1893:         allocate enough space to hold information from the longest row.
1894:     */
1895:     Mat_SeqAIJ *Aa = (Mat_SeqAIJ*)mat->A->data,*Ba = (Mat_SeqAIJ*)mat->B->data;
1896:     PetscInt   max = 1,tmp;
1897:     for (i=0; i<matin->rmap->n; i++) {
1898:       tmp = Aa->i[i+1] - Aa->i[i] + Ba->i[i+1] - Ba->i[i];
1899:       if (max < tmp) max = tmp;
1900:     }
1901:     PetscMalloc2(max,&mat->rowvalues,max,&mat->rowindices);
1902:   }

1904:   if (row < rstart || row >= rend) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Only local rows");
1905:   lrow = row - rstart;

1907:   pvA = &vworkA; pcA = &cworkA; pvB = &vworkB; pcB = &cworkB;
1908:   if (!v)   {pvA = 0; pvB = 0;}
1909:   if (!idx) {pcA = 0; if (!v) pcB = 0;}
1910:   (*mat->A->ops->getrow)(mat->A,lrow,&nzA,pcA,pvA);
1911:   (*mat->B->ops->getrow)(mat->B,lrow,&nzB,pcB,pvB);
1912:   nztot = nzA + nzB;

1914:   cmap = mat->garray;
1915:   if (v  || idx) {
1916:     if (nztot) {
1917:       /* Sort by increasing column numbers, assuming A and B already sorted */
1918:       PetscInt imark = -1;
1919:       if (v) {
1920:         *v = v_p = mat->rowvalues;
1921:         for (i=0; i<nzB; i++) {
1922:           if (cmap[cworkB[i]] < cstart) v_p[i] = vworkB[i];
1923:           else break;
1924:         }
1925:         imark = i;
1926:         for (i=0; i<nzA; i++)     v_p[imark+i] = vworkA[i];
1927:         for (i=imark; i<nzB; i++) v_p[nzA+i]   = vworkB[i];
1928:       }
1929:       if (idx) {
1930:         *idx = idx_p = mat->rowindices;
1931:         if (imark > -1) {
1932:           for (i=0; i<imark; i++) {
1933:             idx_p[i] = cmap[cworkB[i]];
1934:           }
1935:         } else {
1936:           for (i=0; i<nzB; i++) {
1937:             if (cmap[cworkB[i]] < cstart) idx_p[i] = cmap[cworkB[i]];
1938:             else break;
1939:           }
1940:           imark = i;
1941:         }
1942:         for (i=0; i<nzA; i++)     idx_p[imark+i] = cstart + cworkA[i];
1943:         for (i=imark; i<nzB; i++) idx_p[nzA+i]   = cmap[cworkB[i]];
1944:       }
1945:     } else {
1946:       if (idx) *idx = 0;
1947:       if (v)   *v   = 0;
1948:     }
1949:   }
1950:   *nz  = nztot;
1951:   (*mat->A->ops->restorerow)(mat->A,lrow,&nzA,pcA,pvA);
1952:   (*mat->B->ops->restorerow)(mat->B,lrow,&nzB,pcB,pvB);
1953:   return(0);
1954: }

1956: PetscErrorCode MatRestoreRow_MPIAIJ(Mat mat,PetscInt row,PetscInt *nz,PetscInt **idx,PetscScalar **v)
1957: {
1958:   Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;

1961:   if (!aij->getrowactive) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"MatGetRow() must be called first");
1962:   aij->getrowactive = PETSC_FALSE;
1963:   return(0);
1964: }
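/*
   Illustrative sketch (added for exposition, not part of the PETSc source): the
   standard MatGetRow()/MatRestoreRow() loop over the locally owned rows, which is what
   drives MatGetRow_MPIAIJ() and MatRestoreRow_MPIAIJ() above.
*/
static PetscErrorCode ExampleSumLocalRows(Mat A,PetscScalar *total)
{
  PetscInt          row,rstart,rend,ncols,j;
  const PetscScalar *vals;

  *total = 0.0;
  MatGetOwnershipRange(A,&rstart,&rend);
  for (row=rstart; row<rend; row++) {
    MatGetRow(A,row,&ncols,NULL,&vals);
    for (j=0; j<ncols; j++) *total += vals[j];
    MatRestoreRow(A,row,&ncols,NULL,&vals);
  }
  return(0);
}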

1966: PetscErrorCode MatNorm_MPIAIJ(Mat mat,NormType type,PetscReal *norm)
1967: {
1968:   Mat_MPIAIJ     *aij  = (Mat_MPIAIJ*)mat->data;
1969:   Mat_SeqAIJ     *amat = (Mat_SeqAIJ*)aij->A->data,*bmat = (Mat_SeqAIJ*)aij->B->data;
1971:   PetscInt       i,j,cstart = mat->cmap->rstart;
1972:   PetscReal      sum = 0.0;
1973:   MatScalar      *v;

1976:   if (aij->size == 1) {
1977:      MatNorm(aij->A,type,norm);
1978:   } else {
1979:     if (type == NORM_FROBENIUS) {
1980:       v = amat->a;
1981:       for (i=0; i<amat->nz; i++) {
1982:         sum += PetscRealPart(PetscConj(*v)*(*v)); v++;
1983:       }
1984:       v = bmat->a;
1985:       for (i=0; i<bmat->nz; i++) {
1986:         sum += PetscRealPart(PetscConj(*v)*(*v)); v++;
1987:       }
1988:       MPIU_Allreduce(&sum,norm,1,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)mat));
1989:       *norm = PetscSqrtReal(*norm);
1990:       PetscLogFlops(2*amat->nz+2*bmat->nz);
1991:     } else if (type == NORM_1) { /* max column norm */
1992:       PetscReal *tmp,*tmp2;
1993:       PetscInt  *jj,*garray = aij->garray;
1994:       PetscCalloc1(mat->cmap->N+1,&tmp);
1995:       PetscMalloc1(mat->cmap->N+1,&tmp2);
1996:       *norm = 0.0;
1997:       v     = amat->a; jj = amat->j;
1998:       for (j=0; j<amat->nz; j++) {
1999:         tmp[cstart + *jj++] += PetscAbsScalar(*v);  v++;
2000:       }
2001:       v = bmat->a; jj = bmat->j;
2002:       for (j=0; j<bmat->nz; j++) {
2003:         tmp[garray[*jj++]] += PetscAbsScalar(*v); v++;
2004:       }
2005:       MPIU_Allreduce(tmp,tmp2,mat->cmap->N,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)mat));
2006:       for (j=0; j<mat->cmap->N; j++) {
2007:         if (tmp2[j] > *norm) *norm = tmp2[j];
2008:       }
2009:       PetscFree(tmp);
2010:       PetscFree(tmp2);
2011:       PetscLogFlops(PetscMax(amat->nz+bmat->nz-1,0));
2012:     } else if (type == NORM_INFINITY) { /* max row norm */
2013:       PetscReal ntemp = 0.0;
2014:       for (j=0; j<aij->A->rmap->n; j++) {
2015:         v   = amat->a + amat->i[j];
2016:         sum = 0.0;
2017:         for (i=0; i<amat->i[j+1]-amat->i[j]; i++) {
2018:           sum += PetscAbsScalar(*v); v++;
2019:         }
2020:         v = bmat->a + bmat->i[j];
2021:         for (i=0; i<bmat->i[j+1]-bmat->i[j]; i++) {
2022:           sum += PetscAbsScalar(*v); v++;
2023:         }
2024:         if (sum > ntemp) ntemp = sum;
2025:       }
2026:       MPIU_Allreduce(&ntemp,norm,1,MPIU_REAL,MPIU_MAX,PetscObjectComm((PetscObject)mat));
2027:       PetscLogFlops(PetscMax(amat->nz+bmat->nz-1,0));
2028:     } else SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"No support for two norm");
2029:   }
2030:   return(0);
2031: }
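/*
   Illustrative sketch (added for exposition, not part of the PETSc source): computing
   the three norms supported by MatNorm_MPIAIJ() above (the 2-norm is not supported).
*/
static PetscErrorCode ExampleMatrixNorms(Mat A)
{
  PetscReal frob,one,inf;

  MatNorm(A,NORM_FROBENIUS,&frob);
  MatNorm(A,NORM_1,&one);          /* largest column sum */
  MatNorm(A,NORM_INFINITY,&inf);   /* largest row sum */
  PetscPrintf(PetscObjectComm((PetscObject)A),"Frobenius %g  one-norm %g  infinity-norm %g\n",(double)frob,(double)one,(double)inf);
  return(0);
}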

2033: PetscErrorCode MatTranspose_MPIAIJ(Mat A,MatReuse reuse,Mat *matout)
2034: {
2035:   Mat_MPIAIJ      *a    =(Mat_MPIAIJ*)A->data,*b;
2036:   Mat_SeqAIJ      *Aloc =(Mat_SeqAIJ*)a->A->data,*Bloc=(Mat_SeqAIJ*)a->B->data,*sub_B_diag;
2037:   PetscInt        M     = A->rmap->N,N=A->cmap->N,ma,na,mb,nb,row,*cols,*cols_tmp,*B_diag_ilen,i,ncol,A_diag_ncol;
2038:   const PetscInt  *ai,*aj,*bi,*bj,*B_diag_i;
2039:   PetscErrorCode  ierr;
2040:   Mat             B,A_diag,*B_diag;
2041:   const MatScalar *array;

2044:   ma = A->rmap->n; na = A->cmap->n; mb = a->B->rmap->n; nb = a->B->cmap->n;
2045:   ai = Aloc->i; aj = Aloc->j;
2046:   bi = Bloc->i; bj = Bloc->j;
2047:   if (reuse == MAT_INITIAL_MATRIX || *matout == A) {
2048:     PetscInt             *d_nnz,*g_nnz,*o_nnz;
2049:     PetscSFNode          *oloc;
2050:     PETSC_UNUSED PetscSF sf;

2052:     PetscMalloc4(na,&d_nnz,na,&o_nnz,nb,&g_nnz,nb,&oloc);
2053:     /* compute d_nnz for preallocation */
2054:     PetscArrayzero(d_nnz,na);
2055:     for (i=0; i<ai[ma]; i++) {
2056:       d_nnz[aj[i]]++;
2057:     }
2058:     /* compute local off-diagonal contributions */
2059:     PetscArrayzero(g_nnz,nb);
2060:     for (i=0; i<bi[ma]; i++) g_nnz[bj[i]]++;
2061:     /* map those to global */
2062:     PetscSFCreate(PetscObjectComm((PetscObject)A),&sf);
2063:     PetscSFSetGraphLayout(sf,A->cmap,nb,NULL,PETSC_USE_POINTER,a->garray);
2064:     PetscSFSetFromOptions(sf);
2065:     PetscArrayzero(o_nnz,na);
2066:     PetscSFReduceBegin(sf,MPIU_INT,g_nnz,o_nnz,MPIU_SUM);
2067:     PetscSFReduceEnd(sf,MPIU_INT,g_nnz,o_nnz,MPIU_SUM);
2068:     PetscSFDestroy(&sf);

2070:     MatCreate(PetscObjectComm((PetscObject)A),&B);
2071:     MatSetSizes(B,A->cmap->n,A->rmap->n,N,M);
2072:     MatSetBlockSizes(B,PetscAbs(A->cmap->bs),PetscAbs(A->rmap->bs));
2073:     MatSetType(B,((PetscObject)A)->type_name);
2074:     MatMPIAIJSetPreallocation(B,0,d_nnz,0,o_nnz);
2075:     PetscFree4(d_nnz,o_nnz,g_nnz,oloc);
2076:   } else {
2077:     B    = *matout;
2078:     MatSetOption(B,MAT_NEW_NONZERO_ALLOCATION_ERR,PETSC_TRUE);
2079:   }

2081:   b           = (Mat_MPIAIJ*)B->data;
2082:   A_diag      = a->A;
2083:   B_diag      = &b->A;
2084:   sub_B_diag  = (Mat_SeqAIJ*)(*B_diag)->data;
2085:   A_diag_ncol = A_diag->cmap->N;
2086:   B_diag_ilen = sub_B_diag->ilen;
2087:   B_diag_i    = sub_B_diag->i;

2089:   /* Set ilen for diagonal of B */
2090:   for (i=0; i<A_diag_ncol; i++) {
2091:     B_diag_ilen[i] = B_diag_i[i+1] - B_diag_i[i];
2092:   }

2094:   /* Transpose the diagonal part of the matrix. In contrast to the off-diagonal part, this can be done
2095:   very quickly (i.e., without using MatSetValues), because all writes are local. */
2096:   MatTranspose(A_diag,MAT_REUSE_MATRIX,B_diag);

2098:   /* copy over the B part */
2099:   PetscMalloc1(bi[mb],&cols);
2100:   array = Bloc->a;
2101:   row   = A->rmap->rstart;
2102:   for (i=0; i<bi[mb]; i++) cols[i] = a->garray[bj[i]];
2103:   cols_tmp = cols;
2104:   for (i=0; i<mb; i++) {
2105:     ncol = bi[i+1]-bi[i];
2106:     MatSetValues(B,ncol,cols_tmp,1,&row,array,INSERT_VALUES);
2107:     row++;
2108:     array += ncol; cols_tmp += ncol;
2109:   }
2110:   PetscFree(cols);

2112:   MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);
2113:   MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);
2114:   if (reuse == MAT_INITIAL_MATRIX || reuse == MAT_REUSE_MATRIX) {
2115:     *matout = B;
2116:   } else {
2117:     MatHeaderMerge(A,&B);
2118:   }
2119:   return(0);
2120: }
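/*
   Illustrative sketch (added for exposition, not part of the PETSc source): the two
   reuse modes handled by MatTranspose_MPIAIJ() above; the MAT_INITIAL_MATRIX call
   creates At and a later MAT_REUSE_MATRIX call refreshes it after A's values change.
*/
static PetscErrorCode ExampleTransposeTwice(Mat A,Mat *At)
{
  MatTranspose(A,MAT_INITIAL_MATRIX,At);  /* allocates and fills the transpose */
  MatTranspose(A,MAT_REUSE_MATRIX,At);    /* reuses the existing nonzero pattern */
  return(0);
}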

2122: PetscErrorCode MatDiagonalScale_MPIAIJ(Mat mat,Vec ll,Vec rr)
2123: {
2124:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
2125:   Mat            a    = aij->A,b = aij->B;
2127:   PetscInt       s1,s2,s3;

2130:   MatGetLocalSize(mat,&s2,&s3);
2131:   if (rr) {
2132:     VecGetLocalSize(rr,&s1);
2133:     if (s1!=s3) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"right vector non-conforming local size");
2134:     /* Overlap communication with computation. */
2135:     VecScatterBegin(aij->Mvctx,rr,aij->lvec,INSERT_VALUES,SCATTER_FORWARD);
2136:   }
2137:   if (ll) {
2138:     VecGetLocalSize(ll,&s1);
2139:     if (s1!=s2) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"left vector non-conforming local size");
2140:     (*b->ops->diagonalscale)(b,ll,0);
2141:   }
2142:   /* scale  the diagonal block */
2143:   (*a->ops->diagonalscale)(a,ll,rr);

2145:   if (rr) {
2146:     /* Do a scatter end and then right scale the off-diagonal block */
2147:     VecScatterEnd(aij->Mvctx,rr,aij->lvec,INSERT_VALUES,SCATTER_FORWARD);
2148:     (*b->ops->diagonalscale)(b,0,aij->lvec);
2149:   }
2150:   return(0);
2151: }

2153: PetscErrorCode MatSetUnfactored_MPIAIJ(Mat A)
2154: {
2155:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;

2159:   MatSetUnfactored(a->A);
2160:   return(0);
2161: }

2163: PetscErrorCode MatEqual_MPIAIJ(Mat A,Mat B,PetscBool  *flag)
2164: {
2165:   Mat_MPIAIJ     *matB = (Mat_MPIAIJ*)B->data,*matA = (Mat_MPIAIJ*)A->data;
2166:   Mat            a,b,c,d;
2167:   PetscBool      flg;

2171:   a = matA->A; b = matA->B;
2172:   c = matB->A; d = matB->B;

2174:   MatEqual(a,c,&flg);
2175:   if (flg) {
2176:     MatEqual(b,d,&flg);
2177:   }
2178:   MPIU_Allreduce(&flg,flag,1,MPIU_BOOL,MPI_LAND,PetscObjectComm((PetscObject)A));
2179:   return(0);
2180: }

2182: PetscErrorCode MatCopy_MPIAIJ(Mat A,Mat B,MatStructure str)
2183: {
2185:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
2186:   Mat_MPIAIJ     *b = (Mat_MPIAIJ*)B->data;

2189:   /* If the two matrices don't have the same copy implementation, they aren't compatible for fast copy. */
2190:   if ((str != SAME_NONZERO_PATTERN) || (A->ops->copy != B->ops->copy)) {
2191:     /* because of the column compression in the off-processor part of the matrix a->B,
2192:        the number of columns in a->B and b->B may be different, hence we cannot call
2193:        the MatCopy() directly on the two parts. If need be, we can provide a more
2194:        efficient copy than the MatCopy_Basic() by first uncompressing the a->B matrices
2195:        then copying the submatrices */
2196:     MatCopy_Basic(A,B,str);
2197:   } else {
2198:     MatCopy(a->A,b->A,str);
2199:     MatCopy(a->B,b->B,str);
2200:   }
2201:   PetscObjectStateIncrease((PetscObject)B);
2202:   return(0);
2203: }

2205: PetscErrorCode MatSetUp_MPIAIJ(Mat A)
2206: {

2210:   MatMPIAIJSetPreallocation(A,PETSC_DEFAULT,0,PETSC_DEFAULT,0);
2211:   return(0);
2212: }

2214: /*
2215:    Computes the number of nonzeros per row needed for preallocation when X and Y
2216:    have different nonzero structure.
2217: */
2218: PetscErrorCode MatAXPYGetPreallocation_MPIX_private(PetscInt m,const PetscInt *xi,const PetscInt *xj,const PetscInt *xltog,const PetscInt *yi,const PetscInt *yj,const PetscInt *yltog,PetscInt *nnz)
2219: {
2220:   PetscInt       i,j,k,nzx,nzy;

2223:   /* Set the number of nonzeros in the new matrix */
2224:   for (i=0; i<m; i++) {
2225:     const PetscInt *xjj = xj+xi[i],*yjj = yj+yi[i];
2226:     nzx = xi[i+1] - xi[i];
2227:     nzy = yi[i+1] - yi[i];
2228:     nnz[i] = 0;
2229:     for (j=0,k=0; j<nzx; j++) {                   /* Point in X */
2230:       for (; k<nzy && yltog[yjj[k]]<xltog[xjj[j]]; k++) nnz[i]++; /* Catch up to X */
2231:       if (k<nzy && yltog[yjj[k]]==xltog[xjj[j]]) k++;             /* Skip duplicate */
2232:       nnz[i]++;
2233:     }
2234:     for (; k<nzy; k++) nnz[i]++;
2235:   }
2236:   return(0);
2237: }
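/*
   Worked example (added for exposition, not part of the PETSc source): for a row where
   X has global columns {1,4,7} and Y has global columns {2,4,9}, the merge above visits
   1,2,4,7,9 and sets nnz[i] = 5; the shared column 4 is counted only once.
*/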

2239: /* This is the same as MatAXPYGetPreallocation_SeqAIJ, except that the local-to-global map is provided */
2240: static PetscErrorCode MatAXPYGetPreallocation_MPIAIJ(Mat Y,const PetscInt *yltog,Mat X,const PetscInt *xltog,PetscInt *nnz)
2241: {
2243:   PetscInt       m = Y->rmap->N;
2244:   Mat_SeqAIJ     *x = (Mat_SeqAIJ*)X->data;
2245:   Mat_SeqAIJ     *y = (Mat_SeqAIJ*)Y->data;

2248:   MatAXPYGetPreallocation_MPIX_private(m,x->i,x->j,xltog,y->i,y->j,yltog,nnz);
2249:   return(0);
2250: }

2252: PetscErrorCode MatAXPY_MPIAIJ(Mat Y,PetscScalar a,Mat X,MatStructure str)
2253: {
2255:   Mat_MPIAIJ     *xx = (Mat_MPIAIJ*)X->data,*yy = (Mat_MPIAIJ*)Y->data;
2256:   PetscBLASInt   bnz,one=1;
2257:   Mat_SeqAIJ     *x,*y;

2260:   if (str == SAME_NONZERO_PATTERN) {
2261:     PetscScalar alpha = a;
2262:     x    = (Mat_SeqAIJ*)xx->A->data;
2263:     PetscBLASIntCast(x->nz,&bnz);
2264:     y    = (Mat_SeqAIJ*)yy->A->data;
2265:     PetscStackCallBLAS("BLASaxpy",BLASaxpy_(&bnz,&alpha,x->a,&one,y->a,&one));
2266:     x    = (Mat_SeqAIJ*)xx->B->data;
2267:     y    = (Mat_SeqAIJ*)yy->B->data;
2268:     PetscBLASIntCast(x->nz,&bnz);
2269:     PetscStackCallBLAS("BLASaxpy",BLASaxpy_(&bnz,&alpha,x->a,&one,y->a,&one));
2270:     PetscObjectStateIncrease((PetscObject)Y);
2271:     /* the MatAXPY_Basic* subroutines call MatAssembly, so the matrix on the GPU
2272:        will be updated */
2273: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_CUDA)
2274:     if (Y->valid_GPU_matrix != PETSC_OFFLOAD_UNALLOCATED) {
2275:       Y->valid_GPU_matrix = PETSC_OFFLOAD_CPU;
2276:     }
2277: #endif
2278:   } else if (str == SUBSET_NONZERO_PATTERN) { /* nonzeros of X is a subset of Y's */
2279:     MatAXPY_Basic(Y,a,X,str);
2280:   } else {
2281:     Mat      B;
2282:     PetscInt *nnz_d,*nnz_o;
2283:     PetscMalloc1(yy->A->rmap->N,&nnz_d);
2284:     PetscMalloc1(yy->B->rmap->N,&nnz_o);
2285:     MatCreate(PetscObjectComm((PetscObject)Y),&B);
2286:     PetscObjectSetName((PetscObject)B,((PetscObject)Y)->name);
2287:     MatSetSizes(B,Y->rmap->n,Y->cmap->n,Y->rmap->N,Y->cmap->N);
2288:     MatSetBlockSizesFromMats(B,Y,Y);
2289:     MatSetType(B,MATMPIAIJ);
2290:     MatAXPYGetPreallocation_SeqAIJ(yy->A,xx->A,nnz_d);
2291:     MatAXPYGetPreallocation_MPIAIJ(yy->B,yy->garray,xx->B,xx->garray,nnz_o);
2292:     MatMPIAIJSetPreallocation(B,0,nnz_d,0,nnz_o);
2293:     MatAXPY_BasicWithPreallocation(B,Y,a,X,str);
2294:     MatHeaderReplace(Y,&B);
2295:     PetscFree(nnz_d);
2296:     PetscFree(nnz_o);
2297:   }
2298:   return(0);
2299: }
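/*
   Illustrative sketch (added for exposition, not part of the PETSc source): a MatAXPY()
   call that takes the fast SAME_NONZERO_PATTERN branch of MatAXPY_MPIAIJ() above; Y and
   X are assumed to have been assembled with identical nonzero patterns.
*/
static PetscErrorCode ExampleAXPYSamePattern(Mat Y,Mat X,PetscScalar alpha)
{
  MatAXPY(Y,alpha,X,SAME_NONZERO_PATTERN);  /* Y <- Y + alpha*X directly on the stored values */
  return(0);
}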

2301: extern PetscErrorCode  MatConjugate_SeqAIJ(Mat);

2303: PetscErrorCode  MatConjugate_MPIAIJ(Mat mat)
2304: {
2305: #if defined(PETSC_USE_COMPLEX)
2307:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;

2310:   MatConjugate_SeqAIJ(aij->A);
2311:   MatConjugate_SeqAIJ(aij->B);
2312: #else
2314: #endif
2315:   return(0);
2316: }

2318: PetscErrorCode MatRealPart_MPIAIJ(Mat A)
2319: {
2320:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;

2324:   MatRealPart(a->A);
2325:   MatRealPart(a->B);
2326:   return(0);
2327: }

2329: PetscErrorCode MatImaginaryPart_MPIAIJ(Mat A)
2330: {
2331:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;

2335:   MatImaginaryPart(a->A);
2336:   MatImaginaryPart(a->B);
2337:   return(0);
2338: }

2340: PetscErrorCode MatGetRowMaxAbs_MPIAIJ(Mat A, Vec v, PetscInt idx[])
2341: {
2342:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
2344:   PetscInt       i,*idxb = 0;
2345:   PetscScalar    *va,*vb;
2346:   Vec            vtmp;

2349:   MatGetRowMaxAbs(a->A,v,idx);
2350:   VecGetArray(v,&va);
2351:   if (idx) {
2352:     for (i=0; i<A->rmap->n; i++) {
2353:       if (PetscAbsScalar(va[i])) idx[i] += A->cmap->rstart;
2354:     }
2355:   }

2357:   VecCreateSeq(PETSC_COMM_SELF,A->rmap->n,&vtmp);
2358:   if (idx) {
2359:     PetscMalloc1(A->rmap->n,&idxb);
2360:   }
2361:   MatGetRowMaxAbs(a->B,vtmp,idxb);
2362:   VecGetArray(vtmp,&vb);

2364:   for (i=0; i<A->rmap->n; i++) {
2365:     if (PetscAbsScalar(va[i]) < PetscAbsScalar(vb[i])) {
2366:       va[i] = vb[i];
2367:       if (idx) idx[i] = a->garray[idxb[i]];
2368:     }
2369:   }

2371:   VecRestoreArray(v,&va);
2372:   VecRestoreArray(vtmp,&vb);
2373:   PetscFree(idxb);
2374:   VecDestroy(&vtmp);
2375:   return(0);
2376: }

2378: PetscErrorCode MatGetRowMinAbs_MPIAIJ(Mat A, Vec v, PetscInt idx[])
2379: {
2380:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
2382:   PetscInt       i,*idxb = 0;
2383:   PetscScalar    *va,*vb;
2384:   Vec            vtmp;

2387:   MatGetRowMinAbs(a->A,v,idx);
2388:   VecGetArray(v,&va);
2389:   if (idx) {
2390:     for (i=0; i<A->rmap->n; i++) { /* loop over the local rows, as in MatGetRowMaxAbs_MPIAIJ() */
2391:       if (PetscAbsScalar(va[i])) idx[i] += A->cmap->rstart;
2392:     }
2393:   }

2395:   VecCreateSeq(PETSC_COMM_SELF,A->rmap->n,&vtmp);
2396:   if (idx) {
2397:     PetscMalloc1(A->rmap->n,&idxb);
2398:   }
2399:   MatGetRowMinAbs(a->B,vtmp,idxb);
2400:   VecGetArray(vtmp,&vb);

2402:   for (i=0; i<A->rmap->n; i++) {
2403:     if (PetscAbsScalar(va[i]) > PetscAbsScalar(vb[i])) {
2404:       va[i] = vb[i];
2405:       if (idx) idx[i] = a->garray[idxb[i]];
2406:     }
2407:   }

2409:   VecRestoreArray(v,&va);
2410:   VecRestoreArray(vtmp,&vb);
2411:   PetscFree(idxb);
2412:   VecDestroy(&vtmp);
2413:   return(0);
2414: }

2416: PetscErrorCode MatGetRowMin_MPIAIJ(Mat A, Vec v, PetscInt idx[])
2417: {
2418:   Mat_MPIAIJ     *mat   = (Mat_MPIAIJ*) A->data;
2419:   PetscInt       n      = A->rmap->n;
2420:   PetscInt       cstart = A->cmap->rstart;
2421:   PetscInt       *cmap  = mat->garray;
2422:   PetscInt       *diagIdx, *offdiagIdx;
2423:   Vec            diagV, offdiagV;
2424:   PetscScalar    *a, *diagA, *offdiagA;
2425:   PetscInt       r;

2429:   PetscMalloc2(n,&diagIdx,n,&offdiagIdx);
2430:   VecCreateSeq(PetscObjectComm((PetscObject)A), n, &diagV);
2431:   VecCreateSeq(PetscObjectComm((PetscObject)A), n, &offdiagV);
2432:   MatGetRowMin(mat->A, diagV,    diagIdx);
2433:   MatGetRowMin(mat->B, offdiagV, offdiagIdx);
2434:   VecGetArray(v,        &a);
2435:   VecGetArray(diagV,    &diagA);
2436:   VecGetArray(offdiagV, &offdiagA);
2437:   for (r = 0; r < n; ++r) {
2438:     if (PetscAbsScalar(diagA[r]) <= PetscAbsScalar(offdiagA[r])) {
2439:       a[r]   = diagA[r];
2440:       idx[r] = cstart + diagIdx[r];
2441:     } else {
2442:       a[r]   = offdiagA[r];
2443:       idx[r] = cmap[offdiagIdx[r]];
2444:     }
2445:   }
2446:   VecRestoreArray(v,        &a);
2447:   VecRestoreArray(diagV,    &diagA);
2448:   VecRestoreArray(offdiagV, &offdiagA);
2449:   VecDestroy(&diagV);
2450:   VecDestroy(&offdiagV);
2451:   PetscFree2(diagIdx, offdiagIdx);
2452:   return(0);
2453: }

2455: PetscErrorCode MatGetRowMax_MPIAIJ(Mat A, Vec v, PetscInt idx[])
2456: {
2457:   Mat_MPIAIJ     *mat   = (Mat_MPIAIJ*) A->data;
2458:   PetscInt       n      = A->rmap->n;
2459:   PetscInt       cstart = A->cmap->rstart;
2460:   PetscInt       *cmap  = mat->garray;
2461:   PetscInt       *diagIdx, *offdiagIdx;
2462:   Vec            diagV, offdiagV;
2463:   PetscScalar    *a, *diagA, *offdiagA;
2464:   PetscInt       r;

2468:   PetscMalloc2(n,&diagIdx,n,&offdiagIdx);
2469:   VecCreateSeq(PETSC_COMM_SELF, n, &diagV);
2470:   VecCreateSeq(PETSC_COMM_SELF, n, &offdiagV);
2471:   MatGetRowMax(mat->A, diagV,    diagIdx);
2472:   MatGetRowMax(mat->B, offdiagV, offdiagIdx);
2473:   VecGetArray(v,        &a);
2474:   VecGetArray(diagV,    &diagA);
2475:   VecGetArray(offdiagV, &offdiagA);
2476:   for (r = 0; r < n; ++r) {
2477:     if (PetscAbsScalar(diagA[r]) >= PetscAbsScalar(offdiagA[r])) {
2478:       a[r]   = diagA[r];
2479:       idx[r] = cstart + diagIdx[r];
2480:     } else {
2481:       a[r]   = offdiagA[r];
2482:       idx[r] = cmap[offdiagIdx[r]];
2483:     }
2484:   }
2485:   VecRestoreArray(v,        &a);
2486:   VecRestoreArray(diagV,    &diagA);
2487:   VecRestoreArray(offdiagV, &offdiagA);
2488:   VecDestroy(&diagV);
2489:   VecDestroy(&offdiagV);
2490:   PetscFree2(diagIdx, offdiagIdx);
2491:   return(0);
2492: }

2494: PetscErrorCode MatGetSeqNonzeroStructure_MPIAIJ(Mat mat,Mat *newmat)
2495: {
2497:   Mat            *dummy;

2500:   MatCreateSubMatrix_MPIAIJ_All(mat,MAT_DO_NOT_GET_VALUES,MAT_INITIAL_MATRIX,&dummy);
2501:   *newmat = *dummy;
2502:   PetscFree(dummy);
2503:   return(0);
2504: }

2506: PetscErrorCode  MatInvertBlockDiagonal_MPIAIJ(Mat A,const PetscScalar **values)
2507: {
2508:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*) A->data;

2512:   MatInvertBlockDiagonal(a->A,values);
2513:   A->factorerrortype = a->A->factorerrortype;
2514:   return(0);
2515: }

2517: static PetscErrorCode  MatSetRandom_MPIAIJ(Mat x,PetscRandom rctx)
2518: {
2520:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)x->data;

2523:   if (!x->assembled && !x->preallocated) SETERRQ(PetscObjectComm((PetscObject)x), PETSC_ERR_ARG_WRONGSTATE, "MatSetRandom on an unassembled and unpreallocated MATMPIAIJ is not allowed");
2524:   MatSetRandom(aij->A,rctx);
2525:   if (x->assembled) {
2526:     MatSetRandom(aij->B,rctx);
2527:   } else {
2528:     MatSetRandomSkipColumnRange_SeqAIJ_Private(aij->B,x->cmap->rstart,x->cmap->rend,rctx);
2529:   }
2530:   MatAssemblyBegin(x,MAT_FINAL_ASSEMBLY);
2531:   MatAssemblyEnd(x,MAT_FINAL_ASSEMBLY);
2532:   return(0);
2533: }

2535: PetscErrorCode MatMPIAIJSetUseScalableIncreaseOverlap_MPIAIJ(Mat A,PetscBool sc)
2536: {
2538:   if (sc) A->ops->increaseoverlap = MatIncreaseOverlap_MPIAIJ_Scalable;
2539:   else A->ops->increaseoverlap    = MatIncreaseOverlap_MPIAIJ;
2540:   return(0);
2541: }

2543: /*@
2544:    MatMPIAIJSetUseScalableIncreaseOverlap - Set whether the matrix uses a scalable algorithm to compute the overlap

2546:    Collective on Mat

2548:    Input Parameters:
2549: +    A - the matrix
2550: -    sc - PETSC_TRUE indicates use the scalable algorithm (default is not to use the scalable algorithm)

2552:  Level: advanced

2554: @*/
2555: PetscErrorCode MatMPIAIJSetUseScalableIncreaseOverlap(Mat A,PetscBool sc)
2556: {
2557:   PetscErrorCode       ierr;

2560:   PetscTryMethod(A,"MatMPIAIJSetUseScalableIncreaseOverlap_C",(Mat,PetscBool),(A,sc));
2561:   return(0);
2562: }
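/*
   Illustrative sketch (added for exposition, not part of the PETSc source): enabling the
   scalable overlap algorithm programmatically; the same switch is exposed on the command
   line as -mat_increase_overlap_scalable in MatSetFromOptions_MPIAIJ() below.
*/
static PetscErrorCode ExampleUseScalableOverlap(Mat A)
{
  MatMPIAIJSetUseScalableIncreaseOverlap(A,PETSC_TRUE);
  return(0);
}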

2564: PetscErrorCode MatSetFromOptions_MPIAIJ(PetscOptionItems *PetscOptionsObject,Mat A)
2565: {
2566:   PetscErrorCode       ierr;
2567:   PetscBool            sc = PETSC_FALSE,flg;

2570:   PetscOptionsHead(PetscOptionsObject,"MPIAIJ options");
2571:   if (A->ops->increaseoverlap == MatIncreaseOverlap_MPIAIJ_Scalable) sc = PETSC_TRUE;
2572:   PetscOptionsBool("-mat_increase_overlap_scalable","Use a scalable algorithm to compute the overlap","MatIncreaseOverlap",sc,&sc,&flg);
2573:   if (flg) {
2574:     MatMPIAIJSetUseScalableIncreaseOverlap(A,sc);
2575:   }
2576:   PetscOptionsTail();
2577:   return(0);
2578: }

2580: PetscErrorCode MatShift_MPIAIJ(Mat Y,PetscScalar a)
2581: {
2583:   Mat_MPIAIJ     *maij = (Mat_MPIAIJ*)Y->data;
2584:   Mat_SeqAIJ     *aij = (Mat_SeqAIJ*)maij->A->data;

2587:   if (!Y->preallocated) {
2588:     MatMPIAIJSetPreallocation(Y,1,NULL,0,NULL);
2589:   } else if (!aij->nz) {
2590:     PetscInt nonew = aij->nonew;
2591:     MatSeqAIJSetPreallocation(maij->A,1,NULL);
2592:     aij->nonew = nonew;
2593:   }
2594:   MatShift_Basic(Y,a);
2595:   return(0);
2596: }

2598: PetscErrorCode MatMissingDiagonal_MPIAIJ(Mat A,PetscBool  *missing,PetscInt *d)
2599: {
2600:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;

2604:   if (A->rmap->n != A->cmap->n) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Only works for square matrices");
2605:   MatMissingDiagonal(a->A,missing,d);
2606:   if (d) {
2607:     PetscInt rstart;
2608:     MatGetOwnershipRange(A,&rstart,NULL);
2609:     *d += rstart;

2611:   }
2612:   return(0);
2613: }

2615: PetscErrorCode MatInvertVariableBlockDiagonal_MPIAIJ(Mat A,PetscInt nblocks,const PetscInt *bsizes,PetscScalar *diag)
2616: {
2617:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;

2621:   MatInvertVariableBlockDiagonal(a->A,nblocks,bsizes,diag);
2622:   return(0);
2623: }

2625: /* -------------------------------------------------------------------*/
2626: static struct _MatOps MatOps_Values = {MatSetValues_MPIAIJ,
2627:                                        MatGetRow_MPIAIJ,
2628:                                        MatRestoreRow_MPIAIJ,
2629:                                        MatMult_MPIAIJ,
2630:                                 /* 4*/ MatMultAdd_MPIAIJ,
2631:                                        MatMultTranspose_MPIAIJ,
2632:                                        MatMultTransposeAdd_MPIAIJ,
2633:                                        0,
2634:                                        0,
2635:                                        0,
2636:                                 /*10*/ 0,
2637:                                        0,
2638:                                        0,
2639:                                        MatSOR_MPIAIJ,
2640:                                        MatTranspose_MPIAIJ,
2641:                                 /*15*/ MatGetInfo_MPIAIJ,
2642:                                        MatEqual_MPIAIJ,
2643:                                        MatGetDiagonal_MPIAIJ,
2644:                                        MatDiagonalScale_MPIAIJ,
2645:                                        MatNorm_MPIAIJ,
2646:                                 /*20*/ MatAssemblyBegin_MPIAIJ,
2647:                                        MatAssemblyEnd_MPIAIJ,
2648:                                        MatSetOption_MPIAIJ,
2649:                                        MatZeroEntries_MPIAIJ,
2650:                                 /*24*/ MatZeroRows_MPIAIJ,
2651:                                        0,
2652:                                        0,
2653:                                        0,
2654:                                        0,
2655:                                 /*29*/ MatSetUp_MPIAIJ,
2656:                                        0,
2657:                                        0,
2658:                                        MatGetDiagonalBlock_MPIAIJ,
2659:                                        0,
2660:                                 /*34*/ MatDuplicate_MPIAIJ,
2661:                                        0,
2662:                                        0,
2663:                                        0,
2664:                                        0,
2665:                                 /*39*/ MatAXPY_MPIAIJ,
2666:                                        MatCreateSubMatrices_MPIAIJ,
2667:                                        MatIncreaseOverlap_MPIAIJ,
2668:                                        MatGetValues_MPIAIJ,
2669:                                        MatCopy_MPIAIJ,
2670:                                 /*44*/ MatGetRowMax_MPIAIJ,
2671:                                        MatScale_MPIAIJ,
2672:                                        MatShift_MPIAIJ,
2673:                                        MatDiagonalSet_MPIAIJ,
2674:                                        MatZeroRowsColumns_MPIAIJ,
2675:                                 /*49*/ MatSetRandom_MPIAIJ,
2676:                                        0,
2677:                                        0,
2678:                                        0,
2679:                                        0,
2680:                                 /*54*/ MatFDColoringCreate_MPIXAIJ,
2681:                                        0,
2682:                                        MatSetUnfactored_MPIAIJ,
2683:                                        MatPermute_MPIAIJ,
2684:                                        0,
2685:                                 /*59*/ MatCreateSubMatrix_MPIAIJ,
2686:                                        MatDestroy_MPIAIJ,
2687:                                        MatView_MPIAIJ,
2688:                                        0,
2689:                                        MatMatMatMult_MPIAIJ_MPIAIJ_MPIAIJ,
2690:                                 /*64*/ MatMatMatMultSymbolic_MPIAIJ_MPIAIJ_MPIAIJ,
2691:                                        MatMatMatMultNumeric_MPIAIJ_MPIAIJ_MPIAIJ,
2692:                                        0,
2693:                                        0,
2694:                                        0,
2695:                                 /*69*/ MatGetRowMaxAbs_MPIAIJ,
2696:                                        MatGetRowMinAbs_MPIAIJ,
2697:                                        0,
2698:                                        0,
2699:                                        0,
2700:                                        0,
2701:                                 /*75*/ MatFDColoringApply_AIJ,
2702:                                        MatSetFromOptions_MPIAIJ,
2703:                                        0,
2704:                                        0,
2705:                                        MatFindZeroDiagonals_MPIAIJ,
2706:                                 /*80*/ 0,
2707:                                        0,
2708:                                        0,
2709:                                 /*83*/ MatLoad_MPIAIJ,
2710:                                        MatIsSymmetric_MPIAIJ,
2711:                                        0,
2712:                                        0,
2713:                                        0,
2714:                                        0,
2715:                                 /*89*/ MatMatMult_MPIAIJ_MPIAIJ,
2716:                                        MatMatMultSymbolic_MPIAIJ_MPIAIJ,
2717:                                        MatMatMultNumeric_MPIAIJ_MPIAIJ,
2718:                                        MatPtAP_MPIAIJ_MPIAIJ,
2719:                                        MatPtAPSymbolic_MPIAIJ_MPIAIJ,
2720:                                 /*94*/ MatPtAPNumeric_MPIAIJ_MPIAIJ,
2721:                                        0,
2722:                                        0,
2723:                                        0,
2724:                                        MatPinToCPU_MPIAIJ,
2725:                                 /*99*/ 0,
2726:                                        0,
2727:                                        0,
2728:                                        MatConjugate_MPIAIJ,
2729:                                        0,
2730:                                 /*104*/MatSetValuesRow_MPIAIJ,
2731:                                        MatRealPart_MPIAIJ,
2732:                                        MatImaginaryPart_MPIAIJ,
2733:                                        0,
2734:                                        0,
2735:                                 /*109*/0,
2736:                                        0,
2737:                                        MatGetRowMin_MPIAIJ,
2738:                                        0,
2739:                                        MatMissingDiagonal_MPIAIJ,
2740:                                 /*114*/MatGetSeqNonzeroStructure_MPIAIJ,
2741:                                        0,
2742:                                        MatGetGhosts_MPIAIJ,
2743:                                        0,
2744:                                        0,
2745:                                 /*119*/0,
2746:                                        0,
2747:                                        0,
2748:                                        0,
2749:                                        MatGetMultiProcBlock_MPIAIJ,
2750:                                 /*124*/MatFindNonzeroRows_MPIAIJ,
2751:                                        MatGetColumnNorms_MPIAIJ,
2752:                                        MatInvertBlockDiagonal_MPIAIJ,
2753:                                        MatInvertVariableBlockDiagonal_MPIAIJ,
2754:                                        MatCreateSubMatricesMPI_MPIAIJ,
2755:                                 /*129*/0,
2756:                                        MatTransposeMatMult_MPIAIJ_MPIAIJ,
2757:                                        MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ,
2758:                                        MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ,
2759:                                        0,
2760:                                 /*134*/0,
2761:                                        0,
2762:                                        MatRARt_MPIAIJ_MPIAIJ,
2763:                                        0,
2764:                                        0,
2765:                                 /*139*/MatSetBlockSizes_MPIAIJ,
2766:                                        0,
2767:                                        0,
2768:                                        MatFDColoringSetUp_MPIXAIJ,
2769:                                        MatFindOffBlockDiagonalEntries_MPIAIJ,
2770:                                 /*144*/MatCreateMPIMatConcatenateSeqMat_MPIAIJ
2771: };

2773: /* ----------------------------------------------------------------------------------------*/

2775: PetscErrorCode  MatStoreValues_MPIAIJ(Mat mat)
2776: {
2777:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;

2781:   MatStoreValues(aij->A);
2782:   MatStoreValues(aij->B);
2783:   return(0);
2784: }

2786: PetscErrorCode  MatRetrieveValues_MPIAIJ(Mat mat)
2787: {
2788:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;

2792:   MatRetrieveValues(aij->A);
2793:   MatRetrieveValues(aij->B);
2794:   return(0);
2795: }

2797: PetscErrorCode  MatMPIAIJSetPreallocation_MPIAIJ(Mat B,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[])
2798: {
2799:   Mat_MPIAIJ     *b;
2801:   PetscMPIInt    size;

2804:   PetscLayoutSetUp(B->rmap);
2805:   PetscLayoutSetUp(B->cmap);
2806:   b = (Mat_MPIAIJ*)B->data;

2808: #if defined(PETSC_USE_CTABLE)
2809:   PetscTableDestroy(&b->colmap);
2810: #else
2811:   PetscFree(b->colmap);
2812: #endif
2813:   PetscFree(b->garray);
2814:   VecDestroy(&b->lvec);
2815:   VecScatterDestroy(&b->Mvctx);

2817:   /* Because b->B may have been resized we simply destroy it and create a new one each time */
2818:   MPI_Comm_size(PetscObjectComm((PetscObject)B),&size);
2819:   MatDestroy(&b->B);
2820:   MatCreate(PETSC_COMM_SELF,&b->B);
2821:   MatSetSizes(b->B,B->rmap->n,size > 1 ? B->cmap->N : 0,B->rmap->n,size > 1 ? B->cmap->N : 0);
2822:   MatSetBlockSizesFromMats(b->B,B,B);
2823:   MatSetType(b->B,MATSEQAIJ);
2824:   PetscLogObjectParent((PetscObject)B,(PetscObject)b->B);

2826:   if (!B->preallocated) {
2827:     MatCreate(PETSC_COMM_SELF,&b->A);
2828:     MatSetSizes(b->A,B->rmap->n,B->cmap->n,B->rmap->n,B->cmap->n);
2829:     MatSetBlockSizesFromMats(b->A,B,B);
2830:     MatSetType(b->A,MATSEQAIJ);
2831:     PetscLogObjectParent((PetscObject)B,(PetscObject)b->A);
2832:   }

2834:   MatSeqAIJSetPreallocation(b->A,d_nz,d_nnz);
2835:   MatSeqAIJSetPreallocation(b->B,o_nz,o_nnz);
2836:   B->preallocated  = PETSC_TRUE;
2837:   B->was_assembled = PETSC_FALSE;
2838:   B->assembled     = PETSC_FALSE;
2839:   return(0);
2840: }
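/*
   Illustrative sketch (added for exposition, not part of the PETSc source): the usual
   creation sequence that ends up in MatMPIAIJSetPreallocation_MPIAIJ() above.  The
   per-row counts d_nz = 5 and o_nz = 2 are example values only.
*/
static PetscErrorCode ExampleCreatePreallocatedMPIAIJ(MPI_Comm comm,PetscInt mlocal,PetscInt nlocal,Mat *A)
{
  MatCreate(comm,A);
  MatSetSizes(*A,mlocal,nlocal,PETSC_DETERMINE,PETSC_DETERMINE);
  MatSetType(*A,MATMPIAIJ);
  MatMPIAIJSetPreallocation(*A,5,NULL,2,NULL);  /* 5 nonzeros per row in the diagonal block, 2 in the off-diagonal block */
  return(0);
}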

2842: PetscErrorCode MatResetPreallocation_MPIAIJ(Mat B)
2843: {
2844:   Mat_MPIAIJ     *b;

2849:   PetscLayoutSetUp(B->rmap);
2850:   PetscLayoutSetUp(B->cmap);
2851:   b = (Mat_MPIAIJ*)B->data;

2853: #if defined(PETSC_USE_CTABLE)
2854:   PetscTableDestroy(&b->colmap);
2855: #else
2856:   PetscFree(b->colmap);
2857: #endif
2858:   PetscFree(b->garray);
2859:   VecDestroy(&b->lvec);
2860:   VecScatterDestroy(&b->Mvctx);

2862:   MatResetPreallocation(b->A);
2863:   MatResetPreallocation(b->B);
2864:   B->preallocated  = PETSC_TRUE;
2865:   B->was_assembled = PETSC_FALSE;
2866:   B->assembled = PETSC_FALSE;
2867:   return(0);
2868: }

2870: PetscErrorCode MatDuplicate_MPIAIJ(Mat matin,MatDuplicateOption cpvalues,Mat *newmat)
2871: {
2872:   Mat            mat;
2873:   Mat_MPIAIJ     *a,*oldmat = (Mat_MPIAIJ*)matin->data;

2877:   *newmat = 0;
2878:   MatCreate(PetscObjectComm((PetscObject)matin),&mat);
2879:   MatSetSizes(mat,matin->rmap->n,matin->cmap->n,matin->rmap->N,matin->cmap->N);
2880:   MatSetBlockSizesFromMats(mat,matin,matin);
2881:   MatSetType(mat,((PetscObject)matin)->type_name);
2882:   a       = (Mat_MPIAIJ*)mat->data;

2884:   mat->factortype   = matin->factortype;
2885:   mat->assembled    = PETSC_TRUE;
2886:   mat->insertmode   = NOT_SET_VALUES;
2887:   mat->preallocated = PETSC_TRUE;

2889:   a->size         = oldmat->size;
2890:   a->rank         = oldmat->rank;
2891:   a->donotstash   = oldmat->donotstash;
2892:   a->roworiented  = oldmat->roworiented;
2893:   a->rowindices   = 0;
2894:   a->rowvalues    = 0;
2895:   a->getrowactive = PETSC_FALSE;

2897:   PetscLayoutReference(matin->rmap,&mat->rmap);
2898:   PetscLayoutReference(matin->cmap,&mat->cmap);

2900:   if (oldmat->colmap) {
2901: #if defined(PETSC_USE_CTABLE)
2902:     PetscTableCreateCopy(oldmat->colmap,&a->colmap);
2903: #else
2904:     PetscMalloc1(mat->cmap->N,&a->colmap);
2905:     PetscLogObjectMemory((PetscObject)mat,(mat->cmap->N)*sizeof(PetscInt));
2906:     PetscArraycpy(a->colmap,oldmat->colmap,mat->cmap->N);
2907: #endif
2908:   } else a->colmap = 0;
2909:   if (oldmat->garray) {
2910:     PetscInt len;
2911:     len  = oldmat->B->cmap->n;
2912:     PetscMalloc1(len+1,&a->garray);
2913:     PetscLogObjectMemory((PetscObject)mat,len*sizeof(PetscInt));
2914:     if (len) { PetscArraycpy(a->garray,oldmat->garray,len); }
2915:   } else a->garray = 0;

2917:   VecDuplicate(oldmat->lvec,&a->lvec);
2918:   PetscLogObjectParent((PetscObject)mat,(PetscObject)a->lvec);
2919:   VecScatterCopy(oldmat->Mvctx,&a->Mvctx);
2920:   PetscLogObjectParent((PetscObject)mat,(PetscObject)a->Mvctx);

2922:   if (oldmat->Mvctx_mpi1) {
2923:     VecScatterCopy(oldmat->Mvctx_mpi1,&a->Mvctx_mpi1);
2924:     PetscLogObjectParent((PetscObject)mat,(PetscObject)a->Mvctx_mpi1);
2925:   }

2927:   MatDuplicate(oldmat->A,cpvalues,&a->A);
2928:   PetscLogObjectParent((PetscObject)mat,(PetscObject)a->A);
2929:   MatDuplicate(oldmat->B,cpvalues,&a->B);
2930:   PetscLogObjectParent((PetscObject)mat,(PetscObject)a->B);
2931:   PetscFunctionListDuplicate(((PetscObject)matin)->qlist,&((PetscObject)mat)->qlist);
2932:   *newmat = mat;
2933:   return(0);
2934: }
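
/*
   A minimal sketch of driving the duplication above from user code, assuming A is an
   assembled MPIAIJ matrix; error checking omitted.

.vb
     Mat A,B;
     // ... assemble A ...
     MatDuplicate(A,MAT_COPY_VALUES,&B);        // same layout and nonzero pattern, values copied
     // use MAT_DO_NOT_COPY_VALUES to duplicate only the structure
     MatDestroy(&B);
.ve
*/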

2936: PetscErrorCode MatLoad_MPIAIJ(Mat newMat, PetscViewer viewer)
2937: {
2938:   PetscBool      isbinary, ishdf5;

2944:   /* force binary viewer to load .info file if it has not yet done so */
2945:   PetscViewerSetUp(viewer);
2946:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERBINARY,&isbinary);
2947:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERHDF5,  &ishdf5);
2948:   if (isbinary) {
2949:     MatLoad_MPIAIJ_Binary(newMat,viewer);
2950:   } else if (ishdf5) {
2951: #if defined(PETSC_HAVE_HDF5)
2952:     MatLoad_AIJ_HDF5(newMat,viewer);
2953: #else
2954:     SETERRQ(PetscObjectComm((PetscObject)newMat),PETSC_ERR_SUP,"HDF5 not supported in this build.\nPlease reconfigure using --download-hdf5");
2955: #endif
2956:   } else {
2957:     SETERRQ2(PetscObjectComm((PetscObject)newMat),PETSC_ERR_SUP,"Viewer type %s not yet supported for reading %s matrices",((PetscObject)viewer)->type_name,((PetscObject)newMat)->type_name);
2958:   }
2959:   return(0);
2960: }
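
/*
   A minimal sketch of reaching the binary reader below through MatLoad(); the file name
   "matrix.dat" is hypothetical and error checking is omitted.

.vb
     Mat         A;
     PetscViewer viewer;
     PetscViewerBinaryOpen(PETSC_COMM_WORLD,"matrix.dat",FILE_MODE_READ,&viewer);
     MatCreate(PETSC_COMM_WORLD,&A);
     MatSetType(A,MATMPIAIJ);
     MatLoad(A,viewer);                          // for a binary viewer this uses MatLoad_MPIAIJ_Binary()
     PetscViewerDestroy(&viewer);
.ve
*/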

2962: PetscErrorCode MatLoad_MPIAIJ_Binary(Mat newMat, PetscViewer viewer)
2963: {
2964:   PetscScalar    *vals,*svals;
2965:   MPI_Comm       comm;
2967:   PetscMPIInt    rank,size,tag = ((PetscObject)viewer)->tag;
2968:   PetscInt       i,nz,j,rstart,rend,mmax,maxnz = 0;
2969:   PetscInt       header[4],*rowlengths = 0,M,N,m,*cols;
2970:   PetscInt       *ourlens = NULL,*procsnz = NULL,*offlens = NULL,jj,*mycols,*smycols;
2971:   PetscInt       cend,cstart,n,*rowners;
2972:   int            fd;
2973:   PetscInt       bs = newMat->rmap->bs;

2976:   PetscObjectGetComm((PetscObject)viewer,&comm);
2977:   MPI_Comm_size(comm,&size);
2978:   MPI_Comm_rank(comm,&rank);
2979:   PetscViewerBinaryGetDescriptor(viewer,&fd);
2980:   if (!rank) {
2981:     PetscBinaryRead(fd,(char*)header,4,NULL,PETSC_INT);
2982:     if (header[0] != MAT_FILE_CLASSID) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_FILE_UNEXPECTED,"not matrix object");
2983:     if (header[3] < 0) SETERRQ(PetscObjectComm((PetscObject)newMat),PETSC_ERR_FILE_UNEXPECTED,"Matrix stored in special format on disk, cannot load as MATMPIAIJ");
2984:   }

2986:   PetscOptionsBegin(comm,NULL,"Options for loading MATMPIAIJ matrix","Mat");
2987:   PetscOptionsInt("-matload_block_size","Set the blocksize used to store the matrix","MatLoad",bs,&bs,NULL);
2988:   PetscOptionsEnd();
2989:   if (bs < 0) bs = 1;

2991:   MPI_Bcast(header+1,3,MPIU_INT,0,comm);
2992:   M    = header[1]; N = header[2];

2994:   /* If global sizes are set, check if they are consistent with that given in the file */
2995:   if (newMat->rmap->N >= 0 && newMat->rmap->N != M) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_FILE_UNEXPECTED,"Inconsistent # of rows: Matrix in file has (%D) and input matrix has (%D)",newMat->rmap->N,M);
2996:   if (newMat->cmap->N >= 0 && newMat->cmap->N != N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_FILE_UNEXPECTED,"Inconsistent # of cols: Matrix in file has (%D) and input matrix has (%D)",newMat->cmap->N,N);

2998:   /* determine ownership of all (block) rows */
2999:   if (M%bs) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_FILE_UNEXPECTED, "Inconsistent # of rows (%d) and block size (%d)",M,bs);
3000:   if (newMat->rmap->n < 0) m = bs*((M/bs)/size + (((M/bs) % size) > rank));    /* PETSC_DECIDE */
3001:   else m = newMat->rmap->n; /* Set by user */

3003:   PetscMalloc1(size+1,&rowners);
3004:   MPI_Allgather(&m,1,MPIU_INT,rowners+1,1,MPIU_INT,comm);

3006:   /* First process needs enough room for process with most rows */
3007:   if (!rank) {
3008:     mmax = rowners[1];
3009:     for (i=2; i<=size; i++) {
3010:       mmax = PetscMax(mmax, rowners[i]);
3011:     }
3012:   } else mmax = -1;             /* unused, but compilers complain */

3014:   rowners[0] = 0;
3015:   for (i=2; i<=size; i++) {
3016:     rowners[i] += rowners[i-1];
3017:   }
3018:   rstart = rowners[rank];
3019:   rend   = rowners[rank+1];

3021:   /* distribute row lengths to all processors */
3022:   PetscMalloc2(m,&ourlens,m,&offlens);
3023:   if (!rank) {
3024:     PetscBinaryRead(fd,ourlens,m,NULL,PETSC_INT);
3025:     PetscMalloc1(mmax,&rowlengths);
3026:     PetscCalloc1(size,&procsnz);
3027:     for (j=0; j<m; j++) {
3028:       procsnz[0] += ourlens[j];
3029:     }
3030:     for (i=1; i<size; i++) {
3031:       PetscBinaryRead(fd,rowlengths,rowners[i+1]-rowners[i],NULL,PETSC_INT);
3032:       /* calculate the number of nonzeros on each processor */
3033:       for (j=0; j<rowners[i+1]-rowners[i]; j++) {
3034:         procsnz[i] += rowlengths[j];
3035:       }
3036:       MPIULong_Send(rowlengths,rowners[i+1]-rowners[i],MPIU_INT,i,tag,comm);
3037:     }
3038:     PetscFree(rowlengths);
3039:   } else {
3040:     MPIULong_Recv(ourlens,m,MPIU_INT,0,tag,comm);
3041:   }

3043:   if (!rank) {
3044:     /* determine max buffer needed and allocate it */
3045:     maxnz = 0;
3046:     for (i=0; i<size; i++) {
3047:       maxnz = PetscMax(maxnz,procsnz[i]);
3048:     }
3049:     PetscMalloc1(maxnz,&cols);

3051:     /* read in my part of the matrix column indices  */
3052:     nz   = procsnz[0];
3053:     PetscMalloc1(nz,&mycols);
3054:     PetscBinaryRead(fd,mycols,nz,NULL,PETSC_INT);

3056:     /* read in everyone else's and ship off */
3057:     for (i=1; i<size; i++) {
3058:       nz   = procsnz[i];
3059:       PetscBinaryRead(fd,cols,nz,NULL,PETSC_INT);
3060:       MPIULong_Send(cols,nz,MPIU_INT,i,tag,comm);
3061:     }
3062:     PetscFree(cols);
3063:   } else {
3064:     /* determine buffer space needed for message */
3065:     nz = 0;
3066:     for (i=0; i<m; i++) {
3067:       nz += ourlens[i];
3068:     }
3069:     PetscMalloc1(nz,&mycols);

3071:     /* receive message of column indices*/
3072:     MPIULong_Recv(mycols,nz,MPIU_INT,0,tag,comm);
3073:   }

3075:   /* determine column ownership if matrix is not square */
3076:   if (N != M) {
3077:     if (newMat->cmap->n < 0) n = N/size + ((N % size) > rank);
3078:     else n = newMat->cmap->n;
3079:     MPI_Scan(&n,&cend,1,MPIU_INT,MPI_SUM,comm);
3080:     cstart = cend - n;
3081:   } else {
3082:     cstart = rstart;
3083:     cend   = rend;
3084:     n      = cend - cstart;
3085:   }

3087:   /* loop over local rows, determining number of off diagonal entries */
3088:   PetscArrayzero(offlens,m);
3089:   jj   = 0;
3090:   for (i=0; i<m; i++) {
3091:     for (j=0; j<ourlens[i]; j++) {
3092:       if (mycols[jj] < cstart || mycols[jj] >= cend) offlens[i]++;
3093:       jj++;
3094:     }
3095:   }

3097:   for (i=0; i<m; i++) {
3098:     ourlens[i] -= offlens[i];
3099:   }
3100:   MatSetSizes(newMat,m,n,M,N);

3102:   if (bs > 1) {MatSetBlockSize(newMat,bs);}

3104:   MatMPIAIJSetPreallocation(newMat,0,ourlens,0,offlens);

3106:   for (i=0; i<m; i++) {
3107:     ourlens[i] += offlens[i];
3108:   }

3110:   if (!rank) {
3111:     PetscMalloc1(maxnz+1,&vals);

3113:     /* read in my part of the matrix numerical values  */
3114:     nz   = procsnz[0];
3115:     PetscBinaryRead(fd,vals,nz,NULL,PETSC_SCALAR);

3117:     /* insert into matrix */
3118:     jj      = rstart;
3119:     smycols = mycols;
3120:     svals   = vals;
3121:     for (i=0; i<m; i++) {
3122:       MatSetValues_MPIAIJ(newMat,1,&jj,ourlens[i],smycols,svals,INSERT_VALUES);
3123:       smycols += ourlens[i];
3124:       svals   += ourlens[i];
3125:       jj++;
3126:     }

3128:     /* read in other processors and ship out */
3129:     for (i=1; i<size; i++) {
3130:       nz   = procsnz[i];
3131:       PetscBinaryRead(fd,vals,nz,NULL,PETSC_SCALAR);
3132:       MPIULong_Send(vals,nz,MPIU_SCALAR,i,((PetscObject)newMat)->tag,comm);
3133:     }
3134:     PetscFree(procsnz);
3135:   } else {
3136:     /* receive numeric values */
3137:     PetscMalloc1(nz+1,&vals);

3139:     /* receive message of values*/
3140:     MPIULong_Recv(vals,nz,MPIU_SCALAR,0,((PetscObject)newMat)->tag,comm);

3142:     /* insert into matrix */
3143:     jj      = rstart;
3144:     smycols = mycols;
3145:     svals   = vals;
3146:     for (i=0; i<m; i++) {
3147:       MatSetValues_MPIAIJ(newMat,1,&jj,ourlens[i],smycols,svals,INSERT_VALUES);
3148:       smycols += ourlens[i];
3149:       svals   += ourlens[i];
3150:       jj++;
3151:     }
3152:   }
3153:   PetscFree2(ourlens,offlens);
3154:   PetscFree(vals);
3155:   PetscFree(mycols);
3156:   PetscFree(rowners);
3157:   MatAssemblyBegin(newMat,MAT_FINAL_ASSEMBLY);
3158:   MatAssemblyEnd(newMat,MAT_FINAL_ASSEMBLY);
3159:   return(0);
3160: }

3162: /* Not scalable because of ISAllGather() unless getting all columns. */
3163: PetscErrorCode ISGetSeqIS_Private(Mat mat,IS iscol,IS *isseq)
3164: {
3166:   IS             iscol_local;
3167:   PetscBool      isstride;
3168:   PetscMPIInt    lisstride=0,gisstride;

3171:   /* check if we are grabbing all columns*/
3172:   PetscObjectTypeCompare((PetscObject)iscol,ISSTRIDE,&isstride);

3174:   if (isstride) {
3175:     PetscInt  start,len,mstart,mlen;
3176:     ISStrideGetInfo(iscol,&start,NULL);
3177:     ISGetLocalSize(iscol,&len);
3178:     MatGetOwnershipRangeColumn(mat,&mstart,&mlen);
3179:     if (mstart == start && mlen-mstart == len) lisstride = 1;
3180:   }

3182:   MPIU_Allreduce(&lisstride,&gisstride,1,MPI_INT,MPI_MIN,PetscObjectComm((PetscObject)mat));
3183:   if (gisstride) {
3184:     PetscInt N;
3185:     MatGetSize(mat,NULL,&N);
3186:     ISCreateStride(PetscObjectComm((PetscObject)mat),N,0,1,&iscol_local);
3187:     ISSetIdentity(iscol_local);
3188:     PetscInfo(mat,"Optimizing for obtaining all columns of the matrix; skipping ISAllGather()\n");
3189:   } else {
3190:     PetscInt cbs;
3191:     ISGetBlockSize(iscol,&cbs);
3192:     ISAllGather(iscol,&iscol_local);
3193:     ISSetBlockSize(iscol_local,cbs);
3194:   }

3196:   *isseq = iscol_local;
3197:   return(0);
3198: }

3200: /*
3201:  Used by MatCreateSubMatrix_MPIAIJ_SameRowColDist() to avoid ISAllGather() and global size of iscol_local
3202:  (see MatCreateSubMatrix_MPIAIJ_nonscalable)

3204:  Input Parameters:
3205:    mat - matrix
3206:    isrow - parallel row index set; its local indices are a subset of local rows of mat,
3207:            i.e., mat->rstart <= isrow[i] < mat->rend
3208:    iscol - parallel column index set; its local indices are a subset of local columns of mat,
3209:            i.e., mat->cstart <= iscol[i] < mat->cend
3210:  Output Parameters:
3211:    isrow_d,iscol_d - sequential row and column index sets for retrieving mat->A
3212:    iscol_o - sequential column index set for retrieving mat->B
3213:    garray - column map; garray[i] indicates global location of iscol_o[i] in iscol
3214:  */
3215: PetscErrorCode ISGetSeqIS_SameColDist_Private(Mat mat,IS isrow,IS iscol,IS *isrow_d,IS *iscol_d,IS *iscol_o,const PetscInt *garray[])
3216: {
3218:   Vec            x,cmap;
3219:   const PetscInt *is_idx;
3220:   PetscScalar    *xarray,*cmaparray;
3221:   PetscInt       ncols,isstart,*idx,m,rstart,*cmap1,count;
3222:   Mat_MPIAIJ     *a=(Mat_MPIAIJ*)mat->data;
3223:   Mat            B=a->B;
3224:   Vec            lvec=a->lvec,lcmap;
3225:   PetscInt       i,cstart,cend,Bn=B->cmap->N;
3226:   MPI_Comm       comm;
3227:   VecScatter     Mvctx=a->Mvctx;

3230:   PetscObjectGetComm((PetscObject)mat,&comm);
3231:   ISGetLocalSize(iscol,&ncols);

3233:   /* (1) iscol is a sub-column vector of mat, pad it with '-1.' to form a full vector x */
3234:   MatCreateVecs(mat,&x,NULL);
3235:   VecSet(x,-1.0);
3236:   VecDuplicate(x,&cmap);
3237:   VecSet(cmap,-1.0);

3239:   /* Get start indices */
3240:   MPI_Scan(&ncols,&isstart,1,MPIU_INT,MPI_SUM,comm);
3241:   isstart -= ncols;
3242:   MatGetOwnershipRangeColumn(mat,&cstart,&cend);

3244:   ISGetIndices(iscol,&is_idx);
3245:   VecGetArray(x,&xarray);
3246:   VecGetArray(cmap,&cmaparray);
3247:   PetscMalloc1(ncols,&idx);
3248:   for (i=0; i<ncols; i++) {
3249:     xarray[is_idx[i]-cstart]    = (PetscScalar)is_idx[i];
3250:     cmaparray[is_idx[i]-cstart] = i + isstart;      /* global index of iscol[i] */
3251:     idx[i]                      = is_idx[i]-cstart; /* local index of iscol[i]  */
3252:   }
3253:   VecRestoreArray(x,&xarray);
3254:   VecRestoreArray(cmap,&cmaparray);
3255:   ISRestoreIndices(iscol,&is_idx);

3257:   /* Get iscol_d */
3258:   ISCreateGeneral(PETSC_COMM_SELF,ncols,idx,PETSC_OWN_POINTER,iscol_d);
3259:   ISGetBlockSize(iscol,&i);
3260:   ISSetBlockSize(*iscol_d,i);

3262:   /* Get isrow_d */
3263:   ISGetLocalSize(isrow,&m);
3264:   rstart = mat->rmap->rstart;
3265:   PetscMalloc1(m,&idx);
3266:   ISGetIndices(isrow,&is_idx);
3267:   for (i=0; i<m; i++) idx[i] = is_idx[i]-rstart;
3268:   ISRestoreIndices(isrow,&is_idx);

3270:   ISCreateGeneral(PETSC_COMM_SELF,m,idx,PETSC_OWN_POINTER,isrow_d);
3271:   ISGetBlockSize(isrow,&i);
3272:   ISSetBlockSize(*isrow_d,i);

3274:   /* (2) Scatter x and cmap using aij->Mvctx to get their off-process portions (see MatMult_MPIAIJ) */
3275:   VecScatterBegin(Mvctx,x,lvec,INSERT_VALUES,SCATTER_FORWARD);
3276:   VecScatterEnd(Mvctx,x,lvec,INSERT_VALUES,SCATTER_FORWARD);

3278:   VecDuplicate(lvec,&lcmap);

3280:   VecScatterBegin(Mvctx,cmap,lcmap,INSERT_VALUES,SCATTER_FORWARD);
3281:   VecScatterEnd(Mvctx,cmap,lcmap,INSERT_VALUES,SCATTER_FORWARD);

3283:   /* (3) create sequential iscol_o (a subset of iscol) and isgarray */
3284:   /* off-process column indices */
3285:   count = 0;
3286:   PetscMalloc1(Bn,&idx);
3287:   PetscMalloc1(Bn,&cmap1);

3289:   VecGetArray(lvec,&xarray);
3290:   VecGetArray(lcmap,&cmaparray);
3291:   for (i=0; i<Bn; i++) {
3292:     if (PetscRealPart(xarray[i]) > -1.0) {
3293:       idx[count]     = i;                   /* local column index in off-diagonal part B */
3294:       cmap1[count] = (PetscInt)PetscRealPart(cmaparray[i]);  /* column index in submat */
3295:       count++;
3296:     }
3297:   }
3298:   VecRestoreArray(lvec,&xarray);
3299:   VecRestoreArray(lcmap,&cmaparray);

3301:   ISCreateGeneral(PETSC_COMM_SELF,count,idx,PETSC_COPY_VALUES,iscol_o);
3302:   /* cannot ensure iscol_o has same blocksize as iscol! */

3304:   PetscFree(idx);
3305:   *garray = cmap1;

3307:   VecDestroy(&x);
3308:   VecDestroy(&cmap);
3309:   VecDestroy(&lcmap);
3310:   return(0);
3311: }

3313: /* isrow and iscol have same processor distribution as mat, output *submat is a submatrix of local mat */
3314: PetscErrorCode MatCreateSubMatrix_MPIAIJ_SameRowColDist(Mat mat,IS isrow,IS iscol,MatReuse call,Mat *submat)
3315: {
3317:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)mat->data,*asub;
3318:   Mat            M = NULL;
3319:   MPI_Comm       comm;
3320:   IS             iscol_d,isrow_d,iscol_o;
3321:   Mat            Asub = NULL,Bsub = NULL;
3322:   PetscInt       n;

3325:   PetscObjectGetComm((PetscObject)mat,&comm);

3327:   if (call == MAT_REUSE_MATRIX) {
3328:     /* Retrieve isrow_d, iscol_d and iscol_o from submat */
3329:     PetscObjectQuery((PetscObject)*submat,"isrow_d",(PetscObject*)&isrow_d);
3330:     if (!isrow_d) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"isrow_d passed in was not used before, cannot reuse");

3332:     PetscObjectQuery((PetscObject)*submat,"iscol_d",(PetscObject*)&iscol_d);
3333:     if (!iscol_d) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"iscol_d passed in was not used before, cannot reuse");

3335:     PetscObjectQuery((PetscObject)*submat,"iscol_o",(PetscObject*)&iscol_o);
3336:     if (!iscol_o) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"iscol_o passed in was not used before, cannot reuse");

3338:     /* Update diagonal and off-diagonal portions of submat */
3339:     asub = (Mat_MPIAIJ*)(*submat)->data;
3340:     MatCreateSubMatrix_SeqAIJ(a->A,isrow_d,iscol_d,PETSC_DECIDE,MAT_REUSE_MATRIX,&asub->A);
3341:     ISGetLocalSize(iscol_o,&n);
3342:     if (n) {
3343:       MatCreateSubMatrix_SeqAIJ(a->B,isrow_d,iscol_o,PETSC_DECIDE,MAT_REUSE_MATRIX,&asub->B);
3344:     }
3345:     MatAssemblyBegin(*submat,MAT_FINAL_ASSEMBLY);
3346:     MatAssemblyEnd(*submat,MAT_FINAL_ASSEMBLY);

3348:   } else { /* call == MAT_INITIAL_MATRIX */
3349:     const PetscInt *garray;
3350:     PetscInt        BsubN;

3352:     /* Create isrow_d, iscol_d, iscol_o and isgarray (replace isgarray with array?) */
3353:     ISGetSeqIS_SameColDist_Private(mat,isrow,iscol,&isrow_d,&iscol_d,&iscol_o,&garray);

3355:     /* Create local submatrices Asub and Bsub */
3356:     MatCreateSubMatrix_SeqAIJ(a->A,isrow_d,iscol_d,PETSC_DECIDE,MAT_INITIAL_MATRIX,&Asub);
3357:     MatCreateSubMatrix_SeqAIJ(a->B,isrow_d,iscol_o,PETSC_DECIDE,MAT_INITIAL_MATRIX,&Bsub);

3359:     /* Create submatrix M */
3360:     MatCreateMPIAIJWithSeqAIJ(comm,Asub,Bsub,garray,&M);

3362:     /* If Bsub has empty columns, compress iscol_o such that it will retrieve condensed Bsub from a->B during reuse */
3363:     asub = (Mat_MPIAIJ*)M->data;

3365:     ISGetLocalSize(iscol_o,&BsubN);
3366:     n = asub->B->cmap->N;
3367:     if (BsubN > n) {
3368:       /* This case can be tested using ~petsc/src/tao/bound/examples/tutorials/runplate2_3 */
3369:       const PetscInt *idx;
3370:       PetscInt       i,j,*idx_new,*subgarray = asub->garray;
3371:       PetscInfo2(M,"submatrix Bn %D != BsubN %D, update iscol_o\n",n,BsubN);

3373:       PetscMalloc1(n,&idx_new);
3374:       j = 0;
3375:       ISGetIndices(iscol_o,&idx);
3376:       for (i=0; i<n; i++) {
3377:         if (j >= BsubN) break;
3378:         while (subgarray[i] > garray[j]) j++;

3380:         if (subgarray[i] == garray[j]) {
3381:           idx_new[i] = idx[j++];
3382:         } else SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"subgarray[%D]=%D cannot be smaller than garray[%D]=%D",i,subgarray[i],j,garray[j]);
3383:       }
3384:       ISRestoreIndices(iscol_o,&idx);

3386:       ISDestroy(&iscol_o);
3387:       ISCreateGeneral(PETSC_COMM_SELF,n,idx_new,PETSC_OWN_POINTER,&iscol_o);

3389:     } else if (BsubN < n) {
3390:       SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Columns of Bsub (%D) cannot be smaller than B's (%D)",BsubN,asub->B->cmap->N);
3391:     }

3393:     PetscFree(garray);
3394:     *submat = M;

3396:     /* Save isrow_d, iscol_d and iscol_o used in processor for next request */
3397:     PetscObjectCompose((PetscObject)M,"isrow_d",(PetscObject)isrow_d);
3398:     ISDestroy(&isrow_d);

3400:     PetscObjectCompose((PetscObject)M,"iscol_d",(PetscObject)iscol_d);
3401:     ISDestroy(&iscol_d);

3403:     PetscObjectCompose((PetscObject)M,"iscol_o",(PetscObject)iscol_o);
3404:     ISDestroy(&iscol_o);
3405:   }
3406:   return(0);
3407: }

3409: PetscErrorCode MatCreateSubMatrix_MPIAIJ(Mat mat,IS isrow,IS iscol,MatReuse call,Mat *newmat)
3410: {
3412:   IS             iscol_local=NULL,isrow_d;
3413:   PetscInt       csize;
3414:   PetscInt       n,i,j,start,end;
3415:   PetscBool      sameRowDist=PETSC_FALSE,sameDist[2],tsameDist[2];
3416:   MPI_Comm       comm;

3419:   /* If isrow has same processor distribution as mat,
3420:      call MatCreateSubMatrix_MPIAIJ_SameRowDist() to avoid using a hash table with global size of iscol */
3421:   if (call == MAT_REUSE_MATRIX) {
3422:     PetscObjectQuery((PetscObject)*newmat,"isrow_d",(PetscObject*)&isrow_d);
3423:     if (isrow_d) {
3424:       sameRowDist  = PETSC_TRUE;
3425:       tsameDist[1] = PETSC_TRUE; /* sameColDist */
3426:     } else {
3427:       PetscObjectQuery((PetscObject)*newmat,"SubIScol",(PetscObject*)&iscol_local);
3428:       if (iscol_local) {
3429:         sameRowDist  = PETSC_TRUE;
3430:         tsameDist[1] = PETSC_FALSE; /* !sameColDist */
3431:       }
3432:     }
3433:   } else {
3434:     /* Check if isrow has same processor distribution as mat */
3435:     sameDist[0] = PETSC_FALSE;
3436:     ISGetLocalSize(isrow,&n);
3437:     if (!n) {
3438:       sameDist[0] = PETSC_TRUE;
3439:     } else {
3440:       ISGetMinMax(isrow,&i,&j);
3441:       MatGetOwnershipRange(mat,&start,&end);
3442:       if (i >= start && j < end) {
3443:         sameDist[0] = PETSC_TRUE;
3444:       }
3445:     }

3447:     /* Check if iscol has same processor distribution as mat */
3448:     sameDist[1] = PETSC_FALSE;
3449:     ISGetLocalSize(iscol,&n);
3450:     if (!n) {
3451:       sameDist[1] = PETSC_TRUE;
3452:     } else {
3453:       ISGetMinMax(iscol,&i,&j);
3454:       MatGetOwnershipRangeColumn(mat,&start,&end);
3455:       if (i >= start && j < end) sameDist[1] = PETSC_TRUE;
3456:     }

3458:     PetscObjectGetComm((PetscObject)mat,&comm);
3459:     MPIU_Allreduce(&sameDist,&tsameDist,2,MPIU_BOOL,MPI_LAND,comm);
3460:     sameRowDist = tsameDist[0];
3461:   }

3463:   if (sameRowDist) {
3464:     if (tsameDist[1]) { /* sameRowDist & sameColDist */
3465:       /* isrow and iscol have same processor distribution as mat */
3466:       MatCreateSubMatrix_MPIAIJ_SameRowColDist(mat,isrow,iscol,call,newmat);
3467:       return(0);
3468:     } else { /* sameRowDist */
3469:       /* isrow has same processor distribution as mat */
3470:       if (call == MAT_INITIAL_MATRIX) {
3471:         PetscBool sorted;
3472:         ISGetSeqIS_Private(mat,iscol,&iscol_local);
3473:         ISGetLocalSize(iscol_local,&n); /* local size of iscol_local = global columns of newmat */
3474:         ISGetSize(iscol,&i);
3475:         if (n != i) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"n %d != size of iscol %d",n,i);

3477:         ISSorted(iscol_local,&sorted);
3478:         if (sorted) {
3479:           /* MatCreateSubMatrix_MPIAIJ_SameRowDist() requires iscol_local be sorted; it can have duplicate indices */
3480:           MatCreateSubMatrix_MPIAIJ_SameRowDist(mat,isrow,iscol,iscol_local,MAT_INITIAL_MATRIX,newmat);
3481:           return(0);
3482:         }
3483:       } else { /* call == MAT_REUSE_MATRIX */
3484:         IS    iscol_sub;
3485:         PetscObjectQuery((PetscObject)*newmat,"SubIScol",(PetscObject*)&iscol_sub);
3486:         if (iscol_sub) {
3487:           MatCreateSubMatrix_MPIAIJ_SameRowDist(mat,isrow,iscol,NULL,call,newmat);
3488:           return(0);
3489:         }
3490:       }
3491:     }
3492:   }

3494:   /* General case: iscol -> iscol_local which has global size of iscol */
3495:   if (call == MAT_REUSE_MATRIX) {
3496:     PetscObjectQuery((PetscObject)*newmat,"ISAllGather",(PetscObject*)&iscol_local);
3497:     if (!iscol_local) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Submatrix passed in was not used before, cannot reuse");
3498:   } else {
3499:     if (!iscol_local) {
3500:       ISGetSeqIS_Private(mat,iscol,&iscol_local);
3501:     }
3502:   }

3504:   ISGetLocalSize(iscol,&csize);
3505:   MatCreateSubMatrix_MPIAIJ_nonscalable(mat,isrow,iscol_local,csize,call,newmat);

3507:   if (call == MAT_INITIAL_MATRIX) {
3508:     PetscObjectCompose((PetscObject)*newmat,"ISAllGather",(PetscObject)iscol_local);
3509:     ISDestroy(&iscol_local);
3510:   }
3511:   return(0);
3512: }
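
/*
   A minimal sketch of a typical call into the submatrix extraction above: each process keeps
   its even-numbered locally owned rows and all of its locally owned columns. A is assumed to
   be an assembled MPIAIJ matrix on PETSC_COMM_WORLD, the index sets are illustrative only, and
   error checking is omitted.

.vb
     Mat      A,S;
     IS       isrow,iscol;
     PetscInt rstart,rend,cstart,cend;
     MatGetOwnershipRange(A,&rstart,&rend);
     MatGetOwnershipRangeColumn(A,&cstart,&cend);
     ISCreateStride(PETSC_COMM_WORLD,(rend-rstart+1)/2,rstart,2,&isrow);   // every second local row
     ISCreateStride(PETSC_COMM_WORLD,cend-cstart,cstart,1,&iscol);         // all locally owned columns
     MatCreateSubMatrix(A,isrow,iscol,MAT_INITIAL_MATRIX,&S);
     // ... later, with new values in A but the same index sets ...
     MatCreateSubMatrix(A,isrow,iscol,MAT_REUSE_MATRIX,&S);
.ve
*/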

3514: /*@C
3515:      MatCreateMPIAIJWithSeqAIJ - creates an MPIAIJ matrix using SeqAIJ matrices that contain the "diagonal"
3516:          and "off-diagonal" part of the matrix in CSR format.

3518:    Collective

3520:    Input Parameters:
3521: +  comm - MPI communicator
3522: .  A - "diagonal" portion of matrix
3523: .  B - "off-diagonal" portion of matrix, may have empty columns, will be destroyed by this routine
3524: -  garray - global index of B columns

3526:    Output Parameter:
3527: .   mat - the matrix, with input A as its local diagonal matrix
3528:    Level: advanced

3530:    Notes:
3531:        See MatCreateAIJ() for the definition of "diagonal" and "off-diagonal" portion of the matrix.
3532:        A becomes part of output mat, B is destroyed by this routine. The user cannot use A and B anymore.

3534: .seealso: MatCreateMPIAIJWithSplitArrays()
3535: @*/
3536: PetscErrorCode MatCreateMPIAIJWithSeqAIJ(MPI_Comm comm,Mat A,Mat B,const PetscInt garray[],Mat *mat)
3537: {
3539:   Mat_MPIAIJ     *maij;
3540:   Mat_SeqAIJ     *b=(Mat_SeqAIJ*)B->data,*bnew;
3541:   PetscInt       *oi=b->i,*oj=b->j,i,nz,col;
3542:   PetscScalar    *oa=b->a;
3543:   Mat            Bnew;
3544:   PetscInt       m,n,N;

3547:   MatCreate(comm,mat);
3548:   MatGetSize(A,&m,&n);
3549:   if (m != B->rmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Am %D != Bm %D",m,B->rmap->N);
3550:   if (A->rmap->bs != B->rmap->bs) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"A row bs %D != B row bs %D",A->rmap->bs,B->rmap->bs);
3551:   /* remove check below; When B is created using iscol_o from ISGetSeqIS_SameColDist_Private(), its bs may not be same as A */
3552:   /* if (A->cmap->bs != B->cmap->bs) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"A column bs %D != B column bs %D",A->cmap->bs,B->cmap->bs); */

3554:   /* Get global columns of mat */
3555:   MPIU_Allreduce(&n,&N,1,MPIU_INT,MPI_SUM,comm);

3557:   MatSetSizes(*mat,m,n,PETSC_DECIDE,N);
3558:   MatSetType(*mat,MATMPIAIJ);
3559:   MatSetBlockSizes(*mat,A->rmap->bs,A->cmap->bs);
3560:   maij = (Mat_MPIAIJ*)(*mat)->data;

3562:   (*mat)->preallocated = PETSC_TRUE;

3564:   PetscLayoutSetUp((*mat)->rmap);
3565:   PetscLayoutSetUp((*mat)->cmap);

3567:   /* Set A as diagonal portion of *mat */
3568:   maij->A = A;

3570:   nz = oi[m];
3571:   for (i=0; i<nz; i++) {
3572:     col   = oj[i];
3573:     oj[i] = garray[col];
3574:   }

3576:    /* Set Bnew as off-diagonal portion of *mat */
3577:   MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,m,N,oi,oj,oa,&Bnew);
3578:   bnew        = (Mat_SeqAIJ*)Bnew->data;
3579:   bnew->maxnz = b->maxnz; /* allocated nonzeros of B */
3580:   maij->B     = Bnew;

3582:   if (B->rmap->N != Bnew->rmap->N) SETERRQ2(PETSC_COMM_SELF,0,"BN %d != BnewN %d",B->rmap->N,Bnew->rmap->N);

3584:   b->singlemalloc = PETSC_FALSE; /* B arrays are shared by Bnew */
3585:   b->free_a       = PETSC_FALSE;
3586:   b->free_ij      = PETSC_FALSE;
3587:   MatDestroy(&B);

3589:   bnew->singlemalloc = PETSC_TRUE; /* arrays will be freed by MatDestroy(&Bnew) */
3590:   bnew->free_a       = PETSC_TRUE;
3591:   bnew->free_ij      = PETSC_TRUE;

3593:   /* condense columns of maij->B */
3594:   MatSetOption(*mat,MAT_NO_OFF_PROC_ENTRIES,PETSC_TRUE);
3595:   MatAssemblyBegin(*mat,MAT_FINAL_ASSEMBLY);
3596:   MatAssemblyEnd(*mat,MAT_FINAL_ASSEMBLY);
3597:   MatSetOption(*mat,MAT_NO_OFF_PROC_ENTRIES,PETSC_FALSE);
3598:   MatSetOption(*mat,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_TRUE);
3599:   return(0);
3600: }

3602: extern PetscErrorCode MatCreateSubMatrices_MPIAIJ_SingleIS_Local(Mat,PetscInt,const IS[],const IS[],MatReuse,PetscBool,Mat*);

3604: PetscErrorCode MatCreateSubMatrix_MPIAIJ_SameRowDist(Mat mat,IS isrow,IS iscol,IS iscol_local,MatReuse call,Mat *newmat)
3605: {
3607:   PetscInt       i,m,n,rstart,row,rend,nz,j,bs,cbs;
3608:   PetscInt       *ii,*jj,nlocal,*dlens,*olens,dlen,olen,jend,mglobal;
3609:   Mat_MPIAIJ     *a=(Mat_MPIAIJ*)mat->data;
3610:   Mat            M,Msub,B=a->B;
3611:   MatScalar      *aa;
3612:   Mat_SeqAIJ     *aij;
3613:   PetscInt       *garray = a->garray,*colsub,Ncols;
3614:   PetscInt       count,Bn=B->cmap->N,cstart=mat->cmap->rstart,cend=mat->cmap->rend;
3615:   IS             iscol_sub,iscmap;
3616:   const PetscInt *is_idx,*cmap;
3617:   PetscBool      allcolumns=PETSC_FALSE;
3618:   MPI_Comm       comm;

3621:   PetscObjectGetComm((PetscObject)mat,&comm);

3623:   if (call == MAT_REUSE_MATRIX) {
3624:     PetscObjectQuery((PetscObject)*newmat,"SubIScol",(PetscObject*)&iscol_sub);
3625:     if (!iscol_sub) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"SubIScol passed in was not used before, cannot reuse");
3626:     ISGetLocalSize(iscol_sub,&count);

3628:     PetscObjectQuery((PetscObject)*newmat,"Subcmap",(PetscObject*)&iscmap);
3629:     if (!iscmap) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Subcmap passed in was not used before, cannot reuse");

3631:     PetscObjectQuery((PetscObject)*newmat,"SubMatrix",(PetscObject*)&Msub);
3632:     if (!Msub) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Submatrix passed in was not used before, cannot reuse");

3634:     MatCreateSubMatrices_MPIAIJ_SingleIS_Local(mat,1,&isrow,&iscol_sub,MAT_REUSE_MATRIX,PETSC_FALSE,&Msub);

3636:   } else { /* call == MAT_INITIAL_MATRIX */
3637:     PetscBool flg;

3639:     ISGetLocalSize(iscol,&n);
3640:     ISGetSize(iscol,&Ncols);

3642:     /* (1) iscol -> nonscalable iscol_local */
3643:     /* Check for special case: each processor gets entire matrix columns */
3644:     ISIdentity(iscol_local,&flg);
3645:     if (flg && n == mat->cmap->N) allcolumns = PETSC_TRUE;
3646:     if (allcolumns) {
3647:       iscol_sub = iscol_local;
3648:       PetscObjectReference((PetscObject)iscol_local);
3649:       ISCreateStride(PETSC_COMM_SELF,n,0,1,&iscmap);

3651:     } else {
3652:       /* (2) iscol_local -> iscol_sub and iscmap. Implementation below requires iscol_local be sorted, it can have duplicate indices */
3653:       PetscInt *idx,*cmap1,k;
3654:       PetscMalloc1(Ncols,&idx);
3655:       PetscMalloc1(Ncols,&cmap1);
3656:       ISGetIndices(iscol_local,&is_idx);
3657:       count = 0;
3658:       k     = 0;
3659:       for (i=0; i<Ncols; i++) {
3660:         j = is_idx[i];
3661:         if (j >= cstart && j < cend) {
3662:           /* diagonal part of mat */
3663:           idx[count]     = j;
3664:           cmap1[count++] = i; /* column index in submat */
3665:         } else if (Bn) {
3666:           /* off-diagonal part of mat */
3667:           if (j == garray[k]) {
3668:             idx[count]     = j;
3669:             cmap1[count++] = i;  /* column index in submat */
3670:           } else if (j > garray[k]) {
3671:             while (j > garray[k] && k < Bn-1) k++;
3672:             if (j == garray[k]) {
3673:               idx[count]     = j;
3674:               cmap1[count++] = i; /* column index in submat */
3675:             }
3676:           }
3677:         }
3678:       }
3679:       ISRestoreIndices(iscol_local,&is_idx);

3681:       ISCreateGeneral(PETSC_COMM_SELF,count,idx,PETSC_OWN_POINTER,&iscol_sub);
3682:       ISGetBlockSize(iscol,&cbs);
3683:       ISSetBlockSize(iscol_sub,cbs);

3685:       ISCreateGeneral(PetscObjectComm((PetscObject)iscol_local),count,cmap1,PETSC_OWN_POINTER,&iscmap);
3686:     }

3688:     /* (3) Create sequential Msub */
3689:     MatCreateSubMatrices_MPIAIJ_SingleIS_Local(mat,1,&isrow,&iscol_sub,MAT_INITIAL_MATRIX,allcolumns,&Msub);
3690:   }

3692:   ISGetLocalSize(iscol_sub,&count);
3693:   aij  = (Mat_SeqAIJ*)(Msub)->data;
3694:   ii   = aij->i;
3695:   ISGetIndices(iscmap,&cmap);

3697:   /*
3698:       m - number of local rows
3699:       Ncols - number of columns (same on all processors)
3700:       rstart - first row in new global matrix generated
3701:   */
3702:   MatGetSize(Msub,&m,NULL);

3704:   if (call == MAT_INITIAL_MATRIX) {
3705:     /* (4) Create parallel newmat */
3706:     PetscMPIInt    rank,size;
3707:     PetscInt       csize;

3709:     MPI_Comm_size(comm,&size);
3710:     MPI_Comm_rank(comm,&rank);

3712:     /*
3713:         Determine the number of non-zeros in the diagonal and off-diagonal
3714:         portions of the matrix in order to do correct preallocation
3715:     */

3717:     /* first get start and end of "diagonal" columns */
3718:     ISGetLocalSize(iscol,&csize);
3719:     if (csize == PETSC_DECIDE) {
3720:       ISGetSize(isrow,&mglobal);
3721:       if (mglobal == Ncols) { /* square matrix */
3722:         nlocal = m;
3723:       } else {
3724:         nlocal = Ncols/size + ((Ncols % size) > rank);
3725:       }
3726:     } else {
3727:       nlocal = csize;
3728:     }
3729:     MPI_Scan(&nlocal,&rend,1,MPIU_INT,MPI_SUM,comm);
3730:     rstart = rend - nlocal;
3731:     if (rank == size - 1 && rend != Ncols) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Local column sizes %D do not add up to total number of columns %D",rend,Ncols);

3733:     /* next, compute all the lengths */
3734:     jj    = aij->j;
3735:     PetscMalloc1(2*m+1,&dlens);
3736:     olens = dlens + m;
3737:     for (i=0; i<m; i++) {
3738:       jend = ii[i+1] - ii[i];
3739:       olen = 0;
3740:       dlen = 0;
3741:       for (j=0; j<jend; j++) {
3742:         if (cmap[*jj] < rstart || cmap[*jj] >= rend) olen++;
3743:         else dlen++;
3744:         jj++;
3745:       }
3746:       olens[i] = olen;
3747:       dlens[i] = dlen;
3748:     }

3750:     ISGetBlockSize(isrow,&bs);
3751:     ISGetBlockSize(iscol,&cbs);

3753:     MatCreate(comm,&M);
3754:     MatSetSizes(M,m,nlocal,PETSC_DECIDE,Ncols);
3755:     MatSetBlockSizes(M,bs,cbs);
3756:     MatSetType(M,((PetscObject)mat)->type_name);
3757:     MatMPIAIJSetPreallocation(M,0,dlens,0,olens);
3758:     PetscFree(dlens);

3760:   } else { /* call == MAT_REUSE_MATRIX */
3761:     M    = *newmat;
3762:     MatGetLocalSize(M,&i,NULL);
3763:     if (i != m) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Previous matrix must be same size/layout as request");
3764:     MatZeroEntries(M);
3765:     /*
3766:          The next two lines are needed so we may call MatSetValues_MPIAIJ() below directly,
3767:        rather than the slower MatSetValues().
3768:     */
3769:     M->was_assembled = PETSC_TRUE;
3770:     M->assembled     = PETSC_FALSE;
3771:   }

3773:   /* (5) Set values of Msub to *newmat */
3774:   PetscMalloc1(count,&colsub);
3775:   MatGetOwnershipRange(M,&rstart,NULL);

3777:   jj   = aij->j;
3778:   aa   = aij->a;
3779:   for (i=0; i<m; i++) {
3780:     row = rstart + i;
3781:     nz  = ii[i+1] - ii[i];
3782:     for (j=0; j<nz; j++) colsub[j] = cmap[jj[j]];
3783:     MatSetValues_MPIAIJ(M,1,&row,nz,colsub,aa,INSERT_VALUES);
3784:     jj += nz; aa += nz;
3785:   }
3786:   ISRestoreIndices(iscmap,&cmap);

3788:   MatAssemblyBegin(M,MAT_FINAL_ASSEMBLY);
3789:   MatAssemblyEnd(M,MAT_FINAL_ASSEMBLY);

3791:   PetscFree(colsub);

3793:   /* save Msub, iscol_sub and iscmap used in processor for next request */
3794:   if (call ==  MAT_INITIAL_MATRIX) {
3795:     *newmat = M;
3796:     PetscObjectCompose((PetscObject)(*newmat),"SubMatrix",(PetscObject)Msub);
3797:     MatDestroy(&Msub);

3799:     PetscObjectCompose((PetscObject)(*newmat),"SubIScol",(PetscObject)iscol_sub);
3800:     ISDestroy(&iscol_sub);

3802:     PetscObjectCompose((PetscObject)(*newmat),"Subcmap",(PetscObject)iscmap);
3803:     ISDestroy(&iscmap);

3805:     if (iscol_local) {
3806:       PetscObjectCompose((PetscObject)(*newmat),"ISAllGather",(PetscObject)iscol_local);
3807:       ISDestroy(&iscol_local);
3808:     }
3809:   }
3810:   return(0);
3811: }

3813: /*
3814:     Not great since it makes two copies of the submatrix, first a SeqAIJ
3815:   locally and then the end result by concatenating the local matrices.
3816:   Writing it directly would be much like MatCreateSubMatrices_MPIAIJ()

3818:   Note: This requires a sequential iscol with all indices.
3819: */
3820: PetscErrorCode MatCreateSubMatrix_MPIAIJ_nonscalable(Mat mat,IS isrow,IS iscol,PetscInt csize,MatReuse call,Mat *newmat)
3821: {
3823:   PetscMPIInt    rank,size;
3824:   PetscInt       i,m,n,rstart,row,rend,nz,*cwork,j,bs,cbs;
3825:   PetscInt       *ii,*jj,nlocal,*dlens,*olens,dlen,olen,jend,mglobal;
3826:   Mat            M,Mreuse;
3827:   MatScalar      *aa,*vwork;
3828:   MPI_Comm       comm;
3829:   Mat_SeqAIJ     *aij;
3830:   PetscBool      colflag,allcolumns=PETSC_FALSE;

3833:   PetscObjectGetComm((PetscObject)mat,&comm);
3834:   MPI_Comm_rank(comm,&rank);
3835:   MPI_Comm_size(comm,&size);

3837:   /* Check for special case: each processor gets entire matrix columns */
3838:   ISIdentity(iscol,&colflag);
3839:   ISGetLocalSize(iscol,&n);
3840:   if (colflag && n == mat->cmap->N) allcolumns = PETSC_TRUE;

3842:   if (call ==  MAT_REUSE_MATRIX) {
3843:     PetscObjectQuery((PetscObject)*newmat,"SubMatrix",(PetscObject*)&Mreuse);
3844:     if (!Mreuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Submatrix passed in was not used before, cannot reuse");
3845:     MatCreateSubMatrices_MPIAIJ_SingleIS_Local(mat,1,&isrow,&iscol,MAT_REUSE_MATRIX,allcolumns,&Mreuse);
3846:   } else {
3847:     MatCreateSubMatrices_MPIAIJ_SingleIS_Local(mat,1,&isrow,&iscol,MAT_INITIAL_MATRIX,allcolumns,&Mreuse);
3848:   }

3850:   /*
3851:       m - number of local rows
3852:       n - number of columns (same on all processors)
3853:       rstart - first row in new global matrix generated
3854:   */
3855:   MatGetSize(Mreuse,&m,&n);
3856:   MatGetBlockSizes(Mreuse,&bs,&cbs);
3857:   if (call == MAT_INITIAL_MATRIX) {
3858:     aij = (Mat_SeqAIJ*)(Mreuse)->data;
3859:     ii  = aij->i;
3860:     jj  = aij->j;

3862:     /*
3863:         Determine the number of non-zeros in the diagonal and off-diagonal
3864:         portions of the matrix in order to do correct preallocation
3865:     */

3867:     /* first get start and end of "diagonal" columns */
3868:     if (csize == PETSC_DECIDE) {
3869:       ISGetSize(isrow,&mglobal);
3870:       if (mglobal == n) { /* square matrix */
3871:         nlocal = m;
3872:       } else {
3873:         nlocal = n/size + ((n % size) > rank);
3874:       }
3875:     } else {
3876:       nlocal = csize;
3877:     }
3878:     MPI_Scan(&nlocal,&rend,1,MPIU_INT,MPI_SUM,comm);
3879:     rstart = rend - nlocal;
3880:     if (rank == size - 1 && rend != n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Local column sizes %D do not add up to total number of columns %D",rend,n);

3882:     /* next, compute all the lengths */
3883:     PetscMalloc1(2*m+1,&dlens);
3884:     olens = dlens + m;
3885:     for (i=0; i<m; i++) {
3886:       jend = ii[i+1] - ii[i];
3887:       olen = 0;
3888:       dlen = 0;
3889:       for (j=0; j<jend; j++) {
3890:         if (*jj < rstart || *jj >= rend) olen++;
3891:         else dlen++;
3892:         jj++;
3893:       }
3894:       olens[i] = olen;
3895:       dlens[i] = dlen;
3896:     }
3897:     MatCreate(comm,&M);
3898:     MatSetSizes(M,m,nlocal,PETSC_DECIDE,n);
3899:     MatSetBlockSizes(M,bs,cbs);
3900:     MatSetType(M,((PetscObject)mat)->type_name);
3901:     MatMPIAIJSetPreallocation(M,0,dlens,0,olens);
3902:     PetscFree(dlens);
3903:   } else {
3904:     PetscInt ml,nl;

3906:     M    = *newmat;
3907:     MatGetLocalSize(M,&ml,&nl);
3908:     if (ml != m) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Previous matrix must be same size/layout as request");
3909:     MatZeroEntries(M);
3910:     /*
3911:          The next two lines are needed so we may call MatSetValues_MPIAIJ() below directly,
3912:        rather than the slower MatSetValues().
3913:     */
3914:     M->was_assembled = PETSC_TRUE;
3915:     M->assembled     = PETSC_FALSE;
3916:   }
3917:   MatGetOwnershipRange(M,&rstart,&rend);
3918:   aij  = (Mat_SeqAIJ*)(Mreuse)->data;
3919:   ii   = aij->i;
3920:   jj   = aij->j;
3921:   aa   = aij->a;
3922:   for (i=0; i<m; i++) {
3923:     row   = rstart + i;
3924:     nz    = ii[i+1] - ii[i];
3925:     cwork = jj;     jj += nz;
3926:     vwork = aa;     aa += nz;
3927:     MatSetValues_MPIAIJ(M,1,&row,nz,cwork,vwork,INSERT_VALUES);
3928:   }

3930:   MatAssemblyBegin(M,MAT_FINAL_ASSEMBLY);
3931:   MatAssemblyEnd(M,MAT_FINAL_ASSEMBLY);
3932:   *newmat = M;

3934:   /* save submatrix used in processor for next request */
3935:   if (call ==  MAT_INITIAL_MATRIX) {
3936:     PetscObjectCompose((PetscObject)M,"SubMatrix",(PetscObject)Mreuse);
3937:     MatDestroy(&Mreuse);
3938:   }
3939:   return(0);
3940: }

3942: PetscErrorCode MatMPIAIJSetPreallocationCSR_MPIAIJ(Mat B,const PetscInt Ii[],const PetscInt J[],const PetscScalar v[])
3943: {
3944:   PetscInt       m,cstart, cend,j,nnz,i,d;
3945:   PetscInt       *d_nnz,*o_nnz,nnz_max = 0,rstart,ii;
3946:   const PetscInt *JJ;
3948:   PetscBool      nooffprocentries;

3951:   if (Ii[0]) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Ii[0] must be 0, it is %D",Ii[0]);

3953:   PetscLayoutSetUp(B->rmap);
3954:   PetscLayoutSetUp(B->cmap);
3955:   m      = B->rmap->n;
3956:   cstart = B->cmap->rstart;
3957:   cend   = B->cmap->rend;
3958:   rstart = B->rmap->rstart;

3960:   PetscCalloc2(m,&d_nnz,m,&o_nnz);

3962: #if defined(PETSC_USE_DEBUG)
3963:   for (i=0; i<m; i++) {
3964:     nnz = Ii[i+1]- Ii[i];
3965:     JJ  = J + Ii[i];
3966:     if (nnz < 0) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Local row %D has a negative number of columns (%D)",i,nnz);
3967:     if (nnz && (JJ[0] < 0)) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Row %D starts with negative column index %D",i,JJ[0]);
3968:     if (nnz && (JJ[nnz-1] >= B->cmap->N)) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Row %D ends with too large a column index %D (max allowed %D)",i,JJ[nnz-1],B->cmap->N);
3969:   }
3970: #endif

3972:   for (i=0; i<m; i++) {
3973:     nnz     = Ii[i+1]- Ii[i];
3974:     JJ      = J + Ii[i];
3975:     nnz_max = PetscMax(nnz_max,nnz);
3976:     d       = 0;
3977:     for (j=0; j<nnz; j++) {
3978:       if (cstart <= JJ[j] && JJ[j] < cend) d++;
3979:     }
3980:     d_nnz[i] = d;
3981:     o_nnz[i] = nnz - d;
3982:   }
3983:   MatMPIAIJSetPreallocation(B,0,d_nnz,0,o_nnz);
3984:   PetscFree2(d_nnz,o_nnz);

3986:   for (i=0; i<m; i++) {
3987:     ii   = i + rstart;
3988:     MatSetValues_MPIAIJ(B,1,&ii,Ii[i+1] - Ii[i],J+Ii[i], v ? v + Ii[i] : NULL,INSERT_VALUES);
3989:   }
3990:   nooffprocentries    = B->nooffprocentries;
3991:   B->nooffprocentries = PETSC_TRUE;
3992:   MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);
3993:   MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);
3994:   B->nooffprocentries = nooffprocentries;

3996:   MatSetOption(B,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_TRUE);
3997:   return(0);
3998: }

4000: /*@
4001:    MatMPIAIJSetPreallocationCSR - Allocates memory for a sparse parallel matrix in AIJ format
4002:    (the default parallel PETSc format).

4004:    Collective

4006:    Input Parameters:
4007: +  B - the matrix
4008: .  i - the indices into j for the start of each local row (starts with zero)
4009: .  j - the column indices for each local row (starts with zero)
4010: -  v - optional values in the matrix

4012:    Level: developer

4014:    Notes:
4015:        The i, j, and v arrays ARE copied by this routine into the internal format used by PETSc;
4016:      thus you CANNOT change the matrix entries by changing the values of v[] after you have
4017:      called this routine. Use MatCreateMPIAIJWithSplitArrays() to avoid needing to copy the arrays.

4019:        The i and j indices are 0 based, and i indices are indices corresponding to the local j array.

4021:        The format which is used for the sparse matrix input is equivalent to a
4022:     row-major ordering, i.e. for the following matrix, the input data expected is
4023:     as shown

4025: $        1 0 0
4026: $        2 0 3     P0
4027: $       -------
4028: $        4 5 6     P1
4029: $
4030: $     Process0 [P0]: rows_owned=[0,1]
4031: $        i =  {0,1,3}  [size = nrow+1  = 2+1]
4032: $        j =  {0,0,2}  [size = 3]
4033: $        v =  {1,2,3}  [size = 3]
4034: $
4035: $     Process1 [P1]: rows_owned=[2]
4036: $        i =  {0,3}    [size = nrow+1  = 1+1]
4037: $        j =  {0,1,2}  [size = 3]
4038: $        v =  {4,5,6}  [size = 3]

4040: .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatMPIAIJSetPreallocation(), MatCreateAIJ(), MATMPIAIJ,
4041:           MatCreateSeqAIJWithArrays(), MatCreateMPIAIJWithSplitArrays()
4042: @*/
4043: PetscErrorCode  MatMPIAIJSetPreallocationCSR(Mat B,const PetscInt i[],const PetscInt j[], const PetscScalar v[])
4044: {

4048:   PetscTryMethod(B,"MatMPIAIJSetPreallocationCSR_C",(Mat,const PetscInt[],const PetscInt[],const PetscScalar[]),(B,i,j,v));
4049:   return(0);
4050: }
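
/*
   A minimal sketch that builds the 3x3 example from the manual page above, assuming exactly
   two MPI processes (P0 owns rows 0-1, P1 owns row 2); the call inserts the values and
   assembles the matrix, and error checking is omitted.

.vb
     Mat               B;
     PetscMPIInt       rank;
     const PetscInt    i0[] = {0,1,3}, j0[] = {0,0,2};
     const PetscScalar v0[] = {1,2,3};
     const PetscInt    i1[] = {0,3},   j1[] = {0,1,2};
     const PetscScalar v1[] = {4,5,6};

     MPI_Comm_rank(PETSC_COMM_WORLD,&rank);
     MatCreate(PETSC_COMM_WORLD,&B);
     MatSetSizes(B,rank ? 1 : 2,PETSC_DECIDE,3,3);
     MatSetType(B,MATMPIAIJ);
     MatMPIAIJSetPreallocationCSR(B,rank ? i1 : i0,rank ? j1 : j0,rank ? v1 : v0);
.ve
*/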

4052: /*@C
4053:    MatMPIAIJSetPreallocation - Preallocates memory for a sparse parallel matrix in AIJ format
4054:    (the default parallel PETSc format).  For good matrix assembly performance
4055:    the user should preallocate the matrix storage by setting the parameters
4056:    d_nz (or d_nnz) and o_nz (or o_nnz).  By setting these parameters accurately,
4057:    performance can be increased by more than a factor of 50.

4059:    Collective

4061:    Input Parameters:
4062: +  B - the matrix
4063: .  d_nz  - number of nonzeros per row in DIAGONAL portion of local submatrix
4064:            (same value is used for all local rows)
4065: .  d_nnz - array containing the number of nonzeros in the various rows of the
4066:            DIAGONAL portion of the local submatrix (possibly different for each row)
4067:            or NULL (PETSC_NULL_INTEGER in Fortran), if d_nz is used to specify the nonzero structure.
4068:            The size of this array is equal to the number of local rows, i.e 'm'.
4069:            For matrices that will be factored, you must leave room for (and set)
4070:            the diagonal entry even if it is zero.
4071: .  o_nz  - number of nonzeros per row in the OFF-DIAGONAL portion of local
4072:            submatrix (same value is used for all local rows).
4073: -  o_nnz - array containing the number of nonzeros in the various rows of the
4074:            OFF-DIAGONAL portion of the local submatrix (possibly different for
4075:            each row) or NULL (PETSC_NULL_INTEGER in Fortran), if o_nz is used to specify the nonzero
4076:            structure. The size of this array is equal to the number
4077:            of local rows, i.e 'm'.

4079:    If the *_nnz parameter is given then the *_nz parameter is ignored

4081:    The AIJ format (also called the Yale sparse matrix format or
4082:    compressed row storage (CSR)) is fully compatible with standard Fortran 77
4083:    storage.  The stored row and column indices begin with zero.
4084:    See Users-Manual: ch_mat for details.

4086:    The parallel matrix is partitioned such that the first m0 rows belong to
4087:    process 0, the next m1 rows belong to process 1, the next m2 rows belong
4088:    to process 2 etc.. where m0,m1,m2... are the input parameter 'm'.

4090:    The DIAGONAL portion of the local submatrix of a processor can be defined
4091:    as the submatrix which is obtained by extracting the part corresponding to
4092:    the rows r1-r2 and columns c1-c2 of the global matrix, where r1 is the
4093:    first row that belongs to the processor, r2 is the last row belonging to
4094:    this processor, and c1-c2 is the range of indices of the local part of a
4095:    vector suitable for applying the matrix to.  This is an mxn matrix.  In the
4096:    common case of a square matrix, the row and column ranges are the same and
4097:    the DIAGONAL part is also square. The remaining portion of the local
4098:    submatrix (mxN) constitute the OFF-DIAGONAL portion.

4100:    If o_nnz, d_nnz are specified, then o_nz, and d_nz are ignored.

4102:    You can call MatGetInfo() to get information on how effective the preallocation was;
4103:    for example the fields mallocs,nz_allocated,nz_used,nz_unneeded;
4104:    You can also run with the option -info and look for messages with the string
4105:    malloc in them to see if additional memory allocation was needed.

4107:    Example usage:

4109:    Consider the following 8x8 matrix with 34 non-zero values, that is
4110:    assembled across 3 processors. Let's assume that proc0 owns 3 rows,
4111:    proc1 owns 3 rows, proc2 owns 2 rows. This division can be shown
4112:    as follows:

4114: .vb
4115:             1  2  0  |  0  3  0  |  0  4
4116:     Proc0   0  5  6  |  7  0  0  |  8  0
4117:             9  0 10  | 11  0  0  | 12  0
4118:     -------------------------------------
4119:            13  0 14  | 15 16 17  |  0  0
4120:     Proc1   0 18  0  | 19 20 21  |  0  0
4121:             0  0  0  | 22 23  0  | 24  0
4122:     -------------------------------------
4123:     Proc2  25 26 27  |  0  0 28  | 29  0
4124:            30  0  0  | 31 32 33  |  0 34
4125: .ve

4127:    This can be represented as a collection of submatrices as:

4129: .vb
4130:       A B C
4131:       D E F
4132:       G H I
4133: .ve

4135:    Where the submatrices A,B,C are owned by proc0, D,E,F are
4136:    owned by proc1, G,H,I are owned by proc2.

4138:    The 'm' parameters for proc0,proc1,proc2 are 3,3,2 respectively.
4139:    The 'n' parameters for proc0,proc1,proc2 are 3,3,2 respectively.
4140:    The 'M','N' parameters are 8,8, and have the same values on all procs.

4142:    The DIAGONAL submatrices corresponding to proc0,proc1,proc2 are
4143:    submatrices [A], [E], [I] respectively. The OFF-DIAGONAL submatrices
4144:    corresponding to proc0,proc1,proc2 are [BC], [DF], [GH] respectively.
4145:    Internally, each processor stores the DIAGONAL part, and the OFF-DIAGONAL
4146:    part as SeqAIJ matrices. For example, proc1 will store [E] as a SeqAIJ
4147:    matrix, and [DF] as another SeqAIJ matrix.

4149:    When d_nz, o_nz parameters are specified, d_nz storage elements are
4150:    allocated for every row of the local diagonal submatrix, and o_nz
4151:    storage locations are allocated for every row of the OFF-DIAGONAL submat.
4152:    One way to choose d_nz and o_nz is to use the max nonzeros per local
4153:    row for each of the local DIAGONAL and OFF-DIAGONAL submatrices.
4154:    In this case, the values of d_nz,o_nz are:
4155: .vb
4156:      proc0 : dnz = 2, o_nz = 2
4157:      proc1 : dnz = 3, o_nz = 2
4158:      proc2 : dnz = 1, o_nz = 4
4159: .ve
4160:    We are allocating m*(d_nz+o_nz) storage locations for every proc. This
4161:    translates to 3*(2+2)=12 for proc0, 3*(3+2)=15 for proc1, 2*(1+4)=10
4162:    for proc2, i.e. we are using 12+15+10=37 storage locations to store
4163:    34 values.

4165:    When d_nnz, o_nnz parameters are specified, the storage is specified
4166:    for every row, corresponding to both DIAGONAL and OFF-DIAGONAL submatrices.
4167:    In the above case the values for d_nnz,o_nnz are:
4168: .vb
4169:      proc0: d_nnz = [2,2,2] and o_nnz = [2,2,2]
4170:      proc1: d_nnz = [3,3,2] and o_nnz = [2,1,1]
4171:      proc2: d_nnz = [1,1]   and o_nnz = [4,4]
4172: .ve
4173:    Here the space allocated is the sum of all the above values, i.e. 34, and
4174:    hence pre-allocation is perfect.

4176:    Level: intermediate

4178: .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatCreateAIJ(), MatMPIAIJSetPreallocationCSR(),
4179:           MATMPIAIJ, MatGetInfo(), PetscSplitOwnership()
4180: @*/
4181: PetscErrorCode MatMPIAIJSetPreallocation(Mat B,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[])
4182: {

4188:   PetscTryMethod(B,"MatMPIAIJSetPreallocation_C",(Mat,PetscInt,const PetscInt[],PetscInt,const PetscInt[]),(B,d_nz,d_nnz,o_nz,o_nnz));
4189:   return(0);
4190: }
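
/*
   A minimal sketch that preallocates the 8x8 example above using the per-row d_nnz/o_nnz
   counts, assuming exactly three MPI processes; the MatSetValues() calls and error checking
   are omitted.

.vb
     Mat            A;
     PetscMPIInt    rank;
     const PetscInt mloc[3] = {3,3,2};                   // local rows/columns per process
     const PetscInt d0[3] = {2,2,2}, o0[3] = {2,2,2};    // proc0
     const PetscInt d1[3] = {3,3,2}, o1[3] = {2,1,1};    // proc1
     const PetscInt d2[2] = {1,1},   o2[2] = {4,4};      // proc2
     const PetscInt *dnnz[3] = {d0,d1,d2}, *onnz[3] = {o0,o1,o2};

     MPI_Comm_rank(PETSC_COMM_WORLD,&rank);
     MatCreate(PETSC_COMM_WORLD,&A);
     MatSetSizes(A,mloc[rank],mloc[rank],8,8);
     MatSetType(A,MATMPIAIJ);
     MatMPIAIJSetPreallocation(A,0,dnnz[rank],0,onnz[rank]);
     // ... MatSetValues() for the locally owned rows, then MatAssemblyBegin/End() ...
.ve
*/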

4192: /*@
4193:      MatCreateMPIAIJWithArrays - creates an MPI AIJ matrix using arrays that contain the local rows
4194:          in standard CSR format.

4196:    Collective

4198:    Input Parameters:
4199: +  comm - MPI communicator
4200: .  m - number of local rows (Cannot be PETSC_DECIDE)
4201: .  n - This value should be the same as the local size used in creating the
4202:        x vector for the matrix-vector product y = Ax. (or PETSC_DECIDE to have
4203:        calculated if N is given) For square matrices n is almost always m.
4204: .  M - number of global rows (or PETSC_DETERMINE to have calculated if m is given)
4205: .  N - number of global columns (or PETSC_DETERMINE to have calculated if n is given)
4206: .   i - row indices; that is i[0] = 0, i[row] = i[row-1] + number of elements in that row of the matrix
4207: .   j - column indices
4208: -   a - matrix values

4210:    Output Parameter:
4211: .   mat - the matrix

4213:    Level: intermediate

4215:    Notes:
4216:        The i, j, and a arrays ARE copied by this routine into the internal format used by PETSc;
4217:      thus you CANNOT change the matrix entries by changing the values of a[] after you have
4218:      called this routine. Use MatCreateMPIAIJWithSplitArrays() to avoid needing to copy the arrays.

4220:        The i and j indices are 0 based, and i indices are indices corresponding to the local j array.

4222:        The format which is used for the sparse matrix input is equivalent to a
4223:     row-major ordering, i.e. for the following matrix, the input data expected is
4224:     as shown

4226:        Once you have created the matrix you can update it with new numerical values using MatUpdateMPIAIJWithArrays()

4228: $        1 0 0
4229: $        2 0 3     P0
4230: $       -------
4231: $        4 5 6     P1
4232: $
4233: $     Process0 [P0]: rows_owned=[0,1]
4234: $        i =  {0,1,3}  [size = nrow+1  = 2+1]
4235: $        j =  {0,0,2}  [size = 3]
4236: $        v =  {1,2,3}  [size = 3]
4237: $
4238: $     Process1 [P1]: rows_owned=[2]
4239: $        i =  {0,3}    [size = nrow+1  = 1+1]
4240: $        j =  {0,1,2}  [size = 3]
4241: $        v =  {4,5,6}  [size = 3]

4243: .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatMPIAIJSetPreallocation(), MatMPIAIJSetPreallocationCSR(),
4244:           MATMPIAIJ, MatCreateAIJ(), MatCreateMPIAIJWithSplitArrays(), MatUpdateMPIAIJWithArrays()
4245: @*/
4246: PetscErrorCode MatCreateMPIAIJWithArrays(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt M,PetscInt N,const PetscInt i[],const PetscInt j[],const PetscScalar a[],Mat *mat)
4247: {

4251:   if (i && i[0]) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"i (row indices) must start with 0");
4252:   if (m < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"local number of rows (m) cannot be PETSC_DECIDE, or negative");
4253:   MatCreate(comm,mat);
4254:   MatSetSizes(*mat,m,n,M,N);
4255:   /* MatSetBlockSizes(M,bs,cbs); */
4256:   MatSetType(*mat,MATMPIAIJ);
4257:   MatMPIAIJSetPreallocationCSR(*mat,i,j,a);
4258:   return(0);
4259: }
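
/*
   A minimal sketch of the one-call route for the 3x3 example above, assuming exactly two MPI
   processes (P0 owns rows 0-1 and two local columns, P1 owns row 2 and one local column);
   error checking omitted.

.vb
     Mat               A;
     PetscMPIInt       rank;
     const PetscInt    i0[] = {0,1,3}, j0[] = {0,0,2};
     const PetscScalar v0[] = {1,2,3};
     const PetscInt    i1[] = {0,3},   j1[] = {0,1,2};
     const PetscScalar v1[] = {4,5,6};

     MPI_Comm_rank(PETSC_COMM_WORLD,&rank);
     MatCreateMPIAIJWithArrays(PETSC_COMM_WORLD,rank ? 1 : 2,rank ? 1 : 2,3,3,
                               rank ? i1 : i0,rank ? j1 : j0,rank ? v1 : v0,&A);
.ve
*/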

4261: /*@
4262:      MatUpdateMPIAIJWithArrays - updates an MPI AIJ matrix using arrays that contain the local rows
4263:          in standard CSR format. Only the numerical values are updated; the other arrays must be identical to those used to create the matrix

4265:    Collective

4267:    Input Parameters:
4268: +  mat - the matrix
4269: .  m - number of local rows (Cannot be PETSC_DECIDE)
4270: .  n - This value should be the same as the local size used in creating the
4271:        x vector for the matrix-vector product y = Ax. (or PETSC_DECIDE to have
4272:        calculated if N is given) For square matrices n is almost always m.
4273: .  M - number of global rows (or PETSC_DETERMINE to have calculated if m is given)
4274: .  N - number of global columns (or PETSC_DETERMINE to have calculated if n is given)
4275: .  Ii - row indices; that is Ii[0] = 0, Ii[row] = Ii[row-1] + number of elements in that row of the matrix
4276: .  J - column indices
4277: -  v - matrix values

4279:    Level: intermediate

4281: .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatMPIAIJSetPreallocation(), MatMPIAIJSetPreallocationCSR(),
4282:           MATMPIAIJ, MatCreateAIJ(), MatCreateMPIAIJWithSplitArrays(), MatUpdateMPIAIJWithArrays()
4283: @*/
4284: PetscErrorCode MatUpdateMPIAIJWithArrays(Mat mat,PetscInt m,PetscInt n,PetscInt M,PetscInt N,const PetscInt Ii[],const PetscInt J[],const PetscScalar v[])
4285: {
4287:   PetscInt       cstart,nnz,i,j;
4288:   PetscInt       *ld;
4289:   PetscBool      nooffprocentries;
4290:   Mat_MPIAIJ     *Aij = (Mat_MPIAIJ*)mat->data;
4291:   Mat_SeqAIJ     *Ad  = (Mat_SeqAIJ*)Aij->A->data, *Ao  = (Mat_SeqAIJ*)Aij->B->data;
4292:   PetscScalar    *ad = Ad->a, *ao = Ao->a;
4293:   const PetscInt *Adi = Ad->i;
4294:   PetscInt       ldi,Iii,md;

4297:   if (Ii[0]) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"i (row indices) must start with 0");
4298:   if (m < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"local number of rows (m) cannot be PETSC_DECIDE, or negative");
4299:   if (m != mat->rmap->n) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Local number of rows cannot change from call to MatUpdateMPIAIJWithArrays()");
4300:   if (n != mat->cmap->n) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Local number of columns cannot change from call to MatUpdateMPIAIJWithArrays()");

4302:   cstart = mat->cmap->rstart;
4303:   if (!Aij->ld) {
4304:     /* count number of entries below block diagonal */
4305:     PetscCalloc1(m,&ld);
4306:     Aij->ld = ld;
4307:     for (i=0; i<m; i++) {
4308:       nnz  = Ii[i+1]- Ii[i];
4309:       j     = 0;
4310:       while  (j < nnz && J[j] < cstart) {j++;} /* test j < nnz before reading J[j] */
4311:       J    += nnz;
4312:       ld[i] = j;
4313:     }
4314:   } else {
4315:     ld = Aij->ld;
4316:   }

4318:   for (i=0; i<m; i++) {
4319:     nnz  = Ii[i+1]- Ii[i];
4320:     Iii  = Ii[i];
4321:     ldi  = ld[i];
4322:     md   = Adi[i+1]-Adi[i];
4323:     PetscArraycpy(ao,v + Iii,ldi);
4324:     PetscArraycpy(ad,v + Iii + ldi,md);
4325:     PetscArraycpy(ao + ldi,v + Iii + ldi + md,nnz - ldi - md);
4326:     ad  += md;
4327:     ao  += nnz - md;
4328:   }
4329:   nooffprocentries      = mat->nooffprocentries;
4330:   mat->nooffprocentries = PETSC_TRUE;
4331:   PetscObjectStateIncrease((PetscObject)Aij->A);
4332:   PetscObjectStateIncrease((PetscObject)Aij->B);
4333:   PetscObjectStateIncrease((PetscObject)mat);
4334:   MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY);
4335:   MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY);
4336:   mat->nooffprocentries = nooffprocentries;
4337:   return(0);
4338: }
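
/*
   Editorial example, not part of mpiaij.c: a hedged sketch of refreshing only the
   numerical values of an MPIAIJ matrix created with MatCreateMPIAIJWithArrays().
   The arrays i,j,v and the sizes m,n,M,N are assumed to be the same ones that
   were passed at creation time; only v is recomputed here.
*/
#include <petscmat.h>

static PetscErrorCode RefreshValuesExample(Mat A,PetscInt m,PetscInt n,PetscInt M,PetscInt N,const PetscInt i[],const PetscInt j[],PetscScalar v[])
{
  PetscErrorCode ierr;
  PetscInt       k;

  PetscFunctionBegin;
  for (k=0; k<i[m]; k++) v[k] *= 2.0;                                /* recompute values; the sparsity pattern must not change */
  ierr = MatUpdateMPIAIJWithArrays(A,m,n,M,N,i,j,v);CHKERRQ(ierr);   /* copy the new values into the assembled matrix */
  PetscFunctionReturn(0);
}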

4340: /*@C
4341:    MatCreateAIJ - Creates a sparse parallel matrix in AIJ format
4342:    (the default parallel PETSc format).  For good matrix assembly performance
4343:    the user should preallocate the matrix storage by setting the parameters
4344:    d_nz (or d_nnz) and o_nz (or o_nnz).  By setting these parameters accurately,
4345:    performance can be increased by more than a factor of 50.

4347:    Collective

4349:    Input Parameters:
4350: +  comm - MPI communicator
4351: .  m - number of local rows (or PETSC_DECIDE to have it calculated if M is given)
4352:            This value should be the same as the local size used in creating the
4353:            y vector for the matrix-vector product y = Ax.
4354: .  n - This value should be the same as the local size used in creating the
4355:        x vector for the matrix-vector product y = Ax (or PETSC_DECIDE to have
4356:        it calculated if N is given). For square matrices n is almost always m.
4357: .  M - number of global rows (or PETSC_DETERMINE to have it calculated if m is given)
4358: .  N - number of global columns (or PETSC_DETERMINE to have it calculated if n is given)
4359: .  d_nz  - number of nonzeros per row in DIAGONAL portion of local submatrix
4360:            (same value is used for all local rows)
4361: .  d_nnz - array containing the number of nonzeros in the various rows of the
4362:            DIAGONAL portion of the local submatrix (possibly different for each row)
4363:            or NULL, if d_nz is used to specify the nonzero structure.
4364:            The size of this array is equal to the number of local rows, i.e., 'm'.
4365: .  o_nz  - number of nonzeros per row in the OFF-DIAGONAL portion of local
4366:            submatrix (same value is used for all local rows).
4367: -  o_nnz - array containing the number of nonzeros in the various rows of the
4368:            OFF-DIAGONAL portion of the local submatrix (possibly different for
4369:            each row) or NULL, if o_nz is used to specify the nonzero
4370:            structure. The size of this array is equal to the number
4371:            of local rows, i.e., 'm'.

4373:    Output Parameter:
4374: .  A - the matrix

4376:    It is recommended that one use the MatCreate(), MatSetType() and/or MatSetFromOptions(),
4377:    MatXXXXSetPreallocation() paradigm instead of this routine directly.
4378:    [MatXXXXSetPreallocation() is, for example, MatSeqAIJSetPreallocation()]

4380:    Notes:
4381:    If the *_nnz parameter is given then the *_nz parameter is ignored

4383:    m,n,M,N parameters specify the size of the matrix, and its partitioning across
4384:    processors, while d_nz,d_nnz,o_nz,o_nnz parameters specify the approximate
4385:    storage requirements for this matrix.

4387:    If PETSC_DECIDE or PETSC_DETERMINE is used for a particular argument on one
4388:    processor then it must be used on all processors that share the object for
4389:    that argument.

4391:    The user MUST specify either the local or global matrix dimensions
4392:    (possibly both).

4394:    The parallel matrix is partitioned across processors such that the
4395:    first m0 rows belong to process 0, the next m1 rows belong to
4396:    process 1, the next m2 rows belong to process 2, etc., where
4397:    m0,m1,m2,.. are the input parameter 'm', i.e., each processor stores
4398:    values corresponding to an [m x N] submatrix.

4400:    The columns are logically partitioned with the n0 columns belonging
4401:    to the 0th partition, the next n1 columns belonging to the next
4402:    partition, etc., where n0,n1,n2... are the input parameter 'n'.

4404:    The DIAGONAL portion of the local submatrix on any given processor
4405:    is the submatrix corresponding to the rows and columns m,n
4406:    owned by the given processor, i.e., the diagonal submatrix on
4407:    process 0 is [m0 x n0], the diagonal submatrix on process 1 is [m1 x n1],
4408:    etc. The remaining portion of the local submatrix [m x (N-n)]
4409:    constitutes the OFF-DIAGONAL portion. The example below better
4410:    illustrates this concept.

4412:    For a square global matrix we define each processor's diagonal portion
4413:    to be its local rows and the corresponding columns (a square submatrix);
4414:    each processor's off-diagonal portion encompasses the remainder of the
4415:    local matrix (a rectangular submatrix).

4417:    If o_nnz, d_nnz are specified, then o_nz, and d_nz are ignored.

4419:    When calling this routine with a single process communicator, a matrix of
4420:    type SEQAIJ is returned.  If a matrix of type MPIAIJ is desired for this
4421:    type of communicator, use the construction mechanism
4422: .vb
4423:      MatCreate(...,&A);
4424:      MatSetType(A,MATMPIAIJ);
4425:      MatSetSizes(A, m,n,M,N);
4426:      MatMPIAIJSetPreallocation(A,...);
4427: .ve

4431:    By default, this format uses inodes (identical nodes) when possible.
4432:    We search for consecutive rows with the same nonzero structure, thereby
4433:    reusing matrix information to achieve increased efficiency.

4435:    Options Database Keys:
4436: +  -mat_no_inode  - Do not use inodes
4437: -  -mat_inode_limit <limit> - Sets inode limit (max limit=5)



4441:    Example usage:

4443:    Consider the following 8x8 matrix with 34 non-zero values that is
4444:    assembled across 3 processors. Let's assume that proc0 owns 3 rows,
4445:    proc1 owns 3 rows, proc2 owns 2 rows. This division can be shown
4446:    as follows

4448: .vb
4449:             1  2  0  |  0  3  0  |  0  4
4450:     Proc0   0  5  6  |  7  0  0  |  8  0
4451:             9  0 10  | 11  0  0  | 12  0
4452:     -------------------------------------
4453:            13  0 14  | 15 16 17  |  0  0
4454:     Proc1   0 18  0  | 19 20 21  |  0  0
4455:             0  0  0  | 22 23  0  | 24  0
4456:     -------------------------------------
4457:     Proc2  25 26 27  |  0  0 28  | 29  0
4458:            30  0  0  | 31 32 33  |  0 34
4459: .ve

4461:    This can be represented as a collection of submatrices as

4463: .vb
4464:       A B C
4465:       D E F
4466:       G H I
4467: .ve

4469:    Where the submatrices A,B,C are owned by proc0, D,E,F are
4470:    owned by proc1, G,H,I are owned by proc2.

4472:    The 'm' parameters for proc0,proc1,proc2 are 3,3,2 respectively.
4473:    The 'n' parameters for proc0,proc1,proc2 are 3,3,2 respectively.
4474:    The 'M','N' parameters are 8,8, and have the same values on all procs.

4476:    The DIAGONAL submatrices corresponding to proc0,proc1,proc2 are
4477:    submatrices [A], [E], [I] respectively. The OFF-DIAGONAL submatrices
4478:    corresponding to proc0,proc1,proc2 are [BC], [DF], [GH] respectively.
4479:    Internally, each processor stores the DIAGONAL part, and the OFF-DIAGONAL
4480:    part as SeqAIJ matrices. For example, proc1 will store [E] as a SeqAIJ
4481:    matrix, and [DF] as another SeqAIJ matrix.

4483:    When d_nz, o_nz parameters are specified, d_nz storage elements are
4484:    allocated for every row of the local diagonal submatrix, and o_nz
4485:    storage locations are allocated for every row of the OFF-DIAGONAL submat.
4486:    One way to choose d_nz and o_nz is to use the maximum number of nonzeros per
4487:    local row for each of the local DIAGONAL and OFF-DIAGONAL submatrices.
4488:    In this case, the values of d_nz,o_nz are
4489: .vb
4490:      proc0 : d_nz = 2, o_nz = 2
4491:      proc1 : d_nz = 3, o_nz = 2
4492:      proc2 : d_nz = 1, o_nz = 4
4493: .ve
4494:    We are allocating m*(d_nz+o_nz) storage locations for every proc. This
4495:    translates to 3*(2+2)=12 for proc0, 3*(3+2)=15 for proc1, 2*(1+4)=10
4496:    for proc2, i.e., we are using 12+15+10=37 storage locations to store
4497:    34 values.

4499:    When d_nnz, o_nnz parameters are specified, the storage is specified
4500:    for every row, corresponding to both DIAGONAL and OFF-DIAGONAL submatrices.
4501:    In the above case the values for d_nnz,o_nnz are
4502: .vb
4503:      proc0: d_nnz = [2,2,2] and o_nnz = [2,2,2]
4504:      proc1: d_nnz = [3,3,2] and o_nnz = [2,1,1]
4505:      proc2: d_nnz = [1,1]   and o_nnz = [4,4]
4506: .ve
4507:    Here the space allocated is the sum of all the above values, i.e., 34, and
4508:    hence the preallocation is perfect.

4510:    Level: intermediate

4512: .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatMPIAIJSetPreallocation(), MatMPIAIJSetPreallocationCSR(),
4513:           MATMPIAIJ, MatCreateMPIAIJWithArrays()
4514: @*/
4515: PetscErrorCode  MatCreateAIJ(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt M,PetscInt N,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[],Mat *A)
4516: {
4518:   PetscMPIInt    size;

4521:   MatCreate(comm,A);
4522:   MatSetSizes(*A,m,n,M,N);
4523:   MPI_Comm_size(comm,&size);
4524:   if (size > 1) {
4525:     MatSetType(*A,MATMPIAIJ);
4526:     MatMPIAIJSetPreallocation(*A,d_nz,d_nnz,o_nz,o_nnz);
4527:   } else {
4528:     MatSetType(*A,MATSEQAIJ);
4529:     MatSeqAIJSetPreallocation(*A,d_nz,d_nnz);
4530:   }
4531:   return(0);
4532: }
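
/*
   Editorial example, not part of mpiaij.c: a sketch of the preallocation call for
   the 8x8 example in the manual page above, assuming it is run on exactly three
   MPI ranks; the per-row d_nnz/o_nnz values are the ones worked out there.
*/
#include <petscmat.h>

int main(int argc,char **argv)
{
  Mat            A;
  PetscMPIInt    rank;
  PetscErrorCode ierr;
  const PetscInt mloc[3]     = {3,3,2};                     /* local rows (and columns) per rank */
  const PetscInt d_nnz[3][3] = {{2,2,2},{3,3,2},{1,1,0}};   /* DIAGONAL nonzeros per local row   */
  const PetscInt o_nnz[3][3] = {{2,2,2},{2,1,1},{4,4,0}};   /* OFF-DIAGONAL nonzeros per row     */

  ierr = PetscInitialize(&argc,&argv,NULL,NULL);if (ierr) return ierr;
  ierr = MPI_Comm_rank(PETSC_COMM_WORLD,&rank);CHKERRQ(ierr);
  ierr = MatCreateAIJ(PETSC_COMM_WORLD,mloc[rank],mloc[rank],8,8,0,d_nnz[rank],0,o_nnz[rank],&A);CHKERRQ(ierr);
  /* MatSetValues()/MatAssemblyBegin()/MatAssemblyEnd() would fill in the 34 entries here */
  ierr = MatDestroy(&A);CHKERRQ(ierr);
  ierr = PetscFinalize();
  return ierr;
}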

4534: PetscErrorCode MatMPIAIJGetSeqAIJ(Mat A,Mat *Ad,Mat *Ao,const PetscInt *colmap[])
4535: {
4536:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
4537:   PetscBool      flg;

4541:   PetscStrbeginswith(((PetscObject)A)->type_name,MATMPIAIJ,&flg);
4542:   if (!flg) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"This function requires a MATMPIAIJ matrix as input");
4543:   if (Ad)     *Ad     = a->A;
4544:   if (Ao)     *Ao     = a->B;
4545:   if (colmap) *colmap = a->garray;
4546:   return(0);
4547: }
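
/*
   Editorial example, not part of mpiaij.c: a hedged sketch of inspecting the
   diagonal and off-diagonal SeqAIJ blocks of an assembled MPIAIJ matrix; colmap
   translates local off-diagonal column indices to global column indices.
*/
#include <petscmat.h>

static PetscErrorCode InspectBlocksExample(Mat A)
{
  Mat            Ad,Ao;
  const PetscInt *colmap;
  PetscInt       nAo;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatMPIAIJGetSeqAIJ(A,&Ad,&Ao,&colmap);CHKERRQ(ierr);
  ierr = MatGetSize(Ao,NULL,&nAo);CHKERRQ(ierr);   /* number of nonzero off-process columns */
  if (nAo) {
    ierr = PetscPrintf(PETSC_COMM_SELF,"first off-diagonal column is global column %D\n",colmap[0]);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}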

4549: PetscErrorCode MatCreateMPIMatConcatenateSeqMat_MPIAIJ(MPI_Comm comm,Mat inmat,PetscInt n,MatReuse scall,Mat *outmat)
4550: {
4552:   PetscInt       m,N,i,rstart,nnz,Ii;
4553:   PetscInt       *indx;
4554:   PetscScalar    *values;

4557:   MatGetSize(inmat,&m,&N);
4558:   if (scall == MAT_INITIAL_MATRIX) { /* symbolic phase */
4559:     PetscInt       *dnz,*onz,sum,bs,cbs;

4561:     if (n == PETSC_DECIDE) {
4562:       PetscSplitOwnership(comm,&n,&N);
4563:     }
4564:     /* Check sum(n) = N */
4565:     MPIU_Allreduce(&n,&sum,1,MPIU_INT,MPI_SUM,comm);
4566:     if (sum != N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Sum of local columns %D != global columns %D",sum,N);

4568:     MPI_Scan(&m, &rstart,1,MPIU_INT,MPI_SUM,comm);
4569:     rstart -= m;

4571:     MatPreallocateInitialize(comm,m,n,dnz,onz);
4572:     for (i=0; i<m; i++) {
4573:       MatGetRow_SeqAIJ(inmat,i,&nnz,&indx,NULL);
4574:       MatPreallocateSet(i+rstart,nnz,indx,dnz,onz);
4575:       MatRestoreRow_SeqAIJ(inmat,i,&nnz,&indx,NULL);
4576:     }

4578:     MatCreate(comm,outmat);
4579:     MatSetSizes(*outmat,m,n,PETSC_DETERMINE,PETSC_DETERMINE);
4580:     MatGetBlockSizes(inmat,&bs,&cbs);
4581:     MatSetBlockSizes(*outmat,bs,cbs);
4582:     MatSetType(*outmat,MATAIJ);
4583:     MatSeqAIJSetPreallocation(*outmat,0,dnz);
4584:     MatMPIAIJSetPreallocation(*outmat,0,dnz,0,onz);
4585:     MatPreallocateFinalize(dnz,onz);
4586:   }

4588:   /* numeric phase */
4589:   MatGetOwnershipRange(*outmat,&rstart,NULL);
4590:   for (i=0; i<m; i++) {
4591:     MatGetRow_SeqAIJ(inmat,i,&nnz,&indx,&values);
4592:     Ii   = i + rstart;
4593:     MatSetValues(*outmat,1,&Ii,nnz,indx,values,INSERT_VALUES);
4594:     MatRestoreRow_SeqAIJ(inmat,i,&nnz,&indx,&values);
4595:   }
4596:   MatAssemblyBegin(*outmat,MAT_FINAL_ASSEMBLY);
4597:   MatAssemblyEnd(*outmat,MAT_FINAL_ASSEMBLY);
4598:   return(0);
4599: }

4601: PetscErrorCode MatFileSplit(Mat A,char *outfile)
4602: {
4603:   PetscErrorCode    ierr;
4604:   PetscMPIInt       rank;
4605:   PetscInt          m,N,i,rstart,nnz;
4606:   size_t            len;
4607:   const PetscInt    *indx;
4608:   PetscViewer       out;
4609:   char              *name;
4610:   Mat               B;
4611:   const PetscScalar *values;

4614:   MatGetLocalSize(A,&m,0);
4615:   MatGetSize(A,0,&N);
4616:   /* Should this be the type of the diagonal block of A? */
4617:   MatCreate(PETSC_COMM_SELF,&B);
4618:   MatSetSizes(B,m,N,m,N);
4619:   MatSetBlockSizesFromMats(B,A,A);
4620:   MatSetType(B,MATSEQAIJ);
4621:   MatSeqAIJSetPreallocation(B,0,NULL);
4622:   MatGetOwnershipRange(A,&rstart,0);
4623:   for (i=0; i<m; i++) {
4624:     MatGetRow(A,i+rstart,&nnz,&indx,&values);
4625:     MatSetValues(B,1,&i,nnz,indx,values,INSERT_VALUES);
4626:     MatRestoreRow(A,i+rstart,&nnz,&indx,&values);
4627:   }
4628:   MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);
4629:   MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);

4631:   MPI_Comm_rank(PetscObjectComm((PetscObject)A),&rank);
4632:   PetscStrlen(outfile,&len);
4633:   PetscMalloc1(len+5,&name);
4634:   sprintf(name,"%s.%d",outfile,rank);
4635:   PetscViewerBinaryOpen(PETSC_COMM_SELF,name,FILE_MODE_APPEND,&out);
4636:   PetscFree(name);
4637:   MatView(B,out);
4638:   PetscViewerDestroy(&out);
4639:   MatDestroy(&B);
4640:   return(0);
4641: }

4643: PetscErrorCode MatDestroy_MPIAIJ_SeqsToMPI(Mat A)
4644: {
4645:   PetscErrorCode      ierr;
4646:   Mat_Merge_SeqsToMPI *merge;
4647:   PetscContainer      container;

4650:   PetscObjectQuery((PetscObject)A,"MatMergeSeqsToMPI",(PetscObject*)&container);
4651:   if (container) {
4652:     PetscContainerGetPointer(container,(void**)&merge);
4653:     PetscFree(merge->id_r);
4654:     PetscFree(merge->len_s);
4655:     PetscFree(merge->len_r);
4656:     PetscFree(merge->bi);
4657:     PetscFree(merge->bj);
4658:     PetscFree(merge->buf_ri[0]);
4659:     PetscFree(merge->buf_ri);
4660:     PetscFree(merge->buf_rj[0]);
4661:     PetscFree(merge->buf_rj);
4662:     PetscFree(merge->coi);
4663:     PetscFree(merge->coj);
4664:     PetscFree(merge->owners_co);
4665:     PetscLayoutDestroy(&merge->rowmap);
4666:     PetscFree(merge);
4667:     PetscObjectCompose((PetscObject)A,"MatMergeSeqsToMPI",0);
4668:   }
4669:   MatDestroy_MPIAIJ(A);
4670:   return(0);
4671: }

4673:  #include <../src/mat/utils/freespace.h>
4674:  #include <petscbt.h>

4676: PetscErrorCode MatCreateMPIAIJSumSeqAIJNumeric(Mat seqmat,Mat mpimat)
4677: {
4678:   PetscErrorCode      ierr;
4679:   MPI_Comm            comm;
4680:   Mat_SeqAIJ          *a  =(Mat_SeqAIJ*)seqmat->data;
4681:   PetscMPIInt         size,rank,taga,*len_s;
4682:   PetscInt            N=mpimat->cmap->N,i,j,*owners,*ai=a->i,*aj;
4683:   PetscInt            proc,m;
4684:   PetscInt            **buf_ri,**buf_rj;
4685:   PetscInt            k,anzi,*bj_i,*bi,*bj,arow,bnzi,nextaj;
4686:   PetscInt            nrows,**buf_ri_k,**nextrow,**nextai;
4687:   MPI_Request         *s_waits,*r_waits;
4688:   MPI_Status          *status;
4689:   MatScalar           *aa=a->a;
4690:   MatScalar           **abuf_r,*ba_i;
4691:   Mat_Merge_SeqsToMPI *merge;
4692:   PetscContainer      container;

4695:   PetscObjectGetComm((PetscObject)mpimat,&comm);
4696:   PetscLogEventBegin(MAT_Seqstompinum,seqmat,0,0,0);

4698:   MPI_Comm_size(comm,&size);
4699:   MPI_Comm_rank(comm,&rank);

4701:   PetscObjectQuery((PetscObject)mpimat,"MatMergeSeqsToMPI",(PetscObject*)&container);
4702:   PetscContainerGetPointer(container,(void**)&merge);

4704:   bi     = merge->bi;
4705:   bj     = merge->bj;
4706:   buf_ri = merge->buf_ri;
4707:   buf_rj = merge->buf_rj;

4709:   PetscMalloc1(size,&status);
4710:   owners = merge->rowmap->range;
4711:   len_s  = merge->len_s;

4713:   /* send and recv matrix values */
4714:   /*-----------------------------*/
4715:   PetscObjectGetNewTag((PetscObject)mpimat,&taga);
4716:   PetscPostIrecvScalar(comm,taga,merge->nrecv,merge->id_r,merge->len_r,&abuf_r,&r_waits);

4718:   PetscMalloc1(merge->nsend+1,&s_waits);
4719:   for (proc=0,k=0; proc<size; proc++) {
4720:     if (!len_s[proc]) continue;
4721:     i    = owners[proc];
4722:     MPI_Isend(aa+ai[i],len_s[proc],MPIU_MATSCALAR,proc,taga,comm,s_waits+k);
4723:     k++;
4724:   }

4726:   if (merge->nrecv) {MPI_Waitall(merge->nrecv,r_waits,status);}
4727:   if (merge->nsend) {MPI_Waitall(merge->nsend,s_waits,status);}
4728:   PetscFree(status);

4730:   PetscFree(s_waits);
4731:   PetscFree(r_waits);

4733:   /* insert mat values of mpimat */
4734:   /*----------------------------*/
4735:   PetscMalloc1(N,&ba_i);
4736:   PetscMalloc3(merge->nrecv,&buf_ri_k,merge->nrecv,&nextrow,merge->nrecv,&nextai);

4738:   for (k=0; k<merge->nrecv; k++) {
4739:     buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */
4740:     nrows       = *(buf_ri_k[k]);
4741:     nextrow[k]  = buf_ri_k[k]+1;  /* next row number of k-th recved i-structure */
4742:     nextai[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th recved i-structure  */
4743:   }

4745:   /* set values of ba */
4746:   m = merge->rowmap->n;
4747:   for (i=0; i<m; i++) {
4748:     arow = owners[rank] + i;
4749:     bj_i = bj+bi[i];  /* col indices of the i-th row of mpimat */
4750:     bnzi = bi[i+1] - bi[i];
4751:     PetscArrayzero(ba_i,bnzi);

4753:     /* add local non-zero vals of this proc's seqmat into ba */
4754:     anzi   = ai[arow+1] - ai[arow];
4755:     aj     = a->j + ai[arow];
4756:     aa     = a->a + ai[arow];
4757:     nextaj = 0;
4758:     for (j=0; nextaj<anzi; j++) {
4759:       if (*(bj_i + j) == aj[nextaj]) { /* bcol == acol */
4760:         ba_i[j] += aa[nextaj++];
4761:       }
4762:     }

4764:     /* add received vals into ba */
4765:     for (k=0; k<merge->nrecv; k++) { /* k-th received message */
4766:       /* i-th row */
4767:       if (i == *nextrow[k]) {
4768:         anzi   = *(nextai[k]+1) - *nextai[k];
4769:         aj     = buf_rj[k] + *(nextai[k]);
4770:         aa     = abuf_r[k] + *(nextai[k]);
4771:         nextaj = 0;
4772:         for (j=0; nextaj<anzi; j++) {
4773:           if (*(bj_i + j) == aj[nextaj]) { /* bcol == acol */
4774:             ba_i[j] += aa[nextaj++];
4775:           }
4776:         }
4777:         nextrow[k]++; nextai[k]++;
4778:       }
4779:     }
4780:     MatSetValues(mpimat,1,&arow,bnzi,bj_i,ba_i,INSERT_VALUES);
4781:   }
4782:   MatAssemblyBegin(mpimat,MAT_FINAL_ASSEMBLY);
4783:   MatAssemblyEnd(mpimat,MAT_FINAL_ASSEMBLY);

4785:   PetscFree(abuf_r[0]);
4786:   PetscFree(abuf_r);
4787:   PetscFree(ba_i);
4788:   PetscFree3(buf_ri_k,nextrow,nextai);
4789:   PetscLogEventEnd(MAT_Seqstompinum,seqmat,0,0,0);
4790:   return(0);
4791: }

4793: PetscErrorCode  MatCreateMPIAIJSumSeqAIJSymbolic(MPI_Comm comm,Mat seqmat,PetscInt m,PetscInt n,Mat *mpimat)
4794: {
4795:   PetscErrorCode      ierr;
4796:   Mat                 B_mpi;
4797:   Mat_SeqAIJ          *a=(Mat_SeqAIJ*)seqmat->data;
4798:   PetscMPIInt         size,rank,tagi,tagj,*len_s,*len_si,*len_ri;
4799:   PetscInt            **buf_rj,**buf_ri,**buf_ri_k;
4800:   PetscInt            M=seqmat->rmap->n,N=seqmat->cmap->n,i,*owners,*ai=a->i,*aj=a->j;
4801:   PetscInt            len,proc,*dnz,*onz,bs,cbs;
4802:   PetscInt            k,anzi,*bi,*bj,*lnk,nlnk,arow,bnzi,nspacedouble=0;
4803:   PetscInt            nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextai;
4804:   MPI_Request         *si_waits,*sj_waits,*ri_waits,*rj_waits;
4805:   MPI_Status          *status;
4806:   PetscFreeSpaceList  free_space=NULL,current_space=NULL;
4807:   PetscBT             lnkbt;
4808:   Mat_Merge_SeqsToMPI *merge;
4809:   PetscContainer      container;

4812:   PetscLogEventBegin(MAT_Seqstompisym,seqmat,0,0,0);

4814:   /* make sure it is a PETSc comm */
4815:   PetscCommDuplicate(comm,&comm,NULL);
4816:   MPI_Comm_size(comm,&size);
4817:   MPI_Comm_rank(comm,&rank);

4819:   PetscNew(&merge);
4820:   PetscMalloc1(size,&status);

4822:   /* determine row ownership */
4823:   /*---------------------------------------------------------*/
4824:   PetscLayoutCreate(comm,&merge->rowmap);
4825:   PetscLayoutSetLocalSize(merge->rowmap,m);
4826:   PetscLayoutSetSize(merge->rowmap,M);
4827:   PetscLayoutSetBlockSize(merge->rowmap,1);
4828:   PetscLayoutSetUp(merge->rowmap);
4829:   PetscMalloc1(size,&len_si);
4830:   PetscMalloc1(size,&merge->len_s);

4832:   m      = merge->rowmap->n;
4833:   owners = merge->rowmap->range;

4835:   /* determine the number of messages to send, their lengths */
4836:   /*---------------------------------------------------------*/
4837:   len_s = merge->len_s;

4839:   len          = 0; /* length of buf_si[] */
4840:   merge->nsend = 0;
4841:   for (proc=0; proc<size; proc++) {
4842:     len_si[proc] = 0;
4843:     if (proc == rank) {
4844:       len_s[proc] = 0;
4845:     } else {
4846:       len_si[proc] = owners[proc+1] - owners[proc] + 1;
4847:       len_s[proc]  = ai[owners[proc+1]] - ai[owners[proc]]; /* num of rows to be sent to [proc] */
4848:     }
4849:     if (len_s[proc]) {
4850:       merge->nsend++;
4851:       nrows = 0;
4852:       for (i=owners[proc]; i<owners[proc+1]; i++) {
4853:         if (ai[i+1] > ai[i]) nrows++;
4854:       }
4855:       len_si[proc] = 2*(nrows+1);
4856:       len         += len_si[proc];
4857:     }
4858:   }

4860:   /* determine the number and length of messages to receive for ij-structure */
4861:   /*-------------------------------------------------------------------------*/
4862:   PetscGatherNumberOfMessages(comm,NULL,len_s,&merge->nrecv);
4863:   PetscGatherMessageLengths2(comm,merge->nsend,merge->nrecv,len_s,len_si,&merge->id_r,&merge->len_r,&len_ri);

4865:   /* post the Irecv of j-structure */
4866:   /*-------------------------------*/
4867:   PetscCommGetNewTag(comm,&tagj);
4868:   PetscPostIrecvInt(comm,tagj,merge->nrecv,merge->id_r,merge->len_r,&buf_rj,&rj_waits);

4870:   /* post the Isend of j-structure */
4871:   /*--------------------------------*/
4872:   PetscMalloc2(merge->nsend,&si_waits,merge->nsend,&sj_waits);

4874:   for (proc=0, k=0; proc<size; proc++) {
4875:     if (!len_s[proc]) continue;
4876:     i    = owners[proc];
4877:     MPI_Isend(aj+ai[i],len_s[proc],MPIU_INT,proc,tagj,comm,sj_waits+k);
4878:     k++;
4879:   }

4881:   /* receives and sends of j-structure are complete */
4882:   /*------------------------------------------------*/
4883:   if (merge->nrecv) {MPI_Waitall(merge->nrecv,rj_waits,status);}
4884:   if (merge->nsend) {MPI_Waitall(merge->nsend,sj_waits,status);}

4886:   /* send and recv i-structure */
4887:   /*---------------------------*/
4888:   PetscCommGetNewTag(comm,&tagi);
4889:   PetscPostIrecvInt(comm,tagi,merge->nrecv,merge->id_r,len_ri,&buf_ri,&ri_waits);

4891:   PetscMalloc1(len+1,&buf_s);
4892:   buf_si = buf_s;  /* points to the beginning of k-th msg to be sent */
4893:   for (proc=0,k=0; proc<size; proc++) {
4894:     if (!len_s[proc]) continue;
4895:     /* form outgoing message for i-structure:
4896:          buf_si[0]:                 nrows to be sent
4897:                [1:nrows]:           row index (global)
4898:                [nrows+1:2*nrows+1]: i-structure index
4899:     */
4900:     /*-------------------------------------------*/
4901:     nrows       = len_si[proc]/2 - 1;
4902:     buf_si_i    = buf_si + nrows+1;
4903:     buf_si[0]   = nrows;
4904:     buf_si_i[0] = 0;
4905:     nrows       = 0;
4906:     for (i=owners[proc]; i<owners[proc+1]; i++) {
4907:       anzi = ai[i+1] - ai[i];
4908:       if (anzi) {
4909:         buf_si_i[nrows+1] = buf_si_i[nrows] + anzi; /* i-structure */
4910:         buf_si[nrows+1]   = i-owners[proc]; /* local row index */
4911:         nrows++;
4912:       }
4913:     }
4914:     MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,si_waits+k);
4915:     k++;
4916:     buf_si += len_si[proc];
4917:   }

4919:   if (merge->nrecv) {MPI_Waitall(merge->nrecv,ri_waits,status);}
4920:   if (merge->nsend) {MPI_Waitall(merge->nsend,si_waits,status);}

4922:   PetscInfo2(seqmat,"nsend: %D, nrecv: %D\n",merge->nsend,merge->nrecv);
4923:   for (i=0; i<merge->nrecv; i++) {
4924:     PetscInfo3(seqmat,"recv len_ri=%D, len_rj=%D from [%D]\n",len_ri[i],merge->len_r[i],merge->id_r[i]);
4925:   }

4927:   PetscFree(len_si);
4928:   PetscFree(len_ri);
4929:   PetscFree(rj_waits);
4930:   PetscFree2(si_waits,sj_waits);
4931:   PetscFree(ri_waits);
4932:   PetscFree(buf_s);
4933:   PetscFree(status);

4935:   /* compute a local seq matrix in each processor */
4936:   /*----------------------------------------------*/
4937:   /* allocate bi array and free space for accumulating nonzero column info */
4938:   PetscMalloc1(m+1,&bi);
4939:   bi[0] = 0;

4941:   /* create and initialize a linked list */
4942:   nlnk = N+1;
4943:   PetscLLCreate(N,N,nlnk,lnk,lnkbt);

4945:   /* initial FreeSpace size is 2*(num of local nnz(seqmat)) */
4946:   len  = ai[owners[rank+1]] - ai[owners[rank]];
4947:   PetscFreeSpaceGet(PetscIntMultTruncate(2,len)+1,&free_space);

4949:   current_space = free_space;

4951:   /* determine symbolic info for each local row */
4952:   PetscMalloc3(merge->nrecv,&buf_ri_k,merge->nrecv,&nextrow,merge->nrecv,&nextai);

4954:   for (k=0; k<merge->nrecv; k++) {
4955:     buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */
4956:     nrows       = *buf_ri_k[k];
4957:     nextrow[k]  = buf_ri_k[k] + 1;  /* next row number of k-th recved i-structure */
4958:     nextai[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th recved i-structure  */
4959:   }

4961:   MatPreallocateInitialize(comm,m,n,dnz,onz);
4962:   len  = 0;
4963:   for (i=0; i<m; i++) {
4964:     bnzi = 0;
4965:     /* add local non-zero cols of this proc's seqmat into lnk */
4966:     arow  = owners[rank] + i;
4967:     anzi  = ai[arow+1] - ai[arow];
4968:     aj    = a->j + ai[arow];
4969:     PetscLLAddSorted(anzi,aj,N,nlnk,lnk,lnkbt);
4970:     bnzi += nlnk;
4971:     /* add received col data into lnk */
4972:     for (k=0; k<merge->nrecv; k++) { /* k-th received message */
4973:       if (i == *nextrow[k]) { /* i-th row */
4974:         anzi  = *(nextai[k]+1) - *nextai[k];
4975:         aj    = buf_rj[k] + *nextai[k];
4976:         PetscLLAddSorted(anzi,aj,N,nlnk,lnk,lnkbt);
4977:         bnzi += nlnk;
4978:         nextrow[k]++; nextai[k]++;
4979:       }
4980:     }
4981:     if (len < bnzi) len = bnzi;  /* =max(bnzi) */

4983:     /* if free space is not available, make more free space */
4984:     if (current_space->local_remaining<bnzi) {
4985:       PetscFreeSpaceGet(PetscIntSumTruncate(bnzi,current_space->total_array_size),&current_space);
4986:       nspacedouble++;
4987:     }
4988:     /* copy data into free space, then initialize lnk */
4989:     PetscLLClean(N,N,bnzi,lnk,current_space->array,lnkbt);
4990:     MatPreallocateSet(i+owners[rank],bnzi,current_space->array,dnz,onz);

4992:     current_space->array           += bnzi;
4993:     current_space->local_used      += bnzi;
4994:     current_space->local_remaining -= bnzi;

4996:     bi[i+1] = bi[i] + bnzi;
4997:   }

4999:   PetscFree3(buf_ri_k,nextrow,nextai);

5001:   PetscMalloc1(bi[m]+1,&bj);
5002:   PetscFreeSpaceContiguous(&free_space,bj);
5003:   PetscLLDestroy(lnk,lnkbt);

5005:   /* create symbolic parallel matrix B_mpi */
5006:   /*---------------------------------------*/
5007:   MatGetBlockSizes(seqmat,&bs,&cbs);
5008:   MatCreate(comm,&B_mpi);
5009:   if (n==PETSC_DECIDE) {
5010:     MatSetSizes(B_mpi,m,n,PETSC_DETERMINE,N);
5011:   } else {
5012:     MatSetSizes(B_mpi,m,n,PETSC_DETERMINE,PETSC_DETERMINE);
5013:   }
5014:   MatSetBlockSizes(B_mpi,bs,cbs);
5015:   MatSetType(B_mpi,MATMPIAIJ);
5016:   MatMPIAIJSetPreallocation(B_mpi,0,dnz,0,onz);
5017:   MatPreallocateFinalize(dnz,onz);
5018:   MatSetOption(B_mpi,MAT_NEW_NONZERO_ALLOCATION_ERR,PETSC_FALSE);

5020:   /* B_mpi is not ready for use - assembly will be done by MatCreateMPIAIJSumSeqAIJNumeric() */
5021:   B_mpi->assembled    = PETSC_FALSE;
5022:   B_mpi->ops->destroy = MatDestroy_MPIAIJ_SeqsToMPI;
5023:   merge->bi           = bi;
5024:   merge->bj           = bj;
5025:   merge->buf_ri       = buf_ri;
5026:   merge->buf_rj       = buf_rj;
5027:   merge->coi          = NULL;
5028:   merge->coj          = NULL;
5029:   merge->owners_co    = NULL;

5031:   PetscCommDestroy(&comm);

5033:   /* attach the supporting struct to B_mpi for reuse */
5034:   PetscContainerCreate(PETSC_COMM_SELF,&container);
5035:   PetscContainerSetPointer(container,merge);
5036:   PetscObjectCompose((PetscObject)B_mpi,"MatMergeSeqsToMPI",(PetscObject)container);
5037:   PetscContainerDestroy(&container);
5038:   *mpimat = B_mpi;

5040:   PetscLogEventEnd(MAT_Seqstompisym,seqmat,0,0,0);
5041:   return(0);
5042: }

5044: /*@C
5045:       MatCreateMPIAIJSumSeqAIJ - Creates a MATMPIAIJ matrix by adding sequential
5046:                  matrices from each processor

5048:     Collective

5050:    Input Parameters:
5051: +    comm - the communicator the parallel matrix will live on
5052: .    seqmat - the input sequential matrix on each process
5053: .    m - number of local rows (or PETSC_DECIDE)
5054: .    n - number of local columns (or PETSC_DECIDE)
5055: -    scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX

5057:    Output Parameter:
5058: .    mpimat - the parallel matrix generated

5060:     Level: advanced

5062:    Notes:
5063:      The dimensions of the sequential matrix in each processor MUST be the same.
5064:      The input seqmat is included in the container "Mat_Merge_SeqsToMPI", and will be
5065:      destroyed when mpimat is destroyed. Call PetscObjectQuery() to access seqmat.
5066: @*/
5067: PetscErrorCode MatCreateMPIAIJSumSeqAIJ(MPI_Comm comm,Mat seqmat,PetscInt m,PetscInt n,MatReuse scall,Mat *mpimat)
5068: {
5070:   PetscMPIInt    size;

5073:   MPI_Comm_size(comm,&size);
5074:   if (size == 1) {
5075:     PetscLogEventBegin(MAT_Seqstompi,seqmat,0,0,0);
5076:     if (scall == MAT_INITIAL_MATRIX) {
5077:       MatDuplicate(seqmat,MAT_COPY_VALUES,mpimat);
5078:     } else {
5079:       MatCopy(seqmat,*mpimat,SAME_NONZERO_PATTERN);
5080:     }
5081:     PetscLogEventEnd(MAT_Seqstompi,seqmat,0,0,0);
5082:     return(0);
5083:   }
5084:   PetscLogEventBegin(MAT_Seqstompi,seqmat,0,0,0);
5085:   if (scall == MAT_INITIAL_MATRIX) {
5086:     MatCreateMPIAIJSumSeqAIJSymbolic(comm,seqmat,m,n,mpimat);
5087:   }
5088:   MatCreateMPIAIJSumSeqAIJNumeric(seqmat,*mpimat);
5089:   PetscLogEventEnd(MAT_Seqstompi,seqmat,0,0,0);
5090:   return(0);
5091: }
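
/*
   Editorial example, not part of mpiaij.c: a hedged sketch in which every rank
   contributes an M-by-M SeqAIJ matrix (here just a scaled identity) and the
   parallel sum of all contributions is assembled into one MPIAIJ matrix.
*/
#include <petscmat.h>

int main(int argc,char **argv)
{
  Mat            seq,mpi;
  PetscInt       i,M = 8;
  PetscMPIInt    rank;
  PetscScalar    v;
  PetscErrorCode ierr;

  ierr = PetscInitialize(&argc,&argv,NULL,NULL);if (ierr) return ierr;
  ierr = MPI_Comm_rank(PETSC_COMM_WORLD,&rank);CHKERRQ(ierr);
  ierr = MatCreateSeqAIJ(PETSC_COMM_SELF,M,M,1,NULL,&seq);CHKERRQ(ierr);   /* same dimensions on every rank */
  v    = (PetscScalar)(rank+1);
  for (i=0; i<M; i++) {ierr = MatSetValue(seq,i,i,v,INSERT_VALUES);CHKERRQ(ierr);}
  ierr = MatAssemblyBegin(seq,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(seq,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatCreateMPIAIJSumSeqAIJ(PETSC_COMM_WORLD,seq,PETSC_DECIDE,PETSC_DECIDE,MAT_INITIAL_MATRIX,&mpi);CHKERRQ(ierr);
  ierr = MatView(mpi,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
  ierr = MatDestroy(&mpi);CHKERRQ(ierr);
  ierr = MatDestroy(&seq);CHKERRQ(ierr);
  ierr = PetscFinalize();
  return ierr;
}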

5093: /*@
5094:      MatMPIAIJGetLocalMat - Creates a SeqAIJ matrix from a MATMPIAIJ matrix by taking all its local rows and putting them into a sequential matrix with
5095:           mlocal rows and n columns, where mlocal is the row count obtained with MatGetLocalSize() and n is the global column count obtained
5096:           with MatGetSize()

5098:     Not Collective

5100:    Input Parameters:
5101: +    A - the matrix
5102: -    scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX

5104:    Output Parameter:
5105: .    A_loc - the local sequential matrix generated

5107:     Level: developer

5109: .seealso: MatGetOwnershipRange(), MatMPIAIJGetLocalMatCondensed()

5111: @*/
5112: PetscErrorCode MatMPIAIJGetLocalMat(Mat A,MatReuse scall,Mat *A_loc)
5113: {
5115:   Mat_MPIAIJ     *mpimat=(Mat_MPIAIJ*)A->data;
5116:   Mat_SeqAIJ     *mat,*a,*b;
5117:   PetscInt       *ai,*aj,*bi,*bj,*cmap=mpimat->garray;
5118:   MatScalar      *aa,*ba,*cam;
5119:   PetscScalar    *ca;
5120:   PetscInt       am=A->rmap->n,i,j,k,cstart=A->cmap->rstart;
5121:   PetscInt       *ci,*cj,col,ncols_d,ncols_o,jo;
5122:   PetscBool      match;
5123:   MPI_Comm       comm;
5124:   PetscMPIInt    size;

5127:   PetscStrbeginswith(((PetscObject)A)->type_name,MATMPIAIJ,&match);
5128:   if (!match) SETERRQ(PetscObjectComm((PetscObject)A), PETSC_ERR_SUP,"Requires MATMPIAIJ matrix as input");
5129:   PetscObjectGetComm((PetscObject)A,&comm);
5130:   MPI_Comm_size(comm,&size);
5131:   if (size == 1 && scall == MAT_REUSE_MATRIX) return(0);

5133:   PetscLogEventBegin(MAT_Getlocalmat,A,0,0,0);
5134:   a = (Mat_SeqAIJ*)(mpimat->A)->data;
5135:   b = (Mat_SeqAIJ*)(mpimat->B)->data;
5136:   ai = a->i; aj = a->j; bi = b->i; bj = b->j;
5137:   aa = a->a; ba = b->a;
5138:   if (scall == MAT_INITIAL_MATRIX) {
5139:     if (size == 1) {
5140:       MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,A->cmap->N,ai,aj,aa,A_loc);
5141:       return(0);
5142:     }

5144:     PetscMalloc1(1+am,&ci);
5145:     ci[0] = 0;
5146:     for (i=0; i<am; i++) {
5147:       ci[i+1] = ci[i] + (ai[i+1] - ai[i]) + (bi[i+1] - bi[i]);
5148:     }
5149:     PetscMalloc1(1+ci[am],&cj);
5150:     PetscMalloc1(1+ci[am],&ca);
5151:     k    = 0;
5152:     for (i=0; i<am; i++) {
5153:       ncols_o = bi[i+1] - bi[i];
5154:       ncols_d = ai[i+1] - ai[i];
5155:       /* off-diagonal portion of A */
5156:       for (jo=0; jo<ncols_o; jo++) {
5157:         col = cmap[*bj];
5158:         if (col >= cstart) break;
5159:         cj[k]   = col; bj++;
5160:         ca[k++] = *ba++;
5161:       }
5162:       /* diagonal portion of A */
5163:       for (j=0; j<ncols_d; j++) {
5164:         cj[k]   = cstart + *aj++;
5165:         ca[k++] = *aa++;
5166:       }
5167:       /* off-diagonal portion of A */
5168:       for (j=jo; j<ncols_o; j++) {
5169:         cj[k]   = cmap[*bj++];
5170:         ca[k++] = *ba++;
5171:       }
5172:     }
5173:     /* put together the new matrix */
5174:     MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,A->cmap->N,ci,cj,ca,A_loc);
5175:     /* MatCreateSeqAIJWithArrays flags matrix so PETSc doesn't free the user's arrays. */
5176:     /* Since these are PETSc arrays, change flags to free them as necessary. */
5177:     mat          = (Mat_SeqAIJ*)(*A_loc)->data;
5178:     mat->free_a  = PETSC_TRUE;
5179:     mat->free_ij = PETSC_TRUE;
5180:     mat->nonew   = 0;
5181:   } else if (scall == MAT_REUSE_MATRIX) {
5182:     mat=(Mat_SeqAIJ*)(*A_loc)->data;
5183:     ci = mat->i; cj = mat->j; cam = mat->a;
5184:     for (i=0; i<am; i++) {
5185:       /* off-diagonal portion of A */
5186:       ncols_o = bi[i+1] - bi[i];
5187:       for (jo=0; jo<ncols_o; jo++) {
5188:         col = cmap[*bj];
5189:         if (col >= cstart) break;
5190:         *cam++ = *ba++; bj++;
5191:       }
5192:       /* diagonal portion of A */
5193:       ncols_d = ai[i+1] - ai[i];
5194:       for (j=0; j<ncols_d; j++) *cam++ = *aa++;
5195:       /* off-diagonal portion of A */
5196:       for (j=jo; j<ncols_o; j++) {
5197:         *cam++ = *ba++; bj++;
5198:       }
5199:     }
5200:   } else SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Invalid MatReuse %d",(int)scall);
5201:   PetscLogEventEnd(MAT_Getlocalmat,A,0,0,0);
5202:   return(0);
5203: }
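
/*
   Editorial example, not part of mpiaij.c: a hedged sketch of pulling this rank's
   rows of a parallel AIJ matrix into one sequential matrix (local rows by global
   columns) and refreshing that copy in place after the values of A change.
*/
#include <petscmat.h>

static PetscErrorCode LocalRowsExample(Mat A)
{
  Mat            Aloc;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatMPIAIJGetLocalMat(A,MAT_INITIAL_MATRIX,&Aloc);CHKERRQ(ierr);
  /* ... use Aloc; once A's numerical values change, update the copy in place ... */
  ierr = MatMPIAIJGetLocalMat(A,MAT_REUSE_MATRIX,&Aloc);CHKERRQ(ierr);
  ierr = MatDestroy(&Aloc);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}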

5205: /*@C
5206:      MatMPIAIJGetLocalMatCondensed - Creates a SeqAIJ matrix from a MATMPIAIJ matrix by taking all its local rows and NON-ZERO columns

5208:     Not Collective

5210:    Input Parameters:
5211: +    A - the matrix
5212: .    scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
5213: -    row, col - index sets of rows and columns to extract (or NULL)

5215:    Output Parameter:
5216: .    A_loc - the local sequential matrix generated

5218:     Level: developer

5220: .seealso: MatGetOwnershipRange(), MatMPIAIJGetLocalMat()

5222: @*/
5223: PetscErrorCode MatMPIAIJGetLocalMatCondensed(Mat A,MatReuse scall,IS *row,IS *col,Mat *A_loc)
5224: {
5225:   Mat_MPIAIJ     *a=(Mat_MPIAIJ*)A->data;
5227:   PetscInt       i,start,end,ncols,nzA,nzB,*cmap,imark,*idx;
5228:   IS             isrowa,iscola;
5229:   Mat            *aloc;
5230:   PetscBool      match;

5233:   PetscObjectTypeCompare((PetscObject)A,MATMPIAIJ,&match);
5234:   if (!match) SETERRQ(PetscObjectComm((PetscObject)A), PETSC_ERR_SUP,"Requires MATMPIAIJ matrix as input");
5235:   PetscLogEventBegin(MAT_Getlocalmatcondensed,A,0,0,0);
5236:   if (!row) {
5237:     start = A->rmap->rstart; end = A->rmap->rend;
5238:     ISCreateStride(PETSC_COMM_SELF,end-start,start,1,&isrowa);
5239:   } else {
5240:     isrowa = *row;
5241:   }
5242:   if (!col) {
5243:     start = A->cmap->rstart;
5244:     cmap  = a->garray;
5245:     nzA   = a->A->cmap->n;
5246:     nzB   = a->B->cmap->n;
5247:     PetscMalloc1(nzA+nzB, &idx);
5248:     ncols = 0;
5249:     for (i=0; i<nzB; i++) {
5250:       if (cmap[i] < start) idx[ncols++] = cmap[i];
5251:       else break;
5252:     }
5253:     imark = i;
5254:     for (i=0; i<nzA; i++) idx[ncols++] = start + i;
5255:     for (i=imark; i<nzB; i++) idx[ncols++] = cmap[i];
5256:     ISCreateGeneral(PETSC_COMM_SELF,ncols,idx,PETSC_OWN_POINTER,&iscola);
5257:   } else {
5258:     iscola = *col;
5259:   }
5260:   if (scall != MAT_INITIAL_MATRIX) {
5261:     PetscMalloc1(1,&aloc);
5262:     aloc[0] = *A_loc;
5263:   }
5264:   MatCreateSubMatrices(A,1,&isrowa,&iscola,scall,&aloc);
5265:   if (!col) { /* attach global id of condensed columns */
5266:     PetscObjectCompose((PetscObject)aloc[0],"_petsc_GetLocalMatCondensed_iscol",(PetscObject)iscola);
5267:   }
5268:   *A_loc = aloc[0];
5269:   PetscFree(aloc);
5270:   if (!row) {
5271:     ISDestroy(&isrowa);
5272:   }
5273:   if (!col) {
5274:     ISDestroy(&iscola);
5275:   }
5276:   PetscLogEventEnd(MAT_Getlocalmatcondensed,A,0,0,0);
5277:   return(0);
5278: }
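
/*
   Editorial example, not part of mpiaij.c: a hedged sketch of extracting this
   rank's rows restricted to the locally nonzero columns; passing NULL for the
   row and column index sets lets the routine construct them itself.
*/
#include <petscmat.h>

static PetscErrorCode CondensedExample(Mat A)
{
  Mat            Aloc;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatMPIAIJGetLocalMatCondensed(A,MAT_INITIAL_MATRIX,NULL,NULL,&Aloc);CHKERRQ(ierr);
  /* ... use the condensed local matrix ... */
  ierr = MatDestroy(&Aloc);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}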

5280: /*
5281:  * Destroy a mat that may be composed with PetscSF communication objects.
5282:  * The SF objects were created in MatCreateSeqSubMatrixWithRows_Private.
5283:  * */
5284: PetscErrorCode MatDestroy_SeqAIJ_PetscSF(Mat mat)
5285: {
5286:   PetscSF          sf,osf;
5287:   IS               map;
5288:   PetscErrorCode   ierr;

5291:   PetscObjectQuery((PetscObject)mat,"diagsf",(PetscObject*)&sf);
5292:   PetscObjectQuery((PetscObject)mat,"offdiagsf",(PetscObject*)&osf);
5293:   PetscSFDestroy(&sf);
5294:   PetscSFDestroy(&osf);
5295:   PetscObjectQuery((PetscObject)mat,"aoffdiagtopothmapping",(PetscObject*)&map);
5296:   ISDestroy(&map);
5297:   MatDestroy_SeqAIJ(mat);
5298:   return(0);
5299: }

5301: /*
5302:  * Create a sequential AIJ matrix based on row indices: all columns of a row are extracted once the row is matched.
5303:  * Rows could be local or remote. The routine is designed to be scalable in memory so that nothing is based
5304:  * on a global size.
5305:  * */
5306: PetscErrorCode MatCreateSeqSubMatrixWithRows_Private(Mat P,IS rows,Mat *P_oth)
5307: {
5308:   Mat_MPIAIJ               *p=(Mat_MPIAIJ*)P->data;
5309:   Mat_SeqAIJ               *pd=(Mat_SeqAIJ*)(p->A)->data,*po=(Mat_SeqAIJ*)(p->B)->data,*p_oth;
5310:   PetscInt                 plocalsize,nrows,*ilocal,*oilocal,i,owner,lidx,*nrcols,*nlcols,ncol;
5311:   PetscSFNode              *iremote,*oiremote;
5312:   const PetscInt           *lrowindices;
5313:   PetscErrorCode           ierr;
5314:   PetscSF                  sf,osf;
5315:   PetscInt                 pcstart,*roffsets,*loffsets,*pnnz,j;
5316:   PetscInt                 ontotalcols,dntotalcols,ntotalcols,nout;
5317:   MPI_Comm                 comm;
5318:   ISLocalToGlobalMapping   mapping;

5321:   PetscObjectGetComm((PetscObject)P,&comm);
5322:   /* plocalsize is the number of roots
5323:    * nrows is the number of leaves
5324:    * */
5325:   MatGetLocalSize(P,&plocalsize,NULL);
5326:   ISGetLocalSize(rows,&nrows);
5327:   PetscCalloc1(nrows,&iremote);
5328:   ISGetIndices(rows,&lrowindices);
5329:   for (i=0;i<nrows;i++) {
5330:     /* Find a remote index and an owner for a row
5331:      * The row could be local or remote
5332:      * */
5333:     owner = 0;
5334:     lidx  = 0;
5335:     PetscLayoutFindOwnerIndex(P->rmap,lrowindices[i],&owner,&lidx);
5336:     iremote[i].index = lidx;
5337:     iremote[i].rank  = owner;
5338:   }
5339:   /* Create SF to communicate how many nonzero columns for each row */
5340:   PetscSFCreate(comm,&sf);
5341:   /* SF will figure out the number of nonzero columns for each row, and their
5342:    * offsets
5343:    * */
5344:   PetscSFSetGraph(sf,plocalsize,nrows,NULL,PETSC_OWN_POINTER,iremote,PETSC_OWN_POINTER);
5345:   PetscSFSetFromOptions(sf);
5346:   PetscSFSetUp(sf);

5348:   PetscCalloc1(2*(plocalsize+1),&roffsets);
5349:   PetscCalloc1(2*plocalsize,&nrcols);
5350:   PetscCalloc1(nrows,&pnnz);
5351:   roffsets[0] = 0;
5352:   roffsets[1] = 0;
5353:   for (i=0;i<plocalsize;i++) {
5354:     /* diag */
5355:     nrcols[i*2+0] = pd->i[i+1] - pd->i[i];
5356:     /* off diag */
5357:     nrcols[i*2+1] = po->i[i+1] - po->i[i];
5358:     /* compute offsets so that we know the relative location of each row */
5359:     roffsets[(i+1)*2+0] = roffsets[i*2+0] + nrcols[i*2+0];
5360:     roffsets[(i+1)*2+1] = roffsets[i*2+1] + nrcols[i*2+1];
5361:   }
5362:   PetscCalloc1(2*nrows,&nlcols);
5363:   PetscCalloc1(2*nrows,&loffsets);
5364:   /* 'r' means root, and 'l' means leaf */
5365:   PetscSFBcastBegin(sf,MPIU_2INT,nrcols,nlcols);
5366:   PetscSFBcastBegin(sf,MPIU_2INT,roffsets,loffsets);
5367:   PetscSFBcastEnd(sf,MPIU_2INT,nrcols,nlcols);
5368:   PetscSFBcastEnd(sf,MPIU_2INT,roffsets,loffsets);
5369:   PetscSFDestroy(&sf);
5370:   PetscFree(roffsets);
5371:   PetscFree(nrcols);
5372:   dntotalcols = 0;
5373:   ontotalcols = 0;
5374:   ncol = 0;
5375:   for (i=0;i<nrows;i++) {
5376:     pnnz[i] = nlcols[i*2+0] + nlcols[i*2+1];
5377:     ncol = PetscMax(pnnz[i],ncol);
5378:     /* diag */
5379:     dntotalcols += nlcols[i*2+0];
5380:     /* off diag */
5381:     ontotalcols += nlcols[i*2+1];
5382:   }
5383:   /* We do not need to figure out the right number of columns
5384:    * since all the calculations will be done by going through the raw data
5385:    * */
5386:   MatCreateSeqAIJ(PETSC_COMM_SELF,nrows,ncol,0,pnnz,P_oth);
5387:   MatSetUp(*P_oth);
5388:   PetscFree(pnnz);
5389:   p_oth = (Mat_SeqAIJ*) (*P_oth)->data;
5390:   /* diag */
5391:   PetscCalloc1(dntotalcols,&iremote);
5392:   /* off diag */
5393:   PetscCalloc1(ontotalcols,&oiremote);
5394:   /* diag */
5395:   PetscCalloc1(dntotalcols,&ilocal);
5396:   /* off diag */
5397:   PetscCalloc1(ontotalcols,&oilocal);
5398:   dntotalcols = 0;
5399:   ontotalcols = 0;
5400:   ntotalcols  = 0;
5401:   for (i=0;i<nrows;i++) {
5402:     owner = 0;
5403:     PetscLayoutFindOwnerIndex(P->rmap,lrowindices[i],&owner,NULL);
5404:     /* Set iremote for diag matrix */
5405:     for (j=0;j<nlcols[i*2+0];j++) {
5406:       iremote[dntotalcols].index   = loffsets[i*2+0] + j;
5407:       iremote[dntotalcols].rank    = owner;
5408:       /* P_oth is SeqAIJ, so ilocal needs to point to the first part of memory */
5409:       ilocal[dntotalcols++]        = ntotalcols++;
5410:     }
5411:     /* off diag */
5412:     for (j=0;j<nlcols[i*2+1];j++) {
5413:       oiremote[ontotalcols].index   = loffsets[i*2+1] + j;
5414:       oiremote[ontotalcols].rank    = owner;
5415:       oilocal[ontotalcols++]        = ntotalcols++;
5416:     }
5417:   }
5418:   ISRestoreIndices(rows,&lrowindices);
5419:   PetscFree(loffsets);
5420:   PetscFree(nlcols);
5421:   PetscSFCreate(comm,&sf);
5422:   /* P serves as roots and P_oth is leaves
5423:    * Diag matrix
5424:    * */
5425:   PetscSFSetGraph(sf,pd->i[plocalsize],dntotalcols,ilocal,PETSC_OWN_POINTER,iremote,PETSC_OWN_POINTER);
5426:   PetscSFSetFromOptions(sf);
5427:   PetscSFSetUp(sf);

5429:   PetscSFCreate(comm,&osf);
5430:   /* Off diag */
5431:   PetscSFSetGraph(osf,po->i[plocalsize],ontotalcols,oilocal,PETSC_OWN_POINTER,oiremote,PETSC_OWN_POINTER);
5432:   PetscSFSetFromOptions(osf);
5433:   PetscSFSetUp(osf);
5434:   /* We operate on the matrix internal data for saving memory */
5435:   PetscSFBcastBegin(sf,MPIU_SCALAR,pd->a,p_oth->a);
5436:   PetscSFBcastBegin(osf,MPIU_SCALAR,po->a,p_oth->a);
5437:   MatGetOwnershipRangeColumn(P,&pcstart,NULL);
5438:   /* Convert to global indices for diag matrix */
5439:   for (i=0;i<pd->i[plocalsize];i++) pd->j[i] += pcstart;
5440:   PetscSFBcastBegin(sf,MPIU_INT,pd->j,p_oth->j);
5441:   /* We want P_oth to store global indices */
5442:   ISLocalToGlobalMappingCreate(comm,1,p->B->cmap->n,p->garray,PETSC_COPY_VALUES,&mapping);
5443:   /* Use memory scalable approach */
5444:   ISLocalToGlobalMappingSetType(mapping,ISLOCALTOGLOBALMAPPINGHASH);
5445:   ISLocalToGlobalMappingApply(mapping,po->i[plocalsize],po->j,po->j);
5446:   PetscSFBcastBegin(osf,MPIU_INT,po->j,p_oth->j);
5447:   PetscSFBcastEnd(sf,MPIU_INT,pd->j,p_oth->j);
5448:   /* Convert back to local indices */
5449:   for (i=0;i<pd->i[plocalsize];i++) pd->j[i] -= pcstart;
5450:   PetscSFBcastEnd(osf,MPIU_INT,po->j,p_oth->j);
5451:   nout = 0;
5452:   ISGlobalToLocalMappingApply(mapping,IS_GTOLM_DROP,po->i[plocalsize],po->j,&nout,po->j);
5453:   if (nout != po->i[plocalsize]) SETERRQ2(comm,PETSC_ERR_ARG_INCOMP,"n %D does not equal to nout %D \n",po->i[plocalsize],nout);
5454:   ISLocalToGlobalMappingDestroy(&mapping);
5455:   /* Exchange values */
5456:   PetscSFBcastEnd(sf,MPIU_SCALAR,pd->a,p_oth->a);
5457:   PetscSFBcastEnd(osf,MPIU_SCALAR,po->a,p_oth->a);
5458:   /* Stop PETSc from shrinking memory */
5459:   for (i=0;i<nrows;i++) p_oth->ilen[i] = p_oth->imax[i];
5460:   MatAssemblyBegin(*P_oth,MAT_FINAL_ASSEMBLY);
5461:   MatAssemblyEnd(*P_oth,MAT_FINAL_ASSEMBLY);
5462:   /* Attach PetscSF objects to P_oth so that we can reuse it later */
5463:   PetscObjectCompose((PetscObject)*P_oth,"diagsf",(PetscObject)sf);
5464:   PetscObjectCompose((PetscObject)*P_oth,"offdiagsf",(PetscObject)osf);
5465:   /* The "new" MatDestroy takes care of the PetscSF objects as well */
5466:   (*P_oth)->ops->destroy = MatDestroy_SeqAIJ_PetscSF;
5467:   return(0);
5468: }

5470: /*
5471:  * Creates a SeqAIJ matrix by taking the rows of B that correspond to the nonzero columns of the local A
5472:  * This supports MPIAIJ and MAIJ
5473:  * */
5474: PetscErrorCode MatGetBrowsOfAcols_MPIXAIJ(Mat A,Mat P,PetscInt dof,MatReuse reuse,Mat *P_oth)
5475: {
5476:   Mat_MPIAIJ            *a=(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data;
5477:   Mat_SeqAIJ            *p_oth;
5478:   Mat_SeqAIJ            *pd=(Mat_SeqAIJ*)(p->A)->data,*po=(Mat_SeqAIJ*)(p->B)->data;
5479:   IS                    rows,map;
5480:   PetscHMapI            hamp;
5481:   PetscInt              i,htsize,*rowindices,off,*mapping,key,count;
5482:   MPI_Comm              comm;
5483:   PetscSF               sf,osf;
5484:   PetscBool             has;
5485:   PetscErrorCode        ierr;

5488:   PetscObjectGetComm((PetscObject)A,&comm);
5489:   PetscLogEventBegin(MAT_GetBrowsOfAocols,A,P,0,0);
5490:   /* If it is the first time, create an index set of off-diag nonzero columns of A,
5491:    *  and then create a submatrix (that often is an overlapping matrix)
5492:    * */
5493:   if (reuse==MAT_INITIAL_MATRIX) {
5494:     /* Use a hash table to figure out unique keys */
5495:     PetscHMapICreate(&hamp);
5496:     PetscHMapIResize(hamp,a->B->cmap->n);
5497:     PetscCalloc1(a->B->cmap->n,&mapping);
5498:     count = 0;
5499:     /* Assume that a->garray is sorted, otherwise the following does not make sense */
5500:     for (i=0;i<a->B->cmap->n;i++) {
5501:       key  = a->garray[i]/dof;
5502:       PetscHMapIHas(hamp,key,&has);
5503:       if (!has) {
5504:         mapping[i] = count;
5505:         PetscHMapISet(hamp,key,count++);
5506:       } else {
5507:         /* Current 'i' has the same value as the previous step */
5508:         mapping[i] = count-1;
5509:       }
5510:     }
5511:     ISCreateGeneral(comm,a->B->cmap->n,mapping,PETSC_OWN_POINTER,&map);
5512:     PetscHMapIGetSize(hamp,&htsize);
5513:     if (htsize!=count) SETERRQ2(comm,PETSC_ERR_ARG_INCOMP," Size of hash map %D is inconsistent with count %D \n",htsize,count);
5514:     PetscCalloc1(htsize,&rowindices);
5515:     off = 0;
5516:     PetscHMapIGetKeys(hamp,&off,rowindices);
5517:     PetscHMapIDestroy(&hamp);
5518:     PetscSortInt(htsize,rowindices);
5519:     ISCreateGeneral(comm,htsize,rowindices,PETSC_OWN_POINTER,&rows);
5520:     /* In case the matrix was already created but the user wants to recreate it */
5521:     MatDestroy(P_oth);
5522:     MatCreateSeqSubMatrixWithRows_Private(P,rows,P_oth);
5523:     PetscObjectCompose((PetscObject)*P_oth,"aoffdiagtopothmapping",(PetscObject)map);
5524:     ISDestroy(&rows);
5525:   } else if (reuse==MAT_REUSE_MATRIX) {
5526:     /* If the matrix was already created, we simply update the values using the SF objects
5527:      * that were attached to the matrix earlier.
5528:      *  */
5529:     PetscObjectQuery((PetscObject)*P_oth,"diagsf",(PetscObject*)&sf);
5530:     PetscObjectQuery((PetscObject)*P_oth,"offdiagsf",(PetscObject*)&osf);
5531:     if (!sf || !osf) {
5532:       SETERRQ(comm,PETSC_ERR_ARG_NULL,"Matrix is not initialized yet \n");
5533:     }
5534:     p_oth = (Mat_SeqAIJ*) (*P_oth)->data;
5535:     /* Update values in place */
5536:     PetscSFBcastBegin(sf,MPIU_SCALAR,pd->a,p_oth->a);
5537:     PetscSFBcastBegin(osf,MPIU_SCALAR,po->a,p_oth->a);
5538:     PetscSFBcastEnd(sf,MPIU_SCALAR,pd->a,p_oth->a);
5539:     PetscSFBcastEnd(osf,MPIU_SCALAR,po->a,p_oth->a);
5540:   } else {
5541:     SETERRQ(comm,PETSC_ERR_ARG_UNKNOWN_TYPE,"Unknown reuse type \n");
5542:   }
5543:   PetscLogEventEnd(MAT_GetBrowsOfAocols,A,P,0,0);
5544:   return(0);
5545: }

5547: /*@C
5548:     MatGetBrowsOfAcols - Creates a SeqAIJ matrix by taking the rows of B that correspond to the nonzero columns of the local A

5550:     Collective on Mat

5552:    Input Parameters:
5553: +    A,B - the matrices in mpiaij format
5554: .    scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
5555: -    rowb, colb - index sets of rows and columns of B to extract (or NULL)

5557:    Output Parameter:
5558: +    rowb, colb - index sets of rows and columns of B to extract
5559: -    B_seq - the sequential matrix generated

5561:     Level: developer

5563: @*/
5564: PetscErrorCode MatGetBrowsOfAcols(Mat A,Mat B,MatReuse scall,IS *rowb,IS *colb,Mat *B_seq)
5565: {
5566:   Mat_MPIAIJ     *a=(Mat_MPIAIJ*)A->data;
5568:   PetscInt       *idx,i,start,ncols,nzA,nzB,*cmap,imark;
5569:   IS             isrowb,iscolb;
5570:   Mat            *bseq=NULL;

5573:   if (A->cmap->rstart != B->rmap->rstart || A->cmap->rend != B->rmap->rend) {
5574:     SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, (%D, %D) != (%D,%D)",A->cmap->rstart,A->cmap->rend,B->rmap->rstart,B->rmap->rend);
5575:   }
5576:   PetscLogEventBegin(MAT_GetBrowsOfAcols,A,B,0,0);

5578:   if (scall == MAT_INITIAL_MATRIX) {
5579:     start = A->cmap->rstart;
5580:     cmap  = a->garray;
5581:     nzA   = a->A->cmap->n;
5582:     nzB   = a->B->cmap->n;
5583:     PetscMalloc1(nzA+nzB, &idx);
5584:     ncols = 0;
5585:     for (i=0; i<nzB; i++) {  /* row < local row index */
5586:       if (cmap[i] < start) idx[ncols++] = cmap[i];
5587:       else break;
5588:     }
5589:     imark = i;
5590:     for (i=0; i<nzA; i++) idx[ncols++] = start + i;  /* local rows */
5591:     for (i=imark; i<nzB; i++) idx[ncols++] = cmap[i]; /* row > local row index */
5592:     ISCreateGeneral(PETSC_COMM_SELF,ncols,idx,PETSC_OWN_POINTER,&isrowb);
5593:     ISCreateStride(PETSC_COMM_SELF,B->cmap->N,0,1,&iscolb);
5594:   } else {
5595:     if (!rowb || !colb) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"IS rowb and colb must be provided for MAT_REUSE_MATRIX");
5596:     isrowb  = *rowb; iscolb = *colb;
5597:     PetscMalloc1(1,&bseq);
5598:     bseq[0] = *B_seq;
5599:   }
5600:   MatCreateSubMatrices(B,1,&isrowb,&iscolb,scall,&bseq);
5601:   *B_seq = bseq[0];
5602:   PetscFree(bseq);
5603:   if (!rowb) {
5604:     ISDestroy(&isrowb);
5605:   } else {
5606:     *rowb = isrowb;
5607:   }
5608:   if (!colb) {
5609:     ISDestroy(&iscolb);
5610:   } else {
5611:     *colb = iscolb;
5612:   }
5613:   PetscLogEventEnd(MAT_GetBrowsOfAcols,A,B,0,0);
5614:   return(0);
5615: }
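
/*
   Editorial example, not part of mpiaij.c: a hedged sketch of gathering the rows
   of B needed by this rank's portion of A into one sequential matrix, assuming
   A's column layout matches B's row layout. The index sets created on the first
   call are kept so a later call can reuse them when only the values of B change.
*/
#include <petscmat.h>

static PetscErrorCode BrowsOfAcolsExample(Mat A,Mat B)
{
  Mat            Bseq = NULL;
  IS             rowb = NULL,colb = NULL;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatGetBrowsOfAcols(A,B,MAT_INITIAL_MATRIX,&rowb,&colb,&Bseq);CHKERRQ(ierr);
  /* ... after B's numerical values change, refresh Bseq with the saved index sets ... */
  ierr = MatGetBrowsOfAcols(A,B,MAT_REUSE_MATRIX,&rowb,&colb,&Bseq);CHKERRQ(ierr);
  ierr = ISDestroy(&rowb);CHKERRQ(ierr);
  ierr = ISDestroy(&colb);CHKERRQ(ierr);
  ierr = MatDestroy(&Bseq);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}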

5617: /*
5618:     MatGetBrowsOfAoCols_MPIAIJ - Creates a SeqAIJ matrix by taking the rows of B that correspond to the nonzero columns
5619:     of the OFF-DIAGONAL portion of the local A

5621:     Collective on Mat

5623:    Input Parameters:
5624: +    A,B - the matrices in mpiaij format
5625: -    scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX

5627:    Output Parameter:
5628: +    startsj_s - starting point in B's sending j-arrays, saved for MAT_REUSE (or NULL)
5629: .    startsj_r - starting point in B's receiving j-arrays, saved for MAT_REUSE (or NULL)
5630: .    bufa_ptr - array for sending matrix values, saved for MAT_REUSE (or NULL)
5631: -    B_oth - the sequential matrix generated with size aBn=a->B->cmap->n by B->cmap->N

5633:     Developer Notes: This directly accesses information inside the VecScatter associated with the matrix-vector product
5634:      for this matrix. This is not desirable.

5636:     Level: developer

5638: */
5639: PetscErrorCode MatGetBrowsOfAoCols_MPIAIJ(Mat A,Mat B,MatReuse scall,PetscInt **startsj_s,PetscInt **startsj_r,MatScalar **bufa_ptr,Mat *B_oth)
5640: {
5641:   PetscErrorCode         ierr;
5642:   Mat_MPIAIJ             *a=(Mat_MPIAIJ*)A->data;
5643:   Mat_SeqAIJ             *b_oth;
5644:   VecScatter             ctx;
5645:   MPI_Comm               comm;
5646:   const PetscMPIInt      *rprocs,*sprocs;
5647:   const PetscInt         *srow,*rstarts,*sstarts;
5648:   PetscInt               *rowlen,*bufj,*bufJ,ncols,aBn=a->B->cmap->n,row,*b_othi,*b_othj,*rvalues=NULL,*svalues=NULL,*cols,sbs,rbs;
5649:   PetscInt               i,j,k=0,l,ll,nrecvs,nsends,nrows,*rstartsj = 0,*sstartsj,len;
5650:   PetscScalar              *b_otha,*bufa,*bufA,*vals;
5651:   MPI_Request            *rwaits = NULL,*swaits = NULL;
5652:   MPI_Status             rstatus;
5653:   PetscMPIInt            jj,size,tag,rank,nsends_mpi,nrecvs_mpi;

5656:   PetscObjectGetComm((PetscObject)A,&comm);
5657:   MPI_Comm_size(comm,&size);

5659:   if (A->cmap->rstart != B->rmap->rstart || A->cmap->rend != B->rmap->rend) {
5660:     SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, (%d, %d) != (%d,%d)",A->cmap->rstart,A->cmap->rend,B->rmap->rstart,B->rmap->rend);
5661:   }
5662:   PetscLogEventBegin(MAT_GetBrowsOfAocols,A,B,0,0);
5663:   MPI_Comm_rank(comm,&rank);

5665:   if (size == 1) {
5666:     startsj_s = NULL;
5667:     bufa_ptr  = NULL;
5668:     *B_oth    = NULL;
5669:     return(0);
5670:   }

5672:   ctx = a->Mvctx;
5673:   tag = ((PetscObject)ctx)->tag;

5675:   if (ctx->inuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE," Scatter ctx already in use");
5676:   VecScatterGetRemote_Private(ctx,PETSC_TRUE/*send*/,&nsends,&sstarts,&srow,&sprocs,&sbs);
5677:   /* rprocs[] must be ordered so that indices received from them are ordered in rvalues[], which is key to algorithms used in this subroutine */
5678:   VecScatterGetRemoteOrdered_Private(ctx,PETSC_FALSE/*recv*/,&nrecvs,&rstarts,NULL/*indices not needed*/,&rprocs,&rbs);
5679:   PetscMPIIntCast(nsends,&nsends_mpi);
5680:   PetscMPIIntCast(nrecvs,&nrecvs_mpi);
5681:   PetscMalloc2(nrecvs,&rwaits,nsends,&swaits);

5683:   if (!startsj_s || !bufa_ptr) scall = MAT_INITIAL_MATRIX;
5684:   if (scall == MAT_INITIAL_MATRIX) {
5685:     /* i-array */
5686:     /*---------*/
5687:     /*  post receives */
5688:     if (nrecvs) {PetscMalloc1(rbs*(rstarts[nrecvs] - rstarts[0]),&rvalues);} /* rstarts can be NULL when nrecvs=0 */
5689:     for (i=0; i<nrecvs; i++) {
5690:       rowlen = rvalues + rstarts[i]*rbs;
5691:       nrows  = (rstarts[i+1]-rstarts[i])*rbs; /* num of indices to be received */
5692:       MPI_Irecv(rowlen,nrows,MPIU_INT,rprocs[i],tag,comm,rwaits+i);
5693:     }

5695:     /* pack the outgoing message */
5696:     PetscMalloc2(nsends+1,&sstartsj,nrecvs+1,&rstartsj);

5698:     sstartsj[0] = 0;
5699:     rstartsj[0] = 0;
5700:     len         = 0; /* total length of j or a array to be sent */
5701:     if (nsends) {
5702:       k    = sstarts[0]; /* ATTENTION: sstarts[0] and rstarts[0] are not necessarily zero */
5703:       PetscMalloc1(sbs*(sstarts[nsends]-sstarts[0]),&svalues);
5704:     }
5705:     for (i=0; i<nsends; i++) {
5706:       rowlen = svalues + (sstarts[i]-sstarts[0])*sbs;
5707:       nrows  = sstarts[i+1]-sstarts[i]; /* num of block rows */
5708:       for (j=0; j<nrows; j++) {
5709:         row = srow[k] + B->rmap->range[rank]; /* global row idx */
5710:         for (l=0; l<sbs; l++) {
5711:           MatGetRow_MPIAIJ(B,row+l,&ncols,NULL,NULL); /* rowlength */

5713:           rowlen[j*sbs+l] = ncols;

5715:           len += ncols;
5716:           MatRestoreRow_MPIAIJ(B,row+l,&ncols,NULL,NULL);
5717:         }
5718:         k++;
5719:       }
5720:       MPI_Isend(rowlen,nrows*sbs,MPIU_INT,sprocs[i],tag,comm,swaits+i);

5722:       sstartsj[i+1] = len;  /* starting point of (i+1)-th outgoing msg in bufj and bufa */
5723:     }
5724:     /* recvs and sends of i-array are completed */
5725:     i = nrecvs;
5726:     while (i--) {
5727:       MPI_Waitany(nrecvs_mpi,rwaits,&jj,&rstatus);
5728:     }
5729:     if (nsends) {MPI_Waitall(nsends_mpi,swaits,MPI_STATUSES_IGNORE);}
5730:     PetscFree(svalues);

5732:     /* allocate buffers for sending j and a arrays */
5733:     PetscMalloc1(len+1,&bufj);
5734:     PetscMalloc1(len+1,&bufa);

5736:     /* create i-array of B_oth */
5737:     PetscMalloc1(aBn+2,&b_othi);

5739:     b_othi[0] = 0;
5740:     len       = 0; /* total length of j or a array to be received */
5741:     k         = 0;
5742:     for (i=0; i<nrecvs; i++) {
5743:       rowlen = rvalues + (rstarts[i]-rstarts[0])*rbs;
5744:       nrows  = (rstarts[i+1]-rstarts[i])*rbs; /* num of rows to be received */
5745:       for (j=0; j<nrows; j++) {
5746:         b_othi[k+1] = b_othi[k] + rowlen[j];
5747:         PetscIntSumError(rowlen[j],len,&len);
5748:         k++;
5749:       }
5750:       rstartsj[i+1] = len; /* starting point of (i+1)-th incoming msg in bufj and bufa */
5751:     }
5752:     PetscFree(rvalues);

5754:     /* allocate space for j and a arrays of B_oth */
5755:     PetscMalloc1(b_othi[aBn]+1,&b_othj);
5756:     PetscMalloc1(b_othi[aBn]+1,&b_otha);

5758:     /* j-array */
5759:     /*---------*/
5760:     /*  post receives of j-array */
5761:     for (i=0; i<nrecvs; i++) {
5762:       nrows = rstartsj[i+1]-rstartsj[i]; /* length of the msg received */
5763:       MPI_Irecv(b_othj+rstartsj[i],nrows,MPIU_INT,rprocs[i],tag,comm,rwaits+i);
5764:     }

5766:     /* pack the outgoing message j-array */
5767:     if (nsends) k = sstarts[0];
5768:     for (i=0; i<nsends; i++) {
5769:       nrows = sstarts[i+1]-sstarts[i]; /* num of block rows */
5770:       bufJ  = bufj+sstartsj[i];
5771:       for (j=0; j<nrows; j++) {
5772:         row = srow[k++] + B->rmap->range[rank];  /* global row idx */
5773:         for (ll=0; ll<sbs; ll++) {
5774:           MatGetRow_MPIAIJ(B,row+ll,&ncols,&cols,NULL);
5775:           for (l=0; l<ncols; l++) {
5776:             *bufJ++ = cols[l];
5777:           }
5778:           MatRestoreRow_MPIAIJ(B,row+ll,&ncols,&cols,NULL);
5779:         }
5780:       }
5781:       MPI_Isend(bufj+sstartsj[i],sstartsj[i+1]-sstartsj[i],MPIU_INT,sprocs[i],tag,comm,swaits+i);
5782:     }

5784:     /* recvs and sends of j-array are completed */
5785:     i = nrecvs;
5786:     while (i--) {
5787:       MPI_Waitany(nrecvs_mpi,rwaits,&jj,&rstatus);
5788:     }
5789:     if (nsends) {MPI_Waitall(nsends_mpi,swaits,MPI_STATUSES_IGNORE);}
5790:   } else if (scall == MAT_REUSE_MATRIX) {
5791:     sstartsj = *startsj_s;
5792:     rstartsj = *startsj_r;
5793:     bufa     = *bufa_ptr;
5794:     b_oth    = (Mat_SeqAIJ*)(*B_oth)->data;
5795:     b_otha   = b_oth->a;
5796:   } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Unsupported MatReuse value");

5798:   /* a-array */
5799:   /*---------*/
5800:   /*  post receives of a-array */
5801:   for (i=0; i<nrecvs; i++) {
5802:     nrows = rstartsj[i+1]-rstartsj[i]; /* length of the msg received */
5803:     MPI_Irecv(b_otha+rstartsj[i],nrows,MPIU_SCALAR,rprocs[i],tag,comm,rwaits+i);
5804:   }

5806:   /* pack the outgoing message a-array */
5807:   if (nsends) k = sstarts[0];
5808:   for (i=0; i<nsends; i++) {
5809:     nrows = sstarts[i+1]-sstarts[i]; /* num of block rows */
5810:     bufA  = bufa+sstartsj[i];
5811:     for (j=0; j<nrows; j++) {
5812:       row = srow[k++] + B->rmap->range[rank];  /* global row idx */
5813:       for (ll=0; ll<sbs; ll++) {
5814:         MatGetRow_MPIAIJ(B,row+ll,&ncols,NULL,&vals);
5815:         for (l=0; l<ncols; l++) {
5816:           *bufA++ = vals[l];
5817:         }
5818:         MatRestoreRow_MPIAIJ(B,row+ll,&ncols,NULL,&vals);
5819:       }
5820:     }
5821:     MPI_Isend(bufa+sstartsj[i],sstartsj[i+1]-sstartsj[i],MPIU_SCALAR,sprocs[i],tag,comm,swaits+i);
5822:   }
5823:   /* recvs and sends of a-array are completed */
5824:   i = nrecvs;
5825:   while (i--) {
5826:     MPI_Waitany(nrecvs_mpi,rwaits,&jj,&rstatus);
5827:   }
5828:   if (nsends) {MPI_Waitall(nsends_mpi,swaits,MPI_STATUSES_IGNORE);}
5829:   PetscFree2(rwaits,swaits);

5831:   if (scall == MAT_INITIAL_MATRIX) {
5832:     /* put together the new matrix */
5833:     MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,aBn,B->cmap->N,b_othi,b_othj,b_otha,B_oth);

5835:     /* MatCreateSeqAIJWithArrays flags matrix so PETSc doesn't free the user's arrays. */
5836:     /* Since these are PETSc arrays, change flags to free them as necessary. */
5837:     b_oth          = (Mat_SeqAIJ*)(*B_oth)->data;
5838:     b_oth->free_a  = PETSC_TRUE;
5839:     b_oth->free_ij = PETSC_TRUE;
5840:     b_oth->nonew   = 0;

5842:     PetscFree(bufj);
5843:     if (!startsj_s || !bufa_ptr) {
5844:       PetscFree2(sstartsj,rstartsj);
5845:       PetscFree(bufa);
5846:     } else {
5847:       *startsj_s = sstartsj;
5848:       *startsj_r = rstartsj;
5849:       *bufa_ptr  = bufa;
5850:     }
5851:   }

5853:   VecScatterRestoreRemote_Private(ctx,PETSC_TRUE,&nsends,&sstarts,&srow,&sprocs,&sbs);
5854:   VecScatterRestoreRemoteOrdered_Private(ctx,PETSC_FALSE,&nrecvs,&rstarts,NULL,&rprocs,&rbs);
5855:   PetscLogEventEnd(MAT_GetBrowsOfAocols,A,B,0,0);
5856:   return(0);
5857: }
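
/*
   Editorial usage sketch (not part of mpiaij.c): one plausible way a matrix-product kernel could
   drive MatGetBrowsOfAoCols_MPIAIJ(), first building B_oth together with the communication buffers
   and later refreshing only the numerical values. Variable names are illustrative; error checking
   is omitted, as it is throughout this listing.
*/
static PetscErrorCode ExampleUseBrowsOfAoCols(Mat A,Mat B)
{
  Mat       B_oth      = NULL;
  PetscInt  *startsj_s = NULL,*startsj_r = NULL;
  MatScalar *bufa      = NULL;

  /* first pass: build the structure of B_oth and keep the send/recv bookkeeping arrays */
  MatGetBrowsOfAoCols_MPIAIJ(A,B,MAT_INITIAL_MATRIX,&startsj_s,&startsj_r,&bufa,&B_oth);

  /* ... use B_oth in a local (sequential) computation ... */

  /* later passes: only the values of B changed, so reuse the saved buffers and B_oth */
  MatGetBrowsOfAoCols_MPIAIJ(A,B,MAT_REUSE_MATRIX,&startsj_s,&startsj_r,&bufa,&B_oth);

  /* the caller owns the saved arrays (allocated with PetscMalloc2/PetscMalloc1 above) and B_oth */
  PetscFree2(startsj_s,startsj_r);
  PetscFree(bufa);
  MatDestroy(&B_oth);
  return(0);
}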

5859: /*@C
5860:   MatGetCommunicationStructs - Provides access to the communication structures used in matrix-vector multiplication.

5862:   Not Collective

5864:   Input Parameter:
5865: . A - The matrix in mpiaij format

5867:   Output Parameters:
5868: + lvec - The local vector holding off-process values from the argument to a matrix-vector product
5869: . colmap - A map from global column index to local index into lvec
5870: - multScatter - A scatter from the argument of a matrix-vector product to lvec

5872:   Level: developer

5874: @*/
5875: #if defined(PETSC_USE_CTABLE)
5876: PetscErrorCode MatGetCommunicationStructs(Mat A, Vec *lvec, PetscTable *colmap, VecScatter *multScatter)
5877: #else
5878: PetscErrorCode MatGetCommunicationStructs(Mat A, Vec *lvec, PetscInt *colmap[], VecScatter *multScatter)
5879: #endif
5880: {
5881:   Mat_MPIAIJ *a;

5888:   a = (Mat_MPIAIJ*) A->data;
5889:   if (lvec) *lvec = a->lvec;
5890:   if (colmap) *colmap = a->colmap;
5891:   if (multScatter) *multScatter = a->Mvctx;
5892:   return(0);
5893: }
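
/*
   Editorial usage sketch (not part of mpiaij.c): querying the communication structures of an assembled
   MPIAIJ matrix with MatGetCommunicationStructs(), for example to inspect the ghost-value vector used
   in MatMult(). The colmap argument is passed as NULL here because its type depends on PETSC_USE_CTABLE.
   Error checking is omitted, as it is throughout this listing.
*/
static PetscErrorCode ExampleInspectCommunication(Mat A)
{
  Vec        lvec;
  VecScatter Mvctx;
  PetscInt   nghost;

  MatGetCommunicationStructs(A,&lvec,NULL,&Mvctx);
  VecGetLocalSize(lvec,&nghost);
  PetscPrintf(PETSC_COMM_SELF,"off-process columns gathered for MatMult(): %D\n",nghost);
  return(0);
}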

5895: PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPIAIJCRL(Mat,MatType,MatReuse,Mat*);
5896: PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPIAIJPERM(Mat,MatType,MatReuse,Mat*);
5897: PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPIAIJSELL(Mat,MatType,MatReuse,Mat*);
5898: #if defined(PETSC_HAVE_MKL_SPARSE)
5899: PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPIAIJMKL(Mat,MatType,MatReuse,Mat*);
5900: #endif
5901: PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPISBAIJ(Mat,MatType,MatReuse,Mat*);
5902: #if defined(PETSC_HAVE_ELEMENTAL)
5903: PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_Elemental(Mat,MatType,MatReuse,Mat*);
5904: #endif
5905: #if defined(PETSC_HAVE_HYPRE)
5906: PETSC_INTERN PetscErrorCode MatConvert_AIJ_HYPRE(Mat,MatType,MatReuse,Mat*);
5907: PETSC_INTERN PetscErrorCode MatMatMatMult_Transpose_AIJ_AIJ(Mat,Mat,Mat,MatReuse,PetscReal,Mat*);
5908: #endif
5909: PETSC_INTERN PetscErrorCode MatConvert_XAIJ_IS(Mat,MatType,MatReuse,Mat*);
5910: PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPISELL(Mat,MatType,MatReuse,Mat*);
5911: PETSC_INTERN PetscErrorCode MatPtAP_IS_XAIJ(Mat,Mat,MatReuse,PetscReal,Mat*);

5913: /*
5914:     Computes (B'*A')' since computing A*B directly is untenable

5916:                n                       p                          p
5917:         (              )       (              )         (                  )
5918:       m (      A       )  *  n (       B      )   =   m (         C        )
5919:         (              )       (              )         (                  )

5921: */
5922: PetscErrorCode MatMatMultNumeric_MPIDense_MPIAIJ(Mat A,Mat B,Mat C)
5923: {
5925:   Mat            At,Bt,Ct;

5928:   MatTranspose(A,MAT_INITIAL_MATRIX,&At);
5929:   MatTranspose(B,MAT_INITIAL_MATRIX,&Bt);
5930:   MatMatMult(Bt,At,MAT_INITIAL_MATRIX,1.0,&Ct);
5931:   MatDestroy(&At);
5932:   MatDestroy(&Bt);
5933:   MatTranspose(Ct,MAT_REUSE_MATRIX,&C);
5934:   MatDestroy(&Ct);
5935:   return(0);
5936: }

5938: PetscErrorCode MatMatMultSymbolic_MPIDense_MPIAIJ(Mat A,Mat B,PetscReal fill,Mat *C)
5939: {
5941:   PetscInt       m=A->rmap->n,n=B->cmap->n;
5942:   Mat            Cmat;

5945:   if (A->cmap->n != B->rmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"A->cmap->n %D != B->rmap->n %D",A->cmap->n,B->rmap->n);
5946:   MatCreate(PetscObjectComm((PetscObject)A),&Cmat);
5947:   MatSetSizes(Cmat,m,n,PETSC_DETERMINE,PETSC_DETERMINE);
5948:   MatSetBlockSizesFromMats(Cmat,A,B);
5949:   MatSetType(Cmat,MATMPIDENSE);
5950:   MatMPIDenseSetPreallocation(Cmat,NULL);
5951:   MatAssemblyBegin(Cmat,MAT_FINAL_ASSEMBLY);
5952:   MatAssemblyEnd(Cmat,MAT_FINAL_ASSEMBLY);

5954:   Cmat->ops->matmultnumeric = MatMatMultNumeric_MPIDense_MPIAIJ;

5956:   *C = Cmat;
5957:   return(0);
5958: }

5960: /* ----------------------------------------------------------------*/
5961: PETSC_INTERN PetscErrorCode MatMatMult_MPIDense_MPIAIJ(Mat A,Mat B,MatReuse scall,PetscReal fill,Mat *C)
5962: {

5966:   if (scall == MAT_INITIAL_MATRIX) {
5967:     PetscLogEventBegin(MAT_MatMultSymbolic,A,B,0,0);
5968:     MatMatMultSymbolic_MPIDense_MPIAIJ(A,B,fill,C);
5969:     PetscLogEventEnd(MAT_MatMultSymbolic,A,B,0,0);
5970:   }
5971:   PetscLogEventBegin(MAT_MatMultNumeric,A,B,0,0);
5972:   MatMatMultNumeric_MPIDense_MPIAIJ(A,B,*C);
5973:   PetscLogEventEnd(MAT_MatMultNumeric,A,B,0,0);
5974:   return(0);
5975: }
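
/*
   Editorial usage sketch (not part of mpiaij.c): a user-level product that reaches
   MatMatMult_MPIDense_MPIAIJ() above (via the composed "MatMatMult_mpidense_mpiaij_C" registered in
   MatCreate_MPIAIJ() below), i.e. C = A*B with an MPIDENSE A and an MPIAIJ B, formed internally as
   (B'*A')'. Error checking is omitted, as it is throughout this listing.
*/
static PetscErrorCode ExampleDenseTimesAIJ(Mat A_dense,Mat B_aij)
{
  Mat C;

  /* symbolic plus numeric product; the fill estimate is not meaningful for a dense result */
  MatMatMult(A_dense,B_aij,MAT_INITIAL_MATRIX,PETSC_DEFAULT,&C);

  /* after the entries of A_dense or B_aij change, the product can be recomputed in place */
  MatMatMult(A_dense,B_aij,MAT_REUSE_MATRIX,PETSC_DEFAULT,&C);

  MatDestroy(&C);
  return(0);
}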

5977: /*MC
5978:    MATMPIAIJ - MATMPIAIJ = "mpiaij" - A matrix type to be used for parallel sparse matrices.

5980:    Options Database Keys:
5981: . -mat_type mpiaij - sets the matrix type to "mpiaij" during a call to MatSetFromOptions()

5983:    Level: beginner

5985:    Notes:
5986:     MatSetValues() may be called for this matrix type with a NULL argument for the numerical values;
5987:     in this case the values associated with the rows and columns one passes in are set to zero
5988:     in the matrix.

5990:     MatSetOption(,MAT_STRUCTURE_ONLY,PETSC_TRUE) may be called for this matrix type. In this case no
5991:     space is allocated for the nonzero entries and any entries passed with MatSetValues() are ignored.

5993: .seealso: MatCreateAIJ()
5994: M*/
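
/*
   Editorial usage sketch (not part of mpiaij.c): creating a parallel AIJ matrix and inserting a
   structural entry with a NULL value array, as described in the MATMPIAIJ notes above. The sizes
   and preallocation counts are illustrative only; error checking is omitted, as it is throughout
   this listing.
*/
static PetscErrorCode ExampleCreateMPIAIJ(MPI_Comm comm)
{
  Mat      A;
  PetscInt rstart;

  MatCreate(comm,&A);
  MatSetSizes(A,PETSC_DECIDE,PETSC_DECIDE,100,100);
  MatSetType(A,MATMPIAIJ);                      /* or -mat_type mpiaij with MatSetFromOptions() */
  MatMPIAIJSetPreallocation(A,5,NULL,2,NULL);   /* 5 diagonal-block and 2 off-diagonal-block nonzeros per row */

  /* a NULL value array sets the entry (rstart,rstart) to zero, per the notes above */
  MatGetOwnershipRange(A,&rstart,NULL);
  MatSetValues(A,1,&rstart,1,&rstart,NULL,INSERT_VALUES);

  MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
  MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
  MatDestroy(&A);
  return(0);
}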

5996: PETSC_EXTERN PetscErrorCode MatCreate_MPIAIJ(Mat B)
5997: {
5998:   Mat_MPIAIJ     *b;
6000:   PetscMPIInt    size;

6003:   MPI_Comm_size(PetscObjectComm((PetscObject)B),&size);

6005:   PetscNewLog(B,&b);
6006:   B->data       = (void*)b;
6007:   PetscMemcpy(B->ops,&MatOps_Values,sizeof(struct _MatOps));
6008:   B->assembled  = PETSC_FALSE;
6009:   B->insertmode = NOT_SET_VALUES;
6010:   b->size       = size;

6012:   MPI_Comm_rank(PetscObjectComm((PetscObject)B),&b->rank);

6014:   /* build cache for off-processor entries formed during MatSetValues() */
6015:   MatStashCreate_Private(PetscObjectComm((PetscObject)B),1,&B->stash);

6017:   b->donotstash  = PETSC_FALSE;
6018:   b->colmap      = 0;
6019:   b->garray      = 0;
6020:   b->roworiented = PETSC_TRUE;

6022:   /* stuff used for matrix vector multiply */
6023:   b->lvec  = NULL;
6024:   b->Mvctx = NULL;

6026:   /* stuff for MatGetRow() */
6027:   b->rowindices   = 0;
6028:   b->rowvalues    = 0;
6029:   b->getrowactive = PETSC_FALSE;

6031:   /* flexible pointer used in CUSP/CUSPARSE classes */
6032:   b->spptr = NULL;

6034:   PetscObjectComposeFunction((PetscObject)B,"MatMPIAIJSetUseScalableIncreaseOverlap_C",MatMPIAIJSetUseScalableIncreaseOverlap_MPIAIJ);
6035:   PetscObjectComposeFunction((PetscObject)B,"MatStoreValues_C",MatStoreValues_MPIAIJ);
6036:   PetscObjectComposeFunction((PetscObject)B,"MatRetrieveValues_C",MatRetrieveValues_MPIAIJ);
6037:   PetscObjectComposeFunction((PetscObject)B,"MatIsTranspose_C",MatIsTranspose_MPIAIJ);
6038:   PetscObjectComposeFunction((PetscObject)B,"MatMPIAIJSetPreallocation_C",MatMPIAIJSetPreallocation_MPIAIJ);
6039:   PetscObjectComposeFunction((PetscObject)B,"MatResetPreallocation_C",MatResetPreallocation_MPIAIJ);
6040:   PetscObjectComposeFunction((PetscObject)B,"MatMPIAIJSetPreallocationCSR_C",MatMPIAIJSetPreallocationCSR_MPIAIJ);
6041:   PetscObjectComposeFunction((PetscObject)B,"MatDiagonalScaleLocal_C",MatDiagonalScaleLocal_MPIAIJ);
6042:   PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_mpiaijperm_C",MatConvert_MPIAIJ_MPIAIJPERM);
6043:   PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_mpiaijsell_C",MatConvert_MPIAIJ_MPIAIJSELL);
6044: #if defined(PETSC_HAVE_MKL_SPARSE)
6045:   PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_mpiaijmkl_C",MatConvert_MPIAIJ_MPIAIJMKL);
6046: #endif
6047:   PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_mpiaijcrl_C",MatConvert_MPIAIJ_MPIAIJCRL);
6048:   PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_mpisbaij_C",MatConvert_MPIAIJ_MPISBAIJ);
6049: #if defined(PETSC_HAVE_ELEMENTAL)
6050:   PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_elemental_C",MatConvert_MPIAIJ_Elemental);
6051: #endif
6052: #if defined(PETSC_HAVE_HYPRE)
6053:   PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_hypre_C",MatConvert_AIJ_HYPRE);
6054: #endif
6055:   PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_is_C",MatConvert_XAIJ_IS);
6056:   PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_mpisell_C",MatConvert_MPIAIJ_MPISELL);
6057:   PetscObjectComposeFunction((PetscObject)B,"MatMatMult_mpidense_mpiaij_C",MatMatMult_MPIDense_MPIAIJ);
6058:   PetscObjectComposeFunction((PetscObject)B,"MatMatMultSymbolic_mpidense_mpiaij_C",MatMatMultSymbolic_MPIDense_MPIAIJ);
6059:   PetscObjectComposeFunction((PetscObject)B,"MatMatMultNumeric_mpidense_mpiaij_C",MatMatMultNumeric_MPIDense_MPIAIJ);
6060: #if defined(PETSC_HAVE_HYPRE)
6061:   PetscObjectComposeFunction((PetscObject)B,"MatMatMatMult_transpose_mpiaij_mpiaij_C",MatMatMatMult_Transpose_AIJ_AIJ);
6062: #endif
6063:   PetscObjectComposeFunction((PetscObject)B,"MatPtAP_is_mpiaij_C",MatPtAP_IS_XAIJ);
6064:   PetscObjectChangeTypeName((PetscObject)B,MATMPIAIJ);
6065:   return(0);
6066: }
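
/*
   Editorial usage sketch (not part of mpiaij.c): the MatConvert_* routines composed on the object in
   MatCreate_MPIAIJ() above are what let MatConvert() change an MPIAIJ matrix into the other formats
   listed there, for example the sliced ELLPACK format. Error checking is omitted, as it is throughout
   this listing.
*/
static PetscErrorCode ExampleConvertMPIAIJ(Mat A)
{
  Mat B;

  /* resolved through the composed "MatConvert_mpiaij_mpisell_C" registered above */
  MatConvert(A,MATMPISELL,MAT_INITIAL_MATRIX,&B);
  MatDestroy(&B);
  return(0);
}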

6068: /*@C
6069:      MatCreateMPIAIJWithSplitArrays - creates a MPI AIJ matrix using arrays that contain the "diagonal"
6070:          and "off-diagonal" part of the matrix in CSR format.

6072:    Collective

6074:    Input Parameters:
6075: +  comm - MPI communicator
6076: .  m - number of local rows (Cannot be PETSC_DECIDE)
6077: .  n - This value should be the same as the local size used in creating the
6078:        x vector for the matrix-vector product y = Ax (or PETSC_DECIDE to have it
6079:        calculated if N is given). For square matrices n is almost always m.
6080: .  M - number of global rows (or PETSC_DETERMINE to have it calculated if m is given)
6081: .  N - number of global columns (or PETSC_DETERMINE to have it calculated if n is given)
6082: .   i - row indices for "diagonal" portion of matrix; that is i[0] = 0, i[row] = i[row-1] + number of elements in that row of the matrix
6083: .   j - column indices
6084: .   a - matrix values
6085: .   oi - row indices for "off-diagonal" portion of matrix; that is oi[0] = 0, oi[row] = oi[row-1] + number of elements in that row of the matrix
6086: .   oj - column indices
6087: -   oa - matrix values

6089:    Output Parameter:
6090: .   mat - the matrix

6092:    Level: advanced

6094:    Notes:
6095:        The i, j, and a arrays ARE NOT copied by this routine into the internal format used by PETSc. The user
6096:        must free the arrays once the matrix has been destroyed and not before.

6098:        The i and j indices are 0 based

6100:        See MatCreateAIJ() for the definition of "diagonal" and "off-diagonal" portion of the matrix

6102:        This sets local rows and cannot be used to set off-processor values.

6104:        Use of this routine is discouraged because it is inflexible and cumbersome to use. It is extremely rare that a
6105:        legacy application natively assembles into exactly this split format. The code to do so is nontrivial and does
6106:        not easily support in-place reassembly. It is recommended to use MatSetValues() (or a variant thereof) because
6107:        the resulting assembly is easier to implement, will work with any matrix format, and the user does not have to
6108:        keep track of the underlying array. Use MatSetOption(A,MAT_NO_OFF_PROC_ENTRIES,PETSC_TRUE) to disable all
6109:        communication if it is known that only local entries will be set.

6111: .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatMPIAIJSetPreallocation(), MatMPIAIJSetPreallocationCSR(),
6112:           MATMPIAIJ, MatCreateAIJ(), MatCreateMPIAIJWithArrays()
6113: @*/
6114: PetscErrorCode MatCreateMPIAIJWithSplitArrays(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt M,PetscInt N,PetscInt i[],PetscInt j[],PetscScalar a[],PetscInt oi[], PetscInt oj[],PetscScalar oa[],Mat *mat)
6115: {
6117:   Mat_MPIAIJ     *maij;

6120:   if (m < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"local number of rows (m) cannot be PETSC_DECIDE or negative");
6121:   if (i[0]) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"i (row indices) must start with 0");
6122:   if (oi[0]) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"oi (row indices) must start with 0");
6123:   MatCreate(comm,mat);
6124:   MatSetSizes(*mat,m,n,M,N);
6125:   MatSetType(*mat,MATMPIAIJ);
6126:   maij = (Mat_MPIAIJ*) (*mat)->data;

6128:   (*mat)->preallocated = PETSC_TRUE;

6130:   PetscLayoutSetUp((*mat)->rmap);
6131:   PetscLayoutSetUp((*mat)->cmap);

6133:   MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,m,n,i,j,a,&maij->A);
6134:   MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,m,(*mat)->cmap->N,oi,oj,oa,&maij->B);

6136:   MatAssemblyBegin(maij->A,MAT_FINAL_ASSEMBLY);
6137:   MatAssemblyEnd(maij->A,MAT_FINAL_ASSEMBLY);
6138:   MatAssemblyBegin(maij->B,MAT_FINAL_ASSEMBLY);
6139:   MatAssemblyEnd(maij->B,MAT_FINAL_ASSEMBLY);

6141:   MatSetOption(*mat,MAT_NO_OFF_PROC_ENTRIES,PETSC_TRUE);
6142:   MatAssemblyBegin(*mat,MAT_FINAL_ASSEMBLY);
6143:   MatAssemblyEnd(*mat,MAT_FINAL_ASSEMBLY);
6144:   MatSetOption(*mat,MAT_NO_OFF_PROC_ENTRIES,PETSC_FALSE);
6145:   MatSetOption(*mat,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_TRUE);
6146:   return(0);
6147: }
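
/*
   Editorial usage sketch (not part of mpiaij.c): assembling the 4x4 tridiagonal matrix
     [ 2 -1  0  0 ]
     [-1  2 -1  0 ]
     [ 0 -1  2 -1 ]
     [ 0  0 -1  2 ]
   on exactly two processes with MatCreateMPIAIJWithSplitArrays(). The "diagonal" block uses local
   column indices, while the "off-diagonal" block uses global column indices (as implied by the
   creation of maij->B above with (*mat)->cmap->N columns). The arrays are on the stack, so the
   matrix is destroyed before they go out of scope. Error checking is omitted, as it is throughout
   this listing.
*/
static PetscErrorCode ExampleSplitArrays(MPI_Comm comm)
{
  Mat         A;
  PetscMPIInt rank;
  PetscInt    i[]   = {0,2,4},j[] = {0,1,0,1};   /* both ranks own the 2x2 diagonal block [2 -1; -1 2] */
  PetscScalar a[]   = {2,-1,-1,2};
  PetscInt    oi0[] = {0,0,1},oj0[] = {2};       /* rank 0: local row 1 couples to global column 2 */
  PetscInt    oi1[] = {0,1,1},oj1[] = {1};       /* rank 1: local row 0 couples to global column 1 */
  PetscScalar oa[]  = {-1};

  MPI_Comm_rank(comm,&rank);
  MatCreateMPIAIJWithSplitArrays(comm,2,2,PETSC_DETERMINE,PETSC_DETERMINE,i,j,a,
                                 rank ? oi1 : oi0,rank ? oj1 : oj0,oa,&A);
  MatDestroy(&A);
  return(0);
}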

6149: /*
6150:     Special version for direct calls from Fortran
6151: */
6152:  #include <petsc/private/fortranimpl.h>

6154: /* Change these macros so they can be used in a void function */
6155: #undef CHKERRQ
6156: #define CHKERRQ(ierr) CHKERRABORT(PETSC_COMM_WORLD,ierr)
6157: #undef SETERRQ2
6158: #define SETERRQ2(comm,ierr,b,c,d) CHKERRABORT(comm,ierr)
6159: #undef SETERRQ3
6160: #define SETERRQ3(comm,ierr,b,c,d,e) CHKERRABORT(comm,ierr)
6161: #undef SETERRQ
6162: #define SETERRQ(c,ierr,b) CHKERRABORT(c,ierr)

6164: #if defined(PETSC_HAVE_FORTRAN_CAPS)
6165: #define matsetvaluesmpiaij_ MATSETVALUESMPIAIJ
6166: #elif !defined(PETSC_HAVE_FORTRAN_UNDERSCORE)
6167: #define matsetvaluesmpiaij_ matsetvaluesmpiaij
6168: #else
6169: #endif
6170: PETSC_EXTERN void PETSC_STDCALL matsetvaluesmpiaij_(Mat *mmat,PetscInt *mm,const PetscInt im[],PetscInt *mn,const PetscInt in[],const PetscScalar v[],InsertMode *maddv,PetscErrorCode *_ierr)
6171: {
6172:   Mat            mat  = *mmat;
6173:   PetscInt       m    = *mm, n = *mn;
6174:   InsertMode     addv = *maddv;
6175:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
6176:   PetscScalar    value;

6179:   MatCheckPreallocated(mat,1);
6180:   if (mat->insertmode == NOT_SET_VALUES) mat->insertmode = addv;

6182: #if defined(PETSC_USE_DEBUG)
6183:   else if (mat->insertmode != addv) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Cannot mix add values and insert values");
6184: #endif
6185:   {
6186:     PetscInt  i,j,rstart  = mat->rmap->rstart,rend = mat->rmap->rend;
6187:     PetscInt  cstart      = mat->cmap->rstart,cend = mat->cmap->rend,row,col;
6188:     PetscBool roworiented = aij->roworiented;

6190:     /* Some Variables required in the macro */
6191:     Mat        A                 = aij->A;
6192:     Mat_SeqAIJ *a                = (Mat_SeqAIJ*)A->data;
6193:     PetscInt   *aimax            = a->imax,*ai = a->i,*ailen = a->ilen,*aj = a->j;
6194:     MatScalar  *aa               = a->a;
6195:     PetscBool  ignorezeroentries = (((a->ignorezeroentries)&&(addv==ADD_VALUES)) ? PETSC_TRUE : PETSC_FALSE);
6196:     Mat        B                 = aij->B;
6197:     Mat_SeqAIJ *b                = (Mat_SeqAIJ*)B->data;
6198:     PetscInt   *bimax            = b->imax,*bi = b->i,*bilen = b->ilen,*bj = b->j,bm = aij->B->rmap->n,am = aij->A->rmap->n;
6199:     MatScalar  *ba               = b->a;

6201:     PetscInt  *rp1,*rp2,ii,nrow1,nrow2,_i,rmax1,rmax2,N,low1,high1,low2,high2,t,lastcol1,lastcol2;
6202:     PetscInt  nonew = a->nonew;
6203:     MatScalar *ap1,*ap2;

6206:     for (i=0; i<m; i++) {
6207:       if (im[i] < 0) continue;
6208: #if defined(PETSC_USE_DEBUG)
6209:       if (im[i] >= mat->rmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Row too large: row %D max %D",im[i],mat->rmap->N-1);
6210: #endif
6211:       if (im[i] >= rstart && im[i] < rend) {
6212:         row      = im[i] - rstart;
6213:         lastcol1 = -1;
6214:         rp1      = aj + ai[row];
6215:         ap1      = aa + ai[row];
6216:         rmax1    = aimax[row];
6217:         nrow1    = ailen[row];
6218:         low1     = 0;
6219:         high1    = nrow1;
6220:         lastcol2 = -1;
6221:         rp2      = bj + bi[row];
6222:         ap2      = ba + bi[row];
6223:         rmax2    = bimax[row];
6224:         nrow2    = bilen[row];
6225:         low2     = 0;
6226:         high2    = nrow2;

6228:         for (j=0; j<n; j++) {
6229:           if (roworiented) value = v[i*n+j];
6230:           else value = v[i+j*m];
6231:           if (in[j] >= cstart && in[j] < cend) {
6232:             col = in[j] - cstart;
6233:             if (ignorezeroentries && value == 0.0 && (addv == ADD_VALUES) && row != col) continue;
6234:             MatSetValues_SeqAIJ_A_Private(row,col,value,addv,im[i],in[j]);
6235:           } else if (in[j] < 0) continue;
6236: #if defined(PETSC_USE_DEBUG)
6237:           /* extra brace on SETERRQ2() is required for --with-errorchecking=0 - due to the next 'else' clause */
6238:           else if (in[j] >= mat->cmap->N) {SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Column too large: col %D max %D",in[j],mat->cmap->N-1);}
6239: #endif
6240:           else {
6241:             if (mat->was_assembled) {
6242:               if (!aij->colmap) {
6243:                 MatCreateColmap_MPIAIJ_Private(mat);
6244:               }
6245: #if defined(PETSC_USE_CTABLE)
6246:               PetscTableFind(aij->colmap,in[j]+1,&col);
6247:               col--;
6248: #else
6249:               col = aij->colmap[in[j]] - 1;
6250: #endif
6251:               if (ignorezeroentries && value == 0.0 && (addv == ADD_VALUES) && row != col) continue;
6252:               if (col < 0 && !((Mat_SeqAIJ*)(aij->A->data))->nonew) {
6253:                 MatDisAssemble_MPIAIJ(mat);
6254:                 col  =  in[j];
6255:                 /* Reinitialize the variables required by MatSetValues_SeqAIJ_B_Private() */
6256:                 B     = aij->B;
6257:                 b     = (Mat_SeqAIJ*)B->data;
6258:                 bimax = b->imax; bi = b->i; bilen = b->ilen; bj = b->j;
6259:                 rp2   = bj + bi[row];
6260:                 ap2   = ba + bi[row];
6261:                 rmax2 = bimax[row];
6262:                 nrow2 = bilen[row];
6263:                 low2  = 0;
6264:                 high2 = nrow2;
6265:                 bm    = aij->B->rmap->n;
6266:                 ba    = b->a;
6267:               }
6268:             } else col = in[j];
6269:             MatSetValues_SeqAIJ_B_Private(row,col,value,addv,im[i],in[j]);
6270:           }
6271:         }
6272:       } else if (!aij->donotstash) {
6273:         if (roworiented) {
6274:           MatStashValuesRow_Private(&mat->stash,im[i],n,in,v+i*n,(PetscBool)(ignorezeroentries && (addv == ADD_VALUES)));
6275:         } else {
6276:           MatStashValuesCol_Private(&mat->stash,im[i],n,in,v+i,m,(PetscBool)(ignorezeroentries && (addv == ADD_VALUES)));
6277:         }
6278:       }
6279:     }
6280:   }
6281:   PetscFunctionReturnVoid();
6282: }