Actual source code: matrix.c

petsc-master 2018-02-17
Report Typos and Errors

  2: /*
  3:    This is where the abstract matrix operations are defined
  4: */

  6:  #include <petsc/private/matimpl.h>
  7:  #include <petsc/private/isimpl.h>
  8:  #include <petsc/private/vecimpl.h>

 10: /* Logging support */
 11: PetscClassId MAT_CLASSID;
 12: PetscClassId MAT_COLORING_CLASSID;
 13: PetscClassId MAT_FDCOLORING_CLASSID;
 14: PetscClassId MAT_TRANSPOSECOLORING_CLASSID;

 16: PetscLogEvent MAT_Mult, MAT_Mults, MAT_MultConstrained, MAT_MultAdd, MAT_MultTranspose;
 17: PetscLogEvent MAT_MultTransposeConstrained, MAT_MultTransposeAdd, MAT_Solve, MAT_Solves, MAT_SolveAdd, MAT_SolveTranspose, MAT_MatSolve;
 18: PetscLogEvent MAT_SolveTransposeAdd, MAT_SOR, MAT_ForwardSolve, MAT_BackwardSolve, MAT_LUFactor, MAT_LUFactorSymbolic;
 19: PetscLogEvent MAT_LUFactorNumeric, MAT_CholeskyFactor, MAT_CholeskyFactorSymbolic, MAT_CholeskyFactorNumeric, MAT_ILUFactor;
 20: PetscLogEvent MAT_ILUFactorSymbolic, MAT_ICCFactorSymbolic, MAT_Copy, MAT_Convert, MAT_Scale, MAT_AssemblyBegin;
 21: PetscLogEvent MAT_AssemblyEnd, MAT_SetValues, MAT_GetValues, MAT_GetRow, MAT_GetRowIJ, MAT_CreateSubMats, MAT_GetOrdering, MAT_RedundantMat, MAT_GetSeqNonzeroStructure;
 22: PetscLogEvent MAT_IncreaseOverlap, MAT_Partitioning, MAT_Coarsen, MAT_ZeroEntries, MAT_Load, MAT_View, MAT_AXPY, MAT_FDColoringCreate;
 23: PetscLogEvent MAT_FDColoringSetUp, MAT_FDColoringApply,MAT_Transpose,MAT_FDColoringFunction, MAT_CreateSubMat;
 24: PetscLogEvent MAT_TransposeColoringCreate;
 25: PetscLogEvent MAT_MatMult, MAT_MatMultSymbolic, MAT_MatMultNumeric;
 26: PetscLogEvent MAT_PtAP, MAT_PtAPSymbolic, MAT_PtAPNumeric,MAT_RARt, MAT_RARtSymbolic, MAT_RARtNumeric;
 27: PetscLogEvent MAT_MatTransposeMult, MAT_MatTransposeMultSymbolic, MAT_MatTransposeMultNumeric;
 28: PetscLogEvent MAT_TransposeMatMult, MAT_TransposeMatMultSymbolic, MAT_TransposeMatMultNumeric;
 29: PetscLogEvent MAT_MatMatMult, MAT_MatMatMultSymbolic, MAT_MatMatMultNumeric;
 30: PetscLogEvent MAT_MultHermitianTranspose,MAT_MultHermitianTransposeAdd;
 31: PetscLogEvent MAT_Getsymtranspose, MAT_Getsymtransreduced, MAT_Transpose_SeqAIJ, MAT_GetBrowsOfAcols;
 32: PetscLogEvent MAT_GetBrowsOfAocols, MAT_Getlocalmat, MAT_Getlocalmatcondensed, MAT_Seqstompi, MAT_Seqstompinum, MAT_Seqstompisym;
 33: PetscLogEvent MAT_Applypapt, MAT_Applypapt_numeric, MAT_Applypapt_symbolic, MAT_GetSequentialNonzeroStructure;
 34: PetscLogEvent MAT_GetMultiProcBlock;
 35: PetscLogEvent MAT_CUSPCopyToGPU, MAT_CUSPARSECopyToGPU, MAT_SetValuesBatch, MAT_SetValuesBatchI, MAT_SetValuesBatchII, MAT_SetValuesBatchIII, MAT_SetValuesBatchIV;
 36: PetscLogEvent MAT_ViennaCLCopyToGPU;
 37: PetscLogEvent MAT_Merge,MAT_Residual,MAT_SetRandom;
 38: PetscLogEvent MATCOLORING_Apply,MATCOLORING_Comm,MATCOLORING_Local,MATCOLORING_ISCreate,MATCOLORING_SetUp,MATCOLORING_Weights;

 40: const char *const MatFactorTypes[] = {"NONE","LU","CHOLESKY","ILU","ICC","ILUDT","MatFactorType","MAT_FACTOR_",0};

 42: /*@
 43:    MatSetRandom - Sets all components of a matrix to random numbers. For sparse matrices that have been preallocated it randomly selects appropriate locations

 45:    Logically Collective on Mat

 47:    Input Parameters:
 48: +  x  - the matrix
 49: -  rctx - the random number context, formed by PetscRandomCreate(), or NULL and
 50:           it will create one internally.

 52:    Output Parameter:
 53: .  x  - the matrix

 55:    Example of Usage:
 56: .vb
 57:      PetscRandomCreate(PETSC_COMM_WORLD,&rctx);
 58:      MatSetRandom(x,rctx);
 59:      PetscRandomDestroy(rctx);
 60: .ve

 62:    Level: intermediate

 64:    Concepts: matrix^setting to random
 65:    Concepts: random^matrix

 67: .seealso: MatZeroEntries(), MatSetValues(), PetscRandomCreate(), PetscRandomDestroy()
 68: @*/
 69: PetscErrorCode MatSetRandom(Mat x,PetscRandom rctx)
 70: {
 72:   PetscRandom    randObj = NULL;


 79:   if (!rctx) {
 80:     MPI_Comm comm;
 81:     PetscObjectGetComm((PetscObject)x,&comm);
 82:     PetscRandomCreate(comm,&randObj);
 83:     PetscRandomSetFromOptions(randObj);
 84:     rctx = randObj;
 85:   }

 87:   PetscLogEventBegin(MAT_SetRandom,x,rctx,0,0);
 88:   (*x->ops->setrandom)(x,rctx);
 89:   PetscLogEventEnd(MAT_SetRandom,x,rctx,0,0);

 91:   x->assembled = PETSC_TRUE;
 92:   PetscRandomDestroy(&randObj);
 93:   return(0);
 94: }

 96: /*@
 97:    MatFactorGetErrorZeroPivot - returns the pivot value that was determined to be zero and the row it occurred in

 99:    Logically Collective on Mat

101:    Input Parameters:
102: .  mat - the factored matrix

104:    Output Parameter:
105: +  pivot - the pivot value computed
106: -  row - the row that the zero pivot occurred. Note that this row must be interpreted carefully due to row reorderings and which processes
107:          the share the matrix

109:    Level: advanced

111:    Notes: This routine does not work for factorizations done with external packages.
112:    This routine should only be called if MatGetFactorError() returns a value of MAT_FACTOR_NUMERIC_ZEROPIVOT

114:    This can be called on non-factored matrices that come from, for example, matrices used in SOR.

116: .seealso: MatZeroEntries(), MatFactor(), MatGetFactor(), MatFactorSymbolic(), MatFactorClearError(), MatFactorGetErrorZeroPivot()
117: @*/
118: PetscErrorCode MatFactorGetErrorZeroPivot(Mat mat,PetscReal *pivot,PetscInt *row)
119: {
122:   *pivot = mat->factorerror_zeropivot_value;
123:   *row   = mat->factorerror_zeropivot_row;
124:   return(0);
125: }

127: /*@
128:    MatFactorGetError - gets the error code from a factorization

130:    Logically Collective on Mat

132:    Input Parameters:
133: .  mat - the factored matrix

135:    Output Parameter:
136: .  err  - the error code

138:    Level: advanced

140:    Notes:    This can be called on non-factored matrices that come from, for example, matrices used in SOR.

142: .seealso: MatZeroEntries(), MatFactor(), MatGetFactor(), MatFactorSymbolic(), MatFactorClearError(), MatFactorGetErrorZeroPivot()
143: @*/
144: PetscErrorCode MatFactorGetError(Mat mat,MatFactorError *err)
145: {
148:   *err = mat->factorerrortype;
149:   return(0);
150: }

152: /*@
153:    MatFactorClearError - clears the error code in a factorization

155:    Logically Collective on Mat

157:    Input Parameter:
158: .  mat - the factored matrix

160:    Level: developer

162:    Notes: This can be called on non-factored matrices that come from, for example, matrices used in SOR.

164: .seealso: MatZeroEntries(), MatFactor(), MatGetFactor(), MatFactorSymbolic(), MatFactorGetError(), MatFactorGetErrorZeroPivot()
165: @*/
166: PetscErrorCode MatFactorClearError(Mat mat)
167: {
170:   mat->factorerrortype             = MAT_FACTOR_NOERROR;
171:   mat->factorerror_zeropivot_value = 0.0;
172:   mat->factorerror_zeropivot_row   = 0;
173:   return(0);
174: }

176: static PetscErrorCode MatFindNonzeroRows_Basic(Mat mat,IS *keptrows)
177: {
178:   PetscErrorCode    ierr;
179:   Vec               r,l;
180:   const PetscScalar *al;
181:   PetscInt          i,nz,gnz,N,n;

184:   MatGetSize(mat,&N,NULL);
185:   MatGetLocalSize(mat,&n,NULL);
186:   MatCreateVecs(mat,&r,&l);
187:   VecSet(l,0.0);
188:   VecSetRandom(r,NULL);
189:   MatMult(mat,r,l);
190:   VecGetArrayRead(l,&al);
191:   for (i=0,nz=0;i<n;i++) if (al[i] != 0.0) nz++;
192:   MPIU_Allreduce(&nz,&gnz,1,MPIU_INT,MPI_SUM,PetscObjectComm((PetscObject)mat));
193:   if (gnz != N) {
194:     PetscInt *nzr;
195:     PetscMalloc1(nz,&nzr);
196:     if (nz) { for (i=0,nz=0;i<n;i++) if (al[i] != 0.0) nzr[nz++] = i; }
197:     ISCreateGeneral(PetscObjectComm((PetscObject)mat),nz,nzr,PETSC_OWN_POINTER,keptrows);
198:   } else *keptrows = NULL;
199:   VecRestoreArrayRead(l,&al);
200:   VecDestroy(&l);
201:   VecDestroy(&r);
202:   return(0);
203: }

205: /*@
206:       MatFindNonzeroRows - Locate all rows that are not completely zero in the matrix

208:   Input Parameter:
209: .    A  - the matrix

211:   Output Parameter:
212: .    keptrows - the rows that are not completely zero

214:   Notes: keptrows is set to NULL if all rows are nonzero.

216:   Level: intermediate

218:  @*/
219: PetscErrorCode MatFindNonzeroRows(Mat mat,IS *keptrows)
220: {

227:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
228:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
229:   if (!mat->ops->findnonzerorows) {
230:     MatFindNonzeroRows_Basic(mat,keptrows);
231:   } else {
232:     (*mat->ops->findnonzerorows)(mat,keptrows);
233:   }
234:   return(0);
235: }

237: /*@
238:       MatFindZeroRows - Locate all rows that are completely zero in the matrix

240:   Input Parameter:
241: .    A  - the matrix

243:   Output Parameter:
244: .    zerorows - the rows that are completely zero

246:   Notes: zerorows is set to NULL if no rows are zero.

248:   Level: intermediate

250:  @*/
251: PetscErrorCode MatFindZeroRows(Mat mat,IS *zerorows)
252: {
254:   IS keptrows;
255:   PetscInt m, n;


260:   MatFindNonzeroRows(mat, &keptrows);
261:   /* MatFindNonzeroRows sets keptrows to NULL if there are no zero rows.
262:      In keeping with this convention, we set zerorows to NULL if there are no zero
263:      rows. */
264:   if (keptrows == NULL) {
265:     *zerorows = NULL;
266:   } else {
267:     MatGetOwnershipRange(mat,&m,&n);
268:     ISComplement(keptrows,m,n,zerorows);
269:     ISDestroy(&keptrows);
270:   }
271:   return(0);
272: }

274: /*@
275:    MatGetDiagonalBlock - Returns the part of the matrix associated with the on-process coupling

277:    Not Collective

279:    Input Parameters:
280: .   A - the matrix

282:    Output Parameters:
283: .   a - the diagonal part (which is a SEQUENTIAL matrix)

285:    Notes: see the manual page for MatCreateAIJ() for more information on the "diagonal part" of the matrix.
286:           Use caution, as the reference count on the returned matrix is not incremented and it is used as
287:           part of the containing MPI Mat's normal operation.

289:    Level: advanced

291: @*/
292: PetscErrorCode MatGetDiagonalBlock(Mat A,Mat *a)
293: {

300:   if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
301:   if (!A->ops->getdiagonalblock) {
302:     PetscMPIInt size;
303:     MPI_Comm_size(PetscObjectComm((PetscObject)A),&size);
304:     if (size == 1) {
305:       *a = A;
306:       return(0);
307:     } else SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Not coded for this matrix type");
308:   }
309:   (*A->ops->getdiagonalblock)(A,a);
310:   return(0);
311: }

313: /*@
314:    MatGetTrace - Gets the trace of a matrix. The sum of the diagonal entries.

316:    Collective on Mat

318:    Input Parameters:
319: .  mat - the matrix

321:    Output Parameter:
322: .   trace - the sum of the diagonal entries

324:    Level: advanced

326: @*/
327: PetscErrorCode MatGetTrace(Mat mat,PetscScalar *trace)
328: {
330:   Vec            diag;

333:   MatCreateVecs(mat,&diag,NULL);
334:   MatGetDiagonal(mat,diag);
335:   VecSum(diag,trace);
336:   VecDestroy(&diag);
337:   return(0);
338: }

340: /*@
341:    MatRealPart - Zeros out the imaginary part of the matrix

343:    Logically Collective on Mat

345:    Input Parameters:
346: .  mat - the matrix

348:    Level: advanced


351: .seealso: MatImaginaryPart()
352: @*/
353: PetscErrorCode MatRealPart(Mat mat)
354: {

360:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
361:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
362:   if (!mat->ops->realpart) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
363:   MatCheckPreallocated(mat,1);
364:   (*mat->ops->realpart)(mat);
365: #if defined(PETSC_HAVE_CUSP)
366:   if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
367:     mat->valid_GPU_matrix = PETSC_CUSP_CPU;
368:   }
369: #elif defined(PETSC_HAVE_VIENNACL)
370:   if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
371:     mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
372:   }
373: #elif defined(PETSC_HAVE_VECCUDA)
374:   if (mat->valid_GPU_matrix != PETSC_CUDA_UNALLOCATED) {
375:     mat->valid_GPU_matrix = PETSC_CUDA_CPU;
376:   }
377: #endif
378:   return(0);
379: }

381: /*@C
382:    MatGetGhosts - Get the global index of all ghost nodes defined by the sparse matrix

384:    Collective on Mat

386:    Input Parameter:
387: .  mat - the matrix

389:    Output Parameters:
390: +   nghosts - number of ghosts (note for BAIJ matrices there is one ghost for each block)
391: -   ghosts - the global indices of the ghost points

393:    Notes: the nghosts and ghosts are suitable to pass into VecCreateGhost()

395:    Level: advanced

397: @*/
398: PetscErrorCode MatGetGhosts(Mat mat,PetscInt *nghosts,const PetscInt *ghosts[])
399: {

405:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
406:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
407:   if (!mat->ops->getghosts) {
408:     if (nghosts) *nghosts = 0;
409:     if (ghosts) *ghosts = 0;
410:   } else {
411:     (*mat->ops->getghosts)(mat,nghosts,ghosts);
412:   }
413:   return(0);
414: }


417: /*@
418:    MatImaginaryPart - Moves the imaginary part of the matrix to the real part and zeros the imaginary part

420:    Logically Collective on Mat

422:    Input Parameters:
423: .  mat - the matrix

425:    Level: advanced


428: .seealso: MatRealPart()
429: @*/
430: PetscErrorCode MatImaginaryPart(Mat mat)
431: {

437:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
438:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
439:   if (!mat->ops->imaginarypart) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
440:   MatCheckPreallocated(mat,1);
441:   (*mat->ops->imaginarypart)(mat);
442: #if defined(PETSC_HAVE_CUSP)
443:   if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
444:     mat->valid_GPU_matrix = PETSC_CUSP_CPU;
445:   }
446: #elif defined(PETSC_HAVE_VIENNACL)
447:   if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
448:     mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
449:   }
450: #elif defined(PETSC_HAVE_VECCUDA)
451:   if (mat->valid_GPU_matrix != PETSC_CUDA_UNALLOCATED) {
452:     mat->valid_GPU_matrix = PETSC_CUDA_CPU;
453:   }
454: #endif
455:   return(0);
456: }

458: /*@
459:    MatMissingDiagonal - Determine if sparse matrix is missing a diagonal entry (or block entry for BAIJ matrices)

461:    Not Collective

463:    Input Parameter:
464: .  mat - the matrix

466:    Output Parameters:
467: +  missing - is any diagonal missing
468: -  dd - first diagonal entry that is missing (optional) on this process

470:    Level: advanced


473: .seealso: MatRealPart()
474: @*/
475: PetscErrorCode MatMissingDiagonal(Mat mat,PetscBool *missing,PetscInt *dd)
476: {

482:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
483:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
484:   if (!mat->ops->missingdiagonal) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
485:   (*mat->ops->missingdiagonal)(mat,missing,dd);
486:   return(0);
487: }

489: /*@C
490:    MatGetRow - Gets a row of a matrix.  You MUST call MatRestoreRow()
491:    for each row that you get to ensure that your application does
492:    not bleed memory.

494:    Not Collective

496:    Input Parameters:
497: +  mat - the matrix
498: -  row - the row to get

500:    Output Parameters:
501: +  ncols -  if not NULL, the number of nonzeros in the row
502: .  cols - if not NULL, the column numbers
503: -  vals - if not NULL, the values

505:    Notes:
506:    This routine is provided for people who need to have direct access
507:    to the structure of a matrix.  We hope that we provide enough
508:    high-level matrix routines that few users will need it.

510:    MatGetRow() always returns 0-based column indices, regardless of
511:    whether the internal representation is 0-based (default) or 1-based.

513:    For better efficiency, set cols and/or vals to NULL if you do
514:    not wish to extract these quantities.

516:    The user can only examine the values extracted with MatGetRow();
517:    the values cannot be altered.  To change the matrix entries, one
518:    must use MatSetValues().

520:    You can only have one call to MatGetRow() outstanding for a particular
521:    matrix at a time, per processor. MatGetRow() can only obtain rows
522:    associated with the given processor, it cannot get rows from the
523:    other processors; for that we suggest using MatCreateSubMatrices(), then
524:    MatGetRow() on the submatrix. The row index passed to MatGetRows()
525:    is in the global number of rows.

527:    Fortran Notes:
528:    The calling sequence from Fortran is
529: .vb
530:    MatGetRow(matrix,row,ncols,cols,values,ierr)
531:          Mat     matrix (input)
532:          integer row    (input)
533:          integer ncols  (output)
534:          integer cols(maxcols) (output)
535:          double precision (or double complex) values(maxcols) output
536: .ve
537:    where maxcols >= maximum nonzeros in any row of the matrix.


540:    Caution:
541:    Do not try to change the contents of the output arrays (cols and vals).
542:    In some cases, this may corrupt the matrix.

544:    Level: advanced

546:    Concepts: matrices^row access

548: .seealso: MatRestoreRow(), MatSetValues(), MatGetValues(), MatCreateSubMatrices(), MatGetDiagonal()
549: @*/
550: PetscErrorCode MatGetRow(Mat mat,PetscInt row,PetscInt *ncols,const PetscInt *cols[],const PetscScalar *vals[])
551: {
553:   PetscInt       incols;

558:   if (!mat->assembled) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
559:   if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
560:   if (!mat->ops->getrow) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
561:   MatCheckPreallocated(mat,1);
562:   PetscLogEventBegin(MAT_GetRow,mat,0,0,0);
563:   (*mat->ops->getrow)(mat,row,&incols,(PetscInt**)cols,(PetscScalar**)vals);
564:   if (ncols) *ncols = incols;
565:   PetscLogEventEnd(MAT_GetRow,mat,0,0,0);
566:   return(0);
567: }

569: /*@
570:    MatConjugate - replaces the matrix values with their complex conjugates

572:    Logically Collective on Mat

574:    Input Parameters:
575: .  mat - the matrix

577:    Level: advanced

579: .seealso:  VecConjugate()
580: @*/
581: PetscErrorCode MatConjugate(Mat mat)
582: {
583: #if defined(PETSC_USE_COMPLEX)

588:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
589:   if (!mat->ops->conjugate) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Not provided for this matrix format, send email to petsc-maint@mcs.anl.gov");
590:   (*mat->ops->conjugate)(mat);
591: #if defined(PETSC_HAVE_CUSP)
592:   if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
593:     mat->valid_GPU_matrix = PETSC_CUSP_CPU;
594:   }
595: #elif defined(PETSC_HAVE_VIENNACL)
596:   if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
597:     mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
598:   }
599: #elif defined(PETSC_HAVE_VECCUDA)
600:   if (mat->valid_GPU_matrix != PETSC_CUDA_UNALLOCATED) {
601:     mat->valid_GPU_matrix = PETSC_CUDA_CPU;
602:   }
603: #endif
604:   return(0);
605: #else
606:   return 0;
607: #endif
608: }

610: /*@C
611:    MatRestoreRow - Frees any temporary space allocated by MatGetRow().

613:    Not Collective

615:    Input Parameters:
616: +  mat - the matrix
617: .  row - the row to get
618: .  ncols, cols - the number of nonzeros and their columns
619: -  vals - if nonzero the column values

621:    Notes:
622:    This routine should be called after you have finished examining the entries.

624:    This routine zeros out ncols, cols, and vals. This is to prevent accidental
625:    us of the array after it has been restored. If you pass NULL, it will
626:    not zero the pointers.  Use of cols or vals after MatRestoreRow is invalid.

628:    Fortran Notes:
629:    The calling sequence from Fortran is
630: .vb
631:    MatRestoreRow(matrix,row,ncols,cols,values,ierr)
632:       Mat     matrix (input)
633:       integer row    (input)
634:       integer ncols  (output)
635:       integer cols(maxcols) (output)
636:       double precision (or double complex) values(maxcols) output
637: .ve
638:    Where maxcols >= maximum nonzeros in any row of the matrix.

640:    In Fortran MatRestoreRow() MUST be called after MatGetRow()
641:    before another call to MatGetRow() can be made.

643:    Level: advanced

645: .seealso:  MatGetRow()
646: @*/
647: PetscErrorCode MatRestoreRow(Mat mat,PetscInt row,PetscInt *ncols,const PetscInt *cols[],const PetscScalar *vals[])
648: {

654:   if (!mat->assembled) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
655:   if (!mat->ops->restorerow) return(0);
656:   (*mat->ops->restorerow)(mat,row,ncols,(PetscInt **)cols,(PetscScalar **)vals);
657:   if (ncols) *ncols = 0;
658:   if (cols)  *cols = NULL;
659:   if (vals)  *vals = NULL;
660:   return(0);
661: }

663: /*@
664:    MatGetRowUpperTriangular - Sets a flag to enable calls to MatGetRow() for matrix in MATSBAIJ format.
665:    You should call MatRestoreRowUpperTriangular() after calling MatGetRow/MatRestoreRow() to disable the flag.

667:    Not Collective

669:    Input Parameters:
670: +  mat - the matrix

672:    Notes:
673:    The flag is to ensure that users are aware of MatGetRow() only provides the upper trianglular part of the row for the matrices in MATSBAIJ format.

675:    Level: advanced

677:    Concepts: matrices^row access

679: .seealso: MatRestoreRowRowUpperTriangular()
680: @*/
681: PetscErrorCode MatGetRowUpperTriangular(Mat mat)
682: {

688:   if (!mat->assembled) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
689:   if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
690:   if (!mat->ops->getrowuppertriangular) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
691:   MatCheckPreallocated(mat,1);
692:   (*mat->ops->getrowuppertriangular)(mat);
693:   return(0);
694: }

696: /*@
697:    MatRestoreRowUpperTriangular - Disable calls to MatGetRow() for matrix in MATSBAIJ format.

699:    Not Collective

701:    Input Parameters:
702: +  mat - the matrix

704:    Notes:
705:    This routine should be called after you have finished MatGetRow/MatRestoreRow().


708:    Level: advanced

710: .seealso:  MatGetRowUpperTriangular()
711: @*/
712: PetscErrorCode MatRestoreRowUpperTriangular(Mat mat)
713: {

718:   if (!mat->assembled) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
719:   if (!mat->ops->restorerowuppertriangular) return(0);
720:   (*mat->ops->restorerowuppertriangular)(mat);
721:   return(0);
722: }

724: /*@C
725:    MatSetOptionsPrefix - Sets the prefix used for searching for all
726:    Mat options in the database.

728:    Logically Collective on Mat

730:    Input Parameter:
731: +  A - the Mat context
732: -  prefix - the prefix to prepend to all option names

734:    Notes:
735:    A hyphen (-) must NOT be given at the beginning of the prefix name.
736:    The first character of all runtime options is AUTOMATICALLY the hyphen.

738:    Level: advanced

740: .keywords: Mat, set, options, prefix, database

742: .seealso: MatSetFromOptions()
743: @*/
744: PetscErrorCode MatSetOptionsPrefix(Mat A,const char prefix[])
745: {

750:   PetscObjectSetOptionsPrefix((PetscObject)A,prefix);
751:   return(0);
752: }

754: /*@C
755:    MatAppendOptionsPrefix - Appends to the prefix used for searching for all
756:    Mat options in the database.

758:    Logically Collective on Mat

760:    Input Parameters:
761: +  A - the Mat context
762: -  prefix - the prefix to prepend to all option names

764:    Notes:
765:    A hyphen (-) must NOT be given at the beginning of the prefix name.
766:    The first character of all runtime options is AUTOMATICALLY the hyphen.

768:    Level: advanced

770: .keywords: Mat, append, options, prefix, database

772: .seealso: MatGetOptionsPrefix()
773: @*/
774: PetscErrorCode MatAppendOptionsPrefix(Mat A,const char prefix[])
775: {

780:   PetscObjectAppendOptionsPrefix((PetscObject)A,prefix);
781:   return(0);
782: }

784: /*@C
785:    MatGetOptionsPrefix - Sets the prefix used for searching for all
786:    Mat options in the database.

788:    Not Collective

790:    Input Parameter:
791: .  A - the Mat context

793:    Output Parameter:
794: .  prefix - pointer to the prefix string used

796:    Notes: On the fortran side, the user should pass in a string 'prefix' of
797:    sufficient length to hold the prefix.

799:    Level: advanced

801: .keywords: Mat, get, options, prefix, database

803: .seealso: MatAppendOptionsPrefix()
804: @*/
805: PetscErrorCode MatGetOptionsPrefix(Mat A,const char *prefix[])
806: {

811:   PetscObjectGetOptionsPrefix((PetscObject)A,prefix);
812:   return(0);
813: }

815: /*@
816:    MatResetPreallocation - Reset mat to use the original nonzero pattern provided by users.

818:    Collective on Mat

820:    Input Parameters:
821: .  A - the Mat context

823:    Notes:
824:    The allocated memory will be shrunk after calling MatAssembly with MAT_FINAL_ASSEMBLY. Users can reset the preallocation to access the original memory.
825:    Currently support MPIAIJ and SEQAIJ.

827:    Level: beginner

829: .keywords: Mat, ResetPreallocation

831: .seealso: MatSeqAIJSetPreallocation(), MatMPIAIJSetPreallocation(), MatXAIJSetPreallocation()
832: @*/
833: PetscErrorCode MatResetPreallocation(Mat A)
834: {

840:   PetscUseMethod(A,"MatResetPreallocation_C",(Mat),(A));
841:   return(0);
842: }


845: /*@
846:    MatSetUp - Sets up the internal matrix data structures for the later use.

848:    Collective on Mat

850:    Input Parameters:
851: .  A - the Mat context

853:    Notes:
854:    If the user has not set preallocation for this matrix then a default preallocation that is likely to be inefficient is used.

856:    If a suitable preallocation routine is used, this function does not need to be called.

858:    See the Performance chapter of the PETSc users manual for how to preallocate matrices

860:    Level: beginner

862: .keywords: Mat, setup

864: .seealso: MatCreate(), MatDestroy()
865: @*/
866: PetscErrorCode MatSetUp(Mat A)
867: {
868:   PetscMPIInt    size;

873:   if (!((PetscObject)A)->type_name) {
874:     MPI_Comm_size(PetscObjectComm((PetscObject)A), &size);
875:     if (size == 1) {
876:       MatSetType(A, MATSEQAIJ);
877:     } else {
878:       MatSetType(A, MATMPIAIJ);
879:     }
880:   }
881:   if (!A->preallocated && A->ops->setup) {
882:     PetscInfo(A,"Warning not preallocating matrix storage\n");
883:     (*A->ops->setup)(A);
884:   }
885:   PetscLayoutSetUp(A->rmap);
886:   PetscLayoutSetUp(A->cmap);
887:   A->preallocated = PETSC_TRUE;
888:   return(0);
889: }

891: #if defined(PETSC_HAVE_SAWS)
892:  #include <petscviewersaws.h>
893: #endif
894: /*@C
895:    MatView - Visualizes a matrix object.

897:    Collective on Mat

899:    Input Parameters:
900: +  mat - the matrix
901: -  viewer - visualization context

903:   Notes:
904:   The available visualization contexts include
905: +    PETSC_VIEWER_STDOUT_SELF - for sequential matrices
906: .    PETSC_VIEWER_STDOUT_WORLD - for parallel matrices created on PETSC_COMM_WORLD
907: .    PETSC_VIEWER_STDOUT_(comm) - for matrices created on MPI communicator comm
908: -     PETSC_VIEWER_DRAW_WORLD - graphical display of nonzero structure

910:    The user can open alternative visualization contexts with
911: +    PetscViewerASCIIOpen() - Outputs matrix to a specified file
912: .    PetscViewerBinaryOpen() - Outputs matrix in binary to a
913:          specified file; corresponding input uses MatLoad()
914: .    PetscViewerDrawOpen() - Outputs nonzero matrix structure to
915:          an X window display
916: -    PetscViewerSocketOpen() - Outputs matrix to Socket viewer.
917:          Currently only the sequential dense and AIJ
918:          matrix types support the Socket viewer.

920:    The user can call PetscViewerPushFormat() to specify the output
921:    format of ASCII printed objects (when using PETSC_VIEWER_STDOUT_SELF,
922:    PETSC_VIEWER_STDOUT_WORLD and PetscViewerASCIIOpen).  Available formats include
923: +    PETSC_VIEWER_DEFAULT - default, prints matrix contents
924: .    PETSC_VIEWER_ASCII_MATLAB - prints matrix contents in Matlab format
925: .    PETSC_VIEWER_ASCII_DENSE - prints entire matrix including zeros
926: .    PETSC_VIEWER_ASCII_COMMON - prints matrix contents, using a sparse
927:          format common among all matrix types
928: .    PETSC_VIEWER_ASCII_IMPL - prints matrix contents, using an implementation-specific
929:          format (which is in many cases the same as the default)
930: .    PETSC_VIEWER_ASCII_INFO - prints basic information about the matrix
931:          size and structure (not the matrix entries)
932: .    PETSC_VIEWER_ASCII_INFO_DETAIL - prints more detailed information about
933:          the matrix structure

935:    Options Database Keys:
936: +  -mat_view ::ascii_info - Prints info on matrix at conclusion of MatAssemblyEnd()
937: .  -mat_view ::ascii_info_detail - Prints more detailed info
938: .  -mat_view - Prints matrix in ASCII format
939: .  -mat_view ::ascii_matlab - Prints matrix in Matlab format
940: .  -mat_view draw - PetscDraws nonzero structure of matrix, using MatView() and PetscDrawOpenX().
941: .  -display <name> - Sets display name (default is host)
942: .  -draw_pause <sec> - Sets number of seconds to pause after display
943: .  -mat_view socket - Sends matrix to socket, can be accessed from Matlab (see Users-Manual: Chapter 12 Using MATLAB with PETSc for details)
944: .  -viewer_socket_machine <machine> -
945: .  -viewer_socket_port <port> -
946: .  -mat_view binary - save matrix to file in binary format
947: -  -viewer_binary_filename <name> -
948:    Level: beginner

950:    Notes: see the manual page for MatLoad() for the exact format of the binary file when the binary
951:       viewer is used.

953:       See share/petsc/matlab/PetscBinaryRead.m for a Matlab code that can read in the binary file when the binary
954:       viewer is used.

956:       One can use '-mat_view draw -draw_pause -1' to pause the graphical display of matrix nonzero structure.
957:       And then use the following mouse functions:
958:           left mouse: zoom in
959:           middle mouse: zoom out
960:           right mouse: continue with the simulation

962:    Concepts: matrices^viewing
963:    Concepts: matrices^plotting
964:    Concepts: matrices^printing

966: .seealso: PetscViewerPushFormat(), PetscViewerASCIIOpen(), PetscViewerDrawOpen(),
967:           PetscViewerSocketOpen(), PetscViewerBinaryOpen(), MatLoad()
968: @*/
969: PetscErrorCode MatView(Mat mat,PetscViewer viewer)
970: {
971:   PetscErrorCode    ierr;
972:   PetscInt          rows,cols,rbs,cbs;
973:   PetscBool         iascii,ibinary;
974:   PetscViewerFormat format;
975:   PetscMPIInt       size;
976: #if defined(PETSC_HAVE_SAWS)
977:   PetscBool         issaws;
978: #endif

983:   if (!viewer) {
984:     PetscViewerASCIIGetStdout(PetscObjectComm((PetscObject)mat),&viewer);
985:   }
988:   MatCheckPreallocated(mat,1);
989:   PetscViewerGetFormat(viewer,&format);
990:   MPI_Comm_size(PetscObjectComm((PetscObject)mat),&size);
991:   if (size == 1 && format == PETSC_VIEWER_LOAD_BALANCE) return(0);
992:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERBINARY,&ibinary);
993:   if (ibinary) {
994:     PetscBool mpiio;
995:     PetscViewerBinaryGetUseMPIIO(viewer,&mpiio);
996:     if (mpiio) SETERRQ(PetscObjectComm((PetscObject)viewer),PETSC_ERR_SUP,"PETSc matrix viewers do not support using MPI-IO, turn off that flag");
997:   }

999:   PetscLogEventBegin(MAT_View,mat,viewer,0,0);
1000:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);
1001:   if ((!iascii || (format != PETSC_VIEWER_ASCII_INFO || format == PETSC_VIEWER_ASCII_INFO_DETAIL)) && mat->factortype) {
1002:     SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"No viewers for factored matrix except ASCII info or info_detailed");
1003:   }

1005: #if defined(PETSC_HAVE_SAWS)
1006:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERSAWS,&issaws);
1007: #endif
1008:   if (iascii) {
1009:     if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ORDER,"Must call MatAssemblyBegin/End() before viewing matrix");
1010:     PetscObjectPrintClassNamePrefixType((PetscObject)mat,viewer);
1011:     if (format == PETSC_VIEWER_ASCII_INFO || format == PETSC_VIEWER_ASCII_INFO_DETAIL) {
1012:       PetscViewerASCIIPushTab(viewer);
1013:       MatGetSize(mat,&rows,&cols);
1014:       MatGetBlockSizes(mat,&rbs,&cbs);
1015:       if (rbs != 1 || cbs != 1) {
1016:         if (rbs != cbs) {PetscViewerASCIIPrintf(viewer,"rows=%D, cols=%D, rbs=%D, cbs = %D\n",rows,cols,rbs,cbs);}
1017:         else            {PetscViewerASCIIPrintf(viewer,"rows=%D, cols=%D, bs=%D\n",rows,cols,rbs);}
1018:       } else {
1019:         PetscViewerASCIIPrintf(viewer,"rows=%D, cols=%D\n",rows,cols);
1020:       }
1021:       if (mat->factortype) {
1022:         MatSolverType solver;
1023:         MatFactorGetSolverType(mat,&solver);
1024:         PetscViewerASCIIPrintf(viewer,"package used to perform factorization: %s\n",solver);
1025:       }
1026:       if (mat->ops->getinfo) {
1027:         MatInfo info;
1028:         MatGetInfo(mat,MAT_GLOBAL_SUM,&info);
1029:         PetscViewerASCIIPrintf(viewer,"total: nonzeros=%.f, allocated nonzeros=%.f\n",info.nz_used,info.nz_allocated);
1030:         PetscViewerASCIIPrintf(viewer,"total number of mallocs used during MatSetValues calls =%D\n",(PetscInt)info.mallocs);
1031:       }
1032:       if (mat->nullsp) {PetscViewerASCIIPrintf(viewer,"  has attached null space\n");}
1033:       if (mat->nearnullsp) {PetscViewerASCIIPrintf(viewer,"  has attached near null space\n");}
1034:     }
1035: #if defined(PETSC_HAVE_SAWS)
1036:   } else if (issaws) {
1037:     PetscMPIInt rank;

1039:     PetscObjectName((PetscObject)mat);
1040:     MPI_Comm_rank(PETSC_COMM_WORLD,&rank);
1041:     if (!((PetscObject)mat)->amsmem && !rank) {
1042:       PetscObjectViewSAWs((PetscObject)mat,viewer);
1043:     }
1044: #endif
1045:   }
1046:   if ((format == PETSC_VIEWER_NATIVE || format == PETSC_VIEWER_LOAD_BALANCE) && mat->ops->viewnative) {
1047:     PetscViewerASCIIPushTab(viewer);
1048:     (*mat->ops->viewnative)(mat,viewer);
1049:     PetscViewerASCIIPopTab(viewer);
1050:   } else if (mat->ops->view) {
1051:     PetscViewerASCIIPushTab(viewer);
1052:     (*mat->ops->view)(mat,viewer);
1053:     PetscViewerASCIIPopTab(viewer);
1054:   }
1055:   if (iascii) {
1056:     if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ORDER,"Must call MatAssemblyBegin/End() before viewing matrix");
1057:     PetscViewerGetFormat(viewer,&format);
1058:     if (format == PETSC_VIEWER_ASCII_INFO || format == PETSC_VIEWER_ASCII_INFO_DETAIL) {
1059:       PetscViewerASCIIPopTab(viewer);
1060:     }
1061:   }
1062:   PetscLogEventEnd(MAT_View,mat,viewer,0,0);
1063:   return(0);
1064: }

1066: #if defined(PETSC_USE_DEBUG)
1067: #include <../src/sys/totalview/tv_data_display.h>
1068: PETSC_UNUSED static int TV_display_type(const struct _p_Mat *mat)
1069: {
1070:   TV_add_row("Local rows", "int", &mat->rmap->n);
1071:   TV_add_row("Local columns", "int", &mat->cmap->n);
1072:   TV_add_row("Global rows", "int", &mat->rmap->N);
1073:   TV_add_row("Global columns", "int", &mat->cmap->N);
1074:   TV_add_row("Typename", TV_ascii_string_type, ((PetscObject)mat)->type_name);
1075:   return TV_format_OK;
1076: }
1077: #endif

1079: /*@C
1080:    MatLoad - Loads a matrix that has been stored in binary format
1081:    with MatView().  The matrix format is determined from the options database.
1082:    Generates a parallel MPI matrix if the communicator has more than one
1083:    processor.  The default matrix type is AIJ.

1085:    Collective on PetscViewer

1087:    Input Parameters:
1088: +  newmat - the newly loaded matrix, this needs to have been created with MatCreate()
1089:             or some related function before a call to MatLoad()
1090: -  viewer - binary file viewer, created with PetscViewerBinaryOpen()

1092:    Options Database Keys:
1093:    Used with block matrix formats (MATSEQBAIJ,  ...) to specify
1094:    block size
1095: .    -matload_block_size <bs>

1097:    Level: beginner

1099:    Notes:
1100:    If the Mat type has not yet been given then MATAIJ is used, call MatSetFromOptions() on the
1101:    Mat before calling this routine if you wish to set it from the options database.

1103:    MatLoad() automatically loads into the options database any options
1104:    given in the file filename.info where filename is the name of the file
1105:    that was passed to the PetscViewerBinaryOpen(). The options in the info
1106:    file will be ignored if you use the -viewer_binary_skip_info option.

1108:    If the type or size of newmat is not set before a call to MatLoad, PETSc
1109:    sets the default matrix type AIJ and sets the local and global sizes.
1110:    If type and/or size is already set, then the same are used.

1112:    In parallel, each processor can load a subset of rows (or the
1113:    entire matrix).  This routine is especially useful when a large
1114:    matrix is stored on disk and only part of it is desired on each
1115:    processor.  For example, a parallel solver may access only some of
1116:    the rows from each processor.  The algorithm used here reads
1117:    relatively small blocks of data rather than reading the entire
1118:    matrix and then subsetting it.

1120:    Notes for advanced users:
1121:    Most users should not need to know the details of the binary storage
1122:    format, since MatLoad() and MatView() completely hide these details.
1123:    But for anyone who's interested, the standard binary matrix storage
1124:    format is

1126: $    int    MAT_FILE_CLASSID
1127: $    int    number of rows
1128: $    int    number of columns
1129: $    int    total number of nonzeros
1130: $    int    *number nonzeros in each row
1131: $    int    *column indices of all nonzeros (starting index is zero)
1132: $    PetscScalar *values of all nonzeros

1134:    PETSc automatically does the byte swapping for
1135: machines that store the bytes reversed, e.g.  DEC alpha, freebsd,
1136: linux, Windows and the paragon; thus if you write your own binary
1137: read/write routines you have to swap the bytes; see PetscBinaryRead()
1138: and PetscBinaryWrite() to see how this may be done.

1140: .keywords: matrix, load, binary, input

1142: .seealso: PetscViewerBinaryOpen(), MatView(), VecLoad()

1144:  @*/
1145: PetscErrorCode MatLoad(Mat newmat,PetscViewer viewer)
1146: {
1148:   PetscBool      isbinary,flg;

1153:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERBINARY,&isbinary);
1154:   if (!isbinary) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Invalid viewer; open viewer with PetscViewerBinaryOpen()");

1156:   if (!((PetscObject)newmat)->type_name) {
1157:     MatSetType(newmat,MATAIJ);
1158:   }

1160:   if (!newmat->ops->load) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"MatLoad is not supported for type");
1161:   PetscLogEventBegin(MAT_Load,viewer,0,0,0);
1162:   (*newmat->ops->load)(newmat,viewer);
1163:   PetscLogEventEnd(MAT_Load,viewer,0,0,0);

1165:   flg  = PETSC_FALSE;
1166:   PetscOptionsGetBool(((PetscObject)newmat)->options,((PetscObject)newmat)->prefix,"-matload_symmetric",&flg,NULL);
1167:   if (flg) {
1168:     MatSetOption(newmat,MAT_SYMMETRIC,PETSC_TRUE);
1169:     MatSetOption(newmat,MAT_SYMMETRY_ETERNAL,PETSC_TRUE);
1170:   }
1171:   flg  = PETSC_FALSE;
1172:   PetscOptionsGetBool(((PetscObject)newmat)->options,((PetscObject)newmat)->prefix,"-matload_spd",&flg,NULL);
1173:   if (flg) {
1174:     MatSetOption(newmat,MAT_SPD,PETSC_TRUE);
1175:   }
1176:   return(0);
1177: }

1179: PetscErrorCode MatDestroy_Redundant(Mat_Redundant **redundant)
1180: {
1182:   Mat_Redundant  *redund = *redundant;
1183:   PetscInt       i;

1186:   if (redund){
1187:     if (redund->matseq) { /* via MatCreateSubMatrices()  */
1188:       ISDestroy(&redund->isrow);
1189:       ISDestroy(&redund->iscol);
1190:       MatDestroySubMatrices(1,&redund->matseq);
1191:     } else {
1192:       PetscFree2(redund->send_rank,redund->recv_rank);
1193:       PetscFree(redund->sbuf_j);
1194:       PetscFree(redund->sbuf_a);
1195:       for (i=0; i<redund->nrecvs; i++) {
1196:         PetscFree(redund->rbuf_j[i]);
1197:         PetscFree(redund->rbuf_a[i]);
1198:       }
1199:       PetscFree4(redund->sbuf_nz,redund->rbuf_nz,redund->rbuf_j,redund->rbuf_a);
1200:     }

1202:     if (redund->subcomm) {
1203:       PetscCommDestroy(&redund->subcomm);
1204:     }
1205:     PetscFree(redund);
1206:   }
1207:   return(0);
1208: }

1210: /*@
1211:    MatDestroy - Frees space taken by a matrix.

1213:    Collective on Mat

1215:    Input Parameter:
1216: .  A - the matrix

1218:    Level: beginner

1220: @*/
1221: PetscErrorCode MatDestroy(Mat *A)
1222: {

1226:   if (!*A) return(0);
1228:   if (--((PetscObject)(*A))->refct > 0) {*A = NULL; return(0);}

1230:   /* if memory was published with SAWs then destroy it */
1231:   PetscObjectSAWsViewOff((PetscObject)*A);
1232:   if ((*A)->ops->destroy) {
1233:     (*(*A)->ops->destroy)(*A);
1234:   }

1236:   PetscFree((*A)->solvertype);
1237:   MatDestroy_Redundant(&(*A)->redundant);
1238:   MatNullSpaceDestroy(&(*A)->nullsp);
1239:   MatNullSpaceDestroy(&(*A)->transnullsp);
1240:   MatNullSpaceDestroy(&(*A)->nearnullsp);
1241:   MatDestroy(&(*A)->schur);
1242:   PetscLayoutDestroy(&(*A)->rmap);
1243:   PetscLayoutDestroy(&(*A)->cmap);
1244:   PetscHeaderDestroy(A);
1245:   return(0);
1246: }

1248: /*@C
1249:    MatSetValues - Inserts or adds a block of values into a matrix.
1250:    These values may be cached, so MatAssemblyBegin() and MatAssemblyEnd()
1251:    MUST be called after all calls to MatSetValues() have been completed.

1253:    Not Collective

1255:    Input Parameters:
1256: +  mat - the matrix
1257: .  v - a logically two-dimensional array of values
1258: .  m, idxm - the number of rows and their global indices
1259: .  n, idxn - the number of columns and their global indices
1260: -  addv - either ADD_VALUES or INSERT_VALUES, where
1261:    ADD_VALUES adds values to any existing entries, and
1262:    INSERT_VALUES replaces existing entries with new values

1264:    Notes:
1265:    If you create the matrix yourself (that is not with a call to DMCreateMatrix()) then you MUST call MatXXXXSetPreallocation() or
1266:       MatSetUp() before using this routine

1268:    By default the values, v, are row-oriented. See MatSetOption() for other options.

1270:    Calls to MatSetValues() with the INSERT_VALUES and ADD_VALUES
1271:    options cannot be mixed without intervening calls to the assembly
1272:    routines.

1274:    MatSetValues() uses 0-based row and column numbers in Fortran
1275:    as well as in C.

1277:    Negative indices may be passed in idxm and idxn, these rows and columns are
1278:    simply ignored. This allows easily inserting element stiffness matrices
1279:    with homogeneous Dirchlet boundary conditions that you don't want represented
1280:    in the matrix.

1282:    Efficiency Alert:
1283:    The routine MatSetValuesBlocked() may offer much better efficiency
1284:    for users of block sparse formats (MATSEQBAIJ and MATMPIBAIJ).

1286:    Level: beginner

1288:    Developer Notes: This is labeled with C so does not automatically generate Fortran stubs and interfaces
1289:                     because it requires multiple Fortran interfaces depending on which arguments are scalar or arrays.

1291:    Concepts: matrices^putting entries in

1293: .seealso: MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValuesBlocked(), MatSetValuesLocal(),
1294:           InsertMode, INSERT_VALUES, ADD_VALUES
1295: @*/
1296: PetscErrorCode MatSetValues(Mat mat,PetscInt m,const PetscInt idxm[],PetscInt n,const PetscInt idxn[],const PetscScalar v[],InsertMode addv)
1297: {
1299: #if defined(PETSC_USE_DEBUG)
1300:   PetscInt       i,j;
1301: #endif

1306:   if (!m || !n) return(0); /* no values to insert */
1310:   MatCheckPreallocated(mat,1);
1311:   if (mat->insertmode == NOT_SET_VALUES) {
1312:     mat->insertmode = addv;
1313:   }
1314: #if defined(PETSC_USE_DEBUG)
1315:   else if (mat->insertmode != addv) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Cannot mix add values and insert values");
1316:   if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
1317:   if (!mat->ops->setvalues) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);

1319:   for (i=0; i<m; i++) {
1320:     for (j=0; j<n; j++) {
1321:       if (mat->erroriffailure && PetscIsInfOrNanScalar(v[i*n+j]))
1322: #if defined(PETSC_USE_COMPLEX)
1323:         SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_FP,"Inserting %g+ig at matrix entry (%D,%D)",(double)PetscRealPart(v[i*n+j]),(double)PetscImaginaryPart(v[i*n+j]),idxm[i],idxn[j]);
1324: #else
1325:         SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_FP,"Inserting %g at matrix entry (%D,%D)",(double)v[i*n+j],idxm[i],idxn[j]);
1326: #endif
1327:     }
1328:   }
1329: #endif

1331:   if (mat->assembled) {
1332:     mat->was_assembled = PETSC_TRUE;
1333:     mat->assembled     = PETSC_FALSE;
1334:   }
1335:   PetscLogEventBegin(MAT_SetValues,mat,0,0,0);
1336:   (*mat->ops->setvalues)(mat,m,idxm,n,idxn,v,addv);
1337:   PetscLogEventEnd(MAT_SetValues,mat,0,0,0);
1338: #if defined(PETSC_HAVE_CUSP)
1339:   if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
1340:     mat->valid_GPU_matrix = PETSC_CUSP_CPU;
1341:   }
1342: #elif defined(PETSC_HAVE_VIENNACL)
1343:   if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
1344:     mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
1345:   }
1346: #elif defined(PETSC_HAVE_VECCUDA)
1347:   if (mat->valid_GPU_matrix != PETSC_CUDA_UNALLOCATED) {
1348:     mat->valid_GPU_matrix = PETSC_CUDA_CPU;
1349:   }
1350: #endif
1351:   return(0);
1352: }


1355: /*@
1356:    MatSetValuesRowLocal - Inserts a row (block row for BAIJ matrices) of nonzero
1357:         values into a matrix

1359:    Not Collective

1361:    Input Parameters:
1362: +  mat - the matrix
1363: .  row - the (block) row to set
1364: -  v - a logically two-dimensional array of values

1366:    Notes:
1367:    By the values, v, are column-oriented (for the block version) and sorted

1369:    All the nonzeros in the row must be provided

1371:    The matrix must have previously had its column indices set

1373:    The row must belong to this process

1375:    Level: intermediate

1377:    Concepts: matrices^putting entries in

1379: .seealso: MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValuesBlocked(), MatSetValuesLocal(),
1380:           InsertMode, INSERT_VALUES, ADD_VALUES, MatSetValues(), MatSetValuesRow(), MatSetLocalToGlobalMapping()
1381: @*/
1382: PetscErrorCode MatSetValuesRowLocal(Mat mat,PetscInt row,const PetscScalar v[])
1383: {
1385:   PetscInt       globalrow;

1391:   ISLocalToGlobalMappingApply(mat->rmap->mapping,1,&row,&globalrow);
1392:   MatSetValuesRow(mat,globalrow,v);
1393: #if defined(PETSC_HAVE_CUSP)
1394:   if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
1395:     mat->valid_GPU_matrix = PETSC_CUSP_CPU;
1396:   }
1397: #elif defined(PETSC_HAVE_VIENNACL)
1398:   if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
1399:     mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
1400:   }
1401: #elif defined(PETSC_HAVE_VECCUDA)
1402:   if (mat->valid_GPU_matrix != PETSC_CUDA_UNALLOCATED) {
1403:     mat->valid_GPU_matrix = PETSC_CUDA_CPU;
1404:   }
1405: #endif
1406:   return(0);
1407: }

1409: /*@
1410:    MatSetValuesRow - Inserts a row (block row for BAIJ matrices) of nonzero
1411:         values into a matrix

1413:    Not Collective

1415:    Input Parameters:
1416: +  mat - the matrix
1417: .  row - the (block) row to set
1418: -  v - a logically two-dimensional (column major) array of values for  block matrices with blocksize larger than one, otherwise a one dimensional array of values

1420:    Notes:
1421:    The values, v, are column-oriented for the block version.

1423:    All the nonzeros in the row must be provided

1425:    THE MATRIX MUST HAVE PREVIOUSLY HAD ITS COLUMN INDICES SET. IT IS RARE THAT THIS ROUTINE IS USED, usually MatSetValues() is used.

1427:    The row must belong to this process

1429:    Level: advanced

1431:    Concepts: matrices^putting entries in

1433: .seealso: MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValuesBlocked(), MatSetValuesLocal(),
1434:           InsertMode, INSERT_VALUES, ADD_VALUES, MatSetValues()
1435: @*/
1436: PetscErrorCode MatSetValuesRow(Mat mat,PetscInt row,const PetscScalar v[])
1437: {

1443:   MatCheckPreallocated(mat,1);
1445: #if defined(PETSC_USE_DEBUG)
1446:   if (mat->insertmode == ADD_VALUES) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Cannot mix add and insert values");
1447:   if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
1448: #endif
1449:   mat->insertmode = INSERT_VALUES;

1451:   if (mat->assembled) {
1452:     mat->was_assembled = PETSC_TRUE;
1453:     mat->assembled     = PETSC_FALSE;
1454:   }
1455:   PetscLogEventBegin(MAT_SetValues,mat,0,0,0);
1456:   if (!mat->ops->setvaluesrow) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
1457:   (*mat->ops->setvaluesrow)(mat,row,v);
1458:   PetscLogEventEnd(MAT_SetValues,mat,0,0,0);
1459: #if defined(PETSC_HAVE_CUSP)
1460:   if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
1461:     mat->valid_GPU_matrix = PETSC_CUSP_CPU;
1462:   }
1463: #elif defined(PETSC_HAVE_VIENNACL)
1464:   if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
1465:     mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
1466:   }
1467: #elif defined(PETSC_HAVE_VECCUDA)
1468:   if (mat->valid_GPU_matrix != PETSC_CUDA_UNALLOCATED) {
1469:     mat->valid_GPU_matrix = PETSC_CUDA_CPU;
1470:   }
1471: #endif
1472:   return(0);
1473: }

1475: /*@
1476:    MatSetValuesStencil - Inserts or adds a block of values into a matrix.
1477:      Using structured grid indexing

1479:    Not Collective

1481:    Input Parameters:
1482: +  mat - the matrix
1483: .  m - number of rows being entered
1484: .  idxm - grid coordinates (and component number when dof > 1) for matrix rows being entered
1485: .  n - number of columns being entered
1486: .  idxn - grid coordinates (and component number when dof > 1) for matrix columns being entered
1487: .  v - a logically two-dimensional array of values
1488: -  addv - either ADD_VALUES or INSERT_VALUES, where
1489:    ADD_VALUES adds values to any existing entries, and
1490:    INSERT_VALUES replaces existing entries with new values

1492:    Notes:
1493:    By default the values, v, are row-oriented.  See MatSetOption() for other options.

1495:    Calls to MatSetValuesStencil() with the INSERT_VALUES and ADD_VALUES
1496:    options cannot be mixed without intervening calls to the assembly
1497:    routines.

1499:    The grid coordinates are across the entire grid, not just the local portion

1501:    MatSetValuesStencil() uses 0-based row and column numbers in Fortran
1502:    as well as in C.

1504:    For setting/accessing vector values via array coordinates you can use the DMDAVecGetArray() routine

1506:    In order to use this routine you must either obtain the matrix with DMCreateMatrix()
1507:    or call MatSetLocalToGlobalMapping() and MatSetStencil() first.

1509:    The columns and rows in the stencil passed in MUST be contained within the
1510:    ghost region of the given process as set with DMDACreateXXX() or MatSetStencil(). For example,
1511:    if you create a DMDA with an overlap of one grid level and on a particular process its first
1512:    local nonghost x logical coordinate is 6 (so its first ghost x logical coordinate is 5) the
1513:    first i index you can use in your column and row indices in MatSetStencil() is 5.

1515:    In Fortran idxm and idxn should be declared as
1516: $     MatStencil idxm(4,m),idxn(4,n)
1517:    and the values inserted using
1518: $    idxm(MatStencil_i,1) = i
1519: $    idxm(MatStencil_j,1) = j
1520: $    idxm(MatStencil_k,1) = k
1521: $    idxm(MatStencil_c,1) = c
1522:    etc

1524:    For periodic boundary conditions use negative indices for values to the left (below 0; that are to be
1525:    obtained by wrapping values from right edge). For values to the right of the last entry using that index plus one
1526:    etc to obtain values that obtained by wrapping the values from the left edge. This does not work for anything but the
1527:    DM_BOUNDARY_PERIODIC boundary type.

1529:    For indices that don't mean anything for your case (like the k index when working in 2d) or the c index when you have
1530:    a single value per point) you can skip filling those indices.

1532:    Inspired by the structured grid interface to the HYPRE package
1533:    (http://www.llnl.gov/CASC/hypre)

1535:    Efficiency Alert:
1536:    The routine MatSetValuesBlockedStencil() may offer much better efficiency
1537:    for users of block sparse formats (MATSEQBAIJ and MATMPIBAIJ).

1539:    Level: beginner

1541:    Concepts: matrices^putting entries in

1543: .seealso: MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValuesBlocked(), MatSetValuesLocal()
1544:           MatSetValues(), MatSetValuesBlockedStencil(), MatSetStencil(), DMCreateMatrix(), DMDAVecGetArray(), MatStencil
1545: @*/
1546: PetscErrorCode MatSetValuesStencil(Mat mat,PetscInt m,const MatStencil idxm[],PetscInt n,const MatStencil idxn[],const PetscScalar v[],InsertMode addv)
1547: {
1549:   PetscInt       buf[8192],*bufm=0,*bufn=0,*jdxm,*jdxn;
1550:   PetscInt       j,i,dim = mat->stencil.dim,*dims = mat->stencil.dims+1,tmp;
1551:   PetscInt       *starts = mat->stencil.starts,*dxm = (PetscInt*)idxm,*dxn = (PetscInt*)idxn,sdim = dim - (1 - (PetscInt)mat->stencil.noc);

1554:   if (!m || !n) return(0); /* no values to insert */

1561:   if ((m+n) <= (PetscInt)(sizeof(buf)/sizeof(PetscInt))) {
1562:     jdxm = buf; jdxn = buf+m;
1563:   } else {
1564:     PetscMalloc2(m,&bufm,n,&bufn);
1565:     jdxm = bufm; jdxn = bufn;
1566:   }
1567:   for (i=0; i<m; i++) {
1568:     for (j=0; j<3-sdim; j++) dxm++;
1569:     tmp = *dxm++ - starts[0];
1570:     for (j=0; j<dim-1; j++) {
1571:       if ((*dxm++ - starts[j+1]) < 0 || tmp < 0) tmp = -1;
1572:       else                                       tmp = tmp*dims[j] + *(dxm-1) - starts[j+1];
1573:     }
1574:     if (mat->stencil.noc) dxm++;
1575:     jdxm[i] = tmp;
1576:   }
1577:   for (i=0; i<n; i++) {
1578:     for (j=0; j<3-sdim; j++) dxn++;
1579:     tmp = *dxn++ - starts[0];
1580:     for (j=0; j<dim-1; j++) {
1581:       if ((*dxn++ - starts[j+1]) < 0 || tmp < 0) tmp = -1;
1582:       else                                       tmp = tmp*dims[j] + *(dxn-1) - starts[j+1];
1583:     }
1584:     if (mat->stencil.noc) dxn++;
1585:     jdxn[i] = tmp;
1586:   }
1587:   MatSetValuesLocal(mat,m,jdxm,n,jdxn,v,addv);
1588:   PetscFree2(bufm,bufn);
1589:   return(0);
1590: }

1592: /*@
1593:    MatSetValuesBlockedStencil - Inserts or adds a block of values into a matrix.
1594:      Using structured grid indexing

1596:    Not Collective

1598:    Input Parameters:
1599: +  mat - the matrix
1600: .  m - number of rows being entered
1601: .  idxm - grid coordinates for matrix rows being entered
1602: .  n - number of columns being entered
1603: .  idxn - grid coordinates for matrix columns being entered
1604: .  v - a logically two-dimensional array of values
1605: -  addv - either ADD_VALUES or INSERT_VALUES, where
1606:    ADD_VALUES adds values to any existing entries, and
1607:    INSERT_VALUES replaces existing entries with new values

1609:    Notes:
1610:    By default the values, v, are row-oriented and unsorted.
1611:    See MatSetOption() for other options.

1613:    Calls to MatSetValuesBlockedStencil() with the INSERT_VALUES and ADD_VALUES
1614:    options cannot be mixed without intervening calls to the assembly
1615:    routines.

1617:    The grid coordinates are across the entire grid, not just the local portion

1619:    MatSetValuesBlockedStencil() uses 0-based row and column numbers in Fortran
1620:    as well as in C.

1622:    For setting/accessing vector values via array coordinates you can use the DMDAVecGetArray() routine

1624:    In order to use this routine you must either obtain the matrix with DMCreateMatrix()
1625:    or call MatSetBlockSize(), MatSetLocalToGlobalMapping() and MatSetStencil() first.

1627:    The columns and rows in the stencil passed in MUST be contained within the
1628:    ghost region of the given process as set with DMDACreateXXX() or MatSetStencil(). For example,
1629:    if you create a DMDA with an overlap of one grid level and on a particular process its first
1630:    local nonghost x logical coordinate is 6 (so its first ghost x logical coordinate is 5) the
1631:    first i index you can use in your column and row indices in MatSetStencil() is 5.

1633:    In Fortran idxm and idxn should be declared as
1634: $     MatStencil idxm(4,m),idxn(4,n)
1635:    and the values inserted using
1636: $    idxm(MatStencil_i,1) = i
1637: $    idxm(MatStencil_j,1) = j
1638: $    idxm(MatStencil_k,1) = k
1639:    etc

1641:    Negative indices may be passed in idxm and idxn, these rows and columns are
1642:    simply ignored. This allows easily inserting element stiffness matrices
1643:    with homogeneous Dirchlet boundary conditions that you don't want represented
1644:    in the matrix.

1646:    Inspired by the structured grid interface to the HYPRE package
1647:    (http://www.llnl.gov/CASC/hypre)

1649:    Level: beginner

1651:    Concepts: matrices^putting entries in

1653: .seealso: MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValuesBlocked(), MatSetValuesLocal()
1654:           MatSetValues(), MatSetValuesStencil(), MatSetStencil(), DMCreateMatrix(), DMDAVecGetArray(), MatStencil,
1655:           MatSetBlockSize(), MatSetLocalToGlobalMapping()
1656: @*/
1657: PetscErrorCode MatSetValuesBlockedStencil(Mat mat,PetscInt m,const MatStencil idxm[],PetscInt n,const MatStencil idxn[],const PetscScalar v[],InsertMode addv)
1658: {
1660:   PetscInt       buf[8192],*bufm=0,*bufn=0,*jdxm,*jdxn;
1661:   PetscInt       j,i,dim = mat->stencil.dim,*dims = mat->stencil.dims+1,tmp;
1662:   PetscInt       *starts = mat->stencil.starts,*dxm = (PetscInt*)idxm,*dxn = (PetscInt*)idxn,sdim = dim - (1 - (PetscInt)mat->stencil.noc);

1665:   if (!m || !n) return(0); /* no values to insert */

1672:   if ((m+n) <= (PetscInt)(sizeof(buf)/sizeof(PetscInt))) {
1673:     jdxm = buf; jdxn = buf+m;
1674:   } else {
1675:     PetscMalloc2(m,&bufm,n,&bufn);
1676:     jdxm = bufm; jdxn = bufn;
1677:   }
1678:   for (i=0; i<m; i++) {
1679:     for (j=0; j<3-sdim; j++) dxm++;
1680:     tmp = *dxm++ - starts[0];
1681:     for (j=0; j<sdim-1; j++) {
1682:       if ((*dxm++ - starts[j+1]) < 0 || tmp < 0) tmp = -1;
1683:       else                                       tmp = tmp*dims[j] + *(dxm-1) - starts[j+1];
1684:     }
1685:     dxm++;
1686:     jdxm[i] = tmp;
1687:   }
1688:   for (i=0; i<n; i++) {
1689:     for (j=0; j<3-sdim; j++) dxn++;
1690:     tmp = *dxn++ - starts[0];
1691:     for (j=0; j<sdim-1; j++) {
1692:       if ((*dxn++ - starts[j+1]) < 0 || tmp < 0) tmp = -1;
1693:       else                                       tmp = tmp*dims[j] + *(dxn-1) - starts[j+1];
1694:     }
1695:     dxn++;
1696:     jdxn[i] = tmp;
1697:   }
1698:   MatSetValuesBlockedLocal(mat,m,jdxm,n,jdxn,v,addv);
1699:   PetscFree2(bufm,bufn);
1700: #if defined(PETSC_HAVE_CUSP)
1701:   if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
1702:     mat->valid_GPU_matrix = PETSC_CUSP_CPU;
1703:   }
1704: #elif defined(PETSC_HAVE_VIENNACL)
1705:   if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
1706:     mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
1707:   }
1708: #elif defined(PETSC_HAVE_VECCUDA)
1709:   if (mat->valid_GPU_matrix != PETSC_CUDA_UNALLOCATED) {
1710:     mat->valid_GPU_matrix = PETSC_CUDA_CPU;
1711:   }
1712: #endif
1713:   return(0);
1714: }

1716: /*@
1717:    MatSetStencil - Sets the grid information for setting values into a matrix via
1718:         MatSetValuesStencil()

1720:    Not Collective

1722:    Input Parameters:
1723: +  mat - the matrix
1724: .  dim - dimension of the grid 1, 2, or 3
1725: .  dims - number of grid points in x, y, and z direction, including ghost points on your processor
1726: .  starts - starting point of ghost nodes on your processor in x, y, and z direction
1727: -  dof - number of degrees of freedom per node


1730:    Inspired by the structured grid interface to the HYPRE package
1731:    (www.llnl.gov/CASC/hyper)

1733:    For matrices generated with DMCreateMatrix() this routine is automatically called and so not needed by the
1734:    user.

1736:    Level: beginner

1738:    Concepts: matrices^putting entries in

1740: .seealso: MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValuesBlocked(), MatSetValuesLocal()
1741:           MatSetValues(), MatSetValuesBlockedStencil(), MatSetValuesStencil()
1742: @*/
1743: PetscErrorCode MatSetStencil(Mat mat,PetscInt dim,const PetscInt dims[],const PetscInt starts[],PetscInt dof)
1744: {
1745:   PetscInt i;


1752:   mat->stencil.dim = dim + (dof > 1);
1753:   for (i=0; i<dim; i++) {
1754:     mat->stencil.dims[i]   = dims[dim-i-1];      /* copy the values in backwards */
1755:     mat->stencil.starts[i] = starts[dim-i-1];
1756:   }
1757:   mat->stencil.dims[dim]   = dof;
1758:   mat->stencil.starts[dim] = 0;
1759:   mat->stencil.noc         = (PetscBool)(dof == 1);
1760:   return(0);
1761: }

1763: /*@C
1764:    MatSetValuesBlocked - Inserts or adds a block of values into a matrix.

1766:    Not Collective

1768:    Input Parameters:
1769: +  mat - the matrix
1770: .  v - a logically two-dimensional array of values
1771: .  m, idxm - the number of block rows and their global block indices
1772: .  n, idxn - the number of block columns and their global block indices
1773: -  addv - either ADD_VALUES or INSERT_VALUES, where
1774:    ADD_VALUES adds values to any existing entries, and
1775:    INSERT_VALUES replaces existing entries with new values

1777:    Notes:
1778:    If you create the matrix yourself (that is not with a call to DMCreateMatrix()) then you MUST call
1779:    MatXXXXSetPreallocation() or MatSetUp() before using this routine.

1781:    The m and n count the NUMBER of blocks in the row direction and column direction,
1782:    NOT the total number of rows/columns; for example, if the block size is 2 and
1783:    you are passing in values for rows 2,3,4,5  then m would be 2 (not 4).
1784:    The values in idxm would be 1 2; that is the first index for each block divided by
1785:    the block size.

1787:    Note that you must call MatSetBlockSize() when constructing this matrix (before
1788:    preallocating it).

1790:    By default the values, v, are row-oriented, so the layout of
1791:    v is the same as for MatSetValues(). See MatSetOption() for other options.

1793:    Calls to MatSetValuesBlocked() with the INSERT_VALUES and ADD_VALUES
1794:    options cannot be mixed without intervening calls to the assembly
1795:    routines.

1797:    MatSetValuesBlocked() uses 0-based row and column numbers in Fortran
1798:    as well as in C.

1800:    Negative indices may be passed in idxm and idxn, these rows and columns are
1801:    simply ignored. This allows easily inserting element stiffness matrices
1802:    with homogeneous Dirchlet boundary conditions that you don't want represented
1803:    in the matrix.

1805:    Each time an entry is set within a sparse matrix via MatSetValues(),
1806:    internal searching must be done to determine where to place the
1807:    data in the matrix storage space.  By instead inserting blocks of
1808:    entries via MatSetValuesBlocked(), the overhead of matrix assembly is
1809:    reduced.

1811:    Example:
1812: $   Suppose m=n=2 and block size(bs) = 2 The array is
1813: $
1814: $   1  2  | 3  4
1815: $   5  6  | 7  8
1816: $   - - - | - - -
1817: $   9  10 | 11 12
1818: $   13 14 | 15 16
1819: $
1820: $   v[] should be passed in like
1821: $   v[] = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16]
1822: $
1823: $  If you are not using row oriented storage of v (that is you called MatSetOption(mat,MAT_ROW_ORIENTED,PETSC_FALSE)) then
1824: $   v[] = [1,5,9,13,2,6,10,14,3,7,11,15,4,8,12,16]

1826:    Level: intermediate

1828:    Concepts: matrices^putting entries in blocked

1830: .seealso: MatSetBlockSize(), MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValues(), MatSetValuesBlockedLocal()
1831: @*/
1832: PetscErrorCode MatSetValuesBlocked(Mat mat,PetscInt m,const PetscInt idxm[],PetscInt n,const PetscInt idxn[],const PetscScalar v[],InsertMode addv)
1833: {

1839:   if (!m || !n) return(0); /* no values to insert */
1843:   MatCheckPreallocated(mat,1);
1844:   if (mat->insertmode == NOT_SET_VALUES) {
1845:     mat->insertmode = addv;
1846:   }
1847: #if defined(PETSC_USE_DEBUG)
1848:   else if (mat->insertmode != addv) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Cannot mix add values and insert values");
1849:   if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
1850:   if (!mat->ops->setvaluesblocked && !mat->ops->setvalues) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
1851: #endif

1853:   if (mat->assembled) {
1854:     mat->was_assembled = PETSC_TRUE;
1855:     mat->assembled     = PETSC_FALSE;
1856:   }
1857:   PetscLogEventBegin(MAT_SetValues,mat,0,0,0);
1858:   if (mat->ops->setvaluesblocked) {
1859:     (*mat->ops->setvaluesblocked)(mat,m,idxm,n,idxn,v,addv);
1860:   } else {
1861:     PetscInt buf[8192],*bufr=0,*bufc=0,*iidxm,*iidxn;
1862:     PetscInt i,j,bs,cbs;
1863:     MatGetBlockSizes(mat,&bs,&cbs);
1864:     if (m*bs+n*cbs <= (PetscInt)(sizeof(buf)/sizeof(PetscInt))) {
1865:       iidxm = buf; iidxn = buf + m*bs;
1866:     } else {
1867:       PetscMalloc2(m*bs,&bufr,n*cbs,&bufc);
1868:       iidxm = bufr; iidxn = bufc;
1869:     }
1870:     for (i=0; i<m; i++) {
1871:       for (j=0; j<bs; j++) {
1872:         iidxm[i*bs+j] = bs*idxm[i] + j;
1873:       }
1874:     }
1875:     for (i=0; i<n; i++) {
1876:       for (j=0; j<cbs; j++) {
1877:         iidxn[i*cbs+j] = cbs*idxn[i] + j;
1878:       }
1879:     }
1880:     MatSetValues(mat,m*bs,iidxm,n*cbs,iidxn,v,addv);
1881:     PetscFree2(bufr,bufc);
1882:   }
1883:   PetscLogEventEnd(MAT_SetValues,mat,0,0,0);
1884: #if defined(PETSC_HAVE_CUSP)
1885:   if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
1886:     mat->valid_GPU_matrix = PETSC_CUSP_CPU;
1887:   }
1888: #elif defined(PETSC_HAVE_VIENNACL)
1889:   if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
1890:     mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
1891:   }
1892: #elif defined(PETSC_HAVE_VECCUDA)
1893:   if (mat->valid_GPU_matrix != PETSC_CUDA_UNALLOCATED) {
1894:     mat->valid_GPU_matrix = PETSC_CUDA_CPU;
1895:   }
1896: #endif
1897:   return(0);
1898: }

1900: /*@
1901:    MatGetValues - Gets a block of values from a matrix.

1903:    Not Collective; currently only returns a local block

1905:    Input Parameters:
1906: +  mat - the matrix
1907: .  v - a logically two-dimensional array for storing the values
1908: .  m, idxm - the number of rows and their global indices
1909: -  n, idxn - the number of columns and their global indices

1911:    Notes:
1912:    The user must allocate space (m*n PetscScalars) for the values, v.
1913:    The values, v, are then returned in a row-oriented format,
1914:    analogous to that used by default in MatSetValues().

1916:    MatGetValues() uses 0-based row and column numbers in
1917:    Fortran as well as in C.

1919:    MatGetValues() requires that the matrix has been assembled
1920:    with MatAssemblyBegin()/MatAssemblyEnd().  Thus, calls to
1921:    MatSetValues() and MatGetValues() CANNOT be made in succession
1922:    without intermediate matrix assembly.

1924:    Negative row or column indices will be ignored and those locations in v[] will be
1925:    left unchanged.

1927:    Level: advanced

1929:    Concepts: matrices^accessing values

1931: .seealso: MatGetRow(), MatCreateSubMatrices(), MatSetValues()
1932: @*/
1933: PetscErrorCode MatGetValues(Mat mat,PetscInt m,const PetscInt idxm[],PetscInt n,const PetscInt idxn[],PetscScalar v[])
1934: {

1940:   if (!m || !n) return(0);
1944:   if (!mat->assembled) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
1945:   if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
1946:   if (!mat->ops->getvalues) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
1947:   MatCheckPreallocated(mat,1);

1949:   PetscLogEventBegin(MAT_GetValues,mat,0,0,0);
1950:   (*mat->ops->getvalues)(mat,m,idxm,n,idxn,v);
1951:   PetscLogEventEnd(MAT_GetValues,mat,0,0,0);
1952:   return(0);
1953: }

1955: /*@
1956:   MatSetValuesBatch - Adds (ADD_VALUES) many blocks of values into a matrix at once. The blocks must all be square and
1957:   the same size. Currently, this can only be called once and creates the given matrix.

1959:   Not Collective

1961:   Input Parameters:
1962: + mat - the matrix
1963: . nb - the number of blocks
1964: . bs - the number of rows (and columns) in each block
1965: . rows - a concatenation of the rows for each block
1966: - v - a concatenation of logically two-dimensional arrays of values

1968:   Notes:
1969:   In the future, we will extend this routine to handle rectangular blocks, and to allow multiple calls for a given matrix.

1971:   Level: advanced

1973:   Concepts: matrices^putting entries in

1975: .seealso: MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValuesBlocked(), MatSetValuesLocal(),
1976:           InsertMode, INSERT_VALUES, ADD_VALUES, MatSetValues()
1977: @*/
1978: PetscErrorCode MatSetValuesBatch(Mat mat, PetscInt nb, PetscInt bs, PetscInt rows[], const PetscScalar v[])
1979: {

1987: #if defined(PETSC_USE_DEBUG)
1988:   if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
1989: #endif

1991:   PetscLogEventBegin(MAT_SetValuesBatch,mat,0,0,0);
1992:   if (mat->ops->setvaluesbatch) {
1993:     (*mat->ops->setvaluesbatch)(mat,nb,bs,rows,v);
1994:   } else {
1995:     PetscInt b;
1996:     for (b = 0; b < nb; ++b) {
1997:       MatSetValues(mat, bs, &rows[b*bs], bs, &rows[b*bs], &v[b*bs*bs], ADD_VALUES);
1998:     }
1999:   }
2000:   PetscLogEventEnd(MAT_SetValuesBatch,mat,0,0,0);
2001:   return(0);
2002: }

2004: /*@
2005:    MatSetLocalToGlobalMapping - Sets a local-to-global numbering for use by
2006:    the routine MatSetValuesLocal() to allow users to insert matrix entries
2007:    using a local (per-processor) numbering.

2009:    Not Collective

2011:    Input Parameters:
2012: +  x - the matrix
2013: .  rmapping - row mapping created with ISLocalToGlobalMappingCreate()   or ISLocalToGlobalMappingCreateIS()
2014: - cmapping - column mapping

2016:    Level: intermediate

2018:    Concepts: matrices^local to global mapping
2019:    Concepts: local to global mapping^for matrices

2021: .seealso:  MatAssemblyBegin(), MatAssemblyEnd(), MatSetValues(), MatSetValuesLocal()
2022: @*/
2023: PetscErrorCode MatSetLocalToGlobalMapping(Mat x,ISLocalToGlobalMapping rmapping,ISLocalToGlobalMapping cmapping)
2024: {


2033:   if (x->ops->setlocaltoglobalmapping) {
2034:     (*x->ops->setlocaltoglobalmapping)(x,rmapping,cmapping);
2035:   } else {
2036:     PetscLayoutSetISLocalToGlobalMapping(x->rmap,rmapping);
2037:     PetscLayoutSetISLocalToGlobalMapping(x->cmap,cmapping);
2038:   }
2039:   return(0);
2040: }


2043: /*@
2044:    MatGetLocalToGlobalMapping - Gets the local-to-global numbering set by MatSetLocalToGlobalMapping()

2046:    Not Collective

2048:    Input Parameters:
2049: .  A - the matrix

2051:    Output Parameters:
2052: + rmapping - row mapping
2053: - cmapping - column mapping

2055:    Level: advanced

2057:    Concepts: matrices^local to global mapping
2058:    Concepts: local to global mapping^for matrices

2060: .seealso:  MatSetValuesLocal()
2061: @*/
2062: PetscErrorCode MatGetLocalToGlobalMapping(Mat A,ISLocalToGlobalMapping *rmapping,ISLocalToGlobalMapping *cmapping)
2063: {
2069:   if (rmapping) *rmapping = A->rmap->mapping;
2070:   if (cmapping) *cmapping = A->cmap->mapping;
2071:   return(0);
2072: }

2074: /*@
2075:    MatGetLayouts - Gets the PetscLayout objects for rows and columns

2077:    Not Collective

2079:    Input Parameters:
2080: .  A - the matrix

2082:    Output Parameters:
2083: + rmap - row layout
2084: - cmap - column layout

2086:    Level: advanced

2088: .seealso:  MatCreateVecs(), MatGetLocalToGlobalMapping()
2089: @*/
2090: PetscErrorCode MatGetLayouts(Mat A,PetscLayout *rmap,PetscLayout *cmap)
2091: {
2097:   if (rmap) *rmap = A->rmap;
2098:   if (cmap) *cmap = A->cmap;
2099:   return(0);
2100: }

2102: /*@C
2103:    MatSetValuesLocal - Inserts or adds values into certain locations of a matrix,
2104:    using a local ordering of the nodes.

2106:    Not Collective

2108:    Input Parameters:
2109: +  mat - the matrix
2110: .  nrow, irow - number of rows and their local indices
2111: .  ncol, icol - number of columns and their local indices
2112: .  y -  a logically two-dimensional array of values
2113: -  addv - either INSERT_VALUES or ADD_VALUES, where
2114:    ADD_VALUES adds values to any existing entries, and
2115:    INSERT_VALUES replaces existing entries with new values

2117:    Notes:
2118:    If you create the matrix yourself (that is not with a call to DMCreateMatrix()) then you MUST call MatXXXXSetPreallocation() or
2119:       MatSetUp() before using this routine

2121:    If you create the matrix yourself (that is not with a call to DMCreateMatrix()) then you MUST call MatSetLocalToGlobalMapping() before using this routine

2123:    Calls to MatSetValuesLocal() with the INSERT_VALUES and ADD_VALUES
2124:    options cannot be mixed without intervening calls to the assembly
2125:    routines.

2127:    These values may be cached, so MatAssemblyBegin() and MatAssemblyEnd()
2128:    MUST be called after all calls to MatSetValuesLocal() have been completed.

2130:    Level: intermediate

2132:    Concepts: matrices^putting entries in with local numbering

2134:    Developer Notes: This is labeled with C so does not automatically generate Fortran stubs and interfaces
2135:                     because it requires multiple Fortran interfaces depending on which arguments are scalar or arrays.

2137: .seealso:  MatAssemblyBegin(), MatAssemblyEnd(), MatSetValues(), MatSetLocalToGlobalMapping(),
2138:            MatSetValueLocal()
2139: @*/
2140: PetscErrorCode MatSetValuesLocal(Mat mat,PetscInt nrow,const PetscInt irow[],PetscInt ncol,const PetscInt icol[],const PetscScalar y[],InsertMode addv)
2141: {

2147:   MatCheckPreallocated(mat,1);
2148:   if (!nrow || !ncol) return(0); /* no values to insert */
2152:   if (mat->insertmode == NOT_SET_VALUES) {
2153:     mat->insertmode = addv;
2154:   }
2155: #if defined(PETSC_USE_DEBUG)
2156:   else if (mat->insertmode != addv) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Cannot mix add values and insert values");
2157:   if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2158:   if (!mat->ops->setvalueslocal && !mat->ops->setvalues) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
2159: #endif

2161:   if (mat->assembled) {
2162:     mat->was_assembled = PETSC_TRUE;
2163:     mat->assembled     = PETSC_FALSE;
2164:   }
2165:   PetscLogEventBegin(MAT_SetValues,mat,0,0,0);
2166:   if (mat->ops->setvalueslocal) {
2167:     (*mat->ops->setvalueslocal)(mat,nrow,irow,ncol,icol,y,addv);
2168:   } else {
2169:     PetscInt buf[8192],*bufr=0,*bufc=0,*irowm,*icolm;
2170:     if ((nrow+ncol) <= (PetscInt)(sizeof(buf)/sizeof(PetscInt))) {
2171:       irowm = buf; icolm = buf+nrow;
2172:     } else {
2173:       PetscMalloc2(nrow,&bufr,ncol,&bufc);
2174:       irowm = bufr; icolm = bufc;
2175:     }
2176:     ISLocalToGlobalMappingApply(mat->rmap->mapping,nrow,irow,irowm);
2177:     ISLocalToGlobalMappingApply(mat->cmap->mapping,ncol,icol,icolm);
2178:     MatSetValues(mat,nrow,irowm,ncol,icolm,y,addv);
2179:     PetscFree2(bufr,bufc);
2180:   }
2181:   PetscLogEventEnd(MAT_SetValues,mat,0,0,0);
2182: #if defined(PETSC_HAVE_CUSP)
2183:   if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
2184:     mat->valid_GPU_matrix = PETSC_CUSP_CPU;
2185:   }
2186: #elif defined(PETSC_HAVE_VIENNACL)
2187:   if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
2188:     mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
2189:   }
2190: #elif defined(PETSC_HAVE_VECCUDA)
2191:   if (mat->valid_GPU_matrix != PETSC_CUDA_UNALLOCATED) {
2192:     mat->valid_GPU_matrix = PETSC_CUDA_CPU;
2193:   }
2194: #endif
2195:   return(0);
2196: }

2198: /*@C
2199:    MatSetValuesBlockedLocal - Inserts or adds values into certain locations of a matrix,
2200:    using a local ordering of the nodes a block at a time.

2202:    Not Collective

2204:    Input Parameters:
2205: +  x - the matrix
2206: .  nrow, irow - number of rows and their local indices
2207: .  ncol, icol - number of columns and their local indices
2208: .  y -  a logically two-dimensional array of values
2209: -  addv - either INSERT_VALUES or ADD_VALUES, where
2210:    ADD_VALUES adds values to any existing entries, and
2211:    INSERT_VALUES replaces existing entries with new values

2213:    Notes:
2214:    If you create the matrix yourself (that is not with a call to DMCreateMatrix()) then you MUST call MatXXXXSetPreallocation() or
2215:       MatSetUp() before using this routine

2217:    If you create the matrix yourself (that is not with a call to DMCreateMatrix()) then you MUST call MatSetBlockSize() and MatSetLocalToGlobalMapping()
2218:       before using this routineBefore calling MatSetValuesLocal(), the user must first set the

2220:    Calls to MatSetValuesBlockedLocal() with the INSERT_VALUES and ADD_VALUES
2221:    options cannot be mixed without intervening calls to the assembly
2222:    routines.

2224:    These values may be cached, so MatAssemblyBegin() and MatAssemblyEnd()
2225:    MUST be called after all calls to MatSetValuesBlockedLocal() have been completed.

2227:    Level: intermediate

2229:    Developer Notes: This is labeled with C so does not automatically generate Fortran stubs and interfaces
2230:                     because it requires multiple Fortran interfaces depending on which arguments are scalar or arrays.

2232:    Concepts: matrices^putting blocked values in with local numbering

2234: .seealso:  MatSetBlockSize(), MatSetLocalToGlobalMapping(), MatAssemblyBegin(), MatAssemblyEnd(),
2235:            MatSetValuesLocal(),  MatSetValuesBlocked()
2236: @*/
2237: PetscErrorCode MatSetValuesBlockedLocal(Mat mat,PetscInt nrow,const PetscInt irow[],PetscInt ncol,const PetscInt icol[],const PetscScalar y[],InsertMode addv)
2238: {

2244:   MatCheckPreallocated(mat,1);
2245:   if (!nrow || !ncol) return(0); /* no values to insert */
2249:   if (mat->insertmode == NOT_SET_VALUES) {
2250:     mat->insertmode = addv;
2251:   }
2252: #if defined(PETSC_USE_DEBUG)
2253:   else if (mat->insertmode != addv) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Cannot mix add values and insert values");
2254:   if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2255:   if (!mat->ops->setvaluesblockedlocal && !mat->ops->setvaluesblocked && !mat->ops->setvalueslocal && !mat->ops->setvalues) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
2256: #endif

2258:   if (mat->assembled) {
2259:     mat->was_assembled = PETSC_TRUE;
2260:     mat->assembled     = PETSC_FALSE;
2261:   }
2262:   PetscLogEventBegin(MAT_SetValues,mat,0,0,0);
2263:   if (mat->ops->setvaluesblockedlocal) {
2264:     (*mat->ops->setvaluesblockedlocal)(mat,nrow,irow,ncol,icol,y,addv);
2265:   } else {
2266:     PetscInt buf[8192],*bufr=0,*bufc=0,*irowm,*icolm;
2267:     if ((nrow+ncol) <= (PetscInt)(sizeof(buf)/sizeof(PetscInt))) {
2268:       irowm = buf; icolm = buf + nrow;
2269:     } else {
2270:       PetscMalloc2(nrow,&bufr,ncol,&bufc);
2271:       irowm = bufr; icolm = bufc;
2272:     }
2273:     ISLocalToGlobalMappingApplyBlock(mat->rmap->mapping,nrow,irow,irowm);
2274:     ISLocalToGlobalMappingApplyBlock(mat->cmap->mapping,ncol,icol,icolm);
2275:     MatSetValuesBlocked(mat,nrow,irowm,ncol,icolm,y,addv);
2276:     PetscFree2(bufr,bufc);
2277:   }
2278:   PetscLogEventEnd(MAT_SetValues,mat,0,0,0);
2279: #if defined(PETSC_HAVE_CUSP)
2280:   if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
2281:     mat->valid_GPU_matrix = PETSC_CUSP_CPU;
2282:   }
2283: #elif defined(PETSC_HAVE_VIENNACL)
2284:   if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
2285:     mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
2286:   }
2287: #elif defined(PETSC_HAVE_VECCUDA)
2288:   if (mat->valid_GPU_matrix != PETSC_CUDA_UNALLOCATED) {
2289:     mat->valid_GPU_matrix = PETSC_CUDA_CPU;
2290:   }
2291: #endif
2292:   return(0);
2293: }

2295: /*@
2296:    MatMultDiagonalBlock - Computes the matrix-vector product, y = Dx. Where D is defined by the inode or block structure of the diagonal

2298:    Collective on Mat and Vec

2300:    Input Parameters:
2301: +  mat - the matrix
2302: -  x   - the vector to be multiplied

2304:    Output Parameters:
2305: .  y - the result

2307:    Notes:
2308:    The vectors x and y cannot be the same.  I.e., one cannot
2309:    call MatMult(A,y,y).

2311:    Level: developer

2313:    Concepts: matrix-vector product

2315: .seealso: MatMultTranspose(), MatMultAdd(), MatMultTransposeAdd()
2316: @*/
2317: PetscErrorCode MatMultDiagonalBlock(Mat mat,Vec x,Vec y)
2318: {


2327:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2328:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2329:   if (x == y) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"x and y must be different vectors");
2330:   MatCheckPreallocated(mat,1);

2332:   if (!mat->ops->multdiagonalblock) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"This matrix type does not have a multiply defined");
2333:   (*mat->ops->multdiagonalblock)(mat,x,y);
2334:   PetscObjectStateIncrease((PetscObject)y);
2335:   return(0);
2336: }

2338: /* --------------------------------------------------------*/
2339: /*@
2340:    MatMult - Computes the matrix-vector product, y = Ax.

2342:    Neighbor-wise Collective on Mat and Vec

2344:    Input Parameters:
2345: +  mat - the matrix
2346: -  x   - the vector to be multiplied

2348:    Output Parameters:
2349: .  y - the result

2351:    Notes:
2352:    The vectors x and y cannot be the same.  I.e., one cannot
2353:    call MatMult(A,y,y).

2355:    Level: beginner

2357:    Concepts: matrix-vector product

2359: .seealso: MatMultTranspose(), MatMultAdd(), MatMultTransposeAdd()
2360: @*/
2361: PetscErrorCode MatMult(Mat mat,Vec x,Vec y)
2362: {

2370:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2371:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2372:   if (x == y) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"x and y must be different vectors");
2373: #if !defined(PETSC_HAVE_CONSTRAINTS)
2374:   if (mat->cmap->N != x->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->cmap->N,x->map->N);
2375:   if (mat->rmap->N != y->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec y: global dim %D %D",mat->rmap->N,y->map->N);
2376:   if (mat->rmap->n != y->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec y: local dim %D %D",mat->rmap->n,y->map->n);
2377: #endif
2378:   VecLocked(y,3);
2379:   if (mat->erroriffailure) {VecValidValues(x,2,PETSC_TRUE);}
2380:   MatCheckPreallocated(mat,1);

2382:   VecLockPush(x);
2383:   if (!mat->ops->mult) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"This matrix type does not have a multiply defined");
2384:   PetscLogEventBegin(MAT_Mult,mat,x,y,0);
2385:   (*mat->ops->mult)(mat,x,y);
2386:   PetscLogEventEnd(MAT_Mult,mat,x,y,0);
2387:   if (mat->erroriffailure) {VecValidValues(y,3,PETSC_FALSE);}
2388:   VecLockPop(x);
2389:   return(0);
2390: }

2392: /*@
2393:    MatMultTranspose - Computes matrix transpose times a vector y = A^T * x.

2395:    Neighbor-wise Collective on Mat and Vec

2397:    Input Parameters:
2398: +  mat - the matrix
2399: -  x   - the vector to be multiplied

2401:    Output Parameters:
2402: .  y - the result

2404:    Notes:
2405:    The vectors x and y cannot be the same.  I.e., one cannot
2406:    call MatMultTranspose(A,y,y).

2408:    For complex numbers this does NOT compute the Hermitian (complex conjugate) transpose multiple,
2409:    use MatMultHermitianTranspose()

2411:    Level: beginner

2413:    Concepts: matrix vector product^transpose

2415: .seealso: MatMult(), MatMultAdd(), MatMultTransposeAdd(), MatMultHermitianTranspose(), MatTranspose()
2416: @*/
2417: PetscErrorCode MatMultTranspose(Mat mat,Vec x,Vec y)
2418: {


2427:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2428:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2429:   if (x == y) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"x and y must be different vectors");
2430: #if !defined(PETSC_HAVE_CONSTRAINTS)
2431:   if (mat->rmap->N != x->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->rmap->N,x->map->N);
2432:   if (mat->cmap->N != y->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec y: global dim %D %D",mat->cmap->N,y->map->N);
2433: #endif
2434:   if (mat->erroriffailure) {VecValidValues(x,2,PETSC_TRUE);}
2435:   MatCheckPreallocated(mat,1);

2437:   if (!mat->ops->multtranspose) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"This matrix type does not have a multiply transpose defined");
2438:   PetscLogEventBegin(MAT_MultTranspose,mat,x,y,0);
2439:   VecLockPush(x);
2440:   (*mat->ops->multtranspose)(mat,x,y);
2441:   VecLockPop(x);
2442:   PetscLogEventEnd(MAT_MultTranspose,mat,x,y,0);
2443:   PetscObjectStateIncrease((PetscObject)y);
2444:   if (mat->erroriffailure) {VecValidValues(y,3,PETSC_FALSE);}
2445:   return(0);
2446: }

2448: /*@
2449:    MatMultHermitianTranspose - Computes matrix Hermitian transpose times a vector.

2451:    Neighbor-wise Collective on Mat and Vec

2453:    Input Parameters:
2454: +  mat - the matrix
2455: -  x   - the vector to be multilplied

2457:    Output Parameters:
2458: .  y - the result

2460:    Notes:
2461:    The vectors x and y cannot be the same.  I.e., one cannot
2462:    call MatMultHermitianTranspose(A,y,y).

2464:    Also called the conjugate transpose, complex conjugate transpose, or adjoint.

2466:    For real numbers MatMultTranspose() and MatMultHermitianTranspose() are identical.

2468:    Level: beginner

2470:    Concepts: matrix vector product^transpose

2472: .seealso: MatMult(), MatMultAdd(), MatMultHermitianTransposeAdd(), MatMultTranspose()
2473: @*/
2474: PetscErrorCode MatMultHermitianTranspose(Mat mat,Vec x,Vec y)
2475: {
2477:   Vec            w;


2485:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2486:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2487:   if (x == y) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"x and y must be different vectors");
2488: #if !defined(PETSC_HAVE_CONSTRAINTS)
2489:   if (mat->rmap->N != x->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->rmap->N,x->map->N);
2490:   if (mat->cmap->N != y->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec y: global dim %D %D",mat->cmap->N,y->map->N);
2491: #endif
2492:   MatCheckPreallocated(mat,1);

2494:   PetscLogEventBegin(MAT_MultHermitianTranspose,mat,x,y,0);
2495:   if (mat->ops->multhermitiantranspose) {
2496:     VecLockPush(x);
2497:     (*mat->ops->multhermitiantranspose)(mat,x,y);
2498:     VecLockPop(x);
2499:   } else {
2500:     VecDuplicate(x,&w);
2501:     VecCopy(x,w);
2502:     VecConjugate(w);
2503:     MatMultTranspose(mat,w,y);
2504:     VecDestroy(&w);
2505:     VecConjugate(y);
2506:   }
2507:   PetscLogEventEnd(MAT_MultHermitianTranspose,mat,x,y,0);
2508:   PetscObjectStateIncrease((PetscObject)y);
2509:   return(0);
2510: }

2512: /*@
2513:     MatMultAdd -  Computes v3 = v2 + A * v1.

2515:     Neighbor-wise Collective on Mat and Vec

2517:     Input Parameters:
2518: +   mat - the matrix
2519: -   v1, v2 - the vectors

2521:     Output Parameters:
2522: .   v3 - the result

2524:     Notes:
2525:     The vectors v1 and v3 cannot be the same.  I.e., one cannot
2526:     call MatMultAdd(A,v1,v2,v1).

2528:     Level: beginner

2530:     Concepts: matrix vector product^addition

2532: .seealso: MatMultTranspose(), MatMult(), MatMultTransposeAdd()
2533: @*/
2534: PetscErrorCode MatMultAdd(Mat mat,Vec v1,Vec v2,Vec v3)
2535: {


2545:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2546:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2547:   if (mat->cmap->N != v1->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec v1: global dim %D %D",mat->cmap->N,v1->map->N);
2548:   /* if (mat->rmap->N != v2->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec v2: global dim %D %D",mat->rmap->N,v2->map->N);
2549:      if (mat->rmap->N != v3->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec v3: global dim %D %D",mat->rmap->N,v3->map->N); */
2550:   if (mat->rmap->n != v3->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec v3: local dim %D %D",mat->rmap->n,v3->map->n);
2551:   if (mat->rmap->n != v2->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec v2: local dim %D %D",mat->rmap->n,v2->map->n);
2552:   if (v1 == v3) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"v1 and v3 must be different vectors");
2553:   MatCheckPreallocated(mat,1);

2555:   if (!mat->ops->multadd) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"No MatMultAdd() for matrix type '%s'",((PetscObject)mat)->type_name);
2556:   PetscLogEventBegin(MAT_MultAdd,mat,v1,v2,v3);
2557:   VecLockPush(v1);
2558:   (*mat->ops->multadd)(mat,v1,v2,v3);
2559:   VecLockPop(v1);
2560:   PetscLogEventEnd(MAT_MultAdd,mat,v1,v2,v3);
2561:   PetscObjectStateIncrease((PetscObject)v3);
2562:   return(0);
2563: }

2565: /*@
2566:    MatMultTransposeAdd - Computes v3 = v2 + A' * v1.

2568:    Neighbor-wise Collective on Mat and Vec

2570:    Input Parameters:
2571: +  mat - the matrix
2572: -  v1, v2 - the vectors

2574:    Output Parameters:
2575: .  v3 - the result

2577:    Notes:
2578:    The vectors v1 and v3 cannot be the same.  I.e., one cannot
2579:    call MatMultTransposeAdd(A,v1,v2,v1).

2581:    Level: beginner

2583:    Concepts: matrix vector product^transpose and addition

2585: .seealso: MatMultTranspose(), MatMultAdd(), MatMult()
2586: @*/
2587: PetscErrorCode MatMultTransposeAdd(Mat mat,Vec v1,Vec v2,Vec v3)
2588: {


2598:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2599:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2600:   if (!mat->ops->multtransposeadd) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
2601:   if (v1 == v3) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"v1 and v3 must be different vectors");
2602:   if (mat->rmap->N != v1->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec v1: global dim %D %D",mat->rmap->N,v1->map->N);
2603:   if (mat->cmap->N != v2->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec v2: global dim %D %D",mat->cmap->N,v2->map->N);
2604:   if (mat->cmap->N != v3->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec v3: global dim %D %D",mat->cmap->N,v3->map->N);
2605:   MatCheckPreallocated(mat,1);

2607:   PetscLogEventBegin(MAT_MultTransposeAdd,mat,v1,v2,v3);
2608:   VecLockPush(v1);
2609:   (*mat->ops->multtransposeadd)(mat,v1,v2,v3);
2610:   VecLockPop(v1);
2611:   PetscLogEventEnd(MAT_MultTransposeAdd,mat,v1,v2,v3);
2612:   PetscObjectStateIncrease((PetscObject)v3);
2613:   return(0);
2614: }

2616: /*@
2617:    MatMultHermitianTransposeAdd - Computes v3 = v2 + A^H * v1.

2619:    Neighbor-wise Collective on Mat and Vec

2621:    Input Parameters:
2622: +  mat - the matrix
2623: -  v1, v2 - the vectors

2625:    Output Parameters:
2626: .  v3 - the result

2628:    Notes:
2629:    The vectors v1 and v3 cannot be the same.  I.e., one cannot
2630:    call MatMultHermitianTransposeAdd(A,v1,v2,v1).

2632:    Level: beginner

2634:    Concepts: matrix vector product^transpose and addition

2636: .seealso: MatMultHermitianTranspose(), MatMultTranspose(), MatMultAdd(), MatMult()
2637: @*/
2638: PetscErrorCode MatMultHermitianTransposeAdd(Mat mat,Vec v1,Vec v2,Vec v3)
2639: {


2649:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2650:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2651:   if (v1 == v3) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"v1 and v3 must be different vectors");
2652:   if (mat->rmap->N != v1->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec v1: global dim %D %D",mat->rmap->N,v1->map->N);
2653:   if (mat->cmap->N != v2->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec v2: global dim %D %D",mat->cmap->N,v2->map->N);
2654:   if (mat->cmap->N != v3->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec v3: global dim %D %D",mat->cmap->N,v3->map->N);
2655:   MatCheckPreallocated(mat,1);

2657:   PetscLogEventBegin(MAT_MultHermitianTransposeAdd,mat,v1,v2,v3);
2658:   VecLockPush(v1);
2659:   if (mat->ops->multhermitiantransposeadd) {
2660:     (*mat->ops->multhermitiantransposeadd)(mat,v1,v2,v3);
2661:    } else {
2662:     Vec w,z;
2663:     VecDuplicate(v1,&w);
2664:     VecCopy(v1,w);
2665:     VecConjugate(w);
2666:     VecDuplicate(v3,&z);
2667:     MatMultTranspose(mat,w,z);
2668:     VecDestroy(&w);
2669:     VecConjugate(z);
2670:     VecWAXPY(v3,1.0,v2,z);
2671:     VecDestroy(&z);
2672:   }
2673:   VecLockPop(v1);
2674:   PetscLogEventEnd(MAT_MultHermitianTransposeAdd,mat,v1,v2,v3);
2675:   PetscObjectStateIncrease((PetscObject)v3);
2676:   return(0);
2677: }

2679: /*@
2680:    MatMultConstrained - The inner multiplication routine for a
2681:    constrained matrix P^T A P.

2683:    Neighbor-wise Collective on Mat and Vec

2685:    Input Parameters:
2686: +  mat - the matrix
2687: -  x   - the vector to be multilplied

2689:    Output Parameters:
2690: .  y - the result

2692:    Notes:
2693:    The vectors x and y cannot be the same.  I.e., one cannot
2694:    call MatMult(A,y,y).

2696:    Level: beginner

2698: .keywords: matrix, multiply, matrix-vector product, constraint
2699: .seealso: MatMult(), MatMultTranspose(), MatMultAdd(), MatMultTransposeAdd()
2700: @*/
2701: PetscErrorCode MatMultConstrained(Mat mat,Vec x,Vec y)
2702: {

2709:   if (!mat->assembled) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2710:   if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2711:   if (x == y) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"x and y must be different vectors");
2712:   if (mat->cmap->N != x->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->cmap->N,x->map->N);
2713:   if (mat->rmap->N != y->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec y: global dim %D %D",mat->rmap->N,y->map->N);
2714:   if (mat->rmap->n != y->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec y: local dim %D %D",mat->rmap->n,y->map->n);

2716:   PetscLogEventBegin(MAT_MultConstrained,mat,x,y,0);
2717:   VecLockPush(x);
2718:   (*mat->ops->multconstrained)(mat,x,y);
2719:   VecLockPop(x);
2720:   PetscLogEventEnd(MAT_MultConstrained,mat,x,y,0);
2721:   PetscObjectStateIncrease((PetscObject)y);
2722:   return(0);
2723: }

2725: /*@
2726:    MatMultTransposeConstrained - The inner multiplication routine for a
2727:    constrained matrix P^T A^T P.

2729:    Neighbor-wise Collective on Mat and Vec

2731:    Input Parameters:
2732: +  mat - the matrix
2733: -  x   - the vector to be multilplied

2735:    Output Parameters:
2736: .  y - the result

2738:    Notes:
2739:    The vectors x and y cannot be the same.  I.e., one cannot
2740:    call MatMult(A,y,y).

2742:    Level: beginner

2744: .keywords: matrix, multiply, matrix-vector product, constraint
2745: .seealso: MatMult(), MatMultTranspose(), MatMultAdd(), MatMultTransposeAdd()
2746: @*/
2747: PetscErrorCode MatMultTransposeConstrained(Mat mat,Vec x,Vec y)
2748: {

2755:   if (!mat->assembled) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2756:   if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2757:   if (x == y) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"x and y must be different vectors");
2758:   if (mat->rmap->N != x->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->cmap->N,x->map->N);
2759:   if (mat->cmap->N != y->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec y: global dim %D %D",mat->rmap->N,y->map->N);

2761:   PetscLogEventBegin(MAT_MultConstrained,mat,x,y,0);
2762:   (*mat->ops->multtransposeconstrained)(mat,x,y);
2763:   PetscLogEventEnd(MAT_MultConstrained,mat,x,y,0);
2764:   PetscObjectStateIncrease((PetscObject)y);
2765:   return(0);
2766: }

2768: /*@C
2769:    MatGetFactorType - gets the type of factorization it is

2771:    Note Collective
2772:    as the flag

2774:    Input Parameters:
2775: .  mat - the matrix

2777:    Output Parameters:
2778: .  t - the type, one of MAT_FACTOR_NONE, MAT_FACTOR_LU, MAT_FACTOR_CHOLESKY, MAT_FACTOR_ILU, MAT_FACTOR_ICC,MAT_FACTOR_ILUDT

2780:     Level: intermediate

2782: .seealso:    MatFactorType, MatGetFactor()
2783: @*/
2784: PetscErrorCode MatGetFactorType(Mat mat,MatFactorType *t)
2785: {
2789:   *t = mat->factortype;
2790:   return(0);
2791: }

2793: /* ------------------------------------------------------------*/
2794: /*@C
2795:    MatGetInfo - Returns information about matrix storage (number of
2796:    nonzeros, memory, etc.).

2798:    Collective on Mat if MAT_GLOBAL_MAX or MAT_GLOBAL_SUM is used as the flag

2800:    Input Parameters:
2801: .  mat - the matrix

2803:    Output Parameters:
2804: +  flag - flag indicating the type of parameters to be returned
2805:    (MAT_LOCAL - local matrix, MAT_GLOBAL_MAX - maximum over all processors,
2806:    MAT_GLOBAL_SUM - sum over all processors)
2807: -  info - matrix information context

2809:    Notes:
2810:    The MatInfo context contains a variety of matrix data, including
2811:    number of nonzeros allocated and used, number of mallocs during
2812:    matrix assembly, etc.  Additional information for factored matrices
2813:    is provided (such as the fill ratio, number of mallocs during
2814:    factorization, etc.).  Much of this info is printed to PETSC_STDOUT
2815:    when using the runtime options
2816: $       -info -mat_view ::ascii_info

2818:    Example for C/C++ Users:
2819:    See the file ${PETSC_DIR}/include/petscmat.h for a complete list of
2820:    data within the MatInfo context.  For example,
2821: .vb
2822:       MatInfo info;
2823:       Mat     A;
2824:       double  mal, nz_a, nz_u;

2826:       MatGetInfo(A,MAT_LOCAL,&info);
2827:       mal  = info.mallocs;
2828:       nz_a = info.nz_allocated;
2829: .ve

2831:    Example for Fortran Users:
2832:    Fortran users should declare info as a double precision
2833:    array of dimension MAT_INFO_SIZE, and then extract the parameters
2834:    of interest.  See the file ${PETSC_DIR}/include/petsc/finclude/petscmat.h
2835:    a complete list of parameter names.
2836: .vb
2837:       double  precision info(MAT_INFO_SIZE)
2838:       double  precision mal, nz_a
2839:       Mat     A
2840:       integer ierr

2842:       call MatGetInfo(A,MAT_LOCAL,info,ierr)
2843:       mal = info(MAT_INFO_MALLOCS)
2844:       nz_a = info(MAT_INFO_NZ_ALLOCATED)
2845: .ve

2847:     Level: intermediate

2849:     Concepts: matrices^getting information on

2851:     Developer Note: fortran interface is not autogenerated as the f90
2852:     interface defintion cannot be generated correctly [due to MatInfo]

2854: .seealso: MatStashGetInfo()

2856: @*/
2857: PetscErrorCode MatGetInfo(Mat mat,MatInfoType flag,MatInfo *info)
2858: {

2865:   if (!mat->ops->getinfo) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
2866:   MatCheckPreallocated(mat,1);
2867:   (*mat->ops->getinfo)(mat,flag,info);
2868:   return(0);
2869: }

2871: /*
2872:    This is used by external packages where it is not easy to get the info from the actual
2873:    matrix factorization.
2874: */
2875: PetscErrorCode MatGetInfo_External(Mat A,MatInfoType flag,MatInfo *info)
2876: {

2880:   PetscMemzero(info,sizeof(MatInfo));
2881:   return(0);
2882: }

2884: /* ----------------------------------------------------------*/

2886: /*@C
2887:    MatLUFactor - Performs in-place LU factorization of matrix.

2889:    Collective on Mat

2891:    Input Parameters:
2892: +  mat - the matrix
2893: .  row - row permutation
2894: .  col - column permutation
2895: -  info - options for factorization, includes
2896: $          fill - expected fill as ratio of original fill.
2897: $          dtcol - pivot tolerance (0 no pivot, 1 full column pivoting)
2898: $                   Run with the option -info to determine an optimal value to use

2900:    Notes:
2901:    Most users should employ the simplified KSP interface for linear solvers
2902:    instead of working directly with matrix algebra routines such as this.
2903:    See, e.g., KSPCreate().

2905:    This changes the state of the matrix to a factored matrix; it cannot be used
2906:    for example with MatSetValues() unless one first calls MatSetUnfactored().

2908:    Level: developer

2910:    Concepts: matrices^LU factorization

2912: .seealso: MatLUFactorSymbolic(), MatLUFactorNumeric(), MatCholeskyFactor(),
2913:           MatGetOrdering(), MatSetUnfactored(), MatFactorInfo, MatGetFactor()

2915:     Developer Note: fortran interface is not autogenerated as the f90
2916:     interface defintion cannot be generated correctly [due to MatFactorInfo]

2918: @*/
2919: PetscErrorCode MatLUFactor(Mat mat,IS row,IS col,const MatFactorInfo *info)
2920: {
2922:   MatFactorInfo  tinfo;

2930:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2931:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2932:   if (!mat->ops->lufactor) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
2933:   MatCheckPreallocated(mat,1);
2934:   if (!info) {
2935:     MatFactorInfoInitialize(&tinfo);
2936:     info = &tinfo;
2937:   }

2939:   PetscLogEventBegin(MAT_LUFactor,mat,row,col,0);
2940:   (*mat->ops->lufactor)(mat,row,col,info);
2941:   PetscLogEventEnd(MAT_LUFactor,mat,row,col,0);
2942:   PetscObjectStateIncrease((PetscObject)mat);
2943:   return(0);
2944: }

2946: /*@C
2947:    MatILUFactor - Performs in-place ILU factorization of matrix.

2949:    Collective on Mat

2951:    Input Parameters:
2952: +  mat - the matrix
2953: .  row - row permutation
2954: .  col - column permutation
2955: -  info - structure containing
2956: $      levels - number of levels of fill.
2957: $      expected fill - as ratio of original fill.
2958: $      1 or 0 - indicating force fill on diagonal (improves robustness for matrices
2959:                 missing diagonal entries)

2961:    Notes:
2962:    Probably really in-place only when level of fill is zero, otherwise allocates
2963:    new space to store factored matrix and deletes previous memory.

2965:    Most users should employ the simplified KSP interface for linear solvers
2966:    instead of working directly with matrix algebra routines such as this.
2967:    See, e.g., KSPCreate().

2969:    Level: developer

2971:    Concepts: matrices^ILU factorization

2973: .seealso: MatILUFactorSymbolic(), MatLUFactorNumeric(), MatCholeskyFactor(), MatFactorInfo

2975:     Developer Note: fortran interface is not autogenerated as the f90
2976:     interface defintion cannot be generated correctly [due to MatFactorInfo]

2978: @*/
2979: PetscErrorCode MatILUFactor(Mat mat,IS row,IS col,const MatFactorInfo *info)
2980: {

2989:   if (mat->rmap->N != mat->cmap->N) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONG,"matrix must be square");
2990:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2991:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2992:   if (!mat->ops->ilufactor) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
2993:   MatCheckPreallocated(mat,1);

2995:   PetscLogEventBegin(MAT_ILUFactor,mat,row,col,0);
2996:   (*mat->ops->ilufactor)(mat,row,col,info);
2997:   PetscLogEventEnd(MAT_ILUFactor,mat,row,col,0);
2998:   PetscObjectStateIncrease((PetscObject)mat);
2999:   return(0);
3000: }

3002: /*@C
3003:    MatLUFactorSymbolic - Performs symbolic LU factorization of matrix.
3004:    Call this routine before calling MatLUFactorNumeric().

3006:    Collective on Mat

3008:    Input Parameters:
3009: +  fact - the factor matrix obtained with MatGetFactor()
3010: .  mat - the matrix
3011: .  row, col - row and column permutations
3012: -  info - options for factorization, includes
3013: $          fill - expected fill as ratio of original fill.
3014: $          dtcol - pivot tolerance (0 no pivot, 1 full column pivoting)
3015: $                   Run with the option -info to determine an optimal value to use


3018:    Notes: See Users-Manual: ch_mat for additional information about choosing the fill factor for better efficiency.

3020:    Most users should employ the simplified KSP interface for linear solvers
3021:    instead of working directly with matrix algebra routines such as this.
3022:    See, e.g., KSPCreate().

3024:    Level: developer

3026:    Concepts: matrices^LU symbolic factorization

3028: .seealso: MatLUFactor(), MatLUFactorNumeric(), MatCholeskyFactor(), MatFactorInfo, MatFactorInfoInitialize()

3030:     Developer Note: fortran interface is not autogenerated as the f90
3031:     interface defintion cannot be generated correctly [due to MatFactorInfo]

3033: @*/
3034: PetscErrorCode MatLUFactorSymbolic(Mat fact,Mat mat,IS row,IS col,const MatFactorInfo *info)
3035: {

3045:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
3046:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
3047:   if (!(fact)->ops->lufactorsymbolic) {
3048:     MatSolverType spackage;
3049:     MatFactorGetSolverType(fact,&spackage);
3050:     SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Matrix type %s symbolic LU using solver package %s",((PetscObject)mat)->type_name,spackage);
3051:   }
3052:   MatCheckPreallocated(mat,2);

3054:   PetscLogEventBegin(MAT_LUFactorSymbolic,mat,row,col,0);
3055:   (fact->ops->lufactorsymbolic)(fact,mat,row,col,info);
3056:   PetscLogEventEnd(MAT_LUFactorSymbolic,mat,row,col,0);
3057:   PetscObjectStateIncrease((PetscObject)fact);
3058:   return(0);
3059: }

3061: /*@C
3062:    MatLUFactorNumeric - Performs numeric LU factorization of a matrix.
3063:    Call this routine after first calling MatLUFactorSymbolic().

3065:    Collective on Mat

3067:    Input Parameters:
3068: +  fact - the factor matrix obtained with MatGetFactor()
3069: .  mat - the matrix
3070: -  info - options for factorization

3072:    Notes:
3073:    See MatLUFactor() for in-place factorization.  See
3074:    MatCholeskyFactorNumeric() for the symmetric, positive definite case.

3076:    Most users should employ the simplified KSP interface for linear solvers
3077:    instead of working directly with matrix algebra routines such as this.
3078:    See, e.g., KSPCreate().

3080:    Level: developer

3082:    Concepts: matrices^LU numeric factorization

3084: .seealso: MatLUFactorSymbolic(), MatLUFactor(), MatCholeskyFactor()

3086:     Developer Note: fortran interface is not autogenerated as the f90
3087:     interface defintion cannot be generated correctly [due to MatFactorInfo]

3089: @*/
3090: PetscErrorCode MatLUFactorNumeric(Mat fact,Mat mat,const MatFactorInfo *info)
3091: {

3099:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
3100:   if (mat->rmap->N != (fact)->rmap->N || mat->cmap->N != (fact)->cmap->N) SETERRQ4(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Mat fact: global dimensions are different %D should = %D %D should = %D",mat->rmap->N,(fact)->rmap->N,mat->cmap->N,(fact)->cmap->N);

3102:   if (!(fact)->ops->lufactornumeric) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s numeric LU",((PetscObject)mat)->type_name);
3103:   MatCheckPreallocated(mat,2);
3104:   PetscLogEventBegin(MAT_LUFactorNumeric,mat,fact,0,0);
3105:   (fact->ops->lufactornumeric)(fact,mat,info);
3106:   PetscLogEventEnd(MAT_LUFactorNumeric,mat,fact,0,0);
3107:   MatViewFromOptions(fact,NULL,"-mat_factor_view");
3108:   PetscObjectStateIncrease((PetscObject)fact);
3109:   return(0);
3110: }

3112: /*@C
3113:    MatCholeskyFactor - Performs in-place Cholesky factorization of a
3114:    symmetric matrix.

3116:    Collective on Mat

3118:    Input Parameters:
3119: +  mat - the matrix
3120: .  perm - row and column permutations
3121: -  f - expected fill as ratio of original fill

3123:    Notes:
3124:    See MatLUFactor() for the nonsymmetric case.  See also
3125:    MatCholeskyFactorSymbolic(), and MatCholeskyFactorNumeric().

3127:    Most users should employ the simplified KSP interface for linear solvers
3128:    instead of working directly with matrix algebra routines such as this.
3129:    See, e.g., KSPCreate().

3131:    Level: developer

3133:    Concepts: matrices^Cholesky factorization

3135: .seealso: MatLUFactor(), MatCholeskyFactorSymbolic(), MatCholeskyFactorNumeric()
3136:           MatGetOrdering()

3138:     Developer Note: fortran interface is not autogenerated as the f90
3139:     interface defintion cannot be generated correctly [due to MatFactorInfo]

3141: @*/
3142: PetscErrorCode MatCholeskyFactor(Mat mat,IS perm,const MatFactorInfo *info)
3143: {

3151:   if (mat->rmap->N != mat->cmap->N) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONG,"Matrix must be square");
3152:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
3153:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
3154:   if (!mat->ops->choleskyfactor) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"In-place factorization for Mat type %s is not supported, try out-of-place factorization. See MatCholeskyFactorSymbolic/Numeric",((PetscObject)mat)->type_name);
3155:   MatCheckPreallocated(mat,1);

3157:   PetscLogEventBegin(MAT_CholeskyFactor,mat,perm,0,0);
3158:   (*mat->ops->choleskyfactor)(mat,perm,info);
3159:   PetscLogEventEnd(MAT_CholeskyFactor,mat,perm,0,0);
3160:   PetscObjectStateIncrease((PetscObject)mat);
3161:   return(0);
3162: }

3164: /*@C
3165:    MatCholeskyFactorSymbolic - Performs symbolic Cholesky factorization
3166:    of a symmetric matrix.

3168:    Collective on Mat

3170:    Input Parameters:
3171: +  fact - the factor matrix obtained with MatGetFactor()
3172: .  mat - the matrix
3173: .  perm - row and column permutations
3174: -  info - options for factorization, includes
3175: $          fill - expected fill as ratio of original fill.
3176: $          dtcol - pivot tolerance (0 no pivot, 1 full column pivoting)
3177: $                   Run with the option -info to determine an optimal value to use

3179:    Notes:
3180:    See MatLUFactorSymbolic() for the nonsymmetric case.  See also
3181:    MatCholeskyFactor() and MatCholeskyFactorNumeric().

3183:    Most users should employ the simplified KSP interface for linear solvers
3184:    instead of working directly with matrix algebra routines such as this.
3185:    See, e.g., KSPCreate().

3187:    Level: developer

3189:    Concepts: matrices^Cholesky symbolic factorization

3191: .seealso: MatLUFactorSymbolic(), MatCholeskyFactor(), MatCholeskyFactorNumeric()
3192:           MatGetOrdering()

3194:     Developer Note: fortran interface is not autogenerated as the f90
3195:     interface defintion cannot be generated correctly [due to MatFactorInfo]

3197: @*/
3198: PetscErrorCode MatCholeskyFactorSymbolic(Mat fact,Mat mat,IS perm,const MatFactorInfo *info)
3199: {

3208:   if (mat->rmap->N != mat->cmap->N) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONG,"Matrix must be square");
3209:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
3210:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
3211:   if (!(fact)->ops->choleskyfactorsymbolic) {
3212:     MatSolverType spackage;
3213:     MatFactorGetSolverType(fact,&spackage);
3214:     SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s symbolic factor Cholesky using solver package %s",((PetscObject)mat)->type_name,spackage);
3215:   }
3216:   MatCheckPreallocated(mat,2);

3218:   PetscLogEventBegin(MAT_CholeskyFactorSymbolic,mat,perm,0,0);
3219:   (fact->ops->choleskyfactorsymbolic)(fact,mat,perm,info);
3220:   PetscLogEventEnd(MAT_CholeskyFactorSymbolic,mat,perm,0,0);
3221:   PetscObjectStateIncrease((PetscObject)fact);
3222:   return(0);
3223: }

3225: /*@C
3226:    MatCholeskyFactorNumeric - Performs numeric Cholesky factorization
3227:    of a symmetric matrix. Call this routine after first calling
3228:    MatCholeskyFactorSymbolic().

3230:    Collective on Mat

3232:    Input Parameters:
3233: +  fact - the factor matrix obtained with MatGetFactor()
3234: .  mat - the initial matrix
3235: .  info - options for factorization
3236: -  fact - the symbolic factor of mat


3239:    Notes:
3240:    Most users should employ the simplified KSP interface for linear solvers
3241:    instead of working directly with matrix algebra routines such as this.
3242:    See, e.g., KSPCreate().

3244:    Level: developer

3246:    Concepts: matrices^Cholesky numeric factorization

3248: .seealso: MatCholeskyFactorSymbolic(), MatCholeskyFactor(), MatLUFactorNumeric()

3250:     Developer Note: fortran interface is not autogenerated as the f90
3251:     interface defintion cannot be generated correctly [due to MatFactorInfo]

3253: @*/
3254: PetscErrorCode MatCholeskyFactorNumeric(Mat fact,Mat mat,const MatFactorInfo *info)
3255: {

3263:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
3264:   if (!(fact)->ops->choleskyfactornumeric) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s numeric factor Cholesky",((PetscObject)mat)->type_name);
3265:   if (mat->rmap->N != (fact)->rmap->N || mat->cmap->N != (fact)->cmap->N) SETERRQ4(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Mat fact: global dim %D should = %D %D should = %D",mat->rmap->N,(fact)->rmap->N,mat->cmap->N,(fact)->cmap->N);
3266:   MatCheckPreallocated(mat,2);

3268:   PetscLogEventBegin(MAT_CholeskyFactorNumeric,mat,fact,0,0);
3269:   (fact->ops->choleskyfactornumeric)(fact,mat,info);
3270:   PetscLogEventEnd(MAT_CholeskyFactorNumeric,mat,fact,0,0);
3271:   MatViewFromOptions(fact,NULL,"-mat_factor_view");
3272:   PetscObjectStateIncrease((PetscObject)fact);
3273:   return(0);
3274: }

3276: /* ----------------------------------------------------------------*/
3277: /*@
3278:    MatSolve - Solves A x = b, given a factored matrix.

3280:    Neighbor-wise Collective on Mat and Vec

3282:    Input Parameters:
3283: +  mat - the factored matrix
3284: -  b - the right-hand-side vector

3286:    Output Parameter:
3287: .  x - the result vector

3289:    Notes:
3290:    The vectors b and x cannot be the same.  I.e., one cannot
3291:    call MatSolve(A,x,x).

3293:    Notes:
3294:    Most users should employ the simplified KSP interface for linear solvers
3295:    instead of working directly with matrix algebra routines such as this.
3296:    See, e.g., KSPCreate().

3298:    Level: developer

3300:    Concepts: matrices^triangular solves

3302: .seealso: MatSolveAdd(), MatSolveTranspose(), MatSolveTransposeAdd()
3303: @*/
3304: PetscErrorCode MatSolve(Mat mat,Vec b,Vec x)
3305: {

3315:   if (x == b) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"x and b must be different vectors");
3316:   if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
3317:   if (mat->cmap->N != x->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->cmap->N,x->map->N);
3318:   if (mat->rmap->N != b->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: global dim %D %D",mat->rmap->N,b->map->N);
3319:   if (mat->rmap->n != b->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: local dim %D %D",mat->rmap->n,b->map->n);
3320:   if (!mat->rmap->N && !mat->cmap->N) return(0);
3321:   if (!mat->ops->solve) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
3322:   MatCheckPreallocated(mat,1);

3324:   PetscLogEventBegin(MAT_Solve,mat,b,x,0);
3325:   if (mat->factorerrortype) {
3326:     PetscInfo1(mat,"MatFactorError %D\n",mat->factorerrortype);
3327:     VecSetInf(x);
3328:   } else {
3329:     (*mat->ops->solve)(mat,b,x);
3330:   }
3331:   PetscLogEventEnd(MAT_Solve,mat,b,x,0);
3332:   PetscObjectStateIncrease((PetscObject)x);
3333:   return(0);
3334: }

3336: static PetscErrorCode MatMatSolve_Basic(Mat A,Mat B,Mat X, PetscBool trans)
3337: {
3339:   Vec            b,x;
3340:   PetscInt       m,N,i;
3341:   PetscScalar    *bb,*xx;
3342:   PetscBool      flg;

3345:   PetscObjectTypeCompareAny((PetscObject)B,&flg,MATSEQDENSE,MATMPIDENSE,NULL);
3346:   if (!flg) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONG,"Matrix B must be MATDENSE matrix");
3347:   PetscObjectTypeCompareAny((PetscObject)X,&flg,MATSEQDENSE,MATMPIDENSE,NULL);
3348:   if (!flg) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONG,"Matrix X must be MATDENSE matrix");

3350:   MatDenseGetArray(B,&bb);
3351:   MatDenseGetArray(X,&xx);
3352:   MatGetLocalSize(B,&m,NULL);  /* number local rows */
3353:   MatGetSize(B,NULL,&N);       /* total columns in dense matrix */
3354:   MatCreateVecs(A,&x,&b);
3355:   for (i=0; i<N; i++) {
3356:     VecPlaceArray(b,bb + i*m);
3357:     VecPlaceArray(x,xx + i*m);
3358:     if (trans) {
3359:       MatSolveTranspose(A,b,x);
3360:     } else {
3361:       MatSolve(A,b,x);
3362:     }
3363:     VecResetArray(x);
3364:     VecResetArray(b);
3365:   }
3366:   VecDestroy(&b);
3367:   VecDestroy(&x);
3368:   MatDenseRestoreArray(B,&bb);
3369:   MatDenseRestoreArray(X,&xx);
3370:   return(0);
3371: }

3373: /*@
3374:    MatMatSolve - Solves A X = B, given a factored matrix.

3376:    Neighbor-wise Collective on Mat

3378:    Input Parameters:
3379: +  A - the factored matrix
3380: -  B - the right-hand-side matrix  (dense matrix)

3382:    Output Parameter:
3383: .  X - the result matrix (dense matrix)

3385:    Notes:
3386:    The matrices b and x cannot be the same.  I.e., one cannot
3387:    call MatMatSolve(A,x,x).

3389:    Notes:
3390:    Most users should usually employ the simplified KSP interface for linear solvers
3391:    instead of working directly with matrix algebra routines such as this.
3392:    See, e.g., KSPCreate(). However KSP can only solve for one vector (column of X)
3393:    at a time.

3395:    When using SuperLU_Dist as a parallel solver PETSc will use the SuperLU_Dist functionality to solve multiple right hand sides simultaneously. For MUMPS
3396:    it calls a separate solve for each right hand side since MUMPS does not yet support distributed right hand sides.

3398:    Since the resulting matrix X must always be dense we do not support sparse representation of the matrix B.

3400:    Level: developer

3402:    Concepts: matrices^triangular solves

3404: .seealso: MatMatSolveTranspose(), MatLUFactor(), MatCholeskyFactor()
3405: @*/
3406: PetscErrorCode MatMatSolve(Mat A,Mat B,Mat X)
3407: {

3417:   if (X == B) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_IDN,"X and B must be different matrices");
3418:   if (!A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
3419:   if (A->cmap->N != X->rmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Mat A,Mat X: global dim %D %D",A->cmap->N,X->rmap->N);
3420:   if (A->rmap->N != B->rmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Mat A,Mat B: global dim %D %D",A->rmap->N,B->rmap->N);
3421:   if (A->rmap->n != B->rmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat A,Mat B: local dim %D %D",A->rmap->n,B->rmap->n);
3422:   if (X->cmap->N < B->cmap->N) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Solution matrix must have same number of columns as rhs matrix");
3423:   if (!A->rmap->N && !A->cmap->N) return(0);
3424:   MatCheckPreallocated(A,1);

3426:   PetscLogEventBegin(MAT_MatSolve,A,B,X,0);
3427:   if (!A->ops->matsolve) {
3428:     PetscInfo1(A,"Mat type %s using basic MatMatSolve\n",((PetscObject)A)->type_name);
3429:     MatMatSolve_Basic(A,B,X,PETSC_FALSE);
3430:   } else {
3431:     (*A->ops->matsolve)(A,B,X);
3432:   }
3433:   PetscLogEventEnd(MAT_MatSolve,A,B,X,0);
3434:   PetscObjectStateIncrease((PetscObject)X);
3435:   return(0);
3436: }

3438: /*@
3439:    MatMatSolveTranspose - Solves A^T X = B, given a factored matrix.

3441:    Neighbor-wise Collective on Mat

3443:    Input Parameters:
3444: +  A - the factored matrix
3445: -  B - the right-hand-side matrix  (dense matrix)

3447:    Output Parameter:
3448: .  X - the result matrix (dense matrix)

3450:    Notes:
3451:    The matrices b and x cannot be the same.  I.e., one cannot
3452:    call MatMatSolveTranspose(A,x,x).

3454:    Notes:
3455:    Most users should usually employ the simplified KSP interface for linear solvers
3456:    instead of working directly with matrix algebra routines such as this.
3457:    See, e.g., KSPCreate(). However KSP can only solve for one vector (column of X)
3458:    at a time.

3460:    When using SuperLU_Dist as a parallel solver PETSc will use the SuperLU_Dist functionality to solve multiple right hand sides simultaneously. For MUMPS
3461:    it calls a separate solve for each right hand side since MUMPS does not yet support distributed right hand sides.

3463:    Since the resulting matrix X must always be dense we do not support sparse representation of the matrix B.

3465:    Level: developer

3467:    Concepts: matrices^triangular solves

3469: .seealso: MatMatSolveTranspose(), MatLUFactor(), MatCholeskyFactor()
3470: @*/
3471: PetscErrorCode MatMatSolveTranspose(Mat A,Mat B,Mat X)
3472: {

3482:   if (X == B) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_IDN,"X and B must be different matrices");
3483:   if (!A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
3484:   if (A->cmap->N != X->rmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Mat A,Mat X: global dim %D %D",A->cmap->N,X->rmap->N);
3485:   if (A->rmap->N != B->rmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Mat A,Mat B: global dim %D %D",A->rmap->N,B->rmap->N);
3486:   if (A->rmap->n != B->rmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat A,Mat B: local dim %D %D",A->rmap->n,B->rmap->n);
3487:   if (X->cmap->N < B->cmap->N) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Solution matrix must have same number of columns as rhs matrix");
3488:   if (!A->rmap->N && !A->cmap->N) return(0);
3489:   MatCheckPreallocated(A,1);

3491:   PetscLogEventBegin(MAT_MatSolve,A,B,X,0);
3492:   if (!A->ops->matsolvetranspose) {
3493:     PetscInfo1(A,"Mat type %s using basic MatMatSolveTranspose\n",((PetscObject)A)->type_name);
3494:     MatMatSolve_Basic(A,B,X,PETSC_TRUE);
3495:   } else {
3496:     (*A->ops->matsolvetranspose)(A,B,X);
3497:   }
3498:   PetscLogEventEnd(MAT_MatSolve,A,B,X,0);
3499:   PetscObjectStateIncrease((PetscObject)X);
3500:   return(0);
3501: }

3503: /*@
3504:    MatForwardSolve - Solves L x = b, given a factored matrix, A = LU, or
3505:                             U^T*D^(1/2) x = b, given a factored symmetric matrix, A = U^T*D*U,

3507:    Neighbor-wise Collective on Mat and Vec

3509:    Input Parameters:
3510: +  mat - the factored matrix
3511: -  b - the right-hand-side vector

3513:    Output Parameter:
3514: .  x - the result vector

3516:    Notes:
3517:    MatSolve() should be used for most applications, as it performs
3518:    a forward solve followed by a backward solve.

3520:    The vectors b and x cannot be the same,  i.e., one cannot
3521:    call MatForwardSolve(A,x,x).

3523:    For matrix in seqsbaij format with block size larger than 1,
3524:    the diagonal blocks are not implemented as D = D^(1/2) * D^(1/2) yet.
3525:    MatForwardSolve() solves U^T*D y = b, and
3526:    MatBackwardSolve() solves U x = y.
3527:    Thus they do not provide a symmetric preconditioner.

3529:    Most users should employ the simplified KSP interface for linear solvers
3530:    instead of working directly with matrix algebra routines such as this.
3531:    See, e.g., KSPCreate().

3533:    Level: developer

3535:    Concepts: matrices^forward solves

3537: .seealso: MatSolve(), MatBackwardSolve()
3538: @*/
3539: PetscErrorCode MatForwardSolve(Mat mat,Vec b,Vec x)
3540: {

3550:   if (x == b) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"x and b must be different vectors");
3551:   if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
3552:   if (!mat->ops->forwardsolve) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
3553:   if (mat->cmap->N != x->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->cmap->N,x->map->N);
3554:   if (mat->rmap->N != b->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: global dim %D %D",mat->rmap->N,b->map->N);
3555:   if (mat->rmap->n != b->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: local dim %D %D",mat->rmap->n,b->map->n);
3556:   MatCheckPreallocated(mat,1);
3557:   PetscLogEventBegin(MAT_ForwardSolve,mat,b,x,0);
3558:   (*mat->ops->forwardsolve)(mat,b,x);
3559:   PetscLogEventEnd(MAT_ForwardSolve,mat,b,x,0);
3560:   PetscObjectStateIncrease((PetscObject)x);
3561:   return(0);
3562: }

3564: /*@
3565:    MatBackwardSolve - Solves U x = b, given a factored matrix, A = LU.
3566:                              D^(1/2) U x = b, given a factored symmetric matrix, A = U^T*D*U,

3568:    Neighbor-wise Collective on Mat and Vec

3570:    Input Parameters:
3571: +  mat - the factored matrix
3572: -  b - the right-hand-side vector

3574:    Output Parameter:
3575: .  x - the result vector

3577:    Notes:
3578:    MatSolve() should be used for most applications, as it performs
3579:    a forward solve followed by a backward solve.

3581:    The vectors b and x cannot be the same.  I.e., one cannot
3582:    call MatBackwardSolve(A,x,x).

3584:    For matrix in seqsbaij format with block size larger than 1,
3585:    the diagonal blocks are not implemented as D = D^(1/2) * D^(1/2) yet.
3586:    MatForwardSolve() solves U^T*D y = b, and
3587:    MatBackwardSolve() solves U x = y.
3588:    Thus they do not provide a symmetric preconditioner.

3590:    Most users should employ the simplified KSP interface for linear solvers
3591:    instead of working directly with matrix algebra routines such as this.
3592:    See, e.g., KSPCreate().

3594:    Level: developer

3596:    Concepts: matrices^backward solves

3598: .seealso: MatSolve(), MatForwardSolve()
3599: @*/
3600: PetscErrorCode MatBackwardSolve(Mat mat,Vec b,Vec x)
3601: {

3611:   if (x == b) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"x and b must be different vectors");
3612:   if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
3613:   if (!mat->ops->backwardsolve) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
3614:   if (mat->cmap->N != x->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->cmap->N,x->map->N);
3615:   if (mat->rmap->N != b->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: global dim %D %D",mat->rmap->N,b->map->N);
3616:   if (mat->rmap->n != b->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: local dim %D %D",mat->rmap->n,b->map->n);
3617:   MatCheckPreallocated(mat,1);

3619:   PetscLogEventBegin(MAT_BackwardSolve,mat,b,x,0);
3620:   (*mat->ops->backwardsolve)(mat,b,x);
3621:   PetscLogEventEnd(MAT_BackwardSolve,mat,b,x,0);
3622:   PetscObjectStateIncrease((PetscObject)x);
3623:   return(0);
3624: }

3626: /*@
3627:    MatSolveAdd - Computes x = y + inv(A)*b, given a factored matrix.

3629:    Neighbor-wise Collective on Mat and Vec

3631:    Input Parameters:
3632: +  mat - the factored matrix
3633: .  b - the right-hand-side vector
3634: -  y - the vector to be added to

3636:    Output Parameter:
3637: .  x - the result vector

3639:    Notes:
3640:    The vectors b and x cannot be the same.  I.e., one cannot
3641:    call MatSolveAdd(A,x,y,x).

3643:    Most users should employ the simplified KSP interface for linear solvers
3644:    instead of working directly with matrix algebra routines such as this.
3645:    See, e.g., KSPCreate().

3647:    Level: developer

3649:    Concepts: matrices^triangular solves

3651: .seealso: MatSolve(), MatSolveTranspose(), MatSolveTransposeAdd()
3652: @*/
3653: PetscErrorCode MatSolveAdd(Mat mat,Vec b,Vec y,Vec x)
3654: {
3655:   PetscScalar    one = 1.0;
3656:   Vec            tmp;

3668:   if (x == b) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"x and b must be different vectors");
3669:   if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
3670:   if (mat->cmap->N != x->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->cmap->N,x->map->N);
3671:   if (mat->rmap->N != b->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: global dim %D %D",mat->rmap->N,b->map->N);
3672:   if (mat->rmap->N != y->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec y: global dim %D %D",mat->rmap->N,y->map->N);
3673:   if (mat->rmap->n != b->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: local dim %D %D",mat->rmap->n,b->map->n);
3674:   if (x->map->n != y->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Vec x,Vec y: local dim %D %D",x->map->n,y->map->n);
3675:   MatCheckPreallocated(mat,1);

3677:   PetscLogEventBegin(MAT_SolveAdd,mat,b,x,y);
3678:   if (mat->ops->solveadd) {
3679:     (*mat->ops->solveadd)(mat,b,y,x);
3680:   } else {
3681:     /* do the solve then the add manually */
3682:     if (x != y) {
3683:       MatSolve(mat,b,x);
3684:       VecAXPY(x,one,y);
3685:     } else {
3686:       VecDuplicate(x,&tmp);
3687:       PetscLogObjectParent((PetscObject)mat,(PetscObject)tmp);
3688:       VecCopy(x,tmp);
3689:       MatSolve(mat,b,x);
3690:       VecAXPY(x,one,tmp);
3691:       VecDestroy(&tmp);
3692:     }
3693:   }
3694:   PetscLogEventEnd(MAT_SolveAdd,mat,b,x,y);
3695:   PetscObjectStateIncrease((PetscObject)x);
3696:   return(0);
3697: }

3699: /*@
3700:    MatSolveTranspose - Solves A' x = b, given a factored matrix.

3702:    Neighbor-wise Collective on Mat and Vec

3704:    Input Parameters:
3705: +  mat - the factored matrix
3706: -  b - the right-hand-side vector

3708:    Output Parameter:
3709: .  x - the result vector

3711:    Notes:
3712:    The vectors b and x cannot be the same.  I.e., one cannot
3713:    call MatSolveTranspose(A,x,x).

3715:    Most users should employ the simplified KSP interface for linear solvers
3716:    instead of working directly with matrix algebra routines such as this.
3717:    See, e.g., KSPCreate().

3719:    Level: developer

3721:    Concepts: matrices^triangular solves

3723: .seealso: MatSolve(), MatSolveAdd(), MatSolveTransposeAdd()
3724: @*/
3725: PetscErrorCode MatSolveTranspose(Mat mat,Vec b,Vec x)
3726: {

3736:   if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
3737:   if (x == b) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"x and b must be different vectors");
3738:   if (!mat->ops->solvetranspose) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Matrix type %s",((PetscObject)mat)->type_name);
3739:   if (mat->rmap->N != x->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->rmap->N,x->map->N);
3740:   if (mat->cmap->N != b->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: global dim %D %D",mat->cmap->N,b->map->N);
3741:   MatCheckPreallocated(mat,1);
3742:   PetscLogEventBegin(MAT_SolveTranspose,mat,b,x,0);
3743:   if (mat->factorerrortype) {
3744:     PetscInfo1(mat,"MatFactorError %D\n",mat->factorerrortype);
3745:     VecSetInf(x);
3746:   } else {
3747:     (*mat->ops->solvetranspose)(mat,b,x);
3748:   }
3749:   PetscLogEventEnd(MAT_SolveTranspose,mat,b,x,0);
3750:   PetscObjectStateIncrease((PetscObject)x);
3751:   return(0);
3752: }

3754: /*@
3755:    MatSolveTransposeAdd - Computes x = y + inv(Transpose(A)) b, given a
3756:                       factored matrix.

3758:    Neighbor-wise Collective on Mat and Vec

3760:    Input Parameters:
3761: +  mat - the factored matrix
3762: .  b - the right-hand-side vector
3763: -  y - the vector to be added to

3765:    Output Parameter:
3766: .  x - the result vector

3768:    Notes:
3769:    The vectors b and x cannot be the same.  I.e., one cannot
3770:    call MatSolveTransposeAdd(A,x,y,x).

3772:    Most users should employ the simplified KSP interface for linear solvers
3773:    instead of working directly with matrix algebra routines such as this.
3774:    See, e.g., KSPCreate().

3776:    Level: developer

3778:    Concepts: matrices^triangular solves

3780: .seealso: MatSolve(), MatSolveAdd(), MatSolveTranspose()
3781: @*/
3782: PetscErrorCode MatSolveTransposeAdd(Mat mat,Vec b,Vec y,Vec x)
3783: {
3784:   PetscScalar    one = 1.0;
3786:   Vec            tmp;

3797:   if (x == b) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"x and b must be different vectors");
3798:   if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
3799:   if (mat->rmap->N != x->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->rmap->N,x->map->N);
3800:   if (mat->cmap->N != b->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: global dim %D %D",mat->cmap->N,b->map->N);
3801:   if (mat->cmap->N != y->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec y: global dim %D %D",mat->cmap->N,y->map->N);
3802:   if (x->map->n != y->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Vec x,Vec y: local dim %D %D",x->map->n,y->map->n);
3803:   MatCheckPreallocated(mat,1);

3805:   PetscLogEventBegin(MAT_SolveTransposeAdd,mat,b,x,y);
3806:   if (mat->ops->solvetransposeadd) {
3807:     if (mat->factorerrortype) {
3808:       PetscInfo1(mat,"MatFactorError %D\n",mat->factorerrortype);
3809:       VecSetInf(x);
3810:     } else {
3811:       (*mat->ops->solvetransposeadd)(mat,b,y,x);
3812:     }
3813:   } else {
3814:     /* do the solve then the add manually */
3815:     if (x != y) {
3816:       MatSolveTranspose(mat,b,x);
3817:       VecAXPY(x,one,y);
3818:     } else {
3819:       VecDuplicate(x,&tmp);
3820:       PetscLogObjectParent((PetscObject)mat,(PetscObject)tmp);
3821:       VecCopy(x,tmp);
3822:       MatSolveTranspose(mat,b,x);
3823:       VecAXPY(x,one,tmp);
3824:       VecDestroy(&tmp);
3825:     }
3826:   }
3827:   PetscLogEventEnd(MAT_SolveTransposeAdd,mat,b,x,y);
3828:   PetscObjectStateIncrease((PetscObject)x);
3829:   return(0);
3830: }
3831: /* ----------------------------------------------------------------*/

3833: /*@
3834:    MatSOR - Computes relaxation (SOR, Gauss-Seidel) sweeps.

3836:    Neighbor-wise Collective on Mat and Vec

3838:    Input Parameters:
3839: +  mat - the matrix
3840: .  b - the right hand side
3841: .  omega - the relaxation factor
3842: .  flag - flag indicating the type of SOR (see below)
3843: .  shift -  diagonal shift
3844: .  its - the number of iterations
3845: -  lits - the number of local iterations

3847:    Output Parameters:
3848: .  x - the solution (can contain an initial guess, use option SOR_ZERO_INITIAL_GUESS to indicate no guess)

3850:    SOR Flags:
3851: .     SOR_FORWARD_SWEEP - forward SOR
3852: .     SOR_BACKWARD_SWEEP - backward SOR
3853: .     SOR_SYMMETRIC_SWEEP - SSOR (symmetric SOR)
3854: .     SOR_LOCAL_FORWARD_SWEEP - local forward SOR
3855: .     SOR_LOCAL_BACKWARD_SWEEP - local forward SOR
3856: .     SOR_LOCAL_SYMMETRIC_SWEEP - local SSOR
3857: .     SOR_APPLY_UPPER, SOR_APPLY_LOWER - applies
3858:          upper/lower triangular part of matrix to
3859:          vector (with omega)
3860: .     SOR_ZERO_INITIAL_GUESS - zero initial guess

3862:    Notes:
3863:    SOR_LOCAL_FORWARD_SWEEP, SOR_LOCAL_BACKWARD_SWEEP, and
3864:    SOR_LOCAL_SYMMETRIC_SWEEP perform separate independent smoothings
3865:    on each processor.

3867:    Application programmers will not generally use MatSOR() directly,
3868:    but instead will employ the KSP/PC interface.

3870:    Notes: for BAIJ, SBAIJ, and AIJ matrices with Inodes this does a block SOR smoothing, otherwise it does a pointwise smoothing

3872:    Notes for Advanced Users:
3873:    The flags are implemented as bitwise inclusive or operations.
3874:    For example, use (SOR_ZERO_INITIAL_GUESS | SOR_SYMMETRIC_SWEEP)
3875:    to specify a zero initial guess for SSOR.

3877:    Most users should employ the simplified KSP interface for linear solvers
3878:    instead of working directly with matrix algebra routines such as this.
3879:    See, e.g., KSPCreate().

3881:    Vectors x and b CANNOT be the same

3883:    Developer Note: We should add block SOR support for AIJ matrices with block size set to great than one and no inodes

3885:    Level: developer

3887:    Concepts: matrices^relaxation
3888:    Concepts: matrices^SOR
3889:    Concepts: matrices^Gauss-Seidel

3891: @*/
3892: PetscErrorCode MatSOR(Mat mat,Vec b,PetscReal omega,MatSORType flag,PetscReal shift,PetscInt its,PetscInt lits,Vec x)
3893: {

3903:   if (!mat->ops->sor) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
3904:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
3905:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
3906:   if (mat->cmap->N != x->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->cmap->N,x->map->N);
3907:   if (mat->rmap->N != b->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: global dim %D %D",mat->rmap->N,b->map->N);
3908:   if (mat->rmap->n != b->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: local dim %D %D",mat->rmap->n,b->map->n);
3909:   if (its <= 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Relaxation requires global its %D positive",its);
3910:   if (lits <= 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Relaxation requires local its %D positive",lits);
3911:   if (b == x) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_IDN,"b and x vector cannot be the same");

3913:   MatCheckPreallocated(mat,1);
3914:   PetscLogEventBegin(MAT_SOR,mat,b,x,0);
3915:   ierr =(*mat->ops->sor)(mat,b,omega,flag,shift,its,lits,x);
3916:   PetscLogEventEnd(MAT_SOR,mat,b,x,0);
3917:   PetscObjectStateIncrease((PetscObject)x);
3918:   return(0);
3919: }

3921: /*
3922:       Default matrix copy routine.
3923: */
3924: PetscErrorCode MatCopy_Basic(Mat A,Mat B,MatStructure str)
3925: {
3926:   PetscErrorCode    ierr;
3927:   PetscInt          i,rstart = 0,rend = 0,nz;
3928:   const PetscInt    *cwork;
3929:   const PetscScalar *vwork;

3932:   if (B->assembled) {
3933:     MatZeroEntries(B);
3934:   }
3935:   MatGetOwnershipRange(A,&rstart,&rend);
3936:   for (i=rstart; i<rend; i++) {
3937:     MatGetRow(A,i,&nz,&cwork,&vwork);
3938:     MatSetValues(B,1,&i,nz,cwork,vwork,INSERT_VALUES);
3939:     MatRestoreRow(A,i,&nz,&cwork,&vwork);
3940:   }
3941:   MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);
3942:   MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);
3943:   return(0);
3944: }

3946: /*@
3947:    MatCopy - Copys a matrix to another matrix.

3949:    Collective on Mat

3951:    Input Parameters:
3952: +  A - the matrix
3953: -  str - SAME_NONZERO_PATTERN or DIFFERENT_NONZERO_PATTERN

3955:    Output Parameter:
3956: .  B - where the copy is put

3958:    Notes:
3959:    If you use SAME_NONZERO_PATTERN then the two matrices had better have the
3960:    same nonzero pattern or the routine will crash.

3962:    MatCopy() copies the matrix entries of a matrix to another existing
3963:    matrix (after first zeroing the second matrix).  A related routine is
3964:    MatConvert(), which first creates a new matrix and then copies the data.

3966:    Level: intermediate

3968:    Concepts: matrices^copying

3970: .seealso: MatConvert(), MatDuplicate()

3972: @*/
3973: PetscErrorCode MatCopy(Mat A,Mat B,MatStructure str)
3974: {
3976:   PetscInt       i;

3984:   MatCheckPreallocated(B,2);
3985:   if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
3986:   if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
3987:   if (A->rmap->N != B->rmap->N || A->cmap->N != B->cmap->N) SETERRQ4(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Mat A,Mat B: global dim (%D,%D) (%D,%D)",A->rmap->N,B->rmap->N,A->cmap->N,B->cmap->N);
3988:   MatCheckPreallocated(A,1);
3989:   if (A == B) return(0);

3991:   PetscLogEventBegin(MAT_Copy,A,B,0,0);
3992:   if (A->ops->copy) {
3993:     (*A->ops->copy)(A,B,str);
3994:   } else { /* generic conversion */
3995:     MatCopy_Basic(A,B,str);
3996:   }

3998:   B->stencil.dim = A->stencil.dim;
3999:   B->stencil.noc = A->stencil.noc;
4000:   for (i=0; i<=A->stencil.dim; i++) {
4001:     B->stencil.dims[i]   = A->stencil.dims[i];
4002:     B->stencil.starts[i] = A->stencil.starts[i];
4003:   }

4005:   PetscLogEventEnd(MAT_Copy,A,B,0,0);
4006:   PetscObjectStateIncrease((PetscObject)B);
4007:   return(0);
4008: }

4010: /*@C
4011:    MatConvert - Converts a matrix to another matrix, either of the same
4012:    or different type.

4014:    Collective on Mat

4016:    Input Parameters:
4017: +  mat - the matrix
4018: .  newtype - new matrix type.  Use MATSAME to create a new matrix of the
4019:    same type as the original matrix.
4020: -  reuse - denotes if the destination matrix is to be created or reused.
4021:    Use MAT_INPLACE_MATRIX for inplace conversion (that is when you want the input mat to be changed to contain the matrix in the new format), otherwise use
4022:    MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX (can only be used after the first call was made with MAT_INITIAL_MATRIX, causes the matrix space in M to be reused).

4024:    Output Parameter:
4025: .  M - pointer to place new matrix

4027:    Notes:
4028:    MatConvert() first creates a new matrix and then copies the data from
4029:    the first matrix.  A related routine is MatCopy(), which copies the matrix
4030:    entries of one matrix to another already existing matrix context.

4032:    Cannot be used to convert a sequential matrix to parallel or parallel to sequential,
4033:    the MPI communicator of the generated matrix is always the same as the communicator
4034:    of the input matrix.

4036:    Level: intermediate

4038:    Concepts: matrices^converting between storage formats

4040: .seealso: MatCopy(), MatDuplicate()
4041: @*/
4042: PetscErrorCode MatConvert(Mat mat, MatType newtype,MatReuse reuse,Mat *M)
4043: {
4045:   PetscBool      sametype,issame,flg;
4046:   char           convname[256],mtype[256];
4047:   Mat            B;

4053:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4054:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
4055:   MatCheckPreallocated(mat,1);
4056:   MatSetOption(mat,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_FALSE);

4058:   PetscOptionsGetString(((PetscObject)mat)->options,((PetscObject)mat)->prefix,"-matconvert_type",mtype,256,&flg);
4059:   if (flg) {
4060:     newtype = mtype;
4061:   }
4062:   PetscObjectTypeCompare((PetscObject)mat,newtype,&sametype);
4063:   PetscStrcmp(newtype,"same",&issame);
4064:   if ((reuse == MAT_INPLACE_MATRIX) && (mat != *M)) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"MAT_INPLACE_MATRIX requires same input and output matrix");
4065:   if ((reuse == MAT_REUSE_MATRIX) && (mat == *M)) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"MAT_REUSE_MATRIX means reuse matrix in final argument, perhaps you mean MAT_INPLACE_MATRIX");

4067:   if ((reuse == MAT_INPLACE_MATRIX) && (issame || sametype)) return(0);

4069:   if ((sametype || issame) && (reuse==MAT_INITIAL_MATRIX) && mat->ops->duplicate) {
4070:     (*mat->ops->duplicate)(mat,MAT_COPY_VALUES,M);
4071:   } else {
4072:     PetscErrorCode (*conv)(Mat, MatType,MatReuse,Mat*)=NULL;
4073:     const char     *prefix[3] = {"seq","mpi",""};
4074:     PetscInt       i;
4075:     /*
4076:        Order of precedence:
4077:        1) See if a specialized converter is known to the current matrix.
4078:        2) See if a specialized converter is known to the desired matrix class.
4079:        3) See if a good general converter is registered for the desired class
4080:           (as of 6/27/03 only MATMPIADJ falls into this category).
4081:        4) See if a good general converter is known for the current matrix.
4082:        5) Use a really basic converter.
4083:     */

4085:     /* 1) See if a specialized converter is known to the current matrix and the desired class */
4086:     for (i=0; i<3; i++) {
4087:       PetscStrcpy(convname,"MatConvert_");
4088:       PetscStrcat(convname,((PetscObject)mat)->type_name);
4089:       PetscStrcat(convname,"_");
4090:       PetscStrcat(convname,prefix[i]);
4091:       PetscStrcat(convname,issame ? ((PetscObject)mat)->type_name : newtype);
4092:       PetscStrcat(convname,"_C");
4093:       PetscObjectQueryFunction((PetscObject)mat,convname,&conv);
4094:       if (conv) goto foundconv;
4095:     }

4097:     /* 2)  See if a specialized converter is known to the desired matrix class. */
4098:     MatCreate(PetscObjectComm((PetscObject)mat),&B);
4099:     MatSetSizes(B,mat->rmap->n,mat->cmap->n,mat->rmap->N,mat->cmap->N);
4100:     MatSetType(B,newtype);
4101:     for (i=0; i<3; i++) {
4102:       PetscStrcpy(convname,"MatConvert_");
4103:       PetscStrcat(convname,((PetscObject)mat)->type_name);
4104:       PetscStrcat(convname,"_");
4105:       PetscStrcat(convname,prefix[i]);
4106:       PetscStrcat(convname,newtype);
4107:       PetscStrcat(convname,"_C");
4108:       PetscObjectQueryFunction((PetscObject)B,convname,&conv);
4109:       if (conv) {
4110:         MatDestroy(&B);
4111:         goto foundconv;
4112:       }
4113:     }

4115:     /* 3) See if a good general converter is registered for the desired class */
4116:     conv = B->ops->convertfrom;
4117:     MatDestroy(&B);
4118:     if (conv) goto foundconv;

4120:     /* 4) See if a good general converter is known for the current matrix */
4121:     if (mat->ops->convert) {
4122:       conv = mat->ops->convert;
4123:     }
4124:     if (conv) goto foundconv;

4126:     /* 5) Use a really basic converter. */
4127:     conv = MatConvert_Basic;

4129: foundconv:
4130:     PetscLogEventBegin(MAT_Convert,mat,0,0,0);
4131:     (*conv)(mat,newtype,reuse,M);
4132:     if (mat->rmap->mapping && mat->cmap->mapping && !(*M)->rmap->mapping && !(*M)->cmap->mapping) {
4133:       /* the block sizes must be same if the mappings are copied over */
4134:       (*M)->rmap->bs = mat->rmap->bs;
4135:       (*M)->cmap->bs = mat->cmap->bs;
4136:       PetscObjectReference((PetscObject)mat->rmap->mapping);
4137:       PetscObjectReference((PetscObject)mat->cmap->mapping);
4138:       (*M)->rmap->mapping = mat->rmap->mapping;
4139:       (*M)->cmap->mapping = mat->cmap->mapping;
4140:     }
4141:     (*M)->stencil.dim = mat->stencil.dim;
4142:     (*M)->stencil.noc = mat->stencil.noc;
4143:     for (i=0; i<=mat->stencil.dim; i++) {
4144:       (*M)->stencil.dims[i]   = mat->stencil.dims[i];
4145:       (*M)->stencil.starts[i] = mat->stencil.starts[i];
4146:     }
4147:     PetscLogEventEnd(MAT_Convert,mat,0,0,0);
4148:   }
4149:   PetscObjectStateIncrease((PetscObject)*M);

4151:   /* Copy Mat options */
4152:   if (mat->symmetric) {MatSetOption(*M,MAT_SYMMETRIC,PETSC_TRUE);}
4153:   if (mat->hermitian) {MatSetOption(*M,MAT_HERMITIAN,PETSC_TRUE);}
4154:   return(0);
4155: }

4157: /*@C
4158:    MatFactorGetSolverType - Returns name of the package providing the factorization routines

4160:    Not Collective

4162:    Input Parameter:
4163: .  mat - the matrix, must be a factored matrix

4165:    Output Parameter:
4166: .   type - the string name of the package (do not free this string)

4168:    Notes:
4169:       In Fortran you pass in a empty string and the package name will be copied into it.
4170:     (Make sure the string is long enough)

4172:    Level: intermediate

4174: .seealso: MatCopy(), MatDuplicate(), MatGetFactorAvailable(), MatGetFactor()
4175: @*/
4176: PetscErrorCode MatFactorGetSolverType(Mat mat, MatSolverType *type)
4177: {
4178:   PetscErrorCode ierr, (*conv)(Mat,MatSolverType*);

4183:   if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Only for factored matrix");
4184:   PetscObjectQueryFunction((PetscObject)mat,"MatFactorGetSolverType_C",&conv);
4185:   if (!conv) {
4186:     *type = MATSOLVERPETSC;
4187:   } else {
4188:     (*conv)(mat,type);
4189:   }
4190:   return(0);
4191: }

4193: typedef struct _MatSolverTypeForSpecifcType* MatSolverTypeForSpecifcType;
4194: struct _MatSolverTypeForSpecifcType {
4195:   MatType                        mtype;
4196:   PetscErrorCode                 (*getfactor[4])(Mat,MatFactorType,Mat*);
4197:   MatSolverTypeForSpecifcType next;
4198: };

4200: typedef struct _MatSolverTypeHolder* MatSolverTypeHolder;
4201: struct _MatSolverTypeHolder {
4202:   char                           *name;
4203:   MatSolverTypeForSpecifcType handlers;
4204:   MatSolverTypeHolder         next;
4205: };

4207: static MatSolverTypeHolder MatSolverTypeHolders = NULL;

4209: /*@C
4210:    MatSolvePackageRegister - Registers a MatSolverType that works for a particular matrix type

4212:    Input Parameters:
4213: +    package - name of the package, for example petsc or superlu
4214: .    mtype - the matrix type that works with this package
4215: .    ftype - the type of factorization supported by the package
4216: -    getfactor - routine that will create the factored matrix ready to be used

4218:     Level: intermediate

4220: .seealso: MatCopy(), MatDuplicate(), MatGetFactorAvailable()
4221: @*/
4222: PetscErrorCode MatSolverTypeRegister(MatSolverType package,const MatType mtype,MatFactorType ftype,PetscErrorCode (*getfactor)(Mat,MatFactorType,Mat*))
4223: {
4224:   PetscErrorCode                 ierr;
4225:   MatSolverTypeHolder         next = MatSolverTypeHolders,prev;
4226:   PetscBool                      flg;
4227:   MatSolverTypeForSpecifcType inext,iprev = NULL;

4230:   if (!next) {
4231:     PetscNew(&MatSolverTypeHolders);
4232:     PetscStrallocpy(package,&MatSolverTypeHolders->name);
4233:     PetscNew(&MatSolverTypeHolders->handlers);
4234:     PetscStrallocpy(mtype,(char **)&MatSolverTypeHolders->handlers->mtype);
4235:     MatSolverTypeHolders->handlers->getfactor[(int)ftype-1] = getfactor;
4236:     return(0);
4237:   }
4238:   while (next) {
4239:     PetscStrcasecmp(package,next->name,&flg);
4240:     if (flg) {
4241:       if (!next->handlers) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"MatSolverTypeHolder is missing handlers");
4242:       inext = next->handlers;
4243:       while (inext) {
4244:         PetscStrcasecmp(mtype,inext->mtype,&flg);
4245:         if (flg) {
4246:           inext->getfactor[(int)ftype-1] = getfactor;
4247:           return(0);
4248:         }
4249:         iprev = inext;
4250:         inext = inext->next;
4251:       }
4252:       PetscNew(&iprev->next);
4253:       PetscStrallocpy(mtype,(char **)&iprev->next->mtype);
4254:       iprev->next->getfactor[(int)ftype-1] = getfactor;
4255:       return(0);
4256:     }
4257:     prev = next;
4258:     next = next->next;
4259:   }
4260:   PetscNew(&prev->next);
4261:   PetscStrallocpy(package,&prev->next->name);
4262:   PetscNew(&prev->next->handlers);
4263:   PetscStrallocpy(mtype,(char **)&prev->next->handlers->mtype);
4264:   prev->next->handlers->getfactor[(int)ftype-1] = getfactor;
4265:   return(0);
4266: }

4268: /*@C
4269:    MatSolvePackageGet - Get's the function that creates the factor matrix if it exist

4271:    Input Parameters:
4272: +    package - name of the package, for example petsc or superlu
4273: .    ftype - the type of factorization supported by the package
4274: -    mtype - the matrix type that works with this package

4276:    Output Parameters:
4277: +   foundpackage - PETSC_TRUE if the package was registered
4278: .   foundmtype - PETSC_TRUE if the package supports the requested mtype
4279: -   getfactor - routine that will create the factored matrix ready to be used or NULL if not found

4281:     Level: intermediate

4283: .seealso: MatCopy(), MatDuplicate(), MatGetFactorAvailable()
4284: @*/
4285: PetscErrorCode MatSolverTypeGet(MatSolverType package,const MatType mtype,MatFactorType ftype,PetscBool *foundpackage,PetscBool *foundmtype,PetscErrorCode (**getfactor)(Mat,MatFactorType,Mat*))
4286: {
4287:   PetscErrorCode                 ierr;
4288:   MatSolverTypeHolder         next = MatSolverTypeHolders;
4289:   PetscBool                      flg;
4290:   MatSolverTypeForSpecifcType inext;

4293:   if (foundpackage) *foundpackage = PETSC_FALSE;
4294:   if (foundmtype)   *foundmtype   = PETSC_FALSE;
4295:   if (getfactor)    *getfactor    = NULL;

4297:   if (package) {
4298:     while (next) {
4299:       PetscStrcasecmp(package,next->name,&flg);
4300:       if (flg) {
4301:         if (foundpackage) *foundpackage = PETSC_TRUE;
4302:         inext = next->handlers;
4303:         while (inext) {
4304:           PetscStrbeginswith(mtype,inext->mtype,&flg);
4305:           if (flg) {
4306:             if (foundmtype) *foundmtype = PETSC_TRUE;
4307:             if (getfactor)  *getfactor  = inext->getfactor[(int)ftype-1];
4308:             return(0);
4309:           }
4310:           inext = inext->next;
4311:         }
4312:       }
4313:       next = next->next;
4314:     }
4315:   } else {
4316:     while (next) {
4317:       inext = next->handlers;
4318:       while (inext) {
4319:         PetscStrbeginswith(mtype,inext->mtype,&flg);
4320:         if (flg && inext->getfactor[(int)ftype-1]) {
4321:           if (foundpackage) *foundpackage = PETSC_TRUE;
4322:           if (foundmtype)   *foundmtype   = PETSC_TRUE;
4323:           if (getfactor)    *getfactor    = inext->getfactor[(int)ftype-1];
4324:           return(0);
4325:         }
4326:         inext = inext->next;
4327:       }
4328:       next = next->next;
4329:     }
4330:   }
4331:   return(0);
4332: }

4334: PetscErrorCode MatSolverTypeDestroy(void)
4335: {
4336:   PetscErrorCode                 ierr;
4337:   MatSolverTypeHolder         next = MatSolverTypeHolders,prev;
4338:   MatSolverTypeForSpecifcType inext,iprev;

4341:   while (next) {
4342:     PetscFree(next->name);
4343:     inext = next->handlers;
4344:     while (inext) {
4345:       PetscFree(inext->mtype);
4346:       iprev = inext;
4347:       inext = inext->next;
4348:       PetscFree(iprev);
4349:     }
4350:     prev = next;
4351:     next = next->next;
4352:     PetscFree(prev);
4353:   }
4354:   MatSolverTypeHolders = NULL;
4355:   return(0);
4356: }

4358: /*@C
4359:    MatGetFactor - Returns a matrix suitable to calls to MatXXFactorSymbolic()

4361:    Collective on Mat

4363:    Input Parameters:
4364: +  mat - the matrix
4365: .  type - name of solver type, for example, superlu, petsc (to use PETSc's default)
4366: -  ftype - factor type, MAT_FACTOR_LU, MAT_FACTOR_CHOLESKY, MAT_FACTOR_ICC, MAT_FACTOR_ILU,

4368:    Output Parameters:
4369: .  f - the factor matrix used with MatXXFactorSymbolic() calls

4371:    Notes:
4372:       Some PETSc matrix formats have alternative solvers available that are contained in alternative packages
4373:      such as pastix, superlu, mumps etc.

4375:       PETSc must have been ./configure to use the external solver, using the option --download-package

4377:    Level: intermediate

4379: .seealso: MatCopy(), MatDuplicate(), MatGetFactorAvailable()
4380: @*/
4381: PetscErrorCode MatGetFactor(Mat mat, MatSolverType type,MatFactorType ftype,Mat *f)
4382: {
4383:   PetscErrorCode ierr,(*conv)(Mat,MatFactorType,Mat*);
4384:   PetscBool      foundpackage,foundmtype;


4390:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
4391:   MatCheckPreallocated(mat,1);

4393:   MatSolverTypeGet(type,((PetscObject)mat)->type_name,ftype,&foundpackage,&foundmtype,&conv);
4394:   if (!foundpackage) {
4395:     if (type) {
4396:       SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_MISSING_FACTOR,"Could not locate solver package %s. Perhaps you must ./configure with --download-%s",type,type);
4397:     } else {
4398:       SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_MISSING_FACTOR,"Could not locate a solver package. Perhaps you must ./configure with --download-<package>");
4399:     }
4400:   }

4402:   if (!foundmtype) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_MISSING_FACTOR,"MatSolverType %s does not support matrix type %s",type,((PetscObject)mat)->type_name);
4403:   if (!conv) SETERRQ3(PetscObjectComm((PetscObject)mat),PETSC_ERR_MISSING_FACTOR,"MatSolverType %s does not support factorization type %s for  matrix type %s",type,MatFactorTypes[ftype],((PetscObject)mat)->type_name);

4405: #if defined(PETSC_USE_COMPLEX)
4406:   if (mat->hermitian && (ftype == MAT_FACTOR_CHOLESKY||ftype == MAT_FACTOR_ICC)) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Hermitian CHOLESKY or ICC Factor is not supported");
4407: #endif

4409:   (*conv)(mat,ftype,f);
4410:   return(0);
4411: }

4413: /*@C
4414:    MatGetFactorAvailable - Returns a a flag if matrix supports particular package and factor type

4416:    Not Collective

4418:    Input Parameters:
4419: +  mat - the matrix
4420: .  type - name of solver type, for example, superlu, petsc (to use PETSc's default)
4421: -  ftype - factor type, MAT_FACTOR_LU, MAT_FACTOR_CHOLESKY, MAT_FACTOR_ICC, MAT_FACTOR_ILU,

4423:    Output Parameter:
4424: .    flg - PETSC_TRUE if the factorization is available

4426:    Notes:
4427:       Some PETSc matrix formats have alternative solvers available that are contained in alternative packages
4428:      such as pastix, superlu, mumps etc.

4430:       PETSc must have been ./configure to use the external solver, using the option --download-package

4432:    Level: intermediate

4434: .seealso: MatCopy(), MatDuplicate(), MatGetFactor()
4435: @*/
4436: PetscErrorCode MatGetFactorAvailable(Mat mat, MatSolverType type,MatFactorType ftype,PetscBool  *flg)
4437: {
4438:   PetscErrorCode ierr, (*gconv)(Mat,MatFactorType,Mat*);


4444:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
4445:   MatCheckPreallocated(mat,1);

4447:   *flg = PETSC_FALSE;
4448:   MatSolverTypeGet(type,((PetscObject)mat)->type_name,ftype,NULL,NULL,&gconv);
4449:   if (gconv) {
4450:     *flg = PETSC_TRUE;
4451:   }
4452:   return(0);
4453: }

4455:  #include <petscdmtypes.h>

4457: /*@
4458:    MatDuplicate - Duplicates a matrix including the non-zero structure.

4460:    Collective on Mat

4462:    Input Parameters:
4463: +  mat - the matrix
4464: -  op - One of MAT_DO_NOT_COPY_VALUES, MAT_COPY_VALUES, or MAT_SHARE_NONZERO_PATTERN.
4465:         See the manual page for MatDuplicateOption for an explanation of these options.

4467:    Output Parameter:
4468: .  M - pointer to place new matrix

4470:    Level: intermediate

4472:    Concepts: matrices^duplicating

4474:    Notes: You cannot change the nonzero pattern for the parent or child matrix if you use MAT_SHARE_NONZERO_PATTERN.

4476: .seealso: MatCopy(), MatConvert(), MatDuplicateOption
4477: @*/
4478: PetscErrorCode MatDuplicate(Mat mat,MatDuplicateOption op,Mat *M)
4479: {
4481:   Mat            B;
4482:   PetscInt       i;
4483:   DM             dm;

4489:   if (op == MAT_COPY_VALUES && !mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"MAT_COPY_VALUES not allowed for unassembled matrix");
4490:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
4491:   MatCheckPreallocated(mat,1);

4493:   *M = 0;
4494:   if (!mat->ops->duplicate) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Not written for this matrix type");
4495:   PetscLogEventBegin(MAT_Convert,mat,0,0,0);
4496:   (*mat->ops->duplicate)(mat,op,M);
4497:   B    = *M;

4499:   B->stencil.dim = mat->stencil.dim;
4500:   B->stencil.noc = mat->stencil.noc;
4501:   for (i=0; i<=mat->stencil.dim; i++) {
4502:     B->stencil.dims[i]   = mat->stencil.dims[i];
4503:     B->stencil.starts[i] = mat->stencil.starts[i];
4504:   }

4506:   B->nooffproczerorows = mat->nooffproczerorows;
4507:   B->nooffprocentries  = mat->nooffprocentries;

4509:   PetscObjectQuery((PetscObject) mat, "__PETSc_dm", (PetscObject*) &dm);
4510:   if (dm) {
4511:     PetscObjectCompose((PetscObject) B, "__PETSc_dm", (PetscObject) dm);
4512:   }
4513:   PetscLogEventEnd(MAT_Convert,mat,0,0,0);
4514:   PetscObjectStateIncrease((PetscObject)B);
4515:   return(0);
4516: }

4518: /*@
4519:    MatGetDiagonal - Gets the diagonal of a matrix.

4521:    Logically Collective on Mat and Vec

4523:    Input Parameters:
4524: +  mat - the matrix
4525: -  v - the vector for storing the diagonal

4527:    Output Parameter:
4528: .  v - the diagonal of the matrix

4530:    Level: intermediate

4532:    Note:
4533:    Currently only correct in parallel for square matrices.

4535:    Concepts: matrices^accessing diagonals

4537: .seealso: MatGetRow(), MatCreateSubMatrices(), MatCreateSubMatrix(), MatGetRowMaxAbs()
4538: @*/
4539: PetscErrorCode MatGetDiagonal(Mat mat,Vec v)
4540: {

4547:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4548:   if (!mat->ops->getdiagonal) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
4549:   MatCheckPreallocated(mat,1);

4551:   (*mat->ops->getdiagonal)(mat,v);
4552:   PetscObjectStateIncrease((PetscObject)v);
4553:   return(0);
4554: }

4556: /*@C
4557:    MatGetRowMin - Gets the minimum value (of the real part) of each
4558:         row of the matrix

4560:    Logically Collective on Mat and Vec

4562:    Input Parameters:
4563: .  mat - the matrix

4565:    Output Parameter:
4566: +  v - the vector for storing the maximums
4567: -  idx - the indices of the column found for each row (optional)

4569:    Level: intermediate

4571:    Notes: The result of this call are the same as if one converted the matrix to dense format
4572:       and found the minimum value in each row (i.e. the implicit zeros are counted as zeros).

4574:     This code is only implemented for a couple of matrix formats.

4576:    Concepts: matrices^getting row maximums

4578: .seealso: MatGetDiagonal(), MatCreateSubMatrices(), MatCreateSubMatrix(), MatGetRowMaxAbs(),
4579:           MatGetRowMax()
4580: @*/
4581: PetscErrorCode MatGetRowMin(Mat mat,Vec v,PetscInt idx[])
4582: {

4589:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4590:   if (!mat->ops->getrowmax) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
4591:   MatCheckPreallocated(mat,1);

4593:   (*mat->ops->getrowmin)(mat,v,idx);
4594:   PetscObjectStateIncrease((PetscObject)v);
4595:   return(0);
4596: }

4598: /*@C
4599:    MatGetRowMinAbs - Gets the minimum value (in absolute value) of each
4600:         row of the matrix

4602:    Logically Collective on Mat and Vec

4604:    Input Parameters:
4605: .  mat - the matrix

4607:    Output Parameter:
4608: +  v - the vector for storing the minimums
4609: -  idx - the indices of the column found for each row (or NULL if not needed)

4611:    Level: intermediate

4613:    Notes: if a row is completely empty or has only 0.0 values then the idx[] value for that
4614:     row is 0 (the first column).

4616:     This code is only implemented for a couple of matrix formats.

4618:    Concepts: matrices^getting row maximums

4620: .seealso: MatGetDiagonal(), MatCreateSubMatrices(), MatCreateSubMatrix(), MatGetRowMax(), MatGetRowMaxAbs(), MatGetRowMin()
4621: @*/
4622: PetscErrorCode MatGetRowMinAbs(Mat mat,Vec v,PetscInt idx[])
4623: {

4630:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4631:   if (!mat->ops->getrowminabs) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
4632:   MatCheckPreallocated(mat,1);
4633:   if (idx) {PetscMemzero(idx,mat->rmap->n*sizeof(PetscInt));}

4635:   (*mat->ops->getrowminabs)(mat,v,idx);
4636:   PetscObjectStateIncrease((PetscObject)v);
4637:   return(0);
4638: }

4640: /*@C
4641:    MatGetRowMax - Gets the maximum value (of the real part) of each
4642:         row of the matrix

4644:    Logically Collective on Mat and Vec

4646:    Input Parameters:
4647: .  mat - the matrix

4649:    Output Parameter:
4650: +  v - the vector for storing the maximums
4651: -  idx - the indices of the column found for each row (optional)

4653:    Level: intermediate

4655:    Notes: The result of this call are the same as if one converted the matrix to dense format
4656:       and found the minimum value in each row (i.e. the implicit zeros are counted as zeros).

4658:     This code is only implemented for a couple of matrix formats.

4660:    Concepts: matrices^getting row maximums

4662: .seealso: MatGetDiagonal(), MatCreateSubMatrices(), MatCreateSubMatrix(), MatGetRowMaxAbs(), MatGetRowMin()
4663: @*/
4664: PetscErrorCode MatGetRowMax(Mat mat,Vec v,PetscInt idx[])
4665: {

4672:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4673:   if (!mat->ops->getrowmax) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
4674:   MatCheckPreallocated(mat,1);

4676:   (*mat->ops->getrowmax)(mat,v,idx);
4677:   PetscObjectStateIncrease((PetscObject)v);
4678:   return(0);
4679: }

4681: /*@C
4682:    MatGetRowMaxAbs - Gets the maximum value (in absolute value) of each
4683:         row of the matrix

4685:    Logically Collective on Mat and Vec

4687:    Input Parameters:
4688: .  mat - the matrix

4690:    Output Parameter:
4691: +  v - the vector for storing the maximums
4692: -  idx - the indices of the column found for each row (or NULL if not needed)

4694:    Level: intermediate

4696:    Notes: if a row is completely empty or has only 0.0 values then the idx[] value for that
4697:     row is 0 (the first column).

4699:     This code is only implemented for a couple of matrix formats.

4701:    Concepts: matrices^getting row maximums

4703: .seealso: MatGetDiagonal(), MatCreateSubMatrices(), MatCreateSubMatrix(), MatGetRowMax(), MatGetRowMin()
4704: @*/
4705: PetscErrorCode MatGetRowMaxAbs(Mat mat,Vec v,PetscInt idx[])
4706: {

4713:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4714:   if (!mat->ops->getrowmaxabs) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
4715:   MatCheckPreallocated(mat,1);
4716:   if (idx) {PetscMemzero(idx,mat->rmap->n*sizeof(PetscInt));}

4718:   (*mat->ops->getrowmaxabs)(mat,v,idx);
4719:   PetscObjectStateIncrease((PetscObject)v);
4720:   return(0);
4721: }

4723: /*@
4724:    MatGetRowSum - Gets the sum of each row of the matrix

4726:    Logically or Neighborhood Collective on Mat and Vec

4728:    Input Parameters:
4729: .  mat - the matrix

4731:    Output Parameter:
4732: .  v - the vector for storing the sum of rows

4734:    Level: intermediate

4736:    Notes: This code is slow since it is not currently specialized for different formats

4738:    Concepts: matrices^getting row sums

4740: .seealso: MatGetDiagonal(), MatCreateSubMatrices(), MatCreateSubMatrix(), MatGetRowMax(), MatGetRowMin()
4741: @*/
4742: PetscErrorCode MatGetRowSum(Mat mat, Vec v)
4743: {
4744:   Vec            ones;

4751:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4752:   MatCheckPreallocated(mat,1);
4753:   MatCreateVecs(mat,&ones,NULL);
4754:   VecSet(ones,1.);
4755:   MatMult(mat,ones,v);
4756:   VecDestroy(&ones);
4757:   return(0);
4758: }

4760: /*@
4761:    MatTranspose - Computes an in-place or out-of-place transpose of a matrix.

4763:    Collective on Mat

4765:    Input Parameter:
4766: +  mat - the matrix to transpose
4767: -  reuse - either MAT_INITIAL_MATRIX, MAT_REUSE_MATRIX, or MAT_INPLACE_MATRIX

4769:    Output Parameters:
4770: .  B - the transpose

4772:    Notes:
4773:      If you use MAT_INPLACE_MATRIX then you must pass in &mat for B

4775:      MAT_REUSE_MATRIX causes the B matrix from a previous call to this function with MAT_INITIAL_MATRIX to be used

4777:      Consider using MatCreateTranspose() instead if you only need a matrix that behaves like the transpose, but don't need the storage to be changed.

4779:    Level: intermediate

4781:    Concepts: matrices^transposing

4783: .seealso: MatMultTranspose(), MatMultTransposeAdd(), MatIsTranspose(), MatReuse
4784: @*/
4785: PetscErrorCode MatTranspose(Mat mat,MatReuse reuse,Mat *B)
4786: {

4792:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4793:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
4794:   if (!mat->ops->transpose) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
4795:   if (reuse == MAT_INPLACE_MATRIX && mat != *B) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"MAT_INPLACE_MATRIX requires last matrix to match first");
4796:   if (reuse == MAT_REUSE_MATRIX && mat == *B) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Perhaps you mean MAT_INPLACE_MATRIX");
4797:   MatCheckPreallocated(mat,1);

4799:   PetscLogEventBegin(MAT_Transpose,mat,0,0,0);
4800:   (*mat->ops->transpose)(mat,reuse,B);
4801:   PetscLogEventEnd(MAT_Transpose,mat,0,0,0);
4802:   if (B) {PetscObjectStateIncrease((PetscObject)*B);}
4803:   return(0);
4804: }

4806: /*@
4807:    MatIsTranspose - Test whether a matrix is another one's transpose,
4808:         or its own, in which case it tests symmetry.

4810:    Collective on Mat

4812:    Input Parameter:
4813: +  A - the matrix to test
4814: -  B - the matrix to test against, this can equal the first parameter

4816:    Output Parameters:
4817: .  flg - the result

4819:    Notes:
4820:    Only available for SeqAIJ/MPIAIJ matrices. The sequential algorithm
4821:    has a running time of the order of the number of nonzeros; the parallel
4822:    test involves parallel copies of the block-offdiagonal parts of the matrix.

4824:    Level: intermediate

4826:    Concepts: matrices^transposing, matrix^symmetry

4828: .seealso: MatTranspose(), MatIsSymmetric(), MatIsHermitian()
4829: @*/
4830: PetscErrorCode MatIsTranspose(Mat A,Mat B,PetscReal tol,PetscBool  *flg)
4831: {
4832:   PetscErrorCode ierr,(*f)(Mat,Mat,PetscReal,PetscBool*),(*g)(Mat,Mat,PetscReal,PetscBool*);

4838:   PetscObjectQueryFunction((PetscObject)A,"MatIsTranspose_C",&f);
4839:   PetscObjectQueryFunction((PetscObject)B,"MatIsTranspose_C",&g);
4840:   *flg = PETSC_FALSE;
4841:   if (f && g) {
4842:     if (f == g) {
4843:       (*f)(A,B,tol,flg);
4844:     } else SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_NOTSAMETYPE,"Matrices do not have the same comparator for symmetry test");
4845:   } else {
4846:     MatType mattype;
4847:     if (!f) {
4848:       MatGetType(A,&mattype);
4849:     } else {
4850:       MatGetType(B,&mattype);
4851:     }
4852:     SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Matrix of type <%s> does not support checking for transpose",mattype);
4853:   }
4854:   return(0);
4855: }

4857: /*@
4858:    MatHermitianTranspose - Computes an in-place or out-of-place transpose of a matrix in complex conjugate.

4860:    Collective on Mat

4862:    Input Parameter:
4863: +  mat - the matrix to transpose and complex conjugate
4864: -  reuse - MAT_INITIAL_MATRIX to create a new matrix, MAT_INPLACE_MATRIX to reuse the first argument to store the transpose

4866:    Output Parameters:
4867: .  B - the Hermitian

4869:    Level: intermediate

4871:    Concepts: matrices^transposing, complex conjugatex

4873: .seealso: MatTranspose(), MatMultTranspose(), MatMultTransposeAdd(), MatIsTranspose(), MatReuse
4874: @*/
4875: PetscErrorCode MatHermitianTranspose(Mat mat,MatReuse reuse,Mat *B)
4876: {

4880:   MatTranspose(mat,reuse,B);
4881: #if defined(PETSC_USE_COMPLEX)
4882:   MatConjugate(*B);
4883: #endif
4884:   return(0);
4885: }

4887: /*@
4888:    MatIsHermitianTranspose - Test whether a matrix is another one's Hermitian transpose,

4890:    Collective on Mat

4892:    Input Parameter:
4893: +  A - the matrix to test
4894: -  B - the matrix to test against, this can equal the first parameter

4896:    Output Parameters:
4897: .  flg - the result

4899:    Notes:
4900:    Only available for SeqAIJ/MPIAIJ matrices. The sequential algorithm
4901:    has a running time of the order of the number of nonzeros; the parallel
4902:    test involves parallel copies of the block-offdiagonal parts of the matrix.

4904:    Level: intermediate

4906:    Concepts: matrices^transposing, matrix^symmetry

4908: .seealso: MatTranspose(), MatIsSymmetric(), MatIsHermitian(), MatIsTranspose()
4909: @*/
4910: PetscErrorCode MatIsHermitianTranspose(Mat A,Mat B,PetscReal tol,PetscBool  *flg)
4911: {
4912:   PetscErrorCode ierr,(*f)(Mat,Mat,PetscReal,PetscBool*),(*g)(Mat,Mat,PetscReal,PetscBool*);

4918:   PetscObjectQueryFunction((PetscObject)A,"MatIsHermitianTranspose_C",&f);
4919:   PetscObjectQueryFunction((PetscObject)B,"MatIsHermitianTranspose_C",&g);
4920:   if (f && g) {
4921:     if (f==g) {
4922:       (*f)(A,B,tol,flg);
4923:     } else SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_NOTSAMETYPE,"Matrices do not have the same comparator for Hermitian test");
4924:   }
4925:   return(0);
4926: }

4928: /*@
4929:    MatPermute - Creates a new matrix with rows and columns permuted from the
4930:    original.

4932:    Collective on Mat

4934:    Input Parameters:
4935: +  mat - the matrix to permute
4936: .  row - row permutation, each processor supplies only the permutation for its rows
4937: -  col - column permutation, each processor supplies only the permutation for its columns

4939:    Output Parameters:
4940: .  B - the permuted matrix

4942:    Level: advanced

4944:    Note:
4945:    The index sets map from row/col of permuted matrix to row/col of original matrix.
4946:    The index sets should be on the same communicator as Mat and have the same local sizes.

4948:    Concepts: matrices^permuting

4950: .seealso: MatGetOrdering(), ISAllGather()

4952: @*/
4953: PetscErrorCode MatPermute(Mat mat,IS row,IS col,Mat *B)
4954: {

4963:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4964:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
4965:   if (!mat->ops->permute) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"MatPermute not available for Mat type %s",((PetscObject)mat)->type_name);
4966:   MatCheckPreallocated(mat,1);

4968:   (*mat->ops->permute)(mat,row,col,B);
4969:   PetscObjectStateIncrease((PetscObject)*B);
4970:   return(0);
4971: }

4973: /*@
4974:    MatEqual - Compares two matrices.

4976:    Collective on Mat

4978:    Input Parameters:
4979: +  A - the first matrix
4980: -  B - the second matrix

4982:    Output Parameter:
4983: .  flg - PETSC_TRUE if the matrices are equal; PETSC_FALSE otherwise.

4985:    Level: intermediate

4987:    Concepts: matrices^equality between
4988: @*/
4989: PetscErrorCode MatEqual(Mat A,Mat B,PetscBool  *flg)
4990: {

5000:   MatCheckPreallocated(B,2);
5001:   if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
5002:   if (!B->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
5003:   if (A->rmap->N != B->rmap->N || A->cmap->N != B->cmap->N) SETERRQ4(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Mat A,Mat B: global dim %D %D %D %D",A->rmap->N,B->rmap->N,A->cmap->N,B->cmap->N);
5004:   if (!A->ops->equal) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Mat type %s",((PetscObject)A)->type_name);
5005:   if (!B->ops->equal) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Mat type %s",((PetscObject)B)->type_name);
5006:   if (A->ops->equal != B->ops->equal) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_INCOMP,"A is type: %s\nB is type: %s",((PetscObject)A)->type_name,((PetscObject)B)->type_name);
5007:   MatCheckPreallocated(A,1);

5009:   (*A->ops->equal)(A,B,flg);
5010:   return(0);
5011: }

5013: /*@C
5014:    MatDiagonalScale - Scales a matrix on the left and right by diagonal
5015:    matrices that are stored as vectors.  Either of the two scaling
5016:    matrices can be NULL.

5018:    Collective on Mat

5020:    Input Parameters:
5021: +  mat - the matrix to be scaled
5022: .  l - the left scaling vector (or NULL)
5023: -  r - the right scaling vector (or NULL)

5025:    Notes:
5026:    MatDiagonalScale() computes A = LAR, where
5027:    L = a diagonal matrix (stored as a vector), R = a diagonal matrix (stored as a vector)
5028:    The L scales the rows of the matrix, the R scales the columns of the matrix.

5030:    Level: intermediate

5032:    Concepts: matrices^diagonal scaling
5033:    Concepts: diagonal scaling of matrices

5035: .seealso: MatScale()
5036: @*/
5037: PetscErrorCode MatDiagonalScale(Mat mat,Vec l,Vec r)
5038: {

5044:   if (!mat->ops->diagonalscale) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
5047:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
5048:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
5049:   MatCheckPreallocated(mat,1);

5051:   PetscLogEventBegin(MAT_Scale,mat,0,0,0);
5052:   (*mat->ops->diagonalscale)(mat,l,r);
5053:   PetscLogEventEnd(MAT_Scale,mat,0,0,0);
5054:   PetscObjectStateIncrease((PetscObject)mat);
5055: #if defined(PETSC_HAVE_CUSP)
5056:   if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
5057:     mat->valid_GPU_matrix = PETSC_CUSP_CPU;
5058:   }
5059: #elif defined(PETSC_HAVE_VIENNACL)
5060:   if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
5061:     mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
5062:   }
5063: #elif defined(PETSC_HAVE_VECCUDA)
5064:   if (mat->valid_GPU_matrix != PETSC_CUDA_UNALLOCATED) {
5065:     mat->valid_GPU_matrix = PETSC_CUDA_CPU;
5066:   }
5067: #endif
5068:   return(0);
5069: }

5071: /*@
5072:     MatScale - Scales all elements of a matrix by a given number.

5074:     Logically Collective on Mat

5076:     Input Parameters:
5077: +   mat - the matrix to be scaled
5078: -   a  - the scaling value

5080:     Output Parameter:
5081: .   mat - the scaled matrix

5083:     Level: intermediate

5085:     Concepts: matrices^scaling all entries

5087: .seealso: MatDiagonalScale()
5088: @*/
5089: PetscErrorCode MatScale(Mat mat,PetscScalar a)
5090: {

5096:   if (a != (PetscScalar)1.0 && !mat->ops->scale) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
5097:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
5098:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
5100:   MatCheckPreallocated(mat,1);

5102:   PetscLogEventBegin(MAT_Scale,mat,0,0,0);
5103:   if (a != (PetscScalar)1.0) {
5104:     (*mat->ops->scale)(mat,a);
5105:     PetscObjectStateIncrease((PetscObject)mat);
5106: #if defined(PETSC_HAVE_CUSP)
5107:     if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
5108:       mat->valid_GPU_matrix = PETSC_CUSP_CPU;
5109:     }
5110: #elif defined(PETSC_HAVE_VIENNACL)
5111:     if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
5112:       mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
5113:     }
5114: #elif defined(PETSC_HAVE_VECCUDA)
5115:     if (mat->valid_GPU_matrix != PETSC_CUDA_UNALLOCATED) {
5116:       mat->valid_GPU_matrix = PETSC_CUDA_CPU;
5117:     }
5118: #endif
5119:   }
5120:   PetscLogEventEnd(MAT_Scale,mat,0,0,0);
5121:   return(0);
5122: }

5124: /*@
5125:    MatNorm - Calculates various norms of a matrix.

5127:    Collective on Mat

5129:    Input Parameters:
5130: +  mat - the matrix
5131: -  type - the type of norm, NORM_1, NORM_FROBENIUS, NORM_INFINITY

5133:    Output Parameters:
5134: .  nrm - the resulting norm

5136:    Level: intermediate

5138:    Concepts: matrices^norm
5139:    Concepts: norm^of matrix
5140: @*/
5141: PetscErrorCode MatNorm(Mat mat,NormType type,PetscReal *nrm)
5142: {


5150:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
5151:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
5152:   if (!mat->ops->norm) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
5153:   MatCheckPreallocated(mat,1);

5155:   (*mat->ops->norm)(mat,type,nrm);
5156:   return(0);
5157: }

5159: /*
5160:      This variable is used to prevent counting of MatAssemblyBegin() that
5161:    are called from within a MatAssemblyEnd().
5162: */
5163: static PetscInt MatAssemblyEnd_InUse = 0;
5164: /*@
5165:    MatAssemblyBegin - Begins assembling the matrix.  This routine should
5166:    be called after completing all calls to MatSetValues().

5168:    Collective on Mat

5170:    Input Parameters:
5171: +  mat - the matrix
5172: -  type - type of assembly, either MAT_FLUSH_ASSEMBLY or MAT_FINAL_ASSEMBLY

5174:    Notes:
5175:    MatSetValues() generally caches the values.  The matrix is ready to
5176:    use only after MatAssemblyBegin() and MatAssemblyEnd() have been called.
5177:    Use MAT_FLUSH_ASSEMBLY when switching between ADD_VALUES and INSERT_VALUES
5178:    in MatSetValues(); use MAT_FINAL_ASSEMBLY for the final assembly before
5179:    using the matrix.

5181:    ALL processes that share a matrix MUST call MatAssemblyBegin() and MatAssemblyEnd() the SAME NUMBER of times, and each time with the
5182:    same flag of MAT_FLUSH_ASSEMBLY or MAT_FINAL_ASSEMBLY for all processes. Thus you CANNOT locally change from ADD_VALUES to INSERT_VALUES, that is
5183:    a global collective operation requring all processes that share the matrix.

5185:    Space for preallocated nonzeros that is not filled by a call to MatSetValues() or a related routine are compressed
5186:    out by assembly. If you intend to use that extra space on a subsequent assembly, be sure to insert explicit zeros
5187:    before MAT_FINAL_ASSEMBLY so the space is not compressed out.

5189:    Level: beginner

5191:    Concepts: matrices^assembling

5193: .seealso: MatAssemblyEnd(), MatSetValues(), MatAssembled()
5194: @*/
5195: PetscErrorCode MatAssemblyBegin(Mat mat,MatAssemblyType type)
5196: {

5202:   MatCheckPreallocated(mat,1);
5203:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix.\nDid you forget to call MatSetUnfactored()?");
5204:   if (mat->assembled) {
5205:     mat->was_assembled = PETSC_TRUE;
5206:     mat->assembled     = PETSC_FALSE;
5207:   }
5208:   if (!MatAssemblyEnd_InUse) {
5209:     PetscLogEventBegin(MAT_AssemblyBegin,mat,0,0,0);
5210:     if (mat->ops->assemblybegin) {(*mat->ops->assemblybegin)(mat,type);}
5211:     PetscLogEventEnd(MAT_AssemblyBegin,mat,0,0,0);
5212:   } else if (mat->ops->assemblybegin) {
5213:     (*mat->ops->assemblybegin)(mat,type);
5214:   }
5215:   return(0);
5216: }

5218: /*@
5219:    MatAssembled - Indicates if a matrix has been assembled and is ready for
5220:      use; for example, in matrix-vector product.

5222:    Not Collective

5224:    Input Parameter:
5225: .  mat - the matrix

5227:    Output Parameter:
5228: .  assembled - PETSC_TRUE or PETSC_FALSE

5230:    Level: advanced

5232:    Concepts: matrices^assembled?

5234: .seealso: MatAssemblyEnd(), MatSetValues(), MatAssemblyBegin()
5235: @*/
5236: PetscErrorCode MatAssembled(Mat mat,PetscBool  *assembled)
5237: {
5242:   *assembled = mat->assembled;
5243:   return(0);
5244: }

5246: /*@
5247:    MatAssemblyEnd - Completes assembling the matrix.  This routine should
5248:    be called after MatAssemblyBegin().

5250:    Collective on Mat

5252:    Input Parameters:
5253: +  mat - the matrix
5254: -  type - type of assembly, either MAT_FLUSH_ASSEMBLY or MAT_FINAL_ASSEMBLY

5256:    Options Database Keys:
5257: +  -mat_view ::ascii_info - Prints info on matrix at conclusion of MatEndAssembly()
5258: .  -mat_view ::ascii_info_detail - Prints more detailed info
5259: .  -mat_view - Prints matrix in ASCII format
5260: .  -mat_view ::ascii_matlab - Prints matrix in Matlab format
5261: .  -mat_view draw - PetscDraws nonzero structure of matrix, using MatView() and PetscDrawOpenX().
5262: .  -display <name> - Sets display name (default is host)
5263: .  -draw_pause <sec> - Sets number of seconds to pause after display
5264: .  -mat_view socket - Sends matrix to socket, can be accessed from Matlab (See Users-Manual: Chapter 12 Using MATLAB with PETSc )
5265: .  -viewer_socket_machine <machine> - Machine to use for socket
5266: .  -viewer_socket_port <port> - Port number to use for socket
5267: -  -mat_view binary:filename[:append] - Save matrix to file in binary format

5269:    Notes:
5270:    MatSetValues() generally caches the values.  The matrix is ready to
5271:    use only after MatAssemblyBegin() and MatAssemblyEnd() have been called.
5272:    Use MAT_FLUSH_ASSEMBLY when switching between ADD_VALUES and INSERT_VALUES
5273:    in MatSetValues(); use MAT_FINAL_ASSEMBLY for the final assembly before
5274:    using the matrix.

5276:    Space for preallocated nonzeros that is not filled by a call to MatSetValues() or a related routine are compressed
5277:    out by assembly. If you intend to use that extra space on a subsequent assembly, be sure to insert explicit zeros
5278:    before MAT_FINAL_ASSEMBLY so the space is not compressed out.

5280:    Level: beginner

5282: .seealso: MatAssemblyBegin(), MatSetValues(), PetscDrawOpenX(), PetscDrawCreate(), MatView(), MatAssembled(), PetscViewerSocketOpen()
5283: @*/
5284: PetscErrorCode MatAssemblyEnd(Mat mat,MatAssemblyType type)
5285: {
5286:   PetscErrorCode  ierr;
5287:   static PetscInt inassm = 0;
5288:   PetscBool       flg    = PETSC_FALSE;


5294:   inassm++;
5295:   MatAssemblyEnd_InUse++;
5296:   if (MatAssemblyEnd_InUse == 1) { /* Do the logging only the first time through */
5297:     PetscLogEventBegin(MAT_AssemblyEnd,mat,0,0,0);
5298:     if (mat->ops->assemblyend) {
5299:       (*mat->ops->assemblyend)(mat,type);
5300:     }
5301:     PetscLogEventEnd(MAT_AssemblyEnd,mat,0,0,0);
5302:   } else if (mat->ops->assemblyend) {
5303:     (*mat->ops->assemblyend)(mat,type);
5304:   }

5306:   /* Flush assembly is not a true assembly */
5307:   if (type != MAT_FLUSH_ASSEMBLY) {
5308:     mat->assembled = PETSC_TRUE; mat->num_ass++;
5309:   }
5310:   mat->insertmode = NOT_SET_VALUES;
5311:   MatAssemblyEnd_InUse--;
5312:   PetscObjectStateIncrease((PetscObject)mat);
5313:   if (!mat->symmetric_eternal) {
5314:     mat->symmetric_set              = PETSC_FALSE;
5315:     mat->hermitian_set              = PETSC_FALSE;
5316:     mat->structurally_symmetric_set = PETSC_FALSE;
5317:   }
5318: #if defined(PETSC_HAVE_CUSP)
5319:   if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
5320:     mat->valid_GPU_matrix = PETSC_CUSP_CPU;
5321:   }
5322: #elif defined(PETSC_HAVE_VIENNACL)
5323:   if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
5324:     mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
5325:   }
5326: #elif defined(PETSC_HAVE_VECCUDA)
5327:   if (mat->valid_GPU_matrix != PETSC_CUDA_UNALLOCATED) {
5328:     mat->valid_GPU_matrix = PETSC_CUDA_CPU;
5329:   }
5330: #endif
5331:   if (inassm == 1 && type != MAT_FLUSH_ASSEMBLY) {
5332:     MatViewFromOptions(mat,NULL,"-mat_view");

5334:     if (mat->checksymmetryonassembly) {
5335:       MatIsSymmetric(mat,mat->checksymmetrytol,&flg);
5336:       if (flg) {
5337:         PetscPrintf(PetscObjectComm((PetscObject)mat),"Matrix is symmetric (tolerance %g)\n",(double)mat->checksymmetrytol);
5338:       } else {
5339:         PetscPrintf(PetscObjectComm((PetscObject)mat),"Matrix is not symmetric (tolerance %g)\n",(double)mat->checksymmetrytol);
5340:       }
5341:     }
5342:     if (mat->nullsp && mat->checknullspaceonassembly) {
5343:       MatNullSpaceTest(mat->nullsp,mat,NULL);
5344:     }
5345:   }
5346:   inassm--;
5347:   return(0);
5348: }

5350: /*@
5351:    MatSetOption - Sets a parameter option for a matrix. Some options
5352:    may be specific to certain storage formats.  Some options
5353:    determine how values will be inserted (or added). Sorted,
5354:    row-oriented input will generally assemble the fastest. The default
5355:    is row-oriented.

5357:    Logically Collective on Mat for certain operations, such as MAT_SPD, not collective for MAT_ROW_ORIENTED, see MatOption

5359:    Input Parameters:
5360: +  mat - the matrix
5361: .  option - the option, one of those listed below (and possibly others),
5362: -  flg - turn the option on (PETSC_TRUE) or off (PETSC_FALSE)

5364:   Options Describing Matrix Structure:
5365: +    MAT_SPD - symmetric positive definite
5366: .    MAT_SYMMETRIC - symmetric in terms of both structure and value
5367: .    MAT_HERMITIAN - transpose is the complex conjugation
5368: .    MAT_STRUCTURALLY_SYMMETRIC - symmetric nonzero structure
5369: -    MAT_SYMMETRY_ETERNAL - if you would like the symmetry/Hermitian flag
5370:                             you set to be kept with all future use of the matrix
5371:                             including after MatAssemblyBegin/End() which could
5372:                             potentially change the symmetry structure, i.e. you
5373:                             KNOW the matrix will ALWAYS have the property you set.


5376:    Options For Use with MatSetValues():
5377:    Insert a logically dense subblock, which can be
5378: .    MAT_ROW_ORIENTED - row-oriented (default)

5380:    Note these options reflect the data you pass in with MatSetValues(); it has
5381:    nothing to do with how the data is stored internally in the matrix
5382:    data structure.

5384:    When (re)assembling a matrix, we can restrict the input for
5385:    efficiency/debugging purposes.  These options include:
5386: +    MAT_NEW_NONZERO_LOCATIONS - additional insertions will be allowed if they generate a new nonzero (slow)
5387: .    MAT_NEW_DIAGONALS - new diagonals will be allowed (for block diagonal format only)
5388: .    MAT_IGNORE_OFF_PROC_ENTRIES - drops off-processor entries
5389: .    MAT_NEW_NONZERO_LOCATION_ERR - generates an error for new matrix entry
5390: .    MAT_USE_HASH_TABLE - uses a hash table to speed up matrix assembly
5391: .    MAT_NO_OFF_PROC_ENTRIES - you know each process will only set values for its own rows, will generate an error if
5392:         any process sets values for another process. This avoids all reductions in the MatAssembly routines and thus improves
5393:         performance for very large process counts.
5394: -    MAT_SUBSET_OFF_PROC_ENTRIES - you know that the first assembly after setting this flag will set a superset
5395:         of the off-process entries required for all subsequent assemblies. This avoids a rendezvous step in the MatAssembly
5396:         functions, instead sending only neighbor messages.

5398:    Notes:
5399:    Except for MAT_UNUSED_NONZERO_LOCATION_ERR and  MAT_ROW_ORIENTED all processes that share the matrix must pass the same value in flg!

5401:    Some options are relevant only for particular matrix types and
5402:    are thus ignored by others.  Other options are not supported by
5403:    certain matrix types and will generate an error message if set.

5405:    If using a Fortran 77 module to compute a matrix, one may need to
5406:    use the column-oriented option (or convert to the row-oriented
5407:    format).

5409:    MAT_NEW_NONZERO_LOCATIONS set to PETSC_FALSE indicates that any add or insertion
5410:    that would generate a new entry in the nonzero structure is instead
5411:    ignored.  Thus, if memory has not alredy been allocated for this particular
5412:    data, then the insertion is ignored. For dense matrices, in which
5413:    the entire array is allocated, no entries are ever ignored.
5414:    Set after the first MatAssemblyEnd(). If this option is set then the MatAssemblyBegin/End() processes has one less global reduction

5416:    MAT_NEW_NONZERO_LOCATION_ERR set to PETSC_TRUE indicates that any add or insertion
5417:    that would generate a new entry in the nonzero structure instead produces
5418:    an error. (Currently supported for AIJ and BAIJ formats only.) If this option is set then the MatAssemblyBegin/End() processes has one less global reduction

5420:    MAT_NEW_NONZERO_ALLOCATION_ERR set to PETSC_TRUE indicates that any add or insertion
5421:    that would generate a new entry that has not been preallocated will
5422:    instead produce an error. (Currently supported for AIJ and BAIJ formats
5423:    only.) This is a useful flag when debugging matrix memory preallocation.
5424:    If this option is set then the MatAssemblyBegin/End() processes has one less global reduction

5426:    MAT_IGNORE_OFF_PROC_ENTRIES set to PETSC_TRUE indicates entries destined for
5427:    other processors should be dropped, rather than stashed.
5428:    This is useful if you know that the "owning" processor is also
5429:    always generating the correct matrix entries, so that PETSc need
5430:    not transfer duplicate entries generated on another processor.

5432:    MAT_USE_HASH_TABLE indicates that a hash table be used to improve the
5433:    searches during matrix assembly. When this flag is set, the hash table
5434:    is created during the first Matrix Assembly. This hash table is
5435:    used the next time through, during MatSetVaules()/MatSetVaulesBlocked()
5436:    to improve the searching of indices. MAT_NEW_NONZERO_LOCATIONS flag
5437:    should be used with MAT_USE_HASH_TABLE flag. This option is currently
5438:    supported by MATMPIBAIJ format only.

5440:    MAT_KEEP_NONZERO_PATTERN indicates when MatZeroRows() is called the zeroed entries
5441:    are kept in the nonzero structure

5443:    MAT_IGNORE_ZERO_ENTRIES - for AIJ/IS matrices this will stop zero values from creating
5444:    a zero location in the matrix

5446:    MAT_USE_INODES - indicates using inode version of the code - works with AIJ matrix types

5448:    MAT_NO_OFF_PROC_ZERO_ROWS - you know each process will only zero its own rows. This avoids all reductions in the
5449:         zero row routines and thus improves performance for very large process counts.

5451:    MAT_IGNORE_LOWER_TRIANGULAR - For SBAIJ matrices will ignore any insertions you make in the lower triangular
5452:         part of the matrix (since they should match the upper triangular part).

5454:    Notes: Can only be called after MatSetSizes() and MatSetType() have been set.

5456:    Level: intermediate

5458:    Concepts: matrices^setting options

5460: .seealso:  MatOption, Mat

5462: @*/
5463: PetscErrorCode MatSetOption(Mat mat,MatOption op,PetscBool flg)
5464: {

5470:   if (op > 0) {
5473:   }

5475:   if (((int) op) <= MAT_OPTION_MIN || ((int) op) >= MAT_OPTION_MAX) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_OUTOFRANGE,"Options %d is out of range",(int)op);
5476:   if (!((PetscObject)mat)->type_name) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_TYPENOTSET,"Cannot set options until type and size have been set, see MatSetType() and MatSetSizes()");

5478:   switch (op) {
5479:   case MAT_NO_OFF_PROC_ENTRIES:
5480:     mat->nooffprocentries = flg;
5481:     return(0);
5482:     break;
5483:   case MAT_SUBSET_OFF_PROC_ENTRIES:
5484:     mat->subsetoffprocentries = flg;
5485:     return(0);
5486:   case MAT_NO_OFF_PROC_ZERO_ROWS:
5487:     mat->nooffproczerorows = flg;
5488:     return(0);
5489:     break;
5490:   case MAT_SPD:
5491:     mat->spd_set = PETSC_TRUE;
5492:     mat->spd     = flg;
5493:     if (flg) {
5494:       mat->symmetric                  = PETSC_TRUE;
5495:       mat->structurally_symmetric     = PETSC_TRUE;
5496:       mat->symmetric_set              = PETSC_TRUE;
5497:       mat->structurally_symmetric_set = PETSC_TRUE;
5498:     }
5499:     break;
5500:   case MAT_SYMMETRIC:
5501:     mat->symmetric = flg;
5502:     if (flg) mat->structurally_symmetric = PETSC_TRUE;
5503:     mat->symmetric_set              = PETSC_TRUE;
5504:     mat->structurally_symmetric_set = flg;
5505: #if !defined(PETSC_USE_COMPLEX)
5506:     mat->hermitian     = flg;
5507:     mat->hermitian_set = PETSC_TRUE;
5508: #endif
5509:     break;
5510:   case MAT_HERMITIAN:
5511:     mat->hermitian = flg;
5512:     if (flg) mat->structurally_symmetric = PETSC_TRUE;
5513:     mat->hermitian_set              = PETSC_TRUE;
5514:     mat->structurally_symmetric_set = flg;
5515: #if !defined(PETSC_USE_COMPLEX)
5516:     mat->symmetric     = flg;
5517:     mat->symmetric_set = PETSC_TRUE;
5518: #endif
5519:     break;
5520:   case MAT_STRUCTURALLY_SYMMETRIC:
5521:     mat->structurally_symmetric     = flg;
5522:     mat->structurally_symmetric_set = PETSC_TRUE;
5523:     break;
5524:   case MAT_SYMMETRY_ETERNAL:
5525:     mat->symmetric_eternal = flg;
5526:     break;
5527:   case MAT_STRUCTURE_ONLY:
5528:     mat->structure_only = flg;
5529:     break;
5530:   default:
5531:     break;
5532:   }
5533:   if (mat->ops->setoption) {
5534:     (*mat->ops->setoption)(mat,op,flg);
5535:   }
5536:   return(0);
5537: }

5539: /*@
5540:    MatGetOption - Gets a parameter option that has been set for a matrix.

5542:    Logically Collective on Mat for certain operations, such as MAT_SPD, not collective for MAT_ROW_ORIENTED, see MatOption

5544:    Input Parameters:
5545: +  mat - the matrix
5546: -  option - the option, this only responds to certain options, check the code for which ones

5548:    Output Parameter:
5549: .  flg - turn the option on (PETSC_TRUE) or off (PETSC_FALSE)

5551:     Notes: Can only be called after MatSetSizes() and MatSetType() have been set.

5553:    Level: intermediate

5555:    Concepts: matrices^setting options

5557: .seealso:  MatOption, MatSetOption()

5559: @*/
5560: PetscErrorCode MatGetOption(Mat mat,MatOption op,PetscBool *flg)
5561: {

5566:   if (((int) op) <= MAT_OPTION_MIN || ((int) op) >= MAT_OPTION_MAX) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_OUTOFRANGE,"Options %d is out of range",(int)op);
5567:   if (!((PetscObject)mat)->type_name) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_TYPENOTSET,"Cannot get options until type and size have been set, see MatSetType() and MatSetSizes()");

5569:   switch (op) {
5570:   case MAT_NO_OFF_PROC_ENTRIES:
5571:     *flg = mat->nooffprocentries;
5572:     break;
5573:   case MAT_NO_OFF_PROC_ZERO_ROWS:
5574:     *flg = mat->nooffproczerorows;
5575:     break;
5576:   case MAT_SYMMETRIC:
5577:     *flg = mat->symmetric;
5578:     break;
5579:   case MAT_HERMITIAN:
5580:     *flg = mat->hermitian;
5581:     break;
5582:   case MAT_STRUCTURALLY_SYMMETRIC:
5583:     *flg = mat->structurally_symmetric;
5584:     break;
5585:   case MAT_SYMMETRY_ETERNAL:
5586:     *flg = mat->symmetric_eternal;
5587:     break;
5588:   case MAT_SPD:
5589:     *flg = mat->spd;
5590:     break;
5591:   default:
5592:     break;
5593:   }
5594:   return(0);
5595: }

5597: /*@
5598:    MatZeroEntries - Zeros all entries of a matrix.  For sparse matrices
5599:    this routine retains the old nonzero structure.

5601:    Logically Collective on Mat

5603:    Input Parameters:
5604: .  mat - the matrix

5606:    Level: intermediate

5608:    Notes: If the matrix was not preallocated then a default, likely poor preallocation will be set in the matrix, so this should be called after the preallocation phase.
5609:    See the Performance chapter of the users manual for information on preallocating matrices.

5611:    Concepts: matrices^zeroing

5613: .seealso: MatZeroRows()
5614: @*/
5615: PetscErrorCode MatZeroEntries(Mat mat)
5616: {

5622:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
5623:   if (mat->insertmode != NOT_SET_VALUES) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for matrices where you have set values but not yet assembled");
5624:   if (!mat->ops->zeroentries) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
5625:   MatCheckPreallocated(mat,1);

5627:   PetscLogEventBegin(MAT_ZeroEntries,mat,0,0,0);
5628:   (*mat->ops->zeroentries)(mat);
5629:   PetscLogEventEnd(MAT_ZeroEntries,mat,0,0,0);
5630:   PetscObjectStateIncrease((PetscObject)mat);
5631: #if defined(PETSC_HAVE_CUSP)
5632:   if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
5633:     mat->valid_GPU_matrix = PETSC_CUSP_CPU;
5634:   }
5635: #elif defined(PETSC_HAVE_VIENNACL)
5636:   if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
5637:     mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
5638:   }
5639: #elif defined(PETSC_HAVE_VECCUDA)
5640:   if (mat->valid_GPU_matrix != PETSC_CUDA_UNALLOCATED) {
5641:     mat->valid_GPU_matrix = PETSC_CUDA_CPU;
5642:   }
5643: #endif
5644:   return(0);
5645: }

5647: /*@C
5648:    MatZeroRowsColumns - Zeros all entries (except possibly the main diagonal)
5649:    of a set of rows and columns of a matrix.

5651:    Collective on Mat

5653:    Input Parameters:
5654: +  mat - the matrix
5655: .  numRows - the number of rows to remove
5656: .  rows - the global row indices
5657: .  diag - value put in all diagonals of eliminated rows (0.0 will even eliminate diagonal entry)
5658: .  x - optional vector of solutions for zeroed rows (other entries in vector are not used)
5659: -  b - optional vector of right hand side, that will be adjusted by provided solution

5661:    Notes:
5662:    This does not change the nonzero structure of the matrix, it merely zeros those entries in the matrix.

5664:    The user can set a value in the diagonal entry (or for the AIJ and
5665:    row formats can optionally remove the main diagonal entry from the
5666:    nonzero structure as well, by passing 0.0 as the final argument).

5668:    For the parallel case, all processes that share the matrix (i.e.,
5669:    those in the communicator used for matrix creation) MUST call this
5670:    routine, regardless of whether any rows being zeroed are owned by
5671:    them.

5673:    Each processor can indicate any rows in the entire matrix to be zeroed (i.e. each process does NOT have to
5674:    list only rows local to itself).

5676:    The option MAT_NO_OFF_PROC_ZERO_ROWS does not apply to this routine.

5678:    Level: intermediate

5680:    Concepts: matrices^zeroing rows

5682: .seealso: MatZeroRowsIS(), MatZeroRows(), MatZeroRowsLocalIS(), MatZeroRowsStencil(), MatZeroEntries(), MatZeroRowsLocal(), MatSetOption(),
5683:           MatZeroRowsColumnsLocal(), MatZeroRowsColumnsLocalIS(), MatZeroRowsColumnsIS(), MatZeroRowsColumnsStencil()
5684: @*/
5685: PetscErrorCode MatZeroRowsColumns(Mat mat,PetscInt numRows,const PetscInt rows[],PetscScalar diag,Vec x,Vec b)
5686: {

5693:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
5694:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
5695:   if (!mat->ops->zerorowscolumns) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
5696:   MatCheckPreallocated(mat,1);

5698:   (*mat->ops->zerorowscolumns)(mat,numRows,rows,diag,x,b);
5699:   MatViewFromOptions(mat,NULL,"-mat_view");
5700:   PetscObjectStateIncrease((PetscObject)mat);
5701: #if defined(PETSC_HAVE_CUSP)
5702:   if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
5703:     mat->valid_GPU_matrix = PETSC_CUSP_CPU;
5704:   }
5705: #elif defined(PETSC_HAVE_VIENNACL)
5706:   if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
5707:     mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
5708:   }
5709: #elif defined(PETSC_HAVE_VECCUDA)
5710:   if (mat->valid_GPU_matrix != PETSC_CUDA_UNALLOCATED) {
5711:     mat->valid_GPU_matrix = PETSC_CUDA_CPU;
5712:   }
5713: #endif
5714:   return(0);
5715: }

5717: /*@C
5718:    MatZeroRowsColumnsIS - Zeros all entries (except possibly the main diagonal)
5719:    of a set of rows and columns of a matrix.

5721:    Collective on Mat

5723:    Input Parameters:
5724: +  mat - the matrix
5725: .  is - the rows to zero
5726: .  diag - value put in all diagonals of eliminated rows (0.0 will even eliminate diagonal entry)
5727: .  x - optional vector of solutions for zeroed rows (other entries in vector are not used)
5728: -  b - optional vector of right hand side, that will be adjusted by provided solution

5730:    Notes:
5731:    This does not change the nonzero structure of the matrix, it merely zeros those entries in the matrix.

5733:    The user can set a value in the diagonal entry (or for the AIJ and
5734:    row formats can optionally remove the main diagonal entry from the
5735:    nonzero structure as well, by passing 0.0 as the final argument).

5737:    For the parallel case, all processes that share the matrix (i.e.,
5738:    those in the communicator used for matrix creation) MUST call this
5739:    routine, regardless of whether any rows being zeroed are owned by
5740:    them.

5742:    Each processor can indicate any rows in the entire matrix to be zeroed (i.e. each process does NOT have to
5743:    list only rows local to itself).

5745:    The option MAT_NO_OFF_PROC_ZERO_ROWS does not apply to this routine.

5747:    Level: intermediate

5749:    Concepts: matrices^zeroing rows

5751: .seealso: MatZeroRowsIS(), MatZeroRowsColumns(), MatZeroRowsLocalIS(), MatZeroRowsStencil(), MatZeroEntries(), MatZeroRowsLocal(), MatSetOption(),
5752:           MatZeroRowsColumnsLocal(), MatZeroRowsColumnsLocalIS(), MatZeroRows(), MatZeroRowsColumnsStencil()
5753: @*/
5754: PetscErrorCode MatZeroRowsColumnsIS(Mat mat,IS is,PetscScalar diag,Vec x,Vec b)
5755: {
5757:   PetscInt       numRows;
5758:   const PetscInt *rows;

5765:   ISGetLocalSize(is,&numRows);
5766:   ISGetIndices(is,&rows);
5767:   MatZeroRowsColumns(mat,numRows,rows,diag,x,b);
5768:   ISRestoreIndices(is,&rows);
5769:   return(0);
5770: }

5772: /*@C
5773:    MatZeroRows - Zeros all entries (except possibly the main diagonal)
5774:    of a set of rows of a matrix.

5776:    Collective on Mat

5778:    Input Parameters:
5779: +  mat - the matrix
5780: .  numRows - the number of rows to remove
5781: .  rows - the global row indices
5782: .  diag - value put in all diagonals of eliminated rows (0.0 will even eliminate diagonal entry)
5783: .  x - optional vector of solutions for zeroed rows (other entries in vector are not used)
5784: -  b - optional vector of right hand side, that will be adjusted by provided solution

5786:    Notes:
5787:    For the AIJ and BAIJ matrix formats this removes the old nonzero structure,
5788:    but does not release memory.  For the dense and block diagonal
5789:    formats this does not alter the nonzero structure.

5791:    If the option MatSetOption(mat,MAT_KEEP_NONZERO_PATTERN,PETSC_TRUE) the nonzero structure
5792:    of the matrix is not changed (even for AIJ and BAIJ matrices) the values are
5793:    merely zeroed.

5795:    The user can set a value in the diagonal entry (or for the AIJ and
5796:    row formats can optionally remove the main diagonal entry from the
5797:    nonzero structure as well, by passing 0.0 as the final argument).

5799:    For the parallel case, all processes that share the matrix (i.e.,
5800:    those in the communicator used for matrix creation) MUST call this
5801:    routine, regardless of whether any rows being zeroed are owned by
5802:    them.

5804:    Each processor can indicate any rows in the entire matrix to be zeroed (i.e. each process does NOT have to
5805:    list only rows local to itself).

5807:    You can call MatSetOption(mat,MAT_NO_OFF_PROC_ZERO_ROWS,PETSC_TRUE) if each process indicates only rows it
5808:    owns that are to be zeroed. This saves a global synchronization in the implementation.

5810:    Level: intermediate

5812:    Concepts: matrices^zeroing rows

5814: .seealso: MatZeroRowsIS(), MatZeroRowsColumns(), MatZeroRowsLocalIS(), MatZeroRowsStencil(), MatZeroEntries(), MatZeroRowsLocal(), MatSetOption(),
5815:           MatZeroRowsColumnsLocal(), MatZeroRowsColumnsLocalIS(), MatZeroRowsColumnsIS(), MatZeroRowsColumnsStencil()
5816: @*/
5817: PetscErrorCode MatZeroRows(Mat mat,PetscInt numRows,const PetscInt rows[],PetscScalar diag,Vec x,Vec b)
5818: {

5825:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
5826:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
5827:   if (!mat->ops->zerorows) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
5828:   MatCheckPreallocated(mat,1);

5830:   (*mat->ops->zerorows)(mat,numRows,rows,diag,x,b);
5831:   MatViewFromOptions(mat,NULL,"-mat_view");
5832:   PetscObjectStateIncrease((PetscObject)mat);
5833: #if defined(PETSC_HAVE_CUSP)
5834:   if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
5835:     mat->valid_GPU_matrix = PETSC_CUSP_CPU;
5836:   }
5837: #elif defined(PETSC_HAVE_VIENNACL)
5838:   if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
5839:     mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
5840:   }
5841: #elif defined(PETSC_HAVE_VECCUDA)
5842:   if (mat->valid_GPU_matrix != PETSC_CUDA_UNALLOCATED) {
5843:     mat->valid_GPU_matrix = PETSC_CUDA_CPU;
5844:   }
5845: #endif
5846:   return(0);
5847: }

5849: /*@C
5850:    MatZeroRowsIS - Zeros all entries (except possibly the main diagonal)
5851:    of a set of rows of a matrix.

5853:    Collective on Mat

5855:    Input Parameters:
5856: +  mat - the matrix
5857: .  is - index set of rows to remove
5858: .  diag - value put in all diagonals of eliminated rows
5859: .  x - optional vector of solutions for zeroed rows (other entries in vector are not used)
5860: -  b - optional vector of right hand side, that will be adjusted by provided solution

5862:    Notes:
5863:    For the AIJ and BAIJ matrix formats this removes the old nonzero structure,
5864:    but does not release memory.  For the dense and block diagonal
5865:    formats this does not alter the nonzero structure.

5867:    If the option MatSetOption(mat,MAT_KEEP_NONZERO_PATTERN,PETSC_TRUE) the nonzero structure
5868:    of the matrix is not changed (even for AIJ and BAIJ matrices) the values are
5869:    merely zeroed.

5871:    The user can set a value in the diagonal entry (or for the AIJ and
5872:    row formats can optionally remove the main diagonal entry from the
5873:    nonzero structure as well, by passing 0.0 as the final argument).

5875:    For the parallel case, all processes that share the matrix (i.e.,
5876:    those in the communicator used for matrix creation) MUST call this
5877:    routine, regardless of whether any rows being zeroed are owned by
5878:    them.

5880:    Each processor can indicate any rows in the entire matrix to be zeroed (i.e. each process does NOT have to
5881:    list only rows local to itself).

5883:    You can call MatSetOption(mat,MAT_NO_OFF_PROC_ZERO_ROWS,PETSC_TRUE) if each process indicates only rows it
5884:    owns that are to be zeroed. This saves a global synchronization in the implementation.

5886:    Level: intermediate

5888:    Concepts: matrices^zeroing rows

5890: .seealso: MatZeroRows(), MatZeroRowsColumns(), MatZeroRowsLocalIS(), MatZeroRowsStencil(), MatZeroEntries(), MatZeroRowsLocal(), MatSetOption(),
5891:           MatZeroRowsColumnsLocal(), MatZeroRowsColumnsLocalIS(), MatZeroRowsColumnsIS(), MatZeroRowsColumnsStencil()
5892: @*/
5893: PetscErrorCode MatZeroRowsIS(Mat mat,IS is,PetscScalar diag,Vec x,Vec b)
5894: {
5895:   PetscInt       numRows;
5896:   const PetscInt *rows;

5903:   ISGetLocalSize(is,&numRows);
5904:   ISGetIndices(is,&rows);
5905:   MatZeroRows(mat,numRows,rows,diag,x,b);
5906:   ISRestoreIndices(is,&rows);
5907:   return(0);
5908: }

5910: /*@C
5911:    MatZeroRowsStencil - Zeros all entries (except possibly the main diagonal)
5912:    of a set of rows of a matrix. These rows must be local to the process.

5914:    Collective on Mat

5916:    Input Parameters:
5917: +  mat - the matrix
5918: .  numRows - the number of rows to remove
5919: .  rows - the grid coordinates (and component number when dof > 1) for matrix rows
5920: .  diag - value put in all diagonals of eliminated rows (0.0 will even eliminate diagonal entry)
5921: .  x - optional vector of solutions for zeroed rows (other entries in vector are not used)
5922: -  b - optional vector of right hand side, that will be adjusted by provided solution

5924:    Notes:
5925:    For the AIJ and BAIJ matrix formats this removes the old nonzero structure,
5926:    but does not release memory.  For the dense and block diagonal
5927:    formats this does not alter the nonzero structure.

5929:    If the option MatSetOption(mat,MAT_KEEP_NONZERO_PATTERN,PETSC_TRUE) the nonzero structure
5930:    of the matrix is not changed (even for AIJ and BAIJ matrices) the values are
5931:    merely zeroed.

5933:    The user can set a value in the diagonal entry (or for the AIJ and
5934:    row formats can optionally remove the main diagonal entry from the
5935:    nonzero structure as well, by passing 0.0 as the final argument).

5937:    For the parallel case, all processes that share the matrix (i.e.,
5938:    those in the communicator used for matrix creation) MUST call this
5939:    routine, regardless of whether any rows being zeroed are owned by
5940:    them.

5942:    Each processor can indicate any rows in the entire matrix to be zeroed (i.e. each process does NOT have to
5943:    list only rows local to itself).

5945:    The grid coordinates are across the entire grid, not just the local portion

5947:    In Fortran idxm and idxn should be declared as
5948: $     MatStencil idxm(4,m)
5949:    and the values inserted using
5950: $    idxm(MatStencil_i,1) = i
5951: $    idxm(MatStencil_j,1) = j
5952: $    idxm(MatStencil_k,1) = k
5953: $    idxm(MatStencil_c,1) = c
5954:    etc

5956:    For periodic boundary conditions use negative indices for values to the left (below 0; that are to be
5957:    obtained by wrapping values from right edge). For values to the right of the last entry using that index plus one
5958:    etc to obtain values that obtained by wrapping the values from the left edge. This does not work for anything but the
5959:    DM_BOUNDARY_PERIODIC boundary type.

5961:    For indices that don't mean anything for your case (like the k index when working in 2d) or the c index when you have
5962:    a single value per point) you can skip filling those indices.

5964:    Level: intermediate

5966:    Concepts: matrices^zeroing rows

5968: .seealso: MatZeroRowsIS(), MatZeroRowsColumns(), MatZeroRowsLocalIS(), MatZeroRowsl(), MatZeroEntries(), MatZeroRowsLocal(), MatSetOption(),
5969:           MatZeroRowsColumnsLocal(), MatZeroRowsColumnsLocalIS(), MatZeroRowsColumnsIS(), MatZeroRowsColumnsStencil()
5970: @*/
5971: PetscErrorCode MatZeroRowsStencil(Mat mat,PetscInt numRows,const MatStencil rows[],PetscScalar diag,Vec x,Vec b)
5972: {
5973:   PetscInt       dim     = mat->stencil.dim;
5974:   PetscInt       sdim    = dim - (1 - (PetscInt) mat->stencil.noc);
5975:   PetscInt       *dims   = mat->stencil.dims+1;
5976:   PetscInt       *starts = mat->stencil.starts;
5977:   PetscInt       *dxm    = (PetscInt*) rows;
5978:   PetscInt       *jdxm, i, j, tmp, numNewRows = 0;


5986:   PetscMalloc1(numRows, &jdxm);
5987:   for (i = 0; i < numRows; ++i) {
5988:     /* Skip unused dimensions (they are ordered k, j, i, c) */
5989:     for (j = 0; j < 3-sdim; ++j) dxm++;
5990:     /* Local index in X dir */
5991:     tmp = *dxm++ - starts[0];
5992:     /* Loop over remaining dimensions */
5993:     for (j = 0; j < dim-1; ++j) {
5994:       /* If nonlocal, set index to be negative */
5995:       if ((*dxm++ - starts[j+1]) < 0 || tmp < 0) tmp = PETSC_MIN_INT;
5996:       /* Update local index */
5997:       else tmp = tmp*dims[j] + *(dxm-1) - starts[j+1];
5998:     }
5999:     /* Skip component slot if necessary */
6000:     if (mat->stencil.noc) dxm++;
6001:     /* Local row number */
6002:     if (tmp >= 0) {
6003:       jdxm[numNewRows++] = tmp;
6004:     }
6005:   }
6006:   MatZeroRowsLocal(mat,numNewRows,jdxm,diag,x,b);
6007:   PetscFree(jdxm);
6008:   return(0);
6009: }

6011: /*@C
6012:    MatZeroRowsColumnsStencil - Zeros all row and column entries (except possibly the main diagonal)
6013:    of a set of rows and columns of a matrix.

6015:    Collective on Mat

6017:    Input Parameters:
6018: +  mat - the matrix
6019: .  numRows - the number of rows/columns to remove
6020: .  rows - the grid coordinates (and component number when dof > 1) for matrix rows
6021: .  diag - value put in all diagonals of eliminated rows (0.0 will even eliminate diagonal entry)
6022: .  x - optional vector of solutions for zeroed rows (other entries in vector are not used)
6023: -  b - optional vector of right hand side, that will be adjusted by provided solution

6025:    Notes:
6026:    For the AIJ and BAIJ matrix formats this removes the old nonzero structure,
6027:    but does not release memory.  For the dense and block diagonal
6028:    formats this does not alter the nonzero structure.

6030:    If the option MatSetOption(mat,MAT_KEEP_NONZERO_PATTERN,PETSC_TRUE) the nonzero structure
6031:    of the matrix is not changed (even for AIJ and BAIJ matrices) the values are
6032:    merely zeroed.

6034:    The user can set a value in the diagonal entry (or for the AIJ and
6035:    row formats can optionally remove the main diagonal entry from the
6036:    nonzero structure as well, by passing 0.0 as the final argument).

6038:    For the parallel case, all processes that share the matrix (i.e.,
6039:    those in the communicator used for matrix creation) MUST call this
6040:    routine, regardless of whether any rows being zeroed are owned by
6041:    them.

6043:    Each processor can indicate any rows in the entire matrix to be zeroed (i.e. each process does NOT have to
6044:    list only rows local to itself, but the row/column numbers are given in local numbering).

6046:    The grid coordinates are across the entire grid, not just the local portion

6048:    In Fortran idxm and idxn should be declared as
6049: $     MatStencil idxm(4,m)
6050:    and the values inserted using
6051: $    idxm(MatStencil_i,1) = i
6052: $    idxm(MatStencil_j,1) = j
6053: $    idxm(MatStencil_k,1) = k
6054: $    idxm(MatStencil_c,1) = c
6055:    etc

6057:    For periodic boundary conditions use negative indices for values to the left (below 0; that are to be
6058:    obtained by wrapping values from right edge). For values to the right of the last entry using that index plus one
6059:    etc to obtain values that obtained by wrapping the values from the left edge. This does not work for anything but the
6060:    DM_BOUNDARY_PERIODIC boundary type.

6062:    For indices that don't mean anything for your case (like the k index when working in 2d) or the c index when you have
6063:    a single value per point) you can skip filling those indices.

6065:    Level: intermediate

6067:    Concepts: matrices^zeroing rows

6069: .seealso: MatZeroRowsIS(), MatZeroRowsColumns(), MatZeroRowsLocalIS(), MatZeroRowsStencil(), MatZeroEntries(), MatZeroRowsLocal(), MatSetOption(),
6070:           MatZeroRowsColumnsLocal(), MatZeroRowsColumnsLocalIS(), MatZeroRowsColumnsIS(), MatZeroRows()
6071: @*/
6072: PetscErrorCode MatZeroRowsColumnsStencil(Mat mat,PetscInt numRows,const MatStencil rows[],PetscScalar diag,Vec x,Vec b)
6073: {
6074:   PetscInt       dim     = mat->stencil.dim;
6075:   PetscInt       sdim    = dim - (1 - (PetscInt) mat->stencil.noc);
6076:   PetscInt       *dims   = mat->stencil.dims+1;
6077:   PetscInt       *starts = mat->stencil.starts;
6078:   PetscInt       *dxm    = (PetscInt*) rows;
6079:   PetscInt       *jdxm, i, j, tmp, numNewRows = 0;


6087:   PetscMalloc1(numRows, &jdxm);
6088:   for (i = 0; i < numRows; ++i) {
6089:     /* Skip unused dimensions (they are ordered k, j, i, c) */
6090:     for (j = 0; j < 3-sdim; ++j) dxm++;
6091:     /* Local index in X dir */
6092:     tmp = *dxm++ - starts[0];
6093:     /* Loop over remaining dimensions */
6094:     for (j = 0; j < dim-1; ++j) {
6095:       /* If nonlocal, set index to be negative */
6096:       if ((*dxm++ - starts[j+1]) < 0 || tmp < 0) tmp = PETSC_MIN_INT;
6097:       /* Update local index */
6098:       else tmp = tmp*dims[j] + *(dxm-1) - starts[j+1];
6099:     }
6100:     /* Skip component slot if necessary */
6101:     if (mat->stencil.noc) dxm++;
6102:     /* Local row number */
6103:     if (tmp >= 0) {
6104:       jdxm[numNewRows++] = tmp;
6105:     }
6106:   }
6107:   MatZeroRowsColumnsLocal(mat,numNewRows,jdxm,diag,x,b);
6108:   PetscFree(jdxm);
6109:   return(0);
6110: }

6112: /*@C
6113:    MatZeroRowsLocal - Zeros all entries (except possibly the main diagonal)
6114:    of a set of rows of a matrix; using local numbering of rows.

6116:    Collective on Mat

6118:    Input Parameters:
6119: +  mat - the matrix
6120: .  numRows - the number of rows to remove
6121: .  rows - the global row indices
6122: .  diag - value put in all diagonals of eliminated rows
6123: .  x - optional vector of solutions for zeroed rows (other entries in vector are not used)
6124: -  b - optional vector of right hand side, that will be adjusted by provided solution

6126:    Notes:
6127:    Before calling MatZeroRowsLocal(), the user must first set the
6128:    local-to-global mapping by calling MatSetLocalToGlobalMapping().

6130:    For the AIJ matrix formats this removes the old nonzero structure,
6131:    but does not release memory.  For the dense and block diagonal
6132:    formats this does not alter the nonzero structure.

6134:    If the option MatSetOption(mat,MAT_KEEP_NONZERO_PATTERN,PETSC_TRUE) the nonzero structure
6135:    of the matrix is not changed (even for AIJ and BAIJ matrices) the values are
6136:    merely zeroed.

6138:    The user can set a value in the diagonal entry (or for the AIJ and
6139:    row formats can optionally remove the main diagonal entry from the
6140:    nonzero structure as well, by passing 0.0 as the final argument).

6142:    You can call MatSetOption(mat,MAT_NO_OFF_PROC_ZERO_ROWS,PETSC_TRUE) if each process indicates only rows it
6143:    owns that are to be zeroed. This saves a global synchronization in the implementation.

6145:    Level: intermediate

6147:    Concepts: matrices^zeroing

6149: .seealso: MatZeroRowsIS(), MatZeroRowsColumns(), MatZeroRowsLocalIS(), MatZeroRowsStencil(), MatZeroEntries(), MatZeroRows(), MatSetOption(),
6150:           MatZeroRowsColumnsLocal(), MatZeroRowsColumnsLocalIS(), MatZeroRowsColumnsIS(), MatZeroRowsColumnsStencil()
6151: @*/
6152: PetscErrorCode MatZeroRowsLocal(Mat mat,PetscInt numRows,const PetscInt rows[],PetscScalar diag,Vec x,Vec b)
6153: {

6160:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
6161:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6162:   MatCheckPreallocated(mat,1);

6164:   if (mat->ops->zerorowslocal) {
6165:     (*mat->ops->zerorowslocal)(mat,numRows,rows,diag,x,b);
6166:   } else {
6167:     IS             is, newis;
6168:     const PetscInt *newRows;

6170:     if (!mat->rmap->mapping) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Need to provide local to global mapping to matrix first");
6171:     ISCreateGeneral(PETSC_COMM_SELF,numRows,rows,PETSC_COPY_VALUES,&is);
6172:     ISLocalToGlobalMappingApplyIS(mat->rmap->mapping,is,&newis);
6173:     ISGetIndices(newis,&newRows);
6174:     (*mat->ops->zerorows)(mat,numRows,newRows,diag,x,b);
6175:     ISRestoreIndices(newis,&newRows);
6176:     ISDestroy(&newis);
6177:     ISDestroy(&is);
6178:   }
6179:   PetscObjectStateIncrease((PetscObject)mat);
6180: #if defined(PETSC_HAVE_CUSP)
6181:   if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
6182:     mat->valid_GPU_matrix = PETSC_CUSP_CPU;
6183:   }
6184: #elif defined(PETSC_HAVE_VIENNACL)
6185:   if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
6186:     mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
6187:   }
6188: #elif defined(PETSC_HAVE_VECCUDA)
6189:   if (mat->valid_GPU_matrix != PETSC_CUDA_UNALLOCATED) {
6190:     mat->valid_GPU_matrix = PETSC_CUDA_CPU;
6191:   }
6192: #endif
6193:   return(0);
6194: }

6196: /*@C
6197:    MatZeroRowsLocalIS - Zeros all entries (except possibly the main diagonal)
6198:    of a set of rows of a matrix; using local numbering of rows.

6200:    Collective on Mat

6202:    Input Parameters:
6203: +  mat - the matrix
6204: .  is - index set of rows to remove
6205: .  diag - value put in all diagonals of eliminated rows
6206: .  x - optional vector of solutions for zeroed rows (other entries in vector are not used)
6207: -  b - optional vector of right hand side, that will be adjusted by provided solution

6209:    Notes:
6210:    Before calling MatZeroRowsLocalIS(), the user must first set the
6211:    local-to-global mapping by calling MatSetLocalToGlobalMapping().

6213:    For the AIJ matrix formats this removes the old nonzero structure,
6214:    but does not release memory.  For the dense and block diagonal
6215:    formats this does not alter the nonzero structure.

6217:    If the option MatSetOption(mat,MAT_KEEP_NONZERO_PATTERN,PETSC_TRUE) the nonzero structure
6218:    of the matrix is not changed (even for AIJ and BAIJ matrices) the values are
6219:    merely zeroed.

6221:    The user can set a value in the diagonal entry (or for the AIJ and
6222:    row formats can optionally remove the main diagonal entry from the
6223:    nonzero structure as well, by passing 0.0 as the final argument).

6225:    You can call MatSetOption(mat,MAT_NO_OFF_PROC_ZERO_ROWS,PETSC_TRUE) if each process indicates only rows it
6226:    owns that are to be zeroed. This saves a global synchronization in the implementation.

6228:    Level: intermediate

6230:    Concepts: matrices^zeroing

6232: .seealso: MatZeroRowsIS(), MatZeroRowsColumns(), MatZeroRows(), MatZeroRowsStencil(), MatZeroEntries(), MatZeroRowsLocal(), MatSetOption(),
6233:           MatZeroRowsColumnsLocal(), MatZeroRowsColumnsLocalIS(), MatZeroRowsColumnsIS(), MatZeroRowsColumnsStencil()
6234: @*/
6235: PetscErrorCode MatZeroRowsLocalIS(Mat mat,IS is,PetscScalar diag,Vec x,Vec b)
6236: {
6238:   PetscInt       numRows;
6239:   const PetscInt *rows;

6245:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
6246:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6247:   MatCheckPreallocated(mat,1);

6249:   ISGetLocalSize(is,&numRows);
6250:   ISGetIndices(is,&rows);
6251:   MatZeroRowsLocal(mat,numRows,rows,diag,x,b);
6252:   ISRestoreIndices(is,&rows);
6253:   return(0);
6254: }

6256: /*@C
6257:    MatZeroRowsColumnsLocal - Zeros all entries (except possibly the main diagonal)
6258:    of a set of rows and columns of a matrix; using local numbering of rows.

6260:    Collective on Mat

6262:    Input Parameters:
6263: +  mat - the matrix
6264: .  numRows - the number of rows to remove
6265: .  rows - the global row indices
6266: .  diag - value put in all diagonals of eliminated rows
6267: .  x - optional vector of solutions for zeroed rows (other entries in vector are not used)
6268: -  b - optional vector of right hand side, that will be adjusted by provided solution

6270:    Notes:
6271:    Before calling MatZeroRowsColumnsLocal(), the user must first set the
6272:    local-to-global mapping by calling MatSetLocalToGlobalMapping().

6274:    The user can set a value in the diagonal entry (or for the AIJ and
6275:    row formats can optionally remove the main diagonal entry from the
6276:    nonzero structure as well, by passing 0.0 as the final argument).

6278:    Level: intermediate

6280:    Concepts: matrices^zeroing

6282: .seealso: MatZeroRowsIS(), MatZeroRowsColumns(), MatZeroRowsLocalIS(), MatZeroRowsStencil(), MatZeroEntries(), MatZeroRowsLocal(), MatSetOption(),
6283:           MatZeroRows(), MatZeroRowsColumnsLocalIS(), MatZeroRowsColumnsIS(), MatZeroRowsColumnsStencil()
6284: @*/
6285: PetscErrorCode MatZeroRowsColumnsLocal(Mat mat,PetscInt numRows,const PetscInt rows[],PetscScalar diag,Vec x,Vec b)
6286: {
6288:   IS             is, newis;
6289:   const PetscInt *newRows;

6295:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
6296:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6297:   MatCheckPreallocated(mat,1);

6299:   if (!mat->cmap->mapping) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Need to provide local to global mapping to matrix first");
6300:   ISCreateGeneral(PETSC_COMM_SELF,numRows,rows,PETSC_COPY_VALUES,&is);
6301:   ISLocalToGlobalMappingApplyIS(mat->cmap->mapping,is,&newis);
6302:   ISGetIndices(newis,&newRows);
6303:   (*mat->ops->zerorowscolumns)(mat,numRows,newRows,diag,x,b);
6304:   ISRestoreIndices(newis,&newRows);
6305:   ISDestroy(&newis);
6306:   ISDestroy(&is);
6307:   PetscObjectStateIncrease((PetscObject)mat);
6308: #if defined(PETSC_HAVE_CUSP)
6309:   if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
6310:     mat->valid_GPU_matrix = PETSC_CUSP_CPU;
6311:   }
6312: #elif defined(PETSC_HAVE_VIENNACL)
6313:   if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
6314:     mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
6315:   }
6316: #elif defined(PETSC_HAVE_VECCUDA)
6317:   if (mat->valid_GPU_matrix != PETSC_CUDA_UNALLOCATED) {
6318:     mat->valid_GPU_matrix = PETSC_CUDA_CPU;
6319:   }
6320: #endif
6321:   return(0);
6322: }

6324: /*@C
6325:    MatZeroRowsColumnsLocalIS - Zeros all entries (except possibly the main diagonal)
6326:    of a set of rows and columns of a matrix; using local numbering of rows.

6328:    Collective on Mat

6330:    Input Parameters:
6331: +  mat - the matrix
6332: .  is - index set of rows to remove
6333: .  diag - value put in all diagonals of eliminated rows
6334: .  x - optional vector of solutions for zeroed rows (other entries in vector are not used)
6335: -  b - optional vector of right hand side, that will be adjusted by provided solution

6337:    Notes:
6338:    Before calling MatZeroRowsColumnsLocalIS(), the user must first set the
6339:    local-to-global mapping by calling MatSetLocalToGlobalMapping().

6341:    The user can set a value in the diagonal entry (or for the AIJ and
6342:    row formats can optionally remove the main diagonal entry from the
6343:    nonzero structure as well, by passing 0.0 as the final argument).

6345:    Level: intermediate

6347:    Concepts: matrices^zeroing

6349: .seealso: MatZeroRowsIS(), MatZeroRowsColumns(), MatZeroRowsLocalIS(), MatZeroRowsStencil(), MatZeroEntries(), MatZeroRowsLocal(), MatSetOption(),
6350:           MatZeroRowsColumnsLocal(), MatZeroRows(), MatZeroRowsColumnsIS(), MatZeroRowsColumnsStencil()
6351: @*/
6352: PetscErrorCode MatZeroRowsColumnsLocalIS(Mat mat,IS is,PetscScalar diag,Vec x,Vec b)
6353: {
6355:   PetscInt       numRows;
6356:   const PetscInt *rows;

6362:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
6363:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6364:   MatCheckPreallocated(mat,1);

6366:   ISGetLocalSize(is,&numRows);
6367:   ISGetIndices(is,&rows);
6368:   MatZeroRowsColumnsLocal(mat,numRows,rows,diag,x,b);
6369:   ISRestoreIndices(is,&rows);
6370:   return(0);
6371: }

6373: /*@C
6374:    MatGetSize - Returns the numbers of rows and columns in a matrix.

6376:    Not Collective

6378:    Input Parameter:
6379: .  mat - the matrix

6381:    Output Parameters:
6382: +  m - the number of global rows
6383: -  n - the number of global columns

6385:    Note: both output parameters can be NULL on input.

6387:    Level: beginner

6389:    Concepts: matrices^size

6391: .seealso: MatGetLocalSize()
6392: @*/
6393: PetscErrorCode MatGetSize(Mat mat,PetscInt *m,PetscInt *n)
6394: {
6397:   if (m) *m = mat->rmap->N;
6398:   if (n) *n = mat->cmap->N;
6399:   return(0);
6400: }

6402: /*@C
6403:    MatGetLocalSize - Returns the number of rows and columns in a matrix
6404:    stored locally.  This information may be implementation dependent, so
6405:    use with care.

6407:    Not Collective

6409:    Input Parameters:
6410: .  mat - the matrix

6412:    Output Parameters:
6413: +  m - the number of local rows
6414: -  n - the number of local columns

6416:    Note: both output parameters can be NULL on input.

6418:    Level: beginner

6420:    Concepts: matrices^local size

6422: .seealso: MatGetSize()
6423: @*/
6424: PetscErrorCode MatGetLocalSize(Mat mat,PetscInt *m,PetscInt *n)
6425: {
6430:   if (m) *m = mat->rmap->n;
6431:   if (n) *n = mat->cmap->n;
6432:   return(0);
6433: }

6435: /*@C
6436:    MatGetOwnershipRangeColumn - Returns the range of matrix columns associated with rows of a vector one multiplies by that owned by
6437:    this processor. (The columns of the "diagonal block")

6439:    Not Collective, unless matrix has not been allocated, then collective on Mat

6441:    Input Parameters:
6442: .  mat - the matrix

6444:    Output Parameters:
6445: +  m - the global index of the first local column
6446: -  n - one more than the global index of the last local column

6448:    Notes: both output parameters can be NULL on input.

6450:    Level: developer

6452:    Concepts: matrices^column ownership

6454: .seealso:  MatGetOwnershipRange(), MatGetOwnershipRanges(), MatGetOwnershipRangesColumn()

6456: @*/
6457: PetscErrorCode MatGetOwnershipRangeColumn(Mat mat,PetscInt *m,PetscInt *n)
6458: {
6464:   MatCheckPreallocated(mat,1);
6465:   if (m) *m = mat->cmap->rstart;
6466:   if (n) *n = mat->cmap->rend;
6467:   return(0);
6468: }

6470: /*@C
6471:    MatGetOwnershipRange - Returns the range of matrix rows owned by
6472:    this processor, assuming that the matrix is laid out with the first
6473:    n1 rows on the first processor, the next n2 rows on the second, etc.
6474:    For certain parallel layouts this range may not be well defined.

6476:    Not Collective

6478:    Input Parameters:
6479: .  mat - the matrix

6481:    Output Parameters:
6482: +  m - the global index of the first local row
6483: -  n - one more than the global index of the last local row

6485:    Note: Both output parameters can be NULL on input.
6486: $  This function requires that the matrix be preallocated. If you have not preallocated, consider using
6487: $    PetscSplitOwnership(MPI_Comm comm, PetscInt *n, PetscInt *N)
6488: $  and then MPI_Scan() to calculate prefix sums of the local sizes.

6490:    Level: beginner

6492:    Concepts: matrices^row ownership

6494: .seealso:   MatGetOwnershipRanges(), MatGetOwnershipRangeColumn(), MatGetOwnershipRangesColumn(), PetscSplitOwnership(), PetscSplitOwnershipBlock()

6496: @*/
6497: PetscErrorCode MatGetOwnershipRange(Mat mat,PetscInt *m,PetscInt *n)
6498: {
6504:   MatCheckPreallocated(mat,1);
6505:   if (m) *m = mat->rmap->rstart;
6506:   if (n) *n = mat->rmap->rend;
6507:   return(0);
6508: }

6510: /*@C
6511:    MatGetOwnershipRanges - Returns the range of matrix rows owned by
6512:    each process

6514:    Not Collective, unless matrix has not been allocated, then collective on Mat

6516:    Input Parameters:
6517: .  mat - the matrix

6519:    Output Parameters:
6520: .  ranges - start of each processors portion plus one more than the total length at the end

6522:    Level: beginner

6524:    Concepts: matrices^row ownership

6526: .seealso:   MatGetOwnershipRange(), MatGetOwnershipRangeColumn(), MatGetOwnershipRangesColumn()

6528: @*/
6529: PetscErrorCode MatGetOwnershipRanges(Mat mat,const PetscInt **ranges)
6530: {

6536:   MatCheckPreallocated(mat,1);
6537:   PetscLayoutGetRanges(mat->rmap,ranges);
6538:   return(0);
6539: }

6541: /*@C
6542:    MatGetOwnershipRangesColumn - Returns the range of matrix columns associated with rows of a vector one multiplies by that owned by
6543:    this processor. (The columns of the "diagonal blocks" for each process)

6545:    Not Collective, unless matrix has not been allocated, then collective on Mat

6547:    Input Parameters:
6548: .  mat - the matrix

6550:    Output Parameters:
6551: .  ranges - start of each processors portion plus one more then the total length at the end

6553:    Level: beginner

6555:    Concepts: matrices^column ownership

6557: .seealso:   MatGetOwnershipRange(), MatGetOwnershipRangeColumn(), MatGetOwnershipRanges()

6559: @*/
6560: PetscErrorCode MatGetOwnershipRangesColumn(Mat mat,const PetscInt **ranges)
6561: {

6567:   MatCheckPreallocated(mat,1);
6568:   PetscLayoutGetRanges(mat->cmap,ranges);
6569:   return(0);
6570: }

6572: /*@C
6573:    MatGetOwnershipIS - Get row and column ownership as index sets

6575:    Not Collective

6577:    Input Arguments:
6578: .  A - matrix of type Elemental

6580:    Output Arguments:
6581: +  rows - rows in which this process owns elements
6582: .  cols - columns in which this process owns elements

6584:    Level: intermediate

6586: .seealso: MatGetOwnershipRange(), MatGetOwnershipRangeColumn(), MatSetValues(), MATELEMENTAL
6587: @*/
6588: PetscErrorCode MatGetOwnershipIS(Mat A,IS *rows,IS *cols)
6589: {
6590:   PetscErrorCode ierr,(*f)(Mat,IS*,IS*);

6593:   MatCheckPreallocated(A,1);
6594:   PetscObjectQueryFunction((PetscObject)A,"MatGetOwnershipIS_C",&f);
6595:   if (f) {
6596:     (*f)(A,rows,cols);
6597:   } else {   /* Create a standard row-based partition, each process is responsible for ALL columns in their row block */
6598:     if (rows) {ISCreateStride(PETSC_COMM_SELF,A->rmap->n,A->rmap->rstart,1,rows);}
6599:     if (cols) {ISCreateStride(PETSC_COMM_SELF,A->cmap->N,0,1,cols);}
6600:   }
6601:   return(0);
6602: }

6604: /*@C
6605:    MatILUFactorSymbolic - Performs symbolic ILU factorization of a matrix.
6606:    Uses levels of fill only, not drop tolerance. Use MatLUFactorNumeric()
6607:    to complete the factorization.

6609:    Collective on Mat

6611:    Input Parameters:
6612: +  mat - the matrix
6613: .  row - row permutation
6614: .  column - column permutation
6615: -  info - structure containing
6616: $      levels - number of levels of fill.
6617: $      expected fill - as ratio of original fill.
6618: $      1 or 0 - indicating force fill on diagonal (improves robustness for matrices
6619:                 missing diagonal entries)

6621:    Output Parameters:
6622: .  fact - new matrix that has been symbolically factored

6624:    Notes: See Users-Manual: ch_mat for additional information about choosing the fill factor for better efficiency.

6626:    Most users should employ the simplified KSP interface for linear solvers
6627:    instead of working directly with matrix algebra routines such as this.
6628:    See, e.g., KSPCreate().

6630:    Level: developer

6632:   Concepts: matrices^symbolic LU factorization
6633:   Concepts: matrices^factorization
6634:   Concepts: LU^symbolic factorization

6636: .seealso: MatLUFactorSymbolic(), MatLUFactorNumeric(), MatCholeskyFactor()
6637:           MatGetOrdering(), MatFactorInfo

6639:     Developer Note: fortran interface is not autogenerated as the f90
6640:     interface defintion cannot be generated correctly [due to MatFactorInfo]

6642: @*/
6643: PetscErrorCode MatILUFactorSymbolic(Mat fact,Mat mat,IS row,IS col,const MatFactorInfo *info)
6644: {

6654:   if (info->levels < 0) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_OUTOFRANGE,"Levels of fill negative %D",(PetscInt)info->levels);
6655:   if (info->fill < 1.0) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_OUTOFRANGE,"Expected fill less than 1.0 %g",(double)info->fill);
6656:   if (!(fact)->ops->ilufactorsymbolic) {
6657:     MatSolverType spackage;
6658:     MatFactorGetSolverType(fact,&spackage);
6659:     SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Matrix type %s symbolic ILU using solver package %s",((PetscObject)mat)->type_name,spackage);
6660:   }
6661:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
6662:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6663:   MatCheckPreallocated(mat,2);

6665:   PetscLogEventBegin(MAT_ILUFactorSymbolic,mat,row,col,0);
6666:   (fact->ops->ilufactorsymbolic)(fact,mat,row,col,info);
6667:   PetscLogEventEnd(MAT_ILUFactorSymbolic,mat,row,col,0);
6668:   return(0);
6669: }

6671: /*@C
6672:    MatICCFactorSymbolic - Performs symbolic incomplete
6673:    Cholesky factorization for a symmetric matrix.  Use
6674:    MatCholeskyFactorNumeric() to complete the factorization.

6676:    Collective on Mat

6678:    Input Parameters:
6679: +  mat - the matrix
6680: .  perm - row and column permutation
6681: -  info - structure containing
6682: $      levels - number of levels of fill.
6683: $      expected fill - as ratio of original fill.

6685:    Output Parameter:
6686: .  fact - the factored matrix

6688:    Notes:
6689:    Most users should employ the KSP interface for linear solvers
6690:    instead of working directly with matrix algebra routines such as this.
6691:    See, e.g., KSPCreate().

6693:    Level: developer

6695:   Concepts: matrices^symbolic incomplete Cholesky factorization
6696:   Concepts: matrices^factorization
6697:   Concepts: Cholsky^symbolic factorization

6699: .seealso: MatCholeskyFactorNumeric(), MatCholeskyFactor(), MatFactorInfo

6701:     Developer Note: fortran interface is not autogenerated as the f90
6702:     interface defintion cannot be generated correctly [due to MatFactorInfo]

6704: @*/
6705: PetscErrorCode MatICCFactorSymbolic(Mat fact,Mat mat,IS perm,const MatFactorInfo *info)
6706: {

6715:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6716:   if (info->levels < 0) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_OUTOFRANGE,"Levels negative %D",(PetscInt) info->levels);
6717:   if (info->fill < 1.0) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_OUTOFRANGE,"Expected fill less than 1.0 %g",(double)info->fill);
6718:   if (!(fact)->ops->iccfactorsymbolic) {
6719:     MatSolverType spackage;
6720:     MatFactorGetSolverType(fact,&spackage);
6721:     SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Matrix type %s symbolic ICC using solver package %s",((PetscObject)mat)->type_name,spackage);
6722:   }
6723:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
6724:   MatCheckPreallocated(mat,2);

6726:   PetscLogEventBegin(MAT_ICCFactorSymbolic,mat,perm,0,0);
6727:   (fact->ops->iccfactorsymbolic)(fact,mat,perm,info);
6728:   PetscLogEventEnd(MAT_ICCFactorSymbolic,mat,perm,0,0);
6729:   return(0);
6730: }

6732: /*@C
6733:    MatCreateSubMatrices - Extracts several submatrices from a matrix. If submat
6734:    points to an array of valid matrices, they may be reused to store the new
6735:    submatrices.

6737:    Collective on Mat

6739:    Input Parameters:
6740: +  mat - the matrix
6741: .  n   - the number of submatrixes to be extracted (on this processor, may be zero)
6742: .  irow, icol - index sets of rows and columns to extract
6743: -  scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX

6745:    Output Parameter:
6746: .  submat - the array of submatrices

6748:    Notes:
6749:    MatCreateSubMatrices() can extract ONLY sequential submatrices
6750:    (from both sequential and parallel matrices). Use MatCreateSubMatrix()
6751:    to extract a parallel submatrix.

6753:    Some matrix types place restrictions on the row and column
6754:    indices, such as that they be sorted or that they be equal to each other.

6756:    The index sets may not have duplicate entries.

6758:    When extracting submatrices from a parallel matrix, each processor can
6759:    form a different submatrix by setting the rows and columns of its
6760:    individual index sets according to the local submatrix desired.

6762:    When finished using the submatrices, the user should destroy
6763:    them with MatDestroyMatrices().

6765:    MAT_REUSE_MATRIX can only be used when the nonzero structure of the
6766:    original matrix has not changed from that last call to MatCreateSubMatrices().

6768:    This routine creates the matrices in submat; you should NOT create them before
6769:    calling it. It also allocates the array of matrix pointers submat.

6771:    For BAIJ matrices the index sets must respect the block structure, that is if they
6772:    request one row/column in a block, they must request all rows/columns that are in
6773:    that block. For example, if the block size is 2 you cannot request just row 0 and
6774:    column 0.

6776:    Fortran Note:
6777:    The Fortran interface is slightly different from that given below; it
6778:    requires one to pass in  as submat a Mat (integer) array of size at least m.

6780:    Level: advanced

6782:    Concepts: matrices^accessing submatrices
6783:    Concepts: submatrices

6785: .seealso: MatDestroySubMatrices(), MatCreateSubMatrix(), MatGetRow(), MatGetDiagonal(), MatReuse
6786: @*/
6787: PetscErrorCode MatCreateSubMatrices(Mat mat,PetscInt n,const IS irow[],const IS icol[],MatReuse scall,Mat *submat[])
6788: {
6790:   PetscInt       i;
6791:   PetscBool      eq;

6796:   if (n) {
6801:   }
6803:   if (n && scall == MAT_REUSE_MATRIX) {
6806:   }
6807:   if (!mat->ops->createsubmatrices) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
6808:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
6809:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6810:   MatCheckPreallocated(mat,1);

6812:   PetscLogEventBegin(MAT_CreateSubMats,mat,0,0,0);
6813:   (*mat->ops->createsubmatrices)(mat,n,irow,icol,scall,submat);
6814:   PetscLogEventEnd(MAT_CreateSubMats,mat,0,0,0);
6815:   for (i=0; i<n; i++) {
6816:     (*submat)[i]->factortype = MAT_FACTOR_NONE;  /* in case in place factorization was previously done on submatrix */
6817:     if (mat->symmetric || mat->structurally_symmetric || mat->hermitian) {
6818:       ISEqual(irow[i],icol[i],&eq);
6819:       if (eq) {
6820:         if (mat->symmetric) {
6821:           MatSetOption((*submat)[i],MAT_SYMMETRIC,PETSC_TRUE);
6822:         } else if (mat->hermitian) {
6823:           MatSetOption((*submat)[i],MAT_HERMITIAN,PETSC_TRUE);
6824:         } else if (mat->structurally_symmetric) {
6825:           MatSetOption((*submat)[i],MAT_STRUCTURALLY_SYMMETRIC,PETSC_TRUE);
6826:         }
6827:       }
6828:     }
6829:   }
6830:   return(0);
6831: }

6833: /*@C
6834:    MatCreateSubMatricesMPI - Extracts MPI submatrices across a sub communicator of mat (by pairs of IS that may live on subcomms).

6836:    Collective on Mat

6838:    Input Parameters:
6839: +  mat - the matrix
6840: .  n   - the number of submatrixes to be extracted
6841: .  irow, icol - index sets of rows and columns to extract
6842: -  scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX

6844:    Output Parameter:
6845: .  submat - the array of submatrices

6847:    Level: advanced

6849:    Concepts: matrices^accessing submatrices
6850:    Concepts: submatrices

6852: .seealso: MatCreateSubMatrices(), MatCreateSubMatrix(), MatGetRow(), MatGetDiagonal(), MatReuse
6853: @*/
6854: PetscErrorCode MatCreateSubMatricesMPI(Mat mat,PetscInt n,const IS irow[],const IS icol[],MatReuse scall,Mat *submat[])
6855: {
6857:   PetscInt       i;
6858:   PetscBool      eq;

6863:   if (n) {
6868:   }
6870:   if (n && scall == MAT_REUSE_MATRIX) {
6873:   }
6874:   if (!mat->ops->createsubmatricesmpi) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
6875:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
6876:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6877:   MatCheckPreallocated(mat,1);

6879:   PetscLogEventBegin(MAT_CreateSubMats,mat,0,0,0);
6880:   (*mat->ops->createsubmatricesmpi)(mat,n,irow,icol,scall,submat);
6881:   PetscLogEventEnd(MAT_CreateSubMats,mat,0,0,0);
6882:   for (i=0; i<n; i++) {
6883:     if (mat->symmetric || mat->structurally_symmetric || mat->hermitian) {
6884:       ISEqual(irow[i],icol[i],&eq);
6885:       if (eq) {
6886:         if (mat->symmetric) {
6887:           MatSetOption((*submat)[i],MAT_SYMMETRIC,PETSC_TRUE);
6888:         } else if (mat->hermitian) {
6889:           MatSetOption((*submat)[i],MAT_HERMITIAN,PETSC_TRUE);
6890:         } else if (mat->structurally_symmetric) {
6891:           MatSetOption((*submat)[i],MAT_STRUCTURALLY_SYMMETRIC,PETSC_TRUE);
6892:         }
6893:       }
6894:     }
6895:   }
6896:   return(0);
6897: }

6899: /*@C
6900:    MatDestroyMatrices - Destroys an array of matrices.

6902:    Collective on Mat

6904:    Input Parameters:
6905: +  n - the number of local matrices
6906: -  mat - the matrices (note that this is a pointer to the array of matrices)

6908:    Level: advanced

6910:     Notes: Frees not only the matrices, but also the array that contains the matrices
6911:            In Fortran will not free the array.

6913: .seealso: MatCreateSubMatrices() MatDestroySubMatrices()
6914: @*/
6915: PetscErrorCode MatDestroyMatrices(PetscInt n,Mat *mat[])
6916: {
6918:   PetscInt       i;

6921:   if (!*mat) return(0);
6922:   if (n < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Trying to destroy negative number of matrices %D",n);

6925:   for (i=0; i<n; i++) {
6926:     MatDestroy(&(*mat)[i]);
6927:   }

6929:   /* memory is allocated even if n = 0 */
6930:   PetscFree(*mat);
6931:   return(0);
6932: }

6934: /*@C
6935:    MatDestroySubMatrices - Destroys a set of matrices obtained with MatCreateSubMatrices().

6937:    Collective on Mat

6939:    Input Parameters:
6940: +  n - the number of local matrices
6941: -  mat - the matrices (note that this is a pointer to the array of matrices, just to match the calling
6942:                        sequence of MatCreateSubMatrices())

6944:    Level: advanced

6946:     Notes: Frees not only the matrices, but also the array that contains the matrices
6947:            In Fortran will not free the array.

6949: .seealso: MatCreateSubMatrices()
6950: @*/
6951: PetscErrorCode MatDestroySubMatrices(PetscInt n,Mat *mat[])
6952: {
6954:   Mat            mat0;

6957:   if (!*mat) return(0);
6958:   /* mat[] is an array of length n+1, see MatCreateSubMatrices_xxx() */
6959:   if (n < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Trying to destroy negative number of matrices %D",n);

6962:   mat0 = (*mat)[0];
6963:   if (mat0 && mat0->ops->destroysubmatrices) {
6964:     (mat0->ops->destroysubmatrices)(n,mat);
6965:   } else {
6966:     MatDestroyMatrices(n,mat);
6967:   }
6968:   return(0);
6969: }

6971: /*@C
6972:    MatGetSeqNonzeroStructure - Extracts the sequential nonzero structure from a matrix.

6974:    Collective on Mat

6976:    Input Parameters:
6977: .  mat - the matrix

6979:    Output Parameter:
6980: .  matstruct - the sequential matrix with the nonzero structure of mat

6982:   Level: intermediate

6984: .seealso: MatDestroySeqNonzeroStructure(), MatCreateSubMatrices(), MatDestroyMatrices()
6985: @*/
6986: PetscErrorCode MatGetSeqNonzeroStructure(Mat mat,Mat *matstruct)
6987: {


6995:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6996:   MatCheckPreallocated(mat,1);

6998:   if (!mat->ops->getseqnonzerostructure) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Not for matrix type %s\n",((PetscObject)mat)->type_name);
6999:   PetscLogEventBegin(MAT_GetSeqNonzeroStructure,mat,0,0,0);
7000:   (*mat->ops->getseqnonzerostructure)(mat,matstruct);
7001:   PetscLogEventEnd(MAT_GetSeqNonzeroStructure,mat,0,0,0);
7002:   return(0);
7003: }

7005: /*@C
7006:    MatDestroySeqNonzeroStructure - Destroys matrix obtained with MatGetSeqNonzeroStructure().

7008:    Collective on Mat

7010:    Input Parameters:
7011: .  mat - the matrix (note that this is a pointer to the array of matrices, just to match the calling
7012:                        sequence of MatGetSequentialNonzeroStructure())

7014:    Level: advanced

7016:     Notes: Frees not only the matrices, but also the array that contains the matrices

7018: .seealso: MatGetSeqNonzeroStructure()
7019: @*/
7020: PetscErrorCode MatDestroySeqNonzeroStructure(Mat *mat)
7021: {

7026:   MatDestroy(mat);
7027:   return(0);
7028: }

7030: /*@
7031:    MatIncreaseOverlap - Given a set of submatrices indicated by index sets,
7032:    replaces the index sets by larger ones that represent submatrices with
7033:    additional overlap.

7035:    Collective on Mat

7037:    Input Parameters:
7038: +  mat - the matrix
7039: .  n   - the number of index sets
7040: .  is  - the array of index sets (these index sets will changed during the call)
7041: -  ov  - the additional overlap requested

7043:    Options Database:
7044: .  -mat_increase_overlap_scalable - use a scalable algorithm to compute the overlap (supported by MPIAIJ matrix)

7046:    Level: developer

7048:    Concepts: overlap
7049:    Concepts: ASM^computing overlap

7051: .seealso: MatCreateSubMatrices()
7052: @*/
7053: PetscErrorCode MatIncreaseOverlap(Mat mat,PetscInt n,IS is[],PetscInt ov)
7054: {

7060:   if (n < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Must have one or more domains, you have %D",n);
7061:   if (n) {
7064:   }
7065:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
7066:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
7067:   MatCheckPreallocated(mat,1);

7069:   if (!ov) return(0);
7070:   if (!mat->ops->increaseoverlap) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
7071:   PetscLogEventBegin(MAT_IncreaseOverlap,mat,0,0,0);
7072:   (*mat->ops->increaseoverlap)(mat,n,is,ov);
7073:   PetscLogEventEnd(MAT_IncreaseOverlap,mat,0,0,0);
7074:   return(0);
7075: }


7078: PetscErrorCode MatIncreaseOverlapSplit_Single(Mat,IS*,PetscInt);

7080: /*@
7081:    MatIncreaseOverlapSplit - Given a set of submatrices indicated by index sets across
7082:    a sub communicator, replaces the index sets by larger ones that represent submatrices with
7083:    additional overlap.

7085:    Collective on Mat

7087:    Input Parameters:
7088: +  mat - the matrix
7089: .  n   - the number of index sets
7090: .  is  - the array of index sets (these index sets will changed during the call)
7091: -  ov  - the additional overlap requested

7093:    Options Database:
7094: .  -mat_increase_overlap_scalable - use a scalable algorithm to compute the overlap (supported by MPIAIJ matrix)

7096:    Level: developer

7098:    Concepts: overlap
7099:    Concepts: ASM^computing overlap

7101: .seealso: MatCreateSubMatrices()
7102: @*/
7103: PetscErrorCode MatIncreaseOverlapSplit(Mat mat,PetscInt n,IS is[],PetscInt ov)
7104: {
7105:   PetscInt       i;

7111:   if (n < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Must have one or more domains, you have %D",n);
7112:   if (n) {
7115:   }
7116:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
7117:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
7118:   MatCheckPreallocated(mat,1);
7119:   if (!ov) return(0);
7120:   PetscLogEventBegin(MAT_IncreaseOverlap,mat,0,0,0);
7121:   for(i=0; i<n; i++){
7122:          MatIncreaseOverlapSplit_Single(mat,&is[i],ov);
7123:   }
7124:   PetscLogEventEnd(MAT_IncreaseOverlap,mat,0,0,0);
7125:   return(0);
7126: }




7131: /*@
7132:    MatGetBlockSize - Returns the matrix block size.

7134:    Not Collective

7136:    Input Parameter:
7137: .  mat - the matrix

7139:    Output Parameter:
7140: .  bs - block size

7142:    Notes:
7143:     Block row formats are MATSEQBAIJ, MATMPIBAIJ, MATSEQSBAIJ, MATMPISBAIJ. These formats ALWAYS have square block storage in the matrix.

7145:    If the block size has not been set yet this routine returns 1.

7147:    Level: intermediate

7149:    Concepts: matrices^block size

7151: .seealso: MatCreateSeqBAIJ(), MatCreateBAIJ(), MatGetBlockSizes()
7152: @*/
7153: PetscErrorCode MatGetBlockSize(Mat mat,PetscInt *bs)
7154: {
7158:   *bs = PetscAbs(mat->rmap->bs);
7159:   return(0);
7160: }

7162: /*@
7163:    MatGetBlockSizes - Returns the matrix block row and column sizes.

7165:    Not Collective

7167:    Input Parameter:
7168: .  mat - the matrix

7170:    Output Parameter:
7171: .  rbs - row block size
7172: .  cbs - column block size

7174:    Notes:
7175:     Block row formats are MATSEQBAIJ, MATMPIBAIJ, MATSEQSBAIJ, MATMPISBAIJ. These formats ALWAYS have square block storage in the matrix.
7176:     If you pass a different block size for the columns than the rows, the row block size determines the square block storage.

7178:    If a block size has not been set yet this routine returns 1.

7180:    Level: intermediate

7182:    Concepts: matrices^block size

7184: .seealso: MatCreateSeqBAIJ(), MatCreateBAIJ(), MatGetBlockSize(), MatSetBlockSize(), MatSetBlockSizes()
7185: @*/
7186: PetscErrorCode MatGetBlockSizes(Mat mat,PetscInt *rbs, PetscInt *cbs)
7187: {
7192:   if (rbs) *rbs = PetscAbs(mat->rmap->bs);
7193:   if (cbs) *cbs = PetscAbs(mat->cmap->bs);
7194:   return(0);
7195: }

7197: /*@
7198:    MatSetBlockSize - Sets the matrix block size.

7200:    Logically Collective on Mat

7202:    Input Parameters:
7203: +  mat - the matrix
7204: -  bs - block size

7206:    Notes:
7207:     Block row formats are MATSEQBAIJ, MATMPIBAIJ, MATSEQSBAIJ, MATMPISBAIJ. These formats ALWAYS have square block storage in the matrix.
7208:     This must be called before MatSetUp() or MatXXXSetPreallocation() (or will default to 1) and the block size cannot be changed later.

7210:     For MATMPIAIJ and MATSEQAIJ matrix formats, this function can be called at a later stage, provided that the specified block size
7211:     is compatible with the matrix local sizes.

7213:    Level: intermediate

7215:    Concepts: matrices^block size

7217: .seealso: MatCreateSeqBAIJ(), MatCreateBAIJ(), MatGetBlockSize(), MatSetBlockSizes(), MatGetBlockSizes()
7218: @*/
7219: PetscErrorCode MatSetBlockSize(Mat mat,PetscInt bs)
7220: {

7226:   MatSetBlockSizes(mat,bs,bs);
7227:   return(0);
7228: }

7230: /*@
7231:    MatSetBlockSizes - Sets the matrix block row and column sizes.

7233:    Logically Collective on Mat

7235:    Input Parameters:
7236: +  mat - the matrix
7237: -  rbs - row block size
7238: -  cbs - column block size

7240:    Notes:
7241:     Block row formats are MATSEQBAIJ, MATMPIBAIJ, MATSEQSBAIJ, MATMPISBAIJ. These formats ALWAYS have square block storage in the matrix.
7242:     If you pass a different block size for the columns than the rows, the row block size determines the square block storage.
7243:     This must be called before MatSetUp() or MatXXXSetPreallocation() (or will default to 1) and the block size cannot be changed later

7245:     For MATMPIAIJ and MATSEQAIJ matrix formats, this function can be called at a later stage, provided that the specified block sizes
7246:     are compatible with the matrix local sizes.

7248:     The row and column block size determine the blocksize of the "row" and "column" vectors returned by MatCreateVecs().

7250:    Level: intermediate

7252:    Concepts: matrices^block size

7254: .seealso: MatCreateSeqBAIJ(), MatCreateBAIJ(), MatGetBlockSize(), MatSetBlockSize(), MatGetBlockSizes()
7255: @*/
7256: PetscErrorCode MatSetBlockSizes(Mat mat,PetscInt rbs,PetscInt cbs)
7257: {

7264:   if (mat->ops->setblocksizes) {
7265:     (*mat->ops->setblocksizes)(mat,rbs,cbs);
7266:   }
7267:   if (mat->rmap->refcnt) {
7268:     ISLocalToGlobalMapping l2g = NULL;
7269:     PetscLayout            nmap = NULL;

7271:     PetscLayoutDuplicate(mat->rmap,&nmap);
7272:     if (mat->rmap->mapping) {
7273:       ISLocalToGlobalMappingDuplicate(mat->rmap->mapping,&l2g);
7274:     }
7275:     PetscLayoutDestroy(&mat->rmap);
7276:     mat->rmap = nmap;
7277:     mat->rmap->mapping = l2g;
7278:   }
7279:   if (mat->cmap->refcnt) {
7280:     ISLocalToGlobalMapping l2g = NULL;
7281:     PetscLayout            nmap = NULL;

7283:     PetscLayoutDuplicate(mat->cmap,&nmap);
7284:     if (mat->cmap->mapping) {
7285:       ISLocalToGlobalMappingDuplicate(mat->cmap->mapping,&l2g);
7286:     }
7287:     PetscLayoutDestroy(&mat->cmap);
7288:     mat->cmap = nmap;
7289:     mat->cmap->mapping = l2g;
7290:   }
7291:   PetscLayoutSetBlockSize(mat->rmap,rbs);
7292:   PetscLayoutSetBlockSize(mat->cmap,cbs);
7293:   return(0);
7294: }

7296: /*@
7297:    MatSetBlockSizesFromMats - Sets the matrix block row and column sizes to match a pair of matrices

7299:    Logically Collective on Mat

7301:    Input Parameters:
7302: +  mat - the matrix
7303: .  fromRow - matrix from which to copy row block size
7304: -  fromCol - matrix from which to copy column block size (can be same as fromRow)

7306:    Level: developer

7308:    Concepts: matrices^block size

7310: .seealso: MatCreateSeqBAIJ(), MatCreateBAIJ(), MatGetBlockSize(), MatSetBlockSizes()
7311: @*/
7312: PetscErrorCode MatSetBlockSizesFromMats(Mat mat,Mat fromRow,Mat fromCol)
7313: {

7320:   if (fromRow->rmap->bs > 0) {PetscLayoutSetBlockSize(mat->rmap,fromRow->rmap->bs);}
7321:   if (fromCol->cmap->bs > 0) {PetscLayoutSetBlockSize(mat->cmap,fromCol->cmap->bs);}
7322:   return(0);
7323: }

7325: /*@
7326:    MatResidual - Default routine to calculate the residual.

7328:    Collective on Mat and Vec

7330:    Input Parameters:
7331: +  mat - the matrix
7332: .  b   - the right-hand-side
7333: -  x   - the approximate solution

7335:    Output Parameter:
7336: .  r - location to store the residual

7338:    Level: developer

7340: .keywords: MG, default, multigrid, residual

7342: .seealso: PCMGSetResidual()
7343: @*/
7344: PetscErrorCode MatResidual(Mat mat,Vec b,Vec x,Vec r)
7345: {

7354:   MatCheckPreallocated(mat,1);
7355:   PetscLogEventBegin(MAT_Residual,mat,0,0,0);
7356:   if (!mat->ops->residual) {
7357:     MatMult(mat,x,r);
7358:     VecAYPX(r,-1.0,b);
7359:   } else {
7360:     (*mat->ops->residual)(mat,b,x,r);
7361:   }
7362:   PetscLogEventEnd(MAT_Residual,mat,0,0,0);
7363:   return(0);
7364: }

7366: /*@C
7367:     MatGetRowIJ - Returns the compressed row storage i and j indices for sequential matrices.

7369:    Collective on Mat

7371:     Input Parameters:
7372: +   mat - the matrix
7373: .   shift -  0 or 1 indicating we want the indices starting at 0 or 1
7374: .   symmetric - PETSC_TRUE or PETSC_FALSE indicating the matrix data structure should be   symmetrized
7375: -   inodecompressed - PETSC_TRUE or PETSC_FALSE  indicating if the nonzero structure of the
7376:                  inodes or the nonzero elements is wanted. For BAIJ matrices the compressed version is
7377:                  always used.

7379:     Output Parameters:
7380: +   n - number of rows in the (possibly compressed) matrix
7381: .   ia - the row pointers [of length n+1]
7382: .   ja - the column indices
7383: -   done - indicates if the routine actually worked and returned appropriate ia[] and ja[] arrays; callers
7384:            are responsible for handling the case when done == PETSC_FALSE and ia and ja are not set

7386:     Level: developer

7388:     Notes:
7389:     You CANNOT change any of the ia[] or ja[] values.

7391:     Use MatRestoreRowIJ() when you are finished accessing the ia[] and ja[] values.

7393:     Fortran Notes:
7394:     In Fortran use
7395: $
7396: $      PetscInt ia(1), ja(1)
7397: $      PetscOffset iia, jja
7398: $      call MatGetRowIJ(mat,shift,symmetric,inodecompressed,n,ia,iia,ja,jja,done,ierr)
7399: $      ! Access the ith and jth entries via ia(iia + i) and ja(jja + j)

7401:      or
7402: $
7403: $    PetscInt, pointer :: ia(:),ja(:)
7404: $    call MatGetRowIJF90(mat,shift,symmetric,inodecompressed,n,ia,ja,done,ierr)
7405: $    ! Access the ith and jth entries via ia(i) and ja(j)

7407: .seealso: MatGetColumnIJ(), MatRestoreRowIJ(), MatSeqAIJGetArray()
7408: @*/
7409: PetscErrorCode MatGetRowIJ(Mat mat,PetscInt shift,PetscBool symmetric,PetscBool inodecompressed,PetscInt *n,const PetscInt *ia[],const PetscInt *ja[],PetscBool  *done)
7410: {

7420:   MatCheckPreallocated(mat,1);
7421:   if (!mat->ops->getrowij) *done = PETSC_FALSE;
7422:   else {
7423:     *done = PETSC_TRUE;
7424:     PetscLogEventBegin(MAT_GetRowIJ,mat,0,0,0);
7425:     (*mat->ops->getrowij)(mat,shift,symmetric,inodecompressed,n,ia,ja,done);
7426:     PetscLogEventEnd(MAT_GetRowIJ,mat,0,0,0);
7427:   }
7428:   return(0);
7429: }

7431: /*@C
7432:     MatGetColumnIJ - Returns the compressed column storage i and j indices for sequential matrices.

7434:     Collective on Mat

7436:     Input Parameters:
7437: +   mat - the matrix
7438: .   shift - 1 or zero indicating we want the indices starting at 0 or 1
7439: .   symmetric - PETSC_TRUE or PETSC_FALSE indicating the matrix data structure should be
7440:                 symmetrized
7441: .   inodecompressed - PETSC_TRUE or PETSC_FALSE indicating if the nonzero structure of the
7442:                  inodes or the nonzero elements is wanted. For BAIJ matrices the compressed version is
7443:                  always used.
7444: .   n - number of columns in the (possibly compressed) matrix
7445: .   ia - the column pointers
7446: -   ja - the row indices

7448:     Output Parameters:
7449: .   done - PETSC_TRUE or PETSC_FALSE, indicating whether the values have been returned

7451:     Note:
7452:     This routine zeros out n, ia, and ja. This is to prevent accidental
7453:     us of the array after it has been restored. If you pass NULL, it will
7454:     not zero the pointers.  Use of ia or ja after MatRestoreColumnIJ() is invalid.

7456:     Level: developer

7458: .seealso: MatGetRowIJ(), MatRestoreColumnIJ()
7459: @*/
7460: PetscErrorCode MatGetColumnIJ(Mat mat,PetscInt shift,PetscBool symmetric,PetscBool inodecompressed,PetscInt *n,const PetscInt *ia[],const PetscInt *ja[],PetscBool  *done)
7461: {

7471:   MatCheckPreallocated(mat,1);
7472:   if (!mat->ops->getcolumnij) *done = PETSC_FALSE;
7473:   else {
7474:     *done = PETSC_TRUE;
7475:     (*mat->ops->getcolumnij)(mat,shift,symmetric,inodecompressed,n,ia,ja,done);
7476:   }
7477:   return(0);
7478: }

7480: /*@C
7481:     MatRestoreRowIJ - Call after you are completed with the ia,ja indices obtained with
7482:     MatGetRowIJ().

7484:     Collective on Mat

7486:     Input Parameters:
7487: +   mat - the matrix
7488: .   shift - 1 or zero indicating we want the indices starting at 0 or 1
7489: .   symmetric - PETSC_TRUE or PETSC_FALSE indicating the matrix data structure should be
7490:                 symmetrized
7491: .   inodecompressed -  PETSC_TRUE or PETSC_FALSE indicating if the nonzero structure of the
7492:                  inodes or the nonzero elements is wanted. For BAIJ matrices the compressed version is
7493:                  always used.
7494: .   n - size of (possibly compressed) matrix
7495: .   ia - the row pointers
7496: -   ja - the column indices

7498:     Output Parameters:
7499: .   done - PETSC_TRUE or PETSC_FALSE indicated that the values have been returned

7501:     Note:
7502:     This routine zeros out n, ia, and ja. This is to prevent accidental
7503:     us of the array after it has been restored. If you pass NULL, it will
7504:     not zero the pointers.  Use of ia or ja after MatRestoreRowIJ() is invalid.

7506:     Level: developer

7508: .seealso: MatGetRowIJ(), MatRestoreColumnIJ()
7509: @*/
7510: PetscErrorCode MatRestoreRowIJ(Mat mat,PetscInt shift,PetscBool symmetric,PetscBool inodecompressed,PetscInt *n,const PetscInt *ia[],const PetscInt *ja[],PetscBool  *done)
7511: {

7520:   MatCheckPreallocated(mat,1);

7522:   if (!mat->ops->restorerowij) *done = PETSC_FALSE;
7523:   else {
7524:     *done = PETSC_TRUE;
7525:     (*mat->ops->restorerowij)(mat,shift,symmetric,inodecompressed,n,ia,ja,done);
7526:     if (n)  *n = 0;
7527:     if (ia) *ia = NULL;
7528:     if (ja) *ja = NULL;
7529:   }
7530:   return(0);
7531: }

7533: /*@C
7534:     MatRestoreColumnIJ - Call after you are completed with the ia,ja indices obtained with
7535:     MatGetColumnIJ().

7537:     Collective on Mat

7539:     Input Parameters:
7540: +   mat - the matrix
7541: .   shift - 1 or zero indicating we want the indices starting at 0 or 1
7542: -   symmetric - PETSC_TRUE or PETSC_FALSE indicating the matrix data structure should be
7543:                 symmetrized
7544: -   inodecompressed - PETSC_TRUE or PETSC_FALSE indicating if the nonzero structure of the
7545:                  inodes or the nonzero elements is wanted. For BAIJ matrices the compressed version is
7546:                  always used.

7548:     Output Parameters:
7549: +   n - size of (possibly compressed) matrix
7550: .   ia - the column pointers
7551: .   ja - the row indices
7552: -   done - PETSC_TRUE or PETSC_FALSE indicated that the values have been returned

7554:     Level: developer

7556: .seealso: MatGetColumnIJ(), MatRestoreRowIJ()
7557: @*/
7558: PetscErrorCode MatRestoreColumnIJ(Mat mat,PetscInt shift,PetscBool symmetric,PetscBool inodecompressed,PetscInt *n,const PetscInt *ia[],const PetscInt *ja[],PetscBool  *done)
7559: {

7568:   MatCheckPreallocated(mat,1);

7570:   if (!mat->ops->restorecolumnij) *done = PETSC_FALSE;
7571:   else {
7572:     *done = PETSC_TRUE;
7573:     (*mat->ops->restorecolumnij)(mat,shift,symmetric,inodecompressed,n,ia,ja,done);
7574:     if (n)  *n = 0;
7575:     if (ia) *ia = NULL;
7576:     if (ja) *ja = NULL;
7577:   }
7578:   return(0);
7579: }

7581: /*@C
7582:     MatColoringPatch -Used inside matrix coloring routines that
7583:     use MatGetRowIJ() and/or MatGetColumnIJ().

7585:     Collective on Mat

7587:     Input Parameters:
7588: +   mat - the matrix
7589: .   ncolors - max color value
7590: .   n   - number of entries in colorarray
7591: -   colorarray - array indicating color for each column

7593:     Output Parameters:
7594: .   iscoloring - coloring generated using colorarray information

7596:     Level: developer

7598: .seealso: MatGetRowIJ(), MatGetColumnIJ()

7600: @*/
7601: PetscErrorCode MatColoringPatch(Mat mat,PetscInt ncolors,PetscInt n,ISColoringValue colorarray[],ISColoring *iscoloring)
7602: {

7610:   MatCheckPreallocated(mat,1);

7612:   if (!mat->ops->coloringpatch) {
7613:     ISColoringCreate(PetscObjectComm((PetscObject)mat),ncolors,n,colorarray,PETSC_OWN_POINTER,iscoloring);
7614:   } else {
7615:     (*mat->ops->coloringpatch)(mat,ncolors,n,colorarray,iscoloring);
7616:   }
7617:   return(0);
7618: }


7621: /*@
7622:    MatSetUnfactored - Resets a factored matrix to be treated as unfactored.

7624:    Logically Collective on Mat

7626:    Input Parameter:
7627: .  mat - the factored matrix to be reset

7629:    Notes:
7630:    This routine should be used only with factored matrices formed by in-place
7631:    factorization via ILU(0) (or by in-place LU factorization for the MATSEQDENSE
7632:    format).  This option can save memory, for example, when solving nonlinear
7633:    systems with a matrix-free Newton-Krylov method and a matrix-based, in-place
7634:    ILU(0) preconditioner.

7636:    Note that one can specify in-place ILU(0) factorization by calling
7637: .vb
7638:      PCType(pc,PCILU);
7639:      PCFactorSeUseInPlace(pc);
7640: .ve
7641:    or by using the options -pc_type ilu -pc_factor_in_place

7643:    In-place factorization ILU(0) can also be used as a local
7644:    solver for the blocks within the block Jacobi or additive Schwarz
7645:    methods (runtime option: -sub_pc_factor_in_place).  See Users-Manual: ch_pc
7646:    for details on setting local solver options.

7648:    Most users should employ the simplified KSP interface for linear solvers
7649:    instead of working directly with matrix algebra routines such as this.
7650:    See, e.g., KSPCreate().

7652:    Level: developer

7654: .seealso: PCFactorSetUseInPlace(), PCFactorGetUseInPlace()

7656:    Concepts: matrices^unfactored

7658: @*/
7659: PetscErrorCode MatSetUnfactored(Mat mat)
7660: {

7666:   MatCheckPreallocated(mat,1);
7667:   mat->factortype = MAT_FACTOR_NONE;
7668:   if (!mat->ops->setunfactored) return(0);
7669:   (*mat->ops->setunfactored)(mat);
7670:   return(0);
7671: }

7673: /*MC
7674:     MatDenseGetArrayF90 - Accesses a matrix array from Fortran90.

7676:     Synopsis:
7677:     MatDenseGetArrayF90(Mat x,{Scalar, pointer :: xx_v(:,:)},integer ierr)

7679:     Not collective

7681:     Input Parameter:
7682: .   x - matrix

7684:     Output Parameters:
7685: +   xx_v - the Fortran90 pointer to the array
7686: -   ierr - error code

7688:     Example of Usage:
7689: .vb
7690:       PetscScalar, pointer xx_v(:,:)
7691:       ....
7692:       call MatDenseGetArrayF90(x,xx_v,ierr)
7693:       a = xx_v(3)
7694:       call MatDenseRestoreArrayF90(x,xx_v,ierr)
7695: .ve

7697:     Level: advanced

7699: .seealso:  MatDenseRestoreArrayF90(), MatDenseGetArray(), MatDenseRestoreArray(), MatSeqAIJGetArrayF90()

7701:     Concepts: matrices^accessing array

7703: M*/

7705: /*MC
7706:     MatDenseRestoreArrayF90 - Restores a matrix array that has been
7707:     accessed with MatDenseGetArrayF90().

7709:     Synopsis:
7710:     MatDenseRestoreArrayF90(Mat x,{Scalar, pointer :: xx_v(:,:)},integer ierr)

7712:     Not collective

7714:     Input Parameters:
7715: +   x - matrix
7716: -   xx_v - the Fortran90 pointer to the array

7718:     Output Parameter:
7719: .   ierr - error code

7721:     Example of Usage:
7722: .vb
7723:        PetscScalar, pointer xx_v(:,:)
7724:        ....
7725:        call MatDenseGetArrayF90(x,xx_v,ierr)
7726:        a = xx_v(3)
7727:        call MatDenseRestoreArrayF90(x,xx_v,ierr)
7728: .ve

7730:     Level: advanced

7732: .seealso:  MatDenseGetArrayF90(), MatDenseGetArray(), MatDenseRestoreArray(), MatSeqAIJRestoreArrayF90()

7734: M*/


7737: /*MC
7738:     MatSeqAIJGetArrayF90 - Accesses a matrix array from Fortran90.

7740:     Synopsis:
7741:     MatSeqAIJGetArrayF90(Mat x,{Scalar, pointer :: xx_v(:)},integer ierr)

7743:     Not collective

7745:     Input Parameter:
7746: .   x - matrix

7748:     Output Parameters:
7749: +   xx_v - the Fortran90 pointer to the array
7750: -   ierr - error code

7752:     Example of Usage:
7753: .vb
7754:       PetscScalar, pointer xx_v(:)
7755:       ....
7756:       call MatSeqAIJGetArrayF90(x,xx_v,ierr)
7757:       a = xx_v(3)
7758:       call MatSeqAIJRestoreArrayF90(x,xx_v,ierr)
7759: .ve

7761:     Level: advanced

7763: .seealso:  MatSeqAIJRestoreArrayF90(), MatSeqAIJGetArray(), MatSeqAIJRestoreArray(), MatDenseGetArrayF90()

7765:     Concepts: matrices^accessing array

7767: M*/

7769: /*MC
7770:     MatSeqAIJRestoreArrayF90 - Restores a matrix array that has been
7771:     accessed with MatSeqAIJGetArrayF90().

7773:     Synopsis:
7774:     MatSeqAIJRestoreArrayF90(Mat x,{Scalar, pointer :: xx_v(:)},integer ierr)

7776:     Not collective

7778:     Input Parameters:
7779: +   x - matrix
7780: -   xx_v - the Fortran90 pointer to the array

7782:     Output Parameter:
7783: .   ierr - error code

7785:     Example of Usage:
7786: .vb
7787:        PetscScalar, pointer xx_v(:)
7788:        ....
7789:        call MatSeqAIJGetArrayF90(x,xx_v,ierr)
7790:        a = xx_v(3)
7791:        call MatSeqAIJRestoreArrayF90(x,xx_v,ierr)
7792: .ve

7794:     Level: advanced

7796: .seealso:  MatSeqAIJGetArrayF90(), MatSeqAIJGetArray(), MatSeqAIJRestoreArray(), MatDenseRestoreArrayF90()

7798: M*/


7801: /*@
7802:     MatCreateSubMatrix - Gets a single submatrix on the same number of processors
7803:                       as the original matrix.

7805:     Collective on Mat

7807:     Input Parameters:
7808: +   mat - the original matrix
7809: .   isrow - parallel IS containing the rows this processor should obtain
7810: .   iscol - parallel IS containing all columns you wish to keep. Each process should list the columns that will be in IT's "diagonal part" in the new matrix.
7811: -   cll - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX

7813:     Output Parameter:
7814: .   newmat - the new submatrix, of the same type as the old

7816:     Level: advanced

7818:     Notes:
7819:     The submatrix will be able to be multiplied with vectors using the same layout as iscol.

7821:     Some matrix types place restrictions on the row and column indices, such
7822:     as that they be sorted or that they be equal to each other.

7824:     The index sets may not have duplicate entries.

7826:       The first time this is called you should use a cll of MAT_INITIAL_MATRIX,
7827:    the MatCreateSubMatrix() routine will create the newmat for you. Any additional calls
7828:    to this routine with a mat of the same nonzero structure and with a call of MAT_REUSE_MATRIX
7829:    will reuse the matrix generated the first time.  You should call MatDestroy() on newmat when
7830:    you are finished using it.

7832:     The communicator of the newly obtained matrix is ALWAYS the same as the communicator of
7833:     the input matrix.

7835:     If iscol is NULL then all columns are obtained (not supported in Fortran).

7837:    Example usage:
7838:    Consider the following 8x8 matrix with 34 non-zero values, that is
7839:    assembled across 3 processors. Let's assume that proc0 owns 3 rows,
7840:    proc1 owns 3 rows, proc2 owns 2 rows. This division can be shown
7841:    as follows:

7843: .vb
7844:             1  2  0  |  0  3  0  |  0  4
7845:     Proc0   0  5  6  |  7  0  0  |  8  0
7846:             9  0 10  | 11  0  0  | 12  0
7847:     -------------------------------------
7848:            13  0 14  | 15 16 17  |  0  0
7849:     Proc1   0 18  0  | 19 20 21  |  0  0
7850:             0  0  0  | 22 23  0  | 24  0
7851:     -------------------------------------
7852:     Proc2  25 26 27  |  0  0 28  | 29  0
7853:            30  0  0  | 31 32 33  |  0 34
7854: .ve

7856:     Suppose isrow = [0 1 | 4 | 6 7] and iscol = [1 2 | 3 4 5 | 6].  The resulting submatrix is

7858: .vb
7859:             2  0  |  0  3  0  |  0
7860:     Proc0   5  6  |  7  0  0  |  8
7861:     -------------------------------
7862:     Proc1  18  0  | 19 20 21  |  0
7863:     -------------------------------
7864:     Proc2  26 27  |  0  0 28  | 29
7865:             0  0  | 31 32 33  |  0
7866: .ve


7869:     Concepts: matrices^submatrices

7871: .seealso: MatCreateSubMatrices()
7872: @*/
7873: PetscErrorCode MatCreateSubMatrix(Mat mat,IS isrow,IS iscol,MatReuse cll,Mat *newmat)
7874: {
7876:   PetscMPIInt    size;
7877:   Mat            *local;
7878:   IS             iscoltmp;

7887:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
7888:   if (cll == MAT_IGNORE_MATRIX) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Cannot use MAT_IGNORE_MATRIX");

7890:   MatCheckPreallocated(mat,1);
7891:   MPI_Comm_size(PetscObjectComm((PetscObject)mat),&size);

7893:   if (!iscol || isrow == iscol) {
7894:     PetscBool   stride;
7895:     PetscMPIInt grabentirematrix = 0,grab;
7896:     PetscObjectTypeCompare((PetscObject)isrow,ISSTRIDE,&stride);
7897:     if (stride) {
7898:       PetscInt first,step,n,rstart,rend;
7899:       ISStrideGetInfo(isrow,&first,&step);
7900:       if (step == 1) {
7901:         MatGetOwnershipRange(mat,&rstart,&rend);
7902:         if (rstart == first) {
7903:           ISGetLocalSize(isrow,&n);
7904:           if (n == rend-rstart) {
7905:             grabentirematrix = 1;
7906:           }
7907:         }
7908:       }
7909:     }
7910:     MPIU_Allreduce(&grabentirematrix,&grab,1,MPI_INT,MPI_MIN,PetscObjectComm((PetscObject)mat));
7911:     if (grab) {
7912:       PetscInfo(mat,"Getting entire matrix as submatrix\n");
7913:       if (cll == MAT_INITIAL_MATRIX) {
7914:         *newmat = mat;
7915:         PetscObjectReference((PetscObject)mat);
7916:       }
7917:       return(0);
7918:     }
7919:   }

7921:   if (!iscol) {
7922:     ISCreateStride(PetscObjectComm((PetscObject)mat),mat->cmap->n,mat->cmap->rstart,1,&iscoltmp);
7923:   } else {
7924:     iscoltmp = iscol;
7925:   }

7927:   /* if original matrix is on just one processor then use submatrix generated */
7928:   if (mat->ops->createsubmatrices && !mat->ops->createsubmatrix && size == 1 && cll == MAT_REUSE_MATRIX) {
7929:     MatCreateSubMatrices(mat,1,&isrow,&iscoltmp,MAT_REUSE_MATRIX,&newmat);
7930:     if (!iscol) {ISDestroy(&iscoltmp);}
7931:     return(0);
7932:   } else if (mat->ops->createsubmatrices && !mat->ops->createsubmatrix && size == 1) {
7933:     MatCreateSubMatrices(mat,1,&isrow,&iscoltmp,MAT_INITIAL_MATRIX,&local);
7934:     *newmat = *local;
7935:     PetscFree(local);
7936:     if (!iscol) {ISDestroy(&iscoltmp);}
7937:     return(0);
7938:   } else if (!mat->ops->createsubmatrix) {
7939:     /* Create a new matrix type that implements the operation using the full matrix */
7940:     PetscLogEventBegin(MAT_CreateSubMat,mat,0,0,0);
7941:     switch (cll) {
7942:     case MAT_INITIAL_MATRIX:
7943:       MatCreateSubMatrixVirtual(mat,isrow,iscoltmp,newmat);
7944:       break;
7945:     case MAT_REUSE_MATRIX:
7946:       MatSubMatrixVirtualUpdate(*newmat,mat,isrow,iscoltmp);
7947:       break;
7948:     default: SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_OUTOFRANGE,"Invalid MatReuse, must be either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX");
7949:     }
7950:     PetscLogEventEnd(MAT_CreateSubMat,mat,0,0,0);
7951:     if (!iscol) {ISDestroy(&iscoltmp);}
7952:     return(0);
7953:   }

7955:   if (!mat->ops->createsubmatrix) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
7956:   PetscLogEventBegin(MAT_CreateSubMat,mat,0,0,0);
7957:   (*mat->ops->createsubmatrix)(mat,isrow,iscoltmp,cll,newmat);
7958:   PetscLogEventEnd(MAT_CreateSubMat,mat,0,0,0);
7959:   if (!iscol) {ISDestroy(&iscoltmp);}
7960:   if (*newmat && cll == MAT_INITIAL_MATRIX) {PetscObjectStateIncrease((PetscObject)*newmat);}
7961:   return(0);
7962: }

7964: /*@
7965:    MatStashSetInitialSize - sets the sizes of the matrix stash, that is
7966:    used during the assembly process to store values that belong to
7967:    other processors.

7969:    Not Collective

7971:    Input Parameters:
7972: +  mat   - the matrix
7973: .  size  - the initial size of the stash.
7974: -  bsize - the initial size of the block-stash(if used).

7976:    Options Database Keys:
7977: +   -matstash_initial_size <size> or <size0,size1,...sizep-1>
7978: -   -matstash_block_initial_size <bsize>  or <bsize0,bsize1,...bsizep-1>

7980:    Level: intermediate

7982:    Notes:
7983:      The block-stash is used for values set with MatSetValuesBlocked() while
7984:      the stash is used for values set with MatSetValues()

7986:      Run with the option -info and look for output of the form
7987:      MatAssemblyBegin_MPIXXX:Stash has MM entries, uses nn mallocs.
7988:      to determine the appropriate value, MM, to use for size and
7989:      MatAssemblyBegin_MPIXXX:Block-Stash has BMM entries, uses nn mallocs.
7990:      to determine the value, BMM to use for bsize

7992:    Concepts: stash^setting matrix size
7993:    Concepts: matrices^stash

7995: .seealso: MatAssemblyBegin(), MatAssemblyEnd(), Mat, MatStashGetInfo()

7997: @*/
7998: PetscErrorCode MatStashSetInitialSize(Mat mat,PetscInt size, PetscInt bsize)
7999: {

8005:   MatStashSetInitialSize_Private(&mat->stash,size);
8006:   MatStashSetInitialSize_Private(&mat->bstash,bsize);
8007:   return(0);
8008: }

8010: /*@
8011:    MatInterpolateAdd - w = y + A*x or A'*x depending on the shape of
8012:      the matrix

8014:    Neighbor-wise Collective on Mat

8016:    Input Parameters:
8017: +  mat   - the matrix
8018: .  x,y - the vectors
8019: -  w - where the result is stored

8021:    Level: intermediate

8023:    Notes:
8024:     w may be the same vector as y.

8026:     This allows one to use either the restriction or interpolation (its transpose)
8027:     matrix to do the interpolation

8029:     Concepts: interpolation

8031: .seealso: MatMultAdd(), MatMultTransposeAdd(), MatRestrict()

8033: @*/
8034: PetscErrorCode MatInterpolateAdd(Mat A,Vec x,Vec y,Vec w)
8035: {
8037:   PetscInt       M,N,Ny;

8045:   MatCheckPreallocated(A,1);
8046:   MatGetSize(A,&M,&N);
8047:   VecGetSize(y,&Ny);
8048:   if (M == Ny) {
8049:     MatMultAdd(A,x,y,w);
8050:   } else {
8051:     MatMultTransposeAdd(A,x,y,w);
8052:   }
8053:   return(0);
8054: }

8056: /*@
8057:    MatInterpolate - y = A*x or A'*x depending on the shape of
8058:      the matrix

8060:    Neighbor-wise Collective on Mat

8062:    Input Parameters:
8063: +  mat   - the matrix
8064: -  x,y - the vectors

8066:    Level: intermediate

8068:    Notes:
8069:     This allows one to use either the restriction or interpolation (its transpose)
8070:     matrix to do the interpolation

8072:    Concepts: matrices^interpolation

8074: .seealso: MatMultAdd(), MatMultTransposeAdd(), MatRestrict()

8076: @*/
8077: PetscErrorCode MatInterpolate(Mat A,Vec x,Vec y)
8078: {
8080:   PetscInt       M,N,Ny;

8087:   MatCheckPreallocated(A,1);
8088:   MatGetSize(A,&M,&N);
8089:   VecGetSize(y,&Ny);
8090:   if (M == Ny) {
8091:     MatMult(A,x,y);
8092:   } else {
8093:     MatMultTranspose(A,x,y);
8094:   }
8095:   return(0);
8096: }

8098: /*@
8099:    MatRestrict - y = A*x or A'*x

8101:    Neighbor-wise Collective on Mat

8103:    Input Parameters:
8104: +  mat   - the matrix
8105: -  x,y - the vectors

8107:    Level: intermediate

8109:    Notes:
8110:     This allows one to use either the restriction or interpolation (its transpose)
8111:     matrix to do the restriction

8113:    Concepts: matrices^restriction

8115: .seealso: MatMultAdd(), MatMultTransposeAdd(), MatInterpolate()

8117: @*/
8118: PetscErrorCode MatRestrict(Mat A,Vec x,Vec y)
8119: {
8121:   PetscInt       M,N,Ny;

8128:   MatCheckPreallocated(A,1);

8130:   MatGetSize(A,&M,&N);
8131:   VecGetSize(y,&Ny);
8132:   if (M == Ny) {
8133:     MatMult(A,x,y);
8134:   } else {
8135:     MatMultTranspose(A,x,y);
8136:   }
8137:   return(0);
8138: }

8140: /*@C
8141:    MatGetNullSpace - retrieves the null space to a matrix.

8143:    Logically Collective on Mat and MatNullSpace

8145:    Input Parameters:
8146: +  mat - the matrix
8147: -  nullsp - the null space object

8149:    Level: developer

8151:    Concepts: null space^attaching to matrix

8153: .seealso: MatCreate(), MatNullSpaceCreate(), MatSetNearNullSpace(), MatSetNullSpace()
8154: @*/
8155: PetscErrorCode MatGetNullSpace(Mat mat, MatNullSpace *nullsp)
8156: {
8160:   *nullsp = mat->nullsp;
8161:   return(0);
8162: }

8164: /*@C
8165:    MatSetNullSpace - attaches a null space to a matrix.

8167:    Logically Collective on Mat and MatNullSpace

8169:    Input Parameters:
8170: +  mat - the matrix
8171: -  nullsp - the null space object

8173:    Level: advanced

8175:    Notes:
8176:       This null space is used by the linear solvers. Overwrites any previous null space that may have been attached

8178:       For inconsistent singular systems (linear systems where the right hand side is not in the range of the operator) you also likely should
8179:       call MatSetTransposeNullSpace(). This allows the linear system to be solved in a least squares sense.

8181:       You can remove the null space by calling this routine with an nullsp of NULL


8184:       The fundamental theorem of linear algebra (Gilbert Strang, Introduction to Applied Mathematics, page 72) states that
8185:    the domain of a matrix A (from R^n to R^m (m rows, n columns) R^n = the direct sum of the null space of A, n(A), + the range of A^T, R(A^T).
8186:    Similarly R^m = direct sum n(A^T) + R(A).  Hence the linear system A x = b has a solution only if b in R(A) (or correspondingly b is orthogonal to
8187:    n(A^T)) and if x is a solution then x + alpha n(A) is a solution for any alpha. The minimum norm solution is orthogonal to n(A). For problems without a solution
8188:    the solution that minimizes the norm of the residual (the least squares solution) can be obtained by solving A x = \hat{b} where \hat{b} is b orthogonalized to the n(A^T).

8190:       Krylov solvers can produce the minimal norm solution to the least squares problem by utilizing MatNullSpaceRemove().

8192:     If the matrix is known to be symmetric because it is an SBAIJ matrix or one as called MatSetOption(mat,MAT_SYMMETRIC or MAT_SYMMETRIC_ETERNAL,PETSC_TRUE); this
8193:     routine also automatically calls MatSetTransposeNullSpace().

8195:    Concepts: null space^attaching to matrix

8197: .seealso: MatCreate(), MatNullSpaceCreate(), MatSetNearNullSpace(), MatGetNullSpace(), MatSetTransposeNullSpace(), MatGetTransposeNullSpace(), MatNullSpaceRemove()
8198: @*/
8199: PetscErrorCode MatSetNullSpace(Mat mat,MatNullSpace nullsp)
8200: {

8206:   if (nullsp) {PetscObjectReference((PetscObject)nullsp);}
8207:   MatNullSpaceDestroy(&mat->nullsp);
8208:   mat->nullsp = nullsp;
8209:   if (mat->symmetric_set && mat->symmetric) {
8210:     MatSetTransposeNullSpace(mat,nullsp);
8211:   }
8212:   return(0);
8213: }

8215: /*@
8216:    MatGetTransposeNullSpace - retrieves the null space of the transpose of a matrix.

8218:    Logically Collective on Mat and MatNullSpace

8220:    Input Parameters:
8221: +  mat - the matrix
8222: -  nullsp - the null space object

8224:    Level: developer

8226:    Concepts: null space^attaching to matrix

8228: .seealso: MatCreate(), MatNullSpaceCreate(), MatSetNearNullSpace(), MatSetTransposeNullSpace(), MatSetNullSpace(), MatGetNullSpace()
8229: @*/
8230: PetscErrorCode MatGetTransposeNullSpace(Mat mat, MatNullSpace *nullsp)
8231: {
8236:   *nullsp = mat->transnullsp;
8237:   return(0);
8238: }

8240: /*@
8241:    MatSetTransposeNullSpace - attaches a null space to a matrix.

8243:    Logically Collective on Mat and MatNullSpace

8245:    Input Parameters:
8246: +  mat - the matrix
8247: -  nullsp - the null space object

8249:    Level: advanced

8251:    Notes:
8252:       For inconsistent singular systems (linear systems where the right hand side is not in the range of the operator) this allows the linear system to be solved in a least squares sense.
8253:       You must also call MatSetNullSpace()


8256:       The fundamental theorem of linear algebra (Gilbert Strang, Introduction to Applied Mathematics, page 72) states that
8257:    the domain of a matrix A (from R^n to R^m (m rows, n columns) R^n = the direct sum of the null space of A, n(A), + the range of A^T, R(A^T).
8258:    Similarly R^m = direct sum n(A^T) + R(A).  Hence the linear system A x = b has a solution only if b in R(A) (or correspondingly b is orthogonal to
8259:    n(A^T)) and if x is a solution then x + alpha n(A) is a solution for any alpha. The minimum norm solution is orthogonal to n(A). For problems without a solution
8260:    the solution that minimizes the norm of the residual (the least squares solution) can be obtained by solving A x = \hat{b} where \hat{b} is b orthogonalized to the n(A^T).

8262:       Krylov solvers can produce the minimal norm solution to the least squares problem by utilizing MatNullSpaceRemove().

8264:    Concepts: null space^attaching to matrix

8266: .seealso: MatCreate(), MatNullSpaceCreate(), MatSetNearNullSpace(), MatGetNullSpace(), MatSetNullSpace(), MatGetTransposeNullSpace(), MatNullSpaceRemove()
8267: @*/
8268: PetscErrorCode MatSetTransposeNullSpace(Mat mat,MatNullSpace nullsp)
8269: {

8276:   MatCheckPreallocated(mat,1);
8277:   PetscObjectReference((PetscObject)nullsp);
8278:   MatNullSpaceDestroy(&mat->transnullsp);
8279:   mat->transnullsp = nullsp;
8280:   return(0);
8281: }

8283: /*@
8284:    MatSetNearNullSpace - attaches a null space to a matrix, which is often the null space (rigid body modes) of the operator without boundary conditions
8285:         This null space will be used to provide near null space vectors to a multigrid preconditioner built from this matrix.

8287:    Logically Collective on Mat and MatNullSpace

8289:    Input Parameters:
8290: +  mat - the matrix
8291: -  nullsp - the null space object

8293:    Level: advanced

8295:    Notes:
8296:       Overwrites any previous near null space that may have been attached

8298:       You can remove the null space by calling this routine with an nullsp of NULL

8300:    Concepts: null space^attaching to matrix

8302: .seealso: MatCreate(), MatNullSpaceCreate(), MatSetNullSpace(), MatNullSpaceCreateRigidBody(), MatGetNearNullSpace()
8303: @*/
8304: PetscErrorCode MatSetNearNullSpace(Mat mat,MatNullSpace nullsp)
8305: {

8312:   MatCheckPreallocated(mat,1);
8313:   if (nullsp) {PetscObjectReference((PetscObject)nullsp);}
8314:   MatNullSpaceDestroy(&mat->nearnullsp);
8315:   mat->nearnullsp = nullsp;
8316:   return(0);
8317: }

8319: /*@
8320:    MatGetNearNullSpace -Get null space attached with MatSetNearNullSpace()

8322:    Not Collective

8324:    Input Parameters:
8325: .  mat - the matrix

8327:    Output Parameters:
8328: .  nullsp - the null space object, NULL if not set

8330:    Level: developer

8332:    Concepts: null space^attaching to matrix

8334: .seealso: MatSetNearNullSpace(), MatGetNullSpace(), MatNullSpaceCreate()
8335: @*/
8336: PetscErrorCode MatGetNearNullSpace(Mat mat,MatNullSpace *nullsp)
8337: {
8342:   MatCheckPreallocated(mat,1);
8343:   *nullsp = mat->nearnullsp;
8344:   return(0);
8345: }

8347: /*@C
8348:    MatICCFactor - Performs in-place incomplete Cholesky factorization of matrix.

8350:    Collective on Mat

8352:    Input Parameters:
8353: +  mat - the matrix
8354: .  row - row/column permutation
8355: .  fill - expected fill factor >= 1.0
8356: -  level - level of fill, for ICC(k)

8358:    Notes:
8359:    Probably really in-place only when level of fill is zero, otherwise allocates
8360:    new space to store factored matrix and deletes previous memory.

8362:    Most users should employ the simplified KSP interface for linear solvers
8363:    instead of working directly with matrix algebra routines such as this.
8364:    See, e.g., KSPCreate().

8366:    Level: developer

8368:    Concepts: matrices^incomplete Cholesky factorization
8369:    Concepts: Cholesky factorization

8371: .seealso: MatICCFactorSymbolic(), MatLUFactorNumeric(), MatCholeskyFactor()

8373:     Developer Note: fortran interface is not autogenerated as the f90
8374:     interface defintion cannot be generated correctly [due to MatFactorInfo]

8376: @*/
8377: PetscErrorCode MatICCFactor(Mat mat,IS row,const MatFactorInfo *info)
8378: {

8386:   if (mat->rmap->N != mat->cmap->N) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONG,"matrix must be square");
8387:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
8388:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
8389:   if (!mat->ops->iccfactor) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
8390:   MatCheckPreallocated(mat,1);
8391:   (*mat->ops->iccfactor)(mat,row,info);
8392:   PetscObjectStateIncrease((PetscObject)mat);
8393:   return(0);
8394: }

8396: /*@
8397:    MatDiagonalScaleLocal - Scales columns of a matrix given the scaling values including the
8398:          ghosted ones.

8400:    Not Collective

8402:    Input Parameters:
8403: +  mat - the matrix
8404: -  diag = the diagonal values, including ghost ones

8406:    Level: developer

8408:    Notes: Works only for MPIAIJ and MPIBAIJ matrices

8410: .seealso: MatDiagonalScale()
8411: @*/
8412: PetscErrorCode MatDiagonalScaleLocal(Mat mat,Vec diag)
8413: {
8415:   PetscMPIInt    size;


8422:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Matrix must be already assembled");
8423:   PetscLogEventBegin(MAT_Scale,mat,0,0,0);
8424:   MPI_Comm_size(PetscObjectComm((PetscObject)mat),&size);
8425:   if (size == 1) {
8426:     PetscInt n,m;
8427:     VecGetSize(diag,&n);
8428:     MatGetSize(mat,0,&m);
8429:     if (m == n) {
8430:       MatDiagonalScale(mat,0,diag);
8431:     } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Only supported for sequential matrices when no ghost points/periodic conditions");
8432:   } else {
8433:     PetscUseMethod(mat,"MatDiagonalScaleLocal_C",(Mat,Vec),(mat,diag));
8434:   }
8435:   PetscLogEventEnd(MAT_Scale,mat,0,0,0);
8436:   PetscObjectStateIncrease((PetscObject)mat);
8437:   return(0);
8438: }

8440: /*@
8441:    MatGetInertia - Gets the inertia from a factored matrix

8443:    Collective on Mat

8445:    Input Parameter:
8446: .  mat - the matrix

8448:    Output Parameters:
8449: +   nneg - number of negative eigenvalues
8450: .   nzero - number of zero eigenvalues
8451: -   npos - number of positive eigenvalues

8453:    Level: advanced

8455:    Notes: Matrix must have been factored by MatCholeskyFactor()


8458: @*/
8459: PetscErrorCode MatGetInertia(Mat mat,PetscInt *nneg,PetscInt *nzero,PetscInt *npos)
8460: {

8466:   if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
8467:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Numeric factor mat is not assembled");
8468:   if (!mat->ops->getinertia) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
8469:   (*mat->ops->getinertia)(mat,nneg,nzero,npos);
8470:   return(0);
8471: }

8473: /* ----------------------------------------------------------------*/
8474: /*@C
8475:    MatSolves - Solves A x = b, given a factored matrix, for a collection of vectors

8477:    Neighbor-wise Collective on Mat and Vecs

8479:    Input Parameters:
8480: +  mat - the factored matrix
8481: -  b - the right-hand-side vectors

8483:    Output Parameter:
8484: .  x - the result vectors

8486:    Notes:
8487:    The vectors b and x cannot be the same.  I.e., one cannot
8488:    call MatSolves(A,x,x).

8490:    Notes:
8491:    Most users should employ the simplified KSP interface for linear solvers
8492:    instead of working directly with matrix algebra routines such as this.
8493:    See, e.g., KSPCreate().

8495:    Level: developer

8497:    Concepts: matrices^triangular solves

8499: .seealso: MatSolveAdd(), MatSolveTranspose(), MatSolveTransposeAdd(), MatSolve()
8500: @*/
8501: PetscErrorCode MatSolves(Mat mat,Vecs b,Vecs x)
8502: {

8508:   if (x == b) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"x and b must be different vectors");
8509:   if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
8510:   if (!mat->rmap->N && !mat->cmap->N) return(0);

8512:   if (!mat->ops->solves) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
8513:   MatCheckPreallocated(mat,1);
8514:   PetscLogEventBegin(MAT_Solves,mat,0,0,0);
8515:   (*mat->ops->solves)(mat,b,x);
8516:   PetscLogEventEnd(MAT_Solves,mat,0,0,0);
8517:   return(0);
8518: }

8520: /*@
8521:    MatIsSymmetric - Test whether a matrix is symmetric

8523:    Collective on Mat

8525:    Input Parameter:
8526: +  A - the matrix to test
8527: -  tol - difference between value and its transpose less than this amount counts as equal (use 0.0 for exact transpose)

8529:    Output Parameters:
8530: .  flg - the result

8532:    Notes: For real numbers MatIsSymmetric() and MatIsHermitian() return identical results

8534:    Level: intermediate

8536:    Concepts: matrix^symmetry

8538: .seealso: MatTranspose(), MatIsTranspose(), MatIsHermitian(), MatIsStructurallySymmetric(), MatSetOption(), MatIsSymmetricKnown()
8539: @*/
8540: PetscErrorCode MatIsSymmetric(Mat A,PetscReal tol,PetscBool  *flg)
8541: {


8548:   if (!A->symmetric_set) {
8549:     if (!A->ops->issymmetric) {
8550:       MatType mattype;
8551:       MatGetType(A,&mattype);
8552:       SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Matrix of type <%s> does not support checking for symmetric",mattype);
8553:     }
8554:     (*A->ops->issymmetric)(A,tol,flg);
8555:     if (!tol) {
8556:       A->symmetric_set = PETSC_TRUE;
8557:       A->symmetric     = *flg;
8558:       if (A->symmetric) {
8559:         A->structurally_symmetric_set = PETSC_TRUE;
8560:         A->structurally_symmetric     = PETSC_TRUE;
8561:       }
8562:     }
8563:   } else if (A->symmetric) {
8564:     *flg = PETSC_TRUE;
8565:   } else if (!tol) {
8566:     *flg = PETSC_FALSE;
8567:   } else {
8568:     if (!A->ops->issymmetric) {
8569:       MatType mattype;
8570:       MatGetType(A,&mattype);
8571:       SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Matrix of type <%s> does not support checking for symmetric",mattype);
8572:     }
8573:     (*A->ops->issymmetric)(A,tol,flg);
8574:   }
8575:   return(0);
8576: }

8578: /*@
8579:    MatIsHermitian - Test whether a matrix is Hermitian

8581:    Collective on Mat

8583:    Input Parameter:
8584: +  A - the matrix to test
8585: -  tol - difference between value and its transpose less than this amount counts as equal (use 0.0 for exact Hermitian)

8587:    Output Parameters:
8588: .  flg - the result

8590:    Level: intermediate

8592:    Concepts: matrix^symmetry

8594: .seealso: MatTranspose(), MatIsTranspose(), MatIsHermitian(), MatIsStructurallySymmetric(), MatSetOption(),
8595:           MatIsSymmetricKnown(), MatIsSymmetric()
8596: @*/
8597: PetscErrorCode MatIsHermitian(Mat A,PetscReal tol,PetscBool  *flg)
8598: {


8605:   if (!A->hermitian_set) {
8606:     if (!A->ops->ishermitian) {
8607:       MatType mattype;
8608:       MatGetType(A,&mattype);
8609:       SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Matrix of type <%s> does not support checking for hermitian",mattype);
8610:     }
8611:     (*A->ops->ishermitian)(A,tol,flg);
8612:     if (!tol) {
8613:       A->hermitian_set = PETSC_TRUE;
8614:       A->hermitian     = *flg;
8615:       if (A->hermitian) {
8616:         A->structurally_symmetric_set = PETSC_TRUE;
8617:         A->structurally_symmetric     = PETSC_TRUE;
8618:       }
8619:     }
8620:   } else if (A->hermitian) {
8621:     *flg = PETSC_TRUE;
8622:   } else if (!tol) {
8623:     *flg = PETSC_FALSE;
8624:   } else {
8625:     if (!A->ops->ishermitian) {
8626:       MatType mattype;
8627:       MatGetType(A,&mattype);
8628:       SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Matrix of type <%s> does not support checking for hermitian",mattype);
8629:     }
8630:     (*A->ops->ishermitian)(A,tol,flg);
8631:   }
8632:   return(0);
8633: }

8635: /*@
8636:    MatIsSymmetricKnown - Checks the flag on the matrix to see if it is symmetric.

8638:    Not Collective

8640:    Input Parameter:
8641: .  A - the matrix to check

8643:    Output Parameters:
8644: +  set - if the symmetric flag is set (this tells you if the next flag is valid)
8645: -  flg - the result

8647:    Level: advanced

8649:    Concepts: matrix^symmetry

8651:    Note: Does not check the matrix values directly, so this may return unknown (set = PETSC_FALSE). Use MatIsSymmetric()
8652:          if you want it explicitly checked

8654: .seealso: MatTranspose(), MatIsTranspose(), MatIsHermitian(), MatIsStructurallySymmetric(), MatSetOption(), MatIsSymmetric()
8655: @*/
8656: PetscErrorCode MatIsSymmetricKnown(Mat A,PetscBool  *set,PetscBool  *flg)
8657: {
8662:   if (A->symmetric_set) {
8663:     *set = PETSC_TRUE;
8664:     *flg = A->symmetric;
8665:   } else {
8666:     *set = PETSC_FALSE;
8667:   }
8668:   return(0);
8669: }

8671: /*@
8672:    MatIsHermitianKnown - Checks the flag on the matrix to see if it is hermitian.

8674:    Not Collective

8676:    Input Parameter:
8677: .  A - the matrix to check

8679:    Output Parameters:
8680: +  set - if the hermitian flag is set (this tells you if the next flag is valid)
8681: -  flg - the result

8683:    Level: advanced

8685:    Concepts: matrix^symmetry

8687:    Note: Does not check the matrix values directly, so this may return unknown (set = PETSC_FALSE). Use MatIsHermitian()
8688:          if you want it explicitly checked

8690: .seealso: MatTranspose(), MatIsTranspose(), MatIsHermitian(), MatIsStructurallySymmetric(), MatSetOption(), MatIsSymmetric()
8691: @*/
8692: PetscErrorCode MatIsHermitianKnown(Mat A,PetscBool  *set,PetscBool  *flg)
8693: {
8698:   if (A->hermitian_set) {
8699:     *set = PETSC_TRUE;
8700:     *flg = A->hermitian;
8701:   } else {
8702:     *set = PETSC_FALSE;
8703:   }
8704:   return(0);
8705: }

8707: /*@
8708:    MatIsStructurallySymmetric - Test whether a matrix is structurally symmetric

8710:    Collective on Mat

8712:    Input Parameter:
8713: .  A - the matrix to test

8715:    Output Parameters:
8716: .  flg - the result

8718:    Level: intermediate

8720:    Concepts: matrix^symmetry

8722: .seealso: MatTranspose(), MatIsTranspose(), MatIsHermitian(), MatIsSymmetric(), MatSetOption()
8723: @*/
8724: PetscErrorCode MatIsStructurallySymmetric(Mat A,PetscBool  *flg)
8725: {

8731:   if (!A->structurally_symmetric_set) {
8732:     if (!A->ops->isstructurallysymmetric) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Matrix does not support checking for structural symmetric");
8733:     (*A->ops->isstructurallysymmetric)(A,&A->structurally_symmetric);

8735:     A->structurally_symmetric_set = PETSC_TRUE;
8736:   }
8737:   *flg = A->structurally_symmetric;
8738:   return(0);
8739: }

8741: /*@
8742:    MatStashGetInfo - Gets how many values are currently in the matrix stash, i.e. need
8743:        to be communicated to other processors during the MatAssemblyBegin/End() process

8745:     Not collective

8747:    Input Parameter:
8748: .   vec - the vector

8750:    Output Parameters:
8751: +   nstash   - the size of the stash
8752: .   reallocs - the number of additional mallocs incurred.
8753: .   bnstash   - the size of the block stash
8754: -   breallocs - the number of additional mallocs incurred.in the block stash

8756:    Level: advanced

8758: .seealso: MatAssemblyBegin(), MatAssemblyEnd(), Mat, MatStashSetInitialSize()

8760: @*/
8761: PetscErrorCode MatStashGetInfo(Mat mat,PetscInt *nstash,PetscInt *reallocs,PetscInt *bnstash,PetscInt *breallocs)
8762: {

8766:   MatStashGetInfo_Private(&mat->stash,nstash,reallocs);
8767:   MatStashGetInfo_Private(&mat->bstash,bnstash,breallocs);
8768:   return(0);
8769: }

8771: /*@C
8772:    MatCreateVecs - Get vector(s) compatible with the matrix, i.e. with the same
8773:      parallel layout

8775:    Collective on Mat

8777:    Input Parameter:
8778: .  mat - the matrix

8780:    Output Parameter:
8781: +   right - (optional) vector that the matrix can be multiplied against
8782: -   left - (optional) vector that the matrix vector product can be stored in

8784:    Notes:
8785:     The blocksize of the returned vectors is determined by the row and column block sizes set with MatSetBlockSizes() or the single blocksize (same for both) set by MatSetBlockSize().

8787:   Notes: These are new vectors which are not owned by the Mat, they should be destroyed in VecDestroy() when no longer needed

8789:   Level: advanced

8791: .seealso: MatCreate(), VecDestroy()
8792: @*/
8793: PetscErrorCode MatCreateVecs(Mat mat,Vec *right,Vec *left)
8794: {

8800:   if (mat->ops->getvecs) {
8801:     (*mat->ops->getvecs)(mat,right,left);
8802:   } else {
8803:     PetscInt rbs,cbs;
8804:     MatGetBlockSizes(mat,&rbs,&cbs);
8805:     if (right) {
8806:       if (mat->cmap->n < 0) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"PetscLayout for columns not yet setup");
8807:       VecCreate(PetscObjectComm((PetscObject)mat),right);
8808:       VecSetSizes(*right,mat->cmap->n,PETSC_DETERMINE);
8809:       VecSetBlockSize(*right,cbs);
8810:       VecSetType(*right,VECSTANDARD);
8811:       PetscLayoutReference(mat->cmap,&(*right)->map);
8812:     }
8813:     if (left) {
8814:       if (mat->rmap->n < 0) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"PetscLayout for rows not yet setup");
8815:       VecCreate(PetscObjectComm((PetscObject)mat),left);
8816:       VecSetSizes(*left,mat->rmap->n,PETSC_DETERMINE);
8817:       VecSetBlockSize(*left,rbs);
8818:       VecSetType(*left,VECSTANDARD);
8819:       PetscLayoutReference(mat->rmap,&(*left)->map);
8820:     }
8821:   }
8822:   return(0);
8823: }

8825: /*@C
8826:    MatFactorInfoInitialize - Initializes a MatFactorInfo data structure
8827:      with default values.

8829:    Not Collective

8831:    Input Parameters:
8832: .    info - the MatFactorInfo data structure


8835:    Notes: The solvers are generally used through the KSP and PC objects, for example
8836:           PCLU, PCILU, PCCHOLESKY, PCICC

8838:    Level: developer

8840: .seealso: MatFactorInfo

8842:     Developer Note: fortran interface is not autogenerated as the f90
8843:     interface defintion cannot be generated correctly [due to MatFactorInfo]

8845: @*/

8847: PetscErrorCode MatFactorInfoInitialize(MatFactorInfo *info)
8848: {

8852:   PetscMemzero(info,sizeof(MatFactorInfo));
8853:   return(0);
8854: }

8856: /*@
8857:    MatFactorSetSchurIS - Set indices corresponding to the Schur complement you wish to have computed

8859:    Collective on Mat

8861:    Input Parameters:
8862: +  mat - the factored matrix
8863: -  is - the index set defining the Schur indices (0-based)

8865:    Notes:  Call MatFactorSolveSchurComplement() or MatFactorSolveSchurComplementTranspose() after this call to solve a Schur complement system.

8867:    You can call MatFactorGetSchurComplement() or MatFactorCreateSchurComplement() after this call.

8869:    Level: developer

8871:    Concepts:

8873: .seealso: MatGetFactor(), MatFactorGetSchurComplement(), MatFactorRestoreSchurComplement(), MatFactorCreateSchurComplement(), MatFactorSolveSchurComplement(),
8874:           MatFactorSolveSchurComplementTranspose(), MatFactorSolveSchurComplement()

8876: @*/
8877: PetscErrorCode MatFactorSetSchurIS(Mat mat,IS is)
8878: {
8879:   PetscErrorCode ierr,(*f)(Mat,IS);

8887:   if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Only for factored matrix");
8888:   PetscObjectQueryFunction((PetscObject)mat,"MatFactorSetSchurIS_C",&f);
8889:   if (!f) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"The selected MatSolverType does not support Schur complement computation. You should use MATSOLVERMUMPS or MATSOLVERMKL_PARDISO");
8890:   if (mat->schur) {
8891:     MatDestroy(&mat->schur);
8892:   }
8893:   (*f)(mat,is);
8894:   if (!mat->schur) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_PLIB,"Schur complement has not been created");
8895:   MatFactorSetUpInPlaceSchur_Private(mat);
8896:   return(0);
8897: }

8899: /*@
8900:   MatFactorCreateSchurComplement - Create a Schur complement matrix object using Schur data computed during the factorization step

8902:    Logically Collective on Mat

8904:    Input Parameters:
8905: +  F - the factored matrix obtained by calling MatGetFactor() from PETSc-MUMPS interface
8906: .  S - location where to return the Schur complement, can be NULL
8907: -  status - the status of the Schur complement matrix, can be NULL

8909:    Notes:
8910:    You must call MatFactorSetSchurIS() before calling this routine.

8912:    The routine provides a copy of the Schur matrix stored within the solver data structures.
8913:    The caller must destroy the object when it is no longer needed.
8914:    If MatFactorInvertSchurComplement() has been called, the routine gets back the inverse.

8916:    Use MatFactorGetSchurComplement() to get access to the Schur complement matrix inside the factored matrix instead of making a copy of it (which this function does)

8918:    Developer Notes: The reason this routine exists is because the representation of the Schur complement within the factor matrix may be different than a standard PETSc
8919:    matrix representation and we normally do not want to use the time or memory to make a copy as a regular PETSc matrix.

8921:    See MatCreateSchurComplement() or MatGetSchurComplement() for ways to create virtual or approximate Schur complements.

8923:    Level: advanced

8925:    References:

8927: .seealso: MatGetFactor(), MatFactorSetSchurIS(), MatFactorGetSchurComplement(), MatFactorSchurStatus
8928: @*/
8929: PetscErrorCode MatFactorCreateSchurComplement(Mat F,Mat* S,MatFactorSchurStatus* status)
8930: {

8937:   if (S) {
8938:     PetscErrorCode (*f)(Mat,Mat*);

8940:     PetscObjectQueryFunction((PetscObject)F,"MatFactorCreateSchurComplement_C",&f);
8941:     if (f) {
8942:       (*f)(F,S);
8943:     } else {
8944:       MatDuplicate(F->schur,MAT_COPY_VALUES,S);
8945:     }
8946:   }
8947:   if (status) *status = F->schur_status;
8948:   return(0);
8949: }

8951: /*@
8952:   MatFactorGetSchurComplement - Gets access to a Schur complement matrix using the current Schur data within a factored matrix

8954:    Logically Collective on Mat

8956:    Input Parameters:
8957: +  F - the factored matrix obtained by calling MatGetFactor()
8958: .  *S - location where to return the Schur complement, can be NULL
8959: -  status - the status of the Schur complement matrix, can be NULL

8961:    Notes:
8962:    You must call MatFactorSetSchurIS() before calling this routine.

8964:    Schur complement mode is currently implemented for sequential matrices.
8965:    The routine returns a the Schur Complement stored within the data strutures of the solver.
8966:    If MatFactorInvertSchurComplement() has previously been called, the returned matrix is actually the inverse of the Schur complement.
8967:    The returned matrix should not be destroyed; the caller should call MatFactorRestoreSchurComplement() when the object is no longer needed.

8969:    Use MatFactorCreateSchurComplement() to create a copy of the Schur complement matrix that is within a factored matrix

8971:    See MatCreateSchurComplement() or MatGetSchurComplement() for ways to create virtual or approximate Schur complements.

8973:    Level: advanced

8975:    References:

8977: .seealso: MatGetFactor(), MatFactorSetSchurIS(), MatFactorRestoreSchurComplement(), MatFactorCreateSchurComplement(), MatFactorSchurStatus
8978: @*/
8979: PetscErrorCode MatFactorGetSchurComplement(Mat F,Mat* S,MatFactorSchurStatus* status)
8980: {
8985:   if (S) *S = F->schur;
8986:   if (status) *status = F->schur_status;
8987:   return(0);
8988: }

8990: /*@
8991:   MatFactorRestoreSchurComplement - Restore the Schur complement matrix object obtained from a call to MatFactorGetSchurComplement

8993:    Logically Collective on Mat

8995:    Input Parameters:
8996: +  F - the factored matrix obtained by calling MatGetFactor()
8997: .  *S - location where the Schur complement is stored
8998: -  status - the status of the Schur complement matrix (see MatFactorSchurStatus)

9000:    Notes:

9002:    Level: advanced

9004:    References:

9006: .seealso: MatGetFactor(), MatFactorSetSchurIS(), MatFactorRestoreSchurComplement(), MatFactorCreateSchurComplement(), MatFactorSchurStatus
9007: @*/
9008: PetscErrorCode MatFactorRestoreSchurComplement(Mat F,Mat* S,MatFactorSchurStatus status)
9009: {

9014:   if (S) {
9016:     *S = NULL;
9017:   }
9018:   F->schur_status = status;
9019:   MatFactorUpdateSchurStatus_Private(F);
9020:   return(0);
9021: }

9023: /*@
9024:   MatFactorSolveSchurComplementTranspose - Solve the transpose of the Schur complement system computed during the factorization step

9026:    Logically Collective on Mat

9028:    Input Parameters:
9029: +  F - the factored matrix obtained by calling MatGetFactor()
9030: .  rhs - location where the right hand side of the Schur complement system is stored
9031: -  sol - location where the solution of the Schur complement system has to be returned

9033:    Notes:
9034:    The sizes of the vectors should match the size of the Schur complement

9036:    Must be called after MatFactorSetSchurIS()

9038:    Level: advanced

9040:    References:

9042: .seealso: MatGetFactor(), MatFactorSetSchurIS(), MatFactorSolveSchurComplement()
9043: @*/
9044: PetscErrorCode MatFactorSolveSchurComplementTranspose(Mat F, Vec rhs, Vec sol)
9045: {

9057:   MatFactorFactorizeSchurComplement(F);
9058:   switch (F->schur_status) {
9059:   case MAT_FACTOR_SCHUR_FACTORED:
9060:     MatSolveTranspose(F->schur,rhs,sol);
9061:     break;
9062:   case MAT_FACTOR_SCHUR_INVERTED:
9063:     MatMultTranspose(F->schur,rhs,sol);
9064:     break;
9065:   default:
9066:     SETERRQ1(PetscObjectComm((PetscObject)F),PETSC_ERR_SUP,"Unhandled MatFactorSchurStatus %D",F->schur_status);
9067:     break;
9068:   }
9069:   return(0);
9070: }

9072: /*@
9073:   MatFactorSolveSchurComplement - Solve the Schur complement system computed during the factorization step

9075:    Logically Collective on Mat

9077:    Input Parameters:
9078: +  F - the factored matrix obtained by calling MatGetFactor()
9079: .  rhs - location where the right hand side of the Schur complement system is stored
9080: -  sol - location where the solution of the Schur complement system has to be returned

9082:    Notes:
9083:    The sizes of the vectors should match the size of the Schur complement

9085:    Must be called after MatFactorSetSchurIS()

9087:    Level: advanced

9089:    References:

9091: .seealso: MatGetFactor(), MatFactorSetSchurIS(), MatFactorSolveSchurComplementTranspose()
9092: @*/
9093: PetscErrorCode MatFactorSolveSchurComplement(Mat F, Vec rhs, Vec sol)
9094: {

9106:   MatFactorFactorizeSchurComplement(F);
9107:   switch (F->schur_status) {
9108:   case MAT_FACTOR_SCHUR_FACTORED:
9109:     MatSolve(F->schur,rhs,sol);
9110:     break;
9111:   case MAT_FACTOR_SCHUR_INVERTED:
9112:     MatMult(F->schur,rhs,sol);
9113:     break;
9114:   default:
9115:     SETERRQ1(PetscObjectComm((PetscObject)F),PETSC_ERR_SUP,"Unhandled MatFactorSchurStatus %D",F->schur_status);
9116:     break;
9117:   }
9118:   return(0);
9119: }

9121: /*@
9122:   MatFactorInvertSchurComplement - Invert the Schur complement matrix computed during the factorization step

9124:    Logically Collective on Mat

9126:    Input Parameters:
9127: +  F - the factored matrix obtained by calling MatGetFactor()

9129:    Notes: Must be called after MatFactorSetSchurIS().

9131:    Call MatFactorGetSchurComplement() or  MatFactorCreateSchurComplement() AFTER this call to actually compute the inverse and get access to it.

9133:    Level: advanced

9135:    References:

9137: .seealso: MatGetFactor(), MatFactorSetSchurIS(), MatFactorGetSchurComplement(), MatFactorCreateSchurComplement()
9138: @*/
9139: PetscErrorCode MatFactorInvertSchurComplement(Mat F)
9140: {

9146:   if (F->schur_status == MAT_FACTOR_SCHUR_INVERTED) return(0);
9147:   MatFactorFactorizeSchurComplement(F);
9148:   MatFactorInvertSchurComplement_Private(F);
9149:   F->schur_status = MAT_FACTOR_SCHUR_INVERTED;
9150:   return(0);
9151: }

9153: /*@
9154:   MatFactorFactorizeSchurComplement - Factorize the Schur complement matrix computed during the factorization step

9156:    Logically Collective on Mat

9158:    Input Parameters:
9159: +  F - the factored matrix obtained by calling MatGetFactor()

9161:    Notes: Must be called after MatFactorSetSchurIS().

9163:    Level: advanced

9165:    References:

9167: .seealso: MatGetFactor(), MatFactorSetSchurIS(), MatFactorInvertSchurComplement()
9168: @*/
9169: PetscErrorCode MatFactorFactorizeSchurComplement(Mat F)
9170: {

9176:   if (F->schur_status == MAT_FACTOR_SCHUR_INVERTED || F->schur_status == MAT_FACTOR_SCHUR_FACTORED) return(0);
9177:   MatFactorFactorizeSchurComplement_Private(F);
9178:   F->schur_status = MAT_FACTOR_SCHUR_FACTORED;
9179:   return(0);
9180: }

9182: /*@
9183:    MatPtAP - Creates the matrix product C = P^T * A * P

9185:    Neighbor-wise Collective on Mat

9187:    Input Parameters:
9188: +  A - the matrix
9189: .  P - the projection matrix
9190: .  scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
9191: -  fill - expected fill as ratio of nnz(C)/(nnz(A) + nnz(P)), use PETSC_DEFAULT if you do not have a good estimate
9192:           if the result is a dense matrix this is irrelevent

9194:    Output Parameters:
9195: .  C - the product matrix

9197:    Notes:
9198:    C will be created and must be destroyed by the user with MatDestroy().

9200:    This routine is currently only implemented for pairs of sequential dense matrices, AIJ matrices and classes
9201:    which inherit from AIJ.

9203:    Level: intermediate

9205: .seealso: MatPtAPSymbolic(), MatPtAPNumeric(), MatMatMult(), MatRARt()
9206: @*/
9207: PetscErrorCode MatPtAP(Mat A,Mat P,MatReuse scall,PetscReal fill,Mat *C)
9208: {
9210:   PetscErrorCode (*fA)(Mat,Mat,MatReuse,PetscReal,Mat*);
9211:   PetscErrorCode (*fP)(Mat,Mat,MatReuse,PetscReal,Mat*);
9212:   PetscErrorCode (*ptap)(Mat,Mat,MatReuse,PetscReal,Mat*)=NULL;

9217:   MatCheckPreallocated(A,1);
9218:   if (scall == MAT_INPLACE_MATRIX) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Inplace product not supported");
9219:   if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9220:   if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9223:   MatCheckPreallocated(P,2);
9224:   if (!P->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9225:   if (P->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");

9227:   if (A->rmap->N != A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix A must be square, %D != %D",A->rmap->N,A->cmap->N);
9228:   if (P->rmap->N != A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",P->rmap->N,A->cmap->N);
9229:   if (fill == PETSC_DEFAULT || fill == PETSC_DECIDE) fill = 2.0;
9230:   if (fill < 1.0) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Expected fill=%g must be >= 1.0",(double)fill);

9232:   if (scall == MAT_REUSE_MATRIX) {

9236:     PetscLogEventBegin(MAT_PtAP,A,P,0,0);
9237:     PetscLogEventBegin(MAT_PtAPNumeric,A,P,0,0);
9238:     (*(*C)->ops->ptapnumeric)(A,P,*C);
9239:     PetscLogEventEnd(MAT_PtAPNumeric,A,P,0,0);
9240:     PetscLogEventEnd(MAT_PtAP,A,P,0,0);
9241:     return(0);
9242:   }

9244:   if (fill == PETSC_DEFAULT || fill == PETSC_DECIDE) fill = 2.0;
9245:   if (fill < 1.0) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Expected fill=%g must be >= 1.0",(double)fill);

9247:   fA = A->ops->ptap;
9248:   fP = P->ops->ptap;
9249:   if (fP == fA) {
9250:     if (!fA) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"MatPtAP not supported for A of type %s",((PetscObject)A)->type_name);
9251:     ptap = fA;
9252:   } else {
9253:     /* dispatch based on the type of A and P from their PetscObject's PetscFunctionLists. */
9254:     char ptapname[256];
9255:     PetscStrcpy(ptapname,"MatPtAP_");
9256:     PetscStrcat(ptapname,((PetscObject)A)->type_name);
9257:     PetscStrcat(ptapname,"_");
9258:     PetscStrcat(ptapname,((PetscObject)P)->type_name);
9259:     PetscStrcat(ptapname,"_C"); /* e.g., ptapname = "MatPtAP_seqdense_seqaij_C" */
9260:     PetscObjectQueryFunction((PetscObject)P,ptapname,&ptap);
9261:     if (!ptap) SETERRQ3(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_INCOMP,"MatPtAP requires A, %s, to be compatible with P, %s (Misses composed function %s)",((PetscObject)A)->type_name,((PetscObject)P)->type_name,ptapname);
9262:   }

9264:   PetscLogEventBegin(MAT_PtAP,A,P,0,0);
9265:   (*ptap)(A,P,scall,fill,C);
9266:   PetscLogEventEnd(MAT_PtAP,A,P,0,0);
9267:   return(0);
9268: }

9270: /*@
9271:    MatPtAPNumeric - Computes the matrix product C = P^T * A * P

9273:    Neighbor-wise Collective on Mat

9275:    Input Parameters:
9276: +  A - the matrix
9277: -  P - the projection matrix

9279:    Output Parameters:
9280: .  C - the product matrix

9282:    Notes:
9283:    C must have been created by calling MatPtAPSymbolic and must be destroyed by
9284:    the user using MatDeatroy().

9286:    This routine is currently only implemented for pairs of AIJ matrices and classes
9287:    which inherit from AIJ.  C will be of type MATAIJ.

9289:    Level: intermediate

9291: .seealso: MatPtAP(), MatPtAPSymbolic(), MatMatMultNumeric()
9292: @*/
9293: PetscErrorCode MatPtAPNumeric(Mat A,Mat P,Mat C)
9294: {

9300:   if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9301:   if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9304:   MatCheckPreallocated(P,2);
9305:   if (!P->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9306:   if (P->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9309:   MatCheckPreallocated(C,3);
9310:   if (C->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9311:   if (P->cmap->N!=C->rmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",P->cmap->N,C->rmap->N);
9312:   if (P->rmap->N!=A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",P->rmap->N,A->cmap->N);
9313:   if (A->rmap->N!=A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix 'A' must be square, %D != %D",A->rmap->N,A->cmap->N);
9314:   if (P->cmap->N!=C->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",P->cmap->N,C->cmap->N);
9315:   MatCheckPreallocated(A,1);

9317:   if (!C->ops->ptapnumeric) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_WRONGSTATE,"MatPtAPNumeric implementation is missing. You should call MatPtAPSymbolic first");
9318:   PetscLogEventBegin(MAT_PtAPNumeric,A,P,0,0);
9319:   (*C->ops->ptapnumeric)(A,P,C);
9320:   PetscLogEventEnd(MAT_PtAPNumeric,A,P,0,0);
9321:   return(0);
9322: }

9324: /*@
9325:    MatPtAPSymbolic - Creates the (i,j) structure of the matrix product C = P^T * A * P

9327:    Neighbor-wise Collective on Mat

9329:    Input Parameters:
9330: +  A - the matrix
9331: -  P - the projection matrix

9333:    Output Parameters:
9334: .  C - the (i,j) structure of the product matrix

9336:    Notes:
9337:    C will be created and must be destroyed by the user with MatDestroy().

9339:    This routine is currently only implemented for pairs of SeqAIJ matrices and classes
9340:    which inherit from SeqAIJ.  C will be of type MATSEQAIJ.  The product is computed using
9341:    this (i,j) structure by calling MatPtAPNumeric().

9343:    Level: intermediate

9345: .seealso: MatPtAP(), MatPtAPNumeric(), MatMatMultSymbolic()
9346: @*/
9347: PetscErrorCode MatPtAPSymbolic(Mat A,Mat P,PetscReal fill,Mat *C)
9348: {

9354:   if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9355:   if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9356:   if (fill <1.0) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Expected fill=%g must be >= 1.0",(double)fill);
9359:   MatCheckPreallocated(P,2);
9360:   if (!P->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9361:   if (P->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");

9364:   if (P->rmap->N!=A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",P->rmap->N,A->cmap->N);
9365:   if (A->rmap->N!=A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix 'A' must be square, %D != %D",A->rmap->N,A->cmap->N);
9366:   MatCheckPreallocated(A,1);

9368:   if (!A->ops->ptapsymbolic) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"MatType %s",((PetscObject)A)->type_name);
9369:   PetscLogEventBegin(MAT_PtAPSymbolic,A,P,0,0);
9370:   (*A->ops->ptapsymbolic)(A,P,fill,C);
9371:   PetscLogEventEnd(MAT_PtAPSymbolic,A,P,0,0);

9373:   /* MatSetBlockSize(*C,A->rmap->bs); NO! this is not always true -ma */
9374:   return(0);
9375: }

9377: /*@
9378:    MatRARt - Creates the matrix product C = R * A * R^T

9380:    Neighbor-wise Collective on Mat

9382:    Input Parameters:
9383: +  A - the matrix
9384: .  R - the projection matrix
9385: .  scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
9386: -  fill - expected fill as ratio of nnz(C)/nnz(A), use PETSC_DEFAULT if you do not have a good estimate
9387:           if the result is a dense matrix this is irrelevent

9389:    Output Parameters:
9390: .  C - the product matrix

9392:    Notes:
9393:    C will be created and must be destroyed by the user with MatDestroy().

9395:    This routine is currently only implemented for pairs of AIJ matrices and classes
9396:    which inherit from AIJ. Due to PETSc sparse matrix block row distribution among processes,
9397:    parallel MatRARt is implemented via explicit transpose of R, which could be very expensive.
9398:    We recommend using MatPtAP().

9400:    Level: intermediate

9402: .seealso: MatRARtSymbolic(), MatRARtNumeric(), MatMatMult(), MatPtAP()
9403: @*/
9404: PetscErrorCode MatRARt(Mat A,Mat R,MatReuse scall,PetscReal fill,Mat *C)
9405: {

9411:   if (scall == MAT_INPLACE_MATRIX) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Inplace product not supported");
9412:   if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9413:   if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9416:   MatCheckPreallocated(R,2);
9417:   if (!R->assembled) SETERRQ(