Actual source code: matrix.c

petsc-master 2017-07-24

  2: /*
  3:    This is where the abstract matrix operations are defined
  4: */

  6:  #include <petsc/private/matimpl.h>
  7:  #include <petsc/private/isimpl.h>
  8:  #include <petsc/private/vecimpl.h>

 10: /* Logging support */
 11: PetscClassId MAT_CLASSID;
 12: PetscClassId MAT_COLORING_CLASSID;
 13: PetscClassId MAT_FDCOLORING_CLASSID;
 14: PetscClassId MAT_TRANSPOSECOLORING_CLASSID;

 16: PetscLogEvent MAT_Mult, MAT_Mults, MAT_MultConstrained, MAT_MultAdd, MAT_MultTranspose;
 17: PetscLogEvent MAT_MultTransposeConstrained, MAT_MultTransposeAdd, MAT_Solve, MAT_Solves, MAT_SolveAdd, MAT_SolveTranspose, MAT_MatSolve;
 18: PetscLogEvent MAT_SolveTransposeAdd, MAT_SOR, MAT_ForwardSolve, MAT_BackwardSolve, MAT_LUFactor, MAT_LUFactorSymbolic;
 19: PetscLogEvent MAT_LUFactorNumeric, MAT_CholeskyFactor, MAT_CholeskyFactorSymbolic, MAT_CholeskyFactorNumeric, MAT_ILUFactor;
 20: PetscLogEvent MAT_ILUFactorSymbolic, MAT_ICCFactorSymbolic, MAT_Copy, MAT_Convert, MAT_Scale, MAT_AssemblyBegin;
 21: PetscLogEvent MAT_AssemblyEnd, MAT_SetValues, MAT_GetValues, MAT_GetRow, MAT_GetRowIJ, MAT_CreateSubMats, MAT_GetOrdering, MAT_RedundantMat, MAT_GetSeqNonzeroStructure;
 22: PetscLogEvent MAT_IncreaseOverlap, MAT_Partitioning, MAT_Coarsen, MAT_ZeroEntries, MAT_Load, MAT_View, MAT_AXPY, MAT_FDColoringCreate;
 23: PetscLogEvent MAT_FDColoringSetUp, MAT_FDColoringApply,MAT_Transpose,MAT_FDColoringFunction, MAT_CreateSubMat;
 24: PetscLogEvent MAT_TransposeColoringCreate;
 25: PetscLogEvent MAT_MatMult, MAT_MatMultSymbolic, MAT_MatMultNumeric;
 26: PetscLogEvent MAT_PtAP, MAT_PtAPSymbolic, MAT_PtAPNumeric,MAT_RARt, MAT_RARtSymbolic, MAT_RARtNumeric;
 27: PetscLogEvent MAT_MatTransposeMult, MAT_MatTransposeMultSymbolic, MAT_MatTransposeMultNumeric;
 28: PetscLogEvent MAT_TransposeMatMult, MAT_TransposeMatMultSymbolic, MAT_TransposeMatMultNumeric;
 29: PetscLogEvent MAT_MatMatMult, MAT_MatMatMultSymbolic, MAT_MatMatMultNumeric;
 30: PetscLogEvent MAT_MultHermitianTranspose,MAT_MultHermitianTransposeAdd;
 31: PetscLogEvent MAT_Getsymtranspose, MAT_Getsymtransreduced, MAT_Transpose_SeqAIJ, MAT_GetBrowsOfAcols;
 32: PetscLogEvent MAT_GetBrowsOfAocols, MAT_Getlocalmat, MAT_Getlocalmatcondensed, MAT_Seqstompi, MAT_Seqstompinum, MAT_Seqstompisym;
 33: PetscLogEvent MAT_Applypapt, MAT_Applypapt_numeric, MAT_Applypapt_symbolic, MAT_GetSequentialNonzeroStructure;
 34: PetscLogEvent MAT_GetMultiProcBlock;
 35: PetscLogEvent MAT_CUSPCopyToGPU, MAT_CUSPARSECopyToGPU, MAT_SetValuesBatch, MAT_SetValuesBatchI, MAT_SetValuesBatchII, MAT_SetValuesBatchIII, MAT_SetValuesBatchIV;
 36: PetscLogEvent MAT_ViennaCLCopyToGPU;
 37: PetscLogEvent MAT_Merge,MAT_Residual,MAT_SetRandom;
 38: PetscLogEvent MATCOLORING_Apply,MATCOLORING_Comm,MATCOLORING_Local,MATCOLORING_ISCreate,MATCOLORING_SetUp,MATCOLORING_Weights;

 40: const char *const MatFactorTypes[] = {"NONE","LU","CHOLESKY","ILU","ICC","ILUDT","MatFactorType","MAT_FACTOR_",0};

 42: /*@
 43:    MatSetRandom - Sets all components of a matrix to random numbers. For sparse matrices that have been preallocated, it randomly selects appropriate locations.

 45:    Logically Collective on Mat

 47:    Input Parameters:
 48: +  x  - the matrix
 49: -  rctx - the random number context, formed by PetscRandomCreate(), or NULL,
 50:           in which case one is created internally.

 52:    Output Parameter:
 53: .  x  - the matrix

 55:    Example of Usage:
 56: .vb
 57:      PetscRandomCreate(PETSC_COMM_WORLD,&rctx);
 58:      MatSetRandom(x,rctx);
 59:      PetscRandomDestroy(&rctx);
 60: .ve

 62:    Level: intermediate

 64:    Concepts: matrix^setting to random
 65:    Concepts: random^matrix

 67: .seealso: MatZeroEntries(), MatSetValues(), PetscRandomCreate(), PetscRandomDestroy()
 68: @*/
 69: PetscErrorCode MatSetRandom(Mat x,PetscRandom rctx)
 70: {
 72:   PetscRandom    randObj = NULL;


 79:   if (!rctx) {
 80:     MPI_Comm comm;
 81:     PetscObjectGetComm((PetscObject)x,&comm);
 82:     PetscRandomCreate(comm,&randObj);
 83:     PetscRandomSetFromOptions(randObj);
 84:     rctx = randObj;
 85:   }

 87:   PetscLogEventBegin(MAT_SetRandom,x,rctx,0,0);
 88:   (*x->ops->setrandom)(x,rctx);
 89:   PetscLogEventEnd(MAT_SetRandom,x,rctx,0,0);

 91:   x->assembled = PETSC_TRUE;
 92:   PetscRandomDestroy(&randObj);
 93:   return(0);
 94: }

 96: /*@
 97:    MatFactorGetErrorZeroPivot - returns the pivot value that was determined to be zero and the row it occurred in

 99:    Logically Collective on Mat

101:    Input Parameter:
102: .  mat - the factored matrix

104:    Output Parameters:
105: +  pivot - the pivot value computed
106: -  row - the row in which the zero pivot occurred. Note that this row must be interpreted carefully due to row reorderings and which processes
107:          share the matrix

109:    Level: advanced

111:    Notes: This routine does not work for factorizations done with external packages.
112:    This routine should only be called if MatFactorGetError() returns a value of MAT_FACTOR_NUMERIC_ZEROPIVOT.

114:    This can be called on non-factored matrices, for example matrices used in SOR.

116: .seealso: MatZeroEntries(), MatFactor(), MatGetFactor(), MatFactorSymbolic(), MatFactorClearError(), MatFactorGetErrorZeroPivot()
117: @*/
118: PetscErrorCode MatFactorGetErrorZeroPivot(Mat mat,PetscReal *pivot,PetscInt *row)
119: {
122:   *pivot = mat->factorerror_zeropivot_value;
123:   *row   = mat->factorerror_zeropivot_row;
124:   return(0);
125: }

127: /*@
128:    MatFactorGetError - gets the error code from a factorization

130:    Logically Collective on Mat

132:    Input Parameters:
133: .  mat - the factored matrix

135:    Output Parameter:
136: .  err  - the error code

138:    Level: advanced

140:    Notes: This can be called on non-factored matrices, for example matrices used in SOR.

142: .seealso: MatZeroEntries(), MatFactor(), MatGetFactor(), MatFactorSymbolic(), MatFactorClearError(), MatFactorGetErrorZeroPivot()
143: @*/
144: PetscErrorCode MatFactorGetError(Mat mat,MatFactorError *err)
145: {
148:   *err = mat->factorerrortype;
149:   return(0);
150: }
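
/* Illustrative usage sketch (not part of the original PETSc source): factor a matrix with
   the built-in (sequential AIJ) LU solver and query the factorization error afterwards; on
   a zero pivot, report the pivot value and the row it occurred in. "A" is assumed to be an
   assembled SeqAIJ matrix supplied by the caller. */
static PetscErrorCode ExampleCheckFactorError(Mat A)
{
  PetscErrorCode ierr;
  Mat            F;
  IS             rowperm,colperm;
  MatFactorInfo  info;
  MatFactorError err;

  ierr = MatGetOrdering(A,MATORDERINGNATURAL,&rowperm,&colperm);CHKERRQ(ierr);
  ierr = MatGetFactor(A,MATSOLVERPETSC,MAT_FACTOR_LU,&F);CHKERRQ(ierr);
  ierr = MatFactorInfoInitialize(&info);CHKERRQ(ierr);
  ierr = MatLUFactorSymbolic(F,A,rowperm,colperm,&info);CHKERRQ(ierr);
  ierr = MatLUFactorNumeric(F,A,&info);CHKERRQ(ierr);
  ierr = MatFactorGetError(F,&err);CHKERRQ(ierr);
  if (err == MAT_FACTOR_NUMERIC_ZEROPIVOT) {
    PetscReal pivot;
    PetscInt  row;
    ierr = MatFactorGetErrorZeroPivot(F,&pivot,&row);CHKERRQ(ierr);
    ierr = PetscPrintf(PETSC_COMM_SELF,"zero pivot %g detected in (permuted) row %D\n",(double)pivot,row);CHKERRQ(ierr);
    ierr = MatFactorClearError(F);CHKERRQ(ierr);
  }
  ierr = ISDestroy(&rowperm);CHKERRQ(ierr);
  ierr = ISDestroy(&colperm);CHKERRQ(ierr);
  ierr = MatDestroy(&F);CHKERRQ(ierr);
  return 0;
}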

152: /*@
153:    MatFactorClearError - clears the error code in a factorization

155:    Logically Collective on Mat

157:    Input Parameter:
158: .  mat - the factored matrix

160:    Level: developer

162:    Notes: This can be called on non-factored matrices, for example matrices used in SOR.

164: .seealso: MatZeroEntries(), MatFactor(), MatGetFactor(), MatFactorSymbolic(), MatFactorGetError(), MatFactorGetErrorZeroPivot()
165: @*/
166: PetscErrorCode MatFactorClearError(Mat mat)
167: {
170:   mat->factorerrortype             = MAT_FACTOR_NOERROR;
171:   mat->factorerror_zeropivot_value = 0.0;
172:   mat->factorerror_zeropivot_row   = 0;
173:   return(0);
174: }


177: /*@
178:       MatFindNonzeroRows - Locate all rows that are not completely zero in the matrix

180:   Input Parameter:
181: .    A  - the matrix

183:   Output Parameter:
184: .    keptrows - the rows that are not completely zero

186:   Notes: keptrows is set to NULL if all rows are nonzero.

188:   Level: intermediate

190:  @*/
191: PetscErrorCode MatFindNonzeroRows(Mat mat,IS *keptrows)
192: {

197:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
198:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
199:   if (!mat->ops->findnonzerorows) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Not coded for this matrix type");
200:   (*mat->ops->findnonzerorows)(mat,keptrows);
201:   return(0);
202: }

204: /*@
205:       MatFindZeroRows - Locate all rows that are completely zero in the matrix

207:   Input Parameter:
208: .    A  - the matrix

210:   Output Parameter:
211: .    zerorows - the rows that are completely zero

213:   Notes: zerorows is set to NULL if no rows are zero.

215:   Level: intermediate

217:  @*/
218: PetscErrorCode MatFindZeroRows(Mat mat,IS *zerorows)
219: {
221:   IS keptrows;
222:   PetscInt m, n;


227:   MatFindNonzeroRows(mat, &keptrows);
228:   /* MatFindNonzeroRows sets keptrows to NULL if there are no zero rows.
229:      In keeping with this convention, we set zerorows to NULL if there are no zero
230:      rows. */
231:   if (keptrows == NULL) {
232:     *zerorows = NULL;
233:   } else {
234:     MatGetOwnershipRange(mat,&m,&n);
235:     ISComplement(keptrows,m,n,zerorows);
236:     ISDestroy(&keptrows);
237:   }
238:   return(0);
239: }
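
/* Illustrative usage sketch (not part of the original PETSc source): count the locally
   owned zero rows of an assembled matrix. MatFindZeroRows() returns a NULL IS when no
   rows are zero, so the result must be tested before it is used or destroyed. */
static PetscErrorCode ExampleCountZeroRows(Mat A,PetscInt *nzero)
{
  PetscErrorCode ierr;
  IS             zerorows;

  *nzero = 0;
  ierr   = MatFindZeroRows(A,&zerorows);CHKERRQ(ierr);
  if (zerorows) {
    ierr = ISGetLocalSize(zerorows,nzero);CHKERRQ(ierr);
    ierr = ISDestroy(&zerorows);CHKERRQ(ierr);
  }
  return 0;
}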

241: /*@
242:    MatGetDiagonalBlock - Returns the part of the matrix associated with the on-process coupling

244:    Not Collective

246:    Input Parameters:
247: .   A - the matrix

249:    Output Parameters:
250: .   a - the diagonal part (which is a SEQUENTIAL matrix)

252:    Notes: see the manual page for MatCreateAIJ() for more information on the "diagonal part" of the matrix.
253:           Use caution, as the reference count on the returned matrix is not incremented and it is used as
254:           part of the containing MPI Mat's normal operation.

256:    Level: advanced

258: @*/
259: PetscErrorCode MatGetDiagonalBlock(Mat A,Mat *a)
260: {

267:   if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
268:   if (!A->ops->getdiagonalblock) {
269:     PetscMPIInt size;
270:     MPI_Comm_size(PetscObjectComm((PetscObject)A),&size);
271:     if (size == 1) {
272:       *a = A;
273:       return(0);
274:     } else SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Not coded for this matrix type");
275:   }
276:   (*A->ops->getdiagonalblock)(A,a);
277:   return(0);
278: }
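
/* Illustrative usage sketch (not part of the original PETSc source): obtain the on-process
   (sequential) diagonal block of a parallel matrix and report its size from every rank.
   The returned block is borrowed: its reference count is not incremented, so it must not
   be destroyed here. */
static PetscErrorCode ExampleReportDiagonalBlock(Mat A)
{
  PetscErrorCode ierr;
  MPI_Comm       comm;
  Mat            Ad;
  PetscInt       m,n;

  ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
  ierr = MatGetDiagonalBlock(A,&Ad);CHKERRQ(ierr);
  ierr = MatGetSize(Ad,&m,&n);CHKERRQ(ierr);
  ierr = PetscSynchronizedPrintf(comm,"diagonal block is %D x %D\n",m,n);CHKERRQ(ierr);
  ierr = PetscSynchronizedFlush(comm,PETSC_STDOUT);CHKERRQ(ierr);
  return 0;
}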

280: /*@
281:    MatGetTrace - Gets the trace of a matrix, i.e. the sum of the diagonal entries.

283:    Collective on Mat

285:    Input Parameters:
286: .  mat - the matrix

288:    Output Parameter:
289: .   trace - the sum of the diagonal entries

291:    Level: advanced

293: @*/
294: PetscErrorCode MatGetTrace(Mat mat,PetscScalar *trace)
295: {
297:   Vec            diag;

300:   MatCreateVecs(mat,&diag,NULL);
301:   MatGetDiagonal(mat,diag);
302:   VecSum(diag,trace);
303:   VecDestroy(&diag);
304:   return(0);
305: }
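
/* Illustrative usage sketch (not part of the original PETSc source): print the trace of an
   assembled matrix on its communicator; with complex scalars only the real part is shown. */
static PetscErrorCode ExamplePrintTrace(Mat A)
{
  PetscErrorCode ierr;
  PetscScalar    trace;

  ierr = MatGetTrace(A,&trace);CHKERRQ(ierr);
  ierr = PetscPrintf(PetscObjectComm((PetscObject)A),"trace = %g\n",(double)PetscRealPart(trace));CHKERRQ(ierr);
  return 0;
}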

307: /*@
308:    MatRealPart - Zeros out the imaginary part of the matrix

310:    Logically Collective on Mat

312:    Input Parameters:
313: .  mat - the matrix

315:    Level: advanced


318: .seealso: MatImaginaryPart()
319: @*/
320: PetscErrorCode MatRealPart(Mat mat)
321: {

327:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
328:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
329:   if (!mat->ops->realpart) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
330:   MatCheckPreallocated(mat,1);
331:   (*mat->ops->realpart)(mat);
332: #if defined(PETSC_HAVE_CUSP)
333:   if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
334:     mat->valid_GPU_matrix = PETSC_CUSP_CPU;
335:   }
336: #elif defined(PETSC_HAVE_VIENNACL)
337:   if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
338:     mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
339:   }
340: #elif defined(PETSC_HAVE_VECCUDA)
341:   if (mat->valid_GPU_matrix != PETSC_CUDA_UNALLOCATED) {
342:     mat->valid_GPU_matrix = PETSC_CUDA_CPU;
343:   }
344: #endif
345:   return(0);
346: }

348: /*@C
349:    MatGetGhosts - Get the global index of all ghost nodes defined by the sparse matrix

351:    Collective on Mat

353:    Input Parameter:
354: .  mat - the matrix

356:    Output Parameters:
357: +   nghosts - number of ghosts (note for BAIJ matrices there is one ghost for each block)
358: -   ghosts - the global indices of the ghost points

360:    Notes: the nghosts and ghosts are suitable to pass into VecCreateGhost()

362:    Level: advanced

364: @*/
365: PetscErrorCode MatGetGhosts(Mat mat,PetscInt *nghosts,const PetscInt *ghosts[])
366: {

372:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
373:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
374:   if (!mat->ops->getghosts) {
375:     if (nghosts) *nghosts = 0;
376:     if (ghosts) *ghosts = 0;
377:   } else {
378:     (*mat->ops->getghosts)(mat,nghosts,ghosts);
379:   }
380:   return(0);
381: }


384: /*@
385:    MatImaginaryPart - Moves the imaginary part of the matrix to the real part and zeros the imaginary part

387:    Logically Collective on Mat

389:    Input Parameters:
390: .  mat - the matrix

392:    Level: advanced


395: .seealso: MatRealPart()
396: @*/
397: PetscErrorCode MatImaginaryPart(Mat mat)
398: {

404:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
405:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
406:   if (!mat->ops->imaginarypart) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
407:   MatCheckPreallocated(mat,1);
408:   (*mat->ops->imaginarypart)(mat);
409: #if defined(PETSC_HAVE_CUSP)
410:   if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
411:     mat->valid_GPU_matrix = PETSC_CUSP_CPU;
412:   }
413: #elif defined(PETSC_HAVE_VIENNACL)
414:   if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
415:     mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
416:   }
417: #elif defined(PETSC_HAVE_VECCUDA)
418:   if (mat->valid_GPU_matrix != PETSC_CUDA_UNALLOCATED) {
419:     mat->valid_GPU_matrix = PETSC_CUDA_CPU;
420:   }
421: #endif
422:   return(0);
423: }

425: /*@
426:    MatMissingDiagonal - Determine if sparse matrix is missing a diagonal entry (or block entry for BAIJ matrices)

428:    Not Collective

430:    Input Parameter:
431: .  mat - the matrix

433:    Output Parameters:
434: +  missing - is any diagonal missing
435: -  dd - first diagonal entry that is missing (optional) on this process

437:    Level: advanced


440: .seealso: MatRealPart()
441: @*/
442: PetscErrorCode MatMissingDiagonal(Mat mat,PetscBool *missing,PetscInt *dd)
443: {

449:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
450:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
451:   if (!mat->ops->missingdiagonal) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
452:   (*mat->ops->missingdiagonal)(mat,missing,dd);
453:   return(0);
454: }

456: /*@C
457:    MatGetRow - Gets a row of a matrix.  You MUST call MatRestoreRow()
458:    for each row that you get to ensure that your application does
459:    not leak memory.

461:    Not Collective

463:    Input Parameters:
464: +  mat - the matrix
465: -  row - the row to get

467:    Output Parameters:
468: +  ncols -  if not NULL, the number of nonzeros in the row
469: .  cols - if not NULL, the column numbers
470: -  vals - if not NULL, the values

472:    Notes:
473:    This routine is provided for people who need to have direct access
474:    to the structure of a matrix.  We hope that we provide enough
475:    high-level matrix routines that few users will need it.

477:    MatGetRow() always returns 0-based column indices, regardless of
478:    whether the internal representation is 0-based (default) or 1-based.

480:    For better efficiency, set cols and/or vals to NULL if you do
481:    not wish to extract these quantities.

483:    The user can only examine the values extracted with MatGetRow();
484:    the values cannot be altered.  To change the matrix entries, one
485:    must use MatSetValues().

487:    You can only have one call to MatGetRow() outstanding for a particular
488:    matrix at a time, per processor. MatGetRow() can only obtain rows
489:    associated with the given processor, it cannot get rows from the
490:    other processors; for that we suggest using MatCreateSubMatrices(), then
491:    MatGetRow() on the submatrix. The row index passed to MatGetRow()
492:    is the global row number.

494:    Fortran Notes:
495:    The calling sequence from Fortran is
496: .vb
497:    MatGetRow(matrix,row,ncols,cols,values,ierr)
498:          Mat     matrix (input)
499:          integer row    (input)
500:          integer ncols  (output)
501:          integer cols(maxcols) (output)
502:          double precision (or double complex) values(maxcols) output
503: .ve
504:    where maxcols >= maximum nonzeros in any row of the matrix.


507:    Caution:
508:    Do not try to change the contents of the output arrays (cols and vals).
509:    In some cases, this may corrupt the matrix.

511:    Level: advanced

513:    Concepts: matrices^row access

515: .seealso: MatRestoreRow(), MatSetValues(), MatGetValues(), MatCreateSubMatrices(), MatGetDiagonal()
516: @*/
517: PetscErrorCode MatGetRow(Mat mat,PetscInt row,PetscInt *ncols,const PetscInt *cols[],const PetscScalar *vals[])
518: {
520:   PetscInt       incols;

525:   if (!mat->assembled) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
526:   if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
527:   if (!mat->ops->getrow) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
528:   MatCheckPreallocated(mat,1);
529:   PetscLogEventBegin(MAT_GetRow,mat,0,0,0);
530:   (*mat->ops->getrow)(mat,row,&incols,(PetscInt**)cols,(PetscScalar**)vals);
531:   if (ncols) *ncols = incols;
532:   PetscLogEventEnd(MAT_GetRow,mat,0,0,0);
533:   return(0);
534: }
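
/* Illustrative usage sketch (not part of the original PETSc source): sum |a_ij| over every
   locally owned row with MatGetRow()/MatRestoreRow(). Only one row may be held at a time
   and each MatGetRow() must be matched by MatRestoreRow(); the column indices are not
   needed here, so NULL is passed for cols. */
static PetscErrorCode ExamplePrintRowSums(Mat A)
{
  PetscErrorCode    ierr;
  MPI_Comm          comm;
  PetscInt          rstart,rend,row,ncols,j;
  const PetscScalar *vals;
  PetscReal         rowsum;

  ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
  ierr = MatGetOwnershipRange(A,&rstart,&rend);CHKERRQ(ierr);
  for (row=rstart; row<rend; row++) {
    ierr   = MatGetRow(A,row,&ncols,NULL,&vals);CHKERRQ(ierr);
    rowsum = 0.0;
    for (j=0; j<ncols; j++) rowsum += PetscAbsScalar(vals[j]);
    ierr = PetscSynchronizedPrintf(comm,"row %D: sum |a_ij| = %g\n",row,(double)rowsum);CHKERRQ(ierr);
    ierr = MatRestoreRow(A,row,&ncols,NULL,&vals);CHKERRQ(ierr);
  }
  ierr = PetscSynchronizedFlush(comm,PETSC_STDOUT);CHKERRQ(ierr);
  return 0;
}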

536: /*@
537:    MatConjugate - replaces the matrix values with their complex conjugates

539:    Logically Collective on Mat

541:    Input Parameters:
542: .  mat - the matrix

544:    Level: advanced

546: .seealso:  VecConjugate()
547: @*/
548: PetscErrorCode MatConjugate(Mat mat)
549: {
550: #if defined(PETSC_USE_COMPLEX)

555:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
556:   if (!mat->ops->conjugate) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Not provided for this matrix format, send email to petsc-maint@mcs.anl.gov");
557:   (*mat->ops->conjugate)(mat);
558: #if defined(PETSC_HAVE_CUSP)
559:   if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
560:     mat->valid_GPU_matrix = PETSC_CUSP_CPU;
561:   }
562: #elif defined(PETSC_HAVE_VIENNACL)
563:   if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
564:     mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
565:   }
566: #elif defined(PETSC_HAVE_VECCUDA)
567:   if (mat->valid_GPU_matrix != PETSC_CUDA_UNALLOCATED) {
568:     mat->valid_GPU_matrix = PETSC_CUDA_CPU;
569:   }
570: #endif
571:   return(0);
572: #else
573:   return 0;
574: #endif
575: }

577: /*@C
578:    MatRestoreRow - Frees any temporary space allocated by MatGetRow().

580:    Not Collective

582:    Input Parameters:
583: +  mat - the matrix
584: .  row - the row to get
585: .  ncols, cols - the number of nonzeros and their columns
586: -  vals - if nonzero the column values

588:    Notes:
589:    This routine should be called after you have finished examining the entries.

591:    This routine zeros out ncols, cols, and vals. This is to prevent accidental
592:    use of the array after it has been restored. If you pass NULL, it will
593:    not zero the pointers.  Use of cols or vals after MatRestoreRow is invalid.

595:    Fortran Notes:
596:    The calling sequence from Fortran is
597: .vb
598:    MatRestoreRow(matrix,row,ncols,cols,values,ierr)
599:       Mat     matrix (input)
600:       integer row    (input)
601:       integer ncols  (output)
602:       integer cols(maxcols) (output)
603:       double precision (or double complex) values(maxcols) output
604: .ve
605:    Where maxcols >= maximum nonzeros in any row of the matrix.

607:    In Fortran MatRestoreRow() MUST be called after MatGetRow()
608:    before another call to MatGetRow() can be made.

610:    Level: advanced

612: .seealso:  MatGetRow()
613: @*/
614: PetscErrorCode MatRestoreRow(Mat mat,PetscInt row,PetscInt *ncols,const PetscInt *cols[],const PetscScalar *vals[])
615: {

621:   if (!mat->assembled) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
622:   if (!mat->ops->restorerow) return(0);
623:   (*mat->ops->restorerow)(mat,row,ncols,(PetscInt **)cols,(PetscScalar **)vals);
624:   if (ncols) *ncols = 0;
625:   if (cols)  *cols = NULL;
626:   if (vals)  *vals = NULL;
627:   return(0);
628: }

630: /*@
631:    MatGetRowUpperTriangular - Sets a flag to enable calls to MatGetRow() for matrix in MATSBAIJ format.
632:    You should call MatRestoreRowUpperTriangular() after calling MatGetRow/MatRestoreRow() to disable the flag.

634:    Not Collective

636:    Input Parameter:
637: .  mat - the matrix

639:    Notes:
640:    The flag is to ensure that users are aware that MatGetRow() only provides the upper triangular part of the row for matrices in MATSBAIJ format.

642:    Level: advanced

644:    Concepts: matrices^row access

646: .seealso: MatRestoreRowUpperTriangular()
647: @*/
648: PetscErrorCode MatGetRowUpperTriangular(Mat mat)
649: {

655:   if (!mat->assembled) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
656:   if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
657:   if (!mat->ops->getrowuppertriangular) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
658:   MatCheckPreallocated(mat,1);
659:   (*mat->ops->getrowuppertriangular)(mat);
660:   return(0);
661: }

663: /*@
664:    MatRestoreRowUpperTriangular - Disable calls to MatGetRow() for matrix in MATSBAIJ format.

666:    Not Collective

668:    Input Parameter:
669: .  mat - the matrix

671:    Notes:
672:    This routine should be called after you have finished MatGetRow/MatRestoreRow().


675:    Level: advanced

677: .seealso:  MatGetRowUpperTriangular()
678: @*/
679: PetscErrorCode MatRestoreRowUpperTriangular(Mat mat)
680: {

685:   if (!mat->assembled) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
686:   if (!mat->ops->restorerowuppertriangular) return(0);
687:   (*mat->ops->restorerowuppertriangular)(mat);
688:   return(0);
689: }

691: /*@C
692:    MatSetOptionsPrefix - Sets the prefix used for searching for all
693:    Mat options in the database.

695:    Logically Collective on Mat

697:    Input Parameters:
698: +  A - the Mat context
699: -  prefix - the prefix to prepend to all option names

701:    Notes:
702:    A hyphen (-) must NOT be given at the beginning of the prefix name.
703:    The first character of all runtime options is AUTOMATICALLY the hyphen.

705:    Level: advanced

707: .keywords: Mat, set, options, prefix, database

709: .seealso: MatSetFromOptions()
710: @*/
711: PetscErrorCode MatSetOptionsPrefix(Mat A,const char prefix[])
712: {

717:   PetscObjectSetOptionsPrefix((PetscObject)A,prefix);
718:   return(0);
719: }

721: /*@C
722:    MatAppendOptionsPrefix - Appends to the prefix used for searching for all
723:    Mat options in the database.

725:    Logically Collective on Mat

727:    Input Parameters:
728: +  A - the Mat context
729: -  prefix - the prefix to prepend to all option names

731:    Notes:
732:    A hyphen (-) must NOT be given at the beginning of the prefix name.
733:    The first character of all runtime options is AUTOMATICALLY the hyphen.

735:    Level: advanced

737: .keywords: Mat, append, options, prefix, database

739: .seealso: MatGetOptionsPrefix()
740: @*/
741: PetscErrorCode MatAppendOptionsPrefix(Mat A,const char prefix[])
742: {

747:   PetscObjectAppendOptionsPrefix((PetscObject)A,prefix);
748:   return(0);
749: }

751: /*@C
752:    MatGetOptionsPrefix - Gets the prefix used for searching for all
753:    Mat options in the database.

755:    Not Collective

757:    Input Parameter:
758: .  A - the Mat context

760:    Output Parameter:
761: .  prefix - pointer to the prefix string used

763:    Notes: On the Fortran side, the user should pass in a string 'prefix' of
764:    sufficient length to hold the prefix.

766:    Level: advanced

768: .keywords: Mat, get, options, prefix, database

770: .seealso: MatAppendOptionsPrefix()
771: @*/
772: PetscErrorCode MatGetOptionsPrefix(Mat A,const char *prefix[])
773: {

778:   PetscObjectGetOptionsPrefix((PetscObject)A,prefix);
779:   return(0);
780: }
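
/* Illustrative usage sketch (not part of the original PETSc source): give a matrix its own
   options prefix so that command-line options such as -sub_level0_mat_type or
   -sub_level0_mat_view affect only this matrix; the prefix strings and sizes used here are
   arbitrary. */
static PetscErrorCode ExamplePrefixedMatrix(MPI_Comm comm,Mat *B)
{
  PetscErrorCode ierr;
  const char     *prefix;

  ierr = MatCreate(comm,B);CHKERRQ(ierr);
  ierr = MatSetSizes(*B,PETSC_DECIDE,PETSC_DECIDE,100,100);CHKERRQ(ierr);
  ierr = MatSetOptionsPrefix(*B,"sub_");CHKERRQ(ierr);
  ierr = MatAppendOptionsPrefix(*B,"level0_");CHKERRQ(ierr);   /* full prefix is now "sub_level0_" */
  ierr = MatSetFromOptions(*B);CHKERRQ(ierr);
  ierr = MatGetOptionsPrefix(*B,&prefix);CHKERRQ(ierr);
  ierr = PetscPrintf(comm,"matrix options prefix: %s\n",prefix);CHKERRQ(ierr);
  return 0;
}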

782: /*@
783:    MatSetUp - Sets up the internal matrix data structures for later use.

785:    Collective on Mat

787:    Input Parameter:
788: .  A - the Mat context

790:    Notes:
791:    If the user has not set preallocation for this matrix then a default preallocation that is likely to be inefficient is used.

793:    If a suitable preallocation routine is used, this function does not need to be called.

795:    See the Performance chapter of the PETSc users manual for how to preallocate matrices

797:    Level: beginner

799: .keywords: Mat, setup

801: .seealso: MatCreate(), MatDestroy()
802: @*/
803: PetscErrorCode MatSetUp(Mat A)
804: {
805:   PetscMPIInt    size;

810:   if (!((PetscObject)A)->type_name) {
811:     MPI_Comm_size(PetscObjectComm((PetscObject)A), &size);
812:     if (size == 1) {
813:       MatSetType(A, MATSEQAIJ);
814:     } else {
815:       MatSetType(A, MATMPIAIJ);
816:     }
817:   }
818:   if (!A->preallocated && A->ops->setup) {
819:     PetscInfo(A,"Warning not preallocating matrix storage\n");
820:     (*A->ops->setup)(A);
821:   }
822:   if (A->rmap->n < 0 || A->rmap->N < 0) {
823:     PetscLayoutSetUp(A->rmap);
824:   }
825:   if (A->cmap->n < 0 || A->cmap->N < 0) {
826:     PetscLayoutSetUp(A->cmap);
827:   }
828:   A->preallocated = PETSC_TRUE;
829:   return(0);
830: }
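
/* Illustrative usage sketch (not part of the original PETSc source): the typical
   create/size/set-up/assemble sequence for an n x n tridiagonal matrix whose type is chosen
   at run time. MatSetUp() provides a default (possibly inefficient) preallocation when no
   type-specific preallocation routine has been called. */
static PetscErrorCode ExampleBuildTridiagonal(MPI_Comm comm,PetscInt n,Mat *A)
{
  PetscErrorCode ierr;
  PetscInt       i,rstart,rend;

  ierr = MatCreate(comm,A);CHKERRQ(ierr);
  ierr = MatSetSizes(*A,PETSC_DECIDE,PETSC_DECIDE,n,n);CHKERRQ(ierr);
  ierr = MatSetFromOptions(*A);CHKERRQ(ierr);
  ierr = MatSetUp(*A);CHKERRQ(ierr);                 /* default preallocation */
  ierr = MatGetOwnershipRange(*A,&rstart,&rend);CHKERRQ(ierr);
  for (i=rstart; i<rend; i++) {
    if (i > 0)   {ierr = MatSetValue(*A,i,i-1,-1.0,INSERT_VALUES);CHKERRQ(ierr);}
    if (i < n-1) {ierr = MatSetValue(*A,i,i+1,-1.0,INSERT_VALUES);CHKERRQ(ierr);}
    ierr = MatSetValue(*A,i,i,2.0,INSERT_VALUES);CHKERRQ(ierr);
  }
  ierr = MatAssemblyBegin(*A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(*A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  return 0;
}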

832: #if defined(PETSC_HAVE_SAWS)
833:  #include <petscviewersaws.h>
834: #endif
835: /*@C
836:    MatView - Visualizes a matrix object.

838:    Collective on Mat

840:    Input Parameters:
841: +  mat - the matrix
842: -  viewer - visualization context

844:   Notes:
845:   The available visualization contexts include
846: +    PETSC_VIEWER_STDOUT_SELF - for sequential matrices
847: .    PETSC_VIEWER_STDOUT_WORLD - for parallel matrices created on PETSC_COMM_WORLD
848: .    PETSC_VIEWER_STDOUT_(comm) - for matrices created on MPI communicator comm
849: -     PETSC_VIEWER_DRAW_WORLD - graphical display of nonzero structure

851:    The user can open alternative visualization contexts with
852: +    PetscViewerASCIIOpen() - Outputs matrix to a specified file
853: .    PetscViewerBinaryOpen() - Outputs matrix in binary to a
854:          specified file; corresponding input uses MatLoad()
855: .    PetscViewerDrawOpen() - Outputs nonzero matrix structure to
856:          an X window display
857: -    PetscViewerSocketOpen() - Outputs matrix to Socket viewer.
858:          Currently only the sequential dense and AIJ
859:          matrix types support the Socket viewer.

861:    The user can call PetscViewerPushFormat() to specify the output
862:    format of ASCII printed objects (when using PETSC_VIEWER_STDOUT_SELF,
863:    PETSC_VIEWER_STDOUT_WORLD and PetscViewerASCIIOpen).  Available formats include
864: +    PETSC_VIEWER_DEFAULT - default, prints matrix contents
865: .    PETSC_VIEWER_ASCII_MATLAB - prints matrix contents in Matlab format
866: .    PETSC_VIEWER_ASCII_DENSE - prints entire matrix including zeros
867: .    PETSC_VIEWER_ASCII_COMMON - prints matrix contents, using a sparse
868:          format common among all matrix types
869: .    PETSC_VIEWER_ASCII_IMPL - prints matrix contents, using an implementation-specific
870:          format (which is in many cases the same as the default)
871: .    PETSC_VIEWER_ASCII_INFO - prints basic information about the matrix
872:          size and structure (not the matrix entries)
873: .    PETSC_VIEWER_ASCII_INFO_DETAIL - prints more detailed information about
874:          the matrix structure

876:    Options Database Keys:
877: +  -mat_view ::ascii_info - Prints info on matrix at conclusion of MatAssemblyEnd()
878: .  -mat_view ::ascii_info_detail - Prints more detailed info
879: .  -mat_view - Prints matrix in ASCII format
880: .  -mat_view ::ascii_matlab - Prints matrix in Matlab format
881: .  -mat_view draw - PetscDraws nonzero structure of matrix, using MatView() and PetscDrawOpenX().
882: .  -display <name> - Sets display name (default is host)
883: .  -draw_pause <sec> - Sets number of seconds to pause after display
884: .  -mat_view socket - Sends matrix to socket, can be accessed from Matlab (see Users-Manual: Chapter 12 Using MATLAB with PETSc for details)
885: .  -viewer_socket_machine <machine> -
886: .  -viewer_socket_port <port> -
887: .  -mat_view binary - save matrix to file in binary format
888: -  -viewer_binary_filename <name> -
889:    Level: beginner

891:    Notes: see the manual page for MatLoad() for the exact format of the binary file when the binary
892:       viewer is used.

894:       See share/petsc/matlab/PetscBinaryRead.m for a Matlab code that can read in the binary file when the binary
895:       viewer is used.

897:       One can use '-mat_view draw -draw_pause -1' to pause the graphical display of matrix nonzero structure.
898:       And then use the following mouse functions:
899:           left mouse: zoom in
900:           middle mouse: zoom out
901:           right mouse: continue with the simulation

903:    Concepts: matrices^viewing
904:    Concepts: matrices^plotting
905:    Concepts: matrices^printing

907: .seealso: PetscViewerPushFormat(), PetscViewerASCIIOpen(), PetscViewerDrawOpen(),
908:           PetscViewerSocketOpen(), PetscViewerBinaryOpen(), MatLoad()
909: @*/
910: PetscErrorCode MatView(Mat mat,PetscViewer viewer)
911: {
912:   PetscErrorCode    ierr;
913:   PetscInt          rows,cols,rbs,cbs;
914:   PetscBool         iascii,ibinary;
915:   PetscViewerFormat format;
916: #if defined(PETSC_HAVE_SAWS)
917:   PetscBool         issaws;
918: #endif

923:   if (!viewer) {
924:     PetscViewerASCIIGetStdout(PetscObjectComm((PetscObject)mat),&viewer);
925:   }
928:   MatCheckPreallocated(mat,1);
929:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERBINARY,&ibinary);
930:   if (ibinary) {
931:     PetscBool mpiio;
932:     PetscViewerBinaryGetUseMPIIO(viewer,&mpiio);
933:     if (mpiio) SETERRQ(PetscObjectComm((PetscObject)viewer),PETSC_ERR_SUP,"PETSc matrix viewers do not support using MPI-IO, turn off that flag");
934:   }

936:   PetscLogEventBegin(MAT_View,mat,viewer,0,0);
937:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);
938:   PetscViewerGetFormat(viewer,&format);
939:   if ((!iascii || (format != PETSC_VIEWER_ASCII_INFO && format != PETSC_VIEWER_ASCII_INFO_DETAIL)) && mat->factortype) {
940:     SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"No viewers for factored matrix except ASCII info or info_detailed");
941:   }

943: #if defined(PETSC_HAVE_SAWS)
944:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERSAWS,&issaws);
945: #endif
946:   if (iascii) {
947:     if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ORDER,"Must call MatAssemblyBegin/End() before viewing matrix");
948:     PetscObjectPrintClassNamePrefixType((PetscObject)mat,viewer);
949:     if (format == PETSC_VIEWER_ASCII_INFO || format == PETSC_VIEWER_ASCII_INFO_DETAIL) {
950:       PetscViewerASCIIPushTab(viewer);
951:       MatGetSize(mat,&rows,&cols);
952:       MatGetBlockSizes(mat,&rbs,&cbs);
953:       if (rbs != 1 || cbs != 1) {
954:         if (rbs != cbs) {PetscViewerASCIIPrintf(viewer,"rows=%D, cols=%D, rbs=%D, cbs = %D\n",rows,cols,rbs,cbs);}
955:         else            {PetscViewerASCIIPrintf(viewer,"rows=%D, cols=%D, bs=%D\n",rows,cols,rbs);}
956:       } else {
957:         PetscViewerASCIIPrintf(viewer,"rows=%D, cols=%D\n",rows,cols);
958:       }
959:       if (mat->factortype) {
960:         const MatSolverPackage solver;
961:         MatFactorGetSolverPackage(mat,&solver);
962:         PetscViewerASCIIPrintf(viewer,"package used to perform factorization: %s\n",solver);
963:       }
964:       if (mat->ops->getinfo) {
965:         MatInfo info;
966:         MatGetInfo(mat,MAT_GLOBAL_SUM,&info);
967:         PetscViewerASCIIPrintf(viewer,"total: nonzeros=%.f, allocated nonzeros=%.f\n",info.nz_used,info.nz_allocated);
968:         PetscViewerASCIIPrintf(viewer,"total number of mallocs used during MatSetValues calls =%D\n",(PetscInt)info.mallocs);
969:       }
970:       if (mat->nullsp) {PetscViewerASCIIPrintf(viewer,"  has attached null space\n");}
971:       if (mat->nearnullsp) {PetscViewerASCIIPrintf(viewer,"  has attached near null space\n");}
972:     }
973: #if defined(PETSC_HAVE_SAWS)
974:   } else if (issaws) {
975:     PetscMPIInt rank;

977:     PetscObjectName((PetscObject)mat);
978:     MPI_Comm_rank(PETSC_COMM_WORLD,&rank);
979:     if (!((PetscObject)mat)->amsmem && !rank) {
980:       PetscObjectViewSAWs((PetscObject)mat,viewer);
981:     }
982: #endif
983:   }
984:   if (mat->ops->view) {
985:     PetscViewerASCIIPushTab(viewer);
986:     (*mat->ops->view)(mat,viewer);
987:     PetscViewerASCIIPopTab(viewer);
988:   }
989:   if (iascii) {
990:     if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ORDER,"Must call MatAssemblyBegin/End() before viewing matrix");
991:     PetscViewerGetFormat(viewer,&format);
992:     if (format == PETSC_VIEWER_ASCII_INFO || format == PETSC_VIEWER_ASCII_INFO_DETAIL) {
993:       PetscViewerASCIIPopTab(viewer);
994:     }
995:   }
996:   PetscLogEventEnd(MAT_View,mat,viewer,0,0);
997:   return(0);
998: }
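
/* Illustrative usage sketch (not part of the original PETSc source): print summary
   information about an assembled matrix, then its entries, on the matrix's communicator.
   For large matrices the second (default-format) view prints every stored entry. */
static PetscErrorCode ExampleViewMatrix(Mat A)
{
  PetscErrorCode ierr;
  PetscViewer    viewer;

  viewer = PETSC_VIEWER_STDOUT_(PetscObjectComm((PetscObject)A));
  ierr = PetscViewerPushFormat(viewer,PETSC_VIEWER_ASCII_INFO);CHKERRQ(ierr);
  ierr = MatView(A,viewer);CHKERRQ(ierr);                      /* sizes, nonzero counts */
  ierr = PetscViewerPopFormat(viewer);CHKERRQ(ierr);
  ierr = MatView(A,viewer);CHKERRQ(ierr);                      /* default format: the entries */
  return 0;
}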

1000: #if defined(PETSC_USE_DEBUG)
1001: #include <../src/sys/totalview/tv_data_display.h>
1002: PETSC_UNUSED static int TV_display_type(const struct _p_Mat *mat)
1003: {
1004:   TV_add_row("Local rows", "int", &mat->rmap->n);
1005:   TV_add_row("Local columns", "int", &mat->cmap->n);
1006:   TV_add_row("Global rows", "int", &mat->rmap->N);
1007:   TV_add_row("Global columns", "int", &mat->cmap->N);
1008:   TV_add_row("Typename", TV_ascii_string_type, ((PetscObject)mat)->type_name);
1009:   return TV_format_OK;
1010: }
1011: #endif

1013: /*@C
1014:    MatLoad - Loads a matrix that has been stored in binary format
1015:    with MatView().  The matrix format is determined from the options database.
1016:    Generates a parallel MPI matrix if the communicator has more than one
1017:    processor.  The default matrix type is AIJ.

1019:    Collective on PetscViewer

1021:    Input Parameters:
1022: +  newmat - the newly loaded matrix, this needs to have been created with MatCreate()
1023:             or some related function before a call to MatLoad()
1024: -  viewer - binary file viewer, created with PetscViewerBinaryOpen()

1026:    Options Database Keys:
1027: .    -matload_block_size <bs> - used with block matrix formats
1028:         (MATSEQBAIJ, ...) to specify
1029:         the block size

1031:    Level: beginner

1033:    Notes:
1034:    If the Mat type has not yet been given then MATAIJ is used; call MatSetFromOptions() on the
1035:    Mat before calling this routine if you wish to set it from the options database.

1037:    MatLoad() automatically loads into the options database any options
1038:    given in the file filename.info where filename is the name of the file
1039:    that was passed to the PetscViewerBinaryOpen(). The options in the info
1040:    file will be ignored if you use the -viewer_binary_skip_info option.

1042:    If the type or size of newmat is not set before a call to MatLoad, PETSc
1043:    sets the default matrix type AIJ and sets the local and global sizes.
1044:    If the type and/or size is already set, then those are used.

1046:    In parallel, each processor can load a subset of rows (or the
1047:    entire matrix).  This routine is especially useful when a large
1048:    matrix is stored on disk and only part of it is desired on each
1049:    processor.  For example, a parallel solver may access only some of
1050:    the rows from each processor.  The algorithm used here reads
1051:    relatively small blocks of data rather than reading the entire
1052:    matrix and then subsetting it.

1054:    Notes for advanced users:
1055:    Most users should not need to know the details of the binary storage
1056:    format, since MatLoad() and MatView() completely hide these details.
1057:    But for anyone who's interested, the standard binary matrix storage
1058:    format is

1060: $    int    MAT_FILE_CLASSID
1061: $    int    number of rows
1062: $    int    number of columns
1063: $    int    total number of nonzeros
1064: $    int    *number nonzeros in each row
1065: $    int    *column indices of all nonzeros (starting index is zero)
1066: $    PetscScalar *values of all nonzeros

1068:    PETSc automatically does the byte swapping for
1069:    machines that store the bytes reversed, e.g. DEC alpha, FreeBSD,
1070:    Linux, Windows and the Paragon; thus if you write your own binary
1071:    read/write routines you have to swap the bytes; see PetscBinaryRead()
1072:    and PetscBinaryWrite() to see how this may be done.

1074: .keywords: matrix, load, binary, input

1076: .seealso: PetscViewerBinaryOpen(), MatView(), VecLoad()

1078:  @*/
1079: PetscErrorCode MatLoad(Mat newmat,PetscViewer viewer)
1080: {
1082:   PetscBool      isbinary,flg;

1087:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERBINARY,&isbinary);
1088:   if (!isbinary) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Invalid viewer; open viewer with PetscViewerBinaryOpen()");

1090:   if (!((PetscObject)newmat)->type_name) {
1091:     MatSetType(newmat,MATAIJ);
1092:   }

1094:   if (!newmat->ops->load) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"MatLoad is not supported for type");
1095:   PetscLogEventBegin(MAT_Load,viewer,0,0,0);
1096:   (*newmat->ops->load)(newmat,viewer);
1097:   PetscLogEventEnd(MAT_Load,viewer,0,0,0);

1099:   flg  = PETSC_FALSE;
1100:   PetscOptionsGetBool(((PetscObject)newmat)->options,((PetscObject)newmat)->prefix,"-matload_symmetric",&flg,NULL);
1101:   if (flg) {
1102:     MatSetOption(newmat,MAT_SYMMETRIC,PETSC_TRUE);
1103:     MatSetOption(newmat,MAT_SYMMETRY_ETERNAL,PETSC_TRUE);
1104:   }
1105:   flg  = PETSC_FALSE;
1106:   PetscOptionsGetBool(((PetscObject)newmat)->options,((PetscObject)newmat)->prefix,"-matload_spd",&flg,NULL);
1107:   if (flg) {
1108:     MatSetOption(newmat,MAT_SPD,PETSC_TRUE);
1109:   }
1110:   return(0);
1111: }
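
/* Illustrative usage sketch (not part of the original PETSc source): load a matrix that was
   stored with MatView() into a binary file. Calling MatSetFromOptions() first lets a
   -mat_type option override the default AIJ type before MatLoad() is reached. */
static PetscErrorCode ExampleLoadMatrix(MPI_Comm comm,const char filename[],Mat *A)
{
  PetscErrorCode ierr;
  PetscViewer    viewer;

  ierr = PetscViewerBinaryOpen(comm,filename,FILE_MODE_READ,&viewer);CHKERRQ(ierr);
  ierr = MatCreate(comm,A);CHKERRQ(ierr);
  ierr = MatSetFromOptions(*A);CHKERRQ(ierr);
  ierr = MatLoad(*A,viewer);CHKERRQ(ierr);
  ierr = PetscViewerDestroy(&viewer);CHKERRQ(ierr);
  return 0;
}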

1113: PetscErrorCode MatDestroy_Redundant(Mat_Redundant **redundant)
1114: {
1116:   Mat_Redundant  *redund = *redundant;
1117:   PetscInt       i;

1120:   if (redund){
1121:     if (redund->matseq) { /* via MatCreateSubMatrices()  */
1122:       ISDestroy(&redund->isrow);
1123:       ISDestroy(&redund->iscol);
1124:       MatDestroy(&redund->matseq[0]);
1125:       PetscFree(redund->matseq);
1126:     } else {
1127:       PetscFree2(redund->send_rank,redund->recv_rank);
1128:       PetscFree(redund->sbuf_j);
1129:       PetscFree(redund->sbuf_a);
1130:       for (i=0; i<redund->nrecvs; i++) {
1131:         PetscFree(redund->rbuf_j[i]);
1132:         PetscFree(redund->rbuf_a[i]);
1133:       }
1134:       PetscFree4(redund->sbuf_nz,redund->rbuf_nz,redund->rbuf_j,redund->rbuf_a);
1135:     }

1137:     if (redund->subcomm) {
1138:       PetscCommDestroy(&redund->subcomm);
1139:     }
1140:     PetscFree(redund);
1141:   }
1142:   return(0);
1143: }

1145: /*@
1146:    MatDestroy - Frees space taken by a matrix.

1148:    Collective on Mat

1150:    Input Parameter:
1151: .  A - the matrix

1153:    Level: beginner

1155: @*/
1156: PetscErrorCode MatDestroy(Mat *A)
1157: {

1161:   if (!*A) return(0);
1163:   if (--((PetscObject)(*A))->refct > 0) {*A = NULL; return(0);}

1165:   /* if memory was published with SAWs then destroy it */
1166:   PetscObjectSAWsViewOff((PetscObject)*A);
1167:   if ((*A)->ops->destroy) {
1168:     (*(*A)->ops->destroy)(*A);
1169:   }

1171:   PetscFree((*A)->solvertype);
1172:   MatDestroy_Redundant(&(*A)->redundant);
1173:   MatNullSpaceDestroy(&(*A)->nullsp);
1174:   MatNullSpaceDestroy(&(*A)->transnullsp);
1175:   MatNullSpaceDestroy(&(*A)->nearnullsp);
1176:   MatDestroy(&(*A)->schur);
1177:   PetscLayoutDestroy(&(*A)->rmap);
1178:   PetscLayoutDestroy(&(*A)->cmap);
1179:   PetscHeaderDestroy(A);
1180:   return(0);
1181: }

1183: /*@C
1184:    MatSetValues - Inserts or adds a block of values into a matrix.
1185:    These values may be cached, so MatAssemblyBegin() and MatAssemblyEnd()
1186:    MUST be called after all calls to MatSetValues() have been completed.

1188:    Not Collective

1190:    Input Parameters:
1191: +  mat - the matrix
1192: .  v - a logically two-dimensional array of values
1193: .  m, idxm - the number of rows and their global indices
1194: .  n, idxn - the number of columns and their global indices
1195: -  addv - either ADD_VALUES or INSERT_VALUES, where
1196:    ADD_VALUES adds values to any existing entries, and
1197:    INSERT_VALUES replaces existing entries with new values

1199:    Notes:
1200:    If you create the matrix yourself (that is not with a call to DMCreateMatrix()) then you MUST call MatXXXXSetPreallocation() or
1201:       MatSetUp() before using this routine

1203:    By default the values, v, are row-oriented. See MatSetOption() for other options.

1205:    Calls to MatSetValues() with the INSERT_VALUES and ADD_VALUES
1206:    options cannot be mixed without intervening calls to the assembly
1207:    routines.

1209:    MatSetValues() uses 0-based row and column numbers in Fortran
1210:    as well as in C.

1212:    Negative indices may be passed in idxm and idxn, these rows and columns are
1213:    simply ignored. This allows easily inserting element stiffness matrices
1214:    with homogeneous Dirichlet boundary conditions that you don't want represented
1215:    in the matrix.

1217:    Efficiency Alert:
1218:    The routine MatSetValuesBlocked() may offer much better efficiency
1219:    for users of block sparse formats (MATSEQBAIJ and MATMPIBAIJ).

1221:    Level: beginner

1223:    Developer Notes: This is labeled with C so does not automatically generate Fortran stubs and interfaces
1224:                     because it requires multiple Fortran interfaces depending on which arguments are scalar or arrays.

1226:    Concepts: matrices^putting entries in

1228: .seealso: MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValuesBlocked(), MatSetValuesLocal(),
1229:           InsertMode, INSERT_VALUES, ADD_VALUES
1230: @*/
1231: PetscErrorCode MatSetValues(Mat mat,PetscInt m,const PetscInt idxm[],PetscInt n,const PetscInt idxn[],const PetscScalar v[],InsertMode addv)
1232: {
1234: #if defined(PETSC_USE_DEBUG)
1235:   PetscInt       i,j;
1236: #endif

1241:   if (!m || !n) return(0); /* no values to insert */
1245:   MatCheckPreallocated(mat,1);
1246:   if (mat->insertmode == NOT_SET_VALUES) {
1247:     mat->insertmode = addv;
1248:   }
1249: #if defined(PETSC_USE_DEBUG)
1250:   else if (mat->insertmode != addv) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Cannot mix add values and insert values");
1251:   if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
1252:   if (!mat->ops->setvalues) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);

1254:   for (i=0; i<m; i++) {
1255:     for (j=0; j<n; j++) {
1256:       if (mat->erroriffailure && PetscIsInfOrNanScalar(v[i*n+j]))
1257: #if defined(PETSC_USE_COMPLEX)
1258:         SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_FP,"Inserting %g+ig at matrix entry (%D,%D)",(double)PetscRealPart(v[i*n+j]),(double)PetscImaginaryPart(v[i*n+j]),idxm[i],idxn[j]);
1259: #else
1260:         SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_FP,"Inserting %g at matrix entry (%D,%D)",(double)v[i*n+j],idxm[i],idxn[j]);
1261: #endif
1262:     }
1263:   }
1264: #endif

1266:   if (mat->assembled) {
1267:     mat->was_assembled = PETSC_TRUE;
1268:     mat->assembled     = PETSC_FALSE;
1269:   }
1270:   PetscLogEventBegin(MAT_SetValues,mat,0,0,0);
1271:   (*mat->ops->setvalues)(mat,m,idxm,n,idxn,v,addv);
1272:   PetscLogEventEnd(MAT_SetValues,mat,0,0,0);
1273: #if defined(PETSC_HAVE_CUSP)
1274:   if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
1275:     mat->valid_GPU_matrix = PETSC_CUSP_CPU;
1276:   }
1277: #elif defined(PETSC_HAVE_VIENNACL)
1278:   if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
1279:     mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
1280:   }
1281: #elif defined(PETSC_HAVE_VECCUDA)
1282:   if (mat->valid_GPU_matrix != PETSC_CUDA_UNALLOCATED) {
1283:     mat->valid_GPU_matrix = PETSC_CUDA_CPU;
1284:   }
1285: #endif
1286:   return(0);
1287: }
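
/* Illustrative usage sketch (not part of the original PETSc source): add a 2x2 element
   stiffness matrix into rows/columns {i,j} of a preallocated matrix. The values array is
   row-oriented, matching the MatSetValues() default; in production code the assembly calls
   would be made once, after all elements have been inserted. */
static PetscErrorCode ExampleAddElement(Mat A,PetscInt i,PetscInt j)
{
  PetscErrorCode ierr;
  PetscInt       idx[2];
  PetscScalar    v[4] = {1.0,-1.0,-1.0,1.0};   /* row-oriented 2x2 block */

  idx[0] = i; idx[1] = j;
  ierr = MatSetValues(A,2,idx,2,idx,v,ADD_VALUES);CHKERRQ(ierr);
  ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  return 0;
}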


1290: /*@
1291:    MatSetValuesRowLocal - Inserts a row (block row for BAIJ matrices) of nonzero
1292:         values into a matrix

1294:    Not Collective

1296:    Input Parameters:
1297: +  mat - the matrix
1298: .  row - the (block) row to set
1299: -  v - a logically two-dimensional array of values

1301:    Notes:
1302:    The values, v, are column-oriented (for the block version) and sorted

1304:    All the nonzeros in the row must be provided

1306:    The matrix must have previously had its column indices set

1308:    The row must belong to this process

1310:    Level: intermediate

1312:    Concepts: matrices^putting entries in

1314: .seealso: MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValuesBlocked(), MatSetValuesLocal(),
1315:           InsertMode, INSERT_VALUES, ADD_VALUES, MatSetValues(), MatSetValuesRow(), MatSetLocalToGlobalMapping()
1316: @*/
1317: PetscErrorCode MatSetValuesRowLocal(Mat mat,PetscInt row,const PetscScalar v[])
1318: {
1320:   PetscInt       globalrow;

1326:   ISLocalToGlobalMappingApply(mat->rmap->mapping,1,&row,&globalrow);
1327:   MatSetValuesRow(mat,globalrow,v);
1328: #if defined(PETSC_HAVE_CUSP)
1329:   if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
1330:     mat->valid_GPU_matrix = PETSC_CUSP_CPU;
1331:   }
1332: #elif defined(PETSC_HAVE_VIENNACL)
1333:   if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
1334:     mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
1335:   }
1336: #elif defined(PETSC_HAVE_VECCUDA)
1337:   if (mat->valid_GPU_matrix != PETSC_CUDA_UNALLOCATED) {
1338:     mat->valid_GPU_matrix = PETSC_CUDA_CPU;
1339:   }
1340: #endif
1341:   return(0);
1342: }

1344: /*@
1345:    MatSetValuesRow - Inserts a row (block row for BAIJ matrices) of nonzero
1346:         values into a matrix

1348:    Not Collective

1350:    Input Parameters:
1351: +  mat - the matrix
1352: .  row - the (block) row to set
1353: -  v - a logically two-dimensional (column major) array of values for  block matrices with blocksize larger than one, otherwise a one dimensional array of values

1355:    Notes:
1356:    The values, v, are column-oriented for the block version.

1358:    All the nonzeros in the row must be provided

1360:    THE MATRIX MUST HAVE PREVIOUSLY HAD ITS COLUMN INDICES SET. IT IS RARE THAT THIS ROUTINE IS USED, usually MatSetValues() is used.

1362:    The row must belong to this process

1364:    Level: advanced

1366:    Concepts: matrices^putting entries in

1368: .seealso: MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValuesBlocked(), MatSetValuesLocal(),
1369:           InsertMode, INSERT_VALUES, ADD_VALUES, MatSetValues()
1370: @*/
1371: PetscErrorCode MatSetValuesRow(Mat mat,PetscInt row,const PetscScalar v[])
1372: {

1378:   MatCheckPreallocated(mat,1);
1380: #if defined(PETSC_USE_DEBUG)
1381:   if (mat->insertmode == ADD_VALUES) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Cannot mix add and insert values");
1382:   if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
1383: #endif
1384:   mat->insertmode = INSERT_VALUES;

1386:   if (mat->assembled) {
1387:     mat->was_assembled = PETSC_TRUE;
1388:     mat->assembled     = PETSC_FALSE;
1389:   }
1390:   PetscLogEventBegin(MAT_SetValues,mat,0,0,0);
1391:   if (!mat->ops->setvaluesrow) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
1392:   (*mat->ops->setvaluesrow)(mat,row,v);
1393:   PetscLogEventEnd(MAT_SetValues,mat,0,0,0);
1394: #if defined(PETSC_HAVE_CUSP)
1395:   if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
1396:     mat->valid_GPU_matrix = PETSC_CUSP_CPU;
1397:   }
1398: #elif defined(PETSC_HAVE_VIENNACL)
1399:   if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
1400:     mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
1401:   }
1402: #elif defined(PETSC_HAVE_VECCUDA)
1403:   if (mat->valid_GPU_matrix != PETSC_CUDA_UNALLOCATED) {
1404:     mat->valid_GPU_matrix = PETSC_CUDA_CPU;
1405:   }
1406: #endif
1407:   return(0);
1408: }

1410: /*@
1411:    MatSetValuesStencil - Inserts or adds a block of values into a matrix.
1412:      Using structured grid indexing

1414:    Not Collective

1416:    Input Parameters:
1417: +  mat - the matrix
1418: .  m - number of rows being entered
1419: .  idxm - grid coordinates (and component number when dof > 1) for matrix rows being entered
1420: .  n - number of columns being entered
1421: .  idxn - grid coordinates (and component number when dof > 1) for matrix columns being entered
1422: .  v - a logically two-dimensional array of values
1423: -  addv - either ADD_VALUES or INSERT_VALUES, where
1424:    ADD_VALUES adds values to any existing entries, and
1425:    INSERT_VALUES replaces existing entries with new values

1427:    Notes:
1428:    By default the values, v, are row-oriented.  See MatSetOption() for other options.

1430:    Calls to MatSetValuesStencil() with the INSERT_VALUES and ADD_VALUES
1431:    options cannot be mixed without intervening calls to the assembly
1432:    routines.

1434:    The grid coordinates are across the entire grid, not just the local portion

1436:    MatSetValuesStencil() uses 0-based row and column numbers in Fortran
1437:    as well as in C.

1439:    For setting/accessing vector values via array coordinates you can use the DMDAVecGetArray() routine

1441:    In order to use this routine you must either obtain the matrix with DMCreateMatrix()
1442:    or call MatSetLocalToGlobalMapping() and MatSetStencil() first.

1444:    The columns and rows in the stencil passed in MUST be contained within the
1445:    ghost region of the given process as set with DMDACreateXXX() or MatSetStencil(). For example,
1446:    if you create a DMDA with an overlap of one grid level and on a particular process its first
1447:    local nonghost x logical coordinate is 6 (so its first ghost x logical coordinate is 5) the
1448:    first i index you can use in your column and row indices in MatSetStencil() is 5.

1450:    In Fortran idxm and idxn should be declared as
1451: $     MatStencil idxm(4,m),idxn(4,n)
1452:    and the values inserted using
1453: $    idxm(MatStencil_i,1) = i
1454: $    idxm(MatStencil_j,1) = j
1455: $    idxm(MatStencil_k,1) = k
1456: $    idxm(MatStencil_c,1) = c
1457:    etc

1459:    For periodic boundary conditions use negative indices for values to the left (below 0); these are
1460:    obtained by wrapping values from the right edge. For values to the right of the last entry, use that index plus one,
1461:    etc., to obtain values wrapped from the left edge. This does not work for anything but the
1462:    DM_BOUNDARY_PERIODIC boundary type.

1464:    For indices that don't mean anything for your case (like the k index when working in 2d, or the c index when you have
1465:    a single value per point) you can skip filling those indices.

1467:    Inspired by the structured grid interface to the HYPRE package
1468:    (http://www.llnl.gov/CASC/hypre)

1470:    Efficiency Alert:
1471:    The routine MatSetValuesBlockedStencil() may offer much better efficiency
1472:    for users of block sparse formats (MATSEQBAIJ and MATMPIBAIJ).

1474:    Level: beginner

1476:    Concepts: matrices^putting entries in

1478: .seealso: MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValuesBlocked(), MatSetValuesLocal()
1479:           MatSetValues(), MatSetValuesBlockedStencil(), MatSetStencil(), DMCreateMatrix(), DMDAVecGetArray(), MatStencil
1480: @*/
1481: PetscErrorCode MatSetValuesStencil(Mat mat,PetscInt m,const MatStencil idxm[],PetscInt n,const MatStencil idxn[],const PetscScalar v[],InsertMode addv)
1482: {
1484:   PetscInt       buf[8192],*bufm=0,*bufn=0,*jdxm,*jdxn;
1485:   PetscInt       j,i,dim = mat->stencil.dim,*dims = mat->stencil.dims+1,tmp;
1486:   PetscInt       *starts = mat->stencil.starts,*dxm = (PetscInt*)idxm,*dxn = (PetscInt*)idxn,sdim = dim - (1 - (PetscInt)mat->stencil.noc);

1489:   if (!m || !n) return(0); /* no values to insert */

1496:   if ((m+n) <= (PetscInt)(sizeof(buf)/sizeof(PetscInt))) {
1497:     jdxm = buf; jdxn = buf+m;
1498:   } else {
1499:     PetscMalloc2(m,&bufm,n,&bufn);
1500:     jdxm = bufm; jdxn = bufn;
1501:   }
1502:   for (i=0; i<m; i++) {
1503:     for (j=0; j<3-sdim; j++) dxm++;
1504:     tmp = *dxm++ - starts[0];
1505:     for (j=0; j<dim-1; j++) {
1506:       if ((*dxm++ - starts[j+1]) < 0 || tmp < 0) tmp = -1;
1507:       else                                       tmp = tmp*dims[j] + *(dxm-1) - starts[j+1];
1508:     }
1509:     if (mat->stencil.noc) dxm++;
1510:     jdxm[i] = tmp;
1511:   }
1512:   for (i=0; i<n; i++) {
1513:     for (j=0; j<3-sdim; j++) dxn++;
1514:     tmp = *dxn++ - starts[0];
1515:     for (j=0; j<dim-1; j++) {
1516:       if ((*dxn++ - starts[j+1]) < 0 || tmp < 0) tmp = -1;
1517:       else                                       tmp = tmp*dims[j] + *(dxn-1) - starts[j+1];
1518:     }
1519:     if (mat->stencil.noc) dxn++;
1520:     jdxn[i] = tmp;
1521:   }
1522:   MatSetValuesLocal(mat,m,jdxm,n,jdxn,v,addv);
1523:   PetscFree2(bufm,bufn);
1524:   return(0);
1525: }
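
/* Illustrative usage sketch (not part of the original PETSc source): fill a 2d 5-point
   Laplacian using MatSetValuesStencil(). J is assumed to come from DMCreateMatrix() on a
   DMDA with one degree of freedom; xs,ys,xm,ym are the local corner and widths from
   DMDAGetCorners() and mx,my the global grid sizes (all hypothetical names supplied by the
   caller). Unused stencil fields (k and c here) may be left unset, as the notes above say. */
static PetscErrorCode ExampleFillLaplacian2d(Mat J,PetscInt xs,PetscInt ys,PetscInt xm,PetscInt ym,PetscInt mx,PetscInt my)
{
  PetscErrorCode ierr;
  PetscInt       i,j;
  MatStencil     row,col[5];
  PetscScalar    v[5];

  for (j=ys; j<ys+ym; j++) {
    for (i=xs; i<xs+xm; i++) {
      row.i = i; row.j = j;
      if (i==0 || j==0 || i==mx-1 || j==my-1) {            /* Dirichlet boundary row */
        v[0] = 1.0;
        ierr = MatSetValuesStencil(J,1,&row,1,&row,v,INSERT_VALUES);CHKERRQ(ierr);
      } else {                                             /* interior 5-point stencil */
        col[0].i = i;   col[0].j = j;   v[0] =  4.0;
        col[1].i = i-1; col[1].j = j;   v[1] = -1.0;
        col[2].i = i+1; col[2].j = j;   v[2] = -1.0;
        col[3].i = i;   col[3].j = j-1; v[3] = -1.0;
        col[4].i = i;   col[4].j = j+1; v[4] = -1.0;
        ierr = MatSetValuesStencil(J,1,&row,5,col,v,INSERT_VALUES);CHKERRQ(ierr);
      }
    }
  }
  ierr = MatAssemblyBegin(J,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(J,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  return 0;
}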

1527: /*@
1528:    MatSetValuesBlockedStencil - Inserts or adds a block of values into a matrix.
1529:      Using structured grid indexing

1531:    Not Collective

1533:    Input Parameters:
1534: +  mat - the matrix
1535: .  m - number of rows being entered
1536: .  idxm - grid coordinates for matrix rows being entered
1537: .  n - number of columns being entered
1538: .  idxn - grid coordinates for matrix columns being entered
1539: .  v - a logically two-dimensional array of values
1540: -  addv - either ADD_VALUES or INSERT_VALUES, where
1541:    ADD_VALUES adds values to any existing entries, and
1542:    INSERT_VALUES replaces existing entries with new values

1544:    Notes:
1545:    By default the values, v, are row-oriented and unsorted.
1546:    See MatSetOption() for other options.

1548:    Calls to MatSetValuesBlockedStencil() with the INSERT_VALUES and ADD_VALUES
1549:    options cannot be mixed without intervening calls to the assembly
1550:    routines.

1552:    The grid coordinates are across the entire grid, not just the local portion

1554:    MatSetValuesBlockedStencil() uses 0-based row and column numbers in Fortran
1555:    as well as in C.

1557:    For setting/accessing vector values via array coordinates you can use the DMDAVecGetArray() routine

1559:    In order to use this routine you must either obtain the matrix with DMCreateMatrix()
1560:    or call MatSetBlockSize(), MatSetLocalToGlobalMapping() and MatSetStencil() first.

1562:    The columns and rows in the stencil passed in MUST be contained within the
1563:    ghost region of the given process as set with DMDACreateXXX() or MatSetStencil(). For example,
1564:    if you create a DMDA with an overlap of one grid level and on a particular process its first
1565:    local nonghost x logical coordinate is 6 (so its first ghost x logical coordinate is 5) the
1566:    first i index you can use in your column and row indices in MatSetStencil() is 5.

1568:    In Fortran idxm and idxn should be declared as
1569: $     MatStencil idxm(4,m),idxn(4,n)
1570:    and the values inserted using
1571: $    idxm(MatStencil_i,1) = i
1572: $    idxm(MatStencil_j,1) = j
1573: $    idxm(MatStencil_k,1) = k
1574:    etc

1576:    Negative indices may be passed in idxm and idxn, these rows and columns are
1577:    simply ignored. This allows easily inserting element stiffness matrices
1578:    with homogeneous Dirichlet boundary conditions that you don't want represented
1579:    in the matrix.

1581:    Inspired by the structured grid interface to the HYPRE package
1582:    (http://www.llnl.gov/CASC/hypre)
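
   As an illustrative sketch (not from the PETSc examples; A is assumed to come from DMCreateMatrix() on a 2d DMDA
   with 2 degrees of freedom per node, and i,j is an interior grid point), a 5-point block row can be inserted with
.vb
     MatStencil  row,col[5];
     PetscScalar v[20];                       /* 5 blocks of 2x2 values, each block row oriented */
     row.i = i;      row.j = j;
     col[0].i = i;   col[0].j = j;
     col[1].i = i-1; col[1].j = j;
     col[2].i = i+1; col[2].j = j;
     col[3].i = i;   col[3].j = j-1;
     col[4].i = i;   col[4].j = j+1;
     /* fill v[] with the five 2x2 blocks, then insert the block row */
     MatSetValuesBlockedStencil(A,1,&row,5,col,v,INSERT_VALUES);
.ve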

1584:    Level: beginner

1586:    Concepts: matrices^putting entries in

1588: .seealso: MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValuesBlocked(), MatSetValuesLocal()
1589:           MatSetValues(), MatSetValuesStencil(), MatSetStencil(), DMCreateMatrix(), DMDAVecGetArray(), MatStencil,
1590:           MatSetBlockSize(), MatSetLocalToGlobalMapping()
1591: @*/
1592: PetscErrorCode MatSetValuesBlockedStencil(Mat mat,PetscInt m,const MatStencil idxm[],PetscInt n,const MatStencil idxn[],const PetscScalar v[],InsertMode addv)
1593: {
1595:   PetscInt       buf[8192],*bufm=0,*bufn=0,*jdxm,*jdxn;
1596:   PetscInt       j,i,dim = mat->stencil.dim,*dims = mat->stencil.dims+1,tmp;
1597:   PetscInt       *starts = mat->stencil.starts,*dxm = (PetscInt*)idxm,*dxn = (PetscInt*)idxn,sdim = dim - (1 - (PetscInt)mat->stencil.noc);

1600:   if (!m || !n) return(0); /* no values to insert */

1607:   if ((m+n) <= (PetscInt)(sizeof(buf)/sizeof(PetscInt))) {
1608:     jdxm = buf; jdxn = buf+m;
1609:   } else {
1610:     PetscMalloc2(m,&bufm,n,&bufn);
1611:     jdxm = bufm; jdxn = bufn;
1612:   }
1613:   for (i=0; i<m; i++) {
1614:     for (j=0; j<3-sdim; j++) dxm++;
1615:     tmp = *dxm++ - starts[0];
1616:     for (j=0; j<sdim-1; j++) {
1617:       if ((*dxm++ - starts[j+1]) < 0 || tmp < 0) tmp = -1;
1618:       else                                       tmp = tmp*dims[j] + *(dxm-1) - starts[j+1];
1619:     }
1620:     dxm++;
1621:     jdxm[i] = tmp;
1622:   }
1623:   for (i=0; i<n; i++) {
1624:     for (j=0; j<3-sdim; j++) dxn++;
1625:     tmp = *dxn++ - starts[0];
1626:     for (j=0; j<sdim-1; j++) {
1627:       if ((*dxn++ - starts[j+1]) < 0 || tmp < 0) tmp = -1;
1628:       else                                       tmp = tmp*dims[j] + *(dxn-1) - starts[j+1];
1629:     }
1630:     dxn++;
1631:     jdxn[i] = tmp;
1632:   }
1633:   MatSetValuesBlockedLocal(mat,m,jdxm,n,jdxn,v,addv);
1634:   PetscFree2(bufm,bufn);
1635: #if defined(PETSC_HAVE_CUSP)
1636:   if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
1637:     mat->valid_GPU_matrix = PETSC_CUSP_CPU;
1638:   }
1639: #elif defined(PETSC_HAVE_VIENNACL)
1640:   if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
1641:     mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
1642:   }
1643: #elif defined(PETSC_HAVE_VECCUDA)
1644:   if (mat->valid_GPU_matrix != PETSC_CUDA_UNALLOCATED) {
1645:     mat->valid_GPU_matrix = PETSC_CUDA_CPU;
1646:   }
1647: #endif
1648:   return(0);
1649: }

1651: /*@
1652:    MatSetStencil - Sets the grid information for setting values into a matrix via
1653:         MatSetValuesStencil()

1655:    Not Collective

1657:    Input Parameters:
1658: +  mat - the matrix
1659: .  dim - dimension of the grid (1, 2, or 3)
1660: .  dims - number of grid points in x, y, and z direction, including ghost points on your processor
1661: .  starts - starting point of ghost nodes on your processor in x, y, and z direction
1662: -  dof - number of degrees of freedom per node


1665:    Inspired by the structured grid interface to the HYPRE package
1666:    (http://www.llnl.gov/CASC/hypre)

1668:    For matrices generated with DMCreateMatrix() this routine is automatically called and so not needed by the
1669:    user.
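
   A minimal 2d sketch (the names gxm, gym, gxs, gys, and dof are illustrative, standing for the ghosted local sizes,
   the ghosted starting indices, and the degrees of freedom per node):
.vb
     PetscInt dims[2],starts[2];
     dims[0]   = gxm; dims[1]   = gym;     /* grid points in x and y, including ghosts */
     starts[0] = gxs; starts[1] = gys;     /* first ghosted index in x and y */
     MatSetStencil(A,2,dims,starts,dof);
.ve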

1671:    Level: beginner

1673:    Concepts: matrices^putting entries in

1675: .seealso: MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValuesBlocked(), MatSetValuesLocal()
1676:           MatSetValues(), MatSetValuesBlockedStencil(), MatSetValuesStencil()
1677: @*/
1678: PetscErrorCode MatSetStencil(Mat mat,PetscInt dim,const PetscInt dims[],const PetscInt starts[],PetscInt dof)
1679: {
1680:   PetscInt i;


1687:   mat->stencil.dim = dim + (dof > 1);
1688:   for (i=0; i<dim; i++) {
1689:     mat->stencil.dims[i]   = dims[dim-i-1];      /* copy the values in backwards */
1690:     mat->stencil.starts[i] = starts[dim-i-1];
1691:   }
1692:   mat->stencil.dims[dim]   = dof;
1693:   mat->stencil.starts[dim] = 0;
1694:   mat->stencil.noc         = (PetscBool)(dof == 1);
1695:   return(0);
1696: }

1698: /*@C
1699:    MatSetValuesBlocked - Inserts or adds a block of values into a matrix.

1701:    Not Collective

1703:    Input Parameters:
1704: +  mat - the matrix
1705: .  v - a logically two-dimensional array of values
1706: .  m, idxm - the number of block rows and their global block indices
1707: .  n, idxn - the number of block columns and their global block indices
1708: -  addv - either ADD_VALUES or INSERT_VALUES, where
1709:    ADD_VALUES adds values to any existing entries, and
1710:    INSERT_VALUES replaces existing entries with new values

1712:    Notes:
1713:    If you create the matrix yourself (that is not with a call to DMCreateMatrix()) then you MUST call
1714:    MatXXXXSetPreallocation() or MatSetUp() before using this routine.

1716:    The m and n count the NUMBER of blocks in the row direction and column direction,
1717:    NOT the total number of rows/columns; for example, if the block size is 2 and
1718:    you are passing in values for rows 2,3,4,5  then m would be 2 (not 4).
1719:    The values in idxm would be 1 2; that is the first index for each block divided by
1720:    the block size.

1722:    Note that you must call MatSetBlockSize() when constructing this matrix (before
1723:    preallocating it).

1725:    By default the values, v, are row-oriented, so the layout of
1726:    v is the same as for MatSetValues(). See MatSetOption() for other options.

1728:    Calls to MatSetValuesBlocked() with the INSERT_VALUES and ADD_VALUES
1729:    options cannot be mixed without intervening calls to the assembly
1730:    routines.

1732:    MatSetValuesBlocked() uses 0-based row and column numbers in Fortran
1733:    as well as in C.

1735:    Negative indices may be passed in idxm and idxn, these rows and columns are
1736:    simply ignored. This allows easily inserting element stiffness matrices
1737:    with homogeneous Dirichlet boundary conditions that you don't want represented
1738:    in the matrix.

1740:    Each time an entry is set within a sparse matrix via MatSetValues(),
1741:    internal searching must be done to determine where to place the
1742:    data in the matrix storage space.  By instead inserting blocks of
1743:    entries via MatSetValuesBlocked(), the overhead of matrix assembly is
1744:    reduced.

1746:    Example:
1747: $   Suppose m=n=2 and block size(bs) = 2 The array is
1748: $
1749: $   1  2  | 3  4
1750: $   5  6  | 7  8
1751: $   - - - | - - -
1752: $   9  10 | 11 12
1753: $   13 14 | 15 16
1754: $
1755: $   v[] should be passed in like
1756: $   v[] = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16]
1757: $
1758: $  If you are not using row oriented storage of v (that is you called MatSetOption(mat,MAT_ROW_ORIENTED,PETSC_FALSE)) then
1759: $   v[] = [1,5,9,13,2,6,10,14,3,7,11,15,4,8,12,16]
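
   A sketch of inserting the example above into block rows 0,1 and block columns 0,1 (the block indices are illustrative,
   and MatSetBlockSize(A,2) is assumed to have been called before preallocation):
.vb
     PetscInt    idxm[2] = {0,1},idxn[2] = {0,1};
     PetscScalar v[16]   = {1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16};
     MatSetValuesBlocked(A,2,idxm,2,idxn,v,INSERT_VALUES);
.ve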

1761:    Level: intermediate

1763:    Concepts: matrices^putting entries in blocked

1765: .seealso: MatSetBlockSize(), MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValues(), MatSetValuesBlockedLocal()
1766: @*/
1767: PetscErrorCode MatSetValuesBlocked(Mat mat,PetscInt m,const PetscInt idxm[],PetscInt n,const PetscInt idxn[],const PetscScalar v[],InsertMode addv)
1768: {

1774:   if (!m || !n) return(0); /* no values to insert */
1778:   MatCheckPreallocated(mat,1);
1779:   if (mat->insertmode == NOT_SET_VALUES) {
1780:     mat->insertmode = addv;
1781:   }
1782: #if defined(PETSC_USE_DEBUG)
1783:   else if (mat->insertmode != addv) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Cannot mix add values and insert values");
1784:   if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
1785:   if (!mat->ops->setvaluesblocked && !mat->ops->setvalues) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
1786: #endif

1788:   if (mat->assembled) {
1789:     mat->was_assembled = PETSC_TRUE;
1790:     mat->assembled     = PETSC_FALSE;
1791:   }
1792:   PetscLogEventBegin(MAT_SetValues,mat,0,0,0);
1793:   if (mat->ops->setvaluesblocked) {
1794:     (*mat->ops->setvaluesblocked)(mat,m,idxm,n,idxn,v,addv);
1795:   } else {
1796:     PetscInt buf[8192],*bufr=0,*bufc=0,*iidxm,*iidxn;
1797:     PetscInt i,j,bs,cbs;
1798:     MatGetBlockSizes(mat,&bs,&cbs);
1799:     if (m*bs+n*cbs <= (PetscInt)(sizeof(buf)/sizeof(PetscInt))) {
1800:       iidxm = buf; iidxn = buf + m*bs;
1801:     } else {
1802:       PetscMalloc2(m*bs,&bufr,n*cbs,&bufc);
1803:       iidxm = bufr; iidxn = bufc;
1804:     }
1805:     for (i=0; i<m; i++) {
1806:       for (j=0; j<bs; j++) {
1807:         iidxm[i*bs+j] = bs*idxm[i] + j;
1808:       }
1809:     }
1810:     for (i=0; i<n; i++) {
1811:       for (j=0; j<cbs; j++) {
1812:         iidxn[i*cbs+j] = cbs*idxn[i] + j;
1813:       }
1814:     }
1815:     MatSetValues(mat,m*bs,iidxm,n*cbs,iidxn,v,addv);
1816:     PetscFree2(bufr,bufc);
1817:   }
1818:   PetscLogEventEnd(MAT_SetValues,mat,0,0,0);
1819: #if defined(PETSC_HAVE_CUSP)
1820:   if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
1821:     mat->valid_GPU_matrix = PETSC_CUSP_CPU;
1822:   }
1823: #elif defined(PETSC_HAVE_VIENNACL)
1824:   if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
1825:     mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
1826:   }
1827: #elif defined(PETSC_HAVE_VECCUDA)
1828:   if (mat->valid_GPU_matrix != PETSC_CUDA_UNALLOCATED) {
1829:     mat->valid_GPU_matrix = PETSC_CUDA_CPU;
1830:   }
1831: #endif
1832:   return(0);
1833: }

1835: /*@
1836:    MatGetValues - Gets a block of values from a matrix.

1838:    Not Collective; currently only returns a local block

1840:    Input Parameters:
1841: +  mat - the matrix
1842: .  v - a logically two-dimensional array for storing the values
1843: .  m, idxm - the number of rows and their global indices
1844: -  n, idxn - the number of columns and their global indices

1846:    Notes:
1847:    The user must allocate space (m*n PetscScalars) for the values, v.
1848:    The values, v, are then returned in a row-oriented format,
1849:    analogous to that used by default in MatSetValues().

1851:    MatGetValues() uses 0-based row and column numbers in
1852:    Fortran as well as in C.

1854:    MatGetValues() requires that the matrix has been assembled
1855:    with MatAssemblyBegin()/MatAssemblyEnd().  Thus, calls to
1856:    MatSetValues() and MatGetValues() CANNOT be made in succession
1857:    without intermediate matrix assembly.

1859:    Negative row or column indices will be ignored and those locations in v[] will be
1860:    left unchanged.
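
   A small sketch that extracts a 2 by 3 block of an assembled matrix (the global indices are illustrative and must be
   owned by this process):
.vb
     PetscInt    rows[2] = {0,1},cols[3] = {0,1,2};
     PetscScalar vals[6];                  /* m*n entries, returned row oriented */
     MatGetValues(A,2,rows,3,cols,vals);   /* vals[i*3+j] = A(rows[i],cols[j]) */
.ve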

1862:    Level: advanced

1864:    Concepts: matrices^accessing values

1866: .seealso: MatGetRow(), MatCreateSubMatrices(), MatSetValues()
1867: @*/
1868: PetscErrorCode MatGetValues(Mat mat,PetscInt m,const PetscInt idxm[],PetscInt n,const PetscInt idxn[],PetscScalar v[])
1869: {

1875:   if (!m || !n) return(0);
1879:   if (!mat->assembled) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
1880:   if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
1881:   if (!mat->ops->getvalues) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
1882:   MatCheckPreallocated(mat,1);

1884:   PetscLogEventBegin(MAT_GetValues,mat,0,0,0);
1885:   (*mat->ops->getvalues)(mat,m,idxm,n,idxn,v);
1886:   PetscLogEventEnd(MAT_GetValues,mat,0,0,0);
1887:   return(0);
1888: }

1890: /*@
1891:   MatSetValuesBatch - Adds (ADD_VALUES) many blocks of values into a matrix at once. The blocks must all be square and
1892:   the same size. Currently, this can only be called once and creates the given matrix.

1894:   Not Collective

1896:   Input Parameters:
1897: + mat - the matrix
1898: . nb - the number of blocks
1899: . bs - the number of rows (and columns) in each block
1900: . rows - a concatenation of the rows for each block
1901: - v - a concatenation of logically two-dimensional arrays of values

1903:   Notes:
1904:   In the future, we will extend this routine to handle rectangular blocks, and to allow multiple calls for a given matrix.
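
  A sketch for two 3 by 3 element blocks (the row indices are illustrative):
.vb
    PetscInt    rows[6] = {0,1,2,2,3,4};   /* three rows for the first block, then three for the second */
    PetscScalar v[18];                     /* the first block's 9 values followed by the second's, row oriented */
    MatSetValuesBatch(mat,2,3,rows,v);
.ve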

1906:   Level: advanced

1908:   Concepts: matrices^putting entries in

1910: .seealso: MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValuesBlocked(), MatSetValuesLocal(),
1911:           InsertMode, INSERT_VALUES, ADD_VALUES, MatSetValues()
1912: @*/
1913: PetscErrorCode MatSetValuesBatch(Mat mat, PetscInt nb, PetscInt bs, PetscInt rows[], const PetscScalar v[])
1914: {

1922: #if defined(PETSC_USE_DEBUG)
1923:   if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
1924: #endif

1926:   PetscLogEventBegin(MAT_SetValuesBatch,mat,0,0,0);
1927:   if (mat->ops->setvaluesbatch) {
1928:     (*mat->ops->setvaluesbatch)(mat,nb,bs,rows,v);
1929:   } else {
1930:     PetscInt b;
1931:     for (b = 0; b < nb; ++b) {
1932:       MatSetValues(mat, bs, &rows[b*bs], bs, &rows[b*bs], &v[b*bs*bs], ADD_VALUES);
1933:     }
1934:   }
1935:   PetscLogEventEnd(MAT_SetValuesBatch,mat,0,0,0);
1936:   return(0);
1937: }

1939: /*@
1940:    MatSetLocalToGlobalMapping - Sets a local-to-global numbering for use by
1941:    the routine MatSetValuesLocal() to allow users to insert matrix entries
1942:    using a local (per-processor) numbering.

1944:    Not Collective

1946:    Input Parameters:
1947: +  x - the matrix
1948: .  rmapping - row mapping created with ISLocalToGlobalMappingCreate()   or ISLocalToGlobalMappingCreateIS()
1949: - cmapping - column mapping

1951:    Level: intermediate

1953:    Concepts: matrices^local to global mapping
1954:    Concepts: local to global mapping^for matrices

1956: .seealso:  MatAssemblyBegin(), MatAssemblyEnd(), MatSetValues(), MatSetValuesLocal()
1957: @*/
1958: PetscErrorCode MatSetLocalToGlobalMapping(Mat x,ISLocalToGlobalMapping rmapping,ISLocalToGlobalMapping cmapping)
1959: {


1968:   if (x->ops->setlocaltoglobalmapping) {
1969:     (*x->ops->setlocaltoglobalmapping)(x,rmapping,cmapping);
1970:   } else {
1971:     PetscLayoutSetISLocalToGlobalMapping(x->rmap,rmapping);
1972:     PetscLayoutSetISLocalToGlobalMapping(x->cmap,cmapping);
1973:   }
1974:   return(0);
1975: }


1978: /*@
1979:    MatGetLocalToGlobalMapping - Gets the local-to-global numbering set by MatSetLocalToGlobalMapping()

1981:    Not Collective

1983:    Input Parameters:
1984: .  A - the matrix

1986:    Output Parameters:
1987: + rmapping - row mapping
1988: - cmapping - column mapping

1990:    Level: advanced

1992:    Concepts: matrices^local to global mapping
1993:    Concepts: local to global mapping^for matrices

1995: .seealso:  MatSetValuesLocal()
1996: @*/
1997: PetscErrorCode MatGetLocalToGlobalMapping(Mat A,ISLocalToGlobalMapping *rmapping,ISLocalToGlobalMapping *cmapping)
1998: {
2004:   if (rmapping) *rmapping = A->rmap->mapping;
2005:   if (cmapping) *cmapping = A->cmap->mapping;
2006:   return(0);
2007: }

2009: /*@
2010:    MatGetLayouts - Gets the PetscLayout objects for rows and columns

2012:    Not Collective

2014:    Input Parameters:
2015: .  A - the matrix

2017:    Output Parameters:
2018: + rmap - row layout
2019: - cmap - column layout
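
   For example, a sketch that queries the locally owned row range through the row layout:
.vb
     PetscLayout rmap,cmap;
     PetscInt    rstart,rend;
     MatGetLayouts(A,&rmap,&cmap);
     PetscLayoutGetRange(rmap,&rstart,&rend);   /* rows rstart..rend-1 are owned by this process */
.ve
   The layouts are owned by the matrix and should not be destroyed by the caller.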

2021:    Level: advanced

2023: .seealso:  MatCreateVecs(), MatGetLocalToGlobalMapping()
2024: @*/
2025: PetscErrorCode MatGetLayouts(Mat A,PetscLayout *rmap,PetscLayout *cmap)
2026: {
2032:   if (rmap) *rmap = A->rmap;
2033:   if (cmap) *cmap = A->cmap;
2034:   return(0);
2035: }

2037: /*@C
2038:    MatSetValuesLocal - Inserts or adds values into certain locations of a matrix,
2039:    using a local ordering of the nodes.

2041:    Not Collective

2043:    Input Parameters:
2044: +  mat - the matrix
2045: .  nrow, irow - number of rows and their local indices
2046: .  ncol, icol - number of columns and their local indices
2047: .  y -  a logically two-dimensional array of values
2048: -  addv - either INSERT_VALUES or ADD_VALUES, where
2049:    ADD_VALUES adds values to any existing entries, and
2050:    INSERT_VALUES replaces existing entries with new values

2052:    Notes:
2053:    If you create the matrix yourself (that is not with a call to DMCreateMatrix()) then you MUST call MatXXXXSetPreallocation() or
2054:       MatSetUp() before using this routine

2056:    If you create the matrix yourself (that is not with a call to DMCreateMatrix()) then you MUST call MatSetLocalToGlobalMapping() before using this routine

2058:    Calls to MatSetValuesLocal() with the INSERT_VALUES and ADD_VALUES
2059:    options cannot be mixed without intervening calls to the assembly
2060:    routines.

2062:    These values may be cached, so MatAssemblyBegin() and MatAssemblyEnd()
2063:    MUST be called after all calls to MatSetValuesLocal() have been completed.
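
   A small sketch (the local indices and values are illustrative; a local-to-global mapping is assumed to have been set):
.vb
     PetscInt    lrow = 0,lcols[2] = {0,1};
     PetscScalar vals[2] = {2.0,-1.0};
     MatSetValuesLocal(mat,1,&lrow,2,lcols,vals,ADD_VALUES);
.ve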

2065:    Level: intermediate

2067:    Concepts: matrices^putting entries in with local numbering

2069:    Developer Notes: This is labeled with C so does not automatically generate Fortran stubs and interfaces
2070:                     because it requires multiple Fortran interfaces depending on which arguments are scalar or arrays.

2072: .seealso:  MatAssemblyBegin(), MatAssemblyEnd(), MatSetValues(), MatSetLocalToGlobalMapping(),
2073:            MatSetValueLocal()
2074: @*/
2075: PetscErrorCode MatSetValuesLocal(Mat mat,PetscInt nrow,const PetscInt irow[],PetscInt ncol,const PetscInt icol[],const PetscScalar y[],InsertMode addv)
2076: {

2082:   MatCheckPreallocated(mat,1);
2083:   if (!nrow || !ncol) return(0); /* no values to insert */
2087:   if (mat->insertmode == NOT_SET_VALUES) {
2088:     mat->insertmode = addv;
2089:   }
2090: #if defined(PETSC_USE_DEBUG)
2091:   else if (mat->insertmode != addv) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Cannot mix add values and insert values");
2092:   if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2093:   if (!mat->ops->setvalueslocal && !mat->ops->setvalues) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
2094: #endif

2096:   if (mat->assembled) {
2097:     mat->was_assembled = PETSC_TRUE;
2098:     mat->assembled     = PETSC_FALSE;
2099:   }
2100:   PetscLogEventBegin(MAT_SetValues,mat,0,0,0);
2101:   if (mat->ops->setvalueslocal) {
2102:     (*mat->ops->setvalueslocal)(mat,nrow,irow,ncol,icol,y,addv);
2103:   } else {
2104:     PetscInt buf[8192],*bufr=0,*bufc=0,*irowm,*icolm;
2105:     if ((nrow+ncol) <= (PetscInt)(sizeof(buf)/sizeof(PetscInt))) {
2106:       irowm = buf; icolm = buf+nrow;
2107:     } else {
2108:       PetscMalloc2(nrow,&bufr,ncol,&bufc);
2109:       irowm = bufr; icolm = bufc;
2110:     }
2111:     ISLocalToGlobalMappingApply(mat->rmap->mapping,nrow,irow,irowm);
2112:     ISLocalToGlobalMappingApply(mat->cmap->mapping,ncol,icol,icolm);
2113:     MatSetValues(mat,nrow,irowm,ncol,icolm,y,addv);
2114:     PetscFree2(bufr,bufc);
2115:   }
2116:   PetscLogEventEnd(MAT_SetValues,mat,0,0,0);
2117: #if defined(PETSC_HAVE_CUSP)
2118:   if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
2119:     mat->valid_GPU_matrix = PETSC_CUSP_CPU;
2120:   }
2121: #elif defined(PETSC_HAVE_VIENNACL)
2122:   if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
2123:     mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
2124:   }
2125: #elif defined(PETSC_HAVE_VECCUDA)
2126:   if (mat->valid_GPU_matrix != PETSC_CUDA_UNALLOCATED) {
2127:     mat->valid_GPU_matrix = PETSC_CUDA_CPU;
2128:   }
2129: #endif
2130:   return(0);
2131: }

2133: /*@C
2134:    MatSetValuesBlockedLocal - Inserts or adds values into certain locations of a matrix,
2135:    using a local ordering of the nodes a block at a time.

2137:    Not Collective

2139:    Input Parameters:
2140: +  mat - the matrix
2141: .  nrow, irow - number of rows and their local indices
2142: .  ncol, icol - number of columns and their local indices
2143: .  y -  a logically two-dimensional array of values
2144: -  addv - either INSERT_VALUES or ADD_VALUES, where
2145:    ADD_VALUES adds values to any existing entries, and
2146:    INSERT_VALUES replaces existing entries with new values

2148:    Notes:
2149:    If you create the matrix yourself (that is not with a call to DMCreateMatrix()) then you MUST call MatXXXXSetPreallocation() or
2150:       MatSetUp() before using this routine

2152:    If you create the matrix yourself (that is not with a call to DMCreateMatrix()) then you MUST call MatSetBlockSize() and MatSetLocalToGlobalMapping()
2153:       before using this routine.

2155:    Calls to MatSetValuesBlockedLocal() with the INSERT_VALUES and ADD_VALUES
2156:    options cannot be mixed without intervening calls to the assembly
2157:    routines.

2159:    These values may be cached, so MatAssemblyBegin() and MatAssemblyEnd()
2160:    MUST be called after all calls to MatSetValuesBlockedLocal() have been completed.

2162:    Level: intermediate

2164:    Developer Notes: This is labeled with C so does not automatically generate Fortran stubs and interfaces
2165:                     because it requires multiple Fortran interfaces depending on which arguments are scalar or arrays.

2167:    Concepts: matrices^putting blocked values in with local numbering

2169: .seealso:  MatSetBlockSize(), MatSetLocalToGlobalMapping(), MatAssemblyBegin(), MatAssemblyEnd(),
2170:            MatSetValuesLocal(),  MatSetValuesBlocked()
2171: @*/
2172: PetscErrorCode MatSetValuesBlockedLocal(Mat mat,PetscInt nrow,const PetscInt irow[],PetscInt ncol,const PetscInt icol[],const PetscScalar y[],InsertMode addv)
2173: {

2179:   MatCheckPreallocated(mat,1);
2180:   if (!nrow || !ncol) return(0); /* no values to insert */
2184:   if (mat->insertmode == NOT_SET_VALUES) {
2185:     mat->insertmode = addv;
2186:   }
2187: #if defined(PETSC_USE_DEBUG)
2188:   else if (mat->insertmode != addv) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Cannot mix add values and insert values");
2189:   if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2190:   if (!mat->ops->setvaluesblockedlocal && !mat->ops->setvaluesblocked && !mat->ops->setvalueslocal && !mat->ops->setvalues) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
2191: #endif

2193:   if (mat->assembled) {
2194:     mat->was_assembled = PETSC_TRUE;
2195:     mat->assembled     = PETSC_FALSE;
2196:   }
2197:   PetscLogEventBegin(MAT_SetValues,mat,0,0,0);
2198:   if (mat->ops->setvaluesblockedlocal) {
2199:     (*mat->ops->setvaluesblockedlocal)(mat,nrow,irow,ncol,icol,y,addv);
2200:   } else {
2201:     PetscInt buf[8192],*bufr=0,*bufc=0,*irowm,*icolm;
2202:     if ((nrow+ncol) <= (PetscInt)(sizeof(buf)/sizeof(PetscInt))) {
2203:       irowm = buf; icolm = buf + nrow;
2204:     } else {
2205:       PetscMalloc2(nrow,&bufr,ncol,&bufc);
2206:       irowm = bufr; icolm = bufc;
2207:     }
2208:     ISLocalToGlobalMappingApplyBlock(mat->rmap->mapping,nrow,irow,irowm);
2209:     ISLocalToGlobalMappingApplyBlock(mat->cmap->mapping,ncol,icol,icolm);
2210:     MatSetValuesBlocked(mat,nrow,irowm,ncol,icolm,y,addv);
2211:     PetscFree2(bufr,bufc);
2212:   }
2213:   PetscLogEventEnd(MAT_SetValues,mat,0,0,0);
2214: #if defined(PETSC_HAVE_CUSP)
2215:   if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
2216:     mat->valid_GPU_matrix = PETSC_CUSP_CPU;
2217:   }
2218: #elif defined(PETSC_HAVE_VIENNACL)
2219:   if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
2220:     mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
2221:   }
2222: #elif defined(PETSC_HAVE_VECCUDA)
2223:   if (mat->valid_GPU_matrix != PETSC_CUDA_UNALLOCATED) {
2224:     mat->valid_GPU_matrix = PETSC_CUDA_CPU;
2225:   }
2226: #endif
2227:   return(0);
2228: }

2230: /*@
2231:    MatMultDiagonalBlock - Computes the matrix-vector product, y = Dx, where D is defined by the inode or block structure of the diagonal

2233:    Collective on Mat and Vec

2235:    Input Parameters:
2236: +  mat - the matrix
2237: -  x   - the vector to be multiplied

2239:    Output Parameters:
2240: .  y - the result

2242:    Notes:
2243:    The vectors x and y cannot be the same.  I.e., one cannot
2244:    call MatMultDiagonalBlock(A,y,y).

2246:    Level: developer

2248:    Concepts: matrix-vector product

2250: .seealso: MatMultTranspose(), MatMultAdd(), MatMultTransposeAdd()
2251: @*/
2252: PetscErrorCode MatMultDiagonalBlock(Mat mat,Vec x,Vec y)
2253: {


2262:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2263:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2264:   if (x == y) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"x and y must be different vectors");
2265:   MatCheckPreallocated(mat,1);

2267:   if (!mat->ops->multdiagonalblock) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"This matrix type does not have a multiply defined");
2268:   (*mat->ops->multdiagonalblock)(mat,x,y);
2269:   PetscObjectStateIncrease((PetscObject)y);
2270:   return(0);
2271: }

2273: /* --------------------------------------------------------*/
2274: /*@
2275:    MatMult - Computes the matrix-vector product, y = Ax.

2277:    Neighbor-wise Collective on Mat and Vec

2279:    Input Parameters:
2280: +  mat - the matrix
2281: -  x   - the vector to be multiplied

2283:    Output Parameters:
2284: .  y - the result

2286:    Notes:
2287:    The vectors x and y cannot be the same.  I.e., one cannot
2288:    call MatMult(A,y,y).
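
   A minimal sketch using vectors whose layouts are compatible with the matrix:
.vb
     Vec x,y;
     MatCreateVecs(A,&x,&y);    /* x matches the column layout of A, y the row layout */
     VecSet(x,1.0);
     MatMult(A,x,y);            /* y = A x */
     VecDestroy(&x); VecDestroy(&y);
.ve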

2290:    Level: beginner

2292:    Concepts: matrix-vector product

2294: .seealso: MatMultTranspose(), MatMultAdd(), MatMultTransposeAdd()
2295: @*/
2296: PetscErrorCode MatMult(Mat mat,Vec x,Vec y)
2297: {

2305:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2306:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2307:   if (x == y) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"x and y must be different vectors");
2308: #if !defined(PETSC_HAVE_CONSTRAINTS)
2309:   if (mat->cmap->N != x->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->cmap->N,x->map->N);
2310:   if (mat->rmap->N != y->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec y: global dim %D %D",mat->rmap->N,y->map->N);
2311:   if (mat->rmap->n != y->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec y: local dim %D %D",mat->rmap->n,y->map->n);
2312: #endif
2313:   VecLocked(y,3);
2314:   if (mat->erroriffailure) {VecValidValues(x,2,PETSC_TRUE);}
2315:   MatCheckPreallocated(mat,1);

2317:   VecLockPush(x);
2318:   if (!mat->ops->mult) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"This matrix type does not have a multiply defined");
2319:   PetscLogEventBegin(MAT_Mult,mat,x,y,0);
2320:   (*mat->ops->mult)(mat,x,y);
2321:   PetscLogEventEnd(MAT_Mult,mat,x,y,0);
2322:   if (mat->erroriffailure) {VecValidValues(y,3,PETSC_FALSE);}
2323:   VecLockPop(x);
2324:   return(0);
2325: }

2327: /*@
2328:    MatMultTranspose - Computes matrix transpose times a vector.

2330:    Neighbor-wise Collective on Mat and Vec

2332:    Input Parameters:
2333: +  mat - the matrix
2334: -  x   - the vector to be multiplied

2336:    Output Parameters:
2337: .  y - the result

2339:    Notes:
2340:    The vectors x and y cannot be the same.  I.e., one cannot
2341:    call MatMultTranspose(A,y,y).

2343:    For complex numbers this does NOT compute the Hermitian (complex conjugate) transpose multiply;
2344:    use MatMultHermitianTranspose().

2346:    Level: beginner

2348:    Concepts: matrix vector product^transpose

2350: .seealso: MatMult(), MatMultAdd(), MatMultTransposeAdd(), MatMultHermitianTranspose(), MatTranspose()
2351: @*/
2352: PetscErrorCode MatMultTranspose(Mat mat,Vec x,Vec y)
2353: {


2362:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2363:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2364:   if (x == y) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"x and y must be different vectors");
2365: #if !defined(PETSC_HAVE_CONSTRAINTS)
2366:   if (mat->rmap->N != x->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->rmap->N,x->map->N);
2367:   if (mat->cmap->N != y->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec y: global dim %D %D",mat->cmap->N,y->map->N);
2368: #endif
2369:   if (mat->erroriffailure) {VecValidValues(x,2,PETSC_TRUE);}
2370:   MatCheckPreallocated(mat,1);

2372:   if (!mat->ops->multtranspose) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"This matrix type does not have a multiply transpose defined");
2373:   PetscLogEventBegin(MAT_MultTranspose,mat,x,y,0);
2374:   VecLockPush(x);
2375:   (*mat->ops->multtranspose)(mat,x,y);
2376:   VecLockPop(x);
2377:   PetscLogEventEnd(MAT_MultTranspose,mat,x,y,0);
2378:   PetscObjectStateIncrease((PetscObject)y);
2379:   if (mat->erroriffailure) {VecValidValues(y,3,PETSC_FALSE);}
2380:   return(0);
2381: }

2383: /*@
2384:    MatMultHermitianTranspose - Computes matrix Hermitian transpose times a vector.

2386:    Neighbor-wise Collective on Mat and Vec

2388:    Input Parameters:
2389: +  mat - the matrix
2390: -  x   - the vector to be multiplied

2392:    Output Parameters:
2393: .  y - the result

2395:    Notes:
2396:    The vectors x and y cannot be the same.  I.e., one cannot
2397:    call MatMultHermitianTranspose(A,y,y).

2399:    Also called the conjugate transpose, complex conjugate transpose, or adjoint.

2401:    For real numbers MatMultTranspose() and MatMultHermitianTranspose() are identical.

2403:    Level: beginner

2405:    Concepts: matrix vector product^transpose

2407: .seealso: MatMult(), MatMultAdd(), MatMultHermitianTransposeAdd(), MatMultTranspose()
2408: @*/
2409: PetscErrorCode MatMultHermitianTranspose(Mat mat,Vec x,Vec y)
2410: {
2412:   Vec            w;


2420:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2421:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2422:   if (x == y) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"x and y must be different vectors");
2423: #if !defined(PETSC_HAVE_CONSTRAINTS)
2424:   if (mat->rmap->N != x->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->rmap->N,x->map->N);
2425:   if (mat->cmap->N != y->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec y: global dim %D %D",mat->cmap->N,y->map->N);
2426: #endif
2427:   MatCheckPreallocated(mat,1);

2429:   PetscLogEventBegin(MAT_MultHermitianTranspose,mat,x,y,0);
2430:   if (mat->ops->multhermitiantranspose) {
2431:     VecLockPush(x);
2432:     (*mat->ops->multhermitiantranspose)(mat,x,y);
2433:     VecLockPop(x);
2434:   } else {
2435:     VecDuplicate(x,&w);
2436:     VecCopy(x,w);
2437:     VecConjugate(w);
2438:     MatMultTranspose(mat,w,y);
2439:     VecDestroy(&w);
2440:     VecConjugate(y);
2441:   }
2442:   PetscLogEventEnd(MAT_MultHermitianTranspose,mat,x,y,0);
2443:   PetscObjectStateIncrease((PetscObject)y);
2444:   return(0);
2445: }

2447: /*@
2448:     MatMultAdd -  Computes v3 = v2 + A * v1.

2450:     Neighbor-wise Collective on Mat and Vec

2452:     Input Parameters:
2453: +   mat - the matrix
2454: -   v1, v2 - the vectors

2456:     Output Parameters:
2457: .   v3 - the result

2459:     Notes:
2460:     The vectors v1 and v3 cannot be the same.  I.e., one cannot
2461:     call MatMultAdd(A,v1,v2,v1).

2463:     Level: beginner

2465:     Concepts: matrix vector product^addition

2467: .seealso: MatMultTranspose(), MatMult(), MatMultTransposeAdd()
2468: @*/
2469: PetscErrorCode MatMultAdd(Mat mat,Vec v1,Vec v2,Vec v3)
2470: {


2480:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2481:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2482:   if (mat->cmap->N != v1->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec v1: global dim %D %D",mat->cmap->N,v1->map->N);
2483:   /* if (mat->rmap->N != v2->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec v2: global dim %D %D",mat->rmap->N,v2->map->N);
2484:      if (mat->rmap->N != v3->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec v3: global dim %D %D",mat->rmap->N,v3->map->N); */
2485:   if (mat->rmap->n != v3->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec v3: local dim %D %D",mat->rmap->n,v3->map->n);
2486:   if (mat->rmap->n != v2->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec v2: local dim %D %D",mat->rmap->n,v2->map->n);
2487:   if (v1 == v3) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"v1 and v3 must be different vectors");
2488:   MatCheckPreallocated(mat,1);

2490:   if (!mat->ops->multadd) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"No MatMultAdd() for matrix type '%s'",((PetscObject)mat)->type_name);
2491:   PetscLogEventBegin(MAT_MultAdd,mat,v1,v2,v3);
2492:   VecLockPush(v1);
2493:   (*mat->ops->multadd)(mat,v1,v2,v3);
2494:   VecLockPop(v1);
2495:   PetscLogEventEnd(MAT_MultAdd,mat,v1,v2,v3);
2496:   PetscObjectStateIncrease((PetscObject)v3);
2497:   return(0);
2498: }

2500: /*@
2501:    MatMultTransposeAdd - Computes v3 = v2 + A' * v1.

2503:    Neighbor-wise Collective on Mat and Vec

2505:    Input Parameters:
2506: +  mat - the matrix
2507: -  v1, v2 - the vectors

2509:    Output Parameters:
2510: .  v3 - the result

2512:    Notes:
2513:    The vectors v1 and v3 cannot be the same.  I.e., one cannot
2514:    call MatMultTransposeAdd(A,v1,v2,v1).

2516:    Level: beginner

2518:    Concepts: matrix vector product^transpose and addition

2520: .seealso: MatMultTranspose(), MatMultAdd(), MatMult()
2521: @*/
2522: PetscErrorCode MatMultTransposeAdd(Mat mat,Vec v1,Vec v2,Vec v3)
2523: {


2533:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2534:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2535:   if (!mat->ops->multtransposeadd) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
2536:   if (v1 == v3) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"v1 and v3 must be different vectors");
2537:   if (mat->rmap->N != v1->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec v1: global dim %D %D",mat->rmap->N,v1->map->N);
2538:   if (mat->cmap->N != v2->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec v2: global dim %D %D",mat->cmap->N,v2->map->N);
2539:   if (mat->cmap->N != v3->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec v3: global dim %D %D",mat->cmap->N,v3->map->N);
2540:   MatCheckPreallocated(mat,1);

2542:   PetscLogEventBegin(MAT_MultTransposeAdd,mat,v1,v2,v3);
2543:   VecLockPush(v1);
2544:   (*mat->ops->multtransposeadd)(mat,v1,v2,v3);
2545:   VecLockPop(v1);
2546:   PetscLogEventEnd(MAT_MultTransposeAdd,mat,v1,v2,v3);
2547:   PetscObjectStateIncrease((PetscObject)v3);
2548:   return(0);
2549: }

2551: /*@
2552:    MatMultHermitianTransposeAdd - Computes v3 = v2 + A^H * v1.

2554:    Neighbor-wise Collective on Mat and Vec

2556:    Input Parameters:
2557: +  mat - the matrix
2558: -  v1, v2 - the vectors

2560:    Output Parameters:
2561: .  v3 - the result

2563:    Notes:
2564:    The vectors v1 and v3 cannot be the same.  I.e., one cannot
2565:    call MatMultHermitianTransposeAdd(A,v1,v2,v1).

2567:    Level: beginner

2569:    Concepts: matrix vector product^transpose and addition

2571: .seealso: MatMultHermitianTranspose(), MatMultTranspose(), MatMultAdd(), MatMult()
2572: @*/
2573: PetscErrorCode MatMultHermitianTransposeAdd(Mat mat,Vec v1,Vec v2,Vec v3)
2574: {


2584:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2585:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2586:   if (v1 == v3) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"v1 and v3 must be different vectors");
2587:   if (mat->rmap->N != v1->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec v1: global dim %D %D",mat->rmap->N,v1->map->N);
2588:   if (mat->cmap->N != v2->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec v2: global dim %D %D",mat->cmap->N,v2->map->N);
2589:   if (mat->cmap->N != v3->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec v3: global dim %D %D",mat->cmap->N,v3->map->N);
2590:   MatCheckPreallocated(mat,1);

2592:   PetscLogEventBegin(MAT_MultHermitianTransposeAdd,mat,v1,v2,v3);
2593:   VecLockPush(v1);
2594:   if (mat->ops->multhermitiantransposeadd) {
2595:     (*mat->ops->multhermitiantransposeadd)(mat,v1,v2,v3);
2596:    } else {
2597:     Vec w,z;
2598:     VecDuplicate(v1,&w);
2599:     VecCopy(v1,w);
2600:     VecConjugate(w);
2601:     VecDuplicate(v3,&z);
2602:     MatMultTranspose(mat,w,z);
2603:     VecDestroy(&w);
2604:     VecConjugate(z);
2605:     VecWAXPY(v3,1.0,v2,z);
2606:     VecDestroy(&z);
2607:   }
2608:   VecLockPop(v1);
2609:   PetscLogEventEnd(MAT_MultHermitianTransposeAdd,mat,v1,v2,v3);
2610:   PetscObjectStateIncrease((PetscObject)v3);
2611:   return(0);
2612: }

2614: /*@
2615:    MatMultConstrained - The inner multiplication routine for a
2616:    constrained matrix P^T A P.

2618:    Neighbor-wise Collective on Mat and Vec

2620:    Input Parameters:
2621: +  mat - the matrix
2622: -  x   - the vector to be multiplied

2624:    Output Parameters:
2625: .  y - the result

2627:    Notes:
2628:    The vectors x and y cannot be the same.  I.e., one cannot
2629:    call MatMultConstrained(A,y,y).

2631:    Level: beginner

2633: .keywords: matrix, multiply, matrix-vector product, constraint
2634: .seealso: MatMult(), MatMultTranspose(), MatMultAdd(), MatMultTransposeAdd()
2635: @*/
2636: PetscErrorCode MatMultConstrained(Mat mat,Vec x,Vec y)
2637: {

2644:   if (!mat->assembled) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2645:   if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2646:   if (x == y) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"x and y must be different vectors");
2647:   if (mat->cmap->N != x->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->cmap->N,x->map->N);
2648:   if (mat->rmap->N != y->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec y: global dim %D %D",mat->rmap->N,y->map->N);
2649:   if (mat->rmap->n != y->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec y: local dim %D %D",mat->rmap->n,y->map->n);

2651:   PetscLogEventBegin(MAT_MultConstrained,mat,x,y,0);
2652:   VecLockPush(x);
2653:   (*mat->ops->multconstrained)(mat,x,y);
2654:   VecLockPop(x);
2655:   PetscLogEventEnd(MAT_MultConstrained,mat,x,y,0);
2656:   PetscObjectStateIncrease((PetscObject)y);
2657:   return(0);
2658: }

2660: /*@
2661:    MatMultTransposeConstrained - The inner multiplication routine for a
2662:    constrained matrix P^T A^T P.

2664:    Neighbor-wise Collective on Mat and Vec

2666:    Input Parameters:
2667: +  mat - the matrix
2668: -  x   - the vector to be multiplied

2670:    Output Parameters:
2671: .  y - the result

2673:    Notes:
2674:    The vectors x and y cannot be the same.  I.e., one cannot
2675:    call MatMultTransposeConstrained(A,y,y).

2677:    Level: beginner

2679: .keywords: matrix, multiply, matrix-vector product, constraint
2680: .seealso: MatMult(), MatMultTranspose(), MatMultAdd(), MatMultTransposeAdd()
2681: @*/
2682: PetscErrorCode MatMultTransposeConstrained(Mat mat,Vec x,Vec y)
2683: {

2690:   if (!mat->assembled) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2691:   if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2692:   if (x == y) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"x and y must be different vectors");
2693:   if (mat->rmap->N != x->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->rmap->N,x->map->N);
2694:   if (mat->cmap->N != y->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec y: global dim %D %D",mat->cmap->N,y->map->N);

2696:   PetscLogEventBegin(MAT_MultConstrained,mat,x,y,0);
2697:   (*mat->ops->multtransposeconstrained)(mat,x,y);
2698:   PetscLogEventEnd(MAT_MultConstrained,mat,x,y,0);
2699:   PetscObjectStateIncrease((PetscObject)y);
2700:   return(0);
2701: }

2703: /*@C
2704:    MatGetFactorType - gets the type of factorization of a matrix

2706:    Not Collective

2709:    Input Parameters:
2710: .  mat - the matrix

2712:    Output Parameters:
2713: .  t - the type, one of MAT_FACTOR_NONE, MAT_FACTOR_LU, MAT_FACTOR_CHOLESKY, MAT_FACTOR_ILU, MAT_FACTOR_ICC, MAT_FACTOR_ILUDT

2715:     Level: intermediate

2717: .seealso:    MatFactorType, MatGetFactor()
2718: @*/
2719: PetscErrorCode MatGetFactorType(Mat mat,MatFactorType *t)
2720: {
2724:   *t = mat->factortype;
2725:   return(0);
2726: }

2728: /* ------------------------------------------------------------*/
2729: /*@C
2730:    MatGetInfo - Returns information about matrix storage (number of
2731:    nonzeros, memory, etc.).

2733:    Collective on Mat if MAT_GLOBAL_MAX or MAT_GLOBAL_SUM is used as the flag

2735:    Input Parameters:
2736: +  mat - the matrix
2737: -  flag - flag indicating the type of parameters to be returned
2738:    (MAT_LOCAL - local matrix, MAT_GLOBAL_MAX - maximum over all processors,
2739:    MAT_GLOBAL_SUM - sum over all processors)

2741:    Output Parameter:
2742: .  info - matrix information context

2744:    Notes:
2745:    The MatInfo context contains a variety of matrix data, including
2746:    number of nonzeros allocated and used, number of mallocs during
2747:    matrix assembly, etc.  Additional information for factored matrices
2748:    is provided (such as the fill ratio, number of mallocs during
2749:    factorization, etc.).  Much of this info is printed to PETSC_STDOUT
2750:    when using the runtime options
2751: $       -info -mat_view ::ascii_info

2753:    Example for C/C++ Users:
2754:    See the file ${PETSC_DIR}/include/petscmat.h for a complete list of
2755:    data within the MatInfo context.  For example,
2756: .vb
2757:       MatInfo info;
2758:       Mat     A;
2759:       double  mal, nz_a, nz_u;

2761:       MatGetInfo(A,MAT_LOCAL,&info);
2762:       mal  = info.mallocs;
2763:       nz_a = info.nz_allocated;
2764: .ve

2766:    Example for Fortran Users:
2767:    Fortran users should declare info as a double precision
2768:    array of dimension MAT_INFO_SIZE, and then extract the parameters
2769:    of interest.  See the file ${PETSC_DIR}/include/petsc/finclude/petscmat.h
2770:    for a complete list of parameter names.
2771: .vb
2772:       double  precision info(MAT_INFO_SIZE)
2773:       double  precision mal, nz_a
2774:       Mat     A
2775:       integer ierr

2777:       call MatGetInfo(A,MAT_LOCAL,info,ierr)
2778:       mal = info(MAT_INFO_MALLOCS)
2779:       nz_a = info(MAT_INFO_NZ_ALLOCATED)
2780: .ve

2782:     Level: intermediate

2784:     Concepts: matrices^getting information on

2786:     Developer Note: fortran interface is not autogenerated as the f90
2787:     interface definition cannot be generated correctly [due to MatInfo]

2789: .seealso: MatStashGetInfo()

2791: @*/
2792: PetscErrorCode MatGetInfo(Mat mat,MatInfoType flag,MatInfo *info)
2793: {

2800:   if (!mat->ops->getinfo) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
2801:   MatCheckPreallocated(mat,1);
2802:   (*mat->ops->getinfo)(mat,flag,info);
2803:   return(0);
2804: }

2806: /*
2807:    This is used by external packages where it is not easy to get the info from the actual
2808:    matrix factorization.
2809: */
2810: PetscErrorCode MatGetInfo_External(Mat A,MatInfoType flag,MatInfo *info)
2811: {

2815:   PetscMemzero(info,sizeof(MatInfo));
2816:   return(0);
2817: }

2819: /* ----------------------------------------------------------*/

2821: /*@C
2822:    MatLUFactor - Performs in-place LU factorization of matrix.

2824:    Collective on Mat

2826:    Input Parameters:
2827: +  mat - the matrix
2828: .  row - row permutation
2829: .  col - column permutation
2830: -  info - options for factorization, includes
2831: $          fill - expected fill as ratio of original fill.
2832: $          dtcol - pivot tolerance (0 no pivot, 1 full column pivoting)
2833: $                   Run with the option -info to determine an optimal value to use

2835:    Notes:
2836:    Most users should employ the simplified KSP interface for linear solvers
2837:    instead of working directly with matrix algebra routines such as this.
2838:    See, e.g., KSPCreate().

2840:    This changes the state of the matrix to a factored matrix; it cannot be used
2841:    for example with MatSetValues() unless one first calls MatSetUnfactored().

2843:    Level: developer

2845:    Concepts: matrices^LU factorization

2847: .seealso: MatLUFactorSymbolic(), MatLUFactorNumeric(), MatCholeskyFactor(),
2848:           MatGetOrdering(), MatSetUnfactored(), MatFactorInfo, MatGetFactor()

2850:     Developer Note: fortran interface is not autogenerated as the f90
2851:     interface definition cannot be generated correctly [due to MatFactorInfo]

2853: @*/
2854: PetscErrorCode MatLUFactor(Mat mat,IS row,IS col,const MatFactorInfo *info)
2855: {
2857:   MatFactorInfo  tinfo;

2865:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2866:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2867:   if (!mat->ops->lufactor) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
2868:   MatCheckPreallocated(mat,1);
2869:   if (!info) {
2870:     MatFactorInfoInitialize(&tinfo);
2871:     info = &tinfo;
2872:   }

2874:   PetscLogEventBegin(MAT_LUFactor,mat,row,col,0);
2875:   (*mat->ops->lufactor)(mat,row,col,info);
2876:   PetscLogEventEnd(MAT_LUFactor,mat,row,col,0);
2877:   PetscObjectStateIncrease((PetscObject)mat);
2878:   return(0);
2879: }

2881: /*@C
2882:    MatILUFactor - Performs in-place ILU factorization of matrix.

2884:    Collective on Mat

2886:    Input Parameters:
2887: +  mat - the matrix
2888: .  row - row permutation
2889: .  col - column permutation
2890: -  info - structure containing
2891: $      levels - number of levels of fill.
2892: $      fill - expected fill as ratio of original fill.
2893: $      1 or 0 - indicating force fill on diagonal (improves robustness for matrices
2894:                 missing diagonal entries)

2896:    Notes:
2897:    Probably really in-place only when level of fill is zero, otherwise allocates
2898:    new space to store factored matrix and deletes previous memory.

2900:    Most users should employ the simplified KSP interface for linear solvers
2901:    instead of working directly with matrix algebra routines such as this.
2902:    See, e.g., KSPCreate().

2904:    Level: developer

2906:    Concepts: matrices^ILU factorization

2908: .seealso: MatILUFactorSymbolic(), MatLUFactorNumeric(), MatCholeskyFactor(), MatFactorInfo

2910:     Developer Note: fortran interface is not autogenerated as the f90
2911:     interface definition cannot be generated correctly [due to MatFactorInfo]

2913: @*/
2914: PetscErrorCode MatILUFactor(Mat mat,IS row,IS col,const MatFactorInfo *info)
2915: {

2924:   if (mat->rmap->N != mat->cmap->N) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONG,"matrix must be square");
2925:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2926:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2927:   if (!mat->ops->ilufactor) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
2928:   MatCheckPreallocated(mat,1);

2930:   PetscLogEventBegin(MAT_ILUFactor,mat,row,col,0);
2931:   (*mat->ops->ilufactor)(mat,row,col,info);
2932:   PetscLogEventEnd(MAT_ILUFactor,mat,row,col,0);
2933:   PetscObjectStateIncrease((PetscObject)mat);
2934:   return(0);
2935: }

2937: /*@C
2938:    MatLUFactorSymbolic - Performs symbolic LU factorization of matrix.
2939:    Call this routine before calling MatLUFactorNumeric().

2941:    Collective on Mat

2943:    Input Parameters:
2944: +  fact - the factor matrix obtained with MatGetFactor()
2945: .  mat - the matrix
2946: .  row, col - row and column permutations
2947: -  info - options for factorization, includes
2948: $          fill - expected fill as ratio of original fill.
2949: $          dtcol - pivot tolerance (0 no pivot, 1 full column pivoting)
2950: $                   Run with the option -info to determine an optimal value to use


2953:    Notes: See Users-Manual: ch_mat for additional information about choosing the fill factor for better efficiency.

2955:    Most users should employ the simplified KSP interface for linear solvers
2956:    instead of working directly with matrix algebra routines such as this.
2957:    See, e.g., KSPCreate().
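
   A sketch of the complete factor-and-solve sequence (the ordering type and solver package are illustrative choices,
   and b and x are assumed to be vectors compatible with A):
.vb
     Mat           F;
     IS            rowperm,colperm;
     MatFactorInfo info;
     MatGetFactor(A,MATSOLVERPETSC,MAT_FACTOR_LU,&F);
     MatGetOrdering(A,MATORDERINGND,&rowperm,&colperm);
     MatFactorInfoInitialize(&info);
     MatLUFactorSymbolic(F,A,rowperm,colperm,&info);
     MatLUFactorNumeric(F,A,&info);
     MatSolve(F,b,x);
.ve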

2959:    Level: developer

2961:    Concepts: matrices^LU symbolic factorization

2963: .seealso: MatLUFactor(), MatLUFactorNumeric(), MatCholeskyFactor(), MatFactorInfo, MatFactorInfoInitialize()

2965:     Developer Note: fortran interface is not autogenerated as the f90
2966:     interface definition cannot be generated correctly [due to MatFactorInfo]

2968: @*/
2969: PetscErrorCode MatLUFactorSymbolic(Mat fact,Mat mat,IS row,IS col,const MatFactorInfo *info)
2970: {

2980:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2981:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2982:   if (!(fact)->ops->lufactorsymbolic) {
2983:     const MatSolverPackage spackage;
2984:     MatFactorGetSolverPackage(fact,&spackage);
2985:     SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Matrix type %s symbolic LU using solver package %s",((PetscObject)mat)->type_name,spackage);
2986:   }
2987:   MatCheckPreallocated(mat,2);

2989:   PetscLogEventBegin(MAT_LUFactorSymbolic,mat,row,col,0);
2990:   (fact->ops->lufactorsymbolic)(fact,mat,row,col,info);
2991:   PetscLogEventEnd(MAT_LUFactorSymbolic,mat,row,col,0);
2992:   PetscObjectStateIncrease((PetscObject)fact);
2993:   return(0);
2994: }

2996: /*@C
2997:    MatLUFactorNumeric - Performs numeric LU factorization of a matrix.
2998:    Call this routine after first calling MatLUFactorSymbolic().

3000:    Collective on Mat

3002:    Input Parameters:
3003: +  fact - the factor matrix obtained with MatGetFactor()
3004: .  mat - the matrix
3005: -  info - options for factorization

3007:    Notes:
3008:    See MatLUFactor() for in-place factorization.  See
3009:    MatCholeskyFactorNumeric() for the symmetric, positive definite case.

3011:    Most users should employ the simplified KSP interface for linear solvers
3012:    instead of working directly with matrix algebra routines such as this.
3013:    See, e.g., KSPCreate().

3015:    Level: developer

3017:    Concepts: matrices^LU numeric factorization

3019: .seealso: MatLUFactorSymbolic(), MatLUFactor(), MatCholeskyFactor()

3021:     Developer Note: fortran interface is not autogenerated as the f90
3022:     interface definition cannot be generated correctly [due to MatFactorInfo]

3024: @*/
3025: PetscErrorCode MatLUFactorNumeric(Mat fact,Mat mat,const MatFactorInfo *info)
3026: {

3034:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
3035:   if (mat->rmap->N != (fact)->rmap->N || mat->cmap->N != (fact)->cmap->N) SETERRQ4(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Mat fact: global dimensions are different %D should = %D %D should = %D",mat->rmap->N,(fact)->rmap->N,mat->cmap->N,(fact)->cmap->N);

3037:   if (!(fact)->ops->lufactornumeric) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s numeric LU",((PetscObject)mat)->type_name);
3038:   MatCheckPreallocated(mat,2);
3039:   PetscLogEventBegin(MAT_LUFactorNumeric,mat,fact,0,0);
3040:   (fact->ops->lufactornumeric)(fact,mat,info);
3041:   PetscLogEventEnd(MAT_LUFactorNumeric,mat,fact,0,0);
3042:   MatViewFromOptions(fact,NULL,"-mat_factor_view");
3043:   PetscObjectStateIncrease((PetscObject)fact);
3044:   return(0);
3045: }

3047: /*@C
3048:    MatCholeskyFactor - Performs in-place Cholesky factorization of a
3049:    symmetric matrix.

3051:    Collective on Mat

3053:    Input Parameters:
3054: +  mat - the matrix
3055: .  perm - row and column permutations
3056: -  info - options for factorization, including the expected fill as ratio of original fill (see MatFactorInfo)

3058:    Notes:
3059:    See MatLUFactor() for the nonsymmetric case.  See also
3060:    MatCholeskyFactorSymbolic(), and MatCholeskyFactorNumeric().

3062:    Most users should employ the simplified KSP interface for linear solvers
3063:    instead of working directly with matrix algebra routines such as this.
3064:    See, e.g., KSPCreate().

3066:    Level: developer

3068:    Concepts: matrices^Cholesky factorization

3070: .seealso: MatLUFactor(), MatCholeskyFactorSymbolic(), MatCholeskyFactorNumeric()
3071:           MatGetOrdering()

3073:     Developer Note: fortran interface is not autogenerated as the f90
3074:     interface definition cannot be generated correctly [due to MatFactorInfo]

3076: @*/
3077: PetscErrorCode MatCholeskyFactor(Mat mat,IS perm,const MatFactorInfo *info)
3078: {

3086:   if (mat->rmap->N != mat->cmap->N) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONG,"Matrix must be square");
3087:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
3088:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
3089:   if (!mat->ops->choleskyfactor) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
3090:   MatCheckPreallocated(mat,1);

3092:   PetscLogEventBegin(MAT_CholeskyFactor,mat,perm,0,0);
3093:   (*mat->ops->choleskyfactor)(mat,perm,info);
3094:   PetscLogEventEnd(MAT_CholeskyFactor,mat,perm,0,0);
3095:   PetscObjectStateIncrease((PetscObject)mat);
3096:   return(0);
3097: }

3099: /*@C
3100:    MatCholeskyFactorSymbolic - Performs symbolic Cholesky factorization
3101:    of a symmetric matrix.

3103:    Collective on Mat

3105:    Input Parameters:
3106: +  fact - the factor matrix obtained with MatGetFactor()
3107: .  mat - the matrix
3108: .  perm - row and column permutations
3109: -  info - options for factorization, includes
3110: $          fill - expected fill as ratio of original fill.
3111: $          dtcol - pivot tolerance (0 no pivot, 1 full column pivoting)
3112: $                   Run with the option -info to determine an optimal value to use

3114:    Notes:
3115:    See MatLUFactorSymbolic() for the nonsymmetric case.  See also
3116:    MatCholeskyFactor() and MatCholeskyFactorNumeric().

3118:    Most users should employ the simplified KSP interface for linear solvers
3119:    instead of working directly with matrix algebra routines such as this.
3120:    See, e.g., KSPCreate().
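
   A sketch of the analogous Cholesky sequence (the ordering and solver package are illustrative; A is assumed symmetric,
   and b and x are vectors compatible with A):
.vb
     Mat           F;
     IS            perm,cperm;
     MatFactorInfo info;
     MatGetFactor(A,MATSOLVERPETSC,MAT_FACTOR_CHOLESKY,&F);
     MatGetOrdering(A,MATORDERINGNATURAL,&perm,&cperm);
     MatFactorInfoInitialize(&info);
     MatCholeskyFactorSymbolic(F,A,perm,&info);
     MatCholeskyFactorNumeric(F,A,&info);
     MatSolve(F,b,x);
.ve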

3122:    Level: developer

3124:    Concepts: matrices^Cholesky symbolic factorization

3126: .seealso: MatLUFactorSymbolic(), MatCholeskyFactor(), MatCholeskyFactorNumeric()
3127:           MatGetOrdering()

3129:     Developer Note: fortran interface is not autogenerated as the f90
3130:     interface definition cannot be generated correctly [due to MatFactorInfo]

3132: @*/
3133: PetscErrorCode MatCholeskyFactorSymbolic(Mat fact,Mat mat,IS perm,const MatFactorInfo *info)
3134: {

3143:   if (mat->rmap->N != mat->cmap->N) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONG,"Matrix must be square");
3144:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
3145:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
3146:   if (!(fact)->ops->choleskyfactorsymbolic) {
3147:     const MatSolverPackage spackage;
3148:     MatFactorGetSolverPackage(fact,&spackage);
3149:     SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s symbolic factor Cholesky using solver package %s",((PetscObject)mat)->type_name,spackage);
3150:   }
3151:   MatCheckPreallocated(mat,2);

3153:   PetscLogEventBegin(MAT_CholeskyFactorSymbolic,mat,perm,0,0);
3154:   (fact->ops->choleskyfactorsymbolic)(fact,mat,perm,info);
3155:   PetscLogEventEnd(MAT_CholeskyFactorSymbolic,mat,perm,0,0);
3156:   PetscObjectStateIncrease((PetscObject)fact);
3157:   return(0);
3158: }

3160: /*@C
3161:    MatCholeskyFactorNumeric - Performs numeric Cholesky factorization
3162:    of a symmetric matrix. Call this routine after first calling
3163:    MatCholeskyFactorSymbolic().

3165:    Collective on Mat

3167:    Input Parameters:
3168: +  fact - the factor matrix obtained with MatGetFactor(), containing the symbolic factor of mat computed by MatCholeskyFactorSymbolic()
3169: .  mat - the initial matrix
3170: -  info - options for factorization


3174:    Notes:
3175:    Most users should employ the simplified KSP interface for linear solvers
3176:    instead of working directly with matrix algebra routines such as this.
3177:    See, e.g., KSPCreate().

3179:    Level: developer

3181:    Concepts: matrices^Cholesky numeric factorization

3183: .seealso: MatCholeskyFactorSymbolic(), MatCholeskyFactor(), MatLUFactorNumeric()

3185:     Developer Note: Fortran interface is not autogenerated as the f90
3186:     interface definition cannot be generated correctly [due to MatFactorInfo]

3188: @*/
3189: PetscErrorCode MatCholeskyFactorNumeric(Mat fact,Mat mat,const MatFactorInfo *info)
3190: {

3198:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
3199:   if (!(fact)->ops->choleskyfactornumeric) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s numeric factor Cholesky",((PetscObject)mat)->type_name);
3200:   if (mat->rmap->N != (fact)->rmap->N || mat->cmap->N != (fact)->cmap->N) SETERRQ4(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Mat fact: global dim %D should = %D %D should = %D",mat->rmap->N,(fact)->rmap->N,mat->cmap->N,(fact)->cmap->N);
3201:   MatCheckPreallocated(mat,2);

3203:   PetscLogEventBegin(MAT_CholeskyFactorNumeric,mat,fact,0,0);
3204:   (fact->ops->choleskyfactornumeric)(fact,mat,info);
3205:   PetscLogEventEnd(MAT_CholeskyFactorNumeric,mat,fact,0,0);
3206:   MatViewFromOptions(fact,NULL,"-mat_factor_view");
3207:   PetscObjectStateIncrease((PetscObject)fact);
3208:   return(0);
3209: }

3211: /* ----------------------------------------------------------------*/
3212: /*@
3213:    MatSolve - Solves A x = b, given a factored matrix.

3215:    Neighbor-wise Collective on Mat and Vec

3217:    Input Parameters:
3218: +  mat - the factored matrix
3219: -  b - the right-hand-side vector

3221:    Output Parameter:
3222: .  x - the result vector

3224:    Notes:
3225:    The vectors b and x cannot be the same.  I.e., one cannot
3226:    call MatSolve(A,x,x).

3229:    Most users should employ the simplified KSP interface for linear solvers
3230:    instead of working directly with matrix algebra routines such as this.
3231:    See, e.g., KSPCreate().
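
   Example of Usage (a minimal sketch; assumes mat is a factored matrix obtained with MatGetFactor() followed by symbolic and numeric factorization; error checking omitted):
.vb
     Vec b,x;

     MatCreateVecs(mat,&x,&b);
     VecSet(b,1.0);
     MatSolve(mat,b,x);
.ve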

3233:    Level: developer

3235:    Concepts: matrices^triangular solves

3237: .seealso: MatSolveAdd(), MatSolveTranspose(), MatSolveTransposeAdd()
3238: @*/
3239: PetscErrorCode MatSolve(Mat mat,Vec b,Vec x)
3240: {

3250:   if (x == b) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"x and b must be different vectors");
3251:   if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
3252:   if (mat->cmap->N != x->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->cmap->N,x->map->N);
3253:   if (mat->rmap->N != b->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: global dim %D %D",mat->rmap->N,b->map->N);
3254:   if (mat->rmap->n != b->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: local dim %D %D",mat->rmap->n,b->map->n);
3255:   if (!mat->rmap->N && !mat->cmap->N) return(0);
3256:   if (!mat->ops->solve) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
3257:   MatCheckPreallocated(mat,1);

3259:   PetscLogEventBegin(MAT_Solve,mat,b,x,0);
3260:   if (mat->factorerrortype) {
3261:     PetscInfo1(mat,"MatFactorError %D\n",mat->factorerrortype);
3262:     VecSetInf(x);
3263:   } else {
3264:     (*mat->ops->solve)(mat,b,x);
3265:   }
3266:   PetscLogEventEnd(MAT_Solve,mat,b,x,0);
3267:   PetscObjectStateIncrease((PetscObject)x);
3268:   return(0);
3269: }

3271: static PetscErrorCode MatMatSolve_Basic(Mat A,Mat B,Mat X, PetscBool trans)
3272: {
3274:   Vec            b,x;
3275:   PetscInt       m,N,i;
3276:   PetscScalar    *bb,*xx;
3277:   PetscBool      flg;

3280:   PetscObjectTypeCompareAny((PetscObject)B,&flg,MATSEQDENSE,MATMPIDENSE,NULL);
3281:   if (!flg) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONG,"Matrix B must be MATDENSE matrix");
3282:   PetscObjectTypeCompareAny((PetscObject)X,&flg,MATSEQDENSE,MATMPIDENSE,NULL);
3283:   if (!flg) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONG,"Matrix X must be MATDENSE matrix");

3285:   MatDenseGetArray(B,&bb);
3286:   MatDenseGetArray(X,&xx);
3287:   MatGetLocalSize(B,&m,NULL);  /* number local rows */
3288:   MatGetSize(B,NULL,&N);       /* total columns in dense matrix */
3289:   MatCreateVecs(A,&x,&b);
3290:   for (i=0; i<N; i++) {
3291:     VecPlaceArray(b,bb + i*m);
3292:     VecPlaceArray(x,xx + i*m);
3293:     if (trans) {
3294:       MatSolveTranspose(A,b,x);
3295:     } else {
3296:       MatSolve(A,b,x);
3297:     }
3298:     VecResetArray(x);
3299:     VecResetArray(b);
3300:   }
3301:   VecDestroy(&b);
3302:   VecDestroy(&x);
3303:   MatDenseRestoreArray(B,&bb);
3304:   MatDenseRestoreArray(X,&xx);
3305:   return(0);
3306: }

3308: /*@
3309:    MatMatSolve - Solves A X = B, given a factored matrix.

3311:    Neighbor-wise Collective on Mat

3313:    Input Parameters:
3314: +  A - the factored matrix
3315: -  B - the right-hand-side matrix  (dense matrix)

3317:    Output Parameter:
3318: .  X - the result matrix (dense matrix)

3320:    Notes:
3321:    The matrices B and X cannot be the same.  I.e., one cannot
3322:    call MatMatSolve(A,X,X).

3325:    Most users should employ the simplified KSP interface for linear solvers
3326:    instead of working directly with matrix algebra routines such as this.
3327:    See, e.g., KSPCreate(). However KSP can only solve for one vector (column of X)
3328:    at a time.

3330:    When using SuperLU_Dist as a parallel solver PETSc will use the SuperLU_Dist functionality to solve multiple right hand sides simultaneously. For MUMPS
3331:    it calls a separate solve for each right hand side since MUMPS does not yet support distributed right hand sides.

3333:    Since the resulting matrix X must always be dense we do not support sparse representation of the matrix B.
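
   Example of Usage (a minimal sketch; A is the factored matrix, nrhs is an assumed number of right-hand sides, error checking omitted):
.vb
     Mat      B,X;
     PetscInt m,M;

     MatGetLocalSize(A,&m,NULL);
     MatGetSize(A,&M,NULL);
     MatCreateDense(PetscObjectComm((PetscObject)A),m,PETSC_DECIDE,M,nrhs,NULL,&B);
     /* fill B with MatSetValues() and assemble it with MatAssemblyBegin()/MatAssemblyEnd() */
     MatDuplicate(B,MAT_DO_NOT_COPY_VALUES,&X);
     MatMatSolve(A,B,X);
.ve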

3335:    Level: developer

3337:    Concepts: matrices^triangular solves

3339: .seealso: MatMatSolveTranspose(), MatLUFactor(), MatCholeskyFactor()
3340: @*/
3341: PetscErrorCode MatMatSolve(Mat A,Mat B,Mat X)
3342: {

3352:   if (X == B) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_IDN,"X and B must be different matrices");
3353:   if (!A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
3354:   if (A->cmap->N != X->rmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Mat A,Mat X: global dim %D %D",A->cmap->N,X->rmap->N);
3355:   if (A->rmap->N != B->rmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Mat A,Mat B: global dim %D %D",A->rmap->N,B->rmap->N);
3356:   if (A->rmap->n != B->rmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat A,Mat B: local dim %D %D",A->rmap->n,B->rmap->n);
3357:   if (X->cmap->N < B->cmap->N) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Solution matrix must have same number of columns as rhs matrix");
3358:   if (!A->rmap->N && !A->cmap->N) return(0);
3359:   MatCheckPreallocated(A,1);

3361:   PetscLogEventBegin(MAT_MatSolve,A,B,X,0);
3362:   if (!A->ops->matsolve) {
3363:     PetscInfo1(A,"Mat type %s using basic MatMatSolve\n",((PetscObject)A)->type_name);
3364:     MatMatSolve_Basic(A,B,X,PETSC_FALSE);
3365:   } else {
3366:     (*A->ops->matsolve)(A,B,X);
3367:   }
3368:   PetscLogEventEnd(MAT_MatSolve,A,B,X,0);
3369:   PetscObjectStateIncrease((PetscObject)X);
3370:   return(0);
3371: }

3373: /*@
3374:    MatMatSolveTranspose - Solves A^T X = B, given a factored matrix.

3376:    Neighbor-wise Collective on Mat

3378:    Input Parameters:
3379: +  A - the factored matrix
3380: -  B - the right-hand-side matrix  (dense matrix)

3382:    Output Parameter:
3383: .  X - the result matrix (dense matrix)

3385:    Notes:
3386:    The matrices B and X cannot be the same.  I.e., one cannot
3387:    call MatMatSolveTranspose(A,X,X).

3390:    Most users should employ the simplified KSP interface for linear solvers
3391:    instead of working directly with matrix algebra routines such as this.
3392:    See, e.g., KSPCreate(). However KSP can only solve for one vector (column of X)
3393:    at a time.

3395:    When using SuperLU_Dist as a parallel solver PETSc will use the SuperLU_Dist functionality to solve multiple right hand sides simultaneously. For MUMPS
3396:    it calls a separate solve for each right hand side since MUMPS does not yet support distributed right hand sides.

3398:    Since the resulting matrix X must always be dense we do not support sparse representation of the matrix B.

3400:    Level: developer

3402:    Concepts: matrices^triangular solves

3404: .seealso: MatMatSolve(), MatLUFactor(), MatCholeskyFactor()
3405: @*/
3406: PetscErrorCode MatMatSolveTranspose(Mat A,Mat B,Mat X)
3407: {

3417:   if (X == B) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_IDN,"X and B must be different matrices");
3418:   if (!A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
3419:   if (A->cmap->N != X->rmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Mat A,Mat X: global dim %D %D",A->cmap->N,X->rmap->N);
3420:   if (A->rmap->N != B->rmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Mat A,Mat B: global dim %D %D",A->rmap->N,B->rmap->N);
3421:   if (A->rmap->n != B->rmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat A,Mat B: local dim %D %D",A->rmap->n,B->rmap->n);
3422:   if (X->cmap->N < B->cmap->N) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Solution matrix must have same number of columns as rhs matrix");
3423:   if (!A->rmap->N && !A->cmap->N) return(0);
3424:   MatCheckPreallocated(A,1);

3426:   PetscLogEventBegin(MAT_MatSolve,A,B,X,0);
3427:   if (!A->ops->matsolvetranspose) {
3428:     PetscInfo1(A,"Mat type %s using basic MatMatSolveTranspose\n",((PetscObject)A)->type_name);
3429:     MatMatSolve_Basic(A,B,X,PETSC_TRUE);
3430:   } else {
3431:     (*A->ops->matsolvetranspose)(A,B,X);
3432:   }
3433:   PetscLogEventEnd(MAT_MatSolve,A,B,X,0);
3434:   PetscObjectStateIncrease((PetscObject)X);
3435:   return(0);
3436: }

3438: /*@
3439:    MatForwardSolve - Solves L x = b, given a factored matrix, A = LU, or
3440:                             U^T*D^(1/2) x = b, given a factored symmetric matrix, A = U^T*D*U.

3442:    Neighbor-wise Collective on Mat and Vec

3444:    Input Parameters:
3445: +  mat - the factored matrix
3446: -  b - the right-hand-side vector

3448:    Output Parameter:
3449: .  x - the result vector

3451:    Notes:
3452:    MatSolve() should be used for most applications, as it performs
3453:    a forward solve followed by a backward solve.

3455:    The vectors b and x cannot be the same,  i.e., one cannot
3456:    call MatForwardSolve(A,x,x).

3458:    For a matrix in seqsbaij format with block size larger than 1,
3459:    the diagonal blocks are not implemented as D = D^(1/2) * D^(1/2) yet.
3460:    MatForwardSolve() solves U^T*D y = b, and
3461:    MatBackwardSolve() solves U x = y.
3462:    Thus they do not provide a symmetric preconditioner.

3464:    Most users should employ the simplified KSP interface for linear solvers
3465:    instead of working directly with matrix algebra routines such as this.
3466:    See, e.g., KSPCreate().
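
   Example of Usage (a minimal sketch; F is an LU-factored matrix and b, x are compatible vectors; error checking omitted):
.vb
     Vec y;

     VecDuplicate(x,&y);
     MatForwardSolve(F,b,y);    /* y = L^(-1) b              */
     MatBackwardSolve(F,y,x);   /* x = U^(-1) y, so A x = b  */
.ve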

3468:    Level: developer

3470:    Concepts: matrices^forward solves

3472: .seealso: MatSolve(), MatBackwardSolve()
3473: @*/
3474: PetscErrorCode MatForwardSolve(Mat mat,Vec b,Vec x)
3475: {

3485:   if (x == b) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"x and b must be different vectors");
3486:   if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
3487:   if (!mat->ops->forwardsolve) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
3488:   if (mat->cmap->N != x->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->cmap->N,x->map->N);
3489:   if (mat->rmap->N != b->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: global dim %D %D",mat->rmap->N,b->map->N);
3490:   if (mat->rmap->n != b->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: local dim %D %D",mat->rmap->n,b->map->n);
3491:   MatCheckPreallocated(mat,1);
3492:   PetscLogEventBegin(MAT_ForwardSolve,mat,b,x,0);
3493:   (*mat->ops->forwardsolve)(mat,b,x);
3494:   PetscLogEventEnd(MAT_ForwardSolve,mat,b,x,0);
3495:   PetscObjectStateIncrease((PetscObject)x);
3496:   return(0);
3497: }

3499: /*@
3500:    MatBackwardSolve - Solves U x = b, given a factored matrix, A = LU.
3501:                              or D^(1/2) U x = b, given a factored symmetric matrix, A = U^T*D*U.

3503:    Neighbor-wise Collective on Mat and Vec

3505:    Input Parameters:
3506: +  mat - the factored matrix
3507: -  b - the right-hand-side vector

3509:    Output Parameter:
3510: .  x - the result vector

3512:    Notes:
3513:    MatSolve() should be used for most applications, as it performs
3514:    a forward solve followed by a backward solve.

3516:    The vectors b and x cannot be the same.  I.e., one cannot
3517:    call MatBackwardSolve(A,x,x).

3519:    For a matrix in seqsbaij format with block size larger than 1,
3520:    the diagonal blocks are not implemented as D = D^(1/2) * D^(1/2) yet.
3521:    MatForwardSolve() solves U^T*D y = b, and
3522:    MatBackwardSolve() solves U x = y.
3523:    Thus they do not provide a symmetric preconditioner.

3525:    Most users should employ the simplified KSP interface for linear solvers
3526:    instead of working directly with matrix algebra routines such as this.
3527:    See, e.g., KSPCreate().

3529:    Level: developer

3531:    Concepts: matrices^backward solves

3533: .seealso: MatSolve(), MatForwardSolve()
3534: @*/
3535: PetscErrorCode MatBackwardSolve(Mat mat,Vec b,Vec x)
3536: {

3546:   if (x == b) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"x and b must be different vectors");
3547:   if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
3548:   if (!mat->ops->backwardsolve) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
3549:   if (mat->cmap->N != x->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->cmap->N,x->map->N);
3550:   if (mat->rmap->N != b->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: global dim %D %D",mat->rmap->N,b->map->N);
3551:   if (mat->rmap->n != b->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: local dim %D %D",mat->rmap->n,b->map->n);
3552:   MatCheckPreallocated(mat,1);

3554:   PetscLogEventBegin(MAT_BackwardSolve,mat,b,x,0);
3555:   (*mat->ops->backwardsolve)(mat,b,x);
3556:   PetscLogEventEnd(MAT_BackwardSolve,mat,b,x,0);
3557:   PetscObjectStateIncrease((PetscObject)x);
3558:   return(0);
3559: }

3561: /*@
3562:    MatSolveAdd - Computes x = y + inv(A)*b, given a factored matrix.

3564:    Neighbor-wise Collective on Mat and Vec

3566:    Input Parameters:
3567: +  mat - the factored matrix
3568: .  b - the right-hand-side vector
3569: -  y - the vector to be added to

3571:    Output Parameter:
3572: .  x - the result vector

3574:    Notes:
3575:    The vectors b and x cannot be the same.  I.e., one cannot
3576:    call MatSolveAdd(A,x,y,x).

3578:    Most users should employ the simplified KSP interface for linear solvers
3579:    instead of working directly with matrix algebra routines such as this.
3580:    See, e.g., KSPCreate().

3582:    Level: developer

3584:    Concepts: matrices^triangular solves

3586: .seealso: MatSolve(), MatSolveTranspose(), MatSolveTransposeAdd()
3587: @*/
3588: PetscErrorCode MatSolveAdd(Mat mat,Vec b,Vec y,Vec x)
3589: {
3590:   PetscScalar    one = 1.0;
3591:   Vec            tmp;

3603:   if (x == b) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"x and b must be different vectors");
3604:   if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
3605:   if (mat->cmap->N != x->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->cmap->N,x->map->N);
3606:   if (mat->rmap->N != b->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: global dim %D %D",mat->rmap->N,b->map->N);
3607:   if (mat->rmap->N != y->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec y: global dim %D %D",mat->rmap->N,y->map->N);
3608:   if (mat->rmap->n != b->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: local dim %D %D",mat->rmap->n,b->map->n);
3609:   if (x->map->n != y->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Vec x,Vec y: local dim %D %D",x->map->n,y->map->n);
3610:   MatCheckPreallocated(mat,1);

3612:   PetscLogEventBegin(MAT_SolveAdd,mat,b,x,y);
3613:   if (mat->ops->solveadd) {
3614:     (*mat->ops->solveadd)(mat,b,y,x);
3615:   } else {
3616:     /* do the solve then the add manually */
3617:     if (x != y) {
3618:       MatSolve(mat,b,x);
3619:       VecAXPY(x,one,y);
3620:     } else {
3621:       VecDuplicate(x,&tmp);
3622:       PetscLogObjectParent((PetscObject)mat,(PetscObject)tmp);
3623:       VecCopy(x,tmp);
3624:       MatSolve(mat,b,x);
3625:       VecAXPY(x,one,tmp);
3626:       VecDestroy(&tmp);
3627:     }
3628:   }
3629:   PetscLogEventEnd(MAT_SolveAdd,mat,b,x,y);
3630:   PetscObjectStateIncrease((PetscObject)x);
3631:   return(0);
3632: }

3634: /*@
3635:    MatSolveTranspose - Solves A' x = b, given a factored matrix.

3637:    Neighbor-wise Collective on Mat and Vec

3639:    Input Parameters:
3640: +  mat - the factored matrix
3641: -  b - the right-hand-side vector

3643:    Output Parameter:
3644: .  x - the result vector

3646:    Notes:
3647:    The vectors b and x cannot be the same.  I.e., one cannot
3648:    call MatSolveTranspose(A,x,x).

3650:    Most users should employ the simplified KSP interface for linear solvers
3651:    instead of working directly with matrix algebra routines such as this.
3652:    See, e.g., KSPCreate().

3654:    Level: developer

3656:    Concepts: matrices^triangular solves

3658: .seealso: MatSolve(), MatSolveAdd(), MatSolveTransposeAdd()
3659: @*/
3660: PetscErrorCode MatSolveTranspose(Mat mat,Vec b,Vec x)
3661: {

3671:   if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
3672:   if (x == b) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"x and b must be different vectors");
3673:   if (!mat->ops->solvetranspose) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Matrix type %s",((PetscObject)mat)->type_name);
3674:   if (mat->rmap->N != x->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->rmap->N,x->map->N);
3675:   if (mat->cmap->N != b->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: global dim %D %D",mat->cmap->N,b->map->N);
3676:   MatCheckPreallocated(mat,1);
3677:   PetscLogEventBegin(MAT_SolveTranspose,mat,b,x,0);
3678:   if (mat->factorerrortype) {
3679:     PetscInfo1(mat,"MatFactorError %D\n",mat->factorerrortype);
3680:     VecSetInf(x);
3681:   } else {
3682:     (*mat->ops->solvetranspose)(mat,b,x);
3683:   }
3684:   PetscLogEventEnd(MAT_SolveTranspose,mat,b,x,0);
3685:   PetscObjectStateIncrease((PetscObject)x);
3686:   return(0);
3687: }

3689: /*@
3690:    MatSolveTransposeAdd - Computes x = y + inv(Transpose(A)) b, given a
3691:                       factored matrix.

3693:    Neighbor-wise Collective on Mat and Vec

3695:    Input Parameters:
3696: +  mat - the factored matrix
3697: .  b - the right-hand-side vector
3698: -  y - the vector to be added to

3700:    Output Parameter:
3701: .  x - the result vector

3703:    Notes:
3704:    The vectors b and x cannot be the same.  I.e., one cannot
3705:    call MatSolveTransposeAdd(A,x,y,x).

3707:    Most users should employ the simplified KSP interface for linear solvers
3708:    instead of working directly with matrix algebra routines such as this.
3709:    See, e.g., KSPCreate().

3711:    Level: developer

3713:    Concepts: matrices^triangular solves

3715: .seealso: MatSolve(), MatSolveAdd(), MatSolveTranspose()
3716: @*/
3717: PetscErrorCode MatSolveTransposeAdd(Mat mat,Vec b,Vec y,Vec x)
3718: {
3719:   PetscScalar    one = 1.0;
3721:   Vec            tmp;

3732:   if (x == b) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"x and b must be different vectors");
3733:   if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
3734:   if (mat->rmap->N != x->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->rmap->N,x->map->N);
3735:   if (mat->cmap->N != b->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: global dim %D %D",mat->cmap->N,b->map->N);
3736:   if (mat->cmap->N != y->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec y: global dim %D %D",mat->cmap->N,y->map->N);
3737:   if (x->map->n != y->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Vec x,Vec y: local dim %D %D",x->map->n,y->map->n);
3738:   MatCheckPreallocated(mat,1);

3740:   PetscLogEventBegin(MAT_SolveTransposeAdd,mat,b,x,y);
3741:   if (mat->ops->solvetransposeadd) {
3742:     if (mat->factorerrortype) {
3743:       PetscInfo1(mat,"MatFactorError %D\n",mat->factorerrortype);
3744:       VecSetInf(x);
3745:     } else {
3746:       (*mat->ops->solvetransposeadd)(mat,b,y,x);
3747:     }
3748:   } else {
3749:     /* do the solve then the add manually */
3750:     if (x != y) {
3751:       MatSolveTranspose(mat,b,x);
3752:       VecAXPY(x,one,y);
3753:     } else {
3754:       VecDuplicate(x,&tmp);
3755:       PetscLogObjectParent((PetscObject)mat,(PetscObject)tmp);
3756:       VecCopy(x,tmp);
3757:       MatSolveTranspose(mat,b,x);
3758:       VecAXPY(x,one,tmp);
3759:       VecDestroy(&tmp);
3760:     }
3761:   }
3762:   PetscLogEventEnd(MAT_SolveTransposeAdd,mat,b,x,y);
3763:   PetscObjectStateIncrease((PetscObject)x);
3764:   return(0);
3765: }
3766: /* ----------------------------------------------------------------*/

3768: /*@
3769:    MatSOR - Computes relaxation (SOR, Gauss-Seidel) sweeps.

3771:    Neighbor-wise Collective on Mat and Vec

3773:    Input Parameters:
3774: +  mat - the matrix
3775: .  b - the right hand side
3776: .  omega - the relaxation factor
3777: .  flag - flag indicating the type of SOR (see below)
3778: .  shift -  diagonal shift
3779: .  its - the number of iterations
3780: -  lits - the number of local iterations

3782:    Output Parameters:
3783: .  x - the solution (can contain an initial guess, use option SOR_ZERO_INITIAL_GUESS to indicate no guess)

3785:    SOR Flags:
3786: .     SOR_FORWARD_SWEEP - forward SOR
3787: .     SOR_BACKWARD_SWEEP - backward SOR
3788: .     SOR_SYMMETRIC_SWEEP - SSOR (symmetric SOR)
3789: .     SOR_LOCAL_FORWARD_SWEEP - local forward SOR
3790: .     SOR_LOCAL_BACKWARD_SWEEP - local backward SOR
3791: .     SOR_LOCAL_SYMMETRIC_SWEEP - local SSOR
3792: .     SOR_APPLY_UPPER, SOR_APPLY_LOWER - applies
3793:          upper/lower triangular part of matrix to
3794:          vector (with omega)
3795: .     SOR_ZERO_INITIAL_GUESS - zero initial guess

3797:    Notes:
3798:    SOR_LOCAL_FORWARD_SWEEP, SOR_LOCAL_BACKWARD_SWEEP, and
3799:    SOR_LOCAL_SYMMETRIC_SWEEP perform separate independent smoothings
3800:    on each processor.

3802:    Application programmers will not generally use MatSOR() directly,
3803:    but instead will employ the KSP/PC interface.

3805:    Notes: for BAIJ, SBAIJ, and AIJ matrices with Inodes this does a block SOR smoothing, otherwise it does a pointwise smoothing

3807:    Notes for Advanced Users:
3808:    The flags are implemented as bitwise inclusive or operations.
3809:    For example, use (SOR_ZERO_INITIAL_GUESS | SOR_SYMMETRIC_SWEEP)
3810:    to specify a zero initial guess for SSOR.
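
   For example, a single local symmetric sweep with a zero initial guess can be requested as follows (a minimal sketch; error checking omitted):
.vb
     MatSOR(A,b,1.0,(MatSORType)(SOR_ZERO_INITIAL_GUESS | SOR_LOCAL_SYMMETRIC_SWEEP),0.0,1,1,x);
.ve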

3812:    Most users should employ the simplified KSP interface for linear solvers
3813:    instead of working directly with matrix algebra routines such as this.
3814:    See, e.g., KSPCreate().

3816:    Vectors x and b CANNOT be the same

3818:    Developer Note: We should add block SOR support for AIJ matrices with block size set greater than one and no inodes

3820:    Level: developer

3822:    Concepts: matrices^relaxation
3823:    Concepts: matrices^SOR
3824:    Concepts: matrices^Gauss-Seidel

3826: @*/
3827: PetscErrorCode MatSOR(Mat mat,Vec b,PetscReal omega,MatSORType flag,PetscReal shift,PetscInt its,PetscInt lits,Vec x)
3828: {

3838:   if (!mat->ops->sor) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
3839:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
3840:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
3841:   if (mat->cmap->N != x->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->cmap->N,x->map->N);
3842:   if (mat->rmap->N != b->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: global dim %D %D",mat->rmap->N,b->map->N);
3843:   if (mat->rmap->n != b->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: local dim %D %D",mat->rmap->n,b->map->n);
3844:   if (its <= 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Relaxation requires global its %D positive",its);
3845:   if (lits <= 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Relaxation requires local its %D positive",lits);
3846:   if (b == x) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_IDN,"b and x vector cannot be the same");

3848:   MatCheckPreallocated(mat,1);
3849:   PetscLogEventBegin(MAT_SOR,mat,b,x,0);
3850:   ierr = (*mat->ops->sor)(mat,b,omega,flag,shift,its,lits,x);
3851:   PetscLogEventEnd(MAT_SOR,mat,b,x,0);
3852:   PetscObjectStateIncrease((PetscObject)x);
3853:   return(0);
3854: }

3856: /*
3857:       Default matrix copy routine.
3858: */
3859: PetscErrorCode MatCopy_Basic(Mat A,Mat B,MatStructure str)
3860: {
3861:   PetscErrorCode    ierr;
3862:   PetscInt          i,rstart = 0,rend = 0,nz;
3863:   const PetscInt    *cwork;
3864:   const PetscScalar *vwork;

3867:   if (B->assembled) {
3868:     MatZeroEntries(B);
3869:   }
3870:   MatGetOwnershipRange(A,&rstart,&rend);
3871:   for (i=rstart; i<rend; i++) {
3872:     MatGetRow(A,i,&nz,&cwork,&vwork);
3873:     MatSetValues(B,1,&i,nz,cwork,vwork,INSERT_VALUES);
3874:     MatRestoreRow(A,i,&nz,&cwork,&vwork);
3875:   }
3876:   MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);
3877:   MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);
3878:   return(0);
3879: }

3881: /*@
3882:    MatCopy - Copies a matrix to another matrix.

3884:    Collective on Mat

3886:    Input Parameters:
3887: +  A - the matrix
3888: -  str - SAME_NONZERO_PATTERN or DIFFERENT_NONZERO_PATTERN

3890:    Output Parameter:
3891: .  B - where the copy is put

3893:    Notes:
3894:    If you use SAME_NONZERO_PATTERN then the two matrices had better have the
3895:    same nonzero pattern or the routine will crash.

3897:    MatCopy() copies the matrix entries of a matrix to another existing
3898:    matrix (after first zeroing the second matrix).  A related routine is
3899:    MatConvert(), which first creates a new matrix and then copies the data.
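
   Example of Usage (a minimal sketch; assumes A is assembled; error checking omitted):
.vb
     Mat B;

     MatDuplicate(A,MAT_DO_NOT_COPY_VALUES,&B);
     MatCopy(A,B,SAME_NONZERO_PATTERN);
.ve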

3901:    Level: intermediate

3903:    Concepts: matrices^copying

3905: .seealso: MatConvert(), MatDuplicate()

3907: @*/
3908: PetscErrorCode MatCopy(Mat A,Mat B,MatStructure str)
3909: {
3911:   PetscInt       i;

3919:   MatCheckPreallocated(B,2);
3920:   if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
3921:   if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
3922:   if (A->rmap->N != B->rmap->N || A->cmap->N != B->cmap->N) SETERRQ4(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Mat A,Mat B: global dim (%D,%D) (%D,%D)",A->rmap->N,B->rmap->N,A->cmap->N,B->cmap->N);
3923:   MatCheckPreallocated(A,1);

3925:   PetscLogEventBegin(MAT_Copy,A,B,0,0);
3926:   if (A->ops->copy) {
3927:     (*A->ops->copy)(A,B,str);
3928:   } else { /* generic conversion */
3929:     MatCopy_Basic(A,B,str);
3930:   }

3932:   B->stencil.dim = A->stencil.dim;
3933:   B->stencil.noc = A->stencil.noc;
3934:   for (i=0; i<=A->stencil.dim; i++) {
3935:     B->stencil.dims[i]   = A->stencil.dims[i];
3936:     B->stencil.starts[i] = A->stencil.starts[i];
3937:   }

3939:   PetscLogEventEnd(MAT_Copy,A,B,0,0);
3940:   PetscObjectStateIncrease((PetscObject)B);
3941:   return(0);
3942: }

3944: /*@C
3945:    MatConvert - Converts a matrix to another matrix, either of the same
3946:    or different type.

3948:    Collective on Mat

3950:    Input Parameters:
3951: +  mat - the matrix
3952: .  newtype - new matrix type.  Use MATSAME to create a new matrix of the
3953:    same type as the original matrix.
3954: -  reuse - denotes if the destination matrix is to be created or reused.
3955:    Use MAT_INPLACE_MATRIX for an in-place conversion (that is, when you want the input mat itself to be changed to contain the matrix in the new format); otherwise use
3956:    MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX (which can only be used after a first call with MAT_INITIAL_MATRIX and causes the matrix space in M to be reused).

3958:    Output Parameter:
3959: .  M - pointer to place new matrix

3961:    Notes:
3962:    MatConvert() first creates a new matrix and then copies the data from
3963:    the first matrix.  A related routine is MatCopy(), which copies the matrix
3964:    entries of one matrix to another already existing matrix context.

3966:    Cannot be used to convert a sequential matrix to a parallel one or a parallel matrix to a sequential one;
3967:    the MPI communicator of the generated matrix is always the same as the communicator
3968:    of the input matrix.
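
   Example of Usage (a minimal sketch; assumes A is an assembled AIJ matrix; error checking omitted):
.vb
     Mat B;

     MatConvert(A,MATDENSE,MAT_INITIAL_MATRIX,&B);    /* create a new dense copy of A */
     MatConvert(A,MATBAIJ,MAT_INPLACE_MATRIX,&A);     /* convert A itself in place    */
.ve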

3970:    Level: intermediate

3972:    Concepts: matrices^converting between storage formats

3974: .seealso: MatCopy(), MatDuplicate()
3975: @*/
3976: PetscErrorCode MatConvert(Mat mat, MatType newtype,MatReuse reuse,Mat *M)
3977: {
3979:   PetscBool      sametype,issame,flg;
3980:   char           convname[256],mtype[256];
3981:   Mat            B;

3987:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
3988:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
3989:   MatCheckPreallocated(mat,1);
3990:   MatSetOption(mat,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_FALSE);

3992:   PetscOptionsGetString(((PetscObject)mat)->options,((PetscObject)mat)->prefix,"-matconvert_type",mtype,256,&flg);
3993:   if (flg) {
3994:     newtype = mtype;
3995:   }
3996:   PetscObjectTypeCompare((PetscObject)mat,newtype,&sametype);
3997:   PetscStrcmp(newtype,"same",&issame);
3998:   if ((reuse == MAT_INPLACE_MATRIX) && (mat != *M)) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"MAT_INPLACE_MATRIX requires same input and output matrix");
3999:   if ((reuse == MAT_REUSE_MATRIX) && (mat == *M)) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"MAT_REUSE_MATRIX means reuse matrix in final argument, perhaps you mean MAT_INPLACE_MATRIX");

4001:   if ((reuse == MAT_INPLACE_MATRIX) && (issame || sametype)) return(0);

4003:   if ((sametype || issame) && (reuse==MAT_INITIAL_MATRIX) && mat->ops->duplicate) {
4004:     (*mat->ops->duplicate)(mat,MAT_COPY_VALUES,M);
4005:   } else {
4006:     PetscErrorCode (*conv)(Mat, MatType,MatReuse,Mat*)=NULL;
4007:     const char     *prefix[3] = {"seq","mpi",""};
4008:     PetscInt       i;
4009:     /*
4010:        Order of precedence:
4011:        1) See if a specialized converter is known to the current matrix.
4012:        2) See if a specialized converter is known to the desired matrix class.
4013:        3) See if a good general converter is registered for the desired class
4014:           (as of 6/27/03 only MATMPIADJ falls into this category).
4015:        4) See if a good general converter is known for the current matrix.
4016:        5) Use a really basic converter.
4017:     */

4019:     /* 1) See if a specialized converter is known to the current matrix and the desired class */
4020:     for (i=0; i<3; i++) {
4021:       PetscStrcpy(convname,"MatConvert_");
4022:       PetscStrcat(convname,((PetscObject)mat)->type_name);
4023:       PetscStrcat(convname,"_");
4024:       PetscStrcat(convname,prefix[i]);
4025:       PetscStrcat(convname,issame ? ((PetscObject)mat)->type_name : newtype);
4026:       PetscStrcat(convname,"_C");
4027:       PetscObjectQueryFunction((PetscObject)mat,convname,&conv);
4028:       if (conv) goto foundconv;
4029:     }

4031:     /* 2)  See if a specialized converter is known to the desired matrix class. */
4032:     MatCreate(PetscObjectComm((PetscObject)mat),&B);
4033:     MatSetSizes(B,mat->rmap->n,mat->cmap->n,mat->rmap->N,mat->cmap->N);
4034:     MatSetType(B,newtype);
4035:     for (i=0; i<3; i++) {
4036:       PetscStrcpy(convname,"MatConvert_");
4037:       PetscStrcat(convname,((PetscObject)mat)->type_name);
4038:       PetscStrcat(convname,"_");
4039:       PetscStrcat(convname,prefix[i]);
4040:       PetscStrcat(convname,newtype);
4041:       PetscStrcat(convname,"_C");
4042:       PetscObjectQueryFunction((PetscObject)B,convname,&conv);
4043:       if (conv) {
4044:         MatDestroy(&B);
4045:         goto foundconv;
4046:       }
4047:     }

4049:     /* 3) See if a good general converter is registered for the desired class */
4050:     conv = B->ops->convertfrom;
4051:     MatDestroy(&B);
4052:     if (conv) goto foundconv;

4054:     /* 4) See if a good general converter is known for the current matrix */
4055:     if (mat->ops->convert) {
4056:       conv = mat->ops->convert;
4057:     }
4058:     if (conv) goto foundconv;

4060:     /* 5) Use a really basic converter. */
4061:     conv = MatConvert_Basic;

4063: foundconv:
4064:     PetscLogEventBegin(MAT_Convert,mat,0,0,0);
4065:     (*conv)(mat,newtype,reuse,M);
4066:     PetscLogEventEnd(MAT_Convert,mat,0,0,0);
4067:   }
4068:   PetscObjectStateIncrease((PetscObject)*M);

4070:   /* Copy Mat options */
4071:   if (mat->symmetric) {MatSetOption(*M,MAT_SYMMETRIC,PETSC_TRUE);}
4072:   if (mat->hermitian) {MatSetOption(*M,MAT_HERMITIAN,PETSC_TRUE);}
4073:   return(0);
4074: }

4076: /*@C
4077:    MatFactorGetSolverPackage - Returns the name of the package providing the factorization routines

4079:    Not Collective

4081:    Input Parameter:
4082: .  mat - the matrix, must be a factored matrix

4084:    Output Parameter:
4085: .   type - the string name of the package (do not free this string)

4087:    Notes:
4088:       In Fortran you pass in an empty string and the package name will be copied into it.
4089:     (Make sure the string is long enough.)
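
   Example of Usage (a minimal sketch; F is a factored matrix; error checking omitted):
.vb
     const MatSolverPackage spackage;

     MatFactorGetSolverPackage(F,&spackage);
     PetscPrintf(PetscObjectComm((PetscObject)F),"Factored with %s\n",spackage);
.ve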

4091:    Level: intermediate

4093: .seealso: MatCopy(), MatDuplicate(), MatGetFactorAvailable(), MatGetFactor()
4094: @*/
4095: PetscErrorCode MatFactorGetSolverPackage(Mat mat, const MatSolverPackage *type)
4096: {
4097:   PetscErrorCode ierr, (*conv)(Mat,const MatSolverPackage*);

4102:   if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Only for factored matrix");
4103:   PetscObjectQueryFunction((PetscObject)mat,"MatFactorGetSolverPackage_C",&conv);
4104:   if (!conv) {
4105:     *type = MATSOLVERPETSC;
4106:   } else {
4107:     (*conv)(mat,type);
4108:   }
4109:   return(0);
4110: }

4112: typedef struct _MatSolverPackageForSpecifcType* MatSolverPackageForSpecifcType;
4113: struct _MatSolverPackageForSpecifcType {
4114:   MatType                        mtype;
4115:   PetscErrorCode                 (*getfactor[4])(Mat,MatFactorType,Mat*);
4116:   MatSolverPackageForSpecifcType next;
4117: };

4119: typedef struct _MatSolverPackageHolder* MatSolverPackageHolder;
4120: struct _MatSolverPackageHolder {
4121:   char                           *name;
4122:   MatSolverPackageForSpecifcType handlers;
4123:   MatSolverPackageHolder         next;
4124: };

4126: static MatSolverPackageHolder MatSolverPackageHolders = NULL;

4128: /*@C
4129:    MatSolverPackageRegister - Registers a MatSolverPackage that works for a particular matrix type

4131:    Input Parameters:
4132: +    package - name of the package, for example petsc or superlu
4133: .    mtype - the matrix type that works with this package
4134: .    ftype - the type of factorization supported by the package
4135: -    getfactor - routine that will create the factored matrix ready to be used
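
   Example of Usage (a minimal sketch; "mysolver" and MatGetFactor_SeqAIJ_MySolver are placeholder names for a package and its factor-creation routine; error checking omitted):
.vb
     extern PetscErrorCode MatGetFactor_SeqAIJ_MySolver(Mat,MatFactorType,Mat*);

     MatSolverPackageRegister("mysolver",MATSEQAIJ,MAT_FACTOR_LU,MatGetFactor_SeqAIJ_MySolver);
.ve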

4137:     Level: intermediate

4139: .seealso: MatCopy(), MatDuplicate(), MatGetFactorAvailable()
4140: @*/
4141: PetscErrorCode MatSolverPackageRegister(const MatSolverPackage package,const MatType mtype,MatFactorType ftype,PetscErrorCode (*getfactor)(Mat,MatFactorType,Mat*))
4142: {
4143:   PetscErrorCode                 ierr;
4144:   MatSolverPackageHolder         next = MatSolverPackageHolders,prev;
4145:   PetscBool                      flg;
4146:   MatSolverPackageForSpecifcType inext,iprev = NULL;

4149:   if (!next) {
4150:     PetscNew(&MatSolverPackageHolders);
4151:     PetscStrallocpy(package,&MatSolverPackageHolders->name);
4152:     PetscNew(&MatSolverPackageHolders->handlers);
4153:     PetscStrallocpy(mtype,(char **)&MatSolverPackageHolders->handlers->mtype);
4154:     MatSolverPackageHolders->handlers->getfactor[(int)ftype-1] = getfactor;
4155:     return(0);
4156:   }
4157:   while (next) {
4158:     PetscStrcasecmp(package,next->name,&flg);
4159:     if (flg) {
4160:       if (!next->handlers) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"MatSolverPackageHolder is missing handlers");
4161:       inext = next->handlers;
4162:       while (inext) {
4163:         PetscStrcasecmp(mtype,inext->mtype,&flg);
4164:         if (flg) {
4165:           inext->getfactor[(int)ftype-1] = getfactor;
4166:           return(0);
4167:         }
4168:         iprev = inext;
4169:         inext = inext->next;
4170:       }
4171:       PetscNew(&iprev->next);
4172:       PetscStrallocpy(mtype,(char **)&iprev->next->mtype);
4173:       iprev->next->getfactor[(int)ftype-1] = getfactor;
4174:       return(0);
4175:     }
4176:     prev = next;
4177:     next = next->next;
4178:   }
4179:   PetscNew(&prev->next);
4180:   PetscStrallocpy(package,&prev->next->name);
4181:   PetscNew(&prev->next->handlers);
4182:   PetscStrallocpy(mtype,(char **)&prev->next->handlers->mtype);
4183:   prev->next->handlers->getfactor[(int)ftype-1] = getfactor;
4184:   return(0);
4185: }

4187: /*@C
4188:    MatSolverPackageGet - Gets the function that creates the factor matrix, if it exists

4190:    Input Parameters:
4191: +    package - name of the package, for example petsc or superlu
4192: .    mtype - the matrix type that works with this package
4193: -    ftype - the type of factorization supported by the package

4195:    Output Parameters:
4196: +   foundpackage - PETSC_TRUE if the package was registered
4197: .   foundmtype - PETSC_TRUE if the package supports the requested mtype
4198: -   getfactor - routine that will create the factored matrix ready to be used or NULL if not found

4200:     Level: intermediate

4202: .seealso: MatCopy(), MatDuplicate(), MatGetFactorAvailable()
4203: @*/
4204: PetscErrorCode MatSolverPackageGet(const MatSolverPackage package,const MatType mtype,MatFactorType ftype,PetscBool *foundpackage,PetscBool *foundmtype,PetscErrorCode (**getfactor)(Mat,MatFactorType,Mat*))
4205: {
4206:   PetscErrorCode                 ierr;
4207:   MatSolverPackageHolder         next = MatSolverPackageHolders;
4208:   PetscBool                      flg;
4209:   MatSolverPackageForSpecifcType inext;

4212:   if (foundpackage) *foundpackage = PETSC_FALSE;
4213:   if (foundmtype)   *foundmtype   = PETSC_FALSE;
4214:   if (getfactor)    *getfactor    = NULL;

4216:   if (package) {
4217:     while (next) {
4218:       PetscStrcasecmp(package,next->name,&flg);
4219:       if (flg) {
4220:         if (foundpackage) *foundpackage = PETSC_TRUE;
4221:         inext = next->handlers;
4222:         while (inext) {
4223:           PetscStrbeginswith(mtype,inext->mtype,&flg);
4224:           if (flg) {
4225:             if (foundmtype) *foundmtype = PETSC_TRUE;
4226:             if (getfactor)  *getfactor  = inext->getfactor[(int)ftype-1];
4227:             return(0);
4228:           }
4229:           inext = inext->next;
4230:         }
4231:       }
4232:       next = next->next;
4233:     }
4234:   } else {
4235:     while (next) {
4236:       inext = next->handlers;
4237:       while (inext) {
4238:         PetscStrbeginswith(mtype,inext->mtype,&flg);
4239:         if (flg && inext->getfactor[(int)ftype-1]) {
4240:           if (foundpackage) *foundpackage = PETSC_TRUE;
4241:           if (foundmtype)   *foundmtype   = PETSC_TRUE;
4242:           if (getfactor)    *getfactor    = inext->getfactor[(int)ftype-1];
4243:           return(0);
4244:         }
4245:         inext = inext->next;
4246:       }
4247:       next = next->next;
4248:     }
4249:   }
4250:   return(0);
4251: }

4253: PetscErrorCode MatSolverPackageDestroy(void)
4254: {
4255:   PetscErrorCode                 ierr;
4256:   MatSolverPackageHolder         next = MatSolverPackageHolders,prev;
4257:   MatSolverPackageForSpecifcType inext,iprev;

4260:   while (next) {
4261:     PetscFree(next->name);
4262:     inext = next->handlers;
4263:     while (inext) {
4264:       PetscFree(inext->mtype);
4265:       iprev = inext;
4266:       inext = inext->next;
4267:       PetscFree(iprev);
4268:     }
4269:     prev = next;
4270:     next = next->next;
4271:     PetscFree(prev);
4272:   }
4273:   MatSolverPackageHolders = NULL;
4274:   return(0);
4275: }

4277: /*@C
4278:    MatGetFactor - Returns a matrix suitable for calls to MatXXFactorSymbolic()

4280:    Collective on Mat

4282:    Input Parameters:
4283: +  mat - the matrix
4284: .  type - name of solver type, for example, superlu, petsc (to use PETSc's default)
4285: -  ftype - factor type, MAT_FACTOR_LU, MAT_FACTOR_CHOLESKY, MAT_FACTOR_ICC, MAT_FACTOR_ILU,

4287:    Output Parameters:
4288: .  f - the factor matrix used with MatXXFactorSymbolic() calls

4290:    Notes:
4291:       Some PETSc matrix formats have alternative solvers available that are contained in alternative packages
4292:      such as pastix, superlu, mumps etc.

4294:      PETSc must have been ./configure'd to use the external solver, using the option --download-package
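
   Example of Usage (a minimal LU sketch; assumes A is an assembled AIJ matrix; error checking omitted):
.vb
     Mat           F;
     IS            rowperm,colperm;
     MatFactorInfo info;

     MatFactorInfoInitialize(&info);
     MatGetOrdering(A,MATORDERINGND,&rowperm,&colperm);
     MatGetFactor(A,MATSOLVERPETSC,MAT_FACTOR_LU,&F);
     MatLUFactorSymbolic(F,A,rowperm,colperm,&info);
     MatLUFactorNumeric(F,A,&info);
.ve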

4296:    Level: intermediate

4298: .seealso: MatCopy(), MatDuplicate(), MatGetFactorAvailable()
4299: @*/
4300: PetscErrorCode MatGetFactor(Mat mat, const MatSolverPackage type,MatFactorType ftype,Mat *f)
4301: {
4302:   PetscErrorCode ierr,(*conv)(Mat,MatFactorType,Mat*);
4303:   PetscBool      foundpackage,foundmtype;


4309:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
4310:   MatCheckPreallocated(mat,1);

4312:   MatSolverPackageGet(type,((PetscObject)mat)->type_name,ftype,&foundpackage,&foundmtype,&conv);
4313:   if (!foundpackage) {
4314:     if (type) {
4315:       SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_MISSING_FACTOR,"Could not locate solver package %s. Perhaps you must ./configure with --download-%s",type,type);
4316:     } else {
4317:       SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_MISSING_FACTOR,"Could not locate a solver package. Perhaps you must ./configure with --download-<package>");
4318:     }
4319:   }

4321:   if (!foundmtype) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_MISSING_FACTOR,"MatSolverPackage %s does not support matrix type %s",type,((PetscObject)mat)->type_name);
4322:   if (!conv) SETERRQ3(PetscObjectComm((PetscObject)mat),PETSC_ERR_MISSING_FACTOR,"MatSolverPackage %s does not support factorization type %s for  matrix type %s",type,MatFactorTypes[ftype],((PetscObject)mat)->type_name);

4324:   (*conv)(mat,ftype,f);
4325:   return(0);
4326: }

4328: /*@C
4329:    MatGetFactorAvailable - Returns a flag indicating whether the matrix supports a particular package and factor type

4331:    Not Collective

4333:    Input Parameters:
4334: +  mat - the matrix
4335: .  type - name of solver type, for example, superlu, petsc (to use PETSc's default)
4336: -  ftype - factor type, MAT_FACTOR_LU, MAT_FACTOR_CHOLESKY, MAT_FACTOR_ICC, MAT_FACTOR_ILU,

4338:    Output Parameter:
4339: .    flg - PETSC_TRUE if the factorization is available

4341:    Notes:
4342:       Some PETSc matrix formats have alternative solvers available that are contained in alternative packages
4343:      such as pastix, superlu, mumps etc.

4345:      PETSc must have been ./configure'd to use the external solver, using the option --download-package
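
   Example of Usage (a minimal sketch; assumes PETSc was configured with SuperLU and F is a Mat; error checking omitted):
.vb
     PetscBool flg;

     MatGetFactorAvailable(A,MATSOLVERSUPERLU,MAT_FACTOR_LU,&flg);
     if (flg) {
       MatGetFactor(A,MATSOLVERSUPERLU,MAT_FACTOR_LU,&F);
     }
.ve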

4347:    Level: intermediate

4349: .seealso: MatCopy(), MatDuplicate(), MatGetFactor()
4350: @*/
4351: PetscErrorCode MatGetFactorAvailable(Mat mat, const MatSolverPackage type,MatFactorType ftype,PetscBool  *flg)
4352: {
4353:   PetscErrorCode ierr, (*gconv)(Mat,MatFactorType,Mat*);


4359:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
4360:   MatCheckPreallocated(mat,1);

4362:   *flg = PETSC_FALSE;
4363:   MatSolverPackageGet(type,((PetscObject)mat)->type_name,ftype,NULL,NULL,&gconv);
4364:   if (gconv) {
4365:     *flg = PETSC_TRUE;
4366:   }
4367:   return(0);
4368: }

4370:  #include <petscdmtypes.h>

4372: /*@
4373:    MatDuplicate - Duplicates a matrix including the non-zero structure.

4375:    Collective on Mat

4377:    Input Parameters:
4378: +  mat - the matrix
4379: -  op - MAT_DO_NOT_COPY_VALUES to copy only the nonzero structure, MAT_COPY_VALUES to also copy the numerical values in the matrix, or
4380:         MAT_SHARE_NONZERO_PATTERN to share the nonzero pattern with the previous matrix and not copy it.

4382:    Output Parameter:
4383: .  M - pointer to place new matrix

4385:    Level: intermediate

4387:    Concepts: matrices^duplicating

4389:     Notes: You cannot change the nonzero pattern for the parent or child matrix if you use MAT_SHARE_NONZERO_PATTERN.
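
   Example of Usage (a minimal sketch; error checking omitted):
.vb
     Mat B,C;

     MatDuplicate(A,MAT_COPY_VALUES,&B);              /* deep copy of structure and values          */
     MatDuplicate(A,MAT_SHARE_NONZERO_PATTERN,&C);    /* new values, nonzero pattern shared with A  */
.ve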

4391: .seealso: MatCopy(), MatConvert()
4392: @*/
4393: PetscErrorCode MatDuplicate(Mat mat,MatDuplicateOption op,Mat *M)
4394: {
4396:   Mat            B;
4397:   PetscInt       i;
4398:   DM             dm;

4404:   if (op == MAT_COPY_VALUES && !mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"MAT_COPY_VALUES not allowed for unassembled matrix");
4405:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
4406:   MatCheckPreallocated(mat,1);

4408:   *M = 0;
4409:   if (!mat->ops->duplicate) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Not written for this matrix type");
4410:   PetscLogEventBegin(MAT_Convert,mat,0,0,0);
4411:   (*mat->ops->duplicate)(mat,op,M);
4412:   B    = *M;

4414:   B->stencil.dim = mat->stencil.dim;
4415:   B->stencil.noc = mat->stencil.noc;
4416:   for (i=0; i<=mat->stencil.dim; i++) {
4417:     B->stencil.dims[i]   = mat->stencil.dims[i];
4418:     B->stencil.starts[i] = mat->stencil.starts[i];
4419:   }

4421:   B->nooffproczerorows = mat->nooffproczerorows;
4422:   B->nooffprocentries  = mat->nooffprocentries;

4424:   PetscObjectQuery((PetscObject) mat, "__PETSc_dm", (PetscObject*) &dm);
4425:   if (dm) {
4426:     PetscObjectCompose((PetscObject) B, "__PETSc_dm", (PetscObject) dm);
4427:   }
4428:   PetscLogEventEnd(MAT_Convert,mat,0,0,0);
4429:   PetscObjectStateIncrease((PetscObject)B);
4430:   return(0);
4431: }

4433: /*@
4434:    MatGetDiagonal - Gets the diagonal of a matrix.

4436:    Logically Collective on Mat and Vec

4438:    Input Parameters:
4439: +  mat - the matrix
4440: -  v - the vector for storing the diagonal

4442:    Output Parameter:
4443: .  v - the diagonal of the matrix

4445:    Level: intermediate

4447:    Note:
4448:    Currently only correct in parallel for square matrices.
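
   Example of Usage (a minimal sketch for a square matrix; error checking omitted):
.vb
     Vec d;

     MatCreateVecs(A,&d,NULL);
     MatGetDiagonal(A,d);
.ve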

4450:    Concepts: matrices^accessing diagonals

4452: .seealso: MatGetRow(), MatCreateSubMatrices(), MatCreateSubmatrix(), MatGetRowMaxAbs()
4453: @*/
4454: PetscErrorCode MatGetDiagonal(Mat mat,Vec v)
4455: {

4462:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4463:   if (!mat->ops->getdiagonal) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
4464:   MatCheckPreallocated(mat,1);

4466:   (*mat->ops->getdiagonal)(mat,v);
4467:   PetscObjectStateIncrease((PetscObject)v);
4468:   return(0);
4469: }

4471: /*@C
4472:    MatGetRowMin - Gets the minimum value (of the real part) of each
4473:         row of the matrix

4475:    Logically Collective on Mat and Vec

4477:    Input Parameters:
4478: .  mat - the matrix

4480:    Output Parameter:
4481: +  v - the vector for storing the minimums
4482: -  idx - the indices of the column found for each row (optional)

4484:    Level: intermediate

4486:    Notes: The results of this call are the same as if one converted the matrix to dense format
4487:       and found the minimum value in each row (i.e. the implicit zeros are counted as zeros).

4489:     This code is only implemented for a couple of matrix formats.

4491:    Concepts: matrices^getting row minimums

4493: .seealso: MatGetDiagonal(), MatCreateSubMatrices(), MatCreateSubmatrix(), MatGetRowMaxAbs(),
4494:           MatGetRowMax()
4495: @*/
4496: PetscErrorCode MatGetRowMin(Mat mat,Vec v,PetscInt idx[])
4497: {

4504:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4505:   if (!mat->ops->getrowmin) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
4506:   MatCheckPreallocated(mat,1);

4508:   (*mat->ops->getrowmin)(mat,v,idx);
4509:   PetscObjectStateIncrease((PetscObject)v);
4510:   return(0);
4511: }

4513: /*@C
4514:    MatGetRowMinAbs - Gets the minimum value (in absolute value) of each
4515:         row of the matrix

4517:    Logically Collective on Mat and Vec

4519:    Input Parameters:
4520: .  mat - the matrix

4522:    Output Parameter:
4523: +  v - the vector for storing the minimums
4524: -  idx - the indices of the column found for each row (or NULL if not needed)

4526:    Level: intermediate

4528:    Notes: if a row is completely empty or has only 0.0 values then the idx[] value for that
4529:     row is 0 (the first column).

4531:     This code is only implemented for a couple of matrix formats.

4533:    Concepts: matrices^getting row minimums

4535: .seealso: MatGetDiagonal(), MatCreateSubMatrices(), MatCreateSubmatrix(), MatGetRowMax(), MatGetRowMaxAbs(), MatGetRowMin()
4536: @*/
4537: PetscErrorCode MatGetRowMinAbs(Mat mat,Vec v,PetscInt idx[])
4538: {

4545:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4546:   if (!mat->ops->getrowminabs) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
4547:   MatCheckPreallocated(mat,1);
4548:   if (idx) {PetscMemzero(idx,mat->rmap->n*sizeof(PetscInt));}

4550:   (*mat->ops->getrowminabs)(mat,v,idx);
4551:   PetscObjectStateIncrease((PetscObject)v);
4552:   return(0);
4553: }

4555: /*@C
4556:    MatGetRowMax - Gets the maximum value (of the real part) of each
4557:         row of the matrix

4559:    Logically Collective on Mat and Vec

4561:    Input Parameters:
4562: .  mat - the matrix

4564:    Output Parameter:
4565: +  v - the vector for storing the maximums
4566: -  idx - the indices of the column found for each row (optional)

4568:    Level: intermediate

4570:    Notes: The results of this call are the same as if one converted the matrix to dense format
4571:       and found the maximum value in each row (i.e. the implicit zeros are counted as zeros).

4573:     This code is only implemented for a couple of matrix formats.

4575:    Concepts: matrices^getting row maximums

4577: .seealso: MatGetDiagonal(), MatCreateSubMatrices(), MatCreateSubmatrix(), MatGetRowMaxAbs(), MatGetRowMin()
4578: @*/
4579: PetscErrorCode MatGetRowMax(Mat mat,Vec v,PetscInt idx[])
4580: {

4587:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4588:   if (!mat->ops->getrowmax) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
4589:   MatCheckPreallocated(mat,1);

4591:   (*mat->ops->getrowmax)(mat,v,idx);
4592:   PetscObjectStateIncrease((PetscObject)v);
4593:   return(0);
4594: }

4596: /*@C
4597:    MatGetRowMaxAbs - Gets the maximum value (in absolute value) of each
4598:         row of the matrix

4600:    Logically Collective on Mat and Vec

4602:    Input Parameters:
4603: .  mat - the matrix

4605:    Output Parameter:
4606: +  v - the vector for storing the maximums
4607: -  idx - the indices of the column found for each row (or NULL if not needed)

4609:    Level: intermediate

4611:    Notes: if a row is completely empty or has only 0.0 values then the idx[] value for that
4612:     row is 0 (the first column).

4614:     This code is only implemented for a couple of matrix formats.
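
   Example of Usage (a minimal sketch; error checking omitted):
.vb
     Vec      vmax;
     PetscInt m,*idx;

     MatGetLocalSize(A,&m,NULL);
     PetscMalloc1(m,&idx);
     MatCreateVecs(A,NULL,&vmax);
     MatGetRowMaxAbs(A,vmax,idx);
.ve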

4616:    Concepts: matrices^getting row maximums

4618: .seealso: MatGetDiagonal(), MatCreateSubMatrices(), MatCreateSubmatrix(), MatGetRowMax(), MatGetRowMin()
4619: @*/
4620: PetscErrorCode MatGetRowMaxAbs(Mat mat,Vec v,PetscInt idx[])
4621: {

4628:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4629:   if (!mat->ops->getrowmaxabs) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
4630:   MatCheckPreallocated(mat,1);
4631:   if (idx) {PetscMemzero(idx,mat->rmap->n*sizeof(PetscInt));}

4633:   (*mat->ops->getrowmaxabs)(mat,v,idx);
4634:   PetscObjectStateIncrease((PetscObject)v);
4635:   return(0);
4636: }

4638: /*@
4639:    MatGetRowSum - Gets the sum of each row of the matrix

4641:    Logically Collective on Mat and Vec

4643:    Input Parameters:
4644: .  mat - the matrix

4646:    Output Parameter:
4647: .  v - the vector for storing the sum of rows

4649:    Level: intermediate

4651:    Notes: This code is slow since it is not currently specialized for different formats

4653:    Concepts: matrices^getting row sums

4655: .seealso: MatGetDiagonal(), MatCreateSubMatrices(), MatCreateSubMatrix(), MatGetRowMax(), MatGetRowMin()
4656: @*/
4657: PetscErrorCode MatGetRowSum(Mat mat, Vec v)
4658: {
4659:   PetscInt       start = 0, end = 0, row;
4660:   PetscScalar    *array;

4667:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4668:   MatCheckPreallocated(mat,1);
4669:   MatGetOwnershipRange(mat, &start, &end);
4670:   VecGetArray(v, &array);
4671:   for (row = start; row < end; ++row) {
4672:     PetscInt          ncols, col;
4673:     const PetscInt    *cols;
4674:     const PetscScalar *vals;

4676:     array[row - start] = 0.0;

4678:     MatGetRow(mat, row, &ncols, &cols, &vals);
4679:     for (col = 0; col < ncols; col++) {
4680:       array[row - start] += vals[col];
4681:     }
4682:     MatRestoreRow(mat, row, &ncols, &cols, &vals);
4683:   }
4684:   VecRestoreArray(v, &array);
4685:   PetscObjectStateIncrease((PetscObject) v);
4686:   return(0);
4687: }
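/*
   A minimal usage sketch for MatGetRowSum(); names are illustrative and an assembled
   matrix A is assumed. Since this routine is not specialized per format, forming the
   product of A with a vector of ones via MatMult() is usually faster.
*/
static PetscErrorCode ExampleRowSum(Mat A)
{
  Vec rowsum;

  MatCreateVecs(A,NULL,&rowsum);    /* rowsum is distributed like the rows of A */
  MatGetRowSum(A,rowsum);
  VecView(rowsum,PETSC_VIEWER_STDOUT_WORLD);
  VecDestroy(&rowsum);
  return(0);
}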

4689: /*@
4690:    MatTranspose - Computes an in-place or out-of-place transpose of a matrix.

4692:    Collective on Mat

4694:    Input Parameters:
4695: +  mat - the matrix to transpose
4696: -  reuse - either MAT_INITIAL_MATRIX, MAT_REUSE_MATRIX, or MAT_INPLACE_MATRIX

4698:    Output Parameters:
4699: .  B - the transpose

4701:    Notes:
4702:      If you use MAT_INPLACE_MATRIX then you must pass in &mat for B

4704:      MAT_REUSE_MATRIX causes the B matrix from a previous call to this function with MAT_INITIAL_MATRIX to be used

4706:      Consider using MatCreateTranspose() instead if you only need a matrix that behaves like the transpose, but don't need the storage to be changed.

4708:    Level: intermediate

4710:    Concepts: matrices^transposing

4712: .seealso: MatMultTranspose(), MatMultTransposeAdd(), MatIsTranspose(), MatReuse
4713: @*/
4714: PetscErrorCode MatTranspose(Mat mat,MatReuse reuse,Mat *B)
4715: {

4721:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4722:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
4723:   if (!mat->ops->transpose) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
4724:   if (reuse == MAT_INPLACE_MATRIX && mat != *B) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"MAT_INPLACE_MATRIX requires last matrix to match first");
4725:   if (reuse == MAT_REUSE_MATRIX && mat == *B) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Perhaps you mean MAT_INPLACE_MATRIX");
4726:   MatCheckPreallocated(mat,1);

4728:   PetscLogEventBegin(MAT_Transpose,mat,0,0,0);
4729:   (*mat->ops->transpose)(mat,reuse,B);
4730:   PetscLogEventEnd(MAT_Transpose,mat,0,0,0);
4731:   if (B) {PetscObjectStateIncrease((PetscObject)*B);}
4732:   return(0);
4733: }
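/*
   A minimal sketch of the three MatTranspose() reuse modes; the helper name is
   illustrative and an assembled matrix A is assumed.
*/
static PetscErrorCode ExampleTranspose(Mat A)
{
  Mat B;

  MatTranspose(A,MAT_INITIAL_MATRIX,&B);   /* create B = A^T */
  /* ... change the numerical values of A, keeping its nonzero pattern ... */
  MatTranspose(A,MAT_REUSE_MATRIX,&B);     /* refill the previously created B with the new A^T */
  MatTranspose(A,MAT_INPLACE_MATRIX,&A);   /* overwrite A with its own transpose */
  MatDestroy(&B);
  return(0);
}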

4735: /*@
4736:    MatIsTranspose - Test whether a matrix is another one's transpose,
4737:         or its own, in which case it tests symmetry.

4739:    Collective on Mat

4741:    Input Parameters:
4742: +  A - the matrix to test
4743: .  B - the matrix to test against, this can equal the first parameter
-  tol - tolerance, differences between entries smaller than this are counted as zero

4745:    Output Parameters:
4746: .  flg - the result

4748:    Notes:
4749:    Only available for SeqAIJ/MPIAIJ matrices. The sequential algorithm
4750:    has a running time of the order of the number of nonzeros; the parallel
4751:    test involves parallel copies of the block-offdiagonal parts of the matrix.

4753:    Level: intermediate

4755:    Concepts: matrices^transposing, matrix^symmetry

4757: .seealso: MatTranspose(), MatIsSymmetric(), MatIsHermitian()
4758: @*/
4759: PetscErrorCode MatIsTranspose(Mat A,Mat B,PetscReal tol,PetscBool  *flg)
4760: {
4761:   PetscErrorCode ierr,(*f)(Mat,Mat,PetscReal,PetscBool*),(*g)(Mat,Mat,PetscReal,PetscBool*);

4767:   PetscObjectQueryFunction((PetscObject)A,"MatIsTranspose_C",&f);
4768:   PetscObjectQueryFunction((PetscObject)B,"MatIsTranspose_C",&g);
4769:   *flg = PETSC_FALSE;
4770:   if (f && g) {
4771:     if (f == g) {
4772:       (*f)(A,B,tol,flg);
4773:     } else SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_NOTSAMETYPE,"Matrices do not have the same comparator for symmetry test");
4774:   } else {
4775:     MatType mattype;
4776:     if (!f) {
4777:       MatGetType(A,&mattype);
4778:     } else {
4779:       MatGetType(B,&mattype);
4780:     }
4781:     SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Matrix of type <%s> does not support checking for transpose",mattype);
4782:   }
4783:   return(0);
4784: }
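/*
   A minimal sketch of testing symmetry with MatIsTranspose() by comparing A against
   itself; the tolerance value is illustrative and A is assumed to be an assembled
   SeqAIJ or MPIAIJ matrix.
*/
static PetscErrorCode ExampleIsTranspose(Mat A)
{
  PetscBool flg;

  MatIsTranspose(A,A,1.e-10,&flg);   /* entries differing by less than the tolerance count as equal */
  if (flg) {PetscPrintf(PetscObjectComm((PetscObject)A),"A is symmetric\n");}
  return(0);
}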

4786: /*@
4787:    MatHermitianTranspose - Computes an in-place or out-of-place Hermitian transpose (complex conjugate transpose) of a matrix.

4789:    Collective on Mat

4791:    Input Parameters:
4792: +  mat - the matrix to transpose and complex conjugate
4793: -  reuse - MAT_INITIAL_MATRIX to create a new matrix, MAT_INPLACE_MATRIX to reuse the first argument to store the transpose

4795:    Output Parameters:
4796: .  B - the Hermitian transpose

4798:    Level: intermediate

4800:    Concepts: matrices^transposing, complex conjugate

4802: .seealso: MatTranspose(), MatMultTranspose(), MatMultTransposeAdd(), MatIsTranspose(), MatReuse
4803: @*/
4804: PetscErrorCode MatHermitianTranspose(Mat mat,MatReuse reuse,Mat *B)
4805: {

4809:   MatTranspose(mat,reuse,B);
4810: #if defined(PETSC_USE_COMPLEX)
4811:   MatConjugate(*B);
4812: #endif
4813:   return(0);
4814: }

4816: /*@
4817:    MatIsHermitianTranspose - Test whether a matrix is another one's Hermitian transpose.

4819:    Collective on Mat

4821:    Input Parameters:
4822: +  A - the matrix to test
4823: .  B - the matrix to test against, this can equal the first parameter
-  tol - tolerance, differences between entries smaller than this are counted as zero

4825:    Output Parameters:
4826: .  flg - the result

4828:    Notes:
4829:    Only available for SeqAIJ/MPIAIJ matrices. The sequential algorithm
4830:    has a running time of the order of the number of nonzeros; the parallel
4831:    test involves parallel copies of the block-offdiagonal parts of the matrix.

4833:    Level: intermediate

4835:    Concepts: matrices^transposing, matrix^symmetry

4837: .seealso: MatTranspose(), MatIsSymmetric(), MatIsHermitian(), MatIsTranspose()
4838: @*/
4839: PetscErrorCode MatIsHermitianTranspose(Mat A,Mat B,PetscReal tol,PetscBool  *flg)
4840: {
4841:   PetscErrorCode ierr,(*f)(Mat,Mat,PetscReal,PetscBool*),(*g)(Mat,Mat,PetscReal,PetscBool*);

4847:   PetscObjectQueryFunction((PetscObject)A,"MatIsHermitianTranspose_C",&f);
4848:   PetscObjectQueryFunction((PetscObject)B,"MatIsHermitianTranspose_C",&g);
4849:   if (f && g) {
4850:     if (f==g) {
4851:       (*f)(A,B,tol,flg);
4852:     } else SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_NOTSAMETYPE,"Matrices do not have the same comparator for Hermitian test");
4853:   }
4854:   return(0);
4855: }

4857: /*@
4858:    MatPermute - Creates a new matrix with rows and columns permuted from the
4859:    original.

4861:    Collective on Mat

4863:    Input Parameters:
4864: +  mat - the matrix to permute
4865: .  row - row permutation, each processor supplies only the permutation for its rows
4866: -  col - column permutation, each processor supplies only the permutation for its columns

4868:    Output Parameters:
4869: .  B - the permuted matrix

4871:    Level: advanced

4873:    Note:
4874:    The index sets map from row/col of permuted matrix to row/col of original matrix.
4875:    The index sets should be on the same communicator as Mat and have the same local sizes.

4877:    Concepts: matrices^permuting

4879: .seealso: MatGetOrdering(), ISAllGather()

4881: @*/
4882: PetscErrorCode MatPermute(Mat mat,IS row,IS col,Mat *B)
4883: {

4892:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4893:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
4894:   if (!mat->ops->permute) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"MatPermute not available for Mat type %s",((PetscObject)mat)->type_name);
4895:   MatCheckPreallocated(mat,1);

4897:   (*mat->ops->permute)(mat,row,col,B);
4898:   PetscObjectStateIncrease((PetscObject)*B);
4899:   return(0);
4900: }
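/*
   A minimal sketch of pairing MatGetOrdering() with MatPermute(); the helper name is
   illustrative and an assembled matrix A is assumed.
*/
static PetscErrorCode ExamplePermute(Mat A)
{
  IS  rowperm,colperm;
  Mat B;

  MatGetOrdering(A,MATORDERINGRCM,&rowperm,&colperm);   /* e.g. reverse Cuthill-McKee ordering */
  MatPermute(A,rowperm,colperm,&B);                     /* B holds A with rows and columns permuted */
  MatDestroy(&B);
  ISDestroy(&rowperm);
  ISDestroy(&colperm);
  return(0);
}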

4902: /*@
4903:    MatEqual - Compares two matrices.

4905:    Collective on Mat

4907:    Input Parameters:
4908: +  A - the first matrix
4909: -  B - the second matrix

4911:    Output Parameter:
4912: .  flg - PETSC_TRUE if the matrices are equal; PETSC_FALSE otherwise.

4914:    Level: intermediate

4916:    Concepts: matrices^equality between
4917: @*/
4918: PetscErrorCode MatEqual(Mat A,Mat B,PetscBool  *flg)
4919: {

4929:   MatCheckPreallocated(B,2);
4930:   if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4931:   if (!B->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4932:   if (A->rmap->N != B->rmap->N || A->cmap->N != B->cmap->N) SETERRQ4(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Mat A,Mat B: global dim %D %D %D %D",A->rmap->N,B->rmap->N,A->cmap->N,B->cmap->N);
4933:   if (!A->ops->equal) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Mat type %s",((PetscObject)A)->type_name);
4934:   if (!B->ops->equal) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Mat type %s",((PetscObject)B)->type_name);
4935:   if (A->ops->equal != B->ops->equal) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_INCOMP,"A is type: %s\nB is type: %s",((PetscObject)A)->type_name,((PetscObject)B)->type_name);
4936:   MatCheckPreallocated(A,1);

4938:   (*A->ops->equal)(A,B,flg);
4939:   return(0);
4940: }

4942: /*@C
4943:    MatDiagonalScale - Scales a matrix on the left and right by diagonal
4944:    matrices that are stored as vectors.  Either of the two scaling
4945:    matrices can be NULL.

4947:    Collective on Mat

4949:    Input Parameters:
4950: +  mat - the matrix to be scaled
4951: .  l - the left scaling vector (or NULL)
4952: -  r - the right scaling vector (or NULL)

4954:    Notes:
4955:    MatDiagonalScale() computes A = LAR, where
4956:    L is a diagonal matrix (stored as a vector) and R is a diagonal matrix (stored as a vector).
4957:    L scales the rows of the matrix and R scales the columns of the matrix.

4959:    Level: intermediate

4961:    Concepts: matrices^diagonal scaling
4962:    Concepts: diagonal scaling of matrices

4964: .seealso: MatScale()
4965: @*/
4966: PetscErrorCode MatDiagonalScale(Mat mat,Vec l,Vec r)
4967: {

4973:   if (!mat->ops->diagonalscale) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
4976:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4977:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
4978:   MatCheckPreallocated(mat,1);

4980:   PetscLogEventBegin(MAT_Scale,mat,0,0,0);
4981:   (*mat->ops->diagonalscale)(mat,l,r);
4982:   PetscLogEventEnd(MAT_Scale,mat,0,0,0);
4983:   PetscObjectStateIncrease((PetscObject)mat);
4984: #if defined(PETSC_HAVE_CUSP)
4985:   if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
4986:     mat->valid_GPU_matrix = PETSC_CUSP_CPU;
4987:   }
4988: #elif defined(PETSC_HAVE_VIENNACL)
4989:   if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
4990:     mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
4991:   }
4992: #elif defined(PETSC_HAVE_VECCUDA)
4993:   if (mat->valid_GPU_matrix != PETSC_CUDA_UNALLOCATED) {
4994:     mat->valid_GPU_matrix = PETSC_CUDA_CPU;
4995:   }
4996: #endif
4997:   return(0);
4998: }
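/*
   A minimal sketch of row scaling with MatDiagonalScale(): scale each row of A by the
   inverse of its diagonal entry. Names are illustrative; a square assembled matrix A
   with nonzero diagonal is assumed.
*/
static PetscErrorCode ExampleDiagonalScale(Mat A)
{
  Vec d;

  MatCreateVecs(A,NULL,&d);
  MatGetDiagonal(A,d);
  VecReciprocal(d);              /* d_i = 1/a_ii */
  MatDiagonalScale(A,d,NULL);    /* rows only: A = diag(d) A */
  VecDestroy(&d);
  return(0);
}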

5000: /*@
5001:     MatScale - Scales all elements of a matrix by a given number.

5003:     Logically Collective on Mat

5005:     Input Parameters:
5006: +   mat - the matrix to be scaled
5007: -   a  - the scaling value

5009:     Output Parameter:
5010: .   mat - the scaled matrix

5012:     Level: intermediate

5014:     Concepts: matrices^scaling all entries

5016: .seealso: MatDiagonalScale()
5017: @*/
5018: PetscErrorCode MatScale(Mat mat,PetscScalar a)
5019: {

5025:   if (a != (PetscScalar)1.0 && !mat->ops->scale) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
5026:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
5027:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
5029:   MatCheckPreallocated(mat,1);

5031:   PetscLogEventBegin(MAT_Scale,mat,0,0,0);
5032:   if (a != (PetscScalar)1.0) {
5033:     (*mat->ops->scale)(mat,a);
5034:     PetscObjectStateIncrease((PetscObject)mat);
5035: #if defined(PETSC_HAVE_CUSP)
5036:     if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
5037:       mat->valid_GPU_matrix = PETSC_CUSP_CPU;
5038:     }
5039: #elif defined(PETSC_HAVE_VIENNACL)
5040:     if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
5041:       mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
5042:     }
5043: #elif defined(PETSC_HAVE_VECCUDA)
5044:     if (mat->valid_GPU_matrix != PETSC_CUDA_UNALLOCATED) {
5045:       mat->valid_GPU_matrix = PETSC_CUDA_CPU;
5046:     }
5047: #endif
5048:   }
5049:   PetscLogEventEnd(MAT_Scale,mat,0,0,0);
5050:   return(0);
5051: }
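/*
   A minimal sketch for MatScale(); the scaling factor is illustrative and may be complex
   when PETSc is configured with complex scalars. A is an assembled matrix.
*/
static PetscErrorCode ExampleScale(Mat A)
{
  MatScale(A,0.5);   /* halve every stored entry; the nonzero pattern is unchanged */
  return(0);
}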

5053: /*@
5054:    MatNorm - Calculates various norms of a matrix.

5056:    Collective on Mat

5058:    Input Parameters:
5059: +  mat - the matrix
5060: -  type - the type of norm, NORM_1, NORM_FROBENIUS, NORM_INFINITY

5062:    Output Parameters:
5063: .  nrm - the resulting norm

5065:    Level: intermediate

5067:    Concepts: matrices^norm
5068:    Concepts: norm^of matrix
5069: @*/
5070: PetscErrorCode MatNorm(Mat mat,NormType type,PetscReal *nrm)
5071: {


5079:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
5080:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
5081:   if (!mat->ops->norm) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
5082:   MatCheckPreallocated(mat,1);

5084:   (*mat->ops->norm)(mat,type,nrm);
5085:   return(0);
5086: }
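/*
   A minimal sketch comparing the three supported matrix norms; names are illustrative and
   an assembled matrix A is assumed.
*/
static PetscErrorCode ExampleNorm(Mat A)
{
  PetscReal n1,nf,ninf;

  MatNorm(A,NORM_1,&n1);            /* largest column sum of absolute values */
  MatNorm(A,NORM_FROBENIUS,&nf);    /* square root of the sum of squares of all entries */
  MatNorm(A,NORM_INFINITY,&ninf);   /* largest row sum of absolute values */
  PetscPrintf(PetscObjectComm((PetscObject)A),"1-norm %g, Frobenius %g, inf-norm %g\n",(double)n1,(double)nf,(double)ninf);
  return(0);
}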

5088: /*
5089:      This variable is used to prevent counting of MatAssemblyBegin() that
5090:    are called from within a MatAssemblyEnd().
5091: */
5092: static PetscInt MatAssemblyEnd_InUse = 0;
5093: /*@
5094:    MatAssemblyBegin - Begins assembling the matrix.  This routine should
5095:    be called after completing all calls to MatSetValues().

5097:    Collective on Mat

5099:    Input Parameters:
5100: +  mat - the matrix
5101: -  type - type of assembly, either MAT_FLUSH_ASSEMBLY or MAT_FINAL_ASSEMBLY

5103:    Notes:
5104:    MatSetValues() generally caches the values.  The matrix is ready to
5105:    use only after MatAssemblyBegin() and MatAssemblyEnd() have been called.
5106:    Use MAT_FLUSH_ASSEMBLY when switching between ADD_VALUES and INSERT_VALUES
5107:    in MatSetValues(); use MAT_FINAL_ASSEMBLY for the final assembly before
5108:    using the matrix.

5110:    ALL processes that share a matrix MUST call MatAssemblyBegin() and MatAssemblyEnd() the SAME NUMBER of times, and each time with the
5111:    same flag of MAT_FLUSH_ASSEMBLY or MAT_FINAL_ASSEMBLY for all processes. Thus you CANNOT locally change from ADD_VALUES to INSERT_VALUES; that is
5112:    a global collective operation requiring all processes that share the matrix.

5114:    Space for preallocated nonzeros that is not filled by a call to MatSetValues() or a related routine is compressed
5115:    out by assembly. If you intend to use that extra space on a subsequent assembly, be sure to insert explicit zeros
5116:    before MAT_FINAL_ASSEMBLY so the space is not compressed out.

5118:    Level: beginner

5120:    Concepts: matrices^assembling

5122: .seealso: MatAssemblyEnd(), MatSetValues(), MatAssembled()
5123: @*/
5124: PetscErrorCode MatAssemblyBegin(Mat mat,MatAssemblyType type)
5125: {

5131:   MatCheckPreallocated(mat,1);
5132:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix.\nDid you forget to call MatSetUnfactored()?");
5133:   if (mat->assembled) {
5134:     mat->was_assembled = PETSC_TRUE;
5135:     mat->assembled     = PETSC_FALSE;
5136:   }
5137:   if (!MatAssemblyEnd_InUse) {
5138:     PetscLogEventBegin(MAT_AssemblyBegin,mat,0,0,0);
5139:     if (mat->ops->assemblybegin) {(*mat->ops->assemblybegin)(mat,type);}
5140:     PetscLogEventEnd(MAT_AssemblyBegin,mat,0,0,0);
5141:   } else if (mat->ops->assemblybegin) {
5142:     (*mat->ops->assemblybegin)(mat,type);
5143:   }
5144:   return(0);
5145: }

5147: /*@
5148:    MatAssembled - Indicates if a matrix has been assembled and is ready for
5149:      use; for example, in matrix-vector product.

5151:    Not Collective

5153:    Input Parameter:
5154: .  mat - the matrix

5156:    Output Parameter:
5157: .  assembled - PETSC_TRUE or PETSC_FALSE

5159:    Level: advanced

5161:    Concepts: matrices^assembled?

5163: .seealso: MatAssemblyEnd(), MatSetValues(), MatAssemblyBegin()
5164: @*/
5165: PetscErrorCode MatAssembled(Mat mat,PetscBool  *assembled)
5166: {
5171:   *assembled = mat->assembled;
5172:   return(0);
5173: }

5175: /*@
5176:    MatAssemblyEnd - Completes assembling the matrix.  This routine should
5177:    be called after MatAssemblyBegin().

5179:    Collective on Mat

5181:    Input Parameters:
5182: +  mat - the matrix
5183: -  type - type of assembly, either MAT_FLUSH_ASSEMBLY or MAT_FINAL_ASSEMBLY

5185:    Options Database Keys:
5186: +  -mat_view ::ascii_info - Prints info on matrix at conclusion of MatAssemblyEnd()
5187: .  -mat_view ::ascii_info_detail - Prints more detailed info
5188: .  -mat_view - Prints matrix in ASCII format
5189: .  -mat_view ::ascii_matlab - Prints matrix in Matlab format
5190: .  -mat_view draw - PetscDraws nonzero structure of matrix, using MatView() and PetscDrawOpenX().
5191: .  -display <name> - Sets display name (default is host)
5192: .  -draw_pause <sec> - Sets number of seconds to pause after display
5193: .  -mat_view socket - Sends matrix to socket, can be accessed from Matlab (See Users-Manual: Chapter 12 Using MATLAB with PETSc )
5194: .  -viewer_socket_machine <machine> - Machine to use for socket
5195: .  -viewer_socket_port <port> - Port number to use for socket
5196: -  -mat_view binary:filename[:append] - Save matrix to file in binary format

5198:    Notes:
5199:    MatSetValues() generally caches the values.  The matrix is ready to
5200:    use only after MatAssemblyBegin() and MatAssemblyEnd() have been called.
5201:    Use MAT_FLUSH_ASSEMBLY when switching between ADD_VALUES and INSERT_VALUES
5202:    in MatSetValues(); use MAT_FINAL_ASSEMBLY for the final assembly before
5203:    using the matrix.

5205:    Space for preallocated nonzeros that is not filled by a call to MatSetValues() or a related routine is compressed
5206:    out by assembly. If you intend to use that extra space on a subsequent assembly, be sure to insert explicit zeros
5207:    before MAT_FINAL_ASSEMBLY so the space is not compressed out.

5209:    Level: beginner

5211: .seealso: MatAssemblyBegin(), MatSetValues(), PetscDrawOpenX(), PetscDrawCreate(), MatView(), MatAssembled(), PetscViewerSocketOpen()
5212: @*/
5213: PetscErrorCode MatAssemblyEnd(Mat mat,MatAssemblyType type)
5214: {
5215:   PetscErrorCode  ierr;
5216:   static PetscInt inassm = 0;
5217:   PetscBool       flg    = PETSC_FALSE;


5223:   inassm++;
5224:   MatAssemblyEnd_InUse++;
5225:   if (MatAssemblyEnd_InUse == 1) { /* Do the logging only the first time through */
5226:     PetscLogEventBegin(MAT_AssemblyEnd,mat,0,0,0);
5227:     if (mat->ops->assemblyend) {
5228:       (*mat->ops->assemblyend)(mat,type);
5229:     }
5230:     PetscLogEventEnd(MAT_AssemblyEnd,mat,0,0,0);
5231:   } else if (mat->ops->assemblyend) {
5232:     (*mat->ops->assemblyend)(mat,type);
5233:   }

5235:   /* Flush assembly is not a true assembly */
5236:   if (type != MAT_FLUSH_ASSEMBLY) {
5237:     mat->assembled = PETSC_TRUE; mat->num_ass++;
5238:   }
5239:   mat->insertmode = NOT_SET_VALUES;
5240:   MatAssemblyEnd_InUse--;
5241:   PetscObjectStateIncrease((PetscObject)mat);
5242:   if (!mat->symmetric_eternal) {
5243:     mat->symmetric_set              = PETSC_FALSE;
5244:     mat->hermitian_set              = PETSC_FALSE;
5245:     mat->structurally_symmetric_set = PETSC_FALSE;
5246:   }
5247: #if defined(PETSC_HAVE_CUSP)
5248:   if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
5249:     mat->valid_GPU_matrix = PETSC_CUSP_CPU;
5250:   }
5251: #elif defined(PETSC_HAVE_VIENNACL)
5252:   if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
5253:     mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
5254:   }
5255: #elif defined(PETSC_HAVE_VECCUDA)
5256:   if (mat->valid_GPU_matrix != PETSC_CUDA_UNALLOCATED) {
5257:     mat->valid_GPU_matrix = PETSC_CUDA_CPU;
5258:   }
5259: #endif
5260:   if (inassm == 1 && type != MAT_FLUSH_ASSEMBLY) {
5261:     MatViewFromOptions(mat,NULL,"-mat_view");

5263:     if (mat->checksymmetryonassembly) {
5264:       MatIsSymmetric(mat,mat->checksymmetrytol,&flg);
5265:       if (flg) {
5266:         PetscPrintf(PetscObjectComm((PetscObject)mat),"Matrix is symmetric (tolerance %g)\n",(double)mat->checksymmetrytol);
5267:       } else {
5268:         PetscPrintf(PetscObjectComm((PetscObject)mat),"Matrix is not symmetric (tolerance %g)\n",(double)mat->checksymmetrytol);
5269:       }
5270:     }
5271:     if (mat->nullsp && mat->checknullspaceonassembly) {
5272:       MatNullSpaceTest(mat->nullsp,mat,NULL);
5273:     }
5274:   }
5275:   inassm--;
5276:   return(0);
5277: }
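/*
   A minimal sketch of the assembly life cycle, including a flush assembly when switching
   from ADD_VALUES to INSERT_VALUES. The indices and values are illustrative; A is assumed
   to have been created, sized and preallocated, with row 0 and column 0 owned locally.
*/
static PetscErrorCode ExampleAssembly(Mat A)
{
  PetscInt    i = 0,j = 0;
  PetscScalar v = 1.0;

  MatSetValues(A,1,&i,1,&j,&v,ADD_VALUES);
  MatAssemblyBegin(A,MAT_FLUSH_ASSEMBLY);   /* flush before changing the insert mode */
  MatAssemblyEnd(A,MAT_FLUSH_ASSEMBLY);
  MatSetValues(A,1,&i,1,&j,&v,INSERT_VALUES);
  MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);   /* final assembly; afterwards the matrix can be used */
  MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
  return(0);
}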

5279: /*@
5280:    MatSetOption - Sets a parameter option for a matrix. Some options
5281:    may be specific to certain storage formats.  Some options
5282:    determine how values will be inserted (or added). Sorted,
5283:    row-oriented input will generally assemble the fastest. The default
5284:    is row-oriented.

5286:    Logically Collective on Mat for certain operations, such as MAT_SPD, not collective for MAT_ROW_ORIENTED, see MatOption

5288:    Input Parameters:
5289: +  mat - the matrix
5290: .  option - the option, one of those listed below (and possibly others),
5291: -  flg - turn the option on (PETSC_TRUE) or off (PETSC_FALSE)

5293:   Options Describing Matrix Structure:
5294: +    MAT_SPD - symmetric positive definite
5295: .    MAT_SYMMETRIC - symmetric in terms of both structure and value
5296: .    MAT_HERMITIAN - transpose is the complex conjugation
5297: .    MAT_STRUCTURALLY_SYMMETRIC - symmetric nonzero structure
5298: -    MAT_SYMMETRY_ETERNAL - if you would like the symmetry/Hermitian flag
5299:                             you set to be kept with all future use of the matrix
5300:                             including after MatAssemblyBegin/End() which could
5301:                             potentially change the symmetry structure, i.e. you
5302:                             KNOW the matrix will ALWAYS have the property you set.


5305:    Options For Use with MatSetValues():
5306:    Insert a logically dense subblock, which can be
5307: .    MAT_ROW_ORIENTED - row-oriented (default)

5309:    Note these options reflect the data you pass in with MatSetValues(); they have
5310:    nothing to do with how the data is stored internally in the matrix
5311:    data structure.

5313:    When (re)assembling a matrix, we can restrict the input for
5314:    efficiency/debugging purposes.  These options include:
5315: +    MAT_NEW_NONZERO_LOCATIONS - additional insertions will be allowed if they generate a new nonzero (slow)
5316: .    MAT_NEW_DIAGONALS - new diagonals will be allowed (for block diagonal format only)
5317: .    MAT_IGNORE_OFF_PROC_ENTRIES - drops off-processor entries
5318: .    MAT_NEW_NONZERO_LOCATION_ERR - generates an error for new matrix entry
5319: .    MAT_USE_HASH_TABLE - uses a hash table to speed up matrix assembly
5320: .    MAT_NO_OFF_PROC_ENTRIES - you know each process will only set values for its own rows, will generate an error if
5321:         any process sets values for another process. This avoids all reductions in the MatAssembly routines and thus improves
5322:         performance for very large process counts.
5323: -    MAT_SUBSET_OFF_PROC_ENTRIES - you know that the first assembly after setting this flag will set a superset
5324:         of the off-process entries required for all subsequent assemblies. This avoids a rendezvous step in the MatAssembly
5325:         functions, instead sending only neighbor messages.

5327:    Notes:
5328:    Except for MAT_UNUSED_NONZERO_LOCATION_ERR and  MAT_ROW_ORIENTED all processes that share the matrix must pass the same value in flg!

5330:    Some options are relevant only for particular matrix types and
5331:    are thus ignored by others.  Other options are not supported by
5332:    certain matrix types and will generate an error message if set.

5334:    If using a Fortran 77 module to compute a matrix, one may need to
5335:    use the column-oriented option (or convert to the row-oriented
5336:    format).

5338:    MAT_NEW_NONZERO_LOCATIONS set to PETSC_FALSE indicates that any add or insertion
5339:    that would generate a new entry in the nonzero structure is instead
5340:    ignored.  Thus, if memory has not already been allocated for this particular
5341:    data, then the insertion is ignored. For dense matrices, in which
5342:    the entire array is allocated, no entries are ever ignored.
5343:    Set after the first MatAssemblyEnd(). If this option is set then the MatAssemblyBegin/End() processes have one less global reduction

5345:    MAT_NEW_NONZERO_LOCATION_ERR set to PETSC_TRUE indicates that any add or insertion
5346:    that would generate a new entry in the nonzero structure instead produces
5347:    an error. (Currently supported for AIJ and BAIJ formats only.) If this option is set then the MatAssemblyBegin/End() processes have one less global reduction

5349:    MAT_NEW_NONZERO_ALLOCATION_ERR set to PETSC_TRUE indicates that any add or insertion
5350:    that would generate a new entry that has not been preallocated will
5351:    instead produce an error. (Currently supported for AIJ and BAIJ formats
5352:    only.) This is a useful flag when debugging matrix memory preallocation.
5353:    If this option is set then the MatAssemblyBegin/End() processes have one less global reduction

5355:    MAT_IGNORE_OFF_PROC_ENTRIES set to PETSC_TRUE indicates entries destined for
5356:    other processors should be dropped, rather than stashed.
5357:    This is useful if you know that the "owning" processor is also
5358:    always generating the correct matrix entries, so that PETSc need
5359:    not transfer duplicate entries generated on another processor.

5361:    MAT_USE_HASH_TABLE indicates that a hash table be used to improve the
5362:    searches during matrix assembly. When this flag is set, the hash table
5363:    is created during the first Matrix Assembly. This hash table is
5364:    used the next time through, during MatSetValues()/MatSetValuesBlocked()
5365:    to improve the searching of indices. MAT_NEW_NONZERO_LOCATIONS flag
5366:    should be used with MAT_USE_HASH_TABLE flag. This option is currently
5367:    supported by MATMPIBAIJ format only.

5369:    MAT_KEEP_NONZERO_PATTERN indicates when MatZeroRows() is called the zeroed entries
5370:    are kept in the nonzero structure

5372:    MAT_IGNORE_ZERO_ENTRIES - for AIJ/IS matrices this will stop zero values from creating
5373:    a zero location in the matrix

5375:    MAT_USE_INODES - indicates using inode version of the code - works with AIJ matrix types

5377:    MAT_NO_OFF_PROC_ZERO_ROWS - you know each process will only zero its own rows. This avoids all reductions in the
5378:         zero row routines and thus improves performance for very large process counts.

5380:    MAT_IGNORE_LOWER_TRIANGULAR - For SBAIJ matrices will ignore any insertions you make in the lower triangular
5381:         part of the matrix (since they should match the upper triangular part).

5383:    Notes: Can only be called after MatSetSizes() and MatSetType() have been called.

5385:    Level: intermediate

5387:    Concepts: matrices^setting options

5389: .seealso:  MatOption, Mat

5391: @*/
5392: PetscErrorCode MatSetOption(Mat mat,MatOption op,PetscBool flg)
5393: {

5399:   if (op > 0) {
5402:   }

5404:   if (((int) op) <= MAT_OPTION_MIN || ((int) op) >= MAT_OPTION_MAX) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_OUTOFRANGE,"Options %d is out of range",(int)op);
5405:   if (!((PetscObject)mat)->type_name) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_TYPENOTSET,"Cannot set options until type and size have been set, see MatSetType() and MatSetSizes()");

5407:   switch (op) {
5408:   case MAT_NO_OFF_PROC_ENTRIES:
5409:     mat->nooffprocentries = flg;
5410:     return(0);
5411:     break;
5412:   case MAT_SUBSET_OFF_PROC_ENTRIES:
5413:     mat->subsetoffprocentries = flg;
5414:     return(0);
5415:   case MAT_NO_OFF_PROC_ZERO_ROWS:
5416:     mat->nooffproczerorows = flg;
5417:     return(0);
5418:     break;
5419:   case MAT_SPD:
5420:     mat->spd_set = PETSC_TRUE;
5421:     mat->spd     = flg;
5422:     if (flg) {
5423:       mat->symmetric                  = PETSC_TRUE;
5424:       mat->structurally_symmetric     = PETSC_TRUE;
5425:       mat->symmetric_set              = PETSC_TRUE;
5426:       mat->structurally_symmetric_set = PETSC_TRUE;
5427:     }
5428:     break;
5429:   case MAT_SYMMETRIC:
5430:     mat->symmetric = flg;
5431:     if (flg) mat->structurally_symmetric = PETSC_TRUE;
5432:     mat->symmetric_set              = PETSC_TRUE;
5433:     mat->structurally_symmetric_set = flg;
5434: #if !defined(PETSC_USE_COMPLEX)
5435:     mat->hermitian     = flg;
5436:     mat->hermitian_set = PETSC_TRUE;
5437: #endif
5438:     break;
5439:   case MAT_HERMITIAN:
5440:     mat->hermitian = flg;
5441:     if (flg) mat->structurally_symmetric = PETSC_TRUE;
5442:     mat->hermitian_set              = PETSC_TRUE;
5443:     mat->structurally_symmetric_set = flg;
5444: #if !defined(PETSC_USE_COMPLEX)
5445:     mat->symmetric     = flg;
5446:     mat->symmetric_set = PETSC_TRUE;
5447: #endif
5448:     break;
5449:   case MAT_STRUCTURALLY_SYMMETRIC:
5450:     mat->structurally_symmetric     = flg;
5451:     mat->structurally_symmetric_set = PETSC_TRUE;
5452:     break;
5453:   case MAT_SYMMETRY_ETERNAL:
5454:     mat->symmetric_eternal = flg;
5455:     break;
5456:   case MAT_STRUCTURE_ONLY:
5457:     mat->structure_only = flg;
5458:     break;
5459:   default:
5460:     break;
5461:   }
5462:   if (mat->ops->setoption) {
5463:     (*mat->ops->setoption)(mat,op,flg);
5464:   }
5465:   return(0);
5466: }
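/*
   A minimal sketch of typical MatSetOption() calls, made after MatSetSizes() and
   MatSetType() and before inserting values; the particular options chosen are
   illustrative.
*/
static PetscErrorCode ExampleSetOption(Mat A)
{
  MatSetOption(A,MAT_SYMMETRIC,PETSC_TRUE);                   /* assert the values will be symmetric */
  MatSetOption(A,MAT_SYMMETRY_ETERNAL,PETSC_TRUE);            /* keep that flag across future assemblies */
  MatSetOption(A,MAT_NEW_NONZERO_ALLOCATION_ERR,PETSC_TRUE);  /* error on insertions outside the preallocation */
  return(0);
}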

5468: /*@
5469:    MatGetOption - Gets a parameter option that has been set for a matrix.

5471:    Logically Collective on Mat for certain operations, such as MAT_SPD, not collective for MAT_ROW_ORIENTED, see MatOption

5473:    Input Parameters:
5474: +  mat - the matrix
5475: -  option - the option, this only responds to certain options, check the code for which ones

5477:    Output Parameter:
5478: .  flg - the current value of the option, PETSC_TRUE or PETSC_FALSE

5480:    Notes: Can only be called after MatSetSizes() and MatSetType() have been called.

5482:    Level: intermediate

5484:    Concepts: matrices^setting options

5486: .seealso:  MatOption, MatSetOption()

5488: @*/
5489: PetscErrorCode MatGetOption(Mat mat,MatOption op,PetscBool *flg)
5490: {

5495:   if (((int) op) <= MAT_OPTION_MIN || ((int) op) >= MAT_OPTION_MAX) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_OUTOFRANGE,"Options %d is out of range",(int)op);
5496:   if (!((PetscObject)mat)->type_name) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_TYPENOTSET,"Cannot get options until type and size have been set, see MatSetType() and MatSetSizes()");

5498:   switch (op) {
5499:   case MAT_NO_OFF_PROC_ENTRIES:
5500:     *flg = mat->nooffprocentries;
5501:     break;
5502:   case MAT_NO_OFF_PROC_ZERO_ROWS:
5503:     *flg = mat->nooffproczerorows;
5504:     break;
5505:   case MAT_SYMMETRIC:
5506:     *flg = mat->symmetric;
5507:     break;
5508:   case MAT_HERMITIAN:
5509:     *flg = mat->hermitian;
5510:     break;
5511:   case MAT_STRUCTURALLY_SYMMETRIC:
5512:     *flg = mat->structurally_symmetric;
5513:     break;
5514:   case MAT_SYMMETRY_ETERNAL:
5515:     *flg = mat->symmetric_eternal;
5516:     break;
5517:   case MAT_SPD:
5518:     *flg = mat->spd;
5519:     break;
5520:   default:
5521:     break;
5522:   }
5523:   return(0);
5524: }

5526: /*@
5527:    MatZeroEntries - Zeros all entries of a matrix.  For sparse matrices
5528:    this routine retains the old nonzero structure.

5530:    Logically Collective on Mat

5532:    Input Parameters:
5533: .  mat - the matrix

5535:    Level: intermediate

5537:    Notes: If the matrix was not preallocated then a default, likely poor preallocation will be set in the matrix, so this should be called after the preallocation phase.
5538:    See the Performance chapter of the users manual for information on preallocating matrices.

5540:    Concepts: matrices^zeroing

5542: .seealso: MatZeroRows()
5543: @*/
5544: PetscErrorCode MatZeroEntries(Mat mat)
5545: {

5551:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
5552:   if (mat->insertmode != NOT_SET_VALUES) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for matrices where you have set values but not yet assembled");
5553:   if (!mat->ops->zeroentries) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
5554:   MatCheckPreallocated(mat,1);

5556:   PetscLogEventBegin(MAT_ZeroEntries,mat,0,0,0);
5557:   (*mat->ops->zeroentries)(mat);
5558:   PetscLogEventEnd(MAT_ZeroEntries,mat,0,0,0);
5559:   PetscObjectStateIncrease((PetscObject)mat);
5560: #if defined(PETSC_HAVE_CUSP)
5561:   if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
5562:     mat->valid_GPU_matrix = PETSC_CUSP_CPU;
5563:   }
5564: #elif defined(PETSC_HAVE_VIENNACL)
5565:   if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
5566:     mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
5567:   }
5568: #elif defined(PETSC_HAVE_VECCUDA)
5569:   if (mat->valid_GPU_matrix != PETSC_CUDA_UNALLOCATED) {
5570:     mat->valid_GPU_matrix = PETSC_CUDA_CPU;
5571:   }
5572: #endif
5573:   return(0);
5574: }

5576: /*@C
5577:    MatZeroRowsColumns - Zeros all entries (except possibly the main diagonal)
5578:    of a set of rows and columns of a matrix.

5580:    Collective on Mat

5582:    Input Parameters:
5583: +  mat - the matrix
5584: .  numRows - the number of rows to remove
5585: .  rows - the global row indices
5586: .  diag - value put in all diagonals of eliminated rows (0.0 will even eliminate diagonal entry)
5587: .  x - optional vector of solutions for zeroed rows (other entries in vector are not used)
5588: -  b - optional vector of right hand side, that will be adjusted by provided solution

5590:    Notes:
5591:    This does not change the nonzero structure of the matrix, it merely zeros those entries in the matrix.

5593:    The user can set a value in the diagonal entry (or for the AIJ and
5594:    row formats can optionally remove the main diagonal entry from the
5595:    nonzero structure as well, by passing 0.0 as the final argument).

5597:    For the parallel case, all processes that share the matrix (i.e.,
5598:    those in the communicator used for matrix creation) MUST call this
5599:    routine, regardless of whether any rows being zeroed are owned by
5600:    them.

5602:    Each processor can indicate any rows in the entire matrix to be zeroed (i.e. each process does NOT have to
5603:    list only rows local to itself).

5605:    The option MAT_NO_OFF_PROC_ZERO_ROWS does not apply to this routine.

5607:    Level: intermediate

5609:    Concepts: matrices^zeroing rows

5611: .seealso: MatZeroRowsIS(), MatZeroRows(), MatZeroRowsLocalIS(), MatZeroRowsStencil(), MatZeroEntries(), MatZeroRowsLocal(), MatSetOption(),
5612:           MatZeroRowsColumnsLocal(), MatZeroRowsColumnsLocalIS(), MatZeroRowsColumnsIS(), MatZeroRowsColumnsStencil()
5613: @*/
5614: PetscErrorCode MatZeroRowsColumns(Mat mat,PetscInt numRows,const PetscInt rows[],PetscScalar diag,Vec x,Vec b)
5615: {

5622:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
5623:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
5624:   if (!mat->ops->zerorowscolumns) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
5625:   MatCheckPreallocated(mat,1);

5627:   (*mat->ops->zerorowscolumns)(mat,numRows,rows,diag,x,b);
5628:   MatViewFromOptions(mat,NULL,"-mat_view");
5629:   PetscObjectStateIncrease((PetscObject)mat);
5630: #if defined(PETSC_HAVE_CUSP)
5631:   if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
5632:     mat->valid_GPU_matrix = PETSC_CUSP_CPU;
5633:   }
5634: #elif defined(PETSC_HAVE_VIENNACL)
5635:   if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
5636:     mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
5637:   }
5638: #elif defined(PETSC_HAVE_VECCUDA)
5639:   if (mat->valid_GPU_matrix != PETSC_CUDA_UNALLOCATED) {
5640:     mat->valid_GPU_matrix = PETSC_CUDA_CPU;
5641:   }
5642: #endif
5643:   return(0);
5644: }

5646: /*@C
5647:    MatZeroRowsColumnsIS - Zeros all entries (except possibly the main diagonal)
5648:    of a set of rows and columns of a matrix.

5650:    Collective on Mat

5652:    Input Parameters:
5653: +  mat - the matrix
5654: .  is - the rows to zero
5655: .  diag - value put in all diagonals of eliminated rows (0.0 will even eliminate diagonal entry)
5656: .  x - optional vector of solutions for zeroed rows (other entries in vector are not used)
5657: -  b - optional vector of right hand side, that will be adjusted by provided solution

5659:    Notes:
5660:    This does not change the nonzero structure of the matrix, it merely zeros those entries in the matrix.

5662:    The user can set a value in the diagonal entry (or for the AIJ and
5663:    row formats can optionally remove the main diagonal entry from the
5664:    nonzero structure as well, by passing 0.0 as the final argument).

5666:    For the parallel case, all processes that share the matrix (i.e.,
5667:    those in the communicator used for matrix creation) MUST call this
5668:    routine, regardless of whether any rows being zeroed are owned by
5669:    them.

5671:    Each processor can indicate any rows in the entire matrix to be zeroed (i.e. each process does NOT have to
5672:    list only rows local to itself).

5674:    The option MAT_NO_OFF_PROC_ZERO_ROWS does not apply to this routine.

5676:    Level: intermediate

5678:    Concepts: matrices^zeroing rows

5680: .seealso: MatZeroRowsIS(), MatZeroRowsColumns(), MatZeroRowsLocalIS(), MatZeroRowsStencil(), MatZeroEntries(), MatZeroRowsLocal(), MatSetOption(),
5681:           MatZeroRowsColumnsLocal(), MatZeroRowsColumnsLocalIS(), MatZeroRows(), MatZeroRowsColumnsStencil()
5682: @*/
5683: PetscErrorCode MatZeroRowsColumnsIS(Mat mat,IS is,PetscScalar diag,Vec x,Vec b)
5684: {
5686:   PetscInt       numRows;
5687:   const PetscInt *rows;

5694:   ISGetLocalSize(is,&numRows);
5695:   ISGetIndices(is,&rows);
5696:   MatZeroRowsColumns(mat,numRows,rows,diag,x,b);
5697:   ISRestoreIndices(is,&rows);
5698:   return(0);
5699: }

5701: /*@C
5702:    MatZeroRows - Zeros all entries (except possibly the main diagonal)
5703:    of a set of rows of a matrix.

5705:    Collective on Mat

5707:    Input Parameters:
5708: +  mat - the matrix
5709: .  numRows - the number of rows to remove
5710: .  rows - the global row indices
5711: .  diag - value put in all diagonals of eliminated rows (0.0 will even eliminate diagonal entry)
5712: .  x - optional vector of solutions for zeroed rows (other entries in vector are not used)
5713: -  b - optional vector of right hand side, that will be adjusted by provided solution

5715:    Notes:
5716:    For the AIJ and BAIJ matrix formats this removes the old nonzero structure,
5717:    but does not release memory.  For the dense and block diagonal
5718:    formats this does not alter the nonzero structure.

5720:    If the option MatSetOption(mat,MAT_KEEP_NONZERO_PATTERN,PETSC_TRUE) the nonzero structure
5721:    of the matrix is not changed (even for AIJ and BAIJ matrices) the values are
5722:    merely zeroed.

5724:    The user can set a value in the diagonal entry (or for the AIJ and
5725:    row formats can optionally remove the main diagonal entry from the
5726:    nonzero structure as well, by passing 0.0 as the final argument).

5728:    For the parallel case, all processes that share the matrix (i.e.,
5729:    those in the communicator used for matrix creation) MUST call this
5730:    routine, regardless of whether any rows being zeroed are owned by
5731:    them.

5733:    Each processor can indicate any rows in the entire matrix to be zeroed (i.e. each process does NOT have to
5734:    list only rows local to itself).

5736:    You can call MatSetOption(mat,MAT_NO_OFF_PROC_ZERO_ROWS,PETSC_TRUE) if each process indicates only rows it
5737:    owns that are to be zeroed. This saves a global synchronization in the implementation.

5739:    Level: intermediate

5741:    Concepts: matrices^zeroing rows

5743: .seealso: MatZeroRowsIS(), MatZeroRowsColumns(), MatZeroRowsLocalIS(), MatZeroRowsStencil(), MatZeroEntries(), MatZeroRowsLocal(), MatSetOption(),
5744:           MatZeroRowsColumnsLocal(), MatZeroRowsColumnsLocalIS(), MatZeroRowsColumnsIS(), MatZeroRowsColumnsStencil()
5745: @*/
5746: PetscErrorCode MatZeroRows(Mat mat,PetscInt numRows,const PetscInt rows[],PetscScalar diag,Vec x,Vec b)
5747: {

5754:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
5755:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
5756:   if (!mat->ops->zerorows) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
5757:   MatCheckPreallocated(mat,1);

5759:   (*mat->ops->zerorows)(mat,numRows,rows,diag,x,b);
5760:   MatViewFromOptions(mat,NULL,"-mat_view");
5761:   PetscObjectStateIncrease((PetscObject)mat);
5762: #if defined(PETSC_HAVE_CUSP)
5763:   if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
5764:     mat->valid_GPU_matrix = PETSC_CUSP_CPU;
5765:   }
5766: #elif defined(PETSC_HAVE_VIENNACL)
5767:   if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
5768:     mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
5769:   }
5770: #elif defined(PETSC_HAVE_VECCUDA)
5771:   if (mat->valid_GPU_matrix != PETSC_CUDA_UNALLOCATED) {
5772:     mat->valid_GPU_matrix = PETSC_CUDA_CPU;
5773:   }
5774: #endif
5775:   return(0);
5776: }
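/*
   A minimal sketch of imposing Dirichlet boundary conditions with MatZeroRows(): the
   listed global rows are zeroed, 1.0 is placed on their diagonals, and b is adjusted from
   the boundary values stored in x. The row list is illustrative; A, x and b are assumed
   assembled and compatible.
*/
static PetscErrorCode ExampleZeroRows(Mat A,Vec x,Vec b)
{
  PetscInt rows[2] = {0,1};        /* global row indices; they need not be locally owned */

  MatZeroRows(A,2,rows,1.0,x,b);   /* for each listed row r, b[r] becomes diag*x[r] = x[r] */
  return(0);
}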

5778: /*@C
5779:    MatZeroRowsIS - Zeros all entries (except possibly the main diagonal)
5780:    of a set of rows of a matrix.

5782:    Collective on Mat

5784:    Input Parameters:
5785: +  mat - the matrix
5786: .  is - index set of rows to remove
5787: .  diag - value put in all diagonals of eliminated rows
5788: .  x - optional vector of solutions for zeroed rows (other entries in vector are not used)
5789: -  b - optional vector of right hand side, that will be adjusted by provided solution

5791:    Notes:
5792:    For the AIJ and BAIJ matrix formats this removes the old nonzero structure,
5793:    but does not release memory.  For the dense and block diagonal
5794:    formats this does not alter the nonzero structure.

5796:    If the option MatSetOption(mat,MAT_KEEP_NONZERO_PATTERN,PETSC_TRUE) the nonzero structure
5797:    of the matrix is not changed (even for AIJ and BAIJ matrices) the values are
5798:    merely zeroed.

5800:    The user can set a value in the diagonal entry (or for the AIJ and
5801:    row formats can optionally remove the main diagonal entry from the
5802:    nonzero structure as well, by passing 0.0 as the final argument).

5804:    For the parallel case, all processes that share the matrix (i.e.,
5805:    those in the communicator used for matrix creation) MUST call this
5806:    routine, regardless of whether any rows being zeroed are owned by
5807:    them.

5809:    Each processor can indicate any rows in the entire matrix to be zeroed (i.e. each process does NOT have to
5810:    list only rows local to itself).

5812:    You can call MatSetOption(mat,MAT_NO_OFF_PROC_ZERO_ROWS,PETSC_TRUE) if each process indicates only rows it
5813:    owns that are to be zeroed. This saves a global synchronization in the implementation.

5815:    Level: intermediate

5817:    Concepts: matrices^zeroing rows

5819: .seealso: MatZeroRows(), MatZeroRowsColumns(), MatZeroRowsLocalIS(), MatZeroRowsStencil(), MatZeroEntries(), MatZeroRowsLocal(), MatSetOption(),
5820:           MatZeroRowsColumnsLocal(), MatZeroRowsColumnsLocalIS(), MatZeroRowsColumnsIS(), MatZeroRowsColumnsStencil()
5821: @*/
5822: PetscErrorCode MatZeroRowsIS(Mat mat,IS is,PetscScalar diag,Vec x,Vec b)
5823: {
5824:   PetscInt       numRows;
5825:   const PetscInt *rows;

5832:   ISGetLocalSize(is,&numRows);
5833:   ISGetIndices(is,&rows);
5834:   MatZeroRows(mat,numRows,rows,diag,x,b);
5835:   ISRestoreIndices(is,&rows);
5836:   return(0);
5837: }

5839: /*@C
5840:    MatZeroRowsStencil - Zeros all entries (except possibly the main diagonal)
5841:    of a set of rows of a matrix. These rows must be local to the process.

5843:    Collective on Mat

5845:    Input Parameters:
5846: +  mat - the matrix
5847: .  numRows - the number of rows to remove
5848: .  rows - the grid coordinates (and component number when dof > 1) for matrix rows
5849: .  diag - value put in all diagonals of eliminated rows (0.0 will even eliminate diagonal entry)
5850: .  x - optional vector of solutions for zeroed rows (other entries in vector are not used)
5851: -  b - optional vector of right hand side, that will be adjusted by provided solution

5853:    Notes:
5854:    For the AIJ and BAIJ matrix formats this removes the old nonzero structure,
5855:    but does not release memory.  For the dense and block diagonal
5856:    formats this does not alter the nonzero structure.

5858:    If the option MatSetOption(mat,MAT_KEEP_NONZERO_PATTERN,PETSC_TRUE) the nonzero structure
5859:    of the matrix is not changed (even for AIJ and BAIJ matrices) the values are
5860:    merely zeroed.

5862:    The user can set a value in the diagonal entry (or for the AIJ and
5863:    row formats can optionally remove the main diagonal entry from the
5864:    nonzero structure as well, by passing 0.0 as the final argument).

5866:    For the parallel case, all processes that share the matrix (i.e.,
5867:    those in the communicator used for matrix creation) MUST call this
5868:    routine, regardless of whether any rows being zeroed are owned by
5869:    them.

5871:    Each processor can indicate any rows in the entire matrix to be zeroed (i.e. each process does NOT have to
5872:    list only rows local to itself).

5874:    The grid coordinates are across the entire grid, not just the local portion

5876:    In Fortran idxm and idxn should be declared as
5877: $     MatStencil idxm(4,m)
5878:    and the values inserted using
5879: $    idxm(MatStencil_i,1) = i
5880: $    idxm(MatStencil_j,1) = j
5881: $    idxm(MatStencil_k,1) = k
5882: $    idxm(MatStencil_c,1) = c
5883:    etc

5885:    For periodic boundary conditions use negative indices for values to the left (below 0); these are
5886:    obtained by wrapping values from the right edge. For values to the right of the last entry use that index plus one,
5887:    etc., to obtain values by wrapping from the left edge. This does not work for anything but the
5888:    DM_BOUNDARY_PERIODIC boundary type.

5890:    For indices that don't mean anything for your case (like the k index when working in 2d, or the c index when you have
5891:    a single value per point) you can skip filling those indices.

5893:    Level: intermediate

5895:    Concepts: matrices^zeroing rows

5897: .seealso: MatZeroRowsIS(), MatZeroRowsColumns(), MatZeroRowsLocalIS(), MatZeroRows(), MatZeroEntries(), MatZeroRowsLocal(), MatSetOption(),
5898:           MatZeroRowsColumnsLocal(), MatZeroRowsColumnsLocalIS(), MatZeroRowsColumnsIS(), MatZeroRowsColumnsStencil()
5899: @*/
5900: PetscErrorCode MatZeroRowsStencil(Mat mat,PetscInt numRows,const MatStencil rows[],PetscScalar diag,Vec x,Vec b)
5901: {
5902:   PetscInt       dim     = mat->stencil.dim;
5903:   PetscInt       sdim    = dim - (1 - (PetscInt) mat->stencil.noc);
5904:   PetscInt       *dims   = mat->stencil.dims+1;
5905:   PetscInt       *starts = mat->stencil.starts;
5906:   PetscInt       *dxm    = (PetscInt*) rows;
5907:   PetscInt       *jdxm, i, j, tmp, numNewRows = 0;


5915:   PetscMalloc1(numRows, &jdxm);
5916:   for (i = 0; i < numRows; ++i) {
5917:     /* Skip unused dimensions (they are ordered k, j, i, c) */
5918:     for (j = 0; j < 3-sdim; ++j) dxm++;
5919:     /* Local index in X dir */
5920:     tmp = *dxm++ - starts[0];
5921:     /* Loop over remaining dimensions */
5922:     for (j = 0; j < dim-1; ++j) {
5923:       /* If nonlocal, set index to be negative */
5924:       if ((*dxm++ - starts[j+1]) < 0 || tmp < 0) tmp = PETSC_MIN_INT;
5925:       /* Update local index */
5926:       else tmp = tmp*dims[j] + *(dxm-1) - starts[j+1];
5927:     }
5928:     /* Skip component slot if necessary */
5929:     if (mat->stencil.noc) dxm++;
5930:     /* Local row number */
5931:     if (tmp >= 0) {
5932:       jdxm[numNewRows++] = tmp;
5933:     }
5934:   }
5935:   MatZeroRowsLocal(mat,numNewRows,jdxm,diag,x,b);
5936:   PetscFree(jdxm);
5937:   return(0);
5938: }
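/*
   A minimal sketch of MatZeroRowsStencil() for a matrix created with DMCreateMatrix() on
   a 2d DMDA; the grid coordinates are illustrative and, as noted above, must refer to
   rows owned by this process.
*/
static PetscErrorCode ExampleZeroRowsStencil(Mat A,Vec x,Vec b)
{
  MatStencil row[2];

  row[0].k = 0; row[0].j = 0; row[0].i = 0; row[0].c = 0;   /* grid point (i,j) = (0,0), component 0 */
  row[1].k = 0; row[1].j = 0; row[1].i = 1; row[1].c = 0;   /* grid point (i,j) = (1,0), component 0 */
  MatZeroRowsStencil(A,2,row,1.0,x,b);                      /* diagonal set to 1.0, b adjusted from x */
  return(0);
}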

5940: /*@C
5941:    MatZeroRowsColumnsStencil - Zeros all row and column entries (except possibly the main diagonal)
5942:    of a set of rows and columns of a matrix.

5944:    Collective on Mat

5946:    Input Parameters:
5947: +  mat - the matrix
5948: .  numRows - the number of rows/columns to remove
5949: .  rows - the grid coordinates (and component number when dof > 1) for matrix rows
5950: .  diag - value put in all diagonals of eliminated rows (0.0 will even eliminate diagonal entry)
5951: .  x - optional vector of solutions for zeroed rows (other entries in vector are not used)
5952: -  b - optional vector of right hand side, that will be adjusted by provided solution

5954:    Notes:
5955:    For the AIJ and BAIJ matrix formats this removes the old nonzero structure,
5956:    but does not release memory.  For the dense and block diagonal
5957:    formats this does not alter the nonzero structure.

5959:    If the option MatSetOption(mat,MAT_KEEP_NONZERO_PATTERN,PETSC_TRUE) the nonzero structure
5960:    of the matrix is not changed (even for AIJ and BAIJ matrices) the values are
5961:    merely zeroed.

5963:    The user can set a value in the diagonal entry (or for the AIJ and
5964:    row formats can optionally remove the main diagonal entry from the
5965:    nonzero structure as well, by passing 0.0 as the final argument).

5967:    For the parallel case, all processes that share the matrix (i.e.,
5968:    those in the communicator used for matrix creation) MUST call this
5969:    routine, regardless of whether any rows being zeroed are owned by
5970:    them.

5972:    Each processor can indicate any rows in the entire matrix to be zeroed (i.e. each process does NOT have to
5973:    list only rows local to itself, but the row/column numbers are given in local numbering).

5975:    The grid coordinates are across the entire grid, not just the local portion

5977:    In Fortran idxm and idxn should be declared as
5978: $     MatStencil idxm(4,m)
5979:    and the values inserted using
5980: $    idxm(MatStencil_i,1) = i
5981: $    idxm(MatStencil_j,1) = j
5982: $    idxm(MatStencil_k,1) = k
5983: $    idxm(MatStencil_c,1) = c
5984:    etc

5986:    For periodic boundary conditions use negative indices for values to the left (below 0); these are
5987:    obtained by wrapping values from the right edge. For values to the right of the last entry use that index plus one,
5988:    etc., to obtain values by wrapping from the left edge. This does not work for anything but the
5989:    DM_BOUNDARY_PERIODIC boundary type.

5991:    For indices that don't mean anything for your case (like the k index when working in 2d, or the c index when you have
5992:    a single value per point) you can skip filling those indices.

5994:    Level: intermediate

5996:    Concepts: matrices^zeroing rows

5998: .seealso: MatZeroRowsIS(), MatZeroRowsColumns(), MatZeroRowsLocalIS(), MatZeroRowsStencil(), MatZeroEntries(), MatZeroRowsLocal(), MatSetOption(),
5999:           MatZeroRowsColumnsLocal(), MatZeroRowsColumnsLocalIS(), MatZeroRowsColumnsIS(), MatZeroRows()
6000: @*/
6001: PetscErrorCode MatZeroRowsColumnsStencil(Mat mat,PetscInt numRows,const MatStencil rows[],PetscScalar diag,Vec x,Vec b)
6002: {
6003:   PetscInt       dim     = mat->stencil.dim;
6004:   PetscInt       sdim    = dim - (1 - (PetscInt) mat->stencil.noc);
6005:   PetscInt       *dims   = mat->stencil.dims+1;
6006:   PetscInt       *starts = mat->stencil.starts;
6007:   PetscInt       *dxm    = (PetscInt*) rows;
6008:   PetscInt       *jdxm, i, j, tmp, numNewRows = 0;


6016:   PetscMalloc1(numRows, &jdxm);
6017:   for (i = 0; i < numRows; ++i) {
6018:     /* Skip unused dimensions (they are ordered k, j, i, c) */
6019:     for (j = 0; j < 3-sdim; ++j) dxm++;
6020:     /* Local index in X dir */
6021:     tmp = *dxm++ - starts[0];
6022:     /* Loop over remaining dimensions */
6023:     for (j = 0; j < dim-1; ++j) {
6024:       /* If nonlocal, set index to be negative */
6025:       if ((*dxm++ - starts[j+1]) < 0 || tmp < 0) tmp = PETSC_MIN_INT;
6026:       /* Update local index */
6027:       else tmp = tmp*dims[j] + *(dxm-1) - starts[j+1];
6028:     }
6029:     /* Skip component slot if necessary */
6030:     if (mat->stencil.noc) dxm++;
6031:     /* Local row number */
6032:     if (tmp >= 0) {
6033:       jdxm[numNewRows++] = tmp;
6034:     }
6035:   }
6036:   MatZeroRowsColumnsLocal(mat,numNewRows,jdxm,diag,x,b);
6037:   PetscFree(jdxm);
6038:   return(0);
6039: }

6041: /*@C
6042:    MatZeroRowsLocal - Zeros all entries (except possibly the main diagonal)
6043:    of a set of rows of a matrix; using local numbering of rows.

6045:    Collective on Mat

6047:    Input Parameters:
6048: +  mat - the matrix
6049: .  numRows - the number of rows to remove
6050: .  rows - the global row indices
6051: .  diag - value put in all diagonals of eliminated rows
6052: .  x - optional vector of solutions for zeroed rows (other entries in vector are not used)
6053: -  b - optional vector of right hand side, that will be adjusted by provided solution

6055:    Notes:
6056:    Before calling MatZeroRowsLocal(), the user must first set the
6057:    local-to-global mapping by calling MatSetLocalToGlobalMapping().

6059:    For the AIJ matrix formats this removes the old nonzero structure,
6060:    but does not release memory.  For the dense and block diagonal
6061:    formats this does not alter the nonzero structure.

6063:    If the option MatSetOption(mat,MAT_KEEP_NONZERO_PATTERN,PETSC_TRUE) the nonzero structure
6064:    of the matrix is not changed (even for AIJ and BAIJ matrices) the values are
6065:    merely zeroed.

6067:    The user can set a value in the diagonal entry (or for the AIJ and
6068:    row formats can optionally remove the main diagonal entry from the
6069:    nonzero structure as well, by passing 0.0 as the final argument).

6071:    You can call MatSetOption(mat,MAT_NO_OFF_PROC_ZERO_ROWS,PETSC_TRUE) if each process indicates only rows it
6072:    owns that are to be zeroed. This saves a global synchronization in the implementation.

6074:    Level: intermediate

6076:    Concepts: matrices^zeroing

6078: .seealso: MatZeroRowsIS(), MatZeroRowsColumns(), MatZeroRowsLocalIS(), MatZeroRowsStencil(), MatZeroEntries(), MatZeroRows(), MatSetOption(),
6079:           MatZeroRowsColumnsLocal(), MatZeroRowsColumnsLocalIS(), MatZeroRowsColumnsIS(), MatZeroRowsColumnsStencil()
6080: @*/
6081: PetscErrorCode MatZeroRowsLocal(Mat mat,PetscInt numRows,const PetscInt rows[],PetscScalar diag,Vec x,Vec b)
6082: {

6089:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
6090:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6091:   MatCheckPreallocated(mat,1);

6093:   if (mat->ops->zerorowslocal) {
6094:     (*mat->ops->zerorowslocal)(mat,numRows,rows,diag,x,b);
6095:   } else {
6096:     IS             is, newis;
6097:     const PetscInt *newRows;

6099:     if (!mat->rmap->mapping) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Need to provide local to global mapping to matrix first");
6100:     ISCreateGeneral(PETSC_COMM_SELF,numRows,rows,PETSC_COPY_VALUES,&is);
6101:     ISLocalToGlobalMappingApplyIS(mat->rmap->mapping,is,&newis);
6102:     ISGetIndices(newis,&newRows);
6103:     (*mat->ops->zerorows)(mat,numRows,newRows,diag,x,b);
6104:     ISRestoreIndices(newis,&newRows);
6105:     ISDestroy(&newis);
6106:     ISDestroy(&is);
6107:   }
6108:   PetscObjectStateIncrease((PetscObject)mat);
6109: #if defined(PETSC_HAVE_CUSP)
6110:   if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
6111:     mat->valid_GPU_matrix = PETSC_CUSP_CPU;
6112:   }
6113: #elif defined(PETSC_HAVE_VIENNACL)
6114:   if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
6115:     mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
6116:   }
6117: #elif defined(PETSC_HAVE_VECCUDA)
6118:   if (mat->valid_GPU_matrix != PETSC_CUDA_UNALLOCATED) {
6119:     mat->valid_GPU_matrix = PETSC_CUDA_CPU;
6120:   }
6121: #endif
6122:   return(0);
6123: }
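
/*
   A minimal usage sketch for MatZeroRowsLocal() (not part of the PETSc source): the helper name is
   illustrative, and it assumes an assembled matrix with at least two local rows and a local-to-global
   mapping already attached via MatSetLocalToGlobalMapping().
*/
static PetscErrorCode SketchZeroLocalRows(Mat A)
{
  PetscErrorCode ierr;
  PetscInt       rows[2] = {0,1};   /* local row numbers on this process */

  /* keep a unit diagonal in the eliminated rows; x and b are not adjusted here */
  ierr = MatZeroRowsLocal(A,2,rows,1.0,NULL,NULL);CHKERRQ(ierr);
  return 0;
}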

6125: /*@C
6126:    MatZeroRowsLocalIS - Zeros all entries (except possibly the main diagonal)
6127:    of a set of rows of a matrix; using local numbering of rows.

6129:    Collective on Mat

6131:    Input Parameters:
6132: +  mat - the matrix
6133: .  is - index set of rows to remove
6134: .  diag - value put in all diagonals of eliminated rows
6135: .  x - optional vector of solutions for zeroed rows (other entries in vector are not used)
6136: -  b - optional vector of right hand side, that will be adjusted by provided solution

6138:    Notes:
6139:    Before calling MatZeroRowsLocalIS(), the user must first set the
6140:    local-to-global mapping by calling MatSetLocalToGlobalMapping().

6142:    For the AIJ matrix formats this removes the old nonzero structure,
6143:    but does not release memory.  For the dense and block diagonal
6144:    formats this does not alter the nonzero structure.

6146:    If the option MatSetOption(mat,MAT_KEEP_NONZERO_PATTERN,PETSC_TRUE) has been set, the nonzero structure
6147:    of the matrix is not changed (even for AIJ and BAIJ matrices); the values are
6148:    merely zeroed.

6150:    The user can set a value in the diagonal entry (or for the AIJ and
6151:    row formats can optionally remove the main diagonal entry from the
6152:    nonzero structure as well, by passing 0.0 as the final argument).

6154:    You can call MatSetOption(mat,MAT_NO_OFF_PROC_ZERO_ROWS,PETSC_TRUE) if each process indicates only rows it
6155:    owns that are to be zeroed. This saves a global synchronization in the implementation.

6157:    Level: intermediate

6159:    Concepts: matrices^zeroing

6161: .seealso: MatZeroRowsIS(), MatZeroRowsColumns(), MatZeroRows(), MatZeroRowsStencil(), MatZeroEntries(), MatZeroRowsLocal(), MatSetOption(),
6162:           MatZeroRowsColumnsLocal(), MatZeroRowsColumnsLocalIS(), MatZeroRowsColumnsIS(), MatZeroRowsColumnsStencil()
6163: @*/
6164: PetscErrorCode MatZeroRowsLocalIS(Mat mat,IS is,PetscScalar diag,Vec x,Vec b)
6165: {
6167:   PetscInt       numRows;
6168:   const PetscInt *rows;

6174:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
6175:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6176:   MatCheckPreallocated(mat,1);

6178:   ISGetLocalSize(is,&numRows);
6179:   ISGetIndices(is,&rows);
6180:   MatZeroRowsLocal(mat,numRows,rows,diag,x,b);
6181:   ISRestoreIndices(is,&rows);
6182:   return(0);
6183: }

6185: /*@C
6186:    MatZeroRowsColumnsLocal - Zeros all entries (except possibly the main diagonal)
6187:    of a set of rows and columns of a matrix; using local numbering of rows.

6189:    Collective on Mat

6191:    Input Parameters:
6192: +  mat - the matrix
6193: .  numRows - the number of rows to remove
6194: .  rows - the local row indices
6195: .  diag - value put in all diagonals of eliminated rows
6196: .  x - optional vector of solutions for zeroed rows (other entries in vector are not used)
6197: -  b - optional vector of right hand side, that will be adjusted by provided solution

6199:    Notes:
6200:    Before calling MatZeroRowsColumnsLocal(), the user must first set the
6201:    local-to-global mapping by calling MatSetLocalToGlobalMapping().

6203:    The user can set a value in the diagonal entry (or for the AIJ and
6204:    row formats can optionally remove the main diagonal entry from the
6205:    nonzero structure as well, by passing 0.0 as the final argument).

6207:    Level: intermediate

6209:    Concepts: matrices^zeroing

6211: .seealso: MatZeroRowsIS(), MatZeroRowsColumns(), MatZeroRowsLocalIS(), MatZeroRowsStencil(), MatZeroEntries(), MatZeroRowsLocal(), MatSetOption(),
6212:           MatZeroRows(), MatZeroRowsColumnsLocalIS(), MatZeroRowsColumnsIS(), MatZeroRowsColumnsStencil()
6213: @*/
6214: PetscErrorCode MatZeroRowsColumnsLocal(Mat mat,PetscInt numRows,const PetscInt rows[],PetscScalar diag,Vec x,Vec b)
6215: {
6217:   IS             is, newis;
6218:   const PetscInt *newRows;

6224:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
6225:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6226:   MatCheckPreallocated(mat,1);

6228:   if (!mat->cmap->mapping) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Need to provide local to global mapping to matrix first");
6229:   ISCreateGeneral(PETSC_COMM_SELF,numRows,rows,PETSC_COPY_VALUES,&is);
6230:   ISLocalToGlobalMappingApplyIS(mat->cmap->mapping,is,&newis);
6231:   ISGetIndices(newis,&newRows);
6232:   (*mat->ops->zerorowscolumns)(mat,numRows,newRows,diag,x,b);
6233:   ISRestoreIndices(newis,&newRows);
6234:   ISDestroy(&newis);
6235:   ISDestroy(&is);
6236:   PetscObjectStateIncrease((PetscObject)mat);
6237: #if defined(PETSC_HAVE_CUSP)
6238:   if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
6239:     mat->valid_GPU_matrix = PETSC_CUSP_CPU;
6240:   }
6241: #elif defined(PETSC_HAVE_VIENNACL)
6242:   if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
6243:     mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
6244:   }
6245: #elif defined(PETSC_HAVE_VECCUDA)
6246:   if (mat->valid_GPU_matrix != PETSC_CUDA_UNALLOCATED) {
6247:     mat->valid_GPU_matrix = PETSC_CUDA_CPU;
6248:   }
6249: #endif
6250:   return(0);
6251: }

6253: /*@C
6254:    MatZeroRowsColumnsLocalIS - Zeros all entries (except possibly the main diagonal)
6255:    of a set of rows and columns of a matrix; using local numbering of rows.

6257:    Collective on Mat

6259:    Input Parameters:
6260: +  mat - the matrix
6261: .  is - index set of rows to remove
6262: .  diag - value put in all diagonals of eliminated rows
6263: .  x - optional vector of solutions for zeroed rows (other entries in vector are not used)
6264: -  b - optional vector of right hand side, that will be adjusted by provided solution

6266:    Notes:
6267:    Before calling MatZeroRowsColumnsLocalIS(), the user must first set the
6268:    local-to-global mapping by calling MatSetLocalToGlobalMapping().

6270:    The user can set a value in the diagonal entry (or for the AIJ and
6271:    row formats can optionally remove the main diagonal entry from the
6272:    nonzero structure as well, by passing 0.0 as the final argument).

6274:    Level: intermediate

6276:    Concepts: matrices^zeroing

6278: .seealso: MatZeroRowsIS(), MatZeroRowsColumns(), MatZeroRowsLocalIS(), MatZeroRowsStencil(), MatZeroEntries(), MatZeroRowsLocal(), MatSetOption(),
6279:           MatZeroRowsColumnsLocal(), MatZeroRows(), MatZeroRowsColumnsIS(), MatZeroRowsColumnsStencil()
6280: @*/
6281: PetscErrorCode MatZeroRowsColumnsLocalIS(Mat mat,IS is,PetscScalar diag,Vec x,Vec b)
6282: {
6284:   PetscInt       numRows;
6285:   const PetscInt *rows;

6291:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
6292:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6293:   MatCheckPreallocated(mat,1);

6295:   ISGetLocalSize(is,&numRows);
6296:   ISGetIndices(is,&rows);
6297:   MatZeroRowsColumnsLocal(mat,numRows,rows,diag,x,b);
6298:   ISRestoreIndices(is,&rows);
6299:   return(0);
6300: }

6302: /*@C
6303:    MatGetSize - Returns the numbers of rows and columns in a matrix.

6305:    Not Collective

6307:    Input Parameter:
6308: .  mat - the matrix

6310:    Output Parameters:
6311: +  m - the number of global rows
6312: -  n - the number of global columns

6314:    Note: both output parameters can be NULL on input.

6316:    Level: beginner

6318:    Concepts: matrices^size

6320: .seealso: MatGetLocalSize()
6321: @*/
6322: PetscErrorCode MatGetSize(Mat mat,PetscInt *m,PetscInt *n)
6323: {
6326:   if (m) *m = mat->rmap->N;
6327:   if (n) *n = mat->cmap->N;
6328:   return(0);
6329: }

6331: /*@C
6332:    MatGetLocalSize - Returns the number of rows and columns in a matrix
6333:    stored locally.  This information may be implementation dependent, so
6334:    use with care.

6336:    Not Collective

6338:    Input Parameters:
6339: .  mat - the matrix

6341:    Output Parameters:
6342: +  m - the number of local rows
6343: -  n - the number of local columns

6345:    Note: both output parameters can be NULL on input.

6347:    Level: beginner

6349:    Concepts: matrices^local size

6351: .seealso: MatGetSize()
6352: @*/
6353: PetscErrorCode MatGetLocalSize(Mat mat,PetscInt *m,PetscInt *n)
6354: {
6359:   if (m) *m = mat->rmap->n;
6360:   if (n) *n = mat->cmap->n;
6361:   return(0);
6362: }

6364: /*@
6365:    MatGetOwnershipRangeColumn - Returns the range of matrix columns associated with the portion, owned by this process, of a vector
6366:    that one multiplies this matrix by (the columns of the "diagonal block").

6368:    Not Collective, unless matrix has not been allocated, then collective on Mat

6370:    Input Parameters:
6371: .  mat - the matrix

6373:    Output Parameters:
6374: +  m - the global index of the first local column
6375: -  n - one more than the global index of the last local column

6377:    Notes: both output parameters can be NULL on input.

6379:    Level: developer

6381:    Concepts: matrices^column ownership

6383: .seealso:  MatGetOwnershipRange(), MatGetOwnershipRanges(), MatGetOwnershipRangesColumn()

6385: @*/
6386: PetscErrorCode MatGetOwnershipRangeColumn(Mat mat,PetscInt *m,PetscInt *n)
6387: {
6393:   MatCheckPreallocated(mat,1);
6394:   if (m) *m = mat->cmap->rstart;
6395:   if (n) *n = mat->cmap->rend;
6396:   return(0);
6397: }

6399: /*@
6400:    MatGetOwnershipRange - Returns the range of matrix rows owned by
6401:    this processor, assuming that the matrix is laid out with the first
6402:    n1 rows on the first processor, the next n2 rows on the second, etc.
6403:    For certain parallel layouts this range may not be well defined.

6405:    Not Collective

6407:    Input Parameters:
6408: .  mat - the matrix

6410:    Output Parameters:
6411: +  m - the global index of the first local row
6412: -  n - one more than the global index of the last local row

6414:    Note: Both output parameters can be NULL on input.
6415: $  This function requires that the matrix be preallocated. If you have not preallocated, consider using
6416: $    PetscSplitOwnership(MPI_Comm comm, PetscInt *n, PetscInt *N)
6417: $  and then MPI_Scan() to calculate prefix sums of the local sizes.

6419:    Level: beginner

6421:    Concepts: matrices^row ownership

6423: .seealso:   MatGetOwnershipRanges(), MatGetOwnershipRangeColumn(), MatGetOwnershipRangesColumn(), PetscSplitOwnership(), PetscSplitOwnershipBlock()

6425: @*/
6426: PetscErrorCode MatGetOwnershipRange(Mat mat,PetscInt *m,PetscInt *n)
6427: {
6433:   MatCheckPreallocated(mat,1);
6434:   if (m) *m = mat->rmap->rstart;
6435:   if (n) *n = mat->rmap->rend;
6436:   return(0);
6437: }
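
/*
   A minimal usage sketch (not part of the PETSc source) combining MatGetSize(), MatGetLocalSize()
   and MatGetOwnershipRange() on an assembled, preallocated matrix; the helper name is illustrative.
*/
static PetscErrorCode SketchQuerySizes(Mat A)
{
  PetscErrorCode ierr;
  PetscInt       M,N,m,n,rstart,rend;

  ierr = MatGetSize(A,&M,&N);CHKERRQ(ierr);                    /* global rows/columns */
  ierr = MatGetLocalSize(A,&m,&n);CHKERRQ(ierr);               /* local rows/columns */
  ierr = MatGetOwnershipRange(A,&rstart,&rend);CHKERRQ(ierr);  /* this process owns rows [rstart,rend) */
  ierr = PetscPrintf(PETSC_COMM_SELF,"global %D x %D, local %D x %D, rows [%D,%D)\n",M,N,m,n,rstart,rend);CHKERRQ(ierr);
  return 0;
}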

6439: /*@C
6440:    MatGetOwnershipRanges - Returns the range of matrix rows owned by
6441:    each process

6443:    Not Collective, unless matrix has not been allocated, then collective on Mat

6445:    Input Parameters:
6446: .  mat - the matrix

6448:    Output Parameters:
6449: .  ranges - start of each process's portion of the rows, plus one final entry equal to the total number of rows

6451:    Level: beginner

6453:    Concepts: matrices^row ownership

6455: .seealso:   MatGetOwnershipRange(), MatGetOwnershipRangeColumn(), MatGetOwnershipRangesColumn()

6457: @*/
6458: PetscErrorCode MatGetOwnershipRanges(Mat mat,const PetscInt **ranges)
6459: {

6465:   MatCheckPreallocated(mat,1);
6466:   PetscLayoutGetRanges(mat->rmap,ranges);
6467:   return(0);
6468: }

6470: /*@C
6471:    MatGetOwnershipRangesColumn - Returns, for each process, the range of matrix columns associated with that process's portion of a vector
6472:    one multiplies this matrix by (the columns of the "diagonal blocks" for each process).

6474:    Not Collective, unless matrix has not been allocated, then collective on Mat

6476:    Input Parameters:
6477: .  mat - the matrix

6479:    Output Parameters:
6480: .  ranges - start of each process's portion of the columns, plus one final entry equal to the total number of columns

6482:    Level: beginner

6484:    Concepts: matrices^column ownership

6486: .seealso:   MatGetOwnershipRange(), MatGetOwnershipRangeColumn(), MatGetOwnershipRanges()

6488: @*/
6489: PetscErrorCode MatGetOwnershipRangesColumn(Mat mat,const PetscInt **ranges)
6490: {

6496:   MatCheckPreallocated(mat,1);
6497:   PetscLayoutGetRanges(mat->cmap,ranges);
6498:   return(0);
6499: }
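
/*
   A minimal usage sketch for MatGetOwnershipRanges() (not part of the PETSc source): the returned
   array has one entry per process plus a final entry equal to the global number of rows; it is owned
   by the matrix and must not be freed. The helper name is illustrative.
*/
static PetscErrorCode SketchPrintRowRanges(Mat A)
{
  PetscErrorCode ierr;
  const PetscInt *ranges;
  PetscMPIInt    size,i;

  ierr = MPI_Comm_size(PetscObjectComm((PetscObject)A),&size);CHKERRQ(ierr);
  ierr = MatGetOwnershipRanges(A,&ranges);CHKERRQ(ierr);
  for (i=0; i<size; i++) {
    ierr = PetscPrintf(PETSC_COMM_SELF,"process %d owns rows [%D,%D)\n",i,ranges[i],ranges[i+1]);CHKERRQ(ierr);
  }
  return 0;
}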

6501: /*@C
6502:    MatGetOwnershipIS - Get row and column ownership as index sets

6504:    Not Collective

6506:    Input Arguments:
6507: .  A - matrix of type Elemental

6509:    Output Arguments:
6510: +  rows - rows in which this process owns elements
6511: -  cols - columns in which this process owns elements

6513:    Level: intermediate

6515: .seealso: MatGetOwnershipRange(), MatGetOwnershipRangeColumn(), MatSetValues(), MATELEMENTAL, MatSetValues()
6516: @*/
6517: PetscErrorCode MatGetOwnershipIS(Mat A,IS *rows,IS *cols)
6518: {
6519:   PetscErrorCode ierr,(*f)(Mat,IS*,IS*);

6522:   MatCheckPreallocated(A,1);
6523:   PetscObjectQueryFunction((PetscObject)A,"MatGetOwnershipIS_C",&f);
6524:   if (f) {
6525:     (*f)(A,rows,cols);
6526:   } else {   /* Create a standard row-based partition, each process is responsible for ALL columns in their row block */
6527:     if (rows) {ISCreateStride(PETSC_COMM_SELF,A->rmap->n,A->rmap->rstart,1,rows);}
6528:     if (cols) {ISCreateStride(PETSC_COMM_SELF,A->cmap->N,0,1,cols);}
6529:   }
6530:   return(0);
6531: }
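
/*
   A minimal usage sketch for MatGetOwnershipIS() (not part of the PETSc source): for non-Elemental
   types this falls back to the row-based partition created above. The helper name is illustrative;
   the caller owns and destroys the returned index sets.
*/
static PetscErrorCode SketchOwnershipIS(Mat A)
{
  PetscErrorCode ierr;
  IS             rows,cols;

  ierr = MatGetOwnershipIS(A,&rows,&cols);CHKERRQ(ierr);
  ierr = ISView(rows,PETSC_VIEWER_STDOUT_SELF);CHKERRQ(ierr);
  ierr = ISDestroy(&rows);CHKERRQ(ierr);
  ierr = ISDestroy(&cols);CHKERRQ(ierr);
  return 0;
}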

6533: /*@C
6534:    MatILUFactorSymbolic - Performs symbolic ILU factorization of a matrix.
6535:    Uses levels of fill only, not drop tolerance. Use MatLUFactorNumeric()
6536:    to complete the factorization.

6538:    Collective on Mat

6540:    Input Parameters:
6541: +  mat - the matrix
6542: .  row - row permutation
6543: .  column - column permutation
6544: -  info - structure containing
6545: $      levels - number of levels of fill.
6546: $      expected fill - as ratio of original fill.
6547: $      1 or 0 - indicating force fill on diagonal (improves robustness for matrices
6548:                 missing diagonal entries)

6550:    Output Parameters:
6551: .  fact - new matrix that has been symbolically factored

6553:    Notes: See Users-Manual: ch_mat for additional information about choosing the fill factor for better efficiency.

6555:    Most users should employ the simplified KSP interface for linear solvers
6556:    instead of working directly with matrix algebra routines such as this.
6557:    See, e.g., KSPCreate().

6559:    Level: developer

6561:   Concepts: matrices^symbolic LU factorization
6562:   Concepts: matrices^factorization
6563:   Concepts: LU^symbolic factorization

6565: .seealso: MatLUFactorSymbolic(), MatLUFactorNumeric(), MatCholeskyFactor()
6566:           MatGetOrdering(), MatFactorInfo

6568:     Developer Note: fortran interface is not autogenerated as the f90
6569:     interface definition cannot be generated correctly [due to MatFactorInfo]

6571: @*/
6572: PetscErrorCode MatILUFactorSymbolic(Mat fact,Mat mat,IS row,IS col,const MatFactorInfo *info)
6573: {

6583:   if (info->levels < 0) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_OUTOFRANGE,"Levels of fill negative %D",(PetscInt)info->levels);
6584:   if (info->fill < 1.0) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_OUTOFRANGE,"Expected fill less than 1.0 %g",(double)info->fill);
6585:   if (!(fact)->ops->ilufactorsymbolic) {
6586:     const MatSolverPackage spackage;
6587:     MatFactorGetSolverPackage(fact,&spackage);
6588:     SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Matrix type %s symbolic ILU using solver package %s",((PetscObject)mat)->type_name,spackage);
6589:   }
6590:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
6591:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6592:   MatCheckPreallocated(mat,2);

6594:   PetscLogEventBegin(MAT_ILUFactorSymbolic,mat,row,col,0);
6595:   (fact->ops->ilufactorsymbolic)(fact,mat,row,col,info);
6596:   PetscLogEventEnd(MAT_ILUFactorSymbolic,mat,row,col,0);
6597:   return(0);
6598: }
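
/*
   A minimal ILU(0) sketch (not part of the PETSc source) pairing MatILUFactorSymbolic() with
   MatGetFactor(), MatGetOrdering() and MatLUFactorNumeric(), assuming "A" is an assembled SeqAIJ
   matrix. Most users should let KSP/PC drive this; the helper name is illustrative.
*/
static PetscErrorCode SketchILUFactor(Mat A,Mat *F)
{
  PetscErrorCode ierr;
  IS             row,col;
  MatFactorInfo  info;

  ierr = MatGetOrdering(A,MATORDERINGNATURAL,&row,&col);CHKERRQ(ierr);
  ierr = MatGetFactor(A,MATSOLVERPETSC,MAT_FACTOR_ILU,F);CHKERRQ(ierr);
  ierr = MatFactorInfoInitialize(&info);CHKERRQ(ierr);
  info.levels = 0;      /* ILU(0): no extra levels of fill */
  info.fill   = 1.0;    /* expected fill as a ratio of the original fill */
  ierr = MatILUFactorSymbolic(*F,A,row,col,&info);CHKERRQ(ierr);
  ierr = MatLUFactorNumeric(*F,A,&info);CHKERRQ(ierr);
  ierr = ISDestroy(&row);CHKERRQ(ierr);
  ierr = ISDestroy(&col);CHKERRQ(ierr);
  return 0;
}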

6600: /*@C
6601:    MatICCFactorSymbolic - Performs symbolic incomplete
6602:    Cholesky factorization for a symmetric matrix.  Use
6603:    MatCholeskyFactorNumeric() to complete the factorization.

6605:    Collective on Mat

6607:    Input Parameters:
6608: +  mat - the matrix
6609: .  perm - row and column permutation
6610: -  info - structure containing
6611: $      levels - number of levels of fill.
6612: $      expected fill - as ratio of original fill.

6614:    Output Parameter:
6615: .  fact - the factored matrix

6617:    Notes:
6618:    Most users should employ the KSP interface for linear solvers
6619:    instead of working directly with matrix algebra routines such as this.
6620:    See, e.g., KSPCreate().

6622:    Level: developer

6624:   Concepts: matrices^symbolic incomplete Cholesky factorization
6625:   Concepts: matrices^factorization
6626:   Concepts: Cholesky^symbolic factorization

6628: .seealso: MatCholeskyFactorNumeric(), MatCholeskyFactor(), MatFactorInfo

6630:     Developer Note: fortran interface is not autogenerated as the f90
6631:     interface definition cannot be generated correctly [due to MatFactorInfo]

6633: @*/
6634: PetscErrorCode MatICCFactorSymbolic(Mat fact,Mat mat,IS perm,const MatFactorInfo *info)
6635: {

6644:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6645:   if (info->levels < 0) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_OUTOFRANGE,"Levels negative %D",(PetscInt) info->levels);
6646:   if (info->fill < 1.0) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_OUTOFRANGE,"Expected fill less than 1.0 %g",(double)info->fill);
6647:   if (!(fact)->ops->iccfactorsymbolic) {
6648:     const MatSolverPackage spackage;
6649:     MatFactorGetSolverPackage(fact,&spackage);
6650:     SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Matrix type %s symbolic ICC using solver package %s",((PetscObject)mat)->type_name,spackage);
6651:   }
6652:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
6653:   MatCheckPreallocated(mat,2);

6655:   PetscLogEventBegin(MAT_ICCFactorSymbolic,mat,perm,0,0);
6656:   (fact->ops->iccfactorsymbolic)(fact,mat,perm,info);
6657:   PetscLogEventEnd(MAT_ICCFactorSymbolic,mat,perm,0,0);
6658:   return(0);
6659: }

6661: /*@C
6662:    MatCreateSubMatrices - Extracts several submatrices from a matrix. If submat
6663:    points to an array of valid matrices, they may be reused to store the new
6664:    submatrices.

6666:    Collective on Mat

6668:    Input Parameters:
6669: +  mat - the matrix
6670: .  n   - the number of submatrices to be extracted (on this processor, may be zero)
6671: .  irow, icol - index sets of rows and columns to extract
6672: -  scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX

6674:    Output Parameter:
6675: .  submat - the array of submatrices

6677:    Notes:
6678:    MatCreateSubMatrices() can extract ONLY sequential submatrices
6679:    (from both sequential and parallel matrices). Use MatCreateSubMatrix()
6680:    to extract a parallel submatrix.

6682:    Some matrix types place restrictions on the row and column
6683:    indices, such as that they be sorted or that they be equal to each other.

6685:    The index sets may not have duplicate entries.

6687:    When extracting submatrices from a parallel matrix, each processor can
6688:    form a different submatrix by setting the rows and columns of its
6689:    individual index sets according to the local submatrix desired.

6691:    When finished using the submatrices, the user should destroy
6692:    them with MatDestroyMatrices().

6694:    MAT_REUSE_MATRIX can only be used when the nonzero structure of the
6695:    original matrix has not changed from that last call to MatCreateSubMatrices().

6697:    This routine creates the matrices in submat; you should NOT create them before
6698:    calling it. It also allocates the array of matrix pointers submat.

6700:    For BAIJ matrices the index sets must respect the block structure, that is if they
6701:    request one row/column in a block, they must request all rows/columns that are in
6702:    that block. For example, if the block size is 2 you cannot request just row 0 and
6703:    column 0.

6705:    Fortran Note:
6706:    The Fortran interface is slightly different from that given below; it
6707:    requires one to pass in as submat a Mat (integer) array of size at least n.

6709:    Level: advanced

6711:    Concepts: matrices^accessing submatrices
6712:    Concepts: submatrices

6714: .seealso: MatDestroySubMatrices(), MatCreateSubMatrix(), MatGetRow(), MatGetDiagonal(), MatReuse
6715: @*/
6716: PetscErrorCode MatCreateSubMatrices(Mat mat,PetscInt n,const IS irow[],const IS icol[],MatReuse scall,Mat *submat[])
6717: {
6719:   PetscInt       i;
6720:   PetscBool      eq;

6725:   if (n) {
6730:   }
6732:   if (n && scall == MAT_REUSE_MATRIX) {
6735:   }
6736:   if (!mat->ops->createsubmatrices) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
6737:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
6738:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6739:   MatCheckPreallocated(mat,1);

6741:   PetscLogEventBegin(MAT_CreateSubMats,mat,0,0,0);
6742:   (*mat->ops->createsubmatrices)(mat,n,irow,icol,scall,submat);
6743:   PetscLogEventEnd(MAT_CreateSubMats,mat,0,0,0);
6744:   for (i=0; i<n; i++) {
6745:     (*submat)[i]->factortype = MAT_FACTOR_NONE;  /* in case in place factorization was previously done on submatrix */
6746:     if (mat->symmetric || mat->structurally_symmetric || mat->hermitian) {
6747:       ISEqual(irow[i],icol[i],&eq);
6748:       if (eq) {
6749:         if (mat->symmetric) {
6750:           MatSetOption((*submat)[i],MAT_SYMMETRIC,PETSC_TRUE);
6751:         } else if (mat->hermitian) {
6752:           MatSetOption((*submat)[i],MAT_HERMITIAN,PETSC_TRUE);
6753:         } else if (mat->structurally_symmetric) {
6754:           MatSetOption((*submat)[i],MAT_STRUCTURALLY_SYMMETRIC,PETSC_TRUE);
6755:         }
6756:       }
6757:     }
6758:   }
6759:   return(0);
6760: }
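
/*
   A minimal usage sketch for MatCreateSubMatrices() (not part of the PETSc source): each process
   extracts one sequential submatrix holding its locally owned rows and all columns, then releases
   the result with MatDestroySubMatrices(). The helper name is illustrative.
*/
static PetscErrorCode SketchExtractLocalBlock(Mat A)
{
  PetscErrorCode ierr;
  PetscInt       rstart,rend,N;
  IS             isrow,iscol;
  Mat            *submat;

  ierr = MatGetOwnershipRange(A,&rstart,&rend);CHKERRQ(ierr);
  ierr = MatGetSize(A,NULL,&N);CHKERRQ(ierr);
  ierr = ISCreateStride(PETSC_COMM_SELF,rend-rstart,rstart,1,&isrow);CHKERRQ(ierr);
  ierr = ISCreateStride(PETSC_COMM_SELF,N,0,1,&iscol);CHKERRQ(ierr);
  ierr = MatCreateSubMatrices(A,1,&isrow,&iscol,MAT_INITIAL_MATRIX,&submat);CHKERRQ(ierr);
  /* ... use the sequential matrix submat[0] ... */
  ierr = MatDestroySubMatrices(1,&submat);CHKERRQ(ierr);
  ierr = ISDestroy(&isrow);CHKERRQ(ierr);
  ierr = ISDestroy(&iscol);CHKERRQ(ierr);
  return 0;
}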

6762: /*@C
6763:    MatCreateSubMatricesMPI - Extracts MPI submatrices across a sub communicator of mat (by pairs of IS that may live on subcomms).

6765:    Collective on Mat

6767:    Input Parameters:
6768: +  mat - the matrix
6769: .  n   - the number of submatrices to be extracted
6770: .  irow, icol - index sets of rows and columns to extract
6771: -  scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX

6773:    Output Parameter:
6774: .  submat - the array of submatrices

6776:    Level: advanced

6778:    Concepts: matrices^accessing submatrices
6779:    Concepts: submatrices

6781: .seealso: MatCreateSubMatrices(), MatCreateSubMatrix(), MatGetRow(), MatGetDiagonal(), MatReuse
6782: @*/
6783: PetscErrorCode MatCreateSubMatricesMPI(Mat mat,PetscInt n,const IS irow[],const IS icol[],MatReuse scall,Mat *submat[])
6784: {
6786:   PetscInt       i;
6787:   PetscBool      eq;

6792:   if (n) {
6797:   }
6799:   if (n && scall == MAT_REUSE_MATRIX) {
6802:   }
6803:   if (!mat->ops->createsubmatricesmpi) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
6804:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
6805:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6806:   MatCheckPreallocated(mat,1);

6808:   PetscLogEventBegin(MAT_CreateSubMats,mat,0,0,0);
6809:   (*mat->ops->createsubmatricesmpi)(mat,n,irow,icol,scall,submat);
6810:   PetscLogEventEnd(MAT_CreateSubMats,mat,0,0,0);
6811:   for (i=0; i<n; i++) {
6812:     if (mat->symmetric || mat->structurally_symmetric || mat->hermitian) {
6813:       ISEqual(irow[i],icol[i],&eq);
6814:       if (eq) {
6815:         if (mat->symmetric) {
6816:           MatSetOption((*submat)[i],MAT_SYMMETRIC,PETSC_TRUE);
6817:         } else if (mat->hermitian) {
6818:           MatSetOption((*submat)[i],MAT_HERMITIAN,PETSC_TRUE);
6819:         } else if (mat->structurally_symmetric) {
6820:           MatSetOption((*submat)[i],MAT_STRUCTURALLY_SYMMETRIC,PETSC_TRUE);
6821:         }
6822:       }
6823:     }
6824:   }
6825:   return(0);
6826: }

6828: /*@C
6829:    MatDestroyMatrices - Destroys an array of matrices.

6831:    Collective on Mat

6833:    Input Parameters:
6834: +  n - the number of local matrices
6835: -  mat - the matrices (note that this is a pointer to the array of matrices)

6837:    Level: advanced

6839:     Notes: Frees not only the matrices, but also the array that contains the matrices.
6840:            In Fortran this routine does not free the array.

6842: .seealso: MatCreateSubMatrices(), MatDestroySubMatrices()
6843: @*/
6844: PetscErrorCode MatDestroyMatrices(PetscInt n,Mat *mat[])
6845: {
6847:   PetscInt       i;

6850:   if (!*mat) return(0);
6851:   if (n < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Trying to destroy negative number of matrices %D",n);

6854:   for (i=0; i<n; i++) {
6855:     MatDestroy(&(*mat)[i]);
6856:   }

6858:   /* memory is allocated even if n = 0 */
6859:   PetscFree(*mat);
6860:   return(0);
6861: }

6863: /*@C
6864:    MatDestroySubMatrices - Destroys a set of matrices obtained with MatCreateSubMatrices().

6866:    Collective on Mat

6868:    Input Parameters:
6869: +  n - the number of local matrices
6870: -  mat - the matrices (note that this is a pointer to the array of matrices, just to match the calling
6871:                        sequence of MatCreateSubMatrices())

6873:    Level: advanced

6875:     Notes: Frees not only the matrices, but also the array that contains the matrices.
6876:            In Fortran this routine does not free the array.

6878: .seealso: MatCreateSubMatrices()
6879: @*/
6880: PetscErrorCode MatDestroySubMatrices(PetscInt n,Mat *mat[])
6881: {

6885:   if (!*mat) return(0);
6886:   if (n < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Trying to destroy negative number of matrices %D",n);

6889:   /* Destroy dummy submatrices (*mat)[n]...(*mat)[n+nstages-1] used for reuse struct Mat_SubSppt */
6890:   if ((*mat)[n]) {
6891:     PetscBool      isdummy;
6892:     PetscObjectTypeCompare((PetscObject)(*mat)[n],MATDUMMY,&isdummy);
6893:     if (isdummy) {
6894:       Mat_SubSppt* smat = (Mat_SubSppt*)((*mat)[n]->data); /* singleis and nstages are saved in (*mat)[n]->data */

6896:       if (smat && !smat->singleis) {
6897:         PetscInt i,nstages=smat->nstages;
6898:         for (i=0; i<nstages; i++) {
6899:           MatDestroy(&(*mat)[n+i]);
6900:         }
6901:       }
6902:     }
6903:   }

6905:   MatDestroyMatrices(n,mat);
6906:   return(0);
6907: }

6909: /*@C
6910:    MatGetSeqNonzeroStructure - Extracts the sequential nonzero structure from a matrix.

6912:    Collective on Mat

6914:    Input Parameters:
6915: .  mat - the matrix

6917:    Output Parameter:
6918: .  matstruct - the sequential matrix with the nonzero structure of mat

6920:   Level: intermediate

6922: .seealso: MatDestroySeqNonzeroStructure(), MatCreateSubMatrices(), MatDestroyMatrices()
6923: @*/
6924: PetscErrorCode MatGetSeqNonzeroStructure(Mat mat,Mat *matstruct)
6925: {


6933:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6934:   MatCheckPreallocated(mat,1);

6936:   if (!mat->ops->getseqnonzerostructure) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Not for matrix type %s\n",((PetscObject)mat)->type_name);
6937:   PetscLogEventBegin(MAT_GetSeqNonzeroStructure,mat,0,0,0);
6938:   (*mat->ops->getseqnonzerostructure)(mat,matstruct);
6939:   PetscLogEventEnd(MAT_GetSeqNonzeroStructure,mat,0,0,0);
6940:   return(0);
6941: }

6943: /*@C
6944:    MatDestroySeqNonzeroStructure - Destroys matrix obtained with MatGetSeqNonzeroStructure().

6946:    Collective on Mat

6948:    Input Parameters:
6949: .  mat - the matrix (note that this is a pointer to the matrix, just to match the calling
6950:                        sequence of MatGetSeqNonzeroStructure())

6952:    Level: advanced

6954:     Notes: Frees the matrix and sets the pointer to NULL.

6956: .seealso: MatGetSeqNonzeroStructure()
6957: @*/
6958: PetscErrorCode MatDestroySeqNonzeroStructure(Mat *mat)
6959: {

6964:   MatDestroy(mat);
6965:   return(0);
6966: }

6968: /*@
6969:    MatIncreaseOverlap - Given a set of submatrices indicated by index sets,
6970:    replaces the index sets by larger ones that represent submatrices with
6971:    additional overlap.

6973:    Collective on Mat

6975:    Input Parameters:
6976: +  mat - the matrix
6977: .  n   - the number of index sets
6978: .  is  - the array of index sets (these index sets will be changed during the call)
6979: -  ov  - the additional overlap requested

6981:    Options Database:
6982: .  -mat_increase_overlap_scalable - use a scalable algorithm to compute the overlap (supported by MPIAIJ matrix)

6984:    Level: developer

6986:    Concepts: overlap
6987:    Concepts: ASM^computing overlap

6989: .seealso: MatCreateSubMatrices()
6990: @*/
6991: PetscErrorCode MatIncreaseOverlap(Mat mat,PetscInt n,IS is[],PetscInt ov)
6992: {

6998:   if (n < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Must have one or more domains, you have %D",n);
6999:   if (n) {
7002:   }
7003:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
7004:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
7005:   MatCheckPreallocated(mat,1);

7007:   if (!ov) return(0);
7008:   if (!mat->ops->increaseoverlap) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
7009:   PetscLogEventBegin(MAT_IncreaseOverlap,mat,0,0,0);
7010:   (*mat->ops->increaseoverlap)(mat,n,is,ov);
7011:   PetscLogEventEnd(MAT_IncreaseOverlap,mat,0,0,0);
7012:   return(0);
7013: }
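
/*
   A minimal usage sketch for MatIncreaseOverlap() (not part of the PETSc source): grow a local index
   set by one level of overlap, as done in additive Schwarz setups, assuming "A" is an assembled
   parallel matrix. The helper name is illustrative; the caller later destroys the returned IS.
*/
static PetscErrorCode SketchOverlapOne(Mat A,IS *is)
{
  PetscErrorCode ierr;
  PetscInt       rstart,rend;

  ierr = MatGetOwnershipRange(A,&rstart,&rend);CHKERRQ(ierr);
  /* start from the locally owned rows ... */
  ierr = ISCreateStride(PETSC_COMM_SELF,rend-rstart,rstart,1,is);CHKERRQ(ierr);
  /* ... and add one level of overlap; the index set is replaced in place */
  ierr = MatIncreaseOverlap(A,1,is,1);CHKERRQ(ierr);
  return 0;
}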


7016: PetscErrorCode MatIncreaseOverlapSplit_Single(Mat,IS*,PetscInt);

7018: /*@
7019:    MatIncreaseOverlapSplit - Given a set of submatrices indicated by index sets across
7020:    a sub communicator, replaces the index sets by larger ones that represent submatrices with
7021:    additional overlap.

7023:    Collective on Mat

7025:    Input Parameters:
7026: +  mat - the matrix
7027: .  n   - the number of index sets
7028: .  is  - the array of index sets (these index sets will be changed during the call)
7029: -  ov  - the additional overlap requested

7031:    Options Database:
7032: .  -mat_increase_overlap_scalable - use a scalable algorithm to compute the overlap (supported by MPIAIJ matrix)

7034:    Level: developer

7036:    Concepts: overlap
7037:    Concepts: ASM^computing overlap

7039: .seealso: MatCreateSubMatrices()
7040: @*/
7041: PetscErrorCode MatIncreaseOverlapSplit(Mat mat,PetscInt n,IS is[],PetscInt ov)
7042: {
7043:   PetscInt       i;

7049:   if (n < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Must have one or more domains, you have %D",n);
7050:   if (n) {
7053:   }
7054:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
7055:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
7056:   MatCheckPreallocated(mat,1);
7057:   if (!ov) return(0);
7058:   PetscLogEventBegin(MAT_IncreaseOverlap,mat,0,0,0);
7059:   for (i=0; i<n; i++) {
7060:     MatIncreaseOverlapSplit_Single(mat,&is[i],ov);
7061:   }
7062:   PetscLogEventEnd(MAT_IncreaseOverlap,mat,0,0,0);
7063:   return(0);
7064: }




7069: /*@
7070:    MatGetBlockSize - Returns the matrix block size.

7072:    Not Collective

7074:    Input Parameter:
7075: .  mat - the matrix

7077:    Output Parameter:
7078: .  bs - block size

7080:    Notes:
7081:     Block row formats are MATSEQBAIJ, MATMPIBAIJ, MATSEQSBAIJ, MATMPISBAIJ. These formats ALWAYS have square block storage in the matrix.

7083:    If the block size has not been set yet this routine returns 1.

7085:    Level: intermediate

7087:    Concepts: matrices^block size

7089: .seealso: MatCreateSeqBAIJ(), MatCreateBAIJ(), MatGetBlockSizes()
7090: @*/
7091: PetscErrorCode MatGetBlockSize(Mat mat,PetscInt *bs)
7092: {
7096:   *bs = PetscAbs(mat->rmap->bs);
7097:   return(0);
7098: }

7100: /*@
7101:    MatGetBlockSizes - Returns the matrix block row and column sizes.

7103:    Not Collective

7105:    Input Parameter:
7106: .  mat - the matrix

7108:    Output Parameters:
7109: +  rbs - row block size
7110: -  cbs - column block size

7112:    Notes:
7113:     Block row formats are MATSEQBAIJ, MATMPIBAIJ, MATSEQSBAIJ, MATMPISBAIJ. These formats ALWAYS have square block storage in the matrix.
7114:     If you pass a different block size for the columns than the rows, the row block size determines the square block storage.

7116:    If a block size has not been set yet this routine returns 1.

7118:    Level: intermediate

7120:    Concepts: matrices^block size

7122: .seealso: MatCreateSeqBAIJ(), MatCreateBAIJ(), MatGetBlockSize(), MatSetBlockSize(), MatSetBlockSizes()
7123: @*/
7124: PetscErrorCode MatGetBlockSizes(Mat mat,PetscInt *rbs, PetscInt *cbs)
7125: {
7130:   if (rbs) *rbs = PetscAbs(mat->rmap->bs);
7131:   if (cbs) *cbs = PetscAbs(mat->cmap->bs);
7132:   return(0);
7133: }

7135: /*@
7136:    MatSetBlockSize - Sets the matrix block size.

7138:    Logically Collective on Mat

7140:    Input Parameters:
7141: +  mat - the matrix
7142: -  bs - block size

7144:    Notes:
7145:     Block row formats are MATSEQBAIJ, MATMPIBAIJ, MATSEQSBAIJ, MATMPISBAIJ. These formats ALWAYS have square block storage in the matrix.
7146:     This must be called before MatSetUp() or MatXXXSetPreallocation() (or will default to 1) and the block size cannot be changed later.

7148:     For MATMPIAIJ and MATSEQAIJ matrix formats, this function can be called at a later stage, provided that the specified block size
7149:     is compatible with the matrix local sizes.

7151:    Level: intermediate

7153:    Concepts: matrices^block size

7155: .seealso: MatCreateSeqBAIJ(), MatCreateBAIJ(), MatGetBlockSize(), MatSetBlockSizes(), MatGetBlockSizes()
7156: @*/
7157: PetscErrorCode MatSetBlockSize(Mat mat,PetscInt bs)
7158: {

7164:   MatSetBlockSizes(mat,bs,bs);
7165:   return(0);
7166: }

7168: /*@
7169:    MatSetBlockSizes - Sets the matrix block row and column sizes.

7171:    Logically Collective on Mat

7173:    Input Parameters:
7174: +  mat - the matrix
7175: .  rbs - row block size
7176: -  cbs - column block size

7178:    Notes:
7179:     Block row formats are MATSEQBAIJ, MATMPIBAIJ, MATSEQSBAIJ, MATMPISBAIJ. These formats ALWAYS have square block storage in the matrix.
7180:     If you pass a different block size for the columns than the rows, the row block size determines the square block storage.
7181:     This must be called before MatSetUp() or MatXXXSetPreallocation() (or will default to 1) and the block size cannot be changed later

7183:     For MATMPIAIJ and MATSEQAIJ matrix formats, this function can be called at a later stage, provided that the specified block sizes
7184:     are compatible with the matrix local sizes.

7186:     The row and column block size determine the blocksize of the "row" and "column" vectors returned by MatCreateVecs().

7188:    Level: intermediate

7190:    Concepts: matrices^block size

7192: .seealso: MatCreateSeqBAIJ(), MatCreateBAIJ(), MatGetBlockSize(), MatSetBlockSize(), MatGetBlockSizes()
7193: @*/
7194: PetscErrorCode MatSetBlockSizes(Mat mat,PetscInt rbs,PetscInt cbs)
7195: {

7202:   if (mat->ops->setblocksizes) {
7203:     (*mat->ops->setblocksizes)(mat,rbs,cbs);
7204:   }
7205:   if (mat->rmap->refcnt) {
7206:     ISLocalToGlobalMapping l2g = NULL;
7207:     PetscLayout            nmap = NULL;

7209:     PetscLayoutDuplicate(mat->rmap,&nmap);
7210:     if (mat->rmap->mapping) {
7211:       ISLocalToGlobalMappingDuplicate(mat->rmap->mapping,&l2g);
7212:     }
7213:     PetscLayoutDestroy(&mat->rmap);
7214:     mat->rmap = nmap;
7215:     mat->rmap->mapping = l2g;
7216:   }
7217:   if (mat->cmap->refcnt) {
7218:     ISLocalToGlobalMapping l2g = NULL;
7219:     PetscLayout            nmap = NULL;

7221:     PetscLayoutDuplicate(mat->cmap,&nmap);
7222:     if (mat->cmap->mapping) {
7223:       ISLocalToGlobalMappingDuplicate(mat->cmap->mapping,&l2g);
7224:     }
7225:     PetscLayoutDestroy(&mat->cmap);
7226:     mat->cmap = nmap;
7227:     mat->cmap->mapping = l2g;
7228:   }
7229:   PetscLayoutSetBlockSize(mat->rmap,rbs);
7230:   PetscLayoutSetBlockSize(mat->cmap,cbs);
7231:   return(0);
7232: }
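
/*
   A minimal usage sketch for MatSetBlockSizes()/MatGetBlockSizes() (not part of the PETSc source):
   the block sizes are set before MatSetUp(), as required above. The helper name, the BAIJ type and
   the 100x100 size with 2x2 blocking are illustrative choices.
*/
static PetscErrorCode SketchBlockSizes(MPI_Comm comm,Mat *A)
{
  PetscErrorCode ierr;
  PetscInt       rbs,cbs;

  ierr = MatCreate(comm,A);CHKERRQ(ierr);
  ierr = MatSetSizes(*A,PETSC_DECIDE,PETSC_DECIDE,100,100);CHKERRQ(ierr);
  ierr = MatSetType(*A,MATBAIJ);CHKERRQ(ierr);
  ierr = MatSetBlockSizes(*A,2,2);CHKERRQ(ierr);   /* must precede preallocation/MatSetUp() */
  ierr = MatSetUp(*A);CHKERRQ(ierr);
  ierr = MatGetBlockSizes(*A,&rbs,&cbs);CHKERRQ(ierr);
  return 0;
}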

7234: /*@
7235:    MatSetBlockSizesFromMats - Sets the matrix block row and column sizes to match a pair of matrices

7237:    Logically Collective on Mat

7239:    Input Parameters:
7240: +  mat - the matrix
7241: .  fromRow - matrix from which to copy row block size
7242: -  fromCol - matrix from which to copy column block size (can be same as fromRow)

7244:    Level: developer

7246:    Concepts: matrices^block size

7248: .seealso: MatCreateSeqBAIJ(), MatCreateBAIJ(), MatGetBlockSize(), MatSetBlockSizes()
7249: @*/
7250: PetscErrorCode MatSetBlockSizesFromMats(Mat mat,Mat fromRow,Mat fromCol)
7251: {

7258:   if (fromRow->rmap->bs > 0) {PetscLayoutSetBlockSize(mat->rmap,fromRow->rmap->bs);}
7259:   if (fromCol->cmap->bs > 0) {PetscLayoutSetBlockSize(mat->cmap,fromCol->cmap->bs);}
7260:   return(0);
7261: }

7263: /*@
7264:    MatResidual - Default routine to calculate the residual.

7266:    Collective on Mat and Vec

7268:    Input Parameters:
7269: +  mat - the matrix
7270: .  b   - the right-hand-side
7271: -  x   - the approximate solution

7273:    Output Parameter:
7274: .  r - location to store the residual

7276:    Level: developer

7278: .keywords: MG, default, multigrid, residual

7280: .seealso: PCMGSetResidual()
7281: @*/
7282: PetscErrorCode MatResidual(Mat mat,Vec b,Vec x,Vec r)
7283: {

7292:   MatCheckPreallocated(mat,1);
7293:   PetscLogEventBegin(MAT_Residual,mat,0,0,0);
7294:   if (!mat->ops->residual) {
7295:     MatMult(mat,x,r);
7296:     VecAYPX(r,-1.0,b);
7297:   } else {
7298:     (*mat->ops->residual)(mat,b,x,r);
7299:   }
7300:   PetscLogEventEnd(MAT_Residual,mat,0,0,0);
7301:   return(0);
7302: }
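
/*
   A minimal usage sketch for MatResidual() (not part of the PETSc source): compute r = b - A*x and
   its 2-norm, assuming "A" is assembled and "x","b" are compatible vectors. The helper name is
   illustrative.
*/
static PetscErrorCode SketchResidualNorm(Mat A,Vec x,Vec b,PetscReal *nrm)
{
  PetscErrorCode ierr;
  Vec            r;

  ierr = VecDuplicate(b,&r);CHKERRQ(ierr);
  ierr = MatResidual(A,b,x,r);CHKERRQ(ierr);
  ierr = VecNorm(r,NORM_2,nrm);CHKERRQ(ierr);
  ierr = VecDestroy(&r);CHKERRQ(ierr);
  return 0;
}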

7304: /*@C
7305:     MatGetRowIJ - Returns the compressed row storage i and j indices for sequential matrices.

7307:    Collective on Mat

7309:     Input Parameters:
7310: +   mat - the matrix
7311: .   shift -  0 or 1 indicating we want the indices starting at 0 or 1
7312: .   symmetric - PETSC_TRUE or PETSC_FALSE indicating the matrix data structure should be   symmetrized
7313: -   inodecompressed - PETSC_TRUE or PETSC_FALSE  indicating if the nonzero structure of the
7314:                  inodes or the nonzero elements is wanted. For BAIJ matrices the compressed version is
7315:                  always used.

7317:     Output Parameters:
7318: +   n - number of rows in the (possibly compressed) matrix
7319: .   ia - the row pointers [of length n+1]
7320: .   ja - the column indices
7321: -   done - indicates if the routine actually worked and returned appropriate ia[] and ja[] arrays; callers
7322:            are responsible for handling the case when done == PETSC_FALSE and ia and ja are not set

7324:     Level: developer

7326:     Notes: You CANNOT change any of the ia[] or ja[] values.

7328:            Use MatRestoreRowIJ() when you are finished accessing the ia[] and ja[] values

7330:     Fortran Note:

7332:            In Fortran use
7333: $           PetscInt ia(1), ja(1)
7334: $           PetscOffset iia, jja
7335: $      call MatGetRowIJ(mat,shift,symmetric,inodecompressed,n,ia,iia,ja,jja,done,ierr)
7336: $      Access the ith and jth entries via ia(iia + i) and ja(jja + j)
7337: $
7338: $          or
7339: $
7340: $           PetscInt, pointer :: ia(:),ja(:)
7341: $    call  MatGetRowIJF90(mat,shift,symmetric,inodecompressed,n,ia,ja,done,ierr)
7342: $      Access the ith and jth entries via ia(i) and ja(j)



7346: .seealso: MatGetColumnIJ(), MatRestoreRowIJ(), MatSeqAIJGetArray()
7347: @*/
7348: PetscErrorCode MatGetRowIJ(Mat mat,PetscInt shift,PetscBool symmetric,PetscBool inodecompressed,PetscInt *n,const PetscInt *ia[],const PetscInt *ja[],PetscBool  *done)
7349: {

7359:   MatCheckPreallocated(mat,1);
7360:   if (!mat->ops->getrowij) *done = PETSC_FALSE;
7361:   else {
7362:     *done = PETSC_TRUE;
7363:     PetscLogEventBegin(MAT_GetRowIJ,mat,0,0,0);
7364:     (*mat->ops->getrowij)(mat,shift,symmetric,inodecompressed,n,ia,ja,done);
7365:     PetscLogEventEnd(MAT_GetRowIJ,mat,0,0,0);
7366:   }
7367:   return(0);
7368: }

7370: /*@C
7371:     MatGetColumnIJ - Returns the compressed column storage i and j indices for sequential matrices.

7373:     Collective on Mat

7375:     Input Parameters:
7376: +   mat - the matrix
7377: .   shift - 0 or 1 indicating we want the indices starting at 0 or 1
7378: .   symmetric - PETSC_TRUE or PETSC_FALSE indicating the matrix data structure should be
7379:                 symmetrized
7380: -   inodecompressed - PETSC_TRUE or PETSC_FALSE indicating if the nonzero structure of the
7381:                  inodes or the nonzero elements is wanted. For BAIJ matrices the compressed version is
7382:                  always used.

7383:     Output Parameters:
7384: +   n - number of columns in the (possibly compressed) matrix
7385: .   ia - the column pointers
7386: .   ja - the row indices
7387: -   done - PETSC_TRUE or PETSC_FALSE, indicating whether the values have been returned

7390:     Note:
7391:     This routine zeros out n, ia, and ja. This is to prevent accidental
7392:     use of the array after it has been restored. If you pass NULL, it will
7393:     not zero the pointers.  Use of ia or ja after MatRestoreColumnIJ() is invalid.

7395:     Level: developer

7397: .seealso: MatGetRowIJ(), MatRestoreColumnIJ()
7398: @*/
7399: PetscErrorCode MatGetColumnIJ(Mat mat,PetscInt shift,PetscBool symmetric,PetscBool inodecompressed,PetscInt *n,const PetscInt *ia[],const PetscInt *ja[],PetscBool  *done)
7400: {

7410:   MatCheckPreallocated(mat,1);
7411:   if (!mat->ops->getcolumnij) *done = PETSC_FALSE;
7412:   else {
7413:     *done = PETSC_TRUE;
7414:     (*mat->ops->getcolumnij)(mat,shift,symmetric,inodecompressed,n,ia,ja,done);
7415:   }
7416:   return(0);
7417: }

7419: /*@C
7420:     MatRestoreRowIJ - Call after you are completed with the ia,ja indices obtained with
7421:     MatGetRowIJ().

7423:     Collective on Mat

7425:     Input Parameters:
7426: +   mat - the matrix
7427: .   shift - 0 or 1 indicating we want the indices starting at 0 or 1
7428: .   symmetric - PETSC_TRUE or PETSC_FALSE indicating the matrix data structure should be
7429:                 symmetrized
7430: .   inodecompressed -  PETSC_TRUE or PETSC_FALSE indicating if the nonzero structure of the
7431:                  inodes or the nonzero elements is wanted. For BAIJ matrices the compressed version is
7432:                  always used.
7433: .   n - size of (possibly compressed) matrix
7434: .   ia - the row pointers
7435: -   ja - the column indices

7437:     Output Parameters:
7438: .   done - PETSC_TRUE or PETSC_FALSE indicating whether the values have been returned

7440:     Note:
7441:     This routine zeros out n, ia, and ja. This is to prevent accidental
7442:     use of the array after it has been restored. If you pass NULL, it will
7443:     not zero the pointers.  Use of ia or ja after MatRestoreRowIJ() is invalid.

7445:     Level: developer

7447: .seealso: MatGetRowIJ(), MatRestoreColumnIJ()
7448: @*/
7449: PetscErrorCode MatRestoreRowIJ(Mat mat,PetscInt shift,PetscBool symmetric,PetscBool inodecompressed,PetscInt *n,const PetscInt *ia[],const PetscInt *ja[],PetscBool  *done)
7450: {

7459:   MatCheckPreallocated(mat,1);

7461:   if (!mat->ops->restorerowij) *done = PETSC_FALSE;
7462:   else {
7463:     *done = PETSC_TRUE;
7464:     (*mat->ops->restorerowij)(mat,shift,symmetric,inodecompressed,n,ia,ja,done);
7465:     if (n)  *n = 0;
7466:     if (ia) *ia = NULL;
7467:     if (ja) *ja = NULL;
7468:   }
7469:   return(0);
7470: }
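
/*
   A minimal usage sketch of the paired MatGetRowIJ()/MatRestoreRowIJ() pattern (not part of the
   PETSc source), assuming "A" is a sequential AIJ matrix and checking the "done" flag as the
   documentation above requires. The helper name is illustrative.
*/
static PetscErrorCode SketchWalkRowIJ(Mat A)
{
  PetscErrorCode ierr;
  PetscInt       n,nz;
  const PetscInt *ia,*ja;
  PetscBool      done;

  ierr = MatGetRowIJ(A,0,PETSC_FALSE,PETSC_FALSE,&n,&ia,&ja,&done);CHKERRQ(ierr);
  if (done) {
    nz = ia[n];   /* total number of nonzeros; ia and ja must not be modified */
    ierr = PetscPrintf(PETSC_COMM_SELF,"%D rows, %D nonzeros\n",n,nz);CHKERRQ(ierr);
  }
  ierr = MatRestoreRowIJ(A,0,PETSC_FALSE,PETSC_FALSE,&n,&ia,&ja,&done);CHKERRQ(ierr);
  return 0;
}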

7472: /*@C
7473:     MatRestoreColumnIJ - Call after you are completed with the ia,ja indices obtained with
7474:     MatGetColumnIJ().

7476:     Collective on Mat

7478:     Input Parameters:
7479: +   mat - the matrix
7480: .   shift - 0 or 1 indicating we want the indices starting at 0 or 1
7481: .   symmetric - PETSC_TRUE or PETSC_FALSE indicating the matrix data structure should be
7482:                 symmetrized
7483: -   inodecompressed - PETSC_TRUE or PETSC_FALSE indicating if the nonzero structure of the
7484:                  inodes or the nonzero elements is wanted. For BAIJ matrices the compressed version is
7485:                  always used.

7487:     Output Parameters:
7488: +   n - size of (possibly compressed) matrix
7489: .   ia - the column pointers
7490: .   ja - the row indices
7491: -   done - PETSC_TRUE or PETSC_FALSE indicating whether the values have been returned

7493:     Level: developer

7495: .seealso: MatGetColumnIJ(), MatRestoreRowIJ()
7496: @*/
7497: PetscErrorCode MatRestoreColumnIJ(Mat mat,PetscInt shift,PetscBool symmetric,PetscBool inodecompressed,PetscInt *n,const PetscInt *ia[],const PetscInt *ja[],PetscBool  *done)
7498: {

7507:   MatCheckPreallocated(mat,1);

7509:   if (!mat->ops->restorecolumnij) *done = PETSC_FALSE;
7510:   else {
7511:     *done = PETSC_TRUE;
7512:     (*mat->ops->restorecolumnij)(mat,shift,symmetric,inodecompressed,n,ia,ja,done);
7513:     if (n)  *n = 0;
7514:     if (ia) *ia = NULL;
7515:     if (ja) *ja = NULL;
7516:   }
7517:   return(0);
7518: }

7520: /*@C
7521:     MatColoringPatch - Used inside matrix coloring routines that
7522:     use MatGetRowIJ() and/or MatGetColumnIJ().

7524:     Collective on Mat

7526:     Input Parameters:
7527: +   mat - the matrix
7528: .   ncolors - max color value
7529: .   n   - number of entries in colorarray
7530: -   colorarray - array indicating color for each column

7532:     Output Parameters:
7533: .   iscoloring - coloring generated using colorarray information

7535:     Level: developer

7537: .seealso: MatGetRowIJ(), MatGetColumnIJ()

7539: @*/
7540: PetscErrorCode MatColoringPatch(Mat mat,PetscInt ncolors,PetscInt n,ISColoringValue colorarray[],ISColoring *iscoloring)
7541: {

7549:   MatCheckPreallocated(mat,1);

7551:   if (!mat->ops->coloringpatch) {
7552:     ISColoringCreate(PetscObjectComm((PetscObject)mat),ncolors,n,colorarray,PETSC_OWN_POINTER,iscoloring);
7553:   } else {
7554:     (*mat->ops->coloringpatch)(mat,ncolors,n,colorarray,iscoloring);
7555:   }
7556:   return(0);
7557: }


7560: /*@
7561:    MatSetUnfactored - Resets a factored matrix to be treated as unfactored.

7563:    Logically Collective on Mat

7565:    Input Parameter:
7566: .  mat - the factored matrix to be reset

7568:    Notes:
7569:    This routine should be used only with factored matrices formed by in-place
7570:    factorization via ILU(0) (or by in-place LU factorization for the MATSEQDENSE
7571:    format).  This option can save memory, for example, when solving nonlinear
7572:    systems with a matrix-free Newton-Krylov method and a matrix-based, in-place
7573:    ILU(0) preconditioner.

7575:    Note that one can specify in-place ILU(0) factorization by calling
7576: .vb
7577:      PCType(pc,PCILU);
7578:      PCFactorSeUseInPlace(pc);
7579: .ve
7580:    or by using the options -pc_type ilu -pc_factor_in_place

7582:    In-place factorization ILU(0) can also be used as a local
7583:    solver for the blocks within the block Jacobi or additive Schwarz
7584:    methods (runtime option: -sub_pc_factor_in_place).  See Users-Manual: ch_pc
7585:    for details on setting local solver options.

7587:    Most users should employ the simplified KSP interface for linear solvers
7588:    instead of working directly with matrix algebra routines such as this.
7589:    See, e.g., KSPCreate().

7591:    Level: developer

7593: .seealso: PCFactorSetUseInPlace(), PCFactorGetUseInPlace()

7595:    Concepts: matrices^unfactored

7597: @*/
7598: PetscErrorCode MatSetUnfactored(Mat mat)
7599: {

7605:   MatCheckPreallocated(mat,1);
7606:   mat->factortype = MAT_FACTOR_NONE;
7607:   if (!mat->ops->setunfactored) return(0);
7608:   (*mat->ops->setunfactored)(mat);
7609:   return(0);
7610: }

7612: /*MC
7613:     MatDenseGetArrayF90 - Accesses a matrix array from Fortran90.

7615:     Synopsis:
7616:     MatDenseGetArrayF90(Mat x,{Scalar, pointer :: xx_v(:,:)},integer ierr)

7618:     Not collective

7620:     Input Parameter:
7621: .   x - matrix

7623:     Output Parameters:
7624: +   xx_v - the Fortran90 pointer to the array
7625: -   ierr - error code

7627:     Example of Usage:
7628: .vb
7629:       PetscScalar, pointer xx_v(:,:)
7630:       ....
7631:       call MatDenseGetArrayF90(x,xx_v,ierr)
7632:       a = xx_v(3)
7633:       call MatDenseRestoreArrayF90(x,xx_v,ierr)
7634: .ve

7636:     Level: advanced

7638: .seealso:  MatDenseRestoreArrayF90(), MatDenseGetArray(), MatDenseRestoreArray(), MatSeqAIJGetArrayF90()

7640:     Concepts: matrices^accessing array

7642: M*/

7644: /*MC
7645:     MatDenseRestoreArrayF90 - Restores a matrix array that has been
7646:     accessed with MatDenseGetArrayF90().

7648:     Synopsis:
7649:     MatDenseRestoreArrayF90(Mat x,{Scalar, pointer :: xx_v(:,:)},integer ierr)

7651:     Not collective

7653:     Input Parameters:
7654: +   x - matrix
7655: -   xx_v - the Fortran90 pointer to the array

7657:     Output Parameter:
7658: .   ierr - error code

7660:     Example of Usage:
7661: .vb
7662:        PetscScalar, pointer xx_v(:,:)
7663:        ....
7664:        call MatDenseGetArrayF90(x,xx_v,ierr)
7665:        a = xx_v(3)
7666:        call MatDenseRestoreArrayF90(x,xx_v,ierr)
7667: .ve

7669:     Level: advanced

7671: .seealso:  MatDenseGetArrayF90(), MatDenseGetArray(), MatDenseRestoreArray(), MatSeqAIJRestoreArrayF90()

7673: M*/


7676: /*MC
7677:     MatSeqAIJGetArrayF90 - Accesses a matrix array from Fortran90.

7679:     Synopsis:
7680:     MatSeqAIJGetArrayF90(Mat x,{Scalar, pointer :: xx_v(:)},integer ierr)

7682:     Not collective

7684:     Input Parameter:
7685: .   x - matrix

7687:     Output Parameters:
7688: +   xx_v - the Fortran90 pointer to the array
7689: -   ierr - error code

7691:     Example of Usage:
7692: .vb
7693:       PetscScalar, pointer xx_v(:)
7694:       ....
7695:       call MatSeqAIJGetArrayF90(x,xx_v,ierr)
7696:       a = xx_v(3)
7697:       call MatSeqAIJRestoreArrayF90(x,xx_v,ierr)
7698: .ve

7700:     Level: advanced

7702: .seealso:  MatSeqAIJRestoreArrayF90(), MatSeqAIJGetArray(), MatSeqAIJRestoreArray(), MatDenseGetArrayF90()

7704:     Concepts: matrices^accessing array

7706: M*/

7708: /*MC
7709:     MatSeqAIJRestoreArrayF90 - Restores a matrix array that has been
7710:     accessed with MatSeqAIJGetArrayF90().

7712:     Synopsis:
7713:     MatSeqAIJRestoreArrayF90(Mat x,{Scalar, pointer :: xx_v(:)},integer ierr)

7715:     Not collective

7717:     Input Parameters:
7718: +   x - matrix
7719: -   xx_v - the Fortran90 pointer to the array

7721:     Output Parameter:
7722: .   ierr - error code

7724:     Example of Usage:
7725: .vb
7726:        PetscScalar, pointer xx_v(:)
7727:        ....
7728:        call MatSeqAIJGetArrayF90(x,xx_v,ierr)
7729:        a = xx_v(3)
7730:        call MatSeqAIJRestoreArrayF90(x,xx_v,ierr)
7731: .ve

7733:     Level: advanced

7735: .seealso:  MatSeqAIJGetArrayF90(), MatSeqAIJGetArray(), MatSeqAIJRestoreArray(), MatDenseRestoreArrayF90()

7737: M*/


7740: /*@
7741:     MatCreateSubMatrix - Gets a single submatrix on the same number of processors
7742:                       as the original matrix.

7744:     Collective on Mat

7746:     Input Parameters:
7747: +   mat - the original matrix
7748: .   isrow - parallel IS containing the rows this processor should obtain
7749: .   iscol - parallel IS containing all columns you wish to keep. Each process should list the columns that will be in ITS "diagonal part" in the new matrix.
7750: -   cll - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX

7752:     Output Parameter:
7753: .   newmat - the new submatrix, of the same type as the old

7755:     Level: advanced

7757:     Notes:
7758:     The submatrix will be able to be multiplied with vectors using the same layout as iscol.

7760:     Some matrix types place restrictions on the row and column indices, such
7761:     as that they be sorted or that they be equal to each other.

7763:     The index sets may not have duplicate entries.

7765:       The first time this is called you should use a cll of MAT_INITIAL_MATRIX;
7766:    the MatCreateSubMatrix() routine will create the newmat for you. Any additional calls
7767:    to this routine with a mat of the same nonzero structure and with a cll of MAT_REUSE_MATRIX
7768:    will reuse the matrix generated the first time.  You should call MatDestroy() on newmat when
7769:    you are finished using it.

7771:     The communicator of the newly obtained matrix is ALWAYS the same as the communicator of
7772:     the input matrix.

7774:     If iscol is NULL then all columns are obtained (not supported in Fortran).

7776:    Example usage:
7777:    Consider the following 8x8 matrix with 34 non-zero values, that is
7778:    assembled across 3 processors. Let's assume that proc0 owns 3 rows,
7779:    proc1 owns 3 rows, proc2 owns 2 rows. This division can be shown
7780:    as follows:

7782: .vb
7783:             1  2  0  |  0  3  0  |  0  4
7784:     Proc0   0  5  6  |  7  0  0  |  8  0
7785:             9  0 10  | 11  0  0  | 12  0
7786:     -------------------------------------
7787:            13  0 14  | 15 16 17  |  0  0
7788:     Proc1   0 18  0  | 19 20 21  |  0  0
7789:             0  0  0  | 22 23  0  | 24  0
7790:     -------------------------------------
7791:     Proc2  25 26 27  |  0  0 28  | 29  0
7792:            30  0  0  | 31 32 33  |  0 34
7793: .ve

7795:     Suppose isrow = [0 1 | 4 | 6 7] and iscol = [1 2 | 3 4 5 | 6].  The resulting submatrix is

7797: .vb
7798:             2  0  |  0  3  0  |  0
7799:     Proc0   5  6  |  7  0  0  |  8
7800:     -------------------------------
7801:     Proc1  18  0  | 19 20 21  |  0
7802:     -------------------------------
7803:     Proc2  26 27  |  0  0 28  | 29
7804:             0  0  | 31 32 33  |  0
7805: .ve


7808:     Concepts: matrices^submatrices

7810: .seealso: MatCreateSubMatrices()
7811: @*/
7812: PetscErrorCode MatCreateSubMatrix(Mat mat,IS isrow,IS iscol,MatReuse cll,Mat *newmat)
7813: {
7815:   PetscMPIInt    size;
7816:   Mat            *local;
7817:   IS             iscoltmp;

7826:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
7827:   if (cll == MAT_IGNORE_MATRIX) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Cannot use MAT_IGNORE_MATRIX");

7829:   MatCheckPreallocated(mat,1);
7830:   MPI_Comm_size(PetscObjectComm((PetscObject)mat),&size);

7832:   if (!iscol || isrow == iscol) {
7833:     PetscBool   stride;
7834:     PetscMPIInt grabentirematrix = 0,grab;
7835:     PetscObjectTypeCompare((PetscObject)isrow,ISSTRIDE,&stride);
7836:     if (stride) {
7837:       PetscInt first,step,n,rstart,rend;
7838:       ISStrideGetInfo(isrow,&first,&step);
7839:       if (step == 1) {
7840:         MatGetOwnershipRange(mat,&rstart,&rend);
7841:         if (rstart == first) {
7842:           ISGetLocalSize(isrow,&n);
7843:           if (n == rend-rstart) {
7844:             grabentirematrix = 1;
7845:           }
7846:         }
7847:       }
7848:     }
7849:     MPIU_Allreduce(&grabentirematrix,&grab,1,MPI_INT,MPI_MIN,PetscObjectComm((PetscObject)mat));
7850:     if (grab) {
7851:       PetscInfo(mat,"Getting entire matrix as submatrix\n");
7852:       if (cll == MAT_INITIAL_MATRIX) {
7853:         *newmat = mat;
7854:         PetscObjectReference((PetscObject)mat);
7855:       }
7856:       return(0);
7857:     }
7858:   }

7860:   if (!iscol) {
7861:     ISCreateStride(PetscObjectComm((PetscObject)mat),mat->cmap->n,mat->cmap->rstart,1,&iscoltmp);
7862:   } else {
7863:     iscoltmp = iscol;
7864:   }

7866:   /* if original matrix is on just one processor then use submatrix generated */
7867:   if (mat->ops->createsubmatrices && !mat->ops->createsubmatrix && size == 1 && cll == MAT_REUSE_MATRIX) {
7868:     MatCreateSubMatrices(mat,1,&isrow,&iscoltmp,MAT_REUSE_MATRIX,&newmat);
7869:     if (!iscol) {ISDestroy(&iscoltmp);}
7870:     return(0);
7871:   } else if (mat->ops->createsubmatrices && !mat->ops->createsubmatrix && size == 1) {
7872:     MatCreateSubMatrices(mat,1,&isrow,&iscoltmp,MAT_INITIAL_MATRIX,&local);
7873:     *newmat = *local;
7874:     PetscFree(local);
7875:     if (!iscol) {ISDestroy(&iscoltmp);}
7876:     return(0);
7877:   } else if (!mat->ops->createsubmatrix) {
7878:     /* Create a new matrix type that implements the operation using the full matrix */
7879:     PetscLogEventBegin(MAT_CreateSubMat,mat,0,0,0);
7880:     switch (cll) {
7881:     case MAT_INITIAL_MATRIX:
7882:       MatCreateSubMatrixVirtual(mat,isrow,iscoltmp,newmat);
7883:       break;
7884:     case MAT_REUSE_MATRIX:
7885:       MatSubMatrixVirtualUpdate(*newmat,mat,isrow,iscoltmp);
7886:       break;
7887:     default: SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_OUTOFRANGE,"Invalid MatReuse, must be either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX");
7888:     }
7889:     PetscLogEventEnd(MAT_CreateSubMat,mat,0,0,0);
7890:     if (!iscol) {ISDestroy(&iscoltmp);}
7891:     return(0);
7892:   }

7894:   if (!mat->ops->createsubmatrix) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
7895:   PetscLogEventBegin(MAT_CreateSubMat,mat,0,0,0);
7896:   (*mat->ops->createsubmatrix)(mat,isrow,iscoltmp,cll,newmat);
7897:   PetscLogEventEnd(MAT_CreateSubMat,mat,0,0,0);
7898:   if (!iscol) {ISDestroy(&iscoltmp);}
7899:   if (*newmat && cll == MAT_INITIAL_MATRIX) {PetscObjectStateIncrease((PetscObject)*newmat);}
7900:   return(0);
7901: }
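
/*
   Editor's usage sketch (not part of the original source): a minimal illustration of the
   MAT_INITIAL_MATRIX / MAT_REUSE_MATRIX calling sequence described in the manual page above.
   The matrix A is assumed to be assembled already; error checking (ierr/CHKERRQ) is omitted.
.vb
     Mat      A,B;
     IS       isrow;
     PetscInt rstart,rend;

     // keep every second locally owned row; pass NULL for iscol to keep all columns
     MatGetOwnershipRange(A,&rstart,&rend);
     ISCreateStride(PETSC_COMM_WORLD,(rend-rstart)/2,rstart,2,&isrow);
     MatCreateSubMatrix(A,isrow,NULL,MAT_INITIAL_MATRIX,&B);   // first call creates B
     // ... change the numerical values of A (same nonzero structure) ...
     MatCreateSubMatrix(A,isrow,NULL,MAT_REUSE_MATRIX,&B);     // later calls refill the existing B
     ISDestroy(&isrow);
     MatDestroy(&B);
.ve
*/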

7903: /*@
7904:    MatStashSetInitialSize - sets the sizes of the matrix stash, that is
7905:    used during the assembly process to store values that belong to
7906:    other processors.

7908:    Not Collective

7910:    Input Parameters:
7911: +  mat   - the matrix
7912: .  size  - the initial size of the stash.
7913: -  bsize - the initial size of the block-stash(if used).

7915:    Options Database Keys:
7916: +   -matstash_initial_size <size> or <size0,size1,...sizep-1>
7917: -   -matstash_block_initial_size <bsize>  or <bsize0,bsize1,...bsizep-1>

7919:    Level: intermediate

7921:    Notes:
7922:      The block-stash is used for values set with MatSetValuesBlocked() while
7923:      the stash is used for values set with MatSetValues()

7925:      Run with the option -info and look for output of the form
7926:      MatAssemblyBegin_MPIXXX:Stash has MM entries, uses nn mallocs.
7927:      to determine the appropriate value, MM, to use for size, and for output of the form
7928:      MatAssemblyBegin_MPIXXX:Block-Stash has BMM entries, uses nn mallocs.
7929:      to determine the value, BMM, to use for bsize.

7931:    Concepts: stash^setting matrix size
7932:    Concepts: matrices^stash

7934: .seealso: MatAssemblyBegin(), MatAssemblyEnd(), Mat, MatStashGetInfo()

7936: @*/
7937: PetscErrorCode MatStashSetInitialSize(Mat mat,PetscInt size, PetscInt bsize)
7938: {

7944:   MatStashSetInitialSize_Private(&mat->stash,size);
7945:   MatStashSetInitialSize_Private(&mat->bstash,bsize);
7946:   return(0);
7947: }
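
/*
   Editor's usage sketch (not part of the original source): pre-sizing the stash based on the
   -info output described above.  The value 10000 is a hypothetical figure taken from a previous
   run; mat, row and col are assumed to be set up by the caller.
.vb
     Mat         mat;
     PetscInt    row,col;
     PetscScalar v = 1.0;

     // a previous run with -info reported, e.g.
     //   MatAssemblyBegin_MPIAIJ:Stash has 10000 entries, uses 6 mallocs.
     // so pre-size the (non-block) stash to avoid those mallocs on the next assembly
     MatStashSetInitialSize(mat,10000,0);
     MatSetValues(mat,1,&row,1,&col,&v,ADD_VALUES);   // off-process entries go into the stash
     MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY);
     MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY);
.ve
*/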

7949: /*@
7950:    MatInterpolateAdd - w = y + A*x or w = y + A'*x, depending on the shape of
7951:      the matrix

7953:    Neighbor-wise Collective on Mat

7955:    Input Parameters:
7956: +  mat   - the matrix
7957: .  x,y - the vectors
7958: -  w - where the result is stored

7960:    Level: intermediate

7962:    Notes:
7963:     w may be the same vector as y.

7965:     This allows one to use either the restriction or interpolation (its transpose)
7966:     matrix to do the interpolation

7968:     Concepts: interpolation

7970: .seealso: MatMultAdd(), MatMultTransposeAdd(), MatRestrict()

7972: @*/
7973: PetscErrorCode MatInterpolateAdd(Mat A,Vec x,Vec y,Vec w)
7974: {
7976:   PetscInt       M,N,Ny;

7984:   MatCheckPreallocated(A,1);
7985:   MatGetSize(A,&M,&N);
7986:   VecGetSize(y,&Ny);
7987:   if (M == Ny) {
7988:     MatMultAdd(A,x,y,w);
7989:   } else {
7990:     MatMultTransposeAdd(A,x,y,w);
7991:   }
7992:   return(0);
7993: }

7995: /*@
7996:    MatInterpolate - y = A*x or A'*x depending on the shape of
7997:      the matrix

7999:    Neighbor-wise Collective on Mat

8001:    Input Parameters:
8002: +  mat   - the matrix
8003: -  x,y - the vectors

8005:    Level: intermediate

8007:    Notes:
8008:     This allows one to use either the restriction or interpolation (its transpose)
8009:     matrix to do the interpolation

8011:    Concepts: matrices^interpolation

8013: .seealso: MatMultAdd(), MatMultTransposeAdd(), MatRestrict()

8015: @*/
8016: PetscErrorCode MatInterpolate(Mat A,Vec x,Vec y)
8017: {
8019:   PetscInt       M,N,Ny;

8026:   MatCheckPreallocated(A,1);
8027:   MatGetSize(A,&M,&N);
8028:   VecGetSize(y,&Ny);
8029:   if (M == Ny) {
8030:     MatMult(A,x,y);
8031:   } else {
8032:     MatMultTranspose(A,x,y);
8033:   }
8034:   return(0);
8035: }

8037: /*@
8038:    MatRestrict - y = A*x or A'*x

8040:    Neighbor-wise Collective on Mat

8042:    Input Parameters:
8043: +  mat   - the matrix
8044: -  x,y - the vectors

8046:    Level: intermediate

8048:    Notes:
8049:     This allows one to use either the restriction or interpolation (its transpose)
8050:     matrix to do the restriction

8052:    Concepts: matrices^restriction

8054: .seealso: MatMultAdd(), MatMultTransposeAdd(), MatInterpolate()

8056: @*/
8057: PetscErrorCode MatRestrict(Mat A,Vec x,Vec y)
8058: {
8060:   PetscInt       M,N,Ny;

8067:   MatCheckPreallocated(A,1);

8069:   MatGetSize(A,&M,&N);
8070:   VecGetSize(y,&Ny);
8071:   if (M == Ny) {
8072:     MatMult(A,x,y);
8073:   } else {
8074:     MatMultTranspose(A,x,y);
8075:   }
8076:   return(0);
8077: }
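
/*
   Editor's usage sketch (not part of the original source): how MatInterpolate()/MatRestrict()
   pick the forward or transpose product from the vector sizes.  P is assumed to be a rectangular
   interpolation matrix from a coarse space (its columns) to a fine space (its rows).
.vb
     Mat P;        // m x n interpolation, m (fine) > n (coarse)
     Vec xc,xf;    // coarse and fine grid vectors

     MatCreateVecs(P,&xc,&xf);       // xc matches the columns of P, xf matches the rows
     MatInterpolate(P,xc,xf);        // xf = P*xc   (sizes select MatMult)
     MatRestrict(P,xf,xc);           // xc = P'*xf  (sizes select MatMultTranspose)
     MatInterpolateAdd(P,xc,xf,xf);  // xf = xf + P*xc (w may be the same vector as y)
     VecDestroy(&xc);
     VecDestroy(&xf);
.ve
*/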

8079: /*@
8080:    MatGetNullSpace - retrieves the null space attached to a matrix with MatSetNullSpace().

8082:    Logically Collective on Mat and MatNullSpace

8084:    Input Parameter:
8085: .  mat - the matrix

   Output Parameter:
8086: .  nullsp - the null space object attached with MatSetNullSpace(), or NULL if none has been attached

8088:    Level: developer

8090:    Concepts: null space^attaching to matrix

8092: .seealso: MatCreate(), MatNullSpaceCreate(), MatSetNearNullSpace(), MatSetNullSpace()
8093: @*/
8094: PetscErrorCode MatGetNullSpace(Mat mat, MatNullSpace *nullsp)
8095: {
8100:   *nullsp = mat->nullsp;
8101:   return(0);
8102: }

8104: /*@
8105:    MatSetNullSpace - attaches a null space to a matrix.

8107:    Logically Collective on Mat and MatNullSpace

8109:    Input Parameters:
8110: +  mat - the matrix
8111: -  nullsp - the null space object

8113:    Level: advanced

8115:    Notes:
8116:       This null space is used by the linear solvers. Overwrites any previous null space that may have been attached

8118:       For inconsistent singular systems (linear systems where the right hand side is not in the range of the operator) you also likely should
8119:       call MatSetTransposeNullSpace(). This allows the linear system to be solved in a least squares sense.

8121:       You can remove the null space by calling this routine with a nullsp of NULL.


8124:       The fundamental theorem of linear algebra (Gilbert Strang, Introduction to Applied Mathematics, page 72) states that
8125:    for a matrix A mapping R^n to R^m (m rows, n columns), R^n is the direct sum of the null space of A, n(A), and the range of A^T, R(A^T).
8126:    Similarly, R^m is the direct sum of n(A^T) and R(A).  Hence the linear system A x = b has a solution only if b is in R(A) (equivalently, b is orthogonal to
8127:    n(A^T)), and if x is a solution then x + alpha n(A) is a solution for any alpha. The minimum norm solution is orthogonal to n(A). For problems without a solution,
8128:    the solution that minimizes the norm of the residual (the least squares solution) can be obtained by solving A x = \hat{b}, where \hat{b} is b orthogonalized against n(A^T).

8130:       Krylov solvers can produce the minimal norm solution to the least squares problem by utilizing MatNullSpaceRemove().

8132:     If the matrix is known to be symmetric, because it is an SBAIJ matrix or because one has called MatSetOption(mat,MAT_SYMMETRIC,PETSC_TRUE) (or MAT_SYMMETRY_ETERNAL), this
8133:     routine also automatically calls MatSetTransposeNullSpace().

8135:    Concepts: null space^attaching to matrix

8137: .seealso: MatCreate(), MatNullSpaceCreate(), MatSetNearNullSpace(), MatGetNullSpace(), MatSetTransposeNullSpace(), MatGetTransposeNullSpace(), MatNullSpaceRemove()
8138: @*/
8139: PetscErrorCode MatSetNullSpace(Mat mat,MatNullSpace nullsp)
8140: {

8147:   MatCheckPreallocated(mat,1);
8148:   if (nullsp) {PetscObjectReference((PetscObject)nullsp);}
8149:   MatNullSpaceDestroy(&mat->nullsp);
8150:   mat->nullsp = nullsp;
8151:   if (mat->symmetric_set && mat->symmetric) {
8152:     MatSetTransposeNullSpace(mat,nullsp);
8153:   }
8154:   return(0);
8155: }
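
/*
   Editor's usage sketch (not part of the original source): attaching the constant null space of a
   singular operator (for example a pure Neumann Laplacian) so that the linear solvers can handle
   it, as described in the notes above.  A is assumed to be assembled.
.vb
     Mat          A;
     MatNullSpace nullsp;

     // null space = span{ constant vector }
     MatNullSpaceCreate(PetscObjectComm((PetscObject)A),PETSC_TRUE,0,NULL,&nullsp);
     MatSetNullSpace(A,nullsp);
     MatNullSpaceDestroy(&nullsp);   // A keeps its own reference
.ve
*/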

8157: /*@
8158:    MatGetTransposeNullSpace - retrieves the null space of the transpose of a matrix.

8160:    Logically Collective on Mat and MatNullSpace

8162:    Input Parameter:
8163: .  mat - the matrix

   Output Parameter:
8164: .  nullsp - the null space object attached with MatSetTransposeNullSpace(), or NULL if none has been attached

8166:    Level: developer

8168:    Concepts: null space^attaching to matrix

8170: .seealso: MatCreate(), MatNullSpaceCreate(), MatSetNearNullSpace(), MatSetTransposeNullSpace(), MatSetNullSpace(), MatGetNullSpace()
8171: @*/
8172: PetscErrorCode MatGetTransposeNullSpace(Mat mat, MatNullSpace *nullsp)
8173: {
8178:   *nullsp = mat->transnullsp;
8179:   return(0);
8180: }

8182: /*@
8183:    MatSetTransposeNullSpace - attaches the null space of the transpose of a matrix (the left null space) to the matrix.

8185:    Logically Collective on Mat and MatNullSpace

8187:    Input Parameters:
8188: +  mat - the matrix
8189: -  nullsp - the null space object

8191:    Level: advanced

8193:    Notes:
8194:       For inconsistent singular systems (linear systems where the right hand side is not in the range of the operator) this allows the linear system to be solved in a least squares sense.
8195:       You must also call MatSetNullSpace()


8198:       The fundamental theorem of linear algebra (Gilbert Strang, Introduction to Applied Mathematics, page 72) states that
8199:    for a matrix A mapping R^n to R^m (m rows, n columns), R^n is the direct sum of the null space of A, n(A), and the range of A^T, R(A^T).
8200:    Similarly, R^m is the direct sum of n(A^T) and R(A).  Hence the linear system A x = b has a solution only if b is in R(A) (equivalently, b is orthogonal to
8201:    n(A^T)), and if x is a solution then x + alpha n(A) is a solution for any alpha. The minimum norm solution is orthogonal to n(A). For problems without a solution,
8202:    the solution that minimizes the norm of the residual (the least squares solution) can be obtained by solving A x = \hat{b}, where \hat{b} is b orthogonalized against n(A^T).

8204:       Krylov solvers can produce the minimal norm solution to the least squares problem by utilizing MatNullSpaceRemove().

8206:    Concepts: null space^attaching to matrix

8208: .seealso: MatCreate(), MatNullSpaceCreate(), MatSetNearNullSpace(), MatGetNullSpace(), MatSetNullSpace(), MatGetTransposeNullSpace(), MatNullSpaceRemove()
8209: @*/
8210: PetscErrorCode MatSetTransposeNullSpace(Mat mat,MatNullSpace nullsp)
8211: {

8218:   MatCheckPreallocated(mat,1);
8219:   PetscObjectReference((PetscObject)nullsp);
8220:   MatNullSpaceDestroy(&mat->transnullsp);
8221:   mat->transnullsp = nullsp;
8222:   return(0);
8223: }

8225: /*@
8226:    MatSetNearNullSpace - attaches a null space to a matrix, which is often the null space (rigid body modes) of the operator without boundary conditions.
8227:         This null space will be used to provide near null space vectors to a multigrid preconditioner built from this matrix.

8229:    Logically Collective on Mat and MatNullSpace

8231:    Input Parameters:
8232: +  mat - the matrix
8233: -  nullsp - the null space object

8235:    Level: advanced

8237:    Notes:
8238:       Overwrites any previous near null space that may have been attached

8240:       You can remove the near null space by calling this routine with a nullsp of NULL.

8242:    Concepts: null space^attaching to matrix

8244: .seealso: MatCreate(), MatNullSpaceCreate(), MatSetNullSpace(), MatNullSpaceCreateRigidBody(), MatGetNearNullSpace()
8245: @*/
8246: PetscErrorCode MatSetNearNullSpace(Mat mat,MatNullSpace nullsp)
8247: {

8254:   MatCheckPreallocated(mat,1);
8255:   if (nullsp) {PetscObjectReference((PetscObject)nullsp);}
8256:   MatNullSpaceDestroy(&mat->nearnullsp);
8257:   mat->nearnullsp = nullsp;
8258:   return(0);
8259: }
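
/*
   Editor's usage sketch (not part of the original source): supplying rigid body modes as the near
   null space of an elasticity operator so that a multigrid preconditioner built from A can use
   them.  The coordinate vector coords is assumed to have block size equal to the spatial dimension.
.vb
     Mat          A;       // elasticity stiffness matrix
     Vec          coords;  // nodal coordinates, block size = spatial dimension
     MatNullSpace nearnull;

     MatNullSpaceCreateRigidBody(coords,&nearnull);
     MatSetNearNullSpace(A,nearnull);
     MatNullSpaceDestroy(&nearnull);   // A keeps its own reference
.ve
*/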

8261: /*@
8262:    MatGetNearNullSpace - Gets the near null space attached with MatSetNearNullSpace()

8264:    Not Collective

8266:    Input Parameters:
8267: .  mat - the matrix

8269:    Output Parameters:
8270: .  nullsp - the null space object, NULL if not set

8272:    Level: developer

8274:    Concepts: null space^attaching to matrix

8276: .seealso: MatSetNearNullSpace(), MatGetNullSpace(), MatNullSpaceCreate()
8277: @*/
8278: PetscErrorCode MatGetNearNullSpace(Mat mat,MatNullSpace *nullsp)
8279: {
8284:   MatCheckPreallocated(mat,1);
8285:   *nullsp = mat->nearnullsp;
8286:   return(0);
8287: }

8289: /*@C
8290:    MatICCFactor - Performs in-place incomplete Cholesky factorization of matrix.

8292:    Collective on Mat

8294:    Input Parameters:
8295: +  mat - the matrix
8296: .  row - row/column permutation
8297: -  info - options for the factorization, including the expected fill factor (>= 1.0) and the level of fill k for ICC(k)

8300:    Notes:
8301:    Probably really in-place only when level of fill is zero, otherwise allocates
8302:    new space to store factored matrix and deletes previous memory.

8304:    Most users should employ the simplified KSP interface for linear solvers
8305:    instead of working directly with matrix algebra routines such as this.
8306:    See, e.g., KSPCreate().

8308:    Level: developer

8310:    Concepts: matrices^incomplete Cholesky factorization
8311:    Concepts: Cholesky factorization

8313: .seealso: MatICCFactorSymbolic(), MatLUFactorNumeric(), MatCholeskyFactor()

8315:     Developer Note: the Fortran interface is not autogenerated as the f90
8316:     interface definition cannot be generated correctly [due to MatFactorInfo]

8318: @*/
8319: PetscErrorCode MatICCFactor(Mat mat,IS row,const MatFactorInfo *info)
8320: {

8328:   if (mat->rmap->N != mat->cmap->N) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONG,"matrix must be square");
8329:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
8330:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
8331:   if (!mat->ops->iccfactor) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
8332:   MatCheckPreallocated(mat,1);
8333:   (*mat->ops->iccfactor)(mat,row,info);
8334:   PetscObjectStateIncrease((PetscObject)mat);
8335:   return(0);
8336: }

8338: /*@
8339:    MatDiagonalScaleLocal - Scales columns of a matrix given the scaling values including the
8340:          ghosted ones.

8342:    Not Collective

8344:    Input Parameters:
8345: +  mat - the matrix
8346: -  diag - the diagonal values, including ghost ones

8348:    Level: developer

8350:    Notes: Works only for MPIAIJ and MPIBAIJ matrices

8352: .seealso: MatDiagonalScale()
8353: @*/
8354: PetscErrorCode MatDiagonalScaleLocal(Mat mat,Vec diag)
8355: {
8357:   PetscMPIInt    size;


8364:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Matrix must be already assembled");
8365:   PetscLogEventBegin(MAT_Scale,mat,0,0,0);
8366:   MPI_Comm_size(PetscObjectComm((PetscObject)mat),&size);
8367:   if (size == 1) {
8368:     PetscInt n,m;
8369:     VecGetSize(diag,&n);
8370:     MatGetSize(mat,0,&m);
8371:     if (m == n) {
8372:       MatDiagonalScale(mat,0,diag);
8373:     } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Only supported for sequential matrices when no ghost points/periodic conditions");
8374:   } else {
8375:     PetscUseMethod(mat,"MatDiagonalScaleLocal_C",(Mat,Vec),(mat,diag));
8376:   }
8377:   PetscLogEventEnd(MAT_Scale,mat,0,0,0);
8378:   PetscObjectStateIncrease((PetscObject)mat);
8379:   return(0);
8380: }

8382: /*@
8383:    MatGetInertia - Gets the inertia from a factored matrix

8385:    Collective on Mat

8387:    Input Parameter:
8388: .  mat - the matrix

8390:    Output Parameters:
8391: +   nneg - number of negative eigenvalues
8392: .   nzero - number of zero eigenvalues
8393: -   npos - number of positive eigenvalues

8395:    Level: advanced

8397:    Notes: Matrix must have been factored by MatCholeskyFactor()


8400: @*/
8401: PetscErrorCode MatGetInertia(Mat mat,PetscInt *nneg,PetscInt *nzero,PetscInt *npos)
8402: {

8408:   if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
8409:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Numeric factor mat is not assembled");
8410:   if (!mat->ops->getinertia) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
8411:   (*mat->ops->getinertia)(mat,nneg,nzero,npos);
8412:   return(0);
8413: }

8415: /* ----------------------------------------------------------------*/
8416: /*@C
8417:    MatSolves - Solves A x = b, given a factored matrix, for a collection of vectors

8419:    Neighbor-wise Collective on Mat and Vecs

8421:    Input Parameters:
8422: +  mat - the factored matrix
8423: -  b - the right-hand-side vectors

8425:    Output Parameter:
8426: .  x - the result vectors

8428:    Notes:
8429:    The vectors b and x cannot be the same.  I.e., one cannot
8430:    call MatSolves(A,x,x).

8433:    Most users should employ the simplified KSP interface for linear solvers
8434:    instead of working directly with matrix algebra routines such as this.
8435:    See, e.g., KSPCreate().

8437:    Level: developer

8439:    Concepts: matrices^triangular solves

8441: .seealso: MatSolveAdd(), MatSolveTranspose(), MatSolveTransposeAdd(), MatSolve()
8442: @*/
8443: PetscErrorCode MatSolves(Mat mat,Vecs b,Vecs x)
8444: {

8450:   if (x == b) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"x and b must be different vectors");
8451:   if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
8452:   if (!mat->rmap->N && !mat->cmap->N) return(0);

8454:   if (!mat->ops->solves) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
8455:   MatCheckPreallocated(mat,1);
8456:   PetscLogEventBegin(MAT_Solves,mat,0,0,0);
8457:   (*mat->ops->solves)(mat,b,x);
8458:   PetscLogEventEnd(MAT_Solves,mat,0,0,0);
8459:   return(0);
8460: }

8462: /*@
8463:    MatIsSymmetric - Test whether a matrix is symmetric

8465:    Collective on Mat

8467:    Input Parameters:
8468: +  A - the matrix to test
8469: -  tol - difference between value and its transpose less than this amount counts as equal (use 0.0 for exact transpose)

8471:    Output Parameter:
8472: .  flg - the result

8474:    Notes: For real numbers MatIsSymmetric() and MatIsHermitian() return identical results

8476:    Level: intermediate

8478:    Concepts: matrix^symmetry

8480: .seealso: MatTranspose(), MatIsTranspose(), MatIsHermitian(), MatIsStructurallySymmetric(), MatSetOption(), MatIsSymmetricKnown()
8481: @*/
8482: PetscErrorCode MatIsSymmetric(Mat A,PetscReal tol,PetscBool  *flg)
8483: {


8490:   if (!A->symmetric_set) {
8491:     if (!A->ops->issymmetric) {
8492:       MatType mattype;
8493:       MatGetType(A,&mattype);
8494:       SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Matrix of type <%s> does not support checking for symmetric",mattype);
8495:     }
8496:     (*A->ops->issymmetric)(A,tol,flg);
8497:     if (!tol) {
8498:       A->symmetric_set = PETSC_TRUE;
8499:       A->symmetric     = *flg;
8500:       if (A->symmetric) {
8501:         A->structurally_symmetric_set = PETSC_TRUE;
8502:         A->structurally_symmetric     = PETSC_TRUE;
8503:       }
8504:     }
8505:   } else if (A->symmetric) {
8506:     *flg = PETSC_TRUE;
8507:   } else if (!tol) {
8508:     *flg = PETSC_FALSE;
8509:   } else {
8510:     if (!A->ops->issymmetric) {
8511:       MatType mattype;
8512:       MatGetType(A,&mattype);
8513:       SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Matrix of type <%s> does not support checking for symmetric",mattype);
8514:     }
8515:     (*A->ops->issymmetric)(A,tol,flg);
8516:   }
8517:   return(0);
8518: }

8520: /*@
8521:    MatIsHermitian - Test whether a matrix is Hermitian

8523:    Collective on Mat

8525:    Input Parameters:
8526: +  A - the matrix to test
8527: -  tol - difference between value and its conjugate transpose less than this amount counts as equal (use 0.0 for exact Hermitian)

8529:    Output Parameter:
8530: .  flg - the result

8532:    Level: intermediate

8534:    Concepts: matrix^symmetry

8536: .seealso: MatTranspose(), MatIsTranspose(), MatIsHermitian(), MatIsStructurallySymmetric(), MatSetOption(),
8537:           MatIsSymmetricKnown(), MatIsSymmetric()
8538: @*/
8539: PetscErrorCode MatIsHermitian(Mat A,PetscReal tol,PetscBool  *flg)
8540: {


8547:   if (!A->hermitian_set) {
8548:     if (!A->ops->ishermitian) {
8549:       MatType mattype;
8550:       MatGetType(A,&mattype);
8551:       SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Matrix of type <%s> does not support checking for hermitian",mattype);
8552:     }
8553:     (*A->ops->ishermitian)(A,tol,flg);
8554:     if (!tol) {
8555:       A->hermitian_set = PETSC_TRUE;
8556:       A->hermitian     = *flg;
8557:       if (A->hermitian) {
8558:         A->structurally_symmetric_set = PETSC_TRUE;
8559:         A->structurally_symmetric     = PETSC_TRUE;
8560:       }
8561:     }
8562:   } else if (A->hermitian) {
8563:     *flg = PETSC_TRUE;
8564:   } else if (!tol) {
8565:     *flg = PETSC_FALSE;
8566:   } else {
8567:     if (!A->ops->ishermitian) {
8568:       MatType mattype;
8569:       MatGetType(A,&mattype);
8570:       SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Matrix of type <%s> does not support checking for hermitian",mattype);
8571:     }
8572:     (*A->ops->ishermitian)(A,tol,flg);
8573:   }
8574:   return(0);
8575: }

8577: /*@
8578:    MatIsSymmetricKnown - Checks the flag on the matrix to see if it is symmetric.

8580:    Not Collective

8582:    Input Parameter:
8583: .  A - the matrix to check

8585:    Output Parameters:
8586: +  set - if the symmetric flag is set (this tells you if the next flag is valid)
8587: -  flg - the result

8589:    Level: advanced

8591:    Concepts: matrix^symmetry

8593:    Note: Does not check the matrix values directly, so this may return unknown (set = PETSC_FALSE). Use MatIsSymmetric()
8594:          if you want it explicitly checked

8596: .seealso: MatTranspose(), MatIsTranspose(), MatIsHermitian(), MatIsStructurallySymmetric(), MatSetOption(), MatIsSymmetric()
8597: @*/
8598: PetscErrorCode MatIsSymmetricKnown(Mat A,PetscBool  *set,PetscBool  *flg)
8599: {
8604:   if (A->symmetric_set) {
8605:     *set = PETSC_TRUE;
8606:     *flg = A->symmetric;
8607:   } else {
8608:     *set = PETSC_FALSE;
8609:   }
8610:   return(0);
8611: }
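
/*
   Editor's usage sketch (not part of the original source): querying the cached symmetry flag first
   and only falling back to the (possibly expensive) explicit test when the flag has not been set.
.vb
     Mat       A;
     PetscBool set,flg;

     MatIsSymmetricKnown(A,&set,&flg);    // cheap: only inspects the stored flag
     if (!set) {
       MatIsSymmetric(A,0.0,&flg);        // explicit comparison with the transpose
     }
     // flg now indicates whether A is (numerically) symmetric
.ve
*/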

8613: /*@
8614:    MatIsHermitianKnown - Checks the flag on the matrix to see if it is hermitian.

8616:    Not Collective

8618:    Input Parameter:
8619: .  A - the matrix to check

8621:    Output Parameters:
8622: +  set - if the hermitian flag is set (this tells you if the next flag is valid)
8623: -  flg - the result

8625:    Level: advanced

8627:    Concepts: matrix^symmetry

8629:    Note: Does not check the matrix values directly, so this may return unknown (set = PETSC_FALSE). Use MatIsHermitian()
8630:          if you want it explicitly checked

8632: .seealso: MatTranspose(), MatIsTranspose(), MatIsHermitian(), MatIsStructurallySymmetric(), MatSetOption(), MatIsSymmetric()
8633: @*/
8634: PetscErrorCode MatIsHermitianKnown(Mat A,PetscBool  *set,PetscBool  *flg)
8635: {
8640:   if (A->hermitian_set) {
8641:     *set = PETSC_TRUE;
8642:     *flg = A->hermitian;
8643:   } else {
8644:     *set = PETSC_FALSE;
8645:   }
8646:   return(0);
8647: }

8649: /*@
8650:    MatIsStructurallySymmetric - Test whether a matrix is structurally symmetric

8652:    Collective on Mat

8654:    Input Parameter:
8655: .  A - the matrix to test

8657:    Output Parameter:
8658: .  flg - the result

8660:    Level: intermediate

8662:    Concepts: matrix^symmetry

8664: .seealso: MatTranspose(), MatIsTranspose(), MatIsHermitian(), MatIsSymmetric(), MatSetOption()
8665: @*/
8666: PetscErrorCode MatIsStructurallySymmetric(Mat A,PetscBool  *flg)
8667: {

8673:   if (!A->structurally_symmetric_set) {
8674:     if (!A->ops->isstructurallysymmetric) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Matrix does not support checking for structural symmetric");
8675:     (*A->ops->isstructurallysymmetric)(A,&A->structurally_symmetric);

8677:     A->structurally_symmetric_set = PETSC_TRUE;
8678:   }
8679:   *flg = A->structurally_symmetric;
8680:   return(0);
8681: }

8683: /*@
8684:    MatStashGetInfo - Gets how many values are currently in the matrix stash, i.e. need
8685:        to be communicated to other processors during the MatAssemblyBegin/End() process

8687:     Not collective

8689:    Input Parameter:
8690: .   mat - the matrix

8692:    Output Parameters:
8693: +   nstash   - the size of the stash
8694: .   reallocs - the number of additional mallocs incurred.
8695: .   bnstash   - the size of the block stash
8696: -   breallocs - the number of additional mallocs incurred in the block stash

8698:    Level: advanced

8700: .seealso: MatAssemblyBegin(), MatAssemblyEnd(), Mat, MatStashSetInitialSize()

8702: @*/
8703: PetscErrorCode MatStashGetInfo(Mat mat,PetscInt *nstash,PetscInt *reallocs,PetscInt *bnstash,PetscInt *breallocs)
8704: {

8708:   MatStashGetInfo_Private(&mat->stash,nstash,reallocs);
8709:   MatStashGetInfo_Private(&mat->bstash,bnstash,breallocs);
8710:   return(0);
8711: }

8713: /*@C
8714:    MatCreateVecs - Get vector(s) compatible with the matrix, i.e. with the same
8715:      parallel layout

8717:    Collective on Mat

8719:    Input Parameter:
8720: .  mat - the matrix

8722:    Output Parameters:
8723: +   right - (optional) vector that the matrix can be multiplied against
8724: -   left - (optional) vector that the matrix vector product can be stored in

8726:    Notes:
8727:     The blocksize of the returned vectors is determined by the row and column block sizes set with MatSetBlockSizes() or the single blocksize (same for both) set by MatSetBlockSize().

8729:     These are new vectors which are not owned by the Mat; they should be destroyed with VecDestroy() when no longer needed.

8731:   Level: advanced

8733: .seealso: MatCreate(), VecDestroy()
8734: @*/
8735: PetscErrorCode MatCreateVecs(Mat mat,Vec *right,Vec *left)
8736: {

8742:   if (mat->ops->getvecs) {
8743:     (*mat->ops->getvecs)(mat,right,left);
8744:   } else {
8745:     PetscInt rbs,cbs;
8746:     MatGetBlockSizes(mat,&rbs,&cbs);
8747:     if (right) {
8748:       if (mat->cmap->n < 0) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"PetscLayout for columns not yet setup");
8749:       VecCreate(PetscObjectComm((PetscObject)mat),right);
8750:       VecSetSizes(*right,mat->cmap->n,PETSC_DETERMINE);
8751:       VecSetBlockSize(*right,cbs);
8752:       VecSetType(*right,VECSTANDARD);
8753:       PetscLayoutReference(mat->cmap,&(*right)->map);
8754:     }
8755:     if (left) {
8756:       if (mat->rmap->n < 0) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"PetscLayout for rows not yet setup");
8757:       VecCreate(PetscObjectComm((PetscObject)mat),left);
8758:       VecSetSizes(*left,mat->rmap->n,PETSC_DETERMINE);
8759:       VecSetBlockSize(*left,rbs);
8760:       VecSetType(*left,VECSTANDARD);
8761:       PetscLayoutReference(mat->rmap,&(*left)->map);
8762:     }
8763:   }
8764:   return(0);
8765: }
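
/*
   Editor's usage sketch (not part of the original source): creating work vectors whose layouts are
   guaranteed to be compatible with the matrix, as described above.  A is assumed to be set up.
.vb
     Mat A;
     Vec x,b;

     MatCreateVecs(A,&x,&b);   // x matches the columns of A (right), b matches the rows (left)
     MatMult(A,x,b);           // layouts are compatible by construction
     VecDestroy(&x);
     VecDestroy(&b);
.ve
*/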

8767: /*@C
8768:    MatFactorInfoInitialize - Initializes a MatFactorInfo data structure
8769:      with default values.

8771:    Not Collective

8773:    Input Parameter:
8774: .    info - the MatFactorInfo data structure


8777:    Notes: The solvers are generally used through the KSP and PC objects, for example
8778:           PCLU, PCILU, PCCHOLESKY, PCICC

8780:    Level: developer

8782: .seealso: MatFactorInfo

8784:     Developer Note: the Fortran interface is not autogenerated as the f90
8785:     interface definition cannot be generated correctly [due to MatFactorInfo]

8787: @*/

8789: PetscErrorCode MatFactorInfoInitialize(MatFactorInfo *info)
8790: {

8794:   PetscMemzero(info,sizeof(MatFactorInfo));
8795:   return(0);
8796: }

8798: /*@
8799:    MatFactorSetSchurIS - Set indices corresponding to the Schur complement

8801:    Collective on Mat

8803:    Input Parameters:
8804: +  mat - the factored matrix
8805: -  is - the index set defining the Schur indices (0-based)

8807:    Notes:

8809:    Level: developer

8811:    Concepts:

8813: .seealso: MatGetFactor(), MatFactorSetSchurIS(), MatFactorRestoreSchurComplement(), MatFactorCreateSchurComplement()

8815: @*/
8816: PetscErrorCode MatFactorSetSchurIS(Mat mat,IS is)
8817: {
8818:   PetscErrorCode ierr,(*f)(Mat,IS);

8826:   if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Only for factored matrix");
8827:   PetscObjectQueryFunction((PetscObject)mat,"MatFactorSetSchurIS_C",&f);
8828:   if (!f) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"The selected MatSolverPackage does not support Schur complement computation. You should use MATSOLVERMUMPS or MATSOLVERMKL_PARDISO");
8829:   if (mat->schur) {
8830:     MatDestroy(&mat->schur);
8831:   }
8832:   (*f)(mat,is);
8833:   if (!mat->schur) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_PLIB,"Schur complement has not been created");
8834:   MatFactorSetUpInPlaceSchur_Private(mat);
8835:   return(0);
8836: }
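
/*
   Editor's usage sketch (not part of the original source): a typical Schur complement workflow with
   a solver package that supports it (MUMPS is one such package, as the error message above notes).
   The index set schur_is is assumed to contain the global 0-based indices of the Schur block, and a
   natural ordering is used here purely for illustration.
.vb
     Mat           A,F,S;
     IS            schur_is,rperm,cperm;
     MatFactorInfo info;

     MatGetFactor(A,MATSOLVERMUMPS,MAT_FACTOR_LU,&F);
     MatFactorSetSchurIS(F,schur_is);              // must precede the symbolic factorization
     MatFactorInfoInitialize(&info);
     MatGetOrdering(A,MATORDERINGNATURAL,&rperm,&cperm);
     MatLUFactorSymbolic(F,A,rperm,cperm,&info);
     MatLUFactorNumeric(F,A,&info);
     MatFactorCreateSchurComplement(F,&S,NULL);    // private copy of the Schur complement
     MatDestroy(&S);                               // destroy when no longer needed
     ISDestroy(&rperm);
     ISDestroy(&cperm);
.ve
*/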

8838: /*@
8839:   MatFactorCreateSchurComplement - Create a Schur complement matrix object using Schur data computed during the factorization step

8841:    Logically Collective on Mat

8843:    Input Parameters:
8844: +  F - the factored matrix obtained by calling MatGetFactor() from PETSc-MUMPS interface
8845: .  S - location where to return the Schur complement, can be NULL
8846: -  status - the status of the Schur complement matrix, can be NULL

8848:    Notes:
8849:    The routine provides a copy of the Schur matrix stored within the solver data structures.
8850:    The caller must destroy the object when it is no longer needed.
8851:    If MatFactorInvertSchurComplement() has been called, the routine returns the inverse of the Schur complement instead.

8853:    Level: advanced

8855:    References:

8857: .seealso: MatGetFactor(), MatFactorSetSchurIS(), MatFactorGetSchurComplement(), MatFactorSchurStatus
8858: @*/
8859: PetscErrorCode MatFactorCreateSchurComplement(Mat F,Mat* S,MatFactorSchurStatus* status)
8860: {

8867:   if (S) {
8868:     PetscErrorCode (*f)(Mat,Mat*);

8870:     PetscObjectQueryFunction((PetscObject)F,"MatFactorCreateSchurComplement_C",&f);
8871:     if (f) {
8872:       (*f)(F,S);
8873:     } else {
8874:       MatDuplicate(F->schur,MAT_COPY_VALUES,S);
8875:     }
8876:   }
8877:   if (status) *status = F->schur_status;
8878:   return(0);
8879: }

8881: /*@
8882:   MatFactorGetSchurComplement - Get a Schur complement matrix object using the current Schur data

8884:    Logically Collective on Mat

8886:    Input Parameters:
8887: +  F - the factored matrix obtained by calling MatGetFactor()
8888: .  *S - location where to return the Schur complement, can be NULL
8889: -  status - the status of the Schur complement matrix, can be NULL

8891:    Notes:
8892:    Schur complement mode is currently implemented for sequential matrices.
8893:    The routine returns the Schur complement stored within the data structures of the solver.
8894:    If MatFactorInvertSchurComplement has been called, the returned matrix is actually the inverse of the Schur complement.
8895:    The returned matrix should not be destroyed; the caller should call MatFactorRestoreSchurComplement when the object is no longer needed.

8897:    Level: advanced

8899:    References:

8901: .seealso: MatGetFactor(), MatFactorSetSchurIS(), MatFactorRestoreSchurComplement(), MatFactorCreateSchurComplement(), MatFactorSchurStatus
8902: @*/
8903: PetscErrorCode MatFactorGetSchurComplement(Mat F,Mat* S,MatFactorSchurStatus* status)
8904: {
8909:   if (S) *S = F->schur;
8910:   if (status) *status = F->schur_status;
8911:   return(0);
8912: }

8914: /*@
8915:   MatFactorRestoreSchurComplement - Restore the Schur complement matrix object obtained from a call to MatFactorGetSchurComplement

8917:    Logically Collective on Mat

8919:    Input Parameters:
8920: +  F - the factored matrix obtained by calling MatGetFactor()
8921: .  *S - location where the Schur complement is stored
8922: -  status - the status of the Schur complement matrix (see MatFactorSchurStatus)

8924:    Notes:

8926:    Level: advanced

8928:    References:

8930: .seealso: MatGetFactor(), MatFactorSetSchurIS(), MatFactorRestoreSchurComplement(), MatFactorCreateSchurComplement(), MatFactorSchurStatus
8931: @*/
8932: PetscErrorCode MatFactorRestoreSchurComplement(Mat F,Mat* S,MatFactorSchurStatus status)
8933: {

8938:   if (S) {
8940:     *S = NULL;
8941:   }
8942:   F->schur_status = status;
8943:   MatFactorUpdateSchurStatus_Private(F);
8944:   return(0);
8945: }

8947: /*@
8948:   MatFactorSolveSchurComplementTranspose - Solve the transpose of the Schur complement system computed during the factorization step

8950:    Logically Collective on Mat

8952:    Input Parameters:
8953: +  F - the factored matrix obtained by calling MatGetFactor()
8954: .  rhs - location where the right hand side of the Schur complement system is stored
8955: -  sol - location where the solution of the Schur complement system has to be returned

8957:    Notes:
8958:    The sizes of the vectors should match the size of the Schur complement

8960:    Level: advanced

8962:    References:

8964: .seealso: MatGetFactor(), MatFactorSetSchurIS()
8965: @*/
8966: PetscErrorCode MatFactorSolveSchurComplementTranspose(Mat F, Vec rhs, Vec sol)
8967: {

8979:   MatFactorFactorizeSchurComplement(F);
8980:   switch (F->schur_status) {
8981:   case MAT_FACTOR_SCHUR_FACTORED:
8982:     MatSolveTranspose(F->schur,rhs,sol);
8983:     break;
8984:   case MAT_FACTOR_SCHUR_INVERTED:
8985:     MatMultTranspose(F->schur,rhs,sol);
8986:     break;
8987:   default:
8988:     SETERRQ1(PetscObjectComm((PetscObject)F),PETSC_ERR_SUP,"Unhandled MatFactorSchurStatus %D",F->schur_status);
8989:     break;
8990:   }
8991:   return(0);
8992: }

8994: /*@
8995:   MatFactorSolveSchurComplement - Solve the Schur complement system computed during the factorization step

8997:    Logically Collective on Mat

8999:    Input Parameters:
9000: +  F - the factored matrix obtained by calling MatGetFactor()
9001: .  rhs - location where the right hand side of the Schur complement system is stored
9002: -  sol - location where the solution of the Schur complement system has to be returned

9004:    Notes:
9005:    The sizes of the vectors should match the size of the Schur complement

9007:    Level: advanced

9009:    References:

9011: .seealso: MatGetFactor(), MatFactorSetSchurIS()
9012: @*/
9013: PetscErrorCode MatFactorSolveSchurComplement(Mat F, Vec rhs, Vec sol)
9014: {

9026:   MatFactorFactorizeSchurComplement(F);
9027:   switch (F->schur_status) {
9028:   case MAT_FACTOR_SCHUR_FACTORED:
9029:     MatSolve(F->schur,rhs,sol);
9030:     break;
9031:   case MAT_FACTOR_SCHUR_INVERTED:
9032:     MatMult(F->schur,rhs,sol);
9033:     break;
9034:   default:
9035:     SETERRQ1(PetscObjectComm((PetscObject)F),PETSC_ERR_SUP,"Unhandled MatFactorSchurStatus %D",F->schur_status);
9036:     break;
9037:   }
9038:   return(0);
9039: }
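
/*
   Editor's usage sketch (not part of the original source): solving with the Schur complement after
   a factorization in which MatFactorSetSchurIS() was used.  F, rhs and sol are assumed to exist,
   with the vectors sized to match the Schur complement.
.vb
     Mat F;         // factored matrix with an attached Schur complement
     Vec rhs,sol;   // sized to match the Schur complement

     MatFactorSolveSchurComplement(F,rhs,sol);   // factorizes the Schur block on first use, then solves
.ve
*/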

9041: /*@
9042:   MatFactorInvertSchurComplement - Invert the Schur complement matrix computed during the factorization step

9044:    Logically Collective on Mat

9046:    Input Parameters:
9047: +  F - the factored matrix obtained by calling MatGetFactor()

9049:    Notes:

9051:    Level: advanced

9053:    References:

9055: .seealso: MatGetFactor(), MatFactorSetSchurIS()
9056: @*/
9057: PetscErrorCode MatFactorInvertSchurComplement(Mat F)
9058: {

9064:   if (F->schur_status == MAT_FACTOR_SCHUR_INVERTED) return(0);
9065:   MatFactorFactorizeSchurComplement(F);
9066:   MatFactorInvertSchurComplement_Private(F);
9067:   F->schur_status = MAT_FACTOR_SCHUR_INVERTED;
9068:   return(0);
9069: }

9071: /*@
9072:   MatFactorFactorizeSchurComplement - Factorize the Schur complement matrix computed during the factorization step

9074:    Logically Collective on Mat

9076:    Input Parameters:
9077: +  F - the factored matrix obtained by calling MatGetFactor()

9079:    Notes:

9081:    Level: advanced

9083:    References:

9085: .seealso: MatGetFactor(), MatMumpsSetSchurIS()
9086: @*/
9087: PetscErrorCode MatFactorFactorizeSchurComplement(Mat F)
9088: {

9094:   if (F->schur_status == MAT_FACTOR_SCHUR_INVERTED || F->schur_status == MAT_FACTOR_SCHUR_FACTORED) return(0);
9095:   MatFactorFactorizeSchurComplement_Private(F);
9096:   F->schur_status = MAT_FACTOR_SCHUR_FACTORED;
9097:   return(0);
9098: }

9100: /*@
9101:    MatPtAP - Creates the matrix product C = P^T * A * P

9103:    Neighbor-wise Collective on Mat

9105:    Input Parameters:
9106: +  A - the matrix
9107: .  P - the projection matrix
9108: .  scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
9109: -  fill - expected fill as ratio of nnz(C)/(nnz(A) + nnz(P)); use PETSC_DEFAULT if you do not have a good estimate.
9110:           If the result is a dense matrix this is irrelevant.

9112:    Output Parameters:
9113: .  C - the product matrix

9115:    Notes:
9116:    C will be created and must be destroyed by the user with MatDestroy().

9118:    This routine is currently only implemented for pairs of AIJ matrices and classes
9119:    which inherit from AIJ.

9121:    Level: intermediate

9123: .seealso: MatPtAPSymbolic(), MatPtAPNumeric(), MatMatMult(), MatRARt()
9124: @*/
9125: PetscErrorCode MatPtAP(Mat A,Mat P,MatReuse scall,PetscReal fill,Mat *C)
9126: {
9128:   PetscErrorCode (*fA)(Mat,Mat,MatReuse,PetscReal,Mat*);
9129:   PetscErrorCode (*fP)(Mat,Mat,MatReuse,PetscReal,Mat*);
9130:   PetscErrorCode (*ptap)(Mat,Mat,MatReuse,PetscReal,Mat*)=NULL;
9131:   PetscBool      viatranspose=PETSC_FALSE,viamatmatmatmult=PETSC_FALSE;

9134:   PetscOptionsGetBool(((PetscObject)A)->options,((PetscObject)A)->prefix,"-matptap_viatranspose",&viatranspose,NULL);
9135:   PetscOptionsGetBool(((PetscObject)A)->options,((PetscObject)A)->prefix,"-matptap_viamatmatmatmult",&viamatmatmatmult,NULL);

9139:   MatCheckPreallocated(A,1);
9140:   if (scall == MAT_INPLACE_MATRIX) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Inplace product not supported");
9141:   if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9142:   if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9145:   MatCheckPreallocated(P,2);
9146:   if (!P->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9147:   if (P->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");

9149:   if (A->rmap->N!= A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix A must be square, %D != %D",A->rmap->N,A->cmap->N);
9150:   if (P->rmap->N != A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",P->rmap->N,A->cmap->N);
9151:   if (fill == PETSC_DEFAULT || fill == PETSC_DECIDE) fill = 2.0;
9152:   if (fill < 1.0) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Expected fill=%g must be >= 1.0",(double)fill);

9154:   if (scall == MAT_REUSE_MATRIX) {
9157:     if (viatranspose || viamatmatmatmult) {
9158:       Mat Pt;
9159:       MatTranspose(P,MAT_INITIAL_MATRIX,&Pt);
9160:       if (viamatmatmatmult) {
9161:         MatMatMatMult(Pt,A,P,scall,fill,C);
9162:       } else {
9163:         Mat AP;
9164:         MatMatMult(A,P,MAT_INITIAL_MATRIX,fill,&AP);
9165:         MatMatMult(Pt,AP,scall,fill,C);
9166:         MatDestroy(&AP);
9167:       }
9168:       MatDestroy(&Pt);
9169:     } else {
9170:       PetscLogEventBegin(MAT_PtAP,A,P,0,0);
9171:       PetscLogEventBegin(MAT_PtAPNumeric,A,P,0,0);
9172:       (*(*C)->ops->ptapnumeric)(A,P,*C);
9173:       PetscLogEventEnd(MAT_PtAPNumeric,A,P,0,0);
9174:       PetscLogEventEnd(MAT_PtAP,A,P,0,0);
9175:     }
9176:     return(0);
9177:   }

9179:   if (fill == PETSC_DEFAULT || fill == PETSC_DECIDE) fill = 2.0;
9180:   if (fill < 1.0) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Expected fill=%g must be >= 1.0",(double)fill);

9182:   fA = A->ops->ptap;
9183:   fP = P->ops->ptap;
9184:   if (fP == fA) {
9185:     if (!fA) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"MatPtAP not supported for A of type %s",((PetscObject)A)->type_name);
9186:     ptap = fA;
9187:   } else {
9188:     /* dispatch based on the type of A and P from their PetscObject's PetscFunctionLists. */
9189:     char ptapname[256];
9190:     PetscStrcpy(ptapname,"MatPtAP_");
9191:     PetscStrcat(ptapname,((PetscObject)A)->type_name);
9192:     PetscStrcat(ptapname,"_");
9193:     PetscStrcat(ptapname,((PetscObject)P)->type_name);
9194:     PetscStrcat(ptapname,"_C"); /* e.g., ptapname = "MatPtAP_seqdense_seqaij_C" */
9195:     PetscObjectQueryFunction((PetscObject)P,ptapname,&ptap);
9196:     if (!ptap) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_INCOMP,"MatPtAP requires A, %s, to be compatible with P, %s",((PetscObject)A)->type_name,((PetscObject)P)->type_name);
9197:   }

9199:   if (viatranspose || viamatmatmatmult) {
9200:     Mat Pt;
9201:     MatTranspose(P,MAT_INITIAL_MATRIX,&Pt);
9202:     if (viamatmatmatmult) {
9203:       MatMatMatMult(Pt,A,P,scall,fill,C);
9204:       PetscInfo(*C,"MatPtAP via MatMatMatMult\n");
9205:     } else {
9206:       Mat AP;
9207:       MatMatMult(A,P,MAT_INITIAL_MATRIX,fill,&AP);
9208:       MatMatMult(Pt,AP,scall,fill,C);
9209:       MatDestroy(&AP);
9210:       PetscInfo(*C,"MatPtAP via MatTranspose and MatMatMult\n");
9211:     }
9212:     MatDestroy(&Pt);
9213:   } else {
9214:     PetscLogEventBegin(MAT_PtAP,A,P,0,0);
9215:     (*ptap)(A,P,scall,fill,C);
9216:     PetscLogEventEnd(MAT_PtAP,A,P,0,0);
9217:   }
9218:   return(0);
9219: }
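
/*
   Editor's usage sketch (not part of the original source): forming a Galerkin coarse operator
   C = P^T * A * P and reusing it when A changes numerically but keeps its nonzero structure.
.vb
     Mat A,P,C;

     MatPtAP(A,P,MAT_INITIAL_MATRIX,PETSC_DEFAULT,&C);   // creates C
     // ... update the entries of A, same nonzero pattern ...
     MatPtAP(A,P,MAT_REUSE_MATRIX,PETSC_DEFAULT,&C);     // recomputes into the existing C
     MatDestroy(&C);
.ve
*/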

9221: /*@
9222:    MatPtAPNumeric - Computes the matrix product C = P^T * A * P

9224:    Neighbor-wise Collective on Mat

9226:    Input Parameters:
9227: +  A - the matrix
9228: -  P - the projection matrix

9230:    Output Parameters:
9231: .  C - the product matrix

9233:    Notes:
9234:    C must have been created by calling MatPtAPSymbolic() and must be destroyed by
9235:    the user using MatDestroy().

9237:    This routine is currently only implemented for pairs of AIJ matrices and classes
9238:    which inherit from AIJ.  C will be of type MATAIJ.

9240:    Level: intermediate

9242: .seealso: MatPtAP(), MatPtAPSymbolic(), MatMatMultNumeric()
9243: @*/
9244: PetscErrorCode MatPtAPNumeric(Mat A,Mat P,Mat C)
9245: {

9251:   if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9252:   if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9255:   MatCheckPreallocated(P,2);
9256:   if (!P->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9257:   if (P->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9260:   MatCheckPreallocated(C,3);
9261:   if (C->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9262:   if (P->cmap->N!=C->rmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",P->cmap->N,C->rmap->N);
9263:   if (P->rmap->N!=A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",P->rmap->N,A->cmap->N);
9264:   if (A->rmap->N!=A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix 'A' must be square, %D != %D",A->rmap->N,A->cmap->N);
9265:   if (P->cmap->N!=C->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",P->cmap->N,C->cmap->N);
9266:   MatCheckPreallocated(A,1);

9268:   PetscLogEventBegin(MAT_PtAPNumeric,A,P,0,0);
9269:   (*C->ops->ptapnumeric)(A,P,C);
9270:   PetscLogEventEnd(MAT_PtAPNumeric,A,P,0,0);
9271:   return(0);
9272: }

9274: /*@
9275:    MatPtAPSymbolic - Creates the (i,j) structure of the matrix product C = P^T * A * P

9277:    Neighbor-wise Collective on Mat

9279:    Input Parameters:
9280: +  A - the matrix
9281: -  P - the projection matrix

9283:    Output Parameters:
9284: .  C - the (i,j) structure of the product matrix

9286:    Notes:
9287:    C will be created and must be destroyed by the user with MatDestroy().

9289:    This routine is currently only implemented for pairs of SeqAIJ matrices and classes
9290:    which inherit from SeqAIJ.  C will be of type MATSEQAIJ.  The product is computed using
9291:    this (i,j) structure by calling MatPtAPNumeric().

9293:    Level: intermediate

9295: .seealso: MatPtAP(), MatPtAPNumeric(), MatMatMultSymbolic()
9296: @*/
9297: PetscErrorCode MatPtAPSymbolic(Mat A,Mat P,PetscReal fill,Mat *C)
9298: {

9304:   if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9305:   if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9306:   if (fill <1.0) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Expected fill=%g must be >= 1.0",(double)fill);
9309:   MatCheckPreallocated(P,2);
9310:   if (!P->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9311:   if (P->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");

9314:   if (P->rmap->N!=A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",P->rmap->N,A->cmap->N);
9315:   if (A->rmap->N!=A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix 'A' must be square, %D != %D",A->rmap->N,A->cmap->N);
9316:   MatCheckPreallocated(A,1);
9317:   PetscLogEventBegin(MAT_PtAPSymbolic,A,P,0,0);
9318:   (*A->ops->ptapsymbolic)(A,P,fill,C);
9319:   PetscLogEventEnd(MAT_PtAPSymbolic,A,P,0,0);

9321:   /* MatSetBlockSize(*C,A->rmap->bs); NO! this is not always true -ma */
9322:   return(0);
9323: }

9325: /*@
9326:    MatRARt - Creates the matrix product C = R * A * R^T

9328:    Neighbor-wise Collective on Mat

9330:    Input Parameters:
9331: +  A - the matrix
9332: .  R - the projection matrix
9333: .  scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
9334: -  fill - expected fill as ratio of nnz(C)/nnz(A); use PETSC_DEFAULT if you do not have a good estimate.
9335:           If the result is a dense matrix this is irrelevant.

9337:    Output Parameters:
9338: .  C - the product matrix

9340:    Notes:
9341:    C will be created and must be destroyed by the user with MatDestroy().

9343:    This routine is currently only implemented for pairs of AIJ matrices and classes
9344:    which inherit from AIJ. Due to PETSc sparse matrix block row distribution among processes,
9345:    parallel MatRARt is implemented via explicit transpose of R, which could be very expensive.
9346:    We recommend using MatPtAP().

9348:    Level: intermediate

9350: .seealso: MatRARtSymbolic(), MatRARtNumeric(), MatMatMult(), MatPtAP()
9351: @*/
9352: PetscErrorCode MatRARt(Mat A,Mat R,MatReuse scall,PetscReal fill,Mat *C)
9353: {

9359:   if (scall == MAT_INPLACE_MATRIX) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Inplace product not supported");
9360:   if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9361:   if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9364:   MatCheckPreallocated(R,2);
9365:   if (!R->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9366:   if (R->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9368:   if (R->cmap->N!=A->rmap->N) SETERRQ2(PetscObjectComm((PetscObject)R),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",R->cmap->N,A->rmap->N);

9370:   if (fill == PETSC_DEFAULT || fill == PETSC_DECIDE) fill = 2.0;
9371:   if (fill < 1.0) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Expected fill=%g must be >= 1.0",(double)fill);
9372:   MatCheckPreallocated(A,1);

9374: