Actual source code: matrix.c

petsc-3.8.3 2017-12-09

  2: /*
  3:    This is where the abstract matrix operations are defined
  4: */

  6:  #include <petsc/private/matimpl.h>
  7:  #include <petsc/private/isimpl.h>
  8:  #include <petsc/private/vecimpl.h>

 10: /* Logging support */
 11: PetscClassId MAT_CLASSID;
 12: PetscClassId MAT_COLORING_CLASSID;
 13: PetscClassId MAT_FDCOLORING_CLASSID;
 14: PetscClassId MAT_TRANSPOSECOLORING_CLASSID;

 16: PetscLogEvent MAT_Mult, MAT_Mults, MAT_MultConstrained, MAT_MultAdd, MAT_MultTranspose;
 17: PetscLogEvent MAT_MultTransposeConstrained, MAT_MultTransposeAdd, MAT_Solve, MAT_Solves, MAT_SolveAdd, MAT_SolveTranspose, MAT_MatSolve;
 18: PetscLogEvent MAT_SolveTransposeAdd, MAT_SOR, MAT_ForwardSolve, MAT_BackwardSolve, MAT_LUFactor, MAT_LUFactorSymbolic;
 19: PetscLogEvent MAT_LUFactorNumeric, MAT_CholeskyFactor, MAT_CholeskyFactorSymbolic, MAT_CholeskyFactorNumeric, MAT_ILUFactor;
 20: PetscLogEvent MAT_ILUFactorSymbolic, MAT_ICCFactorSymbolic, MAT_Copy, MAT_Convert, MAT_Scale, MAT_AssemblyBegin;
 21: PetscLogEvent MAT_AssemblyEnd, MAT_SetValues, MAT_GetValues, MAT_GetRow, MAT_GetRowIJ, MAT_CreateSubMats, MAT_GetOrdering, MAT_RedundantMat, MAT_GetSeqNonzeroStructure;
 22: PetscLogEvent MAT_IncreaseOverlap, MAT_Partitioning, MAT_Coarsen, MAT_ZeroEntries, MAT_Load, MAT_View, MAT_AXPY, MAT_FDColoringCreate;
 23: PetscLogEvent MAT_FDColoringSetUp, MAT_FDColoringApply,MAT_Transpose,MAT_FDColoringFunction, MAT_CreateSubMat;
 24: PetscLogEvent MAT_TransposeColoringCreate;
 25: PetscLogEvent MAT_MatMult, MAT_MatMultSymbolic, MAT_MatMultNumeric;
 26: PetscLogEvent MAT_PtAP, MAT_PtAPSymbolic, MAT_PtAPNumeric,MAT_RARt, MAT_RARtSymbolic, MAT_RARtNumeric;
 27: PetscLogEvent MAT_MatTransposeMult, MAT_MatTransposeMultSymbolic, MAT_MatTransposeMultNumeric;
 28: PetscLogEvent MAT_TransposeMatMult, MAT_TransposeMatMultSymbolic, MAT_TransposeMatMultNumeric;
 29: PetscLogEvent MAT_MatMatMult, MAT_MatMatMultSymbolic, MAT_MatMatMultNumeric;
 30: PetscLogEvent MAT_MultHermitianTranspose,MAT_MultHermitianTransposeAdd;
 31: PetscLogEvent MAT_Getsymtranspose, MAT_Getsymtransreduced, MAT_Transpose_SeqAIJ, MAT_GetBrowsOfAcols;
 32: PetscLogEvent MAT_GetBrowsOfAocols, MAT_Getlocalmat, MAT_Getlocalmatcondensed, MAT_Seqstompi, MAT_Seqstompinum, MAT_Seqstompisym;
 33: PetscLogEvent MAT_Applypapt, MAT_Applypapt_numeric, MAT_Applypapt_symbolic, MAT_GetSequentialNonzeroStructure;
 34: PetscLogEvent MAT_GetMultiProcBlock;
 35: PetscLogEvent MAT_CUSPCopyToGPU, MAT_CUSPARSECopyToGPU, MAT_SetValuesBatch, MAT_SetValuesBatchI, MAT_SetValuesBatchII, MAT_SetValuesBatchIII, MAT_SetValuesBatchIV;
 36: PetscLogEvent MAT_ViennaCLCopyToGPU;
 37: PetscLogEvent MAT_Merge,MAT_Residual,MAT_SetRandom;
 38: PetscLogEvent MATCOLORING_Apply,MATCOLORING_Comm,MATCOLORING_Local,MATCOLORING_ISCreate,MATCOLORING_SetUp,MATCOLORING_Weights;

 40: const char *const MatFactorTypes[] = {"NONE","LU","CHOLESKY","ILU","ICC","ILUDT","MatFactorType","MAT_FACTOR_",0};

 42: /*@
 43:    MatSetRandom - Sets all components of a matrix to random numbers. For sparse matrices that have been preallocated but not assembled, it randomly selects appropriate locations.

 45:    Logically Collective on Mat

 47:    Input Parameters:
 48: +  x  - the matrix
 49: -  rctx - the random number context, formed by PetscRandomCreate(), or NULL,
 50:           in which case one is created internally.

 52:    Output Parameter:
 53: .  x  - the matrix

 55:    Example of Usage:
 56: .vb
 57:      PetscRandomCreate(PETSC_COMM_WORLD,&rctx);
 58:      MatSetRandom(x,rctx);
 59:      PetscRandomDestroy(&rctx);
 60: .ve

 62:    Level: intermediate

 64:    Concepts: matrix^setting to random
 65:    Concepts: random^matrix

 67: .seealso: MatZeroEntries(), MatSetValues(), PetscRandomCreate(), PetscRandomDestroy()
 68: @*/
 69: PetscErrorCode MatSetRandom(Mat x,PetscRandom rctx)
 70: {
 72:   PetscRandom    randObj = NULL;


 79:   if (!rctx) {
 80:     MPI_Comm comm;
 81:     PetscObjectGetComm((PetscObject)x,&comm);
 82:     PetscRandomCreate(comm,&randObj);
 83:     PetscRandomSetFromOptions(randObj);
 84:     rctx = randObj;
 85:   }

 87:   PetscLogEventBegin(MAT_SetRandom,x,rctx,0,0);
 88:   (*x->ops->setrandom)(x,rctx);
 89:   PetscLogEventEnd(MAT_SetRandom,x,rctx,0,0);

 91:   x->assembled = PETSC_TRUE;
 92:   PetscRandomDestroy(&randObj);
 93:   return(0);
 94: }
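/* Illustrative sketch, not part of matrix.c: fills an already created and preallocated
   matrix A with random entries via an explicit random context, as described in the
   MatSetRandom() manual page above.  The helper name is hypothetical. */
PETSC_UNUSED static PetscErrorCode ExampleMatSetRandom(Mat A)
{
  PetscErrorCode ierr;
  PetscRandom    rctx;

  PetscFunctionBegin;
  ierr = PetscRandomCreate(PetscObjectComm((PetscObject)A),&rctx);CHKERRQ(ierr);
  ierr = PetscRandomSetFromOptions(rctx);CHKERRQ(ierr);
  ierr = MatSetRandom(A,rctx);CHKERRQ(ierr);          /* fill A with random numbers */
  ierr = PetscRandomDestroy(&rctx);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}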

 96: /*@
 97:    MatFactorGetErrorZeroPivot - returns the pivot value that was determined to be zero and the row it occurred in

 99:    Logically Collective on Mat

101:    Input Parameter:
102: .  mat - the factored matrix

104:    Output Parameters:
105: +  pivot - the pivot value computed
106: -  row - the row in which the zero pivot occurred. Note that this row must be interpreted carefully due to row reorderings and to
107:          which processes share the matrix

109:    Level: advanced

111:    Notes: This routine does not work for factorizations done with external packages.
112:    This routine should only be called if MatFactorGetError() returns a value of MAT_FACTOR_NUMERIC_ZEROPIVOT.

114:    This can also be called on non-factored matrices, for example matrices used in SOR.

116: .seealso: MatZeroEntries(), MatFactor(), MatGetFactor(), MatFactorSymbolic(), MatFactorClearError(), MatFactorGetErrorZeroPivot()
117: @*/
118: PetscErrorCode MatFactorGetErrorZeroPivot(Mat mat,PetscReal *pivot,PetscInt *row)
119: {
122:   *pivot = mat->factorerror_zeropivot_value;
123:   *row   = mat->factorerror_zeropivot_row;
124:   return(0);
125: }

127: /*@
128:    MatFactorGetError - gets the error code from a factorization

130:    Logically Collective on Mat

132:    Input Parameter:
133: .  mat - the factored matrix

135:    Output Parameter:
136: .  err  - the error code

138:    Level: advanced

140:    Notes:    This can also be called on non-factored matrices, for example matrices used in SOR.

142: .seealso: MatZeroEntries(), MatFactor(), MatGetFactor(), MatFactorSymbolic(), MatFactorClearError(), MatFactorGetErrorZeroPivot()
143: @*/
144: PetscErrorCode MatFactorGetError(Mat mat,MatFactorError *err)
145: {
148:   *err = mat->factorerrortype;
149:   return(0);
150: }

152: /*@
153:    MatFactorClearError - clears the error code in a factorization

155:    Logically Collective on Mat

157:    Input Parameter:
158: .  mat - the factored matrix

160:    Level: developer

162:    Notes: This can also be called on non-factored matrices, for example matrices used in SOR.

164: .seealso: MatZeroEntries(), MatFactor(), MatGetFactor(), MatFactorSymbolic(), MatFactorGetError(), MatFactorGetErrorZeroPivot()
165: @*/
166: PetscErrorCode MatFactorClearError(Mat mat)
167: {
170:   mat->factorerrortype             = MAT_FACTOR_NOERROR;
171:   mat->factorerror_zeropivot_value = 0.0;
172:   mat->factorerror_zeropivot_row   = 0;
173:   return(0);
174: }
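/* Illustrative sketch, not part of matrix.c: the intended use of the three factor-error
   routines above.  F is assumed to be a matrix that has gone through a numeric
   factorization (e.g. MatGetFactor()/MatLUFactorSymbolic()/MatLUFactorNumeric());
   the helper name is hypothetical. */
PETSC_UNUSED static PetscErrorCode ExampleCheckFactorError(Mat F)
{
  PetscErrorCode ierr;
  MatFactorError err;

  PetscFunctionBegin;
  ierr = MatFactorGetError(F,&err);CHKERRQ(ierr);
  if (err == MAT_FACTOR_NUMERIC_ZEROPIVOT) {
    PetscReal pivot;
    PetscInt  row;
    ierr = MatFactorGetErrorZeroPivot(F,&pivot,&row);CHKERRQ(ierr);
    ierr = PetscPrintf(PetscObjectComm((PetscObject)F),"zero pivot %g in row %D\n",(double)pivot,row);CHKERRQ(ierr);
    ierr = MatFactorClearError(F);CHKERRQ(ierr);      /* reset the error state */
  }
  PetscFunctionReturn(0);
}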

176: static PetscErrorCode MatFindNonzeroRows_Basic(Mat mat,IS *keptrows)
177: {
178:   PetscErrorCode    ierr;
179:   Vec               r,l;
180:   const PetscScalar *al;
181:   PetscInt          i,nz,gnz,N,n;

184:   MatGetSize(mat,&N,NULL);
185:   MatGetLocalSize(mat,&n,NULL);
186:   MatCreateVecs(mat,&r,&l);
187:   VecSet(l,0.0);
188:   VecSetRandom(r,NULL);
189:   MatMult(mat,r,l);
190:   VecGetArrayRead(l,&al);
191:   for (i=0,nz=0;i<n;i++) if (al[i] != 0.0) nz++;
192:   MPIU_Allreduce(&nz,&gnz,1,MPIU_INT,MPI_SUM,PetscObjectComm((PetscObject)mat));
193:   if (gnz != N) {
194:     PetscInt *nzr;
195:     PetscMalloc1(nz,&nzr);
196:     if (nz) { for (i=0,nz=0;i<n;i++) if (al[i] != 0.0) nzr[nz++] = i; }
197:     ISCreateGeneral(PetscObjectComm((PetscObject)mat),nz,nzr,PETSC_OWN_POINTER,keptrows);
198:   } else *keptrows = NULL;
199:   VecRestoreArrayRead(l,&al);
200:   VecDestroy(&l);
201:   VecDestroy(&r);
202:   return(0);
203: }

205: /*@
206:       MatFindNonzeroRows - Locate all rows that are not completely zero in the matrix

208:   Input Parameter:
209: .    mat  - the matrix

211:   Output Parameter:
212: .    keptrows - the rows that are not completely zero

214:   Notes: keptrows is set to NULL if all rows are nonzero.

216:   Level: intermediate

218:  @*/
219: PetscErrorCode MatFindNonzeroRows(Mat mat,IS *keptrows)
220: {

227:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
228:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
229:   if (!mat->ops->findnonzerorows) {
230:     MatFindNonzeroRows_Basic(mat,keptrows);
231:   } else {
232:     (*mat->ops->findnonzerorows)(mat,keptrows);
233:   }
234:   return(0);
235: }

237: /*@
238:       MatFindZeroRows - Locate all rows that are completely zero in the matrix

240:   Input Parameter:
241: .    mat  - the matrix

243:   Output Parameter:
244: .    zerorows - the rows that are completely zero

246:   Notes: zerorows is set to NULL if no rows are zero.

248:   Level: intermediate

250:  @*/
251: PetscErrorCode MatFindZeroRows(Mat mat,IS *zerorows)
252: {
254:   IS keptrows;
255:   PetscInt m, n;


260:   MatFindNonzeroRows(mat, &keptrows);
261:   /* MatFindNonzeroRows sets keptrows to NULL if there are no zero rows.
262:      In keeping with this convention, we set zerorows to NULL if there are no zero
263:      rows. */
264:   if (keptrows == NULL) {
265:     *zerorows = NULL;
266:   } else {
267:     MatGetOwnershipRange(mat,&m,&n);
268:     ISComplement(keptrows,m,n,zerorows);
269:     ISDestroy(&keptrows);
270:   }
271:   return(0);
272: }
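/* Illustrative sketch, not part of matrix.c: report the completely zero rows of an
   assembled matrix; MatFindZeroRows() returns a NULL IS when no rows are zero.
   The helper name is hypothetical. */
PETSC_UNUSED static PetscErrorCode ExampleReportZeroRows(Mat A)
{
  PetscErrorCode ierr;
  IS             zerorows;

  PetscFunctionBegin;
  ierr = MatFindZeroRows(A,&zerorows);CHKERRQ(ierr);
  if (zerorows) {
    ierr = ISView(zerorows,PETSC_VIEWER_STDOUT_(PetscObjectComm((PetscObject)A)));CHKERRQ(ierr);
    ierr = ISDestroy(&zerorows);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}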

274: /*@
275:    MatGetDiagonalBlock - Returns the part of the matrix associated with the on-process coupling

277:    Not Collective

279:    Input Parameter:
280: .   A - the matrix

282:    Output Parameter:
283: .   a - the diagonal part (which is a SEQUENTIAL matrix)

285:    Notes: see the manual page for MatCreateAIJ() for more information on the "diagonal part" of the matrix.
286:           Use caution, as the reference count on the returned matrix is not incremented and it is used as
287:           part of the containing MPI Mat's normal operation.

289:    Level: advanced

291: @*/
292: PetscErrorCode MatGetDiagonalBlock(Mat A,Mat *a)
293: {

300:   if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
301:   if (!A->ops->getdiagonalblock) {
302:     PetscMPIInt size;
303:     MPI_Comm_size(PetscObjectComm((PetscObject)A),&size);
304:     if (size == 1) {
305:       *a = A;
306:       return(0);
307:     } else SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Not coded for this matrix type");
308:   }
309:   (*A->ops->getdiagonalblock)(A,a);
310:   return(0);
311: }
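/* Illustrative sketch, not part of matrix.c: use the sequential on-process diagonal block
   of a (possibly parallel) matrix; since the reference count is not incremented, the block
   must not be destroyed by the caller.  The helper name is hypothetical. */
PETSC_UNUSED static PetscErrorCode ExampleDiagonalBlockNorm(Mat A,PetscReal *nrm)
{
  PetscErrorCode ierr;
  Mat            Ad;

  PetscFunctionBegin;
  ierr = MatGetDiagonalBlock(A,&Ad);CHKERRQ(ierr);
  ierr = MatNorm(Ad,NORM_FROBENIUS,nrm);CHKERRQ(ierr); /* norm of the local diagonal block only */
  PetscFunctionReturn(0);
}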

313: /*@
314:    MatGetTrace - Gets the trace of a matrix, i.e. the sum of the diagonal entries.

316:    Collective on Mat

318:    Input Parameter:
319: .  mat - the matrix

321:    Output Parameter:
322: .   trace - the sum of the diagonal entries

324:    Level: advanced

326: @*/
327: PetscErrorCode MatGetTrace(Mat mat,PetscScalar *trace)
328: {
330:   Vec            diag;

333:   MatCreateVecs(mat,&diag,NULL);
334:   MatGetDiagonal(mat,diag);
335:   VecSum(diag,trace);
336:   VecDestroy(&diag);
337:   return(0);
338: }

340: /*@
341:    MatRealPart - Zeros out the imaginary part of the matrix

343:    Logically Collective on Mat

345:    Input Parameter:
346: .  mat - the matrix

348:    Level: advanced


351: .seealso: MatImaginaryPart()
352: @*/
353: PetscErrorCode MatRealPart(Mat mat)
354: {

360:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
361:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
362:   if (!mat->ops->realpart) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
363:   MatCheckPreallocated(mat,1);
364:   (*mat->ops->realpart)(mat);
365: #if defined(PETSC_HAVE_CUSP)
366:   if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
367:     mat->valid_GPU_matrix = PETSC_CUSP_CPU;
368:   }
369: #elif defined(PETSC_HAVE_VIENNACL)
370:   if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
371:     mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
372:   }
373: #elif defined(PETSC_HAVE_VECCUDA)
374:   if (mat->valid_GPU_matrix != PETSC_CUDA_UNALLOCATED) {
375:     mat->valid_GPU_matrix = PETSC_CUDA_CPU;
376:   }
377: #endif
378:   return(0);
379: }

381: /*@C
382:    MatGetGhosts - Get the global index of all ghost nodes defined by the sparse matrix

384:    Collective on Mat

386:    Input Parameter:
387: .  mat - the matrix

389:    Output Parameters:
390: +   nghosts - number of ghosts (note for BAIJ matrices there is one ghost for each block)
391: -   ghosts - the global indices of the ghost points

393:    Notes: nghosts and ghosts are suitable to pass into VecCreateGhost().

395:    Level: advanced

397: @*/
398: PetscErrorCode MatGetGhosts(Mat mat,PetscInt *nghosts,const PetscInt *ghosts[])
399: {

405:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
406:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
407:   if (!mat->ops->getghosts) {
408:     if (nghosts) *nghosts = 0;
409:     if (ghosts) *ghosts = 0;
410:   } else {
411:     (*mat->ops->getghosts)(mat,nghosts,ghosts);
412:   }
413:   return(0);
414: }


417: /*@
418:    MatImaginaryPart - Moves the imaginary part of the matrix to the real part and zeros the imaginary part

420:    Logically Collective on Mat

422:    Input Parameter:
423: .  mat - the matrix

425:    Level: advanced


428: .seealso: MatRealPart()
429: @*/
430: PetscErrorCode MatImaginaryPart(Mat mat)
431: {

437:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
438:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
439:   if (!mat->ops->imaginarypart) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
440:   MatCheckPreallocated(mat,1);
441:   (*mat->ops->imaginarypart)(mat);
442: #if defined(PETSC_HAVE_CUSP)
443:   if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
444:     mat->valid_GPU_matrix = PETSC_CUSP_CPU;
445:   }
446: #elif defined(PETSC_HAVE_VIENNACL)
447:   if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
448:     mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
449:   }
450: #elif defined(PETSC_HAVE_VECCUDA)
451:   if (mat->valid_GPU_matrix != PETSC_CUDA_UNALLOCATED) {
452:     mat->valid_GPU_matrix = PETSC_CUDA_CPU;
453:   }
454: #endif
455:   return(0);
456: }

458: /*@
459:    MatMissingDiagonal - Determine if sparse matrix is missing a diagonal entry (or block entry for BAIJ matrices)

461:    Not Collective

463:    Input Parameter:
464: .  mat - the matrix

466:    Output Parameters:
467: +  missing - is any diagonal entry missing
468: -  dd - first diagonal entry that is missing on this process (optional)

470:    Level: advanced


473: .seealso: MatRealPart()
474: @*/
475: PetscErrorCode MatMissingDiagonal(Mat mat,PetscBool *missing,PetscInt *dd)
476: {

482:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
483:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
484:   if (!mat->ops->missingdiagonal) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
485:   (*mat->ops->missingdiagonal)(mat,missing,dd);
486:   return(0);
487: }

489: /*@C
490:    MatGetRow - Gets a row of a matrix.  You MUST call MatRestoreRow()
491:    for each row that you get to ensure that your application does
492:    not bleed memory.

494:    Not Collective

496:    Input Parameters:
497: +  mat - the matrix
498: -  row - the row to get

500:    Output Parameters:
501: +  ncols -  if not NULL, the number of nonzeros in the row
502: .  cols - if not NULL, the column numbers
503: -  vals - if not NULL, the values

505:    Notes:
506:    This routine is provided for people who need to have direct access
507:    to the structure of a matrix.  We hope that we provide enough
508:    high-level matrix routines that few users will need it.

510:    MatGetRow() always returns 0-based column indices, regardless of
511:    whether the internal representation is 0-based (default) or 1-based.

513:    For better efficiency, set cols and/or vals to NULL if you do
514:    not wish to extract these quantities.

516:    The user can only examine the values extracted with MatGetRow();
517:    the values cannot be altered.  To change the matrix entries, one
518:    must use MatSetValues().

520:    You can only have one call to MatGetRow() outstanding for a particular
521:    matrix at a time, per processor. MatGetRow() can only obtain rows
522:    associated with the given processor; it cannot get rows from the
523:    other processors. For that we suggest using MatCreateSubMatrices(), then
524:    MatGetRow() on the submatrix. The row index passed to MatGetRow()
525:    is in the global numbering of rows.

527:    Fortran Notes:
528:    The calling sequence from Fortran is
529: .vb
530:    MatGetRow(matrix,row,ncols,cols,values,ierr)
531:          Mat     matrix (input)
532:          integer row    (input)
533:          integer ncols  (output)
534:          integer cols(maxcols) (output)
535:          double precision (or double complex) values(maxcols) (output)
536: .ve
537:    where maxcols >= maximum nonzeros in any row of the matrix.


540:    Caution:
541:    Do not try to change the contents of the output arrays (cols and vals).
542:    In some cases, this may corrupt the matrix.

544:    Level: advanced

546:    Concepts: matrices^row access

548: .seealso: MatRestoreRow(), MatSetValues(), MatGetValues(), MatCreateSubMatrices(), MatGetDiagonal()
549: @*/
550: PetscErrorCode MatGetRow(Mat mat,PetscInt row,PetscInt *ncols,const PetscInt *cols[],const PetscScalar *vals[])
551: {
553:   PetscInt       incols;

558:   if (!mat->assembled) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
559:   if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
560:   if (!mat->ops->getrow) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
561:   MatCheckPreallocated(mat,1);
562:   PetscLogEventBegin(MAT_GetRow,mat,0,0,0);
563:   (*mat->ops->getrow)(mat,row,&incols,(PetscInt**)cols,(PetscScalar**)vals);
564:   if (ncols) *ncols = incols;
565:   PetscLogEventEnd(MAT_GetRow,mat,0,0,0);
566:   return(0);
567: }
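/* Illustrative sketch, not part of matrix.c: loop over the locally owned rows of an
   assembled matrix with MatGetRow()/MatRestoreRow(), here simply counting the stored
   nonzeros.  The helper name is hypothetical. */
PETSC_UNUSED static PetscErrorCode ExampleCountLocalNonzeros(Mat A,PetscInt *nz)
{
  PetscErrorCode    ierr;
  PetscInt          rstart,rend,row,ncols;
  const PetscInt    *cols;
  const PetscScalar *vals;

  PetscFunctionBegin;
  *nz  = 0;
  ierr = MatGetOwnershipRange(A,&rstart,&rend);CHKERRQ(ierr);
  for (row=rstart; row<rend; row++) {
    ierr = MatGetRow(A,row,&ncols,&cols,&vals);CHKERRQ(ierr);
    *nz += ncols;
    ierr = MatRestoreRow(A,row,&ncols,&cols,&vals);CHKERRQ(ierr); /* must pair every MatGetRow() */
  }
  PetscFunctionReturn(0);
}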

569: /*@
570:    MatConjugate - replaces the matrix values with their complex conjugates

572:    Logically Collective on Mat

574:    Input Parameter:
575: .  mat - the matrix

577:    Level: advanced

579: .seealso:  VecConjugate()
580: @*/
581: PetscErrorCode MatConjugate(Mat mat)
582: {
583: #if defined(PETSC_USE_COMPLEX)

588:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
589:   if (!mat->ops->conjugate) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Not provided for this matrix format, send email to petsc-maint@mcs.anl.gov");
590:   (*mat->ops->conjugate)(mat);
591: #if defined(PETSC_HAVE_CUSP)
592:   if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
593:     mat->valid_GPU_matrix = PETSC_CUSP_CPU;
594:   }
595: #elif defined(PETSC_HAVE_VIENNACL)
596:   if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
597:     mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
598:   }
599: #elif defined(PETSC_HAVE_VECCUDA)
600:   if (mat->valid_GPU_matrix != PETSC_CUDA_UNALLOCATED) {
601:     mat->valid_GPU_matrix = PETSC_CUDA_CPU;
602:   }
603: #endif
604:   return(0);
605: #else
606:   return 0;
607: #endif
608: }

610: /*@C
611:    MatRestoreRow - Frees any temporary space allocated by MatGetRow().

613:    Not Collective

615:    Input Parameters:
616: +  mat - the matrix
617: .  row - the row to get
618: .  ncols, cols - the number of nonzeros and their columns
619: -  vals - if not NULL, the column values

621:    Notes:
622:    This routine should be called after you have finished examining the entries.

624:    This routine zeros out ncols, cols, and vals. This is to prevent accidental
625:    use of the array after it has been restored. If you pass NULL, it will
626:    not zero the pointers.  Use of cols or vals after MatRestoreRow() is invalid.

628:    Fortran Notes:
629:    The calling sequence from Fortran is
630: .vb
631:    MatRestoreRow(matrix,row,ncols,cols,values,ierr)
632:       Mat     matrix (input)
633:       integer row    (input)
634:       integer ncols  (output)
635:       integer cols(maxcols) (output)
636:       double precision (or double complex) values(maxcols) (output)
637: .ve
638:    Where maxcols >= maximum nonzeros in any row of the matrix.

640:    In Fortran MatRestoreRow() MUST be called after MatGetRow()
641:    before another call to MatGetRow() can be made.

643:    Level: advanced

645: .seealso:  MatGetRow()
646: @*/
647: PetscErrorCode MatRestoreRow(Mat mat,PetscInt row,PetscInt *ncols,const PetscInt *cols[],const PetscScalar *vals[])
648: {

654:   if (!mat->assembled) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
655:   if (!mat->ops->restorerow) return(0);
656:   (*mat->ops->restorerow)(mat,row,ncols,(PetscInt **)cols,(PetscScalar **)vals);
657:   if (ncols) *ncols = 0;
658:   if (cols)  *cols = NULL;
659:   if (vals)  *vals = NULL;
660:   return(0);
661: }

663: /*@
664:    MatGetRowUpperTriangular - Sets a flag to enable calls to MatGetRow() for matrices in MATSBAIJ format.
665:    You should call MatRestoreRowUpperTriangular() after calling MatGetRow()/MatRestoreRow() to disable the flag.

667:    Not Collective

669:    Input Parameter:
670: .  mat - the matrix

672:    Notes:
673:    The flag is to ensure that users are aware that MatGetRow() only provides the upper triangular part of the row for matrices in MATSBAIJ format.

675:    Level: advanced

677:    Concepts: matrices^row access

679: .seealso: MatRestoreRowUpperTriangular()
680: @*/
681: PetscErrorCode MatGetRowUpperTriangular(Mat mat)
682: {

688:   if (!mat->assembled) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
689:   if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
690:   if (!mat->ops->getrowuppertriangular) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
691:   MatCheckPreallocated(mat,1);
692:   (*mat->ops->getrowuppertriangular)(mat);
693:   return(0);
694: }

696: /*@
697:    MatRestoreRowUpperTriangular - Disables calls to MatGetRow() for matrices in MATSBAIJ format.

699:    Not Collective

701:    Input Parameter:
702: .  mat - the matrix

704:    Notes:
705:    This routine should be called after you have finished calling MatGetRow()/MatRestoreRow().


708:    Level: advanced

710: .seealso:  MatGetRowUpperTriangular()
711: @*/
712: PetscErrorCode MatRestoreRowUpperTriangular(Mat mat)
713: {

718:   if (!mat->assembled) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
719:   if (!mat->ops->restorerowuppertriangular) return(0);
720:   (*mat->ops->restorerowuppertriangular)(mat);
721:   return(0);
722: }

724: /*@C
725:    MatSetOptionsPrefix - Sets the prefix used for searching for all
726:    Mat options in the database.

728:    Logically Collective on Mat

730:    Input Parameters:
731: +  A - the Mat context
732: -  prefix - the prefix to prepend to all option names

734:    Notes:
735:    A hyphen (-) must NOT be given at the beginning of the prefix name.
736:    The first character of all runtime options is AUTOMATICALLY the hyphen.

738:    Level: advanced

740: .keywords: Mat, set, options, prefix, database

742: .seealso: MatSetFromOptions()
743: @*/
744: PetscErrorCode MatSetOptionsPrefix(Mat A,const char prefix[])
745: {

750:   PetscObjectSetOptionsPrefix((PetscObject)A,prefix);
751:   return(0);
752: }

754: /*@C
755:    MatAppendOptionsPrefix - Appends to the prefix used for searching for all
756:    Mat options in the database.

758:    Logically Collective on Mat

760:    Input Parameters:
761: +  A - the Mat context
762: -  prefix - the prefix to prepend to all option names

764:    Notes:
765:    A hyphen (-) must NOT be given at the beginning of the prefix name.
766:    The first character of all runtime options is AUTOMATICALLY the hyphen.

768:    Level: advanced

770: .keywords: Mat, append, options, prefix, database

772: .seealso: MatGetOptionsPrefix()
773: @*/
774: PetscErrorCode MatAppendOptionsPrefix(Mat A,const char prefix[])
775: {

780:   PetscObjectAppendOptionsPrefix((PetscObject)A,prefix);
781:   return(0);
782: }

784: /*@C
785:    MatGetOptionsPrefix - Gets the prefix used for searching for all
786:    Mat options in the database.

788:    Not Collective

790:    Input Parameter:
791: .  A - the Mat context

793:    Output Parameter:
794: .  prefix - pointer to the prefix string used

796:    Notes: On the Fortran side, the user should pass in a string 'prefix' of
797:    sufficient length to hold the prefix.

799:    Level: advanced

801: .keywords: Mat, get, options, prefix, database

803: .seealso: MatAppendOptionsPrefix()
804: @*/
805: PetscErrorCode MatGetOptionsPrefix(Mat A,const char *prefix[])
806: {

811:   PetscObjectGetOptionsPrefix((PetscObject)A,prefix);
812:   return(0);
813: }
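/* Illustrative sketch, not part of matrix.c: give a matrix its own options prefix so that
   runtime options such as -sub_mat_view or -sub_mat_type apply only to it.  The prefix
   "sub_" and the helper name are hypothetical. */
PETSC_UNUSED static PetscErrorCode ExampleUsePrefix(Mat A)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatSetOptionsPrefix(A,"sub_");CHKERRQ(ierr);  /* no leading hyphen in the prefix */
  ierr = MatSetFromOptions(A);CHKERRQ(ierr);           /* now honors -sub_... options */
  PetscFunctionReturn(0);
}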

815: /*@
816:    MatSetUp - Sets up the internal matrix data structures for later use.

818:    Collective on Mat

820:    Input Parameter:
821: .  A - the Mat context

823:    Notes:
824:    If the user has not set preallocation for this matrix then a default preallocation that is likely to be inefficient is used.

826:    If a suitable preallocation routine is used, this function does not need to be called.

828:    See the Performance chapter of the PETSc users manual for how to preallocate matrices.

830:    Level: beginner

832: .keywords: Mat, setup

834: .seealso: MatCreate(), MatDestroy()
835: @*/
836: PetscErrorCode MatSetUp(Mat A)
837: {
838:   PetscMPIInt    size;

843:   if (!((PetscObject)A)->type_name) {
844:     MPI_Comm_size(PetscObjectComm((PetscObject)A), &size);
845:     if (size == 1) {
846:       MatSetType(A, MATSEQAIJ);
847:     } else {
848:       MatSetType(A, MATMPIAIJ);
849:     }
850:   }
851:   if (!A->preallocated && A->ops->setup) {
852:     PetscInfo(A,"Warning not preallocating matrix storage\n");
853:     (*A->ops->setup)(A);
854:   }
855:   PetscLayoutSetUp(A->rmap);
856:   PetscLayoutSetUp(A->cmap);
857:   A->preallocated = PETSC_TRUE;
858:   return(0);
859: }
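/* Illustrative sketch, not part of matrix.c: the typical creation sequence in which
   MatSetUp() supplies a default (possibly inefficient) preallocation because no
   type-specific preallocation routine was called.  The helper name is hypothetical. */
PETSC_UNUSED static PetscErrorCode ExampleCreateMatrix(MPI_Comm comm,PetscInt n,Mat *A)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatCreate(comm,A);CHKERRQ(ierr);
  ierr = MatSetSizes(*A,PETSC_DECIDE,PETSC_DECIDE,n,n);CHKERRQ(ierr);
  ierr = MatSetFromOptions(*A);CHKERRQ(ierr);
  ierr = MatSetUp(*A);CHKERRQ(ierr);                   /* prefer MatXXXSetPreallocation() when possible */
  PetscFunctionReturn(0);
}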

861: #if defined(PETSC_HAVE_SAWS)
862:  #include <petscviewersaws.h>
863: #endif
864: /*@C
865:    MatView - Visualizes a matrix object.

867:    Collective on Mat

869:    Input Parameters:
870: +  mat - the matrix
871: -  viewer - visualization context

873:   Notes:
874:   The available visualization contexts include
875: +    PETSC_VIEWER_STDOUT_SELF - for sequential matrices
876: .    PETSC_VIEWER_STDOUT_WORLD - for parallel matrices created on PETSC_COMM_WORLD
877: .    PETSC_VIEWER_STDOUT_(comm) - for matrices created on MPI communicator comm
878: -     PETSC_VIEWER_DRAW_WORLD - graphical display of nonzero structure

880:    The user can open alternative visualization contexts with
881: +    PetscViewerASCIIOpen() - Outputs matrix to a specified file
882: .    PetscViewerBinaryOpen() - Outputs matrix in binary to a
883:          specified file; corresponding input uses MatLoad()
884: .    PetscViewerDrawOpen() - Outputs nonzero matrix structure to
885:          an X window display
886: -    PetscViewerSocketOpen() - Outputs matrix to Socket viewer.
887:          Currently only the sequential dense and AIJ
888:          matrix types support the Socket viewer.

890:    The user can call PetscViewerPushFormat() to specify the output
891:    format of ASCII printed objects (when using PETSC_VIEWER_STDOUT_SELF,
892:    PETSC_VIEWER_STDOUT_WORLD and PetscViewerASCIIOpen).  Available formats include
893: +    PETSC_VIEWER_DEFAULT - default, prints matrix contents
894: .    PETSC_VIEWER_ASCII_MATLAB - prints matrix contents in Matlab format
895: .    PETSC_VIEWER_ASCII_DENSE - prints entire matrix including zeros
896: .    PETSC_VIEWER_ASCII_COMMON - prints matrix contents, using a sparse
897:          format common among all matrix types
898: .    PETSC_VIEWER_ASCII_IMPL - prints matrix contents, using an implementation-specific
899:          format (which is in many cases the same as the default)
900: .    PETSC_VIEWER_ASCII_INFO - prints basic information about the matrix
901:          size and structure (not the matrix entries)
902: .    PETSC_VIEWER_ASCII_INFO_DETAIL - prints more detailed information about
903:          the matrix structure

905:    Options Database Keys:
906: +  -mat_view ::ascii_info - Prints info on matrix at conclusion of MatAssemblyEnd()
907: .  -mat_view ::ascii_info_detail - Prints more detailed info
908: .  -mat_view - Prints matrix in ASCII format
909: .  -mat_view ::ascii_matlab - Prints matrix in Matlab format
910: .  -mat_view draw - Draws the nonzero structure of the matrix, using MatView() and PetscDrawOpenX().
911: .  -display <name> - Sets display name (default is host)
912: .  -draw_pause <sec> - Sets number of seconds to pause after display
913: .  -mat_view socket - Sends matrix to socket, can be accessed from Matlab (see Users-Manual: Chapter 12 Using MATLAB with PETSc for details)
914: .  -viewer_socket_machine <machine> - Machine to use with the socket viewer
915: .  -viewer_socket_port <port> - Port number to use with the socket viewer
916: .  -mat_view binary - save matrix to file in binary format
917: -  -viewer_binary_filename <name> - File name to use with the binary viewer

918:    Level: beginner

920:    Notes: see the manual page for MatLoad() for the exact format of the binary file when the binary
921:       viewer is used.

923:       See share/petsc/matlab/PetscBinaryRead.m for MATLAB code that can read in the binary file when the binary
924:       viewer is used.

926:       One can use '-mat_view draw -draw_pause -1' to pause the graphical display of matrix nonzero structure.
927:       And then use the following mouse functions:
928:           left mouse: zoom in
929:           middle mouse: zoom out
930:           right mouse: continue with the simulation

932:    Concepts: matrices^viewing
933:    Concepts: matrices^plotting
934:    Concepts: matrices^printing

936: .seealso: PetscViewerPushFormat(), PetscViewerASCIIOpen(), PetscViewerDrawOpen(),
937:           PetscViewerSocketOpen(), PetscViewerBinaryOpen(), MatLoad()
938: @*/
939: PetscErrorCode MatView(Mat mat,PetscViewer viewer)
940: {
941:   PetscErrorCode    ierr;
942:   PetscInt          rows,cols,rbs,cbs;
943:   PetscBool         iascii,ibinary;
944:   PetscViewerFormat format;
945: #if defined(PETSC_HAVE_SAWS)
946:   PetscBool         issaws;
947: #endif

952:   if (!viewer) {
953:     PetscViewerASCIIGetStdout(PetscObjectComm((PetscObject)mat),&viewer);
954:   }
957:   MatCheckPreallocated(mat,1);
958:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERBINARY,&ibinary);
959:   if (ibinary) {
960:     PetscBool mpiio;
961:     PetscViewerBinaryGetUseMPIIO(viewer,&mpiio);
962:     if (mpiio) SETERRQ(PetscObjectComm((PetscObject)viewer),PETSC_ERR_SUP,"PETSc matrix viewers do not support using MPI-IO, turn off that flag");
963:   }

965:   PetscLogEventBegin(MAT_View,mat,viewer,0,0);
966:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);
967:   PetscViewerGetFormat(viewer,&format);
968:   if ((!iascii || (format != PETSC_VIEWER_ASCII_INFO || format == PETSC_VIEWER_ASCII_INFO_DETAIL)) && mat->factortype) {
969:     SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"No viewers for factored matrix except ASCII info or info_detailed");
970:   }

972: #if defined(PETSC_HAVE_SAWS)
973:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERSAWS,&issaws);
974: #endif
975:   if (iascii) {
976:     if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ORDER,"Must call MatAssemblyBegin/End() before viewing matrix");
977:     PetscObjectPrintClassNamePrefixType((PetscObject)mat,viewer);
978:     if (format == PETSC_VIEWER_ASCII_INFO || format == PETSC_VIEWER_ASCII_INFO_DETAIL) {
979:       PetscViewerASCIIPushTab(viewer);
980:       MatGetSize(mat,&rows,&cols);
981:       MatGetBlockSizes(mat,&rbs,&cbs);
982:       if (rbs != 1 || cbs != 1) {
983:         if (rbs != cbs) {PetscViewerASCIIPrintf(viewer,"rows=%D, cols=%D, rbs=%D, cbs = %D\n",rows,cols,rbs,cbs);}
984:         else            {PetscViewerASCIIPrintf(viewer,"rows=%D, cols=%D, bs=%D\n",rows,cols,rbs);}
985:       } else {
986:         PetscViewerASCIIPrintf(viewer,"rows=%D, cols=%D\n",rows,cols);
987:       }
988:       if (mat->factortype) {
989:         const MatSolverPackage solver;
990:         MatFactorGetSolverPackage(mat,&solver);
991:         PetscViewerASCIIPrintf(viewer,"package used to perform factorization: %s\n",solver);
992:       }
993:       if (mat->ops->getinfo) {
994:         MatInfo info;
995:         MatGetInfo(mat,MAT_GLOBAL_SUM,&info);
996:         PetscViewerASCIIPrintf(viewer,"total: nonzeros=%.f, allocated nonzeros=%.f\n",info.nz_used,info.nz_allocated);
997:         PetscViewerASCIIPrintf(viewer,"total number of mallocs used during MatSetValues calls =%D\n",(PetscInt)info.mallocs);
998:       }
999:       if (mat->nullsp) {PetscViewerASCIIPrintf(viewer,"  has attached null space\n");}
1000:       if (mat->nearnullsp) {PetscViewerASCIIPrintf(viewer,"  has attached near null space\n");}
1001:     }
1002: #if defined(PETSC_HAVE_SAWS)
1003:   } else if (issaws) {
1004:     PetscMPIInt rank;

1006:     PetscObjectName((PetscObject)mat);
1007:     MPI_Comm_rank(PETSC_COMM_WORLD,&rank);
1008:     if (!((PetscObject)mat)->amsmem && !rank) {
1009:       PetscObjectViewSAWs((PetscObject)mat,viewer);
1010:     }
1011: #endif
1012:   }
1013:   if (mat->ops->view) {
1014:     PetscViewerASCIIPushTab(viewer);
1015:     (*mat->ops->view)(mat,viewer);
1016:     PetscViewerASCIIPopTab(viewer);
1017:   }
1018:   if (iascii) {
1019:     if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ORDER,"Must call MatAssemblyBegin/End() before viewing matrix");
1020:     PetscViewerGetFormat(viewer,&format);
1021:     if (format == PETSC_VIEWER_ASCII_INFO || format == PETSC_VIEWER_ASCII_INFO_DETAIL) {
1022:       PetscViewerASCIIPopTab(viewer);
1023:     }
1024:   }
1025:   PetscLogEventEnd(MAT_View,mat,viewer,0,0);
1026:   return(0);
1027: }
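/* Illustrative sketch, not part of matrix.c: print basic information about a matrix by
   temporarily pushing the ASCII_INFO format onto the stdout viewer of its communicator.
   The helper name is hypothetical. */
PETSC_UNUSED static PetscErrorCode ExampleViewMatInfo(Mat A)
{
  PetscErrorCode ierr;
  PetscViewer    viewer;

  PetscFunctionBegin;
  viewer = PETSC_VIEWER_STDOUT_(PetscObjectComm((PetscObject)A));
  ierr   = PetscViewerPushFormat(viewer,PETSC_VIEWER_ASCII_INFO);CHKERRQ(ierr);
  ierr   = MatView(A,viewer);CHKERRQ(ierr);
  ierr   = PetscViewerPopFormat(viewer);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}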

1029: #if defined(PETSC_USE_DEBUG)
1030: #include <../src/sys/totalview/tv_data_display.h>
1031: PETSC_UNUSED static int TV_display_type(const struct _p_Mat *mat)
1032: {
1033:   TV_add_row("Local rows", "int", &mat->rmap->n);
1034:   TV_add_row("Local columns", "int", &mat->cmap->n);
1035:   TV_add_row("Global rows", "int", &mat->rmap->N);
1036:   TV_add_row("Global columns", "int", &mat->cmap->N);
1037:   TV_add_row("Typename", TV_ascii_string_type, ((PetscObject)mat)->type_name);
1038:   return TV_format_OK;
1039: }
1040: #endif

1042: /*@C
1043:    MatLoad - Loads a matrix that has been stored in binary format
1044:    with MatView().  The matrix format is determined from the options database.
1045:    Generates a parallel MPI matrix if the communicator has more than one
1046:    processor.  The default matrix type is AIJ.

1048:    Collective on PetscViewer

1050:    Input Parameters:
1051: +  newmat - the newly loaded matrix; this needs to have been created with MatCreate()
1052:             or some related function before a call to MatLoad()
1053: -  viewer - binary file viewer, created with PetscViewerBinaryOpen()

1055:    Options Database Key:
1056: .    -matload_block_size <bs> - used with block matrix formats (MATSEQBAIJ, ...) to
1057:          specify the block size

1060:    Level: beginner

1062:    Notes:
1063:    If the Mat type has not yet been given then MATAIJ is used; call MatSetFromOptions() on the
1064:    Mat before calling this routine if you wish to set it from the options database.

1066:    MatLoad() automatically loads into the options database any options
1067:    given in the file filename.info where filename is the name of the file
1068:    that was passed to the PetscViewerBinaryOpen(). The options in the info
1069:    file will be ignored if you use the -viewer_binary_skip_info option.

1071:    If the type or size of newmat is not set before a call to MatLoad, PETSc
1072:    sets the default matrix type AIJ and sets the local and global sizes.
1073:    If type and/or size is already set, then the same are used.

1075:    In parallel, each processor can load a subset of rows (or the
1076:    entire matrix).  This routine is especially useful when a large
1077:    matrix is stored on disk and only part of it is desired on each
1078:    processor.  For example, a parallel solver may access only some of
1079:    the rows from each processor.  The algorithm used here reads
1080:    relatively small blocks of data rather than reading the entire
1081:    matrix and then subsetting it.

1083:    Notes for advanced users:
1084:    Most users should not need to know the details of the binary storage
1085:    format, since MatLoad() and MatView() completely hide these details.
1086:    But for anyone who's interested, the standard binary matrix storage
1087:    format is

1089: $    int    MAT_FILE_CLASSID
1090: $    int    number of rows
1091: $    int    number of columns
1092: $    int    total number of nonzeros
1093: $    int    *number nonzeros in each row
1094: $    int    *column indices of all nonzeros (starting index is zero)
1095: $    PetscScalar *values of all nonzeros

1097:    PETSc automatically does the byte swapping for
1098:    machines that store the bytes reversed, e.g. DEC Alpha, FreeBSD,
1099:    Linux, Windows and the Paragon; thus if you write your own binary
1100:    read/write routines you have to swap the bytes; see PetscBinaryRead()
1101:    and PetscBinaryWrite() to see how this may be done.

1103: .keywords: matrix, load, binary, input

1105: .seealso: PetscViewerBinaryOpen(), MatView(), VecLoad()

1107:  @*/
1108: PetscErrorCode MatLoad(Mat newmat,PetscViewer viewer)
1109: {
1111:   PetscBool      isbinary,flg;

1116:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERBINARY,&isbinary);
1117:   if (!isbinary) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Invalid viewer; open viewer with PetscViewerBinaryOpen()");

1119:   if (!((PetscObject)newmat)->type_name) {
1120:     MatSetType(newmat,MATAIJ);
1121:   }

1123:   if (!newmat->ops->load) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"MatLoad is not supported for type");
1124:   PetscLogEventBegin(MAT_Load,viewer,0,0,0);
1125:   (*newmat->ops->load)(newmat,viewer);
1126:   PetscLogEventEnd(MAT_Load,viewer,0,0,0);

1128:   flg  = PETSC_FALSE;
1129:   PetscOptionsGetBool(((PetscObject)newmat)->options,((PetscObject)newmat)->prefix,"-matload_symmetric",&flg,NULL);
1130:   if (flg) {
1131:     MatSetOption(newmat,MAT_SYMMETRIC,PETSC_TRUE);
1132:     MatSetOption(newmat,MAT_SYMMETRY_ETERNAL,PETSC_TRUE);
1133:   }
1134:   flg  = PETSC_FALSE;
1135:   PetscOptionsGetBool(((PetscObject)newmat)->options,((PetscObject)newmat)->prefix,"-matload_spd",&flg,NULL);
1136:   if (flg) {
1137:     MatSetOption(newmat,MAT_SPD,PETSC_TRUE);
1138:   }
1139:   return(0);
1140: }
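/* Illustrative sketch, not part of matrix.c: load a matrix previously written with
   MatView() into a binary viewer.  The file name "matrix.dat" and the helper name are
   hypothetical. */
PETSC_UNUSED static PetscErrorCode ExampleLoadMatrix(MPI_Comm comm,Mat *A)
{
  PetscErrorCode ierr;
  PetscViewer    viewer;

  PetscFunctionBegin;
  ierr = PetscViewerBinaryOpen(comm,"matrix.dat",FILE_MODE_READ,&viewer);CHKERRQ(ierr);
  ierr = MatCreate(comm,A);CHKERRQ(ierr);
  ierr = MatSetFromOptions(*A);CHKERRQ(ierr);          /* allow -mat_type to select the type */
  ierr = MatLoad(*A,viewer);CHKERRQ(ierr);
  ierr = PetscViewerDestroy(&viewer);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}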

1142: PetscErrorCode MatDestroy_Redundant(Mat_Redundant **redundant)
1143: {
1145:   Mat_Redundant  *redund = *redundant;
1146:   PetscInt       i;

1149:   if (redund){
1150:     if (redund->matseq) { /* via MatCreateSubMatrices()  */
1151:       ISDestroy(&redund->isrow);
1152:       ISDestroy(&redund->iscol);
1153:       MatDestroySubMatrices(1,&redund->matseq);
1154:     } else {
1155:       PetscFree2(redund->send_rank,redund->recv_rank);
1156:       PetscFree(redund->sbuf_j);
1157:       PetscFree(redund->sbuf_a);
1158:       for (i=0; i<redund->nrecvs; i++) {
1159:         PetscFree(redund->rbuf_j[i]);
1160:         PetscFree(redund->rbuf_a[i]);
1161:       }
1162:       PetscFree4(redund->sbuf_nz,redund->rbuf_nz,redund->rbuf_j,redund->rbuf_a);
1163:     }

1165:     if (redund->subcomm) {
1166:       PetscCommDestroy(&redund->subcomm);
1167:     }
1168:     PetscFree(redund);
1169:   }
1170:   return(0);
1171: }

1173: /*@
1174:    MatDestroy - Frees space taken by a matrix.

1176:    Collective on Mat

1178:    Input Parameter:
1179: .  A - the matrix

1181:    Level: beginner

1183: @*/
1184: PetscErrorCode MatDestroy(Mat *A)
1185: {

1189:   if (!*A) return(0);
1191:   if (--((PetscObject)(*A))->refct > 0) {*A = NULL; return(0);}

1193:   /* if memory was published with SAWs then destroy it */
1194:   PetscObjectSAWsViewOff((PetscObject)*A);
1195:   if ((*A)->ops->destroy) {
1196:     (*(*A)->ops->destroy)(*A);
1197:   }

1199:   PetscFree((*A)->solvertype);
1200:   MatDestroy_Redundant(&(*A)->redundant);
1201:   MatNullSpaceDestroy(&(*A)->nullsp);
1202:   MatNullSpaceDestroy(&(*A)->transnullsp);
1203:   MatNullSpaceDestroy(&(*A)->nearnullsp);
1204:   MatDestroy(&(*A)->schur);
1205:   PetscLayoutDestroy(&(*A)->rmap);
1206:   PetscLayoutDestroy(&(*A)->cmap);
1207:   PetscHeaderDestroy(A);
1208:   return(0);
1209: }

1211: /*@C
1212:    MatSetValues - Inserts or adds a block of values into a matrix.
1213:    These values may be cached, so MatAssemblyBegin() and MatAssemblyEnd()
1214:    MUST be called after all calls to MatSetValues() have been completed.

1216:    Not Collective

1218:    Input Parameters:
1219: +  mat - the matrix
1220: .  v - a logically two-dimensional array of values
1221: .  m, idxm - the number of rows and their global indices
1222: .  n, idxn - the number of columns and their global indices
1223: -  addv - either ADD_VALUES or INSERT_VALUES, where
1224:    ADD_VALUES adds values to any existing entries, and
1225:    INSERT_VALUES replaces existing entries with new values

1227:    Notes:
1228:    If you create the matrix yourself (that is, not with a call to DMCreateMatrix()) then you MUST call MatXXXXSetPreallocation() or
1229:       MatSetUp() before using this routine.

1231:    By default the values, v, are row-oriented. See MatSetOption() for other options.

1233:    Calls to MatSetValues() with the INSERT_VALUES and ADD_VALUES
1234:    options cannot be mixed without intervening calls to the assembly
1235:    routines.

1237:    MatSetValues() uses 0-based row and column numbers in Fortran
1238:    as well as in C.

1240:    Negative indices may be passed in idxm and idxn; these rows and columns are
1241:    simply ignored. This allows easily inserting element stiffness matrices
1242:    with homogeneous Dirichlet boundary conditions that you don't want represented
1243:    in the matrix.

1245:    Efficiency Alert:
1246:    The routine MatSetValuesBlocked() may offer much better efficiency
1247:    for users of block sparse formats (MATSEQBAIJ and MATMPIBAIJ).

1249:    Level: beginner

1251:    Developer Notes: This is labeled with C so does not automatically generate Fortran stubs and interfaces
1252:                     because it requires multiple Fortran interfaces depending on which arguments are scalar or arrays.

1254:    Concepts: matrices^putting entries in

1256: .seealso: MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValuesBlocked(), MatSetValuesLocal(),
1257:           InsertMode, INSERT_VALUES, ADD_VALUES
1258: @*/
1259: PetscErrorCode MatSetValues(Mat mat,PetscInt m,const PetscInt idxm[],PetscInt n,const PetscInt idxn[],const PetscScalar v[],InsertMode addv)
1260: {
1262: #if defined(PETSC_USE_DEBUG)
1263:   PetscInt       i,j;
1264: #endif

1269:   if (!m || !n) return(0); /* no values to insert */
1273:   MatCheckPreallocated(mat,1);
1274:   if (mat->insertmode == NOT_SET_VALUES) {
1275:     mat->insertmode = addv;
1276:   }
1277: #if defined(PETSC_USE_DEBUG)
1278:   else if (mat->insertmode != addv) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Cannot mix add values and insert values");
1279:   if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
1280:   if (!mat->ops->setvalues) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);

1282:   for (i=0; i<m; i++) {
1283:     for (j=0; j<n; j++) {
1284:       if (mat->erroriffailure && PetscIsInfOrNanScalar(v[i*n+j]))
1285: #if defined(PETSC_USE_COMPLEX)
1286:         SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_FP,"Inserting %g+ig at matrix entry (%D,%D)",(double)PetscRealPart(v[i*n+j]),(double)PetscImaginaryPart(v[i*n+j]),idxm[i],idxn[j]);
1287: #else
1288:         SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_FP,"Inserting %g at matrix entry (%D,%D)",(double)v[i*n+j],idxm[i],idxn[j]);
1289: #endif
1290:     }
1291:   }
1292: #endif

1294:   if (mat->assembled) {
1295:     mat->was_assembled = PETSC_TRUE;
1296:     mat->assembled     = PETSC_FALSE;
1297:   }
1298:   PetscLogEventBegin(MAT_SetValues,mat,0,0,0);
1299:   (*mat->ops->setvalues)(mat,m,idxm,n,idxn,v,addv);
1300:   PetscLogEventEnd(MAT_SetValues,mat,0,0,0);
1301: #if defined(PETSC_HAVE_CUSP)
1302:   if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
1303:     mat->valid_GPU_matrix = PETSC_CUSP_CPU;
1304:   }
1305: #elif defined(PETSC_HAVE_VIENNACL)
1306:   if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
1307:     mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
1308:   }
1309: #elif defined(PETSC_HAVE_VECCUDA)
1310:   if (mat->valid_GPU_matrix != PETSC_CUDA_UNALLOCATED) {
1311:     mat->valid_GPU_matrix = PETSC_CUDA_CPU;
1312:   }
1313: #endif
1314:   return(0);
1315: }
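/* Illustrative sketch, not part of matrix.c: insert a row-oriented 2x2 element block at
   global position (i,j) and assemble; the matrix is assumed to be preallocated (or to
   have had MatSetUp() called).  The helper name is hypothetical. */
PETSC_UNUSED static PetscErrorCode ExampleInsertBlock(Mat A,PetscInt i,PetscInt j)
{
  PetscErrorCode ierr;
  PetscInt       rows[2],cols[2];
  PetscScalar    v[4] = {4.0,-1.0,-1.0,4.0};           /* v[r*2+c] is entry (rows[r],cols[c]) */

  PetscFunctionBegin;
  rows[0] = i; rows[1] = i+1;
  cols[0] = j; cols[1] = j+1;
  ierr = MatSetValues(A,2,rows,2,cols,v,INSERT_VALUES);CHKERRQ(ierr);
  ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}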


1318: /*@
1319:    MatSetValuesRowLocal - Inserts a row (block row for BAIJ matrices) of nonzero
1320:         values into a matrix

1322:    Not Collective

1324:    Input Parameters:
1325: +  mat - the matrix
1326: .  row - the (block) row to set
1327: -  v - a logically two-dimensional array of values

1329:    Notes:
1330:    The values, v, are column-oriented (for the block version) and sorted.

1332:    All the nonzeros in the row must be provided

1334:    The matrix must have previously had its column indices set

1336:    The row must belong to this process

1338:    Level: intermediate

1340:    Concepts: matrices^putting entries in

1342: .seealso: MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValuesBlocked(), MatSetValuesLocal(),
1343:           InsertMode, INSERT_VALUES, ADD_VALUES, MatSetValues(), MatSetValuesRow(), MatSetLocalToGlobalMapping()
1344: @*/
1345: PetscErrorCode MatSetValuesRowLocal(Mat mat,PetscInt row,const PetscScalar v[])
1346: {
1348:   PetscInt       globalrow;

1354:   ISLocalToGlobalMappingApply(mat->rmap->mapping,1,&row,&globalrow);
1355:   MatSetValuesRow(mat,globalrow,v);
1356: #if defined(PETSC_HAVE_CUSP)
1357:   if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
1358:     mat->valid_GPU_matrix = PETSC_CUSP_CPU;
1359:   }
1360: #elif defined(PETSC_HAVE_VIENNACL)
1361:   if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
1362:     mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
1363:   }
1364: #elif defined(PETSC_HAVE_VECCUDA)
1365:   if (mat->valid_GPU_matrix != PETSC_CUDA_UNALLOCATED) {
1366:     mat->valid_GPU_matrix = PETSC_CUDA_CPU;
1367:   }
1368: #endif
1369:   return(0);
1370: }

1372: /*@
1373:    MatSetValuesRow - Inserts a row (block row for BAIJ matrices) of nonzero
1374:         values into a matrix

1376:    Not Collective

1378:    Input Parameters:
1379: +  mat - the matrix
1380: .  row - the (block) row to set
1381: -  v - a logically two-dimensional (column major) array of values for  block matrices with blocksize larger than one, otherwise a one dimensional array of values

1383:    Notes:
1384:    The values, v, are column-oriented for the block version.

1386:    All the nonzeros in the row must be provided

1388:    THE MATRIX MUST HAVE PREVIOUSLY HAD ITS COLUMN INDICES SET. IT IS RARE THAT THIS ROUTINE IS USED; usually MatSetValues() is used.

1390:    The row must belong to this process

1392:    Level: advanced

1394:    Concepts: matrices^putting entries in

1396: .seealso: MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValuesBlocked(), MatSetValuesLocal(),
1397:           InsertMode, INSERT_VALUES, ADD_VALUES, MatSetValues()
1398: @*/
1399: PetscErrorCode MatSetValuesRow(Mat mat,PetscInt row,const PetscScalar v[])
1400: {

1406:   MatCheckPreallocated(mat,1);
1408: #if defined(PETSC_USE_DEBUG)
1409:   if (mat->insertmode == ADD_VALUES) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Cannot mix add and insert values");
1410:   if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
1411: #endif
1412:   mat->insertmode = INSERT_VALUES;

1414:   if (mat->assembled) {
1415:     mat->was_assembled = PETSC_TRUE;
1416:     mat->assembled     = PETSC_FALSE;
1417:   }
1418:   PetscLogEventBegin(MAT_SetValues,mat,0,0,0);
1419:   if (!mat->ops->setvaluesrow) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
1420:   (*mat->ops->setvaluesrow)(mat,row,v);
1421:   PetscLogEventEnd(MAT_SetValues,mat,0,0,0);
1422: #if defined(PETSC_HAVE_CUSP)
1423:   if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
1424:     mat->valid_GPU_matrix = PETSC_CUSP_CPU;
1425:   }
1426: #elif defined(PETSC_HAVE_VIENNACL)
1427:   if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
1428:     mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
1429:   }
1430: #elif defined(PETSC_HAVE_VECCUDA)
1431:   if (mat->valid_GPU_matrix != PETSC_CUDA_UNALLOCATED) {
1432:     mat->valid_GPU_matrix = PETSC_CUDA_CPU;
1433:   }
1434: #endif
1435:   return(0);
1436: }

1438: /*@
1439:    MatSetValuesStencil - Inserts or adds a block of values into a matrix.
1440:      Using structured grid indexing

1442:    Not Collective

1444:    Input Parameters:
1445: +  mat - the matrix
1446: .  m - number of rows being entered
1447: .  idxm - grid coordinates (and component number when dof > 1) for matrix rows being entered
1448: .  n - number of columns being entered
1449: .  idxn - grid coordinates (and component number when dof > 1) for matrix columns being entered
1450: .  v - a logically two-dimensional array of values
1451: -  addv - either ADD_VALUES or INSERT_VALUES, where
1452:    ADD_VALUES adds values to any existing entries, and
1453:    INSERT_VALUES replaces existing entries with new values

1455:    Notes:
1456:    By default the values, v, are row-oriented.  See MatSetOption() for other options.

1458:    Calls to MatSetValuesStencil() with the INSERT_VALUES and ADD_VALUES
1459:    options cannot be mixed without intervening calls to the assembly
1460:    routines.

1462:    The grid coordinates are across the entire grid, not just the local portion

1464:    MatSetValuesStencil() uses 0-based row and column numbers in Fortran
1465:    as well as in C.

1467:    For setting/accessing vector values via array coordinates you can use the DMDAVecGetArray() routine

1469:    In order to use this routine you must either obtain the matrix with DMCreateMatrix()
1470:    or call MatSetLocalToGlobalMapping() and MatSetStencil() first.

1472:    The columns and rows in the stencil passed in MUST be contained within the
1473:    ghost region of the given process as set with DMDACreateXXX() or MatSetStencil(). For example,
1474:    if you create a DMDA with an overlap of one grid level and on a particular process its first
1475:    local nonghost x logical coordinate is 6 (so its first ghost x logical coordinate is 5) the
1476:    first i index you can use in your column and row indices in MatSetStencil() is 5.

1478:    In Fortran idxm and idxn should be declared as
1479: $     MatStencil idxm(4,m),idxn(4,n)
1480:    and the values inserted using
1481: $    idxm(MatStencil_i,1) = i
1482: $    idxm(MatStencil_j,1) = j
1483: $    idxm(MatStencil_k,1) = k
1484: $    idxm(MatStencil_c,1) = c
1485:    etc

1487:    For periodic boundary conditions use negative indices for values to the left (below 0), which are
1488:    obtained by wrapping values from the right edge. For values to the right of the last entry use that index plus one,
1489:    etc., to obtain values that are obtained by wrapping the values from the left edge. This does not work for anything but the
1490:    DM_BOUNDARY_PERIODIC boundary type.

1492:    For indices that don't mean anything for your case (like the k index when working in 2d, or the c index when you have
1493:    a single value per point) you can skip filling those indices.

1495:    Inspired by the structured grid interface to the HYPRE package
1496:    (http://www.llnl.gov/CASC/hypre)

1498:    Efficiency Alert:
1499:    The routine MatSetValuesBlockedStencil() may offer much better efficiency
1500:    for users of block sparse formats (MATSEQBAIJ and MATMPIBAIJ).

1502:    Level: beginner

1504:    Concepts: matrices^putting entries in

1506: .seealso: MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValuesBlocked(), MatSetValuesLocal()
1507:           MatSetValues(), MatSetValuesBlockedStencil(), MatSetStencil(), DMCreateMatrix(), DMDAVecGetArray(), MatStencil
1508: @*/
1509: PetscErrorCode MatSetValuesStencil(Mat mat,PetscInt m,const MatStencil idxm[],PetscInt n,const MatStencil idxn[],const PetscScalar v[],InsertMode addv)
1510: {
1512:   PetscInt       buf[8192],*bufm=0,*bufn=0,*jdxm,*jdxn;
1513:   PetscInt       j,i,dim = mat->stencil.dim,*dims = mat->stencil.dims+1,tmp;
1514:   PetscInt       *starts = mat->stencil.starts,*dxm = (PetscInt*)idxm,*dxn = (PetscInt*)idxn,sdim = dim - (1 - (PetscInt)mat->stencil.noc);

1517:   if (!m || !n) return(0); /* no values to insert */

1524:   if ((m+n) <= (PetscInt)(sizeof(buf)/sizeof(PetscInt))) {
1525:     jdxm = buf; jdxn = buf+m;
1526:   } else {
1527:     PetscMalloc2(m,&bufm,n,&bufn);
1528:     jdxm = bufm; jdxn = bufn;
1529:   }
1530:   for (i=0; i<m; i++) {
1531:     for (j=0; j<3-sdim; j++) dxm++;
1532:     tmp = *dxm++ - starts[0];
1533:     for (j=0; j<dim-1; j++) {
1534:       if ((*dxm++ - starts[j+1]) < 0 || tmp < 0) tmp = -1;
1535:       else                                       tmp = tmp*dims[j] + *(dxm-1) - starts[j+1];
1536:     }
1537:     if (mat->stencil.noc) dxm++;
1538:     jdxm[i] = tmp;
1539:   }
1540:   for (i=0; i<n; i++) {
1541:     for (j=0; j<3-sdim; j++) dxn++;
1542:     tmp = *dxn++ - starts[0];
1543:     for (j=0; j<dim-1; j++) {
1544:       if ((*dxn++ - starts[j+1]) < 0 || tmp < 0) tmp = -1;
1545:       else                                       tmp = tmp*dims[j] + *(dxn-1) - starts[j+1];
1546:     }
1547:     if (mat->stencil.noc) dxn++;
1548:     jdxn[i] = tmp;
1549:   }
1550:   MatSetValuesLocal(mat,m,jdxm,n,jdxn,v,addv);
1551:   PetscFree2(bufm,bufn);
1552:   return(0);
1553: }
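/* Illustrative sketch, not part of matrix.c: set the diagonal entry of grid point (i,j)
   using structured-grid indexing.  It is assumed that A was obtained from DMCreateMatrix()
   on a 2d DMDA with one degree of freedom; the helper name is hypothetical. */
PETSC_UNUSED static PetscErrorCode ExampleStencilDiagonal(Mat A,PetscInt i,PetscInt j)
{
  PetscErrorCode ierr;
  MatStencil     row;
  PetscScalar    v = 4.0;

  PetscFunctionBegin;
  row.i = i; row.j = j; row.k = 0; row.c = 0;          /* k and c are unused for 2d, dof = 1 */
  ierr = MatSetValuesStencil(A,1,&row,1,&row,&v,INSERT_VALUES);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}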

1555: /*@
1556:    MatSetValuesBlockedStencil - Inserts or adds a block of values into a matrix.
1557:      Using structured grid indexing

1559:    Not Collective

1561:    Input Parameters:
1562: +  mat - the matrix
1563: .  m - number of rows being entered
1564: .  idxm - grid coordinates for matrix rows being entered
1565: .  n - number of columns being entered
1566: .  idxn - grid coordinates for matrix columns being entered
1567: .  v - a logically two-dimensional array of values
1568: -  addv - either ADD_VALUES or INSERT_VALUES, where
1569:    ADD_VALUES adds values to any existing entries, and
1570:    INSERT_VALUES replaces existing entries with new values

1572:    Notes:
1573:    By default the values, v, are row-oriented and unsorted.
1574:    See MatSetOption() for other options.

1576:    Calls to MatSetValuesBlockedStencil() with the INSERT_VALUES and ADD_VALUES
1577:    options cannot be mixed without intervening calls to the assembly
1578:    routines.

1580:    The grid coordinates are across the entire grid, not just the local portion

1582:    MatSetValuesBlockedStencil() uses 0-based row and column numbers in Fortran
1583:    as well as in C.

1585:    For setting/accessing vector values via array coordinates you can use the DMDAVecGetArray() routine

1587:    In order to use this routine you must either obtain the matrix with DMCreateMatrix()
1588:    or call MatSetBlockSize(), MatSetLocalToGlobalMapping() and MatSetStencil() first.

1590:    The columns and rows in the stencil passed in MUST be contained within the
1591:    ghost region of the given process as set with DMDACreateXXX() or MatSetStencil(). For example,
1592:    if you create a DMDA with an overlap of one grid level and on a particular process its first
1593:    local nonghost x logical coordinate is 6 (so its first ghost x logical coordinate is 5) the
1594:    first i index you can use in your column and row indices in MatSetValuesBlockedStencil() is 5.

1596:    In Fortran idxm and idxn should be declared as
1597: $     MatStencil idxm(4,m),idxn(4,n)
1598:    and the values inserted using
1599: $    idxm(MatStencil_i,1) = i
1600: $    idxm(MatStencil_j,1) = j
1601: $    idxm(MatStencil_k,1) = k
1602:    etc

1604:    Negative indices may be passed in idxm and idxn, these rows and columns are
1605:    simply ignored. This allows easily inserting element stiffness matrices
1606:    with homogeneous Dirichlet boundary conditions that you don't want represented
1607:    in the matrix.

1609:    Inspired by the structured grid interface to the HYPRE package
1610:    (http://www.llnl.gov/CASC/hypre)
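
   A minimal usage sketch (assuming a 2d DMDA-created matrix with block size 2; the grid
   indices i and j and the contents of v are illustrative only):
.vb
      MatStencil  row,col;
      PetscScalar v[4];                      // one 2x2 block of values, assumed already filled, row oriented
      row.i = i; row.j = j;                  // grid coordinates of the block row
      col.i = i; col.j = j;                  // grid coordinates of the block column
      MatSetValuesBlockedStencil(mat,1,&row,1,&col,v,INSERT_VALUES);
.ve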

1612:    Level: beginner

1614:    Concepts: matrices^putting entries in

1616: .seealso: MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValuesBlocked(), MatSetValuesLocal()
1617:           MatSetValues(), MatSetValuesStencil(), MatSetStencil(), DMCreateMatrix(), DMDAVecGetArray(), MatStencil,
1618:           MatSetBlockSize(), MatSetLocalToGlobalMapping()
1619: @*/
1620: PetscErrorCode MatSetValuesBlockedStencil(Mat mat,PetscInt m,const MatStencil idxm[],PetscInt n,const MatStencil idxn[],const PetscScalar v[],InsertMode addv)
1621: {
1623:   PetscInt       buf[8192],*bufm=0,*bufn=0,*jdxm,*jdxn;
1624:   PetscInt       j,i,dim = mat->stencil.dim,*dims = mat->stencil.dims+1,tmp;
1625:   PetscInt       *starts = mat->stencil.starts,*dxm = (PetscInt*)idxm,*dxn = (PetscInt*)idxn,sdim = dim - (1 - (PetscInt)mat->stencil.noc);

1628:   if (!m || !n) return(0); /* no values to insert */

1635:   if ((m+n) <= (PetscInt)(sizeof(buf)/sizeof(PetscInt))) {
1636:     jdxm = buf; jdxn = buf+m;
1637:   } else {
1638:     PetscMalloc2(m,&bufm,n,&bufn);
1639:     jdxm = bufm; jdxn = bufn;
1640:   }
1641:   for (i=0; i<m; i++) {
1642:     for (j=0; j<3-sdim; j++) dxm++;
1643:     tmp = *dxm++ - starts[0];
1644:     for (j=0; j<sdim-1; j++) {
1645:       if ((*dxm++ - starts[j+1]) < 0 || tmp < 0) tmp = -1;
1646:       else                                       tmp = tmp*dims[j] + *(dxm-1) - starts[j+1];
1647:     }
1648:     dxm++;
1649:     jdxm[i] = tmp;
1650:   }
1651:   for (i=0; i<n; i++) {
1652:     for (j=0; j<3-sdim; j++) dxn++;
1653:     tmp = *dxn++ - starts[0];
1654:     for (j=0; j<sdim-1; j++) {
1655:       if ((*dxn++ - starts[j+1]) < 0 || tmp < 0) tmp = -1;
1656:       else                                       tmp = tmp*dims[j] + *(dxn-1) - starts[j+1];
1657:     }
1658:     dxn++;
1659:     jdxn[i] = tmp;
1660:   }
1661:   MatSetValuesBlockedLocal(mat,m,jdxm,n,jdxn,v,addv);
1662:   PetscFree2(bufm,bufn);
1663: #if defined(PETSC_HAVE_CUSP)
1664:   if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
1665:     mat->valid_GPU_matrix = PETSC_CUSP_CPU;
1666:   }
1667: #elif defined(PETSC_HAVE_VIENNACL)
1668:   if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
1669:     mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
1670:   }
1671: #elif defined(PETSC_HAVE_VECCUDA)
1672:   if (mat->valid_GPU_matrix != PETSC_CUDA_UNALLOCATED) {
1673:     mat->valid_GPU_matrix = PETSC_CUDA_CPU;
1674:   }
1675: #endif
1676:   return(0);
1677: }

1679: /*@
1680:    MatSetStencil - Sets the grid information for setting values into a matrix via
1681:         MatSetValuesStencil()

1683:    Not Collective

1685:    Input Parameters:
1686: +  mat - the matrix
1687: .  dim - dimension of the grid, 1, 2, or 3
1688: .  dims - number of grid points in x, y, and z direction, including ghost points on your processor
1689: .  starts - starting point of ghost nodes on your processor in x, y, and z direction
1690: -  dof - number of degrees of freedom per node


1693:    Inspired by the structured grid interface to the HYPRE package
1694:    (http://www.llnl.gov/CASC/hypre)

1696:    For matrices generated with DMCreateMatrix() this routine is automatically called and so not needed by the
1697:    user.
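
   A minimal sketch for a 2d grid; the ghost-corner values gxs, gys, gxm, gym and the value of
   dof are illustrative and would typically come from DMDAGetGhostCorners() and the DMDA:
.vb
      PetscInt dims[2],starts[2];
      dims[0]   = gxm; dims[1]   = gym;      // ghosted local sizes in x and y
      starts[0] = gxs; starts[1] = gys;      // first ghost indices in x and y
      MatSetStencil(mat,2,dims,starts,dof);
.ve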

1699:    Level: beginner

1701:    Concepts: matrices^putting entries in

1703: .seealso: MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValuesBlocked(), MatSetValuesLocal()
1704:           MatSetValues(), MatSetValuesBlockedStencil(), MatSetValuesStencil()
1705: @*/
1706: PetscErrorCode MatSetStencil(Mat mat,PetscInt dim,const PetscInt dims[],const PetscInt starts[],PetscInt dof)
1707: {
1708:   PetscInt i;


1715:   mat->stencil.dim = dim + (dof > 1);
1716:   for (i=0; i<dim; i++) {
1717:     mat->stencil.dims[i]   = dims[dim-i-1];      /* copy the values in backwards */
1718:     mat->stencil.starts[i] = starts[dim-i-1];
1719:   }
1720:   mat->stencil.dims[dim]   = dof;
1721:   mat->stencil.starts[dim] = 0;
1722:   mat->stencil.noc         = (PetscBool)(dof == 1);
1723:   return(0);
1724: }

1726: /*@C
1727:    MatSetValuesBlocked - Inserts or adds a block of values into a matrix.

1729:    Not Collective

1731:    Input Parameters:
1732: +  mat - the matrix
1733: .  v - a logically two-dimensional array of values
1734: .  m, idxm - the number of block rows and their global block indices
1735: .  n, idxn - the number of block columns and their global block indices
1736: -  addv - either ADD_VALUES or INSERT_VALUES, where
1737:    ADD_VALUES adds values to any existing entries, and
1738:    INSERT_VALUES replaces existing entries with new values

1740:    Notes:
1741:    If you create the matrix yourself (that is not with a call to DMCreateMatrix()) then you MUST call
1742:    MatXXXXSetPreallocation() or MatSetUp() before using this routine.

1744:    The m and n count the NUMBER of blocks in the row direction and column direction,
1745:    NOT the total number of rows/columns; for example, if the block size is 2 and
1746:    you are passing in values for rows 2,3,4,5  then m would be 2 (not 4).
1747:    The values in idxm would be 1 2; that is the first index for each block divided by
1748:    the block size.

1750:    Note that you must call MatSetBlockSize() when constructing this matrix (before
1751:    preallocating it).

1753:    By default the values, v, are row-oriented, so the layout of
1754:    v is the same as for MatSetValues(). See MatSetOption() for other options.

1756:    Calls to MatSetValuesBlocked() with the INSERT_VALUES and ADD_VALUES
1757:    options cannot be mixed without intervening calls to the assembly
1758:    routines.

1760:    MatSetValuesBlocked() uses 0-based row and column numbers in Fortran
1761:    as well as in C.

1763:    Negative indices may be passed in idxm and idxn, these rows and columns are
1764:    simply ignored. This allows easily inserting element stiffness matrices
1765:    with homogeneous Dirichlet boundary conditions that you don't want represented
1766:    in the matrix.

1768:    Each time an entry is set within a sparse matrix via MatSetValues(),
1769:    internal searching must be done to determine where to place the
1770:    data in the matrix storage space.  By instead inserting blocks of
1771:    entries via MatSetValuesBlocked(), the overhead of matrix assembly is
1772:    reduced.

1774:    Example:
1775: $   Suppose m=n=2 and block size (bs) = 2. The array is
1776: $
1777: $   1  2  | 3  4
1778: $   5  6  | 7  8
1779: $   - - - | - - -
1780: $   9  10 | 11 12
1781: $   13 14 | 15 16
1782: $
1783: $   v[] should be passed in like
1784: $   v[] = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16]
1785: $
1786: $  If you are not using row oriented storage of v (that is you called MatSetOption(mat,MAT_ROW_ORIENTED,PETSC_FALSE)) then
1787: $   v[] = [1,5,9,13,2,6,10,14,3,7,11,15,4,8,12,16]
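
   A minimal sketch inserting the 4 by 4 array above (the block row and column indices 1 and 2
   are illustrative):
.vb
      PetscInt    idxm[2] = {1,2},idxn[2] = {1,2};    // block row and block column indices
      PetscScalar v[16]   = {1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16};
      MatSetValuesBlocked(mat,2,idxm,2,idxn,v,INSERT_VALUES);
.ve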

1789:    Level: intermediate

1791:    Concepts: matrices^putting entries in blocked

1793: .seealso: MatSetBlockSize(), MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValues(), MatSetValuesBlockedLocal()
1794: @*/
1795: PetscErrorCode MatSetValuesBlocked(Mat mat,PetscInt m,const PetscInt idxm[],PetscInt n,const PetscInt idxn[],const PetscScalar v[],InsertMode addv)
1796: {

1802:   if (!m || !n) return(0); /* no values to insert */
1806:   MatCheckPreallocated(mat,1);
1807:   if (mat->insertmode == NOT_SET_VALUES) {
1808:     mat->insertmode = addv;
1809:   }
1810: #if defined(PETSC_USE_DEBUG)
1811:   else if (mat->insertmode != addv) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Cannot mix add values and insert values");
1812:   if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
1813:   if (!mat->ops->setvaluesblocked && !mat->ops->setvalues) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
1814: #endif

1816:   if (mat->assembled) {
1817:     mat->was_assembled = PETSC_TRUE;
1818:     mat->assembled     = PETSC_FALSE;
1819:   }
1820:   PetscLogEventBegin(MAT_SetValues,mat,0,0,0);
1821:   if (mat->ops->setvaluesblocked) {
1822:     (*mat->ops->setvaluesblocked)(mat,m,idxm,n,idxn,v,addv);
1823:   } else {
1824:     PetscInt buf[8192],*bufr=0,*bufc=0,*iidxm,*iidxn;
1825:     PetscInt i,j,bs,cbs;
1826:     MatGetBlockSizes(mat,&bs,&cbs);
1827:     if (m*bs+n*cbs <= (PetscInt)(sizeof(buf)/sizeof(PetscInt))) {
1828:       iidxm = buf; iidxn = buf + m*bs;
1829:     } else {
1830:       PetscMalloc2(m*bs,&bufr,n*cbs,&bufc);
1831:       iidxm = bufr; iidxn = bufc;
1832:     }
1833:     for (i=0; i<m; i++) {
1834:       for (j=0; j<bs; j++) {
1835:         iidxm[i*bs+j] = bs*idxm[i] + j;
1836:       }
1837:     }
1838:     for (i=0; i<n; i++) {
1839:       for (j=0; j<cbs; j++) {
1840:         iidxn[i*cbs+j] = cbs*idxn[i] + j;
1841:       }
1842:     }
1843:     MatSetValues(mat,m*bs,iidxm,n*cbs,iidxn,v,addv);
1844:     PetscFree2(bufr,bufc);
1845:   }
1846:   PetscLogEventEnd(MAT_SetValues,mat,0,0,0);
1847: #if defined(PETSC_HAVE_CUSP)
1848:   if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
1849:     mat->valid_GPU_matrix = PETSC_CUSP_CPU;
1850:   }
1851: #elif defined(PETSC_HAVE_VIENNACL)
1852:   if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
1853:     mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
1854:   }
1855: #elif defined(PETSC_HAVE_VECCUDA)
1856:   if (mat->valid_GPU_matrix != PETSC_CUDA_UNALLOCATED) {
1857:     mat->valid_GPU_matrix = PETSC_CUDA_CPU;
1858:   }
1859: #endif
1860:   return(0);
1861: }

1863: /*@
1864:    MatGetValues - Gets a block of values from a matrix.

1866:    Not Collective; currently only returns a local block

1868:    Input Parameters:
1869: +  mat - the matrix
1870: .  v - a logically two-dimensional array for storing the values
1871: .  m, idxm - the number of rows and their global indices
1872: -  n, idxn - the number of columns and their global indices

1874:    Notes:
1875:    The user must allocate space (m*n PetscScalars) for the values, v.
1876:    The values, v, are then returned in a row-oriented format,
1877:    analogous to that used by default in MatSetValues().

1879:    MatGetValues() uses 0-based row and column numbers in
1880:    Fortran as well as in C.

1882:    MatGetValues() requires that the matrix has been assembled
1883:    with MatAssemblyBegin()/MatAssemblyEnd().  Thus, calls to
1884:    MatSetValues() and MatGetValues() CANNOT be made in succession
1885:    without intermediate matrix assembly.

1887:    Negative row or column indices will be ignored and those locations in v[] will be
1888:    left unchanged.
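
   A minimal sketch retrieving a 2 by 2 set of values (the row and column indices are
   illustrative and the rows must be owned by the calling process):
.vb
      PetscInt    idxm[2] = {0,1},idxn[2] = {0,2};
      PetscScalar v[4];                          // room for m*n = 4 values, row oriented
      MatGetValues(mat,2,idxm,2,idxn,v);         // v = {mat(0,0), mat(0,2), mat(1,0), mat(1,2)}
.ve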

1890:    Level: advanced

1892:    Concepts: matrices^accessing values

1894: .seealso: MatGetRow(), MatCreateSubMatrices(), MatSetValues()
1895: @*/
1896: PetscErrorCode MatGetValues(Mat mat,PetscInt m,const PetscInt idxm[],PetscInt n,const PetscInt idxn[],PetscScalar v[])
1897: {

1903:   if (!m || !n) return(0);
1907:   if (!mat->assembled) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
1908:   if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
1909:   if (!mat->ops->getvalues) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
1910:   MatCheckPreallocated(mat,1);

1912:   PetscLogEventBegin(MAT_GetValues,mat,0,0,0);
1913:   (*mat->ops->getvalues)(mat,m,idxm,n,idxn,v);
1914:   PetscLogEventEnd(MAT_GetValues,mat,0,0,0);
1915:   return(0);
1916: }

1918: /*@
1919:   MatSetValuesBatch - Adds (ADD_VALUES) many blocks of values into a matrix at once. The blocks must all be square and
1920:   the same size. Currently, this can only be called once and creates the given matrix.

1922:   Not Collective

1924:   Input Parameters:
1925: + mat - the matrix
1926: . nb - the number of blocks
1927: . bs - the number of rows (and columns) in each block
1928: . rows - a concatenation of the rows for each block
1929: - v - a concatenation of logically two-dimensional arrays of values

1931:   Notes:
1932:   In the future, we will extend this routine to handle rectangular blocks, and to allow multiple calls for a given matrix.
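
  A minimal sketch adding two 2 by 2 blocks (the row indices and values are illustrative):
.vb
      PetscInt    rows[4] = {0,1, 2,3};              // nb = 2 blocks, bs = 2 rows each
      PetscScalar v[8]    = {1,2,3,4, 5,6,7,8};      // nb blocks of bs*bs values each
      MatSetValuesBatch(mat,2,2,rows,v);
.ve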

1934:   Level: advanced

1936:   Concepts: matrices^putting entries in

1938: .seealso: MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValuesBlocked(), MatSetValuesLocal(),
1939:           InsertMode, INSERT_VALUES, ADD_VALUES, MatSetValues()
1940: @*/
1941: PetscErrorCode MatSetValuesBatch(Mat mat, PetscInt nb, PetscInt bs, PetscInt rows[], const PetscScalar v[])
1942: {

1950: #if defined(PETSC_USE_DEBUG)
1951:   if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
1952: #endif

1954:   PetscLogEventBegin(MAT_SetValuesBatch,mat,0,0,0);
1955:   if (mat->ops->setvaluesbatch) {
1956:     (*mat->ops->setvaluesbatch)(mat,nb,bs,rows,v);
1957:   } else {
1958:     PetscInt b;
1959:     for (b = 0; b < nb; ++b) {
1960:       MatSetValues(mat, bs, &rows[b*bs], bs, &rows[b*bs], &v[b*bs*bs], ADD_VALUES);
1961:     }
1962:   }
1963:   PetscLogEventEnd(MAT_SetValuesBatch,mat,0,0,0);
1964:   return(0);
1965: }

1967: /*@
1968:    MatSetLocalToGlobalMapping - Sets a local-to-global numbering for use by
1969:    the routine MatSetValuesLocal() to allow users to insert matrix entries
1970:    using a local (per-processor) numbering.

1972:    Not Collective

1974:    Input Parameters:
1975: +  x - the matrix
1976: .  rmapping - row mapping created with ISLocalToGlobalMappingCreate()   or ISLocalToGlobalMappingCreateIS()
1977: - cmapping - column mapping
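
   A minimal sketch using the same mapping for rows and columns (nlocal and globalindices are
   illustrative and describe the local-to-global numbering on this process):
.vb
      ISLocalToGlobalMapping ltog;
      ISLocalToGlobalMappingCreate(PETSC_COMM_WORLD,1,nlocal,globalindices,PETSC_COPY_VALUES,&ltog);
      MatSetLocalToGlobalMapping(x,ltog,ltog);     // same mapping for rows and columns
      ISLocalToGlobalMappingDestroy(&ltog);
.ve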

1979:    Level: intermediate

1981:    Concepts: matrices^local to global mapping
1982:    Concepts: local to global mapping^for matrices

1984: .seealso:  MatAssemblyBegin(), MatAssemblyEnd(), MatSetValues(), MatSetValuesLocal()
1985: @*/
1986: PetscErrorCode MatSetLocalToGlobalMapping(Mat x,ISLocalToGlobalMapping rmapping,ISLocalToGlobalMapping cmapping)
1987: {


1996:   if (x->ops->setlocaltoglobalmapping) {
1997:     (*x->ops->setlocaltoglobalmapping)(x,rmapping,cmapping);
1998:   } else {
1999:     PetscLayoutSetISLocalToGlobalMapping(x->rmap,rmapping);
2000:     PetscLayoutSetISLocalToGlobalMapping(x->cmap,cmapping);
2001:   }
2002:   return(0);
2003: }


2006: /*@
2007:    MatGetLocalToGlobalMapping - Gets the local-to-global numbering set by MatSetLocalToGlobalMapping()

2009:    Not Collective

2011:    Input Parameters:
2012: .  A - the matrix

2014:    Output Parameters:
2015: + rmapping - row mapping
2016: - cmapping - column mapping

2018:    Level: advanced

2020:    Concepts: matrices^local to global mapping
2021:    Concepts: local to global mapping^for matrices

2023: .seealso:  MatSetValuesLocal()
2024: @*/
2025: PetscErrorCode MatGetLocalToGlobalMapping(Mat A,ISLocalToGlobalMapping *rmapping,ISLocalToGlobalMapping *cmapping)
2026: {
2032:   if (rmapping) *rmapping = A->rmap->mapping;
2033:   if (cmapping) *cmapping = A->cmap->mapping;
2034:   return(0);
2035: }

2037: /*@
2038:    MatGetLayouts - Gets the PetscLayout objects for rows and columns

2040:    Not Collective

2042:    Input Parameters:
2043: .  A - the matrix

2045:    Output Parameters:
2046: + rmap - row layout
2047: - cmap - column layout
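
   A minimal sketch querying the owned row range through the row layout:
.vb
      PetscLayout rmap,cmap;
      PetscInt    rstart,rend;
      MatGetLayouts(A,&rmap,&cmap);
      PetscLayoutGetRange(rmap,&rstart,&rend);    // first and one past the last locally owned row
.ve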

2049:    Level: advanced

2051: .seealso:  MatCreateVecs(), MatGetLocalToGlobalMapping()
2052: @*/
2053: PetscErrorCode MatGetLayouts(Mat A,PetscLayout *rmap,PetscLayout *cmap)
2054: {
2060:   if (rmap) *rmap = A->rmap;
2061:   if (cmap) *cmap = A->cmap;
2062:   return(0);
2063: }

2065: /*@C
2066:    MatSetValuesLocal - Inserts or adds values into certain locations of a matrix,
2067:    using a local ordering of the nodes.

2069:    Not Collective

2071:    Input Parameters:
2072: +  mat - the matrix
2073: .  nrow, irow - number of rows and their local indices
2074: .  ncol, icol - number of columns and their local indices
2075: .  y -  a logically two-dimensional array of values
2076: -  addv - either INSERT_VALUES or ADD_VALUES, where
2077:    ADD_VALUES adds values to any existing entries, and
2078:    INSERT_VALUES replaces existing entries with new values

2080:    Notes:
2081:    If you create the matrix yourself (that is not with a call to DMCreateMatrix()) then you MUST call MatXXXXSetPreallocation() or
2082:       MatSetUp() before using this routine

2084:    If you create the matrix yourself (that is not with a call to DMCreateMatrix()) then you MUST call MatSetLocalToGlobalMapping() before using this routine

2086:    Calls to MatSetValuesLocal() with the INSERT_VALUES and ADD_VALUES
2087:    options cannot be mixed without intervening calls to the assembly
2088:    routines.

2090:    These values may be cached, so MatAssemblyBegin() and MatAssemblyEnd()
2091:    MUST be called after all calls to MatSetValuesLocal() have been completed.
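
   A minimal sketch (assuming a local-to-global mapping has already been attached with
   MatSetLocalToGlobalMapping(); the local indices and values are illustrative):
.vb
      PetscInt    irow[1] = {0},icol[2] = {0,1};     // local indices, translated through the mapping
      PetscScalar y[2]    = {2.0,-1.0};
      MatSetValuesLocal(mat,1,irow,2,icol,y,INSERT_VALUES);
      MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY);
      MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY);
.ve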

2093:    Level: intermediate

2095:    Concepts: matrices^putting entries in with local numbering

2097:    Developer Notes: This is labeled with C so that it does not automatically generate Fortran stubs and interfaces,
2098:                     because it requires multiple Fortran interfaces depending on which arguments are scalar or arrays.

2100: .seealso:  MatAssemblyBegin(), MatAssemblyEnd(), MatSetValues(), MatSetLocalToGlobalMapping(),
2101:            MatSetValueLocal()
2102: @*/
2103: PetscErrorCode MatSetValuesLocal(Mat mat,PetscInt nrow,const PetscInt irow[],PetscInt ncol,const PetscInt icol[],const PetscScalar y[],InsertMode addv)
2104: {

2110:   MatCheckPreallocated(mat,1);
2111:   if (!nrow || !ncol) return(0); /* no values to insert */
2115:   if (mat->insertmode == NOT_SET_VALUES) {
2116:     mat->insertmode = addv;
2117:   }
2118: #if defined(PETSC_USE_DEBUG)
2119:   else if (mat->insertmode != addv) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Cannot mix add values and insert values");
2120:   if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2121:   if (!mat->ops->setvalueslocal && !mat->ops->setvalues) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
2122: #endif

2124:   if (mat->assembled) {
2125:     mat->was_assembled = PETSC_TRUE;
2126:     mat->assembled     = PETSC_FALSE;
2127:   }
2128:   PetscLogEventBegin(MAT_SetValues,mat,0,0,0);
2129:   if (mat->ops->setvalueslocal) {
2130:     (*mat->ops->setvalueslocal)(mat,nrow,irow,ncol,icol,y,addv);
2131:   } else {
2132:     PetscInt buf[8192],*bufr=0,*bufc=0,*irowm,*icolm;
2133:     if ((nrow+ncol) <= (PetscInt)(sizeof(buf)/sizeof(PetscInt))) {
2134:       irowm = buf; icolm = buf+nrow;
2135:     } else {
2136:       PetscMalloc2(nrow,&bufr,ncol,&bufc);
2137:       irowm = bufr; icolm = bufc;
2138:     }
2139:     ISLocalToGlobalMappingApply(mat->rmap->mapping,nrow,irow,irowm);
2140:     ISLocalToGlobalMappingApply(mat->cmap->mapping,ncol,icol,icolm);
2141:     MatSetValues(mat,nrow,irowm,ncol,icolm,y,addv);
2142:     PetscFree2(bufr,bufc);
2143:   }
2144:   PetscLogEventEnd(MAT_SetValues,mat,0,0,0);
2145: #if defined(PETSC_HAVE_CUSP)
2146:   if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
2147:     mat->valid_GPU_matrix = PETSC_CUSP_CPU;
2148:   }
2149: #elif defined(PETSC_HAVE_VIENNACL)
2150:   if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
2151:     mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
2152:   }
2153: #elif defined(PETSC_HAVE_VECCUDA)
2154:   if (mat->valid_GPU_matrix != PETSC_CUDA_UNALLOCATED) {
2155:     mat->valid_GPU_matrix = PETSC_CUDA_CPU;
2156:   }
2157: #endif
2158:   return(0);
2159: }

2161: /*@C
2162:    MatSetValuesBlockedLocal - Inserts or adds values into certain locations of a matrix,
2163:    using a local ordering of the nodes a block at a time.

2165:    Not Collective

2167:    Input Parameters:
2168: +  x - the matrix
2169: .  nrow, irow - number of rows and their local indices
2170: .  ncol, icol - number of columns and their local indices
2171: .  y -  a logically two-dimensional array of values
2172: -  addv - either INSERT_VALUES or ADD_VALUES, where
2173:    ADD_VALUES adds values to any existing entries, and
2174:    INSERT_VALUES replaces existing entries with new values

2176:    Notes:
2177:    If you create the matrix yourself (that is not with a call to DMCreateMatrix()) then you MUST call MatXXXXSetPreallocation() or
2178:       MatSetUp() before using this routine

2180:    If you create the matrix yourself (that is not with a call to DMCreateMatrix()) then you MUST call MatSetBlockSize() and MatSetLocalToGlobalMapping()
2181:       before using this routineBefore calling MatSetValuesLocal(), the user must first set the

2183:    Calls to MatSetValuesBlockedLocal() with the INSERT_VALUES and ADD_VALUES
2184:    options cannot be mixed without intervening calls to the assembly
2185:    routines.

2187:    These values may be cached, so MatAssemblyBegin() and MatAssemblyEnd()
2188:    MUST be called after all calls to MatSetValuesBlockedLocal() have been completed.

2190:    Level: intermediate

2192:    Developer Notes: This is labeled with C so that it does not automatically generate Fortran stubs and interfaces,
2193:                     because it requires multiple Fortran interfaces depending on which arguments are scalar or arrays.

2195:    Concepts: matrices^putting blocked values in with local numbering

2197: .seealso:  MatSetBlockSize(), MatSetLocalToGlobalMapping(), MatAssemblyBegin(), MatAssemblyEnd(),
2198:            MatSetValuesLocal(),  MatSetValuesBlocked()
2199: @*/
2200: PetscErrorCode MatSetValuesBlockedLocal(Mat mat,PetscInt nrow,const PetscInt irow[],PetscInt ncol,const PetscInt icol[],const PetscScalar y[],InsertMode addv)
2201: {

2207:   MatCheckPreallocated(mat,1);
2208:   if (!nrow || !ncol) return(0); /* no values to insert */
2212:   if (mat->insertmode == NOT_SET_VALUES) {
2213:     mat->insertmode = addv;
2214:   }
2215: #if defined(PETSC_USE_DEBUG)
2216:   else if (mat->insertmode != addv) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Cannot mix add values and insert values");
2217:   if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2218:   if (!mat->ops->setvaluesblockedlocal && !mat->ops->setvaluesblocked && !mat->ops->setvalueslocal && !mat->ops->setvalues) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
2219: #endif

2221:   if (mat->assembled) {
2222:     mat->was_assembled = PETSC_TRUE;
2223:     mat->assembled     = PETSC_FALSE;
2224:   }
2225:   PetscLogEventBegin(MAT_SetValues,mat,0,0,0);
2226:   if (mat->ops->setvaluesblockedlocal) {
2227:     (*mat->ops->setvaluesblockedlocal)(mat,nrow,irow,ncol,icol,y,addv);
2228:   } else {
2229:     PetscInt buf[8192],*bufr=0,*bufc=0,*irowm,*icolm;
2230:     if ((nrow+ncol) <= (PetscInt)(sizeof(buf)/sizeof(PetscInt))) {
2231:       irowm = buf; icolm = buf + nrow;
2232:     } else {
2233:       PetscMalloc2(nrow,&bufr,ncol,&bufc);
2234:       irowm = bufr; icolm = bufc;
2235:     }
2236:     ISLocalToGlobalMappingApplyBlock(mat->rmap->mapping,nrow,irow,irowm);
2237:     ISLocalToGlobalMappingApplyBlock(mat->cmap->mapping,ncol,icol,icolm);
2238:     MatSetValuesBlocked(mat,nrow,irowm,ncol,icolm,y,addv);
2239:     PetscFree2(bufr,bufc);
2240:   }
2241:   PetscLogEventEnd(MAT_SetValues,mat,0,0,0);
2242: #if defined(PETSC_HAVE_CUSP)
2243:   if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
2244:     mat->valid_GPU_matrix = PETSC_CUSP_CPU;
2245:   }
2246: #elif defined(PETSC_HAVE_VIENNACL)
2247:   if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
2248:     mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
2249:   }
2250: #elif defined(PETSC_HAVE_VECCUDA)
2251:   if (mat->valid_GPU_matrix != PETSC_CUDA_UNALLOCATED) {
2252:     mat->valid_GPU_matrix = PETSC_CUDA_CPU;
2253:   }
2254: #endif
2255:   return(0);
2256: }

2258: /*@
2259:    MatMultDiagonalBlock - Computes the matrix-vector product, y = Dx, where D is defined by the inode or block structure of the diagonal

2261:    Collective on Mat and Vec

2263:    Input Parameters:
2264: +  mat - the matrix
2265: -  x   - the vector to be multiplied

2267:    Output Parameters:
2268: .  y - the result

2270:    Notes:
2271:    The vectors x and y cannot be the same.  I.e., one cannot
2272:    call MatMultDiagonalBlock(A,y,y).

2274:    Level: developer

2276:    Concepts: matrix-vector product

2278: .seealso: MatMultTranspose(), MatMultAdd(), MatMultTransposeAdd()
2279: @*/
2280: PetscErrorCode MatMultDiagonalBlock(Mat mat,Vec x,Vec y)
2281: {


2290:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2291:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2292:   if (x == y) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"x and y must be different vectors");
2293:   MatCheckPreallocated(mat,1);

2295:   if (!mat->ops->multdiagonalblock) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"This matrix type does not have a multiply defined");
2296:   (*mat->ops->multdiagonalblock)(mat,x,y);
2297:   PetscObjectStateIncrease((PetscObject)y);
2298:   return(0);
2299: }

2301: /* --------------------------------------------------------*/
2302: /*@
2303:    MatMult - Computes the matrix-vector product, y = Ax.

2305:    Neighbor-wise Collective on Mat and Vec

2307:    Input Parameters:
2308: +  mat - the matrix
2309: -  x   - the vector to be multiplied

2311:    Output Parameters:
2312: .  y - the result

2314:    Notes:
2315:    The vectors x and y cannot be the same.  I.e., one cannot
2316:    call MatMult(A,y,y).
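
   A minimal sketch (the vectors are created to conform to the matrix layout):
.vb
      Vec x,y;
      MatCreateVecs(mat,&x,&y);     // x conforms to the columns of mat, y to its rows
      VecSet(x,1.0);
      MatMult(mat,x,y);             // y = mat*x
      VecDestroy(&x);
      VecDestroy(&y);
.ve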

2318:    Level: beginner

2320:    Concepts: matrix-vector product

2322: .seealso: MatMultTranspose(), MatMultAdd(), MatMultTransposeAdd()
2323: @*/
2324: PetscErrorCode MatMult(Mat mat,Vec x,Vec y)
2325: {

2333:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2334:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2335:   if (x == y) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"x and y must be different vectors");
2336: #if !defined(PETSC_HAVE_CONSTRAINTS)
2337:   if (mat->cmap->N != x->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->cmap->N,x->map->N);
2338:   if (mat->rmap->N != y->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec y: global dim %D %D",mat->rmap->N,y->map->N);
2339:   if (mat->rmap->n != y->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec y: local dim %D %D",mat->rmap->n,y->map->n);
2340: #endif
2341:   VecLocked(y,3);
2342:   if (mat->erroriffailure) {VecValidValues(x,2,PETSC_TRUE);}
2343:   MatCheckPreallocated(mat,1);

2345:   VecLockPush(x);
2346:   if (!mat->ops->mult) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"This matrix type does not have a multiply defined");
2347:   PetscLogEventBegin(MAT_Mult,mat,x,y,0);
2348:   (*mat->ops->mult)(mat,x,y);
2349:   PetscLogEventEnd(MAT_Mult,mat,x,y,0);
2350:   if (mat->erroriffailure) {VecValidValues(y,3,PETSC_FALSE);}
2351:   VecLockPop(x);
2352:   return(0);
2353: }

2355: /*@
2356:    MatMultTranspose - Computes matrix transpose times a vector.

2358:    Neighbor-wise Collective on Mat and Vec

2360:    Input Parameters:
2361: +  mat - the matrix
2362: -  x   - the vector to be multiplied

2364:    Output Parameters:
2365: .  y - the result

2367:    Notes:
2368:    The vectors x and y cannot be the same.  I.e., one cannot
2369:    call MatMultTranspose(A,y,y).

2371:    For complex numbers this does NOT compute the Hermitian (complex conjugate) transpose multiply;
2372:    use MatMultHermitianTranspose()

2374:    Level: beginner

2376:    Concepts: matrix vector product^transpose

2378: .seealso: MatMult(), MatMultAdd(), MatMultTransposeAdd(), MatMultHermitianTranspose(), MatTranspose()
2379: @*/
2380: PetscErrorCode MatMultTranspose(Mat mat,Vec x,Vec y)
2381: {


2390:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2391:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2392:   if (x == y) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"x and y must be different vectors");
2393: #if !defined(PETSC_HAVE_CONSTRAINTS)
2394:   if (mat->rmap->N != x->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->rmap->N,x->map->N);
2395:   if (mat->cmap->N != y->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec y: global dim %D %D",mat->cmap->N,y->map->N);
2396: #endif
2397:   if (mat->erroriffailure) {VecValidValues(x,2,PETSC_TRUE);}
2398:   MatCheckPreallocated(mat,1);

2400:   if (!mat->ops->multtranspose) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"This matrix type does not have a multiply transpose defined");
2401:   PetscLogEventBegin(MAT_MultTranspose,mat,x,y,0);
2402:   VecLockPush(x);
2403:   (*mat->ops->multtranspose)(mat,x,y);
2404:   VecLockPop(x);
2405:   PetscLogEventEnd(MAT_MultTranspose,mat,x,y,0);
2406:   PetscObjectStateIncrease((PetscObject)y);
2407:   if (mat->erroriffailure) {VecValidValues(y,3,PETSC_FALSE);}
2408:   return(0);
2409: }

2411: /*@
2412:    MatMultHermitianTranspose - Computes matrix Hermitian transpose times a vector.

2414:    Neighbor-wise Collective on Mat and Vec

2416:    Input Parameters:
2417: +  mat - the matrix
2418: -  x   - the vector to be multiplied

2420:    Output Parameters:
2421: .  y - the result

2423:    Notes:
2424:    The vectors x and y cannot be the same.  I.e., one cannot
2425:    call MatMultHermitianTranspose(A,y,y).

2427:    Also called the conjugate transpose, complex conjugate transpose, or adjoint.

2429:    For real numbers MatMultTranspose() and MatMultHermitianTranspose() are identical.

2431:    Level: beginner

2433:    Concepts: matrix vector product^transpose

2435: .seealso: MatMult(), MatMultAdd(), MatMultHermitianTransposeAdd(), MatMultTranspose()
2436: @*/
2437: PetscErrorCode MatMultHermitianTranspose(Mat mat,Vec x,Vec y)
2438: {
2440:   Vec            w;


2448:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2449:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2450:   if (x == y) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"x and y must be different vectors");
2451: #if !defined(PETSC_HAVE_CONSTRAINTS)
2452:   if (mat->rmap->N != x->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->rmap->N,x->map->N);
2453:   if (mat->cmap->N != y->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec y: global dim %D %D",mat->cmap->N,y->map->N);
2454: #endif
2455:   MatCheckPreallocated(mat,1);

2457:   PetscLogEventBegin(MAT_MultHermitianTranspose,mat,x,y,0);
2458:   if (mat->ops->multhermitiantranspose) {
2459:     VecLockPush(x);
2460:     (*mat->ops->multhermitiantranspose)(mat,x,y);
2461:     VecLockPop(x);
2462:   } else {
2463:     VecDuplicate(x,&w);
2464:     VecCopy(x,w);
2465:     VecConjugate(w);
2466:     MatMultTranspose(mat,w,y);
2467:     VecDestroy(&w);
2468:     VecConjugate(y);
2469:   }
2470:   PetscLogEventEnd(MAT_MultHermitianTranspose,mat,x,y,0);
2471:   PetscObjectStateIncrease((PetscObject)y);
2472:   return(0);
2473: }

2475: /*@
2476:     MatMultAdd -  Computes v3 = v2 + A * v1.

2478:     Neighbor-wise Collective on Mat and Vec

2480:     Input Parameters:
2481: +   mat - the matrix
2482: -   v1, v2 - the vectors

2484:     Output Parameters:
2485: .   v3 - the result

2487:     Notes:
2488:     The vectors v1 and v3 cannot be the same.  I.e., one cannot
2489:     call MatMultAdd(A,v1,v2,v1).

2491:     Level: beginner

2493:     Concepts: matrix vector product^addition

2495: .seealso: MatMultTranspose(), MatMult(), MatMultTransposeAdd()
2496: @*/
2497: PetscErrorCode MatMultAdd(Mat mat,Vec v1,Vec v2,Vec v3)
2498: {


2508:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2509:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2510:   if (mat->cmap->N != v1->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec v1: global dim %D %D",mat->cmap->N,v1->map->N);
2511:   /* if (mat->rmap->N != v2->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec v2: global dim %D %D",mat->rmap->N,v2->map->N);
2512:      if (mat->rmap->N != v3->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec v3: global dim %D %D",mat->rmap->N,v3->map->N); */
2513:   if (mat->rmap->n != v3->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec v3: local dim %D %D",mat->rmap->n,v3->map->n);
2514:   if (mat->rmap->n != v2->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec v2: local dim %D %D",mat->rmap->n,v2->map->n);
2515:   if (v1 == v3) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"v1 and v3 must be different vectors");
2516:   MatCheckPreallocated(mat,1);

2518:   if (!mat->ops->multadd) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"No MatMultAdd() for matrix type '%s'",((PetscObject)mat)->type_name);
2519:   PetscLogEventBegin(MAT_MultAdd,mat,v1,v2,v3);
2520:   VecLockPush(v1);
2521:   (*mat->ops->multadd)(mat,v1,v2,v3);
2522:   VecLockPop(v1);
2523:   PetscLogEventEnd(MAT_MultAdd,mat,v1,v2,v3);
2524:   PetscObjectStateIncrease((PetscObject)v3);
2525:   return(0);
2526: }

2528: /*@
2529:    MatMultTransposeAdd - Computes v3 = v2 + A' * v1.

2531:    Neighbor-wise Collective on Mat and Vec

2533:    Input Parameters:
2534: +  mat - the matrix
2535: -  v1, v2 - the vectors

2537:    Output Parameters:
2538: .  v3 - the result

2540:    Notes:
2541:    The vectors v1 and v3 cannot be the same.  I.e., one cannot
2542:    call MatMultTransposeAdd(A,v1,v2,v1).

2544:    Level: beginner

2546:    Concepts: matrix vector product^transpose and addition

2548: .seealso: MatMultTranspose(), MatMultAdd(), MatMult()
2549: @*/
2550: PetscErrorCode MatMultTransposeAdd(Mat mat,Vec v1,Vec v2,Vec v3)
2551: {


2561:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2562:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2563:   if (!mat->ops->multtransposeadd) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
2564:   if (v1 == v3) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"v1 and v3 must be different vectors");
2565:   if (mat->rmap->N != v1->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec v1: global dim %D %D",mat->rmap->N,v1->map->N);
2566:   if (mat->cmap->N != v2->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec v2: global dim %D %D",mat->cmap->N,v2->map->N);
2567:   if (mat->cmap->N != v3->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec v3: global dim %D %D",mat->cmap->N,v3->map->N);
2568:   MatCheckPreallocated(mat,1);

2570:   PetscLogEventBegin(MAT_MultTransposeAdd,mat,v1,v2,v3);
2571:   VecLockPush(v1);
2572:   (*mat->ops->multtransposeadd)(mat,v1,v2,v3);
2573:   VecLockPop(v1);
2574:   PetscLogEventEnd(MAT_MultTransposeAdd,mat,v1,v2,v3);
2575:   PetscObjectStateIncrease((PetscObject)v3);
2576:   return(0);
2577: }

2579: /*@
2580:    MatMultHermitianTransposeAdd - Computes v3 = v2 + A^H * v1.

2582:    Neighbor-wise Collective on Mat and Vec

2584:    Input Parameters:
2585: +  mat - the matrix
2586: -  v1, v2 - the vectors

2588:    Output Parameters:
2589: .  v3 - the result

2591:    Notes:
2592:    The vectors v1 and v3 cannot be the same.  I.e., one cannot
2593:    call MatMultHermitianTransposeAdd(A,v1,v2,v1).

2595:    Level: beginner

2597:    Concepts: matrix vector product^transpose and addition

2599: .seealso: MatMultHermitianTranspose(), MatMultTranspose(), MatMultAdd(), MatMult()
2600: @*/
2601: PetscErrorCode MatMultHermitianTransposeAdd(Mat mat,Vec v1,Vec v2,Vec v3)
2602: {


2612:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2613:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2614:   if (v1 == v3) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"v1 and v3 must be different vectors");
2615:   if (mat->rmap->N != v1->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec v1: global dim %D %D",mat->rmap->N,v1->map->N);
2616:   if (mat->cmap->N != v2->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec v2: global dim %D %D",mat->cmap->N,v2->map->N);
2617:   if (mat->cmap->N != v3->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec v3: global dim %D %D",mat->cmap->N,v3->map->N);
2618:   MatCheckPreallocated(mat,1);

2620:   PetscLogEventBegin(MAT_MultHermitianTransposeAdd,mat,v1,v2,v3);
2621:   VecLockPush(v1);
2622:   if (mat->ops->multhermitiantransposeadd) {
2623:     (*mat->ops->multhermitiantransposeadd)(mat,v1,v2,v3);
2624:    } else {
2625:     Vec w,z;
2626:     VecDuplicate(v1,&w);
2627:     VecCopy(v1,w);
2628:     VecConjugate(w);
2629:     VecDuplicate(v3,&z);
2630:     MatMultTranspose(mat,w,z);
2631:     VecDestroy(&w);
2632:     VecConjugate(z);
2633:     VecWAXPY(v3,1.0,v2,z);
2634:     VecDestroy(&z);
2635:   }
2636:   VecLockPop(v1);
2637:   PetscLogEventEnd(MAT_MultHermitianTransposeAdd,mat,v1,v2,v3);
2638:   PetscObjectStateIncrease((PetscObject)v3);
2639:   return(0);
2640: }

2642: /*@
2643:    MatMultConstrained - The inner multiplication routine for a
2644:    constrained matrix P^T A P.

2646:    Neighbor-wise Collective on Mat and Vec

2648:    Input Parameters:
2649: +  mat - the matrix
2650: -  x   - the vector to be multiplied

2652:    Output Parameters:
2653: .  y - the result

2655:    Notes:
2656:    The vectors x and y cannot be the same.  I.e., one cannot
2657:    call MatMultConstrained(A,y,y).

2659:    Level: beginner

2661: .keywords: matrix, multiply, matrix-vector product, constraint
2662: .seealso: MatMult(), MatMultTranspose(), MatMultAdd(), MatMultTransposeAdd()
2663: @*/
2664: PetscErrorCode MatMultConstrained(Mat mat,Vec x,Vec y)
2665: {

2672:   if (!mat->assembled) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2673:   if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2674:   if (x == y) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"x and y must be different vectors");
2675:   if (mat->cmap->N != x->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->cmap->N,x->map->N);
2676:   if (mat->rmap->N != y->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec y: global dim %D %D",mat->rmap->N,y->map->N);
2677:   if (mat->rmap->n != y->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec y: local dim %D %D",mat->rmap->n,y->map->n);

2679:   PetscLogEventBegin(MAT_MultConstrained,mat,x,y,0);
2680:   VecLockPush(x);
2681:   (*mat->ops->multconstrained)(mat,x,y);
2682:   VecLockPop(x);
2683:   PetscLogEventEnd(MAT_MultConstrained,mat,x,y,0);
2684:   PetscObjectStateIncrease((PetscObject)y);
2685:   return(0);
2686: }

2688: /*@
2689:    MatMultTransposeConstrained - The inner multiplication routine for a
2690:    constrained matrix P^T A^T P.

2692:    Neighbor-wise Collective on Mat and Vec

2694:    Input Parameters:
2695: +  mat - the matrix
2696: -  x   - the vector to be multiplied

2698:    Output Parameters:
2699: .  y - the result

2701:    Notes:
2702:    The vectors x and y cannot be the same.  I.e., one cannot
2703:    call MatMultTransposeConstrained(A,y,y).

2705:    Level: beginner

2707: .keywords: matrix, multiply, matrix-vector product, constraint
2708: .seealso: MatMult(), MatMultTranspose(), MatMultAdd(), MatMultTransposeAdd()
2709: @*/
2710: PetscErrorCode MatMultTransposeConstrained(Mat mat,Vec x,Vec y)
2711: {

2718:   if (!mat->assembled) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2719:   if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2720:   if (x == y) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"x and y must be different vectors");
2721:   if (mat->rmap->N != x->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->rmap->N,x->map->N);
2722:   if (mat->cmap->N != y->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec y: global dim %D %D",mat->cmap->N,y->map->N);

2724:   PetscLogEventBegin(MAT_MultConstrained,mat,x,y,0);
2725:   (*mat->ops->multtransposeconstrained)(mat,x,y);
2726:   PetscLogEventEnd(MAT_MultConstrained,mat,x,y,0);
2727:   PetscObjectStateIncrease((PetscObject)y);
2728:   return(0);
2729: }

2731: /*@C
2732:    MatGetFactorType - gets the type of factorization of a matrix

2734:    Not Collective

2737:    Input Parameters:
2738: .  mat - the matrix

2740:    Output Parameters:
2741: .  t - the type, one of MAT_FACTOR_NONE, MAT_FACTOR_LU, MAT_FACTOR_CHOLESKY, MAT_FACTOR_ILU, MAT_FACTOR_ICC, MAT_FACTOR_ILUDT

2743:     Level: intermediate

2745: .seealso:    MatFactorType, MatGetFactor()
2746: @*/
2747: PetscErrorCode MatGetFactorType(Mat mat,MatFactorType *t)
2748: {
2752:   *t = mat->factortype;
2753:   return(0);
2754: }

2756: /* ------------------------------------------------------------*/
2757: /*@C
2758:    MatGetInfo - Returns information about matrix storage (number of
2759:    nonzeros, memory, etc.).

2761:    Collective on Mat if MAT_GLOBAL_MAX or MAT_GLOBAL_SUM is used as the flag

2763:    Input Parameters:
2764: .  mat - the matrix

2766:    Output Parameters:
2767: +  flag - flag indicating the type of parameters to be returned
2768:    (MAT_LOCAL - local matrix, MAT_GLOBAL_MAX - maximum over all processors,
2769:    MAT_GLOBAL_SUM - sum over all processors)
2770: -  info - matrix information context

2772:    Notes:
2773:    The MatInfo context contains a variety of matrix data, including
2774:    number of nonzeros allocated and used, number of mallocs during
2775:    matrix assembly, etc.  Additional information for factored matrices
2776:    is provided (such as the fill ratio, number of mallocs during
2777:    factorization, etc.).  Much of this info is printed to PETSC_STDOUT
2778:    when using the runtime options
2779: $       -info -mat_view ::ascii_info

2781:    Example for C/C++ Users:
2782:    See the file ${PETSC_DIR}/include/petscmat.h for a complete list of
2783:    data within the MatInfo context.  For example,
2784: .vb
2785:       MatInfo info;
2786:       Mat     A;
2787:       double  mal, nz_a, nz_u;

2789:       MatGetInfo(A,MAT_LOCAL,&info);
2790:       mal  = info.mallocs;
2791:       nz_a = info.nz_allocated;
2792: .ve

2794:    Example for Fortran Users:
2795:    Fortran users should declare info as a double precision
2796:    array of dimension MAT_INFO_SIZE, and then extract the parameters
2797:    of interest.  See the file ${PETSC_DIR}/include/petsc/finclude/petscmat.h
2798:    for a complete list of parameter names.
2799: .vb
2800:       double  precision info(MAT_INFO_SIZE)
2801:       double  precision mal, nz_a
2802:       Mat     A
2803:       integer ierr

2805:       call MatGetInfo(A,MAT_LOCAL,info,ierr)
2806:       mal = info(MAT_INFO_MALLOCS)
2807:       nz_a = info(MAT_INFO_NZ_ALLOCATED)
2808: .ve

2810:     Level: intermediate

2812:     Concepts: matrices^getting information on

2814:     Developer Note: fortran interface is not autogenerated as the f90
2815:     interface definition cannot be generated correctly [due to MatInfo]

2817: .seealso: MatStashGetInfo()

2819: @*/
2820: PetscErrorCode MatGetInfo(Mat mat,MatInfoType flag,MatInfo *info)
2821: {

2828:   if (!mat->ops->getinfo) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
2829:   MatCheckPreallocated(mat,1);
2830:   (*mat->ops->getinfo)(mat,flag,info);
2831:   return(0);
2832: }

2834: /*
2835:    This is used by external packages where it is not easy to get the info from the actual
2836:    matrix factorization.
2837: */
2838: PetscErrorCode MatGetInfo_External(Mat A,MatInfoType flag,MatInfo *info)
2839: {

2843:   PetscMemzero(info,sizeof(MatInfo));
2844:   return(0);
2845: }

2847: /* ----------------------------------------------------------*/

2849: /*@C
2850:    MatLUFactor - Performs in-place LU factorization of matrix.

2852:    Collective on Mat

2854:    Input Parameters:
2855: +  mat - the matrix
2856: .  row - row permutation
2857: .  col - column permutation
2858: -  info - options for factorization, includes
2859: $          fill - expected fill as ratio of original fill.
2860: $          dtcol - pivot tolerance (0 no pivot, 1 full column pivoting)
2861: $                   Run with the option -info to determine an optimal value to use

2863:    Notes:
2864:    Most users should employ the simplified KSP interface for linear solvers
2865:    instead of working directly with matrix algebra routines such as this.
2866:    See, e.g., KSPCreate().

2868:    This changes the state of the matrix to a factored matrix; it cannot be used
2869:    for example with MatSetValues() unless one first calls MatSetUnfactored().
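
   A minimal sketch of in-place factorization followed by a solve, for matrix types that support
   in-place LU (the vectors b and x and the choice of ordering are illustrative):
.vb
      IS row,col;
      MatGetOrdering(mat,MATORDERINGNATURAL,&row,&col);
      MatLUFactor(mat,row,col,NULL);        // mat now holds its own LU factors
      MatSolve(mat,b,x);
.ve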

2871:    Level: developer

2873:    Concepts: matrices^LU factorization

2875: .seealso: MatLUFactorSymbolic(), MatLUFactorNumeric(), MatCholeskyFactor(),
2876:           MatGetOrdering(), MatSetUnfactored(), MatFactorInfo, MatGetFactor()

2878:     Developer Note: fortran interface is not autogenerated as the f90
2879:     interface definition cannot be generated correctly [due to MatFactorInfo]

2881: @*/
2882: PetscErrorCode MatLUFactor(Mat mat,IS row,IS col,const MatFactorInfo *info)
2883: {
2885:   MatFactorInfo  tinfo;

2893:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2894:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2895:   if (!mat->ops->lufactor) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
2896:   MatCheckPreallocated(mat,1);
2897:   if (!info) {
2898:     MatFactorInfoInitialize(&tinfo);
2899:     info = &tinfo;
2900:   }

2902:   PetscLogEventBegin(MAT_LUFactor,mat,row,col,0);
2903:   (*mat->ops->lufactor)(mat,row,col,info);
2904:   PetscLogEventEnd(MAT_LUFactor,mat,row,col,0);
2905:   PetscObjectStateIncrease((PetscObject)mat);
2906:   return(0);
2907: }

2909: /*@C
2910:    MatILUFactor - Performs in-place ILU factorization of matrix.

2912:    Collective on Mat

2914:    Input Parameters:
2915: +  mat - the matrix
2916: .  row - row permutation
2917: .  col - column permutation
2918: -  info - structure containing
2919: $      levels - number of levels of fill.
2920: $      expected fill - as ratio of original fill.
2921: $      1 or 0 - indicating force fill on diagonal (improves robustness for matrices
2922:                 missing diagonal entries)

2924:    Notes:
2925:    Probably really in-place only when level of fill is zero, otherwise allocates
2926:    new space to store factored matrix and deletes previous memory.

2928:    Most users should employ the simplified KSP interface for linear solvers
2929:    instead of working directly with matrix algebra routines such as this.
2930:    See, e.g., KSPCreate().

2932:    Level: developer

2934:    Concepts: matrices^ILU factorization

2936: .seealso: MatILUFactorSymbolic(), MatLUFactorNumeric(), MatCholeskyFactor(), MatFactorInfo

2938:     Developer Note: fortran interface is not autogenerated as the f90
2939:     interface definition cannot be generated correctly [due to MatFactorInfo]

2941: @*/
2942: PetscErrorCode MatILUFactor(Mat mat,IS row,IS col,const MatFactorInfo *info)
2943: {

2952:   if (mat->rmap->N != mat->cmap->N) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONG,"matrix must be square");
2953:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2954:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2955:   if (!mat->ops->ilufactor) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
2956:   MatCheckPreallocated(mat,1);

2958:   PetscLogEventBegin(MAT_ILUFactor,mat,row,col,0);
2959:   (*mat->ops->ilufactor)(mat,row,col,info);
2960:   PetscLogEventEnd(MAT_ILUFactor,mat,row,col,0);
2961:   PetscObjectStateIncrease((PetscObject)mat);
2962:   return(0);
2963: }

2965: /*@C
2966:    MatLUFactorSymbolic - Performs symbolic LU factorization of matrix.
2967:    Call this routine before calling MatLUFactorNumeric().

2969:    Collective on Mat

2971:    Input Parameters:
2972: +  fact - the factor matrix obtained with MatGetFactor()
2973: .  mat - the matrix
2974: .  row, col - row and column permutations
2975: -  info - options for factorization, includes
2976: $          fill - expected fill as ratio of original fill.
2977: $          dtcol - pivot tolerance (0 no pivot, 1 full column pivoting)
2978: $                   Run with the option -info to determine an optimal value to use


2981:    Notes: See Users-Manual: ch_mat for additional information about choosing the fill factor for better efficiency.

2983:    Most users should employ the simplified KSP interface for linear solvers
2984:    instead of working directly with matrix algebra routines such as this.
2985:    See, e.g., KSPCreate().
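
   A sketch of the typical out-of-place factorization sequence (the vectors b and x, the ordering,
   the solver package, and the fill value are illustrative):
.vb
      Mat           fact;
      IS            row,col;
      MatFactorInfo info;
      MatFactorInfoInitialize(&info);
      info.fill = 2.0;                               // expected fill ratio, illustrative
      MatGetOrdering(mat,MATORDERINGND,&row,&col);
      MatGetFactor(mat,MATSOLVERPETSC,MAT_FACTOR_LU,&fact);
      MatLUFactorSymbolic(fact,mat,row,col,&info);
      MatLUFactorNumeric(fact,mat,&info);
      MatSolve(fact,b,x);
.ve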

2987:    Level: developer

2989:    Concepts: matrices^LU symbolic factorization

2991: .seealso: MatLUFactor(), MatLUFactorNumeric(), MatCholeskyFactor(), MatFactorInfo, MatFactorInfoInitialize()

2993:     Developer Note: fortran interface is not autogenerated as the f90
2994:     interface definition cannot be generated correctly [due to MatFactorInfo]

2996: @*/
2997: PetscErrorCode MatLUFactorSymbolic(Mat fact,Mat mat,IS row,IS col,const MatFactorInfo *info)
2998: {

3008:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
3009:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
3010:   if (!(fact)->ops->lufactorsymbolic) {
3011:     const MatSolverPackage spackage;
3012:     MatFactorGetSolverPackage(fact,&spackage);
3013:     SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Matrix type %s symbolic LU using solver package %s",((PetscObject)mat)->type_name,spackage);
3014:   }
3015:   MatCheckPreallocated(mat,2);

3017:   PetscLogEventBegin(MAT_LUFactorSymbolic,mat,row,col,0);
3018:   (fact->ops->lufactorsymbolic)(fact,mat,row,col,info);
3019:   PetscLogEventEnd(MAT_LUFactorSymbolic,mat,row,col,0);
3020:   PetscObjectStateIncrease((PetscObject)fact);
3021:   return(0);
3022: }

3024: /*@C
3025:    MatLUFactorNumeric - Performs numeric LU factorization of a matrix.
3026:    Call this routine after first calling MatLUFactorSymbolic().

3028:    Collective on Mat

3030:    Input Parameters:
3031: +  fact - the factor matrix obtained with MatGetFactor()
3032: .  mat - the matrix
3033: -  info - options for factorization

3035:    Notes:
3036:    See MatLUFactor() for in-place factorization.  See
3037:    MatCholeskyFactorNumeric() for the symmetric, positive definite case.

3039:    Most users should employ the simplified KSP interface for linear solvers
3040:    instead of working directly with matrix algebra routines such as this.
3041:    See, e.g., KSPCreate().

3043:    Level: developer

3045:    Concepts: matrices^LU numeric factorization

3047: .seealso: MatLUFactorSymbolic(), MatLUFactor(), MatCholeskyFactor()

3049:     Developer Note: fortran interface is not autogenerated as the f90
3050:     interface definition cannot be generated correctly [due to MatFactorInfo]

3052: @*/
3053: PetscErrorCode MatLUFactorNumeric(Mat fact,Mat mat,const MatFactorInfo *info)
3054: {

3062:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
3063:   if (mat->rmap->N != (fact)->rmap->N || mat->cmap->N != (fact)->cmap->N) SETERRQ4(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Mat fact: global dimensions are different %D should = %D %D should = %D",mat->rmap->N,(fact)->rmap->N,mat->cmap->N,(fact)->cmap->N);

3065:   if (!(fact)->ops->lufactornumeric) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s numeric LU",((PetscObject)mat)->type_name);
3066:   MatCheckPreallocated(mat,2);
3067:   PetscLogEventBegin(MAT_LUFactorNumeric,mat,fact,0,0);
3068:   (fact->ops->lufactornumeric)(fact,mat,info);
3069:   PetscLogEventEnd(MAT_LUFactorNumeric,mat,fact,0,0);
3070:   MatViewFromOptions(fact,NULL,"-mat_factor_view");
3071:   PetscObjectStateIncrease((PetscObject)fact);
3072:   return(0);
3073: }

3075: /*@C
3076:    MatCholeskyFactor - Performs in-place Cholesky factorization of a
3077:    symmetric matrix.

3079:    Collective on Mat

3081:    Input Parameters:
3082: +  mat - the matrix
3083: .  perm - row and column permutations
3084: -  f - expected fill as ratio of original fill

3086:    Notes:
3087:    See MatLUFactor() for the nonsymmetric case.  See also
3088:    MatCholeskyFactorSymbolic(), and MatCholeskyFactorNumeric().

3090:    Most users should employ the simplified KSP interface for linear solvers
3091:    instead of working directly with matrix algebra routines such as this.
3092:    See, e.g., KSPCreate().

3094:    Level: developer

3096:    Concepts: matrices^Cholesky factorization

3098: .seealso: MatLUFactor(), MatCholeskyFactorSymbolic(), MatCholeskyFactorNumeric()
3099:           MatGetOrdering()

3101:     Developer Note: fortran interface is not autogenerated as the f90
3102:     interface definition cannot be generated correctly [due to MatFactorInfo]

3104: @*/
3105: PetscErrorCode MatCholeskyFactor(Mat mat,IS perm,const MatFactorInfo *info)
3106: {

3114:   if (mat->rmap->N != mat->cmap->N) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONG,"Matrix must be square");
3115:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
3116:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
3117:   if (!mat->ops->choleskyfactor) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"In-place factorization for Mat type %s is not supported, try out-of-place factorization. See MatCholeskyFactorSymbolic/Numeric",((PetscObject)mat)->type_name);
3118:   MatCheckPreallocated(mat,1);

3120:   PetscLogEventBegin(MAT_CholeskyFactor,mat,perm,0,0);
3121:   (*mat->ops->choleskyfactor)(mat,perm,info);
3122:   PetscLogEventEnd(MAT_CholeskyFactor,mat,perm,0,0);
3123:   PetscObjectStateIncrease((PetscObject)mat);
3124:   return(0);
3125: }

3127: /*@C
3128:    MatCholeskyFactorSymbolic - Performs symbolic Cholesky factorization
3129:    of a symmetric matrix.

3131:    Collective on Mat

3133:    Input Parameters:
3134: +  fact - the factor matrix obtained with MatGetFactor()
3135: .  mat - the matrix
3136: .  perm - row and column permutations
3137: -  info - options for factorization, includes
3138: $          fill - expected fill as ratio of original fill.
3139: $          dtcol - pivot tolerance (0 no pivot, 1 full column pivoting)
3140: $                   Run with the option -info to determine an optimal value to use

3142:    Notes:
3143:    See MatLUFactorSymbolic() for the nonsymmetric case.  See also
3144:    MatCholeskyFactor() and MatCholeskyFactorNumeric().

3146:    Most users should employ the simplified KSP interface for linear solvers
3147:    instead of working directly with matrix algebra routines such as this.
3148:    See, e.g., KSPCreate().
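
   A minimal usage sketch (not taken from the PETSc examples; assumes mat is an assembled
   symmetric AIJ matrix and omits error checking). The symbolic factorization must be followed
   by MatCholeskyFactorNumeric() before fact can be used in MatSolve():
.vb
     Mat           fact;
     IS            rowperm,colperm;
     MatFactorInfo info;

     MatFactorInfoInitialize(&info);
     MatGetFactor(mat,MATSOLVERPETSC,MAT_FACTOR_CHOLESKY,&fact);
     MatGetOrdering(mat,MATORDERINGNATURAL,&rowperm,&colperm);
     MatCholeskyFactorSymbolic(fact,mat,rowperm,&info);
.ve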

3150:    Level: developer

3152:    Concepts: matrices^Cholesky symbolic factorization

3154: .seealso: MatLUFactorSymbolic(), MatCholeskyFactor(), MatCholeskyFactorNumeric()
3155:           MatGetOrdering()

3157:     Developer Note: the Fortran interface is not autogenerated as the F90
3158:     interface definition cannot be generated correctly [due to MatFactorInfo]

3160: @*/
3161: PetscErrorCode MatCholeskyFactorSymbolic(Mat fact,Mat mat,IS perm,const MatFactorInfo *info)
3162: {

3171:   if (mat->rmap->N != mat->cmap->N) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONG,"Matrix must be square");
3172:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
3173:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
3174:   if (!(fact)->ops->choleskyfactorsymbolic) {
3175:     const MatSolverPackage spackage;
3176:     MatFactorGetSolverPackage(fact,&spackage);
3177:     SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s symbolic factor Cholesky using solver package %s",((PetscObject)mat)->type_name,spackage);
3178:   }
3179:   MatCheckPreallocated(mat,2);

3181:   PetscLogEventBegin(MAT_CholeskyFactorSymbolic,mat,perm,0,0);
3182:   (fact->ops->choleskyfactorsymbolic)(fact,mat,perm,info);
3183:   PetscLogEventEnd(MAT_CholeskyFactorSymbolic,mat,perm,0,0);
3184:   PetscObjectStateIncrease((PetscObject)fact);
3185:   return(0);
3186: }

3188: /*@C
3189:    MatCholeskyFactorNumeric - Performs numeric Cholesky factorization
3190:    of a symmetric matrix. Call this routine after first calling
3191:    MatCholeskyFactorSymbolic().

3193:    Collective on Mat

3195:    Input Parameters:
3196: +  fact - the factor matrix obtained with MatGetFactor(), holding the symbolic factor computed by MatCholeskyFactorSymbolic()
3197: .  mat - the initial matrix
3198: -  info - options for factorization


3202:    Notes:
3203:    Most users should employ the simplified KSP interface for linear solvers
3204:    instead of working directly with matrix algebra routines such as this.
3205:    See, e.g., KSPCreate().
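
   A minimal usage sketch (continuing the example under MatCholeskyFactorSymbolic(); fact
   already holds the symbolic factor, b and x are conforming vectors, and error checking is
   omitted). If the entries of mat change but its nonzero pattern does not, only this numeric
   factorization needs to be repeated before the next solve:
.vb
     MatCholeskyFactorNumeric(fact,mat,&info);
     MatSolve(fact,b,x);
.ve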

3207:    Level: developer

3209:    Concepts: matrices^Cholesky numeric factorization

3211: .seealso: MatCholeskyFactorSymbolic(), MatCholeskyFactor(), MatLUFactorNumeric()

3213:     Developer Note: the Fortran interface is not autogenerated as the F90
3214:     interface definition cannot be generated correctly [due to MatFactorInfo]

3216: @*/
3217: PetscErrorCode MatCholeskyFactorNumeric(Mat fact,Mat mat,const MatFactorInfo *info)
3218: {

3226:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
3227:   if (!(fact)->ops->choleskyfactornumeric) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s numeric factor Cholesky",((PetscObject)mat)->type_name);
3228:   if (mat->rmap->N != (fact)->rmap->N || mat->cmap->N != (fact)->cmap->N) SETERRQ4(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Mat fact: global dim %D should = %D %D should = %D",mat->rmap->N,(fact)->rmap->N,mat->cmap->N,(fact)->cmap->N);
3229:   MatCheckPreallocated(mat,2);

3231:   PetscLogEventBegin(MAT_CholeskyFactorNumeric,mat,fact,0,0);
3232:   (fact->ops->choleskyfactornumeric)(fact,mat,info);
3233:   PetscLogEventEnd(MAT_CholeskyFactorNumeric,mat,fact,0,0);
3234:   MatViewFromOptions(fact,NULL,"-mat_factor_view");
3235:   PetscObjectStateIncrease((PetscObject)fact);
3236:   return(0);
3237: }

3239: /* ----------------------------------------------------------------*/
3240: /*@
3241:    MatSolve - Solves A x = b, given a factored matrix.

3243:    Neighbor-wise Collective on Mat and Vec

3245:    Input Parameters:
3246: +  mat - the factored matrix
3247: -  b - the right-hand-side vector

3249:    Output Parameter:
3250: .  x - the result vector

3252:    Notes:
3253:    The vectors b and x cannot be the same.  I.e., one cannot
3254:    call MatSolve(A,x,x).

3257:    Most users should employ the simplified KSP interface for linear solvers
3258:    instead of working directly with matrix algebra routines such as this.
3259:    See, e.g., KSPCreate().
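
   A minimal usage sketch (not taken from the PETSc examples; assumes A is an assembled square
   AIJ matrix, b is a conforming vector, and error checking is omitted):
.vb
     Mat           F;
     Vec           x;
     IS            rowperm,colperm;
     MatFactorInfo info;

     MatFactorInfoInitialize(&info);
     MatGetFactor(A,MATSOLVERPETSC,MAT_FACTOR_LU,&F);
     MatGetOrdering(A,MATORDERINGND,&rowperm,&colperm);
     MatLUFactorSymbolic(F,A,rowperm,colperm,&info);
     MatLUFactorNumeric(F,A,&info);
     VecDuplicate(b,&x);
     MatSolve(F,b,x);
.ve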

3261:    Level: developer

3263:    Concepts: matrices^triangular solves

3265: .seealso: MatSolveAdd(), MatSolveTranspose(), MatSolveTransposeAdd()
3266: @*/
3267: PetscErrorCode MatSolve(Mat mat,Vec b,Vec x)
3268: {

3278:   if (x == b) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"x and b must be different vectors");
3279:   if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
3280:   if (mat->cmap->N != x->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->cmap->N,x->map->N);
3281:   if (mat->rmap->N != b->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: global dim %D %D",mat->rmap->N,b->map->N);
3282:   if (mat->rmap->n != b->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: local dim %D %D",mat->rmap->n,b->map->n);
3283:   if (!mat->rmap->N && !mat->cmap->N) return(0);
3284:   if (!mat->ops->solve) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
3285:   MatCheckPreallocated(mat,1);

3287:   PetscLogEventBegin(MAT_Solve,mat,b,x,0);
3288:   if (mat->factorerrortype) {
3289:     PetscInfo1(mat,"MatFactorError %D\n",mat->factorerrortype);
3290:     VecSetInf(x);
3291:   } else {
3292:     (*mat->ops->solve)(mat,b,x);
3293:   }
3294:   PetscLogEventEnd(MAT_Solve,mat,b,x,0);
3295:   PetscObjectStateIncrease((PetscObject)x);
3296:   return(0);
3297: }

3299: static PetscErrorCode MatMatSolve_Basic(Mat A,Mat B,Mat X, PetscBool trans)
3300: {
3302:   Vec            b,x;
3303:   PetscInt       m,N,i;
3304:   PetscScalar    *bb,*xx;
3305:   PetscBool      flg;

3308:   PetscObjectTypeCompareAny((PetscObject)B,&flg,MATSEQDENSE,MATMPIDENSE,NULL);
3309:   if (!flg) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONG,"Matrix B must be MATDENSE matrix");
3310:   PetscObjectTypeCompareAny((PetscObject)X,&flg,MATSEQDENSE,MATMPIDENSE,NULL);
3311:   if (!flg) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONG,"Matrix X must be MATDENSE matrix");

3313:   MatDenseGetArray(B,&bb);
3314:   MatDenseGetArray(X,&xx);
3315:   MatGetLocalSize(B,&m,NULL);  /* number local rows */
3316:   MatGetSize(B,NULL,&N);       /* total columns in dense matrix */
3317:   MatCreateVecs(A,&x,&b);
3318:   for (i=0; i<N; i++) {
3319:     VecPlaceArray(b,bb + i*m);
3320:     VecPlaceArray(x,xx + i*m);
3321:     if (trans) {
3322:       MatSolveTranspose(A,b,x);
3323:     } else {
3324:       MatSolve(A,b,x);
3325:     }
3326:     VecResetArray(x);
3327:     VecResetArray(b);
3328:   }
3329:   VecDestroy(&b);
3330:   VecDestroy(&x);
3331:   MatDenseRestoreArray(B,&bb);
3332:   MatDenseRestoreArray(X,&xx);
3333:   return(0);
3334: }

3336: /*@
3337:    MatMatSolve - Solves A X = B, given a factored matrix.

3339:    Neighbor-wise Collective on Mat

3341:    Input Parameters:
3342: +  A - the factored matrix
3343: -  B - the right-hand-side matrix  (dense matrix)

3345:    Output Parameter:
3346: .  X - the result matrix (dense matrix)

3348:    Notes:
3349:    The matrices B and X cannot be the same.  I.e., one cannot
3350:    call MatMatSolve(A,X,X).

3353:    Most users should usually employ the simplified KSP interface for linear solvers
3354:    instead of working directly with matrix algebra routines such as this.
3355:    See, e.g., KSPCreate(). However KSP can only solve for one vector (column of X)
3356:    at a time.

3358:    When using SuperLU_Dist as a parallel solver PETSc will use the SuperLU_Dist functionality to solve multiple right hand sides simultaneously. For MUMPS
3359:    it calls a separate solve for each right hand side since MUMPS does not yet support distributed right hand sides.

3361:    Since the resulting matrix X must always be dense we do not support sparse representation of the matrix B.
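
   A minimal usage sketch (not taken from the PETSc examples; assumes F is a factored matrix
   obtained as for MatSolve(), A is square with m local and M global rows, nrhs is the number
   of right hand sides, and error checking is omitted). B is filled with the right hand sides
   and assembled before the solve; X is allocated with the same layout:
.vb
     Mat B,X;

     MatCreateDense(PETSC_COMM_WORLD,m,PETSC_DECIDE,M,nrhs,NULL,&B);
     MatDuplicate(B,MAT_DO_NOT_COPY_VALUES,&X);
     MatMatSolve(F,B,X);
.ve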

3363:    Level: developer

3365:    Concepts: matrices^triangular solves

3367: .seealso: MatMatSolveTranspose(), MatLUFactor(), MatCholeskyFactor()
3368: @*/
3369: PetscErrorCode MatMatSolve(Mat A,Mat B,Mat X)
3370: {

3380:   if (X == B) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_IDN,"X and B must be different matrices");
3381:   if (!A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
3382:   if (A->cmap->N != X->rmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Mat A,Mat X: global dim %D %D",A->cmap->N,X->rmap->N);
3383:   if (A->rmap->N != B->rmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Mat A,Mat B: global dim %D %D",A->rmap->N,B->rmap->N);
3384:   if (A->rmap->n != B->rmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat A,Mat B: local dim %D %D",A->rmap->n,B->rmap->n);
3385:   if (X->cmap->N < B->cmap->N) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Solution matrix must have same number of columns as rhs matrix");
3386:   if (!A->rmap->N && !A->cmap->N) return(0);
3387:   MatCheckPreallocated(A,1);

3389:   PetscLogEventBegin(MAT_MatSolve,A,B,X,0);
3390:   if (!A->ops->matsolve) {
3391:     PetscInfo1(A,"Mat type %s using basic MatMatSolve\n",((PetscObject)A)->type_name);
3392:     MatMatSolve_Basic(A,B,X,PETSC_FALSE);
3393:   } else {
3394:     (*A->ops->matsolve)(A,B,X);
3395:   }
3396:   PetscLogEventEnd(MAT_MatSolve,A,B,X,0);
3397:   PetscObjectStateIncrease((PetscObject)X);
3398:   return(0);
3399: }

3401: /*@
3402:    MatMatSolveTranspose - Solves A^T X = B, given a factored matrix.

3404:    Neighbor-wise Collective on Mat

3406:    Input Parameters:
3407: +  A - the factored matrix
3408: -  B - the right-hand-side matrix  (dense matrix)

3410:    Output Parameter:
3411: .  X - the result matrix (dense matrix)

3413:    Notes:
3414:    The matrices B and X cannot be the same.  I.e., one cannot
3415:    call MatMatSolveTranspose(A,X,X).

3418:    Most users should usually employ the simplified KSP interface for linear solvers
3419:    instead of working directly with matrix algebra routines such as this.
3420:    See, e.g., KSPCreate(). However KSP can only solve for one vector (column of X)
3421:    at a time.

3423:    When using SuperLU_Dist as a parallel solver PETSc will use the SuperLU_Dist functionality to solve multiple right hand sides simultaneously. For MUMPS
3424:    it calls a separate solve for each right hand side since MUMPS does not yet support distributed right hand sides.

3426:    Since the resulting matrix X must always be dense we do not support sparse representation of the matrix B.

3428:    Level: developer

3430:    Concepts: matrices^triangular solves

3432: .seealso: MatMatSolve(), MatLUFactor(), MatCholeskyFactor()
3433: @*/
3434: PetscErrorCode MatMatSolveTranspose(Mat A,Mat B,Mat X)
3435: {

3445:   if (X == B) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_IDN,"X and B must be different matrices");
3446:   if (!A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
3447:   if (A->cmap->N != X->rmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Mat A,Mat X: global dim %D %D",A->cmap->N,X->rmap->N);
3448:   if (A->rmap->N != B->rmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Mat A,Mat B: global dim %D %D",A->rmap->N,B->rmap->N);
3449:   if (A->rmap->n != B->rmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat A,Mat B: local dim %D %D",A->rmap->n,B->rmap->n);
3450:   if (X->cmap->N < B->cmap->N) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Solution matrix must have same number of columns as rhs matrix");
3451:   if (!A->rmap->N && !A->cmap->N) return(0);
3452:   MatCheckPreallocated(A,1);

3454:   PetscLogEventBegin(MAT_MatSolve,A,B,X,0);
3455:   if (!A->ops->matsolvetranspose) {
3456:     PetscInfo1(A,"Mat type %s using basic MatMatSolveTranspose\n",((PetscObject)A)->type_name);
3457:     MatMatSolve_Basic(A,B,X,PETSC_TRUE);
3458:   } else {
3459:     (*A->ops->matsolvetranspose)(A,B,X);
3460:   }
3461:   PetscLogEventEnd(MAT_MatSolve,A,B,X,0);
3462:   PetscObjectStateIncrease((PetscObject)X);
3463:   return(0);
3464: }

3466: /*@
3467:    MatForwardSolve - Solves L x = b, given a factored matrix, A = LU, or
3468:                             U^T*D^(1/2) x = b, given a factored symmetric matrix, A = U^T*D*U.

3470:    Neighbor-wise Collective on Mat and Vec

3472:    Input Parameters:
3473: +  mat - the factored matrix
3474: -  b - the right-hand-side vector

3476:    Output Parameter:
3477: .  x - the result vector

3479:    Notes:
3480:    MatSolve() should be used for most applications, as it performs
3481:    a forward solve followed by a backward solve.

3483:    The vectors b and x cannot be the same,  i.e., one cannot
3484:    call MatForwardSolve(A,x,x).

3486:    For matrix in seqsbaij format with block size larger than 1,
3487:    the diagonal blocks are not implemented as D = D^(1/2) * D^(1/2) yet.
3488:    MatForwardSolve() solves U^T*D y = b, and
3489:    MatBackwardSolve() solves U x = y.
3490:    Thus they do not provide a symmetric preconditioner.

3492:    Most users should employ the simplified KSP interface for linear solvers
3493:    instead of working directly with matrix algebra routines such as this.
3494:    See, e.g., KSPCreate().
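
   A minimal usage sketch (not taken from the PETSc examples; assumes F holds an LU
   factorization obtained as for MatSolve(), y is a work vector, and error checking is
   omitted). The two calls together reproduce what MatSolve(F,b,x) does in one step:
.vb
     MatForwardSolve(F,b,y);
     MatBackwardSolve(F,y,x);
.ve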

3496:    Level: developer

3498:    Concepts: matrices^forward solves

3500: .seealso: MatSolve(), MatBackwardSolve()
3501: @*/
3502: PetscErrorCode MatForwardSolve(Mat mat,Vec b,Vec x)
3503: {

3513:   if (x == b) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"x and b must be different vectors");
3514:   if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
3515:   if (!mat->ops->forwardsolve) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
3516:   if (mat->cmap->N != x->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->cmap->N,x->map->N);
3517:   if (mat->rmap->N != b->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: global dim %D %D",mat->rmap->N,b->map->N);
3518:   if (mat->rmap->n != b->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: local dim %D %D",mat->rmap->n,b->map->n);
3519:   MatCheckPreallocated(mat,1);
3520:   PetscLogEventBegin(MAT_ForwardSolve,mat,b,x,0);
3521:   (*mat->ops->forwardsolve)(mat,b,x);
3522:   PetscLogEventEnd(MAT_ForwardSolve,mat,b,x,0);
3523:   PetscObjectStateIncrease((PetscObject)x);
3524:   return(0);
3525: }

3527: /*@
3528:    MatBackwardSolve - Solves U x = b, given a factored matrix, A = LU, or
3529:                              D^(1/2) U x = b, given a factored symmetric matrix, A = U^T*D*U.

3531:    Neighbor-wise Collective on Mat and Vec

3533:    Input Parameters:
3534: +  mat - the factored matrix
3535: -  b - the right-hand-side vector

3537:    Output Parameter:
3538: .  x - the result vector

3540:    Notes:
3541:    MatSolve() should be used for most applications, as it performs
3542:    a forward solve followed by a backward solve.

3544:    The vectors b and x cannot be the same.  I.e., one cannot
3545:    call MatBackwardSolve(A,x,x).

3547:    For matrix in seqsbaij format with block size larger than 1,
3548:    the diagonal blocks are not implemented as D = D^(1/2) * D^(1/2) yet.
3549:    MatForwardSolve() solves U^T*D y = b, and
3550:    MatBackwardSolve() solves U x = y.
3551:    Thus they do not provide a symmetric preconditioner.

3553:    Most users should employ the simplified KSP interface for linear solvers
3554:    instead of working directly with matrix algebra routines such as this.
3555:    See, e.g., KSPCreate().

3557:    Level: developer

3559:    Concepts: matrices^backward solves

3561: .seealso: MatSolve(), MatForwardSolve()
3562: @*/
3563: PetscErrorCode MatBackwardSolve(Mat mat,Vec b,Vec x)
3564: {

3574:   if (x == b) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"x and b must be different vectors");
3575:   if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
3576:   if (!mat->ops->backwardsolve) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
3577:   if (mat->cmap->N != x->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->cmap->N,x->map->N);
3578:   if (mat->rmap->N != b->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: global dim %D %D",mat->rmap->N,b->map->N);
3579:   if (mat->rmap->n != b->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: local dim %D %D",mat->rmap->n,b->map->n);
3580:   MatCheckPreallocated(mat,1);

3582:   PetscLogEventBegin(MAT_BackwardSolve,mat,b,x,0);
3583:   (*mat->ops->backwardsolve)(mat,b,x);
3584:   PetscLogEventEnd(MAT_BackwardSolve,mat,b,x,0);
3585:   PetscObjectStateIncrease((PetscObject)x);
3586:   return(0);
3587: }

3589: /*@
3590:    MatSolveAdd - Computes x = y + inv(A)*b, given a factored matrix.

3592:    Neighbor-wise Collective on Mat and Vec

3594:    Input Parameters:
3595: +  mat - the factored matrix
3596: .  b - the right-hand-side vector
3597: -  y - the vector to be added to

3599:    Output Parameter:
3600: .  x - the result vector

3602:    Notes:
3603:    The vectors b and x cannot be the same.  I.e., one cannot
3604:    call MatSolveAdd(A,x,y,x).

3606:    Most users should employ the simplified KSP interface for linear solvers
3607:    instead of working directly with matrix algebra routines such as this.
3608:    See, e.g., KSPCreate().

3610:    Level: developer

3612:    Concepts: matrices^triangular solves

3614: .seealso: MatSolve(), MatSolveTranspose(), MatSolveTransposeAdd()
3615: @*/
3616: PetscErrorCode MatSolveAdd(Mat mat,Vec b,Vec y,Vec x)
3617: {
3618:   PetscScalar    one = 1.0;
3619:   Vec            tmp;

3631:   if (x == b) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"x and b must be different vectors");
3632:   if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
3633:   if (mat->cmap->N != x->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->cmap->N,x->map->N);
3634:   if (mat->rmap->N != b->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: global dim %D %D",mat->rmap->N,b->map->N);
3635:   if (mat->rmap->N != y->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec y: global dim %D %D",mat->rmap->N,y->map->N);
3636:   if (mat->rmap->n != b->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: local dim %D %D",mat->rmap->n,b->map->n);
3637:   if (x->map->n != y->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Vec x,Vec y: local dim %D %D",x->map->n,y->map->n);
3638:   MatCheckPreallocated(mat,1);

3640:   PetscLogEventBegin(MAT_SolveAdd,mat,b,x,y);
3641:   if (mat->ops->solveadd) {
3642:     (*mat->ops->solveadd)(mat,b,y,x);
3643:   } else {
3644:     /* do the solve then the add manually */
3645:     if (x != y) {
3646:       MatSolve(mat,b,x);
3647:       VecAXPY(x,one,y);
3648:     } else {
3649:       VecDuplicate(x,&tmp);
3650:       PetscLogObjectParent((PetscObject)mat,(PetscObject)tmp);
3651:       VecCopy(x,tmp);
3652:       MatSolve(mat,b,x);
3653:       VecAXPY(x,one,tmp);
3654:       VecDestroy(&tmp);
3655:     }
3656:   }
3657:   PetscLogEventEnd(MAT_SolveAdd,mat,b,x,y);
3658:   PetscObjectStateIncrease((PetscObject)x);
3659:   return(0);
3660: }

3662: /*@
3663:    MatSolveTranspose - Solves A' x = b, given a factored matrix.

3665:    Neighbor-wise Collective on Mat and Vec

3667:    Input Parameters:
3668: +  mat - the factored matrix
3669: -  b - the right-hand-side vector

3671:    Output Parameter:
3672: .  x - the result vector

3674:    Notes:
3675:    The vectors b and x cannot be the same.  I.e., one cannot
3676:    call MatSolveTranspose(A,x,x).

3678:    Most users should employ the simplified KSP interface for linear solvers
3679:    instead of working directly with matrix algebra routines such as this.
3680:    See, e.g., KSPCreate().

3682:    Level: developer

3684:    Concepts: matrices^triangular solves

3686: .seealso: MatSolve(), MatSolveAdd(), MatSolveTransposeAdd()
3687: @*/
3688: PetscErrorCode MatSolveTranspose(Mat mat,Vec b,Vec x)
3689: {

3699:   if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
3700:   if (x == b) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"x and b must be different vectors");
3701:   if (!mat->ops->solvetranspose) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Matrix type %s",((PetscObject)mat)->type_name);
3702:   if (mat->rmap->N != x->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->rmap->N,x->map->N);
3703:   if (mat->cmap->N != b->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: global dim %D %D",mat->cmap->N,b->map->N);
3704:   MatCheckPreallocated(mat,1);
3705:   PetscLogEventBegin(MAT_SolveTranspose,mat,b,x,0);
3706:   if (mat->factorerrortype) {
3707:     PetscInfo1(mat,"MatFactorError %D\n",mat->factorerrortype);
3708:     VecSetInf(x);
3709:   } else {
3710:     (*mat->ops->solvetranspose)(mat,b,x);
3711:   }
3712:   PetscLogEventEnd(MAT_SolveTranspose,mat,b,x,0);
3713:   PetscObjectStateIncrease((PetscObject)x);
3714:   return(0);
3715: }

3717: /*@
3718:    MatSolveTransposeAdd - Computes x = y + inv(Transpose(A)) b, given a
3719:                       factored matrix.

3721:    Neighbor-wise Collective on Mat and Vec

3723:    Input Parameters:
3724: +  mat - the factored matrix
3725: .  b - the right-hand-side vector
3726: -  y - the vector to be added to

3728:    Output Parameter:
3729: .  x - the result vector

3731:    Notes:
3732:    The vectors b and x cannot be the same.  I.e., one cannot
3733:    call MatSolveTransposeAdd(A,x,y,x).

3735:    Most users should employ the simplified KSP interface for linear solvers
3736:    instead of working directly with matrix algebra routines such as this.
3737:    See, e.g., KSPCreate().

3739:    Level: developer

3741:    Concepts: matrices^triangular solves

3743: .seealso: MatSolve(), MatSolveAdd(), MatSolveTranspose()
3744: @*/
3745: PetscErrorCode MatSolveTransposeAdd(Mat mat,Vec b,Vec y,Vec x)
3746: {
3747:   PetscScalar    one = 1.0;
3749:   Vec            tmp;

3760:   if (x == b) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"x and b must be different vectors");
3761:   if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
3762:   if (mat->rmap->N != x->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->rmap->N,x->map->N);
3763:   if (mat->cmap->N != b->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: global dim %D %D",mat->cmap->N,b->map->N);
3764:   if (mat->cmap->N != y->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec y: global dim %D %D",mat->cmap->N,y->map->N);
3765:   if (x->map->n != y->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Vec x,Vec y: local dim %D %D",x->map->n,y->map->n);
3766:   MatCheckPreallocated(mat,1);

3768:   PetscLogEventBegin(MAT_SolveTransposeAdd,mat,b,x,y);
3769:   if (mat->ops->solvetransposeadd) {
3770:     if (mat->factorerrortype) {
3771:       PetscInfo1(mat,"MatFactorError %D\n",mat->factorerrortype);
3772:       VecSetInf(x);
3773:     } else {
3774:       (*mat->ops->solvetransposeadd)(mat,b,y,x);
3775:     }
3776:   } else {
3777:     /* do the solve then the add manually */
3778:     if (x != y) {
3779:       MatSolveTranspose(mat,b,x);
3780:       VecAXPY(x,one,y);
3781:     } else {
3782:       VecDuplicate(x,&tmp);
3783:       PetscLogObjectParent((PetscObject)mat,(PetscObject)tmp);
3784:       VecCopy(x,tmp);
3785:       MatSolveTranspose(mat,b,x);
3786:       VecAXPY(x,one,tmp);
3787:       VecDestroy(&tmp);
3788:     }
3789:   }
3790:   PetscLogEventEnd(MAT_SolveTransposeAdd,mat,b,x,y);
3791:   PetscObjectStateIncrease((PetscObject)x);
3792:   return(0);
3793: }
3794: /* ----------------------------------------------------------------*/

3796: /*@
3797:    MatSOR - Computes relaxation (SOR, Gauss-Seidel) sweeps.

3799:    Neighbor-wise Collective on Mat and Vec

3801:    Input Parameters:
3802: +  mat - the matrix
3803: .  b - the right hand side
3804: .  omega - the relaxation factor
3805: .  flag - flag indicating the type of SOR (see below)
3806: .  shift -  diagonal shift
3807: .  its - the number of iterations
3808: -  lits - the number of local iterations

3810:    Output Parameters:
3811: .  x - the solution (can contain an initial guess, use option SOR_ZERO_INITIAL_GUESS to indicate no guess)

3813:    SOR Flags:
3814: .     SOR_FORWARD_SWEEP - forward SOR
3815: .     SOR_BACKWARD_SWEEP - backward SOR
3816: .     SOR_SYMMETRIC_SWEEP - SSOR (symmetric SOR)
3817: .     SOR_LOCAL_FORWARD_SWEEP - local forward SOR
3818: .     SOR_LOCAL_BACKWARD_SWEEP - local backward SOR
3819: .     SOR_LOCAL_SYMMETRIC_SWEEP - local SSOR
3820: .     SOR_APPLY_UPPER, SOR_APPLY_LOWER - applies
3821:          upper/lower triangular part of matrix to
3822:          vector (with omega)
3823: .     SOR_ZERO_INITIAL_GUESS - zero initial guess

3825:    Notes:
3826:    SOR_LOCAL_FORWARD_SWEEP, SOR_LOCAL_BACKWARD_SWEEP, and
3827:    SOR_LOCAL_SYMMETRIC_SWEEP perform separate independent smoothings
3828:    on each processor.

3830:    Application programmers will not generally use MatSOR() directly,
3831:    but instead will employ the KSP/PC interface.

3833:    Notes: for BAIJ, SBAIJ, and AIJ matrices with Inodes this does a block SOR smoothing, otherwise it does a pointwise smoothing

3835:    Notes for Advanced Users:
3836:    The flags are implemented as bitwise inclusive or operations.
3837:    For example, use (SOR_ZERO_INITIAL_GUESS | SOR_SYMMETRIC_SWEEP)
3838:    to specify a zero initial guess for SSOR.
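
   For instance, the following sketch (error checking omitted) applies two symmetric sweeps
   with omega = 1.0, no diagonal shift, and a zero initial guess:
.vb
     MatSOR(mat,b,1.0,(MatSORType)(SOR_ZERO_INITIAL_GUESS | SOR_SYMMETRIC_SWEEP),0.0,2,1,x);
.ve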

3840:    Most users should employ the simplified KSP interface for linear solvers
3841:    instead of working directly with matrix algebra routines such as this.
3842:    See, e.g., KSPCreate().

3844:    Vectors x and b CANNOT be the same

3846:    Developer Note: We should add block SOR support for AIJ matrices with block size greater than one and no inodes

3848:    Level: developer

3850:    Concepts: matrices^relaxation
3851:    Concepts: matrices^SOR
3852:    Concepts: matrices^Gauss-Seidel

3854: @*/
3855: PetscErrorCode MatSOR(Mat mat,Vec b,PetscReal omega,MatSORType flag,PetscReal shift,PetscInt its,PetscInt lits,Vec x)
3856: {

3866:   if (!mat->ops->sor) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
3867:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
3868:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
3869:   if (mat->cmap->N != x->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->cmap->N,x->map->N);
3870:   if (mat->rmap->N != b->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: global dim %D %D",mat->rmap->N,b->map->N);
3871:   if (mat->rmap->n != b->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: local dim %D %D",mat->rmap->n,b->map->n);
3872:   if (its <= 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Relaxation requires global its %D positive",its);
3873:   if (lits <= 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Relaxation requires local its %D positive",lits);
3874:   if (b == x) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_IDN,"b and x vector cannot be the same");

3876:   MatCheckPreallocated(mat,1);
3877:   PetscLogEventBegin(MAT_SOR,mat,b,x,0);
3878:   (*mat->ops->sor)(mat,b,omega,flag,shift,its,lits,x);
3879:   PetscLogEventEnd(MAT_SOR,mat,b,x,0);
3880:   PetscObjectStateIncrease((PetscObject)x);
3881:   return(0);
3882: }

3884: /*
3885:       Default matrix copy routine.
3886: */
3887: PetscErrorCode MatCopy_Basic(Mat A,Mat B,MatStructure str)
3888: {
3889:   PetscErrorCode    ierr;
3890:   PetscInt          i,rstart = 0,rend = 0,nz;
3891:   const PetscInt    *cwork;
3892:   const PetscScalar *vwork;

3895:   if (B->assembled) {
3896:     MatZeroEntries(B);
3897:   }
3898:   MatGetOwnershipRange(A,&rstart,&rend);
3899:   for (i=rstart; i<rend; i++) {
3900:     MatGetRow(A,i,&nz,&cwork,&vwork);
3901:     MatSetValues(B,1,&i,nz,cwork,vwork,INSERT_VALUES);
3902:     MatRestoreRow(A,i,&nz,&cwork,&vwork);
3903:   }
3904:   MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);
3905:   MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);
3906:   return(0);
3907: }

3909: /*@
3910:    MatCopy - Copies a matrix to another matrix.

3912:    Collective on Mat

3914:    Input Parameters:
3915: +  A - the matrix
3916: -  str - SAME_NONZERO_PATTERN or DIFFERENT_NONZERO_PATTERN

3918:    Output Parameter:
3919: .  B - where the copy is put

3921:    Notes:
3922:    If you use SAME_NONZERO_PATTERN then the two matrices must have the
3923:    same nonzero pattern or the routine will crash.

3925:    MatCopy() copies the matrix entries of a matrix to another existing
3926:    matrix (after first zeroing the second matrix).  A related routine is
3927:    MatConvert(), which first creates a new matrix and then copies the data.
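
   A minimal usage sketch (not taken from the PETSc examples; assumes A is assembled and error
   checking is omitted). MatDuplicate() gives B the nonzero pattern of A, so the copy can use
   SAME_NONZERO_PATTERN:
.vb
     Mat B;

     MatDuplicate(A,MAT_DO_NOT_COPY_VALUES,&B);
     MatCopy(A,B,SAME_NONZERO_PATTERN);
.ve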

3929:    Level: intermediate

3931:    Concepts: matrices^copying

3933: .seealso: MatConvert(), MatDuplicate()

3935: @*/
3936: PetscErrorCode MatCopy(Mat A,Mat B,MatStructure str)
3937: {
3939:   PetscInt       i;

3947:   MatCheckPreallocated(B,2);
3948:   if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
3949:   if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
3950:   if (A->rmap->N != B->rmap->N || A->cmap->N != B->cmap->N) SETERRQ4(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Mat A,Mat B: global dim (%D,%D) (%D,%D)",A->rmap->N,B->rmap->N,A->cmap->N,B->cmap->N);
3951:   MatCheckPreallocated(A,1);
3952:   if (A == B) return(0);

3954:   PetscLogEventBegin(MAT_Copy,A,B,0,0);
3955:   if (A->ops->copy) {
3956:     (*A->ops->copy)(A,B,str);
3957:   } else { /* generic conversion */
3958:     MatCopy_Basic(A,B,str);
3959:   }

3961:   B->stencil.dim = A->stencil.dim;
3962:   B->stencil.noc = A->stencil.noc;
3963:   for (i=0; i<=A->stencil.dim; i++) {
3964:     B->stencil.dims[i]   = A->stencil.dims[i];
3965:     B->stencil.starts[i] = A->stencil.starts[i];
3966:   }

3968:   PetscLogEventEnd(MAT_Copy,A,B,0,0);
3969:   PetscObjectStateIncrease((PetscObject)B);
3970:   return(0);
3971: }

3973: /*@C
3974:    MatConvert - Converts a matrix to another matrix, either of the same
3975:    or different type.

3977:    Collective on Mat

3979:    Input Parameters:
3980: +  mat - the matrix
3981: .  newtype - new matrix type.  Use MATSAME to create a new matrix of the
3982:    same type as the original matrix.
3983: -  reuse - denotes if the destination matrix is to be created or reused.
3984:    Use MAT_INPLACE_MATRIX for inplace conversion (that is when you want the input mat to be changed to contain the matrix in the new format), otherwise use
3985:    MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX (can only be used after the first call was made with MAT_INITIAL_MATRIX, causes the matrix space in M to be reused).

3987:    Output Parameter:
3988: .  M - pointer to place new matrix

3990:    Notes:
3991:    MatConvert() first creates a new matrix and then copies the data from
3992:    the first matrix.  A related routine is MatCopy(), which copies the matrix
3993:    entries of one matrix to another already existing matrix context.

3995:    Cannot be used to convert a sequential matrix to parallel or parallel to sequential,
3996:    the MPI communicator of the generated matrix is always the same as the communicator
3997:    of the input matrix.
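
   A minimal usage sketch (not taken from the PETSc examples; error checking omitted). The
   first call creates a new dense copy of mat; the second converts mat itself in place:
.vb
     Mat D;

     MatConvert(mat,MATDENSE,MAT_INITIAL_MATRIX,&D);
     MatConvert(mat,MATBAIJ,MAT_INPLACE_MATRIX,&mat);
.ve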

3999:    Level: intermediate

4001:    Concepts: matrices^converting between storage formats

4003: .seealso: MatCopy(), MatDuplicate()
4004: @*/
4005: PetscErrorCode MatConvert(Mat mat, MatType newtype,MatReuse reuse,Mat *M)
4006: {
4008:   PetscBool      sametype,issame,flg;
4009:   char           convname[256],mtype[256];
4010:   Mat            B;

4016:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4017:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
4018:   MatCheckPreallocated(mat,1);
4019:   MatSetOption(mat,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_FALSE);

4021:   PetscOptionsGetString(((PetscObject)mat)->options,((PetscObject)mat)->prefix,"-matconvert_type",mtype,256,&flg);
4022:   if (flg) {
4023:     newtype = mtype;
4024:   }
4025:   PetscObjectTypeCompare((PetscObject)mat,newtype,&sametype);
4026:   PetscStrcmp(newtype,"same",&issame);
4027:   if ((reuse == MAT_INPLACE_MATRIX) && (mat != *M)) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"MAT_INPLACE_MATRIX requires same input and output matrix");
4028:   if ((reuse == MAT_REUSE_MATRIX) && (mat == *M)) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"MAT_REUSE_MATRIX means reuse matrix in final argument, perhaps you mean MAT_INPLACE_MATRIX");

4030:   if ((reuse == MAT_INPLACE_MATRIX) && (issame || sametype)) return(0);

4032:   if ((sametype || issame) && (reuse==MAT_INITIAL_MATRIX) && mat->ops->duplicate) {
4033:     (*mat->ops->duplicate)(mat,MAT_COPY_VALUES,M);
4034:   } else {
4035:     PetscErrorCode (*conv)(Mat, MatType,MatReuse,Mat*)=NULL;
4036:     const char     *prefix[3] = {"seq","mpi",""};
4037:     PetscInt       i;
4038:     /*
4039:        Order of precedence:
4040:        1) See if a specialized converter is known to the current matrix.
4041:        2) See if a specialized converter is known to the desired matrix class.
4042:        3) See if a good general converter is registered for the desired class
4043:           (as of 6/27/03 only MATMPIADJ falls into this category).
4044:        4) See if a good general converter is known for the current matrix.
4045:        5) Use a really basic converter.
4046:     */

4048:     /* 1) See if a specialized converter is known to the current matrix and the desired class */
4049:     for (i=0; i<3; i++) {
4050:       PetscStrcpy(convname,"MatConvert_");
4051:       PetscStrcat(convname,((PetscObject)mat)->type_name);
4052:       PetscStrcat(convname,"_");
4053:       PetscStrcat(convname,prefix[i]);
4054:       PetscStrcat(convname,issame ? ((PetscObject)mat)->type_name : newtype);
4055:       PetscStrcat(convname,"_C");
4056:       PetscObjectQueryFunction((PetscObject)mat,convname,&conv);
4057:       if (conv) goto foundconv;
4058:     }

4060:     /* 2)  See if a specialized converter is known to the desired matrix class. */
4061:     MatCreate(PetscObjectComm((PetscObject)mat),&B);
4062:     MatSetSizes(B,mat->rmap->n,mat->cmap->n,mat->rmap->N,mat->cmap->N);
4063:     MatSetType(B,newtype);
4064:     for (i=0; i<3; i++) {
4065:       PetscStrcpy(convname,"MatConvert_");
4066:       PetscStrcat(convname,((PetscObject)mat)->type_name);
4067:       PetscStrcat(convname,"_");
4068:       PetscStrcat(convname,prefix[i]);
4069:       PetscStrcat(convname,newtype);
4070:       PetscStrcat(convname,"_C");
4071:       PetscObjectQueryFunction((PetscObject)B,convname,&conv);
4072:       if (conv) {
4073:         MatDestroy(&B);
4074:         goto foundconv;
4075:       }
4076:     }

4078:     /* 3) See if a good general converter is registered for the desired class */
4079:     conv = B->ops->convertfrom;
4080:     MatDestroy(&B);
4081:     if (conv) goto foundconv;

4083:     /* 4) See if a good general converter is known for the current matrix */
4084:     if (mat->ops->convert) {
4085:       conv = mat->ops->convert;
4086:     }
4087:     if (conv) goto foundconv;

4089:     /* 5) Use a really basic converter. */
4090:     conv = MatConvert_Basic;

4092: foundconv:
4093:     PetscLogEventBegin(MAT_Convert,mat,0,0,0);
4094:     (*conv)(mat,newtype,reuse,M);
4095:     PetscLogEventEnd(MAT_Convert,mat,0,0,0);
4096:   }
4097:   PetscObjectStateIncrease((PetscObject)*M);

4099:   /* Copy Mat options */
4100:   if (mat->symmetric) {MatSetOption(*M,MAT_SYMMETRIC,PETSC_TRUE);}
4101:   if (mat->hermitian) {MatSetOption(*M,MAT_HERMITIAN,PETSC_TRUE);}
4102:   return(0);
4103: }

4105: /*@C
4106:    MatFactorGetSolverPackage - Returns name of the package providing the factorization routines

4108:    Not Collective

4110:    Input Parameter:
4111: .  mat - the matrix, must be a factored matrix

4113:    Output Parameter:
4114: .   type - the string name of the package (do not free this string)

4116:    Notes:
4117:       In Fortran you pass in an empty string and the package name will be copied into it.
4118:     (Make sure the string is long enough)
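
   A minimal usage sketch (not taken from the PETSc examples; assumes F was obtained with
   MatGetFactor() and error checking is omitted):
.vb
     const MatSolverPackage pkg;

     MatFactorGetSolverPackage(F,&pkg);
     PetscPrintf(PETSC_COMM_WORLD,"factorization provided by %s\n",pkg);
.ve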

4120:    Level: intermediate

4122: .seealso: MatCopy(), MatDuplicate(), MatGetFactorAvailable(), MatGetFactor()
4123: @*/
4124: PetscErrorCode MatFactorGetSolverPackage(Mat mat, const MatSolverPackage *type)
4125: {
4126:   PetscErrorCode ierr, (*conv)(Mat,const MatSolverPackage*);

4131:   if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Only for factored matrix");
4132:   PetscObjectQueryFunction((PetscObject)mat,"MatFactorGetSolverPackage_C",&conv);
4133:   if (!conv) {
4134:     *type = MATSOLVERPETSC;
4135:   } else {
4136:     (*conv)(mat,type);
4137:   }
4138:   return(0);
4139: }

4141: typedef struct _MatSolverPackageForSpecifcType* MatSolverPackageForSpecifcType;
4142: struct _MatSolverPackageForSpecifcType {
4143:   MatType                        mtype;
4144:   PetscErrorCode                 (*getfactor[4])(Mat,MatFactorType,Mat*);
4145:   MatSolverPackageForSpecifcType next;
4146: };

4148: typedef struct _MatSolverPackageHolder* MatSolverPackageHolder;
4149: struct _MatSolverPackageHolder {
4150:   char                           *name;
4151:   MatSolverPackageForSpecifcType handlers;
4152:   MatSolverPackageHolder         next;
4153: };

4155: static MatSolverPackageHolder MatSolverPackageHolders = NULL;

4157: /*@C
4158:    MatSolverPackageRegister - Registers a MatSolverPackage that works for a particular matrix type

4160:    Input Parameters:
4161: +    package - name of the package, for example petsc or superlu
4162: .    mtype - the matrix type that works with this package
4163: .    ftype - the type of factorization supported by the package
4164: -    getfactor - routine that will create the factored matrix ready to be used
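
     A registration might look like the following sketch, where "mypkg" and
     MatGetFactor_MyPkg_SeqAIJ are hypothetical names for the package and for a callback that
     creates the factor matrix (they are not part of PETSc):
.vb
     extern PetscErrorCode MatGetFactor_MyPkg_SeqAIJ(Mat,MatFactorType,Mat*);

     MatSolverPackageRegister("mypkg",MATSEQAIJ,MAT_FACTOR_LU,MatGetFactor_MyPkg_SeqAIJ);
.ve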

4166:     Level: intermediate

4168: .seealso: MatCopy(), MatDuplicate(), MatGetFactorAvailable()
4169: @*/
4170: PetscErrorCode MatSolverPackageRegister(const MatSolverPackage package,const MatType mtype,MatFactorType ftype,PetscErrorCode (*getfactor)(Mat,MatFactorType,Mat*))
4171: {
4172:   PetscErrorCode                 ierr;
4173:   MatSolverPackageHolder         next = MatSolverPackageHolders,prev;
4174:   PetscBool                      flg;
4175:   MatSolverPackageForSpecifcType inext,iprev = NULL;

4178:   if (!next) {
4179:     PetscNew(&MatSolverPackageHolders);
4180:     PetscStrallocpy(package,&MatSolverPackageHolders->name);
4181:     PetscNew(&MatSolverPackageHolders->handlers);
4182:     PetscStrallocpy(mtype,(char **)&MatSolverPackageHolders->handlers->mtype);
4183:     MatSolverPackageHolders->handlers->getfactor[(int)ftype-1] = getfactor;
4184:     return(0);
4185:   }
4186:   while (next) {
4187:     PetscStrcasecmp(package,next->name,&flg);
4188:     if (flg) {
4189:       if (!next->handlers) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"MatSolverPackageHolder is missing handlers");
4190:       inext = next->handlers;
4191:       while (inext) {
4192:         PetscStrcasecmp(mtype,inext->mtype,&flg);
4193:         if (flg) {
4194:           inext->getfactor[(int)ftype-1] = getfactor;
4195:           return(0);
4196:         }
4197:         iprev = inext;
4198:         inext = inext->next;
4199:       }
4200:       PetscNew(&iprev->next);
4201:       PetscStrallocpy(mtype,(char **)&iprev->next->mtype);
4202:       iprev->next->getfactor[(int)ftype-1] = getfactor;
4203:       return(0);
4204:     }
4205:     prev = next;
4206:     next = next->next;
4207:   }
4208:   PetscNew(&prev->next);
4209:   PetscStrallocpy(package,&prev->next->name);
4210:   PetscNew(&prev->next->handlers);
4211:   PetscStrallocpy(mtype,(char **)&prev->next->handlers->mtype);
4212:   prev->next->handlers->getfactor[(int)ftype-1] = getfactor;
4213:   return(0);
4214: }

4216: /*@C
4217:    MatSolverPackageGet - Gets the function that creates the factor matrix, if it exists

4219:    Input Parameters:
4220: +    package - name of the package, for example petsc or superlu
4221: .    ftype - the type of factorization supported by the package
4222: -    mtype - the matrix type that works with this package

4224:    Output Parameters:
4225: +   foundpackage - PETSC_TRUE if the package was registered
4226: .   foundmtype - PETSC_TRUE if the package supports the requested mtype
4227: -   getfactor - routine that will create the factored matrix ready to be used or NULL if not found

4229:     Level: intermediate

4231: .seealso: MatCopy(), MatDuplicate(), MatGetFactorAvailable()
4232: @*/
4233: PetscErrorCode MatSolverPackageGet(const MatSolverPackage package,const MatType mtype,MatFactorType ftype,PetscBool *foundpackage,PetscBool *foundmtype,PetscErrorCode (**getfactor)(Mat,MatFactorType,Mat*))
4234: {
4235:   PetscErrorCode                 ierr;
4236:   MatSolverPackageHolder         next = MatSolverPackageHolders;
4237:   PetscBool                      flg;
4238:   MatSolverPackageForSpecifcType inext;

4241:   if (foundpackage) *foundpackage = PETSC_FALSE;
4242:   if (foundmtype)   *foundmtype   = PETSC_FALSE;
4243:   if (getfactor)    *getfactor    = NULL;

4245:   if (package) {
4246:     while (next) {
4247:       PetscStrcasecmp(package,next->name,&flg);
4248:       if (flg) {
4249:         if (foundpackage) *foundpackage = PETSC_TRUE;
4250:         inext = next->handlers;
4251:         while (inext) {
4252:           PetscStrbeginswith(mtype,inext->mtype,&flg);
4253:           if (flg) {
4254:             if (foundmtype) *foundmtype = PETSC_TRUE;
4255:             if (getfactor)  *getfactor  = inext->getfactor[(int)ftype-1];
4256:             return(0);
4257:           }
4258:           inext = inext->next;
4259:         }
4260:       }
4261:       next = next->next;
4262:     }
4263:   } else {
4264:     while (next) {
4265:       inext = next->handlers;
4266:       while (inext) {
4267:         PetscStrbeginswith(mtype,inext->mtype,&flg);
4268:         if (flg && inext->getfactor[(int)ftype-1]) {
4269:           if (foundpackage) *foundpackage = PETSC_TRUE;
4270:           if (foundmtype)   *foundmtype   = PETSC_TRUE;
4271:           if (getfactor)    *getfactor    = inext->getfactor[(int)ftype-1];
4272:           return(0);
4273:         }
4274:         inext = inext->next;
4275:       }
4276:       next = next->next;
4277:     }
4278:   }
4279:   return(0);
4280: }

4282: PetscErrorCode MatSolverPackageDestroy(void)
4283: {
4284:   PetscErrorCode                 ierr;
4285:   MatSolverPackageHolder         next = MatSolverPackageHolders,prev;
4286:   MatSolverPackageForSpecifcType inext,iprev;

4289:   while (next) {
4290:     PetscFree(next->name);
4291:     inext = next->handlers;
4292:     while (inext) {
4293:       PetscFree(inext->mtype);
4294:       iprev = inext;
4295:       inext = inext->next;
4296:       PetscFree(iprev);
4297:     }
4298:     prev = next;
4299:     next = next->next;
4300:     PetscFree(prev);
4301:   }
4302:   MatSolverPackageHolders = NULL;
4303:   return(0);
4304: }

4306: /*@C
4307:    MatGetFactor - Returns a matrix suitable for calls to MatXXFactorSymbolic()

4309:    Collective on Mat

4311:    Input Parameters:
4312: +  mat - the matrix
4313: .  type - name of solver type, for example, superlu, petsc (to use PETSc's default)
4314: -  ftype - factor type, MAT_FACTOR_LU, MAT_FACTOR_CHOLESKY, MAT_FACTOR_ICC, MAT_FACTOR_ILU,

4316:    Output Parameters:
4317: .  f - the factor matrix used with MatXXFactorSymbolic() calls

4319:    Notes:
4320:       Some PETSc matrix formats have alternative solvers available that are contained in alternative packages
4321:      such as pastix, superlu, mumps etc.

4323:      PETSc must have been configured (./configure) to use the external solver, using the option --download-<package>
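
   A minimal usage sketch (not taken from the PETSc examples; assumes mat is an assembled AIJ
   matrix and error checking is omitted). The returned F is then passed to
   MatLUFactorSymbolic(), MatLUFactorNumeric() and finally MatSolve():
.vb
     Mat F;

     MatGetFactor(mat,MATSOLVERPETSC,MAT_FACTOR_LU,&F);
.ve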

4325:    Level: intermediate

4327: .seealso: MatCopy(), MatDuplicate(), MatGetFactorAvailable()
4328: @*/
4329: PetscErrorCode MatGetFactor(Mat mat, const MatSolverPackage type,MatFactorType ftype,Mat *f)
4330: {
4331:   PetscErrorCode ierr,(*conv)(Mat,MatFactorType,Mat*);
4332:   PetscBool      foundpackage,foundmtype;


4338:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
4339:   MatCheckPreallocated(mat,1);

4341:   MatSolverPackageGet(type,((PetscObject)mat)->type_name,ftype,&foundpackage,&foundmtype,&conv);
4342:   if (!foundpackage) {
4343:     if (type) {
4344:       SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_MISSING_FACTOR,"Could not locate solver package %s. Perhaps you must ./configure with --download-%s",type,type);
4345:     } else {
4346:       SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_MISSING_FACTOR,"Could not locate a solver package. Perhaps you must ./configure with --download-<package>");
4347:     }
4348:   }

4350:   if (!foundmtype) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_MISSING_FACTOR,"MatSolverPackage %s does not support matrix type %s",type,((PetscObject)mat)->type_name);
4351:   if (!conv) SETERRQ3(PetscObjectComm((PetscObject)mat),PETSC_ERR_MISSING_FACTOR,"MatSolverPackage %s does not support factorization type %s for  matrix type %s",type,MatFactorTypes[ftype],((PetscObject)mat)->type_name);

4353:   (*conv)(mat,ftype,f);
4354:   return(0);
4355: }

4357: /*@C
4358:    MatGetFactorAvailable - Returns a flag indicating whether the matrix supports a particular package and factor type

4360:    Not Collective

4362:    Input Parameters:
4363: +  mat - the matrix
4364: .  type - name of solver type, for example, superlu, petsc (to use PETSc's default)
4365: -  ftype - factor type, MAT_FACTOR_LU, MAT_FACTOR_CHOLESKY, MAT_FACTOR_ICC, MAT_FACTOR_ILU,

4367:    Output Parameter:
4368: .    flg - PETSC_TRUE if the factorization is available

4370:    Notes:
4371:       Some PETSc matrix formats have alternative solvers available that are contained in alternative packages
4372:      such as pastix, superlu, mumps etc.

4374:      PETSc must have been configured (./configure) to use the external solver, using the option --download-<package>
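
   A minimal usage sketch (not taken from the PETSc examples; error checking omitted) that
   falls back to PETSc's built-in factorization when the external package is not available:
.vb
     Mat       F;
     PetscBool flg;

     MatGetFactorAvailable(mat,"superlu",MAT_FACTOR_LU,&flg);
     if (flg) {
       MatGetFactor(mat,"superlu",MAT_FACTOR_LU,&F);
     } else {
       MatGetFactor(mat,MATSOLVERPETSC,MAT_FACTOR_LU,&F);
     }
.ve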

4376:    Level: intermediate

4378: .seealso: MatCopy(), MatDuplicate(), MatGetFactor()
4379: @*/
4380: PetscErrorCode MatGetFactorAvailable(Mat mat, const MatSolverPackage type,MatFactorType ftype,PetscBool  *flg)
4381: {
4382:   PetscErrorCode ierr, (*gconv)(Mat,MatFactorType,Mat*);


4388:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
4389:   MatCheckPreallocated(mat,1);

4391:   *flg = PETSC_FALSE;
4392:   MatSolverPackageGet(type,((PetscObject)mat)->type_name,ftype,NULL,NULL,&gconv);
4393:   if (gconv) {
4394:     *flg = PETSC_TRUE;
4395:   }
4396:   return(0);
4397: }

4399:  #include <petscdmtypes.h>

4401: /*@
4402:    MatDuplicate - Duplicates a matrix including the non-zero structure.

4404:    Collective on Mat

4406:    Input Parameters:
4407: +  mat - the matrix
4408: -  op - One of MAT_DO_NOT_COPY_VALUES, MAT_COPY_VALUES, or MAT_SHARE_NONZERO_PATTERN.
4409:         See the manual page for MatDuplicateOption for an explanation of these options.

4411:    Output Parameter:
4412: .  M - pointer to place new matrix

4414:    Level: intermediate

4416:    Concepts: matrices^duplicating

4418:    Notes: You cannot change the nonzero pattern for the parent or child matrix if you use MAT_SHARE_NONZERO_PATTERN.
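
   A minimal usage sketch (not taken from the PETSc examples; error checking omitted). B gets
   the nonzero pattern and values of A, and can then be modified without affecting A:
.vb
     Mat B;

     MatDuplicate(A,MAT_COPY_VALUES,&B);
     MatScale(B,2.0);
.ve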

4420: .seealso: MatCopy(), MatConvert(), MatDuplicateOption
4421: @*/
4422: PetscErrorCode MatDuplicate(Mat mat,MatDuplicateOption op,Mat *M)
4423: {
4425:   Mat            B;
4426:   PetscInt       i;
4427:   DM             dm;

4433:   if (op == MAT_COPY_VALUES && !mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"MAT_COPY_VALUES not allowed for unassembled matrix");
4434:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
4435:   MatCheckPreallocated(mat,1);

4437:   *M = 0;
4438:   if (!mat->ops->duplicate) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Not written for this matrix type");
4439:   PetscLogEventBegin(MAT_Convert,mat,0,0,0);
4440:   (*mat->ops->duplicate)(mat,op,M);
4441:   B    = *M;

4443:   B->stencil.dim = mat->stencil.dim;
4444:   B->stencil.noc = mat->stencil.noc;
4445:   for (i=0; i<=mat->stencil.dim; i++) {
4446:     B->stencil.dims[i]   = mat->stencil.dims[i];
4447:     B->stencil.starts[i] = mat->stencil.starts[i];
4448:   }

4450:   B->nooffproczerorows = mat->nooffproczerorows;
4451:   B->nooffprocentries  = mat->nooffprocentries;

4453:   PetscObjectQuery((PetscObject) mat, "__PETSc_dm", (PetscObject*) &dm);
4454:   if (dm) {
4455:     PetscObjectCompose((PetscObject) B, "__PETSc_dm", (PetscObject) dm);
4456:   }
4457:   PetscLogEventEnd(MAT_Convert,mat,0,0,0);
4458:   PetscObjectStateIncrease((PetscObject)B);
4459:   return(0);
4460: }

4462: /*@
4463:    MatGetDiagonal - Gets the diagonal of a matrix.

4465:    Logically Collective on Mat and Vec

4467:    Input Parameters:
4468: +  mat - the matrix
4469: -  v - the vector for storing the diagonal

4471:    Output Parameter:
4472: .  v - the diagonal of the matrix

4474:    Level: intermediate

4476:    Note:
4477:    Currently only correct in parallel for square matrices.
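
   A minimal usage sketch (not taken from the PETSc examples; error checking omitted).
   MatCreateVecs() provides a vector with the row layout of mat:
.vb
     Vec diag;

     MatCreateVecs(mat,NULL,&diag);
     MatGetDiagonal(mat,diag);
.ve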

4479:    Concepts: matrices^accessing diagonals

4481: .seealso: MatGetRow(), MatCreateSubMatrices(), MatCreateSubMatrix(), MatGetRowMaxAbs()
4482: @*/
4483: PetscErrorCode MatGetDiagonal(Mat mat,Vec v)
4484: {

4491:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4492:   if (!mat->ops->getdiagonal) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
4493:   MatCheckPreallocated(mat,1);

4495:   (*mat->ops->getdiagonal)(mat,v);
4496:   PetscObjectStateIncrease((PetscObject)v);
4497:   return(0);
4498: }

4500: /*@C
4501:    MatGetRowMin - Gets the minimum value (of the real part) of each
4502:         row of the matrix

4504:    Logically Collective on Mat and Vec

4506:    Input Parameters:
4507: .  mat - the matrix

4509:    Output Parameter:
4510: +  v - the vector for storing the minimums
4511: -  idx - the indices of the column found for each row (optional)

4513:    Level: intermediate

4515:    Notes: The results of this call are the same as if one converted the matrix to dense format
4516:       and found the minimum value in each row (i.e. the implicit zeros are counted as zeros).

4518:     This code is only implemented for a couple of matrix formats.

4520:    Concepts: matrices^getting row minimums

4522: .seealso: MatGetDiagonal(), MatCreateSubMatrices(), MatCreateSubMatrix(), MatGetRowMaxAbs(),
4523:           MatGetRowMax()
4524: @*/
4525: PetscErrorCode MatGetRowMin(Mat mat,Vec v,PetscInt idx[])
4526: {

4533:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4534:   if (!mat->ops->getrowmin) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
4535:   MatCheckPreallocated(mat,1);

4537:   (*mat->ops->getrowmin)(mat,v,idx);
4538:   PetscObjectStateIncrease((PetscObject)v);
4539:   return(0);
4540: }

4542: /*@C
4543:    MatGetRowMinAbs - Gets the minimum value (in absolute value) of each
4544:         row of the matrix

4546:    Logically Collective on Mat and Vec

4548:    Input Parameters:
4549: .  mat - the matrix

4551:    Output Parameter:
4552: +  v - the vector for storing the minimums
4553: -  idx - the indices of the column found for each row (or NULL if not needed)

4555:    Level: intermediate

4557:    Notes: if a row is completely empty or has only 0.0 values then the idx[] value for that
4558:     row is 0 (the first column).

4560:     This code is only implemented for a couple of matrix formats.

4562:    Concepts: matrices^getting row minimums

4564: .seealso: MatGetDiagonal(), MatCreateSubMatrices(), MatCreateSubMatrix(), MatGetRowMax(), MatGetRowMaxAbs(), MatGetRowMin()
4565: @*/
4566: PetscErrorCode MatGetRowMinAbs(Mat mat,Vec v,PetscInt idx[])
4567: {

4574:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4575:   if (!mat->ops->getrowminabs) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
4576:   MatCheckPreallocated(mat,1);
4577:   if (idx) {PetscMemzero(idx,mat->rmap->n*sizeof(PetscInt));}

4579:   (*mat->ops->getrowminabs)(mat,v,idx);
4580:   PetscObjectStateIncrease((PetscObject)v);
4581:   return(0);
4582: }

4584: /*@C
4585:    MatGetRowMax - Gets the maximum value (of the real part) of each
4586:         row of the matrix

4588:    Logically Collective on Mat and Vec

4590:    Input Parameters:
4591: .  mat - the matrix

4593:    Output Parameter:
4594: +  v - the vector for storing the maximums
4595: -  idx - the indices of the column found for each row (optional)

4597:    Level: intermediate

4599:    Notes: The results of this call are the same as if one converted the matrix to dense format
4600:       and found the maximum value in each row (i.e. the implicit zeros are counted as zeros).

4602:     This code is only implemented for a couple of matrix formats.

4604:    Concepts: matrices^getting row maximums

4606: .seealso: MatGetDiagonal(), MatCreateSubMatrices(), MatCreateSubMatrix(), MatGetRowMaxAbs(), MatGetRowMin()
4607: @*/
4608: PetscErrorCode MatGetRowMax(Mat mat,Vec v,PetscInt idx[])
4609: {

4616:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4617:   if (!mat->ops->getrowmax) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
4618:   MatCheckPreallocated(mat,1);

4620:   (*mat->ops->getrowmax)(mat,v,idx);
4621:   PetscObjectStateIncrease((PetscObject)v);
4622:   return(0);
4623: }

4625: /*@C
4626:    MatGetRowMaxAbs - Gets the maximum value (in absolute value) of each
4627:         row of the matrix

4629:    Logically Collective on Mat and Vec

4631:    Input Parameters:
4632: .  mat - the matrix

4634:    Output Parameter:
4635: +  v - the vector for storing the maximums
4636: -  idx - the indices of the column found for each row (or NULL if not needed)

4638:    Level: intermediate

4640:    Notes: if a row is completely empty or has only 0.0 values then the idx[] value for that
4641:     row is 0 (the first column).

4643:     This code is only implemented for a couple of matrix formats.
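
   A minimal usage sketch (not taken from the PETSc examples; error checking omitted). After
   the call, idx[i] holds the column of the largest absolute entry in local row i:
.vb
     Vec      rmax;
     PetscInt m,*idx;

     MatCreateVecs(mat,NULL,&rmax);
     MatGetLocalSize(mat,&m,NULL);
     PetscMalloc1(m,&idx);
     MatGetRowMaxAbs(mat,rmax,idx);
.ve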

4645:    Concepts: matrices^getting row maximums

4647: .seealso: MatGetDiagonal(), MatCreateSubMatrices(), MatCreateSubMatrix(), MatGetRowMax(), MatGetRowMin()
4648: @*/
4649: PetscErrorCode MatGetRowMaxAbs(Mat mat,Vec v,PetscInt idx[])
4650: {

4657:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4658:   if (!mat->ops->getrowmaxabs) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
4659:   MatCheckPreallocated(mat,1);
4660:   if (idx) {PetscMemzero(idx,mat->rmap->n*sizeof(PetscInt));}

4662:   (*mat->ops->getrowmaxabs)(mat,v,idx);
4663:   PetscObjectStateIncrease((PetscObject)v);
4664:   return(0);
4665: }

4667: /*@
4668:    MatGetRowSum - Gets the sum of each row of the matrix

4670:    Logically or Neighborhood Collective on Mat and Vec

4672:    Input Parameters:
4673: .  mat - the matrix

4675:    Output Parameter:
4676: .  v - the vector for storing the sum of rows

4678:    Level: intermediate

4680:    Notes: This code is slow since it is not currently specialized for different formats

4682:    Concepts: matrices^getting row sums
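
   Example of Usage (an illustrative sketch; A is assumed to be an assembled matrix):
.vb
     Vec rowsum;
     MatCreateVecs(A,NULL,&rowsum);
     MatGetRowSum(A,rowsum);
     VecDestroy(&rowsum);
.ve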

4684: .seealso: MatGetDiagonal(), MatCreateSubMatrices(), MatCreateSubMatrix(), MatGetRowMax(), MatGetRowMin()
4685: @*/
4686: PetscErrorCode MatGetRowSum(Mat mat, Vec v)
4687: {
4688:   Vec            ones;

4695:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4696:   MatCheckPreallocated(mat,1);
4697:   MatCreateVecs(mat,&ones,NULL);
4698:   VecSet(ones,1.);
4699:   MatMult(mat,ones,v);
4700:   VecDestroy(&ones);
4701:   return(0);
4702: }

4704: /*@
4705:    MatTranspose - Computes an in-place or out-of-place transpose of a matrix.

4707:    Collective on Mat

4709:    Input Parameters:
4710: +  mat - the matrix to transpose
4711: -  reuse - either MAT_INITIAL_MATRIX, MAT_REUSE_MATRIX, or MAT_INPLACE_MATRIX

4713:    Output Parameters:
4714: .  B - the transpose

4716:    Notes:
4717:      If you use MAT_INPLACE_MATRIX then you must pass in &mat for B

4719:      MAT_REUSE_MATRIX causes the B matrix from a previous call to this function with MAT_INITIAL_MATRIX to be used

4721:      Consider using MatCreateTranspose() instead if you only need a matrix that behaves like the transpose, but don't need the storage to be changed.

4723:    Level: intermediate

4725:    Concepts: matrices^transposing
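
   Example of Usage (an illustrative sketch; A and At are assumed names):
.vb
     Mat At;
     MatTranspose(A,MAT_INITIAL_MATRIX,&At);
     ....  change the numerical values of A, keeping its nonzero pattern  ....
     MatTranspose(A,MAT_REUSE_MATRIX,&At);
     MatDestroy(&At);
.ve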

4727: .seealso: MatMultTranspose(), MatMultTransposeAdd(), MatIsTranspose(), MatReuse
4728: @*/
4729: PetscErrorCode MatTranspose(Mat mat,MatReuse reuse,Mat *B)
4730: {

4736:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4737:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
4738:   if (!mat->ops->transpose) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
4739:   if (reuse == MAT_INPLACE_MATRIX && mat != *B) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"MAT_INPLACE_MATRIX requires last matrix to match first");
4740:   if (reuse == MAT_REUSE_MATRIX && mat == *B) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Perhaps you mean MAT_INPLACE_MATRIX");
4741:   MatCheckPreallocated(mat,1);

4743:   PetscLogEventBegin(MAT_Transpose,mat,0,0,0);
4744:   (*mat->ops->transpose)(mat,reuse,B);
4745:   PetscLogEventEnd(MAT_Transpose,mat,0,0,0);
4746:   if (B) {PetscObjectStateIncrease((PetscObject)*B);}
4747:   return(0);
4748: }

4750: /*@
4751:    MatIsTranspose - Test whether a matrix is another one's transpose,
4752:         or its own, in which case it tests symmetry.

4754:    Collective on Mat

4756:    Input Parameters:
4757: +  A - the matrix to test
4758: .  B - the matrix to test against, this can equal the first parameter
-  tol - tolerance, differences between entries smaller than this are counted as zero

4760:    Output Parameters:
4761: .  flg - the result

4763:    Notes:
4764:    Only available for SeqAIJ/MPIAIJ matrices. The sequential algorithm
4765:    has a running time of the order of the number of nonzeros; the parallel
4766:    test involves parallel copies of the block-offdiagonal parts of the matrix.

4768:    Level: intermediate

4770:    Concepts: matrices^transposing, matrix^symmetry
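
   Example of Usage (an illustrative sketch; passing the same matrix twice tests symmetry, here with an
   assumed tolerance of 0.0):
.vb
     PetscBool flg;
     MatIsTranspose(A,A,0.0,&flg);
.ve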

4772: .seealso: MatTranspose(), MatIsSymmetric(), MatIsHermitian()
4773: @*/
4774: PetscErrorCode MatIsTranspose(Mat A,Mat B,PetscReal tol,PetscBool  *flg)
4775: {
4776:   PetscErrorCode ierr,(*f)(Mat,Mat,PetscReal,PetscBool*),(*g)(Mat,Mat,PetscReal,PetscBool*);

4782:   PetscObjectQueryFunction((PetscObject)A,"MatIsTranspose_C",&f);
4783:   PetscObjectQueryFunction((PetscObject)B,"MatIsTranspose_C",&g);
4784:   *flg = PETSC_FALSE;
4785:   if (f && g) {
4786:     if (f == g) {
4787:       (*f)(A,B,tol,flg);
4788:     } else SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_NOTSAMETYPE,"Matrices do not have the same comparator for symmetry test");
4789:   } else {
4790:     MatType mattype;
4791:     if (!f) {
4792:       MatGetType(A,&mattype);
4793:     } else {
4794:       MatGetType(B,&mattype);
4795:     }
4796:     SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Matrix of type <%s> does not support checking for transpose",mattype);
4797:   }
4798:   return(0);
4799: }

4801: /*@
4802:    MatHermitianTranspose - Computes the in-place or out-of-place Hermitian transpose (conjugate transpose) of a matrix.

4804:    Collective on Mat

4806:    Input Parameters:
4807: +  mat - the matrix to transpose and complex conjugate
4808: -  reuse - MAT_INITIAL_MATRIX to create a new matrix, MAT_INPLACE_MATRIX to reuse the first argument to store the transpose

4810:    Output Parameters:
4811: .  B - the Hermitian transpose

4813:    Level: intermediate

4815:    Concepts: matrices^transposing, complex conjugate

4817: .seealso: MatTranspose(), MatMultTranspose(), MatMultTransposeAdd(), MatIsTranspose(), MatReuse
4818: @*/
4819: PetscErrorCode MatHermitianTranspose(Mat mat,MatReuse reuse,Mat *B)
4820: {

4824:   MatTranspose(mat,reuse,B);
4825: #if defined(PETSC_USE_COMPLEX)
4826:   MatConjugate(*B);
4827: #endif
4828:   return(0);
4829: }

4831: /*@
4832:    MatIsHermitianTranspose - Test whether a matrix is another one's Hermitian transpose.

4834:    Collective on Mat

4836:    Input Parameters:
4837: +  A - the matrix to test
4838: .  B - the matrix to test against, this can equal the first parameter
-  tol - tolerance, differences between entries smaller than this are counted as zero

4840:    Output Parameters:
4841: .  flg - the result

4843:    Notes:
4844:    Only available for SeqAIJ/MPIAIJ matrices. The sequential algorithm
4845:    has a running time of the order of the number of nonzeros; the parallel
4846:    test involves parallel copies of the block-offdiagonal parts of the matrix.

4848:    Level: intermediate

4850:    Concepts: matrices^transposing, matrix^symmetry

4852: .seealso: MatTranspose(), MatIsSymmetric(), MatIsHermitian(), MatIsTranspose()
4853: @*/
4854: PetscErrorCode MatIsHermitianTranspose(Mat A,Mat B,PetscReal tol,PetscBool  *flg)
4855: {
4856:   PetscErrorCode ierr,(*f)(Mat,Mat,PetscReal,PetscBool*),(*g)(Mat,Mat,PetscReal,PetscBool*);

4862:   PetscObjectQueryFunction((PetscObject)A,"MatIsHermitianTranspose_C",&f);
4863:   PetscObjectQueryFunction((PetscObject)B,"MatIsHermitianTranspose_C",&g);
4864:   if (f && g) {
4865:     if (f==g) {
4866:       (*f)(A,B,tol,flg);
4867:     } else SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_NOTSAMETYPE,"Matrices do not have the same comparator for Hermitian test");
4868:   }
4869:   return(0);
4870: }

4872: /*@
4873:    MatPermute - Creates a new matrix with rows and columns permuted from the
4874:    original.

4876:    Collective on Mat

4878:    Input Parameters:
4879: +  mat - the matrix to permute
4880: .  row - row permutation, each processor supplies only the permutation for its rows
4881: -  col - column permutation, each processor supplies only the permutation for its columns

4883:    Output Parameters:
4884: .  B - the permuted matrix

4886:    Level: advanced

4888:    Note:
4889:    The index sets map from row/col of permuted matrix to row/col of original matrix.
4890:    The index sets should be on the same communicator as Mat and have the same local sizes.

4892:    Concepts: matrices^permuting
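
   Example of Usage (an illustrative sketch using a reverse Cuthill-McKee ordering; A and B are assumed names):
.vb
     IS  rowperm,colperm;
     Mat B;
     MatGetOrdering(A,MATORDERINGRCM,&rowperm,&colperm);
     MatPermute(A,rowperm,colperm,&B);
     ISDestroy(&rowperm);
     ISDestroy(&colperm);
.ve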

4894: .seealso: MatGetOrdering(), ISAllGather()

4896: @*/
4897: PetscErrorCode MatPermute(Mat mat,IS row,IS col,Mat *B)
4898: {

4907:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4908:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
4909:   if (!mat->ops->permute) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"MatPermute not available for Mat type %s",((PetscObject)mat)->type_name);
4910:   MatCheckPreallocated(mat,1);

4912:   (*mat->ops->permute)(mat,row,col,B);
4913:   PetscObjectStateIncrease((PetscObject)*B);
4914:   return(0);
4915: }

4917: /*@
4918:    MatEqual - Compares two matrices.

4920:    Collective on Mat

4922:    Input Parameters:
4923: +  A - the first matrix
4924: -  B - the second matrix

4926:    Output Parameter:
4927: .  flg - PETSC_TRUE if the matrices are equal; PETSC_FALSE otherwise.

4929:    Level: intermediate

4931:    Concepts: matrices^equality between
4932: @*/
4933: PetscErrorCode MatEqual(Mat A,Mat B,PetscBool  *flg)
4934: {

4944:   MatCheckPreallocated(B,2);
4945:   if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4946:   if (!B->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4947:   if (A->rmap->N != B->rmap->N || A->cmap->N != B->cmap->N) SETERRQ4(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Mat A,Mat B: global dim %D %D %D %D",A->rmap->N,B->rmap->N,A->cmap->N,B->cmap->N);
4948:   if (!A->ops->equal) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Mat type %s",((PetscObject)A)->type_name);
4949:   if (!B->ops->equal) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Mat type %s",((PetscObject)B)->type_name);
4950:   if (A->ops->equal != B->ops->equal) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_INCOMP,"A is type: %s\nB is type: %s",((PetscObject)A)->type_name,((PetscObject)B)->type_name);
4951:   MatCheckPreallocated(A,1);

4953:   (*A->ops->equal)(A,B,flg);
4954:   return(0);
4955: }

4957: /*@C
4958:    MatDiagonalScale - Scales a matrix on the left and right by diagonal
4959:    matrices that are stored as vectors.  Either of the two scaling
4960:    matrices can be NULL.

4962:    Collective on Mat

4964:    Input Parameters:
4965: +  mat - the matrix to be scaled
4966: .  l - the left scaling vector (or NULL)
4967: -  r - the right scaling vector (or NULL)

4969:    Notes:
4970:    MatDiagonalScale() computes A = LAR, where
4971:    L is a diagonal matrix (stored as a vector) and R is a diagonal matrix (stored as a vector).
4972:    L scales the rows of the matrix and R scales the columns of the matrix.

4974:    Level: intermediate

4976:    Concepts: matrices^diagonal scaling
4977:    Concepts: diagonal scaling of matrices
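
   Example of Usage (an illustrative sketch; A is assumed to be an assembled matrix, l scales the rows
   by 2 and r scales the columns by 1/2):
.vb
     Vec l,r;
     MatCreateVecs(A,&r,&l);
     VecSet(l,2.0);
     VecSet(r,0.5);
     MatDiagonalScale(A,l,r);
     VecDestroy(&l);
     VecDestroy(&r);
.ve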

4979: .seealso: MatScale()
4980: @*/
4981: PetscErrorCode MatDiagonalScale(Mat mat,Vec l,Vec r)
4982: {

4988:   if (!mat->ops->diagonalscale) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
4991:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4992:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
4993:   MatCheckPreallocated(mat,1);

4995:   PetscLogEventBegin(MAT_Scale,mat,0,0,0);
4996:   (*mat->ops->diagonalscale)(mat,l,r);
4997:   PetscLogEventEnd(MAT_Scale,mat,0,0,0);
4998:   PetscObjectStateIncrease((PetscObject)mat);
4999: #if defined(PETSC_HAVE_CUSP)
5000:   if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
5001:     mat->valid_GPU_matrix = PETSC_CUSP_CPU;
5002:   }
5003: #elif defined(PETSC_HAVE_VIENNACL)
5004:   if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
5005:     mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
5006:   }
5007: #elif defined(PETSC_HAVE_VECCUDA)
5008:   if (mat->valid_GPU_matrix != PETSC_CUDA_UNALLOCATED) {
5009:     mat->valid_GPU_matrix = PETSC_CUDA_CPU;
5010:   }
5011: #endif
5012:   return(0);
5013: }

5015: /*@
5016:     MatScale - Scales all elements of a matrix by a given number.

5018:     Logically Collective on Mat

5020:     Input Parameters:
5021: +   mat - the matrix to be scaled
5022: -   a  - the scaling value

5024:     Output Parameter:
5025: .   mat - the scaled matrix

5027:     Level: intermediate

5029:     Concepts: matrices^scaling all entries

5031: .seealso: MatDiagonalScale()
5032: @*/
5033: PetscErrorCode MatScale(Mat mat,PetscScalar a)
5034: {

5040:   if (a != (PetscScalar)1.0 && !mat->ops->scale) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
5041:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
5042:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
5044:   MatCheckPreallocated(mat,1);

5046:   PetscLogEventBegin(MAT_Scale,mat,0,0,0);
5047:   if (a != (PetscScalar)1.0) {
5048:     (*mat->ops->scale)(mat,a);
5049:     PetscObjectStateIncrease((PetscObject)mat);
5050: #if defined(PETSC_HAVE_CUSP)
5051:     if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
5052:       mat->valid_GPU_matrix = PETSC_CUSP_CPU;
5053:     }
5054: #elif defined(PETSC_HAVE_VIENNACL)
5055:     if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
5056:       mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
5057:     }
5058: #elif defined(PETSC_HAVE_VECCUDA)
5059:     if (mat->valid_GPU_matrix != PETSC_CUDA_UNALLOCATED) {
5060:       mat->valid_GPU_matrix = PETSC_CUDA_CPU;
5061:     }
5062: #endif
5063:   }
5064:   PetscLogEventEnd(MAT_Scale,mat,0,0,0);
5065:   return(0);
5066: }

5068: /*@
5069:    MatNorm - Calculates various norms of a matrix.

5071:    Collective on Mat

5073:    Input Parameters:
5074: +  mat - the matrix
5075: -  type - the type of norm, NORM_1, NORM_FROBENIUS, NORM_INFINITY

5077:    Output Parameters:
5078: .  nrm - the resulting norm

5080:    Level: intermediate

5082:    Concepts: matrices^norm
5083:    Concepts: norm^of matrix
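
   Example of Usage (an illustrative sketch; A is assumed to be an assembled matrix):
.vb
     PetscReal nrm1,nrmf,nrminf;
     MatNorm(A,NORM_1,&nrm1);
     MatNorm(A,NORM_FROBENIUS,&nrmf);
     MatNorm(A,NORM_INFINITY,&nrminf);
.ve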
5084: @*/
5085: PetscErrorCode MatNorm(Mat mat,NormType type,PetscReal *nrm)
5086: {


5094:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
5095:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
5096:   if (!mat->ops->norm) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
5097:   MatCheckPreallocated(mat,1);

5099:   (*mat->ops->norm)(mat,type,nrm);
5100:   return(0);
5101: }

5103: /*
5104:      This variable is used to prevent counting of MatAssemblyBegin() that
5105:    are called from within a MatAssemblyEnd().
5106: */
5107: static PetscInt MatAssemblyEnd_InUse = 0;
5108: /*@
5109:    MatAssemblyBegin - Begins assembling the matrix.  This routine should
5110:    be called after completing all calls to MatSetValues().

5112:    Collective on Mat

5114:    Input Parameters:
5115: +  mat - the matrix
5116: -  type - type of assembly, either MAT_FLUSH_ASSEMBLY or MAT_FINAL_ASSEMBLY

5118:    Notes:
5119:    MatSetValues() generally caches the values.  The matrix is ready to
5120:    use only after MatAssemblyBegin() and MatAssemblyEnd() have been called.
5121:    Use MAT_FLUSH_ASSEMBLY when switching between ADD_VALUES and INSERT_VALUES
5122:    in MatSetValues(); use MAT_FINAL_ASSEMBLY for the final assembly before
5123:    using the matrix.

5125:    ALL processes that share a matrix MUST call MatAssemblyBegin() and MatAssemblyEnd() the SAME NUMBER of times, and each time with the
5126:    same flag of MAT_FLUSH_ASSEMBLY or MAT_FINAL_ASSEMBLY for all processes. Thus you CANNOT locally change from ADD_VALUES to INSERT_VALUES, that is
5127:    a global collective operation requiring all processes that share the matrix.

5129:    Space for preallocated nonzeros that is not filled by a call to MatSetValues() or a related routine is compressed
5130:    out by assembly. If you intend to use that extra space on a subsequent assembly, be sure to insert explicit zeros
5131:    before MAT_FINAL_ASSEMBLY so the space is not compressed out.

5133:    Level: beginner

5135:    Concepts: matrices^assembling
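
   Example of Usage (an illustrative sketch of switching from ADD_VALUES to INSERT_VALUES with a flush
   assembly; A, i, j, v, and w are assumed names):
.vb
     MatSetValues(A,1,&i,1,&j,&v,ADD_VALUES);
     MatAssemblyBegin(A,MAT_FLUSH_ASSEMBLY);
     MatAssemblyEnd(A,MAT_FLUSH_ASSEMBLY);
     MatSetValues(A,1,&i,1,&j,&w,INSERT_VALUES);
     MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
     MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
.ve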

5137: .seealso: MatAssemblyEnd(), MatSetValues(), MatAssembled()
5138: @*/
5139: PetscErrorCode MatAssemblyBegin(Mat mat,MatAssemblyType type)
5140: {

5146:   MatCheckPreallocated(mat,1);
5147:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix.\nDid you forget to call MatSetUnfactored()?");
5148:   if (mat->assembled) {
5149:     mat->was_assembled = PETSC_TRUE;
5150:     mat->assembled     = PETSC_FALSE;
5151:   }
5152:   if (!MatAssemblyEnd_InUse) {
5153:     PetscLogEventBegin(MAT_AssemblyBegin,mat,0,0,0);
5154:     if (mat->ops->assemblybegin) {(*mat->ops->assemblybegin)(mat,type);}
5155:     PetscLogEventEnd(MAT_AssemblyBegin,mat,0,0,0);
5156:   } else if (mat->ops->assemblybegin) {
5157:     (*mat->ops->assemblybegin)(mat,type);
5158:   }
5159:   return(0);
5160: }

5162: /*@
5163:    MatAssembled - Indicates if a matrix has been assembled and is ready for
5164:      use; for example, in matrix-vector product.

5166:    Not Collective

5168:    Input Parameter:
5169: .  mat - the matrix

5171:    Output Parameter:
5172: .  assembled - PETSC_TRUE or PETSC_FALSE

5174:    Level: advanced

5176:    Concepts: matrices^assembled?

5178: .seealso: MatAssemblyEnd(), MatSetValues(), MatAssemblyBegin()
5179: @*/
5180: PetscErrorCode MatAssembled(Mat mat,PetscBool  *assembled)
5181: {
5186:   *assembled = mat->assembled;
5187:   return(0);
5188: }

5190: /*@
5191:    MatAssemblyEnd - Completes assembling the matrix.  This routine should
5192:    be called after MatAssemblyBegin().

5194:    Collective on Mat

5196:    Input Parameters:
5197: +  mat - the matrix
5198: -  type - type of assembly, either MAT_FLUSH_ASSEMBLY or MAT_FINAL_ASSEMBLY

5200:    Options Database Keys:
5201: +  -mat_view ::ascii_info - Prints info on matrix at conclusion of MatAssemblyEnd()
5202: .  -mat_view ::ascii_info_detail - Prints more detailed info
5203: .  -mat_view - Prints matrix in ASCII format
5204: .  -mat_view ::ascii_matlab - Prints matrix in Matlab format
5205: .  -mat_view draw - PetscDraws nonzero structure of matrix, using MatView() and PetscDrawOpenX().
5206: .  -display <name> - Sets display name (default is host)
5207: .  -draw_pause <sec> - Sets number of seconds to pause after display
5208: .  -mat_view socket - Sends matrix to socket, can be accessed from Matlab (See Users-Manual: Chapter 12 Using MATLAB with PETSc )
5209: .  -viewer_socket_machine <machine> - Machine to use for socket
5210: .  -viewer_socket_port <port> - Port number to use for socket
5211: -  -mat_view binary:filename[:append] - Save matrix to file in binary format

5213:    Notes:
5214:    MatSetValues() generally caches the values.  The matrix is ready to
5215:    use only after MatAssemblyBegin() and MatAssemblyEnd() have been called.
5216:    Use MAT_FLUSH_ASSEMBLY when switching between ADD_VALUES and INSERT_VALUES
5217:    in MatSetValues(); use MAT_FINAL_ASSEMBLY for the final assembly before
5218:    using the matrix.

5220:    Space for preallocated nonzeros that is not filled by a call to MatSetValues() or a related routine is compressed
5221:    out by assembly. If you intend to use that extra space on a subsequent assembly, be sure to insert explicit zeros
5222:    before MAT_FINAL_ASSEMBLY so the space is not compressed out.

5224:    Level: beginner

5226: .seealso: MatAssemblyBegin(), MatSetValues(), PetscDrawOpenX(), PetscDrawCreate(), MatView(), MatAssembled(), PetscViewerSocketOpen()
5227: @*/
5228: PetscErrorCode MatAssemblyEnd(Mat mat,MatAssemblyType type)
5229: {
5230:   PetscErrorCode  ierr;
5231:   static PetscInt inassm = 0;
5232:   PetscBool       flg    = PETSC_FALSE;


5238:   inassm++;
5239:   MatAssemblyEnd_InUse++;
5240:   if (MatAssemblyEnd_InUse == 1) { /* Do the logging only the first time through */
5241:     PetscLogEventBegin(MAT_AssemblyEnd,mat,0,0,0);
5242:     if (mat->ops->assemblyend) {
5243:       (*mat->ops->assemblyend)(mat,type);
5244:     }
5245:     PetscLogEventEnd(MAT_AssemblyEnd,mat,0,0,0);
5246:   } else if (mat->ops->assemblyend) {
5247:     (*mat->ops->assemblyend)(mat,type);
5248:   }

5250:   /* Flush assembly is not a true assembly */
5251:   if (type != MAT_FLUSH_ASSEMBLY) {
5252:     mat->assembled = PETSC_TRUE; mat->num_ass++;
5253:   }
5254:   mat->insertmode = NOT_SET_VALUES;
5255:   MatAssemblyEnd_InUse--;
5256:   PetscObjectStateIncrease((PetscObject)mat);
5257:   if (!mat->symmetric_eternal) {
5258:     mat->symmetric_set              = PETSC_FALSE;
5259:     mat->hermitian_set              = PETSC_FALSE;
5260:     mat->structurally_symmetric_set = PETSC_FALSE;
5261:   }
5262: #if defined(PETSC_HAVE_CUSP)
5263:   if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
5264:     mat->valid_GPU_matrix = PETSC_CUSP_CPU;
5265:   }
5266: #elif defined(PETSC_HAVE_VIENNACL)
5267:   if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
5268:     mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
5269:   }
5270: #elif defined(PETSC_HAVE_VECCUDA)
5271:   if (mat->valid_GPU_matrix != PETSC_CUDA_UNALLOCATED) {
5272:     mat->valid_GPU_matrix = PETSC_CUDA_CPU;
5273:   }
5274: #endif
5275:   if (inassm == 1 && type != MAT_FLUSH_ASSEMBLY) {
5276:     MatViewFromOptions(mat,NULL,"-mat_view");

5278:     if (mat->checksymmetryonassembly) {
5279:       MatIsSymmetric(mat,mat->checksymmetrytol,&flg);
5280:       if (flg) {
5281:         PetscPrintf(PetscObjectComm((PetscObject)mat),"Matrix is symmetric (tolerance %g)\n",(double)mat->checksymmetrytol);
5282:       } else {
5283:         PetscPrintf(PetscObjectComm((PetscObject)mat),"Matrix is not symmetric (tolerance %g)\n",(double)mat->checksymmetrytol);
5284:       }
5285:     }
5286:     if (mat->nullsp && mat->checknullspaceonassembly) {
5287:       MatNullSpaceTest(mat->nullsp,mat,NULL);
5288:     }
5289:   }
5290:   inassm--;
5291:   return(0);
5292: }

5294: /*@
5295:    MatSetOption - Sets a parameter option for a matrix. Some options
5296:    may be specific to certain storage formats.  Some options
5297:    determine how values will be inserted (or added). Sorted,
5298:    row-oriented input will generally assemble the fastest. The default
5299:    is row-oriented.

5301:    Logically Collective on Mat for certain operations, such as MAT_SPD, not collective for MAT_ROW_ORIENTED, see MatOption

5303:    Input Parameters:
5304: +  mat - the matrix
5305: .  option - the option, one of those listed below (and possibly others),
5306: -  flg - turn the option on (PETSC_TRUE) or off (PETSC_FALSE)

5308:   Options Describing Matrix Structure:
5309: +    MAT_SPD - symmetric positive definite
5310: .    MAT_SYMMETRIC - symmetric in terms of both structure and value
5311: .    MAT_HERMITIAN - the transpose equals the complex conjugate (i.e. the matrix is Hermitian)
5312: .    MAT_STRUCTURALLY_SYMMETRIC - symmetric nonzero structure
5313: -    MAT_SYMMETRY_ETERNAL - if you would like the symmetry/Hermitian flag
5314:                             you set to be kept with all future use of the matrix
5315:                             including after MatAssemblyBegin/End() which could
5316:                             potentially change the symmetry structure, i.e. you
5317:                             KNOW the matrix will ALWAYS have the property you set.


5320:    Options For Use with MatSetValues():
5321:    Insert a logically dense subblock, which can be
5322: .    MAT_ROW_ORIENTED - row-oriented (default)

5324:    Note these options reflect the data you pass in with MatSetValues(); it has
5325:    nothing to do with how the data is stored internally in the matrix
5326:    data structure.

5328:    When (re)assembling a matrix, we can restrict the input for
5329:    efficiency/debugging purposes.  These options include:
5330: +    MAT_NEW_NONZERO_LOCATIONS - additional insertions will be allowed if they generate a new nonzero (slow)
5331: .    MAT_NEW_DIAGONALS - new diagonals will be allowed (for block diagonal format only)
5332: .    MAT_IGNORE_OFF_PROC_ENTRIES - drops off-processor entries
5333: .    MAT_NEW_NONZERO_LOCATION_ERR - generates an error for new matrix entry
5334: .    MAT_USE_HASH_TABLE - uses a hash table to speed up matrix assembly
5335: .    MAT_NO_OFF_PROC_ENTRIES - you know each process will only set values for its own rows, will generate an error if
5336:         any process sets values for another process. This avoids all reductions in the MatAssembly routines and thus improves
5337:         performance for very large process counts.
5338: -    MAT_SUBSET_OFF_PROC_ENTRIES - you know that the first assembly after setting this flag will set a superset
5339:         of the off-process entries required for all subsequent assemblies. This avoids a rendezvous step in the MatAssembly
5340:         functions, instead sending only neighbor messages.

5342:    Notes:
5343:    Except for MAT_UNUSED_NONZERO_LOCATION_ERR and  MAT_ROW_ORIENTED all processes that share the matrix must pass the same value in flg!

5345:    Some options are relevant only for particular matrix types and
5346:    are thus ignored by others.  Other options are not supported by
5347:    certain matrix types and will generate an error message if set.

5349:    If using a Fortran 77 module to compute a matrix, one may need to
5350:    use the column-oriented option (or convert to the row-oriented
5351:    format).

5353:    MAT_NEW_NONZERO_LOCATIONS set to PETSC_FALSE indicates that any add or insertion
5354:    that would generate a new entry in the nonzero structure is instead
5355:    ignored.  Thus, if memory has not already been allocated for this particular
5356:    data, then the insertion is ignored. For dense matrices, in which
5357:    the entire array is allocated, no entries are ever ignored.
5358:    Set after the first MatAssemblyEnd(). If this option is set then the MatAssemblyBegin/End() processes require one less global reduction

5360:    MAT_NEW_NONZERO_LOCATION_ERR set to PETSC_TRUE indicates that any add or insertion
5361:    that would generate a new entry in the nonzero structure instead produces
5362:    an error. (Currently supported for AIJ and BAIJ formats only.) If this option is set then the MatAssemblyBegin/End() processes require one less global reduction

5364:    MAT_NEW_NONZERO_ALLOCATION_ERR set to PETSC_TRUE indicates that any add or insertion
5365:    that would generate a new entry that has not been preallocated will
5366:    instead produce an error. (Currently supported for AIJ and BAIJ formats
5367:    only.) This is a useful flag when debugging matrix memory preallocation.
5368:    If this option is set then the MatAssemblyBegin/End() processes require one less global reduction

5370:    MAT_IGNORE_OFF_PROC_ENTRIES set to PETSC_TRUE indicates entries destined for
5371:    other processors should be dropped, rather than stashed.
5372:    This is useful if you know that the "owning" processor is also
5373:    always generating the correct matrix entries, so that PETSc need
5374:    not transfer duplicate entries generated on another processor.

5376:    MAT_USE_HASH_TABLE indicates that a hash table be used to improve the
5377:    searches during matrix assembly. When this flag is set, the hash table
5378:    is created during the first Matrix Assembly. This hash table is
5379:    used the next time through, during MatSetValues()/MatSetValuesBlocked()
5380:    to improve the searching of indices. MAT_NEW_NONZERO_LOCATIONS flag
5381:    should be used with MAT_USE_HASH_TABLE flag. This option is currently
5382:    supported by MATMPIBAIJ format only.

5384:    MAT_KEEP_NONZERO_PATTERN indicates when MatZeroRows() is called the zeroed entries
5385:    are kept in the nonzero structure

5387:    MAT_IGNORE_ZERO_ENTRIES - for AIJ/IS matrices this will stop zero values from creating
5388:    a zero location in the matrix

5390:    MAT_USE_INODES - indicates using inode version of the code - works with AIJ matrix types

5392:    MAT_NO_OFF_PROC_ZERO_ROWS - you know each process will only zero its own rows. This avoids all reductions in the
5393:         zero row routines and thus improves performance for very large process counts.

5395:    MAT_IGNORE_LOWER_TRIANGULAR - For SBAIJ matrices will ignore any insertions you make in the lower triangular
5396:         part of the matrix (since they should match the upper triangular part).

5398:    Notes: Can only be called after MatSetSizes() and MatSetType() have been set.

5400:    Level: intermediate

5402:    Concepts: matrices^setting options
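
   Example of Usage (an illustrative sketch; A is an assumed matrix whose type and sizes have been set):
.vb
     MatSetOption(A,MAT_SYMMETRIC,PETSC_TRUE);
     MatSetOption(A,MAT_SYMMETRY_ETERNAL,PETSC_TRUE);
     MatSetOption(A,MAT_NEW_NONZERO_ALLOCATION_ERR,PETSC_TRUE);
.ve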

5404: .seealso:  MatOption, Mat

5406: @*/
5407: PetscErrorCode MatSetOption(Mat mat,MatOption op,PetscBool flg)
5408: {

5414:   if (op > 0) {
5417:   }

5419:   if (((int) op) <= MAT_OPTION_MIN || ((int) op) >= MAT_OPTION_MAX) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_OUTOFRANGE,"Options %d is out of range",(int)op);
5420:   if (!((PetscObject)mat)->type_name) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_TYPENOTSET,"Cannot set options until type and size have been set, see MatSetType() and MatSetSizes()");

5422:   switch (op) {
5423:   case MAT_NO_OFF_PROC_ENTRIES:
5424:     mat->nooffprocentries = flg;
5425:     return(0);
5426:     break;
5427:   case MAT_SUBSET_OFF_PROC_ENTRIES:
5428:     mat->subsetoffprocentries = flg;
5429:     return(0);
5430:   case MAT_NO_OFF_PROC_ZERO_ROWS:
5431:     mat->nooffproczerorows = flg;
5432:     return(0);
5433:     break;
5434:   case MAT_SPD:
5435:     mat->spd_set = PETSC_TRUE;
5436:     mat->spd     = flg;
5437:     if (flg) {
5438:       mat->symmetric                  = PETSC_TRUE;
5439:       mat->structurally_symmetric     = PETSC_TRUE;
5440:       mat->symmetric_set              = PETSC_TRUE;
5441:       mat->structurally_symmetric_set = PETSC_TRUE;
5442:     }
5443:     break;
5444:   case MAT_SYMMETRIC:
5445:     mat->symmetric = flg;
5446:     if (flg) mat->structurally_symmetric = PETSC_TRUE;
5447:     mat->symmetric_set              = PETSC_TRUE;
5448:     mat->structurally_symmetric_set = flg;
5449: #if !defined(PETSC_USE_COMPLEX)
5450:     mat->hermitian     = flg;
5451:     mat->hermitian_set = PETSC_TRUE;
5452: #endif
5453:     break;
5454:   case MAT_HERMITIAN:
5455:     mat->hermitian = flg;
5456:     if (flg) mat->structurally_symmetric = PETSC_TRUE;
5457:     mat->hermitian_set              = PETSC_TRUE;
5458:     mat->structurally_symmetric_set = flg;
5459: #if !defined(PETSC_USE_COMPLEX)
5460:     mat->symmetric     = flg;
5461:     mat->symmetric_set = PETSC_TRUE;
5462: #endif
5463:     break;
5464:   case MAT_STRUCTURALLY_SYMMETRIC:
5465:     mat->structurally_symmetric     = flg;
5466:     mat->structurally_symmetric_set = PETSC_TRUE;
5467:     break;
5468:   case MAT_SYMMETRY_ETERNAL:
5469:     mat->symmetric_eternal = flg;
5470:     break;
5471:   case MAT_STRUCTURE_ONLY:
5472:     mat->structure_only = flg;
5473:     break;
5474:   default:
5475:     break;
5476:   }
5477:   if (mat->ops->setoption) {
5478:     (*mat->ops->setoption)(mat,op,flg);
5479:   }
5480:   return(0);
5481: }

5483: /*@
5484:    MatGetOption - Gets a parameter option that has been set for a matrix.

5486:    Logically Collective on Mat for certain operations, such as MAT_SPD, not collective for MAT_ROW_ORIENTED, see MatOption

5488:    Input Parameters:
5489: +  mat - the matrix
5490: -  option - the option, this only responds to certain options, check the code for which ones

5492:    Output Parameter:
5493: .  flg - turn the option on (PETSC_TRUE) or off (PETSC_FALSE)

5495:     Notes: Can only be called after MatSetSizes() and MatSetType() have been set.

5497:    Level: intermediate

5499:    Concepts: matrices^setting options

5501: .seealso:  MatOption, MatSetOption()

5503: @*/
5504: PetscErrorCode MatGetOption(Mat mat,MatOption op,PetscBool *flg)
5505: {

5510:   if (((int) op) <= MAT_OPTION_MIN || ((int) op) >= MAT_OPTION_MAX) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_OUTOFRANGE,"Options %d is out of range",(int)op);
5511:   if (!((PetscObject)mat)->type_name) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_TYPENOTSET,"Cannot get options until type and size have been set, see MatSetType() and MatSetSizes()");

5513:   switch (op) {
5514:   case MAT_NO_OFF_PROC_ENTRIES:
5515:     *flg = mat->nooffprocentries;
5516:     break;
5517:   case MAT_NO_OFF_PROC_ZERO_ROWS:
5518:     *flg = mat->nooffproczerorows;
5519:     break;
5520:   case MAT_SYMMETRIC:
5521:     *flg = mat->symmetric;
5522:     break;
5523:   case MAT_HERMITIAN:
5524:     *flg = mat->hermitian;
5525:     break;
5526:   case MAT_STRUCTURALLY_SYMMETRIC:
5527:     *flg = mat->structurally_symmetric;
5528:     break;
5529:   case MAT_SYMMETRY_ETERNAL:
5530:     *flg = mat->symmetric_eternal;
5531:     break;
5532:   case MAT_SPD:
5533:     *flg = mat->spd;
5534:     break;
5535:   default:
5536:     break;
5537:   }
5538:   return(0);
5539: }

5541: /*@
5542:    MatZeroEntries - Zeros all entries of a matrix.  For sparse matrices
5543:    this routine retains the old nonzero structure.

5545:    Logically Collective on Mat

5547:    Input Parameters:
5548: .  mat - the matrix

5550:    Level: intermediate

5552:    Notes: If the matrix was not preallocated then a default, likely poor preallocation will be set in the matrix, so this should be called after the preallocation phase.
5553:    See the Performance chapter of the users manual for information on preallocating matrices.

5555:    Concepts: matrices^zeroing
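
   Example of Usage (an illustrative sketch of reusing the nonzero pattern between assemblies; A is an
   assumed preallocated and previously assembled matrix):
.vb
     MatZeroEntries(A);
     ....  reinsert values with MatSetValues()  ....
     MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
     MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
.ve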

5557: .seealso: MatZeroRows()
5558: @*/
5559: PetscErrorCode MatZeroEntries(Mat mat)
5560: {

5566:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
5567:   if (mat->insertmode != NOT_SET_VALUES) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for matrices where you have set values but not yet assembled");
5568:   if (!mat->ops->zeroentries) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
5569:   MatCheckPreallocated(mat,1);

5571:   PetscLogEventBegin(MAT_ZeroEntries,mat,0,0,0);
5572:   (*mat->ops->zeroentries)(mat);
5573:   PetscLogEventEnd(MAT_ZeroEntries,mat,0,0,0);
5574:   PetscObjectStateIncrease((PetscObject)mat);
5575: #if defined(PETSC_HAVE_CUSP)
5576:   if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
5577:     mat->valid_GPU_matrix = PETSC_CUSP_CPU;
5578:   }
5579: #elif defined(PETSC_HAVE_VIENNACL)
5580:   if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
5581:     mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
5582:   }
5583: #elif defined(PETSC_HAVE_VECCUDA)
5584:   if (mat->valid_GPU_matrix != PETSC_CUDA_UNALLOCATED) {
5585:     mat->valid_GPU_matrix = PETSC_CUDA_CPU;
5586:   }
5587: #endif
5588:   return(0);
5589: }

5591: /*@C
5592:    MatZeroRowsColumns - Zeros all entries (except possibly the main diagonal)
5593:    of a set of rows and columns of a matrix.

5595:    Collective on Mat

5597:    Input Parameters:
5598: +  mat - the matrix
5599: .  numRows - the number of rows to remove
5600: .  rows - the global row indices
5601: .  diag - value put in all diagonals of eliminated rows (0.0 will even eliminate diagonal entry)
5602: .  x - optional vector of solutions for zeroed rows (other entries in vector are not used)
5603: -  b - optional vector of right hand side, that will be adjusted by provided solution

5605:    Notes:
5606:    This does not change the nonzero structure of the matrix, it merely zeros those entries in the matrix.

5608:    The user can set a value in the diagonal entry (or for the AIJ and
5609:    row formats can optionally remove the main diagonal entry from the
5610:    nonzero structure as well, by passing 0.0 as the final argument).

5612:    For the parallel case, all processes that share the matrix (i.e.,
5613:    those in the communicator used for matrix creation) MUST call this
5614:    routine, regardless of whether any rows being zeroed are owned by
5615:    them.

5617:    Each processor can indicate any rows in the entire matrix to be zeroed (i.e. each process does NOT have to
5618:    list only rows local to itself).

5620:    The option MAT_NO_OFF_PROC_ZERO_ROWS does not apply to this routine.

5622:    Level: intermediate

5624:    Concepts: matrices^zeroing rows
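
   Example of Usage (an illustrative sketch of imposing Dirichlet conditions symmetrically; A, x, b, and
   the row indices are assumed, with x holding the known boundary values and b the right hand side):
.vb
     PetscInt rows[] = {0, 5, 12};
     MatZeroRowsColumns(A,3,rows,1.0,x,b);
.ve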

5626: .seealso: MatZeroRowsIS(), MatZeroRows(), MatZeroRowsLocalIS(), MatZeroRowsStencil(), MatZeroEntries(), MatZeroRowsLocal(), MatSetOption(),
5627:           MatZeroRowsColumnsLocal(), MatZeroRowsColumnsLocalIS(), MatZeroRowsColumnsIS(), MatZeroRowsColumnsStencil()
5628: @*/
5629: PetscErrorCode MatZeroRowsColumns(Mat mat,PetscInt numRows,const PetscInt rows[],PetscScalar diag,Vec x,Vec b)
5630: {

5637:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
5638:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
5639:   if (!mat->ops->zerorowscolumns) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
5640:   MatCheckPreallocated(mat,1);

5642:   (*mat->ops->zerorowscolumns)(mat,numRows,rows,diag,x,b);
5643:   MatViewFromOptions(mat,NULL,"-mat_view");
5644:   PetscObjectStateIncrease((PetscObject)mat);
5645: #if defined(PETSC_HAVE_CUSP)
5646:   if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
5647:     mat->valid_GPU_matrix = PETSC_CUSP_CPU;
5648:   }
5649: #elif defined(PETSC_HAVE_VIENNACL)
5650:   if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
5651:     mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
5652:   }
5653: #elif defined(PETSC_HAVE_VECCUDA)
5654:   if (mat->valid_GPU_matrix != PETSC_CUDA_UNALLOCATED) {
5655:     mat->valid_GPU_matrix = PETSC_CUDA_CPU;
5656:   }
5657: #endif
5658:   return(0);
5659: }

5661: /*@C
5662:    MatZeroRowsColumnsIS - Zeros all entries (except possibly the main diagonal)
5663:    of a set of rows and columns of a matrix.

5665:    Collective on Mat

5667:    Input Parameters:
5668: +  mat - the matrix
5669: .  is - the rows to zero
5670: .  diag - value put in all diagonals of eliminated rows (0.0 will even eliminate diagonal entry)
5671: .  x - optional vector of solutions for zeroed rows (other entries in vector are not used)
5672: -  b - optional vector of right hand side, that will be adjusted by provided solution

5674:    Notes:
5675:    This does not change the nonzero structure of the matrix, it merely zeros those entries in the matrix.

5677:    The user can set a value in the diagonal entry (or for the AIJ and
5678:    row formats can optionally remove the main diagonal entry from the
5679:    nonzero structure as well, by passing 0.0 as the final argument).

5681:    For the parallel case, all processes that share the matrix (i.e.,
5682:    those in the communicator used for matrix creation) MUST call this
5683:    routine, regardless of whether any rows being zeroed are owned by
5684:    them.

5686:    Each processor can indicate any rows in the entire matrix to be zeroed (i.e. each process does NOT have to
5687:    list only rows local to itself).

5689:    The option MAT_NO_OFF_PROC_ZERO_ROWS does not apply to this routine.

5691:    Level: intermediate

5693:    Concepts: matrices^zeroing rows

5695: .seealso: MatZeroRowsIS(), MatZeroRowsColumns(), MatZeroRowsLocalIS(), MatZeroRowsStencil(), MatZeroEntries(), MatZeroRowsLocal(), MatSetOption(),
5696:           MatZeroRowsColumnsLocal(), MatZeroRowsColumnsLocalIS(), MatZeroRows(), MatZeroRowsColumnsStencil()
5697: @*/
5698: PetscErrorCode MatZeroRowsColumnsIS(Mat mat,IS is,PetscScalar diag,Vec x,Vec b)
5699: {
5701:   PetscInt       numRows;
5702:   const PetscInt *rows;

5709:   ISGetLocalSize(is,&numRows);
5710:   ISGetIndices(is,&rows);
5711:   MatZeroRowsColumns(mat,numRows,rows,diag,x,b);
5712:   ISRestoreIndices(is,&rows);
5713:   return(0);
5714: }

5716: /*@C
5717:    MatZeroRows - Zeros all entries (except possibly the main diagonal)
5718:    of a set of rows of a matrix.

5720:    Collective on Mat

5722:    Input Parameters:
5723: +  mat - the matrix
5724: .  numRows - the number of rows to remove
5725: .  rows - the global row indices
5726: .  diag - value put in all diagonals of eliminated rows (0.0 will even eliminate diagonal entry)
5727: .  x - optional vector of solutions for zeroed rows (other entries in vector are not used)
5728: -  b - optional vector of right hand side, that will be adjusted by provided solution

5730:    Notes:
5731:    For the AIJ and BAIJ matrix formats this removes the old nonzero structure,
5732:    but does not release memory.  For the dense and block diagonal
5733:    formats this does not alter the nonzero structure.

5735:    If the option MatSetOption(mat,MAT_KEEP_NONZERO_PATTERN,PETSC_TRUE) the nonzero structure
5736:    of the matrix is not changed (even for AIJ and BAIJ matrices) the values are
5737:    merely zeroed.

5739:    The user can set a value in the diagonal entry (or for the AIJ and
5740:    row formats can optionally remove the main diagonal entry from the
5741:    nonzero structure as well, by passing 0.0 as the final argument).

5743:    For the parallel case, all processes that share the matrix (i.e.,
5744:    those in the communicator used for matrix creation) MUST call this
5745:    routine, regardless of whether any rows being zeroed are owned by
5746:    them.

5748:    Each processor can indicate any rows in the entire matrix to be zeroed (i.e. each process does NOT have to
5749:    list only rows local to itself).

5751:    You can call MatSetOption(mat,MAT_NO_OFF_PROC_ZERO_ROWS,PETSC_TRUE) if each process indicates only rows it
5752:    owns that are to be zeroed. This saves a global synchronization in the implementation.

5754:    Level: intermediate

5756:    Concepts: matrices^zeroing rows
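
   Example of Usage (an illustrative sketch; A and the row indices are assumed names, the nonzero pattern
   is kept and no solution/right-hand-side vectors are adjusted):
.vb
     PetscInt rows[] = {0, 5, 12};
     MatSetOption(A,MAT_KEEP_NONZERO_PATTERN,PETSC_TRUE);
     MatZeroRows(A,3,rows,1.0,NULL,NULL);
.ve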

5758: .seealso: MatZeroRowsIS(), MatZeroRowsColumns(), MatZeroRowsLocalIS(), MatZeroRowsStencil(), MatZeroEntries(), MatZeroRowsLocal(), MatSetOption(),
5759:           MatZeroRowsColumnsLocal(), MatZeroRowsColumnsLocalIS(), MatZeroRowsColumnsIS(), MatZeroRowsColumnsStencil()
5760: @*/
5761: PetscErrorCode MatZeroRows(Mat mat,PetscInt numRows,const PetscInt rows[],PetscScalar diag,Vec x,Vec b)
5762: {

5769:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
5770:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
5771:   if (!mat->ops->zerorows) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
5772:   MatCheckPreallocated(mat,1);

5774:   (*mat->ops->zerorows)(mat,numRows,rows,diag,x,b);
5775:   MatViewFromOptions(mat,NULL,"-mat_view");
5776:   PetscObjectStateIncrease((PetscObject)mat);
5777: #if defined(PETSC_HAVE_CUSP)
5778:   if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
5779:     mat->valid_GPU_matrix = PETSC_CUSP_CPU;
5780:   }
5781: #elif defined(PETSC_HAVE_VIENNACL)
5782:   if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
5783:     mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
5784:   }
5785: #elif defined(PETSC_HAVE_VECCUDA)
5786:   if (mat->valid_GPU_matrix != PETSC_CUDA_UNALLOCATED) {
5787:     mat->valid_GPU_matrix = PETSC_CUDA_CPU;
5788:   }
5789: #endif
5790:   return(0);
5791: }

5793: /*@C
5794:    MatZeroRowsIS - Zeros all entries (except possibly the main diagonal)
5795:    of a set of rows of a matrix.

5797:    Collective on Mat

5799:    Input Parameters:
5800: +  mat - the matrix
5801: .  is - index set of rows to remove
5802: .  diag - value put in all diagonals of eliminated rows
5803: .  x - optional vector of solutions for zeroed rows (other entries in vector are not used)
5804: -  b - optional vector of right hand side, that will be adjusted by provided solution

5806:    Notes:
5807:    For the AIJ and BAIJ matrix formats this removes the old nonzero structure,
5808:    but does not release memory.  For the dense and block diagonal
5809:    formats this does not alter the nonzero structure.

5811:    If the option MatSetOption(mat,MAT_KEEP_NONZERO_PATTERN,PETSC_TRUE) the nonzero structure
5812:    of the matrix is not changed (even for AIJ and BAIJ matrices) the values are
5813:    merely zeroed.

5815:    The user can set a value in the diagonal entry (or for the AIJ and
5816:    row formats can optionally remove the main diagonal entry from the
5817:    nonzero structure as well, by passing 0.0 as the final argument).

5819:    For the parallel case, all processes that share the matrix (i.e.,
5820:    those in the communicator used for matrix creation) MUST call this
5821:    routine, regardless of whether any rows being zeroed are owned by
5822:    them.

5824:    Each processor can indicate any rows in the entire matrix to be zeroed (i.e. each process does NOT have to
5825:    list only rows local to itself).

5827:    You can call MatSetOption(mat,MAT_NO_OFF_PROC_ZERO_ROWS,PETSC_TRUE) if each process indicates only rows it
5828:    owns that are to be zeroed. This saves a global synchronization in the implementation.

5830:    Level: intermediate

5832:    Concepts: matrices^zeroing rows

5834: .seealso: MatZeroRows(), MatZeroRowsColumns(), MatZeroRowsLocalIS(), MatZeroRowsStencil(), MatZeroEntries(), MatZeroRowsLocal(), MatSetOption(),
5835:           MatZeroRowsColumnsLocal(), MatZeroRowsColumnsLocalIS(), MatZeroRowsColumnsIS(), MatZeroRowsColumnsStencil()
5836: @*/
5837: PetscErrorCode MatZeroRowsIS(Mat mat,IS is,PetscScalar diag,Vec x,Vec b)
5838: {
5839:   PetscInt       numRows;
5840:   const PetscInt *rows;

5847:   ISGetLocalSize(is,&numRows);
5848:   ISGetIndices(is,&rows);
5849:   MatZeroRows(mat,numRows,rows,diag,x,b);
5850:   ISRestoreIndices(is,&rows);
5851:   return(0);
5852: }

5854: /*@C
5855:    MatZeroRowsStencil - Zeros all entries (except possibly the main diagonal)
5856:    of a set of rows of a matrix. These rows must be local to the process.

5858:    Collective on Mat

5860:    Input Parameters:
5861: +  mat - the matrix
5862: .  numRows - the number of rows to remove
5863: .  rows - the grid coordinates (and component number when dof > 1) for matrix rows
5864: .  diag - value put in all diagonals of eliminated rows (0.0 will even eliminate diagonal entry)
5865: .  x - optional vector of solutions for zeroed rows (other entries in vector are not used)
5866: -  b - optional vector of right hand side, that will be adjusted by provided solution

5868:    Notes:
5869:    For the AIJ and BAIJ matrix formats this removes the old nonzero structure,
5870:    but does not release memory.  For the dense and block diagonal
5871:    formats this does not alter the nonzero structure.

5873:    If the option MatSetOption(mat,MAT_KEEP_NONZERO_PATTERN,PETSC_TRUE) the nonzero structure
5874:    of the matrix is not changed (even for AIJ and BAIJ matrices) the values are
5875:    merely zeroed.

5877:    The user can set a value in the diagonal entry (or for the AIJ and
5878:    row formats can optionally remove the main diagonal entry from the
5879:    nonzero structure as well, by passing 0.0 as the final argument).

5881:    For the parallel case, all processes that share the matrix (i.e.,
5882:    those in the communicator used for matrix creation) MUST call this
5883:    routine, regardless of whether any rows being zeroed are owned by
5884:    them.

5886:    Each processor can indicate any rows in the entire matrix to be zeroed (i.e. each process does NOT have to
5887:    list only rows local to itself).

5889:    The grid coordinates are across the entire grid, not just the local portion

5891:    In Fortran idxm and idxn should be declared as
5892: $     MatStencil idxm(4,m)
5893:    and the values inserted using
5894: $    idxm(MatStencil_i,1) = i
5895: $    idxm(MatStencil_j,1) = j
5896: $    idxm(MatStencil_k,1) = k
5897: $    idxm(MatStencil_c,1) = c
5898:    etc

5900:    For periodic boundary conditions use negative indices for values to the left of index 0 (these are
5901:    obtained by wrapping values from the right edge). For values to the right of the last entry use that index plus one,
5902:    etc.; these are obtained by wrapping values from the left edge. This only works for the
5903:    DM_BOUNDARY_PERIODIC boundary type.

5905:    For indices that don't mean anything for your case (like the k index when working in 2d, or the c index when you have
5906:    a single value per point) you can skip filling those indices.

5908:    Level: intermediate

5910:    Concepts: matrices^zeroing rows
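
   Example of Usage (an illustrative sketch in C for a 2d grid; A is assumed to have been obtained from
   DMCreateMatrix() so the stencil information is set, and i,j are assumed global grid indices):
.vb
     MatStencil row;
     row.i = i; row.j = j;
     MatZeroRowsStencil(A,1,&row,1.0,NULL,NULL);
.ve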

5912: .seealso: MatZeroRowsIS(), MatZeroRowsColumns(), MatZeroRowsLocalIS(), MatZeroRows(), MatZeroEntries(), MatZeroRowsLocal(), MatSetOption(),
5913:           MatZeroRowsColumnsLocal(), MatZeroRowsColumnsLocalIS(), MatZeroRowsColumnsIS(), MatZeroRowsColumnsStencil()
5914: @*/
5915: PetscErrorCode MatZeroRowsStencil(Mat mat,PetscInt numRows,const MatStencil rows[],PetscScalar diag,Vec x,Vec b)
5916: {
5917:   PetscInt       dim     = mat->stencil.dim;
5918:   PetscInt       sdim    = dim - (1 - (PetscInt) mat->stencil.noc);
5919:   PetscInt       *dims   = mat->stencil.dims+1;
5920:   PetscInt       *starts = mat->stencil.starts;
5921:   PetscInt       *dxm    = (PetscInt*) rows;
5922:   PetscInt       *jdxm, i, j, tmp, numNewRows = 0;


5930:   PetscMalloc1(numRows, &jdxm);
5931:   for (i = 0; i < numRows; ++i) {
5932:     /* Skip unused dimensions (they are ordered k, j, i, c) */
5933:     for (j = 0; j < 3-sdim; ++j) dxm++;
5934:     /* Local index in X dir */
5935:     tmp = *dxm++ - starts[0];
5936:     /* Loop over remaining dimensions */
5937:     for (j = 0; j < dim-1; ++j) {
5938:       /* If nonlocal, set index to be negative */
5939:       if ((*dxm++ - starts[j+1]) < 0 || tmp < 0) tmp = PETSC_MIN_INT;
5940:       /* Update local index */
5941:       else tmp = tmp*dims[j] + *(dxm-1) - starts[j+1];
5942:     }
5943:     /* Skip component slot if necessary */
5944:     if (mat->stencil.noc) dxm++;
5945:     /* Local row number */
5946:     if (tmp >= 0) {
5947:       jdxm[numNewRows++] = tmp;
5948:     }
5949:   }
5950:   MatZeroRowsLocal(mat,numNewRows,jdxm,diag,x,b);
5951:   PetscFree(jdxm);
5952:   return(0);
5953: }

5955: /*@C
5956:    MatZeroRowsColumnsStencil - Zeros all row and column entries (except possibly the main diagonal)
5957:    of a set of rows and columns of a matrix.

5959:    Collective on Mat

5961:    Input Parameters:
5962: +  mat - the matrix
5963: .  numRows - the number of rows/columns to remove
5964: .  rows - the grid coordinates (and component number when dof > 1) for matrix rows
5965: .  diag - value put in all diagonals of eliminated rows (0.0 will even eliminate diagonal entry)
5966: .  x - optional vector of solutions for zeroed rows (other entries in vector are not used)
5967: -  b - optional vector of right hand side, that will be adjusted by provided solution

5969:    Notes:
5970:    For the AIJ and BAIJ matrix formats this removes the old nonzero structure,
5971:    but does not release memory.  For the dense and block diagonal
5972:    formats this does not alter the nonzero structure.

5974:    If the option MatSetOption(mat,MAT_KEEP_NONZERO_PATTERN,PETSC_TRUE) the nonzero structure
5975:    of the matrix is not changed (even for AIJ and BAIJ matrices) the values are
5976:    merely zeroed.

5978:    The user can set a value in the diagonal entry (or for the AIJ and
5979:    row formats can optionally remove the main diagonal entry from the
5980:    nonzero structure as well, by passing 0.0 as the final argument).

5982:    For the parallel case, all processes that share the matrix (i.e.,
5983:    those in the communicator used for matrix creation) MUST call this
5984:    routine, regardless of whether any rows being zeroed are owned by
5985:    them.

5987:    Each processor can indicate any rows in the entire matrix to be zeroed (i.e. each process does NOT have to
5988:    list only rows local to itself, but the row/column numbers are given in local numbering).

5990:    The grid coordinates are across the entire grid, not just the local portion

5992:    In Fortran idxm and idxn should be declared as
5993: $     MatStencil idxm(4,m)
5994:    and the values inserted using
5995: $    idxm(MatStencil_i,1) = i
5996: $    idxm(MatStencil_j,1) = j
5997: $    idxm(MatStencil_k,1) = k
5998: $    idxm(MatStencil_c,1) = c
5999:    etc

6001:    For periodic boundary conditions use negative indices for values to the left of index 0 (these are
6002:    obtained by wrapping values from the right edge). For values to the right of the last entry use that index plus one,
6003:    etc.; these are obtained by wrapping values from the left edge. This only works for the
6004:    DM_BOUNDARY_PERIODIC boundary type.

6006:    For indices that don't mean anything for your case (like the k index when working in 2d, or the c index when you have
6007:    a single value per point) you can skip filling those indices.

6009:    Level: intermediate

6011:    Concepts: matrices^zeroing rows

6013: .seealso: MatZeroRowsIS(), MatZeroRowsColumns(), MatZeroRowsLocalIS(), MatZeroRowsStencil(), MatZeroEntries(), MatZeroRowsLocal(), MatSetOption(),
6014:           MatZeroRowsColumnsLocal(), MatZeroRowsColumnsLocalIS(), MatZeroRowsColumnsIS(), MatZeroRows()
6015: @*/
6016: PetscErrorCode MatZeroRowsColumnsStencil(Mat mat,PetscInt numRows,const MatStencil rows[],PetscScalar diag,Vec x,Vec b)
6017: {
6018:   PetscInt       dim     = mat->stencil.dim;
6019:   PetscInt       sdim    = dim - (1 - (PetscInt) mat->stencil.noc);
6020:   PetscInt       *dims   = mat->stencil.dims+1;
6021:   PetscInt       *starts = mat->stencil.starts;
6022:   PetscInt       *dxm    = (PetscInt*) rows;
6023:   PetscInt       *jdxm, i, j, tmp, numNewRows = 0;


6031:   PetscMalloc1(numRows, &jdxm);
6032:   for (i = 0; i < numRows; ++i) {
6033:     /* Skip unused dimensions (they are ordered k, j, i, c) */
6034:     for (j = 0; j < 3-sdim; ++j) dxm++;
6035:     /* Local index in X dir */
6036:     tmp = *dxm++ - starts[0];
6037:     /* Loop over remaining dimensions */
6038:     for (j = 0; j < dim-1; ++j) {
6039:       /* If nonlocal, set index to be negative */
6040:       if ((*dxm++ - starts[j+1]) < 0 || tmp < 0) tmp = PETSC_MIN_INT;
6041:       /* Update local index */
6042:       else tmp = tmp*dims[j] + *(dxm-1) - starts[j+1];
6043:     }
6044:     /* Skip component slot if necessary */
6045:     if (mat->stencil.noc) dxm++;
6046:     /* Local row number */
6047:     if (tmp >= 0) {
6048:       jdxm[numNewRows++] = tmp;
6049:     }
6050:   }
6051:   MatZeroRowsColumnsLocal(mat,numNewRows,jdxm,diag,x,b);
6052:   PetscFree(jdxm);
6053:   return(0);
6054: }

6056: /*@C
6057:    MatZeroRowsLocal - Zeros all entries (except possibly the main diagonal)
6058:    of a set of rows of a matrix; using local numbering of rows.

6060:    Collective on Mat

6062:    Input Parameters:
6063: +  mat - the matrix
6064: .  numRows - the number of rows to remove
6065: .  rows - the local row indices
6066: .  diag - value put in all diagonals of eliminated rows
6067: .  x - optional vector of solutions for zeroed rows (other entries in vector are not used)
6068: -  b - optional vector of right hand side, that will be adjusted by provided solution

6070:    Notes:
6071:    Before calling MatZeroRowsLocal(), the user must first set the
6072:    local-to-global mapping by calling MatSetLocalToGlobalMapping().

6074:    For the AIJ matrix formats this removes the old nonzero structure,
6075:    but does not release memory.  For the dense and block diagonal
6076:    formats this does not alter the nonzero structure.

6078:    If the option MatSetOption(mat,MAT_KEEP_NONZERO_PATTERN,PETSC_TRUE) the nonzero structure
6079:    of the matrix is not changed (even for AIJ and BAIJ matrices) the values are
6080:    merely zeroed.

6082:    The user can set a value in the diagonal entry (or for the AIJ and
6083:    row formats can optionally remove the main diagonal entry from the
6084:    nonzero structure as well, by passing 0.0 as the final argument).

6086:    You can call MatSetOption(mat,MAT_NO_OFF_PROC_ZERO_ROWS,PETSC_TRUE) if each process indicates only rows it
6087:    owns that are to be zeroed. This saves a global synchronization in the implementation.

6089:    Level: intermediate

6091:    Concepts: matrices^zeroing

6093: .seealso: MatZeroRowsIS(), MatZeroRowsColumns(), MatZeroRowsLocalIS(), MatZeroRowsStencil(), MatZeroEntries(), MatZeroRows(), MatSetOption(),
6094:           MatZeroRowsColumnsLocal(), MatZeroRowsColumnsLocalIS(), MatZeroRowsColumnsIS(), MatZeroRowsColumnsStencil()
6095: @*/
6096: PetscErrorCode MatZeroRowsLocal(Mat mat,PetscInt numRows,const PetscInt rows[],PetscScalar diag,Vec x,Vec b)
6097: {

6104:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
6105:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6106:   MatCheckPreallocated(mat,1);

6108:   if (mat->ops->zerorowslocal) {
6109:     (*mat->ops->zerorowslocal)(mat,numRows,rows,diag,x,b);
6110:   } else {
6111:     IS             is, newis;
6112:     const PetscInt *newRows;

6114:     if (!mat->rmap->mapping) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Need to provide local to global mapping to matrix first");
6115:     ISCreateGeneral(PETSC_COMM_SELF,numRows,rows,PETSC_COPY_VALUES,&is);
6116:     ISLocalToGlobalMappingApplyIS(mat->rmap->mapping,is,&newis);
6117:     ISGetIndices(newis,&newRows);
6118:     (*mat->ops->zerorows)(mat,numRows,newRows,diag,x,b);
6119:     ISRestoreIndices(newis,&newRows);
6120:     ISDestroy(&newis);
6121:     ISDestroy(&is);
6122:   }
6123:   PetscObjectStateIncrease((PetscObject)mat);
6124: #if defined(PETSC_HAVE_CUSP)
6125:   if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
6126:     mat->valid_GPU_matrix = PETSC_CUSP_CPU;
6127:   }
6128: #elif defined(PETSC_HAVE_VIENNACL)
6129:   if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
6130:     mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
6131:   }
6132: #elif defined(PETSC_HAVE_VECCUDA)
6133:   if (mat->valid_GPU_matrix != PETSC_CUDA_UNALLOCATED) {
6134:     mat->valid_GPU_matrix = PETSC_CUDA_CPU;
6135:   }
6136: #endif
6137:   return(0);
6138: }
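
/*
   A minimal usage sketch of MatZeroRowsLocal() (illustrative only, not part of the original file).
   It assumes a hypothetical assembled matrix A whose local-to-global mapping has already been set
   with MatSetLocalToGlobalMapping(); error-code checking is omitted to match the style of this listing.
*/
static PetscErrorCode ExampleZeroRowsLocal(Mat A,Vec x,Vec b)
{
  PetscInt lrows[2] = {0,1};              /* local indices of the rows to eliminate */

  MatZeroRowsLocal(A,2,lrows,1.0,x,b);    /* keep 1.0 on the diagonal, adjust b from x */
  return(0);
}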

6140: /*@C
6141:    MatZeroRowsLocalIS - Zeros all entries (except possibly the main diagonal)
6142:    of a set of rows of a matrix; using local numbering of rows.

6144:    Collective on Mat

6146:    Input Parameters:
6147: +  mat - the matrix
6148: .  is - index set of rows to remove
6149: .  diag - value put in all diagonals of eliminated rows
6150: .  x - optional vector of solutions for zeroed rows (other entries in vector are not used)
6151: -  b - optional vector of the right-hand side that will be adjusted by the provided solution

6153:    Notes:
6154:    Before calling MatZeroRowsLocalIS(), the user must first set the
6155:    local-to-global mapping by calling MatSetLocalToGlobalMapping().

6157:    For the AIJ matrix formats this removes the old nonzero structure,
6158:    but does not release memory.  For the dense and block diagonal
6159:    formats this does not alter the nonzero structure.

6161:    If the option MatSetOption(mat,MAT_KEEP_NONZERO_PATTERN,PETSC_TRUE) has been set, the nonzero structure
6162:    of the matrix is not changed (even for AIJ and BAIJ matrices); the values are
6163:    merely zeroed.

6165:    The user can set a value in the diagonal entry (or for the AIJ and
6166:    row formats can optionally remove the main diagonal entry from the
6167:    nonzero structure as well, by passing 0.0 as the diag argument).

6169:    You can call MatSetOption(mat,MAT_NO_OFF_PROC_ZERO_ROWS,PETSC_TRUE) if each process indicates only rows it
6170:    owns that are to be zeroed. This saves a global synchronization in the implementation.

6172:    Level: intermediate

6174:    Concepts: matrices^zeroing

6176: .seealso: MatZeroRowsIS(), MatZeroRowsColumns(), MatZeroRows(), MatZeroRowsStencil(), MatZeroEntries(), MatZeroRowsLocal(), MatSetOption(),
6177:           MatZeroRowsColumnsLocal(), MatZeroRowsColumnsLocalIS(), MatZeroRowsColumnsIS(), MatZeroRowsColumnsStencil()
6178: @*/
6179: PetscErrorCode MatZeroRowsLocalIS(Mat mat,IS is,PetscScalar diag,Vec x,Vec b)
6180: {
6182:   PetscInt       numRows;
6183:   const PetscInt *rows;

6189:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
6190:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6191:   MatCheckPreallocated(mat,1);

6193:   ISGetLocalSize(is,&numRows);
6194:   ISGetIndices(is,&rows);
6195:   MatZeroRowsLocal(mat,numRows,rows,diag,x,b);
6196:   ISRestoreIndices(is,&rows);
6197:   return(0);
6198: }
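
/*
   A minimal usage sketch of MatZeroRowsLocalIS() (illustrative only, not part of the original file).
   The matrix A is hypothetical, assembled, and already has a local-to-global mapping attached;
   error-code checking is omitted to match this listing.
*/
static PetscErrorCode ExampleZeroRowsLocalIS(Mat A)
{
  IS       rows;
  PetscInt idx[3] = {0,1,2};                  /* local row numbers to zero */

  ISCreateGeneral(PETSC_COMM_SELF,3,idx,PETSC_COPY_VALUES,&rows);
  MatZeroRowsLocalIS(A,rows,1.0,NULL,NULL);   /* no x/b fix-up requested */
  ISDestroy(&rows);
  return(0);
}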

6200: /*@C
6201:    MatZeroRowsColumnsLocal - Zeros all entries (except possibly the main diagonal)
6202:    of a set of rows and columns of a matrix; using local numbering of rows.

6204:    Collective on Mat

6206:    Input Parameters:
6207: +  mat - the matrix
6208: .  numRows - the number of rows to remove
6209: .  rows - the local row indices
6210: .  diag - value put in all diagonals of eliminated rows
6211: .  x - optional vector of solutions for zeroed rows (other entries in vector are not used)
6212: -  b - optional vector of the right-hand side that will be adjusted by the provided solution

6214:    Notes:
6215:    Before calling MatZeroRowsColumnsLocal(), the user must first set the
6216:    local-to-global mapping by calling MatSetLocalToGlobalMapping().

6218:    The user can set a value in the diagonal entry (or for the AIJ and
6219:    row formats can optionally remove the main diagonal entry from the
6220:    nonzero structure as well, by passing 0.0 as the diag argument).

6222:    Level: intermediate

6224:    Concepts: matrices^zeroing

6226: .seealso: MatZeroRowsIS(), MatZeroRowsColumns(), MatZeroRowsLocalIS(), MatZeroRowsStencil(), MatZeroEntries(), MatZeroRowsLocal(), MatSetOption(),
6227:           MatZeroRows(), MatZeroRowsColumnsLocalIS(), MatZeroRowsColumnsIS(), MatZeroRowsColumnsStencil()
6228: @*/
6229: PetscErrorCode MatZeroRowsColumnsLocal(Mat mat,PetscInt numRows,const PetscInt rows[],PetscScalar diag,Vec x,Vec b)
6230: {
6232:   IS             is, newis;
6233:   const PetscInt *newRows;

6239:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
6240:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6241:   MatCheckPreallocated(mat,1);

6243:   if (!mat->cmap->mapping) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Need to provide local to global mapping to matrix first");
6244:   ISCreateGeneral(PETSC_COMM_SELF,numRows,rows,PETSC_COPY_VALUES,&is);
6245:   ISLocalToGlobalMappingApplyIS(mat->cmap->mapping,is,&newis);
6246:   ISGetIndices(newis,&newRows);
6247:   (*mat->ops->zerorowscolumns)(mat,numRows,newRows,diag,x,b);
6248:   ISRestoreIndices(newis,&newRows);
6249:   ISDestroy(&newis);
6250:   ISDestroy(&is);
6251:   PetscObjectStateIncrease((PetscObject)mat);
6252: #if defined(PETSC_HAVE_CUSP)
6253:   if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
6254:     mat->valid_GPU_matrix = PETSC_CUSP_CPU;
6255:   }
6256: #elif defined(PETSC_HAVE_VIENNACL)
6257:   if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
6258:     mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
6259:   }
6260: #elif defined(PETSC_HAVE_VECCUDA)
6261:   if (mat->valid_GPU_matrix != PETSC_CUDA_UNALLOCATED) {
6262:     mat->valid_GPU_matrix = PETSC_CUDA_CPU;
6263:   }
6264: #endif
6265:   return(0);
6266: }

6268: /*@C
6269:    MatZeroRowsColumnsLocalIS - Zeros all entries (except possibly the main diagonal)
6270:    of a set of rows and columns of a matrix; using local numbering of rows.

6272:    Collective on Mat

6274:    Input Parameters:
6275: +  mat - the matrix
6276: .  is - index set of rows to remove
6277: .  diag - value put in all diagonals of eliminated rows
6278: .  x - optional vector of solutions for zeroed rows (other entries in vector are not used)
6279: -  b - optional vector of the right-hand side that will be adjusted by the provided solution

6281:    Notes:
6282:    Before calling MatZeroRowsColumnsLocalIS(), the user must first set the
6283:    local-to-global mapping by calling MatSetLocalToGlobalMapping().

6285:    The user can set a value in the diagonal entry (or for the AIJ and
6286:    row formats can optionally remove the main diagonal entry from the
6287:    nonzero structure as well, by passing 0.0 as the diag argument).

6289:    Level: intermediate

6291:    Concepts: matrices^zeroing

6293: .seealso: MatZeroRowsIS(), MatZeroRowsColumns(), MatZeroRowsLocalIS(), MatZeroRowsStencil(), MatZeroEntries(), MatZeroRowsLocal(), MatSetOption(),
6294:           MatZeroRowsColumnsLocal(), MatZeroRows(), MatZeroRowsColumnsIS(), MatZeroRowsColumnsStencil()
6295: @*/
6296: PetscErrorCode MatZeroRowsColumnsLocalIS(Mat mat,IS is,PetscScalar diag,Vec x,Vec b)
6297: {
6299:   PetscInt       numRows;
6300:   const PetscInt *rows;

6306:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
6307:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6308:   MatCheckPreallocated(mat,1);

6310:   ISGetLocalSize(is,&numRows);
6311:   ISGetIndices(is,&rows);
6312:   MatZeroRowsColumnsLocal(mat,numRows,rows,diag,x,b);
6313:   ISRestoreIndices(is,&rows);
6314:   return(0);
6315: }

6317: /*@C
6318:    MatGetSize - Returns the numbers of rows and columns in a matrix.

6320:    Not Collective

6322:    Input Parameter:
6323: .  mat - the matrix

6325:    Output Parameters:
6326: +  m - the number of global rows
6327: -  n - the number of global columns

6329:    Note: both output parameters can be NULL on input.

6331:    Level: beginner

6333:    Concepts: matrices^size

6335: .seealso: MatGetLocalSize()
6336: @*/
6337: PetscErrorCode MatGetSize(Mat mat,PetscInt *m,PetscInt *n)
6338: {
6341:   if (m) *m = mat->rmap->N;
6342:   if (n) *n = mat->cmap->N;
6343:   return(0);
6344: }

6346: /*@C
6347:    MatGetLocalSize - Returns the number of rows and columns in a matrix
6348:    stored locally.  This information may be implementation dependent, so
6349:    use with care.

6351:    Not Collective

6353:    Input Parameters:
6354: .  mat - the matrix

6356:    Output Parameters:
6357: +  m - the number of local rows
6358: -  n - the number of local columns

6360:    Note: both output parameters can be NULL on input.

6362:    Level: beginner

6364:    Concepts: matrices^local size

6366: .seealso: MatGetSize()
6367: @*/
6368: PetscErrorCode MatGetLocalSize(Mat mat,PetscInt *m,PetscInt *n)
6369: {
6374:   if (m) *m = mat->rmap->n;
6375:   if (n) *n = mat->cmap->n;
6376:   return(0);
6377: }
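
/*
   A minimal usage sketch (illustrative only, not part of the original file): query the global and
   local dimensions of a hypothetical matrix A. Any output pointer that is not needed may be NULL.
*/
static PetscErrorCode ExampleQuerySizes(Mat A)
{
  PetscInt M,N,m,n;

  MatGetSize(A,&M,&N);        /* global rows and columns */
  MatGetLocalSize(A,&m,&n);   /* rows and columns stored on this process */
  PetscPrintf(PETSC_COMM_SELF,"global %D x %D, local %D x %D\n",M,N,m,n);
  return(0);
}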

6379: /*@
6380:    MatGetOwnershipRangeColumn - Returns the range of matrix columns that correspond to the portion, owned by this process, of a vector
6381:    that the matrix is multiplied against. (The columns of the "diagonal block")

6383:    Not Collective, unless matrix has not been allocated, then collective on Mat

6385:    Input Parameters:
6386: .  mat - the matrix

6388:    Output Parameters:
6389: +  m - the global index of the first local column
6390: -  n - one more than the global index of the last local column

6392:    Notes: both output parameters can be NULL on input.

6394:    Level: developer

6396:    Concepts: matrices^column ownership

6398: .seealso:  MatGetOwnershipRange(), MatGetOwnershipRanges(), MatGetOwnershipRangesColumn()

6400: @*/
6401: PetscErrorCode MatGetOwnershipRangeColumn(Mat mat,PetscInt *m,PetscInt *n)
6402: {
6408:   MatCheckPreallocated(mat,1);
6409:   if (m) *m = mat->cmap->rstart;
6410:   if (n) *n = mat->cmap->rend;
6411:   return(0);
6412: }

6414: /*@
6415:    MatGetOwnershipRange - Returns the range of matrix rows owned by
6416:    this processor, assuming that the matrix is laid out with the first
6417:    n1 rows on the first processor, the next n2 rows on the second, etc.
6418:    For certain parallel layouts this range may not be well defined.

6420:    Not Collective

6422:    Input Parameters:
6423: .  mat - the matrix

6425:    Output Parameters:
6426: +  m - the global index of the first local row
6427: -  n - one more than the global index of the last local row

6429:    Note: Both output parameters can be NULL on input.
6430: $  This function requires that the matrix be preallocated. If you have not preallocated, consider using
6431: $    PetscSplitOwnership(MPI_Comm comm, PetscInt *n, PetscInt *N)
6432: $  and then MPI_Scan() to calculate prefix sums of the local sizes.

6434:    Level: beginner

6436:    Concepts: matrices^row ownership

6438: .seealso:   MatGetOwnershipRanges(), MatGetOwnershipRangeColumn(), MatGetOwnershipRangesColumn(), PetscSplitOwnership(), PetscSplitOwnershipBlock()

6440: @*/
6441: PetscErrorCode MatGetOwnershipRange(Mat mat,PetscInt *m,PetscInt *n)
6442: {
6448:   MatCheckPreallocated(mat,1);
6449:   if (m) *m = mat->rmap->rstart;
6450:   if (n) *n = mat->rmap->rend;
6451:   return(0);
6452: }
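
/*
   A minimal usage sketch (illustrative only, not part of the original file): use the row ownership
   range of a hypothetical matrix A, preallocated with room for the diagonal, to set the locally
   owned diagonal entries. Error-code checking is omitted to match this listing.
*/
static PetscErrorCode ExampleSetOwnedDiagonal(Mat A)
{
  PetscInt rstart,rend,i;

  MatGetOwnershipRange(A,&rstart,&rend);
  for (i=rstart; i<rend; i++) {
    MatSetValue(A,i,i,2.0,INSERT_VALUES);
  }
  MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
  MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
  return(0);
}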

6454: /*@C
6455:    MatGetOwnershipRanges - Returns the range of matrix rows owned by
6456:    each process

6458:    Not Collective, unless matrix has not been allocated, then collective on Mat

6460:    Input Parameters:
6461: .  mat - the matrix

6463:    Output Parameters:
6464: .  ranges - start of each process's portion, with the total number of rows as the final entry

6466:    Level: beginner

6468:    Concepts: matrices^row ownership

6470: .seealso:   MatGetOwnershipRange(), MatGetOwnershipRangeColumn(), MatGetOwnershipRangesColumn()

6472: @*/
6473: PetscErrorCode MatGetOwnershipRanges(Mat mat,const PetscInt **ranges)
6474: {

6480:   MatCheckPreallocated(mat,1);
6481:   PetscLayoutGetRanges(mat->rmap,ranges);
6482:   return(0);
6483: }

6485: /*@C
6486:    MatGetOwnershipRangesColumn - Returns, for every process, the range of matrix columns that correspond to the portion of a vector
6487:    the matrix is multiplied against that the process owns. (The columns of the "diagonal blocks" for each process)

6489:    Not Collective, unless matrix has not been allocated, then collective on Mat

6491:    Input Parameters:
6492: .  mat - the matrix

6494:    Output Parameters:
6495: .  ranges - start of each process's portion, with the total number of columns as the final entry

6497:    Level: beginner

6499:    Concepts: matrices^column ownership

6501: .seealso:   MatGetOwnershipRange(), MatGetOwnershipRangeColumn(), MatGetOwnershipRanges()

6503: @*/
6504: PetscErrorCode MatGetOwnershipRangesColumn(Mat mat,const PetscInt **ranges)
6505: {

6511:   MatCheckPreallocated(mat,1);
6512:   PetscLayoutGetRanges(mat->cmap,ranges);
6513:   return(0);
6514: }

6516: /*@C
6517:    MatGetOwnershipIS - Get row and column ownership as index sets

6519:    Not Collective

6521:    Input Arguments:
6522: .  A - matrix of type Elemental

6524:    Output Arguments:
6525: +  rows - rows in which this process owns elements
6526: -  cols - columns in which this process owns elements

6528:    Level: intermediate

6530: .seealso: MatGetOwnershipRange(), MatGetOwnershipRangeColumn(), MatSetValues(), MATELEMENTAL
6531: @*/
6532: PetscErrorCode MatGetOwnershipIS(Mat A,IS *rows,IS *cols)
6533: {
6534:   PetscErrorCode ierr,(*f)(Mat,IS*,IS*);

6537:   MatCheckPreallocated(A,1);
6538:   PetscObjectQueryFunction((PetscObject)A,"MatGetOwnershipIS_C",&f);
6539:   if (f) {
6540:     (*f)(A,rows,cols);
6541:   } else {   /* Create a standard row-based partition, each process is responsible for ALL columns in their row block */
6542:     if (rows) {ISCreateStride(PETSC_COMM_SELF,A->rmap->n,A->rmap->rstart,1,rows);}
6543:     if (cols) {ISCreateStride(PETSC_COMM_SELF,A->cmap->N,0,1,cols);}
6544:   }
6545:   return(0);
6546: }

6548: /*@C
6549:    MatILUFactorSymbolic - Performs symbolic ILU factorization of a matrix.
6550:    Uses levels of fill only, not drop tolerance. Use MatLUFactorNumeric()
6551:    to complete the factorization.

6553:    Collective on Mat

6555:    Input Parameters:
6556: +  mat - the matrix
6557: .  row - row permutation
6558: .  column - column permutation
6559: -  info - structure containing
6560: $      levels - number of levels of fill.
6561: $      expected fill - as ratio of original fill.
6562: $      1 or 0 - indicating force fill on diagonal (improves robustness for matrices
6563:                 missing diagonal entries)

6565:    Output Parameters:
6566: .  fact - new matrix that has been symbolically factored

6568:    Notes: See Users-Manual: ch_mat for additional information about choosing the fill factor for better efficiency.

6570:    Most users should employ the simplified KSP interface for linear solvers
6571:    instead of working directly with matrix algebra routines such as this.
6572:    See, e.g., KSPCreate().

6574:    Level: developer

6576:   Concepts: matrices^symbolic LU factorization
6577:   Concepts: matrices^factorization
6578:   Concepts: LU^symbolic factorization

6580: .seealso: MatLUFactorSymbolic(), MatLUFactorNumeric(), MatCholeskyFactor()
6581:           MatGetOrdering(), MatFactorInfo

6583:     Developer Note: fortran interface is not autogenerated as the f90
6584:     interface definition cannot be generated correctly [due to MatFactorInfo]

6586: @*/
6587: PetscErrorCode MatILUFactorSymbolic(Mat fact,Mat mat,IS row,IS col,const MatFactorInfo *info)
6588: {

6598:   if (info->levels < 0) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_OUTOFRANGE,"Levels of fill negative %D",(PetscInt)info->levels);
6599:   if (info->fill < 1.0) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_OUTOFRANGE,"Expected fill less than 1.0 %g",(double)info->fill);
6600:   if (!(fact)->ops->ilufactorsymbolic) {
6601:     const MatSolverPackage spackage;
6602:     MatFactorGetSolverPackage(fact,&spackage);
6603:     SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Matrix type %s symbolic ILU using solver package %s",((PetscObject)mat)->type_name,spackage);
6604:   }
6605:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
6606:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6607:   MatCheckPreallocated(mat,2);

6609:   PetscLogEventBegin(MAT_ILUFactorSymbolic,mat,row,col,0);
6610:   (fact->ops->ilufactorsymbolic)(fact,mat,row,col,info);
6611:   PetscLogEventEnd(MAT_ILUFactorSymbolic,mat,row,col,0);
6612:   return(0);
6613: }
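
/*
   A minimal usage sketch (illustrative only, not part of the original file): symbolic ILU(0)
   followed by the numeric factorization, using PETSc's built-in solver package and a natural
   ordering. A is a hypothetical assembled AIJ matrix; most users would let the KSP/PC interface
   drive these calls instead. Error-code checking is omitted to match this listing.
*/
static PetscErrorCode ExampleILUFactor(Mat A,Mat *F)
{
  IS            row,col;
  MatFactorInfo info;

  MatGetOrdering(A,MATORDERINGNATURAL,&row,&col);
  MatGetFactor(A,MATSOLVERPETSC,MAT_FACTOR_ILU,F);
  MatFactorInfoInitialize(&info);
  info.levels = 0;                        /* ILU(0): no additional levels of fill */
  info.fill   = 1.0;                      /* expected fill as a ratio of the original */
  MatILUFactorSymbolic(*F,A,row,col,&info);
  MatLUFactorNumeric(*F,A,&info);
  ISDestroy(&row);
  ISDestroy(&col);
  return(0);
}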

6615: /*@C
6616:    MatICCFactorSymbolic - Performs symbolic incomplete
6617:    Cholesky factorization for a symmetric matrix.  Use
6618:    MatCholeskyFactorNumeric() to complete the factorization.

6620:    Collective on Mat

6622:    Input Parameters:
6623: +  mat - the matrix
6624: .  perm - row and column permutation
6625: -  info - structure containing
6626: $      levels - number of levels of fill.
6627: $      expected fill - as ratio of original fill.

6629:    Output Parameter:
6630: .  fact - the factored matrix

6632:    Notes:
6633:    Most users should employ the KSP interface for linear solvers
6634:    instead of working directly with matrix algebra routines such as this.
6635:    See, e.g., KSPCreate().

6637:    Level: developer

6639:   Concepts: matrices^symbolic incomplete Cholesky factorization
6640:   Concepts: matrices^factorization
6641:   Concepts: Cholesky^symbolic factorization

6643: .seealso: MatCholeskyFactorNumeric(), MatCholeskyFactor(), MatFactorInfo

6645:     Developer Note: fortran interface is not autogenerated as the f90
6646:     interface definition cannot be generated correctly [due to MatFactorInfo]

6648: @*/
6649: PetscErrorCode MatICCFactorSymbolic(Mat fact,Mat mat,IS perm,const MatFactorInfo *info)
6650: {

6659:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6660:   if (info->levels < 0) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_OUTOFRANGE,"Levels negative %D",(PetscInt) info->levels);
6661:   if (info->fill < 1.0) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_OUTOFRANGE,"Expected fill less than 1.0 %g",(double)info->fill);
6662:   if (!(fact)->ops->iccfactorsymbolic) {
6663:     const MatSolverPackage spackage;
6664:     MatFactorGetSolverPackage(fact,&spackage);
6665:     SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Matrix type %s symbolic ICC using solver package %s",((PetscObject)mat)->type_name,spackage);
6666:   }
6667:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
6668:   MatCheckPreallocated(mat,2);

6670:   PetscLogEventBegin(MAT_ICCFactorSymbolic,mat,perm,0,0);
6671:   (fact->ops->iccfactorsymbolic)(fact,mat,perm,info);
6672:   PetscLogEventEnd(MAT_ICCFactorSymbolic,mat,perm,0,0);
6673:   return(0);
6674: }

6676: /*@C
6677:    MatCreateSubMatrices - Extracts several submatrices from a matrix. If submat
6678:    points to an array of valid matrices, they may be reused to store the new
6679:    submatrices.

6681:    Collective on Mat

6683:    Input Parameters:
6684: +  mat - the matrix
6685: .  n   - the number of submatrices to be extracted (on this processor, may be zero)
6686: .  irow, icol - index sets of rows and columns to extract
6687: -  scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX

6689:    Output Parameter:
6690: .  submat - the array of submatrices

6692:    Notes:
6693:    MatCreateSubMatrices() can extract ONLY sequential submatrices
6694:    (from both sequential and parallel matrices). Use MatCreateSubMatrix()
6695:    to extract a parallel submatrix.

6697:    Some matrix types place restrictions on the row and column
6698:    indices, such as that they be sorted or that they be equal to each other.

6700:    The index sets may not have duplicate entries.

6702:    When extracting submatrices from a parallel matrix, each processor can
6703:    form a different submatrix by setting the rows and columns of its
6704:    individual index sets according to the local submatrix desired.

6706:    When finished using the submatrices, the user should destroy
6707:    them with MatDestroyMatrices().

6709:    MAT_REUSE_MATRIX can only be used when the nonzero structure of the
6710:    original matrix has not changed from that last call to MatCreateSubMatrices().

6712:    This routine creates the matrices in submat; you should NOT create them before
6713:    calling it. It also allocates the array of matrix pointers submat.

6715:    For BAIJ matrices the index sets must respect the block structure, that is if they
6716:    request one row/column in a block, they must request all rows/columns that are in
6717:    that block. For example, if the block size is 2 you cannot request just row 0 and
6718:    column 0.

6720:    Fortran Note:
6721:    The Fortran interface is slightly different from that given below; it
6722:    requires one to pass in as submat a Mat (integer) array of size at least n.

6724:    Level: advanced

6726:    Concepts: matrices^accessing submatrices
6727:    Concepts: submatrices

6729: .seealso: MatDestroySubMatrices(), MatCreateSubMatrix(), MatGetRow(), MatGetDiagonal(), MatReuse
6730: @*/
6731: PetscErrorCode MatCreateSubMatrices(Mat mat,PetscInt n,const IS irow[],const IS icol[],MatReuse scall,Mat *submat[])
6732: {
6734:   PetscInt       i;
6735:   PetscBool      eq;

6740:   if (n) {
6745:   }
6747:   if (n && scall == MAT_REUSE_MATRIX) {
6750:   }
6751:   if (!mat->ops->createsubmatrices) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
6752:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
6753:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6754:   MatCheckPreallocated(mat,1);

6756:   PetscLogEventBegin(MAT_CreateSubMats,mat,0,0,0);
6757:   (*mat->ops->createsubmatrices)(mat,n,irow,icol,scall,submat);
6758:   PetscLogEventEnd(MAT_CreateSubMats,mat,0,0,0);
6759:   for (i=0; i<n; i++) {
6760:     (*submat)[i]->factortype = MAT_FACTOR_NONE;  /* in case in place factorization was previously done on submatrix */
6761:     if (mat->symmetric || mat->structurally_symmetric || mat->hermitian) {
6762:       ISEqual(irow[i],icol[i],&eq);
6763:       if (eq) {
6764:         if (mat->symmetric) {
6765:           MatSetOption((*submat)[i],MAT_SYMMETRIC,PETSC_TRUE);
6766:         } else if (mat->hermitian) {
6767:           MatSetOption((*submat)[i],MAT_HERMITIAN,PETSC_TRUE);
6768:         } else if (mat->structurally_symmetric) {
6769:           MatSetOption((*submat)[i],MAT_STRUCTURALLY_SYMMETRIC,PETSC_TRUE);
6770:         }
6771:       }
6772:     }
6773:   }
6774:   return(0);
6775: }
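
/*
   A minimal usage sketch (illustrative only, not part of the original file): extract one sequential
   submatrix per process, here the locally owned rows of a hypothetical matrix A against all of its
   columns, then free the result with MatDestroySubMatrices(). Error-code checking is omitted to
   match this listing.
*/
static PetscErrorCode ExampleCreateSubMatrices(Mat A)
{
  IS       isrow,iscol;
  Mat      *submats;
  PetscInt rstart,rend,N;

  MatGetOwnershipRange(A,&rstart,&rend);
  MatGetSize(A,NULL,&N);
  ISCreateStride(PETSC_COMM_SELF,rend-rstart,rstart,1,&isrow);
  ISCreateStride(PETSC_COMM_SELF,N,0,1,&iscol);
  MatCreateSubMatrices(A,1,&isrow,&iscol,MAT_INITIAL_MATRIX,&submats);
  /* ... work with the sequential matrix submats[0] ... */
  MatDestroySubMatrices(1,&submats);
  ISDestroy(&isrow);
  ISDestroy(&iscol);
  return(0);
}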

6777: /*@C
6778:    MatCreateSubMatricesMPI - Extracts MPI submatrices across a sub communicator of mat (by pairs of IS that may live on subcomms).

6780:    Collective on Mat

6782:    Input Parameters:
6783: +  mat - the matrix
6784: .  n   - the number of submatrices to be extracted
6785: .  irow, icol - index sets of rows and columns to extract
6786: -  scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX

6788:    Output Parameter:
6789: .  submat - the array of submatrices

6791:    Level: advanced

6793:    Concepts: matrices^accessing submatrices
6794:    Concepts: submatrices

6796: .seealso: MatCreateSubMatrices(), MatCreateSubMatrix(), MatGetRow(), MatGetDiagonal(), MatReuse
6797: @*/
6798: PetscErrorCode MatCreateSubMatricesMPI(Mat mat,PetscInt n,const IS irow[],const IS icol[],MatReuse scall,Mat *submat[])
6799: {
6801:   PetscInt       i;
6802:   PetscBool      eq;

6807:   if (n) {
6812:   }
6814:   if (n && scall == MAT_REUSE_MATRIX) {
6817:   }
6818:   if (!mat->ops->createsubmatricesmpi) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
6819:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
6820:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6821:   MatCheckPreallocated(mat,1);

6823:   PetscLogEventBegin(MAT_CreateSubMats,mat,0,0,0);
6824:   (*mat->ops->createsubmatricesmpi)(mat,n,irow,icol,scall,submat);
6825:   PetscLogEventEnd(MAT_CreateSubMats,mat,0,0,0);
6826:   for (i=0; i<n; i++) {
6827:     if (mat->symmetric || mat->structurally_symmetric || mat->hermitian) {
6828:       ISEqual(irow[i],icol[i],&eq);
6829:       if (eq) {
6830:         if (mat->symmetric) {
6831:           MatSetOption((*submat)[i],MAT_SYMMETRIC,PETSC_TRUE);
6832:         } else if (mat->hermitian) {
6833:           MatSetOption((*submat)[i],MAT_HERMITIAN,PETSC_TRUE);
6834:         } else if (mat->structurally_symmetric) {
6835:           MatSetOption((*submat)[i],MAT_STRUCTURALLY_SYMMETRIC,PETSC_TRUE);
6836:         }
6837:       }
6838:     }
6839:   }
6840:   return(0);
6841: }

6843: /*@C
6844:    MatDestroyMatrices - Destroys an array of matrices.

6846:    Collective on Mat

6848:    Input Parameters:
6849: +  n - the number of local matrices
6850: -  mat - the matrices (note that this is a pointer to the array of matrices)

6852:    Level: advanced

6854:     Notes: Frees not only the matrices, but also the array that contains the matrices
6855:            In Fortran the array is not freed.

6857: .seealso: MatCreateSubMatrices(), MatDestroySubMatrices()
6858: @*/
6859: PetscErrorCode MatDestroyMatrices(PetscInt n,Mat *mat[])
6860: {
6862:   PetscInt       i;

6865:   if (!*mat) return(0);
6866:   if (n < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Trying to destroy negative number of matrices %D",n);

6869:   for (i=0; i<n; i++) {
6870:     MatDestroy(&(*mat)[i]);
6871:   }

6873:   /* memory is allocated even if n = 0 */
6874:   PetscFree(*mat);
6875:   return(0);
6876: }

6878: /*@C
6879:    MatDestroySubMatrices - Destroys a set of matrices obtained with MatCreateSubMatrices().

6881:    Collective on Mat

6883:    Input Parameters:
6884: +  n - the number of local matrices
6885: -  mat - the matrices (note that this is a pointer to the array of matrices, just to match the calling
6886:                        sequence of MatCreateSubMatrices())

6888:    Level: advanced

6890:     Notes: Frees not only the matrices, but also the array that contains the matrices
6891:            In Fortran the array is not freed.

6893: .seealso: MatCreateSubMatrices()
6894: @*/
6895: PetscErrorCode MatDestroySubMatrices(PetscInt n,Mat *mat[])
6896: {
6898:   Mat            mat0;

6901:   if (!*mat) return(0);
6902:   /* mat[] is an array of length n+1, see MatCreateSubMatrices_xxx() */
6903:   if (n < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Trying to destroy negative number of matrices %D",n);

6906:   mat0 = (*mat)[0];
6907:   if (mat0 && mat0->ops->destroysubmatrices) {
6908:     (mat0->ops->destroysubmatrices)(n,mat);
6909:   } else {
6910:     MatDestroyMatrices(n,mat);
6911:   }
6912:   return(0);
6913: }

6915: /*@C
6916:    MatGetSeqNonzeroStructure - Extracts the sequential nonzero structure from a matrix.

6918:    Collective on Mat

6920:    Input Parameters:
6921: .  mat - the matrix

6923:    Output Parameter:
6924: .  matstruct - the sequential matrix with the nonzero structure of mat

6926:   Level: intermediate

6928: .seealso: MatDestroySeqNonzeroStructure(), MatCreateSubMatrices(), MatDestroyMatrices()
6929: @*/
6930: PetscErrorCode MatGetSeqNonzeroStructure(Mat mat,Mat *matstruct)
6931: {


6939:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6940:   MatCheckPreallocated(mat,1);

6942:   if (!mat->ops->getseqnonzerostructure) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Not for matrix type %s\n",((PetscObject)mat)->type_name);
6943:   PetscLogEventBegin(MAT_GetSeqNonzeroStructure,mat,0,0,0);
6944:   (*mat->ops->getseqnonzerostructure)(mat,matstruct);
6945:   PetscLogEventEnd(MAT_GetSeqNonzeroStructure,mat,0,0,0);
6946:   return(0);
6947: }

6949: /*@C
6950:    MatDestroySeqNonzeroStructure - Destroys matrix obtained with MatGetSeqNonzeroStructure().

6952:    Collective on Mat

6954:    Input Parameters:
6955: .  mat - the matrix to destroy (note that this is a pointer to the matrix obtained with
6956:                        MatGetSeqNonzeroStructure())

6958:    Level: advanced

6960:     Notes: Destroys the matrix obtained with MatGetSeqNonzeroStructure()

6962: .seealso: MatGetSeqNonzeroStructure()
6963: @*/
6964: PetscErrorCode MatDestroySeqNonzeroStructure(Mat *mat)
6965: {

6970:   MatDestroy(mat);
6971:   return(0);
6972: }

6974: /*@
6975:    MatIncreaseOverlap - Given a set of submatrices indicated by index sets,
6976:    replaces the index sets by larger ones that represent submatrices with
6977:    additional overlap.

6979:    Collective on Mat

6981:    Input Parameters:
6982: +  mat - the matrix
6983: .  n   - the number of index sets
6984: .  is  - the array of index sets (these index sets will be changed during the call)
6985: -  ov  - the additional overlap requested

6987:    Options Database:
6988: .  -mat_increase_overlap_scalable - use a scalable algorithm to compute the overlap (supported by MPIAIJ matrix)

6990:    Level: developer

6992:    Concepts: overlap
6993:    Concepts: ASM^computing overlap

6995: .seealso: MatCreateSubMatrices()
6996: @*/
6997: PetscErrorCode MatIncreaseOverlap(Mat mat,PetscInt n,IS is[],PetscInt ov)
6998: {

7004:   if (n < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Must have one or more domains, you have %D",n);
7005:   if (n) {
7008:   }
7009:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
7010:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
7011:   MatCheckPreallocated(mat,1);

7013:   if (!ov) return(0);
7014:   if (!mat->ops->increaseoverlap) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
7015:   PetscLogEventBegin(MAT_IncreaseOverlap,mat,0,0,0);
7016:   (*mat->ops->increaseoverlap)(mat,n,is,ov);
7017:   PetscLogEventEnd(MAT_IncreaseOverlap,mat,0,0,0);
7018:   return(0);
7019: }
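
/*
   A minimal usage sketch (illustrative only, not part of the original file): enlarge a locally
   owned row set by one level of overlap, as an additive Schwarz setup would. A is a hypothetical
   assembled parallel matrix; error-code checking is omitted to match this listing.
*/
static PetscErrorCode ExampleIncreaseOverlap(Mat A)
{
  IS       domain;
  PetscInt rstart,rend;

  MatGetOwnershipRange(A,&rstart,&rend);
  ISCreateStride(PETSC_COMM_SELF,rend-rstart,rstart,1,&domain);
  MatIncreaseOverlap(A,1,&domain,1);      /* domain is replaced by the enlarged index set */
  ISDestroy(&domain);
  return(0);
}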


7022: PetscErrorCode MatIncreaseOverlapSplit_Single(Mat,IS*,PetscInt);

7024: /*@
7025:    MatIncreaseOverlapSplit - Given a set of submatrices indicated by index sets across
7026:    a sub communicator, replaces the index sets by larger ones that represent submatrices with
7027:    additional overlap.

7029:    Collective on Mat

7031:    Input Parameters:
7032: +  mat - the matrix
7033: .  n   - the number of index sets
7034: .  is  - the array of index sets (these index sets will be changed during the call)
7035: -  ov  - the additional overlap requested

7037:    Options Database:
7038: .  -mat_increase_overlap_scalable - use a scalable algorithm to compute the overlap (supported by MPIAIJ matrix)

7040:    Level: developer

7042:    Concepts: overlap
7043:    Concepts: ASM^computing overlap

7045: .seealso: MatCreateSubMatrices()
7046: @*/
7047: PetscErrorCode MatIncreaseOverlapSplit(Mat mat,PetscInt n,IS is[],PetscInt ov)
7048: {
7049:   PetscInt       i;

7055:   if (n < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Must have one or more domains, you have %D",n);
7056:   if (n) {
7059:   }
7060:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
7061:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
7062:   MatCheckPreallocated(mat,1);
7063:   if (!ov) return(0);
7064:   PetscLogEventBegin(MAT_IncreaseOverlap,mat,0,0,0);
7065:   for (i=0; i<n; i++) {
7066:     MatIncreaseOverlapSplit_Single(mat,&is[i],ov);
7067:   }
7068:   PetscLogEventEnd(MAT_IncreaseOverlap,mat,0,0,0);
7069:   return(0);
7070: }




7075: /*@
7076:    MatGetBlockSize - Returns the matrix block size.

7078:    Not Collective

7080:    Input Parameter:
7081: .  mat - the matrix

7083:    Output Parameter:
7084: .  bs - block size

7086:    Notes:
7087:     Block row formats are MATSEQBAIJ, MATMPIBAIJ, MATSEQSBAIJ, MATMPISBAIJ. These formats ALWAYS have square block storage in the matrix.

7089:    If the block size has not been set yet this routine returns 1.

7091:    Level: intermediate

7093:    Concepts: matrices^block size

7095: .seealso: MatCreateSeqBAIJ(), MatCreateBAIJ(), MatGetBlockSizes()
7096: @*/
7097: PetscErrorCode MatGetBlockSize(Mat mat,PetscInt *bs)
7098: {
7102:   *bs = PetscAbs(mat->rmap->bs);
7103:   return(0);
7104: }

7106: /*@
7107:    MatGetBlockSizes - Returns the matrix block row and column sizes.

7109:    Not Collective

7111:    Input Parameter:
7112: .  mat - the matrix

7114:    Output Parameters:
7115: +  rbs - row block size
7116: -  cbs - column block size

7118:    Notes:
7119:     Block row formats are MATSEQBAIJ, MATMPIBAIJ, MATSEQSBAIJ, MATMPISBAIJ. These formats ALWAYS have square block storage in the matrix.
7120:     If you pass a different block size for the columns than the rows, the row block size determines the square block storage.

7122:    If a block size has not been set yet this routine returns 1.

7124:    Level: intermediate

7126:    Concepts: matrices^block size

7128: .seealso: MatCreateSeqBAIJ(), MatCreateBAIJ(), MatGetBlockSize(), MatSetBlockSize(), MatSetBlockSizes()
7129: @*/
7130: PetscErrorCode MatGetBlockSizes(Mat mat,PetscInt *rbs, PetscInt *cbs)
7131: {
7136:   if (rbs) *rbs = PetscAbs(mat->rmap->bs);
7137:   if (cbs) *cbs = PetscAbs(mat->cmap->bs);
7138:   return(0);
7139: }

7141: /*@
7142:    MatSetBlockSize - Sets the matrix block size.

7144:    Logically Collective on Mat

7146:    Input Parameters:
7147: +  mat - the matrix
7148: -  bs - block size

7150:    Notes:
7151:     Block row formats are MATSEQBAIJ, MATMPIBAIJ, MATSEQSBAIJ, MATMPISBAIJ. These formats ALWAYS have square block storage in the matrix.
7152:     This must be called before MatSetUp() or MatXXXSetPreallocation() (or will default to 1) and the block size cannot be changed later.

7154:     For MATMPIAIJ and MATSEQAIJ matrix formats, this function can be called at a later stage, provided that the specified block size
7155:     is compatible with the matrix local sizes.

7157:    Level: intermediate

7159:    Concepts: matrices^block size

7161: .seealso: MatCreateSeqBAIJ(), MatCreateBAIJ(), MatGetBlockSize(), MatSetBlockSizes(), MatGetBlockSizes()
7162: @*/
7163: PetscErrorCode MatSetBlockSize(Mat mat,PetscInt bs)
7164: {

7170:   MatSetBlockSizes(mat,bs,bs);
7171:   return(0);
7172: }

7174: /*@
7175:    MatSetBlockSizes - Sets the matrix block row and column sizes.

7177:    Logically Collective on Mat

7179:    Input Parameters:
7180: +  mat - the matrix
7181: .  rbs - row block size
7182: -  cbs - column block size

7184:    Notes:
7185:     Block row formats are MATSEQBAIJ, MATMPIBAIJ, MATSEQSBAIJ, MATMPISBAIJ. These formats ALWAYS have square block storage in the matrix.
7186:     If you pass a different block size for the columns than the rows, the row block size determines the square block storage.
7187:     This must be called before MatSetUp() or MatXXXSetPreallocation() (or will default to 1) and the block size cannot be changed later

7189:     For MATMPIAIJ and MATSEQAIJ matrix formats, this function can be called at a later stage, provided that the specified block sizes
7190:     are compatible with the matrix local sizes.

7192:     The row and column block sizes determine the block sizes of the "row" and "column" vectors returned by MatCreateVecs().

7194:    Level: intermediate

7196:    Concepts: matrices^block size

7198: .seealso: MatCreateSeqBAIJ(), MatCreateBAIJ(), MatGetBlockSize(), MatSetBlockSize(), MatGetBlockSizes()
7199: @*/
7200: PetscErrorCode MatSetBlockSizes(Mat mat,PetscInt rbs,PetscInt cbs)
7201: {

7208:   if (mat->ops->setblocksizes) {
7209:     (*mat->ops->setblocksizes)(mat,rbs,cbs);
7210:   }
7211:   if (mat->rmap->refcnt) {
7212:     ISLocalToGlobalMapping l2g = NULL;
7213:     PetscLayout            nmap = NULL;

7215:     PetscLayoutDuplicate(mat->rmap,&nmap);
7216:     if (mat->rmap->mapping) {
7217:       ISLocalToGlobalMappingDuplicate(mat->rmap->mapping,&l2g);
7218:     }
7219:     PetscLayoutDestroy(&mat->rmap);
7220:     mat->rmap = nmap;
7221:     mat->rmap->mapping = l2g;
7222:   }
7223:   if (mat->cmap->refcnt) {
7224:     ISLocalToGlobalMapping l2g = NULL;
7225:     PetscLayout            nmap = NULL;

7227:     PetscLayoutDuplicate(mat->cmap,&nmap);
7228:     if (mat->cmap->mapping) {
7229:       ISLocalToGlobalMappingDuplicate(mat->cmap->mapping,&l2g);
7230:     }
7231:     PetscLayoutDestroy(&mat->cmap);
7232:     mat->cmap = nmap;
7233:     mat->cmap->mapping = l2g;
7234:   }
7235:   PetscLayoutSetBlockSize(mat->rmap,rbs);
7236:   PetscLayoutSetBlockSize(mat->cmap,cbs);
7237:   return(0);
7238: }
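
/*
   A minimal usage sketch (illustrative only, not part of the original file): declare a 3x1 block
   structure on a hypothetical matrix A before it is set up, so that vectors later produced by
   MatCreateVecs() inherit matching block sizes.
*/
static PetscErrorCode ExampleSetBlockSizes(Mat A)
{
  MatSetBlockSizes(A,3,1);   /* row blocks of size 3, column blocks of size 1 */
  MatSetUp(A);               /* preallocation (or MatXXXSetPreallocation()) must come afterwards */
  return(0);
}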

7240: /*@
7241:    MatSetBlockSizesFromMats - Sets the matrix block row and column sizes to match a pair of matrices

7243:    Logically Collective on Mat

7245:    Input Parameters:
7246: +  mat - the matrix
7247: .  fromRow - matrix from which to copy row block size
7248: -  fromCol - matrix from which to copy column block size (can be same as fromRow)

7250:    Level: developer

7252:    Concepts: matrices^block size

7254: .seealso: MatCreateSeqBAIJ(), MatCreateBAIJ(), MatGetBlockSize(), MatSetBlockSizes()
7255: @*/
7256: PetscErrorCode MatSetBlockSizesFromMats(Mat mat,Mat fromRow,Mat fromCol)
7257: {

7264:   if (fromRow->rmap->bs > 0) {PetscLayoutSetBlockSize(mat->rmap,fromRow->rmap->bs);}
7265:   if (fromCol->cmap->bs > 0) {PetscLayoutSetBlockSize(mat->cmap,fromCol->cmap->bs);}
7266:   return(0);
7267: }

7269: /*@
7270:    MatResidual - Default routine to calculate the residual.

7272:    Collective on Mat and Vec

7274:    Input Parameters:
7275: +  mat - the matrix
7276: .  b   - the right-hand-side
7277: -  x   - the approximate solution

7279:    Output Parameter:
7280: .  r - location to store the residual

7282:    Level: developer

7284: .keywords: MG, default, multigrid, residual

7286: .seealso: PCMGSetResidual()
7287: @*/
7288: PetscErrorCode MatResidual(Mat mat,Vec b,Vec x,Vec r)
7289: {

7298:   MatCheckPreallocated(mat,1);
7299:   PetscLogEventBegin(MAT_Residual,mat,0,0,0);
7300:   if (!mat->ops->residual) {
7301:     MatMult(mat,x,r);
7302:     VecAYPX(r,-1.0,b);
7303:   } else {
7304:     (*mat->ops->residual)(mat,b,x,r);
7305:   }
7306:   PetscLogEventEnd(MAT_Residual,mat,0,0,0);
7307:   return(0);
7308: }
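
/*
   A minimal usage sketch (illustrative only, not part of the original file): compute r = b - A x
   for a hypothetical assembled matrix A and conforming vectors b and x.
*/
static PetscErrorCode ExampleResidual(Mat A,Vec b,Vec x)
{
  Vec r;

  VecDuplicate(b,&r);
  MatResidual(A,b,x,r);   /* falls back to MatMult() + VecAYPX() when the type has no special routine */
  VecDestroy(&r);
  return(0);
}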

7310: /*@C
7311:     MatGetRowIJ - Returns the compressed row storage i and j indices for sequential matrices.

7313:    Collective on Mat

7315:     Input Parameters:
7316: +   mat - the matrix
7317: .   shift -  0 or 1 indicating we want the indices starting at 0 or 1
7318: .   symmetric - PETSC_TRUE or PETSC_FALSE indicating the matrix data structure should be   symmetrized
7319: -   inodecompressed - PETSC_TRUE or PETSC_FALSE  indicating if the nonzero structure of the
7320:                  inodes or the nonzero elements is wanted. For BAIJ matrices the compressed version is
7321:                  always used.

7323:     Output Parameters:
7324: +   n - number of rows in the (possibly compressed) matrix
7325: .   ia - the row pointers [of length n+1]
7326: .   ja - the column indices
7327: -   done - indicates if the routine actually worked and returned appropriate ia[] and ja[] arrays; callers
7328:            are responsible for handling the case when done == PETSC_FALSE and ia and ja are not set

7330:     Level: developer

7332:     Notes: You CANNOT change any of the ia[] or ja[] values.

7334:            Use MatRestoreRowIJ() when you are finished accessing the ia[] and ja[] values

7336:     Fortran Note:

7338:            In Fortran use
7339: $           PetscInt ia(1), ja(1)
7340: $           PetscOffset iia, jja
7341: $      call MatGetRowIJ(mat,shift,symmetric,inodecompressed,n,ia,iia,ja,jja,done,ierr)
7342: $      Access the ith and jth entries via ia(iia + i) and ja(jja + j)
7343: $
7344: $          or
7345: $
7346: $           PetscInt, pointer :: ia(:),ja(:)
7347: $    call  MatGetRowIJF90(mat,shift,symmetric,inodecompressed,n,ia,ja,done,ierr)
7348: $      Access the ith and jth entries via ia(i) and ja(j)



7352: .seealso: MatGetColumnIJ(), MatRestoreRowIJ(), MatSeqAIJGetArray()
7353: @*/
7354: PetscErrorCode MatGetRowIJ(Mat mat,PetscInt shift,PetscBool symmetric,PetscBool inodecompressed,PetscInt *n,const PetscInt *ia[],const PetscInt *ja[],PetscBool  *done)
7355: {

7365:   MatCheckPreallocated(mat,1);
7366:   if (!mat->ops->getrowij) *done = PETSC_FALSE;
7367:   else {
7368:     *done = PETSC_TRUE;
7369:     PetscLogEventBegin(MAT_GetRowIJ,mat,0,0,0);
7370:     (*mat->ops->getrowij)(mat,shift,symmetric,inodecompressed,n,ia,ja,done);
7371:     PetscLogEventEnd(MAT_GetRowIJ,mat,0,0,0);
7372:   }
7373:   return(0);
7374: }
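
/*
   A minimal usage sketch (illustrative only, not part of the original file): inspect the compressed
   row pointers of a hypothetical sequential AIJ matrix A. The arrays are read-only and must be
   given back with MatRestoreRowIJ(); error-code checking is omitted to match this listing.
*/
static PetscErrorCode ExampleInspectRowIJ(Mat A)
{
  PetscInt       n,row,nnz;
  const PetscInt *ia,*ja;
  PetscBool      done;

  MatGetRowIJ(A,0,PETSC_FALSE,PETSC_FALSE,&n,&ia,&ja,&done);
  if (done) {
    for (row=0; row<n; row++) {
      nnz = ia[row+1] - ia[row];          /* number of nonzeros in this row */
      PetscPrintf(PETSC_COMM_SELF,"row %D has %D nonzeros\n",row,nnz);
    }
  }
  MatRestoreRowIJ(A,0,PETSC_FALSE,PETSC_FALSE,&n,&ia,&ja,&done);
  return(0);
}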

7376: /*@C
7377:     MatGetColumnIJ - Returns the compressed column storage i and j indices for sequential matrices.

7379:     Collective on Mat

7381:     Input Parameters:
7382: +   mat - the matrix
7383: .   shift - 0 or 1 indicating we want the indices starting at 0 or 1
7384: .   symmetric - PETSC_TRUE or PETSC_FALSE indicating the matrix data structure should be
7385:                 symmetrized
7386: .   inodecompressed - PETSC_TRUE or PETSC_FALSE indicating if the nonzero structure of the
7387:                  inodes or the nonzero elements is wanted. For BAIJ matrices the compressed version is
7388:                  always used.
7389: .   n - number of columns in the (possibly compressed) matrix
7390: .   ia - the column pointers
7391: -   ja - the row indices

7393:     Output Parameters:
7394: .   done - PETSC_TRUE or PETSC_FALSE, indicating whether the values have been returned

7396:     Note:
7397:     This routine zeros out n, ia, and ja. This is to prevent accidental
7398:     use of the array after it has been restored. If you pass NULL, it will
7399:     not zero the pointers.  Use of ia or ja after MatRestoreColumnIJ() is invalid.

7401:     Level: developer

7403: .seealso: MatGetRowIJ(), MatRestoreColumnIJ()
7404: @*/
7405: PetscErrorCode MatGetColumnIJ(Mat mat,PetscInt shift,PetscBool symmetric,PetscBool inodecompressed,PetscInt *n,const PetscInt *ia[],const PetscInt *ja[],PetscBool  *done)
7406: {

7416:   MatCheckPreallocated(mat,1);
7417:   if (!mat->ops->getcolumnij) *done = PETSC_FALSE;
7418:   else {
7419:     *done = PETSC_TRUE;
7420:     (*mat->ops->getcolumnij)(mat,shift,symmetric,inodecompressed,n,ia,ja,done);
7421:   }
7422:   return(0);
7423: }

7425: /*@C
7426:     MatRestoreRowIJ - Call after you are completed with the ia,ja indices obtained with
7427:     MatGetRowIJ().

7429:     Collective on Mat

7431:     Input Parameters:
7432: +   mat - the matrix
7433: .   shift - 0 or 1 indicating we want the indices starting at 0 or 1
7434: .   symmetric - PETSC_TRUE or PETSC_FALSE indicating the matrix data structure should be
7435:                 symmetrized
7436: .   inodecompressed -  PETSC_TRUE or PETSC_FALSE indicating if the nonzero structure of the
7437:                  inodes or the nonzero elements is wanted. For BAIJ matrices the compressed version is
7438:                  always used.
7439: .   n - size of (possibly compressed) matrix
7440: .   ia - the row pointers
7441: -   ja - the column indices

7443:     Output Parameters:
7444: .   done - PETSC_TRUE or PETSC_FALSE indicating whether the values have been returned

7446:     Note:
7447:     This routine zeros out n, ia, and ja. This is to prevent accidental
7448:     use of the array after it has been restored. If you pass NULL, it will
7449:     not zero the pointers.  Use of ia or ja after MatRestoreRowIJ() is invalid.

7451:     Level: developer

7453: .seealso: MatGetRowIJ(), MatRestoreColumnIJ()
7454: @*/
7455: PetscErrorCode MatRestoreRowIJ(Mat mat,PetscInt shift,PetscBool symmetric,PetscBool inodecompressed,PetscInt *n,const PetscInt *ia[],const PetscInt *ja[],PetscBool  *done)
7456: {

7465:   MatCheckPreallocated(mat,1);

7467:   if (!mat->ops->restorerowij) *done = PETSC_FALSE;
7468:   else {
7469:     *done = PETSC_TRUE;
7470:     (*mat->ops->restorerowij)(mat,shift,symmetric,inodecompressed,n,ia,ja,done);
7471:     if (n)  *n = 0;
7472:     if (ia) *ia = NULL;
7473:     if (ja) *ja = NULL;
7474:   }
7475:   return(0);
7476: }

7478: /*@C
7479:     MatRestoreColumnIJ - Call after you are completed with the ia,ja indices obtained with
7480:     MatGetColumnIJ().

7482:     Collective on Mat

7484:     Input Parameters:
7485: +   mat - the matrix
7486: .   shift - 0 or 1 indicating we want the indices starting at 0 or 1
7487: .   symmetric - PETSC_TRUE or PETSC_FALSE indicating the matrix data structure should be
7488:                 symmetrized
7489: -   inodecompressed - PETSC_TRUE or PETSC_FALSE indicating if the nonzero structure of the
7490:                  inodes or the nonzero elements is wanted. For BAIJ matrices the compressed version is
7491:                  always used.

7493:     Output Parameters:
7494: +   n - size of (possibly compressed) matrix
7495: .   ia - the column pointers
7496: .   ja - the row indices
7497: -   done - PETSC_TRUE or PETSC_FALSE indicating whether the values have been returned

7499:     Level: developer

7501: .seealso: MatGetColumnIJ(), MatRestoreRowIJ()
7502: @*/
7503: PetscErrorCode MatRestoreColumnIJ(Mat mat,PetscInt shift,PetscBool symmetric,PetscBool inodecompressed,PetscInt *n,const PetscInt *ia[],const PetscInt *ja[],PetscBool  *done)
7504: {

7513:   MatCheckPreallocated(mat,1);

7515:   if (!mat->ops->restorecolumnij) *done = PETSC_FALSE;
7516:   else {
7517:     *done = PETSC_TRUE;
7518:     (*mat->ops->restorecolumnij)(mat,shift,symmetric,inodecompressed,n,ia,ja,done);
7519:     if (n)  *n = 0;
7520:     if (ia) *ia = NULL;
7521:     if (ja) *ja = NULL;
7522:   }
7523:   return(0);
7524: }

7526: /*@C
7527:     MatColoringPatch - Used inside matrix coloring routines that
7528:     use MatGetRowIJ() and/or MatGetColumnIJ().

7530:     Collective on Mat

7532:     Input Parameters:
7533: +   mat - the matrix
7534: .   ncolors - max color value
7535: .   n   - number of entries in colorarray
7536: -   colorarray - array indicating color for each column

7538:     Output Parameters:
7539: .   iscoloring - coloring generated using colorarray information

7541:     Level: developer

7543: .seealso: MatGetRowIJ(), MatGetColumnIJ()

7545: @*/
7546: PetscErrorCode MatColoringPatch(Mat mat,PetscInt ncolors,PetscInt n,ISColoringValue colorarray[],ISColoring *iscoloring)
7547: {

7555:   MatCheckPreallocated(mat,1);

7557:   if (!mat->ops->coloringpatch) {
7558:     ISColoringCreate(PetscObjectComm((PetscObject)mat),ncolors,n,colorarray,PETSC_OWN_POINTER,iscoloring);
7559:   } else {
7560:     (*mat->ops->coloringpatch)(mat,ncolors,n,colorarray,iscoloring);
7561:   }
7562:   return(0);
7563: }


7566: /*@
7567:    MatSetUnfactored - Resets a factored matrix to be treated as unfactored.

7569:    Logically Collective on Mat

7571:    Input Parameter:
7572: .  mat - the factored matrix to be reset

7574:    Notes:
7575:    This routine should be used only with factored matrices formed by in-place
7576:    factorization via ILU(0) (or by in-place LU factorization for the MATSEQDENSE
7577:    format).  This option can save memory, for example, when solving nonlinear
7578:    systems with a matrix-free Newton-Krylov method and a matrix-based, in-place
7579:    ILU(0) preconditioner.

7581:    Note that one can specify in-place ILU(0) factorization by calling
7582: .vb
7583:      PCSetType(pc,PCILU);
7584:      PCFactorSetUseInPlace(pc,PETSC_TRUE);
7585: .ve
7586:    or by using the options -pc_type ilu -pc_factor_in_place

7588:    In-place factorization ILU(0) can also be used as a local
7589:    solver for the blocks within the block Jacobi or additive Schwarz
7590:    methods (runtime option: -sub_pc_factor_in_place).  See Users-Manual: ch_pc
7591:    for details on setting local solver options.

7593:    Most users should employ the simplified KSP interface for linear solvers
7594:    instead of working directly with matrix algebra routines such as this.
7595:    See, e.g., KSPCreate().

7597:    Level: developer

7599: .seealso: PCFactorSetUseInPlace(), PCFactorGetUseInPlace()

7601:    Concepts: matrices^unfactored

7603: @*/
7604: PetscErrorCode MatSetUnfactored(Mat mat)
7605: {

7611:   MatCheckPreallocated(mat,1);
7612:   mat->factortype = MAT_FACTOR_NONE;
7613:   if (!mat->ops->setunfactored) return(0);
7614:   (*mat->ops->setunfactored)(mat);
7615:   return(0);
7616: }

7618: /*MC
7619:     MatDenseGetArrayF90 - Accesses a matrix array from Fortran90.

7621:     Synopsis:
7622:     MatDenseGetArrayF90(Mat x,{Scalar, pointer :: xx_v(:,:)},integer ierr)

7624:     Not collective

7626:     Input Parameter:
7627: .   x - matrix

7629:     Output Parameters:
7630: +   xx_v - the Fortran90 pointer to the array
7631: -   ierr - error code

7633:     Example of Usage:
7634: .vb
7635:       PetscScalar, pointer :: xx_v(:,:)
7636:       ....
7637:       call MatDenseGetArrayF90(x,xx_v,ierr)
7638:       a = xx_v(3,1)
7639:       call MatDenseRestoreArrayF90(x,xx_v,ierr)
7640: .ve

7642:     Level: advanced

7644: .seealso:  MatDenseRestoreArrayF90(), MatDenseGetArray(), MatDenseRestoreArray(), MatSeqAIJGetArrayF90()

7646:     Concepts: matrices^accessing array

7648: M*/

7650: /*MC
7651:     MatDenseRestoreArrayF90 - Restores a matrix array that has been
7652:     accessed with MatDenseGetArrayF90().

7654:     Synopsis:
7655:     MatDenseRestoreArrayF90(Mat x,{Scalar, pointer :: xx_v(:,:)},integer ierr)

7657:     Not collective

7659:     Input Parameters:
7660: +   x - matrix
7661: -   xx_v - the Fortran90 pointer to the array

7663:     Output Parameter:
7664: .   ierr - error code

7666:     Example of Usage:
7667: .vb
7668:        PetscScalar, pointer :: xx_v(:,:)
7669:        ....
7670:        call MatDenseGetArrayF90(x,xx_v,ierr)
7671:        a = xx_v(3,1)
7672:        call MatDenseRestoreArrayF90(x,xx_v,ierr)
7673: .ve

7675:     Level: advanced

7677: .seealso:  MatDenseGetArrayF90(), MatDenseGetArray(), MatDenseRestoreArray(), MatSeqAIJRestoreArrayF90()

7679: M*/


7682: /*MC
7683:     MatSeqAIJGetArrayF90 - Accesses a matrix array from Fortran90.

7685:     Synopsis:
7686:     MatSeqAIJGetArrayF90(Mat x,{Scalar, pointer :: xx_v(:)},integer ierr)

7688:     Not collective

7690:     Input Parameter:
7691: .   x - matrix

7693:     Output Parameters:
7694: +   xx_v - the Fortran90 pointer to the array
7695: -   ierr - error code

7697:     Example of Usage:
7698: .vb
7699:       PetscScalar, pointer :: xx_v(:)
7700:       ....
7701:       call MatSeqAIJGetArrayF90(x,xx_v,ierr)
7702:       a = xx_v(3)
7703:       call MatSeqAIJRestoreArrayF90(x,xx_v,ierr)
7704: .ve

7706:     Level: advanced

7708: .seealso:  MatSeqAIJRestoreArrayF90(), MatSeqAIJGetArray(), MatSeqAIJRestoreArray(), MatDenseGetArrayF90()

7710:     Concepts: matrices^accessing array

7712: M*/

7714: /*MC
7715:     MatSeqAIJRestoreArrayF90 - Restores a matrix array that has been
7716:     accessed with MatSeqAIJGetArrayF90().

7718:     Synopsis:
7719:     MatSeqAIJRestoreArrayF90(Mat x,{Scalar, pointer :: xx_v(:)},integer ierr)

7721:     Not collective

7723:     Input Parameters:
7724: +   x - matrix
7725: -   xx_v - the Fortran90 pointer to the array

7727:     Output Parameter:
7728: .   ierr - error code

7730:     Example of Usage:
7731: .vb
7732:        PetscScalar, pointer :: xx_v(:)
7733:        ....
7734:        call MatSeqAIJGetArrayF90(x,xx_v,ierr)
7735:        a = xx_v(3)
7736:        call MatSeqAIJRestoreArrayF90(x,xx_v,ierr)
7737: .ve

7739:     Level: advanced

7741: .seealso:  MatSeqAIJGetArrayF90(), MatSeqAIJGetArray(), MatSeqAIJRestoreArray(), MatDenseRestoreArrayF90()

7743: M*/


7746: /*@
7747:     MatCreateSubMatrix - Gets a single submatrix on the same number of processors
7748:                       as the original matrix.

7750:     Collective on Mat

7752:     Input Parameters:
7753: +   mat - the original matrix
7754: .   isrow - parallel IS containing the rows this processor should obtain
7755: .   iscol - parallel IS containing all columns you wish to keep. Each process should list the columns that will be in its "diagonal part" in the new matrix.
7756: -   cll - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX

7758:     Output Parameter:
7759: .   newmat - the new submatrix, of the same type as the old

7761:     Level: advanced

7763:     Notes:
7764:     The submatrix can be multiplied with vectors that use the same layout as iscol.

7766:     Some matrix types place restrictions on the row and column indices, such
7767:     as that they be sorted or that they be equal to each other.

7769:     The index sets may not have duplicate entries.

7771:       The first time this is called you should use a cll of MAT_INITIAL_MATRIX;
7772:    the MatCreateSubMatrix() routine will create the newmat for you. Any additional calls
7773:    to this routine with a mat of the same nonzero structure and with a cll of MAT_REUSE_MATRIX
7774:    will reuse the matrix generated the first time.  You should call MatDestroy() on newmat when
7775:    you are finished using it.

7777:     The communicator of the newly obtained matrix is ALWAYS the same as the communicator of
7778:     the input matrix.

7780:     If iscol is NULL then all columns are obtained (not supported in Fortran).

7782:    Example usage:
7783:    Consider the following 8x8 matrix with 34 nonzero values that is
7784:    assembled across 3 processors. Let's assume that proc0 owns 3 rows,
7785:    proc1 owns 3 rows, proc2 owns 2 rows. This division can be shown
7786:    as follows:

7788: .vb
7789:             1  2  0  |  0  3  0  |  0  4
7790:     Proc0   0  5  6  |  7  0  0  |  8  0
7791:             9  0 10  | 11  0  0  | 12  0
7792:     -------------------------------------
7793:            13  0 14  | 15 16 17  |  0  0
7794:     Proc1   0 18  0  | 19 20 21  |  0  0
7795:             0  0  0  | 22 23  0  | 24  0
7796:     -------------------------------------
7797:     Proc2  25 26 27  |  0  0 28  | 29  0
7798:            30  0  0  | 31 32 33  |  0 34
7799: .ve

7801:     Suppose isrow = [0 1 | 4 | 6 7] and iscol = [1 2 | 3 4 5 | 6].  The resulting submatrix is

7803: .vb
7804:             2  0  |  0  3  0  |  0
7805:     Proc0   5  6  |  7  0  0  |  8
7806:     -------------------------------
7807:     Proc1  18  0  | 19 20 21  |  0
7808:     -------------------------------
7809:     Proc2  26 27  |  0  0 28  | 29
7810:             0  0  | 31 32 33  |  0
7811: .ve


7814:     Concepts: matrices^submatrices

7816: .seealso: MatCreateSubMatrices()
7817: @*/
7818: PetscErrorCode MatCreateSubMatrix(Mat mat,IS isrow,IS iscol,MatReuse cll,Mat *newmat)
7819: {
7821:   PetscMPIInt    size;
7822:   Mat            *local;
7823:   IS             iscoltmp;

7832:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
7833:   if (cll == MAT_IGNORE_MATRIX) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Cannot use MAT_IGNORE_MATRIX");

7835:   MatCheckPreallocated(mat,1);
7836:   MPI_Comm_size(PetscObjectComm((PetscObject)mat),&size);

7838:   if (!iscol || isrow == iscol) {
7839:     PetscBool   stride;
7840:     PetscMPIInt grabentirematrix = 0,grab;
7841:     PetscObjectTypeCompare((PetscObject)isrow,ISSTRIDE,&stride);
7842:     if (stride) {
7843:       PetscInt first,step,n,rstart,rend;
7844:       ISStrideGetInfo(isrow,&first,&step);
7845:       if (step == 1) {
7846:         MatGetOwnershipRange(mat,&rstart,&rend);
7847:         if (rstart == first) {
7848:           ISGetLocalSize(isrow,&n);
7849:           if (n == rend-rstart) {
7850:             grabentirematrix = 1;
7851:           }
7852:         }
7853:       }
7854:     }
7855:     MPIU_Allreduce(&grabentirematrix,&grab,1,MPI_INT,MPI_MIN,PetscObjectComm((PetscObject)mat));
7856:     if (grab) {
7857:       PetscInfo(mat,"Getting entire matrix as submatrix\n");
7858:       if (cll == MAT_INITIAL_MATRIX) {
7859:         *newmat = mat;
7860:         PetscObjectReference((PetscObject)mat);
7861:       }
7862:       return(0);
7863:     }
7864:   }

7866:   if (!iscol) {
7867:     ISCreateStride(PetscObjectComm((PetscObject)mat),mat->cmap->n,mat->cmap->rstart,1,&iscoltmp);
7868:   } else {
7869:     iscoltmp = iscol;
7870:   }

7872:   /* if original matrix is on just one processor then use submatrix generated */
7873:   if (mat->ops->createsubmatrices && !mat->ops->createsubmatrix && size == 1 && cll == MAT_REUSE_MATRIX) {
7874:     MatCreateSubMatrices(mat,1,&isrow,&iscoltmp,MAT_REUSE_MATRIX,&newmat);
7875:     if (!iscol) {ISDestroy(&iscoltmp);}
7876:     return(0);
7877:   } else if (mat->ops->createsubmatrices && !mat->ops->createsubmatrix && size == 1) {
7878:     MatCreateSubMatrices(mat,1,&isrow,&iscoltmp,MAT_INITIAL_MATRIX,&local);
7879:     *newmat = *local;
7880:     PetscFree(local);
7881:     if (!iscol) {ISDestroy(&iscoltmp);}
7882:     return(0);
7883:   } else if (!mat->ops->createsubmatrix) {
7884:     /* Create a new matrix type that implements the operation using the full matrix */
7885:     PetscLogEventBegin(MAT_CreateSubMat,mat,0,0,0);
7886:     switch (cll) {
7887:     case MAT_INITIAL_MATRIX:
7888:       MatCreateSubMatrixVirtual(mat,isrow,iscoltmp,newmat);
7889:       break;
7890:     case MAT_REUSE_MATRIX:
7891:       MatSubMatrixVirtualUpdate(*newmat,mat,isrow,iscoltmp);
7892:       break;
7893:     default: SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_OUTOFRANGE,"Invalid MatReuse, must be either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX");
7894:     }
7895:     PetscLogEventEnd(MAT_CreateSubMat,mat,0,0,0);
7896:     if (!iscol) {ISDestroy(&iscoltmp);}
7897:     return(0);
7898:   }

7900:   if (!mat->ops->createsubmatrix) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
7901:   PetscLogEventBegin(MAT_CreateSubMat,mat,0,0,0);
7902:   (*mat->ops->createsubmatrix)(mat,isrow,iscoltmp,cll,newmat);
7903:   PetscLogEventEnd(MAT_CreateSubMat,mat,0,0,0);
7904:   if (!iscol) {ISDestroy(&iscoltmp);}
7905:   if (*newmat && cll == MAT_INITIAL_MATRIX) {PetscObjectStateIncrease((PetscObject)*newmat);}
7906:   return(0);
7907: }
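
/*
   Illustrative usage sketch (not part of the PETSc source): extracting a parallel submatrix with
   MatCreateSubMatrix(). A is assumed to be an assembled parallel matrix; nrows/rows and ncols/cols
   are hypothetical per-process arrays describing the rows and "diagonal part" columns each
   process wants.

.vb
     IS  isrow,iscol;
     Mat B;

     ISCreateGeneral(PETSC_COMM_WORLD,nrows,rows,PETSC_COPY_VALUES,&isrow);
     ISCreateGeneral(PETSC_COMM_WORLD,ncols,cols,PETSC_COPY_VALUES,&iscol);
     MatCreateSubMatrix(A,isrow,iscol,MAT_INITIAL_MATRIX,&B);
     ....
     MatCreateSubMatrix(A,isrow,iscol,MAT_REUSE_MATRIX,&B);
     MatDestroy(&B);
     ISDestroy(&isrow);
     ISDestroy(&iscol);
.ve

   The second call reuses B, which is valid as long as A keeps the same nonzero structure.
*/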

7909: /*@
7910:    MatStashSetInitialSize - sets the sizes of the matrix stash, which is
7911:    used during the assembly process to store values that belong to
7912:    other processors.

7914:    Not Collective

7916:    Input Parameters:
7917: +  mat   - the matrix
7918: .  size  - the initial size of the stash.
7919: -  bsize - the initial size of the block-stash (if used).

7921:    Options Database Keys:
7922: +   -matstash_initial_size <size> or <size0,size1,...sizep-1>
7923: -   -matstash_block_initial_size <bsize>  or <bsize0,bsize1,...bsizep-1>

7925:    Level: intermediate

7927:    Notes:
7928:      The block-stash is used for values set with MatSetValuesBlocked() while
7929:      the stash is used for values set with MatSetValues()

7931:      Run with the option -info and look for output of the form
7932:      MatAssemblyBegin_MPIXXX:Stash has MM entries, uses nn mallocs.
7933:      to determine the appropriate value, MM, to use for size and
7934:      MatAssemblyBegin_MPIXXX:Block-Stash has BMM entries, uses nn mallocs.
7935:      to determine the value, BMM, to use for bsize.

7937:    Concepts: stash^setting matrix size
7938:    Concepts: matrices^stash

7940: .seealso: MatAssemblyBegin(), MatAssemblyEnd(), Mat, MatStashGetInfo()

7942: @*/
7943: PetscErrorCode MatStashSetInitialSize(Mat mat,PetscInt size, PetscInt bsize)
7944: {

7950:   MatStashSetInitialSize_Private(&mat->stash,size);
7951:   MatStashSetInitialSize_Private(&mat->bstash,bsize);
7952:   return(0);
7953: }
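
/*
   Illustrative usage sketch (not part of the PETSc source): pre-sizing the stash before inserting
   values destined for other processes, so that assembly avoids repeated mallocs. The value 10000
   is an arbitrary example, typically taken from the stash sizes reported by a previous run with
   -info; m, idxm, n, idxn and v are hypothetical.

.vb
     MatStashSetInitialSize(A,10000,0);
     ....
     MatSetValues(A,m,idxm,n,idxn,v,INSERT_VALUES);
     MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
     MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
.ve
*/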

7955: /*@
7956:    MatInterpolateAdd - w = y + A*x or w = y + A'*x, depending on the shape of
7957:      the matrix

7959:    Neighbor-wise Collective on Mat

7961:    Input Parameters:
7962: +  A   - the matrix
7963: .  x,y - the vectors
7964: -  w - where the result is stored

7966:    Level: intermediate

7968:    Notes:
7969:     w may be the same vector as y.

7971:     This allows one to use either the restriction or interpolation (its transpose)
7972:     matrix to do the interpolation

7974:     Concepts: interpolation

7976: .seealso: MatMultAdd(), MatMultTransposeAdd(), MatRestrict()

7978: @*/
7979: PetscErrorCode MatInterpolateAdd(Mat A,Vec x,Vec y,Vec w)
7980: {
7982:   PetscInt       M,N,Ny;

7990:   MatCheckPreallocated(A,1);
7991:   MatGetSize(A,&M,&N);
7992:   VecGetSize(y,&Ny);
7993:   if (M == Ny) {
7994:     MatMultAdd(A,x,y,w);
7995:   } else {
7996:     MatMultTransposeAdd(A,x,y,w);
7997:   }
7998:   return(0);
7999: }

8001: /*@
8002:    MatInterpolate - y = A*x or A'*x depending on the shape of
8003:      the matrix

8005:    Neighbor-wise Collective on Mat

8007:    Input Parameters:
8008: +  A   - the matrix
8009: -  x,y - the vectors

8011:    Level: intermediate

8013:    Notes:
8014:     This allows one to use either the restriction or interpolation (its transpose)
8015:     matrix to do the interpolation

8017:    Concepts: matrices^interpolation

8019: .seealso: MatMultAdd(), MatMultTransposeAdd(), MatRestrict()

8021: @*/
8022: PetscErrorCode MatInterpolate(Mat A,Vec x,Vec y)
8023: {
8025:   PetscInt       M,N,Ny;

8032:   MatCheckPreallocated(A,1);
8033:   MatGetSize(A,&M,&N);
8034:   VecGetSize(y,&Ny);
8035:   if (M == Ny) {
8036:     MatMult(A,x,y);
8037:   } else {
8038:     MatMultTranspose(A,x,y);
8039:   }
8040:   return(0);
8041: }

8043: /*@
8044:    MatRestrict - y = A*x or A'*x

8046:    Neighbor-wise Collective on Mat

8048:    Input Parameters:
8049: +  A   - the matrix
8050: -  x,y - the vectors

8052:    Level: intermediate

8054:    Notes:
8055:     This allows one to use either the restriction or interpolation (its transpose)
8056:     matrix to do the restriction

8058:    Concepts: matrices^restriction

8060: .seealso: MatMultAdd(), MatMultTransposeAdd(), MatInterpolate()

8062: @*/
8063: PetscErrorCode MatRestrict(Mat A,Vec x,Vec y)
8064: {
8066:   PetscInt       M,N,Ny;

8073:   MatCheckPreallocated(A,1);

8075:   MatGetSize(A,&M,&N);
8076:   VecGetSize(y,&Ny);
8077:   if (M == Ny) {
8078:     MatMult(A,x,y);
8079:   } else {
8080:     MatMultTranspose(A,x,y);
8081:   }
8082:   return(0);
8083: }
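
/*
   Illustrative usage sketch (not part of the PETSc source): using one rectangular matrix P for
   both interpolation and restriction. Here P is assumed to map coarse vectors to fine vectors
   (fine rows, coarse columns); xc is a coarse-grid vector and xf/yf/wf are fine-grid vectors, all
   hypothetical. With these shapes MatInterpolate() applies P, MatRestrict() applies P', and
   MatInterpolateAdd() computes wf = yf + P*xc.

.vb
     MatInterpolate(P,xc,xf);
     MatRestrict(P,xf,xc);
     MatInterpolateAdd(P,xc,yf,wf);
.ve
*/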

8085: /*@
8086:    MatGetNullSpace - retrieves the null space attached to a matrix (if any).

8088:    Logically Collective on Mat and MatNullSpace

8090:    Input Parameter:
8091: .  mat - the matrix

   Output Parameter:
8092: .  nullsp - the null space object, or NULL if none has been attached

8094:    Level: developer

8096:    Concepts: null space^attaching to matrix

8098: .seealso: MatCreate(), MatNullSpaceCreate(), MatSetNearNullSpace(), MatSetNullSpace()
8099: @*/
8100: PetscErrorCode MatGetNullSpace(Mat mat, MatNullSpace *nullsp)
8101: {
8106:   *nullsp = mat->nullsp;
8107:   return(0);
8108: }

8110: /*@
8111:    MatSetNullSpace - attaches a null space to a matrix.

8113:    Logically Collective on Mat and MatNullSpace

8115:    Input Parameters:
8116: +  mat - the matrix
8117: -  nullsp - the null space object

8119:    Level: advanced

8121:    Notes:
8122:       This null space is used by the linear solvers. Overwrites any previous null space that may have been attached

8124:       For inconsistent singular systems (linear systems where the right hand side is not in the range of the operator) you also likely should
8125:       call MatSetTransposeNullSpace(). This allows the linear system to be solved in a least squares sense.

8127:       You can remove the null space by calling this routine with a nullsp of NULL.


8130:       The fundamental theorem of linear algebra (Gilbert Strang, Introduction to Applied Mathematics, page 72) states that
8131:    for a matrix A mapping R^n to R^m (m rows, n columns), R^n is the direct sum of the null space of A, n(A), and the range of A^T, R(A^T).
8132:    Similarly, R^m is the direct sum of n(A^T) and R(A).  Hence the linear system A x = b has a solution only if b is in R(A) (or, equivalently, b is orthogonal to
8133:    n(A^T)), and if x is a solution then x + alpha n(A) is a solution for any alpha. The minimum norm solution is orthogonal to n(A). For problems without a solution,
8134:    the solution that minimizes the norm of the residual (the least squares solution) can be obtained by solving A x = \hat{b}, where \hat{b} is b orthogonalized against n(A^T).

8136:       Krylov solvers can produce the minimal norm solution to the least squares problem by utilizing MatNullSpaceRemove().

8138:     If the matrix is known to be symmetric, because it is an SBAIJ matrix or because one has called MatSetOption(mat,MAT_SYMMETRIC,PETSC_TRUE) or MatSetOption(mat,MAT_SYMMETRY_ETERNAL,PETSC_TRUE), this
8139:     routine also automatically calls MatSetTransposeNullSpace().

8141:    Concepts: null space^attaching to matrix

8143: .seealso: MatCreate(), MatNullSpaceCreate(), MatSetNearNullSpace(), MatGetNullSpace(), MatSetTransposeNullSpace(), MatGetTransposeNullSpace(), MatNullSpaceRemove()
8144: @*/
8145: PetscErrorCode MatSetNullSpace(Mat mat,MatNullSpace nullsp)
8146: {

8153:   MatCheckPreallocated(mat,1);
8154:   if (nullsp) {PetscObjectReference((PetscObject)nullsp);}
8155:   MatNullSpaceDestroy(&mat->nullsp);
8156:   mat->nullsp = nullsp;
8157:   if (mat->symmetric_set && mat->symmetric) {
8158:     MatSetTransposeNullSpace(mat,nullsp);
8159:   }
8160:   return(0);
8161: }
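
/*
   Illustrative usage sketch (not part of the PETSc source): attaching the constant null space
   (appropriate, for example, for a pure Neumann Laplacian) before solving with KSP. The matrix
   keeps its own reference to the null space, so the local reference can be destroyed immediately;
   A, ksp, b and x are hypothetical, previously created objects.

.vb
     MatNullSpace nullsp;

     MatNullSpaceCreate(PETSC_COMM_WORLD,PETSC_TRUE,0,NULL,&nullsp);
     MatSetNullSpace(A,nullsp);
     MatNullSpaceDestroy(&nullsp);
     KSPSetOperators(ksp,A,A);
     KSPSolve(ksp,b,x);
.ve
*/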

8163: /*@
8164:    MatGetTransposeNullSpace - retrieves the null space of the transpose of a matrix.

8166:    Logically Collective on Mat and MatNullSpace

8168:    Input Parameter:
8169: .  mat - the matrix

   Output Parameter:
8170: .  nullsp - the null space object of the transpose, or NULL if none has been attached

8172:    Level: developer

8174:    Concepts: null space^attaching to matrix

8176: .seealso: MatCreate(), MatNullSpaceCreate(), MatSetNearNullSpace(), MatSetTransposeNullSpace(), MatSetNullSpace(), MatGetNullSpace()
8177: @*/
8178: PetscErrorCode MatGetTransposeNullSpace(Mat mat, MatNullSpace *nullsp)
8179: {
8184:   *nullsp = mat->transnullsp;
8185:   return(0);
8186: }

8188: /*@
8189:    MatSetTransposeNullSpace - attaches the null space of the transpose of the matrix (that is, the null space of A') to the matrix.

8191:    Logically Collective on Mat and MatNullSpace

8193:    Input Parameters:
8194: +  mat - the matrix
8195: -  nullsp - the null space object

8197:    Level: advanced

8199:    Notes:
8200:       For inconsistent singular systems (linear systems where the right hand side is not in the range of the operator) this allows the linear system to be solved in a least squares sense.
8201:       You must also call MatSetNullSpace()


8204:       The fundamental theorem of linear algebra (Gilbert Strang, Introduction to Applied Mathematics, page 72) states that
8205:    for a matrix A mapping R^n to R^m (m rows, n columns), R^n is the direct sum of the null space of A, n(A), and the range of A^T, R(A^T).
8206:    Similarly, R^m is the direct sum of n(A^T) and R(A).  Hence the linear system A x = b has a solution only if b is in R(A) (or, equivalently, b is orthogonal to
8207:    n(A^T)), and if x is a solution then x + alpha n(A) is a solution for any alpha. The minimum norm solution is orthogonal to n(A). For problems without a solution,
8208:    the solution that minimizes the norm of the residual (the least squares solution) can be obtained by solving A x = \hat{b}, where \hat{b} is b orthogonalized against n(A^T).

8210:       Krylov solvers can produce the minimal norm solution to the least squares problem by utilizing MatNullSpaceRemove().

8212:    Concepts: null space^attaching to matrix

8214: .seealso: MatCreate(), MatNullSpaceCreate(), MatSetNearNullSpace(), MatGetNullSpace(), MatSetNullSpace(), MatGetTransposeNullSpace(), MatNullSpaceRemove()
8215: @*/
8216: PetscErrorCode MatSetTransposeNullSpace(Mat mat,MatNullSpace nullsp)
8217: {

8224:   MatCheckPreallocated(mat,1);
8225:   PetscObjectReference((PetscObject)nullsp);
8226:   MatNullSpaceDestroy(&mat->transnullsp);
8227:   mat->transnullsp = nullsp;
8228:   return(0);
8229: }

8231: /*@
8232:    MatSetNearNullSpace - attaches a null space to a matrix, which is often the null space (rigid body modes) of the operator without boundary conditions.
8233:         This null space will be used to provide near null space vectors to a multigrid preconditioner built from this matrix.

8235:    Logically Collective on Mat and MatNullSpace

8237:    Input Parameters:
8238: +  mat - the matrix
8239: -  nullsp - the null space object

8241:    Level: advanced

8243:    Notes:
8244:       Overwrites any previous near null space that may have been attached

8246:       You can remove the null space by calling this routine with a nullsp of NULL.

8248:    Concepts: null space^attaching to matrix

8250: .seealso: MatCreate(), MatNullSpaceCreate(), MatSetNullSpace(), MatNullSpaceCreateRigidBody(), MatGetNearNullSpace()
8251: @*/
8252: PetscErrorCode MatSetNearNullSpace(Mat mat,MatNullSpace nullsp)
8253: {

8260:   MatCheckPreallocated(mat,1);
8261:   if (nullsp) {PetscObjectReference((PetscObject)nullsp);}
8262:   MatNullSpaceDestroy(&mat->nearnullsp);
8263:   mat->nearnullsp = nullsp;
8264:   return(0);
8265: }
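
/*
   Illustrative usage sketch (not part of the PETSc source): providing rigid body modes as a near
   null space for a 2d/3d elasticity operator so that algebraic multigrid (e.g. PCGAMG) can use
   them. coords is a hypothetical vector of nodal coordinates whose block size equals the spatial
   dimension.

.vb
     MatNullSpace nearnull;

     MatNullSpaceCreateRigidBody(coords,&nearnull);
     MatSetNearNullSpace(A,nearnull);
     MatNullSpaceDestroy(&nearnull);
.ve
*/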

8267: /*@
8268:    MatGetNearNullSpace - Gets the near null space attached with MatSetNearNullSpace().

8270:    Not Collective

8272:    Input Parameter:
8273: .  mat - the matrix

8275:    Output Parameters:
8276: .  nullsp - the null space object, NULL if not set

8278:    Level: developer

8280:    Concepts: null space^attaching to matrix

8282: .seealso: MatSetNearNullSpace(), MatGetNullSpace(), MatNullSpaceCreate()
8283: @*/
8284: PetscErrorCode MatGetNearNullSpace(Mat mat,MatNullSpace *nullsp)
8285: {
8290:   MatCheckPreallocated(mat,1);
8291:   *nullsp = mat->nearnullsp;
8292:   return(0);
8293: }

8295: /*@C
8296:    MatICCFactor - Performs in-place incomplete Cholesky factorization of matrix.

8298:    Collective on Mat

8300:    Input Parameters:
8301: +  mat - the matrix
8302: .  row - row/column permutation
8303: -  info - information on the desired factorization process, including the expected fill factor (>= 1.0)
8304:           and the level of fill for ICC(k); see MatFactorInfo

8306:    Notes:
8307:    Probably really in-place only when level of fill is zero, otherwise allocates
8308:    new space to store factored matrix and deletes previous memory.

8310:    Most users should employ the simplified KSP interface for linear solvers
8311:    instead of working directly with matrix algebra routines such as this.
8312:    See, e.g., KSPCreate().

8314:    Level: developer

8316:    Concepts: matrices^incomplete Cholesky factorization
8317:    Concepts: Cholesky factorization

8319: .seealso: MatICCFactorSymbolic(), MatLUFactorNumeric(), MatCholeskyFactor()

8321:     Developer Note: the Fortran interface is not autogenerated as the F90
8322:     interface definition cannot be generated correctly [due to MatFactorInfo]

8324: @*/
8325: PetscErrorCode MatICCFactor(Mat mat,IS row,const MatFactorInfo *info)
8326: {

8334:   if (mat->rmap->N != mat->cmap->N) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONG,"matrix must be square");
8335:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
8336:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
8337:   if (!mat->ops->iccfactor) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
8338:   MatCheckPreallocated(mat,1);
8339:   (*mat->ops->iccfactor)(mat,row,info);
8340:   PetscObjectStateIncrease((PetscObject)mat);
8341:   return(0);
8342: }
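
/*
   Illustrative usage sketch (not part of the PETSc source): an in-place ICC(0) factorization,
   assuming mat is a symmetric, assembled matrix of a type that implements MatICCFactor().
   Most users should instead use KSP with PCICC.

.vb
     IS            rowperm,colperm;
     MatFactorInfo info;

     MatGetOrdering(mat,MATORDERINGNATURAL,&rowperm,&colperm);
     MatFactorInfoInitialize(&info);
     info.fill   = 1.0;
     info.levels = 0;
     MatICCFactor(mat,rowperm,&info);
     ISDestroy(&rowperm);
     ISDestroy(&colperm);
.ve
*/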

8344: /*@
8345:    MatDiagonalScaleLocal - Scales columns of a matrix given the scaling values including the
8346:          ghosted ones.

8348:    Not Collective

8350:    Input Parameters:
8351: +  mat - the matrix
8352: -  diag - the diagonal values, including ghost ones

8354:    Level: developer

8356:    Notes: Works only for MPIAIJ and MPIBAIJ matrices

8358: .seealso: MatDiagonalScale()
8359: @*/
8360: PetscErrorCode MatDiagonalScaleLocal(Mat mat,Vec diag)
8361: {
8363:   PetscMPIInt    size;


8370:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Matrix must be already assembled");
8371:   PetscLogEventBegin(MAT_Scale,mat,0,0,0);
8372:   MPI_Comm_size(PetscObjectComm((PetscObject)mat),&size);
8373:   if (size == 1) {
8374:     PetscInt n,m;
8375:     VecGetSize(diag,&n);
8376:     MatGetSize(mat,0,&m);
8377:     if (m == n) {
8378:       MatDiagonalScale(mat,0,diag);
8379:     } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Only supported for sequential matrices when no ghost points/periodic conditions");
8380:   } else {
8381:     PetscUseMethod(mat,"MatDiagonalScaleLocal_C",(Mat,Vec),(mat,diag));
8382:   }
8383:   PetscLogEventEnd(MAT_Scale,mat,0,0,0);
8384:   PetscObjectStateIncrease((PetscObject)mat);
8385:   return(0);
8386: }

8388: /*@
8389:    MatGetInertia - Gets the inertia from a factored matrix

8391:    Collective on Mat

8393:    Input Parameter:
8394: .  mat - the matrix

8396:    Output Parameters:
8397: +   nneg - number of negative eigenvalues
8398: .   nzero - number of zero eigenvalues
8399: -   npos - number of positive eigenvalues

8401:    Level: advanced

8403:    Notes: Matrix must have been factored by MatCholeskyFactor()


8406: @*/
8407: PetscErrorCode MatGetInertia(Mat mat,PetscInt *nneg,PetscInt *nzero,PetscInt *npos)
8408: {

8414:   if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
8415:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Numeric factor mat is not assembled");
8416:   if (!mat->ops->getinertia) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
8417:   (*mat->ops->getinertia)(mat,nneg,nzero,npos);
8418:   return(0);
8419: }

8421: /* ----------------------------------------------------------------*/
8422: /*@C
8423:    MatSolves - Solves A x = b, given a factored matrix, for a collection of vectors

8425:    Neighbor-wise Collective on Mat and Vecs

8427:    Input Parameters:
8428: +  mat - the factored matrix
8429: -  b - the right-hand-side vectors

8431:    Output Parameter:
8432: .  x - the result vectors

8434:    Notes:
8435:    The vectors b and x cannot be the same.  I.e., one cannot
8436:    call MatSolves(A,x,x).

8439:    Most users should employ the simplified KSP interface for linear solvers
8440:    instead of working directly with matrix algebra routines such as this.
8441:    See, e.g., KSPCreate().

8443:    Level: developer

8445:    Concepts: matrices^triangular solves

8447: .seealso: MatSolveAdd(), MatSolveTranspose(), MatSolveTransposeAdd(), MatSolve()
8448: @*/
8449: PetscErrorCode MatSolves(Mat mat,Vecs b,Vecs x)
8450: {

8456:   if (x == b) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"x and b must be different vectors");
8457:   if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
8458:   if (!mat->rmap->N && !mat->cmap->N) return(0);

8460:   if (!mat->ops->solves) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
8461:   MatCheckPreallocated(mat,1);
8462:   PetscLogEventBegin(MAT_Solves,mat,0,0,0);
8463:   (*mat->ops->solves)(mat,b,x);
8464:   PetscLogEventEnd(MAT_Solves,mat,0,0,0);
8465:   return(0);
8466: }

8468: /*@
8469:    MatIsSymmetric - Test whether a matrix is symmetric

8471:    Collective on Mat

8473:    Input Parameters:
8474: +  A - the matrix to test
8475: -  tol - difference between a value and its transposed counterpart of less than this amount is treated as equal (use 0.0 to require an exact transpose)

8477:    Output Parameters:
8478: .  flg - the result

8480:    Notes: For real matrices, MatIsSymmetric() and MatIsHermitian() return identical results

8482:    Level: intermediate

8484:    Concepts: matrix^symmetry

8486: .seealso: MatTranspose(), MatIsTranspose(), MatIsHermitian(), MatIsStructurallySymmetric(), MatSetOption(), MatIsSymmetricKnown()
8487: @*/
8488: PetscErrorCode MatIsSymmetric(Mat A,PetscReal tol,PetscBool  *flg)
8489: {


8496:   if (!A->symmetric_set) {
8497:     if (!A->ops->issymmetric) {
8498:       MatType mattype;
8499:       MatGetType(A,&mattype);
8500:       SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Matrix of type <%s> does not support checking for symmetric",mattype);
8501:     }
8502:     (*A->ops->issymmetric)(A,tol,flg);
8503:     if (!tol) {
8504:       A->symmetric_set = PETSC_TRUE;
8505:       A->symmetric     = *flg;
8506:       if (A->symmetric) {
8507:         A->structurally_symmetric_set = PETSC_TRUE;
8508:         A->structurally_symmetric     = PETSC_TRUE;
8509:       }
8510:     }
8511:   } else if (A->symmetric) {
8512:     *flg = PETSC_TRUE;
8513:   } else if (!tol) {
8514:     *flg = PETSC_FALSE;
8515:   } else {
8516:     if (!A->ops->issymmetric) {
8517:       MatType mattype;
8518:       MatGetType(A,&mattype);
8519:       SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Matrix of type <%s> does not support checking for symmetric",mattype);
8520:     }
8521:     (*A->ops->issymmetric)(A,tol,flg);
8522:   }
8523:   return(0);
8524: }
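
/*
   Illustrative usage sketch (not part of the PETSc source): testing for symmetry up to a small
   tolerance and recording the result so subsequent queries are cheap; assumes the type of A
   supports the symmetry check.

.vb
     PetscBool issym;

     MatIsSymmetric(A,1.e-10,&issym);
     if (issym) {
       MatSetOption(A,MAT_SYMMETRIC,PETSC_TRUE);
     }
.ve
*/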

8526: /*@
8527:    MatIsHermitian - Test whether a matrix is Hermitian

8529:    Collective on Mat

8531:    Input Parameters:
8532: +  A - the matrix to test
8533: -  tol - difference between a value and its conjugate-transposed counterpart of less than this amount is treated as equal (use 0.0 to require an exactly Hermitian matrix)

8535:    Output Parameters:
8536: .  flg - the result

8538:    Level: intermediate

8540:    Concepts: matrix^symmetry

8542: .seealso: MatTranspose(), MatIsTranspose(), MatIsHermitian(), MatIsStructurallySymmetric(), MatSetOption(),
8543:           MatIsSymmetricKnown(), MatIsSymmetric()
8544: @*/
8545: PetscErrorCode MatIsHermitian(Mat A,PetscReal tol,PetscBool  *flg)
8546: {


8553:   if (!A->hermitian_set) {
8554:     if (!A->ops->ishermitian) {
8555:       MatType mattype;
8556:       MatGetType(A,&mattype);
8557:       SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Matrix of type <%s> does not support checking for hermitian",mattype);
8558:     }
8559:     (*A->ops->ishermitian)(A,tol,flg);
8560:     if (!tol) {
8561:       A->hermitian_set = PETSC_TRUE;
8562:       A->hermitian     = *flg;
8563:       if (A->hermitian) {
8564:         A->structurally_symmetric_set = PETSC_TRUE;
8565:         A->structurally_symmetric     = PETSC_TRUE;
8566:       }
8567:     }
8568:   } else if (A->hermitian) {
8569:     *flg = PETSC_TRUE;
8570:   } else if (!tol) {
8571:     *flg = PETSC_FALSE;
8572:   } else {
8573:     if (!A->ops->ishermitian) {
8574:       MatType mattype;
8575:       MatGetType(A,&mattype);
8576:       SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Matrix of type <%s> does not support checking for hermitian",mattype);
8577:     }
8578:     (*A->ops->ishermitian)(A,tol,flg);
8579:   }
8580:   return(0);
8581: }

8583: /*@
8584:    MatIsSymmetricKnown - Checks the flag on the matrix to see if it is symmetric.

8586:    Not Collective

8588:    Input Parameter:
8589: .  A - the matrix to check

8591:    Output Parameters:
8592: +  set - if the symmetric flag is set (this tells you if the next flag is valid)
8593: -  flg - the result

8595:    Level: advanced

8597:    Concepts: matrix^symmetry

8599:    Note: Does not check the matrix values directly, so this may return unknown (set = PETSC_FALSE). Use MatIsSymmetric()
8600:          if you want it explicitly checked

8602: .seealso: MatTranspose(), MatIsTranspose(), MatIsHermitian(), MatIsStructurallySymmetric(), MatSetOption(), MatIsSymmetric()
8603: @*/
8604: PetscErrorCode MatIsSymmetricKnown(Mat A,PetscBool  *set,PetscBool  *flg)
8605: {
8610:   if (A->symmetric_set) {
8611:     *set = PETSC_TRUE;
8612:     *flg = A->symmetric;
8613:   } else {
8614:     *set = PETSC_FALSE;
8615:   }
8616:   return(0);
8617: }

8619: /*@
8620:    MatIsHermitianKnown - Checks the flag on the matrix to see if it is hermitian.

8622:    Not Collective

8624:    Input Parameter:
8625: .  A - the matrix to check

8627:    Output Parameters:
8628: +  set - if the hermitian flag is set (this tells you if the next flag is valid)
8629: -  flg - the result

8631:    Level: advanced

8633:    Concepts: matrix^symmetry

8635:    Note: Does not check the matrix values directly, so this may return unknown (set = PETSC_FALSE). Use MatIsHermitian()
8636:          if you want it explicitly checked

8638: .seealso: MatTranspose(), MatIsTranspose(), MatIsHermitian(), MatIsStructurallySymmetric(), MatSetOption(), MatIsSymmetric()
8639: @*/
8640: PetscErrorCode MatIsHermitianKnown(Mat A,PetscBool  *set,PetscBool  *flg)
8641: {
8646:   if (A->hermitian_set) {
8647:     *set = PETSC_TRUE;
8648:     *flg = A->hermitian;
8649:   } else {
8650:     *set = PETSC_FALSE;
8651:   }
8652:   return(0);
8653: }
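
/*
   Illustrative usage sketch (not part of the PETSc source): the usual pattern for the ...Known()
   queries, falling back to an explicit (and potentially expensive) check only when the flag has
   never been set.

.vb
     PetscBool set,flg;

     MatIsSymmetricKnown(A,&set,&flg);
     if (!set) {
       MatIsSymmetric(A,0.0,&flg);
     }
.ve
*/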

8655: /*@
8656:    MatIsStructurallySymmetric - Test whether a matrix is structurally symmetric

8658:    Collective on Mat

8660:    Input Parameter:
8661: .  A - the matrix to test

8663:    Output Parameters:
8664: .  flg - the result

8666:    Level: intermediate

8668:    Concepts: matrix^symmetry

8670: .seealso: MatTranspose(), MatIsTranspose(), MatIsHermitian(), MatIsSymmetric(), MatSetOption()
8671: @*/
8672: PetscErrorCode MatIsStructurallySymmetric(Mat A,PetscBool  *flg)
8673: {

8679:   if (!A->structurally_symmetric_set) {
8680:     if (!A->ops->isstructurallysymmetric) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Matrix does not support checking for structural symmetric");
8681:     (*A->ops->isstructurallysymmetric)(A,&A->structurally_symmetric);

8683:     A->structurally_symmetric_set = PETSC_TRUE;
8684:   }
8685:   *flg = A->structurally_symmetric;
8686:   return(0);
8687: }

8689: /*@
8690:    MatStashGetInfo - Gets how many values are currently in the matrix stash, i.e. need
8691:        to be communicated to other processors during the MatAssemblyBegin/End() process

8693:     Not collective

8695:    Input Parameter:
8696: .   mat - the matrix

8698:    Output Parameters:
8699: +   nstash   - the size of the stash
8700: .   reallocs - the number of additional mallocs incurred.
8701: .   bnstash   - the size of the block stash
8702: -   breallocs - the number of additional mallocs incurred in the block stash

8704:    Level: advanced

8706: .seealso: MatAssemblyBegin(), MatAssemblyEnd(), Mat, MatStashSetInitialSize()

8708: @*/
8709: PetscErrorCode MatStashGetInfo(Mat mat,PetscInt *nstash,PetscInt *reallocs,PetscInt *bnstash,PetscInt *breallocs)
8710: {

8714:   MatStashGetInfo_Private(&mat->stash,nstash,reallocs);
8715:   MatStashGetInfo_Private(&mat->bstash,bnstash,breallocs);
8716:   return(0);
8717: }

8719: /*@C
8720:    MatCreateVecs - Get vector(s) compatible with the matrix, i.e. with the same
8721:      parallel layout

8723:    Collective on Mat

8725:    Input Parameter:
8726: .  mat - the matrix

8728:    Output Parameters:
8729: +   right - (optional) vector that the matrix can be multiplied against
8730: -   left - (optional) vector that the matrix vector product can be stored in

8732:    Notes:
8733:     The blocksize of the returned vectors is determined by the row and column block sizes set with MatSetBlockSizes() or the single blocksize (same for both) set by MatSetBlockSize().

8735:   These are new vectors which are not owned by the Mat; they should be destroyed with VecDestroy() when no longer needed

8737:   Level: advanced

8739: .seealso: MatCreate(), VecDestroy()
8740: @*/
8741: PetscErrorCode MatCreateVecs(Mat mat,Vec *right,Vec *left)
8742: {

8748:   if (mat->ops->getvecs) {
8749:     (*mat->ops->getvecs)(mat,right,left);
8750:   } else {
8751:     PetscInt rbs,cbs;
8752:     MatGetBlockSizes(mat,&rbs,&cbs);
8753:     if (right) {
8754:       if (mat->cmap->n < 0) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"PetscLayout for columns not yet setup");
8755:       VecCreate(PetscObjectComm((PetscObject)mat),right);
8756:       VecSetSizes(*right,mat->cmap->n,PETSC_DETERMINE);
8757:       VecSetBlockSize(*right,cbs);
8758:       VecSetType(*right,VECSTANDARD);
8759:       PetscLayoutReference(mat->cmap,&(*right)->map);
8760:     }
8761:     if (left) {
8762:       if (mat->rmap->n < 0) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"PetscLayout for rows not yet setup");
8763:       VecCreate(PetscObjectComm((PetscObject)mat),left);
8764:       VecSetSizes(*left,mat->rmap->n,PETSC_DETERMINE);
8765:       VecSetBlockSize(*left,rbs);
8766:       VecSetType(*left,VECSTANDARD);
8767:       PetscLayoutReference(mat->rmap,&(*left)->map);
8768:     }
8769:   }
8770:   return(0);
8771: }
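
/*
   Illustrative usage sketch (not part of the PETSc source): creating work vectors whose layouts
   are compatible with the matrix and releasing them when done; A is assumed to be a previously
   set up matrix.

.vb
     Vec x,b;

     MatCreateVecs(A,&x,&b);
     MatMult(A,x,b);
     VecDestroy(&x);
     VecDestroy(&b);
.ve
*/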

8773: /*@C
8774:    MatFactorInfoInitialize - Initializes a MatFactorInfo data structure
8775:      with default values.

8777:    Not Collective

8779:    Input Parameters:
8780: .    info - the MatFactorInfo data structure


8783:    Notes: The solvers are generally used through the KSP and PC objects, for example
8784:           PCLU, PCILU, PCCHOLESKY, PCICC

8786:    Level: developer

8788: .seealso: MatFactorInfo

8790:     Developer Note: the Fortran interface is not autogenerated as the F90
8791:     interface definition cannot be generated correctly [due to MatFactorInfo]

8793: @*/

8795: PetscErrorCode MatFactorInfoInitialize(MatFactorInfo *info)
8796: {

8800:   PetscMemzero(info,sizeof(MatFactorInfo));
8801:   return(0);
8802: }

8804: /*@
8805:    MatFactorSetSchurIS - Set indices corresponding to the Schur complement you wish to have computed

8807:    Collective on Mat

8809:    Input Parameters:
8810: +  mat - the factored matrix
8811: -  is - the index set defining the Schur indices (0-based)

8813:    Notes:  Call MatFactorSolveSchurComplement() or MatFactorSolveSchurComplementTranspose() after this call to solve a Schur complement system.

8815:    You can call MatFactorGetSchurComplement() or MatFactorCreateSchurComplement() after this call.

8817:    Level: developer

8819:    Concepts:

8821: .seealso: MatGetFactor(), MatFactorGetSchurComplement(), MatFactorRestoreSchurComplement(), MatFactorCreateSchurComplement(), MatFactorSolveSchurComplement(),
8822:           MatFactorSolveSchurComplementTranspose(), MatFactorSolveSchurComplement()

8824: @*/
8825: PetscErrorCode MatFactorSetSchurIS(Mat mat,IS is)
8826: {
8827:   PetscErrorCode ierr,(*f)(Mat,IS);

8835:   if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Only for factored matrix");
8836:   PetscObjectQueryFunction((PetscObject)mat,"MatFactorSetSchurIS_C",&f);
8837:   if (!f) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"The selected MatSolverPackage does not support Schur complement computation. You should use MATSOLVERMUMPS or MATSOLVERMKL_PARDISO");
8838:   if (mat->schur) {
8839:     MatDestroy(&mat->schur);
8840:   }
8841:   (*f)(mat,is);
8842:   if (!mat->schur) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_PLIB,"Schur complement has not been created");
8843:   MatFactorSetUpInPlaceSchur_Private(mat);
8844:   return(0);
8845: }
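
/*
   Illustrative usage sketch (not part of the PETSc source): a typical Schur complement workflow,
   assuming PETSc was configured with MUMPS, that is holds the (0-based) Schur indices, and that
   schurrhs/schursol are hypothetical vectors sized to match the Schur complement.

.vb
     Mat           F;
     IS            rowperm,colperm;
     MatFactorInfo info;

     MatGetFactor(A,MATSOLVERMUMPS,MAT_FACTOR_LU,&F);
     MatFactorSetSchurIS(F,is);
     MatGetOrdering(A,MATORDERINGNATURAL,&rowperm,&colperm);
     MatFactorInfoInitialize(&info);
     MatLUFactorSymbolic(F,A,rowperm,colperm,&info);
     MatLUFactorNumeric(F,A,&info);
     MatFactorSolveSchurComplement(F,schurrhs,schursol);
     ISDestroy(&rowperm);
     ISDestroy(&colperm);
     MatDestroy(&F);
.ve
*/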

8847: /*@
8848:   MatFactorCreateSchurComplement - Create a Schur complement matrix object using Schur data computed during the factorization step

8850:    Logically Collective on Mat

8852:    Input Parameters:
8853: +  F - the factored matrix obtained by calling MatGetFactor() from PETSc-MUMPS interface
8854: .  S - location where to return the Schur complement, can be NULL
8855: -  status - the status of the Schur complement matrix, can be NULL

8857:    Notes:
8858:    You must call MatFactorSetSchurIS() before calling this routine.

8860:    The routine provides a copy of the Schur matrix stored within the solver data structures.
8861:    The caller must destroy the object when it is no longer needed.
8862:    If MatFactorInvertSchurComplement() has been called, the routine gets back the inverse.

8864:    Use MatFactorGetSchurComplement() to get access to the Schur complement matrix inside the factored matrix instead of making a copy of it (which this function does)

8866:    Developer Notes: This routine exists because the representation of the Schur complement within the factor matrix may be different from a standard PETSc
8867:    matrix representation, and we normally do not want to spend the time or memory to make a copy as a regular PETSc matrix.

8869:    See MatCreateSchurComplement() or MatGetSchurComplement() for ways to create virtual or approximate Schur complements.

8871:    Level: advanced

8873:    References:

8875: .seealso: MatGetFactor(), MatFactorSetSchurIS(), MatFactorGetSchurComplement(), MatFactorSchurStatus
8876: @*/
8877: PetscErrorCode MatFactorCreateSchurComplement(Mat F,Mat* S,MatFactorSchurStatus* status)
8878: {

8885:   if (S) {
8886:     PetscErrorCode (*f)(Mat,Mat*);

8888:     PetscObjectQueryFunction((PetscObject)F,"MatFactorCreateSchurComplement_C",&f);
8889:     if (f) {
8890:       (*f)(F,S);
8891:     } else {
8892:       MatDuplicate(F->schur,MAT_COPY_VALUES,S);
8893:     }
8894:   }
8895:   if (status) *status = F->schur_status;
8896:   return(0);
8897: }

8899: /*@
8900:   MatFactorGetSchurComplement - Gets access to a Schur complement matrix using the current Schur data within a factored matrix

8902:    Logically Collective on Mat

8904:    Input Parameters:
8905: +  F - the factored matrix obtained by calling MatGetFactor()
8906: .  *S - location where to return the Schur complement, can be NULL
8907: -  status - the status of the Schur complement matrix, can be NULL

8909:    Notes:
8910:    You must call MatFactorSetSchurIS() before calling this routine.

8912:    Schur complement mode is currently implemented for sequential matrices.
8913:    The routine returns the Schur complement stored within the data structures of the solver.
8914:    If MatFactorInvertSchurComplement() has previously been called, the returned matrix is actually the inverse of the Schur complement.
8915:    The returned matrix should not be destroyed; the caller should call MatFactorRestoreSchurComplement() when the object is no longer needed.

8917:    Use MatFactorCreateSchurComplement() to create a copy of the Schur complement matrix that is within a factored matrix

8919:    See MatCreateSchurComplement() or MatGetSchurComplement() for ways to create virtual or approximate Schur complements.

8921:    Level: advanced

8923:    References:

8925: .seealso: MatGetFactor(), MatFactorSetSchurIS(), MatFactorRestoreSchurComplement(), MatFactorCreateSchurComplement(), MatFactorSchurStatus
8926: @*/
8927: PetscErrorCode MatFactorGetSchurComplement(Mat F,Mat* S,MatFactorSchurStatus* status)
8928: {
8933:   if (S) *S = F->schur;
8934:   if (status) *status = F->schur_status;
8935:   return(0);
8936: }

8938: /*@
8939:   MatFactorRestoreSchurComplement - Restore the Schur complement matrix object obtained from a call to MatFactorGetSchurComplement

8941:    Logically Collective on Mat

8943:    Input Parameters:
8944: +  F - the factored matrix obtained by calling MatGetFactor()
8945: .  *S - location where the Schur complement is stored
8946: -  status - the status of the Schur complement matrix (see MatFactorSchurStatus)

8948:    Notes:

8950:    Level: advanced

8952:    References:

8954: .seealso: MatGetFactor(), MatFactorSetSchurIS(), MatFactorRestoreSchurComplement(), MatFactorCreateSchurComplement(), MatFactorSchurStatus
8955: @*/
8956: PetscErrorCode MatFactorRestoreSchurComplement(Mat F,Mat* S,MatFactorSchurStatus status)
8957: {

8962:   if (S) {
8964:     *S = NULL;
8965:   }
8966:   F->schur_status = status;
8967:   MatFactorUpdateSchurStatus_Private(F);
8968:   return(0);
8969: }

8971: /*@
8972:   MatFactorSolveSchurComplementTranspose - Solve the transpose of the Schur complement system computed during the factorization step

8974:    Logically Collective on Mat

8976:    Input Parameters:
8977: +  F - the factored matrix obtained by calling MatGetFactor()
8978: .  rhs - location where the right hand side of the Schur complement system is stored
8979: -  sol - location where the solution of the Schur complement system has to be returned

8981:    Notes:
8982:    The sizes of the vectors should match the size of the Schur complement

8984:    Must be called after MatFactorSetSchurIS()

8986:    Level: advanced

8988:    References:

8990: .seealso: MatGetFactor(), MatFactorSetSchurIS(), MatFactorSolveSchurComplement()
8991: @*/
8992: PetscErrorCode MatFactorSolveSchurComplementTranspose(Mat F, Vec rhs, Vec sol)
8993: {

9005:   MatFactorFactorizeSchurComplement(F);
9006:   switch (F->schur_status) {
9007:   case MAT_FACTOR_SCHUR_FACTORED:
9008:     MatSolveTranspose(F->schur,rhs,sol);
9009:     break;
9010:   case MAT_FACTOR_SCHUR_INVERTED:
9011:     MatMultTranspose(F->schur,rhs,sol);
9012:     break;
9013:   default:
9014:     SETERRQ1(PetscObjectComm((PetscObject)F),PETSC_ERR_SUP,"Unhandled MatFactorSchurStatus %D",F->schur_status);
9015:     break;
9016:   }
9017:   return(0);
9018: }

9020: /*@
9021:   MatFactorSolveSchurComplement - Solve the Schur complement system computed during the factorization step

9023:    Logically Collective on Mat

9025:    Input Parameters:
9026: +  F - the factored matrix obtained by calling MatGetFactor()
9027: .  rhs - location where the right hand side of the Schur complement system is stored
9028: -  sol - location where the solution of the Schur complement system has to be returned

9030:    Notes:
9031:    The sizes of the vectors should match the size of the Schur complement

9033:    Must be called after MatFactorSetSchurIS()

9035:    Level: advanced

9037:    References:

9039: .seealso: MatGetFactor(), MatFactorSetSchurIS(), MatFactorSolveSchurComplementTranspose()
9040: @*/
9041: PetscErrorCode MatFactorSolveSchurComplement(Mat F, Vec rhs, Vec sol)
9042: {

9054:   MatFactorFactorizeSchurComplement(F);
9055:   switch (F->schur_status) {
9056:   case MAT_FACTOR_SCHUR_FACTORED:
9057:     MatSolve(F->schur,rhs,sol);
9058:     break;
9059:   case MAT_FACTOR_SCHUR_INVERTED:
9060:     MatMult(F->schur,rhs,sol);
9061:     break;
9062:   default:
9063:     SETERRQ1(PetscObjectComm((PetscObject)F),PETSC_ERR_SUP,"Unhandled MatFactorSchurStatus %D",F->schur_status);
9064:     break;
9065:   }
9066:   return(0);
9067: }

9069: /*@
9070:   MatFactorInvertSchurComplement - Invert the Schur complement matrix computed during the factorization step

9072:    Logically Collective on Mat

9074:    Input Parameters:
9075: +  F - the factored matrix obtained by calling MatGetFactor()

9077:    Notes: Must be called after MatFactorSetSchurIS().

9079:    Call MatFactorGetSchurComplement() or  MatFactorCreateSchurComplement() AFTER this call to actually compute the inverse and get access to it.

9081:    Level: advanced

9083:    References:

9085: .seealso: MatGetFactor(), MatFactorSetSchurIS(), MatFactorGetSchurComplement(), MatFactorCreateSchurComplement()
9086: @*/
9087: PetscErrorCode MatFactorInvertSchurComplement(Mat F)
9088: {

9094:   if (F->schur_status == MAT_FACTOR_SCHUR_INVERTED) return(0);
9095:   MatFactorFactorizeSchurComplement(F);
9096:   MatFactorInvertSchurComplement_Private(F);
9097:   F->schur_status = MAT_FACTOR_SCHUR_INVERTED;
9098:   return(0);
9099: }

9101: /*@
9102:   MatFactorFactorizeSchurComplement - Factorize the Schur complement matrix computed during the factorization step

9104:    Logically Collective on Mat

9106:    Input Parameters:
9107: +  F - the factored matrix obtained by calling MatGetFactor()

9109:    Notes: Must be called after MatFactorSetSchurIS().

9111:    Level: advanced

9113:    References:

9115: .seealso: MatGetFactor(), MatFactorSetSchurIS(), MatFactorInvertSchurComplement()
9116: @*/
9117: PetscErrorCode MatFactorFactorizeSchurComplement(Mat F)
9118: {

9124:   if (F->schur_status == MAT_FACTOR_SCHUR_INVERTED || F->schur_status == MAT_FACTOR_SCHUR_FACTORED) return(0);
9125:   MatFactorFactorizeSchurComplement_Private(F);
9126:   F->schur_status = MAT_FACTOR_SCHUR_FACTORED;
9127:   return(0);
9128: }

9130: /*@
9131:    MatPtAP - Creates the matrix product C = P^T * A * P

9133:    Neighbor-wise Collective on Mat

9135:    Input Parameters:
9136: +  A - the matrix
9137: .  P - the projection matrix
9138: .  scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
9139: -  fill - expected fill as ratio of nnz(C)/(nnz(A) + nnz(P)), use PETSC_DEFAULT if you do not have a good estimate
9140:           if the result is a dense matrix this is irrelevant

9142:    Output Parameters:
9143: .  C - the product matrix

9145:    Notes:
9146:    C will be created and must be destroyed by the user with MatDestroy().

9148:    This routine is currently only implemented for pairs of AIJ matrices and classes
9149:    which inherit from AIJ.

9151:    Level: intermediate

9153: .seealso: MatPtAPSymbolic(), MatPtAPNumeric(), MatMatMult(), MatRARt()
9154: @*/
9155: PetscErrorCode MatPtAP(Mat A,Mat P,MatReuse scall,PetscReal fill,Mat *C)
9156: {
9158:   PetscErrorCode (*fA)(Mat,Mat,MatReuse,PetscReal,Mat*);
9159:   PetscErrorCode (*fP)(Mat,Mat,MatReuse,PetscReal,Mat*);
9160:   PetscErrorCode (*ptap)(Mat,Mat,MatReuse,PetscReal,Mat*)=NULL;
9161:   PetscBool      viatranspose=PETSC_FALSE,viamatmatmatmult=PETSC_FALSE;

9164:   PetscOptionsGetBool(((PetscObject)A)->options,((PetscObject)A)->prefix,"-matptap_viatranspose",&viatranspose,NULL);
9165:   PetscOptionsGetBool(((PetscObject)A)->options,((PetscObject)A)->prefix,"-matptap_viamatmatmatmult",&viamatmatmatmult,NULL);

9169:   MatCheckPreallocated(A,1);
9170:   if (scall == MAT_INPLACE_MATRIX) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Inplace product not supported");
9171:   if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9172:   if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9175:   MatCheckPreallocated(P,2);
9176:   if (!P->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9177:   if (P->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");

9179:   if (A->rmap->N!= A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix A must be square, %D != %D",A->rmap->N,A->cmap->N);
9180:   if (P->rmap->N != A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",P->rmap->N,A->cmap->N);
9181:   if (fill == PETSC_DEFAULT || fill == PETSC_DECIDE) fill = 2.0;
9182:   if (fill < 1.0) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Expected fill=%g must be >= 1.0",(double)fill);

9184:   if (scall == MAT_REUSE_MATRIX) {
9187:     if (viatranspose || viamatmatmatmult) {
9188:       Mat Pt;
9189:       MatTranspose(P,MAT_INITIAL_MATRIX,&Pt);
9190:       if (viamatmatmatmult) {
9191:         MatMatMatMult(Pt,A,P,scall,fill,C);
9192:       } else {
9193:         Mat AP;
9194:         MatMatMult(A,P,MAT_INITIAL_MATRIX,fill,&AP);
9195:         MatMatMult(Pt,AP,scall,fill,C);
9196:         MatDestroy(&AP);
9197:       }
9198:       MatDestroy(&Pt);
9199:     } else {
9200:       PetscLogEventBegin(MAT_PtAP,A,P,0,0);
9201:       PetscLogEventBegin(MAT_PtAPNumeric,A,P,0,0);
9202:       (*(*C)->ops->ptapnumeric)(A,P,*C);
9203:       PetscLogEventEnd(MAT_PtAPNumeric,A,P,0,0);
9204:       PetscLogEventEnd(MAT_PtAP,A,P,0,0);
9205:     }
9206:     return(0);
9207:   }

9209:   if (fill == PETSC_DEFAULT || fill == PETSC_DECIDE) fill = 2.0;
9210:   if (fill < 1.0) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Expected fill=%g must be >= 1.0",(double)fill);

9212:   fA = A->ops->ptap;
9213:   fP = P->ops->ptap;
9214:   if (fP == fA) {
9215:     if (!fA) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"MatPtAP not supported for A of type %s",((PetscObject)A)->type_name);
9216:     ptap = fA;
9217:   } else {
9218:     /* dispatch based on the type of A and P from their PetscObject's PetscFunctionLists. */
9219:     char ptapname[256];
9220:     PetscStrcpy(ptapname,"MatPtAP_");
9221:     PetscStrcat(ptapname,((PetscObject)A)->type_name);
9222:     PetscStrcat(ptapname,"_");
9223:     PetscStrcat(ptapname,((PetscObject)P)->type_name);
9224:     PetscStrcat(ptapname,"_C"); /* e.g., ptapname = "MatPtAP_seqdense_seqaij_C" */
9225:     PetscObjectQueryFunction((PetscObject)P,ptapname,&ptap);
9226:     if (!ptap) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_INCOMP,"MatPtAP requires A, %s, to be compatible with P, %s",((PetscObject)A)->type_name,((PetscObject)P)->type_name);
9227:   }

9229:   if (viatranspose || viamatmatmatmult) {
9230:     Mat Pt;
9231:     MatTranspose(P,MAT_INITIAL_MATRIX,&Pt);
9232:     if (viamatmatmatmult) {
9233:       MatMatMatMult(Pt,A,P,scall,fill,C);
9234:       PetscInfo(*C,"MatPtAP via MatMatMatMult\n");
9235:     } else {
9236:       Mat AP;
9237:       MatMatMult(A,P,MAT_INITIAL_MATRIX,fill,&AP);
9238:       MatMatMult(Pt,AP,scall,fill,C);
9239:       MatDestroy(&AP);
9240:       PetscInfo(*C,"MatPtAP via MatTranspose and MatMatMult\n");
9241:     }
9242:     MatDestroy(&Pt);
9243:   } else {
9244:     PetscLogEventBegin(MAT_PtAP,A,P,0,0);
9245:     (*ptap)(A,P,scall,fill,C);
9246:     PetscLogEventEnd(MAT_PtAP,A,P,0,0);
9247:   }
9248:   return(0);
9249: }
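
/*
   Illustrative usage sketch (not part of the PETSc source): forming the Galerkin coarse operator
   C = P^T*A*P once with MAT_INITIAL_MATRIX and recomputing its values with MAT_REUSE_MATRIX after
   the entries of A change (same nonzero pattern); A and P are assumed to be assembled AIJ matrices.

.vb
     Mat C;

     MatPtAP(A,P,MAT_INITIAL_MATRIX,PETSC_DEFAULT,&C);
     ....
     MatPtAP(A,P,MAT_REUSE_MATRIX,PETSC_DEFAULT,&C);
     MatDestroy(&C);
.ve
*/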

9251: /*@
9252:    MatPtAPNumeric - Computes the matrix product C = P^T * A * P

9254:    Neighbor-wise Collective on Mat

9256:    Input Parameters:
9257: +  A - the matrix
9258: -  P - the projection matrix

9260:    Output Parameters:
9261: .  C - the product matrix

9263:    Notes:
9264:    C must have been created by calling MatPtAPSymbolic() and must be destroyed by
9265:    the user using MatDestroy().

9267:    This routine is currently only implemented for pairs of AIJ matrices and classes
9268:    which inherit from AIJ.  C will be of type MATAIJ.

9270:    Level: intermediate

9272: .seealso: MatPtAP(), MatPtAPSymbolic(), MatMatMultNumeric()
9273: @*/
9274: PetscErrorCode MatPtAPNumeric(Mat A,Mat P,Mat C)
9275: {

9281:   if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9282:   if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9285:   MatCheckPreallocated(P,2);
9286:   if (!P->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9287:   if (P->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9290:   MatCheckPreallocated(C,3);
9291:   if (C->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9292:   if (P->cmap->N!=C->rmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",P->cmap->N,C->rmap->N);
9293:   if (P->rmap->N!=A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",P->rmap->N,A->cmap->N);
9294:   if (A->rmap->N!=A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix 'A' must be square, %D != %D",A->rmap->N,A->cmap->N);
9295:   if (P->cmap->N!=C->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",P->cmap->N,C->cmap->N);
9296:   MatCheckPreallocated(A,1);

9298:   PetscLogEventBegin(MAT_PtAPNumeric,A,P,0,0);
9299:   (*C->ops->ptapnumeric)(A,P,C);
9300:   PetscLogEventEnd(MAT_PtAPNumeric,A,P,0,0);
9301:   return(0);
9302: }

9304: /*@
9305:    MatPtAPSymbolic - Creates the (i,j) structure of the matrix product C = P^T * A * P

9307:    Neighbor-wise Collective on Mat

9309:    Input Parameters:
9310: +  A - the matrix
9311: .  P - the projection matrix
-  fill - expected fill as ratio of nnz(C)/(nnz(A) + nnz(P)), use PETSC_DEFAULT if you do not have a good estimate

9313:    Output Parameters:
9314: .  C - the (i,j) structure of the product matrix

9316:    Notes:
9317:    C will be created and must be destroyed by the user with MatDestroy().

9319:    This routine is currently only implemented for pairs of SeqAIJ matrices and classes
9320:    which inherit from SeqAIJ.  C will be of type MATSEQAIJ.  The product is computed using
9321:    this (i,j) structure by calling MatPtAPNumeric().

9323:    Level: intermediate

9325: .seealso: MatPtAP(), MatPtAPNumeric(), MatMatMultSymbolic()
9326: @*/
9327: PetscErrorCode MatPtAPSymbolic(Mat A,Mat P,PetscReal fill,Mat *C)
9328: {

9334:   if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9335:   if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9336:   if (fill <1.0) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Expected fill=%g must be >= 1.0",(double)fill);
9339:   MatCheckPreallocated(P,2);
9340:   if (!P->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9341:   if (P->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");

9344:   if (P->rmap->N!=A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",P->rmap->N,A->cmap->N);
9345:   if (A->rmap->N!=A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix 'A' must be square, %D != %D",A->rmap->N,A->cmap->N);
9346:   MatCheckPreallocated(A,1);
9347:   PetscLogEventBegin(MAT_PtAPSymbolic,A,P,0,0);
9348:   (*A->ops->ptapsymbolic)(A,P,fill,C);
9349:   PetscLogEventEnd(MAT_PtAPSymbolic,A,P,0,0);

9351:   /* MatSetBlockSize(*C,A->rmap->bs); NO! this is not al