Actual source code: matrix.c
petsc-3.10.0 2018-09-12
2: /*
3: This is where the abstract matrix operations are defined
4: */
6: #include <petsc/private/matimpl.h>
7: #include <petsc/private/isimpl.h>
8: #include <petsc/private/vecimpl.h>
10: /* Logging support */
11: PetscClassId MAT_CLASSID;
12: PetscClassId MAT_COLORING_CLASSID;
13: PetscClassId MAT_FDCOLORING_CLASSID;
14: PetscClassId MAT_TRANSPOSECOLORING_CLASSID;
16: PetscLogEvent MAT_Mult, MAT_Mults, MAT_MultConstrained, MAT_MultAdd, MAT_MultTranspose;
17: PetscLogEvent MAT_MultTransposeConstrained, MAT_MultTransposeAdd, MAT_Solve, MAT_Solves, MAT_SolveAdd, MAT_SolveTranspose, MAT_MatSolve,MAT_MatTrSolve;
18: PetscLogEvent MAT_SolveTransposeAdd, MAT_SOR, MAT_ForwardSolve, MAT_BackwardSolve, MAT_LUFactor, MAT_LUFactorSymbolic;
19: PetscLogEvent MAT_LUFactorNumeric, MAT_CholeskyFactor, MAT_CholeskyFactorSymbolic, MAT_CholeskyFactorNumeric, MAT_ILUFactor;
20: PetscLogEvent MAT_ILUFactorSymbolic, MAT_ICCFactorSymbolic, MAT_Copy, MAT_Convert, MAT_Scale, MAT_AssemblyBegin;
21: PetscLogEvent MAT_AssemblyEnd, MAT_SetValues, MAT_GetValues, MAT_GetRow, MAT_GetRowIJ, MAT_CreateSubMats, MAT_GetOrdering, MAT_RedundantMat, MAT_GetSeqNonzeroStructure;
22: PetscLogEvent MAT_IncreaseOverlap, MAT_Partitioning, MAT_PartitioningND, MAT_Coarsen, MAT_ZeroEntries, MAT_Load, MAT_View, MAT_AXPY, MAT_FDColoringCreate;
23: PetscLogEvent MAT_FDColoringSetUp, MAT_FDColoringApply,MAT_Transpose,MAT_FDColoringFunction, MAT_CreateSubMat;
24: PetscLogEvent MAT_TransposeColoringCreate;
25: PetscLogEvent MAT_MatMult, MAT_MatMultSymbolic, MAT_MatMultNumeric;
26: PetscLogEvent MAT_PtAP, MAT_PtAPSymbolic, MAT_PtAPNumeric,MAT_RARt, MAT_RARtSymbolic, MAT_RARtNumeric;
27: PetscLogEvent MAT_MatTransposeMult, MAT_MatTransposeMultSymbolic, MAT_MatTransposeMultNumeric;
28: PetscLogEvent MAT_TransposeMatMult, MAT_TransposeMatMultSymbolic, MAT_TransposeMatMultNumeric;
29: PetscLogEvent MAT_MatMatMult, MAT_MatMatMultSymbolic, MAT_MatMatMultNumeric;
30: PetscLogEvent MAT_MultHermitianTranspose,MAT_MultHermitianTransposeAdd;
31: PetscLogEvent MAT_Getsymtranspose, MAT_Getsymtransreduced, MAT_Transpose_SeqAIJ, MAT_GetBrowsOfAcols;
32: PetscLogEvent MAT_GetBrowsOfAocols, MAT_Getlocalmat, MAT_Getlocalmatcondensed, MAT_Seqstompi, MAT_Seqstompinum, MAT_Seqstompisym;
33: PetscLogEvent MAT_Applypapt, MAT_Applypapt_numeric, MAT_Applypapt_symbolic, MAT_GetSequentialNonzeroStructure;
34: PetscLogEvent MAT_GetMultiProcBlock;
35: PetscLogEvent MAT_CUSPARSECopyToGPU, MAT_SetValuesBatch;
36: PetscLogEvent MAT_ViennaCLCopyToGPU;
37: PetscLogEvent MAT_Merge,MAT_Residual,MAT_SetRandom;
38: PetscLogEvent MATCOLORING_Apply,MATCOLORING_Comm,MATCOLORING_Local,MATCOLORING_ISCreate,MATCOLORING_SetUp,MATCOLORING_Weights;
40: const char *const MatFactorTypes[] = {"NONE","LU","CHOLESKY","ILU","ICC","ILUDT","MatFactorType","MAT_FACTOR_",0};
42: /*@
43: MatSetRandom - Sets all components of a matrix to random numbers. For sparse matrices that have been preallocated, it randomly selects appropriate locations.
45: Logically Collective on Mat
47: Input Parameters:
48: + x - the matrix
49: - rctx - the random number context, formed by PetscRandomCreate(), or NULL,
50: in which case one is created internally.
52: Output Parameter:
53: . x - the matrix
55: Example of Usage:
56: .vb
57: PetscRandomCreate(PETSC_COMM_WORLD,&rctx);
58: MatSetRandom(x,rctx);
59: PetscRandomDestroy(&rctx);
60: .ve
62: Level: intermediate
64: Concepts: matrix^setting to random
65: Concepts: random^matrix
67: .seealso: MatZeroEntries(), MatSetValues(), PetscRandomCreate(), PetscRandomDestroy()
68: @*/
69: PetscErrorCode MatSetRandom(Mat x,PetscRandom rctx)
70: {
72: PetscRandom randObj = NULL;
79: if (!x->ops->setrandom) SETERRQ1(PetscObjectComm((PetscObject)x),PETSC_ERR_SUP,"Mat type %s",((PetscObject)x)->type_name);
81: if (!rctx) {
82: MPI_Comm comm;
83: PetscObjectGetComm((PetscObject)x,&comm);
84: PetscRandomCreate(comm,&randObj);
85: PetscRandomSetFromOptions(randObj);
86: rctx = randObj;
87: }
89: PetscLogEventBegin(MAT_SetRandom,x,rctx,0,0);
90: (*x->ops->setrandom)(x,rctx);
91: PetscLogEventEnd(MAT_SetRandom,x,rctx,0,0);
93: x->assembled = PETSC_TRUE;
94: PetscRandomDestroy(&randObj);
95: return(0);
96: }
98: /*@
99: MatFactorGetErrorZeroPivot - returns the pivot value that was determined to be zero and the row it occurred in
101: Logically Collective on Mat
103: Input Parameters:
104: . mat - the factored matrix
106: Output Parameter:
107: + pivot - the pivot value computed
108: - row - the row that the zero pivot occurred. Note that this row must be interpreted carefully due to row reorderings and which processes
109: the share the matrix
111: Level: advanced
113: Notes:
114: This routine does not work for factorizations done with external packages.
115: This routine should only be called if MatGetFactorError() returns a value of MAT_FACTOR_NUMERIC_ZEROPIVOT
117: This can also be called on non-factored matrices, for example matrices used in SOR.
119: .seealso: MatZeroEntries(), MatFactor(), MatGetFactor(), MatFactorSymbolic(), MatFactorClearError(), MatFactorGetError()
120: @*/
121: PetscErrorCode MatFactorGetErrorZeroPivot(Mat mat,PetscReal *pivot,PetscInt *row)
122: {
125: *pivot = mat->factorerror_zeropivot_value;
126: *row = mat->factorerror_zeropivot_row;
127: return(0);
128: }
130: /*@
131: MatFactorGetError - gets the error code from a factorization
133: Logically Collective on Mat
135: Input Parameters:
136: . mat - the factored matrix
138: Output Parameter:
139: . err - the error code
141: Level: advanced
143: Notes:
144: This can also be called on non-factored matrices, for example matrices used in SOR.
146: .seealso: MatZeroEntries(), MatFactor(), MatGetFactor(), MatFactorSymbolic(), MatFactorClearError(), MatFactorGetErrorZeroPivot()
147: @*/
148: PetscErrorCode MatFactorGetError(Mat mat,MatFactorError *err)
149: {
152: *err = mat->factorerrortype;
153: return(0);
154: }
156: /*@
157: MatFactorClearError - clears the error code in a factorization
159: Logically Collective on Mat
161: Input Parameter:
162: . mat - the factored matrix
164: Level: developer
166: Notes:
167: This can also be called on non-factored matrices, for example matrices used in SOR.
169: .seealso: MatZeroEntries(), MatFactor(), MatGetFactor(), MatFactorSymbolic(), MatFactorGetError(), MatFactorGetErrorZeroPivot()
170: @*/
171: PetscErrorCode MatFactorClearError(Mat mat)
172: {
175: mat->factorerrortype = MAT_FACTOR_NOERROR;
176: mat->factorerror_zeropivot_value = 0.0;
177: mat->factorerror_zeropivot_row = 0;
178: return(0);
179: }
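The three error-query routines above are typically used together after a numeric factorization. The following is a minimal, illustrative sketch (not part of matrix.c; error checking omitted); it assumes an assembled square matrix A and uses the built-in PETSc LU factorization, since MatFactorGetErrorZeroPivot() does not work with external packages.
.vb
   Mat            F;
   MatFactorInfo  info;
   IS             rowperm,colperm;
   MatFactorError err;
   PetscReal      pivot;
   PetscInt       row;

   MatGetOrdering(A,MATORDERINGNATURAL,&rowperm,&colperm);
   MatFactorInfoInitialize(&info);
   MatGetFactor(A,MATSOLVERPETSC,MAT_FACTOR_LU,&F);
   MatLUFactorSymbolic(F,A,rowperm,colperm,&info);
   MatLUFactorNumeric(F,A,&info);

   MatFactorGetError(F,&err);
   if (err == MAT_FACTOR_NUMERIC_ZEROPIVOT) {
     MatFactorGetErrorZeroPivot(F,&pivot,&row);  /* pivot value and (reordered) row */
     MatFactorClearError(F);                     /* reset the error state before retrying */
   }
.ve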
181: PETSC_INTERN PetscErrorCode MatFindNonzeroRowsOrCols_Basic(Mat mat,PetscBool cols,PetscReal tol,IS *nonzero)
182: {
183: PetscErrorCode ierr;
184: Vec r,l;
185: const PetscScalar *al;
186: PetscInt i,nz,gnz,N,n;
189: MatCreateVecs(mat,&r,&l);
190: if (!cols) { /* nonzero rows */
191: MatGetSize(mat,&N,NULL);
192: MatGetLocalSize(mat,&n,NULL);
193: VecSet(l,0.0);
194: VecSetRandom(r,NULL);
195: MatMult(mat,r,l);
196: VecGetArrayRead(l,&al);
197: } else { /* nonzero columns */
198: MatGetSize(mat,NULL,&N);
199: MatGetLocalSize(mat,NULL,&n);
200: VecSet(r,0.0);
201: VecSetRandom(l,NULL);
202: MatMultTranspose(mat,l,r);
203: VecGetArrayRead(r,&al);
204: }
205: if (tol <= 0.0) { for (i=0,nz=0;i<n;i++) if (al[i] != 0.0) nz++; }
206: else { for (i=0,nz=0;i<n;i++) if (PetscAbsScalar(al[i]) > tol) nz++; }
207: MPIU_Allreduce(&nz,&gnz,1,MPIU_INT,MPI_SUM,PetscObjectComm((PetscObject)mat));
208: if (gnz != N) {
209: PetscInt *nzr;
210: PetscMalloc1(nz,&nzr);
211: if (nz) {
212: if (tol < 0) { for (i=0,nz=0;i<n;i++) if (al[i] != 0.0) nzr[nz++] = i; }
213: else { for (i=0,nz=0;i<n;i++) if (PetscAbsScalar(al[i]) > tol) nzr[nz++] = i; }
214: }
215: ISCreateGeneral(PetscObjectComm((PetscObject)mat),nz,nzr,PETSC_OWN_POINTER,nonzero);
216: } else *nonzero = NULL;
217: if (!cols) { /* nonzero rows */
218: VecRestoreArrayRead(l,&al);
219: } else {
220: VecRestoreArrayRead(r,&al);
221: }
222: VecDestroy(&l);
223: VecDestroy(&r);
224: return(0);
225: }
227: /*@
228: MatFindNonzeroRows - Locate all rows that are not completely zero in the matrix
230: Input Parameter:
231: . mat - the matrix
233: Output Parameter:
234: . keptrows - the rows that are not completely zero
236: Notes:
237: keptrows is set to NULL if all rows are nonzero.
239: Level: intermediate
241: @*/
242: PetscErrorCode MatFindNonzeroRows(Mat mat,IS *keptrows)
243: {
250: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
251: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
252: if (!mat->ops->findnonzerorows) {
253: MatFindNonzeroRowsOrCols_Basic(mat,PETSC_FALSE,0.0,keptrows);
254: } else {
255: (*mat->ops->findnonzerorows)(mat,keptrows);
256: }
257: return(0);
258: }
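A short usage sketch (illustrative only, not part of matrix.c), assuming an assembled matrix A; note the convention that keptrows is NULL when every row contains a nonzero.
.vb
   IS keptrows;

   MatFindNonzeroRows(A,&keptrows);
   if (keptrows) {                               /* NULL means every row has a nonzero */
     ISView(keptrows,PETSC_VIEWER_STDOUT_WORLD);
     ISDestroy(&keptrows);
   }
.ve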
260: /*@
261: MatFindZeroRows - Locate all rows that are completely zero in the matrix
263: Input Parameter:
264: . mat - the matrix
266: Output Parameter:
267: . zerorows - the rows that are completely zero
269: Notes:
270: zerorows is set to NULL if no rows are zero.
272: Level: intermediate
274: @*/
275: PetscErrorCode MatFindZeroRows(Mat mat,IS *zerorows)
276: {
278: IS keptrows;
279: PetscInt m, n;
284: MatFindNonzeroRows(mat, &keptrows);
285: /* MatFindNonzeroRows sets keptrows to NULL if there are no zero rows.
286: In keeping with this convention, we set zerorows to NULL if there are no zero
287: rows. */
288: if (keptrows == NULL) {
289: *zerorows = NULL;
290: } else {
291: MatGetOwnershipRange(mat,&m,&n);
292: ISComplement(keptrows,m,n,zerorows);
293: ISDestroy(&keptrows);
294: }
295: return(0);
296: }
298: /*@
299: MatGetDiagonalBlock - Returns the part of the matrix associated with the on-process coupling
301: Not Collective
303: Input Parameters:
304: . A - the matrix
306: Output Parameters:
307: . a - the diagonal part (which is a SEQUENTIAL matrix)
309: Notes:
310: see the manual page for MatCreateAIJ() for more information on the "diagonal part" of the matrix.
311: Use caution, as the reference count on the returned matrix is not incremented and it is used as
312: part of the containing MPI Mat's normal operation.
314: Level: advanced
316: @*/
317: PetscErrorCode MatGetDiagonalBlock(Mat A,Mat *a)
318: {
325: if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
326: if (!A->ops->getdiagonalblock) {
327: PetscMPIInt size;
328: MPI_Comm_size(PetscObjectComm((PetscObject)A),&size);
329: if (size == 1) {
330: *a = A;
331: return(0);
332: } else SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Not coded for this matrix type");
333: }
334: (*A->ops->getdiagonalblock)(A,a);
335: return(0);
336: }
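A minimal sketch (illustrative only) of extracting and inspecting the on-process diagonal block of a parallel matrix A; because the reference count is not incremented, the returned block must not be destroyed.
.vb
   Mat Ad;

   MatGetDiagonalBlock(A,&Ad);             /* sequential on-process block */
   MatView(Ad,PETSC_VIEWER_STDOUT_SELF);
   /* do NOT call MatDestroy(&Ad); the reference count was not incremented */
.ve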
338: /*@
339: MatGetTrace - Gets the trace of a matrix, that is, the sum of the diagonal entries.
341: Collective on Mat
343: Input Parameters:
344: . mat - the matrix
346: Output Parameter:
347: . trace - the sum of the diagonal entries
349: Level: advanced
351: @*/
352: PetscErrorCode MatGetTrace(Mat mat,PetscScalar *trace)
353: {
355: Vec diag;
358: MatCreateVecs(mat,&diag,NULL);
359: MatGetDiagonal(mat,diag);
360: VecSum(diag,trace);
361: VecDestroy(&diag);
362: return(0);
363: }
365: /*@
366: MatRealPart - Zeros out the imaginary part of the matrix
368: Logically Collective on Mat
370: Input Parameters:
371: . mat - the matrix
373: Level: advanced
376: .seealso: MatImaginaryPart()
377: @*/
378: PetscErrorCode MatRealPart(Mat mat)
379: {
385: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
386: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
387: if (!mat->ops->realpart) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
388: MatCheckPreallocated(mat,1);
389: (*mat->ops->realpart)(mat);
390: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_VECCUDA)
391: if (mat->valid_GPU_matrix != PETSC_OFFLOAD_UNALLOCATED) {
392: mat->valid_GPU_matrix = PETSC_OFFLOAD_CPU;
393: }
394: #endif
395: return(0);
396: }
398: /*@C
399: MatGetGhosts - Get the global index of all ghost nodes defined by the sparse matrix
401: Collective on Mat
403: Input Parameter:
404: . mat - the matrix
406: Output Parameters:
407: + nghosts - number of ghosts (note for BAIJ matrices there is one ghost for each block)
408: - ghosts - the global indices of the ghost points
410: Notes:
411: The nghosts and ghosts values are suitable to pass into VecCreateGhost().
413: Level: advanced
415: @*/
416: PetscErrorCode MatGetGhosts(Mat mat,PetscInt *nghosts,const PetscInt *ghosts[])
417: {
423: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
424: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
425: if (!mat->ops->getghosts) {
426: if (nghosts) *nghosts = 0;
427: if (ghosts) *ghosts = 0;
428: } else {
429: (*mat->ops->getghosts)(mat,nghosts,ghosts);
430: }
431: return(0);
432: }
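A minimal sketch (illustrative only) of passing the ghost information to VecCreateGhost(); it assumes an assembled parallel AIJ matrix A, whose ghosts index the column (input-vector) space.
.vb
   PetscInt       n,nghosts;
   const PetscInt *ghosts;
   Vec            v;

   MatGetLocalSize(A,NULL,&n);                  /* local number of columns */
   MatGetGhosts(A,&nghosts,&ghosts);
   VecCreateGhost(PetscObjectComm((PetscObject)A),n,PETSC_DECIDE,nghosts,ghosts,&v);
.ve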
435: /*@
436: MatImaginaryPart - Moves the imaginary part of the matrix to the real part and zeros the imaginary part
438: Logically Collective on Mat
440: Input Parameters:
441: . mat - the matrix
443: Level: advanced
446: .seealso: MatRealPart()
447: @*/
448: PetscErrorCode MatImaginaryPart(Mat mat)
449: {
455: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
456: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
457: if (!mat->ops->imaginarypart) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
458: MatCheckPreallocated(mat,1);
459: (*mat->ops->imaginarypart)(mat);
460: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_VECCUDA)
461: if (mat->valid_GPU_matrix != PETSC_OFFLOAD_UNALLOCATED) {
462: mat->valid_GPU_matrix = PETSC_OFFLOAD_CPU;
463: }
464: #endif
465: return(0);
466: }
468: /*@
469: MatMissingDiagonal - Determine if sparse matrix is missing a diagonal entry (or block entry for BAIJ matrices)
471: Not Collective
473: Input Parameter:
474: . mat - the matrix
476: Output Parameters:
477: + missing - is any diagonal missing
478: - dd - the index of the first missing diagonal entry on this process (optional)
480: Level: advanced
483: .seealso: MatRealPart()
484: @*/
485: PetscErrorCode MatMissingDiagonal(Mat mat,PetscBool *missing,PetscInt *dd)
486: {
492: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
493: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
494: if (!mat->ops->missingdiagonal) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
495: (*mat->ops->missingdiagonal)(mat,missing,dd);
496: return(0);
497: }
499: /*@C
500: MatGetRow - Gets a row of a matrix. You MUST call MatRestoreRow()
501: for each row that you get to ensure that your application does
502: not leak memory.
504: Not Collective
506: Input Parameters:
507: + mat - the matrix
508: - row - the row to get
510: Output Parameters:
511: + ncols - if not NULL, the number of nonzeros in the row
512: . cols - if not NULL, the column numbers
513: - vals - if not NULL, the values
515: Notes:
516: This routine is provided for people who need to have direct access
517: to the structure of a matrix. We hope that we provide enough
518: high-level matrix routines that few users will need it.
520: MatGetRow() always returns 0-based column indices, regardless of
521: whether the internal representation is 0-based (default) or 1-based.
523: For better efficiency, set cols and/or vals to NULL if you do
524: not wish to extract these quantities.
526: The user can only examine the values extracted with MatGetRow();
527: the values cannot be altered. To change the matrix entries, one
528: must use MatSetValues().
530: You can only have one call to MatGetRow() outstanding for a particular
531: matrix at a time, per processor. MatGetRow() can only obtain rows
532: associated with the given processor, it cannot get rows from the
533: other processors; for that we suggest using MatCreateSubMatrices(), then
534: MatGetRow() on the submatrix. The row index passed to MatGetRow()
535: is the global row number.
537: Fortran Notes:
538: The calling sequence from Fortran is
539: .vb
540: MatGetRow(matrix,row,ncols,cols,values,ierr)
541: Mat matrix (input)
542: integer row (input)
543: integer ncols (output)
544: integer cols(maxcols) (output)
545: double precision (or double complex) values(maxcols) (output)
546: .ve
547: where maxcols >= maximum nonzeros in any row of the matrix.
550: Caution:
551: Do not try to change the contents of the output arrays (cols and vals).
552: In some cases, this may corrupt the matrix.
554: Level: advanced
556: Concepts: matrices^row access
558: .seealso: MatRestoreRow(), MatSetValues(), MatGetValues(), MatCreateSubMatrices(), MatGetDiagonal()
559: @*/
560: PetscErrorCode MatGetRow(Mat mat,PetscInt row,PetscInt *ncols,const PetscInt *cols[],const PetscScalar *vals[])
561: {
563: PetscInt incols;
568: if (!mat->assembled) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
569: if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
570: if (!mat->ops->getrow) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
571: MatCheckPreallocated(mat,1);
572: PetscLogEventBegin(MAT_GetRow,mat,0,0,0);
573: (*mat->ops->getrow)(mat,row,&incols,(PetscInt**)cols,(PetscScalar**)vals);
574: if (ncols) *ncols = incols;
575: PetscLogEventEnd(MAT_GetRow,mat,0,0,0);
576: return(0);
577: }
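A typical usage sketch (illustrative only, not part of matrix.c): loop over the locally owned rows, pairing each MatGetRow() with MatRestoreRow(); it assumes an assembled matrix A.
.vb
   PetscInt          rstart,rend,row,ncols;
   const PetscInt    *cols;
   const PetscScalar *vals;

   MatGetOwnershipRange(A,&rstart,&rend);
   for (row=rstart; row<rend; row++) {
     MatGetRow(A,row,&ncols,&cols,&vals);
     /* examine (read only) the ncols entries in cols[] and vals[] */
     MatRestoreRow(A,row,&ncols,&cols,&vals);
   }
.ve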
579: /*@
580: MatConjugate - replaces the matrix values with their complex conjugates
582: Logically Collective on Mat
584: Input Parameters:
585: . mat - the matrix
587: Level: advanced
589: .seealso: VecConjugate()
590: @*/
591: PetscErrorCode MatConjugate(Mat mat)
592: {
593: #if defined(PETSC_USE_COMPLEX)
598: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
599: if (!mat->ops->conjugate) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Not provided for this matrix format, send email to petsc-maint@mcs.anl.gov");
600: (*mat->ops->conjugate)(mat);
601: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_VECCUDA)
602: if (mat->valid_GPU_matrix != PETSC_OFFLOAD_UNALLOCATED) {
603: mat->valid_GPU_matrix = PETSC_OFFLOAD_CPU;
604: }
605: #endif
606: return(0);
607: #else
608: return 0;
609: #endif
610: }
612: /*@C
613: MatRestoreRow - Frees any temporary space allocated by MatGetRow().
615: Not Collective
617: Input Parameters:
618: + mat - the matrix
619: . row - the row to get
620: . ncols, cols - the number of nonzeros and their columns
621: - vals - if nonzero the column values
623: Notes:
624: This routine should be called after you have finished examining the entries.
626: This routine zeros out ncols, cols, and vals. This is to prevent accidental
627: use of the array after it has been restored. If you pass NULL, it will
628: not zero the pointers. Use of cols or vals after MatRestoreRow() is invalid.
630: Fortran Notes:
631: The calling sequence from Fortran is
632: .vb
633: MatRestoreRow(matrix,row,ncols,cols,values,ierr)
634: Mat matrix (input)
635: integer row (input)
636: integer ncols (output)
637: integer cols(maxcols) (output)
638: double precision (or double complex) values(maxcols) (output)
639: .ve
640: where maxcols >= maximum nonzeros in any row of the matrix.
642: In Fortran MatRestoreRow() MUST be called after MatGetRow()
643: before another call to MatGetRow() can be made.
645: Level: advanced
647: .seealso: MatGetRow()
648: @*/
649: PetscErrorCode MatRestoreRow(Mat mat,PetscInt row,PetscInt *ncols,const PetscInt *cols[],const PetscScalar *vals[])
650: {
656: if (!mat->assembled) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
657: if (!mat->ops->restorerow) return(0);
658: (*mat->ops->restorerow)(mat,row,ncols,(PetscInt **)cols,(PetscScalar **)vals);
659: if (ncols) *ncols = 0;
660: if (cols) *cols = NULL;
661: if (vals) *vals = NULL;
662: return(0);
663: }
665: /*@
666: MatGetRowUpperTriangular - Sets a flag to enable calls to MatGetRow() for matrices in MATSBAIJ format.
667: You should call MatRestoreRowUpperTriangular() after calling MatGetRow()/MatRestoreRow() to disable the flag.
669: Not Collective
671: Input Parameters:
672: . mat - the matrix
674: Notes:
675: The flag is to ensure that users are aware that MatGetRow() only provides the upper triangular part of the row for matrices in MATSBAIJ format.
677: Level: advanced
679: Concepts: matrices^row access
681: .seealso: MatRestoreRowUpperTriangular()
682: @*/
683: PetscErrorCode MatGetRowUpperTriangular(Mat mat)
684: {
690: if (!mat->assembled) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
691: if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
692: if (!mat->ops->getrowuppertriangular) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
693: MatCheckPreallocated(mat,1);
694: (*mat->ops->getrowuppertriangular)(mat);
695: return(0);
696: }
698: /*@
699: MatRestoreRowUpperTriangular - Disable calls to MatGetRow() for matrix in MATSBAIJ format.
701: Not Collective
703: Input Parameters:
704: . mat - the matrix
706: Notes:
707: This routine should be called after you have finished calling MatGetRow()/MatRestoreRow().
710: Level: advanced
712: .seealso: MatGetRowUpperTriangular()
713: @*/
714: PetscErrorCode MatRestoreRowUpperTriangular(Mat mat)
715: {
720: if (!mat->assembled) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
721: if (!mat->ops->restorerowuppertriangular) return(0);
722: (*mat->ops->restorerowuppertriangular)(mat);
723: return(0);
724: }
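For a MATSBAIJ matrix, the MatGetRow()/MatRestoreRow() pair from the sketch after MatGetRow() above must be bracketed as follows (illustrative only; variable declarations as in that sketch).
.vb
   MatGetRowUpperTriangular(A);              /* A is MATSBAIJ; only the upper triangle is returned */
   MatGetRow(A,row,&ncols,&cols,&vals);
   /* ... examine the row ... */
   MatRestoreRow(A,row,&ncols,&cols,&vals);
   MatRestoreRowUpperTriangular(A);
.ve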
726: /*@C
727: MatSetOptionsPrefix - Sets the prefix used for searching for all
728: Mat options in the database.
730: Logically Collective on Mat
732: Input Parameters:
733: + A - the Mat context
734: - prefix - the prefix to prepend to all option names
736: Notes:
737: A hyphen (-) must NOT be given at the beginning of the prefix name.
738: The first character of all runtime options is AUTOMATICALLY the hyphen.
740: Level: advanced
742: .keywords: Mat, set, options, prefix, database
744: .seealso: MatSetFromOptions()
745: @*/
746: PetscErrorCode MatSetOptionsPrefix(Mat A,const char prefix[])
747: {
752: PetscObjectSetOptionsPrefix((PetscObject)A,prefix);
753: return(0);
754: }
756: /*@C
757: MatAppendOptionsPrefix - Appends to the prefix used for searching for all
758: Mat options in the database.
760: Logically Collective on Mat
762: Input Parameters:
763: + A - the Mat context
764: - prefix - the prefix to append to the existing prefix
766: Notes:
767: A hyphen (-) must NOT be given at the beginning of the prefix name.
768: The first character of all runtime options is AUTOMATICALLY the hyphen.
770: Level: advanced
772: .keywords: Mat, append, options, prefix, database
774: .seealso: MatGetOptionsPrefix()
775: @*/
776: PetscErrorCode MatAppendOptionsPrefix(Mat A,const char prefix[])
777: {
782: PetscObjectAppendOptionsPrefix((PetscObject)A,prefix);
783: return(0);
784: }
786: /*@C
787: MatGetOptionsPrefix - Gets the prefix used for searching for all
788: Mat options in the database.
790: Not Collective
792: Input Parameter:
793: . A - the Mat context
795: Output Parameter:
796: . prefix - pointer to the prefix string used
798: Notes:
799: On the Fortran side, the user should pass in a string 'prefix' of
800: sufficient length to hold the prefix.
802: Level: advanced
804: .keywords: Mat, get, options, prefix, database
806: .seealso: MatAppendOptionsPrefix()
807: @*/
808: PetscErrorCode MatGetOptionsPrefix(Mat A,const char *prefix[])
809: {
814: PetscObjectGetOptionsPrefix((PetscObject)A,prefix);
815: return(0);
816: }
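A minimal sketch (illustrative only) of how the prefix routines combine with MatSetFromOptions(); the prefix strings used here are arbitrary examples.
.vb
   MatSetOptionsPrefix(A,"sub_");        /* note: no leading hyphen */
   MatAppendOptionsPrefix(A,"level1_");  /* options are now read with the prefix -sub_level1_ */
   MatSetFromOptions(A);                 /* e.g. -sub_level1_mat_type baij on the command line */
.ve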
818: /*@
819: MatResetPreallocation - Resets the matrix to use the original nonzero pattern provided by the user.
821: Collective on Mat
823: Input Parameters:
824: . A - the Mat context
826: Notes:
827: The allocated memory will be shrunk after MatAssemblyBegin()/MatAssemblyEnd() is called with MAT_FINAL_ASSEMBLY. Users can reset the preallocation to access the original memory.
828: Currently supports MATMPIAIJ and MATSEQAIJ.
830: Level: beginner
832: .keywords: Mat, ResetPreallocation
834: .seealso: MatSeqAIJSetPreallocation(), MatMPIAIJSetPreallocation(), MatXAIJSetPreallocation()
835: @*/
836: PetscErrorCode MatResetPreallocation(Mat A)
837: {
843: PetscUseMethod(A,"MatResetPreallocation_C",(Mat),(A));
844: return(0);
845: }
848: /*@
849: MatSetUp - Sets up the internal matrix data structures for later use.
851: Collective on Mat
853: Input Parameters:
854: . A - the Mat context
856: Notes:
857: If the user has not set preallocation for this matrix then a default preallocation that is likely to be inefficient is used.
859: If a suitable preallocation routine is used, this function does not need to be called.
861: See the Performance chapter of the PETSc users manual for how to preallocate matrices
863: Level: beginner
865: .keywords: Mat, setup
867: .seealso: MatCreate(), MatDestroy()
868: @*/
869: PetscErrorCode MatSetUp(Mat A)
870: {
871: PetscMPIInt size;
876: if (!((PetscObject)A)->type_name) {
877: MPI_Comm_size(PetscObjectComm((PetscObject)A), &size);
878: if (size == 1) {
879: MatSetType(A, MATSEQAIJ);
880: } else {
881: MatSetType(A, MATMPIAIJ);
882: }
883: }
884: if (!A->preallocated && A->ops->setup) {
885: PetscInfo(A,"Warning not preallocating matrix storage\n");
886: (*A->ops->setup)(A);
887: }
888: PetscLayoutSetUp(A->rmap);
889: PetscLayoutSetUp(A->cmap);
890: A->preallocated = PETSC_TRUE;
891: return(0);
892: }
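A minimal creation sequence (illustrative only) that relies on MatSetUp() instead of an explicit preallocation routine; the global size 100 is an arbitrary example.
.vb
   Mat A;

   MatCreate(PETSC_COMM_WORLD,&A);
   MatSetSizes(A,PETSC_DECIDE,PETSC_DECIDE,100,100);
   MatSetFromOptions(A);
   MatSetUp(A);                      /* default, possibly inefficient, preallocation */
   /* ... MatSetValues(), MatAssemblyBegin()/MatAssemblyEnd() ... */
   MatDestroy(&A);
.ve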
894: #if defined(PETSC_HAVE_SAWS)
895: #include <petscviewersaws.h>
896: #endif
897: /*@C
898: MatView - Visualizes a matrix object.
900: Collective on Mat
902: Input Parameters:
903: + mat - the matrix
904: - viewer - visualization context
906: Notes:
907: The available visualization contexts include
908: + PETSC_VIEWER_STDOUT_SELF - for sequential matrices
909: . PETSC_VIEWER_STDOUT_WORLD - for parallel matrices created on PETSC_COMM_WORLD
910: . PETSC_VIEWER_STDOUT_(comm) - for matrices created on MPI communicator comm
911: - PETSC_VIEWER_DRAW_WORLD - graphical display of nonzero structure
913: The user can open alternative visualization contexts with
914: + PetscViewerASCIIOpen() - Outputs matrix to a specified file
915: . PetscViewerBinaryOpen() - Outputs matrix in binary to a
916: specified file; corresponding input uses MatLoad()
917: . PetscViewerDrawOpen() - Outputs nonzero matrix structure to
918: an X window display
919: - PetscViewerSocketOpen() - Outputs matrix to Socket viewer.
920: Currently only the sequential dense and AIJ
921: matrix types support the Socket viewer.
923: The user can call PetscViewerPushFormat() to specify the output
924: format of ASCII printed objects (when using PETSC_VIEWER_STDOUT_SELF,
925: PETSC_VIEWER_STDOUT_WORLD and PetscViewerASCIIOpen). Available formats include
926: + PETSC_VIEWER_DEFAULT - default, prints matrix contents
927: . PETSC_VIEWER_ASCII_MATLAB - prints matrix contents in Matlab format
928: . PETSC_VIEWER_ASCII_DENSE - prints entire matrix including zeros
929: . PETSC_VIEWER_ASCII_COMMON - prints matrix contents, using a sparse
930: format common among all matrix types
931: . PETSC_VIEWER_ASCII_IMPL - prints matrix contents, using an implementation-specific
932: format (which is in many cases the same as the default)
933: . PETSC_VIEWER_ASCII_INFO - prints basic information about the matrix
934: size and structure (not the matrix entries)
935: . PETSC_VIEWER_ASCII_INFO_DETAIL - prints more detailed information about
936: the matrix structure
938: Options Database Keys:
939: + -mat_view ::ascii_info - Prints info on matrix at conclusion of MatAssemblyEnd()
940: . -mat_view ::ascii_info_detail - Prints more detailed info
941: . -mat_view - Prints matrix in ASCII format
942: . -mat_view ::ascii_matlab - Prints matrix in Matlab format
943: . -mat_view draw - PetscDraws nonzero structure of matrix, using MatView() and PetscDrawOpenX().
944: . -display <name> - Sets display name (default is host)
945: . -draw_pause <sec> - Sets number of seconds to pause after display
946: . -mat_view socket - Sends matrix to socket, can be accessed from Matlab (see Users-Manual: Chapter 12 Using MATLAB with PETSc for details)
947: . -viewer_socket_machine <machine> -
948: . -viewer_socket_port <port> -
949: . -mat_view binary - save matrix to file in binary format
950: - -viewer_binary_filename <name> -
951: Level: beginner
953: Notes:
954: see the manual page for MatLoad() for the exact format of the binary file when the binary
955: viewer is used.
957: See share/petsc/matlab/PetscBinaryRead.m for a Matlab code that can read in the binary file when the binary
958: viewer is used.
960: One can use '-mat_view draw -draw_pause -1' to pause the graphical display of matrix nonzero structure.
961: And then use the following mouse functions:
962: left mouse: zoom in
963: middle mouse: zoom out
964: right mouse: continue with the simulation
966: Concepts: matrices^viewing
967: Concepts: matrices^plotting
968: Concepts: matrices^printing
970: .seealso: PetscViewerPushFormat(), PetscViewerASCIIOpen(), PetscViewerDrawOpen(),
971: PetscViewerSocketOpen(), PetscViewerBinaryOpen(), MatLoad()
972: @*/
973: PetscErrorCode MatView(Mat mat,PetscViewer viewer)
974: {
975: PetscErrorCode ierr;
976: PetscInt rows,cols,rbs,cbs;
977: PetscBool iascii,ibinary;
978: PetscViewerFormat format;
979: PetscMPIInt size;
980: #if defined(PETSC_HAVE_SAWS)
981: PetscBool issaws;
982: #endif
987: if (!viewer) {
988: PetscViewerASCIIGetStdout(PetscObjectComm((PetscObject)mat),&viewer);
989: }
992: MatCheckPreallocated(mat,1);
993: PetscViewerGetFormat(viewer,&format);
994: MPI_Comm_size(PetscObjectComm((PetscObject)mat),&size);
995: if (size == 1 && format == PETSC_VIEWER_LOAD_BALANCE) return(0);
996: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERBINARY,&ibinary);
997: if (ibinary) {
998: PetscBool mpiio;
999: PetscViewerBinaryGetUseMPIIO(viewer,&mpiio);
1000: if (mpiio) SETERRQ(PetscObjectComm((PetscObject)viewer),PETSC_ERR_SUP,"PETSc matrix viewers do not support using MPI-IO, turn off that flag");
1001: }
1003: PetscLogEventBegin(MAT_View,mat,viewer,0,0);
1004: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);
1005: if ((!iascii || (format != PETSC_VIEWER_ASCII_INFO || format == PETSC_VIEWER_ASCII_INFO_DETAIL)) && mat->factortype) {
1006: SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"No viewers for factored matrix except ASCII info or info_detailed");
1007: }
1009: #if defined(PETSC_HAVE_SAWS)
1010: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERSAWS,&issaws);
1011: #endif
1012: if (iascii) {
1013: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ORDER,"Must call MatAssemblyBegin/End() before viewing matrix");
1014: PetscObjectPrintClassNamePrefixType((PetscObject)mat,viewer);
1015: if (format == PETSC_VIEWER_ASCII_INFO || format == PETSC_VIEWER_ASCII_INFO_DETAIL) {
1016: MatNullSpace nullsp,transnullsp;
1018: PetscViewerASCIIPushTab(viewer);
1019: MatGetSize(mat,&rows,&cols);
1020: MatGetBlockSizes(mat,&rbs,&cbs);
1021: if (rbs != 1 || cbs != 1) {
1022: if (rbs != cbs) {PetscViewerASCIIPrintf(viewer,"rows=%D, cols=%D, rbs=%D, cbs = %D\n",rows,cols,rbs,cbs);}
1023: else {PetscViewerASCIIPrintf(viewer,"rows=%D, cols=%D, bs=%D\n",rows,cols,rbs);}
1024: } else {
1025: PetscViewerASCIIPrintf(viewer,"rows=%D, cols=%D\n",rows,cols);
1026: }
1027: if (mat->factortype) {
1028: MatSolverType solver;
1029: MatFactorGetSolverType(mat,&solver);
1030: PetscViewerASCIIPrintf(viewer,"package used to perform factorization: %s\n",solver);
1031: }
1032: if (mat->ops->getinfo) {
1033: MatInfo info;
1034: MatGetInfo(mat,MAT_GLOBAL_SUM,&info);
1035: PetscViewerASCIIPrintf(viewer,"total: nonzeros=%.f, allocated nonzeros=%.f\n",info.nz_used,info.nz_allocated);
1036: PetscViewerASCIIPrintf(viewer,"total number of mallocs used during MatSetValues calls =%D\n",(PetscInt)info.mallocs);
1037: }
1038: MatGetNullSpace(mat,&nullsp);
1039: MatGetTransposeNullSpace(mat,&transnullsp);
1040: if (nullsp) {PetscViewerASCIIPrintf(viewer," has attached null space\n");}
1041: if (transnullsp && transnullsp != nullsp) {PetscViewerASCIIPrintf(viewer," has attached transposed null space\n");}
1042: MatGetNearNullSpace(mat,&nullsp);
1043: if (nullsp) {PetscViewerASCIIPrintf(viewer," has attached near null space\n");}
1044: }
1045: #if defined(PETSC_HAVE_SAWS)
1046: } else if (issaws) {
1047: PetscMPIInt rank;
1049: PetscObjectName((PetscObject)mat);
1050: MPI_Comm_rank(PETSC_COMM_WORLD,&rank);
1051: if (!((PetscObject)mat)->amsmem && !rank) {
1052: PetscObjectViewSAWs((PetscObject)mat,viewer);
1053: }
1054: #endif
1055: }
1056: if ((format == PETSC_VIEWER_NATIVE || format == PETSC_VIEWER_LOAD_BALANCE) && mat->ops->viewnative) {
1057: PetscViewerASCIIPushTab(viewer);
1058: (*mat->ops->viewnative)(mat,viewer);
1059: PetscViewerASCIIPopTab(viewer);
1060: } else if (mat->ops->view) {
1061: PetscViewerASCIIPushTab(viewer);
1062: (*mat->ops->view)(mat,viewer);
1063: PetscViewerASCIIPopTab(viewer);
1064: }
1065: if (iascii) {
1066: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ORDER,"Must call MatAssemblyBegin/End() before viewing matrix");
1067: PetscViewerGetFormat(viewer,&format);
1068: if (format == PETSC_VIEWER_ASCII_INFO || format == PETSC_VIEWER_ASCII_INFO_DETAIL) {
1069: PetscViewerASCIIPopTab(viewer);
1070: }
1071: }
1072: PetscLogEventEnd(MAT_View,mat,viewer,0,0);
1073: return(0);
1074: }
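A short sketch (illustrative only) of viewing an assembled matrix A with different formats and viewers.
.vb
   PetscViewer viewer = PETSC_VIEWER_STDOUT_WORLD;

   PetscViewerPushFormat(viewer,PETSC_VIEWER_ASCII_INFO);
   MatView(A,viewer);                    /* sizes and nonzero counts only */
   PetscViewerPopFormat(viewer);
   MatView(A,PETSC_VIEWER_DRAW_WORLD);   /* nonzero structure in a graphics window */
.ve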
1076: #if defined(PETSC_USE_DEBUG)
1077: #include <../src/sys/totalview/tv_data_display.h>
1078: PETSC_UNUSED static int TV_display_type(const struct _p_Mat *mat)
1079: {
1080: TV_add_row("Local rows", "int", &mat->rmap->n);
1081: TV_add_row("Local columns", "int", &mat->cmap->n);
1082: TV_add_row("Global rows", "int", &mat->rmap->N);
1083: TV_add_row("Global columns", "int", &mat->cmap->N);
1084: TV_add_row("Typename", TV_ascii_string_type, ((PetscObject)mat)->type_name);
1085: return TV_format_OK;
1086: }
1087: #endif
1089: /*@C
1090: MatLoad - Loads a matrix that has been stored in binary format
1091: with MatView(). The matrix format is determined from the options database.
1092: Generates a parallel MPI matrix if the communicator has more than one
1093: processor. The default matrix type is AIJ.
1095: Collective on PetscViewer
1097: Input Parameters:
1098: + newmat - the newly loaded matrix, this needs to have been created with MatCreate()
1099: or some related function before a call to MatLoad()
1100: - viewer - binary file viewer, created with PetscViewerBinaryOpen()
1102: Options Database Keys:
1103: . -matload_block_size <bs> - used with block matrix formats (MATSEQBAIJ, ...) to specify the block size
1107: Level: beginner
1109: Notes:
1110: If the Mat type has not yet been given then MATAIJ is used, call MatSetFromOptions() on the
1111: Mat before calling this routine if you wish to set it from the options database.
1113: MatLoad() automatically loads into the options database any options
1114: given in the file filename.info where filename is the name of the file
1115: that was passed to the PetscViewerBinaryOpen(). The options in the info
1116: file will be ignored if you use the -viewer_binary_skip_info option.
1118: If the type or size of newmat is not set before a call to MatLoad, PETSc
1119: sets the default matrix type AIJ and sets the local and global sizes.
1120: If type and/or size is already set, then the same are used.
1122: In parallel, each processor can load a subset of rows (or the
1123: entire matrix). This routine is especially useful when a large
1124: matrix is stored on disk and only part of it is desired on each
1125: processor. For example, a parallel solver may access only some of
1126: the rows from each processor. The algorithm used here reads
1127: relatively small blocks of data rather than reading the entire
1128: matrix and then subsetting it.
1130: Notes for advanced users:
1131: Most users should not need to know the details of the binary storage
1132: format, since MatLoad() and MatView() completely hide these details.
1133: But for anyone who's interested, the standard binary matrix storage
1134: format is
1136: $ int MAT_FILE_CLASSID
1137: $ int number of rows
1138: $ int number of columns
1139: $ int total number of nonzeros
1140: $ int *number nonzeros in each row
1141: $ int *column indices of all nonzeros (starting index is zero)
1142: $ PetscScalar *values of all nonzeros
1144: PETSc automatically does the byte swapping for
1145: machines that store the bytes reversed, e.g. DEC Alpha, FreeBSD,
1146: Linux, Windows and the Paragon; thus if you write your own binary
1147: read/write routines you have to swap the bytes; see PetscBinaryRead()
1148: and PetscBinaryWrite() to see how this may be done.
1150: .keywords: matrix, load, binary, input
1152: .seealso: PetscViewerBinaryOpen(), MatView(), VecLoad()
1154: @*/
1155: PetscErrorCode MatLoad(Mat newmat,PetscViewer viewer)
1156: {
1158: PetscBool isbinary,flg;
1163: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERBINARY,&isbinary);
1164: if (!isbinary) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Invalid viewer; open viewer with PetscViewerBinaryOpen()");
1166: if (!((PetscObject)newmat)->type_name) {
1167: MatSetType(newmat,MATAIJ);
1168: }
1170: if (!newmat->ops->load) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"MatLoad is not supported for type");
1171: PetscLogEventBegin(MAT_Load,viewer,0,0,0);
1172: (*newmat->ops->load)(newmat,viewer);
1173: PetscLogEventEnd(MAT_Load,viewer,0,0,0);
1175: flg = PETSC_FALSE;
1176: PetscOptionsGetBool(((PetscObject)newmat)->options,((PetscObject)newmat)->prefix,"-matload_symmetric",&flg,NULL);
1177: if (flg) {
1178: MatSetOption(newmat,MAT_SYMMETRIC,PETSC_TRUE);
1179: MatSetOption(newmat,MAT_SYMMETRY_ETERNAL,PETSC_TRUE);
1180: }
1181: flg = PETSC_FALSE;
1182: PetscOptionsGetBool(((PetscObject)newmat)->options,((PetscObject)newmat)->prefix,"-matload_spd",&flg,NULL);
1183: if (flg) {
1184: MatSetOption(newmat,MAT_SPD,PETSC_TRUE);
1185: }
1186: return(0);
1187: }
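A minimal loading sketch (illustrative only); the file name matrix.dat is just a placeholder for a file previously written with MatView() and a binary viewer.
.vb
   Mat         A;
   PetscViewer fd;

   PetscViewerBinaryOpen(PETSC_COMM_WORLD,"matrix.dat",FILE_MODE_READ,&fd);
   MatCreate(PETSC_COMM_WORLD,&A);
   MatSetFromOptions(A);                 /* honor a -mat_type given on the command line */
   MatLoad(A,fd);
   PetscViewerDestroy(&fd);
.ve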
1189: PetscErrorCode MatDestroy_Redundant(Mat_Redundant **redundant)
1190: {
1192: Mat_Redundant *redund = *redundant;
1193: PetscInt i;
1196: if (redund){
1197: if (redund->matseq) { /* via MatCreateSubMatrices() */
1198: ISDestroy(&redund->isrow);
1199: ISDestroy(&redund->iscol);
1200: MatDestroySubMatrices(1,&redund->matseq);
1201: } else {
1202: PetscFree2(redund->send_rank,redund->recv_rank);
1203: PetscFree(redund->sbuf_j);
1204: PetscFree(redund->sbuf_a);
1205: for (i=0; i<redund->nrecvs; i++) {
1206: PetscFree(redund->rbuf_j[i]);
1207: PetscFree(redund->rbuf_a[i]);
1208: }
1209: PetscFree4(redund->sbuf_nz,redund->rbuf_nz,redund->rbuf_j,redund->rbuf_a);
1210: }
1212: if (redund->subcomm) {
1213: PetscCommDestroy(&redund->subcomm);
1214: }
1215: PetscFree(redund);
1216: }
1217: return(0);
1218: }
1220: /*@
1221: MatDestroy - Frees space taken by a matrix.
1223: Collective on Mat
1225: Input Parameter:
1226: . A - the matrix
1228: Level: beginner
1230: @*/
1231: PetscErrorCode MatDestroy(Mat *A)
1232: {
1236: if (!*A) return(0);
1238: if (--((PetscObject)(*A))->refct > 0) {*A = NULL; return(0);}
1240: /* if memory was published with SAWs then destroy it */
1241: PetscObjectSAWsViewOff((PetscObject)*A);
1242: if ((*A)->ops->destroy) {
1243: (*(*A)->ops->destroy)(*A);
1244: }
1246: PetscFree((*A)->defaultvectype);
1247: PetscFree((*A)->bsizes);
1248: PetscFree((*A)->solvertype);
1249: MatDestroy_Redundant(&(*A)->redundant);
1250: MatNullSpaceDestroy(&(*A)->nullsp);
1251: MatNullSpaceDestroy(&(*A)->transnullsp);
1252: MatNullSpaceDestroy(&(*A)->nearnullsp);
1253: MatDestroy(&(*A)->schur);
1254: PetscLayoutDestroy(&(*A)->rmap);
1255: PetscLayoutDestroy(&(*A)->cmap);
1256: PetscHeaderDestroy(A);
1257: return(0);
1258: }
1260: /*@C
1261: MatSetValues - Inserts or adds a block of values into a matrix.
1262: These values may be cached, so MatAssemblyBegin() and MatAssemblyEnd()
1263: MUST be called after all calls to MatSetValues() have been completed.
1265: Not Collective
1267: Input Parameters:
1268: + mat - the matrix
1269: . v - a logically two-dimensional array of values
1270: . m, idxm - the number of rows and their global indices
1271: . n, idxn - the number of columns and their global indices
1272: - addv - either ADD_VALUES or INSERT_VALUES, where
1273: ADD_VALUES adds values to any existing entries, and
1274: INSERT_VALUES replaces existing entries with new values
1276: Notes:
1277: If you create the matrix yourself (that is not with a call to DMCreateMatrix()) then you MUST call MatXXXXSetPreallocation() or
1278: MatSetUp() before using this routine
1280: By default the values, v, are row-oriented. See MatSetOption() for other options.
1282: Calls to MatSetValues() with the INSERT_VALUES and ADD_VALUES
1283: options cannot be mixed without intervening calls to the assembly
1284: routines.
1286: MatSetValues() uses 0-based row and column numbers in Fortran
1287: as well as in C.
1289: Negative indices may be passed in idxm and idxn; these rows and columns are
1290: simply ignored. This allows easily inserting element stiffness matrices
1291: with homogeneous Dirichlet boundary conditions that you don't want represented
1292: in the matrix.
1294: Efficiency Alert:
1295: The routine MatSetValuesBlocked() may offer much better efficiency
1296: for users of block sparse formats (MATSEQBAIJ and MATMPIBAIJ).
1298: Level: beginner
1300: Developer Notes:
1301: This is labeled with C so does not automatically generate Fortran stubs and interfaces
1302: because it requires multiple Fortran interfaces depending on which arguments are scalar or arrays.
1304: Concepts: matrices^putting entries in
1306: .seealso: MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValuesBlocked(), MatSetValuesLocal(),
1307: InsertMode, INSERT_VALUES, ADD_VALUES
1308: @*/
1309: PetscErrorCode MatSetValues(Mat mat,PetscInt m,const PetscInt idxm[],PetscInt n,const PetscInt idxn[],const PetscScalar v[],InsertMode addv)
1310: {
1312: #if defined(PETSC_USE_DEBUG)
1313: PetscInt i,j;
1314: #endif
1319: if (!m || !n) return(0); /* no values to insert */
1323: MatCheckPreallocated(mat,1);
1324: if (mat->insertmode == NOT_SET_VALUES) {
1325: mat->insertmode = addv;
1326: }
1327: #if defined(PETSC_USE_DEBUG)
1328: else if (mat->insertmode != addv) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Cannot mix add values and insert values");
1329: if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
1330: if (!mat->ops->setvalues) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
1332: for (i=0; i<m; i++) {
1333: for (j=0; j<n; j++) {
1334: if (mat->erroriffailure && PetscIsInfOrNanScalar(v[i*n+j]))
1335: #if defined(PETSC_USE_COMPLEX)
1336: SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_FP,"Inserting %g+ig at matrix entry (%D,%D)",(double)PetscRealPart(v[i*n+j]),(double)PetscImaginaryPart(v[i*n+j]),idxm[i],idxn[j]);
1337: #else
1338: SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_FP,"Inserting %g at matrix entry (%D,%D)",(double)v[i*n+j],idxm[i],idxn[j]);
1339: #endif
1340: }
1341: }
1342: #endif
1344: if (mat->assembled) {
1345: mat->was_assembled = PETSC_TRUE;
1346: mat->assembled = PETSC_FALSE;
1347: }
1348: PetscLogEventBegin(MAT_SetValues,mat,0,0,0);
1349: (*mat->ops->setvalues)(mat,m,idxm,n,idxn,v,addv);
1350: PetscLogEventEnd(MAT_SetValues,mat,0,0,0);
1351: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_VECCUDA)
1352: if (mat->valid_GPU_matrix != PETSC_OFFLOAD_UNALLOCATED) {
1353: mat->valid_GPU_matrix = PETSC_OFFLOAD_CPU;
1354: }
1355: #endif
1356: return(0);
1357: }
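A minimal assembly sketch (illustrative only, not part of matrix.c): each process inserts the rows of a 1-D Laplacian stencil that it owns, exploiting the rule that negative column indices are ignored; A is assumed to be created, sized, and preallocated already.
.vb
   PetscInt    rstart,rend,i,N,cols[3];
   PetscScalar vals[3] = {-1.0,2.0,-1.0};

   MatGetSize(A,&N,NULL);
   MatGetOwnershipRange(A,&rstart,&rend);
   for (i=rstart; i<rend; i++) {
     cols[0] = i-1; cols[1] = i; cols[2] = i+1;   /* cols[0] is -1 in the first row */
     if (cols[2] >= N) cols[2] = -1;              /* negative indices are silently ignored */
     MatSetValues(A,1,&i,3,cols,vals,INSERT_VALUES);
   }
   MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
   MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
.ve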
1360: /*@
1361: MatSetValuesRowLocal - Inserts a row (block row for BAIJ matrices) of nonzero
1362: values into a matrix
1364: Not Collective
1366: Input Parameters:
1367: + mat - the matrix
1368: . row - the (block) row to set
1369: - v - a logically two-dimensional array of values
1371: Notes:
1372: The values, v, are column-oriented (for the block version) and sorted.
1374: All the nonzeros in the row must be provided
1376: The matrix must have previously had its column indices set
1378: The row must belong to this process
1380: Level: intermediate
1382: Concepts: matrices^putting entries in
1384: .seealso: MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValuesBlocked(), MatSetValuesLocal(),
1385: InsertMode, INSERT_VALUES, ADD_VALUES, MatSetValues(), MatSetValuesRow(), MatSetLocalToGlobalMapping()
1386: @*/
1387: PetscErrorCode MatSetValuesRowLocal(Mat mat,PetscInt row,const PetscScalar v[])
1388: {
1390: PetscInt globalrow;
1396: ISLocalToGlobalMappingApply(mat->rmap->mapping,1,&row,&globalrow);
1397: MatSetValuesRow(mat,globalrow,v);
1398: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_VECCUDA)
1399: if (mat->valid_GPU_matrix != PETSC_OFFLOAD_UNALLOCATED) {
1400: mat->valid_GPU_matrix = PETSC_OFFLOAD_CPU;
1401: }
1402: #endif
1403: return(0);
1404: }
1406: /*@
1407: MatSetValuesRow - Inserts a row (block row for BAIJ matrices) of nonzero
1408: values into a matrix
1410: Not Collective
1412: Input Parameters:
1413: + mat - the matrix
1414: . row - the (block) row to set
1415: - v - a logically two-dimensional (column major) array of values for block matrices with blocksize larger than one, otherwise a one dimensional array of values
1417: Notes:
1418: The values, v, are column-oriented for the block version.
1420: All the nonzeros in the row must be provided
1422: THE MATRIX MUST HAVE PREVIOUSLY HAD ITS COLUMN INDICES SET. IT IS RARE THAT THIS ROUTINE IS USED, usually MatSetValues() is used.
1424: The row must belong to this process
1426: Level: advanced
1428: Concepts: matrices^putting entries in
1430: .seealso: MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValuesBlocked(), MatSetValuesLocal(),
1431: InsertMode, INSERT_VALUES, ADD_VALUES, MatSetValues()
1432: @*/
1433: PetscErrorCode MatSetValuesRow(Mat mat,PetscInt row,const PetscScalar v[])
1434: {
1440: MatCheckPreallocated(mat,1);
1442: #if defined(PETSC_USE_DEBUG)
1443: if (mat->insertmode == ADD_VALUES) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Cannot mix add and insert values");
1444: if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
1445: #endif
1446: mat->insertmode = INSERT_VALUES;
1448: if (mat->assembled) {
1449: mat->was_assembled = PETSC_TRUE;
1450: mat->assembled = PETSC_FALSE;
1451: }
1452: PetscLogEventBegin(MAT_SetValues,mat,0,0,0);
1453: if (!mat->ops->setvaluesrow) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
1454: (*mat->ops->setvaluesrow)(mat,row,v);
1455: PetscLogEventEnd(MAT_SetValues,mat,0,0,0);
1456: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_VECCUDA)
1457: if (mat->valid_GPU_matrix != PETSC_OFFLOAD_UNALLOCATED) {
1458: mat->valid_GPU_matrix = PETSC_OFFLOAD_CPU;
1459: }
1460: #endif
1461: return(0);
1462: }
1464: /*@
1465: MatSetValuesStencil - Inserts or adds a block of values into a matrix.
1466: Using structured grid indexing
1468: Not Collective
1470: Input Parameters:
1471: + mat - the matrix
1472: . m - number of rows being entered
1473: . idxm - grid coordinates (and component number when dof > 1) for matrix rows being entered
1474: . n - number of columns being entered
1475: . idxn - grid coordinates (and component number when dof > 1) for matrix columns being entered
1476: . v - a logically two-dimensional array of values
1477: - addv - either ADD_VALUES or INSERT_VALUES, where
1478: ADD_VALUES adds values to any existing entries, and
1479: INSERT_VALUES replaces existing entries with new values
1481: Notes:
1482: By default the values, v, are row-oriented. See MatSetOption() for other options.
1484: Calls to MatSetValuesStencil() with the INSERT_VALUES and ADD_VALUES
1485: options cannot be mixed without intervening calls to the assembly
1486: routines.
1488: The grid coordinates are across the entire grid, not just the local portion
1490: MatSetValuesStencil() uses 0-based row and column numbers in Fortran
1491: as well as in C.
1493: For setting/accessing vector values via array coordinates you can use the DMDAVecGetArray() routine
1495: In order to use this routine you must either obtain the matrix with DMCreateMatrix()
1496: or call MatSetLocalToGlobalMapping() and MatSetStencil() first.
1498: The columns and rows in the stencil passed in MUST be contained within the
1499: ghost region of the given process as set with DMDACreateXXX() or MatSetStencil(). For example,
1500: if you create a DMDA with an overlap of one grid level and on a particular process its first
1501: local nonghost x logical coordinate is 6 (so its first ghost x logical coordinate is 5) the
1502: first i index you can use in your column and row indices in MatSetStencil() is 5.
1504: In Fortran idxm and idxn should be declared as
1505: $ MatStencil idxm(4,m),idxn(4,n)
1506: and the values inserted using
1507: $ idxm(MatStencil_i,1) = i
1508: $ idxm(MatStencil_j,1) = j
1509: $ idxm(MatStencil_k,1) = k
1510: $ idxm(MatStencil_c,1) = c
1511: etc
1513: For periodic boundary conditions, use negative indices for values to the left (below 0), which are
1514: obtained by wrapping values from the right edge. For values to the right of the last entry, use that index plus one,
1515: etc., to obtain values wrapped from the left edge. This does not work for anything but the
1516: DM_BOUNDARY_PERIODIC boundary type.
1518: For indices that don't mean anything for your case (like the k index when working in 2d, or the c index when you have
1519: a single value per point) you can skip filling those indices.
1521: Inspired by the structured grid interface to the HYPRE package
1522: (http://www.llnl.gov/CASC/hypre)
1524: Efficiency Alert:
1525: The routine MatSetValuesBlockedStencil() may offer much better efficiency
1526: for users of block sparse formats (MATSEQBAIJ and MATMPIBAIJ).
1528: Level: beginner
1530: Concepts: matrices^putting entries in
1532: .seealso: MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValuesBlocked(), MatSetValuesLocal()
1533: MatSetValues(), MatSetValuesBlockedStencil(), MatSetStencil(), DMCreateMatrix(), DMDAVecGetArray(), MatStencil
1534: @*/
1535: PetscErrorCode MatSetValuesStencil(Mat mat,PetscInt m,const MatStencil idxm[],PetscInt n,const MatStencil idxn[],const PetscScalar v[],InsertMode addv)
1536: {
1538: PetscInt buf[8192],*bufm=0,*bufn=0,*jdxm,*jdxn;
1539: PetscInt j,i,dim = mat->stencil.dim,*dims = mat->stencil.dims+1,tmp;
1540: PetscInt *starts = mat->stencil.starts,*dxm = (PetscInt*)idxm,*dxn = (PetscInt*)idxn,sdim = dim - (1 - (PetscInt)mat->stencil.noc);
1543: if (!m || !n) return(0); /* no values to insert */
1550: if ((m+n) <= (PetscInt)(sizeof(buf)/sizeof(PetscInt))) {
1551: jdxm = buf; jdxn = buf+m;
1552: } else {
1553: PetscMalloc2(m,&bufm,n,&bufn);
1554: jdxm = bufm; jdxn = bufn;
1555: }
1556: for (i=0; i<m; i++) {
1557: for (j=0; j<3-sdim; j++) dxm++;
1558: tmp = *dxm++ - starts[0];
1559: for (j=0; j<dim-1; j++) {
1560: if ((*dxm++ - starts[j+1]) < 0 || tmp < 0) tmp = -1;
1561: else tmp = tmp*dims[j] + *(dxm-1) - starts[j+1];
1562: }
1563: if (mat->stencil.noc) dxm++;
1564: jdxm[i] = tmp;
1565: }
1566: for (i=0; i<n; i++) {
1567: for (j=0; j<3-sdim; j++) dxn++;
1568: tmp = *dxn++ - starts[0];
1569: for (j=0; j<dim-1; j++) {
1570: if ((*dxn++ - starts[j+1]) < 0 || tmp < 0) tmp = -1;
1571: else tmp = tmp*dims[j] + *(dxn-1) - starts[j+1];
1572: }
1573: if (mat->stencil.noc) dxn++;
1574: jdxn[i] = tmp;
1575: }
1576: MatSetValuesLocal(mat,m,jdxm,n,jdxn,v,addv);
1577: PetscFree2(bufm,bufn);
1578: return(0);
1579: }
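A minimal sketch (illustrative only) of inserting the 5-point Laplacian row at grid point (i,j) into a matrix obtained from DMCreateMatrix() on a 2d DMDA with one degree of freedom; (i,j) is assumed to lie in the local (ghosted) region.
.vb
   MatStencil  row,col[5];
   PetscScalar v[5] = {4.0,-1.0,-1.0,-1.0,-1.0};

   row.i = i;      row.j = j;        /* the k and c fields need not be set here */
   col[0].i = i;   col[0].j = j;
   col[1].i = i-1; col[1].j = j;
   col[2].i = i+1; col[2].j = j;
   col[3].i = i;   col[3].j = j-1;
   col[4].i = i;   col[4].j = j+1;
   MatSetValuesStencil(A,1,&row,5,col,v,INSERT_VALUES);
.ve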
1581: /*@
1582: MatSetValuesBlockedStencil - Inserts or adds a block of values into a matrix.
1583: Using structured grid indexing
1585: Not Collective
1587: Input Parameters:
1588: + mat - the matrix
1589: . m - number of rows being entered
1590: . idxm - grid coordinates for matrix rows being entered
1591: . n - number of columns being entered
1592: . idxn - grid coordinates for matrix columns being entered
1593: . v - a logically two-dimensional array of values
1594: - addv - either ADD_VALUES or INSERT_VALUES, where
1595: ADD_VALUES adds values to any existing entries, and
1596: INSERT_VALUES replaces existing entries with new values
1598: Notes:
1599: By default the values, v, are row-oriented and unsorted.
1600: See MatSetOption() for other options.
1602: Calls to MatSetValuesBlockedStencil() with the INSERT_VALUES and ADD_VALUES
1603: options cannot be mixed without intervening calls to the assembly
1604: routines.
1606: The grid coordinates are across the entire grid, not just the local portion
1608: MatSetValuesBlockedStencil() uses 0-based row and column numbers in Fortran
1609: as well as in C.
1611: For setting/accessing vector values via array coordinates you can use the DMDAVecGetArray() routine
1613: In order to use this routine you must either obtain the matrix with DMCreateMatrix()
1614: or call MatSetBlockSize(), MatSetLocalToGlobalMapping() and MatSetStencil() first.
1616: The columns and rows in the stencil passed in MUST be contained within the
1617: ghost region of the given process as set with DMDACreateXXX() or MatSetStencil(). For example,
1618: if you create a DMDA with an overlap of one grid level and on a particular process its first
1619: local nonghost x logical coordinate is 6 (so its first ghost x logical coordinate is 5) the
1620: first i index you can use in your column and row indices in MatSetStencil() is 5.
1622: In Fortran idxm and idxn should be declared as
1623: $ MatStencil idxm(4,m),idxn(4,n)
1624: and the values inserted using
1625: $ idxm(MatStencil_i,1) = i
1626: $ idxm(MatStencil_j,1) = j
1627: $ idxm(MatStencil_k,1) = k
1628: etc
1630: Negative indices may be passed in idxm and idxn; these rows and columns are
1631: simply ignored. This allows easily inserting element stiffness matrices
1632: with homogeneous Dirichlet boundary conditions that you don't want represented
1633: in the matrix.
1635: Inspired by the structured grid interface to the HYPRE package
1636: (http://www.llnl.gov/CASC/hypre)
1638: Level: beginner
1640: Concepts: matrices^putting entries in
1642: .seealso: MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValuesBlocked(), MatSetValuesLocal()
1643: MatSetValues(), MatSetValuesStencil(), MatSetStencil(), DMCreateMatrix(), DMDAVecGetArray(), MatStencil,
1644: MatSetBlockSize(), MatSetLocalToGlobalMapping()
1645: @*/
1646: PetscErrorCode MatSetValuesBlockedStencil(Mat mat,PetscInt m,const MatStencil idxm[],PetscInt n,const MatStencil idxn[],const PetscScalar v[],InsertMode addv)
1647: {
1649: PetscInt buf[8192],*bufm=0,*bufn=0,*jdxm,*jdxn;
1650: PetscInt j,i,dim = mat->stencil.dim,*dims = mat->stencil.dims+1,tmp;
1651: PetscInt *starts = mat->stencil.starts,*dxm = (PetscInt*)idxm,*dxn = (PetscInt*)idxn,sdim = dim - (1 - (PetscInt)mat->stencil.noc);
1654: if (!m || !n) return(0); /* no values to insert */
1661: if ((m+n) <= (PetscInt)(sizeof(buf)/sizeof(PetscInt))) {
1662: jdxm = buf; jdxn = buf+m;
1663: } else {
1664: PetscMalloc2(m,&bufm,n,&bufn);
1665: jdxm = bufm; jdxn = bufn;
1666: }
1667: for (i=0; i<m; i++) {
1668: for (j=0; j<3-sdim; j++) dxm++;
1669: tmp = *dxm++ - starts[0];
1670: for (j=0; j<sdim-1; j++) {
1671: if ((*dxm++ - starts[j+1]) < 0 || tmp < 0) tmp = -1;
1672: else tmp = tmp*dims[j] + *(dxm-1) - starts[j+1];
1673: }
1674: dxm++;
1675: jdxm[i] = tmp;
1676: }
1677: for (i=0; i<n; i++) {
1678: for (j=0; j<3-sdim; j++) dxn++;
1679: tmp = *dxn++ - starts[0];
1680: for (j=0; j<sdim-1; j++) {
1681: if ((*dxn++ - starts[j+1]) < 0 || tmp < 0) tmp = -1;
1682: else tmp = tmp*dims[j] + *(dxn-1) - starts[j+1];
1683: }
1684: dxn++;
1685: jdxn[i] = tmp;
1686: }
1687: MatSetValuesBlockedLocal(mat,m,jdxm,n,jdxn,v,addv);
1688: PetscFree2(bufm,bufn);
1689: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_VECCUDA)
1690: if (mat->valid_GPU_matrix != PETSC_OFFLOAD_UNALLOCATED) {
1691: mat->valid_GPU_matrix = PETSC_OFFLOAD_CPU;
1692: }
1693: #endif
1694: return(0);
1695: }
1697: /*@
1698: MatSetStencil - Sets the grid information for setting values into a matrix via
1699: MatSetValuesStencil()
1701: Not Collective
1703: Input Parameters:
1704: + mat - the matrix
1705: . dim - dimension of the grid 1, 2, or 3
1706: . dims - number of grid points in x, y, and z direction, including ghost points on your processor
1707: . starts - starting point of ghost nodes on your processor in x, y, and z direction
1708: - dof - number of degrees of freedom per node
1711: Inspired by the structured grid interface to the HYPRE package
1712:   (http://www.llnl.gov/CASC/hypre)
1714: For matrices generated with DMCreateMatrix() this routine is automatically called and so not needed by the
1715: user.
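   If you do set the stencil yourself, a minimal sketch for a 2d grid follows; the DMDA da and the value of dof
   are assumed to come from your own setup:
.vb
     PetscInt gxs,gys,gxm,gym,dims[2],starts[2];

     DMDAGetGhostCorners(da,&gxs,&gys,NULL,&gxm,&gym,NULL);
     dims[0]   = gxm; dims[1]   = gym;    /* ghosted sizes in x and y   */
     starts[0] = gxs; starts[1] = gys;    /* ghosted corners in x and y */
     MatSetStencil(mat,2,dims,starts,dof);
.ve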
1717: Level: beginner
1719: Concepts: matrices^putting entries in
1721: .seealso: MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValuesBlocked(), MatSetValuesLocal()
1722: MatSetValues(), MatSetValuesBlockedStencil(), MatSetValuesStencil()
1723: @*/
1724: PetscErrorCode MatSetStencil(Mat mat,PetscInt dim,const PetscInt dims[],const PetscInt starts[],PetscInt dof)
1725: {
1726: PetscInt i;
1733: mat->stencil.dim = dim + (dof > 1);
1734: for (i=0; i<dim; i++) {
1735: mat->stencil.dims[i] = dims[dim-i-1]; /* copy the values in backwards */
1736: mat->stencil.starts[i] = starts[dim-i-1];
1737: }
1738: mat->stencil.dims[dim] = dof;
1739: mat->stencil.starts[dim] = 0;
1740: mat->stencil.noc = (PetscBool)(dof == 1);
1741: return(0);
1742: }
1744: /*@C
1745: MatSetValuesBlocked - Inserts or adds a block of values into a matrix.
1747: Not Collective
1749: Input Parameters:
1750: + mat - the matrix
1751: . v - a logically two-dimensional array of values
1752: . m, idxm - the number of block rows and their global block indices
1753: . n, idxn - the number of block columns and their global block indices
1754: - addv - either ADD_VALUES or INSERT_VALUES, where
1755: ADD_VALUES adds values to any existing entries, and
1756: INSERT_VALUES replaces existing entries with new values
1758: Notes:
1759: If you create the matrix yourself (that is not with a call to DMCreateMatrix()) then you MUST call
1760: MatXXXXSetPreallocation() or MatSetUp() before using this routine.
1762: The m and n count the NUMBER of blocks in the row direction and column direction,
1763: NOT the total number of rows/columns; for example, if the block size is 2 and
1764: you are passing in values for rows 2,3,4,5 then m would be 2 (not 4).
1765: The values in idxm would be 1 2; that is the first index for each block divided by
1766: the block size.
1768: Note that you must call MatSetBlockSize() when constructing this matrix (before
1769: preallocating it).
1771: By default the values, v, are row-oriented, so the layout of
1772: v is the same as for MatSetValues(). See MatSetOption() for other options.
1774: Calls to MatSetValuesBlocked() with the INSERT_VALUES and ADD_VALUES
1775: options cannot be mixed without intervening calls to the assembly
1776: routines.
1778: MatSetValuesBlocked() uses 0-based row and column numbers in Fortran
1779: as well as in C.
1781: Negative indices may be passed in idxm and idxn, these rows and columns are
1782: simply ignored. This allows easily inserting element stiffness matrices
1783:    with homogeneous Dirichlet boundary conditions that you don't want represented
1784: in the matrix.
1786: Each time an entry is set within a sparse matrix via MatSetValues(),
1787: internal searching must be done to determine where to place the
1788: data in the matrix storage space. By instead inserting blocks of
1789: entries via MatSetValuesBlocked(), the overhead of matrix assembly is
1790: reduced.
1792: Example:
1793: $ Suppose m=n=2 and block size(bs) = 2 The array is
1794: $
1795: $ 1 2 | 3 4
1796: $ 5 6 | 7 8
1797: $ - - - | - - -
1798: $ 9 10 | 11 12
1799: $ 13 14 | 15 16
1800: $
1801: $ v[] should be passed in like
1802: $ v[] = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16]
1803: $
1804: $ If you are not using row oriented storage of v (that is you called MatSetOption(mat,MAT_ROW_ORIENTED,PETSC_FALSE)) then
1805: $ v[] = [1,5,9,13,2,6,10,14,3,7,11,15,4,8,12,16]
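   Expressed as code, the example above could be inserted as follows (a sketch assuming a preallocated matrix A
   with block size 2):
.vb
     PetscInt    idxm[2] = {1,2},idxn[2] = {1,2};   /* block rows/columns 1 and 2 = point rows/columns 2,3 and 4,5 */
     PetscScalar v[16]   = {1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16};

     MatSetValuesBlocked(A,2,idxm,2,idxn,v,INSERT_VALUES);
     MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
     MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
.ve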
1807: Level: intermediate
1809: Concepts: matrices^putting entries in blocked
1811: .seealso: MatSetBlockSize(), MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValues(), MatSetValuesBlockedLocal()
1812: @*/
1813: PetscErrorCode MatSetValuesBlocked(Mat mat,PetscInt m,const PetscInt idxm[],PetscInt n,const PetscInt idxn[],const PetscScalar v[],InsertMode addv)
1814: {
1820: if (!m || !n) return(0); /* no values to insert */
1824: MatCheckPreallocated(mat,1);
1825: if (mat->insertmode == NOT_SET_VALUES) {
1826: mat->insertmode = addv;
1827: }
1828: #if defined(PETSC_USE_DEBUG)
1829: else if (mat->insertmode != addv) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Cannot mix add values and insert values");
1830: if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
1831: if (!mat->ops->setvaluesblocked && !mat->ops->setvalues) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
1832: #endif
1834: if (mat->assembled) {
1835: mat->was_assembled = PETSC_TRUE;
1836: mat->assembled = PETSC_FALSE;
1837: }
1838: PetscLogEventBegin(MAT_SetValues,mat,0,0,0);
1839: if (mat->ops->setvaluesblocked) {
1840: (*mat->ops->setvaluesblocked)(mat,m,idxm,n,idxn,v,addv);
1841: } else {
1842: PetscInt buf[8192],*bufr=0,*bufc=0,*iidxm,*iidxn;
1843: PetscInt i,j,bs,cbs;
1844: MatGetBlockSizes(mat,&bs,&cbs);
1845: if (m*bs+n*cbs <= (PetscInt)(sizeof(buf)/sizeof(PetscInt))) {
1846: iidxm = buf; iidxn = buf + m*bs;
1847: } else {
1848: PetscMalloc2(m*bs,&bufr,n*cbs,&bufc);
1849: iidxm = bufr; iidxn = bufc;
1850: }
1851: for (i=0; i<m; i++) {
1852: for (j=0; j<bs; j++) {
1853: iidxm[i*bs+j] = bs*idxm[i] + j;
1854: }
1855: }
1856: for (i=0; i<n; i++) {
1857: for (j=0; j<cbs; j++) {
1858: iidxn[i*cbs+j] = cbs*idxn[i] + j;
1859: }
1860: }
1861: MatSetValues(mat,m*bs,iidxm,n*cbs,iidxn,v,addv);
1862: PetscFree2(bufr,bufc);
1863: }
1864: PetscLogEventEnd(MAT_SetValues,mat,0,0,0);
1865: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_VECCUDA)
1866: if (mat->valid_GPU_matrix != PETSC_OFFLOAD_UNALLOCATED) {
1867: mat->valid_GPU_matrix = PETSC_OFFLOAD_CPU;
1868: }
1869: #endif
1870: return(0);
1871: }
1873: /*@
1874: MatGetValues - Gets a block of values from a matrix.
1876: Not Collective; currently only returns a local block
1878: Input Parameters:
1879: + mat - the matrix
1880: . v - a logically two-dimensional array for storing the values
1881: . m, idxm - the number of rows and their global indices
1882: - n, idxn - the number of columns and their global indices
1884: Notes:
1885: The user must allocate space (m*n PetscScalars) for the values, v.
1886: The values, v, are then returned in a row-oriented format,
1887: analogous to that used by default in MatSetValues().
1889: MatGetValues() uses 0-based row and column numbers in
1890: Fortran as well as in C.
1892: MatGetValues() requires that the matrix has been assembled
1893: with MatAssemblyBegin()/MatAssemblyEnd(). Thus, calls to
1894: MatSetValues() and MatGetValues() CANNOT be made in succession
1895: without intermediate matrix assembly.
1897: Negative row or column indices will be ignored and those locations in v[] will be
1898: left unchanged.
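   For example, to extract a 2-by-2 block of entries from an assembled matrix A (indices chosen for illustration):
.vb
     PetscInt    rows[2] = {0,1},cols[2] = {2,3};
     PetscScalar vals[4];                   /* room for 2*2 values              */

     MatGetValues(A,2,rows,2,cols,vals);    /* vals[2*i+j] = A(rows[i],cols[j]) */
.ve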
1900: Level: advanced
1902: Concepts: matrices^accessing values
1904: .seealso: MatGetRow(), MatCreateSubMatrices(), MatSetValues()
1905: @*/
1906: PetscErrorCode MatGetValues(Mat mat,PetscInt m,const PetscInt idxm[],PetscInt n,const PetscInt idxn[],PetscScalar v[])
1907: {
1913: if (!m || !n) return(0);
1917: if (!mat->assembled) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
1918: if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
1919: if (!mat->ops->getvalues) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
1920: MatCheckPreallocated(mat,1);
1922: PetscLogEventBegin(MAT_GetValues,mat,0,0,0);
1923: (*mat->ops->getvalues)(mat,m,idxm,n,idxn,v);
1924: PetscLogEventEnd(MAT_GetValues,mat,0,0,0);
1925: return(0);
1926: }
1928: /*@
1929: MatSetValuesBatch - Adds (ADD_VALUES) many blocks of values into a matrix at once. The blocks must all be square and
1930: the same size. Currently, this can only be called once and creates the given matrix.
1932: Not Collective
1934: Input Parameters:
1935: + mat - the matrix
1936: . nb - the number of blocks
1937: . bs - the number of rows (and columns) in each block
1938: . rows - a concatenation of the rows for each block
1939: - v - a concatenation of logically two-dimensional arrays of values
1941: Notes:
1942: In the future, we will extend this routine to handle rectangular blocks, and to allow multiple calls for a given matrix.
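   A sketch with two 2-by-2 blocks (the matrix A, the row indices, and the values are made up for illustration):
.vb
     PetscInt    rows[4] = {0,1, 4,5};           /* rows of block 0, then rows of block 1 */
     PetscScalar v[8]    = {1,2,3,4, 5,6,7,8};   /* block 0 values, then block 1 values   */

     MatSetValuesBatch(A,2,2,rows,v);            /* nb = 2 blocks, bs = 2                 */
.ve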
1944: Level: advanced
1946: Concepts: matrices^putting entries in
1948: .seealso: MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValuesBlocked(), MatSetValuesLocal(),
1949: InsertMode, INSERT_VALUES, ADD_VALUES, MatSetValues()
1950: @*/
1951: PetscErrorCode MatSetValuesBatch(Mat mat, PetscInt nb, PetscInt bs, PetscInt rows[], const PetscScalar v[])
1952: {
1960: #if defined(PETSC_USE_DEBUG)
1961: if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
1962: #endif
1964: PetscLogEventBegin(MAT_SetValuesBatch,mat,0,0,0);
1965: if (mat->ops->setvaluesbatch) {
1966: (*mat->ops->setvaluesbatch)(mat,nb,bs,rows,v);
1967: } else {
1968: PetscInt b;
1969: for (b = 0; b < nb; ++b) {
1970: MatSetValues(mat, bs, &rows[b*bs], bs, &rows[b*bs], &v[b*bs*bs], ADD_VALUES);
1971: }
1972: }
1973: PetscLogEventEnd(MAT_SetValuesBatch,mat,0,0,0);
1974: return(0);
1975: }
1977: /*@
1978: MatSetLocalToGlobalMapping - Sets a local-to-global numbering for use by
1979: the routine MatSetValuesLocal() to allow users to insert matrix entries
1980: using a local (per-processor) numbering.
1982: Not Collective
1984: Input Parameters:
1985: + x - the matrix
1986: . rmapping - row mapping created with ISLocalToGlobalMappingCreate() or ISLocalToGlobalMappingCreateIS()
1987: - cmapping - column mapping
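   Example of Usage (a sketch using the same mapping for rows and columns; nlocal and the array ghosts[] of
   global indices for the local, including ghost, nodes are assumed to come from your own setup):
.vb
     ISLocalToGlobalMapping ltog;

     ISLocalToGlobalMappingCreate(PETSC_COMM_WORLD,1,nlocal,ghosts,PETSC_COPY_VALUES,&ltog);
     MatSetLocalToGlobalMapping(A,ltog,ltog);
     ISLocalToGlobalMappingDestroy(&ltog);
.ve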
1989: Level: intermediate
1991: Concepts: matrices^local to global mapping
1992: Concepts: local to global mapping^for matrices
1994: .seealso: MatAssemblyBegin(), MatAssemblyEnd(), MatSetValues(), MatSetValuesLocal()
1995: @*/
1996: PetscErrorCode MatSetLocalToGlobalMapping(Mat x,ISLocalToGlobalMapping rmapping,ISLocalToGlobalMapping cmapping)
1997: {
2006: if (x->ops->setlocaltoglobalmapping) {
2007: (*x->ops->setlocaltoglobalmapping)(x,rmapping,cmapping);
2008: } else {
2009: PetscLayoutSetISLocalToGlobalMapping(x->rmap,rmapping);
2010: PetscLayoutSetISLocalToGlobalMapping(x->cmap,cmapping);
2011: }
2012: return(0);
2013: }
2016: /*@
2017: MatGetLocalToGlobalMapping - Gets the local-to-global numbering set by MatSetLocalToGlobalMapping()
2019: Not Collective
2021: Input Parameters:
2022: . A - the matrix
2024: Output Parameters:
2025: + rmapping - row mapping
2026: - cmapping - column mapping
2028: Level: advanced
2030: Concepts: matrices^local to global mapping
2031: Concepts: local to global mapping^for matrices
2033: .seealso: MatSetValuesLocal()
2034: @*/
2035: PetscErrorCode MatGetLocalToGlobalMapping(Mat A,ISLocalToGlobalMapping *rmapping,ISLocalToGlobalMapping *cmapping)
2036: {
2042: if (rmapping) *rmapping = A->rmap->mapping;
2043: if (cmapping) *cmapping = A->cmap->mapping;
2044: return(0);
2045: }
2047: /*@
2048: MatGetLayouts - Gets the PetscLayout objects for rows and columns
2050: Not Collective
2052: Input Parameters:
2053: . A - the matrix
2055: Output Parameters:
2056: + rmap - row layout
2057: - cmap - column layout
2059: Level: advanced
2061: .seealso: MatCreateVecs(), MatGetLocalToGlobalMapping()
2062: @*/
2063: PetscErrorCode MatGetLayouts(Mat A,PetscLayout *rmap,PetscLayout *cmap)
2064: {
2070: if (rmap) *rmap = A->rmap;
2071: if (cmap) *cmap = A->cmap;
2072: return(0);
2073: }
2075: /*@C
2076: MatSetValuesLocal - Inserts or adds values into certain locations of a matrix,
2077: using a local ordering of the nodes.
2079: Not Collective
2081: Input Parameters:
2082: + mat - the matrix
2083: . nrow, irow - number of rows and their local indices
2084: . ncol, icol - number of columns and their local indices
2085: . y - a logically two-dimensional array of values
2086: - addv - either INSERT_VALUES or ADD_VALUES, where
2087: ADD_VALUES adds values to any existing entries, and
2088: INSERT_VALUES replaces existing entries with new values
2090: Notes:
2091: If you create the matrix yourself (that is not with a call to DMCreateMatrix()) then you MUST call MatXXXXSetPreallocation() or
2092: MatSetUp() before using this routine
2094: If you create the matrix yourself (that is not with a call to DMCreateMatrix()) then you MUST call MatSetLocalToGlobalMapping() before using this routine
2096: Calls to MatSetValuesLocal() with the INSERT_VALUES and ADD_VALUES
2097: options cannot be mixed without intervening calls to the assembly
2098: routines.
2100: These values may be cached, so MatAssemblyBegin() and MatAssemblyEnd()
2101: MUST be called after all calls to MatSetValuesLocal() have been completed.
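   A sketch of inserting a single entry with local indices (the matrix A is assumed to already have its
   local-to-global mapping set):
.vb
     PetscInt    irow = 0,icol = 0;   /* local indices */
     PetscScalar val  = 1.0;

     MatSetValuesLocal(A,1,&irow,1,&icol,&val,ADD_VALUES);
     MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
     MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
.ve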
2103: Level: intermediate
2105: Concepts: matrices^putting entries in with local numbering
2107: Developer Notes:
2108:     This is labeled with C so it does not automatically generate Fortran stubs and interfaces
2109: because it requires multiple Fortran interfaces depending on which arguments are scalar or arrays.
2111: .seealso: MatAssemblyBegin(), MatAssemblyEnd(), MatSetValues(), MatSetLocalToGlobalMapping(),
2112: MatSetValueLocal()
2113: @*/
2114: PetscErrorCode MatSetValuesLocal(Mat mat,PetscInt nrow,const PetscInt irow[],PetscInt ncol,const PetscInt icol[],const PetscScalar y[],InsertMode addv)
2115: {
2121: MatCheckPreallocated(mat,1);
2122: if (!nrow || !ncol) return(0); /* no values to insert */
2126: if (mat->insertmode == NOT_SET_VALUES) {
2127: mat->insertmode = addv;
2128: }
2129: #if defined(PETSC_USE_DEBUG)
2130: else if (mat->insertmode != addv) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Cannot mix add values and insert values");
2131: if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2132: if (!mat->ops->setvalueslocal && !mat->ops->setvalues) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
2133: #endif
2135: if (mat->assembled) {
2136: mat->was_assembled = PETSC_TRUE;
2137: mat->assembled = PETSC_FALSE;
2138: }
2139: PetscLogEventBegin(MAT_SetValues,mat,0,0,0);
2140: if (mat->ops->setvalueslocal) {
2141: (*mat->ops->setvalueslocal)(mat,nrow,irow,ncol,icol,y,addv);
2142: } else {
2143: PetscInt buf[8192],*bufr=0,*bufc=0,*irowm,*icolm;
2144: if ((nrow+ncol) <= (PetscInt)(sizeof(buf)/sizeof(PetscInt))) {
2145: irowm = buf; icolm = buf+nrow;
2146: } else {
2147: PetscMalloc2(nrow,&bufr,ncol,&bufc);
2148: irowm = bufr; icolm = bufc;
2149: }
2150: ISLocalToGlobalMappingApply(mat->rmap->mapping,nrow,irow,irowm);
2151: ISLocalToGlobalMappingApply(mat->cmap->mapping,ncol,icol,icolm);
2152: MatSetValues(mat,nrow,irowm,ncol,icolm,y,addv);
2153: PetscFree2(bufr,bufc);
2154: }
2155: PetscLogEventEnd(MAT_SetValues,mat,0,0,0);
2156: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_VECCUDA)
2157: if (mat->valid_GPU_matrix != PETSC_OFFLOAD_UNALLOCATED) {
2158: mat->valid_GPU_matrix = PETSC_OFFLOAD_CPU;
2159: }
2160: #endif
2161: return(0);
2162: }
2164: /*@C
2165: MatSetValuesBlockedLocal - Inserts or adds values into certain locations of a matrix,
2166: using a local ordering of the nodes a block at a time.
2168: Not Collective
2170: Input Parameters:
2171: + x - the matrix
2172: . nrow, irow - number of rows and their local indices
2173: . ncol, icol - number of columns and their local indices
2174: . y - a logically two-dimensional array of values
2175: - addv - either INSERT_VALUES or ADD_VALUES, where
2176: ADD_VALUES adds values to any existing entries, and
2177: INSERT_VALUES replaces existing entries with new values
2179: Notes:
2180: If you create the matrix yourself (that is not with a call to DMCreateMatrix()) then you MUST call MatXXXXSetPreallocation() or
2181: MatSetUp() before using this routine
2183: If you create the matrix yourself (that is not with a call to DMCreateMatrix()) then you MUST call MatSetBlockSize() and MatSetLocalToGlobalMapping()
2184:    before using this routine.
2186: Calls to MatSetValuesBlockedLocal() with the INSERT_VALUES and ADD_VALUES
2187: options cannot be mixed without intervening calls to the assembly
2188: routines.
2190: These values may be cached, so MatAssemblyBegin() and MatAssemblyEnd()
2191: MUST be called after all calls to MatSetValuesBlockedLocal() have been completed.
2193: Level: intermediate
2195: Developer Notes:
2196:     This is labeled with C so it does not automatically generate Fortran stubs and interfaces
2197: because it requires multiple Fortran interfaces depending on which arguments are scalar or arrays.
2199: Concepts: matrices^putting blocked values in with local numbering
2201: .seealso: MatSetBlockSize(), MatSetLocalToGlobalMapping(), MatAssemblyBegin(), MatAssemblyEnd(),
2202: MatSetValuesLocal(), MatSetValuesBlocked()
2203: @*/
2204: PetscErrorCode MatSetValuesBlockedLocal(Mat mat,PetscInt nrow,const PetscInt irow[],PetscInt ncol,const PetscInt icol[],const PetscScalar y[],InsertMode addv)
2205: {
2211: MatCheckPreallocated(mat,1);
2212: if (!nrow || !ncol) return(0); /* no values to insert */
2216: if (mat->insertmode == NOT_SET_VALUES) {
2217: mat->insertmode = addv;
2218: }
2219: #if defined(PETSC_USE_DEBUG)
2220: else if (mat->insertmode != addv) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Cannot mix add values and insert values");
2221: if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2222: if (!mat->ops->setvaluesblockedlocal && !mat->ops->setvaluesblocked && !mat->ops->setvalueslocal && !mat->ops->setvalues) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
2223: #endif
2225: if (mat->assembled) {
2226: mat->was_assembled = PETSC_TRUE;
2227: mat->assembled = PETSC_FALSE;
2228: }
2229: PetscLogEventBegin(MAT_SetValues,mat,0,0,0);
2230: if (mat->ops->setvaluesblockedlocal) {
2231: (*mat->ops->setvaluesblockedlocal)(mat,nrow,irow,ncol,icol,y,addv);
2232: } else {
2233: PetscInt buf[8192],*bufr=0,*bufc=0,*irowm,*icolm;
2234: if ((nrow+ncol) <= (PetscInt)(sizeof(buf)/sizeof(PetscInt))) {
2235: irowm = buf; icolm = buf + nrow;
2236: } else {
2237: PetscMalloc2(nrow,&bufr,ncol,&bufc);
2238: irowm = bufr; icolm = bufc;
2239: }
2240: ISLocalToGlobalMappingApplyBlock(mat->rmap->mapping,nrow,irow,irowm);
2241: ISLocalToGlobalMappingApplyBlock(mat->cmap->mapping,ncol,icol,icolm);
2242: MatSetValuesBlocked(mat,nrow,irowm,ncol,icolm,y,addv);
2243: PetscFree2(bufr,bufc);
2244: }
2245: PetscLogEventEnd(MAT_SetValues,mat,0,0,0);
2246: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_VECCUDA)
2247: if (mat->valid_GPU_matrix != PETSC_OFFLOAD_UNALLOCATED) {
2248: mat->valid_GPU_matrix = PETSC_OFFLOAD_CPU;
2249: }
2250: #endif
2251: return(0);
2252: }
2254: /*@
2255:    MatMultDiagonalBlock - Computes the matrix-vector product, y = Dx, where D is defined by the inode or block structure of the diagonal
2257: Collective on Mat and Vec
2259: Input Parameters:
2260: + mat - the matrix
2261: - x - the vector to be multiplied
2263: Output Parameters:
2264: . y - the result
2266: Notes:
2267: The vectors x and y cannot be the same. I.e., one cannot
2268: call MatMult(A,y,y).
2270: Level: developer
2272: Concepts: matrix-vector product
2274: .seealso: MatMultTranspose(), MatMultAdd(), MatMultTransposeAdd()
2275: @*/
2276: PetscErrorCode MatMultDiagonalBlock(Mat mat,Vec x,Vec y)
2277: {
2286: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2287: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2288: if (x == y) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"x and y must be different vectors");
2289: MatCheckPreallocated(mat,1);
2291: if (!mat->ops->multdiagonalblock) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"This matrix type does not have a multiply defined");
2292: (*mat->ops->multdiagonalblock)(mat,x,y);
2293: PetscObjectStateIncrease((PetscObject)y);
2294: return(0);
2295: }
2297: /* --------------------------------------------------------*/
2298: /*@
2299: MatMult - Computes the matrix-vector product, y = Ax.
2301: Neighbor-wise Collective on Mat and Vec
2303: Input Parameters:
2304: + mat - the matrix
2305: - x - the vector to be multiplied
2307: Output Parameters:
2308: . y - the result
2310: Notes:
2311: The vectors x and y cannot be the same. I.e., one cannot
2312: call MatMult(A,y,y).
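   Typical usage obtains the vectors from the matrix so that the layouts are guaranteed to match:
.vb
     Vec x,y;

     MatCreateVecs(A,&x,&y);   /* x is compatible with the columns of A, y with its rows */
     /* ... fill x ... */
     MatMult(A,x,y);           /* y = A x */
.ve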
2314: Level: beginner
2316: Concepts: matrix-vector product
2318: .seealso: MatMultTranspose(), MatMultAdd(), MatMultTransposeAdd()
2319: @*/
2320: PetscErrorCode MatMult(Mat mat,Vec x,Vec y)
2321: {
2329: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2330: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2331: if (x == y) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"x and y must be different vectors");
2332: #if !defined(PETSC_HAVE_CONSTRAINTS)
2333: if (mat->cmap->N != x->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->cmap->N,x->map->N);
2334: if (mat->rmap->N != y->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec y: global dim %D %D",mat->rmap->N,y->map->N);
2335: if (mat->rmap->n != y->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec y: local dim %D %D",mat->rmap->n,y->map->n);
2336: #endif
2337: VecLocked(y,3);
2338: if (mat->erroriffailure) {VecValidValues(x,2,PETSC_TRUE);}
2339: MatCheckPreallocated(mat,1);
2341: VecLockPush(x);
2342: if (!mat->ops->mult) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"This matrix type does not have a multiply defined");
2343: PetscLogEventBegin(MAT_Mult,mat,x,y,0);
2344: (*mat->ops->mult)(mat,x,y);
2345: PetscLogEventEnd(MAT_Mult,mat,x,y,0);
2346: if (mat->erroriffailure) {VecValidValues(y,3,PETSC_FALSE);}
2347: VecLockPop(x);
2348: return(0);
2349: }
2351: /*@
2352: MatMultTranspose - Computes matrix transpose times a vector y = A^T * x.
2354: Neighbor-wise Collective on Mat and Vec
2356: Input Parameters:
2357: + mat - the matrix
2358: - x - the vector to be multiplied
2360: Output Parameters:
2361: . y - the result
2363: Notes:
2364: The vectors x and y cannot be the same. I.e., one cannot
2365: call MatMultTranspose(A,y,y).
2367: For complex numbers this does NOT compute the Hermitian (complex conjugate) transpose multiple,
2368: use MatMultHermitianTranspose()
2370: Level: beginner
2372: Concepts: matrix vector product^transpose
2374: .seealso: MatMult(), MatMultAdd(), MatMultTransposeAdd(), MatMultHermitianTranspose(), MatTranspose()
2375: @*/
2376: PetscErrorCode MatMultTranspose(Mat mat,Vec x,Vec y)
2377: {
2386: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2387: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2388: if (x == y) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"x and y must be different vectors");
2389: #if !defined(PETSC_HAVE_CONSTRAINTS)
2390: if (mat->rmap->N != x->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->rmap->N,x->map->N);
2391: if (mat->cmap->N != y->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec y: global dim %D %D",mat->cmap->N,y->map->N);
2392: #endif
2393: if (mat->erroriffailure) {VecValidValues(x,2,PETSC_TRUE);}
2394: MatCheckPreallocated(mat,1);
2396: if (!mat->ops->multtranspose) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"This matrix type does not have a multiply transpose defined");
2397: PetscLogEventBegin(MAT_MultTranspose,mat,x,y,0);
2398: VecLockPush(x);
2399: (*mat->ops->multtranspose)(mat,x,y);
2400: VecLockPop(x);
2401: PetscLogEventEnd(MAT_MultTranspose,mat,x,y,0);
2402: PetscObjectStateIncrease((PetscObject)y);
2403: if (mat->erroriffailure) {VecValidValues(y,3,PETSC_FALSE);}
2404: return(0);
2405: }
2407: /*@
2408: MatMultHermitianTranspose - Computes matrix Hermitian transpose times a vector.
2410: Neighbor-wise Collective on Mat and Vec
2412: Input Parameters:
2413: + mat - the matrix
2414: -  x   - the vector to be multiplied
2416: Output Parameters:
2417: . y - the result
2419: Notes:
2420: The vectors x and y cannot be the same. I.e., one cannot
2421: call MatMultHermitianTranspose(A,y,y).
2423: Also called the conjugate transpose, complex conjugate transpose, or adjoint.
2425: For real numbers MatMultTranspose() and MatMultHermitianTranspose() are identical.
2427: Level: beginner
2429: Concepts: matrix vector product^transpose
2431: .seealso: MatMult(), MatMultAdd(), MatMultHermitianTransposeAdd(), MatMultTranspose()
2432: @*/
2433: PetscErrorCode MatMultHermitianTranspose(Mat mat,Vec x,Vec y)
2434: {
2436: Vec w;
2444: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2445: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2446: if (x == y) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"x and y must be different vectors");
2447: #if !defined(PETSC_HAVE_CONSTRAINTS)
2448: if (mat->rmap->N != x->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->rmap->N,x->map->N);
2449: if (mat->cmap->N != y->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec y: global dim %D %D",mat->cmap->N,y->map->N);
2450: #endif
2451: MatCheckPreallocated(mat,1);
2453: PetscLogEventBegin(MAT_MultHermitianTranspose,mat,x,y,0);
2454: if (mat->ops->multhermitiantranspose) {
2455: VecLockPush(x);
2456: (*mat->ops->multhermitiantranspose)(mat,x,y);
2457: VecLockPop(x);
2458: } else {
2459: VecDuplicate(x,&w);
2460: VecCopy(x,w);
2461: VecConjugate(w);
2462: MatMultTranspose(mat,w,y);
2463: VecDestroy(&w);
2464: VecConjugate(y);
2465: }
2466: PetscLogEventEnd(MAT_MultHermitianTranspose,mat,x,y,0);
2467: PetscObjectStateIncrease((PetscObject)y);
2468: return(0);
2469: }
2471: /*@
2472: MatMultAdd - Computes v3 = v2 + A * v1.
2474: Neighbor-wise Collective on Mat and Vec
2476: Input Parameters:
2477: + mat - the matrix
2478: - v1, v2 - the vectors
2480: Output Parameters:
2481: . v3 - the result
2483: Notes:
2484: The vectors v1 and v3 cannot be the same. I.e., one cannot
2485: call MatMultAdd(A,v1,v2,v1).
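   MatMultAdd(A,v1,v2,v3) computes the same result as the two-step sequence below (when v2 and v3 are distinct
   vectors), but typically in a single pass over the matrix:
.vb
     MatMult(A,v1,v3);         /* v3 = A v1    */
     VecAXPY(v3,1.0,v2);       /* v3 = v3 + v2 */
.ve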
2487: Level: beginner
2489: Concepts: matrix vector product^addition
2491: .seealso: MatMultTranspose(), MatMult(), MatMultTransposeAdd()
2492: @*/
2493: PetscErrorCode MatMultAdd(Mat mat,Vec v1,Vec v2,Vec v3)
2494: {
2504: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2505: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2506: if (mat->cmap->N != v1->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec v1: global dim %D %D",mat->cmap->N,v1->map->N);
2507: /* if (mat->rmap->N != v2->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec v2: global dim %D %D",mat->rmap->N,v2->map->N);
2508: if (mat->rmap->N != v3->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec v3: global dim %D %D",mat->rmap->N,v3->map->N); */
2509: if (mat->rmap->n != v3->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec v3: local dim %D %D",mat->rmap->n,v3->map->n);
2510: if (mat->rmap->n != v2->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec v2: local dim %D %D",mat->rmap->n,v2->map->n);
2511: if (v1 == v3) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"v1 and v3 must be different vectors");
2512: MatCheckPreallocated(mat,1);
2514: if (!mat->ops->multadd) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"No MatMultAdd() for matrix type '%s'",((PetscObject)mat)->type_name);
2515: PetscLogEventBegin(MAT_MultAdd,mat,v1,v2,v3);
2516: VecLockPush(v1);
2517: (*mat->ops->multadd)(mat,v1,v2,v3);
2518: VecLockPop(v1);
2519: PetscLogEventEnd(MAT_MultAdd,mat,v1,v2,v3);
2520: PetscObjectStateIncrease((PetscObject)v3);
2521: return(0);
2522: }
2524: /*@
2525: MatMultTransposeAdd - Computes v3 = v2 + A' * v1.
2527: Neighbor-wise Collective on Mat and Vec
2529: Input Parameters:
2530: + mat - the matrix
2531: - v1, v2 - the vectors
2533: Output Parameters:
2534: . v3 - the result
2536: Notes:
2537: The vectors v1 and v3 cannot be the same. I.e., one cannot
2538: call MatMultTransposeAdd(A,v1,v2,v1).
2540: Level: beginner
2542: Concepts: matrix vector product^transpose and addition
2544: .seealso: MatMultTranspose(), MatMultAdd(), MatMult()
2545: @*/
2546: PetscErrorCode MatMultTransposeAdd(Mat mat,Vec v1,Vec v2,Vec v3)
2547: {
2557: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2558: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2559: if (!mat->ops->multtransposeadd) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
2560: if (v1 == v3) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"v1 and v3 must be different vectors");
2561: if (mat->rmap->N != v1->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec v1: global dim %D %D",mat->rmap->N,v1->map->N);
2562: if (mat->cmap->N != v2->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec v2: global dim %D %D",mat->cmap->N,v2->map->N);
2563: if (mat->cmap->N != v3->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec v3: global dim %D %D",mat->cmap->N,v3->map->N);
2564: MatCheckPreallocated(mat,1);
2566: PetscLogEventBegin(MAT_MultTransposeAdd,mat,v1,v2,v3);
2567: VecLockPush(v1);
2568: (*mat->ops->multtransposeadd)(mat,v1,v2,v3);
2569: VecLockPop(v1);
2570: PetscLogEventEnd(MAT_MultTransposeAdd,mat,v1,v2,v3);
2571: PetscObjectStateIncrease((PetscObject)v3);
2572: return(0);
2573: }
2575: /*@
2576: MatMultHermitianTransposeAdd - Computes v3 = v2 + A^H * v1.
2578: Neighbor-wise Collective on Mat and Vec
2580: Input Parameters:
2581: + mat - the matrix
2582: - v1, v2 - the vectors
2584: Output Parameters:
2585: . v3 - the result
2587: Notes:
2588: The vectors v1 and v3 cannot be the same. I.e., one cannot
2589: call MatMultHermitianTransposeAdd(A,v1,v2,v1).
2591: Level: beginner
2593: Concepts: matrix vector product^transpose and addition
2595: .seealso: MatMultHermitianTranspose(), MatMultTranspose(), MatMultAdd(), MatMult()
2596: @*/
2597: PetscErrorCode MatMultHermitianTransposeAdd(Mat mat,Vec v1,Vec v2,Vec v3)
2598: {
2608: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2609: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2610: if (v1 == v3) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"v1 and v3 must be different vectors");
2611: if (mat->rmap->N != v1->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec v1: global dim %D %D",mat->rmap->N,v1->map->N);
2612: if (mat->cmap->N != v2->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec v2: global dim %D %D",mat->cmap->N,v2->map->N);
2613: if (mat->cmap->N != v3->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec v3: global dim %D %D",mat->cmap->N,v3->map->N);
2614: MatCheckPreallocated(mat,1);
2616: PetscLogEventBegin(MAT_MultHermitianTransposeAdd,mat,v1,v2,v3);
2617: VecLockPush(v1);
2618: if (mat->ops->multhermitiantransposeadd) {
2619: (*mat->ops->multhermitiantransposeadd)(mat,v1,v2,v3);
2620: } else {
2621: Vec w,z;
2622: VecDuplicate(v1,&w);
2623: VecCopy(v1,w);
2624: VecConjugate(w);
2625: VecDuplicate(v3,&z);
2626: MatMultTranspose(mat,w,z);
2627: VecDestroy(&w);
2628: VecConjugate(z);
2629: VecWAXPY(v3,1.0,v2,z);
2630: VecDestroy(&z);
2631: }
2632: VecLockPop(v1);
2633: PetscLogEventEnd(MAT_MultHermitianTransposeAdd,mat,v1,v2,v3);
2634: PetscObjectStateIncrease((PetscObject)v3);
2635: return(0);
2636: }
2638: /*@
2639: MatMultConstrained - The inner multiplication routine for a
2640: constrained matrix P^T A P.
2642: Neighbor-wise Collective on Mat and Vec
2644: Input Parameters:
2645: + mat - the matrix
2646: -  x   - the vector to be multiplied
2648: Output Parameters:
2649: . y - the result
2651: Notes:
2652: The vectors x and y cannot be the same. I.e., one cannot
2653: call MatMult(A,y,y).
2655: Level: beginner
2657: .keywords: matrix, multiply, matrix-vector product, constraint
2658: .seealso: MatMult(), MatMultTranspose(), MatMultAdd(), MatMultTransposeAdd()
2659: @*/
2660: PetscErrorCode MatMultConstrained(Mat mat,Vec x,Vec y)
2661: {
2668: if (!mat->assembled) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2669: if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2670: if (x == y) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"x and y must be different vectors");
2671: if (mat->cmap->N != x->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->cmap->N,x->map->N);
2672: if (mat->rmap->N != y->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec y: global dim %D %D",mat->rmap->N,y->map->N);
2673: if (mat->rmap->n != y->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec y: local dim %D %D",mat->rmap->n,y->map->n);
2675: PetscLogEventBegin(MAT_MultConstrained,mat,x,y,0);
2676: VecLockPush(x);
2677: (*mat->ops->multconstrained)(mat,x,y);
2678: VecLockPop(x);
2679: PetscLogEventEnd(MAT_MultConstrained,mat,x,y,0);
2680: PetscObjectStateIncrease((PetscObject)y);
2681: return(0);
2682: }
2684: /*@
2685: MatMultTransposeConstrained - The inner multiplication routine for a
2686: constrained matrix P^T A^T P.
2688: Neighbor-wise Collective on Mat and Vec
2690: Input Parameters:
2691: + mat - the matrix
2692: -  x   - the vector to be multiplied
2694: Output Parameters:
2695: . y - the result
2697: Notes:
2698: The vectors x and y cannot be the same. I.e., one cannot
2699: call MatMult(A,y,y).
2701: Level: beginner
2703: .keywords: matrix, multiply, matrix-vector product, constraint
2704: .seealso: MatMult(), MatMultTranspose(), MatMultAdd(), MatMultTransposeAdd()
2705: @*/
2706: PetscErrorCode MatMultTransposeConstrained(Mat mat,Vec x,Vec y)
2707: {
2714: if (!mat->assembled) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2715: if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2716: if (x == y) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"x and y must be different vectors");
2717:   if (mat->rmap->N != x->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->rmap->N,x->map->N);
2718:   if (mat->cmap->N != y->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec y: global dim %D %D",mat->cmap->N,y->map->N);
2720: PetscLogEventBegin(MAT_MultConstrained,mat,x,y,0);
2721: (*mat->ops->multtransposeconstrained)(mat,x,y);
2722: PetscLogEventEnd(MAT_MultConstrained,mat,x,y,0);
2723: PetscObjectStateIncrease((PetscObject)y);
2724: return(0);
2725: }
2727: /*@C
2728:    MatGetFactorType - gets the type of factorization that the matrix represents
2730:    Not Collective
2733: Input Parameters:
2734: . mat - the matrix
2736: Output Parameters:
2737: . t - the type, one of MAT_FACTOR_NONE, MAT_FACTOR_LU, MAT_FACTOR_CHOLESKY, MAT_FACTOR_ILU, MAT_FACTOR_ICC,MAT_FACTOR_ILUDT
2739: Level: intermediate
2741: .seealso: MatFactorType, MatGetFactor()
2742: @*/
2743: PetscErrorCode MatGetFactorType(Mat mat,MatFactorType *t)
2744: {
2748: *t = mat->factortype;
2749: return(0);
2750: }
2752: /* ------------------------------------------------------------*/
2753: /*@C
2754: MatGetInfo - Returns information about matrix storage (number of
2755: nonzeros, memory, etc.).
2757: Collective on Mat if MAT_GLOBAL_MAX or MAT_GLOBAL_SUM is used as the flag
2759: Input Parameters:
2760: . mat - the matrix
2762: Output Parameters:
2763: + flag - flag indicating the type of parameters to be returned
2764: (MAT_LOCAL - local matrix, MAT_GLOBAL_MAX - maximum over all processors,
2765: MAT_GLOBAL_SUM - sum over all processors)
2766: - info - matrix information context
2768: Notes:
2769: The MatInfo context contains a variety of matrix data, including
2770: number of nonzeros allocated and used, number of mallocs during
2771: matrix assembly, etc. Additional information for factored matrices
2772: is provided (such as the fill ratio, number of mallocs during
2773: factorization, etc.). Much of this info is printed to PETSC_STDOUT
2774: when using the runtime options
2775: $ -info -mat_view ::ascii_info
2777: Example for C/C++ Users:
2778: See the file ${PETSC_DIR}/include/petscmat.h for a complete list of
2779: data within the MatInfo context. For example,
2780: .vb
2781: MatInfo info;
2782: Mat A;
2783: double mal, nz_a, nz_u;
2785: MatGetInfo(A,MAT_LOCAL,&info);
2786: mal = info.mallocs;
2787: nz_a = info.nz_allocated;
2788: .ve
2790: Example for Fortran Users:
2791: Fortran users should declare info as a double precision
2792: array of dimension MAT_INFO_SIZE, and then extract the parameters
2793: of interest. See the file ${PETSC_DIR}/include/petsc/finclude/petscmat.h
2794:    for a complete list of parameter names.
2795: .vb
2796: double precision info(MAT_INFO_SIZE)
2797: double precision mal, nz_a
2798: Mat A
2799: integer ierr
2801: call MatGetInfo(A,MAT_LOCAL,info,ierr)
2802: mal = info(MAT_INFO_MALLOCS)
2803: nz_a = info(MAT_INFO_NZ_ALLOCATED)
2804: .ve
2806: Level: intermediate
2808: Concepts: matrices^getting information on
2810: Developer Note: fortran interface is not autogenerated as the f90
2811:     interface definition cannot be generated correctly [due to MatInfo]
2813: .seealso: MatStashGetInfo()
2815: @*/
2816: PetscErrorCode MatGetInfo(Mat mat,MatInfoType flag,MatInfo *info)
2817: {
2824: if (!mat->ops->getinfo) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
2825: MatCheckPreallocated(mat,1);
2826: (*mat->ops->getinfo)(mat,flag,info);
2827: return(0);
2828: }
2830: /*
2831: This is used by external packages where it is not easy to get the info from the actual
2832: matrix factorization.
2833: */
2834: PetscErrorCode MatGetInfo_External(Mat A,MatInfoType flag,MatInfo *info)
2835: {
2839: PetscMemzero(info,sizeof(MatInfo));
2840: return(0);
2841: }
2843: /* ----------------------------------------------------------*/
2845: /*@C
2846: MatLUFactor - Performs in-place LU factorization of matrix.
2848: Collective on Mat
2850: Input Parameters:
2851: + mat - the matrix
2852: . row - row permutation
2853: . col - column permutation
2854: - info - options for factorization, includes
2855: $ fill - expected fill as ratio of original fill.
2856: $ dtcol - pivot tolerance (0 no pivot, 1 full column pivoting)
2857: $ Run with the option -info to determine an optimal value to use
2859: Notes:
2860: Most users should employ the simplified KSP interface for linear solvers
2861: instead of working directly with matrix algebra routines such as this.
2862: See, e.g., KSPCreate().
2864: This changes the state of the matrix to a factored matrix; it cannot be used
2865: for example with MatSetValues() unless one first calls MatSetUnfactored().
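   A sketch of in-place use (passing NULL for info selects the default factorization options; the vectors b and x
   are assumed to exist):
.vb
     IS row,col;

     MatGetOrdering(A,MATORDERINGND,&row,&col);
     MatLUFactor(A,row,col,NULL);          /* A now holds its own LU factors  */
     MatSolve(A,b,x);                      /* solve A x = b with the factors  */
     ISDestroy(&row);
     ISDestroy(&col);
.ve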
2867: Level: developer
2869: Concepts: matrices^LU factorization
2871: .seealso: MatLUFactorSymbolic(), MatLUFactorNumeric(), MatCholeskyFactor(),
2872: MatGetOrdering(), MatSetUnfactored(), MatFactorInfo, MatGetFactor()
2874: Developer Note: fortran interface is not autogenerated as the f90
2875:     interface definition cannot be generated correctly [due to MatFactorInfo]
2877: @*/
2878: PetscErrorCode MatLUFactor(Mat mat,IS row,IS col,const MatFactorInfo *info)
2879: {
2881: MatFactorInfo tinfo;
2889: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2890: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2891: if (!mat->ops->lufactor) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
2892: MatCheckPreallocated(mat,1);
2893: if (!info) {
2894: MatFactorInfoInitialize(&tinfo);
2895: info = &tinfo;
2896: }
2898: PetscLogEventBegin(MAT_LUFactor,mat,row,col,0);
2899: (*mat->ops->lufactor)(mat,row,col,info);
2900: PetscLogEventEnd(MAT_LUFactor,mat,row,col,0);
2901: PetscObjectStateIncrease((PetscObject)mat);
2902: return(0);
2903: }
2905: /*@C
2906: MatILUFactor - Performs in-place ILU factorization of matrix.
2908: Collective on Mat
2910: Input Parameters:
2911: + mat - the matrix
2912: . row - row permutation
2913: . col - column permutation
2914: - info - structure containing
2915: $ levels - number of levels of fill.
2916: $ expected fill - as ratio of original fill.
2917: $ 1 or 0 - indicating force fill on diagonal (improves robustness for matrices
2918: missing diagonal entries)
2920: Notes:
2921: Probably really in-place only when level of fill is zero, otherwise allocates
2922: new space to store factored matrix and deletes previous memory.
2924: Most users should employ the simplified KSP interface for linear solvers
2925: instead of working directly with matrix algebra routines such as this.
2926: See, e.g., KSPCreate().
2928: Level: developer
2930: Concepts: matrices^ILU factorization
2932: .seealso: MatILUFactorSymbolic(), MatLUFactorNumeric(), MatCholeskyFactor(), MatFactorInfo
2934: Developer Note: fortran interface is not autogenerated as the f90
2935:     interface definition cannot be generated correctly [due to MatFactorInfo]
2937: @*/
2938: PetscErrorCode MatILUFactor(Mat mat,IS row,IS col,const MatFactorInfo *info)
2939: {
2948: if (mat->rmap->N != mat->cmap->N) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONG,"matrix must be square");
2949: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2950: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2951: if (!mat->ops->ilufactor) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
2952: MatCheckPreallocated(mat,1);
2954: PetscLogEventBegin(MAT_ILUFactor,mat,row,col,0);
2955: (*mat->ops->ilufactor)(mat,row,col,info);
2956: PetscLogEventEnd(MAT_ILUFactor,mat,row,col,0);
2957: PetscObjectStateIncrease((PetscObject)mat);
2958: return(0);
2959: }
2961: /*@C
2962: MatLUFactorSymbolic - Performs symbolic LU factorization of matrix.
2963: Call this routine before calling MatLUFactorNumeric().
2965: Collective on Mat
2967: Input Parameters:
2968: + fact - the factor matrix obtained with MatGetFactor()
2969: . mat - the matrix
2970: . row, col - row and column permutations
2971: - info - options for factorization, includes
2972: $ fill - expected fill as ratio of original fill.
2973: $ dtcol - pivot tolerance (0 no pivot, 1 full column pivoting)
2974: $ Run with the option -info to determine an optimal value to use
2977: Notes:
2978: See Users-Manual: ch_mat for additional information about choosing the fill factor for better efficiency.
2980: Most users should employ the simplified KSP interface for linear solvers
2981: instead of working directly with matrix algebra routines such as this.
2982: See, e.g., KSPCreate().
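   A sketch of the full out-of-place sequence (the ordering sets row and col are assumed to come from
   MatGetOrdering(), and the vectors b and x to exist):
.vb
     Mat           F;
     MatFactorInfo info;

     MatFactorInfoInitialize(&info);
     MatGetFactor(A,MATSOLVERPETSC,MAT_FACTOR_LU,&F);
     MatLUFactorSymbolic(F,A,row,col,&info);
     MatLUFactorNumeric(F,A,&info);
     MatSolve(F,b,x);
     MatDestroy(&F);
.ve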
2984: Level: developer
2986: Concepts: matrices^LU symbolic factorization
2988: .seealso: MatLUFactor(), MatLUFactorNumeric(), MatCholeskyFactor(), MatFactorInfo, MatFactorInfoInitialize()
2990: Developer Note: fortran interface is not autogenerated as the f90
2991:     interface definition cannot be generated correctly [due to MatFactorInfo]
2993: @*/
2994: PetscErrorCode MatLUFactorSymbolic(Mat fact,Mat mat,IS row,IS col,const MatFactorInfo *info)
2995: {
3005: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
3006: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
3007: if (!(fact)->ops->lufactorsymbolic) {
3008: MatSolverType spackage;
3009: MatFactorGetSolverType(fact,&spackage);
3010: SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Matrix type %s symbolic LU using solver package %s",((PetscObject)mat)->type_name,spackage);
3011: }
3012: MatCheckPreallocated(mat,2);
3014: PetscLogEventBegin(MAT_LUFactorSymbolic,mat,row,col,0);
3015: (fact->ops->lufactorsymbolic)(fact,mat,row,col,info);
3016: PetscLogEventEnd(MAT_LUFactorSymbolic,mat,row,col,0);
3017: PetscObjectStateIncrease((PetscObject)fact);
3018: return(0);
3019: }
3021: /*@C
3022: MatLUFactorNumeric - Performs numeric LU factorization of a matrix.
3023: Call this routine after first calling MatLUFactorSymbolic().
3025: Collective on Mat
3027: Input Parameters:
3028: + fact - the factor matrix obtained with MatGetFactor()
3029: . mat - the matrix
3030: - info - options for factorization
3032: Notes:
3033: See MatLUFactor() for in-place factorization. See
3034: MatCholeskyFactorNumeric() for the symmetric, positive definite case.
3036: Most users should employ the simplified KSP interface for linear solvers
3037: instead of working directly with matrix algebra routines such as this.
3038: See, e.g., KSPCreate().
3040: Level: developer
3042: Concepts: matrices^LU numeric factorization
3044: .seealso: MatLUFactorSymbolic(), MatLUFactor(), MatCholeskyFactor()
3046: Developer Note: fortran interface is not autogenerated as the f90
3047:     interface definition cannot be generated correctly [due to MatFactorInfo]
3049: @*/
3050: PetscErrorCode MatLUFactorNumeric(Mat fact,Mat mat,const MatFactorInfo *info)
3051: {
3059: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
3060: if (mat->rmap->N != (fact)->rmap->N || mat->cmap->N != (fact)->cmap->N) SETERRQ4(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Mat fact: global dimensions are different %D should = %D %D should = %D",mat->rmap->N,(fact)->rmap->N,mat->cmap->N,(fact)->cmap->N);
3062: if (!(fact)->ops->lufactornumeric) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s numeric LU",((PetscObject)mat)->type_name);
3063: MatCheckPreallocated(mat,2);
3064: PetscLogEventBegin(MAT_LUFactorNumeric,mat,fact,0,0);
3065: (fact->ops->lufactornumeric)(fact,mat,info);
3066: PetscLogEventEnd(MAT_LUFactorNumeric,mat,fact,0,0);
3067: MatViewFromOptions(fact,NULL,"-mat_factor_view");
3068: PetscObjectStateIncrease((PetscObject)fact);
3069: return(0);
3070: }
3072: /*@C
3073: MatCholeskyFactor - Performs in-place Cholesky factorization of a
3074: symmetric matrix.
3076: Collective on Mat
3078: Input Parameters:
3079: + mat - the matrix
3080: . perm - row and column permutations
3081: - f - expected fill as ratio of original fill
3083: Notes:
3084: See MatLUFactor() for the nonsymmetric case. See also
3085: MatCholeskyFactorSymbolic(), and MatCholeskyFactorNumeric().
3087: Most users should employ the simplified KSP interface for linear solvers
3088: instead of working directly with matrix algebra routines such as this.
3089: See, e.g., KSPCreate().
3091: Level: developer
3093: Concepts: matrices^Cholesky factorization
3095: .seealso: MatLUFactor(), MatCholeskyFactorSymbolic(), MatCholeskyFactorNumeric()
3096: MatGetOrdering()
3098: Developer Note: fortran interface is not autogenerated as the f90
3099:     interface definition cannot be generated correctly [due to MatFactorInfo]
3101: @*/
3102: PetscErrorCode MatCholeskyFactor(Mat mat,IS perm,const MatFactorInfo *info)
3103: {
3111: if (mat->rmap->N != mat->cmap->N) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONG,"Matrix must be square");
3112: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
3113: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
3114: if (!mat->ops->choleskyfactor) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"In-place factorization for Mat type %s is not supported, try out-of-place factorization. See MatCholeskyFactorSymbolic/Numeric",((PetscObject)mat)->type_name);
3115: MatCheckPreallocated(mat,1);
3117: PetscLogEventBegin(MAT_CholeskyFactor,mat,perm,0,0);
3118: (*mat->ops->choleskyfactor)(mat,perm,info);
3119: PetscLogEventEnd(MAT_CholeskyFactor,mat,perm,0,0);
3120: PetscObjectStateIncrease((PetscObject)mat);
3121: return(0);
3122: }
3124: /*@C
3125: MatCholeskyFactorSymbolic - Performs symbolic Cholesky factorization
3126: of a symmetric matrix.
3128: Collective on Mat
3130: Input Parameters:
3131: + fact - the factor matrix obtained with MatGetFactor()
3132: . mat - the matrix
3133: . perm - row and column permutations
3134: - info - options for factorization, includes
3135: $ fill - expected fill as ratio of original fill.
3136: $ dtcol - pivot tolerance (0 no pivot, 1 full column pivoting)
3137: $ Run with the option -info to determine an optimal value to use
3139: Notes:
3140: See MatLUFactorSymbolic() for the nonsymmetric case. See also
3141: MatCholeskyFactor() and MatCholeskyFactorNumeric().
3143: Most users should employ the simplified KSP interface for linear solvers
3144: instead of working directly with matrix algebra routines such as this.
3145: See, e.g., KSPCreate().
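   A sketch analogous to the LU case, for a symmetric matrix A (the ordering perm is assumed to come from
   MatGetOrdering(), and the vectors b and x to exist):
.vb
     Mat           F;
     MatFactorInfo info;

     MatFactorInfoInitialize(&info);
     MatGetFactor(A,MATSOLVERPETSC,MAT_FACTOR_CHOLESKY,&F);
     MatCholeskyFactorSymbolic(F,A,perm,&info);
     MatCholeskyFactorNumeric(F,A,&info);
     MatSolve(F,b,x);
     MatDestroy(&F);
.ve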
3147: Level: developer
3149: Concepts: matrices^Cholesky symbolic factorization
3151: .seealso: MatLUFactorSymbolic(), MatCholeskyFactor(), MatCholeskyFactorNumeric()
3152: MatGetOrdering()
3154: Developer Note: fortran interface is not autogenerated as the f90
3155:     interface definition cannot be generated correctly [due to MatFactorInfo]
3157: @*/
3158: PetscErrorCode MatCholeskyFactorSymbolic(Mat fact,Mat mat,IS perm,const MatFactorInfo *info)
3159: {
3168: if (mat->rmap->N != mat->cmap->N) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONG,"Matrix must be square");
3169: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
3170: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
3171: if (!(fact)->ops->choleskyfactorsymbolic) {
3172: MatSolverType spackage;
3173: MatFactorGetSolverType(fact,&spackage);
3174: SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s symbolic factor Cholesky using solver package %s",((PetscObject)mat)->type_name,spackage);
3175: }
3176: MatCheckPreallocated(mat,2);
3178: PetscLogEventBegin(MAT_CholeskyFactorSymbolic,mat,perm,0,0);
3179: (fact->ops->choleskyfactorsymbolic)(fact,mat,perm,info);
3180: PetscLogEventEnd(MAT_CholeskyFactorSymbolic,mat,perm,0,0);
3181: PetscObjectStateIncrease((PetscObject)fact);
3182: return(0);
3183: }
3185: /*@C
3186: MatCholeskyFactorNumeric - Performs numeric Cholesky factorization
3187: of a symmetric matrix. Call this routine after first calling
3188: MatCholeskyFactorSymbolic().
3190: Collective on Mat
3192: Input Parameters:
3193: +  fact - the factor matrix, obtained with MatGetFactor() and processed by MatCholeskyFactorSymbolic()
3194: .  mat - the initial matrix
3195: -  info - options for factorization
3199: Notes:
3200: Most users should employ the simplified KSP interface for linear solvers
3201: instead of working directly with matrix algebra routines such as this.
3202: See, e.g., KSPCreate().
3204: Level: developer
3206: Concepts: matrices^Cholesky numeric factorization
3208: .seealso: MatCholeskyFactorSymbolic(), MatCholeskyFactor(), MatLUFactorNumeric()
3210: Developer Note: fortran interface is not autogenerated as the f90
3211:     interface definition cannot be generated correctly [due to MatFactorInfo]
3213: @*/
3214: PetscErrorCode MatCholeskyFactorNumeric(Mat fact,Mat mat,const MatFactorInfo *info)
3215: {
3223: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
3224: if (!(fact)->ops->choleskyfactornumeric) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s numeric factor Cholesky",((PetscObject)mat)->type_name);
3225: if (mat->rmap->N != (fact)->rmap->N || mat->cmap->N != (fact)->cmap->N) SETERRQ4(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Mat fact: global dim %D should = %D %D should = %D",mat->rmap->N,(fact)->rmap->N,mat->cmap->N,(fact)->cmap->N);
3226: MatCheckPreallocated(mat,2);
3228: PetscLogEventBegin(MAT_CholeskyFactorNumeric,mat,fact,0,0);
3229: (fact->ops->choleskyfactornumeric)(fact,mat,info);
3230: PetscLogEventEnd(MAT_CholeskyFactorNumeric,mat,fact,0,0);
3231: MatViewFromOptions(fact,NULL,"-mat_factor_view");
3232: PetscObjectStateIncrease((PetscObject)fact);
3233: return(0);
3234: }
3236: /* ----------------------------------------------------------------*/
3237: /*@
3238: MatSolve - Solves A x = b, given a factored matrix.
3240: Neighbor-wise Collective on Mat and Vec
3242: Input Parameters:
3243: + mat - the factored matrix
3244: - b - the right-hand-side vector
3246: Output Parameter:
3247: . x - the result vector
3249: Notes:
3250: The vectors b and x cannot be the same. I.e., one cannot
3251: call MatSolve(A,x,x).
3253: Notes:
3254: Most users should employ the simplified KSP interface for linear solvers
3255: instead of working directly with matrix algebra routines such as this.
3256: See, e.g., KSPCreate().
3258: Level: developer
3260: Concepts: matrices^triangular solves
3262: .seealso: MatSolveAdd(), MatSolveTranspose(), MatSolveTransposeAdd()
3263: @*/
3264: PetscErrorCode MatSolve(Mat mat,Vec b,Vec x)
3265: {
3275: if (x == b) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"x and b must be different vectors");
3276: if (mat->cmap->N != x->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->cmap->N,x->map->N);
3277: if (mat->rmap->N != b->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: global dim %D %D",mat->rmap->N,b->map->N);
3278: if (mat->rmap->n != b->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: local dim %D %D",mat->rmap->n,b->map->n);
3279: if (!mat->rmap->N && !mat->cmap->N) return(0);
3280: if (!mat->ops->solve) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
3281: MatCheckPreallocated(mat,1);
3283: PetscLogEventBegin(MAT_Solve,mat,b,x,0);
3284: if (mat->factorerrortype) {
3285: PetscInfo1(mat,"MatFactorError %D\n",mat->factorerrortype);
3286: VecSetInf(x);
3287: } else {
3288: if (!mat->ops->solve) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
3289: (*mat->ops->solve)(mat,b,x);
3290: }
3291: PetscLogEventEnd(MAT_Solve,mat,b,x,0);
3292: PetscObjectStateIncrease((PetscObject)x);
3293: return(0);
3294: }
3296: static PetscErrorCode MatMatSolve_Basic(Mat A,Mat B,Mat X, PetscBool trans)
3297: {
3299: Vec b,x;
3300: PetscInt m,N,i;
3301: PetscScalar *bb,*xx;
3302: PetscBool flg;
3305: PetscObjectTypeCompareAny((PetscObject)B,&flg,MATSEQDENSE,MATMPIDENSE,NULL);
3306: if (!flg) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONG,"Matrix B must be MATDENSE matrix");
3307: PetscObjectTypeCompareAny((PetscObject)X,&flg,MATSEQDENSE,MATMPIDENSE,NULL);
3308: if (!flg) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONG,"Matrix X must be MATDENSE matrix");
3310: MatDenseGetArray(B,&bb);
3311: MatDenseGetArray(X,&xx);
3312: MatGetLocalSize(B,&m,NULL); /* number local rows */
3313: MatGetSize(B,NULL,&N); /* total columns in dense matrix */
3314: MatCreateVecs(A,&x,&b);
3315: for (i=0; i<N; i++) {
3316: VecPlaceArray(b,bb + i*m);
3317: VecPlaceArray(x,xx + i*m);
3318: if (trans) {
3319: MatSolveTranspose(A,b,x);
3320: } else {
3321: MatSolve(A,b,x);
3322: }
3323: VecResetArray(x);
3324: VecResetArray(b);
3325: }
3326: VecDestroy(&b);
3327: VecDestroy(&x);
3328: MatDenseRestoreArray(B,&bb);
3329: MatDenseRestoreArray(X,&xx);
3330: return(0);
3331: }
3333: /*@
3334: MatMatSolve - Solves A X = B, given a factored matrix.
3336: Neighbor-wise Collective on Mat
3338: Input Parameters:
3339: + A - the factored matrix
3340: - B - the right-hand-side matrix (dense matrix)
3342: Output Parameter:
3343: . X - the result matrix (dense matrix)
3345: Notes:
3346:    The matrices B and X cannot be the same.  I.e., one cannot
3347:    call MatMatSolve(A,X,X).
3350: Most users should usually employ the simplified KSP interface for linear solvers
3351: instead of working directly with matrix algebra routines such as this.
3352: See, e.g., KSPCreate(). However KSP can only solve for one vector (column of X)
3353: at a time.
3355: When using SuperLU_Dist as a parallel solver PETSc will use the SuperLU_Dist functionality to solve multiple right hand sides simultaneously. For MUMPS
3356: it calls a separate solve for each right hand side since MUMPS does not yet support distributed right hand sides.
3358: Since the resulting matrix X must always be dense we do not support sparse representation of the matrix B.
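   Example of Usage (a minimal sketch, assuming F is a factored matrix obtained as for MatSolve() and nrhs is the number of right-hand sides; B and X are created here only for illustration):
.vb
      Mat      B,X;
      PetscInt m,M;

      MatGetLocalSize(F,&m,NULL);
      MatGetSize(F,&M,NULL);
      MatCreateDense(PetscObjectComm((PetscObject)F),m,PETSC_DECIDE,M,nrhs,NULL,&B);
      MatDuplicate(B,MAT_DO_NOT_COPY_VALUES,&X);
      /* ... fill B with MatSetValues() and assemble it ... */
      MatMatSolve(F,B,X);       /* X = inv(A) B, one triangular solve per column of B */
.ve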
3360: Level: developer
3362: Concepts: matrices^triangular solves
3364: .seealso: MatMatSolveTranspose(), MatLUFactor(), MatCholeskyFactor()
3365: @*/
3366: PetscErrorCode MatMatSolve(Mat A,Mat B,Mat X)
3367: {
3377: if (X == B) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_IDN,"X and B must be different matrices");
3378: if (A->cmap->N != X->rmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Mat A,Mat X: global dim %D %D",A->cmap->N,X->rmap->N);
3379: if (A->rmap->N != B->rmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Mat A,Mat B: global dim %D %D",A->rmap->N,B->rmap->N);
3380: if (X->cmap->N < B->cmap->N) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Solution matrix must have same number of columns as rhs matrix");
3381: if (!A->rmap->N && !A->cmap->N) return(0);
3382: if (!A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
3383: MatCheckPreallocated(A,1);
3385: PetscLogEventBegin(MAT_MatSolve,A,B,X,0);
3386: if (!A->ops->matsolve) {
3387: PetscInfo1(A,"Mat type %s using basic MatMatSolve\n",((PetscObject)A)->type_name);
3388: MatMatSolve_Basic(A,B,X,PETSC_FALSE);
3389: } else {
3390: (*A->ops->matsolve)(A,B,X);
3391: }
3392: PetscLogEventEnd(MAT_MatSolve,A,B,X,0);
3393: PetscObjectStateIncrease((PetscObject)X);
3394: return(0);
3395: }
3397: /*@
3398: MatMatSolveTranspose - Solves A^T X = B, given a factored matrix.
3400: Neighbor-wise Collective on Mat
3402: Input Parameters:
3403: + A - the factored matrix
3404: - B - the right-hand-side matrix (dense matrix)
3406: Output Parameter:
3407: . X - the result matrix (dense matrix)
3409: Notes:
3410: The matrices B and X cannot be the same. I.e., one cannot
3411: call MatMatSolveTranspose(A,X,X).
3414: Most users should usually employ the simplified KSP interface for linear solvers
3415: instead of working directly with matrix algebra routines such as this.
3416: See, e.g., KSPCreate(). However KSP can only solve for one vector (column of X)
3417: at a time.
3419: When using SuperLU_Dist or MUMPS as a parallel solver, PETSc will use their functionality to solve multiple right hand sides simultaneously.
3421: Level: developer
3423: Concepts: matrices^triangular solves
3425: .seealso: MatMatSolve(), MatLUFactor(), MatCholeskyFactor()
3426: @*/
3427: PetscErrorCode MatMatSolveTranspose(Mat A,Mat B,Mat X)
3428: {
3438: if (X == B) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_IDN,"X and B must be different matrices");
3439: if (A->cmap->N != X->rmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Mat A,Mat X: global dim %D %D",A->cmap->N,X->rmap->N);
3440: if (A->rmap->N != B->rmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Mat A,Mat B: global dim %D %D",A->rmap->N,B->rmap->N);
3441: if (A->rmap->n != B->rmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat A,Mat B: local dim %D %D",A->rmap->n,B->rmap->n);
3442: if (X->cmap->N < B->cmap->N) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Solution matrix must have same number of columns as rhs matrix");
3443: if (!A->rmap->N && !A->cmap->N) return(0);
3444: if (!A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
3445: MatCheckPreallocated(A,1);
3447: PetscLogEventBegin(MAT_MatSolve,A,B,X,0);
3448: if (!A->ops->matsolvetranspose) {
3449: PetscInfo1(A,"Mat type %s using basic MatMatSolveTranspose\n",((PetscObject)A)->type_name);
3450: MatMatSolve_Basic(A,B,X,PETSC_TRUE);
3451: } else {
3452: (*A->ops->matsolvetranspose)(A,B,X);
3453: }
3454: PetscLogEventEnd(MAT_MatSolve,A,B,X,0);
3455: PetscObjectStateIncrease((PetscObject)X);
3456: return(0);
3457: }
3459: /*@
3460: MatMatTransposeSolve - Solves A X = B^T, given a factored matrix.
3462: Neighbor-wise Collective on Mat
3464: Input Parameters:
3465: + A - the factored matrix
3466: - Bt - the transpose of right-hand-side matrix
3468: Output Parameter:
3469: . X - the result matrix (dense matrix)
3471: Notes:
3472: Most users should usually employ the simplified KSP interface for linear solvers
3473: instead of working directly with matrix algebra routines such as this.
3474: See, e.g., KSPCreate(). However KSP can only solve for one vector (column of X)
3475: at a time.
3477:    For MUMPS, the right-hand-side matrix is only supported in centralized sparse compressed column format on the host processor. The user must create B^T in sparse compressed row format on the host processor and call MatMatTransposeSolve() to implement MUMPS' MatMatSolve().
3479: Level: developer
3481: Concepts: matrices^triangular solves
3483: .seealso: MatMatSolve(), MatMatSolveTranspose(), MatLUFactor(), MatCholeskyFactor()
3484: @*/
3485: PetscErrorCode MatMatTransposeSolve(Mat A,Mat Bt,Mat X)
3486: {
3497: if (X == Bt) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_IDN,"X and B must be different matrices");
3498: if (A->cmap->N != X->rmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Mat A,Mat X: global dim %D %D",A->cmap->N,X->rmap->N);
3499: if (A->rmap->N != Bt->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Mat A,Mat Bt: global dim %D %D",A->rmap->N,Bt->cmap->N);
3500: if (X->cmap->N < Bt->rmap->N) SETERRQ(PetscObjectComm((PetscObject)X),PETSC_ERR_ARG_SIZ,"Solution matrix must have same number of columns as row number of the rhs matrix");
3501: if (!A->rmap->N && !A->cmap->N) return(0);
3502: if (!A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
3503: MatCheckPreallocated(A,1);
3505: if (!A->ops->mattransposesolve) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Mat type %s",((PetscObject)A)->type_name);
3506: PetscLogEventBegin(MAT_MatTrSolve,A,Bt,X,0);
3507: (*A->ops->mattransposesolve)(A,Bt,X);
3508: PetscLogEventEnd(MAT_MatTrSolve,A,Bt,X,0);
3509: PetscObjectStateIncrease((PetscObject)X);
3510: return(0);
3511: }
3513: /*@
3514: MatForwardSolve - Solves L x = b, given a factored matrix, A = LU, or
3515: U^T*D^(1/2) x = b, given a factored symmetric matrix, A = U^T*D*U,
3517: Neighbor-wise Collective on Mat and Vec
3519: Input Parameters:
3520: + mat - the factored matrix
3521: - b - the right-hand-side vector
3523: Output Parameter:
3524: . x - the result vector
3526: Notes:
3527: MatSolve() should be used for most applications, as it performs
3528: a forward solve followed by a backward solve.
3530: The vectors b and x cannot be the same, i.e., one cannot
3531: call MatForwardSolve(A,x,x).
3533:    For matrices in seqsbaij format with block size larger than 1,
3534: the diagonal blocks are not implemented as D = D^(1/2) * D^(1/2) yet.
3535: MatForwardSolve() solves U^T*D y = b, and
3536: MatBackwardSolve() solves U x = y.
3537: Thus they do not provide a symmetric preconditioner.
3539: Most users should employ the simplified KSP interface for linear solvers
3540: instead of working directly with matrix algebra routines such as this.
3541: See, e.g., KSPCreate().
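   Example of Usage (a minimal sketch, assuming F holds an LU factorization and b, y, x are conforming vectors):
.vb
      MatForwardSolve(F,b,y);    /* y = inv(L) b */
      MatBackwardSolve(F,y,x);   /* x = inv(U) y, so the pair is equivalent to MatSolve(F,b,x) */
.ve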
3543: Level: developer
3545: Concepts: matrices^forward solves
3547: .seealso: MatSolve(), MatBackwardSolve()
3548: @*/
3549: PetscErrorCode MatForwardSolve(Mat mat,Vec b,Vec x)
3550: {
3560: if (x == b) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"x and b must be different vectors");
3561: if (mat->cmap->N != x->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->cmap->N,x->map->N);
3562: if (mat->rmap->N != b->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: global dim %D %D",mat->rmap->N,b->map->N);
3563: if (mat->rmap->n != b->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: local dim %D %D",mat->rmap->n,b->map->n);
3564: if (!mat->rmap->N && !mat->cmap->N) return(0);
3565: if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
3566: MatCheckPreallocated(mat,1);
3568: if (!mat->ops->forwardsolve) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
3569: PetscLogEventBegin(MAT_ForwardSolve,mat,b,x,0);
3570: (*mat->ops->forwardsolve)(mat,b,x);
3571: PetscLogEventEnd(MAT_ForwardSolve,mat,b,x,0);
3572: PetscObjectStateIncrease((PetscObject)x);
3573: return(0);
3574: }
3576: /*@
3577:    MatBackwardSolve - Solves U x = b, given a factored matrix, A = LU, or
3578:                       D^(1/2) U x = b, given a factored symmetric matrix, A = U^T*D*U,
3580: Neighbor-wise Collective on Mat and Vec
3582: Input Parameters:
3583: + mat - the factored matrix
3584: - b - the right-hand-side vector
3586: Output Parameter:
3587: . x - the result vector
3589: Notes:
3590: MatSolve() should be used for most applications, as it performs
3591: a forward solve followed by a backward solve.
3593: The vectors b and x cannot be the same. I.e., one cannot
3594: call MatBackwardSolve(A,x,x).
3596:    For matrices in seqsbaij format with block size larger than 1,
3597: the diagonal blocks are not implemented as D = D^(1/2) * D^(1/2) yet.
3598: MatForwardSolve() solves U^T*D y = b, and
3599: MatBackwardSolve() solves U x = y.
3600: Thus they do not provide a symmetric preconditioner.
3602: Most users should employ the simplified KSP interface for linear solvers
3603: instead of working directly with matrix algebra routines such as this.
3604: See, e.g., KSPCreate().
3606: Level: developer
3608: Concepts: matrices^backward solves
3610: .seealso: MatSolve(), MatForwardSolve()
3611: @*/
3612: PetscErrorCode MatBackwardSolve(Mat mat,Vec b,Vec x)
3613: {
3623: if (x == b) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"x and b must be different vectors");
3624: if (mat->cmap->N != x->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->cmap->N,x->map->N);
3625: if (mat->rmap->N != b->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: global dim %D %D",mat->rmap->N,b->map->N);
3626: if (mat->rmap->n != b->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: local dim %D %D",mat->rmap->n,b->map->n);
3627: if (!mat->rmap->N && !mat->cmap->N) return(0);
3628: if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
3629: MatCheckPreallocated(mat,1);
3631: if (!mat->ops->backwardsolve) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
3632: PetscLogEventBegin(MAT_BackwardSolve,mat,b,x,0);
3633: (*mat->ops->backwardsolve)(mat,b,x);
3634: PetscLogEventEnd(MAT_BackwardSolve,mat,b,x,0);
3635: PetscObjectStateIncrease((PetscObject)x);
3636: return(0);
3637: }
3639: /*@
3640: MatSolveAdd - Computes x = y + inv(A)*b, given a factored matrix.
3642: Neighbor-wise Collective on Mat and Vec
3644: Input Parameters:
3645: + mat - the factored matrix
3646: . b - the right-hand-side vector
3647: - y - the vector to be added to
3649: Output Parameter:
3650: . x - the result vector
3652: Notes:
3653: The vectors b and x cannot be the same. I.e., one cannot
3654: call MatSolveAdd(A,x,y,x).
3656: Most users should employ the simplified KSP interface for linear solvers
3657: instead of working directly with matrix algebra routines such as this.
3658: See, e.g., KSPCreate().
3660: Level: developer
3662: Concepts: matrices^triangular solves
3664: .seealso: MatSolve(), MatSolveTranspose(), MatSolveTransposeAdd()
3665: @*/
3666: PetscErrorCode MatSolveAdd(Mat mat,Vec b,Vec y,Vec x)
3667: {
3668: PetscScalar one = 1.0;
3669: Vec tmp;
3681: if (x == b) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"x and b must be different vectors");
3682: if (mat->cmap->N != x->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->cmap->N,x->map->N);
3683: if (mat->rmap->N != b->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: global dim %D %D",mat->rmap->N,b->map->N);
3684: if (mat->rmap->N != y->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec y: global dim %D %D",mat->rmap->N,y->map->N);
3685: if (mat->rmap->n != b->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: local dim %D %D",mat->rmap->n,b->map->n);
3686: if (x->map->n != y->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Vec x,Vec y: local dim %D %D",x->map->n,y->map->n);
3687: if (!mat->rmap->N && !mat->cmap->N) return(0);
3688: if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
3689: MatCheckPreallocated(mat,1);
3691: PetscLogEventBegin(MAT_SolveAdd,mat,b,x,y);
3692: if (mat->ops->solveadd) {
3693: (*mat->ops->solveadd)(mat,b,y,x);
3694: } else {
3695: /* do the solve then the add manually */
3696: if (x != y) {
3697: MatSolve(mat,b,x);
3698: VecAXPY(x,one,y);
3699: } else {
3700: VecDuplicate(x,&tmp);
3701: PetscLogObjectParent((PetscObject)mat,(PetscObject)tmp);
3702: VecCopy(x,tmp);
3703: MatSolve(mat,b,x);
3704: VecAXPY(x,one,tmp);
3705: VecDestroy(&tmp);
3706: }
3707: }
3708: PetscLogEventEnd(MAT_SolveAdd,mat,b,x,y);
3709: PetscObjectStateIncrease((PetscObject)x);
3710: return(0);
3711: }
3713: /*@
3714: MatSolveTranspose - Solves A' x = b, given a factored matrix.
3716: Neighbor-wise Collective on Mat and Vec
3718: Input Parameters:
3719: + mat - the factored matrix
3720: - b - the right-hand-side vector
3722: Output Parameter:
3723: . x - the result vector
3725: Notes:
3726: The vectors b and x cannot be the same. I.e., one cannot
3727: call MatSolveTranspose(A,x,x).
3729: Most users should employ the simplified KSP interface for linear solvers
3730: instead of working directly with matrix algebra routines such as this.
3731: See, e.g., KSPCreate().
3733: Level: developer
3735: Concepts: matrices^triangular solves
3737: .seealso: MatSolve(), MatSolveAdd(), MatSolveTransposeAdd()
3738: @*/
3739: PetscErrorCode MatSolveTranspose(Mat mat,Vec b,Vec x)
3740: {
3750: if (x == b) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"x and b must be different vectors");
3751: if (mat->rmap->N != x->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->rmap->N,x->map->N);
3752: if (mat->cmap->N != b->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: global dim %D %D",mat->cmap->N,b->map->N);
3753: if (!mat->rmap->N && !mat->cmap->N) return(0);
3754: if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
3755: MatCheckPreallocated(mat,1);
3756: PetscLogEventBegin(MAT_SolveTranspose,mat,b,x,0);
3757: if (mat->factorerrortype) {
3758: PetscInfo1(mat,"MatFactorError %D\n",mat->factorerrortype);
3759: VecSetInf(x);
3760: } else {
3761: if (!mat->ops->solvetranspose) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Matrix type %s",((PetscObject)mat)->type_name);
3762: (*mat->ops->solvetranspose)(mat,b,x);
3763: }
3764: PetscLogEventEnd(MAT_SolveTranspose,mat,b,x,0);
3765: PetscObjectStateIncrease((PetscObject)x);
3766: return(0);
3767: }
3769: /*@
3770: MatSolveTransposeAdd - Computes x = y + inv(Transpose(A)) b, given a
3771: factored matrix.
3773: Neighbor-wise Collective on Mat and Vec
3775: Input Parameters:
3776: + mat - the factored matrix
3777: . b - the right-hand-side vector
3778: - y - the vector to be added to
3780: Output Parameter:
3781: . x - the result vector
3783: Notes:
3784: The vectors b and x cannot be the same. I.e., one cannot
3785: call MatSolveTransposeAdd(A,x,y,x).
3787: Most users should employ the simplified KSP interface for linear solvers
3788: instead of working directly with matrix algebra routines such as this.
3789: See, e.g., KSPCreate().
3791: Level: developer
3793: Concepts: matrices^triangular solves
3795: .seealso: MatSolve(), MatSolveAdd(), MatSolveTranspose()
3796: @*/
3797: PetscErrorCode MatSolveTransposeAdd(Mat mat,Vec b,Vec y,Vec x)
3798: {
3799: PetscScalar one = 1.0;
3801: Vec tmp;
3812: if (x == b) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"x and b must be different vectors");
3813: if (mat->rmap->N != x->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->rmap->N,x->map->N);
3814: if (mat->cmap->N != b->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: global dim %D %D",mat->cmap->N,b->map->N);
3815: if (mat->cmap->N != y->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec y: global dim %D %D",mat->cmap->N,y->map->N);
3816: if (x->map->n != y->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Vec x,Vec y: local dim %D %D",x->map->n,y->map->n);
3817: if (!mat->rmap->N && !mat->cmap->N) return(0);
3818: if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
3819: MatCheckPreallocated(mat,1);
3821: PetscLogEventBegin(MAT_SolveTransposeAdd,mat,b,x,y);
3822: if (mat->ops->solvetransposeadd) {
3823: if (mat->factorerrortype) {
3824: PetscInfo1(mat,"MatFactorError %D\n",mat->factorerrortype);
3825: VecSetInf(x);
3826: } else {
3827: (*mat->ops->solvetransposeadd)(mat,b,y,x);
3828: }
3829: } else {
3830: /* do the solve then the add manually */
3831: if (x != y) {
3832: MatSolveTranspose(mat,b,x);
3833: VecAXPY(x,one,y);
3834: } else {
3835: VecDuplicate(x,&tmp);
3836: PetscLogObjectParent((PetscObject)mat,(PetscObject)tmp);
3837: VecCopy(x,tmp);
3838: MatSolveTranspose(mat,b,x);
3839: VecAXPY(x,one,tmp);
3840: VecDestroy(&tmp);
3841: }
3842: }
3843: PetscLogEventEnd(MAT_SolveTransposeAdd,mat,b,x,y);
3844: PetscObjectStateIncrease((PetscObject)x);
3845: return(0);
3846: }
3847: /* ----------------------------------------------------------------*/
3849: /*@
3850: MatSOR - Computes relaxation (SOR, Gauss-Seidel) sweeps.
3852: Neighbor-wise Collective on Mat and Vec
3854: Input Parameters:
3855: + mat - the matrix
3856: . b - the right hand side
3857: . omega - the relaxation factor
3858: . flag - flag indicating the type of SOR (see below)
3859: . shift - diagonal shift
3860: . its - the number of iterations
3861: - lits - the number of local iterations
3863: Output Parameters:
3864: . x - the solution (can contain an initial guess, use option SOR_ZERO_INITIAL_GUESS to indicate no guess)
3866: SOR Flags:
3867: . SOR_FORWARD_SWEEP - forward SOR
3868: . SOR_BACKWARD_SWEEP - backward SOR
3869: . SOR_SYMMETRIC_SWEEP - SSOR (symmetric SOR)
3870: . SOR_LOCAL_FORWARD_SWEEP - local forward SOR
3871: .     SOR_LOCAL_BACKWARD_SWEEP - local backward SOR
3872: . SOR_LOCAL_SYMMETRIC_SWEEP - local SSOR
3873: . SOR_APPLY_UPPER, SOR_APPLY_LOWER - applies
3874: upper/lower triangular part of matrix to
3875: vector (with omega)
3876: . SOR_ZERO_INITIAL_GUESS - zero initial guess
3878: Notes:
3879: SOR_LOCAL_FORWARD_SWEEP, SOR_LOCAL_BACKWARD_SWEEP, and
3880: SOR_LOCAL_SYMMETRIC_SWEEP perform separate independent smoothings
3881: on each processor.
3883: Application programmers will not generally use MatSOR() directly,
3884: but instead will employ the KSP/PC interface.
3887:    For BAIJ, SBAIJ, and AIJ matrices with Inodes this does a block SOR smoothing, otherwise it does a pointwise smoothing.
3889: Notes for Advanced Users:
3890: The flags are implemented as bitwise inclusive or operations.
3891: For example, use (SOR_ZERO_INITIAL_GUESS | SOR_SYMMETRIC_SWEEP)
3892: to specify a zero initial guess for SSOR.
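   Example of Usage (a minimal sketch, assuming an assembled sequential matrix A and conforming vectors b and x):
.vb
      /* one SSOR sweep with omega = 1.0, no diagonal shift, and a zero initial guess */
      MatSOR(A,b,1.0,(MatSORType)(SOR_ZERO_INITIAL_GUESS | SOR_SYMMETRIC_SWEEP),0.0,1,1,x);
.ve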
3894: Most users should employ the simplified KSP interface for linear solvers
3895: instead of working directly with matrix algebra routines such as this.
3896: See, e.g., KSPCreate().
3898: Vectors x and b CANNOT be the same
3900:    Developer Note: We should add block SOR support for AIJ matrices with block size set greater than one and no inodes
3902: Level: developer
3904: Concepts: matrices^relaxation
3905: Concepts: matrices^SOR
3906: Concepts: matrices^Gauss-Seidel
3908: @*/
3909: PetscErrorCode MatSOR(Mat mat,Vec b,PetscReal omega,MatSORType flag,PetscReal shift,PetscInt its,PetscInt lits,Vec x)
3910: {
3920: if (!mat->ops->sor) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
3921: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
3922: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
3923: if (mat->cmap->N != x->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->cmap->N,x->map->N);
3924: if (mat->rmap->N != b->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: global dim %D %D",mat->rmap->N,b->map->N);
3925: if (mat->rmap->n != b->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: local dim %D %D",mat->rmap->n,b->map->n);
3926: if (its <= 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Relaxation requires global its %D positive",its);
3927: if (lits <= 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Relaxation requires local its %D positive",lits);
3928: if (b == x) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_IDN,"b and x vector cannot be the same");
3930: MatCheckPreallocated(mat,1);
3931: PetscLogEventBegin(MAT_SOR,mat,b,x,0);
3932: ierr =(*mat->ops->sor)(mat,b,omega,flag,shift,its,lits,x);
3933: PetscLogEventEnd(MAT_SOR,mat,b,x,0);
3934: PetscObjectStateIncrease((PetscObject)x);
3935: return(0);
3936: }
3938: /*
3939: Default matrix copy routine.
3940: */
3941: PetscErrorCode MatCopy_Basic(Mat A,Mat B,MatStructure str)
3942: {
3943: PetscErrorCode ierr;
3944: PetscInt i,rstart = 0,rend = 0,nz;
3945: const PetscInt *cwork;
3946: const PetscScalar *vwork;
3949: if (B->assembled) {
3950: MatZeroEntries(B);
3951: }
3952: MatGetOwnershipRange(A,&rstart,&rend);
3953: for (i=rstart; i<rend; i++) {
3954: MatGetRow(A,i,&nz,&cwork,&vwork);
3955: MatSetValues(B,1,&i,nz,cwork,vwork,INSERT_VALUES);
3956: MatRestoreRow(A,i,&nz,&cwork,&vwork);
3957: }
3958: MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);
3959: MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);
3960: return(0);
3961: }
3963: /*@
3964:    MatCopy - Copies a matrix to another matrix.
3966: Collective on Mat
3968: Input Parameters:
3969: + A - the matrix
3970: - str - SAME_NONZERO_PATTERN or DIFFERENT_NONZERO_PATTERN
3972: Output Parameter:
3973: . B - where the copy is put
3975: Notes:
3976: If you use SAME_NONZERO_PATTERN then the two matrices had better have the
3977: same nonzero pattern or the routine will crash.
3979: MatCopy() copies the matrix entries of a matrix to another existing
3980: matrix (after first zeroing the second matrix). A related routine is
3981: MatConvert(), which first creates a new matrix and then copies the data.
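   Example of Usage (a minimal sketch; B is created here only for illustration, with the same nonzero pattern as A):
.vb
      MatDuplicate(A,MAT_DO_NOT_COPY_VALUES,&B);   /* allocate B with the nonzero pattern of A */
      MatCopy(A,B,SAME_NONZERO_PATTERN);           /* copy the values of A into B              */
.ve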
3983: Level: intermediate
3985: Concepts: matrices^copying
3987: .seealso: MatConvert(), MatDuplicate()
3989: @*/
3990: PetscErrorCode MatCopy(Mat A,Mat B,MatStructure str)
3991: {
3993: PetscInt i;
4001: MatCheckPreallocated(B,2);
4002: if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4003: if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
4004: if (A->rmap->N != B->rmap->N || A->cmap->N != B->cmap->N) SETERRQ4(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Mat A,Mat B: global dim (%D,%D) (%D,%D)",A->rmap->N,B->rmap->N,A->cmap->N,B->cmap->N);
4005: MatCheckPreallocated(A,1);
4006: if (A == B) return(0);
4008: PetscLogEventBegin(MAT_Copy,A,B,0,0);
4009: if (A->ops->copy) {
4010: (*A->ops->copy)(A,B,str);
4011: } else { /* generic conversion */
4012: MatCopy_Basic(A,B,str);
4013: }
4015: B->stencil.dim = A->stencil.dim;
4016: B->stencil.noc = A->stencil.noc;
4017: for (i=0; i<=A->stencil.dim; i++) {
4018: B->stencil.dims[i] = A->stencil.dims[i];
4019: B->stencil.starts[i] = A->stencil.starts[i];
4020: }
4022: PetscLogEventEnd(MAT_Copy,A,B,0,0);
4023: PetscObjectStateIncrease((PetscObject)B);
4024: return(0);
4025: }
4027: /*@C
4028: MatConvert - Converts a matrix to another matrix, either of the same
4029: or different type.
4031: Collective on Mat
4033: Input Parameters:
4034: + mat - the matrix
4035: . newtype - new matrix type. Use MATSAME to create a new matrix of the
4036: same type as the original matrix.
4037: - reuse - denotes if the destination matrix is to be created or reused.
4038:    Use MAT_INPLACE_MATRIX for in-place conversion (that is, when you want the input mat to be changed to contain the matrix in the new format); otherwise use
4039: MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX (can only be used after the first call was made with MAT_INITIAL_MATRIX, causes the matrix space in M to be reused).
4041: Output Parameter:
4042: . M - pointer to place new matrix
4044: Notes:
4045: MatConvert() first creates a new matrix and then copies the data from
4046: the first matrix. A related routine is MatCopy(), which copies the matrix
4047: entries of one matrix to another already existing matrix context.
4049: Cannot be used to convert a sequential matrix to parallel or parallel to sequential,
4050: the MPI communicator of the generated matrix is always the same as the communicator
4051: of the input matrix.
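   Example of Usage (a minimal sketch, assuming an assembled AIJ matrix A; B is created here only for illustration):
.vb
      MatConvert(A,MATDENSE,MAT_INITIAL_MATRIX,&B);   /* B is a newly created dense copy of A   */
      MatConvert(A,MATDENSE,MAT_REUSE_MATRIX,&B);     /* refill B after the values of A changed */
.ve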
4053: Level: intermediate
4055: Concepts: matrices^converting between storage formats
4057: .seealso: MatCopy(), MatDuplicate()
4058: @*/
4059: PetscErrorCode MatConvert(Mat mat, MatType newtype,MatReuse reuse,Mat *M)
4060: {
4062: PetscBool sametype,issame,flg;
4063: char convname[256],mtype[256];
4064: Mat B;
4070: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4071: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
4072: MatCheckPreallocated(mat,1);
4074: PetscOptionsGetString(((PetscObject)mat)->options,((PetscObject)mat)->prefix,"-matconvert_type",mtype,256,&flg);
4075: if (flg) {
4076: newtype = mtype;
4077: }
4078: PetscObjectTypeCompare((PetscObject)mat,newtype,&sametype);
4079: PetscStrcmp(newtype,"same",&issame);
4080: if ((reuse == MAT_INPLACE_MATRIX) && (mat != *M)) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"MAT_INPLACE_MATRIX requires same input and output matrix");
4081: if ((reuse == MAT_REUSE_MATRIX) && (mat == *M)) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"MAT_REUSE_MATRIX means reuse matrix in final argument, perhaps you mean MAT_INPLACE_MATRIX");
4083: if ((reuse == MAT_INPLACE_MATRIX) && (issame || sametype)) return(0);
4085: if ((sametype || issame) && (reuse==MAT_INITIAL_MATRIX) && mat->ops->duplicate) {
4086: (*mat->ops->duplicate)(mat,MAT_COPY_VALUES,M);
4087: } else {
4088: PetscErrorCode (*conv)(Mat, MatType,MatReuse,Mat*)=NULL;
4089: const char *prefix[3] = {"seq","mpi",""};
4090: PetscInt i;
4091: /*
4092: Order of precedence:
4093: 1) See if a specialized converter is known to the current matrix.
4094: 2) See if a specialized converter is known to the desired matrix class.
4095: 3) See if a good general converter is registered for the desired class
4096: (as of 6/27/03 only MATMPIADJ falls into this category).
4097: 4) See if a good general converter is known for the current matrix.
4098: 5) Use a really basic converter.
4099: */
4101: /* 1) See if a specialized converter is known to the current matrix and the desired class */
4102: for (i=0; i<3; i++) {
4103: PetscStrncpy(convname,"MatConvert_",sizeof(convname));
4104: PetscStrlcat(convname,((PetscObject)mat)->type_name,sizeof(convname));
4105: PetscStrlcat(convname,"_",sizeof(convname));
4106: PetscStrlcat(convname,prefix[i],sizeof(convname));
4107: PetscStrlcat(convname,issame ? ((PetscObject)mat)->type_name : newtype,sizeof(convname));
4108: PetscStrlcat(convname,"_C",sizeof(convname));
4109: PetscObjectQueryFunction((PetscObject)mat,convname,&conv);
4110: if (conv) goto foundconv;
4111: }
4113: /* 2) See if a specialized converter is known to the desired matrix class. */
4114: MatCreate(PetscObjectComm((PetscObject)mat),&B);
4115: MatSetSizes(B,mat->rmap->n,mat->cmap->n,mat->rmap->N,mat->cmap->N);
4116: MatSetType(B,newtype);
4117: for (i=0; i<3; i++) {
4118: PetscStrncpy(convname,"MatConvert_",sizeof(convname));
4119: PetscStrlcat(convname,((PetscObject)mat)->type_name,sizeof(convname));
4120: PetscStrlcat(convname,"_",sizeof(convname));
4121: PetscStrlcat(convname,prefix[i],sizeof(convname));
4122: PetscStrlcat(convname,newtype,sizeof(convname));
4123: PetscStrlcat(convname,"_C",sizeof(convname));
4124: PetscObjectQueryFunction((PetscObject)B,convname,&conv);
4125: if (conv) {
4126: MatDestroy(&B);
4127: goto foundconv;
4128: }
4129: }
4131: /* 3) See if a good general converter is registered for the desired class */
4132: conv = B->ops->convertfrom;
4133: MatDestroy(&B);
4134: if (conv) goto foundconv;
4136: /* 4) See if a good general converter is known for the current matrix */
4137: if (mat->ops->convert) {
4138: conv = mat->ops->convert;
4139: }
4140: if (conv) goto foundconv;
4142: /* 5) Use a really basic converter. */
4143: conv = MatConvert_Basic;
4145: foundconv:
4146: PetscLogEventBegin(MAT_Convert,mat,0,0,0);
4147: (*conv)(mat,newtype,reuse,M);
4148: if (mat->rmap->mapping && mat->cmap->mapping && !(*M)->rmap->mapping && !(*M)->cmap->mapping) {
4149: /* the block sizes must be same if the mappings are copied over */
4150: (*M)->rmap->bs = mat->rmap->bs;
4151: (*M)->cmap->bs = mat->cmap->bs;
4152: PetscObjectReference((PetscObject)mat->rmap->mapping);
4153: PetscObjectReference((PetscObject)mat->cmap->mapping);
4154: (*M)->rmap->mapping = mat->rmap->mapping;
4155: (*M)->cmap->mapping = mat->cmap->mapping;
4156: }
4157: (*M)->stencil.dim = mat->stencil.dim;
4158: (*M)->stencil.noc = mat->stencil.noc;
4159: for (i=0; i<=mat->stencil.dim; i++) {
4160: (*M)->stencil.dims[i] = mat->stencil.dims[i];
4161: (*M)->stencil.starts[i] = mat->stencil.starts[i];
4162: }
4163: PetscLogEventEnd(MAT_Convert,mat,0,0,0);
4164: }
4165: PetscObjectStateIncrease((PetscObject)*M);
4167: /* Copy Mat options */
4168: if (mat->symmetric) {MatSetOption(*M,MAT_SYMMETRIC,PETSC_TRUE);}
4169: if (mat->hermitian) {MatSetOption(*M,MAT_HERMITIAN,PETSC_TRUE);}
4170: return(0);
4171: }
4173: /*@C
4174: MatFactorGetSolverType - Returns name of the package providing the factorization routines
4176: Not Collective
4178: Input Parameter:
4179: . mat - the matrix, must be a factored matrix
4181: Output Parameter:
4182: . type - the string name of the package (do not free this string)
4184: Notes:
4185:       In Fortran you pass in an empty string and the package name will be copied into it.
4186: (Make sure the string is long enough)
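   Example of Usage (a minimal sketch, assuming F is a factored matrix):
.vb
      MatSolverType stype;

      MatFactorGetSolverType(F,&stype);
      PetscPrintf(PETSC_COMM_WORLD,"Factorization provided by %s\n",stype);
.ve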
4188: Level: intermediate
4190: .seealso: MatCopy(), MatDuplicate(), MatGetFactorAvailable(), MatGetFactor()
4191: @*/
4192: PetscErrorCode MatFactorGetSolverType(Mat mat, MatSolverType *type)
4193: {
4194: PetscErrorCode ierr, (*conv)(Mat,MatSolverType*);
4199: if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Only for factored matrix");
4200: PetscObjectQueryFunction((PetscObject)mat,"MatFactorGetSolverType_C",&conv);
4201: if (!conv) {
4202: *type = MATSOLVERPETSC;
4203: } else {
4204: (*conv)(mat,type);
4205: }
4206: return(0);
4207: }
4209: typedef struct _MatSolverTypeForSpecifcType* MatSolverTypeForSpecifcType;
4210: struct _MatSolverTypeForSpecifcType {
4211: MatType mtype;
4212: PetscErrorCode (*getfactor[4])(Mat,MatFactorType,Mat*);
4213: MatSolverTypeForSpecifcType next;
4214: };
4216: typedef struct _MatSolverTypeHolder* MatSolverTypeHolder;
4217: struct _MatSolverTypeHolder {
4218: char *name;
4219: MatSolverTypeForSpecifcType handlers;
4220: MatSolverTypeHolder next;
4221: };
4223: static MatSolverTypeHolder MatSolverTypeHolders = NULL;
4225: /*@C
4226:    MatSolverTypeRegister - Registers a MatSolverType that works for a particular matrix type
4228: Input Parameters:
4229: + package - name of the package, for example petsc or superlu
4230: . mtype - the matrix type that works with this package
4231: . ftype - the type of factorization supported by the package
4232: - getfactor - routine that will create the factored matrix ready to be used
4234: Level: intermediate
4236: .seealso: MatCopy(), MatDuplicate(), MatGetFactorAvailable()
4237: @*/
4238: PetscErrorCode MatSolverTypeRegister(MatSolverType package,MatType mtype,MatFactorType ftype,PetscErrorCode (*getfactor)(Mat,MatFactorType,Mat*))
4239: {
4240: PetscErrorCode ierr;
4241: MatSolverTypeHolder next = MatSolverTypeHolders,prev;
4242: PetscBool flg;
4243: MatSolverTypeForSpecifcType inext,iprev = NULL;
4246: if (!next) {
4247: PetscNew(&MatSolverTypeHolders);
4248: PetscStrallocpy(package,&MatSolverTypeHolders->name);
4249: PetscNew(&MatSolverTypeHolders->handlers);
4250: PetscStrallocpy(mtype,(char **)&MatSolverTypeHolders->handlers->mtype);
4251: MatSolverTypeHolders->handlers->getfactor[(int)ftype-1] = getfactor;
4252: return(0);
4253: }
4254: while (next) {
4255: PetscStrcasecmp(package,next->name,&flg);
4256: if (flg) {
4257: if (!next->handlers) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"MatSolverTypeHolder is missing handlers");
4258: inext = next->handlers;
4259: while (inext) {
4260: PetscStrcasecmp(mtype,inext->mtype,&flg);
4261: if (flg) {
4262: inext->getfactor[(int)ftype-1] = getfactor;
4263: return(0);
4264: }
4265: iprev = inext;
4266: inext = inext->next;
4267: }
4268: PetscNew(&iprev->next);
4269: PetscStrallocpy(mtype,(char **)&iprev->next->mtype);
4270: iprev->next->getfactor[(int)ftype-1] = getfactor;
4271: return(0);
4272: }
4273: prev = next;
4274: next = next->next;
4275: }
4276: PetscNew(&prev->next);
4277: PetscStrallocpy(package,&prev->next->name);
4278: PetscNew(&prev->next->handlers);
4279: PetscStrallocpy(mtype,(char **)&prev->next->handlers->mtype);
4280: prev->next->handlers->getfactor[(int)ftype-1] = getfactor;
4281: return(0);
4282: }
4284: /*@C
4285:    MatSolverTypeGet - Gets the function that creates the factor matrix if it exists
4287: Input Parameters:
4288: + package - name of the package, for example petsc or superlu
4289: . ftype - the type of factorization supported by the package
4290: - mtype - the matrix type that works with this package
4292: Output Parameters:
4293: + foundpackage - PETSC_TRUE if the package was registered
4294: . foundmtype - PETSC_TRUE if the package supports the requested mtype
4295: - getfactor - routine that will create the factored matrix ready to be used or NULL if not found
4297: Level: intermediate
4299: .seealso: MatCopy(), MatDuplicate(), MatGetFactorAvailable()
4300: @*/
4301: PetscErrorCode MatSolverTypeGet(MatSolverType package,MatType mtype,MatFactorType ftype,PetscBool *foundpackage,PetscBool *foundmtype,PetscErrorCode (**getfactor)(Mat,MatFactorType,Mat*))
4302: {
4303: PetscErrorCode ierr;
4304: MatSolverTypeHolder next = MatSolverTypeHolders;
4305: PetscBool flg;
4306: MatSolverTypeForSpecifcType inext;
4309: if (foundpackage) *foundpackage = PETSC_FALSE;
4310: if (foundmtype) *foundmtype = PETSC_FALSE;
4311: if (getfactor) *getfactor = NULL;
4313: if (package) {
4314: while (next) {
4315: PetscStrcasecmp(package,next->name,&flg);
4316: if (flg) {
4317: if (foundpackage) *foundpackage = PETSC_TRUE;
4318: inext = next->handlers;
4319: while (inext) {
4320: PetscStrbeginswith(mtype,inext->mtype,&flg);
4321: if (flg) {
4322: if (foundmtype) *foundmtype = PETSC_TRUE;
4323: if (getfactor) *getfactor = inext->getfactor[(int)ftype-1];
4324: return(0);
4325: }
4326: inext = inext->next;
4327: }
4328: }
4329: next = next->next;
4330: }
4331: } else {
4332: while (next) {
4333: inext = next->handlers;
4334: while (inext) {
4335: PetscStrbeginswith(mtype,inext->mtype,&flg);
4336: if (flg && inext->getfactor[(int)ftype-1]) {
4337: if (foundpackage) *foundpackage = PETSC_TRUE;
4338: if (foundmtype) *foundmtype = PETSC_TRUE;
4339: if (getfactor) *getfactor = inext->getfactor[(int)ftype-1];
4340: return(0);
4341: }
4342: inext = inext->next;
4343: }
4344: next = next->next;
4345: }
4346: }
4347: return(0);
4348: }
4350: PetscErrorCode MatSolverTypeDestroy(void)
4351: {
4352: PetscErrorCode ierr;
4353: MatSolverTypeHolder next = MatSolverTypeHolders,prev;
4354: MatSolverTypeForSpecifcType inext,iprev;
4357: while (next) {
4358: PetscFree(next->name);
4359: inext = next->handlers;
4360: while (inext) {
4361: PetscFree(inext->mtype);
4362: iprev = inext;
4363: inext = inext->next;
4364: PetscFree(iprev);
4365: }
4366: prev = next;
4367: next = next->next;
4368: PetscFree(prev);
4369: }
4370: MatSolverTypeHolders = NULL;
4371: return(0);
4372: }
4374: /*@C
4375:    MatGetFactor - Returns a matrix suitable for calls to MatXXFactorSymbolic()
4377: Collective on Mat
4379: Input Parameters:
4380: + mat - the matrix
4381: . type - name of solver type, for example, superlu, petsc (to use PETSc's default)
4382: - ftype - factor type, MAT_FACTOR_LU, MAT_FACTOR_CHOLESKY, MAT_FACTOR_ICC, MAT_FACTOR_ILU,
4384: Output Parameters:
4385: . f - the factor matrix used with MatXXFactorSymbolic() calls
4387: Notes:
4388: Some PETSc matrix formats have alternative solvers available that are contained in alternative packages
4389: such as pastix, superlu, mumps etc.
4391:      PETSc must have been ./configure'd to use the external solver, using the option --download-package
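   Example of Usage (a minimal sketch, assuming a sequential symmetric matrix A in AIJ or SBAIJ format and conforming vectors b and x; F, perm, cperm, and info are created here only for illustration):
.vb
      Mat           F;
      IS            perm,cperm;
      MatFactorInfo info;

      MatFactorInfoInitialize(&info);
      MatGetOrdering(A,MATORDERINGNATURAL,&perm,&cperm);
      MatGetFactor(A,MATSOLVERPETSC,MAT_FACTOR_CHOLESKY,&F);
      MatCholeskyFactorSymbolic(F,A,perm,&info);
      MatCholeskyFactorNumeric(F,A,&info);
      MatSolve(F,b,x);
.ve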
4393: Level: intermediate
4395: .seealso: MatCopy(), MatDuplicate(), MatGetFactorAvailable()
4396: @*/
4397: PetscErrorCode MatGetFactor(Mat mat, MatSolverType type,MatFactorType ftype,Mat *f)
4398: {
4399: PetscErrorCode ierr,(*conv)(Mat,MatFactorType,Mat*);
4400: PetscBool foundpackage,foundmtype;
4406: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
4407: MatCheckPreallocated(mat,1);
4409: MatSolverTypeGet(type,((PetscObject)mat)->type_name,ftype,&foundpackage,&foundmtype,&conv);
4410: if (!foundpackage) {
4411: if (type) {
4412: SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_MISSING_FACTOR,"Could not locate solver package %s. Perhaps you must ./configure with --download-%s",type,type);
4413: } else {
4414: SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_MISSING_FACTOR,"Could not locate a solver package. Perhaps you must ./configure with --download-<package>");
4415: }
4416: }
4418: if (!foundmtype) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_MISSING_FACTOR,"MatSolverType %s does not support matrix type %s",type,((PetscObject)mat)->type_name);
4419: if (!conv) SETERRQ3(PetscObjectComm((PetscObject)mat),PETSC_ERR_MISSING_FACTOR,"MatSolverType %s does not support factorization type %s for matrix type %s",type,MatFactorTypes[ftype],((PetscObject)mat)->type_name);
4421: #if defined(PETSC_USE_COMPLEX)
4422: if (mat->hermitian && !mat->symmetric && (ftype == MAT_FACTOR_CHOLESKY||ftype == MAT_FACTOR_ICC)) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Hermitian CHOLESKY or ICC Factor is not supported");
4423: #endif
4425: (*conv)(mat,ftype,f);
4426: return(0);
4427: }
4429: /*@C
4430:    MatGetFactorAvailable - Returns a flag indicating whether the matrix supports a particular package and factor type
4432: Not Collective
4434: Input Parameters:
4435: + mat - the matrix
4436: . type - name of solver type, for example, superlu, petsc (to use PETSc's default)
4437: - ftype - factor type, MAT_FACTOR_LU, MAT_FACTOR_CHOLESKY, MAT_FACTOR_ICC, MAT_FACTOR_ILU,
4439: Output Parameter:
4440: . flg - PETSC_TRUE if the factorization is available
4442: Notes:
4443: Some PETSc matrix formats have alternative solvers available that are contained in alternative packages
4444: such as pastix, superlu, mumps etc.
4446:      PETSc must have been ./configure'd to use the external solver, using the option --download-package
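   Example of Usage (a minimal sketch, assuming a sequential AIJ matrix A; MUMPS is used only as an illustration, with a fallback to PETSc's built-in factorization when it is not available):
.vb
      Mat       F;
      PetscBool flg;

      MatGetFactorAvailable(A,MATSOLVERMUMPS,MAT_FACTOR_LU,&flg);
      MatGetFactor(A,flg ? MATSOLVERMUMPS : MATSOLVERPETSC,MAT_FACTOR_LU,&F);
.ve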
4448: Level: intermediate
4450: .seealso: MatCopy(), MatDuplicate(), MatGetFactor()
4451: @*/
4452: PetscErrorCode MatGetFactorAvailable(Mat mat, MatSolverType type,MatFactorType ftype,PetscBool *flg)
4453: {
4454: PetscErrorCode ierr, (*gconv)(Mat,MatFactorType,Mat*);
4460: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
4461: MatCheckPreallocated(mat,1);
4463: *flg = PETSC_FALSE;
4464: MatSolverTypeGet(type,((PetscObject)mat)->type_name,ftype,NULL,NULL,&gconv);
4465: if (gconv) {
4466: *flg = PETSC_TRUE;
4467: }
4468: return(0);
4469: }
4471: #include <petscdmtypes.h>
4473: /*@
4474: MatDuplicate - Duplicates a matrix including the non-zero structure.
4476: Collective on Mat
4478: Input Parameters:
4479: + mat - the matrix
4480: - op - One of MAT_DO_NOT_COPY_VALUES, MAT_COPY_VALUES, or MAT_SHARE_NONZERO_PATTERN.
4481: See the manual page for MatDuplicateOption for an explanation of these options.
4483: Output Parameter:
4484: . M - pointer to place new matrix
4486: Level: intermediate
4488: Concepts: matrices^duplicating
4490: Notes:
4491: You cannot change the nonzero pattern for the parent or child matrix if you use MAT_SHARE_NONZERO_PATTERN.
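   Example of Usage (a minimal sketch, assuming an assembled matrix A; B and C are created here only for illustration):
.vb
      MatDuplicate(A,MAT_COPY_VALUES,&B);          /* B: same nonzero pattern and values as A  */
      MatDuplicate(A,MAT_DO_NOT_COPY_VALUES,&C);   /* C: same nonzero pattern, values set to 0 */
.ve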
4493: .seealso: MatCopy(), MatConvert(), MatDuplicateOption
4494: @*/
4495: PetscErrorCode MatDuplicate(Mat mat,MatDuplicateOption op,Mat *M)
4496: {
4498: Mat B;
4499: PetscInt i;
4500: DM dm;
4501: void (*viewf)(void);
4507: if (op == MAT_COPY_VALUES && !mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"MAT_COPY_VALUES not allowed for unassembled matrix");
4508: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
4509: MatCheckPreallocated(mat,1);
4511: *M = 0;
4512: if (!mat->ops->duplicate) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Not written for this matrix type");
4513: PetscLogEventBegin(MAT_Convert,mat,0,0,0);
4514: (*mat->ops->duplicate)(mat,op,M);
4515: B = *M;
4517: MatGetOperation(mat,MATOP_VIEW,&viewf);
4518: if (viewf) {
4519: MatSetOperation(B,MATOP_VIEW,viewf);
4520: }
4522: B->stencil.dim = mat->stencil.dim;
4523: B->stencil.noc = mat->stencil.noc;
4524: for (i=0; i<=mat->stencil.dim; i++) {
4525: B->stencil.dims[i] = mat->stencil.dims[i];
4526: B->stencil.starts[i] = mat->stencil.starts[i];
4527: }
4529: B->nooffproczerorows = mat->nooffproczerorows;
4530: B->nooffprocentries = mat->nooffprocentries;
4532: PetscObjectQuery((PetscObject) mat, "__PETSc_dm", (PetscObject*) &dm);
4533: if (dm) {
4534: PetscObjectCompose((PetscObject) B, "__PETSc_dm", (PetscObject) dm);
4535: }
4536: PetscLogEventEnd(MAT_Convert,mat,0,0,0);
4537: PetscObjectStateIncrease((PetscObject)B);
4538: return(0);
4539: }
4541: /*@
4542: MatGetDiagonal - Gets the diagonal of a matrix.
4544: Logically Collective on Mat and Vec
4546: Input Parameters:
4547: + mat - the matrix
4548: - v - the vector for storing the diagonal
4550: Output Parameter:
4551: . v - the diagonal of the matrix
4553: Level: intermediate
4555: Note:
4556: Currently only correct in parallel for square matrices.
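   Example of Usage (a minimal sketch, assuming an assembled square matrix A; the vector d is created here only for illustration):
.vb
      Vec d;

      MatCreateVecs(A,NULL,&d);
      MatGetDiagonal(A,d);
      VecReciprocal(d);          /* e.g. build a Jacobi scaling from the diagonal */
      VecDestroy(&d);
.ve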
4558: Concepts: matrices^accessing diagonals
4560: .seealso: MatGetRow(), MatCreateSubMatrices(), MatCreateSubMatrix(), MatGetRowMaxAbs()
4561: @*/
4562: PetscErrorCode MatGetDiagonal(Mat mat,Vec v)
4563: {
4570: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4571: if (!mat->ops->getdiagonal) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
4572: MatCheckPreallocated(mat,1);
4574: (*mat->ops->getdiagonal)(mat,v);
4575: PetscObjectStateIncrease((PetscObject)v);
4576: return(0);
4577: }
4579: /*@C
4580: MatGetRowMin - Gets the minimum value (of the real part) of each
4581: row of the matrix
4583: Logically Collective on Mat and Vec
4585: Input Parameters:
4586: . mat - the matrix
4588: Output Parameter:
4589: +  v - the vector for storing the minimums
4590: - idx - the indices of the column found for each row (optional)
4592: Level: intermediate
4594: Notes:
4595:     The results of this call are the same as if one converted the matrix to dense format
4596: and found the minimum value in each row (i.e. the implicit zeros are counted as zeros).
4598: This code is only implemented for a couple of matrix formats.
4600: Concepts: matrices^getting row maximums
4602: .seealso: MatGetDiagonal(), MatCreateSubMatrices(), MatCreateSubMatrix(), MatGetRowMaxAbs(),
4603: MatGetRowMax()
4604: @*/
4605: PetscErrorCode MatGetRowMin(Mat mat,Vec v,PetscInt idx[])
4606: {
4613: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4614: if (!mat->ops->getrowmax) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
4615: MatCheckPreallocated(mat,1);
4617: (*mat->ops->getrowmin)(mat,v,idx);
4618: PetscObjectStateIncrease((PetscObject)v);
4619: return(0);
4620: }
4622: /*@C
4623: MatGetRowMinAbs - Gets the minimum value (in absolute value) of each
4624: row of the matrix
4626: Logically Collective on Mat and Vec
4628: Input Parameters:
4629: . mat - the matrix
4631: Output Parameter:
4632: + v - the vector for storing the minimums
4633: - idx - the indices of the column found for each row (or NULL if not needed)
4635: Level: intermediate
4637: Notes:
4638:     If a row is completely empty or has only 0.0 values then the idx[] value for that
4639: row is 0 (the first column).
4641: This code is only implemented for a couple of matrix formats.
4643: Concepts: matrices^getting row maximums
4645: .seealso: MatGetDiagonal(), MatCreateSubMatrices(), MatCreateSubMatrix(), MatGetRowMax(), MatGetRowMaxAbs(), MatGetRowMin()
4646: @*/
4647: PetscErrorCode MatGetRowMinAbs(Mat mat,Vec v,PetscInt idx[])
4648: {
4655: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4656: if (!mat->ops->getrowminabs) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
4657: MatCheckPreallocated(mat,1);
4658: if (idx) {PetscMemzero(idx,mat->rmap->n*sizeof(PetscInt));}
4660: (*mat->ops->getrowminabs)(mat,v,idx);
4661: PetscObjectStateIncrease((PetscObject)v);
4662: return(0);
4663: }
4665: /*@C
4666: MatGetRowMax - Gets the maximum value (of the real part) of each
4667: row of the matrix
4669: Logically Collective on Mat and Vec
4671: Input Parameters:
4672: . mat - the matrix
4674: Output Parameter:
4675: + v - the vector for storing the maximums
4676: - idx - the indices of the column found for each row (optional)
4678: Level: intermediate
4680: Notes:
4681:     The results of this call are the same as if one converted the matrix to dense format
4682:     and found the maximum value in each row (i.e. the implicit zeros are counted as zeros).
4684: This code is only implemented for a couple of matrix formats.
4686: Concepts: matrices^getting row maximums
4688: .seealso: MatGetDiagonal(), MatCreateSubMatrices(), MatCreateSubMatrix(), MatGetRowMaxAbs(), MatGetRowMin()
4689: @*/
4690: PetscErrorCode MatGetRowMax(Mat mat,Vec v,PetscInt idx[])
4691: {
4698: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4699: if (!mat->ops->getrowmax) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
4700: MatCheckPreallocated(mat,1);
4702: (*mat->ops->getrowmax)(mat,v,idx);
4703: PetscObjectStateIncrease((PetscObject)v);
4704: return(0);
4705: }
4707: /*@C
4708: MatGetRowMaxAbs - Gets the maximum value (in absolute value) of each
4709: row of the matrix
4711: Logically Collective on Mat and Vec
4713: Input Parameters:
4714: . mat - the matrix
4716: Output Parameter:
4717: + v - the vector for storing the maximums
4718: - idx - the indices of the column found for each row (or NULL if not needed)
4720: Level: intermediate
4722: Notes:
4723:     If a row is completely empty or has only 0.0 values then the idx[] value for that
4724: row is 0 (the first column).
4726: This code is only implemented for a couple of matrix formats.
4728: Concepts: matrices^getting row maximums
4730: .seealso: MatGetDiagonal(), MatCreateSubMatrices(), MatCreateSubMatrix(), MatGetRowMax(), MatGetRowMin()
4731: @*/
4732: PetscErrorCode MatGetRowMaxAbs(Mat mat,Vec v,PetscInt idx[])
4733: {
4740: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4741: if (!mat->ops->getrowmaxabs) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
4742: MatCheckPreallocated(mat,1);
4743: if (idx) {PetscMemzero(idx,mat->rmap->n*sizeof(PetscInt));}
4745: (*mat->ops->getrowmaxabs)(mat,v,idx);
4746: PetscObjectStateIncrease((PetscObject)v);
4747: return(0);
4748: }
4750: /*@
4751: MatGetRowSum - Gets the sum of each row of the matrix
4753: Logically or Neighborhood Collective on Mat and Vec
4755: Input Parameters:
4756: . mat - the matrix
4758: Output Parameter:
4759: . v - the vector for storing the sum of rows
4761: Level: intermediate
4763: Notes:
4764: This code is slow since it is not currently specialized for different formats
4766: Concepts: matrices^getting row sums
4768: .seealso: MatGetDiagonal(), MatCreateSubMatrices(), MatCreateSubMatrix(), MatGetRowMax(), MatGetRowMin()
4769: @*/
4770: PetscErrorCode MatGetRowSum(Mat mat, Vec v)
4771: {
4772: Vec ones;
4779: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4780: MatCheckPreallocated(mat,1);
4781: MatCreateVecs(mat,&ones,NULL);
4782: VecSet(ones,1.);
4783: MatMult(mat,ones,v);
4784: VecDestroy(&ones);
4785: return(0);
4786: }
4788: /*@
4789: MatTranspose - Computes an in-place or out-of-place transpose of a matrix.
4791: Collective on Mat
4793: Input Parameter:
4794: + mat - the matrix to transpose
4795: - reuse - either MAT_INITIAL_MATRIX, MAT_REUSE_MATRIX, or MAT_INPLACE_MATRIX
4797: Output Parameters:
4798: . B - the transpose
4800: Notes:
4801: If you use MAT_INPLACE_MATRIX then you must pass in &mat for B
4803: MAT_REUSE_MATRIX causes the B matrix from a previous call to this function with MAT_INITIAL_MATRIX to be used
4805: Consider using MatCreateTranspose() instead if you only need a matrix that behaves like the transpose, but don't need the storage to be changed.
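   Example of Usage (a minimal sketch, assuming an assembled matrix A; At is created here only for illustration):
.vb
      MatTranspose(A,MAT_INITIAL_MATRIX,&At);   /* create At = A^T                              */
      MatTranspose(A,MAT_REUSE_MATRIX,&At);     /* refill At after the values of A have changed */
      MatTranspose(A,MAT_INPLACE_MATRIX,&A);    /* overwrite A with its own transpose           */
.ve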
4807: Level: intermediate
4809: Concepts: matrices^transposing
4811: .seealso: MatMultTranspose(), MatMultTransposeAdd(), MatIsTranspose(), MatReuse
4812: @*/
4813: PetscErrorCode MatTranspose(Mat mat,MatReuse reuse,Mat *B)
4814: {
4820: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4821: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
4822: if (!mat->ops->transpose) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
4823: if (reuse == MAT_INPLACE_MATRIX && mat != *B) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"MAT_INPLACE_MATRIX requires last matrix to match first");
4824: if (reuse == MAT_REUSE_MATRIX && mat == *B) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Perhaps you mean MAT_INPLACE_MATRIX");
4825: MatCheckPreallocated(mat,1);
4827: PetscLogEventBegin(MAT_Transpose,mat,0,0,0);
4828: (*mat->ops->transpose)(mat,reuse,B);
4829: PetscLogEventEnd(MAT_Transpose,mat,0,0,0);
4830: if (B) {PetscObjectStateIncrease((PetscObject)*B);}
4831: return(0);
4832: }
4834: /*@
4835: MatIsTranspose - Test whether a matrix is another one's transpose,
4836: or its own, in which case it tests symmetry.
4838: Collective on Mat
4840: Input Parameter:
4841: + A - the matrix to test
4842: - B - the matrix to test against, this can equal the first parameter
4844: Output Parameters:
4845: . flg - the result
4847: Notes:
4848: Only available for SeqAIJ/MPIAIJ matrices. The sequential algorithm
4849: has a running time of the order of the number of nonzeros; the parallel
4850: test involves parallel copies of the block-offdiagonal parts of the matrix.
4852: Level: intermediate
4854: Concepts: matrices^transposing, matrix^symmetry
4856: .seealso: MatTranspose(), MatIsSymmetric(), MatIsHermitian()
4857: @*/
4858: PetscErrorCode MatIsTranspose(Mat A,Mat B,PetscReal tol,PetscBool *flg)
4859: {
4860: PetscErrorCode ierr,(*f)(Mat,Mat,PetscReal,PetscBool*),(*g)(Mat,Mat,PetscReal,PetscBool*);
4866: PetscObjectQueryFunction((PetscObject)A,"MatIsTranspose_C",&f);
4867: PetscObjectQueryFunction((PetscObject)B,"MatIsTranspose_C",&g);
4868: *flg = PETSC_FALSE;
4869: if (f && g) {
4870: if (f == g) {
4871: (*f)(A,B,tol,flg);
4872: } else SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_NOTSAMETYPE,"Matrices do not have the same comparator for symmetry test");
4873: } else {
4874: MatType mattype;
4875: if (!f) {
4876: MatGetType(A,&mattype);
4877: } else {
4878: MatGetType(B,&mattype);
4879: }
4880: SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Matrix of type <%s> does not support checking for transpose",mattype);
4881: }
4882: return(0);
4883: }
4885: /*@
4886:    MatHermitianTranspose - Computes the in-place or out-of-place Hermitian (complex conjugate) transpose of a matrix.
4888: Collective on Mat
4890: Input Parameter:
4891: + mat - the matrix to transpose and complex conjugate
4892: - reuse - MAT_INITIAL_MATRIX to create a new matrix, MAT_INPLACE_MATRIX to reuse the first argument to store the transpose
4894: Output Parameters:
4895: .  B - the Hermitian transpose
4897: Level: intermediate
4899:    Concepts: matrices^transposing, complex conjugate
4901: .seealso: MatTranspose(), MatMultTranspose(), MatMultTransposeAdd(), MatIsTranspose(), MatReuse
4902: @*/
4903: PetscErrorCode MatHermitianTranspose(Mat mat,MatReuse reuse,Mat *B)
4904: {
4908: MatTranspose(mat,reuse,B);
4909: #if defined(PETSC_USE_COMPLEX)
4910: MatConjugate(*B);
4911: #endif
4912: return(0);
4913: }
4915: /*@
4916:    MatIsHermitianTranspose - Test whether a matrix is another one's Hermitian transpose.
4918: Collective on Mat
4920: Input Parameter:
4921: + A - the matrix to test
4922: - B - the matrix to test against, this can equal the first parameter
4924: Output Parameters:
4925: . flg - the result
4927: Notes:
4928: Only available for SeqAIJ/MPIAIJ matrices. The sequential algorithm
4929: has a running time of the order of the number of nonzeros; the parallel
4930: test involves parallel copies of the block-offdiagonal parts of the matrix.
4932: Level: intermediate
4934: Concepts: matrices^transposing, matrix^symmetry
4936: .seealso: MatTranspose(), MatIsSymmetric(), MatIsHermitian(), MatIsTranspose()
4937: @*/
4938: PetscErrorCode MatIsHermitianTranspose(Mat A,Mat B,PetscReal tol,PetscBool *flg)
4939: {
4940: PetscErrorCode ierr,(*f)(Mat,Mat,PetscReal,PetscBool*),(*g)(Mat,Mat,PetscReal,PetscBool*);
4946: PetscObjectQueryFunction((PetscObject)A,"MatIsHermitianTranspose_C",&f);
4947: PetscObjectQueryFunction((PetscObject)B,"MatIsHermitianTranspose_C",&g);
4948: if (f && g) {
4949: if (f==g) {
4950: (*f)(A,B,tol,flg);
4951: } else SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_NOTSAMETYPE,"Matrices do not have the same comparator for Hermitian test");
4952: }
4953: return(0);
4954: }
4956: /*@
4957: MatPermute - Creates a new matrix with rows and columns permuted from the
4958: original.
4960: Collective on Mat
4962: Input Parameters:
4963: + mat - the matrix to permute
4964: . row - row permutation, each processor supplies only the permutation for its rows
4965: - col - column permutation, each processor supplies only the permutation for its columns
4967: Output Parameters:
4968: . B - the permuted matrix
4970: Level: advanced
4972: Note:
4973: The index sets map from row/col of permuted matrix to row/col of original matrix.
4974: The index sets should be on the same communicator as Mat and have the same local sizes.
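   Example of Usage:
   A minimal sketch, assuming mat is an assembled matrix; here the permutation is taken from a reordering computed by MatGetOrdering():
.vb
      IS  rperm,cperm;
      Mat B;
      MatGetOrdering(mat,MATORDERINGRCM,&rperm,&cperm);
      MatPermute(mat,rperm,cperm,&B);
      ISDestroy(&rperm);
      ISDestroy(&cperm);
.ve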
4976: Concepts: matrices^permuting
4978: .seealso: MatGetOrdering(), ISAllGather()
4980: @*/
4981: PetscErrorCode MatPermute(Mat mat,IS row,IS col,Mat *B)
4982: {
4991: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4992: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
4993: if (!mat->ops->permute) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"MatPermute not available for Mat type %s",((PetscObject)mat)->type_name);
4994: MatCheckPreallocated(mat,1);
4996: (*mat->ops->permute)(mat,row,col,B);
4997: PetscObjectStateIncrease((PetscObject)*B);
4998: return(0);
4999: }
5001: /*@
5002: MatEqual - Compares two matrices.
5004: Collective on Mat
5006: Input Parameters:
5007: + A - the first matrix
5008: - B - the second matrix
5010: Output Parameter:
5011: . flg - PETSC_TRUE if the matrices are equal; PETSC_FALSE otherwise.
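   Example of Usage:
   A minimal sketch, assuming A and B are assembled matrices of the same type:
.vb
      PetscBool flg;
      MatEqual(A,B,&flg);
      if (!flg) PetscPrintf(PETSC_COMM_WORLD,"A and B differ\n");
.ve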
5013: Level: intermediate
5015: Concepts: matrices^equality between
5016: @*/
5017: PetscErrorCode MatEqual(Mat A,Mat B,PetscBool *flg)
5018: {
5028: MatCheckPreallocated(B,2);
5029: if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
5030: if (!B->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
5031: if (A->rmap->N != B->rmap->N || A->cmap->N != B->cmap->N) SETERRQ4(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Mat A,Mat B: global dim %D %D %D %D",A->rmap->N,B->rmap->N,A->cmap->N,B->cmap->N);
5032: if (!A->ops->equal) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Mat type %s",((PetscObject)A)->type_name);
5033: if (!B->ops->equal) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Mat type %s",((PetscObject)B)->type_name);
5034: if (A->ops->equal != B->ops->equal) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_INCOMP,"A is type: %s\nB is type: %s",((PetscObject)A)->type_name,((PetscObject)B)->type_name);
5035: MatCheckPreallocated(A,1);
5037: (*A->ops->equal)(A,B,flg);
5038: return(0);
5039: }
5041: /*@
5042: MatDiagonalScale - Scales a matrix on the left and right by diagonal
5043: matrices that are stored as vectors. Either of the two scaling
5044: matrices can be NULL.
5046: Collective on Mat
5048: Input Parameters:
5049: + mat - the matrix to be scaled
5050: . l - the left scaling vector (or NULL)
5051: - r - the right scaling vector (or NULL)
5053: Notes:
5054: MatDiagonalScale() computes A = LAR, where
5055: L is a diagonal matrix (stored as a vector) and R is a diagonal matrix (stored as a vector).
5056: L scales the rows of the matrix and R scales the columns of the matrix.
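   Example of Usage:
   A minimal sketch of symmetric (Jacobi-like) scaling, assuming mat is a square assembled matrix with matching row and
   column layouts and no zeros on its diagonal:
.vb
      Vec d;
      MatCreateVecs(mat,&d,NULL);
      MatGetDiagonal(mat,d);
      VecSqrtAbs(d);
      VecReciprocal(d);
      MatDiagonalScale(mat,d,d);
      VecDestroy(&d);
.ve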
5058: Level: intermediate
5060: Concepts: matrices^diagonal scaling
5061: Concepts: diagonal scaling of matrices
5063: .seealso: MatScale(), MatShift(), MatDiagonalSet()
5064: @*/
5065: PetscErrorCode MatDiagonalScale(Mat mat,Vec l,Vec r)
5066: {
5072: if (!mat->ops->diagonalscale) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
5075: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
5076: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
5077: MatCheckPreallocated(mat,1);
5079: PetscLogEventBegin(MAT_Scale,mat,0,0,0);
5080: (*mat->ops->diagonalscale)(mat,l,r);
5081: PetscLogEventEnd(MAT_Scale,mat,0,0,0);
5082: PetscObjectStateIncrease((PetscObject)mat);
5083: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_VECCUDA)
5084: if (mat->valid_GPU_matrix != PETSC_OFFLOAD_UNALLOCATED) {
5085: mat->valid_GPU_matrix = PETSC_OFFLOAD_CPU;
5086: }
5087: #endif
5088: return(0);
5089: }
5091: /*@
5092: MatScale - Scales all elements of a matrix by a given number.
5094: Logically Collective on Mat
5096: Input Parameters:
5097: + mat - the matrix to be scaled
5098: - a - the scaling value
5100: Output Parameter:
5101: . mat - the scaled matrix
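   Example of Usage:
   A minimal sketch, assuming mat is an assembled matrix; this multiplies every stored entry by -2.0:
.vb
      MatScale(mat,-2.0);
.ve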
5103: Level: intermediate
5105: Concepts: matrices^scaling all entries
5107: .seealso: MatDiagonalScale()
5108: @*/
5109: PetscErrorCode MatScale(Mat mat,PetscScalar a)
5110: {
5116: if (a != (PetscScalar)1.0 && !mat->ops->scale) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
5117: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
5118: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
5120: MatCheckPreallocated(mat,1);
5122: PetscLogEventBegin(MAT_Scale,mat,0,0,0);
5123: if (a != (PetscScalar)1.0) {
5124: (*mat->ops->scale)(mat,a);
5125: PetscObjectStateIncrease((PetscObject)mat);
5126: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_VECCUDA)
5127: if (mat->valid_GPU_matrix != PETSC_OFFLOAD_UNALLOCATED) {
5128: mat->valid_GPU_matrix = PETSC_OFFLOAD_CPU;
5129: }
5130: #endif
5131: }
5132: PetscLogEventEnd(MAT_Scale,mat,0,0,0);
5133: return(0);
5134: }
5136: static PetscErrorCode MatNorm_Basic(Mat A,NormType type,PetscReal *nrm)
5137: {
5141: if (type == NORM_1 || type == NORM_INFINITY) {
5142: Vec l,r;
5144: MatCreateVecs(A,&r,&l);
5145: if (type == NORM_INFINITY) {
5146: VecSet(r,1.);
5147: MatMult(A,r,l);
5148: VecNorm(l,NORM_INFINITY,nrm);
5149: } else {
5150: VecSet(l,1.);
5151: MatMultTranspose(A,l,r);
5152: VecNorm(r,NORM_INFINITY,nrm);
5153: }
5154: VecDestroy(&l);
5155: VecDestroy(&r);
5156: } else SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Matrix class %s, norm type %d",((PetscObject)A)->type_name,type);
5157: return(0);
5158: }
5160: /*@
5161: MatNorm - Calculates various norms of a matrix.
5163: Collective on Mat
5165: Input Parameters:
5166: + mat - the matrix
5167: - type - the type of norm, NORM_1, NORM_FROBENIUS, NORM_INFINITY
5169: Output Parameters:
5170: . nrm - the resulting norm
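   Example of Usage:
   A minimal sketch, assuming mat is an assembled matrix:
.vb
      PetscReal nrm;
      MatNorm(mat,NORM_FROBENIUS,&nrm);
      PetscPrintf(PETSC_COMM_WORLD,"Frobenius norm %g\n",(double)nrm);
.ve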
5172: Level: intermediate
5174: Concepts: matrices^norm
5175: Concepts: norm^of matrix
5176: @*/
5177: PetscErrorCode MatNorm(Mat mat,NormType type,PetscReal *nrm)
5178: {
5187: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
5188: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
5189: MatCheckPreallocated(mat,1);
5191: if (!mat->ops->norm) {
5192: MatNorm_Basic(mat,type,nrm);
5193: } else {
5194: (*mat->ops->norm)(mat,type,nrm);
5195: }
5196: return(0);
5197: }
5199: /*
5200: This variable is used to prevent counting of MatAssemblyBegin() that
5201: are called from within a MatAssemblyEnd().
5202: */
5203: static PetscInt MatAssemblyEnd_InUse = 0;
5204: /*@
5205: MatAssemblyBegin - Begins assembling the matrix. This routine should
5206: be called after completing all calls to MatSetValues().
5208: Collective on Mat
5210: Input Parameters:
5211: + mat - the matrix
5212: - type - type of assembly, either MAT_FLUSH_ASSEMBLY or MAT_FINAL_ASSEMBLY
5214: Notes:
5215: MatSetValues() generally caches the values. The matrix is ready to
5216: use only after MatAssemblyBegin() and MatAssemblyEnd() have been called.
5217: Use MAT_FLUSH_ASSEMBLY when switching between ADD_VALUES and INSERT_VALUES
5218: in MatSetValues(); use MAT_FINAL_ASSEMBLY for the final assembly before
5219: using the matrix.
5221: ALL processes that share a matrix MUST call MatAssemblyBegin() and MatAssemblyEnd() the SAME NUMBER of times, and each time with the
5222: same flag of MAT_FLUSH_ASSEMBLY or MAT_FINAL_ASSEMBLY for all processes. Thus you CANNOT locally change from ADD_VALUES to INSERT_VALUES, that is
5223: a global collective operation requiring all processes that share the matrix.
5225: Space for preallocated nonzeros that is not filled by a call to MatSetValues() or a related routine is compressed
5226: out by assembly. If you intend to use that extra space on a subsequent assembly, be sure to insert explicit zeros
5227: before MAT_FINAL_ASSEMBLY so the space is not compressed out.
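   Example of Usage:
   A minimal sketch of the usual set-then-assemble pattern, assuming mat has been created, sized and preallocated,
   and that rstart/rend were obtained from MatGetOwnershipRange():
.vb
      PetscInt i;
      for (i=rstart; i<rend; i++) {
        MatSetValue(mat,i,i,2.0,INSERT_VALUES);
      }
      MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY);
      MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY);
.ve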
5229: Level: beginner
5231: Concepts: matrices^assembling
5233: .seealso: MatAssemblyEnd(), MatSetValues(), MatAssembled()
5234: @*/
5235: PetscErrorCode MatAssemblyBegin(Mat mat,MatAssemblyType type)
5236: {
5242: MatCheckPreallocated(mat,1);
5243: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix.\nDid you forget to call MatSetUnfactored()?");
5244: if (mat->assembled) {
5245: mat->was_assembled = PETSC_TRUE;
5246: mat->assembled = PETSC_FALSE;
5247: }
5248: if (!MatAssemblyEnd_InUse) {
5249: PetscLogEventBegin(MAT_AssemblyBegin,mat,0,0,0);
5250: if (mat->ops->assemblybegin) {(*mat->ops->assemblybegin)(mat,type);}
5251: PetscLogEventEnd(MAT_AssemblyBegin,mat,0,0,0);
5252: } else if (mat->ops->assemblybegin) {
5253: (*mat->ops->assemblybegin)(mat,type);
5254: }
5255: return(0);
5256: }
5258: /*@
5259: MatAssembled - Indicates if a matrix has been assembled and is ready for
5260: use; for example, in matrix-vector product.
5262: Not Collective
5264: Input Parameter:
5265: . mat - the matrix
5267: Output Parameter:
5268: . assembled - PETSC_TRUE or PETSC_FALSE
5270: Level: advanced
5272: Concepts: matrices^assembled?
5274: .seealso: MatAssemblyEnd(), MatSetValues(), MatAssemblyBegin()
5275: @*/
5276: PetscErrorCode MatAssembled(Mat mat,PetscBool *assembled)
5277: {
5282: *assembled = mat->assembled;
5283: return(0);
5284: }
5286: /*@
5287: MatAssemblyEnd - Completes assembling the matrix. This routine should
5288: be called after MatAssemblyBegin().
5290: Collective on Mat
5292: Input Parameters:
5293: + mat - the matrix
5294: - type - type of assembly, either MAT_FLUSH_ASSEMBLY or MAT_FINAL_ASSEMBLY
5296: Options Database Keys:
5297: + -mat_view ::ascii_info - Prints info on matrix at the conclusion of MatAssemblyEnd()
5298: . -mat_view ::ascii_info_detail - Prints more detailed info
5299: . -mat_view - Prints matrix in ASCII format
5300: . -mat_view ::ascii_matlab - Prints matrix in Matlab format
5301: . -mat_view draw - Draws the nonzero structure of the matrix, using MatView() and PetscDrawOpenX().
5302: . -display <name> - Sets display name (default is host)
5303: . -draw_pause <sec> - Sets number of seconds to pause after display
5304: . -mat_view socket - Sends matrix to socket, can be accessed from Matlab (See Users-Manual: Chapter 12 Using MATLAB with PETSc )
5305: . -viewer_socket_machine <machine> - Machine to use for socket
5306: . -viewer_socket_port <port> - Port number to use for socket
5307: - -mat_view binary:filename[:append] - Save matrix to file in binary format
5309: Notes:
5310: MatSetValues() generally caches the values. The matrix is ready to
5311: use only after MatAssemblyBegin() and MatAssemblyEnd() have been called.
5312: Use MAT_FLUSH_ASSEMBLY when switching between ADD_VALUES and INSERT_VALUES
5313: in MatSetValues(); use MAT_FINAL_ASSEMBLY for the final assembly before
5314: using the matrix.
5316: Space for preallocated nonzeros that is not filled by a call to MatSetValues() or a related routine is compressed
5317: out by assembly. If you intend to use that extra space on a subsequent assembly, be sure to insert explicit zeros
5318: before MAT_FINAL_ASSEMBLY so the space is not compressed out.
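   Example of Usage:
   A minimal sketch of a flush assembly, used here only to switch from INSERT_VALUES to ADD_VALUES; it assumes
   mat has been created and preallocated and that row, col and val are valid:
.vb
      MatSetValues(mat,1,&row,1,&col,&val,INSERT_VALUES);
      MatAssemblyBegin(mat,MAT_FLUSH_ASSEMBLY);
      MatAssemblyEnd(mat,MAT_FLUSH_ASSEMBLY);
      MatSetValues(mat,1,&row,1,&col,&val,ADD_VALUES);
      MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY);
      MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY);
.ve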
5320: Level: beginner
5322: .seealso: MatAssemblyBegin(), MatSetValues(), PetscDrawOpenX(), PetscDrawCreate(), MatView(), MatAssembled(), PetscViewerSocketOpen()
5323: @*/
5324: PetscErrorCode MatAssemblyEnd(Mat mat,MatAssemblyType type)
5325: {
5326: PetscErrorCode ierr;
5327: static PetscInt inassm = 0;
5328: PetscBool flg = PETSC_FALSE;
5334: inassm++;
5335: MatAssemblyEnd_InUse++;
5336: if (MatAssemblyEnd_InUse == 1) { /* Do the logging only the first time through */
5337: PetscLogEventBegin(MAT_AssemblyEnd,mat,0,0,0);
5338: if (mat->ops->assemblyend) {
5339: (*mat->ops->assemblyend)(mat,type);
5340: }
5341: PetscLogEventEnd(MAT_AssemblyEnd,mat,0,0,0);
5342: } else if (mat->ops->assemblyend) {
5343: (*mat->ops->assemblyend)(mat,type);
5344: }
5346: /* Flush assembly is not a true assembly */
5347: if (type != MAT_FLUSH_ASSEMBLY) {
5348: mat->assembled = PETSC_TRUE; mat->num_ass++;
5349: }
5350: mat->insertmode = NOT_SET_VALUES;
5351: MatAssemblyEnd_InUse--;
5352: PetscObjectStateIncrease((PetscObject)mat);
5353: if (!mat->symmetric_eternal) {
5354: mat->symmetric_set = PETSC_FALSE;
5355: mat->hermitian_set = PETSC_FALSE;
5356: mat->structurally_symmetric_set = PETSC_FALSE;
5357: }
5358: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_VECCUDA)
5359: if (mat->valid_GPU_matrix != PETSC_OFFLOAD_UNALLOCATED) {
5360: mat->valid_GPU_matrix = PETSC_OFFLOAD_CPU;
5361: }
5362: #endif
5363: if (inassm == 1 && type != MAT_FLUSH_ASSEMBLY) {
5364: MatViewFromOptions(mat,NULL,"-mat_view");
5366: if (mat->checksymmetryonassembly) {
5367: MatIsSymmetric(mat,mat->checksymmetrytol,&flg);
5368: if (flg) {
5369: PetscPrintf(PetscObjectComm((PetscObject)mat),"Matrix is symmetric (tolerance %g)\n",(double)mat->checksymmetrytol);
5370: } else {
5371: PetscPrintf(PetscObjectComm((PetscObject)mat),"Matrix is not symmetric (tolerance %g)\n",(double)mat->checksymmetrytol);
5372: }
5373: }
5374: if (mat->nullsp && mat->checknullspaceonassembly) {
5375: MatNullSpaceTest(mat->nullsp,mat,NULL);
5376: }
5377: }
5378: inassm--;
5379: return(0);
5380: }
5382: /*@
5383: MatSetOption - Sets a parameter option for a matrix. Some options
5384: may be specific to certain storage formats. Some options
5385: determine how values will be inserted (or added). Sorted,
5386: row-oriented input will generally assemble the fastest. The default
5387: is row-oriented.
5389: Logically Collective on Mat for certain operations, such as MAT_SPD, not collective for MAT_ROW_ORIENTED, see MatOption
5391: Input Parameters:
5392: + mat - the matrix
5393: . option - the option, one of those listed below (and possibly others),
5394: - flg - turn the option on (PETSC_TRUE) or off (PETSC_FALSE)
5396: Options Describing Matrix Structure:
5397: + MAT_SPD - symmetric positive definite
5398: . MAT_SYMMETRIC - symmetric in terms of both structure and value
5399: . MAT_HERMITIAN - the matrix is equal to its own complex conjugate transpose
5400: . MAT_STRUCTURALLY_SYMMETRIC - symmetric nonzero structure
5401: - MAT_SYMMETRY_ETERNAL - if you would like the symmetry/Hermitian flag
5402: you set to be kept with all future use of the matrix
5403: including after MatAssemblyBegin/End() which could
5404: potentially change the symmetry structure, i.e. you
5405: KNOW the matrix will ALWAYS have the property you set.
5408: Options For Use with MatSetValues():
5409: These options determine the orientation of the logically dense subblock of values passed in:
5410: . MAT_ROW_ORIENTED - row-oriented (default)
5412: Note these options reflect the data you pass in with MatSetValues(); it has
5413: nothing to do with how the data is stored internally in the matrix
5414: data structure.
5416: When (re)assembling a matrix, we can restrict the input for
5417: efficiency/debugging purposes. These options include:
5418: + MAT_NEW_NONZERO_LOCATIONS - additional insertions will be allowed if they generate a new nonzero (slow)
5419: . MAT_NEW_DIAGONALS - new diagonals will be allowed (for block diagonal format only)
5420: . MAT_IGNORE_OFF_PROC_ENTRIES - drops off-processor entries
5421: . MAT_NEW_NONZERO_LOCATION_ERR - generates an error for new matrix entry
5422: . MAT_USE_HASH_TABLE - uses a hash table to speed up matrix assembly
5423: . MAT_NO_OFF_PROC_ENTRIES - you know each process will only set values for its own rows, will generate an error if
5424: any process sets values for another process. This avoids all reductions in the MatAssembly routines and thus improves
5425: performance for very large process counts.
5426: - MAT_SUBSET_OFF_PROC_ENTRIES - you know that the first assembly after setting this flag will set a superset
5427: of the off-process entries required for all subsequent assemblies. This avoids a rendezvous step in the MatAssembly
5428: functions, instead sending only neighbor messages.
5430: Notes:
5431: Except for MAT_UNUSED_NONZERO_LOCATION_ERR and MAT_ROW_ORIENTED all processes that share the matrix must pass the same value in flg!
5433: Some options are relevant only for particular matrix types and
5434: are thus ignored by others. Other options are not supported by
5435: certain matrix types and will generate an error message if set.
5437: If using a Fortran 77 module to compute a matrix, one may need to
5438: use the column-oriented option (or convert to the row-oriented
5439: format).
5441: MAT_NEW_NONZERO_LOCATIONS set to PETSC_FALSE indicates that any add or insertion
5442: that would generate a new entry in the nonzero structure is instead
5443: ignored. Thus, if memory has not already been allocated for this particular
5444: data, then the insertion is ignored. For dense matrices, in which
5445: the entire array is allocated, no entries are ever ignored.
5446: Set after the first MatAssemblyEnd(). If this option is set then the MatAssemblyBegin/End() processes have one fewer global reduction
5448: MAT_NEW_NONZERO_LOCATION_ERR set to PETSC_TRUE indicates that any add or insertion
5449: that would generate a new entry in the nonzero structure instead produces
5450: an error. (Currently supported for AIJ and BAIJ formats only.) If this option is set then the MatAssemblyBegin/End() processes have one fewer global reduction
5452: MAT_NEW_NONZERO_ALLOCATION_ERR set to PETSC_TRUE indicates that any add or insertion
5453: that would generate a new entry that has not been preallocated will
5454: instead produce an error. (Currently supported for AIJ and BAIJ formats
5455: only.) This is a useful flag when debugging matrix memory preallocation.
5456: If this option is set then the MatAssemblyBegin/End() processes have one fewer global reduction
5458: MAT_IGNORE_OFF_PROC_ENTRIES set to PETSC_TRUE indicates entries destined for
5459: other processors should be dropped, rather than stashed.
5460: This is useful if you know that the "owning" processor is also
5461: always generating the correct matrix entries, so that PETSc need
5462: not transfer duplicate entries generated on another processor.
5464: MAT_USE_HASH_TABLE indicates that a hash table be used to improve the
5465: searches during matrix assembly. When this flag is set, the hash table
5466: is created during the first Matrix Assembly. This hash table is
5467: used the next time through, during MatSetValues()/MatSetValuesBlocked()
5468: to improve the searching of indices. MAT_NEW_NONZERO_LOCATIONS flag
5469: should be used with MAT_USE_HASH_TABLE flag. This option is currently
5470: supported by MATMPIBAIJ format only.
5472: MAT_KEEP_NONZERO_PATTERN indicates when MatZeroRows() is called the zeroed entries
5473: are kept in the nonzero structure
5475: MAT_IGNORE_ZERO_ENTRIES - for AIJ/IS matrices this will stop zero values from creating
5476: a zero location in the matrix
5478: MAT_USE_INODES - indicates using inode version of the code - works with AIJ matrix types
5480: MAT_NO_OFF_PROC_ZERO_ROWS - you know each process will only zero its own rows. This avoids all reductions in the
5481: zero row routines and thus improves performance for very large process counts.
5483: MAT_IGNORE_LOWER_TRIANGULAR - for SBAIJ matrices, this will ignore any insertions you make in the lower triangular
5484: part of the matrix (since they should match the upper triangular part).
5486: Notes:
5487: Can only be called after MatSetSizes() and MatSetType() have been called.
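   Example of Usage:
   A minimal sketch, assuming mat already has its type and sizes set:
.vb
      MatSetOption(mat,MAT_SYMMETRIC,PETSC_TRUE);
      MatSetOption(mat,MAT_SYMMETRY_ETERNAL,PETSC_TRUE);
      MatSetOption(mat,MAT_NEW_NONZERO_ALLOCATION_ERR,PETSC_TRUE);
.ve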
5489: Level: intermediate
5491: Concepts: matrices^setting options
5493: .seealso: MatOption, Mat
5495: @*/
5496: PetscErrorCode MatSetOption(Mat mat,MatOption op,PetscBool flg)
5497: {
5503: if (op > 0) {
5506: }
5508: if (((int) op) <= MAT_OPTION_MIN || ((int) op) >= MAT_OPTION_MAX) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_OUTOFRANGE,"Options %d is out of range",(int)op);
5509: if (!((PetscObject)mat)->type_name) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_TYPENOTSET,"Cannot set options until type and size have been set, see MatSetType() and MatSetSizes()");
5511: switch (op) {
5512: case MAT_NO_OFF_PROC_ENTRIES:
5513: mat->nooffprocentries = flg;
5514: return(0);
5515: break;
5516: case MAT_SUBSET_OFF_PROC_ENTRIES:
5517: mat->subsetoffprocentries = flg;
5518: return(0);
5519: case MAT_NO_OFF_PROC_ZERO_ROWS:
5520: mat->nooffproczerorows = flg;
5521: return(0);
5522: break;
5523: case MAT_SPD:
5524: mat->spd_set = PETSC_TRUE;
5525: mat->spd = flg;
5526: if (flg) {
5527: mat->symmetric = PETSC_TRUE;
5528: mat->structurally_symmetric = PETSC_TRUE;
5529: mat->symmetric_set = PETSC_TRUE;
5530: mat->structurally_symmetric_set = PETSC_TRUE;
5531: }
5532: break;
5533: case MAT_SYMMETRIC:
5534: mat->symmetric = flg;
5535: if (flg) mat->structurally_symmetric = PETSC_TRUE;
5536: mat->symmetric_set = PETSC_TRUE;
5537: mat->structurally_symmetric_set = flg;
5538: #if !defined(PETSC_USE_COMPLEX)
5539: mat->hermitian = flg;
5540: mat->hermitian_set = PETSC_TRUE;
5541: #endif
5542: break;
5543: case MAT_HERMITIAN:
5544: mat->hermitian = flg;
5545: if (flg) mat->structurally_symmetric = PETSC_TRUE;
5546: mat->hermitian_set = PETSC_TRUE;
5547: mat->structurally_symmetric_set = flg;
5548: #if !defined(PETSC_USE_COMPLEX)
5549: mat->symmetric = flg;
5550: mat->symmetric_set = PETSC_TRUE;
5551: #endif
5552: break;
5553: case MAT_STRUCTURALLY_SYMMETRIC:
5554: mat->structurally_symmetric = flg;
5555: mat->structurally_symmetric_set = PETSC_TRUE;
5556: break;
5557: case MAT_SYMMETRY_ETERNAL:
5558: mat->symmetric_eternal = flg;
5559: break;
5560: case MAT_STRUCTURE_ONLY:
5561: mat->structure_only = flg;
5562: break;
5563: default:
5564: break;
5565: }
5566: if (mat->ops->setoption) {
5567: (*mat->ops->setoption)(mat,op,flg);
5568: }
5569: return(0);
5570: }
5572: /*@
5573: MatGetOption - Gets a parameter option that has been set for a matrix.
5575: Logically Collective on Mat for certain operations, such as MAT_SPD, not collective for MAT_ROW_ORIENTED, see MatOption
5577: Input Parameters:
5578: + mat - the matrix
5579: - option - the option; this routine only responds to certain options, check the source code for which ones
5581: Output Parameter:
5582: . flg - PETSC_TRUE if the option is on, PETSC_FALSE if it is off
5584: Notes:
5585: Can only be called after MatSetSizes() and MatSetType() have been called.
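   Example of Usage:
   A minimal sketch, assuming mat already has its type and sizes set:
.vb
      PetscBool symm;
      MatGetOption(mat,MAT_SYMMETRIC,&symm);
.ve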
5587: Level: intermediate
5589: Concepts: matrices^setting options
5591: .seealso: MatOption, MatSetOption()
5593: @*/
5594: PetscErrorCode MatGetOption(Mat mat,MatOption op,PetscBool *flg)
5595: {
5600: if (((int) op) <= MAT_OPTION_MIN || ((int) op) >= MAT_OPTION_MAX) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_OUTOFRANGE,"Options %d is out of range",(int)op);
5601: if (!((PetscObject)mat)->type_name) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_TYPENOTSET,"Cannot get options until type and size have been set, see MatSetType() and MatSetSizes()");
5603: switch (op) {
5604: case MAT_NO_OFF_PROC_ENTRIES:
5605: *flg = mat->nooffprocentries;
5606: break;
5607: case MAT_NO_OFF_PROC_ZERO_ROWS:
5608: *flg = mat->nooffproczerorows;
5609: break;
5610: case MAT_SYMMETRIC:
5611: *flg = mat->symmetric;
5612: break;
5613: case MAT_HERMITIAN:
5614: *flg = mat->hermitian;
5615: break;
5616: case MAT_STRUCTURALLY_SYMMETRIC:
5617: *flg = mat->structurally_symmetric;
5618: break;
5619: case MAT_SYMMETRY_ETERNAL:
5620: *flg = mat->symmetric_eternal;
5621: break;
5622: case MAT_SPD:
5623: *flg = mat->spd;
5624: break;
5625: default:
5626: break;
5627: }
5628: return(0);
5629: }
5631: /*@
5632: MatZeroEntries - Zeros all entries of a matrix. For sparse matrices
5633: this routine retains the old nonzero structure.
5635: Logically Collective on Mat
5637: Input Parameters:
5638: . mat - the matrix
5640: Level: intermediate
5642: Notes:
5643: If the matrix was not preallocated then a default, likely poor preallocation will be set in the matrix, so this should be called after the preallocation phase.
5644: See the Performance chapter of the users manual for information on preallocating matrices.
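   Example of Usage:
   A minimal sketch, assuming mat is an assembled, preallocated matrix; all stored values become zero while the nonzero pattern is kept:
.vb
      MatZeroEntries(mat);
.ve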
5646: Concepts: matrices^zeroing
5648: .seealso: MatZeroRows()
5649: @*/
5650: PetscErrorCode MatZeroEntries(Mat mat)
5651: {
5657: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
5658: if (mat->insertmode != NOT_SET_VALUES) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for matrices where you have set values but not yet assembled");
5659: if (!mat->ops->zeroentries) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
5660: MatCheckPreallocated(mat,1);
5662: PetscLogEventBegin(MAT_ZeroEntries,mat,0,0,0);
5663: (*mat->ops->zeroentries)(mat);
5664: PetscLogEventEnd(MAT_ZeroEntries,mat,0,0,0);
5665: PetscObjectStateIncrease((PetscObject)mat);
5666: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_VECCUDA)
5667: if (mat->valid_GPU_matrix != PETSC_OFFLOAD_UNALLOCATED) {
5668: mat->valid_GPU_matrix = PETSC_OFFLOAD_CPU;
5669: }
5670: #endif
5671: return(0);
5672: }
5674: /*@
5675: MatZeroRowsColumns - Zeros all entries (except possibly the main diagonal)
5676: of a set of rows and columns of a matrix.
5678: Collective on Mat
5680: Input Parameters:
5681: + mat - the matrix
5682: . numRows - the number of rows to remove
5683: . rows - the global row indices
5684: . diag - value put in all diagonals of eliminated rows (0.0 will even eliminate diagonal entry)
5685: . x - optional vector of solutions for zeroed rows (other entries in vector are not used)
5686: - b - optional vector of right hand side, that will be adjusted by provided solution
5688: Notes:
5689: This does not change the nonzero structure of the matrix, it merely zeros those entries in the matrix.
5691: The user can set a value in the diagonal entry (or for the AIJ and
5692: row formats can optionally remove the main diagonal entry from the
5693: nonzero structure as well, by passing 0.0 as the final argument).
5695: For the parallel case, all processes that share the matrix (i.e.,
5696: those in the communicator used for matrix creation) MUST call this
5697: routine, regardless of whether any rows being zeroed are owned by
5698: them.
5700: Each processor can indicate any rows in the entire matrix to be zeroed (i.e. each process does NOT have to
5701: list only rows local to itself).
5703: The option MAT_NO_OFF_PROC_ZERO_ROWS does not apply to this routine.
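   Example of Usage:
   A minimal sketch of imposing Dirichlet boundary conditions symmetrically, assuming mat, x and b are assembled and
   rows[] holds the nrows global indices of the constrained rows on this process:
.vb
      MatZeroRowsColumns(mat,nrows,rows,1.0,x,b);
.ve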
5705: Level: intermediate
5707: Concepts: matrices^zeroing rows
5709: .seealso: MatZeroRowsIS(), MatZeroRows(), MatZeroRowsLocalIS(), MatZeroRowsStencil(), MatZeroEntries(), MatZeroRowsLocal(), MatSetOption(),
5710: MatZeroRowsColumnsLocal(), MatZeroRowsColumnsLocalIS(), MatZeroRowsColumnsIS(), MatZeroRowsColumnsStencil()
5711: @*/
5712: PetscErrorCode MatZeroRowsColumns(Mat mat,PetscInt numRows,const PetscInt rows[],PetscScalar diag,Vec x,Vec b)
5713: {
5720: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
5721: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
5722: if (!mat->ops->zerorowscolumns) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
5723: MatCheckPreallocated(mat,1);
5725: (*mat->ops->zerorowscolumns)(mat,numRows,rows,diag,x,b);
5726: MatViewFromOptions(mat,NULL,"-mat_view");
5727: PetscObjectStateIncrease((PetscObject)mat);
5728: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_VECCUDA)
5729: if (mat->valid_GPU_matrix != PETSC_OFFLOAD_UNALLOCATED) {
5730: mat->valid_GPU_matrix = PETSC_OFFLOAD_CPU;
5731: }
5732: #endif
5733: return(0);
5734: }
5736: /*@
5737: MatZeroRowsColumnsIS - Zeros all entries (except possibly the main diagonal)
5738: of a set of rows and columns of a matrix.
5740: Collective on Mat
5742: Input Parameters:
5743: + mat - the matrix
5744: . is - the rows to zero
5745: . diag - value put in all diagonals of eliminated rows (0.0 will even eliminate diagonal entry)
5746: . x - optional vector of solutions for zeroed rows (other entries in vector are not used)
5747: - b - optional vector of right hand side, that will be adjusted by provided solution
5749: Notes:
5750: This does not change the nonzero structure of the matrix, it merely zeros those entries in the matrix.
5752: The user can set a value in the diagonal entry (or for the AIJ and
5753: row formats can optionally remove the main diagonal entry from the
5754: nonzero structure as well, by passing 0.0 as the final argument).
5756: For the parallel case, all processes that share the matrix (i.e.,
5757: those in the communicator used for matrix creation) MUST call this
5758: routine, regardless of whether any rows being zeroed are owned by
5759: them.
5761: Each processor can indicate any rows in the entire matrix to be zeroed (i.e. each process does NOT have to
5762: list only rows local to itself).
5764: The option MAT_NO_OFF_PROC_ZERO_ROWS does not apply to this routine.
5766: Level: intermediate
5768: Concepts: matrices^zeroing rows
5770: .seealso: MatZeroRowsIS(), MatZeroRowsColumns(), MatZeroRowsLocalIS(), MatZeroRowsStencil(), MatZeroEntries(), MatZeroRowsLocal(), MatSetOption(),
5771: MatZeroRowsColumnsLocal(), MatZeroRowsColumnsLocalIS(), MatZeroRows(), MatZeroRowsColumnsStencil()
5772: @*/
5773: PetscErrorCode MatZeroRowsColumnsIS(Mat mat,IS is,PetscScalar diag,Vec x,Vec b)
5774: {
5776: PetscInt numRows;
5777: const PetscInt *rows;
5784: ISGetLocalSize(is,&numRows);
5785: ISGetIndices(is,&rows);
5786: MatZeroRowsColumns(mat,numRows,rows,diag,x,b);
5787: ISRestoreIndices(is,&rows);
5788: return(0);
5789: }
5791: /*@
5792: MatZeroRows - Zeros all entries (except possibly the main diagonal)
5793: of a set of rows of a matrix.
5795: Collective on Mat
5797: Input Parameters:
5798: + mat - the matrix
5799: . numRows - the number of rows to remove
5800: . rows - the global row indices
5801: . diag - value put in all diagonals of eliminated rows (0.0 will even eliminate diagonal entry)
5802: . x - optional vector of solutions for zeroed rows (other entries in vector are not used)
5803: - b - optional vector of right hand side, that will be adjusted by provided solution
5805: Notes:
5806: For the AIJ and BAIJ matrix formats this removes the old nonzero structure,
5807: but does not release memory. For the dense and block diagonal
5808: formats this does not alter the nonzero structure.
5810: If the option MatSetOption(mat,MAT_KEEP_NONZERO_PATTERN,PETSC_TRUE) has been set, the nonzero structure
5811: of the matrix is not changed (even for AIJ and BAIJ matrices); the values are
5812: merely zeroed.
5814: The user can set a value in the diagonal entry (or for the AIJ and
5815: row formats can optionally remove the main diagonal entry from the
5816: nonzero structure as well, by passing 0.0 as the final argument).
5818: For the parallel case, all processes that share the matrix (i.e.,
5819: those in the communicator used for matrix creation) MUST call this
5820: routine, regardless of whether any rows being zeroed are owned by
5821: them.
5823: Each processor can indicate any rows in the entire matrix to be zeroed (i.e. each process does NOT have to
5824: list only rows local to itself).
5826: You can call MatSetOption(mat,MAT_NO_OFF_PROC_ZERO_ROWS,PETSC_TRUE) if each process indicates only rows it
5827: owns that are to be zeroed. This saves a global synchronization in the implementation.
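   Example of Usage:
   A minimal sketch, assuming mat is assembled and rows[] holds nrows global row indices; the diagonal entries of the
   zeroed rows are set to 1.0 and no right hand side adjustment is requested:
.vb
      MatZeroRows(mat,nrows,rows,1.0,NULL,NULL);
.ve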
5829: Level: intermediate
5831: Concepts: matrices^zeroing rows
5833: .seealso: MatZeroRowsIS(), MatZeroRowsColumns(), MatZeroRowsLocalIS(), MatZeroRowsStencil(), MatZeroEntries(), MatZeroRowsLocal(), MatSetOption(),
5834: MatZeroRowsColumnsLocal(), MatZeroRowsColumnsLocalIS(), MatZeroRowsColumnsIS(), MatZeroRowsColumnsStencil()
5835: @*/
5836: PetscErrorCode MatZeroRows(Mat mat,PetscInt numRows,const PetscInt rows[],PetscScalar diag,Vec x,Vec b)
5837: {
5844: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
5845: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
5846: if (!mat->ops->zerorows) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
5847: MatCheckPreallocated(mat,1);
5849: (*mat->ops->zerorows)(mat,numRows,rows,diag,x,b);
5850: MatViewFromOptions(mat,NULL,"-mat_view");
5851: PetscObjectStateIncrease((PetscObject)mat);
5852: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_VECCUDA)
5853: if (mat->valid_GPU_matrix != PETSC_OFFLOAD_UNALLOCATED) {
5854: mat->valid_GPU_matrix = PETSC_OFFLOAD_CPU;
5855: }
5856: #endif
5857: return(0);
5858: }
5860: /*@
5861: MatZeroRowsIS - Zeros all entries (except possibly the main diagonal)
5862: of a set of rows of a matrix.
5864: Collective on Mat
5866: Input Parameters:
5867: + mat - the matrix
5868: . is - index set of rows to remove
5869: . diag - value put in all diagonals of eliminated rows
5870: . x - optional vector of solutions for zeroed rows (other entries in vector are not used)
5871: - b - optional vector of right hand side, that will be adjusted by provided solution
5873: Notes:
5874: For the AIJ and BAIJ matrix formats this removes the old nonzero structure,
5875: but does not release memory. For the dense and block diagonal
5876: formats this does not alter the nonzero structure.
5878: If the option MatSetOption(mat,MAT_KEEP_NONZERO_PATTERN,PETSC_TRUE) has been set, the nonzero structure
5879: of the matrix is not changed (even for AIJ and BAIJ matrices); the values are
5880: merely zeroed.
5882: The user can set a value in the diagonal entry (or for the AIJ and
5883: row formats can optionally remove the main diagonal entry from the
5884: nonzero structure as well, by passing 0.0 as the final argument).
5886: For the parallel case, all processes that share the matrix (i.e.,
5887: those in the communicator used for matrix creation) MUST call this
5888: routine, regardless of whether any rows being zeroed are owned by
5889: them.
5891: Each processor can indicate any rows in the entire matrix to be zeroed (i.e. each process does NOT have to
5892: list only rows local to itself).
5894: You can call MatSetOption(mat,MAT_NO_OFF_PROC_ZERO_ROWS,PETSC_TRUE) if each process indicates only rows it
5895: owns that are to be zeroed. This saves a global synchronization in the implementation.
5897: Level: intermediate
5899: Concepts: matrices^zeroing rows
5901: .seealso: MatZeroRows(), MatZeroRowsColumns(), MatZeroRowsLocalIS(), MatZeroRowsStencil(), MatZeroEntries(), MatZeroRowsLocal(), MatSetOption(),
5902: MatZeroRowsColumnsLocal(), MatZeroRowsColumnsLocalIS(), MatZeroRowsColumnsIS(), MatZeroRowsColumnsStencil()
5903: @*/
5904: PetscErrorCode MatZeroRowsIS(Mat mat,IS is,PetscScalar diag,Vec x,Vec b)
5905: {
5906: PetscInt numRows;
5907: const PetscInt *rows;
5914: ISGetLocalSize(is,&numRows);
5915: ISGetIndices(is,&rows);
5916: MatZeroRows(mat,numRows,rows,diag,x,b);
5917: ISRestoreIndices(is,&rows);
5918: return(0);
5919: }
5921: /*@
5922: MatZeroRowsStencil - Zeros all entries (except possibly the main diagonal)
5923: of a set of rows of a matrix. These rows must be local to the process.
5925: Collective on Mat
5927: Input Parameters:
5928: + mat - the matrix
5929: . numRows - the number of rows to remove
5930: . rows - the grid coordinates (and component number when dof > 1) for matrix rows
5931: . diag - value put in all diagonals of eliminated rows (0.0 will even eliminate diagonal entry)
5932: . x - optional vector of solutions for zeroed rows (other entries in vector are not used)
5933: - b - optional vector of right hand side, that will be adjusted by provided solution
5935: Notes:
5936: For the AIJ and BAIJ matrix formats this removes the old nonzero structure,
5937: but does not release memory. For the dense and block diagonal
5938: formats this does not alter the nonzero structure.
5940: If the option MatSetOption(mat,MAT_KEEP_NONZERO_PATTERN,PETSC_TRUE) has been set, the nonzero structure
5941: of the matrix is not changed (even for AIJ and BAIJ matrices); the values are
5942: merely zeroed.
5944: The user can set a value in the diagonal entry (or for the AIJ and
5945: row formats can optionally remove the main diagonal entry from the
5946: nonzero structure as well, by passing 0.0 as the final argument).
5948: For the parallel case, all processes that share the matrix (i.e.,
5949: those in the communicator used for matrix creation) MUST call this
5950: routine, regardless of whether any rows being zeroed are owned by
5951: them.
5953: Each processor can indicate any rows in the entire matrix to be zeroed (i.e. each process does NOT have to
5954: list only rows local to itself).
5956: The grid coordinates are across the entire grid, not just the local portion
5958: In Fortran idxm and idxn should be declared as
5959: $ MatStencil idxm(4,m)
5960: and the values inserted using
5961: $ idxm(MatStencil_i,1) = i
5962: $ idxm(MatStencil_j,1) = j
5963: $ idxm(MatStencil_k,1) = k
5964: $ idxm(MatStencil_c,1) = c
5965: etc
5967: For periodic boundary conditions use negative indices for values to the left (below 0); these are
5968: obtained by wrapping values from the right edge. For values to the right of the last entry, use that index plus one,
5969: etc., to obtain values wrapped around from the left edge. This does not work for anything but the
5970: DM_BOUNDARY_PERIODIC boundary type.
5972: For indices that don't mean anything for your case (like the k index when working in 2d, or the c index when you have
5973: a single value per point) you can skip filling those indices.
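   Example of Usage:
   A minimal sketch for a 2d grid, assuming mat was obtained from a DMDA (for example with DMCreateMatrix(), so the
   stencil information is set) and i,j are the grid indices of a row owned by this process:
.vb
      MatStencil row = {0};
      row.i = i; row.j = j;
      MatZeroRowsStencil(mat,1,&row,1.0,NULL,NULL);
.ve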
5975: Level: intermediate
5977: Concepts: matrices^zeroing rows
5979: .seealso: MatZeroRowsIS(), MatZeroRowsColumns(), MatZeroRowsLocalIS(), MatZeroRows(), MatZeroEntries(), MatZeroRowsLocal(), MatSetOption(),
5980: MatZeroRowsColumnsLocal(), MatZeroRowsColumnsLocalIS(), MatZeroRowsColumnsIS(), MatZeroRowsColumnsStencil()
5981: @*/
5982: PetscErrorCode MatZeroRowsStencil(Mat mat,PetscInt numRows,const MatStencil rows[],PetscScalar diag,Vec x,Vec b)
5983: {
5984: PetscInt dim = mat->stencil.dim;
5985: PetscInt sdim = dim - (1 - (PetscInt) mat->stencil.noc);
5986: PetscInt *dims = mat->stencil.dims+1;
5987: PetscInt *starts = mat->stencil.starts;
5988: PetscInt *dxm = (PetscInt*) rows;
5989: PetscInt *jdxm, i, j, tmp, numNewRows = 0;
5997: PetscMalloc1(numRows, &jdxm);
5998: for (i = 0; i < numRows; ++i) {
5999: /* Skip unused dimensions (they are ordered k, j, i, c) */
6000: for (j = 0; j < 3-sdim; ++j) dxm++;
6001: /* Local index in X dir */
6002: tmp = *dxm++ - starts[0];
6003: /* Loop over remaining dimensions */
6004: for (j = 0; j < dim-1; ++j) {
6005: /* If nonlocal, set index to be negative */
6006: if ((*dxm++ - starts[j+1]) < 0 || tmp < 0) tmp = PETSC_MIN_INT;
6007: /* Update local index */
6008: else tmp = tmp*dims[j] + *(dxm-1) - starts[j+1];
6009: }
6010: /* Skip component slot if necessary */
6011: if (mat->stencil.noc) dxm++;
6012: /* Local row number */
6013: if (tmp >= 0) {
6014: jdxm[numNewRows++] = tmp;
6015: }
6016: }
6017: MatZeroRowsLocal(mat,numNewRows,jdxm,diag,x,b);
6018: PetscFree(jdxm);
6019: return(0);
6020: }
6022: /*@
6023: MatZeroRowsColumnsStencil - Zeros all row and column entries (except possibly the main diagonal)
6024: of a set of rows and columns of a matrix.
6026: Collective on Mat
6028: Input Parameters:
6029: + mat - the matrix
6030: . numRows - the number of rows/columns to remove
6031: . rows - the grid coordinates (and component number when dof > 1) for matrix rows
6032: . diag - value put in all diagonals of eliminated rows (0.0 will even eliminate diagonal entry)
6033: . x - optional vector of solutions for zeroed rows (other entries in vector are not used)
6034: - b - optional vector of right hand side, that will be adjusted by provided solution
6036: Notes:
6037: For the AIJ and BAIJ matrix formats this removes the old nonzero structure,
6038: but does not release memory. For the dense and block diagonal
6039: formats this does not alter the nonzero structure.
6041: If the option MatSetOption(mat,MAT_KEEP_NONZERO_PATTERN,PETSC_TRUE) has been set, the nonzero structure
6042: of the matrix is not changed (even for AIJ and BAIJ matrices); the values are
6043: merely zeroed.
6045: The user can set a value in the diagonal entry (or for the AIJ and
6046: row formats can optionally remove the main diagonal entry from the
6047: nonzero structure as well, by passing 0.0 as the final argument).
6049: For the parallel case, all processes that share the matrix (i.e.,
6050: those in the communicator used for matrix creation) MUST call this
6051: routine, regardless of whether any rows being zeroed are owned by
6052: them.
6054: Each processor can indicate any rows in the entire matrix to be zeroed (i.e. each process does NOT have to
6055: list only rows local to itself, but the row/column numbers are given in local numbering).
6057: The grid coordinates are across the entire grid, not just the local portion
6059: In Fortran idxm and idxn should be declared as
6060: $ MatStencil idxm(4,m)
6061: and the values inserted using
6062: $ idxm(MatStencil_i,1) = i
6063: $ idxm(MatStencil_j,1) = j
6064: $ idxm(MatStencil_k,1) = k
6065: $ idxm(MatStencil_c,1) = c
6066: etc
6068: For periodic boundary conditions use negative indices for values to the left (below 0); these are
6069: obtained by wrapping values from the right edge. For values to the right of the last entry, use that index plus one,
6070: etc., to obtain values wrapped around from the left edge. This does not work for anything but the
6071: DM_BOUNDARY_PERIODIC boundary type.
6073: For indices that don't mean anything for your case (like the k index when working in 2d, or the c index when you have
6074: a single value per point) you can skip filling those indices.
6076: Level: intermediate
6078: Concepts: matrices^zeroing rows
6080: .seealso: MatZeroRowsIS(), MatZeroRowsColumns(), MatZeroRowsLocalIS(), MatZeroRowsStencil(), MatZeroEntries(), MatZeroRowsLocal(), MatSetOption(),
6081: MatZeroRowsColumnsLocal(), MatZeroRowsColumnsLocalIS(), MatZeroRowsColumnsIS(), MatZeroRows()
6082: @*/
6083: PetscErrorCode MatZeroRowsColumnsStencil(Mat mat,PetscInt numRows,const MatStencil rows[],PetscScalar diag,Vec x,Vec b)
6084: {
6085: PetscInt dim = mat->stencil.dim;
6086: PetscInt sdim = dim - (1 - (PetscInt) mat->stencil.noc);
6087: PetscInt *dims = mat->stencil.dims+1;
6088: PetscInt *starts = mat->stencil.starts;
6089: PetscInt *dxm = (PetscInt*) rows;
6090: PetscInt *jdxm, i, j, tmp, numNewRows = 0;
6098: PetscMalloc1(numRows, &jdxm);
6099: for (i = 0; i < numRows; ++i) {
6100: /* Skip unused dimensions (they are ordered k, j, i, c) */
6101: for (j = 0; j < 3-sdim; ++j) dxm++;
6102: /* Local index in X dir */
6103: tmp = *dxm++ - starts[0];
6104: /* Loop over remaining dimensions */
6105: for (j = 0; j < dim-1; ++j) {
6106: /* If nonlocal, set index to be negative */
6107: if ((*dxm++ - starts[j+1]) < 0 || tmp < 0) tmp = PETSC_MIN_INT;
6108: /* Update local index */
6109: else tmp = tmp*dims[j] + *(dxm-1) - starts[j+1];
6110: }
6111: /* Skip component slot if necessary */
6112: if (mat->stencil.noc) dxm++;
6113: /* Local row number */
6114: if (tmp >= 0) {
6115: jdxm[numNewRows++] = tmp;
6116: }
6117: }
6118: MatZeroRowsColumnsLocal(mat,numNewRows,jdxm,diag,x,b);
6119: PetscFree(jdxm);
6120: return(0);
6121: }
6123: /*@
6124: MatZeroRowsLocal - Zeros all entries (except possibly the main diagonal)
6125: of a set of rows of a matrix; using local numbering of rows.
6127: Collective on Mat
6129: Input Parameters:
6130: + mat - the matrix
6131: . numRows - the number of rows to remove
6132: . rows - the global row indices
6133: . diag - value put in all diagonals of eliminated rows
6134: . x - optional vector of solutions for zeroed rows (other entries in vector are not used)
6135: - b - optional vector of right hand side, that will be adjusted by provided solution
6137: Notes:
6138: Before calling MatZeroRowsLocal(), the user must first set the
6139: local-to-global mapping by calling MatSetLocalToGlobalMapping().
6141: For the AIJ matrix formats this removes the old nonzero structure,
6142: but does not release memory. For the dense and block diagonal
6143: formats this does not alter the nonzero structure.
6145: If the option MatSetOption(mat,MAT_KEEP_NONZERO_PATTERN,PETSC_TRUE) has been set, the nonzero structure
6146: of the matrix is not changed (even for AIJ and BAIJ matrices); the values are
6147: merely zeroed.
6149: The user can set a value in the diagonal entry (or for the AIJ and
6150: row formats can optionally remove the main diagonal entry from the
6151: nonzero structure as well, by passing 0.0 as the final argument).
6153: You can call MatSetOption(mat,MAT_NO_OFF_PROC_ZERO_ROWS,PETSC_TRUE) if each process indicates only rows it
6154: owns that are to be zeroed. This saves a global synchronization in the implementation.
6156: Level: intermediate
6158: Concepts: matrices^zeroing
6160: .seealso: MatZeroRowsIS(), MatZeroRowsColumns(), MatZeroRowsLocalIS(), MatZeroRowsStencil(), MatZeroEntries(), MatZeroRows(), MatSetOption(),
6161: MatZeroRowsColumnsLocal(), MatZeroRowsColumnsLocalIS(), MatZeroRowsColumnsIS(), MatZeroRowsColumnsStencil()
6162: @*/
6163: PetscErrorCode MatZeroRowsLocal(Mat mat,PetscInt numRows,const PetscInt rows[],PetscScalar diag,Vec x,Vec b)
6164: {
6171: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
6172: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6173: MatCheckPreallocated(mat,1);
6175: if (mat->ops->zerorowslocal) {
6176: (*mat->ops->zerorowslocal)(mat,numRows,rows,diag,x,b);
6177: } else {
6178: IS is, newis;
6179: const PetscInt *newRows;
6181: if (!mat->rmap->mapping) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Need to provide local to global mapping to matrix first");
6182: ISCreateGeneral(PETSC_COMM_SELF,numRows,rows,PETSC_COPY_VALUES,&is);
6183: ISLocalToGlobalMappingApplyIS(mat->rmap->mapping,is,&newis);
6184: ISGetIndices(newis,&newRows);
6185: (*mat->ops->zerorows)(mat,numRows,newRows,diag,x,b);
6186: ISRestoreIndices(newis,&newRows);
6187: ISDestroy(&newis);
6188: ISDestroy(&is);
6189: }
6190: PetscObjectStateIncrease((PetscObject)mat);
6191: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_VECCUDA)
6192: if (mat->valid_GPU_matrix != PETSC_OFFLOAD_UNALLOCATED) {
6193: mat->valid_GPU_matrix = PETSC_OFFLOAD_CPU;
6194: }
6195: #endif
6196: return(0);
6197: }
6199: /*@
6200: MatZeroRowsLocalIS - Zeros all entries (except possibly the main diagonal)
6201: of a set of rows of a matrix; using local numbering of rows.
6203: Collective on Mat
6205: Input Parameters:
6206: + mat - the matrix
6207: . is - index set of rows to remove
6208: . diag - value put in all diagonals of eliminated rows
6209: . x - optional vector of solutions for zeroed rows (other entries in vector are not used)
6210: - b - optional vector of right hand side, that will be adjusted by provided solution
6212: Notes:
6213: Before calling MatZeroRowsLocalIS(), the user must first set the
6214: local-to-global mapping by calling MatSetLocalToGlobalMapping().
6216: For the AIJ matrix formats this removes the old nonzero structure,
6217: but does not release memory. For the dense and block diagonal
6218: formats this does not alter the nonzero structure.
6220: If the option MatSetOption(mat,MAT_KEEP_NONZERO_PATTERN,PETSC_TRUE) has been set, the nonzero structure
6221: of the matrix is not changed (even for AIJ and BAIJ matrices); the values are
6222: merely zeroed.
6224: The user can set a value in the diagonal entry (or for the AIJ and
6225: row formats can optionally remove the main diagonal entry from the
6226: nonzero structure as well, by passing 0.0 as the final argument).
6228: You can call MatSetOption(mat,MAT_NO_OFF_PROC_ZERO_ROWS,PETSC_TRUE) if each process indicates only rows it
6229: owns that are to be zeroed. This saves a global synchronization in the implementation.
6231: Level: intermediate
6233: Concepts: matrices^zeroing
6235: .seealso: MatZeroRowsIS(), MatZeroRowsColumns(), MatZeroRows(), MatZeroRowsStencil(), MatZeroEntries(), MatZeroRowsLocal(), MatSetOption(),
6236: MatZeroRowsColumnsLocal(), MatZeroRowsColumnsLocalIS(), MatZeroRowsColumnsIS(), MatZeroRowsColumnsStencil()
6237: @*/
6238: PetscErrorCode MatZeroRowsLocalIS(Mat mat,IS is,PetscScalar diag,Vec x,Vec b)
6239: {
6241: PetscInt numRows;
6242: const PetscInt *rows;
6248: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
6249: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6250: MatCheckPreallocated(mat,1);
6252: ISGetLocalSize(is,&numRows);
6253: ISGetIndices(is,&rows);
6254: MatZeroRowsLocal(mat,numRows,rows,diag,x,b);
6255: ISRestoreIndices(is,&rows);
6256: return(0);
6257: }
6259: /*@
6260: MatZeroRowsColumnsLocal - Zeros all entries (except possibly the main diagonal)
6261: of a set of rows and columns of a matrix; using local numbering of rows.
6263: Collective on Mat
6265: Input Parameters:
6266: + mat - the matrix
6267: . numRows - the number of rows to remove
6268: . rows - the global row indices
6269: . diag - value put in all diagonals of eliminated rows
6270: . x - optional vector of solutions for zeroed rows (other entries in vector are not used)
6271: - b - optional vector of right hand side, that will be adjusted by provided solution
6273: Notes:
6274: Before calling MatZeroRowsColumnsLocal(), the user must first set the
6275: local-to-global mapping by calling MatSetLocalToGlobalMapping().
6277: The user can set a value in the diagonal entry (or for the AIJ and
6278: row formats can optionally remove the main diagonal entry from the
6279: nonzero structure as well, by passing 0.0 as the final argument).
6281: Level: intermediate
6283: Concepts: matrices^zeroing
6285: .seealso: MatZeroRowsIS(), MatZeroRowsColumns(), MatZeroRowsLocalIS(), MatZeroRowsStencil(), MatZeroEntries(), MatZeroRowsLocal(), MatSetOption(),
6286: MatZeroRows(), MatZeroRowsColumnsLocalIS(), MatZeroRowsColumnsIS(), MatZeroRowsColumnsStencil()
6287: @*/
6288: PetscErrorCode MatZeroRowsColumnsLocal(Mat mat,PetscInt numRows,const PetscInt rows[],PetscScalar diag,Vec x,Vec b)
6289: {
6291: IS is, newis;
6292: const PetscInt *newRows;
6298: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
6299: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6300: MatCheckPreallocated(mat,1);
6302: if (!mat->cmap->mapping) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Need to provide local to global mapping to matrix first");
6303: ISCreateGeneral(PETSC_COMM_SELF,numRows,rows,PETSC_COPY_VALUES,&is);
6304: ISLocalToGlobalMappingApplyIS(mat->cmap->mapping,is,&newis);
6305: ISGetIndices(newis,&newRows);
6306: (*mat->ops->zerorowscolumns)(mat,numRows,newRows,diag,x,b);
6307: ISRestoreIndices(newis,&newRows);
6308: ISDestroy(&newis);
6309: ISDestroy(&is);
6310: PetscObjectStateIncrease((PetscObject)mat);
6311: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_VECCUDA)
6312: if (mat->valid_GPU_matrix != PETSC_OFFLOAD_UNALLOCATED) {
6313: mat->valid_GPU_matrix = PETSC_OFFLOAD_CPU;
6314: }
6315: #endif
6316: return(0);
6317: }
6319: /*@
6320: MatZeroRowsColumnsLocalIS - Zeros all entries (except possibly the main diagonal)
6321: of a set of rows and columns of a matrix; using local numbering of rows.
6323: Collective on Mat
6325: Input Parameters:
6326: + mat - the matrix
6327: . is - index set of rows to remove
6328: . diag - value put in all diagonals of eliminated rows
6329: . x - optional vector of solutions for zeroed rows (other entries in vector are not used)
6330: - b - optional vector of right hand side, that will be adjusted by provided solution
6332: Notes:
6333: Before calling MatZeroRowsColumnsLocalIS(), the user must first set the
6334: local-to-global mapping by calling MatSetLocalToGlobalMapping().
6336: The user can set a value in the diagonal entry (or for the AIJ and
6337: row formats can optionally remove the main diagonal entry from the
6338: nonzero structure as well, by passing 0.0 as the final argument).
6340: Level: intermediate
6342: Concepts: matrices^zeroing
6344: .seealso: MatZeroRowsIS(), MatZeroRowsColumns(), MatZeroRowsLocalIS(), MatZeroRowsStencil(), MatZeroEntries(), MatZeroRowsLocal(), MatSetOption(),
6345: MatZeroRowsColumnsLocal(), MatZeroRows(), MatZeroRowsColumnsIS(), MatZeroRowsColumnsStencil()
6346: @*/
6347: PetscErrorCode MatZeroRowsColumnsLocalIS(Mat mat,IS is,PetscScalar diag,Vec x,Vec b)
6348: {
6350: PetscInt numRows;
6351: const PetscInt *rows;
6357: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
6358: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6359: MatCheckPreallocated(mat,1);
6361: ISGetLocalSize(is,&numRows);
6362: ISGetIndices(is,&rows);
6363: MatZeroRowsColumnsLocal(mat,numRows,rows,diag,x,b);
6364: ISRestoreIndices(is,&rows);
6365: return(0);
6366: }
6368: /*@C
6369: MatGetSize - Returns the numbers of rows and columns in a matrix.
6371: Not Collective
6373: Input Parameter:
6374: . mat - the matrix
6376: Output Parameters:
6377: + m - the number of global rows
6378: - n - the number of global columns
6380: Note: both output parameters can be NULL on input.
6382: Level: beginner
6384: Concepts: matrices^size
6386: .seealso: MatGetLocalSize()
6387: @*/
6388: PetscErrorCode MatGetSize(Mat mat,PetscInt *m,PetscInt *n)
6389: {
6392: if (m) *m = mat->rmap->N;
6393: if (n) *n = mat->cmap->N;
6394: return(0);
6395: }
6397: /*@C
6398: MatGetLocalSize - Returns the number of rows and columns in a matrix
6399: stored locally. This information may be implementation dependent, so
6400: use with care.
6402: Not Collective
6404: Input Parameters:
6405: . mat - the matrix
6407: Output Parameters:
6408: + m - the number of local rows
6409: - n - the number of local columns
6411: Note: both output parameters can be NULL on input.
6413: Level: beginner
6415: Concepts: matrices^local size
6417: .seealso: MatGetSize()
6418: @*/
6419: PetscErrorCode MatGetLocalSize(Mat mat,PetscInt *m,PetscInt *n)
6420: {
6425: if (m) *m = mat->rmap->n;
6426: if (n) *n = mat->cmap->n;
6427: return(0);
6428: }
6430: /*@C
6431: MatGetOwnershipRangeColumn - Returns the range of matrix columns associated with the rows of a vector one multiplies by that are owned by
6432: this processor. (The columns of the "diagonal block")
6434: Not Collective, unless matrix has not been allocated, then collective on Mat
6436: Input Parameters:
6437: . mat - the matrix
6439: Output Parameters:
6440: + m - the global index of the first local column
6441: - n - one more than the global index of the last local column
6443: Notes:
6444: both output parameters can be NULL on input.
6446: Level: developer
6448: Concepts: matrices^column ownership
6450: .seealso: MatGetOwnershipRange(), MatGetOwnershipRanges(), MatGetOwnershipRangesColumn()
6452: @*/
6453: PetscErrorCode MatGetOwnershipRangeColumn(Mat mat,PetscInt *m,PetscInt *n)
6454: {
6460: MatCheckPreallocated(mat,1);
6461: if (m) *m = mat->cmap->rstart;
6462: if (n) *n = mat->cmap->rend;
6463: return(0);
6464: }
6466: /*@C
6467: MatGetOwnershipRange - Returns the range of matrix rows owned by
6468: this processor, assuming that the matrix is laid out with the first
6469: n1 rows on the first processor, the next n2 rows on the second, etc.
6470: For certain parallel layouts this range may not be well defined.
6472: Not Collective
6474: Input Parameters:
6475: . mat - the matrix
6477: Output Parameters:
6478: + m - the global index of the first local row
6479: - n - one more than the global index of the last local row
6481: Note: Both output parameters can be NULL on input.
6482: $ This function requires that the matrix be preallocated. If you have not preallocated, consider using
6483: $ PetscSplitOwnership(MPI_Comm comm, PetscInt *n, PetscInt *N)
6484: $ and then MPI_Scan() to calculate prefix sums of the local sizes.
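   Example of Usage:
   A minimal sketch, assuming mat is preallocated; each process reports the half-open range of rows it owns:
.vb
      PetscInt rstart,rend;
      MatGetOwnershipRange(mat,&rstart,&rend);
      PetscPrintf(PETSC_COMM_SELF,"local rows [%D,%D)\n",rstart,rend);
.ve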
6486: Level: beginner
6488: Concepts: matrices^row ownership
6490: .seealso: MatGetOwnershipRanges(), MatGetOwnershipRangeColumn(), MatGetOwnershipRangesColumn(), PetscSplitOwnership(), PetscSplitOwnershipBlock()
6492: @*/
6493: PetscErrorCode MatGetOwnershipRange(Mat mat,PetscInt *m,PetscInt *n)
6494: {
6500: MatCheckPreallocated(mat,1);
6501: if (m) *m = mat->rmap->rstart;
6502: if (n) *n = mat->rmap->rend;
6503: return(0);
6504: }
6506: /*@C
6507: MatGetOwnershipRanges - Returns the range of matrix rows owned by
6508: each process
6510: Not Collective, unless matrix has not been allocated, then collective on Mat
6512: Input Parameters:
6513: . mat - the matrix
6515: Output Parameters:
6516: .  ranges - the first row owned by each process, followed by one final entry containing the total number of rows (an array of length size+1)
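
   Example usage:
   A minimal sketch (assuming A is an already preallocated parallel matrix):
.vb
      const PetscInt *ranges;
      PetscMPIInt    size,r;
      MatGetOwnershipRanges(A,&ranges);
      MPI_Comm_size(PetscObjectComm((PetscObject)A),&size);
      for (r=0; r<size; r++) {
        /* process r owns rows ranges[r] up to (but not including) ranges[r+1] */
      }
.ve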
6518: Level: beginner
6520: Concepts: matrices^row ownership
6522: .seealso: MatGetOwnershipRange(), MatGetOwnershipRangeColumn(), MatGetOwnershipRangesColumn()
6524: @*/
6525: PetscErrorCode MatGetOwnershipRanges(Mat mat,const PetscInt **ranges)
6526: {
6532: MatCheckPreallocated(mat,1);
6533: PetscLayoutGetRanges(mat->rmap,ranges);
6534: return(0);
6535: }
6537: /*@C
6538:    MatGetOwnershipRangesColumn - Returns, for every process, the range of matrix columns associated with the locally owned entries of a
6539:    vector that the matrix is multiplied by (that is, the columns of each process's "diagonal block").
6541: Not Collective, unless matrix has not been allocated, then collective on Mat
6543: Input Parameters:
6544: . mat - the matrix
6546: Output Parameters:
6547: .  ranges - the first column of each process's "diagonal block", followed by one final entry containing the total number of columns (an array of length size+1)
6549: Level: beginner
6551: Concepts: matrices^column ownership
6553: .seealso: MatGetOwnershipRange(), MatGetOwnershipRangeColumn(), MatGetOwnershipRanges()
6555: @*/
6556: PetscErrorCode MatGetOwnershipRangesColumn(Mat mat,const PetscInt **ranges)
6557: {
6563: MatCheckPreallocated(mat,1);
6564: PetscLayoutGetRanges(mat->cmap,ranges);
6565: return(0);
6566: }
6568: /*@C
6569: MatGetOwnershipIS - Get row and column ownership as index sets
6571: Not Collective
6573: Input Arguments:
6574: . A - matrix of type Elemental
6576: Output Arguments:
6577: + rows - rows in which this process owns elements
6578: -  cols - columns in which this process owns elements
6580: Level: intermediate
6582: .seealso: MatGetOwnershipRange(), MatGetOwnershipRangeColumn(), MatSetValues(), MATELEMENTAL
6583: @*/
6584: PetscErrorCode MatGetOwnershipIS(Mat A,IS *rows,IS *cols)
6585: {
6586: PetscErrorCode ierr,(*f)(Mat,IS*,IS*);
6589: MatCheckPreallocated(A,1);
6590: PetscObjectQueryFunction((PetscObject)A,"MatGetOwnershipIS_C",&f);
6591: if (f) {
6592: (*f)(A,rows,cols);
6593: } else { /* Create a standard row-based partition, each process is responsible for ALL columns in their row block */
6594: if (rows) {ISCreateStride(PETSC_COMM_SELF,A->rmap->n,A->rmap->rstart,1,rows);}
6595: if (cols) {ISCreateStride(PETSC_COMM_SELF,A->cmap->N,0,1,cols);}
6596: }
6597: return(0);
6598: }
6600: /*@C
6601: MatILUFactorSymbolic - Performs symbolic ILU factorization of a matrix.
6602: Uses levels of fill only, not drop tolerance. Use MatLUFactorNumeric()
6603: to complete the factorization.
6605: Collective on Mat
6607: Input Parameters:
6608: + mat - the matrix
6609: . row - row permutation
6610: . column - column permutation
6611: - info - structure containing
6612: $ levels - number of levels of fill.
6613: $ expected fill - as ratio of original fill.
6614: $ 1 or 0 - indicating force fill on diagonal (improves robustness for matrices
6615: missing diagonal entries)
6617: Output Parameters:
6618: . fact - new matrix that has been symbolically factored
6620: Notes:
6621: See Users-Manual: ch_mat for additional information about choosing the fill factor for better efficiency.
6623: Most users should employ the simplified KSP interface for linear solvers
6624: instead of working directly with matrix algebra routines such as this.
6625: See, e.g., KSPCreate().
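
   Example usage:
   A sketch of the full symbolic/numeric ILU sequence (assuming A is an assembled AIJ matrix and b, x are vectors with compatible layouts):
.vb
      Mat           F;
      IS            row,col;
      MatFactorInfo info;
      MatGetOrdering(A,MATORDERINGNATURAL,&row,&col);
      MatGetFactor(A,MATSOLVERPETSC,MAT_FACTOR_ILU,&F);
      MatFactorInfoInitialize(&info);
      info.levels = 1;                                  /* ILU(1) */
      info.fill   = 2.0;                                /* expected fill as ratio of original */
      MatILUFactorSymbolic(F,A,row,col,&info);
      MatLUFactorNumeric(F,A,&info);
      MatSolve(F,b,x);
.ve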
6627: Level: developer
6629: Concepts: matrices^symbolic LU factorization
6630: Concepts: matrices^factorization
6631: Concepts: LU^symbolic factorization
6633: .seealso: MatLUFactorSymbolic(), MatLUFactorNumeric(), MatCholeskyFactor(),
6634:           MatGetOrdering(), MatFactorInfo
6636:     Developer Note: the Fortran interface is not autogenerated as the F90
6637:     interface definition cannot be generated correctly [due to MatFactorInfo]
6639: @*/
6640: PetscErrorCode MatILUFactorSymbolic(Mat fact,Mat mat,IS row,IS col,const MatFactorInfo *info)
6641: {
6651: if (info->levels < 0) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_OUTOFRANGE,"Levels of fill negative %D",(PetscInt)info->levels);
6652: if (info->fill < 1.0) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_OUTOFRANGE,"Expected fill less than 1.0 %g",(double)info->fill);
6653: if (!(fact)->ops->ilufactorsymbolic) {
6654: MatSolverType spackage;
6655: MatFactorGetSolverType(fact,&spackage);
6656: SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Matrix type %s symbolic ILU using solver package %s",((PetscObject)mat)->type_name,spackage);
6657: }
6658: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
6659: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6660: MatCheckPreallocated(mat,2);
6662: PetscLogEventBegin(MAT_ILUFactorSymbolic,mat,row,col,0);
6663: (fact->ops->ilufactorsymbolic)(fact,mat,row,col,info);
6664: PetscLogEventEnd(MAT_ILUFactorSymbolic,mat,row,col,0);
6665: return(0);
6666: }
6668: /*@C
6669: MatICCFactorSymbolic - Performs symbolic incomplete
6670: Cholesky factorization for a symmetric matrix. Use
6671: MatCholeskyFactorNumeric() to complete the factorization.
6673: Collective on Mat
6675: Input Parameters:
6676: + mat - the matrix
6677: . perm - row and column permutation
6678: - info - structure containing
6679: $ levels - number of levels of fill.
6680: $ expected fill - as ratio of original fill.
6682: Output Parameter:
6683: . fact - the factored matrix
6685: Notes:
6686: Most users should employ the KSP interface for linear solvers
6687: instead of working directly with matrix algebra routines such as this.
6688: See, e.g., KSPCreate().
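
   Example usage:
   A sketch of the symbolic/numeric ICC sequence (assuming A is an assembled symmetric AIJ or SBAIJ matrix and b, x are compatible vectors):
.vb
      Mat           F;
      IS            perm,cperm;
      MatFactorInfo info;
      MatGetOrdering(A,MATORDERINGNATURAL,&perm,&cperm);
      MatGetFactor(A,MATSOLVERPETSC,MAT_FACTOR_ICC,&F);
      MatFactorInfoInitialize(&info);
      info.levels = 0;                                  /* ICC(0) */
      info.fill   = 1.0;
      MatICCFactorSymbolic(F,A,perm,&info);
      MatCholeskyFactorNumeric(F,A,&info);
      MatSolve(F,b,x);
.ve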
6690: Level: developer
6692: Concepts: matrices^symbolic incomplete Cholesky factorization
6693: Concepts: matrices^factorization
6694:    Concepts: Cholesky^symbolic factorization
6696: .seealso: MatCholeskyFactorNumeric(), MatCholeskyFactor(), MatFactorInfo
6698:     Developer Note: the Fortran interface is not autogenerated as the F90
6699:     interface definition cannot be generated correctly [due to MatFactorInfo]
6701: @*/
6702: PetscErrorCode MatICCFactorSymbolic(Mat fact,Mat mat,IS perm,const MatFactorInfo *info)
6703: {
6712: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6713: if (info->levels < 0) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_OUTOFRANGE,"Levels negative %D",(PetscInt) info->levels);
6714: if (info->fill < 1.0) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_OUTOFRANGE,"Expected fill less than 1.0 %g",(double)info->fill);
6715: if (!(fact)->ops->iccfactorsymbolic) {
6716: MatSolverType spackage;
6717: MatFactorGetSolverType(fact,&spackage);
6718: SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Matrix type %s symbolic ICC using solver package %s",((PetscObject)mat)->type_name,spackage);
6719: }
6720: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
6721: MatCheckPreallocated(mat,2);
6723: PetscLogEventBegin(MAT_ICCFactorSymbolic,mat,perm,0,0);
6724: (fact->ops->iccfactorsymbolic)(fact,mat,perm,info);
6725: PetscLogEventEnd(MAT_ICCFactorSymbolic,mat,perm,0,0);
6726: return(0);
6727: }
6729: /*@C
6730: MatCreateSubMatrices - Extracts several submatrices from a matrix. If submat
6731: points to an array of valid matrices, they may be reused to store the new
6732: submatrices.
6734: Collective on Mat
6736: Input Parameters:
6737: + mat - the matrix
6738: .  n   - the number of submatrices to be extracted (on this processor, may be zero)
6739: . irow, icol - index sets of rows and columns to extract
6740: - scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
6742: Output Parameter:
6743: . submat - the array of submatrices
6745: Notes:
6746: MatCreateSubMatrices() can extract ONLY sequential submatrices
6747: (from both sequential and parallel matrices). Use MatCreateSubMatrix()
6748: to extract a parallel submatrix.
6750: Some matrix types place restrictions on the row and column
6751: indices, such as that they be sorted or that they be equal to each other.
6753: The index sets may not have duplicate entries.
6755: When extracting submatrices from a parallel matrix, each processor can
6756: form a different submatrix by setting the rows and columns of its
6757: individual index sets according to the local submatrix desired.
6759: When finished using the submatrices, the user should destroy
6760: them with MatDestroySubMatrices().
6762: MAT_REUSE_MATRIX can only be used when the nonzero structure of the
6763: original matrix has not changed from that last call to MatCreateSubMatrices().
6765: This routine creates the matrices in submat; you should NOT create them before
6766: calling it. It also allocates the array of matrix pointers submat.
6768: For BAIJ matrices the index sets must respect the block structure, that is if they
6769: request one row/column in a block, they must request all rows/columns that are in
6770: that block. For example, if the block size is 2 you cannot request just row 0 and
6771: column 0.
6773: Fortran Note:
6774: The Fortran interface is slightly different from that given below; it
6775: requires one to pass in as submat a Mat (integer) array of size at least n+1.
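
   Example usage:
   A minimal sketch in which every process extracts its own sequential copy of the leading 5x5 block (assuming A has at least 5 global rows and columns):
.vb
      IS  rowis,colis;
      Mat *submats;
      ISCreateStride(PETSC_COMM_SELF,5,0,1,&rowis);     /* rows 0..4 */
      ISCreateStride(PETSC_COMM_SELF,5,0,1,&colis);     /* columns 0..4 */
      MatCreateSubMatrices(A,1,&rowis,&colis,MAT_INITIAL_MATRIX,&submats);
      /* ... use the sequential matrix submats[0] ... */
      MatDestroySubMatrices(1,&submats);
      ISDestroy(&rowis);
      ISDestroy(&colis);
.ve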
6777: Level: advanced
6779: Concepts: matrices^accessing submatrices
6780: Concepts: submatrices
6782: .seealso: MatDestroySubMatrices(), MatCreateSubMatrix(), MatGetRow(), MatGetDiagonal(), MatReuse
6783: @*/
6784: PetscErrorCode MatCreateSubMatrices(Mat mat,PetscInt n,const IS irow[],const IS icol[],MatReuse scall,Mat *submat[])
6785: {
6787: PetscInt i;
6788: PetscBool eq;
6793: if (n) {
6798: }
6800: if (n && scall == MAT_REUSE_MATRIX) {
6803: }
6804: if (!mat->ops->createsubmatrices) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
6805: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
6806: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6807: MatCheckPreallocated(mat,1);
6809: PetscLogEventBegin(MAT_CreateSubMats,mat,0,0,0);
6810: (*mat->ops->createsubmatrices)(mat,n,irow,icol,scall,submat);
6811: PetscLogEventEnd(MAT_CreateSubMats,mat,0,0,0);
6812: for (i=0; i<n; i++) {
6813: (*submat)[i]->factortype = MAT_FACTOR_NONE; /* in case in place factorization was previously done on submatrix */
6814: if (mat->symmetric || mat->structurally_symmetric || mat->hermitian) {
6815: ISEqual(irow[i],icol[i],&eq);
6816: if (eq) {
6817: if (mat->symmetric) {
6818: MatSetOption((*submat)[i],MAT_SYMMETRIC,PETSC_TRUE);
6819: } else if (mat->hermitian) {
6820: MatSetOption((*submat)[i],MAT_HERMITIAN,PETSC_TRUE);
6821: } else if (mat->structurally_symmetric) {
6822: MatSetOption((*submat)[i],MAT_STRUCTURALLY_SYMMETRIC,PETSC_TRUE);
6823: }
6824: }
6825: }
6826: }
6827: return(0);
6828: }
6830: /*@C
6831: MatCreateSubMatricesMPI - Extracts MPI submatrices across a sub communicator of mat (by pairs of IS that may live on subcomms).
6833: Collective on Mat
6835: Input Parameters:
6836: + mat - the matrix
6837: .  n   - the number of submatrices to be extracted
6838: . irow, icol - index sets of rows and columns to extract
6839: - scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
6841: Output Parameter:
6842: . submat - the array of submatrices
6844: Level: advanced
6846: Concepts: matrices^accessing submatrices
6847: Concepts: submatrices
6849: .seealso: MatCreateSubMatrices(), MatCreateSubMatrix(), MatGetRow(), MatGetDiagonal(), MatReuse
6850: @*/
6851: PetscErrorCode MatCreateSubMatricesMPI(Mat mat,PetscInt n,const IS irow[],const IS icol[],MatReuse scall,Mat *submat[])
6852: {
6854: PetscInt i;
6855: PetscBool eq;
6860: if (n) {
6865: }
6867: if (n && scall == MAT_REUSE_MATRIX) {
6870: }
6871: if (!mat->ops->createsubmatricesmpi) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
6872: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
6873: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6874: MatCheckPreallocated(mat,1);
6876: PetscLogEventBegin(MAT_CreateSubMats,mat,0,0,0);
6877: (*mat->ops->createsubmatricesmpi)(mat,n,irow,icol,scall,submat);
6878: PetscLogEventEnd(MAT_CreateSubMats,mat,0,0,0);
6879: for (i=0; i<n; i++) {
6880: if (mat->symmetric || mat->structurally_symmetric || mat->hermitian) {
6881: ISEqual(irow[i],icol[i],&eq);
6882: if (eq) {
6883: if (mat->symmetric) {
6884: MatSetOption((*submat)[i],MAT_SYMMETRIC,PETSC_TRUE);
6885: } else if (mat->hermitian) {
6886: MatSetOption((*submat)[i],MAT_HERMITIAN,PETSC_TRUE);
6887: } else if (mat->structurally_symmetric) {
6888: MatSetOption((*submat)[i],MAT_STRUCTURALLY_SYMMETRIC,PETSC_TRUE);
6889: }
6890: }
6891: }
6892: }
6893: return(0);
6894: }
6896: /*@C
6897: MatDestroyMatrices - Destroys an array of matrices.
6899: Collective on Mat
6901: Input Parameters:
6902: + n - the number of local matrices
6903: - mat - the matrices (note that this is a pointer to the array of matrices)
6905: Level: advanced
6907: Notes:
6908:     Frees not only the matrices, but also the array that contains them.
6909:     In Fortran this routine does not free the array.

6911: .seealso: MatCreateSubMatrices(), MatDestroySubMatrices()
6912: @*/
6913: PetscErrorCode MatDestroyMatrices(PetscInt n,Mat *mat[])
6914: {
6916: PetscInt i;
6919: if (!*mat) return(0);
6920: if (n < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Trying to destroy negative number of matrices %D",n);
6923: for (i=0; i<n; i++) {
6924: MatDestroy(&(*mat)[i]);
6925: }
6927: /* memory is allocated even if n = 0 */
6928: PetscFree(*mat);
6929: return(0);
6930: }
6932: /*@C
6933: MatDestroySubMatrices - Destroys a set of matrices obtained with MatCreateSubMatrices().
6935: Collective on Mat
6937: Input Parameters:
6938: + n - the number of local matrices
6939: - mat - the matrices (note that this is a pointer to the array of matrices, just to match the calling
6940: sequence of MatCreateSubMatrices())
6942: Level: advanced
6944: Notes:
6945:     Frees not only the matrices, but also the array that contains them.
6946:     In Fortran this routine does not free the array.
6948: .seealso: MatCreateSubMatrices()
6949: @*/
6950: PetscErrorCode MatDestroySubMatrices(PetscInt n,Mat *mat[])
6951: {
6953: Mat mat0;
6956: if (!*mat) return(0);
6957: /* mat[] is an array of length n+1, see MatCreateSubMatrices_xxx() */
6958: if (n < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Trying to destroy negative number of matrices %D",n);
6961: mat0 = (*mat)[0];
6962: if (mat0 && mat0->ops->destroysubmatrices) {
6963: (mat0->ops->destroysubmatrices)(n,mat);
6964: } else {
6965: MatDestroyMatrices(n,mat);
6966: }
6967: return(0);
6968: }
6970: /*@C
6971: MatGetSeqNonzeroStructure - Extracts the sequential nonzero structure from a matrix.
6973: Collective on Mat
6975: Input Parameters:
6976: . mat - the matrix
6978: Output Parameter:
6979: . matstruct - the sequential matrix with the nonzero structure of mat
6981: Level: intermediate
6983: .seealso: MatDestroySeqNonzeroStructure(), MatCreateSubMatrices(), MatDestroyMatrices()
6984: @*/
6985: PetscErrorCode MatGetSeqNonzeroStructure(Mat mat,Mat *matstruct)
6986: {
6994: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6995: MatCheckPreallocated(mat,1);
6997: if (!mat->ops->getseqnonzerostructure) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Not for matrix type %s\n",((PetscObject)mat)->type_name);
6998: PetscLogEventBegin(MAT_GetSeqNonzeroStructure,mat,0,0,0);
6999: (*mat->ops->getseqnonzerostructure)(mat,matstruct);
7000: PetscLogEventEnd(MAT_GetSeqNonzeroStructure,mat,0,0,0);
7001: return(0);
7002: }
7004: /*@C
7005: MatDestroySeqNonzeroStructure - Destroys matrix obtained with MatGetSeqNonzeroStructure().
7007: Collective on Mat
7009: Input Parameters:
7010: .  mat - the matrix obtained with MatGetSeqNonzeroStructure() (note that a pointer to the matrix is passed, to match the calling
7011:          sequence of MatGetSeqNonzeroStructure())
7013: Level: advanced
7015:     Notes:
7016:     Destroys the sequential matrix obtained with MatGetSeqNonzeroStructure()
7018: .seealso: MatGetSeqNonzeroStructure()
7019: @*/
7020: PetscErrorCode MatDestroySeqNonzeroStructure(Mat *mat)
7021: {
7026: MatDestroy(mat);
7027: return(0);
7028: }
7030: /*@
7031: MatIncreaseOverlap - Given a set of submatrices indicated by index sets,
7032: replaces the index sets by larger ones that represent submatrices with
7033: additional overlap.
7035: Collective on Mat
7037: Input Parameters:
7038: + mat - the matrix
7039: . n - the number of index sets
7040: .  is  - the array of index sets (these index sets will be changed during the call)
7041: - ov - the additional overlap requested
7043: Options Database:
7044: . -mat_increase_overlap_scalable - use a scalable algorithm to compute the overlap (supported by MPIAIJ matrix)
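
   Example usage:
   A minimal sketch growing a single local domain by one layer of overlap (assuming rstart and rend were obtained with MatGetOwnershipRange(mat,&rstart,&rend)):
.vb
      IS is;
      ISCreateStride(PETSC_COMM_SELF,rend-rstart,rstart,1,&is);  /* locally owned rows */
      MatIncreaseOverlap(mat,1,&is,1);                           /* is now also includes neighboring rows */
      /* ... use is, e.g. with MatCreateSubMatrices() ... */
      ISDestroy(&is);
.ve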
7046: Level: developer
7048: Concepts: overlap
7049: Concepts: ASM^computing overlap
7051: .seealso: MatCreateSubMatrices()
7052: @*/
7053: PetscErrorCode MatIncreaseOverlap(Mat mat,PetscInt n,IS is[],PetscInt ov)
7054: {
7060: if (n < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Must have one or more domains, you have %D",n);
7061: if (n) {
7064: }
7065: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
7066: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
7067: MatCheckPreallocated(mat,1);
7069: if (!ov) return(0);
7070: if (!mat->ops->increaseoverlap) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
7071: PetscLogEventBegin(MAT_IncreaseOverlap,mat,0,0,0);
7072: (*mat->ops->increaseoverlap)(mat,n,is,ov);
7073: PetscLogEventEnd(MAT_IncreaseOverlap,mat,0,0,0);
7074: return(0);
7075: }
7078: PetscErrorCode MatIncreaseOverlapSplit_Single(Mat,IS*,PetscInt);
7080: /*@
7081: MatIncreaseOverlapSplit - Given a set of submatrices indicated by index sets across
7082: a sub communicator, replaces the index sets by larger ones that represent submatrices with
7083: additional overlap.
7085: Collective on Mat
7087: Input Parameters:
7088: + mat - the matrix
7089: . n - the number of index sets
7090: .  is  - the array of index sets (these index sets will be changed during the call)
7091: - ov - the additional overlap requested
7093: Options Database:
7094: . -mat_increase_overlap_scalable - use a scalable algorithm to compute the overlap (supported by MPIAIJ matrix)
7096: Level: developer
7098: Concepts: overlap
7099: Concepts: ASM^computing overlap
7101: .seealso: MatCreateSubMatrices()
7102: @*/
7103: PetscErrorCode MatIncreaseOverlapSplit(Mat mat,PetscInt n,IS is[],PetscInt ov)
7104: {
7105: PetscInt i;
7111: if (n < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Must have one or more domains, you have %D",n);
7112: if (n) {
7115: }
7116: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
7117: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
7118: MatCheckPreallocated(mat,1);
7119: if (!ov) return(0);
7120: PetscLogEventBegin(MAT_IncreaseOverlap,mat,0,0,0);
7121:   for (i=0; i<n; i++) {
7122: MatIncreaseOverlapSplit_Single(mat,&is[i],ov);
7123: }
7124: PetscLogEventEnd(MAT_IncreaseOverlap,mat,0,0,0);
7125: return(0);
7126: }
7131: /*@
7132: MatGetBlockSize - Returns the matrix block size.
7134: Not Collective
7136: Input Parameter:
7137: . mat - the matrix
7139: Output Parameter:
7140: . bs - block size
7142: Notes:
7143: Block row formats are MATSEQBAIJ, MATMPIBAIJ, MATSEQSBAIJ, MATMPISBAIJ. These formats ALWAYS have square block storage in the matrix.
7145: If the block size has not been set yet this routine returns 1.
7147: Level: intermediate
7149: Concepts: matrices^block size
7151: .seealso: MatCreateSeqBAIJ(), MatCreateBAIJ(), MatGetBlockSizes()
7152: @*/
7153: PetscErrorCode MatGetBlockSize(Mat mat,PetscInt *bs)
7154: {
7158: *bs = PetscAbs(mat->rmap->bs);
7159: return(0);
7160: }
7162: /*@
7163: MatGetBlockSizes - Returns the matrix block row and column sizes.
7165: Not Collective
7167: Input Parameter:
7168: . mat - the matrix
7170:   Output Parameters:
7171: +  rbs - row block size
7172: -  cbs - column block size
7174: Notes:
7175: Block row formats are MATSEQBAIJ, MATMPIBAIJ, MATSEQSBAIJ, MATMPISBAIJ. These formats ALWAYS have square block storage in the matrix.
7176: If you pass a different block size for the columns than the rows, the row block size determines the square block storage.
7178: If a block size has not been set yet this routine returns 1.
7180: Level: intermediate
7182: Concepts: matrices^block size
7184: .seealso: MatCreateSeqBAIJ(), MatCreateBAIJ(), MatGetBlockSize(), MatSetBlockSize(), MatSetBlockSizes()
7185: @*/
7186: PetscErrorCode MatGetBlockSizes(Mat mat,PetscInt *rbs, PetscInt *cbs)
7187: {
7192: if (rbs) *rbs = PetscAbs(mat->rmap->bs);
7193: if (cbs) *cbs = PetscAbs(mat->cmap->bs);
7194: return(0);
7195: }
7197: /*@
7198: MatSetBlockSize - Sets the matrix block size.
7200: Logically Collective on Mat
7202: Input Parameters:
7203: + mat - the matrix
7204: - bs - block size
7206: Notes:
7207: Block row formats are MATSEQBAIJ, MATMPIBAIJ, MATSEQSBAIJ, MATMPISBAIJ. These formats ALWAYS have square block storage in the matrix.
7208: This must be called before MatSetUp() or MatXXXSetPreallocation() (or will default to 1) and the block size cannot be changed later.
7210: For MATMPIAIJ and MATSEQAIJ matrix formats, this function can be called at a later stage, provided that the specified block size
7211: is compatible with the matrix local sizes.
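
   Example usage:
   A minimal sketch for a BAIJ matrix (M and N are placeholders for the global dimensions, which must be divisible by the block size):
.vb
      Mat A;
      MatCreate(PETSC_COMM_WORLD,&A);
      MatSetSizes(A,PETSC_DECIDE,PETSC_DECIDE,M,N);
      MatSetType(A,MATBAIJ);
      MatSetBlockSize(A,3);          /* must be set before preallocation/MatSetUp() */
      MatSetUp(A);
.ve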
7213: Level: intermediate
7215: Concepts: matrices^block size
7217: .seealso: MatCreateSeqBAIJ(), MatCreateBAIJ(), MatGetBlockSize(), MatSetBlockSizes(), MatGetBlockSizes()
7218: @*/
7219: PetscErrorCode MatSetBlockSize(Mat mat,PetscInt bs)
7220: {
7226: MatSetBlockSizes(mat,bs,bs);
7227: return(0);
7228: }
7230: /*@
7231:    MatSetVariableBlockSizes - Sets diagonal blocks of the matrix that need not all be of the same size
7233: Logically Collective on Mat
7235: Input Parameters:
7236: + mat - the matrix
7237: . nblocks - the number of blocks on this process
7238: - bsizes - the block sizes
7240: Notes:
7241: Currently used by PCVPBJACOBI for SeqAIJ matrices
7243: Level: intermediate
7245: Concepts: matrices^block size
7247: .seealso: MatCreateSeqBAIJ(), MatCreateBAIJ(), MatGetBlockSize(), MatSetBlockSizes(), MatGetBlockSizes(), MatGetVariableBlockSizes()
7248: @*/
7249: PetscErrorCode MatSetVariableBlockSizes(Mat mat,PetscInt nblocks,PetscInt *bsizes)
7250: {
7252: PetscInt i,ncnt = 0, nlocal;
7256:   if (nblocks < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Number of local blocks must be greater than or equal to zero");
7257: MatGetLocalSize(mat,&nlocal,NULL);
7258: for (i=0; i<nblocks; i++) ncnt += bsizes[i];
7259: if (ncnt != nlocal) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Sum of local block sizes %D does not equal local size of matrix %D",ncnt,nlocal);
7260: PetscFree(mat->bsizes);
7261: mat->nblocks = nblocks;
7262: PetscMalloc1(nblocks,&mat->bsizes);
7263: PetscMemcpy(mat->bsizes,bsizes,nblocks*sizeof(PetscInt));
7264: return(0);
7265: }
7267: /*@C
7268:    MatGetVariableBlockSizes - Gets the diagonal blocks of the matrix, which need not all be of the same size
7270: Logically Collective on Mat
7272: Input Parameters:
7273: . mat - the matrix
7275: Output Parameters:
7276: + nblocks - the number of blocks on this process
7277: - bsizes - the block sizes
7279: Notes: Currently not supported from Fortran
7281: Level: intermediate
7283: Concepts: matrices^block size
7285: .seealso: MatCreateSeqBAIJ(), MatCreateBAIJ(), MatGetBlockSize(), MatSetBlockSizes(), MatGetBlockSizes(), MatSetVariableBlockSizes()
7286: @*/
7287: PetscErrorCode MatGetVariableBlockSizes(Mat mat,PetscInt *nblocks,const PetscInt **bsizes)
7288: {
7291: *nblocks = mat->nblocks;
7292: *bsizes = mat->bsizes;
7293: return(0);
7294: }
7296: /*@
7297: MatSetBlockSizes - Sets the matrix block row and column sizes.
7299: Logically Collective on Mat
7301: Input Parameters:
7302: + mat - the matrix
7303: .  rbs - row block size
7304: - cbs - column block size
7306: Notes:
7307: Block row formats are MATSEQBAIJ, MATMPIBAIJ, MATSEQSBAIJ, MATMPISBAIJ. These formats ALWAYS have square block storage in the matrix.
7308: If you pass a different block size for the columns than the rows, the row block size determines the square block storage.
7309: This must be called before MatSetUp() or MatXXXSetPreallocation() (or will default to 1) and the block size cannot be changed later
7311: For MATMPIAIJ and MATSEQAIJ matrix formats, this function can be called at a later stage, provided that the specified block sizes
7312: are compatible with the matrix local sizes.
7314: The row and column block size determine the blocksize of the "row" and "column" vectors returned by MatCreateVecs().
7316: Level: intermediate
7318: Concepts: matrices^block size
7320: .seealso: MatCreateSeqBAIJ(), MatCreateBAIJ(), MatGetBlockSize(), MatSetBlockSize(), MatGetBlockSizes()
7321: @*/
7322: PetscErrorCode MatSetBlockSizes(Mat mat,PetscInt rbs,PetscInt cbs)
7323: {
7330: if (mat->ops->setblocksizes) {
7331: (*mat->ops->setblocksizes)(mat,rbs,cbs);
7332: }
7333: if (mat->rmap->refcnt) {
7334: ISLocalToGlobalMapping l2g = NULL;
7335: PetscLayout nmap = NULL;
7337: PetscLayoutDuplicate(mat->rmap,&nmap);
7338: if (mat->rmap->mapping) {
7339: ISLocalToGlobalMappingDuplicate(mat->rmap->mapping,&l2g);
7340: }
7341: PetscLayoutDestroy(&mat->rmap);
7342: mat->rmap = nmap;
7343: mat->rmap->mapping = l2g;
7344: }
7345: if (mat->cmap->refcnt) {
7346: ISLocalToGlobalMapping l2g = NULL;
7347: PetscLayout nmap = NULL;
7349: PetscLayoutDuplicate(mat->cmap,&nmap);
7350: if (mat->cmap->mapping) {
7351: ISLocalToGlobalMappingDuplicate(mat->cmap->mapping,&l2g);
7352: }
7353: PetscLayoutDestroy(&mat->cmap);
7354: mat->cmap = nmap;
7355: mat->cmap->mapping = l2g;
7356: }
7357: PetscLayoutSetBlockSize(mat->rmap,rbs);
7358: PetscLayoutSetBlockSize(mat->cmap,cbs);
7359: return(0);
7360: }
7362: /*@
7363: MatSetBlockSizesFromMats - Sets the matrix block row and column sizes to match a pair of matrices
7365: Logically Collective on Mat
7367: Input Parameters:
7368: + mat - the matrix
7369: . fromRow - matrix from which to copy row block size
7370: - fromCol - matrix from which to copy column block size (can be same as fromRow)
7372: Level: developer
7374: Concepts: matrices^block size
7376: .seealso: MatCreateSeqBAIJ(), MatCreateBAIJ(), MatGetBlockSize(), MatSetBlockSizes()
7377: @*/
7378: PetscErrorCode MatSetBlockSizesFromMats(Mat mat,Mat fromRow,Mat fromCol)
7379: {
7386: if (fromRow->rmap->bs > 0) {PetscLayoutSetBlockSize(mat->rmap,fromRow->rmap->bs);}
7387: if (fromCol->cmap->bs > 0) {PetscLayoutSetBlockSize(mat->cmap,fromCol->cmap->bs);}
7388: return(0);
7389: }
7391: /*@
7392: MatResidual - Default routine to calculate the residual.
7394: Collective on Mat and Vec
7396: Input Parameters:
7397: + mat - the matrix
7398: . b - the right-hand-side
7399: - x - the approximate solution
7401: Output Parameter:
7402: . r - location to store the residual
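
   Example usage:
   A minimal sketch computing the residual norm (assuming b, x, r are vectors with layouts compatible with mat):
.vb
      PetscReal rnorm;
      MatResidual(mat,b,x,r);        /* r = b - mat*x */
      VecNorm(r,NORM_2,&rnorm);
.ve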
7404: Level: developer
7406: .keywords: MG, default, multigrid, residual
7408: .seealso: PCMGSetResidual()
7409: @*/
7410: PetscErrorCode MatResidual(Mat mat,Vec b,Vec x,Vec r)
7411: {
7420: MatCheckPreallocated(mat,1);
7421: PetscLogEventBegin(MAT_Residual,mat,0,0,0);
7422: if (!mat->ops->residual) {
7423: MatMult(mat,x,r);
7424: VecAYPX(r,-1.0,b);
7425: } else {
7426: (*mat->ops->residual)(mat,b,x,r);
7427: }
7428: PetscLogEventEnd(MAT_Residual,mat,0,0,0);
7429: return(0);
7430: }
7432: /*@C
7433: MatGetRowIJ - Returns the compressed row storage i and j indices for sequential matrices.
7435: Collective on Mat
7437: Input Parameters:
7438: + mat - the matrix
7439: . shift - 0 or 1 indicating we want the indices starting at 0 or 1
7440: . symmetric - PETSC_TRUE or PETSC_FALSE indicating the matrix data structure should be symmetrized
7441: - inodecompressed - PETSC_TRUE or PETSC_FALSE indicating if the nonzero structure of the
7442: inodes or the nonzero elements is wanted. For BAIJ matrices the compressed version is
7443: always used.
7445: Output Parameters:
7446: + n - number of rows in the (possibly compressed) matrix
7447: . ia - the row pointers [of length n+1]
7448: . ja - the column indices
7449: - done - indicates if the routine actually worked and returned appropriate ia[] and ja[] arrays; callers
7450: are responsible for handling the case when done == PETSC_FALSE and ia and ja are not set
7452: Level: developer
7454: Notes:
7455: You CANNOT change any of the ia[] or ja[] values.
7457: Use MatRestoreRowIJ() when you are finished accessing the ia[] and ja[] values.
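
    Example usage:
    A minimal sketch (assuming mat is a sequential AIJ matrix) traversing the compressed row structure:
.vb
      PetscInt       n,i,j;
      const PetscInt *ia,*ja;
      PetscBool      done;
      MatGetRowIJ(mat,0,PETSC_FALSE,PETSC_FALSE,&n,&ia,&ja,&done);
      if (done) {
        for (i=0; i<n; i++) {
          for (j=ia[i]; j<ia[i+1]; j++) {
            /* nonzero in row i, column ja[j] */
          }
        }
      }
      MatRestoreRowIJ(mat,0,PETSC_FALSE,PETSC_FALSE,&n,&ia,&ja,&done);
.ve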
7459: Fortran Notes:
7460: In Fortran use
7461: $
7462: $ PetscInt ia(1), ja(1)
7463: $ PetscOffset iia, jja
7464: $ call MatGetRowIJ(mat,shift,symmetric,inodecompressed,n,ia,iia,ja,jja,done,ierr)
7465: $ ! Access the ith and jth entries via ia(iia + i) and ja(jja + j)
7467: or
7468: $
7469: $ PetscInt, pointer :: ia(:),ja(:)
7470: $ call MatGetRowIJF90(mat,shift,symmetric,inodecompressed,n,ia,ja,done,ierr)
7471: $ ! Access the ith and jth entries via ia(i) and ja(j)
7473: .seealso: MatGetColumnIJ(), MatRestoreRowIJ(), MatSeqAIJGetArray()
7474: @*/
7475: PetscErrorCode MatGetRowIJ(Mat mat,PetscInt shift,PetscBool symmetric,PetscBool inodecompressed,PetscInt *n,const PetscInt *ia[],const PetscInt *ja[],PetscBool *done)
7476: {
7486: MatCheckPreallocated(mat,1);
7487: if (!mat->ops->getrowij) *done = PETSC_FALSE;
7488: else {
7489: *done = PETSC_TRUE;
7490: PetscLogEventBegin(MAT_GetRowIJ,mat,0,0,0);
7491: (*mat->ops->getrowij)(mat,shift,symmetric,inodecompressed,n,ia,ja,done);
7492: PetscLogEventEnd(MAT_GetRowIJ,mat,0,0,0);
7493: }
7494: return(0);
7495: }
7497: /*@C
7498: MatGetColumnIJ - Returns the compressed column storage i and j indices for sequential matrices.
7500: Collective on Mat
7502: Input Parameters:
7503: + mat - the matrix
7504: . shift - 1 or zero indicating we want the indices starting at 0 or 1
7505: . symmetric - PETSC_TRUE or PETSC_FALSE indicating the matrix data structure should be
7506: symmetrized
7507: . inodecompressed - PETSC_TRUE or PETSC_FALSE indicating if the nonzero structure of the
7508: inodes or the nonzero elements is wanted. For BAIJ matrices the compressed version is
7509: always used.
7510: . n - number of columns in the (possibly compressed) matrix
7511: . ia - the column pointers
7512: - ja - the row indices
7514: Output Parameters:
7515: . done - PETSC_TRUE or PETSC_FALSE, indicating whether the values have been returned
7517: Note:
7518: This routine zeros out n, ia, and ja. This is to prevent accidental
7519:     use of the array after it has been restored. If you pass NULL, it will
7520: not zero the pointers. Use of ia or ja after MatRestoreColumnIJ() is invalid.
7522: Level: developer
7524: .seealso: MatGetRowIJ(), MatRestoreColumnIJ()
7525: @*/
7526: PetscErrorCode MatGetColumnIJ(Mat mat,PetscInt shift,PetscBool symmetric,PetscBool inodecompressed,PetscInt *n,const PetscInt *ia[],const PetscInt *ja[],PetscBool *done)
7527: {
7537: MatCheckPreallocated(mat,1);
7538: if (!mat->ops->getcolumnij) *done = PETSC_FALSE;
7539: else {
7540: *done = PETSC_TRUE;
7541: (*mat->ops->getcolumnij)(mat,shift,symmetric,inodecompressed,n,ia,ja,done);
7542: }
7543: return(0);
7544: }
7546: /*@C
7547: MatRestoreRowIJ - Call after you are completed with the ia,ja indices obtained with
7548: MatGetRowIJ().
7550: Collective on Mat
7552: Input Parameters:
7553: + mat - the matrix
7554: . shift - 1 or zero indicating we want the indices starting at 0 or 1
7555: . symmetric - PETSC_TRUE or PETSC_FALSE indicating the matrix data structure should be
7556: symmetrized
7557: . inodecompressed - PETSC_TRUE or PETSC_FALSE indicating if the nonzero structure of the
7558: inodes or the nonzero elements is wanted. For BAIJ matrices the compressed version is
7559: always used.
7560: . n - size of (possibly compressed) matrix
7561: . ia - the row pointers
7562: - ja - the column indices
7564: Output Parameters:
7565: .  done - PETSC_TRUE or PETSC_FALSE indicating whether the values have been returned
7567: Note:
7568: This routine zeros out n, ia, and ja. This is to prevent accidental
7569:     use of the array after it has been restored. If you pass NULL, it will
7570: not zero the pointers. Use of ia or ja after MatRestoreRowIJ() is invalid.
7572: Level: developer
7574: .seealso: MatGetRowIJ(), MatRestoreColumnIJ()
7575: @*/
7576: PetscErrorCode MatRestoreRowIJ(Mat mat,PetscInt shift,PetscBool symmetric,PetscBool inodecompressed,PetscInt *n,const PetscInt *ia[],const PetscInt *ja[],PetscBool *done)
7577: {
7586: MatCheckPreallocated(mat,1);
7588: if (!mat->ops->restorerowij) *done = PETSC_FALSE;
7589: else {
7590: *done = PETSC_TRUE;
7591: (*mat->ops->restorerowij)(mat,shift,symmetric,inodecompressed,n,ia,ja,done);
7592: if (n) *n = 0;
7593: if (ia) *ia = NULL;
7594: if (ja) *ja = NULL;
7595: }
7596: return(0);
7597: }
7599: /*@C
7600: MatRestoreColumnIJ - Call after you are completed with the ia,ja indices obtained with
7601: MatGetColumnIJ().
7603: Collective on Mat
7605: Input Parameters:
7606: + mat - the matrix
7607: .  shift - 0 or 1 indicating we want the indices starting at 0 or 1
7608: .  symmetric - PETSC_TRUE or PETSC_FALSE indicating the matrix data structure should be
7609:               symmetrized
7610: - inodecompressed - PETSC_TRUE or PETSC_FALSE indicating if the nonzero structure of the
7611: inodes or the nonzero elements is wanted. For BAIJ matrices the compressed version is
7612: always used.
7614: Output Parameters:
7615: + n - size of (possibly compressed) matrix
7616: . ia - the column pointers
7617: . ja - the row indices
7618: -  done - PETSC_TRUE or PETSC_FALSE indicating whether the values have been returned
7620: Level: developer
7622: .seealso: MatGetColumnIJ(), MatRestoreRowIJ()
7623: @*/
7624: PetscErrorCode MatRestoreColumnIJ(Mat mat,PetscInt shift,PetscBool symmetric,PetscBool inodecompressed,PetscInt *n,const PetscInt *ia[],const PetscInt *ja[],PetscBool *done)
7625: {
7634: MatCheckPreallocated(mat,1);
7636: if (!mat->ops->restorecolumnij) *done = PETSC_FALSE;
7637: else {
7638: *done = PETSC_TRUE;
7639: (*mat->ops->restorecolumnij)(mat,shift,symmetric,inodecompressed,n,ia,ja,done);
7640: if (n) *n = 0;
7641: if (ia) *ia = NULL;
7642: if (ja) *ja = NULL;
7643: }
7644: return(0);
7645: }
7647: /*@C
7648:     MatColoringPatch - Used inside matrix coloring routines that
7649: use MatGetRowIJ() and/or MatGetColumnIJ().
7651: Collective on Mat
7653: Input Parameters:
7654: + mat - the matrix
7655: . ncolors - max color value
7656: . n - number of entries in colorarray
7657: - colorarray - array indicating color for each column
7659: Output Parameters:
7660: . iscoloring - coloring generated using colorarray information
7662: Level: developer
7664: .seealso: MatGetRowIJ(), MatGetColumnIJ()
7666: @*/
7667: PetscErrorCode MatColoringPatch(Mat mat,PetscInt ncolors,PetscInt n,ISColoringValue colorarray[],ISColoring *iscoloring)
7668: {
7676: MatCheckPreallocated(mat,1);
7678: if (!mat->ops->coloringpatch) {
7679: ISColoringCreate(PetscObjectComm((PetscObject)mat),ncolors,n,colorarray,PETSC_OWN_POINTER,iscoloring);
7680: } else {
7681: (*mat->ops->coloringpatch)(mat,ncolors,n,colorarray,iscoloring);
7682: }
7683: return(0);
7684: }
7687: /*@
7688: MatSetUnfactored - Resets a factored matrix to be treated as unfactored.
7690: Logically Collective on Mat
7692: Input Parameter:
7693: . mat - the factored matrix to be reset
7695: Notes:
7696: This routine should be used only with factored matrices formed by in-place
7697: factorization via ILU(0) (or by in-place LU factorization for the MATSEQDENSE
7698: format). This option can save memory, for example, when solving nonlinear
7699: systems with a matrix-free Newton-Krylov method and a matrix-based, in-place
7700: ILU(0) preconditioner.
7702: Note that one can specify in-place ILU(0) factorization by calling
7703: .vb
7704:       PCSetType(pc,PCILU);
7705:       PCFactorSetUseInPlace(pc,PETSC_TRUE);
7706: .ve
7707: or by using the options -pc_type ilu -pc_factor_in_place
7709: In-place factorization ILU(0) can also be used as a local
7710: solver for the blocks within the block Jacobi or additive Schwarz
7711: methods (runtime option: -sub_pc_factor_in_place). See Users-Manual: ch_pc
7712: for details on setting local solver options.
7714: Most users should employ the simplified KSP interface for linear solvers
7715: instead of working directly with matrix algebra routines such as this.
7716: See, e.g., KSPCreate().
7718: Level: developer
7720: .seealso: PCFactorSetUseInPlace(), PCFactorGetUseInPlace()
7722: Concepts: matrices^unfactored
7724: @*/
7725: PetscErrorCode MatSetUnfactored(Mat mat)
7726: {
7732: MatCheckPreallocated(mat,1);
7733: mat->factortype = MAT_FACTOR_NONE;
7734: if (!mat->ops->setunfactored) return(0);
7735: (*mat->ops->setunfactored)(mat);
7736: return(0);
7737: }
7739: /*MC
7740: MatDenseGetArrayF90 - Accesses a matrix array from Fortran90.
7742: Synopsis:
7743: MatDenseGetArrayF90(Mat x,{Scalar, pointer :: xx_v(:,:)},integer ierr)
7745: Not collective
7747: Input Parameter:
7748: . x - matrix
7750: Output Parameters:
7751: + xx_v - the Fortran90 pointer to the array
7752: - ierr - error code
7754: Example of Usage:
7755: .vb
7756:       PetscScalar, pointer :: xx_v(:,:)
7757:       ....
7758:       call MatDenseGetArrayF90(x,xx_v,ierr)
7759:       a = xx_v(3,1)
7760: call MatDenseRestoreArrayF90(x,xx_v,ierr)
7761: .ve
7763: Level: advanced
7765: .seealso: MatDenseRestoreArrayF90(), MatDenseGetArray(), MatDenseRestoreArray(), MatSeqAIJGetArrayF90()
7767: Concepts: matrices^accessing array
7769: M*/
7771: /*MC
7772: MatDenseRestoreArrayF90 - Restores a matrix array that has been
7773: accessed with MatDenseGetArrayF90().
7775: Synopsis:
7776: MatDenseRestoreArrayF90(Mat x,{Scalar, pointer :: xx_v(:,:)},integer ierr)
7778: Not collective
7780: Input Parameters:
7781: + x - matrix
7782: - xx_v - the Fortran90 pointer to the array
7784: Output Parameter:
7785: . ierr - error code
7787: Example of Usage:
7788: .vb
7789:       PetscScalar, pointer :: xx_v(:,:)
7790:       ....
7791:       call MatDenseGetArrayF90(x,xx_v,ierr)
7792:       a = xx_v(3,1)
7793: call MatDenseRestoreArrayF90(x,xx_v,ierr)
7794: .ve
7796: Level: advanced
7798: .seealso: MatDenseGetArrayF90(), MatDenseGetArray(), MatDenseRestoreArray(), MatSeqAIJRestoreArrayF90()
7800: M*/
7803: /*MC
7804: MatSeqAIJGetArrayF90 - Accesses a matrix array from Fortran90.
7806: Synopsis:
7807: MatSeqAIJGetArrayF90(Mat x,{Scalar, pointer :: xx_v(:)},integer ierr)
7809: Not collective
7811: Input Parameter:
7812: . x - matrix
7814: Output Parameters:
7815: + xx_v - the Fortran90 pointer to the array
7816: - ierr - error code
7818: Example of Usage:
7819: .vb
7820:       PetscScalar, pointer :: xx_v(:)
7821: ....
7822: call MatSeqAIJGetArrayF90(x,xx_v,ierr)
7823: a = xx_v(3)
7824: call MatSeqAIJRestoreArrayF90(x,xx_v,ierr)
7825: .ve
7827: Level: advanced
7829: .seealso: MatSeqAIJRestoreArrayF90(), MatSeqAIJGetArray(), MatSeqAIJRestoreArray(), MatDenseGetArrayF90()
7831: Concepts: matrices^accessing array
7833: M*/
7835: /*MC
7836: MatSeqAIJRestoreArrayF90 - Restores a matrix array that has been
7837: accessed with MatSeqAIJGetArrayF90().
7839: Synopsis:
7840: MatSeqAIJRestoreArrayF90(Mat x,{Scalar, pointer :: xx_v(:)},integer ierr)
7842: Not collective
7844: Input Parameters:
7845: + x - matrix
7846: - xx_v - the Fortran90 pointer to the array
7848: Output Parameter:
7849: . ierr - error code
7851: Example of Usage:
7852: .vb
7853:       PetscScalar, pointer :: xx_v(:)
7854: ....
7855: call MatSeqAIJGetArrayF90(x,xx_v,ierr)
7856: a = xx_v(3)
7857: call MatSeqAIJRestoreArrayF90(x,xx_v,ierr)
7858: .ve
7860: Level: advanced
7862: .seealso: MatSeqAIJGetArrayF90(), MatSeqAIJGetArray(), MatSeqAIJRestoreArray(), MatDenseRestoreArrayF90()
7864: M*/
7867: /*@
7868: MatCreateSubMatrix - Gets a single submatrix on the same number of processors
7869: as the original matrix.
7871: Collective on Mat
7873: Input Parameters:
7874: + mat - the original matrix
7875: . isrow - parallel IS containing the rows this processor should obtain
7876: .  iscol - parallel IS containing all columns you wish to keep. Each process should list the columns that will be in its "diagonal part" in the new matrix.
7877: - cll - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
7879: Output Parameter:
7880: . newmat - the new submatrix, of the same type as the old
7882: Level: advanced
7884: Notes:
7885: The submatrix will be able to be multiplied with vectors using the same layout as iscol.
7887: Some matrix types place restrictions on the row and column indices, such
7888: as that they be sorted or that they be equal to each other.
7890: The index sets may not have duplicate entries.
7892: The first time this is called you should use a cll of MAT_INITIAL_MATRIX,
7893: the MatCreateSubMatrix() routine will create the newmat for you. Any additional calls
7894: to this routine with a mat of the same nonzero structure and with a call of MAT_REUSE_MATRIX
7895: will reuse the matrix generated the first time. You should call MatDestroy() on newmat when
7896: you are finished using it.
7898: The communicator of the newly obtained matrix is ALWAYS the same as the communicator of
7899: the input matrix.
7901: If iscol is NULL then all columns are obtained (not supported in Fortran).
7903: Example usage:
7904: Consider the following 8x8 matrix with 34 non-zero values, that is
7905: assembled across 3 processors. Let's assume that proc0 owns 3 rows,
7906: proc1 owns 3 rows, proc2 owns 2 rows. This division can be shown
7907: as follows:
7909: .vb
7910: 1 2 0 | 0 3 0 | 0 4
7911: Proc0 0 5 6 | 7 0 0 | 8 0
7912: 9 0 10 | 11 0 0 | 12 0
7913: -------------------------------------
7914: 13 0 14 | 15 16 17 | 0 0
7915: Proc1 0 18 0 | 19 20 21 | 0 0
7916: 0 0 0 | 22 23 0 | 24 0
7917: -------------------------------------
7918: Proc2 25 26 27 | 0 0 28 | 29 0
7919: 30 0 0 | 31 32 33 | 0 34
7920: .ve
7922: Suppose isrow = [0 1 | 4 | 6 7] and iscol = [1 2 | 3 4 5 | 6]. The resulting submatrix is
7924: .vb
7925: 2 0 | 0 3 0 | 0
7926: Proc0 5 6 | 7 0 0 | 8
7927: -------------------------------
7928: Proc1 18 0 | 19 20 21 | 0
7929: -------------------------------
7930: Proc2 26 27 | 0 0 28 | 29
7931: 0 0 | 31 32 33 | 0
7932: .ve
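
    A sketch of the corresponding calls (nrow, ncol, localrows and localcols are placeholders for each process's portion of isrow and iscol above):
.vb
      IS  isrow,iscol;
      Mat S;
      ISCreateGeneral(PetscObjectComm((PetscObject)mat),nrow,localrows,PETSC_COPY_VALUES,&isrow);
      ISCreateGeneral(PetscObjectComm((PetscObject)mat),ncol,localcols,PETSC_COPY_VALUES,&iscol);
      MatCreateSubMatrix(mat,isrow,iscol,MAT_INITIAL_MATRIX,&S);
      /* ... after the values (but not the nonzero structure) of mat change ... */
      MatCreateSubMatrix(mat,isrow,iscol,MAT_REUSE_MATRIX,&S);
      MatDestroy(&S);
      ISDestroy(&isrow);
      ISDestroy(&iscol);
.ve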
7935: Concepts: matrices^submatrices
7937: .seealso: MatCreateSubMatrices()
7938: @*/
7939: PetscErrorCode MatCreateSubMatrix(Mat mat,IS isrow,IS iscol,MatReuse cll,Mat *newmat)
7940: {
7942: PetscMPIInt size;
7943: Mat *local;
7944: IS iscoltmp;
7953: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
7954: if (cll == MAT_IGNORE_MATRIX) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Cannot use MAT_IGNORE_MATRIX");
7956: MatCheckPreallocated(mat,1);
7957: MPI_Comm_size(PetscObjectComm((PetscObject)mat),&size);
7959: if (!iscol || isrow == iscol) {
7960: PetscBool stride;
7961: PetscMPIInt grabentirematrix = 0,grab;
7962: PetscObjectTypeCompare((PetscObject)isrow,ISSTRIDE,&stride);
7963: if (stride) {
7964: PetscInt first,step,n,rstart,rend;
7965: ISStrideGetInfo(isrow,&first,&step);
7966: if (step == 1) {
7967: MatGetOwnershipRange(mat,&rstart,&rend);
7968: if (rstart == first) {
7969: ISGetLocalSize(isrow,&n);
7970: if (n == rend-rstart) {
7971: grabentirematrix = 1;
7972: }
7973: }
7974: }
7975: }
7976: MPIU_Allreduce(&grabentirematrix,&grab,1,MPI_INT,MPI_MIN,PetscObjectComm((PetscObject)mat));
7977: if (grab) {
7978: PetscInfo(mat,"Getting entire matrix as submatrix\n");
7979: if (cll == MAT_INITIAL_MATRIX) {
7980: *newmat = mat;
7981: PetscObjectReference((PetscObject)mat);
7982: }
7983: return(0);
7984: }
7985: }
7987: if (!iscol) {
7988: ISCreateStride(PetscObjectComm((PetscObject)mat),mat->cmap->n,mat->cmap->rstart,1,&iscoltmp);
7989: } else {
7990: iscoltmp = iscol;
7991: }
7993: /* if original matrix is on just one processor then use submatrix generated */
7994: if (mat->ops->createsubmatrices && !mat->ops->createsubmatrix && size == 1 && cll == MAT_REUSE_MATRIX) {
7995: MatCreateSubMatrices(mat,1,&isrow,&iscoltmp,MAT_REUSE_MATRIX,&newmat);
7996: if (!iscol) {ISDestroy(&iscoltmp);}
7997: return(0);
7998: } else if (mat->ops->createsubmatrices && !mat->ops->createsubmatrix && size == 1) {
7999: MatCreateSubMatrices(mat,1,&isrow,&iscoltmp,MAT_INITIAL_MATRIX,&local);
8000: *newmat = *local;
8001: PetscFree(local);
8002: if (!iscol) {ISDestroy(&iscoltmp);}
8003: return(0);
8004: } else if (!mat->ops->createsubmatrix) {
8005: /* Create a new matrix type that implements the operation using the full matrix */
8006: PetscLogEventBegin(MAT_CreateSubMat,mat,0,0,0);
8007: switch (cll) {
8008: case MAT_INITIAL_MATRIX:
8009: MatCreateSubMatrixVirtual(mat,isrow,iscoltmp,newmat);
8010: break;
8011: case MAT_REUSE_MATRIX:
8012: MatSubMatrixVirtualUpdate(*newmat,mat,isrow,iscoltmp);
8013: break;
8014: default: SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_OUTOFRANGE,"Invalid MatReuse, must be either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX");
8015: }
8016: PetscLogEventEnd(MAT_CreateSubMat,mat,0,0,0);
8017: if (!iscol) {ISDestroy(&iscoltmp);}
8018: return(0);
8019: }
8021: if (!mat->ops->createsubmatrix) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
8022: PetscLogEventBegin(MAT_CreateSubMat,mat,0,0,0);
8023: (*mat->ops->createsubmatrix)(mat,isrow,iscoltmp,cll,newmat);
8024: PetscLogEventEnd(MAT_CreateSubMat,mat,0,0,0);
8026: /* Propagate symmetry information for diagonal blocks */
8027: if (isrow == iscoltmp) {
8028: if (mat->symmetric_set && mat->symmetric) {
8029: MatSetOption(*newmat,MAT_SYMMETRIC,PETSC_TRUE);
8030: }
8031: if (mat->structurally_symmetric_set && mat->structurally_symmetric) {
8032: MatSetOption(*newmat,MAT_STRUCTURALLY_SYMMETRIC,PETSC_TRUE);
8033: }
8034: if (mat->hermitian_set && mat->hermitian) {
8035: MatSetOption(*newmat,MAT_HERMITIAN,PETSC_TRUE);
8036: }
8037: if (mat->spd_set && mat->spd) {
8038: MatSetOption(*newmat,MAT_SPD,PETSC_TRUE);
8039: }
8040: }
8042: if (!iscol) {ISDestroy(&iscoltmp);}
8043: if (*newmat && cll == MAT_INITIAL_MATRIX) {PetscObjectStateIncrease((PetscObject)*newmat);}
8044: return(0);
8045: }
8047: /*@
8048: MatStashSetInitialSize - sets the sizes of the matrix stash, that is
8049: used during the assembly process to store values that belong to
8050: other processors.
8052: Not Collective
8054: Input Parameters:
8055: + mat - the matrix
8056: . size - the initial size of the stash.
8057: -  bsize - the initial size of the block-stash (if used).
8059: Options Database Keys:
8060: + -matstash_initial_size <size> or <size0,size1,...sizep-1>
8061: - -matstash_block_initial_size <bsize> or <bsize0,bsize1,...bsizep-1>
8063: Level: intermediate
8065: Notes:
8066: The block-stash is used for values set with MatSetValuesBlocked() while
8067: the stash is used for values set with MatSetValues()
8069: Run with the option -info and look for output of the form
8070: MatAssemblyBegin_MPIXXX:Stash has MM entries, uses nn mallocs.
8071:    to determine the appropriate value, MM, to use for size, and for output of the form
8072:    MatAssemblyBegin_MPIXXX:Block-Stash has BMM entries, uses nn mallocs.
8073:    to determine the value, BMM, to use for bsize.
8075: Concepts: stash^setting matrix size
8076: Concepts: matrices^stash
8078: .seealso: MatAssemblyBegin(), MatAssemblyEnd(), Mat, MatStashGetInfo()
8080: @*/
8081: PetscErrorCode MatStashSetInitialSize(Mat mat,PetscInt size, PetscInt bsize)
8082: {
8088: MatStashSetInitialSize_Private(&mat->stash,size);
8089: MatStashSetInitialSize_Private(&mat->bstash,bsize);
8090: return(0);
8091: }
8093: /*@
8094:    MatInterpolateAdd - w = y + A*x or w = y + A'*x, depending on the shape of
8095:    the matrix
8097: Neighbor-wise Collective on Mat
8099: Input Parameters:
8100: + mat - the matrix
8101: . x,y - the vectors
8102: - w - where the result is stored
8104: Level: intermediate
8106: Notes:
8107: w may be the same vector as y.
8109: This allows one to use either the restriction or interpolation (its transpose)
8110: matrix to do the interpolation
8112: Concepts: interpolation
8114: .seealso: MatMultAdd(), MatMultTransposeAdd(), MatRestrict()
8116: @*/
8117: PetscErrorCode MatInterpolateAdd(Mat A,Vec x,Vec y,Vec w)
8118: {
8120: PetscInt M,N,Ny;
8128: MatCheckPreallocated(A,1);
8129: MatGetSize(A,&M,&N);
8130: VecGetSize(y,&Ny);
8131: if (M == Ny) {
8132: MatMultAdd(A,x,y,w);
8133: } else {
8134: MatMultTransposeAdd(A,x,y,w);
8135: }
8136: return(0);
8137: }
8139: /*@
8140: MatInterpolate - y = A*x or A'*x depending on the shape of
8141: the matrix
8143: Neighbor-wise Collective on Mat
8145: Input Parameters:
8146: + mat - the matrix
8147: - x,y - the vectors
8149: Level: intermediate
8151: Notes:
8152: This allows one to use either the restriction or interpolation (its transpose)
8153: matrix to do the interpolation
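
   Example usage:
   A minimal sketch (P is assumed to be an interpolation matrix with as many rows as the fine vector xf and as many columns as the coarse vector xc):
.vb
      MatInterpolate(P,xc,xf);       /* xf = P*xc  */
      MatRestrict(P,xf,xc);          /* xc = P'*xf */
.ve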
8155: Concepts: matrices^interpolation
8157: .seealso: MatMultAdd(), MatMultTransposeAdd(), MatRestrict()
8159: @*/
8160: PetscErrorCode MatInterpolate(Mat A,Vec x,Vec y)
8161: {
8163: PetscInt M,N,Ny;
8170: MatCheckPreallocated(A,1);
8171: MatGetSize(A,&M,&N);
8172: VecGetSize(y,&Ny);
8173: if (M == Ny) {
8174: MatMult(A,x,y);
8175: } else {
8176: MatMultTranspose(A,x,y);
8177: }
8178: return(0);
8179: }
8181: /*@
8182: MatRestrict - y = A*x or A'*x
8184: Neighbor-wise Collective on Mat
8186: Input Parameters:
8187: + mat - the matrix
8188: - x,y - the vectors
8190: Level: intermediate
8192: Notes:
8193: This allows one to use either the restriction or interpolation (its transpose)
8194: matrix to do the restriction
8196: Concepts: matrices^restriction
8198: .seealso: MatMultAdd(), MatMultTransposeAdd(), MatInterpolate()
8200: @*/
8201: PetscErrorCode MatRestrict(Mat A,Vec x,Vec y)
8202: {
8204: PetscInt M,N,Ny;
8211: MatCheckPreallocated(A,1);
8213: MatGetSize(A,&M,&N);
8214: VecGetSize(y,&Ny);
8215: if (M == Ny) {
8216: MatMult(A,x,y);
8217: } else {
8218: MatMultTranspose(A,x,y);
8219: }
8220: return(0);
8221: }
8223: /*@
8224: MatGetNullSpace - retrieves the null space of a matrix.
8226: Logically Collective on Mat and MatNullSpace
8228:    Input Parameter:
8229: .  mat - the matrix

   Output Parameter:
8230: .  nullsp - the null space object, NULL if not set
8232: Level: developer
8234: Concepts: null space^attaching to matrix
8236: .seealso: MatCreate(), MatNullSpaceCreate(), MatSetNearNullSpace(), MatSetNullSpace()
8237: @*/
8238: PetscErrorCode MatGetNullSpace(Mat mat, MatNullSpace *nullsp)
8239: {
8243: *nullsp = (mat->symmetric_set && mat->symmetric && !mat->nullsp) ? mat->transnullsp : mat->nullsp;
8244: return(0);
8245: }
8247: /*@
8248: MatSetNullSpace - attaches a null space to a matrix.
8250: Logically Collective on Mat and MatNullSpace
8252: Input Parameters:
8253: + mat - the matrix
8254: - nullsp - the null space object
8256: Level: advanced
8258: Notes:
8259: This null space is used by the linear solvers. Overwrites any previous null space that may have been attached
8261: For inconsistent singular systems (linear systems where the right hand side is not in the range of the operator) you also likely should
8262: call MatSetTransposeNullSpace(). This allows the linear system to be solved in a least squares sense.
8264:       You can remove the null space by calling this routine with a nullsp of NULL
8267:       The fundamental theorem of linear algebra (Gilbert Strang, Introduction to Applied Mathematics, page 72) states that for a matrix A
8268:       mapping R^n to R^m (m rows, n columns), R^n is the direct sum of the null space of A, n(A), and the range of A^T, R(A^T).
8269:       Similarly, R^m is the direct sum of n(A^T) and R(A). Hence the linear system A x = b has a solution only if b is in R(A) (or, equivalently, b is orthogonal to
8270:       n(A^T)), and if x is a solution then x + alpha n(A) is a solution for any alpha. The minimum norm solution is orthogonal to n(A). For problems without a solution,
8271:       the solution that minimizes the norm of the residual (the least squares solution) can be obtained by solving A x = \hat{b} where \hat{b} is b orthogonalized against n(A^T).
8273: Krylov solvers can produce the minimal norm solution to the least squares problem by utilizing MatNullSpaceRemove().
8275:       If the matrix is known to be symmetric, because it is an SBAIJ matrix or one has called MatSetOption(mat,MAT_SYMMETRIC or MAT_SYMMETRIC_ETERNAL,PETSC_TRUE), this
8276:       routine also automatically calls MatSetTransposeNullSpace().
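
   Example usage:
   A minimal sketch attaching the null space consisting of the constant vector (typical for a pure Neumann Poisson problem):
.vb
      MatNullSpace nullsp;
      MatNullSpaceCreate(PetscObjectComm((PetscObject)mat),PETSC_TRUE,0,NULL,&nullsp);
      MatSetNullSpace(mat,nullsp);
      MatNullSpaceDestroy(&nullsp);  /* mat keeps its own reference */
.ve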
8278: Concepts: null space^attaching to matrix
8280: .seealso: MatCreate(), MatNullSpaceCreate(), MatSetNearNullSpace(), MatGetNullSpace(), MatSetTransposeNullSpace(), MatGetTransposeNullSpace(), MatNullSpaceRemove()
8281: @*/
8282: PetscErrorCode MatSetNullSpace(Mat mat,MatNullSpace nullsp)
8283: {
8289: if (nullsp) {PetscObjectReference((PetscObject)nullsp);}
8290: MatNullSpaceDestroy(&mat->nullsp);
8291: mat->nullsp = nullsp;
8292: if (mat->symmetric_set && mat->symmetric) {
8293: MatSetTransposeNullSpace(mat,nullsp);
8294: }
8295: return(0);
8296: }
8298: /*@
8299: MatGetTransposeNullSpace - retrieves the null space of the transpose of a matrix.
8301: Logically Collective on Mat and MatNullSpace
8303:    Input Parameter:
8304: .  mat - the matrix

   Output Parameter:
8305: .  nullsp - the null space object, NULL if not set
8307: Level: developer
8309: Concepts: null space^attaching to matrix
8311: .seealso: MatCreate(), MatNullSpaceCreate(), MatSetNearNullSpace(), MatSetTransposeNullSpace(), MatSetNullSpace(), MatGetNullSpace()
8312: @*/
8313: PetscErrorCode MatGetTransposeNullSpace(Mat mat, MatNullSpace *nullsp)
8314: {
8319: *nullsp = (mat->symmetric_set && mat->symmetric && !mat->transnullsp) ? mat->nullsp : mat->transnullsp;
8320: return(0);
8321: }
8323: /*@
8324: MatSetTransposeNullSpace - attaches a null space to a matrix.
8326: Logically Collective on Mat and MatNullSpace
8328: Input Parameters:
8329: + mat - the matrix
8330: - nullsp - the null space object
8332: Level: advanced
8334: Notes:
8335: For inconsistent singular systems (linear systems where the right hand side is not in the range of the operator) this allows the linear system to be solved in a least squares sense.
8336: You must also call MatSetNullSpace()
8339:       The fundamental theorem of linear algebra (Gilbert Strang, Introduction to Applied Mathematics, page 72) states that for a matrix A
8340:       mapping R^n to R^m (m rows, n columns), R^n is the direct sum of the null space of A, n(A), and the range of A^T, R(A^T).
8341:       Similarly, R^m is the direct sum of n(A^T) and R(A). Hence the linear system A x = b has a solution only if b is in R(A) (or, equivalently, b is orthogonal to
8342:       n(A^T)), and if x is a solution then x + alpha n(A) is a solution for any alpha. The minimum norm solution is orthogonal to n(A). For problems without a solution,
8343:       the solution that minimizes the norm of the residual (the least squares solution) can be obtained by solving A x = \hat{b} where \hat{b} is b orthogonalized against n(A^T).
8345: Krylov solvers can produce the minimal norm solution to the least squares problem by utilizing MatNullSpaceRemove().
8347: Concepts: null space^attaching to matrix
8349: .seealso: MatCreate(), MatNullSpaceCreate(), MatSetNearNullSpace(), MatGetNullSpace(), MatSetNullSpace(), MatGetTransposeNullSpace(), MatNullSpaceRemove()
8350: @*/
8351: PetscErrorCode MatSetTransposeNullSpace(Mat mat,MatNullSpace nullsp)
8352: {
8358: if (nullsp) {PetscObjectReference((PetscObject)nullsp);}
8359: MatNullSpaceDestroy(&mat->transnullsp);
8360: mat->transnullsp = nullsp;
8361: return(0);
8362: }
8364: /*@
8365:    MatSetNearNullSpace - attaches a null space to a matrix, which is often the null space (rigid body modes) of the operator without boundary conditions.
8366:        This null space will be used to provide near null space vectors to a multigrid preconditioner built from this matrix.
8368: Logically Collective on Mat and MatNullSpace
8370: Input Parameters:
8371: + mat - the matrix
8372: - nullsp - the null space object
8374: Level: advanced
8376: Notes:
8377: Overwrites any previous near null space that may have been attached
8379: You can remove the previously attached near null space by calling this routine with a nullsp of NULL
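
   Example of Usage (a minimal sketch for a 3D elasticity operator; error checking is omitted and coords is an assumed, already created coordinate Vec with block size 3):
.vb
   MatNullSpace nearnullsp;
   MatNullSpaceCreateRigidBody(coords,&nearnullsp);
   MatSetNearNullSpace(A,nearnullsp);
   MatNullSpaceDestroy(&nearnullsp);
.ve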
8381: Concepts: null space^attaching to matrix
8383: .seealso: MatCreate(), MatNullSpaceCreate(), MatSetNullSpace(), MatNullSpaceCreateRigidBody(), MatGetNearNullSpace()
8384: @*/
8385: PetscErrorCode MatSetNearNullSpace(Mat mat,MatNullSpace nullsp)
8386: {
8393: MatCheckPreallocated(mat,1);
8394: if (nullsp) {PetscObjectReference((PetscObject)nullsp);}
8395: MatNullSpaceDestroy(&mat->nearnullsp);
8396: mat->nearnullsp = nullsp;
8397: return(0);
8398: }
8400: /*@
8401: MatGetNearNullSpace - Gets the null space attached with MatSetNearNullSpace()
8403: Not Collective
8405: Input Parameters:
8406: . mat - the matrix
8408: Output Parameters:
8409: . nullsp - the null space object, NULL if not set
8411: Level: developer
8413: Concepts: null space^attaching to matrix
8415: .seealso: MatSetNearNullSpace(), MatGetNullSpace(), MatNullSpaceCreate()
8416: @*/
8417: PetscErrorCode MatGetNearNullSpace(Mat mat,MatNullSpace *nullsp)
8418: {
8423: MatCheckPreallocated(mat,1);
8424: *nullsp = mat->nearnullsp;
8425: return(0);
8426: }
8428: /*@C
8429: MatICCFactor - Performs in-place incomplete Cholesky factorization of matrix.
8431: Collective on Mat
8433: Input Parameters:
8434: + mat - the matrix
8435: . row - row/column permutation
8436: - info - information on the desired factorization process, including the expected fill factor (>= 1.0) and the level of fill for ICC(k)
8439: Notes:
8440: Probably really in-place only when level of fill is zero, otherwise allocates
8441: new space to store factored matrix and deletes previous memory.
8443: Most users should employ the simplified KSP interface for linear solvers
8444: instead of working directly with matrix algebra routines such as this.
8445: See, e.g., KSPCreate().
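
   Example of Usage (a rough sketch only; error checking is omitted, A is an assumed assembled symmetric matrix whose type supports in-place ICC, and the natural ordering is used):
.vb
   IS            rowperm,colperm;
   MatFactorInfo info;
   MatGetOrdering(A,MATORDERINGNATURAL,&rowperm,&colperm);
   MatFactorInfoInitialize(&info);
   info.levels = 0;
   info.fill   = 1.0;
   MatICCFactor(A,rowperm,&info);
   ISDestroy(&rowperm);
   ISDestroy(&colperm);
.ve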
8447: Level: developer
8449: Concepts: matrices^incomplete Cholesky factorization
8450: Concepts: Cholesky factorization
8452: .seealso: MatICCFactorSymbolic(), MatLUFactorNumeric(), MatCholeskyFactor()
8454: Developer Note: fortran interface is not autogenerated as the f90
8455: interface definition cannot be generated correctly [due to MatFactorInfo]
8457: @*/
8458: PetscErrorCode MatICCFactor(Mat mat,IS row,const MatFactorInfo *info)
8459: {
8467: if (mat->rmap->N != mat->cmap->N) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONG,"matrix must be square");
8468: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
8469: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
8470: if (!mat->ops->iccfactor) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
8471: MatCheckPreallocated(mat,1);
8472: (*mat->ops->iccfactor)(mat,row,info);
8473: PetscObjectStateIncrease((PetscObject)mat);
8474: return(0);
8475: }
8477: /*@
8478: MatDiagonalScaleLocal - Scales columns of a matrix given the scaling values including the
8479: ghosted ones.
8481: Not Collective
8483: Input Parameters:
8484: + mat - the matrix
8485: - diag - the diagonal values, including ghost ones
8487: Level: developer
8489: Notes:
8490: Works only for MPIAIJ and MPIBAIJ matrices
8492: .seealso: MatDiagonalScale()
8493: @*/
8494: PetscErrorCode MatDiagonalScaleLocal(Mat mat,Vec diag)
8495: {
8497: PetscMPIInt size;
8504: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Matrix must be already assembled");
8505: PetscLogEventBegin(MAT_Scale,mat,0,0,0);
8506: MPI_Comm_size(PetscObjectComm((PetscObject)mat),&size);
8507: if (size == 1) {
8508: PetscInt n,m;
8509: VecGetSize(diag,&n);
8510: MatGetSize(mat,0,&m);
8511: if (m == n) {
8512: MatDiagonalScale(mat,0,diag);
8513: } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Only supported for sequential matrices when no ghost points/periodic conditions");
8514: } else {
8515: PetscUseMethod(mat,"MatDiagonalScaleLocal_C",(Mat,Vec),(mat,diag));
8516: }
8517: PetscLogEventEnd(MAT_Scale,mat,0,0,0);
8518: PetscObjectStateIncrease((PetscObject)mat);
8519: return(0);
8520: }
8522: /*@
8523: MatGetInertia - Gets the inertia from a factored matrix
8525: Collective on Mat
8527: Input Parameter:
8528: . mat - the matrix
8530: Output Parameters:
8531: + nneg - number of negative eigenvalues
8532: . nzero - number of zero eigenvalues
8533: - npos - number of positive eigenvalues
8535: Level: advanced
8537: Notes:
8538: Matrix must have been factored by MatCholeskyFactor()
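
   Example of Usage (a rough sketch; error checking is omitted, A is an assumed assembled symmetric matrix, and a solver package that supports inertia computation, here illustratively MUMPS, is assumed to be available):
.vb
   Mat           F;
   IS            rowperm,colperm;
   MatFactorInfo info;
   PetscInt      nneg,nzero,npos;
   MatGetOrdering(A,MATORDERINGNATURAL,&rowperm,&colperm);
   MatFactorInfoInitialize(&info);
   MatGetFactor(A,MATSOLVERMUMPS,MAT_FACTOR_CHOLESKY,&F);
   MatCholeskyFactorSymbolic(F,A,rowperm,&info);
   MatCholeskyFactorNumeric(F,A,&info);
   MatGetInertia(F,&nneg,&nzero,&npos);
   ISDestroy(&rowperm);
   ISDestroy(&colperm);
   MatDestroy(&F);
.ve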
8541: @*/
8542: PetscErrorCode MatGetInertia(Mat mat,PetscInt *nneg,PetscInt *nzero,PetscInt *npos)
8543: {
8549: if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
8550: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Numeric factor mat is not assembled");
8551: if (!mat->ops->getinertia) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
8552: (*mat->ops->getinertia)(mat,nneg,nzero,npos);
8553: return(0);
8554: }
8556: /* ----------------------------------------------------------------*/
8557: /*@C
8558: MatSolves - Solves A x = b, given a factored matrix, for a collection of vectors
8560: Neighbor-wise Collective on Mat and Vecs
8562: Input Parameters:
8563: + mat - the factored matrix
8564: - b - the right-hand-side vectors
8566: Output Parameter:
8567: . x - the result vectors
8569: Notes:
8570: The vectors b and x cannot be the same. I.e., one cannot
8571: call MatSolves(A,x,x).
8574: Most users should employ the simplified KSP interface for linear solvers
8575: instead of working directly with matrix algebra routines such as this.
8576: See, e.g., KSPCreate().
8578: Level: developer
8580: Concepts: matrices^triangular solves
8582: .seealso: MatSolveAdd(), MatSolveTranspose(), MatSolveTransposeAdd(), MatSolve()
8583: @*/
8584: PetscErrorCode MatSolves(Mat mat,Vecs b,Vecs x)
8585: {
8591: if (x == b) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"x and b must be different vectors");
8592: if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
8593: if (!mat->rmap->N && !mat->cmap->N) return(0);
8595: if (!mat->ops->solves) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
8596: MatCheckPreallocated(mat,1);
8597: PetscLogEventBegin(MAT_Solves,mat,0,0,0);
8598: (*mat->ops->solves)(mat,b,x);
8599: PetscLogEventEnd(MAT_Solves,mat,0,0,0);
8600: return(0);
8601: }
8603: /*@
8604: MatIsSymmetric - Test whether a matrix is symmetric
8606: Collective on Mat
8608: Input Parameters:
8609: + A - the matrix to test
8610: - tol - difference between value and its transpose less than this amount counts as equal (use 0.0 for exact transpose)
8612: Output Parameter:
8613: . flg - the result
8615: Notes:
8616: For real numbers MatIsSymmetric() and MatIsHermitian() return identical results
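
   Example of Usage (a minimal sketch; error checking is omitted and A is an assumed assembled matrix):
.vb
   PetscBool flg;
   MatIsSymmetric(A,1.e-10,&flg);
   if (flg) {
     MatSetOption(A,MAT_SYMMETRIC,PETSC_TRUE);
   }
.ve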
8618: Level: intermediate
8620: Concepts: matrix^symmetry
8622: .seealso: MatTranspose(), MatIsTranspose(), MatIsHermitian(), MatIsStructurallySymmetric(), MatSetOption(), MatIsSymmetricKnown()
8623: @*/
8624: PetscErrorCode MatIsSymmetric(Mat A,PetscReal tol,PetscBool *flg)
8625: {
8632: if (!A->symmetric_set) {
8633: if (!A->ops->issymmetric) {
8634: MatType mattype;
8635: MatGetType(A,&mattype);
8636: SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Matrix of type <%s> does not support checking for symmetric",mattype);
8637: }
8638: (*A->ops->issymmetric)(A,tol,flg);
8639: if (!tol) {
8640: A->symmetric_set = PETSC_TRUE;
8641: A->symmetric = *flg;
8642: if (A->symmetric) {
8643: A->structurally_symmetric_set = PETSC_TRUE;
8644: A->structurally_symmetric = PETSC_TRUE;
8645: }
8646: }
8647: } else if (A->symmetric) {
8648: *flg = PETSC_TRUE;
8649: } else if (!tol) {
8650: *flg = PETSC_FALSE;
8651: } else {
8652: if (!A->ops->issymmetric) {
8653: MatType mattype;
8654: MatGetType(A,&mattype);
8655: SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Matrix of type <%s> does not support checking for symmetric",mattype);
8656: }
8657: (*A->ops->issymmetric)(A,tol,flg);
8658: }
8659: return(0);
8660: }
8662: /*@
8663: MatIsHermitian - Test whether a matrix is Hermitian
8665: Collective on Mat
8667: Input Parameters:
8668: + A - the matrix to test
8669: - tol - difference between value and its transpose less than this amount counts as equal (use 0.0 for exact Hermitian)
8671: Output Parameter:
8672: . flg - the result
8674: Level: intermediate
8676: Concepts: matrix^symmetry
8678: .seealso: MatTranspose(), MatIsTranspose(), MatIsHermitian(), MatIsStructurallySymmetric(), MatSetOption(),
8679: MatIsSymmetricKnown(), MatIsSymmetric()
8680: @*/
8681: PetscErrorCode MatIsHermitian(Mat A,PetscReal tol,PetscBool *flg)
8682: {
8689: if (!A->hermitian_set) {
8690: if (!A->ops->ishermitian) {
8691: MatType mattype;
8692: MatGetType(A,&mattype);
8693: SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Matrix of type <%s> does not support checking for hermitian",mattype);
8694: }
8695: (*A->ops->ishermitian)(A,tol,flg);
8696: if (!tol) {
8697: A->hermitian_set = PETSC_TRUE;
8698: A->hermitian = *flg;
8699: if (A->hermitian) {
8700: A->structurally_symmetric_set = PETSC_TRUE;
8701: A->structurally_symmetric = PETSC_TRUE;
8702: }
8703: }
8704: } else if (A->hermitian) {
8705: *flg = PETSC_TRUE;
8706: } else if (!tol) {
8707: *flg = PETSC_FALSE;
8708: } else {
8709: if (!A->ops->ishermitian) {
8710: MatType mattype;
8711: MatGetType(A,&mattype);
8712: SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Matrix of type <%s> does not support checking for hermitian",mattype);
8713: }
8714: (*A->ops->ishermitian)(A,tol,flg);
8715: }
8716: return(0);
8717: }
8719: /*@
8720: MatIsSymmetricKnown - Checks the flag on the matrix to see if it is symmetric.
8722: Not Collective
8724: Input Parameter:
8725: . A - the matrix to check
8727: Output Parameters:
8728: + set - if the symmetric flag is set (this tells you if the next flag is valid)
8729: - flg - the result
8731: Level: advanced
8733: Concepts: matrix^symmetry
8735: Note: Does not check the matrix values directly, so this may return unknown (set = PETSC_FALSE). Use MatIsSymmetric()
8736: if you want it explicitly checked
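
   Example of Usage (a minimal sketch; error checking is omitted; the explicit, potentially expensive, check is performed only when the flag has not been set):
.vb
   PetscBool set,flg;
   MatIsSymmetricKnown(A,&set,&flg);
   if (!set) {
     MatIsSymmetric(A,0.0,&flg);
   }
.ve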
8738: .seealso: MatTranspose(), MatIsTranspose(), MatIsHermitian(), MatIsStructurallySymmetric(), MatSetOption(), MatIsSymmetric()
8739: @*/
8740: PetscErrorCode MatIsSymmetricKnown(Mat A,PetscBool *set,PetscBool *flg)
8741: {
8746: if (A->symmetric_set) {
8747: *set = PETSC_TRUE;
8748: *flg = A->symmetric;
8749: } else {
8750: *set = PETSC_FALSE;
8751: }
8752: return(0);
8753: }
8755: /*@
8756: MatIsHermitianKnown - Checks the flag on the matrix to see if it is hermitian.
8758: Not Collective
8760: Input Parameter:
8761: . A - the matrix to check
8763: Output Parameters:
8764: + set - if the hermitian flag is set (this tells you if the next flag is valid)
8765: - flg - the result
8767: Level: advanced
8769: Concepts: matrix^symmetry
8771: Note: Does not check the matrix values directly, so this may return unknown (set = PETSC_FALSE). Use MatIsHermitian()
8772: if you want it explicitly checked
8774: .seealso: MatTranspose(), MatIsTranspose(), MatIsHermitian(), MatIsStructurallySymmetric(), MatSetOption(), MatIsSymmetric()
8775: @*/
8776: PetscErrorCode MatIsHermitianKnown(Mat A,PetscBool *set,PetscBool *flg)
8777: {
8782: if (A->hermitian_set) {
8783: *set = PETSC_TRUE;
8784: *flg = A->hermitian;
8785: } else {
8786: *set = PETSC_FALSE;
8787: }
8788: return(0);
8789: }
8791: /*@
8792: MatIsStructurallySymmetric - Test whether a matrix is structurally symmetric
8794: Collective on Mat
8796: Input Parameter:
8797: . A - the matrix to test
8800: Output Parameter:
8800: . flg - the result
8802: Level: intermediate
8804: Concepts: matrix^symmetry
8806: .seealso: MatTranspose(), MatIsTranspose(), MatIsHermitian(), MatIsSymmetric(), MatSetOption()
8807: @*/
8808: PetscErrorCode MatIsStructurallySymmetric(Mat A,PetscBool *flg)
8809: {
8815: if (!A->structurally_symmetric_set) {
8816: if (!A->ops->isstructurallysymmetric) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Matrix does not support checking for structural symmetric");
8817: (*A->ops->isstructurallysymmetric)(A,&A->structurally_symmetric);
8819: A->structurally_symmetric_set = PETSC_TRUE;
8820: }
8821: *flg = A->structurally_symmetric;
8822: return(0);
8823: }
8825: /*@
8826: MatStashGetInfo - Gets how many values are currently in the matrix stash, i.e. need
8827: to be communicated to other processors during the MatAssemblyBegin/End() process
8829: Not collective
8831: Input Parameter:
8832: . mat - the matrix
8834: Output Parameters:
8835: + nstash - the size of the stash
8836: . reallocs - the number of additional mallocs incurred.
8837: . bnstash - the size of the block stash
8838: - breallocs - the number of additional mallocs incurred in the block stash
8840: Level: advanced
8842: .seealso: MatAssemblyBegin(), MatAssemblyEnd(), Mat, MatStashSetInitialSize()
8844: @*/
8845: PetscErrorCode MatStashGetInfo(Mat mat,PetscInt *nstash,PetscInt *reallocs,PetscInt *bnstash,PetscInt *breallocs)
8846: {
8850: MatStashGetInfo_Private(&mat->stash,nstash,reallocs);
8851: MatStashGetInfo_Private(&mat->bstash,bnstash,breallocs);
8852: return(0);
8853: }
8855: /*@C
8856: MatCreateVecs - Get vector(s) compatible with the matrix, i.e. with the same
8857: parallel layout
8859: Collective on Mat
8861: Input Parameter:
8862: . mat - the matrix
8864: Output Parameters:
8865: + right - (optional) vector that the matrix can be multiplied against
8866: - left - (optional) vector that the matrix vector product can be stored in
8868: Notes:
8869: The blocksize of the returned vectors is determined by the row and column block sizes set with MatSetBlockSizes() or the single blocksize (same for both) set by MatSetBlockSize().
8872: These are new vectors which are not owned by the Mat; they should be destroyed with VecDestroy() when no longer needed
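
   Example of Usage (a minimal sketch; error checking is omitted and A is an assumed assembled matrix):
.vb
   Vec x,y;
   MatCreateVecs(A,&x,&y);
   VecSet(x,1.0);
   MatMult(A,x,y);
   VecDestroy(&x);
   VecDestroy(&y);
.ve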
8874: Level: advanced
8876: .seealso: MatCreate(), VecDestroy()
8877: @*/
8878: PetscErrorCode MatCreateVecs(Mat mat,Vec *right,Vec *left)
8879: {
8885: if (mat->ops->getvecs) {
8886: (*mat->ops->getvecs)(mat,right,left);
8887: } else {
8888: PetscInt rbs,cbs;
8889: MatGetBlockSizes(mat,&rbs,&cbs);
8890: if (right) {
8891: if (mat->cmap->n < 0) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"PetscLayout for columns not yet setup");
8892: VecCreate(PetscObjectComm((PetscObject)mat),right);
8893: VecSetSizes(*right,mat->cmap->n,PETSC_DETERMINE);
8894: VecSetBlockSize(*right,cbs);
8895: VecSetType(*right,mat->defaultvectype);
8896: PetscLayoutReference(mat->cmap,&(*right)->map);
8897: }
8898: if (left) {
8899: if (mat->rmap->n < 0) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"PetscLayout for rows not yet setup");
8900: VecCreate(PetscObjectComm((PetscObject)mat),left);
8901: VecSetSizes(*left,mat->rmap->n,PETSC_DETERMINE);
8902: VecSetBlockSize(*left,rbs);
8903: VecSetType(*left,mat->defaultvectype);
8904: PetscLayoutReference(mat->rmap,&(*left)->map);
8905: }
8906: }
8907: return(0);
8908: }
8910: /*@C
8911: MatFactorInfoInitialize - Initializes a MatFactorInfo data structure
8912: with default values.
8914: Not Collective
8916: Input Parameter:
8917: . info - the MatFactorInfo data structure
8920: Notes:
8921: The solvers are generally used through the KSP and PC objects, for example
8922: PCLU, PCILU, PCCHOLESKY, PCICC
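
   Example of Usage (a minimal sketch; error checking is omitted, F is an assumed factor matrix obtained with MatGetFactor() for MAT_FACTOR_ILU, and rowperm/colperm are assumed index sets from MatGetOrdering()):
.vb
   MatFactorInfo info;
   MatFactorInfoInitialize(&info);
   info.fill   = 2.0;
   info.levels = 1;
   MatILUFactorSymbolic(F,A,rowperm,colperm,&info);
   MatLUFactorNumeric(F,A,&info);
.ve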
8924: Level: developer
8926: .seealso: MatFactorInfo
8928: Developer Note: fortran interface is not autogenerated as the f90
8929: interface definition cannot be generated correctly [due to MatFactorInfo]
8931: @*/
8933: PetscErrorCode MatFactorInfoInitialize(MatFactorInfo *info)
8934: {
8938: PetscMemzero(info,sizeof(MatFactorInfo));
8939: return(0);
8940: }
8942: /*@
8943: MatFactorSetSchurIS - Set indices corresponding to the Schur complement you wish to have computed
8945: Collective on Mat
8947: Input Parameters:
8948: + mat - the factored matrix
8949: - is - the index set defining the Schur indices (0-based)
8951: Notes:
8952: Call MatFactorSolveSchurComplement() or MatFactorSolveSchurComplementTranspose() after this call to solve a Schur complement system.
8954: You can call MatFactorGetSchurComplement() or MatFactorCreateSchurComplement() after this call.
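
   Example of Usage (a rough sketch; error checking is omitted, A is an assumed assembled matrix, is lists the Schur indices, rhs and sol are assumed Vecs sized to match the Schur complement, and MUMPS is used purely as an illustration):
.vb
   Mat           F;
   IS            rowperm,colperm;
   MatFactorInfo info;
   MatGetOrdering(A,MATORDERINGNATURAL,&rowperm,&colperm);
   MatFactorInfoInitialize(&info);
   MatGetFactor(A,MATSOLVERMUMPS,MAT_FACTOR_LU,&F);
   MatFactorSetSchurIS(F,is);
   MatLUFactorSymbolic(F,A,rowperm,colperm,&info);
   MatLUFactorNumeric(F,A,&info);
   MatFactorSolveSchurComplement(F,rhs,sol);
.ve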
8956: Level: developer
8958: Concepts:
8960: .seealso: MatGetFactor(), MatFactorGetSchurComplement(), MatFactorRestoreSchurComplement(), MatFactorCreateSchurComplement(), MatFactorSolveSchurComplement(),
8961: MatFactorSolveSchurComplementTranspose(), MatFactorSolveSchurComplement()
8963: @*/
8964: PetscErrorCode MatFactorSetSchurIS(Mat mat,IS is)
8965: {
8966: PetscErrorCode ierr,(*f)(Mat,IS);
8974: if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Only for factored matrix");
8975: PetscObjectQueryFunction((PetscObject)mat,"MatFactorSetSchurIS_C",&f);
8976: if (!f) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"The selected MatSolverType does not support Schur complement computation. You should use MATSOLVERMUMPS or MATSOLVERMKL_PARDISO");
8977: if (mat->schur) {
8978: MatDestroy(&mat->schur);
8979: }
8980: (*f)(mat,is);
8981: if (!mat->schur) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_PLIB,"Schur complement has not been created");
8982: MatFactorSetUpInPlaceSchur_Private(mat);
8983: return(0);
8984: }
8986: /*@
8987: MatFactorCreateSchurComplement - Create a Schur complement matrix object using Schur data computed during the factorization step
8989: Logically Collective on Mat
8991: Input Parameters:
8992: + F - the factored matrix obtained by calling MatGetFactor() from PETSc-MUMPS interface
8993: . S - location where to return the Schur complement, can be NULL
8994: - status - the status of the Schur complement matrix, can be NULL
8996: Notes:
8997: You must call MatFactorSetSchurIS() before calling this routine.
8999: The routine provides a copy of the Schur matrix stored within the solver data structures.
9000: The caller must destroy the object when it is no longer needed.
9001: If MatFactorInvertSchurComplement() has been called, the routine gets back the inverse.
9003: Use MatFactorGetSchurComplement() to get access to the Schur complement matrix inside the factored matrix instead of making a copy of it (which this function does)
9005: Developer Notes:
9006: The reason this routine exists is that the representation of the Schur complement within the factor matrix may be different from a standard PETSc
9007: matrix representation, and we normally do not want to spend the time or memory to make a copy as a regular PETSc matrix.
9009: See MatCreateSchurComplement() or MatGetSchurComplement() for ways to create virtual or approximate Schur complements.
9011: Level: advanced
9013: References:
9015: .seealso: MatGetFactor(), MatFactorSetSchurIS(), MatFactorGetSchurComplement(), MatFactorSchurStatus
9016: @*/
9017: PetscErrorCode MatFactorCreateSchurComplement(Mat F,Mat* S,MatFactorSchurStatus* status)
9018: {
9025: if (S) {
9026: PetscErrorCode (*f)(Mat,Mat*);
9028: PetscObjectQueryFunction((PetscObject)F,"MatFactorCreateSchurComplement_C",&f);
9029: if (f) {
9030: (*f)(F,S);
9031: } else {
9032: MatDuplicate(F->schur,MAT_COPY_VALUES,S);
9033: }
9034: }
9035: if (status) *status = F->schur_status;
9036: return(0);
9037: }
9039: /*@
9040: MatFactorGetSchurComplement - Gets access to a Schur complement matrix using the current Schur data within a factored matrix
9042: Logically Collective on Mat
9044: Input Parameters:
9045: + F - the factored matrix obtained by calling MatGetFactor()
9046: . *S - location where to return the Schur complement, can be NULL
9047: - status - the status of the Schur complement matrix, can be NULL
9049: Notes:
9050: You must call MatFactorSetSchurIS() before calling this routine.
9052: Schur complement mode is currently implemented for sequential matrices.
9053: The routine returns the Schur complement stored within the data structures of the solver.
9054: If MatFactorInvertSchurComplement() has previously been called, the returned matrix is actually the inverse of the Schur complement.
9055: The returned matrix should not be destroyed; the caller should call MatFactorRestoreSchurComplement() when the object is no longer needed.
9057: Use MatFactorCreateSchurComplement() to create a copy of the Schur complement matrix that is within a factored matrix
9059: See MatCreateSchurComplement() or MatGetSchurComplement() for ways to create virtual or approximate Schur complements.
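
   Example of Usage (a minimal sketch; error checking is omitted and F is an assumed factored matrix for which MatFactorSetSchurIS() was called before the factorization):
.vb
   Mat                  S;
   MatFactorSchurStatus status;
   MatFactorGetSchurComplement(F,&S,&status);
   ... use S here without destroying it ...
   MatFactorRestoreSchurComplement(F,&S,status);
.ve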
9061: Level: advanced
9063: References:
9065: .seealso: MatGetFactor(), MatFactorSetSchurIS(), MatFactorRestoreSchurComplement(), MatFactorCreateSchurComplement(), MatFactorSchurStatus
9066: @*/
9067: PetscErrorCode MatFactorGetSchurComplement(Mat F,Mat* S,MatFactorSchurStatus* status)
9068: {
9073: if (S) *S = F->schur;
9074: if (status) *status = F->schur_status;
9075: return(0);
9076: }
9078: /*@
9079: MatFactorRestoreSchurComplement - Restore the Schur complement matrix object obtained from a call to MatFactorGetSchurComplement
9081: Logically Collective on Mat
9083: Input Parameters:
9084: + F - the factored matrix obtained by calling MatGetFactor()
9085: . *S - location where the Schur complement is stored
9086: - status - the status of the Schur complement matrix (see MatFactorSchurStatus)
9090: Level: advanced
9092: References:
9094: .seealso: MatGetFactor(), MatFactorSetSchurIS(), MatFactorRestoreSchurComplement(), MatFactorCreateSchurComplement(), MatFactorSchurStatus
9095: @*/
9096: PetscErrorCode MatFactorRestoreSchurComplement(Mat F,Mat* S,MatFactorSchurStatus status)
9097: {
9102: if (S) {
9104: *S = NULL;
9105: }
9106: F->schur_status = status;
9107: MatFactorUpdateSchurStatus_Private(F);
9108: return(0);
9109: }
9111: /*@
9112: MatFactorSolveSchurComplementTranspose - Solve the transpose of the Schur complement system computed during the factorization step
9114: Logically Collective on Mat
9116: Input Parameters:
9117: + F - the factored matrix obtained by calling MatGetFactor()
9118: . rhs - location where the right hand side of the Schur complement system is stored
9119: - sol - location where the solution of the Schur complement system has to be returned
9121: Notes:
9122: The sizes of the vectors should match the size of the Schur complement
9124: Must be called after MatFactorSetSchurIS()
9126: Level: advanced
9128: References:
9130: .seealso: MatGetFactor(), MatFactorSetSchurIS(), MatFactorSolveSchurComplement()
9131: @*/
9132: PetscErrorCode MatFactorSolveSchurComplementTranspose(Mat F, Vec rhs, Vec sol)
9133: {
9145: MatFactorFactorizeSchurComplement(F);
9146: switch (F->schur_status) {
9147: case MAT_FACTOR_SCHUR_FACTORED:
9148: MatSolveTranspose(F->schur,rhs,sol);
9149: break;
9150: case MAT_FACTOR_SCHUR_INVERTED:
9151: MatMultTranspose(F->schur,rhs,sol);
9152: break;
9153: default:
9154: SETERRQ1(PetscObjectComm((PetscObject)F),PETSC_ERR_SUP,"Unhandled MatFactorSchurStatus %D",F->schur_status);
9155: break;
9156: }
9157: return(0);
9158: }
9160: /*@
9161: MatFactorSolveSchurComplement - Solve the Schur complement system computed during the factorization step
9163: Logically Collective on Mat
9165: Input Parameters:
9166: + F - the factored matrix obtained by calling MatGetFactor()
9167: . rhs - location where the right hand side of the Schur complement system is stored
9168: - sol - location where the solution of the Schur complement system has to be returned
9170: Notes:
9171: The sizes of the vectors should match the size of the Schur complement
9173: Must be called after MatFactorSetSchurIS()
9175: Level: advanced
9177: References:
9179: .seealso: MatGetFactor(), MatFactorSetSchurIS(), MatFactorSolveSchurComplementTranspose()
9180: @*/
9181: PetscErrorCode MatFactorSolveSchurComplement(Mat F, Vec rhs, Vec sol)
9182: {
9194: MatFactorFactorizeSchurComplement(F);
9195: switch (F->schur_status) {
9196: case MAT_FACTOR_SCHUR_FACTORED:
9197: MatSolve(F->schur,rhs,sol);
9198: break;
9199: case MAT_FACTOR_SCHUR_INVERTED:
9200: MatMult(F->schur,rhs,sol);
9201: break;
9202: default:
9203: SETERRQ1(PetscObjectComm((PetscObject)F),PETSC_ERR_SUP,"Unhandled MatFactorSchurStatus %D",F->schur_status);
9204: break;
9205: }
9206: return(0);
9207: }
9209: /*@
9210: MatFactorInvertSchurComplement - Invert the Schur complement matrix computed during the factorization step
9212: Logically Collective on Mat
9214: Input Parameter:
9215: . F - the factored matrix obtained by calling MatGetFactor()
9217: Notes:
9218: Must be called after MatFactorSetSchurIS().
9220: Call MatFactorGetSchurComplement() or MatFactorCreateSchurComplement() AFTER this call to actually compute the inverse and get access to it.
9222: Level: advanced
9224: References:
9226: .seealso: MatGetFactor(), MatFactorSetSchurIS(), MatFactorGetSchurComplement(), MatFactorCreateSchurComplement()
9227: @*/
9228: PetscErrorCode MatFactorInvertSchurComplement(Mat F)
9229: {
9235: if (F->schur_status == MAT_FACTOR_SCHUR_INVERTED) return(0);
9236: MatFactorFactorizeSchurComplement(F);
9237: MatFactorInvertSchurComplement_Private(F);
9238: F->schur_status = MAT_FACTOR_SCHUR_INVERTED;
9239: return(0);
9240: }
9242: /*@
9243: MatFactorFactorizeSchurComplement - Factorize the Schur complement matrix computed during the factorization step
9245: Logically Collective on Mat
9247: Input Parameter:
9248: . F - the factored matrix obtained by calling MatGetFactor()
9250: Notes:
9251: Must be called after MatFactorSetSchurIS().
9253: Level: advanced
9255: References:
9257: .seealso: MatGetFactor(), MatFactorSetSchurIS(), MatFactorInvertSchurComplement()
9258: @*/
9259: PetscErrorCode MatFactorFactorizeSchurComplement(Mat F)
9260: {
9266: if (F->schur_status == MAT_FACTOR_SCHUR_INVERTED || F->schur_status == MAT_FACTOR_SCHUR_FACTORED) return(0);
9267: MatFactorFactorizeSchurComplement_Private(F);
9268: F->schur_status = MAT_FACTOR_SCHUR_FACTORED;
9269: return(0);
9270: }
9272: /*@
9273: MatPtAP - Creates the matrix product C = P^T * A * P
9275: Neighbor-wise Collective on Mat
9277: Input Parameters:
9278: + A - the matrix
9279: . P - the projection matrix
9280: . scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
9281: - fill - expected fill as ratio of nnz(C)/(nnz(A) + nnz(P)), use PETSC_DEFAULT if you do not have a good estimate;
9282: if the result is a dense matrix this is irrelevant
9284: Output Parameters:
9285: . C - the product matrix
9287: Notes:
9288: C will be created and must be destroyed by the user with MatDestroy().
9290: This routine is currently only implemented for pairs of sequential dense matrices, AIJ matrices and classes
9291: which inherit from AIJ.
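
   Example of Usage (a minimal sketch; error checking is omitted; A and P are assumed assembled AIJ matrices, for instance a fine-grid operator and an interpolation):
.vb
   Mat C;
   MatPtAP(A,P,MAT_INITIAL_MATRIX,PETSC_DEFAULT,&C);
   ... change the numerical values of A without changing its nonzero structure ...
   MatPtAP(A,P,MAT_REUSE_MATRIX,PETSC_DEFAULT,&C);
   MatDestroy(&C);
.ve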
9293: Level: intermediate
9295: .seealso: MatPtAPSymbolic(), MatPtAPNumeric(), MatMatMult(), MatRARt()
9296: @*/
9297: PetscErrorCode MatPtAP(Mat A,Mat P,MatReuse scall,PetscReal fill,Mat *C)
9298: {
9300: PetscErrorCode (*fA)(Mat,Mat,MatReuse,PetscReal,Mat*);
9301: PetscErrorCode (*fP)(Mat,Mat,MatReuse,PetscReal,Mat*);
9302: PetscErrorCode (*ptap)(Mat,Mat,MatReuse,PetscReal,Mat*)=NULL;
9303: PetscBool sametype;
9308: MatCheckPreallocated(A,1);
9309: if (scall == MAT_INPLACE_MATRIX) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Inplace product not supported");
9310: if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9311: if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9314: MatCheckPreallocated(P,2);
9315: if (!P->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9316: if (P->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9318: if (A->rmap->N != A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix A must be square, %D != %D",A->rmap->N,A->cmap->N);
9319: if (P->rmap->N != A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",P->rmap->N,A->cmap->N);
9320: if (fill == PETSC_DEFAULT || fill == PETSC_DECIDE) fill = 2.0;
9321: if (fill < 1.0) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Expected fill=%g must be >= 1.0",(double)fill);
9323: if (scall == MAT_REUSE_MATRIX) {
9327: if (!(*C)->ops->ptapnumeric) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_WRONGSTATE,"MatPtAPNumeric implementation is missing. You cannot use MAT_REUSE_MATRIX");
9328: PetscLogEventBegin(MAT_PtAP,A,P,0,0);
9329: PetscLogEventBegin(MAT_PtAPNumeric,A,P,0,0);
9330: (*(*C)->ops->ptapnumeric)(A,P,*C);
9331: PetscLogEventEnd(MAT_PtAPNumeric,A,P,0,0);
9332: PetscLogEventEnd(MAT_PtAP,A,P,0,0);
9333: return(0);
9334: }
9336: if (fill == PETSC_DEFAULT || fill == PETSC_DECIDE) fill = 2.0;
9337: if (fill < 1.0) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Expected fill=%g must be >= 1.0",(double)fill);
9339: fA = A->ops->ptap;
9340: fP = P->ops->ptap;
9341: PetscStrcmp(((PetscObject)A)->type_name,((PetscObject)P)->type_name,&sametype);
9342: if (fP == fA && sametype) {
9343: if (!fA) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"MatPtAP not supported for A of type %s",((PetscObject)A)->type_name);
9344: ptap = fA;
9345: } else {
9346: /* dispatch based on the type of A and P from their PetscObject's PetscFunctionLists. */
9347: char ptapname[256];
9348: PetscStrncpy(ptapname,"MatPtAP_",sizeof(ptapname));
9349: PetscStrlcat(ptapname,((PetscObject)A)->type_name,sizeof(ptapname));
9350: PetscStrlcat(ptapname,"_",sizeof(ptapname));
9351: PetscStrlcat(ptapname,((PetscObject)P)->type_name,sizeof(ptapname));
9352: PetscStrlcat(ptapname,"_C",sizeof(ptapname)); /* e.g., ptapname = "MatPtAP_seqdense_seqaij_C" */
9353: PetscObjectQueryFunction((PetscObject)P,ptapname,&ptap);
9354: if (!ptap) SETERRQ3(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_INCOMP,"MatPtAP requires A, %s, to be compatible with P, %s (Misses composed function %s)",((PetscObject)A)->type_name,((PetscObject)P)->type_name,ptapname);
9355: }
9357: PetscLogEventBegin(MAT_PtAP,A,P,0,0);
9358: (*ptap)(A,P,scall,fill,C);
9359: PetscLogEventEnd(MAT_PtAP,A,P,0,0);
9360: if (A->symmetric_set && A->symmetric) {
9361: MatSetOption(*C,MAT_SYMMETRIC,PETSC_TRUE);
9362: }
9363: return(0);
9364: }
9366: /*@
9367: MatPtAPNumeric - Computes the matrix product C = P^T * A * P
9369: Neighbor-wise Collective on Mat
9371: Input Parameters:
9372: + A - the matrix
9373: - P - the projection matrix
9375: Output Parameters:
9376: . C - the product matrix
9378: Notes:
9379: C must have been created by calling MatPtAPSymbolic() and must be destroyed by
9380: the user using MatDestroy().
9382: This routine is currently only implemented for pairs of AIJ matrices and classes
9383: which inherit from AIJ. C will be of type MATAIJ.
9385: Level: intermediate
9387: .seealso: MatPtAP(), MatPtAPSymbolic(), MatMatMultNumeric()
9388: @*/
9389: PetscErrorCode MatPtAPNumeric(Mat A,Mat P,Mat C)
9390: {
9396: if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9397: if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9400: MatCheckPreallocated(P,2);
9401: if (!P->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9402: if (P->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9405: MatCheckPreallocated(C,3);
9406: if (C->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9407: if (P->cmap->N!=C->rmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",P->cmap->N,C->rmap->N);
9408: if (P->rmap->N!=A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",P->rmap->N,A->cmap->N);
9409: if (A->rmap->N!=A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix 'A' must be square, %D != %D",A->rmap->N,A->cmap->N);
9410: if (P->cmap->N!=C->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",P->cmap->N,C->cmap->N);
9411: MatCheckPreallocated(A,1);
9413: if (!C->ops->ptapnumeric) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_WRONGSTATE,"MatPtAPNumeric implementation is missing. You should call MatPtAPSymbolic first");
9414: PetscLogEventBegin(MAT_PtAPNumeric,A,P,0,0);
9415: (*C->ops->ptapnumeric)(A,P,C);
9416: PetscLogEventEnd(MAT_PtAPNumeric,A,P,0,0);
9417: return(0);
9418: }
9420: /*@
9421: MatPtAPSymbolic - Creates the (i,j) structure of the matrix product C = P^T * A * P
9423: Neighbor-wise Collective on Mat
9425: Input Parameters:
9426: + A - the matrix
9427: - P - the projection matrix
9429: Output Parameters:
9430: . C - the (i,j) structure of the product matrix
9432: Notes:
9433: C will be created and must be destroyed by the user with MatDestroy().
9435: This routine is currently only implemented for pairs of SeqAIJ matrices and classes
9436: which inherit from SeqAIJ. C will be of type MATSEQAIJ. The product is computed using
9437: this (i,j) structure by calling MatPtAPNumeric().
9439: Level: intermediate
9441: .seealso: MatPtAP(), MatPtAPNumeric(), MatMatMultSymbolic()
9442: @*/
9443: PetscErrorCode MatPtAPSymbolic(Mat A,Mat P,PetscReal fill,Mat *C)
9444: {
9450: if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9451: if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9452: if (fill <1.0) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Expected fill=%g must be >= 1.0",(double)fill);
9455: MatCheckPreallocated(P,2);
9456: if (!P->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9457: if (P->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9460: if (P->rmap->N!=A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",P->rmap->N,A->cmap->N);
9461: if (A->rmap->N!=A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix 'A' must be square, %D != %D",A->rmap->N,A->cmap->N);
9462: MatCheckPreallocated(A,1);
9464: if (!A->ops->ptapsymbolic) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"MatType %s",((PetscObject)A)->type_name);
9465: PetscLogEventBegin(MAT_PtAPSymbolic,A,P,0,0);
9466: (*A->ops->ptapsymbolic)(A,P,fill,C);
9467: PetscLogEventEnd(MAT_PtAPSymbolic,A,P,0,0);
9469: /* MatSetBlockSize(*C,A->rmap->bs); NO! this is not always true -ma */
9470: return(0);
9471: }
9473: /*@
9474: MatRARt - Creates the matrix product C = R * A * R^T
9476: Neighbor-wise Collective on Mat
9478: Input Parameters:
9479: + A - the matrix
9480: . R - the projection matrix
9481: . scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
9482: - fill - expected fill as ratio of nnz(C)/nnz(A), use PETSC_DEFAULT if you do not have a good estimate;
9483: if the result is a dense matrix this is irrelevant
9485: Output Parameters:
9486: . C - the product matrix
9488: Notes:
9489: C will be created and must be destroyed by the user with MatDestroy().
9491: This routine is currently only implemented for pairs of AIJ matrices and classes
9492: which inherit from AIJ. Due to PETSc sparse matrix block row distribution among processes,
9493: parallel MatRARt is implemented via explicit transpose of R, which could be very expensive.
9494: We recommend using MatPtAP().
9496: Level: intermediate
9498: .seealso: MatRARtSymbolic(), MatRARtNumeric(), MatMatMult(), MatPtAP()
9499: @*/
9500: PetscErrorCode MatRARt(Mat A,Mat R,MatReuse scall,PetscReal fill,Mat *C)
9501: {
9507: if (scall == MAT_INPLACE_MATRIX) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Inplace product not supported");
9508: if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9509: if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9512: MatCheckPreallocated(R,2);
9513: if (!R->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9514: if (R->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9516: if (R->cmap->N!=A->rmap->N) SETERRQ2(PetscObjectComm((PetscObject)R),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",R->cmap->N,A->rmap->N);
9518: if (fill == PETSC_DEFAULT || fill == PETSC_DECIDE) fill = 2.0;
9519: if (fill < 1.0) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Expected fill=%g must be >= 1.0",(double)fill);
9520: MatCheckPreallocated(A,1);
9522: if (!A->ops->rart) {
9523: Mat Rt;
9524: MatTranspose(R,MAT_INITIAL_MATRIX,&Rt);
9525: MatMatMatMult(R,A,Rt,scall,fill,C);
9526: MatDestroy(&Rt);
9527: return(0);
9528: }
9529: PetscLogEventBegin(MAT_RARt,A,R,0,0);
9530: (*A->ops->rart)(A,R,scall,fill,C);
9531: PetscLogEventEnd(MAT_RARt,A,R,0,0);
9532: return(0);
9533: }
9535: /*@
9536: MatRARtNumeric - Computes the matrix product C = R * A * R^T
9538: Neighbor-wise Collective on Mat
9540: Input Parameters:
9541: + A - the matrix
9542: - R - the projection matrix
9544: Output Parameters:
9545: . C - the product matrix
9547: Notes:
9548: C must have been created by calling MatRARtSymbolic and must be destroyed by
9549: the user using MatDestroy().
9551: This routine is currently only implemented for pairs of AIJ matrices and classes
9552: which inherit from AIJ. C will be of type MATAIJ.
9554: Level: intermediate
9556: .seealso: MatRARt(), MatRARtSymbolic(), MatMatMultNumeric()
9557: @*/
9558: PetscErrorCode MatRARtNumeric(Mat A,Mat R,Mat C)
9559: {
9565: if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9566: if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9569: MatCheckPreallocated(R,2);
9570: if (!R->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9571: if (R->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9574: MatCheckPreallocated(C,3);
9575: if (C->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9576: if (R->rmap->N!=C->rmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",R->rmap->N,C->rmap->N);
9577: if (R->cmap->N!=A->rmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",R->cmap->N,A->rmap->N);
9578: if (A->rmap->N!=A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix 'A' must be square, %D != %D",A->rmap->N,A->cmap->N);
9579: if (R->rmap->N!=C->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",R->rmap->N,C->cmap->N);
9580: MatCheckPreallocated(A,1);
9582: PetscLogEventBegin(MAT_RARtNumeric,A,R,0,0);
9583: (*A->ops->rartnumeric)(A,R,C);
9584: PetscLogEventEnd(MAT_RARtNumeric,A,R,0,0);
9585: return(0);
9586: }
9588: /*@
9589: MatRARtSymbolic - Creates the (i,j) structure of the matrix product C = R * A * R^T
9591: Neighbor-wise Collective on Mat
9593: Input Parameters:
9594: + A - the matrix
9595: - R - the projection matrix
9597: Output Parameters:
9598: . C - the (i,j) structure of the product matrix
9600: Notes:
9601: C will be created and must be destroyed by the user with MatDestroy().
9603: This routine is currently only implemented for pairs of SeqAIJ matrices and classes
9604: which inherit from SeqAIJ. C will be of type MATSEQAIJ. The product is computed using
9605: this (i,j) structure by calling MatRARtNumeric().
9607: Level: intermediate
9609: .seealso: MatRARt(), MatRARtNumeric(), MatMatMultSymbolic()
9610: @*/
9611: PetscErrorCode MatRARtSymbolic(Mat A,Mat R,PetscReal fill,Mat *C)
9612: {
9618: if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9619: if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9620: if (fill <1.0) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Expected fill=%g must be >= 1.0",(double)fill);
9623: MatCheckPreallocated(R,2);
9624: if (!R->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9625: if (R->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9628: if (R->cmap->N!=A->rmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",R->cmap->N,A->rmap->N);
9629: if (A->rmap->N!=A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix 'A' must be square, %D != %D",A->rmap->N,A->cmap->N);
9630: MatCheckPreallocated(A,1);
9631: PetscLogEventBegin(MAT_RARtSymbolic,A,R,0,0);
9632: (*A->ops->rartsymbolic)(A,R,fill,C);
9633: PetscLogEventEnd(MAT_RARtSymbolic,A,R,0,0);
9635: MatSetBlockSizes(*C,PetscAbs(R->rmap->bs),PetscAbs(R->rmap->bs));
9636: return(0);
9637: }
9639: /*@
9640: MatMatMult - Performs Matrix-Matrix Multiplication C=A*B.
9642: Neighbor-wise Collective on Mat
9644: Input Parameters:
9645: + A - the left matrix
9646: . B - the right matrix
9647: . scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
9648: - fill - expected fill as ratio of nnz(C)/(nnz(A) + nnz(B)), use PETSC_DEFAULT if you do not have a good estimate;
9649: if the result is a dense matrix this is irrelevant
9651: Output Parameters:
9652: . C - the product matrix
9654: Notes:
9655: Unless scall is MAT_REUSE_MATRIX C will be created.
9657: MAT_REUSE_MATRIX can only be used if the matrices A and B have the same nonzero pattern as in the previous call and C was obtained from a previous
9658: call to this function with either MAT_INITIAL_MATRIX or MatMatMultSymbolic()
9660: To determine the correct fill value, run with -info and search for the string "Fill ratio" to see the value
9661: actually needed.
9663: If you have many matrices with the same non-zero structure to multiply, you
9664: should either
9665: $ 1) use MAT_REUSE_MATRIX in all calls but the first or
9666: $ 2) call MatMatMultSymbolic() once and then MatMatMultNumeric() for each product needed
9667: In the special case where matrix B (and hence C) are dense you can create the correctly sized matrix C yourself and then call this routine
9668: with MAT_REUSE_MATRIX, rather than first having MatMatMult() create it for you. You can NEVER do this if the matrix C is sparse.
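
   Example of Usage (a minimal sketch; error checking is omitted and A and B are assumed assembled matrices with compatible dimensions):
.vb
   Mat C;
   MatMatMult(A,B,MAT_INITIAL_MATRIX,PETSC_DEFAULT,&C);
   ... change the numerical values of A and/or B, keeping the nonzero patterns ...
   MatMatMult(A,B,MAT_REUSE_MATRIX,PETSC_DEFAULT,&C);
   MatDestroy(&C);
.ve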
9670: Level: intermediate
9672: .seealso: MatMatMultSymbolic(), MatMatMultNumeric(), MatTransposeMatMult(), MatMatTransposeMult(), MatPtAP()
9673: @*/
9674: PetscErrorCode MatMatMult(Mat A,Mat B,MatReuse scall,PetscReal fill,Mat *C)
9675: {
9677: PetscErrorCode (*fA)(Mat,Mat,MatReuse,PetscReal,Mat*);
9678: PetscErrorCode (*fB)(Mat,Mat,MatReuse,PetscReal,Mat*);
9679: PetscErrorCode (*mult)(Mat,Mat,MatReuse,PetscReal,Mat*)=NULL;
9684: MatCheckPreallocated(A,1);
9685: if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9686: if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9689: MatCheckPreallocated(B,2);
9690: if (!B->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9691: if (B->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9693: if (scall == MAT_INPLACE_MATRIX) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Inplace product not supported");
9694: if (B->rmap->N!=A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",B->rmap->N,A->cmap->N);
9695: if (scall == MAT_REUSE_MATRIX) {
9698: PetscLogEventBegin(MAT_MatMult,A,B,0,0);
9699: PetscLogEventBegin(MAT_MatMultNumeric,A,B,0,0);
9700: (*(*C)->ops->matmultnumeric)(A,B,*C);
9701: PetscLogEventEnd(MAT_MatMultNumeric,A,B,0,0);
9702: PetscLogEventEnd(MAT_MatMult,A,B,0,0);
9703: return(0);
9704: }
9705: if (fill == PETSC_DEFAULT || fill == PETSC_DECIDE) fill = 2.0;
9706: if (fill < 1.0) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Expected fill=%g must be >= 1.0",(double)fill);
9708: fA = A->ops->matmult;
9709: fB = B->ops->matmult;
9710: if (fB == fA) {
9711: if (!fB) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"MatMatMult not supported for B of type %s",((PetscObject)B)->type_name);
9712: mult = fB;
9713: } else {
9714: /* dispatch based on the type of A and B from their PetscObject's PetscFunctionLists. */
9715: char multname[256];
9716: PetscStrncpy(multname,"MatMatMult_",sizeof(multname));
9717: PetscStrlcat(multname,((PetscObject)A)->type_name,sizeof(multname));
9718: PetscStrlcat(multname,"_",sizeof(multname));
9719: PetscStrlcat(multname,((PetscObject)B)->type_name,sizeof(multname));
9720: PetscStrlcat(multname,"_C",sizeof(multname)); /* e.g., multname = "MatMatMult_seqdense_seqaij_C" */
9721: PetscObjectQueryFunction((PetscObject)B,multname,&mult);
9722: if (!mult) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_INCOMP,"MatMatMult requires A, %s, to be compatible with B, %s",((PetscObject)A)->type_name,((PetscObject)B)->type_name);
9723: }
9724: PetscLogEventBegin(MAT_MatMult,A,B,0,0);
9725: (*mult)(A,B,scall,fill,C);
9726: PetscLogEventEnd(MAT_MatMult,A,B,0,0);
9727: return(0);
9728: }
9730: /*@
9731: MatMatMultSymbolic - Performs construction, preallocation, and computes the ij structure
9732: of the matrix-matrix product C=A*B. Call this routine before calling MatMatMultNumeric().
9734: Neighbor-wise Collective on Mat
9736: Input Parameters:
9737: + A - the left matrix
9738: . B - the right matrix
9739: - fill - expected fill as ratio of nnz(C)/(nnz(A) + nnz(B)), use PETSC_DEFAULT if you do not have a good estimate,
9740: if C is a dense matrix this is irrelevant
9742: Output Parameters:
9743: . C - the product matrix
9745: Notes:
9746: C will be created and must be destroyed by the user with MatDestroy().
9748: To determine the correct fill value, run with -info and search for the string "Fill ratio" to see the value
9749: actually needed.
9751: This routine is currently implemented for
9752: - pairs of AIJ matrices and classes which inherit from AIJ, C will be of type AIJ
9753: - pairs of AIJ (A) and Dense (B) matrix, C will be of type Dense.
9754: - pairs of Dense (A) and AIJ (B) matrix, C will be of type Dense.
9756: Level: intermediate
9758: Developers Note: There are ways to estimate the number of nonzeros in the resulting product, see for example, http://arxiv.org/abs/1006.4173
9759: We should incorporate them into PETSc.
9761: .seealso: MatMatMult(), MatMatMultNumeric()
9762: @*/
9763: PetscErrorCode MatMatMultSymbolic(Mat A,Mat B,PetscReal fill,Mat *C)
9764: {
9766: PetscErrorCode (*Asymbolic)(Mat,Mat,PetscReal,Mat*);
9767: PetscErrorCode (*Bsymbolic)(Mat,Mat,PetscReal,Mat*);
9768: PetscErrorCode (*symbolic)(Mat,Mat,PetscReal,Mat*)=NULL;
9773: if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9774: if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9778: MatCheckPreallocated(B,2);
9779: if (!B->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9780: if (B->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9783: if (B->rmap->N!=A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",B->rmap->N,A->cmap->N);
9784: if (fill == PETSC_DEFAULT) fill = 2.0;
9785: if (fill < 1.0) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Expected fill=%g must be >= 1.0",(double)fill);
9786: MatCheckPreallocated(A,1);
9788: Asymbolic = A->ops->matmultsymbolic;
9789: Bsymbolic = B->ops->matmultsymbolic;
9790: if (Asymbolic == Bsymbolic) {
9791: if (!Bsymbolic) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"C=A*B not implemented for B of type %s",((PetscObject)B)->type_name);
9792: symbolic = Bsymbolic;
9793: } else { /* dispatch based on the type of A and B */
9794: char symbolicname[256];
9795: PetscStrncpy(symbolicname,"MatMatMultSymbolic_",sizeof(symbolicname));
9796: PetscStrlcat(symbolicname,((PetscObject)A)->type_name,sizeof(symbolicname));
9797: PetscStrlcat(symbolicname,"_",sizeof(symbolicname));
9798: PetscStrlcat(symbolicname,((PetscObject)B)->type_name,sizeof(symbolicname));
9799: PetscStrlcat(symbolicname,"_C",sizeof(symbolicname));
9800: PetscObjectQueryFunction((PetscObject)B,symbolicname,&symbolic);
9801: if (!symbolic) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_INCOMP,"MatMatMultSymbolic requires A, %s, to be compatible with B, %s",((PetscObject)A)->type_name,((PetscObject)B)->type_name);
9802: }
9803: PetscLogEventBegin(MAT_MatMultSymbolic,A,B,0,0);
9804: (*symbolic)(A,B,fill,C);
9805: PetscLogEventEnd(MAT_MatMultSymbolic,A,B,0,0);
9806: return(0);
9807: }
9809: /*@
9810: MatMatMultNumeric - Performs the numeric matrix-matrix product.
9811: Call this routine after first calling MatMatMultSymbolic().
9813: Neighbor-wise Collective on Mat
9815: Input Parameters:
9816: + A - the left matrix
9817: - B - the right matrix
9819: Output Parameters:
9820: . C - the product matrix, which was created from MatMatMultSymbolic() or a call to MatMatMult().
9822: Notes:
9823: C must have been created with MatMatMultSymbolic().
9825: This routine is currently implemented for
9826: - pairs of AIJ matrices and classes which inherit from AIJ, C will be of type MATAIJ.
9827: - pairs of AIJ (A) and Dense (B) matrix, C will be of type Dense.
9828: - pairs of Dense (A) and AIJ (B) matrix, C will be of type Dense.
9830: Level: intermediate
9832: .seealso: MatMatMult(), MatMatMultSymbolic()
9833: @*/
9834: PetscErrorCode MatMatMultNumeric(Mat A,Mat B,Mat C)
9835: {
9839: MatMatMult(A,B,MAT_REUSE_MATRIX,0.0,&C);
9840: return(0);
9841: }
9843: /*@
9844: MatMatTransposeMult - Performs Matrix-Matrix Multiplication C=A*B^T.
9846: Neighbor-wise Collective on Mat
9848: Input Parameters:
9849: + A - the left matrix
9850: . B - the right matrix
9851: . scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
9852: - fill - expected fill as ratio of nnz(C)/(nnz(A) + nnz(B)), use PETSC_DEFAULT if not known
9854: Output Parameters:
9855: . C - the product matrix
9857: Notes:
9858: C will be created if MAT_INITIAL_MATRIX and must be destroyed by the user with MatDestroy().
9860: MAT_REUSE_MATRIX can only be used if the matrices A and B have the same nonzero pattern as in the previous call
9862: To determine the correct fill value, run with -info and search for the string "Fill ratio" to see the value
9863: actually needed.
9865: This routine is currently only implemented for pairs of SeqAIJ matrices and for the SeqDense class.
9867: Level: intermediate
9869: .seealso: MatMatTransposeMultSymbolic(), MatMatTransposeMultNumeric(), MatMatMult(), MatTransposeMatMult() MatPtAP()
9870: @*/
9871: PetscErrorCode MatMatTransposeMult(Mat A,Mat B,MatReuse scall,PetscReal fill,Mat *C)
9872: {
9874: PetscErrorCode (*fA)(Mat,Mat,MatReuse,PetscReal,Mat*);
9875: PetscErrorCode (*fB)(Mat,Mat,MatReuse,PetscReal,Mat*);
9880: if (scall == MAT_INPLACE_MATRIX) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Inplace product not supported");
9881: if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9882: if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9885: MatCheckPreallocated(B,2);
9886: if (!B->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9887: if (B->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9889: if (B->cmap->N!=A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, AN %D != BN %D",A->cmap->N,B->cmap->N);
9890: if (fill == PETSC_DEFAULT || fill == PETSC_DECIDE) fill = 2.0;
9891: if (fill < 1.0) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Expected fill=%g must be >= 1.0",(double)fill);
9892: MatCheckPreallocated(A,1);
9894: fA = A->ops->mattransposemult;
9895: if (!fA) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"MatMatTransposeMult not supported for A of type %s",((PetscObject)A)->type_name);
9896: fB = B->ops->mattransposemult;
9897: if (!fB) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"MatMatTransposeMult not supported for B of type %s",((PetscObject)B)->type_name);
9898: if (fB!=fA) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_INCOMP,"MatMatTransposeMult requires A, %s, to be compatible with B, %s",((PetscObject)A)->type_name,((PetscObject)B)->type_name);
9900: PetscLogEventBegin(MAT_MatTransposeMult,A,B,0,0);
9901: if (scall == MAT_INITIAL_MATRIX) {
9902: PetscLogEventBegin(MAT_MatTransposeMultSymbolic,A,B,0,0);
9903: (*A->ops->mattransposemultsymbolic)(A,B,fill,C);
9904: PetscLogEventEnd(MAT_MatTransposeMultSymbolic,A,B,0,0);
9905: }
9906: PetscLogEventBegin(MAT_MatTransposeMultNumeric,A,B,0,0);
9907: (*A->ops->mattransposemultnumeric)(A,B,*C);
9908: PetscLogEventEnd(MAT_MatTransposeMultNumeric,A,B,0,0);
9909: PetscLogEventEnd(MAT_MatTransposeMult,A,B,0,0);
9910: return(0);
9911: }
9913: /*@
9914: MatTransposeMatMult - Performs Matrix-Matrix Multiplication C=A^T*B.
9916: Neighbor-wise Collective on Mat
9918: Input Parameters:
9919: + A - the left matrix
9920: . B - the right matrix
9921: . scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
9922: - fill - expected fill as ratio of nnz(C)/(nnz(A) + nnz(B)), use PETSC_DEFAULT if not known
9924: Output Parameters:
9925: . C - the product matrix
9927: Notes:
9928: C will be created if MAT_INITIAL_MATRIX and must be destroyed by the user with MatDestroy().
9930: MAT_REUSE_MATRIX can only be used if the matrices A and B have the same nonzero pattern as in the previous call
9932: To determine the correct fill value, run with -info and search for the string "Fill ratio" to see the value
9933: actually needed.
9935: This routine is currently implemented for pairs of AIJ matrices and pairs of SeqDense matrices and classes
9936: which inherit from SeqAIJ. C will be of the same type as the input matrices.
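
   Example of Usage (a minimal sketch forming the normal-equations operator A^T*A; error checking is omitted and A is an assumed assembled AIJ matrix):
.vb
   Mat N;
   MatTransposeMatMult(A,A,MAT_INITIAL_MATRIX,PETSC_DEFAULT,&N);
   MatDestroy(&N);
.ve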
9938: Level: intermediate
9940: .seealso: MatTransposeMatMultSymbolic(), MatTransposeMatMultNumeric(), MatMatMult(), MatMatTransposeMult(), MatPtAP()
9941: @*/
9942: PetscErrorCode MatTransposeMatMult(Mat A,Mat B,MatReuse scall,PetscReal fill,Mat *C)
9943: {
9945: PetscErrorCode (*fA)(Mat,Mat,MatReuse,PetscReal,Mat*);
9946: PetscErrorCode (*fB)(Mat,Mat,MatReuse,PetscReal,Mat*);
9947: PetscErrorCode (*transposematmult)(Mat,Mat,MatReuse,PetscReal,Mat*) = NULL;
9952: if (scall == MAT_INPLACE_MATRIX) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Inplace product not supported");
9953: if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9954: if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9957: MatCheckPreallocated(B,2);
9958: if (!B->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9959: if (B->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9961: if (B->rmap->N!=A->rmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",B->rmap->N,A->rmap->N);
9962: if (fill == PETSC_DEFAULT || fill == PETSC_DECIDE) fill = 2.0;
9963: if (fill < 1.0) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Expected fill=%g must be >= 1.0",(double)fill);
9964: MatCheckPreallocated(A,1);
9966: fA = A->ops->transposematmult;
9967: fB = B->ops->transposematmult;
9968: if (fB==fA) {
9969: if (!fA) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"MatTransposeMatMult not supported for A of type %s",((PetscObject)A)->type_name);
9970: transposematmult = fA;
9971: } else {
9972: /* dispatch based on the type of A and B from their PetscObject's PetscFunctionLists. */
9973: char multname[256];
9974: PetscStrncpy(multname,"MatTransposeMatMult_",sizeof(multname));
9975: PetscStrlcat(multname,((PetscObject)A)->type_name,sizeof(multname));
9976: PetscStrlcat(multname,"_",sizeof(multname));
9977: PetscStrlcat(multname,((PetscObject)B)->type_name,sizeof(multname));
9978: PetscStrlcat(multname,"_C",sizeof(multname)); /* e.g., multname = "MatTransposeMatMult_seqdense_seqaij_C" */
9979: PetscObjectQueryFunction((PetscObject)B,multname,&transposematmult);
9980: if (!transposematmult) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_INCOMP,"MatTransposeMatMult requires A, %s, to be compatible with B, %s",((PetscObject)A)->type_name,((PetscObject)B)->type_name);
9981: }
9982: PetscLogEventBegin(MAT_TransposeMatMult,A,B,0,0);
9983: (*transposematmult)(A,B,scall,fill,C);
9984: PetscLogEventEnd(MAT_TransposeMatMult,A,B,0,0);
9985: return(0);
9986: }
9988: /*@
9989: MatMatMatMult - Performs Matrix-Matrix-Matrix Multiplication D=A*B*C.
9991: Neighbor-wise Collective on Mat
9993: Input Parameters:
9994: + A - the left matrix
9995: . B - the middle matrix
9996: . C - the right matrix
9997: . scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
9998: - fill - expected fill as ratio of nnz(D)/(nnz(A) + nnz(B) + nnz(C)); use PETSC_DEFAULT if you do not have a good estimate.
9999: If the result is a dense matrix this is irrelevant.
10001: Output Parameters:
10002: . D - the product matrix
10004: Notes:
10005: Unless scall is MAT_REUSE_MATRIX D will be created.
10007: MAT_REUSE_MATRIX can only be used if the matrices A, B and C have the same nonzero pattern as in the previous call
10009: To determine the correct fill value, run with -info and search for the string "Fill ratio" to see the value
10010: actually needed.
10012: If you have many matrices with the same nonzero structure to multiply, you
10013: should use MAT_REUSE_MATRIX in all calls but the first.
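Example of Usage (a minimal sketch; A, B, and C here are assumed to be previously assembled matrices with compatible dimensions):
.vb
MatMatMatMult(A,B,C,MAT_INITIAL_MATRIX,PETSC_DEFAULT,&D);
... use D = A*B*C ...
MatDestroy(&D);
.ve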
10015: Level: intermediate
10017: .seealso: MatMatMult(), MatPtAP()
10018: @*/
10019: PetscErrorCode MatMatMatMult(Mat A,Mat B,Mat C,MatReuse scall,PetscReal fill,Mat *D)
10020: {
10022: PetscErrorCode (*fA)(Mat,Mat,Mat,MatReuse,PetscReal,Mat*);
10023: PetscErrorCode (*fB)(Mat,Mat,Mat,MatReuse,PetscReal,Mat*);
10024: PetscErrorCode (*fC)(Mat,Mat,Mat,MatReuse,PetscReal,Mat*);
10025: PetscErrorCode (*mult)(Mat,Mat,Mat,MatReuse,PetscReal,Mat*)=NULL;
10030: MatCheckPreallocated(A,1);
10031: if (scall == MAT_INPLACE_MATRIX) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Inplace product not supported");
10032: if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
10033: if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
10036: MatCheckPreallocated(B,2);
10037: if (!B->assembled) SETERRQ(PetscObjectComm((PetscObject)B),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
10038: if (B->factortype) SETERRQ(PetscObjectComm((PetscObject)B),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
10041: MatCheckPreallocated(C,3);
10042: if (!C->assembled) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
10043: if (C->factortype) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
10044: if (B->rmap->N!=A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)B),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",B->rmap->N,A->cmap->N);
10045: if (C->rmap->N!=B->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",C->rmap->N,B->cmap->N);
10046: if (scall == MAT_REUSE_MATRIX) {
10049: PetscLogEventBegin(MAT_MatMatMult,A,B,0,0);
10050: (*(*D)->ops->matmatmult)(A,B,C,scall,fill,D);
10051: PetscLogEventEnd(MAT_MatMatMult,A,B,0,0);
10052: return(0);
10053: }
10054: if (fill == PETSC_DEFAULT || fill == PETSC_DECIDE) fill = 2.0;
10055: if (fill < 1.0) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Expected fill=%g must be >= 1.0",(double)fill);
10057: fA = A->ops->matmatmult;
10058: fB = B->ops->matmatmult;
10059: fC = C->ops->matmatmult;
10060: if (fA == fB && fA == fC) {
10061: if (!fA) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"MatMatMatMult not supported for A of type %s",((PetscObject)A)->type_name);
10062: mult = fA;
10063: } else {
10064: /* dispatch based on the type of A, B and C from their PetscObject's PetscFunctionLists. */
10065: char multname[256];
10066: PetscStrncpy(multname,"MatMatMatMult_",sizeof(multname));
10067: PetscStrlcat(multname,((PetscObject)A)->type_name,sizeof(multname));
10068: PetscStrlcat(multname,"_",sizeof(multname));
10069: PetscStrlcat(multname,((PetscObject)B)->type_name,sizeof(multname));
10070: PetscStrlcat(multname,"_",sizeof(multname));
10071: PetscStrlcat(multname,((PetscObject)C)->type_name,sizeof(multname));
10072: PetscStrlcat(multname,"_C",sizeof(multname));
10073: PetscObjectQueryFunction((PetscObject)B,multname,&mult);
10074: if (!mult) SETERRQ3(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_INCOMP,"MatMatMatMult requires A, %s, to be compatible with B, %s, C, %s",((PetscObject)A)->type_name,((PetscObject)B)->type_name,((PetscObject)C)->type_name);
10075: }
10076: PetscLogEventBegin(MAT_MatMatMult,A,B,0,0);
10077: (*mult)(A,B,C,scall,fill,D);
10078: PetscLogEventEnd(MAT_MatMatMult,A,B,0,0);
10079: return(0);
10080: }
10082: /*@
10083: MatCreateRedundantMatrix - Create redundant matrices and put them into processors of subcommunicators.
10085: Collective on Mat
10087: Input Parameters:
10088: + mat - the matrix
10089: . nsubcomm - the number of subcommunicators (= number of redundant parallel or sequential matrices)
10090: . subcomm - MPI communicator split from the communicator on which mat resides (or MPI_COMM_NULL if nsubcomm is used)
10091: - reuse - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
10093: Output Parameter:
10094: . matredundant - redundant matrix
10096: Notes:
10097: MAT_REUSE_MATRIX can only be used when the nonzero structure of the
10098: original matrix has not changed since the last call to MatCreateRedundantMatrix().
10100: This routine creates the duplicated matrices in subcommunicators; you should NOT create them before
10101: calling it.
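Example of Usage (a minimal sketch; mat is assumed to be an assembled parallel matrix and nsubcomm is the number of redundant copies chosen by the caller):
.vb
MatCreateRedundantMatrix(mat,nsubcomm,MPI_COMM_NULL,MAT_INITIAL_MATRIX,&matred);
... use matred on its subcommunicator ...
MatDestroy(&matred);
.ve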
10103: Level: advanced
10105: Concepts: subcommunicator
10106: Concepts: duplicate matrix
10108: .seealso: MatDestroy()
10109: @*/
10110: PetscErrorCode MatCreateRedundantMatrix(Mat mat,PetscInt nsubcomm,MPI_Comm subcomm,MatReuse reuse,Mat *matredundant)
10111: {
10113: MPI_Comm comm;
10114: PetscMPIInt size;
10115: PetscInt mloc_sub,nloc_sub,rstart,rend,M=mat->rmap->N,N=mat->cmap->N,bs=mat->rmap->bs;
10116: Mat_Redundant *redund=NULL;
10117: PetscSubcomm psubcomm=NULL;
10118: MPI_Comm subcomm_in=subcomm;
10119: Mat *matseq;
10120: IS isrow,iscol;
10121: PetscBool newsubcomm=PETSC_FALSE;
10125: if (nsubcomm && reuse == MAT_REUSE_MATRIX) {
10128: }
10130: MPI_Comm_size(PetscObjectComm((PetscObject)mat),&size);
10131: if (size == 1 || nsubcomm == 1) {
10132: if (reuse == MAT_INITIAL_MATRIX) {
10133: MatDuplicate(mat,MAT_COPY_VALUES,matredundant);
10134: } else {
10135: if (*matredundant == mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"MAT_REUSE_MATRIX means reuse the matrix passed in as the final argument, not the original matrix");
10136: MatCopy(mat,*matredundant,SAME_NONZERO_PATTERN);
10137: }
10138: return(0);
10139: }
10141: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
10142: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
10143: MatCheckPreallocated(mat,1);
10145: PetscLogEventBegin(MAT_RedundantMat,mat,0,0,0);
10146: if (subcomm_in == MPI_COMM_NULL && reuse == MAT_INITIAL_MATRIX) { /* get subcomm if user does not provide subcomm */
10147: /* create psubcomm, then get subcomm */
10148: PetscObjectGetComm((PetscObject)mat,&comm);
10149: MPI_Comm_size(comm,&size);
10150: if (nsubcomm < 1 || nsubcomm > size) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"nsubcomm must be between 1 and %D",size);
10152: PetscSubcommCreate(comm,&psubcomm);
10153: PetscSubcommSetNumber(psubcomm,nsubcomm);
10154: PetscSubcommSetType(psubcomm,PETSC_SUBCOMM_CONTIGUOUS);
10155: PetscSubcommSetFromOptions(psubcomm);
10156: PetscCommDuplicate(PetscSubcommChild(psubcomm),&subcomm,NULL);
10157: newsubcomm = PETSC_TRUE;
10158: PetscSubcommDestroy(&psubcomm);
10159: }
10161: /* get isrow, iscol and a local sequential matrix matseq[0] */
10162: if (reuse == MAT_INITIAL_MATRIX) {
10163: mloc_sub = PETSC_DECIDE;
10164: nloc_sub = PETSC_DECIDE;
10165: if (bs < 1) {
10166: PetscSplitOwnership(subcomm,&mloc_sub,&M);
10167: PetscSplitOwnership(subcomm,&nloc_sub,&N);
10168: } else {
10169: PetscSplitOwnershipBlock(subcomm,bs,&mloc_sub,&M);
10170: PetscSplitOwnershipBlock(subcomm,bs,&nloc_sub,&N);
10171: }
10172: MPI_Scan(&mloc_sub,&rend,1,MPIU_INT,MPI_SUM,subcomm);
10173: rstart = rend - mloc_sub;
10174: ISCreateStride(PETSC_COMM_SELF,mloc_sub,rstart,1,&isrow);
10175: ISCreateStride(PETSC_COMM_SELF,N,0,1,&iscol);
10176: } else { /* reuse == MAT_REUSE_MATRIX */
10177: if (*matredundant == mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"MAT_REUSE_MATRIX means reuse the matrix passed in as the final argument, not the original matrix");
10178: /* retrieve subcomm */
10179: PetscObjectGetComm((PetscObject)(*matredundant),&subcomm);
10180: redund = (*matredundant)->redundant;
10181: isrow = redund->isrow;
10182: iscol = redund->iscol;
10183: matseq = redund->matseq;
10184: }
10185: MatCreateSubMatrices(mat,1,&isrow,&iscol,reuse,&matseq);
10187: /* get matredundant over subcomm */
10188: if (reuse == MAT_INITIAL_MATRIX) {
10189: MatCreateMPIMatConcatenateSeqMat(subcomm,matseq[0],nloc_sub,reuse,matredundant);
10191: /* create a supporting struct and attach it to C for reuse */
10192: PetscNewLog(*matredundant,&redund);
10193: (*matredundant)->redundant = redund;
10194: redund->isrow = isrow;
10195: redund->iscol = iscol;
10196: redund->matseq = matseq;
10197: if (newsubcomm) {
10198: redund->subcomm = subcomm;
10199: } else {
10200: redund->subcomm = MPI_COMM_NULL;
10201: }
10202: } else {
10203: MatCreateMPIMatConcatenateSeqMat(subcomm,matseq[0],PETSC_DECIDE,reuse,matredundant);
10204: }
10205: PetscLogEventEnd(MAT_RedundantMat,mat,0,0,0);
10206: return(0);
10207: }
10209: /*@C
10210: MatGetMultiProcBlock - Create multiple [bjacobi] 'parallel submatrices' from
10211: a given 'mat' object. Each submatrix can span multiple procs.
10213: Collective on Mat
10215: Input Parameters:
10216: + mat - the matrix
10217: . subcomm - the subcommunicator obtained by MPI_Comm_split(comm)
10218: - scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
10220: Output Parameter:
10221: . subMat - parallel submatrices, each spanning a given subcomm
10223: Notes:
10224: The submatrix partition across processors is dictated by 'subComm', a
10225: communicator obtained by MPI_Comm_split(comm). The MPI_Comm_split()
10226: is not restricted to grouping consecutive original ranks.
10228: Due to the MPI_Comm_split() usage, the parallel layout of the submatrices
10229: maps directly to the layout of the original matrix [with respect to the local
10230: row,col partitioning]. So the original 'DiagonalMat' naturally maps
10231: into the 'DiagonalMat' of the subMat, hence it is used directly from
10232: the subMat. However, the offDiagMat loses some columns, and these are
10233: reconstructed with MatSetValues().
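Example of Usage (a minimal sketch; mat is assumed assembled, and color/key are chosen by the caller to define the subcommunicators):
.vb
MPI_Comm_split(PetscObjectComm((PetscObject)mat),color,key,&subComm);
MatGetMultiProcBlock(mat,subComm,MAT_INITIAL_MATRIX,&subMat);
... use subMat ...
MatDestroy(&subMat);
MPI_Comm_free(&subComm);
.ve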
10235: Level: advanced
10237: Concepts: subcommunicator
10238: Concepts: submatrices
10240: .seealso: MatCreateSubMatrices()
10241: @*/
10242: PetscErrorCode MatGetMultiProcBlock(Mat mat, MPI_Comm subComm, MatReuse scall,Mat *subMat)
10243: {
10245: PetscMPIInt commsize,subCommSize;
10248: MPI_Comm_size(PetscObjectComm((PetscObject)mat),&commsize);
10249: MPI_Comm_size(subComm,&subCommSize);
10250: if (subCommSize > commsize) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_OUTOFRANGE,"CommSize %D < SubCommSize %D",commsize,subCommSize);
10252: if (scall == MAT_REUSE_MATRIX && *subMat == mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"MAT_REUSE_MATRIX means reuse the matrix passed in as the final argument, not the original matrix");
10253: PetscLogEventBegin(MAT_GetMultiProcBlock,mat,0,0,0);
10254: (*mat->ops->getmultiprocblock)(mat,subComm,scall,subMat);
10255: PetscLogEventEnd(MAT_GetMultiProcBlock,mat,0,0,0);
10256: return(0);
10257: }
10259: /*@
10260: MatGetLocalSubMatrix - Gets a reference to a submatrix specified in local numbering
10262: Not Collective
10264: Input Arguments:
10265: mat - matrix to extract local submatrix from
10266: isrow - local row indices for submatrix
10267: iscol - local column indices for submatrix
10269: Output Arguments:
10270: submat - the submatrix
10272: Level: intermediate
10274: Notes:
10275: The submat should be restored with MatRestoreLocalSubMatrix() when it is no longer needed.
10277: Depending on the format of mat, the returned submat may not implement MatMult(). Its communicator may be
10278: the same as mat, it may be PETSC_COMM_SELF, or some other subcomm of mat's.
10280: The submat always implements MatSetValuesLocal(). If isrow and iscol have the same block size, then
10281: MatSetValuesBlockedLocal() will also be implemented.
10283: The mat must have had an ISLocalToGlobalMapping provided to it with MatSetLocalToGlobalMapping(). Note that
10284: matrices obtained with DMCreateMatrix() generally already have the local to global mapping provided.
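Example of Usage (a minimal sketch; mat is assumed to have a local-to-global mapping set, isrow/iscol are local index sets, and nrow, irow, ncol, icol, values are caller-provided insertion data):
.vb
MatGetLocalSubMatrix(mat,isrow,iscol,&submat);
MatSetValuesLocal(submat,nrow,irow,ncol,icol,values,ADD_VALUES);
MatRestoreLocalSubMatrix(mat,isrow,iscol,&submat);
.ve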
10286: .seealso: MatRestoreLocalSubMatrix(), MatCreateLocalRef(), MatSetLocalToGlobalMapping()
10287: @*/
10288: PetscErrorCode MatGetLocalSubMatrix(Mat mat,IS isrow,IS iscol,Mat *submat)
10289: {
10298: if (!mat->rmap->mapping) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Matrix must have local to global mapping provided before this call");
10300: if (mat->ops->getlocalsubmatrix) {
10301: (*mat->ops->getlocalsubmatrix)(mat,isrow,iscol,submat);
10302: } else {
10303: MatCreateLocalRef(mat,isrow,iscol,submat);
10304: }
10305: return(0);
10306: }
10308: /*@
10309: MatRestoreLocalSubMatrix - Restores a reference to a submatrix specified in local numbering
10311: Not Collective
10313: Input Arguments:
10314: mat - matrix to extract local submatrix from
10315: isrow - local row indices for submatrix
10316: iscol - local column indices for submatrix
10317: submat - the submatrix
10319: Level: intermediate
10321: .seealso: MatGetLocalSubMatrix()
10322: @*/
10323: PetscErrorCode MatRestoreLocalSubMatrix(Mat mat,IS isrow,IS iscol,Mat *submat)
10324: {
10333: if (*submat) {
10335: }
10337: if (mat->ops->restorelocalsubmatrix) {
10338: (*mat->ops->restorelocalsubmatrix)(mat,isrow,iscol,submat);
10339: } else {
10340: MatDestroy(submat);
10341: }
10342: *submat = NULL;
10343: return(0);
10344: }
10346: /* --------------------------------------------------------*/
10347: /*@
10348: MatFindZeroDiagonals - Finds all the rows of a matrix that have a zero or no diagonal entry
10350: Collective on Mat
10352: Input Parameter:
10353: . mat - the matrix
10355: Output Parameter:
10356: . is - if any rows have zero diagonals this contains the list of them
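Example of Usage (a minimal sketch; mat is assumed to be an assembled matrix):
.vb
MatFindZeroDiagonals(mat,&zerodiag);
... inspect or use the index set ...
ISDestroy(&zerodiag);
.ve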
10358: Level: developer
10360: Concepts: matrix-vector product
10362: .seealso: MatMultTranspose(), MatMultAdd(), MatMultTransposeAdd()
10363: @*/
10364: PetscErrorCode MatFindZeroDiagonals(Mat mat,IS *is)
10365: {
10371: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
10372: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
10374: if (!mat->ops->findzerodiagonals) {
10375: Vec diag;
10376: const PetscScalar *a;
10377: PetscInt *rows;
10378: PetscInt rStart, rEnd, r, nrow = 0;
10380: MatCreateVecs(mat, &diag, NULL);
10381: MatGetDiagonal(mat, diag);
10382: MatGetOwnershipRange(mat, &rStart, &rEnd);
10383: VecGetArrayRead(diag, &a);
10384: for (r = 0; r < rEnd-rStart; ++r) if (a[r] == 0.0) ++nrow;
10385: PetscMalloc1(nrow, &rows);
10386: nrow = 0;
10387: for (r = 0; r < rEnd-rStart; ++r) if (a[r] == 0.0) rows[nrow++] = r+rStart;
10388: VecRestoreArrayRead(diag, &a);
10389: VecDestroy(&diag);
10390: ISCreateGeneral(PetscObjectComm((PetscObject) mat), nrow, rows, PETSC_OWN_POINTER, is);
10391: } else {
10392: (*mat->ops->findzerodiagonals)(mat, is);
10393: }
10394: return(0);
10395: }
10397: /*@
10398: MatFindOffBlockDiagonalEntries - Finds all the rows of a matrix that have entries outside of the main diagonal block (defined by the matrix block size)
10400: Collective on Mat
10402: Input Parameter:
10403: . mat - the matrix
10405: Output Parameter:
10406: . is - contains the list of rows with off block diagonal entries
10408: Level: developer
10410: Concepts: matrix-vector product
10412: .seealso: MatMultTranspose(), MatMultAdd(), MatMultTransposeAdd()
10413: @*/
10414: PetscErrorCode MatFindOffBlockDiagonalEntries(Mat mat,IS *is)
10415: {
10421: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
10422: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
10424: if (!mat->ops->findoffblockdiagonalentries) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"This matrix type does not have a find off block diagonal entries defined");
10425: (*mat->ops->findoffblockdiagonalentries)(mat,is);
10426: return(0);
10427: }
10429: /*@C
10430: MatInvertBlockDiagonal - Inverts the block diagonal entries.
10432: Collective on Mat
10434: Input Parameters:
10435: . mat - the matrix
10437: Output Parameters:
10438: . values - the block inverses in column major order (FORTRAN-like)
10440: Note:
10441: This routine is not available from Fortran.
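Example of Usage (a minimal sketch; mat is assumed to be an assembled matrix with a block size set; the returned array is owned by the matrix, so it is not freed here):
.vb
const PetscScalar *values;
MatInvertBlockDiagonal(mat,&values);
... read the m/bs inverse blocks, each bs by bs in column major order, stored contiguously in values ...
.ve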
10443: Level: advanced
10445: .seealso: MatInvertBlockDiagonalMat()
10446: @*/
10447: PetscErrorCode MatInvertBlockDiagonal(Mat mat,const PetscScalar **values)
10448: {
10453: if (!mat->assembled) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
10454: if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
10455: if (!mat->ops->invertblockdiagonal) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not supported");
10456: (*mat->ops->invertblockdiagonal)(mat,values);
10457: return(0);
10458: }
10460: /*@C
10461: MatInvertVariableBlockDiagonal - Inverts the block diagonal entries, where the blocks may have different sizes.
10463: Collective on Mat
10465: Input Parameters:
10466: + mat - the matrix
10467: . nblocks - the number of blocks
10468: - bsizes - the size of each block
10470: Output Parameters:
10471: . values - the block inverses in column major order (FORTRAN-like)
10473: Note:
10474: This routine is not available from Fortran.
10476: Level: advanced
10478: .seealso: MatInvertBlockDiagonal()
10479: @*/
10480: PetscErrorCode MatInvertVariableBlockDiagonal(Mat mat,PetscInt nblocks,const PetscInt *bsizes,PetscScalar *values)
10481: {
10486: if (!mat->assembled) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
10487: if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
10488: if (!mat->ops->invertvariableblockdiagonal) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not supported");
10489: (*mat->ops->invertvariableblockdiagonal)(mat,nblocks,bsizes,values);
10490: return(0);
10491: }
10493: /*@
10494: MatInvertBlockDiagonalMat - set matrix C to be the inverted block diagonal of matrix A
10496: Collective on Mat
10498: Input Parameters:
10499: . A - the matrix
10501: Output Parameters:
10502: . C - matrix with inverted block diagonal of A. This matrix should be created and may have its type set.
10504: Notes: the blocksize of the matrix is used to determine the blocks on the diagonal of C
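Example of Usage (a minimal sketch; A is assumed to be an assembled matrix with a block size set, and giving C a type such as MATAIJ before the call is an assumption made here):
.vb
MatCreate(PetscObjectComm((PetscObject)A),&C);
MatSetType(C,MATAIJ);
MatInvertBlockDiagonalMat(A,C);
... use C ...
MatDestroy(&C);
.ve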
10506: Level: advanced
10508: .seealso: MatInvertBlockDiagonal()
10509: @*/
10510: PetscErrorCode MatInvertBlockDiagonalMat(Mat A,Mat C)
10511: {
10512: PetscErrorCode ierr;
10513: const PetscScalar *vals;
10514: PetscInt *dnnz;
10515: PetscInt M,N,m,n,rstart,rend,bs,i,j;
10518: MatInvertBlockDiagonal(A,&vals);
10519: MatGetBlockSize(A,&bs);
10520: MatGetSize(A,&M,&N);
10521: MatGetLocalSize(A,&m,&n);
10522: MatSetSizes(C,m,n,M,N);
10523: MatSetBlockSize(C,bs);
10524: PetscMalloc1(m/bs,&dnnz);
10525: for (j = 0; j < m/bs; j++) {
10526: dnnz[j] = 1;
10527: }
10528: MatXAIJSetPreallocation(C,bs,dnnz,NULL,NULL,NULL);
10529: PetscFree(dnnz);
10530: MatGetOwnershipRange(C,&rstart,&rend);
10531: MatSetOption(C,MAT_ROW_ORIENTED,PETSC_FALSE);
10532: for (i = rstart/bs; i < rend/bs; i++) {
10533: MatSetValuesBlocked(C,1,&i,1,&i,&vals[(i-rstart/bs)*bs*bs],INSERT_VALUES);
10534: }
10535: MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);
10536: MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);
10537: MatSetOption(C,MAT_ROW_ORIENTED,PETSC_TRUE);
10538: return(0);
10539: }
10541: /*@C
10542: MatTransposeColoringDestroy - Destroys a coloring context for matrix product C=A*B^T that was created
10543: via MatTransposeColoringCreate().
10545: Collective on MatTransposeColoring
10547: Input Parameter:
10548: . c - coloring context
10550: Level: intermediate
10552: .seealso: MatTransposeColoringCreate()
10553: @*/
10554: PetscErrorCode MatTransposeColoringDestroy(MatTransposeColoring *c)
10555: {
10556: PetscErrorCode ierr;
10557: MatTransposeColoring matcolor=*c;
10560: if (!matcolor) return(0);
10561: if (--((PetscObject)matcolor)->refct > 0) {matcolor = 0; return(0);}
10563: PetscFree3(matcolor->ncolumns,matcolor->nrows,matcolor->colorforrow);
10564: PetscFree(matcolor->rows);
10565: PetscFree(matcolor->den2sp);
10566: PetscFree(matcolor->colorforcol);
10567: PetscFree(matcolor->columns);
10568: if (matcolor->brows>0) {
10569: PetscFree(matcolor->lstart);
10570: }
10571: PetscHeaderDestroy(c);
10572: return(0);
10573: }
10575: /*@C
10576: MatTransColoringApplySpToDen - Given a symbolic matrix product C=A*B^T for which
10577: a MatTransposeColoring context has been created, computes a dense B^T by applying
10578: the MatTransposeColoring to sparse B.
10580: Collective on MatTransposeColoring
10582: Input Parameters:
10583: + B - sparse matrix B
10584: . Btdense - symbolic dense matrix B^T
10585: - coloring - coloring context created with MatTransposeColoringCreate()
10587: Output Parameter:
10588: . Btdense - dense matrix B^T
10590: Level: advanced
10592: Notes:
10593: These are used internally for some implementations of MatRARt()
10595: .seealso: MatTransposeColoringCreate(), MatTransposeColoringDestroy(), MatTransColoringApplyDenToSp()
10597: .keywords: coloring
10598: @*/
10599: PetscErrorCode MatTransColoringApplySpToDen(MatTransposeColoring coloring,Mat B,Mat Btdense)
10600: {
10608: if (!B->ops->transcoloringapplysptoden) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not supported for this matrix type %s",((PetscObject)B)->type_name);
10609: (B->ops->transcoloringapplysptoden)(coloring,B,Btdense);
10610: return(0);
10611: }
10613: /*@C
10614: MatTransColoringApplyDenToSp - Given a symbolic matrix product Csp=A*B^T for which
10615: a MatTransposeColoring context has been created and a dense matrix Cden=A*Btdense
10616: in which Btdense is obtained from MatTransColoringApplySpToDen(), recovers the sparse matrix
10617: Csp from Cden.
10619: Collective on MatTransposeColoring
10621: Input Parameters:
10622: + coloring - coloring context created with MatTransposeColoringCreate()
10623: - Cden - matrix product of a sparse matrix and a dense matrix Btdense
10625: Output Parameter:
10626: . Csp - sparse matrix
10628: Level: advanced
10630: Notes:
10631: These are used internally for some implementations of MatRARt()
10633: .seealso: MatTransposeColoringCreate(), MatTransposeColoringDestroy(), MatTransColoringApplySpToDen()
10635: .keywords: coloring
10636: @*/
10637: PetscErrorCode MatTransColoringApplyDenToSp(MatTransposeColoring matcoloring,Mat Cden,Mat Csp)
10638: {
10646: if (!Csp->ops->transcoloringapplydentosp) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not supported for this matrix type %s",((PetscObject)Csp)->type_name);
10647: (Csp->ops->transcoloringapplydentosp)(matcoloring,Cden,Csp);
10648: return(0);
10649: }
10651: /*@C
10652: MatTransposeColoringCreate - Creates a matrix coloring context for matrix product C=A*B^T.
10654: Collective on Mat
10656: Input Parameters:
10657: + mat - the matrix product C
10658: - iscoloring - the coloring of the matrix; usually obtained with MatColoringCreate() or DMCreateColoring()
10660: Output Parameter:
10661: . color - the new coloring context
10663: Level: intermediate
10665: .seealso: MatTransposeColoringDestroy(), MatTransColoringApplySpToDen(),
10666: MatTransColoringApplyDenToSp()
10667: @*/
10668: PetscErrorCode MatTransposeColoringCreate(Mat mat,ISColoring iscoloring,MatTransposeColoring *color)
10669: {
10670: MatTransposeColoring c;
10671: MPI_Comm comm;
10672: PetscErrorCode ierr;
10675: PetscLogEventBegin(MAT_TransposeColoringCreate,mat,0,0,0);
10676: PetscObjectGetComm((PetscObject)mat,&comm);
10677: PetscHeaderCreate(c,MAT_TRANSPOSECOLORING_CLASSID,"MatTransposeColoring","Matrix product C=A*B^T via coloring","Mat",comm,MatTransposeColoringDestroy,NULL);
10679: c->ctype = iscoloring->ctype;
10680: if (mat->ops->transposecoloringcreate) {
10681: (*mat->ops->transposecoloringcreate)(mat,iscoloring,c);
10682: } else SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Code not yet written for this matrix type");
10684: *color = c;
10685: PetscLogEventEnd(MAT_TransposeColoringCreate,mat,0,0,0);
10686: return(0);
10687: }
10689: /*@
10690: MatGetNonzeroState - Returns a 64-bit integer representing the current state of nonzeros in the matrix. If the
10691: matrix has had no new nonzero locations added since the previous call, the value will be the
10692: same; otherwise it will be larger.
10694: Not Collective
10696: Input Parameter:
10697: . A - the matrix
10699: Output Parameter:
10700: . state - the current state
10702: Notes:
10703: You can only compare states from two different calls to the SAME matrix; you cannot compare calls between
10704: different matrices.
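Example of Usage (a minimal sketch showing how the state can be used to detect new nonzero locations in A):
.vb
PetscObjectState state1,state2;
MatGetNonzeroState(A,&state1);
... insert values and reassemble A ...
MatGetNonzeroState(A,&state2);
if (state2 > state1) {
  ... the nonzero structure of A changed ...
}
.ve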
10706: Level: intermediate
10708: @*/
10709: PetscErrorCode MatGetNonzeroState(Mat mat,PetscObjectState *state)
10710: {
10713: *state = mat->nonzerostate;
10714: return(0);
10715: }
10717: /*@
10718: MatCreateMPIMatConcatenateSeqMat - Creates a single large PETSc matrix by concatenating sequential
10719: matrices from each processor
10721: Collective on MPI_Comm
10723: Input Parameters:
10724: + comm - the communicator the parallel matrix will live on
10725: . seqmat - the input sequential matrix on each process
10726: . n - number of local columns (or PETSC_DECIDE)
10727: - reuse - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
10729: Output Parameter:
10730: . mpimat - the parallel matrix generated
10732: Level: advanced
10734: Notes:
10735: The number of columns of the matrix in EACH processor MUST be the same.
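Example of Usage (a minimal sketch; seqmat is assumed to be an assembled sequential matrix on each process of PETSC_COMM_WORLD):
.vb
MatCreateMPIMatConcatenateSeqMat(PETSC_COMM_WORLD,seqmat,PETSC_DECIDE,MAT_INITIAL_MATRIX,&mpimat);
... use mpimat ...
MatDestroy(&mpimat);
.ve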
10737: @*/
10738: PetscErrorCode MatCreateMPIMatConcatenateSeqMat(MPI_Comm comm,Mat seqmat,PetscInt n,MatReuse reuse,Mat *mpimat)
10739: {
10743: if (!seqmat->ops->creatempimatconcatenateseqmat) SETERRQ1(PetscObjectComm((PetscObject)seqmat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)seqmat)->type_name);
10744: if (reuse == MAT_REUSE_MATRIX && seqmat == *mpimat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"MAT_REUSE_MATRIX means reuse the matrix passed in as the final argument, not the original matrix");
10746: PetscLogEventBegin(MAT_Merge,seqmat,0,0,0);
10747: (*seqmat->ops->creatempimatconcatenateseqmat)(comm,seqmat,n,reuse,mpimat);
10748: PetscLogEventEnd(MAT_Merge,seqmat,0,0,0);
10749: return(0);
10750: }
10752: /*@
10753: MatSubdomainsCreateCoalesce - Creates index subdomains by coalescing adjacent
10754: ranks' ownership ranges.
10756: Collective on A
10758: Input Parameters:
10759: + A - the matrix to create subdomains from
10760: - N - requested number of subdomains
10763: Output Parameters:
10764: + n - number of subdomains resulting on this rank
10765: - iss - IS list with indices of subdomains on this rank
10767: Level: advanced
10769: Notes:
10770: The number of subdomains must be at least 1 and smaller than the communicator size.
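Example of Usage (a minimal sketch; A is assumed assembled and N smaller than the communicator size; destroying the returned index sets and freeing the array is assumed to be the caller's responsibility):
.vb
PetscInt n,i;
IS       *iss;
MatSubdomainsCreateCoalesce(A,N,&n,&iss);
... use the subdomains ...
for (i=0; i<n; i++) {ISDestroy(&iss[i]);}
PetscFree(iss);
.ve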
10771: @*/
10772: PetscErrorCode MatSubdomainsCreateCoalesce(Mat A,PetscInt N,PetscInt *n,IS *iss[])
10773: {
10774: MPI_Comm comm,subcomm;
10775: PetscMPIInt size,rank,color;
10776: PetscInt rstart,rend,k;
10777: PetscErrorCode ierr;
10780: PetscObjectGetComm((PetscObject)A,&comm);
10781: MPI_Comm_size(comm,&size);
10782: MPI_Comm_rank(comm,&rank);
10783: if (N < 1 || N >= (PetscInt)size) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"number of subdomains must be > 0 and < %D, got N = %D",size,N);
10784: *n = 1;
10785: k = ((PetscInt)size)/N + ((PetscInt)size%N>0); /* There are up to k ranks to a color */
10786: color = rank/k;
10787: MPI_Comm_split(comm,color,rank,&subcomm);
10788: PetscMalloc1(1,iss);
10789: MatGetOwnershipRange(A,&rstart,&rend);
10790: ISCreateStride(subcomm,rend-rstart,rstart,1,iss[0]);
10791: MPI_Comm_free(&subcomm);
10792: return(0);
10793: }
10795: /*@
10796: MatGalerkin - Constructs the coarse grid problem via Galerkin projection.
10798: If the interpolation and restriction operators are the same, uses MatPtAP().
10799: If they are not the same, uses MatMatMatMult().
10801: Once the coarse grid problem is constructed, corrects for interpolation operators
10802: that are not of full rank, which can legitimately happen in the case of non-nested
10803: geometric multigrid.
10805: Input Parameters:
10806: + restrct - restriction operator
10807: . dA - fine grid matrix
10808: . interpolate - interpolation operator
10809: . reuse - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
10810: - fill - expected fill, use PETSC_DEFAULT if you do not have a good estimate
10812: Output Parameters:
10813: . A - the Galerkin coarse matrix
10815: Options Database Key:
10816: . -pc_mg_galerkin <both,pmat,mat,none>
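Example of Usage (a minimal sketch; R, Afine, and P here are assumed to be previously assembled restriction, fine-grid, and interpolation matrices):
.vb
MatGalerkin(R,Afine,P,MAT_INITIAL_MATRIX,PETSC_DEFAULT,&Acoarse);
... use Acoarse ...
MatDestroy(&Acoarse);
.ve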
10818: Level: developer
10820: .keywords: MG, multigrid, Galerkin
10822: .seealso: MatPtAP(), MatMatMatMult()
10823: @*/
10824: PetscErrorCode MatGalerkin(Mat restrct, Mat dA, Mat interpolate, MatReuse reuse, PetscReal fill, Mat *A)
10825: {
10827: IS zerorows;
10828: Vec diag;
10831: if (reuse == MAT_INPLACE_MATRIX) SETERRQ(PetscObjectComm((PetscObject)restrct),PETSC_ERR_SUP,"Inplace product not supported");
10832: /* Construct the coarse grid matrix */
10833: if (interpolate == restrct) {
10834: MatPtAP(dA,interpolate,reuse,fill,A);
10835: } else {
10836: MatMatMatMult(restrct,dA,interpolate,reuse,fill,A);
10837: }
10839: /* If the interpolation matrix is not of full rank, A will have zero rows.
10840: This can legitimately happen in the case of non-nested geometric multigrid.
10841: In that event, we set the rows of the matrix to the rows of the identity,
10842: ignoring the equations (as the RHS will also be zero). */
10844: MatFindZeroRows(*A, &zerorows);
10846: if (zerorows != NULL) { /* if there are any zero rows */
10847: MatCreateVecs(*A, &diag, NULL);
10848: MatGetDiagonal(*A, diag);
10849: VecISSet(diag, zerorows, 1.0);
10850: MatDiagonalSet(*A, diag, INSERT_VALUES);
10851: VecDestroy(&diag);
10852: ISDestroy(&zerorows);
10853: }
10854: return(0);
10855: }
10857: /*@C
10858: MatSetOperation - Allows user to set a matrix operation for any matrix type
10860: Logically Collective on Mat
10862: Input Parameters:
10863: + mat - the matrix
10864: . op - the name of the operation
10865: - f - the function that provides the operation
10867: Level: developer
10869: Usage:
10870: $ extern PetscErrorCode usermult(Mat,Vec,Vec);
10871: $ MatCreateXXX(comm,...&A);
10872: $ MatSetOperation(A,MATOP_MULT,(void(*)(void))usermult);
10874: Notes:
10875: See the file include/petscmat.h for a complete list of matrix
10876: operations, which all have the form MATOP_<OPERATION>, where
10877: <OPERATION> is the name (in all capital letters) of the
10878: user interface routine (e.g., MatMult() -> MATOP_MULT).
10880: All user-provided functions (except for MATOP_DESTROY) should have the same calling
10881: sequence as the usual matrix interface routines, since they
10882: are intended to be accessed via the usual matrix interface
10883: routines, e.g.,
10884: $ MatMult(Mat,Vec,Vec) -> usermult(Mat,Vec,Vec)
10886: In particular each function MUST return an error code of 0 on success and
10887: nonzero on failure.
10889: This routine is distinct from MatShellSetOperation() in that it can be called on any matrix type.
10891: .keywords: matrix, set, operation
10893: .seealso: MatGetOperation(), MatCreateShell(), MatShellSetContext(), MatShellSetOperation()
10894: @*/
10895: PetscErrorCode MatSetOperation(Mat mat,MatOperation op,void (*f)(void))
10896: {
10899: if (op == MATOP_VIEW && !mat->ops->viewnative && f != (void (*)(void))(mat->ops->view)) {
10900: mat->ops->viewnative = mat->ops->view;
10901: }
10902: (((void(**)(void))mat->ops)[op]) = f;
10903: return(0);
10904: }
10906: /*@C
10907: MatGetOperation - Gets a matrix operation for any matrix type.
10909: Not Collective
10911: Input Parameters:
10912: + mat - the matrix
10913: - op - the name of the operation
10915: Output Parameter:
10916: . f - the function that provides the operation
10918: Level: developer
10920: Usage:
10921: $ PetscErrorCode (*usermult)(Mat,Vec,Vec);
10922: $ MatGetOperation(A,MATOP_MULT,(void(**)(void))&usermult);
10924: Notes:
10925: See the file include/petscmat.h for a complete list of matrix
10926: operations, which all have the form MATOP_<OPERATION>, where
10927: <OPERATION> is the name (in all capital letters) of the
10928: user interface routine (e.g., MatMult() -> MATOP_MULT).
10930: This routine is distinct from MatShellGetOperation() in that it can be called on any matrix type.
10932: .keywords: matrix, get, operation
10934: .seealso: MatSetOperation(), MatCreateShell(), MatShellGetContext(), MatShellGetOperation()
10935: @*/
10936: PetscErrorCode MatGetOperation(Mat mat,MatOperation op,void(**f)(void))
10937: {
10940: *f = (((void (**)(void))mat->ops)[op]);
10941: return(0);
10942: }
10944: /*@
10945: MatHasOperation - Determines whether the given matrix supports the particular
10946: operation.
10948: Not Collective
10950: Input Parameters:
10951: + mat - the matrix
10952: - op - the operation, for example, MATOP_GET_DIAGONAL
10954: Output Parameter:
10955: . has - either PETSC_TRUE or PETSC_FALSE
10957: Level: advanced
10959: Notes:
10960: See the file include/petscmat.h for a complete list of matrix
10961: operations, which all have the form MATOP_<OPERATION>, where
10962: <OPERATION> is the name (in all capital letters) of the
10963: user-level routine. E.g., MatNorm() -> MATOP_NORM.
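Example of Usage (a minimal sketch; mat, x, and y are assumed to be a previously created matrix and compatible vectors):
.vb
PetscBool has;
MatHasOperation(mat,MATOP_MULT_TRANSPOSE,&has);
if (has) {
  MatMultTranspose(mat,x,y);
}
.ve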
10965: .keywords: matrix, has, operation
10967: .seealso: MatCreateShell()
10968: @*/
10969: PetscErrorCode MatHasOperation(Mat mat,MatOperation op,PetscBool *has)
10970: {
10977: if (mat->ops->hasoperation) {
10978: (*mat->ops->hasoperation)(mat,op,has);
10979: } else {
10980: if (((void**)mat->ops)[op]) *has = PETSC_TRUE;
10981: else {
10982: *has = PETSC_FALSE;
10983: if (op == MATOP_CREATE_SUBMATRIX) {
10984: PetscMPIInt size;
10986: MPI_Comm_size(PetscObjectComm((PetscObject)mat),&size);
10987: if (size == 1) {
10988: MatHasOperation(mat,MATOP_CREATE_SUBMATRICES,has);
10989: }
10990: }
10991: }
10992: }
10993: return(0);
10994: }
10996: /*@
10997: MatHasCongruentLayouts - Determines whether the row and column layouts
10998: of the matrix are congruent
11000: Collective on mat
11002: Input Parameters:
11003: . mat - the matrix
11005: Output Parameter:
11006: . cong - either PETSC_TRUE or PETSC_FALSE
11008: Level: beginner
11012: .keywords: matrix, has
11014: .seealso: MatCreate(), MatSetSizes()
11015: @*/
11016: PetscErrorCode MatHasCongruentLayouts(Mat mat,PetscBool *cong)
11017: {
11024: if (!mat->rmap || !mat->cmap) {
11025: *cong = mat->rmap == mat->cmap ? PETSC_TRUE : PETSC_FALSE;
11026: return(0);
11027: }
11028: if (mat->congruentlayouts == PETSC_DECIDE) { /* first time we compare rows and cols layouts */
11029: PetscLayoutCompare(mat->rmap,mat->cmap,cong);
11030: if (*cong) mat->congruentlayouts = 1;
11031: else mat->congruentlayouts = 0;
11032: } else *cong = mat->congruentlayouts ? PETSC_TRUE : PETSC_FALSE;
11033: return(0);
11034: }