Actual source code: mumps.c

  1: /*
  2:     Provides an interface to the MUMPS sparse solver
  3: */
  4: #include <petscpkg_version.h>
  5: #include <petscsf.h>
  6: #include <../src/mat/impls/aij/mpi/mpiaij.h>
  7: #include <../src/mat/impls/sbaij/mpi/mpisbaij.h>
  8: #include <../src/mat/impls/sell/mpi/mpisell.h>

 10: #define MUMPS_MANUALS "(see users manual https://mumps-solver.org/index.php?page=doc \"Error and warning diagnostics\")"

 12: EXTERN_C_BEGIN
 13: #if defined(PETSC_USE_COMPLEX)
 14:   #if defined(PETSC_USE_REAL_SINGLE)
 15:     #include <cmumps_c.h>
 16:   #else
 17:     #include <zmumps_c.h>
 18:   #endif
 19: #else
 20:   #if defined(PETSC_USE_REAL_SINGLE)
 21:     #include <smumps_c.h>
 22:   #else
 23:     #include <dmumps_c.h>
 24:   #endif
 25: #endif
 26: EXTERN_C_END
 27: #define JOB_INIT         -1
 28: #define JOB_NULL         0
 29: #define JOB_FACTSYMBOLIC 1
 30: #define JOB_FACTNUMERIC  2
 31: #define JOB_SOLVE        3
 32: #define JOB_END          -2

 34: /* calls to MUMPS */
 35: #if defined(PETSC_USE_COMPLEX)
 36:   #if defined(PETSC_USE_REAL_SINGLE)
 37:     #define MUMPS_c cmumps_c
 38:   #else
 39:     #define MUMPS_c zmumps_c
 40:   #endif
 41: #else
 42:   #if defined(PETSC_USE_REAL_SINGLE)
 43:     #define MUMPS_c smumps_c
 44:   #else
 45:     #define MUMPS_c dmumps_c
 46:   #endif
 47: #endif

 49: /* MUMPS uses MUMPS_INT for nonzero indices such as irn/jcn and irn_loc/jcn_loc, and uses int64_t for
 50:    numbers of nonzeros such as nnz and nnz_loc. We typedef MUMPS_INT to PetscMUMPSInt to follow the
 51:    naming convention of PetscMPIInt, PetscBLASInt, etc.
 52: */
 53: typedef MUMPS_INT PetscMUMPSInt;

 55: #if PETSC_PKG_MUMPS_VERSION_GE(5, 3, 0)
 56:   #if defined(MUMPS_INTSIZE64) /* MUMPS_INTSIZE64 is defined in the MUMPS headers when MUMPS is built in full 64-bit mode, so checking the macro is more reliable */
 57:     #error "PETSc has not been tested with full 64-bit MUMPS, so we choose to error out"
 58:   #endif
 59: #else
 60:   #if defined(INTSIZE64) /* INTSIZE64 is a command-line macro used to build MUMPS in full 64-bit mode */
 61:     #error "PETSc has not been tested with full 64-bit MUMPS, so we choose to error out"
 62:   #endif
 63: #endif

 65: #define MPIU_MUMPSINT       MPI_INT
 66: #define PETSC_MUMPS_INT_MAX 2147483647
 67: #define PETSC_MUMPS_INT_MIN -2147483648

 69: /* Cast PetscInt to PetscMUMPSInt. Usually there is no overflow since <a> is a row/col index or some other small integer */
 70: static inline PetscErrorCode PetscMUMPSIntCast(PetscInt a, PetscMUMPSInt *b)
 71: {
 72:   PetscFunctionBegin;
 73: #if PetscDefined(USE_64BIT_INDICES)
 74:   PetscAssert(a <= PETSC_MUMPS_INT_MAX && a >= PETSC_MUMPS_INT_MIN, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "PetscInt too long for PetscMUMPSInt");
 75: #endif
 76:   *b = (PetscMUMPSInt)(a);
 77:   PetscFunctionReturn(PETSC_SUCCESS);
 78: }
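
/* Usage sketch (illustrative): with 64-bit PetscInt, a value outside the 32-bit range trips
   the PetscAssert() above instead of being silently truncated.

     PetscMUMPSInt j;
     PetscCall(PetscMUMPSIntCast(42, &j));                   // fine, j == 42
     PetscCall(PetscMUMPSIntCast((PetscInt)3000000000, &j)); // raises PETSC_ERR_ARG_OUTOFRANGE when PetscInt is 64-bit
*/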

 80: /* Put these utility routines here since they are only used in this file */
 81: static inline PetscErrorCode PetscOptionsMUMPSInt_Private(PetscOptionItems *PetscOptionsObject, const char opt[], const char text[], const char man[], PetscMUMPSInt currentvalue, PetscMUMPSInt *value, PetscBool *set, PetscMUMPSInt lb, PetscMUMPSInt ub)
 82: {
 83:   PetscInt  myval;
 84:   PetscBool myset;
 85:   PetscFunctionBegin;
 86:   /* PetscInt's size should always be >= PetscMUMPSInt's, so it is safe to call PetscOptionsInt_Private() to read a PetscMUMPSInt */
 87:   PetscCall(PetscOptionsInt_Private(PetscOptionsObject, opt, text, man, (PetscInt)currentvalue, &myval, &myset, lb, ub));
 88:   if (myset) PetscCall(PetscMUMPSIntCast(myval, value));
 89:   if (set) *set = myset;
 90:   PetscFunctionReturn(PETSC_SUCCESS);
 91: }
 92: #define PetscOptionsMUMPSInt(a, b, c, d, e, f) PetscOptionsMUMPSInt_Private(PetscOptionsObject, a, b, c, d, e, f, PETSC_MUMPS_INT_MIN, PETSC_MUMPS_INT_MAX)
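
/* Usage sketch (illustrative; the option name follows the -mat_mumps_icntl_<n> convention used by
   this interface, and ICNTL() is the 1-based accessor macro defined further below):

     PetscMUMPSInt icntl;
     PetscBool     flg;
     PetscCall(PetscOptionsMUMPSInt("-mat_mumps_icntl_4", "ICNTL(4): level of printing", "None", mumps->id.ICNTL(4), &icntl, &flg));
     if (flg) mumps->id.ICNTL(4) = icntl;
*/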

 94: /* If using PETSc OpenMP support, we only call MUMPS on master ranks. Before/after the call, we change/restore the CPUs the master ranks can run on */
 95: #if defined(PETSC_HAVE_OPENMP_SUPPORT)
 96:   #define PetscMUMPS_c(mumps) \
 97:     do { \
 98:       if (mumps->use_petsc_omp_support) { \
 99:         if (mumps->is_omp_master) { \
100:           PetscCall(PetscOmpCtrlOmpRegionOnMasterBegin(mumps->omp_ctrl)); \
101:           PetscCall(PetscFPTrapPush(PETSC_FP_TRAP_OFF)); \
102:           PetscStackCallExternalVoid(PetscStringize(MUMPS_c), MUMPS_c(&mumps->id)); \
103:           PetscCall(PetscFPTrapPop()); \
104:           PetscCall(PetscOmpCtrlOmpRegionOnMasterEnd(mumps->omp_ctrl)); \
105:         } \
106:         PetscCall(PetscOmpCtrlBarrier(mumps->omp_ctrl)); \
107:         /* Global info is the same on all processes, so we Bcast it within omp_comm. Local info is specific \
108:          to each process, so we only Bcast info[1], the error code, and leave the others alone (they do not \
109:          have an easy translation between omp_comm and petsc_comm). See the MUMPS-5.1.2 manual, p. 82.      \
110:          omp_comm is a small shared-memory communicator, hence doing multiple Bcasts as shown below is OK.  \
111:       */ \
112:         PetscCallMPI(MPI_Bcast(mumps->id.infog, PETSC_STATIC_ARRAY_LENGTH(mumps->id.infog), MPIU_MUMPSINT, 0, mumps->omp_comm)); \
113:         PetscCallMPI(MPI_Bcast(mumps->id.rinfog, PETSC_STATIC_ARRAY_LENGTH(mumps->id.rinfog), MPIU_REAL, 0, mumps->omp_comm)); \
114:         PetscCallMPI(MPI_Bcast(mumps->id.info, PETSC_STATIC_ARRAY_LENGTH(mumps->id.info), MPIU_MUMPSINT, 0, mumps->omp_comm)); \
115:         PetscCallMPI(MPI_Bcast(mumps->id.rinfo, PETSC_STATIC_ARRAY_LENGTH(mumps->id.rinfo), MPIU_REAL, 0, mumps->omp_comm)); \
116:       } else { \
117:         PetscCall(PetscFPTrapPush(PETSC_FP_TRAP_OFF)); \
118:         PetscStackCallExternalVoid(PetscStringize(MUMPS_c), MUMPS_c(&mumps->id)); \
119:         PetscCall(PetscFPTrapPop()); \
120:       } \
121:     } while (0)
122: #else
123:   #define PetscMUMPS_c(mumps) \
124:     do { \
125:       PetscCall(PetscFPTrapPush(PETSC_FP_TRAP_OFF)); \
126:       PetscStackCallExternalVoid(PetscStringize(MUMPS_c), MUMPS_c(&mumps->id)); \
127:       PetscCall(PetscFPTrapPop()); \
128:     } while (0)
129: #endif

131: /* declare MumpsScalar */
132: #if defined(PETSC_USE_COMPLEX)
133:   #if defined(PETSC_USE_REAL_SINGLE)
134:     #define MumpsScalar mumps_complex
135:   #else
136:     #define MumpsScalar mumps_double_complex
137:   #endif
138: #else
139:   #define MumpsScalar PetscScalar
140: #endif

142: /* macros so that indices match the MUMPS documentation */
143: #define ICNTL(I)  icntl[(I)-1]
144: #define CNTL(I)   cntl[(I)-1]
145: #define INFOG(I)  infog[(I)-1]
146: #define INFO(I)   info[(I)-1]
147: #define RINFOG(I) rinfog[(I)-1]
148: #define RINFO(I)  rinfo[(I)-1]
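
/* Example: mumps->id.ICNTL(19) expands to mumps->id.icntl[18], matching the 1-based numbering of
   the MUMPS manual. A typical MUMPS invocation in this file then reads (sketch):

     mumps->id.job = JOB_FACTNUMERIC;
     PetscMUMPS_c(mumps);
     PetscCheck(mumps->id.INFOG(1) >= 0, PETSC_COMM_SELF, PETSC_ERR_LIB, "MUMPS error in numerical factorization: INFOG(1)=%d " MUMPS_MANUALS, mumps->id.INFOG(1));
*/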

150: typedef struct Mat_MUMPS Mat_MUMPS;
151: struct Mat_MUMPS {
152: #if defined(PETSC_USE_COMPLEX)
153:   #if defined(PETSC_USE_REAL_SINGLE)
154:   CMUMPS_STRUC_C id;
155:   #else
156:   ZMUMPS_STRUC_C id;
157:   #endif
158: #else
159:   #if defined(PETSC_USE_REAL_SINGLE)
160:   SMUMPS_STRUC_C id;
161:   #else
162:   DMUMPS_STRUC_C id;
163:   #endif
164: #endif

166:   MatStructure   matstruc;
167:   PetscMPIInt    myid, petsc_size;
168:   PetscMUMPSInt *irn, *jcn;       /* the (i,j,v) triplets passed to mumps. */
 169:   PetscScalar   *val, *val_alloc; /* For some matrices we can access their data array directly, without a buffer; for others we need a buffer, hence val_alloc. */
 170:   PetscInt64     nnz;             /* number of nonzeros; the type is called selective 64-bit in MUMPS */
171:   PetscMUMPSInt  sym;
172:   MPI_Comm       mumps_comm;
173:   PetscMUMPSInt *ICNTL_pre;
174:   PetscReal     *CNTL_pre;
175:   PetscMUMPSInt  ICNTL9_pre;         /* check if ICNTL(9) is changed from previous MatSolve */
176:   VecScatter     scat_rhs, scat_sol; /* used by MatSolve() */
177:   PetscMUMPSInt  ICNTL20;            /* use centralized (0) or distributed (10) dense RHS */
178:   PetscMUMPSInt  lrhs_loc, nloc_rhs, *irhs_loc;
179: #if defined(PETSC_HAVE_OPENMP_SUPPORT)
180:   PetscInt    *rhs_nrow, max_nrhs;
181:   PetscMPIInt *rhs_recvcounts, *rhs_disps;
182:   PetscScalar *rhs_loc, *rhs_recvbuf;
183: #endif
184:   Vec            b_seq, x_seq;
185:   PetscInt       ninfo, *info; /* which INFO to display */
186:   PetscInt       sizeredrhs;
187:   PetscScalar   *schur_sol;
188:   PetscInt       schur_sizesol;
189:   PetscMUMPSInt *ia_alloc, *ja_alloc; /* work arrays used for the CSR struct for sparse rhs */
190:   PetscInt64     cur_ilen, cur_jlen;  /* current len of ia_alloc[], ja_alloc[] */
191:   PetscErrorCode (*ConvertToTriples)(Mat, PetscInt, MatReuse, Mat_MUMPS *);

193:   /* Support for MATNEST */
194:   PetscErrorCode (**nest_convert_to_triples)(Mat, PetscInt, MatReuse, Mat_MUMPS *);
195:   PetscInt64  *nest_vals_start;
196:   PetscScalar *nest_vals;

198:   /* stuff used by petsc/mumps OpenMP support*/
199:   PetscBool    use_petsc_omp_support;
 200:   PetscOmpCtrl omp_ctrl;             /* an OpenMP controller guaranteeing that blocked processes release their CPUs (MPI_Barrier does not provide this guarantee) */
201:   MPI_Comm     petsc_comm, omp_comm; /* petsc_comm is petsc matrix's comm */
202:   PetscInt64  *recvcount;            /* a collection of nnz on omp_master */
203:   PetscMPIInt  tag, omp_comm_size;
204:   PetscBool    is_omp_master; /* is this rank the master of omp_comm */
205:   MPI_Request *reqs;
206: };

208: /* Cast a 1-based CSR represented by (nrow, ia, ja) of type PetscInt to a CSR of type PetscMUMPSInt.
 209:    Here, nrow is the number of rows, ia[] is the row pointer array, and ja[] holds the column indices.
210:  */
211: static PetscErrorCode PetscMUMPSIntCSRCast(Mat_MUMPS *mumps, PetscInt nrow, PetscInt *ia, PetscInt *ja, PetscMUMPSInt **ia_mumps, PetscMUMPSInt **ja_mumps, PetscMUMPSInt *nnz_mumps)
212: {
 213:   PetscInt nnz = ia[nrow] - 1; /* MUMPS uses 1-based indices. nnz is a PetscInt instead of PetscInt64 since MUMPS only uses PetscMUMPSInt for the rhs */

215:   PetscFunctionBegin;
216: #if defined(PETSC_USE_64BIT_INDICES)
217:   {
218:     PetscInt i;
219:     if (nrow + 1 > mumps->cur_ilen) { /* realloc ia_alloc/ja_alloc to fit ia/ja */
220:       PetscCall(PetscFree(mumps->ia_alloc));
221:       PetscCall(PetscMalloc1(nrow + 1, &mumps->ia_alloc));
222:       mumps->cur_ilen = nrow + 1;
223:     }
224:     if (nnz > mumps->cur_jlen) {
225:       PetscCall(PetscFree(mumps->ja_alloc));
226:       PetscCall(PetscMalloc1(nnz, &mumps->ja_alloc));
227:       mumps->cur_jlen = nnz;
228:     }
229:     for (i = 0; i < nrow + 1; i++) PetscCall(PetscMUMPSIntCast(ia[i], &(mumps->ia_alloc[i])));
230:     for (i = 0; i < nnz; i++) PetscCall(PetscMUMPSIntCast(ja[i], &(mumps->ja_alloc[i])));
231:     *ia_mumps = mumps->ia_alloc;
232:     *ja_mumps = mumps->ja_alloc;
233:   }
234: #else
235:   *ia_mumps = ia;
236:   *ja_mumps = ja;
237: #endif
238:   PetscCall(PetscMUMPSIntCast(nnz, nnz_mumps));
239:   PetscFunctionReturn(PETSC_SUCCESS);
240: }
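
/* Example (sketch): the 1-based CSR of the 2x2 identity is nrow = 2, ia = {1, 2, 3}, ja = {1, 2},
   so nnz = ia[nrow] - 1 = 2. With 64-bit PetscInt the indices are copied into ia_alloc[]/ja_alloc[];
   otherwise ia/ja are passed through unchanged. */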

242: static PetscErrorCode MatMumpsResetSchur_Private(Mat_MUMPS *mumps)
243: {
244:   PetscFunctionBegin;
245:   PetscCall(PetscFree(mumps->id.listvar_schur));
246:   PetscCall(PetscFree(mumps->id.redrhs));
247:   PetscCall(PetscFree(mumps->schur_sol));
248:   mumps->id.size_schur = 0;
249:   mumps->id.schur_lld  = 0;
250:   mumps->id.ICNTL(19)  = 0;
251:   PetscFunctionReturn(PETSC_SUCCESS);
252: }

254: /* solve with rhs in mumps->id.redrhs and return in the same location */
255: static PetscErrorCode MatMumpsSolveSchur_Private(Mat F)
256: {
257:   Mat_MUMPS           *mumps = (Mat_MUMPS *)F->data;
258:   Mat                  S, B, X;
259:   MatFactorSchurStatus schurstatus;
260:   PetscInt             sizesol;

262:   PetscFunctionBegin;
263:   PetscCall(MatFactorFactorizeSchurComplement(F));
264:   PetscCall(MatFactorGetSchurComplement(F, &S, &schurstatus));
265:   PetscCall(MatCreateSeqDense(PETSC_COMM_SELF, mumps->id.size_schur, mumps->id.nrhs, (PetscScalar *)mumps->id.redrhs, &B));
266:   PetscCall(MatSetType(B, ((PetscObject)S)->type_name));
267: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_CUDA)
268:   PetscCall(MatBindToCPU(B, S->boundtocpu));
269: #endif
270:   switch (schurstatus) {
271:   case MAT_FACTOR_SCHUR_FACTORED:
272:     PetscCall(MatCreateSeqDense(PETSC_COMM_SELF, mumps->id.size_schur, mumps->id.nrhs, (PetscScalar *)mumps->id.redrhs, &X));
273:     PetscCall(MatSetType(X, ((PetscObject)S)->type_name));
274: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_CUDA)
275:     PetscCall(MatBindToCPU(X, S->boundtocpu));
276: #endif
277:     if (!mumps->id.ICNTL(9)) { /* transpose solve */
278:       PetscCall(MatMatSolveTranspose(S, B, X));
279:     } else {
280:       PetscCall(MatMatSolve(S, B, X));
281:     }
282:     break;
283:   case MAT_FACTOR_SCHUR_INVERTED:
284:     sizesol = mumps->id.nrhs * mumps->id.size_schur;
285:     if (!mumps->schur_sol || sizesol > mumps->schur_sizesol) {
286:       PetscCall(PetscFree(mumps->schur_sol));
287:       PetscCall(PetscMalloc1(sizesol, &mumps->schur_sol));
288:       mumps->schur_sizesol = sizesol;
289:     }
290:     PetscCall(MatCreateSeqDense(PETSC_COMM_SELF, mumps->id.size_schur, mumps->id.nrhs, mumps->schur_sol, &X));
291:     PetscCall(MatSetType(X, ((PetscObject)S)->type_name));
292: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_CUDA)
293:     PetscCall(MatBindToCPU(X, S->boundtocpu));
294: #endif
295:     PetscCall(MatProductCreateWithMat(S, B, NULL, X));
296:     if (!mumps->id.ICNTL(9)) { /* transpose solve */
297:       PetscCall(MatProductSetType(X, MATPRODUCT_AtB));
298:     } else {
299:       PetscCall(MatProductSetType(X, MATPRODUCT_AB));
300:     }
301:     PetscCall(MatProductSetFromOptions(X));
302:     PetscCall(MatProductSymbolic(X));
303:     PetscCall(MatProductNumeric(X));

305:     PetscCall(MatCopy(X, B, SAME_NONZERO_PATTERN));
306:     break;
307:   default:
308:     SETERRQ(PetscObjectComm((PetscObject)F), PETSC_ERR_SUP, "Unhandled MatFactorSchurStatus %d", F->schur_status);
309:   }
310:   PetscCall(MatFactorRestoreSchurComplement(F, &S, schurstatus));
311:   PetscCall(MatDestroy(&B));
312:   PetscCall(MatDestroy(&X));
313:   PetscFunctionReturn(PETSC_SUCCESS);
314: }

316: static PetscErrorCode MatMumpsHandleSchur_Private(Mat F, PetscBool expansion)
317: {
318:   Mat_MUMPS *mumps = (Mat_MUMPS *)F->data;

320:   PetscFunctionBegin;
321:   if (!mumps->id.ICNTL(19)) { /* do nothing when Schur complement has not been computed */
322:     PetscFunctionReturn(PETSC_SUCCESS);
323:   }
324:   if (!expansion) { /* prepare for the condensation step */
325:     PetscInt sizeredrhs = mumps->id.nrhs * mumps->id.size_schur;
326:     /* allocate MUMPS internal array to store reduced right-hand sides */
327:     if (!mumps->id.redrhs || sizeredrhs > mumps->sizeredrhs) {
328:       PetscCall(PetscFree(mumps->id.redrhs));
329:       mumps->id.lredrhs = mumps->id.size_schur;
330:       PetscCall(PetscMalloc1(mumps->id.nrhs * mumps->id.lredrhs, &mumps->id.redrhs));
331:       mumps->sizeredrhs = mumps->id.nrhs * mumps->id.lredrhs;
332:     }
333:   } else { /* prepare for the expansion step */
334:     /* solve Schur complement (this has to be done by the MUMPS user, so basically us) */
335:     PetscCall(MatMumpsSolveSchur_Private(F));
336:     mumps->id.ICNTL(26) = 2; /* expansion phase */
337:     PetscMUMPS_c(mumps);
338:     PetscCheck(mumps->id.INFOG(1) >= 0, PETSC_COMM_SELF, PETSC_ERR_LIB, "MUMPS error in solve: INFOG(1)=%d " MUMPS_MANUALS, mumps->id.INFOG(1));
339:     /* restore defaults */
340:     mumps->id.ICNTL(26) = -1;
341:     /* free MUMPS internal array for redrhs if we have solved for multiple rhs in order to save memory space */
342:     if (mumps->id.nrhs > 1) {
343:       PetscCall(PetscFree(mumps->id.redrhs));
344:       mumps->id.lredrhs = 0;
345:       mumps->sizeredrhs = 0;
346:     }
347:   }
348:   PetscFunctionReturn(PETSC_SUCCESS);
349: }
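
/* Reduced right-hand side workflow (sketch, following the MUMPS manual): when a Schur complement
   has been requested via ICNTL(19), a solve is split into three steps:
     1. condensation: MUMPS reduces the rhs onto the Schur variables and stores it in id.redrhs;
     2. Schur solve:  the user (here, MatMumpsSolveSchur_Private()) solves the Schur system in place;
     3. expansion:    MUMPS expands id.redrhs back to the full solution (ICNTL(26) = 2 above). */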

351: /*
 352:   MatConvertToTriples_A_B - convert a PETSc matrix to triples: row[nz], col[nz], val[nz]

354:   input:
 355:     A       - matrix in aij, baij, or sbaij format
356:     shift   - 0: C style output triple; 1: Fortran style output triple.
357:     reuse   - MAT_INITIAL_MATRIX: spaces are allocated and values are set for the triple
358:               MAT_REUSE_MATRIX:   only the values in v array are updated
359:   output:
360:     nnz     - dim of r, c, and v (number of local nonzero entries of A)
361:     r, c, v - row and col index, matrix values (matrix triples)

 363:   The returned arrays r and c are obtained in a single PetscMalloc2() and freed with PetscFree2(mumps->irn, mumps->jcn)
 364:   in MatDestroy_MUMPS(); v is only sometimes allocated here (as mumps->val_alloc). This is not ideal code: because v is
 365:   only sometimes allocated alongside r and c, the allocations cannot easily be merged into a single PetscMalloc3().

367:  */
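/* Worked example (illustrative): for the 2x2 sequential AIJ matrix

     [ 2  1 ]
     [ 0  3 ]

   with shift = 1 (Fortran-style indexing), the conversion produces

     nnz = 3, r = {1, 1, 2}, c = {1, 2, 2}, v = {2.0, 1.0, 3.0}
*/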

369: static PetscErrorCode MatConvertToTriples_seqaij_seqaij(Mat A, PetscInt shift, MatReuse reuse, Mat_MUMPS *mumps)
370: {
371:   const PetscScalar *av;
372:   const PetscInt    *ai, *aj, *ajj, M = A->rmap->n;
373:   PetscInt64         nz, rnz, i, j, k;
374:   PetscMUMPSInt     *row, *col;
375:   Mat_SeqAIJ        *aa = (Mat_SeqAIJ *)A->data;

377:   PetscFunctionBegin;
378:   PetscCall(MatSeqAIJGetArrayRead(A, &av));
379:   if (reuse == MAT_INITIAL_MATRIX) {
380:     nz = aa->nz;
381:     ai = aa->i;
382:     aj = aa->j;
383:     PetscCall(PetscMalloc2(nz, &row, nz, &col));
384:     for (i = k = 0; i < M; i++) {
385:       rnz = ai[i + 1] - ai[i];
386:       ajj = aj + ai[i];
387:       for (j = 0; j < rnz; j++) {
388:         PetscCall(PetscMUMPSIntCast(i + shift, &row[k]));
389:         PetscCall(PetscMUMPSIntCast(ajj[j] + shift, &col[k]));
390:         k++;
391:       }
392:     }
393:     mumps->val = (PetscScalar *)av;
394:     mumps->irn = row;
395:     mumps->jcn = col;
396:     mumps->nnz = nz;
397:   } else if (mumps->nest_vals) PetscCall(PetscArraycpy(mumps->val, av, aa->nz)); /* MatConvertToTriples_nest_xaij() allocates mumps->val outside of MatConvertToTriples_seqaij_seqaij(), so one needs to copy the memory */
398:   else mumps->val = (PetscScalar *)av;                                           /* in the default case, mumps->val is never allocated, one just needs to update the mumps->val pointer */
399:   PetscCall(MatSeqAIJRestoreArrayRead(A, &av));
400:   PetscFunctionReturn(PETSC_SUCCESS);
401: }

403: static PetscErrorCode MatConvertToTriples_seqsell_seqaij(Mat A, PetscInt shift, MatReuse reuse, Mat_MUMPS *mumps)
404: {
405:   PetscInt64     nz, i, j, k, r;
406:   Mat_SeqSELL   *a = (Mat_SeqSELL *)A->data;
407:   PetscMUMPSInt *row, *col;

409:   PetscFunctionBegin;
410:   nz = a->sliidx[a->totalslices];
411:   if (reuse == MAT_INITIAL_MATRIX) {
412:     PetscCall(PetscMalloc2(nz, &row, nz, &col));
413:     for (i = k = 0; i < a->totalslices; i++) {
414:       for (j = a->sliidx[i], r = 0; j < a->sliidx[i + 1]; j++, r = ((r + 1) & 0x07)) PetscCall(PetscMUMPSIntCast(8 * i + r + shift, &row[k++]));
415:     }
416:     for (i = 0; i < nz; i++) PetscCall(PetscMUMPSIntCast(a->colidx[i] + shift, &col[i]));
417:     mumps->irn = row;
418:     mumps->jcn = col;
419:     mumps->nnz = nz;
420:     mumps->val = a->val;
421:   } else if (mumps->nest_vals) PetscCall(PetscArraycpy(mumps->val, a->val, nz)); /* MatConvertToTriples_nest_xaij() allocates mumps->val outside of MatConvertToTriples_seqsell_seqaij(), so one needs to copy the memory */
422:   else mumps->val = a->val;                                                      /* in the default case, mumps->val is never allocated, one just needs to update the mumps->val pointer */
423:   PetscFunctionReturn(PETSC_SUCCESS);
424: }

426: static PetscErrorCode MatConvertToTriples_seqbaij_seqaij(Mat A, PetscInt shift, MatReuse reuse, Mat_MUMPS *mumps)
427: {
428:   Mat_SeqBAIJ    *aa = (Mat_SeqBAIJ *)A->data;
429:   const PetscInt *ai, *aj, *ajj, bs2 = aa->bs2;
430:   PetscInt64      M, nz = bs2 * aa->nz, idx = 0, rnz, i, j, k, m;
431:   PetscInt        bs;
432:   PetscMUMPSInt  *row, *col;

434:   PetscFunctionBegin;
435:   if (reuse == MAT_INITIAL_MATRIX) {
436:     PetscCall(MatGetBlockSize(A, &bs));
437:     M  = A->rmap->N / bs;
438:     ai = aa->i;
439:     aj = aa->j;
440:     PetscCall(PetscMalloc2(nz, &row, nz, &col));
441:     for (i = 0; i < M; i++) {
442:       ajj = aj + ai[i];
443:       rnz = ai[i + 1] - ai[i];
444:       for (k = 0; k < rnz; k++) {
445:         for (j = 0; j < bs; j++) {
446:           for (m = 0; m < bs; m++) {
447:             PetscCall(PetscMUMPSIntCast(i * bs + m + shift, &row[idx]));
448:             PetscCall(PetscMUMPSIntCast(bs * ajj[k] + j + shift, &col[idx]));
449:             idx++;
450:           }
451:         }
452:       }
453:     }
454:     mumps->irn = row;
455:     mumps->jcn = col;
456:     mumps->nnz = nz;
457:     mumps->val = aa->a;
458:   } else if (mumps->nest_vals) PetscCall(PetscArraycpy(mumps->val, aa->a, nz)); /* MatConvertToTriples_nest_xaij() allocates mumps->val outside of MatConvertToTriples_seqbaij_seqaij(), so one needs to copy the memory */
459:   else mumps->val = aa->a;                                                      /* in the default case, mumps->val is never allocated, one just needs to update the mumps->val pointer */
460:   PetscFunctionReturn(PETSC_SUCCESS);
461: }

463: static PetscErrorCode MatConvertToTriples_seqsbaij_seqsbaij(Mat A, PetscInt shift, MatReuse reuse, Mat_MUMPS *mumps)
464: {
465:   const PetscInt *ai, *aj, *ajj;
466:   PetscInt        bs;
467:   PetscInt64      nz, rnz, i, j, k, m;
468:   PetscMUMPSInt  *row, *col;
469:   PetscScalar    *val;
470:   Mat_SeqSBAIJ   *aa  = (Mat_SeqSBAIJ *)A->data;
471:   const PetscInt  bs2 = aa->bs2, mbs = aa->mbs;
472: #if defined(PETSC_USE_COMPLEX)
473:   PetscBool isset, hermitian;
474: #endif

476:   PetscFunctionBegin;
477: #if defined(PETSC_USE_COMPLEX)
478:   PetscCall(MatIsHermitianKnown(A, &isset, &hermitian));
 479:   PetscCheck(!isset || !hermitian, PetscObjectComm((PetscObject)A), PETSC_ERR_SUP, "MUMPS does not support Hermitian matrices for Cholesky");
480: #endif
481:   ai = aa->i;
482:   aj = aa->j;
483:   PetscCall(MatGetBlockSize(A, &bs));
484:   if (reuse == MAT_INITIAL_MATRIX) {
485:     const PetscInt64 alloc_size = aa->nz * bs2;

487:     PetscCall(PetscMalloc2(alloc_size, &row, alloc_size, &col));
488:     if (bs > 1) {
489:       PetscCall(PetscMalloc1(alloc_size, &mumps->val_alloc));
490:       mumps->val = mumps->val_alloc;
491:     } else {
492:       mumps->val = aa->a;
493:     }
494:     mumps->irn = row;
495:     mumps->jcn = col;
496:   } else {
497:     row = mumps->irn;
498:     col = mumps->jcn;
499:   }
500:   val = mumps->val;

502:   nz = 0;
503:   if (bs > 1) {
504:     for (i = 0; i < mbs; i++) {
505:       rnz = ai[i + 1] - ai[i];
506:       ajj = aj + ai[i];
507:       for (j = 0; j < rnz; j++) {
508:         for (k = 0; k < bs; k++) {
509:           for (m = 0; m < bs; m++) {
510:             if (ajj[j] > i || k >= m) {
511:               if (reuse == MAT_INITIAL_MATRIX) {
512:                 PetscCall(PetscMUMPSIntCast(i * bs + m + shift, &row[nz]));
513:                 PetscCall(PetscMUMPSIntCast(ajj[j] * bs + k + shift, &col[nz]));
514:               }
515:               val[nz++] = aa->a[(ai[i] + j) * bs2 + m + k * bs];
516:             }
517:           }
518:         }
519:       }
520:     }
521:   } else if (reuse == MAT_INITIAL_MATRIX) {
522:     for (i = 0; i < mbs; i++) {
523:       rnz = ai[i + 1] - ai[i];
524:       ajj = aj + ai[i];
525:       for (j = 0; j < rnz; j++) {
526:         PetscCall(PetscMUMPSIntCast(i + shift, &row[nz]));
527:         PetscCall(PetscMUMPSIntCast(ajj[j] + shift, &col[nz]));
528:         nz++;
529:       }
530:     }
531:     PetscCheck(nz == aa->nz, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Different numbers of nonzeros %" PetscInt64_FMT " != %" PetscInt_FMT, nz, aa->nz);
532:   } else if (mumps->nest_vals)
533:     PetscCall(PetscArraycpy(mumps->val, aa->a, aa->nz)); /* bs == 1 and MAT_REUSE_MATRIX, MatConvertToTriples_nest_xaij() allocates mumps->val outside of MatConvertToTriples_seqsbaij_seqsbaij(), so one needs to copy the memory */
534:   else mumps->val = aa->a;                               /* in the default case, mumps->val is never allocated, one just needs to update the mumps->val pointer */
535:   if (reuse == MAT_INITIAL_MATRIX) mumps->nnz = nz;
536:   PetscFunctionReturn(PETSC_SUCCESS);
537: }

539: static PetscErrorCode MatConvertToTriples_seqaij_seqsbaij(Mat A, PetscInt shift, MatReuse reuse, Mat_MUMPS *mumps)
540: {
541:   const PetscInt    *ai, *aj, *ajj, *adiag, M = A->rmap->n;
542:   PetscInt64         nz, rnz, i, j;
543:   const PetscScalar *av, *v1;
544:   PetscScalar       *val;
545:   PetscMUMPSInt     *row, *col;
546:   Mat_SeqAIJ        *aa = (Mat_SeqAIJ *)A->data;
547:   PetscBool          missing;
548: #if defined(PETSC_USE_COMPLEX)
549:   PetscBool hermitian, isset;
550: #endif

552:   PetscFunctionBegin;
553: #if defined(PETSC_USE_COMPLEX)
554:   PetscCall(MatIsHermitianKnown(A, &isset, &hermitian));
 555:   PetscCheck(!isset || !hermitian, PetscObjectComm((PetscObject)A), PETSC_ERR_SUP, "MUMPS does not support Hermitian matrices for Cholesky");
556: #endif
557:   PetscCall(MatSeqAIJGetArrayRead(A, &av));
558:   ai    = aa->i;
559:   aj    = aa->j;
560:   adiag = aa->diag;
561:   PetscCall(MatMissingDiagonal_SeqAIJ(A, &missing, NULL));
562:   if (reuse == MAT_INITIAL_MATRIX) {
563:     /* count nz in the upper triangular part of A */
564:     nz = 0;
565:     if (missing) {
566:       for (i = 0; i < M; i++) {
567:         if (PetscUnlikely(adiag[i] >= ai[i + 1])) {
568:           for (j = ai[i]; j < ai[i + 1]; j++) {
569:             if (aj[j] < i) continue;
570:             nz++;
571:           }
572:         } else {
573:           nz += ai[i + 1] - adiag[i];
574:         }
575:       }
576:     } else {
577:       for (i = 0; i < M; i++) nz += ai[i + 1] - adiag[i];
578:     }
579:     PetscCall(PetscMalloc2(nz, &row, nz, &col));
580:     PetscCall(PetscMalloc1(nz, &val));
581:     mumps->nnz = nz;
582:     mumps->irn = row;
583:     mumps->jcn = col;
584:     mumps->val = mumps->val_alloc = val;

586:     nz = 0;
587:     if (missing) {
588:       for (i = 0; i < M; i++) {
589:         if (PetscUnlikely(adiag[i] >= ai[i + 1])) {
590:           for (j = ai[i]; j < ai[i + 1]; j++) {
591:             if (aj[j] < i) continue;
592:             PetscCall(PetscMUMPSIntCast(i + shift, &row[nz]));
593:             PetscCall(PetscMUMPSIntCast(aj[j] + shift, &col[nz]));
594:             val[nz] = av[j];
595:             nz++;
596:           }
597:         } else {
598:           rnz = ai[i + 1] - adiag[i];
599:           ajj = aj + adiag[i];
600:           v1  = av + adiag[i];
601:           for (j = 0; j < rnz; j++) {
602:             PetscCall(PetscMUMPSIntCast(i + shift, &row[nz]));
603:             PetscCall(PetscMUMPSIntCast(ajj[j] + shift, &col[nz]));
604:             val[nz++] = v1[j];
605:           }
606:         }
607:       }
608:     } else {
609:       for (i = 0; i < M; i++) {
610:         rnz = ai[i + 1] - adiag[i];
611:         ajj = aj + adiag[i];
612:         v1  = av + adiag[i];
613:         for (j = 0; j < rnz; j++) {
614:           PetscCall(PetscMUMPSIntCast(i + shift, &row[nz]));
615:           PetscCall(PetscMUMPSIntCast(ajj[j] + shift, &col[nz]));
616:           val[nz++] = v1[j];
617:         }
618:       }
619:     }
620:   } else {
621:     nz  = 0;
622:     val = mumps->val;
623:     if (missing) {
624:       for (i = 0; i < M; i++) {
625:         if (PetscUnlikely(adiag[i] >= ai[i + 1])) {
626:           for (j = ai[i]; j < ai[i + 1]; j++) {
627:             if (aj[j] < i) continue;
628:             val[nz++] = av[j];
629:           }
630:         } else {
631:           rnz = ai[i + 1] - adiag[i];
632:           v1  = av + adiag[i];
633:           for (j = 0; j < rnz; j++) val[nz++] = v1[j];
634:         }
635:       }
636:     } else {
637:       for (i = 0; i < M; i++) {
638:         rnz = ai[i + 1] - adiag[i];
639:         v1  = av + adiag[i];
640:         for (j = 0; j < rnz; j++) val[nz++] = v1[j];
641:       }
642:     }
643:   }
644:   PetscCall(MatSeqAIJRestoreArrayRead(A, &av));
645:   PetscFunctionReturn(PETSC_SUCCESS);
646: }

648: static PetscErrorCode MatConvertToTriples_mpisbaij_mpisbaij(Mat A, PetscInt shift, MatReuse reuse, Mat_MUMPS *mumps)
649: {
650:   const PetscInt    *ai, *aj, *bi, *bj, *garray, *ajj, *bjj;
651:   PetscInt           bs;
652:   PetscInt64         rstart, nz, i, j, k, m, jj, irow, countA, countB;
653:   PetscMUMPSInt     *row, *col;
654:   const PetscScalar *av, *bv, *v1, *v2;
655:   PetscScalar       *val;
656:   Mat_MPISBAIJ      *mat = (Mat_MPISBAIJ *)A->data;
657:   Mat_SeqSBAIJ      *aa  = (Mat_SeqSBAIJ *)(mat->A)->data;
658:   Mat_SeqBAIJ       *bb  = (Mat_SeqBAIJ *)(mat->B)->data;
659:   const PetscInt     bs2 = aa->bs2, mbs = aa->mbs;
660: #if defined(PETSC_USE_COMPLEX)
661:   PetscBool hermitian, isset;
662: #endif

664:   PetscFunctionBegin;
665: #if defined(PETSC_USE_COMPLEX)
666:   PetscCall(MatIsHermitianKnown(A, &isset, &hermitian));
 667:   PetscCheck(!isset || !hermitian, PetscObjectComm((PetscObject)A), PETSC_ERR_SUP, "MUMPS does not support Hermitian matrices for Cholesky");
668: #endif
669:   PetscCall(MatGetBlockSize(A, &bs));
670:   rstart = A->rmap->rstart;
671:   ai     = aa->i;
672:   aj     = aa->j;
673:   bi     = bb->i;
674:   bj     = bb->j;
675:   av     = aa->a;
676:   bv     = bb->a;

678:   garray = mat->garray;

680:   if (reuse == MAT_INITIAL_MATRIX) {
681:     nz = (aa->nz + bb->nz) * bs2; /* just a conservative estimate */
682:     PetscCall(PetscMalloc2(nz, &row, nz, &col));
683:     PetscCall(PetscMalloc1(nz, &val));
 684:     /* cannot determine the exact mumps->nnz yet because of the SBAIJ format (only part of each block is kept when bs > 1) */
685:     mumps->irn = row;
686:     mumps->jcn = col;
687:     mumps->val = mumps->val_alloc = val;
688:   } else {
689:     val = mumps->val;
690:   }

692:   jj   = 0;
693:   irow = rstart;
694:   for (i = 0; i < mbs; i++) {
695:     ajj    = aj + ai[i]; /* ptr to the beginning of this row */
696:     countA = ai[i + 1] - ai[i];
697:     countB = bi[i + 1] - bi[i];
698:     bjj    = bj + bi[i];
699:     v1     = av + ai[i] * bs2;
700:     v2     = bv + bi[i] * bs2;

702:     if (bs > 1) {
703:       /* A-part */
704:       for (j = 0; j < countA; j++) {
705:         for (k = 0; k < bs; k++) {
706:           for (m = 0; m < bs; m++) {
707:             if (rstart + ajj[j] * bs > irow || k >= m) {
708:               if (reuse == MAT_INITIAL_MATRIX) {
709:                 PetscCall(PetscMUMPSIntCast(irow + m + shift, &row[jj]));
710:                 PetscCall(PetscMUMPSIntCast(rstart + ajj[j] * bs + k + shift, &col[jj]));
711:               }
712:               val[jj++] = v1[j * bs2 + m + k * bs];
713:             }
714:           }
715:         }
716:       }

718:       /* B-part */
719:       for (j = 0; j < countB; j++) {
720:         for (k = 0; k < bs; k++) {
721:           for (m = 0; m < bs; m++) {
722:             if (reuse == MAT_INITIAL_MATRIX) {
723:               PetscCall(PetscMUMPSIntCast(irow + m + shift, &row[jj]));
724:               PetscCall(PetscMUMPSIntCast(garray[bjj[j]] * bs + k + shift, &col[jj]));
725:             }
726:             val[jj++] = v2[j * bs2 + m + k * bs];
727:           }
728:         }
729:       }
730:     } else {
731:       /* A-part */
732:       for (j = 0; j < countA; j++) {
733:         if (reuse == MAT_INITIAL_MATRIX) {
734:           PetscCall(PetscMUMPSIntCast(irow + shift, &row[jj]));
735:           PetscCall(PetscMUMPSIntCast(rstart + ajj[j] + shift, &col[jj]));
736:         }
737:         val[jj++] = v1[j];
738:       }

740:       /* B-part */
741:       for (j = 0; j < countB; j++) {
742:         if (reuse == MAT_INITIAL_MATRIX) {
743:           PetscCall(PetscMUMPSIntCast(irow + shift, &row[jj]));
744:           PetscCall(PetscMUMPSIntCast(garray[bjj[j]] + shift, &col[jj]));
745:         }
746:         val[jj++] = v2[j];
747:       }
748:     }
749:     irow += bs;
750:   }
751:   if (reuse == MAT_INITIAL_MATRIX) mumps->nnz = jj;
752:   PetscFunctionReturn(PETSC_SUCCESS);
753: }

755: static PetscErrorCode MatConvertToTriples_mpiaij_mpiaij(Mat A, PetscInt shift, MatReuse reuse, Mat_MUMPS *mumps)
756: {
757:   const PetscInt    *ai, *aj, *bi, *bj, *garray, m = A->rmap->n, *ajj, *bjj;
758:   PetscInt64         rstart, cstart, nz, i, j, jj, irow, countA, countB;
759:   PetscMUMPSInt     *row, *col;
760:   const PetscScalar *av, *bv, *v1, *v2;
761:   PetscScalar       *val;
762:   Mat                Ad, Ao;
763:   Mat_SeqAIJ        *aa;
764:   Mat_SeqAIJ        *bb;

766:   PetscFunctionBegin;
767:   PetscCall(MatMPIAIJGetSeqAIJ(A, &Ad, &Ao, &garray));
768:   PetscCall(MatSeqAIJGetArrayRead(Ad, &av));
769:   PetscCall(MatSeqAIJGetArrayRead(Ao, &bv));

771:   aa = (Mat_SeqAIJ *)(Ad)->data;
772:   bb = (Mat_SeqAIJ *)(Ao)->data;
773:   ai = aa->i;
774:   aj = aa->j;
775:   bi = bb->i;
776:   bj = bb->j;

778:   rstart = A->rmap->rstart;
779:   cstart = A->cmap->rstart;

781:   if (reuse == MAT_INITIAL_MATRIX) {
782:     nz = (PetscInt64)aa->nz + bb->nz; /* make sure the sum won't overflow PetscInt */
783:     PetscCall(PetscMalloc2(nz, &row, nz, &col));
784:     PetscCall(PetscMalloc1(nz, &val));
785:     mumps->nnz = nz;
786:     mumps->irn = row;
787:     mumps->jcn = col;
788:     mumps->val = mumps->val_alloc = val;
789:   } else {
790:     val = mumps->val;
791:   }

793:   jj   = 0;
794:   irow = rstart;
795:   for (i = 0; i < m; i++) {
796:     ajj    = aj + ai[i]; /* ptr to the beginning of this row */
797:     countA = ai[i + 1] - ai[i];
798:     countB = bi[i + 1] - bi[i];
799:     bjj    = bj + bi[i];
800:     v1     = av + ai[i];
801:     v2     = bv + bi[i];

803:     /* A-part */
804:     for (j = 0; j < countA; j++) {
805:       if (reuse == MAT_INITIAL_MATRIX) {
806:         PetscCall(PetscMUMPSIntCast(irow + shift, &row[jj]));
807:         PetscCall(PetscMUMPSIntCast(cstart + ajj[j] + shift, &col[jj]));
808:       }
809:       val[jj++] = v1[j];
810:     }

812:     /* B-part */
813:     for (j = 0; j < countB; j++) {
814:       if (reuse == MAT_INITIAL_MATRIX) {
815:         PetscCall(PetscMUMPSIntCast(irow + shift, &row[jj]));
816:         PetscCall(PetscMUMPSIntCast(garray[bjj[j]] + shift, &col[jj]));
817:       }
818:       val[jj++] = v2[j];
819:     }
820:     irow++;
821:   }
822:   PetscCall(MatSeqAIJRestoreArrayRead(Ad, &av));
823:   PetscCall(MatSeqAIJRestoreArrayRead(Ao, &bv));
824:   PetscFunctionReturn(PETSC_SUCCESS);
825: }

827: static PetscErrorCode MatConvertToTriples_mpibaij_mpiaij(Mat A, PetscInt shift, MatReuse reuse, Mat_MUMPS *mumps)
828: {
829:   Mat_MPIBAIJ       *mat = (Mat_MPIBAIJ *)A->data;
830:   Mat_SeqBAIJ       *aa  = (Mat_SeqBAIJ *)(mat->A)->data;
831:   Mat_SeqBAIJ       *bb  = (Mat_SeqBAIJ *)(mat->B)->data;
832:   const PetscInt    *ai = aa->i, *bi = bb->i, *aj = aa->j, *bj = bb->j, *ajj, *bjj;
833:   const PetscInt    *garray = mat->garray, mbs = mat->mbs, rstart = A->rmap->rstart, cstart = A->cmap->rstart;
834:   const PetscInt     bs2 = mat->bs2;
835:   PetscInt           bs;
836:   PetscInt64         nz, i, j, k, n, jj, irow, countA, countB, idx;
837:   PetscMUMPSInt     *row, *col;
838:   const PetscScalar *av = aa->a, *bv = bb->a, *v1, *v2;
839:   PetscScalar       *val;

841:   PetscFunctionBegin;
842:   PetscCall(MatGetBlockSize(A, &bs));
843:   if (reuse == MAT_INITIAL_MATRIX) {
844:     nz = bs2 * (aa->nz + bb->nz);
845:     PetscCall(PetscMalloc2(nz, &row, nz, &col));
846:     PetscCall(PetscMalloc1(nz, &val));
847:     mumps->nnz = nz;
848:     mumps->irn = row;
849:     mumps->jcn = col;
850:     mumps->val = mumps->val_alloc = val;
851:   } else {
852:     val = mumps->val;
853:   }

855:   jj   = 0;
856:   irow = rstart;
857:   for (i = 0; i < mbs; i++) {
858:     countA = ai[i + 1] - ai[i];
859:     countB = bi[i + 1] - bi[i];
860:     ajj    = aj + ai[i];
861:     bjj    = bj + bi[i];
862:     v1     = av + bs2 * ai[i];
863:     v2     = bv + bs2 * bi[i];

865:     idx = 0;
866:     /* A-part */
867:     for (k = 0; k < countA; k++) {
868:       for (j = 0; j < bs; j++) {
869:         for (n = 0; n < bs; n++) {
870:           if (reuse == MAT_INITIAL_MATRIX) {
871:             PetscCall(PetscMUMPSIntCast(irow + n + shift, &row[jj]));
872:             PetscCall(PetscMUMPSIntCast(cstart + bs * ajj[k] + j + shift, &col[jj]));
873:           }
874:           val[jj++] = v1[idx++];
875:         }
876:       }
877:     }

879:     idx = 0;
880:     /* B-part */
881:     for (k = 0; k < countB; k++) {
882:       for (j = 0; j < bs; j++) {
883:         for (n = 0; n < bs; n++) {
884:           if (reuse == MAT_INITIAL_MATRIX) {
885:             PetscCall(PetscMUMPSIntCast(irow + n + shift, &row[jj]));
886:             PetscCall(PetscMUMPSIntCast(bs * garray[bjj[k]] + j + shift, &col[jj]));
887:           }
888:           val[jj++] = v2[idx++];
889:         }
890:       }
891:     }
892:     irow += bs;
893:   }
894:   PetscFunctionReturn(PETSC_SUCCESS);
895: }

897: static PetscErrorCode MatConvertToTriples_mpiaij_mpisbaij(Mat A, PetscInt shift, MatReuse reuse, Mat_MUMPS *mumps)
898: {
899:   const PetscInt    *ai, *aj, *adiag, *bi, *bj, *garray, m = A->rmap->n, *ajj, *bjj;
900:   PetscInt64         rstart, nz, nza, nzb, i, j, jj, irow, countA, countB;
901:   PetscMUMPSInt     *row, *col;
902:   const PetscScalar *av, *bv, *v1, *v2;
903:   PetscScalar       *val;
904:   Mat                Ad, Ao;
905:   Mat_SeqAIJ        *aa;
906:   Mat_SeqAIJ        *bb;
907: #if defined(PETSC_USE_COMPLEX)
908:   PetscBool hermitian, isset;
909: #endif

911:   PetscFunctionBegin;
912: #if defined(PETSC_USE_COMPLEX)
913:   PetscCall(MatIsHermitianKnown(A, &isset, &hermitian));
 914:   PetscCheck(!isset || !hermitian, PetscObjectComm((PetscObject)A), PETSC_ERR_SUP, "MUMPS does not support Hermitian matrices for Cholesky");
915: #endif
916:   PetscCall(MatMPIAIJGetSeqAIJ(A, &Ad, &Ao, &garray));
917:   PetscCall(MatSeqAIJGetArrayRead(Ad, &av));
918:   PetscCall(MatSeqAIJGetArrayRead(Ao, &bv));

920:   aa    = (Mat_SeqAIJ *)(Ad)->data;
921:   bb    = (Mat_SeqAIJ *)(Ao)->data;
922:   ai    = aa->i;
923:   aj    = aa->j;
924:   adiag = aa->diag;
925:   bi    = bb->i;
926:   bj    = bb->j;

928:   rstart = A->rmap->rstart;

930:   if (reuse == MAT_INITIAL_MATRIX) {
931:     nza = 0; /* num of upper triangular entries in mat->A, including diagonals */
932:     nzb = 0; /* num of upper triangular entries in mat->B */
933:     for (i = 0; i < m; i++) {
934:       nza += (ai[i + 1] - adiag[i]);
935:       countB = bi[i + 1] - bi[i];
936:       bjj    = bj + bi[i];
937:       for (j = 0; j < countB; j++) {
938:         if (garray[bjj[j]] > rstart) nzb++;
939:       }
940:     }

942:     nz = nza + nzb; /* total nz of upper triangular part of mat */
943:     PetscCall(PetscMalloc2(nz, &row, nz, &col));
944:     PetscCall(PetscMalloc1(nz, &val));
945:     mumps->nnz = nz;
946:     mumps->irn = row;
947:     mumps->jcn = col;
948:     mumps->val = mumps->val_alloc = val;
949:   } else {
950:     val = mumps->val;
951:   }

953:   jj   = 0;
954:   irow = rstart;
955:   for (i = 0; i < m; i++) {
956:     ajj    = aj + adiag[i]; /* ptr to the beginning of the diagonal of this row */
957:     v1     = av + adiag[i];
958:     countA = ai[i + 1] - adiag[i];
959:     countB = bi[i + 1] - bi[i];
960:     bjj    = bj + bi[i];
961:     v2     = bv + bi[i];

963:     /* A-part */
964:     for (j = 0; j < countA; j++) {
965:       if (reuse == MAT_INITIAL_MATRIX) {
966:         PetscCall(PetscMUMPSIntCast(irow + shift, &row[jj]));
967:         PetscCall(PetscMUMPSIntCast(rstart + ajj[j] + shift, &col[jj]));
968:       }
969:       val[jj++] = v1[j];
970:     }

972:     /* B-part */
973:     for (j = 0; j < countB; j++) {
974:       if (garray[bjj[j]] > rstart) {
975:         if (reuse == MAT_INITIAL_MATRIX) {
976:           PetscCall(PetscMUMPSIntCast(irow + shift, &row[jj]));
977:           PetscCall(PetscMUMPSIntCast(garray[bjj[j]] + shift, &col[jj]));
978:         }
979:         val[jj++] = v2[j];
980:       }
981:     }
982:     irow++;
983:   }
984:   PetscCall(MatSeqAIJRestoreArrayRead(Ad, &av));
985:   PetscCall(MatSeqAIJRestoreArrayRead(Ao, &bv));
986:   PetscFunctionReturn(PETSC_SUCCESS);
987: }

989: static PetscErrorCode MatConvertToTriples_diagonal_xaij(Mat A, PetscInt shift, MatReuse reuse, Mat_MUMPS *mumps)
990: {
991:   const PetscScalar *av;
992:   const PetscInt     M = A->rmap->n;
993:   PetscInt64         i;
994:   PetscMUMPSInt     *row, *col;
995:   Vec                v;

997:   PetscFunctionBegin;
998:   PetscCall(MatDiagonalGetDiagonal(A, &v));
999:   PetscCall(VecGetArrayRead(v, &av));
1000:   if (reuse == MAT_INITIAL_MATRIX) {
1001:     PetscCall(PetscMalloc2(M, &row, M, &col));
1002:     for (i = 0; i < M; i++) {
1003:       PetscCall(PetscMUMPSIntCast(i + A->rmap->rstart, &row[i]));
1004:       col[i] = row[i];
1005:     }
1006:     mumps->val = (PetscScalar *)av;
1007:     mumps->irn = row;
1008:     mumps->jcn = col;
1009:     mumps->nnz = M;
1010:   } else if (mumps->nest_vals) PetscCall(PetscArraycpy(mumps->val, av, M)); /* MatConvertToTriples_nest_xaij() allocates mumps->val outside of MatConvertToTriples_diagonal_xaij(), so one needs to copy the memory */
1011:   else mumps->val = (PetscScalar *)av;                                      /* in the default case, mumps->val is never allocated, one just needs to update the mumps->val pointer */
1012:   PetscCall(VecRestoreArrayRead(v, &av));
1013:   PetscFunctionReturn(PETSC_SUCCESS);
1014: }

1016: static PetscErrorCode MatConvertToTriples_nest_xaij(Mat A, PetscInt shift, MatReuse reuse, Mat_MUMPS *mumps)
1017: {
1018:   Mat     **mats;
1019:   PetscInt  nr, nc;
1020:   PetscBool chol = mumps->sym ? PETSC_TRUE : PETSC_FALSE;

1022:   PetscFunctionBegin;
1023:   PetscCall(MatNestGetSubMats(A, &nr, &nc, &mats));
1024:   if (reuse == MAT_INITIAL_MATRIX) {
1025:     PetscMUMPSInt *irns, *jcns;
1026:     PetscScalar   *vals;
1027:     PetscInt64     totnnz, cumnnz, maxnnz;
1028:     PetscInt      *pjcns_w;
1029:     IS            *rows, *cols;
1030:     PetscInt     **rows_idx, **cols_idx;

1032:     cumnnz = 0;
1033:     maxnnz = 0;
1034:     PetscCall(PetscMalloc2(nr * nc + 1, &mumps->nest_vals_start, nr * nc, &mumps->nest_convert_to_triples));
1035:     for (PetscInt r = 0; r < nr; r++) {
1036:       for (PetscInt c = 0; c < nc; c++) {
1037:         Mat sub = mats[r][c];

1039:         mumps->nest_convert_to_triples[r * nc + c] = NULL;
1040:         if (chol && c < r) continue; /* skip lower-triangular block for Cholesky */
1041:         if (sub) {
1042:           PetscErrorCode (*convert_to_triples)(Mat, PetscInt, MatReuse, Mat_MUMPS *) = NULL;
1043:           PetscBool isSeqAIJ, isMPIAIJ, isSeqBAIJ, isMPIBAIJ, isSeqSBAIJ, isMPISBAIJ, isTrans, isHTrans = PETSC_FALSE, isDiag;
1044:           MatInfo   info;

1046:           PetscCall(PetscObjectTypeCompare((PetscObject)sub, MATTRANSPOSEVIRTUAL, &isTrans));
1047:           if (isTrans) PetscCall(MatTransposeGetMat(sub, &sub));
1048:           else {
1049:             PetscCall(PetscObjectTypeCompare((PetscObject)sub, MATHERMITIANTRANSPOSEVIRTUAL, &isHTrans));
1050:             if (isHTrans) PetscCall(MatHermitianTransposeGetMat(sub, &sub));
1051:           }
1052:           PetscCall(PetscObjectBaseTypeCompare((PetscObject)sub, MATSEQAIJ, &isSeqAIJ));
1053:           PetscCall(PetscObjectBaseTypeCompare((PetscObject)sub, MATMPIAIJ, &isMPIAIJ));
1054:           PetscCall(PetscObjectBaseTypeCompare((PetscObject)sub, MATSEQBAIJ, &isSeqBAIJ));
1055:           PetscCall(PetscObjectBaseTypeCompare((PetscObject)sub, MATMPIBAIJ, &isMPIBAIJ));
1056:           PetscCall(PetscObjectBaseTypeCompare((PetscObject)sub, MATSEQSBAIJ, &isSeqSBAIJ));
1057:           PetscCall(PetscObjectBaseTypeCompare((PetscObject)sub, MATMPISBAIJ, &isMPISBAIJ));
1058:           PetscCall(PetscObjectTypeCompare((PetscObject)sub, MATDIAGONAL, &isDiag));

1060:           if (chol) {
1061:             if (r == c) {
1062:               if (isSeqAIJ) convert_to_triples = MatConvertToTriples_seqaij_seqsbaij;
1063:               else if (isMPIAIJ) convert_to_triples = MatConvertToTriples_mpiaij_mpisbaij;
1064:               else if (isSeqSBAIJ) convert_to_triples = MatConvertToTriples_seqsbaij_seqsbaij;
1065:               else if (isMPISBAIJ) convert_to_triples = MatConvertToTriples_mpisbaij_mpisbaij;
1066:               else if (isDiag) convert_to_triples = MatConvertToTriples_diagonal_xaij;
1067:             } else {
1068:               if (isSeqAIJ) convert_to_triples = MatConvertToTriples_seqaij_seqaij;
1069:               else if (isMPIAIJ) convert_to_triples = MatConvertToTriples_mpiaij_mpiaij;
1070:               else if (isSeqBAIJ) convert_to_triples = MatConvertToTriples_seqbaij_seqaij;
1071:               else if (isMPIBAIJ) convert_to_triples = MatConvertToTriples_mpibaij_mpiaij;
1072:               else if (isDiag) convert_to_triples = MatConvertToTriples_diagonal_xaij;
1073:             }
1074:           } else {
1075:             if (isSeqAIJ) convert_to_triples = MatConvertToTriples_seqaij_seqaij;
1076:             else if (isMPIAIJ) convert_to_triples = MatConvertToTriples_mpiaij_mpiaij;
1077:             else if (isSeqBAIJ) convert_to_triples = MatConvertToTriples_seqbaij_seqaij;
1078:             else if (isMPIBAIJ) convert_to_triples = MatConvertToTriples_mpibaij_mpiaij;
1079:             else if (isDiag) convert_to_triples = MatConvertToTriples_diagonal_xaij;
1080:           }
1081:           PetscCheck(convert_to_triples, PetscObjectComm((PetscObject)sub), PETSC_ERR_SUP, "Not for block of type %s", ((PetscObject)sub)->type_name);
1082:           mumps->nest_convert_to_triples[r * nc + c] = convert_to_triples;
1083:           PetscCall(MatGetInfo(sub, MAT_LOCAL, &info));
1084:           cumnnz += (PetscInt64)info.nz_used; /* can be overestimated for Cholesky */
1085:           maxnnz = PetscMax(maxnnz, info.nz_used);
1086:         }
1087:       }
1088:     }

1090:     /* Allocate total COO */
1091:     totnnz = cumnnz;
1092:     PetscCall(PetscMalloc2(totnnz, &irns, totnnz, &jcns));
1093:     PetscCall(PetscMalloc1(totnnz, &vals));

1095:     /* Handle rows and column maps
1096:        We directly map rows and use an SF for the columns */
1097:     PetscCall(PetscMalloc4(nr, &rows, nc, &cols, nr, &rows_idx, nc, &cols_idx));
1098:     PetscCall(MatNestGetISs(A, rows, cols));
1099:     for (PetscInt r = 0; r < nr; r++) PetscCall(ISGetIndices(rows[r], (const PetscInt **)&rows_idx[r]));
1100:     for (PetscInt c = 0; c < nc; c++) PetscCall(ISGetIndices(cols[c], (const PetscInt **)&cols_idx[c]));
1101:     if (PetscDefined(USE_64BIT_INDICES)) PetscCall(PetscMalloc1(maxnnz, &pjcns_w));
1102:     else (void)maxnnz;

1104:     cumnnz = 0;
1105:     for (PetscInt r = 0; r < nr; r++) {
1106:       for (PetscInt c = 0; c < nc; c++) {
1107:         Mat             sub  = mats[r][c];
1108:         const PetscInt *ridx = rows_idx[r];
1109:         const PetscInt *cidx = cols_idx[c];
1110:         PetscInt        rst;
1111:         PetscSF         csf;
1112:         PetscBool       isTrans, isHTrans = PETSC_FALSE, swap;
1113:         PetscLayout     cmap;

1115:         mumps->nest_vals_start[r * nc + c] = cumnnz;
1116:         if (!mumps->nest_convert_to_triples[r * nc + c]) continue;

1118:         /* Extract inner blocks if needed */
1119:         PetscCall(PetscObjectTypeCompare((PetscObject)sub, MATTRANSPOSEVIRTUAL, &isTrans));
1120:         if (isTrans) PetscCall(MatTransposeGetMat(sub, &sub));
1121:         else {
1122:           PetscCall(PetscObjectTypeCompare((PetscObject)sub, MATHERMITIANTRANSPOSEVIRTUAL, &isHTrans));
1123:           if (isHTrans) PetscCall(MatHermitianTransposeGetMat(sub, &sub));
1124:         }
1125:         swap = (PetscBool)(isTrans || isHTrans);

1127:         /* Get column layout to map off-process columns */
1128:         PetscCall(MatGetLayouts(sub, NULL, &cmap));

1130:         /* Get row start to map on-process rows */
1131:         PetscCall(MatGetOwnershipRange(sub, &rst, NULL));

1133:         /* Directly use the MUMPS data structure and C ordering for now */
1134:         PetscCall((*mumps->nest_convert_to_triples[r * nc + c])(sub, 0, MAT_INITIAL_MATRIX, mumps));

1136:         /* Swap the role of rows and columns indices for transposed blocks
1137:            since we need values with global final ordering */
1138:         if (swap) {
1139:           cidx = rows_idx[r];
1140:           ridx = cols_idx[c];
1141:         }

1143:         /* Communicate column indices
1144:            This could have been done with a single SF, but it would have complicated the code a lot;
1145:            since we do it only once, we pay the price of setting up an SF for each block */
1146:         if (PetscDefined(USE_64BIT_INDICES)) {
1147:           for (PetscInt k = 0; k < mumps->nnz; k++) pjcns_w[k] = mumps->jcn[k];
1148:         } else pjcns_w = (PetscInt *)(mumps->jcn); /* This cast is needed only to silence compiler warnings in 64-bit-integer builds */
1149:         PetscCall(PetscSFCreate(PetscObjectComm((PetscObject)A), &csf));
1150:         PetscCall(PetscSFSetGraphLayout(csf, cmap, mumps->nnz, NULL, PETSC_OWN_POINTER, pjcns_w));
1151:         PetscCall(PetscSFBcastBegin(csf, MPIU_INT, cidx, pjcns_w, MPI_REPLACE));
1152:         PetscCall(PetscSFBcastEnd(csf, MPIU_INT, cidx, pjcns_w, MPI_REPLACE));
1153:         PetscCall(PetscSFDestroy(&csf));

1155:         /* Import indices: use direct map for rows and mapped indices for columns */
1156:         if (swap) {
1157:           for (PetscInt k = 0; k < mumps->nnz; k++) {
1158:             PetscCall(PetscMUMPSIntCast(ridx[mumps->irn[k] - rst] + shift, &jcns[cumnnz + k]));
1159:             PetscCall(PetscMUMPSIntCast(pjcns_w[k] + shift, &irns[cumnnz + k]));
1160:           }
1161:         } else {
1162:           for (PetscInt k = 0; k < mumps->nnz; k++) {
1163:             PetscCall(PetscMUMPSIntCast(ridx[mumps->irn[k] - rst] + shift, &irns[cumnnz + k]));
1164:             PetscCall(PetscMUMPSIntCast(pjcns_w[k] + shift, &jcns[cumnnz + k]));
1165:           }
1166:         }

1168:         /* Import values to full COO */
1169:         PetscCall(PetscArraycpy(vals + cumnnz, mumps->val, mumps->nnz));
1170:         if (isHTrans) { /* conjugate the entries */
1171:           PetscScalar *v = vals + cumnnz;
1172:           for (PetscInt k = 0; k < mumps->nnz; k++) v[k] = PetscConj(v[k]);
1173:         }

1175:         /* Shift new starting point and sanity check */
1176:         cumnnz += mumps->nnz;
1177:         PetscCheck(cumnnz <= totnnz, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Unexpected number of nonzeros %" PetscInt64_FMT " != %" PetscInt64_FMT, cumnnz, totnnz);

1179:         /* Free scratch memory */
1180:         PetscCall(PetscFree2(mumps->irn, mumps->jcn));
1181:         PetscCall(PetscFree(mumps->val_alloc));
1182:         mumps->val = NULL;
1183:         mumps->nnz = 0;
1184:       }
1185:     }
1186:     if (PetscDefined(USE_64BIT_INDICES)) PetscCall(PetscFree(pjcns_w));
1187:     for (PetscInt r = 0; r < nr; r++) PetscCall(ISRestoreIndices(rows[r], (const PetscInt **)&rows_idx[r]));
1188:     for (PetscInt c = 0; c < nc; c++) PetscCall(ISRestoreIndices(cols[c], (const PetscInt **)&cols_idx[c]));
1189:     PetscCall(PetscFree4(rows, cols, rows_idx, cols_idx));
1190:     if (!chol) PetscCheck(cumnnz == totnnz, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Different number of nonzeros %" PetscInt64_FMT " != %" PetscInt64_FMT, cumnnz, totnnz);
1191:     mumps->nest_vals_start[nr * nc] = cumnnz;

1193:     /* Set pointers for final MUMPS data structure */
1194:     mumps->nest_vals = vals;
1195:     mumps->val_alloc = NULL; /* do not use val_alloc since it may be reallocated with the OMP callpath */
1196:     mumps->val       = vals;
1197:     mumps->irn       = irns;
1198:     mumps->jcn       = jcns;
1199:     mumps->nnz       = cumnnz;
1200:   } else {
1201:     PetscScalar *oval = mumps->nest_vals;
1202:     for (PetscInt r = 0; r < nr; r++) {
1203:       for (PetscInt c = 0; c < nc; c++) {
1204:         PetscBool isTrans, isHTrans = PETSC_FALSE;
1205:         Mat       sub  = mats[r][c];
1206:         PetscInt  midx = r * nc + c;

1208:         if (!mumps->nest_convert_to_triples[midx]) continue;
1209:         PetscCall(PetscObjectTypeCompare((PetscObject)sub, MATTRANSPOSEVIRTUAL, &isTrans));
1210:         if (isTrans) PetscCall(MatTransposeGetMat(sub, &sub));
1211:         else {
1212:           PetscCall(PetscObjectTypeCompare((PetscObject)sub, MATHERMITIANTRANSPOSEVIRTUAL, &isHTrans));
1213:           if (isHTrans) PetscCall(MatHermitianTransposeGetMat(sub, &sub));
1214:         }
1215:         mumps->val = oval + mumps->nest_vals_start[midx];
1216:         PetscCall((*mumps->nest_convert_to_triples[midx])(sub, shift, MAT_REUSE_MATRIX, mumps));
1217:         if (isHTrans) {
1218:           PetscInt nnz = mumps->nest_vals_start[midx + 1] - mumps->nest_vals_start[midx];
1219:           for (PetscInt k = 0; k < nnz; k++) mumps->val[k] = PetscConj(mumps->val[k]);
1220:         }
1221:       }
1222:     }
1223:     mumps->val = oval;
1224:   }
1225:   PetscFunctionReturn(PETSC_SUCCESS);
1226: }

1228: static PetscErrorCode MatDestroy_MUMPS(Mat A)
1229: {
1230:   Mat_MUMPS *mumps = (Mat_MUMPS *)A->data;

1232:   PetscFunctionBegin;
1233:   PetscCall(PetscFree2(mumps->id.sol_loc, mumps->id.isol_loc));
1234:   PetscCall(VecScatterDestroy(&mumps->scat_rhs));
1235:   PetscCall(VecScatterDestroy(&mumps->scat_sol));
1236:   PetscCall(VecDestroy(&mumps->b_seq));
1237:   PetscCall(VecDestroy(&mumps->x_seq));
1238:   PetscCall(PetscFree(mumps->id.perm_in));
1239:   PetscCall(PetscFree2(mumps->irn, mumps->jcn));
1240:   PetscCall(PetscFree(mumps->val_alloc));
1241:   PetscCall(PetscFree(mumps->info));
1242:   PetscCall(PetscFree(mumps->ICNTL_pre));
1243:   PetscCall(PetscFree(mumps->CNTL_pre));
1244:   PetscCall(MatMumpsResetSchur_Private(mumps));
1245:   if (mumps->id.job != JOB_NULL) { /* cannot call PetscMUMPS_c() if JOB_INIT has never been called for this instance */
1246:     mumps->id.job = JOB_END;
1247:     PetscMUMPS_c(mumps);
1248:     PetscCheck(mumps->id.INFOG(1) >= 0, PETSC_COMM_SELF, PETSC_ERR_LIB, "MUMPS error in termination: INFOG(1)=%d " MUMPS_MANUALS, mumps->id.INFOG(1));
1249:     if (mumps->mumps_comm != MPI_COMM_NULL) {
1250:       if (PetscDefined(HAVE_OPENMP_SUPPORT) && mumps->use_petsc_omp_support) PetscCallMPI(MPI_Comm_free(&mumps->mumps_comm));
1251:       else PetscCall(PetscCommRestoreComm(PetscObjectComm((PetscObject)A), &mumps->mumps_comm));
1252:     }
1253:   }
1254: #if defined(PETSC_HAVE_OPENMP_SUPPORT)
1255:   if (mumps->use_petsc_omp_support) {
1256:     PetscCall(PetscOmpCtrlDestroy(&mumps->omp_ctrl));
1257:     PetscCall(PetscFree2(mumps->rhs_loc, mumps->rhs_recvbuf));
1258:     PetscCall(PetscFree3(mumps->rhs_nrow, mumps->rhs_recvcounts, mumps->rhs_disps));
1259:   }
1260: #endif
1261:   PetscCall(PetscFree(mumps->ia_alloc));
1262:   PetscCall(PetscFree(mumps->ja_alloc));
1263:   PetscCall(PetscFree(mumps->recvcount));
1264:   PetscCall(PetscFree(mumps->reqs));
1265:   PetscCall(PetscFree(mumps->irhs_loc));
1266:   PetscCall(PetscFree2(mumps->nest_vals_start, mumps->nest_convert_to_triples));
1267:   PetscCall(PetscFree(mumps->nest_vals));
1268:   PetscCall(PetscFree(A->data));

1270:   /* clear composed functions */
1271:   PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatFactorGetSolverType_C", NULL));
1272:   PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatFactorSetSchurIS_C", NULL));
1273:   PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatFactorCreateSchurComplement_C", NULL));
1274:   PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatMumpsSetIcntl_C", NULL));
1275:   PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatMumpsGetIcntl_C", NULL));
1276:   PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatMumpsSetCntl_C", NULL));
1277:   PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatMumpsGetCntl_C", NULL));
1278:   PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatMumpsGetInfo_C", NULL));
1279:   PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatMumpsGetInfog_C", NULL));
1280:   PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatMumpsGetRinfo_C", NULL));
1281:   PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatMumpsGetRinfog_C", NULL));
1282:   PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatMumpsGetNullPivots_C", NULL));
1283:   PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatMumpsGetInverse_C", NULL));
1284:   PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatMumpsGetInverseTranspose_C", NULL));
1285:   PetscFunctionReturn(PETSC_SUCCESS);
1286: }

1288: /* Set up the distributed RHS info for MUMPS. <nrhs> is the number of RHS. <array> points to start of RHS on the local processor. */
1289: static PetscErrorCode MatMumpsSetUpDistRHSInfo(Mat A, PetscInt nrhs, const PetscScalar *array)
1290: {
1291:   Mat_MUMPS        *mumps   = (Mat_MUMPS *)A->data;
1292:   const PetscMPIInt ompsize = mumps->omp_comm_size;
1293:   PetscInt          i, m, M, rstart;

1295:   PetscFunctionBegin;
1296:   PetscCall(MatGetSize(A, &M, NULL));
1297:   PetscCall(MatGetLocalSize(A, &m, NULL));
1298:   PetscCheck(M <= PETSC_MUMPS_INT_MAX, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "PetscInt too long for PetscMUMPSInt");
1299:   if (ompsize == 1) {
1300:     if (!mumps->irhs_loc) {
1301:       mumps->nloc_rhs = m;
1302:       PetscCall(PetscMalloc1(m, &mumps->irhs_loc));
1303:       PetscCall(MatGetOwnershipRange(A, &rstart, NULL));
1304:       for (i = 0; i < m; i++) mumps->irhs_loc[i] = rstart + i + 1; /* use 1-based indices */
1305:     }
1306:     mumps->id.rhs_loc = (MumpsScalar *)array;
1307:   } else {
1308: #if defined(PETSC_HAVE_OPENMP_SUPPORT)
1309:     const PetscInt *ranges;
1310:     PetscMPIInt     j, k, sendcount, *petsc_ranks, *omp_ranks;
1311:     MPI_Group       petsc_group, omp_group;
1312:     PetscScalar    *recvbuf = NULL;

1314:     if (mumps->is_omp_master) {
1315:       /* Lazily initialize the OpenMP-related data structures for the distributed RHS */
1316:       if (!mumps->irhs_loc) {
1317:         PetscCall(PetscMalloc2(ompsize, &omp_ranks, ompsize, &petsc_ranks));
1318:         PetscCall(PetscMalloc3(ompsize, &mumps->rhs_nrow, ompsize, &mumps->rhs_recvcounts, ompsize, &mumps->rhs_disps));
1319:         PetscCallMPI(MPI_Comm_group(mumps->petsc_comm, &petsc_group));
1320:         PetscCallMPI(MPI_Comm_group(mumps->omp_comm, &omp_group));
1321:         for (j = 0; j < ompsize; j++) omp_ranks[j] = j;
1322:         PetscCallMPI(MPI_Group_translate_ranks(omp_group, ompsize, omp_ranks, petsc_group, petsc_ranks));

1324:         /* Populate mumps->irhs_loc[], rhs_nrow[] */
1325:         mumps->nloc_rhs = 0;
1326:         PetscCall(MatGetOwnershipRanges(A, &ranges));
1327:         for (j = 0; j < ompsize; j++) {
1328:           mumps->rhs_nrow[j] = ranges[petsc_ranks[j] + 1] - ranges[petsc_ranks[j]];
1329:           mumps->nloc_rhs += mumps->rhs_nrow[j];
1330:         }
1331:         PetscCall(PetscMalloc1(mumps->nloc_rhs, &mumps->irhs_loc));
1332:         for (j = k = 0; j < ompsize; j++) {
1333:           for (i = ranges[petsc_ranks[j]]; i < ranges[petsc_ranks[j] + 1]; i++, k++) mumps->irhs_loc[k] = i + 1; /* uses 1-based indices */
1334:         }

1336:         PetscCall(PetscFree2(omp_ranks, petsc_ranks));
1337:         PetscCallMPI(MPI_Group_free(&petsc_group));
1338:         PetscCallMPI(MPI_Group_free(&omp_group));
1339:       }

1341:       /* Reallocate buffers when the current nrhs is larger than any encountered so far */
1342:       if (nrhs > mumps->max_nrhs) {
1343:         PetscCall(PetscFree2(mumps->rhs_loc, mumps->rhs_recvbuf));
1344:         PetscCall(PetscMalloc2(mumps->nloc_rhs * nrhs, &mumps->rhs_loc, mumps->nloc_rhs * nrhs, &mumps->rhs_recvbuf));
1345:         mumps->max_nrhs = nrhs;
1346:       }

1348:       /* Setup recvcounts[], disps[], recvbuf on omp rank 0 for the upcoming MPI_Gatherv */
1349:       for (j = 0; j < ompsize; j++) PetscCall(PetscMPIIntCast(mumps->rhs_nrow[j] * nrhs, &mumps->rhs_recvcounts[j]));
1350:       mumps->rhs_disps[0] = 0;
1351:       for (j = 1; j < ompsize; j++) {
1352:         mumps->rhs_disps[j] = mumps->rhs_disps[j - 1] + mumps->rhs_recvcounts[j - 1];
1353:         PetscCheck(mumps->rhs_disps[j] >= 0, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "PetscMPIInt overflow!");
1354:       }
1355:       recvbuf = (nrhs == 1) ? mumps->rhs_loc : mumps->rhs_recvbuf; /* Directly use rhs_loc[] as recvbuf. Single rhs is common in Ax=b */
1356:     }

1358:     PetscCall(PetscMPIIntCast(m * nrhs, &sendcount));
1359:     PetscCallMPI(MPI_Gatherv(array, sendcount, MPIU_SCALAR, recvbuf, mumps->rhs_recvcounts, mumps->rhs_disps, MPIU_SCALAR, 0, mumps->omp_comm));

1361:     if (mumps->is_omp_master) {
1362:       if (nrhs > 1) { /* Copy & re-arrange data from rhs_recvbuf[] to mumps->rhs_loc[] only when there are multiple rhs */
1363:         PetscScalar *dst, *dstbase = mumps->rhs_loc;
1364:         for (j = 0; j < ompsize; j++) {
1365:           const PetscScalar *src = mumps->rhs_recvbuf + mumps->rhs_disps[j];
1366:           dst                    = dstbase;
1367:           for (i = 0; i < nrhs; i++) {
1368:             PetscCall(PetscArraycpy(dst, src, mumps->rhs_nrow[j]));
1369:             src += mumps->rhs_nrow[j];
1370:             dst += mumps->nloc_rhs;
1371:           }
1372:           dstbase += mumps->rhs_nrow[j];
1373:         }
1374:       }
1375:       mumps->id.rhs_loc = (MumpsScalar *)mumps->rhs_loc;
1376:     }
1377: #endif /* PETSC_HAVE_OPENMP_SUPPORT */
1378:   }
1379:   mumps->id.nrhs     = nrhs;
1380:   mumps->id.nloc_rhs = mumps->nloc_rhs;
1381:   mumps->id.lrhs_loc = mumps->nloc_rhs;
1382:   mumps->id.irhs_loc = mumps->irhs_loc;
1383:   PetscFunctionReturn(PETSC_SUCCESS);
1384: }
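
/*
   A minimal layout sketch (hypothetical sizes, following the ompsize == 1 path above): for a 5x5
   matrix split as 3 rows on rank 0 and 2 rows on rank 1, this routine hands MUMPS the distributed
   dense RHS as

     rank 0: nloc_rhs = 3, irhs_loc = {1, 2, 3}, rhs_loc -> the 3 local rows of b
     rank 1: nloc_rhs = 2, irhs_loc = {4, 5},    rhs_loc -> the 2 local rows of b

   with lrhs_loc = nloc_rhs as the column stride; irhs_loc carries Fortran-style 1-based global
   row indices, matching the rstart + i + 1 loop above.
*/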

1386: static PetscErrorCode MatSolve_MUMPS(Mat A, Vec b, Vec x)
1387: {
1388:   Mat_MUMPS         *mumps  = (Mat_MUMPS *)A->data;
1389:   const PetscScalar *rarray = NULL;
1390:   PetscScalar       *array;
1391:   IS                 is_iden, is_petsc;
1392:   PetscInt           i;
1393:   PetscBool          second_solve = PETSC_FALSE;
1394:   static PetscBool   cite1 = PETSC_FALSE, cite2 = PETSC_FALSE;

1396:   PetscFunctionBegin;
1397:   PetscCall(PetscCitationsRegister("@article{MUMPS01,\n  author = {P.~R. Amestoy and I.~S. Duff and J.-Y. L'Excellent and J. Koster},\n  title = {A fully asynchronous multifrontal solver using distributed dynamic scheduling},\n  journal = {SIAM "
1398:                                    "Journal on Matrix Analysis and Applications},\n  volume = {23},\n  number = {1},\n  pages = {15--41},\n  year = {2001}\n}\n",
1399:                                    &cite1));
1400:   PetscCall(PetscCitationsRegister("@article{MUMPS02,\n  author = {P.~R. Amestoy and A. Guermouche and J.-Y. L'Excellent and S. Pralet},\n  title = {Hybrid scheduling for the parallel solution of linear systems},\n  journal = {Parallel "
1401:                                    "Computing},\n  volume = {32},\n  number = {2},\n  pages = {136--156},\n  year = {2006}\n}\n",
1402:                                    &cite2));

1404:   if (A->factorerrortype) {
1405:     PetscCall(PetscInfo(A, "MatSolve is called with singular matrix factor, INFOG(1)=%d, INFO(2)=%d\n", mumps->id.INFOG(1), mumps->id.INFO(2)));
1406:     PetscCall(VecSetInf(x));
1407:     PetscFunctionReturn(PETSC_SUCCESS);
1408:   }

1410:   mumps->id.nrhs = 1;
1411:   if (mumps->petsc_size > 1) {
1412:     if (mumps->ICNTL20 == 10) {
1413:       mumps->id.ICNTL(20) = 10; /* dense distributed RHS */
1414:       PetscCall(VecGetArrayRead(b, &rarray));
1415:       PetscCall(MatMumpsSetUpDistRHSInfo(A, 1, rarray));
1416:     } else {
1417:       mumps->id.ICNTL(20) = 0; /* dense centralized RHS; scatter b into a sequential rhs vector */
1418:       PetscCall(VecScatterBegin(mumps->scat_rhs, b, mumps->b_seq, INSERT_VALUES, SCATTER_FORWARD));
1419:       PetscCall(VecScatterEnd(mumps->scat_rhs, b, mumps->b_seq, INSERT_VALUES, SCATTER_FORWARD));
1420:       if (!mumps->myid) {
1421:         PetscCall(VecGetArray(mumps->b_seq, &array));
1422:         mumps->id.rhs = (MumpsScalar *)array;
1423:       }
1424:     }
1425:   } else {                   /* petsc_size == 1 */
1426:     mumps->id.ICNTL(20) = 0; /* dense centralized RHS */
1427:     PetscCall(VecCopy(b, x));
1428:     PetscCall(VecGetArray(x, &array));
1429:     mumps->id.rhs = (MumpsScalar *)array;
1430:   }

1432:   /*
1433:      Handle the condensation step of the Schur complement (if any).
1434:      We set ICNTL(26) == -1 by default when Schur indices have been provided by the user.
1435:      According to the MUMPS (5.0.0) manual, any value of ICNTL(26) is harmless during the factorization phase.
1436:      Unless the user provides a valid value for ICNTL(26), MatSolve and MatMatSolve solve the full system.
1437:      This requires an extra call to PetscMUMPS_c and the computation of the factors for S.
1438:   */
1439:   if (mumps->id.size_schur > 0) {
1440:     PetscCheck(mumps->petsc_size <= 1, PetscObjectComm((PetscObject)A), PETSC_ERR_SUP, "Parallel Schur complements not yet supported from PETSc");
1441:     if (mumps->id.ICNTL(26) < 0 || mumps->id.ICNTL(26) > 2) {
1442:       second_solve = PETSC_TRUE;
1443:       PetscCall(MatMumpsHandleSchur_Private(A, PETSC_FALSE));
1444:       mumps->id.ICNTL(26) = 1; /* condensation phase */
1445:     } else if (mumps->id.ICNTL(26) == 1) PetscCall(MatMumpsHandleSchur_Private(A, PETSC_FALSE));
1446:   }
1447:   /* solve phase */
1448:   mumps->id.job = JOB_SOLVE;
1449:   PetscMUMPS_c(mumps);
1450:   PetscCheck(mumps->id.INFOG(1) >= 0, PETSC_COMM_SELF, PETSC_ERR_LIB, "MUMPS error in solve: INFOG(1)=%d " MUMPS_MANUALS, mumps->id.INFOG(1));

1452:   /* handle expansion step of Schur complement (if any) */
1453:   if (second_solve) PetscCall(MatMumpsHandleSchur_Private(A, PETSC_TRUE));
1454:   else if (mumps->id.ICNTL(26) == 1) {
1455:     PetscCall(MatMumpsSolveSchur_Private(A));
1456:     for (i = 0; i < mumps->id.size_schur; ++i) {
1457: #if !defined(PETSC_USE_COMPLEX)
1458:       PetscScalar val = mumps->id.redrhs[i];
1459: #else
1460:       PetscScalar val = mumps->id.redrhs[i].r + PETSC_i * mumps->id.redrhs[i].i;
1461: #endif
1462:       array[mumps->id.listvar_schur[i] - 1] = val;
1463:     }
1464:   }

1466:   if (mumps->petsc_size > 1) { /* convert mumps distributed solution to petsc mpi x */
1467:     if (mumps->scat_sol && mumps->ICNTL9_pre != mumps->id.ICNTL(9)) {
1468:       /* when id.ICNTL(9) changes, the contents of isol_loc may change (though not its length lsol_loc), so recreate scat_sol */
1469:       PetscCall(VecScatterDestroy(&mumps->scat_sol));
1470:     }
1471:     if (!mumps->scat_sol) { /* create scatter scat_sol */
1472:       PetscInt *isol2_loc = NULL;
1473:       PetscCall(ISCreateStride(PETSC_COMM_SELF, mumps->id.lsol_loc, 0, 1, &is_iden)); /* from */
1474:       PetscCall(PetscMalloc1(mumps->id.lsol_loc, &isol2_loc));
1475:       for (i = 0; i < mumps->id.lsol_loc; i++) isol2_loc[i] = mumps->id.isol_loc[i] - 1;                        /* change Fortran style to C style */
1476:       PetscCall(ISCreateGeneral(PETSC_COMM_SELF, mumps->id.lsol_loc, isol2_loc, PETSC_OWN_POINTER, &is_petsc)); /* to */
1477:       PetscCall(VecScatterCreate(mumps->x_seq, is_iden, x, is_petsc, &mumps->scat_sol));
1478:       PetscCall(ISDestroy(&is_iden));
1479:       PetscCall(ISDestroy(&is_petsc));
1480:       mumps->ICNTL9_pre = mumps->id.ICNTL(9); /* save current value of id.ICNTL(9) */
1481:     }

1483:     PetscCall(VecScatterBegin(mumps->scat_sol, mumps->x_seq, x, INSERT_VALUES, SCATTER_FORWARD));
1484:     PetscCall(VecScatterEnd(mumps->scat_sol, mumps->x_seq, x, INSERT_VALUES, SCATTER_FORWARD));
1485:   }

1487:   if (mumps->petsc_size > 1) {
1488:     if (mumps->ICNTL20 == 10) {
1489:       PetscCall(VecRestoreArrayRead(b, &rarray));
1490:     } else if (!mumps->myid) {
1491:       PetscCall(VecRestoreArray(mumps->b_seq, &array));
1492:     }
1493:   } else PetscCall(VecRestoreArray(x, &array));

1495:   PetscCall(PetscLogFlops(2.0 * PetscMax(0, (mumps->id.INFO(28) >= 0 ? mumps->id.INFO(28) : -1000000 * mumps->id.INFO(28)) - A->cmap->n)));
1496:   PetscFunctionReturn(PETSC_SUCCESS);
1497: }
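
/*
   Usage sketch (hedged, user-side): when Schur indices were supplied via MatFactorSetSchurIS(),
   a plain MatSolve() on the full system goes through the two-phase path above (condensation with
   ICNTL(26) = 1, then expansion), at the cost of a second PetscMUMPS_c() call:

     IS is;                            // global indices of the Schur block (user-defined)
     MatFactorSetSchurIS(F, is);       // must precede the symbolic/numeric factorization
     MatLUFactorNumeric(F, A, &info);
     MatSolve(F, b, x);                // full-system solve; the Schur handling is internal
*/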

1499: static PetscErrorCode MatSolveTranspose_MUMPS(Mat A, Vec b, Vec x)
1500: {
1501:   Mat_MUMPS          *mumps = (Mat_MUMPS *)A->data;
1502:   const PetscMUMPSInt value = mumps->id.ICNTL(9);

1504:   PetscFunctionBegin;
1505:   mumps->id.ICNTL(9) = 0;
1506:   PetscCall(MatSolve_MUMPS(A, b, x));
1507:   mumps->id.ICNTL(9) = value;
1508:   PetscFunctionReturn(PETSC_SUCCESS);
1509: }
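
/*
   Usage sketch (hedged): a factorization can be reused to solve with A^T at no extra cost,
   since the wrapper above only toggles ICNTL(9) around MatSolve_MUMPS():

     MatSolve(F, b, x);            // solves A x = b   (MUMPS default ICNTL(9) = 1)
     MatSolveTranspose(F, b, y);   // solves A^T y = b (ICNTL(9) = 0 for the duration of the call)
*/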

1511: static PetscErrorCode MatMatSolve_MUMPS(Mat A, Mat B, Mat X)
1512: {
1513:   Mat                Bt = NULL;
1514:   PetscBool          denseX, denseB, flg, flgT;
1515:   Mat_MUMPS         *mumps = (Mat_MUMPS *)A->data;
1516:   PetscInt           i, nrhs, M;
1517:   PetscScalar       *array;
1518:   const PetscScalar *rbray;
1519:   PetscInt           lsol_loc, nlsol_loc, *idxx, iidx = 0;
1520:   PetscMUMPSInt     *isol_loc, *isol_loc_save;
1521:   PetscScalar       *bray, *sol_loc, *sol_loc_save;
1522:   IS                 is_to, is_from;
1523:   PetscInt           k, proc, j, m, myrstart;
1524:   const PetscInt    *rstart;
1525:   Vec                v_mpi, msol_loc;
1526:   VecScatter         scat_sol;
1527:   Vec                b_seq;
1528:   VecScatter         scat_rhs;
1529:   PetscScalar       *aa;
1530:   PetscInt           spnr, *ia, *ja;
1531:   Mat_MPIAIJ        *b = NULL;

1533:   PetscFunctionBegin;
1534:   PetscCall(PetscObjectTypeCompareAny((PetscObject)X, &denseX, MATSEQDENSE, MATMPIDENSE, NULL));
1535:   PetscCheck(denseX, PetscObjectComm((PetscObject)X), PETSC_ERR_ARG_WRONG, "Matrix X must be MATDENSE matrix");

1537:   PetscCall(PetscObjectTypeCompareAny((PetscObject)B, &denseB, MATSEQDENSE, MATMPIDENSE, NULL));
1538:   if (denseB) {
1539:     PetscCheck(B->rmap->n == X->rmap->n, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Matrix B and X must have same row distribution");
1540:     mumps->id.ICNTL(20) = 0; /* dense RHS */
1541:   } else {                   /* sparse B */
1542:     PetscCheck(X != B, PetscObjectComm((PetscObject)A), PETSC_ERR_ARG_IDN, "X and B must be different matrices");
1543:     PetscCall(PetscObjectTypeCompare((PetscObject)B, MATTRANSPOSEVIRTUAL, &flgT));
1544:     if (flgT) { /* input B is the transpose of the actual RHS matrix,
1545:                  because MUMPS requires the sparse RHS in compressed COLUMN storage; see MatMatTransposeSolve_MUMPS() */
1546:       PetscCall(MatTransposeGetMat(B, &Bt));
1547:     } else SETERRQ(PetscObjectComm((PetscObject)B), PETSC_ERR_ARG_WRONG, "Matrix B must be MATTRANSPOSEVIRTUAL matrix");
1548:     mumps->id.ICNTL(20) = 1; /* sparse RHS */
1549:   }

1551:   PetscCall(MatGetSize(B, &M, &nrhs));
1552:   mumps->id.nrhs = nrhs;
1553:   mumps->id.lrhs = M;
1554:   mumps->id.rhs  = NULL;

1556:   if (mumps->petsc_size == 1) {
1557:     PetscScalar *aa;
1558:     PetscInt     spnr, *ia, *ja;
1559:     PetscBool    second_solve = PETSC_FALSE;

1561:     PetscCall(MatDenseGetArray(X, &array));
1562:     mumps->id.rhs = (MumpsScalar *)array;

1564:     if (denseB) {
1565:       /* copy B to X */
1566:       PetscCall(MatDenseGetArrayRead(B, &rbray));
1567:       PetscCall(PetscArraycpy(array, rbray, M * nrhs));
1568:       PetscCall(MatDenseRestoreArrayRead(B, &rbray));
1569:     } else { /* sparse B */
1570:       PetscCall(MatSeqAIJGetArray(Bt, &aa));
1571:       PetscCall(MatGetRowIJ(Bt, 1, PETSC_FALSE, PETSC_FALSE, &spnr, (const PetscInt **)&ia, (const PetscInt **)&ja, &flg));
1572:       PetscCheck(flg, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Cannot get IJ structure");
1573:       PetscCall(PetscMUMPSIntCSRCast(mumps, spnr, ia, ja, &mumps->id.irhs_ptr, &mumps->id.irhs_sparse, &mumps->id.nz_rhs));
1574:       mumps->id.rhs_sparse = (MumpsScalar *)aa;
1575:     }
1576:     /* handle condensation step of Schur complement (if any) */
1577:     if (mumps->id.size_schur > 0) {
1578:       if (mumps->id.ICNTL(26) < 0 || mumps->id.ICNTL(26) > 2) {
1579:         second_solve = PETSC_TRUE;
1580:         PetscCall(MatMumpsHandleSchur_Private(A, PETSC_FALSE));
1581:         mumps->id.ICNTL(26) = 1; /* condensation phase */
1582:       } else if (mumps->id.ICNTL(26) == 1) PetscCall(MatMumpsHandleSchur_Private(A, PETSC_FALSE));
1583:     }
1584:     /* solve phase */
1585:     mumps->id.job = JOB_SOLVE;
1586:     PetscMUMPS_c(mumps);
1587:     PetscCheck(mumps->id.INFOG(1) >= 0, PETSC_COMM_SELF, PETSC_ERR_LIB, "MUMPS error in solve: INFOG(1)=%d " MUMPS_MANUALS, mumps->id.INFOG(1));

1589:     /* handle expansion step of Schur complement (if any) */
1590:     if (second_solve) PetscCall(MatMumpsHandleSchur_Private(A, PETSC_TRUE));
1591:     else if (mumps->id.ICNTL(26) == 1) {
1592:       PetscCall(MatMumpsSolveSchur_Private(A));
1593:       for (j = 0; j < nrhs; ++j)
1594:         for (i = 0; i < mumps->id.size_schur; ++i) {
1595: #if !defined(PETSC_USE_COMPLEX)
1596:           PetscScalar val = mumps->id.redrhs[i + j * mumps->id.lredrhs];
1597: #else
1598:           PetscScalar val = mumps->id.redrhs[i + j * mumps->id.lredrhs].r + PETSC_i * mumps->id.redrhs[i + j * mumps->id.lredrhs].i;
1599: #endif
1600:           array[mumps->id.listvar_schur[i] - 1 + j * M] = val;
1601:         }
1602:     }
1603:     if (!denseB) { /* sparse B */
1604:       PetscCall(MatSeqAIJRestoreArray(Bt, &aa));
1605:       PetscCall(MatRestoreRowIJ(Bt, 1, PETSC_FALSE, PETSC_FALSE, &spnr, (const PetscInt **)&ia, (const PetscInt **)&ja, &flg));
1606:       PetscCheck(flg, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Cannot restore IJ structure");
1607:     }
1608:     PetscCall(MatDenseRestoreArray(X, &array));
1609:     PetscFunctionReturn(PETSC_SUCCESS);
1610:   }

1612:   /* parallel case: MUMPS requires rhs B to be centralized on the host! */
1613:   PetscCheck(!mumps->id.ICNTL(19), PetscObjectComm((PetscObject)A), PETSC_ERR_SUP, "Parallel Schur complements not yet supported from PETSc");

1615:   /* create msol_loc to hold mumps local solution */
1616:   isol_loc_save = mumps->id.isol_loc; /* save it for MatSolve() */
1617:   sol_loc_save  = (PetscScalar *)mumps->id.sol_loc;

1619:   lsol_loc  = mumps->id.lsol_loc;
1620:   nlsol_loc = nrhs * lsol_loc; /* length of sol_loc */
1621:   PetscCall(PetscMalloc2(nlsol_loc, &sol_loc, lsol_loc, &isol_loc));
1622:   mumps->id.sol_loc  = (MumpsScalar *)sol_loc;
1623:   mumps->id.isol_loc = isol_loc;

1625:   PetscCall(VecCreateSeqWithArray(PETSC_COMM_SELF, 1, nlsol_loc, (PetscScalar *)sol_loc, &msol_loc));

1627:   if (denseB) {
1628:     if (mumps->ICNTL20 == 10) {
1629:       mumps->id.ICNTL(20) = 10; /* dense distributed RHS */
1630:       PetscCall(MatDenseGetArrayRead(B, &rbray));
1631:       PetscCall(MatMumpsSetUpDistRHSInfo(A, nrhs, rbray));
1632:       PetscCall(MatDenseRestoreArrayRead(B, &rbray));
1633:       PetscCall(MatGetLocalSize(B, &m, NULL));
1634:       PetscCall(VecCreateMPIWithArray(PetscObjectComm((PetscObject)B), 1, nrhs * m, nrhs * M, NULL, &v_mpi));
1635:     } else {
1636:       mumps->id.ICNTL(20) = 0; /* dense centralized RHS */
1637:       /* TODO: Because of the non-contiguous indices, the vecscatter scat_rhs created below cannot be implemented with an
1638:         MPI_Gather, resulting in very inefficient communication. An optimization is to use VecScatterCreateToZero to gather
1639:         B on rank 0, and then re-arrange B into the desired order there, which is a local operation.
1640:       */

1642:       /* scatter v_mpi to b_seq because MUMPS before 5.3.0 only supports centralized rhs */
1643:       /* wrap dense rhs matrix B into a vector v_mpi */
1644:       PetscCall(MatGetLocalSize(B, &m, NULL));
1645:       PetscCall(MatDenseGetArray(B, &bray));
1646:       PetscCall(VecCreateMPIWithArray(PetscObjectComm((PetscObject)B), 1, nrhs * m, nrhs * M, (const PetscScalar *)bray, &v_mpi));
1647:       PetscCall(MatDenseRestoreArray(B, &bray));

1649:       /* scatter v_mpi to b_seq in proc[0]. MUMPS requires rhs to be centralized on the host! */
1650:       if (!mumps->myid) {
1651:         PetscInt *idx;
1652:         /* idx: maps from k-th index of v_mpi to (i,j)-th global entry of B */
1653:         PetscCall(PetscMalloc1(nrhs * M, &idx));
1654:         PetscCall(MatGetOwnershipRanges(B, &rstart));
1655:         k = 0;
1656:         for (proc = 0; proc < mumps->petsc_size; proc++) {
1657:           for (j = 0; j < nrhs; j++) {
1658:             for (i = rstart[proc]; i < rstart[proc + 1]; i++) idx[k++] = j * M + i;
1659:           }
1660:         }

1662:         PetscCall(VecCreateSeq(PETSC_COMM_SELF, nrhs * M, &b_seq));
1663:         PetscCall(ISCreateGeneral(PETSC_COMM_SELF, nrhs * M, idx, PETSC_OWN_POINTER, &is_to));
1664:         PetscCall(ISCreateStride(PETSC_COMM_SELF, nrhs * M, 0, 1, &is_from));
1665:       } else {
1666:         PetscCall(VecCreateSeq(PETSC_COMM_SELF, 0, &b_seq));
1667:         PetscCall(ISCreateStride(PETSC_COMM_SELF, 0, 0, 1, &is_to));
1668:         PetscCall(ISCreateStride(PETSC_COMM_SELF, 0, 0, 1, &is_from));
1669:       }
1670:       PetscCall(VecScatterCreate(v_mpi, is_from, b_seq, is_to, &scat_rhs));
1671:       PetscCall(VecScatterBegin(scat_rhs, v_mpi, b_seq, INSERT_VALUES, SCATTER_FORWARD));
1672:       PetscCall(ISDestroy(&is_to));
1673:       PetscCall(ISDestroy(&is_from));
1674:       PetscCall(VecScatterEnd(scat_rhs, v_mpi, b_seq, INSERT_VALUES, SCATTER_FORWARD));

1676:       if (!mumps->myid) { /* define rhs on the host */
1677:         PetscCall(VecGetArray(b_seq, &bray));
1678:         mumps->id.rhs = (MumpsScalar *)bray;
1679:         PetscCall(VecRestoreArray(b_seq, &bray));
1680:       }
1681:     }
1682:   } else { /* sparse B */
1683:     b = (Mat_MPIAIJ *)Bt->data;

1685:     /* wrap dense X into a vector v_mpi */
1686:     PetscCall(MatGetLocalSize(X, &m, NULL));
1687:     PetscCall(MatDenseGetArray(X, &bray));
1688:     PetscCall(VecCreateMPIWithArray(PetscObjectComm((PetscObject)X), 1, nrhs * m, nrhs * M, (const PetscScalar *)bray, &v_mpi));
1689:     PetscCall(MatDenseRestoreArray(X, &bray));

1691:     if (!mumps->myid) {
1692:       PetscCall(MatSeqAIJGetArray(b->A, &aa));
1693:       PetscCall(MatGetRowIJ(b->A, 1, PETSC_FALSE, PETSC_FALSE, &spnr, (const PetscInt **)&ia, (const PetscInt **)&ja, &flg));
1694:       PetscCheck(flg, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Cannot get IJ structure");
1695:       PetscCall(PetscMUMPSIntCSRCast(mumps, spnr, ia, ja, &mumps->id.irhs_ptr, &mumps->id.irhs_sparse, &mumps->id.nz_rhs));
1696:       mumps->id.rhs_sparse = (MumpsScalar *)aa;
1697:     } else {
1698:       mumps->id.irhs_ptr    = NULL;
1699:       mumps->id.irhs_sparse = NULL;
1700:       mumps->id.nz_rhs      = 0;
1701:       mumps->id.rhs_sparse  = NULL;
1702:     }
1703:   }

1705:   /* solve phase */
1706:   mumps->id.job = JOB_SOLVE;
1707:   PetscMUMPS_c(mumps);
1708:   PetscCheck(mumps->id.INFOG(1) >= 0, PETSC_COMM_SELF, PETSC_ERR_LIB, "MUMPS error in solve: INFOG(1)=%d " MUMPS_MANUALS, mumps->id.INFOG(1));

1710:   /* scatter mumps distributed solution to petsc vector v_mpi, which shares local arrays with solution matrix X */
1711:   PetscCall(MatDenseGetArray(X, &array));
1712:   PetscCall(VecPlaceArray(v_mpi, array));

1714:   /* create scatter scat_sol */
1715:   PetscCall(MatGetOwnershipRanges(X, &rstart));
1716:   /* iidx: index used to scatter the MUMPS solution into the PETSc X */

1718:   PetscCall(ISCreateStride(PETSC_COMM_SELF, nlsol_loc, 0, 1, &is_from));
1719:   PetscCall(PetscMalloc1(nlsol_loc, &idxx));
1720:   for (i = 0; i < lsol_loc; i++) {
1721:     isol_loc[i] -= 1; /* change Fortran style to C style. isol_loc[i+j*lsol_loc] contains x[isol_loc[i]] in j-th vector */

1723:     for (proc = 0; proc < mumps->petsc_size; proc++) {
1724:       if (isol_loc[i] >= rstart[proc] && isol_loc[i] < rstart[proc + 1]) {
1725:         myrstart = rstart[proc];
1726:         k        = isol_loc[i] - myrstart;          /* local index on 1st column of petsc vector X */
1727:         iidx     = k + myrstart * nrhs;             /* maps mumps isol_loc[i] to petsc index in X */
1728:         m        = rstart[proc + 1] - rstart[proc]; /* rows of X for this proc */
1729:         break;
1730:       }
1731:     }

1733:     for (j = 0; j < nrhs; j++) idxx[i + j * lsol_loc] = iidx + j * m;
1734:   }
1735:   PetscCall(ISCreateGeneral(PETSC_COMM_SELF, nlsol_loc, idxx, PETSC_COPY_VALUES, &is_to));
1736:   PetscCall(VecScatterCreate(msol_loc, is_from, v_mpi, is_to, &scat_sol));
1737:   PetscCall(VecScatterBegin(scat_sol, msol_loc, v_mpi, INSERT_VALUES, SCATTER_FORWARD));
1738:   PetscCall(ISDestroy(&is_from));
1739:   PetscCall(ISDestroy(&is_to));
1740:   PetscCall(VecScatterEnd(scat_sol, msol_loc, v_mpi, INSERT_VALUES, SCATTER_FORWARD));
1741:   PetscCall(MatDenseRestoreArray(X, &array));

1743:   /* free spaces */
1744:   mumps->id.sol_loc  = (MumpsScalar *)sol_loc_save;
1745:   mumps->id.isol_loc = isol_loc_save;

1747:   PetscCall(PetscFree2(sol_loc, isol_loc));
1748:   PetscCall(PetscFree(idxx));
1749:   PetscCall(VecDestroy(&msol_loc));
1750:   PetscCall(VecDestroy(&v_mpi));
1751:   if (!denseB) {
1752:     if (!mumps->myid) {
1753:       b = (Mat_MPIAIJ *)Bt->data;
1754:       PetscCall(MatSeqAIJRestoreArray(b->A, &aa));
1755:       PetscCall(MatRestoreRowIJ(b->A, 1, PETSC_FALSE, PETSC_FALSE, &spnr, (const PetscInt **)&ia, (const PetscInt **)&ja, &flg));
1756:       PetscCheck(flg, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Cannot restore IJ structure");
1757:     }
1758:   } else {
1759:     if (mumps->ICNTL20 == 0) {
1760:       PetscCall(VecDestroy(&b_seq));
1761:       PetscCall(VecScatterDestroy(&scat_rhs));
1762:     }
1763:   }
1764:   PetscCall(VecScatterDestroy(&scat_sol));
1765:   PetscCall(PetscLogFlops(nrhs * PetscMax(0, (2.0 * (mumps->id.INFO(28) >= 0 ? mumps->id.INFO(28) : -1000000 * mumps->id.INFO(28)) - A->cmap->n))));
1766:   PetscFunctionReturn(PETSC_SUCCESS);
1767: }
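
/*
   Usage sketch (hedged): multiple right-hand sides through the routine above; X must be MATDENSE
   and, for a dense B, share B's row distribution:

     Mat B, X;               // MATDENSE, n columns = number of right-hand sides
     MatMatSolve(F, B, X);   // X = A^{-1} B, one MUMPS solve phase with id.nrhs = n
*/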

1769: static PetscErrorCode MatMatSolveTranspose_MUMPS(Mat A, Mat B, Mat X)
1770: {
1771:   Mat_MUMPS          *mumps = (Mat_MUMPS *)A->data;
1772:   const PetscMUMPSInt value = mumps->id.ICNTL(9);

1774:   PetscFunctionBegin;
1775:   mumps->id.ICNTL(9) = 0;
1776:   PetscCall(MatMatSolve_MUMPS(A, B, X));
1777:   mumps->id.ICNTL(9) = value;
1778:   PetscFunctionReturn(PETSC_SUCCESS);
1779: }

1781: static PetscErrorCode MatMatTransposeSolve_MUMPS(Mat A, Mat Bt, Mat X)
1782: {
1783:   PetscBool flg;
1784:   Mat       B;

1786:   PetscFunctionBegin;
1787:   PetscCall(PetscObjectTypeCompareAny((PetscObject)Bt, &flg, MATSEQAIJ, MATMPIAIJ, NULL));
1788:   PetscCheck(flg, PetscObjectComm((PetscObject)Bt), PETSC_ERR_ARG_WRONG, "Matrix Bt must be MATAIJ matrix");

1790:   /* Create B=Bt^T that uses Bt's data structure */
1791:   PetscCall(MatCreateTranspose(Bt, &B));

1793:   PetscCall(MatMatSolve_MUMPS(A, B, X));
1794:   PetscCall(MatDestroy(&B));
1795:   PetscFunctionReturn(PETSC_SUCCESS);
1796: }
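
/*
   Usage sketch (hedged): MUMPS wants sparse right-hand sides in compressed-column storage, so the
   caller passes Bt = B^T as an AIJ matrix (B^T's rows are B's columns) and the wrapper above
   re-labels it with MatCreateTranspose() before calling MatMatSolve_MUMPS():

     Mat Bt, X;                        // Bt holds B^T in (MPI)AIJ format; X is MATDENSE
     MatMatTransposeSolve(F, Bt, X);   // X = A^{-1} B
*/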

1798: #if !defined(PETSC_USE_COMPLEX)
1799: /*
1800:   input:
1801:    F:        numeric factor
1802:   output:
1803:    nneg:     total number of negative pivots
1804:    nzero:    total number of zero pivots
1805:    npos:     (global dimension of F) - nneg - nzero
1806: */
1807: static PetscErrorCode MatGetInertia_SBAIJMUMPS(Mat F, PetscInt *nneg, PetscInt *nzero, PetscInt *npos)
1808: {
1809:   Mat_MUMPS  *mumps = (Mat_MUMPS *)F->data;
1810:   PetscMPIInt size;

1812:   PetscFunctionBegin;
1813:   PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)F), &size));
1814:   /* MUMPS 4.3.1 calls ScaLAPACK when ICNTL(13)=0 (default), which does not offer the possibility to compute the inertia of a dense matrix. Set ICNTL(13)=1 to skip ScaLAPACK */
1815:   PetscCheck(size <= 1 || mumps->id.ICNTL(13) == 1, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "ICNTL(13)=%d. -mat_mumps_icntl_13 must be set as 1 for correct global matrix inertia", mumps->id.ICNTL(13));

1817:   if (nneg) *nneg = mumps->id.INFOG(12);
1818:   if (nzero || npos) {
1819:     PetscCheck(mumps->id.ICNTL(24) == 1, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "-mat_mumps_icntl_24 must be set as 1 for null pivot row detection");
1820:     if (nzero) *nzero = mumps->id.INFOG(28);
1821:     if (npos) *npos = F->rmap->N - (mumps->id.INFOG(12) + mumps->id.INFOG(28));
1822:   }
1823:   PetscFunctionReturn(PETSC_SUCCESS);
1824: }
1825: #endif
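
/*
   Usage sketch (hedged, real scalars only): querying the inertia of a factored symmetric matrix,
   subject to the ICNTL checks above (-mat_mumps_icntl_13 1 in parallel, and -mat_mumps_icntl_24 1
   when the zero-pivot count is wanted):

     PetscInt nneg, nzero, npos;
     MatGetInertia(F, &nneg, &nzero, &npos);   // npos = N - nneg - nzero
*/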

1827: static PetscErrorCode MatMumpsGatherNonzerosOnMaster(MatReuse reuse, Mat_MUMPS *mumps)
1828: {
1829:   PetscInt       i, nreqs;
1830:   PetscMUMPSInt *irn, *jcn;
1831:   PetscMPIInt    count;
1832:   PetscInt64     totnnz, remain;
1833:   const PetscInt osize = mumps->omp_comm_size;
1834:   PetscScalar   *val;

1836:   PetscFunctionBegin;
1837:   if (osize > 1) {
1838:     if (reuse == MAT_INITIAL_MATRIX) {
1839:       /* master first gathers counts of nonzeros to receive */
1840:       if (mumps->is_omp_master) PetscCall(PetscMalloc1(osize, &mumps->recvcount));
1841:       PetscCallMPI(MPI_Gather(&mumps->nnz, 1, MPIU_INT64, mumps->recvcount, 1, MPIU_INT64, 0 /*master*/, mumps->omp_comm));

1843:       /* Then each computes number of send/recvs */
1844:       if (mumps->is_omp_master) {
1845:         /* Start from 1 since self communication is not done in MPI */
1846:         nreqs = 0;
1847:         for (i = 1; i < osize; i++) nreqs += (mumps->recvcount[i] + PETSC_MPI_INT_MAX - 1) / PETSC_MPI_INT_MAX;
1848:       } else {
1849:         nreqs = (mumps->nnz + PETSC_MPI_INT_MAX - 1) / PETSC_MPI_INT_MAX;
1850:       }
1851:       PetscCall(PetscMalloc1(nreqs * 3, &mumps->reqs)); /* Triple the requests since we send irn, jcn and val separately */

1853:       /* The following code does a very simple thing: the omp_master rank gathers irn/jcn/val from the others.
1854:          MPI_Gatherv would be enough if it supported big counts > 2^31-1. Since it does not, and mumps->nnz
1855:          might even be a prime number > 2^31-1, we have to slice the messages. Since omp_comm_size
1856:          is very small, the current approach should have no extra overhead compared to MPI_Gatherv.
1857:        */
1858:       nreqs = 0; /* counter for actual send/recvs */
1859:       if (mumps->is_omp_master) {
1860:         for (i = 0, totnnz = 0; i < osize; i++) totnnz += mumps->recvcount[i]; /* totnnz = sum of nnz over omp_comm */
1861:         PetscCall(PetscMalloc2(totnnz, &irn, totnnz, &jcn));
1862:         PetscCall(PetscMalloc1(totnnz, &val));

1864:         /* Self communication */
1865:         PetscCall(PetscArraycpy(irn, mumps->irn, mumps->nnz));
1866:         PetscCall(PetscArraycpy(jcn, mumps->jcn, mumps->nnz));
1867:         PetscCall(PetscArraycpy(val, mumps->val, mumps->nnz));

1869:         /* Replace mumps->irn/jcn etc on master with the newly allocated bigger arrays */
1870:         PetscCall(PetscFree2(mumps->irn, mumps->jcn));
1871:         PetscCall(PetscFree(mumps->val_alloc));
1872:         mumps->nnz = totnnz;
1873:         mumps->irn = irn;
1874:         mumps->jcn = jcn;
1875:         mumps->val = mumps->val_alloc = val;

1877:         irn += mumps->recvcount[0]; /* recvcount[0] is old mumps->nnz on omp rank 0 */
1878:         jcn += mumps->recvcount[0];
1879:         val += mumps->recvcount[0];

1881:         /* Remote communication */
1882:         for (i = 1; i < osize; i++) {
1883:           count  = PetscMin(mumps->recvcount[i], PETSC_MPI_INT_MAX);
1884:           remain = mumps->recvcount[i] - count;
1885:           while (count > 0) {
1886:             PetscCallMPI(MPI_Irecv(irn, count, MPIU_MUMPSINT, i, mumps->tag, mumps->omp_comm, &mumps->reqs[nreqs++]));
1887:             PetscCallMPI(MPI_Irecv(jcn, count, MPIU_MUMPSINT, i, mumps->tag, mumps->omp_comm, &mumps->reqs[nreqs++]));
1888:             PetscCallMPI(MPI_Irecv(val, count, MPIU_SCALAR, i, mumps->tag, mumps->omp_comm, &mumps->reqs[nreqs++]));
1889:             irn += count;
1890:             jcn += count;
1891:             val += count;
1892:             count = PetscMin(remain, PETSC_MPI_INT_MAX);
1893:             remain -= count;
1894:           }
1895:         }
1896:       } else {
1897:         irn    = mumps->irn;
1898:         jcn    = mumps->jcn;
1899:         val    = mumps->val;
1900:         count  = PetscMin(mumps->nnz, PETSC_MPI_INT_MAX);
1901:         remain = mumps->nnz - count;
1902:         while (count > 0) {
1903:           PetscCallMPI(MPI_Isend(irn, count, MPIU_MUMPSINT, 0, mumps->tag, mumps->omp_comm, &mumps->reqs[nreqs++]));
1904:           PetscCallMPI(MPI_Isend(jcn, count, MPIU_MUMPSINT, 0, mumps->tag, mumps->omp_comm, &mumps->reqs[nreqs++]));
1905:           PetscCallMPI(MPI_Isend(val, count, MPIU_SCALAR, 0, mumps->tag, mumps->omp_comm, &mumps->reqs[nreqs++]));
1906:           irn += count;
1907:           jcn += count;
1908:           val += count;
1909:           count = PetscMin(remain, PETSC_MPI_INT_MAX);
1910:           remain -= count;
1911:         }
1912:       }
1913:     } else {
1914:       nreqs = 0;
1915:       if (mumps->is_omp_master) {
1916:         val = mumps->val + mumps->recvcount[0];
1917:         for (i = 1; i < osize; i++) { /* Remote communication only since self data is already in place */
1918:           count  = PetscMin(mumps->recvcount[i], PETSC_MPI_INT_MAX);
1919:           remain = mumps->recvcount[i] - count;
1920:           while (count > 0) {
1921:             PetscCallMPI(MPI_Irecv(val, count, MPIU_SCALAR, i, mumps->tag, mumps->omp_comm, &mumps->reqs[nreqs++]));
1922:             val += count;
1923:             count = PetscMin(remain, PETSC_MPI_INT_MAX);
1924:             remain -= count;
1925:           }
1926:         }
1927:       } else {
1928:         val    = mumps->val;
1929:         count  = PetscMin(mumps->nnz, PETSC_MPI_INT_MAX);
1930:         remain = mumps->nnz - count;
1931:         while (count > 0) {
1932:           PetscCallMPI(MPI_Isend(val, count, MPIU_SCALAR, 0, mumps->tag, mumps->omp_comm, &mumps->reqs[nreqs++]));
1933:           val += count;
1934:           count = PetscMin(remain, PETSC_MPI_INT_MAX);
1935:           remain -= count;
1936:         }
1937:       }
1938:     }
1939:     PetscCallMPI(MPI_Waitall(nreqs, mumps->reqs, MPI_STATUSES_IGNORE));
1940:     mumps->tag++; /* It is fine for the above sends/recvs to share one MPI tag */
1941:   }
1942:   PetscFunctionReturn(PETSC_SUCCESS);
1943: }
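
/*
   Slicing sketch (hypothetical count): a contribution of nnz = 5e9 entries, with
   PETSC_MPI_INT_MAX = 2^31-1, is moved in ceil(5e9 / (2^31-1)) = 3 messages per array, of counts
   2147483647, 2147483647 and 705032706, matching the while loops above that advance the
   irn/jcn/val pointers by <count> after each Isend/Irecv.
*/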

1945: static PetscErrorCode MatFactorNumeric_MUMPS(Mat F, Mat A, const MatFactorInfo *info)
1946: {
1947:   Mat_MUMPS *mumps = (Mat_MUMPS *)(F)->data;
1948:   PetscBool  isMPIAIJ;

1950:   PetscFunctionBegin;
1951:   if (mumps->id.INFOG(1) < 0 && !(mumps->id.INFOG(1) == -16 && mumps->id.INFO(2) == 0)) { /* INFOG(1) == -16 with INFO(2) == 0 means an empty matrix, which is not treated as an error */
1952:     if (mumps->id.INFOG(1) == -6) PetscCall(PetscInfo(A, "MatFactorNumeric is called with a structurally singular matrix, INFOG(1)=%d, INFO(2)=%d\n", mumps->id.INFOG(1), mumps->id.INFO(2)));
1953:     PetscCall(PetscInfo(A, "MatFactorNumeric is called after the analysis phase failed, INFOG(1)=%d, INFO(2)=%d\n", mumps->id.INFOG(1), mumps->id.INFO(2)));
1954:     PetscFunctionReturn(PETSC_SUCCESS);
1955:   }

1957:   PetscCall((*mumps->ConvertToTriples)(A, 1, MAT_REUSE_MATRIX, mumps));
1958:   PetscCall(MatMumpsGatherNonzerosOnMaster(MAT_REUSE_MATRIX, mumps));

1960:   /* numerical factorization phase */
1961:   mumps->id.job = JOB_FACTNUMERIC;
1962:   if (!mumps->id.ICNTL(18)) { /* A is centralized */
1963:     if (!mumps->myid) mumps->id.a = (MumpsScalar *)mumps->val;
1964:   } else {
1965:     mumps->id.a_loc = (MumpsScalar *)mumps->val;
1966:   }
1967:   PetscMUMPS_c(mumps);
1968:   if (mumps->id.INFOG(1) < 0) {
1969:     PetscCheck(!A->erroriffailure, PETSC_COMM_SELF, PETSC_ERR_LIB, "MUMPS error in numerical factorization: INFOG(1)=%d, INFO(2)=%d " MUMPS_MANUALS, mumps->id.INFOG(1), mumps->id.INFO(2));
1970:     if (mumps->id.INFOG(1) == -10) {
1971:       PetscCall(PetscInfo(F, "MUMPS error in numerical factorization: matrix is numerically singular, INFOG(1)=%d, INFO(2)=%d\n", mumps->id.INFOG(1), mumps->id.INFO(2)));
1972:       F->factorerrortype = MAT_FACTOR_NUMERIC_ZEROPIVOT;
1973:     } else if (mumps->id.INFOG(1) == -13) {
1974:       PetscCall(PetscInfo(F, "MUMPS error in numerical factorization: INFOG(1)=%d, cannot allocate required memory %d megabytes\n", mumps->id.INFOG(1), mumps->id.INFO(2)));
1975:       F->factorerrortype = MAT_FACTOR_OUTMEMORY;
1976:     } else if (mumps->id.INFOG(1) == -8 || mumps->id.INFOG(1) == -9 || (-16 < mumps->id.INFOG(1) && mumps->id.INFOG(1) < -10)) {
1977:       PetscCall(PetscInfo(F, "MUMPS error in numerical factorization: INFOG(1)=%d, INFO(2)=%d, problem with work array\n", mumps->id.INFOG(1), mumps->id.INFO(2)));
1978:       F->factorerrortype = MAT_FACTOR_OUTMEMORY;
1979:     } else {
1980:       PetscCall(PetscInfo(F, "MUMPS error in numerical factorization: INFOG(1)=%d, INFO(2)=%d\n", mumps->id.INFOG(1), mumps->id.INFO(2)));
1981:       F->factorerrortype = MAT_FACTOR_OTHER;
1982:     }
1983:   }
1984:   PetscCheck(mumps->myid || mumps->id.ICNTL(16) <= 0, PETSC_COMM_SELF, PETSC_ERR_LIB, "MUMPS error in numerical factorization: ICNTL(16)=%d " MUMPS_MANUALS, mumps->id.ICNTL(16));

1986:   F->assembled = PETSC_TRUE;

1988:   if (F->schur) { /* reset Schur status to unfactored */
1989: #if defined(PETSC_HAVE_CUDA)
1990:     F->schur->offloadmask = PETSC_OFFLOAD_CPU;
1991: #endif
1992:     if (mumps->id.ICNTL(19) == 1) { /* stored by rows */
1993:       mumps->id.ICNTL(19) = 2;
1994:       PetscCall(MatTranspose(F->schur, MAT_INPLACE_MATRIX, &F->schur));
1995:     }
1996:     PetscCall(MatFactorRestoreSchurComplement(F, NULL, MAT_FACTOR_SCHUR_UNFACTORED));
1997:   }

1999:   /* just to be sure that the ICNTL(19) value returned by a call to MatMumpsGetIcntl is always consistent */
2000:   if (!mumps->sym && mumps->id.ICNTL(19) && mumps->id.ICNTL(19) != 1) mumps->id.ICNTL(19) = 3;

2002:   if (!mumps->is_omp_master) mumps->id.INFO(23) = 0;
2003:   if (mumps->petsc_size > 1) {
2004:     PetscInt     lsol_loc;
2005:     PetscScalar *sol_loc;

2007:     PetscCall(PetscObjectTypeCompare((PetscObject)A, MATMPIAIJ, &isMPIAIJ));

2009:     /* distributed solution; Create x_seq=sol_loc for repeated use */
2010:     if (mumps->x_seq) {
2011:       PetscCall(VecScatterDestroy(&mumps->scat_sol));
2012:       PetscCall(PetscFree2(mumps->id.sol_loc, mumps->id.isol_loc));
2013:       PetscCall(VecDestroy(&mumps->x_seq));
2014:     }
2015:     lsol_loc = mumps->id.INFO(23); /* length of sol_loc */
2016:     PetscCall(PetscMalloc2(lsol_loc, &sol_loc, lsol_loc, &mumps->id.isol_loc));
2017:     mumps->id.lsol_loc = lsol_loc;
2018:     mumps->id.sol_loc  = (MumpsScalar *)sol_loc;
2019:     PetscCall(VecCreateSeqWithArray(PETSC_COMM_SELF, 1, lsol_loc, sol_loc, &mumps->x_seq));
2020:   }
2021:   PetscCall(PetscLogFlops(mumps->id.RINFO(2)));
2022:   PetscFunctionReturn(PETSC_SUCCESS);
2023: }
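
/*
   Usage sketch (hedged): with A->erroriffailure false, a failed numeric factorization does not
   abort; the classification stored in F->factorerrortype above can be queried, along with the
   raw MUMPS diagnostics:

     MatFactorError err;
     MatFactorGetError(F, &err);        // e.g. MAT_FACTOR_NUMERIC_ZEROPIVOT when INFOG(1) == -10
     PetscInt infog1;
     MatMumpsGetInfog(F, 1, &infog1);   // raw INFOG(1) from MUMPS
*/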

2025: /* Sets MUMPS options from the options database */
2026: static PetscErrorCode MatSetFromOptions_MUMPS(Mat F, Mat A)
2027: {
2028:   Mat_MUMPS    *mumps = (Mat_MUMPS *)F->data;
2029:   PetscMUMPSInt icntl = 0, size, *listvar_schur;
2030:   PetscInt      info[80], i, ninfo = 80, rbs, cbs;
2031:   PetscBool     flg = PETSC_FALSE, schur = (PetscBool)(mumps->id.ICNTL(26) == -1);
2032:   MumpsScalar  *arr;

2034:   PetscFunctionBegin;
2035:   PetscOptionsBegin(PetscObjectComm((PetscObject)F), ((PetscObject)F)->prefix, "MUMPS Options", "Mat");
2036:   if (mumps->id.job == JOB_NULL) { /* MatSetFromOptions_MUMPS() has never been called before */
2037:     PetscInt nthreads   = 0;
2038:     PetscInt nCNTL_pre  = mumps->CNTL_pre ? mumps->CNTL_pre[0] : 0;
2039:     PetscInt nICNTL_pre = mumps->ICNTL_pre ? mumps->ICNTL_pre[0] : 0;

2041:     mumps->petsc_comm = PetscObjectComm((PetscObject)A);
2042:     PetscCallMPI(MPI_Comm_size(mumps->petsc_comm, &mumps->petsc_size));
2043:     PetscCallMPI(MPI_Comm_rank(mumps->petsc_comm, &mumps->myid)); /* "if (!myid)" still works even if mumps_comm is different */

2045:     PetscCall(PetscOptionsName("-mat_mumps_use_omp_threads", "Convert MPI processes into OpenMP threads", "None", &mumps->use_petsc_omp_support));
2046:     if (mumps->use_petsc_omp_support) nthreads = -1; /* -1 will let PetscOmpCtrlCreate() guess a proper value when the user did not supply one */
2047:     /* do not use PetscOptionsInt() so that the option -mat_mumps_use_omp_threads is not displayed twice in the help */
2048:     PetscCall(PetscOptionsGetInt(NULL, ((PetscObject)F)->prefix, "-mat_mumps_use_omp_threads", &nthreads, NULL));
2049:     if (mumps->use_petsc_omp_support) {
2050:       PetscCheck(PetscDefined(HAVE_OPENMP_SUPPORT), PETSC_COMM_SELF, PETSC_ERR_SUP_SYS, "The system does not have PETSc OpenMP support but you added the -%smat_mumps_use_omp_threads option. Configure PETSc with --with-openmp --download-hwloc (or --with-hwloc) to enable it, see more in MATSOLVERMUMPS manual",
2051:                  ((PetscObject)F)->prefix ? ((PetscObject)F)->prefix : "");
2052:       PetscCheck(!schur, PETSC_COMM_SELF, PETSC_ERR_SUP, "Cannot use -%smat_mumps_use_omp_threads with the Schur complement feature", ((PetscObject)F)->prefix ? ((PetscObject)F)->prefix : "");
2053: #if defined(PETSC_HAVE_OPENMP_SUPPORT)
2054:       PetscCall(PetscOmpCtrlCreate(mumps->petsc_comm, nthreads, &mumps->omp_ctrl));
2055:       PetscCall(PetscOmpCtrlGetOmpComms(mumps->omp_ctrl, &mumps->omp_comm, &mumps->mumps_comm, &mumps->is_omp_master));
2056: #endif
2057:     } else {
2058:       mumps->omp_comm      = PETSC_COMM_SELF;
2059:       mumps->mumps_comm    = mumps->petsc_comm;
2060:       mumps->is_omp_master = PETSC_TRUE;
2061:     }
2062:     PetscCallMPI(MPI_Comm_size(mumps->omp_comm, &mumps->omp_comm_size));
2063:     mumps->reqs = NULL;
2064:     mumps->tag  = 0;

2066:     if (mumps->mumps_comm != MPI_COMM_NULL) {
2067:       if (PetscDefined(HAVE_OPENMP_SUPPORT) && mumps->use_petsc_omp_support) {
2068:         /* It looks like MUMPS does not dup the input comm. Dup a new comm for MUMPS to avoid any tag mismatches. */
2069:         MPI_Comm comm;
2070:         PetscCallMPI(MPI_Comm_dup(mumps->mumps_comm, &comm));
2071:         mumps->mumps_comm = comm;
2072:       } else PetscCall(PetscCommGetComm(mumps->petsc_comm, &mumps->mumps_comm));
2073:     }

2075:     mumps->id.comm_fortran = MPI_Comm_c2f(mumps->mumps_comm);
2076:     mumps->id.job          = JOB_INIT;
2077:     mumps->id.par          = 1; /* host participates in factorization and solve */
2078:     mumps->id.sym          = mumps->sym;

2080:     size          = mumps->id.size_schur;
2081:     arr           = mumps->id.schur;
2082:     listvar_schur = mumps->id.listvar_schur;
2083:     PetscMUMPS_c(mumps);
2084:     PetscCheck(mumps->id.INFOG(1) >= 0, PETSC_COMM_SELF, PETSC_ERR_LIB, "MUMPS error: INFOG(1)=%d " MUMPS_MANUALS, mumps->id.INFOG(1));

2086:     /* set PETSc-MUMPS default options - override MUMPS default */
2087:     mumps->id.ICNTL(3) = 0;
2088:     mumps->id.ICNTL(4) = 0;
2089:     if (mumps->petsc_size == 1) {
2090:       mumps->id.ICNTL(18) = 0; /* centralized assembled matrix input */
2091:       mumps->id.ICNTL(7)  = 7; /* automatic choice of ordering done by the package */
2092:     } else {
2093:       mumps->id.ICNTL(18) = 3; /* distributed assembled matrix input */
2094:       mumps->id.ICNTL(21) = 1; /* distributed solution */
2095:     }

2097:     /* restore cached ICNTL and CNTL values */
2098:     for (icntl = 0; icntl < nICNTL_pre; ++icntl) mumps->id.ICNTL(mumps->ICNTL_pre[1 + 2 * icntl]) = mumps->ICNTL_pre[2 + 2 * icntl];
2099:     for (icntl = 0; icntl < nCNTL_pre; ++icntl) mumps->id.CNTL((PetscInt)mumps->CNTL_pre[1 + 2 * icntl]) = mumps->CNTL_pre[2 + 2 * icntl];
2100:     PetscCall(PetscFree(mumps->ICNTL_pre));
2101:     PetscCall(PetscFree(mumps->CNTL_pre));

2103:     if (schur) {
2104:       mumps->id.size_schur    = size;
2105:       mumps->id.schur_lld     = size;
2106:       mumps->id.schur         = arr;
2107:       mumps->id.listvar_schur = listvar_schur;
2108:       if (mumps->petsc_size > 1) {
2109:         PetscBool gs; /* gs is false if any rank other than root has non-empty IS */

2111:         mumps->id.ICNTL(19) = 1;                                                                            /* MUMPS returns Schur centralized on the host */
2112:         gs                  = mumps->myid ? (mumps->id.size_schur ? PETSC_FALSE : PETSC_TRUE) : PETSC_TRUE; /* always true on root; false on others if their size != 0 */
2113:         PetscCall(MPIU_Allreduce(MPI_IN_PLACE, &gs, 1, MPIU_BOOL, MPI_LAND, mumps->petsc_comm));
2114:         PetscCheck(gs, PETSC_COMM_SELF, PETSC_ERR_SUP, "MUMPS distributed parallel Schur complements not yet supported from PETSc");
2115:       } else {
2116:         if (F->factortype == MAT_FACTOR_LU) {
2117:           mumps->id.ICNTL(19) = 3; /* MUMPS returns full matrix */
2118:         } else {
2119:           mumps->id.ICNTL(19) = 2; /* MUMPS returns lower triangular part */
2120:         }
2121:       }
2122:       mumps->id.ICNTL(26) = -1;
2123:     }

2125:     /* copy MUMPS default control values from master to slaves. Although slaves do not call MUMPS, they may access these values in code.
2126:        For example, ICNTL(9) is initialized to 1 by MUMPS and slaves check ICNTL(9) in MatSolve_MUMPS.
2127:      */
2128:     PetscCallMPI(MPI_Bcast(mumps->id.icntl, 40, MPI_INT, 0, mumps->omp_comm));
2129:     PetscCallMPI(MPI_Bcast(mumps->id.cntl, 15, MPIU_REAL, 0, mumps->omp_comm));

2131:     mumps->scat_rhs = NULL;
2132:     mumps->scat_sol = NULL;
2133:   }
2134:   PetscCall(PetscOptionsMUMPSInt("-mat_mumps_icntl_1", "ICNTL(1): output stream for error messages", "None", mumps->id.ICNTL(1), &icntl, &flg));
2135:   if (flg) mumps->id.ICNTL(1) = icntl;
2136:   PetscCall(PetscOptionsMUMPSInt("-mat_mumps_icntl_2", "ICNTL(2): output stream for diagnostic printing, statistics, and warning", "None", mumps->id.ICNTL(2), &icntl, &flg));
2137:   if (flg) mumps->id.ICNTL(2) = icntl;
2138:   PetscCall(PetscOptionsMUMPSInt("-mat_mumps_icntl_3", "ICNTL(3): output stream for global information, collected on the host", "None", mumps->id.ICNTL(3), &icntl, &flg));
2139:   if (flg) mumps->id.ICNTL(3) = icntl;

2141:   PetscCall(PetscOptionsMUMPSInt("-mat_mumps_icntl_4", "ICNTL(4): level of printing (0 to 4)", "None", mumps->id.ICNTL(4), &icntl, &flg));
2142:   if (flg) mumps->id.ICNTL(4) = icntl;
2143:   if (mumps->id.ICNTL(4) || PetscLogPrintInfo) mumps->id.ICNTL(3) = 6; /* restore the MUMPS default id.ICNTL(3) = 6 */

2145:   PetscCall(PetscOptionsMUMPSInt("-mat_mumps_icntl_6", "ICNTL(6): permutes the matrix to a zero-free diagonal and/or scales it (0 to 7)", "None", mumps->id.ICNTL(6), &icntl, &flg));
2146:   if (flg) mumps->id.ICNTL(6) = icntl;

2148:   PetscCall(PetscOptionsMUMPSInt("-mat_mumps_icntl_7", "ICNTL(7): computes a symmetric permutation in sequential analysis. 0=AMD, 2=AMF, 3=Scotch, 4=PORD, 5=Metis, 6=QAMD, and 7=auto (default)", "None", mumps->id.ICNTL(7), &icntl, &flg));
2149:   if (flg) {
2150:     PetscCheck(icntl != 1 && icntl >= 0 && icntl <= 7, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Valid values are 0=AMD, 2=AMF, 3=Scotch, 4=PORD, 5=Metis, 6=QAMD, and 7=auto");
2151:     mumps->id.ICNTL(7) = icntl;
2152:   }

2154:   PetscCall(PetscOptionsMUMPSInt("-mat_mumps_icntl_8", "ICNTL(8): scaling strategy (-2 to 8 or 77)", "None", mumps->id.ICNTL(8), &mumps->id.ICNTL(8), NULL));
2155:   /* PetscCall(PetscOptionsInt("-mat_mumps_icntl_9","ICNTL(9): computes the solution using A or A^T","None",mumps->id.ICNTL(9),&mumps->id.ICNTL(9),NULL)); handled by MatSolveTranspose_MUMPS() */
2156:   PetscCall(PetscOptionsMUMPSInt("-mat_mumps_icntl_10", "ICNTL(10): max num of refinements", "None", mumps->id.ICNTL(10), &mumps->id.ICNTL(10), NULL));
2157:   PetscCall(PetscOptionsMUMPSInt("-mat_mumps_icntl_11", "ICNTL(11): statistics related to an error analysis (via -ksp_view)", "None", mumps->id.ICNTL(11), &mumps->id.ICNTL(11), NULL));
2158:   PetscCall(PetscOptionsMUMPSInt("-mat_mumps_icntl_12", "ICNTL(12): an ordering strategy for symmetric matrices (0 to 3)", "None", mumps->id.ICNTL(12), &mumps->id.ICNTL(12), NULL));
2159:   PetscCall(PetscOptionsMUMPSInt("-mat_mumps_icntl_13", "ICNTL(13): parallelism of the root node (enable ScaLAPACK) and its splitting", "None", mumps->id.ICNTL(13), &mumps->id.ICNTL(13), NULL));
2160:   PetscCall(PetscOptionsMUMPSInt("-mat_mumps_icntl_14", "ICNTL(14): percentage increase in the estimated working space", "None", mumps->id.ICNTL(14), &mumps->id.ICNTL(14), NULL));
2161:   PetscCall(MatGetBlockSizes(A, &rbs, &cbs));
2162:   if (rbs == cbs && rbs > 1) mumps->id.ICNTL(15) = -rbs;
2163:   PetscCall(PetscOptionsMUMPSInt("-mat_mumps_icntl_15", "ICNTL(15): compression of the input matrix resulting from a block format", "None", mumps->id.ICNTL(15), &mumps->id.ICNTL(15), &flg));
2164:   if (flg) {
2165:     PetscCheck(mumps->id.ICNTL(15) <= 0, PETSC_COMM_SELF, PETSC_ERR_SUP, "Positive -mat_mumps_icntl_15 not handled");
2166:     PetscCheck((-mumps->id.ICNTL(15) % cbs == 0) && (-mumps->id.ICNTL(15) % rbs == 0), PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "The negation of -mat_mumps_icntl_15 must be a multiple of the row and column block sizes");
2167:   }
2168:   PetscCall(PetscOptionsMUMPSInt("-mat_mumps_icntl_19", "ICNTL(19): computes the Schur complement", "None", mumps->id.ICNTL(19), &mumps->id.ICNTL(19), NULL));
2169:   if (mumps->id.ICNTL(19) <= 0 || mumps->id.ICNTL(19) > 3) { /* reset any schur data (if any) */
2170:     PetscCall(MatDestroy(&F->schur));
2171:     PetscCall(MatMumpsResetSchur_Private(mumps));
2172:   }

2174:   /* Two MPICH Fortran MPI_IN_PLACE binding bugs prevented the use of 'mpich + mumps'. One happened with "mpi4py + mpich + mumps",
2175:      and was reported by Firedrake. See https://bitbucket.org/mpi4py/mpi4py/issues/162/mpi4py-initialization-breaks-fortran
2176:      and a petsc-maint mailing list thread with subject 'MUMPS segfaults in parallel because of ...'
2177:      This bug was fixed by https://github.com/pmodels/mpich/pull/4149. But the fix brought a new bug,
2178:      see https://github.com/pmodels/mpich/issues/5589. This bug was fixed by https://github.com/pmodels/mpich/pull/5590.
2179:      In short, we could not use distributed RHS with MPICH until v4.0b1.
2180:    */
2181: #if PETSC_PKG_MUMPS_VERSION_LT(5, 3, 0) || (defined(PETSC_HAVE_MPICH_NUMVERSION) && (PETSC_HAVE_MPICH_NUMVERSION < 40000101))
2182:   mumps->ICNTL20 = 0; /* Centralized dense RHS */
2183: #else
2184:   mumps->ICNTL20     = 10; /* Distributed dense RHS */
2185: #endif
2186:   PetscCall(PetscOptionsMUMPSInt("-mat_mumps_icntl_20", "ICNTL(20): give mumps centralized (0) or distributed (10) dense right-hand sides", "None", mumps->ICNTL20, &mumps->ICNTL20, &flg));
2187:   PetscCheck(!flg || mumps->ICNTL20 == 10 || mumps->ICNTL20 == 0, PETSC_COMM_SELF, PETSC_ERR_SUP, "ICNTL(20)=%d is not supported by the PETSc/MUMPS interface. Allowed values are 0, 10", (int)mumps->ICNTL20);
2188: #if PETSC_PKG_MUMPS_VERSION_LT(5, 3, 0)
2189:   PetscCheck(!flg || mumps->ICNTL20 != 10, PETSC_COMM_SELF, PETSC_ERR_SUP, "ICNTL(20)=10 is not supported before MUMPS-5.3.0");
2190: #endif
2191:   /* PetscCall(PetscOptionsMUMPSInt("-mat_mumps_icntl_21","ICNTL(21): the distribution (centralized or distributed) of the solution vectors","None",mumps->id.ICNTL(21),&mumps->id.ICNTL(21),NULL)); we only use distributed solution vector */

2193:   PetscCall(PetscOptionsMUMPSInt("-mat_mumps_icntl_22", "ICNTL(22): in-core/out-of-core factorization and solve (0 or 1)", "None", mumps->id.ICNTL(22), &mumps->id.ICNTL(22), NULL));
2194:   PetscCall(PetscOptionsMUMPSInt("-mat_mumps_icntl_23", "ICNTL(23): max size of the working memory (MB) that MUMPS can allocate per processor", "None", mumps->id.ICNTL(23), &mumps->id.ICNTL(23), NULL));
2195:   PetscCall(PetscOptionsMUMPSInt("-mat_mumps_icntl_24", "ICNTL(24): detection of null pivot rows (0 or 1)", "None", mumps->id.ICNTL(24), &mumps->id.ICNTL(24), NULL));
2196:   if (mumps->id.ICNTL(24)) { mumps->id.ICNTL(13) = 1; /* turn off ScaLAPACK to help with the correct detection of null pivots */ }

2198:   PetscCall(PetscOptionsMUMPSInt("-mat_mumps_icntl_25", "ICNTL(25): computes a solution of a deficient matrix and a null space basis", "None", mumps->id.ICNTL(25), &mumps->id.ICNTL(25), NULL));
2199:   PetscCall(PetscOptionsMUMPSInt("-mat_mumps_icntl_26", "ICNTL(26): drives the solution phase when a Schur complement matrix is present", "None", mumps->id.ICNTL(26), &mumps->id.ICNTL(26), NULL));
2200:   PetscCall(PetscOptionsMUMPSInt("-mat_mumps_icntl_27", "ICNTL(27): controls the blocking size for multiple right-hand sides", "None", mumps->id.ICNTL(27), &mumps->id.ICNTL(27), NULL));
2201:   PetscCall(PetscOptionsMUMPSInt("-mat_mumps_icntl_28", "ICNTL(28): use 1 for sequential analysis and ICNTL(7) ordering, or 2 for parallel analysis and ICNTL(29) ordering", "None", mumps->id.ICNTL(28), &mumps->id.ICNTL(28), NULL));
2202:   PetscCall(PetscOptionsMUMPSInt("-mat_mumps_icntl_29", "ICNTL(29): parallel ordering 1 = ptscotch, 2 = parmetis", "None", mumps->id.ICNTL(29), &mumps->id.ICNTL(29), NULL));
2203:   /* PetscCall(PetscOptionsMUMPSInt("-mat_mumps_icntl_30","ICNTL(30): compute user-specified set of entries in inv(A)","None",mumps->id.ICNTL(30),&mumps->id.ICNTL(30),NULL)); */ /* call MatMumpsGetInverse() directly */
2204:   PetscCall(PetscOptionsMUMPSInt("-mat_mumps_icntl_31", "ICNTL(31): indicates which factors may be discarded during factorization", "None", mumps->id.ICNTL(31), &mumps->id.ICNTL(31), NULL));
2205:   /* PetscCall(PetscOptionsMUMPSInt("-mat_mumps_icntl_32","ICNTL(32): performs the forward elimination of the right-hand sides during factorization","None",mumps->id.ICNTL(32),&mumps->id.ICNTL(32),NULL));  -- not supported by PETSc API */
2206:   PetscCall(PetscOptionsMUMPSInt("-mat_mumps_icntl_33", "ICNTL(33): compute determinant", "None", mumps->id.ICNTL(33), &mumps->id.ICNTL(33), NULL));
2207:   PetscCall(PetscOptionsMUMPSInt("-mat_mumps_icntl_35", "ICNTL(35): activates Block Low Rank (BLR) based factorization", "None", mumps->id.ICNTL(35), &mumps->id.ICNTL(35), NULL));
2208:   PetscCall(PetscOptionsMUMPSInt("-mat_mumps_icntl_36", "ICNTL(36): choice of BLR factorization variant", "None", mumps->id.ICNTL(36), &mumps->id.ICNTL(36), NULL));
2209:   PetscCall(PetscOptionsMUMPSInt("-mat_mumps_icntl_38", "ICNTL(38): estimated compression rate of LU factors with BLR", "None", mumps->id.ICNTL(38), &mumps->id.ICNTL(38), NULL));
2210:   PetscCall(PetscOptionsMUMPSInt("-mat_mumps_icntl_58", "ICNTL(58): defines options for symbolic factorization", "None", mumps->id.ICNTL(58), &mumps->id.ICNTL(58), NULL));

2212:   PetscCall(PetscOptionsReal("-mat_mumps_cntl_1", "CNTL(1): relative pivoting threshold", "None", mumps->id.CNTL(1), &mumps->id.CNTL(1), NULL));
2213:   PetscCall(PetscOptionsReal("-mat_mumps_cntl_2", "CNTL(2): stopping criterion of refinement", "None", mumps->id.CNTL(2), &mumps->id.CNTL(2), NULL));
2214:   PetscCall(PetscOptionsReal("-mat_mumps_cntl_3", "CNTL(3): absolute pivoting threshold", "None", mumps->id.CNTL(3), &mumps->id.CNTL(3), NULL));
2215:   PetscCall(PetscOptionsReal("-mat_mumps_cntl_4", "CNTL(4): value for static pivoting", "None", mumps->id.CNTL(4), &mumps->id.CNTL(4), NULL));
2216:   PetscCall(PetscOptionsReal("-mat_mumps_cntl_5", "CNTL(5): fixation for null pivots", "None", mumps->id.CNTL(5), &mumps->id.CNTL(5), NULL));
2217:   PetscCall(PetscOptionsReal("-mat_mumps_cntl_7", "CNTL(7): dropping parameter used during BLR", "None", mumps->id.CNTL(7), &mumps->id.CNTL(7), NULL));

2219:   PetscCall(PetscOptionsString("-mat_mumps_ooc_tmpdir", "out-of-core directory", "None", mumps->id.ooc_tmpdir, mumps->id.ooc_tmpdir, sizeof(mumps->id.ooc_tmpdir), NULL));

2221:   PetscCall(PetscOptionsIntArray("-mat_mumps_view_info", "request INFO local to each processor", "", info, &ninfo, NULL));
2222:   if (ninfo) {
2223:     PetscCheck(ninfo <= 80, PETSC_COMM_SELF, PETSC_ERR_USER, "number of INFO %" PetscInt_FMT " must be <= 80", ninfo);
2224:     PetscCall(PetscMalloc1(ninfo, &mumps->info));
2225:     mumps->ninfo = ninfo;
2226:     for (i = 0; i < ninfo; i++) {
2227:       PetscCheck(info[i] >= 0 && info[i] <= 80, PETSC_COMM_SELF, PETSC_ERR_USER, "index of INFO %" PetscInt_FMT " must be between 1 and 80", info[i]);
2228:       mumps->info[i] = info[i];
2229:     }
2230:   }
2231:   PetscOptionsEnd();
2232:   PetscFunctionReturn(PETSC_SUCCESS);
2233: }
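
/*
   Option sketch (hedged): typical runtime settings consumed by the routine above, e.g.

     -mat_mumps_icntl_14 50         # grow the estimated working space by 50%
     -mat_mumps_icntl_24 1          # detect null pivot rows (also forces ICNTL(13) = 1 above)
     -mat_mumps_cntl_1 0.05         # relative pivoting threshold
     -mat_mumps_use_omp_threads 4   # fold MPI ranks into OpenMP threads (needs PETSc OpenMP support)
*/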

2235: static PetscErrorCode MatFactorSymbolic_MUMPS_ReportIfError(Mat F, Mat A, const MatFactorInfo *info, Mat_MUMPS *mumps)
2236: {
2237:   PetscFunctionBegin;
2238:   if (mumps->id.INFOG(1) < 0) {
2239:     PetscCheck(!A->erroriffailure, PETSC_COMM_SELF, PETSC_ERR_LIB, "MUMPS error in analysis: INFOG(1)=%d " MUMPS_MANUALS, mumps->id.INFOG(1));
2240:     if (mumps->id.INFOG(1) == -6) {
2241:       PetscCall(PetscInfo(F, "MUMPS error in analysis: matrix is singular, INFOG(1)=%d, INFO(2)=%d\n", mumps->id.INFOG(1), mumps->id.INFO(2)));
2242:       F->factorerrortype = MAT_FACTOR_STRUCT_ZEROPIVOT;
2243:     } else if (mumps->id.INFOG(1) == -5 || mumps->id.INFOG(1) == -7) {
2244:       PetscCall(PetscInfo(F, "MUMPS error in analysis: problem with work array, INFOG(1)=%d, INFO(2)=%d\n", mumps->id.INFOG(1), mumps->id.INFO(2)));
2245:       F->factorerrortype = MAT_FACTOR_OUTMEMORY;
2246:     } else if (mumps->id.INFOG(1) == -16 && mumps->id.INFO(2) == 0) {
2247:       PetscCall(PetscInfo(F, "MUMPS error in analysis: empty matrix\n"));
2248:     } else {
2249:       PetscCall(PetscInfo(F, "MUMPS error in analysis: INFOG(1)=%d, INFO(2)=%d " MUMPS_MANUALS "\n", mumps->id.INFOG(1), mumps->id.INFO(2)));
2250:       F->factorerrortype = MAT_FACTOR_OTHER;
2251:     }
2252:   }
2253:   PetscFunctionReturn(PETSC_SUCCESS);
2254: }

2256: static PetscErrorCode MatLUFactorSymbolic_AIJMUMPS(Mat F, Mat A, IS r, IS c, const MatFactorInfo *info)
2257: {
2258:   Mat_MUMPS     *mumps = (Mat_MUMPS *)F->data;
2259:   Vec            b;
2260:   const PetscInt M = A->rmap->N;

2262:   PetscFunctionBegin;
2263:   if (mumps->matstruc == SAME_NONZERO_PATTERN) {
2264:     /* F is assembled by a previous call of MatLUFactorSymbolic_AIJMUMPS() */
2265:     PetscFunctionReturn(PETSC_SUCCESS);
2266:   }

2268:   /* Set MUMPS options from the options database */
2269:   PetscCall(MatSetFromOptions_MUMPS(F, A));

2271:   PetscCall((*mumps->ConvertToTriples)(A, 1, MAT_INITIAL_MATRIX, mumps));
2272:   PetscCall(MatMumpsGatherNonzerosOnMaster(MAT_INITIAL_MATRIX, mumps));

2274:   /* analysis phase */
2275:   mumps->id.job = JOB_FACTSYMBOLIC;
2276:   mumps->id.n   = M;
2277:   switch (mumps->id.ICNTL(18)) {
2278:   case 0: /* centralized assembled matrix input */
2279:     if (!mumps->myid) {
2280:       mumps->id.nnz = mumps->nnz;
2281:       mumps->id.irn = mumps->irn;
2282:       mumps->id.jcn = mumps->jcn;
2283:       if (mumps->id.ICNTL(6) > 1) mumps->id.a = (MumpsScalar *)mumps->val;
2284:       if (r && mumps->id.ICNTL(7) == 7) {
2285:         mumps->id.ICNTL(7) = 1;
2286:         if (!mumps->myid) {
2287:           const PetscInt *idx;
2288:           PetscInt        i;

2290:           PetscCall(PetscMalloc1(M, &mumps->id.perm_in));
2291:           PetscCall(ISGetIndices(r, &idx));
2292:           for (i = 0; i < M; i++) PetscCall(PetscMUMPSIntCast(idx[i] + 1, &(mumps->id.perm_in[i]))); /* perm_in[]: start from 1, not 0! */
2293:           PetscCall(ISRestoreIndices(r, &idx));
2294:         }
2295:       }
2296:     }
2297:     break;
2298:   case 3: /* distributed assembled matrix input (size>1) */
2299:     mumps->id.nnz_loc = mumps->nnz;
2300:     mumps->id.irn_loc = mumps->irn;
2301:     mumps->id.jcn_loc = mumps->jcn;
2302:     if (mumps->id.ICNTL(6) > 1) mumps->id.a_loc = (MumpsScalar *)mumps->val;
2303:     if (mumps->ICNTL20 == 0) { /* Centralized rhs. Create scatter scat_rhs for repeated use in MatSolve() */
2304:       PetscCall(MatCreateVecs(A, NULL, &b));
2305:       PetscCall(VecScatterCreateToZero(b, &mumps->scat_rhs, &mumps->b_seq));
2306:       PetscCall(VecDestroy(&b));
2307:     }
2308:     break;
2309:   }
2310:   PetscMUMPS_c(mumps);
2311:   PetscCall(MatFactorSymbolic_MUMPS_ReportIfError(F, A, info, mumps));

2313:   F->ops->lufactornumeric   = MatFactorNumeric_MUMPS;
2314:   F->ops->solve             = MatSolve_MUMPS;
2315:   F->ops->solvetranspose    = MatSolveTranspose_MUMPS;
2316:   F->ops->matsolve          = MatMatSolve_MUMPS;
2317:   F->ops->mattransposesolve = MatMatTransposeSolve_MUMPS;
2318:   F->ops->matsolvetranspose = MatMatSolveTranspose_MUMPS;

2320:   mumps->matstruc = SAME_NONZERO_PATTERN;
2321:   PetscFunctionReturn(PETSC_SUCCESS);
2322: }

2324: /* Note the Petsc r and c permutations are ignored */
2325: static PetscErrorCode MatLUFactorSymbolic_BAIJMUMPS(Mat F, Mat A, IS r, IS c, const MatFactorInfo *info)
2326: {
2327:   Mat_MUMPS     *mumps = (Mat_MUMPS *)F->data;
2328:   Vec            b;
2329:   const PetscInt M = A->rmap->N;

2331:   PetscFunctionBegin;
2332:   if (mumps->matstruc == SAME_NONZERO_PATTERN) {
2333:     /* F is assembled by a previous call of MatLUFactorSymbolic_BAIJMUMPS() */
2334:     PetscFunctionReturn(PETSC_SUCCESS);
2335:   }

2337:   /* Set MUMPS options from the options database */
2338:   PetscCall(MatSetFromOptions_MUMPS(F, A));

2340:   PetscCall((*mumps->ConvertToTriples)(A, 1, MAT_INITIAL_MATRIX, mumps));
2341:   PetscCall(MatMumpsGatherNonzerosOnMaster(MAT_INITIAL_MATRIX, mumps));

2343:   /* analysis phase */
2344:   mumps->id.job = JOB_FACTSYMBOLIC;
2345:   mumps->id.n   = M;
2346:   switch (mumps->id.ICNTL(18)) {
2347:   case 0: /* centralized assembled matrix input */
2348:     if (!mumps->myid) {
2349:       mumps->id.nnz = mumps->nnz;
2350:       mumps->id.irn = mumps->irn;
2351:       mumps->id.jcn = mumps->jcn;
2352:       if (mumps->id.ICNTL(6) > 1) mumps->id.a = (MumpsScalar *)mumps->val;
2353:     }
2354:     break;
2355:   case 3: /* distributed assembled matrix input (size>1) */
2356:     mumps->id.nnz_loc = mumps->nnz;
2357:     mumps->id.irn_loc = mumps->irn;
2358:     mumps->id.jcn_loc = mumps->jcn;
2359:     if (mumps->id.ICNTL(6) > 1) mumps->id.a_loc = (MumpsScalar *)mumps->val;
2360:     if (mumps->ICNTL20 == 0) { /* Centralized rhs. Create scatter scat_rhs for repeated use in MatSolve() */
2361:       PetscCall(MatCreateVecs(A, NULL, &b));
2362:       PetscCall(VecScatterCreateToZero(b, &mumps->scat_rhs, &mumps->b_seq));
2363:       PetscCall(VecDestroy(&b));
2364:     }
2365:     break;
2366:   }
2367:   PetscMUMPS_c(mumps);
2368:   PetscCall(MatFactorSymbolic_MUMPS_ReportIfError(F, A, info, mumps));

2370:   F->ops->lufactornumeric   = MatFactorNumeric_MUMPS;
2371:   F->ops->solve             = MatSolve_MUMPS;
2372:   F->ops->solvetranspose    = MatSolveTranspose_MUMPS;
2373:   F->ops->matsolvetranspose = MatMatSolveTranspose_MUMPS;

2375:   mumps->matstruc = SAME_NONZERO_PATTERN;
2376:   PetscFunctionReturn(PETSC_SUCCESS);
2377: }

2379: /* Note the Petsc r permutation and factor info are ignored */
2380: static PetscErrorCode MatCholeskyFactorSymbolic_MUMPS(Mat F, Mat A, IS r, const MatFactorInfo *info)
2381: {
2382:   Mat_MUMPS     *mumps = (Mat_MUMPS *)F->data;
2383:   Vec            b;
2384:   const PetscInt M = A->rmap->N;

2386:   PetscFunctionBegin;
2387:   if (mumps->matstruc == SAME_NONZERO_PATTERN) {
2388:     /* F is assembled by a previous call of MatCholeskyFactorSymbolic_MUMPS() */
2389:     PetscFunctionReturn(PETSC_SUCCESS);
2390:   }

2392:   /* Set MUMPS options from the options database */
2393:   PetscCall(MatSetFromOptions_MUMPS(F, A));

2395:   PetscCall((*mumps->ConvertToTriples)(A, 1, MAT_INITIAL_MATRIX, mumps));
2396:   PetscCall(MatMumpsGatherNonzerosOnMaster(MAT_INITIAL_MATRIX, mumps));

2398:   /* analysis phase */
2399:   mumps->id.job = JOB_FACTSYMBOLIC;
2400:   mumps->id.n   = M;
2401:   switch (mumps->id.ICNTL(18)) {
2402:   case 0: /* centralized assembled matrix input */
2403:     if (!mumps->myid) {
2404:       mumps->id.nnz = mumps->nnz;
2405:       mumps->id.irn = mumps->irn;
2406:       mumps->id.jcn = mumps->jcn;
2407:       if (mumps->id.ICNTL(6) > 1) mumps->id.a = (MumpsScalar *)mumps->val;
2408:     }
2409:     break;
2410:   case 3: /* distributed assembled matrix input (size>1) */
2411:     mumps->id.nnz_loc = mumps->nnz;
2412:     mumps->id.irn_loc = mumps->irn;
2413:     mumps->id.jcn_loc = mumps->jcn;
2414:     if (mumps->id.ICNTL(6) > 1) mumps->id.a_loc = (MumpsScalar *)mumps->val;
2415:     if (mumps->ICNTL20 == 0) { /* Centralized rhs. Create scatter scat_rhs for repeated use in MatSolve() */
2416:       PetscCall(MatCreateVecs(A, NULL, &b));
2417:       PetscCall(VecScatterCreateToZero(b, &mumps->scat_rhs, &mumps->b_seq));
2418:       PetscCall(VecDestroy(&b));
2419:     }
2420:     break;
2421:   }
2422:   PetscMUMPS_c(mumps);
2423:   PetscCall(MatFactorSymbolic_MUMPS_ReportIfError(F, A, info, mumps));

2425:   F->ops->choleskyfactornumeric = MatFactorNumeric_MUMPS;
2426:   F->ops->solve                 = MatSolve_MUMPS;
2427:   F->ops->solvetranspose        = MatSolve_MUMPS;
2428:   F->ops->matsolve              = MatMatSolve_MUMPS;
2429:   F->ops->mattransposesolve     = MatMatTransposeSolve_MUMPS;
2430:   F->ops->matsolvetranspose     = MatMatSolveTranspose_MUMPS;
2431: #if defined(PETSC_USE_COMPLEX)
2432:   F->ops->getinertia = NULL;
2433: #else
2434:   F->ops->getinertia = MatGetInertia_SBAIJMUMPS;
2435: #endif

2437:   mumps->matstruc = SAME_NONZERO_PATTERN;
2438:   PetscFunctionReturn(PETSC_SUCCESS);
2439: }

2441: static PetscErrorCode MatView_MUMPS(Mat A, PetscViewer viewer)
2442: {
2443:   PetscBool         iascii;
2444:   PetscViewerFormat format;
2445:   Mat_MUMPS        *mumps = (Mat_MUMPS *)A->data;

2447:   PetscFunctionBegin;
2448:   /* check if matrix is mumps type */
2449:   if (A->ops->solve != MatSolve_MUMPS) PetscFunctionReturn(PETSC_SUCCESS);

2451:   PetscCall(PetscObjectTypeCompare((PetscObject)viewer, PETSCVIEWERASCII, &iascii));
2452:   if (iascii) {
2453:     PetscCall(PetscViewerGetFormat(viewer, &format));
2454:     if (format == PETSC_VIEWER_ASCII_INFO || format == PETSC_VIEWER_ASCII_INFO_DETAIL) {
2455:       PetscCall(PetscViewerASCIIPrintf(viewer, "MUMPS run parameters:\n"));
2456:       if (format == PETSC_VIEWER_ASCII_INFO_DETAIL) {
2457:         PetscCall(PetscViewerASCIIPrintf(viewer, "  SYM (matrix type):                   %d\n", mumps->id.sym));
2458:         PetscCall(PetscViewerASCIIPrintf(viewer, "  PAR (host participation):            %d\n", mumps->id.par));
2459:         PetscCall(PetscViewerASCIIPrintf(viewer, "  ICNTL(1) (output for error):         %d\n", mumps->id.ICNTL(1)));
2460:         PetscCall(PetscViewerASCIIPrintf(viewer, "  ICNTL(2) (output of diagnostic msg): %d\n", mumps->id.ICNTL(2)));
2461:         PetscCall(PetscViewerASCIIPrintf(viewer, "  ICNTL(3) (output for global info):   %d\n", mumps->id.ICNTL(3)));
2462:         PetscCall(PetscViewerASCIIPrintf(viewer, "  ICNTL(4) (level of printing):        %d\n", mumps->id.ICNTL(4)));
2463:         PetscCall(PetscViewerASCIIPrintf(viewer, "  ICNTL(5) (input mat struct):         %d\n", mumps->id.ICNTL(5)));
2464:         PetscCall(PetscViewerASCIIPrintf(viewer, "  ICNTL(6) (matrix prescaling):        %d\n", mumps->id.ICNTL(6)));
2465:         PetscCall(PetscViewerASCIIPrintf(viewer, "  ICNTL(7) (sequential matrix ordering):%d\n", mumps->id.ICNTL(7)));
2466:         PetscCall(PetscViewerASCIIPrintf(viewer, "  ICNTL(8) (scaling strategy):         %d\n", mumps->id.ICNTL(8)));
2467:         PetscCall(PetscViewerASCIIPrintf(viewer, "  ICNTL(10) (max num of refinements):  %d\n", mumps->id.ICNTL(10)));
2468:         PetscCall(PetscViewerASCIIPrintf(viewer, "  ICNTL(11) (error analysis):          %d\n", mumps->id.ICNTL(11)));
2469:         if (mumps->id.ICNTL(11) > 0) {
2470:           PetscCall(PetscViewerASCIIPrintf(viewer, "    RINFOG(4) (inf norm of input mat):        %g\n", mumps->id.RINFOG(4)));
2471:           PetscCall(PetscViewerASCIIPrintf(viewer, "    RINFOG(5) (inf norm of solution):         %g\n", mumps->id.RINFOG(5)));
2472:           PetscCall(PetscViewerASCIIPrintf(viewer, "    RINFOG(6) (inf norm of residual):         %g\n", mumps->id.RINFOG(6)));
2473:           PetscCall(PetscViewerASCIIPrintf(viewer, "    RINFOG(7),RINFOG(8) (backward error est): %g, %g\n", mumps->id.RINFOG(7), mumps->id.RINFOG(8)));
2474:           PetscCall(PetscViewerASCIIPrintf(viewer, "    RINFOG(9) (error estimate):               %g\n", mumps->id.RINFOG(9)));
2475:           PetscCall(PetscViewerASCIIPrintf(viewer, "    RINFOG(10),RINFOG(11)(condition numbers): %g, %g\n", mumps->id.RINFOG(10), mumps->id.RINFOG(11)));
2476:         }
2477:         PetscCall(PetscViewerASCIIPrintf(viewer, "  ICNTL(12) (efficiency control):                         %d\n", mumps->id.ICNTL(12)));
2478:         PetscCall(PetscViewerASCIIPrintf(viewer, "  ICNTL(13) (sequential factorization of the root node):  %d\n", mumps->id.ICNTL(13)));
2479:         PetscCall(PetscViewerASCIIPrintf(viewer, "  ICNTL(14) (percentage of estimated workspace increase): %d\n", mumps->id.ICNTL(14)));
2480:         PetscCall(PetscViewerASCIIPrintf(viewer, "  ICNTL(15) (compression of the input matrix):            %d\n", mumps->id.ICNTL(15)));
2481:         /* ICNTL(16-17) not used */
2482:         PetscCall(PetscViewerASCIIPrintf(viewer, "  ICNTL(18) (input mat struct):                           %d\n", mumps->id.ICNTL(18)));
2483:         PetscCall(PetscViewerASCIIPrintf(viewer, "  ICNTL(19) (Schur complement info):                      %d\n", mumps->id.ICNTL(19)));
2484:         PetscCall(PetscViewerASCIIPrintf(viewer, "  ICNTL(20) (RHS sparse pattern):                         %d\n", mumps->id.ICNTL(20)));
2485:         PetscCall(PetscViewerASCIIPrintf(viewer, "  ICNTL(21) (solution struct):                            %d\n", mumps->id.ICNTL(21)));
2486:         PetscCall(PetscViewerASCIIPrintf(viewer, "  ICNTL(22) (in-core/out-of-core facility):               %d\n", mumps->id.ICNTL(22)));
2487:         PetscCall(PetscViewerASCIIPrintf(viewer, "  ICNTL(23) (max size of memory can be allocated locally):%d\n", mumps->id.ICNTL(23)));

2489:         PetscCall(PetscViewerASCIIPrintf(viewer, "  ICNTL(24) (detection of null pivot rows):               %d\n", mumps->id.ICNTL(24)));
2490:         PetscCall(PetscViewerASCIIPrintf(viewer, "  ICNTL(25) (computation of a null space basis):          %d\n", mumps->id.ICNTL(25)));
2491:         PetscCall(PetscViewerASCIIPrintf(viewer, "  ICNTL(26) (Schur options for RHS or solution):          %d\n", mumps->id.ICNTL(26)));
2492:         PetscCall(PetscViewerASCIIPrintf(viewer, "  ICNTL(27) (blocking size for multiple RHS):             %d\n", mumps->id.ICNTL(27)));
2493:         PetscCall(PetscViewerASCIIPrintf(viewer, "  ICNTL(28) (use parallel or sequential ordering):        %d\n", mumps->id.ICNTL(28)));
2494:         PetscCall(PetscViewerASCIIPrintf(viewer, "  ICNTL(29) (parallel ordering):                          %d\n", mumps->id.ICNTL(29)));

2496:         PetscCall(PetscViewerASCIIPrintf(viewer, "  ICNTL(30) (user-specified set of entries in inv(A)):    %d\n", mumps->id.ICNTL(30)));
2497:         PetscCall(PetscViewerASCIIPrintf(viewer, "  ICNTL(31) (factors is discarded in the solve phase):    %d\n", mumps->id.ICNTL(31)));
2498:         PetscCall(PetscViewerASCIIPrintf(viewer, "  ICNTL(33) (compute determinant):                        %d\n", mumps->id.ICNTL(33)));
2499:         PetscCall(PetscViewerASCIIPrintf(viewer, "  ICNTL(35) (activate BLR based factorization):           %d\n", mumps->id.ICNTL(35)));
2500:         PetscCall(PetscViewerASCIIPrintf(viewer, "  ICNTL(36) (choice of BLR factorization variant):        %d\n", mumps->id.ICNTL(36)));
2501:         PetscCall(PetscViewerASCIIPrintf(viewer, "  ICNTL(38) (estimated compression rate of LU factors):   %d\n", mumps->id.ICNTL(38)));
2502:         PetscCall(PetscViewerASCIIPrintf(viewer, "  ICNTL(58) (options for symbolic factorization):         %d\n", mumps->id.ICNTL(58)));

2504:         PetscCall(PetscViewerASCIIPrintf(viewer, "  CNTL(1) (relative pivoting threshold):      %g\n", mumps->id.CNTL(1)));
2505:         PetscCall(PetscViewerASCIIPrintf(viewer, "  CNTL(2) (stopping criterion of refinement): %g\n", mumps->id.CNTL(2)));
2506:         PetscCall(PetscViewerASCIIPrintf(viewer, "  CNTL(3) (absolute pivoting threshold):      %g\n", mumps->id.CNTL(3)));
2507:         PetscCall(PetscViewerASCIIPrintf(viewer, "  CNTL(4) (value of static pivoting):         %g\n", mumps->id.CNTL(4)));
2508:         PetscCall(PetscViewerASCIIPrintf(viewer, "  CNTL(5) (fixation for null pivots):         %g\n", mumps->id.CNTL(5)));
2509:         PetscCall(PetscViewerASCIIPrintf(viewer, "  CNTL(7) (dropping parameter for BLR):       %g\n", mumps->id.CNTL(7)));

2511:         /* information local to each processor */
2512:         PetscCall(PetscViewerASCIIPrintf(viewer, "  RINFO(1) (local estimated flops for the elimination after analysis):\n"));
2513:         PetscCall(PetscViewerASCIIPushSynchronized(viewer));
2514:         PetscCall(PetscViewerASCIISynchronizedPrintf(viewer, "    [%d] %g\n", mumps->myid, mumps->id.RINFO(1)));
2515:         PetscCall(PetscViewerFlush(viewer));
2516:         PetscCall(PetscViewerASCIIPrintf(viewer, "  RINFO(2) (local estimated flops for the assembly after factorization):\n"));
2517:         PetscCall(PetscViewerASCIISynchronizedPrintf(viewer, "    [%d] %g\n", mumps->myid, mumps->id.RINFO(2)));
2518:         PetscCall(PetscViewerFlush(viewer));
2519:         PetscCall(PetscViewerASCIIPrintf(viewer, "  RINFO(3) (local estimated flops for the elimination after factorization):\n"));
2520:         PetscCall(PetscViewerASCIISynchronizedPrintf(viewer, "    [%d] %g\n", mumps->myid, mumps->id.RINFO(3)));
2521:         PetscCall(PetscViewerFlush(viewer));

2523:         PetscCall(PetscViewerASCIIPrintf(viewer, "  INFO(15) (estimated size of (in MB) MUMPS internal data for running numerical factorization):\n"));
2524:         PetscCall(PetscViewerASCIISynchronizedPrintf(viewer, "    [%d] %d\n", mumps->myid, mumps->id.INFO(15)));
2525:         PetscCall(PetscViewerFlush(viewer));

2527:         PetscCall(PetscViewerASCIIPrintf(viewer, "  INFO(16) (size of (in MB) MUMPS internal data used during numerical factorization):\n"));
2528:         PetscCall(PetscViewerASCIISynchronizedPrintf(viewer, "    [%d] %d\n", mumps->myid, mumps->id.INFO(16)));
2529:         PetscCall(PetscViewerFlush(viewer));

2531:         PetscCall(PetscViewerASCIIPrintf(viewer, "  INFO(23) (num of pivots eliminated on this processor after factorization):\n"));
2532:         PetscCall(PetscViewerASCIISynchronizedPrintf(viewer, "    [%d] %d\n", mumps->myid, mumps->id.INFO(23)));
2533:         PetscCall(PetscViewerFlush(viewer));

2535:         if (mumps->ninfo && mumps->ninfo <= 80) {
2536:           PetscInt i;
2537:           for (i = 0; i < mumps->ninfo; i++) {
2538:             PetscCall(PetscViewerASCIIPrintf(viewer, "  INFO(%" PetscInt_FMT "):\n", mumps->info[i]));
2539:             PetscCall(PetscViewerASCIISynchronizedPrintf(viewer, "    [%d] %d\n", mumps->myid, mumps->id.INFO(mumps->info[i])));
2540:             PetscCall(PetscViewerFlush(viewer));
2541:           }
2542:         }
2543:         PetscCall(PetscViewerASCIIPopSynchronized(viewer));
2544:       } else PetscCall(PetscViewerASCIIPrintf(viewer, "  Use -%sksp_view ::ascii_info_detail to display information for all processes\n", ((PetscObject)A)->prefix ? ((PetscObject)A)->prefix : ""));

2546:       if (mumps->myid == 0) { /* information from the host */
2547:         PetscCall(PetscViewerASCIIPrintf(viewer, "  RINFOG(1) (global estimated flops for the elimination after analysis): %g\n", mumps->id.RINFOG(1)));
2548:         PetscCall(PetscViewerASCIIPrintf(viewer, "  RINFOG(2) (global estimated flops for the assembly after factorization): %g\n", mumps->id.RINFOG(2)));
2549:         PetscCall(PetscViewerASCIIPrintf(viewer, "  RINFOG(3) (global estimated flops for the elimination after factorization): %g\n", mumps->id.RINFOG(3)));
2550:         PetscCall(PetscViewerASCIIPrintf(viewer, "  (RINFOG(12) RINFOG(13))*2^INFOG(34) (determinant): (%g,%g)*(2^%d)\n", mumps->id.RINFOG(12), mumps->id.RINFOG(13), mumps->id.INFOG(34)));

2552:         PetscCall(PetscViewerASCIIPrintf(viewer, "  INFOG(3) (estimated real workspace for factors on all processors after analysis): %d\n", mumps->id.INFOG(3)));
2553:         PetscCall(PetscViewerASCIIPrintf(viewer, "  INFOG(4) (estimated integer workspace for factors on all processors after analysis): %d\n", mumps->id.INFOG(4)));
2554:         PetscCall(PetscViewerASCIIPrintf(viewer, "  INFOG(5) (estimated maximum front size in the complete tree): %d\n", mumps->id.INFOG(5)));
2555:         PetscCall(PetscViewerASCIIPrintf(viewer, "  INFOG(6) (number of nodes in the complete tree): %d\n", mumps->id.INFOG(6)));
2556:         PetscCall(PetscViewerASCIIPrintf(viewer, "  INFOG(7) (ordering option effectively used after analysis): %d\n", mumps->id.INFOG(7)));
2557:         PetscCall(PetscViewerASCIIPrintf(viewer, "  INFOG(8) (structural symmetry in percent of the permuted matrix after analysis): %d\n", mumps->id.INFOG(8)));
2558:         PetscCall(PetscViewerASCIIPrintf(viewer, "  INFOG(9) (total real/complex workspace to store the matrix factors after factorization): %d\n", mumps->id.INFOG(9)));
2559:         PetscCall(PetscViewerASCIIPrintf(viewer, "  INFOG(10) (total integer space store the matrix factors after factorization): %d\n", mumps->id.INFOG(10)));
2560:         PetscCall(PetscViewerASCIIPrintf(viewer, "  INFOG(11) (order of largest frontal matrix after factorization): %d\n", mumps->id.INFOG(11)));
2561:         PetscCall(PetscViewerASCIIPrintf(viewer, "  INFOG(12) (number of off-diagonal pivots): %d\n", mumps->id.INFOG(12)));
2562:         PetscCall(PetscViewerASCIIPrintf(viewer, "  INFOG(13) (number of delayed pivots after factorization): %d\n", mumps->id.INFOG(13)));
2563:         PetscCall(PetscViewerASCIIPrintf(viewer, "  INFOG(14) (number of memory compress after factorization): %d\n", mumps->id.INFOG(14)));
2564:         PetscCall(PetscViewerASCIIPrintf(viewer, "  INFOG(15) (number of steps of iterative refinement after solution): %d\n", mumps->id.INFOG(15)));
2565:         PetscCall(PetscViewerASCIIPrintf(viewer, "  INFOG(16) (estimated size (in MB) of all MUMPS internal data for factorization after analysis: value on the most memory consuming processor): %d\n", mumps->id.INFOG(16)));
2566:         PetscCall(PetscViewerASCIIPrintf(viewer, "  INFOG(17) (estimated size of all MUMPS internal data for factorization after analysis: sum over all processors): %d\n", mumps->id.INFOG(17)));
2567:         PetscCall(PetscViewerASCIIPrintf(viewer, "  INFOG(18) (size of all MUMPS internal data allocated during factorization: value on the most memory consuming processor): %d\n", mumps->id.INFOG(18)));
2568:         PetscCall(PetscViewerASCIIPrintf(viewer, "  INFOG(19) (size of all MUMPS internal data allocated during factorization: sum over all processors): %d\n", mumps->id.INFOG(19)));
2569:         PetscCall(PetscViewerASCIIPrintf(viewer, "  INFOG(20) (estimated number of entries in the factors): %d\n", mumps->id.INFOG(20)));
2570:         PetscCall(PetscViewerASCIIPrintf(viewer, "  INFOG(21) (size in MB of memory effectively used during factorization - value on the most memory consuming processor): %d\n", mumps->id.INFOG(21)));
2571:         PetscCall(PetscViewerASCIIPrintf(viewer, "  INFOG(22) (size in MB of memory effectively used during factorization - sum over all processors): %d\n", mumps->id.INFOG(22)));
2572:         PetscCall(PetscViewerASCIIPrintf(viewer, "  INFOG(23) (after analysis: value of ICNTL(6) effectively used): %d\n", mumps->id.INFOG(23)));
2573:         PetscCall(PetscViewerASCIIPrintf(viewer, "  INFOG(24) (after analysis: value of ICNTL(12) effectively used): %d\n", mumps->id.INFOG(24)));
2574:         PetscCall(PetscViewerASCIIPrintf(viewer, "  INFOG(25) (after factorization: number of pivots modified by static pivoting): %d\n", mumps->id.INFOG(25)));
2575:         PetscCall(PetscViewerASCIIPrintf(viewer, "  INFOG(28) (after factorization: number of null pivots encountered): %d\n", mumps->id.INFOG(28)));
2576:         PetscCall(PetscViewerASCIIPrintf(viewer, "  INFOG(29) (after factorization: effective number of entries in the factors (sum over all processors)): %d\n", mumps->id.INFOG(29)));
2577:         PetscCall(PetscViewerASCIIPrintf(viewer, "  INFOG(30, 31) (after solution: size in Mbytes of memory used during solution phase): %d, %d\n", mumps->id.INFOG(30), mumps->id.INFOG(31)));
2578:         PetscCall(PetscViewerASCIIPrintf(viewer, "  INFOG(32) (after analysis: type of analysis done): %d\n", mumps->id.INFOG(32)));
2579:         PetscCall(PetscViewerASCIIPrintf(viewer, "  INFOG(33) (value used for ICNTL(8)): %d\n", mumps->id.INFOG(33)));
2580:         PetscCall(PetscViewerASCIIPrintf(viewer, "  INFOG(34) (exponent of the determinant if determinant is requested): %d\n", mumps->id.INFOG(34)));
2581:         PetscCall(PetscViewerASCIIPrintf(viewer, "  INFOG(35) (after factorization: number of entries taking into account BLR factor compression - sum over all processors): %d\n", mumps->id.INFOG(35)));
2582:         PetscCall(PetscViewerASCIIPrintf(viewer, "  INFOG(36) (after analysis: estimated size of all MUMPS internal data for running BLR in-core - value on the most memory consuming processor): %d\n", mumps->id.INFOG(36)));
2583:         PetscCall(PetscViewerASCIIPrintf(viewer, "  INFOG(37) (after analysis: estimated size of all MUMPS internal data for running BLR in-core - sum over all processors): %d\n", mumps->id.INFOG(37)));
2584:         PetscCall(PetscViewerASCIIPrintf(viewer, "  INFOG(38) (after analysis: estimated size of all MUMPS internal data for running BLR out-of-core - value on the most memory consuming processor): %d\n", mumps->id.INFOG(38)));
2585:         PetscCall(PetscViewerASCIIPrintf(viewer, "  INFOG(39) (after analysis: estimated size of all MUMPS internal data for running BLR out-of-core - sum over all processors): %d\n", mumps->id.INFOG(39)));
2586:       }
2587:     }
2588:   }
2589:   PetscFunctionReturn(PETSC_SUCCESS);
2590: }

2592: static PetscErrorCode MatGetInfo_MUMPS(Mat A, MatInfoType flag, MatInfo *info)
2593: {
2594:   Mat_MUMPS *mumps = (Mat_MUMPS *)A->data;

2596:   PetscFunctionBegin;
2597:   info->block_size        = 1.0;
2598:   info->nz_allocated      = mumps->id.INFOG(20) >= 0 ? mumps->id.INFOG(20) : -1000000 * mumps->id.INFOG(20); /* a negative INFOG(20) gives the number of entries in millions */
2599:   info->nz_used           = mumps->id.INFOG(20) >= 0 ? mumps->id.INFOG(20) : -1000000 * mumps->id.INFOG(20);
2600:   info->nz_unneeded       = 0.0;
2601:   info->assemblies        = 0.0;
2602:   info->mallocs           = 0.0;
2603:   info->memory            = 0.0;
2604:   info->fill_ratio_given  = 0;
2605:   info->fill_ratio_needed = 0;
2606:   info->factor_mallocs    = 0;
2607:   PetscFunctionReturn(PETSC_SUCCESS);
2608: }

2610: static PetscErrorCode MatFactorSetSchurIS_MUMPS(Mat F, IS is)
2611: {
2612:   Mat_MUMPS         *mumps = (Mat_MUMPS *)F->data;
2613:   const PetscScalar *arr;
2614:   const PetscInt    *idxs;
2615:   PetscInt           size, i;

2617:   PetscFunctionBegin;
2618:   PetscCall(ISGetLocalSize(is, &size));
2619:   /* Schur complement matrix */
2620:   PetscCall(MatDestroy(&F->schur));
2621:   PetscCall(MatCreateSeqDense(PETSC_COMM_SELF, size, size, NULL, &F->schur));
2622:   PetscCall(MatDenseGetArrayRead(F->schur, &arr));
2623:   mumps->id.schur      = (MumpsScalar *)arr;
2624:   mumps->id.size_schur = size;
2625:   mumps->id.schur_lld  = size;
2626:   PetscCall(MatDenseRestoreArrayRead(F->schur, &arr));
2627:   if (mumps->sym == 1) PetscCall(MatSetOption(F->schur, MAT_SPD, PETSC_TRUE));

2629:   /* MUMPS expects Fortran style indices */
2630:   PetscCall(PetscFree(mumps->id.listvar_schur));
2631:   PetscCall(PetscMalloc1(size, &mumps->id.listvar_schur));
2632:   PetscCall(ISGetIndices(is, &idxs));
2633:   for (i = 0; i < size; i++) PetscCall(PetscMUMPSIntCast(idxs[i] + 1, &(mumps->id.listvar_schur[i])));
2634:   PetscCall(ISRestoreIndices(is, &idxs));
2635:   /* set a special value of ICNTL (not handled by MUMPS) to be used in the solve phase by PETSc */
2636:   mumps->id.ICNTL(26) = -1;
2637:   PetscFunctionReturn(PETSC_SUCCESS);
2638: }

2640: static PetscErrorCode MatFactorCreateSchurComplement_MUMPS(Mat F, Mat *S)
2641: {
2642:   Mat          St;
2643:   Mat_MUMPS   *mumps = (Mat_MUMPS *)F->data;
2644:   PetscScalar *array;

2646:   PetscFunctionBegin;
2647:   PetscCheck(mumps->id.ICNTL(19), PetscObjectComm((PetscObject)F), PETSC_ERR_ORDER, "Schur complement mode not selected! Call MatFactorSetSchurIS() to enable it");
2648:   PetscCall(MatCreate(PETSC_COMM_SELF, &St));
2649:   PetscCall(MatSetSizes(St, PETSC_DECIDE, PETSC_DECIDE, mumps->id.size_schur, mumps->id.size_schur));
2650:   PetscCall(MatSetType(St, MATDENSE));
2651:   PetscCall(MatSetUp(St));
2652:   PetscCall(MatDenseGetArray(St, &array));
2653:   if (!mumps->sym) {                /* MUMPS always returns a full matrix */
2654:     if (mumps->id.ICNTL(19) == 1) { /* stored by rows */
2655:       PetscInt i, j, N = mumps->id.size_schur;
2656:       for (i = 0; i < N; i++) {
2657:         for (j = 0; j < N; j++) {
2658: #if !defined(PETSC_USE_COMPLEX)
2659:           PetscScalar val = mumps->id.schur[i * N + j];
2660: #else
2661:           PetscScalar val = mumps->id.schur[i * N + j].r + PETSC_i * mumps->id.schur[i * N + j].i;
2662: #endif
2663:           array[j * N + i] = val;
2664:         }
2665:       }
2666:     } else { /* stored by columns */
2667:       PetscCall(PetscArraycpy(array, mumps->id.schur, mumps->id.size_schur * mumps->id.size_schur));
2668:     }
2669:   } else {                          /* either full or lower-triangular (not packed) */
2670:     if (mumps->id.ICNTL(19) == 2) { /* lower triangular stored by columns */
2671:       PetscInt i, j, N = mumps->id.size_schur;
2672:       for (i = 0; i < N; i++) {
2673:         for (j = i; j < N; j++) {
2674: #if !defined(PETSC_USE_COMPLEX)
2675:           PetscScalar val = mumps->id.schur[i * N + j];
2676: #else
2677:           PetscScalar val = mumps->id.schur[i * N + j].r + PETSC_i * mumps->id.schur[i * N + j].i;
2678: #endif
2679:           array[i * N + j] = array[j * N + i] = val;
2680:         }
2681:       }
2682:     } else if (mumps->id.ICNTL(19) == 3) { /* full matrix */
2683:       PetscCall(PetscArraycpy(array, mumps->id.schur, mumps->id.size_schur * mumps->id.size_schur));
2684:     } else { /* ICNTL(19) == 1 lower triangular stored by rows */
2685:       PetscInt i, j, N = mumps->id.size_schur;
2686:       for (i = 0; i < N; i++) {
2687:         for (j = 0; j < i + 1; j++) {
2688: #if !defined(PETSC_USE_COMPLEX)
2689:           PetscScalar val = mumps->id.schur[i * N + j];
2690: #else
2691:           PetscScalar val = mumps->id.schur[i * N + j].r + PETSC_i * mumps->id.schur[i * N + j].i;
2692: #endif
2693:           array[i * N + j] = array[j * N + i] = val;
2694:         }
2695:       }
2696:     }
2697:   }
2698:   PetscCall(MatDenseRestoreArray(St, &array));
2699:   *S = St;
2700:   PetscFunctionReturn(PETSC_SUCCESS);
2701: }

2703: static PetscErrorCode MatMumpsSetIcntl_MUMPS(Mat F, PetscInt icntl, PetscInt ival)
2704: {
2705:   Mat_MUMPS *mumps = (Mat_MUMPS *)F->data;

2707:   PetscFunctionBegin;
2708:   if (mumps->id.job == JOB_NULL) {                                       /* need to cache icntl and ival since PetscMUMPS_c() has never been called */
2709:     PetscInt i, nICNTL_pre = mumps->ICNTL_pre ? mumps->ICNTL_pre[0] : 0; /* number of already cached ICNTL */
2710:     for (i = 0; i < nICNTL_pre; ++i)
2711:       if (mumps->ICNTL_pre[1 + 2 * i] == icntl) break; /* is this ICNTL already cached? */
2712:     if (i == nICNTL_pre) {                             /* not already cached */
2713:       if (i > 0) PetscCall(PetscRealloc(sizeof(PetscMUMPSInt) * (2 * nICNTL_pre + 3), &mumps->ICNTL_pre));
2714:       else PetscCall(PetscCalloc(sizeof(PetscMUMPSInt) * 3, &mumps->ICNTL_pre));
2715:       mumps->ICNTL_pre[0]++;
2716:     }
2717:     mumps->ICNTL_pre[1 + 2 * i] = icntl;
2718:     PetscCall(PetscMUMPSIntCast(ival, mumps->ICNTL_pre + 2 + 2 * i));
2719:   } else PetscCall(PetscMUMPSIntCast(ival, &mumps->id.ICNTL(icntl)));
2720:   PetscFunctionReturn(PETSC_SUCCESS);
2721: }

2723: static PetscErrorCode MatMumpsGetIcntl_MUMPS(Mat F, PetscInt icntl, PetscInt *ival)
2724: {
2725:   Mat_MUMPS *mumps = (Mat_MUMPS *)F->data;

2727:   PetscFunctionBegin;
2728:   if (mumps->id.job == JOB_NULL) {
2729:     PetscInt i, nICNTL_pre = mumps->ICNTL_pre ? mumps->ICNTL_pre[0] : 0;
2730:     *ival = 0;
2731:     for (i = 0; i < nICNTL_pre; ++i) {
2732:       if (mumps->ICNTL_pre[1 + 2 * i] == icntl) *ival = mumps->ICNTL_pre[2 + 2 * i];
2733:     }
2734:   } else *ival = mumps->id.ICNTL(icntl);
2735:   PetscFunctionReturn(PETSC_SUCCESS);
2736: }

2738: /*@
2739:   MatMumpsSetIcntl - Set MUMPS parameter ICNTL() <https://mumps-solver.org/index.php?page=doc>

2741:   Logically Collective

2743:   Input Parameters:
2744: + F     - the factored matrix obtained by calling `MatGetFactor()` from PETSc-MUMPS interface
2745: . icntl - index of MUMPS parameter array ICNTL()
2746: - ival  - value of MUMPS ICNTL(icntl)

2748:   Options Database Key:
2749: . -mat_mumps_icntl_<icntl> <ival> - change the option numbered icntl to ival

2751:   Level: beginner
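
  Example Usage:
  A minimal sketch; it assumes `F` was obtained with `MatGetFactor()` using `MATSOLVERMUMPS`:
.vb
  MatMumpsSetIcntl(F, 24, 1);  /* ICNTL(24): turn on null pivot row detection */
  MatMumpsSetIcntl(F, 14, 30); /* ICNTL(14): allow a 30 percent increase of the estimated working space */
.ve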

2753: .seealso: [](ch_matrices), `Mat`, `MatGetFactor()`, `MatMumpsGetIcntl()`, `MatMumpsSetCntl()`, `MatMumpsGetCntl()`, `MatMumpsGetInfo()`, `MatMumpsGetInfog()`, `MatMumpsGetRinfo()`, `MatMumpsGetRinfog()`
2754: @*/
2755: PetscErrorCode MatMumpsSetIcntl(Mat F, PetscInt icntl, PetscInt ival)
2756: {
2757:   PetscFunctionBegin;
2759:   PetscCheck(F->factortype, PetscObjectComm((PetscObject)F), PETSC_ERR_ARG_WRONGSTATE, "Only for factored matrix");
2762:   PetscCheck((icntl >= 1 && icntl <= 38) || icntl == 58, PetscObjectComm((PetscObject)F), PETSC_ERR_ARG_WRONG, "Unsupported ICNTL value %" PetscInt_FMT, icntl);
2763:   PetscTryMethod(F, "MatMumpsSetIcntl_C", (Mat, PetscInt, PetscInt), (F, icntl, ival));
2764:   PetscFunctionReturn(PETSC_SUCCESS);
2765: }

2767: /*@
2768:   MatMumpsGetIcntl - Get MUMPS parameter ICNTL() <https://mumps-solver.org/index.php?page=doc>

2770:   Logically Collective

2772:   Input Parameters:
2773: + F     - the factored matrix obtained by calling `MatGetFactor()` from PETSc-MUMPS interface
2774: - icntl - index of MUMPS parameter array ICNTL()

2776:   Output Parameter:
2777: . ival - value of MUMPS ICNTL(icntl)

2779:   Level: beginner

2781: .seealso: [](ch_matrices), `Mat`, `MatGetFactor()`, `MatMumpsSetIcntl()`, `MatMumpsSetCntl()`, `MatMumpsGetCntl()`, `MatMumpsGetInfo()`, `MatMumpsGetInfog()`, `MatMumpsGetRinfo()`, `MatMumpsGetRinfog()`
2782: @*/
2783: PetscErrorCode MatMumpsGetIcntl(Mat F, PetscInt icntl, PetscInt *ival)
2784: {
2785:   PetscFunctionBegin;
2787:   PetscCheck(F->factortype, PetscObjectComm((PetscObject)F), PETSC_ERR_ARG_WRONGSTATE, "Only for factored matrix");
2789:   PetscAssertPointer(ival, 3);
2790:   PetscCheck((icntl >= 1 && icntl <= 38) || icntl == 58, PetscObjectComm((PetscObject)F), PETSC_ERR_ARG_WRONG, "Unsupported ICNTL value %" PetscInt_FMT, icntl);
2791:   PetscUseMethod(F, "MatMumpsGetIcntl_C", (Mat, PetscInt, PetscInt *), (F, icntl, ival));
2792:   PetscFunctionReturn(PETSC_SUCCESS);
2793: }

2795: static PetscErrorCode MatMumpsSetCntl_MUMPS(Mat F, PetscInt icntl, PetscReal val)
2796: {
2797:   Mat_MUMPS *mumps = (Mat_MUMPS *)F->data;

2799:   PetscFunctionBegin;
2800:   if (mumps->id.job == JOB_NULL) {
2801:     PetscInt i, nCNTL_pre = mumps->CNTL_pre ? mumps->CNTL_pre[0] : 0;
2802:     for (i = 0; i < nCNTL_pre; ++i)
2803:       if (mumps->CNTL_pre[1 + 2 * i] == icntl) break;
2804:     if (i == nCNTL_pre) {
2805:       if (i > 0) PetscCall(PetscRealloc(sizeof(PetscReal) * (2 * nCNTL_pre + 3), &mumps->CNTL_pre));
2806:       else PetscCall(PetscCalloc(sizeof(PetscReal) * 3, &mumps->CNTL_pre));
2807:       mumps->CNTL_pre[0]++;
2808:     }
2809:     mumps->CNTL_pre[1 + 2 * i] = icntl;
2810:     mumps->CNTL_pre[2 + 2 * i] = val;
2811:   } else mumps->id.CNTL(icntl) = val;
2812:   PetscFunctionReturn(PETSC_SUCCESS);
2813: }

2815: static PetscErrorCode MatMumpsGetCntl_MUMPS(Mat F, PetscInt icntl, PetscReal *val)
2816: {
2817:   Mat_MUMPS *mumps = (Mat_MUMPS *)F->data;

2819:   PetscFunctionBegin;
2820:   if (mumps->id.job == JOB_NULL) {
2821:     PetscInt i, nCNTL_pre = mumps->CNTL_pre ? mumps->CNTL_pre[0] : 0;
2822:     *val = 0.0;
2823:     for (i = 0; i < nCNTL_pre; ++i) {
2824:       if (mumps->CNTL_pre[1 + 2 * i] == icntl) *val = mumps->CNTL_pre[2 + 2 * i];
2825:     }
2826:   } else *val = mumps->id.CNTL(icntl);
2827:   PetscFunctionReturn(PETSC_SUCCESS);
2828: }

2830: /*@
2831:   MatMumpsSetCntl - Set MUMPS parameter CNTL() <https://mumps-solver.org/index.php?page=doc>

2833:   Logically Collective

2835:   Input Parameters:
2836: + F     - the factored matrix obtained by calling `MatGetFactor()` from PETSc-MUMPS interface
2837: . icntl - index of MUMPS parameter array CNTL()
2838: - val   - value of MUMPS CNTL(icntl)

2840:   Options Database Key:
2841: . -mat_mumps_cntl_<icntl> <val> - change the option numbered icntl to val

2843:   Level: beginner
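
  Example Usage:
  A minimal sketch; it assumes `F` was obtained with `MatGetFactor()` using `MATSOLVERMUMPS`:
.vb
  MatMumpsSetCntl(F, 1, 0.01); /* CNTL(1): lower the relative pivoting threshold */
.ve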

2845: .seealso: [](ch_matrices), `Mat`, `MatGetFactor()`, `MatMumpsSetIcntl()`, `MatMumpsGetIcntl()`, `MatMumpsGetCntl()`, `MatMumpsGetInfo()`, `MatMumpsGetInfog()`, `MatMumpsGetRinfo()`, `MatMumpsGetRinfog()`
2846: @*/
2847: PetscErrorCode MatMumpsSetCntl(Mat F, PetscInt icntl, PetscReal val)
2848: {
2849:   PetscFunctionBegin;
2851:   PetscCheck(F->factortype, PetscObjectComm((PetscObject)F), PETSC_ERR_ARG_WRONGSTATE, "Only for factored matrix");
2854:   PetscCheck(icntl >= 1 && icntl <= 7, PetscObjectComm((PetscObject)F), PETSC_ERR_ARG_WRONG, "Unsupported CNTL value %" PetscInt_FMT, icntl);
2855:   PetscTryMethod(F, "MatMumpsSetCntl_C", (Mat, PetscInt, PetscReal), (F, icntl, val));
2856:   PetscFunctionReturn(PETSC_SUCCESS);
2857: }

2859: /*@
2860:   MatMumpsGetCntl - Get MUMPS parameter CNTL() <https://mumps-solver.org/index.php?page=doc>

2862:   Logically Collective

2864:   Input Parameters:
2865: + F     - the factored matrix obtained by calling `MatGetFactor()` from PETSc-MUMPS interface
2866: - icntl - index of MUMPS parameter array CNTL()

2868:   Output Parameter:
2869: . val - value of MUMPS CNTL(icntl)

2871:   Level: beginner

2873: .seealso: [](ch_matrices), `Mat`, `MatGetFactor()`, `MatMumpsSetIcntl()`, `MatMumpsGetIcntl()`, `MatMumpsSetCntl()`, `MatMumpsGetInfo()`, `MatMumpsGetInfog()`, `MatMumpsGetRinfo()`, `MatMumpsGetRinfog()`
2874: @*/
2875: PetscErrorCode MatMumpsGetCntl(Mat F, PetscInt icntl, PetscReal *val)
2876: {
2877:   PetscFunctionBegin;
2879:   PetscCheck(F->factortype, PetscObjectComm((PetscObject)F), PETSC_ERR_ARG_WRONGSTATE, "Only for factored matrix");
2881:   PetscAssertPointer(val, 3);
2882:   PetscCheck(icntl >= 1 && icntl <= 7, PetscObjectComm((PetscObject)F), PETSC_ERR_ARG_WRONG, "Unsupported CNTL value %" PetscInt_FMT, icntl);
2883:   PetscUseMethod(F, "MatMumpsGetCntl_C", (Mat, PetscInt, PetscReal *), (F, icntl, val));
2884:   PetscFunctionReturn(PETSC_SUCCESS);
2885: }

2887: static PetscErrorCode MatMumpsGetInfo_MUMPS(Mat F, PetscInt icntl, PetscInt *info)
2888: {
2889:   Mat_MUMPS *mumps = (Mat_MUMPS *)F->data;

2891:   PetscFunctionBegin;
2892:   *info = mumps->id.INFO(icntl);
2893:   PetscFunctionReturn(PETSC_SUCCESS);
2894: }

2896: static PetscErrorCode MatMumpsGetInfog_MUMPS(Mat F, PetscInt icntl, PetscInt *infog)
2897: {
2898:   Mat_MUMPS *mumps = (Mat_MUMPS *)F->data;

2900:   PetscFunctionBegin;
2901:   *infog = mumps->id.INFOG(icntl);
2902:   PetscFunctionReturn(PETSC_SUCCESS);
2903: }

2905: static PetscErrorCode MatMumpsGetRinfo_MUMPS(Mat F, PetscInt icntl, PetscReal *rinfo)
2906: {
2907:   Mat_MUMPS *mumps = (Mat_MUMPS *)F->data;

2909:   PetscFunctionBegin;
2910:   *rinfo = mumps->id.RINFO(icntl);
2911:   PetscFunctionReturn(PETSC_SUCCESS);
2912: }

2914: static PetscErrorCode MatMumpsGetRinfog_MUMPS(Mat F, PetscInt icntl, PetscReal *rinfog)
2915: {
2916:   Mat_MUMPS *mumps = (Mat_MUMPS *)F->data;

2918:   PetscFunctionBegin;
2919:   *rinfog = mumps->id.RINFOG(icntl);
2920:   PetscFunctionReturn(PETSC_SUCCESS);
2921: }

2923: static PetscErrorCode MatMumpsGetNullPivots_MUMPS(Mat F, PetscInt *size, PetscInt **array)
2924: {
2925:   Mat_MUMPS *mumps = (Mat_MUMPS *)F->data;

2927:   PetscFunctionBegin;
2928:   PetscCheck(mumps->id.ICNTL(24) == 1, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "-mat_mumps_icntl_24 must be set to 1 for null pivot row detection");
2929:   *size  = 0;
2930:   *array = NULL;
2931:   if (!mumps->myid) {
2932:     *size = mumps->id.INFOG(28);
2933:     PetscCall(PetscMalloc1(*size, array));
2934:     for (int i = 0; i < *size; i++) (*array)[i] = mumps->id.pivnul_list[i] - 1;
2935:   }
2936:   PetscFunctionReturn(PETSC_SUCCESS);
2937: }

2939: static PetscErrorCode MatMumpsGetInverse_MUMPS(Mat F, Mat spRHS)
2940: {
2941:   Mat          Bt = NULL, Btseq = NULL;
2942:   PetscBool    flg;
2943:   Mat_MUMPS   *mumps = (Mat_MUMPS *)F->data;
2944:   PetscScalar *aa;
2945:   PetscInt     spnr, *ia, *ja, M, nrhs;

2947:   PetscFunctionBegin;
2948:   PetscAssertPointer(spRHS, 2);
2949:   PetscCall(PetscObjectTypeCompare((PetscObject)spRHS, MATTRANSPOSEVIRTUAL, &flg));
2950:   if (flg) {
2951:     PetscCall(MatTransposeGetMat(spRHS, &Bt));
2952:   } else SETERRQ(PetscObjectComm((PetscObject)spRHS), PETSC_ERR_ARG_WRONG, "Matrix spRHS must be of type MATTRANSPOSEVIRTUAL");

2954:   PetscCall(MatMumpsSetIcntl(F, 30, 1));

2956:   if (mumps->petsc_size > 1) {
2957:     Mat_MPIAIJ *b = (Mat_MPIAIJ *)Bt->data;
2958:     Btseq         = b->A;
2959:   } else {
2960:     Btseq = Bt;
2961:   }

2963:   PetscCall(MatGetSize(spRHS, &M, &nrhs));
2964:   mumps->id.nrhs = nrhs;
2965:   mumps->id.lrhs = M;
2966:   mumps->id.rhs  = NULL;

2968:   if (!mumps->myid) {
2969:     PetscCall(MatSeqAIJGetArray(Btseq, &aa));
2970:     PetscCall(MatGetRowIJ(Btseq, 1, PETSC_FALSE, PETSC_FALSE, &spnr, (const PetscInt **)&ia, (const PetscInt **)&ja, &flg));
2971:     PetscCheck(flg, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Cannot get IJ structure");
2972:     PetscCall(PetscMUMPSIntCSRCast(mumps, spnr, ia, ja, &mumps->id.irhs_ptr, &mumps->id.irhs_sparse, &mumps->id.nz_rhs));
2973:     mumps->id.rhs_sparse = (MumpsScalar *)aa;
2974:   } else {
2975:     mumps->id.irhs_ptr    = NULL;
2976:     mumps->id.irhs_sparse = NULL;
2977:     mumps->id.nz_rhs      = 0;
2978:     mumps->id.rhs_sparse  = NULL;
2979:   }
2980:   mumps->id.ICNTL(20) = 1; /* rhs is sparse */
2981:   mumps->id.ICNTL(21) = 0; /* solution is in assembled centralized format */

2983:   /* solve phase */
2984:   mumps->id.job = JOB_SOLVE;
2985:   PetscMUMPS_c(mumps);
2986:   PetscCheck(mumps->id.INFOG(1) >= 0, PETSC_COMM_SELF, PETSC_ERR_LIB, "MUMPS error in solve: INFOG(1)=%d INFO(2)=%d " MUMPS_MANUALS, mumps->id.INFOG(1), mumps->id.INFO(2));

2988:   if (!mumps->myid) {
2989:     PetscCall(MatSeqAIJRestoreArray(Btseq, &aa));
2990:     PetscCall(MatRestoreRowIJ(Btseq, 1, PETSC_FALSE, PETSC_FALSE, &spnr, (const PetscInt **)&ia, (const PetscInt **)&ja, &flg));
2991:     PetscCheck(flg, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Cannot get IJ structure");
2992:   }
2993:   PetscFunctionReturn(PETSC_SUCCESS);
2994: }

2996: /*@
2997:   MatMumpsGetInverse - Get user-specified set of entries in inverse of `A` <https://mumps-solver.org/index.php?page=doc>

2999:   Logically Collective

3001:   Input Parameter:
3002: . F - the factored matrix obtained by calling `MatGetFactor()` from PETSc-MUMPS interface

3004:   Output Parameter:
3005: . spRHS - sequential sparse matrix in `MATTRANSPOSEVIRTUAL` format holding the requested entries of the inverse of `A`

3007:   Level: beginner
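
  Example Usage:
  A minimal sketch; it assumes `Bt` is a sequential `MATAIJ` matrix whose nonzero pattern marks the requested entries of the inverse:
.vb
  Mat spRHS;
  MatCreateTranspose(Bt, &spRHS); /* wrap Bt as the required MATTRANSPOSEVIRTUAL matrix */
  MatMumpsGetInverse(F, spRHS);   /* the requested entries overwrite the values of Bt */
  MatDestroy(&spRHS);
.ve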

3009: .seealso: [](ch_matrices), `Mat`, `MatGetFactor()`, `MatCreateTranspose()`
3010: @*/
3011: PetscErrorCode MatMumpsGetInverse(Mat F, Mat spRHS)
3012: {
3013:   PetscFunctionBegin;
3015:   PetscCheck(F->factortype, PetscObjectComm((PetscObject)F), PETSC_ERR_ARG_WRONGSTATE, "Only for factored matrix");
3016:   PetscUseMethod(F, "MatMumpsGetInverse_C", (Mat, Mat), (F, spRHS));
3017:   PetscFunctionReturn(PETSC_SUCCESS);
3018: }

3020: static PetscErrorCode MatMumpsGetInverseTranspose_MUMPS(Mat F, Mat spRHST)
3021: {
3022:   Mat spRHS;

3024:   PetscFunctionBegin;
3025:   PetscCall(MatCreateTranspose(spRHST, &spRHS));
3026:   PetscCall(MatMumpsGetInverse_MUMPS(F, spRHS));
3027:   PetscCall(MatDestroy(&spRHS));
3028:   PetscFunctionReturn(PETSC_SUCCESS);
3029: }

3031: /*@
3032:   MatMumpsGetInverseTranspose - Get user-specified set of entries in the inverse of $A^T$ <https://mumps-solver.org/index.php?page=doc>

3034:   Logically Collective

3036:   Input Parameter:
3037: . F - the factored matrix of A obtained by calling `MatGetFactor()` from PETSc-MUMPS interface

3039:   Output Parameter:
3040: . spRHST - sequential sparse matrix in `MATAIJ` format containing the requested entries of the inverse of `A`^T

3042:   Level: beginner

3044: .seealso: [](ch_matrices), `Mat`, `MatGetFactor()`, `MatCreateTranspose()`, `MatMumpsGetInverse()`
3045: @*/
3046: PetscErrorCode MatMumpsGetInverseTranspose(Mat F, Mat spRHST)
3047: {
3048:   PetscBool flg;

3050:   PetscFunctionBegin;
3052:   PetscCheck(F->factortype, PetscObjectComm((PetscObject)F), PETSC_ERR_ARG_WRONGSTATE, "Only for factored matrix");
3053:   PetscCall(PetscObjectTypeCompareAny((PetscObject)spRHST, &flg, MATSEQAIJ, MATMPIAIJ, NULL));
3054:   PetscCheck(flg, PetscObjectComm((PetscObject)spRHST), PETSC_ERR_ARG_WRONG, "Matrix spRHST must be a MATAIJ matrix");

3056:   PetscUseMethod(F, "MatMumpsGetInverseTranspose_C", (Mat, Mat), (F, spRHST));
3057:   PetscFunctionReturn(PETSC_SUCCESS);
3058: }

3060: /*@
3061:   MatMumpsGetInfo - Get MUMPS parameter INFO() <https://mumps-solver.org/index.php?page=doc>

3063:   Logically Collective

3065:   Input Parameters:
3066: + F     - the factored matrix obtained by calling `MatGetFactor()` from PETSc-MUMPS interface
3067: - icntl - index of MUMPS parameter array INFO()

3069:   Output Parameter:
3070: . ival - value of MUMPS INFO(icntl)

3072:   Level: beginner
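
  Example Usage:
  A minimal sketch querying INFO(15), the estimated size (in MB) of MUMPS internal data on this process:
.vb
  PetscInt mem;
  MatMumpsGetInfo(F, 15, &mem);
.ve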

3074: .seealso: [](ch_matrices), `Mat`, `MatGetFactor()`, `MatMumpsSetIcntl()`, `MatMumpsGetIcntl()`, `MatMumpsSetCntl()`, `MatMumpsGetCntl()`, `MatMumpsGetInfog()`, `MatMumpsGetRinfo()`, `MatMumpsGetRinfog()`
3075: @*/
3076: PetscErrorCode MatMumpsGetInfo(Mat F, PetscInt icntl, PetscInt *ival)
3077: {
3078:   PetscFunctionBegin;
3080:   PetscCheck(F->factortype, PetscObjectComm((PetscObject)F), PETSC_ERR_ARG_WRONGSTATE, "Only for factored matrix");
3081:   PetscAssertPointer(ival, 3);
3082:   PetscUseMethod(F, "MatMumpsGetInfo_C", (Mat, PetscInt, PetscInt *), (F, icntl, ival));
3083:   PetscFunctionReturn(PETSC_SUCCESS);
3084: }

3086: /*@
3087:   MatMumpsGetInfog - Get MUMPS parameter INFOG() <https://mumps-solver.org/index.php?page=doc>

3089:   Logically Collective

3091:   Input Parameters:
3092: + F     - the factored matrix obtained by calling `MatGetFactor()` from PETSc-MUMPS interface
3093: - icntl - index of MUMPS parameter array INFOG()

3095:   Output Parameter:
3096: . ival - value of MUMPS INFOG(icntl)

3098:   Level: beginner

3100: .seealso: [](ch_matrices), `Mat`, `MatGetFactor()`, `MatMumpsSetIcntl()`, `MatMumpsGetIcntl()`, `MatMumpsSetCntl()`, `MatMumpsGetCntl()`, `MatMumpsGetInfo()`, `MatMumpsGetRinfo()`, `MatMumpsGetRinfog()`
3101: @*/
3102: PetscErrorCode MatMumpsGetInfog(Mat F, PetscInt icntl, PetscInt *ival)
3103: {
3104:   PetscFunctionBegin;
3106:   PetscCheck(F->factortype, PetscObjectComm((PetscObject)F), PETSC_ERR_ARG_WRONGSTATE, "Only for factored matrix");
3107:   PetscAssertPointer(ival, 3);
3108:   PetscUseMethod(F, "MatMumpsGetInfog_C", (Mat, PetscInt, PetscInt *), (F, icntl, ival));
3109:   PetscFunctionReturn(PETSC_SUCCESS);
3110: }

3112: /*@
3113:   MatMumpsGetRinfo - Get MUMPS parameter RINFO() <https://mumps-solver.org/index.php?page=doc>

3115:   Logically Collective

3117:   Input Parameters:
3118: + F     - the factored matrix obtained by calling `MatGetFactor()` from PETSc-MUMPS interface
3119: - icntl - index of MUMPS parameter array RINFO()

3121:   Output Parameter:
3122: . val - value of MUMPS RINFO(icntl)

3124:   Level: beginner

3126: .seealso: [](ch_matrices), `Mat`, `MatGetFactor()`, `MatMumpsSetIcntl()`, `MatMumpsGetIcntl()`, `MatMumpsSetCntl()`, `MatMumpsGetCntl()`, `MatMumpsGetInfo()`, `MatMumpsGetInfog()`, `MatMumpsGetRinfog()`
3127: @*/
3128: PetscErrorCode MatMumpsGetRinfo(Mat F, PetscInt icntl, PetscReal *val)
3129: {
3130:   PetscFunctionBegin;
3132:   PetscCheck(F->factortype, PetscObjectComm((PetscObject)F), PETSC_ERR_ARG_WRONGSTATE, "Only for factored matrix");
3133:   PetscAssertPointer(val, 3);
3134:   PetscUseMethod(F, "MatMumpsGetRinfo_C", (Mat, PetscInt, PetscReal *), (F, icntl, val));
3135:   PetscFunctionReturn(PETSC_SUCCESS);
3136: }

3138: /*@
3139:   MatMumpsGetRinfog - Get MUMPS parameter RINFOG() <https://mumps-solver.org/index.php?page=doc>

3141:   Logically Collective

3143:   Input Parameters:
3144: + F     - the factored matrix obtained by calling `MatGetFactor()` from PETSc-MUMPS interface
3145: - icntl - index of MUMPS parameter array RINFOG()

3147:   Output Parameter:
3148: . val - value of MUMPS RINFOG(icntl)

3150:   Level: beginner
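
  Example Usage:
  A minimal sketch retrieving the determinant after a factorization run with `-mat_mumps_icntl_33 1` (real case):
.vb
  PetscReal mantissa;
  PetscInt  exponent;
  MatMumpsGetRinfog(F, 12, &mantissa);
  MatMumpsGetInfog(F, 34, &exponent); /* determinant = mantissa * 2^exponent */
.ve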

3152: .seealso: [](ch_matrices), `Mat`, `MatGetFactor()`, `MatMumpsSetIcntl()`, `MatMumpsGetIcntl()`, `MatMumpsSetCntl()`, `MatMumpsGetCntl()`, `MatMumpsGetInfo()`, `MatMumpsGetInfog()`, `MatMumpsGetRinfo()`
3153: @*/
3154: PetscErrorCode MatMumpsGetRinfog(Mat F, PetscInt icntl, PetscReal *val)
3155: {
3156:   PetscFunctionBegin;
3158:   PetscCheck(F->factortype, PetscObjectComm((PetscObject)F), PETSC_ERR_ARG_WRONGSTATE, "Only for factored matrix");
3159:   PetscAssertPointer(val, 3);
3160:   PetscUseMethod(F, "MatMumpsGetRinfog_C", (Mat, PetscInt, PetscReal *), (F, icntl, val));
3161:   PetscFunctionReturn(PETSC_SUCCESS);
3162: }

3164: /*@
3165:   MatMumpsGetNullPivots - Get MUMPS parameter PIVNUL_LIST() <https://mumps-solver.org/index.php?page=doc>

3167:   Logically Collective

3169:   Input Parameter:
3170: . F - the factored matrix obtained by calling `MatGetFactor()` from PETSc-MUMPS interface

3172:   Output Parameters:
3173: + size  - local size of the array; it is non-zero only on the host
3174: - array - array of rows with null pivots, using 0-based indexing. The array is allocated within the function and the user is responsible
3175:            for freeing it.

3177:   Level: beginner
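
  Example Usage:
  A minimal sketch; it assumes the factorization was run with `-mat_mumps_icntl_24 1` so that null pivot rows were detected:
.vb
  PetscInt nnull, *rows;
  MatMumpsGetNullPivots(F, &nnull, &rows); /* rows is non-empty only on the host (rank 0) */
  for (PetscInt i = 0; i < nnull; i++) PetscPrintf(PETSC_COMM_SELF, "null pivot row %" PetscInt_FMT "\n", rows[i]);
  PetscFree(rows); /* the caller is responsible for freeing the array */
.ve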

3179: .seealso: [](ch_matrices), `Mat`, `MatGetFactor()`, `MatMumpsSetIcntl()`, `MatMumpsGetIcntl()`, `MatMumpsSetCntl()`, `MatMumpsGetCntl()`, `MatMumpsGetInfo()`, `MatMumpsGetInfog()`, `MatMumpsGetRinfo()`
3180: @*/
3181: PetscErrorCode MatMumpsGetNullPivots(Mat F, PetscInt *size, PetscInt **array)
3182: {
3183:   PetscFunctionBegin;
3185:   PetscCheck(F->factortype, PetscObjectComm((PetscObject)F), PETSC_ERR_ARG_WRONGSTATE, "Only for factored matrix");
3186:   PetscAssertPointer(size, 2);
3187:   PetscAssertPointer(array, 3);
3188:   PetscUseMethod(F, "MatMumpsGetNullPivots_C", (Mat, PetscInt *, PetscInt **), (F, size, array));
3189:   PetscFunctionReturn(PETSC_SUCCESS);
3190: }

3192: /*MC
3193:   MATSOLVERMUMPS -  A matrix type providing direct solvers (LU and Cholesky) for
3194:   distributed and sequential matrices via the external package MUMPS <https://mumps-solver.org/index.php?page=doc>

3196:   Works with `MATAIJ` and `MATSBAIJ` matrices

3198:   Use ./configure --download-mumps --download-scalapack --download-parmetis --download-metis --download-ptscotch to have PETSc installed with MUMPS

3200:   Use ./configure --with-openmp --download-hwloc (or --with-hwloc) to enable running MUMPS in MPI+OpenMP hybrid mode and non-MUMPS in flat-MPI mode.
3201:   See details below.

3203:   Use `-pc_type cholesky` or `-pc_type lu` together with `-pc_factor_mat_solver_type mumps` to use this direct solver

3205:   Options Database Keys:
3206: +  -mat_mumps_icntl_1   - ICNTL(1): output stream for error messages
3207: .  -mat_mumps_icntl_2   - ICNTL(2): output stream for diagnostic printing, statistics, and warnings
3208: .  -mat_mumps_icntl_3   - ICNTL(3): output stream for global information, collected on the host
3209: .  -mat_mumps_icntl_4   - ICNTL(4): level of printing (0 to 4)
3210: .  -mat_mumps_icntl_6   - ICNTL(6): permutes to a zero-free diagonal and/or scales the matrix (0 to 7)
3211: .  -mat_mumps_icntl_7   - ICNTL(7): computes a symmetric permutation in sequential analysis, 0=AMD, 2=AMF, 3=Scotch, 4=PORD, 5=Metis, 6=QAMD, and 7=auto
3212:                           Use -pc_factor_mat_ordering_type <type> to have PETSc perform the ordering (sequential only)
3213: .  -mat_mumps_icntl_8   - ICNTL(8): scaling strategy (-2 to 8 or 77)
3214: .  -mat_mumps_icntl_10  - ICNTL(10): max num of refinements
3215: .  -mat_mumps_icntl_11  - ICNTL(11): statistics related to an error analysis (via -ksp_view)
3216: .  -mat_mumps_icntl_12  - ICNTL(12): an ordering strategy for symmetric matrices (0 to 3)
3217: .  -mat_mumps_icntl_13  - ICNTL(13): parallelism of the root node (enable ScaLAPACK) and its splitting
3218: .  -mat_mumps_icntl_14  - ICNTL(14): percentage increase in the estimated working space
3219: .  -mat_mumps_icntl_15  - ICNTL(15): compression of the input matrix resulting from a block format
3220: .  -mat_mumps_icntl_19  - ICNTL(19): computes the Schur complement
3221: .  -mat_mumps_icntl_20  - ICNTL(20): give MUMPS centralized (0) or distributed (10) dense RHS
3222: .  -mat_mumps_icntl_22  - ICNTL(22): in-core/out-of-core factorization and solve (0 or 1)
3223: .  -mat_mumps_icntl_23  - ICNTL(23): max size of the working memory (MB) that can be allocated per processor
3224: .  -mat_mumps_icntl_24  - ICNTL(24): detection of null pivot rows (0 or 1)
3225: .  -mat_mumps_icntl_25  - ICNTL(25): compute a solution of a deficient matrix and a null space basis
3226: .  -mat_mumps_icntl_26  - ICNTL(26): drives the solution phase if a Schur complement matrix has been computed
3227: .  -mat_mumps_icntl_28  - ICNTL(28): use 1 for sequential analysis and ICNTL(7) ordering, or 2 for parallel analysis and ICNTL(29) ordering
3228: .  -mat_mumps_icntl_29 - ICNTL(29): parallel ordering 1 = ptscotch, 2 = parmetis
3229: .  -mat_mumps_icntl_30 - ICNTL(30): compute user-specified set of entries in inv(A)
3230: .  -mat_mumps_icntl_31 - ICNTL(31): indicates which factors may be discarded during factorization
3231: .  -mat_mumps_icntl_33 - ICNTL(33): compute determinant
3232: .  -mat_mumps_icntl_35 - ICNTL(35): level of activation of BLR (Block Low-Rank) feature
3233: .  -mat_mumps_icntl_36 - ICNTL(36): controls the choice of BLR factorization variant
3234: .  -mat_mumps_icntl_38 - ICNTL(38): sets the estimated compression rate of LU factors with BLR
3235: .  -mat_mumps_icntl_58 - ICNTL(58): options for symbolic factorization
3236: .  -mat_mumps_cntl_1   - CNTL(1): relative pivoting threshold
3237: .  -mat_mumps_cntl_2   - CNTL(2): stopping criterion of refinement
3238: .  -mat_mumps_cntl_3   - CNTL(3): absolute pivoting threshold
3239: .  -mat_mumps_cntl_4   - CNTL(4): value for static pivoting
3240: .  -mat_mumps_cntl_5   - CNTL(5): fixation for null pivots
3241: .  -mat_mumps_cntl_7   - CNTL(7): precision of the dropping parameter used during BLR factorization
3242: -  -mat_mumps_use_omp_threads [m] - run MUMPS in MPI+OpenMP hybrid mode as if omp_set_num_threads(m) is called before calling MUMPS.
3243:                                    If m is omitted, it defaults to the number of cores per CPU package (socket) as reported by hwloc, following the suggestion of the MUMPS manual.

3245:   Level: beginner

3247:   Notes:
3248:   MUMPS Cholesky does not handle (complex) Hermitian matrices (see User's Guide at <https://mumps-solver.org/index.php?page=doc>) so using it will
3249:   error if the matrix is Hermitian.

3251:   When used within a `KSP`/`PC` solve the options are prefixed with that of the `PC`. Otherwise one can set the options prefix by calling
3252:   `MatSetOptionsPrefixFactor()` on the matrix from which the factor was obtained or `MatSetOptionsPrefix()` on the factor matrix.
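
  For example, a minimal sketch (the prefix "myfact_" is an arbitrary choice here):
.vb
  MatSetOptionsPrefixFactor(A, "myfact_"); /* the factor then reads options such as -myfact_mat_mumps_icntl_14 30 */
.ve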

3254:   When a MUMPS factorization fails inside a KSP solve, for example with a `KSP_DIVERGED_PC_FAILED`, one can find the MUMPS information about
3255:   the failure with
3256: .vb
3257:           KSPGetPC(ksp,&pc);
3258:           PCFactorGetMatrix(pc,&mat);
3259:           MatMumpsGetInfo(mat,....);
3260:           MatMumpsGetInfog(mat,....); etc.
3261: .ve
3262:     Or run with `-ksp_error_if_not_converged` and the program will be stopped and the information printed in the error message.

3264:   MUMPS provides 64-bit integer support in two build modes:
3265:   full 64-bit: here MUMPS is built with C preprocessing flag -DINTSIZE64 and Fortran compiler option -i8, -fdefault-integer-8 or equivalent, and
3266:   requires all dependent libraries MPI, ScaLAPACK, LAPACK and BLAS built the same way with 64-bit integers (for example ILP64 Intel MKL and MPI).

3268:   selective 64-bit: with the default MUMPS build, 64-bit integers have been introduced where needed. In compressed sparse row (CSR) storage of matrices,
3269:   MUMPS stores column indices in 32-bit, but row offsets in 64-bit, so you can have a huge number of non-zeros, but must have less than 2^31 rows and
3270:   columns. This can lead to significant memory and performance gains with respect to a full 64-bit integer MUMPS version. This requires a regular (32-bit
3271:   integer) build of all dependent libraries MPI, ScaLAPACK, LAPACK and BLAS.

3273:   With --download-mumps=1, PETSc always builds MUMPS in selective 64-bit mode, which can be used by both --with-64-bit-indices=0/1 variants of PETSc.

3275:   There are two modes to run MUMPS/PETSc with OpenMP:
3276: .vb
3277:      Set OMP_NUM_THREADS and run with fewer MPI ranks than cores. For example, if you want to have 16 OpenMP
3278:      threads per rank, then you may use "export OMP_NUM_THREADS=16 && mpirun -n 4 ./test".
3279: .ve

3281: .vb
3282:      Use -mat_mumps_use_omp_threads [m] and run your code with as many MPI ranks as the number of cores. For example,
3283:      if a compute node has 32 cores and you run on two nodes, you may use "mpirun -n 64 ./test -mat_mumps_use_omp_threads 16".
3284: .ve

3286:    To run MUMPS in MPI+OpenMP hybrid mode (i.e., enable multithreading in MUMPS) while still running the non-MUMPS part
3287:    (i.e., the PETSc part) of your code in the so-called flat-MPI (aka pure-MPI) mode, you need to configure PETSc with `--with-openmp` `--download-hwloc`
3288:    (or `--with-hwloc`) and have an MPI that supports MPI-3.0's process shared memory (which is usually available). Since MUMPS calls BLAS
3289:    libraries, to really get performance you should have a multithreaded BLAS library such as Intel MKL, AMD ACML, Cray libSci or OpenBLAS
3290:    (PETSc will automatically try to utilize a threaded BLAS if --with-openmp is provided).
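
   An illustrative configure invocation for this hybrid mode (the BLAS/LAPACK location is an assumption; adapt it to your system):
.vb
          ./configure --with-openmp --download-hwloc --download-mumps --download-scalapack --with-blaslapack-dir=$MKLROOT
.ve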

3292:    If you run your code through a job submission system, there are caveats about MPI rank mapping. We use MPI_Comm_split_type() to obtain the MPI
3293:    processes on each compute node. Listing the processes in ascending rank order, we split the processes on a node into consecutive groups of
3294:    size m and create a communicator called omp_comm for each group. Rank 0 in an omp_comm is called the master rank, and the others in the omp_comm
3295:    are called slave ranks (or slaves). Only master ranks are visible to MUMPS; slaves are not. We free the CPUs assigned to slaves (which might be set
3296:    by CPU binding policies in job scripts) and make those CPUs available to the master, so that the OpenMP threads spawned by MUMPS can run on them.
3297:    In a multi-socket compute node, MPI rank mapping becomes an issue. Continuing the above example, suppose your compute node has two sockets.
3298:    If you interleave MPI ranks across the two sockets, in other words, even ranks are placed on socket 0 and odd ranks on socket 1, and bind
3299:    MPI ranks to cores, then with -mat_mumps_use_omp_threads 16 a master rank (and the threads it spawns) will use half the cores in socket 0 and half
3300:    the cores in socket 1, which definitely hurts locality. On the other hand, if you map MPI ranks consecutively onto the two sockets, the
3301:    problem will not happen. Therefore, when you use -mat_mumps_use_omp_threads, keep an eye on your MPI rank mapping and CPU binding.
3302:    For example, with the Slurm job scheduler, one can use srun --cpu-bind=verbose -m block:block to map consecutive MPI ranks to sockets and
3303:    examine the resulting mapping.

3305:    PETSc does not control thread binding in MUMPS. To get the best performance, one still has to set `OMP_PROC_BIND` and `OMP_PLACES` in job scripts,
3306:    for example, `export OMP_PLACES=threads` and `export OMP_PROC_BIND=spread`. One does not need to export `OMP_NUM_THREADS=m` in job scripts, as PETSc
3307:    calls `omp_set_num_threads(m)` internally before calling MUMPS.
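
   A minimal Slurm sketch combining these settings (two 32-core nodes, as in the example above):
.vb
          export OMP_PROC_BIND=spread
          export OMP_PLACES=threads
          srun --cpu-bind=verbose -m block:block -n 64 ./test -mat_mumps_use_omp_threads 16
.ve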

3309:    See {cite}`heroux2011bi` and {cite}`gutierrez2017accommodating`.

3311: .seealso: [](ch_matrices), `Mat`, `PCFactorSetMatSolverType()`, `MatSolverType`, `MatMumpsSetIcntl()`, `MatMumpsGetIcntl()`, `MatMumpsSetCntl()`, `MatMumpsGetCntl()`, `MatMumpsGetInfo()`, `MatMumpsGetInfog()`, `MatMumpsGetRinfo()`, `MatMumpsGetRinfog()`, `KSPGetPC()`, `PCFactorGetMatrix()`
3312: M*/

3314: static PetscErrorCode MatFactorGetSolverType_mumps(Mat A, MatSolverType *type)
3315: {
3316:   PetscFunctionBegin;
3317:   *type = MATSOLVERMUMPS;
3318:   PetscFunctionReturn(PETSC_SUCCESS);
3319: }

3321: /* MatGetFactor for Seq and MPI AIJ matrices */
3322: static PetscErrorCode MatGetFactor_aij_mumps(Mat A, MatFactorType ftype, Mat *F)
3323: {
3324:   Mat         B;
3325:   Mat_MUMPS  *mumps;
3326:   PetscBool   isSeqAIJ, isDiag;
3327:   PetscMPIInt size;

3329:   PetscFunctionBegin;
3330: #if defined(PETSC_USE_COMPLEX)
3331:   if (ftype == MAT_FACTOR_CHOLESKY && A->hermitian == PETSC_BOOL3_TRUE && A->symmetric != PETSC_BOOL3_TRUE) {
3332:     PetscCall(PetscInfo(A, "Hermitian MAT_FACTOR_CHOLESKY is not supported. Use MAT_FACTOR_LU instead.\n"));
3333:     *F = NULL;
3334:     PetscFunctionReturn(PETSC_SUCCESS);
3335:   }
3336: #endif
3337:   /* Create the factorization matrix */
3338:   PetscCall(PetscObjectBaseTypeCompare((PetscObject)A, MATSEQAIJ, &isSeqAIJ));
3339:   PetscCall(PetscObjectBaseTypeCompare((PetscObject)A, MATDIAGONAL, &isDiag));
3340:   PetscCall(MatCreate(PetscObjectComm((PetscObject)A), &B));
3341:   PetscCall(MatSetSizes(B, A->rmap->n, A->cmap->n, A->rmap->N, A->cmap->N));
3342:   PetscCall(PetscStrallocpy("mumps", &((PetscObject)B)->type_name));
3343:   PetscCall(MatSetUp(B));

3345:   PetscCall(PetscNew(&mumps));

3347:   B->ops->view    = MatView_MUMPS;
3348:   B->ops->getinfo = MatGetInfo_MUMPS;

3350:   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatFactorGetSolverType_C", MatFactorGetSolverType_mumps));
3351:   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatFactorSetSchurIS_C", MatFactorSetSchurIS_MUMPS));
3352:   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatFactorCreateSchurComplement_C", MatFactorCreateSchurComplement_MUMPS));
3353:   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatMumpsSetIcntl_C", MatMumpsSetIcntl_MUMPS));
3354:   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatMumpsGetIcntl_C", MatMumpsGetIcntl_MUMPS));
3355:   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatMumpsSetCntl_C", MatMumpsSetCntl_MUMPS));
3356:   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatMumpsGetCntl_C", MatMumpsGetCntl_MUMPS));
3357:   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatMumpsGetInfo_C", MatMumpsGetInfo_MUMPS));
3358:   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatMumpsGetInfog_C", MatMumpsGetInfog_MUMPS));
3359:   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatMumpsGetRinfo_C", MatMumpsGetRinfo_MUMPS));
3360:   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatMumpsGetRinfog_C", MatMumpsGetRinfog_MUMPS));
3361:   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatMumpsGetNullPivots_C", MatMumpsGetNullPivots_MUMPS));
3362:   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatMumpsGetInverse_C", MatMumpsGetInverse_MUMPS));
3363:   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatMumpsGetInverseTranspose_C", MatMumpsGetInverseTranspose_MUMPS));

3365:   if (ftype == MAT_FACTOR_LU) {
3366:     B->ops->lufactorsymbolic = MatLUFactorSymbolic_AIJMUMPS;
3367:     B->factortype            = MAT_FACTOR_LU;
3368:     if (isSeqAIJ) mumps->ConvertToTriples = MatConvertToTriples_seqaij_seqaij;
3369:     else if (isDiag) mumps->ConvertToTriples = MatConvertToTriples_diagonal_xaij;
3370:     else mumps->ConvertToTriples = MatConvertToTriples_mpiaij_mpiaij;
3371:     PetscCall(PetscStrallocpy(MATORDERINGEXTERNAL, (char **)&B->preferredordering[MAT_FACTOR_LU]));
3372:     mumps->sym = 0;
3373:   } else {
3374:     B->ops->choleskyfactorsymbolic = MatCholeskyFactorSymbolic_MUMPS;
3375:     B->factortype                  = MAT_FACTOR_CHOLESKY;
3376:     if (isSeqAIJ) mumps->ConvertToTriples = MatConvertToTriples_seqaij_seqsbaij;
3377:     else if (isDiag) mumps->ConvertToTriples = MatConvertToTriples_diagonal_xaij;
3378:     else mumps->ConvertToTriples = MatConvertToTriples_mpiaij_mpisbaij;
3379:     PetscCall(PetscStrallocpy(MATORDERINGEXTERNAL, (char **)&B->preferredordering[MAT_FACTOR_CHOLESKY]));
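    /* MUMPS sym: 0 = unsymmetric, 1 = symmetric positive definite, 2 = general symmetric; sym = 1 is not used with complex scalars */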
3380: #if defined(PETSC_USE_COMPLEX)
3381:     mumps->sym = 2;
3382: #else
3383:     if (A->spd == PETSC_BOOL3_TRUE) mumps->sym = 1;
3384:     else mumps->sym = 2;
3385: #endif
3386:   }

3388:   /* set solvertype */
3389:   PetscCall(PetscFree(B->solvertype));
3390:   PetscCall(PetscStrallocpy(MATSOLVERMUMPS, &B->solvertype));
3391:   PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)A), &size));
3392:   if (size == 1) {
3393:     /* MUMPS option -mat_mumps_icntl_7 1 is automatically set if PETSc ordering is passed into symbolic factorization */
3394:     B->canuseordering = PETSC_TRUE;
3395:   }
3396:   B->ops->destroy = MatDestroy_MUMPS;
3397:   B->data         = (void *)mumps;

3399:   *F               = B;
3400:   mumps->id.job    = JOB_NULL;
3401:   mumps->ICNTL_pre = NULL;
3402:   mumps->CNTL_pre  = NULL;
3403:   mumps->matstruc  = DIFFERENT_NONZERO_PATTERN;
3404:   PetscFunctionReturn(PETSC_SUCCESS);
3405: }

3407: /* MatGetFactor for Seq and MPI SBAIJ matrices */
3408: static PetscErrorCode MatGetFactor_sbaij_mumps(Mat A, MatFactorType ftype, Mat *F)
3409: {
3410:   Mat         B;
3411:   Mat_MUMPS  *mumps;
3412:   PetscBool   isSeqSBAIJ;
3413:   PetscMPIInt size;

3415:   PetscFunctionBegin;
3416: #if defined(PETSC_USE_COMPLEX)
3417:   if (ftype == MAT_FACTOR_CHOLESKY && A->hermitian == PETSC_BOOL3_TRUE && A->symmetric != PETSC_BOOL3_TRUE) {
3418:     PetscCall(PetscInfo(A, "Hermitian MAT_FACTOR_CHOLESKY is not supported. Use MAT_FACTOR_LU instead.\n"));
3419:     *F = NULL;
3420:     PetscFunctionReturn(PETSC_SUCCESS);
3421:   }
3422: #endif
3423:   PetscCall(MatCreate(PetscObjectComm((PetscObject)A), &B));
3424:   PetscCall(MatSetSizes(B, A->rmap->n, A->cmap->n, A->rmap->N, A->cmap->N));
3425:   PetscCall(PetscStrallocpy("mumps", &((PetscObject)B)->type_name));
3426:   PetscCall(MatSetUp(B));

3428:   PetscCall(PetscNew(&mumps));
3429:   PetscCall(PetscObjectTypeCompare((PetscObject)A, MATSEQSBAIJ, &isSeqSBAIJ));
3430:   if (isSeqSBAIJ) {
3431:     mumps->ConvertToTriples = MatConvertToTriples_seqsbaij_seqsbaij;
3432:   } else {
3433:     mumps->ConvertToTriples = MatConvertToTriples_mpisbaij_mpisbaij;
3434:   }

3436:   B->ops->choleskyfactorsymbolic = MatCholeskyFactorSymbolic_MUMPS;
3437:   B->ops->view                   = MatView_MUMPS;
3438:   B->ops->getinfo                = MatGetInfo_MUMPS;

3440:   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatFactorGetSolverType_C", MatFactorGetSolverType_mumps));
3441:   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatFactorSetSchurIS_C", MatFactorSetSchurIS_MUMPS));
3442:   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatFactorCreateSchurComplement_C", MatFactorCreateSchurComplement_MUMPS));
3443:   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatMumpsSetIcntl_C", MatMumpsSetIcntl_MUMPS));
3444:   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatMumpsGetIcntl_C", MatMumpsGetIcntl_MUMPS));
3445:   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatMumpsSetCntl_C", MatMumpsSetCntl_MUMPS));
3446:   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatMumpsGetCntl_C", MatMumpsGetCntl_MUMPS));
3447:   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatMumpsGetInfo_C", MatMumpsGetInfo_MUMPS));
3448:   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatMumpsGetInfog_C", MatMumpsGetInfog_MUMPS));
3449:   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatMumpsGetRinfo_C", MatMumpsGetRinfo_MUMPS));
3450:   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatMumpsGetRinfog_C", MatMumpsGetRinfog_MUMPS));
3451:   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatMumpsGetNullPivots_C", MatMumpsGetNullPivots_MUMPS));
3452:   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatMumpsGetInverse_C", MatMumpsGetInverse_MUMPS));
3453:   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatMumpsGetInverseTranspose_C", MatMumpsGetInverseTranspose_MUMPS));

3455:   B->factortype = MAT_FACTOR_CHOLESKY;
3456: #if defined(PETSC_USE_COMPLEX)
3457:   mumps->sym = 2;
3458: #else
3459:   if (A->spd == PETSC_BOOL3_TRUE) mumps->sym = 1;
3460:   else mumps->sym = 2;
3461: #endif

3463:   /* set solvertype */
3464:   PetscCall(PetscFree(B->solvertype));
3465:   PetscCall(PetscStrallocpy(MATSOLVERMUMPS, &B->solvertype));
3466:   PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)A), &size));
3467:   if (size == 1) {
3468:     /* MUMPS option -mat_mumps_icntl_7 1 is automatically set if PETSc ordering is passed into symbolic factorization */
3469:     B->canuseordering = PETSC_TRUE;
3470:   }
3471:   PetscCall(PetscStrallocpy(MATORDERINGEXTERNAL, (char **)&B->preferredordering[MAT_FACTOR_CHOLESKY]));
3472:   B->ops->destroy = MatDestroy_MUMPS;
3473:   B->data         = (void *)mumps;

3475:   *F               = B;
3476:   mumps->id.job    = JOB_NULL;
3477:   mumps->ICNTL_pre = NULL;
3478:   mumps->CNTL_pre  = NULL;
3479:   mumps->matstruc  = DIFFERENT_NONZERO_PATTERN;
3480:   PetscFunctionReturn(PETSC_SUCCESS);
3481: }

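/* MatGetFactor for Seq and MPI BAIJ matrices */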
3483: static PetscErrorCode MatGetFactor_baij_mumps(Mat A, MatFactorType ftype, Mat *F)
3484: {
3485:   Mat         B;
3486:   Mat_MUMPS  *mumps;
3487:   PetscBool   isSeqBAIJ;
3488:   PetscMPIInt size;

3490:   PetscFunctionBegin;
3491:   /* Create the factorization matrix */
3492:   PetscCall(PetscObjectTypeCompare((PetscObject)A, MATSEQBAIJ, &isSeqBAIJ));
3493:   PetscCall(MatCreate(PetscObjectComm((PetscObject)A), &B));
3494:   PetscCall(MatSetSizes(B, A->rmap->n, A->cmap->n, A->rmap->N, A->cmap->N));
3495:   PetscCall(PetscStrallocpy("mumps", &((PetscObject)B)->type_name));
3496:   PetscCall(MatSetUp(B));

3498:   PetscCall(PetscNew(&mumps));
3499:   if (ftype == MAT_FACTOR_LU) {
3500:     B->ops->lufactorsymbolic = MatLUFactorSymbolic_BAIJMUMPS;
3501:     B->factortype            = MAT_FACTOR_LU;
3502:     if (isSeqBAIJ) mumps->ConvertToTriples = MatConvertToTriples_seqbaij_seqaij;
3503:     else mumps->ConvertToTriples = MatConvertToTriples_mpibaij_mpiaij;
3504:     mumps->sym = 0;
3505:     PetscCall(PetscStrallocpy(MATORDERINGEXTERNAL, (char **)&B->preferredordering[MAT_FACTOR_LU]));
3506:   } else SETERRQ(PETSC_COMM_SELF, PETSC_ERR_SUP, "Cannot use PETSc BAIJ matrices with MUMPS Cholesky, use SBAIJ or AIJ matrix instead");

3508:   B->ops->view    = MatView_MUMPS;
3509:   B->ops->getinfo = MatGetInfo_MUMPS;

3511:   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatFactorGetSolverType_C", MatFactorGetSolverType_mumps));
3512:   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatFactorSetSchurIS_C", MatFactorSetSchurIS_MUMPS));
3513:   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatFactorCreateSchurComplement_C", MatFactorCreateSchurComplement_MUMPS));
3514:   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatMumpsSetIcntl_C", MatMumpsSetIcntl_MUMPS));
3515:   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatMumpsGetIcntl_C", MatMumpsGetIcntl_MUMPS));
3516:   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatMumpsSetCntl_C", MatMumpsSetCntl_MUMPS));
3517:   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatMumpsGetCntl_C", MatMumpsGetCntl_MUMPS));
3518:   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatMumpsGetInfo_C", MatMumpsGetInfo_MUMPS));
3519:   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatMumpsGetInfog_C", MatMumpsGetInfog_MUMPS));
3520:   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatMumpsGetRinfo_C", MatMumpsGetRinfo_MUMPS));
3521:   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatMumpsGetRinfog_C", MatMumpsGetRinfog_MUMPS));
3522:   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatMumpsGetNullPivots_C", MatMumpsGetNullPivots_MUMPS));
3523:   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatMumpsGetInverse_C", MatMumpsGetInverse_MUMPS));
3524:   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatMumpsGetInverseTranspose_C", MatMumpsGetInverseTranspose_MUMPS));

3526:   /* set solvertype */
3527:   PetscCall(PetscFree(B->solvertype));
3528:   PetscCall(PetscStrallocpy(MATSOLVERMUMPS, &B->solvertype));
3529:   PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)A), &size));
3530:   if (size == 1) {
3531:     /* MUMPS option -mat_mumps_icntl_7 1 is automatically set if PETSc ordering is passed into symbolic factorization */
3532:     B->canuseordering = PETSC_TRUE;
3533:   }
3534:   B->ops->destroy = MatDestroy_MUMPS;
3535:   B->data         = (void *)mumps;

3537:   *F               = B;
3538:   mumps->id.job    = JOB_NULL;
3539:   mumps->ICNTL_pre = NULL;
3540:   mumps->CNTL_pre  = NULL;
3541:   mumps->matstruc  = DIFFERENT_NONZERO_PATTERN;
3542:   PetscFunctionReturn(PETSC_SUCCESS);
3543: }

3545: /* MatGetFactor for Seq and MPI SELL matrices */
3546: static PetscErrorCode MatGetFactor_sell_mumps(Mat A, MatFactorType ftype, Mat *F)
3547: {
3548:   Mat         B;
3549:   Mat_MUMPS  *mumps;
3550:   PetscBool   isSeqSELL;
3551:   PetscMPIInt size;

3553:   PetscFunctionBegin;
3554:   /* Create the factorization matrix */
3555:   PetscCall(PetscObjectTypeCompare((PetscObject)A, MATSEQSELL, &isSeqSELL));
3556:   PetscCall(MatCreate(PetscObjectComm((PetscObject)A), &B));
3557:   PetscCall(MatSetSizes(B, A->rmap->n, A->cmap->n, A->rmap->N, A->cmap->N));
3558:   PetscCall(PetscStrallocpy("mumps", &((PetscObject)B)->type_name));
3559:   PetscCall(MatSetUp(B));

3561:   PetscCall(PetscNew(&mumps));

3563:   B->ops->view    = MatView_MUMPS;
3564:   B->ops->getinfo = MatGetInfo_MUMPS;

3566:   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatFactorGetSolverType_C", MatFactorGetSolverType_mumps));
3567:   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatFactorSetSchurIS_C", MatFactorSetSchurIS_MUMPS));
3568:   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatFactorCreateSchurComplement_C", MatFactorCreateSchurComplement_MUMPS));
3569:   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatMumpsSetIcntl_C", MatMumpsSetIcntl_MUMPS));
3570:   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatMumpsGetIcntl_C", MatMumpsGetIcntl_MUMPS));
3571:   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatMumpsSetCntl_C", MatMumpsSetCntl_MUMPS));
3572:   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatMumpsGetCntl_C", MatMumpsGetCntl_MUMPS));
3573:   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatMumpsGetInfo_C", MatMumpsGetInfo_MUMPS));
3574:   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatMumpsGetInfog_C", MatMumpsGetInfog_MUMPS));
3575:   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatMumpsGetRinfo_C", MatMumpsGetRinfo_MUMPS));
3576:   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatMumpsGetRinfog_C", MatMumpsGetRinfog_MUMPS));
3577:   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatMumpsGetNullPivots_C", MatMumpsGetNullPivots_MUMPS));

3579:   if (ftype == MAT_FACTOR_LU) {
3580:     B->ops->lufactorsymbolic = MatLUFactorSymbolic_AIJMUMPS;
3581:     B->factortype            = MAT_FACTOR_LU;
3582:     if (isSeqSELL) mumps->ConvertToTriples = MatConvertToTriples_seqsell_seqaij;
3583:     else SETERRQ(PetscObjectComm((PetscObject)A), PETSC_ERR_SUP, "To be implemented");
3584:     mumps->sym = 0;
3585:     PetscCall(PetscStrallocpy(MATORDERINGEXTERNAL, (char **)&B->preferredordering[MAT_FACTOR_LU]));
3586:   } else SETERRQ(PetscObjectComm((PetscObject)A), PETSC_ERR_SUP, "To be implemented");

3588:   /* set solvertype */
3589:   PetscCall(PetscFree(B->solvertype));
3590:   PetscCall(PetscStrallocpy(MATSOLVERMUMPS, &B->solvertype));
3591:   PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)A), &size));
3592:   if (size == 1) {
3593:     /* MUMPS option -mat_mumps_icntl_7 1 is automatically set if PETSc ordering is passed into symbolic factorization  */
3594:     B->canuseordering = PETSC_TRUE;
3595:   }
3596:   B->ops->destroy = MatDestroy_MUMPS;
3597:   B->data         = (void *)mumps;

3599:   *F               = B;
3600:   mumps->id.job    = JOB_NULL;
3601:   mumps->ICNTL_pre = NULL;
3602:   mumps->CNTL_pre  = NULL;
3603:   mumps->matstruc  = DIFFERENT_NONZERO_PATTERN;
3604:   PetscFunctionReturn(PETSC_SUCCESS);
3605: }

3607: /* MatGetFactor for MATNEST matrices */
3608: static PetscErrorCode MatGetFactor_nest_mumps(Mat A, MatFactorType ftype, Mat *F)
3609: {
3610:   Mat         B, **mats;
3611:   Mat_MUMPS  *mumps;
3612:   PetscInt    nr, nc;
3613:   PetscMPIInt size;
3614:   PetscBool   flg = PETSC_TRUE;

3616:   PetscFunctionBegin;
3617: #if defined(PETSC_USE_COMPLEX)
3618:   if (ftype == MAT_FACTOR_CHOLESKY && A->hermitian == PETSC_BOOL3_TRUE && A->symmetric != PETSC_BOOL3_TRUE) {
3619:     PetscCall(PetscInfo(A, "Hermitian MAT_FACTOR_CHOLESKY is not supported. Use MAT_FACTOR_LU instead.\n"));
3620:     *F = NULL;
3621:     PetscFunctionReturn(PETSC_SUCCESS);
3622:   }
3623: #endif

3625:   /* Return if some condition is not satisfied */
3626:   *F = NULL;
3627:   PetscCall(MatNestGetSubMats(A, &nr, &nc, &mats));
3628:   if (ftype == MAT_FACTOR_CHOLESKY) {
3629:     IS       *rows, *cols;
3630:     PetscInt *m, *M;

3632:     PetscCheck(nr == nc, PetscObjectComm((PetscObject)A), PETSC_ERR_SUP, "MAT_FACTOR_CHOLESKY not supported for nest sizes %" PetscInt_FMT " != %" PetscInt_FMT ". Use MAT_FACTOR_LU.", nr, nc);
3633:     PetscCall(PetscMalloc2(nr, &rows, nc, &cols));
3634:     PetscCall(MatNestGetISs(A, rows, cols));
3635:     for (PetscInt r = 0; flg && r < nr; r++) PetscCall(ISEqualUnsorted(rows[r], cols[r], &flg));
3636:     if (!flg) {
3637:       PetscCall(PetscFree2(rows, cols));
3638:       PetscCall(PetscInfo(A, "MAT_FACTOR_CHOLESKY not supported for unequal row and column maps. Use MAT_FACTOR_LU.\n"));
3639:       PetscFunctionReturn(PETSC_SUCCESS);
3640:     }
3641:     PetscCall(PetscMalloc2(nr, &m, nr, &M));
3642:     for (PetscInt r = 0; r < nr; r++) PetscCall(ISGetMinMax(rows[r], &m[r], &M[r]));
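    /* require the [min,max] ranges of the blocks' row index sets to be pairwise disjoint */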
3643:     for (PetscInt r = 0; flg && r < nr; r++)
3644:       for (PetscInt k = r + 1; flg && k < nr; k++)
3645:         if ((m[k] <= m[r] && m[r] <= M[k]) || (m[k] <= M[r] && M[r] <= M[k])) flg = PETSC_FALSE;
3646:     PetscCall(PetscFree2(m, M));
3647:     PetscCall(PetscFree2(rows, cols));
3648:     if (!flg) {
3649:       PetscCall(PetscInfo(A, "MAT_FACTOR_CHOLESKY not supported for intersecting row maps. Use MAT_FACTOR_LU.\n"));
3650:       PetscFunctionReturn(PETSC_SUCCESS);
3651:     }
3652:   }

3654:   for (PetscInt r = 0; r < nr; r++) {
3655:     for (PetscInt c = 0; c < nc; c++) {
3656:       Mat       sub = mats[r][c];
3657:       PetscBool isSeqAIJ, isMPIAIJ, isSeqBAIJ, isMPIBAIJ, isSeqSBAIJ, isMPISBAIJ, isTrans, isDiag;

3659:       if (!sub || (ftype == MAT_FACTOR_CHOLESKY && c < r)) continue;
3660:       PetscCall(PetscObjectTypeCompare((PetscObject)sub, MATTRANSPOSEVIRTUAL, &isTrans));
3661:       if (isTrans) PetscCall(MatTransposeGetMat(sub, &sub));
3662:       else {
3663:         PetscCall(PetscObjectTypeCompare((PetscObject)sub, MATHERMITIANTRANSPOSEVIRTUAL, &isTrans));
3664:         if (isTrans) PetscCall(MatHermitianTransposeGetMat(sub, &sub));
3665:       }
3666:       PetscCall(PetscObjectBaseTypeCompare((PetscObject)sub, MATSEQAIJ, &isSeqAIJ));
3667:       PetscCall(PetscObjectBaseTypeCompare((PetscObject)sub, MATMPIAIJ, &isMPIAIJ));
3668:       PetscCall(PetscObjectBaseTypeCompare((PetscObject)sub, MATSEQBAIJ, &isSeqBAIJ));
3669:       PetscCall(PetscObjectBaseTypeCompare((PetscObject)sub, MATMPIBAIJ, &isMPIBAIJ));
3670:       PetscCall(PetscObjectBaseTypeCompare((PetscObject)sub, MATSEQSBAIJ, &isSeqSBAIJ));
3671:       PetscCall(PetscObjectBaseTypeCompare((PetscObject)sub, MATMPISBAIJ, &isMPISBAIJ));
3672:       PetscCall(PetscObjectTypeCompare((PetscObject)sub, MATDIAGONAL, &isDiag));
3673:       if (ftype == MAT_FACTOR_CHOLESKY) {
3674:         if (r == c) {
3675:           if (!isSeqAIJ && !isMPIAIJ && !isSeqBAIJ && !isMPIBAIJ && !isSeqSBAIJ && !isMPISBAIJ && !isDiag) {
3676:             PetscCall(PetscInfo(sub, "MAT_CHOLESKY_FACTOR not supported for diagonal block of type %s.\n", ((PetscObject)sub)->type_name));
3677:             flg = PETSC_FALSE;
3678:           }
3679:         } else if (!isSeqAIJ && !isMPIAIJ && !isSeqBAIJ && !isMPIBAIJ && !isDiag) {
3680:           PetscCall(PetscInfo(sub, "MAT_CHOLESKY_FACTOR not supported for off-diagonal block of type %s.\n", ((PetscObject)sub)->type_name));
3681:           flg = PETSC_FALSE;
3682:         }
3683:       } else if (!isSeqAIJ && !isMPIAIJ && !isSeqBAIJ && !isMPIBAIJ && !isDiag) {
3684:         PetscCall(PetscInfo(sub, "MAT_LU_FACTOR not supported for block of type %s.\n", ((PetscObject)sub)->type_name));
3685:         flg = PETSC_FALSE;
3686:       }
3687:     }
3688:   }
3689:   if (!flg) PetscFunctionReturn(PETSC_SUCCESS);

3691:   /* Create the factorization matrix */
3692:   PetscCall(MatCreate(PetscObjectComm((PetscObject)A), &B));
3693:   PetscCall(MatSetSizes(B, A->rmap->n, A->cmap->n, A->rmap->N, A->cmap->N));
3694:   PetscCall(PetscStrallocpy(MATSOLVERMUMPS, &((PetscObject)B)->type_name));
3695:   PetscCall(MatSetUp(B));

3697:   PetscCall(PetscNew(&mumps));

3699:   B->ops->view    = MatView_MUMPS;
3700:   B->ops->getinfo = MatGetInfo_MUMPS;

3702:   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatFactorGetSolverType_C", MatFactorGetSolverType_mumps));
3703:   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatFactorSetSchurIS_C", MatFactorSetSchurIS_MUMPS));
3704:   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatFactorCreateSchurComplement_C", MatFactorCreateSchurComplement_MUMPS));
3705:   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatMumpsSetIcntl_C", MatMumpsSetIcntl_MUMPS));
3706:   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatMumpsGetIcntl_C", MatMumpsGetIcntl_MUMPS));
3707:   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatMumpsSetCntl_C", MatMumpsSetCntl_MUMPS));
3708:   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatMumpsGetCntl_C", MatMumpsGetCntl_MUMPS));
3709:   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatMumpsGetInfo_C", MatMumpsGetInfo_MUMPS));
3710:   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatMumpsGetInfog_C", MatMumpsGetInfog_MUMPS));
3711:   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatMumpsGetRinfo_C", MatMumpsGetRinfo_MUMPS));
3712:   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatMumpsGetRinfog_C", MatMumpsGetRinfog_MUMPS));
3713:   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatMumpsGetNullPivots_C", MatMumpsGetNullPivots_MUMPS));
3714:   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatMumpsGetInverse_C", MatMumpsGetInverse_MUMPS));
3715:   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatMumpsGetInverseTranspose_C", MatMumpsGetInverseTranspose_MUMPS));

3717:   if (ftype == MAT_FACTOR_LU) {
3718:     B->ops->lufactorsymbolic = MatLUFactorSymbolic_AIJMUMPS;
3719:     B->factortype            = MAT_FACTOR_LU;
3720:     mumps->sym               = 0;
3721:   } else {
3722:     B->ops->choleskyfactorsymbolic = MatCholeskyFactorSymbolic_MUMPS;
3723:     B->factortype                  = MAT_FACTOR_CHOLESKY;
3724: #if defined(PETSC_USE_COMPLEX)
3725:     mumps->sym = 2;
3726: #else
3727:     if (A->spd == PETSC_BOOL3_TRUE) mumps->sym = 1;
3728:     else mumps->sym = 2;
3729: #endif
3730:   }
3731:   mumps->ConvertToTriples = MatConvertToTriples_nest_xaij;
3732:   PetscCall(PetscStrallocpy(MATORDERINGEXTERNAL, (char **)&B->preferredordering[ftype]));

3734:   PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)A), &size));
3735:   if (size == 1) {
3736:     /* MUMPS option -mat_mumps_icntl_7 1 is automatically set if PETSc ordering is passed into symbolic factorization */
3737:     B->canuseordering = PETSC_TRUE;
3738:   }

3740:   /* set solvertype */
3741:   PetscCall(PetscFree(B->solvertype));
3742:   PetscCall(PetscStrallocpy(MATSOLVERMUMPS, &B->solvertype));
3743:   B->ops->destroy = MatDestroy_MUMPS;
3744:   B->data         = (void *)mumps;

3746:   *F               = B;
3747:   mumps->id.job    = JOB_NULL;
3748:   mumps->ICNTL_pre = NULL;
3749:   mumps->CNTL_pre  = NULL;
3750:   mumps->matstruc  = DIFFERENT_NONZERO_PATTERN;
3751:   PetscFunctionReturn(PETSC_SUCCESS);
3752: }

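/* Register MUMPS factorizations with PETSc for all supported matrix types */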
3754: PETSC_EXTERN PetscErrorCode MatSolverTypeRegister_MUMPS(void)
3755: {
3756:   PetscFunctionBegin;
3757:   PetscCall(MatSolverTypeRegister(MATSOLVERMUMPS, MATMPIAIJ, MAT_FACTOR_LU, MatGetFactor_aij_mumps));
3758:   PetscCall(MatSolverTypeRegister(MATSOLVERMUMPS, MATMPIAIJ, MAT_FACTOR_CHOLESKY, MatGetFactor_aij_mumps));
3759:   PetscCall(MatSolverTypeRegister(MATSOLVERMUMPS, MATMPIBAIJ, MAT_FACTOR_LU, MatGetFactor_baij_mumps));
3760:   PetscCall(MatSolverTypeRegister(MATSOLVERMUMPS, MATMPIBAIJ, MAT_FACTOR_CHOLESKY, MatGetFactor_baij_mumps));
3761:   PetscCall(MatSolverTypeRegister(MATSOLVERMUMPS, MATMPISBAIJ, MAT_FACTOR_CHOLESKY, MatGetFactor_sbaij_mumps));
3762:   PetscCall(MatSolverTypeRegister(MATSOLVERMUMPS, MATSEQAIJ, MAT_FACTOR_LU, MatGetFactor_aij_mumps));
3763:   PetscCall(MatSolverTypeRegister(MATSOLVERMUMPS, MATSEQAIJ, MAT_FACTOR_CHOLESKY, MatGetFactor_aij_mumps));
3764:   PetscCall(MatSolverTypeRegister(MATSOLVERMUMPS, MATSEQBAIJ, MAT_FACTOR_LU, MatGetFactor_baij_mumps));
3765:   PetscCall(MatSolverTypeRegister(MATSOLVERMUMPS, MATSEQBAIJ, MAT_FACTOR_CHOLESKY, MatGetFactor_baij_mumps));
3766:   PetscCall(MatSolverTypeRegister(MATSOLVERMUMPS, MATSEQSBAIJ, MAT_FACTOR_CHOLESKY, MatGetFactor_sbaij_mumps));
3767:   PetscCall(MatSolverTypeRegister(MATSOLVERMUMPS, MATSEQSELL, MAT_FACTOR_LU, MatGetFactor_sell_mumps));
3768:   PetscCall(MatSolverTypeRegister(MATSOLVERMUMPS, MATDIAGONAL, MAT_FACTOR_LU, MatGetFactor_aij_mumps));
3769:   PetscCall(MatSolverTypeRegister(MATSOLVERMUMPS, MATDIAGONAL, MAT_FACTOR_CHOLESKY, MatGetFactor_aij_mumps));
3770:   PetscCall(MatSolverTypeRegister(MATSOLVERMUMPS, MATNEST, MAT_FACTOR_LU, MatGetFactor_nest_mumps));
3771:   PetscCall(MatSolverTypeRegister(MATSOLVERMUMPS, MATNEST, MAT_FACTOR_CHOLESKY, MatGetFactor_nest_mumps));
3772:   PetscFunctionReturn(PETSC_SUCCESS);
3773: }