Actual source code: mpiaij.h

petsc-master 2019-06-22


#if !defined(__MPIAIJ_H)
#define __MPIAIJ_H

#include <../src/mat/impls/aij/seq/aij.h>

typedef struct { /* used by MatCreateMPIAIJSumSeqAIJ for reusing the merged matrix */
  PetscLayout rowmap;
  PetscInt    **buf_ri,**buf_rj;
  PetscMPIInt *len_s,*len_r,*id_r;    /* arrays of length comm->size; store lengths and ranks for send/recv of matrix values */
  PetscMPIInt nsend,nrecv;
  PetscInt    *bi,*bj;                /* i and j arrays of the local portion of mpi C (matrix product) - rename to ci, cj! */
  PetscInt    *owners_co,*coi,*coj;   /* i and j arrays of (p->B)^T*A*P - used in the communication */
  PetscErrorCode (*destroy)(Mat);
  PetscErrorCode (*duplicate)(Mat,MatDuplicateOption,Mat*);
} Mat_Merge_SeqsToMPI;
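
/* A minimal usage sketch (illustrative, not part of this header) of the reuse pattern
   this struct supports: the first MatCreateMPIAIJSumSeqAIJ() call builds the merged MPI
   matrix and caches the communication data above; a MAT_REUSE_MATRIX call reuses it when
   only the numerical values of the local seqmat have changed. Names are illustrative. */
#if 0
Mat            seqmat,C;
PetscInt       m,n;                    /* local row/column counts of the MPI product */
PetscErrorCode ierr;
/* ... assemble seqmat (SeqAIJ) on each rank; set m,n ... */
ierr = MatCreateMPIAIJSumSeqAIJ(PETSC_COMM_WORLD,seqmat,m,n,MAT_INITIAL_MATRIX,&C);CHKERRQ(ierr);
/* ... change values of seqmat, keeping its nonzero pattern ... */
ierr = MatCreateMPIAIJSumSeqAIJ(PETSC_COMM_WORLD,seqmat,m,n,MAT_REUSE_MATRIX,&C);CHKERRQ(ierr);
#endif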

typedef struct { /* used by MatPtAP_MPIAIJ_MPIAIJ() and MatMatMult_MPIAIJ_MPIAIJ() */
  PetscInt               *startsj_s,*startsj_r;   /* used by MatGetBrowsOfAoCols_MPIAIJ */
  PetscScalar            *bufa;                   /* used by MatGetBrowsOfAoCols_MPIAIJ */
  Mat                     P_loc,P_oth;            /* partial B_seq -- intended to replace B_seq */
  PetscInt               *api,*apj;               /* symbolic i and j arrays of the local product A_loc*B_seq */
  PetscScalar            *apv;
  MatReuse                reuse;                  /* flag to skip MatGetBrowsOfAoCols_MPIAIJ() and MatMPIAIJGetLocalMat() in the first call of MatPtAPNumeric_MPIAIJ_MPIAIJ() */
  PetscScalar            *apa;                    /* tmp array to store a row of A*P; used in MatMatMult() */
  Mat                     A_loc;                  /* used by MatTransposeMatMult(), contains api and apj */
  ISLocalToGlobalMapping  ltog;                   /* mapping from local column indices to global column indices for A_loc */
  Mat                     Pt;                     /* used by MatTransposeMatMult(), Pt = P^T */
  PetscBool               freestruct;             /* flag for MatFreeIntermediateDataStructures() */
  Mat                     Rd,Ro,AP_loc,C_loc,C_oth;
  PetscInt                algType;                /* implementation algorithm */
  PetscSF                 sf;                     /* used to communicate the remote part of C */
  PetscInt               *c_othi,*c_rmti;

  Mat_Merge_SeqsToMPI *merge;
  PetscErrorCode (*destroy)(Mat);
  PetscErrorCode (*duplicate)(Mat,MatDuplicateOption,Mat*);
  PetscErrorCode (*view)(Mat,PetscViewer);
} Mat_APMPI;
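
/* A minimal sketch (illustrative, not part of this header) of the symbolic/numeric reuse
   pattern that Mat_APMPI caches for: MAT_INITIAL_MATRIX triggers the symbolic phase and
   stores the intermediate structures above; MAT_REUSE_MATRIX reruns only the numeric
   phase on the cached structures. */
#if 0
Mat            A,P,C;
PetscErrorCode ierr;
ierr = MatPtAP(A,P,MAT_INITIAL_MATRIX,2.0,&C);CHKERRQ(ierr);  /* symbolic + numeric; 2.0 is a fill estimate */
/* ... update the entries of A, keeping its nonzero pattern ... */
ierr = MatPtAP(A,P,MAT_REUSE_MATRIX,2.0,&C);CHKERRQ(ierr);    /* numeric only; reuses Mat_APMPI */
#endif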

typedef struct {
  Mat A,B;                              /* local submatrices: A (diag part),
                                           B (off-diag part) */
  PetscMPIInt size;                     /* size of communicator */
  PetscMPIInt rank;                     /* rank of proc in communicator */

  /* The following variables are used for matrix assembly */
  PetscBool   donotstash;               /* PETSC_TRUE if off-processor entries are dropped */
  MPI_Request *send_waits;              /* array of send requests */
  MPI_Request *recv_waits;              /* array of receive requests */
  PetscInt    nsends,nrecvs;            /* numbers of sends and receives */
  PetscScalar *svalues,*rvalues;        /* sending and receiving data */
  PetscInt    rmax;                     /* maximum message length */
#if defined(PETSC_USE_CTABLE)
  PetscTable colmap;
#else
  PetscInt *colmap;                     /* local col number of off-diag col */
#endif
  PetscInt *garray;                     /* global index of all off-processor columns */

  /* The following variables are used for matrix-vector products */
  Vec        lvec;                      /* local vector */
  Vec        diag;
  VecScatter Mvctx,Mvctx_mpi1;          /* scatter context for vector */
  PetscBool  Mvctx_mpi1_flg;            /* if true, an additional Mvctx_mpi1 is requested for mat-mat ops; default false */
  PetscBool  roworiented;               /* if true, row-oriented input; default true */

  /* The following variables are for MatGetRow() */
  PetscInt    *rowindices;              /* column indices for row */
  PetscScalar *rowvalues;               /* nonzero values in row */
  PetscBool   getrowactive;             /* indicates an active MatGetRow() that has not been restored */

  /* Used by MatDistribute_MPIAIJ() to allow reuse of previous matrix allocation and nonzero pattern */
  PetscInt *ld;                         /* number of entries per row left of the diagonal block */

  /* Used by MatMatMult() and MatPtAP() */
  Mat_APMPI *ap;

  /* used by MatMatMatMult() */
  Mat_MatMatMatMult *matmatmatmult;

  /* Used by MPICUSP and MPICUSPARSE classes */
  void *spptr;

} Mat_MPIAIJ;
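
/* A minimal sketch (illustrative, not part of this header) of how the diag/off-diag
   split above is used in a matrix-vector product: the diagonal block A multiplies the
   local part of x directly, while Mvctx gathers the needed off-processor entries of x
   into lvec so the off-diagonal block B can be applied locally. B stores compressed
   local column indices; garray[j] gives the global column of B's local column j, and
   colmap is the inverse map used during assembly. Names mat, xx, yy are illustrative. */
#if 0
Mat            mat;           /* an assembled MATMPIAIJ matrix */
Vec            xx,yy;         /* input and output vectors      */
Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
PetscErrorCode ierr;
ierr = VecScatterBegin(aij->Mvctx,xx,aij->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
ierr = MatMult(aij->A,xx,yy);CHKERRQ(ierr);               /* yy  = A * x_local         */
ierr = VecScatterEnd(aij->Mvctx,xx,aij->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
ierr = MatMultAdd(aij->B,aij->lvec,yy,yy);CHKERRQ(ierr);  /* yy += B * x_off-processor */
#endif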

PETSC_EXTERN PetscErrorCode MatCreate_MPIAIJ(Mat);

PETSC_INTERN PetscErrorCode MatAssemblyEnd_MPIAIJ(Mat,MatAssemblyType);

PETSC_INTERN PetscErrorCode MatSetUpMultiply_MPIAIJ(Mat);
PETSC_INTERN PetscErrorCode MatDisAssemble_MPIAIJ(Mat);
PETSC_INTERN PetscErrorCode MatDuplicate_MPIAIJ(Mat,MatDuplicateOption,Mat*);
PETSC_INTERN PetscErrorCode MatIncreaseOverlap_MPIAIJ(Mat,PetscInt,IS[],PetscInt);
PETSC_INTERN PetscErrorCode MatIncreaseOverlap_MPIAIJ_Scalable(Mat,PetscInt,IS[],PetscInt);
PETSC_INTERN PetscErrorCode MatFDColoringCreate_MPIXAIJ(Mat,ISColoring,MatFDColoring);
PETSC_INTERN PetscErrorCode MatFDColoringSetUp_MPIXAIJ(Mat,ISColoring,MatFDColoring);
PETSC_INTERN PetscErrorCode MatCreateSubMatrices_MPIAIJ(Mat,PetscInt,const IS[],const IS[],MatReuse,Mat*[]);
PETSC_INTERN PetscErrorCode MatCreateSubMatricesMPI_MPIAIJ(Mat,PetscInt,const IS[],const IS[],MatReuse,Mat*[]);
PETSC_INTERN PetscErrorCode MatCreateSubMatrix_MPIAIJ_All(Mat,MatCreateSubMatrixOption,MatReuse,Mat*[]);
PETSC_INTERN PetscErrorCode MatView_MPIAIJ(Mat,PetscViewer);

PETSC_INTERN PetscErrorCode MatCreateSubMatrix_MPIAIJ(Mat,IS,IS,MatReuse,Mat*);
PETSC_INTERN PetscErrorCode MatCreateSubMatrix_MPIAIJ_nonscalable(Mat,IS,IS,PetscInt,MatReuse,Mat*);
PETSC_INTERN PetscErrorCode MatCreateSubMatrix_MPIAIJ_SameRowDist(Mat,IS,IS,IS,MatReuse,Mat*);
PETSC_INTERN PetscErrorCode MatCreateSubMatrix_MPIAIJ_SameRowColDist(Mat,IS,IS,MatReuse,Mat*);
PETSC_INTERN PetscErrorCode MatGetMultiProcBlock_MPIAIJ(Mat,MPI_Comm,MatReuse,Mat*);

PETSC_INTERN PetscErrorCode MatLoad_MPIAIJ(Mat,PetscViewer);
PETSC_INTERN PetscErrorCode MatLoad_MPIAIJ_Binary(Mat,PetscViewer);
PETSC_INTERN PetscErrorCode MatCreateColmap_MPIAIJ_Private(Mat);
PETSC_INTERN PetscErrorCode MatMatMult_MPIDense_MPIAIJ(Mat,Mat,MatReuse,PetscReal,Mat*);
PETSC_INTERN PetscErrorCode MatMatMult_MPIAIJ_MPIAIJ(Mat,Mat,MatReuse,PetscReal,Mat*);
PETSC_INTERN PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(Mat,Mat,PetscReal,Mat*);
PETSC_INTERN PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ_seqMPI(Mat,Mat,PetscReal,Mat*);
PETSC_INTERN PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ(Mat,Mat,PetscReal,Mat*);
PETSC_INTERN PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIAIJ(Mat,Mat,Mat);
PETSC_INTERN PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable(Mat,Mat,Mat);
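
/* A rough sketch (illustrative, not part of this header) of the driver pattern behind
   these Symbolic/Numeric pairs: MatMatMult() with MAT_INITIAL_MATRIX dispatches to a
   Symbolic routine that builds the nonzero structure of C, followed by the matching
   Numeric routine; with MAT_REUSE_MATRIX only the Numeric routine reruns. */
#if 0
Mat            A,B,C;
PetscErrorCode ierr;
ierr = MatMatMult(A,B,MAT_INITIAL_MATRIX,PETSC_DEFAULT,&C);CHKERRQ(ierr); /* symbolic + numeric */
ierr = MatMatMult(A,B,MAT_REUSE_MATRIX,PETSC_DEFAULT,&C);CHKERRQ(ierr);   /* numeric only */
#endif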

PETSC_INTERN PetscErrorCode MatMatMatMult_MPIAIJ_MPIAIJ_MPIAIJ(Mat,Mat,Mat,MatReuse,PetscReal,Mat*);
PETSC_INTERN PetscErrorCode MatMatMatMultSymbolic_MPIAIJ_MPIAIJ_MPIAIJ(Mat,Mat,Mat,PetscReal,Mat*);
PETSC_INTERN PetscErrorCode MatMatMatMultNumeric_MPIAIJ_MPIAIJ_MPIAIJ(Mat,Mat,Mat,Mat);

PETSC_INTERN PetscErrorCode MatPtAP_MPIAIJ_MPIAIJ(Mat,Mat,MatReuse,PetscReal,Mat*);
PETSC_INTERN PetscErrorCode MatPtAPSymbolic_MPIAIJ_MPIAIJ(Mat,Mat,PetscReal,Mat*);
PETSC_INTERN PetscErrorCode MatPtAPNumeric_MPIAIJ_MPIAIJ(Mat,Mat,Mat);

PETSC_INTERN PetscErrorCode MatPtAPSymbolic_MPIAIJ_MPIAIJ_scalable(Mat,Mat,PetscReal,Mat*);
PETSC_INTERN PetscErrorCode MatPtAPSymbolic_MPIAIJ_MPIAIJ_allatonce(Mat,Mat,PetscReal,Mat*);
PETSC_INTERN PetscErrorCode MatPtAPSymbolic_MPIAIJ_MPIAIJ_allatonce_merged(Mat,Mat,PetscReal,Mat*);
PETSC_INTERN PetscErrorCode MatPtAPNumeric_MPIAIJ_MPIAIJ_scalable(Mat,Mat,Mat);
PETSC_INTERN PetscErrorCode MatPtAPNumeric_MPIAIJ_MPIAIJ_allatonce(Mat,Mat,Mat);
PETSC_INTERN PetscErrorCode MatPtAPNumeric_MPIAIJ_MPIAIJ_allatonce_merged(Mat,Mat,Mat);
PETSC_INTERN PetscErrorCode MatFreeIntermediateDataStructures_MPIAIJ_AP(Mat);
PETSC_INTERN PetscErrorCode MatFreeIntermediateDataStructures_MPIAIJ_BC(Mat);
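
/* A sketch (illustrative, not part of this header) of how one of the PtAP variants above
   pairs its phases; algType in Mat_APMPI records which variant was chosen, and a caller
   normally reaches these routines through MatPtAP() rather than directly. */
#if 0
Mat            A,P,C;
PetscErrorCode ierr;
ierr = MatPtAPSymbolic_MPIAIJ_MPIAIJ_scalable(A,P,2.0,&C);CHKERRQ(ierr); /* build structure of C = P^T*A*P */
ierr = MatPtAPNumeric_MPIAIJ_MPIAIJ_scalable(A,P,C);CHKERRQ(ierr);       /* fill in the values */
#endif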

#if defined(PETSC_HAVE_HYPRE)
PETSC_INTERN PetscErrorCode MatPtAPSymbolic_AIJ_AIJ_wHYPRE(Mat,Mat,PetscReal,Mat*);
#endif

PETSC_INTERN PetscErrorCode MatDestroy_MPIAIJ_PtAP(Mat);
PETSC_INTERN PetscErrorCode MatDestroy_MPIAIJ(Mat);

PETSC_INTERN PetscErrorCode MatRARt_MPIAIJ_MPIAIJ(Mat,Mat,MatReuse,PetscReal,Mat*);

PETSC_INTERN PetscErrorCode MatGetBrowsOfAoCols_MPIAIJ(Mat,Mat,MatReuse,PetscInt**,PetscInt**,MatScalar**,Mat*);
PETSC_INTERN PetscErrorCode MatSetValues_MPIAIJ(Mat,PetscInt,const PetscInt[],PetscInt,const PetscInt[],const PetscScalar[],InsertMode);
PETSC_INTERN PetscErrorCode MatSetValues_MPIAIJ_CopyFromCSRFormat(Mat,const PetscInt[],const PetscInt[],const PetscScalar[]);
PETSC_INTERN PetscErrorCode MatSetValues_MPIAIJ_CopyFromCSRFormat_Symbolic(Mat,const PetscInt[],const PetscInt[]);
PETSC_INTERN PetscErrorCode MatDestroy_MPIAIJ_MatMatMult(Mat);
PETSC_INTERN PetscErrorCode PetscContainerDestroy_Mat_MatMatMultMPI(void*);
PETSC_INTERN PetscErrorCode MatSetOption_MPIAIJ(Mat,MatOption,PetscBool);

PETSC_INTERN PetscErrorCode MatTransposeMatMult_MPIAIJ_MPIAIJ(Mat,Mat,MatReuse,PetscReal,Mat*);
PETSC_INTERN PetscErrorCode MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(Mat,Mat,PetscReal,Mat*);
PETSC_INTERN PetscErrorCode MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ(Mat,Mat,PetscReal,Mat*);
PETSC_INTERN PetscErrorCode MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ(Mat,Mat,Mat);
PETSC_INTERN PetscErrorCode MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable(Mat,Mat,Mat);
PETSC_INTERN PetscErrorCode MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_matmatmult(Mat,Mat,Mat);
PETSC_INTERN PetscErrorCode MatTransposeMatMult_MPIAIJ_MPIDense(Mat,Mat,MatReuse,PetscReal,Mat*);
PETSC_INTERN PetscErrorCode MatTransposeMatMultSymbolic_MPIAIJ_MPIDense(Mat,Mat,PetscReal,Mat*);
PETSC_INTERN PetscErrorCode MatTransposeMatMultNumeric_MPIAIJ_MPIDense(Mat,Mat,Mat);
PETSC_INTERN PetscErrorCode MatGetSeqNonzeroStructure_MPIAIJ(Mat,Mat*);

PETSC_INTERN PetscErrorCode MatSetFromOptions_MPIAIJ(PetscOptionItems*,Mat);
PETSC_INTERN PetscErrorCode MatMPIAIJSetPreallocation_MPIAIJ(Mat,PetscInt,const PetscInt[],PetscInt,const PetscInt[]);
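
/* A minimal sketch (illustrative, not part of this header) of the public path into
   MatMPIAIJSetPreallocation_MPIAIJ(): the d_nz/o_nz arguments give per-row nonzero
   counts for the diagonal block A and off-diagonal block B of Mat_MPIAIJ. The sizes and
   counts below are illustrative. */
#if 0
Mat            A;
PetscErrorCode ierr;
ierr = MatCreate(PETSC_COMM_WORLD,&A);CHKERRQ(ierr);
ierr = MatSetSizes(A,PETSC_DECIDE,PETSC_DECIDE,100,100);CHKERRQ(ierr);
ierr = MatSetType(A,MATMPIAIJ);CHKERRQ(ierr);
ierr = MatMPIAIJSetPreallocation(A,5,NULL,2,NULL);CHKERRQ(ierr); /* 5 diag, 2 off-diag nonzeros per row */
#endif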

#if !defined(PETSC_USE_COMPLEX) && !defined(PETSC_USE_REAL_SINGLE) && !defined(PETSC_USE_REAL___FLOAT128) && !defined(PETSC_USE_REAL___FP16)
PETSC_INTERN PetscErrorCode MatLUFactorSymbolic_MPIAIJ_TFS(Mat,IS,IS,const MatFactorInfo*,Mat*);
#endif
PETSC_INTERN PetscErrorCode MatSolve_MPIAIJ(Mat,Vec,Vec);
PETSC_INTERN PetscErrorCode MatILUFactor_MPIAIJ(Mat,IS,IS,const MatFactorInfo*);

PETSC_INTERN PetscErrorCode MatAXPYGetPreallocation_MPIX_private(PetscInt,const PetscInt*,const PetscInt*,const PetscInt*,const PetscInt*,const PetscInt*,const PetscInt*,PetscInt*);

extern PetscErrorCode MatGetDiagonalBlock_MPIAIJ(Mat,Mat*);
extern PetscErrorCode MatDiagonalScaleLocal_MPIAIJ(Mat,Vec);

PETSC_INTERN PetscErrorCode MatGetSeqMats_MPIAIJ(Mat,Mat*,Mat*);
PETSC_INTERN PetscErrorCode MatSetSeqMats_MPIAIJ(Mat,IS,IS,IS,MatStructure,Mat,Mat);
/* compute apa = A[i,:]*P = Ad[i,:]*P_loc + Ao[i,:]*P_oth using sparse axpy */
#define AProw_scalable(i,ad,ao,p_loc,p_oth,api,apj,apa) \
{\
  PetscInt    _anz,_pnz,_j,_k,*_ai,*_aj,_row,*_pi,*_pj,_nextp,*_apJ;\
  PetscScalar *_aa,_valtmp,*_pa;\
  _apJ = apj + api[i];\
  /* diagonal portion of A */\
  _ai  = ad->i;\
  _anz = _ai[i+1] - _ai[i];\
  _aj  = ad->j + _ai[i];\
  _aa  = ad->a + _ai[i];\
  for (_j=0; _j<_anz; _j++) {\
    _row = _aj[_j];\
    _pi  = p_loc->i;\
    _pnz = _pi[_row+1] - _pi[_row];\
    _pj  = p_loc->j + _pi[_row];\
    _pa  = p_loc->a + _pi[_row];\
    /* perform sparse axpy */\
    _valtmp = _aa[_j];\
    _nextp  = 0;\
    for (_k=0; _nextp<_pnz; _k++) {\
      if (_apJ[_k] == _pj[_nextp]) { /* column of AP == column of P */\
        apa[_k] += _valtmp*_pa[_nextp++];\
      }\
    }\
    (void)PetscLogFlops(2.0*_pnz);\
  }\
  /* off-diagonal portion of A */\
  if (p_oth){\
    _ai  = ao->i;\
    _anz = _ai[i+1] - _ai[i];\
    _aj  = ao->j + _ai[i];\
    _aa  = ao->a + _ai[i];\
    for (_j=0; _j<_anz; _j++) {\
      _row = _aj[_j];\
      _pi  = p_oth->i;\
      _pnz = _pi[_row+1] - _pi[_row];\
      _pj  = p_oth->j + _pi[_row];\
      _pa  = p_oth->a + _pi[_row];\
      /* perform sparse axpy */\
      _valtmp = _aa[_j];\
      _nextp  = 0;\
      for (_k=0; _nextp<_pnz; _k++) {\
        if (_apJ[_k] == _pj[_nextp]) { /* column of AP == column of P */\
          apa[_k] += _valtmp*_pa[_nextp++];\
        }\
      }\
      (void)PetscLogFlops(2.0*_pnz);\
    }\
  }\
}
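
/* A minimal usage sketch (illustrative, not from this header) for AProw_scalable: apa is
   a work array compressed to the sparsity of row i of AP (pattern api/apj from the
   symbolic phase); it is filled, copied into C's value array, then rezeroed for the next
   row. Names c, ad, ao, p_loc, p_oth, am stand in for the real numeric-phase locals. */
#if 0
for (i=0; i<am; i++) {
  PetscInt cnz = api[i+1] - api[i];
  AProw_scalable(i,ad,ao,p_loc,p_oth,api,apj,apa);                             /* accumulate row i of A*P */
  ierr = PetscMemcpy(c->a + api[i],apa,cnz*sizeof(PetscScalar));CHKERRQ(ierr); /* store into C            */
  ierr = PetscMemzero(apa,cnz*sizeof(PetscScalar));CHKERRQ(ierr);              /* rezero for next row     */
}
#endif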

#define AProw_nonscalable(i,ad,ao,p_loc,p_oth,apa) \
{\
  PetscInt    _anz,_pnz,_j,_k,*_ai,*_aj,_row,*_pi,*_pj;\
  PetscScalar *_aa,_valtmp,*_pa;\
  /* diagonal portion of A */\
  _ai  = ad->i;\
  _anz = _ai[i+1] - _ai[i];\
  _aj  = ad->j + _ai[i];\
  _aa  = ad->a + _ai[i];\
  for (_j=0; _j<_anz; _j++) {\
    _row = _aj[_j];\
    _pi  = p_loc->i;\
    _pnz = _pi[_row+1] - _pi[_row];\
    _pj  = p_loc->j + _pi[_row];\
    _pa  = p_loc->a + _pi[_row];\
    /* perform dense axpy */\
    _valtmp = _aa[_j];\
    for (_k=0; _k<_pnz; _k++) {\
      apa[_pj[_k]] += _valtmp*_pa[_k];\
    }\
    (void)PetscLogFlops(2.0*_pnz);\
  }\
  /* off-diagonal portion of A */\
  if (p_oth){\
    _ai  = ao->i;\
    _anz = _ai[i+1] - _ai[i];\
    _aj  = ao->j + _ai[i];\
    _aa  = ao->a + _ai[i];\
    for (_j=0; _j<_anz; _j++) {\
      _row = _aj[_j];\
      _pi  = p_oth->i;\
      _pnz = _pi[_row+1] - _pi[_row];\
      _pj  = p_oth->j + _pi[_row];\
      _pa  = p_oth->a + _pi[_row];\
      /* perform dense axpy */\
      _valtmp = _aa[_j];\
      for (_k=0; _k<_pnz; _k++) {\
        apa[_pj[_k]] += _valtmp*_pa[_k];\
      }\
      (void)PetscLogFlops(2.0*_pnz);\
    }\
  }\
}
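
/* Design note (inferred from the two macros above): the nonscalable variant scatters
   into a dense work array indexed by P's global column _pj[_k], so apa needs one slot
   per column of P; the scalable variant keeps apa compressed to row i's nonzeros and
   merges against the symbolic pattern _apJ instead. A sketch of the dense variant's
   per-row bookkeeping, with illustrative names: */
#if 0
for (i=0; i<am; i++) {
  AProw_nonscalable(i,ad,ao,p_loc,p_oth,apa);  /* apa has length P->cmap->N */
  for (k=api[i]; k<api[i+1]; k++) {            /* gather row i's values ... */
    c->a[k]      = apa[apj[k]];
    apa[apj[k]]  = 0.0;                        /* ... rezeroing only the touched slots */
  }
}
#endif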

#endif