Actual source code: mpiaij.c
petsc-3.8.0 2017-09-26
3: #include <../src/mat/impls/aij/mpi/mpiaij.h>
4: #include <petsc/private/vecimpl.h>
5: #include <petsc/private/isimpl.h>
6: #include <petscblaslapack.h>
7: #include <petscsf.h>
9: /*MC
10: MATAIJ - MATAIJ = "aij" - A matrix type to be used for sparse matrices.
12: This matrix type is identical to MATSEQAIJ when constructed with a single process communicator,
13: and MATMPIAIJ otherwise. As a result, for single process communicators,
14: MatSeqAIJSetPreallocation() is supported, and similarly MatMPIAIJSetPreallocation() is supported
15: for communicators controlling multiple processes. It is recommended that you call both of
16: the above preallocation routines for simplicity.
18: Options Database Keys:
19: . -mat_type aij - sets the matrix type to "aij" during a call to MatSetFromOptions()
21: Developer Notes: Subclasses include MATAIJCUSP, MATAIJCUSPARSE, MATAIJPERM, MATAIJMKL, and MATAIJCRL; the AIJ matrix also automatically switches over to using inodes when
22: enough exist.
24: Level: beginner
26: .seealso: MatCreateAIJ(), MatCreateSeqAIJ(), MATSEQAIJ, MATMPIAIJ
27: M*/
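/*
   Usage sketch (illustrative only, not part of this file): as recommended
   above, call both preallocation routines so the same code runs on one
   process or many; the routine that does not match the actual type is a
   no-op. Error checking is omitted for brevity.

      Mat A;
      MatCreate(PETSC_COMM_WORLD,&A);
      MatSetSizes(A,PETSC_DECIDE,PETSC_DECIDE,100,100);
      MatSetType(A,MATAIJ);
      MatSeqAIJSetPreallocation(A,5,NULL);
      MatMPIAIJSetPreallocation(A,5,NULL,2,NULL);
*/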
29: /*MC
30: MATAIJCRL - MATAIJCRL = "aijcrl" - A matrix type to be used for sparse matrices.
32: This matrix type is identical to MATSEQAIJCRL when constructed with a single process communicator,
33: and MATMPIAIJCRL otherwise. As a result, for single process communicators,
34: MatSeqAIJSetPreallocation() is supported, and similarly MatMPIAIJSetPreallocation() is supported
35: for communicators controlling multiple processes. It is recommended that you call both of
36: the above preallocation routines for simplicity.
38: Options Database Keys:
39: . -mat_type aijcrl - sets the matrix type to "aijcrl" during a call to MatSetFromOptions()
41: Level: beginner
43: .seealso: MatCreateMPIAIJCRL(), MATSEQAIJCRL, MATMPIAIJCRL
44: M*/
46: PetscErrorCode MatSetBlockSizes_MPIAIJ(Mat M, PetscInt rbs, PetscInt cbs)
47: {
49: Mat_MPIAIJ *mat = (Mat_MPIAIJ*)M->data;
52: if (mat->A) {
53: MatSetBlockSizes(mat->A,rbs,cbs);
54: MatSetBlockSizes(mat->B,rbs,1);
55: }
56: return(0);
57: }
59: PetscErrorCode MatFindNonzeroRows_MPIAIJ(Mat M,IS *keptrows)
60: {
61: PetscErrorCode ierr;
62: Mat_MPIAIJ *mat = (Mat_MPIAIJ*)M->data;
63: Mat_SeqAIJ *a = (Mat_SeqAIJ*)mat->A->data;
64: Mat_SeqAIJ *b = (Mat_SeqAIJ*)mat->B->data;
65: const PetscInt *ia,*ib;
66: const MatScalar *aa,*bb;
67: PetscInt na,nb,i,j,*rows,cnt=0,n0rows;
68: PetscInt m = M->rmap->n,rstart = M->rmap->rstart;
71: *keptrows = 0;
72: ia = a->i;
73: ib = b->i;
74: for (i=0; i<m; i++) {
75: na = ia[i+1] - ia[i];
76: nb = ib[i+1] - ib[i];
77: if (!na && !nb) {
78: cnt++;
79: goto ok1;
80: }
81: aa = a->a + ia[i];
82: for (j=0; j<na; j++) {
83: if (aa[j] != 0.0) goto ok1;
84: }
85: bb = b->a + ib[i];
86: for (j=0; j <nb; j++) {
87: if (bb[j] != 0.0) goto ok1;
88: }
89: cnt++;
90: ok1:;
91: }
92: MPIU_Allreduce(&cnt,&n0rows,1,MPIU_INT,MPI_SUM,PetscObjectComm((PetscObject)M));
93: if (!n0rows) return(0);
94: PetscMalloc1(M->rmap->n-cnt,&rows);
95: cnt = 0;
96: for (i=0; i<m; i++) {
97: na = ia[i+1] - ia[i];
98: nb = ib[i+1] - ib[i];
99: if (!na && !nb) continue;
100: aa = a->a + ia[i];
101: for (j=0; j<na;j++) {
102: if (aa[j] != 0.0) {
103: rows[cnt++] = rstart + i;
104: goto ok2;
105: }
106: }
107: bb = b->a + ib[i];
108: for (j=0; j<nb; j++) {
109: if (bb[j] != 0.0) {
110: rows[cnt++] = rstart + i;
111: goto ok2;
112: }
113: }
114: ok2:;
115: }
116: ISCreateGeneral(PetscObjectComm((PetscObject)M),cnt,rows,PETSC_OWN_POINTER,keptrows);
117: return(0);
118: }
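/*
   Usage sketch (illustrative only): this routine is reached through the
   public MatFindNonzeroRows() interface; the returned IS lists the global
   indices of rows containing at least one nonzero value, and is NULL when
   no rows were dropped.

      IS keptrows;
      MatFindNonzeroRows(M,&keptrows);
      if (keptrows) {ISView(keptrows,PETSC_VIEWER_STDOUT_WORLD);}
      ISDestroy(&keptrows);
*/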
120: PetscErrorCode MatDiagonalSet_MPIAIJ(Mat Y,Vec D,InsertMode is)
121: {
122: PetscErrorCode ierr;
123: Mat_MPIAIJ *aij = (Mat_MPIAIJ*) Y->data;
126: if (Y->assembled && Y->rmap->rstart == Y->cmap->rstart && Y->rmap->rend == Y->cmap->rend) {
127: MatDiagonalSet(aij->A,D,is);
128: } else {
129: MatDiagonalSet_Default(Y,D,is);
130: }
131: return(0);
132: }
134: PetscErrorCode MatFindZeroDiagonals_MPIAIJ(Mat M,IS *zrows)
135: {
136: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)M->data;
138: PetscInt i,rstart,nrows,*rows;
141: *zrows = NULL;
142: MatFindZeroDiagonals_SeqAIJ_Private(aij->A,&nrows,&rows);
143: MatGetOwnershipRange(M,&rstart,NULL);
144: for (i=0; i<nrows; i++) rows[i] += rstart;
145: ISCreateGeneral(PetscObjectComm((PetscObject)M),nrows,rows,PETSC_OWN_POINTER,zrows);
146: return(0);
147: }
149: PetscErrorCode MatGetColumnNorms_MPIAIJ(Mat A,NormType type,PetscReal *norms)
150: {
152: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)A->data;
153: PetscInt i,n,*garray = aij->garray;
154: Mat_SeqAIJ *a_aij = (Mat_SeqAIJ*) aij->A->data;
155: Mat_SeqAIJ *b_aij = (Mat_SeqAIJ*) aij->B->data;
156: PetscReal *work;
159: MatGetSize(A,NULL,&n);
160: PetscCalloc1(n,&work);
161: if (type == NORM_2) {
162: for (i=0; i<a_aij->i[aij->A->rmap->n]; i++) {
163: work[A->cmap->rstart + a_aij->j[i]] += PetscAbsScalar(a_aij->a[i]*a_aij->a[i]);
164: }
165: for (i=0; i<b_aij->i[aij->B->rmap->n]; i++) {
166: work[garray[b_aij->j[i]]] += PetscAbsScalar(b_aij->a[i]*b_aij->a[i]);
167: }
168: } else if (type == NORM_1) {
169: for (i=0; i<a_aij->i[aij->A->rmap->n]; i++) {
170: work[A->cmap->rstart + a_aij->j[i]] += PetscAbsScalar(a_aij->a[i]);
171: }
172: for (i=0; i<b_aij->i[aij->B->rmap->n]; i++) {
173: work[garray[b_aij->j[i]]] += PetscAbsScalar(b_aij->a[i]);
174: }
175: } else if (type == NORM_INFINITY) {
176: for (i=0; i<a_aij->i[aij->A->rmap->n]; i++) {
177: work[A->cmap->rstart + a_aij->j[i]] = PetscMax(PetscAbsScalar(a_aij->a[i]), work[A->cmap->rstart + a_aij->j[i]]);
178: }
179: for (i=0; i<b_aij->i[aij->B->rmap->n]; i++) {
180: work[garray[b_aij->j[i]]] = PetscMax(PetscAbsScalar(b_aij->a[i]),work[garray[b_aij->j[i]]]);
181: }
183: } else SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONG,"Unknown NormType");
184: if (type == NORM_INFINITY) {
185: MPIU_Allreduce(work,norms,n,MPIU_REAL,MPIU_MAX,PetscObjectComm((PetscObject)A));
186: } else {
187: MPIU_Allreduce(work,norms,n,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)A));
188: }
189: PetscFree(work);
190: if (type == NORM_2) {
191: for (i=0; i<n; i++) norms[i] = PetscSqrtReal(norms[i]);
192: }
193: return(0);
194: }
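/*
   Usage sketch (illustrative only): the public entry point is
   MatGetColumnNorms(); the caller supplies an array with one entry per
   global column. Error checking omitted.

      PetscInt  N;
      PetscReal *norms;
      MatGetSize(A,NULL,&N);
      PetscMalloc1(N,&norms);
      MatGetColumnNorms(A,NORM_2,norms);
      PetscFree(norms);
*/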
196: PetscErrorCode MatFindOffBlockDiagonalEntries_MPIAIJ(Mat A,IS *is)
197: {
198: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
199: IS sis,gis;
200: PetscErrorCode ierr;
201: const PetscInt *isis,*igis;
202: PetscInt n,*iis,nsis,ngis,rstart,i;
205: MatFindOffBlockDiagonalEntries(a->A,&sis);
206: MatFindNonzeroRows(a->B,&gis);
207: ISGetSize(gis,&ngis);
208: ISGetSize(sis,&nsis);
209: ISGetIndices(sis,&isis);
210: ISGetIndices(gis,&igis);
212: PetscMalloc1(ngis+nsis,&iis);
213: PetscMemcpy(iis,igis,ngis*sizeof(PetscInt));
214: PetscMemcpy(iis+ngis,isis,nsis*sizeof(PetscInt));
215: n = ngis + nsis;
216: PetscSortRemoveDupsInt(&n,iis);
217: MatGetOwnershipRange(A,&rstart,NULL);
218: for (i=0; i<n; i++) iis[i] += rstart;
219: ISCreateGeneral(PetscObjectComm((PetscObject)A),n,iis,PETSC_OWN_POINTER,is);
221: ISRestoreIndices(sis,&isis);
222: ISRestoreIndices(gis,&igis);
223: ISDestroy(&sis);
224: ISDestroy(&gis);
225: return(0);
226: }
228: /*
229: Distributes a SeqAIJ matrix across a set of processes. Code stolen from
230: MatLoad_MPIAIJ(). Horrible lack of reuse. Should be a routine for each matrix type.
232: Only for square matrices
234: Used by a preconditioner, hence PETSC_EXTERN
235: */
236: PETSC_EXTERN PetscErrorCode MatDistribute_MPIAIJ(MPI_Comm comm,Mat gmat,PetscInt m,MatReuse reuse,Mat *inmat)
237: {
238: PetscMPIInt rank,size;
239: PetscInt *rowners,*dlens,*olens,i,rstart,rend,j,jj,nz = 0,*gmataj,cnt,row,*ld,bses[2];
241: Mat mat;
242: Mat_SeqAIJ *gmata;
243: PetscMPIInt tag;
244: MPI_Status status;
245: PetscBool aij;
246: MatScalar *gmataa,*ao,*ad,*gmataarestore=0;
249: MPI_Comm_rank(comm,&rank);
250: MPI_Comm_size(comm,&size);
251: if (!rank) {
252: PetscObjectTypeCompare((PetscObject)gmat,MATSEQAIJ,&aij);
253: if (!aij) SETERRQ1(PetscObjectComm((PetscObject)gmat),PETSC_ERR_SUP,"Currently no support for input matrix of type %s\n",((PetscObject)gmat)->type_name);
254: }
255: if (reuse == MAT_INITIAL_MATRIX) {
256: MatCreate(comm,&mat);
257: MatSetSizes(mat,m,m,PETSC_DETERMINE,PETSC_DETERMINE);
258: MatGetBlockSizes(gmat,&bses[0],&bses[1]);
259: MPI_Bcast(bses,2,MPIU_INT,0,comm);
260: MatSetBlockSizes(mat,bses[0],bses[1]);
261: MatSetType(mat,MATAIJ);
262: PetscMalloc1(size+1,&rowners);
263: PetscMalloc2(m,&dlens,m,&olens);
264: MPI_Allgather(&m,1,MPIU_INT,rowners+1,1,MPIU_INT,comm);
266: rowners[0] = 0;
267: for (i=2; i<=size; i++) rowners[i] += rowners[i-1];
268: rstart = rowners[rank];
269: rend = rowners[rank+1];
270: PetscObjectGetNewTag((PetscObject)mat,&tag);
271: if (!rank) {
272: gmata = (Mat_SeqAIJ*) gmat->data;
273: /* send row lengths to all processors */
274: for (i=0; i<m; i++) dlens[i] = gmata->ilen[i];
275: for (i=1; i<size; i++) {
276: MPI_Send(gmata->ilen + rowners[i],rowners[i+1]-rowners[i],MPIU_INT,i,tag,comm);
277: }
278: /* determine the diagonal and off-diagonal entry counts for each row */
279: PetscMemzero(olens,m*sizeof(PetscInt));
280: PetscCalloc1(m,&ld);
281: jj = 0;
282: for (i=0; i<m; i++) {
283: for (j=0; j<dlens[i]; j++) {
284: if (gmata->j[jj] < rstart) ld[i]++;
285: if (gmata->j[jj] < rstart || gmata->j[jj] >= rend) olens[i]++;
286: jj++;
287: }
288: }
289: /* send column indices to other processes */
290: for (i=1; i<size; i++) {
291: nz = gmata->i[rowners[i+1]]-gmata->i[rowners[i]];
292: MPI_Send(&nz,1,MPIU_INT,i,tag,comm);
293: MPI_Send(gmata->j + gmata->i[rowners[i]],nz,MPIU_INT,i,tag,comm);
294: }
296: /* send numerical values to other processes */
297: for (i=1; i<size; i++) {
298: nz = gmata->i[rowners[i+1]]-gmata->i[rowners[i]];
299: MPI_Send(gmata->a + gmata->i[rowners[i]],nz,MPIU_SCALAR,i,tag,comm);
300: }
301: gmataa = gmata->a;
302: gmataj = gmata->j;
304: } else {
305: /* receive row lengths */
306: MPI_Recv(dlens,m,MPIU_INT,0,tag,comm,&status);
307: /* receive column indices */
308: MPI_Recv(&nz,1,MPIU_INT,0,tag,comm,&status);
309: PetscMalloc2(nz,&gmataa,nz,&gmataj);
310: MPI_Recv(gmataj,nz,MPIU_INT,0,tag,comm,&status);
311: /* determine the diagonal and off-diagonal entry counts for each row */
312: PetscMemzero(olens,m*sizeof(PetscInt));
313: PetscCalloc1(m,&ld);
314: jj = 0;
315: for (i=0; i<m; i++) {
316: for (j=0; j<dlens[i]; j++) {
317: if (gmataj[jj] < rstart) ld[i]++;
318: if (gmataj[jj] < rstart || gmataj[jj] >= rend) olens[i]++;
319: jj++;
320: }
321: }
322: /* receive numerical values */
323: PetscMemzero(gmataa,nz*sizeof(PetscScalar));
324: MPI_Recv(gmataa,nz,MPIU_SCALAR,0,tag,comm,&status);
325: }
326: /* set preallocation */
327: for (i=0; i<m; i++) {
328: dlens[i] -= olens[i];
329: }
330: MatSeqAIJSetPreallocation(mat,0,dlens);
331: MatMPIAIJSetPreallocation(mat,0,dlens,0,olens);
333: for (i=0; i<m; i++) {
334: dlens[i] += olens[i];
335: }
336: cnt = 0;
337: for (i=0; i<m; i++) {
338: row = rstart + i;
339: MatSetValues(mat,1,&row,dlens[i],gmataj+cnt,gmataa+cnt,INSERT_VALUES);
340: cnt += dlens[i];
341: }
342: if (rank) {
343: PetscFree2(gmataa,gmataj);
344: }
345: PetscFree2(dlens,olens);
346: PetscFree(rowners);
348: ((Mat_MPIAIJ*)(mat->data))->ld = ld;
350: *inmat = mat;
351: } else { /* column indices are already set; only need to move over numerical values from process 0 */
352: Mat_SeqAIJ *Ad = (Mat_SeqAIJ*)((Mat_MPIAIJ*)((*inmat)->data))->A->data;
353: Mat_SeqAIJ *Ao = (Mat_SeqAIJ*)((Mat_MPIAIJ*)((*inmat)->data))->B->data;
354: mat = *inmat;
355: PetscObjectGetNewTag((PetscObject)mat,&tag);
356: if (!rank) {
357: /* send numerical values to other processes */
358: gmata = (Mat_SeqAIJ*) gmat->data;
359: MatGetOwnershipRanges(mat,(const PetscInt**)&rowners);
360: gmataa = gmata->a;
361: for (i=1; i<size; i++) {
362: nz = gmata->i[rowners[i+1]]-gmata->i[rowners[i]];
363: MPI_Send(gmataa + gmata->i[rowners[i]],nz,MPIU_SCALAR,i,tag,comm);
364: }
365: nz = gmata->i[rowners[1]]-gmata->i[rowners[0]];
366: } else {
367: /* receive numerical values from process 0 */
368: nz = Ad->nz + Ao->nz;
369: PetscMalloc1(nz,&gmataa); gmataarestore = gmataa;
370: MPI_Recv(gmataa,nz,MPIU_SCALAR,0,tag,comm,&status);
371: }
372: /* transfer numerical values into the diagonal A and off diagonal B parts of mat */
373: ld = ((Mat_MPIAIJ*)(mat->data))->ld;
374: ad = Ad->a;
375: ao = Ao->a;
376: if (mat->rmap->n) {
377: i = 0;
378: nz = ld[i]; PetscMemcpy(ao,gmataa,nz*sizeof(PetscScalar)); ao += nz; gmataa += nz;
379: nz = Ad->i[i+1] - Ad->i[i]; PetscMemcpy(ad,gmataa,nz*sizeof(PetscScalar)); ad += nz; gmataa += nz;
380: }
381: for (i=1; i<mat->rmap->n; i++) {
382: nz = Ao->i[i] - Ao->i[i-1] - ld[i-1] + ld[i]; PetscMemcpy(ao,gmataa,nz*sizeof(PetscScalar)); ao += nz; gmataa += nz;
383: nz = Ad->i[i+1] - Ad->i[i]; PetscMemcpy(ad,gmataa,nz*sizeof(PetscScalar)); ad += nz; gmataa += nz;
384: }
385: i--;
386: if (mat->rmap->n) {
387: nz = Ao->i[i+1] - Ao->i[i] - ld[i]; PetscMemcpy(ao,gmataa,nz*sizeof(PetscScalar));
388: }
389: if (rank) {
390: PetscFree(gmataarestore);
391: }
392: }
393: MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY);
394: MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY);
395: return(0);
396: }
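/*
   Usage sketch (illustrative only, with assumed values): a caller holds the
   complete sequential matrix gmat on rank 0 and specifies the local row
   count explicitly (the local sizes must add up to the global size); a
   second call with MAT_REUSE_MATRIX moves only the numerical values.

      Mat      dist;
      PetscInt mlocal = ...;    (chosen so the local sizes sum to the global row count)
      MatDistribute_MPIAIJ(PETSC_COMM_WORLD,gmat,mlocal,MAT_INITIAL_MATRIX,&dist);
      MatDistribute_MPIAIJ(PETSC_COMM_WORLD,gmat,mlocal,MAT_REUSE_MATRIX,&dist);
*/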
398: /*
399: Local utility routine that creates a mapping from the global column
400: number to the local number in the off-diagonal part of the local
401: storage of the matrix. When PETSC_USE_CTABLE is used this is scalable at
402: a slightly higher hash table cost; without it it is not scalable (each process
403: has an order-N integer array) but is fast to access.
404: */
405: PetscErrorCode MatCreateColmap_MPIAIJ_Private(Mat mat)
406: {
407: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;
409: PetscInt n = aij->B->cmap->n,i;
412: if (!aij->garray) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"MPIAIJ Matrix was assembled but is missing garray");
413: #if defined(PETSC_USE_CTABLE)
414: PetscTableCreate(n,mat->cmap->N+1,&aij->colmap);
415: for (i=0; i<n; i++) {
416: PetscTableAdd(aij->colmap,aij->garray[i]+1,i+1,INSERT_VALUES);
417: }
418: #else
419: PetscCalloc1(mat->cmap->N+1,&aij->colmap);
420: PetscLogObjectMemory((PetscObject)mat,(mat->cmap->N+1)*sizeof(PetscInt));
421: for (i=0; i<n; i++) aij->colmap[aij->garray[i]] = i+1;
422: #endif
423: return(0);
424: }
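/*
   Lookup sketch (illustrative only): once the colmap exists, a global column
   gcol is translated to a local column of B using the +1-shifted lookup that
   MatSetValues_MPIAIJ() and MatGetValues_MPIAIJ() below rely on, so "absent"
   comes back as a negative local index.

      PetscInt lcol;
      #if defined(PETSC_USE_CTABLE)
      PetscTableFind(aij->colmap,gcol+1,&lcol);
      lcol--;
      #else
      lcol = aij->colmap[gcol] - 1;
      #endif
      if (lcol < 0) ... gcol does not appear in the off-diagonal block ...
*/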
426: #define MatSetValues_SeqAIJ_A_Private(row,col,value,addv,orow,ocol) \
427: { \
428: if (col <= lastcol1) low1 = 0; \
429: else high1 = nrow1; \
430: lastcol1 = col;\
431: while (high1-low1 > 5) { \
432: t = (low1+high1)/2; \
433: if (rp1[t] > col) high1 = t; \
434: else low1 = t; \
435: } \
436: for (_i=low1; _i<high1; _i++) { \
437: if (rp1[_i] > col) break; \
438: if (rp1[_i] == col) { \
439: if (addv == ADD_VALUES) ap1[_i] += value; \
440: else ap1[_i] = value; \
441: goto a_noinsert; \
442: } \
443: } \
444: if (value == 0.0 && ignorezeroentries && row != col) {low1 = 0; high1 = nrow1;goto a_noinsert;} \
445: if (nonew == 1) {low1 = 0; high1 = nrow1; goto a_noinsert;} \
446: if (nonew == -1) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero at global row/column (%D, %D) into matrix", orow, ocol); \
447: MatSeqXAIJReallocateAIJ(A,am,1,nrow1,row,col,rmax1,aa,ai,aj,rp1,ap1,aimax,nonew,MatScalar); \
448: N = nrow1++ - 1; a->nz++; high1++; \
449: /* shift up all the later entries in this row */ \
450: for (ii=N; ii>=_i; ii--) { \
451: rp1[ii+1] = rp1[ii]; \
452: ap1[ii+1] = ap1[ii]; \
453: } \
454: rp1[_i] = col; \
455: ap1[_i] = value; \
456: A->nonzerostate++;\
457: a_noinsert: ; \
458: ailen[row] = nrow1; \
459: }
461: #define MatSetValues_SeqAIJ_B_Private(row,col,value,addv,orow,ocol) \
462: { \
463: if (col <= lastcol2) low2 = 0; \
464: else high2 = nrow2; \
465: lastcol2 = col; \
466: while (high2-low2 > 5) { \
467: t = (low2+high2)/2; \
468: if (rp2[t] > col) high2 = t; \
469: else low2 = t; \
470: } \
471: for (_i=low2; _i<high2; _i++) { \
472: if (rp2[_i] > col) break; \
473: if (rp2[_i] == col) { \
474: if (addv == ADD_VALUES) ap2[_i] += value; \
475: else ap2[_i] = value; \
476: goto b_noinsert; \
477: } \
478: } \
479: if (value == 0.0 && ignorezeroentries) {low2 = 0; high2 = nrow2; goto b_noinsert;} \
480: if (nonew == 1) {low2 = 0; high2 = nrow2; goto b_noinsert;} \
481: if (nonew == -1) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero at global row/column (%D, %D) into matrix", orow, ocol); \
482: MatSeqXAIJReallocateAIJ(B,bm,1,nrow2,row,col,rmax2,ba,bi,bj,rp2,ap2,bimax,nonew,MatScalar); \
483: N = nrow2++ - 1; b->nz++; high2++; \
484: /* shift up all the later entries in this row */ \
485: for (ii=N; ii>=_i; ii--) { \
486: rp2[ii+1] = rp2[ii]; \
487: ap2[ii+1] = ap2[ii]; \
488: } \
489: rp2[_i] = col; \
490: ap2[_i] = value; \
491: B->nonzerostate++; \
492: b_noinsert: ; \
493: bilen[row] = nrow2; \
494: }
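/*
   Standalone sketch of the technique the two macros above implement
   (illustrative only): a bounded binary search narrows the window, a short
   linear scan locates the column, and later entries are shifted up one slot
   to make room. The sketch assumes the row has spare capacity, which the
   real macros guarantee via MatSeqXAIJReallocateAIJ().

      static void InsertSorted(PetscInt *cols,PetscScalar *vals,PetscInt *n,PetscInt col,PetscScalar v)
      {
        PetscInt lo = 0,hi = *n,i,k;
        while (hi - lo > 5) {
          PetscInt t = (lo + hi)/2;
          if (cols[t] > col) hi = t;
          else lo = t;
        }
        for (i=lo; i<hi; i++) if (cols[i] >= col) break;
        if (i < *n && cols[i] == col) {vals[i] += v; return;}
        for (k=*n; k>i; k--) {cols[k] = cols[k-1]; vals[k] = vals[k-1];}
        cols[i] = col; vals[i] = v; (*n)++;
      }
*/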
496: PetscErrorCode MatSetValuesRow_MPIAIJ(Mat A,PetscInt row,const PetscScalar v[])
497: {
498: Mat_MPIAIJ *mat = (Mat_MPIAIJ*)A->data;
499: Mat_SeqAIJ *a = (Mat_SeqAIJ*)mat->A->data,*b = (Mat_SeqAIJ*)mat->B->data;
501: PetscInt l,*garray = mat->garray,diag;
504: /* code only works for square matrices A */
506: /* find size of row to the left of the diagonal part */
507: MatGetOwnershipRange(A,&diag,0);
508: row = row - diag;
509: for (l=0; l<b->i[row+1]-b->i[row]; l++) {
510: if (garray[b->j[b->i[row]+l]] > diag) break;
511: }
512: PetscMemcpy(b->a+b->i[row],v,l*sizeof(PetscScalar));
514: /* diagonal part */
515: PetscMemcpy(a->a+a->i[row],v+l,(a->i[row+1]-a->i[row])*sizeof(PetscScalar));
517: /* right of diagonal part */
518: PetscMemcpy(b->a+b->i[row]+l,v+l+a->i[row+1]-a->i[row],(b->i[row+1]-b->i[row]-l)*sizeof(PetscScalar));
519: return(0);
520: }
522: PetscErrorCode MatSetValues_MPIAIJ(Mat mat,PetscInt m,const PetscInt im[],PetscInt n,const PetscInt in[],const PetscScalar v[],InsertMode addv)
523: {
524: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;
525: PetscScalar value;
527: PetscInt i,j,rstart = mat->rmap->rstart,rend = mat->rmap->rend;
528: PetscInt cstart = mat->cmap->rstart,cend = mat->cmap->rend,row,col;
529: PetscBool roworiented = aij->roworiented;
531: /* Some variables required by the MatSetValues_SeqAIJ_A/B_Private macros */
532: Mat A = aij->A;
533: Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data;
534: PetscInt *aimax = a->imax,*ai = a->i,*ailen = a->ilen,*aj = a->j;
535: MatScalar *aa = a->a;
536: PetscBool ignorezeroentries = a->ignorezeroentries;
537: Mat B = aij->B;
538: Mat_SeqAIJ *b = (Mat_SeqAIJ*)B->data;
539: PetscInt *bimax = b->imax,*bi = b->i,*bilen = b->ilen,*bj = b->j,bm = aij->B->rmap->n,am = aij->A->rmap->n;
540: MatScalar *ba = b->a;
542: PetscInt *rp1,*rp2,ii,nrow1,nrow2,_i,rmax1,rmax2,N,low1,high1,low2,high2,t,lastcol1,lastcol2;
543: PetscInt nonew;
544: MatScalar *ap1,*ap2;
547: for (i=0; i<m; i++) {
548: if (im[i] < 0) continue;
549: #if defined(PETSC_USE_DEBUG)
550: if (im[i] >= mat->rmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Row too large: row %D max %D",im[i],mat->rmap->N-1);
551: #endif
552: if (im[i] >= rstart && im[i] < rend) {
553: row = im[i] - rstart;
554: lastcol1 = -1;
555: rp1 = aj + ai[row];
556: ap1 = aa + ai[row];
557: rmax1 = aimax[row];
558: nrow1 = ailen[row];
559: low1 = 0;
560: high1 = nrow1;
561: lastcol2 = -1;
562: rp2 = bj + bi[row];
563: ap2 = ba + bi[row];
564: rmax2 = bimax[row];
565: nrow2 = bilen[row];
566: low2 = 0;
567: high2 = nrow2;
569: for (j=0; j<n; j++) {
570: if (roworiented) value = v[i*n+j];
571: else value = v[i+j*m];
572: if (in[j] >= cstart && in[j] < cend) {
573: col = in[j] - cstart;
574: nonew = a->nonew;
575: if (ignorezeroentries && value == 0.0 && (addv == ADD_VALUES) && row != col) continue;
576: MatSetValues_SeqAIJ_A_Private(row,col,value,addv,im[i],in[j]);
577: } else if (in[j] < 0) continue;
578: #if defined(PETSC_USE_DEBUG)
579: else if (in[j] >= mat->cmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Column too large: col %D max %D",in[j],mat->cmap->N-1);
580: #endif
581: else {
582: if (mat->was_assembled) {
583: if (!aij->colmap) {
584: MatCreateColmap_MPIAIJ_Private(mat);
585: }
586: #if defined(PETSC_USE_CTABLE)
587: PetscTableFind(aij->colmap,in[j]+1,&col);
588: col--;
589: #else
590: col = aij->colmap[in[j]] - 1;
591: #endif
592: if (col < 0 && !((Mat_SeqAIJ*)(aij->B->data))->nonew) {
593: MatDisAssemble_MPIAIJ(mat);
594: col = in[j];
595: /* Reinitialize the variables required by MatSetValues_SeqAIJ_B_Private() */
596: B = aij->B;
597: b = (Mat_SeqAIJ*)B->data;
598: bimax = b->imax; bi = b->i; bilen = b->ilen; bj = b->j; ba = b->a;
599: rp2 = bj + bi[row];
600: ap2 = ba + bi[row];
601: rmax2 = bimax[row];
602: nrow2 = bilen[row];
603: low2 = 0;
604: high2 = nrow2;
605: bm = aij->B->rmap->n;
606: ba = b->a;
607: } else if (col < 0) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero at global row/column (%D, %D) into matrix", im[i], in[j]);
608: } else col = in[j];
609: nonew = b->nonew;
610: MatSetValues_SeqAIJ_B_Private(row,col,value,addv,im[i],in[j]);
611: }
612: }
613: } else {
614: if (mat->nooffprocentries) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Setting off process row %D even though MatSetOption(,MAT_NO_OFF_PROC_ENTRIES,PETSC_TRUE) was set",im[i]);
615: if (!aij->donotstash) {
616: mat->assembled = PETSC_FALSE;
617: if (roworiented) {
618: MatStashValuesRow_Private(&mat->stash,im[i],n,in,v+i*n,(PetscBool)(ignorezeroentries && (addv == ADD_VALUES)));
619: } else {
620: MatStashValuesCol_Private(&mat->stash,im[i],n,in,v+i,m,(PetscBool)(ignorezeroentries && (addv == ADD_VALUES)));
621: }
622: }
623: }
624: }
625: return(0);
626: }
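/*
   Usage sketch (illustrative only): locally owned rows are inserted directly
   into the A and B blocks above, while off-process rows are stashed and
   communicated at assembly time, so every setting phase must be closed with
   the assembly pair.

      MatSetValues(mat,1,&row,1,&col,&val,ADD_VALUES);
      MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY);
      MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY);
*/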
628: PetscErrorCode MatGetValues_MPIAIJ(Mat mat,PetscInt m,const PetscInt idxm[],PetscInt n,const PetscInt idxn[],PetscScalar v[])
629: {
630: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;
632: PetscInt i,j,rstart = mat->rmap->rstart,rend = mat->rmap->rend;
633: PetscInt cstart = mat->cmap->rstart,cend = mat->cmap->rend,row,col;
636: for (i=0; i<m; i++) {
637: if (idxm[i] < 0) continue; /* SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Negative row: %D",idxm[i]);*/
638: if (idxm[i] >= mat->rmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Row too large: row %D max %D",idxm[i],mat->rmap->N-1);
639: if (idxm[i] >= rstart && idxm[i] < rend) {
640: row = idxm[i] - rstart;
641: for (j=0; j<n; j++) {
642: if (idxn[j] < 0) continue; /* SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Negative column: %D",idxn[j]); */
643: if (idxn[j] >= mat->cmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Column too large: col %D max %D",idxn[j],mat->cmap->N-1);
644: if (idxn[j] >= cstart && idxn[j] < cend) {
645: col = idxn[j] - cstart;
646: MatGetValues(aij->A,1,&row,1,&col,v+i*n+j);
647: } else {
648: if (!aij->colmap) {
649: MatCreateColmap_MPIAIJ_Private(mat);
650: }
651: #if defined(PETSC_USE_CTABLE)
652: PetscTableFind(aij->colmap,idxn[j]+1,&col);
653: col--;
654: #else
655: col = aij->colmap[idxn[j]] - 1;
656: #endif
657: if ((col < 0) || (aij->garray[col] != idxn[j])) *(v+i*n+j) = 0.0;
658: else {
659: MatGetValues(aij->B,1,&row,1,&col,v+i*n+j);
660: }
661: }
662: }
663: } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Only local values currently supported");
664: }
665: return(0);
666: }
668: extern PetscErrorCode MatMultDiagonalBlock_MPIAIJ(Mat,Vec,Vec);
670: PetscErrorCode MatAssemblyBegin_MPIAIJ(Mat mat,MatAssemblyType mode)
671: {
672: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;
674: PetscInt nstash,reallocs;
677: if (aij->donotstash || mat->nooffprocentries) return(0);
679: MatStashScatterBegin_Private(mat,&mat->stash,mat->rmap->range);
680: MatStashGetInfo_Private(&mat->stash,&nstash,&reallocs);
681: PetscInfo2(aij->A,"Stash has %D entries, uses %D mallocs.\n",nstash,reallocs);
682: return(0);
683: }
685: PetscErrorCode MatAssemblyEnd_MPIAIJ(Mat mat,MatAssemblyType mode)
686: {
687: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;
688: Mat_SeqAIJ *a = (Mat_SeqAIJ*)aij->A->data;
690: PetscMPIInt n;
691: PetscInt i,j,rstart,ncols,flg;
692: PetscInt *row,*col;
693: PetscBool other_disassembled;
694: PetscScalar *val;
696: /* do not use 'b = (Mat_SeqAIJ*)aij->B->data' as B can be reset in disassembly */
699: if (!aij->donotstash && !mat->nooffprocentries) {
700: while (1) {
701: MatStashScatterGetMesg_Private(&mat->stash,&n,&row,&col,&val,&flg);
702: if (!flg) break;
704: for (i=0; i<n; ) {
705: /* Now identify the consecutive vals belonging to the same row */
706: for (j=i,rstart=row[j]; j<n; j++) {
707: if (row[j] != rstart) break;
708: }
709: if (j < n) ncols = j-i;
710: else ncols = n-i;
711: /* Now assemble all these values with a single function call */
712: MatSetValues_MPIAIJ(mat,1,row+i,ncols,col+i,val+i,mat->insertmode);
714: i = j;
715: }
716: }
717: MatStashScatterEnd_Private(&mat->stash);
718: }
719: MatAssemblyBegin(aij->A,mode);
720: MatAssemblyEnd(aij->A,mode);
722: /* determine if any processor has disassembled, if so we must
723: also disassemble ourselves, in order that we may reassemble. */
724: /*
725: if nonzero structure of submatrix B cannot change then we know that
726: no processor disassembled thus we can skip this stuff
727: */
728: if (!((Mat_SeqAIJ*)aij->B->data)->nonew) {
729: MPIU_Allreduce(&mat->was_assembled,&other_disassembled,1,MPIU_BOOL,MPI_PROD,PetscObjectComm((PetscObject)mat));
730: if (mat->was_assembled && !other_disassembled) {
731: MatDisAssemble_MPIAIJ(mat);
732: }
733: }
734: if (!mat->was_assembled && mode == MAT_FINAL_ASSEMBLY) {
735: MatSetUpMultiply_MPIAIJ(mat);
736: }
737: MatSetOption(aij->B,MAT_USE_INODES,PETSC_FALSE);
738: MatAssemblyBegin(aij->B,mode);
739: MatAssemblyEnd(aij->B,mode);
741: PetscFree2(aij->rowvalues,aij->rowindices);
743: aij->rowvalues = 0;
745: VecDestroy(&aij->diag);
746: if (a->inode.size) mat->ops->multdiagonalblock = MatMultDiagonalBlock_MPIAIJ;
748: /* if no new nonzero locations are allowed in matrix then only set the matrix state the first time through */
749: if ((!mat->was_assembled && mode == MAT_FINAL_ASSEMBLY) || !((Mat_SeqAIJ*)(aij->A->data))->nonew) {
750: PetscObjectState state = aij->A->nonzerostate + aij->B->nonzerostate;
751: MPIU_Allreduce(&state,&mat->nonzerostate,1,MPIU_INT64,MPI_SUM,PetscObjectComm((PetscObject)mat));
752: }
753: return(0);
754: }
756: PetscErrorCode MatZeroEntries_MPIAIJ(Mat A)
757: {
758: Mat_MPIAIJ *l = (Mat_MPIAIJ*)A->data;
762: MatZeroEntries(l->A);
763: MatZeroEntries(l->B);
764: return(0);
765: }
767: PetscErrorCode MatZeroRows_MPIAIJ(Mat A,PetscInt N,const PetscInt rows[],PetscScalar diag,Vec x,Vec b)
768: {
769: Mat_MPIAIJ *mat = (Mat_MPIAIJ *) A->data;
770: PetscInt *lrows;
771: PetscInt r, len;
775: /* get locally owned rows */
776: MatZeroRowsMapLocal_Private(A,N,rows,&len,&lrows);
777: /* fix right hand side if needed */
778: if (x && b) {
779: const PetscScalar *xx;
780: PetscScalar *bb;
782: VecGetArrayRead(x, &xx);
783: VecGetArray(b, &bb);
784: for (r = 0; r < len; ++r) bb[lrows[r]] = diag*xx[lrows[r]];
785: VecRestoreArrayRead(x, &xx);
786: VecRestoreArray(b, &bb);
787: }
788: /* Must zero mat->B before mat->A because the (diag != 0.0) case below may put values into mat->B */
789: MatZeroRows(mat->B, len, lrows, 0.0, NULL, NULL);
790: if (A->congruentlayouts == -1) { /* first time we compare rows and cols layouts */
791: PetscBool cong;
792: PetscLayoutCompare(A->rmap,A->cmap,&cong);
793: if (cong) A->congruentlayouts = 1;
794: else A->congruentlayouts = 0;
795: }
796: if ((diag != 0.0) && A->congruentlayouts) {
797: MatZeroRows(mat->A, len, lrows, diag, NULL, NULL);
798: } else if (diag != 0.0) {
799: MatZeroRows(mat->A, len, lrows, 0.0, NULL, NULL);
800: if (((Mat_SeqAIJ *) mat->A->data)->nonew) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_SUP, "MatZeroRows() on rectangular matrices cannot be used with the Mat options\nMAT_NEW_NONZERO_LOCATIONS,MAT_NEW_NONZERO_LOCATION_ERR,MAT_NEW_NONZERO_ALLOCATION_ERR");
801: for (r = 0; r < len; ++r) {
802: const PetscInt row = lrows[r] + A->rmap->rstart;
803: MatSetValues(A, 1, &row, 1, &row, &diag, INSERT_VALUES);
804: }
805: MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY);
806: MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY);
807: } else {
808: MatZeroRows(mat->A, len, lrows, 0.0, NULL, NULL);
809: }
810: PetscFree(lrows);
812: /* only change matrix nonzero state if pattern was allowed to be changed */
813: if (!((Mat_SeqAIJ*)(mat->A->data))->keepnonzeropattern) {
814: PetscObjectState state = mat->A->nonzerostate + mat->B->nonzerostate;
815: MPIU_Allreduce(&state,&A->nonzerostate,1,MPIU_INT64,MPI_SUM,PetscObjectComm((PetscObject)A));
816: }
817: return(0);
818: }
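/*
   Usage sketch (illustrative only): zero two global rows, put diag on their
   diagonal entries, and let the x/b handling above fix the right-hand side
   for the known solution values. Error checking omitted.

      PetscInt    rows[2] = {0,5};
      PetscScalar diag    = 1.0;
      MatZeroRows(A,2,rows,diag,x,b);
*/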
820: PetscErrorCode MatZeroRowsColumns_MPIAIJ(Mat A,PetscInt N,const PetscInt rows[],PetscScalar diag,Vec x,Vec b)
821: {
822: Mat_MPIAIJ *l = (Mat_MPIAIJ*)A->data;
823: PetscErrorCode ierr;
824: PetscMPIInt n = A->rmap->n;
825: PetscInt i,j,r,m,p = 0,len = 0;
826: PetscInt *lrows,*owners = A->rmap->range;
827: PetscSFNode *rrows;
828: PetscSF sf;
829: const PetscScalar *xx;
830: PetscScalar *bb,*mask;
831: Vec xmask,lmask;
832: Mat_SeqAIJ *aij = (Mat_SeqAIJ*)l->B->data;
833: const PetscInt *aj, *ii,*ridx;
834: PetscScalar *aa;
837: /* Create SF where leaves are input rows and roots are owned rows */
838: PetscMalloc1(n, &lrows);
839: for (r = 0; r < n; ++r) lrows[r] = -1;
840: PetscMalloc1(N, &rrows);
841: for (r = 0; r < N; ++r) {
842: const PetscInt idx = rows[r];
843: if (idx < 0 || A->rmap->N <= idx) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Row %D out of range [0,%D)",idx,A->rmap->N);
844: if (idx < owners[p] || owners[p+1] <= idx) { /* short-circuit the search if the last p owns this row too */
845: PetscLayoutFindOwner(A->rmap,idx,&p);
846: }
847: rrows[r].rank = p;
848: rrows[r].index = rows[r] - owners[p];
849: }
850: PetscSFCreate(PetscObjectComm((PetscObject) A), &sf);
851: PetscSFSetGraph(sf, n, N, NULL, PETSC_OWN_POINTER, rrows, PETSC_OWN_POINTER);
852: /* Collect flags for rows to be zeroed */
853: PetscSFReduceBegin(sf, MPIU_INT, (PetscInt *) rows, lrows, MPI_LOR);
854: PetscSFReduceEnd(sf, MPIU_INT, (PetscInt *) rows, lrows, MPI_LOR);
855: PetscSFDestroy(&sf);
856: /* Compress and put in row numbers */
857: for (r = 0; r < n; ++r) if (lrows[r] >= 0) lrows[len++] = r;
858: /* zero diagonal part of matrix */
859: MatZeroRowsColumns(l->A,len,lrows,diag,x,b);
860: /* handle off diagonal part of matrix */
861: MatCreateVecs(A,&xmask,NULL);
862: VecDuplicate(l->lvec,&lmask);
863: VecGetArray(xmask,&bb);
864: for (i=0; i<len; i++) bb[lrows[i]] = 1;
865: VecRestoreArray(xmask,&bb);
866: VecScatterBegin(l->Mvctx,xmask,lmask,ADD_VALUES,SCATTER_FORWARD);
867: VecScatterEnd(l->Mvctx,xmask,lmask,ADD_VALUES,SCATTER_FORWARD);
868: VecDestroy(&xmask);
869: if (x) {
870: VecScatterBegin(l->Mvctx,x,l->lvec,INSERT_VALUES,SCATTER_FORWARD);
871: VecScatterEnd(l->Mvctx,x,l->lvec,INSERT_VALUES,SCATTER_FORWARD);
872: VecGetArrayRead(l->lvec,&xx);
873: VecGetArray(b,&bb);
874: }
875: VecGetArray(lmask,&mask);
876: /* remove zeroed rows of off diagonal matrix */
877: ii = aij->i;
878: for (i=0; i<len; i++) {
879: PetscMemzero(aij->a + ii[lrows[i]],(ii[lrows[i]+1] - ii[lrows[i]])*sizeof(PetscScalar));
880: }
881: /* loop over all elements of off process part of matrix zeroing removed columns*/
882: if (aij->compressedrow.use) {
883: m = aij->compressedrow.nrows;
884: ii = aij->compressedrow.i;
885: ridx = aij->compressedrow.rindex;
886: for (i=0; i<m; i++) {
887: n = ii[i+1] - ii[i];
888: aj = aij->j + ii[i];
889: aa = aij->a + ii[i];
891: for (j=0; j<n; j++) {
892: if (PetscAbsScalar(mask[*aj])) {
893: if (b) bb[*ridx] -= *aa*xx[*aj];
894: *aa = 0.0;
895: }
896: aa++;
897: aj++;
898: }
899: ridx++;
900: }
901: } else { /* do not use compressed row format */
902: m = l->B->rmap->n;
903: for (i=0; i<m; i++) {
904: n = ii[i+1] - ii[i];
905: aj = aij->j + ii[i];
906: aa = aij->a + ii[i];
907: for (j=0; j<n; j++) {
908: if (PetscAbsScalar(mask[*aj])) {
909: if (b) bb[i] -= *aa*xx[*aj];
910: *aa = 0.0;
911: }
912: aa++;
913: aj++;
914: }
915: }
916: }
917: if (x) {
918: VecRestoreArray(b,&bb);
919: VecRestoreArrayRead(l->lvec,&xx);
920: }
921: VecRestoreArray(lmask,&mask);
922: VecDestroy(&lmask);
923: PetscFree(lrows);
925: /* only change matrix nonzero state if pattern was allowed to be changed */
926: if (!((Mat_SeqAIJ*)(l->A->data))->keepnonzeropattern) {
927: PetscObjectState state = l->A->nonzerostate + l->B->nonzerostate;
928: MPIU_Allreduce(&state,&A->nonzerostate,1,MPIU_INT64,MPI_SUM,PetscObjectComm((PetscObject)A));
929: }
930: return(0);
931: }
933: PetscErrorCode MatMult_MPIAIJ(Mat A,Vec xx,Vec yy)
934: {
935: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
937: PetscInt nt;
940: VecGetLocalSize(xx,&nt);
941: if (nt != A->cmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Incompatible partition of A (%D) and xx (%D)",A->cmap->n,nt);
942: VecScatterBegin(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
943: (*a->A->ops->mult)(a->A,xx,yy);
944: VecScatterEnd(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
945: (*a->B->ops->multadd)(a->B,a->lvec,yy,yy);
946: return(0);
947: }
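/*
   Overlap sketch (an illustrative restatement of the body above using public
   calls): the scatter of the needed off-process entries of xx into lvec is
   started, the purely local product with the diagonal block proceeds while
   messages are in flight, and the off-diagonal block is applied after the
   scatter completes.

      VecScatterBegin(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
      MatMult(a->A,xx,yy);
      VecScatterEnd(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
      MatMultAdd(a->B,a->lvec,yy,yy);
*/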
949: PetscErrorCode MatMultDiagonalBlock_MPIAIJ(Mat A,Vec bb,Vec xx)
950: {
951: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
955: MatMultDiagonalBlock(a->A,bb,xx);
956: return(0);
957: }
959: PetscErrorCode MatMultAdd_MPIAIJ(Mat A,Vec xx,Vec yy,Vec zz)
960: {
961: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
965: VecScatterBegin(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
966: (*a->A->ops->multadd)(a->A,xx,yy,zz);
967: VecScatterEnd(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
968: (*a->B->ops->multadd)(a->B,a->lvec,zz,zz);
969: return(0);
970: }
972: PetscErrorCode MatMultTranspose_MPIAIJ(Mat A,Vec xx,Vec yy)
973: {
974: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
976: PetscBool merged;
979: VecScatterGetMerged(a->Mvctx,&merged);
980: /* do nondiagonal part */
981: (*a->B->ops->multtranspose)(a->B,xx,a->lvec);
982: if (!merged) {
983: /* send it on its way */
984: VecScatterBegin(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);
985: /* do local part */
986: (*a->A->ops->multtranspose)(a->A,xx,yy);
987: /* receive remote parts: note this assumes the values are not actually */
988: /* added into yy until the VecScatterEnd() below */
989: VecScatterEnd(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);
990: } else {
991: /* do local part */
992: (*a->A->ops->multtranspose)(a->A,xx,yy);
993: /* send it on its way */
994: VecScatterBegin(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);
995: /* values actually were received in the Begin() but we need to call this nop */
996: VecScatterEnd(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);
997: }
998: return(0);
999: }
1001: PetscErrorCode MatIsTranspose_MPIAIJ(Mat Amat,Mat Bmat,PetscReal tol,PetscBool *f)
1002: {
1003: MPI_Comm comm;
1004: Mat_MPIAIJ *Aij = (Mat_MPIAIJ*) Amat->data, *Bij;
1005: Mat Adia = Aij->A, Bdia, Aoff,Boff,*Aoffs,*Boffs;
1006: IS Me,Notme;
1008: PetscInt M,N,first,last,*notme,i;
1009: PetscMPIInt size;
1012: /* Easy test: symmetric diagonal block */
1013: Bij = (Mat_MPIAIJ*) Bmat->data; Bdia = Bij->A;
1014: MatIsTranspose(Adia,Bdia,tol,f);
1015: if (!*f) return(0);
1016: PetscObjectGetComm((PetscObject)Amat,&comm);
1017: MPI_Comm_size(comm,&size);
1018: if (size == 1) return(0);
1020: /* Hard test: off-diagonal block. This takes a MatCreateSubMatrix. */
1021: MatGetSize(Amat,&M,&N);
1022: MatGetOwnershipRange(Amat,&first,&last);
1023: PetscMalloc1(N-last+first,&notme);
1024: for (i=0; i<first; i++) notme[i] = i;
1025: for (i=last; i<M; i++) notme[i-last+first] = i;
1026: ISCreateGeneral(MPI_COMM_SELF,N-last+first,notme,PETSC_COPY_VALUES,&Notme);
1027: ISCreateStride(MPI_COMM_SELF,last-first,first,1,&Me);
1028: MatCreateSubMatrices(Amat,1,&Me,&Notme,MAT_INITIAL_MATRIX,&Aoffs);
1029: Aoff = Aoffs[0];
1030: MatCreateSubMatrices(Bmat,1,&Notme,&Me,MAT_INITIAL_MATRIX,&Boffs);
1031: Boff = Boffs[0];
1032: MatIsTranspose(Aoff,Boff,tol,f);
1033: MatDestroyMatrices(1,&Aoffs);
1034: MatDestroyMatrices(1,&Boffs);
1035: ISDestroy(&Me);
1036: ISDestroy(&Notme);
1037: PetscFree(notme);
1038: return(0);
1039: }
1041: PetscErrorCode MatMultTransposeAdd_MPIAIJ(Mat A,Vec xx,Vec yy,Vec zz)
1042: {
1043: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
1047: /* do nondiagonal part */
1048: (*a->B->ops->multtranspose)(a->B,xx,a->lvec);
1049: /* send it on its way */
1050: VecScatterBegin(a->Mvctx,a->lvec,zz,ADD_VALUES,SCATTER_REVERSE);
1051: /* do local part */
1052: (*a->A->ops->multtransposeadd)(a->A,xx,yy,zz);
1053: /* receive remote parts */
1054: VecScatterEnd(a->Mvctx,a->lvec,zz,ADD_VALUES,SCATTER_REVERSE);
1055: return(0);
1056: }
1058: /*
1059: This only works correctly for square matrices where the subblock A->A is the
1060: diagonal block
1061: */
1062: PetscErrorCode MatGetDiagonal_MPIAIJ(Mat A,Vec v)
1063: {
1065: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
1068: if (A->rmap->N != A->cmap->N) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Supports only square matrix where A->A is diag block");
1069: if (A->rmap->rstart != A->cmap->rstart || A->rmap->rend != A->cmap->rend) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"row partition must equal col partition");
1070: MatGetDiagonal(a->A,v);
1071: return(0);
1072: }
1074: PetscErrorCode MatScale_MPIAIJ(Mat A,PetscScalar aa)
1075: {
1076: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
1080: MatScale(a->A,aa);
1081: MatScale(a->B,aa);
1082: return(0);
1083: }
1085: PetscErrorCode MatDestroy_MPIAIJ(Mat mat)
1086: {
1087: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;
1091: #if defined(PETSC_USE_LOG)
1092: PetscLogObjectState((PetscObject)mat,"Rows=%D, Cols=%D",mat->rmap->N,mat->cmap->N);
1093: #endif
1094: MatStashDestroy_Private(&mat->stash);
1095: VecDestroy(&aij->diag);
1096: MatDestroy(&aij->A);
1097: MatDestroy(&aij->B);
1098: #if defined(PETSC_USE_CTABLE)
1099: PetscTableDestroy(&aij->colmap);
1100: #else
1101: PetscFree(aij->colmap);
1102: #endif
1103: PetscFree(aij->garray);
1104: VecDestroy(&aij->lvec);
1105: VecScatterDestroy(&aij->Mvctx);
1106: PetscFree2(aij->rowvalues,aij->rowindices);
1107: PetscFree(aij->ld);
1108: PetscFree(mat->data);
1110: PetscObjectChangeTypeName((PetscObject)mat,0);
1111: PetscObjectComposeFunction((PetscObject)mat,"MatStoreValues_C",NULL);
1112: PetscObjectComposeFunction((PetscObject)mat,"MatRetrieveValues_C",NULL);
1113: PetscObjectComposeFunction((PetscObject)mat,"MatIsTranspose_C",NULL);
1114: PetscObjectComposeFunction((PetscObject)mat,"MatMPIAIJSetPreallocation_C",NULL);
1115: PetscObjectComposeFunction((PetscObject)mat,"MatMPIAIJSetPreallocationCSR_C",NULL);
1116: PetscObjectComposeFunction((PetscObject)mat,"MatDiagonalScaleLocal_C",NULL);
1117: PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpiaij_mpisbaij_C",NULL);
1118: #if defined(PETSC_HAVE_ELEMENTAL)
1119: PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpiaij_elemental_C",NULL);
1120: #endif
1121: #if defined(PETSC_HAVE_HYPRE)
1122: PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpiaij_hypre_C",NULL);
1123: PetscObjectComposeFunction((PetscObject)mat,"MatMatMatMult_transpose_mpiaij_mpiaij_C",NULL);
1124: #endif
1125: return(0);
1126: }
1128: PetscErrorCode MatView_MPIAIJ_Binary(Mat mat,PetscViewer viewer)
1129: {
1130: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;
1131: Mat_SeqAIJ *A = (Mat_SeqAIJ*)aij->A->data;
1132: Mat_SeqAIJ *B = (Mat_SeqAIJ*)aij->B->data;
1134: PetscMPIInt rank,size,tag = ((PetscObject)viewer)->tag;
1135: int fd;
1136: PetscInt nz,header[4],*row_lengths,*range=0,rlen,i;
1137: PetscInt nzmax,*column_indices,j,k,col,*garray = aij->garray,cnt,cstart = mat->cmap->rstart,rnz = 0;
1138: PetscScalar *column_values;
1139: PetscInt message_count,flowcontrolcount;
1140: FILE *file;
1143: MPI_Comm_rank(PetscObjectComm((PetscObject)mat),&rank);
1144: MPI_Comm_size(PetscObjectComm((PetscObject)mat),&size);
1145: nz = A->nz + B->nz;
1146: PetscViewerBinaryGetDescriptor(viewer,&fd);
1147: if (!rank) {
1148: header[0] = MAT_FILE_CLASSID;
1149: header[1] = mat->rmap->N;
1150: header[2] = mat->cmap->N;
1152: MPI_Reduce(&nz,&header[3],1,MPIU_INT,MPI_SUM,0,PetscObjectComm((PetscObject)mat));
1153: PetscBinaryWrite(fd,header,4,PETSC_INT,PETSC_TRUE);
1154: /* get largest number of rows any processor has */
1155: rlen = mat->rmap->n;
1156: range = mat->rmap->range;
1157: for (i=1; i<size; i++) rlen = PetscMax(rlen,range[i+1] - range[i]);
1158: } else {
1159: MPI_Reduce(&nz,0,1,MPIU_INT,MPI_SUM,0,PetscObjectComm((PetscObject)mat));
1160: rlen = mat->rmap->n;
1161: }
1163: /* load up the local row counts */
1164: PetscMalloc1(rlen+1,&row_lengths);
1165: for (i=0; i<mat->rmap->n; i++) row_lengths[i] = A->i[i+1] - A->i[i] + B->i[i+1] - B->i[i];
1167: /* store the row lengths to the file */
1168: PetscViewerFlowControlStart(viewer,&message_count,&flowcontrolcount);
1169: if (!rank) {
1170: PetscBinaryWrite(fd,row_lengths,mat->rmap->n,PETSC_INT,PETSC_TRUE);
1171: for (i=1; i<size; i++) {
1172: PetscViewerFlowControlStepMaster(viewer,i,&message_count,flowcontrolcount);
1173: rlen = range[i+1] - range[i];
1174: MPIULong_Recv(row_lengths,rlen,MPIU_INT,i,tag,PetscObjectComm((PetscObject)mat));
1175: PetscBinaryWrite(fd,row_lengths,rlen,PETSC_INT,PETSC_TRUE);
1176: }
1177: PetscViewerFlowControlEndMaster(viewer,&message_count);
1178: } else {
1179: PetscViewerFlowControlStepWorker(viewer,rank,&message_count);
1180: MPIULong_Send(row_lengths,mat->rmap->n,MPIU_INT,0,tag,PetscObjectComm((PetscObject)mat));
1181: PetscViewerFlowControlEndWorker(viewer,&message_count);
1182: }
1183: PetscFree(row_lengths);
1185: /* load up the local column indices */
1186: nzmax = nz; /* process 0 needs as much space as the process with the largest number of nonzeros */
1187: MPI_Reduce(&nz,&nzmax,1,MPIU_INT,MPI_MAX,0,PetscObjectComm((PetscObject)mat));
1188: PetscMalloc1(nzmax+1,&column_indices);
1189: cnt = 0;
1190: for (i=0; i<mat->rmap->n; i++) {
1191: for (j=B->i[i]; j<B->i[i+1]; j++) {
1192: if ((col = garray[B->j[j]]) > cstart) break;
1193: column_indices[cnt++] = col;
1194: }
1195: for (k=A->i[i]; k<A->i[i+1]; k++) column_indices[cnt++] = A->j[k] + cstart;
1196: for (; j<B->i[i+1]; j++) column_indices[cnt++] = garray[B->j[j]];
1197: }
1198: if (cnt != A->nz + B->nz) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Internal PETSc error: cnt = %D nz = %D",cnt,A->nz+B->nz);
1200: /* store the column indices to the file */
1201: PetscViewerFlowControlStart(viewer,&message_count,&flowcontrolcount);
1202: if (!rank) {
1203: MPI_Status status;
1204: PetscBinaryWrite(fd,column_indices,nz,PETSC_INT,PETSC_TRUE);
1205: for (i=1; i<size; i++) {
1206: PetscViewerFlowControlStepMaster(viewer,i,&message_count,flowcontrolcount);
1207: MPI_Recv(&rnz,1,MPIU_INT,i,tag,PetscObjectComm((PetscObject)mat),&status);
1208: if (rnz > nzmax) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Internal PETSc error: rnz = %D nzmax = %D",rnz,nzmax);
1209: MPIULong_Recv(column_indices,rnz,MPIU_INT,i,tag,PetscObjectComm((PetscObject)mat));
1210: PetscBinaryWrite(fd,column_indices,rnz,PETSC_INT,PETSC_TRUE);
1211: }
1212: PetscViewerFlowControlEndMaster(viewer,&message_count);
1213: } else {
1214: PetscViewerFlowControlStepWorker(viewer,rank,&message_count);
1215: MPI_Send(&nz,1,MPIU_INT,0,tag,PetscObjectComm((PetscObject)mat));
1216: MPIULong_Send(column_indices,nz,MPIU_INT,0,tag,PetscObjectComm((PetscObject)mat));
1217: PetscViewerFlowControlEndWorker(viewer,&message_count);
1218: }
1219: PetscFree(column_indices);
1221: /* load up the local column values */
1222: PetscMalloc1(nzmax+1,&column_values);
1223: cnt = 0;
1224: for (i=0; i<mat->rmap->n; i++) {
1225: for (j=B->i[i]; j<B->i[i+1]; j++) {
1226: if (garray[B->j[j]] > cstart) break;
1227: column_values[cnt++] = B->a[j];
1228: }
1229: for (k=A->i[i]; k<A->i[i+1]; k++) column_values[cnt++] = A->a[k];
1230: for (; j<B->i[i+1]; j++) column_values[cnt++] = B->a[j];
1231: }
1232: if (cnt != A->nz + B->nz) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Internal PETSc error: cnt = %D nz = %D",cnt,A->nz+B->nz);
1234: /* store the column values to the file */
1235: PetscViewerFlowControlStart(viewer,&message_count,&flowcontrolcount);
1236: if (!rank) {
1237: MPI_Status status;
1238: PetscBinaryWrite(fd,column_values,nz,PETSC_SCALAR,PETSC_TRUE);
1239: for (i=1; i<size; i++) {
1240: PetscViewerFlowControlStepMaster(viewer,i,&message_count,flowcontrolcount);
1241: MPI_Recv(&rnz,1,MPIU_INT,i,tag,PetscObjectComm((PetscObject)mat),&status);
1242: if (rnz > nzmax) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Internal PETSc error: rnz = %D nzmax = %D",rnz,nzmax);
1243: MPIULong_Recv(column_values,rnz,MPIU_SCALAR,i,tag,PetscObjectComm((PetscObject)mat));
1244: PetscBinaryWrite(fd,column_values,rnz,PETSC_SCALAR,PETSC_TRUE);
1245: }
1246: PetscViewerFlowControlEndMaster(viewer,&message_count);
1247: } else {
1248: PetscViewerFlowControlStepWorker(viewer,rank,&message_count);
1249: MPI_Send(&nz,1,MPIU_INT,0,tag,PetscObjectComm((PetscObject)mat));
1250: MPIULong_Send(column_values,nz,MPIU_SCALAR,0,tag,PetscObjectComm((PetscObject)mat));
1251: PetscViewerFlowControlEndWorker(viewer,&message_count);
1252: }
1253: PetscFree(column_values);
1255: PetscViewerBinaryGetInfoPointer(viewer,&file);
1256: if (file) fprintf(file,"-matload_block_size %d\n",(int)PetscAbs(mat->rmap->bs));
1257: return(0);
1258: }
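/*
   Usage sketch (illustrative only): the binary path above is reached by
   viewing the matrix through a binary viewer; the file can be read back
   later with MatLoad(). Error checking omitted.

      PetscViewer viewer;
      PetscViewerBinaryOpen(PETSC_COMM_WORLD,"A.dat",FILE_MODE_WRITE,&viewer);
      MatView(mat,viewer);
      PetscViewerDestroy(&viewer);
*/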
1260: #include <petscdraw.h>
1261: PetscErrorCode MatView_MPIAIJ_ASCIIorDraworSocket(Mat mat,PetscViewer viewer)
1262: {
1263: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;
1264: PetscErrorCode ierr;
1265: PetscMPIInt rank = aij->rank,size = aij->size;
1266: PetscBool isdraw,iascii,isbinary;
1267: PetscViewer sviewer;
1268: PetscViewerFormat format;
1271: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERDRAW,&isdraw);
1272: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);
1273: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERBINARY,&isbinary);
1274: if (iascii) {
1275: PetscViewerGetFormat(viewer,&format);
1276: if (format == PETSC_VIEWER_ASCII_INFO_DETAIL) {
1277: MatInfo info;
1278: PetscBool inodes;
1280: MPI_Comm_rank(PetscObjectComm((PetscObject)mat),&rank);
1281: MatGetInfo(mat,MAT_LOCAL,&info);
1282: MatInodeGetInodeSizes(aij->A,NULL,(PetscInt**)&inodes,NULL);
1283: PetscViewerASCIIPushSynchronized(viewer);
1284: if (!inodes) {
1285: PetscViewerASCIISynchronizedPrintf(viewer,"[%d] Local rows %D nz %D nz alloced %D mem %D, not using I-node routines\n",
1286: rank,mat->rmap->n,(PetscInt)info.nz_used,(PetscInt)info.nz_allocated,(PetscInt)info.memory);
1287: } else {
1288: PetscViewerASCIISynchronizedPrintf(viewer,"[%d] Local rows %D nz %D nz alloced %D mem %D, using I-node routines\n",
1289: rank,mat->rmap->n,(PetscInt)info.nz_used,(PetscInt)info.nz_allocated,(PetscInt)info.memory);
1290: }
1291: MatGetInfo(aij->A,MAT_LOCAL,&info);
1292: PetscViewerASCIISynchronizedPrintf(viewer,"[%d] on-diagonal part: nz %D \n",rank,(PetscInt)info.nz_used);
1293: MatGetInfo(aij->B,MAT_LOCAL,&info);
1294: PetscViewerASCIISynchronizedPrintf(viewer,"[%d] off-diagonal part: nz %D \n",rank,(PetscInt)info.nz_used);
1295: PetscViewerFlush(viewer);
1296: PetscViewerASCIIPopSynchronized(viewer);
1297: PetscViewerASCIIPrintf(viewer,"Information on VecScatter used in matrix-vector product: \n");
1298: VecScatterView(aij->Mvctx,viewer);
1299: return(0);
1300: } else if (format == PETSC_VIEWER_ASCII_INFO) {
1301: PetscInt inodecount,inodelimit,*inodes;
1302: MatInodeGetInodeSizes(aij->A,&inodecount,&inodes,&inodelimit);
1303: if (inodes) {
1304: PetscViewerASCIIPrintf(viewer,"using I-node (on process 0) routines: found %D nodes, limit used is %D\n",inodecount,inodelimit);
1305: } else {
1306: PetscViewerASCIIPrintf(viewer,"not using I-node (on process 0) routines\n");
1307: }
1308: return(0);
1309: } else if (format == PETSC_VIEWER_ASCII_FACTOR_INFO) {
1310: return(0);
1311: }
1312: } else if (isbinary) {
1313: if (size == 1) {
1314: PetscObjectSetName((PetscObject)aij->A,((PetscObject)mat)->name);
1315: MatView(aij->A,viewer);
1316: } else {
1317: MatView_MPIAIJ_Binary(mat,viewer);
1318: }
1319: return(0);
1320: } else if (isdraw) {
1321: PetscDraw draw;
1322: PetscBool isnull;
1323: PetscViewerDrawGetDraw(viewer,0,&draw);
1324: PetscDrawIsNull(draw,&isnull);
1325: if (isnull) return(0);
1326: }
1328: {
1329: /* assemble the entire matrix onto first processor. */
1330: Mat A;
1331: Mat_SeqAIJ *Aloc;
1332: PetscInt M = mat->rmap->N,N = mat->cmap->N,m,*ai,*aj,row,*cols,i,*ct;
1333: MatScalar *a;
1335: MatCreate(PetscObjectComm((PetscObject)mat),&A);
1336: if (!rank) {
1337: MatSetSizes(A,M,N,M,N);
1338: } else {
1339: MatSetSizes(A,0,0,M,N);
1340: }
1341: /* This is just a temporary matrix, so explicitly using MATMPIAIJ is probably best */
1342: MatSetType(A,MATMPIAIJ);
1343: MatMPIAIJSetPreallocation(A,0,NULL,0,NULL);
1344: MatSetOption(A,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_FALSE);
1345: PetscLogObjectParent((PetscObject)mat,(PetscObject)A);
1347: /* copy over the A part */
1348: Aloc = (Mat_SeqAIJ*)aij->A->data;
1349: m = aij->A->rmap->n; ai = Aloc->i; aj = Aloc->j; a = Aloc->a;
1350: row = mat->rmap->rstart;
1351: for (i=0; i<ai[m]; i++) aj[i] += mat->cmap->rstart;
1352: for (i=0; i<m; i++) {
1353: MatSetValues(A,1,&row,ai[i+1]-ai[i],aj,a,INSERT_VALUES);
1354: row++;
1355: a += ai[i+1]-ai[i]; aj += ai[i+1]-ai[i];
1356: }
1357: aj = Aloc->j;
1358: for (i=0; i<ai[m]; i++) aj[i] -= mat->cmap->rstart;
1360: /* copy over the B part */
1361: Aloc = (Mat_SeqAIJ*)aij->B->data;
1362: m = aij->B->rmap->n; ai = Aloc->i; aj = Aloc->j; a = Aloc->a;
1363: row = mat->rmap->rstart;
1364: PetscMalloc1(ai[m]+1,&cols);
1365: ct = cols;
1366: for (i=0; i<ai[m]; i++) cols[i] = aij->garray[aj[i]];
1367: for (i=0; i<m; i++) {
1368: MatSetValues(A,1,&row,ai[i+1]-ai[i],cols,a,INSERT_VALUES);
1369: row++;
1370: a += ai[i+1]-ai[i]; cols += ai[i+1]-ai[i];
1371: }
1372: PetscFree(ct);
1373: MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
1374: MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
1375: /*
1376: Everyone has to call to draw the matrix since the graphics waits are
1377: synchronized across all processors that share the PetscDraw object
1378: */
1379: PetscViewerGetSubViewer(viewer,PETSC_COMM_SELF,&sviewer);
1380: if (!rank) {
1381: PetscObjectSetName((PetscObject)((Mat_MPIAIJ*)(A->data))->A,((PetscObject)mat)->name);
1382: MatView_SeqAIJ(((Mat_MPIAIJ*)(A->data))->A,sviewer);
1383: }
1384: PetscViewerRestoreSubViewer(viewer,PETSC_COMM_SELF,&sviewer);
1385: PetscViewerFlush(viewer);
1386: MatDestroy(&A);
1387: }
1388: return(0);
1389: }
1391: PetscErrorCode MatView_MPIAIJ(Mat mat,PetscViewer viewer)
1392: {
1394: PetscBool iascii,isdraw,issocket,isbinary;
1397: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);
1398: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERDRAW,&isdraw);
1399: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERBINARY,&isbinary);
1400: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERSOCKET,&issocket);
1401: if (iascii || isdraw || isbinary || issocket) {
1402: MatView_MPIAIJ_ASCIIorDraworSocket(mat,viewer);
1403: }
1404: return(0);
1405: }
1407: PetscErrorCode MatSOR_MPIAIJ(Mat matin,Vec bb,PetscReal omega,MatSORType flag,PetscReal fshift,PetscInt its,PetscInt lits,Vec xx)
1408: {
1409: Mat_MPIAIJ *mat = (Mat_MPIAIJ*)matin->data;
1411: Vec bb1 = 0;
1412: PetscBool hasop;
1415: if (flag == SOR_APPLY_UPPER) {
1416: (*mat->A->ops->sor)(mat->A,bb,omega,flag,fshift,lits,1,xx);
1417: return(0);
1418: }
1420: if (its > 1 || ~flag & SOR_ZERO_INITIAL_GUESS || flag & SOR_EISENSTAT) {
1421: VecDuplicate(bb,&bb1);
1422: }
1424: if ((flag & SOR_LOCAL_SYMMETRIC_SWEEP) == SOR_LOCAL_SYMMETRIC_SWEEP) {
1425: if (flag & SOR_ZERO_INITIAL_GUESS) {
1426: (*mat->A->ops->sor)(mat->A,bb,omega,flag,fshift,lits,1,xx);
1427: its--;
1428: }
1430: while (its--) {
1431: VecScatterBegin(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
1432: VecScatterEnd(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
1434: /* update rhs: bb1 = bb - B*x */
1435: VecScale(mat->lvec,-1.0);
1436: (*mat->B->ops->multadd)(mat->B,mat->lvec,bb,bb1);
1438: /* local sweep */
1439: (*mat->A->ops->sor)(mat->A,bb1,omega,SOR_SYMMETRIC_SWEEP,fshift,lits,1,xx);
1440: }
1441: } else if (flag & SOR_LOCAL_FORWARD_SWEEP) {
1442: if (flag & SOR_ZERO_INITIAL_GUESS) {
1443: (*mat->A->ops->sor)(mat->A,bb,omega,flag,fshift,lits,1,xx);
1444: its--;
1445: }
1446: while (its--) {
1447: VecScatterBegin(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
1448: VecScatterEnd(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
1450: /* update rhs: bb1 = bb - B*x */
1451: VecScale(mat->lvec,-1.0);
1452: (*mat->B->ops->multadd)(mat->B,mat->lvec,bb,bb1);
1454: /* local sweep */
1455: (*mat->A->ops->sor)(mat->A,bb1,omega,SOR_FORWARD_SWEEP,fshift,lits,1,xx);
1456: }
1457: } else if (flag & SOR_LOCAL_BACKWARD_SWEEP) {
1458: if (flag & SOR_ZERO_INITIAL_GUESS) {
1459: (*mat->A->ops->sor)(mat->A,bb,omega,flag,fshift,lits,1,xx);
1460: its--;
1461: }
1462: while (its--) {
1463: VecScatterBegin(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
1464: VecScatterEnd(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
1466: /* update rhs: bb1 = bb - B*x */
1467: VecScale(mat->lvec,-1.0);
1468: (*mat->B->ops->multadd)(mat->B,mat->lvec,bb,bb1);
1470: /* local sweep */
1471: (*mat->A->ops->sor)(mat->A,bb1,omega,SOR_BACKWARD_SWEEP,fshift,lits,1,xx);
1472: }
1473: } else if (flag & SOR_EISENSTAT) {
1474: Vec xx1;
1476: VecDuplicate(bb,&xx1);
1477: (*mat->A->ops->sor)(mat->A,bb,omega,(MatSORType)(SOR_ZERO_INITIAL_GUESS | SOR_LOCAL_BACKWARD_SWEEP),fshift,lits,1,xx);
1479: VecScatterBegin(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
1480: VecScatterEnd(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
1481: if (!mat->diag) {
1482: MatCreateVecs(matin,&mat->diag,NULL);
1483: MatGetDiagonal(matin,mat->diag);
1484: }
1485: MatHasOperation(matin,MATOP_MULT_DIAGONAL_BLOCK,&hasop);
1486: if (hasop) {
1487: MatMultDiagonalBlock(matin,xx,bb1);
1488: } else {
1489: VecPointwiseMult(bb1,mat->diag,xx);
1490: }
1491: VecAYPX(bb1,(omega-2.0)/omega,bb);
1493: MatMultAdd(mat->B,mat->lvec,bb1,bb1);
1495: /* local sweep */
1496: (*mat->A->ops->sor)(mat->A,bb1,omega,(MatSORType)(SOR_ZERO_INITIAL_GUESS | SOR_LOCAL_FORWARD_SWEEP),fshift,lits,1,xx1);
1497: VecAXPY(xx,1.0,xx1);
1498: VecDestroy(&xx1);
1499: } else SETERRQ(PetscObjectComm((PetscObject)matin),PETSC_ERR_SUP,"Parallel SOR not supported");
1501: VecDestroy(&bb1);
1503: matin->factorerrortype = mat->A->factorerrortype;
1504: return(0);
1505: }
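/*
   Usage sketch (illustrative only): MatSOR() is normally exercised through
   the SOR preconditioner rather than called directly; the local-sweep
   variants above are what PCSOR uses in parallel.

      KSP ksp;
      PC  pc;
      KSPCreate(PETSC_COMM_WORLD,&ksp);
      KSPSetOperators(ksp,A,A);
      KSPGetPC(ksp,&pc);
      PCSetType(pc,PCSOR);
      KSPSolve(ksp,b,x);
*/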
1507: PetscErrorCode MatPermute_MPIAIJ(Mat A,IS rowp,IS colp,Mat *B)
1508: {
1509: Mat aA,aB,Aperm;
1510: const PetscInt *rwant,*cwant,*gcols,*ai,*bi,*aj,*bj;
1511: PetscScalar *aa,*ba;
1512: PetscInt i,j,m,n,ng,anz,bnz,*dnnz,*onnz,*tdnnz,*tonnz,*rdest,*cdest,*work,*gcdest;
1513: PetscSF rowsf,sf;
1514: IS parcolp = NULL;
1515: PetscBool done;
1519: MatGetLocalSize(A,&m,&n);
1520: ISGetIndices(rowp,&rwant);
1521: ISGetIndices(colp,&cwant);
1522: PetscMalloc3(PetscMax(m,n),&work,m,&rdest,n,&cdest);
1524: /* Invert row permutation to find out where my rows should go */
1525: PetscSFCreate(PetscObjectComm((PetscObject)A),&rowsf);
1526: PetscSFSetGraphLayout(rowsf,A->rmap,A->rmap->n,NULL,PETSC_OWN_POINTER,rwant);
1527: PetscSFSetFromOptions(rowsf);
1528: for (i=0; i<m; i++) work[i] = A->rmap->rstart + i;
1529: PetscSFReduceBegin(rowsf,MPIU_INT,work,rdest,MPIU_REPLACE);
1530: PetscSFReduceEnd(rowsf,MPIU_INT,work,rdest,MPIU_REPLACE);
1532: /* Invert column permutation to find out where my columns should go */
1533: PetscSFCreate(PetscObjectComm((PetscObject)A),&sf);
1534: PetscSFSetGraphLayout(sf,A->cmap,A->cmap->n,NULL,PETSC_OWN_POINTER,cwant);
1535: PetscSFSetFromOptions(sf);
1536: for (i=0; i<n; i++) work[i] = A->cmap->rstart + i;
1537: PetscSFReduceBegin(sf,MPIU_INT,work,cdest,MPIU_REPLACE);
1538: PetscSFReduceEnd(sf,MPIU_INT,work,cdest,MPIU_REPLACE);
1539: PetscSFDestroy(&sf);
1541: ISRestoreIndices(rowp,&rwant);
1542: ISRestoreIndices(colp,&cwant);
1543: MatMPIAIJGetSeqAIJ(A,&aA,&aB,&gcols);
1545: /* Find out where my gcols should go */
1546: MatGetSize(aB,NULL,&ng);
1547: PetscMalloc1(ng,&gcdest);
1548: PetscSFCreate(PetscObjectComm((PetscObject)A),&sf);
1549: PetscSFSetGraphLayout(sf,A->cmap,ng,NULL,PETSC_OWN_POINTER,gcols);
1550: PetscSFSetFromOptions(sf);
1551: PetscSFBcastBegin(sf,MPIU_INT,cdest,gcdest);
1552: PetscSFBcastEnd(sf,MPIU_INT,cdest,gcdest);
1553: PetscSFDestroy(&sf);
1555: PetscCalloc4(m,&dnnz,m,&onnz,m,&tdnnz,m,&tonnz);
1556: MatGetRowIJ(aA,0,PETSC_FALSE,PETSC_FALSE,&anz,&ai,&aj,&done);
1557: MatGetRowIJ(aB,0,PETSC_FALSE,PETSC_FALSE,&bnz,&bi,&bj,&done);
1558: for (i=0; i<m; i++) {
1559: PetscInt row = rdest[i],rowner;
1560: PetscLayoutFindOwner(A->rmap,row,&rowner);
1561: for (j=ai[i]; j<ai[i+1]; j++) {
1562: PetscInt cowner,col = cdest[aj[j]];
1563: PetscLayoutFindOwner(A->cmap,col,&cowner); /* Could build an index for the columns to eliminate this search */
1564: if (rowner == cowner) dnnz[i]++;
1565: else onnz[i]++;
1566: }
1567: for (j=bi[i]; j<bi[i+1]; j++) {
1568: PetscInt cowner,col = gcdest[bj[j]];
1569: PetscLayoutFindOwner(A->cmap,col,&cowner);
1570: if (rowner == cowner) dnnz[i]++;
1571: else onnz[i]++;
1572: }
1573: }
1574: PetscSFBcastBegin(rowsf,MPIU_INT,dnnz,tdnnz);
1575: PetscSFBcastEnd(rowsf,MPIU_INT,dnnz,tdnnz);
1576: PetscSFBcastBegin(rowsf,MPIU_INT,onnz,tonnz);
1577: PetscSFBcastEnd(rowsf,MPIU_INT,onnz,tonnz);
1578: PetscSFDestroy(&rowsf);
1580: MatCreateAIJ(PetscObjectComm((PetscObject)A),A->rmap->n,A->cmap->n,A->rmap->N,A->cmap->N,0,tdnnz,0,tonnz,&Aperm);
1581: MatSeqAIJGetArray(aA,&aa);
1582: MatSeqAIJGetArray(aB,&ba);
1583: for (i=0; i<m; i++) {
1584: PetscInt *acols = dnnz,*bcols = onnz; /* Repurpose now-unneeded arrays */
1585: PetscInt j0,rowlen;
1586: rowlen = ai[i+1] - ai[i];
1587: for (j0=j=0; j<rowlen; j0=j) { /* rowlen could be larger than number of rows m, so sum in batches */
1588: for ( ; j<PetscMin(rowlen,j0+m); j++) acols[j-j0] = cdest[aj[ai[i]+j]];
1589: MatSetValues(Aperm,1,&rdest[i],j-j0,acols,aa+ai[i]+j0,INSERT_VALUES);
1590: }
1591: rowlen = bi[i+1] - bi[i];
1592: for (j0=j=0; j<rowlen; j0=j) {
1593: for ( ; j<PetscMin(rowlen,j0+m); j++) bcols[j-j0] = gcdest[bj[bi[i]+j]];
1594: MatSetValues(Aperm,1,&rdest[i],j-j0,bcols,ba+bi[i]+j0,INSERT_VALUES);
1595: }
1596: }
1597: MatAssemblyBegin(Aperm,MAT_FINAL_ASSEMBLY);
1598: MatAssemblyEnd(Aperm,MAT_FINAL_ASSEMBLY);
1599: MatRestoreRowIJ(aA,0,PETSC_FALSE,PETSC_FALSE,&anz,&ai,&aj,&done);
1600: MatRestoreRowIJ(aB,0,PETSC_FALSE,PETSC_FALSE,&bnz,&bi,&bj,&done);
1601: MatSeqAIJRestoreArray(aA,&aa);
1602: MatSeqAIJRestoreArray(aB,&ba);
1603: PetscFree4(dnnz,onnz,tdnnz,tonnz);
1604: PetscFree3(work,rdest,cdest);
1605: PetscFree(gcdest);
1606: if (parcolp) {ISDestroy(&colp);}
1607: *B = Aperm;
1608: return(0);
1609: }
1611: PetscErrorCode MatGetGhosts_MPIAIJ(Mat mat,PetscInt *nghosts,const PetscInt *ghosts[])
1612: {
1613: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;
1617: MatGetSize(aij->B,NULL,nghosts);
1618: if (ghosts) *ghosts = aij->garray;
1619: return(0);
1620: }
1622: PetscErrorCode MatGetInfo_MPIAIJ(Mat matin,MatInfoType flag,MatInfo *info)
1623: {
1624: Mat_MPIAIJ *mat = (Mat_MPIAIJ*)matin->data;
1625: Mat A = mat->A,B = mat->B;
1627: PetscReal isend[5],irecv[5];
1630: info->block_size = 1.0;
1631: MatGetInfo(A,MAT_LOCAL,info);
1633: isend[0] = info->nz_used; isend[1] = info->nz_allocated; isend[2] = info->nz_unneeded;
1634: isend[3] = info->memory; isend[4] = info->mallocs;
1636: MatGetInfo(B,MAT_LOCAL,info);
1638: isend[0] += info->nz_used; isend[1] += info->nz_allocated; isend[2] += info->nz_unneeded;
1639: isend[3] += info->memory; isend[4] += info->mallocs;
1640: if (flag == MAT_LOCAL) {
1641: info->nz_used = isend[0];
1642: info->nz_allocated = isend[1];
1643: info->nz_unneeded = isend[2];
1644: info->memory = isend[3];
1645: info->mallocs = isend[4];
1646: } else if (flag == MAT_GLOBAL_MAX) {
1647: MPIU_Allreduce(isend,irecv,5,MPIU_REAL,MPIU_MAX,PetscObjectComm((PetscObject)matin));
1649: info->nz_used = irecv[0];
1650: info->nz_allocated = irecv[1];
1651: info->nz_unneeded = irecv[2];
1652: info->memory = irecv[3];
1653: info->mallocs = irecv[4];
1654: } else if (flag == MAT_GLOBAL_SUM) {
1655: MPIU_Allreduce(isend,irecv,5,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)matin));
1657: info->nz_used = irecv[0];
1658: info->nz_allocated = irecv[1];
1659: info->nz_unneeded = irecv[2];
1660: info->memory = irecv[3];
1661: info->mallocs = irecv[4];
1662: }
1663: info->fill_ratio_given = 0; /* no parallel LU/ILU/Cholesky */
1664: info->fill_ratio_needed = 0;
1665: info->factor_mallocs = 0;
1666: return(0);
1667: }
1669: PetscErrorCode MatSetOption_MPIAIJ(Mat A,MatOption op,PetscBool flg)
1670: {
1671: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
1675: switch (op) {
1676: case MAT_NEW_NONZERO_LOCATIONS:
1677: case MAT_NEW_NONZERO_ALLOCATION_ERR:
1678: case MAT_UNUSED_NONZERO_LOCATION_ERR:
1679: case MAT_KEEP_NONZERO_PATTERN:
1680: case MAT_NEW_NONZERO_LOCATION_ERR:
1681: case MAT_USE_INODES:
1682: case MAT_IGNORE_ZERO_ENTRIES:
1683: MatCheckPreallocated(A,1);
1684: MatSetOption(a->A,op,flg);
1685: MatSetOption(a->B,op,flg);
1686: break;
1687: case MAT_ROW_ORIENTED:
1688: MatCheckPreallocated(A,1);
1689: a->roworiented = flg;
1691: MatSetOption(a->A,op,flg);
1692: MatSetOption(a->B,op,flg);
1693: break;
1694: case MAT_NEW_DIAGONALS:
1695: PetscInfo1(A,"Option %s ignored\n",MatOptions[op]);
1696: break;
1697: case MAT_IGNORE_OFF_PROC_ENTRIES:
1698: a->donotstash = flg;
1699: break;
1700: case MAT_SPD:
1701: A->spd_set = PETSC_TRUE;
1702: A->spd = flg;
1703: if (flg) {
1704: A->symmetric = PETSC_TRUE;
1705: A->structurally_symmetric = PETSC_TRUE;
1706: A->symmetric_set = PETSC_TRUE;
1707: A->structurally_symmetric_set = PETSC_TRUE;
1708: }
1709: break;
1710: case MAT_SYMMETRIC:
1711: MatCheckPreallocated(A,1);
1712: MatSetOption(a->A,op,flg);
1713: break;
1714: case MAT_STRUCTURALLY_SYMMETRIC:
1715: MatCheckPreallocated(A,1);
1716: MatSetOption(a->A,op,flg);
1717: break;
1718: case MAT_HERMITIAN:
1719: MatCheckPreallocated(A,1);
1720: MatSetOption(a->A,op,flg);
1721: break;
1722: case MAT_SYMMETRY_ETERNAL:
1723: MatCheckPreallocated(A,1);
1724: MatSetOption(a->A,op,flg);
1725: break;
1726: case MAT_SUBMAT_SINGLEIS:
1727: A->submat_singleis = flg;
1728: break;
1729: case MAT_STRUCTURE_ONLY:
1730: /* The option is handled directly by MatSetOption() */
1731: break;
1732: default:
1733: SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"unknown option %d",op);
1734: }
1735: return(0);
1736: }
1738: PetscErrorCode MatGetRow_MPIAIJ(Mat matin,PetscInt row,PetscInt *nz,PetscInt **idx,PetscScalar **v)
1739: {
1740: Mat_MPIAIJ *mat = (Mat_MPIAIJ*)matin->data;
1741: PetscScalar *vworkA,*vworkB,**pvA,**pvB,*v_p;
1743: PetscInt i,*cworkA,*cworkB,**pcA,**pcB,cstart = matin->cmap->rstart;
1744: PetscInt nztot,nzA,nzB,lrow,rstart = matin->rmap->rstart,rend = matin->rmap->rend;
1745: PetscInt *cmap,*idx_p;
1748: if (mat->getrowactive) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Already active");
1749: mat->getrowactive = PETSC_TRUE;
1751: if (!mat->rowvalues && (idx || v)) {
1752: /*
1753: allocate enough space to hold information from the longest row.
1754: */
1755: Mat_SeqAIJ *Aa = (Mat_SeqAIJ*)mat->A->data,*Ba = (Mat_SeqAIJ*)mat->B->data;
1756: PetscInt max = 1,tmp;
1757: for (i=0; i<matin->rmap->n; i++) {
1758: tmp = Aa->i[i+1] - Aa->i[i] + Ba->i[i+1] - Ba->i[i];
1759: if (max < tmp) max = tmp;
1760: }
1761: PetscMalloc2(max,&mat->rowvalues,max,&mat->rowindices);
1762: }
1764: if (row < rstart || row >= rend) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Only local rows");
1765: lrow = row - rstart;
1767: pvA = &vworkA; pcA = &cworkA; pvB = &vworkB; pcB = &cworkB;
1768: if (!v) {pvA = 0; pvB = 0;}
1769: if (!idx) {pcA = 0; if (!v) pcB = 0;}
1770: (*mat->A->ops->getrow)(mat->A,lrow,&nzA,pcA,pvA);
1771: (*mat->B->ops->getrow)(mat->B,lrow,&nzB,pcB,pvB);
1772: nztot = nzA + nzB;
1774: cmap = mat->garray;
1775: if (v || idx) {
1776: if (nztot) {
1777: /* Sort by increasing column numbers, assuming A and B already sorted */
1778: PetscInt imark = -1;
1779: if (v) {
1780: *v = v_p = mat->rowvalues;
1781: for (i=0; i<nzB; i++) {
1782: if (cmap[cworkB[i]] < cstart) v_p[i] = vworkB[i];
1783: else break;
1784: }
1785: imark = i;
1786: for (i=0; i<nzA; i++) v_p[imark+i] = vworkA[i];
1787: for (i=imark; i<nzB; i++) v_p[nzA+i] = vworkB[i];
1788: }
1789: if (idx) {
1790: *idx = idx_p = mat->rowindices;
1791: if (imark > -1) {
1792: for (i=0; i<imark; i++) {
1793: idx_p[i] = cmap[cworkB[i]];
1794: }
1795: } else {
1796: for (i=0; i<nzB; i++) {
1797: if (cmap[cworkB[i]] < cstart) idx_p[i] = cmap[cworkB[i]];
1798: else break;
1799: }
1800: imark = i;
1801: }
1802: for (i=0; i<nzA; i++) idx_p[imark+i] = cstart + cworkA[i];
1803: for (i=imark; i<nzB; i++) idx_p[nzA+i] = cmap[cworkB[i]];
1804: }
1805: } else {
1806: if (idx) *idx = 0;
1807: if (v) *v = 0;
1808: }
1809: }
1810: *nz = nztot;
1811: (*mat->A->ops->restorerow)(mat->A,lrow,&nzA,pcA,pvA);
1812: (*mat->B->ops->restorerow)(mat->B,lrow,&nzB,pcB,pvB);
1813: return(0);
1814: }
1816: PetscErrorCode MatRestoreRow_MPIAIJ(Mat mat,PetscInt row,PetscInt *nz,PetscInt **idx,PetscScalar **v)
1817: {
1818: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;
1821: if (!aij->getrowactive) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"MatGetRow() must be called first");
1822: aij->getrowactive = PETSC_FALSE;
1823: return(0);
1824: }
1826: PetscErrorCode MatNorm_MPIAIJ(Mat mat,NormType type,PetscReal *norm)
1827: {
1828: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;
1829: Mat_SeqAIJ *amat = (Mat_SeqAIJ*)aij->A->data,*bmat = (Mat_SeqAIJ*)aij->B->data;
1831: PetscInt i,j,cstart = mat->cmap->rstart;
1832: PetscReal sum = 0.0;
1833: MatScalar *v;
1836: if (aij->size == 1) {
1837: MatNorm(aij->A,type,norm);
1838: } else {
1839: if (type == NORM_FROBENIUS) {
1840: v = amat->a;
1841: for (i=0; i<amat->nz; i++) {
1842: sum += PetscRealPart(PetscConj(*v)*(*v)); v++;
1843: }
1844: v = bmat->a;
1845: for (i=0; i<bmat->nz; i++) {
1846: sum += PetscRealPart(PetscConj(*v)*(*v)); v++;
1847: }
1848: MPIU_Allreduce(&sum,norm,1,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)mat));
1849: *norm = PetscSqrtReal(*norm);
1850: PetscLogFlops(2*amat->nz+2*bmat->nz);
1851: } else if (type == NORM_1) { /* max column norm */
1852: PetscReal *tmp,*tmp2;
1853: PetscInt *jj,*garray = aij->garray;
1854: PetscCalloc1(mat->cmap->N+1,&tmp);
1855: PetscMalloc1(mat->cmap->N+1,&tmp2);
1856: *norm = 0.0;
1857: v = amat->a; jj = amat->j;
1858: for (j=0; j<amat->nz; j++) {
1859: tmp[cstart + *jj++] += PetscAbsScalar(*v); v++;
1860: }
1861: v = bmat->a; jj = bmat->j;
1862: for (j=0; j<bmat->nz; j++) {
1863: tmp[garray[*jj++]] += PetscAbsScalar(*v); v++;
1864: }
1865: MPIU_Allreduce(tmp,tmp2,mat->cmap->N,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)mat));
1866: for (j=0; j<mat->cmap->N; j++) {
1867: if (tmp2[j] > *norm) *norm = tmp2[j];
1868: }
1869: PetscFree(tmp);
1870: PetscFree(tmp2);
1871: PetscLogFlops(PetscMax(amat->nz+bmat->nz-1,0));
1872: } else if (type == NORM_INFINITY) { /* max row norm */
1873: PetscReal ntemp = 0.0;
1874: for (j=0; j<aij->A->rmap->n; j++) {
1875: v = amat->a + amat->i[j];
1876: sum = 0.0;
1877: for (i=0; i<amat->i[j+1]-amat->i[j]; i++) {
1878: sum += PetscAbsScalar(*v); v++;
1879: }
1880: v = bmat->a + bmat->i[j];
1881: for (i=0; i<bmat->i[j+1]-bmat->i[j]; i++) {
1882: sum += PetscAbsScalar(*v); v++;
1883: }
1884: if (sum > ntemp) ntemp = sum;
1885: }
1886: MPIU_Allreduce(&ntemp,norm,1,MPIU_REAL,MPIU_MAX,PetscObjectComm((PetscObject)mat));
1887: PetscLogFlops(PetscMax(amat->nz+bmat->nz-1,0));
1888: } else SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"No support for two norm");
1889: }
1890: return(0);
1891: }
1893: PetscErrorCode MatTranspose_MPIAIJ(Mat A,MatReuse reuse,Mat *matout)
1894: {
1895: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
1896: Mat_SeqAIJ *Aloc=(Mat_SeqAIJ*)a->A->data,*Bloc=(Mat_SeqAIJ*)a->B->data;
1898: PetscInt M = A->rmap->N,N = A->cmap->N,ma,na,mb,nb,*ai,*aj,*bi,*bj,row,*cols,*cols_tmp,i;
1899: PetscInt cstart = A->cmap->rstart,ncol;
1900: Mat B;
1901: MatScalar *array;
1904: if (reuse == MAT_INPLACE_MATRIX && M != N) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Square matrix only for in-place");
1906: ma = A->rmap->n; na = A->cmap->n; mb = a->B->rmap->n; nb = a->B->cmap->n;
1907: ai = Aloc->i; aj = Aloc->j;
1908: bi = Bloc->i; bj = Bloc->j;
1909: if (reuse == MAT_INITIAL_MATRIX || *matout == A) {
1910: PetscInt *d_nnz,*g_nnz,*o_nnz;
1911: PetscSFNode *oloc;
1912: PETSC_UNUSED PetscSF sf;
1914: PetscMalloc4(na,&d_nnz,na,&o_nnz,nb,&g_nnz,nb,&oloc);
1915: /* compute d_nnz for preallocation */
1916: PetscMemzero(d_nnz,na*sizeof(PetscInt));
1917: for (i=0; i<ai[ma]; i++) {
1918: d_nnz[aj[i]]++;
1919: aj[i] += cstart; /* global col index to be used by MatSetValues() */
1920: }
1921: /* compute local off-diagonal contributions */
1922: PetscMemzero(g_nnz,nb*sizeof(PetscInt));
1923: for (i=0; i<bi[ma]; i++) g_nnz[bj[i]]++;
1924: /* map those to global */
1925: PetscSFCreate(PetscObjectComm((PetscObject)A),&sf);
1926: PetscSFSetGraphLayout(sf,A->cmap,nb,NULL,PETSC_USE_POINTER,a->garray);
1927: PetscSFSetFromOptions(sf);
1928: PetscMemzero(o_nnz,na*sizeof(PetscInt));
1929: PetscSFReduceBegin(sf,MPIU_INT,g_nnz,o_nnz,MPIU_SUM);
1930: PetscSFReduceEnd(sf,MPIU_INT,g_nnz,o_nnz,MPIU_SUM);
1931: PetscSFDestroy(&sf);
1933: MatCreate(PetscObjectComm((PetscObject)A),&B);
1934: MatSetSizes(B,A->cmap->n,A->rmap->n,N,M);
1935: MatSetBlockSizes(B,PetscAbs(A->cmap->bs),PetscAbs(A->rmap->bs));
1936: MatSetType(B,((PetscObject)A)->type_name);
1937: MatMPIAIJSetPreallocation(B,0,d_nnz,0,o_nnz);
1938: PetscFree4(d_nnz,o_nnz,g_nnz,oloc);
1939: } else {
1940: B = *matout;
1941: MatSetOption(B,MAT_NEW_NONZERO_ALLOCATION_ERR,PETSC_TRUE);
1942: for (i=0; i<ai[ma]; i++) aj[i] += cstart; /* global col index to be used by MatSetValues() */
1943: }
1945: /* copy over the A part */
1946: array = Aloc->a;
1947: row = A->rmap->rstart;
1948: for (i=0; i<ma; i++) {
1949: ncol = ai[i+1]-ai[i];
1950: MatSetValues(B,ncol,aj,1,&row,array,INSERT_VALUES);
1951: row++;
1952: array += ncol; aj += ncol;
1953: }
1954: aj = Aloc->j;
1955: for (i=0; i<ai[ma]; i++) aj[i] -= cstart; /* restore local col index */
1957: /* copy over the B part */
1958: PetscCalloc1(bi[mb],&cols);
1959: array = Bloc->a;
1960: row = A->rmap->rstart;
1961: for (i=0; i<bi[mb]; i++) cols[i] = a->garray[bj[i]];
1962: cols_tmp = cols;
1963: for (i=0; i<mb; i++) {
1964: ncol = bi[i+1]-bi[i];
1965: MatSetValues(B,ncol,cols_tmp,1,&row,array,INSERT_VALUES);
1966: row++;
1967: array += ncol; cols_tmp += ncol;
1968: }
1969: PetscFree(cols);
1971: MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);
1972: MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);
1973: if (reuse == MAT_INITIAL_MATRIX || reuse == MAT_REUSE_MATRIX) {
1974: *matout = B;
1975: } else {
1976: MatHeaderMerge(A,&B);
1977: }
1978: return(0);
1979: }
1981: PetscErrorCode MatDiagonalScale_MPIAIJ(Mat mat,Vec ll,Vec rr)
1982: {
1983: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;
1984: Mat a = aij->A,b = aij->B;
1986: PetscInt s1,s2,s3;
1989: MatGetLocalSize(mat,&s2,&s3);
1990: if (rr) {
1991: VecGetLocalSize(rr,&s1);
1992: if (s1!=s3) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"right vector non-conforming local size");
1993: /* Overlap communication with computation. */
1994: VecScatterBegin(aij->Mvctx,rr,aij->lvec,INSERT_VALUES,SCATTER_FORWARD);
1995: }
1996: if (ll) {
1997: VecGetLocalSize(ll,&s1);
1998: if (s1!=s2) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"left vector non-conforming local size");
1999: (*b->ops->diagonalscale)(b,ll,0);
2000: }
2001: /* scale the diagonal block */
2002: (*a->ops->diagonalscale)(a,ll,rr);
2004: if (rr) {
2005: /* Do a scatter end and then right scale the off-diagonal block */
2006: VecScatterEnd(aij->Mvctx,rr,aij->lvec,INSERT_VALUES,SCATTER_FORWARD);
2007: (*b->ops->diagonalscale)(b,0,aij->lvec);
2008: }
2009: return(0);
2010: }
2012: PetscErrorCode MatSetUnfactored_MPIAIJ(Mat A)
2013: {
2014: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
2018: MatSetUnfactored(a->A);
2019: return(0);
2020: }
2022: PetscErrorCode MatEqual_MPIAIJ(Mat A,Mat B,PetscBool *flag)
2023: {
2024: Mat_MPIAIJ *matB = (Mat_MPIAIJ*)B->data,*matA = (Mat_MPIAIJ*)A->data;
2025: Mat a,b,c,d;
2026: PetscBool flg;
2030: a = matA->A; b = matA->B;
2031: c = matB->A; d = matB->B;
2033: MatEqual(a,c,&flg);
2034: if (flg) {
2035: MatEqual(b,d,&flg);
2036: }
2037: MPIU_Allreduce(&flg,flag,1,MPIU_BOOL,MPI_LAND,PetscObjectComm((PetscObject)A));
2038: return(0);
2039: }
2041: PetscErrorCode MatCopy_MPIAIJ(Mat A,Mat B,MatStructure str)
2042: {
2044: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
2045: Mat_MPIAIJ *b = (Mat_MPIAIJ*)B->data;
2048: /* If the two matrices don't have the same copy implementation, they aren't compatible for fast copy. */
2049: if ((str != SAME_NONZERO_PATTERN) || (A->ops->copy != B->ops->copy)) {
2050: /* because of the column compression in the off-processor part of the matrix a->B,
2051: the number of columns in a->B and b->B may differ, so we cannot call MatCopy()
2052: directly on the two parts. If need be, a copy more efficient than
2053: MatCopy_Basic() could be provided by first uncompressing the a->B matrices
2054: and then copying the submatrices */
2055: MatCopy_Basic(A,B,str);
2056: } else {
2057: MatCopy(a->A,b->A,str);
2058: MatCopy(a->B,b->B,str);
2059: }
2060: PetscObjectStateIncrease((PetscObject)B);
2061: return(0);
2062: }
2064: PetscErrorCode MatSetUp_MPIAIJ(Mat A)
2065: {
2069: MatMPIAIJSetPreallocation(A,PETSC_DEFAULT,0,PETSC_DEFAULT,0);
2070: return(0);
2071: }
2073: /*
2074: Computes the number of nonzeros per row needed for preallocation when X and Y
2075: have different nonzero structure.
2076: */
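/*
   Worked example (a hypothetical single-row case): if row i of X has (mapped) global
   columns {0,3,7} and row i of Y has {3,5}, the merge below counts the shared
   column 3 only once, so the union pattern is {0,3,5,7} and nnz[i] = 4.
*/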
2077: PetscErrorCode MatAXPYGetPreallocation_MPIX_private(PetscInt m,const PetscInt *xi,const PetscInt *xj,const PetscInt *xltog,const PetscInt *yi,const PetscInt *yj,const PetscInt *yltog,PetscInt *nnz)
2078: {
2079: PetscInt i,j,k,nzx,nzy;
2082: /* Set the number of nonzeros in the new matrix */
2083: for (i=0; i<m; i++) {
2084: const PetscInt *xjj = xj+xi[i],*yjj = yj+yi[i];
2085: nzx = xi[i+1] - xi[i];
2086: nzy = yi[i+1] - yi[i];
2087: nnz[i] = 0;
2088: for (j=0,k=0; j<nzx; j++) { /* Point in X */
2089: for (; k<nzy && yltog[yjj[k]]<xltog[xjj[j]]; k++) nnz[i]++; /* Catch up to X */
2090: if (k<nzy && yltog[yjj[k]]==xltog[xjj[j]]) k++; /* Skip duplicate */
2091: nnz[i]++;
2092: }
2093: for (; k<nzy; k++) nnz[i]++;
2094: }
2095: return(0);
2096: }
2098: /* This is the same as MatAXPYGetPreallocation_SeqAIJ, except that the local-to-global map is provided */
2099: static PetscErrorCode MatAXPYGetPreallocation_MPIAIJ(Mat Y,const PetscInt *yltog,Mat X,const PetscInt *xltog,PetscInt *nnz)
2100: {
2102: PetscInt m = Y->rmap->N;
2103: Mat_SeqAIJ *x = (Mat_SeqAIJ*)X->data;
2104: Mat_SeqAIJ *y = (Mat_SeqAIJ*)Y->data;
2107: MatAXPYGetPreallocation_MPIX_private(m,x->i,x->j,xltog,y->i,y->j,yltog,nnz);
2108: return(0);
2109: }
2111: PetscErrorCode MatAXPY_MPIAIJ(Mat Y,PetscScalar a,Mat X,MatStructure str)
2112: {
2114: Mat_MPIAIJ *xx = (Mat_MPIAIJ*)X->data,*yy = (Mat_MPIAIJ*)Y->data;
2115: PetscBLASInt bnz,one=1;
2116: Mat_SeqAIJ *x,*y;
2119: if (str == SAME_NONZERO_PATTERN) {
2120: PetscScalar alpha = a;
2121: x = (Mat_SeqAIJ*)xx->A->data;
2122: PetscBLASIntCast(x->nz,&bnz);
2123: y = (Mat_SeqAIJ*)yy->A->data;
2124: PetscStackCallBLAS("BLASaxpy",BLASaxpy_(&bnz,&alpha,x->a,&one,y->a,&one));
2125: x = (Mat_SeqAIJ*)xx->B->data;
2126: y = (Mat_SeqAIJ*)yy->B->data;
2127: PetscBLASIntCast(x->nz,&bnz);
2128: PetscStackCallBLAS("BLASaxpy",BLASaxpy_(&bnz,&alpha,x->a,&one,y->a,&one));
2129: PetscObjectStateIncrease((PetscObject)Y);
2130: } else if (str == SUBSET_NONZERO_PATTERN) { /* nonzeros of X are a subset of Y's */
2131: MatAXPY_Basic(Y,a,X,str);
2132: } else {
2133: Mat B;
2134: PetscInt *nnz_d,*nnz_o;
2135: PetscMalloc1(yy->A->rmap->N,&nnz_d);
2136: PetscMalloc1(yy->B->rmap->N,&nnz_o);
2137: MatCreate(PetscObjectComm((PetscObject)Y),&B);
2138: PetscObjectSetName((PetscObject)B,((PetscObject)Y)->name);
2139: MatSetSizes(B,Y->rmap->n,Y->cmap->n,Y->rmap->N,Y->cmap->N);
2140: MatSetBlockSizesFromMats(B,Y,Y);
2141: MatSetType(B,MATMPIAIJ);
2142: MatAXPYGetPreallocation_SeqAIJ(yy->A,xx->A,nnz_d);
2143: MatAXPYGetPreallocation_MPIAIJ(yy->B,yy->garray,xx->B,xx->garray,nnz_o);
2144: MatMPIAIJSetPreallocation(B,0,nnz_d,0,nnz_o);
2145: MatAXPY_BasicWithPreallocation(B,Y,a,X,str);
2146: MatHeaderReplace(Y,&B);
2147: PetscFree(nnz_d);
2148: PetscFree(nnz_o);
2149: }
2150: return(0);
2151: }
2153: extern PetscErrorCode MatConjugate_SeqAIJ(Mat);
2155: PetscErrorCode MatConjugate_MPIAIJ(Mat mat)
2156: {
2157: #if defined(PETSC_USE_COMPLEX)
2159: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;
2162: MatConjugate_SeqAIJ(aij->A);
2163: MatConjugate_SeqAIJ(aij->B);
2164: #else
2166: #endif
2167: return(0);
2168: }
2170: PetscErrorCode MatRealPart_MPIAIJ(Mat A)
2171: {
2172: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
2176: MatRealPart(a->A);
2177: MatRealPart(a->B);
2178: return(0);
2179: }
2181: PetscErrorCode MatImaginaryPart_MPIAIJ(Mat A)
2182: {
2183: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
2187: MatImaginaryPart(a->A);
2188: MatImaginaryPart(a->B);
2189: return(0);
2190: }
2192: PetscErrorCode MatGetRowMaxAbs_MPIAIJ(Mat A, Vec v, PetscInt idx[])
2193: {
2194: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
2196: PetscInt i,*idxb = 0;
2197: PetscScalar *va,*vb;
2198: Vec vtmp;
2201: MatGetRowMaxAbs(a->A,v,idx);
2202: VecGetArray(v,&va);
2203: if (idx) {
2204: for (i=0; i<A->rmap->n; i++) {
2205: if (PetscAbsScalar(va[i])) idx[i] += A->cmap->rstart;
2206: }
2207: }
2209: VecCreateSeq(PETSC_COMM_SELF,A->rmap->n,&vtmp);
2210: if (idx) {
2211: PetscMalloc1(A->rmap->n,&idxb);
2212: }
2213: MatGetRowMaxAbs(a->B,vtmp,idxb);
2214: VecGetArray(vtmp,&vb);
2216: for (i=0; i<A->rmap->n; i++) {
2217: if (PetscAbsScalar(va[i]) < PetscAbsScalar(vb[i])) {
2218: va[i] = vb[i];
2219: if (idx) idx[i] = a->garray[idxb[i]];
2220: }
2221: }
2223: VecRestoreArray(v,&va);
2224: VecRestoreArray(vtmp,&vb);
2225: PetscFree(idxb);
2226: VecDestroy(&vtmp);
2227: return(0);
2228: }
2230: PetscErrorCode MatGetRowMinAbs_MPIAIJ(Mat A, Vec v, PetscInt idx[])
2231: {
2232: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
2234: PetscInt i,*idxb = 0;
2235: PetscScalar *va,*vb;
2236: Vec vtmp;
2239: MatGetRowMinAbs(a->A,v,idx);
2240: VecGetArray(v,&va);
2241: if (idx) {
2242: for (i=0; i<A->rmap->n; i++) {
2243: if (PetscAbsScalar(va[i])) idx[i] += A->cmap->rstart;
2244: }
2245: }
2247: VecCreateSeq(PETSC_COMM_SELF,A->rmap->n,&vtmp);
2248: if (idx) {
2249: PetscMalloc1(A->rmap->n,&idxb);
2250: }
2251: MatGetRowMinAbs(a->B,vtmp,idxb);
2252: VecGetArray(vtmp,&vb);
2254: for (i=0; i<A->rmap->n; i++) {
2255: if (PetscAbsScalar(va[i]) > PetscAbsScalar(vb[i])) {
2256: va[i] = vb[i];
2257: if (idx) idx[i] = a->garray[idxb[i]];
2258: }
2259: }
2261: VecRestoreArray(v,&va);
2262: VecRestoreArray(vtmp,&vb);
2263: PetscFree(idxb);
2264: VecDestroy(&vtmp);
2265: return(0);
2266: }
2268: PetscErrorCode MatGetRowMin_MPIAIJ(Mat A, Vec v, PetscInt idx[])
2269: {
2270: Mat_MPIAIJ *mat = (Mat_MPIAIJ*) A->data;
2271: PetscInt n = A->rmap->n;
2272: PetscInt cstart = A->cmap->rstart;
2273: PetscInt *cmap = mat->garray;
2274: PetscInt *diagIdx, *offdiagIdx;
2275: Vec diagV, offdiagV;
2276: PetscScalar *a, *diagA, *offdiagA;
2277: PetscInt r;
2281: PetscMalloc2(n,&diagIdx,n,&offdiagIdx);
2282: VecCreateSeq(PETSC_COMM_SELF, n, &diagV);
2283: VecCreateSeq(PETSC_COMM_SELF, n, &offdiagV);
2284: MatGetRowMin(mat->A, diagV, diagIdx);
2285: MatGetRowMin(mat->B, offdiagV, offdiagIdx);
2286: VecGetArray(v, &a);
2287: VecGetArray(diagV, &diagA);
2288: VecGetArray(offdiagV, &offdiagA);
2289: for (r = 0; r < n; ++r) {
2290: if (PetscAbsScalar(diagA[r]) <= PetscAbsScalar(offdiagA[r])) {
2291: a[r] = diagA[r];
2292: idx[r] = cstart + diagIdx[r];
2293: } else {
2294: a[r] = offdiagA[r];
2295: idx[r] = cmap[offdiagIdx[r]];
2296: }
2297: }
2298: VecRestoreArray(v, &a);
2299: VecRestoreArray(diagV, &diagA);
2300: VecRestoreArray(offdiagV, &offdiagA);
2301: VecDestroy(&diagV);
2302: VecDestroy(&offdiagV);
2303: PetscFree2(diagIdx, offdiagIdx);
2304: return(0);
2305: }
2307: PetscErrorCode MatGetRowMax_MPIAIJ(Mat A, Vec v, PetscInt idx[])
2308: {
2309: Mat_MPIAIJ *mat = (Mat_MPIAIJ*) A->data;
2310: PetscInt n = A->rmap->n;
2311: PetscInt cstart = A->cmap->rstart;
2312: PetscInt *cmap = mat->garray;
2313: PetscInt *diagIdx, *offdiagIdx;
2314: Vec diagV, offdiagV;
2315: PetscScalar *a, *diagA, *offdiagA;
2316: PetscInt r;
2320: PetscMalloc2(n,&diagIdx,n,&offdiagIdx);
2321: VecCreateSeq(PETSC_COMM_SELF, n, &diagV);
2322: VecCreateSeq(PETSC_COMM_SELF, n, &offdiagV);
2323: MatGetRowMax(mat->A, diagV, diagIdx);
2324: MatGetRowMax(mat->B, offdiagV, offdiagIdx);
2325: VecGetArray(v, &a);
2326: VecGetArray(diagV, &diagA);
2327: VecGetArray(offdiagV, &offdiagA);
2328: for (r = 0; r < n; ++r) {
2329: if (PetscAbsScalar(diagA[r]) >= PetscAbsScalar(offdiagA[r])) {
2330: a[r] = diagA[r];
2331: idx[r] = cstart + diagIdx[r];
2332: } else {
2333: a[r] = offdiagA[r];
2334: idx[r] = cmap[offdiagIdx[r]];
2335: }
2336: }
2337: VecRestoreArray(v, &a);
2338: VecRestoreArray(diagV, &diagA);
2339: VecRestoreArray(offdiagV, &offdiagA);
2340: VecDestroy(&diagV);
2341: VecDestroy(&offdiagV);
2342: PetscFree2(diagIdx, offdiagIdx);
2343: return(0);
2344: }
2346: PetscErrorCode MatGetSeqNonzeroStructure_MPIAIJ(Mat mat,Mat *newmat)
2347: {
2349: Mat *dummy;
2352: MatCreateSubMatrix_MPIAIJ_All(mat,MAT_DO_NOT_GET_VALUES,MAT_INITIAL_MATRIX,&dummy);
2353: *newmat = *dummy;
2354: PetscFree(dummy);
2355: return(0);
2356: }
2358: PetscErrorCode MatInvertBlockDiagonal_MPIAIJ(Mat A,const PetscScalar **values)
2359: {
2360: Mat_MPIAIJ *a = (Mat_MPIAIJ*) A->data;
2364: MatInvertBlockDiagonal(a->A,values);
2365: A->factorerrortype = a->A->factorerrortype;
2366: return(0);
2367: }
2369: static PetscErrorCode MatSetRandom_MPIAIJ(Mat x,PetscRandom rctx)
2370: {
2372: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)x->data;
2375: MatSetRandom(aij->A,rctx);
2376: MatSetRandom(aij->B,rctx);
2377: MatAssemblyBegin(x,MAT_FINAL_ASSEMBLY);
2378: MatAssemblyEnd(x,MAT_FINAL_ASSEMBLY);
2379: return(0);
2380: }
2382: PetscErrorCode MatMPIAIJSetUseScalableIncreaseOverlap_MPIAIJ(Mat A,PetscBool sc)
2383: {
2385: if (sc) A->ops->increaseoverlap = MatIncreaseOverlap_MPIAIJ_Scalable;
2386: else A->ops->increaseoverlap = MatIncreaseOverlap_MPIAIJ;
2387: return(0);
2388: }
2390: /*@
2391: MatMPIAIJSetUseScalableIncreaseOverlap - Specify whether the matrix uses a scalable algorithm to compute the overlap in MatIncreaseOverlap()
2393: Collective on Mat
2395: Input Parameters:
2396: + A - the matrix
2397: - sc - PETSC_TRUE indicates use the scalable algorithm (default is not to use the scalable algorithm)
2399: Level: advanced
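   Example Usage (a minimal sketch; assumes A is an existing MATMPIAIJ matrix):
.vb
     MatMPIAIJSetUseScalableIncreaseOverlap(A,PETSC_TRUE);
     /* subsequent MatIncreaseOverlap() calls on A use the scalable algorithm;
        the same effect is available via the option -mat_increase_overlap_scalable */
.ve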
2401: @*/
2402: PetscErrorCode MatMPIAIJSetUseScalableIncreaseOverlap(Mat A,PetscBool sc)
2403: {
2404: PetscErrorCode ierr;
2407: PetscTryMethod(A,"MatMPIAIJSetUseScalableIncreaseOverlap_C",(Mat,PetscBool),(A,sc));
2408: return(0);
2409: }
2411: PetscErrorCode MatSetFromOptions_MPIAIJ(PetscOptionItems *PetscOptionsObject,Mat A)
2412: {
2413: PetscErrorCode ierr;
2414: PetscBool sc = PETSC_FALSE,flg;
2417: PetscOptionsHead(PetscOptionsObject,"MPIAIJ options");
2418: PetscObjectOptionsBegin((PetscObject)A);
2419: if (A->ops->increaseoverlap == MatIncreaseOverlap_MPIAIJ_Scalable) sc = PETSC_TRUE;
2420: PetscOptionsBool("-mat_increase_overlap_scalable","Use a scalable algorithm to compute the overlap","MatIncreaseOverlap",sc,&sc,&flg);
2421: if (flg) {
2422: MatMPIAIJSetUseScalableIncreaseOverlap(A,sc);
2423: }
2424: PetscOptionsEnd();
2425: return(0);
2426: }
2428: PetscErrorCode MatShift_MPIAIJ(Mat Y,PetscScalar a)
2429: {
2431: Mat_MPIAIJ *maij = (Mat_MPIAIJ*)Y->data;
2432: Mat_SeqAIJ *aij = (Mat_SeqAIJ*)maij->A->data;
2435: if (!Y->preallocated) {
2436: MatMPIAIJSetPreallocation(Y,1,NULL,0,NULL);
2437: } else if (!aij->nz) {
2438: PetscInt nonew = aij->nonew;
2439: MatSeqAIJSetPreallocation(maij->A,1,NULL);
2440: aij->nonew = nonew;
2441: }
2442: MatShift_Basic(Y,a);
2443: return(0);
2444: }
2446: PetscErrorCode MatMissingDiagonal_MPIAIJ(Mat A,PetscBool *missing,PetscInt *d)
2447: {
2448: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
2452: if (A->rmap->n != A->cmap->n) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Only works for square matrices");
2453: MatMissingDiagonal(a->A,missing,d);
2454: if (d) {
2455: PetscInt rstart;
2456: MatGetOwnershipRange(A,&rstart,NULL);
2457: *d += rstart;
2459: }
2460: return(0);
2461: }
2464: /* -------------------------------------------------------------------*/
2465: static struct _MatOps MatOps_Values = {MatSetValues_MPIAIJ,
2466: MatGetRow_MPIAIJ,
2467: MatRestoreRow_MPIAIJ,
2468: MatMult_MPIAIJ,
2469: /* 4*/ MatMultAdd_MPIAIJ,
2470: MatMultTranspose_MPIAIJ,
2471: MatMultTransposeAdd_MPIAIJ,
2472: 0,
2473: 0,
2474: 0,
2475: /*10*/ 0,
2476: 0,
2477: 0,
2478: MatSOR_MPIAIJ,
2479: MatTranspose_MPIAIJ,
2480: /*15*/ MatGetInfo_MPIAIJ,
2481: MatEqual_MPIAIJ,
2482: MatGetDiagonal_MPIAIJ,
2483: MatDiagonalScale_MPIAIJ,
2484: MatNorm_MPIAIJ,
2485: /*20*/ MatAssemblyBegin_MPIAIJ,
2486: MatAssemblyEnd_MPIAIJ,
2487: MatSetOption_MPIAIJ,
2488: MatZeroEntries_MPIAIJ,
2489: /*24*/ MatZeroRows_MPIAIJ,
2490: 0,
2491: 0,
2492: 0,
2493: 0,
2494: /*29*/ MatSetUp_MPIAIJ,
2495: 0,
2496: 0,
2497: MatGetDiagonalBlock_MPIAIJ,
2498: 0,
2499: /*34*/ MatDuplicate_MPIAIJ,
2500: 0,
2501: 0,
2502: 0,
2503: 0,
2504: /*39*/ MatAXPY_MPIAIJ,
2505: MatCreateSubMatrices_MPIAIJ,
2506: MatIncreaseOverlap_MPIAIJ,
2507: MatGetValues_MPIAIJ,
2508: MatCopy_MPIAIJ,
2509: /*44*/ MatGetRowMax_MPIAIJ,
2510: MatScale_MPIAIJ,
2511: MatShift_MPIAIJ,
2512: MatDiagonalSet_MPIAIJ,
2513: MatZeroRowsColumns_MPIAIJ,
2514: /*49*/ MatSetRandom_MPIAIJ,
2515: 0,
2516: 0,
2517: 0,
2518: 0,
2519: /*54*/ MatFDColoringCreate_MPIXAIJ,
2520: 0,
2521: MatSetUnfactored_MPIAIJ,
2522: MatPermute_MPIAIJ,
2523: 0,
2524: /*59*/ MatCreateSubMatrix_MPIAIJ,
2525: MatDestroy_MPIAIJ,
2526: MatView_MPIAIJ,
2527: 0,
2528: MatMatMatMult_MPIAIJ_MPIAIJ_MPIAIJ,
2529: /*64*/ MatMatMatMultSymbolic_MPIAIJ_MPIAIJ_MPIAIJ,
2530: MatMatMatMultNumeric_MPIAIJ_MPIAIJ_MPIAIJ,
2531: 0,
2532: 0,
2533: 0,
2534: /*69*/ MatGetRowMaxAbs_MPIAIJ,
2535: MatGetRowMinAbs_MPIAIJ,
2536: 0,
2537: 0,
2538: 0,
2539: 0,
2540: /*75*/ MatFDColoringApply_AIJ,
2541: MatSetFromOptions_MPIAIJ,
2542: 0,
2543: 0,
2544: MatFindZeroDiagonals_MPIAIJ,
2545: /*80*/ 0,
2546: 0,
2547: 0,
2548: /*83*/ MatLoad_MPIAIJ,
2549: 0,
2550: 0,
2551: 0,
2552: 0,
2553: 0,
2554: /*89*/ MatMatMult_MPIAIJ_MPIAIJ,
2555: MatMatMultSymbolic_MPIAIJ_MPIAIJ,
2556: MatMatMultNumeric_MPIAIJ_MPIAIJ,
2557: MatPtAP_MPIAIJ_MPIAIJ,
2558: MatPtAPSymbolic_MPIAIJ_MPIAIJ,
2559: /*94*/ MatPtAPNumeric_MPIAIJ_MPIAIJ,
2560: 0,
2561: 0,
2562: 0,
2563: 0,
2564: /*99*/ 0,
2565: 0,
2566: 0,
2567: MatConjugate_MPIAIJ,
2568: 0,
2569: /*104*/MatSetValuesRow_MPIAIJ,
2570: MatRealPart_MPIAIJ,
2571: MatImaginaryPart_MPIAIJ,
2572: 0,
2573: 0,
2574: /*109*/0,
2575: 0,
2576: MatGetRowMin_MPIAIJ,
2577: 0,
2578: MatMissingDiagonal_MPIAIJ,
2579: /*114*/MatGetSeqNonzeroStructure_MPIAIJ,
2580: 0,
2581: MatGetGhosts_MPIAIJ,
2582: 0,
2583: 0,
2584: /*119*/0,
2585: 0,
2586: 0,
2587: 0,
2588: MatGetMultiProcBlock_MPIAIJ,
2589: /*124*/MatFindNonzeroRows_MPIAIJ,
2590: MatGetColumnNorms_MPIAIJ,
2591: MatInvertBlockDiagonal_MPIAIJ,
2592: 0,
2593: MatCreateSubMatricesMPI_MPIAIJ,
2594: /*129*/0,
2595: MatTransposeMatMult_MPIAIJ_MPIAIJ,
2596: MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ,
2597: MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ,
2598: 0,
2599: /*134*/0,
2600: 0,
2601: MatRARt_MPIAIJ_MPIAIJ,
2602: 0,
2603: 0,
2604: /*139*/MatSetBlockSizes_MPIAIJ,
2605: 0,
2606: 0,
2607: MatFDColoringSetUp_MPIXAIJ,
2608: MatFindOffBlockDiagonalEntries_MPIAIJ,
2609: /*144*/MatCreateMPIMatConcatenateSeqMat_MPIAIJ
2610: };
2612: /* ----------------------------------------------------------------------------------------*/
2614: PetscErrorCode MatStoreValues_MPIAIJ(Mat mat)
2615: {
2616: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;
2620: MatStoreValues(aij->A);
2621: MatStoreValues(aij->B);
2622: return(0);
2623: }
2625: PetscErrorCode MatRetrieveValues_MPIAIJ(Mat mat)
2626: {
2627: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;
2631: MatRetrieveValues(aij->A);
2632: MatRetrieveValues(aij->B);
2633: return(0);
2634: }
2636: PetscErrorCode MatMPIAIJSetPreallocation_MPIAIJ(Mat B,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[])
2637: {
2638: Mat_MPIAIJ *b;
2642: PetscLayoutSetUp(B->rmap);
2643: PetscLayoutSetUp(B->cmap);
2644: b = (Mat_MPIAIJ*)B->data;
2646: #if defined(PETSC_USE_CTABLE)
2647: PetscTableDestroy(&b->colmap);
2648: #else
2649: PetscFree(b->colmap);
2650: #endif
2651: PetscFree(b->garray);
2652: VecDestroy(&b->lvec);
2653: VecScatterDestroy(&b->Mvctx);
2655: /* Because B will have been resized we simply destroy it and create a new one each time */
2656: MatDestroy(&b->B);
2657: MatCreate(PETSC_COMM_SELF,&b->B);
2658: MatSetSizes(b->B,B->rmap->n,B->cmap->N,B->rmap->n,B->cmap->N);
2659: MatSetBlockSizesFromMats(b->B,B,B);
2660: MatSetType(b->B,MATSEQAIJ);
2661: PetscLogObjectParent((PetscObject)B,(PetscObject)b->B);
2663: if (!B->preallocated) {
2664: MatCreate(PETSC_COMM_SELF,&b->A);
2665: MatSetSizes(b->A,B->rmap->n,B->cmap->n,B->rmap->n,B->cmap->n);
2666: MatSetBlockSizesFromMats(b->A,B,B);
2667: MatSetType(b->A,MATSEQAIJ);
2668: PetscLogObjectParent((PetscObject)B,(PetscObject)b->A);
2669: }
2671: MatSeqAIJSetPreallocation(b->A,d_nz,d_nnz);
2672: MatSeqAIJSetPreallocation(b->B,o_nz,o_nnz);
2673: B->preallocated = PETSC_TRUE;
2674: B->was_assembled = PETSC_FALSE;
2675: B->assembled = PETSC_FALSE;
2676: return(0);
2677: }
2679: PetscErrorCode MatDuplicate_MPIAIJ(Mat matin,MatDuplicateOption cpvalues,Mat *newmat)
2680: {
2681: Mat mat;
2682: Mat_MPIAIJ *a,*oldmat = (Mat_MPIAIJ*)matin->data;
2686: *newmat = 0;
2687: MatCreate(PetscObjectComm((PetscObject)matin),&mat);
2688: MatSetSizes(mat,matin->rmap->n,matin->cmap->n,matin->rmap->N,matin->cmap->N);
2689: MatSetBlockSizesFromMats(mat,matin,matin);
2690: MatSetType(mat,((PetscObject)matin)->type_name);
2691: PetscMemcpy(mat->ops,matin->ops,sizeof(struct _MatOps));
2692: a = (Mat_MPIAIJ*)mat->data;
2694: mat->factortype = matin->factortype;
2695: mat->assembled = PETSC_TRUE;
2696: mat->insertmode = NOT_SET_VALUES;
2697: mat->preallocated = PETSC_TRUE;
2699: a->size = oldmat->size;
2700: a->rank = oldmat->rank;
2701: a->donotstash = oldmat->donotstash;
2702: a->roworiented = oldmat->roworiented;
2703: a->rowindices = 0;
2704: a->rowvalues = 0;
2705: a->getrowactive = PETSC_FALSE;
2707: PetscLayoutReference(matin->rmap,&mat->rmap);
2708: PetscLayoutReference(matin->cmap,&mat->cmap);
2710: if (oldmat->colmap) {
2711: #if defined(PETSC_USE_CTABLE)
2712: PetscTableCreateCopy(oldmat->colmap,&a->colmap);
2713: #else
2714: PetscMalloc1(mat->cmap->N,&a->colmap);
2715: PetscLogObjectMemory((PetscObject)mat,(mat->cmap->N)*sizeof(PetscInt));
2716: PetscMemcpy(a->colmap,oldmat->colmap,(mat->cmap->N)*sizeof(PetscInt));
2717: #endif
2718: } else a->colmap = 0;
2719: if (oldmat->garray) {
2720: PetscInt len;
2721: len = oldmat->B->cmap->n;
2722: PetscMalloc1(len+1,&a->garray);
2723: PetscLogObjectMemory((PetscObject)mat,len*sizeof(PetscInt));
2724: if (len) { PetscMemcpy(a->garray,oldmat->garray,len*sizeof(PetscInt)); }
2725: } else a->garray = 0;
2727: VecDuplicate(oldmat->lvec,&a->lvec);
2728: PetscLogObjectParent((PetscObject)mat,(PetscObject)a->lvec);
2729: VecScatterCopy(oldmat->Mvctx,&a->Mvctx);
2730: PetscLogObjectParent((PetscObject)mat,(PetscObject)a->Mvctx);
2731: MatDuplicate(oldmat->A,cpvalues,&a->A);
2732: PetscLogObjectParent((PetscObject)mat,(PetscObject)a->A);
2733: MatDuplicate(oldmat->B,cpvalues,&a->B);
2734: PetscLogObjectParent((PetscObject)mat,(PetscObject)a->B);
2735: PetscFunctionListDuplicate(((PetscObject)matin)->qlist,&((PetscObject)mat)->qlist);
2736: *newmat = mat;
2737: return(0);
2738: }
2740: PetscErrorCode MatLoad_MPIAIJ(Mat newMat, PetscViewer viewer)
2741: {
2742: PetscScalar *vals,*svals;
2743: MPI_Comm comm;
2745: PetscMPIInt rank,size,tag = ((PetscObject)viewer)->tag;
2746: PetscInt i,nz,j,rstart,rend,mmax,maxnz = 0;
2747: PetscInt header[4],*rowlengths = 0,M,N,m,*cols;
2748: PetscInt *ourlens = NULL,*procsnz = NULL,*offlens = NULL,jj,*mycols,*smycols;
2749: PetscInt cend,cstart,n,*rowners;
2750: int fd;
2751: PetscInt bs = newMat->rmap->bs;
2754: /* force binary viewer to load .info file if it has not yet done so */
2755: PetscViewerSetUp(viewer);
2756: PetscObjectGetComm((PetscObject)viewer,&comm);
2757: MPI_Comm_size(comm,&size);
2758: MPI_Comm_rank(comm,&rank);
2759: PetscViewerBinaryGetDescriptor(viewer,&fd);
2760: if (!rank) {
2761: PetscBinaryRead(fd,(char*)header,4,PETSC_INT);
2762: if (header[0] != MAT_FILE_CLASSID) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_FILE_UNEXPECTED,"not matrix object");
2763: if (header[3] < 0) SETERRQ(PetscObjectComm((PetscObject)newMat),PETSC_ERR_FILE_UNEXPECTED,"Matrix stored in special format on disk, cannot load as MATMPIAIJ");
2764: }
2766: PetscOptionsBegin(comm,NULL,"Options for loading MATMPIAIJ matrix","Mat");
2767: PetscOptionsInt("-matload_block_size","Set the blocksize used to store the matrix","MatLoad",bs,&bs,NULL);
2768: PetscOptionsEnd();
2769: if (bs < 0) bs = 1;
2771: MPI_Bcast(header+1,3,MPIU_INT,0,comm);
2772: M = header[1]; N = header[2];
2774: /* If global sizes are set, check if they are consistent with that given in the file */
2775: if (newMat->rmap->N >= 0 && newMat->rmap->N != M) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_FILE_UNEXPECTED,"Inconsistent # of rows: Matrix in file has (%D) and input matrix has (%D)",newMat->rmap->N,M);
2776: if (newMat->cmap->N >=0 && newMat->cmap->N != N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_FILE_UNEXPECTED,"Inconsistent # of cols: Matrix in file has (%D) and input matrix has (%D)",newMat->cmap->N,N);
2778: /* determine ownership of all (block) rows */
2779: if (M%bs) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_FILE_UNEXPECTED, "Inconsistent # of rows (%d) and block size (%d)",M,bs);
2780: if (newMat->rmap->n < 0) m = bs*((M/bs)/size + (((M/bs) % size) > rank)); /* PETSC_DECIDE */
2781: else m = newMat->rmap->n; /* Set by user */
2783: PetscMalloc1(size+1,&rowners);
2784: MPI_Allgather(&m,1,MPIU_INT,rowners+1,1,MPIU_INT,comm);
2786: /* First process needs enough room for process with most rows */
2787: if (!rank) {
2788: mmax = rowners[1];
2789: for (i=2; i<=size; i++) {
2790: mmax = PetscMax(mmax, rowners[i]);
2791: }
2792: } else mmax = -1; /* unused, but compilers complain */
2794: rowners[0] = 0;
2795: for (i=2; i<=size; i++) {
2796: rowners[i] += rowners[i-1];
2797: }
2798: rstart = rowners[rank];
2799: rend = rowners[rank+1];
2801: /* distribute row lengths to all processors */
2802: PetscMalloc2(m,&ourlens,m,&offlens);
2803: if (!rank) {
2804: PetscBinaryRead(fd,ourlens,m,PETSC_INT);
2805: PetscMalloc1(mmax,&rowlengths);
2806: PetscCalloc1(size,&procsnz);
2807: for (j=0; j<m; j++) {
2808: procsnz[0] += ourlens[j];
2809: }
2810: for (i=1; i<size; i++) {
2811: PetscBinaryRead(fd,rowlengths,rowners[i+1]-rowners[i],PETSC_INT);
2812: /* calculate the number of nonzeros on each processor */
2813: for (j=0; j<rowners[i+1]-rowners[i]; j++) {
2814: procsnz[i] += rowlengths[j];
2815: }
2816: MPIULong_Send(rowlengths,rowners[i+1]-rowners[i],MPIU_INT,i,tag,comm);
2817: }
2818: PetscFree(rowlengths);
2819: } else {
2820: MPIULong_Recv(ourlens,m,MPIU_INT,0,tag,comm);
2821: }
2823: if (!rank) {
2824: /* determine max buffer needed and allocate it */
2825: maxnz = 0;
2826: for (i=0; i<size; i++) {
2827: maxnz = PetscMax(maxnz,procsnz[i]);
2828: }
2829: PetscMalloc1(maxnz,&cols);
2831: /* read in my part of the matrix column indices */
2832: nz = procsnz[0];
2833: PetscMalloc1(nz,&mycols);
2834: PetscBinaryRead(fd,mycols,nz,PETSC_INT);
2836: /* read in everyone else's and ship off */
2837: for (i=1; i<size; i++) {
2838: nz = procsnz[i];
2839: PetscBinaryRead(fd,cols,nz,PETSC_INT);
2840: MPIULong_Send(cols,nz,MPIU_INT,i,tag,comm);
2841: }
2842: PetscFree(cols);
2843: } else {
2844: /* determine buffer space needed for message */
2845: nz = 0;
2846: for (i=0; i<m; i++) {
2847: nz += ourlens[i];
2848: }
2849: PetscMalloc1(nz,&mycols);
2851: /* receive message of column indices */
2852: MPIULong_Recv(mycols,nz,MPIU_INT,0,tag,comm);
2853: }
2855: /* determine column ownership if matrix is not square */
2856: if (N != M) {
2857: if (newMat->cmap->n < 0) n = N/size + ((N % size) > rank);
2858: else n = newMat->cmap->n;
2859: MPI_Scan(&n,&cend,1,MPIU_INT,MPI_SUM,comm);
2860: cstart = cend - n;
2861: } else {
2862: cstart = rstart;
2863: cend = rend;
2864: n = cend - cstart;
2865: }
2867: /* loop over local rows, determining number of off diagonal entries */
2868: PetscMemzero(offlens,m*sizeof(PetscInt));
2869: jj = 0;
2870: for (i=0; i<m; i++) {
2871: for (j=0; j<ourlens[i]; j++) {
2872: if (mycols[jj] < cstart || mycols[jj] >= cend) offlens[i]++;
2873: jj++;
2874: }
2875: }
2877: for (i=0; i<m; i++) {
2878: ourlens[i] -= offlens[i];
2879: }
2880: MatSetSizes(newMat,m,n,M,N);
2882: if (bs > 1) {MatSetBlockSize(newMat,bs);}
2884: MatMPIAIJSetPreallocation(newMat,0,ourlens,0,offlens);
2886: for (i=0; i<m; i++) {
2887: ourlens[i] += offlens[i];
2888: }
2890: if (!rank) {
2891: PetscMalloc1(maxnz+1,&vals);
2893: /* read in my part of the matrix numerical values */
2894: nz = procsnz[0];
2895: PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);
2897: /* insert into matrix */
2898: jj = rstart;
2899: smycols = mycols;
2900: svals = vals;
2901: for (i=0; i<m; i++) {
2902: MatSetValues_MPIAIJ(newMat,1,&jj,ourlens[i],smycols,svals,INSERT_VALUES);
2903: smycols += ourlens[i];
2904: svals += ourlens[i];
2905: jj++;
2906: }
2908: /* read in other processors and ship out */
2909: for (i=1; i<size; i++) {
2910: nz = procsnz[i];
2911: PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);
2912: MPIULong_Send(vals,nz,MPIU_SCALAR,i,((PetscObject)newMat)->tag,comm);
2913: }
2914: PetscFree(procsnz);
2915: } else {
2916: /* receive numeric values */
2917: PetscMalloc1(nz+1,&vals);
2919: /* receive message of values */
2920: MPIULong_Recv(vals,nz,MPIU_SCALAR,0,((PetscObject)newMat)->tag,comm);
2922: /* insert into matrix */
2923: jj = rstart;
2924: smycols = mycols;
2925: svals = vals;
2926: for (i=0; i<m; i++) {
2927: MatSetValues_MPIAIJ(newMat,1,&jj,ourlens[i],smycols,svals,INSERT_VALUES);
2928: smycols += ourlens[i];
2929: svals += ourlens[i];
2930: jj++;
2931: }
2932: }
2933: PetscFree2(ourlens,offlens);
2934: PetscFree(vals);
2935: PetscFree(mycols);
2936: PetscFree(rowners);
2937: MatAssemblyBegin(newMat,MAT_FINAL_ASSEMBLY);
2938: MatAssemblyEnd(newMat,MAT_FINAL_ASSEMBLY);
2939: return(0);
2940: }
2942: /* Not scalable because of ISAllGather() unless getting all columns. */
2943: PetscErrorCode ISGetSeqIS_Private(Mat mat,IS iscol,IS *isseq)
2944: {
2946: IS iscol_local;
2947: PetscBool isstride;
2948: PetscMPIInt lisstride=0,gisstride;
2951: /* check if we are grabbing all columns */
2952: PetscObjectTypeCompare((PetscObject)iscol,ISSTRIDE,&isstride);
2954: if (isstride) {
2955: PetscInt start,len,mstart,mlen;
2956: ISStrideGetInfo(iscol,&start,NULL);
2957: ISGetLocalSize(iscol,&len);
2958: MatGetOwnershipRangeColumn(mat,&mstart,&mlen);
2959: if (mstart == start && mlen-mstart == len) lisstride = 1;
2960: }
2962: MPIU_Allreduce(&lisstride,&gisstride,1,MPI_INT,MPI_MIN,PetscObjectComm((PetscObject)mat));
2963: if (gisstride) {
2964: PetscInt N;
2965: MatGetSize(mat,NULL,&N);
2966: ISCreateStride(PetscObjectComm((PetscObject)mat),N,0,1,&iscol_local);
2967: ISSetIdentity(iscol_local);
2968: PetscInfo(mat,"Optimizing for obtaining all columns of the matrix; skipping ISAllGather()\n");
2969: } else {
2970: PetscInt cbs;
2971: ISGetBlockSize(iscol,&cbs);
2972: ISAllGather(iscol,&iscol_local);
2973: ISSetBlockSize(iscol_local,cbs);
2974: }
2976: *isseq = iscol_local;
2977: return(0);
2978: }
2980: /*
2981: Used by MatCreateSubMatrix_MPIAIJ_SameRowColDist() to avoid ISAllGather() and global size of iscol_local
2982: (see MatCreateSubMatrix_MPIAIJ_nonscalable)
2984: Input Parameters:
2985: mat - matrix
2986: isrow - parallel row index set; its local indices are a subset of the local rows of mat,
2987: i.e., mat->rstart <= isrow[i] < mat->rend
2988: iscol - parallel column index set; its local indices are a subset of local columns of mat,
2989: i.e., mat->cstart <= iscol[i] < mat->cend
2990: Output Parameter:
2991: isrow_d,iscol_d - sequential row and column index sets for retrieving mat->A
2992: iscol_o - sequential column index set for retrieving mat->B
2993: garray - column map; garray[i] indicates global location of iscol_o[i] in iscol
2994: */
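/*
   Illustration (hypothetical values): if iscol_o[k] picks the column of mat->B whose
   global column index appears at position 7 of iscol (0-based), then garray[k] = 7,
   i.e. local column k of the extracted off-diagonal block maps to column 7 of the submatrix.
*/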
2995: PetscErrorCode ISGetSeqIS_SameColDist_Private(Mat mat,IS isrow,IS iscol,IS *isrow_d,IS *iscol_d,IS *iscol_o,const PetscInt *garray[])
2996: {
2998: Vec x,cmap;
2999: const PetscInt *is_idx;
3000: PetscScalar *xarray,*cmaparray;
3001: PetscInt ncols,isstart,*idx,m,rstart,*cmap1,count;
3002: Mat_MPIAIJ *a=(Mat_MPIAIJ*)mat->data;
3003: Mat B=a->B;
3004: Vec lvec=a->lvec,lcmap;
3005: PetscInt i,cstart,cend,Bn=B->cmap->N;
3006: MPI_Comm comm;
3009: PetscObjectGetComm((PetscObject)mat,&comm);
3010: ISGetLocalSize(iscol,&ncols);
3012: /* (1) iscol is a sub-column vector of mat, pad it with '-1.' to form a full vector x */
3013: MatCreateVecs(mat,&x,NULL);
3014: VecDuplicate(x,&cmap);
3015: VecSet(x,-1.0);
3017: /* Get start indices */
3018: MPI_Scan(&ncols,&isstart,1,MPIU_INT,MPI_SUM,comm);
3019: isstart -= ncols;
3020: MatGetOwnershipRangeColumn(mat,&cstart,&cend);
3022: ISGetIndices(iscol,&is_idx);
3023: VecGetArray(x,&xarray);
3024: VecGetArray(cmap,&cmaparray);
3025: PetscMalloc1(ncols,&idx);
3026: for (i=0; i<ncols; i++) {
3027: xarray[is_idx[i]-cstart] = (PetscScalar)is_idx[i];
3028: cmaparray[is_idx[i]-cstart] = i + isstart; /* global index of iscol[i] */
3029: idx[i] = is_idx[i]-cstart; /* local index of iscol[i] */
3030: }
3031: VecRestoreArray(x,&xarray);
3032: VecRestoreArray(cmap,&cmaparray);
3033: ISRestoreIndices(iscol,&is_idx);
3035: /* Get iscol_d */
3036: ISCreateGeneral(PETSC_COMM_SELF,ncols,idx,PETSC_OWN_POINTER,iscol_d);
3037: ISGetBlockSize(iscol,&i);
3038: ISSetBlockSize(*iscol_d,i);
3040: /* Get isrow_d */
3041: ISGetLocalSize(isrow,&m);
3042: rstart = mat->rmap->rstart;
3043: PetscMalloc1(m,&idx);
3044: ISGetIndices(isrow,&is_idx);
3045: for (i=0; i<m; i++) idx[i] = is_idx[i]-rstart;
3046: ISRestoreIndices(isrow,&is_idx);
3048: ISCreateGeneral(PETSC_COMM_SELF,m,idx,PETSC_OWN_POINTER,isrow_d);
3049: ISGetBlockSize(isrow,&i);
3050: ISSetBlockSize(*isrow_d,i);
3052: /* (2) Scatter x and cmap using aij->Mvctx to get their off-process portions (see MatMult_MPIAIJ) */
3053: VecScatterBegin(a->Mvctx,x,lvec,INSERT_VALUES,SCATTER_FORWARD);
3055: VecDuplicate(lvec,&lcmap);
3057: VecScatterEnd(a->Mvctx,x,lvec,INSERT_VALUES,SCATTER_FORWARD);
3058: VecScatterBegin(a->Mvctx,cmap,lcmap,INSERT_VALUES,SCATTER_FORWARD);
3059: VecScatterEnd(a->Mvctx,cmap,lcmap,INSERT_VALUES,SCATTER_FORWARD);
3061: /* (3) create sequential iscol_o (a subset of iscol) and isgarray */
3062: /* off-process column indices */
3063: count = 0;
3064: PetscMalloc1(Bn,&idx);
3065: PetscMalloc1(Bn,&cmap1);
3067: VecGetArray(lvec,&xarray);
3068: VecGetArray(lcmap,&cmaparray);
3069: for (i=0; i<Bn; i++) {
3070: if (PetscRealPart(xarray[i]) > -1.0) {
3071: idx[count] = i; /* local column index in off-diagonal part B */
3072: cmap1[count++] = (PetscInt)PetscRealPart(cmaparray[i]); /* column index in submat */
3073: }
3074: }
3075: VecRestoreArray(lvec,&xarray);
3076: VecRestoreArray(lcmap,&cmaparray);
3078: ISCreateGeneral(PETSC_COMM_SELF,count,idx,PETSC_COPY_VALUES,iscol_o);
3079: /* cannot ensure iscol_o has same blocksize as iscol! */
3081: PetscFree(idx);
3083: *garray = cmap1;
3085: VecDestroy(&x);
3086: VecDestroy(&cmap);
3087: VecDestroy(&lcmap);
3088: return(0);
3089: }
3091: /* isrow and iscol have same processor distribution as mat, output *submat is a submatrix of local mat */
3092: PetscErrorCode MatCreateSubMatrix_MPIAIJ_SameRowColDist(Mat mat,IS isrow,IS iscol,MatReuse call,Mat *submat)
3093: {
3095: Mat_MPIAIJ *a = (Mat_MPIAIJ*)mat->data,*asub;
3096: Mat M = NULL;
3097: MPI_Comm comm;
3098: IS iscol_d,isrow_d,iscol_o;
3099: Mat Asub = NULL,Bsub = NULL;
3100: PetscInt n;
3103: PetscObjectGetComm((PetscObject)mat,&comm);
3105: if (call == MAT_REUSE_MATRIX) {
3106: /* Retrieve isrow_d, iscol_d and iscol_o from submat */
3107: PetscObjectQuery((PetscObject)*submat,"isrow_d",(PetscObject*)&isrow_d);
3108: if (!isrow_d) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"isrow_d passed in was not used before, cannot reuse");
3110: PetscObjectQuery((PetscObject)*submat,"iscol_d",(PetscObject*)&iscol_d);
3111: if (!iscol_d) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"iscol_d passed in was not used before, cannot reuse");
3113: PetscObjectQuery((PetscObject)*submat,"iscol_o",(PetscObject*)&iscol_o);
3114: if (!iscol_o) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"iscol_o passed in was not used before, cannot reuse");
3116: /* Update diagonal and off-diagonal portions of submat */
3117: asub = (Mat_MPIAIJ*)(*submat)->data;
3118: MatCreateSubMatrix_SeqAIJ(a->A,isrow_d,iscol_d,PETSC_DECIDE,MAT_REUSE_MATRIX,&asub->A);
3119: ISGetLocalSize(iscol_o,&n);
3120: if (n) {
3121: MatCreateSubMatrix_SeqAIJ(a->B,isrow_d,iscol_o,PETSC_DECIDE,MAT_REUSE_MATRIX,&asub->B);
3122: }
3123: MatAssemblyBegin(*submat,MAT_FINAL_ASSEMBLY);
3124: MatAssemblyEnd(*submat,MAT_FINAL_ASSEMBLY);
3126: } else { /* call == MAT_INITIAL_MATRIX) */
3127: const PetscInt *garray;
3128: PetscInt BsubN;
3130: /* Create isrow_d, iscol_d, iscol_o and isgarray (replace isgarray with array?) */
3131: ISGetSeqIS_SameColDist_Private(mat,isrow,iscol,&isrow_d,&iscol_d,&iscol_o,&garray);
3133: /* Create local submatrices Asub and Bsub */
3134: MatCreateSubMatrix_SeqAIJ(a->A,isrow_d,iscol_d,PETSC_DECIDE,MAT_INITIAL_MATRIX,&Asub);
3135: MatCreateSubMatrix_SeqAIJ(a->B,isrow_d,iscol_o,PETSC_DECIDE,MAT_INITIAL_MATRIX,&Bsub);
3137: /* Create submatrix M */
3138: MatCreateMPIAIJWithSeqAIJ(comm,Asub,Bsub,garray,&M);
3140: /* If Bsub has empty columns, compress iscol_o such that it will retrieve condensed Bsub from a->B during reuse */
3141: asub = (Mat_MPIAIJ*)M->data;
3143: ISGetLocalSize(iscol_o,&BsubN);
3144: n = asub->B->cmap->N;
3145: if (BsubN > n) {
3146: /* This case can be tested using ~petsc/src/tao/bound/examples/tutorials/runplate2_3 */
3147: const PetscInt *idx;
3148: PetscInt i,j,*idx_new,*subgarray = asub->garray;
3149: PetscInfo2(M,"submatrix Bn %D != BsubN %D, update iscol_o\n",n,BsubN);
3151: PetscMalloc1(n,&idx_new);
3152: j = 0;
3153: ISGetIndices(iscol_o,&idx);
3154: for (i=0; i<n; i++) {
3155: if (j >= BsubN) break;
3156: while (subgarray[i] > garray[j]) j++;
3158: if (subgarray[i] == garray[j]) {
3159: idx_new[i] = idx[j++];
3160: } else SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"subgarray[%D]=%D cannot be < garray[%D]=%D",i,subgarray[i],j,garray[j]);
3161: }
3162: ISRestoreIndices(iscol_o,&idx);
3164: ISDestroy(&iscol_o);
3165: ISCreateGeneral(PETSC_COMM_SELF,n,idx_new,PETSC_OWN_POINTER,&iscol_o);
3167: } else if (BsubN < n) {
3168: SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Columns of Bsub (%D) cannot be smaller than B's (%D)",BsubN,asub->B->cmap->N);
3169: }
3171: PetscFree(garray);
3172: *submat = M;
3174: /* Save isrow_d, iscol_d and iscol_o used in processor for next request */
3175: PetscObjectCompose((PetscObject)M,"isrow_d",(PetscObject)isrow_d);
3176: ISDestroy(&isrow_d);
3178: PetscObjectCompose((PetscObject)M,"iscol_d",(PetscObject)iscol_d);
3179: ISDestroy(&iscol_d);
3181: PetscObjectCompose((PetscObject)M,"iscol_o",(PetscObject)iscol_o);
3182: ISDestroy(&iscol_o);
3183: }
3184: return(0);
3185: }
3187: PetscErrorCode MatCreateSubMatrix_MPIAIJ(Mat mat,IS isrow,IS iscol,MatReuse call,Mat *newmat)
3188: {
3190: IS iscol_local,isrow_d;
3191: PetscInt csize;
3192: PetscInt n,i,j,start,end;
3193: PetscBool sameRowDist=PETSC_FALSE,sameDist[2],tsameDist[2];
3194: MPI_Comm comm;
3197: /* If isrow has same processor distribution as mat,
3198: call MatCreateSubMatrix_MPIAIJ_SameRowDist() to avoid using a hash table with global size of iscol */
3199: if (call == MAT_REUSE_MATRIX) {
3200: PetscObjectQuery((PetscObject)*newmat,"isrow_d",(PetscObject*)&isrow_d);
3201: if (isrow_d) {
3202: sameRowDist = PETSC_TRUE;
3203: tsameDist[1] = PETSC_TRUE; /* sameColDist */
3204: } else {
3205: PetscObjectQuery((PetscObject)*newmat,"SubIScol",(PetscObject*)&iscol_local);
3206: if (iscol_local) {
3207: sameRowDist = PETSC_TRUE;
3208: tsameDist[1] = PETSC_FALSE; /* !sameColDist */
3209: }
3210: }
3211: } else {
3212: /* Check if isrow has same processor distribution as mat */
3213: sameDist[0] = PETSC_FALSE;
3214: ISGetLocalSize(isrow,&n);
3215: if (!n) {
3216: sameDist[0] = PETSC_TRUE;
3217: } else {
3218: ISGetMinMax(isrow,&i,&j);
3219: MatGetOwnershipRange(mat,&start,&end);
3220: if (i >= start && j < end) {
3221: sameDist[0] = PETSC_TRUE;
3222: }
3223: }
3225: /* Check if iscol has same processor distribution as mat */
3226: sameDist[1] = PETSC_FALSE;
3227: ISGetLocalSize(iscol,&n);
3228: if (!n) {
3229: sameDist[1] = PETSC_TRUE;
3230: } else {
3231: ISGetMinMax(iscol,&i,&j);
3232: MatGetOwnershipRangeColumn(mat,&start,&end);
3233: if (i >= start && j < end) sameDist[1] = PETSC_TRUE;
3234: }
3236: PetscObjectGetComm((PetscObject)mat,&comm);
3237: MPIU_Allreduce(&sameDist,&tsameDist,2,MPIU_BOOL,MPI_LAND,comm);
3238: sameRowDist = tsameDist[0];
3239: }
3241: if (sameRowDist) {
3242: if (tsameDist[1]) { /* sameRowDist & sameColDist */
3243: /* isrow and iscol have same processor distribution as mat */
3244: MatCreateSubMatrix_MPIAIJ_SameRowColDist(mat,isrow,iscol,call,newmat);
3245: } else { /* sameRowDist */
3246: /* isrow has same processor distribution as mat */
3247: MatCreateSubMatrix_MPIAIJ_SameRowDist(mat,isrow,iscol,call,newmat);
3248: }
3249: return(0);
3250: }
3252: /* General case: iscol -> iscol_local which has global size of iscol */
3253: if (call == MAT_REUSE_MATRIX) {
3254: PetscObjectQuery((PetscObject)*newmat,"ISAllGather",(PetscObject*)&iscol_local);
3255: if (!iscol_local) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Submatrix passed in was not used before, cannot reuse");
3256: } else {
3257: ISGetSeqIS_Private(mat,iscol,&iscol_local);
3258: }
3260: ISGetLocalSize(iscol,&csize);
3261: MatCreateSubMatrix_MPIAIJ_nonscalable(mat,isrow,iscol_local,csize,call,newmat);
3263: if (call == MAT_INITIAL_MATRIX) {
3264: PetscObjectCompose((PetscObject)*newmat,"ISAllGather",(PetscObject)iscol_local);
3265: ISDestroy(&iscol_local);
3266: }
3267: return(0);
3268: }
3270: /*@C
3271: MatCreateMPIAIJWithSeqAIJ - creates an MPIAIJ matrix using SeqAIJ matrices that contain the "diagonal"
3272: and "off-diagonal" part of the matrix in CSR format.
3274: Collective on MPI_Comm
3276: Input Parameters:
3277: + comm - MPI communicator
3278: . A - "diagonal" portion of matrix
3279: . B - "off-diagonal" portion of matrix, may have empty columns, will be destroyed by this routine
3280: - garray - global index of B columns
3282: Output Parameter:
3283: . mat - the matrix, with input A as its local diagonal matrix
3284: Level: advanced
3286: Notes:
3287: See MatCreateAIJ() for the definition of "diagonal" and "off-diagonal" portion of the matrix.
3288: A becomes part of the output mat and B is destroyed by this routine; the user must not use A or B afterwards.
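   Example Usage (a minimal sketch; A, B, and garray are assumed to have been built
   already, e.g. extracted from another parallel matrix):
.vb
     Mat C;
     MatCreateMPIAIJWithSeqAIJ(comm,A,B,garray,&C);
     /* A is kept as the "diagonal" block of C; B has been destroyed and must not be used */
.ve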
3290: .seealso: MatCreateMPIAIJWithSplitArrays()
3291: @*/
3292: PetscErrorCode MatCreateMPIAIJWithSeqAIJ(MPI_Comm comm,Mat A,Mat B,const PetscInt garray[],Mat *mat)
3293: {
3295: Mat_MPIAIJ *maij;
3296: Mat_SeqAIJ *b=(Mat_SeqAIJ*)B->data,*bnew;
3297: PetscInt *oi=b->i,*oj=b->j,i,nz,col;
3298: PetscScalar *oa=b->a;
3299: Mat Bnew;
3300: PetscInt m,n,N;
3303: MatCreate(comm,mat);
3304: MatGetSize(A,&m,&n);
3305: if (m != B->rmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Am %D != Bm %D",m,B->rmap->N);
3306: if (A->rmap->bs != B->rmap->bs) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"A row bs %D != B row bs %D",A->rmap->bs,B->rmap->bs);
3307: /* check below removed: when B is created using iscol_o from ISGetSeqIS_SameColDist_Private(), its bs may not be the same as A's */
3308: /* if (A->cmap->bs != B->cmap->bs) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"A column bs %D != B column bs %D",A->cmap->bs,B->cmap->bs); */
3310: /* Get global columns of mat */
3311: MPIU_Allreduce(&n,&N,1,MPIU_INT,MPI_SUM,comm);
3313: MatSetSizes(*mat,m,n,PETSC_DECIDE,N);
3314: MatSetType(*mat,MATMPIAIJ);
3315: MatSetBlockSizes(*mat,A->rmap->bs,A->cmap->bs);
3316: maij = (Mat_MPIAIJ*)(*mat)->data;
3318: (*mat)->preallocated = PETSC_TRUE;
3320: PetscLayoutSetUp((*mat)->rmap);
3321: PetscLayoutSetUp((*mat)->cmap);
3323: /* Set A as diagonal portion of *mat */
3324: maij->A = A;
3326: nz = oi[m];
3327: for (i=0; i<nz; i++) {
3328: col = oj[i];
3329: oj[i] = garray[col];
3330: }
3332: /* Set Bnew as off-diagonal portion of *mat */
3333: MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,m,N,oi,oj,oa,&Bnew);
3334: bnew = (Mat_SeqAIJ*)Bnew->data;
3335: bnew->maxnz = b->maxnz; /* allocated nonzeros of B */
3336: maij->B = Bnew;
3338: if (B->rmap->N != Bnew->rmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"BN %D != BnewN %D",B->rmap->N,Bnew->rmap->N);
3340: b->singlemalloc = PETSC_FALSE; /* B arrays are shared by Bnew */
3341: b->free_a = PETSC_FALSE;
3342: b->free_ij = PETSC_FALSE;
3343: MatDestroy(&B);
3345: bnew->singlemalloc = PETSC_TRUE; /* arrays will be freed by MatDestroy(&Bnew) */
3346: bnew->free_a = PETSC_TRUE;
3347: bnew->free_ij = PETSC_TRUE;
3349: /* condense columns of maij->B */
3350: MatSetOption(*mat,MAT_NO_OFF_PROC_ENTRIES,PETSC_TRUE);
3351: MatAssemblyBegin(*mat,MAT_FINAL_ASSEMBLY);
3352: MatAssemblyEnd(*mat,MAT_FINAL_ASSEMBLY);
3353: MatSetOption(*mat,MAT_NO_OFF_PROC_ENTRIES,PETSC_FALSE);
3354: MatSetOption(*mat,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_TRUE);
3355: return(0);
3356: }
3358: extern PetscErrorCode MatCreateSubMatrices_MPIAIJ_SingleIS_Local(Mat,PetscInt,const IS[],const IS[],MatReuse,PetscBool,Mat*);
3360: PetscErrorCode MatCreateSubMatrix_MPIAIJ_SameRowDist(Mat mat,IS isrow,IS iscol,MatReuse call,Mat *newmat)
3361: {
3363: PetscInt i,m,n,rstart,row,rend,nz,j,bs,cbs;
3364: PetscInt *ii,*jj,nlocal,*dlens,*olens,dlen,olen,jend,mglobal;
3365: Mat_MPIAIJ *a=(Mat_MPIAIJ*)mat->data;
3366: Mat M,Msub,B=a->B;
3367: MatScalar *aa;
3368: Mat_SeqAIJ *aij;
3369: PetscInt *garray = a->garray,*colsub,Ncols;
3370: PetscInt count,Bn=B->cmap->N,cstart=mat->cmap->rstart,cend=mat->cmap->rend;
3371: IS iscol_sub,iscmap;
3372: const PetscInt *is_idx,*cmap;
3373: PetscBool allcolumns=PETSC_FALSE;
3374: IS iscol_local=NULL;
3375: MPI_Comm comm;
3378: PetscObjectGetComm((PetscObject)mat,&comm);
3380: if (call == MAT_REUSE_MATRIX) {
3381: PetscObjectQuery((PetscObject)*newmat,"SubIScol",(PetscObject*)&iscol_sub);
3382: if (!iscol_sub) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"SubIScol passed in was not used before, cannot reuse");
3383: ISGetLocalSize(iscol_sub,&count);
3385: PetscObjectQuery((PetscObject)*newmat,"Subcmap",(PetscObject*)&iscmap);
3386: if (!iscmap) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Subcmap passed in was not used before, cannot reuse");
3388: PetscObjectQuery((PetscObject)*newmat,"SubMatrix",(PetscObject*)&Msub);
3389: if (!Msub) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Submatrix passed in was not used before, cannot reuse");
3391: MatCreateSubMatrices_MPIAIJ_SingleIS_Local(mat,1,&isrow,&iscol_sub,MAT_REUSE_MATRIX,PETSC_FALSE,&Msub);
3393: } else { /* call == MAT_INITIAL_MATRIX */
3394: PetscBool flg;
3396: ISGetLocalSize(iscol,&n);
3397: ISGetSize(iscol,&Ncols);
3399: /* (1) iscol -> nonscalable iscol_local */
3400: ISGetSeqIS_Private(mat,iscol,&iscol_local);
3401: ISGetLocalSize(iscol_local,&n); /* local size of iscol_local = global columns of newmat */
3402: if (n != Ncols) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"n %D != Ncols %D",n,Ncols);
3404: /* Check for special case: each processor gets entire matrix columns */
3405: ISIdentity(iscol_local,&flg);
3406: if (flg && n == mat->cmap->N) allcolumns = PETSC_TRUE;
3407: if (allcolumns) {
3408: iscol_sub = iscol_local;
3409: PetscObjectReference((PetscObject)iscol_local);
3410: ISCreateStride(PETSC_COMM_SELF,n,0,1,&iscmap);
3412: } else {
3413: /* (2) iscol_local -> iscol_sub and iscmap */
3414: PetscInt *idx,*cmap1,k;
3416: /* The implementation below requires iscol_local to be sorted; it may contain duplicate indices */
3417: ISSorted(iscol_local,&flg);
3418: if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"unsorted iscol_local is not implemented yet");
3420: PetscMalloc1(Ncols,&idx);
3421: PetscMalloc1(Ncols,&cmap1);
3422: ISGetIndices(iscol_local,&is_idx);
3423: count = 0;
3424: k = 0;
3425: for (i=0; i<Ncols; i++) {
3426: j = is_idx[i];
3427: if (j >= cstart && j < cend) {
3428: /* diagonal part of mat */
3429: idx[count] = j;
3430: cmap1[count++] = i; /* column index in submat */
3431: } else if (Bn) {
3432: /* off-diagonal part of mat */
3433: if (j == garray[k]) {
3434: idx[count] = j;
3435: cmap1[count++] = i; /* column index in submat */
3436: } else if (j > garray[k]) {
3437: while (j > garray[k] && k < Bn-1) k++;
3438: if (j == garray[k]) {
3439: idx[count] = j;
3440: cmap1[count++] = i; /* column index in submat */
3441: }
3442: }
3443: }
3444: }
3445: ISRestoreIndices(iscol_local,&is_idx);
3447: ISCreateGeneral(PETSC_COMM_SELF,count,idx,PETSC_OWN_POINTER,&iscol_sub);
3448: ISGetBlockSize(iscol,&cbs);
3449: ISSetBlockSize(iscol_sub,cbs);
3451: ISCreateGeneral(PetscObjectComm((PetscObject)iscol_local),count,cmap1,PETSC_OWN_POINTER,&iscmap);
3452: }
3454: /* (3) Create sequential Msub */
3455: MatCreateSubMatrices_MPIAIJ_SingleIS_Local(mat,1,&isrow,&iscol_sub,MAT_INITIAL_MATRIX,allcolumns,&Msub);
3456: }
3458: ISGetLocalSize(iscol_sub,&count);
3459: aij = (Mat_SeqAIJ*)(Msub)->data;
3460: ii = aij->i;
3461: ISGetIndices(iscmap,&cmap);
3463: /*
3464: m - number of local rows
3465: Ncols - number of columns (same on all processors)
3466: rstart - first row in new global matrix generated
3467: */
3468: MatGetSize(Msub,&m,NULL);
3470: if (call == MAT_INITIAL_MATRIX) {
3471: /* (4) Create parallel newmat */
3472: PetscMPIInt rank,size;
3473: PetscInt csize;
3475: MPI_Comm_size(comm,&size);
3476: MPI_Comm_rank(comm,&rank);
3478: /*
3479: Determine the number of non-zeros in the diagonal and off-diagonal
3480: portions of the matrix in order to do correct preallocation
3481: */
3483: /* first get start and end of "diagonal" columns */
3484: ISGetLocalSize(iscol,&csize);
3485: if (csize == PETSC_DECIDE) {
3486: ISGetSize(isrow,&mglobal);
3487: if (mglobal == Ncols) { /* square matrix */
3488: nlocal = m;
3489: } else {
3490: nlocal = Ncols/size + ((Ncols % size) > rank);
3491: }
3492: } else {
3493: nlocal = csize;
3494: }
3495: MPI_Scan(&nlocal,&rend,1,MPIU_INT,MPI_SUM,comm);
3496: rstart = rend - nlocal;
3497: if (rank == size - 1 && rend != Ncols) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Local column sizes %D do not add up to total number of columns %D",rend,Ncols);
3499: /* next, compute all the lengths */
3500: jj = aij->j;
3501: PetscMalloc1(2*m+1,&dlens);
3502: olens = dlens + m;
3503: for (i=0; i<m; i++) {
3504: jend = ii[i+1] - ii[i];
3505: olen = 0;
3506: dlen = 0;
3507: for (j=0; j<jend; j++) {
3508: if (cmap[*jj] < rstart || cmap[*jj] >= rend) olen++;
3509: else dlen++;
3510: jj++;
3511: }
3512: olens[i] = olen;
3513: dlens[i] = dlen;
3514: }
3516: ISGetBlockSize(isrow,&bs);
3517: ISGetBlockSize(iscol,&cbs);
3519: MatCreate(comm,&M);
3520: MatSetSizes(M,m,nlocal,PETSC_DECIDE,Ncols);
3521: MatSetBlockSizes(M,bs,cbs);
3522: MatSetType(M,((PetscObject)mat)->type_name);
3523: MatMPIAIJSetPreallocation(M,0,dlens,0,olens);
3524: PetscFree(dlens);
3526: } else { /* call == MAT_REUSE_MATRIX */
3527: M = *newmat;
3528: MatGetLocalSize(M,&i,NULL);
3529: if (i != m) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Previous matrix must be same size/layout as request");
3530: MatZeroEntries(M);
3531: /*
3532: The next two lines are needed so we may call MatSetValues_MPIAIJ() below directly,
3533: rather than the slower MatSetValues().
3534: */
3535: M->was_assembled = PETSC_TRUE;
3536: M->assembled = PETSC_FALSE;
3537: }
3539: /* (5) Set values of Msub to *newmat */
3540: PetscMalloc1(count,&colsub);
3541: MatGetOwnershipRange(M,&rstart,NULL);
3543: jj = aij->j;
3544: aa = aij->a;
3545: for (i=0; i<m; i++) {
3546: row = rstart + i;
3547: nz = ii[i+1] - ii[i];
3548: for (j=0; j<nz; j++) colsub[j] = cmap[jj[j]];
3549: MatSetValues_MPIAIJ(M,1,&row,nz,colsub,aa,INSERT_VALUES);
3550: jj += nz; aa += nz;
3551: }
3552: ISRestoreIndices(iscmap,&cmap);
3554: MatAssemblyBegin(M,MAT_FINAL_ASSEMBLY);
3555: MatAssemblyEnd(M,MAT_FINAL_ASSEMBLY);
3557: PetscFree(colsub);
3559: /* save Msub, iscol_sub and iscmap used in processor for next request */
3560: if (call == MAT_INITIAL_MATRIX) {
3561: *newmat = M;
3562: PetscObjectCompose((PetscObject)(*newmat),"SubMatrix",(PetscObject)Msub);
3563: MatDestroy(&Msub);
3565: PetscObjectCompose((PetscObject)(*newmat),"SubIScol",(PetscObject)iscol_sub);
3566: ISDestroy(&iscol_sub);
3568: PetscObjectCompose((PetscObject)(*newmat),"Subcmap",(PetscObject)iscmap);
3569: ISDestroy(&iscmap);
3571: if (iscol_local) {
3572: PetscObjectCompose((PetscObject)(*newmat),"ISAllGather",(PetscObject)iscol_local);
3573: ISDestroy(&iscol_local);
3574: }
3575: }
3576: return(0);
3577: }
3579: /*
3580: Not great since it makes two copies of the submatrix: first a SeqAIJ
3581: locally, and then the end result by concatenating the local matrices.
3582: Writing it directly would be much like MatCreateSubMatrices_MPIAIJ().
3584: Note: This requires a sequential iscol with all indices.
3585: */
3586: PetscErrorCode MatCreateSubMatrix_MPIAIJ_nonscalable(Mat mat,IS isrow,IS iscol,PetscInt csize,MatReuse call,Mat *newmat)
3587: {
3589: PetscMPIInt rank,size;
3590: PetscInt i,m,n,rstart,row,rend,nz,*cwork,j,bs,cbs;
3591: PetscInt *ii,*jj,nlocal,*dlens,*olens,dlen,olen,jend,mglobal;
3592: Mat M,Mreuse;
3593: MatScalar *aa,*vwork;
3594: MPI_Comm comm;
3595: Mat_SeqAIJ *aij;
3596: PetscBool colflag,allcolumns=PETSC_FALSE;
3599: PetscObjectGetComm((PetscObject)mat,&comm);
3600: MPI_Comm_rank(comm,&rank);
3601: MPI_Comm_size(comm,&size);
3603: /* Check for special case: each processor gets entire matrix columns */
3604: ISIdentity(iscol,&colflag);
3605: ISGetLocalSize(iscol,&n);
3606: if (colflag && n == mat->cmap->N) allcolumns = PETSC_TRUE;
3608: if (call == MAT_REUSE_MATRIX) {
3609: PetscObjectQuery((PetscObject)*newmat,"SubMatrix",(PetscObject*)&Mreuse);
3610: if (!Mreuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Submatrix passed in was not used before, cannot reuse");
3611: MatCreateSubMatrices_MPIAIJ_SingleIS_Local(mat,1,&isrow,&iscol,MAT_REUSE_MATRIX,allcolumns,&Mreuse);
3612: } else {
3613: MatCreateSubMatrices_MPIAIJ_SingleIS_Local(mat,1,&isrow,&iscol,MAT_INITIAL_MATRIX,allcolumns,&Mreuse);
3614: }
3616: /*
3617: m - number of local rows
3618: n - number of columns (same on all processors)
3619: rstart - first row in new global matrix generated
3620: */
3621: MatGetSize(Mreuse,&m,&n);
3622: MatGetBlockSizes(Mreuse,&bs,&cbs);
3623: if (call == MAT_INITIAL_MATRIX) {
3624: aij = (Mat_SeqAIJ*)(Mreuse)->data;
3625: ii = aij->i;
3626: jj = aij->j;
3628: /*
3629: Determine the number of non-zeros in the diagonal and off-diagonal
3630: portions of the matrix in order to do correct preallocation
3631: */
3633: /* first get start and end of "diagonal" columns */
3634: if (csize == PETSC_DECIDE) {
3635: ISGetSize(isrow,&mglobal);
3636: if (mglobal == n) { /* square matrix */
3637: nlocal = m;
3638: } else {
3639: nlocal = n/size + ((n % size) > rank);
3640: }
3641: } else {
3642: nlocal = csize;
3643: }
3644: MPI_Scan(&nlocal,&rend,1,MPIU_INT,MPI_SUM,comm);
3645: rstart = rend - nlocal;
3646: if (rank == size - 1 && rend != n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Local column sizes %D do not add up to total number of columns %D",rend,n);
3648: /* next, compute all the lengths */
3649: PetscMalloc1(2*m+1,&dlens);
3650: olens = dlens + m;
3651: for (i=0; i<m; i++) {
3652: jend = ii[i+1] - ii[i];
3653: olen = 0;
3654: dlen = 0;
3655: for (j=0; j<jend; j++) {
3656: if (*jj < rstart || *jj >= rend) olen++;
3657: else dlen++;
3658: jj++;
3659: }
3660: olens[i] = olen;
3661: dlens[i] = dlen;
3662: }
3663: MatCreate(comm,&M);
3664: MatSetSizes(M,m,nlocal,PETSC_DECIDE,n);
3665: MatSetBlockSizes(M,bs,cbs);
3666: MatSetType(M,((PetscObject)mat)->type_name);
3667: MatMPIAIJSetPreallocation(M,0,dlens,0,olens);
3668: PetscFree(dlens);
3669: } else {
3670: PetscInt ml,nl;
3672: M = *newmat;
3673: MatGetLocalSize(M,&ml,&nl);
3674: if (ml != m) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Previous matrix must be same size/layout as request");
3675: MatZeroEntries(M);
3676: /*
3677: The next two lines are needed so we may call MatSetValues_MPIAIJ() below directly,
3678: rather than the slower MatSetValues().
3679: */
3680: M->was_assembled = PETSC_TRUE;
3681: M->assembled = PETSC_FALSE;
3682: }
3683: MatGetOwnershipRange(M,&rstart,&rend);
3684: aij = (Mat_SeqAIJ*)(Mreuse)->data;
3685: ii = aij->i;
3686: jj = aij->j;
3687: aa = aij->a;
3688: for (i=0; i<m; i++) {
3689: row = rstart + i;
3690: nz = ii[i+1] - ii[i];
3691: cwork = jj; jj += nz;
3692: vwork = aa; aa += nz;
3693: MatSetValues_MPIAIJ(M,1,&row,nz,cwork,vwork,INSERT_VALUES);
3694: }
3696: MatAssemblyBegin(M,MAT_FINAL_ASSEMBLY);
3697: MatAssemblyEnd(M,MAT_FINAL_ASSEMBLY);
3698: *newmat = M;
3700: /* save submatrix used in processor for next request */
3701: if (call == MAT_INITIAL_MATRIX) {
3702: PetscObjectCompose((PetscObject)M,"SubMatrix",(PetscObject)Mreuse);
3703: MatDestroy(&Mreuse);
3704: }
3705: return(0);
3706: }
3708: PetscErrorCode MatMPIAIJSetPreallocationCSR_MPIAIJ(Mat B,const PetscInt Ii[],const PetscInt J[],const PetscScalar v[])
3709: {
3710: PetscInt m,cstart, cend,j,nnz,i,d;
3711: PetscInt *d_nnz,*o_nnz,nnz_max = 0,rstart,ii;
3712: const PetscInt *JJ;
3713: PetscScalar *values;
3715: PetscBool nooffprocentries;
3718: if (Ii[0]) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Ii[0] must be 0, it is %D",Ii[0]);
3720: PetscLayoutSetUp(B->rmap);
3721: PetscLayoutSetUp(B->cmap);
3722: m = B->rmap->n;
3723: cstart = B->cmap->rstart;
3724: cend = B->cmap->rend;
3725: rstart = B->rmap->rstart;
3727: PetscMalloc2(m,&d_nnz,m,&o_nnz);
3729: #if defined(PETSC_USE_DEBUG)
3730: for (i=0; i<m; i++) {
3731: nnz = Ii[i+1]- Ii[i];
3732: JJ  = J + Ii[i];
3733: if (nnz < 0) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Local row %D has a negative number of columns %D",i,nnz);
3734: if (nnz && (JJ[0] < 0)) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Row %D starts with negative column index %D",i,JJ[0]);
3735: if (nnz && (JJ[nnz-1] >= B->cmap->N)) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Row %D ends with too large a column index %D (max allowed %D)",i,JJ[nnz-1],B->cmap->N);
3736: }
3737: #endif
3739: for (i=0; i<m; i++) {
3740: nnz = Ii[i+1]- Ii[i];
3741: JJ = J + Ii[i];
3742: nnz_max = PetscMax(nnz_max,nnz);
3743: d = 0;
3744: for (j=0; j<nnz; j++) {
3745: if (cstart <= JJ[j] && JJ[j] < cend) d++;
3746: }
3747: d_nnz[i] = d;
3748: o_nnz[i] = nnz - d;
3749: }
3750: MatMPIAIJSetPreallocation(B,0,d_nnz,0,o_nnz);
3751: PetscFree2(d_nnz,o_nnz);
3753: if (v) values = (PetscScalar*)v;
3754: else {
3755: PetscCalloc1(nnz_max+1,&values);
3756: }
3758: for (i=0; i<m; i++) {
3759: ii = i + rstart;
3760: nnz = Ii[i+1]- Ii[i];
3761: MatSetValues_MPIAIJ(B,1,&ii,nnz,J+Ii[i],values+(v ? Ii[i] : 0),INSERT_VALUES);
3762: }
3763: nooffprocentries = B->nooffprocentries;
3764: B->nooffprocentries = PETSC_TRUE;
3765: MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);
3766: MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);
3767: B->nooffprocentries = nooffprocentries;
3769: if (!v) {
3770: PetscFree(values);
3771: }
3772: MatSetOption(B,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_TRUE);
3773: return(0);
3774: }
3776: /*@
3777: MatMPIAIJSetPreallocationCSR - Allocates memory for a sparse parallel matrix in AIJ format
3778: (the default parallel PETSc format).
3780: Collective on MPI_Comm
3782: Input Parameters:
3783: + B - the matrix
3784: . i - the indices into j for the start of each local row (starts with zero)
3785: . j - the column indices for each local row (starts with zero)
3786: - v - optional values in the matrix
3788: Level: developer
3790: Notes:
3791: The i, j, and v arrays ARE copied by this routine into the internal format used by PETSc;
3792: thus you CANNOT change the matrix entries by changing the values of v[] after you have
3793: called this routine. Use MatCreateMPIAIJWithSplitArrays() to avoid needing to copy the arrays.
3795: The i and j indices are 0-based, and the i indices are offsets into the local j array.
3797: The format used for the sparse matrix input is equivalent to a
3798: row-major ordering, i.e. for the following matrix, the input data expected is
3799: as shown:
3801: $ 1 0 0
3802: $ 2 0 3 P0
3803: $ -------
3804: $ 4 5 6 P1
3805: $
3806: $ Process0 [P0]: rows_owned=[0,1]
3807: $ i = {0,1,3} [size = nrow+1 = 2+1]
3808: $ j = {0,0,2} [size = 3]
3809: $ v = {1,2,3} [size = 3]
3810: $
3811: $ Process1 [P1]: rows_owned=[2]
3812: $ i = {0,3} [size = nrow+1 = 1+1]
3813: $ j = {0,1,2} [size = 3]
3814: $ v = {4,5,6} [size = 3]
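 A sketch of the corresponding call on process 0, using the arrays above
 (process 1 would make the same calls with its own i = {0,3}, j = {0,1,2},
 v = {4,5,6} and local row count 1):

.vb
      Mat         B;
      PetscInt    i[] = {0,1,3},j[] = {0,0,2};
      PetscScalar v[] = {1,2,3};

      MatCreate(PETSC_COMM_WORLD,&B);
      MatSetSizes(B,2,PETSC_DECIDE,3,3);
      MatSetType(B,MATMPIAIJ);
      MatMPIAIJSetPreallocationCSR(B,i,j,v);
.ve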
3816: .keywords: matrix, aij, compressed row, sparse, parallel
3818: .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatMPIAIJSetPreallocation(), MatCreateAIJ(), MATMPIAIJ,
3819: MatCreateSeqAIJWithArrays(), MatCreateMPIAIJWithSplitArrays()
3820: @*/
3821: PetscErrorCode MatMPIAIJSetPreallocationCSR(Mat B,const PetscInt i[],const PetscInt j[], const PetscScalar v[])
3822: {
3826: PetscTryMethod(B,"MatMPIAIJSetPreallocationCSR_C",(Mat,const PetscInt[],const PetscInt[],const PetscScalar[]),(B,i,j,v));
3827: return(0);
3828: }
3830: /*@C
3831: MatMPIAIJSetPreallocation - Preallocates memory for a sparse parallel matrix in AIJ format
3832: (the default parallel PETSc format). For good matrix assembly performance
3833: the user should preallocate the matrix storage by setting the parameters
3834: d_nz (or d_nnz) and o_nz (or o_nnz). By setting these parameters accurately,
3835: performance can be increased by more than a factor of 50.
3837: Collective on MPI_Comm
3839: Input Parameters:
3840: + B - the matrix
3841: . d_nz - number of nonzeros per row in DIAGONAL portion of local submatrix
3842: (same value is used for all local rows)
3843: . d_nnz - array containing the number of nonzeros in the various rows of the
3844: DIAGONAL portion of the local submatrix (possibly different for each row)
3845: or NULL (PETSC_NULL_INTEGER in Fortran), if d_nz is used to specify the nonzero structure.
3846: The size of this array is equal to the number of local rows, i.e 'm'.
3847: For matrices that will be factored, you must leave room for (and set)
3848: the diagonal entry even if it is zero.
3849: . o_nz - number of nonzeros per row in the OFF-DIAGONAL portion of local
3850: submatrix (same value is used for all local rows).
3851: - o_nnz - array containing the number of nonzeros in the various rows of the
3852: OFF-DIAGONAL portion of the local submatrix (possibly different for
3853: each row) or NULL (PETSC_NULL_INTEGER in Fortran), if o_nz is used to specify the nonzero
3854: structure. The size of this array is equal to the number
3855: of local rows, i.e 'm'.
3857: If the *_nnz parameter is given then the *_nz parameter is ignored
3859: The AIJ format (also called the Yale sparse matrix format or
3860: compressed row storage (CSR)) is fully compatible with standard Fortran 77
3861: storage. The stored row and column indices begin with zero.
3862: See Users-Manual: ch_mat for details.
3864: The parallel matrix is partitioned such that the first m0 rows belong to
3865: process 0, the next m1 rows belong to process 1, the next m2 rows belong
3866: to process 2, etc., where m0,m1,m2,... are the input parameter 'm'.
3868: The DIAGONAL portion of the local submatrix of a processor can be defined
3869: as the submatrix obtained by extracting the part corresponding to
3870: the rows r1-r2 and columns c1-c2 of the global matrix, where r1 is the
3871: first row that belongs to the processor, r2 is the last row belonging to
3872: this processor, and c1-c2 is the range of indices of the local part of a
3873: vector suitable for applying the matrix to. This is an m x n matrix. In the
3874: common case of a square matrix, the row and column ranges are the same and
3875: the DIAGONAL part is also square. The remaining portion of the local
3876: submatrix (m x (N-n)) constitutes the OFF-DIAGONAL portion.
3878: If o_nnz, d_nnz are specified, then o_nz, and d_nz are ignored.
3880: You can call MatGetInfo() to get information on how effective the preallocation was;
3881: for example the fields mallocs,nz_allocated,nz_used,nz_unneeded;
3882: You can also run with the option -info and look for messages with the string
3883: malloc in them to see if additional memory allocation was needed.
3885: Example usage:
3887: Consider the following 8x8 matrix with 34 non-zero values, that is
3888: assembled across 3 processors. Let's assume that proc0 owns 3 rows,
3889: proc1 owns 3 rows, proc2 owns 2 rows. This division can be shown
3890: as follows:
3892: .vb
3893: 1 2 0 | 0 3 0 | 0 4
3894: Proc0 0 5 6 | 7 0 0 | 8 0
3895: 9 0 10 | 11 0 0 | 12 0
3896: -------------------------------------
3897: 13 0 14 | 15 16 17 | 0 0
3898: Proc1 0 18 0 | 19 20 21 | 0 0
3899: 0 0 0 | 22 23 0 | 24 0
3900: -------------------------------------
3901: Proc2 25 26 27 | 0 0 28 | 29 0
3902: 30 0 0 | 31 32 33 | 0 34
3903: .ve
3905: This can be represented as a collection of submatrices as:
3907: .vb
3908: A B C
3909: D E F
3910: G H I
3911: .ve
3913: Where the submatrices A,B,C are owned by proc0, D,E,F are
3914: owned by proc1, G,H,I are owned by proc2.
3916: The 'm' parameters for proc0,proc1,proc2 are 3,3,2 respectively.
3917: The 'n' parameters for proc0,proc1,proc2 are 3,3,2 respectively.
3918: The 'M','N' parameters are 8,8, and have the same values on all procs.
3920: The DIAGONAL submatrices corresponding to proc0,proc1,proc2 are
3921: submatrices [A], [E], [I] respectively. The OFF-DIAGONAL submatrices
3922: corresponding to proc0,proc1,proc2 are [BC], [DF], [GH] respectively.
3923: Internally, each processor stores the DIAGONAL part, and the OFF-DIAGONAL
3924: part as SeqAIJ matrices. For example, proc1 will store [E] as a SeqAIJ
3925: matrix, and [DF] as another SeqAIJ matrix.
3927: When d_nz, o_nz parameters are specified, d_nz storage elements are
3928: allocated for every row of the local diagonal submatrix, and o_nz
3929: storage locations are allocated for every row of the OFF-DIAGONAL submat.
3930: One way to choose d_nz and o_nz is to use the maximum number of nonzeros over
3931: the local rows for each of the local DIAGONAL, and the OFF-DIAGONAL submatrices.
3932: In this case, the values of d_nz,o_nz are:
3933: .vb
3934: proc0 : dnz = 2, o_nz = 2
3935: proc1 : dnz = 3, o_nz = 2
3936: proc2 : dnz = 1, o_nz = 4
3937: .ve
3938: We are allocating m*(d_nz+o_nz) storage locations for every proc. This
3939: translates to 3*(2+2)=12 for proc0, 3*(3+2)=15 for proc1, 2*(1+4)=10
3940: for proc2, i.e. we are using 12+15+10=37 storage locations to store
3941: 34 values.
3943: When d_nnz, o_nnz parameters are specified, the storage is specified
3944: for every row, corresponding to both DIAGONAL and OFF-DIAGONAL submatrices.
3945: In the above case the values for d_nnz,o_nnz are:
3946: .vb
3947: proc0: d_nnz = [2,2,2] and o_nnz = [2,2,2]
3948: proc1: d_nnz = [3,3,2] and o_nnz = [2,1,1]
3949: proc2: d_nnz = [1,1] and o_nnz = [4,4]
3950: .ve
3951: Here the space allocated is the sum of all the above values, i.e. 34, and
3952: hence the preallocation is perfect.
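 As a sketch, the corresponding preallocation call on proc1 of this example
 would be (B is the Mat being preallocated):

.vb
      PetscInt d_nnz[] = {3,3,2},o_nnz[] = {2,1,1};
      MatMPIAIJSetPreallocation(B,0,d_nnz,0,o_nnz);
.ve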
3954: Level: intermediate
3956: .keywords: matrix, aij, compressed row, sparse, parallel
3958: .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatCreateAIJ(), MatMPIAIJSetPreallocationCSR(),
3959: MATMPIAIJ, MatGetInfo(), PetscSplitOwnership()
3960: @*/
3961: PetscErrorCode MatMPIAIJSetPreallocation(Mat B,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[])
3962: {
3968: PetscTryMethod(B,"MatMPIAIJSetPreallocation_C",(Mat,PetscInt,const PetscInt[],PetscInt,const PetscInt[]),(B,d_nz,d_nnz,o_nz,o_nnz));
3969: return(0);
3970: }
3972: /*@
3973: MatCreateMPIAIJWithArrays - creates an MPI AIJ matrix using arrays that contain
3974: the local rows in standard CSR format.
3976: Collective on MPI_Comm
3978: Input Parameters:
3979: + comm - MPI communicator
3980: . m - number of local rows (Cannot be PETSC_DECIDE)
3981: . n - This value should be the same as the local size used in creating the
3982: x vector for the matrix-vector product y = Ax (or PETSC_DECIDE to have it
3983: calculated if N is given). For square matrices n is almost always m.
3984: . M - number of global rows (or PETSC_DETERMINE to have calculated if m is given)
3985: . N - number of global columns (or PETSC_DETERMINE to have calculated if n is given)
3986: . i - row indices
3987: . j - column indices
3988: - a - matrix values
3990: Output Parameter:
3991: . mat - the matrix
3993: Level: intermediate
3995: Notes:
3996: The i, j, and a arrays ARE copied by this routine into the internal format used by PETSc;
3997: thus you CANNOT change the matrix entries by changing the values of a[] after you have
3998: called this routine. Use MatCreateMPIAIJWithSplitArrays() to avoid needing to copy the arrays.
4000: The i and j indices are 0-based, and the i indices are offsets into the local j array.
4002: The format used for the sparse matrix input is equivalent to a
4003: row-major ordering, i.e. for the following matrix, the input data expected is
4004: as shown:
4006: $ 1 0 0
4007: $ 2 0 3 P0
4008: $ -------
4009: $ 4 5 6 P1
4010: $
4011: $ Process0 [P0]: rows_owned=[0,1]
4012: $ i = {0,1,3} [size = nrow+1 = 2+1]
4013: $ j = {0,0,2} [size = 3]
4014: $ v = {1,2,3} [size = 3]
4015: $
4016: $ Process1 [P1]: rows_owned=[2]
4017: $ i = {0,3} [size = nrow+1 = 1+1]
4018: $ j = {0,1,2} [size = 3]
4019: $ v = {4,5,6} [size = 3]
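 A sketch of the call on process 0 with the arrays above (process 1 would pass
 its own arrays and a local row count of 1):

.vb
      Mat         A;
      PetscInt    i[] = {0,1,3},j[] = {0,0,2};
      PetscScalar a[] = {1,2,3};

      MatCreateMPIAIJWithArrays(PETSC_COMM_WORLD,2,PETSC_DECIDE,3,3,i,j,a,&A);
.ve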
4021: .keywords: matrix, aij, compressed row, sparse, parallel
4023: .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatMPIAIJSetPreallocation(), MatMPIAIJSetPreallocationCSR(),
4024: MATMPIAIJ, MatCreateAIJ(), MatCreateMPIAIJWithSplitArrays()
4025: @*/
4026: PetscErrorCode MatCreateMPIAIJWithArrays(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt M,PetscInt N,const PetscInt i[],const PetscInt j[],const PetscScalar a[],Mat *mat)
4027: {
4031: if (i[0]) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"i (row indices) must start with 0");
4032: if (m < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"local number of rows (m) cannot be PETSC_DECIDE, or negative");
4033: MatCreate(comm,mat);
4034: MatSetSizes(*mat,m,n,M,N);
4035: /* MatSetBlockSizes(M,bs,cbs); */
4036: MatSetType(*mat,MATMPIAIJ);
4037: MatMPIAIJSetPreallocationCSR(*mat,i,j,a);
4038: return(0);
4039: }
4041: /*@C
4042: MatCreateAIJ - Creates a sparse parallel matrix in AIJ format
4043: (the default parallel PETSc format). For good matrix assembly performance
4044: the user should preallocate the matrix storage by setting the parameters
4045: d_nz (or d_nnz) and o_nz (or o_nnz). By setting these parameters accurately,
4046: performance can be increased by more than a factor of 50.
4048: Collective on MPI_Comm
4050: Input Parameters:
4051: + comm - MPI communicator
4052: . m - number of local rows (or PETSC_DECIDE to have calculated if M is given)
4053: This value should be the same as the local size used in creating the
4054: y vector for the matrix-vector product y = Ax.
4055: . n - This value should be the same as the local size used in creating the
4056: x vector for the matrix-vector product y = Ax (or PETSC_DECIDE to have it
4057: calculated if N is given). For square matrices n is almost always m.
4058: . M - number of global rows (or PETSC_DETERMINE to have calculated if m is given)
4059: . N - number of global columns (or PETSC_DETERMINE to have calculated if n is given)
4060: . d_nz - number of nonzeros per row in DIAGONAL portion of local submatrix
4061: (same value is used for all local rows)
4062: . d_nnz - array containing the number of nonzeros in the various rows of the
4063: DIAGONAL portion of the local submatrix (possibly different for each row)
4064: or NULL, if d_nz is used to specify the nonzero structure.
4065: The size of this array is equal to the number of local rows, i.e 'm'.
4066: . o_nz - number of nonzeros per row in the OFF-DIAGONAL portion of local
4067: submatrix (same value is used for all local rows).
4068: - o_nnz - array containing the number of nonzeros in the various rows of the
4069: OFF-DIAGONAL portion of the local submatrix (possibly different for
4070: each row) or NULL, if o_nz is used to specify the nonzero
4071: structure. The size of this array is equal to the number
4072: of local rows, i.e 'm'.
4074: Output Parameter:
4075: . A - the matrix
4077: It is recommended that one use the MatCreate(), MatSetType() and/or MatSetFromOptions(),
4078: MatXXXXSetPreallocation() paradigm instead of this routine directly.
4079: [MatXXXXSetPreallocation() is, for example, MatSeqAIJSetPreallocation]
4081: Notes:
4082: If the *_nnz parameter is given then the *_nz parameter is ignored
4084: m,n,M,N parameters specify the size of the matrix, and its partitioning across
4085: processors, while d_nz,d_nnz,o_nz,o_nnz parameters specify the approximate
4086: storage requirements for this matrix.
4088: If PETSC_DECIDE or PETSC_DETERMINE is used for a particular argument on one
4089: processor then it must be used on all processors that share the object for
4090: that argument.
4092: The user MUST specify either the local or global matrix dimensions
4093: (possibly both).
4095: The parallel matrix is partitioned across processors such that the
4096: first m0 rows belong to process 0, the next m1 rows belong to
4097: process 1, the next m2 rows belong to process 2 etc.. where
4098: m0,m1,m2,... are the input parameter 'm', i.e. each processor stores
4099: values corresponding to an [m x N] submatrix.
4101: The columns are logically partitioned with the n0 columns belonging
4102: to 0th partition, the next n1 columns belonging to the next
4103: partition etc.. where n0,n1,n2... are the input parameter 'n'.
4105: The DIAGONAL portion of the local submatrix on any given processor
4106: is the submatrix corresponding to the rows and columns m,n
4107: owned by the given processor, i.e. the diagonal matrix on
4108: process 0 is [m0 x n0], the diagonal matrix on process 1 is [m1 x n1]
4109: etc. The remaining portion of the local submatrix [m x (N-n)]
4110: constitute the OFF-DIAGONAL portion. The example below better
4111: illustrates this concept.
4113: For a square global matrix we define each processor's diagonal portion
4114: to be its local rows and the corresponding columns (a square submatrix);
4115: each processor's off-diagonal portion encompasses the remainder of the
4116: local matrix (a rectangular submatrix).
4118: If o_nnz, d_nnz are specified, then o_nz, and d_nz are ignored.
4120: When calling this routine with a single process communicator, a matrix of
4121: type SEQAIJ is returned. If a matrix of type MPIAIJ is desired for this
4122: type of communicator, use the construction mechanism
4123: .vb
4124:      MatCreate(...,&A);
4125:      MatSetType(A,MATMPIAIJ);
4126:      MatSetSizes(A, m,n,M,N);
4127:      MatMPIAIJSetPreallocation(A,...);
4128: .ve
4132: By default, this format uses inodes (identical nodes) when possible.
4133: We search for consecutive rows with the same nonzero structure, thereby
4134: reusing matrix information to achieve increased efficiency.
4136: Options Database Keys:
4137: + -mat_no_inode - Do not use inodes
4138: . -mat_inode_limit <limit> - Sets inode limit (max limit=5)
4139: - -mat_aij_oneindex - Internally use indexing starting at 1
4140: rather than 0. Note that when calling MatSetValues(),
4141: the user still MUST index entries starting at 0!
4144: Example usage:
4146: Consider the following 8x8 matrix with 34 non-zero values, that is
4147: assembled across 3 processors. Let's assume that proc0 owns 3 rows,
4148: proc1 owns 3 rows, proc2 owns 2 rows. This division can be shown
4149: as follows
4151: .vb
4152: 1 2 0 | 0 3 0 | 0 4
4153: Proc0 0 5 6 | 7 0 0 | 8 0
4154: 9 0 10 | 11 0 0 | 12 0
4155: -------------------------------------
4156: 13 0 14 | 15 16 17 | 0 0
4157: Proc1 0 18 0 | 19 20 21 | 0 0
4158: 0 0 0 | 22 23 0 | 24 0
4159: -------------------------------------
4160: Proc2 25 26 27 | 0 0 28 | 29 0
4161: 30 0 0 | 31 32 33 | 0 34
4162: .ve
4164: This can be represented as a collection of submatrices as
4166: .vb
4167: A B C
4168: D E F
4169: G H I
4170: .ve
4172: Where the submatrices A,B,C are owned by proc0, D,E,F are
4173: owned by proc1, G,H,I are owned by proc2.
4175: The 'm' parameters for proc0,proc1,proc2 are 3,3,2 respectively.
4176: The 'n' parameters for proc0,proc1,proc2 are 3,3,2 respectively.
4177: The 'M','N' parameters are 8,8, and have the same values on all procs.
4179: The DIAGONAL submatrices corresponding to proc0,proc1,proc2 are
4180: submatrices [A], [E], [I] respectively. The OFF-DIAGONAL submatrices
4181: corresponding to proc0,proc1,proc2 are [BC], [DF], [GH] respectively.
4182: Internally, each processor stores the DIAGONAL part, and the OFF-DIAGONAL
4183: part as SeqAIJ matrices. For example, proc1 will store [E] as a SeqAIJ
4184: matrix, and [DF] as another SeqAIJ matrix.
4186: When d_nz, o_nz parameters are specified, d_nz storage elements are
4187: allocated for every row of the local diagonal submatrix, and o_nz
4188: storage locations are allocated for every row of the OFF-DIAGONAL submat.
4189: One way to choose d_nz and o_nz is to use the maximum number of nonzeros over
4190: the local rows for each of the local DIAGONAL, and the OFF-DIAGONAL submatrices.
4191: In this case, the values of d_nz,o_nz are
4192: .vb
4193: proc0 : dnz = 2, o_nz = 2
4194: proc1 : dnz = 3, o_nz = 2
4195: proc2 : dnz = 1, o_nz = 4
4196: .ve
4197: We are allocating m*(d_nz+o_nz) storage locations for every proc. This
4198: translates to 3*(2+2)=12 for proc0, 3*(3+2)=15 for proc1, 2*(1+4)=10
4199: for proc2, i.e. we are using 12+15+10=37 storage locations to store
4200: 34 values.
4202: When d_nnz, o_nnz parameters are specified, the storage is specified
4203: for every row, corresponding to both DIAGONAL and OFF-DIAGONAL submatrices.
4204: In the above case the values for d_nnz,o_nnz are
4205: .vb
4206: proc0: d_nnz = [2,2,2] and o_nnz = [2,2,2]
4207: proc1: d_nnz = [3,3,2] and o_nnz = [2,1,1]
4208: proc2: d_nnz = [1,1] and o_nnz = [4,4]
4209: .ve
4210: Here the space allocated is the sum of all the above values, i.e. 34, and
4211: hence the preallocation is perfect.
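 As a sketch, the matching creation call on proc1 of this example would be
 (proc0 and proc2 would pass their own local sizes and nnz arrays):

.vb
      Mat      A;
      PetscInt d_nnz[] = {3,3,2},o_nnz[] = {2,1,1};

      MatCreateAIJ(PETSC_COMM_WORLD,3,3,8,8,0,d_nnz,0,o_nnz,&A);
.ve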
4213: Level: intermediate
4215: .keywords: matrix, aij, compressed row, sparse, parallel
4217: .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatMPIAIJSetPreallocation(), MatMPIAIJSetPreallocationCSR(),
4218: MATMPIAIJ, MatCreateMPIAIJWithArrays()
4219: @*/
4220: PetscErrorCode MatCreateAIJ(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt M,PetscInt N,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[],Mat *A)
4221: {
4223: PetscMPIInt size;
4226: MatCreate(comm,A);
4227: MatSetSizes(*A,m,n,M,N);
4228: MPI_Comm_size(comm,&size);
4229: if (size > 1) {
4230: MatSetType(*A,MATMPIAIJ);
4231: MatMPIAIJSetPreallocation(*A,d_nz,d_nnz,o_nz,o_nnz);
4232: } else {
4233: MatSetType(*A,MATSEQAIJ);
4234: MatSeqAIJSetPreallocation(*A,d_nz,d_nnz);
4235: }
4236: return(0);
4237: }
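/*
   Usage sketch for MatMPIAIJGetSeqAIJ() below: retrieve the diagonal block, the
   off-diagonal block, and the local-to-global column map of an MPIAIJ matrix
   (any output argument may be NULL if it is not needed):

     Mat            Ad,Ao;
     const PetscInt *garray;
     MatMPIAIJGetSeqAIJ(A,&Ad,&Ao,&garray);
*/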
4239: PetscErrorCode MatMPIAIJGetSeqAIJ(Mat A,Mat *Ad,Mat *Ao,const PetscInt *colmap[])
4240: {
4241: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
4242: PetscBool flg;
4244:
4246: PetscObjectTypeCompare((PetscObject)A,MATMPIAIJ,&flg);
4247: if (!flg) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"This function requires a MATMPIAIJ matrix as input");
4248: if (Ad) *Ad = a->A;
4249: if (Ao) *Ao = a->B;
4250: if (colmap) *colmap = a->garray;
4251: return(0);
4252: }
4254: PetscErrorCode MatCreateMPIMatConcatenateSeqMat_MPIAIJ(MPI_Comm comm,Mat inmat,PetscInt n,MatReuse scall,Mat *outmat)
4255: {
4257: PetscInt m,N,i,rstart,nnz,Ii;
4258: PetscInt *indx;
4259: PetscScalar *values;
4262: MatGetSize(inmat,&m,&N);
4263: if (scall == MAT_INITIAL_MATRIX) { /* symbolic phase */
4264: PetscInt *dnz,*onz,sum,bs,cbs;
4266: if (n == PETSC_DECIDE) {
4267: PetscSplitOwnership(comm,&n,&N);
4268: }
4269: /* Check sum(n) = N */
4270: MPIU_Allreduce(&n,&sum,1,MPIU_INT,MPI_SUM,comm);
4271: if (sum != N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Sum of local columns %D != global columns %D",sum,N);
4273: MPI_Scan(&m, &rstart,1,MPIU_INT,MPI_SUM,comm);
4274: rstart -= m;
4276: MatPreallocateInitialize(comm,m,n,dnz,onz);
4277: for (i=0; i<m; i++) {
4278: MatGetRow_SeqAIJ(inmat,i,&nnz,&indx,NULL);
4279: MatPreallocateSet(i+rstart,nnz,indx,dnz,onz);
4280: MatRestoreRow_SeqAIJ(inmat,i,&nnz,&indx,NULL);
4281: }
4283: MatCreate(comm,outmat);
4284: MatSetSizes(*outmat,m,n,PETSC_DETERMINE,PETSC_DETERMINE);
4285: MatGetBlockSizes(inmat,&bs,&cbs);
4286: MatSetBlockSizes(*outmat,bs,cbs);
4287: MatSetType(*outmat,MATAIJ);
4288: MatSeqAIJSetPreallocation(*outmat,0,dnz);
4289: MatMPIAIJSetPreallocation(*outmat,0,dnz,0,onz);
4290: MatPreallocateFinalize(dnz,onz);
4291: }
4293: /* numeric phase */
4294: MatGetOwnershipRange(*outmat,&rstart,NULL);
4295: for (i=0; i<m; i++) {
4296: MatGetRow_SeqAIJ(inmat,i,&nnz,&indx,&values);
4297: Ii = i + rstart;
4298: MatSetValues(*outmat,1,&Ii,nnz,indx,values,INSERT_VALUES);
4299: MatRestoreRow_SeqAIJ(inmat,i,&nnz,&indx,&values);
4300: }
4301: MatAssemblyBegin(*outmat,MAT_FINAL_ASSEMBLY);
4302: MatAssemblyEnd(*outmat,MAT_FINAL_ASSEMBLY);
4303: return(0);
4304: }
4306: PetscErrorCode MatFileSplit(Mat A,char *outfile)
4307: {
4308: PetscErrorCode ierr;
4309: PetscMPIInt rank;
4310: PetscInt m,N,i,rstart,nnz;
4311: size_t len;
4312: const PetscInt *indx;
4313: PetscViewer out;
4314: char *name;
4315: Mat B;
4316: const PetscScalar *values;
4319: MatGetLocalSize(A,&m,0);
4320: MatGetSize(A,0,&N);
4321: /* Should this be the type of the diagonal block of A? */
4322: MatCreate(PETSC_COMM_SELF,&B);
4323: MatSetSizes(B,m,N,m,N);
4324: MatSetBlockSizesFromMats(B,A,A);
4325: MatSetType(B,MATSEQAIJ);
4326: MatSeqAIJSetPreallocation(B,0,NULL);
4327: MatGetOwnershipRange(A,&rstart,0);
4328: for (i=0; i<m; i++) {
4329: MatGetRow(A,i+rstart,&nnz,&indx,&values);
4330: MatSetValues(B,1,&i,nnz,indx,values,INSERT_VALUES);
4331: MatRestoreRow(A,i+rstart,&nnz,&indx,&values);
4332: }
4333: MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);
4334: MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);
4336: MPI_Comm_rank(PetscObjectComm((PetscObject)A),&rank);
4337: PetscStrlen(outfile,&len);
4338: PetscMalloc1(len+16,&name); /* leave room for the ".<rank>" suffix for any rank count */
4339: sprintf(name,"%s.%d",outfile,rank);
4340: PetscViewerBinaryOpen(PETSC_COMM_SELF,name,FILE_MODE_APPEND,&out);
4341: PetscFree(name);
4342: MatView(B,out);
4343: PetscViewerDestroy(&out);
4344: MatDestroy(&B);
4345: return(0);
4346: }
4348: PetscErrorCode MatDestroy_MPIAIJ_SeqsToMPI(Mat A)
4349: {
4350: PetscErrorCode ierr;
4351: Mat_Merge_SeqsToMPI *merge;
4352: PetscContainer container;
4355: PetscObjectQuery((PetscObject)A,"MatMergeSeqsToMPI",(PetscObject*)&container);
4356: if (container) {
4357: PetscContainerGetPointer(container,(void**)&merge);
4358: PetscFree(merge->id_r);
4359: PetscFree(merge->len_s);
4360: PetscFree(merge->len_r);
4361: PetscFree(merge->bi);
4362: PetscFree(merge->bj);
4363: PetscFree(merge->buf_ri[0]);
4364: PetscFree(merge->buf_ri);
4365: PetscFree(merge->buf_rj[0]);
4366: PetscFree(merge->buf_rj);
4367: PetscFree(merge->coi);
4368: PetscFree(merge->coj);
4369: PetscFree(merge->owners_co);
4370: PetscLayoutDestroy(&merge->rowmap);
4371: PetscFree(merge);
4372: PetscObjectCompose((PetscObject)A,"MatMergeSeqsToMPI",0);
4373: }
4374: MatDestroy_MPIAIJ(A);
4375: return(0);
4376: }
4378: #include <../src/mat/utils/freespace.h>
4379: #include <petscbt.h>
4381: PetscErrorCode MatCreateMPIAIJSumSeqAIJNumeric(Mat seqmat,Mat mpimat)
4382: {
4383: PetscErrorCode ierr;
4384: MPI_Comm comm;
4385: Mat_SeqAIJ *a =(Mat_SeqAIJ*)seqmat->data;
4386: PetscMPIInt size,rank,taga,*len_s;
4387: PetscInt N=mpimat->cmap->N,i,j,*owners,*ai=a->i,*aj;
4388: PetscInt proc,m;
4389: PetscInt **buf_ri,**buf_rj;
4390: PetscInt k,anzi,*bj_i,*bi,*bj,arow,bnzi,nextaj;
4391: PetscInt nrows,**buf_ri_k,**nextrow,**nextai;
4392: MPI_Request *s_waits,*r_waits;
4393: MPI_Status *status;
4394: MatScalar *aa=a->a;
4395: MatScalar **abuf_r,*ba_i;
4396: Mat_Merge_SeqsToMPI *merge;
4397: PetscContainer container;
4400: PetscObjectGetComm((PetscObject)mpimat,&comm);
4401: PetscLogEventBegin(MAT_Seqstompinum,seqmat,0,0,0);
4403: MPI_Comm_size(comm,&size);
4404: MPI_Comm_rank(comm,&rank);
4406: PetscObjectQuery((PetscObject)mpimat,"MatMergeSeqsToMPI",(PetscObject*)&container);
4407: PetscContainerGetPointer(container,(void**)&merge);
4409: bi = merge->bi;
4410: bj = merge->bj;
4411: buf_ri = merge->buf_ri;
4412: buf_rj = merge->buf_rj;
4414: PetscMalloc1(size,&status);
4415: owners = merge->rowmap->range;
4416: len_s = merge->len_s;
4418: /* send and recv matrix values */
4419: /*-----------------------------*/
4420: PetscObjectGetNewTag((PetscObject)mpimat,&taga);
4421: PetscPostIrecvScalar(comm,taga,merge->nrecv,merge->id_r,merge->len_r,&abuf_r,&r_waits);
4423: PetscMalloc1(merge->nsend+1,&s_waits);
4424: for (proc=0,k=0; proc<size; proc++) {
4425: if (!len_s[proc]) continue;
4426: i = owners[proc];
4427: MPI_Isend(aa+ai[i],len_s[proc],MPIU_MATSCALAR,proc,taga,comm,s_waits+k);
4428: k++;
4429: }
4431: if (merge->nrecv) {MPI_Waitall(merge->nrecv,r_waits,status);}
4432: if (merge->nsend) {MPI_Waitall(merge->nsend,s_waits,status);}
4433: PetscFree(status);
4435: PetscFree(s_waits);
4436: PetscFree(r_waits);
4438: /* insert mat values of mpimat */
4439: /*----------------------------*/
4440: PetscMalloc1(N,&ba_i);
4441: PetscMalloc3(merge->nrecv,&buf_ri_k,merge->nrecv,&nextrow,merge->nrecv,&nextai);
4443: for (k=0; k<merge->nrecv; k++) {
4444: buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */
4445: nrows = *(buf_ri_k[k]);
4446: nextrow[k] = buf_ri_k[k]+1; /* next row number of k-th recved i-structure */
4447: nextai[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th recved i-structure */
4448: }
4450: /* set values of ba */
4451: m = merge->rowmap->n;
4452: for (i=0; i<m; i++) {
4453: arow = owners[rank] + i;
4454: bj_i = bj+bi[i]; /* col indices of the i-th row of mpimat */
4455: bnzi = bi[i+1] - bi[i];
4456: PetscMemzero(ba_i,bnzi*sizeof(PetscScalar));
4458: /* add local non-zero vals of this proc's seqmat into ba */
4459: anzi = ai[arow+1] - ai[arow];
4460: aj = a->j + ai[arow];
4461: aa = a->a + ai[arow];
4462: nextaj = 0;
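    /* aj[] is sorted and is a subset of bj_i[], so a single forward sweep over bj_i matches every acol */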
4463: for (j=0; nextaj<anzi; j++) {
4464: if (*(bj_i + j) == aj[nextaj]) { /* bcol == acol */
4465: ba_i[j] += aa[nextaj++];
4466: }
4467: }
4469: /* add received vals into ba */
4470: for (k=0; k<merge->nrecv; k++) { /* k-th received message */
4471: /* i-th row */
4472: if (i == *nextrow[k]) {
4473: anzi = *(nextai[k]+1) - *nextai[k];
4474: aj = buf_rj[k] + *(nextai[k]);
4475: aa = abuf_r[k] + *(nextai[k]);
4476: nextaj = 0;
4477: for (j=0; nextaj<anzi; j++) {
4478: if (*(bj_i + j) == aj[nextaj]) { /* bcol == acol */
4479: ba_i[j] += aa[nextaj++];
4480: }
4481: }
4482: nextrow[k]++; nextai[k]++;
4483: }
4484: }
4485: MatSetValues(mpimat,1,&arow,bnzi,bj_i,ba_i,INSERT_VALUES);
4486: }
4487: MatAssemblyBegin(mpimat,MAT_FINAL_ASSEMBLY);
4488: MatAssemblyEnd(mpimat,MAT_FINAL_ASSEMBLY);
4490: PetscFree(abuf_r[0]);
4491: PetscFree(abuf_r);
4492: PetscFree(ba_i);
4493: PetscFree3(buf_ri_k,nextrow,nextai);
4494: PetscLogEventEnd(MAT_Seqstompinum,seqmat,0,0,0);
4495: return(0);
4496: }
4498: PetscErrorCode MatCreateMPIAIJSumSeqAIJSymbolic(MPI_Comm comm,Mat seqmat,PetscInt m,PetscInt n,Mat *mpimat)
4499: {
4500: PetscErrorCode ierr;
4501: Mat B_mpi;
4502: Mat_SeqAIJ *a=(Mat_SeqAIJ*)seqmat->data;
4503: PetscMPIInt size,rank,tagi,tagj,*len_s,*len_si,*len_ri;
4504: PetscInt **buf_rj,**buf_ri,**buf_ri_k;
4505: PetscInt M=seqmat->rmap->n,N=seqmat->cmap->n,i,*owners,*ai=a->i,*aj=a->j;
4506: PetscInt len,proc,*dnz,*onz,bs,cbs;
4507: PetscInt k,anzi,*bi,*bj,*lnk,nlnk,arow,bnzi,nspacedouble=0;
4508: PetscInt nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextai;
4509: MPI_Request *si_waits,*sj_waits,*ri_waits,*rj_waits;
4510: MPI_Status *status;
4511: PetscFreeSpaceList free_space=NULL,current_space=NULL;
4512: PetscBT lnkbt;
4513: Mat_Merge_SeqsToMPI *merge;
4514: PetscContainer container;
4517: PetscLogEventBegin(MAT_Seqstompisym,seqmat,0,0,0);
4519: /* make sure it is a PETSc comm */
4520: PetscCommDuplicate(comm,&comm,NULL);
4521: MPI_Comm_size(comm,&size);
4522: MPI_Comm_rank(comm,&rank);
4524: PetscNew(&merge);
4525: PetscMalloc1(size,&status);
4527: /* determine row ownership */
4528: /*---------------------------------------------------------*/
4529: PetscLayoutCreate(comm,&merge->rowmap);
4530: PetscLayoutSetLocalSize(merge->rowmap,m);
4531: PetscLayoutSetSize(merge->rowmap,M);
4532: PetscLayoutSetBlockSize(merge->rowmap,1);
4533: PetscLayoutSetUp(merge->rowmap);
4534: PetscMalloc1(size,&len_si);
4535: PetscMalloc1(size,&merge->len_s);
4537: m = merge->rowmap->n;
4538: owners = merge->rowmap->range;
4540: /* determine the number of messages to send, their lengths */
4541: /*---------------------------------------------------------*/
4542: len_s = merge->len_s;
4544: len = 0; /* length of buf_si[] */
4545: merge->nsend = 0;
4546: for (proc=0; proc<size; proc++) {
4547: len_si[proc] = 0;
4548: if (proc == rank) {
4549: len_s[proc] = 0;
4550: } else {
4551: len_si[proc] = owners[proc+1] - owners[proc] + 1;
4552: len_s[proc] = ai[owners[proc+1]] - ai[owners[proc]]; /* num of nonzeros to be sent to [proc] */
4553: }
4554: if (len_s[proc]) {
4555: merge->nsend++;
4556: nrows = 0;
4557: for (i=owners[proc]; i<owners[proc+1]; i++) {
4558: if (ai[i+1] > ai[i]) nrows++;
4559: }
4560: len_si[proc] = 2*(nrows+1);
4561: len += len_si[proc];
4562: }
4563: }
4565: /* determine the number and length of messages to receive for ij-structure */
4566: /*-------------------------------------------------------------------------*/
4567: PetscGatherNumberOfMessages(comm,NULL,len_s,&merge->nrecv);
4568: PetscGatherMessageLengths2(comm,merge->nsend,merge->nrecv,len_s,len_si,&merge->id_r,&merge->len_r,&len_ri);
4570: /* post the Irecv of j-structure */
4571: /*-------------------------------*/
4572: PetscCommGetNewTag(comm,&tagj);
4573: PetscPostIrecvInt(comm,tagj,merge->nrecv,merge->id_r,merge->len_r,&buf_rj,&rj_waits);
4575: /* post the Isend of j-structure */
4576: /*--------------------------------*/
4577: PetscMalloc2(merge->nsend,&si_waits,merge->nsend,&sj_waits);
4579: for (proc=0, k=0; proc<size; proc++) {
4580: if (!len_s[proc]) continue;
4581: i = owners[proc];
4582: MPI_Isend(aj+ai[i],len_s[proc],MPIU_INT,proc,tagj,comm,sj_waits+k);
4583: k++;
4584: }
4586: /* receives and sends of j-structure are complete */
4587: /*------------------------------------------------*/
4588: if (merge->nrecv) {MPI_Waitall(merge->nrecv,rj_waits,status);}
4589: if (merge->nsend) {MPI_Waitall(merge->nsend,sj_waits,status);}
4591: /* send and recv i-structure */
4592: /*---------------------------*/
4593: PetscCommGetNewTag(comm,&tagi);
4594: PetscPostIrecvInt(comm,tagi,merge->nrecv,merge->id_r,len_ri,&buf_ri,&ri_waits);
4596: PetscMalloc1(len+1,&buf_s);
4597: buf_si = buf_s; /* points to the beginning of k-th msg to be sent */
4598: for (proc=0,k=0; proc<size; proc++) {
4599: if (!len_s[proc]) continue;
4600: /* form outgoing message for i-structure:
4601: buf_si[0]: nrows to be sent
4602: [1:nrows]: row index (local to the receiving process)
4603: [nrows+1:2*nrows+1]: i-structure index
4604: */
4605: /*-------------------------------------------*/
4606: nrows = len_si[proc]/2 - 1;
4607: buf_si_i = buf_si + nrows+1;
4608: buf_si[0] = nrows;
4609: buf_si_i[0] = 0;
4610: nrows = 0;
4611: for (i=owners[proc]; i<owners[proc+1]; i++) {
4612: anzi = ai[i+1] - ai[i];
4613: if (anzi) {
4614: buf_si_i[nrows+1] = buf_si_i[nrows] + anzi; /* i-structure */
4615: buf_si[nrows+1] = i-owners[proc]; /* local row index */
4616: nrows++;
4617: }
4618: }
4619: MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,si_waits+k);
4620: k++;
4621: buf_si += len_si[proc];
4622: }
4624: if (merge->nrecv) {MPI_Waitall(merge->nrecv,ri_waits,status);}
4625: if (merge->nsend) {MPI_Waitall(merge->nsend,si_waits,status);}
4627: PetscInfo2(seqmat,"nsend: %D, nrecv: %D\n",merge->nsend,merge->nrecv);
4628: for (i=0; i<merge->nrecv; i++) {
4629: PetscInfo3(seqmat,"recv len_ri=%D, len_rj=%D from [%D]\n",len_ri[i],merge->len_r[i],merge->id_r[i]);
4630: }
4632: PetscFree(len_si);
4633: PetscFree(len_ri);
4634: PetscFree(rj_waits);
4635: PetscFree2(si_waits,sj_waits);
4636: PetscFree(ri_waits);
4637: PetscFree(buf_s);
4638: PetscFree(status);
4640: /* compute a local seq matrix in each processor */
4641: /*----------------------------------------------*/
4642: /* allocate bi array and free space for accumulating nonzero column info */
4643: PetscMalloc1(m+1,&bi);
4644: bi[0] = 0;
4646: /* create and initialize a linked list */
4647: nlnk = N+1;
4648: PetscLLCreate(N,N,nlnk,lnk,lnkbt);
4650: /* initial FreeSpace size is 2*(num of local nnz(seqmat)) */
4651: len = ai[owners[rank+1]] - ai[owners[rank]];
4652: PetscFreeSpaceGet(PetscIntMultTruncate(2,len)+1,&free_space);
4654: current_space = free_space;
4656: /* determine symbolic info for each local row */
4657: PetscMalloc3(merge->nrecv,&buf_ri_k,merge->nrecv,&nextrow,merge->nrecv,&nextai);
4659: for (k=0; k<merge->nrecv; k++) {
4660: buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */
4661: nrows = *buf_ri_k[k];
4662: nextrow[k] = buf_ri_k[k] + 1; /* next row number of k-th recved i-structure */
4663: nextai[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th recved i-structure */
4664: }
4666: MatPreallocateInitialize(comm,m,n,dnz,onz);
4667: len = 0;
4668: for (i=0; i<m; i++) {
4669: bnzi = 0;
4670: /* add local non-zero cols of this proc's seqmat into lnk */
4671: arow = owners[rank] + i;
4672: anzi = ai[arow+1] - ai[arow];
4673: aj = a->j + ai[arow];
4674: PetscLLAddSorted(anzi,aj,N,nlnk,lnk,lnkbt);
4675: bnzi += nlnk;
4676: /* add received col data into lnk */
4677: for (k=0; k<merge->nrecv; k++) { /* k-th received message */
4678: if (i == *nextrow[k]) { /* i-th row */
4679: anzi = *(nextai[k]+1) - *nextai[k];
4680: aj = buf_rj[k] + *nextai[k];
4681: PetscLLAddSorted(anzi,aj,N,nlnk,lnk,lnkbt);
4682: bnzi += nlnk;
4683: nextrow[k]++; nextai[k]++;
4684: }
4685: }
4686: if (len < bnzi) len = bnzi; /* =max(bnzi) */
4688: /* if free space is not available, make more free space */
4689: if (current_space->local_remaining<bnzi) {
4690: PetscFreeSpaceGet(PetscIntSumTruncate(bnzi,current_space->total_array_size),&current_space);
4691: nspacedouble++;
4692: }
4693: /* copy data into free space, then initialize lnk */
4694: PetscLLClean(N,N,bnzi,lnk,current_space->array,lnkbt);
4695: MatPreallocateSet(i+owners[rank],bnzi,current_space->array,dnz,onz);
4697: current_space->array += bnzi;
4698: current_space->local_used += bnzi;
4699: current_space->local_remaining -= bnzi;
4701: bi[i+1] = bi[i] + bnzi;
4702: }
4704: PetscFree3(buf_ri_k,nextrow,nextai);
4706: PetscMalloc1(bi[m]+1,&bj);
4707: PetscFreeSpaceContiguous(&free_space,bj);
4708: PetscLLDestroy(lnk,lnkbt);
4710: /* create symbolic parallel matrix B_mpi */
4711: /*---------------------------------------*/
4712: MatGetBlockSizes(seqmat,&bs,&cbs);
4713: MatCreate(comm,&B_mpi);
4714: if (n==PETSC_DECIDE) {
4715: MatSetSizes(B_mpi,m,n,PETSC_DETERMINE,N);
4716: } else {
4717: MatSetSizes(B_mpi,m,n,PETSC_DETERMINE,PETSC_DETERMINE);
4718: }
4719: MatSetBlockSizes(B_mpi,bs,cbs);
4720: MatSetType(B_mpi,MATMPIAIJ);
4721: MatMPIAIJSetPreallocation(B_mpi,0,dnz,0,onz);
4722: MatPreallocateFinalize(dnz,onz);
4723: MatSetOption(B_mpi,MAT_NEW_NONZERO_ALLOCATION_ERR,PETSC_FALSE);
4725: /* B_mpi is not ready for use - assembly will be done by MatCreateMPIAIJSumSeqAIJNumeric() */
4726: B_mpi->assembled = PETSC_FALSE;
4727: B_mpi->ops->destroy = MatDestroy_MPIAIJ_SeqsToMPI;
4728: merge->bi = bi;
4729: merge->bj = bj;
4730: merge->buf_ri = buf_ri;
4731: merge->buf_rj = buf_rj;
4732: merge->coi = NULL;
4733: merge->coj = NULL;
4734: merge->owners_co = NULL;
4736: PetscCommDestroy(&comm);
4738: /* attach the supporting struct to B_mpi for reuse */
4739: PetscContainerCreate(PETSC_COMM_SELF,&container);
4740: PetscContainerSetPointer(container,merge);
4741: PetscObjectCompose((PetscObject)B_mpi,"MatMergeSeqsToMPI",(PetscObject)container);
4742: PetscContainerDestroy(&container);
4743: *mpimat = B_mpi;
4745: PetscLogEventEnd(MAT_Seqstompisym,seqmat,0,0,0);
4746: return(0);
4747: }
4749: /*@C
4750: MatCreateMPIAIJSumSeqAIJ - Creates a MATMPIAIJ matrix by adding sequential
4751: matrices from each processor
4753: Collective on MPI_Comm
4755: Input Parameters:
4756: + comm - the communicator the parallel matrix will live on
4757: . seqmat - the input sequential matrix (one per process)
4758: . m - number of local rows (or PETSC_DECIDE)
4759: . n - number of local columns (or PETSC_DECIDE)
4760: - scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
4762: Output Parameter:
4763: . mpimat - the parallel matrix generated
4765: Level: advanced
4767: Notes:
4768: The dimensions of the sequential matrix in each processor MUST be the same.
4769: The input seqmat is included in the container "Mat_Merge_SeqsToMPI", and will be
4770: destroyed when mpimat is destroyed. Call PetscObjectQuery() to access seqmat.
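 A usage sketch (the symbolic phase is performed once with MAT_INITIAL_MATRIX;
 afterwards the values of seqmat may be changed, keeping its nonzero pattern,
 and summed again with MAT_REUSE_MATRIX):

.vb
      Mat C;
      MatCreateMPIAIJSumSeqAIJ(comm,seqmat,PETSC_DECIDE,PETSC_DECIDE,MAT_INITIAL_MATRIX,&C);
      /* ... update the numerical values of seqmat ... */
      MatCreateMPIAIJSumSeqAIJ(comm,seqmat,PETSC_DECIDE,PETSC_DECIDE,MAT_REUSE_MATRIX,&C);
.ve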
4771: @*/
4772: PetscErrorCode MatCreateMPIAIJSumSeqAIJ(MPI_Comm comm,Mat seqmat,PetscInt m,PetscInt n,MatReuse scall,Mat *mpimat)
4773: {
4775: PetscMPIInt size;
4778: MPI_Comm_size(comm,&size);
4779: if (size == 1) {
4780: PetscLogEventBegin(MAT_Seqstompi,seqmat,0,0,0);
4781: if (scall == MAT_INITIAL_MATRIX) {
4782: MatDuplicate(seqmat,MAT_COPY_VALUES,mpimat);
4783: } else {
4784: MatCopy(seqmat,*mpimat,SAME_NONZERO_PATTERN);
4785: }
4786: PetscLogEventEnd(MAT_Seqstompi,seqmat,0,0,0);
4787: return(0);
4788: }
4789: PetscLogEventBegin(MAT_Seqstompi,seqmat,0,0,0);
4790: if (scall == MAT_INITIAL_MATRIX) {
4791: MatCreateMPIAIJSumSeqAIJSymbolic(comm,seqmat,m,n,mpimat);
4792: }
4793: MatCreateMPIAIJSumSeqAIJNumeric(seqmat,*mpimat);
4794: PetscLogEventEnd(MAT_Seqstompi,seqmat,0,0,0);
4795: return(0);
4796: }
4798: /*@
4799: MatMPIAIJGetLocalMat - Creates a SeqAIJ matrix from a MATMPIAIJ matrix by taking all its local rows and putting them into a sequential matrix with
4800: mlocal rows and n columns, where mlocal is the row count obtained with MatGetLocalSize() and n is the global column count obtained
4801: with MatGetSize().
4803: Not Collective
4805: Input Parameters:
4806: + A - the matrix
4807: - scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
4809: Output Parameter:
4810: . A_loc - the local sequential matrix generated
4812: Level: developer
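 A usage sketch:

.vb
      Mat A_loc;
      MatMPIAIJGetLocalMat(A,MAT_INITIAL_MATRIX,&A_loc);
      /* ... use A_loc; after the values of A change, refresh it ... */
      MatMPIAIJGetLocalMat(A,MAT_REUSE_MATRIX,&A_loc);
      MatDestroy(&A_loc);
.ve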
4814: .seealso: MatGetOwnershipRange(), MatMPIAIJGetLocalMatCondensed()
4816: @*/
4817: PetscErrorCode MatMPIAIJGetLocalMat(Mat A,MatReuse scall,Mat *A_loc)
4818: {
4820: Mat_MPIAIJ *mpimat=(Mat_MPIAIJ*)A->data;
4821: Mat_SeqAIJ *mat,*a,*b;
4822: PetscInt *ai,*aj,*bi,*bj,*cmap=mpimat->garray;
4823: MatScalar *aa,*ba,*cam;
4824: PetscScalar *ca;
4825: PetscInt am=A->rmap->n,i,j,k,cstart=A->cmap->rstart;
4826: PetscInt *ci,*cj,col,ncols_d,ncols_o,jo;
4827: PetscBool match;
4828: MPI_Comm comm;
4829: PetscMPIInt size;
4832: PetscObjectTypeCompare((PetscObject)A,MATMPIAIJ,&match);
4833: if (!match) SETERRQ(PetscObjectComm((PetscObject)A), PETSC_ERR_SUP,"Requires MATMPIAIJ matrix as input");
4834: PetscObjectGetComm((PetscObject)A,&comm);
4835: MPI_Comm_size(comm,&size);
4836: if (size == 1 && scall == MAT_REUSE_MATRIX) return(0);
4838: PetscLogEventBegin(MAT_Getlocalmat,A,0,0,0);
4839: a = (Mat_SeqAIJ*)(mpimat->A)->data;
4840: b = (Mat_SeqAIJ*)(mpimat->B)->data;
4841: ai = a->i; aj = a->j; bi = b->i; bj = b->j;
4842: aa = a->a; ba = b->a;
4843: if (scall == MAT_INITIAL_MATRIX) {
4844: if (size == 1) {
4845: MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,A->cmap->N,ai,aj,aa,A_loc);
4846: return(0);
4847: }
4849: PetscMalloc1(1+am,&ci);
4850: ci[0] = 0;
4851: for (i=0; i<am; i++) {
4852: ci[i+1] = ci[i] + (ai[i+1] - ai[i]) + (bi[i+1] - bi[i]);
4853: }
4854: PetscMalloc1(1+ci[am],&cj);
4855: PetscMalloc1(1+ci[am],&ca);
4856: k = 0;
4857: for (i=0; i<am; i++) {
4858: ncols_o = bi[i+1] - bi[i];
4859: ncols_d = ai[i+1] - ai[i];
4860: /* off-diagonal portion of A */
4861: for (jo=0; jo<ncols_o; jo++) {
4862: col = cmap[*bj];
4863: if (col >= cstart) break;
4864: cj[k] = col; bj++;
4865: ca[k++] = *ba++;
4866: }
4867: /* diagonal portion of A */
4868: for (j=0; j<ncols_d; j++) {
4869: cj[k] = cstart + *aj++;
4870: ca[k++] = *aa++;
4871: }
4872: /* off-diagonal portion of A */
4873: for (j=jo; j<ncols_o; j++) {
4874: cj[k] = cmap[*bj++];
4875: ca[k++] = *ba++;
4876: }
4877: }
4878: /* put together the new matrix */
4879: MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,A->cmap->N,ci,cj,ca,A_loc);
4880: /* MatCreateSeqAIJWithArrays flags matrix so PETSc doesn't free the user's arrays. */
4881: /* Since these are PETSc arrays, change flags to free them as necessary. */
4882: mat = (Mat_SeqAIJ*)(*A_loc)->data;
4883: mat->free_a = PETSC_TRUE;
4884: mat->free_ij = PETSC_TRUE;
4885: mat->nonew = 0;
4886: } else if (scall == MAT_REUSE_MATRIX) {
4887: mat=(Mat_SeqAIJ*)(*A_loc)->data;
4888: ci = mat->i; cj = mat->j; cam = mat->a;
4889: for (i=0; i<am; i++) {
4890: /* off-diagonal portion of A */
4891: ncols_o = bi[i+1] - bi[i];
4892: for (jo=0; jo<ncols_o; jo++) {
4893: col = cmap[*bj];
4894: if (col >= cstart) break;
4895: *cam++ = *ba++; bj++;
4896: }
4897: /* diagonal portion of A */
4898: ncols_d = ai[i+1] - ai[i];
4899: for (j=0; j<ncols_d; j++) *cam++ = *aa++;
4900: /* off-diagonal portion of A */
4901: for (j=jo; j<ncols_o; j++) {
4902: *cam++ = *ba++; bj++;
4903: }
4904: }
4905: } else SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Invalid MatReuse %d",(int)scall);
4906: PetscLogEventEnd(MAT_Getlocalmat,A,0,0,0);
4907: return(0);
4908: }
4910: /*@C
4911: MatMPIAIJGetLocalMatCondensed - Creates a SeqAIJ matrix from a MATMPIAIJ matrix by taking all its local rows and NON-ZERO columns
4913: Not Collective
4915: Input Parameters:
4916: + A - the matrix
4917: . scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
4918: - row, col - index sets of rows and columns to extract (or NULL)
4920: Output Parameter:
4921: . A_loc - the local sequential matrix generated
4923: Level: developer
4925: .seealso: MatGetOwnershipRange(), MatMPIAIJGetLocalMat()
4927: @*/
4928: PetscErrorCode MatMPIAIJGetLocalMatCondensed(Mat A,MatReuse scall,IS *row,IS *col,Mat *A_loc)
4929: {
4930: Mat_MPIAIJ *a=(Mat_MPIAIJ*)A->data;
4932: PetscInt i,start,end,ncols,nzA,nzB,*cmap,imark,*idx;
4933: IS isrowa,iscola;
4934: Mat *aloc;
4935: PetscBool match;
4938: PetscObjectTypeCompare((PetscObject)A,MATMPIAIJ,&match);
4939: if (!match) SETERRQ(PetscObjectComm((PetscObject)A), PETSC_ERR_SUP,"Requires MATMPIAIJ matrix as input");
4940: PetscLogEventBegin(MAT_Getlocalmatcondensed,A,0,0,0);
4941: if (!row) {
4942: start = A->rmap->rstart; end = A->rmap->rend;
4943: ISCreateStride(PETSC_COMM_SELF,end-start,start,1,&isrowa);
4944: } else {
4945: isrowa = *row;
4946: }
4947: if (!col) {
4948: start = A->cmap->rstart;
4949: cmap = a->garray;
4950: nzA = a->A->cmap->n;
4951: nzB = a->B->cmap->n;
4952: PetscMalloc1(nzA+nzB, &idx);
4953: ncols = 0;
4954: for (i=0; i<nzB; i++) {
4955: if (cmap[i] < start) idx[ncols++] = cmap[i];
4956: else break;
4957: }
4958: imark = i;
4959: for (i=0; i<nzA; i++) idx[ncols++] = start + i;
4960: for (i=imark; i<nzB; i++) idx[ncols++] = cmap[i];
4961: ISCreateGeneral(PETSC_COMM_SELF,ncols,idx,PETSC_OWN_POINTER,&iscola);
4962: } else {
4963: iscola = *col;
4964: }
4965: if (scall != MAT_INITIAL_MATRIX) {
4966: PetscMalloc1(1,&aloc);
4967: aloc[0] = *A_loc;
4968: }
4969: MatCreateSubMatrices(A,1,&isrowa,&iscola,scall,&aloc);
4970: *A_loc = aloc[0];
4971: PetscFree(aloc);
4972: if (!row) {
4973: ISDestroy(&isrowa);
4974: }
4975: if (!col) {
4976: ISDestroy(&iscola);
4977: }
4978: PetscLogEventEnd(MAT_Getlocalmatcondensed,A,0,0,0);
4979: return(0);
4980: }
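/*
   A minimal usage sketch (same assumptions as above: A is an assembled MATMPIAIJ matrix, error
   checking omitted). Passing NULL for row and col selects all local rows and all nonzero columns:

     Mat A_loc;
     MatMPIAIJGetLocalMatCondensed(A,MAT_INITIAL_MATRIX,NULL,NULL,&A_loc);
     ... A_loc has A's local rows and only the columns that contain nonzeros ...
     MatDestroy(&A_loc);
*/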
4982: /*@C
4983: MatGetBrowsOfAcols - Creates a SeqAIJ matrix by taking the rows of B that correspond to the nonzero columns of the local part of A
4985: Collective on Mat
4987: Input Parameters:
4988: + A,B - the matrices in mpiaij format
4989: . scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
4990: - rowb, colb - index sets of rows and columns of B to extract (or NULL)
4992: Output Parameters:
4993: + rowb, colb - index sets of rows and columns of B to extract
4994: - B_seq - the sequential matrix generated
4996: Level: developer
4998: @*/
4999: PetscErrorCode MatGetBrowsOfAcols(Mat A,Mat B,MatReuse scall,IS *rowb,IS *colb,Mat *B_seq)
5000: {
5001: Mat_MPIAIJ *a=(Mat_MPIAIJ*)A->data;
5003: PetscInt *idx,i,start,ncols,nzA,nzB,*cmap,imark;
5004: IS isrowb,iscolb;
5005: Mat *bseq=NULL;
5008: if (A->cmap->rstart != B->rmap->rstart || A->cmap->rend != B->rmap->rend) {
5009: SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, (%D, %D) != (%D,%D)",A->cmap->rstart,A->cmap->rend,B->rmap->rstart,B->rmap->rend);
5010: }
5011: PetscLogEventBegin(MAT_GetBrowsOfAcols,A,B,0,0);
5013: if (scall == MAT_INITIAL_MATRIX) {
5014: start = A->cmap->rstart;
5015: cmap = a->garray;
5016: nzA = a->A->cmap->n;
5017: nzB = a->B->cmap->n;
5018: PetscMalloc1(nzA+nzB, &idx);
5019: ncols = 0;
5020: for (i=0; i<nzB; i++) { /* global columns below the local diagonal block */
5021: if (cmap[i] < start) idx[ncols++] = cmap[i];
5022: else break;
5023: }
5024: imark = i;
5025: for (i=0; i<nzA; i++) idx[ncols++] = start + i; /* columns of the local diagonal block */
5026: for (i=imark; i<nzB; i++) idx[ncols++] = cmap[i]; /* global columns above the local diagonal block */
5027: ISCreateGeneral(PETSC_COMM_SELF,ncols,idx,PETSC_OWN_POINTER,&isrowb);
5028: ISCreateStride(PETSC_COMM_SELF,B->cmap->N,0,1,&iscolb);
5029: } else {
5030: if (!rowb || !colb) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"IS rowb and colb must be provided for MAT_REUSE_MATRIX");
5031: isrowb = *rowb; iscolb = *colb;
5032: PetscMalloc1(1,&bseq);
5033: bseq[0] = *B_seq;
5034: }
5035: MatCreateSubMatrices(B,1,&isrowb,&iscolb,scall,&bseq);
5036: *B_seq = bseq[0];
5037: PetscFree(bseq);
5038: if (!rowb) {
5039: ISDestroy(&isrowb);
5040: } else {
5041: *rowb = isrowb;
5042: }
5043: if (!colb) {
5044: ISDestroy(&iscolb);
5045: } else {
5046: *colb = iscolb;
5047: }
5048: PetscLogEventEnd(MAT_GetBrowsOfAcols,A,B,0,0);
5049: return(0);
5050: }
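/*
   A minimal usage sketch (assuming A and B are assembled MPIAIJ matrices with compatible layouts;
   error checking omitted). The index sets returned for MAT_INITIAL_MATRIX must be passed back
   unchanged for MAT_REUSE_MATRIX:

     IS  rowb = NULL,colb = NULL;
     Mat B_seq;
     MatGetBrowsOfAcols(A,B,MAT_INITIAL_MATRIX,&rowb,&colb,&B_seq);
     ... after the values of B change ...
     MatGetBrowsOfAcols(A,B,MAT_REUSE_MATRIX,&rowb,&colb,&B_seq);
     ISDestroy(&rowb); ISDestroy(&colb); MatDestroy(&B_seq);
*/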
5052: /*
5053: MatGetBrowsOfAoCols_MPIAIJ - Creates a SeqAIJ matrix by taking the rows of B that correspond to the nonzero columns
5054: of the OFF-DIAGONAL portion of the local part of A
5056: Collective on Mat
5058: Input Parameters:
5059: + A,B - the matrices in mpiaij format
5060: - scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
5062: Output Parameters:
5063: + startsj_s - starting point in B's sending j-arrays, saved for MAT_REUSE (or NULL)
5064: . startsj_r - starting point in B's receiving j-arrays, saved for MAT_REUSE (or NULL)
5065: . bufa_ptr - array for sending matrix values, saved for MAT_REUSE (or NULL)
5066: - B_oth - the sequential matrix generated with size aBn=a->B->cmap->n by B->cmap->N
5068: Level: developer
5070: */
5071: PetscErrorCode MatGetBrowsOfAoCols_MPIAIJ(Mat A,Mat B,MatReuse scall,PetscInt **startsj_s,PetscInt **startsj_r,MatScalar **bufa_ptr,Mat *B_oth)
5072: {
5073: VecScatter_MPI_General *gen_to,*gen_from;
5074: PetscErrorCode ierr;
5075: Mat_MPIAIJ *a=(Mat_MPIAIJ*)A->data;
5076: Mat_SeqAIJ *b_oth;
5077: VecScatter ctx =a->Mvctx;
5078: MPI_Comm comm;
5079: PetscMPIInt *rprocs,*sprocs,tag=((PetscObject)ctx)->tag,rank;
5080: PetscInt *rowlen,*bufj,*bufJ,ncols,aBn=a->B->cmap->n,row,*b_othi,*b_othj;
5081: PetscInt *rvalues,*svalues;
5082: MatScalar *b_otha,*bufa,*bufA;
5083: PetscInt i,j,k,l,ll,nrecvs,nsends,nrows,*srow,*rstarts,*rstartsj = 0,*sstarts,*sstartsj,len;
5084: MPI_Request *rwaits = NULL,*swaits = NULL;
5085: MPI_Status *sstatus,rstatus;
5086: PetscMPIInt jj,size;
5087: PetscInt *cols,sbs,rbs;
5088: PetscScalar *vals;
5091: PetscObjectGetComm((PetscObject)A,&comm);
5092: MPI_Comm_size(comm,&size);
5094: if (A->cmap->rstart != B->rmap->rstart || A->cmap->rend != B->rmap->rend) {
5095: SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, (%D, %D) != (%D,%D)",A->cmap->rstart,A->cmap->rend,B->rmap->rstart,B->rmap->rend);
5096: }
5097: PetscLogEventBegin(MAT_GetBrowsOfAocols,A,B,0,0);
5098: MPI_Comm_rank(comm,&rank);
5100: if (size == 1) {
5101: if (startsj_s) *startsj_s = NULL; /* assign through the output pointers; writing to the local parameter copies has no effect */
5102: if (startsj_r) *startsj_r = NULL; if (bufa_ptr) *bufa_ptr = NULL;
5103: *B_oth = NULL;
5104: return(0);
5105: }
5107: gen_to = (VecScatter_MPI_General*)ctx->todata;
5108: gen_from = (VecScatter_MPI_General*)ctx->fromdata;
5109: nrecvs = gen_from->n;
5110: nsends = gen_to->n;
5112: PetscMalloc2(nrecvs,&rwaits,nsends,&swaits);
5113: srow = gen_to->indices; /* local row index to be sent */
5114: sstarts = gen_to->starts;
5115: sprocs = gen_to->procs;
5116: sstatus = gen_to->sstatus;
5117: sbs = gen_to->bs;
5118: rstarts = gen_from->starts;
5119: rprocs = gen_from->procs;
5120: rbs = gen_from->bs;
5122: if (!startsj_s || !bufa_ptr) scall = MAT_INITIAL_MATRIX;
5123: if (scall == MAT_INITIAL_MATRIX) {
5124: /* i-array */
5125: /*---------*/
5126: /* post receives */
5127: PetscMalloc1(rbs*(rstarts[nrecvs] - rstarts[0]),&rvalues);
5128: for (i=0; i<nrecvs; i++) {
5129: rowlen = rvalues + rstarts[i]*rbs;
5130: nrows = (rstarts[i+1]-rstarts[i])*rbs; /* num of indices to be received */
5131: MPI_Irecv(rowlen,nrows,MPIU_INT,rprocs[i],tag,comm,rwaits+i);
5132: }
5134: /* pack the outgoing message */
5135: PetscMalloc2(nsends+1,&sstartsj,nrecvs+1,&rstartsj);
5137: sstartsj[0] = 0;
5138: rstartsj[0] = 0;
5139: len = 0; /* total length of j or a array to be sent */
5140: k = 0;
5141: PetscMalloc1(sbs*(sstarts[nsends] - sstarts[0]),&svalues);
5142: for (i=0; i<nsends; i++) {
5143: rowlen = svalues + sstarts[i]*sbs;
5144: nrows = sstarts[i+1]-sstarts[i]; /* num of block rows */
5145: for (j=0; j<nrows; j++) {
5146: row = srow[k] + B->rmap->range[rank]; /* global row idx */
5147: for (l=0; l<sbs; l++) {
5148: MatGetRow_MPIAIJ(B,row+l,&ncols,NULL,NULL); /* rowlength */
5150: rowlen[j*sbs+l] = ncols;
5152: len += ncols;
5153: MatRestoreRow_MPIAIJ(B,row+l,&ncols,NULL,NULL);
5154: }
5155: k++;
5156: }
5157: MPI_Isend(rowlen,nrows*sbs,MPIU_INT,sprocs[i],tag,comm,swaits+i);
5159: sstartsj[i+1] = len; /* starting point of (i+1)-th outgoing msg in bufj and bufa */
5160: }
5161: /* recvs and sends of i-array are completed */
5162: i = nrecvs;
5163: while (i--) {
5164: MPI_Waitany(nrecvs,rwaits,&jj,&rstatus);
5165: }
5166: if (nsends) {MPI_Waitall(nsends,swaits,sstatus);}
5167: PetscFree(svalues);
5169: /* allocate buffers for sending j and a arrays */
5170: PetscMalloc1(len+1,&bufj);
5171: PetscMalloc1(len+1,&bufa);
5173: /* create i-array of B_oth */
5174: PetscMalloc1(aBn+2,&b_othi);
5176: b_othi[0] = 0;
5177: len = 0; /* total length of j or a array to be received */
5178: k = 0;
5179: for (i=0; i<nrecvs; i++) {
5180: rowlen = rvalues + rstarts[i]*rbs;
5181: nrows = rbs*(rstarts[i+1]-rstarts[i]); /* num of rows to be received */
5182: for (j=0; j<nrows; j++) {
5183: b_othi[k+1] = b_othi[k] + rowlen[j];
5184: PetscIntSumError(rowlen[j],len,&len);
5185: k++;
5186: }
5187: rstartsj[i+1] = len; /* starting point of (i+1)-th incoming msg in bufj and bufa */
5188: }
5189: PetscFree(rvalues);
5191: /* allocate space for j and a arrays of B_oth */
5192: PetscMalloc1(b_othi[aBn]+1,&b_othj);
5193: PetscMalloc1(b_othi[aBn]+1,&b_otha);
5195: /* j-array */
5196: /*---------*/
5197: /* post receives of j-array */
5198: for (i=0; i<nrecvs; i++) {
5199: nrows = rstartsj[i+1]-rstartsj[i]; /* length of the msg received */
5200: MPI_Irecv(b_othj+rstartsj[i],nrows,MPIU_INT,rprocs[i],tag,comm,rwaits+i);
5201: }
5203: /* pack the outgoing message j-array */
5204: k = 0;
5205: for (i=0; i<nsends; i++) {
5206: nrows = sstarts[i+1]-sstarts[i]; /* num of block rows */
5207: bufJ = bufj+sstartsj[i];
5208: for (j=0; j<nrows; j++) {
5209: row = srow[k++] + B->rmap->range[rank]; /* global row idx */
5210: for (ll=0; ll<sbs; ll++) {
5211: MatGetRow_MPIAIJ(B,row+ll,&ncols,&cols,NULL);
5212: for (l=0; l<ncols; l++) {
5213: *bufJ++ = cols[l];
5214: }
5215: MatRestoreRow_MPIAIJ(B,row+ll,&ncols,&cols,NULL);
5216: }
5217: }
5218: MPI_Isend(bufj+sstartsj[i],sstartsj[i+1]-sstartsj[i],MPIU_INT,sprocs[i],tag,comm,swaits+i);
5219: }
5221: /* recvs and sends of j-array are completed */
5222: i = nrecvs;
5223: while (i--) {
5224: MPI_Waitany(nrecvs,rwaits,&jj,&rstatus);
5225: }
5226: if (nsends) {MPI_Waitall(nsends,swaits,sstatus);}
5227: } else if (scall == MAT_REUSE_MATRIX) {
5228: sstartsj = *startsj_s;
5229: rstartsj = *startsj_r;
5230: bufa = *bufa_ptr;
5231: b_oth = (Mat_SeqAIJ*)(*B_oth)->data;
5232: b_otha = b_oth->a;
5233: } else SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Invalid MatReuse %d",(int)scall);
5235: /* a-array */
5236: /*---------*/
5237: /* post receives of a-array */
5238: for (i=0; i<nrecvs; i++) {
5239: nrows = rstartsj[i+1]-rstartsj[i]; /* length of the msg received */
5240: MPI_Irecv(b_otha+rstartsj[i],nrows,MPIU_SCALAR,rprocs[i],tag,comm,rwaits+i);
5241: }
5243: /* pack the outgoing message a-array */
5244: k = 0;
5245: for (i=0; i<nsends; i++) {
5246: nrows = sstarts[i+1]-sstarts[i]; /* num of block rows */
5247: bufA = bufa+sstartsj[i];
5248: for (j=0; j<nrows; j++) {
5249: row = srow[k++] + B->rmap->range[rank]; /* global row idx */
5250: for (ll=0; ll<sbs; ll++) {
5251: MatGetRow_MPIAIJ(B,row+ll,&ncols,NULL,&vals);
5252: for (l=0; l<ncols; l++) {
5253: *bufA++ = vals[l];
5254: }
5255: MatRestoreRow_MPIAIJ(B,row+ll,&ncols,NULL,&vals);
5256: }
5257: }
5258: MPI_Isend(bufa+sstartsj[i],sstartsj[i+1]-sstartsj[i],MPIU_SCALAR,sprocs[i],tag,comm,swaits+i);
5259: }
5260: /* recvs and sends of a-array are completed */
5261: i = nrecvs;
5262: while (i--) {
5263: MPI_Waitany(nrecvs,rwaits,&jj,&rstatus);
5264: }
5265: if (nsends) {MPI_Waitall(nsends,swaits,sstatus);}
5266: PetscFree2(rwaits,swaits);
5268: if (scall == MAT_INITIAL_MATRIX) {
5269: /* put together the new matrix */
5270: MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,aBn,B->cmap->N,b_othi,b_othj,b_otha,B_oth);
5272: /* MatCreateSeqAIJWithArrays flags matrix so PETSc doesn't free the user's arrays. */
5273: /* Since these are PETSc arrays, change flags to free them as necessary. */
5274: b_oth = (Mat_SeqAIJ*)(*B_oth)->data;
5275: b_oth->free_a = PETSC_TRUE;
5276: b_oth->free_ij = PETSC_TRUE;
5277: b_oth->nonew = 0;
5279: PetscFree(bufj);
5280: if (!startsj_s || !bufa_ptr) {
5281: PetscFree2(sstartsj,rstartsj);
5282: PetscFree(bufa); /* free the send buffer itself, not the (possibly NULL) pointer argument */
5283: } else {
5284: *startsj_s = sstartsj;
5285: *startsj_r = rstartsj;
5286: *bufa_ptr = bufa;
5287: }
5288: }
5289: PetscLogEventEnd(MAT_GetBrowsOfAocols,A,B,0,0);
5290: return(0);
5291: }
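/*
   A minimal usage sketch of this developer-level routine (same assumptions as for
   MatGetBrowsOfAcols; error checking omitted). Buffers returned for MAT_INITIAL_MATRIX must be
   passed back for MAT_REUSE_MATRIX and are freed by the caller, matching the PetscMalloc2/
   PetscMalloc1 allocations above:

     PetscInt  *startsj_s = NULL,*startsj_r = NULL;
     MatScalar *bufa = NULL;
     Mat       B_oth;
     MatGetBrowsOfAoCols_MPIAIJ(A,B,MAT_INITIAL_MATRIX,&startsj_s,&startsj_r,&bufa,&B_oth);
     ... after the values of B change ...
     MatGetBrowsOfAoCols_MPIAIJ(A,B,MAT_REUSE_MATRIX,&startsj_s,&startsj_r,&bufa,&B_oth);
     PetscFree2(startsj_s,startsj_r); PetscFree(bufa); MatDestroy(&B_oth);
*/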
5293: /*@C
5294: MatGetCommunicationStructs - Provides access to the communication structures used in matrix-vector multiplication.
5296: Not Collective
5298: Input Parameter:
5299: . A - The matrix in mpiaij format
5301: Output Parameters:
5302: + lvec - The local vector holding off-process values from the argument to a matrix-vector product
5303: . colmap - A map from global column index to local index into lvec
5304: - multScatter - A scatter from the argument of a matrix-vector product to lvec
5306: Level: developer
5308: @*/
5309: #if defined(PETSC_USE_CTABLE)
5310: PetscErrorCode MatGetCommunicationStructs(Mat A, Vec *lvec, PetscTable *colmap, VecScatter *multScatter)
5311: #else
5312: PetscErrorCode MatGetCommunicationStructs(Mat A, Vec *lvec, PetscInt *colmap[], VecScatter *multScatter)
5313: #endif
5314: {
5315: Mat_MPIAIJ *a;
5322: a = (Mat_MPIAIJ*) A->data;
5323: if (lvec) *lvec = a->lvec;
5324: if (colmap) *colmap = a->colmap;
5325: if (multScatter) *multScatter = a->Mvctx;
5326: return(0);
5327: }
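/*
   A minimal usage sketch (assuming A is an assembled MATMPIAIJ matrix). The returned objects are
   references owned by the matrix; do not destroy them:

     Vec        lvec;
     VecScatter scatter;
     MatGetCommunicationStructs(A,&lvec,NULL,&scatter);
     ... inspect lvec and scatter; they remain valid only while A does ...
*/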
5329: PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPIAIJCRL(Mat,MatType,MatReuse,Mat*);
5330: PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPIAIJPERM(Mat,MatType,MatReuse,Mat*);
5331: #if defined(PETSC_HAVE_MKL)
5332: PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPIAIJMKL(Mat,MatType,MatReuse,Mat*);
5333: #endif
5334: PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPISBAIJ(Mat,MatType,MatReuse,Mat*);
5335: #if defined(PETSC_HAVE_ELEMENTAL)
5336: PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_Elemental(Mat,MatType,MatReuse,Mat*);
5337: #endif
5338: #if defined(PETSC_HAVE_HYPRE)
5339: PETSC_INTERN PetscErrorCode MatConvert_AIJ_HYPRE(Mat,MatType,MatReuse,Mat*);
5340: PETSC_INTERN PetscErrorCode MatMatMatMult_Transpose_AIJ_AIJ(Mat,Mat,Mat,MatReuse,PetscReal,Mat*);
5341: #endif
5342: PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_IS(Mat,MatType,MatReuse,Mat*);
5344: /*
5345: Computes (B'*A')' since computing A*B directly is untenable
5347:             n                    p                      p
5348:      (             )      (             )        (             )
5349:    m (      A      )  * n (      B      )   =  m (      C      )
5350:      (             )      (             )        (             )
5352: */
5353: PetscErrorCode MatMatMultNumeric_MPIDense_MPIAIJ(Mat A,Mat B,Mat C)
5354: {
5356: Mat At,Bt,Ct;
5359: MatTranspose(A,MAT_INITIAL_MATRIX,&At);
5360: MatTranspose(B,MAT_INITIAL_MATRIX,&Bt);
5361: MatMatMult(Bt,At,MAT_INITIAL_MATRIX,1.0,&Ct);
5362: MatDestroy(&At);
5363: MatDestroy(&Bt);
5364: MatTranspose(Ct,MAT_REUSE_MATRIX,&C);
5365: MatDestroy(&Ct);
5366: return(0);
5367: }
5369: PetscErrorCode MatMatMultSymbolic_MPIDense_MPIAIJ(Mat A,Mat B,PetscReal fill,Mat *C)
5370: {
5372: PetscInt m=A->rmap->n,n=B->cmap->n;
5373: Mat Cmat;
5376: if (A->cmap->n != B->rmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"A->cmap->n %D != B->rmap->n %D",A->cmap->n,B->rmap->n);
5377: MatCreate(PetscObjectComm((PetscObject)A),&Cmat);
5378: MatSetSizes(Cmat,m,n,PETSC_DETERMINE,PETSC_DETERMINE);
5379: MatSetBlockSizesFromMats(Cmat,A,B);
5380: MatSetType(Cmat,MATMPIDENSE);
5381: MatMPIDenseSetPreallocation(Cmat,NULL);
5382: MatAssemblyBegin(Cmat,MAT_FINAL_ASSEMBLY);
5383: MatAssemblyEnd(Cmat,MAT_FINAL_ASSEMBLY);
5385: Cmat->ops->matmultnumeric = MatMatMultNumeric_MPIDense_MPIAIJ;
5387: *C = Cmat;
5388: return(0);
5389: }
5391: /* ----------------------------------------------------------------*/
5392: PETSC_INTERN PetscErrorCode MatMatMult_MPIDense_MPIAIJ(Mat A,Mat B,MatReuse scall,PetscReal fill,Mat *C)
5393: {
5397: if (scall == MAT_INITIAL_MATRIX) {
5398: PetscLogEventBegin(MAT_MatMultSymbolic,A,B,0,0);
5399: MatMatMultSymbolic_MPIDense_MPIAIJ(A,B,fill,C);
5400: PetscLogEventEnd(MAT_MatMultSymbolic,A,B,0,0);
5401: }
5402: PetscLogEventBegin(MAT_MatMultNumeric,A,B,0,0);
5403: MatMatMultNumeric_MPIDense_MPIAIJ(A,B,*C);
5404: PetscLogEventEnd(MAT_MatMultNumeric,A,B,0,0);
5405: return(0);
5406: }
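/*
   A minimal usage sketch (assuming A is an assembled MPIDENSE matrix and B an assembled MPIAIJ
   matrix with A->cmap->n == B->rmap->n; error checking omitted). Users reach the routines above
   through the generic interface:

     Mat C;
     MatMatMult(A,B,MAT_INITIAL_MATRIX,PETSC_DEFAULT,&C);
     MatMatMult(A,B,MAT_REUSE_MATRIX,PETSC_DEFAULT,&C);   after the values of A or B change
     MatDestroy(&C);
*/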
5408: /*MC
5409: MATMPIAIJ - MATMPIAIJ = "mpiaij" - A matrix type to be used for parallel sparse matrices.
5411: Options Database Keys:
5412: . -mat_type mpiaij - sets the matrix type to "mpiaij" during a call to MatSetFromOptions()
5414: Level: beginner
5416: .seealso: MatCreateAIJ()
5417: M*/
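/*
   A minimal creation sketch for this type (the sizes M,N and the preallocation counts are
   illustrative placeholders; error checking omitted):

     Mat A;
     MatCreate(PETSC_COMM_WORLD,&A);
     MatSetSizes(A,PETSC_DECIDE,PETSC_DECIDE,M,N);
     MatSetType(A,MATMPIAIJ);
     MatMPIAIJSetPreallocation(A,5,NULL,2,NULL);   about 5 nonzeros per row in the diagonal
                                                   block and 2 in the off-diagonal block
     ... MatSetValues(...); MatAssemblyBegin/End(...) ...
     MatDestroy(&A);
*/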
5419: PETSC_EXTERN PetscErrorCode MatCreate_MPIAIJ(Mat B)
5420: {
5421: Mat_MPIAIJ *b;
5423: PetscMPIInt size;
5426: MPI_Comm_size(PetscObjectComm((PetscObject)B),&size);
5428: PetscNewLog(B,&b);
5429: B->data = (void*)b;
5430: PetscMemcpy(B->ops,&MatOps_Values,sizeof(struct _MatOps));
5431: B->assembled = PETSC_FALSE;
5432: B->insertmode = NOT_SET_VALUES;
5433: b->size = size;
5435: MPI_Comm_rank(PetscObjectComm((PetscObject)B),&b->rank);
5437: /* build cache for off array entries formed */
5438: MatStashCreate_Private(PetscObjectComm((PetscObject)B),1,&B->stash);
5440: b->donotstash = PETSC_FALSE;
5441: b->colmap = 0;
5442: b->garray = 0;
5443: b->roworiented = PETSC_TRUE;
5445: /* stuff used for matrix vector multiply */
5446: b->lvec = NULL;
5447: b->Mvctx = NULL;
5449: /* stuff for MatGetRow() */
5450: b->rowindices = 0;
5451: b->rowvalues = 0;
5452: b->getrowactive = PETSC_FALSE;
5454: /* flexible pointer used in CUSP/CUSPARSE classes */
5455: b->spptr = NULL;
5457: PetscObjectComposeFunction((PetscObject)B,"MatMPIAIJSetUseScalableIncreaseOverlap_C",MatMPIAIJSetUseScalableIncreaseOverlap_MPIAIJ);
5458: PetscObjectComposeFunction((PetscObject)B,"MatStoreValues_C",MatStoreValues_MPIAIJ);
5459: PetscObjectComposeFunction((PetscObject)B,"MatRetrieveValues_C",MatRetrieveValues_MPIAIJ);
5460: PetscObjectComposeFunction((PetscObject)B,"MatIsTranspose_C",MatIsTranspose_MPIAIJ);
5461: PetscObjectComposeFunction((PetscObject)B,"MatMPIAIJSetPreallocation_C",MatMPIAIJSetPreallocation_MPIAIJ);
5462: PetscObjectComposeFunction((PetscObject)B,"MatMPIAIJSetPreallocationCSR_C",MatMPIAIJSetPreallocationCSR_MPIAIJ);
5463: PetscObjectComposeFunction((PetscObject)B,"MatDiagonalScaleLocal_C",MatDiagonalScaleLocal_MPIAIJ);
5464: PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_mpiaijperm_C",MatConvert_MPIAIJ_MPIAIJPERM);
5465: #if defined(PETSC_HAVE_MKL)
5466: PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_mpiaijmkl_C",MatConvert_MPIAIJ_MPIAIJMKL);
5467: #endif
5468: PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_mpiaijcrl_C",MatConvert_MPIAIJ_MPIAIJCRL);
5469: PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_mpisbaij_C",MatConvert_MPIAIJ_MPISBAIJ);
5470: #if defined(PETSC_HAVE_ELEMENTAL)
5471: PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_elemental_C",MatConvert_MPIAIJ_Elemental);
5472: #endif
5473: #if defined(PETSC_HAVE_HYPRE)
5474: PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_hypre_C",MatConvert_AIJ_HYPRE);
5475: #endif
5476: PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_is_C",MatConvert_MPIAIJ_IS);
5477: PetscObjectComposeFunction((PetscObject)B,"MatMatMult_mpidense_mpiaij_C",MatMatMult_MPIDense_MPIAIJ);
5478: PetscObjectComposeFunction((PetscObject)B,"MatMatMultSymbolic_mpidense_mpiaij_C",MatMatMultSymbolic_MPIDense_MPIAIJ);
5479: PetscObjectComposeFunction((PetscObject)B,"MatMatMultNumeric_mpidense_mpiaij_C",MatMatMultNumeric_MPIDense_MPIAIJ);
5480: #if defined(PETSC_HAVE_HYPRE)
5481: PetscObjectComposeFunction((PetscObject)B,"MatMatMatMult_transpose_mpiaij_mpiaij_C",MatMatMatMult_Transpose_AIJ_AIJ);
5482: #endif
5483: PetscObjectChangeTypeName((PetscObject)B,MATMPIAIJ);
5484: return(0);
5485: }
5487: /*@C
5488: MatCreateMPIAIJWithSplitArrays - creates an MPI AIJ matrix using arrays that contain the "diagonal"
5489: and "off-diagonal" part of the matrix in CSR format.
5491: Collective on MPI_Comm
5493: Input Parameters:
5494: + comm - MPI communicator
5495: . m - number of local rows (Cannot be PETSC_DECIDE)
5496: . n - This value should be the same as the local size used in creating the
5497: x vector for the matrix-vector product y = Ax (or PETSC_DECIDE to have
5498: it calculated if N is given). For square matrices n is almost always m.
5499: . M - number of global rows (or PETSC_DETERMINE to have it calculated if m is given)
5500: . N - number of global columns (or PETSC_DETERMINE to have it calculated if n is given)
5501: . i - row indices for "diagonal" portion of matrix
5502: . j - column indices
5503: . a - matrix values
5504: . oi - row indices for "off-diagonal" portion of matrix
5505: . oj - column indices
5506: - oa - matrix values
5508: Output Parameter:
5509: . mat - the matrix
5511: Level: advanced
5513: Notes:
5514: The i, j, and a arrays ARE NOT copied by this routine into the internal format used by PETSc. The user
5515: must free the arrays once the matrix has been destroyed and not before.
5517: The i and j indices are zero-based
5519: See MatCreateAIJ() for the definition of "diagonal" and "off-diagonal" portion of the matrix
5521: This sets local rows and cannot be used to set off-processor values.
5523: Use of this routine is discouraged because it is inflexible and cumbersome to use. It is extremely rare that a
5524: legacy application natively assembles into exactly this split format. The code to do so is nontrivial and does
5525: not easily support in-place reassembly. It is recommended to use MatSetValues() (or a variant thereof) because
5526: the resulting assembly is easier to implement, will work with any matrix format, and the user does not have to
5527: keep track of the underlying arrays. Use MatSetOption(A,MAT_NO_OFF_PROC_ENTRIES,PETSC_TRUE) to disable all
5528: communication if it is known that only local entries will be set.
5530: .keywords: matrix, aij, compressed row, sparse, parallel
5532: .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatMPIAIJSetPreallocation(), MatMPIAIJSetPreallocationCSR(),
5533: MATMPIAIJ, MatCreateAIJ(), MatCreateMPIAIJWithArrays()
5534: @*/
5535: PetscErrorCode MatCreateMPIAIJWithSplitArrays(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt M,PetscInt N,PetscInt i[],PetscInt j[],PetscScalar a[],PetscInt oi[], PetscInt oj[],PetscScalar oa[],Mat *mat)
5536: {
5538: Mat_MPIAIJ *maij;
5541: if (m < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"local number of rows (m) cannot be PETSC_DECIDE, or negative");
5542: if (i[0]) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"i (row indices) must start with 0");
5543: if (oi[0]) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"oi (row indices) must start with 0");
5544: MatCreate(comm,mat);
5545: MatSetSizes(*mat,m,n,M,N);
5546: MatSetType(*mat,MATMPIAIJ);
5547: maij = (Mat_MPIAIJ*) (*mat)->data;
5549: (*mat)->preallocated = PETSC_TRUE;
5551: PetscLayoutSetUp((*mat)->rmap);
5552: PetscLayoutSetUp((*mat)->cmap);
5554: MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,m,n,i,j,a,&maij->A);
5555: MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,m,(*mat)->cmap->N,oi,oj,oa,&maij->B);
5557: MatAssemblyBegin(maij->A,MAT_FINAL_ASSEMBLY);
5558: MatAssemblyEnd(maij->A,MAT_FINAL_ASSEMBLY);
5559: MatAssemblyBegin(maij->B,MAT_FINAL_ASSEMBLY);
5560: MatAssemblyEnd(maij->B,MAT_FINAL_ASSEMBLY);
5562: MatSetOption(*mat,MAT_NO_OFF_PROC_ENTRIES,PETSC_TRUE);
5563: MatAssemblyBegin(*mat,MAT_FINAL_ASSEMBLY);
5564: MatAssemblyEnd(*mat,MAT_FINAL_ASSEMBLY);
5565: MatSetOption(*mat,MAT_NO_OFF_PROC_ENTRIES,PETSC_FALSE);
5566: MatSetOption(*mat,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_TRUE);
5567: return(0);
5568: }
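/*
   A minimal sketch for exactly two processes, assembling the 4x4 tridiagonal matrix with 2 on the
   diagonal and -1 beside it. Diagonal-block j indices are local, off-diagonal oj indices are
   global; all values here are illustrative and error checking is omitted:

     PetscInt    i[]   = {0,2,4},j[] = {0,1,0,1};
     PetscScalar a[]   = {2,-1,-1,2},oa[] = {-1};
     PetscInt    oi0[] = {0,0,1},oj0[] = {2};     rank 0: local row 1 couples to global column 2
     PetscInt    oi1[] = {0,1,1},oj1[] = {1};     rank 1: local row 0 couples to global column 1
     PetscMPIInt rank;
     Mat         A;

     MPI_Comm_rank(PETSC_COMM_WORLD,&rank);
     MatCreateMPIAIJWithSplitArrays(PETSC_COMM_WORLD,2,2,4,4,i,j,a,
                                    rank ? oi1 : oi0,rank ? oj1 : oj0,oa,&A);
     ... the arrays must stay valid until A is destroyed ...
     MatDestroy(&A);
*/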
5570: /*
5571: Special version for direct calls from Fortran
5572: */
5573: #include <petsc/private/fortranimpl.h>
5575: /* Change these macros so they can be used in a void function */
5576: #undef CHKERRQ
5577: #define CHKERRQ(ierr) CHKERRABORT(PETSC_COMM_WORLD,ierr)
5578: #undef SETERRQ2
5579: #define SETERRQ2(comm,ierr,b,c,d) CHKERRABORT(comm,ierr)
5580: #undef SETERRQ3
5581: #define SETERRQ3(comm,ierr,b,c,d,e) CHKERRABORT(comm,ierr)
5582: #undef SETERRQ
5583: #define SETERRQ(c,ierr,b) CHKERRABORT(c,ierr)
5585: #if defined(PETSC_HAVE_FORTRAN_CAPS)
5586: #define matsetvaluesmpiaij_ MATSETVALUESMPIAIJ
5587: #elif !defined(PETSC_HAVE_FORTRAN_UNDERSCORE)
5588: #define matsetvaluesmpiaij_ matsetvaluesmpiaij
5589: #else
5590: #endif
5591: PETSC_EXTERN void PETSC_STDCALL matsetvaluesmpiaij_(Mat *mmat,PetscInt *mm,const PetscInt im[],PetscInt *mn,const PetscInt in[],const PetscScalar v[],InsertMode *maddv,PetscErrorCode *_ierr)
5592: {
5593: Mat mat = *mmat;
5594: PetscInt m = *mm, n = *mn;
5595: InsertMode addv = *maddv;
5596: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;
5597: PetscScalar value;
5600: MatCheckPreallocated(mat,1);
5601: if (mat->insertmode == NOT_SET_VALUES) mat->insertmode = addv;
5603: #if defined(PETSC_USE_DEBUG)
5604: else if (mat->insertmode != addv) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Cannot mix add values and insert values");
5605: #endif
5606: {
5607: PetscInt i,j,rstart = mat->rmap->rstart,rend = mat->rmap->rend;
5608: PetscInt cstart = mat->cmap->rstart,cend = mat->cmap->rend,row,col;
5609: PetscBool roworiented = aij->roworiented;
5611: /* Some variables required by the MatSetValues_SeqAIJ_*_Private() macros */
5612: Mat A = aij->A;
5613: Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data;
5614: PetscInt *aimax = a->imax,*ai = a->i,*ailen = a->ilen,*aj = a->j;
5615: MatScalar *aa = a->a;
5616: PetscBool ignorezeroentries = (((a->ignorezeroentries)&&(addv==ADD_VALUES)) ? PETSC_TRUE : PETSC_FALSE);
5617: Mat B = aij->B;
5618: Mat_SeqAIJ *b = (Mat_SeqAIJ*)B->data;
5619: PetscInt *bimax = b->imax,*bi = b->i,*bilen = b->ilen,*bj = b->j,bm = aij->B->rmap->n,am = aij->A->rmap->n;
5620: MatScalar *ba = b->a;
5622: PetscInt *rp1,*rp2,ii,nrow1,nrow2,_i,rmax1,rmax2,N,low1,high1,low2,high2,t,lastcol1,lastcol2;
5623: PetscInt nonew = a->nonew;
5624: MatScalar *ap1,*ap2;
5627: for (i=0; i<m; i++) {
5628: if (im[i] < 0) continue;
5629: #if defined(PETSC_USE_DEBUG)
5630: if (im[i] >= mat->rmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Row too large: row %D max %D",im[i],mat->rmap->N-1);
5631: #endif
5632: if (im[i] >= rstart && im[i] < rend) {
5633: row = im[i] - rstart;
5634: lastcol1 = -1;
5635: rp1 = aj + ai[row];
5636: ap1 = aa + ai[row];
5637: rmax1 = aimax[row];
5638: nrow1 = ailen[row];
5639: low1 = 0;
5640: high1 = nrow1;
5641: lastcol2 = -1;
5642: rp2 = bj + bi[row];
5643: ap2 = ba + bi[row];
5644: rmax2 = bimax[row];
5645: nrow2 = bilen[row];
5646: low2 = 0;
5647: high2 = nrow2;
5649: for (j=0; j<n; j++) {
5650: if (roworiented) value = v[i*n+j];
5651: else value = v[i+j*m];
5652: if (in[j] >= cstart && in[j] < cend) {
5653: col = in[j] - cstart;
5654: if (ignorezeroentries && value == 0.0 && (addv == ADD_VALUES) && row != col) continue;
5655: MatSetValues_SeqAIJ_A_Private(row,col,value,addv,im[i],in[j]);
5656: } else if (in[j] < 0) continue;
5657: #if defined(PETSC_USE_DEBUG)
5658: else if (in[j] >= mat->cmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Column too large: col %D max %D",in[j],mat->cmap->N-1);
5659: #endif
5660: else {
5661: if (mat->was_assembled) {
5662: if (!aij->colmap) {
5663: MatCreateColmap_MPIAIJ_Private(mat);
5664: }
5665: #if defined(PETSC_USE_CTABLE)
5666: PetscTableFind(aij->colmap,in[j]+1,&col);
5667: col--;
5668: #else
5669: col = aij->colmap[in[j]] - 1;
5670: #endif
5671: if (ignorezeroentries && value == 0.0 && (addv == ADD_VALUES) && row != col) continue;
5672: if (col < 0 && !((Mat_SeqAIJ*)(aij->A->data))->nonew) {
5673: MatDisAssemble_MPIAIJ(mat);
5674: col = in[j];
5675: /* Reinitialize the variables required by MatSetValues_SeqAIJ_B_Private() */
5676: B = aij->B;
5677: b = (Mat_SeqAIJ*)B->data;
5678: bimax = b->imax; bi = b->i; bilen = b->ilen; bj = b->j;
5679: rp2 = bj + bi[row];
5680: ap2 = ba + bi[row];
5681: rmax2 = bimax[row];
5682: nrow2 = bilen[row];
5683: low2 = 0;
5684: high2 = nrow2;
5685: bm = aij->B->rmap->n;
5686: ba = b->a;
5687: }
5688: } else col = in[j];
5689: MatSetValues_SeqAIJ_B_Private(row,col,value,addv,im[i],in[j]);
5690: }
5691: }
5692: } else if (!aij->donotstash) {
5693: if (roworiented) {
5694: MatStashValuesRow_Private(&mat->stash,im[i],n,in,v+i*n,(PetscBool)(ignorezeroentries && (addv == ADD_VALUES)));
5695: } else {
5696: MatStashValuesCol_Private(&mat->stash,im[i],n,in,v+i,m,(PetscBool)(ignorezeroentries && (addv == ADD_VALUES)));
5697: }
5698: }
5699: }
5700: }
5701: PetscFunctionReturnVoid();
5702: }