Actual source code: mpiaij.c
1: #define PETSCMAT_DLL
3: #include "../src/mat/impls/aij/mpi/mpiaij.h"
4: #include "../src/inline/spops.h"
8: /*
9: Distributes a SeqAIJ matrix across a set of processes. Code stolen from
10: MatLoad_MPIAIJ(). Horrible lack of reuse. Should be a routine for each matrix type.
12: Only for square matrices
13: */
14: PetscErrorCode MatDistribute_MPIAIJ(MPI_Comm comm,Mat gmat,PetscInt m,MatReuse reuse,Mat *inmat)
15: {
16: PetscMPIInt rank,size;
17: PetscInt *rowners,*dlens,*olens,i,rstart,rend,j,jj,nz,*gmataj,cnt,row,*ld;
19: Mat mat;
20: Mat_SeqAIJ *gmata;
21: PetscMPIInt tag;
22: MPI_Status status;
23: PetscTruth aij;
24: MatScalar *gmataa,*ao,*ad,*gmataarestore=0;
27: CHKMEMQ;
28: MPI_Comm_rank(comm,&rank);
29: MPI_Comm_size(comm,&size);
30: if (!rank) {
31: PetscTypeCompare((PetscObject)gmat,MATSEQAIJ,&aij);
32: if (!aij) SETERRQ1(PETSC_ERR_SUP,"Currently no support for input matrix of type %s\n",((PetscObject)gmat)->type_name);
33: }
34: if (reuse == MAT_INITIAL_MATRIX) {
35: MatCreate(comm,&mat);
36: MatSetSizes(mat,m,m,PETSC_DETERMINE,PETSC_DETERMINE);
37: MatSetType(mat,MATAIJ);
38: PetscMalloc((size+1)*sizeof(PetscInt),&rowners);
39: PetscMalloc2(m,PetscInt,&dlens,m,PetscInt,&olens);
40: MPI_Allgather(&m,1,MPIU_INT,rowners+1,1,MPIU_INT,comm);
41: rowners[0] = 0;
42: for (i=2; i<=size; i++) {
43: rowners[i] += rowners[i-1];
44: }
45: rstart = rowners[rank];
46: rend = rowners[rank+1];
47: PetscObjectGetNewTag((PetscObject)mat,&tag);
48: if (!rank) {
49: gmata = (Mat_SeqAIJ*) gmat->data;
50: /* send row lengths to all processors */
51: for (i=0; i<m; i++) dlens[i] = gmata->ilen[i];
52: for (i=1; i<size; i++) {
53: MPI_Send(gmata->ilen + rowners[i],rowners[i+1]-rowners[i],MPIU_INT,i,tag,comm);
54: }
55: /* determine the number of diagonal and off-diagonal entries per row */
56: PetscMemzero(olens,m*sizeof(PetscInt));
57: PetscMalloc(m*sizeof(PetscInt),&ld);
58: PetscMemzero(ld,m*sizeof(PetscInt));
59: jj = 0;
60: for (i=0; i<m; i++) {
61: for (j=0; j<dlens[i]; j++) {
62: if (gmata->j[jj] < rstart) ld[i]++;
63: if (gmata->j[jj] < rstart || gmata->j[jj] >= rend) olens[i]++;
64: jj++;
65: }
66: }
67: /* send column indices to other processes */
68: for (i=1; i<size; i++) {
69: nz = gmata->i[rowners[i+1]]-gmata->i[rowners[i]];
70: MPI_Send(&nz,1,MPIU_INT,i,tag,comm);
71: MPI_Send(gmata->j + gmata->i[rowners[i]],nz,MPIU_INT,i,tag,comm);
72: }
74: /* send numerical values to other processes */
75: for (i=1; i<size; i++) {
76: nz = gmata->i[rowners[i+1]]-gmata->i[rowners[i]];
77: MPI_Send(gmata->a + gmata->i[rowners[i]],nz,MPIU_SCALAR,i,tag,comm);
78: }
79: gmataa = gmata->a;
80: gmataj = gmata->j;
82: } else {
83: /* receive row lengths */
84: MPI_Recv(dlens,m,MPIU_INT,0,tag,comm,&status);
85: /* receive column indices */
86: MPI_Recv(&nz,1,MPIU_INT,0,tag,comm,&status);
87: PetscMalloc2(nz,PetscScalar,&gmataa,nz,PetscInt,&gmataj);
88: MPI_Recv(gmataj,nz,MPIU_INT,0,tag,comm,&status);
89: /* determine the number of diagonal and off-diagonal entries per row */
90: PetscMemzero(olens,m*sizeof(PetscInt));
91: PetscMalloc(m*sizeof(PetscInt),&ld);
92: PetscMemzero(ld,m*sizeof(PetscInt));
93: jj = 0;
94: for (i=0; i<m; i++) {
95: for (j=0; j<dlens[i]; j++) {
96: if (gmataj[jj] < rstart) ld[i]++;
97: if (gmataj[jj] < rstart || gmataj[jj] >= rend) olens[i]++;
98: jj++;
99: }
100: }
101: /* receive numerical values */
102: PetscMemzero(gmataa,nz*sizeof(PetscScalar));
103: MPI_Recv(gmataa,nz,MPIU_SCALAR,0,tag,comm,&status);
104: }
105: /* set preallocation */
106: for (i=0; i<m; i++) {
107: dlens[i] -= olens[i];
108: }
109: MatSeqAIJSetPreallocation(mat,0,dlens);
110: MatMPIAIJSetPreallocation(mat,0,dlens,0,olens);
111:
112: for (i=0; i<m; i++) {
113: dlens[i] += olens[i];
114: }
115: cnt = 0;
116: for (i=0; i<m; i++) {
117: row = rstart + i;
118: MatSetValues(mat,1,&row,dlens[i],gmataj+cnt,gmataa+cnt,INSERT_VALUES);
119: cnt += dlens[i];
120: }
121: if (rank) {
122: PetscFree2(gmataa,gmataj);
123: }
124: PetscFree2(dlens,olens);
125: PetscFree(rowners);
126: ((Mat_MPIAIJ*)(mat->data))->ld = ld;
127: *inmat = mat;
128: } else { /* column indices are already set; only need to move over numerical values from process 0 */
129: Mat_SeqAIJ *Ad = (Mat_SeqAIJ*)((Mat_MPIAIJ*)((*inmat)->data))->A->data;
130: Mat_SeqAIJ *Ao = (Mat_SeqAIJ*)((Mat_MPIAIJ*)((*inmat)->data))->B->data;
131: mat = *inmat;
132: PetscObjectGetNewTag((PetscObject)mat,&tag);
133: if (!rank) {
134: /* send numerical values to other processes */
135: gmata = (Mat_SeqAIJ*) gmat->data;
136: MatGetOwnershipRanges(mat,(const PetscInt**)&rowners);
137: gmataa = gmata->a;
138: for (i=1; i<size; i++) {
139: nz = gmata->i[rowners[i+1]]-gmata->i[rowners[i]];
140: MPI_Send(gmataa + gmata->i[rowners[i]],nz,MPIU_SCALAR,i,tag,comm);
141: }
142: nz = gmata->i[rowners[1]]-gmata->i[rowners[0]];
143: } else {
144: /* receive numerical values from process 0*/
145: nz = Ad->nz + Ao->nz;
146: PetscMalloc(nz*sizeof(PetscScalar),&gmataa); gmataarestore = gmataa;
147: MPI_Recv(gmataa,nz,MPIU_SCALAR,0,tag,comm,&status);
148: }
149: /* transfer numerical values into the diagonal A and off diagonal B parts of mat */
150: ld = ((Mat_MPIAIJ*)(mat->data))->ld;
151: ad = Ad->a;
152: ao = Ao->a;
153: if (mat->rmap->n) {
154: i = 0;
155: nz = ld[i]; PetscMemcpy(ao,gmataa,nz*sizeof(PetscScalar)); ao += nz; gmataa += nz;
156: nz = Ad->i[i+1] - Ad->i[i]; PetscMemcpy(ad,gmataa,nz*sizeof(PetscScalar)); ad += nz; gmataa += nz;
157: }
158: for (i=1; i<mat->rmap->n; i++) {
159: nz = Ao->i[i] - Ao->i[i-1] - ld[i-1] + ld[i]; PetscMemcpy(ao,gmataa,nz*sizeof(PetscScalar)); ao += nz; gmataa += nz;
160: nz = Ad->i[i+1] - Ad->i[i]; PetscMemcpy(ad,gmataa,nz*sizeof(PetscScalar)); ad += nz; gmataa += nz;
161: }
162: i--;
163: if (mat->rmap->n) {
164: nz = Ao->i[i+1] - Ao->i[i] - ld[i]; PetscMemcpy(ao,gmataa,nz*sizeof(PetscScalar)); ao += nz; gmataa += nz;
165: }
166: if (rank) {
167: PetscFree(gmataarestore);
168: }
169: }
170: MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY);
171: MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY);
172: CHKMEMQ;
173: return(0);
174: }
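
/*
   Usage sketch (illustrative, not part of the original source): distributing a square
   SeqAIJ matrix held on process 0 across a communicator.  The even block-row split and
   the helper name are assumptions of this example, not PETSc API.
*/
static PetscErrorCode ExampleDistributeSeqAIJ(MPI_Comm comm,Mat gmat,PetscInt M,Mat *dmat)
{
  PetscMPIInt rank,size;
  PetscInt    m;

  MPI_Comm_rank(comm,&rank);
  MPI_Comm_size(comm,&size);
  m = M/size + ((M % size) > rank ? 1 : 0);               /* simple block-row split */
  MatDistribute_MPIAIJ(comm,gmat,m,MAT_INITIAL_MATRIX,dmat);
  /* if gmat's numerical values later change but its nonzero pattern does not,
     only the values need to be moved again */
  MatDistribute_MPIAIJ(comm,gmat,m,MAT_REUSE_MATRIX,dmat);
  return(0);
}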
176: /*
177: Local utility routine that creates a mapping from the global column
178: number to the local number in the off-diagonal part of the local
179: storage of the matrix. When PETSC_USE_CTABLE is used this is scalable at
180: a slightly higher hash table cost; without it, it is not scalable (each processor
181: has an order-N integer array) but is fast to access.
182: */
185: PetscErrorCode CreateColmap_MPIAIJ_Private(Mat mat)
186: {
187: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;
189: PetscInt n = aij->B->cmap->n,i;
192: #if defined (PETSC_USE_CTABLE)
193: PetscTableCreate(n,&aij->colmap);
194: for (i=0; i<n; i++){
195: PetscTableAdd(aij->colmap,aij->garray[i]+1,i+1);
196: }
197: #else
198: PetscMalloc((mat->cmap->N+1)*sizeof(PetscInt),&aij->colmap);
199: PetscLogObjectMemory(mat,mat->cmap->N*sizeof(PetscInt));
200: PetscMemzero(aij->colmap,mat->cmap->N*sizeof(PetscInt));
201: for (i=0; i<n; i++) aij->colmap[aij->garray[i]] = i+1;
202: #endif
203: return(0);
204: }
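
/*
   Lookup sketch (illustrative, not part of the original source): translating a global
   column gcol into the local column index of the off-diagonal block, mirroring the code
   in MatSetValues_MPIAIJ() below.  A result of -1 means gcol does not appear in this
   process' off-diagonal part.  The function name is an assumption of this example.
*/
static PetscErrorCode ExampleColmapLookup(Mat mat,PetscInt gcol,PetscInt *lcol)
{
  Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;

  if (!aij->colmap) CreateColmap_MPIAIJ_Private(mat);
#if defined (PETSC_USE_CTABLE)
  PetscTableFind(aij->colmap,gcol+1,lcol);
  (*lcol)--;                      /* stored 1-based so that 0 can mean "absent" */
#else
  *lcol = aij->colmap[gcol] - 1;  /* same 1-based convention */
#endif
  return(0);
}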
207: #define CHUNKSIZE 15
208: #define MatSetValues_SeqAIJ_A_Private(row,col,value,addv) \
209: { \
210: if (col <= lastcol1) low1 = 0; else high1 = nrow1; \
211: lastcol1 = col;\
212: while (high1-low1 > 5) { \
213: t = (low1+high1)/2; \
214: if (rp1[t] > col) high1 = t; \
215: else low1 = t; \
216: } \
217: for (_i=low1; _i<high1; _i++) { \
218: if (rp1[_i] > col) break; \
219: if (rp1[_i] == col) { \
220: if (addv == ADD_VALUES) ap1[_i] += value; \
221: else ap1[_i] = value; \
222: goto a_noinsert; \
223: } \
224: } \
225: if (value == 0.0 && ignorezeroentries) {low1 = 0; high1 = nrow1;goto a_noinsert;} \
226: if (nonew == 1) {low1 = 0; high1 = nrow1; goto a_noinsert;} \
227: if (nonew == -1) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero (%D, %D) into matrix", row, col); \
228: MatSeqXAIJReallocateAIJ(A,am,1,nrow1,row,col,rmax1,aa,ai,aj,rp1,ap1,aimax,nonew,MatScalar); \
229: N = nrow1++ - 1; a->nz++; high1++; \
230: /* shift up all the later entries in this row */ \
231: for (ii=N; ii>=_i; ii--) { \
232: rp1[ii+1] = rp1[ii]; \
233: ap1[ii+1] = ap1[ii]; \
234: } \
235: rp1[_i] = col; \
236: ap1[_i] = value; \
237: a_noinsert: ; \
238: ailen[row] = nrow1; \
239: }
242: #define MatSetValues_SeqAIJ_B_Private(row,col,value,addv) \
243: { \
244: if (col <= lastcol2) low2 = 0; else high2 = nrow2; \
245: lastcol2 = col;\
246: while (high2-low2 > 5) { \
247: t = (low2+high2)/2; \
248: if (rp2[t] > col) high2 = t; \
249: else low2 = t; \
250: } \
251: for (_i=low2; _i<high2; _i++) { \
252: if (rp2[_i] > col) break; \
253: if (rp2[_i] == col) { \
254: if (addv == ADD_VALUES) ap2[_i] += value; \
255: else ap2[_i] = value; \
256: goto b_noinsert; \
257: } \
258: } \
259: if (value == 0.0 && ignorezeroentries) {low2 = 0; high2 = nrow2; goto b_noinsert;} \
260: if (nonew == 1) {low2 = 0; high2 = nrow2; goto b_noinsert;} \
261: if (nonew == -1) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero (%D, %D) into matrix", row, col); \
262: MatSeqXAIJReallocateAIJ(B,bm,1,nrow2,row,col,rmax2,ba,bi,bj,rp2,ap2,bimax,nonew,MatScalar); \
263: N = nrow2++ - 1; b->nz++; high2++; \
264: /* shift up all the later entries in this row */ \
265: for (ii=N; ii>=_i; ii--) { \
266: rp2[ii+1] = rp2[ii]; \
267: ap2[ii+1] = ap2[ii]; \
268: } \
269: rp2[_i] = col; \
270: ap2[_i] = value; \
271: b_noinsert: ; \
272: bilen[row] = nrow2; \
273: }
277: PetscErrorCode MatSetValuesRow_MPIAIJ(Mat A,PetscInt row,const PetscScalar v[])
278: {
279: Mat_MPIAIJ *mat = (Mat_MPIAIJ*)A->data;
280: Mat_SeqAIJ *a = (Mat_SeqAIJ*)mat->A->data,*b = (Mat_SeqAIJ*)mat->B->data;
282: PetscInt l,*garray = mat->garray,diag;
285: /* code only works for square matrices A */
287: /* find size of row to the left of the diagonal part */
288: MatGetOwnershipRange(A,&diag,0);
289: row = row - diag;
290: for (l=0; l<b->i[row+1]-b->i[row]; l++) {
291: if (garray[b->j[b->i[row]+l]] > diag) break;
292: }
293: PetscMemcpy(b->a+b->i[row],v,l*sizeof(PetscScalar));
295: /* diagonal part */
296: PetscMemcpy(a->a+a->i[row],v+l,(a->i[row+1]-a->i[row])*sizeof(PetscScalar));
298: /* right of diagonal part */
299: PetscMemcpy(b->a+b->i[row]+l,v+l+a->i[row+1]-a->i[row],(b->i[row+1]-b->i[row]-l)*sizeof(PetscScalar));
300: return(0);
301: }
305: PetscErrorCode MatSetValues_MPIAIJ(Mat mat,PetscInt m,const PetscInt im[],PetscInt n,const PetscInt in[],const PetscScalar v[],InsertMode addv)
306: {
307: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;
308: PetscScalar value;
310: PetscInt i,j,rstart = mat->rmap->rstart,rend = mat->rmap->rend;
311: PetscInt cstart = mat->cmap->rstart,cend = mat->cmap->rend,row,col;
312: PetscTruth roworiented = aij->roworiented;
314: /* Some Variables required in the macro */
315: Mat A = aij->A;
316: Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data;
317: PetscInt *aimax = a->imax,*ai = a->i,*ailen = a->ilen,*aj = a->j;
318: MatScalar *aa = a->a;
319: PetscTruth ignorezeroentries = a->ignorezeroentries;
320: Mat B = aij->B;
321: Mat_SeqAIJ *b = (Mat_SeqAIJ*)B->data;
322: PetscInt *bimax = b->imax,*bi = b->i,*bilen = b->ilen,*bj = b->j,bm = aij->B->rmap->n,am = aij->A->rmap->n;
323: MatScalar *ba = b->a;
325: PetscInt *rp1,*rp2,ii,nrow1,nrow2,_i,rmax1,rmax2,N,low1,high1,low2,high2,t,lastcol1,lastcol2;
326: PetscInt nonew = a->nonew;
327: MatScalar *ap1,*ap2;
330: for (i=0; i<m; i++) {
331: if (im[i] < 0) continue;
332: #if defined(PETSC_USE_DEBUG)
333: if (im[i] >= mat->rmap->N) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Row too large: row %D max %D",im[i],mat->rmap->N-1);
334: #endif
335: if (im[i] >= rstart && im[i] < rend) {
336: row = im[i] - rstart;
337: lastcol1 = -1;
338: rp1 = aj + ai[row];
339: ap1 = aa + ai[row];
340: rmax1 = aimax[row];
341: nrow1 = ailen[row];
342: low1 = 0;
343: high1 = nrow1;
344: lastcol2 = -1;
345: rp2 = bj + bi[row];
346: ap2 = ba + bi[row];
347: rmax2 = bimax[row];
348: nrow2 = bilen[row];
349: low2 = 0;
350: high2 = nrow2;
352: for (j=0; j<n; j++) {
353: if (v) {if (roworiented) value = v[i*n+j]; else value = v[i+j*m];} else value = 0.0;
354: if (ignorezeroentries && value == 0.0 && (addv == ADD_VALUES)) continue;
355: if (in[j] >= cstart && in[j] < cend){
356: col = in[j] - cstart;
357: MatSetValues_SeqAIJ_A_Private(row,col,value,addv);
358: } else if (in[j] < 0) continue;
359: #if defined(PETSC_USE_DEBUG)
360: else if (in[j] >= mat->cmap->N) {SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Column too large: col %D max %D",in[j],mat->cmap->N-1);}
361: #endif
362: else {
363: if (mat->was_assembled) {
364: if (!aij->colmap) {
365: CreateColmap_MPIAIJ_Private(mat);
366: }
367: #if defined (PETSC_USE_CTABLE)
368: PetscTableFind(aij->colmap,in[j]+1,&col);
369: col--;
370: #else
371: col = aij->colmap[in[j]] - 1;
372: #endif
373: if (col < 0 && !((Mat_SeqAIJ*)(aij->A->data))->nonew) {
374: DisAssemble_MPIAIJ(mat);
375: col = in[j];
376: /* Reinitialize the variables required by MatSetValues_SeqAIJ_B_Private() */
377: B = aij->B;
378: b = (Mat_SeqAIJ*)B->data;
379: bimax = b->imax; bi = b->i; bilen = b->ilen; bj = b->j; ba = b->a;
380: rp2 = bj + bi[row];
381: ap2 = ba + bi[row];
382: rmax2 = bimax[row];
383: nrow2 = bilen[row];
384: low2 = 0;
385: high2 = nrow2;
386: bm = aij->B->rmap->n;
387: ba = b->a;
388: }
389: } else col = in[j];
390: MatSetValues_SeqAIJ_B_Private(row,col,value,addv);
391: }
392: }
393: } else {
394: if (!aij->donotstash) {
395: if (roworiented) {
396: MatStashValuesRow_Private(&mat->stash,im[i],n,in,v+i*n,(PetscTruth)(ignorezeroentries && (addv == ADD_VALUES)));
397: } else {
398: MatStashValuesCol_Private(&mat->stash,im[i],n,in,v+i,m,(PetscTruth)(ignorezeroentries && (addv == ADD_VALUES)));
399: }
400: }
401: }
402: }
403: return(0);
404: }
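
/*
   Usage sketch (illustrative, not part of the original source): a caller filling an
   MPIAIJ matrix.  Entries whose rows are owned locally go straight into the A/B blocks
   above; rows owned by other processes are stashed and communicated during assembly.
   The tridiagonal stencil, sizes, and values are assumptions of the example.
*/
static PetscErrorCode ExampleFillMPIAIJ(MPI_Comm comm,PetscInt n,Mat *newmat)
{
  Mat         C;
  PetscInt    i,rstart,rend,col[3];
  PetscScalar val[3];

  MatCreate(comm,&C);
  MatSetSizes(C,PETSC_DECIDE,PETSC_DECIDE,n,n);
  MatSetType(C,MATMPIAIJ);
  MatMPIAIJSetPreallocation(C,3,PETSC_NULL,2,PETSC_NULL);
  MatGetOwnershipRange(C,&rstart,&rend);
  for (i=rstart; i<rend; i++) {
    /* simple tridiagonal stencil; boundary rows drop the out-of-range entries */
    col[0] = i-1;  col[1] = i;   col[2] = i+1;
    val[0] = -1.0; val[1] = 2.0; val[2] = -1.0;
    if (i == 0)        {MatSetValues(C,1,&i,2,col+1,val+1,INSERT_VALUES);}
    else if (i == n-1) {MatSetValues(C,1,&i,2,col,val,INSERT_VALUES);}
    else               {MatSetValues(C,1,&i,3,col,val,INSERT_VALUES);}
  }
  MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);
  MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);
  *newmat = C;
  return(0);
}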
408: PetscErrorCode MatGetValues_MPIAIJ(Mat mat,PetscInt m,const PetscInt idxm[],PetscInt n,const PetscInt idxn[],PetscScalar v[])
409: {
410: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;
412: PetscInt i,j,rstart = mat->rmap->rstart,rend = mat->rmap->rend;
413: PetscInt cstart = mat->cmap->rstart,cend = mat->cmap->rend,row,col;
416: for (i=0; i<m; i++) {
417: if (idxm[i] < 0) continue; /* SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE,"Negative row: %D",idxm[i]);*/
418: if (idxm[i] >= mat->rmap->N) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Row too large: row %D max %D",idxm[i],mat->rmap->N-1);
419: if (idxm[i] >= rstart && idxm[i] < rend) {
420: row = idxm[i] - rstart;
421: for (j=0; j<n; j++) {
422: if (idxn[j] < 0) continue; /* SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE,"Negative column: %D",idxn[j]); */
423: if (idxn[j] >= mat->cmap->N) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Column too large: col %D max %D",idxn[j],mat->cmap->N-1);
424: if (idxn[j] >= cstart && idxn[j] < cend){
425: col = idxn[j] - cstart;
426: MatGetValues(aij->A,1,&row,1,&col,v+i*n+j);
427: } else {
428: if (!aij->colmap) {
429: CreateColmap_MPIAIJ_Private(mat);
430: }
431: #if defined (PETSC_USE_CTABLE)
432: PetscTableFind(aij->colmap,idxn[j]+1,&col);
433: col --;
434: #else
435: col = aij->colmap[idxn[j]] - 1;
436: #endif
437: if ((col < 0) || (aij->garray[col] != idxn[j])) *(v+i*n+j) = 0.0;
438: else {
439: MatGetValues(aij->B,1,&row,1,&col,v+i*n+j);
440: }
441: }
442: }
443: } else {
444: SETERRQ(PETSC_ERR_SUP,"Only local values currently supported");
445: }
446: }
447: return(0);
448: }
452: PetscErrorCode MatAssemblyBegin_MPIAIJ(Mat mat,MatAssemblyType mode)
453: {
454: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;
456: PetscInt nstash,reallocs;
457: InsertMode addv;
460: if (aij->donotstash) {
461: return(0);
462: }
464: /* make sure all processors are either in INSERTMODE or ADDMODE */
465: MPI_Allreduce(&mat->insertmode,&addv,1,MPI_INT,MPI_BOR,((PetscObject)mat)->comm);
466: if (addv == (ADD_VALUES|INSERT_VALUES)) {
467: SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Some processors inserted others added");
468: }
469: mat->insertmode = addv; /* in case this processor had no cache */
471: MatStashScatterBegin_Private(mat,&mat->stash,mat->rmap->range);
472: MatStashGetInfo_Private(&mat->stash,&nstash,&reallocs);
473: PetscInfo2(aij->A,"Stash has %D entries, uses %D mallocs.\n",nstash,reallocs);
474: return(0);
475: }
479: PetscErrorCode MatAssemblyEnd_MPIAIJ(Mat mat,MatAssemblyType mode)
480: {
481: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;
482: Mat_SeqAIJ *a=(Mat_SeqAIJ *)aij->A->data;
484: PetscMPIInt n;
485: PetscInt i,j,rstart,ncols,flg;
486: PetscInt *row,*col;
487: PetscTruth other_disassembled;
488: PetscScalar *val;
489: InsertMode addv = mat->insertmode;
491: /* do not use 'b = (Mat_SeqAIJ *)aij->B->data' as B can be reset in disassembly */
493: if (!aij->donotstash) {
494: while (1) {
495: MatStashScatterGetMesg_Private(&mat->stash,&n,&row,&col,&val,&flg);
496: if (!flg) break;
498: for (i=0; i<n;) {
499: /* Now identify the consecutive vals belonging to the same row */
500: for (j=i,rstart=row[j]; j<n; j++) { if (row[j] != rstart) break; }
501: if (j < n) ncols = j-i;
502: else ncols = n-i;
503: /* Now assemble all these values with a single function call */
504: MatSetValues_MPIAIJ(mat,1,row+i,ncols,col+i,val+i,addv);
505: i = j;
506: }
507: }
508: MatStashScatterEnd_Private(&mat->stash);
509: }
510: a->compressedrow.use = PETSC_FALSE;
511: MatAssemblyBegin(aij->A,mode);
512: MatAssemblyEnd(aij->A,mode);
514: /* determine if any processor has disassembled, if so we must
515: also disassemble ourselves, in order that we may reassemble. */
516: /*
517: if nonzero structure of submatrix B cannot change then we know that
518: no processor disassembled thus we can skip this stuff
519: */
520: if (!((Mat_SeqAIJ*)aij->B->data)->nonew) {
521: MPI_Allreduce(&mat->was_assembled,&other_disassembled,1,MPI_INT,MPI_PROD,((PetscObject)mat)->comm);
522: if (mat->was_assembled && !other_disassembled) {
523: DisAssemble_MPIAIJ(mat);
524: }
525: }
526: if (!mat->was_assembled && mode == MAT_FINAL_ASSEMBLY) {
527: MatSetUpMultiply_MPIAIJ(mat);
528: }
529: MatSetOption(aij->B,MAT_USE_INODES,PETSC_FALSE);
530: ((Mat_SeqAIJ *)aij->B->data)->compressedrow.use = PETSC_TRUE; /* b->compressedrow.use */
531: MatAssemblyBegin(aij->B,mode);
532: MatAssemblyEnd(aij->B,mode);
534: PetscFree(aij->rowvalues);
535: aij->rowvalues = 0;
537: /* used by MatAXPY() */
538: a->xtoy = 0; ((Mat_SeqAIJ *)aij->B->data)->xtoy = 0; /* b->xtoy = 0 */
539: a->XtoY = 0; ((Mat_SeqAIJ *)aij->B->data)->XtoY = 0; /* b->XtoY = 0 */
541: return(0);
542: }
546: PetscErrorCode MatZeroEntries_MPIAIJ(Mat A)
547: {
548: Mat_MPIAIJ *l = (Mat_MPIAIJ*)A->data;
552: MatZeroEntries(l->A);
553: MatZeroEntries(l->B);
554: return(0);
555: }
559: PetscErrorCode MatZeroRows_MPIAIJ(Mat A,PetscInt N,const PetscInt rows[],PetscScalar diag)
560: {
561: Mat_MPIAIJ *l = (Mat_MPIAIJ*)A->data;
563: PetscMPIInt size = l->size,imdex,n,rank = l->rank,tag = ((PetscObject)A)->tag,lastidx = -1;
564: PetscInt i,*owners = A->rmap->range;
565: PetscInt *nprocs,j,idx,nsends,row;
566: PetscInt nmax,*svalues,*starts,*owner,nrecvs;
567: PetscInt *rvalues,count,base,slen,*source;
568: PetscInt *lens,*lrows,*values,rstart=A->rmap->rstart;
569: MPI_Comm comm = ((PetscObject)A)->comm;
570: MPI_Request *send_waits,*recv_waits;
571: MPI_Status recv_status,*send_status;
572: #if defined(PETSC_DEBUG)
573: PetscTruth found = PETSC_FALSE;
574: #endif
577: /* first count number of contributors to each processor */
578: PetscMalloc(2*size*sizeof(PetscInt),&nprocs);
579: PetscMemzero(nprocs,2*size*sizeof(PetscInt));
580: PetscMalloc((N+1)*sizeof(PetscInt),&owner); /* see note*/
581: j = 0;
582: for (i=0; i<N; i++) {
583: if (lastidx > (idx = rows[i])) j = 0;
584: lastidx = idx;
585: for (; j<size; j++) {
586: if (idx >= owners[j] && idx < owners[j+1]) {
587: nprocs[2*j]++;
588: nprocs[2*j+1] = 1;
589: owner[i] = j;
590: #if defined(PETSC_DEBUG)
591: found = PETSC_TRUE;
592: #endif
593: break;
594: }
595: }
596: #if defined(PETSC_DEBUG)
597: if (!found) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Index out of range");
598: found = PETSC_FALSE;
599: #endif
600: }
601: nsends = 0; for (i=0; i<size; i++) { nsends += nprocs[2*i+1];}
603: /* inform other processors of number of messages and max length*/
604: PetscMaxSum(comm,nprocs,&nmax,&nrecvs);
606: /* post receives: */
607: PetscMalloc((nrecvs+1)*(nmax+1)*sizeof(PetscInt),&rvalues);
608: PetscMalloc((nrecvs+1)*sizeof(MPI_Request),&recv_waits);
609: for (i=0; i<nrecvs; i++) {
610: MPI_Irecv(rvalues+nmax*i,nmax,MPIU_INT,MPI_ANY_SOURCE,tag,comm,recv_waits+i);
611: }
613: /* do sends:
614: 1) starts[i] gives the starting index in svalues for stuff going to
615: the ith processor
616: */
617: PetscMalloc((N+1)*sizeof(PetscInt),&svalues);
618: PetscMalloc((nsends+1)*sizeof(MPI_Request),&send_waits);
619: PetscMalloc((size+1)*sizeof(PetscInt),&starts);
620: starts[0] = 0;
621: for (i=1; i<size; i++) { starts[i] = starts[i-1] + nprocs[2*i-2];}
622: for (i=0; i<N; i++) {
623: svalues[starts[owner[i]]++] = rows[i];
624: }
626: starts[0] = 0;
627: for (i=1; i<size+1; i++) { starts[i] = starts[i-1] + nprocs[2*i-2];}
628: count = 0;
629: for (i=0; i<size; i++) {
630: if (nprocs[2*i+1]) {
631: MPI_Isend(svalues+starts[i],nprocs[2*i],MPIU_INT,i,tag,comm,send_waits+count++);
632: }
633: }
634: PetscFree(starts);
636: base = owners[rank];
638: /* wait on receives */
639: PetscMalloc(2*(nrecvs+1)*sizeof(PetscInt),&lens);
640: source = lens + nrecvs;
641: count = nrecvs; slen = 0;
642: while (count) {
643: MPI_Waitany(nrecvs,recv_waits,&imdex,&recv_status);
644: /* unpack receives into our local space */
645: MPI_Get_count(&recv_status,MPIU_INT,&n);
646: source[imdex] = recv_status.MPI_SOURCE;
647: lens[imdex] = n;
648: slen += n;
649: count--;
650: }
651: PetscFree(recv_waits);
652:
653: /* move the data into the send scatter */
654: PetscMalloc((slen+1)*sizeof(PetscInt),&lrows);
655: count = 0;
656: for (i=0; i<nrecvs; i++) {
657: values = rvalues + i*nmax;
658: for (j=0; j<lens[i]; j++) {
659: lrows[count++] = values[j] - base;
660: }
661: }
662: PetscFree(rvalues);
663: PetscFree(lens);
664: PetscFree(owner);
665: PetscFree(nprocs);
666:
667: /* actually zap the local rows */
668: /*
669: Zero the required rows. If the "diagonal block" of the matrix
670: is square and the user wishes to set the diagonal we use separate
671: code so that MatSetValues() is not called for each diagonal entry;
672: that would allocate new memory, triggering many mallocs and slowing things down.
674: Contributed by: Matthew Knepley
675: */
676: /* must zero l->B before l->A because the (diag) case below may put values into l->B*/
677: MatZeroRows(l->B,slen,lrows,0.0);
678: if ((diag != 0.0) && (l->A->rmap->N == l->A->cmap->N)) {
679: MatZeroRows(l->A,slen,lrows,diag);
680: } else if (diag != 0.0) {
681: MatZeroRows(l->A,slen,lrows,0.0);
682: if (((Mat_SeqAIJ*)l->A->data)->nonew) {
683: SETERRQ(PETSC_ERR_SUP,"MatZeroRows() on rectangular matrices cannot be used with the Mat options\n\
684: MAT_NEW_NONZERO_LOCATIONS,MAT_NEW_NONZERO_LOCATION_ERR,MAT_NEW_NONZERO_ALLOCATION_ERR");
685: }
686: for (i = 0; i < slen; i++) {
687: row = lrows[i] + rstart;
688: MatSetValues(A,1,&row,1,&row,&diag,INSERT_VALUES);
689: }
690: MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
691: MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
692: } else {
693: MatZeroRows(l->A,slen,lrows,0.0);
694: }
695: PetscFree(lrows);
697: /* wait on sends */
698: if (nsends) {
699: PetscMalloc(nsends*sizeof(MPI_Status),&send_status);
700: MPI_Waitall(nsends,send_waits,send_status);
701: PetscFree(send_status);
702: }
703: PetscFree(send_waits);
704: PetscFree(svalues);
706: return(0);
707: }
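
/*
   Usage sketch (illustrative, not part of the original source): zeroing the locally
   owned boundary rows of an assembled MPIAIJ matrix and placing 1.0 on their diagonal,
   e.g. to impose Dirichlet conditions.  Which rows count as "boundary" here is an
   assumption of the example.
*/
static PetscErrorCode ExampleZeroBoundaryRows(Mat A)
{
  PetscInt rstart,rend,M,N,rows[2],nrows = 0;

  MatGetOwnershipRange(A,&rstart,&rend);
  MatGetSize(A,&M,&N);
  if (rstart == 0) rows[nrows++] = 0;      /* first global row, if owned here */
  if (rend == M)   rows[nrows++] = M-1;    /* last global row, if owned here */
  MatZeroRows(A,nrows,rows,1.0);
  return(0);
}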
711: PetscErrorCode MatMult_MPIAIJ(Mat A,Vec xx,Vec yy)
712: {
713: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
715: PetscInt nt;
718: VecGetLocalSize(xx,&nt);
719: if (nt != A->cmap->n) {
720: SETERRQ2(PETSC_ERR_ARG_SIZ,"Incompatible partition of A (%D) and xx (%D)",A->cmap->n,nt);
721: }
722: VecScatterBegin(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
723: (*a->A->ops->mult)(a->A,xx,yy);
724: VecScatterEnd(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
725: (*a->B->ops->multadd)(a->B,a->lvec,yy,yy);
726: return(0);
727: }
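
/*
   Usage sketch (illustrative, not part of the original source): the input vector must
   be partitioned like the matrix columns and the output like the rows; MatGetVecs()
   produces conforming vectors, so the size check above then passes.
*/
static PetscErrorCode ExampleMultiply(Mat A)
{
  Vec x,y;

  MatGetVecs(A,&x,&y);     /* x conforms to the columns, y to the rows */
  VecSet(x,1.0);
  MatMult(A,x,y);          /* overlaps the off-diagonal scatter with the local product */
  VecDestroy(x);
  VecDestroy(y);
  return(0);
}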
731: PetscErrorCode MatMultAdd_MPIAIJ(Mat A,Vec xx,Vec yy,Vec zz)
732: {
733: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
737: VecScatterBegin(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
738: (*a->A->ops->multadd)(a->A,xx,yy,zz);
739: VecScatterEnd(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
740: (*a->B->ops->multadd)(a->B,a->lvec,zz,zz);
741: return(0);
742: }
746: PetscErrorCode MatMultTranspose_MPIAIJ(Mat A,Vec xx,Vec yy)
747: {
748: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
750: PetscTruth merged;
753: VecScatterGetMerged(a->Mvctx,&merged);
754: /* do nondiagonal part */
755: (*a->B->ops->multtranspose)(a->B,xx,a->lvec);
756: if (!merged) {
757: /* send it on its way */
758: VecScatterBegin(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);
759: /* do local part */
760: (*a->A->ops->multtranspose)(a->A,xx,yy);
761: /* receive remote parts: note this assumes the values are not actually */
762: /* added in yy until the next line, */
763: VecScatterEnd(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);
764: } else {
765: /* do local part */
766: (*a->A->ops->multtranspose)(a->A,xx,yy);
767: /* send it on its way */
768: VecScatterBegin(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);
769: /* values actually were received in the Begin() but we need to call this nop */
770: VecScatterEnd(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);
771: }
772: return(0);
773: }
778: PetscErrorCode MatIsTranspose_MPIAIJ(Mat Amat,Mat Bmat,PetscReal tol,PetscTruth *f)
779: {
780: MPI_Comm comm;
781: Mat_MPIAIJ *Aij = (Mat_MPIAIJ *) Amat->data, *Bij;
782: Mat Adia = Aij->A, Bdia, Aoff,Boff,*Aoffs,*Boffs;
783: IS Me,Notme;
785: PetscInt M,N,first,last,*notme,i;
786: PetscMPIInt size;
790: /* Easy test: symmetric diagonal block */
791: Bij = (Mat_MPIAIJ *) Bmat->data; Bdia = Bij->A;
792: MatIsTranspose(Adia,Bdia,tol,f);
793: if (!*f) return(0);
794: PetscObjectGetComm((PetscObject)Amat,&comm);
795: MPI_Comm_size(comm,&size);
796: if (size == 1) return(0);
798: /* Hard test: off-diagonal block. This takes a MatGetSubMatrix. */
799: MatGetSize(Amat,&M,&N);
800: MatGetOwnershipRange(Amat,&first,&last);
801: PetscMalloc((N-last+first)*sizeof(PetscInt),&notme);
802: for (i=0; i<first; i++) notme[i] = i;
803: for (i=last; i<M; i++) notme[i-last+first] = i;
804: ISCreateGeneral(MPI_COMM_SELF,N-last+first,notme,&Notme);
805: ISCreateStride(MPI_COMM_SELF,last-first,first,1,&Me);
806: MatGetSubMatrices(Amat,1,&Me,&Notme,MAT_INITIAL_MATRIX,&Aoffs);
807: Aoff = Aoffs[0];
808: MatGetSubMatrices(Bmat,1,&Notme,&Me,MAT_INITIAL_MATRIX,&Boffs);
809: Boff = Boffs[0];
810: MatIsTranspose(Aoff,Boff,tol,f);
811: MatDestroyMatrices(1,&Aoffs);
812: MatDestroyMatrices(1,&Boffs);
813: ISDestroy(Me);
814: ISDestroy(Notme);
816: return(0);
817: }
822: PetscErrorCode MatMultTransposeAdd_MPIAIJ(Mat A,Vec xx,Vec yy,Vec zz)
823: {
824: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
828: /* do nondiagonal part */
829: (*a->B->ops->multtranspose)(a->B,xx,a->lvec);
830: /* send it on its way */
831: VecScatterBegin(a->Mvctx,a->lvec,zz,ADD_VALUES,SCATTER_REVERSE);
832: /* do local part */
833: (*a->A->ops->multtransposeadd)(a->A,xx,yy,zz);
834: /* receive remote parts */
835: VecScatterEnd(a->Mvctx,a->lvec,zz,ADD_VALUES,SCATTER_REVERSE);
836: return(0);
837: }
839: /*
840: This only works correctly for square matrices where the subblock A->A is the
841: diagonal block
842: */
845: PetscErrorCode MatGetDiagonal_MPIAIJ(Mat A,Vec v)
846: {
848: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
851: if (A->rmap->N != A->cmap->N) SETERRQ(PETSC_ERR_SUP,"Supports only square matrix where A->A is diag block");
852: if (A->rmap->rstart != A->cmap->rstart || A->rmap->rend != A->cmap->rend) {
853: SETERRQ(PETSC_ERR_ARG_SIZ,"row partition must equal col partition");
854: }
855: MatGetDiagonal(a->A,v);
856: return(0);
857: }
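
/*
   Usage sketch (illustrative, not part of the original source): extracting the diagonal
   of a square MPIAIJ matrix into a conforming parallel vector.
*/
static PetscErrorCode ExampleGetDiagonal(Mat A,Vec *diag)
{
  MatGetVecs(A,PETSC_NULL,diag);   /* vector partitioned like the rows */
  MatGetDiagonal(A,*diag);
  return(0);
}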
861: PetscErrorCode MatScale_MPIAIJ(Mat A,PetscScalar aa)
862: {
863: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
867: MatScale(a->A,aa);
868: MatScale(a->B,aa);
869: return(0);
870: }
874: PetscErrorCode MatDestroy_MPIAIJ(Mat mat)
875: {
876: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;
880: #if defined(PETSC_USE_LOG)
881: PetscLogObjectState((PetscObject)mat,"Rows=%D, Cols=%D",mat->rmap->N,mat->cmap->N);
882: #endif
883: MatStashDestroy_Private(&mat->stash);
884: MatDestroy(aij->A);
885: MatDestroy(aij->B);
886: #if defined (PETSC_USE_CTABLE)
887: if (aij->colmap) {PetscTableDestroy(aij->colmap);}
888: #else
889: PetscFree(aij->colmap);
890: #endif
891: PetscFree(aij->garray);
892: if (aij->lvec) {VecDestroy(aij->lvec);}
893: if (aij->Mvctx) {VecScatterDestroy(aij->Mvctx);}
894: PetscFree(aij->rowvalues);
895: PetscFree(aij->ld);
896: PetscFree(aij);
898: PetscObjectChangeTypeName((PetscObject)mat,0);
899: PetscObjectComposeFunction((PetscObject)mat,"MatStoreValues_C","",PETSC_NULL);
900: PetscObjectComposeFunction((PetscObject)mat,"MatRetrieveValues_C","",PETSC_NULL);
901: PetscObjectComposeFunction((PetscObject)mat,"MatGetDiagonalBlock_C","",PETSC_NULL);
902: PetscObjectComposeFunction((PetscObject)mat,"MatIsTranspose_C","",PETSC_NULL);
903: PetscObjectComposeFunction((PetscObject)mat,"MatMPIAIJSetPreallocation_C","",PETSC_NULL);
904: PetscObjectComposeFunction((PetscObject)mat,"MatMPIAIJSetPreallocationCSR_C","",PETSC_NULL);
905: PetscObjectComposeFunction((PetscObject)mat,"MatDiagonalScaleLocal_C","",PETSC_NULL);
906: return(0);
907: }
911: PetscErrorCode MatView_MPIAIJ_Binary(Mat mat,PetscViewer viewer)
912: {
913: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;
914: Mat_SeqAIJ* A = (Mat_SeqAIJ*)aij->A->data;
915: Mat_SeqAIJ* B = (Mat_SeqAIJ*)aij->B->data;
916: PetscErrorCode ierr;
917: PetscMPIInt rank,size,tag = ((PetscObject)viewer)->tag;
918: int fd;
919: PetscInt nz,header[4],*row_lengths,*range=0,rlen,i;
920: PetscInt nzmax,*column_indices,j,k,col,*garray = aij->garray,cnt,cstart = mat->cmap->rstart,rnz;
921: PetscScalar *column_values;
924: MPI_Comm_rank(((PetscObject)mat)->comm,&rank);
925: MPI_Comm_size(((PetscObject)mat)->comm,&size);
926: nz = A->nz + B->nz;
927: if (!rank) {
928: header[0] = MAT_FILE_COOKIE;
929: header[1] = mat->rmap->N;
930: header[2] = mat->cmap->N;
931: MPI_Reduce(&nz,&header[3],1,MPIU_INT,MPI_SUM,0,((PetscObject)mat)->comm);
932: PetscViewerBinaryGetDescriptor(viewer,&fd);
933: PetscBinaryWrite(fd,header,4,PETSC_INT,PETSC_TRUE);
934: /* get largest number of rows any processor has */
935: rlen = mat->rmap->n;
936: range = mat->rmap->range;
937: for (i=1; i<size; i++) {
938: rlen = PetscMax(rlen,range[i+1] - range[i]);
939: }
940: } else {
941: MPI_Reduce(&nz,0,1,MPIU_INT,MPI_SUM,0,((PetscObject)mat)->comm);
942: rlen = mat->rmap->n;
943: }
945: /* load up the local row counts */
946: PetscMalloc((rlen+1)*sizeof(PetscInt),&row_lengths);
947: for (i=0; i<mat->rmap->n; i++) {
948: row_lengths[i] = A->i[i+1] - A->i[i] + B->i[i+1] - B->i[i];
949: }
951: /* store the row lengths to the file */
952: if (!rank) {
953: MPI_Status status;
954: PetscBinaryWrite(fd,row_lengths,mat->rmap->n,PETSC_INT,PETSC_TRUE);
955: for (i=1; i<size; i++) {
956: rlen = range[i+1] - range[i];
957: MPI_Recv(row_lengths,rlen,MPIU_INT,i,tag,((PetscObject)mat)->comm,&status);
958: PetscBinaryWrite(fd,row_lengths,rlen,PETSC_INT,PETSC_TRUE);
959: }
960: } else {
961: MPI_Send(row_lengths,mat->rmap->n,MPIU_INT,0,tag,((PetscObject)mat)->comm);
962: }
963: PetscFree(row_lengths);
965: /* load up the local column indices */
966: nzmax = nz; /* 0th processor needs as much space as the largest processor needs */
967: MPI_Reduce(&nz,&nzmax,1,MPIU_INT,MPI_MAX,0,((PetscObject)mat)->comm);
968: PetscMalloc((nzmax+1)*sizeof(PetscInt),&column_indices);
969: cnt = 0;
970: for (i=0; i<mat->rmap->n; i++) {
971: for (j=B->i[i]; j<B->i[i+1]; j++) {
972: if ( (col = garray[B->j[j]]) > cstart) break;
973: column_indices[cnt++] = col;
974: }
975: for (k=A->i[i]; k<A->i[i+1]; k++) {
976: column_indices[cnt++] = A->j[k] + cstart;
977: }
978: for (; j<B->i[i+1]; j++) {
979: column_indices[cnt++] = garray[B->j[j]];
980: }
981: }
982: if (cnt != A->nz + B->nz) SETERRQ2(PETSC_ERR_PLIB,"Internal PETSc error: cnt = %D nz = %D",cnt,A->nz+B->nz);
984: /* store the column indices to the file */
985: if (!rank) {
986: MPI_Status status;
987: PetscBinaryWrite(fd,column_indices,nz,PETSC_INT,PETSC_TRUE);
988: for (i=1; i<size; i++) {
989: MPI_Recv(&rnz,1,MPIU_INT,i,tag,((PetscObject)mat)->comm,&status);
990: if (rnz > nzmax) SETERRQ2(PETSC_ERR_PLIB,"Internal PETSc error: nz = %D nzmax = %D",nz,nzmax);
991: MPI_Recv(column_indices,rnz,MPIU_INT,i,tag,((PetscObject)mat)->comm,&status);
992: PetscBinaryWrite(fd,column_indices,rnz,PETSC_INT,PETSC_TRUE);
993: }
994: } else {
995: MPI_Send(&nz,1,MPIU_INT,0,tag,((PetscObject)mat)->comm);
996: MPI_Send(column_indices,nz,MPIU_INT,0,tag,((PetscObject)mat)->comm);
997: }
998: PetscFree(column_indices);
1000: /* load up the local column values */
1001: PetscMalloc((nzmax+1)*sizeof(PetscScalar),&column_values);
1002: cnt = 0;
1003: for (i=0; i<mat->rmap->n; i++) {
1004: for (j=B->i[i]; j<B->i[i+1]; j++) {
1005: if ( garray[B->j[j]] > cstart) break;
1006: column_values[cnt++] = B->a[j];
1007: }
1008: for (k=A->i[i]; k<A->i[i+1]; k++) {
1009: column_values[cnt++] = A->a[k];
1010: }
1011: for (; j<B->i[i+1]; j++) {
1012: column_values[cnt++] = B->a[j];
1013: }
1014: }
1015: if (cnt != A->nz + B->nz) SETERRQ2(PETSC_ERR_PLIB,"Internal PETSc error: cnt = %D nz = %D",cnt,A->nz+B->nz);
1017: /* store the column values to the file */
1018: if (!rank) {
1019: MPI_Status status;
1020: PetscBinaryWrite(fd,column_values,nz,PETSC_SCALAR,PETSC_TRUE);
1021: for (i=1; i<size; i++) {
1022: MPI_Recv(&rnz,1,MPIU_INT,i,tag,((PetscObject)mat)->comm,&status);
1023: if (rnz > nzmax) SETERRQ2(PETSC_ERR_PLIB,"Internal PETSc error: nz = %D nzmax = %D",nz,nzmax);
1024: MPI_Recv(column_values,rnz,MPIU_SCALAR,i,tag,((PetscObject)mat)->comm,&status);
1025: PetscBinaryWrite(fd,column_values,rnz,PETSC_SCALAR,PETSC_TRUE);
1026: }
1027: } else {
1028: MPI_Send(&nz,1,MPIU_INT,0,tag,((PetscObject)mat)->comm);
1029: MPI_Send(column_values,nz,MPIU_SCALAR,0,tag,((PetscObject)mat)->comm);
1030: }
1031: PetscFree(column_values);
1032: return(0);
1033: }
1037: PetscErrorCode MatView_MPIAIJ_ASCIIorDraworSocket(Mat mat,PetscViewer viewer)
1038: {
1039: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;
1040: PetscErrorCode ierr;
1041: PetscMPIInt rank = aij->rank,size = aij->size;
1042: PetscTruth isdraw,iascii,isbinary;
1043: PetscViewer sviewer;
1044: PetscViewerFormat format;
1047: PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_DRAW,&isdraw);
1048: PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_ASCII,&iascii);
1049: PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_BINARY,&isbinary);
1050: if (iascii) {
1051: PetscViewerGetFormat(viewer,&format);
1052: if (format == PETSC_VIEWER_ASCII_INFO_DETAIL) {
1053: MatInfo info;
1054: PetscTruth inodes;
1056: MPI_Comm_rank(((PetscObject)mat)->comm,&rank);
1057: MatGetInfo(mat,MAT_LOCAL,&info);
1058: MatInodeGetInodeSizes(aij->A,PETSC_NULL,(PetscInt **)&inodes,PETSC_NULL);
1059: if (!inodes) {
1060: PetscViewerASCIISynchronizedPrintf(viewer,"[%d] Local rows %D nz %D nz alloced %D mem %D, not using I-node routines\n",
1061: rank,mat->rmap->n,(PetscInt)info.nz_used,(PetscInt)info.nz_allocated,(PetscInt)info.memory);
1062: } else {
1063: PetscViewerASCIISynchronizedPrintf(viewer,"[%d] Local rows %D nz %D nz alloced %D mem %D, using I-node routines\n",
1064: rank,mat->rmap->n,(PetscInt)info.nz_used,(PetscInt)info.nz_allocated,(PetscInt)info.memory);
1065: }
1066: MatGetInfo(aij->A,MAT_LOCAL,&info);
1067: PetscViewerASCIISynchronizedPrintf(viewer,"[%d] on-diagonal part: nz %D \n",rank,(PetscInt)info.nz_used);
1068: MatGetInfo(aij->B,MAT_LOCAL,&info);
1069: PetscViewerASCIISynchronizedPrintf(viewer,"[%d] off-diagonal part: nz %D \n",rank,(PetscInt)info.nz_used);
1070: PetscViewerFlush(viewer);
1071: PetscViewerASCIIPrintf(viewer,"Information on VecScatter used in matrix-vector product: \n");
1072: VecScatterView(aij->Mvctx,viewer);
1073: return(0);
1074: } else if (format == PETSC_VIEWER_ASCII_INFO) {
1075: PetscInt inodecount,inodelimit,*inodes;
1076: MatInodeGetInodeSizes(aij->A,&inodecount,&inodes,&inodelimit);
1077: if (inodes) {
1078: PetscViewerASCIIPrintf(viewer,"using I-node (on process 0) routines: found %D nodes, limit used is %D\n",inodecount,inodelimit);
1079: } else {
1080: PetscViewerASCIIPrintf(viewer,"not using I-node (on process 0) routines\n");
1081: }
1082: return(0);
1083: } else if (format == PETSC_VIEWER_ASCII_FACTOR_INFO) {
1084: return(0);
1085: }
1086: } else if (isbinary) {
1087: if (size == 1) {
1088: PetscObjectSetName((PetscObject)aij->A,((PetscObject)mat)->name);
1089: MatView(aij->A,viewer);
1090: } else {
1091: MatView_MPIAIJ_Binary(mat,viewer);
1092: }
1093: return(0);
1094: } else if (isdraw) {
1095: PetscDraw draw;
1096: PetscTruth isnull;
1097: PetscViewerDrawGetDraw(viewer,0,&draw);
1098: PetscDrawIsNull(draw,&isnull); if (isnull) return(0);
1099: }
1101: if (size == 1) {
1102: PetscObjectSetName((PetscObject)aij->A,((PetscObject)mat)->name);
1103: MatView(aij->A,viewer);
1104: } else {
1105: /* assemble the entire matrix onto first processor. */
1106: Mat A;
1107: Mat_SeqAIJ *Aloc;
1108: PetscInt M = mat->rmap->N,N = mat->cmap->N,m,*ai,*aj,row,*cols,i,*ct;
1109: MatScalar *a;
1111: if (mat->rmap->N > 1024) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"ASCII matrix output not allowed for matrices with more than 1024 rows, use binary format instead");
1113: MatCreate(((PetscObject)mat)->comm,&A);
1114: if (!rank) {
1115: MatSetSizes(A,M,N,M,N);
1116: } else {
1117: MatSetSizes(A,0,0,M,N);
1118: }
1119: /* This is just a temporary matrix, so explicitly using MATMPIAIJ is probably best */
1120: MatSetType(A,MATMPIAIJ);
1121: MatMPIAIJSetPreallocation(A,0,PETSC_NULL,0,PETSC_NULL);
1122: PetscLogObjectParent(mat,A);
1124: /* copy over the A part */
1125: Aloc = (Mat_SeqAIJ*)aij->A->data;
1126: m = aij->A->rmap->n; ai = Aloc->i; aj = Aloc->j; a = Aloc->a;
1127: row = mat->rmap->rstart;
1128: for (i=0; i<ai[m]; i++) {aj[i] += mat->cmap->rstart ;}
1129: for (i=0; i<m; i++) {
1130: MatSetValues(A,1,&row,ai[i+1]-ai[i],aj,a,INSERT_VALUES);
1131: row++; a += ai[i+1]-ai[i]; aj += ai[i+1]-ai[i];
1132: }
1133: aj = Aloc->j;
1134: for (i=0; i<ai[m]; i++) {aj[i] -= mat->cmap->rstart;}
1136: /* copy over the B part */
1137: Aloc = (Mat_SeqAIJ*)aij->B->data;
1138: m = aij->B->rmap->n; ai = Aloc->i; aj = Aloc->j; a = Aloc->a;
1139: row = mat->rmap->rstart;
1140: PetscMalloc((ai[m]+1)*sizeof(PetscInt),&cols);
1141: ct = cols;
1142: for (i=0; i<ai[m]; i++) {cols[i] = aij->garray[aj[i]];}
1143: for (i=0; i<m; i++) {
1144: MatSetValues(A,1,&row,ai[i+1]-ai[i],cols,a,INSERT_VALUES);
1145: row++; a += ai[i+1]-ai[i]; cols += ai[i+1]-ai[i];
1146: }
1147: PetscFree(ct);
1148: MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
1149: MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
1150: /*
1151: Everyone has to call to draw the matrix since the graphics waits are
1152: synchronized across all processors that share the PetscDraw object
1153: */
1154: PetscViewerGetSingleton(viewer,&sviewer);
1155: if (!rank) {
1156: PetscObjectSetName((PetscObject)((Mat_MPIAIJ*)(A->data))->A,((PetscObject)mat)->name);
1157: MatView(((Mat_MPIAIJ*)(A->data))->A,sviewer);
1158: }
1159: PetscViewerRestoreSingleton(viewer,&sviewer);
1160: MatDestroy(A);
1161: }
1162: return(0);
1163: }
1167: PetscErrorCode MatView_MPIAIJ(Mat mat,PetscViewer viewer)
1168: {
1170: PetscTruth iascii,isdraw,issocket,isbinary;
1171:
1173: PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_ASCII,&iascii);
1174: PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_DRAW,&isdraw);
1175: PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_BINARY,&isbinary);
1176: PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_SOCKET,&issocket);
1177: if (iascii || isdraw || isbinary || issocket) {
1178: MatView_MPIAIJ_ASCIIorDraworSocket(mat,viewer);
1179: } else {
1180: SETERRQ1(PETSC_ERR_SUP,"Viewer type %s not supported by MPIAIJ matrices",((PetscObject)viewer)->type_name);
1181: }
1182: return(0);
1183: }
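
/*
   Usage sketch (illustrative, not part of the original source): large parallel matrices
   are best dumped through the binary viewer path above rather than ASCII.  The file
   name is an assumption of the example.
*/
static PetscErrorCode ExampleDumpMatrix(Mat A)
{
  PetscViewer viewer;

  PetscViewerBinaryOpen(((PetscObject)A)->comm,"matrix.dat",FILE_MODE_WRITE,&viewer);
  MatView(A,viewer);
  PetscViewerDestroy(viewer);
  return(0);
}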
1187: PetscErrorCode MatRelax_MPIAIJ(Mat matin,Vec bb,PetscReal omega,MatSORType flag,PetscReal fshift,PetscInt its,PetscInt lits,Vec xx)
1188: {
1189: Mat_MPIAIJ *mat = (Mat_MPIAIJ*)matin->data;
1191: Vec bb1;
1194: VecDuplicate(bb,&bb1);
1196: if ((flag & SOR_LOCAL_SYMMETRIC_SWEEP) == SOR_LOCAL_SYMMETRIC_SWEEP){
1197: if (flag & SOR_ZERO_INITIAL_GUESS) {
1198: (*mat->A->ops->relax)(mat->A,bb,omega,flag,fshift,lits,lits,xx);
1199: its--;
1200: }
1201:
1202: while (its--) {
1203: VecScatterBegin(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
1204: VecScatterEnd(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
1206: /* update rhs: bb1 = bb - B*x */
1207: VecScale(mat->lvec,-1.0);
1208: (*mat->B->ops->multadd)(mat->B,mat->lvec,bb,bb1);
1210: /* local sweep */
1211: (*mat->A->ops->relax)(mat->A,bb1,omega,SOR_SYMMETRIC_SWEEP,fshift,lits,lits,xx);
1212: }
1213: } else if (flag & SOR_LOCAL_FORWARD_SWEEP){
1214: if (flag & SOR_ZERO_INITIAL_GUESS) {
1215: (*mat->A->ops->relax)(mat->A,bb,omega,flag,fshift,lits,1,xx);
1216: its--;
1217: }
1218: while (its--) {
1219: VecScatterBegin(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
1220: VecScatterEnd(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
1222: /* update rhs: bb1 = bb - B*x */
1223: VecScale(mat->lvec,-1.0);
1224: (*mat->B->ops->multadd)(mat->B,mat->lvec,bb,bb1);
1226: /* local sweep */
1227: (*mat->A->ops->relax)(mat->A,bb1,omega,SOR_FORWARD_SWEEP,fshift,lits,1,xx);
1228: }
1229: } else if (flag & SOR_LOCAL_BACKWARD_SWEEP){
1230: if (flag & SOR_ZERO_INITIAL_GUESS) {
1231: (*mat->A->ops->relax)(mat->A,bb,omega,flag,fshift,lits,1,xx);
1232: its--;
1233: }
1234: while (its--) {
1235: VecScatterBegin(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
1236: VecScatterEnd(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
1238: /* update rhs: bb1 = bb - B*x */
1239: VecScale(mat->lvec,-1.0);
1240: (*mat->B->ops->multadd)(mat->B,mat->lvec,bb,bb1);
1242: /* local sweep */
1243: (*mat->A->ops->relax)(mat->A,bb1,omega,SOR_BACKWARD_SWEEP,fshift,lits,1,xx);
1244: }
1245: } else {
1246: SETERRQ(PETSC_ERR_SUP,"Parallel SOR not supported");
1247: }
1249: VecDestroy(bb1);
1250: return(0);
1251: }
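
/*
   Usage sketch (illustrative, not part of the original source): one application of the
   processor-local SOR smoother implemented above.  Only the SOR_LOCAL_* variants are
   supported for MPIAIJ; omega = 1.0 gives Gauss-Seidel.  The iteration counts are
   assumptions of the example.
*/
static PetscErrorCode ExampleLocalSOR(Mat A,Vec b,Vec x)
{
  MatRelax(A,b,1.0,(MatSORType)(SOR_LOCAL_SYMMETRIC_SWEEP | SOR_ZERO_INITIAL_GUESS),0.0,1,1,x);
  return(0);
}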
1255: PetscErrorCode MatPermute_MPIAIJ(Mat A,IS rowp,IS colp,Mat *B)
1256: {
1257: MPI_Comm comm,pcomm;
1258: PetscInt first,local_size,nrows;
1259: const PetscInt *rows;
1260: int ntids;
1261: IS crowp,growp,irowp,lrowp,lcolp,icolp;
1265: PetscObjectGetComm((PetscObject)A,&comm);
1266: /* make a collective version of 'rowp' */
1267: PetscObjectGetComm((PetscObject)rowp,&pcomm);
1268: if (pcomm==comm) {
1269: crowp = rowp;
1270: } else {
1271: ISGetSize(rowp,&nrows);
1272: ISGetIndices(rowp,&rows);
1273: ISCreateGeneral(comm,nrows,rows,&crowp);
1274: ISRestoreIndices(rowp,&rows);
1275: }
1276: /* collect the global row permutation and invert it */
1277: ISAllGather(crowp,&growp);
1278: ISSetPermutation(growp);
1279: if (pcomm!=comm) {
1280: ISDestroy(crowp);
1281: }
1282: ISInvertPermutation(growp,PETSC_DECIDE,&irowp);
1283: /* get the local target indices */
1284: MatGetOwnershipRange(A,&first,PETSC_NULL);
1285: MatGetLocalSize(A,&local_size,PETSC_NULL);
1286: ISGetIndices(irowp,&rows);
1287: ISCreateGeneral(MPI_COMM_SELF,local_size,rows+first,&lrowp);
1288: ISRestoreIndices(irowp,&rows);
1289: ISDestroy(irowp);
1290: /* the column permutation is so much easier;
1291: make a local version of 'colp' and invert it */
1292: PetscObjectGetComm((PetscObject)colp,&pcomm);
1293: MPI_Comm_size(pcomm,&ntids);
1294: if (ntids==1) {
1295: lcolp = colp;
1296: } else {
1297: ISGetSize(colp,&nrows);
1298: ISGetIndices(colp,&rows);
1299: ISCreateGeneral(MPI_COMM_SELF,nrows,rows,&lcolp);
1300: }
1301: ISInvertPermutation(lcolp,PETSC_DECIDE,&icolp);
1302: ISSetPermutation(lcolp);
1303: if (ntids>1) {
1304: ISRestoreIndices(colp,&rows);
1305: ISDestroy(lcolp);
1306: }
1307: /* now we just get the submatrix */
1308: MatGetSubMatrix(A,lrowp,icolp,local_size,MAT_INITIAL_MATRIX,B);
1309: /* clean up */
1310: ISDestroy(lrowp);
1311: ISDestroy(icolp);
1312: return(0);
1313: }
1317: PetscErrorCode MatGetInfo_MPIAIJ(Mat matin,MatInfoType flag,MatInfo *info)
1318: {
1319: Mat_MPIAIJ *mat = (Mat_MPIAIJ*)matin->data;
1320: Mat A = mat->A,B = mat->B;
1322: PetscReal isend[5],irecv[5];
1325: info->block_size = 1.0;
1326: MatGetInfo(A,MAT_LOCAL,info);
1327: isend[0] = info->nz_used; isend[1] = info->nz_allocated; isend[2] = info->nz_unneeded;
1328: isend[3] = info->memory; isend[4] = info->mallocs;
1329: MatGetInfo(B,MAT_LOCAL,info);
1330: isend[0] += info->nz_used; isend[1] += info->nz_allocated; isend[2] += info->nz_unneeded;
1331: isend[3] += info->memory; isend[4] += info->mallocs;
1332: if (flag == MAT_LOCAL) {
1333: info->nz_used = isend[0];
1334: info->nz_allocated = isend[1];
1335: info->nz_unneeded = isend[2];
1336: info->memory = isend[3];
1337: info->mallocs = isend[4];
1338: } else if (flag == MAT_GLOBAL_MAX) {
1339: MPI_Allreduce(isend,irecv,5,MPIU_REAL,MPI_MAX,((PetscObject)matin)->comm);
1340: info->nz_used = irecv[0];
1341: info->nz_allocated = irecv[1];
1342: info->nz_unneeded = irecv[2];
1343: info->memory = irecv[3];
1344: info->mallocs = irecv[4];
1345: } else if (flag == MAT_GLOBAL_SUM) {
1346: MPI_Allreduce(isend,irecv,5,MPIU_REAL,MPI_SUM,((PetscObject)matin)->comm);
1347: info->nz_used = irecv[0];
1348: info->nz_allocated = irecv[1];
1349: info->nz_unneeded = irecv[2];
1350: info->memory = irecv[3];
1351: info->mallocs = irecv[4];
1352: }
1353: info->fill_ratio_given = 0; /* no parallel LU/ILU/Cholesky */
1354: info->fill_ratio_needed = 0;
1355: info->factor_mallocs = 0;
1357: return(0);
1358: }
1362: PetscErrorCode MatSetOption_MPIAIJ(Mat A,MatOption op,PetscTruth flg)
1363: {
1364: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
1368: switch (op) {
1369: case MAT_NEW_NONZERO_LOCATIONS:
1370: case MAT_NEW_NONZERO_ALLOCATION_ERR:
1371: case MAT_UNUSED_NONZERO_LOCATION_ERR:
1372: case MAT_KEEP_ZEROED_ROWS:
1373: case MAT_NEW_NONZERO_LOCATION_ERR:
1374: case MAT_USE_INODES:
1375: case MAT_IGNORE_ZERO_ENTRIES:
1376: MatSetOption(a->A,op,flg);
1377: MatSetOption(a->B,op,flg);
1378: break;
1379: case MAT_ROW_ORIENTED:
1380: a->roworiented = flg;
1381: MatSetOption(a->A,op,flg);
1382: MatSetOption(a->B,op,flg);
1383: break;
1384: case MAT_NEW_DIAGONALS:
1385: PetscInfo1(A,"Option %s ignored\n",MatOptions[op]);
1386: break;
1387: case MAT_IGNORE_OFF_PROC_ENTRIES:
1388: a->donotstash = PETSC_TRUE;
1389: break;
1390: case MAT_SYMMETRIC:
1391: MatSetOption(a->A,op,flg);
1392: break;
1393: case MAT_STRUCTURALLY_SYMMETRIC:
1394: case MAT_HERMITIAN:
1395: case MAT_SYMMETRY_ETERNAL:
1396: MatSetOption(a->A,op,flg);
1397: break;
1398: default:
1399: SETERRQ1(PETSC_ERR_SUP,"unknown option %d",op);
1400: }
1401: return(0);
1402: }
1406: PetscErrorCode MatGetRow_MPIAIJ(Mat matin,PetscInt row,PetscInt *nz,PetscInt **idx,PetscScalar **v)
1407: {
1408: Mat_MPIAIJ *mat = (Mat_MPIAIJ*)matin->data;
1409: PetscScalar *vworkA,*vworkB,**pvA,**pvB,*v_p;
1411: PetscInt i,*cworkA,*cworkB,**pcA,**pcB,cstart = matin->cmap->rstart;
1412: PetscInt nztot,nzA,nzB,lrow,rstart = matin->rmap->rstart,rend = matin->rmap->rend;
1413: PetscInt *cmap,*idx_p;
1416: if (mat->getrowactive) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Already active");
1417: mat->getrowactive = PETSC_TRUE;
1419: if (!mat->rowvalues && (idx || v)) {
1420: /*
1421: allocate enough space to hold information from the longest row.
1422: */
1423: Mat_SeqAIJ *Aa = (Mat_SeqAIJ*)mat->A->data,*Ba = (Mat_SeqAIJ*)mat->B->data;
1424: PetscInt max = 1,tmp;
1425: for (i=0; i<matin->rmap->n; i++) {
1426: tmp = Aa->i[i+1] - Aa->i[i] + Ba->i[i+1] - Ba->i[i];
1427: if (max < tmp) { max = tmp; }
1428: }
1429: PetscMalloc(max*(sizeof(PetscInt)+sizeof(PetscScalar)),&mat->rowvalues);
1430: mat->rowindices = (PetscInt*)(mat->rowvalues + max);
1431: }
1433: if (row < rstart || row >= rend) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Only local rows")
1434: lrow = row - rstart;
1436: pvA = &vworkA; pcA = &cworkA; pvB = &vworkB; pcB = &cworkB;
1437: if (!v) {pvA = 0; pvB = 0;}
1438: if (!idx) {pcA = 0; if (!v) pcB = 0;}
1439: (*mat->A->ops->getrow)(mat->A,lrow,&nzA,pcA,pvA);
1440: (*mat->B->ops->getrow)(mat->B,lrow,&nzB,pcB,pvB);
1441: nztot = nzA + nzB;
1443: cmap = mat->garray;
1444: if (v || idx) {
1445: if (nztot) {
1446: /* Sort by increasing column numbers, assuming A and B already sorted */
1447: PetscInt imark = -1;
1448: if (v) {
1449: *v = v_p = mat->rowvalues;
1450: for (i=0; i<nzB; i++) {
1451: if (cmap[cworkB[i]] < cstart) v_p[i] = vworkB[i];
1452: else break;
1453: }
1454: imark = i;
1455: for (i=0; i<nzA; i++) v_p[imark+i] = vworkA[i];
1456: for (i=imark; i<nzB; i++) v_p[nzA+i] = vworkB[i];
1457: }
1458: if (idx) {
1459: *idx = idx_p = mat->rowindices;
1460: if (imark > -1) {
1461: for (i=0; i<imark; i++) {
1462: idx_p[i] = cmap[cworkB[i]];
1463: }
1464: } else {
1465: for (i=0; i<nzB; i++) {
1466: if (cmap[cworkB[i]] < cstart) idx_p[i] = cmap[cworkB[i]];
1467: else break;
1468: }
1469: imark = i;
1470: }
1471: for (i=0; i<nzA; i++) idx_p[imark+i] = cstart + cworkA[i];
1472: for (i=imark; i<nzB; i++) idx_p[nzA+i] = cmap[cworkB[i]];
1473: }
1474: } else {
1475: if (idx) *idx = 0;
1476: if (v) *v = 0;
1477: }
1478: }
1479: *nz = nztot;
1480: (*mat->A->ops->restorerow)(mat->A,lrow,&nzA,pcA,pvA);
1481: (*mat->B->ops->restorerow)(mat->B,lrow,&nzB,pcB,pvB);
1482: return(0);
1483: }
1487: PetscErrorCode MatRestoreRow_MPIAIJ(Mat mat,PetscInt row,PetscInt *nz,PetscInt **idx,PetscScalar **v)
1488: {
1489: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;
1492: if (!aij->getrowactive) {
1493: SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"MatGetRow() must be called first");
1494: }
1495: aij->getrowactive = PETSC_FALSE;
1496: return(0);
1497: }
1501: PetscErrorCode MatNorm_MPIAIJ(Mat mat,NormType type,PetscReal *norm)
1502: {
1503: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;
1504: Mat_SeqAIJ *amat = (Mat_SeqAIJ*)aij->A->data,*bmat = (Mat_SeqAIJ*)aij->B->data;
1506: PetscInt i,j,cstart = mat->cmap->rstart;
1507: PetscReal sum = 0.0;
1508: MatScalar *v;
1511: if (aij->size == 1) {
1512: MatNorm(aij->A,type,norm);
1513: } else {
1514: if (type == NORM_FROBENIUS) {
1515: v = amat->a;
1516: for (i=0; i<amat->nz; i++) {
1517: #if defined(PETSC_USE_COMPLEX)
1518: sum += PetscRealPart(PetscConj(*v)*(*v)); v++;
1519: #else
1520: sum += (*v)*(*v); v++;
1521: #endif
1522: }
1523: v = bmat->a;
1524: for (i=0; i<bmat->nz; i++) {
1525: #if defined(PETSC_USE_COMPLEX)
1526: sum += PetscRealPart(PetscConj(*v)*(*v)); v++;
1527: #else
1528: sum += (*v)*(*v); v++;
1529: #endif
1530: }
1531: MPI_Allreduce(&sum,norm,1,MPIU_REAL,MPI_SUM,((PetscObject)mat)->comm);
1532: *norm = sqrt(*norm);
1533: } else if (type == NORM_1) { /* max column norm */
1534: PetscReal *tmp,*tmp2;
1535: PetscInt *jj,*garray = aij->garray;
1536: PetscMalloc((mat->cmap->N+1)*sizeof(PetscReal),&tmp);
1537: PetscMalloc((mat->cmap->N+1)*sizeof(PetscReal),&tmp2);
1538: PetscMemzero(tmp,mat->cmap->N*sizeof(PetscReal));
1539: *norm = 0.0;
1540: v = amat->a; jj = amat->j;
1541: for (j=0; j<amat->nz; j++) {
1542: tmp[cstart + *jj++ ] += PetscAbsScalar(*v); v++;
1543: }
1544: v = bmat->a; jj = bmat->j;
1545: for (j=0; j<bmat->nz; j++) {
1546: tmp[garray[*jj++]] += PetscAbsScalar(*v); v++;
1547: }
1548: MPI_Allreduce(tmp,tmp2,mat->cmap->N,MPIU_REAL,MPI_SUM,((PetscObject)mat)->comm);
1549: for (j=0; j<mat->cmap->N; j++) {
1550: if (tmp2[j] > *norm) *norm = tmp2[j];
1551: }
1552: PetscFree(tmp);
1553: PetscFree(tmp2);
1554: } else if (type == NORM_INFINITY) { /* max row norm */
1555: PetscReal ntemp = 0.0;
1556: for (j=0; j<aij->A->rmap->n; j++) {
1557: v = amat->a + amat->i[j];
1558: sum = 0.0;
1559: for (i=0; i<amat->i[j+1]-amat->i[j]; i++) {
1560: sum += PetscAbsScalar(*v); v++;
1561: }
1562: v = bmat->a + bmat->i[j];
1563: for (i=0; i<bmat->i[j+1]-bmat->i[j]; i++) {
1564: sum += PetscAbsScalar(*v); v++;
1565: }
1566: if (sum > ntemp) ntemp = sum;
1567: }
1568: MPI_Allreduce(&ntemp,norm,1,MPIU_REAL,MPI_MAX,((PetscObject)mat)->comm);
1569: } else {
1570: SETERRQ(PETSC_ERR_SUP,"No support for two norm");
1571: }
1572: }
1573: return(0);
1574: }
1578: PetscErrorCode MatTranspose_MPIAIJ(Mat A,MatReuse reuse,Mat *matout)
1579: {
1580: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
1581: Mat_SeqAIJ *Aloc=(Mat_SeqAIJ*)a->A->data,*Bloc=(Mat_SeqAIJ*)a->B->data;
1583: PetscInt M = A->rmap->N,N = A->cmap->N,ma,na,mb,*ai,*aj,*bi,*bj,row,*cols,*cols_tmp,i,*d_nnz;
1584: PetscInt cstart=A->cmap->rstart,ncol;
1585: Mat B;
1586: MatScalar *array;
1589: if (reuse == MAT_REUSE_MATRIX && A == *matout && M != N) SETERRQ(PETSC_ERR_ARG_SIZ,"Square matrix only for in-place");
1591: ma = A->rmap->n; na = A->cmap->n; mb = a->B->rmap->n;
1592: ai = Aloc->i; aj = Aloc->j;
1593: bi = Bloc->i; bj = Bloc->j;
1594: if (reuse == MAT_INITIAL_MATRIX || *matout == A) {
1595: /* compute d_nnz for preallocation; o_nnz is approximated by d_nnz to avoid communication */
1596: PetscMalloc((1+na)*sizeof(PetscInt),&d_nnz);
1597: PetscMemzero(d_nnz,(1+na)*sizeof(PetscInt));
1598: for (i=0; i<ai[ma]; i++){
1599: d_nnz[aj[i]] ++;
1600: aj[i] += cstart; /* global col index to be used by MatSetValues() */
1601: }
1603: MatCreate(((PetscObject)A)->comm,&B);
1604: MatSetSizes(B,A->cmap->n,A->rmap->n,N,M);
1605: MatSetType(B,((PetscObject)A)->type_name);
1606: MatMPIAIJSetPreallocation(B,0,d_nnz,0,d_nnz);
1607: PetscFree(d_nnz);
1608: } else {
1609: B = *matout;
1610: }
1612: /* copy over the A part */
1613: array = Aloc->a;
1614: row = A->rmap->rstart;
1615: for (i=0; i<ma; i++) {
1616: ncol = ai[i+1]-ai[i];
1617: MatSetValues(B,ncol,aj,1,&row,array,INSERT_VALUES);
1618: row++; array += ncol; aj += ncol;
1619: }
1620: aj = Aloc->j;
1621: for (i=0; i<ai[ma]; i++) aj[i] -= cstart; /* restore local col index */
1623: /* copy over the B part */
1624: PetscMalloc(bi[mb]*sizeof(PetscInt),&cols);
1625: PetscMemzero(cols,bi[mb]*sizeof(PetscInt));
1626: array = Bloc->a;
1627: row = A->rmap->rstart;
1628: for (i=0; i<bi[mb]; i++) {cols[i] = a->garray[bj[i]];}
1629: cols_tmp = cols;
1630: for (i=0; i<mb; i++) {
1631: ncol = bi[i+1]-bi[i];
1632: MatSetValues(B,ncol,cols_tmp,1,&row,array,INSERT_VALUES);
1633: row++; array += ncol; cols_tmp += ncol;
1634: }
1635: PetscFree(cols);
1636:
1637: MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);
1638: MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);
1639: if (reuse == MAT_INITIAL_MATRIX || *matout != A) {
1640: *matout = B;
1641: } else {
1642: MatHeaderCopy(A,B);
1643: }
1644: return(0);
1645: }
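
/*
   Usage sketch (illustrative, not part of the original source): out-of-place versus
   in-place transposition as handled by the reuse logic above.
*/
static PetscErrorCode ExampleTranspose(Mat A)
{
  Mat At;

  MatTranspose(A,MAT_INITIAL_MATRIX,&At);   /* new matrix holding the transpose of A */
  MatDestroy(At);
  MatTranspose(A,MAT_REUSE_MATRIX,&A);      /* in-place; square matrices only */
  return(0);
}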
1649: PetscErrorCode MatDiagonalScale_MPIAIJ(Mat mat,Vec ll,Vec rr)
1650: {
1651: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;
1652: Mat a = aij->A,b = aij->B;
1654: PetscInt s1,s2,s3;
1657: MatGetLocalSize(mat,&s2,&s3);
1658: if (rr) {
1659: VecGetLocalSize(rr,&s1);
1660: if (s1!=s3) SETERRQ(PETSC_ERR_ARG_SIZ,"right vector non-conforming local size");
1661: /* Overlap communication with computation. */
1662: VecScatterBegin(aij->Mvctx,rr,aij->lvec,INSERT_VALUES,SCATTER_FORWARD);
1663: }
1664: if (ll) {
1665: VecGetLocalSize(ll,&s1);
1666: if (s1!=s2) SETERRQ(PETSC_ERR_ARG_SIZ,"left vector non-conforming local size");
1667: (*b->ops->diagonalscale)(b,ll,0);
1668: }
1669: /* scale the diagonal block */
1670: (*a->ops->diagonalscale)(a,ll,rr);
1672: if (rr) {
1673: /* Do a scatter end and then right scale the off-diagonal block */
1674: VecScatterEnd(aij->Mvctx,rr,aij->lvec,INSERT_VALUES,SCATTER_FORWARD);
1675: (*b->ops->diagonalscale)(b,0,aij->lvec);
1676: }
1677:
1678: return(0);
1679: }
1683: PetscErrorCode MatSetBlockSize_MPIAIJ(Mat A,PetscInt bs)
1684: {
1685: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
1689: MatSetBlockSize(a->A,bs);
1690: MatSetBlockSize(a->B,bs);
1691: return(0);
1692: }
1695: PetscErrorCode MatSetUnfactored_MPIAIJ(Mat A)
1696: {
1697: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
1701: MatSetUnfactored(a->A);
1702: return(0);
1703: }
1707: PetscErrorCode MatEqual_MPIAIJ(Mat A,Mat B,PetscTruth *flag)
1708: {
1709: Mat_MPIAIJ *matB = (Mat_MPIAIJ*)B->data,*matA = (Mat_MPIAIJ*)A->data;
1710: Mat a,b,c,d;
1711: PetscTruth flg;
1715: a = matA->A; b = matA->B;
1716: c = matB->A; d = matB->B;
1718: MatEqual(a,c,&flg);
1719: if (flg) {
1720: MatEqual(b,d,&flg);
1721: }
1722: MPI_Allreduce(&flg,flag,1,MPI_INT,MPI_LAND,((PetscObject)A)->comm);
1723: return(0);
1724: }
1728: PetscErrorCode MatCopy_MPIAIJ(Mat A,Mat B,MatStructure str)
1729: {
1731: Mat_MPIAIJ *a = (Mat_MPIAIJ *)A->data;
1732: Mat_MPIAIJ *b = (Mat_MPIAIJ *)B->data;
1735: /* If the two matrices don't have the same copy implementation, they aren't compatible for fast copy. */
1736: if ((str != SAME_NONZERO_PATTERN) || (A->ops->copy != B->ops->copy)) {
1737: /* because of the column compression in the off-processor part of the matrix a->B,
1738: the number of columns in a->B and b->B may be different, hence we cannot call
1739:        MatCopy() directly on the two parts. If need be, we could provide a more
1740:        efficient copy than MatCopy_Basic() by first uncompressing the a->B matrices and
1741:        then copying the submatrices */
1742: MatCopy_Basic(A,B,str);
1743: } else {
1744: MatCopy(a->A,b->A,str);
1745: MatCopy(a->B,b->B,str);
1746: }
1747: return(0);
1748: }
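
/*
   Illustrative sketch (an assumption, not code from this file): the fast per-block path above
   is taken only when both matrices use the same copy implementation and the caller passes
   SAME_NONZERO_PATTERN, e.g.

      MatDuplicate(A,MAT_COPY_VALUES,&B);
      ... later, after changing the values of A but not its nonzero pattern ...
      MatCopy(A,B,SAME_NONZERO_PATTERN);
*/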
1752: PetscErrorCode MatSetUpPreallocation_MPIAIJ(Mat A)
1753: {
1757: MatMPIAIJSetPreallocation(A,PETSC_DEFAULT,0,PETSC_DEFAULT,0);
1758: return(0);
1759: }
1761: #include petscblaslapack.h
1764: PetscErrorCode MatAXPY_MPIAIJ(Mat Y,PetscScalar a,Mat X,MatStructure str)
1765: {
1767: PetscInt i;
1768: Mat_MPIAIJ *xx = (Mat_MPIAIJ *)X->data,*yy = (Mat_MPIAIJ *)Y->data;
1769: PetscBLASInt bnz,one=1;
1770: Mat_SeqAIJ *x,*y;
1773: if (str == SAME_NONZERO_PATTERN) {
1774: PetscScalar alpha = a;
1775: x = (Mat_SeqAIJ *)xx->A->data;
1776: y = (Mat_SeqAIJ *)yy->A->data;
1777: bnz = PetscBLASIntCast(x->nz);
1778: BLASaxpy_(&bnz,&alpha,x->a,&one,y->a,&one);
1779: x = (Mat_SeqAIJ *)xx->B->data;
1780: y = (Mat_SeqAIJ *)yy->B->data;
1781: bnz = PetscBLASIntCast(x->nz);
1782: BLASaxpy_(&bnz,&alpha,x->a,&one,y->a,&one);
1783: } else if (str == SUBSET_NONZERO_PATTERN) {
1784: MatAXPY_SeqAIJ(yy->A,a,xx->A,str);
1786: x = (Mat_SeqAIJ *)xx->B->data;
1787: y = (Mat_SeqAIJ *)yy->B->data;
1788: if (y->xtoy && y->XtoY != xx->B) {
1789: PetscFree(y->xtoy);
1790: MatDestroy(y->XtoY);
1791: }
1792: if (!y->xtoy) { /* get xtoy */
1793: MatAXPYGetxtoy_Private(xx->B->rmap->n,x->i,x->j,xx->garray,y->i,y->j,yy->garray,&y->xtoy);
1794: y->XtoY = xx->B;
1795: PetscObjectReference((PetscObject)xx->B);
1796: }
1797: for (i=0; i<x->nz; i++) y->a[y->xtoy[i]] += a*(x->a[i]);
1798: } else {
1799: MatAXPY_Basic(Y,a,X,str);
1800: }
1801: return(0);
1802: }
1804: EXTERN PetscErrorCode MatConjugate_SeqAIJ(Mat);
1808: PetscErrorCode MatConjugate_MPIAIJ(Mat mat)
1809: {
1810: #if defined(PETSC_USE_COMPLEX)
1812: Mat_MPIAIJ *aij = (Mat_MPIAIJ *)mat->data;
1815: MatConjugate_SeqAIJ(aij->A);
1816: MatConjugate_SeqAIJ(aij->B);
1817: #else
1819: #endif
1820: return(0);
1821: }
1825: PetscErrorCode MatRealPart_MPIAIJ(Mat A)
1826: {
1827: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
1831: MatRealPart(a->A);
1832: MatRealPart(a->B);
1833: return(0);
1834: }
1838: PetscErrorCode MatImaginaryPart_MPIAIJ(Mat A)
1839: {
1840: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
1844: MatImaginaryPart(a->A);
1845: MatImaginaryPart(a->B);
1846: return(0);
1847: }
1849: #ifdef PETSC_HAVE_PBGL
1851: #include <boost/parallel/mpi/bsp_process_group.hpp>
1852: #include <boost/graph/distributed/ilu_default_graph.hpp>
1853: #include <boost/graph/distributed/ilu_0_block.hpp>
1854: #include <boost/graph/distributed/ilu_preconditioner.hpp>
1855: #include <boost/graph/distributed/petsc/interface.hpp>
1856: #include <boost/multi_array.hpp>
1857: #include <boost/parallel/distributed_property_map.hpp>
1861: /*
1862: This uses the parallel ILU factorization of Peter Gottschling <pgottsch@osl.iu.edu>
1863: */
1864: PetscErrorCode MatILUFactorSymbolic_MPIAIJ(Mat fact,Mat A, IS isrow, IS iscol, const MatFactorInfo *info)
1865: {
1866: namespace petsc = boost::distributed::petsc;
1867:
1868: namespace graph_dist = boost::graph::distributed;
1869: using boost::graph::distributed::ilu_default::process_group_type;
1870: using boost::graph::ilu_permuted;
1872: PetscTruth row_identity, col_identity;
1873: PetscContainer c;
1874: PetscInt m, n, M, N;
1875: PetscErrorCode ierr;
1878: if (info->levels != 0) SETERRQ(PETSC_ERR_SUP,"Only levels = 0 supported for parallel ilu");
1879: ISIdentity(isrow, &row_identity);
1880: ISIdentity(iscol, &col_identity);
1881: if (!row_identity || !col_identity) {
1882: SETERRQ(PETSC_ERR_ARG_WRONG,"Row and column permutations must be identity for parallel ILU");
1883: }
1885: process_group_type pg;
1886: typedef graph_dist::ilu_default::ilu_level_graph_type lgraph_type;
1887: lgraph_type* lgraph_p = new lgraph_type(petsc::num_global_vertices(A), pg, petsc::matrix_distribution(A, pg));
1888: lgraph_type& level_graph = *lgraph_p;
1889: graph_dist::ilu_default::graph_type& graph(level_graph.graph);
1891: petsc::read_matrix(A, graph, get(boost::edge_weight, graph));
1892: ilu_permuted(level_graph);
1894: /* put together the new matrix */
1895: MatCreate(((PetscObject)A)->comm, fact);
1896: MatGetLocalSize(A, &m, &n);
1897: MatGetSize(A, &M, &N);
1898: MatSetSizes(fact, m, n, M, N);
1899: MatSetType(fact, ((PetscObject)A)->type_name);
1900: MatAssemblyBegin(fact, MAT_FINAL_ASSEMBLY);
1901: MatAssemblyEnd(fact, MAT_FINAL_ASSEMBLY);
1903: PetscContainerCreate(((PetscObject)A)->comm, &c);
1904: PetscContainerSetPointer(c, lgraph_p);
1905: PetscObjectCompose((PetscObject) (fact), "graph", (PetscObject) c);
1906: return(0);
1907: }
1911: PetscErrorCode MatLUFactorNumeric_MPIAIJ(Mat B,Mat A, const MatFactorInfo *info)
1912: {
1914: return(0);
1915: }
1919: /*
1920: This uses the parallel ILU factorization of Peter Gottschling <pgottsch@osl.iu.edu>
1921: */
1922: PetscErrorCode MatSolve_MPIAIJ(Mat A, Vec b, Vec x)
1923: {
1924: namespace graph_dist = boost::graph::distributed;
1926: typedef graph_dist::ilu_default::ilu_level_graph_type lgraph_type;
1927: lgraph_type* lgraph_p;
1928: PetscContainer c;
1932: PetscObjectQuery((PetscObject) A, "graph", (PetscObject *) &c);
1933: PetscContainerGetPointer(c, (void **) &lgraph_p);
1934: VecCopy(b, x);
1936: PetscScalar* array_x;
1937: VecGetArray(x, &array_x);
1938: PetscInt sx;
1939: VecGetSize(x, &sx);
1940:
1941: PetscScalar* array_b;
1942: VecGetArray(b, &array_b);
1943: PetscInt sb;
1944: VecGetSize(b, &sb);
1946: lgraph_type& level_graph = *lgraph_p;
1947: graph_dist::ilu_default::graph_type& graph(level_graph.graph);
1949: typedef boost::multi_array_ref<PetscScalar, 1> array_ref_type;
1950: array_ref_type ref_b(array_b, boost::extents[num_vertices(graph)]),
1951: ref_x(array_x, boost::extents[num_vertices(graph)]);
1953: typedef boost::iterator_property_map<array_ref_type::iterator,
1954: boost::property_map<graph_dist::ilu_default::graph_type, boost::vertex_index_t>::type> gvector_type;
1955: gvector_type vector_b(ref_b.begin(), get(boost::vertex_index, graph)),
1956: vector_x(ref_x.begin(), get(boost::vertex_index, graph));
1957:
1958: ilu_set_solve(*lgraph_p, vector_b, vector_x);
1960: return(0);
1961: }
1962: #endif
1964: typedef struct { /* used by MatGetRedundantMatrix() for reusing matredundant */
1965: PetscInt nzlocal,nsends,nrecvs;
1966: PetscMPIInt *send_rank;
1967: PetscInt *sbuf_nz,*sbuf_j,**rbuf_j;
1968: PetscScalar *sbuf_a,**rbuf_a;
1969: PetscErrorCode (*MatDestroy)(Mat);
1970: } Mat_Redundant;
1974: PetscErrorCode PetscContainerDestroy_MatRedundant(void *ptr)
1975: {
1976: PetscErrorCode ierr;
1977: Mat_Redundant *redund=(Mat_Redundant*)ptr;
1978: PetscInt i;
1981: PetscFree(redund->send_rank);
1982: PetscFree(redund->sbuf_j);
1983: PetscFree(redund->sbuf_a);
1984: for (i=0; i<redund->nrecvs; i++){
1985: PetscFree(redund->rbuf_j[i]);
1986: PetscFree(redund->rbuf_a[i]);
1987: }
1988: PetscFree3(redund->sbuf_nz,redund->rbuf_j,redund->rbuf_a);
1989: PetscFree(redund);
1990: return(0);
1991: }
1995: PetscErrorCode MatDestroy_MatRedundant(Mat A)
1996: {
1997: PetscErrorCode ierr;
1998: PetscContainer container;
1999: Mat_Redundant *redund=PETSC_NULL;
2002: PetscObjectQuery((PetscObject)A,"Mat_Redundant",(PetscObject *)&container);
2003: if (container) {
2004: PetscContainerGetPointer(container,(void **)&redund);
2005: } else {
2006:     SETERRQ(PETSC_ERR_PLIB,"Container does not exist");
2007: }
2008: A->ops->destroy = redund->MatDestroy;
2009: PetscObjectCompose((PetscObject)A,"Mat_Redundant",0);
2010: (*A->ops->destroy)(A);
2011: PetscContainerDestroy(container);
2012: return(0);
2013: }
2017: PetscErrorCode MatGetRedundantMatrix_MPIAIJ(Mat mat,PetscInt nsubcomm,MPI_Comm subcomm,PetscInt mlocal_sub,MatReuse reuse,Mat *matredundant)
2018: {
2019: PetscMPIInt rank,size;
2020: MPI_Comm comm=((PetscObject)mat)->comm;
2022: PetscInt nsends=0,nrecvs=0,i,rownz_max=0;
2023: PetscMPIInt *send_rank=PETSC_NULL,*recv_rank=PETSC_NULL;
2024: PetscInt *rowrange=mat->rmap->range;
2025: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;
2026: Mat A=aij->A,B=aij->B,C=*matredundant;
2027: Mat_SeqAIJ *a=(Mat_SeqAIJ*)A->data,*b=(Mat_SeqAIJ*)B->data;
2028: PetscScalar *sbuf_a;
2029: PetscInt nzlocal=a->nz+b->nz;
2030: PetscInt j,cstart=mat->cmap->rstart,cend=mat->cmap->rend,row,nzA,nzB,ncols,*cworkA,*cworkB;
2031: PetscInt rstart=mat->rmap->rstart,rend=mat->rmap->rend,*bmap=aij->garray,M,N;
2032: PetscInt *cols,ctmp,lwrite,*rptr,l,*sbuf_j;
2033: MatScalar *aworkA,*aworkB;
2034: PetscScalar *vals;
2035: PetscMPIInt tag1,tag2,tag3,imdex;
2036: MPI_Request *s_waits1=PETSC_NULL,*s_waits2=PETSC_NULL,*s_waits3=PETSC_NULL,
2037: *r_waits1=PETSC_NULL,*r_waits2=PETSC_NULL,*r_waits3=PETSC_NULL;
2038: MPI_Status recv_status,*send_status;
2039: PetscInt *sbuf_nz=PETSC_NULL,*rbuf_nz=PETSC_NULL,count;
2040: PetscInt **rbuf_j=PETSC_NULL;
2041: PetscScalar **rbuf_a=PETSC_NULL;
2042: Mat_Redundant *redund=PETSC_NULL;
2043: PetscContainer container;
2046: MPI_Comm_rank(comm,&rank);
2047: MPI_Comm_size(comm,&size);
2049: if (reuse == MAT_REUSE_MATRIX) {
2050: MatGetSize(C,&M,&N);
2051: if (M != N || M != mat->rmap->N) SETERRQ(PETSC_ERR_ARG_SIZ,"Cannot reuse matrix. Wrong global size");
2052: MatGetLocalSize(C,&M,&N);
2053: if (M != N || M != mlocal_sub) SETERRQ(PETSC_ERR_ARG_SIZ,"Cannot reuse matrix. Wrong local size");
2054: PetscObjectQuery((PetscObject)C,"Mat_Redundant",(PetscObject *)&container);
2055: if (container) {
2056: PetscContainerGetPointer(container,(void **)&redund);
2057: } else {
2058:       SETERRQ(PETSC_ERR_PLIB,"Container does not exist");
2059: }
2060: if (nzlocal != redund->nzlocal) SETERRQ(PETSC_ERR_ARG_SIZ,"Cannot reuse matrix. Wrong nzlocal");
2062: nsends = redund->nsends;
2063: nrecvs = redund->nrecvs;
2064: send_rank = redund->send_rank; recv_rank = send_rank + size;
2065: sbuf_nz = redund->sbuf_nz; rbuf_nz = sbuf_nz + nsends;
2066: sbuf_j = redund->sbuf_j;
2067: sbuf_a = redund->sbuf_a;
2068: rbuf_j = redund->rbuf_j;
2069: rbuf_a = redund->rbuf_a;
2070: }
2072: if (reuse == MAT_INITIAL_MATRIX){
2073: PetscMPIInt subrank,subsize;
2074: PetscInt nleftover,np_subcomm;
2075: /* get the destination processors' id send_rank, nsends and nrecvs */
2076: MPI_Comm_rank(subcomm,&subrank);
2077: MPI_Comm_size(subcomm,&subsize);
2078: PetscMalloc((2*size+1)*sizeof(PetscMPIInt),&send_rank);
2079: recv_rank = send_rank + size;
2080: np_subcomm = size/nsubcomm;
2081: nleftover = size - nsubcomm*np_subcomm;
2082: nsends = 0; nrecvs = 0;
2083: for (i=0; i<size; i++){ /* i=rank*/
2084: if (subrank == i/nsubcomm && rank != i){ /* my_subrank == other's subrank */
2085: send_rank[nsends] = i; nsends++;
2086: recv_rank[nrecvs++] = i;
2087: }
2088: }
2089: if (rank >= size - nleftover){/* this proc is a leftover processor */
2090: i = size-nleftover-1;
2091: j = 0;
2092: while (j < nsubcomm - nleftover){
2093: send_rank[nsends++] = i;
2094: i--; j++;
2095: }
2096: }
2098: if (nleftover && subsize == size/nsubcomm && subrank==subsize-1){ /* this proc recvs from leftover processors */
2099: for (i=0; i<nleftover; i++){
2100: recv_rank[nrecvs++] = size-nleftover+i;
2101: }
2102: }
2104: /* allocate sbuf_j, sbuf_a */
2105: i = nzlocal + rowrange[rank+1] - rowrange[rank] + 2;
2106: PetscMalloc(i*sizeof(PetscInt),&sbuf_j);
2107: PetscMalloc((nzlocal+1)*sizeof(PetscScalar),&sbuf_a);
2108:   } /* end of if (reuse == MAT_INITIAL_MATRIX) */
2109:
2110: /* copy mat's local entries into the buffers */
2111: if (reuse == MAT_INITIAL_MATRIX){
2112: rownz_max = 0;
2113: rptr = sbuf_j;
2114: cols = sbuf_j + rend-rstart + 1;
2115: vals = sbuf_a;
2116: rptr[0] = 0;
2117: for (i=0; i<rend-rstart; i++){
2118: row = i + rstart;
2119: nzA = a->i[i+1] - a->i[i]; nzB = b->i[i+1] - b->i[i];
2120: ncols = nzA + nzB;
2121: cworkA = a->j + a->i[i]; cworkB = b->j + b->i[i];
2122: aworkA = a->a + a->i[i]; aworkB = b->a + b->i[i];
2123: /* load the column indices for this row into cols */
2124: lwrite = 0;
2125: for (l=0; l<nzB; l++) {
2126: if ((ctmp = bmap[cworkB[l]]) < cstart){
2127: vals[lwrite] = aworkB[l];
2128: cols[lwrite++] = ctmp;
2129: }
2130: }
2131: for (l=0; l<nzA; l++){
2132: vals[lwrite] = aworkA[l];
2133: cols[lwrite++] = cstart + cworkA[l];
2134: }
2135: for (l=0; l<nzB; l++) {
2136: if ((ctmp = bmap[cworkB[l]]) >= cend){
2137: vals[lwrite] = aworkB[l];
2138: cols[lwrite++] = ctmp;
2139: }
2140: }
2141: vals += ncols;
2142: cols += ncols;
2143: rptr[i+1] = rptr[i] + ncols;
2144: if (rownz_max < ncols) rownz_max = ncols;
2145: }
2146:     if (rptr[rend-rstart] != a->nz + b->nz) SETERRQ4(1, "rptr[%d] %d != %d + %d",rend-rstart,rptr[rend-rstart],a->nz,b->nz);
2147: } else { /* only copy matrix values into sbuf_a */
2148: rptr = sbuf_j;
2149: vals = sbuf_a;
2150: rptr[0] = 0;
2151: for (i=0; i<rend-rstart; i++){
2152: row = i + rstart;
2153: nzA = a->i[i+1] - a->i[i]; nzB = b->i[i+1] - b->i[i];
2154: ncols = nzA + nzB;
2155: cworkA = a->j + a->i[i]; cworkB = b->j + b->i[i];
2156: aworkA = a->a + a->i[i]; aworkB = b->a + b->i[i];
2157: lwrite = 0;
2158: for (l=0; l<nzB; l++) {
2159: if ((ctmp = bmap[cworkB[l]]) < cstart) vals[lwrite++] = aworkB[l];
2160: }
2161: for (l=0; l<nzA; l++) vals[lwrite++] = aworkA[l];
2162: for (l=0; l<nzB; l++) {
2163: if ((ctmp = bmap[cworkB[l]]) >= cend) vals[lwrite++] = aworkB[l];
2164: }
2165: vals += ncols;
2166: rptr[i+1] = rptr[i] + ncols;
2167: }
2168:   } /* end of if (reuse == MAT_INITIAL_MATRIX) */
2170: /* send nzlocal to others, and recv other's nzlocal */
2171: /*--------------------------------------------------*/
2172: if (reuse == MAT_INITIAL_MATRIX){
2173: PetscMalloc2(3*(nsends + nrecvs)+1,MPI_Request,&s_waits3,nsends+1,MPI_Status,&send_status);
2174: s_waits2 = s_waits3 + nsends;
2175: s_waits1 = s_waits2 + nsends;
2176: r_waits1 = s_waits1 + nsends;
2177: r_waits2 = r_waits1 + nrecvs;
2178: r_waits3 = r_waits2 + nrecvs;
2179: } else {
2180: PetscMalloc2(nsends + nrecvs +1,MPI_Request,&s_waits3,nsends+1,MPI_Status,&send_status);
2181: r_waits3 = s_waits3 + nsends;
2182: }
2184: PetscObjectGetNewTag((PetscObject)mat,&tag3);
2185: if (reuse == MAT_INITIAL_MATRIX){
2186: /* get new tags to keep the communication clean */
2187: PetscObjectGetNewTag((PetscObject)mat,&tag1);
2188: PetscObjectGetNewTag((PetscObject)mat,&tag2);
2189: PetscMalloc3(nsends+nrecvs+1,PetscInt,&sbuf_nz,nrecvs,PetscInt*,&rbuf_j,nrecvs,PetscScalar*,&rbuf_a);
2190: rbuf_nz = sbuf_nz + nsends;
2191:
2192: /* post receives of other's nzlocal */
2193: for (i=0; i<nrecvs; i++){
2194: MPI_Irecv(rbuf_nz+i,1,MPIU_INT,MPI_ANY_SOURCE,tag1,comm,r_waits1+i);
2195: }
2196: /* send nzlocal to others */
2197: for (i=0; i<nsends; i++){
2198: sbuf_nz[i] = nzlocal;
2199: MPI_Isend(sbuf_nz+i,1,MPIU_INT,send_rank[i],tag1,comm,s_waits1+i);
2200: }
2201: /* wait on receives of nzlocal; allocate space for rbuf_j, rbuf_a */
2202: count = nrecvs;
2203: while (count) {
2204: MPI_Waitany(nrecvs,r_waits1,&imdex,&recv_status);
2205: recv_rank[imdex] = recv_status.MPI_SOURCE;
2206: /* allocate rbuf_a and rbuf_j; then post receives of rbuf_j */
2207: PetscMalloc((rbuf_nz[imdex]+1)*sizeof(PetscScalar),&rbuf_a[imdex]);
2209: i = rowrange[recv_status.MPI_SOURCE+1] - rowrange[recv_status.MPI_SOURCE]; /* number of expected mat->i */
2210: rbuf_nz[imdex] += i + 2;
2211: PetscMalloc(rbuf_nz[imdex]*sizeof(PetscInt),&rbuf_j[imdex]);
2212: MPI_Irecv(rbuf_j[imdex],rbuf_nz[imdex],MPIU_INT,recv_status.MPI_SOURCE,tag2,comm,r_waits2+imdex);
2213: count--;
2214: }
2215: /* wait on sends of nzlocal */
2216: if (nsends) {MPI_Waitall(nsends,s_waits1,send_status);}
2217:     /* send mat->i,j to others, and recv from others */
2218: /*------------------------------------------------*/
2219: for (i=0; i<nsends; i++){
2220: j = nzlocal + rowrange[rank+1] - rowrange[rank] + 1;
2221: MPI_Isend(sbuf_j,j,MPIU_INT,send_rank[i],tag2,comm,s_waits2+i);
2222: }
2223: /* wait on receives of mat->i,j */
2224: /*------------------------------*/
2225: count = nrecvs;
2226: while (count) {
2227: MPI_Waitany(nrecvs,r_waits2,&imdex,&recv_status);
2228: if (recv_rank[imdex] != recv_status.MPI_SOURCE) SETERRQ2(1, "recv_rank %d != MPI_SOURCE %d",recv_rank[imdex],recv_status.MPI_SOURCE);
2229: count--;
2230: }
2231: /* wait on sends of mat->i,j */
2232: /*---------------------------*/
2233: if (nsends) {
2234: MPI_Waitall(nsends,s_waits2,send_status);
2235: }
2236:   } /* end of if (reuse == MAT_INITIAL_MATRIX) */
2238: /* post receives, send and receive mat->a */
2239: /*----------------------------------------*/
2240: for (imdex=0; imdex<nrecvs; imdex++) {
2241: MPI_Irecv(rbuf_a[imdex],rbuf_nz[imdex],MPIU_SCALAR,recv_rank[imdex],tag3,comm,r_waits3+imdex);
2242: }
2243: for (i=0; i<nsends; i++){
2244: MPI_Isend(sbuf_a,nzlocal,MPIU_SCALAR,send_rank[i],tag3,comm,s_waits3+i);
2245: }
2246: count = nrecvs;
2247: while (count) {
2248: MPI_Waitany(nrecvs,r_waits3,&imdex,&recv_status);
2249: if (recv_rank[imdex] != recv_status.MPI_SOURCE) SETERRQ2(1, "recv_rank %d != MPI_SOURCE %d",recv_rank[imdex],recv_status.MPI_SOURCE);
2250: count--;
2251: }
2252: if (nsends) {
2253: MPI_Waitall(nsends,s_waits3,send_status);
2254: }
2256: PetscFree2(s_waits3,send_status);
2257:
2258: /* create redundant matrix */
2259: /*-------------------------*/
2260: if (reuse == MAT_INITIAL_MATRIX){
2261: /* compute rownz_max for preallocation */
2262: for (imdex=0; imdex<nrecvs; imdex++){
2263: j = rowrange[recv_rank[imdex]+1] - rowrange[recv_rank[imdex]];
2264: rptr = rbuf_j[imdex];
2265: for (i=0; i<j; i++){
2266: ncols = rptr[i+1] - rptr[i];
2267: if (rownz_max < ncols) rownz_max = ncols;
2268: }
2269: }
2270:
2271: MatCreate(subcomm,&C);
2272: MatSetSizes(C,mlocal_sub,mlocal_sub,PETSC_DECIDE,PETSC_DECIDE);
2273: MatSetFromOptions(C);
2274: MatSeqAIJSetPreallocation(C,rownz_max,PETSC_NULL);
2275: MatMPIAIJSetPreallocation(C,rownz_max,PETSC_NULL,rownz_max,PETSC_NULL);
2276: } else {
2277: C = *matredundant;
2278: }
2280: /* insert local matrix entries */
2281: rptr = sbuf_j;
2282: cols = sbuf_j + rend-rstart + 1;
2283: vals = sbuf_a;
2284: for (i=0; i<rend-rstart; i++){
2285: row = i + rstart;
2286: ncols = rptr[i+1] - rptr[i];
2287: MatSetValues(C,1,&row,ncols,cols,vals,INSERT_VALUES);
2288: vals += ncols;
2289: cols += ncols;
2290: }
2291: /* insert received matrix entries */
2292: for (imdex=0; imdex<nrecvs; imdex++){
2293: rstart = rowrange[recv_rank[imdex]];
2294: rend = rowrange[recv_rank[imdex]+1];
2295: rptr = rbuf_j[imdex];
2296: cols = rbuf_j[imdex] + rend-rstart + 1;
2297: vals = rbuf_a[imdex];
2298: for (i=0; i<rend-rstart; i++){
2299: row = i + rstart;
2300: ncols = rptr[i+1] - rptr[i];
2301: MatSetValues(C,1,&row,ncols,cols,vals,INSERT_VALUES);
2302: vals += ncols;
2303: cols += ncols;
2304: }
2305: }
2306: MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);
2307: MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);
2308: MatGetSize(C,&M,&N);
2309: if (M != mat->rmap->N || N != mat->cmap->N) SETERRQ2(PETSC_ERR_ARG_INCOMP,"redundant mat size %d != input mat size %d",M,mat->rmap->N);
2310: if (reuse == MAT_INITIAL_MATRIX){
2311: PetscContainer container;
2312: *matredundant = C;
2313: /* create a supporting struct and attach it to C for reuse */
2314: PetscNewLog(C,Mat_Redundant,&redund);
2315: PetscContainerCreate(PETSC_COMM_SELF,&container);
2316: PetscContainerSetPointer(container,redund);
2317: PetscObjectCompose((PetscObject)C,"Mat_Redundant",(PetscObject)container);
2318: PetscContainerSetUserDestroy(container,PetscContainerDestroy_MatRedundant);
2319:
2320: redund->nzlocal = nzlocal;
2321: redund->nsends = nsends;
2322: redund->nrecvs = nrecvs;
2323: redund->send_rank = send_rank;
2324: redund->sbuf_nz = sbuf_nz;
2325: redund->sbuf_j = sbuf_j;
2326: redund->sbuf_a = sbuf_a;
2327: redund->rbuf_j = rbuf_j;
2328: redund->rbuf_a = rbuf_a;
2330: redund->MatDestroy = C->ops->destroy;
2331: C->ops->destroy = MatDestroy_MatRedundant;
2332: }
2333: return(0);
2334: }
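
/*
   A minimal reuse sketch (an assumption, not code from this file): a caller builds the
   redundant matrix once with MAT_INITIAL_MATRIX, and the Mat_Redundant container attached
   above lets later calls with MAT_REUSE_MATRIX skip the setup communication:

      MatGetRedundantMatrix(mat,nsubcomm,subcomm,mlocal_sub,MAT_INITIAL_MATRIX,&Cred);
      ... update the numerical values of mat, keeping the same nonzero pattern ...
      MatGetRedundantMatrix(mat,nsubcomm,subcomm,mlocal_sub,MAT_REUSE_MATRIX,&Cred);
*/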
2338: PetscErrorCode MatGetRowMaxAbs_MPIAIJ(Mat A, Vec v, PetscInt idx[])
2339: {
2340: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
2342: PetscInt i,*idxb = 0;
2343: PetscScalar *va,*vb;
2344: Vec vtmp;
2347: MatGetRowMaxAbs(a->A,v,idx);
2348: VecGetArray(v,&va);
2349: if (idx) {
2350: for (i=0; i<A->rmap->n; i++) {
2351: if (PetscAbsScalar(va[i])) idx[i] += A->cmap->rstart;
2352: }
2353: }
2355: VecCreateSeq(PETSC_COMM_SELF,A->rmap->n,&vtmp);
2356: if (idx) {
2357: PetscMalloc(A->rmap->n*sizeof(PetscInt),&idxb);
2358: }
2359: MatGetRowMaxAbs(a->B,vtmp,idxb);
2360: VecGetArray(vtmp,&vb);
2362: for (i=0; i<A->rmap->n; i++){
2363: if (PetscAbsScalar(va[i]) < PetscAbsScalar(vb[i])) {
2364: va[i] = vb[i];
2365: if (idx) idx[i] = a->garray[idxb[i]];
2366: }
2367: }
2369: VecRestoreArray(v,&va);
2370: VecRestoreArray(vtmp,&vb);
2371: if (idxb) {
2372: PetscFree(idxb);
2373: }
2374: VecDestroy(vtmp);
2375: return(0);
2376: }
2380: PetscErrorCode MatGetRowMinAbs_MPIAIJ(Mat A, Vec v, PetscInt idx[])
2381: {
2382: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
2384: PetscInt i,*idxb = 0;
2385: PetscScalar *va,*vb;
2386: Vec vtmp;
2389: MatGetRowMinAbs(a->A,v,idx);
2390: VecGetArray(v,&va);
2391: if (idx) {
2392:     for (i=0; i<A->rmap->n; i++) {
2393: if (PetscAbsScalar(va[i])) idx[i] += A->cmap->rstart;
2394: }
2395: }
2397: VecCreateSeq(PETSC_COMM_SELF,A->rmap->n,&vtmp);
2398: if (idx) {
2399: PetscMalloc(A->rmap->n*sizeof(PetscInt),&idxb);
2400: }
2401: MatGetRowMinAbs(a->B,vtmp,idxb);
2402: VecGetArray(vtmp,&vb);
2404: for (i=0; i<A->rmap->n; i++){
2405: if (PetscAbsScalar(va[i]) > PetscAbsScalar(vb[i])) {
2406: va[i] = vb[i];
2407: if (idx) idx[i] = a->garray[idxb[i]];
2408: }
2409: }
2411: VecRestoreArray(v,&va);
2412: VecRestoreArray(vtmp,&vb);
2413: if (idxb) {
2414: PetscFree(idxb);
2415: }
2416: VecDestroy(vtmp);
2417: return(0);
2418: }
2422: PetscErrorCode MatGetRowMin_MPIAIJ(Mat A, Vec v, PetscInt idx[])
2423: {
2424: Mat_MPIAIJ *mat = (Mat_MPIAIJ *) A->data;
2425: PetscInt n = A->rmap->n;
2426: PetscInt cstart = A->cmap->rstart;
2427: PetscInt *cmap = mat->garray;
2428: PetscInt *diagIdx, *offdiagIdx;
2429: Vec diagV, offdiagV;
2430: PetscScalar *a, *diagA, *offdiagA;
2431: PetscInt r;
2435: PetscMalloc2(n,PetscInt,&diagIdx,n,PetscInt,&offdiagIdx);
2436: VecCreateSeq(((PetscObject)A)->comm, n, &diagV);
2437: VecCreateSeq(((PetscObject)A)->comm, n, &offdiagV);
2438: MatGetRowMin(mat->A, diagV, diagIdx);
2439: MatGetRowMin(mat->B, offdiagV, offdiagIdx);
2440: VecGetArray(v, &a);
2441: VecGetArray(diagV, &diagA);
2442: VecGetArray(offdiagV, &offdiagA);
2443: for(r = 0; r < n; ++r) {
2444: if (PetscAbsScalar(diagA[r]) <= PetscAbsScalar(offdiagA[r])) {
2445: a[r] = diagA[r];
2446: idx[r] = cstart + diagIdx[r];
2447: } else {
2448: a[r] = offdiagA[r];
2449: idx[r] = cmap[offdiagIdx[r]];
2450: }
2451: }
2452: VecRestoreArray(v, &a);
2453: VecRestoreArray(diagV, &diagA);
2454: VecRestoreArray(offdiagV, &offdiagA);
2455: VecDestroy(diagV);
2456: VecDestroy(offdiagV);
2457: PetscFree2(diagIdx, offdiagIdx);
2458: return(0);
2459: }
2463: PetscErrorCode MatGetRowMax_MPIAIJ(Mat A, Vec v, PetscInt idx[])
2464: {
2465: Mat_MPIAIJ *mat = (Mat_MPIAIJ *) A->data;
2466: PetscInt n = A->rmap->n;
2467: PetscInt cstart = A->cmap->rstart;
2468: PetscInt *cmap = mat->garray;
2469: PetscInt *diagIdx, *offdiagIdx;
2470: Vec diagV, offdiagV;
2471: PetscScalar *a, *diagA, *offdiagA;
2472: PetscInt r;
2476: PetscMalloc2(n,PetscInt,&diagIdx,n,PetscInt,&offdiagIdx);
2477: VecCreateSeq(((PetscObject)A)->comm, n, &diagV);
2478: VecCreateSeq(((PetscObject)A)->comm, n, &offdiagV);
2479: MatGetRowMax(mat->A, diagV, diagIdx);
2480: MatGetRowMax(mat->B, offdiagV, offdiagIdx);
2481: VecGetArray(v, &a);
2482: VecGetArray(diagV, &diagA);
2483: VecGetArray(offdiagV, &offdiagA);
2484: for(r = 0; r < n; ++r) {
2485: if (PetscAbsScalar(diagA[r]) >= PetscAbsScalar(offdiagA[r])) {
2486: a[r] = diagA[r];
2487: idx[r] = cstart + diagIdx[r];
2488: } else {
2489: a[r] = offdiagA[r];
2490: idx[r] = cmap[offdiagIdx[r]];
2491: }
2492: }
2493: VecRestoreArray(v, &a);
2494: VecRestoreArray(diagV, &diagA);
2495: VecRestoreArray(offdiagV, &offdiagA);
2496: VecDestroy(diagV);
2497: VecDestroy(offdiagV);
2498: PetscFree2(diagIdx, offdiagIdx);
2499: return(0);
2500: }
2504: PetscErrorCode MatGetSeqNonzerostructure_MPIAIJ(Mat mat,Mat *newmat[])
2505: {
2509: MatGetSubMatrix_MPIAIJ_All(mat,MAT_DO_NOT_GET_VALUES,MAT_INITIAL_MATRIX,newmat);
2510: return(0);
2511: }
2513: /* -------------------------------------------------------------------*/
2514: static struct _MatOps MatOps_Values = {MatSetValues_MPIAIJ,
2515: MatGetRow_MPIAIJ,
2516: MatRestoreRow_MPIAIJ,
2517: MatMult_MPIAIJ,
2518: /* 4*/ MatMultAdd_MPIAIJ,
2519: MatMultTranspose_MPIAIJ,
2520: MatMultTransposeAdd_MPIAIJ,
2521: #ifdef PETSC_HAVE_PBGL
2522: MatSolve_MPIAIJ,
2523: #else
2524: 0,
2525: #endif
2526: 0,
2527: 0,
2528: /*10*/ 0,
2529: 0,
2530: 0,
2531: MatRelax_MPIAIJ,
2532: MatTranspose_MPIAIJ,
2533: /*15*/ MatGetInfo_MPIAIJ,
2534: MatEqual_MPIAIJ,
2535: MatGetDiagonal_MPIAIJ,
2536: MatDiagonalScale_MPIAIJ,
2537: MatNorm_MPIAIJ,
2538: /*20*/ MatAssemblyBegin_MPIAIJ,
2539: MatAssemblyEnd_MPIAIJ,
2540: 0,
2541: MatSetOption_MPIAIJ,
2542: MatZeroEntries_MPIAIJ,
2543: /*25*/ MatZeroRows_MPIAIJ,
2544: 0,
2545: #ifdef PETSC_HAVE_PBGL
2546: 0,
2547: #else
2548: 0,
2549: #endif
2550: 0,
2551: 0,
2552: /*30*/ MatSetUpPreallocation_MPIAIJ,
2553: #ifdef PETSC_HAVE_PBGL
2554: 0,
2555: #else
2556: 0,
2557: #endif
2558: 0,
2559: 0,
2560: 0,
2561: /*35*/ MatDuplicate_MPIAIJ,
2562: 0,
2563: 0,
2564: 0,
2565: 0,
2566: /*40*/ MatAXPY_MPIAIJ,
2567: MatGetSubMatrices_MPIAIJ,
2568: MatIncreaseOverlap_MPIAIJ,
2569: MatGetValues_MPIAIJ,
2570: MatCopy_MPIAIJ,
2571: /*45*/ MatGetRowMax_MPIAIJ,
2572: MatScale_MPIAIJ,
2573: 0,
2574: 0,
2575: 0,
2576: /*50*/ MatSetBlockSize_MPIAIJ,
2577: 0,
2578: 0,
2579: 0,
2580: 0,
2581: /*55*/ MatFDColoringCreate_MPIAIJ,
2582: 0,
2583: MatSetUnfactored_MPIAIJ,
2584: MatPermute_MPIAIJ,
2585: 0,
2586: /*60*/ MatGetSubMatrix_MPIAIJ,
2587: MatDestroy_MPIAIJ,
2588: MatView_MPIAIJ,
2589: 0,
2590: 0,
2591: /*65*/ 0,
2592: 0,
2593: 0,
2594: 0,
2595: 0,
2596: /*70*/ MatGetRowMaxAbs_MPIAIJ,
2597: MatGetRowMinAbs_MPIAIJ,
2598: 0,
2599: MatSetColoring_MPIAIJ,
2600: #if defined(PETSC_HAVE_ADIC)
2601: MatSetValuesAdic_MPIAIJ,
2602: #else
2603: 0,
2604: #endif
2605: MatSetValuesAdifor_MPIAIJ,
2606: /*75*/ 0,
2607: 0,
2608: 0,
2609: 0,
2610: 0,
2611: /*80*/ 0,
2612: 0,
2613: 0,
2614: /*84*/ MatLoad_MPIAIJ,
2615: 0,
2616: 0,
2617: 0,
2618: 0,
2619: 0,
2620: /*90*/ MatMatMult_MPIAIJ_MPIAIJ,
2621: MatMatMultSymbolic_MPIAIJ_MPIAIJ,
2622: MatMatMultNumeric_MPIAIJ_MPIAIJ,
2623: MatPtAP_Basic,
2624: MatPtAPSymbolic_MPIAIJ,
2625: /*95*/ MatPtAPNumeric_MPIAIJ,
2626: 0,
2627: 0,
2628: 0,
2629: 0,
2630: /*100*/0,
2631: MatPtAPSymbolic_MPIAIJ_MPIAIJ,
2632: MatPtAPNumeric_MPIAIJ_MPIAIJ,
2633: MatConjugate_MPIAIJ,
2634: 0,
2635: /*105*/MatSetValuesRow_MPIAIJ,
2636: MatRealPart_MPIAIJ,
2637: MatImaginaryPart_MPIAIJ,
2638: 0,
2639: 0,
2640: /*110*/0,
2641: MatGetRedundantMatrix_MPIAIJ,
2642: MatGetRowMin_MPIAIJ,
2643: 0,
2644: 0,
2645: /*115*/MatGetSeqNonzerostructure_MPIAIJ};
2647: /* ----------------------------------------------------------------------------------------*/
2652: PetscErrorCode MatStoreValues_MPIAIJ(Mat mat)
2653: {
2654: Mat_MPIAIJ *aij = (Mat_MPIAIJ *)mat->data;
2658: MatStoreValues(aij->A);
2659: MatStoreValues(aij->B);
2660: return(0);
2661: }
2667: PetscErrorCode MatRetrieveValues_MPIAIJ(Mat mat)
2668: {
2669: Mat_MPIAIJ *aij = (Mat_MPIAIJ *)mat->data;
2673: MatRetrieveValues(aij->A);
2674: MatRetrieveValues(aij->B);
2675: return(0);
2676: }
2679: #include petscpc.h
2683: PetscErrorCode MatMPIAIJSetPreallocation_MPIAIJ(Mat B,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[])
2684: {
2685: Mat_MPIAIJ *b;
2687: PetscInt i;
2690: if (d_nz == PETSC_DEFAULT || d_nz == PETSC_DECIDE) d_nz = 5;
2691: if (o_nz == PETSC_DEFAULT || o_nz == PETSC_DECIDE) o_nz = 2;
2692: if (d_nz < 0) SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE,"d_nz cannot be less than 0: value %D",d_nz);
2693: if (o_nz < 0) SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE,"o_nz cannot be less than 0: value %D",o_nz);
2695: PetscMapSetBlockSize(B->rmap,1);
2696: PetscMapSetBlockSize(B->cmap,1);
2697: PetscMapSetUp(B->rmap);
2698: PetscMapSetUp(B->cmap);
2699: if (d_nnz) {
2700: for (i=0; i<B->rmap->n; i++) {
2701: if (d_nnz[i] < 0) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"d_nnz cannot be less than 0: local row %D value %D",i,d_nnz[i]);
2702: }
2703: }
2704: if (o_nnz) {
2705: for (i=0; i<B->rmap->n; i++) {
2706: if (o_nnz[i] < 0) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"o_nnz cannot be less than 0: local row %D value %D",i,o_nnz[i]);
2707: }
2708: }
2709: b = (Mat_MPIAIJ*)B->data;
2711: if (!B->preallocated) {
2712: /* Explicitly create 2 MATSEQAIJ matrices. */
2713: MatCreate(PETSC_COMM_SELF,&b->A);
2714: MatSetSizes(b->A,B->rmap->n,B->cmap->n,B->rmap->n,B->cmap->n);
2715: MatSetType(b->A,MATSEQAIJ);
2716: PetscLogObjectParent(B,b->A);
2717: MatCreate(PETSC_COMM_SELF,&b->B);
2718: MatSetSizes(b->B,B->rmap->n,B->cmap->N,B->rmap->n,B->cmap->N);
2719: MatSetType(b->B,MATSEQAIJ);
2720: PetscLogObjectParent(B,b->B);
2721: }
2723: MatSeqAIJSetPreallocation(b->A,d_nz,d_nnz);
2724: MatSeqAIJSetPreallocation(b->B,o_nz,o_nnz);
2725: B->preallocated = PETSC_TRUE;
2726: return(0);
2727: }
2732: PetscErrorCode MatDuplicate_MPIAIJ(Mat matin,MatDuplicateOption cpvalues,Mat *newmat)
2733: {
2734: Mat mat;
2735: Mat_MPIAIJ *a,*oldmat = (Mat_MPIAIJ*)matin->data;
2739: *newmat = 0;
2740: MatCreate(((PetscObject)matin)->comm,&mat);
2741: MatSetSizes(mat,matin->rmap->n,matin->cmap->n,matin->rmap->N,matin->cmap->N);
2742: MatSetType(mat,((PetscObject)matin)->type_name);
2743: PetscMemcpy(mat->ops,matin->ops,sizeof(struct _MatOps));
2744: a = (Mat_MPIAIJ*)mat->data;
2745:
2746: mat->factor = matin->factor;
2747: mat->rmap->bs = matin->rmap->bs;
2748: mat->assembled = PETSC_TRUE;
2749: mat->insertmode = NOT_SET_VALUES;
2750: mat->preallocated = PETSC_TRUE;
2752: a->size = oldmat->size;
2753: a->rank = oldmat->rank;
2754: a->donotstash = oldmat->donotstash;
2755: a->roworiented = oldmat->roworiented;
2756: a->rowindices = 0;
2757: a->rowvalues = 0;
2758: a->getrowactive = PETSC_FALSE;
2760: PetscMapCopy(((PetscObject)mat)->comm,matin->rmap,mat->rmap);
2761: PetscMapCopy(((PetscObject)mat)->comm,matin->cmap,mat->cmap);
2763: MatStashCreate_Private(((PetscObject)matin)->comm,1,&mat->stash);
2764: if (oldmat->colmap) {
2765: #if defined (PETSC_USE_CTABLE)
2766: PetscTableCreateCopy(oldmat->colmap,&a->colmap);
2767: #else
2768: PetscMalloc((mat->cmap->N)*sizeof(PetscInt),&a->colmap);
2769: PetscLogObjectMemory(mat,(mat->cmap->N)*sizeof(PetscInt));
2770: PetscMemcpy(a->colmap,oldmat->colmap,(mat->cmap->N)*sizeof(PetscInt));
2771: #endif
2772: } else a->colmap = 0;
2773: if (oldmat->garray) {
2774: PetscInt len;
2775: len = oldmat->B->cmap->n;
2776: PetscMalloc((len+1)*sizeof(PetscInt),&a->garray);
2777: PetscLogObjectMemory(mat,len*sizeof(PetscInt));
2778: if (len) { PetscMemcpy(a->garray,oldmat->garray,len*sizeof(PetscInt)); }
2779: } else a->garray = 0;
2780:
2781: VecDuplicate(oldmat->lvec,&a->lvec);
2782: PetscLogObjectParent(mat,a->lvec);
2783: VecScatterCopy(oldmat->Mvctx,&a->Mvctx);
2784: PetscLogObjectParent(mat,a->Mvctx);
2785: MatDuplicate(oldmat->A,cpvalues,&a->A);
2786: PetscLogObjectParent(mat,a->A);
2787: MatDuplicate(oldmat->B,cpvalues,&a->B);
2788: PetscLogObjectParent(mat,a->B);
2789: PetscFListDuplicate(((PetscObject)matin)->qlist,&((PetscObject)mat)->qlist);
2790: *newmat = mat;
2791: return(0);
2792: }
2794: #include petscsys.h
2798: PetscErrorCode MatLoad_MPIAIJ(PetscViewer viewer, const MatType type,Mat *newmat)
2799: {
2800: Mat A;
2801: PetscScalar *vals,*svals;
2802: MPI_Comm comm = ((PetscObject)viewer)->comm;
2803: MPI_Status status;
2805: PetscMPIInt rank,size,tag = ((PetscObject)viewer)->tag,mpicnt,mpimaxnz;
2806: PetscInt i,nz,j,rstart,rend,mmax,maxnz = 0;
2807: PetscInt header[4],*rowlengths = 0,M,N,m,*cols;
2808: PetscInt *ourlens = PETSC_NULL,*procsnz = PETSC_NULL,*offlens = PETSC_NULL,jj,*mycols,*smycols;
2809: PetscInt cend,cstart,n,*rowners;
2810: int fd;
2813: MPI_Comm_size(comm,&size);
2814: MPI_Comm_rank(comm,&rank);
2815: if (!rank) {
2816: PetscViewerBinaryGetDescriptor(viewer,&fd);
2817: PetscBinaryRead(fd,(char *)header,4,PETSC_INT);
2818: if (header[0] != MAT_FILE_COOKIE) SETERRQ(PETSC_ERR_FILE_UNEXPECTED,"not matrix object");
2819: }
2821: MPI_Bcast(header+1,3,MPIU_INT,0,comm);
2822: M = header[1]; N = header[2];
2823: /* determine ownership of all rows */
2824: m = M/size + ((M % size) > rank);
2825: PetscMalloc((size+1)*sizeof(PetscInt),&rowners);
2826: MPI_Allgather(&m,1,MPIU_INT,rowners+1,1,MPIU_INT,comm);
2828: /* First process needs enough room for process with most rows */
2829: if (!rank) {
2830: mmax = rowners[1];
2831: for (i=2; i<size; i++) {
2832: mmax = PetscMax(mmax,rowners[i]);
2833: }
2834: } else mmax = m;
2836: rowners[0] = 0;
2837: for (i=2; i<=size; i++) {
2838: rowners[i] += rowners[i-1];
2839: }
2840: rstart = rowners[rank];
2841: rend = rowners[rank+1];
2843: /* distribute row lengths to all processors */
2844: PetscMalloc2(mmax,PetscInt,&ourlens,mmax,PetscInt,&offlens);
2845: if (!rank) {
2846: PetscBinaryRead(fd,ourlens,m,PETSC_INT);
2847: PetscMalloc(m*sizeof(PetscInt),&rowlengths);
2848: PetscMalloc(size*sizeof(PetscInt),&procsnz);
2849: PetscMemzero(procsnz,size*sizeof(PetscInt));
2850: for (j=0; j<m; j++) {
2851: procsnz[0] += ourlens[j];
2852: }
2853: for (i=1; i<size; i++) {
2854: PetscBinaryRead(fd,rowlengths,rowners[i+1]-rowners[i],PETSC_INT);
2855: /* calculate the number of nonzeros on each processor */
2856: for (j=0; j<rowners[i+1]-rowners[i]; j++) {
2857: procsnz[i] += rowlengths[j];
2858: }
2859: mpicnt = PetscMPIIntCast(rowners[i+1]-rowners[i]);
2860: MPI_Send(rowlengths,mpicnt,MPIU_INT,i,tag,comm);
2861: }
2862: PetscFree(rowlengths);
2863: } else {
2864: mpicnt = PetscMPIIntCast(m);
2865: MPI_Recv(ourlens,mpicnt,MPIU_INT,0,tag,comm,&status);
2866: }
2868: if (!rank) {
2869: /* determine max buffer needed and allocate it */
2870: maxnz = 0;
2871: for (i=0; i<size; i++) {
2872: maxnz = PetscMax(maxnz,procsnz[i]);
2873: }
2874: PetscMalloc(maxnz*sizeof(PetscInt),&cols);
2876: /* read in my part of the matrix column indices */
2877: nz = procsnz[0];
2878: PetscMalloc(nz*sizeof(PetscInt),&mycols);
2879: PetscBinaryRead(fd,mycols,nz,PETSC_INT);
2881:     /* read in everyone else's and ship off */
2882: for (i=1; i<size; i++) {
2883: nz = procsnz[i];
2884: PetscBinaryRead(fd,cols,nz,PETSC_INT);
2885: mpicnt = PetscMPIIntCast(nz);
2886: MPI_Send(cols,mpicnt,MPIU_INT,i,tag,comm);
2887: }
2888: PetscFree(cols);
2889: } else {
2890: /* determine buffer space needed for message */
2891: nz = 0;
2892: for (i=0; i<m; i++) {
2893: nz += ourlens[i];
2894: }
2895: PetscMalloc(nz*sizeof(PetscInt),&mycols);
2897: /* receive message of column indices*/
2898: mpicnt = PetscMPIIntCast(nz);
2899: MPI_Recv(mycols,mpicnt,MPIU_INT,0,tag,comm,&status);
2900: MPI_Get_count(&status,MPIU_INT,&mpimaxnz);
2901: if (mpimaxnz == MPI_UNDEFINED) {SETERRQ1(PETSC_ERR_LIB,"MPI_Get_count() returned MPI_UNDEFINED, expected %d",mpicnt);}
2902: else if (mpimaxnz < 0) {SETERRQ2(PETSC_ERR_LIB,"MPI_Get_count() returned impossible negative value %d, expected %d",mpimaxnz,mpicnt);}
2903: else if (mpimaxnz != mpicnt) {SETERRQ2(PETSC_ERR_FILE_UNEXPECTED,"something is wrong with file: expected %d received %d",mpicnt,mpimaxnz);}
2904: }
2906: /* determine column ownership if matrix is not square */
2907: if (N != M) {
2908: n = N/size + ((N % size) > rank);
2909: MPI_Scan(&n,&cend,1,MPIU_INT,MPI_SUM,comm);
2910: cstart = cend - n;
2911: } else {
2912: cstart = rstart;
2913: cend = rend;
2914: n = cend - cstart;
2915: }
2917: /* loop over local rows, determining number of off diagonal entries */
2918: PetscMemzero(offlens,m*sizeof(PetscInt));
2919: jj = 0;
2920: for (i=0; i<m; i++) {
2921: for (j=0; j<ourlens[i]; j++) {
2922: if (mycols[jj] < cstart || mycols[jj] >= cend) offlens[i]++;
2923: jj++;
2924: }
2925: }
2927: /* create our matrix */
2928: for (i=0; i<m; i++) {
2929: ourlens[i] -= offlens[i];
2930: }
2931: MatCreate(comm,&A);
2932: MatSetSizes(A,m,n,M,N);
2933: MatSetType(A,type);
2934: MatMPIAIJSetPreallocation(A,0,ourlens,0,offlens);
2936: for (i=0; i<m; i++) {
2937: ourlens[i] += offlens[i];
2938: }
2940: if (!rank) {
2941: PetscMalloc((maxnz+1)*sizeof(PetscScalar),&vals);
2943: /* read in my part of the matrix numerical values */
2944: nz = procsnz[0];
2945: PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);
2946:
2947: /* insert into matrix */
2948: jj = rstart;
2949: smycols = mycols;
2950: svals = vals;
2951: for (i=0; i<m; i++) {
2952: MatSetValues_MPIAIJ(A,1,&jj,ourlens[i],smycols,svals,INSERT_VALUES);
2953: smycols += ourlens[i];
2954: svals += ourlens[i];
2955: jj++;
2956: }
2958: /* read in other processors and ship out */
2959: for (i=1; i<size; i++) {
2960: nz = procsnz[i];
2961: PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);
2962: mpicnt = PetscMPIIntCast(nz);
2963: MPI_Send(vals,mpicnt,MPIU_SCALAR,i,((PetscObject)A)->tag,comm);
2964: }
2965: PetscFree(procsnz);
2966: } else {
2967: /* receive numeric values */
2968: PetscMalloc((nz+1)*sizeof(PetscScalar),&vals);
2970: /* receive message of values*/
2971: mpicnt = PetscMPIIntCast(nz);
2972: MPI_Recv(vals,mpicnt,MPIU_SCALAR,0,((PetscObject)A)->tag,comm,&status);
2973: MPI_Get_count(&status,MPIU_SCALAR,&mpimaxnz);
2974: if (mpimaxnz == MPI_UNDEFINED) {SETERRQ1(PETSC_ERR_LIB,"MPI_Get_count() returned MPI_UNDEFINED, expected %d",mpicnt);}
2975: else if (mpimaxnz < 0) {SETERRQ2(PETSC_ERR_LIB,"MPI_Get_count() returned impossible negative value %d, expected %d",mpimaxnz,mpicnt);}
2976: else if (mpimaxnz != mpicnt) {SETERRQ2(PETSC_ERR_FILE_UNEXPECTED,"something is wrong with file: expected %d received %d",mpicnt,mpimaxnz);}
2978: /* insert into matrix */
2979: jj = rstart;
2980: smycols = mycols;
2981: svals = vals;
2982: for (i=0; i<m; i++) {
2983: MatSetValues_MPIAIJ(A,1,&jj,ourlens[i],smycols,svals,INSERT_VALUES);
2984: smycols += ourlens[i];
2985: svals += ourlens[i];
2986: jj++;
2987: }
2988: }
2989: PetscFree2(ourlens,offlens);
2990: PetscFree(vals);
2991: PetscFree(mycols);
2992: PetscFree(rowners);
2994: MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
2995: MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
2996: *newmat = A;
2997: return(0);
2998: }
3002: /*
3003:     Not great since it makes two copies of the submatrix: first a SeqAIJ on each process,
3004:     and then the end result obtained by concatenating the local matrices.
3005: Writing it directly would be much like MatGetSubMatrices_MPIAIJ()
3006: */
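
/*
   Illustrative sketch of a typical calling sequence (an assumption, not code from this file):
   the submatrix composed as "SubMatrix" on the result is what makes the MAT_REUSE_MATRIX path work:

      MatGetSubMatrix(mat,isrow,iscol,PETSC_DECIDE,MAT_INITIAL_MATRIX,&newmat);
      ... change the values (not the nonzero pattern) of mat ...
      MatGetSubMatrix(mat,isrow,iscol,PETSC_DECIDE,MAT_REUSE_MATRIX,&newmat);
*/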
3007: PetscErrorCode MatGetSubMatrix_MPIAIJ(Mat mat,IS isrow,IS iscol,PetscInt csize,MatReuse call,Mat *newmat)
3008: {
3010: PetscMPIInt rank,size;
3011: PetscInt i,m,n,rstart,row,rend,nz,*cwork,j;
3012: PetscInt *ii,*jj,nlocal,*dlens,*olens,dlen,olen,jend,mglobal;
3013: Mat *local,M,Mreuse;
3014: MatScalar *vwork,*aa;
3015: MPI_Comm comm = ((PetscObject)mat)->comm;
3016: Mat_SeqAIJ *aij;
3020: MPI_Comm_rank(comm,&rank);
3021: MPI_Comm_size(comm,&size);
3023: if (call == MAT_REUSE_MATRIX) {
3024: PetscObjectQuery((PetscObject)*newmat,"SubMatrix",(PetscObject *)&Mreuse);
3025: if (!Mreuse) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Submatrix passed in was not used before, cannot reuse");
3026: local = &Mreuse;
3027: MatGetSubMatrices(mat,1,&isrow,&iscol,MAT_REUSE_MATRIX,&local);
3028: } else {
3029: MatGetSubMatrices(mat,1,&isrow,&iscol,MAT_INITIAL_MATRIX,&local);
3030: Mreuse = *local;
3031: PetscFree(local);
3032: }
3034: /*
3035: m - number of local rows
3036: n - number of columns (same on all processors)
3037: rstart - first row in new global matrix generated
3038: */
3039: MatGetSize(Mreuse,&m,&n);
3040: if (call == MAT_INITIAL_MATRIX) {
3041: aij = (Mat_SeqAIJ*)(Mreuse)->data;
3042: ii = aij->i;
3043: jj = aij->j;
3045: /*
3046: Determine the number of non-zeros in the diagonal and off-diagonal
3047: portions of the matrix in order to do correct preallocation
3048: */
3050: /* first get start and end of "diagonal" columns */
3051: if (csize == PETSC_DECIDE) {
3052: ISGetSize(isrow,&mglobal);
3053: if (mglobal == n) { /* square matrix */
3054: nlocal = m;
3055: } else {
3056: nlocal = n/size + ((n % size) > rank);
3057: }
3058: } else {
3059: nlocal = csize;
3060: }
3061: MPI_Scan(&nlocal,&rend,1,MPIU_INT,MPI_SUM,comm);
3062: rstart = rend - nlocal;
3063: if (rank == size - 1 && rend != n) {
3064: SETERRQ2(PETSC_ERR_ARG_SIZ,"Local column sizes %D do not add up to total number of columns %D",rend,n);
3065: }
3067: /* next, compute all the lengths */
3068: PetscMalloc((2*m+1)*sizeof(PetscInt),&dlens);
3069: olens = dlens + m;
3070: for (i=0; i<m; i++) {
3071: jend = ii[i+1] - ii[i];
3072: olen = 0;
3073: dlen = 0;
3074: for (j=0; j<jend; j++) {
3075: if (*jj < rstart || *jj >= rend) olen++;
3076: else dlen++;
3077: jj++;
3078: }
3079: olens[i] = olen;
3080: dlens[i] = dlen;
3081: }
3082: MatCreate(comm,&M);
3083: MatSetSizes(M,m,nlocal,PETSC_DECIDE,n);
3084: MatSetType(M,((PetscObject)mat)->type_name);
3085: MatMPIAIJSetPreallocation(M,0,dlens,0,olens);
3086: PetscFree(dlens);
3087: } else {
3088: PetscInt ml,nl;
3090: M = *newmat;
3091: MatGetLocalSize(M,&ml,&nl);
3092: if (ml != m) SETERRQ(PETSC_ERR_ARG_SIZ,"Previous matrix must be same size/layout as request");
3093: MatZeroEntries(M);
3094: /*
3095: The next two lines are needed so we may call MatSetValues_MPIAIJ() below directly,
3096: rather than the slower MatSetValues().
3097: */
3098: M->was_assembled = PETSC_TRUE;
3099: M->assembled = PETSC_FALSE;
3100: }
3101: MatGetOwnershipRange(M,&rstart,&rend);
3102: aij = (Mat_SeqAIJ*)(Mreuse)->data;
3103: ii = aij->i;
3104: jj = aij->j;
3105: aa = aij->a;
3106: for (i=0; i<m; i++) {
3107: row = rstart + i;
3108: nz = ii[i+1] - ii[i];
3109: cwork = jj; jj += nz;
3110: vwork = aa; aa += nz;
3111: MatSetValues_MPIAIJ(M,1,&row,nz,cwork,vwork,INSERT_VALUES);
3112: }
3114: MatAssemblyBegin(M,MAT_FINAL_ASSEMBLY);
3115: MatAssemblyEnd(M,MAT_FINAL_ASSEMBLY);
3116: *newmat = M;
3118: /* save submatrix used in processor for next request */
3119: if (call == MAT_INITIAL_MATRIX) {
3120: PetscObjectCompose((PetscObject)M,"SubMatrix",(PetscObject)Mreuse);
3121: PetscObjectDereference((PetscObject)Mreuse);
3122: }
3124: return(0);
3125: }
3130: PetscErrorCode MatMPIAIJSetPreallocationCSR_MPIAIJ(Mat B,const PetscInt Ii[],const PetscInt J[],const PetscScalar v[])
3131: {
3132: PetscInt m,cstart, cend,j,nnz,i,d;
3133: PetscInt *d_nnz,*o_nnz,nnz_max = 0,rstart,ii;
3134: const PetscInt *JJ;
3135: PetscScalar *values;
3139: if (Ii[0]) SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE,"Ii[0] must be 0 it is %D",Ii[0]);
3141: PetscMapSetBlockSize(B->rmap,1);
3142: PetscMapSetBlockSize(B->cmap,1);
3143: PetscMapSetUp(B->rmap);
3144: PetscMapSetUp(B->cmap);
3145: m = B->rmap->n;
3146: cstart = B->cmap->rstart;
3147: cend = B->cmap->rend;
3148: rstart = B->rmap->rstart;
3150: PetscMalloc((2*m+1)*sizeof(PetscInt),&d_nnz);
3151: o_nnz = d_nnz + m;
3153: #if defined(PETSC_USE_DEBUGGING)
3154: for (i=0; i<m; i++) {
3155: nnz = Ii[i+1]- Ii[i];
3156: JJ = J + Ii[i];
3157: if (nnz < 0) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Local row %D has a negative %D number of columns",i,nnz);
3158:     if (nnz && (JJ[0] < 0)) SETERRQ1(PETSC_ERR_ARG_WRONGSTATE,"Row %D starts with negative column index",i);
3159:     if (nnz && (JJ[nnz-1] >= B->cmap->N)) SETERRQ3(PETSC_ERR_ARG_WRONGSTATE,"Row %D ends with too large a column index %D (max allowed %D)",i,JJ[nnz-1],B->cmap->N);
3160:     for (j=1; j<nnz; j++) {
3161:       if (JJ[j] <= JJ[j-1]) SETERRQ2(PETSC_ERR_ARG_WRONGSTATE,"Row %D has unsorted column index at %D location in column indices",i,j);
3162: }
3163: }
3164: #endif
3166: for (i=0; i<m; i++) {
3167: nnz = Ii[i+1]- Ii[i];
3168: JJ = J + Ii[i];
3169: nnz_max = PetscMax(nnz_max,nnz);
3170: for (j=0; j<nnz; j++) {
3171: if (*JJ >= cstart) break;
3172: JJ++;
3173: }
3174: d = 0;
3175: for (; j<nnz; j++) {
3176: if (*JJ++ >= cend) break;
3177: d++;
3178: }
3179: d_nnz[i] = d;
3180: o_nnz[i] = nnz - d;
3181: }
3182: MatMPIAIJSetPreallocation(B,0,d_nnz,0,o_nnz);
3183: PetscFree(d_nnz);
3185: if (v) values = (PetscScalar*)v;
3186: else {
3187: PetscMalloc((nnz_max+1)*sizeof(PetscScalar),&values);
3188: PetscMemzero(values,nnz_max*sizeof(PetscScalar));
3189: }
3191: for (i=0; i<m; i++) {
3192: ii = i + rstart;
3193: nnz = Ii[i+1]- Ii[i];
3194: MatSetValues_MPIAIJ(B,1,&ii,nnz,J+Ii[i],values+(v ? Ii[i] : 0),INSERT_VALUES);
3195: }
3196: MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);
3197: MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);
3199: if (!v) {
3200: PetscFree(values);
3201: }
3202: return(0);
3203: }
3208: /*@
3209: MatMPIAIJSetPreallocationCSR - Allocates memory for a sparse parallel matrix in AIJ format
3210: (the default parallel PETSc format).
3212: Collective on MPI_Comm
3214: Input Parameters:
3215: + B - the matrix
3216: . i - the indices into j for the start of each local row (starts with zero)
3217: . j - the column indices for each local row (starts with zero) these must be sorted for each row
3218: - v - optional values in the matrix
3220: Level: developer
3222: Notes:
3223:        The i, j, and v arrays ARE copied by this routine into the internal format used by PETSc;
3224:        thus you CANNOT change the matrix entries by changing the values of v[] after you have
3225: called this routine. Use MatCreateMPIAIJWithSplitArrays() to avoid needing to copy the arrays.
3227: The i and j indices are 0 based, and i indices are indices corresponding to the local j array.
3229: The format which is used for the sparse matrix input, is equivalent to a
3230:        row-major ordering, i.e. for the following matrix, the input data expected is
3231: as shown:
3233: 1 0 0
3234: 2 0 3 P0
3235: -------
3236: 4 5 6 P1
3238: Process0 [P0]: rows_owned=[0,1]
3239: i = {0,1,3} [size = nrow+1 = 2+1]
3240:         j =  {0,0,2}  [size = nz = 3]
3241:         v =  {1,2,3}  [size = nz = 3]
3243: Process1 [P1]: rows_owned=[2]
3244: i = {0,3} [size = nrow+1 = 1+1]
3245:         j =  {0,1,2}  [size = nz = 3]
3246:         v =  {4,5,6}  [size = nz = 3]
3248: The column indices for each row MUST be sorted.
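
       A minimal calling sketch for the layout above (illustration only, not taken from this
       routine; it assumes two MPI processes, with rank obtained from MPI_Comm_rank(), so that
       process 0 passes i0,j0,v0 for its rows 0-1 and process 1 passes i1,j1,v1 for row 2):
.vb
      Mat         B;
      PetscInt    i0[] = {0,1,3}, j0[] = {0,0,2};
      PetscScalar v0[] = {1,2,3};
      PetscInt    i1[] = {0,3},   j1[] = {0,1,2};
      PetscScalar v1[] = {4,5,6};

      MatCreate(PETSC_COMM_WORLD,&B);
      MatSetSizes(B,rank ? 1 : 2,PETSC_DECIDE,3,3);
      MatSetType(B,MATMPIAIJ);
      MatMPIAIJSetPreallocationCSR(B,rank ? i1 : i0,rank ? j1 : j0,rank ? v1 : v0);
.ve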
3250: .keywords: matrix, aij, compressed row, sparse, parallel
3252: .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatMPIAIJSetPreallocation(), MatCreateMPIAIJ(), MPIAIJ,
3253: MatCreateSeqAIJWithArrays(), MatCreateMPIAIJWithSplitArrays()
3254: @*/
3255: PetscErrorCode MatMPIAIJSetPreallocationCSR(Mat B,const PetscInt i[],const PetscInt j[], const PetscScalar v[])
3256: {
3257: PetscErrorCode ierr,(*f)(Mat,const PetscInt[],const PetscInt[],const PetscScalar[]);
3260: PetscObjectQueryFunction((PetscObject)B,"MatMPIAIJSetPreallocationCSR_C",(void (**)(void))&f);
3261: if (f) {
3262: (*f)(B,i,j,v);
3263: }
3264: return(0);
3265: }
3269: /*@C
3270: MatMPIAIJSetPreallocation - Preallocates memory for a sparse parallel matrix in AIJ format
3271: (the default parallel PETSc format). For good matrix assembly performance
3272: the user should preallocate the matrix storage by setting the parameters
3273: d_nz (or d_nnz) and o_nz (or o_nnz). By setting these parameters accurately,
3274: performance can be increased by more than a factor of 50.
3276: Collective on MPI_Comm
3278: Input Parameters:
3279: + A - the matrix
3280: . d_nz - number of nonzeros per row in DIAGONAL portion of local submatrix
3281: (same value is used for all local rows)
3282: . d_nnz - array containing the number of nonzeros in the various rows of the
3283: DIAGONAL portion of the local submatrix (possibly different for each row)
3284: or PETSC_NULL, if d_nz is used to specify the nonzero structure.
3285: The size of this array is equal to the number of local rows, i.e 'm'.
3286: You must leave room for the diagonal entry even if it is zero.
3287: . o_nz - number of nonzeros per row in the OFF-DIAGONAL portion of local
3288: submatrix (same value is used for all local rows).
3289: - o_nnz - array containing the number of nonzeros in the various rows of the
3290: OFF-DIAGONAL portion of the local submatrix (possibly different for
3291: each row) or PETSC_NULL, if o_nz is used to specify the nonzero
3292: structure. The size of this array is equal to the number
3293: of local rows, i.e 'm'.
3295: If the *_nnz parameter is given then the *_nz parameter is ignored
3297: The AIJ format (also called the Yale sparse matrix format or
3298: compressed row storage (CSR)), is fully compatible with standard Fortran 77
3299: storage. The stored row and column indices begin with zero. See the users manual for details.
3301: The parallel matrix is partitioned such that the first m0 rows belong to
3302: process 0, the next m1 rows belong to process 1, the next m2 rows belong
3303: to process 2 etc.. where m0,m1,m2... are the input parameter 'm'.
3305: The DIAGONAL portion of the local submatrix of a processor can be defined
3306:    as the submatrix which is obtained by extracting the part corresponding
3307:    to the rows r1-r2 and columns r1-r2 of the global matrix, where r1 is the
3308:    first row that belongs to the processor, and r2 is the last row belonging
3309:    to this processor. This is a square mxm matrix. The remaining portion
3310:    of the local submatrix (mxN) constitutes the OFF-DIAGONAL portion.
3312: If o_nnz, d_nnz are specified, then o_nz, and d_nz are ignored.
3314: You can call MatGetInfo() to get information on how effective the preallocation was;
3315: for example the fields mallocs,nz_allocated,nz_used,nz_unneeded;
3316: You can also run with the option -info and look for messages with the string
3317: malloc in them to see if additional memory allocation was needed.
3319: Example usage:
3320:
3321:    Consider the following 8x8 matrix with 34 nonzero values that is
3322:    assembled across 3 processors. Let's assume that proc0 owns 3 rows,
3323: proc1 owns 3 rows, proc2 owns 2 rows. This division can be shown
3324: as follows:
3326: .vb
3327: 1 2 0 | 0 3 0 | 0 4
3328: Proc0 0 5 6 | 7 0 0 | 8 0
3329: 9 0 10 | 11 0 0 | 12 0
3330: -------------------------------------
3331: 13 0 14 | 15 16 17 | 0 0
3332: Proc1 0 18 0 | 19 20 21 | 0 0
3333: 0 0 0 | 22 23 0 | 24 0
3334: -------------------------------------
3335: Proc2 25 26 27 | 0 0 28 | 29 0
3336: 30 0 0 | 31 32 33 | 0 34
3337: .ve
3339: This can be represented as a collection of submatrices as:
3341: .vb
3342: A B C
3343: D E F
3344: G H I
3345: .ve
3347: Where the submatrices A,B,C are owned by proc0, D,E,F are
3348: owned by proc1, G,H,I are owned by proc2.
3350: The 'm' parameters for proc0,proc1,proc2 are 3,3,2 respectively.
3351: The 'n' parameters for proc0,proc1,proc2 are 3,3,2 respectively.
3352: The 'M','N' parameters are 8,8, and have the same values on all procs.
3354: The DIAGONAL submatrices corresponding to proc0,proc1,proc2 are
3355: submatrices [A], [E], [I] respectively. The OFF-DIAGONAL submatrices
3356: corresponding to proc0,proc1,proc2 are [BC], [DF], [GH] respectively.
3357: Internally, each processor stores the DIAGONAL part, and the OFF-DIAGONAL
3358:    part as SeqAIJ matrices. For example, proc1 will store [E] as a SeqAIJ
3359:    matrix, and [DF] as another SeqAIJ matrix.
3361: When d_nz, o_nz parameters are specified, d_nz storage elements are
3362: allocated for every row of the local diagonal submatrix, and o_nz
3363: storage locations are allocated for every row of the OFF-DIAGONAL submat.
3364:    One way to choose d_nz and o_nz is to use the maximum number of nonzeros
3365:    per local row in the local DIAGONAL and OFF-DIAGONAL submatrices, respectively.
3366: In this case, the values of d_nz,o_nz are:
3367: .vb
3368: proc0 : dnz = 2, o_nz = 2
3369: proc1 : dnz = 3, o_nz = 2
3370: proc2 : dnz = 1, o_nz = 4
3371: .ve
3372: We are allocating m*(d_nz+o_nz) storage locations for every proc. This
3373: translates to 3*(2+2)=12 for proc0, 3*(3+2)=15 for proc1, 2*(1+4)=10
3374:    for proc2, i.e. we are using 12+15+10=37 storage locations to store
3375: 34 values.
3377: When d_nnz, o_nnz parameters are specified, the storage is specified
3378:    for every row, corresponding to both DIAGONAL and OFF-DIAGONAL submatrices.
3379: In the above case the values for d_nnz,o_nnz are:
3380: .vb
3381: proc0: d_nnz = [2,2,2] and o_nnz = [2,2,2]
3382: proc1: d_nnz = [3,3,2] and o_nnz = [2,1,1]
3383: proc2: d_nnz = [1,1] and o_nnz = [4,4]
3384: .ve
3385:    Here the space allocated is the sum of all the above values, i.e. 34, and
3386:    hence the preallocation is perfect.
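
   As an illustrative sketch (not part of this routine), the calls made on proc0 for the
   example above, using the d_nnz/o_nnz values from the table, might look like:
.vb
      Mat      A;
      PetscInt d_nnz[] = {2,2,2}, o_nnz[] = {2,2,2};

      MatCreate(PETSC_COMM_WORLD,&A);
      MatSetSizes(A,3,3,8,8);
      MatSetType(A,MATMPIAIJ);
      MatMPIAIJSetPreallocation(A,0,d_nnz,0,o_nnz);
.ve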
3388: Level: intermediate
3390: .keywords: matrix, aij, compressed row, sparse, parallel
3392: .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatCreateMPIAIJ(), MatMPIAIJSetPreallocationCSR(),
3393: MPIAIJ, MatGetInfo()
3394: @*/
3395: PetscErrorCode MatMPIAIJSetPreallocation(Mat B,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[])
3396: {
3397: PetscErrorCode ierr,(*f)(Mat,PetscInt,const PetscInt[],PetscInt,const PetscInt[]);
3400: PetscObjectQueryFunction((PetscObject)B,"MatMPIAIJSetPreallocation_C",(void (**)(void))&f);
3401: if (f) {
3402: (*f)(B,d_nz,d_nnz,o_nz,o_nnz);
3403: }
3404: return(0);
3405: }
3409: /*@
3410: MatCreateMPIAIJWithArrays - creates a MPI AIJ matrix using arrays that contain in standard
3411: CSR format the local rows.
3413: Collective on MPI_Comm
3415: Input Parameters:
3416: + comm - MPI communicator
3417: . m - number of local rows (Cannot be PETSC_DECIDE)
3418: . n - This value should be the same as the local size used in creating the
3419: x vector for the matrix-vector product y = Ax. (or PETSC_DECIDE to have
3420: calculated if N is given) For square matrices n is almost always m.
3421: . M - number of global rows (or PETSC_DETERMINE to have calculated if m is given)
3422: . N - number of global columns (or PETSC_DETERMINE to have calculated if n is given)
3423: . i - row indices
3424: . j - column indices
3425: - a - matrix values
3427: Output Parameter:
3428: . mat - the matrix
3430: Level: intermediate
3432: Notes:
3433: The i, j, and a arrays ARE copied by this routine into the internal format used by PETSc;
3434: thus you CANNOT change the matrix entries by changing the values of a[] after you have
3435: called this routine. Use MatCreateMPIAIJWithSplitArrays() to avoid needing to copy the arrays.
3437: The i and j indices are 0 based, and i indices are indices corresponding to the local j array.
3439: The format which is used for the sparse matrix input, is equivalent to a
3440:    row-major ordering, i.e. for the following matrix, the input data expected is
3441: as shown:
3443: 1 0 0
3444: 2 0 3 P0
3445: -------
3446: 4 5 6 P1
3448: Process0 [P0]: rows_owned=[0,1]
3449: i = {0,1,3} [size = nrow+1 = 2+1]
3450:         j =  {0,0,2}  [size = nz = 3]
3451:         v =  {1,2,3}  [size = nz = 3]
3453: Process1 [P1]: rows_owned=[2]
3454: i = {0,3} [size = nrow+1 = 1+1]
3455:         j =  {0,1,2}  [size = nz = 3]
3456:         v =  {4,5,6}  [size = nz = 3]
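
   A minimal sketch of the call as it might look on process 0 for this layout (process 1
   would pass its own i, j, v with m = 1):
.vb
      Mat         A;
      PetscInt    i[] = {0,1,3}, j[] = {0,0,2};
      PetscScalar v[] = {1,2,3};

      MatCreateMPIAIJWithArrays(PETSC_COMM_WORLD,2,PETSC_DECIDE,3,3,i,j,v,&A);
.ve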
3458: .keywords: matrix, aij, compressed row, sparse, parallel
3460: .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatMPIAIJSetPreallocation(), MatMPIAIJSetPreallocationCSR(),
3461: MPIAIJ, MatCreateMPIAIJ(), MatCreateMPIAIJWithSplitArrays()
3462: @*/
3463: PetscErrorCode MatCreateMPIAIJWithArrays(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt M,PetscInt N,const PetscInt i[],const PetscInt j[],const PetscScalar a[],Mat *mat)
3464: {
3468: if (i[0]) {
3469: SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"i (row indices) must start with 0");
3470: }
3471: if (m < 0) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"local number of rows (m) cannot be PETSC_DECIDE, or negative");
3472: MatCreate(comm,mat);
3473: MatSetSizes(*mat,m,n,M,N);
3474: MatSetType(*mat,MATMPIAIJ);
3475: MatMPIAIJSetPreallocationCSR(*mat,i,j,a);
3476: return(0);
3477: }
3481: /*@C
3482: MatCreateMPIAIJ - Creates a sparse parallel matrix in AIJ format
3483: (the default parallel PETSc format). For good matrix assembly performance
3484: the user should preallocate the matrix storage by setting the parameters
3485: d_nz (or d_nnz) and o_nz (or o_nnz). By setting these parameters accurately,
3486: performance can be increased by more than a factor of 50.
3488: Collective on MPI_Comm
3490: Input Parameters:
3491: + comm - MPI communicator
3492: . m - number of local rows (or PETSC_DECIDE to have calculated if M is given)
3493: This value should be the same as the local size used in creating the
3494: y vector for the matrix-vector product y = Ax.
3495: . n - This value should be the same as the local size used in creating the
3496: x vector for the matrix-vector product y = Ax. (or PETSC_DECIDE to have
3497: calculated if N is given) For square matrices n is almost always m.
3498: . M - number of global rows (or PETSC_DETERMINE to have calculated if m is given)
3499: . N - number of global columns (or PETSC_DETERMINE to have calculated if n is given)
3500: . d_nz - number of nonzeros per row in DIAGONAL portion of local submatrix
3501: (same value is used for all local rows)
3502: . d_nnz - array containing the number of nonzeros in the various rows of the
3503: DIAGONAL portion of the local submatrix (possibly different for each row)
3504: or PETSC_NULL, if d_nz is used to specify the nonzero structure.
3505: The size of this array is equal to the number of local rows, i.e 'm'.
3506: You must leave room for the diagonal entry even if it is zero.
3507: . o_nz - number of nonzeros per row in the OFF-DIAGONAL portion of local
3508: submatrix (same value is used for all local rows).
3509: - o_nnz - array containing the number of nonzeros in the various rows of the
3510: OFF-DIAGONAL portion of the local submatrix (possibly different for
3511: each row) or PETSC_NULL, if o_nz is used to specify the nonzero
3512: structure. The size of this array is equal to the number
3513: of local rows, i.e 'm'.
3515: Output Parameter:
3516: . A - the matrix
3518: It is recommended that one use the MatCreate(), MatSetType() and/or MatSetFromOptions(),
3519:    MatXXXXSetPreallocation() paradigm instead of this routine directly.
3520: [MatXXXXSetPreallocation() is, for example, MatSeqAIJSetPreallocation]
3522: Notes:
3523: If the *_nnz parameter is given then the *_nz parameter is ignored
3525: m,n,M,N parameters specify the size of the matrix, and its partitioning across
3526: processors, while d_nz,d_nnz,o_nz,o_nnz parameters specify the approximate
3527: storage requirements for this matrix.
3529: If PETSC_DECIDE or PETSC_DETERMINE is used for a particular argument on one
3530:    processor then it must be used on all processors that share the object for
3531: that argument.
3533: The user MUST specify either the local or global matrix dimensions
3534: (possibly both).
3536: The parallel matrix is partitioned across processors such that the
3537: first m0 rows belong to process 0, the next m1 rows belong to
3538:    process 1, the next m2 rows belong to process 2, etc., where
3539:    m0,m1,m2,... are the values of the input parameter 'm' on each process, i.e. each processor stores
3540:    values corresponding to an [m x N] submatrix.
3542: The columns are logically partitioned with the n0 columns belonging
3543:    to the 0th partition, the next n1 columns belonging to the next
3544:    partition, etc., where n0,n1,n2,... are the values of the input parameter 'n' on each process.
3546: The DIAGONAL portion of the local submatrix on any given processor
3547:    is the submatrix formed by the m rows and n columns owned by
3548:    the given processor, i.e. the diagonal submatrix on
3549:    process 0 is [m0 x n0], the diagonal submatrix on process 1 is [m1 x n1],
3550:    etc. The remaining portion of the local submatrix, of size [m x (N-n)],
3551:    constitutes the OFF-DIAGONAL portion. The example below better
3552: illustrates this concept.
3554: For a square global matrix we define each processor's diagonal portion
3555: to be its local rows and the corresponding columns (a square submatrix);
3556: each processor's off-diagonal portion encompasses the remainder of the
3557: local matrix (a rectangular submatrix).
3559: If o_nnz, d_nnz are specified, then o_nz, and d_nz are ignored.
3561: When calling this routine with a single process communicator, a matrix of
3562: type SEQAIJ is returned. If a matrix of type MPIAIJ is desired for this
3563: type of communicator, use the construction mechanism:
3564: MatCreate(...,&A); MatSetType(A,MATMPIAIJ); MatSetSizes(A, m,n,M,N); MatMPIAIJSetPreallocation(A,...);
3565:
3566: By default, this format uses inodes (identical nodes) when possible.
3567: We search for consecutive rows with the same nonzero structure, thereby
3568: reusing matrix information to achieve increased efficiency.
3570: Options Database Keys:
3571: + -mat_no_inode - Do not use inodes
3572: . -mat_inode_limit <limit> - Sets inode limit (max limit=5)
3573: - -mat_aij_oneindex - Internally use indexing starting at 1
3574: rather than 0. Note that when calling MatSetValues(),
3575: the user still MUST index entries starting at 0!
3578: Example usage:
3579:
3580: Consider the following 8x8 matrix with 34 non-zero values, that is
3581:    assembled across 3 processors. Let's assume that proc0 owns 3 rows,
3582: proc1 owns 3 rows, proc2 owns 2 rows. This division can be shown
3583: as follows:
3585: .vb
3586: 1 2 0 | 0 3 0 | 0 4
3587: Proc0 0 5 6 | 7 0 0 | 8 0
3588: 9 0 10 | 11 0 0 | 12 0
3589: -------------------------------------
3590: 13 0 14 | 15 16 17 | 0 0
3591: Proc1 0 18 0 | 19 20 21 | 0 0
3592: 0 0 0 | 22 23 0 | 24 0
3593: -------------------------------------
3594: Proc2 25 26 27 | 0 0 28 | 29 0
3595: 30 0 0 | 31 32 33 | 0 34
3596: .ve
3598: This can be represented as a collection of submatrices as:
3600: .vb
3601: A B C
3602: D E F
3603: G H I
3604: .ve
3606: Where the submatrices A,B,C are owned by proc0, D,E,F are
3607: owned by proc1, G,H,I are owned by proc2.
3609: The 'm' parameters for proc0,proc1,proc2 are 3,3,2 respectively.
3610: The 'n' parameters for proc0,proc1,proc2 are 3,3,2 respectively.
3611: The 'M','N' parameters are 8,8, and have the same values on all procs.
3613: The DIAGONAL submatrices corresponding to proc0,proc1,proc2 are
3614: submatrices [A], [E], [I] respectively. The OFF-DIAGONAL submatrices
3615: corresponding to proc0,proc1,proc2 are [BC], [DF], [GH] respectively.
3616: Internally, each processor stores the DIAGONAL part, and the OFF-DIAGONAL
3617:    part as SeqAIJ matrices. For example, proc1 will store [E] as a SeqAIJ
3618:    matrix, and [DF] as another SeqAIJ matrix.
3620: When d_nz, o_nz parameters are specified, d_nz storage elements are
3621: allocated for every row of the local diagonal submatrix, and o_nz
3622: storage locations are allocated for every row of the OFF-DIAGONAL submat.
3623:    One way to choose d_nz and o_nz is to use the maximum number of nonzeros per local
3624:    row in the local DIAGONAL and OFF-DIAGONAL submatrices, respectively.
3625: In this case, the values of d_nz,o_nz are:
3626: .vb
3627:      proc0 : d_nz = 2, o_nz = 2
3628:      proc1 : d_nz = 3, o_nz = 2
3629:      proc2 : d_nz = 1, o_nz = 4
3630: .ve
3631: We are allocating m*(d_nz+o_nz) storage locations for every proc. This
3632:    translates to 3*(2+2)=12 for proc0, 3*(3+2)=15 for proc1, 2*(1+4)=10
3633:    for proc2, i.e. we are using 12+15+10=37 storage locations to store
3634: 34 values.
3636: When d_nnz, o_nnz parameters are specified, the storage is specified
3637:    for every row, corresponding to both the DIAGONAL and OFF-DIAGONAL submatrices.
3638: In the above case the values for d_nnz,o_nnz are:
3639: .vb
3640: proc0: d_nnz = [2,2,2] and o_nnz = [2,2,2]
3641: proc1: d_nnz = [3,3,2] and o_nnz = [2,1,1]
3642: proc2: d_nnz = [1,1] and o_nnz = [4,4]
3643: .ve
3644:    Here the space allocated is the sum of all the above values, i.e. 34, and
3645:    hence the preallocation is perfect.
3647: Level: intermediate
3649: .keywords: matrix, aij, compressed row, sparse, parallel
3651: .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatMPIAIJSetPreallocation(), MatMPIAIJSetPreallocationCSR(),
3652: MPIAIJ, MatCreateMPIAIJWithArrays()
3653: @*/
3654: PetscErrorCode MatCreateMPIAIJ(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt M,PetscInt N,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[],Mat *A)
3655: {
3657: PetscMPIInt size;
3660: MatCreate(comm,A);
3661: MatSetSizes(*A,m,n,M,N);
3662: MPI_Comm_size(comm,&size);
3663: if (size > 1) {
3664: MatSetType(*A,MATMPIAIJ);
3665: MatMPIAIJSetPreallocation(*A,d_nz,d_nnz,o_nz,o_nnz);
3666: } else {
3667: MatSetType(*A,MATSEQAIJ);
3668: MatSeqAIJSetPreallocation(*A,d_nz,d_nnz);
3669: }
3670: return(0);
3671: }
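As a companion to the notes above, here is a sketch of the recommended MatCreate()/MatSetType()/MatMPIAIJSetPreallocation() paradigm, using the exact per-row d_nnz/o_nnz counts of the 8x8 example. It assumes exactly three processes and that PetscInitialize() has already been called; the function name BuildExampleMatrix is illustrative only.

#include "petscmat.h"

PetscErrorCode BuildExampleMatrix(Mat *A)
{
  PetscErrorCode ierr;
  PetscMPIInt    rank;
  /* sizes and counts taken from the 8x8 example above; trailing zeros are
     padding, only the first m[rank] entries of each row are read */
  const PetscInt m[3]       = {3,3,2};
  const PetscInt dnnz[3][3] = {{2,2,2},{3,3,2},{1,1,0}};
  const PetscInt onnz[3][3] = {{2,2,2},{2,1,1},{4,4,0}};

  ierr = MPI_Comm_rank(PETSC_COMM_WORLD,&rank);CHKERRQ(ierr);
  ierr = MatCreate(PETSC_COMM_WORLD,A);CHKERRQ(ierr);
  ierr = MatSetSizes(*A,m[rank],m[rank],8,8);CHKERRQ(ierr);
  ierr = MatSetType(*A,MATMPIAIJ);CHKERRQ(ierr);
  ierr = MatMPIAIJSetPreallocation(*A,0,dnnz[rank],0,onnz[rank]);CHKERRQ(ierr);
  /* ... fill with MatSetValues(), then MatAssemblyBegin()/MatAssemblyEnd() ... */
  return 0;
}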
3675: PetscErrorCode MatMPIAIJGetSeqAIJ(Mat A,Mat *Ad,Mat *Ao,PetscInt *colmap[])
3676: {
3677: Mat_MPIAIJ *a = (Mat_MPIAIJ *)A->data;
3680: *Ad = a->A;
3681: *Ao = a->B;
3682: *colmap = a->garray;
3683: return(0);
3684: }
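A short sketch of how MatMPIAIJGetSeqAIJ() above can be used to look at the two local SeqAIJ blocks and the column map. It assumes A is an assembled MATMPIAIJ matrix; the function name is illustrative only.

#include "petscmat.h"

PetscErrorCode InspectLocalBlocks(Mat A)
{
  PetscErrorCode ierr;
  Mat            Ad,Ao;      /* diagonal and off-diagonal local blocks */
  PetscInt       *colmap;    /* global column index of each column of Ao */
  PetscInt       md,nd,mo,no;

  ierr = MatMPIAIJGetSeqAIJ(A,&Ad,&Ao,&colmap);CHKERRQ(ierr);
  ierr = MatGetSize(Ad,&md,&nd);CHKERRQ(ierr);
  ierr = MatGetSize(Ao,&mo,&no);CHKERRQ(ierr);
  ierr = PetscPrintf(PETSC_COMM_SELF,"diag block %D x %D, off-diag block %D x %D\n",md,nd,mo,no);CHKERRQ(ierr);
  /* colmap[k] is the global column of the k-th column of Ao; do not free it */
  return 0;
}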
3688: PetscErrorCode MatSetColoring_MPIAIJ(Mat A,ISColoring coloring)
3689: {
3691: PetscInt i;
3692: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
3695: if (coloring->ctype == IS_COLORING_GLOBAL) {
3696: ISColoringValue *allcolors,*colors;
3697: ISColoring ocoloring;
3699: /* set coloring for diagonal portion */
3700: MatSetColoring_SeqAIJ(a->A,coloring);
3702: /* set coloring for off-diagonal portion */
3703: ISAllGatherColors(((PetscObject)A)->comm,coloring->n,coloring->colors,PETSC_NULL,&allcolors);
3704: PetscMalloc((a->B->cmap->n+1)*sizeof(ISColoringValue),&colors);
3705: for (i=0; i<a->B->cmap->n; i++) {
3706: colors[i] = allcolors[a->garray[i]];
3707: }
3708: PetscFree(allcolors);
3709: ISColoringCreate(MPI_COMM_SELF,coloring->n,a->B->cmap->n,colors,&ocoloring);
3710: MatSetColoring_SeqAIJ(a->B,ocoloring);
3711: ISColoringDestroy(ocoloring);
3712: } else if (coloring->ctype == IS_COLORING_GHOSTED) {
3713: ISColoringValue *colors;
3714: PetscInt *larray;
3715: ISColoring ocoloring;
3717: /* set coloring for diagonal portion */
3718: PetscMalloc((a->A->cmap->n+1)*sizeof(PetscInt),&larray);
3719: for (i=0; i<a->A->cmap->n; i++) {
3720: larray[i] = i + A->cmap->rstart;
3721: }
3722: ISGlobalToLocalMappingApply(A->mapping,IS_GTOLM_MASK,a->A->cmap->n,larray,PETSC_NULL,larray);
3723: PetscMalloc((a->A->cmap->n+1)*sizeof(ISColoringValue),&colors);
3724: for (i=0; i<a->A->cmap->n; i++) {
3725: colors[i] = coloring->colors[larray[i]];
3726: }
3727: PetscFree(larray);
3728: ISColoringCreate(PETSC_COMM_SELF,coloring->n,a->A->cmap->n,colors,&ocoloring);
3729: MatSetColoring_SeqAIJ(a->A,ocoloring);
3730: ISColoringDestroy(ocoloring);
3732: /* set coloring for off-diagonal portion */
3733: PetscMalloc((a->B->cmap->n+1)*sizeof(PetscInt),&larray);
3734: ISGlobalToLocalMappingApply(A->mapping,IS_GTOLM_MASK,a->B->cmap->n,a->garray,PETSC_NULL,larray);
3735: PetscMalloc((a->B->cmap->n+1)*sizeof(ISColoringValue),&colors);
3736: for (i=0; i<a->B->cmap->n; i++) {
3737: colors[i] = coloring->colors[larray[i]];
3738: }
3739: PetscFree(larray);
3740: ISColoringCreate(MPI_COMM_SELF,coloring->n,a->B->cmap->n,colors,&ocoloring);
3741: MatSetColoring_SeqAIJ(a->B,ocoloring);
3742: ISColoringDestroy(ocoloring);
3743: } else {
3744: SETERRQ1(PETSC_ERR_SUP,"No support ISColoringType %d",(int)coloring->ctype);
3745: }
3747: return(0);
3748: }
3750: #if defined(PETSC_HAVE_ADIC)
3753: PetscErrorCode MatSetValuesAdic_MPIAIJ(Mat A,void *advalues)
3754: {
3755: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
3759: MatSetValuesAdic_SeqAIJ(a->A,advalues);
3760: MatSetValuesAdic_SeqAIJ(a->B,advalues);
3761: return(0);
3762: }
3763: #endif
3767: PetscErrorCode MatSetValuesAdifor_MPIAIJ(Mat A,PetscInt nl,void *advalues)
3768: {
3769: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
3773: MatSetValuesAdifor_SeqAIJ(a->A,nl,advalues);
3774: MatSetValuesAdifor_SeqAIJ(a->B,nl,advalues);
3775: return(0);
3776: }
3780: /*@
3781:       MatMerge - Creates a single large PETSc matrix by concatenating sequential
3782: matrices from each processor
3784: Collective on MPI_Comm
3786: Input Parameters:
3787: +   comm - the communicator the parallel matrix will live on
3788: .   inmat - the input sequential matrix (one per process)
3789: . n - number of local columns (or PETSC_DECIDE)
3790: - scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
3792: Output Parameter:
3793: . outmat - the parallel matrix generated
3795: Level: advanced
3797: Notes: The number of columns of the matrix in EACH processor MUST be the same.
3799: @*/
3800: PetscErrorCode MatMerge(MPI_Comm comm,Mat inmat,PetscInt n,MatReuse scall,Mat *outmat)
3801: {
3803: PetscInt m,N,i,rstart,nnz,Ii,*dnz,*onz;
3804: PetscInt *indx;
3805: PetscScalar *values;
3808: MatGetSize(inmat,&m,&N);
3809: if (scall == MAT_INITIAL_MATRIX){
3810: /* count nonzeros in each row, for diagonal and off diagonal portion of matrix */
3811: if (n == PETSC_DECIDE){
3812: PetscSplitOwnership(comm,&n,&N);
3813: }
3814: MPI_Scan(&m, &rstart,1,MPIU_INT,MPI_SUM,comm);
3815: rstart -= m;
3817: MatPreallocateInitialize(comm,m,n,dnz,onz);
3818: for (i=0;i<m;i++) {
3819: MatGetRow_SeqAIJ(inmat,i,&nnz,&indx,PETSC_NULL);
3820: MatPreallocateSet(i+rstart,nnz,indx,dnz,onz);
3821: MatRestoreRow_SeqAIJ(inmat,i,&nnz,&indx,PETSC_NULL);
3822: }
3823: /* This routine will ONLY return MPIAIJ type matrix */
3824: MatCreate(comm,outmat);
3825: MatSetSizes(*outmat,m,n,PETSC_DETERMINE,PETSC_DETERMINE);
3826: MatSetType(*outmat,MATMPIAIJ);
3827: MatMPIAIJSetPreallocation(*outmat,0,dnz,0,onz);
3828: MatPreallocateFinalize(dnz,onz);
3829:
3830: } else if (scall == MAT_REUSE_MATRIX){
3831: MatGetOwnershipRange(*outmat,&rstart,PETSC_NULL);
3832: } else {
3833: SETERRQ1(PETSC_ERR_ARG_WRONG,"Invalid MatReuse %d",(int)scall);
3834: }
3836: for (i=0;i<m;i++) {
3837: MatGetRow_SeqAIJ(inmat,i,&nnz,&indx,&values);
3838: Ii = i + rstart;
3839: MatSetValues(*outmat,1,&Ii,nnz,indx,values,INSERT_VALUES);
3840: MatRestoreRow_SeqAIJ(inmat,i,&nnz,&indx,&values);
3841: }
3842: MatDestroy(inmat);
3843: MatAssemblyBegin(*outmat,MAT_FINAL_ASSEMBLY);
3844: MatAssemblyEnd(*outmat,MAT_FINAL_ASSEMBLY);
3846: return(0);
3847: }
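A sketch of calling MatMerge() above: every process builds a 1 x 4 SeqAIJ strip (the global column count must match on all processes, per the notes) and the strips are stacked into one MPIAIJ matrix. The function name MergeExample is illustrative only; note that, as the code above shows, MatMerge() destroys the input matrix itself.

#include "petscmat.h"

PetscErrorCode MergeExample(Mat *C)
{
  PetscErrorCode ierr;
  Mat            local;
  PetscMPIInt    rank;
  PetscInt       row = 0,cols[2];
  PetscScalar    vals[2] = {1.0,2.0};

  ierr = MPI_Comm_rank(PETSC_COMM_WORLD,&rank);CHKERRQ(ierr);
  ierr = MatCreateSeqAIJ(PETSC_COMM_SELF,1,4,2,PETSC_NULL,&local);CHKERRQ(ierr);
  cols[0] = rank % 4; cols[1] = (rank+1) % 4;
  ierr = MatSetValues(local,1,&row,2,cols,vals,INSERT_VALUES);CHKERRQ(ierr);
  ierr = MatAssemblyBegin(local,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(local,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatMerge(PETSC_COMM_WORLD,local,PETSC_DECIDE,MAT_INITIAL_MATRIX,C);CHKERRQ(ierr);
  /* do NOT call MatDestroy(local) here; MatMerge() already destroyed it */
  return 0;
}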
3851: PetscErrorCode MatFileSplit(Mat A,char *outfile)
3852: {
3853: PetscErrorCode ierr;
3854: PetscMPIInt rank;
3855: PetscInt m,N,i,rstart,nnz;
3856: size_t len;
3857: const PetscInt *indx;
3858: PetscViewer out;
3859: char *name;
3860: Mat B;
3861: const PetscScalar *values;
3864: MatGetLocalSize(A,&m,0);
3865: MatGetSize(A,0,&N);
3866: /* Should this be the type of the diagonal block of A? */
3867: MatCreate(PETSC_COMM_SELF,&B);
3868: MatSetSizes(B,m,N,m,N);
3869: MatSetType(B,MATSEQAIJ);
3870: MatSeqAIJSetPreallocation(B,0,PETSC_NULL);
3871: MatGetOwnershipRange(A,&rstart,0);
3872: for (i=0;i<m;i++) {
3873: MatGetRow(A,i+rstart,&nnz,&indx,&values);
3874: MatSetValues(B,1,&i,nnz,indx,values,INSERT_VALUES);
3875: MatRestoreRow(A,i+rstart,&nnz,&indx,&values);
3876: }
3877: MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);
3878: MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);
3880: MPI_Comm_rank(((PetscObject)A)->comm,&rank);
3881: PetscStrlen(outfile,&len);
3882: PetscMalloc((len+5)*sizeof(char),&name);
3883: sprintf(name,"%s.%d",outfile,rank);
3884: PetscViewerBinaryOpen(PETSC_COMM_SELF,name,FILE_MODE_APPEND,&out);
3885: PetscFree(name);
3886: MatView(B,out);
3887: PetscViewerDestroy(out);
3888: MatDestroy(B);
3889: return(0);
3890: }
3892: EXTERN PetscErrorCode MatDestroy_MPIAIJ(Mat);
3895: PetscErrorCode MatDestroy_MPIAIJ_SeqsToMPI(Mat A)
3896: {
3897: PetscErrorCode ierr;
3898: Mat_Merge_SeqsToMPI *merge;
3899: PetscContainer container;
3902: PetscObjectQuery((PetscObject)A,"MatMergeSeqsToMPI",(PetscObject *)&container);
3903: if (container) {
3904: PetscContainerGetPointer(container,(void **)&merge);
3905: PetscFree(merge->id_r);
3906: PetscFree(merge->len_s);
3907: PetscFree(merge->len_r);
3908: PetscFree(merge->bi);
3909: PetscFree(merge->bj);
3910: PetscFree(merge->buf_ri);
3911: PetscFree(merge->buf_rj);
3912: PetscFree(merge->coi);
3913: PetscFree(merge->coj);
3914: PetscFree(merge->owners_co);
3915: PetscFree(merge->rowmap.range);
3916:
3917: PetscContainerDestroy(container);
3918: PetscObjectCompose((PetscObject)A,"MatMergeSeqsToMPI",0);
3919: }
3920: PetscFree(merge);
3922: MatDestroy_MPIAIJ(A);
3923: return(0);
3924: }
3926: #include ../src/mat/utils/freespace.h
3927: #include petscbt.h
3931: /*@C
3932:       MatMerge_SeqsToMPI - Creates an MPIAIJ matrix by adding sequential
3933: matrices from each processor
3935: Collective on MPI_Comm
3937: Input Parameters:
3938: +    comm - the communicator the parallel matrix will live on
3939: .    seqmat - the input sequential matrix (one per process)
3940: . m - number of local rows (or PETSC_DECIDE)
3941: . n - number of local columns (or PETSC_DECIDE)
3942: - scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
3944: Output Parameter:
3945: . mpimat - the parallel matrix generated
3947: Level: advanced
3949: Notes:
3950: The dimensions of the sequential matrix in each processor MUST be the same.
3951:      The input seqmat is included in the container "Mat_Merge_SeqsToMPI", and will be
3952: destroyed when mpimat is destroyed. Call PetscObjectQuery() to access seqmat.
3953: @*/
3954: PetscErrorCode MatMerge_SeqsToMPINumeric(Mat seqmat,Mat mpimat)
3955: {
3956: PetscErrorCode ierr;
3957: MPI_Comm comm=((PetscObject)mpimat)->comm;
3958: Mat_SeqAIJ *a=(Mat_SeqAIJ*)seqmat->data;
3959: PetscMPIInt size,rank,taga,*len_s;
3960: PetscInt N=mpimat->cmap->N,i,j,*owners,*ai=a->i,*aj=a->j;
3961: PetscInt proc,m;
3962: PetscInt **buf_ri,**buf_rj;
3963: PetscInt k,anzi,*bj_i,*bi,*bj,arow,bnzi,nextaj;
3964: PetscInt nrows,**buf_ri_k,**nextrow,**nextai;
3965: MPI_Request *s_waits,*r_waits;
3966: MPI_Status *status;
3967: MatScalar *aa=a->a;
3968: MatScalar **abuf_r,*ba_i;
3969: Mat_Merge_SeqsToMPI *merge;
3970: PetscContainer container;
3971:
3973: PetscLogEventBegin(MAT_Seqstompinum,seqmat,0,0,0);
3975: MPI_Comm_size(comm,&size);
3976: MPI_Comm_rank(comm,&rank);
3978: PetscObjectQuery((PetscObject)mpimat,"MatMergeSeqsToMPI",(PetscObject *)&container);
3979: if (container) {
3980: PetscContainerGetPointer(container,(void **)&merge);
3981: }
3982: bi = merge->bi;
3983: bj = merge->bj;
3984: buf_ri = merge->buf_ri;
3985: buf_rj = merge->buf_rj;
3987: PetscMalloc(size*sizeof(MPI_Status),&status);
3988: owners = merge->rowmap.range;
3989: len_s = merge->len_s;
3991: /* send and recv matrix values */
3992: /*-----------------------------*/
3993: PetscObjectGetNewTag((PetscObject)mpimat,&taga);
3994: PetscPostIrecvScalar(comm,taga,merge->nrecv,merge->id_r,merge->len_r,&abuf_r,&r_waits);
3996: PetscMalloc((merge->nsend+1)*sizeof(MPI_Request),&s_waits);
3997: for (proc=0,k=0; proc<size; proc++){
3998: if (!len_s[proc]) continue;
3999: i = owners[proc];
4000: MPI_Isend(aa+ai[i],len_s[proc],MPIU_MATSCALAR,proc,taga,comm,s_waits+k);
4001: k++;
4002: }
4004: if (merge->nrecv) {MPI_Waitall(merge->nrecv,r_waits,status);}
4005: if (merge->nsend) {MPI_Waitall(merge->nsend,s_waits,status);}
4006: PetscFree(status);
4008: PetscFree(s_waits);
4009: PetscFree(r_waits);
4011: /* insert mat values of mpimat */
4012: /*----------------------------*/
4013: PetscMalloc(N*sizeof(PetscScalar),&ba_i);
4014: PetscMalloc((3*merge->nrecv+1)*sizeof(PetscInt**),&buf_ri_k);
4015: nextrow = buf_ri_k + merge->nrecv;
4016: nextai = nextrow + merge->nrecv;
4018: for (k=0; k<merge->nrecv; k++){
4019: buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */
4020: nrows = *(buf_ri_k[k]);
4021: nextrow[k] = buf_ri_k[k]+1; /* next row number of k-th recved i-structure */
4022:     nextai[k]   = buf_ri_k[k] + (nrows + 1);/* points to the next i-structure of k-th recved i-structure  */
4023: }
4025: /* set values of ba */
4026: m = merge->rowmap.n;
4027: for (i=0; i<m; i++) {
4028: arow = owners[rank] + i;
4029: bj_i = bj+bi[i]; /* col indices of the i-th row of mpimat */
4030: bnzi = bi[i+1] - bi[i];
4031: PetscMemzero(ba_i,bnzi*sizeof(PetscScalar));
4033: /* add local non-zero vals of this proc's seqmat into ba */
4034: anzi = ai[arow+1] - ai[arow];
4035: aj = a->j + ai[arow];
4036: aa = a->a + ai[arow];
4037: nextaj = 0;
4038: for (j=0; nextaj<anzi; j++){
4039: if (*(bj_i + j) == aj[nextaj]){ /* bcol == acol */
4040: ba_i[j] += aa[nextaj++];
4041: }
4042: }
4044: /* add received vals into ba */
4045: for (k=0; k<merge->nrecv; k++){ /* k-th received message */
4046: /* i-th row */
4047: if (i == *nextrow[k]) {
4048: anzi = *(nextai[k]+1) - *nextai[k];
4049: aj = buf_rj[k] + *(nextai[k]);
4050: aa = abuf_r[k] + *(nextai[k]);
4051: nextaj = 0;
4052: for (j=0; nextaj<anzi; j++){
4053: if (*(bj_i + j) == aj[nextaj]){ /* bcol == acol */
4054: ba_i[j] += aa[nextaj++];
4055: }
4056: }
4057: nextrow[k]++; nextai[k]++;
4058: }
4059: }
4060: MatSetValues(mpimat,1,&arow,bnzi,bj_i,ba_i,INSERT_VALUES);
4061: }
4062: MatAssemblyBegin(mpimat,MAT_FINAL_ASSEMBLY);
4063: MatAssemblyEnd(mpimat,MAT_FINAL_ASSEMBLY);
4065: PetscFree(abuf_r);
4066: PetscFree(ba_i);
4067: PetscFree(buf_ri_k);
4068: PetscLogEventEnd(MAT_Seqstompinum,seqmat,0,0,0);
4069: return(0);
4070: }
4074: PetscErrorCode MatMerge_SeqsToMPISymbolic(MPI_Comm comm,Mat seqmat,PetscInt m,PetscInt n,Mat *mpimat)
4075: {
4076: PetscErrorCode ierr;
4077: Mat B_mpi;
4078: Mat_SeqAIJ *a=(Mat_SeqAIJ*)seqmat->data;
4079: PetscMPIInt size,rank,tagi,tagj,*len_s,*len_si,*len_ri;
4080: PetscInt **buf_rj,**buf_ri,**buf_ri_k;
4081: PetscInt M=seqmat->rmap->n,N=seqmat->cmap->n,i,*owners,*ai=a->i,*aj=a->j;
4082: PetscInt len,proc,*dnz,*onz;
4083: PetscInt k,anzi,*bi,*bj,*lnk,nlnk,arow,bnzi,nspacedouble=0;
4084: PetscInt nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextai;
4085: MPI_Request *si_waits,*sj_waits,*ri_waits,*rj_waits;
4086: MPI_Status *status;
4087: PetscFreeSpaceList free_space=PETSC_NULL,current_space=PETSC_NULL;
4088: PetscBT lnkbt;
4089: Mat_Merge_SeqsToMPI *merge;
4090: PetscContainer container;
4093: PetscLogEventBegin(MAT_Seqstompisym,seqmat,0,0,0);
4095: /* make sure it is a PETSc comm */
4096: PetscCommDuplicate(comm,&comm,PETSC_NULL);
4097: MPI_Comm_size(comm,&size);
4098: MPI_Comm_rank(comm,&rank);
4099:
4100: PetscNew(Mat_Merge_SeqsToMPI,&merge);
4101: PetscMalloc(size*sizeof(MPI_Status),&status);
4103: /* determine row ownership */
4104: /*---------------------------------------------------------*/
4105: PetscMapInitialize(comm,&merge->rowmap);
4106: merge->rowmap.n = m;
4107: merge->rowmap.N = M;
4108: merge->rowmap.bs = 1;
4109: PetscMapSetUp(&merge->rowmap);
4110: PetscMalloc(size*sizeof(PetscMPIInt),&len_si);
4111: PetscMalloc(size*sizeof(PetscMPIInt),&merge->len_s);
4112:
4113: m = merge->rowmap.n;
4114: M = merge->rowmap.N;
4115: owners = merge->rowmap.range;
4117: /* determine the number of messages to send, their lengths */
4118: /*---------------------------------------------------------*/
4119: len_s = merge->len_s;
4121: len = 0; /* length of buf_si[] */
4122: merge->nsend = 0;
4123: for (proc=0; proc<size; proc++){
4124: len_si[proc] = 0;
4125: if (proc == rank){
4126: len_s[proc] = 0;
4127: } else {
4128: len_si[proc] = owners[proc+1] - owners[proc] + 1;
4129: len_s[proc] = ai[owners[proc+1]] - ai[owners[proc]]; /* num of rows to be sent to [proc] */
4130: }
4131: if (len_s[proc]) {
4132: merge->nsend++;
4133: nrows = 0;
4134: for (i=owners[proc]; i<owners[proc+1]; i++){
4135: if (ai[i+1] > ai[i]) nrows++;
4136: }
4137: len_si[proc] = 2*(nrows+1);
4138: len += len_si[proc];
4139: }
4140: }
4142: /* determine the number and length of messages to receive for ij-structure */
4143: /*-------------------------------------------------------------------------*/
4144: PetscGatherNumberOfMessages(comm,PETSC_NULL,len_s,&merge->nrecv);
4145: PetscGatherMessageLengths2(comm,merge->nsend,merge->nrecv,len_s,len_si,&merge->id_r,&merge->len_r,&len_ri);
4147: /* post the Irecv of j-structure */
4148: /*-------------------------------*/
4149: PetscCommGetNewTag(comm,&tagj);
4150: PetscPostIrecvInt(comm,tagj,merge->nrecv,merge->id_r,merge->len_r,&buf_rj,&rj_waits);
4152: /* post the Isend of j-structure */
4153: /*--------------------------------*/
4154: PetscMalloc((2*merge->nsend+1)*sizeof(MPI_Request),&si_waits);
4155: sj_waits = si_waits + merge->nsend;
4157: for (proc=0, k=0; proc<size; proc++){
4158: if (!len_s[proc]) continue;
4159: i = owners[proc];
4160: MPI_Isend(aj+ai[i],len_s[proc],MPIU_INT,proc,tagj,comm,sj_waits+k);
4161: k++;
4162: }
4164: /* receives and sends of j-structure are complete */
4165: /*------------------------------------------------*/
4166: if (merge->nrecv) {MPI_Waitall(merge->nrecv,rj_waits,status);}
4167: if (merge->nsend) {MPI_Waitall(merge->nsend,sj_waits,status);}
4168:
4169: /* send and recv i-structure */
4170: /*---------------------------*/
4171: PetscCommGetNewTag(comm,&tagi);
4172: PetscPostIrecvInt(comm,tagi,merge->nrecv,merge->id_r,len_ri,&buf_ri,&ri_waits);
4173:
4174: PetscMalloc((len+1)*sizeof(PetscInt),&buf_s);
4175: buf_si = buf_s; /* points to the beginning of k-th msg to be sent */
4176: for (proc=0,k=0; proc<size; proc++){
4177: if (!len_s[proc]) continue;
4178: /* form outgoing message for i-structure:
4179: buf_si[0]: nrows to be sent
4180: [1:nrows]: row index (global)
4181: [nrows+1:2*nrows+1]: i-structure index
4182: */
4183: /*-------------------------------------------*/
4184: nrows = len_si[proc]/2 - 1;
4185: buf_si_i = buf_si + nrows+1;
4186: buf_si[0] = nrows;
4187: buf_si_i[0] = 0;
4188: nrows = 0;
4189: for (i=owners[proc]; i<owners[proc+1]; i++){
4190: anzi = ai[i+1] - ai[i];
4191: if (anzi) {
4192: buf_si_i[nrows+1] = buf_si_i[nrows] + anzi; /* i-structure */
4193: buf_si[nrows+1] = i-owners[proc]; /* local row index */
4194: nrows++;
4195: }
4196: }
4197: MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,si_waits+k);
4198: k++;
4199: buf_si += len_si[proc];
4200: }
4202: if (merge->nrecv) {MPI_Waitall(merge->nrecv,ri_waits,status);}
4203: if (merge->nsend) {MPI_Waitall(merge->nsend,si_waits,status);}
4205: PetscInfo2(seqmat,"nsend: %D, nrecv: %D\n",merge->nsend,merge->nrecv);
4206: for (i=0; i<merge->nrecv; i++){
4207: PetscInfo3(seqmat,"recv len_ri=%D, len_rj=%D from [%D]\n",len_ri[i],merge->len_r[i],merge->id_r[i]);
4208: }
4210: PetscFree(len_si);
4211: PetscFree(len_ri);
4212: PetscFree(rj_waits);
4213: PetscFree(si_waits);
4214: PetscFree(ri_waits);
4215: PetscFree(buf_s);
4216: PetscFree(status);
4218: /* compute a local seq matrix in each processor */
4219: /*----------------------------------------------*/
4220: /* allocate bi array and free space for accumulating nonzero column info */
4221: PetscMalloc((m+1)*sizeof(PetscInt),&bi);
4222: bi[0] = 0;
4224: /* create and initialize a linked list */
4225: nlnk = N+1;
4226: PetscLLCreate(N,N,nlnk,lnk,lnkbt);
4227:
4228: /* initial FreeSpace size is 2*(num of local nnz(seqmat)) */
4229: len = 0;
4230: len = ai[owners[rank+1]] - ai[owners[rank]];
4231: PetscFreeSpaceGet((PetscInt)(2*len+1),&free_space);
4232: current_space = free_space;
4234: /* determine symbolic info for each local row */
4235: PetscMalloc((3*merge->nrecv+1)*sizeof(PetscInt**),&buf_ri_k);
4236: nextrow = buf_ri_k + merge->nrecv;
4237: nextai = nextrow + merge->nrecv;
4238: for (k=0; k<merge->nrecv; k++){
4239: buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */
4240: nrows = *buf_ri_k[k];
4241: nextrow[k] = buf_ri_k[k] + 1; /* next row number of k-th recved i-structure */
4242:     nextai[k]   = buf_ri_k[k] + (nrows + 1);/* points to the next i-structure of k-th recved i-structure  */
4243: }
4245: MatPreallocateInitialize(comm,m,n,dnz,onz);
4246: len = 0;
4247: for (i=0;i<m;i++) {
4248: bnzi = 0;
4249: /* add local non-zero cols of this proc's seqmat into lnk */
4250: arow = owners[rank] + i;
4251: anzi = ai[arow+1] - ai[arow];
4252: aj = a->j + ai[arow];
4253: PetscLLAdd(anzi,aj,N,nlnk,lnk,lnkbt);
4254: bnzi += nlnk;
4255: /* add received col data into lnk */
4256: for (k=0; k<merge->nrecv; k++){ /* k-th received message */
4257: if (i == *nextrow[k]) { /* i-th row */
4258: anzi = *(nextai[k]+1) - *nextai[k];
4259: aj = buf_rj[k] + *nextai[k];
4260: PetscLLAdd(anzi,aj,N,nlnk,lnk,lnkbt);
4261: bnzi += nlnk;
4262: nextrow[k]++; nextai[k]++;
4263: }
4264: }
4265: if (len < bnzi) len = bnzi; /* =max(bnzi) */
4267: /* if free space is not available, make more free space */
4268: if (current_space->local_remaining<bnzi) {
4269:       PetscFreeSpaceGet(bnzi+current_space->total_array_size,&current_space);
4270: nspacedouble++;
4271: }
4272: /* copy data into free space, then initialize lnk */
4273: PetscLLClean(N,N,bnzi,lnk,current_space->array,lnkbt);
4274: MatPreallocateSet(i+owners[rank],bnzi,current_space->array,dnz,onz);
4276: current_space->array += bnzi;
4277: current_space->local_used += bnzi;
4278: current_space->local_remaining -= bnzi;
4279:
4280: bi[i+1] = bi[i] + bnzi;
4281: }
4282:
4283: PetscFree(buf_ri_k);
4285: PetscMalloc((bi[m]+1)*sizeof(PetscInt),&bj);
4286: PetscFreeSpaceContiguous(&free_space,bj);
4287: PetscLLDestroy(lnk,lnkbt);
4289: /* create symbolic parallel matrix B_mpi */
4290: /*---------------------------------------*/
4291: MatCreate(comm,&B_mpi);
4292: if (n==PETSC_DECIDE) {
4293: MatSetSizes(B_mpi,m,n,PETSC_DETERMINE,N);
4294: } else {
4295: MatSetSizes(B_mpi,m,n,PETSC_DETERMINE,PETSC_DETERMINE);
4296: }
4297: MatSetType(B_mpi,MATMPIAIJ);
4298: MatMPIAIJSetPreallocation(B_mpi,0,dnz,0,onz);
4299: MatPreallocateFinalize(dnz,onz);
4301: /* B_mpi is not ready for use - assembly will be done by MatMerge_SeqsToMPINumeric() */
4302: B_mpi->assembled = PETSC_FALSE;
4303: B_mpi->ops->destroy = MatDestroy_MPIAIJ_SeqsToMPI;
4304: merge->bi = bi;
4305: merge->bj = bj;
4306: merge->buf_ri = buf_ri;
4307: merge->buf_rj = buf_rj;
4308: merge->coi = PETSC_NULL;
4309: merge->coj = PETSC_NULL;
4310: merge->owners_co = PETSC_NULL;
4312: /* attach the supporting struct to B_mpi for reuse */
4313: PetscContainerCreate(PETSC_COMM_SELF,&container);
4314: PetscContainerSetPointer(container,merge);
4315: PetscObjectCompose((PetscObject)B_mpi,"MatMergeSeqsToMPI",(PetscObject)container);
4316: *mpimat = B_mpi;
4318: PetscCommDestroy(&comm);
4319: PetscLogEventEnd(MAT_Seqstompisym,seqmat,0,0,0);
4320: return(0);
4321: }
4325: PetscErrorCode MatMerge_SeqsToMPI(MPI_Comm comm,Mat seqmat,PetscInt m,PetscInt n,MatReuse scall,Mat *mpimat)
4326: {
4327: PetscErrorCode ierr;
4330: PetscLogEventBegin(MAT_Seqstompi,seqmat,0,0,0);
4331: if (scall == MAT_INITIAL_MATRIX){
4332: MatMerge_SeqsToMPISymbolic(comm,seqmat,m,n,mpimat);
4333: }
4334: MatMerge_SeqsToMPINumeric(seqmat,*mpimat);
4335: PetscLogEventEnd(MAT_Seqstompi,seqmat,0,0,0);
4336: return(0);
4337: }
4341: /*@
4342:      MatGetLocalMat - Creates a SeqAIJ matrix containing all the locally owned rows of a parallel AIJ matrix
4344: Not Collective
4346: Input Parameters:
4347: + A - the matrix
4348: . scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
4350: Output Parameter:
4351: . A_loc - the local sequential matrix generated
4353: Level: developer
4355: @*/
4356: PetscErrorCode MatGetLocalMat(Mat A,MatReuse scall,Mat *A_loc)
4357: {
4358: PetscErrorCode ierr;
4359: Mat_MPIAIJ *mpimat=(Mat_MPIAIJ*)A->data;
4360: Mat_SeqAIJ *mat,*a=(Mat_SeqAIJ*)(mpimat->A)->data,*b=(Mat_SeqAIJ*)(mpimat->B)->data;
4361: PetscInt *ai=a->i,*aj=a->j,*bi=b->i,*bj=b->j,*cmap=mpimat->garray;
4362: MatScalar *aa=a->a,*ba=b->a,*cam;
4363: PetscScalar *ca;
4364: PetscInt am=A->rmap->n,i,j,k,cstart=A->cmap->rstart;
4365: PetscInt *ci,*cj,col,ncols_d,ncols_o,jo;
4368: PetscLogEventBegin(MAT_Getlocalmat,A,0,0,0);
4369: if (scall == MAT_INITIAL_MATRIX){
4370: PetscMalloc((1+am)*sizeof(PetscInt),&ci);
4371: ci[0] = 0;
4372: for (i=0; i<am; i++){
4373: ci[i+1] = ci[i] + (ai[i+1] - ai[i]) + (bi[i+1] - bi[i]);
4374: }
4375: PetscMalloc((1+ci[am])*sizeof(PetscInt),&cj);
4376: PetscMalloc((1+ci[am])*sizeof(PetscScalar),&ca);
4377: k = 0;
4378: for (i=0; i<am; i++) {
4379: ncols_o = bi[i+1] - bi[i];
4380: ncols_d = ai[i+1] - ai[i];
4381: /* off-diagonal portion of A */
4382: for (jo=0; jo<ncols_o; jo++) {
4383: col = cmap[*bj];
4384: if (col >= cstart) break;
4385: cj[k] = col; bj++;
4386: ca[k++] = *ba++;
4387: }
4388: /* diagonal portion of A */
4389: for (j=0; j<ncols_d; j++) {
4390: cj[k] = cstart + *aj++;
4391: ca[k++] = *aa++;
4392: }
4393: /* off-diagonal portion of A */
4394: for (j=jo; j<ncols_o; j++) {
4395: cj[k] = cmap[*bj++];
4396: ca[k++] = *ba++;
4397: }
4398: }
4399: /* put together the new matrix */
4400: MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,A->cmap->N,ci,cj,ca,A_loc);
4401: /* MatCreateSeqAIJWithArrays flags matrix so PETSc doesn't free the user's arrays. */
4402: /* Since these are PETSc arrays, change flags to free them as necessary. */
4403: mat = (Mat_SeqAIJ*)(*A_loc)->data;
4404: mat->free_a = PETSC_TRUE;
4405: mat->free_ij = PETSC_TRUE;
4406: mat->nonew = 0;
4407: } else if (scall == MAT_REUSE_MATRIX){
4408: mat=(Mat_SeqAIJ*)(*A_loc)->data;
4409: ci = mat->i; cj = mat->j; cam = mat->a;
4410: for (i=0; i<am; i++) {
4411: /* off-diagonal portion of A */
4412: ncols_o = bi[i+1] - bi[i];
4413: for (jo=0; jo<ncols_o; jo++) {
4414: col = cmap[*bj];
4415: if (col >= cstart) break;
4416: *cam++ = *ba++; bj++;
4417: }
4418: /* diagonal portion of A */
4419: ncols_d = ai[i+1] - ai[i];
4420: for (j=0; j<ncols_d; j++) *cam++ = *aa++;
4421: /* off-diagonal portion of A */
4422: for (j=jo; j<ncols_o; j++) {
4423: *cam++ = *ba++; bj++;
4424: }
4425: }
4426: } else {
4427: SETERRQ1(PETSC_ERR_ARG_WRONG,"Invalid MatReuse %d",(int)scall);
4428: }
4430: PetscLogEventEnd(MAT_Getlocalmat,A,0,0,0);
4431: return(0);
4432: }
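A sketch of using MatGetLocalMat() above to pull the locally owned rows of an assembled MATMPIAIJ matrix into a SeqAIJ matrix with global column numbering, including the MAT_REUSE_MATRIX path; the function name is illustrative only.

#include "petscmat.h"

PetscErrorCode UseLocalRows(Mat A)
{
  PetscErrorCode ierr;
  Mat            Aloc;
  PetscInt       m,n;

  ierr = MatGetLocalMat(A,MAT_INITIAL_MATRIX,&Aloc);CHKERRQ(ierr);
  ierr = MatGetSize(Aloc,&m,&n);CHKERRQ(ierr);     /* m = local rows of A, n = global columns of A */
  /* ... use Aloc; if the values of A change but not its nonzero pattern: ... */
  ierr = MatGetLocalMat(A,MAT_REUSE_MATRIX,&Aloc);CHKERRQ(ierr);
  ierr = MatDestroy(Aloc);CHKERRQ(ierr);           /* Aloc owns its i,j,a arrays (free_a/free_ij set above) */
  return 0;
}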
4436: /*@C
4437:      MatGetLocalMatCondensed - Creates a SeqAIJ matrix from the locally owned rows and the NON-ZERO columns of a parallel AIJ matrix
4439: Not Collective
4441: Input Parameters:
4442: + A - the matrix
4443: . scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
4444: - row, col - index sets of rows and columns to extract (or PETSC_NULL)
4446: Output Parameter:
4447: . A_loc - the local sequential matrix generated
4449: Level: developer
4451: @*/
4452: PetscErrorCode MatGetLocalMatCondensed(Mat A,MatReuse scall,IS *row,IS *col,Mat *A_loc)
4453: {
4454: Mat_MPIAIJ *a=(Mat_MPIAIJ*)A->data;
4455: PetscErrorCode ierr;
4456: PetscInt i,start,end,ncols,nzA,nzB,*cmap,imark,*idx;
4457: IS isrowa,iscola;
4458: Mat *aloc;
4461: PetscLogEventBegin(MAT_Getlocalmatcondensed,A,0,0,0);
4462: if (!row){
4463: start = A->rmap->rstart; end = A->rmap->rend;
4464: ISCreateStride(PETSC_COMM_SELF,end-start,start,1,&isrowa);
4465: } else {
4466: isrowa = *row;
4467: }
4468: if (!col){
4469: start = A->cmap->rstart;
4470: cmap = a->garray;
4471: nzA = a->A->cmap->n;
4472: nzB = a->B->cmap->n;
4473: PetscMalloc((nzA+nzB)*sizeof(PetscInt), &idx);
4474: ncols = 0;
4475: for (i=0; i<nzB; i++) {
4476: if (cmap[i] < start) idx[ncols++] = cmap[i];
4477: else break;
4478: }
4479: imark = i;
4480: for (i=0; i<nzA; i++) idx[ncols++] = start + i;
4481: for (i=imark; i<nzB; i++) idx[ncols++] = cmap[i];
4482: ISCreateGeneral(PETSC_COMM_SELF,ncols,idx,&iscola);
4483: PetscFree(idx);
4484: } else {
4485: iscola = *col;
4486: }
4487: if (scall != MAT_INITIAL_MATRIX){
4488: PetscMalloc(sizeof(Mat),&aloc);
4489: aloc[0] = *A_loc;
4490: }
4491: MatGetSubMatrices(A,1,&isrowa,&iscola,scall,&aloc);
4492: *A_loc = aloc[0];
4493: PetscFree(aloc);
4494: if (!row){
4495: ISDestroy(isrowa);
4496: }
4497: if (!col){
4498: ISDestroy(iscola);
4499: }
4500: PetscLogEventEnd(MAT_Getlocalmatcondensed,A,0,0,0);
4501: return(0);
4502: }
4506: /*@C
4507:     MatGetBrowsOfAcols - Creates a SeqAIJ matrix by taking the rows of B that correspond to the nonzero columns of local A
4509: Collective on Mat
4511: Input Parameters:
4512: + A,B - the matrices in mpiaij format
4513: . scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
4514: - rowb, colb - index sets of rows and columns of B to extract (or PETSC_NULL)
4516: Output Parameter:
4517: + rowb, colb - index sets of rows and columns of B to extract
4518: .    brstart - row index of B_seq at which the next B->rmap->n rows, taken from B's locally owned rows, begin
4519: - B_seq - the sequential matrix generated
4521: Level: developer
4523: @*/
4524: PetscErrorCode MatGetBrowsOfAcols(Mat A,Mat B,MatReuse scall,IS *rowb,IS *colb,PetscInt *brstart,Mat *B_seq)
4525: {
4526: Mat_MPIAIJ *a=(Mat_MPIAIJ*)A->data;
4527: PetscErrorCode ierr;
4528: PetscInt *idx,i,start,ncols,nzA,nzB,*cmap,imark;
4529: IS isrowb,iscolb;
4530: Mat *bseq;
4531:
4533: if (A->cmap->rstart != B->rmap->rstart || A->cmap->rend != B->rmap->rend){
4534: SETERRQ4(PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, (%D, %D) != (%D,%D)",A->cmap->rstart,A->cmap->rend,B->rmap->rstart,B->rmap->rend);
4535: }
4536: PetscLogEventBegin(MAT_GetBrowsOfAcols,A,B,0,0);
4537:
4538: if (scall == MAT_INITIAL_MATRIX){
4539: start = A->cmap->rstart;
4540: cmap = a->garray;
4541: nzA = a->A->cmap->n;
4542: nzB = a->B->cmap->n;
4543: PetscMalloc((nzA+nzB)*sizeof(PetscInt), &idx);
4544: ncols = 0;
4545: for (i=0; i<nzB; i++) { /* row < local row index */
4546: if (cmap[i] < start) idx[ncols++] = cmap[i];
4547: else break;
4548: }
4549: imark = i;
4550: for (i=0; i<nzA; i++) idx[ncols++] = start + i; /* local rows */
4551: for (i=imark; i<nzB; i++) idx[ncols++] = cmap[i]; /* row > local row index */
4552: ISCreateGeneral(PETSC_COMM_SELF,ncols,idx,&isrowb);
4553: PetscFree(idx);
4554: *brstart = imark;
4555: ISCreateStride(PETSC_COMM_SELF,B->cmap->N,0,1,&iscolb);
4556: } else {
4557: if (!rowb || !colb) SETERRQ(PETSC_ERR_SUP,"IS rowb and colb must be provided for MAT_REUSE_MATRIX");
4558: isrowb = *rowb; iscolb = *colb;
4559: PetscMalloc(sizeof(Mat),&bseq);
4560: bseq[0] = *B_seq;
4561: }
4562: MatGetSubMatrices(B,1,&isrowb,&iscolb,scall,&bseq);
4563: *B_seq = bseq[0];
4564: PetscFree(bseq);
4565: if (!rowb){
4566: ISDestroy(isrowb);
4567: } else {
4568: *rowb = isrowb;
4569: }
4570: if (!colb){
4571: ISDestroy(iscolb);
4572: } else {
4573: *colb = iscolb;
4574: }
4575: PetscLogEventEnd(MAT_GetBrowsOfAcols,A,B,0,0);
4576: return(0);
4577: }
4581: /*@C
4582:     MatGetBrowsOfAoCols - Creates a SeqAIJ matrix by taking the rows of B that correspond to the nonzero columns
4583: of the OFF-DIAGONAL portion of local A
4585: Collective on Mat
4587: Input Parameters:
4588: + A,B - the matrices in mpiaij format
4589: . scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
4590: . startsj - starting point in B's sending and receiving j-arrays, saved for MAT_REUSE (or PETSC_NULL)
4591: - bufa_ptr - array for sending matrix values, saved for MAT_REUSE (or PETSC_NULL)
4593: Output Parameter:
4594: + B_oth - the sequential matrix generated
4596: Level: developer
4598: @*/
4599: PetscErrorCode MatGetBrowsOfAoCols(Mat A,Mat B,MatReuse scall,PetscInt **startsj,MatScalar **bufa_ptr,Mat *B_oth)
4600: {
4601: VecScatter_MPI_General *gen_to,*gen_from;
4602: PetscErrorCode ierr;
4603: Mat_MPIAIJ *a=(Mat_MPIAIJ*)A->data;
4604: Mat_SeqAIJ *b_oth;
4605: VecScatter ctx=a->Mvctx;
4606: MPI_Comm comm=((PetscObject)ctx)->comm;
4607: PetscMPIInt *rprocs,*sprocs,tag=((PetscObject)ctx)->tag,rank;
4608: PetscInt *rowlen,*bufj,*bufJ,ncols,aBn=a->B->cmap->n,row,*b_othi,*b_othj;
4609: PetscScalar *rvalues,*svalues;
4610: MatScalar *b_otha,*bufa,*bufA;
4611: PetscInt i,j,k,l,ll,nrecvs,nsends,nrows,*srow,*rstarts,*rstartsj = 0,*sstarts,*sstartsj,len;
4612: MPI_Request *rwaits = PETSC_NULL,*swaits = PETSC_NULL;
4613: MPI_Status *sstatus,rstatus;
4614: PetscMPIInt jj;
4615: PetscInt *cols,sbs,rbs;
4616: PetscScalar *vals;
4619: if (A->cmap->rstart != B->rmap->rstart || A->cmap->rend != B->rmap->rend){
4620: SETERRQ4(PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, (%d, %d) != (%d,%d)",A->cmap->rstart,A->cmap->rend,B->rmap->rstart,B->rmap->rend);
4621: }
4622: PetscLogEventBegin(MAT_GetBrowsOfAocols,A,B,0,0);
4623: MPI_Comm_rank(comm,&rank);
4625: gen_to = (VecScatter_MPI_General*)ctx->todata;
4626: gen_from = (VecScatter_MPI_General*)ctx->fromdata;
4627: rvalues = gen_from->values; /* holds the length of receiving row */
4628: svalues = gen_to->values; /* holds the length of sending row */
4629: nrecvs = gen_from->n;
4630: nsends = gen_to->n;
4632: PetscMalloc2(nrecvs,MPI_Request,&rwaits,nsends,MPI_Request,&swaits);
4633: srow = gen_to->indices; /* local row index to be sent */
4634: sstarts = gen_to->starts;
4635: sprocs = gen_to->procs;
4636: sstatus = gen_to->sstatus;
4637: sbs = gen_to->bs;
4638: rstarts = gen_from->starts;
4639: rprocs = gen_from->procs;
4640: rbs = gen_from->bs;
4642: if (!startsj || !bufa_ptr) scall = MAT_INITIAL_MATRIX;
4643: if (scall == MAT_INITIAL_MATRIX){
4644: /* i-array */
4645: /*---------*/
4646: /* post receives */
4647: for (i=0; i<nrecvs; i++){
4648: rowlen = (PetscInt*)rvalues + rstarts[i]*rbs;
4649: nrows = (rstarts[i+1]-rstarts[i])*rbs; /* num of indices to be received */
4650: MPI_Irecv(rowlen,nrows,MPIU_INT,rprocs[i],tag,comm,rwaits+i);
4651: }
4653: /* pack the outgoing message */
4654: PetscMalloc((nsends+nrecvs+3)*sizeof(PetscInt),&sstartsj);
4655: rstartsj = sstartsj + nsends +1;
4656: sstartsj[0] = 0; rstartsj[0] = 0;
4657: len = 0; /* total length of j or a array to be sent */
4658: k = 0;
4659: for (i=0; i<nsends; i++){
4660: rowlen = (PetscInt*)svalues + sstarts[i]*sbs;
4661: nrows = sstarts[i+1]-sstarts[i]; /* num of block rows */
4662: for (j=0; j<nrows; j++) {
4663: row = srow[k] + B->rmap->range[rank]; /* global row idx */
4664: for (l=0; l<sbs; l++){
4665: MatGetRow_MPIAIJ(B,row+l,&ncols,PETSC_NULL,PETSC_NULL); /* rowlength */
4666: rowlen[j*sbs+l] = ncols;
4667: len += ncols;
4668: MatRestoreRow_MPIAIJ(B,row+l,&ncols,PETSC_NULL,PETSC_NULL);
4669: }
4670: k++;
4671: }
4672: MPI_Isend(rowlen,nrows*sbs,MPIU_INT,sprocs[i],tag,comm,swaits+i);
4673: sstartsj[i+1] = len; /* starting point of (i+1)-th outgoing msg in bufj and bufa */
4674: }
4675: /* recvs and sends of i-array are completed */
4676: i = nrecvs;
4677: while (i--) {
4678: MPI_Waitany(nrecvs,rwaits,&jj,&rstatus);
4679: }
4680: if (nsends) {MPI_Waitall(nsends,swaits,sstatus);}
4682: /* allocate buffers for sending j and a arrays */
4683: PetscMalloc((len+1)*sizeof(PetscInt),&bufj);
4684: PetscMalloc((len+1)*sizeof(PetscScalar),&bufa);
4686: /* create i-array of B_oth */
4687: PetscMalloc((aBn+2)*sizeof(PetscInt),&b_othi);
4688: b_othi[0] = 0;
4689: len = 0; /* total length of j or a array to be received */
4690: k = 0;
4691: for (i=0; i<nrecvs; i++){
4692: rowlen = (PetscInt*)rvalues + rstarts[i]*rbs;
4693:       nrows = rbs*(rstarts[i+1]-rstarts[i]); /* num of rows to be received */
4694: for (j=0; j<nrows; j++) {
4695: b_othi[k+1] = b_othi[k] + rowlen[j];
4696: len += rowlen[j]; k++;
4697: }
4698: rstartsj[i+1] = len; /* starting point of (i+1)-th incoming msg in bufj and bufa */
4699: }
4701:     /* allocate space for j and a arrays of B_oth */
4702: PetscMalloc((b_othi[aBn]+1)*sizeof(PetscInt),&b_othj);
4703: PetscMalloc((b_othi[aBn]+1)*sizeof(MatScalar),&b_otha);
4705: /* j-array */
4706: /*---------*/
4707: /* post receives of j-array */
4708: for (i=0; i<nrecvs; i++){
4709: nrows = rstartsj[i+1]-rstartsj[i]; /* length of the msg received */
4710: MPI_Irecv(b_othj+rstartsj[i],nrows,MPIU_INT,rprocs[i],tag,comm,rwaits+i);
4711: }
4713: /* pack the outgoing message j-array */
4714: k = 0;
4715: for (i=0; i<nsends; i++){
4716: nrows = sstarts[i+1]-sstarts[i]; /* num of block rows */
4717: bufJ = bufj+sstartsj[i];
4718: for (j=0; j<nrows; j++) {
4719: row = srow[k++] + B->rmap->range[rank]; /* global row idx */
4720: for (ll=0; ll<sbs; ll++){
4721: MatGetRow_MPIAIJ(B,row+ll,&ncols,&cols,PETSC_NULL);
4722: for (l=0; l<ncols; l++){
4723: *bufJ++ = cols[l];
4724: }
4725: MatRestoreRow_MPIAIJ(B,row+ll,&ncols,&cols,PETSC_NULL);
4726: }
4727: }
4728: MPI_Isend(bufj+sstartsj[i],sstartsj[i+1]-sstartsj[i],MPIU_INT,sprocs[i],tag,comm,swaits+i);
4729: }
4731: /* recvs and sends of j-array are completed */
4732: i = nrecvs;
4733: while (i--) {
4734: MPI_Waitany(nrecvs,rwaits,&jj,&rstatus);
4735: }
4736: if (nsends) {MPI_Waitall(nsends,swaits,sstatus);}
4737: } else if (scall == MAT_REUSE_MATRIX){
4738: sstartsj = *startsj;
4739: rstartsj = sstartsj + nsends +1;
4740: bufa = *bufa_ptr;
4741: b_oth = (Mat_SeqAIJ*)(*B_oth)->data;
4742: b_otha = b_oth->a;
4743: } else {
4744:     SETERRQ(PETSC_ERR_ARG_WRONGSTATE, "Matrix P does not possess an object container");
4745: }
4747: /* a-array */
4748: /*---------*/
4749: /* post receives of a-array */
4750: for (i=0; i<nrecvs; i++){
4751: nrows = rstartsj[i+1]-rstartsj[i]; /* length of the msg received */
4752: MPI_Irecv(b_otha+rstartsj[i],nrows,MPIU_SCALAR,rprocs[i],tag,comm,rwaits+i);
4753: }
4755: /* pack the outgoing message a-array */
4756: k = 0;
4757: for (i=0; i<nsends; i++){
4758: nrows = sstarts[i+1]-sstarts[i]; /* num of block rows */
4759: bufA = bufa+sstartsj[i];
4760: for (j=0; j<nrows; j++) {
4761: row = srow[k++] + B->rmap->range[rank]; /* global row idx */
4762: for (ll=0; ll<sbs; ll++){
4763: MatGetRow_MPIAIJ(B,row+ll,&ncols,PETSC_NULL,&vals);
4764: for (l=0; l<ncols; l++){
4765: *bufA++ = vals[l];
4766: }
4767: MatRestoreRow_MPIAIJ(B,row+ll,&ncols,PETSC_NULL,&vals);
4768: }
4769: }
4770: MPI_Isend(bufa+sstartsj[i],sstartsj[i+1]-sstartsj[i],MPIU_SCALAR,sprocs[i],tag,comm,swaits+i);
4771: }
4772: /* recvs and sends of a-array are completed */
4773: i = nrecvs;
4774: while (i--) {
4775: MPI_Waitany(nrecvs,rwaits,&jj,&rstatus);
4776: }
4777: if (nsends) {MPI_Waitall(nsends,swaits,sstatus);}
4778: PetscFree2(rwaits,swaits);
4780: if (scall == MAT_INITIAL_MATRIX){
4781: /* put together the new matrix */
4782: MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,aBn,B->cmap->N,b_othi,b_othj,b_otha,B_oth);
4784: /* MatCreateSeqAIJWithArrays flags matrix so PETSc doesn't free the user's arrays. */
4785: /* Since these are PETSc arrays, change flags to free them as necessary. */
4786: b_oth = (Mat_SeqAIJ *)(*B_oth)->data;
4787: b_oth->free_a = PETSC_TRUE;
4788: b_oth->free_ij = PETSC_TRUE;
4789: b_oth->nonew = 0;
4791: PetscFree(bufj);
4792: if (!startsj || !bufa_ptr){
4793: PetscFree(sstartsj);
4794: PetscFree(bufa_ptr);
4795: } else {
4796: *startsj = sstartsj;
4797: *bufa_ptr = bufa;
4798: }
4799: }
4800: PetscLogEventEnd(MAT_GetBrowsOfAocols,A,B,0,0);
4801: return(0);
4802: }
4806: /*@C
4807: MatGetCommunicationStructs - Provides access to the communication structures used in matrix-vector multiplication.
4809: Not Collective
4811: Input Parameters:
4812: . A - The matrix in mpiaij format
4814: Output Parameter:
4815: + lvec - The local vector holding off-process values from the argument to a matrix-vector product
4816: . colmap - A map from global column index to local index into lvec
4817: - multScatter - A scatter from the argument of a matrix-vector product to lvec
4819: Level: developer
4821: @*/
4822: #if defined (PETSC_USE_CTABLE)
4823: PetscErrorCode MatGetCommunicationStructs(Mat A, Vec *lvec, PetscTable *colmap, VecScatter *multScatter)
4824: #else
4825: PetscErrorCode MatGetCommunicationStructs(Mat A, Vec *lvec, PetscInt *colmap[], VecScatter *multScatter)
4826: #endif
4827: {
4828: Mat_MPIAIJ *a;
4835: a = (Mat_MPIAIJ *) A->data;
4836: if (lvec) *lvec = a->lvec;
4837: if (colmap) *colmap = a->colmap;
4838: if (multScatter) *multScatter = a->Mvctx;
4839: return(0);
4840: }
4847: #include ../src/mat/impls/dense/mpi/mpidense.h
4851: /*
4852:     Computes C = A*B as (B'*A')', since computing A*B directly with a dense A and sparse B is untenable
4854: n p p
4855: ( ) ( ) ( )
4856: m ( A ) * n ( B ) = m ( C )
4857: ( ) ( ) ( )
4859: */
4860: PetscErrorCode MatMatMultNumeric_MPIDense_MPIAIJ(Mat A,Mat B,Mat C)
4861: {
4862: PetscErrorCode ierr;
4863: Mat At,Bt,Ct;
4866: MatTranspose(A,MAT_INITIAL_MATRIX,&At);
4867: MatTranspose(B,MAT_INITIAL_MATRIX,&Bt);
4868: MatMatMult(Bt,At,MAT_INITIAL_MATRIX,1.0,&Ct);
4869: MatDestroy(At);
4870: MatDestroy(Bt);
4871: MatTranspose(Ct,MAT_REUSE_MATRIX,&C);
4872: MatDestroy(Ct);
4873: return(0);
4874: }
4878: PetscErrorCode MatMatMultSymbolic_MPIDense_MPIAIJ(Mat A,Mat B,PetscReal fill,Mat *C)
4879: {
4881: PetscInt m=A->rmap->n,n=B->cmap->n;
4882: Mat Cmat;
4885: if (A->cmap->n != B->rmap->n) SETERRQ2(PETSC_ERR_ARG_SIZ,"A->cmap->n %d != B->rmap->n %d\n",A->cmap->n,B->rmap->n);
4886: MatCreate(((PetscObject)A)->comm,&Cmat);
4887: MatSetSizes(Cmat,m,n,PETSC_DETERMINE,PETSC_DETERMINE);
4888: MatSetType(Cmat,MATMPIDENSE);
4889: MatMPIDenseSetPreallocation(Cmat,PETSC_NULL);
4890: MatAssemblyBegin(Cmat,MAT_FINAL_ASSEMBLY);
4891: MatAssemblyEnd(Cmat,MAT_FINAL_ASSEMBLY);
4892: *C = Cmat;
4893: return(0);
4894: }
4896: /* ----------------------------------------------------------------*/
4899: PetscErrorCode MatMatMult_MPIDense_MPIAIJ(Mat A,Mat B,MatReuse scall,PetscReal fill,Mat *C)
4900: {
4904: if (scall == MAT_INITIAL_MATRIX){
4905: MatMatMultSymbolic_MPIDense_MPIAIJ(A,B,fill,C);
4906: }
4907: MatMatMultNumeric_MPIDense_MPIAIJ(A,B,*C);
4908: return(0);
4909: }
4912: #if defined(PETSC_HAVE_MUMPS)
4914: #endif
4915: #if defined(PETSC_HAVE_PASTIX)
4917: #endif
4918: #if defined(PETSC_HAVE_SUPERLU_DIST)
4920: #endif
4921: #if defined(PETSC_HAVE_SPOOLES)
4923: #endif
4926: /*MC
4927: MATMPIAIJ - MATMPIAIJ = "mpiaij" - A matrix type to be used for parallel sparse matrices.
4929: Options Database Keys:
4930: . -mat_type mpiaij - sets the matrix type to "mpiaij" during a call to MatSetFromOptions()
4932: Level: beginner
4934: .seealso: MatCreateMPIAIJ()
4935: M*/
4940: PetscErrorCode MatCreate_MPIAIJ(Mat B)
4941: {
4942: Mat_MPIAIJ *b;
4944: PetscMPIInt size;
4947: MPI_Comm_size(((PetscObject)B)->comm,&size);
4949: PetscNewLog(B,Mat_MPIAIJ,&b);
4950: B->data = (void*)b;
4951: PetscMemcpy(B->ops,&MatOps_Values,sizeof(struct _MatOps));
4952: B->rmap->bs = 1;
4953: B->assembled = PETSC_FALSE;
4954: B->mapping = 0;
4956: B->insertmode = NOT_SET_VALUES;
4957: b->size = size;
4958: MPI_Comm_rank(((PetscObject)B)->comm,&b->rank);
4960: /* build cache for off array entries formed */
4961: MatStashCreate_Private(((PetscObject)B)->comm,1,&B->stash);
4962: b->donotstash = PETSC_FALSE;
4963: b->colmap = 0;
4964: b->garray = 0;
4965: b->roworiented = PETSC_TRUE;
4967: /* stuff used for matrix vector multiply */
4968: b->lvec = PETSC_NULL;
4969: b->Mvctx = PETSC_NULL;
4971: /* stuff for MatGetRow() */
4972: b->rowindices = 0;
4973: b->rowvalues = 0;
4974: b->getrowactive = PETSC_FALSE;
4976: #if defined(PETSC_HAVE_SPOOLES)
4977: PetscObjectComposeFunctionDynamic((PetscObject)B,"MatGetFactor_mpiaij_spooles_C",
4978: "MatGetFactor_mpiaij_spooles",
4979: MatGetFactor_mpiaij_spooles);
4980: #endif
4981: #if defined(PETSC_HAVE_MUMPS)
4982: PetscObjectComposeFunctionDynamic((PetscObject)B,"MatGetFactor_mpiaij_mumps_C",
4983: "MatGetFactor_mpiaij_mumps",
4984: MatGetFactor_mpiaij_mumps);
4985: #endif
4986: #if defined(PETSC_HAVE_PASTIX)
4987: PetscObjectComposeFunctionDynamic((PetscObject)B,"MatGetFactor_mpiaij_pastix_C",
4988: "MatGetFactor_mpiaij_pastix",
4989: MatGetFactor_mpiaij_pastix);
4990: #endif
4991: #if defined(PETSC_HAVE_SUPERLU_DIST)
4992: PetscObjectComposeFunctionDynamic((PetscObject)B,"MatGetFactor_mpiaij_superlu_dist_C",
4993: "MatGetFactor_mpiaij_superlu_dist",
4994: MatGetFactor_mpiaij_superlu_dist);
4995: #endif
4996: PetscObjectComposeFunctionDynamic((PetscObject)B,"MatStoreValues_C",
4997: "MatStoreValues_MPIAIJ",
4998: MatStoreValues_MPIAIJ);
4999: PetscObjectComposeFunctionDynamic((PetscObject)B,"MatRetrieveValues_C",
5000: "MatRetrieveValues_MPIAIJ",
5001: MatRetrieveValues_MPIAIJ);
5002: PetscObjectComposeFunctionDynamic((PetscObject)B,"MatGetDiagonalBlock_C",
5003: "MatGetDiagonalBlock_MPIAIJ",
5004: MatGetDiagonalBlock_MPIAIJ);
5005: PetscObjectComposeFunctionDynamic((PetscObject)B,"MatIsTranspose_C",
5006: "MatIsTranspose_MPIAIJ",
5007: MatIsTranspose_MPIAIJ);
5008: PetscObjectComposeFunctionDynamic((PetscObject)B,"MatMPIAIJSetPreallocation_C",
5009: "MatMPIAIJSetPreallocation_MPIAIJ",
5010: MatMPIAIJSetPreallocation_MPIAIJ);
5011: PetscObjectComposeFunctionDynamic((PetscObject)B,"MatMPIAIJSetPreallocationCSR_C",
5012: "MatMPIAIJSetPreallocationCSR_MPIAIJ",
5013: MatMPIAIJSetPreallocationCSR_MPIAIJ);
5014: PetscObjectComposeFunctionDynamic((PetscObject)B,"MatDiagonalScaleLocal_C",
5015: "MatDiagonalScaleLocal_MPIAIJ",
5016: MatDiagonalScaleLocal_MPIAIJ);
5017: PetscObjectComposeFunctionDynamic((PetscObject)B,"MatConvert_mpiaij_mpicsrperm_C",
5018: "MatConvert_MPIAIJ_MPICSRPERM",
5019: MatConvert_MPIAIJ_MPICSRPERM);
5020: PetscObjectComposeFunctionDynamic((PetscObject)B,"MatConvert_mpiaij_mpicrl_C",
5021: "MatConvert_MPIAIJ_MPICRL",
5022: MatConvert_MPIAIJ_MPICRL);
5023: PetscObjectComposeFunctionDynamic((PetscObject)B,"MatMatMult_mpidense_mpiaij_C",
5024: "MatMatMult_MPIDense_MPIAIJ",
5025: MatMatMult_MPIDense_MPIAIJ);
5026: PetscObjectComposeFunctionDynamic((PetscObject)B,"MatMatMultSymbolic_mpidense_mpiaij_C",
5027: "MatMatMultSymbolic_MPIDense_MPIAIJ",
5028: MatMatMultSymbolic_MPIDense_MPIAIJ);
5029: PetscObjectComposeFunctionDynamic((PetscObject)B,"MatMatMultNumeric_mpidense_mpiaij_C",
5030: "MatMatMultNumeric_MPIDense_MPIAIJ",
5031: MatMatMultNumeric_MPIDense_MPIAIJ);
5032: PetscObjectChangeTypeName((PetscObject)B,MATMPIAIJ);
5033: return(0);
5034: }
5039: /*@
5040:      MatCreateMPIAIJWithSplitArrays - creates an MPI AIJ matrix using arrays that contain the "diagonal"
5041: and "off-diagonal" part of the matrix in CSR format.
5043: Collective on MPI_Comm
5045: Input Parameters:
5046: + comm - MPI communicator
5047: . m - number of local rows (Cannot be PETSC_DECIDE)
5048: . n - This value should be the same as the local size used in creating the
5049: x vector for the matrix-vector product y = Ax. (or PETSC_DECIDE to have
5050: calculated if N is given) For square matrices n is almost always m.
5051: . M - number of global rows (or PETSC_DETERMINE to have calculated if m is given)
5052: . N - number of global columns (or PETSC_DETERMINE to have calculated if n is given)
5053: . i - row indices for "diagonal" portion of matrix
5054: . j - column indices
5055: . a - matrix values
5056: . oi - row indices for "off-diagonal" portion of matrix
5057: . oj - column indices
5058: - oa - matrix values
5060: Output Parameter:
5061: . mat - the matrix
5063: Level: advanced
5065: Notes:
5066: The i, j, and a arrays ARE NOT copied by this routine into the internal format used by PETSc.
5068: The i and j indices are 0 based
5069:
5070: See MatCreateMPIAIJ() for the definition of "diagonal" and "off-diagonal" portion of the matrix
5073: .keywords: matrix, aij, compressed row, sparse, parallel
5075: .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatMPIAIJSetPreallocation(), MatMPIAIJSetPreallocationCSR(),
5076: MPIAIJ, MatCreateMPIAIJ(), MatCreateMPIAIJWithArrays()
5077: @*/
5078: PetscErrorCode MatCreateMPIAIJWithSplitArrays(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt M,PetscInt N,PetscInt i[],PetscInt j[],PetscScalar a[],
5079: PetscInt oi[], PetscInt oj[],PetscScalar oa[],Mat *mat)
5080: {
5082: Mat_MPIAIJ *maij;
5085: if (m < 0) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"local number of rows (m) cannot be PETSC_DECIDE, or negative");
5086: if (i[0]) {
5087: SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"i (row indices) must start with 0");
5088: }
5089: if (oi[0]) {
5090: SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"oi (row indices) must start with 0");
5091: }
5092: MatCreate(comm,mat);
5093: MatSetSizes(*mat,m,n,M,N);
5094: MatSetType(*mat,MATMPIAIJ);
5095: maij = (Mat_MPIAIJ*) (*mat)->data;
5096: maij->donotstash = PETSC_TRUE;
5097: (*mat)->preallocated = PETSC_TRUE;
5099: PetscMapSetBlockSize((*mat)->rmap,1);
5100: PetscMapSetBlockSize((*mat)->cmap,1);
5101: PetscMapSetUp((*mat)->rmap);
5102: PetscMapSetUp((*mat)->cmap);
5104: MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,m,n,i,j,a,&maij->A);
5105: MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,m,(*mat)->cmap->N,oi,oj,oa,&maij->B);
5107: MatAssemblyBegin(maij->A,MAT_FINAL_ASSEMBLY);
5108: MatAssemblyEnd(maij->A,MAT_FINAL_ASSEMBLY);
5109: MatAssemblyBegin(maij->B,MAT_FINAL_ASSEMBLY);
5110: MatAssemblyEnd(maij->B,MAT_FINAL_ASSEMBLY);
5112: MatAssemblyBegin(*mat,MAT_FINAL_ASSEMBLY);
5113: MatAssemblyEnd(*mat,MAT_FINAL_ASSEMBLY);
5114: return(0);
5115: }
5117: /*
5118: Special version for direct calls from Fortran
5119: */
5120: #if defined(PETSC_HAVE_FORTRAN_CAPS)
5121: #define matsetvaluesmpiaij_ MATSETVALUESMPIAIJ
5122: #elif !defined(PETSC_HAVE_FORTRAN_UNDERSCORE)
5123: #define matsetvaluesmpiaij_ matsetvaluesmpiaij
5124: #endif
5126: /* Change these macros so can be used in void function */
5127: #undef CHKERRQ
5128: #define CHKERRQ(ierr) CHKERRABORT(((PetscObject)mat)->comm,ierr)
5129: #undef SETERRQ2
5130: #define SETERRQ2(ierr,b,c,d) CHKERRABORT(((PetscObject)mat)->comm,ierr)
5131: #undef SETERRQ
5132: #define SETERRQ(ierr,b) CHKERRABORT(((PetscObject)mat)->comm,ierr)
5137: void PETSC_STDCALL matsetvaluesmpiaij_(Mat *mmat,PetscInt *mm,const PetscInt im[],PetscInt *mn,const PetscInt in[],const PetscScalar v[],InsertMode *maddv,PetscErrorCode *_ierr)
5138: {
5139: Mat mat = *mmat;
5140: PetscInt m = *mm, n = *mn;
5141: InsertMode addv = *maddv;
5142: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;
5143: PetscScalar value;
5144: PetscErrorCode ierr;
5146: MatPreallocated(mat);
5147: if (mat->insertmode == NOT_SET_VALUES) {
5148: mat->insertmode = addv;
5149: }
5150: #if defined(PETSC_USE_DEBUG)
5151: else if (mat->insertmode != addv) {
5152: SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Cannot mix add values and insert values");
5153: }
5154: #endif
5155: {
5156: PetscInt i,j,rstart = mat->rmap->rstart,rend = mat->rmap->rend;
5157: PetscInt cstart = mat->cmap->rstart,cend = mat->cmap->rend,row,col;
5158: PetscTruth roworiented = aij->roworiented;
5160: /* Some Variables required in the macro */
5161: Mat A = aij->A;
5162: Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data;
5163: PetscInt *aimax = a->imax,*ai = a->i,*ailen = a->ilen,*aj = a->j;
5164: MatScalar *aa = a->a;
5165: PetscTruth ignorezeroentries = (((a->ignorezeroentries)&&(addv==ADD_VALUES))?PETSC_TRUE:PETSC_FALSE);
5166: Mat B = aij->B;
5167: Mat_SeqAIJ *b = (Mat_SeqAIJ*)B->data;
5168: PetscInt *bimax = b->imax,*bi = b->i,*bilen = b->ilen,*bj = b->j,bm = aij->B->rmap->n,am = aij->A->rmap->n;
5169: MatScalar *ba = b->a;
5171: PetscInt *rp1,*rp2,ii,nrow1,nrow2,_i,rmax1,rmax2,N,low1,high1,low2,high2,t,lastcol1,lastcol2;
5172: PetscInt nonew = a->nonew;
5173: MatScalar *ap1,*ap2;
5176: for (i=0; i<m; i++) {
5177: if (im[i] < 0) continue;
5178: #if defined(PETSC_USE_DEBUG)
5179: if (im[i] >= mat->rmap->N) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Row too large: row %D max %D",im[i],mat->rmap->N-1);
5180: #endif
5181: if (im[i] >= rstart && im[i] < rend) {
5182: row = im[i] - rstart;
5183: lastcol1 = -1;
5184: rp1 = aj + ai[row];
5185: ap1 = aa + ai[row];
5186: rmax1 = aimax[row];
5187: nrow1 = ailen[row];
5188: low1 = 0;
5189: high1 = nrow1;
5190: lastcol2 = -1;
5191: rp2 = bj + bi[row];
5192: ap2 = ba + bi[row];
5193: rmax2 = bimax[row];
5194: nrow2 = bilen[row];
5195: low2 = 0;
5196: high2 = nrow2;
5198: for (j=0; j<n; j++) {
5199: if (roworiented) value = v[i*n+j]; else value = v[i+j*m];
5200: if (ignorezeroentries && value == 0.0 && (addv == ADD_VALUES)) continue;
5201: if (in[j] >= cstart && in[j] < cend){
5202: col = in[j] - cstart;
5203: MatSetValues_SeqAIJ_A_Private(row,col,value,addv);
5204: } else if (in[j] < 0) continue;
5205: #if defined(PETSC_USE_DEBUG)
5206: else if (in[j] >= mat->cmap->N) {SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Column too large: col %D max %D",in[j],mat->cmap->N-1);}
5207: #endif
5208: else {
5209: if (mat->was_assembled) {
5210: if (!aij->colmap) {
5211: CreateColmap_MPIAIJ_Private(mat);
5212: }
5213: #if defined (PETSC_USE_CTABLE)
5214: PetscTableFind(aij->colmap,in[j]+1,&col);
5215: col--;
5216: #else
5217: col = aij->colmap[in[j]] - 1;
5218: #endif
5219: if (col < 0 && !((Mat_SeqAIJ*)(aij->A->data))->nonew) {
5220: DisAssemble_MPIAIJ(mat);
5221: col = in[j];
5222: /* Reinitialize the variables required by MatSetValues_SeqAIJ_B_Private() */
5223: B = aij->B;
5224: b = (Mat_SeqAIJ*)B->data;
5225: bimax = b->imax; bi = b->i; bilen = b->ilen; bj = b->j;
5226: rp2 = bj + bi[row];
5227: ap2 = ba + bi[row];
5228: rmax2 = bimax[row];
5229: nrow2 = bilen[row];
5230: low2 = 0;
5231: high2 = nrow2;
5232: bm = aij->B->rmap->n;
5233: ba = b->a;
5234: }
5235: } else col = in[j];
5236: MatSetValues_SeqAIJ_B_Private(row,col,value,addv);
5237: }
5238: }
5239: } else {
5240: if (!aij->donotstash) {
5241: if (roworiented) {
5242: MatStashValuesRow_Private(&mat->stash,im[i],n,in,v+i*n,(PetscTruth)(ignorezeroentries && (addv == ADD_VALUES)));
5243: } else {
5244: MatStashValuesCol_Private(&mat->stash,im[i],n,in,v+i,m,(PetscTruth)(ignorezeroentries && (addv == ADD_VALUES)));
5245: }
5246: }
5247: }
5248: }}
5249: PetscFunctionReturnVoid();
5250: }