Actual source code: mpiptap.c
petsc-3.3-p5 2012-12-01
2: /*
3: Defines projective product routines where A is an MPIAIJ matrix
4: C = P^T * A * P
5: */
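/*
   Illustrative usage sketch (editorial example, not part of this file): these kernels
   are normally reached through the public MatPtAP() interface rather than called
   directly; the fill estimate 2.0 below is an arbitrary illustration.

     Mat            A,P,C;
     PetscErrorCode ierr;
     ...
     ierr = MatPtAP(A,P,MAT_INITIAL_MATRIX,2.0,&C);CHKERRQ(ierr);   symbolic + numeric C = P^T*A*P
     ...                                             change the values of A, keep its nonzero pattern
     ierr = MatPtAP(A,P,MAT_REUSE_MATRIX,2.0,&C);CHKERRQ(ierr);     reuse the symbolic data attached to C
     ierr = MatDestroy(&C);CHKERRQ(ierr);
*/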
7: #include <../src/mat/impls/aij/seq/aij.h> /*I "petscmat.h" I*/
8: #include <../src/mat/utils/freespace.h>
9: #include <../src/mat/impls/aij/mpi/mpiaij.h>
10: #include <petscbt.h>
12: /*
13: #define DEBUG_MATPTAP
14: */
16: extern PetscErrorCode MatDestroy_MPIAIJ(Mat);
19: PetscErrorCode MatDestroy_MPIAIJ_PtAP(Mat A)
20: {
21: PetscErrorCode ierr;
22: Mat_MPIAIJ *a=(Mat_MPIAIJ*)A->data;
23: Mat_PtAPMPI *ptap=a->ptap;
26: if (ptap){
27: Mat_Merge_SeqsToMPI *merge=ptap->merge;
28: PetscFree2(ptap->startsj_s,ptap->startsj_r);
29: PetscFree(ptap->bufa);
30: MatDestroy(&ptap->P_loc);
31: MatDestroy(&ptap->P_oth);
32: MatDestroy(&ptap->A_loc); /* used by MatTransposeMatMult() */
33: if (ptap->api){PetscFree(ptap->api);}
34: if (ptap->apj){PetscFree(ptap->apj);}
35: if (merge) {
36: PetscFree(merge->id_r);
37: PetscFree(merge->len_s);
38: PetscFree(merge->len_r);
39: PetscFree(merge->bi);
40: PetscFree(merge->bj);
41: PetscFree(merge->buf_ri[0]);
42: PetscFree(merge->buf_ri);
43: PetscFree(merge->buf_rj[0]);
44: PetscFree(merge->buf_rj);
45: PetscFree(merge->coi);
46: PetscFree(merge->coj);
47: PetscFree(merge->owners_co);
48: PetscLayoutDestroy(&merge->rowmap);
49: merge->destroy(A);
50: PetscFree(ptap->merge);
51: }
52: PetscFree(ptap);
53: }
55: return(0);
56: }
60: PetscErrorCode MatDuplicate_MPIAIJ_MatPtAP(Mat A, MatDuplicateOption op, Mat *M)
61: {
62: PetscErrorCode ierr;
63: Mat_MPIAIJ *a=(Mat_MPIAIJ*)A->data;
64: Mat_PtAPMPI *ptap = a->ptap;
65: Mat_Merge_SeqsToMPI *merge = ptap->merge;
68: (*merge->duplicate)(A,op,M);
69: (*M)->ops->destroy = merge->destroy;
70: (*M)->ops->duplicate = merge->duplicate;
71: return(0);
72: }
76: PetscErrorCode MatPtAPSymbolic_MPIAIJ(Mat A,Mat P,PetscReal fill,Mat *C)
77: {
81: if (!P->ops->ptapsymbolic_mpiaij) SETERRQ2(((PetscObject)A)->comm,PETSC_ERR_SUP,"Not implemented for A=%s and P=%s",((PetscObject)A)->type_name,((PetscObject)P)->type_name);
82: (*P->ops->ptapsymbolic_mpiaij)(A,P,fill,C);
83: return(0);
84: }
88: PetscErrorCode MatPtAPNumeric_MPIAIJ(Mat A,Mat P,Mat C)
89: {
93: if (!P->ops->ptapnumeric_mpiaij) SETERRQ2(((PetscObject)A)->comm,PETSC_ERR_SUP,"Not implemented for A=%s and P=%s",((PetscObject)A)->type_name,((PetscObject)P)->type_name);
94: (*P->ops->ptapnumeric_mpiaij)(A,P,C);
95: return(0);
96: }
100: PetscErrorCode MatPtAPSymbolic_MPIAIJ_MPIAIJ(Mat A,Mat P,PetscReal fill,Mat *C)
101: {
102: PetscErrorCode ierr;
103: Mat Cmpi;
104: Mat_PtAPMPI *ptap;
105: PetscFreeSpaceList free_space=PETSC_NULL,current_space=PETSC_NULL;
106: Mat_MPIAIJ *a=(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data,*c;
107: Mat_SeqAIJ *ad=(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
108: Mat_SeqAIJ *p_loc,*p_oth;
109: PetscInt *pi_loc,*pj_loc,*pi_oth,*pj_oth,*pdti,*pdtj,*poti,*potj,*ptJ;
110: PetscInt *adi=ad->i,*aj,*aoi=ao->i,nnz;
111: PetscInt nlnk,*lnk,*owners_co,*coi,*coj,i,k,pnz,row;
112: PetscInt am=A->rmap->n,pN=P->cmap->N,pm=P->rmap->n,pn=P->cmap->n;
113: PetscBT lnkbt;
114: MPI_Comm comm=((PetscObject)A)->comm;
115: PetscMPIInt size,rank,tagi,tagj,*len_si,*len_s,*len_ri;
116: PetscInt **buf_rj,**buf_ri,**buf_ri_k;
117: PetscInt len,proc,*dnz,*onz,*owners;
118: PetscInt nzi,*bi,*bj;
119: PetscInt nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextci;
120: MPI_Request *swaits,*rwaits;
121: MPI_Status *sstatus,rstatus;
122: Mat_Merge_SeqsToMPI *merge;
123: PetscInt *api,*apj,*Jptr,apnz,*prmap=p->garray,pon,nspacedouble=0,j,ap_rmax=0;
124: PetscReal afill=1.0,afill_tmp;
125: PetscInt rstart = P->cmap->rstart,rmax;
126: PetscScalar *vals;
129: /* check if matrix local sizes are compatible */
130: if (A->rmap->rstart != P->rmap->rstart || A->rmap->rend != P->rmap->rend){
131: SETERRQ4(comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, Arow (%D, %D) != Prow (%D,%D)",A->rmap->rstart,A->rmap->rend,P->rmap->rstart,P->rmap->rend);
132: }
133: if (A->cmap->rstart != P->rmap->rstart || A->cmap->rend != P->rmap->rend){
134: SETERRQ4(comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, Acol (%D, %D) != Prow (%D,%D)",A->cmap->rstart,A->cmap->rend,P->rmap->rstart,P->rmap->rend);
135: }
137: MPI_Comm_size(comm,&size);
138: MPI_Comm_rank(comm,&rank);
140: /* create the struct Mat_PtAPMPI and attach it to C later */
141: PetscNew(Mat_PtAPMPI,&ptap);
142: ptap->reuse = MAT_INITIAL_MATRIX;
144:
145: /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
146: MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);
147: #if defined(DEBUG_MATPTAP)
148: if (!rank) PetscPrintf(PETSC_COMM_SELF,"[%d] P_oth is done...\n",rank);
149: #endif
151: /* get P_loc by taking all local rows of P */
152: MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&ptap->P_loc);
153: #if defined(DEBUG_MATPTAP)
154: MPI_Barrier(comm);
155: if (!rank) PetscPrintf(PETSC_COMM_SELF,"[%d] P_loc is done...\n",rank);
156: #endif
158: p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
159: p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;
160: pi_loc = p_loc->i; pj_loc = p_loc->j;
161: pi_oth = p_oth->i; pj_oth = p_oth->j;
163: /* first, compute symbolic AP = A_loc*P = A_diag*P_loc + A_off*P_oth */
164: /*-------------------------------------------------------------------*/
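/*
   Row-wise view of the loop below (a restatement for clarity, not new logic):
       AP[i,:] = sum over nonzero A[i,j] of A[i,j] * P[j,:]
   so the sparsity pattern of AP[i,:] is the union of the patterns of the rows P[j,:],
   taken from P_loc when column j lies in the diagonal block of A and from P_oth when
   it lies in the off-diagonal block; the sorted linked list lnk accumulates this union.
*/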
165: PetscMalloc((am+1)*sizeof(PetscInt),&api);
166: api[0] = 0;
168: /* create and initialize a linked list */
169: nlnk = pN+1;
170: #if defined(DEBUG_MATPTAP)
171: MPI_Barrier(comm);
172: if (!rank) PetscPrintf(PETSC_COMM_SELF,"[%d] call PetscLLCreate(), pN %D, P_rmax %D %D; Annz %D\n",rank,pN,p_loc->rmax,p_oth->rmax,adi[am]+aoi[am]);
173: #endif
174: PetscLLCreate(pN,pN,nlnk,lnk,lnkbt);
175: #if defined(DEBUG_MATPTAP)
176: MPI_Barrier(comm);
177: if (!rank) PetscPrintf(PETSC_COMM_SELF,"[%d] call PetscLLCreate() is done\n",rank);
178: #endif
180: /* Initial FreeSpace size is fill*(nnz(A) + nnz(P)) */
181: PetscFreeSpaceGet((PetscInt)(fill*(adi[am]+aoi[am]+pi_loc[pm])),&free_space);
182: current_space = free_space;
184: for (i=0; i<am; i++) {
185: apnz = 0;
186: /* diagonal portion of A */
187: nzi = adi[i+1] - adi[i];
188: aj = ad->j + adi[i];
189: for (j=0; j<nzi; j++){
190: row = aj[j];
191: pnz = pi_loc[row+1] - pi_loc[row];
192: Jptr = pj_loc + pi_loc[row];
193: /* add non-zero cols of P into the sorted linked list lnk */
194: PetscLLAddSorted(pnz,Jptr,pN,nlnk,lnk,lnkbt);
195: apnz += nlnk;
196: }
197: /* off-diagonal portion of A */
198: nzi = aoi[i+1] - aoi[i];
199: aj = ao->j + aoi[i];
200: for (j=0; j<nzi; j++){
201: row = aj[j];
202: pnz = pi_oth[row+1] - pi_oth[row];
203: Jptr = pj_oth + pi_oth[row];
204: PetscLLAddSorted(pnz,Jptr,pN,nlnk,lnk,lnkbt);
205: apnz += nlnk;
206: }
208: api[i+1] = api[i] + apnz;
209: if (ap_rmax < apnz) ap_rmax = apnz;
211: /* if free space is not available, double the total space in the list */
212: if (current_space->local_remaining<apnz) {
213: PetscFreeSpaceGet(apnz+current_space->total_array_size,&current_space);
214: nspacedouble++;
215: }
217: /* Copy data into free space, then initialize lnk */
218: PetscLLClean(pN,pN,apnz,lnk,current_space->array,lnkbt);
219: current_space->array += apnz;
220: current_space->local_used += apnz;
221: current_space->local_remaining -= apnz;
222: }
223: /* Allocate space for apj, initialize apj, and */
224: /* destroy list of free space and other temporary array(s) */
225: PetscMalloc((api[am]+1)*sizeof(PetscInt),&apj);
226: PetscFreeSpaceContiguous(&free_space,apj);
227: afill_tmp = (PetscReal)api[am]/(adi[am]+aoi[am]+pi_loc[pm]+1);
228: if (afill_tmp > afill) afill = afill_tmp;
230: /* determine symbolic Co=(p->B)^T*AP - send to others */
231: /*----------------------------------------------------*/
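/*
   Why Co is needed (a restatement): the locally owned rows of P split into the
   diagonal block P_d (= p->A) and the off-diagonal block P_o (= p->B), so the local
   contribution to C is
       P_loc^T*(A_loc*P) = P_d^T*(A*P) + P_o^T*(A*P)
   The rows of Co = P_o^T*(A*P) are owned by other processes; only their symbolic
   (i,j) structure is built here, then sent below and merged by the owning processes
   with their own P_d^T*(A*P).
*/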
232: MatGetSymbolicTranspose_SeqAIJ(p->B,&poti,&potj);
234: /* then, compute symbolic Co = (p->B)^T*AP */
235: pon = (p->B)->cmap->n; /* total num of rows of Co to be sent to other processors,
236: >= (num of nonzero rows of C_seq) - pn */
237: PetscMalloc((pon+1)*sizeof(PetscInt),&coi);
238: coi[0] = 0;
240: /* set initial free space to be fill*(nnz(p->B) + nnz(AP)) */
241: nnz = fill*(poti[pon] + api[am]);
242: PetscFreeSpaceGet(nnz,&free_space);
243: current_space = free_space;
245: for (i=0; i<pon; i++) {
246: nnz = 0;
247: pnz = poti[i+1] - poti[i];
248: ptJ = potj + poti[i];
249: for (j=0; j<pnz; j++){
250: row = ptJ[j]; /* row of AP == col of Pot */
251: apnz = api[row+1] - api[row];
252: Jptr = apj + api[row];
253: /* add non-zero cols of AP into the sorted linked list lnk */
254: PetscLLAddSorted(apnz,Jptr,pN,nlnk,lnk,lnkbt);
255: nnz += nlnk;
256: }
258: /* If free space is not available, double the total space in the list */
259: if (current_space->local_remaining<nnz) {
260: PetscFreeSpaceGet(nnz+current_space->total_array_size,&current_space);
261: nspacedouble++;
262: }
264: /* Copy data into free space, then reinitialize lnk */
265: PetscLLClean(pN,pN,nnz,lnk,current_space->array,lnkbt);
266: current_space->array += nnz;
267: current_space->local_used += nnz;
268: current_space->local_remaining -= nnz;
269: coi[i+1] = coi[i] + nnz;
270: }
271: PetscMalloc((coi[pon]+1)*sizeof(PetscInt),&coj);
272: PetscFreeSpaceContiguous(&free_space,coj);
273: afill_tmp = (PetscReal)coi[pon]/(poti[pon] + api[am]+1);
274: if (afill_tmp > afill) afill = afill_tmp;
275: MatRestoreSymbolicTranspose_SeqAIJ(p->B,&poti,&potj);
277: #if defined(DEBUG_MATPTAP)
278: MPI_Barrier(comm);
279: if (!rank) PetscPrintf(PETSC_COMM_SELF,"[%d] local Co is done\n",rank);
280: #endif
282: /* send j-array (coj) of Co to other processors */
283: /*----------------------------------------------*/
284: /* determine row ownership */
285: PetscNew(Mat_Merge_SeqsToMPI,&merge);
286: PetscLayoutCreate(comm,&merge->rowmap);
287: merge->rowmap->n = pn;
288: merge->rowmap->bs = 1;
289: PetscLayoutSetUp(merge->rowmap);
290: owners = merge->rowmap->range;
292: /* determine the number of messages to send, their lengths */
293: PetscMalloc(size*sizeof(PetscMPIInt),&len_si);
294: PetscMemzero(len_si,size*sizeof(PetscMPIInt));
295: PetscMalloc(size*sizeof(PetscMPIInt),&merge->len_s);
296: len_s = merge->len_s;
297: merge->nsend = 0;
298:
299: PetscMalloc((size+2)*sizeof(PetscInt),&owners_co);
300: PetscMemzero(len_s,size*sizeof(PetscMPIInt));
302: proc = 0;
303: for (i=0; i<pon; i++){
304: while (prmap[i] >= owners[proc+1]) proc++;
305: len_si[proc]++; /* num of rows in Co to be sent to [proc] */
306: len_s[proc] += coi[i+1] - coi[i];
307: }
309: len = 0; /* max length of buf_si[] */
310: owners_co[0] = 0;
311: for (proc=0; proc<size; proc++){
312: owners_co[proc+1] = owners_co[proc] + len_si[proc];
313: if (len_si[proc]){
314: merge->nsend++;
315: len_si[proc] = 2*(len_si[proc] + 1);
316: len += len_si[proc];
317: }
318: }
320: /* determine the number and length of messages to receive for coi and coj */
321: PetscGatherNumberOfMessages(comm,PETSC_NULL,len_s,&merge->nrecv);
322: PetscGatherMessageLengths2(comm,merge->nsend,merge->nrecv,len_s,len_si,&merge->id_r,&merge->len_r,&len_ri);
324: /* post the Irecv and Isend of coj */
325: PetscCommGetNewTag(comm,&tagj);
326: PetscPostIrecvInt(comm,tagj,merge->nrecv,merge->id_r,merge->len_r,&buf_rj,&rwaits);
327: PetscMalloc((merge->nsend+1)*sizeof(MPI_Request),&swaits);
328: for (proc=0, k=0; proc<size; proc++){
329: if (!len_s[proc]) continue;
330: i = owners_co[proc];
331: MPI_Isend(coj+coi[i],len_s[proc],MPIU_INT,proc,tagj,comm,swaits+k);
332: k++;
333: }
335: /* receives and sends of coj are complete */
336: PetscMalloc(size*sizeof(MPI_Status),&sstatus);
337: for (i=0; i<merge->nrecv; i++){
338: PetscMPIInt icompleted;
339: MPI_Waitany(merge->nrecv,rwaits,&icompleted,&rstatus);
340: }
341: PetscFree(rwaits);
342: if (merge->nsend) {MPI_Waitall(merge->nsend,swaits,sstatus);}
343:
344: /* send and recv coi */
345: /*-------------------*/
346: PetscCommGetNewTag(comm,&tagi);
347: PetscPostIrecvInt(comm,tagi,merge->nrecv,merge->id_r,len_ri,&buf_ri,&rwaits);
348: PetscMalloc((len+1)*sizeof(PetscInt),&buf_s);
349: buf_si = buf_s; /* points to the beginning of k-th msg to be sent */
350: for (proc=0,k=0; proc<size; proc++){
351: if (!len_s[proc]) continue;
352: /* form outgoing message for i-structure:
353: buf_si[0]: nrows to be sent
354: [1:nrows]: row index (local to the receiving process)
355: [nrows+1:2*nrows+1]: i-structure index
356: */
357: /*-------------------------------------------*/
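/*
   Worked example of the message layout (made-up numbers, for exposition only):
   sending two rows with local indices 3 and 7 carrying 2 and 5 nonzeros gives
       buf_si = { 2,   3, 7,   0, 2, 7 }
                nrows  rows    i-structure (running nonzero counts)
   and len_si[proc] = 2*(nrows+1) = 6.
*/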
358: nrows = len_si[proc]/2 - 1;
359: buf_si_i = buf_si + nrows+1;
360: buf_si[0] = nrows;
361: buf_si_i[0] = 0;
362: nrows = 0;
363: for (i=owners_co[proc]; i<owners_co[proc+1]; i++){
364: nzi = coi[i+1] - coi[i];
365: buf_si_i[nrows+1] = buf_si_i[nrows] + nzi; /* i-structure */
366: buf_si[nrows+1] = prmap[i] - owners[proc]; /* local row index */
367: nrows++;
368: }
369: MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,swaits+k);
370: k++;
371: buf_si += len_si[proc];
372: }
373: i = merge->nrecv;
374: while (i--) {
375: PetscMPIInt icompleted;
376: MPI_Waitany(merge->nrecv,rwaits,&icompleted,&rstatus);
377: }
378: PetscFree(rwaits);
379: if (merge->nsend) {MPI_Waitall(merge->nsend,swaits,sstatus);}
380: /*
381: PetscInfo2(A,"nsend: %d, nrecv: %d\n",merge->nsend,merge->nrecv);
382: for (i=0; i<merge->nrecv; i++){
383: PetscInfo3(A,"recv len_ri=%d, len_rj=%d from [%d]\n",len_ri[i],merge->len_r[i],merge->id_r[i]);
384: }
385: */
386: PetscFree(len_si);
387: PetscFree(len_ri);
388: PetscFree(swaits);
389: PetscFree(sstatus);
390: PetscFree(buf_s);
392: #if defined(DEBUG_MATPTAP)
393: MPI_Barrier(comm);
394: if (!rank) PetscPrintf(PETSC_COMM_SELF,"[%d] Co is done\n",rank);
395: #endif
397: /* compute the local portion of C (mpi mat) */
398: /*------------------------------------------*/
399: MatGetSymbolicTranspose_SeqAIJ(p->A,&pdti,&pdtj);
401: /* allocate bi array and free space for accumulating nonzero column info */
402: PetscMalloc((pn+1)*sizeof(PetscInt),&bi);
403: bi[0] = 0;
404:
405: /* set initial free space to be fill*(nnz(P) + nnz(AP)) */
406: nnz = fill*(pi_loc[pm] + api[am]);
407: PetscFreeSpaceGet(nnz,&free_space);
408: current_space = free_space;
410: PetscMalloc3(merge->nrecv,PetscInt**,&buf_ri_k,merge->nrecv,PetscInt*,&nextrow,merge->nrecv,PetscInt*,&nextci);
411: for (k=0; k<merge->nrecv; k++){
412: buf_ri_k[k] = buf_ri[k]; /* beginning of k-th received i-structure */
413: nrows = *buf_ri_k[k];
414: nextrow[k] = buf_ri_k[k] + 1; /* next row number of k-th received i-structure */
415: nextci[k] = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure entry of the k-th received message */
416: }
417: MatPreallocateInitialize(comm,pn,pn,dnz,onz);
418: rmax = 0;
419: for (i=0; i<pn; i++) {
420: /* add pdt[i,:]*AP into lnk */
421: nnz = 0;
422: pnz = pdti[i+1] - pdti[i];
423: ptJ = pdtj + pdti[i];
424: for (j=0; j<pnz; j++){
425: row = ptJ[j]; /* row of AP == col of Pt */
426: apnz = api[row+1] - api[row];
427: Jptr = apj + api[row];
428: /* add non-zero cols of AP into the sorted linked list lnk */
429: PetscLLAddSorted(apnz,Jptr,pN,nlnk,lnk,lnkbt);
430: nnz += nlnk;
431: }
432: /* add received col data into lnk */
433: for (k=0; k<merge->nrecv; k++){ /* k-th received message */
434: if (i == *nextrow[k]) { /* i-th row */
435: nzi = *(nextci[k]+1) - *nextci[k];
436: Jptr = buf_rj[k] + *nextci[k];
437: PetscLLAddSorted(nzi,Jptr,pN,nlnk,lnk,lnkbt);
438: nnz += nlnk;
439: nextrow[k]++; nextci[k]++;
440: }
441: }
443: /* if free space is not available, make more free space */
444: if (current_space->local_remaining<nnz) {
445: PetscFreeSpaceGet(nnz+current_space->total_array_size,&current_space);
446: nspacedouble++;
447: }
448: /* copy data into free space, then initialize lnk */
449: PetscLLClean(pN,pN,nnz,lnk,current_space->array,lnkbt);
450: MatPreallocateSet(i+owners[rank],nnz,current_space->array,dnz,onz);
451: current_space->array += nnz;
452: current_space->local_used += nnz;
453: current_space->local_remaining -= nnz;
454: bi[i+1] = bi[i] + nnz;
455: if (nnz > rmax) rmax = nnz;
456: }
457: MatRestoreSymbolicTranspose_SeqAIJ(p->A,&pdti,&pdtj);
458: PetscFree3(buf_ri_k,nextrow,nextci);
460: PetscMalloc((bi[pn]+1)*sizeof(PetscInt),&bj);
461: PetscFreeSpaceContiguous(&free_space,bj);
462: afill_tmp = (PetscReal)bi[pn]/(pi_loc[pm] + api[am]+1);
463: if (afill_tmp > afill) afill = afill_tmp;
464: PetscLLDestroy(lnk,lnkbt);
466: /* create symbolic parallel matrix Cmpi - why can this not be assembled in the numeric phase? - check runex55_SA */
467: /*------------------------------------------------------------------------------------------------------*/
468: PetscMalloc((rmax+1)*sizeof(PetscScalar),&vals);
469: PetscMemzero(vals,rmax*sizeof(PetscScalar));
471: MatCreate(comm,&Cmpi);
472: MatSetSizes(Cmpi,pn,pn,PETSC_DETERMINE,PETSC_DETERMINE);
473: MatSetBlockSizes(Cmpi,P->cmap->bs,P->cmap->bs);
474: MatSetType(Cmpi,MATMPIAIJ);
475: MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);
476: MatPreallocateFinalize(dnz,onz);
478: for (i=0; i<pn; i++){
479: row = i + rstart;
480: nnz = bi[i+1] - bi[i];
481: Jptr = bj + bi[i];
482: MatSetValues(Cmpi,1,&row,nnz,Jptr,vals,INSERT_VALUES);
483: }
484: MatAssemblyBegin(Cmpi,MAT_FINAL_ASSEMBLY);
485: MatAssemblyEnd(Cmpi,MAT_FINAL_ASSEMBLY);
486: PetscFree(vals);
488: merge->bi = bi;
489: merge->bj = bj;
490: merge->coi = coi;
491: merge->coj = coj;
492: merge->buf_ri = buf_ri;
493: merge->buf_rj = buf_rj;
494: merge->owners_co = owners_co;
495: merge->destroy = Cmpi->ops->destroy;
496: merge->duplicate = Cmpi->ops->duplicate;
498: /* Cmpi is not ready for use - assembly will be done by MatPtAPNumeric() */
499: /* Cmpi->assembled = PETSC_FALSE; */
500: Cmpi->ops->destroy = MatDestroy_MPIAIJ_PtAP;
501: Cmpi->ops->duplicate = MatDuplicate_MPIAIJ_MatPtAP;
503: /* attach the supporting struct to Cmpi for reuse */
504: c = (Mat_MPIAIJ*)Cmpi->data;
505: c->ptap = ptap;
506: ptap->api = api;
507: ptap->apj = apj;
508: ptap->merge = merge;
509: ptap->rmax = ap_rmax;
510:
511: *C = Cmpi;
512: #if defined(PETSC_USE_INFO)
513: if (bi[pn] != 0) {
514: PetscInfo3(Cmpi,"Reallocs %D; Fill ratio: given %G needed %G.\n",nspacedouble,fill,afill);
515: PetscInfo1(Cmpi,"Use MatPtAP(A,P,MatReuse,%G,&C) for best performance.\n",afill);
516: } else {
517: PetscInfo(Cmpi,"Empty matrix product\n");
518: }
519: #endif
520: return(0);
521: }
525: PetscErrorCode MatPtAPNumeric_MPIAIJ_MPIAIJ(Mat A,Mat P,Mat C)
526: {
527: PetscErrorCode ierr;
528: Mat_Merge_SeqsToMPI *merge;
529: Mat_MPIAIJ *a=(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data,*c=(Mat_MPIAIJ*)C->data;
530: Mat_SeqAIJ *ad=(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
531: Mat_SeqAIJ *pd=(Mat_SeqAIJ*)(p->A)->data,*po=(Mat_SeqAIJ*)(p->B)->data;
532: Mat_SeqAIJ *p_loc,*p_oth;
533: Mat_PtAPMPI *ptap;
534: PetscInt *adi=ad->i,*aoi=ao->i,*adj,*aoj,*apJ,nextp;
535: PetscInt *pi_loc,*pj_loc,*pi_oth,*pj_oth,*pJ,*pj;
536: PetscInt i,j,k,anz,pnz,apnz,nextap,row,*cj;
537: MatScalar *ada,*aoa,*apa,*pa,*ca,*pa_loc,*pa_oth,valtmp;
538: PetscInt am=A->rmap->n,cm=C->rmap->n,pon=(p->B)->cmap->n;
539: MPI_Comm comm=((PetscObject)C)->comm;
540: PetscMPIInt size,rank,taga,*len_s;
541: PetscInt *owners,proc,nrows,**buf_ri_k,**nextrow,**nextci;
542: PetscInt **buf_ri,**buf_rj;
543: PetscInt cnz=0,*bj_i,*bi,*bj,bnz,nextcj; /* bi,bj,ba: local array of C(mpi mat) */
544: MPI_Request *s_waits,*r_waits;
545: MPI_Status *status;
546: MatScalar **abuf_r,*ba_i,*pA,*coa,*ba;
547: PetscInt *api,*apj,*coi,*coj;
548: PetscInt *poJ=po->j,*pdJ=pd->j,pcstart=P->cmap->rstart,pcend=P->cmap->rend;
549: PetscInt sparse_axpy;
552: MPI_Comm_size(comm,&size);
553: MPI_Comm_rank(comm,&rank);
555: ptap = c->ptap;
556: if (!ptap) SETERRQ(((PetscObject)C)->comm,PETSC_ERR_ARG_INCOMP,"MatPtAP() has not been called to create matrix C yet, cannot use MAT_REUSE_MATRIX");
557: merge = ptap->merge;
559: /* 1) get P_oth = ptap->P_oth and P_loc = ptap->P_loc */
560: /*--------------------------------------------------*/
561: if (ptap->reuse == MAT_INITIAL_MATRIX){
562: ptap->reuse = MAT_REUSE_MATRIX;
563: } else { /* update numerical values of P_oth and P_loc */
564: MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_REUSE_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);
565: MatMPIAIJGetLocalMat(P,MAT_REUSE_MATRIX,&ptap->P_loc);
566: }
568: /* 2) compute numeric C_seq = P_loc^T*A_loc*P - dominating part */
569: /*--------------------------------------------------------------*/
570: /* get data from symbolic products */
571: p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
572: p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;
573: pi_loc=p_loc->i; pj_loc=p_loc->j; pJ=pj_loc; pa_loc=p_loc->a;
574: pi_oth=p_oth->i; pj_oth=p_oth->j; pa_oth=p_oth->a;
575:
576: coi = merge->coi; coj = merge->coj;
577: PetscMalloc((coi[pon]+1)*sizeof(MatScalar),&coa);
578: PetscMemzero(coa,coi[pon]*sizeof(MatScalar));
580: bi = merge->bi; bj = merge->bj;
581: owners = merge->rowmap->range;
582: PetscMalloc((bi[cm]+1)*sizeof(MatScalar),&ba);
583: PetscMemzero(ba,bi[cm]*sizeof(MatScalar));
584:
585: api = ptap->api; apj = ptap->apj;
586: /* flag 'sparse_axpy' determines which implementation is used:
587: 0: do dense axpy in MatPtAPNumeric() - fastest, but requires storage of a dense array apa; (default)
588: 1: do one sparse axpy - uses the same memory as sparse_axpy=0 and may execute fewer flops
589: (apnz instead of cnz per outer product), but is slower than case '0' unless cnz is much larger than apnz;
590: 2: do two sparse axpy in MatPtAPNumeric() - slowest, uses a sparse array apa */
591: /* set default sparse_axpy */
592: sparse_axpy = 0;
593: PetscOptionsGetInt(PETSC_NULL,"-matptap_sparseaxpy",&sparse_axpy,PETSC_NULL);
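/*
   Illustrative ways to select a kernel at run time (a sketch; the option name is the
   one queried above, the value 1 is just an example):
     command line:     ./app -matptap_sparseaxpy 1
     or in code, before MatPtAP() is called:
       ierr = PetscOptionsSetValue("-matptap_sparseaxpy","1");CHKERRQ(ierr);
*/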
594: if (sparse_axpy == 0){ /* Do not perform sparse axpy */
595: /*--------------------------------------------------*/
596: /* malloc apa to store dense row A[i,:]*P */
597: PetscMalloc((P->cmap->N)*sizeof(PetscScalar),&apa);
598: PetscMemzero(apa,P->cmap->N*sizeof(PetscScalar));
600: for (i=0; i<am; i++) {
601: /* 2-a) form i-th sparse row of A_loc*P = Ad*P_loc + Ao*P_oth */
602: /*------------------------------------------------------------*/
603: apJ = apj + api[i];
605: /* diagonal portion of A */
606: anz = adi[i+1] - adi[i];
607: adj = ad->j + adi[i];
608: ada = ad->a + adi[i];
609: for (j=0; j<anz; j++) {
610: row = adj[j];
611: pnz = pi_loc[row+1] - pi_loc[row];
612: pj = pj_loc + pi_loc[row];
613: pa = pa_loc + pi_loc[row];
615: /* perform dense axpy */
616: valtmp = ada[j];
617: for (k=0; k<pnz; k++){
618: apa[pj[k]] += valtmp*pa[k];
619: }
620: PetscLogFlops(2.0*pnz);
621: }
623: /* off-diagonal portion of A */
624: anz = aoi[i+1] - aoi[i];
625: aoj = ao->j + aoi[i];
626: aoa = ao->a + aoi[i];
627: for (j=0; j<anz; j++) {
628: row = aoj[j];
629: pnz = pi_oth[row+1] - pi_oth[row];
630: pj = pj_oth + pi_oth[row];
631: pa = pa_oth + pi_oth[row];
633: /* perform dense axpy */
634: valtmp = aoa[j];
635: for (k=0; k<pnz; k++){
636: apa[pj[k]] += valtmp*pa[k];
637: }
638: PetscLogFlops(2.0*pnz);
639: }
640:
641: /* 2-b) Compute Cseq = P_loc[i,:]^T*AP[i,:] using outer product */
642: /*--------------------------------------------------------------*/
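/*
   Outer-product view of this step (a restatement): with the dense row apa = AP[i,:],
       C += P[i,:]^T * AP[i,:]
   i.e. each nonzero P[i,col] adds P[i,col]*AP[i,:] to row 'col' of C; the contribution
   goes into Co when that row is owned by another process and into ba (Cd) otherwise.
*/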
643: apnz = api[i+1] - api[i];
644: /* put the value into Co=(p->B)^T*AP (off-diagonal part, send to others) */
645: pnz = po->i[i+1] - po->i[i];
646: poJ = po->j + po->i[i];
647: pA = po->a + po->i[i];
648: for (j=0; j<pnz; j++){
649: row = poJ[j];
650: cnz = coi[row+1] - coi[row];
651: cj = coj + coi[row];
652: ca = coa + coi[row];
653: /* perform dense axpy */
654: valtmp = pA[j];
655: for (k=0; k<cnz; k++) {
656: ca[k] += valtmp*apa[cj[k]];
657: }
658: PetscLogFlops(2.0*cnz);
659: }
660:
661: /* put the value into Cd (diagonal part) */
662: pnz = pd->i[i+1] - pd->i[i];
663: pdJ = pd->j + pd->i[i];
664: pA = pd->a + pd->i[i];
665: for (j=0; j<pnz; j++){
666: row = pdJ[j];
667: cnz = bi[row+1] - bi[row];
668: cj = bj + bi[row];
669: ca = ba + bi[row];
670: /* perform dense axpy */
671: valtmp = pA[j];
672: for (k=0; k<cnz; k++) {
673: ca[k] += valtmp*apa[cj[k]];
674: }
675: PetscLogFlops(2.0*cnz);
676: }
677:
678: /* zero the current row of A*P */
679: for (k=0; k<apnz; k++) apa[apJ[k]] = 0.0;
680: }
681: } else if (sparse_axpy == 1){ /* Perform one sparse axpy */
682: /*------------------------------------------------------*/
683: /* malloc apa to store dense row A[i,:]*P */
684: PetscMalloc((P->cmap->N)*sizeof(PetscScalar),&apa);
685: PetscMemzero(apa,P->cmap->N*sizeof(PetscScalar));
687: for (i=0; i<am; i++) {
688: /* 2-a) form i-th sparse row of A_loc*P = Ad*P_loc + Ao*P_oth */
689: /*------------------------------------------------------------*/
690: apJ = apj + api[i];
692: /* diagonal portion of A */
693: anz = adi[i+1] - adi[i];
694: adj = ad->j + adi[i];
695: ada = ad->a + adi[i];
696: for (j=0; j<anz; j++) {
697: row = adj[j];
698: pnz = pi_loc[row+1] - pi_loc[row];
699: pj = pj_loc + pi_loc[row];
700: pa = pa_loc + pi_loc[row];
702: /* perform dense axpy */
703: valtmp = ada[j];
704: for (k=0; k<pnz; k++){
705: apa[pj[k]] += valtmp*pa[k];
706: }
707: PetscLogFlops(2.0*pnz);
708: }
710: /* off-diagonal portion of A */
711: anz = aoi[i+1] - aoi[i];
712: aoj = ao->j + aoi[i];
713: aoa = ao->a + aoi[i];
714: for (j=0; j<anz; j++) {
715: row = aoj[j];
716: pnz = pi_oth[row+1] - pi_oth[row];
717: pj = pj_oth + pi_oth[row];
718: pa = pa_oth + pi_oth[row];
720: /* perform dense axpy */
721: valtmp = aoa[j];
722: for (k=0; k<pnz; k++){
723: apa[pj[k]] += valtmp*pa[k];
724: }
725: PetscLogFlops(2.0*pnz);
726: }
727:
728: /* 2-b) Compute Cseq = P_loc[i,:]^T*AP[i,:] using outer product */
729: /*--------------------------------------------------------------*/
730: apnz = api[i+1] - api[i];
731: /* put the value into Co=(p->B)^T*AP (off-diagonal part, send to others) */
732: pnz = po->i[i+1] - po->i[i];
733: poJ = po->j + po->i[i];
734: pA = po->a + po->i[i];
735: for (j=0; j<pnz; j++){
736: row = poJ[j];
737: cj = coj + coi[row];
738: ca = coa + coi[row];
739: valtmp = pA[j];
740: /* perform sparse axpy */
741: nextap = 0;
742: for (k=0; nextap<apnz; k++) {
743: if (cj[k]==apJ[nextap]) { /* global column index */
744: ca[k] += valtmp*apa[cj[k]]; nextap++;
745: }
746: }
747: PetscLogFlops(2.0*apnz);
748: }
749:
750: /* put the value into Cd (diagonal part) */
751: pnz = pd->i[i+1] - pd->i[i];
752: pdJ = pd->j + pd->i[i];
753: pA = pd->a + pd->i[i];
754: for (j=0; j<pnz; j++){
755: row = pdJ[j];
756: cj = bj + bi[row];
757: ca = ba + bi[row];
758: valtmp = pA[j];
759: /* perform sparse axpy */
760: nextap = 0;
761: for (k=0; nextap<apnz; k++) {
762: if (cj[k]==apJ[nextap]) { /* global column index */
763: ca[k] += valtmp*apa[cj[k]];
764: nextap++;
765: }
766: }
767: PetscLogFlops(2.0*apnz);
768: }
769:
770: /* zero the current row of A*P */
771: for (k=0; k<apnz; k++) apa[apJ[k]] = 0.0;
772: }
773: } else if (sparse_axpy == 2){/* Perform two sparse axpy */
774: /*----------------------------------------------------*/
775: /* malloc apa to store sparse row A[i,:]*P */
776: PetscMalloc((ptap->rmax+1)*sizeof(MatScalar),&apa);
777: PetscMemzero(apa,ptap->rmax*sizeof(MatScalar));
779: pA=pa_loc;
780: for (i=0; i<am; i++) {
781: /* form i-th sparse row of A*P */
782: apnz = api[i+1] - api[i];
783: apJ = apj + api[i];
784: /* diagonal portion of A */
785: anz = adi[i+1] - adi[i];
786: adj = ad->j + adi[i];
787: ada = ad->a + adi[i];
788: for (j=0; j<anz; j++) {
789: row = adj[j];
790: pnz = pi_loc[row+1] - pi_loc[row];
791: pj = pj_loc + pi_loc[row];
792: pa = pa_loc + pi_loc[row];
793: valtmp = ada[j];
794: nextp = 0;
795: for (k=0; nextp<pnz; k++) {
796: if (apJ[k] == pj[nextp]) { /* col of AP == col of P */
797: apa[k] += valtmp*pa[nextp++];
798: }
799: }
800: PetscLogFlops(2.0*pnz);
801: }
802: /* off-diagonal portion of A */
803: anz = aoi[i+1] - aoi[i];
804: aoj = ao->j + aoi[i];
805: aoa = ao->a + aoi[i];
806: for (j=0; j<anz; j++) {
807: row = aoj[j];
808: pnz = pi_oth[row+1] - pi_oth[row];
809: pj = pj_oth + pi_oth[row];
810: pa = pa_oth + pi_oth[row];
811: valtmp = aoa[j];
812: nextp = 0;
813: for (k=0; nextp<pnz; k++) {
814: if (apJ[k] == pj[nextp]) { /* col of AP == col of P */
815: apa[k] += valtmp*pa[nextp++];
816: }
817: }
818: PetscLogFlops(2.0*pnz);
819: }
820:
821: /* 2-b) Compute Cseq = P_loc[i,:]^T*AP[i,:] using outer product */
822: /*--------------------------------------------------------------*/
823: pnz = pi_loc[i+1] - pi_loc[i];
824: pJ = pj_loc + pi_loc[i];
825: for (j=0; j<pnz; j++) {
826: nextap = 0;
827: row = pJ[j]; /* global index */
828: if (row < pcstart || row >=pcend) { /* put the value into Co */
829: row = *poJ;
830: cj = coj + coi[row];
831: ca = coa + coi[row]; poJ++;
832: } else { /* put the value into Cd */
833: row = *pdJ;
834: cj = bj + bi[row];
835: ca = ba + bi[row]; pdJ++;
836: }
837: valtmp = pA[j];
838: for (k=0; nextap<apnz; k++) {
839: if (cj[k]==apJ[nextap]) ca[k] += valtmp*apa[nextap++];
840: }
841: PetscLogFlops(2.0*apnz);
842: }
843: pA += pnz;
844: /* zero the current row info for A*P */
845: PetscMemzero(apa,apnz*sizeof(MatScalar));
846: }
847: } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"sparse_axpy only takes values 0, 1 and 2");
848: PetscFree(apa);
849:
850: /* 3) send and recv matrix values coa */
851: /*------------------------------------*/
852: buf_ri = merge->buf_ri;
853: buf_rj = merge->buf_rj;
854: len_s = merge->len_s;
855: PetscCommGetNewTag(comm,&taga);
856: PetscPostIrecvScalar(comm,taga,merge->nrecv,merge->id_r,merge->len_r,&abuf_r,&r_waits);
858: PetscMalloc2(merge->nsend+1,MPI_Request,&s_waits,size,MPI_Status,&status);
859: for (proc=0,k=0; proc<size; proc++){
860: if (!len_s[proc]) continue;
861: i = merge->owners_co[proc];
862: MPI_Isend(coa+coi[i],len_s[proc],MPIU_MATSCALAR,proc,taga,comm,s_waits+k);
863: k++;
864: }
865: if (merge->nrecv) {MPI_Waitall(merge->nrecv,r_waits,status);}
866: if (merge->nsend) {MPI_Waitall(merge->nsend,s_waits,status);}
868: PetscFree2(s_waits,status);
869: PetscFree(r_waits);
870: PetscFree(coa);
872: /* 4) insert local Cseq and received values into Cmpi */
873: /*------------------------------------------------------*/
874: PetscMalloc3(merge->nrecv,PetscInt**,&buf_ri_k,merge->nrecv,PetscInt*,&nextrow,merge->nrecv,PetscInt*,&nextci);
875: for (k=0; k<merge->nrecv; k++){
876: buf_ri_k[k] = buf_ri[k]; /* beginning of k-th received i-structure */
877: nrows = *(buf_ri_k[k]);
878: nextrow[k] = buf_ri_k[k]+1; /* next row number of k-th received i-structure */
879: nextci[k] = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure entry of the k-th received message */
880: }
882: for (i=0; i<cm; i++) {
883: row = owners[rank] + i; /* global row index of C_seq */
884: bj_i = bj + bi[i]; /* col indices of the i-th row of C */
885: ba_i = ba + bi[i];
886: bnz = bi[i+1] - bi[i];
887: /* add received vals into ba */
888: for (k=0; k<merge->nrecv; k++){ /* k-th received message */
889: /* i-th row */
890: if (i == *nextrow[k]) {
891: cnz = *(nextci[k]+1) - *nextci[k];
892: cj = buf_rj[k] + *(nextci[k]);
893: ca = abuf_r[k] + *(nextci[k]);
894: nextcj = 0;
895: for (j=0; nextcj<cnz; j++){
896: if (bj_i[j] == cj[nextcj]){ /* bcol == ccol */
897: ba_i[j] += ca[nextcj++];
898: }
899: }
900: nextrow[k]++; nextci[k]++;
901: PetscLogFlops(2.0*cnz);
902: }
903: }
904: MatSetValues(C,1,&row,bnz,bj_i,ba_i,INSERT_VALUES);
905: }
906: MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);
907: MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);
909: PetscFree(ba);
910: PetscFree(abuf_r[0]);
911: PetscFree(abuf_r);
912: PetscFree3(buf_ri_k,nextrow,nextci);
913: return(0);
914: }