Actual source code: mpidense.c

petsc-master 2016-05-03
  2: /*
  3:    Basic functions for parallel dense matrices.
  4: */


  7: #include <../src/mat/impls/dense/mpi/mpidense.h>    /*I   "petscmat.h"  I*/
  8: #include <../src/mat/impls/aij/mpi/mpiaij.h>
  9: #include <petscblaslapack.h>

 13: /*@

 15:       MatDenseGetLocalMatrix - For a MATMPIDENSE or MATSEQDENSE matrix, returns the sequential
 16:               matrix that represents the local portion of the operator. For a sequential matrix it returns the matrix itself.

 18:     Input Parameter:
 19: .      A - the Seq or MPI dense matrix

 21:     Output Parameter:
 22: .      B - the inner matrix

 24:     Level: intermediate

 26: @*/
 27: PetscErrorCode MatDenseGetLocalMatrix(Mat A,Mat *B)
 28: {
 29:   Mat_MPIDense   *mat = (Mat_MPIDense*)A->data;
 31:   PetscBool      flg;

 34:   PetscObjectTypeCompare((PetscObject)A,MATMPIDENSE,&flg);
 35:   if (flg) *B = mat->A;
 36:   else *B = A;
 37:   return(0);
 38: }
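
/*
   Usage sketch (illustrative, not part of mpidense.c): obtain the local
   sequential block of a parallel dense matrix and access its raw column-major
   storage.  The matrix A and its dimensions are assumptions of the example.

     Mat         A,Aloc;
     PetscScalar *array;
     PetscInt    lrows,lcols;

     MatCreateDense(PETSC_COMM_WORLD,PETSC_DECIDE,PETSC_DECIDE,100,100,NULL,&A);
     MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
     MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
     MatDenseGetLocalMatrix(A,&Aloc);    /* Aloc is MATSEQDENSE; no new reference is taken */
     MatGetSize(Aloc,&lrows,&lcols);     /* local rows by global columns */
     MatDenseGetArray(Aloc,&array);
     MatDenseRestoreArray(Aloc,&array);
     MatDestroy(&A);
*/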

 42: PetscErrorCode MatGetRow_MPIDense(Mat A,PetscInt row,PetscInt *nz,PetscInt **idx,PetscScalar **v)
 43: {
 44:   Mat_MPIDense   *mat = (Mat_MPIDense*)A->data;
 46:   PetscInt       lrow,rstart = A->rmap->rstart,rend = A->rmap->rend;

 49:   if (row < rstart || row >= rend) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"only local rows");
 50:   lrow = row - rstart;
 51:   MatGetRow(mat->A,lrow,nz,(const PetscInt**)idx,(const PetscScalar**)v);
 52:   return(0);
 53: }

 57: PetscErrorCode MatRestoreRow_MPIDense(Mat mat,PetscInt row,PetscInt *nz,PetscInt **idx,PetscScalar **v)
 58: {

 62:   if (idx) {PetscFree(*idx);}
 63:   if (v) {PetscFree(*v);}
 64:   return(0);
 65: }

 69: PetscErrorCode  MatGetDiagonalBlock_MPIDense(Mat A,Mat *a)
 70: {
 71:   Mat_MPIDense   *mdn = (Mat_MPIDense*)A->data;
 73:   PetscInt       m = A->rmap->n,rstart = A->rmap->rstart;
 74:   PetscScalar    *array;
 75:   MPI_Comm       comm;
 76:   Mat            B;

 79:   if (A->rmap->N != A->cmap->N) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Only square matrices supported.");

 81:   PetscObjectQuery((PetscObject)A,"DiagonalBlock",(PetscObject*)&B);
 82:   if (!B) {
 83:     PetscObjectGetComm((PetscObject)(mdn->A),&comm);
 84:     MatCreate(comm,&B);
 85:     MatSetSizes(B,m,m,m,m);
 86:     MatSetType(B,((PetscObject)mdn->A)->type_name);
 87:     MatDenseGetArray(mdn->A,&array);
 88:     MatSeqDenseSetPreallocation(B,array+m*rstart);
 89:     MatDenseRestoreArray(mdn->A,&array);
 90:     MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);
 91:     MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);
 92:     PetscObjectCompose((PetscObject)A,"DiagonalBlock",(PetscObject)B);
 93:     *a   = B;
 94:     MatDestroy(&B);
 95:   } else *a = B;
 96:   return(0);
 97: }
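
/*
   Usage sketch (illustrative): the diagonal block is cached on A under the name
   "DiagonalBlock", so repeated calls return the same sequential matrix without
   copying.  A is an assumed square, assembled MATMPIDENSE matrix.

     Mat diag;

     MatGetDiagonalBlock(A,&diag);   /* borrowed reference; do not MatDestroy() it */
*/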

101: PetscErrorCode MatSetValues_MPIDense(Mat mat,PetscInt m,const PetscInt idxm[],PetscInt n,const PetscInt idxn[],const PetscScalar v[],InsertMode addv)
102: {
103:   Mat_MPIDense   *A = (Mat_MPIDense*)mat->data;
105:   PetscInt       i,j,rstart = mat->rmap->rstart,rend = mat->rmap->rend,row;
106:   PetscBool      roworiented = A->roworiented;

109:   for (i=0; i<m; i++) {
110:     if (idxm[i] < 0) continue;
111:     if (idxm[i] >= mat->rmap->N) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Row too large");
112:     if (idxm[i] >= rstart && idxm[i] < rend) {
113:       row = idxm[i] - rstart;
114:       if (roworiented) {
115:         MatSetValues(A->A,1,&row,n,idxn,v+i*n,addv);
116:       } else {
117:         for (j=0; j<n; j++) {
118:           if (idxn[j] < 0) continue;
119:           if (idxn[j] >= mat->cmap->N) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Column too large");
120:           MatSetValues(A->A,1,&row,1,&idxn[j],v+i+j*m,addv);
121:         }
122:       }
123:     } else if (!A->donotstash) {
124:       mat->assembled = PETSC_FALSE;
125:       if (roworiented) {
126:         MatStashValuesRow_Private(&mat->stash,idxm[i],n,idxn,v+i*n,PETSC_FALSE);
127:       } else {
128:         MatStashValuesCol_Private(&mat->stash,idxm[i],n,idxn,v+i,m,PETSC_FALSE);
129:       }
130:     }
131:   }
132:   return(0);
133: }

137: PetscErrorCode MatGetValues_MPIDense(Mat mat,PetscInt m,const PetscInt idxm[],PetscInt n,const PetscInt idxn[],PetscScalar v[])
138: {
139:   Mat_MPIDense   *mdn = (Mat_MPIDense*)mat->data;
141:   PetscInt       i,j,rstart = mat->rmap->rstart,rend = mat->rmap->rend,row;

144:   for (i=0; i<m; i++) {
145:     if (idxm[i] < 0) continue; /* SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Negative row"); */
146:     if (idxm[i] >= mat->rmap->N) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Row too large");
147:     if (idxm[i] >= rstart && idxm[i] < rend) {
148:       row = idxm[i] - rstart;
149:       for (j=0; j<n; j++) {
150:         if (idxn[j] < 0) continue; /* SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Negative column"); */
151:         if (idxn[j] >= mat->cmap->N) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Column too large");
152:         MatGetValues(mdn->A,1,&row,1,&idxn[j],v+i*n+j);
153:       }
154:     } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Only local values currently supported");
155:   }
156:   return(0);
157: }

161: PetscErrorCode MatDenseGetArray_MPIDense(Mat A,PetscScalar *array[])
162: {
163:   Mat_MPIDense   *a = (Mat_MPIDense*)A->data;

167:   MatDenseGetArray(a->A,array);
168:   return(0);
169: }

173: static PetscErrorCode MatGetSubMatrix_MPIDense(Mat A,IS isrow,IS iscol,MatReuse scall,Mat *B)
174: {
175:   Mat_MPIDense   *mat  = (Mat_MPIDense*)A->data,*newmatd;
176:   Mat_SeqDense   *lmat = (Mat_SeqDense*)mat->A->data;
178:   PetscInt       i,j,rstart,rend,nrows,ncols,Ncols,nlrows,nlcols;
179:   const PetscInt *irow,*icol;
180:   PetscScalar    *av,*bv,*v = lmat->v;
181:   Mat            newmat;
182:   IS             iscol_local;

185:   ISAllGather(iscol,&iscol_local);
186:   ISGetIndices(isrow,&irow);
187:   ISGetIndices(iscol_local,&icol);
188:   ISGetLocalSize(isrow,&nrows);
189:   ISGetLocalSize(iscol,&ncols);
190:   ISGetSize(iscol,&Ncols); /* global number of columns, size of iscol_local */

 192:   /* No parallel redistribution currently supported! Should really check each index set
 193:      to confirm that it is OK.  ... Currently supports only submatrices with the same
 194:      partitioning as the original matrix! */

196:   MatGetLocalSize(A,&nlrows,&nlcols);
197:   MatGetOwnershipRange(A,&rstart,&rend);

199:   /* Check submatrix call */
200:   if (scall == MAT_REUSE_MATRIX) {
201:     /* SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Reused submatrix wrong size"); */
202:     /* Really need to test rows and column sizes! */
203:     newmat = *B;
204:   } else {
205:     /* Create and fill new matrix */
206:     MatCreate(PetscObjectComm((PetscObject)A),&newmat);
207:     MatSetSizes(newmat,nrows,ncols,PETSC_DECIDE,Ncols);
208:     MatSetType(newmat,((PetscObject)A)->type_name);
209:     MatMPIDenseSetPreallocation(newmat,NULL);
210:   }

212:   /* Now extract the data pointers and do the copy, column at a time */
213:   newmatd = (Mat_MPIDense*)newmat->data;
214:   bv      = ((Mat_SeqDense*)newmatd->A->data)->v;

216:   for (i=0; i<Ncols; i++) {
217:     av = v + ((Mat_SeqDense*)mat->A->data)->lda*icol[i];
218:     for (j=0; j<nrows; j++) {
219:       *bv++ = av[irow[j] - rstart];
220:     }
221:   }

223:   /* Assemble the matrices so that the correct flags are set */
224:   MatAssemblyBegin(newmat,MAT_FINAL_ASSEMBLY);
225:   MatAssemblyEnd(newmat,MAT_FINAL_ASSEMBLY);

227:   /* Free work space */
228:   ISRestoreIndices(isrow,&irow);
229:   ISRestoreIndices(iscol_local,&icol);
230:   ISDestroy(&iscol_local);
231:   *B   = newmat;
232:   return(0);
233: }
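
/*
   Usage sketch (illustrative): as the comment in the routine warns, there is no
   parallel redistribution, so the index sets should keep the original layout.
   Stride index sets over the ownership ranges do exactly that.  A is an assumed
   assembled MATMPIDENSE matrix.

     IS       isrow,iscol;
     PetscInt rstart,rend,cstart,cend;
     Mat      sub;

     MatGetOwnershipRange(A,&rstart,&rend);
     MatGetOwnershipRangeColumn(A,&cstart,&cend);
     ISCreateStride(PETSC_COMM_WORLD,rend-rstart,rstart,1,&isrow);
     ISCreateStride(PETSC_COMM_WORLD,cend-cstart,cstart,1,&iscol);
     MatGetSubMatrix(A,isrow,iscol,MAT_INITIAL_MATRIX,&sub);
     ISDestroy(&isrow);
     ISDestroy(&iscol);
     MatDestroy(&sub);
*/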

237: PetscErrorCode MatDenseRestoreArray_MPIDense(Mat A,PetscScalar *array[])
238: {
239:   Mat_MPIDense   *a = (Mat_MPIDense*)A->data;

243:   MatDenseRestoreArray(a->A,array);
244:   return(0);
245: }

249: PetscErrorCode MatAssemblyBegin_MPIDense(Mat mat,MatAssemblyType mode)
250: {
251:   Mat_MPIDense   *mdn = (Mat_MPIDense*)mat->data;
252:   MPI_Comm       comm;
254:   PetscInt       nstash,reallocs;
255:   InsertMode     addv;

258:   PetscObjectGetComm((PetscObject)mat,&comm);
 259:   /* make sure all processors are in either INSERT_VALUES or ADD_VALUES mode */
260:   MPIU_Allreduce((PetscEnum*)&mat->insertmode,(PetscEnum*)&addv,1,MPIU_ENUM,MPI_BOR,comm);
261:   if (addv == (ADD_VALUES|INSERT_VALUES)) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Cannot mix adds/inserts on different procs");
262:   mat->insertmode = addv; /* in case this processor had no cache */

264:   MatStashScatterBegin_Private(mat,&mat->stash,mat->rmap->range);
265:   MatStashGetInfo_Private(&mat->stash,&nstash,&reallocs);
266:   PetscInfo2(mdn->A,"Stash has %D entries, uses %D mallocs.\n",nstash,reallocs);
267:   return(0);
268: }

272: PetscErrorCode MatAssemblyEnd_MPIDense(Mat mat,MatAssemblyType mode)
273: {
274:   Mat_MPIDense   *mdn=(Mat_MPIDense*)mat->data;
276:   PetscInt       i,*row,*col,flg,j,rstart,ncols;
277:   PetscMPIInt    n;
278:   PetscScalar    *val;

281:   /*  wait on receives */
282:   while (1) {
283:     MatStashScatterGetMesg_Private(&mat->stash,&n,&row,&col,&val,&flg);
284:     if (!flg) break;

286:     for (i=0; i<n;) {
287:       /* Now identify the consecutive vals belonging to the same row */
288:       for (j=i,rstart=row[j]; j<n; j++) {
289:         if (row[j] != rstart) break;
290:       }
291:       if (j < n) ncols = j-i;
292:       else       ncols = n-i;
293:       /* Now assemble all these values with a single function call */
294:       MatSetValues_MPIDense(mat,1,row+i,ncols,col+i,val+i,mat->insertmode);
295:       i    = j;
296:     }
297:   }
298:   MatStashScatterEnd_Private(&mat->stash);

300:   MatAssemblyBegin(mdn->A,mode);
301:   MatAssemblyEnd(mdn->A,mode);

303:   if (!mat->was_assembled && mode == MAT_FINAL_ASSEMBLY) {
304:     MatSetUpMultiply_MPIDense(mat);
305:   }
306:   return(0);
307: }
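
/*
   Usage sketch (illustrative): MatSetValues_MPIDense() stashes entries that
   belong to rows owned by another process, and the two assembly routines above
   scatter and apply the stash.  Any process may therefore set any global entry,
   provided all processes use the same InsertMode and both assembly calls are
   made.  A is an assumed MATMPIDENSE matrix.

     PetscInt    row = 0,col = 0;        /* owned by process 0 */
     PetscScalar one = 1.0;

     MatSetValues(A,1,&row,1,&col,&one,ADD_VALUES);  /* legal on every process */
     MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
     MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
*/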

311: PetscErrorCode MatZeroEntries_MPIDense(Mat A)
312: {
314:   Mat_MPIDense   *l = (Mat_MPIDense*)A->data;

317:   MatZeroEntries(l->A);
318:   return(0);
319: }

321: /* the code does not do the diagonal entries correctly unless the
 322:    matrix is square and the column and row ownerships are identical.
323:    This is a BUG. The only way to fix it seems to be to access
324:    mdn->A and mdn->B directly and not through the MatZeroRows()
325:    routine.
326: */
329: PetscErrorCode MatZeroRows_MPIDense(Mat A,PetscInt N,const PetscInt rows[],PetscScalar diag,Vec x,Vec b)
330: {
331:   Mat_MPIDense      *l = (Mat_MPIDense*)A->data;
332:   PetscErrorCode    ierr;
333:   PetscInt          i,*owners = A->rmap->range;
334:   PetscInt          *sizes,j,idx,nsends;
335:   PetscInt          nmax,*svalues,*starts,*owner,nrecvs;
336:   PetscInt          *rvalues,tag = ((PetscObject)A)->tag,count,base,slen,*source;
337:   PetscInt          *lens,*lrows,*values;
338:   PetscMPIInt       n,imdex,rank = l->rank,size = l->size;
339:   MPI_Comm          comm;
340:   MPI_Request       *send_waits,*recv_waits;
341:   MPI_Status        recv_status,*send_status;
342:   PetscBool         found;
343:   const PetscScalar *xx;
344:   PetscScalar       *bb;

347:   PetscObjectGetComm((PetscObject)A,&comm);
348:   if (A->rmap->N != A->cmap->N) SETERRQ(comm,PETSC_ERR_SUP,"Only handles square matrices");
349:   if (A->rmap->n != A->cmap->n) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Only handles matrices with identical column and row ownership");
350:   /*  first count number of contributors to each processor */
351:   PetscCalloc1(2*size,&sizes);
352:   PetscMalloc1(N+1,&owner);  /* see note*/
353:   for (i=0; i<N; i++) {
354:     idx   = rows[i];
355:     found = PETSC_FALSE;
356:     for (j=0; j<size; j++) {
357:       if (idx >= owners[j] && idx < owners[j+1]) {
358:         sizes[2*j]++; sizes[2*j+1] = 1; owner[i] = j; found = PETSC_TRUE; break;
359:       }
360:     }
361:     if (!found) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Index out of range");
362:   }
363:   nsends = 0;
364:   for (i=0; i<size; i++) nsends += sizes[2*i+1];

366:   /* inform other processors of number of messages and max length*/
367:   PetscMaxSum(comm,sizes,&nmax,&nrecvs);

369:   /* post receives:   */
370:   PetscMalloc1((nrecvs+1)*(nmax+1),&rvalues);
371:   PetscMalloc1(nrecvs+1,&recv_waits);
372:   for (i=0; i<nrecvs; i++) {
373:     MPI_Irecv(rvalues+nmax*i,nmax,MPIU_INT,MPI_ANY_SOURCE,tag,comm,recv_waits+i);
374:   }

376:   /* do sends:
377:       1) starts[i] gives the starting index in svalues for stuff going to
378:          the ith processor
379:   */
380:   PetscMalloc1(N+1,&svalues);
381:   PetscMalloc1(nsends+1,&send_waits);
382:   PetscMalloc1(size+1,&starts);

384:   starts[0] = 0;
385:   for (i=1; i<size; i++) starts[i] = starts[i-1] + sizes[2*i-2];
386:   for (i=0; i<N; i++) svalues[starts[owner[i]]++] = rows[i];

388:   starts[0] = 0;
389:   for (i=1; i<size+1; i++) starts[i] = starts[i-1] + sizes[2*i-2];
390:   count = 0;
391:   for (i=0; i<size; i++) {
392:     if (sizes[2*i+1]) {
393:       MPI_Isend(svalues+starts[i],sizes[2*i],MPIU_INT,i,tag,comm,send_waits+count++);
394:     }
395:   }
396:   PetscFree(starts);

398:   base = owners[rank];

400:   /*  wait on receives */
401:   PetscMalloc2(nrecvs,&lens,nrecvs,&source);
402:   count = nrecvs;
403:   slen  = 0;
404:   while (count) {
405:     MPI_Waitany(nrecvs,recv_waits,&imdex,&recv_status);
406:     /* unpack receives into our local space */
407:     MPI_Get_count(&recv_status,MPIU_INT,&n);

409:     source[imdex] = recv_status.MPI_SOURCE;
410:     lens[imdex]   = n;
411:     slen += n;
412:     count--;
413:   }
414:   PetscFree(recv_waits);

416:   /* move the data into the send scatter */
417:   PetscMalloc1(slen+1,&lrows);
418:   count = 0;
419:   for (i=0; i<nrecvs; i++) {
420:     values = rvalues + i*nmax;
421:     for (j=0; j<lens[i]; j++) {
422:       lrows[count++] = values[j] - base;
423:     }
424:   }
425:   PetscFree(rvalues);
426:   PetscFree2(lens,source);
427:   PetscFree(owner);
428:   PetscFree(sizes);

430:   /* fix right hand side if needed */
431:   if (x && b) {
432:     VecGetArrayRead(x,&xx);
433:     VecGetArray(b,&bb);
434:     for (i=0; i<slen; i++) {
435:       bb[lrows[i]] = diag*xx[lrows[i]];
436:     }
437:     VecRestoreArrayRead(x,&xx);
438:     VecRestoreArray(b,&bb);
439:   }

441:   /* actually zap the local rows */
442:   MatZeroRows(l->A,slen,lrows,0.0,0,0);
443:   if (diag != 0.0) {
444:     Mat_SeqDense *ll = (Mat_SeqDense*)l->A->data;
445:     PetscInt     m   = ll->lda, i;

447:     for (i=0; i<slen; i++) {
448:       ll->v[lrows[i] + m*(A->cmap->rstart + lrows[i])] = diag;
449:     }
450:   }
451:   PetscFree(lrows);

453:   /* wait on sends */
454:   if (nsends) {
455:     PetscMalloc1(nsends,&send_status);
456:     MPI_Waitall(nsends,send_waits,send_status);
457:     PetscFree(send_status);
458:   }
459:   PetscFree(send_waits);
460:   PetscFree(svalues);
461:   return(0);
462: }
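
/*
   Usage sketch (illustrative): zero rows of a square MATMPIDENSE matrix while
   fixing the right-hand side, as for Dirichlet boundary conditions.  Rows may be
   listed by any process; the routine above ships each index to its owner.
   A, x and b are assumptions of the example.

     PetscInt rows[2] = {0,1};

     MatZeroRows(A,2,rows,1.0,x,b);  /* zero rows 0 and 1, put 1.0 on the diagonal, set b[rows] = x[rows] */
*/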

464: PETSC_INTERN PetscErrorCode MatMult_SeqDense(Mat,Vec,Vec);
465: PETSC_INTERN PetscErrorCode MatMultAdd_SeqDense(Mat,Vec,Vec,Vec);
466: PETSC_INTERN PetscErrorCode MatMultTranspose_SeqDense(Mat,Vec,Vec);
467: PETSC_INTERN PetscErrorCode MatMultTransposeAdd_SeqDense(Mat,Vec,Vec,Vec);

471: PetscErrorCode MatMult_MPIDense(Mat mat,Vec xx,Vec yy)
472: {
473:   Mat_MPIDense   *mdn = (Mat_MPIDense*)mat->data;

477:   VecScatterBegin(mdn->Mvctx,xx,mdn->lvec,INSERT_VALUES,SCATTER_FORWARD);
478:   VecScatterEnd(mdn->Mvctx,xx,mdn->lvec,INSERT_VALUES,SCATTER_FORWARD);
479:   MatMult_SeqDense(mdn->A,mdn->lvec,yy);
480:   return(0);
481: }

485: PetscErrorCode MatMultAdd_MPIDense(Mat mat,Vec xx,Vec yy,Vec zz)
486: {
487:   Mat_MPIDense   *mdn = (Mat_MPIDense*)mat->data;

491:   VecScatterBegin(mdn->Mvctx,xx,mdn->lvec,INSERT_VALUES,SCATTER_FORWARD);
492:   VecScatterEnd(mdn->Mvctx,xx,mdn->lvec,INSERT_VALUES,SCATTER_FORWARD);
493:   MatMultAdd_SeqDense(mdn->A,mdn->lvec,yy,zz);
494:   return(0);
495: }

499: PetscErrorCode MatMultTranspose_MPIDense(Mat A,Vec xx,Vec yy)
500: {
501:   Mat_MPIDense   *a = (Mat_MPIDense*)A->data;
503:   PetscScalar    zero = 0.0;

506:   VecSet(yy,zero);
507:   MatMultTranspose_SeqDense(a->A,xx,a->lvec);
508:   VecScatterBegin(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);
509:   VecScatterEnd(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);
510:   return(0);
511: }

515: PetscErrorCode MatMultTransposeAdd_MPIDense(Mat A,Vec xx,Vec yy,Vec zz)
516: {
517:   Mat_MPIDense   *a = (Mat_MPIDense*)A->data;

521:   VecCopy(yy,zz);
522:   MatMultTranspose_SeqDense(a->A,xx,a->lvec);
523:   VecScatterBegin(a->Mvctx,a->lvec,zz,ADD_VALUES,SCATTER_REVERSE);
524:   VecScatterEnd(a->Mvctx,a->lvec,zz,ADD_VALUES,SCATTER_REVERSE);
525:   return(0);
526: }
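
/*
   Usage sketch (illustrative): all four products above scatter through Mvctx, so
   the vectors must match the matrix layouts; MatCreateVecs() builds compatible
   ones.  A is an assumed assembled MATMPIDENSE matrix.

     Vec x,y;

     MatCreateVecs(A,&x,&y);   /* x conforms to the columns, y to the rows */
     VecSet(x,1.0);
     MatMult(A,x,y);           /* y = A*x:  forward scatter, then local multiply */
     MatMultTranspose(A,y,x);  /* x = A'*y: local multiply, then reverse-add scatter */
     VecDestroy(&x);
     VecDestroy(&y);
*/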

530: PetscErrorCode MatGetDiagonal_MPIDense(Mat A,Vec v)
531: {
532:   Mat_MPIDense   *a    = (Mat_MPIDense*)A->data;
533:   Mat_SeqDense   *aloc = (Mat_SeqDense*)a->A->data;
535:   PetscInt       len,i,n,m = A->rmap->n,radd;
536:   PetscScalar    *x,zero = 0.0;

539:   VecSet(v,zero);
540:   VecGetArray(v,&x);
541:   VecGetSize(v,&n);
542:   if (n != A->rmap->N) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Nonconforming mat and vec");
543:   len  = PetscMin(a->A->rmap->n,a->A->cmap->n);
544:   radd = A->rmap->rstart*m;
545:   for (i=0; i<len; i++) {
546:     x[i] = aloc->v[radd + i*m + i];
547:   }
548:   VecRestoreArray(v,&x);
549:   return(0);
550: }

554: PetscErrorCode MatDestroy_MPIDense(Mat mat)
555: {
556:   Mat_MPIDense   *mdn = (Mat_MPIDense*)mat->data;

560: #if defined(PETSC_USE_LOG)
561:   PetscLogObjectState((PetscObject)mat,"Rows=%D, Cols=%D",mat->rmap->N,mat->cmap->N);
562: #endif
563:   MatStashDestroy_Private(&mat->stash);
564:   MatDestroy(&mdn->A);
565:   VecDestroy(&mdn->lvec);
566:   VecScatterDestroy(&mdn->Mvctx);

568:   PetscFree(mat->data);
569:   PetscObjectChangeTypeName((PetscObject)mat,0);

571:   PetscObjectComposeFunction((PetscObject)mat,"MatDenseGetArray_C",NULL);
572:   PetscObjectComposeFunction((PetscObject)mat,"MatDenseRestoreArray_C",NULL);
573: #if defined(PETSC_HAVE_ELEMENTAL)
574:   PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpidense_elemental_C",NULL);
575: #endif
576:   PetscObjectComposeFunction((PetscObject)mat,"MatGetDiagonalBlock_C",NULL);
577:   PetscObjectComposeFunction((PetscObject)mat,"MatMPIDenseSetPreallocation_C",NULL);
578:   PetscObjectComposeFunction((PetscObject)mat,"MatMatMult_mpiaij_mpidense_C",NULL);
579:   PetscObjectComposeFunction((PetscObject)mat,"MatMatMultSymbolic_mpiaij_mpidense_C",NULL);
580:   PetscObjectComposeFunction((PetscObject)mat,"MatMatMultNumeric_mpiaij_mpidense_C",NULL);
581:   PetscObjectComposeFunction((PetscObject)mat,"MatTransposeMatMult_mpiaij_mpidense_C",NULL);
582:   PetscObjectComposeFunction((PetscObject)mat,"MatTransposeMatMultSymbolic_mpiaij_mpidense_C",NULL);
583:   PetscObjectComposeFunction((PetscObject)mat,"MatTransposeMatMultNumeric_mpiaij_mpidense_C",NULL);
584:   return(0);
585: }

589: static PetscErrorCode MatView_MPIDense_Binary(Mat mat,PetscViewer viewer)
590: {
591:   Mat_MPIDense      *mdn = (Mat_MPIDense*)mat->data;
592:   PetscErrorCode    ierr;
593:   PetscViewerFormat format;
594:   int               fd;
595:   PetscInt          header[4],mmax,N = mat->cmap->N,i,j,m,k;
596:   PetscMPIInt       rank,tag  = ((PetscObject)viewer)->tag,size;
597:   PetscScalar       *work,*v,*vv;
598:   Mat_SeqDense      *a = (Mat_SeqDense*)mdn->A->data;

601:   if (mdn->size == 1) {
602:     MatView(mdn->A,viewer);
603:   } else {
604:     PetscViewerBinaryGetDescriptor(viewer,&fd);
605:     MPI_Comm_rank(PetscObjectComm((PetscObject)mat),&rank);
606:     MPI_Comm_size(PetscObjectComm((PetscObject)mat),&size);

608:     PetscViewerGetFormat(viewer,&format);
609:     if (format == PETSC_VIEWER_NATIVE) {

611:       if (!rank) {
612:         /* store the matrix as a dense matrix */
613:         header[0] = MAT_FILE_CLASSID;
614:         header[1] = mat->rmap->N;
615:         header[2] = N;
616:         header[3] = MATRIX_BINARY_FORMAT_DENSE;
617:         PetscBinaryWrite(fd,header,4,PETSC_INT,PETSC_TRUE);

619:         /* get largest work array needed for transposing array */
620:         mmax = mat->rmap->n;
621:         for (i=1; i<size; i++) {
622:           mmax = PetscMax(mmax,mat->rmap->range[i+1] - mat->rmap->range[i]);
623:         }
624:         PetscMalloc1(mmax*N,&work);

626:         /* write out local array, by rows */
627:         m = mat->rmap->n;
628:         v = a->v;
629:         for (j=0; j<N; j++) {
630:           for (i=0; i<m; i++) {
631:             work[j + i*N] = *v++;
632:           }
633:         }
634:         PetscBinaryWrite(fd,work,m*N,PETSC_SCALAR,PETSC_FALSE);
635:         /* get largest work array to receive messages from other processes, excludes process zero */
636:         mmax = 0;
637:         for (i=1; i<size; i++) {
638:           mmax = PetscMax(mmax,mat->rmap->range[i+1] - mat->rmap->range[i]);
639:         }
640:         PetscMalloc1(mmax*N,&vv);
641:         for (k = 1; k < size; k++) {
642:           v    = vv;
643:           m    = mat->rmap->range[k+1] - mat->rmap->range[k];
644:           MPIULong_Recv(v,m*N,MPIU_SCALAR,k,tag,PetscObjectComm((PetscObject)mat));

646:           for (j = 0; j < N; j++) {
647:             for (i = 0; i < m; i++) {
648:               work[j + i*N] = *v++;
649:             }
650:           }
651:           PetscBinaryWrite(fd,work,m*N,PETSC_SCALAR,PETSC_FALSE);
652:         }
653:         PetscFree(work);
654:         PetscFree(vv);
655:       } else {
656:         MPIULong_Send(a->v,mat->rmap->n*mat->cmap->N,MPIU_SCALAR,0,tag,PetscObjectComm((PetscObject)mat));
657:       }
658:     } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"To store a parallel dense matrix you must first call PetscViewerPushFormat(viewer,PETSC_VIEWER_NATIVE)");
659:   }
660:   return(0);
661: }
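
/*
   Usage sketch (illustrative): store a parallel dense matrix in a binary file,
   as the SETERRQ message above prescribes, and read it back.  The file name
   "dense.dat" is an assumption of the example.

     PetscViewer viewer;
     Mat         B;

     PetscViewerBinaryOpen(PETSC_COMM_WORLD,"dense.dat",FILE_MODE_WRITE,&viewer);
     PetscViewerPushFormat(viewer,PETSC_VIEWER_NATIVE);
     MatView(A,viewer);
     PetscViewerPopFormat(viewer);
     PetscViewerDestroy(&viewer);

     PetscViewerBinaryOpen(PETSC_COMM_WORLD,"dense.dat",FILE_MODE_READ,&viewer);
     MatCreate(PETSC_COMM_WORLD,&B);
     MatSetType(B,MATMPIDENSE);
     MatLoad(B,viewer);
     PetscViewerDestroy(&viewer);
*/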

663: extern PetscErrorCode MatView_SeqDense(Mat,PetscViewer);
664: #include <petscdraw.h>
667: static PetscErrorCode MatView_MPIDense_ASCIIorDraworSocket(Mat mat,PetscViewer viewer)
668: {
669:   Mat_MPIDense      *mdn = (Mat_MPIDense*)mat->data;
670:   PetscErrorCode    ierr;
671:   PetscMPIInt       rank = mdn->rank;
672:   PetscViewerType   vtype;
673:   PetscBool         iascii,isdraw;
674:   PetscViewer       sviewer;
675:   PetscViewerFormat format;

678:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);
679:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERDRAW,&isdraw);
680:   if (iascii) {
681:     PetscViewerGetType(viewer,&vtype);
682:     PetscViewerGetFormat(viewer,&format);
683:     if (format == PETSC_VIEWER_ASCII_INFO_DETAIL) {
684:       MatInfo info;
685:       MatGetInfo(mat,MAT_LOCAL,&info);
686:       PetscViewerASCIIPushSynchronized(viewer);
687:       PetscViewerASCIISynchronizedPrintf(viewer,"  [%d] local rows %D nz %D nz alloced %D mem %D \n",rank,mat->rmap->n,(PetscInt)info.nz_used,(PetscInt)info.nz_allocated,(PetscInt)info.memory);
688:       PetscViewerFlush(viewer);
689:       PetscViewerASCIIPopSynchronized(viewer);
690:       VecScatterView(mdn->Mvctx,viewer);
691:       return(0);
692:     } else if (format == PETSC_VIEWER_ASCII_INFO) {
693:       return(0);
694:     }
695:   } else if (isdraw) {
696:     PetscDraw draw;
697:     PetscBool isnull;

699:     PetscViewerDrawGetDraw(viewer,0,&draw);
700:     PetscDrawIsNull(draw,&isnull);
701:     if (isnull) return(0);
702:   }

704:   {
 705:     /* assemble the entire matrix onto the first processor */
706:     Mat         A;
707:     PetscInt    M = mat->rmap->N,N = mat->cmap->N,m,row,i,nz;
708:     PetscInt    *cols;
709:     PetscScalar *vals;

711:     MatCreate(PetscObjectComm((PetscObject)mat),&A);
712:     if (!rank) {
713:       MatSetSizes(A,M,N,M,N);
714:     } else {
715:       MatSetSizes(A,0,0,M,N);
716:     }
717:     /* Since this is a temporary matrix, MATMPIDENSE instead of ((PetscObject)A)->type_name here is probably acceptable. */
718:     MatSetType(A,MATMPIDENSE);
719:     MatMPIDenseSetPreallocation(A,NULL);
720:     PetscLogObjectParent((PetscObject)mat,(PetscObject)A);

722:     /* Copy the matrix ... This isn't the most efficient means,
723:        but it's quick for now */
724:     A->insertmode = INSERT_VALUES;

726:     row = mat->rmap->rstart;
727:     m   = mdn->A->rmap->n;
728:     for (i=0; i<m; i++) {
729:       MatGetRow_MPIDense(mat,row,&nz,&cols,&vals);
730:       MatSetValues_MPIDense(A,1,&row,nz,cols,vals,INSERT_VALUES);
731:       MatRestoreRow_MPIDense(mat,row,&nz,&cols,&vals);
732:       row++;
733:     }

735:     MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
736:     MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
737:     PetscViewerGetSubViewer(viewer,PETSC_COMM_SELF,&sviewer);
738:     if (!rank) {
739:         MatView_SeqDense(((Mat_MPIDense*)(A->data))->A,sviewer);
740:     }
741:     PetscViewerRestoreSubViewer(viewer,PETSC_COMM_SELF,&sviewer);
742:     PetscViewerFlush(viewer);
743:     MatDestroy(&A);
744:   }
745:   return(0);
746: }

750: PetscErrorCode MatView_MPIDense(Mat mat,PetscViewer viewer)
751: {
753:   PetscBool      iascii,isbinary,isdraw,issocket;

756:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);
757:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERBINARY,&isbinary);
758:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERSOCKET,&issocket);
759:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERDRAW,&isdraw);

761:   if (iascii || issocket || isdraw) {
762:     MatView_MPIDense_ASCIIorDraworSocket(mat,viewer);
763:   } else if (isbinary) {
764:     MatView_MPIDense_Binary(mat,viewer);
765:   }
766:   return(0);
767: }

771: PetscErrorCode MatGetInfo_MPIDense(Mat A,MatInfoType flag,MatInfo *info)
772: {
773:   Mat_MPIDense   *mat = (Mat_MPIDense*)A->data;
774:   Mat            mdn  = mat->A;
776:   PetscReal      isend[5],irecv[5];

779:   info->block_size = 1.0;

781:   MatGetInfo(mdn,MAT_LOCAL,info);

783:   isend[0] = info->nz_used; isend[1] = info->nz_allocated; isend[2] = info->nz_unneeded;
784:   isend[3] = info->memory;  isend[4] = info->mallocs;
785:   if (flag == MAT_LOCAL) {
786:     info->nz_used      = isend[0];
787:     info->nz_allocated = isend[1];
788:     info->nz_unneeded  = isend[2];
789:     info->memory       = isend[3];
790:     info->mallocs      = isend[4];
791:   } else if (flag == MAT_GLOBAL_MAX) {
792:     MPIU_Allreduce(isend,irecv,5,MPIU_REAL,MPIU_MAX,PetscObjectComm((PetscObject)A));

794:     info->nz_used      = irecv[0];
795:     info->nz_allocated = irecv[1];
796:     info->nz_unneeded  = irecv[2];
797:     info->memory       = irecv[3];
798:     info->mallocs      = irecv[4];
799:   } else if (flag == MAT_GLOBAL_SUM) {
800:     MPIU_Allreduce(isend,irecv,5,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)A));

802:     info->nz_used      = irecv[0];
803:     info->nz_allocated = irecv[1];
804:     info->nz_unneeded  = irecv[2];
805:     info->memory       = irecv[3];
806:     info->mallocs      = irecv[4];
807:   }
808:   info->fill_ratio_given  = 0; /* no parallel LU/ILU/Cholesky */
809:   info->fill_ratio_needed = 0;
810:   info->factor_mallocs    = 0;
811:   return(0);
812: }

816: PetscErrorCode MatSetOption_MPIDense(Mat A,MatOption op,PetscBool flg)
817: {
818:   Mat_MPIDense   *a = (Mat_MPIDense*)A->data;

822:   switch (op) {
823:   case MAT_NEW_NONZERO_LOCATIONS:
824:   case MAT_NEW_NONZERO_LOCATION_ERR:
825:   case MAT_NEW_NONZERO_ALLOCATION_ERR:
826:     MatCheckPreallocated(A,1);
827:     MatSetOption(a->A,op,flg);
828:     break;
829:   case MAT_ROW_ORIENTED:
830:     MatCheckPreallocated(A,1);
831:     a->roworiented = flg;
832:     MatSetOption(a->A,op,flg);
833:     break;
834:   case MAT_NEW_DIAGONALS:
835:   case MAT_KEEP_NONZERO_PATTERN:
836:   case MAT_USE_HASH_TABLE:
837:     PetscInfo1(A,"Option %s ignored\n",MatOptions[op]);
838:     break;
839:   case MAT_IGNORE_OFF_PROC_ENTRIES:
840:     a->donotstash = flg;
841:     break;
842:   case MAT_SYMMETRIC:
843:   case MAT_STRUCTURALLY_SYMMETRIC:
844:   case MAT_HERMITIAN:
845:   case MAT_SYMMETRY_ETERNAL:
846:   case MAT_IGNORE_LOWER_TRIANGULAR:
847:     PetscInfo1(A,"Option %s ignored\n",MatOptions[op]);
848:     break;
849:   default:
850:     SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"unknown option %s",MatOptions[op]);
851:   }
852:   return(0);
853: }
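
/*
   Usage sketch (illustrative): two options this routine acts on.
   MAT_ROW_ORIENTED controls how MatSetValues() interprets the value array, and
   MAT_IGNORE_OFF_PROC_ENTRIES drops non-local entries instead of stashing them.
   A is an assumed MATMPIDENSE matrix.

     MatSetOption(A,MAT_ROW_ORIENTED,PETSC_FALSE);            /* values passed column-major */
     MatSetOption(A,MAT_IGNORE_OFF_PROC_ENTRIES,PETSC_TRUE);  /* skip the stash during assembly */
*/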


858: PetscErrorCode MatDiagonalScale_MPIDense(Mat A,Vec ll,Vec rr)
859: {
860:   Mat_MPIDense   *mdn = (Mat_MPIDense*)A->data;
861:   Mat_SeqDense   *mat = (Mat_SeqDense*)mdn->A->data;
862:   PetscScalar    *l,*r,x,*v;
864:   PetscInt       i,j,s2a,s3a,s2,s3,m=mdn->A->rmap->n,n=mdn->A->cmap->n;

867:   MatGetLocalSize(A,&s2,&s3);
868:   if (ll) {
869:     VecGetLocalSize(ll,&s2a);
870:     if (s2a != s2) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Left scaling vector non-conforming local size, %d != %d.", s2a, s2);
871:     VecGetArray(ll,&l);
872:     for (i=0; i<m; i++) {
873:       x = l[i];
874:       v = mat->v + i;
875:       for (j=0; j<n; j++) { (*v) *= x; v+= m;}
876:     }
877:     VecRestoreArray(ll,&l);
878:     PetscLogFlops(n*m);
879:   }
880:   if (rr) {
881:     VecGetLocalSize(rr,&s3a);
882:     if (s3a != s3) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Right scaling vec non-conforming local size, %d != %d.", s3a, s3);
883:     VecScatterBegin(mdn->Mvctx,rr,mdn->lvec,INSERT_VALUES,SCATTER_FORWARD);
884:     VecScatterEnd(mdn->Mvctx,rr,mdn->lvec,INSERT_VALUES,SCATTER_FORWARD);
885:     VecGetArray(mdn->lvec,&r);
886:     for (i=0; i<n; i++) {
887:       x = r[i];
888:       v = mat->v + i*m;
889:       for (j=0; j<m; j++) (*v++) *= x;
890:     }
891:     VecRestoreArray(mdn->lvec,&r);
892:     PetscLogFlops(n*m);
893:   }
894:   return(0);
895: }

899: PetscErrorCode MatNorm_MPIDense(Mat A,NormType type,PetscReal *nrm)
900: {
901:   Mat_MPIDense   *mdn = (Mat_MPIDense*)A->data;
902:   Mat_SeqDense   *mat = (Mat_SeqDense*)mdn->A->data;
904:   PetscInt       i,j;
905:   PetscReal      sum = 0.0;
906:   PetscScalar    *v  = mat->v;

909:   if (mdn->size == 1) {
910:      MatNorm(mdn->A,type,nrm);
911:   } else {
912:     if (type == NORM_FROBENIUS) {
913:       for (i=0; i<mdn->A->cmap->n*mdn->A->rmap->n; i++) {
914:         sum += PetscRealPart(PetscConj(*v)*(*v)); v++;
915:       }
916:       MPIU_Allreduce(&sum,nrm,1,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)A));
917:       *nrm = PetscSqrtReal(*nrm);
918:       PetscLogFlops(2.0*mdn->A->cmap->n*mdn->A->rmap->n);
919:     } else if (type == NORM_1) {
920:       PetscReal *tmp,*tmp2;
921:       PetscMalloc2(A->cmap->N,&tmp,A->cmap->N,&tmp2);
922:       PetscMemzero(tmp,A->cmap->N*sizeof(PetscReal));
923:       PetscMemzero(tmp2,A->cmap->N*sizeof(PetscReal));
924:       *nrm = 0.0;
925:       v    = mat->v;
926:       for (j=0; j<mdn->A->cmap->n; j++) {
927:         for (i=0; i<mdn->A->rmap->n; i++) {
928:           tmp[j] += PetscAbsScalar(*v);  v++;
929:         }
930:       }
931:       MPIU_Allreduce(tmp,tmp2,A->cmap->N,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)A));
932:       for (j=0; j<A->cmap->N; j++) {
933:         if (tmp2[j] > *nrm) *nrm = tmp2[j];
934:       }
935:       PetscFree2(tmp,tmp2);
936:       PetscLogFlops(A->cmap->n*A->rmap->n);
937:     } else if (type == NORM_INFINITY) { /* max row norm */
938:       PetscReal ntemp;
939:       MatNorm(mdn->A,type,&ntemp);
940:       MPIU_Allreduce(&ntemp,nrm,1,MPIU_REAL,MPIU_MAX,PetscObjectComm((PetscObject)A));
941:     } else SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"No support for two norm");
942:   }
943:   return(0);
944: }

948: PetscErrorCode MatTranspose_MPIDense(Mat A,MatReuse reuse,Mat *matout)
949: {
950:   Mat_MPIDense   *a    = (Mat_MPIDense*)A->data;
951:   Mat_SeqDense   *Aloc = (Mat_SeqDense*)a->A->data;
952:   Mat            B;
953:   PetscInt       M = A->rmap->N,N = A->cmap->N,m,n,*rwork,rstart = A->rmap->rstart;
955:   PetscInt       j,i;
956:   PetscScalar    *v;

959:   if (reuse == MAT_REUSE_MATRIX && A == *matout && M != N) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Supports square matrix only in-place");
960:   if (reuse == MAT_INITIAL_MATRIX || A == *matout) {
961:     MatCreate(PetscObjectComm((PetscObject)A),&B);
962:     MatSetSizes(B,A->cmap->n,A->rmap->n,N,M);
963:     MatSetType(B,((PetscObject)A)->type_name);
964:     MatMPIDenseSetPreallocation(B,NULL);
965:   } else {
966:     B = *matout;
967:   }

969:   m    = a->A->rmap->n; n = a->A->cmap->n; v = Aloc->v;
970:   PetscMalloc1(m,&rwork);
971:   for (i=0; i<m; i++) rwork[i] = rstart + i;
972:   for (j=0; j<n; j++) {
973:     MatSetValues(B,1,&j,m,rwork,v,INSERT_VALUES);
974:     v   += m;
975:   }
976:   PetscFree(rwork);
977:   MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);
978:   MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);
979:   if (reuse == MAT_INITIAL_MATRIX || *matout != A) {
980:     *matout = B;
981:   } else {
982:     MatHeaderMerge(A,&B);
983:   }
984:   return(0);
985: }


988: static PetscErrorCode MatDuplicate_MPIDense(Mat,MatDuplicateOption,Mat*);
989: extern PetscErrorCode MatScale_MPIDense(Mat,PetscScalar);

993: PetscErrorCode MatSetUp_MPIDense(Mat A)
994: {

998:    MatMPIDenseSetPreallocation(A,0);
999:   return(0);
1000: }

1004: PetscErrorCode MatAXPY_MPIDense(Mat Y,PetscScalar alpha,Mat X,MatStructure str)
1005: {
1007:   Mat_MPIDense   *A = (Mat_MPIDense*)Y->data, *B = (Mat_MPIDense*)X->data;

1010:   MatAXPY(A->A,alpha,B->A,str);
1011:   PetscObjectStateIncrease((PetscObject)Y);
1012:   return(0);
1013: }

1017: PetscErrorCode  MatConjugate_MPIDense(Mat mat)
1018: {
1019:   Mat_MPIDense   *a = (Mat_MPIDense*)mat->data;

1023:   MatConjugate(a->A);
1024:   return(0);
1025: }

1029: PetscErrorCode MatRealPart_MPIDense(Mat A)
1030: {
1031:   Mat_MPIDense   *a = (Mat_MPIDense*)A->data;

1035:   MatRealPart(a->A);
1036:   return(0);
1037: }

1041: PetscErrorCode MatImaginaryPart_MPIDense(Mat A)
1042: {
1043:   Mat_MPIDense   *a = (Mat_MPIDense*)A->data;

1047:   MatImaginaryPart(a->A);
1048:   return(0);
1049: }

1051: extern PetscErrorCode MatGetColumnNorms_SeqDense(Mat,NormType,PetscReal*);
1054: PetscErrorCode MatGetColumnNorms_MPIDense(Mat A,NormType type,PetscReal *norms)
1055: {
1057:   PetscInt       i,n;
1058:   Mat_MPIDense   *a = (Mat_MPIDense*) A->data;
1059:   PetscReal      *work;

1062:   MatGetSize(A,NULL,&n);
1063:   PetscMalloc1(n,&work);
1064:   MatGetColumnNorms_SeqDense(a->A,type,work);
1065:   if (type == NORM_2) {
1066:     for (i=0; i<n; i++) work[i] *= work[i];
1067:   }
1068:   if (type == NORM_INFINITY) {
1069:     MPIU_Allreduce(work,norms,n,MPIU_REAL,MPIU_MAX,A->hdr.comm);
1070:   } else {
1071:     MPIU_Allreduce(work,norms,n,MPIU_REAL,MPIU_SUM,A->hdr.comm);
1072:   }
1073:   PetscFree(work);
1074:   if (type == NORM_2) {
1075:     for (i=0; i<n; i++) norms[i] = PetscSqrtReal(norms[i]);
1076:   }
1077:   return(0);
1078: }
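
/*
   Usage sketch (illustrative): column norms of a parallel dense matrix.  Note
   that the routine above reduces squared 2-norms with MPIU_SUM and only takes
   the square root afterwards, so the result matches the sequential case.
   A is an assumed assembled MATMPIDENSE matrix.

     PetscReal *norms;
     PetscInt  N;

     MatGetSize(A,NULL,&N);
     PetscMalloc1(N,&norms);
     MatGetColumnNorms(A,NORM_2,norms);
     PetscFree(norms);
*/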

1082: static PetscErrorCode  MatSetRandom_MPIDense(Mat x,PetscRandom rctx)
1083: {
1084:   Mat_MPIDense   *d = (Mat_MPIDense*)x->data;
1086:   PetscScalar    *a;
1087:   PetscInt       m,n,i;

1090:   MatGetSize(d->A,&m,&n);
1091:   MatDenseGetArray(d->A,&a);
1092:   for (i=0; i<m*n; i++) {
1093:     PetscRandomGetValue(rctx,a+i);
1094:   }
1095:   MatDenseRestoreArray(d->A,&a);
1096:   return(0);
1097: }

1099: extern PetscErrorCode MatMatMultNumeric_MPIDense(Mat A,Mat,Mat);

1103: static PetscErrorCode MatMissingDiagonal_MPIDense(Mat A,PetscBool  *missing,PetscInt *d)
1104: {
1106:   *missing = PETSC_FALSE;
1107:   return(0);
1108: }

1110: /* -------------------------------------------------------------------*/
1111: static struct _MatOps MatOps_Values = { MatSetValues_MPIDense,
1112:                                         MatGetRow_MPIDense,
1113:                                         MatRestoreRow_MPIDense,
1114:                                         MatMult_MPIDense,
1115:                                 /*  4*/ MatMultAdd_MPIDense,
1116:                                         MatMultTranspose_MPIDense,
1117:                                         MatMultTransposeAdd_MPIDense,
1118:                                         0,
1119:                                         0,
1120:                                         0,
1121:                                 /* 10*/ 0,
1122:                                         0,
1123:                                         0,
1124:                                         0,
1125:                                         MatTranspose_MPIDense,
1126:                                 /* 15*/ MatGetInfo_MPIDense,
1127:                                         MatEqual_MPIDense,
1128:                                         MatGetDiagonal_MPIDense,
1129:                                         MatDiagonalScale_MPIDense,
1130:                                         MatNorm_MPIDense,
1131:                                 /* 20*/ MatAssemblyBegin_MPIDense,
1132:                                         MatAssemblyEnd_MPIDense,
1133:                                         MatSetOption_MPIDense,
1134:                                         MatZeroEntries_MPIDense,
1135:                                 /* 24*/ MatZeroRows_MPIDense,
1136:                                         0,
1137:                                         0,
1138:                                         0,
1139:                                         0,
1140:                                 /* 29*/ MatSetUp_MPIDense,
1141:                                         0,
1142:                                         0,
1143:                                         0,
1144:                                         0,
1145:                                 /* 34*/ MatDuplicate_MPIDense,
1146:                                         0,
1147:                                         0,
1148:                                         0,
1149:                                         0,
1150:                                 /* 39*/ MatAXPY_MPIDense,
1151:                                         MatGetSubMatrices_MPIDense,
1152:                                         0,
1153:                                         MatGetValues_MPIDense,
1154:                                         0,
1155:                                 /* 44*/ 0,
1156:                                         MatScale_MPIDense,
1157:                                         MatShift_Basic,
1158:                                         0,
1159:                                         0,
1160:                                 /* 49*/ MatSetRandom_MPIDense,
1161:                                         0,
1162:                                         0,
1163:                                         0,
1164:                                         0,
1165:                                 /* 54*/ 0,
1166:                                         0,
1167:                                         0,
1168:                                         0,
1169:                                         0,
1170:                                 /* 59*/ MatGetSubMatrix_MPIDense,
1171:                                         MatDestroy_MPIDense,
1172:                                         MatView_MPIDense,
1173:                                         0,
1174:                                         0,
1175:                                 /* 64*/ 0,
1176:                                         0,
1177:                                         0,
1178:                                         0,
1179:                                         0,
1180:                                 /* 69*/ 0,
1181:                                         0,
1182:                                         0,
1183:                                         0,
1184:                                         0,
1185:                                 /* 74*/ 0,
1186:                                         0,
1187:                                         0,
1188:                                         0,
1189:                                         0,
1190:                                 /* 79*/ 0,
1191:                                         0,
1192:                                         0,
1193:                                         0,
1194:                                 /* 83*/ MatLoad_MPIDense,
1195:                                         0,
1196:                                         0,
1197:                                         0,
1198:                                         0,
1199:                                         0,
1200: #if defined(PETSC_HAVE_ELEMENTAL)
1201:                                 /* 89*/ MatMatMult_MPIDense_MPIDense,
1202:                                         MatMatMultSymbolic_MPIDense_MPIDense,
1203: #else
1204:                                 /* 89*/ 0,
1205:                                         0,
1206: #endif
1207:                                         MatMatMultNumeric_MPIDense,
1208:                                         0,
1209:                                         0,
1210:                                 /* 94*/ 0,
1211:                                         0,
1212:                                         0,
1213:                                         0,
1214:                                         0,
1215:                                 /* 99*/ 0,
1216:                                         0,
1217:                                         0,
1218:                                         MatConjugate_MPIDense,
1219:                                         0,
1220:                                 /*104*/ 0,
1221:                                         MatRealPart_MPIDense,
1222:                                         MatImaginaryPart_MPIDense,
1223:                                         0,
1224:                                         0,
1225:                                 /*109*/ 0,
1226:                                         0,
1227:                                         0,
1228:                                         0,
1229:                                         MatMissingDiagonal_MPIDense,
1230:                                 /*114*/ 0,
1231:                                         0,
1232:                                         0,
1233:                                         0,
1234:                                         0,
1235:                                 /*119*/ 0,
1236:                                         0,
1237:                                         0,
1238:                                         0,
1239:                                         0,
1240:                                 /*124*/ 0,
1241:                                         MatGetColumnNorms_MPIDense,
1242:                                         0,
1243:                                         0,
1244:                                         0,
1245:                                 /*129*/ 0,
1246:                                         MatTransposeMatMult_MPIDense_MPIDense,
1247:                                         MatTransposeMatMultSymbolic_MPIDense_MPIDense,
1248:                                         MatTransposeMatMultNumeric_MPIDense_MPIDense,
1249:                                         0,
1250:                                 /*134*/ 0,
1251:                                         0,
1252:                                         0,
1253:                                         0,
1254:                                         0,
1255:                                 /*139*/ 0,
1256:                                         0,
1257:                                         0
1258: };

1262: PetscErrorCode  MatMPIDenseSetPreallocation_MPIDense(Mat mat,PetscScalar *data)
1263: {
1264:   Mat_MPIDense   *a;

1268:   mat->preallocated = PETSC_TRUE;
1269:   /* Note:  For now, when a data array is provided, this assumes the user has correctly
1270:    allocated the local dense storage space.  We should add error checking. */

1272:   a       = (Mat_MPIDense*)mat->data;
1273:   PetscLayoutSetUp(mat->rmap);
1274:   PetscLayoutSetUp(mat->cmap);
1275:   a->nvec = mat->cmap->n;

1277:   MatCreate(PETSC_COMM_SELF,&a->A);
1278:   MatSetSizes(a->A,mat->rmap->n,mat->cmap->N,mat->rmap->n,mat->cmap->N);
1279:   MatSetType(a->A,MATSEQDENSE);
1280:   MatSeqDenseSetPreallocation(a->A,data);
1281:   PetscLogObjectParent((PetscObject)mat,(PetscObject)a->A);
1282:   return(0);
1283: }

1285: #if defined(PETSC_HAVE_ELEMENTAL)
1288: PETSC_INTERN PetscErrorCode MatConvert_MPIDense_Elemental(Mat A, MatType newtype,MatReuse reuse,Mat *newmat)
1289: {
1290:   Mat            mat_elemental;
1292:   PetscScalar    *v;
1293:   PetscInt       m=A->rmap->n,N=A->cmap->N,rstart=A->rmap->rstart,i,*rows,*cols;
1294: 
1296:   if (reuse == MAT_REUSE_MATRIX) {
1297:     mat_elemental = *newmat;
1298:     MatZeroEntries(*newmat);
1299:   } else {
1300:     MatCreate(PetscObjectComm((PetscObject)A), &mat_elemental);
1301:     MatSetSizes(mat_elemental,PETSC_DECIDE,PETSC_DECIDE,A->rmap->N,A->cmap->N);
1302:     MatSetType(mat_elemental,MATELEMENTAL);
1303:     MatSetUp(mat_elemental);
1304:     MatSetOption(mat_elemental,MAT_ROW_ORIENTED,PETSC_FALSE);
1305:   }

1307:   PetscMalloc2(m,&rows,N,&cols);
1308:   for (i=0; i<N; i++) cols[i] = i;
1309:   for (i=0; i<m; i++) rows[i] = rstart + i;
1310: 
1311:   /* PETSc-Elemental interface uses axpy for setting off-processor entries; only ADD_VALUES is allowed */
1312:   MatDenseGetArray(A,&v);
1313:   MatSetValues(mat_elemental,m,rows,N,cols,v,ADD_VALUES);
1314:   MatAssemblyBegin(mat_elemental, MAT_FINAL_ASSEMBLY);
1315:   MatAssemblyEnd(mat_elemental, MAT_FINAL_ASSEMBLY);
1316:   MatDenseRestoreArray(A,&v);
1317:   PetscFree2(rows,cols);

1319:   if (reuse == MAT_INPLACE_MATRIX) {
1320:     MatHeaderReplace(A,&mat_elemental);
1321:   } else {
1322:     *newmat = mat_elemental;
1323:   }
1324:   return(0);
1325: }
1326: #endif

1330: PETSC_EXTERN PetscErrorCode MatCreate_MPIDense(Mat mat)
1331: {
1332:   Mat_MPIDense   *a;

1336:   PetscNewLog(mat,&a);
1337:   mat->data = (void*)a;
1338:   PetscMemcpy(mat->ops,&MatOps_Values,sizeof(struct _MatOps));

1340:   mat->insertmode = NOT_SET_VALUES;
1341:   MPI_Comm_rank(PetscObjectComm((PetscObject)mat),&a->rank);
1342:   MPI_Comm_size(PetscObjectComm((PetscObject)mat),&a->size);

1344:   /* build cache for off array entries formed */
1345:   a->donotstash = PETSC_FALSE;

1347:   MatStashCreate_Private(PetscObjectComm((PetscObject)mat),1,&mat->stash);

1349:   /* stuff used for matrix vector multiply */
1350:   a->lvec        = 0;
1351:   a->Mvctx       = 0;
1352:   a->roworiented = PETSC_TRUE;

1354:   PetscObjectComposeFunction((PetscObject)mat,"MatDenseGetArray_C",MatDenseGetArray_MPIDense);
1355:   PetscObjectComposeFunction((PetscObject)mat,"MatDenseRestoreArray_C",MatDenseRestoreArray_MPIDense);
1356: #if defined(PETSC_HAVE_ELEMENTAL)
1357:   PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpidense_elemental_C",MatConvert_MPIDense_Elemental);
1358: #endif
1359:   PetscObjectComposeFunction((PetscObject)mat,"MatGetDiagonalBlock_C",MatGetDiagonalBlock_MPIDense);
1360:   PetscObjectComposeFunction((PetscObject)mat,"MatMPIDenseSetPreallocation_C",MatMPIDenseSetPreallocation_MPIDense);
1361:   PetscObjectComposeFunction((PetscObject)mat,"MatMatMult_mpiaij_mpidense_C",MatMatMult_MPIAIJ_MPIDense);
1362:   PetscObjectComposeFunction((PetscObject)mat,"MatMatMultSymbolic_mpiaij_mpidense_C",MatMatMultSymbolic_MPIAIJ_MPIDense);
1363:   PetscObjectComposeFunction((PetscObject)mat,"MatMatMultNumeric_mpiaij_mpidense_C",MatMatMultNumeric_MPIAIJ_MPIDense);

1365:   PetscObjectComposeFunction((PetscObject)mat,"MatTransposeMatMult_mpiaij_mpidense_C",MatTransposeMatMult_MPIAIJ_MPIDense);
1366:   PetscObjectComposeFunction((PetscObject)mat,"MatTransposeMatMultSymbolic_mpiaij_mpidense_C",MatTransposeMatMultSymbolic_MPIAIJ_MPIDense);
1367:   PetscObjectComposeFunction((PetscObject)mat,"MatTransposeMatMultNumeric_mpiaij_mpidense_C",MatTransposeMatMultNumeric_MPIAIJ_MPIDense);
1368:   PetscObjectChangeTypeName((PetscObject)mat,MATMPIDENSE);
1369:   return(0);
1370: }

1372: /*MC
1373:    MATDENSE - MATDENSE = "dense" - A matrix type to be used for dense matrices.

1375:    This matrix type is identical to MATSEQDENSE when constructed with a single process communicator,
1376:    and MATMPIDENSE otherwise.

1378:    Options Database Keys:
1379: . -mat_type dense - sets the matrix type to "dense" during a call to MatSetFromOptions()

1381:   Level: beginner


1384: .seealso: MatCreateDense(), MATSEQDENSE, MATMPIDENSE
1385: M*/
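
/*
   Usage sketch (illustrative): selecting the type at run time, so the same code
   yields MATSEQDENSE on one process and MATMPIDENSE on several.  Run with
   -mat_type dense; the sizes here are assumptions of the example.

     Mat A;

     MatCreate(PETSC_COMM_WORLD,&A);
     MatSetSizes(A,PETSC_DECIDE,PETSC_DECIDE,100,100);
     MatSetFromOptions(A);
     MatSetUp(A);
     MatDestroy(&A);
*/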

1389: /*@C
1390:    MatMPIDenseSetPreallocation - Sets the array used to store the matrix entries

1392:    Not collective

1394:    Input Parameters:
1395: .  B - the matrix
1396: -  data - optional location of matrix data.  Set data=NULL for PETSc
1397:    to control all matrix memory allocation.

1399:    Notes:
1400:    The dense format is fully compatible with standard Fortran 77
1401:    storage by columns.

1403:    The data input variable is intended primarily for Fortran programmers
1404:    who wish to allocate their own matrix memory space.  Most users should
1405:    set data=NULL.

1407:    Level: intermediate

1409: .keywords: matrix, dense, parallel

1411: .seealso: MatCreate(), MatCreateSeqDense(), MatSetValues()
1412: @*/
1413: PetscErrorCode  MatMPIDenseSetPreallocation(Mat B,PetscScalar *data)
1414: {

1418:   PetscTryMethod(B,"MatMPIDenseSetPreallocation_C",(Mat,PetscScalar*),(B,data));
1419:   return(0);
1420: }

1424: /*@C
1425:    MatCreateDense - Creates a parallel matrix in dense format.

1427:    Collective on MPI_Comm

1429:    Input Parameters:
1430: +  comm - MPI communicator
1431: .  m - number of local rows (or PETSC_DECIDE to have it calculated if M is given)
1432: .  n - number of local columns (or PETSC_DECIDE to have it calculated if N is given)
1433: .  M - number of global rows (or PETSC_DECIDE to have it calculated if m is given)
1434: .  N - number of global columns (or PETSC_DECIDE to have it calculated if n is given)
1435: -  data - optional location of matrix data.  Set data=NULL (PETSC_NULL_SCALAR for Fortran users) for PETSc
1436:    to control all matrix memory allocation.

1438:    Output Parameter:
1439: .  A - the matrix

1441:    Notes:
1442:    The dense format is fully compatible with standard Fortran 77
1443:    storage by columns.

1445:    The data input variable is intended primarily for Fortran programmers
1446:    who wish to allocate their own matrix memory space.  Most users should
1447:    set data=NULL (PETSC_NULL_SCALAR for Fortran users).

1449:    The user MUST specify either the local or global matrix dimensions
1450:    (possibly both).

1452:    Level: intermediate

1454: .keywords: matrix, dense, parallel

1456: .seealso: MatCreate(), MatCreateSeqDense(), MatSetValues()
1457: @*/
1458: PetscErrorCode  MatCreateDense(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt M,PetscInt N,PetscScalar *data,Mat *A)
1459: {
1461:   PetscMPIInt    size;

1464:   MatCreate(comm,A);
1465:   MatSetSizes(*A,m,n,M,N);
1466:   MPI_Comm_size(comm,&size);
1467:   if (size > 1) {
1468:     MatSetType(*A,MATMPIDENSE);
1469:     MatMPIDenseSetPreallocation(*A,data);
1470:     if (data) {  /* user provided data array, so no need to assemble */
1471:       MatSetUpMultiply_MPIDense(*A);
1472:       (*A)->assembled = PETSC_TRUE;
1473:     }
1474:   } else {
1475:     MatSetType(*A,MATSEQDENSE);
1476:     MatSeqDenseSetPreallocation(*A,data);
1477:   }
1478:   return(0);
1479: }
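
/*
   Usage sketch (illustrative): create and fill a distributed dense matrix with
   PETSc-managed storage.  The dimensions are assumptions of the example.

     Mat         A;
     PetscInt    i,j,rstart,rend;
     PetscScalar v;

     MatCreateDense(PETSC_COMM_WORLD,PETSC_DECIDE,PETSC_DECIDE,100,50,NULL,&A);
     MatGetOwnershipRange(A,&rstart,&rend);
     for (i=rstart; i<rend; i++) {
       for (j=0; j<50; j++) {
         v = 1.0/(i+j+1);
         MatSetValues(A,1,&i,1,&j,&v,INSERT_VALUES);
       }
     }
     MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
     MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
     MatDestroy(&A);
*/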

1483: static PetscErrorCode MatDuplicate_MPIDense(Mat A,MatDuplicateOption cpvalues,Mat *newmat)
1484: {
1485:   Mat            mat;
1486:   Mat_MPIDense   *a,*oldmat = (Mat_MPIDense*)A->data;

1490:   *newmat = 0;
1491:   MatCreate(PetscObjectComm((PetscObject)A),&mat);
1492:   MatSetSizes(mat,A->rmap->n,A->cmap->n,A->rmap->N,A->cmap->N);
1493:   MatSetType(mat,((PetscObject)A)->type_name);
1494:   a       = (Mat_MPIDense*)mat->data;
1495:   PetscMemcpy(mat->ops,A->ops,sizeof(struct _MatOps));

1497:   mat->factortype   = A->factortype;
1498:   mat->assembled    = PETSC_TRUE;
1499:   mat->preallocated = PETSC_TRUE;

1501:   a->size         = oldmat->size;
1502:   a->rank         = oldmat->rank;
1503:   mat->insertmode = NOT_SET_VALUES;
1504:   a->nvec         = oldmat->nvec;
1505:   a->donotstash   = oldmat->donotstash;

1507:   PetscLayoutReference(A->rmap,&mat->rmap);
1508:   PetscLayoutReference(A->cmap,&mat->cmap);

1510:   MatSetUpMultiply_MPIDense(mat);
1511:   MatDuplicate(oldmat->A,cpvalues,&a->A);
1512:   PetscLogObjectParent((PetscObject)mat,(PetscObject)a->A);

1514:   *newmat = mat;
1515:   return(0);
1516: }

1520: PetscErrorCode MatLoad_MPIDense_DenseInFile(MPI_Comm comm,PetscInt fd,PetscInt M,PetscInt N,Mat newmat)
1521: {
1523:   PetscMPIInt    rank,size;
1524:   const PetscInt *rowners;
1525:   PetscInt       i,m,n,nz,j,mMax;
1526:   PetscScalar    *array,*vals,*vals_ptr;
1527:   Mat_MPIDense   *a = (Mat_MPIDense*)newmat->data;

1530:   MPI_Comm_rank(comm,&rank);
1531:   MPI_Comm_size(comm,&size);

1533:   /* determine ownership of rows and columns */
1534:   m = (newmat->rmap->n < 0) ? PETSC_DECIDE : newmat->rmap->n;
1535:   n = (newmat->cmap->n < 0) ? PETSC_DECIDE : newmat->cmap->n;

1537:   MatSetSizes(newmat,m,n,M,N);
1538:   if (!a->A || !((Mat_SeqDense*)(a->A->data))->user_alloc) {
1539:     MatMPIDenseSetPreallocation(newmat,NULL);
1540:   }
1541:   MatDenseGetArray(newmat,&array);
1542:   MatGetLocalSize(newmat,&m,NULL);
1543:   MatGetOwnershipRanges(newmat,&rowners);
1544:   MPI_Reduce(&m,&mMax,1,MPIU_INT,MPI_MAX,0,comm);
1545:   if (!rank) {
1546:     PetscMalloc1(mMax*N,&vals);

1548:     /* read in my part of the matrix numerical values  */
1549:     PetscBinaryRead(fd,vals,m*N,PETSC_SCALAR);

1551:     /* insert into the matrix by row (this is why we cannot read directly into the array) */
1552:     vals_ptr = vals;
1553:     for (i=0; i<m; i++) {
1554:       for (j=0; j<N; j++) {
1555:         array[i + j*m] = *vals_ptr++;
1556:       }
1557:     }

1559:     /* read in the other processors' parts and ship them out */
1560:     for (i=1; i<size; i++) {
1561:       nz   = (rowners[i+1] - rowners[i])*N;
1562:       PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);
1563:       MPIULong_Send(vals,nz,MPIU_SCALAR,i,((PetscObject)(newmat))->tag,comm);
1564:     }
1565:   } else {
1566:     /* receive numeric values */
1567:     PetscMalloc1(m*N,&vals);

1569:     /* receive message of values*/
1570:     MPIULong_Recv(vals,m*N,MPIU_SCALAR,0,((PetscObject)(newmat))->tag,comm);

1572:     /* insert into the matrix by row (this is why we cannot read directly into the array) */
1573:     vals_ptr = vals;
1574:     for (i=0; i<m; i++) {
1575:       for (j=0; j<N; j++) {
1576:         array[i + j*m] = *vals_ptr++;
1577:       }
1578:     }
1579:   }
1580:   MatDenseRestoreArray(newmat,&array);
1581:   PetscFree(vals);
1582:   MatAssemblyBegin(newmat,MAT_FINAL_ASSEMBLY);
1583:   MatAssemblyEnd(newmat,MAT_FINAL_ASSEMBLY);
1584:   return(0);
1585: }

1589: PetscErrorCode MatLoad_MPIDense(Mat newmat,PetscViewer viewer)
1590: {
1591:   Mat_MPIDense   *a;
1592:   PetscScalar    *vals,*svals;
1593:   MPI_Comm       comm;
1594:   MPI_Status     status;
1595:   PetscMPIInt    rank,size,tag = ((PetscObject)viewer)->tag,*rowners,*sndcounts,m,n,maxnz;
1596:   PetscInt       header[4],*rowlengths = 0,M,N,*cols;
1597:   PetscInt       *ourlens,*procsnz = 0,jj,*mycols,*smycols;
1598:   PetscInt       i,nz,j,rstart,rend;
1599:   int            fd;

1603:   /* force binary viewer to load .info file if it has not yet done so */
1604:   PetscViewerSetUp(viewer);
1605:   PetscObjectGetComm((PetscObject)viewer,&comm);
1606:   MPI_Comm_size(comm,&size);
1607:   MPI_Comm_rank(comm,&rank);
1608:   PetscViewerBinaryGetDescriptor(viewer,&fd);
1609:   if (!rank) {
1610:     PetscBinaryRead(fd,(char*)header,4,PETSC_INT);
1611:     if (header[0] != MAT_FILE_CLASSID) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_FILE_UNEXPECTED,"not a matrix object");
1612:   }
1613:   MPI_Bcast(header+1,3,MPIU_INT,0,comm);
1614:   M    = header[1]; N = header[2]; nz = header[3];

1616:   /* If global rows/cols are set to PETSC_DECIDE, set them to the sizes given in the file */
1617:   if (newmat->rmap->N < 0) newmat->rmap->N = M;
1618:   if (newmat->cmap->N < 0) newmat->cmap->N = N;

1620:   if (newmat->rmap->N != M) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_FILE_UNEXPECTED, "Inconsistent # of rows:Matrix in file has (%D) and input matrix has (%D)",M,newmat->rmap->N);
1621:   if (newmat->cmap->N != N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_FILE_UNEXPECTED, "Inconsistent # of cols:Matrix in file has (%D) and input matrix has (%D)",N,newmat->cmap->N);

1623:   /*
1624:        Handle case where matrix is stored on disk as a dense matrix
1625:   */
1626:   if (nz == MATRIX_BINARY_FORMAT_DENSE) {
1627:     MatLoad_MPIDense_DenseInFile(comm,fd,M,N,newmat);
1628:     return(0);
1629:   }

1631:   /* determine ownership of all rows */
1632:   if (newmat->rmap->n < 0) {
1633:     PetscMPIIntCast(M/size + ((M % size) > rank),&m);
1634:   } else {
1635:     PetscMPIIntCast(newmat->rmap->n,&m);
1636:   }
1637:   if (newmat->cmap->n < 0) {
1638:     n = PETSC_DECIDE;
1639:   } else {
1640:     PetscMPIIntCast(newmat->cmap->n,&n);
1641:   }

1643:   PetscMalloc1(size+2,&rowners);
1644:   MPI_Allgather(&m,1,MPI_INT,rowners+1,1,MPI_INT,comm);
1645:   rowners[0] = 0;
1646:   for (i=2; i<=size; i++) {
1647:     rowners[i] += rowners[i-1];
1648:   }
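  /* rowners[] now holds the exclusive prefix sum of the local row counts, so
     rows rowners[p] through rowners[p+1]-1 are owned by rank p */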
1649:   rstart = rowners[rank];
1650:   rend   = rowners[rank+1];

1652:   /* distribute row lengths to all processors */
1653:   PetscMalloc1(rend-rstart,&ourlens);
1654:   if (!rank) {
1655:     PetscMalloc1(M,&rowlengths);
1656:     PetscBinaryRead(fd,rowlengths,M,PETSC_INT);
1657:     PetscMalloc1(size,&sndcounts);
1658:     for (i=0; i<size; i++) sndcounts[i] = rowners[i+1] - rowners[i];
1659:     MPI_Scatterv(rowlengths,sndcounts,rowners,MPIU_INT,ourlens,rend-rstart,MPIU_INT,0,comm);
1660:     PetscFree(sndcounts);
1661:   } else {
1662:     MPI_Scatterv(0,0,0,MPIU_INT,ourlens,rend-rstart,MPIU_INT,0,comm);
1663:   }
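  /* note: the MPI standard ignores the send arguments of MPI_Scatterv() on
     non-root ranks, so passing 0 for sendbuf, sendcounts, and displs there is
     safe; only rank 0 supplies the row-length data read from the file */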

1665:   if (!rank) {
1666:     /* calculate the number of nonzeros on each processor */
1667:     PetscMalloc1(size,&procsnz);
1668:     PetscMemzero(procsnz,size*sizeof(PetscInt));
1669:     for (i=0; i<size; i++) {
1670:       for (j=rowners[i]; j< rowners[i+1]; j++) {
1671:         procsnz[i] += rowlengths[j];
1672:       }
1673:     }
1674:     PetscFree(rowlengths);

1676:     /* determine max buffer needed and allocate it */
1677:     maxnz = 0;
1678:     for (i=0; i<size; i++) {
1679:       maxnz = PetscMax(maxnz,procsnz[i]);
1680:     }
1681:     PetscMalloc1(maxnz,&cols);

1683:     /* read in my part of the matrix column indices  */
1684:     nz   = procsnz[0];
1685:     PetscMalloc1(nz,&mycols);
1686:     PetscBinaryRead(fd,mycols,nz,PETSC_INT);

1688:     /* read in everyone else's portions and ship them off */
1689:     for (i=1; i<size; i++) {
1690:       nz   = procsnz[i];
1691:       PetscBinaryRead(fd,cols,nz,PETSC_INT);
1692:       MPI_Send(cols,nz,MPIU_INT,i,tag,comm);
1693:     }
1694:     PetscFree(cols);
1695:   } else {
1696:     /* determine buffer space needed for message */
1697:     nz = 0;
1698:     for (i=0; i<m; i++) {
1699:       nz += ourlens[i];
1700:     }
1701:     PetscMalloc1(nz+1,&mycols);

1703:     /* receive message of column indices */
1704:     MPI_Recv(mycols,nz,MPIU_INT,0,tag,comm,&status);
1705:     MPI_Get_count(&status,MPIU_INT,&maxnz);
1706:     if (maxnz != nz) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_FILE_UNEXPECTED,"File is corrupt: number of column indices received does not match expected count");
1707:   }

1709:   MatSetSizes(newmat,m,n,M,N);
1710:   a = (Mat_MPIDense*)newmat->data;
1711:   if (!a->A || !((Mat_SeqDense*)(a->A->data))->user_alloc) {
1712:     MatMPIDenseSetPreallocation(newmat,NULL);
1713:   }

1715:   if (!rank) {
1716:     PetscMalloc1(maxnz,&vals);

1718:     /* read in my part of the matrix numerical values  */
1719:     nz   = procsnz[0];
1720:     PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);

1722:     /* insert into matrix */
1723:     jj      = rstart;
1724:     smycols = mycols;
1725:     svals   = vals;
1726:     for (i=0; i<m; i++) {
1727:       MatSetValues(newmat,1,&jj,ourlens[i],smycols,svals,INSERT_VALUES);
1728:       smycols += ourlens[i];
1729:       svals   += ourlens[i];
1730:       jj++;
1731:     }

1733:     /* read in the other processors' portions and ship them out */
1734:     for (i=1; i<size; i++) {
1735:       nz   = procsnz[i];
1736:       PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);
1737:       MPI_Send(vals,nz,MPIU_SCALAR,i,((PetscObject)newmat)->tag,comm);
1738:     }
1739:     PetscFree(procsnz);
1740:   } else {
1741:     /* receive numeric values */
1742:     PetscMalloc1(nz+1,&vals);

1744:     /* receive message of values */
1745:     MPI_Recv(vals,nz,MPIU_SCALAR,0,((PetscObject)newmat)->tag,comm,&status);
1746:     MPI_Get_count(&status,MPIU_SCALAR,&maxnz);
1747:     if (maxnz != nz) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_FILE_UNEXPECTED,"File is corrupt: number of values received does not match expected count");

1749:     /* insert into matrix */
1750:     jj      = rstart;
1751:     smycols = mycols;
1752:     svals   = vals;
1753:     for (i=0; i<m; i++) {
1754:       MatSetValues(newmat,1,&jj,ourlens[i],smycols,svals,INSERT_VALUES);
1755:       smycols += ourlens[i];
1756:       svals   += ourlens[i];
1757:       jj++;
1758:     }
1759:   }
1760:   PetscFree(ourlens);
1761:   PetscFree(vals);
1762:   PetscFree(mycols);
1763:   PetscFree(rowners);

1765:   MatAssemblyBegin(newmat,MAT_FINAL_ASSEMBLY);
1766:   MatAssemblyEnd(newmat,MAT_FINAL_ASSEMBLY);
1767:   return(0);
1768: }
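/*
   A hedged usage sketch of how this loader is reached through the public API;
   the helper name and the file name "matrix.dat" are placeholders, not part
   of PETSc:
*/
static PetscErrorCode ExampleLoadMPIDense_Sketch(MPI_Comm comm,Mat *A)
{
  PetscViewer viewer;

  PetscViewerBinaryOpen(comm,"matrix.dat",FILE_MODE_READ,&viewer);
  MatCreate(comm,A);
  MatSetType(*A,MATMPIDENSE);   /* so MatLoad() dispatches to MatLoad_MPIDense() */
  MatLoad(*A,viewer);
  PetscViewerDestroy(&viewer);
  return(0);
}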

1772: PetscErrorCode MatEqual_MPIDense(Mat A,Mat B,PetscBool  *flag)
1773: {
1774:   Mat_MPIDense   *matB = (Mat_MPIDense*)B->data,*matA = (Mat_MPIDense*)A->data;
1775:   Mat            a,b;
1776:   PetscBool      flg;

1780:   a    = matA->A;
1781:   b    = matB->A;
1782:   MatEqual(a,b,&flg);
1783:   MPIU_Allreduce(&flg,flag,1,MPIU_BOOL,MPI_LAND,PetscObjectComm((PetscObject)A));
1784:   return(0);
1785: }
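/*
   The MPI_LAND reduction above is what makes MatEqual() collective: each rank
   compares only its local block, and *flag becomes PETSC_TRUE only if every
   rank's comparison succeeded.  A hedged sketch of the same pattern, with an
   illustrative (non-PETSc) helper name:
*/
static PetscErrorCode GlobalAnd_Sketch(MPI_Comm comm,PetscBool localflg,PetscBool *globalflg)
{
  MPIU_Allreduce(&localflg,globalflg,1,MPIU_BOOL,MPI_LAND,comm);
  return(0);
}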

1789: PetscErrorCode MatDestroy_MatTransMatMult_MPIDense_MPIDense(Mat A)
1790: {
1791:   PetscErrorCode        ierr;
1792:   Mat_MPIDense          *a = (Mat_MPIDense*)A->data;
1793:   Mat_TransMatMultDense *atb = a->atbdense;

1796:   PetscFree3(atb->sendbuf,atb->atbarray,atb->recvcounts);
1797:   (atb->destroy)(A);
1798:   PetscFree(atb);
1799:   return(0);
1800: }

1804: PetscErrorCode MatTransposeMatMultNumeric_MPIDense_MPIDense(Mat A,Mat B,Mat C)
1805: {
1806:   Mat_MPIDense   *a=(Mat_MPIDense*)A->data, *b=(Mat_MPIDense*)B->data, *c=(Mat_MPIDense*)C->data;
1807:   Mat_SeqDense   *aseq=(Mat_SeqDense*)(a->A)->data, *bseq=(Mat_SeqDense*)(b->A)->data;
1808:   Mat_TransMatMultDense *atb = c->atbdense;
1810:   MPI_Comm       comm;
1811:   PetscMPIInt    rank,size,*recvcounts=atb->recvcounts;
1812:   PetscScalar    *carray,*atbarray=atb->atbarray,*sendbuf=atb->sendbuf;
1813:   PetscInt       i,cN=C->cmap->N,cM=C->rmap->N,proc,k,j;
1814:   PetscScalar    _DOne=1.0,_DZero=0.0;
1815:   PetscBLASInt   am,an,bn,aN;
1816:   const PetscInt *ranges;

1819:   PetscObjectGetComm((PetscObject)A,&comm);
1820:   MPI_Comm_rank(comm,&rank);
1821:   MPI_Comm_size(comm,&size);

1823:   /* compute atbarray = aseq^T * bseq */
1824:   PetscBLASIntCast(a->A->cmap->n,&an);
1825:   PetscBLASIntCast(b->A->cmap->n,&bn);
1826:   PetscBLASIntCast(a->A->rmap->n,&am);
1827:   PetscBLASIntCast(A->cmap->N,&aN);
1828:   PetscStackCallBLAS("BLASgemm",BLASgemm_("T","N",&an,&bn,&am,&_DOne,aseq->v,&aseq->lda,bseq->v,&bseq->lda,&_DZero,atbarray,&aN));
1829: 
1830:   MatGetOwnershipRanges(C,&ranges);
1831:   for (i=0; i<size; i++) recvcounts[i] = (ranges[i+1] - ranges[i])*cN;
1832: 
1833:   /* arrange atbarray into sendbuf */
1834:   k = 0;
1835:   for (proc=0; proc<size; proc++) {
1836:     for (j=0; j<cN; j++) {
1837:       for (i=ranges[proc]; i<ranges[proc+1]; i++) sendbuf[k++] = atbarray[i+j*cM];
1838:     }
1839:   }
1840:   /* sum the atbarray contributions from all ranks into the local rows of C */
1841:   MatDenseGetArray(c->A,&carray);
1842:   MPI_Reduce_scatter(sendbuf,carray,recvcounts,MPIU_SCALAR,MPIU_SUM,comm);
1843:   MatDenseRestoreArray(c->A,&carray);
1844:   return(0);
1845: }
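/*
   Summary of the numeric kernel above for C = A^T * B with A and B partitioned
   conformingly by rows: (1) one BLAS gemm forms this rank's full cM-by-cN
   contribution A_local^T * B_local; (2) that contribution is packed into
   sendbuf ordered by destination rank, column-major within each destination's
   row block; (3) MPI_Reduce_scatter() sums the contributions entrywise across
   all ranks and leaves each rank holding exactly its own row block of C,
   recvcounts[p] being rank p's local row count times cN.
*/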

1849: PetscErrorCode MatTransposeMatMultSymbolic_MPIDense_MPIDense(Mat A,Mat B,PetscReal fill,Mat *C)
1850: {
1851:   PetscErrorCode        ierr;
1852:   Mat                   Cdense;
1853:   MPI_Comm              comm;
1854:   PetscMPIInt           size;
1855:   PetscInt              cm=A->cmap->n,cM,cN=B->cmap->N;
1856:   Mat_MPIDense          *c;
1857:   Mat_TransMatMultDense *atb;

1860:   PetscObjectGetComm((PetscObject)A,&comm);
1861:   if (A->rmap->rstart != B->rmap->rstart || A->rmap->rend != B->rmap->rend) {
1862:     SETERRQ4(comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, A (%D,%D) != B (%D,%D)",A->rmap->rstart,A->rmap->rend,B->rmap->rstart,B->rmap->rend);
1863:   }

1865:   /* create matrix product Cdense */
1866:   MatCreate(comm,&Cdense);
1867:   MatSetSizes(Cdense,cm,B->cmap->n,PETSC_DECIDE,PETSC_DECIDE);
1868:   MatSetType(Cdense,MATMPIDENSE);
1869:   MatMPIDenseSetPreallocation(Cdense,NULL);
1870:   MatAssemblyBegin(Cdense,MAT_FINAL_ASSEMBLY);
1871:   MatAssemblyEnd(Cdense,MAT_FINAL_ASSEMBLY);
1872:   *C   = Cdense;

1874:   /* create data structure so Cdense can be reused in later numeric products */
1875:   MPI_Comm_size(comm,&size);
1876:   PetscNew(&atb);
1877:   cM = Cdense->rmap->N;
1878:   PetscMalloc3(cM*cN,&atb->sendbuf,cM*cN,&atb->atbarray,size,&atb->recvcounts);
1879: 
1880:   c                    = (Mat_MPIDense*)Cdense->data;
1881:   c->atbdense          = atb;
1882:   atb->destroy         = Cdense->ops->destroy;
1883:   Cdense->ops->destroy = MatDestroy_MatTransMatMult_MPIDense_MPIDense;
1884:   return(0);
1885: }

1889: PetscErrorCode MatTransposeMatMult_MPIDense_MPIDense(Mat A,Mat B,MatReuse scall,PetscReal fill,Mat *C)
1890: {

1894:   if (scall == MAT_INITIAL_MATRIX) {
1895:     MatTransposeMatMultSymbolic_MPIDense_MPIDense(A,B,fill,C);
1896:   }
1897:   MatTransposeMatMultNumeric_MPIDense_MPIDense(A,B,*C);
1898:   return(0);
1899: }
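/*
   A hedged usage sketch through the public interface, with an illustrative
   helper name: the MAT_INITIAL_MATRIX call allocates the Mat_TransMatMultDense
   buffers via the symbolic routine, and MAT_REUSE_MATRIX calls reuse them:
*/
static PetscErrorCode ExampleAtB_Sketch(Mat A,Mat B,Mat *C)
{
  MatTransposeMatMult(A,B,MAT_INITIAL_MATRIX,PETSC_DEFAULT,C); /* symbolic + numeric */
  MatTransposeMatMult(A,B,MAT_REUSE_MATRIX,PETSC_DEFAULT,C);   /* numeric only */
  return(0);
}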

1903: PetscErrorCode MatDestroy_MatMatMult_MPIDense_MPIDense(Mat A)
1904: {
1905:   PetscErrorCode   ierr;
1906:   Mat_MPIDense     *a = (Mat_MPIDense*)A->data;
1907:   Mat_MatMultDense *ab = a->abdense;

1910:   MatDestroy(&ab->Ce);
1911:   MatDestroy(&ab->Ae);
1912:   MatDestroy(&ab->Be);

1914:   (ab->destroy)(A);
1915:   PetscFree(ab);
1916:   return(0);
1917: }

1919: #if defined(PETSC_HAVE_ELEMENTAL)
1922: PetscErrorCode MatMatMultNumeric_MPIDense_MPIDense(Mat A,Mat B,Mat C)
1923: {
1924:   PetscErrorCode   ierr;
1925:   Mat_MPIDense     *c=(Mat_MPIDense*)C->data;
1926:   Mat_MatMultDense *ab=c->abdense;

1929:   MatConvert_MPIDense_Elemental(A,MATELEMENTAL,MAT_REUSE_MATRIX, &ab->Ae);
1930:   MatConvert_MPIDense_Elemental(B,MATELEMENTAL,MAT_REUSE_MATRIX, &ab->Be);
1931:   MatMatMultNumeric(ab->Ae,ab->Be,ab->Ce);
1932:   MatConvert(ab->Ce,MATMPIDENSE,MAT_REUSE_MATRIX,&C);
1933:   return(0);
1934: }

1938: PetscErrorCode MatMatMultSymbolic_MPIDense_MPIDense(Mat A,Mat B,PetscReal fill,Mat *C)
1939: {
1940:   PetscErrorCode   ierr;
1941:   Mat              Ae,Be,Ce;
1942:   Mat_MPIDense     *c;
1943:   Mat_MatMultDense *ab;

1946:   if (A->cmap->rstart != B->rmap->rstart || A->cmap->rend != B->rmap->rend) {
1947:     SETERRQ4(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, A (%D,%D) != B (%D,%D)",A->cmap->rstart,A->cmap->rend,B->rmap->rstart,B->rmap->rend);
1948:   }

1950:   /* convert A and B to Elemental matrices Ae and Be */
1951:   MatConvert(A,MATELEMENTAL,MAT_INITIAL_MATRIX, &Ae);
1952:   MatConvert(B,MATELEMENTAL,MAT_INITIAL_MATRIX, &Be);

1954:   /* Ce = Ae*Be */
1955:   MatMatMultSymbolic(Ae,Be,fill,&Ce);
1956:   MatMatMultNumeric(Ae,Be,Ce);
1957: 
1958:   /* convert Ce to C */
1959:   MatConvert(Ce,MATMPIDENSE,MAT_INITIAL_MATRIX,C);

1961:   /* create data structure so *C can be reused in later numeric products */
1962:   PetscNew(&ab);
1963:   c                  = (Mat_MPIDense*)(*C)->data;
1964:   c->abdense         = ab;

1966:   ab->Ae             = Ae;
1967:   ab->Be             = Be;
1968:   ab->Ce             = Ce;
1969:   ab->destroy        = (*C)->ops->destroy;
1970:   (*C)->ops->destroy        = MatDestroy_MatMatMult_MPIDense_MPIDense;
1971:   (*C)->ops->matmultnumeric = MatMatMultNumeric_MPIDense_MPIDense;
1972:   return(0);
1973: }

1977: PETSC_INTERN PetscErrorCode MatMatMult_MPIDense_MPIDense(Mat A,Mat B,MatReuse scall,PetscReal fill,Mat *C)
1978: {

1982:   if (scall == MAT_INITIAL_MATRIX) { /* symbolic product includes the numeric product */
1983:     PetscLogEventBegin(MAT_MatMultSymbolic,A,B,0,0);
1984:     MatMatMultSymbolic_MPIDense_MPIDense(A,B,fill,C);
1985:     PetscLogEventEnd(MAT_MatMultSymbolic,A,B,0,0);
1986:   } else {
1987:     PetscLogEventBegin(MAT_MatMultNumeric,A,B,0,0);
1988:     MatMatMultNumeric_MPIDense_MPIDense(A,B,*C);
1989:     PetscLogEventEnd(MAT_MatMultNumeric,A,B,0,0);
1990:   }
1991:   return(0);
1992: }
1993: #endif
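/*
   A hedged usage sketch with an illustrative helper name: for two MPIDENSE
   matrices the public MatMatMult() reaches the routines above only when PETSc
   was configured with Elemental; the fill argument is not meaningful for
   dense products, so PETSC_DEFAULT suffices:
*/
static PetscErrorCode ExampleAB_Sketch(Mat A,Mat B,Mat *C)
{
  MatMatMult(A,B,MAT_INITIAL_MATRIX,PETSC_DEFAULT,C); /* C = A*B, converted through Elemental internally */
  return(0);
}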