Actual source code: pbvec.c

petsc-3.8.2 2017-11-09

  2: /*
  3:    This file contains routines for Parallel vector operations.
  4:  */
  5:  #include <petscoptions.h>
  6:  #include <../src/vec/vec/impls/mpi/pvecimpl.h>

  8: static PetscErrorCode VecDot_MPI(Vec xin,Vec yin,PetscScalar *z)
  9: {
 10:   PetscScalar    sum,work;

 14:   VecDot_Seq(xin,yin,&work);
 15:   MPIU_Allreduce(&work,&sum,1,MPIU_SCALAR,MPIU_SUM,PetscObjectComm((PetscObject)xin));
 16:   *z   = sum;
 17:   return(0);
 18: }

 20: static PetscErrorCode VecTDot_MPI(Vec xin,Vec yin,PetscScalar *z)
 21: {
 22:   PetscScalar    sum,work;

 26:   VecTDot_Seq(xin,yin,&work);
 27:   MPIU_Allreduce(&work,&sum,1,MPIU_SCALAR,MPIU_SUM,PetscObjectComm((PetscObject)xin));
 28:   *z   = sum;
 29:   return(0);
 30: }

 32: extern PetscErrorCode VecView_MPI_Draw(Vec,PetscViewer);

 34: static PetscErrorCode VecPlaceArray_MPI(Vec vin,const PetscScalar *a)
 35: {
 37:   Vec_MPI        *v = (Vec_MPI*)vin->data;

 40:   if (v->unplacedarray) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"VecPlaceArray() was already called on this vector, without a call to VecResetArray()");
 41:   v->unplacedarray = v->array;  /* save previous array so reset can bring it back */
 42:   v->array         = (PetscScalar*)a;
 43:   if (v->localrep) {
 44:     VecPlaceArray(v->localrep,a);
 45:   }
 46:   return(0);
 47: }

 49: static PetscErrorCode VecDuplicate_MPI(Vec win,Vec *v)
 50: {
 52:   Vec_MPI        *vw,*w = (Vec_MPI*)win->data;
 53:   PetscScalar    *array;

 56:   VecCreate(PetscObjectComm((PetscObject)win),v);
 57:   PetscLayoutReference(win->map,&(*v)->map);

 59:   VecCreate_MPI_Private(*v,PETSC_TRUE,w->nghost,0);
 60:   vw   = (Vec_MPI*)(*v)->data;
 61:   PetscMemcpy((*v)->ops,win->ops,sizeof(struct _VecOps));

 63:   /* save local representation of the parallel vector (and scatter) if it exists */
 64:   if (w->localrep) {
 65:     VecGetArray(*v,&array);
 66:     VecCreateSeqWithArray(PETSC_COMM_SELF,PetscAbs(win->map->bs),win->map->n+w->nghost,array,&vw->localrep);
 67:     PetscMemcpy(vw->localrep->ops,w->localrep->ops,sizeof(struct _VecOps));
 68:     VecRestoreArray(*v,&array);
 69:     PetscLogObjectParent((PetscObject)*v,(PetscObject)vw->localrep);

 71:     vw->localupdate = w->localupdate;
 72:     if (vw->localupdate) {
 73:       PetscObjectReference((PetscObject)vw->localupdate);
 74:     }
 75:   }

 77:   /* New vector should inherit stashing property of parent */
 78:   (*v)->stash.donotstash   = win->stash.donotstash;
 79:   (*v)->stash.ignorenegidx = win->stash.ignorenegidx;

 81:   PetscObjectListDuplicate(((PetscObject)win)->olist,&((PetscObject)(*v))->olist);
 82:   PetscFunctionListDuplicate(((PetscObject)win)->qlist,&((PetscObject)(*v))->qlist);

 84:   (*v)->map->bs   = PetscAbs(win->map->bs);
 85:   (*v)->bstash.bs = win->bstash.bs;
 86:   return(0);
 87: }


 90: static PetscErrorCode VecSetOption_MPI(Vec V,VecOption op,PetscBool flag)
 91: {
 92:   Vec_MPI        *v = (Vec_MPI*)V->data;
 94:   switch (op) {
 95:   case VEC_IGNORE_OFF_PROC_ENTRIES: V->stash.donotstash = flag;
 96:     break;
 97:   case VEC_IGNORE_NEGATIVE_INDICES: V->stash.ignorenegidx = flag;
 98:     break;
 99:   case VEC_SUBSET_OFF_PROC_ENTRIES: v->assembly_subset = flag;
100:     break;
101:   }
102:   return(0);
103: }


106: static PetscErrorCode VecResetArray_MPI(Vec vin)
107: {
108:   Vec_MPI        *v = (Vec_MPI*)vin->data;

112:   v->array         = v->unplacedarray;
113:   v->unplacedarray = 0;
114:   if (v->localrep) {
115:     VecResetArray(v->localrep);
116:   }
117:   return(0);
118: }

120: static PetscErrorCode VecAssemblySend_MPI_Private(MPI_Comm comm,const PetscMPIInt tag[],PetscMPIInt rankid,PetscMPIInt rank,void *sdata,MPI_Request req[],void *ctx)
121: {
122:   Vec X = (Vec)ctx;
123:   Vec_MPI *x = (Vec_MPI*)X->data;
124:   VecAssemblyHeader *hdr = (VecAssemblyHeader*)sdata;
125:   PetscInt bs = X->map->bs;

129:   /* x->recvhdr only exists when we are reusing a communication network.  In that case, some messages can be empty, but
130:    * we have to send them this time if we sent them before because the receiver is expecting them. */
131:   if (hdr->count || (x->recvhdr && x->sendptrs[rankid].ints)) {
132:     MPI_Isend(x->sendptrs[rankid].ints,hdr->count,MPIU_INT,rank,tag[0],comm,&req[0]);
133:     MPI_Isend(x->sendptrs[rankid].scalars,hdr->count,MPIU_SCALAR,rank,tag[1],comm,&req[1]);
134:   }
135:   if (hdr->bcount || (x->recvhdr && x->sendptrs[rankid].intb)) {
136:     MPI_Isend(x->sendptrs[rankid].intb,hdr->bcount,MPIU_INT,rank,tag[2],comm,&req[2]);
137:     MPI_Isend(x->sendptrs[rankid].scalarb,hdr->bcount*bs,MPIU_SCALAR,rank,tag[3],comm,&req[3]);
138:   }
139:   return(0);
140: }

142: static PetscErrorCode VecAssemblyRecv_MPI_Private(MPI_Comm comm,const PetscMPIInt tag[],PetscMPIInt rank,void *rdata,MPI_Request req[],void *ctx)
143: {
144:   Vec X = (Vec)ctx;
145:   Vec_MPI *x = (Vec_MPI*)X->data;
146:   VecAssemblyHeader *hdr = (VecAssemblyHeader*)rdata;
148:   PetscInt bs = X->map->bs;
149:   VecAssemblyFrame *frame;

152:   PetscSegBufferGet(x->segrecvframe,1,&frame);

154:   if (hdr->count) {
155:     PetscSegBufferGet(x->segrecvint,hdr->count,&frame->ints);
156:     MPI_Irecv(frame->ints,hdr->count,MPIU_INT,rank,tag[0],comm,&req[0]);
157:     PetscSegBufferGet(x->segrecvscalar,hdr->count,&frame->scalars);
158:     MPI_Irecv(frame->scalars,hdr->count,MPIU_SCALAR,rank,tag[1],comm,&req[1]);
159:     frame->pendings = 2;
160:   } else {
161:     frame->ints = NULL;
162:     frame->scalars = NULL;
163:     frame->pendings = 0;
164:   }

166:   if (hdr->bcount) {
167:     PetscSegBufferGet(x->segrecvint,hdr->bcount,&frame->intb);
168:     MPI_Irecv(frame->intb,hdr->bcount,MPIU_INT,rank,tag[2],comm,&req[2]);
169:     PetscSegBufferGet(x->segrecvscalar,hdr->bcount*bs,&frame->scalarb);
170:     MPI_Irecv(frame->scalarb,hdr->bcount*bs,MPIU_SCALAR,rank,tag[3],comm,&req[3]);
171:     frame->pendingb = 2;
172:   } else {
173:     frame->intb = NULL;
174:     frame->scalarb = NULL;
175:     frame->pendingb = 0;
176:   }
177:   return(0);
178: }

180: static PetscErrorCode VecAssemblyBegin_MPI_BTS(Vec X)
181: {
182:   Vec_MPI        *x = (Vec_MPI*)X->data;
184:   MPI_Comm       comm;
185:   PetscInt       i,j,jb,bs;

188:   if (X->stash.donotstash) return(0);

190:   PetscObjectGetComm((PetscObject)X,&comm);
191:   VecGetBlockSize(X,&bs);
192: #if defined(PETSC_USE_DEBUG)
193:   {
194:     InsertMode addv;
195:     MPIU_Allreduce((PetscEnum*)&X->stash.insertmode,(PetscEnum*)&addv,1,MPIU_ENUM,MPI_BOR,comm);
196:     if (addv == (ADD_VALUES|INSERT_VALUES)) SETERRQ(comm,PETSC_ERR_ARG_NOTSAMETYPE,"Some processors inserted values while others added");
197:   }
198: #endif
199:   X->bstash.insertmode = X->stash.insertmode; /* Block stash implicitly tracks InsertMode of scalar stash */

201:   VecStashSortCompress_Private(&X->stash);
202:   VecStashSortCompress_Private(&X->bstash);

204:   if (!x->sendranks) {
205:     PetscMPIInt nowners,bnowners,*owners,*bowners;
206:     PetscInt ntmp;
207:     VecStashGetOwnerList_Private(&X->stash,X->map,&nowners,&owners);
208:     VecStashGetOwnerList_Private(&X->bstash,X->map,&bnowners,&bowners);
209:     PetscMergeMPIIntArray(nowners,owners,bnowners,bowners,&ntmp,&x->sendranks);
210:     x->nsendranks = ntmp;
211:     PetscFree(owners);
212:     PetscFree(bowners);
213:     PetscMalloc1(x->nsendranks,&x->sendhdr);
214:     PetscCalloc1(x->nsendranks,&x->sendptrs);
215:   }
216:   for (i=0,j=0,jb=0; i<x->nsendranks; i++) {
217:     PetscMPIInt rank = x->sendranks[i];
218:     x->sendhdr[i].insertmode = X->stash.insertmode;
219:     /* Initialize pointers for non-empty stashes the first time around.  Subsequent assemblies with
220:      * VEC_SUBSET_OFF_PROC_ENTRIES will leave the old pointers (dangling because the stash has been collected) when
221:      * there is nothing new to send, so that size-zero messages get sent instead. */
222:     x->sendhdr[i].count = 0;
223:     if (X->stash.n) {
224:       x->sendptrs[i].ints    = &X->stash.idx[j];
225:       x->sendptrs[i].scalars = &X->stash.array[j];
226:       for ( ; j<X->stash.n && X->stash.idx[j] < X->map->range[rank+1]; j++) x->sendhdr[i].count++;
227:     }
228:     x->sendhdr[i].bcount = 0;
229:     if (X->bstash.n) {
230:       x->sendptrs[i].intb    = &X->bstash.idx[jb];
231:       x->sendptrs[i].scalarb = &X->bstash.array[jb*bs];
232:       for ( ; jb<X->bstash.n && X->bstash.idx[jb]*bs < X->map->range[rank+1]; jb++) x->sendhdr[i].bcount++;
233:     }
234:   }

236:   if (!x->segrecvint) {PetscSegBufferCreate(sizeof(PetscInt),1000,&x->segrecvint);}
237:   if (!x->segrecvscalar) {PetscSegBufferCreate(sizeof(PetscScalar),1000,&x->segrecvscalar);}
238:   if (!x->segrecvframe) {PetscSegBufferCreate(sizeof(VecAssemblyFrame),50,&x->segrecvframe);}
239:   if (x->recvhdr) {             /* VEC_SUBSET_OFF_PROC_ENTRIES and this is not the first assembly */
240:     PetscMPIInt tag[4];
241:     if (!x->assembly_subset) SETERRQ(comm,PETSC_ERR_PLIB,"Attempt to reuse rendezvous when not VEC_SUBSET_OFF_PROC_ENTRIES");
242:     for (i=0; i<4; i++) {PetscCommGetNewTag(comm,&tag[i]);}
243:     for (i=0; i<x->nsendranks; i++) {
244:       VecAssemblySend_MPI_Private(comm,tag,i,x->sendranks[i],x->sendhdr+i,x->sendreqs+4*i,X);
245:     }
246:     for (i=0; i<x->nrecvranks; i++) {
247:       VecAssemblyRecv_MPI_Private(comm,tag,x->recvranks[i],x->recvhdr+i,x->recvreqs+4*i,X);
248:     }
249:     x->use_status = PETSC_TRUE;
250:   } else {                      /* First time */
251:     PetscCommBuildTwoSidedFReq(comm,3,MPIU_INT,x->nsendranks,x->sendranks,(PetscInt*)x->sendhdr,&x->nrecvranks,&x->recvranks,&x->recvhdr,4,&x->sendreqs,&x->recvreqs,VecAssemblySend_MPI_Private,VecAssemblyRecv_MPI_Private,X);
252:     x->use_status = PETSC_FALSE;
253:   }

255:   {
256:     PetscInt nstash,reallocs;
257:     VecStashGetInfo_Private(&X->stash,&nstash,&reallocs);
258:     PetscInfo2(X,"Stash has %D entries, uses %D mallocs.\n",nstash,reallocs);
259:     VecStashGetInfo_Private(&X->bstash,&nstash,&reallocs);
260:     PetscInfo2(X,"Block-Stash has %D entries, uses %D mallocs.\n",nstash,reallocs);
261:   }
262:   return(0);
263: }

265: static PetscErrorCode VecAssemblyEnd_MPI_BTS(Vec X)
266: {
267:   Vec_MPI *x = (Vec_MPI*)X->data;
268:   PetscInt bs = X->map->bs;
269:   PetscMPIInt npending,*some_indices,r;
270:   MPI_Status  *some_statuses;
271:   PetscScalar *xarray;
273:   VecAssemblyFrame *frame;

276:   if (X->stash.donotstash) {
277:     X->stash.insertmode = NOT_SET_VALUES;
278:     X->bstash.insertmode = NOT_SET_VALUES;
279:     return(0);
280:   }

282:   VecGetArray(X,&xarray);
283:   PetscSegBufferExtractInPlace(x->segrecvframe,&frame);
284:   PetscMalloc2(4*x->nrecvranks,&some_indices,x->use_status?4*x->nrecvranks:0,&some_statuses);
285:   for (r=0,npending=0; r<x->nrecvranks; r++) npending += frame[r].pendings + frame[r].pendingb;
286:   while (npending>0) {
287:     PetscMPIInt ndone,ii;
288:     /* Filling MPI_Status fields requires some resources from the MPI library.  We skip it on the first assembly, or
289:      * when VEC_SUBSET_OFF_PROC_ENTRIES has not been set, because we could exchange exact sizes in the initial
290:      * rendezvous.  When the rendezvous is elided, however, we use MPI_Status to get actual message lengths, so that
291:      * subsequent assembly can set a proper subset of the values. */
292:     MPI_Waitsome(4*x->nrecvranks,x->recvreqs,&ndone,some_indices,x->use_status?some_statuses:MPI_STATUSES_IGNORE);
293:     for (ii=0; ii<ndone; ii++) {
294:       PetscInt i = some_indices[ii]/4,j,k;
295:       InsertMode imode = (InsertMode)x->recvhdr[i].insertmode;
296:       PetscInt *recvint;
297:       PetscScalar *recvscalar;
298:       PetscBool intmsg = (PetscBool)(some_indices[ii]%2 == 0);
299:       PetscBool blockmsg = (PetscBool)((some_indices[ii]%4)/2 == 1);
300:       npending--;
301:       if (!blockmsg) { /* Scalar stash */
302:         PetscMPIInt count;
303:         if (--frame[i].pendings > 0) continue;
304:         if (x->use_status) {
305:           MPI_Get_count(&some_statuses[ii],intmsg ? MPIU_INT : MPIU_SCALAR,&count);
306:         } else count = x->recvhdr[i].count;
307:         for (j=0,recvint=frame[i].ints,recvscalar=frame[i].scalars; j<count; j++,recvint++) {
308:           PetscInt loc = *recvint - X->map->rstart;
309:           if (*recvint < X->map->rstart || X->map->rend <= *recvint) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Received vector entry %D out of local range [%D,%D)",*recvint,X->map->rstart,X->map->rend);
310:           switch (imode) {
311:           case ADD_VALUES:
312:             xarray[loc] += *recvscalar++;
313:             break;
314:           case INSERT_VALUES:
315:             xarray[loc] = *recvscalar++;
316:             break;
317:           default: SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Insert mode not supported 0x%x",imode);
318:           }
319:         }
320:       } else {                  /* Block stash */
321:         PetscMPIInt count;
322:         if (--frame[i].pendingb > 0) continue;
323:         if (x->use_status) {
324:           MPI_Get_count(&some_statuses[ii],intmsg ? MPIU_INT : MPIU_SCALAR,&count);
325:           if (!intmsg) count /= bs; /* Convert from number of scalars to number of blocks */
326:         } else count = x->recvhdr[i].bcount;
327:         for (j=0,recvint=frame[i].intb,recvscalar=frame[i].scalarb; j<count; j++,recvint++) {
328:           PetscInt loc = (*recvint)*bs - X->map->rstart;
329:           switch (imode) {
330:           case ADD_VALUES:
331:             for (k=loc; k<loc+bs; k++) xarray[k] += *recvscalar++;
332:             break;
333:           case INSERT_VALUES:
334:             for (k=loc; k<loc+bs; k++) xarray[k] = *recvscalar++;
335:             break;
336:           default: SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Insert mode not supported 0x%x",imode);
337:           }
338:         }
339:       }
340:     }
341:   }
342:   VecRestoreArray(X,&xarray);
343:   MPI_Waitall(4*x->nsendranks,x->sendreqs,MPI_STATUSES_IGNORE);
344:   PetscFree2(some_indices,some_statuses);
345:   if (x->assembly_subset) {
346:     void *dummy;                /* reset segbuffers */
347:     PetscSegBufferExtractInPlace(x->segrecvint,&dummy);
348:     PetscSegBufferExtractInPlace(x->segrecvscalar,&dummy);
349:   } else {
350:     VecAssemblyReset_MPI(X);
351:   }

353:   X->stash.insertmode = NOT_SET_VALUES;
354:   X->bstash.insertmode = NOT_SET_VALUES;
355:   VecStashScatterEnd_Private(&X->stash);
356:   VecStashScatterEnd_Private(&X->bstash);
357:   return(0);
358: }
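
/*
   Illustrative usage sketch (not part of pbvec.c; the helper name is hypothetical): the assembly
   routines above are reached through the usual VecSetValues()/VecAssemblyBegin()/VecAssemblyEnd()
   sequence.  The BTS variant is selected with -vec_assembly_bts (see VecSetFromOptions_MPI() below),
   and VEC_SUBSET_OFF_PROC_ENTRIES lets later assemblies reuse the communication pattern established
   by the first one.
*/
static PetscErrorCode ExampleAssembleSubset(Vec X,PetscInt nloc,const PetscInt idx[],const PetscScalar vals[])
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = VecSetOption(X,VEC_SUBSET_OFF_PROC_ENTRIES,PETSC_TRUE);CHKERRQ(ierr); /* promise a subset communication pattern */
  ierr = VecSetValues(X,nloc,idx,vals,ADD_VALUES);CHKERRQ(ierr);               /* off-process entries go into the stash */
  ierr = VecAssemblyBegin(X);CHKERRQ(ierr);                                    /* dispatches to VecAssemblyBegin_MPI or _MPI_BTS */
  ierr = VecAssemblyEnd(X);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}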

360: PetscErrorCode VecAssemblyReset_MPI(Vec X)
361: {
362:   Vec_MPI *x = (Vec_MPI*)X->data;

366:   PetscFree(x->sendreqs);
367:   PetscFree(x->recvreqs);
368:   PetscFree(x->sendranks);
369:   PetscFree(x->recvranks);
370:   PetscFree(x->sendhdr);
371:   PetscFree(x->recvhdr);
372:   PetscFree(x->sendptrs);
373:   PetscSegBufferDestroy(&x->segrecvint);
374:   PetscSegBufferDestroy(&x->segrecvscalar);
375:   PetscSegBufferDestroy(&x->segrecvframe);
376:   return(0);
377: }


380: static PetscErrorCode VecSetFromOptions_MPI(PetscOptionItems *PetscOptionsObject,Vec X)
381: {
383:   PetscBool      flg = PETSC_FALSE,set;

386:   PetscOptionsHead(PetscOptionsObject,"VecMPI Options");
387:   PetscOptionsBool("-vec_assembly_bts","Use BuildTwoSided version of assembly","",flg,&flg,&set);
388:   if (set) {
389:     X->ops->assemblybegin = flg ? VecAssemblyBegin_MPI_BTS : VecAssemblyBegin_MPI;
390:     X->ops->assemblyend   = flg ? VecAssemblyEnd_MPI_BTS   : VecAssemblyEnd_MPI;
391:   }
392:   PetscOptionsTail();
393:   return(0);
394: }


397: static struct _VecOps DvOps = { VecDuplicate_MPI, /* 1 */
398:                                 VecDuplicateVecs_Default,
399:                                 VecDestroyVecs_Default,
400:                                 VecDot_MPI,
401:                                 VecMDot_MPI,
402:                                 VecNorm_MPI,
403:                                 VecTDot_MPI,
404:                                 VecMTDot_MPI,
405:                                 VecScale_Seq,
406:                                 VecCopy_Seq, /* 10 */
407:                                 VecSet_Seq,
408:                                 VecSwap_Seq,
409:                                 VecAXPY_Seq,
410:                                 VecAXPBY_Seq,
411:                                 VecMAXPY_Seq,
412:                                 VecAYPX_Seq,
413:                                 VecWAXPY_Seq,
414:                                 VecAXPBYPCZ_Seq,
415:                                 VecPointwiseMult_Seq,
416:                                 VecPointwiseDivide_Seq,
417:                                 VecSetValues_MPI, /* 20 */
418:                                 VecAssemblyBegin_MPI,
419:                                 VecAssemblyEnd_MPI,
420:                                 0,
421:                                 VecGetSize_MPI,
422:                                 VecGetSize_Seq,
423:                                 0,
424:                                 VecMax_MPI,
425:                                 VecMin_MPI,
426:                                 VecSetRandom_Seq,
427:                                 VecSetOption_MPI,
428:                                 VecSetValuesBlocked_MPI,
429:                                 VecDestroy_MPI,
430:                                 VecView_MPI,
431:                                 VecPlaceArray_MPI,
432:                                 VecReplaceArray_Seq,
433:                                 VecDot_Seq,
434:                                 VecTDot_Seq,
435:                                 VecNorm_Seq,
436:                                 VecMDot_Seq,
437:                                 VecMTDot_Seq,
438:                                 VecLoad_Default,
439:                                 VecReciprocal_Default,
440:                                 VecConjugate_Seq,
441:                                 0,
442:                                 0,
443:                                 VecResetArray_MPI,
444:                                 VecSetFromOptions_MPI,/*set from options */
445:                                 VecMaxPointwiseDivide_Seq,
446:                                 VecPointwiseMax_Seq,
447:                                 VecPointwiseMaxAbs_Seq,
448:                                 VecPointwiseMin_Seq,
449:                                 VecGetValues_MPI,
450:                                 0,
451:                                 0,
452:                                 0,
453:                                 0,
454:                                 0,
455:                                 0,
456:                                 VecStrideGather_Default,
457:                                 VecStrideScatter_Default,
458:                                 0,
459:                                 0,
460:                                 0,
461:                                 0,
462:                                 0,
463:                                 VecStrideSubSetGather_Default,
464:                                 VecStrideSubSetScatter_Default,
465:                                 0,
466:                                 0
467: };

469: /*
470:     VecCreate_MPI_Private - Basic create routine called by VecCreate_MPI() (i.e. VecCreateMPI()),
471:     VecCreateMPIWithArray(), VecCreate_Shared() (i.e. VecCreateShared()), VecCreateGhost(),
472:     VecDuplicate_MPI(), VecCreateGhostWithArray(), and VecDuplicate_Shared()

474:     If alloc is true and array is NULL then this routine allocates the space, otherwise
475:     no space is allocated.
476: */
477: PetscErrorCode VecCreate_MPI_Private(Vec v,PetscBool alloc,PetscInt nghost,const PetscScalar array[])
478: {
479:   Vec_MPI        *s;

483:   PetscNewLog(v,&s);
484:   v->data        = (void*)s;
485:   PetscMemcpy(v->ops,&DvOps,sizeof(DvOps));
486:   s->nghost      = nghost;
487:   v->petscnative = PETSC_TRUE;

489:   PetscLayoutSetUp(v->map);

491:   s->array           = (PetscScalar*)array;
492:   s->array_allocated = 0;
493:   if (alloc && !array) {
494:     PetscInt n = v->map->n+nghost;
495:     PetscMalloc1(n,&s->array);
496:     PetscLogObjectMemory((PetscObject)v,n*sizeof(PetscScalar));
497:     PetscMemzero(s->array,n*sizeof(PetscScalar));
498:     s->array_allocated = s->array;
499:   }

501:   /* By default parallel vectors do not have local representation */
502:   s->localrep    = 0;
503:   s->localupdate = 0;

505:   v->stash.insertmode = NOT_SET_VALUES;
506:   v->bstash.insertmode = NOT_SET_VALUES;
507:   /* create the stashes. The block-size for bstash is set later when
508:      VecSetValuesBlocked is called.
509:   */
510:   VecStashCreate_Private(PetscObjectComm((PetscObject)v),1,&v->stash);
511:   VecStashCreate_Private(PetscObjectComm((PetscObject)v),PetscAbs(v->map->bs),&v->bstash);

513: #if defined(PETSC_HAVE_MATLAB_ENGINE)
514:   PetscObjectComposeFunction((PetscObject)v,"PetscMatlabEnginePut_C",VecMatlabEnginePut_Default);
515:   PetscObjectComposeFunction((PetscObject)v,"PetscMatlabEngineGet_C",VecMatlabEngineGet_Default);
516: #endif
517:   PetscObjectChangeTypeName((PetscObject)v,VECMPI);
518:   return(0);
519: }

521: /*MC
522:    VECMPI - VECMPI = "mpi" - The basic parallel vector

524:    Options Database Keys:
525: . -vec_type mpi - sets the vector type to VECMPI during a call to VecSetFromOptions()

527:   Level: beginner

529: .seealso: VecCreate(), VecSetType(), VecSetFromOptions(), VecCreateMPIWithArray(), VECMPI, VecType, VecCreateMPI()
530: M*/

532: PETSC_EXTERN PetscErrorCode VecCreate_MPI(Vec vv)
533: {

537:   VecCreate_MPI_Private(vv,PETSC_TRUE,0,0);
538:   return(0);
539: }
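
/*
   Illustrative usage sketch (not part of pbvec.c; the helper name is hypothetical): a VECMPI vector
   is normally obtained through the generic creation interface rather than by calling VecCreate_MPI()
   directly.
*/
static PetscErrorCode ExampleCreateMPIVec(MPI_Comm comm,PetscInt nlocal,Vec *v)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = VecCreate(comm,v);CHKERRQ(ierr);
  ierr = VecSetSizes(*v,nlocal,PETSC_DECIDE);CHKERRQ(ierr);
  ierr = VecSetType(*v,VECMPI);CHKERRQ(ierr);   /* or VecSetFromOptions(*v) together with -vec_type mpi */
  PetscFunctionReturn(0);
}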

541: /*MC
542:    VECSTANDARD = "standard" - A VECSEQ on one process and VECMPI on more than one process

544:    Options Database Keys:
545: . -vec_type standard - sets the vector type to VECSTANDARD during a call to VecSetFromOptions()

547:   Level: beginner

549: .seealso: VecCreateSeq(), VecCreateMPI()
550: M*/

552: PETSC_EXTERN PetscErrorCode VecCreate_Standard(Vec v)
553: {
555:   PetscMPIInt    size;

558:   MPI_Comm_size(PetscObjectComm((PetscObject)v),&size);
559:   if (size == 1) {
560:     VecSetType(v,VECSEQ);
561:   } else {
562:     VecSetType(v,VECMPI);
563:   }
564:   return(0);
565: }
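
/*
   Illustrative usage sketch (not part of pbvec.c; the helper name is hypothetical): VECSTANDARD is
   what VecSetFromOptions() selects by default, yielding VECSEQ on one process and VECMPI otherwise.
*/
static PetscErrorCode ExampleCreateStandardVec(MPI_Comm comm,PetscInt nlocal,Vec *v)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = VecCreate(comm,v);CHKERRQ(ierr);
  ierr = VecSetSizes(*v,nlocal,PETSC_DECIDE);CHKERRQ(ierr);
  ierr = VecSetFromOptions(*v);CHKERRQ(ierr);   /* honours -vec_type standard (the default) */
  PetscFunctionReturn(0);
}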

567: /*@C
568:    VecCreateMPIWithArray - Creates a parallel, array-style vector,
569:    where the user provides the array space to store the vector values.

571:    Collective on MPI_Comm

573:    Input Parameters:
574: +  comm  - the MPI communicator to use
575: .  bs    - block size, same meaning as VecSetBlockSize()
576: .  n     - local vector length, cannot be PETSC_DECIDE
577: .  N     - global vector length (or PETSC_DECIDE to have it calculated)
578: -  array - the user provided array to store the vector values

580:    Output Parameter:
581: .  vv - the vector

583:    Notes:
584:    Use VecDuplicate() or VecDuplicateVecs() to form additional vectors of the
585:    same type as an existing vector.

587:    If the user-provided array is NULL, then VecPlaceArray() can be used
588:    at a later stage to SET the array for storing the vector values.

590:    PETSc does NOT free the array when the vector is destroyed via VecDestroy().
591:    The user should not free the array until the vector is destroyed.

593:    Level: intermediate

595:    Concepts: vectors^creating with array

597: .seealso: VecCreateSeqWithArray(), VecCreate(), VecDuplicate(), VecDuplicateVecs(), VecCreateGhost(),
598:           VecCreateMPI(), VecCreateGhostWithArray(), VecPlaceArray()

600: @*/
601: PetscErrorCode  VecCreateMPIWithArray(MPI_Comm comm,PetscInt bs,PetscInt n,PetscInt N,const PetscScalar array[],Vec *vv)
602: {

606:   if (n == PETSC_DECIDE) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Must set local size of vector");
607:   PetscSplitOwnership(comm,&n,&N);
608:   VecCreate(comm,vv);
609:   VecSetSizes(*vv,n,N);
610:   VecSetBlockSize(*vv,bs);
611:   VecCreate_MPI_Private(*vv,PETSC_FALSE,0,array);
612:   return(0);
613: }
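
/*
   Illustrative usage sketch (not part of pbvec.c; the helper name is hypothetical): wrapping
   user-managed storage in a parallel vector.  The array must not be freed before VecDestroy().
*/
static PetscErrorCode ExampleWrapArray(MPI_Comm comm,PetscInt n,PetscScalar **array,Vec *v)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = PetscMalloc1(n,array);CHKERRQ(ierr);                                 /* user-owned storage of local length n */
  ierr = VecCreateMPIWithArray(comm,1,n,PETSC_DECIDE,*array,v);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}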

615: /*@C
616:    VecCreateGhostWithArray - Creates a parallel vector with ghost padding on each processor;
617:    the caller allocates the array space.

619:    Collective on MPI_Comm

621:    Input Parameters:
622: +  comm - the MPI communicator to use
623: .  n - local vector length
624: .  N - global vector length (or PETSC_DECIDE to have it calculated, if n is given)
625: .  nghost - number of local ghost points
626: .  ghosts - global indices of ghost points (or NULL if not needed), these do not need to be in increasing order (sorted)
627: -  array - the space to store the vector values (must be of length at least n + nghost)

629:    Output Parameter:
630: .  vv - the global vector representation (without ghost points as part of vector)

632:    Notes:
633:    Use VecGhostGetLocalForm() to access the local, ghosted representation
634:    of the vector.

636:    This also automatically sets the ISLocalToGlobalMapping() for this vector.

638:    Level: advanced

640:    Concepts: vectors^creating with array
641:    Concepts: vectors^ghosted

643: .seealso: VecCreate(), VecGhostGetLocalForm(), VecGhostRestoreLocalForm(),
644:           VecCreateGhost(), VecCreateSeqWithArray(), VecCreateMPIWithArray(),
645:           VecCreateGhostBlock(), VecCreateGhostBlockWithArray(), VecMPISetGhost()

647: @*/
648: PetscErrorCode  VecCreateGhostWithArray(MPI_Comm comm,PetscInt n,PetscInt N,PetscInt nghost,const PetscInt ghosts[],const PetscScalar array[],Vec *vv)
649: {
650:   PetscErrorCode         ierr;
651:   Vec_MPI                *w;
652:   PetscScalar            *larray;
653:   IS                     from,to;
654:   ISLocalToGlobalMapping ltog;
655:   PetscInt               rstart,i,*indices;

658:   *vv = 0;

660:   if (n == PETSC_DECIDE)      SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Must set local size");
661:   if (nghost == PETSC_DECIDE) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Must set local ghost size");
662:   if (nghost < 0)             SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Ghost length must be >= 0");
663:   PetscSplitOwnership(comm,&n,&N);
664:   /* Create global representation */
665:   VecCreate(comm,vv);
666:   VecSetSizes(*vv,n,N);
667:   VecCreate_MPI_Private(*vv,PETSC_TRUE,nghost,array);
668:   w    = (Vec_MPI*)(*vv)->data;
669:   /* Create local representation */
670:   VecGetArray(*vv,&larray);
671:   VecCreateSeqWithArray(PETSC_COMM_SELF,1,n+nghost,larray,&w->localrep);
672:   PetscLogObjectParent((PetscObject)*vv,(PetscObject)w->localrep);
673:   VecRestoreArray(*vv,&larray);

675:   /*
676:        Create scatter context for scattering (updating) ghost values
677:   */
678:   ISCreateGeneral(comm,nghost,ghosts,PETSC_COPY_VALUES,&from);
679:   ISCreateStride(PETSC_COMM_SELF,nghost,n,1,&to);
680:   VecScatterCreate(*vv,from,w->localrep,to,&w->localupdate);
681:   PetscLogObjectParent((PetscObject)*vv,(PetscObject)w->localupdate);
682:   ISDestroy(&to);
683:   ISDestroy(&from);

685:   /* set local to global mapping for ghosted vector */
686:   PetscMalloc1(n+nghost,&indices);
687:   VecGetOwnershipRange(*vv,&rstart,NULL);
688:   for (i=0; i<n; i++) {
689:     indices[i] = rstart + i;
690:   }
691:   for (i=0; i<nghost; i++) {
692:     indices[n+i] = ghosts[i];
693:   }
694:   ISLocalToGlobalMappingCreate(comm,1,n+nghost,indices,PETSC_OWN_POINTER,&ltog);
695:   VecSetLocalToGlobalMapping(*vv,ltog);
696:   ISLocalToGlobalMappingDestroy(&ltog);
697:   return(0);
698: }
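
/*
   Illustrative usage sketch (not part of pbvec.c; the helper name is hypothetical): the
   caller-provided storage must hold the owned part plus the ghost part, i.e. at least n+nghost entries.
*/
static PetscErrorCode ExampleGhostWithArray(MPI_Comm comm,PetscInt n,PetscInt nghost,const PetscInt ghosts[],PetscScalar **array,Vec *v)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = PetscMalloc1(n+nghost,array);CHKERRQ(ierr);   /* owned entries followed by ghost entries */
  ierr = VecCreateGhostWithArray(comm,n,PETSC_DECIDE,nghost,ghosts,*array,v);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}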

700: /*@
701:    VecCreateGhost - Creates a parallel vector with ghost padding on each processor.

703:    Collective on MPI_Comm

705:    Input Parameters:
706: +  comm - the MPI communicator to use
707: .  n - local vector length
708: .  N - global vector length (or PETSC_DECIDE to have it calculated, if n is given)
709: .  nghost - number of local ghost points
710: -  ghosts - global indices of ghost points, these do not need to be in increasing order (sorted)

712:    Output Parameter:
713: .  vv - the global vector representation (without ghost points as part of vector)

715:    Notes:
716:    Use VecGhostGetLocalForm() to access the local, ghosted representation
717:    of the vector.

719:    This also automatically sets the ISLocalToGlobalMapping() for this vector.

721:    Level: advanced

723:    Concepts: vectors^ghosted

725: .seealso: VecCreateSeq(), VecCreate(), VecDuplicate(), VecDuplicateVecs(), VecCreateMPI(),
726:           VecGhostGetLocalForm(), VecGhostRestoreLocalForm(), VecGhostUpdateBegin(),
727:           VecCreateGhostWithArray(), VecCreateMPIWithArray(), VecGhostUpdateEnd(),
728:           VecCreateGhostBlock(), VecCreateGhostBlockWithArray(), VecMPISetGhost()

730: @*/
731: PetscErrorCode  VecCreateGhost(MPI_Comm comm,PetscInt n,PetscInt N,PetscInt nghost,const PetscInt ghosts[],Vec *vv)
732: {

736:   VecCreateGhostWithArray(comm,n,N,nghost,ghosts,0,vv);
737:   return(0);
738: }
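
/*
   Illustrative usage sketch (not part of pbvec.c; the helper name is hypothetical): a typical
   ghosted-vector workflow.  After the owned values are assembled, VecGhostUpdateBegin/End() scatter
   them into the ghost slots and VecGhostGetLocalForm() exposes the sequential n+nghost representation.
*/
static PetscErrorCode ExampleGhostUpdate(MPI_Comm comm,PetscInt n,PetscInt nghost,const PetscInt ghosts[])
{
  PetscErrorCode ierr;
  Vec            v,vlocal;

  PetscFunctionBegin;
  ierr = VecCreateGhost(comm,n,PETSC_DECIDE,nghost,ghosts,&v);CHKERRQ(ierr);
  /* ... set and assemble the owned entries of v ... */
  ierr = VecGhostUpdateBegin(v,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = VecGhostUpdateEnd(v,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = VecGhostGetLocalForm(v,&vlocal);CHKERRQ(ierr);          /* local vector of length n+nghost */
  /* ... read owned and ghost values through vlocal ... */
  ierr = VecGhostRestoreLocalForm(v,&vlocal);CHKERRQ(ierr);
  ierr = VecDestroy(&v);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}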

740: /*@
741:    VecMPISetGhost - Sets the ghost points for an MPI ghost vector

743:    Collective on Vec

745:    Input Parameters:
746: +  vv - the MPI vector
747: .  nghost - number of local ghost points
748: -  ghosts - global indices of ghost points, these do not need to be in increasing order (sorted)


751:    Notes:
752:    Use VecGhostGetLocalForm() to access the local, ghosted representation
753:    of the vector.

755:    This also automatically sets the ISLocalToGlobalMapping() for this vector.

757:    You must call this AFTER you have set the type of the vector (with VecSetType()) and the size (with VecSetSizes()).

759:    Level: advanced

761:    Concepts: vectors^ghosted

763: .seealso: VecCreateSeq(), VecCreate(), VecDuplicate(), VecDuplicateVecs(), VecCreateMPI(),
764:           VecGhostGetLocalForm(), VecGhostRestoreLocalForm(), VecGhostUpdateBegin(),
765:           VecCreateGhostWithArray(), VecCreateMPIWithArray(), VecGhostUpdateEnd(),
766:           VecCreateGhostBlock(), VecCreateGhostBlockWithArray()

768: @*/
769: PetscErrorCode  VecMPISetGhost(Vec vv,PetscInt nghost,const PetscInt ghosts[])
770: {
772:   PetscBool      flg;

775:   PetscObjectTypeCompare((PetscObject)vv,VECMPI,&flg);
776:   /* if this is already a fully instantiated VECMPI, destroy it and rebuild it with ghosting */
777:   if (flg) {
778:     PetscInt               n,N;
779:     Vec_MPI                *w;
780:     PetscScalar            *larray;
781:     IS                     from,to;
782:     ISLocalToGlobalMapping ltog;
783:     PetscInt               rstart,i,*indices;
784:     MPI_Comm               comm;

786:     PetscObjectGetComm((PetscObject)vv,&comm);
787:     n    = vv->map->n;
788:     N    = vv->map->N;
789:     (*vv->ops->destroy)(vv);
790:     VecSetSizes(vv,n,N);
791:     VecCreate_MPI_Private(vv,PETSC_TRUE,nghost,NULL);
792:     w    = (Vec_MPI*)(vv)->data;
793:     /* Create local representation */
794:     VecGetArray(vv,&larray);
795:     VecCreateSeqWithArray(PETSC_COMM_SELF,1,n+nghost,larray,&w->localrep);
796:     PetscLogObjectParent((PetscObject)vv,(PetscObject)w->localrep);
797:     VecRestoreArray(vv,&larray);

799:     /*
800:      Create scatter context for scattering (updating) ghost values
801:      */
802:     ISCreateGeneral(comm,nghost,ghosts,PETSC_COPY_VALUES,&from);
803:     ISCreateStride(PETSC_COMM_SELF,nghost,n,1,&to);
804:     VecScatterCreate(vv,from,w->localrep,to,&w->localupdate);
805:     PetscLogObjectParent((PetscObject)vv,(PetscObject)w->localupdate);
806:     ISDestroy(&to);
807:     ISDestroy(&from);

809:     /* set local to global mapping for ghosted vector */
810:     PetscMalloc1(n+nghost,&indices);
811:     VecGetOwnershipRange(vv,&rstart,NULL);

813:     for (i=0; i<n; i++)      indices[i]   = rstart + i;
814:     for (i=0; i<nghost; i++) indices[n+i] = ghosts[i];

816:     ISLocalToGlobalMappingCreate(comm,1,n+nghost,indices,PETSC_OWN_POINTER,&ltog);
817:     VecSetLocalToGlobalMapping(vv,ltog);
818:     ISLocalToGlobalMappingDestroy(&ltog);
819:   } else if (vv->ops->create == VecCreate_MPI) SETERRQ(PetscObjectComm((PetscObject)vv),PETSC_ERR_ARG_WRONGSTATE,"Must set local or global size before setting ghosting");
820:   else if (!((PetscObject)vv)->type_name) SETERRQ(PetscObjectComm((PetscObject)vv),PETSC_ERR_ARG_WRONGSTATE,"Must set type to VECMPI before ghosting");
821:   return(0);
822: }
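
/*
   Illustrative usage sketch (not part of pbvec.c; the helper name is hypothetical): ghosting an
   existing VECMPI vector; as noted above, the type and sizes must be set before VecMPISetGhost().
*/
static PetscErrorCode ExampleSetGhost(MPI_Comm comm,PetscInt n,PetscInt nghost,const PetscInt ghosts[],Vec *v)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = VecCreate(comm,v);CHKERRQ(ierr);
  ierr = VecSetSizes(*v,n,PETSC_DECIDE);CHKERRQ(ierr);
  ierr = VecSetType(*v,VECMPI);CHKERRQ(ierr);
  ierr = VecMPISetGhost(*v,nghost,ghosts);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}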


825: /* ------------------------------------------------------------------------------------------*/
826: /*@C
827:    VecCreateGhostBlockWithArray - Creates a parallel vector with ghost padding on each processor;
828:    the caller allocates the array space. Indices in the ghost region are based on blocks.

830:    Collective on MPI_Comm

832:    Input Parameters:
833: +  comm - the MPI communicator to use
834: .  bs - block size
835: .  n - local vector length
836: .  N - global vector length (or PETSC_DECIDE to have it calculated, if n is given)
837: .  nghost - number of local ghost blocks
838: .  ghosts - global indices of ghost blocks (or NULL if not needed), counts are by block not by index, these do not need to be in increasing order (sorted)
839: -  array - the space to store the vector values (must be of length at least n + nghost*bs)

841:    Output Parameter:
842: .  vv - the global vector representation (without ghost points as part of vector)

844:    Notes:
845:    Use VecGhostGetLocalForm() to access the local, ghosted representation
846:    of the vector.

848:    n is the local vector size (total local size, not the number of blocks) while nghost
849:    is the number of blocks in the ghost portion, i.e. the number of elements in the ghost
850:    portion is bs*nghost

852:    Level: advanced

854:    Concepts: vectors^creating ghosted
855:    Concepts: vectors^creating with array

857: .seealso: VecCreate(), VecGhostGetLocalForm(), VecGhostRestoreLocalForm(),
858:           VecCreateGhost(), VecCreateSeqWithArray(), VecCreateMPIWithArray(),
859:           VecCreateGhostWithArray(), VecCreateGhostBlock()

861: @*/
862: PetscErrorCode  VecCreateGhostBlockWithArray(MPI_Comm comm,PetscInt bs,PetscInt n,PetscInt N,PetscInt nghost,const PetscInt ghosts[],const PetscScalar array[],Vec *vv)
863: {
864:   PetscErrorCode         ierr;
865:   Vec_MPI                *w;
866:   PetscScalar            *larray;
867:   IS                     from,to;
868:   ISLocalToGlobalMapping ltog;
869:   PetscInt               rstart,i,nb,*indices;

872:   *vv = 0;

874:   if (n == PETSC_DECIDE)      SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Must set local size");
875:   if (nghost == PETSC_DECIDE) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Must set local ghost size");
876:   if (nghost < 0)             SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Ghost length must be >= 0");
877:   if (n % bs)                 SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Local size must be a multiple of block size");
878:   PetscSplitOwnership(comm,&n,&N);
879:   /* Create global representation */
880:   VecCreate(comm,vv);
881:   VecSetSizes(*vv,n,N);
882:   VecSetBlockSize(*vv,bs);
883:   VecCreate_MPI_Private(*vv,PETSC_TRUE,nghost*bs,array);
884:   w    = (Vec_MPI*)(*vv)->data;
885:   /* Create local representation */
886:   VecGetArray(*vv,&larray);
887:   VecCreateSeqWithArray(PETSC_COMM_SELF,bs,n+bs*nghost,larray,&w->localrep);
888:   PetscLogObjectParent((PetscObject)*vv,(PetscObject)w->localrep);
889:   VecRestoreArray(*vv,&larray);

891:   /*
892:        Create scatter context for scattering (updating) ghost values
893:   */
894:   ISCreateBlock(comm,bs,nghost,ghosts,PETSC_COPY_VALUES,&from);
895:   ISCreateStride(PETSC_COMM_SELF,bs*nghost,n,1,&to);
896:   VecScatterCreate(*vv,from,w->localrep,to,&w->localupdate);
897:   PetscLogObjectParent((PetscObject)*vv,(PetscObject)w->localupdate);
898:   ISDestroy(&to);
899:   ISDestroy(&from);

901:   /* set local to global mapping for ghosted vector */
902:   nb   = n/bs;
903:   PetscMalloc1(nb+nghost,&indices);
904:   VecGetOwnershipRange(*vv,&rstart,NULL);

906:   for (i=0; i<nb; i++)      indices[i]    = rstart + i;
907:   for (i=0; i<nghost; i++)  indices[nb+i] = ghosts[i];

909:   ISLocalToGlobalMappingCreate(comm,bs,nb+nghost,indices,PETSC_OWN_POINTER,&ltog);
910:   VecSetLocalToGlobalMapping(*vv,ltog);
911:   ISLocalToGlobalMappingDestroy(&ltog);
912:   return(0);
913: }
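
/*
   Illustrative usage sketch (not part of pbvec.c; the helper name is hypothetical): with block size
   bs the caller's storage needs n + nghost*bs entries, and the ghost indices are block indices.
*/
static PetscErrorCode ExampleGhostBlockWithArray(MPI_Comm comm,PetscInt bs,PetscInt n,PetscInt nghost,const PetscInt ghostblocks[],PetscScalar **array,Vec *v)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = PetscMalloc1(n+nghost*bs,array);CHKERRQ(ierr);   /* owned entries plus nghost ghost blocks */
  ierr = VecCreateGhostBlockWithArray(comm,bs,n,PETSC_DECIDE,nghost,ghostblocks,*array,v);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}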

915: /*@
916:    VecCreateGhostBlock - Creates a parallel vector with ghost padding on each processor.
917:         The indexing of the ghost points is done with blocks.

919:    Collective on MPI_Comm

921:    Input Parameters:
922: +  comm - the MPI communicator to use
923: .  bs - the block size
924: .  n - local vector length
925: .  N - global vector length (or PETSC_DECIDE to have it calculated, if n is given)
926: .  nghost - number of local ghost blocks
927: -  ghosts - global indices of ghost blocks, counts are by block, not by individual index, these do not need to be in increasing order (sorted)

929:    Output Parameter:
930: .  vv - the global vector representation (without ghost points as part of vector)

932:    Notes:
933:    Use VecGhostGetLocalForm() to access the local, ghosted representation
934:    of the vector.

936:    n is the local vector size (total local size, not the number of blocks) while nghost
937:    is the number of blocks in the ghost portion, i.e. the number of elements in the ghost
938:    portion is bs*nghost

940:    Level: advanced

942:    Concepts: vectors^ghosted

944: .seealso: VecCreateSeq(), VecCreate(), VecDuplicate(), VecDuplicateVecs(), VecCreateMPI(),
945:           VecGhostGetLocalForm(), VecGhostRestoreLocalForm(),
946:           VecCreateGhostWithArray(), VecCreateMPIWithArray(), VecCreateGhostBlockWithArray()

948: @*/
949: PetscErrorCode  VecCreateGhostBlock(MPI_Comm comm,PetscInt bs,PetscInt n,PetscInt N,PetscInt nghost,const PetscInt ghosts[],Vec *vv)
950: {

954:   VecCreateGhostBlockWithArray(comm,bs,n,N,nghost,ghosts,0,vv);
955:   return(0);
956: }
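
/*
   Illustrative usage sketch (not part of pbvec.c; the helper name is hypothetical): n is given in
   scalar entries (a multiple of bs), while nghost and the entries of ghostblocks[] count blocks.
*/
static PetscErrorCode ExampleGhostBlock(MPI_Comm comm,PetscInt bs,PetscInt nblocks,PetscInt nghostblocks,const PetscInt ghostblocks[],Vec *v)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = VecCreateGhostBlock(comm,bs,bs*nblocks,PETSC_DECIDE,nghostblocks,ghostblocks,v);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}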