Actual source code: pbvec.c

petsc-master 2020-07-04

  2: /*
  3:    This file contains routines for Parallel vector operations.
  4:  */
  5:  #include <petscsys.h>
  6:  #include <../src/vec/vec/impls/mpi/pvecimpl.h>

  8: PetscErrorCode VecDot_MPI(Vec xin,Vec yin,PetscScalar *z)
  9: {
 10:   PetscScalar    sum,work;

 14:   VecDot_Seq(xin,yin,&work);
 15:   MPIU_Allreduce(&work,&sum,1,MPIU_SCALAR,MPIU_SUM,PetscObjectComm((PetscObject)xin));
 16:   *z   = sum;
 17:   return(0);
 18: }

 20: PetscErrorCode VecTDot_MPI(Vec xin,Vec yin,PetscScalar *z)
 21: {
 22:   PetscScalar    sum,work;

 26:   VecTDot_Seq(xin,yin,&work);
 27:   MPIU_Allreduce(&work,&sum,1,MPIU_SCALAR,MPIU_SUM,PetscObjectComm((PetscObject)xin));
 28:   *z   = sum;
 29:   return(0);
 30: }

 32: extern PetscErrorCode VecView_MPI_Draw(Vec,PetscViewer);

 34: static PetscErrorCode VecPlaceArray_MPI(Vec vin,const PetscScalar *a)
 35: {
 37:   Vec_MPI        *v = (Vec_MPI*)vin->data;

 40:   if (v->unplacedarray) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"VecPlaceArray() was already called on this vector, without a call to VecResetArray()");
 41:   v->unplacedarray = v->array;  /* save previous array so reset can bring it back */
 42:   v->array         = (PetscScalar*)a;
 43:   if (v->localrep) {
 44:     VecPlaceArray(v->localrep,a);
 45:   }
 46:   return(0);
 47: }

 49: static PetscErrorCode VecDuplicate_MPI(Vec win,Vec *v)
 50: {
 52:   Vec_MPI        *vw,*w = (Vec_MPI*)win->data;
 53:   PetscScalar    *array;

 56:   VecCreate(PetscObjectComm((PetscObject)win),v);
 57:   PetscLayoutReference(win->map,&(*v)->map);

 59:   VecCreate_MPI_Private(*v,PETSC_TRUE,w->nghost,0);
 60:   vw   = (Vec_MPI*)(*v)->data;
 61:   PetscMemcpy((*v)->ops,win->ops,sizeof(struct _VecOps));

 63:   /* save local representation of the parallel vector (and scatter) if it exists */
 64:   if (w->localrep) {
 65:     VecGetArray(*v,&array);
 66:     VecCreateSeqWithArray(PETSC_COMM_SELF,PetscAbs(win->map->bs),win->map->n+w->nghost,array,&vw->localrep);
 67:     PetscMemcpy(vw->localrep->ops,w->localrep->ops,sizeof(struct _VecOps));
 68:     VecRestoreArray(*v,&array);
 69:     PetscLogObjectParent((PetscObject)*v,(PetscObject)vw->localrep);

 71:     vw->localupdate = w->localupdate;
 72:     if (vw->localupdate) {
 73:       PetscObjectReference((PetscObject)vw->localupdate);
 74:     }
 75:   }

 77:   /* New vector should inherit stashing property of parent */
 78:   (*v)->stash.donotstash   = win->stash.donotstash;
 79:   (*v)->stash.ignorenegidx = win->stash.ignorenegidx;

 81:   PetscObjectListDuplicate(((PetscObject)win)->olist,&((PetscObject)(*v))->olist);
 82:   PetscFunctionListDuplicate(((PetscObject)win)->qlist,&((PetscObject)(*v))->qlist);

 84:   (*v)->map->bs   = PetscAbs(win->map->bs);
 85:   (*v)->bstash.bs = win->bstash.bs;
 86:   return(0);
 87: }


 90: static PetscErrorCode VecSetOption_MPI(Vec V,VecOption op,PetscBool flag)
 91: {
 92:   Vec_MPI        *v = (Vec_MPI*)V->data;

 96:   switch (op) {
 97:   case VEC_IGNORE_OFF_PROC_ENTRIES: V->stash.donotstash = flag;
 98:     break;
 99:   case VEC_IGNORE_NEGATIVE_INDICES: V->stash.ignorenegidx = flag;
100:     break;
101:   case VEC_SUBSET_OFF_PROC_ENTRIES:
102:     v->assembly_subset = flag; /* See the same logic in MatAssembly wrt MAT_SUBSET_OFF_PROC_ENTRIES */
103:     if (!v->assembly_subset) { /* User indicates "do not reuse the communication pattern" */
104:       VecAssemblyReset_MPI(V); /* Reset existing pattern to free memory */
105:       v->first_assembly_done = PETSC_FALSE; /* Mark that the first assembly has not been done */
106:     }
107:     break;
108:   }

110:   return(0);
111: }
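
/*
   Illustrative sketch (not part of the original source): how a caller might use the
   VEC_SUBSET_OFF_PROC_ENTRIES case handled above.  Setting the option promises that later
   assemblies communicate (a subset of) the off-process entries of the first assembly, so the
   communication pattern built then can be reused.  Names are hypothetical; error checking is
   omitted to match the stripped listing.

     VecSetOption(X,VEC_SUBSET_OFF_PROC_ENTRIES,PETSC_TRUE);
     for (step=0; step<nsteps; step++) {
       VecSetValues(X,m,idx,vals,ADD_VALUES);    // idx may contain off-process indices
       VecAssemblyBegin(X);
       VecAssemblyEnd(X);                        // after the first pass the pattern is reused
     }
*/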


114: static PetscErrorCode VecResetArray_MPI(Vec vin)
115: {
116:   Vec_MPI        *v = (Vec_MPI*)vin->data;

120:   v->array         = v->unplacedarray;
121:   v->unplacedarray = 0;
122:   if (v->localrep) {
123:     VecResetArray(v->localrep);
124:   }
125:   return(0);
126: }

128: static PetscErrorCode VecAssemblySend_MPI_Private(MPI_Comm comm,const PetscMPIInt tag[],PetscMPIInt rankid,PetscMPIInt rank,void *sdata,MPI_Request req[],void *ctx)
129: {
130:   Vec X = (Vec)ctx;
131:   Vec_MPI *x = (Vec_MPI*)X->data;
132:   VecAssemblyHeader *hdr = (VecAssemblyHeader*)sdata;
133:   PetscInt bs = X->map->bs;

137:   /* x->first_assembly_done indicates we are reusing a communication pattern. In that case, some
138:      messages can be empty, but we still have to send them if we sent them before, because the
139:      receiver is expecting them.
140:    */
141:   if (hdr->count || (x->first_assembly_done && x->sendptrs[rankid].ints)) {
142:     MPI_Isend(x->sendptrs[rankid].ints,hdr->count,MPIU_INT,rank,tag[0],comm,&req[0]);
143:     MPI_Isend(x->sendptrs[rankid].scalars,hdr->count,MPIU_SCALAR,rank,tag[1],comm,&req[1]);
144:   }
145:   if (hdr->bcount || (x->first_assembly_done && x->sendptrs[rankid].intb)) {
146:     MPI_Isend(x->sendptrs[rankid].intb,hdr->bcount,MPIU_INT,rank,tag[2],comm,&req[2]);
147:     MPI_Isend(x->sendptrs[rankid].scalarb,hdr->bcount*bs,MPIU_SCALAR,rank,tag[3],comm,&req[3]);
148:   }
149:   return(0);
150: }

152: static PetscErrorCode VecAssemblyRecv_MPI_Private(MPI_Comm comm,const PetscMPIInt tag[],PetscMPIInt rank,void *rdata,MPI_Request req[],void *ctx)
153: {
154:   Vec X = (Vec)ctx;
155:   Vec_MPI *x = (Vec_MPI*)X->data;
156:   VecAssemblyHeader *hdr = (VecAssemblyHeader*)rdata;
158:   PetscInt bs = X->map->bs;
159:   VecAssemblyFrame *frame;

162:   PetscSegBufferGet(x->segrecvframe,1,&frame);

164:   if (hdr->count) {
165:     PetscSegBufferGet(x->segrecvint,hdr->count,&frame->ints);
166:     MPI_Irecv(frame->ints,hdr->count,MPIU_INT,rank,tag[0],comm,&req[0]);
167:     PetscSegBufferGet(x->segrecvscalar,hdr->count,&frame->scalars);
168:     MPI_Irecv(frame->scalars,hdr->count,MPIU_SCALAR,rank,tag[1],comm,&req[1]);
169:     frame->pendings = 2;
170:   } else {
171:     frame->ints = NULL;
172:     frame->scalars = NULL;
173:     frame->pendings = 0;
174:   }

176:   if (hdr->bcount) {
177:     PetscSegBufferGet(x->segrecvint,hdr->bcount,&frame->intb);
178:     MPI_Irecv(frame->intb,hdr->bcount,MPIU_INT,rank,tag[2],comm,&req[2]);
179:     PetscSegBufferGet(x->segrecvscalar,hdr->bcount*bs,&frame->scalarb);
180:     MPI_Irecv(frame->scalarb,hdr->bcount*bs,MPIU_SCALAR,rank,tag[3],comm,&req[3]);
181:     frame->pendingb = 2;
182:   } else {
183:     frame->intb = NULL;
184:     frame->scalarb = NULL;
185:     frame->pendingb = 0;
186:   }
187:   return(0);
188: }

190: static PetscErrorCode VecAssemblyBegin_MPI_BTS(Vec X)
191: {
192:   Vec_MPI        *x = (Vec_MPI*)X->data;
194:   MPI_Comm       comm;
195:   PetscInt       i,j,jb,bs;

198:   if (X->stash.donotstash) return(0);

200:   PetscObjectGetComm((PetscObject)X,&comm);
201:   VecGetBlockSize(X,&bs);
202:   if (PetscDefined(USE_DEBUG)) {
203:     InsertMode addv;
204:     MPIU_Allreduce((PetscEnum*)&X->stash.insertmode,(PetscEnum*)&addv,1,MPIU_ENUM,MPI_BOR,comm);
205:     if (addv == (ADD_VALUES|INSERT_VALUES)) SETERRQ(comm,PETSC_ERR_ARG_NOTSAMETYPE,"Some processors inserted values while others added");
206:   }
207:   X->bstash.insertmode = X->stash.insertmode; /* Block stash implicitly tracks InsertMode of scalar stash */

209:   VecStashSortCompress_Private(&X->stash);
210:   VecStashSortCompress_Private(&X->bstash);

212:   if (!x->sendranks) {
213:     PetscMPIInt nowners,bnowners,*owners,*bowners;
214:     PetscInt ntmp;
215:     VecStashGetOwnerList_Private(&X->stash,X->map,&nowners,&owners);
216:     VecStashGetOwnerList_Private(&X->bstash,X->map,&bnowners,&bowners);
217:     PetscMergeMPIIntArray(nowners,owners,bnowners,bowners,&ntmp,&x->sendranks);
218:     x->nsendranks = ntmp;
219:     PetscFree(owners);
220:     PetscFree(bowners);
221:     PetscMalloc1(x->nsendranks,&x->sendhdr);
222:     PetscCalloc1(x->nsendranks,&x->sendptrs);
223:   }
224:   for (i=0,j=0,jb=0; i<x->nsendranks; i++) {
225:     PetscMPIInt rank = x->sendranks[i];
226:     x->sendhdr[i].insertmode = X->stash.insertmode;
227:     /* Initialize pointers for non-empty stashes the first time around.  Subsequent assemblies with
228:      * VEC_SUBSET_OFF_PROC_ENTRIES will leave the old pointers (dangling because the stash has been collected) when
229:      * there is nothing new to send, so that size-zero messages get sent instead. */
230:     x->sendhdr[i].count = 0;
231:     if (X->stash.n) {
232:       x->sendptrs[i].ints    = &X->stash.idx[j];
233:       x->sendptrs[i].scalars = &X->stash.array[j];
234:       for ( ; j<X->stash.n && X->stash.idx[j] < X->map->range[rank+1]; j++) x->sendhdr[i].count++;
235:     }
236:     x->sendhdr[i].bcount = 0;
237:     if (X->bstash.n) {
238:       x->sendptrs[i].intb    = &X->bstash.idx[jb];
239:       x->sendptrs[i].scalarb = &X->bstash.array[jb*bs];
240:       for ( ; jb<X->bstash.n && X->bstash.idx[jb]*bs < X->map->range[rank+1]; jb++) x->sendhdr[i].bcount++;
241:     }
242:   }

244:   if (!x->segrecvint) {PetscSegBufferCreate(sizeof(PetscInt),1000,&x->segrecvint);}
245:   if (!x->segrecvscalar) {PetscSegBufferCreate(sizeof(PetscScalar),1000,&x->segrecvscalar);}
246:   if (!x->segrecvframe) {PetscSegBufferCreate(sizeof(VecAssemblyFrame),50,&x->segrecvframe);}
247:   if (x->first_assembly_done) { /* this is not the first assembly */
248:     PetscMPIInt tag[4];
249:     for (i=0; i<4; i++) {PetscCommGetNewTag(comm,&tag[i]);}
250:     for (i=0; i<x->nsendranks; i++) {
251:       VecAssemblySend_MPI_Private(comm,tag,i,x->sendranks[i],x->sendhdr+i,x->sendreqs+4*i,X);
252:     }
253:     for (i=0; i<x->nrecvranks; i++) {
254:       VecAssemblyRecv_MPI_Private(comm,tag,x->recvranks[i],x->recvhdr+i,x->recvreqs+4*i,X);
255:     }
256:     x->use_status = PETSC_TRUE;
257:   } else { /* First time assembly */
258:     PetscCommBuildTwoSidedFReq(comm,3,MPIU_INT,x->nsendranks,x->sendranks,(PetscInt*)x->sendhdr,&x->nrecvranks,&x->recvranks,&x->recvhdr,4,&x->sendreqs,&x->recvreqs,VecAssemblySend_MPI_Private,VecAssemblyRecv_MPI_Private,X);
259:     x->use_status = PETSC_FALSE;
260:   }

262:   /* The first_assembly_done flag is only meaningful when x->assembly_subset is set.
263:      When assembly_subset is set, we mark that the first assembly has been done.
264:    */
265:   x->first_assembly_done = x->assembly_subset;

267:   {
268:     PetscInt nstash,reallocs;
269:     VecStashGetInfo_Private(&X->stash,&nstash,&reallocs);
270:     PetscInfo2(X,"Stash has %D entries, uses %D mallocs.\n",nstash,reallocs);
271:     VecStashGetInfo_Private(&X->bstash,&nstash,&reallocs);
272:     PetscInfo2(X,"Block-Stash has %D entries, uses %D mallocs.\n",nstash,reallocs);
273:   }
274:   return(0);
275: }

277: static PetscErrorCode VecAssemblyEnd_MPI_BTS(Vec X)
278: {
279:   Vec_MPI *x = (Vec_MPI*)X->data;
280:   PetscInt bs = X->map->bs;
281:   PetscMPIInt npending,*some_indices,r;
282:   MPI_Status  *some_statuses;
283:   PetscScalar *xarray;
285:   VecAssemblyFrame *frame;

288:   if (X->stash.donotstash) {
289:     X->stash.insertmode = NOT_SET_VALUES;
290:     X->bstash.insertmode = NOT_SET_VALUES;
291:     return(0);
292:   }

294:   if (!x->segrecvframe) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing segrecvframe! Probably you forgot to call VecAssemblyBegin() first");
295:   VecGetArray(X,&xarray);
296:   PetscSegBufferExtractInPlace(x->segrecvframe,&frame);
297:   PetscMalloc2(4*x->nrecvranks,&some_indices,x->use_status?4*x->nrecvranks:0,&some_statuses);
298:   for (r=0,npending=0; r<x->nrecvranks; r++) npending += frame[r].pendings + frame[r].pendingb;
299:   while (npending>0) {
300:     PetscMPIInt ndone=0,ii;
301:     /* Filling MPI_Status fields requires some resources from the MPI library.  We skip it on the first assembly, or
302:      * when VEC_SUBSET_OFF_PROC_ENTRIES has not been set, because we could exchange exact sizes in the initial
303:      * rendezvous.  When the rendezvous is elided, however, we use MPI_Status to get actual message lengths, so that
304:      * subsequent assembly can set a proper subset of the values. */
305:     MPI_Waitsome(4*x->nrecvranks,x->recvreqs,&ndone,some_indices,x->use_status?some_statuses:MPI_STATUSES_IGNORE);
306:     for (ii=0; ii<ndone; ii++) {
307:       PetscInt i = some_indices[ii]/4,j,k;
308:       InsertMode imode = (InsertMode)x->recvhdr[i].insertmode;
309:       PetscInt *recvint;
310:       PetscScalar *recvscalar;
311:       PetscBool intmsg = (PetscBool)(some_indices[ii]%2 == 0);
312:       PetscBool blockmsg = (PetscBool)((some_indices[ii]%4)/2 == 1);
313:       npending--;
314:       if (!blockmsg) { /* Scalar stash */
315:         PetscMPIInt count;
316:         if (--frame[i].pendings > 0) continue;
317:         if (x->use_status) {
318:           MPI_Get_count(&some_statuses[ii],intmsg ? MPIU_INT : MPIU_SCALAR,&count);
319:         } else count = x->recvhdr[i].count;
320:         for (j=0,recvint=frame[i].ints,recvscalar=frame[i].scalars; j<count; j++,recvint++) {
321:           PetscInt loc = *recvint - X->map->rstart;
322:           if (*recvint < X->map->rstart || X->map->rend <= *recvint) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Received vector entry %D out of local range [%D,%D)",*recvint,X->map->rstart,X->map->rend);
323:           switch (imode) {
324:           case ADD_VALUES:
325:             xarray[loc] += *recvscalar++;
326:             break;
327:           case INSERT_VALUES:
328:             xarray[loc] = *recvscalar++;
329:             break;
330:           default: SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Insert mode not supported 0x%x",imode);
331:           }
332:         }
333:       } else {                  /* Block stash */
334:         PetscMPIInt count;
335:         if (--frame[i].pendingb > 0) continue;
336:         if (x->use_status) {
337:           MPI_Get_count(&some_statuses[ii],intmsg ? MPIU_INT : MPIU_SCALAR,&count);
338:           if (!intmsg) count /= bs; /* Convert from number of scalars to number of blocks */
339:         } else count = x->recvhdr[i].bcount;
340:         for (j=0,recvint=frame[i].intb,recvscalar=frame[i].scalarb; j<count; j++,recvint++) {
341:           PetscInt loc = (*recvint)*bs - X->map->rstart;
342:           switch (imode) {
343:           case ADD_VALUES:
344:             for (k=loc; k<loc+bs; k++) xarray[k] += *recvscalar++;
345:             break;
346:           case INSERT_VALUES:
347:             for (k=loc; k<loc+bs; k++) xarray[k] = *recvscalar++;
348:             break;
349:           default: SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Insert mode not supported 0x%x",imode);
350:           }
351:         }
352:       }
353:     }
354:   }
355:   VecRestoreArray(X,&xarray);
356:   MPI_Waitall(4*x->nsendranks,x->sendreqs,MPI_STATUSES_IGNORE);
357:   PetscFree2(some_indices,some_statuses);
358:   if (x->assembly_subset) {
359:     void *dummy;                /* reset segbuffers */
360:     PetscSegBufferExtractInPlace(x->segrecvint,&dummy);
361:     PetscSegBufferExtractInPlace(x->segrecvscalar,&dummy);
362:   } else {
363:     VecAssemblyReset_MPI(X);
364:   }

366:   X->stash.insertmode = NOT_SET_VALUES;
367:   X->bstash.insertmode = NOT_SET_VALUES;
368:   VecStashScatterEnd_Private(&X->stash);
369:   VecStashScatterEnd_Private(&X->bstash);
370:   return(0);
371: }

373: PetscErrorCode VecAssemblyReset_MPI(Vec X)
374: {
375:   Vec_MPI *x = (Vec_MPI*)X->data;

379:   PetscFree(x->sendreqs);
380:   PetscFree(x->recvreqs);
381:   PetscFree(x->sendranks);
382:   PetscFree(x->recvranks);
383:   PetscFree(x->sendhdr);
384:   PetscFree(x->recvhdr);
385:   PetscFree(x->sendptrs);
386:   PetscSegBufferDestroy(&x->segrecvint);
387:   PetscSegBufferDestroy(&x->segrecvscalar);
388:   PetscSegBufferDestroy(&x->segrecvframe);
389:   return(0);
390: }


393: static PetscErrorCode VecSetFromOptions_MPI(PetscOptionItems *PetscOptionsObject,Vec X)
394: {
395: #if !defined(PETSC_HAVE_MPIUNI)
397:   PetscBool      flg = PETSC_FALSE,set;

400:   PetscOptionsHead(PetscOptionsObject,"VecMPI Options");
401:   PetscOptionsBool("-vec_assembly_legacy","Use MPI 1 version of assembly","",flg,&flg,&set);
402:   if (set) {
403:     X->ops->assemblybegin = flg ? VecAssemblyBegin_MPI : VecAssemblyBegin_MPI_BTS;
404:     X->ops->assemblyend   = flg ? VecAssemblyEnd_MPI   : VecAssemblyEnd_MPI_BTS;
405:   }
406:   PetscOptionsTail();
407: #else
408:   X->ops->assemblybegin =  VecAssemblyBegin_MPI;
409:   X->ops->assemblyend   =  VecAssemblyEnd_MPI;
410: #endif
411:   return(0);
412: }
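
/*
   Illustrative sketch (not part of the original source): selecting the legacy MPI-1 assembly
   routines handled above.  The -vec_assembly_legacy option is read when VecSetFromOptions() is
   called on an MPI vector; it can also be given on the command line.  Names are hypothetical;
   error checking omitted.

     PetscOptionsSetValue(NULL,"-vec_assembly_legacy","true");
     VecCreate(PETSC_COMM_WORLD,&v);
     VecSetSizes(v,nlocal,PETSC_DECIDE);
     VecSetFromOptions(v);                        // VECMPI on more than one rank; applies the option
*/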


415: static struct _VecOps DvOps = { VecDuplicate_MPI, /* 1 */
416:                                 VecDuplicateVecs_Default,
417:                                 VecDestroyVecs_Default,
418:                                 VecDot_MPI,
419:                                 VecMDot_MPI,
420:                                 VecNorm_MPI,
421:                                 VecTDot_MPI,
422:                                 VecMTDot_MPI,
423:                                 VecScale_Seq,
424:                                 VecCopy_Seq, /* 10 */
425:                                 VecSet_Seq,
426:                                 VecSwap_Seq,
427:                                 VecAXPY_Seq,
428:                                 VecAXPBY_Seq,
429:                                 VecMAXPY_Seq,
430:                                 VecAYPX_Seq,
431:                                 VecWAXPY_Seq,
432:                                 VecAXPBYPCZ_Seq,
433:                                 VecPointwiseMult_Seq,
434:                                 VecPointwiseDivide_Seq,
435:                                 VecSetValues_MPI, /* 20 */
436:                                 VecAssemblyBegin_MPI_BTS,
437:                                 VecAssemblyEnd_MPI_BTS,
438:                                 0,
439:                                 VecGetSize_MPI,
440:                                 VecGetSize_Seq,
441:                                 0,
442:                                 VecMax_MPI,
443:                                 VecMin_MPI,
444:                                 VecSetRandom_Seq,
445:                                 VecSetOption_MPI,
446:                                 VecSetValuesBlocked_MPI,
447:                                 VecDestroy_MPI,
448:                                 VecView_MPI,
449:                                 VecPlaceArray_MPI,
450:                                 VecReplaceArray_Seq,
451:                                 VecDot_Seq,
452:                                 VecTDot_Seq,
453:                                 VecNorm_Seq,
454:                                 VecMDot_Seq,
455:                                 VecMTDot_Seq,
456:                                 VecLoad_Default,
457:                                 VecReciprocal_Default,
458:                                 VecConjugate_Seq,
459:                                 0,
460:                                 0,
461:                                 VecResetArray_MPI,
462:                                 VecSetFromOptions_MPI,/*set from options */
463:                                 VecMaxPointwiseDivide_Seq,
464:                                 VecPointwiseMax_Seq,
465:                                 VecPointwiseMaxAbs_Seq,
466:                                 VecPointwiseMin_Seq,
467:                                 VecGetValues_MPI,
468:                                 0,
469:                                 0,
470:                                 0,
471:                                 0,
472:                                 0,
473:                                 0,
474:                                 VecStrideGather_Default,
475:                                 VecStrideScatter_Default,
476:                                 0,
477:                                 0,
478:                                 0,
479:                                 0,
480:                                 0,
481:                                 VecStrideSubSetGather_Default,
482:                                 VecStrideSubSetScatter_Default,
483:                                 0,
484:                                 0
485: };

487: /*
488:     VecCreate_MPI_Private - Basic create routine called by VecCreate_MPI() (i.e. VecCreateMPI()),
489:     VecCreateMPIWithArray(), VecCreate_Shared() (i.e. VecCreateShared()), VecCreateGhost(),
490:     VecDuplicate_MPI(), VecCreateGhostWithArray(), and VecDuplicate_Shared()

492:     If alloc is true and array is NULL then this routine allocates the space, otherwise
493:     no space is allocated.
494: */
495: PetscErrorCode VecCreate_MPI_Private(Vec v,PetscBool alloc,PetscInt nghost,const PetscScalar array[])
496: {
497:   Vec_MPI        *s;

501:   PetscNewLog(v,&s);
502:   v->data        = (void*)s;
503:   PetscMemcpy(v->ops,&DvOps,sizeof(DvOps));
504:   s->nghost      = nghost;
505:   v->petscnative = PETSC_TRUE;

507:   PetscLayoutSetUp(v->map);

509:   s->array           = (PetscScalar*)array;
510:   s->array_allocated = 0;
511:   if (alloc && !array) {
512:     PetscInt n = v->map->n+nghost;
513:     PetscCalloc1(n,&s->array);
514:     PetscLogObjectMemory((PetscObject)v,n*sizeof(PetscScalar));
515:     s->array_allocated = s->array;
516:   }

518:   /* By default parallel vectors do not have local representation */
519:   s->localrep    = 0;
520:   s->localupdate = 0;

522:   v->stash.insertmode = NOT_SET_VALUES;
523:   v->bstash.insertmode = NOT_SET_VALUES;
524:   /* create the stashes. The block-size for bstash is set later when
525:      VecSetValuesBlocked is called.
526:   */
527:   VecStashCreate_Private(PetscObjectComm((PetscObject)v),1,&v->stash);
528:   VecStashCreate_Private(PetscObjectComm((PetscObject)v),PetscAbs(v->map->bs),&v->bstash);

530: #if defined(PETSC_HAVE_MATLAB_ENGINE)
531:   PetscObjectComposeFunction((PetscObject)v,"PetscMatlabEnginePut_C",VecMatlabEnginePut_Default);
532:   PetscObjectComposeFunction((PetscObject)v,"PetscMatlabEngineGet_C",VecMatlabEngineGet_Default);
533: #endif
534:   PetscObjectChangeTypeName((PetscObject)v,VECMPI);
535:   return(0);
536: }

538: /*MC
539:    VECMPI - VECMPI = "mpi" - The basic parallel vector

541:    Options Database Keys:
542: . -vec_type mpi - sets the vector type to VECMPI during a call to VecSetFromOptions()

544:   Level: beginner

546: .seealso: VecCreate(), VecSetType(), VecSetFromOptions(), VecCreateMPIWithArray(), VecType, VecCreateMPI()
547: M*/
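
/*
   Illustrative sketch (not part of the original source): creating a VECMPI vector either
   explicitly or through the options database key documented above.  Names are hypothetical;
   error checking omitted.

     VecCreate(PETSC_COMM_WORLD,&v);
     VecSetSizes(v,PETSC_DECIDE,N);
     VecSetType(v,VECMPI);                        // or VecSetFromOptions(v) with -vec_type mpi
*/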

549: PETSC_EXTERN PetscErrorCode VecCreate_MPI(Vec vv)
550: {

554:   VecCreate_MPI_Private(vv,PETSC_TRUE,0,0);
555:   return(0);
556: }

558: /*MC
559:    VECSTANDARD = "standard" - A VECSEQ on one process and VECMPI on more than one process

561:    Options Database Keys:
562: . -vec_type standard - sets the vector type to VECSTANDARD during a call to VecSetFromOptions()

564:   Level: beginner

566: .seealso: VecCreateSeq(), VecCreateMPI()
567: M*/
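
/*
   Illustrative sketch (not part of the original source): with VECSTANDARD the same code runs
   sequentially and in parallel, since the concrete type is chosen from the communicator size.
   Names are hypothetical.

     VecCreate(comm,&v);
     VecSetSizes(v,nlocal,PETSC_DECIDE);
     VecSetType(v,VECSTANDARD);                   // VECSEQ on one rank, VECMPI otherwise
*/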

569: PETSC_EXTERN PetscErrorCode VecCreate_Standard(Vec v)
570: {
572:   PetscMPIInt    size;

575:   MPI_Comm_size(PetscObjectComm((PetscObject)v),&size);
576:   if (size == 1) {
577:     VecSetType(v,VECSEQ);
578:   } else {
579:     VecSetType(v,VECMPI);
580:   }
581:   return(0);
582: }

584: /*@C
585:    VecCreateMPIWithArray - Creates a parallel, array-style vector,
586:    where the user provides the array space to store the vector values.

588:    Collective

590:    Input Parameters:
591: +  comm  - the MPI communicator to use
592: .  bs    - block size, same meaning as VecSetBlockSize()
593: .  n     - local vector length, cannot be PETSC_DECIDE
594: .  N     - global vector length (or PETSC_DECIDE to have it calculated)
595: -  array - the user provided array to store the vector values

597:    Output Parameter:
598: .  vv - the vector

600:    Notes:
601:    Use VecDuplicate() or VecDuplicateVecs() to form additional vectors of the
602:    same type as an existing vector.

604:    If the user-provided array is NULL, then VecPlaceArray() can be used
605:    at a later stage to SET the array for storing the vector values.

607:    PETSc does NOT free the array when the vector is destroyed via VecDestroy().
608:    The user should not free the array until the vector is destroyed.

610:    Level: intermediate

612: .seealso: VecCreateSeqWithArray(), VecCreate(), VecDuplicate(), VecDuplicateVecs(), VecCreateGhost(),
613:           VecCreateMPI(), VecCreateGhostWithArray(), VecPlaceArray()

615: @*/
616: PetscErrorCode  VecCreateMPIWithArray(MPI_Comm comm,PetscInt bs,PetscInt n,PetscInt N,const PetscScalar array[],Vec *vv)
617: {

621:   if (n == PETSC_DECIDE) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Must set local size of vector");
622:   PetscSplitOwnership(comm,&n,&N);
623:   VecCreate(comm,vv);
624:   VecSetSizes(*vv,n,N);
625:   VecSetBlockSize(*vv,bs);
626:   VecCreate_MPI_Private(*vv,PETSC_FALSE,0,array);
627:   return(0);
628: }
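
/*
   Illustrative sketch (not part of the original source): wrapping a user-owned buffer with
   VecCreateMPIWithArray().  The hypothetical array `data` must stay valid while the vector
   exists and is not freed by VecDestroy(), per the notes above.  Error checking omitted.

     PetscScalar data[100];                       // user-owned storage, length >= local size
     VecCreateMPIWithArray(PETSC_COMM_WORLD,1,100,PETSC_DECIDE,data,&v);
     ...                                          // use v like any other MPI vector
     VecDestroy(&v);                              // data is NOT freed here
*/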

630: /*@C
631:    VecCreateGhostWithArray - Creates a parallel vector with ghost padding on each processor;
632:    the caller allocates the array space.

634:    Collective

636:    Input Parameters:
637: +  comm - the MPI communicator to use
638: .  n - local vector length
639: .  N - global vector length (or PETSC_DECIDE to have it calculated if n is given)
640: .  nghost - number of local ghost points
641: .  ghosts - global indices of ghost points (or NULL if not needed), these do not need to be in increasing order (sorted)
642: -  array - the space to store the vector values (of length at least n + nghost)

644:    Output Parameter:
645: .  vv - the global vector representation (without ghost points as part of vector)

647:    Notes:
648:    Use VecGhostGetLocalForm() to access the local, ghosted representation
649:    of the vector.

651:    This also automatically sets the ISLocalToGlobalMapping() for this vector.

653:    Level: advanced


656: .seealso: VecCreate(), VecGhostGetLocalForm(), VecGhostRestoreLocalForm(),
657:           VecCreateGhost(), VecCreateSeqWithArray(), VecCreateMPIWithArray(),
658:           VecCreateGhostBlock(), VecCreateGhostBlockWithArray(), VecMPISetGhost()

660: @*/
661: PetscErrorCode  VecCreateGhostWithArray(MPI_Comm comm,PetscInt n,PetscInt N,PetscInt nghost,const PetscInt ghosts[],const PetscScalar array[],Vec *vv)
662: {
663:   PetscErrorCode         ierr;
664:   Vec_MPI                *w;
665:   PetscScalar            *larray;
666:   IS                     from,to;
667:   ISLocalToGlobalMapping ltog;
668:   PetscInt               rstart,i,*indices;

671:   *vv = 0;

673:   if (n == PETSC_DECIDE)      SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Must set local size");
674:   if (nghost == PETSC_DECIDE) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Must set local ghost size");
675:   if (nghost < 0)             SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Ghost length must be >= 0");
676:   PetscSplitOwnership(comm,&n,&N);
677:   /* Create global representation */
678:   VecCreate(comm,vv);
679:   VecSetSizes(*vv,n,N);
680:   VecCreate_MPI_Private(*vv,PETSC_TRUE,nghost,array);
681:   w    = (Vec_MPI*)(*vv)->data;
682:   /* Create local representation */
683:   VecGetArray(*vv,&larray);
684:   VecCreateSeqWithArray(PETSC_COMM_SELF,1,n+nghost,larray,&w->localrep);
685:   PetscLogObjectParent((PetscObject)*vv,(PetscObject)w->localrep);
686:   VecRestoreArray(*vv,&larray);

688:   /*
689:        Create scatter context for scattering (updating) ghost values
690:   */
691:   ISCreateGeneral(comm,nghost,ghosts,PETSC_COPY_VALUES,&from);
692:   ISCreateStride(PETSC_COMM_SELF,nghost,n,1,&to);
693:   VecScatterCreate(*vv,from,w->localrep,to,&w->localupdate);
694:   PetscLogObjectParent((PetscObject)*vv,(PetscObject)w->localupdate);
695:   ISDestroy(&to);
696:   ISDestroy(&from);

698:   /* set local to global mapping for ghosted vector */
699:   PetscMalloc1(n+nghost,&indices);
700:   VecGetOwnershipRange(*vv,&rstart,NULL);
701:   for (i=0; i<n; i++) {
702:     indices[i] = rstart + i;
703:   }
704:   for (i=0; i<nghost; i++) {
705:     indices[n+i] = ghosts[i];
706:   }
707:   ISLocalToGlobalMappingCreate(comm,1,n+nghost,indices,PETSC_OWN_POINTER,&ltog);
708:   VecSetLocalToGlobalMapping(*vv,ltog);
709:   ISLocalToGlobalMappingDestroy(&ltog);
710:   return(0);
711: }
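
/*
   Illustrative sketch (not part of the original source): supplying the storage for a ghosted
   vector.  The hypothetical buffer must hold the nlocal owned entries followed by the nghost
   ghost entries, per the array description above.  Error checking omitted.

     PetscInt    ghosts[2] = {0,5};               // global indices owned by other ranks
     PetscScalar work[nlocal+2];                  // nlocal owned entries + 2 ghost slots
     VecCreateGhostWithArray(PETSC_COMM_WORLD,nlocal,PETSC_DECIDE,2,ghosts,work,&gv);
*/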

713: /*@
714:    VecCreateGhost - Creates a parallel vector with ghost padding on each processor.

716:    Collective

718:    Input Parameters:
719: +  comm - the MPI communicator to use
720: .  n - local vector length
721: .  N - global vector length (or PETSC_DECIDE to have it calculated if n is given)
722: .  nghost - number of local ghost points
723: -  ghosts - global indices of ghost points, these do not need to be in increasing order (sorted)

725:    Output Parameter:
726: .  vv - the global vector representation (without ghost points as part of vector)

728:    Notes:
729:    Use VecGhostGetLocalForm() to access the local, ghosted representation
730:    of the vector.

732:    This also automatically sets the ISLocalToGlobalMapping() for this vector.

734:    Level: advanced

736: .seealso: VecCreateSeq(), VecCreate(), VecDuplicate(), VecDuplicateVecs(), VecCreateMPI(),
737:           VecGhostGetLocalForm(), VecGhostRestoreLocalForm(), VecGhostUpdateBegin(),
738:           VecCreateGhostWithArray(), VecCreateMPIWithArray(), VecGhostUpdateEnd(),
739:           VecCreateGhostBlock(), VecCreateGhostBlockWithArray(), VecMPISetGhost()

741: @*/
742: PetscErrorCode  VecCreateGhost(MPI_Comm comm,PetscInt n,PetscInt N,PetscInt nghost,const PetscInt ghosts[],Vec *vv)
743: {

747:   VecCreateGhostWithArray(comm,n,N,nghost,ghosts,0,vv);
748:   return(0);
749: }
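
/*
   Illustrative sketch (not part of the original source): the usual update cycle for a vector
   created with VecCreateGhost() above.  Names are hypothetical; error checking omitted.

     VecCreateGhost(PETSC_COMM_WORLD,nlocal,PETSC_DECIDE,nghost,ghosts,&gv);
     // ... set the owned entries of gv ...
     VecGhostUpdateBegin(gv,INSERT_VALUES,SCATTER_FORWARD);   // copy owner values into ghost slots
     VecGhostUpdateEnd(gv,INSERT_VALUES,SCATTER_FORWARD);
     VecGhostGetLocalForm(gv,&lv);                            // sequential view of owned + ghost entries
     // ... read lv, e.g. with VecGetArrayRead() ...
     VecGhostRestoreLocalForm(gv,&lv);
*/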

751: /*@
752:    VecMPISetGhost - Sets the ghost points for an MPI ghost vector

754:    Collective on Vec

756:    Input Parameters:
757: +  vv - the MPI vector
758: .  nghost - number of local ghost points
759: -  ghosts - global indices of ghost points, these do not need to be in increasing order (sorted)


762:    Notes:
763:    Use VecGhostGetLocalForm() to access the local, ghosted representation
764:    of the vector.

766:    This also automatically sets the ISLocalToGlobalMapping() for this vector.

768:    You must call this AFTER you have set the type of the vector (with VecSetType()) and the size (with VecSetSizes()).

770:    Level: advanced

772: .seealso: VecCreateSeq(), VecCreate(), VecDuplicate(), VecDuplicateVecs(), VecCreateMPI(),
773:           VecGhostGetLocalForm(), VecGhostRestoreLocalForm(), VecGhostUpdateBegin(),
774:           VecCreateGhostWithArray(), VecCreateMPIWithArray(), VecGhostUpdateEnd(),
775:           VecCreateGhostBlock(), VecCreateGhostBlockWithArray()

777: @*/
778: PetscErrorCode  VecMPISetGhost(Vec vv,PetscInt nghost,const PetscInt ghosts[])
779: {
781:   PetscBool      flg;

784:   PetscObjectTypeCompare((PetscObject)vv,VECMPI,&flg);
785:   /* if this is already a fully built VECMPI then destroy it and rebuild with ghosting */
786:   if (flg) {
787:     PetscInt               n,N;
788:     Vec_MPI                *w;
789:     PetscScalar            *larray;
790:     IS                     from,to;
791:     ISLocalToGlobalMapping ltog;
792:     PetscInt               rstart,i,*indices;
793:     MPI_Comm               comm;

795:     PetscObjectGetComm((PetscObject)vv,&comm);
796:     n    = vv->map->n;
797:     N    = vv->map->N;
798:     (*vv->ops->destroy)(vv);
799:     VecSetSizes(vv,n,N);
800:     VecCreate_MPI_Private(vv,PETSC_TRUE,nghost,NULL);
801:     w    = (Vec_MPI*)(vv)->data;
802:     /* Create local representation */
803:     VecGetArray(vv,&larray);
804:     VecCreateSeqWithArray(PETSC_COMM_SELF,1,n+nghost,larray,&w->localrep);
805:     PetscLogObjectParent((PetscObject)vv,(PetscObject)w->localrep);
806:     VecRestoreArray(vv,&larray);

808:     /*
809:      Create scatter context for scattering (updating) ghost values
810:      */
811:     ISCreateGeneral(comm,nghost,ghosts,PETSC_COPY_VALUES,&from);
812:     ISCreateStride(PETSC_COMM_SELF,nghost,n,1,&to);
813:     VecScatterCreate(vv,from,w->localrep,to,&w->localupdate);
814:     PetscLogObjectParent((PetscObject)vv,(PetscObject)w->localupdate);
815:     ISDestroy(&to);
816:     ISDestroy(&from);

818:     /* set local to global mapping for ghosted vector */
819:     PetscMalloc1(n+nghost,&indices);
820:     VecGetOwnershipRange(vv,&rstart,NULL);

822:     for (i=0; i<n; i++)      indices[i]   = rstart + i;
823:     for (i=0; i<nghost; i++) indices[n+i] = ghosts[i];

825:     ISLocalToGlobalMappingCreate(comm,1,n+nghost,indices,PETSC_OWN_POINTER,&ltog);
826:     VecSetLocalToGlobalMapping(vv,ltog);
827:     ISLocalToGlobalMappingDestroy(&ltog);
828:   } else if (vv->ops->create == VecCreate_MPI) SETERRQ(PetscObjectComm((PetscObject)vv),PETSC_ERR_ARG_WRONGSTATE,"Must set local or global size before setting ghosting");
829:   else if (!((PetscObject)vv)->type_name) SETERRQ(PetscObjectComm((PetscObject)vv),PETSC_ERR_ARG_WRONGSTATE,"Must set type to VECMPI before ghosting");
830:   return(0);
831: }
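
/*
   Illustrative sketch (not part of the original source): adding ghosting to an existing MPI
   vector.  Per the notes above, the type and sizes must already be set.  Names are hypothetical;
   error checking omitted.

     VecCreate(PETSC_COMM_WORLD,&v);
     VecSetSizes(v,nlocal,PETSC_DECIDE);
     VecSetType(v,VECMPI);
     VecMPISetGhost(v,nghost,ghosts);             // rebuilds the vector with a ghosted local form
*/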


834: /* ------------------------------------------------------------------------------------------*/
835: /*@C
836:    VecCreateGhostBlockWithArray - Creates a parallel vector with ghost padding on each processor;
837:    the caller allocates the array space. Indices in the ghost region are based on blocks.

839:    Collective

841:    Input Parameters:
842: +  comm - the MPI communicator to use
843: .  bs - block size
844: .  n - local vector length
845: .  N - global vector length (or PETSC_DECIDE to have it calculated if n is given)
846: .  nghost - number of local ghost blocks
847: .  ghosts - global indices of ghost blocks (or NULL if not needed), counts are by block not by index, these do not need to be in increasing order (sorted)
848: -  array - the space to store the vector values (of length at least n + nghost*bs)

850:    Output Parameter:
851: .  vv - the global vector representation (without ghost points as part of vector)

853:    Notes:
854:    Use VecGhostGetLocalForm() to access the local, ghosted representation
855:    of the vector.

857:    n is the local vector size (total local size, not the number of blocks) while nghost
858:    is the number of blocks in the ghost portion, i.e. the number of elements in the ghost
859:    portion is bs*nghost

861:    Level: advanced


864: .seealso: VecCreate(), VecGhostGetLocalForm(), VecGhostRestoreLocalForm(),
865:           VecCreateGhost(), VecCreateSeqWithArray(), VecCreateMPIWithArray(),
866:           VecCreateGhostWithArray(), VecCreateGhostBlock()

868: @*/
869: PetscErrorCode  VecCreateGhostBlockWithArray(MPI_Comm comm,PetscInt bs,PetscInt n,PetscInt N,PetscInt nghost,const PetscInt ghosts[],const PetscScalar array[],Vec *vv)
870: {
871:   PetscErrorCode         ierr;
872:   Vec_MPI                *w;
873:   PetscScalar            *larray;
874:   IS                     from,to;
875:   ISLocalToGlobalMapping ltog;
876:   PetscInt               rstart,i,nb,*indices;

879:   *vv = 0;

881:   if (n == PETSC_DECIDE)      SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Must set local size");
882:   if (nghost == PETSC_DECIDE) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Must set local ghost size");
883:   if (nghost < 0)             SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Ghost length must be >= 0");
884:   if (n % bs)                 SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Local size must be a multiple of block size");
885:   PetscSplitOwnership(comm,&n,&N);
886:   /* Create global representation */
887:   VecCreate(comm,vv);
888:   VecSetSizes(*vv,n,N);
889:   VecSetBlockSize(*vv,bs);
890:   VecCreate_MPI_Private(*vv,PETSC_TRUE,nghost*bs,array);
891:   w    = (Vec_MPI*)(*vv)->data;
892:   /* Create local representation */
893:   VecGetArray(*vv,&larray);
894:   VecCreateSeqWithArray(PETSC_COMM_SELF,bs,n+bs*nghost,larray,&w->localrep);
895:   PetscLogObjectParent((PetscObject)*vv,(PetscObject)w->localrep);
896:   VecRestoreArray(*vv,&larray);

898:   /*
899:        Create scatter context for scattering (updating) ghost values
900:   */
901:   ISCreateBlock(comm,bs,nghost,ghosts,PETSC_COPY_VALUES,&from);
902:   ISCreateStride(PETSC_COMM_SELF,bs*nghost,n,1,&to);
903:   VecScatterCreate(*vv,from,w->localrep,to,&w->localupdate);
904:   PetscLogObjectParent((PetscObject)*vv,(PetscObject)w->localupdate);
905:   ISDestroy(&to);
906:   ISDestroy(&from);

908:   /* set local to global mapping for ghosted vector */
909:   nb     = n/bs;
910:   PetscMalloc1(nb+nghost,&indices);
911:   VecGetOwnershipRange(*vv,&rstart,NULL);
912:   rstart = rstart/bs;

914:   for (i=0; i<nb; i++)      indices[i]    = rstart + i;
915:   for (i=0; i<nghost; i++)  indices[nb+i] = ghosts[i];

917:   ISLocalToGlobalMappingCreate(comm,bs,nb+nghost,indices,PETSC_OWN_POINTER,&ltog);
918:   VecSetLocalToGlobalMapping(*vv,ltog);
919:   ISLocalToGlobalMappingDestroy(&ltog);
920:   return(0);
921: }
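
/*
   Illustrative sketch (not part of the original source): block size 2 with one ghost block, so
   the hypothetical buffer holds the nlocal owned scalars plus bs*nghost = 2 ghost scalars.
   Error checking omitted.

     PetscInt    bghosts[1] = {0};                // ghost indices count blocks, not scalars
     PetscScalar buf[nlocal+2];
     VecCreateGhostBlockWithArray(PETSC_COMM_WORLD,2,nlocal,PETSC_DECIDE,1,bghosts,buf,&gv);
*/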

923: /*@
924:    VecCreateGhostBlock - Creates a parallel vector with ghost padding on each processor.
925:         The indexing of the ghost points is done by blocks.

927:    Collective

929:    Input Parameters:
930: +  comm - the MPI communicator to use
931: .  bs - the block size
932: .  n - local vector length
933: .  N - global vector length (or PETSC_DECIDE to have it calculated if n is given)
934: .  nghost - number of local ghost blocks
935: -  ghosts - global indices of ghost blocks, counts are by block, not by individual index, these do not need to be in increasing order (sorted)

937:    Output Parameter:
938: .  vv - the global vector representation (without ghost points as part of vector)

940:    Notes:
941:    Use VecGhostGetLocalForm() to access the local, ghosted representation
942:    of the vector.

944:    n is the local vector size (total local size, not the number of blocks) while nghost
945:    is the number of blocks in the ghost portion, i.e. the number of elements in the ghost
946:    portion is bs*nghost

948:    Level: advanced

950: .seealso: VecCreateSeq(), VecCreate(), VecDuplicate(), VecDuplicateVecs(), VecCreateMPI(),
951:           VecGhostGetLocalForm(), VecGhostRestoreLocalForm(),
952:           VecCreateGhostWithArray(), VecCreateMPIWithArray(), VecCreateGhostBlockWithArray()

954: @*/
955: PetscErrorCode  VecCreateGhostBlock(MPI_Comm comm,PetscInt bs,PetscInt n,PetscInt N,PetscInt nghost,const PetscInt ghosts[],Vec *vv)
956: {

960:   VecCreateGhostBlockWithArray(comm,bs,n,N,nghost,ghosts,0,vv);
961:   return(0);
962: }
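
/*
   Illustrative sketch (not part of the original source): a block-ghosted vector with block
   size 3, setting one whole block of values; ghost indices are block indices.  Names are
   hypothetical; error checking omitted.

     PetscInt    blk        = 4;                  // global block index, possibly owned elsewhere
     PetscInt    bghosts[1] = {4};
     PetscScalar vals[3]    = {1.0,2.0,3.0};
     VecCreateGhostBlock(PETSC_COMM_WORLD,3,nlocal,PETSC_DECIDE,1,bghosts,&gv);
     VecSetValuesBlocked(gv,1,&blk,vals,ADD_VALUES);
     VecAssemblyBegin(gv);
     VecAssemblyEnd(gv);
*/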