Actual source code: vecimpl.h

/*
   This private file should not be included in users' code.
   Defines the fields shared by all vector implementations.
*/

#ifndef __VECIMPL_H
#define __VECIMPL_H

#include <petscvec.h>

/*S
     PetscLayout - defines layout of vectors and matrices across processes (which rows are owned by which processes)

   Level: developer

.seealso:  PetscLayoutCreate(), PetscLayoutDestroy()
S*/
typedef struct _n_PetscLayout* PetscLayout;
struct _n_PetscLayout{
  MPI_Comm               comm;
  PetscInt               n,N;         /* local, global vector size */
  PetscInt               rstart,rend; /* local start, local end + 1 */
  PetscInt               *range;      /* the offset of each processor */
  PetscInt               bs;          /* number of elements in each block (generally for multi-component problems); do NOT multiply the numbers above by bs */
  PetscInt               refcnt;      /* MPI Vecs obtained with VecDuplicate() and from MatGetVecs() reuse the map of the input object */
  ISLocalToGlobalMapping mapping;     /* mapping used in Vec/MatSetValuesLocal() */
  ISLocalToGlobalMapping bmapping;    /* mapping used in Vec/MatSetValuesBlockedLocal() */
};
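/*
   Illustrative usage sketch (not part of the original header, error checking
   omitted); it assumes the standard PetscLayout routines referenced above
   (PetscLayoutCreate(), PetscLayoutSetUp(), PetscLayoutDestroy()) together with
   the usual size/block-size/range accessors:

     PetscLayout map;
     PetscInt    rstart,rend;

     PetscLayoutCreate(PETSC_COMM_WORLD,&map);
     PetscLayoutSetLocalSize(map,nlocal);      set the local (or global) size
     PetscLayoutSetBlockSize(map,1);
     PetscLayoutSetUp(map);                    fills n, N, rstart, rend and range[]
     PetscLayoutGetRange(map,&rstart,&rend);   this process owns entries [rstart,rend)
     PetscLayoutDestroy(&map);
*/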

PetscPolymorphicFunction(PetscLayoutGetLocalSize,(PetscLayout m),(m,&s),PetscInt,s)
PetscPolymorphicFunction(PetscLayoutGetSize,(PetscLayout m),(m,&s),PetscInt,s)

/*@C
     PetscLayoutFindOwner - Find the owning rank for a global index

    Not Collective

   Input Parameters:
+    map - the layout
-    idx - global index to find the owner of

   Output Parameter:
.    owner - the owning rank

   Level: developer

    Fortran Notes:
      Not available from Fortran

@*/
PETSC_STATIC_INLINE PetscErrorCode PetscLayoutFindOwner(PetscLayout map,PetscInt idx,PetscInt *owner)
{
  PetscMPIInt    lo = 0,hi,t;
  PetscInt       bs = map->bs;

  if (!((map->n >= 0) && (map->N >= 0) && (map->range))) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"PetscLayoutSetUp() must be called first");
  if (idx < 0 || idx > map->N) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Index %D is out of range",idx);
  MPI_Comm_size(map->comm,&hi);
  while (hi - lo > 1) {
    t = lo + (hi - lo) / 2;
    if (idx < map->range[t]/bs) hi = t;
    else                        lo = t;
  }
  *owner = lo;
  return(0);
}
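/*
   Call sketch (illustrative): once PetscLayoutSetUp() has filled range[], the
   owning rank of a global index i is found with

     PetscInt owner;
     PetscLayoutFindOwner(map,i,&owner);

   where owner is the rank whose [rstart,rend) interval contains i.
*/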

/* ----------------------------------------------------------------------------*/
typedef struct _n_PetscUniformSection *PetscUniformSection;
struct _n_PetscUniformSection {
  MPI_Comm comm;
  PetscInt pStart, pEnd; /* The chart: all points are contained in [pStart, pEnd) */
  PetscInt numDof;       /* Describes layout of storage, point --> (constant # of values, (p - pStart)*constant # of values) */
};

#if 0
// Should I protect these for C++?
#endif

/*S
  PetscSection - This is a mapping from DMMESH points to sets of values, which is
  our presentation of a fibre bundle.

  Level: developer

.seealso:  PetscSectionCreate(), PetscSectionDestroy()
S*/
typedef struct _n_PetscSection *PetscSection;
struct _n_PetscSection {
  struct _n_PetscUniformSection atlasLayout;  /* Layout for the atlas */
  PetscInt                     *atlasDof;     /* Describes layout of storage, point --> # of values */
  PetscInt                     *atlasOff;     /* Describes layout of storage, point --> offset into storage */
  PetscSection                  bc;           /* Describes constraints, point --> # local dofs which are constrained */
  PetscInt                     *bcIndices;    /* Local indices for constrained dofs */
  PetscInt                      refcnt;       /* Vecs obtained with VecDuplicate() and from MatGetVecs() reuse the map of the input object */
};
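/*
   Lookup sketch (illustrative; indexing the atlas arrays by p - pStart is an
   assumption of this note, not a statement made by the header): for a point p
   inside the chart [pStart,pEnd) of atlasLayout, the number of values and their
   offset into the associated storage would be read as

     PetscInt dof = s->atlasDof[p - s->atlasLayout.pStart];
     PetscInt off = s->atlasOff[p - s->atlasLayout.pStart];

   so the values attached to p occupy positions off .. off+dof-1 of the storage
   array, with bc/bcIndices marking which of those local dofs are constrained.
*/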



/* ----------------------------------------------------------------------------*/

typedef struct _VecOps *VecOps;
struct _VecOps {
  PetscErrorCode (*duplicate)(Vec,Vec*);         /* get single vector */
  PetscErrorCode (*duplicatevecs)(Vec,PetscInt,Vec**);     /* get array of vectors */
  PetscErrorCode (*destroyvecs)(PetscInt,Vec[]);           /* free array of vectors */
  PetscErrorCode (*dot)(Vec,Vec,PetscScalar*);             /* z = x^H * y */
  PetscErrorCode (*mdot)(Vec,PetscInt,const Vec[],PetscScalar*); /* z[j] = x dot y[j] */
  PetscErrorCode (*norm)(Vec,NormType,PetscReal*);        /* z = sqrt(x^H * x) */
  PetscErrorCode (*tdot)(Vec,Vec,PetscScalar*);             /* x'*y */
  PetscErrorCode (*mtdot)(Vec,PetscInt,const Vec[],PetscScalar*);/* z[j] = x dot y[j] */
  PetscErrorCode (*scale)(Vec,PetscScalar);                 /* x = alpha * x   */
  PetscErrorCode (*copy)(Vec,Vec);                     /* y = x */
  PetscErrorCode (*set)(Vec,PetscScalar);                        /* y = alpha  */
  PetscErrorCode (*swap)(Vec,Vec);                               /* exchange x and y */
  PetscErrorCode (*axpy)(Vec,PetscScalar,Vec);                   /* y = y + alpha * x */
  PetscErrorCode (*axpby)(Vec,PetscScalar,PetscScalar,Vec);      /* y = alpha * x + beta * y */
  PetscErrorCode (*maxpy)(Vec,PetscInt,const PetscScalar*,Vec*); /* y = y + alpha[j] x[j] */
  PetscErrorCode (*aypx)(Vec,PetscScalar,Vec);                   /* y = x + alpha * y */
  PetscErrorCode (*waxpy)(Vec,PetscScalar,Vec,Vec);         /* w = y + alpha * x */
  PetscErrorCode (*axpbypcz)(Vec,PetscScalar,PetscScalar,PetscScalar,Vec,Vec);   /* z = alpha * x + beta * y + gamma * z */
  PetscErrorCode (*pointwisemult)(Vec,Vec,Vec);        /* w = x .* y */
  PetscErrorCode (*pointwisedivide)(Vec,Vec,Vec);      /* w = x ./ y */
  PetscErrorCode (*setvalues)(Vec,PetscInt,const PetscInt[],const PetscScalar[],InsertMode);
  PetscErrorCode (*assemblybegin)(Vec);                /* start global assembly */
  PetscErrorCode (*assemblyend)(Vec);                  /* end global assembly */
  PetscErrorCode (*getarray)(Vec,PetscScalar**);            /* get data array */
  PetscErrorCode (*getsize)(Vec,PetscInt*);
  PetscErrorCode (*getlocalsize)(Vec,PetscInt*);
  PetscErrorCode (*restorearray)(Vec,PetscScalar**);        /* restore data array */
  PetscErrorCode (*max)(Vec,PetscInt*,PetscReal*);      /* z = max(x); idx=index of max(x) */
  PetscErrorCode (*min)(Vec,PetscInt*,PetscReal*);      /* z = min(x); idx=index of min(x) */
  PetscErrorCode (*setrandom)(Vec,PetscRandom);         /* set y[j] = random numbers */
  PetscErrorCode (*setoption)(Vec,VecOption,PetscBool);
  PetscErrorCode (*setvaluesblocked)(Vec,PetscInt,const PetscInt[],const PetscScalar[],InsertMode);
  PetscErrorCode (*destroy)(Vec);
  PetscErrorCode (*view)(Vec,PetscViewer);
  PetscErrorCode (*placearray)(Vec,const PetscScalar*);     /* place data array */
  PetscErrorCode (*replacearray)(Vec,const PetscScalar*);     /* replace data array */
  PetscErrorCode (*dot_local)(Vec,Vec,PetscScalar*);
  PetscErrorCode (*tdot_local)(Vec,Vec,PetscScalar*);
  PetscErrorCode (*norm_local)(Vec,NormType,PetscReal*);
  PetscErrorCode (*mdot_local)(Vec,PetscInt,const Vec[],PetscScalar*);
  PetscErrorCode (*mtdot_local)(Vec,PetscInt,const Vec[],PetscScalar*);
  PetscErrorCode (*load)(Vec,PetscViewer);
  PetscErrorCode (*reciprocal)(Vec);
  PetscErrorCode (*conjugate)(Vec);
  PetscErrorCode (*setlocaltoglobalmapping)(Vec,ISLocalToGlobalMapping);
  PetscErrorCode (*setvalueslocal)(Vec,PetscInt,const PetscInt *,const PetscScalar *,InsertMode);
  PetscErrorCode (*resetarray)(Vec);      /* vector points to its original array, i.e. undoes any VecPlaceArray() */
  PetscErrorCode (*setfromoptions)(Vec);
  PetscErrorCode (*maxpointwisedivide)(Vec,Vec,PetscReal*);      /* m = max abs(x ./ y) */
  PetscErrorCode (*pointwisemax)(Vec,Vec,Vec);
  PetscErrorCode (*pointwisemaxabs)(Vec,Vec,Vec);
  PetscErrorCode (*pointwisemin)(Vec,Vec,Vec);
  PetscErrorCode (*getvalues)(Vec,PetscInt,const PetscInt[],PetscScalar[]);
  PetscErrorCode (*sqrt)(Vec);
  PetscErrorCode (*abs)(Vec);
  PetscErrorCode (*exp)(Vec);
  PetscErrorCode (*log)(Vec);
  PetscErrorCode (*shift)(Vec);
  PetscErrorCode (*create)(Vec);
  PetscErrorCode (*stridegather)(Vec,PetscInt,Vec,InsertMode);
  PetscErrorCode (*stridescatter)(Vec,PetscInt,Vec,InsertMode);
  PetscErrorCode (*dotnorm2)(Vec,Vec,PetscScalar*,PetscScalar*);
  PetscErrorCode (*getsubvector)(Vec,IS,Vec*);
  PetscErrorCode (*restoresubvector)(Vec,IS,Vec*);
};
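/*
   Dispatch sketch (illustrative, not part of the original header): the public
   VecXXX() routines forward through this table, which PETSCHEADER() attaches to
   every Vec as ->ops. VecDot(), for example, essentially reduces to

     (*x->ops->dot)(x,y,&result);

   so each vector type fills in its own _VecOps entries at creation time and the
   rest of PETSc stays implementation-agnostic.
*/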

/*
    The stash is used to temporarily store inserted vec values that
  belong to another processor. During the assembly phase the stashed
  values are moved to the correct processor and inserted or added into
  the vector there.
*/

typedef struct {
  PetscInt      nmax;                   /* maximum stash size */
  PetscInt      umax;                   /* max stash size user wants */
  PetscInt      oldnmax;                /* the nmax value used previously */
  PetscInt      n;                      /* stash size */
  PetscInt      bs;                     /* block size of the stash */
  PetscInt      reallocs;               /* records the number of mallocs invoked */
  PetscInt      *idx;                   /* global row numbers in stash */
  PetscScalar   *array;                 /* array to hold stashed values */
  /* The following variables are used for communication */
  MPI_Comm      comm;
  PetscMPIInt   size,rank;
  PetscMPIInt   tag1,tag2;
  MPI_Request   *send_waits;            /* array of send requests */
  MPI_Request   *recv_waits;            /* array of receive requests */
  MPI_Status    *send_status;           /* array of send statuses */
  PetscInt      nsends,nrecvs;          /* numbers of sends and receives */
  PetscScalar   *svalues,*rvalues;      /* sending and receiving data */
  PetscInt      *sindices,*rindices;
  PetscInt      rmax;                   /* maximum message length */
  PetscInt      *nprocs;                /* tmp data used both during scatterbegin and end */
  PetscInt      nprocessed;             /* number of messages already processed */
  PetscBool     donotstash;
  PetscBool     ignorenegidx;           /* ignore negative indices passed into VecSetValues()/VecGetValues() */
  InsertMode    insertmode;
  PetscInt      *bowners;
} VecStash;

#if defined(PETSC_HAVE_CUSP)
/* Flag used by the CUSP back end to track where the current copy of the vector data resides. */
typedef enum {PETSC_CUSP_UNALLOCATED,PETSC_CUSP_GPU,PETSC_CUSP_CPU,PETSC_CUSP_BOTH} PetscCUSPFlag;
#endif

struct _p_Vec {
  PETSCHEADER(struct _VecOps);
  PetscLayout            map;
  void                   *data;     /* implementation-specific data */
  PetscBool              array_gotten;
  VecStash               stash,bstash; /* used for storing off-proc values during assembly */
  PetscBool              petscnative;  /* means the ->data starts with VECHEADER and can use VecGetArrayFast() */
#if defined(PETSC_HAVE_CUSP)
  PetscCUSPFlag          valid_GPU_array;    /* indicates where the most recently modified vector data is (GPU or CPU) */
  void                   *spptr; /* if we're using CUSP, then this is the special pointer to the array on the GPU */
#endif
};


#if defined(PETSC_HAVE_CUSP)
extern PetscErrorCode VecCUSPCopyFromGPU(Vec);
#endif

PETSC_STATIC_INLINE PetscErrorCode VecGetArrayRead(Vec x,const PetscScalar *a[])
{
  if (x->petscnative){
#if defined(PETSC_HAVE_CUSP)
    if (x->valid_GPU_array == PETSC_CUSP_GPU || !*((PetscScalar**)x->data)){
      VecCUSPCopyFromGPU(x);
    }
#endif
    *a = *((PetscScalar **)x->data);
  } else {
    (*x->ops->getarray)(x,(PetscScalar**)a);
  }
  return(0);
}

PETSC_STATIC_INLINE PetscErrorCode VecRestoreArrayRead(Vec x,const PetscScalar *a[])
{
  if (x->petscnative){
#if defined(PETSC_HAVE_CUSP)
    if (x->valid_GPU_array != PETSC_CUSP_UNALLOCATED) {
      x->valid_GPU_array = PETSC_CUSP_BOTH;
    }
#endif
  } else {
    (*x->ops->restorearray)(x,(PetscScalar**)a);
  }
  return(0);
}

PETSC_STATIC_INLINE PetscErrorCode VecGetArray(Vec x,PetscScalar *a[])
{
  if (x->petscnative){
#if defined(PETSC_HAVE_CUSP)
    if (x->valid_GPU_array == PETSC_CUSP_GPU || !*((PetscScalar**)x->data)){
      VecCUSPCopyFromGPU(x);
    }
#endif
    *a = *((PetscScalar **)x->data);
  } else {
    (*x->ops->getarray)(x,a);
  }
  return(0);
}

PETSC_STATIC_INLINE PetscErrorCode VecRestoreArray(Vec x,PetscScalar *a[])
{
  if (x->petscnative){
#if defined(PETSC_HAVE_CUSP)
    if (x->valid_GPU_array != PETSC_CUSP_UNALLOCATED) {
      x->valid_GPU_array = PETSC_CUSP_CPU;
    }
#endif
  } else {
    (*x->ops->restorearray)(x,a);
  }
  PetscObjectStateIncrease((PetscObject)x);
  return(0);
}
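/*
   Typical usage sketch (illustrative, error checking omitted): direct access to
   the local values is bracketed by the get/restore pairs defined above:

     PetscScalar *a;
     PetscInt    i,n;

     VecGetLocalSize(v,&n);
     VecGetArray(v,&a);
     for (i=0; i<n; i++) a[i] = 2.0*a[i];
     VecRestoreArray(v,&a);

   The Read variants give const access; note that only VecRestoreArray() bumps
   the object state via PetscObjectStateIncrease(), since read-only access does
   not modify the vector.
*/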


/*
     Common header shared by array based vectors,
   currently Vec_Seq and Vec_MPI
*/
#define VECHEADER                          \
  PetscScalar *array;                      \
  PetscScalar *array_allocated;                        /* if the array was allocated by PETSc this is its pointer */  \
  PetscScalar *unplacedarray;                           /* if one called VecPlaceArray(), this is where it stashed the original */
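/*
   Illustrative sketch (the real definitions live in the Seq/MPI implementation
   headers): an array-based implementation starts its ->data struct with
   VECHEADER, which is what allows the petscnative fast path in VecGetArray()
   above to do *a = *((PetscScalar**)x->data):

     typedef struct {
       VECHEADER
     } Vec_Seq;
*/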

/* Default obtain and release vectors; can be used by any implementation */


/* --------------------------------------------------------------------*/
/*                                                                     */
/* Defines the data structures used in the Vec Scatter operations      */

typedef enum { VEC_SCATTER_SEQ_GENERAL,VEC_SCATTER_SEQ_STRIDE,
               VEC_SCATTER_MPI_GENERAL,VEC_SCATTER_MPI_TOALL,
               VEC_SCATTER_MPI_TOONE} VecScatterType;

/*
   These scatters are for the purely local case.
*/
typedef struct {
  VecScatterType type;
  PetscInt       n;                    /* number of components to scatter */
  PetscInt       *vslots;              /* locations of components */
  /*
       The next three fields are used in parallel scatters; they contain an
       optimization for the special case that the "to" vector and the "from"
       vector are the same, so one only needs to copy the components that truly
       differ, skipping the no-op assignments y[idx[i]] = y[jdx[i]] where idx[i] == jdx[i].
  */
  PetscBool      nonmatching_computed;
  PetscInt       n_nonmatching;        /* number of "from"s  != "to"s */
  PetscInt       *slots_nonmatching;   /* locations of "from"s  != "to"s */
  PetscBool      is_copy;
  PetscInt       copy_start;   /* local scatter is a copy starting at copy_start */
  PetscInt       copy_length;
} VecScatter_Seq_General;

typedef struct {
  VecScatterType type;
  PetscInt       n;
  PetscInt       first;
  PetscInt       step;
} VecScatter_Seq_Stride;

/*
   This scatter is for a global vector copied (completely) to each processor (or all to one)
*/
typedef struct {
  VecScatterType type;
  PetscMPIInt    *count;        /* elements of vector on each processor */
  PetscMPIInt    *displx;
  PetscScalar    *work1;
  PetscScalar    *work2;
} VecScatter_MPI_ToAll;

/*
   This is the general parallel scatter
*/
typedef struct {
  VecScatterType         type;
  PetscInt               n;        /* number of processors to send/receive */
  PetscInt               *starts;  /* starting point in indices and values for each proc */
  PetscInt               *indices; /* list of all components sent or received */
  PetscMPIInt            *procs;   /* processors we are communicating with in scatter */
  MPI_Request            *requests,*rev_requests;
  PetscScalar            *values;  /* buffer for all sends or receives */
  VecScatter_Seq_General local;    /* any part that happens to be local */
  MPI_Status             *sstatus,*rstatus;
  PetscBool              use_readyreceiver;
  PetscInt               bs;
  PetscBool              sendfirst;
  PetscBool              contiq;
  /* for MPI_Alltoallv() approach */
  PetscBool              use_alltoallv;
  PetscMPIInt            *counts,*displs;
  /* for MPI_Alltoallw() approach */
  PetscBool              use_alltoallw;
#if defined(PETSC_HAVE_MPI_ALLTOALLW)
  PetscMPIInt            *wcounts,*wdispls;
  MPI_Datatype           *types;
#endif
  PetscBool              use_window;
#if defined(PETSC_HAVE_MPI_WIN_CREATE)
  MPI_Win                window;
  PetscInt               *winstarts;    /* displacements in the processes I am putting to */
#endif
} VecScatter_MPI_General;

struct _p_VecScatter {
  PETSCHEADER(int);
  PetscInt       to_n,from_n;
  PetscBool      inuse;                /* prevents corruption from mixing two scatters */
  PetscBool      beginandendtogether;  /* indicates that the scatter begin and end functions are called together; VecScatterEnd()
                                          is then treated as a no-op */
  PetscBool      packtogether;         /* packs all the messages before sending, same with receive */
  PetscBool      reproduce;            /* always receive the ghost points in the same order of processes */
  PetscErrorCode (*begin)(VecScatter,Vec,Vec,InsertMode,ScatterMode);
  PetscErrorCode (*end)(VecScatter,Vec,Vec,InsertMode,ScatterMode);
  PetscErrorCode (*copy)(VecScatter,VecScatter);
  PetscErrorCode (*destroy)(VecScatter);
  PetscErrorCode (*view)(VecScatter,PetscViewer);
  void           *fromdata,*todata;
  void           *spptr;
};
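/*
   Usage sketch (illustrative; isfrom/isto stand for whatever index sets the
   application builds): a scatter context is created once and applied many
   times, and the begin/end split lets independent local work overlap the
   communication:

     VecScatter ctx;

     VecScatterCreate(x,isfrom,y,isto,&ctx);
     VecScatterBegin(ctx,x,y,INSERT_VALUES,SCATTER_FORWARD);
     VecScatterEnd(ctx,x,y,INSERT_VALUES,SCATTER_FORWARD);
     VecScatterDestroy(&ctx);
*/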


/*
  VecStashValue_Private - inserts a single value into the stash.

  Input Parameters:
  stash  - the stash
  row    - the global index of the inserted value
  value  - the value inserted
*/
PETSC_STATIC_INLINE PetscErrorCode VecStashValue_Private(VecStash *stash,PetscInt row,PetscScalar value)
{
  /* Check and see if we have sufficient memory */
  if (((stash)->n + 1) > (stash)->nmax) {
    VecStashExpand_Private(stash,1);
  }
  (stash)->idx[(stash)->n]   = row;
  (stash)->array[(stash)->n] = value;
  (stash)->n++;
  return 0;
}
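/*
   Context sketch (illustrative; the real logic lives in the MPI Vec
   implementation, and the names below are only placeholders): during
   VecSetValues(), entries whose global row is not owned locally are diverted
   into the stash instead of being written directly, roughly

     if (row < rstart || row >= rend) VecStashValue_Private(&vec->stash,row,value);
     else                             localarray[row-rstart] = value;

   VecAssemblyBegin()/VecAssemblyEnd() then use the communication fields of
   VecStash to move the stashed entries to their owning processes.
*/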

/*
  VecStashValuesBlocked_Private - inserts one block of values into the stash.

  Input Parameters:
  stash  - the stash
  row    - the global block index
  values - the values inserted (one block of stash->bs values)
*/
PETSC_STATIC_INLINE PetscErrorCode VecStashValuesBlocked_Private(VecStash *stash,PetscInt row,PetscScalar *values)
{
  PetscInt       jj,stash_bs=(stash)->bs;
  PetscScalar    *array;

  if (((stash)->n+1) > (stash)->nmax) {
    VecStashExpand_Private(stash,1);
  }
  array = (stash)->array + stash_bs*(stash)->n;
  (stash)->idx[(stash)->n]   = row;
  for (jj=0; jj<stash_bs; jj++) { array[jj] = values[jj];}
  (stash)->n++;
  return 0;
}


#if defined(PETSC_HAVE_MATLAB_ENGINE)
#endif


/* Reset __FUNCT__ in case the user does not define it themselves */

#endif