Actual source code: pbvec.c
/*
   This file contains routines for parallel vector operations.
 */
#include <petscsys.h>
#include <../src/vec/vec/impls/mpi/pvecimpl.h>

PETSC_INTERN PetscErrorCode VecView_MPI_Draw(Vec, PetscViewer);

PetscErrorCode VecPlaceArray_MPI(Vec vin, const PetscScalar *a)
{
  Vec_MPI *v = (Vec_MPI *)vin->data;

  PetscFunctionBegin;
  PetscCheck(!v->unplacedarray, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "VecPlaceArray() was already called on this vector, without a call to VecResetArray()");
  v->unplacedarray = v->array; /* save previous array so reset can bring it back */
  v->array         = (PetscScalar *)a;
  if (v->localrep) PetscCall(VecPlaceArray(v->localrep, a));
  PetscFunctionReturn(PETSC_SUCCESS);
}
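
/* Illustrative sketch (not part of the original source): the place/reset pair above backs the
   public VecPlaceArray()/VecResetArray() API. A typical use is to temporarily view user-owned
   storage as the vector's data; `user_storage` is hypothetical and must hold at least the local
   length of x. */
static PetscErrorCode ExamplePlaceAndResetArray(Vec x, PetscScalar *user_storage)
{
  PetscFunctionBegin;
  PetscCall(VecPlaceArray(x, user_storage)); /* x now reads/writes user_storage */
  PetscCall(VecScale(x, 2.0));               /* operate on the borrowed memory */
  PetscCall(VecResetArray(x));               /* restore the vector's own array */
  PetscFunctionReturn(PETSC_SUCCESS);
}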

PetscErrorCode VecDuplicate_MPI(Vec win, Vec *v)
{
  Vec_MPI     *vw, *w = (Vec_MPI *)win->data;
  PetscScalar *array;

  PetscFunctionBegin;
  PetscCall(VecCreateWithLayout_Private(win->map, v));
  PetscCall(VecCreate_MPI_Private(*v, PETSC_TRUE, w->nghost, NULL));
  vw           = (Vec_MPI *)(*v)->data;
  (*v)->ops[0] = win->ops[0];

  /* save local representation of the parallel vector (and scatter) if it exists */
  if (w->localrep) {
    PetscCall(VecGetArray(*v, &array));
    PetscCall(VecCreateSeqWithArray(PETSC_COMM_SELF, PetscAbs(win->map->bs), win->map->n + w->nghost, array, &vw->localrep));
    vw->localrep->ops[0] = w->localrep->ops[0];
    PetscCall(VecRestoreArray(*v, &array));

    vw->localupdate = w->localupdate;
    if (vw->localupdate) PetscCall(PetscObjectReference((PetscObject)vw->localupdate));

    vw->ghost = w->ghost;
    if (vw->ghost) PetscCall(PetscObjectReference((PetscObject)vw->ghost));
  }

  /* New vector should inherit stashing property of parent */
  (*v)->stash.donotstash   = win->stash.donotstash;
  (*v)->stash.ignorenegidx = win->stash.ignorenegidx;

  PetscCall(PetscObjectListDuplicate(((PetscObject)win)->olist, &((PetscObject)*v)->olist));
  PetscCall(PetscFunctionListDuplicate(((PetscObject)win)->qlist, &((PetscObject)*v)->qlist));

  (*v)->bstash.bs = win->bstash.bs;
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode VecDuplicateVecs_MPI_GEMV(Vec w, PetscInt m, Vec *V[])
{
  Vec_MPI *wmpi = (Vec_MPI *)w->data;

  PetscFunctionBegin;
  // Currently we only do GEMV for vectors without ghosts. Note w might be a VECMPI subclass object.
  // This routine relies on the duplicate operation being VecDuplicate_MPI. If not, bail out to the default.
  if (wmpi->nghost || w->ops->duplicate != VecDuplicate_MPI) {
    w->ops->duplicatevecs = VecDuplicateVecs_Default;
    PetscCall(VecDuplicateVecs(w, m, V));
  } else {
    PetscScalar *array;
    PetscInt64   lda; // use 64-bit as we will compute "m * lda"

    PetscCall(PetscMalloc1(m, V));
    VecGetLocalSizeAligned(w, 64, &lda); // get the 64-byte-aligned local size in lda
    PetscCall(PetscCalloc1(m * lda, &array));
    for (PetscInt i = 0; i < m; i++) {
      Vec v;
      PetscCall(VecCreateMPIWithLayoutAndArray_Private(w->map, PetscSafePointerPlusOffset(array, i * lda), &v));
      PetscCall(PetscObjectListDuplicate(((PetscObject)w)->olist, &((PetscObject)v)->olist));
      PetscCall(PetscFunctionListDuplicate(((PetscObject)w)->qlist, &((PetscObject)v)->qlist));
      v->ops->view          = w->ops->view;
      v->stash.donotstash   = w->stash.donotstash;
      v->stash.ignorenegidx = w->stash.ignorenegidx;
      v->stash.bs           = w->stash.bs;
      (*V)[i]               = v;
    }
    // Attach the array to the first vector, so that destroying it frees the whole group's storage
    if (m) ((Vec_MPI *)(*V)[0]->data)->array_allocated = array;
    // Disable replacearray on the first vector: freeing its array would also free the arrays of the
    // others in the group. Replacing the arrays of the others is fine, since they do not own them.
    if (m > 1) (*V)[0]->ops->replacearray = VecReplaceArray_Default_GEMV_Error;
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}
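
/* Illustrative sketch (not part of the original source): callers obtain the contiguous group
   allocated above through the public VecDuplicateVecs(), which is what lets VecMDot()/VecMAXPY()
   use GEMV-style kernels on the group. */
static PetscErrorCode ExampleDuplicateVecsGroup(Vec x, PetscInt m)
{
  Vec *ys;

  PetscFunctionBegin;
  PetscCall(VecDuplicateVecs(x, m, &ys)); /* may share one 64-byte-aligned buffer, see above */
  /* ... use ys[0..m-1], e.g. with VecMDot(x, m, ys, ...) ... */
  PetscCall(VecDestroyVecs(m, &ys)); /* destroying ys[0] frees the shared buffer */
  PetscFunctionReturn(PETSC_SUCCESS);
}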

static PetscErrorCode VecSetOption_MPI(Vec V, VecOption op, PetscBool flag)
{
  Vec_MPI *v = (Vec_MPI *)V->data;

  PetscFunctionBegin;
  switch (op) {
  case VEC_IGNORE_OFF_PROC_ENTRIES:
    V->stash.donotstash = flag;
    break;
  case VEC_IGNORE_NEGATIVE_INDICES:
    V->stash.ignorenegidx = flag;
    break;
  case VEC_SUBSET_OFF_PROC_ENTRIES:
    v->assembly_subset = flag; /* See the same logic in MatAssembly wrt MAT_SUBSET_OFF_PROC_ENTRIES */
    if (!v->assembly_subset) {              /* User indicates "do not reuse the communication pattern" */
      PetscCall(VecAssemblyReset_MPI(V));   /* Reset the existing pattern to free memory */
      v->first_assembly_done = PETSC_FALSE; /* Mark that the first assembly has not been done */
    }
    break;
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}
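
/* Illustrative sketch (not part of the original source): when every assembly communicates with a
   subset of the processes of the first assembly, the option handled above lets PETSc reuse the
   communication pattern across assemblies. */
static PetscErrorCode ExampleReuseAssemblyPattern(Vec x)
{
  PetscFunctionBegin;
  PetscCall(VecSetOption(x, VEC_SUBSET_OFF_PROC_ENTRIES, PETSC_TRUE));
  /* ... repeated VecSetValues()/VecAssemblyBegin()/VecAssemblyEnd() cycles ... */
  PetscFunctionReturn(PETSC_SUCCESS);
}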

PetscErrorCode VecResetArray_MPI(Vec vin)
{
  Vec_MPI *v = (Vec_MPI *)vin->data;

  PetscFunctionBegin;
  v->array         = v->unplacedarray;
  v->unplacedarray = NULL;
  if (v->localrep) PetscCall(VecResetArray(v->localrep));
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode VecAssemblySend_MPI_Private(MPI_Comm comm, const PetscMPIInt tag[], PetscMPIInt rankid, PetscMPIInt rank, void *sdata, MPI_Request req[], void *ctx)
{
  Vec                X   = (Vec)ctx;
  Vec_MPI           *x   = (Vec_MPI *)X->data;
  VecAssemblyHeader *hdr = (VecAssemblyHeader *)sdata;
  PetscInt           bs  = X->map->bs;

  PetscFunctionBegin;
  /* x->first_assembly_done indicates we are reusing a communication network. In that case, some
     messages can be empty, but we have to send them this time if we sent them before because the
     receiver is expecting them.
   */
  if (hdr->count || (x->first_assembly_done && x->sendptrs[rankid].ints)) {
    PetscCallMPI(MPI_Isend(x->sendptrs[rankid].ints, hdr->count, MPIU_INT, rank, tag[0], comm, &req[0]));
    PetscCallMPI(MPI_Isend(x->sendptrs[rankid].scalars, hdr->count, MPIU_SCALAR, rank, tag[1], comm, &req[1]));
  }
  if (hdr->bcount || (x->first_assembly_done && x->sendptrs[rankid].intb)) {
    PetscCallMPI(MPI_Isend(x->sendptrs[rankid].intb, hdr->bcount, MPIU_INT, rank, tag[2], comm, &req[2]));
    PetscCallMPI(MPI_Isend(x->sendptrs[rankid].scalarb, hdr->bcount * bs, MPIU_SCALAR, rank, tag[3], comm, &req[3]));
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode VecAssemblyRecv_MPI_Private(MPI_Comm comm, const PetscMPIInt tag[], PetscMPIInt rank, void *rdata, MPI_Request req[], void *ctx)
{
  Vec                X   = (Vec)ctx;
  Vec_MPI           *x   = (Vec_MPI *)X->data;
  VecAssemblyHeader *hdr = (VecAssemblyHeader *)rdata;
  PetscInt           bs  = X->map->bs;
  VecAssemblyFrame  *frame;

  PetscFunctionBegin;
  PetscCall(PetscSegBufferGet(x->segrecvframe, 1, &frame));

  if (hdr->count) {
    PetscCall(PetscSegBufferGet(x->segrecvint, hdr->count, &frame->ints));
    PetscCallMPI(MPI_Irecv(frame->ints, hdr->count, MPIU_INT, rank, tag[0], comm, &req[0]));
    PetscCall(PetscSegBufferGet(x->segrecvscalar, hdr->count, &frame->scalars));
    PetscCallMPI(MPI_Irecv(frame->scalars, hdr->count, MPIU_SCALAR, rank, tag[1], comm, &req[1]));
    frame->pendings = 2;
  } else {
    frame->ints     = NULL;
    frame->scalars  = NULL;
    frame->pendings = 0;
  }

  if (hdr->bcount) {
    PetscCall(PetscSegBufferGet(x->segrecvint, hdr->bcount, &frame->intb));
    PetscCallMPI(MPI_Irecv(frame->intb, hdr->bcount, MPIU_INT, rank, tag[2], comm, &req[2]));
    PetscCall(PetscSegBufferGet(x->segrecvscalar, hdr->bcount * bs, &frame->scalarb));
    PetscCallMPI(MPI_Irecv(frame->scalarb, hdr->bcount * bs, MPIU_SCALAR, rank, tag[3], comm, &req[3]));
    frame->pendingb = 2;
  } else {
    frame->intb     = NULL;
    frame->scalarb  = NULL;
    frame->pendingb = 0;
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode VecAssemblyBegin_MPI_BTS(Vec X)
{
  Vec_MPI *x = (Vec_MPI *)X->data;
  MPI_Comm comm;
  PetscInt i, j, jb, bs;

  PetscFunctionBegin;
  if (X->stash.donotstash) PetscFunctionReturn(PETSC_SUCCESS);

  PetscCall(PetscObjectGetComm((PetscObject)X, &comm));
  PetscCall(VecGetBlockSize(X, &bs));
  if (PetscDefined(USE_DEBUG)) {
    InsertMode addv;
    PetscCall(MPIU_Allreduce((PetscEnum *)&X->stash.insertmode, (PetscEnum *)&addv, 1, MPIU_ENUM, MPI_BOR, comm));
    PetscCheck(addv != (ADD_VALUES | INSERT_VALUES), comm, PETSC_ERR_ARG_NOTSAMETYPE, "Some processors inserted values while others added");
  }
  X->bstash.insertmode = X->stash.insertmode; /* Block stash implicitly tracks InsertMode of scalar stash */

  PetscCall(VecStashSortCompress_Private(&X->stash));
  PetscCall(VecStashSortCompress_Private(&X->bstash));

  if (!x->sendranks) {
    PetscMPIInt nowners, bnowners, *owners, *bowners;
    PetscInt    ntmp;
    PetscCall(VecStashGetOwnerList_Private(&X->stash, X->map, &nowners, &owners));
    PetscCall(VecStashGetOwnerList_Private(&X->bstash, X->map, &bnowners, &bowners));
    PetscCall(PetscMergeMPIIntArray(nowners, owners, bnowners, bowners, &ntmp, &x->sendranks));
    x->nsendranks = ntmp;
    PetscCall(PetscFree(owners));
    PetscCall(PetscFree(bowners));
    PetscCall(PetscMalloc1(x->nsendranks, &x->sendhdr));
    PetscCall(PetscCalloc1(x->nsendranks, &x->sendptrs));
  }
  for (i = 0, j = 0, jb = 0; i < x->nsendranks; i++) {
    PetscMPIInt rank         = x->sendranks[i];
    x->sendhdr[i].insertmode = X->stash.insertmode;
    /* Initialize pointers for non-empty stashes the first time around. Subsequent assemblies with
     * VEC_SUBSET_OFF_PROC_ENTRIES will leave the old pointers (dangling because the stash has been collected) when
     * there is nothing new to send, so that size-zero messages get sent instead. */
    x->sendhdr[i].count = 0;
    if (X->stash.n) {
      x->sendptrs[i].ints    = &X->stash.idx[j];
      x->sendptrs[i].scalars = &X->stash.array[j];
      for (; j < X->stash.n && X->stash.idx[j] < X->map->range[rank + 1]; j++) x->sendhdr[i].count++;
    }
    x->sendhdr[i].bcount = 0;
    if (X->bstash.n) {
      x->sendptrs[i].intb    = &X->bstash.idx[jb];
      x->sendptrs[i].scalarb = &X->bstash.array[jb * bs];
      for (; jb < X->bstash.n && X->bstash.idx[jb] * bs < X->map->range[rank + 1]; jb++) x->sendhdr[i].bcount++;
    }
  }

  if (!x->segrecvint) PetscCall(PetscSegBufferCreate(sizeof(PetscInt), 1000, &x->segrecvint));
  if (!x->segrecvscalar) PetscCall(PetscSegBufferCreate(sizeof(PetscScalar), 1000, &x->segrecvscalar));
  if (!x->segrecvframe) PetscCall(PetscSegBufferCreate(sizeof(VecAssemblyFrame), 50, &x->segrecvframe));
  if (x->first_assembly_done) { /* this is not the first assembly */
    PetscMPIInt tag[4];
    for (i = 0; i < 4; i++) PetscCall(PetscCommGetNewTag(comm, &tag[i]));
    for (i = 0; i < x->nsendranks; i++) PetscCall(VecAssemblySend_MPI_Private(comm, tag, i, x->sendranks[i], x->sendhdr + i, x->sendreqs + 4 * i, X));
    for (i = 0; i < x->nrecvranks; i++) PetscCall(VecAssemblyRecv_MPI_Private(comm, tag, x->recvranks[i], x->recvhdr + i, x->recvreqs + 4 * i, X));
    x->use_status = PETSC_TRUE;
  } else { /* First time assembly */
    PetscCall(PetscCommBuildTwoSidedFReq(comm, 3, MPIU_INT, x->nsendranks, x->sendranks, (PetscInt *)x->sendhdr, &x->nrecvranks, &x->recvranks, &x->recvhdr, 4, &x->sendreqs, &x->recvreqs, VecAssemblySend_MPI_Private, VecAssemblyRecv_MPI_Private, X));
    x->use_status = PETSC_FALSE;
  }

  /* The first_assembly_done flag is only meaningful when x->assembly_subset is set;
     this marks the first assembly as done whenever assembly_subset is set.
   */
  x->first_assembly_done = x->assembly_subset;

  {
    PetscInt nstash, reallocs;
    PetscCall(VecStashGetInfo_Private(&X->stash, &nstash, &reallocs));
    PetscCall(PetscInfo(X, "Stash has %" PetscInt_FMT " entries, uses %" PetscInt_FMT " mallocs.\n", nstash, reallocs));
    PetscCall(VecStashGetInfo_Private(&X->bstash, &nstash, &reallocs));
    PetscCall(PetscInfo(X, "Block-Stash has %" PetscInt_FMT " entries, uses %" PetscInt_FMT " mallocs.\n", nstash, reallocs));
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode VecAssemblyEnd_MPI_BTS(Vec X)
{
  Vec_MPI          *x  = (Vec_MPI *)X->data;
  PetscInt          bs = X->map->bs;
  PetscMPIInt       npending, *some_indices, r;
  MPI_Status       *some_statuses;
  PetscScalar      *xarray;
  VecAssemblyFrame *frame;

  PetscFunctionBegin;
  if (X->stash.donotstash) {
    X->stash.insertmode  = NOT_SET_VALUES;
    X->bstash.insertmode = NOT_SET_VALUES;
    PetscFunctionReturn(PETSC_SUCCESS);
  }

  PetscCheck(x->segrecvframe, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Missing segrecvframe! Probably you forgot to call VecAssemblyBegin() first");
  PetscCall(VecGetArray(X, &xarray));
  PetscCall(PetscSegBufferExtractInPlace(x->segrecvframe, &frame));
  PetscCall(PetscMalloc2(4 * x->nrecvranks, &some_indices, x->use_status ? 4 * x->nrecvranks : 0, &some_statuses));
  for (r = 0, npending = 0; r < x->nrecvranks; r++) npending += frame[r].pendings + frame[r].pendingb;
  while (npending > 0) {
    PetscMPIInt ndone = 0, ii;
    /* Filling MPI_Status fields requires some resources from the MPI library. We skip it on the first assembly, or
     * when VEC_SUBSET_OFF_PROC_ENTRIES has not been set, because we could exchange exact sizes in the initial
     * rendezvous. When the rendezvous is elided, however, we use MPI_Status to get actual message lengths, so that
     * subsequent assembly can set a proper subset of the values. */
    PetscCallMPI(MPI_Waitsome(4 * x->nrecvranks, x->recvreqs, &ndone, some_indices, x->use_status ? some_statuses : MPI_STATUSES_IGNORE));
    for (ii = 0; ii < ndone; ii++) {
      PetscInt     i     = some_indices[ii] / 4, j, k;
      InsertMode   imode = (InsertMode)x->recvhdr[i].insertmode;
      PetscInt    *recvint;
      PetscScalar *recvscalar;
      PetscBool    intmsg   = (PetscBool)(some_indices[ii] % 2 == 0);
      PetscBool    blockmsg = (PetscBool)((some_indices[ii] % 4) / 2 == 1);
      npending--;
      if (!blockmsg) { /* Scalar stash */
        PetscMPIInt count;
        if (--frame[i].pendings > 0) continue;
        if (x->use_status) {
          PetscCallMPI(MPI_Get_count(&some_statuses[ii], intmsg ? MPIU_INT : MPIU_SCALAR, &count));
        } else count = x->recvhdr[i].count;
        for (j = 0, recvint = frame[i].ints, recvscalar = frame[i].scalars; j < count; j++, recvint++) {
          PetscInt loc = *recvint - X->map->rstart;
          PetscCheck(*recvint >= X->map->rstart && X->map->rend > *recvint, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Received vector entry %" PetscInt_FMT " out of local range [%" PetscInt_FMT ",%" PetscInt_FMT ")", *recvint, X->map->rstart, X->map->rend);
          switch (imode) {
          case ADD_VALUES:
            xarray[loc] += *recvscalar++;
            break;
          case INSERT_VALUES:
            xarray[loc] = *recvscalar++;
            break;
          default:
            SETERRQ(PETSC_COMM_SELF, PETSC_ERR_SUP, "Insert mode not supported 0x%x", imode);
          }
        }
      } else { /* Block stash */
        PetscMPIInt count;
        if (--frame[i].pendingb > 0) continue;
        if (x->use_status) {
          PetscCallMPI(MPI_Get_count(&some_statuses[ii], intmsg ? MPIU_INT : MPIU_SCALAR, &count));
          if (!intmsg) count /= bs; /* Convert from number of scalars to number of blocks */
        } else count = x->recvhdr[i].bcount;
        for (j = 0, recvint = frame[i].intb, recvscalar = frame[i].scalarb; j < count; j++, recvint++) {
          PetscInt loc = (*recvint) * bs - X->map->rstart;
          switch (imode) {
          case ADD_VALUES:
            for (k = loc; k < loc + bs; k++) xarray[k] += *recvscalar++;
            break;
          case INSERT_VALUES:
            for (k = loc; k < loc + bs; k++) xarray[k] = *recvscalar++;
            break;
          default:
            SETERRQ(PETSC_COMM_SELF, PETSC_ERR_SUP, "Insert mode not supported 0x%x", imode);
          }
        }
      }
    }
  }
  PetscCall(VecRestoreArray(X, &xarray));
  PetscCallMPI(MPI_Waitall(4 * x->nsendranks, x->sendreqs, MPI_STATUSES_IGNORE));
  PetscCall(PetscFree2(some_indices, some_statuses));
  if (x->assembly_subset) {
    PetscCall(PetscSegBufferExtractInPlace(x->segrecvint, NULL));
    PetscCall(PetscSegBufferExtractInPlace(x->segrecvscalar, NULL));
  } else {
    PetscCall(VecAssemblyReset_MPI(X));
  }

  X->stash.insertmode  = NOT_SET_VALUES;
  X->bstash.insertmode = NOT_SET_VALUES;
  PetscCall(VecStashScatterEnd_Private(&X->stash));
  PetscCall(VecStashScatterEnd_Private(&X->bstash));
  PetscFunctionReturn(PETSC_SUCCESS);
}
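
/* Illustrative sketch (not part of the original source): the begin/end pair above implements the
   communication behind the standard assembly idiom. The global index and value here are
   hypothetical; off-process entries are stashed and exchanged as shown above. */
static PetscErrorCode ExampleAssembly(Vec x)
{
  PetscInt    row = 0; /* hypothetical global index, possibly owned by another process */
  PetscScalar val = 1.0;

  PetscFunctionBegin;
  PetscCall(VecSetValues(x, 1, &row, &val, ADD_VALUES)); /* stashed if off-process */
  PetscCall(VecAssemblyBegin(x));                        /* posts the sends/receives seen above */
  PetscCall(VecAssemblyEnd(x));                          /* waits and applies received entries */
  PetscFunctionReturn(PETSC_SUCCESS);
}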

PetscErrorCode VecAssemblyReset_MPI(Vec X)
{
  Vec_MPI *x = (Vec_MPI *)X->data;

  PetscFunctionBegin;
  PetscCall(PetscFree(x->sendreqs));
  PetscCall(PetscFree(x->recvreqs));
  PetscCall(PetscFree(x->sendranks));
  PetscCall(PetscFree(x->recvranks));
  PetscCall(PetscFree(x->sendhdr));
  PetscCall(PetscFree(x->recvhdr));
  PetscCall(PetscFree(x->sendptrs));
  PetscCall(PetscSegBufferDestroy(&x->segrecvint));
  PetscCall(PetscSegBufferDestroy(&x->segrecvscalar));
  PetscCall(PetscSegBufferDestroy(&x->segrecvframe));
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode VecSetFromOptions_MPI(Vec X, PetscOptionItems *PetscOptionsObject)
{
#if !defined(PETSC_HAVE_MPIUNI)
  PetscBool flg = PETSC_FALSE, set;

  PetscFunctionBegin;
  PetscOptionsHeadBegin(PetscOptionsObject, "VecMPI Options");
  PetscCall(PetscOptionsBool("-vec_assembly_legacy", "Use MPI 1 version of assembly", "", flg, &flg, &set));
  if (set) {
    X->ops->assemblybegin = flg ? VecAssemblyBegin_MPI : VecAssemblyBegin_MPI_BTS;
    X->ops->assemblyend   = flg ? VecAssemblyEnd_MPI : VecAssemblyEnd_MPI_BTS;
  }
  PetscOptionsHeadEnd();
#else
  PetscFunctionBegin;
  X->ops->assemblybegin = VecAssemblyBegin_MPI;
  X->ops->assemblyend   = VecAssemblyEnd_MPI;
#endif
  PetscFunctionReturn(PETSC_SUCCESS);
}
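
/* Illustrative sketch (not part of the original source): one way to select the legacy MPI-1
   assembly path handled above, equivalent to passing -vec_assembly_legacy on the command line. */
static PetscErrorCode ExampleSelectLegacyAssembly(Vec x)
{
  PetscFunctionBegin;
  PetscCall(PetscOptionsSetValue(NULL, "-vec_assembly_legacy", "true"));
  PetscCall(VecSetFromOptions(x)); /* queries the option and swaps the assembly ops */
  PetscFunctionReturn(PETSC_SUCCESS);
}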

static PetscErrorCode VecGetLocalToGlobalMapping_MPI_VecGhost(Vec X, ISLocalToGlobalMapping *ltg)
{
  PetscInt       *indices, n, nghost, rstart, i;
  IS              ghostis;
  const PetscInt *ghostidx;
  MPI_Comm        comm;

  PetscFunctionBegin;
  if (X->map->mapping) {
    *ltg = X->map->mapping;
    PetscFunctionReturn(PETSC_SUCCESS);
  }
  PetscCall(VecGhostGetGhostIS(X, &ghostis));
  PetscCall(ISGetLocalSize(ghostis, &nghost));
  PetscCall(VecGetLocalSize(X, &n));
  PetscCall(ISGetIndices(ghostis, &ghostidx));
  /* set local to global mapping for ghosted vector */
  PetscCall(PetscMalloc1(n + nghost, &indices));
  PetscCall(VecGetOwnershipRange(X, &rstart, NULL));
  for (i = 0; i < n; i++) indices[i] = rstart + i;
  for (i = 0; i < nghost; i++) indices[n + i] = ghostidx[i];
  PetscCall(ISRestoreIndices(ghostis, &ghostidx));
  PetscCall(PetscObjectGetComm((PetscObject)X, &comm));
  PetscCall(ISLocalToGlobalMappingCreate(comm, 1, n + nghost, indices, PETSC_OWN_POINTER, &X->map->mapping));
  *ltg = X->map->mapping;
  PetscFunctionReturn(PETSC_SUCCESS);
}

static struct _VecOps DvOps = {PetscDesignatedInitializer(duplicate, VecDuplicate_MPI), /* 1 */
                               PetscDesignatedInitializer(duplicatevecs, VecDuplicateVecs_Default),
                               PetscDesignatedInitializer(destroyvecs, VecDestroyVecs_Default),
                               PetscDesignatedInitializer(dot, VecDot_MPI),
                               PetscDesignatedInitializer(mdot, VecMDot_MPI),
                               PetscDesignatedInitializer(norm, VecNorm_MPI),
                               PetscDesignatedInitializer(tdot, VecTDot_MPI),
                               PetscDesignatedInitializer(mtdot, VecMTDot_MPI),
                               PetscDesignatedInitializer(scale, VecScale_Seq),
                               PetscDesignatedInitializer(copy, VecCopy_Seq), /* 10 */
                               PetscDesignatedInitializer(set, VecSet_Seq),
                               PetscDesignatedInitializer(swap, VecSwap_Seq),
                               PetscDesignatedInitializer(axpy, VecAXPY_Seq),
                               PetscDesignatedInitializer(axpby, VecAXPBY_Seq),
                               PetscDesignatedInitializer(maxpy, VecMAXPY_Seq),
                               PetscDesignatedInitializer(aypx, VecAYPX_Seq),
                               PetscDesignatedInitializer(waxpy, VecWAXPY_Seq),
                               PetscDesignatedInitializer(axpbypcz, VecAXPBYPCZ_Seq),
                               PetscDesignatedInitializer(pointwisemult, VecPointwiseMult_Seq),
                               PetscDesignatedInitializer(pointwisedivide, VecPointwiseDivide_Seq),
                               PetscDesignatedInitializer(setvalues, VecSetValues_MPI), /* 20 */
                               PetscDesignatedInitializer(assemblybegin, VecAssemblyBegin_MPI_BTS),
                               PetscDesignatedInitializer(assemblyend, VecAssemblyEnd_MPI_BTS),
                               PetscDesignatedInitializer(getarray, NULL),
                               PetscDesignatedInitializer(getsize, VecGetSize_MPI),
                               PetscDesignatedInitializer(getlocalsize, VecGetSize_Seq),
                               PetscDesignatedInitializer(restorearray, NULL),
                               PetscDesignatedInitializer(max, VecMax_MPI),
                               PetscDesignatedInitializer(min, VecMin_MPI),
                               PetscDesignatedInitializer(setrandom, VecSetRandom_Seq),
                               PetscDesignatedInitializer(setoption, VecSetOption_MPI),
                               PetscDesignatedInitializer(setvaluesblocked, VecSetValuesBlocked_MPI),
                               PetscDesignatedInitializer(destroy, VecDestroy_MPI),
                               PetscDesignatedInitializer(view, VecView_MPI),
                               PetscDesignatedInitializer(placearray, VecPlaceArray_MPI),
                               PetscDesignatedInitializer(replacearray, VecReplaceArray_Seq),
                               PetscDesignatedInitializer(dot_local, VecDot_Seq),
                               PetscDesignatedInitializer(tdot_local, VecTDot_Seq),
                               PetscDesignatedInitializer(norm_local, VecNorm_Seq),
                               PetscDesignatedInitializer(mdot_local, VecMDot_Seq),
                               PetscDesignatedInitializer(mtdot_local, VecMTDot_Seq),
                               PetscDesignatedInitializer(load, VecLoad_Default),
                               PetscDesignatedInitializer(reciprocal, VecReciprocal_Default),
                               PetscDesignatedInitializer(conjugate, VecConjugate_Seq),
                               PetscDesignatedInitializer(setlocaltoglobalmapping, NULL),
                               PetscDesignatedInitializer(getlocaltoglobalmapping, VecGetLocalToGlobalMapping_MPI_VecGhost),
                               PetscDesignatedInitializer(setvalueslocal, NULL),
                               PetscDesignatedInitializer(resetarray, VecResetArray_MPI),
                               PetscDesignatedInitializer(setfromoptions, VecSetFromOptions_MPI), /* set from options */
                               PetscDesignatedInitializer(maxpointwisedivide, VecMaxPointwiseDivide_Seq),
                               PetscDesignatedInitializer(pointwisemax, VecPointwiseMax_Seq),
                               PetscDesignatedInitializer(pointwisemaxabs, VecPointwiseMaxAbs_Seq),
                               PetscDesignatedInitializer(pointwisemin, VecPointwiseMin_Seq),
                               PetscDesignatedInitializer(getvalues, VecGetValues_MPI),
                               PetscDesignatedInitializer(sqrt, NULL),
                               PetscDesignatedInitializer(abs, NULL),
                               PetscDesignatedInitializer(exp, NULL),
                               PetscDesignatedInitializer(log, NULL),
                               PetscDesignatedInitializer(shift, NULL),
                               PetscDesignatedInitializer(create, NULL), /* really? */
                               PetscDesignatedInitializer(stridegather, VecStrideGather_Default),
                               PetscDesignatedInitializer(stridescatter, VecStrideScatter_Default),
                               PetscDesignatedInitializer(dotnorm2, NULL),
                               PetscDesignatedInitializer(getsubvector, NULL),
                               PetscDesignatedInitializer(restoresubvector, NULL),
                               PetscDesignatedInitializer(getarrayread, NULL),
                               PetscDesignatedInitializer(restorearrayread, NULL),
                               PetscDesignatedInitializer(stridesubsetgather, VecStrideSubSetGather_Default),
                               PetscDesignatedInitializer(stridesubsetscatter, VecStrideSubSetScatter_Default),
                               PetscDesignatedInitializer(viewnative, VecView_MPI),
                               PetscDesignatedInitializer(loadnative, NULL),
                               PetscDesignatedInitializer(createlocalvector, NULL),
                               PetscDesignatedInitializer(getlocalvector, NULL),
                               PetscDesignatedInitializer(restorelocalvector, NULL),
                               PetscDesignatedInitializer(getlocalvectorread, NULL),
                               PetscDesignatedInitializer(restorelocalvectorread, NULL),
                               PetscDesignatedInitializer(bindtocpu, NULL),
                               PetscDesignatedInitializer(getarraywrite, NULL),
                               PetscDesignatedInitializer(restorearraywrite, NULL),
                               PetscDesignatedInitializer(getarrayandmemtype, NULL),
                               PetscDesignatedInitializer(restorearrayandmemtype, NULL),
                               PetscDesignatedInitializer(getarrayreadandmemtype, NULL),
                               PetscDesignatedInitializer(restorearrayreadandmemtype, NULL),
                               PetscDesignatedInitializer(getarraywriteandmemtype, NULL),
                               PetscDesignatedInitializer(restorearraywriteandmemtype, NULL),
                               PetscDesignatedInitializer(concatenate, NULL),
                               PetscDesignatedInitializer(sum, NULL),
                               PetscDesignatedInitializer(setpreallocationcoo, VecSetPreallocationCOO_MPI),
                               PetscDesignatedInitializer(setvaluescoo, VecSetValuesCOO_MPI),
                               PetscDesignatedInitializer(errorwnorm, NULL)};

/*
   VecCreate_MPI_Private - Basic create routine called by VecCreate_MPI() (i.e. VecCreateMPI()),
   VecCreateMPIWithArray(), VecCreate_Shared() (i.e. VecCreateShared()), VecCreateGhost(),
   VecDuplicate_MPI(), VecCreateGhostWithArray(), and VecDuplicate_Shared()

   If alloc is true and array is NULL then this routine allocates the space, otherwise
   no space is allocated.
 */
PetscErrorCode VecCreate_MPI_Private(Vec v, PetscBool alloc, PetscInt nghost, const PetscScalar array[])
{
  Vec_MPI  *s;
  PetscBool mdot_use_gemv  = PETSC_TRUE;
  PetscBool maxpy_use_gemv = PETSC_FALSE; // default is false as we saw bad performance with vendors' GEMV on tall skinny matrices

  PetscFunctionBegin;
  PetscCall(PetscNew(&s));
  v->data   = (void *)s;
  v->ops[0] = DvOps;

  PetscCall(PetscOptionsGetBool(NULL, NULL, "-vec_mdot_use_gemv", &mdot_use_gemv, NULL));
  PetscCall(PetscOptionsGetBool(NULL, NULL, "-vec_maxpy_use_gemv", &maxpy_use_gemv, NULL));

  // allocate multiple vectors together
  if (mdot_use_gemv || maxpy_use_gemv) v->ops[0].duplicatevecs = VecDuplicateVecs_MPI_GEMV;

  if (mdot_use_gemv) {
    v->ops[0].mdot        = VecMDot_MPI_GEMV;
    v->ops[0].mdot_local  = VecMDot_Seq_GEMV;
    v->ops[0].mtdot       = VecMTDot_MPI_GEMV;
    v->ops[0].mtdot_local = VecMTDot_Seq_GEMV;
  }
  if (maxpy_use_gemv) v->ops[0].maxpy = VecMAXPY_Seq_GEMV;

  s->nghost      = nghost;
  v->petscnative = PETSC_TRUE;
  if (array) v->offloadmask = PETSC_OFFLOAD_CPU;

  PetscCall(PetscLayoutSetUp(v->map));

  s->array           = (PetscScalar *)array;
  s->array_allocated = NULL;
  if (alloc && !array) {
    PetscInt n = v->map->n + nghost;
    PetscCall(PetscCalloc1(n, &s->array));
    s->array_allocated = s->array;
    PetscCall(PetscObjectComposedDataSetReal((PetscObject)v, NormIds[NORM_2], 0));
    PetscCall(PetscObjectComposedDataSetReal((PetscObject)v, NormIds[NORM_1], 0));
    PetscCall(PetscObjectComposedDataSetReal((PetscObject)v, NormIds[NORM_INFINITY], 0));
  }

  /* By default parallel vectors do not have a local representation */
  s->localrep    = NULL;
  s->localupdate = NULL;
  s->ghost       = NULL;

  v->stash.insertmode  = NOT_SET_VALUES;
  v->bstash.insertmode = NOT_SET_VALUES;
  /* create the stashes. The block size for bstash is set later when
     VecSetValuesBlocked is called.
   */
  PetscCall(VecStashCreate_Private(PetscObjectComm((PetscObject)v), 1, &v->stash));
  PetscCall(VecStashCreate_Private(PetscObjectComm((PetscObject)v), PetscAbs(v->map->bs), &v->bstash));

#if defined(PETSC_HAVE_MATLAB)
  PetscCall(PetscObjectComposeFunction((PetscObject)v, "PetscMatlabEnginePut_C", VecMatlabEnginePut_Default));
  PetscCall(PetscObjectComposeFunction((PetscObject)v, "PetscMatlabEngineGet_C", VecMatlabEngineGet_Default));
#endif
  PetscCall(PetscObjectChangeTypeName((PetscObject)v, VECMPI));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*
   Create a VECMPI with the given layout and array

   Collective

   Input Parameters:
+  map   - the layout
-  array - the array on host

   Output Parameter:
.  V - the vector object
*/
PetscErrorCode VecCreateMPIWithLayoutAndArray_Private(PetscLayout map, const PetscScalar array[], Vec *V)
{
  PetscFunctionBegin;
  PetscCall(VecCreateWithLayout_Private(map, V));
  PetscCall(VecCreate_MPI_Private(*V, PETSC_FALSE, 0, array));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*MC
   VECMPI - VECMPI = "mpi" - The basic parallel vector

   Options Database Key:
.  -vec_type mpi - sets the vector type to `VECMPI` during a call to `VecSetFromOptions()`

   Level: beginner

.seealso: [](ch_vectors), `Vec`, `VecType`, `VecCreate()`, `VecSetType()`, `VecSetFromOptions()`, `VecCreateMPIWithArray()`, `VecCreateMPI()`
M*/

PetscErrorCode VecCreate_MPI(Vec vv)
{
  PetscFunctionBegin;
  PetscCall(VecCreate_MPI_Private(vv, PETSC_TRUE, 0, NULL));
  PetscFunctionReturn(PETSC_SUCCESS);
}
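
/* Illustrative sketch (not part of the original source): the usual way to reach VecCreate_MPI()
   above is the standard creation sequence; VecSetType() dispatches to it for type VECMPI. */
static PetscErrorCode ExampleCreateMPIVec(MPI_Comm comm, PetscInt n, Vec *x)
{
  PetscFunctionBegin;
  PetscCall(VecCreate(comm, x));
  PetscCall(VecSetSizes(*x, n, PETSC_DETERMINE));
  PetscCall(VecSetType(*x, VECMPI)); /* calls VecCreate_MPI() */
  PetscFunctionReturn(PETSC_SUCCESS);
}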

/*MC
   VECSTANDARD = "standard" - A `VECSEQ` on one process and `VECMPI` on more than one process

   Options Database Key:
.  -vec_type standard - sets a vector type to standard on calls to `VecSetFromOptions()`

   Level: beginner

.seealso: [](ch_vectors), `Vec`, `VecType`, `VecCreateSeq()`, `VecCreateMPI()`
M*/

PETSC_EXTERN PetscErrorCode VecCreate_Standard(Vec v)
{
  PetscMPIInt size;

  PetscFunctionBegin;
  PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)v), &size));
  if (size == 1) {
    PetscCall(VecSetType(v, VECSEQ));
  } else {
    PetscCall(VecSetType(v, VECMPI));
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@C
   VecCreateMPIWithArray - Creates a parallel, array-style vector,
   where the user provides the array space to store the vector values.

   Collective

   Input Parameters:
+  comm  - the MPI communicator to use
.  bs    - block size, same meaning as `VecSetBlockSize()`
.  n     - local vector length, cannot be `PETSC_DECIDE`
.  N     - global vector length (or `PETSC_DETERMINE` to have it calculated)
-  array - the user provided array to store the vector values

   Output Parameter:
.  vv - the vector

   Level: intermediate

   Notes:
   Use `VecDuplicate()` or `VecDuplicateVecs()` to form additional vectors of the
   same type as an existing vector.

   If the user-provided array is `NULL`, then `VecPlaceArray()` can be used
   at a later stage to SET the array for storing the vector values.

   PETSc does NOT free `array` when the vector is destroyed via `VecDestroy()`.

   The user should not free `array` until the vector is destroyed.

.seealso: [](ch_vectors), `Vec`, `VecType`, `VecCreateSeqWithArray()`, `VecCreate()`, `VecDuplicate()`, `VecDuplicateVecs()`, `VecCreateGhost()`,
          `VecCreateMPI()`, `VecCreateGhostWithArray()`, `VecPlaceArray()`
@*/
PetscErrorCode VecCreateMPIWithArray(MPI_Comm comm, PetscInt bs, PetscInt n, PetscInt N, const PetscScalar array[], Vec *vv)
{
  PetscFunctionBegin;
  PetscCheck(n != PETSC_DECIDE, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Must set local size of vector");
  PetscCall(PetscSplitOwnership(comm, &n, &N));
  PetscCall(VecCreate(comm, vv));
  PetscCall(VecSetSizes(*vv, n, N));
  PetscCall(VecSetBlockSize(*vv, bs));
  PetscCall(VecCreate_MPI_Private(*vv, PETSC_FALSE, 0, array));
  PetscFunctionReturn(PETSC_SUCCESS);
}
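
/* Illustrative sketch (not part of the original source): wrapping user-owned storage in a
   parallel vector with the routine above. The local length and values are hypothetical; the
   array must outlive the vector and is never freed by PETSc. */
static PetscErrorCode ExampleWrapUserArray(MPI_Comm comm, Vec *x)
{
  PetscInt           n       = 4;                      /* hypothetical local length */
  static PetscScalar vals[4] = {1.0, 2.0, 3.0, 4.0};   /* user-owned storage, one copy per rank */

  PetscFunctionBegin;
  PetscCall(VecCreateMPIWithArray(comm, 1, n, PETSC_DETERMINE, vals, x));
  PetscFunctionReturn(PETSC_SUCCESS);
}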

/*@C
   VecCreateGhostWithArray - Creates a parallel vector with ghost padding on each processor;
   the caller allocates the array space.

   Collective

   Input Parameters:
+  comm   - the MPI communicator to use
.  n      - local vector length
.  N      - global vector length (or `PETSC_DETERMINE` to have it calculated if `n` is given)
.  nghost - number of local ghost points
.  ghosts - global indices of ghost points (or `NULL` if not needed), these do not need to be in increasing (sorted) order
-  array  - the space to store the vector values (must be at least `n + nghost` entries long)

   Output Parameter:
.  vv - the global vector representation (without ghost points as part of vector)

   Level: advanced

   Notes:
   Use `VecGhostGetLocalForm()` to access the local, ghosted representation
   of the vector.

   This also automatically sets the `ISLocalToGlobalMapping()` for this vector.

.seealso: [](ch_vectors), `Vec`, `VecType`, `VecCreate()`, `VecGhostGetLocalForm()`, `VecGhostRestoreLocalForm()`,
          `VecCreateGhost()`, `VecCreateSeqWithArray()`, `VecCreateMPIWithArray()`,
          `VecCreateGhostBlock()`, `VecCreateGhostBlockWithArray()`, `VecMPISetGhost()`
@*/
PetscErrorCode VecCreateGhostWithArray(MPI_Comm comm, PetscInt n, PetscInt N, PetscInt nghost, const PetscInt ghosts[], const PetscScalar array[], Vec *vv)
{
  Vec_MPI     *w;
  PetscScalar *larray;
  IS           from, to;

  PetscFunctionBegin;
  *vv = NULL;

  PetscCheck(n != PETSC_DECIDE, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Must set local size");
  PetscCheck(nghost != PETSC_DECIDE, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Must set local ghost size");
  PetscCheck(nghost >= 0, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Ghost length must be >= 0");
  PetscCall(PetscSplitOwnership(comm, &n, &N));
  /* Create global representation */
  PetscCall(VecCreate(comm, vv));
  PetscCall(VecSetSizes(*vv, n, N));
  PetscCall(VecCreate_MPI_Private(*vv, PETSC_TRUE, nghost, array));
  w = (Vec_MPI *)(*vv)->data;
  /* Create local representation */
  PetscCall(VecGetArray(*vv, &larray));
  PetscCall(VecCreateSeqWithArray(PETSC_COMM_SELF, 1, n + nghost, larray, &w->localrep));
  PetscCall(VecRestoreArray(*vv, &larray));

  /*
     Create scatter context for scattering (updating) ghost values
   */
  PetscCall(ISCreateGeneral(comm, nghost, ghosts, PETSC_COPY_VALUES, &from));
  PetscCall(ISCreateStride(PETSC_COMM_SELF, nghost, n, 1, &to));
  PetscCall(VecScatterCreate(*vv, from, w->localrep, to, &w->localupdate));
  PetscCall(ISDestroy(&to));

  w->ghost                            = from;
  (*vv)->ops->getlocaltoglobalmapping = VecGetLocalToGlobalMapping_MPI_VecGhost;
  PetscFunctionReturn(PETSC_SUCCESS);
}
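
/* Illustrative sketch (not part of the original source): accessing the local representation
   (w->localrep above) of a ghosted vector through the public API. Entries [n, n+nghost) of the
   local form hold the ghost values. */
static PetscErrorCode ExampleAccessLocalForm(Vec x)
{
  Vec lf;

  PetscFunctionBegin;
  PetscCall(VecGhostGetLocalForm(x, &lf));
  /* ... read/write owned and ghost entries through lf ... */
  PetscCall(VecGhostRestoreLocalForm(x, &lf));
  PetscFunctionReturn(PETSC_SUCCESS);
}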

/*@
   VecGhostGetGhostIS - Return the ghosting indices of a ghost vector

   Input Parameter:
.  X - ghost vector

   Output Parameter:
.  ghost - ghosting indices

   Level: beginner

.seealso: `VecCreateGhostWithArray()`, `VecCreateMPIWithArray()`
@*/
PetscErrorCode VecGhostGetGhostIS(Vec X, IS *ghost)
{
  Vec_MPI  *w;
  PetscBool flg;

  PetscFunctionBegin;
  PetscAssertPointer(ghost, 2);
  PetscCall(PetscObjectTypeCompare((PetscObject)X, VECMPI, &flg));
  PetscCheck(flg, PetscObjectComm((PetscObject)X), PETSC_ERR_ARG_WRONGSTATE, "VecGhostGetGhostIS is not supported for vec type %s", ((PetscObject)X)->type_name);
  w      = (Vec_MPI *)X->data;
  *ghost = w->ghost;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@
   VecCreateGhost - Creates a parallel vector with ghost padding on each processor.

   Collective

   Input Parameters:
+  comm   - the MPI communicator to use
.  n      - local vector length
.  N      - global vector length (or `PETSC_DETERMINE` to have it calculated if `n` is given)
.  nghost - number of local ghost points
-  ghosts - global indices of ghost points, these do not need to be in increasing (sorted) order

   Output Parameter:
.  vv - the global vector representation (without ghost points as part of vector)

   Level: advanced

   Notes:
   Use `VecGhostGetLocalForm()` to access the local, ghosted representation
   of the vector.

   This also automatically sets the `ISLocalToGlobalMapping()` for this vector.

.seealso: [](ch_vectors), `Vec`, `VecType`, `VecCreateSeq()`, `VecCreate()`, `VecDuplicate()`, `VecDuplicateVecs()`, `VecCreateMPI()`,
          `VecGhostGetLocalForm()`, `VecGhostRestoreLocalForm()`, `VecGhostUpdateBegin()`,
          `VecCreateGhostWithArray()`, `VecCreateMPIWithArray()`, `VecGhostUpdateEnd()`,
          `VecCreateGhostBlock()`, `VecCreateGhostBlockWithArray()`, `VecMPISetGhost()`
@*/
PetscErrorCode VecCreateGhost(MPI_Comm comm, PetscInt n, PetscInt N, PetscInt nghost, const PetscInt ghosts[], Vec *vv)
{
  PetscFunctionBegin;
  PetscCall(VecCreateGhostWithArray(comm, n, N, nghost, ghosts, NULL, vv));
  PetscFunctionReturn(PETSC_SUCCESS);
}
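
/* Illustrative sketch (not part of the original source): creating a ghosted vector and refreshing
   its ghost values with the scatter (w->localupdate) built above. The sizes and ghost indices are
   hypothetical. */
static PetscErrorCode ExampleGhostUpdate(MPI_Comm comm, PetscInt n, PetscInt N)
{
  Vec      x;
  PetscInt nghost = 2, ghosts[2] = {0, 1}; /* hypothetical global ghost indices */

  PetscFunctionBegin;
  PetscCall(VecCreateGhost(comm, n, N, nghost, ghosts, &x));
  /* ... set and assemble the owned entries of x ... */
  PetscCall(VecGhostUpdateBegin(x, INSERT_VALUES, SCATTER_FORWARD)); /* owned -> ghost copies */
  PetscCall(VecGhostUpdateEnd(x, INSERT_VALUES, SCATTER_FORWARD));
  PetscCall(VecDestroy(&x));
  PetscFunctionReturn(PETSC_SUCCESS);
}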

/*@
   VecMPISetGhost - Sets the ghost points for an MPI ghost vector

   Collective

   Input Parameters:
+  vv     - the MPI vector
.  nghost - number of local ghost points
-  ghosts - global indices of ghost points, these do not need to be in increasing (sorted) order

   Level: advanced

   Notes:
   Use `VecGhostGetLocalForm()` to access the local, ghosted representation
   of the vector.

   This also automatically sets the `ISLocalToGlobalMapping()` for this vector.

   You must call this AFTER you have set the type of the vector (with `VecSetType()`) and the size (with `VecSetSizes()`).

.seealso: [](ch_vectors), `Vec`, `VecType`, `VecCreateSeq()`, `VecCreate()`, `VecDuplicate()`, `VecDuplicateVecs()`, `VecCreateMPI()`,
          `VecGhostGetLocalForm()`, `VecGhostRestoreLocalForm()`, `VecGhostUpdateBegin()`,
          `VecCreateGhostWithArray()`, `VecCreateMPIWithArray()`, `VecGhostUpdateEnd()`,
          `VecCreateGhostBlock()`, `VecCreateGhostBlockWithArray()`
@*/
PetscErrorCode VecMPISetGhost(Vec vv, PetscInt nghost, const PetscInt ghosts[])
{
  PetscBool flg;

  PetscFunctionBegin;
  PetscCall(PetscObjectTypeCompare((PetscObject)vv, VECMPI, &flg));
  /* if this is already a fully instantiated VECMPI, destroy it and rebuild it with ghosting */
  if (flg) {
    PetscInt     n, N;
    Vec_MPI     *w;
    PetscScalar *larray;
    IS           from, to;
    MPI_Comm     comm;

    PetscCall(PetscObjectGetComm((PetscObject)vv, &comm));
    n = vv->map->n;
    N = vv->map->N;
    PetscUseTypeMethod(vv, destroy);
    PetscCall(VecSetSizes(vv, n, N));
    PetscCall(VecCreate_MPI_Private(vv, PETSC_TRUE, nghost, NULL));
    w = (Vec_MPI *)vv->data;
    /* Create local representation */
    PetscCall(VecGetArray(vv, &larray));
    PetscCall(VecCreateSeqWithArray(PETSC_COMM_SELF, 1, n + nghost, larray, &w->localrep));
    PetscCall(VecRestoreArray(vv, &larray));

    /*
       Create scatter context for scattering (updating) ghost values
     */
    PetscCall(ISCreateGeneral(comm, nghost, ghosts, PETSC_COPY_VALUES, &from));
    PetscCall(ISCreateStride(PETSC_COMM_SELF, nghost, n, 1, &to));
    PetscCall(VecScatterCreate(vv, from, w->localrep, to, &w->localupdate));
    PetscCall(ISDestroy(&to));

    w->ghost                         = from;
    vv->ops->getlocaltoglobalmapping = VecGetLocalToGlobalMapping_MPI_VecGhost;
  } else {
    PetscCheck(vv->ops->create != VecCreate_MPI, PetscObjectComm((PetscObject)vv), PETSC_ERR_ARG_WRONGSTATE, "Must set local or global size before setting ghosting");
    PetscCheck(((PetscObject)vv)->type_name, PetscObjectComm((PetscObject)vv), PETSC_ERR_ARG_WRONGSTATE, "Must set type to VECMPI before ghosting");
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}
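
/* Illustrative sketch (not part of the original source): the type and sizes must be set before
   VecMPISetGhost(), as checked above. The ghost index is hypothetical. */
static PetscErrorCode ExampleSetGhostAfterType(MPI_Comm comm, PetscInt n, Vec *x)
{
  PetscInt ghosts[1] = {0}; /* hypothetical global ghost index */

  PetscFunctionBegin;
  PetscCall(VecCreate(comm, x));
  PetscCall(VecSetSizes(*x, n, PETSC_DETERMINE));
  PetscCall(VecSetType(*x, VECMPI));
  PetscCall(VecMPISetGhost(*x, 1, ghosts)); /* rebuilds the vector with ghost padding */
  PetscFunctionReturn(PETSC_SUCCESS);
}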

/* ------------------------------------------------------------------------------------------*/
/*@C
   VecCreateGhostBlockWithArray - Creates a parallel vector with ghost padding on each processor;
   the caller allocates the array space. Indices in the ghost region are based on blocks.

   Collective

   Input Parameters:
+  comm   - the MPI communicator to use
.  bs     - block size
.  n      - local vector length
.  N      - global vector length (or `PETSC_DETERMINE` to have it calculated if `n` is given)
.  nghost - number of local ghost blocks
.  ghosts - global indices of ghost blocks (or `NULL` if not needed), counts are by block not by index, these do not need to be in increasing (sorted) order
-  array  - the space to store the vector values (must be at least `n + nghost*bs` entries long)

   Output Parameter:
.  vv - the global vector representation (without ghost points as part of vector)

   Level: advanced

   Notes:
   Use `VecGhostGetLocalForm()` to access the local, ghosted representation
   of the vector.

   `n` is the local vector size (the total local size, not the number of blocks), while `nghost`
   is the number of blocks in the ghost portion, i.e. the number of elements in the ghost
   portion is `bs*nghost`.

.seealso: [](ch_vectors), `Vec`, `VecType`, `VecCreate()`, `VecGhostGetLocalForm()`, `VecGhostRestoreLocalForm()`,
          `VecCreateGhost()`, `VecCreateSeqWithArray()`, `VecCreateMPIWithArray()`,
          `VecCreateGhostWithArray()`, `VecCreateGhostBlock()`
@*/
PetscErrorCode VecCreateGhostBlockWithArray(MPI_Comm comm, PetscInt bs, PetscInt n, PetscInt N, PetscInt nghost, const PetscInt ghosts[], const PetscScalar array[], Vec *vv)
{
  Vec_MPI               *w;
  PetscScalar           *larray;
  IS                     from, to;
  ISLocalToGlobalMapping ltog;
  PetscInt               rstart, i, nb, *indices;

  PetscFunctionBegin;
  *vv = NULL;

  PetscCheck(n != PETSC_DECIDE, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Must set local size");
  PetscCheck(nghost != PETSC_DECIDE, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Must set local ghost size");
  PetscCheck(nghost >= 0, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Ghost length must be >= 0");
  PetscCheck(n % bs == 0, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Local size must be a multiple of block size");
  PetscCall(PetscSplitOwnership(comm, &n, &N));
  /* Create global representation */
  PetscCall(VecCreate(comm, vv));
  PetscCall(VecSetSizes(*vv, n, N));
  PetscCall(VecSetBlockSize(*vv, bs));
  PetscCall(VecCreate_MPI_Private(*vv, PETSC_TRUE, nghost * bs, array));
  w = (Vec_MPI *)(*vv)->data;
  /* Create local representation */
  PetscCall(VecGetArray(*vv, &larray));
  PetscCall(VecCreateSeqWithArray(PETSC_COMM_SELF, bs, n + bs * nghost, larray, &w->localrep));
  PetscCall(VecRestoreArray(*vv, &larray));

  /*
     Create scatter context for scattering (updating) ghost values
   */
  PetscCall(ISCreateBlock(comm, bs, nghost, ghosts, PETSC_COPY_VALUES, &from));
  PetscCall(ISCreateStride(PETSC_COMM_SELF, bs * nghost, n, 1, &to));
  PetscCall(VecScatterCreate(*vv, from, w->localrep, to, &w->localupdate));
  PetscCall(ISDestroy(&to));
  PetscCall(ISDestroy(&from));

  /* set local to global mapping for ghosted vector */
  nb = n / bs;
  PetscCall(PetscMalloc1(nb + nghost, &indices));
  PetscCall(VecGetOwnershipRange(*vv, &rstart, NULL));
  rstart = rstart / bs;

  for (i = 0; i < nb; i++) indices[i] = rstart + i;
  for (i = 0; i < nghost; i++) indices[nb + i] = ghosts[i];

  PetscCall(ISLocalToGlobalMappingCreate(comm, bs, nb + nghost, indices, PETSC_OWN_POINTER, &ltog));
  PetscCall(VecSetLocalToGlobalMapping(*vv, ltog));
  PetscCall(ISLocalToGlobalMappingDestroy(&ltog));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@
   VecCreateGhostBlock - Creates a parallel vector with ghost padding on each processor.
   The indexing of the ghost points is done with blocks.

   Collective

   Input Parameters:
+  comm   - the MPI communicator to use
.  bs     - the block size
.  n      - local vector length
.  N      - global vector length (or `PETSC_DETERMINE` to have it calculated if `n` is given)
.  nghost - number of local ghost blocks
-  ghosts - global indices of ghost blocks, counts are by block, not by individual index, these do not need to be in increasing (sorted) order

   Output Parameter:
.  vv - the global vector representation (without ghost points as part of vector)

   Level: advanced

   Notes:
   Use `VecGhostGetLocalForm()` to access the local, ghosted representation
   of the vector.

   `n` is the local vector size (the total local size, not the number of blocks), while `nghost`
   is the number of blocks in the ghost portion, i.e. the number of elements in the ghost
   portion is `bs*nghost`.

.seealso: [](ch_vectors), `Vec`, `VecType`, `VecCreateSeq()`, `VecCreate()`, `VecDuplicate()`, `VecDuplicateVecs()`, `VecCreateMPI()`,
          `VecGhostGetLocalForm()`, `VecGhostRestoreLocalForm()`,
          `VecCreateGhostWithArray()`, `VecCreateMPIWithArray()`, `VecCreateGhostBlockWithArray()`
@*/
PetscErrorCode VecCreateGhostBlock(MPI_Comm comm, PetscInt bs, PetscInt n, PetscInt N, PetscInt nghost, const PetscInt ghosts[], Vec *vv)
{
  PetscFunctionBegin;
  PetscCall(VecCreateGhostBlockWithArray(comm, bs, n, N, nghost, ghosts, NULL, vv));
  PetscFunctionReturn(PETSC_SUCCESS);
}
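
/* Illustrative sketch (not part of the original source): block-ghost creation with the routine
   above. The local length n must be divisible by bs, and ghost indices count blocks, not scalar
   entries; the numbers here are hypothetical. */
static PetscErrorCode ExampleGhostBlock(MPI_Comm comm, Vec *x)
{
  PetscInt bs = 2, n = 6;   /* three local blocks of size 2 (hypothetical) */
  PetscInt ghosts[1] = {0}; /* global index of one ghost *block* */

  PetscFunctionBegin;
  PetscCall(VecCreateGhostBlock(comm, bs, n, PETSC_DETERMINE, 1, ghosts, x));
  PetscFunctionReturn(PETSC_SUCCESS);
}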