Actual source code: vecstash.c
#include <petsc/private/vecimpl.h>

#define DEFAULT_STASH_SIZE 100

/*
  VecStashCreate_Private - Creates a stash, currently used for all the parallel
  vector implementations. The stash is where elements of a vector destined
  to be stored on other processors are kept until vector assembly is done.

  This is a simple-minded stash: it simply appends entries to the end of the stash.

  Input Parameters:
  comm - communicator, required for scatters.
  bs   - stash block size, used when stashing blocks of values

  Output Parameter:
. stash - the newly created stash
*/
PetscErrorCode VecStashCreate_Private(MPI_Comm comm, PetscInt bs, VecStash *stash)
{
  PetscInt max, *opt, nopt;
  PetscBool flg;

  PetscFunctionBegin;
  /* Require 2 tags, get the second using PetscCommGetNewTag() */
  stash->comm = comm;
  PetscCall(PetscCommGetNewTag(stash->comm, &stash->tag1));
  PetscCall(PetscCommGetNewTag(stash->comm, &stash->tag2));
  PetscCallMPI(MPI_Comm_size(stash->comm, &stash->size));
  PetscCallMPI(MPI_Comm_rank(stash->comm, &stash->rank));

  nopt = stash->size;
  PetscCall(PetscMalloc1(nopt, &opt));
  PetscCall(PetscOptionsGetIntArray(NULL, NULL, "-vecstash_initial_size", opt, &nopt, &flg));
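  /* -vecstash_initial_size accepts either one value that applies to every rank, one value
     per rank, or fewer values than ranks, in which case only the leading ranks are set
     and the remaining ranks use the default */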
  if (flg) {
    if (nopt == 1) max = opt[0];
    else if (nopt == stash->size) max = opt[stash->rank];
    else if (stash->rank < nopt) max = opt[stash->rank];
    else max = 0; /* use default */
    stash->umax = max;
  } else {
    stash->umax = 0;
  }
  PetscCall(PetscFree(opt));

  if (bs <= 0) bs = 1;

  stash->bs = bs;
  stash->nmax = 0;
  stash->oldnmax = 0;
  stash->n = 0;
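  /* reallocs starts at -1 so the initial allocation made by VecStashExpand_Private() is not
     reported as a reallocation by VecStashGetInfo_Private() */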
  stash->reallocs = -1;
  stash->idx = NULL;
  stash->array = NULL;

  stash->send_waits = NULL;
  stash->recv_waits = NULL;
  stash->send_status = NULL;
  stash->nsends = 0;
  stash->nrecvs = 0;
  stash->svalues = NULL;
  stash->rvalues = NULL;
  stash->rmax = 0;
  stash->nprocs = NULL;
  stash->nprocessed = 0;
  stash->donotstash = PETSC_FALSE;
  stash->ignorenegidx = PETSC_FALSE;
  PetscFunctionReturn(PETSC_SUCCESS);
}
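
/*
  A sketch of the typical life cycle of a stash, pieced together from the
  routines in this file (the Vec* entry points named on the right live
  elsewhere in PETSc and are not defined here):

    VecStashCreate_Private(comm, 1, &stash);                           at vector creation
    ...                                                                VecSetValues() stashes entries owned by other ranks
    VecStashScatterBegin_Private(&stash, owners);                      VecAssemblyBegin()
    VecStashScatterGetMesg_Private(&stash, &n, &rows, &vals, &flg);    VecAssemblyEnd() ...
    while (flg) {
      ...                                                              add or insert the received values locally
      VecStashScatterGetMesg_Private(&stash, &n, &rows, &vals, &flg);
    }
    VecStashScatterEnd_Private(&stash);
    VecStashDestroy_Private(&stash);                                   at vector destruction
*/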

/*
  VecStashDestroy_Private - Destroy the stash
*/
PetscErrorCode VecStashDestroy_Private(VecStash *stash)
{
  PetscFunctionBegin;
  PetscCall(PetscFree2(stash->array, stash->idx));
  PetscCall(PetscFree(stash->bowners));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*
  VecStashScatterEnd_Private - This is called as the final stage of the
  scatter. The final stages of message passing are done here, and
  all the memory used for message passing is cleaned up. This
  routine also resets the stash, deallocates the memory used for it,
  and keeps track of the current memory usage so that the same value
  can be used the next time through.
*/
PetscErrorCode VecStashScatterEnd_Private(VecStash *stash)
{
  PetscInt nsends = stash->nsends, oldnmax;
  MPI_Status *send_status;

  PetscFunctionBegin;
  /* wait on sends */
  if (nsends) {
    PetscCall(PetscMalloc1(2 * nsends, &send_status));
    PetscCallMPI(MPI_Waitall(2 * nsends, stash->send_waits, send_status));
    PetscCall(PetscFree(send_status));
  }

  /* Now update oldnmax to be approximately 10% more than the largest n used, this way
     the wastage of space is reduced the next time this stash is used.
     Also update oldnmax only if it increases */
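  /* For example, with n = 100 and bs = 1 the stash keeps ((PetscInt)(100 * 1.1) + 5) * 1 = 115 slots for the next assembly */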
  if (stash->n) {
    oldnmax = ((PetscInt)(stash->n * 1.1) + 5) * stash->bs;
    if (oldnmax > stash->oldnmax) stash->oldnmax = oldnmax;
  }

  stash->nmax = 0;
  stash->n = 0;
  stash->reallocs = -1;
  stash->rmax = 0;
  stash->nprocessed = 0;

  PetscCall(PetscFree2(stash->array, stash->idx));
  stash->array = NULL;
  stash->idx = NULL;
  PetscCall(PetscFree(stash->send_waits));
  PetscCall(PetscFree(stash->recv_waits));
  PetscCall(PetscFree2(stash->svalues, stash->sindices));
  PetscCall(PetscFree2(stash->rvalues, stash->rindices));
  PetscCall(PetscFree(stash->nprocs));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*
  VecStashGetInfo_Private - Gets the relevant statistics of the stash

  Input Parameter:
  stash    - the stash

  Output Parameters:
  nstash   - the size of the stash
  reallocs - the number of additional mallocs incurred.
*/
PetscErrorCode VecStashGetInfo_Private(VecStash *stash, PetscInt *nstash, PetscInt *reallocs)
{
  PetscFunctionBegin;
  if (nstash) *nstash = stash->n * stash->bs;
  if (reallocs) {
    if (stash->reallocs < 0) *reallocs = 0;
    else *reallocs = stash->reallocs;
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*
  VecStashSetInitialSize_Private - Sets the initial size of the stash

  Input Parameters:
  stash - the stash
  max   - the value that is used as the max size of the stash.
          This value is used while allocating memory. It specifies
          the number of values stored, even for the block stash.
*/
PetscErrorCode VecStashSetInitialSize_Private(VecStash *stash, PetscInt max)
{
  PetscFunctionBegin;
  stash->umax = max;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*
  VecStashExpand_Private - Expand the stash. This function is called
  when the space in the stash is not sufficient to add the new values
  being inserted into the stash.

  Input Parameters:
  stash - the stash
  incr  - the minimum increase requested

  Notes:
  This routine doubles the currently used memory.
*/
PetscErrorCode VecStashExpand_Private(VecStash *stash, PetscInt incr)
{
  PetscInt *n_idx, newnmax, bs = stash->bs;
  PetscScalar *n_array;

  PetscFunctionBegin;
  /* allocate a larger stash. */
  if (!stash->oldnmax && !stash->nmax) { /* new stash */
    if (stash->umax) newnmax = stash->umax / bs;
    else newnmax = DEFAULT_STASH_SIZE / bs;
  } else if (!stash->nmax) { /* reusing stash */
    if (stash->umax > stash->oldnmax) newnmax = stash->umax / bs;
    else newnmax = stash->oldnmax / bs;
  } else newnmax = stash->nmax * 2;
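
  /* ensure the new stash can hold at least 'incr' entries beyond those currently in use */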
  if (newnmax < (stash->nmax + incr)) newnmax += 2 * incr;

  PetscCall(PetscMalloc2(bs * newnmax, &n_array, newnmax, &n_idx));
  PetscCall(PetscMemcpy(n_array, stash->array, bs * stash->nmax * sizeof(PetscScalar)));
  PetscCall(PetscMemcpy(n_idx, stash->idx, stash->nmax * sizeof(PetscInt)));
  PetscCall(PetscFree2(stash->array, stash->idx));

  stash->array = n_array;
  stash->idx = n_idx;
  stash->nmax = newnmax;
  stash->reallocs++;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*
  VecStashScatterBegin_Private - Initiates the transfer of values to the
  correct owners. This function goes through the stash, checks the
  owner of each stashed value, and sends the values off to the owner
  processors.

  Input Parameters:
  stash  - the stash
  owners - an array of size 'no-of-procs' which gives the ownership range
           for each node.

  Notes:
  For the block stash the 'owners' array gives the ranges in blocked
  global indices; for the regular stash it gives them in the usual
  global indices.
*/
PetscErrorCode VecStashScatterBegin_Private(VecStash *stash, const PetscInt *owners)
{
  PetscMPIInt size = stash->size, tag1 = stash->tag1, tag2 = stash->tag2;
  PetscInt *owner, *start, *nprocs, nsends, nreceives;
  PetscInt nmax, count, *sindices, *rindices, i, j, idx, bs = stash->bs, lastidx;
  PetscScalar *rvalues, *svalues;
  MPI_Comm comm = stash->comm;
  MPI_Request *send_waits, *recv_waits;

  PetscFunctionBegin;
  /* first count number of contributors to each processor */
  PetscCall(PetscCalloc1(2 * size, &nprocs));
  PetscCall(PetscMalloc1(stash->n, &owner));
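  /* nprocs is laid out in pairs: nprocs[2*j] counts the entries destined for rank j,
     and nprocs[2*j+1] is 1 if any message goes to rank j (0 otherwise) */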

  j = 0;
  lastidx = -1;
  for (i = 0; i < stash->n; i++) {
    /* if indices are NOT locally sorted, need to start search at the beginning */
    if (lastidx > (idx = stash->idx[i])) j = 0;
    lastidx = idx;
    for (; j < size; j++) {
      if (idx >= owners[j] && idx < owners[j + 1]) {
        nprocs[2 * j]++;
        nprocs[2 * j + 1] = 1;
        owner[i] = j;
        break;
      }
    }
  }
  nsends = 0;
  for (i = 0; i < size; i++) nsends += nprocs[2 * i + 1];

  /* inform other processors of number of messages and max length */
  PetscCall(PetscMaxSum(comm, nprocs, &nmax, &nreceives));
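  /* after PetscMaxSum(), nmax is the length (in indices) of the longest message this rank
     will receive and nreceives is the number of messages it will receive */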

  /* post receives:
     since we don't know how long each individual message is we
     allocate the largest needed buffer for each receive. Potentially
     this is a lot of wasted space.
  */
  PetscCall(PetscMalloc2(nreceives * nmax * bs, &rvalues, nreceives * nmax, &rindices));
  PetscCall(PetscMalloc1(2 * nreceives, &recv_waits));
  for (i = 0, count = 0; i < nreceives; i++) {
    PetscCallMPI(MPI_Irecv(rvalues + bs * nmax * i, bs * nmax, MPIU_SCALAR, MPI_ANY_SOURCE, tag1, comm, recv_waits + count++));
    PetscCallMPI(MPI_Irecv(rindices + nmax * i, nmax, MPIU_INT, MPI_ANY_SOURCE, tag2, comm, recv_waits + count++));
  }

  /* do sends:
     1) start[i] gives the starting index in svalues for stuff going to
        the ith processor
  */
  PetscCall(PetscMalloc2(stash->n * bs, &svalues, stash->n, &sindices));
  PetscCall(PetscMalloc1(2 * nsends, &send_waits));
  PetscCall(PetscMalloc1(size, &start));
  /* use 2 sends: the first with all_v, the next with all_i */
  start[0] = 0;
  for (i = 1; i < size; i++) start[i] = start[i - 1] + nprocs[2 * i - 2];

  for (i = 0; i < stash->n; i++) {
    j = owner[i];
    if (bs == 1) svalues[start[j]] = stash->array[i];
    else PetscCall(PetscMemcpy(svalues + bs * start[j], stash->array + bs * i, bs * sizeof(PetscScalar)));
    sindices[start[j]] = stash->idx[i];
    start[j]++;
  }
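  /* the packing loop above advanced each start[j] to the end of its block, so rebuild the offsets before posting the sends */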
  start[0] = 0;
  for (i = 1; i < size; i++) start[i] = start[i - 1] + nprocs[2 * i - 2];

  for (i = 0, count = 0; i < size; i++) {
    if (nprocs[2 * i + 1]) {
      PetscCallMPI(MPI_Isend(svalues + bs * start[i], bs * nprocs[2 * i], MPIU_SCALAR, i, tag1, comm, send_waits + count++));
      PetscCallMPI(MPI_Isend(sindices + start[i], nprocs[2 * i], MPIU_INT, i, tag2, comm, send_waits + count++));
    }
  }
  PetscCall(PetscFree(owner));
  PetscCall(PetscFree(start));
  /* This memory is reused in scatter end for a different purpose */
  for (i = 0; i < 2 * size; i++) nprocs[i] = -1;
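  /* -1 marks "no message received yet" from that rank; VecStashScatterGetMesg_Private()
     overwrites these entries with receive-buffer slot numbers as messages arrive */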

  stash->nprocs = nprocs;
  stash->svalues = svalues;
  stash->sindices = sindices;
  stash->rvalues = rvalues;
  stash->rindices = rindices;
  stash->nsends = nsends;
  stash->nrecvs = nreceives;
  stash->send_waits = send_waits;
  stash->recv_waits = recv_waits;
  stash->rmax = nmax;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*
  VecStashScatterGetMesg_Private - This function waits on the receives posted
  in the function VecStashScatterBegin_Private() and returns one message at
  a time to the calling function. If no messages are left, it indicates this
  by setting flg = 0, else it sets flg = 1.

  Input Parameter:
  stash - the stash

  Output Parameters:
  nvals - the number of entries in the current message.
  rows  - an array of row indices (or blocked indices) corresponding to the values
  vals  - the values
  flg   - 0 indicates no more messages left, and the current call has no values associated.
          1 indicates that the current call successfully received a message, and the
          other output parameters nvals, rows, vals are set appropriately.
*/
PetscErrorCode VecStashScatterGetMesg_Private(VecStash *stash, PetscMPIInt *nvals, PetscInt **rows, PetscScalar **vals, PetscInt *flg)
{
  PetscMPIInt i = 0; /* dummy value so MPI-Uni doesn't think it is not set */
  PetscInt *flg_v;
  PetscInt i1, i2, bs = stash->bs;
  MPI_Status recv_status;
  PetscBool match_found = PETSC_FALSE;

  PetscFunctionBegin;
  *flg = 0; /* When a message is discovered this is reset to 1 */
  /* Return if no more messages to process */
  if (stash->nprocessed == stash->nrecvs) PetscFunctionReturn(PETSC_SUCCESS);

  flg_v = stash->nprocs;
  /* If a matching pair of receives is found, process them, and return the data to
     the calling function. Until then keep receiving messages */
  while (!match_found) {
    PetscCallMPI(MPI_Waitany(2 * stash->nrecvs, stash->recv_waits, &i, &recv_status));
    /* Now pack the received message into a structure which is usable by others */
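    /* receives were posted in pairs in VecStashScatterBegin_Private(): even request indices
       are value messages (tag1), odd ones are index messages (tag2), and i / 2 is the
       receive-buffer slot */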
    if (i % 2) {
      PetscCallMPI(MPI_Get_count(&recv_status, MPIU_INT, nvals));
      flg_v[2 * recv_status.MPI_SOURCE + 1] = i / 2;
    } else {
      PetscCallMPI(MPI_Get_count(&recv_status, MPIU_SCALAR, nvals));
      flg_v[2 * recv_status.MPI_SOURCE] = i / 2;
      *nvals = *nvals / bs;
    }

    /* Check if we have both the messages from this proc */
    i1 = flg_v[2 * recv_status.MPI_SOURCE];
    i2 = flg_v[2 * recv_status.MPI_SOURCE + 1];
    if (i1 != -1 && i2 != -1) {
      *rows = stash->rindices + i2 * stash->rmax;
      *vals = stash->rvalues + i1 * bs * stash->rmax;
      *flg = 1;
      stash->nprocessed++;
      match_found = PETSC_TRUE;
    }
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*
 * Sort the stash, removing duplicates (combining as appropriate).
 */
PetscErrorCode VecStashSortCompress_Private(VecStash *stash)
{
  PetscInt i, j, bs = stash->bs;

  PetscFunctionBegin;
  if (!stash->n) PetscFunctionReturn(PETSC_SUCCESS);
  if (bs == 1) {
    PetscCall(PetscSortIntWithScalarArray(stash->n, stash->idx, stash->array));
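    /* compact in place: j trails i over the sorted entries, and runs of equal indices are
       merged into slot j according to the insert mode */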
    for (i = 1, j = 0; i < stash->n; i++) {
      if (stash->idx[i] == stash->idx[j]) {
        switch (stash->insertmode) {
        case ADD_VALUES:
          stash->array[j] += stash->array[i];
          break;
        case INSERT_VALUES:
          stash->array[j] = stash->array[i];
          break;
        default:
          SETERRQ(PETSC_COMM_SELF, PETSC_ERR_SUP, "Insert mode not supported 0x%x", stash->insertmode);
        }
      } else {
        j++;
        stash->idx[j] = stash->idx[i];
        stash->array[j] = stash->array[i];
      }
    }
    stash->n = j + 1;
  } else { /* block stash */
    PetscInt *perm = NULL;
    PetscScalar *arr;
    PetscCall(PetscMalloc2(stash->n, &perm, stash->n * bs, &arr));
    for (i = 0; i < stash->n; i++) perm[i] = i;
    PetscCall(PetscSortIntWithArray(stash->n, stash->idx, perm));
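
    /* idx is now sorted and perm records each block's original position, so the blocks can be gathered out of place below */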
    /* Out-of-place copy of arr */
    PetscCall(PetscMemcpy(arr, stash->array + perm[0] * bs, bs * sizeof(PetscScalar)));
    for (i = 1, j = 0; i < stash->n; i++) {
      PetscInt k;
      if (stash->idx[i] == stash->idx[j]) {
        switch (stash->insertmode) {
        case ADD_VALUES:
          for (k = 0; k < bs; k++) arr[j * bs + k] += stash->array[perm[i] * bs + k];
          break;
        case INSERT_VALUES:
          for (k = 0; k < bs; k++) arr[j * bs + k] = stash->array[perm[i] * bs + k];
          break;
        default:
          SETERRQ(PETSC_COMM_SELF, PETSC_ERR_SUP, "Insert mode not supported 0x%x", stash->insertmode);
        }
      } else {
        j++;
        stash->idx[j] = stash->idx[i];
        for (k = 0; k < bs; k++) arr[j * bs + k] = stash->array[perm[i] * bs + k];
      }
    }
    stash->n = j + 1;
    PetscCall(PetscMemcpy(stash->array, arr, stash->n * bs * sizeof(PetscScalar)));
    PetscCall(PetscFree2(perm, arr));
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

PetscErrorCode VecStashGetOwnerList_Private(VecStash *stash, PetscLayout map, PetscMPIInt *nowners, PetscMPIInt **owners)
{
  PetscInt i, bs = stash->bs;
  PetscMPIInt r;
  PetscSegBuffer seg;

  PetscFunctionBegin;
  PetscCheck(bs == 1 || bs == map->bs, map->comm, PETSC_ERR_PLIB, "Stash block size %" PetscInt_FMT " does not match layout block size %" PetscInt_FMT, bs, map->bs);
  PetscCall(PetscSegBufferCreate(sizeof(PetscMPIInt), 50, &seg));
  *nowners = 0;
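  /* walk the stash indices in order; each time an index falls beyond the current owner's
     range, look up and record the new owning rank. This assumes the stash has been sorted
     (for example by VecStashSortCompress_Private()), so each owner is recorded once */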
  for (i = 0, r = -1; i < stash->n; i++) {
    if (stash->idx[i] * bs >= map->range[r + 1]) {
      PetscMPIInt *rank;
      PetscCall(PetscSegBufferGet(seg, 1, &rank));
      PetscCall(PetscLayoutFindOwner(map, stash->idx[i] * bs, &r));
      *rank = r;
      (*nowners)++;
    }
  }
  PetscCall(PetscSegBufferExtractAlloc(seg, owners));
  PetscCall(PetscSegBufferDestroy(&seg));
  PetscFunctionReturn(PETSC_SUCCESS);
}