Actual source code: psort.c

petsc-master 2020-08-10

  2:  #include <petsc/private/petscimpl.h>
  3:  #include <petscis.h>

  5: /* This is the bitonic merge for non-power-of-2 sizes, described at http://www.iti.fh-flensburg.de/lang/algorithmen/sortieren/bitonic/oddn.htm */
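     /* Each process contributes n keys and the merge operates on the process range
      * [rankStart, rankEnd).  The partner distance `split` is the largest power of two
      * strictly less than the range size, which is what makes non-power-of-2 ranges work:
      * e.g. for rankStart = 0, rankEnd = 6 we get split = 4, so ranks 0 and 1 exchange
      * with ranks 4 and 5, ranks 2 and 3 have no partner inside the range, and the merge
      * then recurses on [0, 4) and [4, 6). */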
  6: static PetscErrorCode PetscParallelSortInt_Bitonic_Merge(MPI_Comm comm, PetscMPIInt tag, PetscMPIInt rankStart, PetscMPIInt rankEnd, PetscMPIInt rank, PetscMPIInt n, PetscInt keys[], PetscInt buffer[], PetscBool forward)
  7: {
  8:   PetscInt       diff;
  9:   PetscInt       split, mid, partner;

 13:   diff = rankEnd - rankStart;
 14:   if (diff <= 0) return(0);
 15:   if (diff == 1) {
 16:     if (forward) {
 17:       PetscSortInt((PetscInt) n, keys);
 18:     } else {
 19:       PetscSortReverseInt((PetscInt) n, keys);
 20:     }
 21:     return(0);
 22:   }
 23:   split = 1;
 24:   while (2 * split < diff) split *= 2;
 25:   mid = rankStart + split;
 26:   if (rank < mid) {
 27:     partner = rank + split;
 28:   } else {
 29:     partner = rank - split;
 30:   }
 31:   if (partner < rankEnd) {
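         /* compare-exchange: swap all n local keys with the partner; for a forward
          * (ascending) merge the lower rank keeps the elementwise minima and the higher
          * rank keeps the elementwise maxima, with the roles reversed for a backward merge */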
 32:     PetscMPIInt i;

 34:     MPI_Sendrecv(keys, n, MPIU_INT, partner, tag, buffer, n, MPIU_INT, partner, tag, comm, MPI_STATUS_IGNORE);
 35:     if ((rank < partner) == (forward == PETSC_TRUE)) {
 36:       for (i = 0; i < n; i++) {
 37:         keys[i] = (keys[i] <= buffer[i]) ? keys[i] : buffer[i];
 38:       }
 39:     } else {
 40:       for (i = 0; i < n; i++) {
 41:         keys[i] = (keys[i] > buffer[i]) ? keys[i] : buffer[i];
 42:       }
 43:     }
 44:   }
 45:   /* divide and conquer */
 46:   if (rank < mid) {
 47:     PetscParallelSortInt_Bitonic_Merge(comm, tag, rankStart, mid, rank, n, keys, buffer, forward);
 48:   } else {
 49:     PetscParallelSortInt_Bitonic_Merge(comm, tag, mid, rankEnd, rank, n, keys, buffer, forward);
 50:   }
 51:   return(0);
 52: }

 54: /* This is the bitonic sort for non-power-of-2 sizes, described at http://www.iti.fh-flensburg.de/lang/algorithmen/sortieren/bitonic/oddn.htm */
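     /* Sort the lower half of the process range opposite to the requested direction and
      * the upper half in the requested direction; the two halves then form a bitonic
      * sequence that the merge above turns into a fully sorted one. */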
 55: static PetscErrorCode PetscParallelSortInt_Bitonic_Recursive(MPI_Comm comm, PetscMPIInt tag, PetscMPIInt rankStart, PetscMPIInt rankEnd, PetscMPIInt rank, PetscMPIInt n, PetscInt keys[], PetscInt buffer[], PetscBool forward)
 56: {
 57:   PetscInt       diff;
 58:   PetscInt       mid;

 62:   diff = rankEnd - rankStart;
 63:   if (diff <= 0) return(0);
 64:   if (diff == 1) {
 65:     if (forward) {
 66:       PetscSortInt(n, keys);
 67:     } else {
 68:       PetscSortReverseInt(n, keys);
 69:     }
 70:     return(0);
 71:   }
 72:   mid = rankStart + diff / 2;
 73:   /* divide and conquer */
 74:   if (rank < mid) {
 75:     PetscParallelSortInt_Bitonic_Recursive(comm, tag, rankStart, mid, rank, n, keys, buffer, (PetscBool) !forward);
 76:   } else {
 77:     PetscParallelSortInt_Bitonic_Recursive(comm, tag, mid, rankEnd, rank, n, keys, buffer, forward);
 78:   }
 79:   /* bitonic merge */
 80:   PetscParallelSortInt_Bitonic_Merge(comm, tag, rankStart, rankEnd, rank, n, keys, buffer, forward);
 81:   return(0);
 82: }

 84: static PetscErrorCode PetscParallelSortInt_Bitonic(MPI_Comm comm, PetscInt n, PetscInt keys[])
 85: {
 86:   PetscMPIInt size, rank, tag, mpin;
 87:   PetscInt       *buffer;

 92:   PetscCommGetNewTag(comm, &tag);
 93:   MPI_Comm_size(comm, &size);
 94:   MPI_Comm_rank(comm, &rank);
 95:   PetscMPIIntCast(n, &mpin);
 96:   PetscMalloc1(n, &buffer);
 97:   PetscParallelSortInt_Bitonic_Recursive(comm, tag, 0, size, rank, mpin, keys, buffer, PETSC_TRUE);
 98:   PetscFree(buffer);
 99:   return(0);
100: }
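
     /* Select P - 1 global pivots for the samplesort: every process proposes P - 1 pivots
      * drawn from its locally sorted keys so that they target the output layout mapout
      * (processes with no local keys propose PETSC_MAX_INT), the proposals are sorted in
      * parallel with the bitonic sort above, and P - 1 evenly spaced entries of the sorted
      * proposals are selected and allgathered onto every process. */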

102: static PetscErrorCode PetscParallelSampleSelect(PetscLayout mapin, PetscLayout mapout, PetscInt keysin[], PetscInt *outpivots[])
103: {
104:   PetscMPIInt    size, rank;
105:   PetscInt       *pivots, *finalpivots, i;
106:   PetscInt       non_empty, my_first, count;
107:   PetscMPIInt    *keys_per, max_keys_per;

111:   MPI_Comm_size(mapin->comm, &size);
112:   MPI_Comm_rank(mapin->comm, &rank);

114:   /* Choose P - 1 pivots that would be ideal for the distribution on this process */
115:   PetscMalloc1(size - 1, &pivots);
116:   for (i = 0; i < size - 1; i++) {
117:     PetscInt index;

119:     if (!mapin->n) {
120:       /* if this rank is empty, put "infinity" in the list.  each process knows how many empty
121:        * processes are in the layout, so those values will be ignored from the end of the sorted
122:        * pivots */
123:       pivots[i] = PETSC_MAX_INT;
124:     } else {
125:       /* match the distribution to the desired output described by mapout by getting the index
126:        * that is approximately the appropriate fraction through the list */
127:       index = ((PetscReal) mapout->range[i + 1]) * ((PetscReal) mapin->n) / ((PetscReal) mapout->N);
128:       index = PetscMin(index, (mapin->n - 1));
129:       index = PetscMax(index, 0);
130:       pivots[i] = keysin[index];
131:     }
132:   }
133:   /* sort the pivots in parallel */
134:   PetscParallelSortInt_Bitonic(mapin->comm, size - 1, pivots);
135:   if (PetscDefined(USE_DEBUG)) {
136:     PetscBool sorted;

138:     PetscParallelSortedInt(mapin->comm, size - 1, pivots, &sorted);
139:     if (!sorted) SETERRQ(mapin->comm, PETSC_ERR_PLIB, "bitonic sort failed");
140:   }

142:   /* if there are Z nonempty processes, we have (P - 1) * Z real pivots, and we want to select
143:    * at indices Z - 1, 2*Z - 1, ... (P - 1) * Z - 1 */
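       /* e.g. with P = 4 and Z = 3 there are 9 real pivots among the 4 * 3 = 12 sorted
        * values (the 3 largest are the PETSC_MAX_INT entries contributed by the empty
        * process), and the selected indices are 2, 5, and 8 */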
144:   non_empty = size;
145:   for (i = 0; i < size; i++) if (mapout->range[i] == mapout->range[i+1]) non_empty--;
146:   PetscCalloc1(size + 1, &keys_per);
147:   my_first = -1;
148:   if (non_empty) {
149:     for (i = 0; i < size - 1; i++) {
150:       size_t sample = (i + 1) * non_empty - 1;
151:       size_t sample_rank = sample / (size - 1);

153:       keys_per[sample_rank]++;
154:       if (my_first < 0 && (PetscMPIInt) sample_rank == rank) {
155:         my_first = (PetscInt) (sample - sample_rank * (size - 1));
156:       }
157:     }
158:   }
159:   for (i = 0, max_keys_per = 0; i < size; i++) max_keys_per = PetscMax(keys_per[i], max_keys_per);
160:   PetscMalloc1(size * max_keys_per, &finalpivots);
161:   /* now that we know how many pivots each process will provide, gather the selected pivots at the start of the array
162:    * and allgather them */
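       /* each contribution is padded with PETSC_MAX_INT up to max_keys_per so that a
        * fixed-size MPI_Allgather can be used; the padding is dropped again by the
        * compaction loop below */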
163:   for (i = 0; i < keys_per[rank]; i++) pivots[i] = pivots[my_first + i * non_empty];
164:   for (i = keys_per[rank]; i < max_keys_per; i++) pivots[i] = PETSC_MAX_INT;
165:   MPI_Allgather(pivots, max_keys_per, MPIU_INT, finalpivots, max_keys_per, MPIU_INT, mapin->comm);
166:   for (i = 0, count = 0; i < size; i++) {
167:     PetscInt j;

169:     for (j = 0; j < max_keys_per; j++) {
170:       if (j < keys_per[i]) {
171:         finalpivots[count++] = finalpivots[i * max_keys_per + j];
172:       }
173:     }
174:   }
175:   *outpivots = finalpivots;
176:   PetscFree(keys_per);
177:   PetscFree(pivots);
178:   return(0);
179: }
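
     /* Redistribute a globally sorted array from its current distribution (n entries on this
      * process, whose global offset is obtained from an MPI_Scan of n) to the distribution
      * described by map.  The rank holding the beginning of destination rank i's range sends
      * it a message with firsttag; every other rank whose data overlaps that range sends a
      * message with secondtag.  The receiver posts one MPI_ANY_SOURCE receive for the first
      * message and then receives secondtag messages from consecutively increasing ranks until
      * its portion is filled; a rank whose data is empty but whose offset lies strictly inside
      * the range still sends an empty secondtag message, so this walk over consecutive sender
      * ranks does not stall. */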

181: static PetscErrorCode PetscParallelRedistribute(PetscLayout map, PetscInt n, PetscInt arrayin[], PetscInt arrayout[])
182: {
183:   PetscMPIInt    size, rank;
184:   PetscInt       myOffset, nextOffset;
185:   PetscInt       i;
186:   PetscMPIInt    total, filled;
187:   PetscMPIInt    sender, nfirst, nsecond;
188:   PetscMPIInt    firsttag, secondtag;
189:   MPI_Request    firstreqrcv;
190:   MPI_Request    *firstreqs;
191:   MPI_Request    *secondreqs;
192:   MPI_Status     firststatus;


197:   MPI_Comm_size(map->comm, &size);
198:   MPI_Comm_rank(map->comm, &rank);
199:   PetscCommGetNewTag(map->comm, &firsttag);
200:   PetscCommGetNewTag(map->comm, &secondtag);
201:   myOffset = 0;
202:   PetscMalloc2(size, &firstreqs, size, &secondreqs);
203:   MPI_Scan(&n, &nextOffset, 1, MPIU_INT, MPI_SUM, map->comm);
204:   myOffset = nextOffset - n;
205:   total = map->range[rank + 1] - map->range[rank];
206:   if (total > 0) {
207:     MPI_Irecv(arrayout, total, MPIU_INT, MPI_ANY_SOURCE, firsttag, map->comm, &firstreqrcv);
208:   }
209:   for (i = 0, nsecond = 0, nfirst = 0; i < size; i++) {
210:     PetscInt itotal;
211:     PetscInt overlap, oStart, oEnd;

213:     itotal = map->range[i + 1] - map->range[i];
214:     if (itotal <= 0) continue;
215:     oStart = PetscMax(myOffset, map->range[i]);
216:     oEnd   = PetscMin(nextOffset, map->range[i + 1]);
217:     overlap = oEnd - oStart;
218:     if (map->range[i] >= myOffset && map->range[i] < nextOffset) {
219:       /* send first message */
220:       MPI_Isend(&arrayin[map->range[i] - myOffset], overlap, MPIU_INT, i, firsttag, map->comm, &(firstreqs[nfirst++]));
221:     } else if (overlap > 0) {
222:       /* send second message */
223:       MPI_Isend(&arrayin[oStart - myOffset], overlap, MPIU_INT, i, secondtag, map->comm, &(secondreqs[nsecond++]));
224:     } else if (overlap == 0 && myOffset > map->range[i] && myOffset < map->range[i + 1]) {
225:       /* send empty second message */
226:       MPI_Isend(&arrayin[oStart - myOffset], 0, MPIU_INT, i, secondtag, map->comm, &(secondreqs[nsecond++]));
227:     }
228:   }
229:   filled = 0;
230:   sender = -1;
231:   if (total > 0) {
232:     MPI_Wait(&firstreqrcv, &firststatus);
233:     sender = firststatus.MPI_SOURCE;
234:     MPI_Get_count(&firststatus, MPIU_INT, &filled);
235:   }
236:   while (filled < total) {
237:     PetscMPIInt mfilled;
238:     MPI_Status stat;

240:     sender++;
241:     MPI_Recv(&arrayout[filled], total - filled, MPIU_INT, sender, secondtag, map->comm, &stat);
242:     MPI_Get_count(&stat, MPIU_INT, &mfilled);
243:     filled += mfilled;
244:   }
245:   MPI_Waitall(nfirst, firstreqs, MPI_STATUSES_IGNORE);
246:   MPI_Waitall(nsecond, secondreqs, MPI_STATUSES_IGNORE);
247:   PetscFree2(firstreqs, secondreqs);
248:   return(0);
249: }

251: static PetscErrorCode PetscParallelSortInt_Samplesort(PetscLayout mapin, PetscLayout mapout, PetscInt keysin[], PetscInt keysout[])
252: {
253:   PetscMPIInt    size, rank;
254:   PetscInt       *pivots = NULL, *buffer;
255:   PetscInt       i, j;
256:   PetscMPIInt    *keys_per_snd, *keys_per_rcv, *offsets_snd, *offsets_rcv, nrecv;

260:   MPI_Comm_size(mapin->comm, &size);
261:   MPI_Comm_rank(mapin->comm, &rank);
262:   PetscMalloc4(size, &keys_per_snd, size, &keys_per_rcv, size + 1, &offsets_snd, size + 1, &offsets_rcv);
263:   /* sort locally */
264:   PetscSortInt(mapin->n, keysin);
265:   /* get P - 1 pivots */
266:   PetscParallelSampleSelect(mapin, mapout, keysin, &pivots);
267:   /* determine which entries in the sorted array go to which other processes based on the pivots */
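       /* keysin is already sorted locally, so the bucket destined for rank i is the contiguous
        * run of not-yet-assigned keys strictly less than pivots[i]; keys equal to a pivot are
        * sent to a later rank */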
268:   for (i = 0, j = 0; i < size - 1; i++) {
269:     PetscInt prev = j;

271:     while ((j < mapin->n) && (keysin[j] < pivots[i])) j++;
272:     offsets_snd[i] = prev;
273:     keys_per_snd[i] = j - prev;
274:   }
275:   offsets_snd[size - 1] = j;
276:   keys_per_snd[size - 1] = mapin->n - j;
277:   offsets_snd[size] = mapin->n;
278:   /* get the incoming sizes */
279:   MPI_Alltoall(keys_per_snd, 1, MPI_INT, keys_per_rcv, 1, MPI_INT, mapin->comm);
280:   offsets_rcv[0] = 0;
281:   for (i = 0; i < size; i++) {
282:     offsets_rcv[i+1] = offsets_rcv[i] + keys_per_rcv[i];
283:   }
284:   nrecv = offsets_rcv[size];
285:   /* all to all exchange */
286:   PetscMalloc1(nrecv, &buffer);
287:   MPI_Alltoallv(keysin, keys_per_snd, offsets_snd, MPIU_INT, buffer, keys_per_rcv, offsets_rcv, MPIU_INT, mapin->comm);
288:   PetscFree(pivots);
289:   PetscFree4(keys_per_snd, keys_per_rcv, offsets_snd, offsets_rcv);

291:   /* local sort */
292:   PetscSortInt(nrecv, buffer);
293: #if defined(PETSC_USE_DEBUG)
294:   {
295:     PetscBool sorted;

297:     PetscParallelSortedInt(mapin->comm, nrecv, buffer, &sorted);
298:     if (!sorted) SETERRQ(mapin->comm, PETSC_ERR_PLIB, "samplesort (pre-redistribute) sort failed");
299:   }
300: #endif

302:   /* redistribute to the desired order */
303:   PetscParallelRedistribute(mapout, nrecv, buffer, keysout);
304:   PetscFree(buffer);
305:   return(0);
306: }

308: /*@
309:   PetscParallelSortInt - Globally sort a distributed array of integers

311:   Collective

313:   Input Parameters:
314: + mapin - PetscLayout describing the distribution of the input keys
315: . mapout - PetscLayout describing the distribution of the output keys
316: - keysin - the array of integers to be sorted

318:   Output Parameter:
319: . keysout - the array in which the sorted integers will be stored.  If mapin == mapout, then keysin may be equal to keysout.

321:   Level: developer

323:   Notes: This implements a distributed samplesort, which, with local array sizes n_in and n_out, global size N, and global number of processes P, does:

325:   - sorts locally
326:   - chooses pivots by sorting, in parallel with a bitonic sort, the (P-1) pivot suggestions from each process and allgathering a subset of (P-1) of them
327:   - uses the pivots to repartition the keys by an all-to-all exchange
328:   - sorts the repartitioned keys locally (the array is now globally sorted, but does not match the mapout layout)
329:   - redistributes the keys to match the mapout layout

331:   If keysin != keysout, then keysin will not be changed during PetscParallelSortInt.

333: .seealso: PetscParallelSortedInt()
334: @*/
335: PetscErrorCode PetscParallelSortInt(PetscLayout mapin, PetscLayout mapout, PetscInt keysin[], PetscInt keysout[])
336: {
337:   PetscMPIInt    size;
338:   PetscMPIInt    result;
339:   PetscInt       *keysincopy = NULL;

345:   MPI_Comm_compare(mapin->comm, mapout->comm, &result);
346:   if (result != MPI_IDENT && result != MPI_CONGRUENT) SETERRQ(mapin->comm, PETSC_ERR_ARG_NOTSAMECOMM, "layouts are not on the same communicator");
347:   PetscLayoutSetUp(mapin);
348:   PetscLayoutSetUp(mapout);
351:   if (mapin->N != mapout->N) SETERRQ2(mapin->comm, PETSC_ERR_ARG_SIZ, "Input and output layouts have different global sizes (%D != %D)", mapin->N, mapout->N);
352:   MPI_Comm_size(mapin->comm, &size);
353:   if (size == 1) {
354:     if (keysout != keysin) {
355:       PetscMemcpy(keysout, keysin, mapin->n * sizeof(PetscInt));
356:     }
357:     PetscSortInt(mapout->n, keysout);
 358:     return(0);
359:   }
360:   if (keysout != keysin) {
361:     PetscMalloc1(mapin->n, &keysincopy);
362:     PetscMemcpy(keysincopy, keysin, mapin->n * sizeof(PetscInt));
363:     keysin = keysincopy;
364:   }
365:   PetscParallelSortInt_Samplesort(mapin, mapout, keysin, keysout);
366: #if defined(PETSC_USE_DEBUG)
367:   {
368:     PetscBool sorted;

370:     PetscParallelSortedInt(mapout->comm, mapout->n, keysout, &sorted);
371:     if (!sorted) SETERRQ(mapout->comm, PETSC_ERR_PLIB, "samplesort sort failed");
372:   }
373: #endif
374:   PetscFree(keysincopy);
375:   return(0);
376: }
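
A minimal usage sketch (not part of psort.c): each process fills n = 4 arbitrary keys and sorts them globally into an output layout with the same local sizes. The key values and the choice of identical input and output local sizes are illustrative assumptions; the ierr/CHKERRQ error handling follows the convention of PETSc examples from this release.

#include <petscis.h>

int main(int argc, char **argv)
{
  PetscLayout    mapin, mapout;
  PetscInt       n = 4, i, *keysin, *keysout;
  PetscMPIInt    rank;
  PetscErrorCode ierr;

  ierr = PetscInitialize(&argc, &argv, NULL, NULL); if (ierr) return ierr;
  ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &rank);CHKERRQ(ierr);
  /* input and output layouts: n keys per process before and after the sort */
  ierr = PetscLayoutCreate(PETSC_COMM_WORLD, &mapin);CHKERRQ(ierr);
  ierr = PetscLayoutSetLocalSize(mapin, n);CHKERRQ(ierr);
  ierr = PetscLayoutSetUp(mapin);CHKERRQ(ierr);
  ierr = PetscLayoutCreate(PETSC_COMM_WORLD, &mapout);CHKERRQ(ierr);
  ierr = PetscLayoutSetLocalSize(mapout, n);CHKERRQ(ierr);
  ierr = PetscLayoutSetUp(mapout);CHKERRQ(ierr);
  ierr = PetscMalloc2(n, &keysin, n, &keysout);CHKERRQ(ierr);
  for (i = 0; i < n; i++) keysin[i] = (7 * (rank + 1) * (i + 3)) % 101; /* arbitrary unsorted keys */
  ierr = PetscParallelSortInt(mapin, mapout, keysin, keysout);CHKERRQ(ierr);
  /* keysout now holds this process's slice of the globally sorted keys */
  ierr = PetscIntView(n, keysout, PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
  ierr = PetscFree2(keysin, keysout);CHKERRQ(ierr);
  ierr = PetscLayoutDestroy(&mapin);CHKERRQ(ierr);
  ierr = PetscLayoutDestroy(&mapout);CHKERRQ(ierr);
  ierr = PetscFinalize();
  return ierr;
}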