Actual source code: plexdistribute.c

petsc-master 2020-02-18
  1:  #include <petsc/private/dmpleximpl.h>
  2:  #include <petsc/private/dmlabelimpl.h>

  4: /*@C
  5:   DMPlexSetAdjacencyUser - Define adjacency in the mesh using a user-provided callback

  7:   Input Parameters:
  8: + dm      - The DM object
  9: . user    - The user callback, may be NULL (to clear the callback)
 10: - ctx     - context for callback evaluation, may be NULL

 12:   Level: advanced

 14:   Notes:
 15:      The caller of DMPlexGetAdjacency may need to arrange that a large enough array is available for the adjacency.

 17:      Any setting here overrides other configuration of DMPlex adjacency determination.

 19: .seealso: DMSetAdjacency(), DMPlexDistribute(), DMPlexPreallocateOperator(), DMPlexGetAdjacency(), DMPlexGetAdjacencyUser()
 20: @*/
 21: PetscErrorCode DMPlexSetAdjacencyUser(DM dm,PetscErrorCode (*user)(DM,PetscInt,PetscInt*,PetscInt[],void*),void *ctx)
 22: {
 23:   DM_Plex *mesh = (DM_Plex *)dm->data;

 27:   mesh->useradjacency = user;
 28:   mesh->useradjacencyctx = ctx;
 29:   return(0);
 30: }
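
A minimal sketch of a user adjacency callback, assuming a DMPlex that is already set up; the callback name MyConeAdjacency and its point-plus-cone rule are illustrative, not part of PETSc. On entry *adjSize holds the capacity of adj[], and the callback writes back the number of entries it filled in.

    #include <petscdmplex.h>

    /* Illustrative rule: the adjacency of p is p itself plus its cone points */
    static PetscErrorCode MyConeAdjacency(DM dm, PetscInt p, PetscInt *adjSize, PetscInt adj[], void *ctx)
    {
      const PetscInt *cone;
      PetscInt        coneSize, c, n = 0;
      PetscErrorCode  ierr;

      PetscFunctionBegin;
      ierr = DMPlexGetConeSize(dm, p, &coneSize);CHKERRQ(ierr);
      ierr = DMPlexGetCone(dm, p, &cone);CHKERRQ(ierr);
      if (n < *adjSize) adj[n++] = p;
      for (c = 0; c < coneSize && n < *adjSize; ++c) adj[n++] = cone[c];
      *adjSize = n;  /* report how many entries were written */
      PetscFunctionReturn(0);
    }

The callback would be registered with DMPlexSetAdjacencyUser(dm, MyConeAdjacency, NULL), after which DMPlexGetAdjacency() and operations built on it use this rule.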

 32: /*@C
 33:   DMPlexGetAdjacencyUser - get the user-defined adjacency callback

 35:   Input Parameter:
 36: . dm      - The DM object

 38:   Output Parameters:
 39: + user    - The user callback
 40: - ctx     - context for callback evaluation

 42:   Level: advanced

 44: .seealso: DMSetAdjacency(), DMPlexDistribute(), DMPlexPreallocateOperator(), DMPlexGetAdjacency(), DMPlexSetAdjacencyUser()
 45: @*/
 46: PetscErrorCode DMPlexGetAdjacencyUser(DM dm, PetscErrorCode (**user)(DM,PetscInt,PetscInt*,PetscInt[],void*), void **ctx)
 47: {
 48:   DM_Plex *mesh = (DM_Plex *)dm->data;

 52:   if (user) *user = mesh->useradjacency;
 53:   if (ctx) *ctx = mesh->useradjacencyctx;
 54:   return(0);
 55: }

 57: /*@
 58:   DMPlexSetAdjacencyUseAnchors - Define adjacency in the mesh using the point-to-point constraints.

 60:   Input Parameters:
 61: + dm      - The DM object
 62: - useAnchors - Flag to use the constraints.  If PETSC_TRUE, then constrained points are omitted from DMPlexGetAdjacency(), and their anchor points appear in their place.

 64:   Level: intermediate

 66: .seealso: DMGetAdjacency(), DMSetAdjacency(), DMPlexDistribute(), DMPlexPreallocateOperator(), DMPlexSetAnchors()
 67: @*/
 68: PetscErrorCode DMPlexSetAdjacencyUseAnchors(DM dm, PetscBool useAnchors)
 69: {
 70:   DM_Plex *mesh = (DM_Plex *) dm->data;

 74:   mesh->useAnchors = useAnchors;
 75:   return(0);
 76: }

 78: /*@
 79:   DMPlexGetAdjacencyUseAnchors - Query whether adjacency in the mesh uses the point-to-point constraints.

 81:   Input Parameter:
 82: . dm      - The DM object

 84:   Output Parameter:
 85: . useAnchors - Flag to use the constraints.  If PETSC_TRUE, then constrained points are omitted from DMPlexGetAdjacency(), and their anchor points appear in their place.

 87:   Level: intermediate

 89: .seealso: DMPlexSetAdjacencyUseAnchors(), DMSetAdjacency(), DMGetAdjacency(), DMPlexDistribute(), DMPlexPreallocateOperator(), DMPlexSetAnchors()
 90: @*/
 91: PetscErrorCode DMPlexGetAdjacencyUseAnchors(DM dm, PetscBool *useAnchors)
 92: {
 93:   DM_Plex *mesh = (DM_Plex *) dm->data;

 98:   *useAnchors = mesh->useAnchors;
 99:   return(0);
100: }

102: static PetscErrorCode DMPlexGetAdjacency_Cone_Internal(DM dm, PetscInt p, PetscInt *adjSize, PetscInt adj[])
103: {
104:   const PetscInt *cone = NULL;
105:   PetscInt        numAdj = 0, maxAdjSize = *adjSize, coneSize, c;
106:   PetscErrorCode  ierr;

109:   DMPlexGetConeSize(dm, p, &coneSize);
110:   DMPlexGetCone(dm, p, &cone);
111:   for (c = 0; c <= coneSize; ++c) {
112:     const PetscInt  point   = !c ? p : cone[c-1];
113:     const PetscInt *support = NULL;
114:     PetscInt        supportSize, s, q;

116:     DMPlexGetSupportSize(dm, point, &supportSize);
117:     DMPlexGetSupport(dm, point, &support);
118:     for (s = 0; s < supportSize; ++s) {
119:       for (q = 0; q < numAdj || ((void)(adj[numAdj++] = support[s]),0); ++q) {
120:         if (support[s] == adj[q]) break;
121:       }
122:       if (numAdj > maxAdjSize) SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%D)", maxAdjSize);
123:     }
124:   }
125:   *adjSize = numAdj;
126:   return(0);
127: }

129: static PetscErrorCode DMPlexGetAdjacency_Support_Internal(DM dm, PetscInt p, PetscInt *adjSize, PetscInt adj[])
130: {
131:   const PetscInt *support = NULL;
132:   PetscInt        numAdj   = 0, maxAdjSize = *adjSize, supportSize, s;
133:   PetscErrorCode  ierr;

136:   DMPlexGetSupportSize(dm, p, &supportSize);
137:   DMPlexGetSupport(dm, p, &support);
138:   for (s = 0; s <= supportSize; ++s) {
139:     const PetscInt  point = !s ? p : support[s-1];
140:     const PetscInt *cone  = NULL;
141:     PetscInt        coneSize, c, q;

143:     DMPlexGetConeSize(dm, point, &coneSize);
144:     DMPlexGetCone(dm, point, &cone);
145:     for (c = 0; c < coneSize; ++c) {
146:       for (q = 0; q < numAdj || ((void)(adj[numAdj++] = cone[c]),0); ++q) {
147:         if (cone[c] == adj[q]) break;
148:       }
149:       if (numAdj > maxAdjSize) SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%D)", maxAdjSize);
150:     }
151:   }
152:   *adjSize = numAdj;
153:   return(0);
154: }

156: static PetscErrorCode DMPlexGetAdjacency_Transitive_Internal(DM dm, PetscInt p, PetscBool useClosure, PetscInt *adjSize, PetscInt adj[])
157: {
158:   PetscInt      *star = NULL;
159:   PetscInt       numAdj = 0, maxAdjSize = *adjSize, starSize, s;

163:   DMPlexGetTransitiveClosure(dm, p, useClosure, &starSize, &star);
164:   for (s = 0; s < starSize*2; s += 2) {
165:     const PetscInt *closure = NULL;
166:     PetscInt        closureSize, c, q;

168:     DMPlexGetTransitiveClosure(dm, star[s], (PetscBool)!useClosure, &closureSize, (PetscInt**) &closure);
169:     for (c = 0; c < closureSize*2; c += 2) {
170:       for (q = 0; q < numAdj || ((void)(adj[numAdj++] = closure[c]),0); ++q) {
171:         if (closure[c] == adj[q]) break;
172:       }
173:       if (numAdj > maxAdjSize) SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%D)", maxAdjSize);
174:     }
175:     DMPlexRestoreTransitiveClosure(dm, star[s], (PetscBool)!useClosure, &closureSize, (PetscInt**) &closure);
176:   }
177:   DMPlexRestoreTransitiveClosure(dm, p, useClosure, &starSize, &star);
178:   *adjSize = numAdj;
179:   return(0);
180: }

182: PetscErrorCode DMPlexGetAdjacency_Internal(DM dm, PetscInt p, PetscBool useCone, PetscBool useTransitiveClosure, PetscBool useAnchors, PetscInt *adjSize, PetscInt *adj[])
183: {
184:   static PetscInt asiz = 0;
185:   PetscInt maxAnchors = 1;
186:   PetscInt aStart = -1, aEnd = -1;
187:   PetscInt maxAdjSize;
188:   PetscSection aSec = NULL;
189:   IS aIS = NULL;
190:   const PetscInt *anchors;
191:   DM_Plex *mesh = (DM_Plex *)dm->data;
192:   PetscErrorCode  ierr;

195:   if (useAnchors) {
196:     DMPlexGetAnchors(dm,&aSec,&aIS);
197:     if (aSec) {
198:       PetscSectionGetMaxDof(aSec,&maxAnchors);
199:       maxAnchors = PetscMax(1,maxAnchors);
200:       PetscSectionGetChart(aSec,&aStart,&aEnd);
201:       ISGetIndices(aIS,&anchors);
202:     }
203:   }
204:   if (!*adj) {
205:     PetscInt depth, coneSeries, supportSeries, maxC, maxS, pStart, pEnd;

207:     DMPlexGetChart(dm, &pStart,&pEnd);
208:     DMPlexGetDepth(dm, &depth);
209:     DMPlexGetMaxSizes(dm, &maxC, &maxS);
210:     coneSeries    = (maxC > 1) ? ((PetscPowInt(maxC,depth+1)-1)/(maxC-1)) : depth+1;
211:     supportSeries = (maxS > 1) ? ((PetscPowInt(maxS,depth+1)-1)/(maxS-1)) : depth+1;
212:     asiz  = PetscMax(PetscPowInt(maxS,depth)*coneSeries,PetscPowInt(maxC,depth)*supportSeries);
213:     asiz *= maxAnchors;
214:     asiz  = PetscMin(asiz,pEnd-pStart);
215:     PetscMalloc1(asiz,adj);
216:   }
217:   if (*adjSize < 0) *adjSize = asiz;
218:   maxAdjSize = *adjSize;
219:   if (mesh->useradjacency) {
220:     (*mesh->useradjacency)(dm, p, adjSize, *adj, mesh->useradjacencyctx);
221:   } else if (useTransitiveClosure) {
222:     DMPlexGetAdjacency_Transitive_Internal(dm, p, useCone, adjSize, *adj);
223:   } else if (useCone) {
224:     DMPlexGetAdjacency_Cone_Internal(dm, p, adjSize, *adj);
225:   } else {
226:     DMPlexGetAdjacency_Support_Internal(dm, p, adjSize, *adj);
227:   }
228:   if (useAnchors && aSec) {
229:     PetscInt origSize = *adjSize;
230:     PetscInt numAdj = origSize;
231:     PetscInt i = 0, j;
232:     PetscInt *orig = *adj;

234:     while (i < origSize) {
235:       PetscInt p = orig[i];
236:       PetscInt aDof = 0;

238:       if (p >= aStart && p < aEnd) {
239:         PetscSectionGetDof(aSec,p,&aDof);
240:       }
241:       if (aDof) {
242:         PetscInt aOff;
243:         PetscInt s, q;

245:         for (j = i + 1; j < numAdj; j++) {
246:           orig[j - 1] = orig[j];
247:         }
248:         origSize--;
249:         numAdj--;
250:         PetscSectionGetOffset(aSec,p,&aOff);
251:         for (s = 0; s < aDof; ++s) {
252:           for (q = 0; q < numAdj || ((void)(orig[numAdj++] = anchors[aOff+s]),0); ++q) {
253:             if (anchors[aOff+s] == orig[q]) break;
254:           }
255:           if (numAdj > maxAdjSize) SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%D)", maxAdjSize);
256:         }
257:       }
258:       else {
259:         i++;
260:       }
261:     }
262:     *adjSize = numAdj;
263:     ISRestoreIndices(aIS,&anchors);
264:   }
265:   return(0);
266: }

268: /*@
269:   DMPlexGetAdjacency - Return all points adjacent to the given point

271:   Input Parameters:
272: + dm - The DM object
273: . p  - The point
274: . adjSize - The maximum size of adj if it is non-NULL, or PETSC_DETERMINE
275: - adj - Either NULL so that the array is allocated, or an existing array with size adjSize

277:   Output Parameters:
278: + adjSize - The number of adjacent points
279: - adj - The adjacent points

281:   Level: advanced

283:   Notes:
284:     The user must PetscFree the adj array if it was not passed in.

286: .seealso: DMSetAdjacency(), DMPlexDistribute(), DMCreateMatrix(), DMPlexPreallocateOperator()
287: @*/
288: PetscErrorCode DMPlexGetAdjacency(DM dm, PetscInt p, PetscInt *adjSize, PetscInt *adj[])
289: {
290:   PetscBool      useCone, useClosure, useAnchors;

297:   DMGetBasicAdjacency(dm, &useCone, &useClosure);
298:   DMPlexGetAdjacencyUseAnchors(dm, &useAnchors);
299:   DMPlexGetAdjacency_Internal(dm, p, useCone, useClosure, useAnchors, adjSize, adj);
300:   return(0);
301: }
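
A short usage sketch for DMPlexGetAdjacency(), assuming dm is a set-up DMPlex and p a valid mesh point; the helper name PrintPointAdjacency is illustrative. Passing adj == NULL with adjSize == PETSC_DETERMINE lets the routine size and allocate the array, which the caller then frees with PetscFree().

    #include <petscdmplex.h>

    static PetscErrorCode PrintPointAdjacency(DM dm, PetscInt p)
    {
      PetscInt       *adj = NULL, adjSize = PETSC_DETERMINE, a;
      PetscErrorCode  ierr;

      PetscFunctionBegin;
      ierr = DMPlexGetAdjacency(dm, p, &adjSize, &adj);CHKERRQ(ierr);
      for (a = 0; a < adjSize; ++a) {
        ierr = PetscPrintf(PETSC_COMM_SELF, "point %D is adjacent to %D\n", p, adj[a]);CHKERRQ(ierr);
      }
      ierr = PetscFree(adj);CHKERRQ(ierr); /* we passed adj == NULL, so we own the allocation */
      PetscFunctionReturn(0);
    }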

303: /*@
304:   DMPlexCreateTwoSidedProcessSF - Create an SF which just has process connectivity

306:   Collective on dm

 308:   Input Parameters:
 309: + dm              - The DM
 310: . sfPoint         - The PetscSF which encodes point connectivity
      . rootRankSection - The number of leaves for a given root point
      . rootRanks       - The rank of each edge into the root point
      . leafRankSection - The number of processes sharing a given leaf point
      - leafRanks       - The rank of each process sharing a leaf point

312:   Output Parameters:
313: + processRanks - A list of process neighbors, or NULL
314: - sfProcess    - An SF encoding the two-sided process connectivity, or NULL

316:   Level: developer

318: .seealso: PetscSFCreate(), DMPlexCreateProcessSF()
319: @*/
320: PetscErrorCode DMPlexCreateTwoSidedProcessSF(DM dm, PetscSF sfPoint, PetscSection rootRankSection, IS rootRanks, PetscSection leafRankSection, IS leafRanks, IS *processRanks, PetscSF *sfProcess)
321: {
322:   const PetscSFNode *remotePoints;
323:   PetscInt          *localPointsNew;
324:   PetscSFNode       *remotePointsNew;
325:   const PetscInt    *nranks;
326:   PetscInt          *ranksNew;
327:   PetscBT            neighbors;
328:   PetscInt           pStart, pEnd, p, numLeaves, l, numNeighbors, n;
329:   PetscMPIInt        size, proc, rank;
330:   PetscErrorCode     ierr;

337:   MPI_Comm_size(PetscObjectComm((PetscObject) dm), &size);
338:   MPI_Comm_rank(PetscObjectComm((PetscObject) dm), &rank);
339:   PetscSFGetGraph(sfPoint, NULL, &numLeaves, NULL, &remotePoints);
340:   PetscBTCreate(size, &neighbors);
341:   PetscBTMemzero(size, neighbors);
342:   /* Compute root-to-leaf process connectivity */
343:   PetscSectionGetChart(rootRankSection, &pStart, &pEnd);
344:   ISGetIndices(rootRanks, &nranks);
345:   for (p = pStart; p < pEnd; ++p) {
346:     PetscInt ndof, noff, n;

348:     PetscSectionGetDof(rootRankSection, p, &ndof);
349:     PetscSectionGetOffset(rootRankSection, p, &noff);
350:     for (n = 0; n < ndof; ++n) {PetscBTSet(neighbors, nranks[noff+n]);}
351:   }
352:   ISRestoreIndices(rootRanks, &nranks);
353:   /* Compute leaf-to-neighbor process connectivity */
354:   PetscSectionGetChart(leafRankSection, &pStart, &pEnd);
355:   ISGetIndices(leafRanks, &nranks);
356:   for (p = pStart; p < pEnd; ++p) {
357:     PetscInt ndof, noff, n;

359:     PetscSectionGetDof(leafRankSection, p, &ndof);
360:     PetscSectionGetOffset(leafRankSection, p, &noff);
361:     for (n = 0; n < ndof; ++n) {PetscBTSet(neighbors, nranks[noff+n]);}
362:   }
363:   ISRestoreIndices(leafRanks, &nranks);
364:   /* Compute leaf-to-root process connectivity */
365:   for (l = 0; l < numLeaves; ++l) {PetscBTSet(neighbors, remotePoints[l].rank);}
366:   /* Calculate edges */
367:   PetscBTClear(neighbors, rank);
368:   for(proc = 0, numNeighbors = 0; proc < size; ++proc) {if (PetscBTLookup(neighbors, proc)) ++numNeighbors;}
369:   PetscMalloc1(numNeighbors, &ranksNew);
370:   PetscMalloc1(numNeighbors, &localPointsNew);
371:   PetscMalloc1(numNeighbors, &remotePointsNew);
372:   for(proc = 0, n = 0; proc < size; ++proc) {
373:     if (PetscBTLookup(neighbors, proc)) {
374:       ranksNew[n]              = proc;
375:       localPointsNew[n]        = proc;
376:       remotePointsNew[n].index = rank;
377:       remotePointsNew[n].rank  = proc;
378:       ++n;
379:     }
380:   }
381:   PetscBTDestroy(&neighbors);
382:   if (processRanks) {ISCreateGeneral(PetscObjectComm((PetscObject)dm), numNeighbors, ranksNew, PETSC_OWN_POINTER, processRanks);}
383:   else              {PetscFree(ranksNew);}
384:   if (sfProcess) {
385:     PetscSFCreate(PetscObjectComm((PetscObject)dm), sfProcess);
386:     PetscObjectSetName((PetscObject) *sfProcess, "Two-Sided Process SF");
387:     PetscSFSetFromOptions(*sfProcess);
388:     PetscSFSetGraph(*sfProcess, size, numNeighbors, localPointsNew, PETSC_OWN_POINTER, remotePointsNew, PETSC_OWN_POINTER);
389:   }
390:   return(0);
391: }

393: /*@
 394:   DMPlexDistributeOwnership - Compute owner information for shared points. This provides a two-sided (root and leaf) view of the one-sided point SF.

396:   Collective on dm

398:   Input Parameter:
399: . dm - The DM

401:   Output Parameters:
402: + rootSection - The number of leaves for a given root point
403: . rootrank    - The rank of each edge into the root point
404: . leafSection - The number of processes sharing a given leaf point
405: - leafrank    - The rank of each process sharing a leaf point

407:   Level: developer

409: .seealso: DMPlexCreateOverlapLabel(), DMPlexDistribute(), DMPlexDistributeOverlap()
410: @*/
411: PetscErrorCode DMPlexDistributeOwnership(DM dm, PetscSection rootSection, IS *rootrank, PetscSection leafSection, IS *leafrank)
412: {
413:   MPI_Comm        comm;
414:   PetscSF         sfPoint;
415:   const PetscInt *rootdegree;
416:   PetscInt       *myrank, *remoterank;
417:   PetscInt        pStart, pEnd, p, nedges;
418:   PetscMPIInt     rank;
419:   PetscErrorCode  ierr;

422:   PetscObjectGetComm((PetscObject) dm, &comm);
423:   MPI_Comm_rank(comm, &rank);
424:   DMPlexGetChart(dm, &pStart, &pEnd);
425:   DMGetPointSF(dm, &sfPoint);
426:   /* Compute number of leaves for each root */
427:   PetscObjectSetName((PetscObject) rootSection, "Root Section");
428:   PetscSectionSetChart(rootSection, pStart, pEnd);
429:   PetscSFComputeDegreeBegin(sfPoint, &rootdegree);
430:   PetscSFComputeDegreeEnd(sfPoint, &rootdegree);
431:   for (p = pStart; p < pEnd; ++p) {PetscSectionSetDof(rootSection, p, rootdegree[p-pStart]);}
432:   PetscSectionSetUp(rootSection);
433:   /* Gather rank of each leaf to root */
434:   PetscSectionGetStorageSize(rootSection, &nedges);
435:   PetscMalloc1(pEnd-pStart, &myrank);
436:   PetscMalloc1(nedges,  &remoterank);
437:   for (p = 0; p < pEnd-pStart; ++p) myrank[p] = rank;
438:   PetscSFGatherBegin(sfPoint, MPIU_INT, myrank, remoterank);
439:   PetscSFGatherEnd(sfPoint, MPIU_INT, myrank, remoterank);
440:   PetscFree(myrank);
441:   ISCreateGeneral(comm, nedges, remoterank, PETSC_OWN_POINTER, rootrank);
442:   /* Distribute remote ranks to leaves */
443:   PetscObjectSetName((PetscObject) leafSection, "Leaf Section");
444:   DMPlexDistributeFieldIS(dm, sfPoint, rootSection, *rootrank, leafSection, leafrank);
445:   return(0);
446: }
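
A sketch of the typical call sequence, assuming dm already carries a point SF; the helper name ComputeOwnership is illustrative. The caller creates the two sections, the routine fills them and creates the two index sets, and the four outputs are exactly what DMPlexCreateOverlapLabel() below consumes. The caller destroys all four when done.

    #include <petscdmplex.h>

    static PetscErrorCode ComputeOwnership(DM dm, PetscSection *rootSection, IS *rootrank, PetscSection *leafSection, IS *leafrank)
    {
      PetscErrorCode ierr;

      PetscFunctionBegin;
      ierr = PetscSectionCreate(PetscObjectComm((PetscObject) dm), rootSection);CHKERRQ(ierr);
      ierr = PetscSectionCreate(PetscObjectComm((PetscObject) dm), leafSection);CHKERRQ(ierr);
      ierr = DMPlexDistributeOwnership(dm, *rootSection, rootrank, *leafSection, leafrank);CHKERRQ(ierr);
      PetscFunctionReturn(0);
    }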

448: /*@C
 449:   DMPlexCreateOverlapLabel - Compute a DMLabel of remote overlap contributions, i.e. the point/rank pairings describing which local points must be sent to which neighboring rank to build the requested overlap.

451:   Collective on dm

453:   Input Parameters:
454: + dm          - The DM
455: . levels      - Number of overlap levels
456: . rootSection - The number of leaves for a given root point
457: . rootrank    - The rank of each edge into the root point
458: . leafSection - The number of processes sharing a given leaf point
459: - leafrank    - The rank of each process sharing a leaf point

461:   Output Parameter:
462: . ovLabel     - DMLabel containing remote overlap contributions as point/rank pairings

464:   Level: developer

466: .seealso: DMPlexDistributeOwnership(), DMPlexDistribute()
467: @*/
468: PetscErrorCode DMPlexCreateOverlapLabel(DM dm, PetscInt levels, PetscSection rootSection, IS rootrank, PetscSection leafSection, IS leafrank, DMLabel *ovLabel)
469: {
470:   MPI_Comm           comm;
471:   DMLabel            ovAdjByRank; /* A DMLabel containing all points adjacent to shared points, separated by rank (value in label) */
472:   PetscSF            sfPoint;
473:   const PetscSFNode *remote;
474:   const PetscInt    *local;
475:   const PetscInt    *nrank, *rrank;
476:   PetscInt          *adj = NULL;
477:   PetscInt           pStart, pEnd, p, sStart, sEnd, nleaves, l;
478:   PetscMPIInt        rank, size;
479:   PetscBool          flg;
480:   PetscErrorCode     ierr;

483:   *ovLabel = NULL;
484:   PetscObjectGetComm((PetscObject) dm, &comm);
485:   MPI_Comm_size(comm, &size);
486:   MPI_Comm_rank(comm, &rank);
487:   if (size == 1) return(0);
488:   DMGetPointSF(dm, &sfPoint);
489:   DMPlexGetChart(dm, &pStart, &pEnd);
490:   PetscSectionGetChart(leafSection, &sStart, &sEnd);
491:   PetscSFGetGraph(sfPoint, NULL, &nleaves, &local, &remote);
492:   DMLabelCreate(PETSC_COMM_SELF, "Overlap adjacency", &ovAdjByRank);
493:   /* Handle leaves: shared with the root point */
494:   for (l = 0; l < nleaves; ++l) {
495:     PetscInt adjSize = PETSC_DETERMINE, a;

497:     DMPlexGetAdjacency(dm, local ? local[l] : l, &adjSize, &adj);
498:     for (a = 0; a < adjSize; ++a) {DMLabelSetValue(ovAdjByRank, adj[a], remote[l].rank);}
499:   }
500:   ISGetIndices(rootrank, &rrank);
501:   ISGetIndices(leafrank, &nrank);
502:   /* Handle roots */
503:   for (p = pStart; p < pEnd; ++p) {
504:     PetscInt adjSize = PETSC_DETERMINE, neighbors = 0, noff, n, a;

506:     if ((p >= sStart) && (p < sEnd)) {
507:       /* Some leaves share a root with other leaves on different processes */
508:       PetscSectionGetDof(leafSection, p, &neighbors);
509:       if (neighbors) {
510:         PetscSectionGetOffset(leafSection, p, &noff);
511:         DMPlexGetAdjacency(dm, p, &adjSize, &adj);
512:         for (n = 0; n < neighbors; ++n) {
513:           const PetscInt remoteRank = nrank[noff+n];

515:           if (remoteRank == rank) continue;
516:           for (a = 0; a < adjSize; ++a) {DMLabelSetValue(ovAdjByRank, adj[a], remoteRank);}
517:         }
518:       }
519:     }
520:     /* Roots are shared with leaves */
521:     PetscSectionGetDof(rootSection, p, &neighbors);
522:     if (!neighbors) continue;
523:     PetscSectionGetOffset(rootSection, p, &noff);
524:     DMPlexGetAdjacency(dm, p, &adjSize, &adj);
525:     for (n = 0; n < neighbors; ++n) {
526:       const PetscInt remoteRank = rrank[noff+n];

528:       if (remoteRank == rank) continue;
529:       for (a = 0; a < adjSize; ++a) {DMLabelSetValue(ovAdjByRank, adj[a], remoteRank);}
530:     }
531:   }
532:   PetscFree(adj);
533:   ISRestoreIndices(rootrank, &rrank);
534:   ISRestoreIndices(leafrank, &nrank);
535:   /* Add additional overlap levels */
536:   for (l = 1; l < levels; l++) {
537:     /* Propagate point donations over SF to capture remote connections */
538:     DMPlexPartitionLabelPropagate(dm, ovAdjByRank);
539:     /* Add next level of point donations to the label */
540:     DMPlexPartitionLabelAdjacency(dm, ovAdjByRank);
541:   }
542:   /* We require the closure in the overlap */
543:   DMPlexPartitionLabelClosure(dm, ovAdjByRank);
544:   PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-overlap_view", &flg);
545:   if (flg) {
546:     PetscViewer viewer;
547:     PetscViewerASCIIGetStdout(PetscObjectComm((PetscObject)dm), &viewer);
548:     DMLabelView(ovAdjByRank, viewer);
549:   }
550:   /* Invert sender to receiver label */
551:   DMLabelCreate(PETSC_COMM_SELF, "Overlap label", ovLabel);
552:   DMPlexPartitionLabelInvert(dm, ovAdjByRank, NULL, *ovLabel);
553:   /* Add owned points, except for shared local points */
554:   for (p = pStart; p < pEnd; ++p) {DMLabelSetValue(*ovLabel, p, rank);}
555:   for (l = 0; l < nleaves; ++l) {
556:     DMLabelClearValue(*ovLabel, local[l], rank);
557:     DMLabelSetValue(*ovLabel, remote[l].index, remote[l].rank);
558:   }
559:   /* Clean up */
560:   DMLabelDestroy(&ovAdjByRank);
561:   return(0);
562: }
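
Continuing the ownership sketch above, a hedged fragment that builds a one-level overlap label and cleans up; it assumes rootSection, rootrank, leafSection and leafrank are the outputs of DMPlexDistributeOwnership(). Note that on a single rank the routine returns a NULL label.

      DMLabel ovLabel = NULL;

      ierr = DMPlexCreateOverlapLabel(dm, 1, rootSection, rootrank, leafSection, leafrank, &ovLabel);CHKERRQ(ierr);
      /* ... hand ovLabel to the overlap migration machinery, e.g. DMPlexPartitionLabelCreateSF() ... */
      ierr = DMLabelDestroy(&ovLabel);CHKERRQ(ierr);
      ierr = ISDestroy(&rootrank);CHKERRQ(ierr);
      ierr = ISDestroy(&leafrank);CHKERRQ(ierr);
      ierr = PetscSectionDestroy(&rootSection);CHKERRQ(ierr);
      ierr = PetscSectionDestroy(&leafSection);CHKERRQ(ierr);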

564: /*@C
 565:   DMPlexCreateOverlapMigrationSF - Create an SF describing the new mesh distribution that realizes the overlap described by the input SF

567:   Collective on dm

569:   Input Parameters:
570: + dm          - The DM
571: - overlapSF   - The SF mapping ghost points in overlap to owner points on other processes

573:   Output Parameter:
574: . migrationSF - An SF that maps original points in old locations to points in new locations

576:   Level: developer

578: .seealso: DMPlexCreateOverlapLabel(), DMPlexDistributeOverlap(), DMPlexDistribute()
579: @*/
580: PetscErrorCode DMPlexCreateOverlapMigrationSF(DM dm, PetscSF overlapSF, PetscSF *migrationSF)
581: {
582:   MPI_Comm           comm;
583:   PetscMPIInt        rank, size;
584:   PetscInt           d, dim, p, pStart, pEnd, nroots, nleaves, newLeaves, point, numSharedPoints;
585:   PetscInt          *pointDepths, *remoteDepths, *ilocal;
586:   PetscInt          *depthRecv, *depthShift, *depthIdx;
587:   PetscSFNode       *iremote;
588:   PetscSF            pointSF;
589:   const PetscInt    *sharedLocal;
590:   const PetscSFNode *overlapRemote, *sharedRemote;
591:   PetscErrorCode     ierr;

595:   PetscObjectGetComm((PetscObject)dm, &comm);
596:   MPI_Comm_rank(comm, &rank);
597:   MPI_Comm_size(comm, &size);
598:   DMGetDimension(dm, &dim);

600:   /* Before building the migration SF we need to know the new stratum offsets */
601:   PetscSFGetGraph(overlapSF, &nroots, &nleaves, NULL, &overlapRemote);
602:   PetscMalloc2(nroots, &pointDepths, nleaves, &remoteDepths);
603:   for (d=0; d<dim+1; d++) {
604:     DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);
605:     for (p=pStart; p<pEnd; p++) pointDepths[p] = d;
606:   }
607:   for (p=0; p<nleaves; p++) remoteDepths[p] = -1;
608:   PetscSFBcastBegin(overlapSF, MPIU_INT, pointDepths, remoteDepths);
609:   PetscSFBcastEnd(overlapSF, MPIU_INT, pointDepths, remoteDepths);

 611:   /* Count received points in each stratum and compute the internal strata shift */
612:   PetscMalloc3(dim+1, &depthRecv, dim+1, &depthShift, dim+1, &depthIdx);
613:   for (d=0; d<dim+1; d++) depthRecv[d]=0;
614:   for (p=0; p<nleaves; p++) depthRecv[remoteDepths[p]]++;
615:   depthShift[dim] = 0;
616:   for (d=0; d<dim; d++) depthShift[d] = depthRecv[dim];
617:   for (d=1; d<dim; d++) depthShift[d] += depthRecv[0];
618:   for (d=dim-2; d>0; d--) depthShift[d] += depthRecv[d+1];
619:   for (d=0; d<dim+1; d++) {
620:     DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);
621:     depthIdx[d] = pStart + depthShift[d];
622:   }

 624:   /* Build an SF that describes the full overlap migration */
625:   DMPlexGetChart(dm, &pStart, &pEnd);
626:   newLeaves = pEnd - pStart + nleaves;
627:   PetscMalloc1(newLeaves, &ilocal);
628:   PetscMalloc1(newLeaves, &iremote);
629:   /* First map local points to themselves */
630:   for (d=0; d<dim+1; d++) {
631:     DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);
632:     for (p=pStart; p<pEnd; p++) {
633:       point = p + depthShift[d];
634:       ilocal[point] = point;
635:       iremote[point].index = p;
636:       iremote[point].rank = rank;
637:       depthIdx[d]++;
638:     }
639:   }

641:   /* Add in the remote roots for currently shared points */
642:   DMGetPointSF(dm, &pointSF);
643:   PetscSFGetGraph(pointSF, NULL, &numSharedPoints, &sharedLocal, &sharedRemote);
644:   for (d=0; d<dim+1; d++) {
645:     DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);
646:     for (p=0; p<numSharedPoints; p++) {
647:       if (pStart <= sharedLocal[p] && sharedLocal[p] < pEnd) {
648:         point = sharedLocal[p] + depthShift[d];
649:         iremote[point].index = sharedRemote[p].index;
650:         iremote[point].rank = sharedRemote[p].rank;
651:       }
652:     }
653:   }

655:   /* Now add the incoming overlap points */
656:   for (p=0; p<nleaves; p++) {
657:     point = depthIdx[remoteDepths[p]];
658:     ilocal[point] = point;
659:     iremote[point].index = overlapRemote[p].index;
660:     iremote[point].rank = overlapRemote[p].rank;
661:     depthIdx[remoteDepths[p]]++;
662:   }
663:   PetscFree2(pointDepths,remoteDepths);

665:   PetscSFCreate(comm, migrationSF);
666:   PetscObjectSetName((PetscObject) *migrationSF, "Overlap Migration SF");
667:   PetscSFSetFromOptions(*migrationSF);
668:   DMPlexGetChart(dm, &pStart, &pEnd);
669:   PetscSFSetGraph(*migrationSF, pEnd-pStart, newLeaves, ilocal, PETSC_OWN_POINTER, iremote, PETSC_OWN_POINTER);

671:   PetscFree3(depthRecv, depthShift, depthIdx);
672:   return(0);
673: }

675: /*@
676:   DMPlexStratifyMigrationSF - Rearrange the leaves of a migration sf for stratification.

678:   Input Parameters:
679: + dm          - The DM
680: - sf          - A star forest with non-ordered leaves, usually defining a DM point migration

682:   Output Parameter:
683: . migrationSF - A star forest with added leaf indirection that ensures the resulting DM is stratified

685:   Level: developer

687: .seealso: DMPlexPartitionLabelCreateSF(), DMPlexDistribute(), DMPlexDistributeOverlap()
688: @*/
689: PetscErrorCode DMPlexStratifyMigrationSF(DM dm, PetscSF sf, PetscSF *migrationSF)
690: {
691:   MPI_Comm           comm;
692:   PetscMPIInt        rank, size;
693:   PetscInt           d, ldepth, depth, p, pStart, pEnd, nroots, nleaves;
694:   PetscInt          *pointDepths, *remoteDepths, *ilocal;
695:   PetscInt          *depthRecv, *depthShift, *depthIdx;
696:   PetscInt           hybEnd[4];
697:   const PetscSFNode *iremote;
698:   PetscErrorCode     ierr;

702:   PetscObjectGetComm((PetscObject) dm, &comm);
703:   MPI_Comm_rank(comm, &rank);
704:   MPI_Comm_size(comm, &size);
705:   DMPlexGetDepth(dm, &ldepth);
706:   MPIU_Allreduce(&ldepth, &depth, 1, MPIU_INT, MPI_MAX, comm);
707:   if ((ldepth >= 0) && (depth != ldepth)) SETERRQ2(PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Inconsistent Plex depth %d != %d", ldepth, depth);
708:   PetscLogEventBegin(DMPLEX_PartStratSF,dm,0,0,0);

710:   /* Before building the migration SF we need to know the new stratum offsets */
711:   PetscSFGetGraph(sf, &nroots, &nleaves, NULL, &iremote);
712:   PetscMalloc2(nroots, &pointDepths, nleaves, &remoteDepths);
713:   DMPlexGetHybridBounds(dm,&hybEnd[depth],&hybEnd[PetscMax(depth-1,0)],&hybEnd[1],&hybEnd[0]);
714:   for (d = 0; d < depth+1; ++d) {
715:     DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);
716:     for (p = pStart; p < pEnd; ++p) {
717:       if (hybEnd[d] >= 0 && p >= hybEnd[d]) { /* put in a separate value for hybrid points */
718:         pointDepths[p] = 2 * d;
719:       } else {
720:         pointDepths[p] = 2 * d + 1;
721:       }
722:     }
723:   }
724:   for (p = 0; p < nleaves; ++p) remoteDepths[p] = -1;
725:   PetscSFBcastBegin(sf, MPIU_INT, pointDepths, remoteDepths);
726:   PetscSFBcastEnd(sf, MPIU_INT, pointDepths, remoteDepths);
727:   /* Count received points in each stratum and compute the internal strata shift */
728:   PetscMalloc3(2*(depth+1), &depthRecv, 2*(depth+1), &depthShift, 2*(depth+1), &depthIdx);
729:   for (d = 0; d < 2*(depth+1); ++d) depthRecv[d] = 0;
730:   for (p = 0; p < nleaves; ++p) depthRecv[remoteDepths[p]]++;
731:   depthShift[2*depth+1] = 0;
732:   for (d = 0; d < 2*depth+1; ++d) depthShift[d] = depthRecv[2 * depth + 1];
733:   for (d = 0; d < 2*depth; ++d) depthShift[d] += depthRecv[2 * depth];
734:   depthShift[0] += depthRecv[1];
735:   for (d = 2; d < 2*depth; ++d) depthShift[d] += depthRecv[1];
736:   for (d = 2; d < 2*depth; ++d) depthShift[d] += depthRecv[0];
737:   for (d = 2 * depth-1; d > 2; --d) {
738:     PetscInt e;

740:     for (e = d -1; e > 1; --e) depthShift[e] += depthRecv[d];
741:   }
742:   for (d = 0; d < 2*(depth+1); ++d) {depthIdx[d] = 0;}
743:   /* Derive a new local permutation based on stratified indices */
744:   PetscMalloc1(nleaves, &ilocal);
745:   for (p = 0; p < nleaves; ++p) {
746:     const PetscInt dep = remoteDepths[p];

748:     ilocal[p] = depthShift[dep] + depthIdx[dep];
749:     depthIdx[dep]++;
750:   }
751:   PetscSFCreate(comm, migrationSF);
752:   PetscObjectSetName((PetscObject) *migrationSF, "Migration SF");
753:   PetscSFSetGraph(*migrationSF, nroots, nleaves, ilocal, PETSC_OWN_POINTER, iremote, PETSC_COPY_VALUES);
754:   PetscFree2(pointDepths,remoteDepths);
755:   PetscFree3(depthRecv, depthShift, depthIdx);
756:   PetscLogEventEnd(DMPLEX_PartStratSF,dm,0,0,0);
757:   return(0);
758: }

760: /*@
761:   DMPlexDistributeField - Distribute field data to match a given PetscSF, usually the SF from mesh distribution

763:   Collective on dm

765:   Input Parameters:
766: + dm - The DMPlex object
767: . pointSF - The PetscSF describing the communication pattern
768: . originalSection - The PetscSection for existing data layout
769: - originalVec - The existing data in a local vector

771:   Output Parameters:
 772: + newSection - The PetscSection describing the new data layout
773: - newVec - The new data in a local vector

775:   Level: developer

777: .seealso: DMPlexDistribute(), DMPlexDistributeFieldIS(), DMPlexDistributeData()
778: @*/
779: PetscErrorCode DMPlexDistributeField(DM dm, PetscSF pointSF, PetscSection originalSection, Vec originalVec, PetscSection newSection, Vec newVec)
780: {
781:   PetscSF        fieldSF;
782:   PetscInt      *remoteOffsets, fieldSize;
783:   PetscScalar   *originalValues, *newValues;

787:   PetscLogEventBegin(DMPLEX_DistributeField,dm,0,0,0);
788:   PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection);

790:   PetscSectionGetStorageSize(newSection, &fieldSize);
791:   VecSetSizes(newVec, fieldSize, PETSC_DETERMINE);
792:   VecSetType(newVec,dm->vectype);

794:   VecGetArray(originalVec, &originalValues);
795:   VecGetArray(newVec, &newValues);
796:   PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF);
797:   PetscFree(remoteOffsets);
798:   PetscSFBcastBegin(fieldSF, MPIU_SCALAR, originalValues, newValues);
799:   PetscSFBcastEnd(fieldSF, MPIU_SCALAR, originalValues, newValues);
800:   PetscSFDestroy(&fieldSF);
801:   VecRestoreArray(newVec, &newValues);
802:   VecRestoreArray(originalVec, &originalValues);
803:   PetscLogEventEnd(DMPLEX_DistributeField,dm,0,0,0);
804:   return(0);
805: }
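
A hedged sketch of migrating per-point field data, modeled on the coordinate handling in DMPlexDistributeCoordinates() further down; it assumes origSec/origVec describe data on dm and sf is a migration SF, and the helper name MigrateField is illustrative. The caller supplies an empty section and vector, which the routine sizes and fills.

    #include <petscdmplex.h>

    static PetscErrorCode MigrateField(DM dm, PetscSF sf, PetscSection origSec, Vec origVec, PetscSection *newSec, Vec *newVec)
    {
      PetscErrorCode ierr;

      PetscFunctionBegin;
      ierr = PetscSectionCreate(PETSC_COMM_SELF, newSec);CHKERRQ(ierr);
      ierr = VecCreate(PETSC_COMM_SELF, newVec);CHKERRQ(ierr);
      ierr = DMPlexDistributeField(dm, sf, origSec, origVec, *newSec, *newVec);CHKERRQ(ierr);
      PetscFunctionReturn(0);
    }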

807: /*@
808:   DMPlexDistributeFieldIS - Distribute field data to match a given PetscSF, usually the SF from mesh distribution

810:   Collective on dm

812:   Input Parameters:
813: + dm - The DMPlex object
814: . pointSF - The PetscSF describing the communication pattern
815: . originalSection - The PetscSection for existing data layout
816: - originalIS - The existing data

818:   Output Parameters:
 819: + newSection - The PetscSection describing the new data layout
820: - newIS - The new data

822:   Level: developer

824: .seealso: DMPlexDistribute(), DMPlexDistributeField(), DMPlexDistributeData()
825: @*/
826: PetscErrorCode DMPlexDistributeFieldIS(DM dm, PetscSF pointSF, PetscSection originalSection, IS originalIS, PetscSection newSection, IS *newIS)
827: {
828:   PetscSF         fieldSF;
829:   PetscInt       *newValues, *remoteOffsets, fieldSize;
830:   const PetscInt *originalValues;
831:   PetscErrorCode  ierr;

834:   PetscLogEventBegin(DMPLEX_DistributeField,dm,0,0,0);
835:   PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection);

837:   PetscSectionGetStorageSize(newSection, &fieldSize);
838:   PetscMalloc1(fieldSize, &newValues);

840:   ISGetIndices(originalIS, &originalValues);
841:   PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF);
842:   PetscFree(remoteOffsets);
843:   PetscSFBcastBegin(fieldSF, MPIU_INT, (PetscInt *) originalValues, newValues);
844:   PetscSFBcastEnd(fieldSF, MPIU_INT, (PetscInt *) originalValues, newValues);
845:   PetscSFDestroy(&fieldSF);
846:   ISRestoreIndices(originalIS, &originalValues);
847:   ISCreateGeneral(PetscObjectComm((PetscObject) pointSF), fieldSize, newValues, PETSC_OWN_POINTER, newIS);
848:   PetscLogEventEnd(DMPLEX_DistributeField,dm,0,0,0);
849:   return(0);
850: }

852: /*@
853:   DMPlexDistributeData - Distribute field data to match a given PetscSF, usually the SF from mesh distribution

855:   Collective on dm

857:   Input Parameters:
858: + dm - The DMPlex object
859: . pointSF - The PetscSF describing the communication pattern
860: . originalSection - The PetscSection for existing data layout
861: . datatype - The type of data
862: - originalData - The existing data

864:   Output Parameters:
865: + newSection - The PetscSection describing the new data layout
866: - newData - The new data

868:   Level: developer

870: .seealso: DMPlexDistribute(), DMPlexDistributeField()
871: @*/
872: PetscErrorCode DMPlexDistributeData(DM dm, PetscSF pointSF, PetscSection originalSection, MPI_Datatype datatype, void *originalData, PetscSection newSection, void **newData)
873: {
874:   PetscSF        fieldSF;
875:   PetscInt      *remoteOffsets, fieldSize;
876:   PetscMPIInt    dataSize;

880:   PetscLogEventBegin(DMPLEX_DistributeData,dm,0,0,0);
881:   PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection);

883:   PetscSectionGetStorageSize(newSection, &fieldSize);
884:   MPI_Type_size(datatype, &dataSize);
885:   PetscMalloc(fieldSize * dataSize, newData);

887:   PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF);
888:   PetscFree(remoteOffsets);
889:   PetscSFBcastBegin(fieldSF, datatype, originalData, *newData);
890:   PetscSFBcastEnd(fieldSF, datatype, originalData, *newData);
891:   PetscSFDestroy(&fieldSF);
892:   PetscLogEventEnd(DMPLEX_DistributeData,dm,0,0,0);
893:   return(0);
894: }
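
A similar sketch for raw per-point data, assuming origSec lays out one MPIU_INT per point of origData; the helper name MigrateIntData is illustrative. The routine allocates *newData itself, so the caller releases it with PetscFree().

    #include <petscdmplex.h>

    static PetscErrorCode MigrateIntData(DM dm, PetscSF sf, PetscSection origSec, PetscInt *origData, PetscSection *newSec, PetscInt **newData)
    {
      PetscErrorCode ierr;

      PetscFunctionBegin;
      ierr = PetscSectionCreate(PETSC_COMM_SELF, newSec);CHKERRQ(ierr);
      ierr = DMPlexDistributeData(dm, sf, origSec, MPIU_INT, origData, *newSec, (void **) newData);CHKERRQ(ierr);
      PetscFunctionReturn(0);
    }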

896: static PetscErrorCode DMPlexDistributeCones(DM dm, PetscSF migrationSF, ISLocalToGlobalMapping original, ISLocalToGlobalMapping renumbering, DM dmParallel)
897: {
898:   DM_Plex               *pmesh = (DM_Plex*) (dmParallel)->data;
899:   MPI_Comm               comm;
900:   PetscSF                coneSF;
901:   PetscSection           originalConeSection, newConeSection;
902:   PetscInt              *remoteOffsets, *cones, *globCones, *newCones, newConesSize;
903:   PetscBool              flg;
904:   PetscErrorCode         ierr;

909:   PetscLogEventBegin(DMPLEX_DistributeCones,dm,0,0,0);
910:   /* Distribute cone section */
911:   PetscObjectGetComm((PetscObject)dm, &comm);
912:   DMPlexGetConeSection(dm, &originalConeSection);
913:   DMPlexGetConeSection(dmParallel, &newConeSection);
914:   PetscSFDistributeSection(migrationSF, originalConeSection, &remoteOffsets, newConeSection);
915:   DMSetUp(dmParallel);
916:   {
917:     PetscInt pStart, pEnd, p;

919:     PetscSectionGetChart(newConeSection, &pStart, &pEnd);
920:     for (p = pStart; p < pEnd; ++p) {
921:       PetscInt coneSize;
922:       PetscSectionGetDof(newConeSection, p, &coneSize);
923:       pmesh->maxConeSize = PetscMax(pmesh->maxConeSize, coneSize);
924:     }
925:   }
926:   /* Communicate and renumber cones */
927:   PetscSFCreateSectionSF(migrationSF, originalConeSection, remoteOffsets, newConeSection, &coneSF);
928:   PetscFree(remoteOffsets);
929:   DMPlexGetCones(dm, &cones);
930:   if (original) {
931:     PetscInt numCones;

933:     PetscSectionGetStorageSize(originalConeSection,&numCones);
934:     PetscMalloc1(numCones,&globCones);
935:     ISLocalToGlobalMappingApplyBlock(original, numCones, cones, globCones);
936:   } else {
937:     globCones = cones;
938:   }
939:   DMPlexGetCones(dmParallel, &newCones);
940:   PetscSFBcastBegin(coneSF, MPIU_INT, globCones, newCones);
941:   PetscSFBcastEnd(coneSF, MPIU_INT, globCones, newCones);
942:   if (original) {
943:     PetscFree(globCones);
944:   }
945:   PetscSectionGetStorageSize(newConeSection, &newConesSize);
946:   ISGlobalToLocalMappingApplyBlock(renumbering, IS_GTOLM_MASK, newConesSize, newCones, NULL, newCones);
947: #if defined(PETSC_USE_DEBUG)
948:   {
949:     PetscInt  p;
950:     PetscBool valid = PETSC_TRUE;
951:     for (p = 0; p < newConesSize; ++p) {
952:       if (newCones[p] < 0) {valid = PETSC_FALSE; PetscPrintf(PETSC_COMM_SELF, "[%d] Point %D not in overlap SF\n", PetscGlobalRank,p);}
953:     }
954:     if (!valid) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Invalid global to local map");
955:   }
956: #endif
957:   PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-cones_view", &flg);
958:   if (flg) {
959:     PetscPrintf(comm, "Serial Cone Section:\n");
960:     PetscSectionView(originalConeSection, PETSC_VIEWER_STDOUT_(comm));
961:     PetscPrintf(comm, "Parallel Cone Section:\n");
962:     PetscSectionView(newConeSection, PETSC_VIEWER_STDOUT_(comm));
963:     PetscSFView(coneSF, NULL);
964:   }
965:   DMPlexGetConeOrientations(dm, &cones);
966:   DMPlexGetConeOrientations(dmParallel, &newCones);
967:   PetscSFBcastBegin(coneSF, MPIU_INT, cones, newCones);
968:   PetscSFBcastEnd(coneSF, MPIU_INT, cones, newCones);
969:   PetscSFDestroy(&coneSF);
970:   PetscLogEventEnd(DMPLEX_DistributeCones,dm,0,0,0);
971:   /* Create supports and stratify DMPlex */
972:   {
973:     PetscInt pStart, pEnd;

975:     PetscSectionGetChart(pmesh->coneSection, &pStart, &pEnd);
976:     PetscSectionSetChart(pmesh->supportSection, pStart, pEnd);
977:   }
978:   DMPlexSymmetrize(dmParallel);
979:   DMPlexStratify(dmParallel);
980:   {
981:     PetscBool useCone, useClosure, useAnchors;

983:     DMGetBasicAdjacency(dm, &useCone, &useClosure);
984:     DMSetBasicAdjacency(dmParallel, useCone, useClosure);
985:     DMPlexGetAdjacencyUseAnchors(dm, &useAnchors);
986:     DMPlexSetAdjacencyUseAnchors(dmParallel, useAnchors);
987:   }
988:   return(0);
989: }

991: static PetscErrorCode DMPlexDistributeCoordinates(DM dm, PetscSF migrationSF, DM dmParallel)
992: {
993:   MPI_Comm         comm;
994:   PetscSection     originalCoordSection, newCoordSection;
995:   Vec              originalCoordinates, newCoordinates;
996:   PetscInt         bs;
997:   PetscBool        isper;
998:   const char      *name;
999:   const PetscReal *maxCell, *L;
1000:   const DMBoundaryType *bd;
1001:   PetscErrorCode   ierr;


1007:   PetscObjectGetComm((PetscObject)dm, &comm);
1008:   DMGetCoordinateSection(dm, &originalCoordSection);
1009:   DMGetCoordinateSection(dmParallel, &newCoordSection);
1010:   DMGetCoordinatesLocal(dm, &originalCoordinates);
1011:   if (originalCoordinates) {
1012:     VecCreate(PETSC_COMM_SELF, &newCoordinates);
1013:     PetscObjectGetName((PetscObject) originalCoordinates, &name);
1014:     PetscObjectSetName((PetscObject) newCoordinates, name);

1016:     DMPlexDistributeField(dm, migrationSF, originalCoordSection, originalCoordinates, newCoordSection, newCoordinates);
1017:     DMSetCoordinatesLocal(dmParallel, newCoordinates);
1018:     VecGetBlockSize(originalCoordinates, &bs);
1019:     VecSetBlockSize(newCoordinates, bs);
1020:     VecDestroy(&newCoordinates);
1021:   }
1022:   DMGetPeriodicity(dm, &isper, &maxCell, &L, &bd);
1023:   DMSetPeriodicity(dmParallel, isper, maxCell, L, bd);
1024:   return(0);
1025: }

1027: static PetscErrorCode DMPlexDistributeLabels(DM dm, PetscSF migrationSF, DM dmParallel)
1028: {
1029:   DM_Plex         *mesh = (DM_Plex*) dm->data;
1030:   MPI_Comm         comm;
1031:   DMLabel          depthLabel;
1032:   PetscMPIInt      rank;
1033:   PetscInt         depth, d, numLabels, numLocalLabels, l;
1034:   PetscBool        hasLabels = PETSC_FALSE, lsendDepth, sendDepth;
1035:   PetscObjectState depthState = -1;
1036:   PetscErrorCode   ierr;


1042:   PetscLogEventBegin(DMPLEX_DistributeLabels,dm,0,0,0);
1043:   PetscObjectGetComm((PetscObject)dm, &comm);
1044:   MPI_Comm_rank(comm, &rank);

1046:   /* If the user has changed the depth label, communicate it instead */
1047:   DMPlexGetDepth(dm, &depth);
1048:   DMPlexGetDepthLabel(dm, &depthLabel);
1049:   if (depthLabel) {PetscObjectStateGet((PetscObject) depthLabel, &depthState);}
1050:   lsendDepth = mesh->depthState != depthState ? PETSC_TRUE : PETSC_FALSE;
1051:   MPIU_Allreduce(&lsendDepth, &sendDepth, 1, MPIU_BOOL, MPI_LOR, comm);
1052:   if (sendDepth) {
1053:     DMPlexGetDepthLabel(dmParallel, &dmParallel->depthLabel);
1054:     DMRemoveLabelBySelf(dmParallel, &dmParallel->depthLabel, PETSC_FALSE);
1055:   }
1056:   /* Everyone must have either the same number of labels, or none */
1057:   DMGetNumLabels(dm, &numLocalLabels);
1058:   numLabels = numLocalLabels;
1059:   MPI_Bcast(&numLabels, 1, MPIU_INT, 0, comm);
1060:   if (numLabels == numLocalLabels) hasLabels = PETSC_TRUE;
1061:   for (l = 0; l < numLabels; ++l) {
1062:     DMLabel     label = NULL, labelNew = NULL;
1063:     PetscBool   isDepth, lisOutput = PETSC_TRUE, isOutput;
1064:     const char *name = NULL;

1066:     if (hasLabels) {
1067:       DMGetLabelByNum(dm, l, &label);
1068:       /* Skip "depth" because it is recreated */
1069:       PetscObjectGetName((PetscObject) label, &name);
1070:       PetscStrcmp(name, "depth", &isDepth);
1071:     }
1072:     MPI_Bcast(&isDepth, 1, MPIU_BOOL, 0, comm);
1073:     if (isDepth && !sendDepth) continue;
1074:     DMLabelDistribute(label, migrationSF, &labelNew);
1075:     if (isDepth) {
1076:       /* Put in any missing strata which can occur if users are managing the depth label themselves */
1077:       PetscInt gdepth;

1079:       MPIU_Allreduce(&depth, &gdepth, 1, MPIU_INT, MPI_MAX, comm);
1080:       if ((depth >= 0) && (gdepth != depth)) SETERRQ2(PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Inconsistent Plex depth %d != %d", depth, gdepth);
1081:       for (d = 0; d <= gdepth; ++d) {
1082:         PetscBool has;

1084:         DMLabelHasStratum(labelNew, d, &has);
1085:         if (!has) {DMLabelAddStratum(labelNew, d);}
1086:       }
1087:     }
1088:     DMAddLabel(dmParallel, labelNew);
1089:     /* Put the output flag in the new label */
1090:     if (hasLabels) {DMGetLabelOutput(dm, name, &lisOutput);}
1091:     MPIU_Allreduce(&lisOutput, &isOutput, 1, MPIU_BOOL, MPI_LAND, comm);
1092:     PetscObjectGetName((PetscObject) labelNew, &name);
1093:     DMSetLabelOutput(dmParallel, name, isOutput);
1094:     DMLabelDestroy(&labelNew);
1095:   }
1096:   PetscLogEventEnd(DMPLEX_DistributeLabels,dm,0,0,0);
1097:   return(0);
1098: }

1100: /* Set hybrid and ghost state of points */
1101: static PetscErrorCode DMPlexDistributeSetupHybrid(DM dm, PetscSF migrationSF, ISLocalToGlobalMapping renumbering, DM dmParallel)
1102: {
1103:   DM_Plex        *mesh  = (DM_Plex*) dm->data;
1104:   DM_Plex        *pmesh = (DM_Plex*) (dmParallel)->data;
1105:   PetscInt       *isHybrid, *isHybridParallel; /* 0 for normal, 1 for hybrid, 2 for ghost cell */
1106:   PetscInt        dim, depth, d;
1107:   PetscInt        pStart, pEnd, pStartP, pEndP, gcStart, gcEnd;
1108:   PetscErrorCode  ierr;


1114:   DMGetDimension(dm, &dim);
1115:   DMPlexGetDepth(dm, &depth);
1116:   DMPlexGetChart(dm,&pStart,&pEnd);
1117:   DMPlexGetChart(dmParallel,&pStartP,&pEndP);
1118:   DMPlexGetGhostCellStratum(dm, &gcStart, &gcEnd);
1119:   PetscCalloc2(pEnd-pStart,&isHybrid,pEndP-pStartP,&isHybridParallel);
1120:   for (d = 0; d <= depth; d++) {
1121:     PetscInt hybridMax = (depth == 1 && d == 1) ? mesh->hybridPointMax[dim] : mesh->hybridPointMax[d], p;

1123:     if (hybridMax >= 0) {
1124:       PetscInt sStart, sEnd;

1126:       DMPlexGetDepthStratum(dm,d,&sStart,&sEnd);
1127:       for (p = hybridMax; p < sEnd; p++) isHybrid[p-pStart] = 1;
1128:     }
1129:     if (d == depth) for (p = gcStart; p < gcEnd; ++p) isHybrid[p-pStart] = 2;
1130:   }
1131:   PetscSFBcastBegin(migrationSF,MPIU_INT,isHybrid,isHybridParallel);
1132:   PetscSFBcastEnd(migrationSF,MPIU_INT,isHybrid,isHybridParallel);
1133:   for (d = 0; d <= dim; d++) pmesh->hybridPointMax[d] = -1;
1134:   for (d = 0; d <= depth; d++) {
1135:     PetscInt sStart, sEnd, p, dd;

1137:     DMPlexGetDepthStratum(dmParallel,d,&sStart,&sEnd);
1138:     dd = (depth == 1 && d == 1) ? dim : d;
1139:     for (p = sStart; p < sEnd; p++) {
1140:       if (isHybridParallel[p-pStartP] == 1) {
1141:         pmesh->hybridPointMax[dd] = p;
1142:         break;
1143:       }
1144:       if (d == depth && isHybridParallel[p-pStartP] == 2) {
1145:         DMPlexSetGhostCellStratum(dmParallel, p, PETSC_DETERMINE);
1146:         break;
1147:       }
1148:     }
1149:   }
1150:   PetscFree2(isHybrid,isHybridParallel);
1151:   return(0);
1152: }

1154: static PetscErrorCode DMPlexDistributeSetupTree(DM dm, PetscSF migrationSF, ISLocalToGlobalMapping original, ISLocalToGlobalMapping renumbering, DM dmParallel)
1155: {
1156:   DM_Plex        *mesh  = (DM_Plex*) dm->data;
1157:   DM_Plex        *pmesh = (DM_Plex*) (dmParallel)->data;
1158:   MPI_Comm        comm;
1159:   DM              refTree;
1160:   PetscSection    origParentSection, newParentSection;
1161:   PetscInt        *origParents, *origChildIDs;
1162:   PetscBool       flg;
1163:   PetscErrorCode  ierr;

1168:   PetscObjectGetComm((PetscObject)dm, &comm);

1170:   /* Set up tree */
1171:   DMPlexGetReferenceTree(dm,&refTree);
1172:   DMPlexSetReferenceTree(dmParallel,refTree);
1173:   DMPlexGetTree(dm,&origParentSection,&origParents,&origChildIDs,NULL,NULL);
1174:   if (origParentSection) {
1175:     PetscInt        pStart, pEnd;
1176:     PetscInt        *newParents, *newChildIDs, *globParents;
1177:     PetscInt        *remoteOffsetsParents, newParentSize;
1178:     PetscSF         parentSF;

1180:     DMPlexGetChart(dmParallel, &pStart, &pEnd);
1181:     PetscSectionCreate(PetscObjectComm((PetscObject)dmParallel),&newParentSection);
1182:     PetscSectionSetChart(newParentSection,pStart,pEnd);
1183:     PetscSFDistributeSection(migrationSF, origParentSection, &remoteOffsetsParents, newParentSection);
1184:     PetscSFCreateSectionSF(migrationSF, origParentSection, remoteOffsetsParents, newParentSection, &parentSF);
1185:     PetscFree(remoteOffsetsParents);
1186:     PetscSectionGetStorageSize(newParentSection,&newParentSize);
1187:     PetscMalloc2(newParentSize,&newParents,newParentSize,&newChildIDs);
1188:     if (original) {
1189:       PetscInt numParents;

1191:       PetscSectionGetStorageSize(origParentSection,&numParents);
1192:       PetscMalloc1(numParents,&globParents);
1193:       ISLocalToGlobalMappingApplyBlock(original, numParents, origParents, globParents);
1194:     }
1195:     else {
1196:       globParents = origParents;
1197:     }
1198:     PetscSFBcastBegin(parentSF, MPIU_INT, globParents, newParents);
1199:     PetscSFBcastEnd(parentSF, MPIU_INT, globParents, newParents);
1200:     if (original) {
1201:       PetscFree(globParents);
1202:     }
1203:     PetscSFBcastBegin(parentSF, MPIU_INT, origChildIDs, newChildIDs);
1204:     PetscSFBcastEnd(parentSF, MPIU_INT, origChildIDs, newChildIDs);
1205:     ISGlobalToLocalMappingApplyBlock(renumbering,IS_GTOLM_MASK, newParentSize, newParents, NULL, newParents);
1206: #if defined(PETSC_USE_DEBUG)
1207:     {
1208:       PetscInt  p;
1209:       PetscBool valid = PETSC_TRUE;
1210:       for (p = 0; p < newParentSize; ++p) {
1211:         if (newParents[p] < 0) {valid = PETSC_FALSE; PetscPrintf(PETSC_COMM_SELF, "Point %d not in overlap SF\n", p);}
1212:       }
1213:       if (!valid) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Invalid global to local map");
1214:     }
1215: #endif
1216:     PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-parents_view", &flg);
1217:     if (flg) {
1218:       PetscPrintf(comm, "Serial Parent Section: \n");
1219:       PetscSectionView(origParentSection, PETSC_VIEWER_STDOUT_(comm));
1220:       PetscPrintf(comm, "Parallel Parent Section: \n");
1221:       PetscSectionView(newParentSection, PETSC_VIEWER_STDOUT_(comm));
1222:       PetscSFView(parentSF, NULL);
1223:     }
1224:     DMPlexSetTree(dmParallel,newParentSection,newParents,newChildIDs);
1225:     PetscSectionDestroy(&newParentSection);
1226:     PetscFree2(newParents,newChildIDs);
1227:     PetscSFDestroy(&parentSF);
1228:   }
1229:   pmesh->useAnchors = mesh->useAnchors;
1230:   return(0);
1231: }

1233: PETSC_UNUSED static PetscErrorCode DMPlexDistributeSF(DM dm, PetscSF migrationSF, DM dmParallel)
1234: {
1235:   PetscMPIInt            rank, size;
1236:   MPI_Comm               comm;
1237:   PetscErrorCode         ierr;


1243:   /* Create point SF for parallel mesh */
1244:   PetscLogEventBegin(DMPLEX_DistributeSF,dm,0,0,0);
1245:   PetscObjectGetComm((PetscObject)dm, &comm);
1246:   MPI_Comm_rank(comm, &rank);
1247:   MPI_Comm_size(comm, &size);
1248:   {
1249:     const PetscInt *leaves;
1250:     PetscSFNode    *remotePoints, *rowners, *lowners;
1251:     PetscInt        numRoots, numLeaves, numGhostPoints = 0, p, gp, *ghostPoints;
1252:     PetscInt        pStart, pEnd;

1254:     DMPlexGetChart(dmParallel, &pStart, &pEnd);
1255:     PetscSFGetGraph(migrationSF, &numRoots, &numLeaves, &leaves, NULL);
1256:     PetscMalloc2(numRoots,&rowners,numLeaves,&lowners);
1257:     for (p=0; p<numRoots; p++) {
1258:       rowners[p].rank  = -1;
1259:       rowners[p].index = -1;
1260:     }
1261:     PetscSFBcastBegin(migrationSF, MPIU_2INT, rowners, lowners);
1262:     PetscSFBcastEnd(migrationSF, MPIU_2INT, rowners, lowners);
1263:     for (p = 0; p < numLeaves; ++p) {
1264:       if (lowners[p].rank < 0 || lowners[p].rank == rank) { /* Either put in a bid or we know we own it */
1265:         lowners[p].rank  = rank;
1266:         lowners[p].index = leaves ? leaves[p] : p;
1267:       } else if (lowners[p].rank >= 0) { /* Point already claimed, so flag it so that MAXLOC does not listen to us */
1268:         lowners[p].rank  = -2;
1269:         lowners[p].index = -2;
1270:       }
1271:     }
1272:     for (p=0; p<numRoots; p++) { /* Roots must not participate in the reduction; flag them so that MAXLOC ignores them */
1273:       rowners[p].rank  = -3;
1274:       rowners[p].index = -3;
1275:     }
1276:     PetscSFReduceBegin(migrationSF, MPIU_2INT, lowners, rowners, MPI_MAXLOC);
1277:     PetscSFReduceEnd(migrationSF, MPIU_2INT, lowners, rowners, MPI_MAXLOC);
1278:     PetscSFBcastBegin(migrationSF, MPIU_2INT, rowners, lowners);
1279:     PetscSFBcastEnd(migrationSF, MPIU_2INT, rowners, lowners);
1280:     for (p = 0; p < numLeaves; ++p) {
1281:       if (lowners[p].rank < 0 || lowners[p].index < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Cell partition corrupt: point not claimed");
1282:       if (lowners[p].rank != rank) ++numGhostPoints;
1283:     }
1284:     PetscMalloc1(numGhostPoints, &ghostPoints);
1285:     PetscMalloc1(numGhostPoints, &remotePoints);
1286:     for (p = 0, gp = 0; p < numLeaves; ++p) {
1287:       if (lowners[p].rank != rank) {
1288:         ghostPoints[gp]        = leaves ? leaves[p] : p;
1289:         remotePoints[gp].rank  = lowners[p].rank;
1290:         remotePoints[gp].index = lowners[p].index;
1291:         ++gp;
1292:       }
1293:     }
1294:     PetscFree2(rowners,lowners);
1295:     PetscSFSetGraph((dmParallel)->sf, pEnd - pStart, numGhostPoints, ghostPoints, PETSC_OWN_POINTER, remotePoints, PETSC_OWN_POINTER);
1296:     PetscSFSetFromOptions((dmParallel)->sf);
1297:   }
1298:   {
1299:     PetscBool useCone, useClosure, useAnchors;

1301:     DMGetBasicAdjacency(dm, &useCone, &useClosure);
1302:     DMSetBasicAdjacency(dmParallel, useCone, useClosure);
1303:     DMPlexGetAdjacencyUseAnchors(dm, &useAnchors);
1304:     DMPlexSetAdjacencyUseAnchors(dmParallel, useAnchors);
1305:   }
1306:   PetscLogEventEnd(DMPLEX_DistributeSF,dm,0,0,0);
1307:   return(0);
1308: }

1310: /*@
1311:   DMPlexSetPartitionBalance - Should distribution of the DM attempt to balance the shared point partition?

1313:   Input Parameters:
1314: + dm - The DMPlex object
1315: - flg - Balance the partition?

1317:   Level: intermediate

1319: .seealso: DMPlexDistribute(), DMPlexGetPartitionBalance()
1320: @*/
1321: PetscErrorCode DMPlexSetPartitionBalance(DM dm, PetscBool flg)
1322: {
1323:   DM_Plex *mesh = (DM_Plex *)dm->data;

1326:   mesh->partitionBalance = flg;
1327:   return(0);
1328: }

1330: /*@
1331:   DMPlexGetPartitionBalance - Does distribution of the DM attempt to balance the shared point partition?

1333:   Input Parameter:
1334: . dm - The DMPlex object

1336:   Output Parameter:
1337: . flg - Balance the partition?

1339:   Level: intermediate

1341: .seealso: DMPlexDistribute(), DMPlexSetPartitionBalance()
1342: @*/
1343: PetscErrorCode DMPlexGetPartitionBalance(DM dm, PetscBool *flg)
1344: {
1345:   DM_Plex *mesh = (DM_Plex *)dm->data;

1350:   *flg = mesh->partitionBalance;
1351:   return(0);
1352: }
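
A small sketch of requesting a balanced shared-point partition before redistribution, assuming dm is an existing DMPlex; the helper name DistributeBalanced is illustrative. DMPlexDistribute() returns a NULL DM when no redistribution was needed (e.g. on one rank).

    #include <petscdmplex.h>

    static PetscErrorCode DistributeBalanced(DM *dm)
    {
      DM             dmDist = NULL;
      PetscErrorCode ierr;

      PetscFunctionBegin;
      ierr = DMPlexSetPartitionBalance(*dm, PETSC_TRUE);CHKERRQ(ierr);
      ierr = DMPlexDistribute(*dm, 0, NULL, &dmDist);CHKERRQ(ierr);
      if (dmDist) {
        ierr = DMDestroy(dm);CHKERRQ(ierr);
        *dm  = dmDist;
      }
      PetscFunctionReturn(0);
    }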

1354: typedef struct {
1355:   PetscInt vote, rank, index;
1356: } Petsc3Int;

1358: /* MaxLoc, but carry a third piece of information around */
1359: static void MaxLocCarry(void *in_, void *inout_, PetscMPIInt *len_, MPI_Datatype *dtype)
1360: {
1361:   Petsc3Int *a = (Petsc3Int *)inout_;
1362:   Petsc3Int *b = (Petsc3Int *)in_;
1363:   PetscInt i, len = *len_;
1364:   for (i = 0; i < len; i++) {
1365:     if (a[i].vote < b[i].vote) {
1366:       a[i].vote = b[i].vote;
1367:       a[i].rank = b[i].rank;
1368:       a[i].index = b[i].index;
1369:     } else if (a[i].vote <= b[i].vote) {
1370:       if (a[i].rank >= b[i].rank) {
1371:         a[i].rank = b[i].rank;
1372:         a[i].index = b[i].index;
1373:       }
1374:     }
1375:   }
1376: }

1378: /*@C
1379:   DMPlexCreatePointSF - Build a point SF from an SF describing a point migration

1381:   Input Parameters:
1382: + dm          - The source DMPlex object
1383: . migrationSF - The star forest that describes the parallel point remapping
1384: - ownership   - Flag causing a vote to determine point ownership

1386:   Output Parameter:
1387: . pointSF     - The star forest describing the point overlap in the remapped DM

1389:   Notes:
1390:   Output pointSF is guaranteed to have the array of local indices (leaves) sorted.

1392:   Level: developer

1394: .seealso: DMPlexDistribute(), DMPlexDistributeOverlap()
1395: @*/
1396: PetscErrorCode DMPlexCreatePointSF(DM dm, PetscSF migrationSF, PetscBool ownership, PetscSF *pointSF)
1397: {
1398:   PetscMPIInt        rank, size;
1399:   PetscInt           p, nroots, nleaves, idx, npointLeaves;
1400:   PetscInt          *pointLocal;
1401:   const PetscInt    *leaves;
1402:   const PetscSFNode *roots;
1403:   PetscSFNode       *rootNodes, *leafNodes, *pointRemote;
1404:   Vec                shifts;
1405:   const PetscInt     numShifts = 13759;
1406:   const PetscScalar *shift = NULL;
1407:   const PetscBool    shiftDebug = PETSC_FALSE;
1408:   PetscBool          balance;
1409:   PetscErrorCode     ierr;

1413:   MPI_Comm_rank(PetscObjectComm((PetscObject) dm), &rank);
1414:   MPI_Comm_size(PetscObjectComm((PetscObject) dm), &size);
1415:   PetscLogEventBegin(DMPLEX_CreatePointSF,dm,0,0,0);

1417:   DMPlexGetPartitionBalance(dm, &balance);
1418:   PetscSFGetGraph(migrationSF, &nroots, &nleaves, &leaves, &roots);
1419:   PetscMalloc2(nroots, &rootNodes, nleaves, &leafNodes);
1420:   if (ownership) {
1421:     MPI_Op       op;
1422:     MPI_Datatype datatype;
1423:     Petsc3Int   *rootVote = NULL, *leafVote = NULL;
1424:     /* If balancing, we compute a random cyclic shift of the rank for each remote point. That way, the max will evenly distribute among ranks. */
1425:     if (balance) {
1426:       PetscRandom r;

1428:       PetscRandomCreate(PETSC_COMM_SELF, &r);
1429:       PetscRandomSetInterval(r, 0, 2467*size);
1430:       VecCreate(PETSC_COMM_SELF, &shifts);
1431:       VecSetSizes(shifts, numShifts, numShifts);
1432:       VecSetType(shifts, VECSTANDARD);
1433:       VecSetRandom(shifts, r);
1434:       PetscRandomDestroy(&r);
1435:       VecGetArrayRead(shifts, &shift);
1436:     }

1438:     PetscMalloc1(nroots, &rootVote);
1439:     PetscMalloc1(nleaves, &leafVote);
1440:     /* Point ownership vote: the process with the highest (shifted) rank owns shared points */
1441:     for (p = 0; p < nleaves; ++p) {
1442:       if (shiftDebug) {
1443:         PetscSynchronizedPrintf(PetscObjectComm((PetscObject) dm), "[%d] Point %D RemotePoint %D Shift %D MyRank %D\n", rank, leaves ? leaves[p] : p, roots[p].index, (PetscInt) PetscRealPart(shift[roots[p].index%numShifts]), (rank + (shift ? (PetscInt) PetscRealPart(shift[roots[p].index%numShifts]) : 0))%size);
1444:       }
1445:       /* Either put in a bid or we know we own it */
1446:       leafVote[p].vote  = (rank + (shift ? (PetscInt) PetscRealPart(shift[roots[p].index%numShifts]) : 0))%size;
1447:       leafVote[p].rank = rank;
1448:       leafVote[p].index = p;
1449:     }
1450:     for (p = 0; p < nroots; p++) {
1451:       /* Roots must not participate in the reduction; flag them so that the MAXLOC vote ignores them */
1452:       rootVote[p].vote  = -3;
1453:       rootVote[p].rank  = -3;
1454:       rootVote[p].index = -3;
1455:     }
1456:     MPI_Type_contiguous(3, MPIU_INT, &datatype);
1457:     MPI_Type_commit(&datatype);
1458:     MPI_Op_create(&MaxLocCarry, 1, &op);
1459:     PetscSFReduceBegin(migrationSF, datatype, leafVote, rootVote, op);
1460:     PetscSFReduceEnd(migrationSF, datatype, leafVote, rootVote, op);
1461:     MPI_Op_free(&op);
1462:     MPI_Type_free(&datatype);
1463:     for (p = 0; p < nroots; p++) {
1464:       rootNodes[p].rank = rootVote[p].rank;
1465:       rootNodes[p].index = rootVote[p].index;
1466:     }
1467:     PetscFree(leafVote);
1468:     PetscFree(rootVote);
1469:   } else {
1470:     for (p = 0; p < nroots; p++) {
1471:       rootNodes[p].index = -1;
1472:       rootNodes[p].rank = rank;
1473:     }
1474:     for (p = 0; p < nleaves; p++) {
1475:       /* Write new local id into old location */
1476:       if (roots[p].rank == rank) {
1477:         rootNodes[roots[p].index].index = leaves ? leaves[p] : p;
1478:       }
1479:     }
1480:   }
1481:   PetscSFBcastBegin(migrationSF, MPIU_2INT, rootNodes, leafNodes);
1482:   PetscSFBcastEnd(migrationSF, MPIU_2INT, rootNodes, leafNodes);

1484:   for (npointLeaves = 0, p = 0; p < nleaves; p++) {
1485:     if (leafNodes[p].rank != rank) npointLeaves++;
1486:   }
1487:   PetscMalloc1(npointLeaves, &pointLocal);
1488:   PetscMalloc1(npointLeaves, &pointRemote);
1489:   for (idx = 0, p = 0; p < nleaves; p++) {
1490:     if (leafNodes[p].rank != rank) {
1491:       /* Note that pointLocal is automatically sorted, as it is a sublist of 0, ..., nleaves-1 */
1492:       pointLocal[idx] = p;
1493:       pointRemote[idx] = leafNodes[p];
1494:       idx++;
1495:     }
1496:   }
1497:   if (shift) {
1498:     VecRestoreArrayRead(shifts, &shift);
1499:     VecDestroy(&shifts);
1500:   }
1501:   if (shiftDebug) {PetscSynchronizedFlush(PetscObjectComm((PetscObject) dm), PETSC_STDOUT);}
1502:   PetscSFCreate(PetscObjectComm((PetscObject) dm), pointSF);
1503:   PetscSFSetFromOptions(*pointSF);
1504:   PetscSFSetGraph(*pointSF, nleaves, npointLeaves, pointLocal, PETSC_OWN_POINTER, pointRemote, PETSC_OWN_POINTER);
1505:   PetscFree2(rootNodes, leafNodes);
1506:   PetscLogEventEnd(DMPLEX_CreatePointSF,dm,0,0,0);
1507:   return(0);
1508: }
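/*
  Usage sketch (not part of this file): the pattern used by DMPlexDistribute() and
  DMPlexDistributeOverlap() below -- migrate a mesh over a migration SF, then rebuild
  the point SF of the target mesh with an ownership vote. MigrateAndBuildPointSF is a
  hypothetical helper name.
*/
#include <petscdmplex.h>

static PetscErrorCode MigrateAndBuildPointSF(DM dm, PetscSF sfMigration, DM dmTarget)
{
  PetscSF        sfPoint;
  PetscErrorCode ierr;

  PetscFunctionBeginUser;
  ierr = DMPlexMigrate(dm, sfMigration, dmTarget);CHKERRQ(ierr);
  ierr = DMPlexCreatePointSF(dmTarget, sfMigration, PETSC_TRUE, &sfPoint);CHKERRQ(ierr);
  ierr = DMSetPointSF(dmTarget, sfPoint);CHKERRQ(ierr);
  ierr = PetscSFDestroy(&sfPoint);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}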

1510: /*@C
1511:   DMPlexMigrate - Migrates internal DM data over the supplied star forest

1513:   Collective on dm

1515:   Input Parameters:
1516: + dm       - The source DMPlex object
1517: - sf       - The star forest communication context describing the migration pattern

1519:   Output Parameter:
1520: . targetDM - The target DMPlex object

1522:   Level: intermediate

1524: .seealso: DMPlexDistribute(), DMPlexDistributeOverlap()
1525: @*/
1526: PetscErrorCode DMPlexMigrate(DM dm, PetscSF sf, DM targetDM)
1527: {
1528:   MPI_Comm               comm;
1529:   PetscInt               dim, cdim, nroots;
1530:   PetscSF                sfPoint;
1531:   ISLocalToGlobalMapping ltogMigration;
1532:   ISLocalToGlobalMapping ltogOriginal = NULL;
1533:   PetscBool              flg;
1534:   PetscErrorCode         ierr;

1538:   PetscLogEventBegin(DMPLEX_Migrate, dm, 0, 0, 0);
1539:   PetscObjectGetComm((PetscObject) dm, &comm);
1540:   DMGetDimension(dm, &dim);
1541:   DMSetDimension(targetDM, dim);
1542:   DMGetCoordinateDim(dm, &cdim);
1543:   DMSetCoordinateDim(targetDM, cdim);

1545:   /* Check for a one-to-all distribution pattern */
1546:   DMGetPointSF(dm, &sfPoint);
1547:   PetscSFGetGraph(sfPoint, &nroots, NULL, NULL, NULL);
1548:   if (nroots >= 0) {
1549:     IS        isOriginal;
1550:     PetscInt  n, size, nleaves;
1551:     PetscInt  *numbering_orig, *numbering_new;

1553:     /* Get the original point numbering */
1554:     DMPlexCreatePointNumbering(dm, &isOriginal);
1555:     ISLocalToGlobalMappingCreateIS(isOriginal, &ltogOriginal);
1556:     ISLocalToGlobalMappingGetSize(ltogOriginal, &size);
1557:     ISLocalToGlobalMappingGetBlockIndices(ltogOriginal, (const PetscInt**)&numbering_orig);
1558:     /* Convert to positive global numbers */
1559:     for (n=0; n<size; n++) {if (numbering_orig[n] < 0) numbering_orig[n] = -(numbering_orig[n]+1);}
1560:     /* Derive the new local-to-global mapping from the old one */
1561:     PetscSFGetGraph(sf, NULL, &nleaves, NULL, NULL);
1562:     PetscMalloc1(nleaves, &numbering_new);
1563:     PetscSFBcastBegin(sf, MPIU_INT, numbering_orig, numbering_new);
1564:     PetscSFBcastEnd(sf, MPIU_INT, numbering_orig, numbering_new);
1565:     ISLocalToGlobalMappingCreate(comm, 1, nleaves, numbering_new, PETSC_OWN_POINTER, &ltogMigration);
1566:     ISLocalToGlobalMappingRestoreBlockIndices(ltogOriginal, (const PetscInt**)&numbering_orig);
1567:     ISDestroy(&isOriginal);
1568:   } else {
1569:     /* One-to-all distribution pattern: We can derive LToG from SF */
1570:     ISLocalToGlobalMappingCreateSF(sf, 0, &ltogMigration);
1571:   }
1572:   PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-partition_view", &flg);
1573:   if (flg) {
1574:     PetscPrintf(comm, "Point renumbering for DM migration:\n");
1575:     ISLocalToGlobalMappingView(ltogMigration, NULL);
1576:   }
1577:   /* Migrate DM data to target DM */
1578:   DMPlexDistributeCones(dm, sf, ltogOriginal, ltogMigration, targetDM);
1579:   DMPlexDistributeLabels(dm, sf, targetDM);
1580:   DMPlexDistributeCoordinates(dm, sf, targetDM);
1581:   DMPlexDistributeSetupHybrid(dm, sf, ltogMigration, targetDM);
1582:   DMPlexDistributeSetupTree(dm, sf, ltogOriginal, ltogMigration, targetDM);
1583:   ISLocalToGlobalMappingDestroy(&ltogOriginal);
1584:   ISLocalToGlobalMappingDestroy(&ltogMigration);
1585:   PetscLogEventEnd(DMPLEX_Migrate, dm, 0, 0, 0);
1586:   return(0);
1587: }

1589: /*@C
1590:   DMPlexDistribute - Distributes the mesh and any associated sections.

1592:   Collective on dm

1594:   Input Parameters:
1595: + dm  - The original DMPlex object
1596: - overlap - The overlap of partitions, 0 is the default

1598:   Output Parameters:
1599: + sf - The PetscSF used for point distribution, or NULL if not needed
1600: - dmParallel - The distributed DMPlex object

1602:   Note: If the mesh was not distributed, the output dmParallel will be NULL.

1604:   The user can control the definition of adjacency for the mesh using DMSetAdjacency(). They should choose the combination appropriate for the function
1605:   representation on the mesh.

1607:   Level: intermediate

1609: .seealso: DMPlexCreate(), DMSetAdjacency(), DMPlexGetOverlap()
1610: @*/
1611: PetscErrorCode DMPlexDistribute(DM dm, PetscInt overlap, PetscSF *sf, DM *dmParallel)
1612: {
1613:   MPI_Comm               comm;
1614:   PetscPartitioner       partitioner;
1615:   IS                     cellPart;
1616:   PetscSection           cellPartSection;
1617:   DM                     dmCoord;
1618:   DMLabel                lblPartition, lblMigration;
1619:   PetscSF                sfMigration, sfStratified, sfPoint;
1620:   PetscBool              flg, balance;
1621:   PetscMPIInt            rank, size;
1622:   PetscErrorCode         ierr;


1630:   if (sf) *sf = NULL;
1631:   *dmParallel = NULL;
1632:   PetscObjectGetComm((PetscObject)dm,&comm);
1633:   MPI_Comm_rank(comm, &rank);
1634:   MPI_Comm_size(comm, &size);
1635:   if (size == 1) return(0);

1637:   PetscLogEventBegin(DMPLEX_Distribute,dm,0,0,0);
1638:   /* Create cell partition */
1639:   PetscLogEventBegin(DMPLEX_Partition,dm,0,0,0);
1640:   PetscSectionCreate(comm, &cellPartSection);
1641:   DMPlexGetPartitioner(dm, &partitioner);
1642:   PetscPartitionerDMPlexPartition(partitioner, dm, NULL, cellPartSection, &cellPart);
1643:   PetscLogEventBegin(DMPLEX_PartSelf,dm,0,0,0);
1644:   {
1645:     /* Convert partition to DMLabel */
1646:     IS             is;
1647:     PetscHSetI     ht;
1648:     const PetscInt *points;
1649:     PetscInt       *iranks;
1650:     PetscInt       pStart, pEnd, proc, npoints, poff = 0, nranks;

1652:     DMLabelCreate(PETSC_COMM_SELF, "Point Partition", &lblPartition);
1653:     /* Preallocate strata */
1654:     PetscHSetICreate(&ht);
1655:     PetscSectionGetChart(cellPartSection, &pStart, &pEnd);
1656:     for (proc = pStart; proc < pEnd; proc++) {
1657:       PetscSectionGetDof(cellPartSection, proc, &npoints);
1658:       if (npoints) {PetscHSetIAdd(ht, proc);}
1659:     }
1660:     PetscHSetIGetSize(ht, &nranks);
1661:     PetscMalloc1(nranks, &iranks);
1662:     PetscHSetIGetElems(ht, &poff, iranks);
1663:     PetscHSetIDestroy(&ht);
1664:     DMLabelAddStrata(lblPartition, nranks, iranks);
1665:     PetscFree(iranks);
1666:     /* Inline DMPlexPartitionLabelClosure() */
1667:     ISGetIndices(cellPart, &points);
1668:     PetscSectionGetChart(cellPartSection, &pStart, &pEnd);
1669:     for (proc = pStart; proc < pEnd; proc++) {
1670:       PetscSectionGetDof(cellPartSection, proc, &npoints);
1671:       if (!npoints) continue;
1672:       PetscSectionGetOffset(cellPartSection, proc, &poff);
1673:       DMPlexClosurePoints_Private(dm, npoints, points+poff, &is);
1674:       DMLabelSetStratumIS(lblPartition, proc, is);
1675:       ISDestroy(&is);
1676:     }
1677:     ISRestoreIndices(cellPart, &points);
1678:   }
1679:   PetscLogEventEnd(DMPLEX_PartSelf,dm,0,0,0);

1681:   DMLabelCreate(PETSC_COMM_SELF, "Point migration", &lblMigration);
1682:   DMPlexPartitionLabelInvert(dm, lblPartition, NULL, lblMigration);
1683:   DMPlexPartitionLabelCreateSF(dm, lblMigration, &sfMigration);
1684:   DMPlexStratifyMigrationSF(dm, sfMigration, &sfStratified);
1685:   PetscSFDestroy(&sfMigration);
1686:   sfMigration = sfStratified;
1687:   PetscSFSetUp(sfMigration);
1688:   PetscLogEventEnd(DMPLEX_Partition,dm,0,0,0);
1689:   PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-partition_view", &flg);
1690:   if (flg) {
1691:     DMLabelView(lblPartition, PETSC_VIEWER_STDOUT_(comm));
1692:     PetscSFView(sfMigration, PETSC_VIEWER_STDOUT_(comm));
1693:   }

1695:   /* Create non-overlapping parallel DM and migrate internal data */
1696:   DMPlexCreate(comm, dmParallel);
1697:   PetscObjectSetName((PetscObject) *dmParallel, "Parallel Mesh");
1698:   DMPlexMigrate(dm, sfMigration, *dmParallel);

1700:   /* Build the point SF without overlap */
1701:   DMPlexGetPartitionBalance(dm, &balance);
1702:   DMPlexSetPartitionBalance(*dmParallel, balance);
1703:   DMPlexCreatePointSF(*dmParallel, sfMigration, PETSC_TRUE, &sfPoint);
1704:   DMSetPointSF(*dmParallel, sfPoint);
1705:   DMGetCoordinateDM(*dmParallel, &dmCoord);
1706:   if (dmCoord) {DMSetPointSF(dmCoord, sfPoint);}
1707:   if (flg) {PetscSFView(sfPoint, NULL);}

1709:   if (overlap > 0) {
1710:     DM                 dmOverlap;
1711:     PetscInt           nroots, nleaves, noldleaves, l;
1712:     const PetscInt    *oldLeaves;
1713:     PetscSFNode       *newRemote, *permRemote;
1714:     const PetscSFNode *oldRemote;
1715:     PetscSF            sfOverlap, sfOverlapPoint;

1717:     /* Add the partition overlap to the distributed DM */
1718:     DMPlexDistributeOverlap(*dmParallel, overlap, &sfOverlap, &dmOverlap);
1719:     DMDestroy(dmParallel);
1720:     *dmParallel = dmOverlap;
1721:     if (flg) {
1722:       PetscPrintf(comm, "Overlap Migration SF:\n");
1723:       PetscSFView(sfOverlap, NULL);
1724:     }

1726:     /* Re-map the migration SF to establish the full migration pattern */
1727:     PetscSFGetGraph(sfMigration, &nroots, &noldleaves, &oldLeaves, &oldRemote);
1728:     PetscSFGetGraph(sfOverlap, NULL, &nleaves, NULL, NULL);
1729:     PetscMalloc1(nleaves, &newRemote);
1730:     /* oldRemote: original root point mapping to original leaf point
1731:        newRemote: original leaf point mapping to overlapped leaf point */
1732:     if (oldLeaves) {
1733:       /* After stratification, the migration remotes may not be in root (canonical) order, so we reorder using the leaf numbering */
1734:       PetscMalloc1(noldleaves, &permRemote);
1735:       for (l = 0; l < noldleaves; ++l) permRemote[oldLeaves[l]] = oldRemote[l];
1736:       oldRemote = permRemote;
1737:     }
1738:     PetscSFBcastBegin(sfOverlap, MPIU_2INT, oldRemote, newRemote);
1739:     PetscSFBcastEnd(sfOverlap, MPIU_2INT, oldRemote, newRemote);
1740:     if (oldLeaves) {PetscFree(oldRemote);}
1741:     PetscSFCreate(comm, &sfOverlapPoint);
1742:     PetscSFSetGraph(sfOverlapPoint, nroots, nleaves, NULL, PETSC_OWN_POINTER, newRemote, PETSC_OWN_POINTER);
1743:     PetscSFDestroy(&sfOverlap);
1744:     PetscSFDestroy(&sfMigration);
1745:     sfMigration = sfOverlapPoint;
1746:   }
1747:   /* Cleanup Partition */
1748:   DMLabelDestroy(&lblPartition);
1749:   DMLabelDestroy(&lblMigration);
1750:   PetscSectionDestroy(&cellPartSection);
1751:   ISDestroy(&cellPart);
1752:   /* Copy BC */
1753:   DMCopyBoundary(dm, *dmParallel);
1754:   /* Create sfNatural */
1755:   if (dm->useNatural) {
1756:     PetscSection section;

1758:     DMGetLocalSection(dm, &section);
1759:     DMPlexCreateGlobalToNaturalSF(*dmParallel, section, sfMigration, &(*dmParallel)->sfNatural);
1760:     DMSetUseNatural(*dmParallel, PETSC_TRUE);
1761:   }
1762:   /* Cleanup */
1763:   if (sf) {*sf = sfMigration;}
1764:   else    {PetscSFDestroy(&sfMigration);}
1765:   PetscSFDestroy(&sfPoint);
1766:   PetscLogEventEnd(DMPLEX_Distribute,dm,0,0,0);
1767:   return(0);
1768: }
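/*
  Usage sketch (not part of this file): create a serial simplex box mesh, pick FEM-style
  adjacency, and distribute it with one layer of cell overlap. The mesh dimension and
  adjacency choice are illustrative assumptions; run with -dm_view to inspect the result.
*/
#include <petscdmplex.h>

int main(int argc, char **argv)
{
  DM             dm, dmDist = NULL;
  PetscSF        sfDist = NULL;
  PetscErrorCode ierr;

  ierr = PetscInitialize(&argc, &argv, NULL, NULL);if (ierr) return ierr;
  ierr = DMPlexCreateBoxMesh(PETSC_COMM_WORLD, 2, PETSC_TRUE, NULL, NULL, NULL, NULL, PETSC_TRUE, &dm);CHKERRQ(ierr);
  ierr = DMSetAdjacency(dm, PETSC_DEFAULT, PETSC_FALSE, PETSC_TRUE);CHKERRQ(ierr); /* FEM: adjacency through the closure */
  ierr = DMPlexDistribute(dm, 1, &sfDist, &dmDist);CHKERRQ(ierr);
  if (dmDist) { /* NULL on a single process: keep the original mesh */
    ierr = DMDestroy(&dm);CHKERRQ(ierr);
    dm   = dmDist;
  }
  ierr = PetscSFDestroy(&sfDist);CHKERRQ(ierr);
  ierr = DMViewFromOptions(dm, NULL, "-dm_view");CHKERRQ(ierr);
  ierr = DMDestroy(&dm);CHKERRQ(ierr);
  ierr = PetscFinalize();
  return ierr;
}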

1770: /*@C
1771:   DMPlexDistributeOverlap - Add partition overlap to a distributed non-overlapping DM.

1773:   Collective on dm

1775:   Input Parameters:
1776: + dm  - The non-overlapping distributed DMPlex object
1777: - overlap - The overlap of partitions (the same on all ranks)

1779:   Output Parameters:
1780: + sf - The PetscSF used for point distribution
1781: - dmOverlap - The overlapping distributed DMPlex object, or NULL

1783:   Notes:
1784:   If the mesh was not distributed, the output dmOverlap will be NULL.

1786:   The user can control the definition of adjacency for the mesh using DMSetAdjacency(). They should choose the combination appropriate for the function
1787:   representation on the mesh.

1789:   Level: advanced

1791: .seealso: DMPlexCreate(), DMSetAdjacency(), DMPlexDistribute(), DMPlexCreateOverlapLabel(), DMPlexGetOverlap()
1792: @*/
1793: PetscErrorCode DMPlexDistributeOverlap(DM dm, PetscInt overlap, PetscSF *sf, DM *dmOverlap)
1794: {
1795:   MPI_Comm               comm;
1796:   PetscMPIInt            size, rank;
1797:   PetscSection           rootSection, leafSection;
1798:   IS                     rootrank, leafrank;
1799:   DM                     dmCoord;
1800:   DMLabel                lblOverlap;
1801:   PetscSF                sfOverlap, sfStratified, sfPoint;
1802:   PetscErrorCode         ierr;


1810:   if (sf) *sf = NULL;
1811:   *dmOverlap  = NULL;
1812:   PetscObjectGetComm((PetscObject)dm,&comm);
1813:   MPI_Comm_size(comm, &size);
1814:   MPI_Comm_rank(comm, &rank);
1815:   if (size == 1) return(0);

1817:   PetscLogEventBegin(DMPLEX_DistributeOverlap, dm, 0, 0, 0);
1818:   /* Compute point overlap with neighbouring processes on the distributed DM */
1819:   PetscLogEventBegin(DMPLEX_Partition,dm,0,0,0);
1820:   PetscSectionCreate(comm, &rootSection);
1821:   PetscSectionCreate(comm, &leafSection);
1822:   DMPlexDistributeOwnership(dm, rootSection, &rootrank, leafSection, &leafrank);
1823:   DMPlexCreateOverlapLabel(dm, overlap, rootSection, rootrank, leafSection, leafrank, &lblOverlap);
1824:   /* Convert overlap label to stratified migration SF */
1825:   DMPlexPartitionLabelCreateSF(dm, lblOverlap, &sfOverlap);
1826:   DMPlexStratifyMigrationSF(dm, sfOverlap, &sfStratified);
1827:   PetscSFDestroy(&sfOverlap);
1828:   sfOverlap = sfStratified;
1829:   PetscObjectSetName((PetscObject) sfOverlap, "Overlap SF");
1830:   PetscSFSetFromOptions(sfOverlap);

1832:   PetscSectionDestroy(&rootSection);
1833:   PetscSectionDestroy(&leafSection);
1834:   ISDestroy(&rootrank);
1835:   ISDestroy(&leafrank);
1836:   PetscLogEventEnd(DMPLEX_Partition,dm,0,0,0);

1838:   /* Build the overlapping DM */
1839:   DMPlexCreate(comm, dmOverlap);
1840:   PetscObjectSetName((PetscObject) *dmOverlap, "Parallel Mesh");
1841:   DMPlexMigrate(dm, sfOverlap, *dmOverlap);
1842:   /* Store the overlap in the new DM */
1843:   ((DM_Plex*)(*dmOverlap)->data)->overlap = overlap + ((DM_Plex*)dm->data)->overlap;
1844:   /* Build the new point SF */
1845:   DMPlexCreatePointSF(*dmOverlap, sfOverlap, PETSC_FALSE, &sfPoint);
1846:   DMSetPointSF(*dmOverlap, sfPoint);
1847:   DMGetCoordinateDM(*dmOverlap, &dmCoord);
1848:   if (dmCoord) {DMSetPointSF(dmCoord, sfPoint);}
1849:   PetscSFDestroy(&sfPoint);
1850:   /* Cleanup overlap partition */
1851:   DMLabelDestroy(&lblOverlap);
1852:   if (sf) *sf = sfOverlap;
1853:   else    {PetscSFDestroy(&sfOverlap);}
1854:   PetscLogEventEnd(DMPLEX_DistributeOverlap, dm, 0, 0, 0);
1855:   return(0);
1856: }
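/*
  Usage sketch (not part of this file): add one extra layer of overlap to an
  already-distributed mesh and replace the caller's DM with the overlapping one.
  AddOneOverlapLayer is a hypothetical helper name; the overlap recorded in the new DM
  accumulates, as queried by DMPlexGetOverlap() below.
*/
#include <petscdmplex.h>

static PetscErrorCode AddOneOverlapLayer(DM *dm)
{
  DM             dmOverlap = NULL;
  PetscInt       overlap;
  PetscErrorCode ierr;

  PetscFunctionBeginUser;
  ierr = DMPlexDistributeOverlap(*dm, 1, NULL, &dmOverlap);CHKERRQ(ierr);
  if (dmOverlap) { /* NULL on a single process */
    ierr = DMDestroy(dm);CHKERRQ(ierr);
    *dm  = dmOverlap;
    ierr = DMPlexGetOverlap(*dm, &overlap);CHKERRQ(ierr);
    ierr = PetscPrintf(PetscObjectComm((PetscObject) *dm), "Mesh now carries %D overlap layer(s)\n", overlap);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}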

1858: PetscErrorCode DMPlexGetOverlap_Plex(DM dm, PetscInt *overlap)
1859: {
1860:   DM_Plex        *mesh  = (DM_Plex*) dm->data;

1863:   *overlap = mesh->overlap;
1864:   return(0);
1865: }

1867: /*@
1868:   DMPlexGetOverlap - Get the DMPlex partition overlap.

1870:   Not collective

1872:   Input Parameter:
1873: . dm - The DM

1875:   Output Parameter:
1876: . overlap - The overlap of this DM

1878:   Level: intermediate

1880: .seealso: DMPlexDistribute(), DMPlexDistributeOverlap(), DMPlexCreateOverlapLabel()
1881: @*/
1882: PetscErrorCode DMPlexGetOverlap(DM dm, PetscInt *overlap)
1883: {
1884:   PetscErrorCode     ierr;

1888:   PetscUseMethod(dm,"DMPlexGetOverlap_C",(DM,PetscInt*),(dm,overlap));
1889:   return(0);
1890: }


1893: /*@C
1894:   DMPlexGetGatherDM - Get a copy of the DMPlex that gathers all points on the
1895:   root process of the original's communicator.

1897:   Collective on dm

1899:   Input Parameter:
1900: . dm - the original DMPlex object

1902:   Output Parameters:
1903: + sf - the PetscSF used for point distribution (optional)
1904: - gatherMesh - the gathered DM object, or NULL

1906:   Level: intermediate

1908: .seealso: DMPlexDistribute(), DMPlexGetRedundantDM()
1909: @*/
1910: PetscErrorCode DMPlexGetGatherDM(DM dm, PetscSF *sf, DM *gatherMesh)
1911: {
1912:   MPI_Comm       comm;
1913:   PetscMPIInt    size;
1914:   PetscPartitioner oldPart, gatherPart;

1920:   *gatherMesh = NULL;
1921:   if (sf) *sf = NULL;
1922:   comm = PetscObjectComm((PetscObject)dm);
1923:   MPI_Comm_size(comm,&size);
1924:   if (size == 1) return(0);
1925:   DMPlexGetPartitioner(dm,&oldPart);
1926:   PetscObjectReference((PetscObject)oldPart);
1927:   PetscPartitionerCreate(comm,&gatherPart);
1928:   PetscPartitionerSetType(gatherPart,PETSCPARTITIONERGATHER);
1929:   DMPlexSetPartitioner(dm,gatherPart);
1930:   DMPlexDistribute(dm,0,sf,gatherMesh);

1932:   DMPlexSetPartitioner(dm,oldPart);
1933:   PetscPartitionerDestroy(&gatherPart);
1934:   PetscPartitionerDestroy(&oldPart);
1935:   return(0);
1936: }
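/*
  Usage sketch (not part of this file): gather the whole mesh onto the root process, e.g.
  for serial post-processing. GatherAndReport is a hypothetical helper name. The call is
  collective; with the gather partitioner all points typically end up on rank 0, so the
  other ranks see an empty chart.
*/
#include <petscdmplex.h>

static PetscErrorCode GatherAndReport(DM dm)
{
  DM             gatherDM = NULL;
  PetscInt       pStart, pEnd;
  PetscMPIInt    rank;
  MPI_Comm       comm;
  PetscErrorCode ierr;

  PetscFunctionBeginUser;
  ierr = PetscObjectGetComm((PetscObject) dm, &comm);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm, &rank);CHKERRQ(ierr);
  ierr = DMPlexGetGatherDM(dm, NULL, &gatherDM);CHKERRQ(ierr);
  if (gatherDM) { /* NULL on a single process */
    ierr = DMPlexGetChart(gatherDM, &pStart, &pEnd);CHKERRQ(ierr);
    ierr = PetscSynchronizedPrintf(comm, "[%d] gathered chart [%D, %D)\n", rank, pStart, pEnd);CHKERRQ(ierr);
    ierr = PetscSynchronizedFlush(comm, PETSC_STDOUT);CHKERRQ(ierr);
    ierr = DMDestroy(&gatherDM);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}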

1938: /*@C
1939:   DMPlexGetRedundantDM - Get a copy of the DMPlex that is completely copied on each process.

1941:   Collective on dm

1943:   Input Parameter:
1944: . dm - the original DMPlex object

1946:   Output Parameters:
1947: + sf - the PetscSF used for point distribution (optional)
1948: - redundantMesh - the redundant DM object, or NULL

1950:   Level: intermediate

1952: .seealso: DMPlexDistribute(), DMPlexGetGatherDM()
1953: @*/
1954: PetscErrorCode DMPlexGetRedundantDM(DM dm, PetscSF *sf, DM *redundantMesh)
1955: {
1956:   MPI_Comm       comm;
1957:   PetscMPIInt    size, rank;
1958:   PetscInt       pStart, pEnd, p;
1959:   PetscInt       numPoints = -1;
1960:   PetscSF        migrationSF, sfPoint, gatherSF;
1961:   DM             gatherDM, dmCoord;
1962:   PetscSFNode    *points;

1968:   *redundantMesh = NULL;
1969:   comm = PetscObjectComm((PetscObject)dm);
1970:   MPI_Comm_size(comm,&size);
1971:   if (size == 1) {
1972:     PetscObjectReference((PetscObject) dm);
1973:     *redundantMesh = dm;
1974:     if (sf) *sf = NULL;
1975:     return(0);
1976:   }
1977:   DMPlexGetGatherDM(dm,&gatherSF,&gatherDM);
1978:   if (!gatherDM) return(0);
1979:   MPI_Comm_rank(comm,&rank);
1980:   DMPlexGetChart(gatherDM,&pStart,&pEnd);
1981:   numPoints = pEnd - pStart;
1982:   MPI_Bcast(&numPoints,1,MPIU_INT,0,comm);
1983:   PetscMalloc1(numPoints,&points);
1984:   PetscSFCreate(comm,&migrationSF);
1985:   for (p = 0; p < numPoints; p++) {
1986:     points[p].index = p;
1987:     points[p].rank  = 0;
1988:   }
1989:   PetscSFSetGraph(migrationSF,pEnd-pStart,numPoints,NULL,PETSC_OWN_POINTER,points,PETSC_OWN_POINTER);
1990:   DMPlexCreate(comm, redundantMesh);
1991:   PetscObjectSetName((PetscObject) *redundantMesh, "Redundant Mesh");
1992:   DMPlexMigrate(gatherDM, migrationSF, *redundantMesh);
1993:   DMPlexCreatePointSF(*redundantMesh, migrationSF, PETSC_FALSE, &sfPoint);
1994:   DMSetPointSF(*redundantMesh, sfPoint);
1995:   DMGetCoordinateDM(*redundantMesh, &dmCoord);
1996:   if (dmCoord) {DMSetPointSF(dmCoord, sfPoint);}
1997:   PetscSFDestroy(&sfPoint);
1998:   if (sf) {
1999:     PetscSF tsf;

2001:     PetscSFCompose(gatherSF,migrationSF,&tsf);
2002:     DMPlexStratifyMigrationSF(dm, tsf, sf);
2003:     PetscSFDestroy(&tsf);
2004:   }
2005:   PetscSFDestroy(&migrationSF);
2006:   PetscSFDestroy(&gatherSF);
2007:   DMDestroy(&gatherDM);
2008:   return(0);
2009: }
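/*
  Usage sketch (not part of this file): replicate a (small) mesh on every process, e.g.
  to serve as a redundant coarse grid. ReplicateEverywhere is a hypothetical helper name;
  the viewer option name is also illustrative.
*/
#include <petscdmplex.h>

static PetscErrorCode ReplicateEverywhere(DM dm, DM *dmRedundant)
{
  PetscErrorCode ierr;

  PetscFunctionBeginUser;
  ierr = DMPlexGetRedundantDM(dm, NULL, dmRedundant);CHKERRQ(ierr);
  if (*dmRedundant) {
    ierr = DMViewFromOptions(*dmRedundant, NULL, "-redundant_dm_view");CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}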

2011: /*@
2012:   DMPlexIsDistributed - Find out whether this DM is distributed, i.e. more than one rank owns some points.

2014:   Collective

2016:   Input Parameter:
2017: . dm      - The DM object

2019:   Output Parameter:
2020: . distributed - Flag whether the DM is distributed

2022:   Level: intermediate

2024:   Notes:
2025:   This currently finds out whether at least two ranks have any DAG points.
2026:   This involves MPI_Allreduce() with one integer.
2027:   The result is currently not cached, so every call to this routine performs this global communication.

2029: .seealso: DMPlexDistribute(), DMPlexGetOverlap(), DMPlexIsInterpolated()
2030: @*/
2031: PetscErrorCode DMPlexIsDistributed(DM dm, PetscBool *distributed)
2032: {
2033:   PetscInt          pStart, pEnd, count;
2034:   MPI_Comm          comm;
2035:   PetscErrorCode    ierr;

2040:   PetscObjectGetComm((PetscObject)dm,&comm);
2041:   DMPlexGetChart(dm, &pStart, &pEnd);
2042:   count = !!(pEnd - pStart);
2043:   MPI_Allreduce(MPI_IN_PLACE, &count, 1, MPIU_INT, MPI_SUM, comm);
2044:   *distributed = count > 1 ? PETSC_TRUE : PETSC_FALSE;
2045:   return(0);
2046: }
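/*
  Usage sketch (not part of this file): query the distribution state and distribute only
  when the mesh is still serial. DistributeIfNeeded is a hypothetical helper name; note
  that DMPlexIsDistributed() is collective, so every rank must call it.
*/
#include <petscdmplex.h>

static PetscErrorCode DistributeIfNeeded(DM *dm)
{
  PetscBool      distributed;
  DM             dmDist = NULL;
  PetscErrorCode ierr;

  PetscFunctionBeginUser;
  ierr = DMPlexIsDistributed(*dm, &distributed);CHKERRQ(ierr);
  if (!distributed) {
    ierr = DMPlexDistribute(*dm, 0, NULL, &dmDist);CHKERRQ(ierr);
    if (dmDist) {
      ierr = DMDestroy(dm);CHKERRQ(ierr);
      *dm  = dmDist;
    }
  }
  PetscFunctionReturn(0);
}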