Actual source code: plexdistribute.c

petsc-master 2017-03-28
  1:  #include <petsc/private/dmpleximpl.h>
  2:  #include <petsc/private/dmlabelimpl.h>

  4: /*@
  5:   DMPlexSetAdjacencyUseCone - Define adjacency in the mesh using either the cone or the support first

  7:   Input Parameters:
  8: + dm      - The DM object
  9: - useCone - Flag to use the cone first

 11:   Level: intermediate

 13:   Notes:
 14: $     FEM:   Two points p and q are adjacent if q \in closure(star(p)),   useCone = PETSC_FALSE, useClosure = PETSC_TRUE
 15: $     FVM:   Two points p and q are adjacent if q \in support(p+cone(p)), useCone = PETSC_TRUE,  useClosure = PETSC_FALSE
 16: $     FVM++: Two points p and q are adjacent if q \in star(closure(p)),   useCone = PETSC_TRUE,  useClosure = PETSC_TRUE

 18: .seealso: DMPlexGetAdjacencyUseCone(), DMPlexSetAdjacencyUseClosure(), DMPlexGetAdjacencyUseClosure(), DMPlexDistribute(), DMPlexPreallocateOperator()
 19: @*/
 20: PetscErrorCode DMPlexSetAdjacencyUseCone(DM dm, PetscBool useCone)
 21: {
 22:   DM_Plex *mesh = (DM_Plex *) dm->data;

 26:   mesh->useCone = useCone;
 27:   return(0);
 28: }
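
/* Illustrative usage sketch added for exposition (not part of plexdistribute.c):
   select FVM-style adjacency, q \in support(p+cone(p)), before distribution or
   preallocation. The helper name is hypothetical; it assumes dm is an existing
   DMPlex. */
static PetscErrorCode ExampleSelectFVMAdjacency(DM dm)
{
  DMPlexSetAdjacencyUseCone(dm, PETSC_TRUE);     /* start adjacency from the cone */
  DMPlexSetAdjacencyUseClosure(dm, PETSC_FALSE); /* do not take the transitive closure */
  return(0);
}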

 30: /*@
 31:   DMPlexGetAdjacencyUseCone - Query whether adjacency in the mesh uses the cone or the support first

 33:   Input Parameter:
 34: . dm      - The DM object

 36:   Output Parameter:
 37: . useCone - Flag to use the cone first

 39:   Level: intermediate

 41:   Notes:
 42: $     FEM:   Two points p and q are adjacent if q \in closure(star(p)),   useCone = PETSC_FALSE, useClosure = PETSC_TRUE
 43: $     FVM:   Two points p and q are adjacent if q \in support(p+cone(p)), useCone = PETSC_TRUE,  useClosure = PETSC_FALSE
 44: $     FVM++: Two points p and q are adjacent if q \in star(closure(p)),   useCone = PETSC_TRUE,  useClosure = PETSC_TRUE

 46: .seealso: DMPlexSetAdjacencyUseCone(), DMPlexSetAdjacencyUseClosure(), DMPlexGetAdjacencyUseClosure(), DMPlexDistribute(), DMPlexPreallocateOperator()
 47: @*/
 48: PetscErrorCode DMPlexGetAdjacencyUseCone(DM dm, PetscBool *useCone)
 49: {
 50:   DM_Plex *mesh = (DM_Plex *) dm->data;

 55:   *useCone = mesh->useCone;
 56:   return(0);
 57: }

 59: /*@
 60:   DMPlexSetAdjacencyUseClosure - Define adjacency in the mesh using the transitive closure

 62:   Input Parameters:
 63: + dm      - The DM object
 64: - useClosure - Flag to use the closure

 66:   Level: intermediate

 68:   Notes:
 69: $     FEM:   Two points p and q are adjacent if q \in closure(star(p)),   useCone = PETSC_FALSE, useClosure = PETSC_TRUE
 70: $     FVM:   Two points p and q are adjacent if q \in support(p+cone(p)), useCone = PETSC_TRUE,  useClosure = PETSC_FALSE
 71: $     FVM++: Two points p and q are adjacent if q \in star(closure(p)),   useCone = PETSC_TRUE,  useClosure = PETSC_TRUE

 73: .seealso: DMPlexGetAdjacencyUseClosure(), DMPlexSetAdjacencyUseCone(), DMPlexGetAdjacencyUseCone(), DMPlexDistribute(), DMPlexPreallocateOperator()
 74: @*/
 75: PetscErrorCode DMPlexSetAdjacencyUseClosure(DM dm, PetscBool useClosure)
 76: {
 77:   DM_Plex *mesh = (DM_Plex *) dm->data;

 81:   mesh->useClosure = useClosure;
 82:   return(0);
 83: }

 85: /*@
 86:   DMPlexGetAdjacencyUseClosure - Query whether adjacency in the mesh uses the transitive closure

 88:   Input Parameter:
 89: . dm      - The DM object

 91:   Output Parameter:
 92: . useClosure - Flag to use the closure

 94:   Level: intermediate

 96:   Notes:
 97: $     FEM:   Two points p and q are adjacent if q \in closure(star(p)),   useCone = PETSC_FALSE, useClosure = PETSC_TRUE
 98: $     FVM:   Two points p and q are adjacent if q \in support(p+cone(p)), useCone = PETSC_TRUE,  useClosure = PETSC_FALSE
 99: $     FVM++: Two points p and q are adjacent if q \in star(closure(p)),   useCone = PETSC_TRUE,  useClosure = PETSC_TRUE

101: .seealso: DMPlexSetAdjacencyUseClosure(), DMPlexSetAdjacencyUseCone(), DMPlexGetAdjacencyUseCone(), DMPlexDistribute(), DMPlexPreallocateOperator()
102: @*/
103: PetscErrorCode DMPlexGetAdjacencyUseClosure(DM dm, PetscBool *useClosure)
104: {
105:   DM_Plex *mesh = (DM_Plex *) dm->data;

110:   *useClosure = mesh->useClosure;
111:   return(0);
112: }

114: /*@
115:   DMPlexSetAdjacencyUseAnchors - Define adjacency in the mesh using the point-to-point constraints.

117:   Input Parameters:
118: + dm      - The DM object
119: - useAnchors - Flag to use the constraints.  If PETSC_TRUE, then constrained points are omitted from DMPlexGetAdjacency(), and their anchor points appear in their place.

121:   Level: intermediate

123: .seealso: DMPlexGetAdjacencyUseClosure(), DMPlexSetAdjacencyUseCone(), DMPlexGetAdjacencyUseCone(), DMPlexDistribute(), DMPlexPreallocateOperator(), DMPlexSetAnchors()
124: @*/
125: PetscErrorCode DMPlexSetAdjacencyUseAnchors(DM dm, PetscBool useAnchors)
126: {
127:   DM_Plex *mesh = (DM_Plex *) dm->data;

131:   mesh->useAnchors = useAnchors;
132:   return(0);
133: }

135: /*@
136:   DMPlexGetAdjacencyUseAnchors - Query whether adjacency in the mesh uses the point-to-point constraints.

138:   Input Parameter:
139: . dm      - The DM object

141:   Output Parameter:
142: . useAnchors - Flag to use the constraints.  If PETSC_TRUE, then constrained points are omitted from DMPlexGetAdjacency(), and their anchor points appear in their place.

144:   Level: intermediate

146: .seealso: DMPlexSetAdjacencyUseAnchors(), DMPlexSetAdjacencyUseCone(), DMPlexGetAdjacencyUseCone(), DMPlexDistribute(), DMPlexPreallocateOperator(), DMPlexSetAnchors()
147: @*/
148: PetscErrorCode DMPlexGetAdjacencyUseAnchors(DM dm, PetscBool *useAnchors)
149: {
150:   DM_Plex *mesh = (DM_Plex *) dm->data;

155:   *useAnchors = mesh->useAnchors;
156:   return(0);
157: }
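
/* Illustrative sketch (not part of the original file): once anchors have been set
   with DMPlexSetAnchors(), enable anchor substitution so that constrained points
   are replaced by their anchor points in DMPlexGetAdjacency(). The helper name is
   hypothetical. */
static PetscErrorCode ExampleEnableAnchorAdjacency(DM dm)
{
  PetscBool flg;

  DMPlexSetAdjacencyUseAnchors(dm, PETSC_TRUE);
  DMPlexGetAdjacencyUseAnchors(dm, &flg); /* flg is now PETSC_TRUE */
  return(0);
}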

159: static PetscErrorCode DMPlexGetAdjacency_Cone_Internal(DM dm, PetscInt p, PetscInt *adjSize, PetscInt adj[])
160: {
161:   const PetscInt *cone = NULL;
162:   PetscInt        numAdj = 0, maxAdjSize = *adjSize, coneSize, c;
163:   PetscErrorCode  ierr;

166:   DMPlexGetConeSize(dm, p, &coneSize);
167:   DMPlexGetCone(dm, p, &cone);
168:   for (c = 0; c <= coneSize; ++c) {
169:     const PetscInt  point   = !c ? p : cone[c-1];
170:     const PetscInt *support = NULL;
171:     PetscInt        supportSize, s, q;

173:     DMPlexGetSupportSize(dm, point, &supportSize);
174:     DMPlexGetSupport(dm, point, &support);
175:     for (s = 0; s < supportSize; ++s) {
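      /* Insert-if-absent: the loop below searches adj[] for support[s]; when the
         search reaches numAdj without a match, the comma expression in the loop
         condition appends support[s] and terminates the search. The same idiom is
         used in the other adjacency routines below. */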
176:       for (q = 0; q < numAdj || ((void)(adj[numAdj++] = support[s]),0); ++q) {
177:         if (support[s] == adj[q]) break;
178:       }
179:       if (numAdj > maxAdjSize) SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%D)", maxAdjSize);
180:     }
181:   }
182:   *adjSize = numAdj;
183:   return(0);
184: }

186: static PetscErrorCode DMPlexGetAdjacency_Support_Internal(DM dm, PetscInt p, PetscInt *adjSize, PetscInt adj[])
187: {
188:   const PetscInt *support = NULL;
189:   PetscInt        numAdj   = 0, maxAdjSize = *adjSize, supportSize, s;
190:   PetscErrorCode  ierr;

193:   DMPlexGetSupportSize(dm, p, &supportSize);
194:   DMPlexGetSupport(dm, p, &support);
195:   for (s = 0; s <= supportSize; ++s) {
196:     const PetscInt  point = !s ? p : support[s-1];
197:     const PetscInt *cone  = NULL;
198:     PetscInt        coneSize, c, q;

200:     DMPlexGetConeSize(dm, point, &coneSize);
201:     DMPlexGetCone(dm, point, &cone);
202:     for (c = 0; c < coneSize; ++c) {
203:       for (q = 0; q < numAdj || ((void)(adj[numAdj++] = cone[c]),0); ++q) {
204:         if (cone[c] == adj[q]) break;
205:       }
206:       if (numAdj > maxAdjSize) SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%D)", maxAdjSize);
207:     }
208:   }
209:   *adjSize = numAdj;
210:   return(0);
211: }

213: static PetscErrorCode DMPlexGetAdjacency_Transitive_Internal(DM dm, PetscInt p, PetscBool useClosure, PetscInt *adjSize, PetscInt adj[])
214: {
215:   PetscInt      *star = NULL;
216:   PetscInt       numAdj = 0, maxAdjSize = *adjSize, starSize, s;

220:   DMPlexGetTransitiveClosure(dm, p, useClosure, &starSize, &star);
221:   for (s = 0; s < starSize*2; s += 2) {
222:     const PetscInt *closure = NULL;
223:     PetscInt        closureSize, c, q;

225:     DMPlexGetTransitiveClosure(dm, star[s], (PetscBool)!useClosure, &closureSize, (PetscInt**) &closure);
226:     for (c = 0; c < closureSize*2; c += 2) {
227:       for (q = 0; q < numAdj || ((void)(adj[numAdj++] = closure[c]),0); ++q) {
228:         if (closure[c] == adj[q]) break;
229:       }
230:       if (numAdj > maxAdjSize) SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%D)", maxAdjSize);
231:     }
232:     DMPlexRestoreTransitiveClosure(dm, star[s], (PetscBool)!useClosure, &closureSize, (PetscInt**) &closure);
233:   }
234:   DMPlexRestoreTransitiveClosure(dm, p, useClosure, &starSize, &star);
235:   *adjSize = numAdj;
236:   return(0);
237: }

239: PetscErrorCode DMPlexGetAdjacency_Internal(DM dm, PetscInt p, PetscBool useCone, PetscBool useTransitiveClosure, PetscBool useAnchors, PetscInt *adjSize, PetscInt *adj[])
240: {
241:   static PetscInt asiz = 0;
242:   PetscInt maxAnchors = 1;
243:   PetscInt aStart = -1, aEnd = -1;
244:   PetscInt maxAdjSize;
245:   PetscSection aSec = NULL;
246:   IS aIS = NULL;
247:   const PetscInt *anchors;
248:   PetscErrorCode  ierr;

251:   if (useAnchors) {
252:     DMPlexGetAnchors(dm,&aSec,&aIS);
253:     if (aSec) {
254:       PetscSectionGetMaxDof(aSec,&maxAnchors);
255:       maxAnchors = PetscMax(1,maxAnchors);
256:       PetscSectionGetChart(aSec,&aStart,&aEnd);
257:       ISGetIndices(aIS,&anchors);
258:     }
259:   }
260:   if (!*adj) {
261:     PetscInt depth, coneSeries, supportSeries, maxC, maxS, pStart, pEnd;

263:     DMPlexGetChart(dm, &pStart,&pEnd);
264:     DMPlexGetDepth(dm, &depth);
265:     DMPlexGetMaxSizes(dm, &maxC, &maxS);
266:     coneSeries    = (maxC > 1) ? ((PetscPowInt(maxC,depth+1)-1)/(maxC-1)) : depth+1;
267:     supportSeries = (maxS > 1) ? ((PetscPowInt(maxS,depth+1)-1)/(maxS-1)) : depth+1;
268:     asiz  = PetscMax(PetscPowInt(maxS,depth)*coneSeries,PetscPowInt(maxC,depth)*supportSeries);
269:     asiz *= maxAnchors;
270:     asiz  = PetscMin(asiz,pEnd-pStart);
271:     PetscMalloc1(asiz,adj);
272:   }
273:   if (*adjSize < 0) *adjSize = asiz;
274:   maxAdjSize = *adjSize;
275:   if (useTransitiveClosure) {
276:     DMPlexGetAdjacency_Transitive_Internal(dm, p, useCone, adjSize, *adj);
277:   } else if (useCone) {
278:     DMPlexGetAdjacency_Cone_Internal(dm, p, adjSize, *adj);
279:   } else {
280:     DMPlexGetAdjacency_Support_Internal(dm, p, adjSize, *adj);
281:   }
282:   if (useAnchors && aSec) {
283:     PetscInt origSize = *adjSize;
284:     PetscInt numAdj = origSize;
285:     PetscInt i = 0, j;
286:     PetscInt *orig = *adj;

288:     while (i < origSize) {
289:       PetscInt p = orig[i];
290:       PetscInt aDof = 0;

292:       if (p >= aStart && p < aEnd) {
293:         PetscSectionGetDof(aSec,p,&aDof);
294:       }
295:       if (aDof) {
296:         PetscInt aOff;
297:         PetscInt s, q;

299:         for (j = i + 1; j < numAdj; j++) {
300:           orig[j - 1] = orig[j];
301:         }
302:         origSize--;
303:         numAdj--;
304:         PetscSectionGetOffset(aSec,p,&aOff);
305:         for (s = 0; s < aDof; ++s) {
306:           for (q = 0; q < numAdj || ((void)(orig[numAdj++] = anchors[aOff+s]),0); ++q) {
307:             if (anchors[aOff+s] == orig[q]) break;
308:           }
309:           if (numAdj > maxAdjSize) SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%D)", maxAdjSize);
310:         }
311:       }
312:       else {
313:         i++;
314:       }
315:     }
316:     *adjSize = numAdj;
317:     ISRestoreIndices(aIS,&anchors);
318:   }
319:   return(0);
320: }

322: /*@
323:   DMPlexGetAdjacency - Return all points adjacent to the given point

325:   Input Parameters:
326: + dm - The DM object
327: . p  - The point
328: . adjSize - The maximum size of adj if adj is non-NULL, or PETSC_DETERMINE
329: - adj - Either NULL so that the array is allocated, or an existing array with size adjSize

331:   Output Parameters:
332: + adjSize - The number of adjacent points
333: - adj - The adjacent points

335:   Level: advanced

337:   Notes: The user must PetscFree the adj array if it was not passed in.

339: .seealso: DMPlexSetAdjacencyUseCone(), DMPlexSetAdjacencyUseClosure(), DMPlexDistribute(), DMCreateMatrix(), DMPlexPreallocateOperator()
340: @*/
341: PetscErrorCode DMPlexGetAdjacency(DM dm, PetscInt p, PetscInt *adjSize, PetscInt *adj[])
342: {
343:   DM_Plex       *mesh = (DM_Plex *) dm->data;

350:   DMPlexGetAdjacency_Internal(dm, p, mesh->useCone, mesh->useClosure, mesh->useAnchors, adjSize, adj);
351:   return(0);
352: }
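
/* Illustrative usage sketch (not part of the original file): query the adjacency of
   every point, letting the first call size and allocate the work array (adj = NULL,
   adjSize = PETSC_DETERMINE). As the manual page notes, the caller must PetscFree()
   the array. The helper name is hypothetical. */
static PetscErrorCode ExamplePrintAdjacency(DM dm)
{
  PetscInt *adj = NULL;
  PetscInt  pStart, pEnd, p, a;

  DMPlexGetChart(dm, &pStart, &pEnd);
  for (p = pStart; p < pEnd; ++p) {
    PetscInt adjSize = PETSC_DETERMINE;

    DMPlexGetAdjacency(dm, p, &adjSize, &adj);
    for (a = 0; a < adjSize; ++a) {
      PetscPrintf(PETSC_COMM_SELF, "point %D is adjacent to %D\n", p, adj[a]);
    }
  }
  PetscFree(adj);
  return(0);
}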

354: /*@
355:   DMPlexCreateTwoSidedProcessSF - Create an SF which just has process connectivity

357:   Collective on DM

 359:   Input Parameters:
 360: + dm              - The DM
 361: . sfPoint         - The PetscSF which encodes point connectivity
      . rootRankSection - The number of leaves for a given root point
      . rootRanks       - The rank of each edge into the root point
      . leafRankSection - The number of processes sharing a given leaf point
      - leafRanks       - The rank of each process sharing a leaf point

363:   Output Parameters:
364: + processRanks - A list of process neighbors, or NULL
365: - sfProcess    - An SF encoding the two-sided process connectivity, or NULL

367:   Level: developer

369: .seealso: PetscSFCreate(), DMPlexCreateProcessSF()
370: @*/
371: PetscErrorCode DMPlexCreateTwoSidedProcessSF(DM dm, PetscSF sfPoint, PetscSection rootRankSection, IS rootRanks, PetscSection leafRankSection, IS leafRanks, IS *processRanks, PetscSF *sfProcess)
372: {
373:   const PetscSFNode *remotePoints;
374:   PetscInt          *localPointsNew;
375:   PetscSFNode       *remotePointsNew;
376:   const PetscInt    *nranks;
377:   PetscInt          *ranksNew;
378:   PetscBT            neighbors;
379:   PetscInt           pStart, pEnd, p, numLeaves, l, numNeighbors, n;
380:   PetscMPIInt        size, proc, rank;
381:   PetscErrorCode     ierr;

388:   MPI_Comm_size(PetscObjectComm((PetscObject) dm), &size);
389:   MPI_Comm_rank(PetscObjectComm((PetscObject) dm), &rank);
390:   PetscSFGetGraph(sfPoint, NULL, &numLeaves, NULL, &remotePoints);
391:   PetscBTCreate(size, &neighbors);
392:   PetscBTMemzero(size, neighbors);
393:   /* Compute root-to-leaf process connectivity */
394:   PetscSectionGetChart(rootRankSection, &pStart, &pEnd);
395:   ISGetIndices(rootRanks, &nranks);
396:   for (p = pStart; p < pEnd; ++p) {
397:     PetscInt ndof, noff, n;

399:     PetscSectionGetDof(rootRankSection, p, &ndof);
400:     PetscSectionGetOffset(rootRankSection, p, &noff);
401:     for (n = 0; n < ndof; ++n) {PetscBTSet(neighbors, nranks[noff+n]);}
402:   }
403:   ISRestoreIndices(rootRanks, &nranks);
404:   /* Compute leaf-to-neighbor process connectivity */
405:   PetscSectionGetChart(leafRankSection, &pStart, &pEnd);
406:   ISGetIndices(leafRanks, &nranks);
407:   for (p = pStart; p < pEnd; ++p) {
408:     PetscInt ndof, noff, n;

410:     PetscSectionGetDof(leafRankSection, p, &ndof);
411:     PetscSectionGetOffset(leafRankSection, p, &noff);
412:     for (n = 0; n < ndof; ++n) {PetscBTSet(neighbors, nranks[noff+n]);}
413:   }
414:   ISRestoreIndices(leafRanks, &nranks);
415:   /* Compute leaf-to-root process connectivity */
416:   for (l = 0; l < numLeaves; ++l) {PetscBTSet(neighbors, remotePoints[l].rank);}
417:   /* Calculate edges */
418:   PetscBTClear(neighbors, rank);
419:   for(proc = 0, numNeighbors = 0; proc < size; ++proc) {if (PetscBTLookup(neighbors, proc)) ++numNeighbors;}
420:   PetscMalloc1(numNeighbors, &ranksNew);
421:   PetscMalloc1(numNeighbors, &localPointsNew);
422:   PetscMalloc1(numNeighbors, &remotePointsNew);
423:   for(proc = 0, n = 0; proc < size; ++proc) {
424:     if (PetscBTLookup(neighbors, proc)) {
425:       ranksNew[n]              = proc;
426:       localPointsNew[n]        = proc;
427:       remotePointsNew[n].index = rank;
428:       remotePointsNew[n].rank  = proc;
429:       ++n;
430:     }
431:   }
432:   PetscBTDestroy(&neighbors);
433:   if (processRanks) {ISCreateGeneral(PetscObjectComm((PetscObject)dm), numNeighbors, ranksNew, PETSC_OWN_POINTER, processRanks);}
434:   else              {PetscFree(ranksNew);}
435:   if (sfProcess) {
436:     PetscSFCreate(PetscObjectComm((PetscObject)dm), sfProcess);
437:     PetscObjectSetName((PetscObject) *sfProcess, "Two-Sided Process SF");
438:     PetscSFSetFromOptions(*sfProcess);
439:     PetscSFSetGraph(*sfProcess, size, numNeighbors, localPointsNew, PETSC_OWN_POINTER, remotePointsNew, PETSC_OWN_POINTER);
440:   }
441:   return(0);
442: }

444: /*@
445:   DMPlexDistributeOwnership - Compute owner information for shared points. This essentially builds the two-sided view of the one-sided point SF.

447:   Collective on DM

449:   Input Parameter:
450: . dm - The DM

452:   Output Parameters:
453: + rootSection - The number of leaves for a given root point
454: . rootrank    - The rank of each edge into the root point
455: . leafSection - The number of processes sharing a given leaf point
456: - leafrank    - The rank of each process sharing a leaf point

458:   Level: developer

460: .seealso: DMPlexCreateOverlap()
461: @*/
462: PetscErrorCode DMPlexDistributeOwnership(DM dm, PetscSection rootSection, IS *rootrank, PetscSection leafSection, IS *leafrank)
463: {
464:   MPI_Comm        comm;
465:   PetscSF         sfPoint;
466:   const PetscInt *rootdegree;
467:   PetscInt       *myrank, *remoterank;
468:   PetscInt        pStart, pEnd, p, nedges;
469:   PetscMPIInt     rank;
470:   PetscErrorCode  ierr;

473:   PetscObjectGetComm((PetscObject) dm, &comm);
474:   MPI_Comm_rank(comm, &rank);
475:   DMPlexGetChart(dm, &pStart, &pEnd);
476:   DMGetPointSF(dm, &sfPoint);
477:   /* Compute number of leaves for each root */
478:   PetscObjectSetName((PetscObject) rootSection, "Root Section");
479:   PetscSectionSetChart(rootSection, pStart, pEnd);
480:   PetscSFComputeDegreeBegin(sfPoint, &rootdegree);
481:   PetscSFComputeDegreeEnd(sfPoint, &rootdegree);
482:   for (p = pStart; p < pEnd; ++p) {PetscSectionSetDof(rootSection, p, rootdegree[p-pStart]);}
483:   PetscSectionSetUp(rootSection);
484:   /* Gather rank of each leaf to root */
485:   PetscSectionGetStorageSize(rootSection, &nedges);
486:   PetscMalloc1(pEnd-pStart, &myrank);
487:   PetscMalloc1(nedges,  &remoterank);
488:   for (p = 0; p < pEnd-pStart; ++p) myrank[p] = rank;
489:   PetscSFGatherBegin(sfPoint, MPIU_INT, myrank, remoterank);
490:   PetscSFGatherEnd(sfPoint, MPIU_INT, myrank, remoterank);
491:   PetscFree(myrank);
492:   ISCreateGeneral(comm, nedges, remoterank, PETSC_OWN_POINTER, rootrank);
493:   /* Distribute remote ranks to leaves */
494:   PetscObjectSetName((PetscObject) leafSection, "Leaf Section");
495:   DMPlexDistributeFieldIS(dm, sfPoint, rootSection, *rootrank, leafSection, leafrank);
496:   return(0);
497: }
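
/* Illustrative sketch (not part of the original file): compute the two-sided
   ownership information and feed it to DMPlexCreateOverlap(), mirroring how these
   routines are chained when an overlap is built. The helper name is hypothetical. */
static PetscErrorCode ExampleBuildOverlapLabel(DM dm, PetscInt levels, DMLabel *ovLabel)
{
  PetscSection rootSection, leafSection;
  IS           rootrank, leafrank;

  PetscSectionCreate(PetscObjectComm((PetscObject) dm), &rootSection);
  PetscSectionCreate(PetscObjectComm((PetscObject) dm), &leafSection);
  DMPlexDistributeOwnership(dm, rootSection, &rootrank, leafSection, &leafrank);
  DMPlexCreateOverlap(dm, levels, rootSection, rootrank, leafSection, leafrank, ovLabel);
  ISDestroy(&rootrank);
  ISDestroy(&leafrank);
  PetscSectionDestroy(&rootSection);
  PetscSectionDestroy(&leafSection);
  return(0);
}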

499: /*@C
500:   DMPlexCreateOverlap - Compute the remote overlap contributions, i.e. a label of point/rank pairings describing which points must be added to each rank to create the requested number of overlap levels

502:   Collective on DM

504:   Input Parameters:
505: + dm          - The DM
506: . levels      - Number of overlap levels
507: . rootSection - The number of leaves for a given root point
508: . rootrank    - The rank of each edge into the root point
509: . leafSection - The number of processes sharing a given leaf point
510: - leafrank    - The rank of each process sharing a leaf point

512:   Output Parameter:
513: . ovLabel     - DMLabel containing remote overlap contributions as point/rank pairings

515:   Level: developer

517: .seealso: DMPlexDistributeOwnership(), DMPlexDistribute()
518: @*/
519: PetscErrorCode DMPlexCreateOverlap(DM dm, PetscInt levels, PetscSection rootSection, IS rootrank, PetscSection leafSection, IS leafrank, DMLabel *ovLabel)
520: {
521:   MPI_Comm           comm;
522:   DMLabel            ovAdjByRank; /* A DMLabel containing all points adjacent to shared points, separated by rank (value in label) */
523:   PetscSF            sfPoint, sfProc;
524:   const PetscSFNode *remote;
525:   const PetscInt    *local;
526:   const PetscInt    *nrank, *rrank;
527:   PetscInt          *adj = NULL;
528:   PetscInt           pStart, pEnd, p, sStart, sEnd, nleaves, l;
529:   PetscMPIInt        rank, size;
530:   PetscBool          useCone, useClosure, flg;
531:   PetscErrorCode     ierr;

534:   PetscObjectGetComm((PetscObject) dm, &comm);
535:   MPI_Comm_size(comm, &size);
536:   MPI_Comm_rank(comm, &rank);
537:   DMGetPointSF(dm, &sfPoint);
538:   DMPlexGetChart(dm, &pStart, &pEnd);
539:   PetscSectionGetChart(leafSection, &sStart, &sEnd);
540:   PetscSFGetGraph(sfPoint, NULL, &nleaves, &local, &remote);
541:   DMLabelCreate("Overlap adjacency", &ovAdjByRank);
542:   /* Handle leaves: shared with the root point */
543:   for (l = 0; l < nleaves; ++l) {
544:     PetscInt adjSize = PETSC_DETERMINE, a;

546:     DMPlexGetAdjacency(dm, local[l], &adjSize, &adj);
547:     for (a = 0; a < adjSize; ++a) {DMLabelSetValue(ovAdjByRank, adj[a], remote[l].rank);}
548:   }
549:   ISGetIndices(rootrank, &rrank);
550:   ISGetIndices(leafrank, &nrank);
551:   /* Handle roots */
552:   for (p = pStart; p < pEnd; ++p) {
553:     PetscInt adjSize = PETSC_DETERMINE, neighbors = 0, noff, n, a;

555:     if ((p >= sStart) && (p < sEnd)) {
556:       /* Some leaves share a root with other leaves on different processes */
557:       PetscSectionGetDof(leafSection, p, &neighbors);
558:       if (neighbors) {
559:         PetscSectionGetOffset(leafSection, p, &noff);
560:         DMPlexGetAdjacency(dm, p, &adjSize, &adj);
561:         for (n = 0; n < neighbors; ++n) {
562:           const PetscInt remoteRank = nrank[noff+n];

564:           if (remoteRank == rank) continue;
565:           for (a = 0; a < adjSize; ++a) {DMLabelSetValue(ovAdjByRank, adj[a], remoteRank);}
566:         }
567:       }
568:     }
569:     /* Roots are shared with leaves */
570:     PetscSectionGetDof(rootSection, p, &neighbors);
571:     if (!neighbors) continue;
572:     PetscSectionGetOffset(rootSection, p, &noff);
573:     DMPlexGetAdjacency(dm, p, &adjSize, &adj);
574:     for (n = 0; n < neighbors; ++n) {
575:       const PetscInt remoteRank = rrank[noff+n];

577:       if (remoteRank == rank) continue;
578:       for (a = 0; a < adjSize; ++a) {DMLabelSetValue(ovAdjByRank, adj[a], remoteRank);}
579:     }
580:   }
581:   PetscFree(adj);
582:   ISRestoreIndices(rootrank, &rrank);
583:   ISRestoreIndices(leafrank, &nrank);
584:   /* Add additional overlap levels */
585:   for (l = 1; l < levels; l++) {
586:     /* Propagate point donations over SF to capture remote connections */
587:     DMPlexPartitionLabelPropagate(dm, ovAdjByRank);
588:     /* Add next level of point donations to the label */
589:     DMPlexPartitionLabelAdjacency(dm, ovAdjByRank);
590:   }
591:   /* We require the closure in the overlap */
592:   DMPlexGetAdjacencyUseCone(dm, &useCone);
593:   DMPlexGetAdjacencyUseClosure(dm, &useClosure);
594:   if (useCone || !useClosure) {
595:     DMPlexPartitionLabelClosure(dm, ovAdjByRank);
596:   }
597:   PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-overlap_view", &flg);
598:   if (flg) {
599:     DMLabelView(ovAdjByRank, PETSC_VIEWER_STDOUT_WORLD);
600:   }
601:   /* Make global process SF and invert sender to receiver label */
602:   {
603:     /* Build a global process SF */
604:     PetscSFNode *remoteProc;
605:     PetscMalloc1(size, &remoteProc);
606:     for (p = 0; p < size; ++p) {
607:       remoteProc[p].rank  = p;
608:       remoteProc[p].index = rank;
609:     }
610:     PetscSFCreate(comm, &sfProc);
611:     PetscObjectSetName((PetscObject) sfProc, "Process SF");
612:     PetscSFSetGraph(sfProc, size, size, NULL, PETSC_OWN_POINTER, remoteProc, PETSC_OWN_POINTER);
613:   }
614:   DMLabelCreate("Overlap label", ovLabel);
615:   DMPlexPartitionLabelInvert(dm, ovAdjByRank, sfProc, *ovLabel);
616:   /* Add owned points, except for shared local points */
617:   for (p = pStart; p < pEnd; ++p) {DMLabelSetValue(*ovLabel, p, rank);}
618:   for (l = 0; l < nleaves; ++l) {
619:     DMLabelClearValue(*ovLabel, local[l], rank);
620:     DMLabelSetValue(*ovLabel, remote[l].index, remote[l].rank);
621:   }
622:   /* Clean up */
623:   DMLabelDestroy(&ovAdjByRank);
624:   PetscSFDestroy(&sfProc);
625:   return(0);
626: }
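
/* Illustrative sketch (not part of the original file): convert the overlap label
   produced above into a migration SF, assuming DMPlexPartitionLabelCreateSF() from
   the partitioning support is available. The helper name is hypothetical. */
static PetscErrorCode ExampleOverlapLabelToMigrationSF(DM dm, DMLabel ovLabel, PetscSF *migrationSF)
{
  PetscSF sfOverlap;

  DMPlexPartitionLabelCreateSF(dm, ovLabel, &sfOverlap);
  DMPlexCreateOverlapMigrationSF(dm, sfOverlap, migrationSF);
  PetscSFDestroy(&sfOverlap);
  return(0);
}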

628: /*@C
629:   DMPlexCreateOverlapMigrationSF - Create an SF describing the new mesh distribution to make the overlap described by the input SF

631:   Collective on DM

633:   Input Parameters:
634: + dm          - The DM
635: - overlapSF   - The SF mapping ghost points in overlap to owner points on other processes

637:   Output Parameter:
638: . migrationSF - An SF that maps original points in old locations to points in new locations

640:   Level: developer

642: .seealso: DMPlexCreateOverlap(), DMPlexDistribute()
643: @*/
644: PetscErrorCode DMPlexCreateOverlapMigrationSF(DM dm, PetscSF overlapSF, PetscSF *migrationSF)
645: {
646:   MPI_Comm           comm;
647:   PetscMPIInt        rank, size;
648:   PetscInt           d, dim, p, pStart, pEnd, nroots, nleaves, newLeaves, point, numSharedPoints;
649:   PetscInt          *pointDepths, *remoteDepths, *ilocal;
650:   PetscInt          *depthRecv, *depthShift, *depthIdx;
651:   PetscSFNode       *iremote;
652:   PetscSF            pointSF;
653:   const PetscInt    *sharedLocal;
654:   const PetscSFNode *overlapRemote, *sharedRemote;
655:   PetscErrorCode     ierr;

659:   PetscObjectGetComm((PetscObject)dm, &comm);
660:   MPI_Comm_rank(comm, &rank);
661:   MPI_Comm_size(comm, &size);
662:   DMGetDimension(dm, &dim);

664:   /* Before building the migration SF we need to know the new stratum offsets */
665:   PetscSFGetGraph(overlapSF, &nroots, &nleaves, NULL, &overlapRemote);
666:   PetscMalloc2(nroots, &pointDepths, nleaves, &remoteDepths);
667:   for (d=0; d<dim+1; d++) {
668:     DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);
669:     for (p=pStart; p<pEnd; p++) pointDepths[p] = d;
670:   }
671:   for (p=0; p<nleaves; p++) remoteDepths[p] = -1;
672:   PetscSFBcastBegin(overlapSF, MPIU_INT, pointDepths, remoteDepths);
673:   PetscSFBcastEnd(overlapSF, MPIU_INT, pointDepths, remoteDepths);

675:   /* Count received points in each stratum and compute the internal strata shift */
676:   PetscMalloc3(dim+1, &depthRecv, dim+1, &depthShift, dim+1, &depthIdx);
677:   for (d=0; d<dim+1; d++) depthRecv[d]=0;
678:   for (p=0; p<nleaves; p++) depthRecv[remoteDepths[p]]++;
679:   depthShift[dim] = 0;
680:   for (d=0; d<dim; d++) depthShift[d] = depthRecv[dim];
681:   for (d=1; d<dim; d++) depthShift[d] += depthRecv[0];
682:   for (d=dim-2; d>0; d--) depthShift[d] += depthRecv[d+1];
683:   for (d=0; d<dim+1; d++) {
684:     DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);
685:     depthIdx[d] = pStart + depthShift[d];
686:   }

688:   /* Build an SF that describes the full overlap migration */
689:   DMPlexGetChart(dm, &pStart, &pEnd);
690:   newLeaves = pEnd - pStart + nleaves;
691:   PetscMalloc1(newLeaves, &ilocal);
692:   PetscMalloc1(newLeaves, &iremote);
693:   /* First map local points to themselves */
694:   for (d=0; d<dim+1; d++) {
695:     DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);
696:     for (p=pStart; p<pEnd; p++) {
697:       point = p + depthShift[d];
698:       ilocal[point] = point;
699:       iremote[point].index = p;
700:       iremote[point].rank = rank;
701:       depthIdx[d]++;
702:     }
703:   }

705:   /* Add in the remote roots for currently shared points */
706:   DMGetPointSF(dm, &pointSF);
707:   PetscSFGetGraph(pointSF, NULL, &numSharedPoints, &sharedLocal, &sharedRemote);
708:   for (d=0; d<dim+1; d++) {
709:     DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);
710:     for (p=0; p<numSharedPoints; p++) {
711:       if (pStart <= sharedLocal[p] && sharedLocal[p] < pEnd) {
712:         point = sharedLocal[p] + depthShift[d];
713:         iremote[point].index = sharedRemote[p].index;
714:         iremote[point].rank = sharedRemote[p].rank;
715:       }
716:     }
717:   }

719:   /* Now add the incoming overlap points */
720:   for (p=0; p<nleaves; p++) {
721:     point = depthIdx[remoteDepths[p]];
722:     ilocal[point] = point;
723:     iremote[point].index = overlapRemote[p].index;
724:     iremote[point].rank = overlapRemote[p].rank;
725:     depthIdx[remoteDepths[p]]++;
726:   }
727:   PetscFree2(pointDepths,remoteDepths);

729:   PetscSFCreate(comm, migrationSF);
730:   PetscObjectSetName((PetscObject) *migrationSF, "Overlap Migration SF");
731:   PetscSFSetFromOptions(*migrationSF);
732:   DMPlexGetChart(dm, &pStart, &pEnd);
733:   PetscSFSetGraph(*migrationSF, pEnd-pStart, newLeaves, ilocal, PETSC_OWN_POINTER, iremote, PETSC_OWN_POINTER);

735:   PetscFree3(depthRecv, depthShift, depthIdx);
736:   return(0);
737: }

739: /*@
740:   DMPlexStratifyMigrationSF - Rearrange the leaves of a migration sf for stratification.

742:   Input Parameters:
743: + dm          - The DM
744: - sf          - A star forest with non-ordered leaves, usually defining a DM point migration

746:   Output Parameter:
747: . migrationSF - A star forest with added leaf indirection that ensures the resulting DM is stratified

749:   Level: developer

751: .seealso: DMPlexPartitionLabelCreateSF(), DMPlexDistribute(), DMPlexDistributeOverlap()
752: @*/
753: PetscErrorCode DMPlexStratifyMigrationSF(DM dm, PetscSF sf, PetscSF *migrationSF)
754: {
755:   MPI_Comm           comm;
756:   PetscMPIInt        rank, size;
757:   PetscInt           d, ldepth, depth, p, pStart, pEnd, nroots, nleaves;
758:   PetscInt          *pointDepths, *remoteDepths, *ilocal;
759:   PetscInt          *depthRecv, *depthShift, *depthIdx;
760:   PetscInt           hybEnd[4];
761:   const PetscSFNode *iremote;
762:   PetscErrorCode     ierr;

766:   PetscObjectGetComm((PetscObject) dm, &comm);
767:   MPI_Comm_rank(comm, &rank);
768:   MPI_Comm_size(comm, &size);
769:   DMPlexGetDepth(dm, &ldepth);
770:   MPIU_Allreduce(&ldepth, &depth, 1, MPIU_INT, MPI_MAX, comm);
771:   if ((ldepth >= 0) && (depth != ldepth)) SETERRQ2(PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Inconsistent Plex depth %d != %d", ldepth, depth);

773:   /* Before building the migration SF we need to know the new stratum offsets */
774:   PetscSFGetGraph(sf, &nroots, &nleaves, NULL, &iremote);
775:   PetscMalloc2(nroots, &pointDepths, nleaves, &remoteDepths);
776:   DMPlexGetHybridBounds(dm,&hybEnd[depth],&hybEnd[depth-1],&hybEnd[1],&hybEnd[0]);
777:   for (d = 0; d < depth+1; ++d) {
778:     DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);
779:     for (p = pStart; p < pEnd; ++p) {
780:       if (hybEnd[d] >= 0 && p >= hybEnd[d]) { /* put in a separate value for hybrid points */
781:         pointDepths[p] = 2 * d;
782:       } else {
783:         pointDepths[p] = 2 * d + 1;
784:       }
785:     }
786:   }
787:   for (p = 0; p < nleaves; ++p) remoteDepths[p] = -1;
788:   PetscSFBcastBegin(sf, MPIU_INT, pointDepths, remoteDepths);
789:   PetscSFBcastEnd(sf, MPIU_INT, pointDepths, remoteDepths);
790:   /* Count received points in each stratum and compute the internal strata shift */
791:   PetscMalloc3(2*(depth+1), &depthRecv, 2*(depth+1), &depthShift, 2*(depth+1), &depthIdx);
792:   for (d = 0; d < 2*(depth+1); ++d) depthRecv[d] = 0;
793:   for (p = 0; p < nleaves; ++p) depthRecv[remoteDepths[p]]++;
794:   depthShift[2*depth+1] = 0;
795:   for (d = 0; d < 2*depth+1; ++d) depthShift[d] = depthRecv[2 * depth + 1];
796:   for (d = 0; d < 2*depth; ++d) depthShift[d] += depthRecv[2 * depth];
797:   depthShift[0] += depthRecv[1];
798:   for (d = 2; d < 2*depth; ++d) depthShift[d] += depthRecv[1];
799:   for (d = 2; d < 2*depth; ++d) depthShift[d] += depthRecv[0];
800:   for (d = 2 * depth-1; d > 2; --d) {
801:     PetscInt e;

803:     for (e = d -1; e > 1; --e) depthShift[e] += depthRecv[d];
804:   }
805:   for (d = 0; d < 2*(depth+1); ++d) {depthIdx[d] = 0;}
806:   /* Derive a new local permutation based on stratified indices */
807:   PetscMalloc1(nleaves, &ilocal);
808:   for (p = 0; p < nleaves; ++p) {
809:     const PetscInt dep = remoteDepths[p];

811:     ilocal[p] = depthShift[dep] + depthIdx[dep];
812:     depthIdx[dep]++;
813:   }
814:   PetscSFCreate(comm, migrationSF);
815:   PetscObjectSetName((PetscObject) *migrationSF, "Migration SF");
816:   PetscSFSetGraph(*migrationSF, nroots, nleaves, ilocal, PETSC_OWN_POINTER, iremote, PETSC_COPY_VALUES);
817:   PetscFree2(pointDepths,remoteDepths);
818:   PetscFree3(depthRecv, depthShift, depthIdx);
819:   return(0);
820: }
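
/* Illustrative sketch (not part of the original file): build a raw migration SF
   from a partition label and reorder its leaves so that received points arrive
   stratum by stratum, which keeps the migrated DM stratified. The helper name is
   hypothetical. */
static PetscErrorCode ExampleStratifiedMigrationSF(DM dm, DMLabel partLabel, PetscSF *sfMigration)
{
  PetscSF sfRaw;

  DMPlexPartitionLabelCreateSF(dm, partLabel, &sfRaw);
  DMPlexStratifyMigrationSF(dm, sfRaw, sfMigration);
  PetscSFDestroy(&sfRaw);
  return(0);
}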

822: /*@
823:   DMPlexDistributeField - Distribute field data to match a given PetscSF, usually the SF from mesh distribution

825:   Collective on DM

827:   Input Parameters:
828: + dm - The DMPlex object
829: . pointSF - The PetscSF describing the communication pattern
830: . originalSection - The PetscSection for existing data layout
831: - originalVec - The existing data

833:   Output Parameters:
 834: + newSection - The PetscSection describing the new data layout
835: - newVec - The new data

837:   Level: developer

839: .seealso: DMPlexDistribute(), DMPlexDistributeFieldIS(), DMPlexDistributeData()
840: @*/
841: PetscErrorCode DMPlexDistributeField(DM dm, PetscSF pointSF, PetscSection originalSection, Vec originalVec, PetscSection newSection, Vec newVec)
842: {
843:   PetscSF        fieldSF;
844:   PetscInt      *remoteOffsets, fieldSize;
845:   PetscScalar   *originalValues, *newValues;

849:   PetscLogEventBegin(DMPLEX_DistributeField,dm,0,0,0);
850:   PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection);

852:   PetscSectionGetStorageSize(newSection, &fieldSize);
853:   VecSetSizes(newVec, fieldSize, PETSC_DETERMINE);
854:   VecSetType(newVec,dm->vectype);

856:   VecGetArray(originalVec, &originalValues);
857:   VecGetArray(newVec, &newValues);
858:   PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF);
859:   PetscFree(remoteOffsets);
860:   PetscSFBcastBegin(fieldSF, MPIU_SCALAR, originalValues, newValues);
861:   PetscSFBcastEnd(fieldSF, MPIU_SCALAR, originalValues, newValues);
862:   PetscSFDestroy(&fieldSF);
863:   VecRestoreArray(newVec, &newValues);
864:   VecRestoreArray(originalVec, &originalValues);
865:   PetscLogEventEnd(DMPLEX_DistributeField,dm,0,0,0);
866:   return(0);
867: }
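
/* Illustrative sketch (not part of the original file): push a Vec of per-point data
   through a migration SF. The new section and vector are created empty here and
   filled by DMPlexDistributeField(), following the same pattern used for the
   coordinates in DMPlexDistributeCoordinates() below. The helper name is
   hypothetical. */
static PetscErrorCode ExampleDistributeVec(DM dm, PetscSF migrationSF, PetscSection origSection, Vec origVec, PetscSection *newSection, Vec *newVec)
{
  PetscSectionCreate(PetscObjectComm((PetscObject) dm), newSection);
  VecCreate(PETSC_COMM_SELF, newVec);
  DMPlexDistributeField(dm, migrationSF, origSection, origVec, *newSection, *newVec);
  return(0);
}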

869: /*@
870:   DMPlexDistributeFieldIS - Distribute field data to match a given PetscSF, usually the SF from mesh distribution

872:   Collective on DM

874:   Input Parameters:
875: + dm - The DMPlex object
876: . pointSF - The PetscSF describing the communication pattern
877: . originalSection - The PetscSection for existing data layout
878: - originalIS - The existing data

880:   Output Parameters:
 881: + newSection - The PetscSection describing the new data layout
882: - newIS - The new data

884:   Level: developer

886: .seealso: DMPlexDistribute(), DMPlexDistributeField(), DMPlexDistributeData()
887: @*/
888: PetscErrorCode DMPlexDistributeFieldIS(DM dm, PetscSF pointSF, PetscSection originalSection, IS originalIS, PetscSection newSection, IS *newIS)
889: {
890:   PetscSF         fieldSF;
891:   PetscInt       *newValues, *remoteOffsets, fieldSize;
892:   const PetscInt *originalValues;
893:   PetscErrorCode  ierr;

896:   PetscLogEventBegin(DMPLEX_DistributeField,dm,0,0,0);
897:   PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection);

899:   PetscSectionGetStorageSize(newSection, &fieldSize);
900:   PetscMalloc1(fieldSize, &newValues);

902:   ISGetIndices(originalIS, &originalValues);
903:   PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF);
904:   PetscFree(remoteOffsets);
905:   PetscSFBcastBegin(fieldSF, MPIU_INT, (PetscInt *) originalValues, newValues);
906:   PetscSFBcastEnd(fieldSF, MPIU_INT, (PetscInt *) originalValues, newValues);
907:   PetscSFDestroy(&fieldSF);
908:   ISRestoreIndices(originalIS, &originalValues);
909:   ISCreateGeneral(PetscObjectComm((PetscObject) pointSF), fieldSize, newValues, PETSC_OWN_POINTER, newIS);
910:   PetscLogEventEnd(DMPLEX_DistributeField,dm,0,0,0);
911:   return(0);
912: }

914: /*@
915:   DMPlexDistributeData - Distribute field data to match a given PetscSF, usually the SF from mesh distribution

917:   Collective on DM

919:   Input Parameters:
920: + dm - The DMPlex object
921: . pointSF - The PetscSF describing the communication pattern
922: . originalSection - The PetscSection for existing data layout
923: . datatype - The type of data
924: - originalData - The existing data

926:   Output Parameters:
927: + newSection - The PetscSection describing the new data layout
928: - newData - The new data

930:   Level: developer

932: .seealso: DMPlexDistribute(), DMPlexDistributeField()
933: @*/
934: PetscErrorCode DMPlexDistributeData(DM dm, PetscSF pointSF, PetscSection originalSection, MPI_Datatype datatype, void *originalData, PetscSection newSection, void **newData)
935: {
936:   PetscSF        fieldSF;
937:   PetscInt      *remoteOffsets, fieldSize;
938:   PetscMPIInt    dataSize;

942:   PetscLogEventBegin(DMPLEX_DistributeData,dm,0,0,0);
943:   PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection);

945:   PetscSectionGetStorageSize(newSection, &fieldSize);
946:   MPI_Type_size(datatype, &dataSize);
947:   PetscMalloc(fieldSize * dataSize, newData);

949:   PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF);
950:   PetscFree(remoteOffsets);
951:   PetscSFBcastBegin(fieldSF, datatype, originalData, *newData);
952:   PetscSFBcastEnd(fieldSF, datatype, originalData, *newData);
953:   PetscSFDestroy(&fieldSF);
954:   PetscLogEventEnd(DMPLEX_DistributeData,dm,0,0,0);
955:   return(0);
956: }
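
/* Illustrative sketch (not part of the original file): distribute a raw PetscInt
   array laid out by origSection, using MPIU_INT as the MPI datatype. newData is
   allocated by DMPlexDistributeData() and must be freed by the caller. The helper
   name is hypothetical. */
static PetscErrorCode ExampleDistributeIntData(DM dm, PetscSF migrationSF, PetscSection origSection, PetscInt *origData, PetscSection *newSection, PetscInt **newData)
{
  PetscSectionCreate(PetscObjectComm((PetscObject) dm), newSection);
  DMPlexDistributeData(dm, migrationSF, origSection, MPIU_INT, origData, *newSection, (void **) newData);
  return(0);
}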

958: static PetscErrorCode DMPlexDistributeCones(DM dm, PetscSF migrationSF, ISLocalToGlobalMapping original, ISLocalToGlobalMapping renumbering, DM dmParallel)
959: {
960:   DM_Plex               *mesh  = (DM_Plex*) dm->data;
961:   DM_Plex               *pmesh = (DM_Plex*) (dmParallel)->data;
962:   MPI_Comm               comm;
963:   PetscSF                coneSF;
964:   PetscSection           originalConeSection, newConeSection;
965:   PetscInt              *remoteOffsets, *cones, *globCones, *newCones, newConesSize;
966:   PetscBool              flg;
967:   PetscErrorCode         ierr;

972:   PetscLogEventBegin(DMPLEX_DistributeCones,dm,0,0,0);

974:   /* Distribute cone section */
975:   PetscObjectGetComm((PetscObject)dm, &comm);
976:   DMPlexGetConeSection(dm, &originalConeSection);
977:   DMPlexGetConeSection(dmParallel, &newConeSection);
978:   PetscSFDistributeSection(migrationSF, originalConeSection, &remoteOffsets, newConeSection);
979:   DMSetUp(dmParallel);
980:   {
981:     PetscInt pStart, pEnd, p;

983:     PetscSectionGetChart(newConeSection, &pStart, &pEnd);
984:     for (p = pStart; p < pEnd; ++p) {
985:       PetscInt coneSize;
986:       PetscSectionGetDof(newConeSection, p, &coneSize);
987:       pmesh->maxConeSize = PetscMax(pmesh->maxConeSize, coneSize);
988:     }
989:   }
990:   /* Communicate and renumber cones */
991:   PetscSFCreateSectionSF(migrationSF, originalConeSection, remoteOffsets, newConeSection, &coneSF);
992:   PetscFree(remoteOffsets);
993:   DMPlexGetCones(dm, &cones);
994:   if (original) {
995:     PetscInt numCones;

997:     PetscSectionGetStorageSize(originalConeSection,&numCones); PetscMalloc1(numCones,&globCones);
998:     ISLocalToGlobalMappingApplyBlock(original, numCones, cones, globCones);
999:   }
1000:   else {
1001:     globCones = cones;
1002:   }
1003:   DMPlexGetCones(dmParallel, &newCones);
1004:   PetscSFBcastBegin(coneSF, MPIU_INT, globCones, newCones);
1005:   PetscSFBcastEnd(coneSF, MPIU_INT, globCones, newCones);
1006:   if (original) {
1007:     PetscFree(globCones);
1008:   }
1009:   PetscSectionGetStorageSize(newConeSection, &newConesSize);
1010:   ISGlobalToLocalMappingApplyBlock(renumbering, IS_GTOLM_MASK, newConesSize, newCones, NULL, newCones);
1011: #if PETSC_USE_DEBUG
1012:   {
1013:     PetscInt  p;
1014:     PetscBool valid = PETSC_TRUE;
1015:     for (p = 0; p < newConesSize; ++p) {
1016:       if (newCones[p] < 0) {valid = PETSC_FALSE; PetscPrintf(PETSC_COMM_SELF, "Point %d not in overlap SF\n", p);}
1017:     }
1018:     if (!valid) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Invalid global to local map");
1019:   }
1020: #endif
1021:   PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-cones_view", &flg);
1022:   if (flg) {
1023:     PetscPrintf(comm, "Serial Cone Section:\n");
1024:     PetscSectionView(originalConeSection, PETSC_VIEWER_STDOUT_WORLD);
1025:     PetscPrintf(comm, "Parallel Cone Section:\n");
1026:     PetscSectionView(newConeSection, PETSC_VIEWER_STDOUT_WORLD);
1027:     PetscSFView(coneSF, NULL);
1028:   }
1029:   DMPlexGetConeOrientations(dm, &cones);
1030:   DMPlexGetConeOrientations(dmParallel, &newCones);
1031:   PetscSFBcastBegin(coneSF, MPIU_INT, cones, newCones);
1032:   PetscSFBcastEnd(coneSF, MPIU_INT, cones, newCones);
1033:   PetscSFDestroy(&coneSF);
1034:   PetscLogEventEnd(DMPLEX_DistributeCones,dm,0,0,0);
1035:   /* Create supports and stratify sieve */
1036:   {
1037:     PetscInt pStart, pEnd;

1039:     PetscSectionGetChart(pmesh->coneSection, &pStart, &pEnd);
1040:     PetscSectionSetChart(pmesh->supportSection, pStart, pEnd);
1041:   }
1042:   DMPlexSymmetrize(dmParallel);
1043:   DMPlexStratify(dmParallel);
1044:   pmesh->useCone    = mesh->useCone;
1045:   pmesh->useClosure = mesh->useClosure;
1046:   return(0);
1047: }

1049: static PetscErrorCode DMPlexDistributeCoordinates(DM dm, PetscSF migrationSF, DM dmParallel)
1050: {
1051:   MPI_Comm         comm;
1052:   PetscSection     originalCoordSection, newCoordSection;
1053:   Vec              originalCoordinates, newCoordinates;
1054:   PetscInt         bs;
1055:   const char      *name;
1056:   const PetscReal *maxCell, *L;
1057:   const DMBoundaryType *bd;
1058:   PetscErrorCode   ierr;


1064:   PetscObjectGetComm((PetscObject)dm, &comm);
1065:   DMGetCoordinateSection(dm, &originalCoordSection);
1066:   DMGetCoordinateSection(dmParallel, &newCoordSection);
1067:   DMGetCoordinatesLocal(dm, &originalCoordinates);
1068:   if (originalCoordinates) {
1069:     VecCreate(PETSC_COMM_SELF, &newCoordinates);
1070:     PetscObjectGetName((PetscObject) originalCoordinates, &name);
1071:     PetscObjectSetName((PetscObject) newCoordinates, name);

1073:     DMPlexDistributeField(dm, migrationSF, originalCoordSection, originalCoordinates, newCoordSection, newCoordinates);
1074:     DMSetCoordinatesLocal(dmParallel, newCoordinates);
1075:     VecGetBlockSize(originalCoordinates, &bs);
1076:     VecSetBlockSize(newCoordinates, bs);
1077:     VecDestroy(&newCoordinates);
1078:   }
1079:   DMGetPeriodicity(dm, &maxCell, &L, &bd);
1080:   if (L) {DMSetPeriodicity(dmParallel, maxCell, L, bd);}
1081:   return(0);
1082: }

1084: /* Here we are assuming that process 0 always has everything */
1085: static PetscErrorCode DMPlexDistributeLabels(DM dm, PetscSF migrationSF, DM dmParallel)
1086: {
1087:   DM_Plex         *mesh = (DM_Plex*) dm->data;
1088:   MPI_Comm         comm;
1089:   DMLabel          depthLabel;
1090:   PetscMPIInt      rank;
1091:   PetscInt         depth, d, numLabels, numLocalLabels, l;
1092:   PetscBool        hasLabels = PETSC_FALSE, lsendDepth, sendDepth;
1093:   PetscObjectState depthState = -1;
1094:   PetscErrorCode   ierr;

1099:   PetscLogEventBegin(DMPLEX_DistributeLabels,dm,0,0,0);
1100:   PetscObjectGetComm((PetscObject)dm, &comm);
1101:   MPI_Comm_rank(comm, &rank);

1103:   /* If the user has changed the depth label, communicate it instead */
1104:   DMPlexGetDepth(dm, &depth);
1105:   DMPlexGetDepthLabel(dm, &depthLabel);
1106:   if (depthLabel) {DMLabelGetState(depthLabel, &depthState);}
1107:   lsendDepth = mesh->depthState != depthState ? PETSC_TRUE : PETSC_FALSE;
1108:   MPIU_Allreduce(&lsendDepth, &sendDepth, 1, MPIU_BOOL, MPI_LOR, comm);
1109:   if (sendDepth) {
1110:     DMRemoveLabel(dmParallel, "depth", &depthLabel);
1111:     DMLabelDestroy(&depthLabel);
1112:   }
1113:   /* Everyone must have either the same number of labels, or none */
1114:   DMGetNumLabels(dm, &numLocalLabels);
1115:   numLabels = numLocalLabels;
1116:   MPI_Bcast(&numLabels, 1, MPIU_INT, 0, comm);
1117:   if (numLabels == numLocalLabels) hasLabels = PETSC_TRUE;
1118:   for (l = numLabels-1; l >= 0; --l) {
1119:     DMLabel     label = NULL, labelNew = NULL;
1120:     PetscBool   isdepth;

1122:     if (hasLabels) {
1123:       DMGetLabelByNum(dm, l, &label);
1124:       /* Skip "depth" because it is recreated */
1125:       PetscStrcmp(label->name, "depth", &isdepth);
1126:     }
1127:     MPI_Bcast(&isdepth, 1, MPIU_BOOL, 0, comm);
1128:     if (isdepth && !sendDepth) continue;
1129:     DMLabelDistribute(label, migrationSF, &labelNew);
1130:     if (isdepth) {
1131:       /* Put in any missing strata which can occur if users are managing the depth label themselves */
1132:       PetscInt gdepth;

1134:       MPIU_Allreduce(&depth, &gdepth, 1, MPIU_INT, MPI_MAX, comm);
1135:       if ((depth >= 0) && (gdepth != depth)) SETERRQ2(PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Inconsistent Plex depth %d != %d", depth, gdepth);
1136:       for (d = 0; d <= gdepth; ++d) {
1137:         PetscBool has;

1139:         DMLabelHasStratum(labelNew, d, &has);
1140:         if (!has) DMLabelAddStratum(labelNew, d);
1141:       }
1142:     }
1143:     DMAddLabel(dmParallel, labelNew);
1144:   }
1145:   PetscLogEventEnd(DMPLEX_DistributeLabels,dm,0,0,0);
1146:   return(0);
1147: }

1149: static PetscErrorCode DMPlexDistributeSetupHybrid(DM dm, PetscSF migrationSF, ISLocalToGlobalMapping renumbering, DM dmParallel)
1150: {
1151:   DM_Plex        *mesh  = (DM_Plex*) dm->data;
1152:   DM_Plex        *pmesh = (DM_Plex*) (dmParallel)->data;
1153:   PetscBool      *isHybrid, *isHybridParallel;
1154:   PetscInt        dim, depth, d;
1155:   PetscInt        pStart, pEnd, pStartP, pEndP;
1156:   PetscErrorCode  ierr;


1162:   DMGetDimension(dm, &dim);
1163:   DMPlexGetDepth(dm, &depth);
1164:   DMPlexGetChart(dm,&pStart,&pEnd);
1165:   DMPlexGetChart(dmParallel,&pStartP,&pEndP);
1166:   PetscCalloc2(pEnd-pStart,&isHybrid,pEndP-pStartP,&isHybridParallel);
1167:   for (d = 0; d <= depth; d++) {
1168:     PetscInt hybridMax = (depth == 1 && d == 1) ? mesh->hybridPointMax[dim] : mesh->hybridPointMax[d];

1170:     if (hybridMax >= 0) {
1171:       PetscInt sStart, sEnd, p;

1173:       DMPlexGetDepthStratum(dm,d,&sStart,&sEnd);
1174:       for (p = hybridMax; p < sEnd; p++) isHybrid[p-pStart] = PETSC_TRUE;
1175:     }
1176:   }
1177:   PetscSFBcastBegin(migrationSF,MPIU_BOOL,isHybrid,isHybridParallel);
1178:   PetscSFBcastEnd(migrationSF,MPIU_BOOL,isHybrid,isHybridParallel);
1179:   for (d = 0; d <= dim; d++) pmesh->hybridPointMax[d] = -1;
1180:   for (d = 0; d <= depth; d++) {
1181:     PetscInt sStart, sEnd, p, dd;

1183:     DMPlexGetDepthStratum(dmParallel,d,&sStart,&sEnd);
1184:     dd = (depth == 1 && d == 1) ? dim : d;
1185:     for (p = sStart; p < sEnd; p++) {
1186:       if (isHybridParallel[p-pStartP]) {
1187:         pmesh->hybridPointMax[dd] = p;
1188:         break;
1189:       }
1190:     }
1191:   }
1192:   PetscFree2(isHybrid,isHybridParallel);
1193:   return(0);
1194: }

1196: static PetscErrorCode DMPlexDistributeSetupTree(DM dm, PetscSF migrationSF, ISLocalToGlobalMapping original, ISLocalToGlobalMapping renumbering, DM dmParallel)
1197: {
1198:   DM_Plex        *mesh  = (DM_Plex*) dm->data;
1199:   DM_Plex        *pmesh = (DM_Plex*) (dmParallel)->data;
1200:   MPI_Comm        comm;
1201:   DM              refTree;
1202:   PetscSection    origParentSection, newParentSection;
1203:   PetscInt        *origParents, *origChildIDs;
1204:   PetscBool       flg;
1205:   PetscErrorCode  ierr;

1210:   PetscObjectGetComm((PetscObject)dm, &comm);

1212:   /* Set up tree */
1213:   DMPlexGetReferenceTree(dm,&refTree);
1214:   DMPlexSetReferenceTree(dmParallel,refTree);
1215:   DMPlexGetTree(dm,&origParentSection,&origParents,&origChildIDs,NULL,NULL);
1216:   if (origParentSection) {
1217:     PetscInt        pStart, pEnd;
1218:     PetscInt        *newParents, *newChildIDs, *globParents;
1219:     PetscInt        *remoteOffsetsParents, newParentSize;
1220:     PetscSF         parentSF;

1222:     DMPlexGetChart(dmParallel, &pStart, &pEnd);
1223:     PetscSectionCreate(PetscObjectComm((PetscObject)dmParallel),&newParentSection);
1224:     PetscSectionSetChart(newParentSection,pStart,pEnd);
1225:     PetscSFDistributeSection(migrationSF, origParentSection, &remoteOffsetsParents, newParentSection);
1226:     PetscSFCreateSectionSF(migrationSF, origParentSection, remoteOffsetsParents, newParentSection, &parentSF);
1227:     PetscFree(remoteOffsetsParents);
1228:     PetscSectionGetStorageSize(newParentSection,&newParentSize);
1229:     PetscMalloc2(newParentSize,&newParents,newParentSize,&newChildIDs);
1230:     if (original) {
1231:       PetscInt numParents;

1233:       PetscSectionGetStorageSize(origParentSection,&numParents);
1234:       PetscMalloc1(numParents,&globParents);
1235:       ISLocalToGlobalMappingApplyBlock(original, numParents, origParents, globParents);
1236:     }
1237:     else {
1238:       globParents = origParents;
1239:     }
1240:     PetscSFBcastBegin(parentSF, MPIU_INT, globParents, newParents);
1241:     PetscSFBcastEnd(parentSF, MPIU_INT, globParents, newParents);
1242:     if (original) {
1243:       PetscFree(globParents);
1244:     }
1245:     PetscSFBcastBegin(parentSF, MPIU_INT, origChildIDs, newChildIDs);
1246:     PetscSFBcastEnd(parentSF, MPIU_INT, origChildIDs, newChildIDs);
1247:     ISGlobalToLocalMappingApplyBlock(renumbering,IS_GTOLM_MASK, newParentSize, newParents, NULL, newParents);
1248: #if PETSC_USE_DEBUG
1249:     {
1250:       PetscInt  p;
1251:       PetscBool valid = PETSC_TRUE;
1252:       for (p = 0; p < newParentSize; ++p) {
1253:         if (newParents[p] < 0) {valid = PETSC_FALSE; PetscPrintf(PETSC_COMM_SELF, "Point %d not in overlap SF\n", p);}
1254:       }
1255:       if (!valid) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Invalid global to local map");
1256:     }
1257: #endif
1258:     PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-parents_view", &flg);
1259:     if (flg) {
1260:       PetscPrintf(comm, "Serial Parent Section: \n");
1261:       PetscSectionView(origParentSection, PETSC_VIEWER_STDOUT_WORLD);
1262:       PetscPrintf(comm, "Parallel Parent Section: \n");
1263:       PetscSectionView(newParentSection, PETSC_VIEWER_STDOUT_WORLD);
1264:       PetscSFView(parentSF, NULL);
1265:     }
1266:     DMPlexSetTree(dmParallel,newParentSection,newParents,newChildIDs);
1267:     PetscSectionDestroy(&newParentSection);
1268:     PetscFree2(newParents,newChildIDs);
1269:     PetscSFDestroy(&parentSF);
1270:   }
1271:   pmesh->useAnchors = mesh->useAnchors;
1272:   return(0);
1273: }

1275: PETSC_UNUSED static PetscErrorCode DMPlexDistributeSF(DM dm, PetscSF migrationSF, DM dmParallel)
1276: {
1277:   DM_Plex               *mesh  = (DM_Plex*) dm->data;
1278:   DM_Plex               *pmesh = (DM_Plex*) (dmParallel)->data;
1279:   PetscMPIInt            rank, size;
1280:   MPI_Comm               comm;
1281:   PetscErrorCode         ierr;


1287:   /* Create point SF for parallel mesh */
1288:   PetscLogEventBegin(DMPLEX_DistributeSF,dm,0,0,0);
1289:   PetscObjectGetComm((PetscObject)dm, &comm);
1290:   MPI_Comm_rank(comm, &rank);
1291:   MPI_Comm_size(comm, &size);
1292:   {
1293:     const PetscInt *leaves;
1294:     PetscSFNode    *remotePoints, *rowners, *lowners;
1295:     PetscInt        numRoots, numLeaves, numGhostPoints = 0, p, gp, *ghostPoints;
1296:     PetscInt        pStart, pEnd;

1298:     DMPlexGetChart(dmParallel, &pStart, &pEnd);
1299:     PetscSFGetGraph(migrationSF, &numRoots, &numLeaves, &leaves, NULL);
1300:     PetscMalloc2(numRoots,&rowners,numLeaves,&lowners);
1301:     for (p=0; p<numRoots; p++) {
1302:       rowners[p].rank  = -1;
1303:       rowners[p].index = -1;
1304:     }
1305:     PetscSFBcastBegin(migrationSF, MPIU_2INT, rowners, lowners);
1306:     PetscSFBcastEnd(migrationSF, MPIU_2INT, rowners, lowners);
1307:     for (p = 0; p < numLeaves; ++p) {
1308:       if (lowners[p].rank < 0 || lowners[p].rank == rank) { /* Either put in a bid or we know we own it */
1309:         lowners[p].rank  = rank;
1310:         lowners[p].index = leaves ? leaves[p] : p;
1311:       } else if (lowners[p].rank >= 0) { /* Point already claimed, so flag it so that MAXLOC does not listen to us */
1312:         lowners[p].rank  = -2;
1313:         lowners[p].index = -2;
1314:       }
1315:     }
1316:     for (p=0; p<numRoots; p++) { /* Roots must not participate in the reduction; flag them so that MAXLOC ignores them */
1317:       rowners[p].rank  = -3;
1318:       rowners[p].index = -3;
1319:     }
1320:     PetscSFReduceBegin(migrationSF, MPIU_2INT, lowners, rowners, MPI_MAXLOC);
1321:     PetscSFReduceEnd(migrationSF, MPIU_2INT, lowners, rowners, MPI_MAXLOC);
1322:     PetscSFBcastBegin(migrationSF, MPIU_2INT, rowners, lowners);
1323:     PetscSFBcastEnd(migrationSF, MPIU_2INT, rowners, lowners);
1324:     for (p = 0; p < numLeaves; ++p) {
1325:       if (lowners[p].rank < 0 || lowners[p].index < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Cell partition corrupt: point not claimed");
1326:       if (lowners[p].rank != rank) ++numGhostPoints;
1327:     }
1328:     PetscMalloc1(numGhostPoints, &ghostPoints);
1329:     PetscMalloc1(numGhostPoints, &remotePoints);
1330:     for (p = 0, gp = 0; p < numLeaves; ++p) {
1331:       if (lowners[p].rank != rank) {
1332:         ghostPoints[gp]        = leaves ? leaves[p] : p;
1333:         remotePoints[gp].rank  = lowners[p].rank;
1334:         remotePoints[gp].index = lowners[p].index;
1335:         ++gp;
1336:       }
1337:     }
1338:     PetscFree2(rowners,lowners);
1339:     PetscSFSetGraph((dmParallel)->sf, pEnd - pStart, numGhostPoints, ghostPoints, PETSC_OWN_POINTER, remotePoints, PETSC_OWN_POINTER);
1340:     PetscSFSetFromOptions((dmParallel)->sf);
1341:   }
1342:   pmesh->useCone    = mesh->useCone;
1343:   pmesh->useClosure = mesh->useClosure;
1344:   PetscLogEventEnd(DMPLEX_DistributeSF,dm,0,0,0);
1345:   return(0);
1346: }

1348: /*@C
1349:   DMPlexCreatePointSF - Build a point SF from an SF describing a point migration

1351:   Input Parameters:
1352: + dm          - The source DMPlex object
1353: . migrationSF - The star forest that describes the parallel point remapping
1354: - ownership   - Flag causing a vote to determine point ownership

1356:   Output Parameter:
1357: . pointSF     - The star forest describing the point overlap in the remapped DM

1359:   Level: developer

1361: .seealso: DMPlexDistribute(), DMPlexDistributeOverlap()
1362: @*/
1363: PetscErrorCode DMPlexCreatePointSF(DM dm, PetscSF migrationSF, PetscBool ownership, PetscSF *pointSF)
1364: {
1365:   PetscMPIInt        rank;
1366:   PetscInt           p, nroots, nleaves, idx, npointLeaves;
1367:   PetscInt          *pointLocal;
1368:   const PetscInt    *leaves;
1369:   const PetscSFNode *roots;
1370:   PetscSFNode       *rootNodes, *leafNodes, *pointRemote;
1371:   PetscErrorCode     ierr;

1375:   MPI_Comm_rank(PetscObjectComm((PetscObject) dm), &rank);

1377:   PetscSFGetGraph(migrationSF, &nroots, &nleaves, &leaves, &roots);
1378:   PetscMalloc2(nroots, &rootNodes, nleaves, &leafNodes);
1379:   if (ownership) {
1380:     /* Point ownership vote: the process with the highest rank owns shared points */
1381:     for (p = 0; p < nleaves; ++p) {
1382:       /* Either put in a bid or we know we own it */
1383:       leafNodes[p].rank  = rank;
1384:       leafNodes[p].index = p;
1385:     }
1386:     for (p = 0; p < nroots; p++) {
1387:       /* Roots must not participate in the reduction; flag them so that MAXLOC ignores them */
1388:       rootNodes[p].rank  = -3;
1389:       rootNodes[p].index = -3;
1390:     }
1391:     PetscSFReduceBegin(migrationSF, MPIU_2INT, leafNodes, rootNodes, MPI_MAXLOC);
1392:     PetscSFReduceEnd(migrationSF, MPIU_2INT, leafNodes, rootNodes, MPI_MAXLOC);
1393:   } else {
1394:     for (p = 0; p < nroots; p++) {
1395:       rootNodes[p].index = -1;
1396:       rootNodes[p].rank = rank;
1397:     };
1398:     for (p = 0; p < nleaves; p++) {
1399:       /* Write new local id into old location */
1400:       if (roots[p].rank == rank) {
1401:         rootNodes[roots[p].index].index = leaves ? leaves[p] : p;
1402:       }
1403:     }
1404:   }
1405:   PetscSFBcastBegin(migrationSF, MPIU_2INT, rootNodes, leafNodes);
1406:   PetscSFBcastEnd(migrationSF, MPIU_2INT, rootNodes, leafNodes);

1408:   for (npointLeaves = 0, p = 0; p < nleaves; p++) {if (leafNodes[p].rank != rank) npointLeaves++;}
1409:   PetscMalloc1(npointLeaves, &pointLocal);
1410:   PetscMalloc1(npointLeaves, &pointRemote);
1411:   for (idx = 0, p = 0; p < nleaves; p++) {
1412:     if (leafNodes[p].rank != rank) {
1413:       pointLocal[idx] = p;
1414:       pointRemote[idx] = leafNodes[p];
1415:       idx++;
1416:     }
1417:   }
1418:   PetscSFCreate(PetscObjectComm((PetscObject) dm), pointSF);
1419:   PetscSFSetFromOptions(*pointSF);
1420:   PetscSFSetGraph(*pointSF, nleaves, npointLeaves, pointLocal, PETSC_OWN_POINTER, pointRemote, PETSC_OWN_POINTER);
1421:   PetscFree2(rootNodes, leafNodes);
1422:   return(0);
1423: }
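The ownership vote above reduces (rank, index) bids onto the roots with MPI_MAXLOC over MPIU_2INT, so the highest-rank bidder wins. Below is a minimal, self-contained sketch of just that mechanism; the one-leaf-per-rank SF it builds is made up purely to exercise the reduction.

#include <petscsf.h>

int main(int argc, char **argv)
{
  PetscSF        sf;
  PetscSFNode    remote, bid, owner;
  PetscMPIInt    rank;
  PetscErrorCode ierr;

  ierr = PetscInitialize(&argc, &argv, NULL, NULL);if (ierr) return ierr;
  ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &rank);CHKERRQ(ierr);
  /* Every rank owns one root; every rank has one leaf referencing root 0 on rank 0 */
  remote.rank  = 0;
  remote.index = 0;
  ierr = PetscSFCreate(PETSC_COMM_WORLD, &sf);CHKERRQ(ierr);
  ierr = PetscSFSetGraph(sf, 1, 1, NULL, PETSC_COPY_VALUES, &remote, PETSC_COPY_VALUES);CHKERRQ(ierr);
  /* Each leaf bids with its own rank; the root is flagged so its initial value cannot win */
  bid.rank    = rank; bid.index   = 0;
  owner.rank  = -3;   owner.index = -3;
  ierr = PetscSFReduceBegin(sf, MPIU_2INT, &bid, &owner, MPI_MAXLOC);CHKERRQ(ierr);
  ierr = PetscSFReduceEnd(sf, MPIU_2INT, &bid, &owner, MPI_MAXLOC);CHKERRQ(ierr);
  if (!rank) {ierr = PetscPrintf(PETSC_COMM_SELF, "root 0 is owned by rank %d\n", (int) owner.rank);CHKERRQ(ierr);}
  ierr = PetscSFDestroy(&sf);CHKERRQ(ierr);
  ierr = PetscFinalize();
  return ierr;
}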

1425: /*@C
1426:   DMPlexMigrate  - Migrates internal DM data over the supplied star forest

1428:   Input Parameters:
1429: + dm       - The source DMPlex object
1430: - sf       - The star forest communication context describing the migration pattern

1432:   Output Parameter:
1433: . targetDM - The target DMPlex object

1435:   Level: intermediate

1437: .seealso: DMPlexDistribute(), DMPlexDistributeOverlap()
1438: @*/
1439: PetscErrorCode DMPlexMigrate(DM dm, PetscSF sf, DM targetDM)
1440: {
1441:   MPI_Comm               comm;
1442:   PetscInt               dim, nroots;
1443:   PetscSF                sfPoint;
1444:   ISLocalToGlobalMapping ltogMigration;
1445:   ISLocalToGlobalMapping ltogOriginal = NULL;
1446:   PetscBool              flg;
1447:   PetscErrorCode         ierr;

1451:   PetscLogEventBegin(DMPLEX_Migrate, dm, 0, 0, 0);
1452:   PetscObjectGetComm((PetscObject) dm, &comm);
1453:   DMGetDimension(dm, &dim);
1454:   DMSetDimension(targetDM, dim);

1456:   /* Check for a one-to-all distribution pattern */
1457:   DMGetPointSF(dm, &sfPoint);
1458:   PetscSFGetGraph(sfPoint, &nroots, NULL, NULL, NULL);
1459:   if (nroots >= 0) {
1460:     IS                     isOriginal;
1461:     PetscInt               n, size, nleaves;
1462:     PetscInt              *numbering_orig, *numbering_new;
1463:     /* Get the original point numbering */
1464:     DMPlexCreatePointNumbering(dm, &isOriginal);
1465:     ISLocalToGlobalMappingCreateIS(isOriginal, &ltogOriginal);
1466:     ISLocalToGlobalMappingGetSize(ltogOriginal, &size);
1467:     ISLocalToGlobalMappingGetBlockIndices(ltogOriginal, (const PetscInt**)&numbering_orig);
1468:     /* Convert to positive global numbers */
1469:     for (n=0; n<size; n++) {if (numbering_orig[n] < 0) numbering_orig[n] = -(numbering_orig[n]+1);}
1470:     /* Derive the new local-to-global mapping from the old one */
1471:     PetscSFGetGraph(sf, NULL, &nleaves, NULL, NULL);
1472:     PetscMalloc1(nleaves, &numbering_new);
1473:     PetscSFBcastBegin(sf, MPIU_INT, (PetscInt *) numbering_orig, numbering_new);
1474:     PetscSFBcastEnd(sf, MPIU_INT, (PetscInt *) numbering_orig, numbering_new);
1475:     ISLocalToGlobalMappingCreate(comm, 1, nleaves, (const PetscInt*) numbering_new, PETSC_OWN_POINTER, &ltogMigration);
1476:     ISLocalToGlobalMappingRestoreBlockIndices(ltogOriginal, (const PetscInt**)&numbering_orig);
1477:     ISDestroy(&isOriginal);
1478:   } else {
1479:     /* One-to-all distribution pattern: We can derive LToG from SF */
1480:     ISLocalToGlobalMappingCreateSF(sf, 0, &ltogMigration);
1481:   }
1482:   PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-partition_view", &flg);
1483:   if (flg) {
1484:     PetscPrintf(comm, "Point renumbering for DM migration:\n");
1485:     ISLocalToGlobalMappingView(ltogMigration, NULL);
1486:   }
1487:   /* Migrate DM data to target DM */
1488:   DMPlexDistributeCones(dm, sf, ltogOriginal, ltogMigration, targetDM);
1489:   DMPlexDistributeLabels(dm, sf, targetDM);
1490:   DMPlexDistributeCoordinates(dm, sf, targetDM);
1491:   DMPlexDistributeSetupHybrid(dm, sf, ltogMigration, targetDM);
1492:   DMPlexDistributeSetupTree(dm, sf, ltogOriginal, ltogMigration, targetDM);
1493:   ISLocalToGlobalMappingDestroy(&ltogOriginal);
1494:   ISLocalToGlobalMappingDestroy(&ltogMigration);
1495:   PetscLogEventEnd(DMPLEX_Migrate, dm, 0, 0, 0);
1496:   return(0);
1497: }
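A sketch of how DMPlexMigrate() combines with DMPlexCreatePointSF(), mirroring the calls DMPlexDistribute() makes below. It assumes a migration SF (sfMigration) built elsewhere, e.g. by DMPlexPartitionLabelCreateSF(); MigrateAndBuildPointSF is a hypothetical helper name.

#include <petscdmplex.h>

static PetscErrorCode MigrateAndBuildPointSF(DM dm, PetscSF sfMigration, DM dmTarget)
{
  PetscSF        sfPoint;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  /* Move cones, labels, coordinates, etc. onto the target DM */
  ierr = DMPlexMigrate(dm, sfMigration, dmTarget);CHKERRQ(ierr);
  /* PETSC_TRUE requests the MAXLOC ownership vote for shared points */
  ierr = DMPlexCreatePointSF(dmTarget, sfMigration, PETSC_TRUE, &sfPoint);CHKERRQ(ierr);
  ierr = DMSetPointSF(dmTarget, sfPoint);CHKERRQ(ierr);
  ierr = PetscSFDestroy(&sfPoint);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}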

1499: /*@C
1500:   DMPlexDistribute - Distributes the mesh and any associated sections.

1502:   Collective on dm

1504:   Input Parameters:
1505: + dm  - The original DMPlex object
1506: - overlap - The overlap of partitions (0 is the default)

1508:   Output Parameters:
1509: + sf - The PetscSF used for point distribution
1510: - dmParallel - The distributed DMPlex object, or NULL

1512:   Note: If the mesh was not distributed, dmParallel is set to NULL.

1514:   The user can control the definition of adjacency for the mesh using DMPlexSetAdjacencyUseCone() and
1515:   DMPlexSetAdjacencyUseClosure(). They should choose the combination appropriate for the function
1516:   representation on the mesh.

1518:   Level: intermediate

1520: .keywords: mesh, elements
1521: .seealso: DMPlexCreate(), DMPlexDistributeByFace(), DMPlexSetAdjacencyUseCone(), DMPlexSetAdjacencyUseClosure()
1522: @*/
1523: PetscErrorCode DMPlexDistribute(DM dm, PetscInt overlap, PetscSF *sf, DM *dmParallel)
1524: {
1525:   MPI_Comm               comm;
1526:   PetscPartitioner       partitioner;
1527:   IS                     cellPart;
1528:   PetscSection           cellPartSection;
1529:   DM                     dmCoord;
1530:   DMLabel                lblPartition, lblMigration;
1531:   PetscSF                sfProcess, sfMigration, sfStratified, sfPoint;
1532:   PetscBool              flg;
1533:   PetscMPIInt            rank, size, p;
1534:   PetscErrorCode         ierr;


1541:   PetscLogEventBegin(DMPLEX_Distribute,dm,0,0,0);
1542:   PetscObjectGetComm((PetscObject)dm,&comm);
1543:   MPI_Comm_rank(comm, &rank);
1544:   MPI_Comm_size(comm, &size);

1546:   *dmParallel = NULL;
1547:   if (size == 1) {
1548:     PetscLogEventEnd(DMPLEX_Distribute,dm,0,0,0);
1549:     return(0);
1550:   }

1552:   /* Create cell partition */
1553:   PetscLogEventBegin(PETSCPARTITIONER_Partition,dm,0,0,0);
1554:   PetscSectionCreate(comm, &cellPartSection);
1555:   DMPlexGetPartitioner(dm, &partitioner);
1556:   PetscPartitionerPartition(partitioner, dm, cellPartSection, &cellPart);
1557:   {
1558:     /* Convert partition to DMLabel */
1559:     PetscInt proc, pStart, pEnd, npoints, poffset;
1560:     const PetscInt *points;
1561:     DMLabelCreate("Point Partition", &lblPartition);
1562:     ISGetIndices(cellPart, &points);
1563:     PetscSectionGetChart(cellPartSection, &pStart, &pEnd);
1564:     for (proc = pStart; proc < pEnd; proc++) {
1565:       PetscSectionGetDof(cellPartSection, proc, &npoints);
1566:       PetscSectionGetOffset(cellPartSection, proc, &poffset);
1567:       for (p = poffset; p < poffset+npoints; p++) {
1568:         DMLabelSetValue(lblPartition, points[p], proc);
1569:       }
1570:     }
1571:     ISRestoreIndices(cellPart, &points);
1572:   }
1573:   DMPlexPartitionLabelClosure(dm, lblPartition);
1574:   {
1575:     /* Build a global process SF */
1576:     PetscSFNode *remoteProc;
1577:     PetscMalloc1(size, &remoteProc);
1578:     for (p = 0; p < size; ++p) {
1579:       remoteProc[p].rank  = p;
1580:       remoteProc[p].index = rank;
1581:     }
1582:     PetscSFCreate(comm, &sfProcess);
1583:     PetscObjectSetName((PetscObject) sfProcess, "Process SF");
1584:     PetscSFSetGraph(sfProcess, size, size, NULL, PETSC_OWN_POINTER, remoteProc, PETSC_OWN_POINTER);
1585:   }
1586:   DMLabelCreate("Point migration", &lblMigration);
1587:   DMPlexPartitionLabelInvert(dm, lblPartition, sfProcess, lblMigration);
1588:   DMPlexPartitionLabelCreateSF(dm, lblMigration, &sfMigration);
1589:   /* Stratify the SF in case we are migrating an already parallel plex */
1590:   DMPlexStratifyMigrationSF(dm, sfMigration, &sfStratified);
1591:   PetscSFDestroy(&sfMigration);
1592:   sfMigration = sfStratified;
1593:   PetscLogEventEnd(PETSCPARTITIONER_Partition,dm,0,0,0);
1594:   PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-partition_view", &flg);
1595:   if (flg) {
1596:     DMLabelView(lblPartition, PETSC_VIEWER_STDOUT_WORLD);
1597:     PetscSFView(sfMigration, PETSC_VIEWER_STDOUT_WORLD);
1598:   }

1600:   /* Create non-overlapping parallel DM and migrate internal data */
1601:   DMPlexCreate(comm, dmParallel);
1602:   PetscObjectSetName((PetscObject) *dmParallel, "Parallel Mesh");
1603:   DMPlexMigrate(dm, sfMigration, *dmParallel);

1605:   /* Build the point SF without overlap */
1606:   DMPlexCreatePointSF(*dmParallel, sfMigration, PETSC_TRUE, &sfPoint);
1607:   DMSetPointSF(*dmParallel, sfPoint);
1608:   DMGetCoordinateDM(*dmParallel, &dmCoord);
1609:   if (dmCoord) {DMSetPointSF(dmCoord, sfPoint);}
1610:   if (flg) {PetscSFView(sfPoint, PETSC_VIEWER_STDOUT_WORLD);}

1612:   if (overlap > 0) {
1613:     DM                 dmOverlap;
1614:     PetscInt           nroots, nleaves;
1615:     PetscSFNode       *newRemote;
1616:     const PetscSFNode *oldRemote;
1617:     PetscSF            sfOverlap, sfOverlapPoint;
1618:     /* Add the partition overlap to the distributed DM */
1619:     DMPlexDistributeOverlap(*dmParallel, overlap, &sfOverlap, &dmOverlap);
1620:     DMDestroy(dmParallel);
1621:     *dmParallel = dmOverlap;
1622:     if (flg) {
1623:       PetscPrintf(comm, "Overlap Migration SF:\n");
1624:       PetscSFView(sfOverlap, NULL);
1625:     }

1627:     /* Re-map the migration SF to establish the full migration pattern */
1628:     PetscSFGetGraph(sfMigration, &nroots, NULL, NULL, &oldRemote);
1629:     PetscSFGetGraph(sfOverlap, NULL, &nleaves, NULL, NULL);
1630:     PetscMalloc1(nleaves, &newRemote);
1631:     PetscSFBcastBegin(sfOverlap, MPIU_2INT, oldRemote, newRemote);
1632:     PetscSFBcastEnd(sfOverlap, MPIU_2INT, oldRemote, newRemote);
1633:     PetscSFCreate(comm, &sfOverlapPoint);
1634:     PetscSFSetGraph(sfOverlapPoint, nroots, nleaves, NULL, PETSC_OWN_POINTER, newRemote, PETSC_OWN_POINTER);
1635:     PetscSFDestroy(&sfOverlap);
1636:     PetscSFDestroy(&sfMigration);
1637:     sfMigration = sfOverlapPoint;
1638:   }
1639:   /* Cleanup Partition */
1640:   PetscSFDestroy(&sfProcess);
1641:   DMLabelDestroy(&lblPartition);
1642:   DMLabelDestroy(&lblMigration);
1643:   PetscSectionDestroy(&cellPartSection);
1644:   ISDestroy(&cellPart);
1645:   /* Copy BC */
1646:   DMCopyBoundary(dm, *dmParallel);
1647:   /* Create sfNatural */
1648:   if (dm->useNatural) {
1649:     PetscSection section;

1651:     DMGetDefaultSection(dm, &section);
1652:     DMPlexCreateGlobalToNaturalSF(*dmParallel, section, sfMigration, &(*dmParallel)->sfNatural);
1653:   }
1654:   /* Cleanup */
1655:   if (sf) {*sf = sfMigration;}
1656:   else    {PetscSFDestroy(&sfMigration);}
1657:   PetscSFDestroy(&sfPoint);
1658:   PetscLogEventEnd(DMPLEX_Distribute,dm,0,0,0);
1659:   return(0);
1660: }
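A sketch of the typical user-level call sequence; DistributeMesh is a hypothetical helper, and the adjacency setting shown is only one common choice and should match the discretization in use.

#include <petscdmplex.h>

static PetscErrorCode DistributeMesh(DM *dm)
{
  DM             dmDist = NULL;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  /* Choose the adjacency relation, then distribute with no overlap */
  ierr = DMPlexSetAdjacencyUseCone(*dm, PETSC_FALSE);CHKERRQ(ierr);
  ierr = DMPlexSetAdjacencyUseClosure(*dm, PETSC_TRUE);CHKERRQ(ierr);
  ierr = DMPlexDistribute(*dm, 0, NULL, &dmDist);CHKERRQ(ierr);
  if (dmDist) { /* NULL on a serial run: keep the original dm */
    ierr = DMDestroy(dm);CHKERRQ(ierr);
    *dm  = dmDist;
  }
  PetscFunctionReturn(0);
}

Passing a nonzero overlap distributes and adds ghost cells in one call; requesting the migration SF through the sf argument is only needed if, for example, a global-to-natural map will be built later.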

1662: /*@C
1663:   DMPlexDistributeOverlap - Add partition overlap to a distributed non-overlapping DM.

1665:   Collective on dm

1667:   Input Parameters:
1668: + dm  - The non-overlapping distributed DMPlex object
1669: - overlap - The overlap of partitions (0 is the default)

1671:   Output Parameters:
1672: + sf - The PetscSF used for point distribution
1673: - dmOverlap - The overlapping distributed DMPlex object, or NULL

1675:   Note: If the mesh was not distributed, dmOverlap is set to NULL.

1677:   The user can control the definition of adjacency for the mesh using DMPlexSetAdjacencyUseCone() and
1678:   DMPlexSetAdjacencyUseClosure(). They should choose the combination appropriate for the function
1679:   representation on the mesh.

1681:   Level: intermediate

1683: .keywords: mesh, elements
1684: .seealso: DMPlexCreate(), DMPlexDistributeByFace(), DMPlexSetAdjacencyUseCone(), DMPlexSetAdjacencyUseClosure()
1685: @*/
1686: PetscErrorCode DMPlexDistributeOverlap(DM dm, PetscInt overlap, PetscSF *sf, DM *dmOverlap)
1687: {
1688:   MPI_Comm               comm;
1689:   PetscMPIInt            rank;
1690:   PetscSection           rootSection, leafSection;
1691:   IS                     rootrank, leafrank;
1692:   DM                     dmCoord;
1693:   DMLabel                lblOverlap;
1694:   PetscSF                sfOverlap, sfStratified, sfPoint;
1695:   PetscErrorCode         ierr;


1702:   PetscLogEventBegin(DMPLEX_DistributeOverlap, dm, 0, 0, 0);
1703:   PetscObjectGetComm((PetscObject)dm,&comm);
1704:   MPI_Comm_rank(comm, &rank);

1706:   /* Compute point overlap with neighbouring processes on the distributed DM */
1707:   PetscLogEventBegin(PETSCPARTITIONER_Partition,dm,0,0,0);
1708:   PetscSectionCreate(comm, &rootSection);
1709:   PetscSectionCreate(comm, &leafSection);
1710:   DMPlexDistributeOwnership(dm, rootSection, &rootrank, leafSection, &leafrank);
1711:   DMPlexCreateOverlap(dm, overlap, rootSection, rootrank, leafSection, leafrank, &lblOverlap);
1712:   /* Convert overlap label to stratified migration SF */
1713:   DMPlexPartitionLabelCreateSF(dm, lblOverlap, &sfOverlap);
1714:   DMPlexStratifyMigrationSF(dm, sfOverlap, &sfStratified);
1715:   PetscSFDestroy(&sfOverlap);
1716:   sfOverlap = sfStratified;
1717:   PetscObjectSetName((PetscObject) sfOverlap, "Overlap SF");
1718:   PetscSFSetFromOptions(sfOverlap);

1720:   PetscSectionDestroy(&rootSection);
1721:   PetscSectionDestroy(&leafSection);
1722:   ISDestroy(&rootrank);
1723:   ISDestroy(&leafrank);
1724:   PetscLogEventEnd(PETSCPARTITIONER_Partition,dm,0,0,0);

1726:   /* Build the overlapping DM */
1727:   DMPlexCreate(comm, dmOverlap);
1728:   PetscObjectSetName((PetscObject) *dmOverlap, "Parallel Mesh");
1729:   DMPlexMigrate(dm, sfOverlap, *dmOverlap);
1730:   /* Build the new point SF */
1731:   DMPlexCreatePointSF(*dmOverlap, sfOverlap, PETSC_FALSE, &sfPoint);
1732:   DMSetPointSF(*dmOverlap, sfPoint);
1733:   DMGetCoordinateDM(*dmOverlap, &dmCoord);
1734:   if (dmCoord) {DMSetPointSF(dmCoord, sfPoint);}
1735:   PetscSFDestroy(&sfPoint);
1736:   /* Cleanup overlap partition */
1737:   DMLabelDestroy(&lblOverlap);
1738:   if (sf) *sf = sfOverlap;
1739:   else    {PetscSFDestroy(&sfOverlap);}
1740:   PetscLogEventEnd(DMPLEX_DistributeOverlap, dm, 0, 0, 0);
1741:   return(0);
1742: }
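A sketch of adding one level of overlap to an already-distributed mesh; AddOneLevelOfOverlap is a hypothetical helper and dmDist is assumed to come from DMPlexDistribute() with overlap 0.

#include <petscdmplex.h>

static PetscErrorCode AddOneLevelOfOverlap(DM *dmDist)
{
  DM             dmOverlap = NULL;
  PetscSF        sfOverlap = NULL;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = DMPlexDistributeOverlap(*dmDist, 1, &sfOverlap, &dmOverlap);CHKERRQ(ierr);
  if (dmOverlap) {
    ierr    = DMDestroy(dmDist);CHKERRQ(ierr);
    *dmDist = dmOverlap;
  }
  ierr = PetscSFDestroy(&sfOverlap);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}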

1744: /*@C
1745:   DMPlexGetGatherDM - Get a copy of the DMPlex that gathers all points on the
1746:   root process of the original's communicator.

1748:   Input Parameter:
1749: . dm - the original DMPlex object

1751:   Output Parameter:
1752: . gatherMesh - the gathered DM object, or NULL

1754:   Level: intermediate

1756: .keywords: mesh
1757: .seealso: DMPlexDistribute(), DMPlexGetRedundantDM()
1758: @*/
1759: PetscErrorCode DMPlexGetGatherDM(DM dm, DM * gatherMesh)
1760: {
1761:   MPI_Comm       comm;
1762:   PetscMPIInt    size;
1763:   PetscPartitioner oldPart, gatherPart;

1768:   comm = PetscObjectComm((PetscObject)dm);
1769:   MPI_Comm_size(comm,&size);
1770:   *gatherMesh = NULL;
1771:   if (size == 1) return(0);
1772:   DMPlexGetPartitioner(dm,&oldPart);
1773:   PetscObjectReference((PetscObject)oldPart);
1774:   PetscPartitionerCreate(comm,&gatherPart);
1775:   PetscPartitionerSetType(gatherPart,PETSCPARTITIONERGATHER);
1776:   DMPlexSetPartitioner(dm,gatherPart);
1777:   DMPlexDistribute(dm,0,NULL,gatherMesh);
1778:   DMPlexSetPartitioner(dm,oldPart);
1779:   PetscPartitionerDestroy(&gatherPart);
1780:   PetscPartitionerDestroy(&oldPart);
1781:   return(0);
1782: }
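A sketch of gathering a distributed mesh onto rank 0; ReportGatheredChart is a hypothetical helper and the print is purely illustrative.

#include <petscdmplex.h>

static PetscErrorCode ReportGatheredChart(DM dm)
{
  DM             gatherMesh = NULL;
  PetscMPIInt    rank;
  PetscInt       pStart, pEnd;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MPI_Comm_rank(PetscObjectComm((PetscObject) dm), &rank);CHKERRQ(ierr);
  ierr = DMPlexGetGatherDM(dm, &gatherMesh);CHKERRQ(ierr);
  if (gatherMesh) { /* NULL on a serial run */
    /* All points of the original mesh live on rank 0; other ranks get an empty chart */
    ierr = DMPlexGetChart(gatherMesh, &pStart, &pEnd);CHKERRQ(ierr);
    ierr = PetscPrintf(PETSC_COMM_SELF, "[%d] gathered chart: [%D, %D)\n", (int) rank, pStart, pEnd);CHKERRQ(ierr);
    ierr = DMDestroy(&gatherMesh);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}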

1784: /*@C
1785:   DMPlexGetRedundantDM - Get a copy of the DMPlex that is completely copied on each process.

1787:   Input Parameter:
1788: . dm - the original DMPlex object

1790:   Output Parameter:
1791: . redundantMesh - the redundant DM object, or NULL

1793:   Level: intermediate

1795: .keywords: mesh
1796: .seealso: DMPlexDistribute(), DMPlexGetGatherDM()
1797: @*/
1798: PetscErrorCode DMPlexGetRedundantDM(DM dm, DM * redundantMesh)
1799: {
1800:   MPI_Comm       comm;
1801:   PetscMPIInt    size, rank;
1802:   PetscInt       pStart, pEnd, p;
1803:   PetscInt       numPoints = -1;
1804:   PetscSF        migrationSF, sfPoint;
1805:   DM             gatherDM, dmCoord;
1806:   PetscSFNode    *points;

1811:   comm = PetscObjectComm((PetscObject)dm);
1812:   MPI_Comm_size(comm,&size);
1813:   *redundantMesh = NULL;
1814:   if (size == 1) {
1815:     PetscObjectReference((PetscObject) dm);
1816:     *redundantMesh = dm;
1817:     return(0);
1818:   }
1819:   DMPlexGetGatherDM(dm,&gatherDM);
1820:   if (!gatherDM) return(0);
1821:   MPI_Comm_rank(comm,&rank);
1822:   DMPlexGetChart(gatherDM,&pStart,&pEnd);
1823:   numPoints = pEnd - pStart;
1824:   MPI_Bcast(&numPoints,1,MPIU_INT,0,comm);
1825:   PetscMalloc1(numPoints,&points);
1826:   PetscSFCreate(comm,&migrationSF);
1827:   for (p = 0; p < numPoints; p++) {
1828:     points[p].index = p;
1829:     points[p].rank  = 0;
1830:   }
1831:   PetscSFSetGraph(migrationSF,pEnd-pStart,numPoints,NULL,PETSC_OWN_POINTER,points,PETSC_OWN_POINTER);
1832:   DMPlexCreate(comm, redundantMesh);
1833:   PetscObjectSetName((PetscObject) *redundantMesh, "Redundant Mesh");
1834:   DMPlexMigrate(gatherDM, migrationSF, *redundantMesh);
1835:   DMPlexCreatePointSF(*redundantMesh, migrationSF, PETSC_FALSE, &sfPoint);
1836:   DMSetPointSF(*redundantMesh, sfPoint);
1837:   DMGetCoordinateDM(*redundantMesh, &dmCoord);
1838:   if (dmCoord) {DMSetPointSF(dmCoord, sfPoint);}
1839:   PetscSFDestroy(&sfPoint);
1840:   PetscSFDestroy(&migrationSF);
1841:   DMDestroy(&gatherDM);
1842:   return(0);
1843: }
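A sketch of creating a redundant copy on every rank; UseRedundantCopy is a hypothetical helper name.

#include <petscdmplex.h>

static PetscErrorCode UseRedundantCopy(DM dm)
{
  DM             dmRedundant = NULL;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = DMPlexGetRedundantDM(dm, &dmRedundant);CHKERRQ(ierr);
  if (dmRedundant) {
    /* Every rank now holds the complete mesh; on a serial run dmRedundant is the
       original dm with an extra reference, so the DMDestroy() below is still correct */
    ierr = DMDestroy(&dmRedundant);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}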