Actual source code: plexdistribute.c

petsc-master 2016-08-26
  1: #include <petsc/private/dmpleximpl.h>    /*I      "petscdmplex.h"   I*/
  2: #include <petsc/private/dmlabelimpl.h>

  6: /*@
  7:   DMPlexSetAdjacencyUseCone - Define adjacency in the mesh using either the cone or the support first

  9:   Input Parameters:
 10: + dm      - The DM object
 11: - useCone - Flag to use the cone first

 13:   Level: intermediate

 15:   Notes:
 16: $     FEM:   Two points p and q are adjacent if q \in closure(star(p)),   useCone = PETSC_FALSE, useClosure = PETSC_TRUE
 17: $     FVM:   Two points p and q are adjacent if q \in support(p+cone(p)), useCone = PETSC_TRUE,  useClosure = PETSC_FALSE
 18: $     FVM++: Two points p and q are adjacent if q \in star(closure(p)),   useCone = PETSC_TRUE,  useClosure = PETSC_TRUE

 20: .seealso: DMPlexGetAdjacencyUseCone(), DMPlexSetAdjacencyUseClosure(), DMPlexGetAdjacencyUseClosure(), DMPlexDistribute(), DMPlexPreallocateOperator()
 21: @*/
 22: PetscErrorCode DMPlexSetAdjacencyUseCone(DM dm, PetscBool useCone)
 23: {
 24:   DM_Plex *mesh = (DM_Plex *) dm->data;

 28:   mesh->useCone = useCone;
 29:   return(0);
 30: }

 34: /*@
 35:   DMPlexGetAdjacencyUseCone - Query whether adjacency in the mesh uses the cone or the support first

 37:   Input Parameter:
 38: . dm      - The DM object

 40:   Output Parameter:
 41: . useCone - Flag to use the cone first

 43:   Level: intermediate

 45:   Notes:
 46: $     FEM:   Two points p and q are adjacent if q \in closure(star(p)),   useCone = PETSC_FALSE, useClosure = PETSC_TRUE
 47: $     FVM:   Two points p and q are adjacent if q \in support(p+cone(p)), useCone = PETSC_TRUE,  useClosure = PETSC_FALSE
 48: $     FVM++: Two points p and q are adjacent if q \in star(closure(p)),   useCone = PETSC_TRUE,  useClosure = PETSC_TRUE

 50: .seealso: DMPlexSetAdjacencyUseCone(), DMPlexSetAdjacencyUseClosure(), DMPlexGetAdjacencyUseClosure(), DMPlexDistribute(), DMPlexPreallocateOperator()
 51: @*/
 52: PetscErrorCode DMPlexGetAdjacencyUseCone(DM dm, PetscBool *useCone)
 53: {
 54:   DM_Plex *mesh = (DM_Plex *) dm->data;

 59:   *useCone = mesh->useCone;
 60:   return(0);
 61: }

 65: /*@
 66:   DMPlexSetAdjacencyUseClosure - Define adjacency in the mesh using the transitive closure

 68:   Input Parameters:
 69: + dm      - The DM object
 70: - useClosure - Flag to use the closure

 72:   Level: intermediate

 74:   Notes:
 75: $     FEM:   Two points p and q are adjacent if q \in closure(star(p)),   useCone = PETSC_FALSE, useClosure = PETSC_TRUE
 76: $     FVM:   Two points p and q are adjacent if q \in support(p+cone(p)), useCone = PETSC_TRUE,  useClosure = PETSC_FALSE
 77: $     FVM++: Two points p and q are adjacent if q \in star(closure(p)),   useCone = PETSC_TRUE,  useClosure = PETSC_TRUE

 79: .seealso: DMPlexGetAdjacencyUseClosure(), DMPlexSetAdjacencyUseCone(), DMPlexGetAdjacencyUseCone(), DMPlexDistribute(), DMPlexPreallocateOperator()
 80: @*/
 81: PetscErrorCode DMPlexSetAdjacencyUseClosure(DM dm, PetscBool useClosure)
 82: {
 83:   DM_Plex *mesh = (DM_Plex *) dm->data;

 87:   mesh->useClosure = useClosure;
 88:   return(0);
 89: }

 93: /*@
 94:   DMPlexGetAdjacencyUseClosure - Query whether adjacency in the mesh uses the transitive closure

 96:   Input Parameter:
 97: . dm      - The DM object

 99:   Output Parameter:
100: . useClosure - Flag to use the closure

102:   Level: intermediate

104:   Notes:
105: $     FEM:   Two points p and q are adjacent if q \in closure(star(p)),   useCone = PETSC_FALSE, useClosure = PETSC_TRUE
106: $     FVM:   Two points p and q are adjacent if q \in support(p+cone(p)), useCone = PETSC_TRUE,  useClosure = PETSC_FALSE
107: $     FVM++: Two points p and q are adjacent if q \in star(closure(p)),   useCone = PETSC_TRUE,  useClosure = PETSC_TRUE

109: .seealso: DMPlexSetAdjacencyUseClosure(), DMPlexSetAdjacencyUseCone(), DMPlexGetAdjacencyUseCone(), DMPlexDistribute(), DMPlexPreallocateOperator()
110: @*/
111: PetscErrorCode DMPlexGetAdjacencyUseClosure(DM dm, PetscBool *useClosure)
112: {
113:   DM_Plex *mesh = (DM_Plex *) dm->data;

118:   *useClosure = mesh->useClosure;
119:   return(0);
120: }
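
/* Usage sketch (added for illustration, not part of the original file): choosing
   between FEM- and FVM-style adjacency with the setters defined above, e.g. before
   DMPlexDistribute() or matrix preallocation. The helper name is hypothetical. */
static PetscErrorCode ExampleChooseAdjacency(DM dm, PetscBool fem)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  if (fem) { /* FEM: q \in closure(star(p)) */
    ierr = DMPlexSetAdjacencyUseCone(dm, PETSC_FALSE);CHKERRQ(ierr);
    ierr = DMPlexSetAdjacencyUseClosure(dm, PETSC_TRUE);CHKERRQ(ierr);
  } else {   /* FVM: q \in support(cone(p)) */
    ierr = DMPlexSetAdjacencyUseCone(dm, PETSC_TRUE);CHKERRQ(ierr);
    ierr = DMPlexSetAdjacencyUseClosure(dm, PETSC_FALSE);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}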

124: /*@
125:   DMPlexSetAdjacencyUseAnchors - Define adjacency in the mesh using the point-to-point constraints.

127:   Input Parameters:
128: + dm      - The DM object
129: - useAnchors - Flag to use the constraints.  If PETSC_TRUE, then constrained points are omitted from DMPlexGetAdjacency(), and their anchor points appear in their place.

131:   Level: intermediate

133: .seealso: DMPlexGetAdjacencyUseClosure(), DMPlexSetAdjacencyUseCone(), DMPlexGetAdjacencyUseCone(), DMPlexDistribute(), DMPlexPreallocateOperator(), DMPlexSetAnchors()
134: @*/
135: PetscErrorCode DMPlexSetAdjacencyUseAnchors(DM dm, PetscBool useAnchors)
136: {
137:   DM_Plex *mesh = (DM_Plex *) dm->data;

141:   mesh->useAnchors = useAnchors;
142:   return(0);
143: }

147: /*@
148:   DMPlexGetAdjacencyUseAnchors - Query whether adjacency in the mesh uses the point-to-point constraints.

150:   Input Parameter:
151: . dm      - The DM object

153:   Output Parameter:
154: . useAnchors - Flag to use the constraints.  If PETSC_TRUE, then constrained points are omitted from DMPlexGetAdjacency(), and their anchor points appear in their place.

156:   Level: intermediate

158: .seealso: DMPlexSetAdjacencyUseAnchors(), DMPlexSetAdjacencyUseCone(), DMPlexGetAdjacencyUseCone(), DMPlexDistribute(), DMPlexPreallocateOperator(), DMPlexSetAnchors()
159: @*/
160: PetscErrorCode DMPlexGetAdjacencyUseAnchors(DM dm, PetscBool *useAnchors)
161: {
162:   DM_Plex *mesh = (DM_Plex *) dm->data;

167:   *useAnchors = mesh->useAnchors;
168:   return(0);
169: }

173: static PetscErrorCode DMPlexGetAdjacency_Cone_Internal(DM dm, PetscInt p, PetscInt *adjSize, PetscInt adj[])
174: {
175:   const PetscInt *cone = NULL;
176:   PetscInt        numAdj = 0, maxAdjSize = *adjSize, coneSize, c;
177:   PetscErrorCode  ierr;

180:   DMPlexGetConeSize(dm, p, &coneSize);
181:   DMPlexGetCone(dm, p, &cone);
182:   for (c = 0; c <= coneSize; ++c) {
183:     const PetscInt  point   = !c ? p : cone[c-1];
184:     const PetscInt *support = NULL;
185:     PetscInt        supportSize, s, q;

187:     DMPlexGetSupportSize(dm, point, &supportSize);
188:     DMPlexGetSupport(dm, point, &support);
189:     for (s = 0; s < supportSize; ++s) {
190:       for (q = 0; q < numAdj || (adj[numAdj++] = support[s],0); ++q) {
191:         if (support[s] == adj[q]) break;
192:       }
193:       if (numAdj > maxAdjSize) SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%D)", maxAdjSize);
194:     }
195:   }
196:   *adjSize = numAdj;
197:   return(0);
198: }

202: static PetscErrorCode DMPlexGetAdjacency_Support_Internal(DM dm, PetscInt p, PetscInt *adjSize, PetscInt adj[])
203: {
204:   const PetscInt *support = NULL;
205:   PetscInt        numAdj   = 0, maxAdjSize = *adjSize, supportSize, s;
206:   PetscErrorCode  ierr;

209:   DMPlexGetSupportSize(dm, p, &supportSize);
210:   DMPlexGetSupport(dm, p, &support);
211:   for (s = 0; s <= supportSize; ++s) {
212:     const PetscInt  point = !s ? p : support[s-1];
213:     const PetscInt *cone  = NULL;
214:     PetscInt        coneSize, c, q;

216:     DMPlexGetConeSize(dm, point, &coneSize);
217:     DMPlexGetCone(dm, point, &cone);
218:     for (c = 0; c < coneSize; ++c) {
219:       for (q = 0; q < numAdj || (adj[numAdj++] = cone[c],0); ++q) {
220:         if (cone[c] == adj[q]) break;
221:       }
222:       if (numAdj > maxAdjSize) SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%D)", maxAdjSize);
223:     }
224:   }
225:   *adjSize = numAdj;
226:   return(0);
227: }

231: static PetscErrorCode DMPlexGetAdjacency_Transitive_Internal(DM dm, PetscInt p, PetscBool useClosure, PetscInt *adjSize, PetscInt adj[])
232: {
233:   PetscInt      *star = NULL;
234:   PetscInt       numAdj = 0, maxAdjSize = *adjSize, starSize, s;

238:   DMPlexGetTransitiveClosure(dm, p, useClosure, &starSize, &star);
239:   for (s = 0; s < starSize*2; s += 2) {
240:     const PetscInt *closure = NULL;
241:     PetscInt        closureSize, c, q;

243:     DMPlexGetTransitiveClosure(dm, star[s], (PetscBool)!useClosure, &closureSize, (PetscInt**) &closure);
244:     for (c = 0; c < closureSize*2; c += 2) {
245:       for (q = 0; q < numAdj || (adj[numAdj++] = closure[c],0); ++q) {
246:         if (closure[c] == adj[q]) break;
247:       }
248:       if (numAdj > maxAdjSize) SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%D)", maxAdjSize);
249:     }
250:     DMPlexRestoreTransitiveClosure(dm, star[s], (PetscBool)!useClosure, &closureSize, (PetscInt**) &closure);
251:   }
252:   DMPlexRestoreTransitiveClosure(dm, p, useClosure, &starSize, &star);
253:   *adjSize = numAdj;
254:   return(0);
255: }

259: PetscErrorCode DMPlexGetAdjacency_Internal(DM dm, PetscInt p, PetscBool useCone, PetscBool useTransitiveClosure, PetscBool useAnchors, PetscInt *adjSize, PetscInt *adj[])
260: {
261:   static PetscInt asiz = 0;
262:   PetscInt maxAnchors = 1;
263:   PetscInt aStart = -1, aEnd = -1;
264:   PetscInt maxAdjSize;
265:   PetscSection aSec = NULL;
266:   IS aIS = NULL;
267:   const PetscInt *anchors;
268:   PetscErrorCode  ierr;

271:   if (useAnchors) {
272:     DMPlexGetAnchors(dm,&aSec,&aIS);
273:     if (aSec) {
274:       PetscSectionGetMaxDof(aSec,&maxAnchors);
275:       maxAnchors = PetscMax(1,maxAnchors);
276:       PetscSectionGetChart(aSec,&aStart,&aEnd);
277:       ISGetIndices(aIS,&anchors);
278:     }
279:   }
280:   if (!*adj) {
281:     PetscInt depth, coneSeries, supportSeries, maxC, maxS, pStart, pEnd;

283:     DMPlexGetChart(dm, &pStart,&pEnd);
284:     DMPlexGetDepth(dm, &depth);
285:     DMPlexGetMaxSizes(dm, &maxC, &maxS);
286:     coneSeries    = (maxC > 1) ? ((PetscPowInt(maxC,depth+1)-1)/(maxC-1)) : depth+1;
287:     supportSeries = (maxS > 1) ? ((PetscPowInt(maxS,depth+1)-1)/(maxS-1)) : depth+1;
288:     asiz  = PetscMax(PetscPowInt(maxS,depth)*coneSeries,PetscPowInt(maxC,depth)*supportSeries);
289:     asiz *= maxAnchors;
290:     asiz  = PetscMin(asiz,pEnd-pStart);
291:     PetscMalloc1(asiz,adj);
292:   }
293:   if (*adjSize < 0) *adjSize = asiz;
294:   maxAdjSize = *adjSize;
295:   if (useTransitiveClosure) {
296:     DMPlexGetAdjacency_Transitive_Internal(dm, p, useCone, adjSize, *adj);
297:   } else if (useCone) {
298:     DMPlexGetAdjacency_Cone_Internal(dm, p, adjSize, *adj);
299:   } else {
300:     DMPlexGetAdjacency_Support_Internal(dm, p, adjSize, *adj);
301:   }
302:   if (useAnchors && aSec) {
303:     PetscInt origSize = *adjSize;
304:     PetscInt numAdj = origSize;
305:     PetscInt i = 0, j;
306:     PetscInt *orig = *adj;

308:     while (i < origSize) {
309:       PetscInt p = orig[i];
310:       PetscInt aDof = 0;

312:       if (p >= aStart && p < aEnd) {
313:         PetscSectionGetDof(aSec,p,&aDof);
314:       }
315:       if (aDof) {
316:         PetscInt aOff;
317:         PetscInt s, q;

319:         for (j = i + 1; j < numAdj; j++) {
320:           orig[j - 1] = orig[j];
321:         }
322:         origSize--;
323:         numAdj--;
324:         PetscSectionGetOffset(aSec,p,&aOff);
325:         for (s = 0; s < aDof; ++s) {
326:           for (q = 0; q < numAdj || (orig[numAdj++] = anchors[aOff+s],0); ++q) {
327:             if (anchors[aOff+s] == orig[q]) break;
328:           }
329:           if (numAdj > maxAdjSize) SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%D)", maxAdjSize);
330:         }
331:       }
332:       else {
333:         i++;
334:       }
335:     }
336:     *adjSize = numAdj;
337:     ISRestoreIndices(aIS,&anchors);
338:   }
339:   return(0);
340: }

344: /*@
345:   DMPlexGetAdjacency - Return all points adjacent to the given point

347:   Input Parameters:
348: + dm - The DM object
349: . p  - The point
350: . adjSize - The maximum size of adj if it is non-NULL, or PETSC_DETERMINE
351: - adj - Either NULL so that the array is allocated, or an existing array with size adjSize

353:   Output Parameters:
354: + adjSize - The number of adjacent points
355: - adj - The adjacent points

357:   Level: advanced

359:   Notes: The user must PetscFree the adj array if it was not passed in.

361: .seealso: DMPlexSetAdjacencyUseCone(), DMPlexSetAdjacencyUseClosure(), DMPlexDistribute(), DMCreateMatrix(), DMPlexPreallocateOperator()
362: @*/
363: PetscErrorCode DMPlexGetAdjacency(DM dm, PetscInt p, PetscInt *adjSize, PetscInt *adj[])
364: {
365:   DM_Plex       *mesh = (DM_Plex *) dm->data;

372:   DMPlexGetAdjacency_Internal(dm, p, mesh->useCone, mesh->useClosure, mesh->useAnchors, adjSize, adj);
373:   return(0);
374: }
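
/* Usage sketch (added for illustration, not part of the original file): letting
   DMPlexGetAdjacency() allocate the adjacency array by passing PETSC_DETERMINE and
   a NULL array pointer. The helper name is hypothetical. */
static PetscErrorCode ExamplePrintAdjacency(DM dm, PetscInt p)
{
  PetscInt       adjSize = PETSC_DETERMINE, *adj = NULL, a;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = DMPlexGetAdjacency(dm, p, &adjSize, &adj);CHKERRQ(ierr);
  for (a = 0; a < adjSize; ++a) {
    ierr = PetscPrintf(PETSC_COMM_SELF, "point %D adjacent to %D\n", p, adj[a]);CHKERRQ(ierr);
  }
  ierr = PetscFree(adj);CHKERRQ(ierr); /* we own the array because we passed NULL */
  PetscFunctionReturn(0);
}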

378: /*@
379:   DMPlexCreateTwoSidedProcessSF - Create an SF which just has process connectivity

381:   Collective on DM

383:   Input Parameters:
384: + dm      - The DM
385: - sfPoint - The PetscSF which encodes point connectivity

387:   Output Parameters:
388: + processRanks - A list of process neighbors, or NULL
389: - sfProcess    - An SF encoding the two-sided process connectivity, or NULL

391:   Level: developer

393: .seealso: PetscSFCreate(), DMPlexCreateProcessSF()
394: @*/
395: PetscErrorCode DMPlexCreateTwoSidedProcessSF(DM dm, PetscSF sfPoint, PetscSection rootRankSection, IS rootRanks, PetscSection leafRankSection, IS leafRanks, IS *processRanks, PetscSF *sfProcess)
396: {
397:   const PetscSFNode *remotePoints;
398:   PetscInt          *localPointsNew;
399:   PetscSFNode       *remotePointsNew;
400:   const PetscInt    *nranks;
401:   PetscInt          *ranksNew;
402:   PetscBT            neighbors;
403:   PetscInt           pStart, pEnd, p, numLeaves, l, numNeighbors, n;
404:   PetscMPIInt        numProcs, proc, rank;
405:   PetscErrorCode     ierr;

412:   MPI_Comm_size(PetscObjectComm((PetscObject) dm), &numProcs);
413:   MPI_Comm_rank(PetscObjectComm((PetscObject) dm), &rank);
414:   PetscSFGetGraph(sfPoint, NULL, &numLeaves, NULL, &remotePoints);
415:   PetscBTCreate(numProcs, &neighbors);
416:   PetscBTMemzero(numProcs, neighbors);
417:   /* Compute root-to-leaf process connectivity */
418:   PetscSectionGetChart(rootRankSection, &pStart, &pEnd);
419:   ISGetIndices(rootRanks, &nranks);
420:   for (p = pStart; p < pEnd; ++p) {
421:     PetscInt ndof, noff, n;

423:     PetscSectionGetDof(rootRankSection, p, &ndof);
424:     PetscSectionGetOffset(rootRankSection, p, &noff);
425:     for (n = 0; n < ndof; ++n) {PetscBTSet(neighbors, nranks[noff+n]);}
426:   }
427:   ISRestoreIndices(rootRanks, &nranks);
428:   /* Compute leaf-to-neighbor process connectivity */
429:   PetscSectionGetChart(leafRankSection, &pStart, &pEnd);
430:   ISGetIndices(leafRanks, &nranks);
431:   for (p = pStart; p < pEnd; ++p) {
432:     PetscInt ndof, noff, n;

434:     PetscSectionGetDof(leafRankSection, p, &ndof);
435:     PetscSectionGetOffset(leafRankSection, p, &noff);
436:     for (n = 0; n < ndof; ++n) {PetscBTSet(neighbors, nranks[noff+n]);}
437:   }
438:   ISRestoreIndices(leafRanks, &nranks);
439:   /* Compute leaf-to-root process connectivity */
440:   for (l = 0; l < numLeaves; ++l) {PetscBTSet(neighbors, remotePoints[l].rank);}
441:   /* Calculate edges */
442:   PetscBTClear(neighbors, rank);
443:   for(proc = 0, numNeighbors = 0; proc < numProcs; ++proc) {if (PetscBTLookup(neighbors, proc)) ++numNeighbors;}
444:   PetscMalloc1(numNeighbors, &ranksNew);
445:   PetscMalloc1(numNeighbors, &localPointsNew);
446:   PetscMalloc1(numNeighbors, &remotePointsNew);
447:   for(proc = 0, n = 0; proc < numProcs; ++proc) {
448:     if (PetscBTLookup(neighbors, proc)) {
449:       ranksNew[n]              = proc;
450:       localPointsNew[n]        = proc;
451:       remotePointsNew[n].index = rank;
452:       remotePointsNew[n].rank  = proc;
453:       ++n;
454:     }
455:   }
456:   PetscBTDestroy(&neighbors);
457:   if (processRanks) {ISCreateGeneral(PetscObjectComm((PetscObject)dm), numNeighbors, ranksNew, PETSC_OWN_POINTER, processRanks);}
458:   else              {PetscFree(ranksNew);}
459:   if (sfProcess) {
460:     PetscSFCreate(PetscObjectComm((PetscObject)dm), sfProcess);
461:     PetscObjectSetName((PetscObject) *sfProcess, "Two-Sided Process SF");
462:     PetscSFSetFromOptions(*sfProcess);
463:     PetscSFSetGraph(*sfProcess, numProcs, numNeighbors, localPointsNew, PETSC_OWN_POINTER, remotePointsNew, PETSC_OWN_POINTER);
464:   }
465:   return(0);
466: }

470: /*@
471:   DMPlexDistributeOwnership - Compute owner information for shared points. This effectively makes the one-sided point SF information two-sided, so that roots also know which processes share each point.

473:   Collective on DM

475:   Input Parameter:
476: . dm - The DM

478:   Output Parameters:
479: + rootSection - The number of leaves for a given root point
480: . rootrank    - The rank of each edge into the root point
481: . leafSection - The number of processes sharing a given leaf point
482: - leafrank    - The rank of each process sharing a leaf point

484:   Level: developer

486: .seealso: DMPlexCreateOverlap()
487: @*/
488: PetscErrorCode DMPlexDistributeOwnership(DM dm, PetscSection rootSection, IS *rootrank, PetscSection leafSection, IS *leafrank)
489: {
490:   MPI_Comm        comm;
491:   PetscSF         sfPoint;
492:   const PetscInt *rootdegree;
493:   PetscInt       *myrank, *remoterank;
494:   PetscInt        pStart, pEnd, p, nedges;
495:   PetscMPIInt     rank;
496:   PetscErrorCode  ierr;

499:   PetscObjectGetComm((PetscObject) dm, &comm);
500:   MPI_Comm_rank(comm, &rank);
501:   DMPlexGetChart(dm, &pStart, &pEnd);
502:   DMGetPointSF(dm, &sfPoint);
503:   /* Compute number of leaves for each root */
504:   PetscObjectSetName((PetscObject) rootSection, "Root Section");
505:   PetscSectionSetChart(rootSection, pStart, pEnd);
506:   PetscSFComputeDegreeBegin(sfPoint, &rootdegree);
507:   PetscSFComputeDegreeEnd(sfPoint, &rootdegree);
508:   for (p = pStart; p < pEnd; ++p) {PetscSectionSetDof(rootSection, p, rootdegree[p-pStart]);}
509:   PetscSectionSetUp(rootSection);
510:   /* Gather rank of each leaf to root */
511:   PetscSectionGetStorageSize(rootSection, &nedges);
512:   PetscMalloc1(pEnd-pStart, &myrank);
513:   PetscMalloc1(nedges,  &remoterank);
514:   for (p = 0; p < pEnd-pStart; ++p) myrank[p] = rank;
515:   PetscSFGatherBegin(sfPoint, MPIU_INT, myrank, remoterank);
516:   PetscSFGatherEnd(sfPoint, MPIU_INT, myrank, remoterank);
517:   PetscFree(myrank);
518:   ISCreateGeneral(comm, nedges, remoterank, PETSC_OWN_POINTER, rootrank);
519:   /* Distribute remote ranks to leaves */
520:   PetscObjectSetName((PetscObject) leafSection, "Leaf Section");
521:   DMPlexDistributeFieldIS(dm, sfPoint, rootSection, *rootrank, leafSection, leafrank);
522:   return(0);
523: }
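
/* Usage sketch (added for illustration, not part of the original file): computing
   two-sided ownership information; the sections and index sets produced here are
   typically fed into DMPlexCreateOverlap() below. The helper name is hypothetical. */
static PetscErrorCode ExampleComputeOwnership(DM dm)
{
  PetscSection   rootSection, leafSection;
  IS             rootrank, leafrank;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = PetscSectionCreate(PetscObjectComm((PetscObject) dm), &rootSection);CHKERRQ(ierr);
  ierr = PetscSectionCreate(PetscObjectComm((PetscObject) dm), &leafSection);CHKERRQ(ierr);
  ierr = DMPlexDistributeOwnership(dm, rootSection, &rootrank, leafSection, &leafrank);CHKERRQ(ierr);
  /* ... use rootSection/rootrank and leafSection/leafrank ... */
  ierr = ISDestroy(&rootrank);CHKERRQ(ierr);
  ierr = ISDestroy(&leafrank);CHKERRQ(ierr);
  ierr = PetscSectionDestroy(&rootSection);CHKERRQ(ierr);
  ierr = PetscSectionDestroy(&leafSection);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}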

527: /*@C
528:   DMPlexCreateOverlap - Compute a DMLabel marking the points that each process must send to other ranks to build the requested overlap

530:   Collective on DM

532:   Input Parameters:
533: + dm          - The DM
534: . levels      - Number of overlap levels
535: . rootSection - The number of leaves for a given root point
536: . rootrank    - The rank of each edge into the root point
537: . leafSection - The number of processes sharing a given leaf point
538: - leafrank    - The rank of each process sharing a leaf point

540:   Output Parameter:
541: . ovLabel     - DMLabel containing remote overlap contributions as point/rank pairings

543:   Level: developer

545: .seealso: DMPlexDistributeOwnership(), DMPlexDistribute()
546: @*/
547: PetscErrorCode DMPlexCreateOverlap(DM dm, PetscInt levels, PetscSection rootSection, IS rootrank, PetscSection leafSection, IS leafrank, DMLabel *ovLabel)
548: {
549:   MPI_Comm           comm;
550:   DMLabel            ovAdjByRank; /* A DMLabel containing all points adjacent to shared points, separated by rank (value in label) */
551:   PetscSF            sfPoint, sfProc;
552:   const PetscSFNode *remote;
553:   const PetscInt    *local;
554:   const PetscInt    *nrank, *rrank;
555:   PetscInt          *adj = NULL;
556:   PetscInt           pStart, pEnd, p, sStart, sEnd, nleaves, l;
557:   PetscMPIInt        rank, numProcs;
558:   PetscBool          useCone, useClosure, flg;
559:   PetscErrorCode     ierr;

562:   PetscObjectGetComm((PetscObject) dm, &comm);
563:   MPI_Comm_size(comm, &numProcs);
564:   MPI_Comm_rank(comm, &rank);
565:   DMGetPointSF(dm, &sfPoint);
566:   DMPlexGetChart(dm, &pStart, &pEnd);
567:   PetscSectionGetChart(leafSection, &sStart, &sEnd);
568:   PetscSFGetGraph(sfPoint, NULL, &nleaves, &local, &remote);
569:   DMLabelCreate("Overlap adjacency", &ovAdjByRank);
570:   /* Handle leaves: shared with the root point */
571:   for (l = 0; l < nleaves; ++l) {
572:     PetscInt adjSize = PETSC_DETERMINE, a;

574:     DMPlexGetAdjacency(dm, local[l], &adjSize, &adj);
575:     for (a = 0; a < adjSize; ++a) {DMLabelSetValue(ovAdjByRank, adj[a], remote[l].rank);}
576:   }
577:   ISGetIndices(rootrank, &rrank);
578:   ISGetIndices(leafrank, &nrank);
579:   /* Handle roots */
580:   for (p = pStart; p < pEnd; ++p) {
581:     PetscInt adjSize = PETSC_DETERMINE, neighbors = 0, noff, n, a;

583:     if ((p >= sStart) && (p < sEnd)) {
584:       /* Some leaves share a root with other leaves on different processes */
585:       PetscSectionGetDof(leafSection, p, &neighbors);
586:       if (neighbors) {
587:         PetscSectionGetOffset(leafSection, p, &noff);
588:         DMPlexGetAdjacency(dm, p, &adjSize, &adj);
589:         for (n = 0; n < neighbors; ++n) {
590:           const PetscInt remoteRank = nrank[noff+n];

592:           if (remoteRank == rank) continue;
593:           for (a = 0; a < adjSize; ++a) {DMLabelSetValue(ovAdjByRank, adj[a], remoteRank);}
594:         }
595:       }
596:     }
597:     /* Roots are shared with leaves */
598:     PetscSectionGetDof(rootSection, p, &neighbors);
599:     if (!neighbors) continue;
600:     PetscSectionGetOffset(rootSection, p, &noff);
601:     DMPlexGetAdjacency(dm, p, &adjSize, &adj);
602:     for (n = 0; n < neighbors; ++n) {
603:       const PetscInt remoteRank = rrank[noff+n];

605:       if (remoteRank == rank) continue;
606:       for (a = 0; a < adjSize; ++a) {DMLabelSetValue(ovAdjByRank, adj[a], remoteRank);}
607:     }
608:   }
609:   PetscFree(adj);
610:   ISRestoreIndices(rootrank, &rrank);
611:   ISRestoreIndices(leafrank, &nrank);
612:   /* Add additional overlap levels */
613:   for (l = 1; l < levels; l++) {
614:     /* Propagate point donations over SF to capture remote connections */
615:     DMPlexPartitionLabelPropagate(dm, ovAdjByRank);
616:     /* Add next level of point donations to the label */
617:     DMPlexPartitionLabelAdjacency(dm, ovAdjByRank);
618:   }
619:   /* We require the closure in the overlap */
620:   DMPlexGetAdjacencyUseCone(dm, &useCone);
621:   DMPlexGetAdjacencyUseClosure(dm, &useClosure);
622:   if (useCone || !useClosure) {
623:     DMPlexPartitionLabelClosure(dm, ovAdjByRank);
624:   }
625:   PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-overlap_view", &flg);
626:   if (flg) {
627:     DMLabelView(ovAdjByRank, PETSC_VIEWER_STDOUT_WORLD);
628:   }
629:   /* Make global process SF and invert sender to receiver label */
630:   {
631:     /* Build a global process SF */
632:     PetscSFNode *remoteProc;
633:     PetscMalloc1(numProcs, &remoteProc);
634:     for (p = 0; p < numProcs; ++p) {
635:       remoteProc[p].rank  = p;
636:       remoteProc[p].index = rank;
637:     }
638:     PetscSFCreate(comm, &sfProc);
639:     PetscObjectSetName((PetscObject) sfProc, "Process SF");
640:     PetscSFSetGraph(sfProc, numProcs, numProcs, NULL, PETSC_OWN_POINTER, remoteProc, PETSC_OWN_POINTER);
641:   }
642:   DMLabelCreate("Overlap label", ovLabel);
643:   DMPlexPartitionLabelInvert(dm, ovAdjByRank, sfProc, *ovLabel);
644:   /* Add owned points, except for shared local points */
645:   for (p = pStart; p < pEnd; ++p) {DMLabelSetValue(*ovLabel, p, rank);}
646:   for (l = 0; l < nleaves; ++l) {
647:     DMLabelClearValue(*ovLabel, local[l], rank);
648:     DMLabelSetValue(*ovLabel, remote[l].index, remote[l].rank);
649:   }
650:   /* Clean up */
651:   DMLabelDestroy(&ovAdjByRank);
652:   PetscSFDestroy(&sfProc);
653:   return(0);
654: }
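
/* Usage sketch (added for illustration, not part of the original file): combining
   DMPlexDistributeOwnership() and DMPlexCreateOverlap() to obtain a one-level
   overlap label. The helper name is hypothetical. */
static PetscErrorCode ExampleOneLevelOverlap(DM dm, DMLabel *ovLabel)
{
  PetscSection   rootSection, leafSection;
  IS             rootrank, leafrank;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = PetscSectionCreate(PetscObjectComm((PetscObject) dm), &rootSection);CHKERRQ(ierr);
  ierr = PetscSectionCreate(PetscObjectComm((PetscObject) dm), &leafSection);CHKERRQ(ierr);
  ierr = DMPlexDistributeOwnership(dm, rootSection, &rootrank, leafSection, &leafrank);CHKERRQ(ierr);
  ierr = DMPlexCreateOverlap(dm, 1, rootSection, rootrank, leafSection, leafrank, ovLabel);CHKERRQ(ierr);
  ierr = ISDestroy(&rootrank);CHKERRQ(ierr);
  ierr = ISDestroy(&leafrank);CHKERRQ(ierr);
  ierr = PetscSectionDestroy(&rootSection);CHKERRQ(ierr);
  ierr = PetscSectionDestroy(&leafSection);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}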

658: PetscErrorCode DMPlexCreateOverlapMigrationSF(DM dm, PetscSF overlapSF, PetscSF *migrationSF)
659: {
660:   MPI_Comm           comm;
661:   PetscMPIInt        rank, numProcs;
662:   PetscInt           d, dim, p, pStart, pEnd, nroots, nleaves, newLeaves, point, numSharedPoints;
663:   PetscInt          *pointDepths, *remoteDepths, *ilocal;
664:   PetscInt          *depthRecv, *depthShift, *depthIdx;
665:   PetscSFNode       *iremote;
666:   PetscSF            pointSF;
667:   const PetscInt    *sharedLocal;
668:   const PetscSFNode *overlapRemote, *sharedRemote;
669:   PetscErrorCode     ierr;

673:   PetscObjectGetComm((PetscObject)dm, &comm);
674:   MPI_Comm_rank(comm, &rank);
675:   MPI_Comm_size(comm, &numProcs);
676:   DMGetDimension(dm, &dim);

678:   /* Before building the migration SF we need to know the new stratum offsets */
679:   PetscSFGetGraph(overlapSF, &nroots, &nleaves, NULL, &overlapRemote);
680:   PetscMalloc2(nroots, &pointDepths, nleaves, &remoteDepths);
681:   for (d=0; d<dim+1; d++) {
682:     DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);
683:     for (p=pStart; p<pEnd; p++) pointDepths[p] = d;
684:   }
685:   for (p=0; p<nleaves; p++) remoteDepths[p] = -1;
686:   PetscSFBcastBegin(overlapSF, MPIU_INT, pointDepths, remoteDepths);
687:   PetscSFBcastEnd(overlapSF, MPIU_INT, pointDepths, remoteDepths);

689:   /* Count received points in each stratum and compute the internal strata shift */
690:   PetscMalloc3(dim+1, &depthRecv, dim+1, &depthShift, dim+1, &depthIdx);
691:   for (d=0; d<dim+1; d++) depthRecv[d]=0;
692:   for (p=0; p<nleaves; p++) depthRecv[remoteDepths[p]]++;
693:   depthShift[dim] = 0;
694:   for (d=0; d<dim; d++) depthShift[d] = depthRecv[dim];
695:   for (d=1; d<dim; d++) depthShift[d] += depthRecv[0];
696:   for (d=dim-2; d>0; d--) depthShift[d] += depthRecv[d+1];
697:   for (d=0; d<dim+1; d++) {
698:     DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);
699:     depthIdx[d] = pStart + depthShift[d];
700:   }

702:   /* Build an SF that describes the full overlap migration */
703:   DMPlexGetChart(dm, &pStart, &pEnd);
704:   newLeaves = pEnd - pStart + nleaves;
705:   PetscMalloc1(newLeaves, &ilocal);
706:   PetscMalloc1(newLeaves, &iremote);
707:   /* First map local points to themselves */
708:   for (d=0; d<dim+1; d++) {
709:     DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);
710:     for (p=pStart; p<pEnd; p++) {
711:       point = p + depthShift[d];
712:       ilocal[point] = point;
713:       iremote[point].index = p;
714:       iremote[point].rank = rank;
715:       depthIdx[d]++;
716:     }
717:   }

719:   /* Add in the remote roots for currently shared points */
720:   DMGetPointSF(dm, &pointSF);
721:   PetscSFGetGraph(pointSF, NULL, &numSharedPoints, &sharedLocal, &sharedRemote);
722:   for (d=0; d<dim+1; d++) {
723:     DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);
724:     for (p=0; p<numSharedPoints; p++) {
725:       if (pStart <= sharedLocal[p] && sharedLocal[p] < pEnd) {
726:         point = sharedLocal[p] + depthShift[d];
727:         iremote[point].index = sharedRemote[p].index;
728:         iremote[point].rank = sharedRemote[p].rank;
729:       }
730:     }
731:   }

733:   /* Now add the incoming overlap points */
734:   for (p=0; p<nleaves; p++) {
735:     point = depthIdx[remoteDepths[p]];
736:     ilocal[point] = point;
737:     iremote[point].index = overlapRemote[p].index;
738:     iremote[point].rank = overlapRemote[p].rank;
739:     depthIdx[remoteDepths[p]]++;
740:   }
741:   PetscFree2(pointDepths,remoteDepths);

743:   PetscSFCreate(comm, migrationSF);
744:   PetscObjectSetName((PetscObject) *migrationSF, "Overlap Migration SF");
745:   PetscSFSetFromOptions(*migrationSF);
746:   DMPlexGetChart(dm, &pStart, &pEnd);
747:   PetscSFSetGraph(*migrationSF, pEnd-pStart, newLeaves, ilocal, PETSC_OWN_POINTER, iremote, PETSC_OWN_POINTER);

749:   PetscFree3(depthRecv, depthShift, depthIdx);
750:   return(0);
751: }

755: /*@
756:   DMPlexStratifyMigrationSF - Rearrange the leaves of a migration SF for stratification.

758:   Input Parameters:
759: + dm          - The DM
760: - sf          - A star forest with non-ordered leaves, usually defining a DM point migration

762:   Output Parameter:
763: . migrationSF - A star forest with added leaf indirection that ensures the resulting DM is stratified

765:   Level: developer

767: .seealso: DMPlexPartitionLabelCreateSF(), DMPlexDistribute(), DMPlexDistributeOverlap()
768: @*/
769: PetscErrorCode DMPlexStratifyMigrationSF(DM dm, PetscSF sf, PetscSF *migrationSF)
770: {
771:   MPI_Comm           comm;
772:   PetscMPIInt        rank, numProcs;
773:   PetscInt           d, ldepth, depth, p, pStart, pEnd, nroots, nleaves;
774:   PetscInt          *pointDepths, *remoteDepths, *ilocal;
775:   PetscInt          *depthRecv, *depthShift, *depthIdx;
776:   PetscInt           hybEnd[4];
777:   const PetscSFNode *iremote;
778:   PetscErrorCode     ierr;

782:   PetscObjectGetComm((PetscObject) dm, &comm);
783:   MPI_Comm_rank(comm, &rank);
784:   MPI_Comm_size(comm, &numProcs);
785:   DMPlexGetDepth(dm, &ldepth);
786:   MPIU_Allreduce(&ldepth, &depth, 1, MPIU_INT, MPI_MAX, comm);
787:   if ((ldepth >= 0) && (depth != ldepth)) SETERRQ2(PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Inconsistent Plex depth %d != %d", ldepth, depth);

789:   /* Before building the migration SF we need to know the new stratum offsets */
790:   PetscSFGetGraph(sf, &nroots, &nleaves, NULL, &iremote);
791:   PetscMalloc2(nroots, &pointDepths, nleaves, &remoteDepths);
792:   DMPlexGetHybridBounds(dm,&hybEnd[depth],&hybEnd[depth-1],&hybEnd[1],&hybEnd[0]);
793:   for (d = 0; d < depth+1; ++d) {
794:     DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);
795:     for (p = pStart; p < pEnd; ++p) {
796:       if (hybEnd[d] >= 0 && p >= hybEnd[d]) { /* put in a separate value for hybrid points */
797:         pointDepths[p] = 2 * d;
798:       } else {
799:         pointDepths[p] = 2 * d + 1;
800:       }
801:     }
802:   }
803:   for (p = 0; p < nleaves; ++p) remoteDepths[p] = -1;
804:   PetscSFBcastBegin(sf, MPIU_INT, pointDepths, remoteDepths);
805:   PetscSFBcastEnd(sf, MPIU_INT, pointDepths, remoteDepths);
806:   /* Count received points in each stratum and compute the internal strata shift */
807:   PetscMalloc3(2*(depth+1), &depthRecv, 2*(depth+1), &depthShift, 2*(depth+1), &depthIdx);
808:   for (d = 0; d < 2*(depth+1); ++d) depthRecv[d] = 0;
809:   for (p = 0; p < nleaves; ++p) depthRecv[remoteDepths[p]]++;
810:   depthShift[2*depth+1] = 0;
811:   for (d = 0; d < 2*depth+1; ++d) depthShift[d] = depthRecv[2 * depth + 1];
812:   for (d = 0; d < 2*depth; ++d) depthShift[d] += depthRecv[2 * depth];
813:   depthShift[0] += depthRecv[1];
814:   for (d = 2; d < 2*depth; ++d) depthShift[d] += depthRecv[1];
815:   for (d = 2; d < 2*depth; ++d) depthShift[d] += depthRecv[0];
816:   for (d = 2 * depth-1; d > 2; --d) {
817:     PetscInt e;

819:     for (e = d -1; e > 1; --e) depthShift[e] += depthRecv[d];
820:   }
821:   for (d = 0; d < 2*(depth+1); ++d) {depthIdx[d] = 0;}
822:   /* Derive a new local permutation based on stratified indices */
823:   PetscMalloc1(nleaves, &ilocal);
824:   for (p = 0; p < nleaves; ++p) {
825:     const PetscInt dep = remoteDepths[p];

827:     ilocal[p] = depthShift[dep] + depthIdx[dep];
828:     depthIdx[dep]++;
829:   }
830:   PetscSFCreate(comm, migrationSF);
831:   PetscObjectSetName((PetscObject) *migrationSF, "Migration SF");
832:   PetscSFSetGraph(*migrationSF, nroots, nleaves, ilocal, PETSC_OWN_POINTER, iremote, PETSC_COPY_VALUES);
833:   PetscFree2(pointDepths,remoteDepths);
834:   PetscFree3(depthRecv, depthShift, depthIdx);
835:   return(0);
836: }
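
/* Usage sketch (added for illustration, not part of the original file): replacing an
   unordered migration SF by a stratified one, much as DMPlexDistribute() does
   internally. The helper name and the incoming sfMigration are assumptions. */
static PetscErrorCode ExampleStratifyMigration(DM dm, PetscSF *sfMigration)
{
  PetscSF        sfStratified;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = DMPlexStratifyMigrationSF(dm, *sfMigration, &sfStratified);CHKERRQ(ierr);
  ierr = PetscSFDestroy(sfMigration);CHKERRQ(ierr);
  *sfMigration = sfStratified;
  PetscFunctionReturn(0);
}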

840: /*@
841:   DMPlexDistributeField - Distribute field data to match a given PetscSF, usually the SF from mesh distribution

843:   Collective on DM

845:   Input Parameters:
846: + dm - The DMPlex object
847: . pointSF - The PetscSF describing the communication pattern
848: . originalSection - The PetscSection for existing data layout
849: - originalVec - The existing data

851:   Output Parameters:
852: + newSection - The PetscSection describing the new data layout
853: - newVec - The new data

855:   Level: developer

857: .seealso: DMPlexDistribute(), DMPlexDistributeFieldIS(), DMPlexDistributeData()
858: @*/
859: PetscErrorCode DMPlexDistributeField(DM dm, PetscSF pointSF, PetscSection originalSection, Vec originalVec, PetscSection newSection, Vec newVec)
860: {
861:   PetscSF        fieldSF;
862:   PetscInt      *remoteOffsets, fieldSize;
863:   PetscScalar   *originalValues, *newValues;

867:   PetscLogEventBegin(DMPLEX_DistributeField,dm,0,0,0);
868:   PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection);

870:   PetscSectionGetStorageSize(newSection, &fieldSize);
871:   VecSetSizes(newVec, fieldSize, PETSC_DETERMINE);
872:   VecSetType(newVec,dm->vectype);

874:   VecGetArray(originalVec, &originalValues);
875:   VecGetArray(newVec, &newValues);
876:   PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF);
877:   PetscFree(remoteOffsets);
878:   PetscSFBcastBegin(fieldSF, MPIU_SCALAR, originalValues, newValues);
879:   PetscSFBcastEnd(fieldSF, MPIU_SCALAR, originalValues, newValues);
880:   PetscSFDestroy(&fieldSF);
881:   VecRestoreArray(newVec, &newValues);
882:   VecRestoreArray(originalVec, &originalValues);
883:   PetscLogEventEnd(DMPLEX_DistributeField,dm,0,0,0);
884:   return(0);
885: }
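
/* Usage sketch (added for illustration, not part of the original file): pushing a
   section-described Vec through a distribution SF. The SF, sections and Vecs here
   are placeholders for objects produced by an earlier DMPlexDistribute() call. */
static PetscErrorCode ExampleDistributeVec(DM dm, PetscSF sf, PetscSection origSec, Vec origVec, PetscSection *newSec, Vec *newVec)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = PetscSectionCreate(PetscObjectComm((PetscObject) dm), newSec);CHKERRQ(ierr);
  ierr = VecCreate(PetscObjectComm((PetscObject) dm), newVec);CHKERRQ(ierr);
  /* DMPlexDistributeField() sets the sizes and type of *newVec itself */
  ierr = DMPlexDistributeField(dm, sf, origSec, origVec, *newSec, *newVec);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}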

889: /*@
890:   DMPlexDistributeFieldIS - Distribute field data to match a given PetscSF, usually the SF from mesh distribution

892:   Collective on DM

894:   Input Parameters:
895: + dm - The DMPlex object
896: . pointSF - The PetscSF describing the communication pattern
897: . originalSection - The PetscSection for existing data layout
898: - originalIS - The existing data

900:   Output Parameters:
901: + newSection - The PetscSection describing the new data layout
902: - newIS - The new data

904:   Level: developer

906: .seealso: DMPlexDistribute(), DMPlexDistributeField(), DMPlexDistributeData()
907: @*/
908: PetscErrorCode DMPlexDistributeFieldIS(DM dm, PetscSF pointSF, PetscSection originalSection, IS originalIS, PetscSection newSection, IS *newIS)
909: {
910:   PetscSF         fieldSF;
911:   PetscInt       *newValues, *remoteOffsets, fieldSize;
912:   const PetscInt *originalValues;
913:   PetscErrorCode  ierr;

916:   PetscLogEventBegin(DMPLEX_DistributeField,dm,0,0,0);
917:   PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection);

919:   PetscSectionGetStorageSize(newSection, &fieldSize);
920:   PetscMalloc1(fieldSize, &newValues);

922:   ISGetIndices(originalIS, &originalValues);
923:   PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF);
924:   PetscFree(remoteOffsets);
925:   PetscSFBcastBegin(fieldSF, MPIU_INT, (PetscInt *) originalValues, newValues);
926:   PetscSFBcastEnd(fieldSF, MPIU_INT, (PetscInt *) originalValues, newValues);
927:   PetscSFDestroy(&fieldSF);
928:   ISRestoreIndices(originalIS, &originalValues);
929:   ISCreateGeneral(PetscObjectComm((PetscObject) pointSF), fieldSize, newValues, PETSC_OWN_POINTER, newIS);
930:   PetscLogEventEnd(DMPLEX_DistributeField,dm,0,0,0);
931:   return(0);
932: }

936: /*@
937:   DMPlexDistributeData - Distribute field data to match a given PetscSF, usually the SF from mesh distribution

939:   Collective on DM

941:   Input Parameters:
942: + dm - The DMPlex object
943: . pointSF - The PetscSF describing the communication pattern
944: . originalSection - The PetscSection for existing data layout
945: . datatype - The type of data
946: - originalData - The existing data

948:   Output Parameters:
949: + newSection - The PetscSection describing the new data layout
950: - newData - The new data

952:   Level: developer

954: .seealso: DMPlexDistribute(), DMPlexDistributeField()
955: @*/
956: PetscErrorCode DMPlexDistributeData(DM dm, PetscSF pointSF, PetscSection originalSection, MPI_Datatype datatype, void *originalData, PetscSection newSection, void **newData)
957: {
958:   PetscSF        fieldSF;
959:   PetscInt      *remoteOffsets, fieldSize;
960:   PetscMPIInt    dataSize;

964:   PetscLogEventBegin(DMPLEX_DistributeData,dm,0,0,0);
965:   PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection);

967:   PetscSectionGetStorageSize(newSection, &fieldSize);
968:   MPI_Type_size(datatype, &dataSize);
969:   PetscMalloc(fieldSize * dataSize, newData);

971:   PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF);
972:   PetscFree(remoteOffsets);
973:   PetscSFBcastBegin(fieldSF, datatype, originalData, *newData);
974:   PetscSFBcastEnd(fieldSF, datatype, originalData, *newData);
975:   PetscSFDestroy(&fieldSF);
976:   PetscLogEventEnd(DMPLEX_DistributeData,dm,0,0,0);
977:   return(0);
978: }
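
/* Usage sketch (added for illustration, not part of the original file): moving raw
   integer data attached to mesh points; the output array is allocated inside
   DMPlexDistributeData() and must eventually be freed with PetscFree(). */
static PetscErrorCode ExampleDistributeInts(DM dm, PetscSF sf, PetscSection origSec, PetscInt *origData, PetscSection *newSec, PetscInt **newData)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = PetscSectionCreate(PetscObjectComm((PetscObject) dm), newSec);CHKERRQ(ierr);
  ierr = DMPlexDistributeData(dm, sf, origSec, MPIU_INT, origData, *newSec, (void **) newData);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}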

982: PetscErrorCode DMPlexDistributeCones(DM dm, PetscSF migrationSF, ISLocalToGlobalMapping original, ISLocalToGlobalMapping renumbering, DM dmParallel)
983: {
984:   DM_Plex               *mesh  = (DM_Plex*) dm->data;
985:   DM_Plex               *pmesh = (DM_Plex*) (dmParallel)->data;
986:   MPI_Comm               comm;
987:   PetscSF                coneSF;
988:   PetscSection           originalConeSection, newConeSection;
989:   PetscInt              *remoteOffsets, *cones, *globCones, *newCones, newConesSize;
990:   PetscBool              flg;
991:   PetscErrorCode         ierr;

996:   PetscLogEventBegin(DMPLEX_DistributeCones,dm,0,0,0);

998:   /* Distribute cone section */
999:   PetscObjectGetComm((PetscObject)dm, &comm);
1000:   DMPlexGetConeSection(dm, &originalConeSection);
1001:   DMPlexGetConeSection(dmParallel, &newConeSection);
1002:   PetscSFDistributeSection(migrationSF, originalConeSection, &remoteOffsets, newConeSection);
1003:   DMSetUp(dmParallel);
1004:   {
1005:     PetscInt pStart, pEnd, p;

1007:     PetscSectionGetChart(newConeSection, &pStart, &pEnd);
1008:     for (p = pStart; p < pEnd; ++p) {
1009:       PetscInt coneSize;
1010:       PetscSectionGetDof(newConeSection, p, &coneSize);
1011:       pmesh->maxConeSize = PetscMax(pmesh->maxConeSize, coneSize);
1012:     }
1013:   }
1014:   /* Communicate and renumber cones */
1015:   PetscSFCreateSectionSF(migrationSF, originalConeSection, remoteOffsets, newConeSection, &coneSF);
1016:   PetscFree(remoteOffsets);
1017:   DMPlexGetCones(dm, &cones);
1018:   if (original) {
1019:     PetscInt numCones;

1021:     PetscSectionGetStorageSize(originalConeSection,&numCones); PetscMalloc1(numCones,&globCones);
1022:     ISLocalToGlobalMappingApplyBlock(original, numCones, cones, globCones);
1023:   }
1024:   else {
1025:     globCones = cones;
1026:   }
1027:   DMPlexGetCones(dmParallel, &newCones);
1028:   PetscSFBcastBegin(coneSF, MPIU_INT, globCones, newCones);
1029:   PetscSFBcastEnd(coneSF, MPIU_INT, globCones, newCones);
1030:   if (original) {
1031:     PetscFree(globCones);
1032:   }
1033:   PetscSectionGetStorageSize(newConeSection, &newConesSize);
1034:   ISGlobalToLocalMappingApplyBlock(renumbering, IS_GTOLM_MASK, newConesSize, newCones, NULL, newCones);
1035: #if PETSC_USE_DEBUG
1036:   {
1037:     PetscInt  p;
1038:     PetscBool valid = PETSC_TRUE;
1039:     for (p = 0; p < newConesSize; ++p) {
1040:       if (newCones[p] < 0) {valid = PETSC_FALSE; PetscPrintf(PETSC_COMM_SELF, "Point %d not in overlap SF\n", p);}
1041:     }
1042:     if (!valid) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Invalid global to local map");
1043:   }
1044: #endif
1045:   PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-cones_view", &flg);
1046:   if (flg) {
1047:     PetscPrintf(comm, "Serial Cone Section:\n");
1048:     PetscSectionView(originalConeSection, PETSC_VIEWER_STDOUT_WORLD);
1049:     PetscPrintf(comm, "Parallel Cone Section:\n");
1050:     PetscSectionView(newConeSection, PETSC_VIEWER_STDOUT_WORLD);
1051:     PetscSFView(coneSF, NULL);
1052:   }
1053:   DMPlexGetConeOrientations(dm, &cones);
1054:   DMPlexGetConeOrientations(dmParallel, &newCones);
1055:   PetscSFBcastBegin(coneSF, MPIU_INT, cones, newCones);
1056:   PetscSFBcastEnd(coneSF, MPIU_INT, cones, newCones);
1057:   PetscSFDestroy(&coneSF);
1058:   PetscLogEventEnd(DMPLEX_DistributeCones,dm,0,0,0);
1059:   /* Create supports and stratify sieve */
1060:   {
1061:     PetscInt pStart, pEnd;

1063:     PetscSectionGetChart(pmesh->coneSection, &pStart, &pEnd);
1064:     PetscSectionSetChart(pmesh->supportSection, pStart, pEnd);
1065:   }
1066:   DMPlexSymmetrize(dmParallel);
1067:   DMPlexStratify(dmParallel);
1068:   pmesh->useCone    = mesh->useCone;
1069:   pmesh->useClosure = mesh->useClosure;
1070:   return(0);
1071: }

1075: PetscErrorCode DMPlexDistributeCoordinates(DM dm, PetscSF migrationSF, DM dmParallel)
1076: {
1077:   MPI_Comm         comm;
1078:   PetscSection     originalCoordSection, newCoordSection;
1079:   Vec              originalCoordinates, newCoordinates;
1080:   PetscInt         bs;
1081:   const char      *name;
1082:   const PetscReal *maxCell, *L;
1083:   const DMBoundaryType *bd;
1084:   PetscErrorCode   ierr;


1090:   PetscObjectGetComm((PetscObject)dm, &comm);
1091:   DMGetCoordinateSection(dm, &originalCoordSection);
1092:   DMGetCoordinateSection(dmParallel, &newCoordSection);
1093:   DMGetCoordinatesLocal(dm, &originalCoordinates);
1094:   if (originalCoordinates) {
1095:     VecCreate(PETSC_COMM_SELF, &newCoordinates);
1096:     PetscObjectGetName((PetscObject) originalCoordinates, &name);
1097:     PetscObjectSetName((PetscObject) newCoordinates, name);

1099:     DMPlexDistributeField(dm, migrationSF, originalCoordSection, originalCoordinates, newCoordSection, newCoordinates);
1100:     DMSetCoordinatesLocal(dmParallel, newCoordinates);
1101:     VecGetBlockSize(originalCoordinates, &bs);
1102:     VecSetBlockSize(newCoordinates, bs);
1103:     VecDestroy(&newCoordinates);
1104:   }
1105:   DMGetPeriodicity(dm, &maxCell, &L, &bd);
1106:   if (L) {DMSetPeriodicity(dmParallel, maxCell, L, bd);}
1107:   return(0);
1108: }

1112: /* Here we are assuming that process 0 always has everything */
1113: PetscErrorCode DMPlexDistributeLabels(DM dm, PetscSF migrationSF, DM dmParallel)
1114: {
1115:   DM_Plex         *mesh = (DM_Plex*) dm->data;
1116:   MPI_Comm         comm;
1117:   DMLabel          depthLabel;
1118:   PetscMPIInt      rank;
1119:   PetscInt         depth, d, numLabels, numLocalLabels, l;
1120:   PetscBool        hasLabels = PETSC_FALSE, lsendDepth, sendDepth;
1121:   PetscObjectState depthState = -1;
1122:   PetscErrorCode   ierr;

1127:   PetscLogEventBegin(DMPLEX_DistributeLabels,dm,0,0,0);
1128:   PetscObjectGetComm((PetscObject)dm, &comm);
1129:   MPI_Comm_rank(comm, &rank);

1131:   /* If the user has changed the depth label, communicate it instead */
1132:   DMPlexGetDepth(dm, &depth);
1133:   DMPlexGetDepthLabel(dm, &depthLabel);
1134:   if (depthLabel) {DMLabelGetState(depthLabel, &depthState);}
1135:   lsendDepth = mesh->depthState != depthState ? PETSC_TRUE : PETSC_FALSE;
1136:   MPIU_Allreduce(&lsendDepth, &sendDepth, 1, MPIU_BOOL, MPI_LOR, comm);
1137:   if (sendDepth) {
1138:     DMRemoveLabel(dmParallel, "depth", &depthLabel);
1139:     DMLabelDestroy(&depthLabel);
1140:   }
1141:   /* Everyone must have either the same number of labels, or none */
1142:   DMGetNumLabels(dm, &numLocalLabels);
1143:   numLabels = numLocalLabels;
1144:   MPI_Bcast(&numLabels, 1, MPIU_INT, 0, comm);
1145:   if (numLabels == numLocalLabels) hasLabels = PETSC_TRUE;
1146:   for (l = numLabels-1; l >= 0; --l) {
1147:     DMLabel     label = NULL, labelNew = NULL;
1148:     PetscBool   isdepth;

1150:     if (hasLabels) {
1151:       DMGetLabelByNum(dm, l, &label);
1152:       /* Skip "depth" because it is recreated */
1153:       PetscStrcmp(label->name, "depth", &isdepth);
1154:     }
1155:     MPI_Bcast(&isdepth, 1, MPIU_BOOL, 0, comm);
1156:     if (isdepth && !sendDepth) continue;
1157:     DMLabelDistribute(label, migrationSF, &labelNew);
1158:     if (isdepth) {
1159:       /* Put in any missing strata which can occur if users are managing the depth label themselves */
1160:       PetscInt gdepth;

1162:       MPIU_Allreduce(&depth, &gdepth, 1, MPIU_INT, MPI_MAX, comm);
1163:       if ((depth >= 0) && (gdepth != depth)) SETERRQ2(PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Inconsistent Plex depth %d != %d", depth, gdepth);
1164:       for (d = 0; d <= gdepth; ++d) {
1165:         PetscBool has;

1167:         DMLabelHasStratum(labelNew, d, &has);
1168:         if (!has) DMLabelAddStratum(labelNew, d);
1169:       }
1170:     }
1171:     DMAddLabel(dmParallel, labelNew);
1172:   }
1173:   PetscLogEventEnd(DMPLEX_DistributeLabels,dm,0,0,0);
1174:   return(0);
1175: }

1179: PetscErrorCode DMPlexDistributeSetupHybrid(DM dm, PetscSF migrationSF, ISLocalToGlobalMapping renumbering, DM dmParallel)
1180: {
1181:   DM_Plex        *mesh  = (DM_Plex*) dm->data;
1182:   DM_Plex        *pmesh = (DM_Plex*) (dmParallel)->data;
1183:   PetscBool      *isHybrid, *isHybridParallel;
1184:   PetscInt        dim, depth, d;
1185:   PetscInt        pStart, pEnd, pStartP, pEndP;
1186:   PetscErrorCode  ierr;


1192:   DMGetDimension(dm, &dim);
1193:   DMPlexGetDepth(dm, &depth);
1194:   DMPlexGetChart(dm,&pStart,&pEnd);
1195:   DMPlexGetChart(dmParallel,&pStartP,&pEndP);
1196:   PetscCalloc2(pEnd-pStart,&isHybrid,pEndP-pStartP,&isHybridParallel);
1197:   for (d = 0; d <= depth; d++) {
1198:     PetscInt hybridMax = (depth == 1 && d == 1) ? mesh->hybridPointMax[dim] : mesh->hybridPointMax[d];

1200:     if (hybridMax >= 0) {
1201:       PetscInt sStart, sEnd, p;

1203:       DMPlexGetDepthStratum(dm,d,&sStart,&sEnd);
1204:       for (p = hybridMax; p < sEnd; p++) isHybrid[p-pStart] = PETSC_TRUE;
1205:     }
1206:   }
1207:   PetscSFBcastBegin(migrationSF,MPIU_BOOL,isHybrid,isHybridParallel);
1208:   PetscSFBcastEnd(migrationSF,MPIU_BOOL,isHybrid,isHybridParallel);
1209:   for (d = 0; d <= dim; d++) pmesh->hybridPointMax[d] = -1;
1210:   for (d = 0; d <= depth; d++) {
1211:     PetscInt sStart, sEnd, p, dd;

1213:     DMPlexGetDepthStratum(dmParallel,d,&sStart,&sEnd);
1214:     dd = (depth == 1 && d == 1) ? dim : d;
1215:     for (p = sStart; p < sEnd; p++) {
1216:       if (isHybridParallel[p-pStartP]) {
1217:         pmesh->hybridPointMax[dd] = p;
1218:         break;
1219:       }
1220:     }
1221:   }
1222:   PetscFree2(isHybrid,isHybridParallel);
1223:   return(0);
1224: }

1228: PetscErrorCode DMPlexDistributeSetupTree(DM dm, PetscSF migrationSF, ISLocalToGlobalMapping original, ISLocalToGlobalMapping renumbering, DM dmParallel)
1229: {
1230:   DM_Plex        *mesh  = (DM_Plex*) dm->data;
1231:   DM_Plex        *pmesh = (DM_Plex*) (dmParallel)->data;
1232:   MPI_Comm        comm;
1233:   DM              refTree;
1234:   PetscSection    origParentSection, newParentSection;
1235:   PetscInt        *origParents, *origChildIDs;
1236:   PetscBool       flg;
1237:   PetscErrorCode  ierr;

1242:   PetscObjectGetComm((PetscObject)dm, &comm);

1244:   /* Set up tree */
1245:   DMPlexGetReferenceTree(dm,&refTree);
1246:   DMPlexSetReferenceTree(dmParallel,refTree);
1247:   DMPlexGetTree(dm,&origParentSection,&origParents,&origChildIDs,NULL,NULL);
1248:   if (origParentSection) {
1249:     PetscInt        pStart, pEnd;
1250:     PetscInt        *newParents, *newChildIDs, *globParents;
1251:     PetscInt        *remoteOffsetsParents, newParentSize;
1252:     PetscSF         parentSF;

1254:     DMPlexGetChart(dmParallel, &pStart, &pEnd);
1255:     PetscSectionCreate(PetscObjectComm((PetscObject)dmParallel),&newParentSection);
1256:     PetscSectionSetChart(newParentSection,pStart,pEnd);
1257:     PetscSFDistributeSection(migrationSF, origParentSection, &remoteOffsetsParents, newParentSection);
1258:     PetscSFCreateSectionSF(migrationSF, origParentSection, remoteOffsetsParents, newParentSection, &parentSF);
1259:     PetscFree(remoteOffsetsParents);
1260:     PetscSectionGetStorageSize(newParentSection,&newParentSize);
1261:     PetscMalloc2(newParentSize,&newParents,newParentSize,&newChildIDs);
1262:     if (original) {
1263:       PetscInt numParents;

1265:       PetscSectionGetStorageSize(origParentSection,&numParents);
1266:       PetscMalloc1(numParents,&globParents);
1267:       ISLocalToGlobalMappingApplyBlock(original, numParents, origParents, globParents);
1268:     }
1269:     else {
1270:       globParents = origParents;
1271:     }
1272:     PetscSFBcastBegin(parentSF, MPIU_INT, globParents, newParents);
1273:     PetscSFBcastEnd(parentSF, MPIU_INT, globParents, newParents);
1274:     if (original) {
1275:       PetscFree(globParents);
1276:     }
1277:     PetscSFBcastBegin(parentSF, MPIU_INT, origChildIDs, newChildIDs);
1278:     PetscSFBcastEnd(parentSF, MPIU_INT, origChildIDs, newChildIDs);
1279:     ISGlobalToLocalMappingApplyBlock(renumbering,IS_GTOLM_MASK, newParentSize, newParents, NULL, newParents);
1280: #if PETSC_USE_DEBUG
1281:     {
1282:       PetscInt  p;
1283:       PetscBool valid = PETSC_TRUE;
1284:       for (p = 0; p < newParentSize; ++p) {
1285:         if (newParents[p] < 0) {valid = PETSC_FALSE; PetscPrintf(PETSC_COMM_SELF, "Point %d not in overlap SF\n", p);}
1286:       }
1287:       if (!valid) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Invalid global to local map");
1288:     }
1289: #endif
1290:     PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-parents_view", &flg);
1291:     if (flg) {
1292:       PetscPrintf(comm, "Serial Parent Section: \n");
1293:       PetscSectionView(origParentSection, PETSC_VIEWER_STDOUT_WORLD);
1294:       PetscPrintf(comm, "Parallel Parent Section: \n");
1295:       PetscSectionView(newParentSection, PETSC_VIEWER_STDOUT_WORLD);
1296:       PetscSFView(parentSF, NULL);
1297:     }
1298:     DMPlexSetTree(dmParallel,newParentSection,newParents,newChildIDs);
1299:     PetscSectionDestroy(&newParentSection);
1300:     PetscFree2(newParents,newChildIDs);
1301:     PetscSFDestroy(&parentSF);
1302:   }
1303:   pmesh->useAnchors = mesh->useAnchors;
1304:   return(0);
1305: }

1309: PetscErrorCode DMPlexDistributeSF(DM dm, PetscSF migrationSF, DM dmParallel)
1310: {
1311:   DM_Plex               *mesh  = (DM_Plex*) dm->data;
1312:   DM_Plex               *pmesh = (DM_Plex*) (dmParallel)->data;
1313:   PetscMPIInt            rank, numProcs;
1314:   MPI_Comm               comm;
1315:   PetscErrorCode         ierr;


1321:   /* Create point SF for parallel mesh */
1322:   PetscLogEventBegin(DMPLEX_DistributeSF,dm,0,0,0);
1323:   PetscObjectGetComm((PetscObject)dm, &comm);
1324:   MPI_Comm_rank(comm, &rank);
1325:   MPI_Comm_size(comm, &numProcs);
1326:   {
1327:     const PetscInt *leaves;
1328:     PetscSFNode    *remotePoints, *rowners, *lowners;
1329:     PetscInt        numRoots, numLeaves, numGhostPoints = 0, p, gp, *ghostPoints;
1330:     PetscInt        pStart, pEnd;

1332:     DMPlexGetChart(dmParallel, &pStart, &pEnd);
1333:     PetscSFGetGraph(migrationSF, &numRoots, &numLeaves, &leaves, NULL);
1334:     PetscMalloc2(numRoots,&rowners,numLeaves,&lowners);
1335:     for (p=0; p<numRoots; p++) {
1336:       rowners[p].rank  = -1;
1337:       rowners[p].index = -1;
1338:     }
1339:     PetscSFBcastBegin(migrationSF, MPIU_2INT, rowners, lowners);
1340:     PetscSFBcastEnd(migrationSF, MPIU_2INT, rowners, lowners);
1341:     for (p = 0; p < numLeaves; ++p) {
1342:       if (lowners[p].rank < 0 || lowners[p].rank == rank) { /* Either put in a bid or we know we own it */
1343:         lowners[p].rank  = rank;
1344:         lowners[p].index = leaves ? leaves[p] : p;
1345:       } else if (lowners[p].rank >= 0) { /* Point already claimed so flag so that MAXLOC does not listen to us */
1346:         lowners[p].rank  = -2;
1347:         lowners[p].index = -2;
1348:       }
1349:     }
1350:     for (p=0; p<numRoots; p++) { /* Root must not participate in the reduction, flag so that MAXLOC does not use */
1351:       rowners[p].rank  = -3;
1352:       rowners[p].index = -3;
1353:     }
1354:     PetscSFReduceBegin(migrationSF, MPIU_2INT, lowners, rowners, MPI_MAXLOC);
1355:     PetscSFReduceEnd(migrationSF, MPIU_2INT, lowners, rowners, MPI_MAXLOC);
1356:     PetscSFBcastBegin(migrationSF, MPIU_2INT, rowners, lowners);
1357:     PetscSFBcastEnd(migrationSF, MPIU_2INT, rowners, lowners);
1358:     for (p = 0; p < numLeaves; ++p) {
1359:       if (lowners[p].rank < 0 || lowners[p].index < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Cell partition corrupt: point not claimed");
1360:       if (lowners[p].rank != rank) ++numGhostPoints;
1361:     }
1362:     PetscMalloc1(numGhostPoints, &ghostPoints);
1363:     PetscMalloc1(numGhostPoints, &remotePoints);
1364:     for (p = 0, gp = 0; p < numLeaves; ++p) {
1365:       if (lowners[p].rank != rank) {
1366:         ghostPoints[gp]        = leaves ? leaves[p] : p;
1367:         remotePoints[gp].rank  = lowners[p].rank;
1368:         remotePoints[gp].index = lowners[p].index;
1369:         ++gp;
1370:       }
1371:     }
1372:     PetscFree2(rowners,lowners);
1373:     PetscSFSetGraph((dmParallel)->sf, pEnd - pStart, numGhostPoints, ghostPoints, PETSC_OWN_POINTER, remotePoints, PETSC_OWN_POINTER);
1374:     PetscSFSetFromOptions((dmParallel)->sf);
1375:   }
1376:   pmesh->useCone    = mesh->useCone;
1377:   pmesh->useClosure = mesh->useClosure;
1378:   PetscLogEventEnd(DMPLEX_DistributeSF,dm,0,0,0);
1379:   return(0);
1380: }

1384: /*@C
1385:   DMPlexCreatePointSF - Build a point SF from an SF describing a point migration

1387:   Input Parameters:
1388: + dm          - The source DMPlex object
1389: . migrationSF - The star forest that describes the parallel point remapping
1390: - ownership   - Flag causing a vote to determine point ownership

1392:   Output Parameter:
1393: . pointSF     - The star forest describing the point overlap in the remapped DM

1395:   Level: developer

1397: .seealso: DMPlexDistribute(), DMPlexDistributeOverlap()
1398: @*/
1399: PetscErrorCode DMPlexCreatePointSF(DM dm, PetscSF migrationSF, PetscBool ownership, PetscSF *pointSF)
1400: {
1401:   PetscMPIInt        rank;
1402:   PetscInt           p, nroots, nleaves, idx, npointLeaves;
1403:   PetscInt          *pointLocal;
1404:   const PetscInt    *leaves;
1405:   const PetscSFNode *roots;
1406:   PetscSFNode       *rootNodes, *leafNodes, *pointRemote;
1407:   PetscErrorCode     ierr;

1411:   MPI_Comm_rank(PetscObjectComm((PetscObject) dm), &rank);

1413:   PetscSFGetGraph(migrationSF, &nroots, &nleaves, &leaves, &roots);
1414:   PetscMalloc2(nroots, &rootNodes, nleaves, &leafNodes);
1415:   if (ownership) {
1416:     /* Point ownership vote: Process with highest rank owns shared points */
1417:     for (p = 0; p < nleaves; ++p) {
1418:       /* Either put in a bid or we know we own it */
1419:       leafNodes[p].rank  = rank;
1420:       leafNodes[p].index = p;
1421:     }
1422:     for (p = 0; p < nroots; p++) {
1423:       /* Root must not participate in the reduction, flag so that MAXLOC does not use */
1424:       rootNodes[p].rank  = -3;
1425:       rootNodes[p].index = -3;
1426:     }
1427:     PetscSFReduceBegin(migrationSF, MPIU_2INT, leafNodes, rootNodes, MPI_MAXLOC);
1428:     PetscSFReduceEnd(migrationSF, MPIU_2INT, leafNodes, rootNodes, MPI_MAXLOC);
1429:   } else {
1430:     for (p = 0; p < nroots; p++) {
1431:       rootNodes[p].index = -1;
1432:       rootNodes[p].rank = rank;
1433:     };
1434:     for (p = 0; p < nleaves; p++) {
1435:       /* Write new local id into old location */
1436:       if (roots[p].rank == rank) {
1437:         rootNodes[roots[p].index].index = leaves ? leaves[p] : p;
1438:       }
1439:     }
1440:   }
1441:   PetscSFBcastBegin(migrationSF, MPIU_2INT, rootNodes, leafNodes);
1442:   PetscSFBcastEnd(migrationSF, MPIU_2INT, rootNodes, leafNodes);

1444:   for (npointLeaves = 0, p = 0; p < nleaves; p++) {if (leafNodes[p].rank != rank) npointLeaves++;}
1445:   PetscMalloc1(npointLeaves, &pointLocal);
1446:   PetscMalloc1(npointLeaves, &pointRemote);
1447:   for (idx = 0, p = 0; p < nleaves; p++) {
1448:     if (leafNodes[p].rank != rank) {
1449:       pointLocal[idx] = p;
1450:       pointRemote[idx] = leafNodes[p];
1451:       idx++;
1452:     }
1453:   }
1454:   PetscSFCreate(PetscObjectComm((PetscObject) dm), pointSF);
1455:   PetscSFSetFromOptions(*pointSF);
1456:   PetscSFSetGraph(*pointSF, nleaves, npointLeaves, pointLocal, PETSC_OWN_POINTER, pointRemote, PETSC_OWN_POINTER);
1457:   PetscFree2(rootNodes, leafNodes);
1458:   return(0);
1459: }
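A minimal usage sketch, assuming migrationSF is a point-migration SF (e.g. from DMPlexPartitionLabelCreateSF()) and dmNew is the DM produced by DMPlexMigrate() over that SF; the names are illustrative and error checking is elided to match the listing.

  PetscSF pointSF;

  /* Vote on ownership of shared points and attach the resulting SF to dmNew */
  DMPlexCreatePointSF(dmNew, migrationSF, PETSC_TRUE, &pointSF);
  DMSetPointSF(dmNew, pointSF);
  PetscSFDestroy(&pointSF);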

1463: /*@C
1464:   DMPlexMigrate - Migrates internal DM data over the supplied star forest

1466:   Input Parameters:
1467: + dm       - The source DMPlex object
1468: - sf       - The star forest communication context describing the migration pattern

1470:   Output Parameter:
1471: . targetDM - The target DMPlex object

1473:   Level: intermediate

1475: .seealso: DMPlexDistribute(), DMPlexDistributeOverlap()
1476: @*/
1477: PetscErrorCode DMPlexMigrate(DM dm, PetscSF sf, DM targetDM)
1478: {
1479:   MPI_Comm               comm;
1480:   PetscInt               dim, nroots;
1481:   PetscSF                sfPoint;
1482:   ISLocalToGlobalMapping ltogMigration;
1483:   ISLocalToGlobalMapping ltogOriginal = NULL;
1484:   PetscBool              flg;
1485:   PetscErrorCode         ierr;

1489:   PetscLogEventBegin(DMPLEX_Migrate, dm, 0, 0, 0);
1490:   PetscObjectGetComm((PetscObject) dm, &comm);
1491:   DMGetDimension(dm, &dim);
1492:   DMSetDimension(targetDM, dim);

1494:   /* Check for a one-to-all distribution pattern */
1495:   DMGetPointSF(dm, &sfPoint);
1496:   PetscSFGetGraph(sfPoint, &nroots, NULL, NULL, NULL);
1497:   if (nroots >= 0) {
1498:     IS                     isOriginal;
1499:     PetscInt               n, size, nleaves;
1500:     PetscInt              *numbering_orig, *numbering_new;
1501:     /* Get the original point numbering */
1502:     DMPlexCreatePointNumbering(dm, &isOriginal);
1503:     ISLocalToGlobalMappingCreateIS(isOriginal, &ltogOriginal);
1504:     ISLocalToGlobalMappingGetSize(ltogOriginal, &size);
1505:     ISLocalToGlobalMappingGetBlockIndices(ltogOriginal, (const PetscInt**)&numbering_orig);
1506:     /* Convert to positive global numbers */
1507:     for (n=0; n<size; n++) {if (numbering_orig[n] < 0) numbering_orig[n] = -(numbering_orig[n]+1);}
1508:     /* Derive the new local-to-global mapping from the old one */
1509:     PetscSFGetGraph(sf, NULL, &nleaves, NULL, NULL);
1510:     PetscMalloc1(nleaves, &numbering_new);
1511:     PetscSFBcastBegin(sf, MPIU_INT, (PetscInt *) numbering_orig, numbering_new);
1512:     PetscSFBcastEnd(sf, MPIU_INT, (PetscInt *) numbering_orig, numbering_new);
1513:     ISLocalToGlobalMappingCreate(comm, 1, nleaves, (const PetscInt*) numbering_new, PETSC_OWN_POINTER, &ltogMigration);
1514:     ISLocalToGlobalMappingRestoreBlockIndices(ltogOriginal, (const PetscInt**)&numbering_orig);
1515:     ISDestroy(&isOriginal);
1516:   } else {
1517:     /* One-to-all distribution pattern: We can derive LToG from SF */
1518:     ISLocalToGlobalMappingCreateSF(sf, 0, &ltogMigration);
1519:   }
1520:   PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-partition_view", &flg);
1521:   if (flg) {
1522:     PetscPrintf(comm, "Point renumbering for DM migration:\n");
1523:     ISLocalToGlobalMappingView(ltogMigration, NULL);
1524:   }
1525:   /* Migrate DM data to target DM */
1526:   DMPlexDistributeCones(dm, sf, ltogOriginal, ltogMigration, targetDM);
1527:   DMPlexDistributeCoordinates(dm, sf, targetDM);
1528:   DMPlexDistributeLabels(dm, sf, targetDM);
1529:   DMPlexDistributeSetupHybrid(dm, sf, ltogMigration, targetDM);
1530:   DMPlexDistributeSetupTree(dm, sf, ltogOriginal, ltogMigration, targetDM);
1531:   ISLocalToGlobalMappingDestroy(&ltogOriginal);
1532:   ISLocalToGlobalMappingDestroy(&ltogMigration);
1533:   PetscLogEventEnd(DMPLEX_Migrate, dm, 0, 0, 0);
1534:   return(0);
1535: }
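A minimal sketch of calling DMPlexMigrate() directly, mirroring the pattern used by DMPlexDistribute() below; dm and migrationSF are assumed to exist already, and error checking is elided to match the listing.

  DM      dmTarget;
  PetscSF pointSF;

  DMPlexCreate(PetscObjectComm((PetscObject) dm), &dmTarget);
  DMPlexMigrate(dm, migrationSF, dmTarget);
  /* The migrated DM still needs a point SF describing its shared points */
  DMPlexCreatePointSF(dmTarget, migrationSF, PETSC_TRUE, &pointSF);
  DMSetPointSF(dmTarget, pointSF);
  PetscSFDestroy(&pointSF);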

1539: /*@C
1540:   DMPlexDistribute - Distributes the mesh and any associated sections.

1542:   Collective on dm

1544:   Input Parameters:
1545: + dm  - The original DMPlex object
1546: - overlap - The overlap of partitions (0 means no overlap)

1548:   Output Parameters:
1549: + sf - The PetscSF used for point distribution, or NULL if not needed
1550: - dmParallel - The distributed DMPlex object, or NULL

1552:   Note: If the mesh was not distributed (e.g. on a single process), *dmParallel is set to NULL.

1554:   The user can control the definition of adjacency for the mesh using DMPlexSetAdjacencyUseCone() and
1555:   DMPlexSetAdjacencyUseClosure(). They should choose the combination appropriate for the function
1556:   representation on the mesh.

1558:   Level: intermediate

1560: .keywords: mesh, elements
1561: .seealso: DMPlexCreate(), DMPlexDistributeByFace(), DMPlexSetAdjacencyUseCone(), DMPlexSetAdjacencyUseClosure()
1562: @*/
1563: PetscErrorCode DMPlexDistribute(DM dm, PetscInt overlap, PetscSF *sf, DM *dmParallel)
1564: {
1565:   MPI_Comm               comm;
1566:   PetscPartitioner       partitioner;
1567:   IS                     cellPart;
1568:   PetscSection           cellPartSection;
1569:   DM                     dmCoord;
1570:   DMLabel                lblPartition, lblMigration;
1571:   PetscSF                sfProcess, sfMigration, sfStratified, sfPoint;
1572:   PetscBool              flg;
1573:   PetscMPIInt            rank, numProcs, p;
1574:   PetscErrorCode         ierr;


1581:   PetscLogEventBegin(DMPLEX_Distribute,dm,0,0,0);
1582:   PetscObjectGetComm((PetscObject)dm,&comm);
1583:   MPI_Comm_rank(comm, &rank);
1584:   MPI_Comm_size(comm, &numProcs);

1586:   *dmParallel = NULL;
1587:   if (numProcs == 1) {
1588:     PetscLogEventEnd(DMPLEX_Distribute,dm,0,0,0);
1589:     return(0);
1590:   }

1592:   /* Create cell partition */
1593:   PetscLogEventBegin(PETSCPARTITIONER_Partition,dm,0,0,0);
1594:   PetscSectionCreate(comm, &cellPartSection);
1595:   DMPlexGetPartitioner(dm, &partitioner);
1596:   PetscPartitionerPartition(partitioner, dm, cellPartSection, &cellPart);
1597:   {
1598:     /* Convert partition to DMLabel */
1599:     PetscInt proc, pStart, pEnd, npoints, poffset;
1600:     const PetscInt *points;
1601:     DMLabelCreate("Point Partition", &lblPartition);
1602:     ISGetIndices(cellPart, &points);
1603:     PetscSectionGetChart(cellPartSection, &pStart, &pEnd);
1604:     for (proc = pStart; proc < pEnd; proc++) {
1605:       PetscSectionGetDof(cellPartSection, proc, &npoints);
1606:       PetscSectionGetOffset(cellPartSection, proc, &poffset);
1607:       for (p = poffset; p < poffset+npoints; p++) {
1608:         DMLabelSetValue(lblPartition, points[p], proc);
1609:       }
1610:     }
1611:     ISRestoreIndices(cellPart, &points);
1612:   }
1613:   DMPlexPartitionLabelClosure(dm, lblPartition);
1614:   {
1615:     /* Build a global process SF */
1616:     PetscSFNode *remoteProc;
1617:     PetscMalloc1(numProcs, &remoteProc);
1618:     for (p = 0; p < numProcs; ++p) {
1619:       remoteProc[p].rank  = p;
1620:       remoteProc[p].index = rank;
1621:     }
1622:     PetscSFCreate(comm, &sfProcess);
1623:     PetscObjectSetName((PetscObject) sfProcess, "Process SF");
1624:     PetscSFSetGraph(sfProcess, numProcs, numProcs, NULL, PETSC_OWN_POINTER, remoteProc, PETSC_OWN_POINTER);
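    /* Leaf p on this rank references root <rank> on process p, so every pair
       of processes is connected; DMPlexPartitionLabelInvert() uses this SF to
       exchange partition points between all ranks. */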
1625:   }
1626:   DMLabelCreate("Point migration", &lblMigration);
1627:   DMPlexPartitionLabelInvert(dm, lblPartition, sfProcess, lblMigration);
1628:   DMPlexPartitionLabelCreateSF(dm, lblMigration, &sfMigration);
1629:   /* Stratify the SF in case we are migrating an already parallel plex */
1630:   DMPlexStratifyMigrationSF(dm, sfMigration, &sfStratified);
1631:   PetscSFDestroy(&sfMigration);
1632:   sfMigration = sfStratified;
1633:   PetscLogEventEnd(PETSCPARTITIONER_Partition,dm,0,0,0);
1634:   PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-partition_view", &flg);
1635:   if (flg) {
1636:     DMLabelView(lblPartition, PETSC_VIEWER_STDOUT_WORLD);
1637:     PetscSFView(sfMigration, PETSC_VIEWER_STDOUT_WORLD);
1638:   }

1640:   /* Create non-overlapping parallel DM and migrate internal data */
1641:   DMPlexCreate(comm, dmParallel);
1642:   PetscObjectSetName((PetscObject) *dmParallel, "Parallel Mesh");
1643:   DMPlexMigrate(dm, sfMigration, *dmParallel);

1645:   /* Build the point SF without overlap */
1646:   DMPlexCreatePointSF(*dmParallel, sfMigration, PETSC_TRUE, &sfPoint);
1647:   DMSetPointSF(*dmParallel, sfPoint);
1648:   DMGetCoordinateDM(*dmParallel, &dmCoord);
1649:   if (dmCoord) {DMSetPointSF(dmCoord, sfPoint);}
1650:   if (flg) {PetscSFView(sfPoint, PETSC_VIEWER_STDOUT_WORLD);}

1652:   if (overlap > 0) {
1653:     DM                 dmOverlap;
1654:     PetscInt           nroots, nleaves;
1655:     PetscSFNode       *newRemote;
1656:     const PetscSFNode *oldRemote;
1657:     PetscSF            sfOverlap, sfOverlapPoint;
1658:     /* Add the partition overlap to the distributed DM */
1659:     DMPlexDistributeOverlap(*dmParallel, overlap, &sfOverlap, &dmOverlap);
1660:     DMDestroy(dmParallel);
1661:     *dmParallel = dmOverlap;
1662:     if (flg) {
1663:       PetscPrintf(comm, "Overlap Migration SF:\n");
1664:       PetscSFView(sfOverlap, NULL);
1665:     }

1667:     /* Re-map the migration SF to establish the full migration pattern */
1668:     PetscSFGetGraph(sfMigration, &nroots, NULL, NULL, &oldRemote);
1669:     PetscSFGetGraph(sfOverlap, NULL, &nleaves, NULL, NULL);
1670:     PetscMalloc1(nleaves, &newRemote);
1671:     PetscSFBcastBegin(sfOverlap, MPIU_2INT, oldRemote, newRemote);
1672:     PetscSFBcastEnd(sfOverlap, MPIU_2INT, oldRemote, newRemote);
1673:     PetscSFCreate(comm, &sfOverlapPoint);
1674:     PetscSFSetGraph(sfOverlapPoint, nroots, nleaves, NULL, PETSC_OWN_POINTER, newRemote, PETSC_OWN_POINTER);
1675:     PetscSFDestroy(&sfOverlap);
1676:     PetscSFDestroy(&sfMigration);
1677:     sfMigration = sfOverlapPoint;
1678:   }
1679:   /* Cleanup Partition */
1680:   PetscSFDestroy(&sfProcess);
1681:   DMLabelDestroy(&lblPartition);
1682:   DMLabelDestroy(&lblMigration);
1683:   PetscSectionDestroy(&cellPartSection);
1684:   ISDestroy(&cellPart);
1685:   /* Copy BC */
1686:   DMCopyBoundary(dm, *dmParallel);
1687:   /* Create sfNatural */
1688:   if (dm->useNatural) {
1689:     PetscSection section;

1691:     DMGetDefaultSection(dm, &section);
1692:     DMPlexCreateGlobalToNaturalSF(*dmParallel, section, sfMigration, &(*dmParallel)->sfNatural);
1693:   }
1694:   /* Cleanup */
1695:   if (sf) {*sf = sfMigration;}
1696:   else    {PetscSFDestroy(&sfMigration);}
1697:   PetscSFDestroy(&sfPoint);
1698:   PetscLogEventEnd(DMPLEX_Distribute,dm,0,0,0);
1699:   return(0);
1700: }
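A minimal usage sketch of the canonical call sequence; dm is an existing serial or parallel DMPlex, the other names are illustrative, and error checking is elided to match the listing.

  DM      dmDist = NULL;
  PetscSF sfDist = NULL;

  /* Distribute with one layer of overlap; pass NULL for the SF if it is not needed */
  DMPlexDistribute(dm, 1, &sfDist, &dmDist);
  if (dmDist) {               /* NULL when running on a single process */
    DMDestroy(&dm);
    dm = dmDist;
  }
  PetscSFDestroy(&sfDist);

Running with -partition_view prints the partition label, the migration SF, and the resulting point SF, as done in the body above.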

1704: /*@C
1705:   DMPlexDistributeOverlap - Add partition overlap to a distributed non-overlapping DM.

1707:   Collective on dm

1709:   Input Parameters:
1710: + dm  - The non-overlapping distributed DMPlex object
1711: - overlap - The overlap of partitions (0 means no overlap)

1713:   Output Parameters:
1714: + sf - The PetscSF used for point distribution, or NULL if not needed
1715: - dmOverlap - The overlapping distributed DMPlex object, or NULL

1717:   Note: If the mesh could not be overlapped, *dmOverlap is set to NULL.

1719:   The user can control the definition of adjacency for the mesh using DMPlexSetAdjacencyUseCone() and
1720:   DMPlexSetAdjacencyUseClosure(). They should choose the combination appropriate for the function
1721:   representation on the mesh.

1723:   Level: intermediate

1725: .keywords: mesh, elements
1726: .seealso: DMPlexCreate(), DMPlexDistributeByFace(), DMPlexSetAdjacencyUseCone(), DMPlexSetAdjacencyUseClosure()
1727: @*/
1728: PetscErrorCode DMPlexDistributeOverlap(DM dm, PetscInt overlap, PetscSF *sf, DM *dmOverlap)
1729: {
1730:   MPI_Comm               comm;
1731:   PetscMPIInt            rank;
1732:   PetscSection           rootSection, leafSection;
1733:   IS                     rootrank, leafrank;
1734:   DM                     dmCoord;
1735:   DMLabel                lblOverlap;
1736:   PetscSF                sfOverlap, sfStratified, sfPoint;
1737:   PetscErrorCode         ierr;


1744:   PetscLogEventBegin(DMPLEX_DistributeOverlap, dm, 0, 0, 0);
1745:   PetscObjectGetComm((PetscObject)dm,&comm);
1746:   MPI_Comm_rank(comm, &rank);

1748:   /* Compute point overlap with neighbouring processes on the distributed DM */
1749:   PetscLogEventBegin(PETSCPARTITIONER_Partition,dm,0,0,0);
1750:   PetscSectionCreate(comm, &rootSection);
1751:   PetscSectionCreate(comm, &leafSection);
1752:   DMPlexDistributeOwnership(dm, rootSection, &rootrank, leafSection, &leafrank);
1753:   DMPlexCreateOverlap(dm, overlap, rootSection, rootrank, leafSection, leafrank, &lblOverlap);
1754:   /* Convert overlap label to stratified migration SF */
1755:   DMPlexPartitionLabelCreateSF(dm, lblOverlap, &sfOverlap);
1756:   DMPlexStratifyMigrationSF(dm, sfOverlap, &sfStratified);
1757:   PetscSFDestroy(&sfOverlap);
1758:   sfOverlap = sfStratified;
1759:   PetscObjectSetName((PetscObject) sfOverlap, "Overlap SF");
1760:   PetscSFSetFromOptions(sfOverlap);

1762:   PetscSectionDestroy(&rootSection);
1763:   PetscSectionDestroy(&leafSection);
1764:   ISDestroy(&rootrank);
1765:   ISDestroy(&leafrank);
1766:   PetscLogEventEnd(PETSCPARTITIONER_Partition,dm,0,0,0);

1768:   /* Build the overlapping DM */
1769:   DMPlexCreate(comm, dmOverlap);
1770:   PetscObjectSetName((PetscObject) *dmOverlap, "Parallel Mesh");
1771:   DMPlexMigrate(dm, sfOverlap, *dmOverlap);
1772:   /* Build the new point SF */
1773:   DMPlexCreatePointSF(*dmOverlap, sfOverlap, PETSC_FALSE, &sfPoint);
1774:   DMSetPointSF(*dmOverlap, sfPoint);
1775:   DMGetCoordinateDM(*dmOverlap, &dmCoord);
1776:   if (dmCoord) {DMSetPointSF(dmCoord, sfPoint);}
1777:   PetscSFDestroy(&sfPoint);
1778:   /* Cleanup overlap partition */
1779:   DMLabelDestroy(&lblOverlap);
1780:   if (sf) *sf = sfOverlap;
1781:   else    {PetscSFDestroy(&sfOverlap);}
1782:   PetscLogEventEnd(DMPLEX_DistributeOverlap, dm, 0, 0, 0);
1783:   return(0);
1784: }
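A minimal usage sketch, assuming dm is already distributed without overlap; names are illustrative and error checking is elided to match the listing.

  DM dmOverlap = NULL;

  /* Add one layer of overlap cells to the local submeshes */
  DMPlexDistributeOverlap(dm, 1, NULL, &dmOverlap);
  if (dmOverlap) {
    DMDestroy(&dm);
    dm = dmOverlap;
  }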

1788: /*@C
1789:   DMPlexGetGatherDM - Get a copy of the DMPlex that gathers all points on the
1790:   root process of the original's communicator.

1792:   Input Parameter:
1793: . dm - the original DMPlex object

1795:   Output Parameter:
1796: . gatherMesh - the gathered DM object, or NULL

1798:   Level: intermediate

1800: .keywords: mesh
1801: .seealso: DMPlexDistribute(), DMPlexGetRedundantDM()
1802: @*/
1803: PetscErrorCode DMPlexGetGatherDM(DM dm, DM * gatherMesh)
1804: {
1805:   MPI_Comm       comm;
1806:   PetscMPIInt    size;
1807:   PetscPartitioner oldPart, gatherPart;

1812:   comm = PetscObjectComm((PetscObject)dm);
1813:   MPI_Comm_size(comm,&size);
1814:   *gatherMesh = NULL;
1815:   if (size == 1) return(0);
1816:   DMPlexGetPartitioner(dm,&oldPart);
1817:   PetscObjectReference((PetscObject)oldPart);
1818:   PetscPartitionerCreate(comm,&gatherPart);
1819:   PetscPartitionerSetType(gatherPart,PETSCPARTITIONERGATHER);
1820:   DMPlexSetPartitioner(dm,gatherPart);
1821:   DMPlexDistribute(dm,0,NULL,gatherMesh);
1822:   DMPlexSetPartitioner(dm,oldPart);
1823:   PetscPartitionerDestroy(&gatherPart);
1824:   PetscPartitionerDestroy(&oldPart);
1825:   return(0);
1826: }
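A minimal usage sketch; the gathered DM is only a temporary copy, the names are illustrative, and error checking is elided to match the listing.

  DM gatherDM = NULL;

  DMPlexGetGatherDM(dm, &gatherDM);
  if (gatherDM) {                  /* NULL when running on a single process */
    /* All mesh points now reside on rank 0 of dm's communicator */
    DMView(gatherDM, PETSC_VIEWER_STDOUT_WORLD);
    DMDestroy(&gatherDM);
  }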

1830: /*@C
1831:   DMPlexGetRedundantDM - Get a copy of the DMPlex that is completely copied on each process.

1833:   Input Parameter:
1834: . dm - the original DMPlex object

1836:   Output Parameter:
1837: . redundantMesh - the redundant DM object, or NULL

1839:   Level: intermediate

1841: .keywords: mesh
1842: .seealso: DMPlexDistribute(), DMPlexGetGatherDM()
1843: @*/
1844: PetscErrorCode DMPlexGetRedundantDM(DM dm, DM * redundantMesh)
1845: {
1846:   MPI_Comm       comm;
1847:   PetscMPIInt    size, rank;
1848:   PetscInt       pStart, pEnd, p;
1849:   PetscInt       numPoints = -1;
1850:   PetscSF        migrationSF, sfPoint;
1851:   DM             gatherDM, dmCoord;
1852:   PetscSFNode    *points;

1857:   comm = PetscObjectComm((PetscObject)dm);
1858:   MPI_Comm_size(comm,&size);
1859:   *redundantMesh = NULL;
1860:   if (size == 1) return(0);
1861:   DMPlexGetGatherDM(dm,&gatherDM);
1862:   if (!gatherDM) return(0);
1863:   MPI_Comm_rank(comm,&rank);
1864:   DMPlexGetChart(gatherDM,&pStart,&pEnd);
1865:   numPoints = pEnd - pStart;
1866:   MPI_Bcast(&numPoints,1,MPIU_INT,0,comm);
1867:   PetscMalloc1(numPoints,&points);
1868:   PetscSFCreate(comm,&migrationSF);
1869:   for (p = 0; p < numPoints; p++) {
1870:     points[p].index = p;
1871:     points[p].rank  = 0;
1872:   }
1873:   PetscSFSetGraph(migrationSF,pEnd-pStart,numPoints,NULL,PETSC_OWN_POINTER,points,PETSC_OWN_POINTER);
1874:   DMPlexCreate(comm, redundantMesh);
1875:   PetscObjectSetName((PetscObject) *redundantMesh, "Redundant Mesh");
1876:   DMPlexMigrate(gatherDM, migrationSF, *redundantMesh);
1877:   DMPlexCreatePointSF(*redundantMesh, migrationSF, PETSC_FALSE, &sfPoint);
1878:   DMSetPointSF(*redundantMesh, sfPoint);
1879:   DMGetCoordinateDM(*redundantMesh, &dmCoord);
1880:   if (dmCoord) {DMSetPointSF(dmCoord, sfPoint);}
1881:   PetscSFDestroy(&sfPoint);
1882:   PetscSFDestroy(&migrationSF);
1883:   DMDestroy(&gatherDM);
1884:   return(0);
1885: }
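A minimal usage sketch; names are illustrative and error checking is elided to match the listing.

  DM redundantDM = NULL;

  DMPlexGetRedundantDM(dm, &redundantDM);
  if (redundantDM) {               /* NULL when running on a single process */
    /* Every rank now holds a complete copy of the mesh */
    DMView(redundantDM, PETSC_VIEWER_STDOUT_SELF);
    DMDestroy(&redundantDM);
  }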