Actual source code: sfallgatherv.c

petsc-master 2019-09-15
  1:  #include <../src/vec/is/sf/impls/basic/allgatherv/sfallgatherv.h>

  3: PETSC_INTERN PetscErrorCode PetscSFBcastAndOpBegin_Gatherv(PetscSF,MPI_Datatype,const void*,void*,MPI_Op);

  5: /*===================================================================================*/
  6: /*              Internal routines for PetscSFPack_Allgatherv                         */
  7: /*===================================================================================*/
  8: PETSC_INTERN PetscErrorCode PetscSFPackGet_Allgatherv(PetscSF sf,MPI_Datatype unit,const void *rkey,const void *lkey,PetscSFPack_Allgatherv *mylink)
  9: {
 10:   PetscSF_Allgatherv     *dat = (PetscSF_Allgatherv*)sf->data;
 11:   PetscSFPack_Allgatherv link;
 12:   PetscSFPack            *p;
 13:   PetscErrorCode         ierr;

 16:   PetscSFPackSetErrorOnUnsupportedOverlap(sf,unit,rkey,lkey);
 17:   /* Look for types in cache */
 18:   for (p=&dat->avail; (link=(PetscSFPack_Allgatherv)*p); p=&link->next) {
 19:     PetscBool match;
 20:     MPIPetsc_Type_compare(unit,link->unit,&match);
 21:     if (match) {
 22:       *p = link->next; /* Remove from available list */
 23:       goto found;
 24:     }
 25:   }

 27:   PetscNew(&link);
 28:   PetscSFPackSetupType((PetscSFPack)link,unit);
 29:   /* Must be initialized to MPI_REQUEST_NULL so that we can safely wait on it even if it is not used */
 30:   link->request = MPI_REQUEST_NULL;
 31:   /* One tag per link */
 32:   PetscCommGetNewTag(PetscObjectComm((PetscObject)sf),&link->tag);

 34:   /* DO NOT allocate link->root/leaf. We use lazy allocation since these buffers are likely not needed */
 35: found:
 36:   link->rkey = rkey;
 37:   link->lkey = lkey;
 38:   link->next = dat->inuse;
 39:   dat->inuse = (PetscSFPack)link;

 41:   *mylink     = link;
 42:   return(0);
 43: }

 45: /*===================================================================================*/
 46: /*              Implementations of SF public APIs                                    */
 47: /*===================================================================================*/

 49: /* PetscSFGetGraph is non-collective. An implementation should not have collective calls */
 50: PETSC_INTERN PetscErrorCode PetscSFGetGraph_Allgatherv(PetscSF sf,PetscInt *nroots,PetscInt *nleaves,const PetscInt **ilocal,const PetscSFNode **iremote)
 51: {
 53:   PetscInt       i,j,k;
 54:   const PetscInt *range;
 55:   PetscMPIInt    size;

 58:   MPI_Comm_size(PetscObjectComm((PetscObject)sf),&size);
 59:   if (nroots)  *nroots  = sf->nroots;
 60:   if (nleaves) *nleaves = sf->nleaves;
 61:   if (ilocal)  *ilocal  = NULL; /* Contiguous leaves */
 62:   if (iremote) {
 63:     if (!sf->remote && sf->nleaves) { /* The && sf->nleaves makes sfgatherv able to inherit this routine */
 64:       PetscLayoutGetRanges(sf->map,&range);
 65:       PetscMalloc1(sf->nleaves,&sf->remote);
 66:       sf->remote_alloc = sf->remote;
 67:       for (i=0; i<size; i++) {
 68:         for (j=range[i],k=0; j<range[i+1]; j++,k++) {
 69:           sf->remote[j].rank  = i;
 70:           sf->remote[j].index = k;
 71:         }
 72:       }
 73:     }
 74:     *iremote = sf->remote;
 75:   }
 76:   return(0);
 77: }
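
/* An illustrative sketch, assuming a hypothetical, already set-up SF "sf" of this type: ilocal comes back NULL
   because the leaves are contiguous, so leaf j lives at local index j and iremote[j] records the (rank,index)
   of the root it is connected to.

     PetscInt          nroots,nleaves;
     const PetscInt    *ilocal;
     const PetscSFNode *iremote;
     PetscSFGetGraph(sf,&nroots,&nleaves,&ilocal,&iremote);
*/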

 79: PETSC_INTERN PetscErrorCode PetscSFSetUp_Allgatherv(PetscSF sf)
 80: {
 81:   PetscErrorCode     ierr;
 82:   PetscSF_Allgatherv *dat = (PetscSF_Allgatherv*)sf->data;
 83:   PetscMPIInt        size;
 84:   PetscInt           i;
 85:   const PetscInt     *range;

 88:   MPI_Comm_size(PetscObjectComm((PetscObject)sf),&size);
 89:   if (sf->nleaves) { /* This if (sf->nleaves) test makes sfgatherv able to inherit this routine */
 90:     PetscMalloc1(size,&dat->recvcounts);
 91:     PetscMalloc1(size,&dat->displs);
 92:     PetscLayoutGetRanges(sf->map,&range);

 94:     for (i=0; i<size; i++) {
 95:       PetscMPIIntCast(range[i],&dat->displs[i]);
 96:       PetscMPIIntCast(range[i+1]-range[i],&dat->recvcounts[i]);
 97:     }
 98:   }
 99:   return(0);
100: }
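
/* A small worked example: with three ranks owning 2, 3 and 1 roots respectively, the layout ranges are
   range = {0,2,5,6}, so the arrays built above (and consumed by the MPIU_Iallgatherv and MPIU_Iscatterv
   calls below) are
     recvcounts[i] = range[i+1]-range[i]  ->  {2,3,1}   (number of roots owned by rank i)
     displs[i]     = range[i]             ->  {0,2,5}   (offset of rank i's block in the leaf space)
*/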

102: PETSC_INTERN PetscErrorCode PetscSFReset_Allgatherv(PetscSF sf)
103: {
104:   PetscSF_Allgatherv     *dat = (PetscSF_Allgatherv*)sf->data;
105:   PetscSFPack_Allgatherv link,next;
106:   PetscErrorCode         ierr;

109:   PetscFree(dat->iranks);
110:   PetscFree(dat->ioffset);
111:   PetscFree(dat->irootloc);
112:   PetscFree(dat->recvcounts);
113:   PetscFree(dat->displs);
114:   if (dat->inuse) SETERRQ(PetscObjectComm((PetscObject)sf),PETSC_ERR_ARG_WRONGSTATE,"Outstanding operation has not been completed");
115:   for (link=(PetscSFPack_Allgatherv)dat->avail; link; link=next) {
116:     next = (PetscSFPack_Allgatherv)link->next;
117:     if (!link->isbuiltin) {MPI_Type_free(&link->unit);}
118:     PetscFree(link->root);
119:     PetscFree(link->leaf);
120:     PetscFree(link);
121:   }
122:   dat->avail = NULL;
123:   return(0);
124: }

126: PETSC_INTERN PetscErrorCode PetscSFDestroy_Allgatherv(PetscSF sf)
127: {

131:   PetscSFReset_Allgatherv(sf);
132:   PetscFree(sf->data);
133:   return(0);
134: }

136: static PetscErrorCode PetscSFBcastAndOpBegin_Allgatherv(PetscSF sf,MPI_Datatype unit,const void *rootdata,void *leafdata,MPI_Op op)
137: {
138:   PetscErrorCode         ierr;
139:   PetscSFPack_Allgatherv link;
140:   PetscMPIInt            sendcount;
141:   MPI_Comm               comm;
142:   void                   *recvbuf;
143:   PetscSF_Allgatherv     *dat = (PetscSF_Allgatherv*)sf->data;

146:   PetscSFPackGet_Allgatherv(sf,unit,rootdata,leafdata,&link);
147:   PetscObjectGetComm((PetscObject)sf,&comm);
148:   PetscMPIIntCast(sf->nroots,&sendcount);

150:   if (op == MPIU_REPLACE) {
151:     recvbuf = leafdata;
152:   } else {
153:     if (!link->leaf) {PetscMalloc(sf->nleaves*link->unitbytes,&link->leaf);}
154:     recvbuf = link->leaf;
155:   }
156:   MPIU_Iallgatherv(rootdata,sendcount,unit,recvbuf,dat->recvcounts,dat->displs,unit,comm,&link->request);
157:   return(0);
158: }

160: PETSC_INTERN PetscErrorCode PetscSFBcastAndOpEnd_Allgatherv(PetscSF sf,MPI_Datatype unit,const void *rootdata,void *leafdata,MPI_Op op)
161: {
162:   PetscErrorCode         ierr,(*UnpackAndOp)(PetscInt,PetscInt,const PetscInt*,PetscInt,PetscSFPackOpt,void*,const void*);
163:   PetscSFPack_Allgatherv link;

166:   PetscSFPackGetInUse(sf,unit,rootdata,leafdata,PETSC_OWN_POINTER,(PetscSFPack*)&link);
167:   PetscSFPackGetUnpackAndOp(sf,(PetscSFPack)link,op,&UnpackAndOp);
168:   MPI_Wait(&link->request,MPI_STATUS_IGNORE);

170:   if (op != MPIU_REPLACE) {
171:     if (UnpackAndOp) {(*UnpackAndOp)(sf->nleaves,link->bs,NULL,0,NULL,leafdata,link->leaf);}
172: #if defined(PETSC_HAVE_MPI_REDUCE_LOCAL)
173:     else {
174:       /* op might be user-defined */
175:       PetscMPIInt count;
176:       PetscMPIIntCast(sf->nleaves,&count);
177:       MPI_Reduce_local(link->leaf,leafdata,count,unit,op);
178:     }
179: #else
180:     else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"No unpacking reduction operation for this MPI_Op");
181: #endif
182:   }
183:   PetscSFPackReclaim(sf,(PetscSFPack*)&link);
184:   return(0);
185: }
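
/* An illustrative sketch of the broadcast path above, assuming a hypothetical, already set-up SF "sf" of this type and
   hypothetical PetscScalar buffers rootdata (sf->nroots entries) and leafdata (sf->nleaves entries). With MPIU_REPLACE
   the allgathered roots land directly in leafdata; with any other op they go to an internal buffer in Begin and are
   combined into leafdata in End.

     PetscSFBcastAndOpBegin(sf,MPIU_SCALAR,rootdata,leafdata,MPIU_SUM);
     // independent local work can be overlapped here
     PetscSFBcastAndOpEnd(sf,MPIU_SCALAR,rootdata,leafdata,MPIU_SUM);
*/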

187: static PetscErrorCode PetscSFReduceBegin_Allgatherv(PetscSF sf,MPI_Datatype unit,const void *leafdata,void *rootdata,MPI_Op op)
188: {
189:   PetscErrorCode         ierr;
190:   PetscSFPack_Allgatherv link;
191:   PetscSF_Allgatherv     *dat = (PetscSF_Allgatherv*)sf->data;
192:   PetscInt               rstart;
193:   PetscMPIInt            rank,count,recvcount;
194:   MPI_Comm               comm;

197:   PetscSFPackGet_Allgatherv(sf,unit,rootdata,leafdata,&link);
198:   PetscObjectGetComm((PetscObject)sf,&comm);
199:   MPI_Comm_rank(comm,&rank);

201:   if (op == MPIU_REPLACE) {
202:     /* REPLACE is only meaningful when all processes have the same leafdata to reduce. Therefore copying from the local leafdata is fine */
203:     PetscLayoutGetRange(sf->map,&rstart,NULL);
204:     PetscMemcpy(rootdata,(const char*)leafdata+(size_t)rstart*link->unitbytes,(size_t)sf->nroots*link->unitbytes);
205:   } else {
206:     /* Reduce all leafdata onto rank 0, then scatter the result to each rank's root buffer; PetscSFReduceEnd then reduces the root buffer into rootdata */
207:     if (!rank && !link->leaf) {PetscMalloc(sf->nleaves*link->unitbytes,&link->leaf);}
208:     PetscMPIIntCast(sf->nleaves*link->bs,&count);
209:     PetscMPIIntCast(sf->nroots,&recvcount);
210:     MPI_Reduce(leafdata,link->leaf,count,link->basicunit,op,0,comm); /* Must do the reduce with the MPI builtin datatype basicunit */
211:     if (!link->root) {PetscMalloc(sf->nroots*link->unitbytes,&link->root);}
212:     MPIU_Iscatterv(link->leaf,dat->recvcounts,dat->displs,unit,link->root,recvcount,unit,0,comm,&link->request);
213:   }
214:   return(0);
215: }

217: PETSC_INTERN PetscErrorCode PetscSFReduceEnd_Allgatherv(PetscSF sf,MPI_Datatype unit,const void *leafdata,void *rootdata,MPI_Op op)
218: {
219:   PetscErrorCode         ierr,(*UnpackAndOp)(PetscInt,PetscInt,const PetscInt*,PetscInt,PetscSFPackOpt,void*,const void*);
220:   PetscSFPack_Allgatherv link;

223:   PetscSFPackGetInUse(sf,unit,rootdata,leafdata,PETSC_OWN_POINTER,(PetscSFPack*)&link);
224:   MPI_Wait(&link->request,MPI_STATUS_IGNORE);
225:   if (op != MPIU_REPLACE) {
226:     PetscSFPackGetUnpackAndOp(sf,(PetscSFPack)link,op,&UnpackAndOp);
227:     if (UnpackAndOp) {(*UnpackAndOp)(sf->nroots,link->bs,NULL,0,NULL,rootdata,link->root);}
228: #if defined(PETSC_HAVE_MPI_REDUCE_LOCAL)
229:     else if (sf->nroots) {
230:       /* op might be user-defined */
231:       PetscMPIInt count;
232:       PetscMPIIntCast(sf->nroots,&count);
233:       MPI_Reduce_local(link->root,rootdata,count,unit,op);
234:     }
235: #else
236:     else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"No unpacking reduction operation for this MPI_Op");
237: #endif
238:   }
239:   PetscSFPackReclaim(sf,(PetscSFPack*)&link);
240:   return(0);
241: }
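
/* An illustrative sketch of the reduce path above, with the same hypothetical SF and buffers as in the broadcast
   sketch: every rank contributes its leafdata, and the values combined with op end up in rootdata on the ranks
   that own the corresponding roots.

     PetscSFReduceBegin(sf,MPIU_SCALAR,leafdata,rootdata,MPIU_SUM);
     PetscSFReduceEnd(sf,MPIU_SCALAR,leafdata,rootdata,MPIU_SUM);
*/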

243: static PetscErrorCode PetscSFBcastToZero_Allgatherv(PetscSF sf,MPI_Datatype unit,const void *rootdata,void *leafdata)
244: {
245:   PetscErrorCode         ierr;
246:   PetscSFPack_Allgatherv link;

249:   PetscSFBcastAndOpBegin_Gatherv(sf,unit,rootdata,leafdata,MPIU_REPLACE);
250:   /* A simplified PetscSFBcastAndOpEnd_Allgatherv */
251:   PetscSFPackGetInUse(sf,unit,rootdata,leafdata,PETSC_OWN_POINTER,(PetscSFPack*)&link);
252:   MPI_Wait(&link->request,MPI_STATUS_IGNORE);
253:   PetscSFPackReclaim(sf,(PetscSFPack*)&link);
254:   return(0);
255: }

257: /* This routine is very tricky (I believe it is rarely used with this kind of graph, so we just provide a simple but non-optimal implementation).

259:    Suppose we have three ranks. Rank 0 has a root with value 1. Ranks 0, 1 and 2 each have one leaf, with values 2, 3 and 4 respectively.
260:    All leaves are connected to the root on rank 0. Suppose op=MPI_SUM and ranks 0, 1, 2 access the root in rank order. By definition of this
261:    routine, rank 0 sees 1 in the root, fetches it into its leafupdate, then updates the root to 1 + 2 = 3; rank 1 sees 3 in the root, fetches
262:    it into its leafupdate, then updates the root to 3 + 3 = 6; rank 2 sees 6 in the root, fetches it into its leafupdate, then updates the
263:    root to 6 + 4 = 10. At the end, leafupdate on ranks 0, 1, 2 is 1, 3, 6 respectively, and the root is 10.

265:    One optimized implementation could be: starting from the initial state:
266:              rank-0   rank-1    rank-2
267:         Root     1
268:         Leaf     2       3         4

270:    Shift the leaves one rank rightwards into leafupdate, while rank 0 gathers the root value into its leafupdate. We have:
271:              rank-0   rank-1    rank-2
272:         Root     1
273:         Leaf     2       3         4
274:      Leafupdate  1       2         3

276:    Then, do MPI_Scan on leafupdate and get:
277:              rank-0   rank-1    rank-2
278:         Root     1
279:         Leaf     2       3         4
280:      Leafupdate  1       3         6

282:    Rank 2 sums its leaf and leafupdate, scatters the result to the root, and gets
283:              rank-0   rank-1    rank-2
284:         Root     10
285:         Leaf     2       3         4
286:      Leafupdate  1       3         6

288:    We use a simpler implementation. From the same initial state, we copy leafdata to leafupdate
289:              rank-0   rank-1    rank-2
290:         Root     1
291:         Leaf     2       3         4
292:      Leafupdate  2       3         4

294:    Do MPI_Exscan on leafupdate,
295:              rank-0   rank-1    rank-2
296:         Root     1
297:         Leaf     2       3         4
298:      Leafupdate  2       2         5

300:    BcastAndOp from root to leafupdate,
301:              rank-0   rank-1    rank-2
302:         Root     1
303:         Leaf     2       3         4
304:      Leafupdate  3       3         6

306:    Copy root to leafupdate on rank-0
307:              rank-0   rank-1    rank-2
308:         Root     1
309:         Leaf     2       3         4
310:      Leafupdate  1       3         6

312:    Reduce from leaf to root,
313:              rank-0   rank-1    rank-2
314:         Root     10
315:         Leaf     2       3         4
316:      Leafupdate  1       3         6
317: */
318: PETSC_INTERN PetscErrorCode PetscSFFetchAndOpBegin_Allgatherv(PetscSF sf,MPI_Datatype unit,void *rootdata,const void *leafdata,void *leafupdate,MPI_Op op)
319: {
320:   PetscErrorCode         ierr;
321:   PetscSFPack_Allgatherv link;
322:   MPI_Comm               comm;
323:   PetscMPIInt            count;

326:   /* Copy leafdata to leafupdate */
327:   PetscSFPackGet_Allgatherv(sf,unit,rootdata,leafdata,&link);
328:   PetscMemcpy(leafupdate,leafdata,sf->nleaves*link->unitbytes);
329:   PetscSFPackGetInUse(sf,unit,rootdata,leafdata,PETSC_OWN_POINTER,(PetscSFPack*)&link);

331:   /* Exscan on leafupdate and then BcastAndOp rootdata to leafupdate */
332:   PetscObjectGetComm((PetscObject)sf,&comm);
333:   PetscMPIIntCast(sf->nleaves,&count);
334:   if (op == MPIU_REPLACE) {
335:     PetscMPIInt size,rank,prev,next;
336:     MPI_Comm_rank(comm,&rank);
337:     MPI_Comm_size(comm,&size);
338:     prev = rank ?            rank-1 : MPI_PROC_NULL;
339:     next = (rank < size-1) ? rank+1 : MPI_PROC_NULL;
340:     MPI_Sendrecv_replace(leafupdate,count,unit,next,link->tag,prev,link->tag,comm,MPI_STATUS_IGNORE);
341:   } else {MPI_Exscan(MPI_IN_PLACE,leafupdate,count,unit,op,comm);}
342:   PetscSFPackReclaim(sf,(PetscSFPack*)&link);
343:   PetscSFBcastAndOpBegin(sf,unit,rootdata,leafupdate,op);
344:   PetscSFBcastAndOpEnd(sf,unit,rootdata,leafupdate,op);

346:   /* Bcast roots to rank 0's leafupdate */
347:   PetscSFBcastToZero_Private(sf,unit,rootdata,leafupdate); /* Using this line makes Allgather SFs able to inherit this routine */

349:   /* Reduce leafdata to rootdata */
350:   PetscSFReduceBegin(sf,unit,leafdata,rootdata,op);
351:   return(0);
352: }

354: PETSC_INTERN PetscErrorCode PetscSFFetchAndOpEnd_Allgatherv(PetscSF sf,MPI_Datatype unit,void *rootdata,const void *leafdata,void *leafupdate,MPI_Op op)
355: {
356:   PetscErrorCode         ierr;

359:   PetscSFReduceEnd(sf,unit,leafdata,rootdata,op);
360:   return(0);
361: }
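
/* An illustrative sketch of the fetch-and-op pair above, using the three-rank example from the long comment before
   PetscSFFetchAndOpBegin_Allgatherv; rootdata, leafdata and leafupdate are hypothetical PetscInt buffers of sf->nroots,
   sf->nleaves and sf->nleaves entries. After End, leafupdate holds the pre-update root values (1,3,6 in that example)
   and rootdata holds the fully reduced result (10).

     PetscSFFetchAndOpBegin(sf,MPIU_INT,rootdata,leafdata,leafupdate,MPIU_SUM);
     PetscSFFetchAndOpEnd(sf,MPIU_INT,rootdata,leafdata,leafupdate,MPIU_SUM);
*/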

363: /* Get root ranks accessing my leaves */
364: PETSC_INTERN PetscErrorCode PetscSFGetRootRanks_Allgatherv(PetscSF sf,PetscInt *nranks,const PetscMPIInt **ranks,const PetscInt **roffset,const PetscInt **rmine,const PetscInt **rremote)
365: {
367:   PetscInt       i,j,k,size;
368:   const PetscInt *range;

371:   /* Lazily construct these large arrays if users really need them for this type of SF. Very likely, they do not */
372:   if (sf->nranks && !sf->ranks) { /* On rank!=0, sf->nranks=0. The sf->nranks test makes this routine also work for sfgatherv */
373:     size = sf->nranks;
374:     PetscLayoutGetRanges(sf->map,&range);
375:     PetscMalloc4(size,&sf->ranks,size+1,&sf->roffset,sf->nleaves,&sf->rmine,sf->nleaves,&sf->rremote);
376:     for (i=0; i<size; i++) sf->ranks[i] = i;
377:     PetscArraycpy(sf->roffset,range,size+1);
378:     for (i=0; i<sf->nleaves; i++) sf->rmine[i] = i; /* rmine is never NULL even for contiguous leaves */
379:     for (i=0; i<size; i++) {
380:       for (j=range[i],k=0; j<range[i+1]; j++,k++) sf->rremote[j] = k;
381:     }
382:   }

384:   if (nranks)  *nranks  = sf->nranks;
385:   if (ranks)   *ranks   = sf->ranks;
386:   if (roffset) *roffset = sf->roffset;
387:   if (rmine)   *rmine   = sf->rmine;
388:   if (rremote) *rremote = sf->rremote;
389:   return(0);
390: }

392: /* Get leaf ranks accessing my roots */
393: PETSC_INTERN PetscErrorCode PetscSFGetLeafRanks_Allgatherv(PetscSF sf,PetscInt *niranks,const PetscMPIInt **iranks,const PetscInt **ioffset,const PetscInt **irootloc)
394: {
395:   PetscErrorCode     ierr;
396:   PetscSF_Allgatherv *dat = (PetscSF_Allgatherv*)sf->data;
397:   MPI_Comm           comm;
398:   PetscMPIInt        size,rank;
399:   PetscInt           i,j;

402:   /* Lazily construct these large arrays if users really need them for this type of SF. Very likely, they do not */
403:   PetscObjectGetComm((PetscObject)sf,&comm);
404:   MPI_Comm_size(comm,&size);
405:   MPI_Comm_rank(comm,&rank);
406:   if (niranks) *niranks = size;

408:   /* PetscSF_Basic has distinguished incoming ranks. Here we do not need that, but we must put self first and
409:      sort the other ranks. See the comments in PetscSFSetUp_Basic about MatGetBrowsOfAoCols_MPIAIJ for why.
410:    */
411:   if (iranks) {
412:     if (!dat->iranks) {
413:       PetscMalloc1(size,&dat->iranks);
414:       dat->iranks[0] = rank;
415:       for (i=0,j=1; i<size; i++) {if (i == rank) continue; dat->iranks[j++] = i;}
416:     }
417:     *iranks = dat->iranks; /* dat->iranks was init'ed to NULL by PetscNewLog */
418:   }

420:   if (ioffset) {
421:     if (!dat->ioffset) {
422:       PetscMalloc1(size+1,&dat->ioffset);
423:       for (i=0; i<=size; i++) dat->ioffset[i] = i*sf->nroots;
424:     }
425:     *ioffset = dat->ioffset;
426:   }

428:   if (irootloc) {
429:     if (!dat->irootloc) {
430:       PetscMalloc1(sf->nleaves,&dat->irootloc);
431:       for (i=0; i<size; i++) {
432:         for (j=0; j<sf->nroots; j++) dat->irootloc[i*sf->nroots+j] = j;
433:       }
434:     }
435:     *irootloc = dat->irootloc;
436:   }
437:   return(0);
438: }
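
/* A small worked example of the arrays above: with three ranks each owning two roots, viewed from rank 1,
     iranks   = {1,0,2}        (self first, then the other ranks in sorted order)
     ioffset  = {0,2,4,6}      (ioffset[i] = i*sf->nroots)
     irootloc = {0,1,0,1,0,1}  (every rank's leaves reference all local roots, in order)
*/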

440: PETSC_INTERN PetscErrorCode PetscSFCreateLocalSF_Allgatherv(PetscSF sf,PetscSF *out)
441: {
442:   PetscInt       i,nroots,nleaves,rstart,*ilocal;
443:   PetscSFNode    *iremote;
444:   PetscSF        lsf;

448:   nroots  = sf->nroots;
449:   nleaves = sf->nleaves ? sf->nroots : 0;
450:   PetscMalloc1(nleaves,&ilocal);
451:   PetscMalloc1(nleaves,&iremote);
452:   PetscLayoutGetRange(sf->map,&rstart,NULL);

454:   for (i=0; i<nleaves; i++) {
455:     ilocal[i]        = rstart + i; /* lsf does not change leaf indices */
456:     iremote[i].rank  = 0;          /* rank in PETSC_COMM_SELF */
457:     iremote[i].index = i;          /* root index */
458:   }

460:   PetscSFCreate(PETSC_COMM_SELF,&lsf);
461:   PetscSFSetGraph(lsf,nroots,nleaves,ilocal,PETSC_OWN_POINTER,iremote,PETSC_OWN_POINTER);
462:   PetscSFSetUp(lsf);
463:   *out = lsf;
464:   return(0);
465: }
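
/* A small worked example of the local SF built above: with three ranks owning 2, 3 and 1 roots respectively, rank 1 has
   rstart = 2, so its lsf has nroots = nleaves = 3, ilocal = {2,3,4} (the global leaf positions are kept) and
   iremote = {(0,0),(0,1),(0,2)} (rank 0 in PETSC_COMM_SELF).
*/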

467: PETSC_INTERN PetscErrorCode PetscSFCreate_Allgatherv(PetscSF sf)
468: {
469:   PetscErrorCode     ierr;
470:   PetscSF_Allgatherv *dat = (PetscSF_Allgatherv*)sf->data;

473:   sf->ops->SetUp           = PetscSFSetUp_Allgatherv;
474:   sf->ops->Reset           = PetscSFReset_Allgatherv;
475:   sf->ops->Destroy         = PetscSFDestroy_Allgatherv;
476:   sf->ops->GetRootRanks    = PetscSFGetRootRanks_Allgatherv;
477:   sf->ops->GetLeafRanks    = PetscSFGetLeafRanks_Allgatherv;
478:   sf->ops->GetGraph        = PetscSFGetGraph_Allgatherv;
479:   sf->ops->BcastAndOpBegin = PetscSFBcastAndOpBegin_Allgatherv;
480:   sf->ops->BcastAndOpEnd   = PetscSFBcastAndOpEnd_Allgatherv;
481:   sf->ops->ReduceBegin     = PetscSFReduceBegin_Allgatherv;
482:   sf->ops->ReduceEnd       = PetscSFReduceEnd_Allgatherv;
483:   sf->ops->FetchAndOpBegin = PetscSFFetchAndOpBegin_Allgatherv;
484:   sf->ops->FetchAndOpEnd   = PetscSFFetchAndOpEnd_Allgatherv;
485:   sf->ops->CreateLocalSF   = PetscSFCreateLocalSF_Allgatherv;
486:   sf->ops->BcastToZero     = PetscSFBcastToZero_Allgatherv;

488:   PetscNewLog(sf,&dat);
489:   sf->data = (void*)dat;
490:   return(0);
491: }