Actual source code: sfallgatherv.c

petsc-master 2020-01-15
  1:  #include <../src/vec/is/sf/impls/basic/allgatherv/sfallgatherv.h>

  3: PETSC_INTERN PetscErrorCode PetscSFBcastAndOpBegin_Gatherv(PetscSF,MPI_Datatype,PetscMemType,const void*,PetscMemType,void*,MPI_Op);

  5: /*===================================================================================*/
  6: /*              Internal routines for PetscSFPack                                    */
  7: /*===================================================================================*/
  8: PETSC_INTERN PetscErrorCode PetscSFPackGet_Allgatherv(PetscSF sf,MPI_Datatype unit,PetscMemType rootmtype,const void *rootdata,PetscMemType leafmtype,const void *leafdata,PetscSFPack *mylink)
  9: {
 10:   PetscErrorCode         ierr;
 11:   PetscSF_Allgatherv     *dat = (PetscSF_Allgatherv*)sf->data;
 12:   PetscSFPack            link,*p;
 13:   PetscBool              match;
 14:   PetscInt               i,j;

 17:   PetscSFPackSetErrorOnUnsupportedOverlap(sf,unit,rootdata,leafdata);
 18:   /* Look for types in cache */
 19:   for (p=&dat->avail; (link=*p); p=&link->next) {
 20:     MPIPetsc_Type_compare(unit,link->unit,&match);
 21:     if (match) {
 22:       *p = link->next; /* Remove from available list */
 23:       goto found;
 24:     }
 25:   }

 27:   PetscNew(&link);
 28:   PetscSFPackSetUp_Host(sf,link,unit);

 30:   link->rootbuflen = sf->nroots;
 31:   link->leafbuflen = sf->nleaves;
 32:   link->nrootreqs  = 1;
 33:   link->nleafreqs  = 0;
 34:   PetscMalloc1(4,&link->reqs); /* 4 = (nrootreqs+nleafreqs)*4 */
 35:   for (i=0; i<4; i++) link->reqs[i] = MPI_REQUEST_NULL; /* Initialized to NULL so that we know which need to be freed in Destroy */

 37:   for (i=0; i<2; i++) {
 38:     for (j=0; j<2; j++) {
 39:       link->rootreqs[i][j] = link->reqs + (2*i+j);
 40:       link->leafreqs[i][j] = NULL; /* leaf requests are not needed; set them to NULL so that accidental use segfaults */
 41:     }
 42:   }

 44:   /* DO NOT allocate link->rootbuf[]/leafbuf[]. We use lazy allocation since these buffers are likely not needed */
 45: found:
 46:   link->rootmtype = rootmtype;
 47:   link->leafmtype = leafmtype;
 48: #if defined(PETSC_HAVE_CUDA)
 49:   PetscSFPackSetUp_Device(sf,link,unit);
 50: #endif
 51:   link->rootdata  = rootdata;
 52:   link->leafdata  = leafdata;
 53:   link->next      = dat->inuse;
 54:   dat->inuse      = link;

 56:   *mylink         = link;
 57:   return(0);
 58: }

 60: /*===================================================================================*/
 61: /*              Implementations of SF public APIs                                    */
 62: /*===================================================================================*/

 64: /* PetscSFGetGraph is non-collective. An implementation should not have collective calls */
 65: PETSC_INTERN PetscErrorCode PetscSFGetGraph_Allgatherv(PetscSF sf,PetscInt *nroots,PetscInt *nleaves,const PetscInt **ilocal,const PetscSFNode **iremote)
 66: {
 68:   PetscInt       i,j,k;
 69:   const PetscInt *range;
 70:   PetscMPIInt    size;

 73:   MPI_Comm_size(PetscObjectComm((PetscObject)sf),&size);
 74:   if (nroots)  *nroots  = sf->nroots;
 75:   if (nleaves) *nleaves = sf->nleaves;
 76:   if (ilocal)  *ilocal  = NULL; /* Contiguous leaves */
 77:   if (iremote) {
 78:     if (!sf->remote && sf->nleaves) { /* The && sf->nleaves makes sfgatherv able to inherit this routine */
 79:       PetscLayoutGetRanges(sf->map,&range);
 80:       PetscMalloc1(sf->nleaves,&sf->remote);
 81:       sf->remote_alloc = sf->remote;
 82:       for (i=0; i<size; i++) {
 83:         for (j=range[i],k=0; j<range[i+1]; j++,k++) {
 84:           sf->remote[j].rank  = i;
 85:           sf->remote[j].index = k;
 86:         }
 87:       }
 88:     }
 89:     *iremote = sf->remote;
 90:   }
 91:   return(0);
 92: }
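
/* Illustrative sketch (not part of the original sfallgatherv.c): the graph that
   PetscSFGetGraph_Allgatherv reports lazily. Every rank owns all leaves, and leaf j points at
   root (owner rank i, local index j - range[i]), where range[] is the ownership layout. The
   RemoteNode struct and the 3-rank range[] below are assumptions made only for this sketch. */
#include <stdio.h>

typedef struct { int rank; int index; } RemoteNode; /* stand-in for PetscSFNode */

int main(void)
{
  const int  range[] = {0,2,5,6};   /* assumed layout: ranks 0,1,2 own 2,3,1 roots */
  const int  size    = 3;
  const int  nleaves = range[3];    /* every rank has all 6 leaves */
  RemoteNode remote[6];
  int        i,j,k;

  for (i=0; i<size; i++) {
    for (j=range[i],k=0; j<range[i+1]; j++,k++) { remote[j].rank = i; remote[j].index = k; }
  }
  for (j=0; j<nleaves; j++) printf("leaf %d -> (rank %d, index %d)\n",j,remote[j].rank,remote[j].index);
  return 0;
}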

 94: PETSC_INTERN PetscErrorCode PetscSFSetUp_Allgatherv(PetscSF sf)
 95: {
 96:   PetscErrorCode     ierr;
 97:   PetscSF_Allgatherv *dat = (PetscSF_Allgatherv*)sf->data;
 98:   PetscMPIInt        size;
 99:   PetscInt           i;
100:   const PetscInt     *range;

103:   MPI_Comm_size(PetscObjectComm((PetscObject)sf),&size);
104:   if (sf->nleaves) { /* This if (sf->nleaves) test makes sfgatherv able to inherit this routine */
105:     PetscMalloc1(size,&dat->recvcounts);
106:     PetscMalloc1(size,&dat->displs);
107:     PetscLayoutGetRanges(sf->map,&range);

109:     for (i=0; i<size; i++) {
110:       PetscMPIIntCast(range[i],&dat->displs[i]);
111:       PetscMPIIntCast(range[i+1]-range[i],&dat->recvcounts[i]);
112:     }
113:   }
114:   return(0);
115: }
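
/* Illustrative sketch (not part of the original sfallgatherv.c): the recvcounts/displs built in
   PetscSFSetUp_Allgatherv are exactly the arguments MPI_Allgatherv expects, i.e.
   displs[i] = range[i] and recvcounts[i] = range[i+1]-range[i]. A plain-MPI sketch; the
   function name and the use of MPI_DOUBLE are assumptions for illustration only. */
#include <mpi.h>
#include <stdlib.h>

static void allgatherv_from_ranges(const int *range,int size,const double *rootdata,double *leafdata,MPI_Comm comm)
{
  int *recvcounts = (int*)malloc(size*sizeof(int));
  int *displs     = (int*)malloc(size*sizeof(int));
  int  i,rank;

  MPI_Comm_rank(comm,&rank);
  for (i=0; i<size; i++) { displs[i] = range[i]; recvcounts[i] = range[i+1]-range[i]; }
  /* each rank contributes its owned segment; every rank receives the whole vector */
  MPI_Allgatherv(rootdata,recvcounts[rank],MPI_DOUBLE,leafdata,recvcounts,displs,MPI_DOUBLE,comm);
  free(recvcounts); free(displs);
}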

117: PETSC_INTERN PetscErrorCode PetscSFReset_Allgatherv(PetscSF sf)
118: {
119:   PetscErrorCode         ierr;
120:   PetscSF_Allgatherv     *dat = (PetscSF_Allgatherv*)sf->data;

123:   PetscFree(dat->iranks);
124:   PetscFree(dat->ioffset);
125:   PetscFree(dat->irootloc);
126:   PetscFree(dat->recvcounts);
127:   PetscFree(dat->displs);
128:   if (dat->inuse) SETERRQ(PetscObjectComm((PetscObject)sf),PETSC_ERR_ARG_WRONGSTATE,"Outstanding operation has not been completed");
129:   PetscSFPackDestroyAvailable(sf,&dat->avail);
130:   return(0);
131: }

133: PETSC_INTERN PetscErrorCode PetscSFDestroy_Allgatherv(PetscSF sf)
134: {

138:   PetscSFReset_Allgatherv(sf);
139:   PetscFree(sf->data);
140:   return(0);
141: }

143: /*
144:   Prepare the rootbuf, leafbuf etc used by MPI in PetscSFBcastAndOpBegin.

146: Input Arguments:
147: + sf    - the star forest
148: . link  - the link PetscSFBcastAndOp is currently using
149: - op    - the reduction op

151: Output Arguments:
152: +rootmtype_mpi  - memtype of rootbuf_mpi
153: .rootbuf_mpi    - root buffer used by MPI in the following MPI call
154: .leafmtype_mpi  - memtype of leafbuf_mpi
155: -leafbuf_mpi    - leaf buffer used by MPI in the following MPI call

157: Notes:
158:   This function was created because things become complex when rootdata or leafdata is on the device but the user does not want to use GPU-aware MPI.
159:   In that case we have to copy data from device to host before calling MPI. This function encapsulates all the cases and is reused by Allgatherv & Allgather.
160: */
161: PETSC_INTERN PetscErrorCode PetscSFBcastPrepareMPIBuffers_Allgatherv(PetscSF sf,PetscSFPack link,MPI_Op op,PetscMemType *rootmtype_mpi,const void **rootbuf_mpi,PetscMemType *leafmtype_mpi, void **leafbuf_mpi)
162: {
163:   PetscErrorCode         ierr;

166:   /* If rootdata is on device but no gpu-aware mpi, we need to copy rootdata to rootbuf on host before bcast; otherwise we directly bcast from rootdata */
167:   if (link->rootmtype == PETSC_MEMTYPE_DEVICE && !use_gpu_aware_mpi) {
168:     if (!link->rootbuf[PETSC_MEMTYPE_HOST]) {PetscMallocWithMemType(PETSC_MEMTYPE_HOST,link->rootbuflen*link->unitbytes,(void**)&link->rootbuf[PETSC_MEMTYPE_HOST]);}
169:     PetscMemcpyWithMemType(PETSC_MEMTYPE_HOST,PETSC_MEMTYPE_DEVICE,link->rootbuf[PETSC_MEMTYPE_HOST],link->rootdata,link->rootbuflen*link->unitbytes);
170:     *rootbuf_mpi   = link->rootbuf[PETSC_MEMTYPE_HOST];
171:     *rootmtype_mpi = PETSC_MEMTYPE_HOST;
172:   } else {
173:     *rootbuf_mpi   = link->rootdata;
174:     *rootmtype_mpi = link->rootmtype;
175:   }

177:   if (link->leafmtype == PETSC_MEMTYPE_DEVICE && !use_gpu_aware_mpi) {  /* If leafdata is on device but no gpu-aware mpi, we need a leafbuf on host to receive bcast'ed data */
178:     if (!link->leafbuf[PETSC_MEMTYPE_HOST]) {PetscMallocWithMemType(PETSC_MEMTYPE_HOST,link->leafbuflen*link->unitbytes,(void**)&link->leafbuf[PETSC_MEMTYPE_HOST]);}
179:     *leafbuf_mpi   = link->leafbuf[PETSC_MEMTYPE_HOST];
180:     *leafmtype_mpi = PETSC_MEMTYPE_HOST;
181:   } else if (op == MPIU_REPLACE) { /* If op is MPIU_REPLACE, we can directly bcast to leafdata. No intermediate buffer is needed. */
182:     *leafbuf_mpi   = (char *)link->leafdata;
183:     *leafmtype_mpi = link->leafmtype;
184:   } else { /* Otherwise, op is a reduction. Have to allocate a buffer alongside leafdata to apply the op. The buffer is either on host or device, depending on where leafdata is. */
185:     if (!link->leafbuf[link->leafmtype]) {PetscMallocWithMemType(link->leafmtype,link->leafbuflen*link->unitbytes,(void**)&link->leafbuf[link->leafmtype]);}
186:     *leafbuf_mpi   = link->leafbuf[link->leafmtype];
187:     *leafmtype_mpi = link->leafmtype;
188:   }
189:   return(0);
190: }
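
/* Illustrative sketch (not part of the original sfallgatherv.c): the host-staging idea behind
   PetscSFBcastPrepareMPIBuffers_Allgatherv when data lives on the GPU but MPI is not GPU-aware:
   copy device data into a host buffer, hand only host memory to MPI, and copy back to the
   device afterwards. Plain CUDA runtime + MPI; the function and variable names are assumptions. */
#include <mpi.h>
#include <cuda_runtime.h>

static void bcast_device_data_without_gpu_aware_mpi(double *d_data,int n,int root,MPI_Comm comm)
{
  double *h_staging = NULL;
  int     rank;

  MPI_Comm_rank(comm,&rank);
  cudaMallocHost((void**)&h_staging,n*sizeof(double));                                     /* pinned host staging buffer */
  if (rank == root) cudaMemcpy(h_staging,d_data,n*sizeof(double),cudaMemcpyDeviceToHost);  /* device -> host on the sender */
  MPI_Bcast(h_staging,n,MPI_DOUBLE,root,comm);                                             /* MPI only ever touches host memory */
  cudaMemcpy(d_data,h_staging,n*sizeof(double),cudaMemcpyHostToDevice);                    /* host -> device on every rank */
  cudaFreeHost(h_staging);
}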

192: static PetscErrorCode PetscSFBcastAndOpBegin_Allgatherv(PetscSF sf,MPI_Datatype unit,PetscMemType rootmtype,const void *rootdata,PetscMemType leafmtype,void *leafdata,MPI_Op op)
193: {
194:   PetscErrorCode         ierr;
195:   PetscSFPack            link;
196:   PetscMPIInt            sendcount;
197:   MPI_Comm               comm;
198:   const void             *rootbuf_mpi = NULL; /* buffer used by MPI */
199:   void                   *leafbuf_mpi = NULL;
200:   PetscMemType           rootmtype_mpi = PETSC_MEMTYPE_HOST,leafmtype_mpi = PETSC_MEMTYPE_HOST; /* Seen by MPI */
201:   PetscSF_Allgatherv     *dat = (PetscSF_Allgatherv*)sf->data;

204:   PetscSFPackGet_Allgatherv(sf,unit,rootmtype,rootdata,leafmtype,leafdata,&link);
205:   PetscObjectGetComm((PetscObject)sf,&comm);
206:   PetscMPIIntCast(sf->nroots,&sendcount);
207:   PetscSFBcastPrepareMPIBuffers_Allgatherv(sf,link,op,&rootmtype_mpi,&rootbuf_mpi,&leafmtype_mpi,&leafbuf_mpi);
208:   MPIU_Iallgatherv(rootbuf_mpi,sendcount,unit,leafbuf_mpi,dat->recvcounts,dat->displs,unit,comm,link->rootreqs[PETSCSF_ROOT2LEAF_BCAST][rootmtype_mpi]);
209:   return(0);
210: }
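
/* Illustrative sketch (not part of the original sfallgatherv.c): the begin/end split above maps
   onto a nonblocking MPI_Iallgatherv whose request is stashed in the link and completed later by
   PetscSFBcastAndOpEnd_Allgatherv. Plain MPI; the function names and MPI_DOUBLE are assumptions. */
#include <mpi.h>

static MPI_Request bcast_begin(const double *rootbuf,int nroots,double *leafbuf,
                               const int *recvcounts,const int *displs,MPI_Comm comm)
{
  MPI_Request req;
  MPI_Iallgatherv(rootbuf,nroots,MPI_DOUBLE,leafbuf,recvcounts,displs,MPI_DOUBLE,comm,&req);
  return req;                        /* the caller may overlap computation here ... */
}

static void bcast_end(MPI_Request req)
{
  MPI_Wait(&req,MPI_STATUS_IGNORE);  /* ... and must complete the request before reading leafbuf */
}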

212: PETSC_INTERN PetscErrorCode PetscSFBcastAndOpEnd_Allgatherv(PetscSF sf,MPI_Datatype unit,PetscMemType rootmtype,const void *rootdata,PetscMemType leafmtype,void *leafdata,MPI_Op op)
213: {
214:   PetscErrorCode         ierr;
215:   PetscSFPack            link;

218:   PetscSFPackGetInUse(sf,unit,rootdata,leafdata,PETSC_OWN_POINTER,&link);
219:   PetscSFPackWaitall(link,PETSCSF_ROOT2LEAF_BCAST);
220:   if (op != MPIU_REPLACE) {
221:     /* Have a leaf buffer alongside leafdata to apply the op */
222:     PetscSFUnpackAndOpLeafData(sf,link,NULL,leafdata,op,PETSC_FALSE);
223:   } else if (leafmtype == PETSC_MEMTYPE_DEVICE && !use_gpu_aware_mpi) {
224:     /* Just need to copy data in leafbuf on host to leafdata on device */
225:     PetscMemcpyWithMemType(PETSC_MEMTYPE_DEVICE,PETSC_MEMTYPE_HOST,leafdata,link->leafbuf[PETSC_MEMTYPE_HOST],link->leafbuflen*link->unitbytes);
226:   }
227:   PetscSFPackReclaim(sf,&link);
228:   return(0);
229: }

231: /*
232:   Prepare the rootbuf, leafbuf etc used by MPI in PetscSFReduceBegin.

234: Input Arguments:
235: + sf    - the star forest
236: . link  - the link PetscSFReduceBegin is currently using
237: - op    - the reduction op

239: Output Arguments:
240: +rootmtype_mpi  - memtype of rootbuf_mpi
241: .rootbuf_mpi    - root buffer used by MPI in the following MPI call
242: .leafmtype_mpi  - memtype of leafbuf_mpi
243: -leafbuf_mpi    - leaf buffer used by MPI in the following MPI call

245: Notes: This function is called assuming op != MPIU_REPLACE.
246: */
247: PETSC_INTERN PetscErrorCode PetscSFReducePrepareMPIBuffers_Allgatherv(PetscSF sf,PetscSFPack link,MPI_Op op,PetscMemType *rootmtype_mpi,void **rootbuf_mpi,PetscMemType *leafmtype_mpi,const void **leafbuf_mpi)
248: {
249:   PetscErrorCode         ierr;
250:   PetscMPIInt            rank,count;
251:   MPI_Comm               comm;
252:   const void             *leafdata_mpi;

255:   PetscObjectGetComm((PetscObject)sf,&comm);
256:   MPI_Comm_rank(comm,&rank);

258:   /* Step 1: Reduce leafdata on all ranks to leafbuf on rank 0 */
259:   if (link->leafmtype == PETSC_MEMTYPE_DEVICE && !use_gpu_aware_mpi) { /* Need to copy leafdata to leafbuf on every rank */
260:     if (!link->leafbuf[PETSC_MEMTYPE_HOST]) {PetscMallocWithMemType(PETSC_MEMTYPE_HOST,link->leafbuflen*link->unitbytes,(void**)&link->leafbuf[PETSC_MEMTYPE_HOST]);}
261:     PetscMemcpyWithMemType(PETSC_MEMTYPE_HOST,PETSC_MEMTYPE_DEVICE,link->leafbuf[PETSC_MEMTYPE_HOST],link->leafdata,link->leafbuflen*link->unitbytes);
262:     leafdata_mpi   = !rank ? MPI_IN_PLACE : link->leafbuf[PETSC_MEMTYPE_HOST];
263:     *leafmtype_mpi = PETSC_MEMTYPE_HOST;
264:   } else { /* Only need to allocate a leafbuf on rank 0. Then directly reduce leafdata to the leafbuf */
265:     if (!rank && !link->leafbuf[link->leafmtype]) {PetscMallocWithMemType(link->leafmtype,link->leafbuflen*link->unitbytes,(void**)&link->leafbuf[link->leafmtype]);}
266:     leafdata_mpi   = link->leafdata;
267:     *leafmtype_mpi = link->leafmtype;
268:   }
269:   *leafbuf_mpi = (const char*)link->leafbuf[*leafmtype_mpi];
270:   PetscMPIIntCast(sf->nleaves*link->bs,&count);
271:   MPI_Reduce(leafdata_mpi,(void*)(*leafbuf_mpi),count,link->basicunit,op,0,comm); /* Must do the reduction with the MPI built-in datatype basicunit */

273:   /* Step 2: Prepare the root buffer (we'll scatter the reduction result to it in a moment) */
274:   if (link->rootmtype == PETSC_MEMTYPE_DEVICE && !use_gpu_aware_mpi) *rootmtype_mpi = PETSC_MEMTYPE_HOST;
275:   else *rootmtype_mpi = link->rootmtype;

277:   if (!link->rootbuf[*rootmtype_mpi]) {PetscMallocWithMemType(*rootmtype_mpi,link->rootbuflen*link->unitbytes,(void**)&link->rootbuf[*rootmtype_mpi]);}
278:   *rootbuf_mpi = link->rootbuf[*rootmtype_mpi];
279:   return(0);
280: }
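
/* Illustrative sketch (not part of the original sfallgatherv.c): the non-REPLACE reduce path is
   essentially "MPI_Reduce everyone's leaves onto rank 0, then scatter the reduced vector back to
   the owners of the roots" (the code above uses a nonblocking MPIU_Iscatterv). A blocking
   plain-MPI version; the function name, MPI_DOUBLE and the sendcounts/displs layout are assumptions. */
#include <mpi.h>
#include <stdlib.h>

static void reduce_leaves_to_roots(const double *leafdata,int nleaves,double *rootdata,int nroots,
                                   const int *sendcounts,const int *displs,MPI_Op op,MPI_Comm comm)
{
  double *reduced = NULL;
  int     rank;

  MPI_Comm_rank(comm,&rank);
  if (!rank) reduced = (double*)malloc(nleaves*sizeof(double));
  MPI_Reduce(leafdata,reduced,nleaves,MPI_DOUBLE,op,0,comm);                             /* all leaves -> rank 0 */
  MPI_Scatterv(reduced,sendcounts,displs,MPI_DOUBLE,rootdata,nroots,MPI_DOUBLE,0,comm);  /* rank 0 -> root owners */
  if (!rank) free(reduced);
}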

282: static PetscErrorCode PetscSFReduceBegin_Allgatherv(PetscSF sf,MPI_Datatype unit,PetscMemType leafmtype,const void *leafdata,PetscMemType rootmtype,void *rootdata,MPI_Op op)
283: {
284:   PetscErrorCode         ierr;
285:   PetscSFPack            link;
286:   PetscSF_Allgatherv     *dat = (PetscSF_Allgatherv*)sf->data;
287:   PetscInt               rstart;
288:   PetscMPIInt            rank,recvcount;
289:   MPI_Comm               comm;
290:   const void             *leafbuf_mpi;
291:   void                   *rootbuf_mpi;
292:   PetscMemType           leafmtype_mpi,rootmtype_mpi; /* Seen by MPI */

295:   PetscSFPackGet_Allgatherv(sf,unit,rootmtype,rootdata,leafmtype,leafdata,&link);
296:   PetscObjectGetComm((PetscObject)sf,&comm);
297:   MPI_Comm_rank(comm,&rank);

299:   if (op == MPIU_REPLACE) {
300:     /* REPLACE is only meaningful when all processes have the same leafdata to reduce. Therefore copying from the local leafdata is fine */
301:     PetscLayoutGetRange(sf->map,&rstart,NULL);
302:     PetscMemcpyWithMemType(rootmtype,leafmtype,rootdata,(const char*)leafdata+(size_t)rstart*link->unitbytes,(size_t)sf->nroots*link->unitbytes);
303:   } else {
304:     PetscMPIIntCast(sf->nroots,&recvcount);
305:     PetscSFReducePrepareMPIBuffers_Allgatherv(sf,link,op,&rootmtype_mpi,&rootbuf_mpi,&leafmtype_mpi,&leafbuf_mpi);
306:     MPIU_Iscatterv(leafbuf_mpi,dat->recvcounts,dat->displs,unit,rootbuf_mpi,recvcount,unit,0,comm,link->rootreqs[PETSCSF_LEAF2ROOT_REDUCE][rootmtype_mpi]);
307:   }
308:   return(0);
309: }
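
/* Illustrative sketch (not part of the original sfallgatherv.c): the MPIU_REPLACE branch above
   needs no communication at all; each rank simply copies its owned slice [rstart, rstart+nroots)
   out of its (replicated) leafdata. Plain C; the function name and parameters are assumptions. */
#include <string.h>

static void replace_roots_from_local_leaves(void *rootdata,const void *leafdata,
                                            size_t rstart,size_t nroots,size_t unitbytes)
{
  memcpy(rootdata,(const char*)leafdata + rstart*unitbytes,nroots*unitbytes);
}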

311: PETSC_INTERN PetscErrorCode PetscSFReduceEnd_Allgatherv(PetscSF sf,MPI_Datatype unit,PetscMemType leafmtype,const void *leafdata,PetscMemType rootmtype,void *rootdata,MPI_Op op)
312: {
313:   PetscErrorCode         ierr;
314:   PetscSFPack            link;

317:   PetscSFPackGetInUse(sf,unit,rootdata,leafdata,PETSC_OWN_POINTER,&link);
318:   PetscSFPackWaitall(link,PETSCSF_LEAF2ROOT_REDUCE);
319:   if (op != MPIU_REPLACE) {
320:     PetscSFUnpackAndOpRootData(sf,link,NULL,rootdata,op,PETSC_FALSE);
321:   } else if (rootmtype == PETSC_MEMTYPE_DEVICE && !use_gpu_aware_mpi) {
322:     PetscMemcpyWithMemType(PETSC_MEMTYPE_DEVICE,PETSC_MEMTYPE_HOST,rootdata,link->rootbuf[PETSC_MEMTYPE_HOST],link->rootbuflen*link->unitbytes);
323:   }
324:   PetscSFPackReclaim(sf,&link);
325:   return(0);
326: }

328: static PetscErrorCode PetscSFBcastToZero_Allgatherv(PetscSF sf,MPI_Datatype unit,PetscMemType rootmtype,const void *rootdata,PetscMemType leafmtype,void *leafdata)
329: {
330:   PetscErrorCode         ierr;
331:   PetscSFPack            link;
332:   PetscMPIInt            rank;

335:   PetscSFBcastAndOpBegin_Gatherv(sf,unit,rootmtype,rootdata,leafmtype,leafdata,MPIU_REPLACE);
336:   PetscSFPackGetInUse(sf,unit,rootdata,leafdata,PETSC_OWN_POINTER,&link);
337:   PetscSFPackWaitall(link,PETSCSF_ROOT2LEAF_BCAST);
338:   MPI_Comm_rank(PetscObjectComm((PetscObject)sf),&rank);
339:   if (!rank && leafmtype == PETSC_MEMTYPE_DEVICE && !use_gpu_aware_mpi) {
340:     PetscMemcpyWithMemType(PETSC_MEMTYPE_DEVICE,PETSC_MEMTYPE_HOST,leafdata,link->leafbuf[PETSC_MEMTYPE_HOST],link->leafbuflen*link->unitbytes);
341:   }
342:   PetscSFPackReclaim(sf,&link);
343:   return(0);
344: }

346: /* This routine is very tricky (I believe it is rarely used with this kind of graph, so we just provide a simple but non-optimal implementation).

348:    Suppose we have three ranks. Rank 0 has a root with value 1. Ranks 0,1,2 have one leaf each, with values 2,3,4 respectively. The leaves are connected
349:    to the root on rank 0. Suppose op=MPI_SUM and ranks 0,1,2 access the root in rank order. By definition of this routine, rank 0 sees 1
350:    in the root, fetches it into its leafupdate, then updates the root to 1 + 2 = 3; rank 1 sees 3 in the root, fetches it into its leafupdate, then updates
351:    the root to 3 + 3 = 6; rank 2 sees 6 in the root, fetches it into its leafupdate, then updates the root to 6 + 4 = 10. At the end, leafupdate on ranks
352:    0,1,2 is 1,3,6 respectively, and the root is 10.

354:    One optimized implementation could be: starting from the initial state:
355:              rank-0   rank-1    rank-2
356:         Root     1
357:         Leaf     2       3         4

359:    Shift leaves rightwards to leafupdate. Rank 0 gathers the root value and puts it in leafupdate. We have:
360:              rank-0   rank-1    rank-2
361:         Root     1
362:         Leaf     2       3         4
363:      Leafupdate  1       2         3

365:    Then, do MPI_Scan on leafupdate and get:
366:              rank-0   rank-1    rank-2
367:         Root     1
368:         Leaf     2       3         4
369:      Leafupdate  1       3         6

371:    Rank 2 sums its leaf and leafupdate, scatters the result to the root, and gets
372:              rank-0   rank-1    rank-2
373:         Root     10
374:         Leaf     2       3         4
375:      Leafupdate  1       3         6

377:    We use a simpler implementation. From the same initial state, we copy leafdata to leafupdate
378:              rank-0   rank-1    rank-2
379:         Root     1
380:         Leaf     2       3         4
381:      Leafupdate  2       3         4

383:    Do MPI_Exscan on leafupdate,
384:              rank-0   rank-1    rank-2
385:         Root     1
386:         Leaf     2       3         4
387:      Leafupdate  2       2         5

389:    BcastAndOp from root to leafupdate,
390:              rank-0   rank-1    rank-2
391:         Root     1
392:         Leaf     2       3         4
393:      Leafupdate  3       3         6

395:    Copy root to leafupdate on rank-0
396:              rank-0   rank-1    rank-2
397:         Root     1
398:         Leaf     2       3         4
399:      Leafupdate  1       3         6

401:    Reduce from leaf to root,
402:              rank-0   rank-1    rank-2
403:         Root     10
404:         Leaf     2       3         4
405:      Leafupdate  1       3         6
406: */
407: PETSC_INTERN PetscErrorCode PetscSFFetchAndOpBegin_Allgatherv(PetscSF sf,MPI_Datatype unit,PetscMemType rootmtype,void *rootdata,PetscMemType leafmtype,const void *leafdata,void *leafupdate,MPI_Op op)
408: {
409:   PetscErrorCode         ierr;
410:   PetscSFPack            link;
411:   MPI_Comm               comm;
412:   PetscMPIInt            count;

415:   PetscObjectGetComm((PetscObject)sf,&comm);
416:   if (!use_gpu_aware_mpi && (rootmtype == PETSC_MEMTYPE_DEVICE || leafmtype == PETSC_MEMTYPE_DEVICE)) SETERRQ(comm,PETSC_ERR_SUP,"No support for FetchAndOp"); /* No known uses */
417:   /* Copy leafdata to leafupdate */
418:   PetscSFPackGet_Allgatherv(sf,unit,rootmtype,rootdata,leafmtype,leafdata,&link);
419:   PetscMemcpyWithMemType(leafmtype,leafmtype,leafupdate,leafdata,sf->nleaves*link->unitbytes);
420:   PetscSFPackGetInUse(sf,unit,rootdata,leafdata,PETSC_OWN_POINTER,&link);

422:   /* Exscan on leafupdate and then BcastAndOp rootdata to leafupdate */
423:   PetscMPIIntCast(sf->nleaves,&count);
424:   if (op == MPIU_REPLACE) {
425:     PetscMPIInt size,rank,prev,next;
426:     MPI_Comm_rank(comm,&rank);
427:     MPI_Comm_size(comm,&size);
428:     prev = rank ?            rank-1 : MPI_PROC_NULL;
429:     next = (rank < size-1) ? rank+1 : MPI_PROC_NULL;
430:     MPI_Sendrecv_replace(leafupdate,count,unit,next,link->tag,prev,link->tag,comm,MPI_STATUS_IGNORE); /* a single status, hence MPI_STATUS_IGNORE */
431:   } else {MPI_Exscan(MPI_IN_PLACE,leafupdate,count,unit,op,comm);}
432:   PetscSFPackReclaim(sf,&link);
433:   PetscSFBcastAndOpBegin(sf,unit,rootdata,leafupdate,op);
434:   PetscSFBcastAndOpEnd(sf,unit,rootdata,leafupdate,op);

436:   /* Bcast roots to rank 0's leafupdate */
437:   PetscSFBcastToZero_Private(sf,unit,rootdata,leafupdate); /* Using this line makes Allgather SFs able to inherit this routine */

439:   /* Reduce leafdata to rootdata */
440:   PetscSFReduceBegin(sf,unit,leafdata,rootdata,op);
441:   return(0);
442: }
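
/* Illustrative sketch (not part of the original sfallgatherv.c): the Exscan-based fetch-and-add
   described in the comment above, written as a standalone plain-MPI program for the 3-rank
   example (root = 1 on rank 0, leaves = 2,3,4). Run with 3 ranks: leafupdate ends up 1,3,6 and
   the root ends up 10. All names are assumptions for the sketch; op is fixed to MPI_SUM. */
#include <mpi.h>
#include <stdio.h>

int main(int argc,char **argv)
{
  int    rank;
  double root = 0.0,leaf,leafupdate,rootcopy,sum = 0.0;

  MPI_Init(&argc,&argv);
  MPI_Comm_rank(MPI_COMM_WORLD,&rank);

  if (!rank) root = 1.0;                                                     /* the single root lives on rank 0 */
  leaf       = 2.0 + rank;                                                   /* leaves 2,3,4 */
  leafupdate = leaf;                                                         /* step 1: copy leafdata to leafupdate */

  MPI_Exscan(MPI_IN_PLACE,&leafupdate,1,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD);  /* step 2: exclusive prefix sum; rank 0's value is not used below */

  rootcopy = root;                                                           /* step 3: bcast the root and add it into leafupdate */
  MPI_Bcast(&rootcopy,1,MPI_DOUBLE,0,MPI_COMM_WORLD);
  leafupdate += rootcopy;                                                    /* ranks 1,2 now hold 3 and 6 */
  if (!rank) leafupdate = root;                                              /* rank 0 fetched the original root value, 1 */

  MPI_Reduce(&leaf,&sum,1,MPI_DOUBLE,MPI_SUM,0,MPI_COMM_WORLD);              /* step 4: reduce all leaves onto the root */
  if (!rank) root += sum;                                                    /* 1 + (2+3+4) = 10 */

  printf("rank %d: leafupdate = %g\n",rank,leafupdate);
  if (!rank) printf("rank 0: root = %g\n",root);
  MPI_Finalize();
  return 0;
}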

444: PETSC_INTERN PetscErrorCode PetscSFFetchAndOpEnd_Allgatherv(PetscSF sf,MPI_Datatype unit,PetscMemType rootmtype,void *rootdata,PetscMemType leafmtype,const void *leafdata,void *leafupdate,MPI_Op op)
445: {
446:   PetscErrorCode         ierr;

449:   PetscSFReduceEnd(sf,unit,leafdata,rootdata,op);
450:   return(0);
451: }

453: /* Get root ranks accessing my leaves */
454: PETSC_INTERN PetscErrorCode PetscSFGetRootRanks_Allgatherv(PetscSF sf,PetscInt *nranks,const PetscMPIInt **ranks,const PetscInt **roffset,const PetscInt **rmine,const PetscInt **rremote)
455: {
457:   PetscInt       i,j,k,size;
458:   const PetscInt *range;

461:   /* Lazily construct these large arrays if users really need them for this type of SF. Very likely, they do not */
462:   if (sf->nranks && !sf->ranks) { /* On rank!=0, sf->nranks=0. The sf->nranks test makes this routine also work for sfgatherv */
463:     size = sf->nranks;
464:     PetscLayoutGetRanges(sf->map,&range);
465:     PetscMalloc4(size,&sf->ranks,size+1,&sf->roffset,sf->nleaves,&sf->rmine,sf->nleaves,&sf->rremote);
466:     for (i=0; i<size; i++) sf->ranks[i] = i;
467:     PetscArraycpy(sf->roffset,range,size+1);
468:     for (i=0; i<sf->nleaves; i++) sf->rmine[i] = i; /* rmine is never NULL even for contiguous leaves */
469:     for (i=0; i<size; i++) {
470:       for (j=range[i],k=0; j<range[i+1]; j++,k++) sf->rremote[j] = k;
471:     }
472:   }

474:   if (nranks)  *nranks  = sf->nranks;
475:   if (ranks)   *ranks   = sf->ranks;
476:   if (roffset) *roffset = sf->roffset;
477:   if (rmine)   *rmine   = sf->rmine;
478:   if (rremote) *rremote = sf->rremote;
479:   return(0);
480: }

482: /* Get leaf ranks accessing my roots */
483: PETSC_INTERN PetscErrorCode PetscSFGetLeafRanks_Allgatherv(PetscSF sf,PetscInt *niranks,const PetscMPIInt **iranks,const PetscInt **ioffset,const PetscInt **irootloc)
484: {
485:   PetscErrorCode     ierr;
486:   PetscSF_Allgatherv *dat = (PetscSF_Allgatherv*)sf->data;
487:   MPI_Comm           comm;
488:   PetscMPIInt        size,rank;
489:   PetscInt           i,j;

492:   /* Lazily construct these large arrays if users really need them for this type of SF. Very likely, they do not */
493:   PetscObjectGetComm((PetscObject)sf,&comm);
494:   MPI_Comm_size(comm,&size);
495:   MPI_Comm_rank(comm,&rank);
496:   if (niranks) *niranks = size;

498:   /* PetscSF_Basic has distinguished incoming ranks. Here we do not need that. But we must put self first and
499:      sort the other ranks. See the comments in PetscSFSetUp_Basic about MatGetBrowsOfAoCols_MPIAIJ for why.
500:    */
501:   if (iranks) {
502:     if (!dat->iranks) {
503:       PetscMalloc1(size,&dat->iranks);
504:       dat->iranks[0] = rank;
505:       for (i=0,j=1; i<size; i++) {if (i == rank) continue; dat->iranks[j++] = i;}
506:     }
507:     *iranks = dat->iranks; /* dat->iranks was init'ed to NULL by PetscNewLog */
508:   }

510:   if (ioffset) {
511:     if (!dat->ioffset) {
512:       PetscMalloc1(size+1,&dat->ioffset);
513:       for (i=0; i<=size; i++) dat->ioffset[i] = i*sf->nroots;
514:     }
515:     *ioffset = dat->ioffset;
516:   }

518:   if (irootloc) {
519:     if (!dat->irootloc) {
520:       PetscMalloc1(sf->nleaves,&dat->irootloc);
521:       for (i=0; i<size; i++) {
522:         for (j=0; j<sf->nroots; j++) dat->irootloc[i*sf->nroots+j] = j;
523:       }
524:     }
525:     *irootloc = dat->irootloc;
526:   }
527:   return(0);
528: }

530: PETSC_INTERN PetscErrorCode PetscSFCreateLocalSF_Allgatherv(PetscSF sf,PetscSF *out)
531: {
532:   PetscInt       i,nroots,nleaves,rstart,*ilocal;
533:   PetscSFNode    *iremote;
534:   PetscSF        lsf;

538:   nleaves = sf->nleaves ? sf->nroots : 0; /* sf->nleaves can be zero with SFGather(v) */
539:   nroots  = nleaves;
540:   PetscMalloc1(nleaves,&ilocal);
541:   PetscMalloc1(nleaves,&iremote);
542:   PetscLayoutGetRange(sf->map,&rstart,NULL);

544:   for (i=0; i<nleaves; i++) {
545:     ilocal[i]        = rstart + i; /* lsf does not change leaf indices */
546:     iremote[i].rank  = 0;          /* rank in PETSC_COMM_SELF */
547:     iremote[i].index = i;          /* root index */
548:   }

550:   PetscSFCreate(PETSC_COMM_SELF,&lsf);
551:   PetscSFSetGraph(lsf,nroots,nleaves,ilocal,PETSC_OWN_POINTER,iremote,PETSC_OWN_POINTER);
552:   PetscSFSetUp(lsf);
553:   *out = lsf;
554:   return(0);
555: }

557: PETSC_INTERN PetscErrorCode PetscSFCreate_Allgatherv(PetscSF sf)
558: {
559:   PetscErrorCode     ierr;
560:   PetscSF_Allgatherv *dat = (PetscSF_Allgatherv*)sf->data;

563:   sf->ops->SetUp           = PetscSFSetUp_Allgatherv;
564:   sf->ops->Reset           = PetscSFReset_Allgatherv;
565:   sf->ops->Destroy         = PetscSFDestroy_Allgatherv;
566:   sf->ops->GetRootRanks    = PetscSFGetRootRanks_Allgatherv;
567:   sf->ops->GetLeafRanks    = PetscSFGetLeafRanks_Allgatherv;
568:   sf->ops->GetGraph        = PetscSFGetGraph_Allgatherv;
569:   sf->ops->BcastAndOpBegin = PetscSFBcastAndOpBegin_Allgatherv;
570:   sf->ops->BcastAndOpEnd   = PetscSFBcastAndOpEnd_Allgatherv;
571:   sf->ops->ReduceBegin     = PetscSFReduceBegin_Allgatherv;
572:   sf->ops->ReduceEnd       = PetscSFReduceEnd_Allgatherv;
573:   sf->ops->FetchAndOpBegin = PetscSFFetchAndOpBegin_Allgatherv;
574:   sf->ops->FetchAndOpEnd   = PetscSFFetchAndOpEnd_Allgatherv;
575:   sf->ops->CreateLocalSF   = PetscSFCreateLocalSF_Allgatherv;
576:   sf->ops->BcastToZero     = PetscSFBcastToZero_Allgatherv;

578:   PetscNewLog(sf,&dat);
579:   sf->data = (void*)dat;
580:   return(0);
581: }