Actual source code: sfallgather.c

petsc-3.15.0 2021-04-05
#include <../src/vec/is/sf/impls/basic/allgatherv/sfallgatherv.h>

/* Reuse the type. The difference is that some fields (i.e., displs, recvcounts) are not used in Allgather on rank != 0, which is not a big deal */
typedef PetscSF_Allgatherv PetscSF_Allgather;

PETSC_INTERN PetscErrorCode PetscSFBcastBegin_Gather(PetscSF,MPI_Datatype,PetscMemType,const void*,PetscMemType,void*,MPI_Op);

PetscErrorCode PetscSFSetUp_Allgather(PetscSF sf)
{
  PetscInt              i;
  PetscSF_Allgather     *dat = (PetscSF_Allgather*)sf->data;

  PetscFunctionBegin;
  for (i=PETSCSF_LOCAL; i<=PETSCSF_REMOTE; i++) {
    sf->leafbuflen[i]  = 0;
    sf->leafstart[i]   = 0;
    sf->leafcontig[i]  = PETSC_TRUE;
    sf->leafdups[i]    = PETSC_FALSE;
    dat->rootbuflen[i] = 0;
    dat->rootstart[i]  = 0;
    dat->rootcontig[i] = PETSC_TRUE;
    dat->rootdups[i]   = PETSC_FALSE;
  }

  sf->leafbuflen[PETSCSF_REMOTE]  = sf->nleaves;
  dat->rootbuflen[PETSCSF_REMOTE] = sf->nroots;
  sf->persistent = PETSC_FALSE;
  sf->nleafreqs  = 0; /* MPI collectives only need one request. We treat it as a root request. */
  dat->nrootreqs = 1;
  PetscFunctionReturn(0);
}
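
/* ---------------------------------------------------------------------------
   Illustrative usage sketch (not part of sfallgather.c): a star forest with
   the ALLGATHER pattern, where each rank owns n roots and every rank's N
   leaves are the concatenation of all ranks' roots, so a broadcast amounts
   to an MPI_Allgather. The variable names (n, rootdata, leafdata) are made
   up for this example; the calls are the public PetscSF/PetscLayout API.
   --------------------------------------------------------------------------- */
#include <petscsf.h>

int main(int argc,char **argv)
{
  PetscSF        sf;
  PetscLayout    map;
  PetscInt       i,n = 2,N,rootdata[2],*leafdata;
  PetscMPIInt    rank,size;
  PetscErrorCode ierr;

  ierr = PetscInitialize(&argc,&argv,NULL,NULL);if (ierr) return ierr;
  ierr = MPI_Comm_rank(PETSC_COMM_WORLD,&rank);CHKERRQ(ierr);
  ierr = MPI_Comm_size(PETSC_COMM_WORLD,&size);CHKERRQ(ierr);
  N    = n*size;                                /* every rank's leaves cover all roots on all ranks */
  ierr = PetscMalloc1(N,&leafdata);CHKERRQ(ierr);
  for (i=0; i<n; i++) rootdata[i] = rank*n + i; /* the roots owned by this rank */

  ierr = PetscLayoutCreate(PETSC_COMM_WORLD,&map);CHKERRQ(ierr);
  ierr = PetscLayoutSetLocalSize(map,n);CHKERRQ(ierr);
  ierr = PetscLayoutSetUp(map);CHKERRQ(ierr);
  ierr = PetscSFCreate(PETSC_COMM_WORLD,&sf);CHKERRQ(ierr);
  ierr = PetscSFSetGraphWithPattern(sf,map,PETSCSF_PATTERN_ALLGATHER);CHKERRQ(ierr); /* selects the allgather implementation in this file */
  ierr = PetscSFBcastBegin(sf,MPIU_INT,rootdata,leafdata,MPI_REPLACE);CHKERRQ(ierr);
  ierr = PetscSFBcastEnd(sf,MPIU_INT,rootdata,leafdata,MPI_REPLACE);CHKERRQ(ierr);
  /* leafdata is now 0,1,...,N-1 on every rank: the concatenation of all ranks' rootdata */
  ierr = PetscSFDestroy(&sf);CHKERRQ(ierr);
  ierr = PetscLayoutDestroy(&map);CHKERRQ(ierr);
  ierr = PetscFree(leafdata);CHKERRQ(ierr);
  ierr = PetscFinalize();
  return ierr;
}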

static PetscErrorCode PetscSFBcastBegin_Allgather(PetscSF sf,MPI_Datatype unit,PetscMemType rootmtype,const void *rootdata,PetscMemType leafmtype,void *leafdata,MPI_Op op)
{
  PetscErrorCode        ierr;
  PetscSFLink           link;
  PetscMPIInt           sendcount;
  MPI_Comm              comm;
  void                  *rootbuf = NULL,*leafbuf = NULL; /* buffers seen by MPI */
  MPI_Request           *req;

  PetscFunctionBegin;
  ierr = PetscSFLinkCreate(sf,unit,rootmtype,rootdata,leafmtype,leafdata,op,PETSCSF_BCAST,&link);CHKERRQ(ierr);
  ierr = PetscSFLinkPackRootData(sf,link,PETSCSF_REMOTE,rootdata);CHKERRQ(ierr);
  ierr = PetscSFLinkCopyRootBufferInCaseNotUseGpuAwareMPI(sf,link,PETSC_TRUE/* device2host before sending */);CHKERRQ(ierr);
  ierr = PetscObjectGetComm((PetscObject)sf,&comm);CHKERRQ(ierr);
  ierr = PetscMPIIntCast(sf->nroots,&sendcount);CHKERRQ(ierr);
  ierr = PetscSFLinkGetMPIBuffersAndRequests(sf,link,PETSCSF_ROOT2LEAF,&rootbuf,&leafbuf,&req,NULL);CHKERRQ(ierr);
  ierr = PetscSFLinkSyncStreamBeforeCallMPI(sf,link,PETSCSF_ROOT2LEAF);CHKERRQ(ierr);
  ierr = MPIU_Iallgather(rootbuf,sendcount,unit,leafbuf,sendcount,unit,comm,req);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

static PetscErrorCode PetscSFReduceBegin_Allgather(PetscSF sf,MPI_Datatype unit,PetscMemType leafmtype,const void *leafdata,PetscMemType rootmtype,void *rootdata,MPI_Op op)
{
  PetscErrorCode        ierr;
  PetscSFLink           link;
  PetscInt              rstart;
  MPI_Comm              comm;
  PetscMPIInt           rank,count,recvcount;
  void                  *rootbuf = NULL,*leafbuf = NULL; /* buffers seen by MPI */
  PetscSF_Allgather     *dat = (PetscSF_Allgather*)sf->data;
  MPI_Request           *req;

  PetscFunctionBegin;
  ierr = PetscSFLinkCreate(sf,unit,rootmtype,rootdata,leafmtype,leafdata,op,PETSCSF_REDUCE,&link);CHKERRQ(ierr);
  if (op == MPI_REPLACE) {
    /* REPLACE is only meaningful when all processes have the same leafdata to reduce. Therefore copying from the local leafdata is fine */
    ierr = PetscLayoutGetRange(sf->map,&rstart,NULL);CHKERRQ(ierr);
    ierr = (*link->Memcpy)(link,rootmtype,rootdata,leafmtype,(const char*)leafdata+(size_t)rstart*link->unitbytes,(size_t)sf->nroots*link->unitbytes);CHKERRQ(ierr);
    if (PetscMemTypeDevice(leafmtype) && PetscMemTypeHost(rootmtype)) {ierr = (*link->SyncStream)(link);CHKERRQ(ierr);} /* Sync the device-to-host memcpy */
  } else {
    ierr = PetscObjectGetComm((PetscObject)sf,&comm);CHKERRQ(ierr);
    ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
    ierr = PetscSFLinkPackLeafData(sf,link,PETSCSF_REMOTE,leafdata);CHKERRQ(ierr);
    ierr = PetscSFLinkCopyLeafBufferInCaseNotUseGpuAwareMPI(sf,link,PETSC_TRUE/* device2host before sending */);CHKERRQ(ierr);
    ierr = PetscSFLinkGetMPIBuffersAndRequests(sf,link,PETSCSF_LEAF2ROOT,&rootbuf,&leafbuf,&req,NULL);CHKERRQ(ierr);
    ierr = PetscMPIIntCast(dat->rootbuflen[PETSCSF_REMOTE],&recvcount);CHKERRQ(ierr);
    if (!rank && !link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi]) {
      ierr = PetscSFMalloc(sf,link->leafmtype_mpi,sf->leafbuflen[PETSCSF_REMOTE]*link->unitbytes,(void**)&link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi]);CHKERRQ(ierr);
    }
    if (!rank && link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi] == leafbuf) leafbuf = MPI_IN_PLACE;
    ierr = PetscMPIIntCast(sf->nleaves*link->bs,&count);CHKERRQ(ierr);
    ierr = PetscSFLinkSyncStreamBeforeCallMPI(sf,link,PETSCSF_LEAF2ROOT);CHKERRQ(ierr);
    ierr = MPI_Reduce(leafbuf,link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi],count,link->basicunit,op,0,comm);CHKERRQ(ierr); /* Must do the reduce with the MPI builtin datatype basicunit */
    ierr = MPIU_Iscatter(link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi],recvcount,unit,rootbuf,recvcount,unit,0/*rank 0*/,comm,req);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
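
/* Illustrative sketch (not part of sfallgather.c), continuing the example
   after PetscSFSetUp_Allgather: reducing leaves back onto roots of the same
   star forest. With MPI_SUM each root receives the sum of its copies from
   all ranks' leafdata (the else-branch above: MPI_Reduce to rank 0 followed
   by a scatter), while MPI_REPLACE assumes identical leafdata on every rank
   and simply copies the local slice back (the if-branch above). The helper
   name and argument types are made up for the example. */
static PetscErrorCode ReduceLeavesToRoots_Example(PetscSF sf,const PetscInt *leafdata,PetscInt *rootdata)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = PetscSFReduceBegin(sf,MPIU_INT,leafdata,rootdata,MPI_SUM);CHKERRQ(ierr);
  ierr = PetscSFReduceEnd(sf,MPIU_INT,leafdata,rootdata,MPI_SUM);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}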

static PetscErrorCode PetscSFBcastToZero_Allgather(PetscSF sf,MPI_Datatype unit,PetscMemType rootmtype,const void *rootdata,PetscMemType leafmtype,void *leafdata)
{
  PetscErrorCode        ierr;
  PetscSFLink           link;
  PetscMPIInt           rank;

  PetscFunctionBegin;
  ierr = PetscSFBcastBegin_Gather(sf,unit,rootmtype,rootdata,leafmtype,leafdata,MPI_REPLACE);CHKERRQ(ierr);
  ierr = PetscSFLinkGetInUse(sf,unit,rootdata,leafdata,PETSC_OWN_POINTER,&link);CHKERRQ(ierr);
  ierr = PetscSFLinkFinishCommunication(sf,link,PETSCSF_ROOT2LEAF);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(PetscObjectComm((PetscObject)sf),&rank);CHKERRQ(ierr);
  if (!rank && PetscMemTypeDevice(leafmtype) && !sf->use_gpu_aware_mpi) {
    ierr = (*link->Memcpy)(link,PETSC_MEMTYPE_DEVICE,leafdata,PETSC_MEMTYPE_HOST,link->leafbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_HOST],sf->leafbuflen[PETSCSF_REMOTE]*link->unitbytes);CHKERRQ(ierr);
  }
  ierr = PetscSFLinkReclaim(sf,&link);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

PETSC_INTERN PetscErrorCode PetscSFCreate_Allgather(PetscSF sf)
{
  PetscErrorCode    ierr;
  PetscSF_Allgather *dat = (PetscSF_Allgather*)sf->data;

  PetscFunctionBegin;
  sf->ops->BcastEnd        = PetscSFBcastEnd_Basic;
  sf->ops->ReduceEnd       = PetscSFReduceEnd_Basic;

  /* Inherit from Allgatherv */
  sf->ops->Reset           = PetscSFReset_Allgatherv;
  sf->ops->Destroy         = PetscSFDestroy_Allgatherv;
  sf->ops->FetchAndOpBegin = PetscSFFetchAndOpBegin_Allgatherv;
  sf->ops->FetchAndOpEnd   = PetscSFFetchAndOpEnd_Allgatherv;
  sf->ops->GetRootRanks    = PetscSFGetRootRanks_Allgatherv;
  sf->ops->CreateLocalSF   = PetscSFCreateLocalSF_Allgatherv;
  sf->ops->GetGraph        = PetscSFGetGraph_Allgatherv;
  sf->ops->GetLeafRanks    = PetscSFGetLeafRanks_Allgatherv;

  /* Allgather-specific routines */
  sf->ops->SetUp           = PetscSFSetUp_Allgather;
  sf->ops->BcastBegin      = PetscSFBcastBegin_Allgather;
  sf->ops->ReduceBegin     = PetscSFReduceBegin_Allgather;
  sf->ops->BcastToZero     = PetscSFBcastToZero_Allgather;

  ierr     = PetscNewLog(sf,&dat);CHKERRQ(ierr);
  sf->data = (void*)dat;
  PetscFunctionReturn(0);
}
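
/* Note (not part of the original file): this constructor is registered under
   the type name PETSCSFALLGATHER ("allgather"). Besides being selected
   automatically by PetscSFSetGraphWithPattern() with PETSCSF_PATTERN_ALLGATHER,
   it can also be requested at runtime with -sf_type allgather through
   PetscSFSetFromOptions(), assuming the graph really is an allgather pattern. */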