Actual source code: sfimpl.h

petsc-master 2019-08-22
#if !defined(PETSCSFIMPL_H)
#define PETSCSFIMPL_H

#include <petscsf.h>
#include <petsc/private/petscimpl.h>
#include <petscviewer.h>

PETSC_EXTERN PetscLogEvent PETSCSF_SetGraph;
PETSC_EXTERN PetscLogEvent PETSCSF_SetUp;
PETSC_EXTERN PetscLogEvent PETSCSF_BcastBegin;
PETSC_EXTERN PetscLogEvent PETSCSF_BcastEnd;
PETSC_EXTERN PetscLogEvent PETSCSF_BcastAndOpBegin;
PETSC_EXTERN PetscLogEvent PETSCSF_BcastAndOpEnd;
PETSC_EXTERN PetscLogEvent PETSCSF_ReduceBegin;
PETSC_EXTERN PetscLogEvent PETSCSF_ReduceEnd;
PETSC_EXTERN PetscLogEvent PETSCSF_FetchAndOpBegin;
PETSC_EXTERN PetscLogEvent PETSCSF_FetchAndOpEnd;
PETSC_EXTERN PetscLogEvent PETSCSF_EmbedSF;
PETSC_EXTERN PetscLogEvent PETSCSF_DistSect;
PETSC_EXTERN PetscLogEvent PETSCSF_SectSF;
PETSC_EXTERN PetscLogEvent PETSCSF_RemoteOff;
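
/*
  Illustrative sketch, not part of PETSc: each of these events is registered once with
  PetscLogEventRegister() and then used inside the corresponding SF routine to time its
  body, roughly as follows. The event name string here is an assumption.

    ierr = PetscLogEventRegister("SFSetGraph",PETSCSF_CLASSID,&PETSCSF_SetGraph);CHKERRQ(ierr);

    ierr = PetscLogEventBegin(PETSCSF_SetGraph,sf,0,0,0);CHKERRQ(ierr);
    ... build the graph ...
    ierr = PetscLogEventEnd(PETSCSF_SetGraph,sf,0,0,0);CHKERRQ(ierr);
*/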

struct _PetscSFOps {
  PetscErrorCode (*Reset)(PetscSF);
  PetscErrorCode (*Destroy)(PetscSF);
  PetscErrorCode (*SetUp)(PetscSF);
  PetscErrorCode (*SetFromOptions)(PetscOptionItems*,PetscSF);
  PetscErrorCode (*View)(PetscSF,PetscViewer);
  PetscErrorCode (*Duplicate)(PetscSF,PetscSFDuplicateOption,PetscSF);
  PetscErrorCode (*BcastAndOpBegin)(PetscSF,MPI_Datatype,const void*,void*,MPI_Op);
  PetscErrorCode (*BcastAndOpEnd)(PetscSF,MPI_Datatype,const void*,void*,MPI_Op);
  PetscErrorCode (*BcastToZero)(PetscSF,MPI_Datatype,const void*,void*); /* For internal use only */
  PetscErrorCode (*ReduceBegin)(PetscSF,MPI_Datatype,const void*,void*,MPI_Op);
  PetscErrorCode (*ReduceEnd)(PetscSF,MPI_Datatype,const void*,void*,MPI_Op);
  PetscErrorCode (*FetchAndOpBegin)(PetscSF,MPI_Datatype,void*,const void*,void*,MPI_Op);
  PetscErrorCode (*FetchAndOpEnd)(PetscSF,MPI_Datatype,void*,const void*,void*,MPI_Op);
  PetscErrorCode (*GetRootRanks)(PetscSF,PetscInt*,const PetscMPIInt**,const PetscInt**,const PetscInt**,const PetscInt**);
  PetscErrorCode (*GetLeafRanks)(PetscSF,PetscInt*,const PetscMPIInt**,const PetscInt**,const PetscInt**);
  PetscErrorCode (*CreateLocalSF)(PetscSF,PetscSF*);
  PetscErrorCode (*GetGraph)(PetscSF,PetscInt*,PetscInt*,const PetscInt**,const PetscSFNode**);
  PetscErrorCode (*CreateEmbeddedSF)(PetscSF,PetscInt,const PetscInt*,PetscSF*);
  PetscErrorCode (*CreateEmbeddedLeafSF)(PetscSF,PetscInt,const PetscInt*,PetscSF*);
};
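
/*
  Illustrative sketch, not part of PETSc: an SF implementation supplies a create routine
  that fills in this function table and stashes its private data in sf->data; the type is
  then made available through PetscSFRegister(). The names PetscSF_Example,
  PetscSFSetUp_Example and PetscSFCreate_Example are hypothetical, and only the SetUp
  entry is shown; a real implementation fills in the remaining entries the same way.

    typedef struct { PetscInt placeholder; } PetscSF_Example;

    static PetscErrorCode PetscSFSetUp_Example(PetscSF sf)
    {
      PetscFunctionBegin;
      PetscFunctionReturn(0);
    }

    PETSC_EXTERN PetscErrorCode PetscSFCreate_Example(PetscSF sf)
    {
      PetscSF_Example *ex;
      PetscErrorCode  ierr;

      PetscFunctionBegin;
      sf->ops->SetUp = PetscSFSetUp_Example;
      ierr = PetscNewLog(sf,&ex);CHKERRQ(ierr);
      sf->data = (void*)ex;
      PetscFunctionReturn(0);
    }

    ierr = PetscSFRegister("example",PetscSFCreate_Example);CHKERRQ(ierr);
*/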

typedef struct _n_PetscSFPackOpt *PetscSFPackOpt;

struct _p_PetscSF {
  PETSCHEADER(struct _PetscSFOps);
  PetscInt        nroots;       /* Number of root vertices on current process (candidates for incoming edges) */
  PetscInt        nleaves;      /* Number of leaf vertices on current process (this process specifies a root for each leaf) */
  PetscInt        *mine;        /* Location of leaves in leafdata arrays provided to the communication routines */
  PetscInt        *mine_alloc;
  PetscInt        minleaf,maxleaf;
  PetscSFNode     *remote;      /* Remote references to roots for each local leaf */
  PetscSFNode     *remote_alloc;
  PetscInt        nranks;       /* Number of ranks owning roots connected to my leaves */
  PetscInt        ndranks;      /* Number of ranks in distinguished group holding roots connected to my leaves */
  PetscMPIInt     *ranks;       /* List of ranks referenced by "remote" */
  PetscInt        *roffset;     /* Array of length nranks+1, offset in rmine/rremote for each rank */
  PetscInt        *rmine;       /* Concatenated array holding local indices referencing each remote rank */
  PetscInt        *rremote;     /* Concatenated array holding remote indices referenced for each remote rank */
  PetscBool       degreeknown;  /* The degree is currently known; no need to recompute */
  PetscInt        *degree;      /* Degree of each of my root vertices */
  PetscInt        *degreetmp;   /* Temporary local array for computing degree */
  PetscBool       rankorder;    /* Sort ranks for gather and scatter operations */
  MPI_Group       ingroup;      /* Group of processes connected to my roots */
  MPI_Group       outgroup;     /* Group of processes connected to my leaves */
  PetscSF         multi;        /* Internal graph used to implement gather and scatter operations */
  PetscBool       graphset;     /* Flag indicating that the graph has been set, required before calling communication routines */
  PetscBool       setupcalled;  /* Type and communication structures have been set up */
  PetscSFPackOpt  leafpackopt;  /* Optimization plans to (un)pack leaves based on patterns in rmine[]. NULL for no optimization */

  PetscSFPattern  pattern;      /* Pattern of the graph */
  PetscLayout     map;          /* Layout of leaves over all processes when building a patterned graph */

  void *data;                   /* Pointer to implementation */
};
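
/*
  Illustrative sketch, not part of PETSc: nroots, nleaves, mine and remote are normally
  populated through the public PetscSFSetGraph() interface. A minimal example in which
  every local leaf references root 0 on rank 0 might look like the following; the
  variable names are hypothetical.

    PetscSF        sf;
    PetscSFNode    *iremote;
    PetscInt       i,nroots = 1,nleaves = 2;
    PetscErrorCode ierr;

    ierr = PetscSFCreate(PETSC_COMM_WORLD,&sf);CHKERRQ(ierr);
    ierr = PetscMalloc1(nleaves,&iremote);CHKERRQ(ierr);
    for (i=0; i<nleaves; i++) {iremote[i].rank = 0; iremote[i].index = 0;}
    ierr = PetscSFSetGraph(sf,nroots,nleaves,NULL,PETSC_COPY_VALUES,iremote,PETSC_OWN_POINTER);CHKERRQ(ierr);
    ierr = PetscSFSetUp(sf);CHKERRQ(ierr);
    ierr = PetscSFDestroy(&sf);CHKERRQ(ierr);
*/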

PETSC_EXTERN PetscBool PetscSFRegisterAllCalled;
PETSC_EXTERN PetscErrorCode PetscSFRegisterAll(void);

PETSC_INTERN PetscErrorCode PetscSFCreateLocalSF_Private(PetscSF,PetscSF*);
PETSC_INTERN PetscErrorCode PetscSFBcastToZero_Private(PetscSF,MPI_Datatype,const void*,void*);

PETSC_EXTERN PetscErrorCode MPIPetsc_Type_unwrap(MPI_Datatype,MPI_Datatype*,PetscBool*);
PETSC_EXTERN PetscErrorCode MPIPetsc_Type_compare(MPI_Datatype,MPI_Datatype,PetscBool*);
PETSC_EXTERN PetscErrorCode MPIPetsc_Type_compare_contig(MPI_Datatype,MPI_Datatype,PetscInt*);

#if defined(PETSC_HAVE_MPI_NONBLOCKING_COLLECTIVES)
#define MPIU_Iscatter(a,b,c,d,e,f,g,h,req)     MPI_Iscatter(a,b,c,d,e,f,g,h,req)
#define MPIU_Iscatterv(a,b,c,d,e,f,g,h,i,req)  MPI_Iscatterv(a,b,c,d,e,f,g,h,i,req)
#define MPIU_Igather(a,b,c,d,e,f,g,h,req)      MPI_Igather(a,b,c,d,e,f,g,h,req)
#define MPIU_Igatherv(a,b,c,d,e,f,g,h,i,req)   MPI_Igatherv(a,b,c,d,e,f,g,h,i,req)
#define MPIU_Iallgather(a,b,c,d,e,f,g,req)     MPI_Iallgather(a,b,c,d,e,f,g,req)
#define MPIU_Iallgatherv(a,b,c,d,e,f,g,h,req)  MPI_Iallgatherv(a,b,c,d,e,f,g,h,req)
#define MPIU_Ialltoall(a,b,c,d,e,f,g,req)      MPI_Ialltoall(a,b,c,d,e,f,g,req)
#else
/* Ignore req, the MPI_Request argument, and fall back to blocking MPI collectives. Callers should
   initialize req to MPI_REQUEST_NULL so that MPI_Wait(req,status) works whether or not the call is
   blocking.
 */
#define MPIU_Iscatter(a,b,c,d,e,f,g,h,req)     MPI_Scatter(a,b,c,d,e,f,g,h)
#define MPIU_Iscatterv(a,b,c,d,e,f,g,h,i,req)  MPI_Scatterv(a,b,c,d,e,f,g,h,i)
#define MPIU_Igather(a,b,c,d,e,f,g,h,req)      MPI_Gather(a,b,c,d,e,f,g,h)
#define MPIU_Igatherv(a,b,c,d,e,f,g,h,i,req)   MPI_Gatherv(a,b,c,d,e,f,g,h,i)
#define MPIU_Iallgather(a,b,c,d,e,f,g,req)     MPI_Allgather(a,b,c,d,e,f,g)
#define MPIU_Iallgatherv(a,b,c,d,e,f,g,h,req)  MPI_Allgatherv(a,b,c,d,e,f,g,h)
#define MPIU_Ialltoall(a,b,c,d,e,f,g,req)      MPI_Alltoall(a,b,c,d,e,f,g)
#endif
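
/*
  Illustrative sketch, not part of PETSc: because the blocking fallback above ignores req,
  callers initialize the request to MPI_REQUEST_NULL so the subsequent MPI_Wait() is
  harmless in either build. The variable names below are hypothetical.

    MPI_Comm       comm = PETSC_COMM_WORLD;
    MPI_Request    req  = MPI_REQUEST_NULL;
    PetscMPIInt    rank,size,*allranks;
    PetscErrorCode ierr;

    ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
    ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
    ierr = PetscMalloc1(size,&allranks);CHKERRQ(ierr);
    ierr = MPIU_Iallgather(&rank,1,MPI_INT,allranks,1,MPI_INT,comm,&req);CHKERRQ(ierr);
    ierr = MPI_Wait(&req,MPI_STATUS_IGNORE);CHKERRQ(ierr);
    ierr = PetscFree(allranks);CHKERRQ(ierr);
*/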

#endif