Actual source code: tagm.c

petsc-master 2019-06-18

/*
      Some PETSc utilities
*/
#include <petsc/private/petscimpl.h>
/* ---------------------------------------------------------------- */
/*
   A simple way to manage tags inside a communicator.

   It uses MPI attributes to determine if a new communicator
   is needed and to store the available tags.
*/
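/*
   Background sketch (added for illustration; not part of the original tagm.c):
   the "attributes" referred to above are standard MPI caching attributes.
   A keyval such as Petsc_Counter_keyval is created once, and a value can then
   be attached to, and later retrieved from, any communicator:

     static int my_keyval = MPI_KEYVAL_INVALID;
     int        flag;
     void       *value;
     MPI_Comm_create_keyval(MPI_COMM_NULL_COPY_FN,MPI_COMM_NULL_DELETE_FN,&my_keyval,NULL);
     MPI_Comm_set_attr(comm,my_keyval,value);
     MPI_Comm_get_attr(comm,my_keyval,&value,&flag);

   Here my_keyval, comm, and value are hypothetical names.
*/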


/*@C
    PetscObjectGetNewTag - Gets a unique new tag from a PETSc object. All
    processors that share the object MUST call this routine EXACTLY the same
    number of times.  This tag should only be used with the current object's
    communicator; do NOT use it with any other MPI communicator.

    Collective on PetscObject

    Input Parameter:
.   obj - the PETSc object; this must be cast with a (PetscObject), for example,
         PetscObjectGetNewTag((PetscObject)mat,&tag);

    Output Parameter:
.   tag - the new tag

    Level: developer

.seealso: PetscCommGetNewTag()
@*/
PetscErrorCode  PetscObjectGetNewTag(PetscObject obj,PetscMPIInt *tag)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = PetscCommGetNewTag(obj->comm,tag);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
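/*
   Illustrative usage sketch (added; not part of the original tagm.c): obtain a
   fresh tag from an object's communicator and use it for a point-to-point
   message.  The names mat, buf, and n are hypothetical.

     PetscMPIInt    tag;
     PetscErrorCode ierr;
     ierr = PetscObjectGetNewTag((PetscObject)mat,&tag);CHKERRQ(ierr);
     ierr = MPI_Send(buf,n,MPIU_SCALAR,1,tag,PetscObjectComm((PetscObject)mat));CHKERRQ(ierr);
*/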

/*@
    PetscCommGetNewTag - Gets a unique new tag from a PETSc communicator. All
    processors that share the communicator MUST call this routine EXACTLY the same
    number of times.  This tag should only be used with the current communicator;
    do NOT use it with any other MPI communicator.

    Collective

    Input Parameter:
.   comm - the MPI communicator

    Output Parameter:
.   tag - the new tag

    Level: developer

.seealso: PetscObjectGetNewTag(), PetscCommDuplicate()
@*/
PetscErrorCode  PetscCommGetNewTag(MPI_Comm comm,PetscMPIInt *tag)
{
  PetscErrorCode   ierr;
  PetscCommCounter *counter;
  PetscMPIInt      *maxval,flg;

  PetscFunctionBegin;
  ierr = MPI_Comm_get_attr(comm,Petsc_Counter_keyval,&counter,&flg);CHKERRQ(ierr);
  if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_CORRUPT,"Bad MPI communicator supplied; must be a PETSc communicator");

  if (counter->tag < 1) {
    ierr = PetscInfo1(0,"Out of tags for object, starting to recycle. Comm reference count %d\n",counter->refcount);CHKERRQ(ierr);
    ierr = MPI_Comm_get_attr(MPI_COMM_WORLD,MPI_TAG_UB,&maxval,&flg);CHKERRQ(ierr);
    if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"MPI error: MPI_Comm_get_attr() is not returning an MPI_TAG_UB");
    counter->tag = *maxval - 128; /* hope that any still-active tags were issued right at the beginning of the run */
  }

  *tag = counter->tag--;
#if defined(PETSC_USE_DEBUG)
  /*
     Hanging here means that some processes have called PetscCommGetNewTag() and others have not.
  */
  ierr = MPI_Barrier(comm);CHKERRQ(ierr);
#endif
  PetscFunctionReturn(0);
}
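/*
   Illustrative sketch (added; not part of the original tagm.c): the comm passed
   to PetscCommGetNewTag() must already carry the counter attribute, i.e. it
   must be a PETSc communicator such as the comm_out of PetscCommDuplicate().

     MPI_Comm       pcomm;
     PetscMPIInt    tag;
     PetscErrorCode ierr;
     ierr = PetscCommDuplicate(PETSC_COMM_WORLD,&pcomm,NULL);CHKERRQ(ierr);
     ierr = PetscCommGetNewTag(pcomm,&tag);CHKERRQ(ierr);
     ierr = PetscCommDestroy(&pcomm);CHKERRQ(ierr);
*/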

/*@C
  PetscCommDuplicate - Duplicates the communicator only if it is not already a PETSc communicator.

  Collective

  Input Parameter:
. comm_in - Input communicator

  Output Parameters:
+ comm_out - Output communicator.  May be comm_in.
- first_tag - Tag available that has not already been used with this communicator (you may
              pass in NULL if you do not need a tag)

  Notes:
  PETSc communicators are just regular MPI communicators that keep track of which
  tags have been used to prevent tag conflicts. If you pass a non-PETSc communicator into
  a PETSc creation routine, it will attach a private communicator for use in the object's communications.
  The internal MPI_Comm is used to perform all the MPI calls for PETSc; the outer MPI_Comm is a user-level
  MPI_Comm that may be performing communication for the user or another library, and so IS NOT used by PETSc.

  Level: developer

.seealso: PetscObjectGetNewTag(), PetscCommGetNewTag(), PetscCommDestroy()
@*/
PetscErrorCode  PetscCommDuplicate(MPI_Comm comm_in,MPI_Comm *comm_out,PetscMPIInt *first_tag)
{
  PetscErrorCode   ierr;
  PetscCommCounter *counter;
  PetscMPIInt      *maxval,flg;

  PetscFunctionBegin;
  ierr = PetscSpinlockLock(&PetscCommSpinLock);CHKERRQ(ierr);
  ierr = MPI_Comm_get_attr(comm_in,Petsc_Counter_keyval,&counter,&flg);CHKERRQ(ierr);

  if (!flg) {  /* this is NOT a PETSc comm */
    union {MPI_Comm comm; void *ptr;} ucomm;
    /* check if this communicator has a PETSc communicator embedded in it */
    ierr = MPI_Comm_get_attr(comm_in,Petsc_InnerComm_keyval,&ucomm,&flg);CHKERRQ(ierr);
    if (!flg) {
      /* This communicator is not yet known to this system, so we duplicate it and make an internal communicator */
      ierr = MPI_Comm_dup(comm_in,comm_out);CHKERRQ(ierr);
      ierr = MPI_Comm_get_attr(MPI_COMM_WORLD,MPI_TAG_UB,&maxval,&flg);CHKERRQ(ierr);
      if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"MPI error: MPI_Comm_get_attr() is not returning an MPI_TAG_UB");
      ierr = PetscNew(&counter);CHKERRQ(ierr);

      counter->tag       = *maxval;
      counter->refcount  = 0;
      counter->namecount = 0;

      ierr = MPI_Comm_set_attr(*comm_out,Petsc_Counter_keyval,counter);CHKERRQ(ierr);
      ierr = PetscInfo3(0,"Duplicating a communicator %ld %ld max tags = %d\n",(long)comm_in,(long)*comm_out,*maxval);CHKERRQ(ierr);

      /* save PETSc communicator inside user communicator, so we can get it next time */
      ucomm.comm = *comm_out;   /* ONLY the comm part of the union is significant. */
      ierr = MPI_Comm_set_attr(comm_in,Petsc_InnerComm_keyval,ucomm.ptr);CHKERRQ(ierr);
      ucomm.comm = comm_in;
      ierr = MPI_Comm_set_attr(*comm_out,Petsc_OuterComm_keyval,ucomm.ptr);CHKERRQ(ierr);
    } else {
      /* pull out the inner MPI_Comm and hand it back to the caller */
      *comm_out = ucomm.comm;
      ierr = MPI_Comm_get_attr(*comm_out,Petsc_Counter_keyval,&counter,&flg);CHKERRQ(ierr);
      if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Inner PETSc communicator does not have its tag/name counter attribute set");
      ierr = PetscInfo2(0,"Using internal PETSc communicator %ld %ld\n",(long)comm_in,(long)*comm_out);CHKERRQ(ierr);
    }
  } else *comm_out = comm_in;

#if defined(PETSC_USE_DEBUG)
  /*
     Hanging here means that some processes have called PetscCommDuplicate() and others have not.
     This likely means that a subset of processes in an MPI_Comm have attempted to create a PetscObject!
     ALL processes that share a communicator MUST share objects created from that communicator.
  */
  ierr = MPI_Barrier(comm_in);CHKERRQ(ierr);
#endif

  if (counter->tag < 1) {
    ierr = PetscInfo1(0,"Out of tags for object, starting to recycle. Comm reference count %d\n",counter->refcount);CHKERRQ(ierr);
    ierr = MPI_Comm_get_attr(MPI_COMM_WORLD,MPI_TAG_UB,&maxval,&flg);CHKERRQ(ierr);
    if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"MPI error: MPI_Comm_get_attr() is not returning an MPI_TAG_UB");
    counter->tag = *maxval - 128; /* hope that any still-active tags were issued right at the beginning of the run */
  }

  if (first_tag) *first_tag = counter->tag--;

  counter->refcount++; /* number of references to this comm */
  ierr = PetscSpinlockUnlock(&PetscCommSpinLock);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
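/*
   Illustrative sketch (added; not part of the original tagm.c): duplicating the
   same user communicator twice returns the same inner PETSc communicator, so
   c1 == c2 below, while t1 != t2 because each request consumes a tag.  Each
   PetscCommDuplicate() must be matched by a PetscCommDestroy().

     MPI_Comm       c1,c2;
     PetscMPIInt    t1,t2;
     PetscErrorCode ierr;
     ierr = PetscCommDuplicate(MPI_COMM_WORLD,&c1,&t1);CHKERRQ(ierr);
     ierr = PetscCommDuplicate(MPI_COMM_WORLD,&c2,&t2);CHKERRQ(ierr);
     ierr = PetscCommDestroy(&c2);CHKERRQ(ierr);
     ierr = PetscCommDestroy(&c1);CHKERRQ(ierr);
*/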

/*@C
   PetscCommDestroy - Frees a communicator.  Use in conjunction with PetscCommDuplicate().

   Collective

   Input Parameter:
.  comm - the communicator to free

   Level: developer

.seealso:   PetscCommDuplicate()
@*/
PetscErrorCode  PetscCommDestroy(MPI_Comm *comm)
{
  PetscErrorCode   ierr;
  PetscCommCounter *counter;
  PetscMPIInt      flg;
  MPI_Comm         icomm = *comm,ocomm;
  union {MPI_Comm comm; void *ptr;} ucomm;

  PetscFunctionBegin;
  if (*comm == MPI_COMM_NULL) PetscFunctionReturn(0);
  ierr = PetscSpinlockLock(&PetscCommSpinLock);CHKERRQ(ierr);
  ierr = MPI_Comm_get_attr(icomm,Petsc_Counter_keyval,&counter,&flg);CHKERRQ(ierr);
  if (!flg) { /* not a PETSc comm, check if it has an inner comm */
    ierr = MPI_Comm_get_attr(icomm,Petsc_InnerComm_keyval,&ucomm,&flg);CHKERRQ(ierr);
    if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_CORRUPT,"MPI_Comm does not have tag/name counter nor does it have an inner MPI_Comm");
    icomm = ucomm.comm;
    ierr = MPI_Comm_get_attr(icomm,Petsc_Counter_keyval,&counter,&flg);CHKERRQ(ierr);
    if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_CORRUPT,"Inner MPI_Comm does not have expected tag/name counter, problem with corrupted memory");
  }

  counter->refcount--;

  if (!counter->refcount) {
    /* if the MPI_Comm has an outer comm then remove the reference to the inner MPI_Comm from the outer MPI_Comm */
    ierr = MPI_Comm_get_attr(icomm,Petsc_OuterComm_keyval,&ucomm,&flg);CHKERRQ(ierr);
    if (flg) {
      ocomm = ucomm.comm;
      ierr = MPI_Comm_get_attr(ocomm,Petsc_InnerComm_keyval,&ucomm,&flg);CHKERRQ(ierr);
      if (flg) {
        ierr = MPI_Comm_delete_attr(ocomm,Petsc_InnerComm_keyval);CHKERRQ(ierr);
      } else SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_CORRUPT,"Outer MPI_Comm %ld does not have expected reference to inner comm %ld, problem with corrupted memory",(long int)ocomm,(long int)icomm);
    }

    ierr = PetscInfo1(0,"Deleting PETSc MPI_Comm %ld\n",(long)icomm);CHKERRQ(ierr);
    ierr = MPI_Comm_free(&icomm);CHKERRQ(ierr);
  }
  *comm = MPI_COMM_NULL;
  ierr = PetscSpinlockUnlock(&PetscCommSpinLock);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
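/*
   Illustrative sketch (added; not part of the original tagm.c): as the code
   above shows, PetscCommDestroy() accepts either the inner PETSc communicator
   returned by PetscCommDuplicate() or the outer user communicator, in which
   case the inner communicator is located through Petsc_InnerComm_keyval.

     MPI_Comm       user = MPI_COMM_WORLD,inner;
     PetscErrorCode ierr;
     ierr = PetscCommDuplicate(user,&inner,NULL);CHKERRQ(ierr);
     ierr = PetscCommDestroy(&user);CHKERRQ(ierr);
*/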

/*@C
    PetscObjectsListGetGlobalNumbering - Computes a global numbering
    of PetscObjects living on subcommunicators of a given communicator.

    Collective

    Input Parameters:
+   comm    - the MPI_Comm
.   len     - local length of objlist
-   objlist - a list of PETSc objects living on subcomms of comm and containing this rank
              (subcomm ordering is assumed to be deadlock-free)

    Output Parameters:
+   count      - global number of distinct subcommunicators on objlist (may be > len)
-   numbering  - global numbers of objlist entries (allocated by the user)

    Level: developer

@*/
PetscErrorCode  PetscObjectsListGetGlobalNumbering(MPI_Comm comm, PetscInt len, PetscObject *objlist, PetscInt *count, PetscInt *numbering)
{
  PetscErrorCode ierr;
  PetscInt       i, roots, offset;
  PetscMPIInt    size, rank;

  PetscFunctionBegin;
  if (!count && !numbering) PetscFunctionReturn(0);

  ierr = MPI_Comm_size(comm, &size);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm, &rank);CHKERRQ(ierr);
  roots = 0;
  for (i = 0; i < len; ++i) {
    PetscMPIInt srank;
    ierr = MPI_Comm_rank(objlist[i]->comm, &srank);CHKERRQ(ierr);
    /* Am I the root of the i-th subcomm? */
    if (!srank) ++roots;
  }
  if (count) {
    /* Obtain the sum of all roots -- the global number of distinct subcomms. */
    ierr = MPIU_Allreduce(&roots,count,1,MPIU_INT,MPI_SUM,comm);CHKERRQ(ierr);
  }
  if (numbering) {
    /* Introduce a global numbering for subcomms, initially known only by subcomm roots. */
    /*
      Each subcomm root numbers all of the subcomms it owns locally
      and makes the numbering global by calculating the shift among all of the roots.
      The roots are ordered using the comm ordering.
    */
    ierr = MPI_Scan(&roots,&offset,1,MPIU_INT,MPI_SUM,comm);CHKERRQ(ierr);
    offset -= roots;
    /*
      Now we are ready to broadcast global subcomm numbers within each subcomm.
      This is where the deadlock-free ordering of the subcomms is needed:
      the broadcast is collective on each subcomm.
    */
    roots = 0;
    for (i = 0; i < len; ++i) {
      PetscMPIInt srank;
      numbering[i] = offset + roots; /* only meaningful on the subcomm root (srank == 0) */

      ierr = MPI_Comm_rank(objlist[i]->comm, &srank);CHKERRQ(ierr);
      ierr = MPI_Bcast(numbering+i,1,MPIU_INT,0,objlist[i]->comm);CHKERRQ(ierr);
      if (!srank) ++roots;
    }
  }
  PetscFunctionReturn(0);
}
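/*
   Illustrative sketch (added; not part of the original tagm.c): globally number
   two objects living on (possibly different) subcommunicators of
   PETSC_COMM_WORLD.  The vectors v1 and v2 are hypothetical.

     PetscObject    objs[2] = {(PetscObject)v1,(PetscObject)v2};
     PetscInt       count,numbering[2];
     PetscErrorCode ierr;
     ierr = PetscObjectsListGetGlobalNumbering(PETSC_COMM_WORLD,2,objs,&count,numbering);CHKERRQ(ierr);
*/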