Actual source code: dasub.c

/*
  Code for manipulating distributed regular arrays in parallel.
*/

#include "src/dm/da/daimpl.h"

/*@C
   DAGetProcessorSubset - Returns a communicator consisting only of the
   processors in a DA that own a particular global x, y, or z grid point
   (corresponding to a logical plane in a 3D grid or a line in a 2D grid).

   Collective on DA

   Input Parameters:
+  da - the distributed array
.  dir - Cartesian direction, either DA_X, DA_Y, or DA_Z
-  gp - global grid point number in this direction

   Output Parameter:
.  comm - new communicator

   Level: advanced

   Notes:
   All processors that share the DA must call this routine with the same gp value.

   This routine is particularly useful for computing boundary conditions
   or performing other application-specific calculations that require
   manipulating sets of data throughout a logical plane of grid points.

.keywords: distributed array, get, processor subset
@*/
PetscErrorCode DAGetProcessorSubset(DA da,DADirection dir,PetscInt gp,MPI_Comm *comm)
{
  MPI_Group      group,subgroup;
  PetscInt       i,ict,flag,*owners,xs,xm,ys,ym,zs,zm;
  PetscMPIInt    size,*ranks;

  flag = 0;
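  /* Local ownership range: (xs,ys,zs) is the first locally owned global index and
     (xm,ym,zm) the number of locally owned points in each direction (no ghost points). */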
  DAGetCorners(da,&xs,&xm,&ys,&ym,&zs,&zm);
  MPI_Comm_size(da->comm,&size);
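  /* Set flag to 1 if this process's locally owned index range contains the requested
     grid point gp in the chosen direction; also validate dir and gp. */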
  if (dir == DA_Z) {
    if (da->dim < 3) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"DA_Z invalid for DA dim < 3");
    if (gp < 0 || gp >= da->P) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"invalid grid point");
    if (gp >= zs && gp < zs+zm) flag = 1;
  } else if (dir == DA_Y) {
    if (da->dim == 1) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"DA_Y invalid for DA dim = 1");
    if (gp < 0 || gp >= da->N) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"invalid grid point");
    if (gp >= ys && gp < ys+ym) flag = 1;
  } else if (dir == DA_X) {
    if (gp < 0 || gp >= da->M) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"invalid grid point");
    if (gp >= xs && gp < xs+xm) flag = 1;
  } else SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Invalid direction");

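  /* Gather every process's flag so each rank learns which ranks own the requested
     plane, then collect those ranks into the ranks[] array. */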
  PetscMalloc2(size,PetscInt,&owners,size,PetscMPIInt,&ranks);
  MPI_Allgather(&flag,1,MPIU_INT,owners,1,MPIU_INT,da->comm);
  ict = 0;
  PetscLogInfo(da,"DAGetProcessorSubset: dim=%D, direction=%d, procs: ",da->dim,(int)dir);
  for (i=0; i<size; i++) {
    if (owners[i]) {
      ranks[ict] = i; ict++;
      PetscLogInfo(da,"%D ",i);
    }
  }
  PetscLogInfo(da,"\n");
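  /* Build a subgroup containing only the flagged ranks and create the new communicator;
     MPI_Comm_create() sets *comm to MPI_COMM_NULL on ranks that are not in the subgroup. */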
  MPI_Comm_group(da->comm,&group);
  MPI_Group_incl(group,ict,ranks,&subgroup);
  MPI_Comm_create(da->comm,subgroup,comm);
  MPI_Group_free(&subgroup);
  MPI_Group_free(&group);
  PetscFree2(owners,ranks);
  return(0);
}
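
/*
   Usage sketch (not part of dasub.c): a hedged example of how an application might
   call DAGetProcessorSubset() to work only on the processes that own the z = 0 plane
   of a 3D DA, e.g. to apply a boundary condition there.  The helper name
   ApplyBottomBoundary and the work inside the if-block are hypothetical; the call
   itself is collective, so every rank of the DA's communicator must make it with the
   same gp, and ranks that do not own the plane receive MPI_COMM_NULL.
*/
#include "petscda.h"

PetscErrorCode ApplyBottomBoundary(DA da)
{
  PetscErrorCode ierr;
  MPI_Comm       subcomm;

  /* Collective on the DA: all ranks pass the same direction and grid point */
  ierr = DAGetProcessorSubset(da,DA_Z,0,&subcomm);CHKERRQ(ierr);
  if (subcomm != MPI_COMM_NULL) {
    /* ... work collectively on the z = 0 plane using subcomm ... */
    ierr = MPI_Comm_free(&subcomm);CHKERRQ(ierr);  /* assumed: caller releases the subset communicator */
  }
  return(0);
}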