Actual source code: dasub.c (petsc-3.3-p7, 2013-05-11)

/*
  Code for manipulating distributed regular arrays in parallel.
*/

#include <petsc-private/daimpl.h>    /*I   "petscdmda.h"   I*/
/*@C
   DMDAGetProcessorSubset - Returns a communicator consisting only of the
   processors in a DMDA that own a particular global x, y, or z grid point
   (corresponding to a logical plane in a 3D grid or a line in a 2D grid).

   Collective on DMDA

   Input Parameters:
+  da - the distributed array
.  dir - Cartesian direction, either DMDA_X, DMDA_Y, or DMDA_Z
-  gp - global grid point number in this direction

   Output Parameter:
.  comm - new communicator

   Level: advanced

   Notes:
   All processors that share the DMDA must call this routine with the same gp value.

   This routine is particularly useful for computing boundary conditions
   or for other application-specific calculations that require manipulating
   sets of data throughout a logical plane of grid points.

.keywords: distributed array, get, processor subset
@*/
PetscErrorCode  DMDAGetProcessorSubset(DM da,DMDADirection dir,PetscInt gp,MPI_Comm *comm)
{
  MPI_Group      group,subgroup;
  PetscErrorCode ierr;
  PetscInt       i,ict,flag,*owners,xs,xm,ys,ym,zs,zm;
  PetscMPIInt    size,*ranks = PETSC_NULL;
  DM_DA          *dd = (DM_DA*)da->data;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(da,DM_CLASSID,1);
  flag = 0;
  ierr = DMDAGetCorners(da,&xs,&ys,&zs,&xm,&ym,&zm);CHKERRQ(ierr);
  ierr = MPI_Comm_size(((PetscObject)da)->comm,&size);CHKERRQ(ierr);
  /* Determine whether this process owns grid point gp in the requested direction */
  if (dir == DMDA_Z) {
    if (dd->dim < 3) SETERRQ(((PetscObject)da)->comm,PETSC_ERR_ARG_OUTOFRANGE,"DMDA_Z invalid for DMDA dim < 3");
    if (gp < 0 || gp > dd->P) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"invalid grid point");
    if (gp >= zs && gp < zs+zm) flag = 1;
  } else if (dir == DMDA_Y) {
    if (dd->dim == 1) SETERRQ(((PetscObject)da)->comm,PETSC_ERR_ARG_OUTOFRANGE,"DMDA_Y invalid for DMDA dim = 1");
    if (gp < 0 || gp > dd->N) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"invalid grid point");
    if (gp >= ys && gp < ys+ym) flag = 1;
  } else if (dir == DMDA_X) {
    if (gp < 0 || gp > dd->M) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"invalid grid point");
    if (gp >= xs && gp < xs+xm) flag = 1;
  } else SETERRQ(((PetscObject)da)->comm,PETSC_ERR_ARG_OUTOFRANGE,"Invalid direction");

  /* Gather the ownership flags and collect the ranks that own the plane (or line) */
  ierr = PetscMalloc2(size,PetscInt,&owners,size,PetscMPIInt,&ranks);CHKERRQ(ierr);
  ierr = MPI_Allgather(&flag,1,MPIU_INT,owners,1,MPIU_INT,((PetscObject)da)->comm);CHKERRQ(ierr);
  ict  = 0;
  ierr = PetscInfo2(da,"DMDAGetProcessorSubset: dim=%D, direction=%d, procs: ",dd->dim,(int)dir);CHKERRQ(ierr);
  for (i=0; i<size; i++) {
    if (owners[i]) {
      ranks[ict] = i; ict++;
      ierr = PetscInfo1(da,"%D ",i);CHKERRQ(ierr);
    }
  }
  ierr = PetscInfo(da,"\n");CHKERRQ(ierr);
  /* Build the subcommunicator from the owning ranks; other processes get MPI_COMM_NULL */
  ierr = MPI_Comm_group(((PetscObject)da)->comm,&group);CHKERRQ(ierr);
  ierr = MPI_Group_incl(group,ict,ranks,&subgroup);CHKERRQ(ierr);
  ierr = MPI_Comm_create(((PetscObject)da)->comm,subgroup,comm);CHKERRQ(ierr);
  ierr = MPI_Group_free(&subgroup);CHKERRQ(ierr);
  ierr = MPI_Group_free(&group);CHKERRQ(ierr);
  ierr = PetscFree2(owners,ranks);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
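
/*
   Usage sketch (illustrative only, not part of dasub.c): obtain the
   communicator of the processes that own the z = 0 plane of a 3D DMDA and do
   plane-local work there.  The DMDA "da" and the helper name ApplyZ0PlaneBC
   are assumptions for the example; processes that do not own the plane
   receive MPI_COMM_NULL from the underlying MPI_Comm_create() and skip the
   work.
*/
static PetscErrorCode ApplyZ0PlaneBC(DM da)
{
  MPI_Comm       plane;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = DMDAGetProcessorSubset(da,DMDA_Z,0,&plane);CHKERRQ(ierr);
  if (plane != MPI_COMM_NULL) {
    PetscMPIInt prank;
    ierr = MPI_Comm_rank(plane,&prank);CHKERRQ(ierr);  /* rank within the plane communicator */
    /* ... compute boundary values on the locally owned part of the z = 0 plane ... */
    ierr = MPI_Comm_free(&plane);CHKERRQ(ierr);        /* communicator came from MPI_Comm_create() */
  }
  PetscFunctionReturn(0);
}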

/*@C
   DMDAGetProcessorSubsets - Returns communicators consisting only of the
   processors in a DMDA that are adjacent in a particular dimension,
   corresponding to a logical plane in a 3D grid or a line in a 2D grid.

   Collective on DMDA

   Input Parameters:
+  da - the distributed array
-  dir - Cartesian direction, either DMDA_X, DMDA_Y, or DMDA_Z

   Output Parameter:
.  subcomm - new communicator

   Level: advanced

   Notes:
   This routine is useful for distributing one-dimensional data in a tensor product grid.

.keywords: distributed array, get, processor subset
@*/
PetscErrorCode  DMDAGetProcessorSubsets(DM da, DMDADirection dir, MPI_Comm *subcomm)
{
  MPI_Comm       comm;
  MPI_Group      group, subgroup;
  PetscInt       subgroupSize = 0;
  PetscInt      *firstPoints;
  PetscMPIInt    size, *subgroupRanks = PETSC_NULL;
  PetscInt       xs, xm, ys, ym, zs, zm, firstPoint, p;
  PetscErrorCode ierr;
  DM_DA          *dd = (DM_DA*)da->data;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(da, DM_CLASSID, 1);
  comm = ((PetscObject) da)->comm;
  ierr = DMDAGetCorners(da, &xs, &ys, &zs, &xm, &ym, &zm);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm, &size);CHKERRQ(ierr);
  /* The first locally owned index in the requested direction identifies this process's slab */
  if (dir == DMDA_Z) {
    if (dd->dim < 3) SETERRQ(comm,PETSC_ERR_ARG_OUTOFRANGE,"DMDA_Z invalid for DMDA dim < 3");
    firstPoint = zs;
  } else if (dir == DMDA_Y) {
    if (dd->dim == 1) SETERRQ(comm,PETSC_ERR_ARG_OUTOFRANGE,"DMDA_Y invalid for DMDA dim = 1");
    firstPoint = ys;
  } else if (dir == DMDA_X) {
    firstPoint = xs;
  } else SETERRQ(comm,PETSC_ERR_ARG_OUTOFRANGE,"Invalid direction");

  /* Gather the first points and group together the ranks that share this one */
  ierr = PetscMalloc2(size, PetscInt, &firstPoints, size, PetscMPIInt, &subgroupRanks);CHKERRQ(ierr);
  ierr = MPI_Allgather(&firstPoint, 1, MPIU_INT, firstPoints, 1, MPIU_INT, comm);CHKERRQ(ierr);
  ierr = PetscInfo2(da,"DMDAGetProcessorSubsets: dim=%D, direction=%d, procs: ",dd->dim,(int)dir);CHKERRQ(ierr);
  for (p = 0; p < size; ++p) {
    if (firstPoints[p] == firstPoint) {
      subgroupRanks[subgroupSize++] = p;
      ierr = PetscInfo1(da, "%D ", p);CHKERRQ(ierr);
    }
  }
  ierr = PetscInfo(da, "\n");CHKERRQ(ierr);
  /* Each process ends up in exactly one subcommunicator */
  ierr = MPI_Comm_group(comm, &group);CHKERRQ(ierr);
  ierr = MPI_Group_incl(group, subgroupSize, subgroupRanks, &subgroup);CHKERRQ(ierr);
  ierr = MPI_Comm_create(comm, subgroup, subcomm);CHKERRQ(ierr);
  ierr = MPI_Group_free(&subgroup);CHKERRQ(ierr);
  ierr = MPI_Group_free(&group);CHKERRQ(ierr);
  ierr = PetscFree2(firstPoints, subgroupRanks);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
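
/*
   Usage sketch (illustrative only, not part of dasub.c): group the processes
   that own the same range of x indices and broadcast a one-dimensional x
   profile within each group, one way to distribute one-dimensional data in a
   tensor product grid.  The DMDA "da", the helper name ShareXProfile, and the
   assumption that rank 0 of each x subcommunicator already holds the profile
   values for its x range are all hypothetical.
*/
static PetscErrorCode ShareXProfile(DM da,PetscScalar *xdata,PetscMPIInt nx)
{
  MPI_Comm       xcomm;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = DMDAGetProcessorSubsets(da,DMDA_X,&xcomm);CHKERRQ(ierr);
  /* every process belongs to exactly one subcommunicator, so xcomm is never MPI_COMM_NULL */
  ierr = MPI_Bcast(xdata,nx,MPIU_SCALAR,0,xcomm);CHKERRQ(ierr);
  ierr = MPI_Comm_free(&xcomm);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}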