Actual source code: ex4.c

petsc-3.4.5 2014-06-29
static char help[] = "Test MatSetValuesBatch: setting batches of elements using the GPU.\n\
This works with SeqAIJCUSP and MPIAIJCUSP matrices.\n\n";
#include <petscdmda.h>
#include <petscksp.h>

/* We will use a structured mesh for this assembly test. Each square will be divided into two triangles:
  C       D
   _______
  |\      | The matrix for 0 and 1 is /   1  -0.5 -0.5 \
  | \   1 |                           | -0.5  0.5  0.0 |
  |  \    |                           \ -0.5  0.0  0.5 /
  |   \   |
  |    \  |
  |  0  \ |
  |      \|
  ---------
  A       B
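
  Where that element matrix comes from (a derivation note added for clarity; h denotes the grid spacing):
  on the lower triangle with vertices A=(0,0), B=(h,0), C=(0,h) the P1 basis functions are
    \phi_A = 1 - x/h - y/h,  \phi_B = x/h,  \phi_C = y/h,
  so their gradients are the constants (-1/h,-1/h), (1/h,0), (0,1/h). With |T| = h^2/2 the entries
  K_{ij} = \int_T \nabla\phi_i \cdot \nabla\phi_j \, dx come out independent of h:
    K = /   1  -0.5 -0.5 \
        | -0.5  0.5  0.0 |
        \ -0.5  0.0  0.5 /
  The upper triangle (D, C, B) gives the same matrix by symmetry, which is why the assembly loop
  below writes an identical 3x3 block for both triangles of every square.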

TO ADD:
  DONE 1) Build and run on baconost
    - Gather data for CPU/GPU up to da_grid_x 1300
      - Looks 6x faster than the CPU
    - Make plot

  DONE 2) Solve the Neumann Poisson problem

  3) Multi-GPU Assembly (a CPU-side sketch of the row filtering from (a)/(e) appears after this comment)
    - MPIAIJCUSP: Just have two SEQAIJCUSP matrices, nothing else special
    a) Filter rows to be sent to other procs (normally stashed)
    b) Send/recv rows, might as well do it with a VecScatter
    c) Potential to overlap this computation with the GPU (talk to Nathan)
    c') Just shove these rows in after the local ones
    d) Have an implicit representation of the COO from repeated/tiled_range
    e) Do a filtered copy, decrementing rows and remapping columns, which splits the entries into two sets
    f) Make two COO matrices and do separate aggregation on each one

  4) Solve the Neumann Poisson problem in parallel
    - Try it on the GPU machine at Brown (they need another GNU install)

  5) GPU FEM integration
    - Move launch code to PETSc, or try again now that assembly is in PETSc
    - Move build code to PETSc

  6) Try out CUSP PCs
*/
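
/* The following is only an illustrative sketch of step (a)/(e) from the multi-GPU TODO above:
   splitting batched COO row indices by ownership on the CPU. It is not called by this example
   and is not the eventual MPIAIJCUSP implementation; the function name and the caller-allocated
   index arrays (each of length n) are assumptions made for the illustration. */
static PetscErrorCode SplitRowsByOwnership(PetscInt n, const PetscInt rows[], PetscInt rStart, PetscInt rEnd,
                                           PetscInt *nLocal, PetscInt localIdx[], PetscInt *nOffproc, PetscInt offprocIdx[])
{
  PetscInt i, nl = 0, no = 0;

  for (i = 0; i < n; ++i) {
    if (rows[i] >= rStart && rows[i] < rEnd) localIdx[nl++]   = i; /* row owned by this process, keep it local */
    else                                     offprocIdx[no++] = i; /* row owned elsewhere (normally stashed/sent) */
  }
  *nLocal   = nl;
  *nOffproc = no;
  return(0);
}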

PetscErrorCode IntegrateCells(DM dm, PetscInt *Ne, PetscInt *Nl, PetscInt **elemRows, PetscScalar **elemMats)
{
  DMDALocalInfo  info;
  PetscInt       *er;
  PetscScalar    *em;
  PetscInt       X, Y, dof;
  PetscInt       nl, nxe, nye, ne;
  PetscInt       k = 0, m  = 0;
  PetscInt       i, j;
  PetscLogEvent  integrationEvent;

  PetscLogEventRegister("ElemIntegration", DM_CLASSID, &integrationEvent);
  PetscLogEventBegin(integrationEvent,0,0,0,0);
  DMDAGetInfo(dm, 0, &X, &Y,0,0,0,0, &dof,0,0,0,0,0);
  DMDAGetLocalInfo(dm, &info);
  nl   = dof*3;
  nxe  = info.xm; if (info.xs+info.xm == X) nxe--;
  nye  = info.ym; if (info.ys+info.ym == Y) nye--;
  ne   = 2 * nxe * nye;
  *Ne  = ne;
  *Nl  = nl;
  PetscMalloc2(ne*nl, PetscInt, elemRows, ne*nl*nl, PetscScalar, elemMats);
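  /* Layout note (added): elemRows holds ne*nl global row indices, nl per element, and elemMats holds
     the ne dense nl x nl element matrices back to back (row-major within each element, as the loop
     below writes them); this flat layout is what MatSetValuesBatch consumes in main(). */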
  er   = *elemRows;
  em   = *elemMats;
  /* Proc 0        Proc 1                                               */
  /* xs: 0  xm: 3  xs: 0 xm: 3                                          */
  /* ys: 0  ym: 2  ys: 2 ym: 1                                          */
  /* 8 elements x 3 vertices = 24 element matrix rows and 72 entries    */
  /*   6 offproc rows containing 18 element matrix entries              */
  /*  18  onproc rows containing 54 element matrix entries              */
  /*   3 offproc columns in 8 element matrix entries                    */
  /*   so we should have 46 diagonal matrix entries                     */
  for (j = info.ys; j < info.ys+nye; ++j) {
    for (i = info.xs; i < info.xs+nxe; ++i) {
      PetscInt rowA = j*X     + i, rowB = j*X     + i+1;
      PetscInt rowC = (j+1)*X + i, rowD = (j+1)*X + i+1;

      /* Lower triangle */
      er[k+0] = rowA; em[m+0*nl+0] =  1.0; em[m+0*nl+1] = -0.5; em[m+0*nl+2] = -0.5;
      er[k+1] = rowB; em[m+1*nl+0] = -0.5; em[m+1*nl+1] =  0.5; em[m+1*nl+2] =  0.0;
      er[k+2] = rowC; em[m+2*nl+0] = -0.5; em[m+2*nl+1] =  0.0; em[m+2*nl+2] =  0.5;
      k      += nl; m += nl*nl;
      /* Upper triangle */
      er[k+0] = rowD; em[m+0*nl+0] =  1.0; em[m+0*nl+1] = -0.5; em[m+0*nl+2] = -0.5;
      er[k+1] = rowC; em[m+1*nl+0] = -0.5; em[m+1*nl+1] =  0.5; em[m+1*nl+2] =  0.0;
      er[k+2] = rowB; em[m+2*nl+0] = -0.5; em[m+2*nl+1] =  0.0; em[m+2*nl+2] =  0.5;
      k      += nl; m += nl*nl;
    }
  }
  PetscLogEventEnd(integrationEvent,0,0,0,0);
  return(0);
}

int main(int argc, char **argv)
{
  KSP            ksp;
  MatNullSpace   nullsp;
  DM             dm;
  Mat            A;
  Vec            x, b;
  PetscViewer    viewer;
  PetscInt       Nl, Ne;
  PetscInt       *elemRows;
  PetscScalar    *elemMats;
  PetscBool      doGPU = PETSC_TRUE, doCPU = PETSC_TRUE, doSolve = PETSC_FALSE, doView = PETSC_TRUE;
  PetscLogStage  gpuStage, cpuStage;

  PetscInitialize(&argc, &argv, 0, help);
  DMDACreate2d(PETSC_COMM_WORLD, DMDA_BOUNDARY_NONE, DMDA_BOUNDARY_NONE, DMDA_STENCIL_BOX, -3, -3, PETSC_DECIDE, PETSC_DECIDE, 1, 1, NULL, NULL, &dm);
  IntegrateCells(dm, &Ne, &Nl, &elemRows, &elemMats);
  PetscOptionsGetBool(NULL, "-view", &doView, NULL);
  /* Construct matrix using GPU */
  PetscOptionsGetBool(NULL, "-gpu", &doGPU, NULL);
  if (doGPU) {
    PetscLogStageRegister("GPU Stage", &gpuStage);
    PetscLogStagePush(gpuStage);
    DMCreateMatrix(dm, MATAIJ, &A);
    MatSetType(A, MATAIJCUSP);
    MatSeqAIJSetPreallocation(A, 0, NULL);
    MatMPIAIJSetPreallocation(A, 0, NULL, 0, NULL);
    MatSetValuesBatch(A, Ne, Nl, elemRows, elemMats);
    MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY);
    MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY);
    if (doView) {
      PetscViewerASCIIOpen(PETSC_COMM_WORLD, NULL, &viewer);
      if (Ne > 500) {PetscViewerPushFormat(viewer, PETSC_VIEWER_ASCII_INFO);}
      MatView(A, viewer);
      PetscViewerDestroy(&viewer);
    }
    PetscLogStagePop();
    MatDestroy(&A);
  }
  /* Construct matrix using CPU */
  PetscOptionsGetBool(NULL, "-cpu", &doCPU, NULL);
  if (doCPU) {
    PetscLogStageRegister("CPU Stage", &cpuStage);
    PetscLogStagePush(cpuStage);
    DMCreateMatrix(dm, MATAIJ, &A);
    MatZeroEntries(A);
    MatSetValuesBatch(A, Ne, Nl, elemRows, elemMats);
    MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY);
    MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY);
    if (doView) {
      PetscViewerASCIIOpen(PETSC_COMM_WORLD, NULL, &viewer);
      if (Ne > 500) {PetscViewerPushFormat(viewer, PETSC_VIEWER_ASCII_INFO);}
      MatView(A, viewer);
      PetscViewerDestroy(&viewer);
    }
    PetscLogStagePop();
  }
  /* Solve simple system with random rhs */
  PetscOptionsGetBool(NULL, "-solve", &doSolve, NULL);
  if (doSolve) {
    MatGetVecs(A, &x, &b);
    VecSetRandom(b, NULL);
    KSPCreate(PETSC_COMM_WORLD, &ksp);
    KSPSetOperators(ksp, A, A, DIFFERENT_NONZERO_PATTERN);
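    /* The pure Neumann operator assembled above is singular: constant vectors lie in its null space,
       so register that null space with the KSP before solving. */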
    MatNullSpaceCreate(PETSC_COMM_WORLD, PETSC_TRUE, 0, NULL, &nullsp);
    KSPSetNullSpace(ksp, nullsp);
    MatNullSpaceDestroy(&nullsp);
    KSPSetFromOptions(ksp);
    KSPSolve(ksp, b, x);
    VecDestroy(&x);
    VecDestroy(&b);
    KSPDestroy(&ksp);
    /* Solve physical system:

         -\Delta u = -6 (x + y - 1)

       where u = x^3 - 3/2 x^2 + y^3 - 3/2 y^2 + 1/2,
       so \Delta u = 6 x - 3 + 6 y - 3,
       and \frac{\partial u}{\partial n} = {3x (x - 1), 3y (y - 1)} \cdot n
                                         = \pm 3x (x - 1) = 0 at x = 0,1
                                         = \pm 3y (y - 1) = 0 at y = 0,1
    */
  }
  /* Cleanup */
  MatDestroy(&A);
  PetscFree2(elemRows, elemMats);
  DMDestroy(&dm);
  PetscFinalize();
  return 0;
}
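
/* Example invocations (suggested, not from the original source; executable name, launcher, and grid
   sizes are placeholders):
     ./ex4 -da_grid_x 100 -da_grid_y 100 -view 0
     ./ex4 -gpu 0 -solve -ksp_monitor
     mpiexec -n 2 ./ex4 -solve
   -gpu and -cpu select the two assembly paths, -view controls the MatView output, and -solve runs
   the KSP solve with a random right-hand side and the constant null space. */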