Actual source code: ex4.c

petsc-3.5.4 2015-05-23
static char help[] = "Test MatSetValuesBatch: setting batches of elements using the GPU.\n\
This works with SeqAIJCUSP and MPIAIJCUSP matrices.\n\n";

#include <petscdm.h>
#include <petscdmda.h>
#include <petscksp.h>

/* We will use a structured mesh for this assembly test. Each square is divided into two triangles, labeled 0 and 1 below (the shared element matrix is derived in the comment after this block):
  C       D
   _______
  |\      | The matrix for 0 and 1 is /   1  -0.5 -0.5 \
  | \   1 |                           | -0.5  0.5  0.0 |
  |  \    |                           \ -0.5  0.0  0.5 /
  |   \   |
  |    \  |
  |  0  \ |
  |      \|
  ---------
  A       B

TO ADD:
  DONE 1) Build and run on baconost
    - Gather data for CPU/GPU up to da_grid_x 1300
      - GPU assembly looks 6x faster than the CPU
    - Make plot

  DONE 2) Solve the Neumann Poisson problem

  3) Multi-GPU Assembly
    - MPIAIJCUSP: Just have two SEQAIJCUSP matrices, nothing else special
    a) Filter rows to be sent to other procs (normally stashed)
    b) Send/recv rows; might as well do it with a VecScatter
    c) Potential to overlap this computation with the GPU (talk to Nathan)
    c') Just shove these rows in after the local ones
    d) Have an implicit representation of the COO matrix from repeated/tiled_range
    e) Do a filtered copy, decrementing rows and remapping columns, which splits into two sets
    f) Make two COO matrices and do separate aggregation on each one

  4) Solve the Neumann Poisson problem in parallel
    - Try it on the GPU machine at Brown (they need another GNU install)

  5) GPU FEM integration
    - Move launch code to PETSc, or try again now that assembly is in PETSc
    - Move build code to PETSc

  6) Try out CUSP PCs
*/
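
/* Where the element matrix comes from (a short derivation, matching the values hard-coded
   in IntegrateCells() below): for P1 (linear) elements on a right triangle with vertices
   A = (0,0), B = (h,0), C = (0,h), the basis gradients are constant,

     grad phi_A = (-1,-1)/h,   grad phi_B = (1,0)/h,   grad phi_C = (0,1)/h,

   and the Laplacian element matrix is K_ij = area * grad phi_i . grad phi_j with
   area = h^2/2, so the mesh spacing h cancels:

     K = /   1  -0.5 -0.5 \
         | -0.5  0.5  0.0 |
         \ -0.5  0.0  0.5 /

   The rows are ordered with the right-angle vertex first (A,B,C for triangle 0,
   D,C,B for triangle 1). */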

PetscErrorCode IntegrateCells(DM dm, PetscInt *Ne, PetscInt *Nl, PetscInt **elemRows, PetscScalar **elemMats)
{
  DMDALocalInfo  info;
  PetscInt       *er;
  PetscScalar    *em;
  PetscInt       X, Y, dof;
  PetscInt       nl, nxe, nye, ne;
  PetscInt       k = 0, m  = 0;
  PetscInt       i, j;
  PetscLogEvent  integrationEvent;

  PetscLogEventRegister("ElemIntegration", DM_CLASSID, &integrationEvent);
  PetscLogEventBegin(integrationEvent,0,0,0,0);
  DMDAGetInfo(dm, 0, &X, &Y,0,0,0,0, &dof,0,0,0,0,0);
  DMDAGetLocalInfo(dm, &info);
  nl   = dof*3;
  nxe  = info.xm; if (info.xs+info.xm == X) nxe--;
  nye  = info.ym; if (info.ys+info.ym == Y) nye--;
  ne   = 2 * nxe * nye;
  *Ne  = ne;
  *Nl  = nl;
  PetscMalloc2(ne*nl, elemRows, ne*nl*nl, elemMats);
  er   = *elemRows;
  em   = *elemMats;
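  /* Layout consumed by MatSetValuesBatch(): er holds nl global row indices per element
     (ne*nl entries in total), and em holds one dense nl x nl element matrix per element,
     stored contiguously in row-major order (ne*nl*nl scalars). The same indices are used
     for both the rows and the columns of each element matrix. */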
  /* Proc 0        Proc 1                                               */
  /* xs: 0  xm: 3  xs: 0 xm: 3                                          */
  /* ys: 0  ym: 2  ys: 2 ym: 1                                          */
  /* 8 elements x 3 vertices = 24 element matrix rows and 72 entries    */
  /*   6 offproc rows containing 18 element matrix entries              */
  /*  18  onproc rows containing 54 element matrix entries              */
  /*   3 offproc columns in 8 element matrix entries                    */
  /*   so we should have 46 diagonal matrix entries                     */
  for (j = info.ys; j < info.ys+nye; ++j) {
    for (i = info.xs; i < info.xs+nxe; ++i) {
      PetscInt rowA = j*X     + i, rowB = j*X     + i+1;
      PetscInt rowC = (j+1)*X + i, rowD = (j+1)*X + i+1;

      /* Lower triangle */
      er[k+0] = rowA; em[m+0*nl+0] =  1.0; em[m+0*nl+1] = -0.5; em[m+0*nl+2] = -0.5;
      er[k+1] = rowB; em[m+1*nl+0] = -0.5; em[m+1*nl+1] =  0.5; em[m+1*nl+2] =  0.0;
      er[k+2] = rowC; em[m+2*nl+0] = -0.5; em[m+2*nl+1] =  0.0; em[m+2*nl+2] =  0.5;
      k      += nl; m += nl*nl;
      /* Upper triangle */
      er[k+0] = rowD; em[m+0*nl+0] =  1.0; em[m+0*nl+1] = -0.5; em[m+0*nl+2] = -0.5;
      er[k+1] = rowC; em[m+1*nl+0] = -0.5; em[m+1*nl+1] =  0.5; em[m+1*nl+2] =  0.0;
      er[k+2] = rowB; em[m+2*nl+0] = -0.5; em[m+2*nl+1] =  0.0; em[m+2*nl+2] =  0.5;
      k      += nl; m += nl*nl;
    }
  }
  PetscLogEventEnd(integrationEvent,0,0,0,0);
  return(0);
}
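
/* For comparison only (this helper is not called by the test): the same element data could be
   inserted one element at a time with MatSetValues(). MatSetValuesBatch() in main() performs
   the equivalent accumulation in a single call, which is what lets the insertion be aggregated
   on the GPU. A minimal sketch, assuming the elemRows/elemMats layout produced by
   IntegrateCells() above: */
PetscErrorCode AssembleElementByElement(Mat A, PetscInt Ne, PetscInt Nl, const PetscInt elemRows[], const PetscScalar elemMats[])
{
  PetscInt e;

  for (e = 0; e < Ne; ++e) {
    /* each element contributes a dense Nl x Nl block; rows and columns use the same indices */
    MatSetValues(A, Nl, &elemRows[e*Nl], Nl, &elemRows[e*Nl], &elemMats[e*Nl*Nl], ADD_VALUES);
  }
  MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY);
  MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY);
  return(0);
}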

int main(int argc, char **argv)
{
  KSP            ksp;
  MatNullSpace   nullsp;
  DM             dm;
  Mat            A;
  Vec            x, b;
  PetscViewer    viewer;
  PetscInt       Nl, Ne;
  PetscInt       *elemRows;
  PetscScalar    *elemMats;
  PetscBool      doGPU = PETSC_TRUE, doCPU = PETSC_TRUE, doSolve = PETSC_FALSE, doView = PETSC_TRUE;
  PetscLogStage  gpuStage, cpuStage;

  PetscInitialize(&argc, &argv, 0, help);
  DMDACreate2d(PETSC_COMM_WORLD, DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, DMDA_STENCIL_BOX, -3, -3, PETSC_DECIDE, PETSC_DECIDE, 1, 1, NULL, NULL, &dm);
  IntegrateCells(dm, &Ne, &Nl, &elemRows, &elemMats);
  PetscOptionsGetBool(NULL, "-view", &doView, NULL);
  /* Construct matrix using GPU */
  PetscOptionsGetBool(NULL, "-gpu", &doGPU, NULL);
  if (doGPU) {
    PetscLogStageRegister("GPU Stage", &gpuStage);
    PetscLogStagePush(gpuStage);
    DMSetMatType(dm,MATAIJ);
    DMCreateMatrix(dm, &A);
    MatSetType(A, MATAIJCUSP);
    MatSeqAIJSetPreallocation(A, 0, NULL);
    MatMPIAIJSetPreallocation(A, 0, NULL, 0, NULL);
    MatSetValuesBatch(A, Ne, Nl, elemRows, elemMats);
    MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY);
    MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY);
    if (doView) {
      PetscViewerASCIIOpen(PETSC_COMM_WORLD, NULL, &viewer);
      if (Ne > 500) {PetscViewerPushFormat(viewer, PETSC_VIEWER_ASCII_INFO);}
      MatView(A, viewer);
      PetscViewerDestroy(&viewer);
    }
    PetscLogStagePop();
    MatDestroy(&A);
  }
  /* Construct matrix using CPU */
  PetscOptionsGetBool(NULL, "-cpu", &doCPU, NULL);
  if (doCPU) {
    PetscLogStageRegister("CPU Stage", &cpuStage);
    PetscLogStagePush(cpuStage);
    DMSetMatType(dm,MATAIJ);
    DMCreateMatrix(dm, &A);
    MatZeroEntries(A);
    MatSetValuesBatch(A, Ne, Nl, elemRows, elemMats);
    MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY);
    MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY);
    if (doView) {
      PetscViewerASCIIOpen(PETSC_COMM_WORLD, NULL, &viewer);
      if (Ne > 500) {PetscViewerPushFormat(viewer, PETSC_VIEWER_ASCII_INFO);}
      MatView(A, viewer);
      PetscViewerDestroy(&viewer);
    }
    PetscLogStagePop();
  }
  /* Solve a simple system with a random rhs (this uses the CPU-assembled matrix, since the GPU
     copy was destroyed above, so -solve requires -cpu) */
  PetscOptionsGetBool(NULL, "-solve", &doSolve, NULL);
  if (doSolve) {
    MatGetVecs(A, &x, &b);
    VecSetRandom(b, NULL);
    KSPCreate(PETSC_COMM_WORLD, &ksp);
    KSPSetOperators(ksp, A, A);
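    /* The Neumann Poisson operator is singular: each element matrix has zero row sums, so
       constant vectors lie in the null space of A. The constant null space is attached to
       the KSP so the solver can take it into account. */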
    MatNullSpaceCreate(PETSC_COMM_WORLD, PETSC_TRUE, 0, NULL, &nullsp);
    KSPSetNullSpace(ksp, nullsp);
    MatNullSpaceDestroy(&nullsp);
    KSPSetFromOptions(ksp);
    KSPSolve(ksp, b, x);
    VecDestroy(&x);
    VecDestroy(&b);
    /* Solve physical system:

         -\Delta u = -6 (x + y - 1)

       where u = x^3 - 3/2 x^2 + y^3 - 3/2 y^2 + 1/2,
       so \Delta u = 6 x - 3 + 6 y - 3,
       and \frac{\partial u}{\partial n} = {3x (x - 1), 3y (y - 1)} \cdot n
                                         = \pm 3x (x - 1) = 0 at x = 0, 1
                                         = \pm 3y (y - 1) = 0 at y = 0, 1
    */
  }
  /* Cleanup */
  MatDestroy(&A);
  PetscFree2(elemRows, elemMats);
  DMDestroy(&dm);
  PetscFinalize();
  return 0;
}
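
/* Example run lines (a sketch; assumes a CUSP-enabled PETSc build and this file built as ex4):

     ./ex4 -da_grid_x 100 -da_grid_y 100 -view 0 -cpu 0
     ./ex4 -da_grid_x 100 -da_grid_y 100 -view 0 -solve -ksp_monitor

   -gpu and -cpu select which assembly path runs, -view controls whether the assembled matrix
   is printed (large matrices are summarized), and -solve runs a KSP solve with a random
   right-hand side using the CPU-assembled matrix. */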