Actual source code: mpiaijcusparse.cu

petsc-master 2019-12-07
#define PETSC_SKIP_SPINLOCK
#define PETSC_SKIP_CXX_COMPLEX_FIX
#define PETSC_SKIP_IMMINTRIN_H_CUDAWORKAROUND 1

#include <petscconf.h>
#include <../src/mat/impls/aij/mpi/mpiaij.h>
#include <../src/mat/impls/aij/seq/seqcusparse/cusparsematimpl.h>
#include <../src/mat/impls/aij/mpi/mpicusparse/mpicusparsematimpl.h>

PetscErrorCode  MatMPIAIJSetPreallocation_MPIAIJCUSPARSE(Mat B,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[])
{
  Mat_MPIAIJ         *b              = (Mat_MPIAIJ*)B->data;
  Mat_MPIAIJCUSPARSE *cusparseStruct = (Mat_MPIAIJCUSPARSE*)b->spptr;
  PetscErrorCode     ierr;
  PetscInt           i;

  PetscLayoutSetUp(B->rmap);
  PetscLayoutSetUp(B->cmap);
  if (d_nnz) {
    for (i=0; i<B->rmap->n; i++) {
      if (d_nnz[i] < 0) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"d_nnz cannot be less than 0: local row %D value %D",i,d_nnz[i]);
    }
  }
  if (o_nnz) {
    for (i=0; i<B->rmap->n; i++) {
      if (o_nnz[i] < 0) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"o_nnz cannot be less than 0: local row %D value %D",i,o_nnz[i]);
    }
  }
  if (!B->preallocated) {
    /* Explicitly create 2 MATSEQAIJCUSPARSE matrices. */
    MatCreate(PETSC_COMM_SELF,&b->A);
    MatPinToCPU(b->A,B->pinnedtocpu);
    MatSetSizes(b->A,B->rmap->n,B->cmap->n,B->rmap->n,B->cmap->n);
    MatSetType(b->A,MATSEQAIJCUSPARSE);
    PetscLogObjectParent((PetscObject)B,(PetscObject)b->A);
    MatCreate(PETSC_COMM_SELF,&b->B);
    MatPinToCPU(b->B,B->pinnedtocpu);
    MatSetSizes(b->B,B->rmap->n,B->cmap->N,B->rmap->n,B->cmap->N);
    MatSetType(b->B,MATSEQAIJCUSPARSE);
    PetscLogObjectParent((PetscObject)B,(PetscObject)b->B);
  }
  MatSeqAIJSetPreallocation(b->A,d_nz,d_nnz);
  MatSeqAIJSetPreallocation(b->B,o_nz,o_nnz);
  MatCUSPARSESetFormat(b->A,MAT_CUSPARSE_MULT,cusparseStruct->diagGPUMatFormat);
  MatCUSPARSESetFormat(b->B,MAT_CUSPARSE_MULT,cusparseStruct->offdiagGPUMatFormat);
  MatCUSPARSESetHandle(b->A,cusparseStruct->handle);
  MatCUSPARSESetHandle(b->B,cusparseStruct->handle);
  MatCUSPARSESetStream(b->A,cusparseStruct->stream);
  MatCUSPARSESetStream(b->B,cusparseStruct->stream);

  B->preallocated = PETSC_TRUE;
  return(0);
}

PetscErrorCode MatMult_MPIAIJCUSPARSE(Mat A,Vec xx,Vec yy)
{
  /*
     This multiplication sequence differs from the CPU version.
     In particular, the diagonal block multiplication kernel is
     launched in one stream. Then, in a separate stream, the data
     transfers from DeviceToHost (with MPI messaging in between),
     then HostToDevice are launched. Once the data transfer stream
     is synchronized, to ensure messaging is complete, the
     MatMultAdd kernel is launched in the original (MatMult) stream
     to protect against race conditions.
  */
  Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
  PetscInt       nt;

  VecGetLocalSize(xx,&nt);
  if (nt != A->cmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Incompatible partition of A (%D) and xx (%D)",A->cmap->n,nt);
  VecScatterInitializeForGPU(a->Mvctx,xx);
  (*a->A->ops->mult)(a->A,xx,yy);
  VecScatterBegin(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
  VecScatterEnd(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
  (*a->B->ops->multadd)(a->B,a->lvec,yy,yy);
  VecScatterFinalizeForGPU(a->Mvctx);
  return(0);
}

PetscErrorCode MatMultAdd_MPIAIJCUSPARSE(Mat A,Vec xx,Vec yy,Vec zz)
{
  /*
     This multiplication sequence differs from the CPU version.
     In particular, the diagonal block multiplication kernel is
     launched in one stream. Then, in a separate stream, the data
     transfers from DeviceToHost (with MPI messaging in between),
     then HostToDevice are launched. Once the data transfer stream
     is synchronized, to ensure messaging is complete, the
     MatMultAdd kernel is launched in the original (MatMult) stream
     to protect against race conditions.
  */
  Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
  PetscInt       nt;

  VecGetLocalSize(xx,&nt);
  if (nt != A->cmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Incompatible partition of A (%D) and xx (%D)",A->cmap->n,nt);
  VecScatterInitializeForGPU(a->Mvctx,xx);
  (*a->A->ops->multadd)(a->A,xx,yy,zz);
  VecScatterBegin(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
  VecScatterEnd(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
  (*a->B->ops->multadd)(a->B,a->lvec,zz,zz);
  VecScatterFinalizeForGPU(a->Mvctx);
  return(0);
}

PetscErrorCode MatMultTranspose_MPIAIJCUSPARSE(Mat A,Vec xx,Vec yy)
{
  /* This multiplication sequence differs from the CPU version.
     In particular, the diagonal block multiplication kernel is
     launched in one stream. Then, in a separate stream, the data
     transfers from DeviceToHost (with MPI messaging in between),
     then HostToDevice are launched. Once the data transfer stream
     is synchronized, to ensure messaging is complete, the
     MatMultAdd kernel is launched in the original (MatMult) stream
     to protect against race conditions.

     This sequence should only be called for GPU computation. */
  Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
  PetscInt       nt;

  VecGetLocalSize(xx,&nt);
  if (nt != A->rmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Incompatible partition of A (%D) and xx (%D)",A->rmap->n,nt);
  VecScatterInitializeForGPU(a->Mvctx,a->lvec);
  (*a->B->ops->multtranspose)(a->B,xx,a->lvec);
  (*a->A->ops->multtranspose)(a->A,xx,yy);
  VecScatterBegin(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);
  VecScatterEnd(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);
  VecScatterFinalizeForGPU(a->Mvctx);
  return(0);
}

PetscErrorCode MatCUSPARSESetFormat_MPIAIJCUSPARSE(Mat A,MatCUSPARSEFormatOperation op,MatCUSPARSEStorageFormat format)
{
  Mat_MPIAIJ         *a              = (Mat_MPIAIJ*)A->data;
  Mat_MPIAIJCUSPARSE *cusparseStruct = (Mat_MPIAIJCUSPARSE*)a->spptr;

  switch (op) {
  case MAT_CUSPARSE_MULT_DIAG:
    cusparseStruct->diagGPUMatFormat = format;
    break;
  case MAT_CUSPARSE_MULT_OFFDIAG:
    cusparseStruct->offdiagGPUMatFormat = format;
    break;
  case MAT_CUSPARSE_ALL:
    cusparseStruct->diagGPUMatFormat    = format;
    cusparseStruct->offdiagGPUMatFormat = format;
    break;
  default:
    SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Unsupported operation %d for MatCUSPARSEFormatOperation. Only MAT_CUSPARSE_MULT_DIAG, MAT_CUSPARSE_MULT_OFFDIAG, and MAT_CUSPARSE_ALL are currently supported.",op);
  }
  return(0);
}

PetscErrorCode MatSetFromOptions_MPIAIJCUSPARSE(PetscOptionItems *PetscOptionsObject,Mat A)
{
  MatCUSPARSEStorageFormat format;
  PetscErrorCode           ierr;
  PetscBool                flg;
  Mat_MPIAIJ               *a = (Mat_MPIAIJ*)A->data;
  Mat_MPIAIJCUSPARSE       *cusparseStruct = (Mat_MPIAIJCUSPARSE*)a->spptr;

  PetscOptionsHead(PetscOptionsObject,"MPIAIJCUSPARSE options");
  if (A->factortype==MAT_FACTOR_NONE) {
    PetscOptionsEnum("-mat_cusparse_mult_diag_storage_format","sets storage format of the diagonal blocks of (mpi)aijcusparse gpu matrices for SpMV",
                     "MatCUSPARSESetFormat",MatCUSPARSEStorageFormats,(PetscEnum)cusparseStruct->diagGPUMatFormat,(PetscEnum*)&format,&flg);
    if (flg) {
      MatCUSPARSESetFormat(A,MAT_CUSPARSE_MULT_DIAG,format);
    }
    PetscOptionsEnum("-mat_cusparse_mult_offdiag_storage_format","sets storage format of the off-diagonal blocks of (mpi)aijcusparse gpu matrices for SpMV",
                     "MatCUSPARSESetFormat",MatCUSPARSEStorageFormats,(PetscEnum)cusparseStruct->offdiagGPUMatFormat,(PetscEnum*)&format,&flg);
    if (flg) {
      MatCUSPARSESetFormat(A,MAT_CUSPARSE_MULT_OFFDIAG,format);
    }
    PetscOptionsEnum("-mat_cusparse_storage_format","sets storage format of the diagonal and off-diagonal blocks of (mpi)aijcusparse gpu matrices for SpMV",
                     "MatCUSPARSESetFormat",MatCUSPARSEStorageFormats,(PetscEnum)cusparseStruct->diagGPUMatFormat,(PetscEnum*)&format,&flg);
    if (flg) {
      MatCUSPARSESetFormat(A,MAT_CUSPARSE_ALL,format);
    }
  }
  PetscOptionsTail();
  return(0);
}

PetscErrorCode MatAssemblyEnd_MPIAIJCUSPARSE(Mat A,MatAssemblyType mode)
{
  Mat_MPIAIJ     *mpiaij;

  mpiaij = (Mat_MPIAIJ*)A->data;
  MatAssemblyEnd_MPIAIJ(A,mode);
  if (!A->was_assembled && mode == MAT_FINAL_ASSEMBLY) {
    VecSetType(mpiaij->lvec,VECSEQCUDA);
  }
  return(0);
}

PetscErrorCode MatDestroy_MPIAIJCUSPARSE(Mat A)
{
  PetscErrorCode     ierr;
  Mat_MPIAIJ         *a              = (Mat_MPIAIJ*)A->data;
  Mat_MPIAIJCUSPARSE *cusparseStruct = (Mat_MPIAIJCUSPARSE*)a->spptr;
  cudaError_t        err;
  cusparseStatus_t   stat;

  try {
    MatCUSPARSEClearHandle(a->A);
    MatCUSPARSEClearHandle(a->B);
    stat = cusparseDestroy(cusparseStruct->handle);CHKERRCUSPARSE(stat);
    if (cusparseStruct->stream) {
      err = cudaStreamDestroy(cusparseStruct->stream);CHKERRCUDA(err);
    }
    delete cusparseStruct;
  } catch(char *ex) {
    SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"Mat_MPIAIJCUSPARSE error: %s", ex);
  }
  MatDestroy_MPIAIJ(A);
  return(0);
}

PETSC_EXTERN PetscErrorCode MatCreate_MPIAIJCUSPARSE(Mat A)
{
  PetscErrorCode     ierr;
  Mat_MPIAIJ         *a;
  Mat_MPIAIJCUSPARSE *cusparseStruct;
  cusparseStatus_t   stat;

  MatCreate_MPIAIJ(A);
  PetscObjectComposeFunction((PetscObject)A,"MatMPIAIJSetPreallocation_C",MatMPIAIJSetPreallocation_MPIAIJCUSPARSE);
  PetscFree(A->defaultvectype);
  PetscStrallocpy(VECCUDA,&A->defaultvectype);

  a        = (Mat_MPIAIJ*)A->data;
  a->spptr = new Mat_MPIAIJCUSPARSE;

  cusparseStruct                      = (Mat_MPIAIJCUSPARSE*)a->spptr;
  cusparseStruct->diagGPUMatFormat    = MAT_CUSPARSE_CSR;
  cusparseStruct->offdiagGPUMatFormat = MAT_CUSPARSE_CSR;
  cusparseStruct->stream              = 0;
  stat = cusparseCreate(&(cusparseStruct->handle));CHKERRCUSPARSE(stat);

  A->ops->assemblyend    = MatAssemblyEnd_MPIAIJCUSPARSE;
  A->ops->mult           = MatMult_MPIAIJCUSPARSE;
  A->ops->multadd        = MatMultAdd_MPIAIJCUSPARSE;
  A->ops->multtranspose  = MatMultTranspose_MPIAIJCUSPARSE;
  A->ops->setfromoptions = MatSetFromOptions_MPIAIJCUSPARSE;
  A->ops->destroy        = MatDestroy_MPIAIJCUSPARSE;

  PetscObjectChangeTypeName((PetscObject)A,MATMPIAIJCUSPARSE);
  PetscObjectComposeFunction((PetscObject)A,"MatCUSPARSESetFormat_C",MatCUSPARSESetFormat_MPIAIJCUSPARSE);
  return(0);
}

/*@
   MatCreateAIJCUSPARSE - Creates a sparse matrix in AIJ (compressed row) format
   (the default parallel PETSc format). This matrix will ultimately be pushed down
   to NVidia GPUs and use the CUSPARSE library for calculations. For good matrix
   assembly performance the user should preallocate the matrix storage by setting
   the parameters d_nz and o_nz (or the arrays d_nnz and o_nnz). By setting these
   parameters accurately, performance during matrix assembly can be increased by
   more than a factor of 50.

   Collective

   Input Parameters:
+  comm - MPI communicator
.  m - number of local rows (or PETSC_DECIDE)
.  n - number of local columns (or PETSC_DECIDE)
.  M - number of global rows (or PETSC_DETERMINE)
.  N - number of global columns (or PETSC_DETERMINE)
.  d_nz - number of nonzeros per row in the diagonal block (same for all local rows)
.  d_nnz - array containing the number of nonzeros in the various rows of the diagonal block (possibly different for each row) or NULL
.  o_nz - number of nonzeros per row in the off-diagonal block (same for all local rows)
-  o_nnz - array containing the number of nonzeros in the various rows of the off-diagonal block (possibly different for each row) or NULL

   Output Parameter:
.  A - the matrix

   It is recommended that one use the MatCreate(), MatSetType() and/or MatSetFromOptions(),
   MatXXXXSetPreallocation() paradigm instead of this routine directly.
   [MatXXXXSetPreallocation() is, for example, MatSeqAIJSetPreallocation()]
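
   Example Usage:
   The following is a minimal sketch of that paradigm (error checking is omitted; comm, M, N and the
   preallocation values stand in for application-specific choices):
.vb
   Mat A;
   MatCreate(comm,&A);
   MatSetSizes(A,PETSC_DECIDE,PETSC_DECIDE,M,N);
   MatSetType(A,MATAIJCUSPARSE);
   MatMPIAIJSetPreallocation(A,5,NULL,2,NULL);   /* used when comm has more than one process */
   MatSeqAIJSetPreallocation(A,5,NULL);          /* used when comm has exactly one process */
   /* ... insert values with MatSetValues(), then MatAssemblyBegin()/MatAssemblyEnd() ... */
.ve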

   Notes:
   If d_nnz (resp. o_nnz) is given then d_nz (resp. o_nz) is ignored.

   The AIJ format (also called the Yale sparse matrix format or
   compressed row storage) is fully compatible with standard Fortran 77
   storage.  That is, the stored row and column indices can begin at
   either one (as in Fortran) or zero.  See the users' manual for details.

   Specify the preallocated storage with either d_nz/o_nz or d_nnz/o_nnz (not both).
   Set d_nz=o_nz=PETSC_DEFAULT and d_nnz=o_nnz=NULL for PETSc to control dynamic memory
   allocation.  For large problems you MUST preallocate memory or you
   will get TERRIBLE performance, see the users' manual chapter on matrices.

   By default, this format uses inodes (identical nodes) when possible, to
   improve numerical efficiency of matrix-vector products and solves. We
   search for consecutive rows with the same nonzero structure, thereby
   reusing matrix information to achieve increased efficiency.

   Level: intermediate

.seealso: MatCreate(), MatCreateAIJ(), MatSetValues(), MatSeqAIJSetColumnIndices(), MatCreateSeqAIJWithArrays(), MATMPIAIJCUSPARSE, MATAIJCUSPARSE
@*/
PetscErrorCode  MatCreateAIJCUSPARSE(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt M,PetscInt N,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[],Mat *A)
{
  PetscMPIInt    size;

  MatCreate(comm,A);
  MatSetSizes(*A,m,n,M,N);
  MPI_Comm_size(comm,&size);
  if (size > 1) {
    MatSetType(*A,MATMPIAIJCUSPARSE);
    MatMPIAIJSetPreallocation(*A,d_nz,d_nnz,o_nz,o_nnz);
  } else {
    MatSetType(*A,MATSEQAIJCUSPARSE);
    MatSeqAIJSetPreallocation(*A,d_nz,d_nnz);
  }
  return(0);
}

/*MC
   MATAIJCUSPARSE - MATMPIAIJCUSPARSE = "aijcusparse" = "mpiaijcusparse" - A matrix type to be used for sparse matrices.

   A matrix type whose data resides on Nvidia GPUs. These matrices can be stored in
   CSR, ELL, or Hybrid (HYB) format. The ELL and HYB formats require CUDA 4.2 or later.
   All matrix calculations are performed on Nvidia GPUs using the CUSPARSE library.

   This matrix type is identical to MATSEQAIJCUSPARSE when constructed with a single process communicator,
   and MATMPIAIJCUSPARSE otherwise.  As a result, for single process communicators,
   MatSeqAIJSetPreallocation() is supported, and similarly MatMPIAIJSetPreallocation() is supported
   for communicators controlling multiple processes.  It is recommended that you call both of
   the above preallocation routines for simplicity.

   Options Database Keys:
+  -mat_type mpiaijcusparse - sets the matrix type to "mpiaijcusparse" during a call to MatSetFromOptions()
.  -mat_cusparse_storage_format csr - sets the storage format of the diagonal and off-diagonal matrices during a call to MatSetFromOptions(). Other options include ell (ellpack) or hyb (hybrid).
.  -mat_cusparse_mult_diag_storage_format csr - sets the storage format of the diagonal matrix during a call to MatSetFromOptions(). Other options include ell (ellpack) or hyb (hybrid).
-  -mat_cusparse_mult_offdiag_storage_format csr - sets the storage format of the off-diagonal matrix during a call to MatSetFromOptions(). Other options include ell (ellpack) or hyb (hybrid). See the example below.
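
   For example, an existing application that calls MatSetFromOptions() can be switched to this matrix type
   and to the HYB storage format from the command line. This is only a sketch; "./app" stands in for the
   application's executable and whatever other options it requires:
.vb
   mpiexec -n 4 ./app -mat_type mpiaijcusparse -mat_cusparse_storage_format hyb
.ve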

   Level: beginner

.seealso: MatCreateAIJCUSPARSE(), MATSEQAIJCUSPARSE, MatCreateSeqAIJCUSPARSE(), MatCUSPARSESetFormat(), MatCUSPARSEStorageFormat, MatCUSPARSEFormatOperation
M*/