Actual source code: cusparsematimpl.h

petsc-master 2020-09-18

#include <petscpkg_version.h>
#include <petsc/private/cudavecimpl.h>

#include <cusparse_v2.h>

#include <algorithm>
#include <vector>

#include <thrust/device_vector.h>
#include <thrust/device_ptr.h>
#include <thrust/device_malloc_allocator.h>
#include <thrust/transform.h>
#include <thrust/functional.h>
#include <thrust/sequence.h>

#if (CUSPARSE_VER_MAJOR > 10 || (CUSPARSE_VER_MAJOR == 10 && CUSPARSE_VER_MINOR >= 2)) /* According to cuda/10.1.168 on OLCF Summit */
#define CHKERRCUSPARSE(stat) \
do { \
   if (PetscUnlikely(stat)) { \
      const char *name  = cusparseGetErrorName(stat); \
      const char *descr = cusparseGetErrorString(stat); \
      SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_LIB,"cuSPARSE error %d (%s) : %s",(int)stat,name,descr); \
   } \
} while (0)
#else
#define CHKERRCUSPARSE(stat) do {if (PetscUnlikely(stat)) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"cusparse error %d",(int)stat);} while (0)
#endif
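
/* Usage sketch (illustrative, not part of the original header): every raw
   cuSPARSE call in the backend is wrapped with CHKERRCUSPARSE inside a
   PetscErrorCode-returning function, so a failing status aborts through the
   normal PETSc error machinery:

     cusparseHandle_t handle;
     cusparseStatus_t stat;

     stat = cusparseCreate(&handle);CHKERRCUSPARSE(stat);
*/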

#if defined(PETSC_USE_COMPLEX)
#if defined(PETSC_USE_REAL_SINGLE)
const cuComplex PETSC_CUSPARSE_ONE  = {1.0f, 0.0f};
const cuComplex PETSC_CUSPARSE_ZERO = {0.0f, 0.0f};
#define cusparse_solve(a,b,c,d,e,f,g,h,i,j,k)              cusparseCcsrsv_solve((a),(b),(c),(cuComplex*)(d),(e),(cuComplex*)(f),(g),(h),(i),(cuComplex*)(j),(cuComplex*)(k))
#define cusparse_analysis(a,b,c,d,e,f,g,h,i)               cusparseCcsrsv_analysis((a),(b),(c),(d),(e),(cuComplex*)(f),(g),(h),(i))
#define cusparse_csr_spmv(a,b,c,d,e,f,g,h,i,j,k,l,m)       cusparseCcsrmv((a),(b),(c),(d),(e),(cuComplex*)(f),(g),(cuComplex*)(h),(i),(j),(cuComplex*)(k),(cuComplex*)(l),(cuComplex*)(m))
#define cusparse_csr_spmm(a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p) cusparseCcsrmm((a),(b),(c),(d),(e),(f),(cuComplex*)(g),(h),(cuComplex*)(i),(j),(k),(cuComplex*)(l),(m),(cuComplex*)(n),(cuComplex*)(o),(p))
#define cusparse_csr2csc(a,b,c,d,e,f,g,h,i,j,k,l)          cusparseCcsr2csc((a),(b),(c),(d),(cuComplex*)(e),(f),(g),(cuComplex*)(h),(i),(j),(k),(l))
#define cusparse_hyb_spmv(a,b,c,d,e,f,g,h)                 cusparseChybmv((a),(b),(cuComplex*)(c),(d),(e),(cuComplex*)(f),(cuComplex*)(g),(cuComplex*)(h))
#define cusparse_csr2hyb(a,b,c,d,e,f,g,h,i,j)              cusparseCcsr2hyb((a),(b),(c),(d),(cuComplex*)(e),(f),(g),(h),(i),(j))
#define cusparse_hyb2csr(a,b,c,d,e,f)                      cusparseChyb2csr((a),(b),(c),(cuComplex*)(d),(e),(f))
#elif defined(PETSC_USE_REAL_DOUBLE)
const cuDoubleComplex PETSC_CUSPARSE_ONE  = {1.0, 0.0};
const cuDoubleComplex PETSC_CUSPARSE_ZERO = {0.0, 0.0};
#define cusparse_solve(a,b,c,d,e,f,g,h,i,j,k)              cusparseZcsrsv_solve((a),(b),(c),(cuDoubleComplex*)(d),(e),(cuDoubleComplex*)(f),(g),(h),(i),(cuDoubleComplex*)(j),(cuDoubleComplex*)(k))
#define cusparse_analysis(a,b,c,d,e,f,g,h,i)               cusparseZcsrsv_analysis((a),(b),(c),(d),(e),(cuDoubleComplex*)(f),(g),(h),(i))
#define cusparse_csr_spmv(a,b,c,d,e,f,g,h,i,j,k,l,m)       cusparseZcsrmv((a),(b),(c),(d),(e),(cuDoubleComplex*)(f),(g),(cuDoubleComplex*)(h),(i),(j),(cuDoubleComplex*)(k),(cuDoubleComplex*)(l),(cuDoubleComplex*)(m))
#define cusparse_csr_spmm(a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p) cusparseZcsrmm((a),(b),(c),(d),(e),(f),(cuDoubleComplex*)(g),(h),(cuDoubleComplex*)(i),(j),(k),(cuDoubleComplex*)(l),(m),(cuDoubleComplex*)(n),(cuDoubleComplex*)(o),(p))
#define cusparse_csr2csc(a,b,c,d,e,f,g,h,i,j,k,l)          cusparseZcsr2csc((a),(b),(c),(d),(cuDoubleComplex*)(e),(f),(g),(cuDoubleComplex*)(h),(i),(j),(k),(l))
#define cusparse_hyb_spmv(a,b,c,d,e,f,g,h)                 cusparseZhybmv((a),(b),(cuDoubleComplex*)(c),(d),(e),(cuDoubleComplex*)(f),(cuDoubleComplex*)(g),(cuDoubleComplex*)(h))
#define cusparse_csr2hyb(a,b,c,d,e,f,g,h,i,j)              cusparseZcsr2hyb((a),(b),(c),(d),(cuDoubleComplex*)(e),(f),(g),(h),(i),(j))
#define cusparse_hyb2csr(a,b,c,d,e,f)                      cusparseZhyb2csr((a),(b),(c),(cuDoubleComplex*)(d),(e),(f))
#endif
#else
const PetscScalar PETSC_CUSPARSE_ONE  = 1.0;
const PetscScalar PETSC_CUSPARSE_ZERO = 0.0;
#if defined(PETSC_USE_REAL_SINGLE)
#define cusparse_solve    cusparseScsrsv_solve
#define cusparse_analysis cusparseScsrsv_analysis
#define cusparse_csr_spmv cusparseScsrmv
#define cusparse_csr_spmm cusparseScsrmm
#define cusparse_csr2csc  cusparseScsr2csc
#define cusparse_hyb_spmv cusparseShybmv
#define cusparse_csr2hyb  cusparseScsr2hyb
#define cusparse_hyb2csr  cusparseShyb2csr
#elif defined(PETSC_USE_REAL_DOUBLE)
#define cusparse_solve    cusparseDcsrsv_solve
#define cusparse_analysis cusparseDcsrsv_analysis
#define cusparse_csr_spmv cusparseDcsrmv
#define cusparse_csr_spmm cusparseDcsrmm
#define cusparse_csr2csc  cusparseDcsr2csc
#define cusparse_hyb_spmv cusparseDhybmv
#define cusparse_csr2hyb  cusparseDcsr2hyb
#define cusparse_hyb2csr  cusparseDhyb2csr
#endif
#endif
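
/* Dispatch sketch (illustrative): the wrappers above let the rest of the
   backend be written once in terms of PetscScalar; each call resolves at
   compile time to the S/D/C/Z variant matching the PETSc scalar type.
   A CSR SpMV y = alpha*A*x + beta*y over device pointers would look like:

     stat = cusparse_csr_spmv(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
                              m, n, nnz, alpha, descr,
                              values, rowoffsets, colindices,
                              x, beta, y);CHKERRCUSPARSE(stat);

   with alpha and beta passed as PetscScalar pointers (device pointers when
   the handle runs in CUSPARSE_POINTER_MODE_DEVICE). */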

#define THRUSTINTARRAY32 thrust::device_vector<int>
#define THRUSTINTARRAY thrust::device_vector<PetscInt>
#define THRUSTARRAY thrust::device_vector<PetscScalar>

/* A CSR matrix structure */
struct CsrMatrix {
  PetscInt         num_rows;
  PetscInt         num_cols;
  PetscInt         num_entries;
  THRUSTINTARRAY32 *row_offsets;
  THRUSTINTARRAY32 *column_indices;
  THRUSTARRAY      *values;
};
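
/* Construction sketch (illustrative): the three arrays are heap-allocated
   thrust device vectors, so populating a CsrMatrix from hypothetical host
   CSR arrays ai (row offsets), aj (column indices) and aa (values) reduces
   to allocate-and-assign, each assign() being a host-to-device copy:

     CsrMatrix *csr      = new CsrMatrix;
     csr->num_rows       = m;
     csr->num_cols       = n;
     csr->num_entries    = nnz;
     csr->row_offsets    = new THRUSTINTARRAY32(m+1);
     csr->column_indices = new THRUSTINTARRAY32(nnz);
     csr->values         = new THRUSTARRAY(nnz);
     csr->row_offsets->assign(ai, ai+m+1);
     csr->column_indices->assign(aj, aj+nnz);
     csr->values->assign(aa, aa+nnz);
*/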

#if PETSC_PKG_CUDA_VERSION_LT(11,0,0)
/* This struct holds the relevant data needed for a MatSolve */
struct Mat_SeqAIJCUSPARSETriFactorStruct {
  /* Data needed for triangular solve */
  cusparseMatDescr_t          descr;
  cusparseSolveAnalysisInfo_t solveInfo;
  cusparseOperation_t         solveOp;
  CsrMatrix                   *csrMat;
};
#endif
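
/* Solve sketch (illustrative): with this legacy (pre-CUDA-11) API the factor
   is analyzed once, the result cached in solveInfo, and MatSolve then calls
   only the solve phase; for a Mat_SeqAIJCUSPARSETriFactorStruct *fact with
   CSR data values/rowoffsets/colindices and device vectors b, x:

     stat = cusparse_analysis(handle, fact->solveOp, n, nnz, fact->descr,
                              values, rowoffsets, colindices,
                              fact->solveInfo);CHKERRCUSPARSE(stat);
     stat = cusparse_solve(handle, fact->solveOp, n, alpha, fact->descr,
                           values, rowoffsets, colindices, fact->solveInfo,
                           b, x);CHKERRCUSPARSE(stat);
*/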

/* This struct holds the relevant data needed for a MatMult */
struct Mat_SeqAIJCUSPARSEMultStruct {
  void               *mat;  /* opaque pointer to a matrix. This could be either a cusparseHybMat_t or a CsrMatrix */
  cusparseMatDescr_t descr; /* data needed to describe the matrix for a multiply */
  THRUSTINTARRAY     *cprowIndices; /* compressed row indices used in the parallel SpMV */
  PetscScalar        *alpha;     /* pointer to a device "scalar" storing the alpha parameter in the SpMV */
  PetscScalar        *beta_zero; /* pointer to a device "scalar" storing the beta parameter in the SpMV as zero */
  PetscScalar        *beta_one;  /* pointer to a device "scalar" storing the beta parameter in the SpMV as one */
};
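
/* Setup sketch (illustrative): alpha, beta_zero and beta_one are scalars in
   device memory (so the cusparse handle can use CUSPARSE_POINTER_MODE_DEVICE);
   for a Mat_SeqAIJCUSPARSEMultStruct *matstruct each is allocated and filled
   once, e.g.:

     cudaError_t cerr;

     cerr = cudaMalloc((void**)&matstruct->alpha, sizeof(PetscScalar));CHKERRCUDA(cerr);
     cerr = cudaMemcpy(matstruct->alpha, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar),
                       cudaMemcpyHostToDevice);CHKERRCUDA(cerr);
*/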

#if PETSC_PKG_CUDA_VERSION_LT(11,0,0)

/* This is a larger struct holding all the triangular factors for a solve, transpose solve, and
   any indices used in a reordering */
struct Mat_SeqAIJCUSPARSETriFactors {
  Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorPtr;          /* pointer for lower triangular (factored matrix) on GPU */
  Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorPtr;          /* pointer for upper triangular (factored matrix) on GPU */
  Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorPtrTranspose; /* pointer for lower triangular (factored matrix) on GPU for the transpose (useful for BiCG) */
  Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorPtrTranspose; /* pointer for upper triangular (factored matrix) on GPU for the transpose (useful for BiCG) */
  THRUSTINTARRAY                    *rpermIndices; /* indices used for any reordering */
  THRUSTINTARRAY                    *cpermIndices; /* indices used for any reordering */
  THRUSTARRAY                       *workVector;
  cusparseHandle_t                  handle; /* a handle to the cusparse library */
  PetscInt                          nnz;    /* number of nonzeros ... need this for accurate logging between ICC and ILU */
};
#endif
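
/* Reordering sketch (illustrative): rpermIndices/cpermIndices store the row
   and column permutations from the factorization; applying one to a dense
   right-hand side barray on the device amounts to a gather into the work
   vector (thrust::gather lives in <thrust/gather.h>):

     thrust::gather(trifactors->rpermIndices->begin(),
                    trifactors->rpermIndices->end(),
                    thrust::device_pointer_cast(barray),
                    trifactors->workVector->begin());
*/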

/* This is a larger struct holding all the matrices for a SpMV and SpMV transpose */
struct Mat_SeqAIJCUSPARSE {
  Mat_SeqAIJCUSPARSEMultStruct *mat;            /* pointer to the matrix on the GPU */
  Mat_SeqAIJCUSPARSEMultStruct *matTranspose;   /* pointer to the matrix on the GPU (for the transpose ... useful for BiCG) */
  THRUSTARRAY                  *workVector;     /* pointer to a workvector to which we can copy the relevant indices of a vector we want to multiply */
  THRUSTINTARRAY32             *rowoffsets_gpu; /* rowoffsets on GPU in non-compressed-row format. It is used to convert CSR to CSC */
  PetscInt                     nrows;           /* number of rows of the matrix seen by GPU */
  MatCUSPARSEStorageFormat     format;          /* the storage format for the matrix on the device */
  cudaStream_t                 stream;          /* a stream for the parallel SpMV ... this is not owned and should not be deleted */
  cusparseHandle_t             handle;          /* a handle to the cusparse library ... this may not be owned (if we're working in parallel i.e. multiGPUs) */
  PetscObjectState             nonzerostate;    /* track nonzero state to possibly recreate the GPU matrix */
  PetscBool                    transgen;        /* whether or not to generate an explicit transpose for MatMultTranspose operations */
};

PETSC_INTERN PetscErrorCode MatCUSPARSECopyToGPU(Mat);
PETSC_INTERN PetscErrorCode MatCUSPARSESetStream(Mat, const cudaStream_t stream);
PETSC_INTERN PetscErrorCode MatCUSPARSESetHandle(Mat, const cusparseHandle_t handle);
PETSC_INTERN PetscErrorCode MatCUSPARSEClearHandle(Mat);
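
/* Usage sketch (illustrative): a caller that owns a stream and a cusparse
   handle can hand them to the matrix and then trigger the host-to-device
   copy; since neither the stream nor the handle is owned by the matrix,
   the caller destroys them itself:

     ierr = MatCUSPARSESetStream(A, stream);CHKERRQ(ierr);
     ierr = MatCUSPARSESetHandle(A, handle);CHKERRQ(ierr);
     ierr = MatCUSPARSECopyToGPU(A);CHKERRQ(ierr);
*/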
#endif