Actual source code: sliced.c
petsc-3.3-p5 2012-12-01
#include <petscdmsliced.h>        /*I "petscdmsliced.h" I*/
#include <petscmat.h>             /*I "petscmat.h"      I*/
#include <petsc-private/dmimpl.h> /*I "petscdm.h"       I*/

/* CSR storage of the nonzero structure of a bs*bs matrix */
typedef struct {
  PetscInt bs,nz,*i,*j;
} DMSlicedBlockFills;

typedef struct {
  PetscInt           bs,n,N,Nghosts,*ghosts;
  PetscInt           d_nz,o_nz,*d_nnz,*o_nnz;
  DMSlicedBlockFills *dfill,*ofill;
} DM_Sliced;

PetscErrorCode DMCreateMatrix_Sliced(DM dm, const MatType mtype,Mat *J)
{
  PetscErrorCode         ierr;
  PetscInt               *globals,*sd_nnz,*so_nnz,rstart,bs,i;
  ISLocalToGlobalMapping lmap,blmap;
  void                   (*aij)(void) = PETSC_NULL;
  DM_Sliced              *slice = (DM_Sliced*)dm->data;

  PetscFunctionBegin;
  bs   = slice->bs;
  ierr = MatCreate(((PetscObject)dm)->comm,J);CHKERRQ(ierr);
  ierr = MatSetSizes(*J,slice->n*bs,slice->n*bs,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
  ierr = MatSetBlockSize(*J,bs);CHKERRQ(ierr);
  ierr = MatSetType(*J,mtype);CHKERRQ(ierr);
  ierr = MatSeqBAIJSetPreallocation(*J,bs,slice->d_nz,slice->d_nnz);CHKERRQ(ierr);
  ierr = MatMPIBAIJSetPreallocation(*J,bs,slice->d_nz,slice->d_nnz,slice->o_nz,slice->o_nnz);CHKERRQ(ierr);
  /* In general, we have to do extra work to preallocate for scalar (AIJ) matrices so we check whether it will do any
   * good before going on with it. */
  ierr = PetscObjectQueryFunction((PetscObject)*J,"MatMPIAIJSetPreallocation_C",&aij);CHKERRQ(ierr);
  if (!aij) {
    ierr = PetscObjectQueryFunction((PetscObject)*J,"MatSeqAIJSetPreallocation_C",&aij);CHKERRQ(ierr);
  }
  if (aij) {
    if (bs == 1) {
      ierr = MatSeqAIJSetPreallocation(*J,slice->d_nz,slice->d_nnz);CHKERRQ(ierr);
      ierr = MatMPIAIJSetPreallocation(*J,slice->d_nz,slice->d_nnz,slice->o_nz,slice->o_nnz);CHKERRQ(ierr);
    } else if (!slice->d_nnz) {
      ierr = MatSeqAIJSetPreallocation(*J,slice->d_nz*bs,PETSC_NULL);CHKERRQ(ierr);
      ierr = MatMPIAIJSetPreallocation(*J,slice->d_nz*bs,PETSC_NULL,slice->o_nz*bs,PETSC_NULL);CHKERRQ(ierr);
    } else {
      /* The user has provided preallocation per block-row, convert it to per scalar-row respecting DMSlicedSetBlockFills() if applicable */
      ierr = PetscMalloc2(slice->n*bs,PetscInt,&sd_nnz,(!!slice->o_nnz)*slice->n*bs,PetscInt,&so_nnz);CHKERRQ(ierr);
      for (i=0; i<slice->n*bs; i++) {
        sd_nnz[i] = (slice->d_nnz[i/bs]-1) * (slice->ofill ? slice->ofill->i[i%bs+1]-slice->ofill->i[i%bs] : bs)
                                           + (slice->dfill ? slice->dfill->i[i%bs+1]-slice->dfill->i[i%bs] : bs);
        if (so_nnz) {
          so_nnz[i] = slice->o_nnz[i/bs] * (slice->ofill ? slice->ofill->i[i%bs+1]-slice->ofill->i[i%bs] : bs);
        }
      }
      ierr = MatSeqAIJSetPreallocation(*J,slice->d_nz*bs,sd_nnz);CHKERRQ(ierr);
      ierr = MatMPIAIJSetPreallocation(*J,slice->d_nz*bs,sd_nnz,slice->o_nz*bs,so_nnz);CHKERRQ(ierr);
      ierr = PetscFree2(sd_nnz,so_nnz);CHKERRQ(ierr);
    }
  }

  /* Set up the local to global map.  For the scalar map, we have to translate to entry-wise indexing instead of block-wise. */
  ierr = PetscMalloc((slice->n+slice->Nghosts)*bs*sizeof(PetscInt),&globals);CHKERRQ(ierr);
  ierr = MatGetOwnershipRange(*J,&rstart,PETSC_NULL);CHKERRQ(ierr);
  for (i=0; i<slice->n*bs; i++) {
    globals[i] = rstart + i;
  }
  for (i=0; i<slice->Nghosts*bs; i++) {
    globals[slice->n*bs+i] = slice->ghosts[i/bs]*bs + i%bs;
  }
  ierr = ISLocalToGlobalMappingCreate(PETSC_COMM_SELF,(slice->n+slice->Nghosts)*bs,globals,PETSC_OWN_POINTER,&lmap);CHKERRQ(ierr);
  ierr = ISLocalToGlobalMappingBlock(lmap,bs,&blmap);CHKERRQ(ierr);
  ierr = MatSetLocalToGlobalMapping(*J,lmap,lmap);CHKERRQ(ierr);
  ierr = MatSetLocalToGlobalMappingBlock(*J,blmap,blmap);CHKERRQ(ierr);
  ierr = ISLocalToGlobalMappingDestroy(&lmap);CHKERRQ(ierr);
  ierr = ISLocalToGlobalMappingDestroy(&blmap);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
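
/* Usage sketch (illustrative, not part of PETSc): because the routine above installs both the
   scalar and the block local-to-global mappings, assembly can use local block indices, where
   blocks 0..n-1 are the locally owned blocks and blocks n..n+Nghosts-1 are the ghost blocks
   passed to DMSlicedSetGhosts().  The helper below is hypothetical and assumes bs = 2; it adds
   one dense 2x2 block coupling an owned block row with a ghosted block column.

     static PetscErrorCode AddCouplingBlock(Mat J,PetscInt nlocal,PetscInt ownedblock,PetscInt ghostblock)
     {
       PetscErrorCode ierr;
       PetscInt       row = ownedblock;            // local block row, 0 <= ownedblock < nlocal
       PetscInt       col = nlocal + ghostblock;   // ghost blocks are numbered after the owned blocks
       PetscScalar    v[4] = {1.0,-1.0,-1.0,1.0};  // one bs*bs block in row-major order
       ierr = MatSetValuesBlockedLocal(J,1,&row,1,&col,v,ADD_VALUES);CHKERRQ(ierr);
       return 0;
     }

   After the element loop, finish with MatAssemblyBegin(J,MAT_FINAL_ASSEMBLY) and
   MatAssemblyEnd(J,MAT_FINAL_ASSEMBLY). */
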
/*@C
    DMSlicedSetGhosts - Sets the global indices of other processes' elements that will
      be ghosts on this process

    Not Collective

    Input Parameters:
+    dm - the DM object
.    bs - block size
.    nlocal - number of local (owned, non-ghost) blocks
.    Nghosts - number of ghost blocks on this process
-    ghosts - global indices of each ghost block

    Level: advanced

.seealso DMDestroy(), DMCreateGlobalVector()

@*/
PetscErrorCode DMSlicedSetGhosts(DM dm,PetscInt bs,PetscInt nlocal,PetscInt Nghosts,const PetscInt ghosts[])
{
  PetscErrorCode ierr;
  DM_Sliced      *slice = (DM_Sliced*)dm->data;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm,DM_CLASSID,1);
  ierr = PetscFree(slice->ghosts);CHKERRQ(ierr);
  ierr = PetscMalloc(Nghosts*sizeof(PetscInt),&slice->ghosts);CHKERRQ(ierr);
  ierr = PetscMemcpy(slice->ghosts,ghosts,Nghosts*sizeof(PetscInt));CHKERRQ(ierr);
  slice->bs      = bs;
  slice->n       = nlocal;
  slice->Nghosts = Nghosts;
  PetscFunctionReturn(0);
}
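
/* Usage sketch (illustrative, not part of PETSc): a minimal call to the routine above.  Every
   process owns 4 blocks of size 2 and ghosts the first block of the next process; the global
   block index of that ghost is computed from the (hypothetical) uniform distribution.

     PetscErrorCode ierr;
     PetscMPIInt    rank,size;
     DM             dm;
     PetscInt       bs = 2,nlocal = 4,ghosts[1];

     ierr = MPI_Comm_rank(PETSC_COMM_WORLD,&rank);CHKERRQ(ierr);
     ierr = MPI_Comm_size(PETSC_COMM_WORLD,&size);CHKERRQ(ierr);
     ghosts[0] = ((rank+1)%size)*nlocal;           // global index of the ghosted block
     ierr = DMSlicedCreate(PETSC_COMM_WORLD,&dm);CHKERRQ(ierr);
     ierr = DMSlicedSetGhosts(dm,bs,nlocal,1,ghosts);CHKERRQ(ierr);
*/
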
/*@C
    DMSlicedSetPreallocation - sets the matrix memory preallocation for matrices computed by DMSliced

    Not Collective

    Input Parameters:
+    dm - the DM object
.    d_nz - number of block nonzeros per block row in the diagonal portion of the local
           submatrix (same for all local block rows)
.    d_nnz - array containing the number of block nonzeros in the various block rows of the
           diagonal portion of the local submatrix (possibly different for each block row),
           or PETSC_NULL
.    o_nz - number of block nonzeros per block row in the off-diagonal portion of the local
           submatrix (same for all local block rows)
-    o_nnz - array containing the number of block nonzeros in the various block rows of the
           off-diagonal portion of the local submatrix (possibly different for each block row),
           or PETSC_NULL

    Notes:
    See MatMPIBAIJSetPreallocation() for more details on preallocation.  If a scalar matrix (AIJ) is
    obtained with DMCreateMatrix(), the correct preallocation will be set, respecting DMSlicedSetBlockFills().

    Level: advanced

.seealso DMDestroy(), DMCreateGlobalVector(), MatMPIAIJSetPreallocation(),
         MatMPIBAIJSetPreallocation(), DMCreateMatrix(), DMSlicedSetBlockFills()

@*/
PetscErrorCode DMSlicedSetPreallocation(DM dm,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[])
{
  DM_Sliced *slice = (DM_Sliced*)dm->data;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm,DM_CLASSID,1);
  slice->d_nz  = d_nz;
  slice->d_nnz = (PetscInt*)d_nnz;
  slice->o_nz  = o_nz;
  slice->o_nnz = (PetscInt*)o_nnz;
  PetscFunctionReturn(0);
}
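
/* Usage sketch (illustrative, not part of PETSc): preallocating with per-block-row counts,
   assuming nlocal = 3 owned blocks were set with DMSlicedSetGhosts().  The counts are made up;
   d_nnz[i] and o_nnz[i] give the number of nonzero blocks in block row i that couple to owned
   and to ghost (off-process) blocks, respectively, exactly as in MatMPIBAIJSetPreallocation().

     PetscInt d_nnz[3] = {3,2,3};
     PetscInt o_nnz[3] = {1,0,1};

     ierr = DMSlicedSetPreallocation(dm,0,d_nnz,0,o_nnz);CHKERRQ(ierr);

   Passing PETSC_NULL for both arrays and nonzero d_nz/o_nz instead requests the same count for
   every block row. */
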
static PetscErrorCode DMSlicedSetBlockFills_Private(PetscInt bs,const PetscInt *fill,DMSlicedBlockFills **inf)
{
  PetscErrorCode      ierr;
  PetscInt            i,j,nz,*fi,*fj;
  DMSlicedBlockFills *f;

  PetscFunctionBegin;
  PetscValidPointer(inf,3);
  if (*inf) {ierr = PetscFree3((*inf)->i,(*inf)->j,*inf);CHKERRQ(ierr);}
  if (!fill) PetscFunctionReturn(0);
  for (i=0,nz=0; i<bs*bs; i++) if (fill[i]) nz++;
  ierr = PetscMalloc3(1,DMSlicedBlockFills,&f,bs+1,PetscInt,&fi,nz,PetscInt,&fj);CHKERRQ(ierr);
  f->bs = bs;
  f->nz = nz;
  f->i  = fi;
  f->j  = fj;
  for (i=0,nz=0; i<bs; i++) {
    fi[i] = nz;
    for (j=0; j<bs; j++) if (fill[i*bs+j]) fj[nz++] = j;
  }
  fi[i] = nz;
  *inf  = f;
  PetscFunctionReturn(0);
}
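
/* Worked example (not part of PETSc): for bs = 3 and the row-major 0/1 fill pattern below, the
   routine above produces the CSR arrays shown, so the nonzero columns of block row i are
   fj[fi[i]] .. fj[fi[i+1]-1].

     const PetscInt fill[9] = {1,0,0,
                               1,1,0,
                               0,1,1};
     // result: nz = 5,  f->i = {0,1,3,5},  f->j = {0, 0,1, 1,2}
*/
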
/*@C
    DMSlicedSetBlockFills - Sets the fill pattern in each block for a multi-component problem
    of the matrix returned by DMCreateMatrix().

    Logically Collective on DM

    Input Parameters:
+    dm - the DM object
.    dfill - the fill pattern in the diagonal block (may be PETSC_NULL, means use dense block)
-    ofill - the fill pattern in the off-diagonal blocks

    Notes:
    This only makes sense for multicomponent problems using scalar matrix formats (AIJ).
    See DMDASetBlockFills() for example usage.

    Level: advanced

.seealso DMCreateMatrix(), DMDASetBlockFills()
@*/
PetscErrorCode DMSlicedSetBlockFills(DM dm,const PetscInt *dfill,const PetscInt *ofill)
{
  PetscErrorCode ierr;
  DM_Sliced      *slice = (DM_Sliced*)dm->data;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm,DM_CLASSID,1);
  ierr = DMSlicedSetBlockFills_Private(slice->bs,dfill,&slice->dfill);CHKERRQ(ierr);
  ierr = DMSlicedSetBlockFills_Private(slice->bs,ofill,&slice->ofill);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
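
/* Usage sketch (illustrative, not part of PETSc): a possible fill pattern for a 3-component
   problem, in the same row-major 0/1 format used by DMDASetBlockFills().  Here every component
   couples to itself and to component 0 within its own block, while only component 0 couples to
   other blocks.  Note that the routine above reads the block size stored by DMSlicedSetGhosts(),
   so it must be called after that routine.

     const PetscInt dfill[9] = {1,0,0,
                                1,1,0,
                                1,0,1};
     const PetscInt ofill[9] = {1,0,0,
                                0,0,0,
                                0,0,0};

     ierr = DMSlicedSetBlockFills(dm,dfill,ofill);CHKERRQ(ierr);
*/
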
PetscErrorCode DMDestroy_Sliced(DM dm)
{
  PetscErrorCode ierr;
  DM_Sliced      *slice = (DM_Sliced*)dm->data;

  PetscFunctionBegin;
  ierr = PetscFree(slice->ghosts);CHKERRQ(ierr);
  if (slice->dfill) {ierr = PetscFree3(slice->dfill,slice->dfill->i,slice->dfill->j);CHKERRQ(ierr);}
  if (slice->ofill) {ierr = PetscFree3(slice->ofill,slice->ofill->i,slice->ofill->j);CHKERRQ(ierr);}
  PetscFunctionReturn(0);
}

PetscErrorCode DMCreateGlobalVector_Sliced(DM dm,Vec *gvec)
{
  PetscErrorCode ierr;
  DM_Sliced      *slice = (DM_Sliced*)dm->data;

  PetscFunctionBegin;
  *gvec = 0;
  ierr  = VecCreateGhostBlock(((PetscObject)dm)->comm,slice->bs,slice->n*slice->bs,PETSC_DETERMINE,slice->Nghosts,slice->ghosts,gvec);CHKERRQ(ierr);
  ierr  = PetscObjectCompose((PetscObject)*gvec,"DM",(PetscObject)dm);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
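
/* Usage sketch (illustrative, not part of PETSc): the vector created above is a block-ghosted
   VecGhost, so ghost values sit directly after the owned entries in its local form.  The names
   xglobal, nlocal, and bs below are assumed to come from the caller, and the ghost values are
   only current after a forward VecGhostUpdate (see the comment at the end of this file).

     Vec         xlocal;
     PetscScalar *x,value;
     PetscInt    ghostblock = 0,comp = 0;          // first ghost block, first component

     ierr  = VecGhostGetLocalForm(xglobal,&xlocal);CHKERRQ(ierr);
     ierr  = VecGetArray(xlocal,&x);CHKERRQ(ierr);
     value = x[(nlocal+ghostblock)*bs+comp];       // owned entries occupy the first nlocal*bs slots
     ierr  = VecRestoreArray(xlocal,&x);CHKERRQ(ierr);
     ierr  = VecGhostRestoreLocalForm(xglobal,&xlocal);CHKERRQ(ierr);
*/
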
EXTERN_C_BEGIN
PetscErrorCode DMCreate_Sliced(DM p)
{
  PetscErrorCode ierr;
  DM_Sliced      *slice;

  PetscFunctionBegin;
  ierr    = PetscNewLog(p,DM_Sliced,&slice);CHKERRQ(ierr);
  p->data = slice;

  ierr = PetscObjectChangeTypeName((PetscObject)p,DMSLICED);CHKERRQ(ierr);

  p->ops->createglobalvector = DMCreateGlobalVector_Sliced;
  p->ops->creatematrix       = DMCreateMatrix_Sliced;
  p->ops->destroy            = DMDestroy_Sliced;
  PetscFunctionReturn(0);
}
EXTERN_C_END

/*@C
    DMSlicedCreate - Creates a DM object, used to manage data for an unstructured problem

    Collective on MPI_Comm

    Input Parameter:
.   comm - the processors that will share the global vector

    Output Parameter:
.   dm - the DM object

    Level: advanced

.seealso DMDestroy(), DMCreateGlobalVector()

@*/
PetscErrorCode DMSlicedCreate(MPI_Comm comm,DM *dm)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidPointer(dm,2);
  ierr = DMCreate(comm,dm);CHKERRQ(ierr);
  ierr = DMSetType(*dm,DMSLICED);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
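
/* Usage sketch (illustrative, not part of PETSc): the typical call sequence tying this file
   together.  bs, nlocal, Nghosts, ghosts, d_nz, and o_nz stand for the caller's data; the ghost
   and preallocation setup are sketched in more detail next to DMSlicedSetGhosts() and
   DMSlicedSetPreallocation() above.

     DM  dm;
     Mat J;
     Vec x;

     ierr = DMSlicedCreate(PETSC_COMM_WORLD,&dm);CHKERRQ(ierr);
     ierr = DMSlicedSetGhosts(dm,bs,nlocal,Nghosts,ghosts);CHKERRQ(ierr);
     ierr = DMSlicedSetPreallocation(dm,d_nz,PETSC_NULL,o_nz,PETSC_NULL);CHKERRQ(ierr);
     ierr = DMCreateMatrix(dm,MATMPIBAIJ,&J);CHKERRQ(ierr);
     ierr = DMCreateGlobalVector(dm,&x);CHKERRQ(ierr);
     // ... assemble J and x, solve, ...
     ierr = VecDestroy(&x);CHKERRQ(ierr);
     ierr = MatDestroy(&J);CHKERRQ(ierr);
     ierr = DMDestroy(&dm);CHKERRQ(ierr);
*/
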
/* Explanation of the missing functions for DMDA-style handling of the local vector:

   DMSlicedCreateLocalVector()
   DMSlicedGlobalToLocalBegin()
   DMSlicedGlobalToLocalEnd()

There is no way to get the global form from a local form, so DMSlicedCreateLocalVector() is a memory leak without
external accounting for the global vector.  Also, DMSliced intends the user to work with the VecGhost interface since the
ghosts are already ordered after the owned entries.  Contrast this to a DMDA where the local vector has a special
ordering described by the structured grid, hence it cannot share memory with the global form.  For this reason, users
of DMSliced should work with the global vector and use

   VecGhostGetLocalForm(), VecGhostRestoreLocalForm()
   VecGhostUpdateBegin(), VecGhostUpdateEnd()

rather than the missing DMDA-style functions.  This is conceptually simpler and offers better performance than is
possible with the DMDA-style interface.
*/
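
/* Usage sketch (illustrative, not part of PETSc): the VecGhost update pattern referred to above,
   for a global vector x obtained from DMCreateGlobalVector().  Scatter owned values into the
   ghost slots before reading them, and accumulate contributions made to ghost slots back to
   their owners after local assembly.

     // refresh ghost values from their owning processes
     ierr = VecGhostUpdateBegin(x,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
     ierr = VecGhostUpdateEnd(x,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);

     // add locally computed ghost contributions into the owned entries
     ierr = VecGhostUpdateBegin(x,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
     ierr = VecGhostUpdateEnd(x,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
*/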