Actual source code: mmaij.c
petsc-3.3-p5 2012-12-01
/*
   Support for the parallel AIJ matrix vector multiply
*/
#include <../src/mat/impls/aij/mpi/mpiaij.h>
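
/*
   MatSetUpMultiply_MPIAIJ builds the data needed by the parallel matrix-vector product:
     - garray[]: the sorted list of global column indices that appear in the off-diagonal
       block aij->B, so that B can be stored with a compact local column numbering 0..ec-1;
     - aij->lvec: a sequential work vector of length ec that receives the needed off-process
       entries of the input vector;
     - aij->Mvctx: the VecScatter that gathers those entries from the distributed vector.
*/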

PetscErrorCode MatSetUpMultiply_MPIAIJ(Mat mat)
{
  Mat_MPIAIJ         *aij = (Mat_MPIAIJ*)mat->data;
  Mat_SeqAIJ         *B   = (Mat_SeqAIJ*)(aij->B->data);
  PetscErrorCode     ierr;
  PetscInt           i,j,*aj = B->j,ec = 0,*garray;
  IS                 from,to;
  Vec                gvec;
  PetscBool          useblockis;
#if defined (PETSC_USE_CTABLE)
  PetscTable         gid1_lid1;
  PetscTablePosition tpos;
  PetscInt           gid,lid;
#else
  PetscInt           N = mat->cmap->N,*indices;
#endif
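
  /*
     Determine which global columns are referenced by the off-diagonal block aij->B.
     Two strategies: with PETSC_USE_CTABLE a hash table keeps the work memory roughly
     proportional to the number of columns actually used; otherwise a dense work array
     of length cmap->N is used.
  */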
#if defined (PETSC_USE_CTABLE)
  /* use a table */
  PetscTableCreate(aij->B->rmap->n,mat->cmap->N+1,&gid1_lid1);
  for (i=0; i<aij->B->rmap->n; i++) {
    for (j=0; j<B->ilen[i]; j++) {
      PetscInt data,gid1 = aj[B->i[i] + j] + 1;
      PetscTableFind(gid1_lid1,gid1,&data);
      if (!data) {
        /* one-based table: keys are gid+1 and stored values are lid+1, so a return of 0 from PetscTableFind means "not yet seen" */
        PetscTableAdd(gid1_lid1,gid1,++ec,INSERT_VALUES);
      }
    }
  }
  /* form array of columns we need */
  PetscMalloc((ec+1)*sizeof(PetscInt),&garray);
  PetscTableGetHeadPosition(gid1_lid1,&tpos);
  while (tpos) {
    PetscTableGetNext(gid1_lid1,&tpos,&gid,&lid);
    gid--;
    lid--;
    garray[lid] = gid;
  }
  PetscSortInt(ec,garray); /* sort, and rebuild */
  PetscTableRemoveAll(gid1_lid1);
  for (i=0; i<ec; i++) {
    PetscTableAdd(gid1_lid1,garray[i]+1,i+1,INSERT_VALUES);
  }
  /* compact out the extra columns in B */
  for (i=0; i<aij->B->rmap->n; i++) {
    for (j=0; j<B->ilen[i]; j++) {
      PetscInt gid1 = aj[B->i[i] + j] + 1;
      PetscTableFind(gid1_lid1,gid1,&lid);
      lid--;
      aj[B->i[i] + j] = lid;
    }
  }
  aij->B->cmap->n = aij->B->cmap->N = ec;
  PetscLayoutSetUp((aij->B->cmap));
  PetscTableDestroy(&gid1_lid1);
#else
  /* Make an array as long as the number of columns */
  /* mark those columns that are in aij->B */
  PetscMalloc((N+1)*sizeof(PetscInt),&indices);
  PetscMemzero(indices,N*sizeof(PetscInt));
  for (i=0; i<aij->B->rmap->n; i++) {
    for (j=0; j<B->ilen[i]; j++) {
      if (!indices[aj[B->i[i] + j]]) ec++;
      indices[aj[B->i[i] + j]] = 1;
    }
  }

  /* form array of columns we need */
  PetscMalloc((ec+1)*sizeof(PetscInt),&garray);
  ec = 0;
  for (i=0; i<N; i++) {
    if (indices[i]) garray[ec++] = i;
  }

  /* make indices now point into garray */
  for (i=0; i<ec; i++) {
    indices[garray[i]] = i;
  }

  /* compact out the extra columns in B */
  for (i=0; i<aij->B->rmap->n; i++) {
    for (j=0; j<B->ilen[i]; j++) {
      aj[B->i[i] + j] = indices[aj[B->i[i] + j]];
    }
  }
  aij->B->cmap->n = aij->B->cmap->N = ec;
  PetscLayoutSetUp((aij->B->cmap));
  PetscFree(indices);
#endif
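
  /*
     At this point, in either branch, garray[0..ec-1] holds (in increasing order) the global
     indices of the columns referenced by B, and B's stored column indices have been rewritten
     into the compact local numbering 0..ec-1.  For example, if B referenced global columns
     12, 3 and 7, then garray = {3,7,12} and a stored column index of 12 has become 2.
  */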
  /* create local vector that is used to scatter into */
  VecCreateSeq(PETSC_COMM_SELF,ec,&aij->lvec);

  /* create two temporary index sets used to build the scatter-gather */
  /* check for the special case where blocks are communicated, allowing faster VecScatter operations */
  useblockis = PETSC_FALSE;
  if (mat->cmap->bs > 1) {
    PetscInt bs = mat->cmap->bs,ibs,ga;
    if (!(ec % bs)) {
      useblockis = PETSC_TRUE;
      for (i=0; i<ec/bs; i++) {
        if ((ga = garray[ibs = i*bs]) % bs) {
          useblockis = PETSC_FALSE;
          break;
        }
        for (j=1; j<bs; j++) {
          if (garray[ibs+j] != ga+j) {
            useblockis = PETSC_FALSE;
            break;
          }
        }
        if (!useblockis) break;
      }
    }
  }
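
  /*
     Example of the test above with bs = 2: garray = {4,5,10,11} can be communicated as the two
     blocks {4,5} and {10,11} (each block starts on a multiple of bs and is contiguous), whereas
     garray = {4,5,9,10} cannot, since the second block would start at 9, which is not a
     multiple of bs.
  */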
#if defined(PETSC_USE_DEBUG)
  i = (PetscInt)useblockis;
  MPI_Allreduce(&i,&j,1,MPIU_INT,MPI_MIN,((PetscObject)mat)->comm);
  if (j != i) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Use of blocked index sets is not consistent across processes (this process is using blocked)");
#endif

  if (useblockis) {
    PetscInt *ga,bs = mat->cmap->bs,iec = ec/bs;
    if (ec%bs) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"ec=%D bs=%D",ec,bs);
    PetscInfo(mat,"Using block index set to define scatter\n");
    PetscMalloc(iec*sizeof(PetscInt),&ga);
    for (i=0; i<iec; i++) ga[i] = garray[i*bs]/bs;
    ISCreateBlock(((PetscObject)mat)->comm,bs,iec,ga,PETSC_OWN_POINTER,&from);
  } else {
    ISCreateGeneral(((PetscObject)mat)->comm,ec,garray,PETSC_COPY_VALUES,&from);
  }

  ISCreateStride(PETSC_COMM_SELF,ec,0,1,&to);

  /* create temporary global vector to generate scatter context */
  /* This does not allocate the array's memory so it is efficient */
  VecCreateMPIWithArray(((PetscObject)mat)->comm,1,mat->cmap->n,mat->cmap->N,PETSC_NULL,&gvec);

  /* generate the scatter context */
  VecScatterCreate(gvec,from,aij->lvec,to,&aij->Mvctx);
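
  /*
     How the scatter is used (a sketch of the idea, not the actual MatMult_MPIAIJ code):
     during y = mat*x the needed off-process entries of x are gathered into lvec with
     aij->Mvctx, the "diagonal" block aij->A is applied to the owned part of x, and the
     "off-diagonal" block aij->B is applied to lvec and added in, i.e. conceptually

        VecScatterBegin(aij->Mvctx,x,aij->lvec,INSERT_VALUES,SCATTER_FORWARD);
        y_local  = aij->A * x_local;
        VecScatterEnd(aij->Mvctx,x,aij->lvec,INSERT_VALUES,SCATTER_FORWARD);
        y_local += aij->B * aij->lvec;
  */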
  PetscLogObjectParent(mat,aij->Mvctx);
  PetscLogObjectParent(mat,aij->lvec);
  PetscLogObjectParent(mat,from);
  PetscLogObjectParent(mat,to);
  aij->garray = garray;
  PetscLogObjectMemory(mat,(ec+1)*sizeof(PetscInt));
  ISDestroy(&from);
  ISDestroy(&to);
  VecDestroy(&gvec);
  return(0);
}

/*
     Takes the local part of an already assembled MPIAIJ matrix
   and disassembles it. This is to allow new nonzeros into the matrix
   that require more communication in the matrix-vector multiply.
   Thus certain data structures must be rebuilt.

     Kind of slow! But that's what application programmers get when
   they are sloppy.
*/
PetscErrorCode MatDisAssemble_MPIAIJ(Mat A)
{
  Mat_MPIAIJ  *aij  = (Mat_MPIAIJ*)A->data;
  Mat         B     = aij->B,Bnew;
  Mat_SeqAIJ  *Baij = (Mat_SeqAIJ*)B->data;
  PetscInt    i,j,m = B->rmap->n,n = A->cmap->N,col,ct = 0,*garray = aij->garray,*nz,ec;
  PetscScalar v;

  /* free stuff related to matrix-vector multiply */
  VecGetSize(aij->lvec,&ec); /* needed for PetscLogObjectMemory below */
  VecDestroy(&aij->lvec);
  VecScatterDestroy(&aij->Mvctx);
  if (aij->colmap) {
#if defined (PETSC_USE_CTABLE)
    PetscTableDestroy(&aij->colmap);
#else
    PetscFree(aij->colmap);
    PetscLogObjectMemory(A,-aij->B->cmap->n*sizeof(PetscInt));
#endif
  }

  /* make sure that B is assembled so we can access its values */
  MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);
  MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);

  /* invent new B and copy stuff over */
  PetscMalloc((m+1)*sizeof(PetscInt),&nz);
  for (i=0; i<m; i++) {
    nz[i] = Baij->i[i+1] - Baij->i[i];
  }
  MatCreate(PETSC_COMM_SELF,&Bnew);
  MatSetSizes(Bnew,m,n,m,n);
  MatSetBlockSizes(Bnew,A->rmap->bs,A->cmap->bs);
  MatSetType(Bnew,((PetscObject)B)->type_name);
  MatSeqAIJSetPreallocation(Bnew,0,nz);
  ((Mat_SeqAIJ*)Bnew->data)->nonew = Baij->nonew; /* Inherit insertion error options. */
  PetscFree(nz);
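
  /* copy the entries of the old B into Bnew, translating the compacted local column numbers
     back to global column numbers through garray */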
  for (i=0; i<m; i++) {
    for (j=Baij->i[i]; j<Baij->i[i+1]; j++) {
      col = garray[Baij->j[ct]];
      v   = Baij->a[ct++];
      MatSetValues(Bnew,1,&i,1,&col,&v,B->insertmode);
    }
  }
  PetscFree(aij->garray);
  PetscLogObjectMemory(A,-ec*sizeof(PetscInt));
  MatDestroy(&B);
  PetscLogObjectParent(A,Bnew);
  aij->B = Bnew;
  A->was_assembled = PETSC_FALSE;
  return(0);
}

/* ugly stuff added for Glenn; someday we should fix this up */

static PetscInt *auglyrmapd = 0,*auglyrmapo = 0; /* mapping from the local ordering to the "diagonal" and "off-diagonal"
                                                    parts of the local matrix */
static Vec      auglydd = 0,auglyoo = 0;         /* work vectors used to scale the two parts of the local matrix */
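
/*
   Note that these are file-scope statics that are never reset, so the setup below is performed
   only once, by the first matrix that reaches MatDiagonalScaleLocal_MPIAIJ.  After setup,
   auglyrmapd[i] (respectively auglyrmapo[i]) gives, for local column i of the "diagonal"
   (respectively "off-diagonal") block, the position of the corresponding entry in the supplied
   local scale vector.
*/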

PetscErrorCode MatMPIAIJDiagonalScaleLocalSetUp(Mat inA,Vec scale)
{
  Mat_MPIAIJ *ina = (Mat_MPIAIJ*) inA->data; /* access private part of matrix */
  PetscInt   i,n,nt,cstart,cend,no,*garray = ina->garray,*lindices;
  PetscInt   *r_rmapd,*r_rmapo;

  MatGetOwnershipRange(inA,&cstart,&cend);
  MatGetSize(ina->A,PETSC_NULL,&n);
  PetscMalloc((inA->rmap->mapping->n+1)*sizeof(PetscInt),&r_rmapd);
  PetscMemzero(r_rmapd,inA->rmap->mapping->n*sizeof(PetscInt));
  nt = 0;
  for (i=0; i<inA->rmap->mapping->n; i++) {
    if (inA->rmap->mapping->indices[i] >= cstart && inA->rmap->mapping->indices[i] < cend) {
      nt++;
      r_rmapd[i] = inA->rmap->mapping->indices[i] + 1;
    }
  }
  if (nt != n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Hmm nt %D n %D",nt,n);
  PetscMalloc((n+1)*sizeof(PetscInt),&auglyrmapd);
  for (i=0; i<inA->rmap->mapping->n; i++) {
    if (r_rmapd[i]) {
      auglyrmapd[(r_rmapd[i]-1)-cstart] = i;
    }
  }
  PetscFree(r_rmapd);
  VecCreateSeq(PETSC_COMM_SELF,n,&auglydd);

  PetscMalloc((inA->cmap->N+1)*sizeof(PetscInt),&lindices);
  PetscMemzero(lindices,inA->cmap->N*sizeof(PetscInt));
  for (i=0; i<ina->B->cmap->n; i++) {
    lindices[garray[i]] = i+1;
  }
  no = inA->rmap->mapping->n - nt;
  PetscMalloc((inA->rmap->mapping->n+1)*sizeof(PetscInt),&r_rmapo);
  PetscMemzero(r_rmapo,inA->rmap->mapping->n*sizeof(PetscInt));
  nt = 0;
  for (i=0; i<inA->rmap->mapping->n; i++) {
    if (lindices[inA->rmap->mapping->indices[i]]) {
      nt++;
      r_rmapo[i] = lindices[inA->rmap->mapping->indices[i]];
    }
  }
  if (nt > no) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Hmm nt %D no %D",nt,no);
  PetscFree(lindices);
  PetscMalloc((nt+1)*sizeof(PetscInt),&auglyrmapo);
  for (i=0; i<inA->rmap->mapping->n; i++) {
    if (r_rmapo[i]) {
      auglyrmapo[(r_rmapo[i]-1)] = i;
    }
  }
  PetscFree(r_rmapo);
  VecCreateSeq(PETSC_COMM_SELF,nt,&auglyoo);

  return(0);
}

PetscErrorCode MatMPIAIJDiagonalScaleLocal(Mat A,Vec scale)
{
  /* This routine should really be abandoned as it duplicates MatDiagonalScaleLocal */
  PetscTryMethod(A,"MatDiagonalScaleLocal_C",(Mat,Vec),(A,scale));
  return(0);
}
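
/*
   The scale vector passed to these routines is read through the matrix's local-to-global
   mapping (inA->rmap->mapping): it must be a sequential vector in the local (ghosted)
   ordering, with one entry per index in that mapping, since it is indexed below through
   auglyrmapd and auglyrmapo.
*/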

EXTERN_C_BEGIN
PetscErrorCode MatDiagonalScaleLocal_MPIAIJ(Mat A,Vec scale)
{
  Mat_MPIAIJ  *a = (Mat_MPIAIJ*) A->data; /* access private part of matrix */
  PetscInt    n,i;
  PetscScalar *d,*o,*s;

  if (!auglyrmapd) {
    MatMPIAIJDiagonalScaleLocalSetUp(A,scale);
  }

  VecGetArray(scale,&s);

  VecGetLocalSize(auglydd,&n);
  VecGetArray(auglydd,&d);
  for (i=0; i<n; i++) {
    d[i] = s[auglyrmapd[i]]; /* copy "diagonal" (true local) portion of scale into dd vector */
  }
  VecRestoreArray(auglydd,&d);
  /* column scale "diagonal" portion of local matrix */
  MatDiagonalScale(a->A,PETSC_NULL,auglydd);

  VecGetLocalSize(auglyoo,&n);
  VecGetArray(auglyoo,&o);
  for (i=0; i<n; i++) {
    o[i] = s[auglyrmapo[i]]; /* copy "off-diagonal" portion of scale into oo vector */
  }
  VecRestoreArray(scale,&s);
  VecRestoreArray(auglyoo,&o);
  /* column scale "off-diagonal" portion of local matrix */
  MatDiagonalScale(a->B,PETSC_NULL,auglyoo);

  return(0);
}
EXTERN_C_END