Actual source code: baijov.c

petsc-3.3-p5 2012-12-01
  2: /*
  3:    Routines to compute overlapping regions of a parallel MPI matrix
  4:   and to find submatrices that are shared across processors.
  5: */
  6: #include <../src/mat/impls/baij/mpi/mpibaij.h>
  7: #include <petscbt.h>

  9: static PetscErrorCode MatIncreaseOverlap_MPIBAIJ_Local(Mat,PetscInt,char **,PetscInt*,PetscInt**);
 10: static PetscErrorCode MatIncreaseOverlap_MPIBAIJ_Receive(Mat,PetscInt,PetscInt **,PetscInt**,PetscInt*);
 11: extern PetscErrorCode MatGetRow_MPIBAIJ(Mat,PetscInt,PetscInt*,PetscInt**,PetscScalar**);
 12: extern PetscErrorCode MatRestoreRow_MPIBAIJ(Mat,PetscInt,PetscInt*,PetscInt**,PetscScalar**);

 16: PetscErrorCode MatIncreaseOverlap_MPIBAIJ(Mat C,PetscInt imax,IS is[],PetscInt ov)
 17: {
 19:   PetscInt       i,N=C->cmap->N, bs=C->rmap->bs;
 20:   IS             *is_new;

 23:   PetscMalloc(imax*sizeof(IS),&is_new);
 24:   /* Convert the indices into block format */
 25:   ISCompressIndicesGeneral(N,C->rmap->n,bs,imax,is,is_new);
 26:   if (ov < 0){ SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Negative overlap specified\n");}
 27:   for (i=0; i<ov; ++i) {
 28:     MatIncreaseOverlap_MPIBAIJ_Once(C,imax,is_new);
 29:   }
 30:   for (i=0; i<imax; i++) {ISDestroy(&is[i]);}
 31:   ISExpandIndicesGeneral(N,N,bs,imax,is_new,is);
 32:   for (i=0; i<imax; i++) {ISDestroy(&is_new[i]);}
 33:   PetscFree(is_new);
 34:   return(0);
 35: }
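
A minimal usage sketch (not part of this source file) of the public entry point that dispatches to MatIncreaseOverlap_MPIBAIJ above for MPIBAIJ matrices; the matrix A and the n index sets in rows[] are assumed to have been created by the caller:

   #include <petscmat.h>

   /* Grow each of the caller's n index sets by two levels of overlap. */
   PetscErrorCode GrowOverlapSketch(Mat A,PetscInt n,IS rows[])
   {
     PetscErrorCode ierr;
     ierr = MatIncreaseOverlap(A,n,rows,2);CHKERRQ(ierr); /* ov = 2 */
     return 0;
   }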

 37: /*
 38:   Sample message format:
 39:   If a processor A wants processor B to process some elements corresponding
 40:   to index sets is[1], is[5]
 41:   mesg [0] = 2   (no of index sets in the mesg)
 42:   -----------  
 43:   mesg [1] = 1 => is[1]
 44:   mesg [2] = sizeof(is[1]);
 45:   -----------  
 46:   mesg [3] = 5  => is[5]
 47:   mesg [4] = sizeof(is[5]);
 48:   -----------
 49:   mesg [5] 
 50:   mesg [n]  data(is[1])
 51:   -----------  
 52:   mesg[n+1]
 53:   mesg[m]  data(is[5])
 54:   -----------  
 55:   
 56:   Notes:
 57:   nrqs - no of requests sent (or to be sent out)
 58:   nrqr - no of requests received (which have to be or which have been processed)
 59: */
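
To make the sample format above concrete, here is an illustrative packing (hypothetical data) in which is[1] contributes rows {10,42,77} and is[5] contributes rows {3,9}; the header occupies 1 + 2*(no of index sets) entries and the row data follows:

   PetscInt mesg[] = { 2,           /* two index sets in this message */
                       1, 3,        /* is[1]: 3 row indices follow    */
                       5, 2,        /* is[5]: 2 row indices follow    */
                       10, 42, 77,  /* data(is[1])                    */
                       3,  9 };     /* data(is[5])                    */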
 62: PetscErrorCode MatIncreaseOverlap_MPIBAIJ_Once(Mat C,PetscInt imax,IS is[])
 63: {
 64:   Mat_MPIBAIJ    *c = (Mat_MPIBAIJ*)C->data;
 65:   const PetscInt **idx,*idx_i;
 66:   PetscInt       *n,*w3,*w4,**data,len;
 68:   PetscMPIInt    size,rank,tag1,tag2,*w2,*w1,nrqr;
 69:   PetscInt       Mbs,i,j,k,**rbuf,row,proc=-1,nrqs,msz,**outdat,**ptr;
 70:   PetscInt       *ctr,*pa,*tmp,*isz,*isz1,**xdata,**rbuf2,*d_p;
 71:   PetscMPIInt    *onodes1,*olengths1,*onodes2,*olengths2;
 72:   PetscBT        *table;
 73:   MPI_Comm       comm;
 74:   MPI_Request    *s_waits1,*r_waits1,*s_waits2,*r_waits2;
 75:   MPI_Status     *s_status,*recv_status;
 76:   char           *t_p;

 79:   comm   = ((PetscObject)C)->comm;
 80:   size   = c->size;
 81:   rank   = c->rank;
 82:   Mbs    = c->Mbs;

 84:   PetscObjectGetNewTag((PetscObject)C,&tag1);
 85:   PetscObjectGetNewTag((PetscObject)C,&tag2);
 86: 
 87:   PetscMalloc2(imax+1,const PetscInt*,&idx,imax,PetscInt,&n);

 89:   for (i=0; i<imax; i++) {
 90:     ISGetIndices(is[i],&idx[i]);
 91:     ISGetLocalSize(is[i],&n[i]);
 92:   }

 94:   /* evaluate communication - mesg to whom, length of mesg, and buffer space
 95:      required. Based on this, buffers are allocated, and data copied into them */
 96:   PetscMalloc4(size,PetscMPIInt,&w1,size,PetscMPIInt,&w2,size,PetscInt,&w3,size,PetscInt,&w4);
 97:   PetscMemzero(w1,size*sizeof(PetscMPIInt));
 98:   PetscMemzero(w2,size*sizeof(PetscMPIInt));
 99:   PetscMemzero(w3,size*sizeof(PetscInt));
100:   for (i=0; i<imax; i++) {
101:     PetscMemzero(w4,size*sizeof(PetscInt)); /* initialise work vector*/
102:     idx_i = idx[i];
103:     len   = n[i];
104:     for (j=0; j<len; j++) {
105:       row  = idx_i[j];
106:       if (row < 0) {
107:         SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Index set cannot have negative entries");
108:       }
109:       PetscLayoutFindOwner(C->rmap,row*C->rmap->bs,&proc);
110:       w4[proc]++;
111:     }
112:     for (j=0; j<size; j++){
113:       if (w4[j]) { w1[j] += w4[j]; w3[j]++;}
114:     }
115:   }

117:   nrqs     = 0;              /* no of outgoing messages */
118:   msz      = 0;              /* total mesg length (for all procs) */
119:   w1[rank] = 0;              /* no mesg sent to itself */
120:   w3[rank] = 0;
121:   for (i=0; i<size; i++) {
122:     if (w1[i])  {w2[i] = 1; nrqs++;} /* there exists a message to proc i */
123:   }
124:   /* pa - is list of processors to communicate with */
125:   PetscMalloc((nrqs+1)*sizeof(PetscInt),&pa);
126:   for (i=0,j=0; i<size; i++) {
127:     if (w1[i]) {pa[j] = i; j++;}
128:   }

130:   /* Each message would have a header = 1 + 2*(no of IS) + data */
131:   for (i=0; i<nrqs; i++) {
132:     j      = pa[i];
133:     w1[j] += w2[j] + 2*w3[j];
134:     msz   += w1[j];
135:   }
136: 
137:   /* Determine the number of messages to expect, their lengths, and their from-ids */
138:   PetscGatherNumberOfMessages(comm,w2,w1,&nrqr);
139:   PetscGatherMessageLengths(comm,nrqs,nrqr,w1,&onodes1,&olengths1);

141:   /* Now post the Irecvs corresponding to these messages */
142:   PetscPostIrecvInt(comm,tag1,nrqr,onodes1,olengths1,&rbuf,&r_waits1);
143: 
144:   /* Allocate Memory for outgoing messages */
145:   PetscMalloc4(size,PetscInt*,&outdat,size,PetscInt*,&ptr,msz,PetscInt,&tmp,size,PetscInt,&ctr);
146:   PetscMemzero(outdat,size*sizeof(PetscInt*));
147:   PetscMemzero(ptr,size*sizeof(PetscInt*));
148:   {
149:     PetscInt *iptr = tmp,ict  = 0;
150:     for (i=0; i<nrqs; i++) {
151:       j         = pa[i];
152:       iptr     +=  ict;
153:       outdat[j] = iptr;
154:       ict       = w1[j];
155:     }
156:   }

158:   /* Form the outgoing messages */
159:   /*plug in the headers*/
160:   for (i=0; i<nrqs; i++) {
161:     j            = pa[i];
162:     outdat[j][0] = 0;
163:     PetscMemzero(outdat[j]+1,2*w3[j]*sizeof(PetscInt));
164:     ptr[j]       = outdat[j] + 2*w3[j] + 1;
165:   }
166: 
167:   /* Memory for doing local proc's work*/
168:   {
169:     PetscMalloc5(imax,PetscBT,&table, imax,PetscInt*,&data, imax,PetscInt,&isz,
170:                         Mbs*imax,PetscInt,&d_p, (Mbs/PETSC_BITS_PER_BYTE+1)*imax,char,&t_p);
171:     PetscMemzero(table,imax*sizeof(PetscBT));
172:     PetscMemzero(data,imax*sizeof(PetscInt*));
173:     PetscMemzero(isz,imax*sizeof(PetscInt));
174:     PetscMemzero(d_p,Mbs*imax*sizeof(PetscInt));
175:     PetscMemzero(t_p,(Mbs/PETSC_BITS_PER_BYTE+1)*imax*sizeof(char));

177:     for (i=0; i<imax; i++) {
178:       table[i] = t_p + (Mbs/PETSC_BITS_PER_BYTE+1)*i;
179:       data[i]  = d_p + (Mbs)*i;
180:     }
181:   }

183:   /* Parse the IS and update local tables and the outgoing buf with the data*/
184:   {
185:     PetscInt n_i,*data_i,isz_i,*outdat_j,ctr_j;
186:     PetscBT  table_i;

188:     for (i=0; i<imax; i++) {
189:       PetscMemzero(ctr,size*sizeof(PetscInt));
190:       n_i     = n[i];
191:       table_i = table[i];
192:       idx_i   = idx[i];
193:       data_i  = data[i];
194:       isz_i   = isz[i];
195:       for (j=0;  j<n_i; j++) {  /* parse the indices of each IS */
196:         row  = idx_i[j];
197:         PetscLayoutFindOwner(C->rmap,row*C->rmap->bs,&proc);
198:         if (proc != rank) { /* copy to the outgoing buffer */
199:           ctr[proc]++;
200:           *ptr[proc] = row;
201:           ptr[proc]++;
202:         } else { /* Update the local table */
203:           if (!PetscBTLookupSet(table_i,row)) { data_i[isz_i++] = row;}
204:         }
205:       }
206:       /* Update the headers for the current IS */
207:       for (j=0; j<size; j++) { /* Can Optimise this loop by using pa[] */
208:         if ((ctr_j = ctr[j])) {
209:           outdat_j        = outdat[j];
210:           k               = ++outdat_j[0];
211:           outdat_j[2*k]   = ctr_j;
212:           outdat_j[2*k-1] = i;
213:         }
214:       }
215:       isz[i] = isz_i;
216:     }
217:   }
218: 
219:   /*  Now  post the sends */
220:   PetscMalloc((nrqs+1)*sizeof(MPI_Request),&s_waits1);
221:   for (i=0; i<nrqs; ++i) {
222:     j    = pa[i];
223:     MPI_Isend(outdat[j],w1[j],MPIU_INT,j,tag1,comm,s_waits1+i);
224:   }
225: 
226:   /* No longer need the original indices*/
227:   for (i=0; i<imax; ++i) {
228:     ISRestoreIndices(is[i],idx+i);
229:   }
230:   PetscFree2(idx,n);

232:   for (i=0; i<imax; ++i) {
233:     ISDestroy(&is[i]);
234:   }
235: 
236:   /* Do Local work*/
237:   MatIncreaseOverlap_MPIBAIJ_Local(C,imax,table,isz,data);

239:   /* Receive messages*/
240:   PetscMalloc((nrqr+1)*sizeof(MPI_Status),&recv_status);
241:   if (nrqr) {MPI_Waitall(nrqr,r_waits1,recv_status);}
242: 
243:   PetscMalloc((nrqs+1)*sizeof(MPI_Status),&s_status);
244:   if (nrqs) {MPI_Waitall(nrqs,s_waits1,s_status);}

246:   /* Phase 1 sends are complete - deallocate buffers */
247:   PetscFree4(outdat,ptr,tmp,ctr);
248:   PetscFree4(w1,w2,w3,w4);

250:   PetscMalloc((nrqr+1)*sizeof(PetscInt*),&xdata);
251:   PetscMalloc((nrqr+1)*sizeof(PetscInt),&isz1);
252:   MatIncreaseOverlap_MPIBAIJ_Receive(C,nrqr,rbuf,xdata,isz1);
253:   PetscFree(rbuf[0]);
254:   PetscFree(rbuf);

256:   /* Send the data back*/
257:   /* Do a global reduction to know the buffer space req for incoming messages*/
258:   {
259:     PetscMPIInt *rw1;
260: 
261:     PetscMalloc(size*sizeof(PetscInt),&rw1);
262:     PetscMemzero(rw1,size*sizeof(PetscInt));

264:     for (i=0; i<nrqr; ++i) {
265:       proc      = recv_status[i].MPI_SOURCE;
266:       if (proc != onodes1[i]) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"MPI_SOURCE mismatch");
267:       rw1[proc] = isz1[i];
268:     }
269: 
270:     PetscFree(onodes1);
271:     PetscFree(olengths1);

273:     /* Determine the number of messages to expect, their lengths, and their from-ids */
274:     PetscGatherMessageLengths(comm,nrqr,nrqs,rw1,&onodes2,&olengths2);
275:     PetscFree(rw1);
276:   }
277:   /* Now post the Irecvs corresponding to these messages */
278:   PetscPostIrecvInt(comm,tag2,nrqs,onodes2,olengths2,&rbuf2,&r_waits2);
279: 
280:   /*  Now  post the sends */
281:   PetscMalloc((nrqr+1)*sizeof(MPI_Request),&s_waits2);
282:   for (i=0; i<nrqr; ++i) {
283:     j    = recv_status[i].MPI_SOURCE;
284:     MPI_Isend(xdata[i],isz1[i],MPIU_INT,j,tag2,comm,s_waits2+i);
285:   }

287:   /* receive work done on other processors*/
288:   {
289:     PetscMPIInt idex;
290:     PetscInt    is_no,ct1,max,*rbuf2_i,isz_i,*data_i,jmax;
291:     PetscBT     table_i;
292:     MPI_Status  *status2;
293: 
294:     PetscMalloc((PetscMax(nrqr,nrqs)+1)*sizeof(MPI_Status),&status2);
295:     for (i=0; i<nrqs; ++i) {
296:       MPI_Waitany(nrqs,r_waits2,&idex,status2+i);
297:       /* Process the message*/
298:       rbuf2_i = rbuf2[idex];
299:       ct1     = 2*rbuf2_i[0]+1;
300:       jmax    = rbuf2[idex][0];
301:       for (j=1; j<=jmax; j++) {
302:         max     = rbuf2_i[2*j];
303:         is_no   = rbuf2_i[2*j-1];
304:         isz_i   = isz[is_no];
305:         data_i  = data[is_no];
306:         table_i = table[is_no];
307:         for (k=0; k<max; k++,ct1++) {
308:           row = rbuf2_i[ct1];
309:           if (!PetscBTLookupSet(table_i,row)) { data_i[isz_i++] = row;}
310:         }
311:         isz[is_no] = isz_i;
312:       }
313:     }
314:     if (nrqr) {MPI_Waitall(nrqr,s_waits2,status2);}
315:     PetscFree(status2);
316:   }
317: 
318:   for (i=0; i<imax; ++i) {
319:     ISCreateGeneral(PETSC_COMM_SELF,isz[i],data[i],PETSC_COPY_VALUES,is+i);
320:   }
321: 
322: 
323:   PetscFree(onodes2);
324:   PetscFree(olengths2);

326:   PetscFree(pa);
327:   PetscFree(rbuf2[0]);
328:   PetscFree(rbuf2);
329:   PetscFree(s_waits1);
330:   PetscFree(r_waits1);
331:   PetscFree(s_waits2);
332:   PetscFree(r_waits2);
333:   PetscFree5(table,data,isz,d_p,t_p);
334:   PetscFree(s_status);
335:   PetscFree(recv_status);
336:   PetscFree(xdata[0]);
337:   PetscFree(xdata);
338:   PetscFree(isz1);
339:   return(0);
340: }

344: /*  
345:    MatIncreaseOverlap_MPIBAIJ_Local - Called by MatIncreaseOverlap, to do 
346:        the work on the local processor.

348:      Inputs:
349:       C      - MAT_MPIBAIJ;
350:       imax - total no of index sets processed at a time;
351:       table  - an array of char - size = Mbs bits.
352:       
353:      Output:
354:       isz    - array containing the count of the solution elements corresponding
355:                to each index set;
356:       data   - pointer to the solutions
357: */
358: static PetscErrorCode MatIncreaseOverlap_MPIBAIJ_Local(Mat C,PetscInt imax,PetscBT *table,PetscInt *isz,PetscInt **data)
359: {
360:   Mat_MPIBAIJ *c = (Mat_MPIBAIJ*)C->data;
361:   Mat         A = c->A,B = c->B;
362:   Mat_SeqBAIJ *a = (Mat_SeqBAIJ*)A->data,*b = (Mat_SeqBAIJ*)B->data;
363:   PetscInt    start,end,val,max,rstart,cstart,*ai,*aj;
364:   PetscInt    *bi,*bj,*garray,i,j,k,row,*data_i,isz_i;
365:   PetscBT     table_i;

368:   rstart = c->rstartbs;
369:   cstart = c->cstartbs;
370:   ai     = a->i;
371:   aj     = a->j;
372:   bi     = b->i;
373:   bj     = b->j;
374:   garray = c->garray;

376: 
377:   for (i=0; i<imax; i++) {
378:     data_i  = data[i];
379:     table_i = table[i];
380:     isz_i   = isz[i];
381:     for (j=0,max=isz[i]; j<max; j++) {
382:       row   = data_i[j] - rstart;
383:       start = ai[row];
384:       end   = ai[row+1];
385:       for (k=start; k<end; k++) { /* Amat */
386:         val = aj[k] + cstart;
387:         if (!PetscBTLookupSet(table_i,val)) { data_i[isz_i++] = val;}
388:       }
389:       start = bi[row];
390:       end   = bi[row+1];
391:       for (k=start; k<end; k++) { /* Bmat */
392:         val = garray[bj[k]];
393:         if (!PetscBTLookupSet(table_i,val)) { data_i[isz_i++] = val;}
394:       }
395:     }
396:     isz[i] = isz_i;
397:   }
398:   return(0);
399: }
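
The loop above relies on the "lookup-set then append" idiom with a PetscBT bit table to keep each data[i] free of duplicates while it grows. A self-contained sketch of that idiom (assumed names, using <petscbt.h>, which is already included at the top of this file):

   /* Append the block rows in cand[] to out[], skipping any row already
      present; nbs is the number of block rows (bit-table size) and *nout
      is the current length of out[] on entry, the new length on exit. */
   static PetscErrorCode AppendUniqueSketch(PetscInt nbs,const PetscInt *cand,PetscInt ncand,PetscInt *out,PetscInt *nout)
   {
     PetscBT  seen;
     PetscInt j,cnt = *nout;

     PetscBTCreate(nbs,&seen);                      /* all bits start cleared */
     for (j=0; j<ncand; j++) {
       if (!PetscBTLookupSet(seen,cand[j])) out[cnt++] = cand[j]; /* first occurrence */
     }
     PetscBTDestroy(&seen);
     *nout = cnt;
     return(0);
   }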
402: /*     
403:       MatIncreaseOverlap_MPIBAIJ_Receive - Process the received messages,
404:          and return the output

406:          Input:
407:            C    - the matrix
408:            nrqr - no of messages being processed.
409:            rbuf - an array of pointers to the received requests
410:            
411:          Output:
412:            xdata - array of messages to be sent back
413:            isz1  - size of each message

415:   For better efficiency perhaps we should malloc separately each xdata[i],
416: then if a re-malloc is required we need only copy the data for that one row
417: rather than all previous rows, as it is now where a single large chunk of 
418: memory is used.

420: */
421: static PetscErrorCode MatIncreaseOverlap_MPIBAIJ_Receive(Mat C,PetscInt nrqr,PetscInt **rbuf,PetscInt **xdata,PetscInt * isz1)
422: {
423:   Mat_MPIBAIJ    *c = (Mat_MPIBAIJ*)C->data;
424:   Mat            A = c->A,B = c->B;
425:   Mat_SeqBAIJ    *a = (Mat_SeqBAIJ*)A->data,*b = (Mat_SeqBAIJ*)B->data;
427:   PetscInt       rstart,cstart,*ai,*aj,*bi,*bj,*garray,i,j,k;
428:   PetscInt       row,total_sz,ct,ct1,ct2,ct3,mem_estimate,oct2,l,start,end;
429:   PetscInt       val,max1,max2,Mbs,no_malloc =0,*tmp,new_estimate,ctr;
430:   PetscInt       *rbuf_i,kmax,rbuf_0;
431:   PetscBT        xtable;

434:   Mbs    = c->Mbs;
435:   rstart = c->rstartbs;
436:   cstart = c->cstartbs;
437:   ai     = a->i;
438:   aj     = a->j;
439:   bi     = b->i;
440:   bj     = b->j;
441:   garray = c->garray;
442: 
443: 
444:   for (i=0,ct=0,total_sz=0; i<nrqr; ++i) {
445:     rbuf_i  =  rbuf[i];
446:     rbuf_0  =  rbuf_i[0];
447:     ct     += rbuf_0;
448:     for (j=1; j<=rbuf_0; j++) { total_sz += rbuf_i[2*j]; }
449:   }
450: 
451:   if (c->Mbs) max1 = ct*(a->nz +b->nz)/c->Mbs;
452:   else        max1 = 1;
453:   mem_estimate = 3*((total_sz > max1 ? total_sz : max1)+1);
454:   PetscMalloc(mem_estimate*sizeof(PetscInt),&xdata[0]);
455:   ++no_malloc;
456:   PetscBTCreate(Mbs,&xtable);
457:   PetscMemzero(isz1,nrqr*sizeof(PetscInt));
458: 
459:   ct3 = 0;
460:   for (i=0; i<nrqr; i++) { /* for each mesg from proc i */
461:     rbuf_i =  rbuf[i];
462:     rbuf_0 =  rbuf_i[0];
463:     ct1    =  2*rbuf_0+1;
464:     ct2    =  ct1;
465:     ct3    += ct1;
466:     for (j=1; j<=rbuf_0; j++) { /* for each IS from proc i*/
467:       PetscBTMemzero(Mbs,xtable);
468:       oct2 = ct2;
469:       kmax = rbuf_i[2*j];
470:       for (k=0; k<kmax; k++,ct1++) {
471:         row = rbuf_i[ct1];
472:         if (!PetscBTLookupSet(xtable,row)) {
473:           if (!(ct3 < mem_estimate)) {
474:             new_estimate = (PetscInt)(1.5*mem_estimate)+1;
475:             PetscMalloc(new_estimate * sizeof(PetscInt),&tmp);
476:             PetscMemcpy(tmp,xdata[0],mem_estimate*sizeof(PetscInt));
477:             PetscFree(xdata[0]);
478:             xdata[0]     = tmp;
479:             mem_estimate = new_estimate; ++no_malloc;
480:             for (ctr=1; ctr<=i; ctr++) { xdata[ctr] = xdata[ctr-1] + isz1[ctr-1];}
481:           }
482:           xdata[i][ct2++] = row;
483:           ct3++;
484:         }
485:       }
486:       for (k=oct2,max2=ct2; k<max2; k++)  {
487:         row   = xdata[i][k] - rstart;
488:         start = ai[row];
489:         end   = ai[row+1];
490:         for (l=start; l<end; l++) {
491:           val = aj[l] + cstart;
492:           if (!PetscBTLookupSet(xtable,val)) {
493:             if (!(ct3 < mem_estimate)) {
494:               new_estimate = (PetscInt)(1.5*mem_estimate)+1;
495:               PetscMalloc(new_estimate * sizeof(PetscInt),&tmp);
496:               PetscMemcpy(tmp,xdata[0],mem_estimate*sizeof(PetscInt));
497:               PetscFree(xdata[0]);
498:               xdata[0]     = tmp;
499:               mem_estimate = new_estimate; ++no_malloc;
500:               for (ctr=1; ctr<=i; ctr++) { xdata[ctr] = xdata[ctr-1] + isz1[ctr-1];}
501:             }
502:             xdata[i][ct2++] = val;
503:             ct3++;
504:           }
505:         }
506:         start = bi[row];
507:         end   = bi[row+1];
508:         for (l=start; l<end; l++) {
509:           val = garray[bj[l]];
510:           if (!PetscBTLookupSet(xtable,val)) {
511:             if (!(ct3 < mem_estimate)) {
512:               new_estimate = (PetscInt)(1.5*mem_estimate)+1;
513:               PetscMalloc(new_estimate * sizeof(PetscInt),&tmp);
514:               PetscMemcpy(tmp,xdata[0],mem_estimate*sizeof(PetscInt));
515:               PetscFree(xdata[0]);
516:               xdata[0]     = tmp;
517:               mem_estimate = new_estimate; ++no_malloc;
518:               for (ctr =1; ctr <=i; ctr++) { xdata[ctr] = xdata[ctr-1] + isz1[ctr-1];}
519:             }
520:             xdata[i][ct2++] = val;
521:             ct3++;
522:           }
523:         }
524:       }
525:       /* Update the header*/
526:       xdata[i][2*j]   = ct2 - oct2; /* Undo the vector isz1 and use only a var*/
527:       xdata[i][2*j-1] = rbuf_i[2*j-1];
528:     }
529:     xdata[i][0] = rbuf_0;
530:     xdata[i+1]  = xdata[i] + ct2;
531:     isz1[i]     = ct2; /* size of each message */
532:   }
533:   PetscBTDestroy(&xtable);
534:   PetscInfo3(C,"Allocated %D bytes, required %D, no of mallocs = %D\n",mem_estimate,ct3,no_malloc);
535:   return(0);
536: }
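
The block that grows xdata[0] appears three times in the routine above; what it does is enlarge the single chunk by roughly 1.5x, copy the old contents, and re-anchor the pointers of the messages already written using the sizes in isz1[]. A hypothetical helper showing just that step (the routine itself inlines the code):

   static PetscErrorCode GrowXdataSketch(PetscInt **xdata,const PetscInt *isz1,PetscInt i,PetscInt *mem_estimate)
   {
     PetscInt new_estimate = (PetscInt)(1.5*(*mem_estimate))+1,*tmp,ctr;

     PetscMalloc(new_estimate*sizeof(PetscInt),&tmp);
     PetscMemcpy(tmp,xdata[0],(*mem_estimate)*sizeof(PetscInt));
     PetscFree(xdata[0]);
     xdata[0]      = tmp;
     *mem_estimate = new_estimate;
     for (ctr=1; ctr<=i; ctr++) { xdata[ctr] = xdata[ctr-1] + isz1[ctr-1]; }
     return(0);
   }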

540: PetscErrorCode MatGetSubMatrices_MPIBAIJ(Mat C,PetscInt ismax,const IS isrow[],const IS iscol[],MatReuse scall,Mat *submat[])
541: {
542:   IS             *isrow_new,*iscol_new;
543:   Mat_MPIBAIJ    *c = (Mat_MPIBAIJ*)C->data;
545:   PetscInt       nmax,nstages_local,nstages,i,pos,max_no,ncol,nrow,N=C->cmap->N,bs=C->rmap->bs;
546:   PetscBool      colflag,*allcolumns,*allrows;

549:   /* Currently, unsorted column indices will result in inverted column indices in the resulting submatrices. */
550:   for(i = 0; i < ismax; ++i) {
551:     PetscBool sorted;
552:     ISSorted(iscol[i], &sorted);
553:     if(!sorted) SETERRQ1(((PetscObject)iscol[i])->comm, PETSC_ERR_SUP, "Column index set %D not sorted", i);
554:   }
555:   /* The compression and expansion should be avoided. Doesn't point
556:      out errors, might change the indices, hence buggy */
557:   PetscMalloc2(ismax+1,IS,&isrow_new,ismax+1,IS,&iscol_new);
558:   ISCompressIndicesGeneral(N,C->rmap->n,bs,ismax,isrow,isrow_new);
559:   ISCompressIndicesGeneral(N,C->cmap->n,bs,ismax,iscol,iscol_new);

561:   /* Check for special case: each processor gets entire matrix columns */
562:   PetscMalloc2(ismax+1,PetscBool,&allcolumns,ismax+1,PetscBool,&allrows);
563:   for (i=0; i<ismax; i++) {
564:     ISIdentity(iscol[i],&colflag);
565:     ISGetLocalSize(iscol[i],&ncol);
566:     if (colflag && ncol == C->cmap->N){
567:       allcolumns[i] = PETSC_TRUE;
568:     } else {
569:       allcolumns[i] = PETSC_FALSE;
570:     }

572:     ISIdentity(isrow[i],&colflag);
573:     ISGetLocalSize(isrow[i],&nrow);
574:     if (colflag && nrow == C->rmap->N){
575:       allrows[i] = PETSC_TRUE;
576:     } else {
577:       allrows[i] = PETSC_FALSE;
578:     }
579:   }

581:   /* Allocate memory to hold all the submatrices */
582:   if (scall != MAT_REUSE_MATRIX) {
583:     PetscMalloc((ismax+1)*sizeof(Mat),submat);
584:   }
585:   /* Determine the number of stages through which submatrices are done */
586:   nmax          = 20*1000000 / (c->Nbs * sizeof(PetscInt));
587:   if (!nmax) nmax = 1;
588:   nstages_local = ismax/nmax + ((ismax % nmax)?1:0);
589: 
590:   /* Make sure every processor loops through the nstages */
591:   MPI_Allreduce(&nstages_local,&nstages,1,MPIU_INT,MPI_MAX,((PetscObject)C)->comm);
592:   for (i=0,pos=0; i<nstages; i++) {
593:     if (pos+nmax <= ismax) max_no = nmax;
594:     else if (pos == ismax) max_no = 0;
595:     else                   max_no = ismax-pos;
596:     MatGetSubMatrices_MPIBAIJ_local(C,max_no,isrow_new+pos,iscol_new+pos,scall,allrows+pos,allcolumns+pos,*submat+pos);
597:     pos += max_no;
598:   }
599: 
600:   for (i=0; i<ismax; i++) {
601:     ISDestroy(&isrow_new[i]);
602:     ISDestroy(&iscol_new[i]);
603:   }
604:   PetscFree2(isrow_new,iscol_new);
605:   PetscFree2(allcolumns,allrows);
606:   return(0);
607: }
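
A usage sketch (assumed setup) of the generic interface that reaches the routine above: each process extracts one sequential submatrix of the parallel BAIJ matrix C, defined by its own row and column index sets (column indices sorted, as required above):

   PetscErrorCode ExtractOneSubmatrixSketch(Mat C,IS isrow,IS iscol)
   {
     Mat            *submats;
     PetscErrorCode ierr;

     ierr = MatGetSubMatrices(C,1,&isrow,&iscol,MAT_INITIAL_MATRIX,&submats);CHKERRQ(ierr);
     /* ... use the sequential matrix submats[0] ... */
     ierr = MatDestroyMatrices(1,&submats);CHKERRQ(ierr);
     return 0;
   }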

609: #if defined (PETSC_USE_CTABLE)
612: PetscErrorCode PetscGetProc(const PetscInt row, const PetscMPIInt size, const PetscInt proc_gnode[], PetscMPIInt *rank)
613: {
614:   PetscInt    nGlobalNd = proc_gnode[size];
615:   PetscMPIInt fproc = PetscMPIIntCast( (PetscInt)(((float)row * (float)size / (float)nGlobalNd + 0.5)));
616: 
618:   if (fproc > size) fproc = size;
619:   while (row < proc_gnode[fproc] || row >= proc_gnode[fproc+1]) {
620:     if (row < proc_gnode[fproc]) fproc--;
621:     else                         fproc++;
622:   }
623:   *rank = fproc;
624:   return(0);
625: }
626: #endif
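
A worked example of the guess-and-walk above (illustrative values): with size = 3 and proc_gnode = {0,4,8,12}, looking up row = 9 gives nGlobalNd = 12 and an initial guess fproc = (PetscInt)(9*3/12.0 + 0.5) = 2; since proc_gnode[2] = 8 <= 9 < proc_gnode[3] = 12, the while loop exits immediately with *rank = 2.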

628: /* -------------------------------------------------------------------------*/
629: /* This code is used for BAIJ and SBAIJ matrices (unfortunate dependency) */
632: PetscErrorCode MatGetSubMatrices_MPIBAIJ_local(Mat C,PetscInt ismax,const IS isrow[],const IS iscol[],MatReuse scall,PetscBool *allrows,PetscBool *allcolumns,Mat *submats)
633: {
634:   Mat_MPIBAIJ    *c = (Mat_MPIBAIJ*)C->data;
635:   Mat            A = c->A;
636:   Mat_SeqBAIJ    *a = (Mat_SeqBAIJ*)A->data,*b = (Mat_SeqBAIJ*)c->B->data,*mat;
637:   const PetscInt **irow,**icol,*irow_i;
638:   PetscInt       *nrow,*ncol,*w3,*w4,start;
640:   PetscMPIInt    size,tag0,tag1,tag2,tag3,*w1,*w2,nrqr,idex,end,proc;
641:   PetscInt       **sbuf1,**sbuf2,rank,i,j,k,l,ct1,ct2,**rbuf1,row;
642:   PetscInt       nrqs,msz,**ptr,*req_size,*ctr,*pa,*tmp,tcol;
643:   PetscInt       **rbuf3,*req_source,**sbuf_aj,**rbuf2,max1,max2;
644:   PetscInt       **lens,is_no,ncols,*cols,mat_i,*mat_j,tmp2,jmax;
645:   PetscInt       ctr_j,*sbuf1_j,*sbuf_aj_i,*rbuf1_i,kmax,*lens_i;
646:   PetscInt       bs=C->rmap->bs,bs2=c->bs2,*a_j=a->j,*b_j=b->j,*cworkA,*cworkB;
647:   PetscInt       cstart = c->cstartbs,nzA,nzB,*a_i=a->i,*b_i=b->i,imark;
648:   PetscInt       *bmap = c->garray,ctmp,rstart=c->rstartbs;
649:   MPI_Request    *s_waits1,*r_waits1,*s_waits2,*r_waits2,*r_waits3,*s_waits3;
650:   MPI_Status     *r_status1,*r_status2,*s_status1,*s_status3,*s_status2,*r_status3;
651:   MPI_Comm       comm;
652:   PetscBool      flag;
653:   PetscMPIInt    *onodes1,*olengths1;
654:   PetscBool      ijonly=c->ijonly; /* private flag indicates only matrix data structures are requested */
655:   /* variables below are used for the matrix numerical values - case of !ijonly */
656:   MPI_Request    *r_waits4,*s_waits4;
657:   MPI_Status     *r_status4,*s_status4;
658:   MatScalar      **rbuf4,**sbuf_aa,*vals,*mat_a = PETSC_NULL,*sbuf_aa_i,*vworkA = PETSC_NULL,*vworkB = PETSC_NULL;
659:   MatScalar      *a_a=a->a,*b_a=b->a;

661: #if defined (PETSC_USE_CTABLE)
662:   PetscInt       tt;
663:   PetscTable     *rmap,*cmap,rmap_i,cmap_i=PETSC_NULL;
664: #else
665:   PetscInt       **cmap,*cmap_i=PETSC_NULL,*rtable,*rmap_i,**rmap, Mbs = c->Mbs;
666: #endif

669:   comm   = ((PetscObject)C)->comm;
670:   tag0   = ((PetscObject)C)->tag;
671:   size   = c->size;
672:   rank   = c->rank;
673: 
674:   /* Get some new tags to keep the communication clean */
675:   PetscObjectGetNewTag((PetscObject)C,&tag1);
676:   PetscObjectGetNewTag((PetscObject)C,&tag2);
677:   PetscObjectGetNewTag((PetscObject)C,&tag3);

679: #if defined(PETSC_USE_CTABLE)
680:   PetscMalloc4(ismax,const PetscInt*,&irow,ismax,const PetscInt*,&icol,ismax,PetscInt,&nrow,ismax,PetscInt,&ncol);
681: #else 
682:   PetscMalloc5(ismax,const PetscInt*,&irow,ismax,const PetscInt*,&icol,ismax,PetscInt,&nrow,ismax,PetscInt,&ncol,Mbs+1,PetscInt,&rtable);
683:   /* Create hash table for the mapping :row -> proc*/
684:   for (i=0,j=0; i<size; i++) {
685:     jmax = C->rmap->range[i+1]/bs;
686:     for (; j<jmax; j++) {
687:       rtable[j] = i;
688:     }
689:   }
690: #endif
691: 
692:   for (i=0; i<ismax; i++) {
693:     if (allrows[i]){
694:       irow[i] = PETSC_NULL;
695:       nrow[i] = C->rmap->N/bs;
696:     } else {
697:       ISGetIndices(isrow[i],&irow[i]);
698:       ISGetLocalSize(isrow[i],&nrow[i]);
699:     }

701:     if (allcolumns[i]){
702:       icol[i] = PETSC_NULL;
703:       ncol[i] = C->cmap->N/bs;
704:     } else {
705:       ISGetIndices(iscol[i],&icol[i]);
706:       ISGetLocalSize(iscol[i],&ncol[i]);
707:     }
708:   }

710:   /* evaluate communication - mesg to whom, length of mesg, and buffer space
711:      required. Based on this, buffers are allocated, and data copied into them */
712:   PetscMalloc4(size,PetscMPIInt,&w1,size,PetscMPIInt,&w2,size,PetscInt,&w3,size,PetscInt,&w4);
713:   PetscMemzero(w1,size*sizeof(PetscMPIInt));
714:   PetscMemzero(w2,size*sizeof(PetscMPIInt));
715:   PetscMemzero(w3,size*sizeof(PetscInt));
716:   for (i=0; i<ismax; i++) {
717:     PetscMemzero(w4,size*sizeof(PetscInt)); /* initialise work vector*/
718:     jmax   = nrow[i];
719:     irow_i = irow[i];
720:     for (j=0; j<jmax; j++) {
721:       if (allrows[i]) {
722:         row = j;
723:       } else {
724:         row  = irow_i[j];
725:       }
726: #if defined (PETSC_USE_CTABLE)
727:       PetscGetProc(row,size,c->rangebs,&proc);
728: #else
729:       proc = rtable[row];
730: #endif
731:       w4[proc]++;
732:     }
733:     for (j=0; j<size; j++) {
734:       if (w4[j]) { w1[j] += w4[j];  w3[j]++;}
735:     }
736:   }

738:   nrqs     = 0;              /* no of outgoing messages */
739:   msz      = 0;              /* total mesg length for all procs */
740:   w1[rank] = 0;              /* no mesg sent to itself */
741:   w3[rank] = 0;
742:   for (i=0; i<size; i++) {
743:     if (w1[i])  { w2[i] = 1; nrqs++;} /* there exists a message to proc i */
744:   }
745:   PetscMalloc((nrqs+1)*sizeof(PetscInt),&pa); /*(proc -array)*/
746:   for (i=0,j=0; i<size; i++) {
747:     if (w1[i]) { pa[j] = i; j++; }
748:   }

750:   /* Each message would have a header = 1 + 2*(no of IS) + data */
751:   for (i=0; i<nrqs; i++) {
752:     j     = pa[i];
753:     w1[j] += w2[j] + 2* w3[j];
754:     msz   += w1[j];
755:   }

757:   /* Determine the number of messages to expect, their lengths, and their from-ids */
758:   PetscGatherNumberOfMessages(comm,w2,w1,&nrqr);
759:   PetscGatherMessageLengths(comm,nrqs,nrqr,w1,&onodes1,&olengths1);

761:   /* Now post the Irecvs corresponding to these messages */
762:   PetscPostIrecvInt(comm,tag0,nrqr,onodes1,olengths1,&rbuf1,&r_waits1);
763: 
764:   PetscFree(onodes1);
765:   PetscFree(olengths1);

767:   /* Allocate Memory for outgoing messages */
768:   PetscMalloc4(size,PetscInt*,&sbuf1,size,PetscInt*,&ptr,2*msz,PetscInt,&tmp,size,PetscInt,&ctr);
769:   PetscMemzero(sbuf1,size*sizeof(PetscInt*));
770:   PetscMemzero(ptr,size*sizeof(PetscInt*));
771:   {
772:     PetscInt *iptr = tmp,ict = 0;
773:     for (i=0; i<nrqs; i++) {
774:       j         = pa[i];
775:       iptr     += ict;
776:       sbuf1[j]  = iptr;
777:       ict       = w1[j];
778:     }
779:   }

781:   /* Form the outgoing messages */
782:   /* Initialise the header space */
783:   for (i=0; i<nrqs; i++) {
784:     j           = pa[i];
785:     sbuf1[j][0] = 0;
786:     PetscMemzero(sbuf1[j]+1,2*w3[j]*sizeof(PetscInt));
787:     ptr[j]      = sbuf1[j] + 2*w3[j] + 1;
788:   }
789: 
790:   /* Parse the isrow and copy data into outbuf */
791:   for (i=0; i<ismax; i++) {
792:     PetscMemzero(ctr,size*sizeof(PetscInt));
793:     irow_i = irow[i];
794:     jmax   = nrow[i];
795:     for (j=0; j<jmax; j++) {  /* parse the indices of each IS */
796:       if (allrows[i]){
797:         row = j;
798:       } else {
799:         row  = irow_i[j];
800:       }
801: #if defined (PETSC_USE_CTABLE)
802:       PetscGetProc(row,size,c->rangebs,&proc);
803: #else
804:       proc = rtable[row];
805: #endif
806:       if (proc != rank) { /* copy to the outgoing buf*/
807:         ctr[proc]++;
808:         *ptr[proc] = row;
809:         ptr[proc]++;
810:       }
811:     }
812:     /* Update the headers for the current IS */
813:     for (j=0; j<size; j++) { /* Can Optimise this loop too */
814:       if ((ctr_j = ctr[j])) {
815:         sbuf1_j        = sbuf1[j];
816:         k              = ++sbuf1_j[0];
817:         sbuf1_j[2*k]   = ctr_j;
818:         sbuf1_j[2*k-1] = i;
819:       }
820:     }
821:   }

823:   /*  Now  post the sends */
824:   PetscMalloc((nrqs+1)*sizeof(MPI_Request),&s_waits1);
825:   for (i=0; i<nrqs; ++i) {
826:     j = pa[i];
827:     MPI_Isend(sbuf1[j],w1[j],MPIU_INT,j,tag0,comm,s_waits1+i);
828:   }

830:   /* Post Receives to capture the buffer size */
831:   PetscMalloc((nrqs+1)*sizeof(MPI_Request),&r_waits2);
832:   PetscMalloc((nrqs+1)*sizeof(PetscInt*),&rbuf2);
833:   rbuf2[0] = tmp + msz;
834:   for (i=1; i<nrqs; ++i) {
835:     j        = pa[i];
836:     rbuf2[i] = rbuf2[i-1]+w1[pa[i-1]];
837:   }
838:   for (i=0; i<nrqs; ++i) {
839:     j    = pa[i];
840:     MPI_Irecv(rbuf2[i],w1[j],MPIU_INT,j,tag1,comm,r_waits2+i);
841:   }

843:   /* Send to other procs the buf size they should allocate */

845:   /* Receive messages*/
846:   PetscMalloc((nrqr+1)*sizeof(MPI_Request),&s_waits2);
847:   PetscMalloc((nrqr+1)*sizeof(MPI_Status),&r_status1);
848:   PetscMalloc3(nrqr+1,PetscInt*,&sbuf2,nrqr,PetscInt,&req_size,nrqr,PetscInt,&req_source);
849:   {
850:     Mat_SeqBAIJ *sA = (Mat_SeqBAIJ*)c->A->data,*sB = (Mat_SeqBAIJ*)c->B->data;
851:     PetscInt    *sAi = sA->i,*sBi = sB->i,id,*sbuf2_i;

853:     for (i=0; i<nrqr; ++i) {
854:       MPI_Waitany(nrqr,r_waits1,&idex,r_status1+i);
855:       req_size[idex] = 0;
856:       rbuf1_i         = rbuf1[idex];
857:       start           = 2*rbuf1_i[0] + 1;
858:       MPI_Get_count(r_status1+i,MPIU_INT,&end);
859:       PetscMalloc(end*sizeof(PetscInt),&sbuf2[idex]);
860:       sbuf2_i         = sbuf2[idex];
861:       for (j=start; j<end; j++) {
862:         id               = rbuf1_i[j] - rstart;
863:         ncols            = sAi[id+1] - sAi[id] + sBi[id+1] - sBi[id];
864:         sbuf2_i[j]       = ncols;
865:         req_size[idex] += ncols;
866:       }
867:       req_source[idex] = r_status1[i].MPI_SOURCE;
868:       /* form the header */
869:       sbuf2_i[0]   = req_size[idex];
870:       for (j=1; j<start; j++) { sbuf2_i[j] = rbuf1_i[j]; }
871:       MPI_Isend(sbuf2_i,end,MPIU_INT,req_source[idex],tag1,comm,s_waits2+i);
872:     }
873:   }
874:   PetscFree(r_status1);
875:   PetscFree(r_waits1);

877:   /*  recv buffer sizes */
878:   /* Receive messages*/
879:   PetscMalloc((nrqs+1)*sizeof(PetscInt*),&rbuf3);
880:   PetscMalloc((nrqs+1)*sizeof(MPI_Request),&r_waits3);
881:   PetscMalloc((nrqs+1)*sizeof(MPI_Status),&r_status2);
882:   if (!ijonly){
883:     PetscMalloc((nrqs+1)*sizeof(MatScalar*),&rbuf4);
884:     PetscMalloc((nrqs+1)*sizeof(MPI_Request),&r_waits4);
885:   }

887:   for (i=0; i<nrqs; ++i) {
888:     MPI_Waitany(nrqs,r_waits2,&idex,r_status2+i);
889:     PetscMalloc(rbuf2[idex][0]*sizeof(PetscInt),&rbuf3[idex]);
890:     MPI_Irecv(rbuf3[idex],rbuf2[idex][0],MPIU_INT,r_status2[i].MPI_SOURCE,tag2,comm,r_waits3+idex);
891:     if (!ijonly){
892:       PetscMalloc(rbuf2[idex][0]*bs2*sizeof(MatScalar),&rbuf4[idex]);
893:       MPI_Irecv(rbuf4[idex],rbuf2[idex][0]*bs2,MPIU_MATSCALAR,r_status2[i].MPI_SOURCE,tag3,comm,r_waits4+idex);
894:     }
895:   }
896:   PetscFree(r_status2);
897:   PetscFree(r_waits2);
898: 
899:   /* Wait on sends1 and sends2 */
900:   PetscMalloc((nrqs+1)*sizeof(MPI_Status),&s_status1);
901:   PetscMalloc((nrqr+1)*sizeof(MPI_Status),&s_status2);

903:   if (nrqs) {MPI_Waitall(nrqs,s_waits1,s_status1);}
904:   if (nrqr) {MPI_Waitall(nrqr,s_waits2,s_status2);}
905:   PetscFree(s_status1);
906:   PetscFree(s_status2);
907:   PetscFree(s_waits1);
908:   PetscFree(s_waits2);

910:   /* Now allocate buffers for a->j, and send them off */
911:   PetscMalloc((nrqr+1)*sizeof(PetscInt*),&sbuf_aj);
912:   for (i=0,j=0; i<nrqr; i++) j += req_size[i];
913:   PetscMalloc((j+1)*sizeof(PetscInt),&sbuf_aj[0]);
914:   for (i=1; i<nrqr; i++)  sbuf_aj[i] = sbuf_aj[i-1] + req_size[i-1];
915: 
916:   PetscMalloc((nrqr+1)*sizeof(MPI_Request),&s_waits3);
917:   {
918:      for (i=0; i<nrqr; i++) {
919:       rbuf1_i   = rbuf1[i];
920:       sbuf_aj_i = sbuf_aj[i];
921:       ct1       = 2*rbuf1_i[0] + 1;
922:       ct2       = 0;
923:       for (j=1,max1=rbuf1_i[0]; j<=max1; j++) {
924:         kmax = rbuf1[i][2*j];
925:         for (k=0; k<kmax; k++,ct1++) {
926:           row    = rbuf1_i[ct1] - rstart;
927:           nzA    = a_i[row+1] - a_i[row];     nzB = b_i[row+1] - b_i[row];
928:           ncols  = nzA + nzB;
929:           cworkA = a_j + a_i[row]; cworkB = b_j + b_i[row];

931:           /* load the column indices for this row into cols*/
932:           cols  = sbuf_aj_i + ct2;
933:           for (l=0; l<nzB; l++) {
934:             if ((ctmp = bmap[cworkB[l]]) < cstart)  cols[l] = ctmp;
935:             else break;
936:           }
937:           imark = l;
938:           for (l=0; l<nzA; l++)   cols[imark+l] = cstart + cworkA[l];
939:           for (l=imark; l<nzB; l++) cols[nzA+l] = bmap[cworkB[l]];
940:           ct2 += ncols;
941:         }
942:       }
943:       MPI_Isend(sbuf_aj_i,req_size[i],MPIU_INT,req_source[i],tag2,comm,s_waits3+i);
944:     }
945:   }
946:   PetscMalloc((nrqs+1)*sizeof(MPI_Status),&r_status3);
947:   PetscMalloc((nrqr+1)*sizeof(MPI_Status),&s_status3);

949:   /* Allocate buffers for a->a, and send them off */
950:   if (!ijonly){
951:     PetscMalloc((nrqr+1)*sizeof(MatScalar *),&sbuf_aa);
952:     for (i=0,j=0; i<nrqr; i++) j += req_size[i];
953:     PetscMalloc((j+1)*bs2*sizeof(MatScalar),&sbuf_aa[0]);
954:     for (i=1; i<nrqr; i++)  sbuf_aa[i] = sbuf_aa[i-1] + req_size[i-1]*bs2;
955: 
956:     PetscMalloc((nrqr+1)*sizeof(MPI_Request),&s_waits4);
957:     {
958:       for (i=0; i<nrqr; i++) {
959:         rbuf1_i   = rbuf1[i];
960:         sbuf_aa_i = sbuf_aa[i];
961:         ct1       = 2*rbuf1_i[0]+1;
962:         ct2       = 0;
963:         for (j=1,max1=rbuf1_i[0]; j<=max1; j++) {
964:           kmax = rbuf1_i[2*j];
965:           for (k=0; k<kmax; k++,ct1++) {
966:             row    = rbuf1_i[ct1] - rstart;
967:             nzA    = a_i[row+1] - a_i[row];     nzB = b_i[row+1] - b_i[row];
968:             ncols  = nzA + nzB;
969:             cworkA = a_j + a_i[row];     cworkB = b_j + b_i[row];
970:             vworkA = a_a + a_i[row]*bs2; vworkB = b_a + b_i[row]*bs2;

972:             /* load the column values for this row into vals*/
973:             vals  = sbuf_aa_i+ct2*bs2;
974:             for (l=0; l<nzB; l++) {
975:               if ((bmap[cworkB[l]]) < cstart) {
976:                 PetscMemcpy(vals+l*bs2,vworkB+l*bs2,bs2*sizeof(MatScalar));
977:               }
978:               else break;
979:             }
980:             imark = l;
981:             for (l=0; l<nzA; l++) {
982:               PetscMemcpy(vals+(imark+l)*bs2,vworkA+l*bs2,bs2*sizeof(MatScalar));
983:             }
984:             for (l=imark; l<nzB; l++) {
985:               PetscMemcpy(vals+(nzA+l)*bs2,vworkB+l*bs2,bs2*sizeof(MatScalar));
986:             }
987:             ct2 += ncols;
988:           }
989:         }
990:         MPI_Isend(sbuf_aa_i,req_size[i]*bs2,MPIU_MATSCALAR,req_source[i],tag3,comm,s_waits4+i);
991:       }
992:     }
993:     PetscMalloc((nrqs+1)*sizeof(MPI_Status),&r_status4);
994:     PetscMalloc((nrqr+1)*sizeof(MPI_Status),&s_status4);
995:   }
996:   PetscFree(rbuf1[0]);
997:   PetscFree(rbuf1);

999:   /* Form the matrix */
1000:   /* create col map: global col of C -> local col of submatrices */
1001:   {
1002:     const PetscInt *icol_i;
1003: #if defined (PETSC_USE_CTABLE)
1004:     PetscMalloc((1+ismax)*sizeof(PetscTable),&cmap);
1005:     for (i=0; i<ismax; i++) {
1006:       if (!allcolumns[i]){
1007:         PetscTableCreate(ncol[i]+1,c->Nbs+1,&cmap[i]);
1008:         jmax   = ncol[i];
1009:         icol_i = icol[i];
1010:         cmap_i = cmap[i];
1011:         for (j=0; j<jmax; j++) {
1012:           PetscTableAdd(cmap_i,icol_i[j]+1,j+1,INSERT_VALUES);
1013:         }
1014:       } else {
1015:         cmap[i] = PETSC_NULL;
1016:       }
1017:     }
1018: #else
1019:     PetscMalloc(ismax*sizeof(PetscInt*),&cmap);
1020:     for (i=0; i<ismax; i++) {
1021:       if (!allcolumns[i]){
1022:         PetscMalloc(c->Nbs*sizeof(PetscInt),&cmap[i]);
1023:         PetscMemzero(cmap[i],c->Nbs*sizeof(PetscInt));
1024:         jmax   = ncol[i];
1025:         icol_i = icol[i];
1026:         cmap_i = cmap[i];
1027:         for (j=0; j<jmax; j++) {
1028:           cmap_i[icol_i[j]] = j+1;
1029:         }
1030:       } else { /* allcolumns[i] */
1031:         cmap[i] = PETSC_NULL;
1032:       }
1033:     }
1034: #endif 
1035:   }

1037:   /* Create lens which is required for MatCreate... */
1038:   for (i=0,j=0; i<ismax; i++) { j += nrow[i]; }
1039:   PetscMalloc((1+ismax)*sizeof(PetscInt*)+ j*sizeof(PetscInt),&lens);
1040:   lens[0] = (PetscInt*)(lens + ismax);
1041:   PetscMemzero(lens[0],j*sizeof(PetscInt));
1042:   for (i=1; i<ismax; i++) { lens[i] = lens[i-1] + nrow[i-1]; }
1043: 
1044:   /* Update lens from local data */
1045:   for (i=0; i<ismax; i++) {
1046:     jmax   = nrow[i];
1047:     if (!allcolumns[i]) cmap_i = cmap[i];
1048:     irow_i = irow[i];
1049:     lens_i = lens[i];
1050:     for (j=0; j<jmax; j++) {
1051:       if (allrows[i]){
1052:         row = j;
1053:       } else {
1054:         row  = irow_i[j];
1055:       }
1056: #if defined (PETSC_USE_CTABLE)
1057:       PetscGetProc(row,size,c->rangebs,&proc);
1058: #else
1059:       proc = rtable[row];
1060: #endif
1061:       if (proc == rank) {
1062:         /* Get indices from matA and then from matB */
1063:         row    = row - rstart;
1064:         nzA    = a_i[row+1] - a_i[row];     nzB = b_i[row+1] - b_i[row];
1065:         cworkA =  a_j + a_i[row]; cworkB = b_j + b_i[row];
1066:         if (!allcolumns[i]){
1067: #if defined (PETSC_USE_CTABLE)
1068:           for (k=0; k<nzA; k++) {
1069:             PetscTableFind(cmap_i,cstart+cworkA[k]+1,&tt);
1070:             if (tt) { lens_i[j]++; }
1071:           }
1072:           for (k=0; k<nzB; k++) {
1073:             PetscTableFind(cmap_i,bmap[cworkB[k]]+1,&tt);
1074:             if (tt) { lens_i[j]++; }
1075:           }
1076: 
1077: #else
1078:           for (k=0; k<nzA; k++) {
1079:             if (cmap_i[cstart + cworkA[k]]) { lens_i[j]++; }
1080:           }
1081:           for (k=0; k<nzB; k++) {
1082:             if (cmap_i[bmap[cworkB[k]]]) { lens_i[j]++; }
1083:           }
1084: #endif
1085:         } else {/* allcolumns */
1086:           lens_i[j] = nzA + nzB;
1087:         }
1088:       }
1089:     }
1090:   }
1091: #if defined (PETSC_USE_CTABLE)
1092:   /* Create row map*/
1093:   PetscMalloc((1+ismax)*sizeof(PetscTable),&rmap);
1094:   for (i=0; i<ismax; i++){
1095:     PetscTableCreate(nrow[i]+1,c->Mbs+1,&rmap[i]);
1096:   }
1097: #else
1098:   /* Create row map*/
1099:   PetscMalloc((1+ismax)*sizeof(PetscInt*)+ ismax*Mbs*sizeof(PetscInt),&rmap);
1100:   rmap[0] = (PetscInt*)(rmap + ismax);
1101:   PetscMemzero(rmap[0],ismax*Mbs*sizeof(PetscInt));
1102:   for (i=1; i<ismax; i++) { rmap[i] = rmap[i-1] + Mbs;}
1103: #endif
1104:   for (i=0; i<ismax; i++) {
1105:     irow_i = irow[i];
1106:     jmax   = nrow[i];
1107: #if defined (PETSC_USE_CTABLE)
1108:     rmap_i = rmap[i];
1109:     for (j=0; j<jmax; j++) {
1110:       if (allrows[i]){
1111:         PetscTableAdd(rmap_i,j+1,j+1,INSERT_VALUES);
1112:       } else {
1113:         PetscTableAdd(rmap_i,irow_i[j]+1,j+1,INSERT_VALUES);
1114:       }
1115:     }
1116: #else
1117:     rmap_i = rmap[i];
1118:     for (j=0; j<jmax; j++) {
1119:       if (allrows[i]){
1120:         rmap_i[j] = j;
1121:       } else {
1122:         rmap_i[irow_i[j]] = j;
1123:       }
1124:     }
1125: #endif
1126:   }

1128:   /* Update lens from offproc data */
1129:   {
1130:     PetscInt    *rbuf2_i,*rbuf3_i,*sbuf1_i;
1131:     PetscMPIInt ii;

1133:     for (tmp2=0; tmp2<nrqs; tmp2++) {
1134:       MPI_Waitany(nrqs,r_waits3,&ii,r_status3+tmp2);
1135:       idex   = pa[ii];
1136:       sbuf1_i = sbuf1[idex];
1137:       jmax    = sbuf1_i[0];
1138:       ct1     = 2*jmax+1;
1139:       ct2     = 0;
1140:       rbuf2_i = rbuf2[ii];
1141:       rbuf3_i = rbuf3[ii];
1142:       for (j=1; j<=jmax; j++) {
1143:         is_no   = sbuf1_i[2*j-1];
1144:         max1    = sbuf1_i[2*j];
1145:         lens_i  = lens[is_no];
1146:         if (!allcolumns[is_no]) cmap_i = cmap[is_no];
1147:         rmap_i = rmap[is_no];
1148:         for (k=0; k<max1; k++,ct1++) {
1149: #if defined (PETSC_USE_CTABLE)
1150:           PetscTableFind(rmap_i,sbuf1_i[ct1]+1,&row);
1151:           row--;
1152:           if (row < 0) { SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"row not found in table"); }
1153: #else
1154:           row  = rmap_i[sbuf1_i[ct1]]; /* the val in the new matrix to be */
1155: #endif
1156:           max2 = rbuf2_i[ct1];
1157:           for (l=0; l<max2; l++,ct2++) {
1158:             if (!allcolumns[is_no]){
1159: #if defined (PETSC_USE_CTABLE)
1160:               PetscTableFind(cmap_i,rbuf3_i[ct2]+1,&tt);
1161:               if (tt) {
1162:                 lens_i[row]++;
1163:               }
1164: #else
1165:               if (cmap_i[rbuf3_i[ct2]]) {
1166:                 lens_i[row]++;
1167:               }
1168: #endif
1169:             } else { /* allcolumns */
1170:               lens_i[row]++;
1171:             }
1172:           }
1173:         }
1174:       }
1175:     }
1176:   }
1177:   PetscFree(r_status3);
1178:   PetscFree(r_waits3);
1179:   if (nrqr) {MPI_Waitall(nrqr,s_waits3,s_status3);}
1180:   PetscFree(s_status3);
1181:   PetscFree(s_waits3);

1183:   /* Create the submatrices */
1184:   if (scall == MAT_REUSE_MATRIX) {
1185:     if (ijonly) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP," MAT_REUSE_MATRIX and ijonly is not supported yet");
1186:     /*
1187:         Assumes new rows are same length as the old rows, hence bug!
1188:     */
1189:     for (i=0; i<ismax; i++) {
1190:       mat = (Mat_SeqBAIJ *)(submats[i]->data);
1191:       if ((mat->mbs != nrow[i]) || (mat->nbs != ncol[i] || C->rmap->bs != bs)) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Cannot reuse matrix. wrong size");
1192:       PetscMemcmp(mat->ilen,lens[i],mat->mbs *sizeof(PetscInt),&flag);
1193:       if (!flag) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Cannot reuse matrix. wrong no of nonzeros");
1194:       /* Initialize the matrix as if empty */
1195:       PetscMemzero(mat->ilen,mat->mbs*sizeof(PetscInt));
1196:       submats[i]->factortype = C->factortype;
1197:     }
1198:   } else {
1199:     PetscInt bs_tmp;
1200:     if (ijonly){
1201:       bs_tmp = 1;
1202:     } else {
1203:       bs_tmp = bs;
1204:     }
1205:     for (i=0; i<ismax; i++) {
1206:       MatCreate(PETSC_COMM_SELF,submats+i);
1207:       MatSetSizes(submats[i],nrow[i]*bs_tmp,ncol[i]*bs_tmp,nrow[i]*bs_tmp,ncol[i]*bs_tmp);
1208:       MatSetType(submats[i],((PetscObject)A)->type_name);
1209:       MatSeqBAIJSetPreallocation(submats[i],bs_tmp,0,lens[i]);
1210:       MatSeqSBAIJSetPreallocation(submats[i],bs_tmp,0,lens[i]); /* this subroutine is used by SBAIJ routines */
1211:     }
1212:   }

1214:   /* Assemble the matrices */
1215:   /* First assemble the local rows */
1216:   {
1217:     PetscInt  ilen_row,*imat_ilen,*imat_j,*imat_i;
1218:     MatScalar *imat_a = PETSC_NULL;
1219: 
1220:     for (i=0; i<ismax; i++) {
1221:       mat       = (Mat_SeqBAIJ*)submats[i]->data;
1222:       imat_ilen = mat->ilen;
1223:       imat_j    = mat->j;
1224:       imat_i    = mat->i;
1225:       if (!ijonly) imat_a = mat->a;
1226:       if (!allcolumns[i]) cmap_i = cmap[i];
1227:       rmap_i = rmap[i];
1228:       irow_i    = irow[i];
1229:       jmax      = nrow[i];
1230:       for (j=0; j<jmax; j++) {
1231:         if (allrows[i]){
1232:           row = j;
1233:         } else {
1234:           row      = irow_i[j];
1235:         }
1236: #if defined (PETSC_USE_CTABLE)
1237:         PetscGetProc(row,size,c->rangebs,&proc);
1238: #else
1239:         proc = rtable[row];
1240: #endif
1241:         if (proc == rank) {
1242:           row      = row - rstart;
1243:           nzA      = a_i[row+1] - a_i[row];
1244:           nzB      = b_i[row+1] - b_i[row];
1245:           cworkA   = a_j + a_i[row];
1246:           cworkB   = b_j + b_i[row];
1247:           if (!ijonly){
1248:             vworkA = a_a + a_i[row]*bs2;
1249:             vworkB = b_a + b_i[row]*bs2;
1250:           }
1251: #if defined (PETSC_USE_CTABLE)
1252:           PetscTableFind(rmap_i,row+rstart+1,&row);
1253:           row--;
1254:           if (row < 0) { SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"row not found in table"); }
1255: #else
1256:           row      = rmap_i[row + rstart];
1257: #endif
1258:           mat_i    = imat_i[row];
1259:           if (!ijonly) mat_a = imat_a + mat_i*bs2;
1260:           mat_j    = imat_j + mat_i;
1261:           ilen_row = imat_ilen[row];

1263:           /* load the column indices for this row into cols*/
1264:           if (!allcolumns[i]){
1265:             for (l=0; l<nzB; l++) {
1266:               if ((ctmp = bmap[cworkB[l]]) < cstart) {
1267: #if defined (PETSC_USE_CTABLE)
1268:                 PetscTableFind(cmap_i,ctmp+1,&tcol);
1269:                 if (tcol) {
1270: #else
1271:                 if ((tcol = cmap_i[ctmp])) {
1272: #endif
1273:                   *mat_j++ = tcol - 1;
1274:                   PetscMemcpy(mat_a,vworkB+l*bs2,bs2*sizeof(MatScalar));
1275:                   mat_a   += bs2;
1276:                   ilen_row++;
1277:                 }
1278:               } else break;
1279:             }
1280:             imark = l;
1281:             for (l=0; l<nzA; l++) {
1282: #if defined (PETSC_USE_CTABLE)
1283:               PetscTableFind(cmap_i,cstart+cworkA[l]+1,&tcol);
1284:               if (tcol) {
1285: #else
1286:               if ((tcol = cmap_i[cstart + cworkA[l]])) {
1287: #endif
1288:                 *mat_j++ = tcol - 1;
1289:                 if (!ijonly){
1290:                   PetscMemcpy(mat_a,vworkA+l*bs2,bs2*sizeof(MatScalar));
1291:                   mat_a += bs2;
1292:                 }
1293:                 ilen_row++;
1294:               }
1295:             }
1296:             for (l=imark; l<nzB; l++) {
1297: #if defined (PETSC_USE_CTABLE)
1298:               PetscTableFind(cmap_i,bmap[cworkB[l]]+1,&tcol);
1299:               if (tcol) {
1300: #else
1301:               if ((tcol = cmap_i[bmap[cworkB[l]]])) {
1302: #endif
1303:                 *mat_j++ = tcol - 1;
1304:                 if (!ijonly){
1305:                   PetscMemcpy(mat_a,vworkB+l*bs2,bs2*sizeof(MatScalar));
1306:                   mat_a   += bs2;
1307:                 }
1308:                 ilen_row++;
1309:               }
1310:             }
1311:           } else { /* allcolumns */
1312:             for (l=0; l<nzB; l++) {
1313:               if ((ctmp = bmap[cworkB[l]]) < cstart) {
1314:                 *mat_j++ = ctmp;
1315:                 PetscMemcpy(mat_a,vworkB+l*bs2,bs2*sizeof(MatScalar));
1316:                 mat_a   += bs2;
1317:                 ilen_row++;
1318:               } else break;
1319:             }
1320:             imark = l;
1321:             for (l=0; l<nzA; l++) {
1322:               *mat_j++ = cstart+cworkA[l];
1323:               if (!ijonly){
1324:                 PetscMemcpy(mat_a,vworkA+l*bs2,bs2*sizeof(MatScalar));
1325:                 mat_a += bs2;
1326:               }
1327:               ilen_row++;
1328:             }
1329:             for (l=imark; l<nzB; l++) {
1330:               *mat_j++ = bmap[cworkB[l]];
1331:               if (!ijonly){
1332:                 PetscMemcpy(mat_a,vworkB+l*bs2,bs2*sizeof(MatScalar));
1333:                 mat_a   += bs2;
1334:               }
1335:               ilen_row++;
1336:             }
1337:           }
1338:           imat_ilen[row] = ilen_row;
1339:         }
1340:       }
1341:     }
1342:   }

1344:   /*   Now assemble the off proc rows*/
1345:   {
1346:     PetscInt    *sbuf1_i,*rbuf2_i,*rbuf3_i,*imat_ilen,ilen;
1347:     PetscInt    *imat_j,*imat_i;
1348:     MatScalar   *imat_a = PETSC_NULL,*rbuf4_i = PETSC_NULL;
1349:     PetscMPIInt ii;

1351:     for (tmp2=0; tmp2<nrqs; tmp2++) {
1352:       if (ijonly){
1353:         ii = tmp2;
1354:       } else {
1355:         MPI_Waitany(nrqs,r_waits4,&ii,r_status4+tmp2);
1356:       }
1357:       idex   = pa[ii];
1358:       sbuf1_i = sbuf1[idex];
1359:       jmax    = sbuf1_i[0];
1360:       ct1     = 2*jmax + 1;
1361:       ct2     = 0;
1362:       rbuf2_i = rbuf2[ii];
1363:       rbuf3_i = rbuf3[ii];
1364:       if (!ijonly) rbuf4_i = rbuf4[ii];
1365:       for (j=1; j<=jmax; j++) {
1366:         is_no     = sbuf1_i[2*j-1];
1367:         if (!allcolumns[is_no]) cmap_i = cmap[is_no];
1368:         rmap_i    = rmap[is_no];
1369:         mat       = (Mat_SeqBAIJ*)submats[is_no]->data;
1370:         imat_ilen = mat->ilen;
1371:         imat_j    = mat->j;
1372:         imat_i    = mat->i;
1373:         if (!ijonly) imat_a = mat->a;
1374:         max1      = sbuf1_i[2*j];
1375:         for (k=0; k<max1; k++,ct1++) {
1376:           row   = sbuf1_i[ct1];
1377: #if defined (PETSC_USE_CTABLE)
1378:           PetscTableFind(rmap_i,row+1,&row);
1379:           row--;
1380:           if(row < 0) { SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"row not found in table"); }
1381: #else
1382:           row   = rmap_i[row];
1383: #endif
1384:           ilen  = imat_ilen[row];
1385:           mat_i = imat_i[row];
1386:           if (!ijonly) mat_a = imat_a + mat_i*bs2;
1387:           mat_j = imat_j + mat_i;
1388:           max2 = rbuf2_i[ct1];

1390:           if (!allcolumns[is_no]){
1391:             for (l=0; l<max2; l++,ct2++) {
1392: #if defined (PETSC_USE_CTABLE)
1393:               PetscTableFind(cmap_i,rbuf3_i[ct2]+1,&tcol);
1394:               if (tcol) {
1395: #else
1396:               if ((tcol = cmap_i[rbuf3_i[ct2]])) {
1397: #endif
1398:                 *mat_j++    = tcol - 1;
1399:                 if (!ijonly){
1400:                   PetscMemcpy(mat_a,rbuf4_i+ct2*bs2,bs2*sizeof(MatScalar));
1401:                   mat_a += bs2;
1402:                 }
1403:                 ilen++;
1404:               }
1405:             }
1406:           } else { /* allcolumns */
1407:             for (l=0; l<max2; l++,ct2++) {
1408:               *mat_j++    = rbuf3_i[ct2];
1409:               if (!ijonly){
1410:                 PetscMemcpy(mat_a,rbuf4_i+ct2*bs2,bs2*sizeof(MatScalar));
1411:                 mat_a += bs2;
1412:               }
1413:               ilen++;
1414:             }
1415:           }
1416:           imat_ilen[row] = ilen;
1417:         }
1418:       }
1419:     }
1420:   }
1421:   if (!ijonly){
1422:     PetscFree(r_status4);
1423:     PetscFree(r_waits4);
1424:     if (nrqr) {MPI_Waitall(nrqr,s_waits4,s_status4);}
1425:     PetscFree(s_waits4);
1426:     PetscFree(s_status4);
1427:   }

1429:   /* Restore the indices */
1430:   for (i=0; i<ismax; i++) {
1431:     if (!allrows[i]){
1432:       ISRestoreIndices(isrow[i],irow+i);
1433:     }
1434:     if (!allcolumns[i]){
1435:       ISRestoreIndices(iscol[i],icol+i);
1436:     }
1437:   }

1439:   /* Destroy allocated memory */
1440: #if defined(PETSC_USE_CTABLE)
1441:   PetscFree4(irow,icol,nrow,ncol);
1442: #else
1443:   PetscFree5(irow,icol,nrow,ncol,rtable);
1444: #endif
1445:   PetscFree4(w1,w2,w3,w4);
1446:   PetscFree(pa);

1448:   PetscFree4(sbuf1,ptr,tmp,ctr);
1449:   PetscFree(sbuf1);
1450:   PetscFree(rbuf2);
1451:   for (i=0; i<nrqr; ++i) {
1452:     PetscFree(sbuf2[i]);
1453:   }
1454:   for (i=0; i<nrqs; ++i) {
1455:     PetscFree(rbuf3[i]);
1456:   }
1457:   PetscFree3(sbuf2,req_size,req_source);
1458:   PetscFree(rbuf3);
1459:   PetscFree(sbuf_aj[0]);
1460:   PetscFree(sbuf_aj);
1461:   if (!ijonly) {
1462:     for (i=0; i<nrqs; ++i) {PetscFree(rbuf4[i]);}
1463:     PetscFree(rbuf4);
1464:     PetscFree(sbuf_aa[0]);
1465:     PetscFree(sbuf_aa);
1466:   }

1468: #if defined (PETSC_USE_CTABLE)
1469:   for (i=0; i<ismax; i++) {
1470:     PetscTableDestroy((PetscTable*)&rmap[i]);
1471:   }
1472: #endif
1473:   PetscFree(rmap);

1475:   for (i=0; i<ismax; i++){
1476:     if (!allcolumns[i]){
1477: #if defined (PETSC_USE_CTABLE)
1478:       PetscTableDestroy((PetscTable*)&cmap[i]);
1479: #else
1480:       PetscFree(cmap[i]);
1481: #endif
1482:     }
1483:   }
1484:   PetscFree(cmap);
1485:   PetscFree(lens);

1487:   for (i=0; i<ismax; i++) {
1488:     MatAssemblyBegin(submats[i],MAT_FINAL_ASSEMBLY);
1489:     MatAssemblyEnd(submats[i],MAT_FINAL_ASSEMBLY);
1490:   }

1492:   c->ijonly = PETSC_FALSE; /* set back to the default */
1493:   return(0);
1494: }