Actual source code: isltog.c

petsc-3.3-p7 2013-05-11
  2: #include <petscvec.h>   /*I "petscvec.h" I*/
  3: #include <petsc-private/isimpl.h>    /*I "petscis.h"  I*/

  5: PetscClassId  IS_LTOGM_CLASSID;

  9: /*@C
 10:     ISLocalToGlobalMappingGetSize - Gets the local size of a local to global mapping.

 12:     Not Collective

 14:     Input Parameter:
 15: .   ltog - local to global mapping

 17:     Output Parameter:
 18: .   n - the number of entries in the local mapping

 20:     Level: advanced

 22:     Concepts: mapping^local to global

 24: .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate()
 25: @*/
 26: PetscErrorCode  ISLocalToGlobalMappingGetSize(ISLocalToGlobalMapping mapping,PetscInt *n)
 27: {
 31:   *n = mapping->n;
 32:   return(0);
 33: }
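
/*
   Usage sketch (illustrative only, not taken from the PETSc sources): querying the local
   size of an existing mapping; the variable name ltog is an assumption of the example.

     ISLocalToGlobalMapping ltog;    // assumed to have been created already
     PetscInt               nlocal;
     PetscErrorCode         ierr;

     ierr = ISLocalToGlobalMappingGetSize(ltog,&nlocal);CHKERRQ(ierr);
     ierr = PetscPrintf(PETSC_COMM_SELF,"local mapping size %D\n",nlocal);CHKERRQ(ierr);
*/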

 37: /*@C
 38:     ISLocalToGlobalMappingView - View a local to global mapping

 40:     Not Collective

 42:     Input Parameters:
 43: +   ltog - local to global mapping
 44: -   viewer - viewer

 46:     Level: advanced

 48:     Concepts: mapping^local to global

 50: .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate()
 51: @*/
 52: PetscErrorCode  ISLocalToGlobalMappingView(ISLocalToGlobalMapping mapping,PetscViewer viewer)
 53: {
 54:   PetscInt        i;
 55:   PetscMPIInt     rank;
 56:   PetscBool       iascii;
 57:   PetscErrorCode  ierr;

 61:   if (!viewer) {
 62:     PetscViewerASCIIGetStdout(((PetscObject)mapping)->comm,&viewer);
 63:   }

 66:   MPI_Comm_rank(((PetscObject)mapping)->comm,&rank);
 67:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);
 68:   if (iascii) {
 69:     PetscViewerASCIISynchronizedAllow(viewer,PETSC_TRUE);
 70:     for (i=0; i<mapping->n; i++) {
 71:       PetscViewerASCIISynchronizedPrintf(viewer,"[%d] %d %d\n",rank,i,mapping->indices[i]);
 72:     }
 73:     PetscViewerFlush(viewer);
 74:     PetscViewerASCIISynchronizedAllow(viewer,PETSC_FALSE);
 75:   } else SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Viewer type %s not supported for ISLocalToGlobalMapping",((PetscObject)viewer)->type_name);
 76:   return(0);
 77: }
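
/*
   Usage sketch (illustrative only, not taken from the PETSc sources): viewing a mapping,
   assuming ltog already exists and lives on PETSC_COMM_WORLD.

     PetscErrorCode ierr;
     ierr = ISLocalToGlobalMappingView(ltog,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
     // passing PETSC_NULL instead of a viewer selects the communicator's stdout
     // viewer, as handled at the top of ISLocalToGlobalMappingView() above
*/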

 81: /*@
 82:     ISLocalToGlobalMappingCreateIS - Creates a mapping between a local (0 to n)
 83:     ordering and a global parallel ordering.

 85:     Not collective

 87:     Input Parameter:
 88: .   is - index set containing the global numbers for each local number

 90:     Output Parameter:
 91: .   mapping - new mapping data structure

 93:     Level: advanced

 95:     Concepts: mapping^local to global

 97: .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate()
 98: @*/
 99: PetscErrorCode  ISLocalToGlobalMappingCreateIS(IS is,ISLocalToGlobalMapping *mapping)
100: {
102:   PetscInt       n;
103:   const PetscInt *indices;
104:   MPI_Comm       comm;


110:   PetscObjectGetComm((PetscObject)is,&comm);
111:   ISGetLocalSize(is,&n);
112:   ISGetIndices(is,&indices);
113:   ISLocalToGlobalMappingCreate(comm,n,indices,PETSC_COPY_VALUES,mapping);
114:   ISRestoreIndices(is,&indices);
115:   return(0);
116: }
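
/*
   Usage sketch (illustrative only, not taken from the PETSc sources): building a mapping
   from an index set; the three global indices are arbitrary example values.

     IS                     is;
     ISLocalToGlobalMapping ltog;
     PetscInt               globals[] = {10,11,25};
     PetscErrorCode         ierr;

     ierr = ISCreateGeneral(PETSC_COMM_WORLD,3,globals,PETSC_COPY_VALUES,&is);CHKERRQ(ierr);
     ierr = ISLocalToGlobalMappingCreateIS(is,&ltog);CHKERRQ(ierr);
     ierr = ISDestroy(&is);CHKERRQ(ierr);              // the mapping keeps its own copy
     // ... use ltog ...
     ierr = ISLocalToGlobalMappingDestroy(&ltog);CHKERRQ(ierr);
*/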

120: /*@C
121:     ISLocalToGlobalMappingCreateSF - Creates a mapping between a local (0 to n)
122:     ordering and a global parallel ordering.

124:     Collective

126:     Input Parameters:
127: +   sf - star forest mapping contiguous local indices to (rank, offset)
128: -   start - first global index on this process

130:     Output Parameter:
131: .   mapping - new mapping data structure

133:     Level: advanced

135:     Concepts: mapping^local to global

137: .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate(), ISLocalToGlobalMappingCreateIS()
138: @*/
139: PetscErrorCode ISLocalToGlobalMappingCreateSF(PetscSF sf,PetscInt start,ISLocalToGlobalMapping *mapping)
140: {
142:   PetscInt       i,maxlocal,nroots,nleaves,*globals,*ltog;
143:   const PetscInt *ilocal;
144:   MPI_Comm       comm;


150:   PetscObjectGetComm((PetscObject)sf,&comm);
151:   PetscSFGetGraph(sf,&nroots,&nleaves,&ilocal,PETSC_NULL);
152:   if (ilocal) {for (i=0,maxlocal=0; i<nleaves; i++) maxlocal = PetscMax(maxlocal,ilocal[i]+1);}
153:   else maxlocal = nleaves;
154:   PetscMalloc(nroots*sizeof(PetscInt),&globals);
155:   PetscMalloc(maxlocal*sizeof(PetscInt),&ltog);
156:   for (i=0; i<nroots; i++) globals[i] = start + i;
157:   for (i=0; i<maxlocal; i++) ltog[i] = -1;
158:   PetscSFBcastBegin(sf,MPIU_INT,globals,ltog);
159:   PetscSFBcastEnd(sf,MPIU_INT,globals,ltog);
160:   ISLocalToGlobalMappingCreate(comm,maxlocal,ltog,PETSC_OWN_POINTER,mapping);
161:   PetscFree(globals);
162:   return(0);
163: }
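
/*
   Usage sketch (illustrative only, not taken from the PETSc sources): given an already
   configured PetscSF sf and the first globally owned index rstart on this process
   (both assumptions of the example), a mapping for the leaf space can be obtained with

     ISLocalToGlobalMapping ltog;
     PetscErrorCode         ierr;
     ierr = ISLocalToGlobalMappingCreateSF(sf,rstart,&ltog);CHKERRQ(ierr);

   Leaves not attached to any root keep the value -1, as set by the loop over maxlocal above.
*/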

167: /*@
168:     ISLocalToGlobalMappingCreate - Creates a mapping between a local (0 to n)
169:     ordering and a global parallel ordering.

171:     Not Collective, but communicator may have more than one process

173:     Input Parameters:
174: +   comm - MPI communicator
175: .   n - the number of local elements
176: .   indices - the global index for each local element
177: -   mode - see PetscCopyMode

179:     Output Parameter:
180: .   mapping - new mapping data structure

182:     Level: advanced

184:     Concepts: mapping^local to global

186: .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreateIS()
187: @*/
188: PetscErrorCode  ISLocalToGlobalMappingCreate(MPI_Comm cm,PetscInt n,const PetscInt indices[],PetscCopyMode mode,ISLocalToGlobalMapping *mapping)
189: {
191:   PetscInt       *in;


197:   *mapping = PETSC_NULL;
198: #ifndef PETSC_USE_DYNAMIC_LIBRARIES
199:   ISInitializePackage(PETSC_NULL);
200: #endif

202:   PetscHeaderCreate(*mapping,_p_ISLocalToGlobalMapping,int,IS_LTOGM_CLASSID,0,"ISLocalToGlobalMapping","Local to global mapping","IS",
203:                            cm,ISLocalToGlobalMappingDestroy,ISLocalToGlobalMappingView);
204:   (*mapping)->n       = n;
205:   /*
206:     Do not create the global to local mapping. This is only created if 
207:     ISGlobalToLocalMappingApply() is called
208:   */
209:   (*mapping)->globals = 0;
210:   if (mode == PETSC_COPY_VALUES) {
211:     PetscMalloc(n*sizeof(PetscInt),&in);
212:     PetscMemcpy(in,indices,n*sizeof(PetscInt));
213:     PetscLogObjectMemory(*mapping,n*sizeof(PetscInt));
214:     (*mapping)->indices = in;
215:   } else if (mode == PETSC_OWN_POINTER) {
216:     (*mapping)->indices = (PetscInt*)indices;
217:   } else SETERRQ(cm,PETSC_ERR_SUP,"Cannot currently use PETSC_USE_POINTER");
218:   return(0);
219: }
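
/*
   Usage sketch (illustrative only, not taken from the PETSc sources): creating a mapping
   directly from an array of global indices; the values are arbitrary example data.

     ISLocalToGlobalMapping ltog;
     PetscInt               globals[4] = {0,1,8,9};
     PetscErrorCode         ierr;

     ierr = ISLocalToGlobalMappingCreate(PETSC_COMM_WORLD,4,globals,PETSC_COPY_VALUES,&ltog);CHKERRQ(ierr);
     // with PETSC_COPY_VALUES the caller keeps ownership of globals[]; with
     // PETSC_OWN_POINTER the mapping would take over a PetscMalloc'd array instead
     ierr = ISLocalToGlobalMappingDestroy(&ltog);CHKERRQ(ierr);
*/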

223: /*@
224:     ISLocalToGlobalMappingBlock - Creates a blocked index version of an 
225:        ISLocalToGlobalMapping that is appropriate for MatSetLocalToGlobalMappingBlock()
226:        and VecSetLocalToGlobalMappingBlock().

228:     Not Collective, but communicator may have more than one process

230:     Input Parameters:
231: +    inmap - original point-wise mapping
232: -    bs - block size

234:     Output Parameter:
235: .   outmap - block based mapping; the indices are relative to BLOCKS, not individual vector or matrix entries.

237:     Level: advanced

239:     Concepts: mapping^local to global

241: .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate(), ISLocalToGlobalMappingCreateIS()
242: @*/
243: PetscErrorCode  ISLocalToGlobalMappingBlock(ISLocalToGlobalMapping inmap,PetscInt bs,ISLocalToGlobalMapping *outmap)
244: {
246:   PetscInt       *ii,i,n;

251:   if (bs > 1) {
252:     n    = inmap->n/bs;
253:     if (n*bs != inmap->n) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Pointwise mapping length is not divisible by block size");
254:     PetscMalloc(n*sizeof(PetscInt),&ii);
255:     for (i=0; i<n; i++) {
256:       ii[i] = inmap->indices[bs*i]/bs;
257:     }
258:     ISLocalToGlobalMappingCreate(((PetscObject)inmap)->comm,n,ii,PETSC_OWN_POINTER,outmap);
259:   } else {
260:     PetscObjectReference((PetscObject)inmap);
261:     *outmap = inmap;
262:   }
263:   return(0);
264: }
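
/*
   Usage sketch (illustrative only, not taken from the PETSc sources): deriving a block
   mapping from a pointwise mapping ltog that is assumed to store its indices in
   contiguous blocks of size 2, as the code above requires.

     ISLocalToGlobalMapping bltog;
     PetscErrorCode         ierr;

     ierr = ISLocalToGlobalMappingBlock(ltog,2,&bltog);CHKERRQ(ierr);
     // bltog maps local block i to global block indices[2*i]/2, matching the loop above
     ierr = ISLocalToGlobalMappingDestroy(&bltog);CHKERRQ(ierr);
*/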

268: /*@
269:     ISLocalToGlobalMappingUnBlock - Creates a scalar index version of a blocked
270:        ISLocalToGlobalMapping

272:     Not Collective, but communicator may have more than one process

274:     Input Parameters:
275: + inmap - block based mapping; the indices are relative to BLOCKS, not individual vector or matrix entries.
276: - bs - block size

278:     Output Parameter:
279: .   outmap - pointwise mapping

281:     Level: advanced

283:     Concepts: mapping^local to global

285: .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate(), ISLocalToGlobalMappingBlock()
286: @*/
287: PetscErrorCode  ISLocalToGlobalMappingUnBlock(ISLocalToGlobalMapping inmap,PetscInt bs,ISLocalToGlobalMapping *outmap)
288: {
290:   PetscInt       *ii,i,n;

295:   if (bs > 1) {
296:     n    = inmap->n*bs;
297:     PetscMalloc(n*sizeof(PetscInt),&ii);
298:     for (i=0; i<n; i++) {
299:       ii[i] = inmap->indices[i/bs]*bs + (i%bs);
300:     }
301:     ISLocalToGlobalMappingCreate(((PetscObject)inmap)->comm,n,ii,PETSC_OWN_POINTER,outmap);
302:   } else {
303:     PetscObjectReference((PetscObject)inmap);
304:     *outmap = inmap;
305:   }
306:   return(0);
307: }
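
/*
   Usage sketch (illustrative only, not taken from the PETSc sources): expanding a block
   mapping bltog (assumed block size 2) back to a pointwise mapping.

     ISLocalToGlobalMapping pltog;
     PetscErrorCode         ierr;

     ierr = ISLocalToGlobalMappingUnBlock(bltog,2,&pltog);CHKERRQ(ierr);
     // local point 2*i+j maps to 2*indices[i]+j, matching the loop above
     ierr = ISLocalToGlobalMappingDestroy(&pltog);CHKERRQ(ierr);
*/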

311: /*@
312:    ISLocalToGlobalMappingDestroy - Destroys a mapping between a local (0 to n)
313:    ordering and a global parallel ordering.

315:    Not Collective

317:    Input Parameter:
318: .  mapping - mapping data structure

320:    Level: advanced

322: .seealso: ISLocalToGlobalMappingCreate()
323: @*/
324: PetscErrorCode  ISLocalToGlobalMappingDestroy(ISLocalToGlobalMapping *mapping)
325: {
328:   if (!*mapping) return(0);
330:   if (--((PetscObject)(*mapping))->refct > 0) {*mapping = 0;return(0);}
331:   PetscFree((*mapping)->indices);
332:   PetscFree((*mapping)->globals);
333:   PetscHeaderDestroy(mapping);
334:   *mapping = 0;
335:   return(0);
336: }
337: 
340: /*@
341:     ISLocalToGlobalMappingApplyIS - Creates, from an IS in the local numbering,
342:     a new index set using the global numbering defined in an ISLocalToGlobalMapping
343:     context.

345:     Not collective

347:     Input Parameters:
348: +   mapping - mapping between local and global numbering
349: -   is - index set in local numbering

351:     Output Parameter:
352: .   newis - index set in global numbering

354:     Level: advanced

356:     Concepts: mapping^local to global

358: .seealso: ISLocalToGlobalMappingApply(), ISLocalToGlobalMappingCreate(),
359:           ISLocalToGlobalMappingDestroy(), ISGlobalToLocalMappingApply()
360: @*/
361: PetscErrorCode  ISLocalToGlobalMappingApplyIS(ISLocalToGlobalMapping mapping,IS is,IS *newis)
362: {
364:   PetscInt       n,i,*idxmap,*idxout,Nmax = mapping->n;
365:   const PetscInt *idxin;


372:   ISGetLocalSize(is,&n);
373:   ISGetIndices(is,&idxin);
374:   idxmap = mapping->indices;
375: 
376:   PetscMalloc(n*sizeof(PetscInt),&idxout);
377:   for (i=0; i<n; i++) {
378:     if (idxin[i] >= Nmax) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Local index %d too large %d (max) at %d",idxin[i],Nmax-1,i);
379:     idxout[i] = idxmap[idxin[i]];
380:   }
381:   ISRestoreIndices(is,&idxin);
382:   ISCreateGeneral(PETSC_COMM_SELF,n,idxout,PETSC_OWN_POINTER,newis);
383:   return(0);
384: }
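
/*
   Usage sketch (illustrative only, not taken from the PETSc sources): renumbering a local
   index set; ltog is an existing mapping and the local indices are example values.

     IS             islocal,isglobal;
     PetscInt       loc[] = {0,2,3};
     PetscErrorCode ierr;

     ierr = ISCreateGeneral(PETSC_COMM_SELF,3,loc,PETSC_COPY_VALUES,&islocal);CHKERRQ(ierr);
     ierr = ISLocalToGlobalMappingApplyIS(ltog,islocal,&isglobal);CHKERRQ(ierr);
     ierr = ISDestroy(&islocal);CHKERRQ(ierr);
     // ... use isglobal ...
     ierr = ISDestroy(&isglobal);CHKERRQ(ierr);
*/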

386: /*MC
387:    ISLocalToGlobalMappingApply - Takes a list of integers in a local numbering
388:    and converts them to the global numbering.

390:    Synopsis:
391:    PetscErrorCode ISLocalToGlobalMappingApply(ISLocalToGlobalMapping mapping,PetscInt N,const PetscInt in[],PetscInt out[])

393:    Not collective

395:    Input Parameters:
396: +  mapping - the local to global mapping context
397: .  N - number of integers
398: -  in - input indices in local numbering

400:    Output Parameter:
401: .  out - indices in global numbering

403:    Notes: 
404:    The in and out array parameters may be identical.

406:    Level: advanced

408: .seealso: ISLocalToGlobalMappingCreate(),ISLocalToGlobalMappingDestroy(), 
409:           ISLocalToGlobalMappingApplyIS(),AOCreateBasic(),AOApplicationToPetsc(),
410:           AOPetscToApplication(), ISGlobalToLocalMappingApply()

412:     Concepts: mapping^local to global

414: M*/
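
/*
   Usage sketch (illustrative only, not taken from the PETSc sources): converting a small
   array of local indices in place, which the Notes above state is allowed; ltog is an
   existing mapping.

     PetscInt       idx[3] = {0,1,2};
     PetscErrorCode ierr;

     ierr = ISLocalToGlobalMappingApply(ltog,3,idx,idx);CHKERRQ(ierr);
     // idx[] now holds the corresponding global indices
*/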

416: /* -----------------------------------------------------------------------------------------*/

420: /*
421:     Creates the global fields in the ISLocalToGlobalMapping structure
422: */
423: static PetscErrorCode ISGlobalToLocalMappingSetUp_Private(ISLocalToGlobalMapping mapping)
424: {
426:   PetscInt       i,*idx = mapping->indices,n = mapping->n,end,start,*globals;

429:   end   = 0;
430:   start = PETSC_MAX_INT;

432:   for (i=0; i<n; i++) {
433:     if (idx[i] < 0) continue;
434:     if (idx[i] < start) start = idx[i];
435:     if (idx[i] > end)   end   = idx[i];
436:   }
437:   if (start > end) {start = 0; end = -1;}
438:   mapping->globalstart = start;
439:   mapping->globalend   = end;

441:   PetscMalloc((end-start+2)*sizeof(PetscInt),&globals);
442:   mapping->globals = globals;
443:   for (i=0; i<end-start+1; i++) {
444:     globals[i] = -1;
445:   }
446:   for (i=0; i<n; i++) {
447:     if (idx[i] < 0) continue;
448:     globals[idx[i] - start] = i;
449:   }

451:   PetscLogObjectMemory(mapping,(end-start+1)*sizeof(PetscInt));
452:   return(0);
453: }

457: /*@
458:     ISGlobalToLocalMappingApply - Provides the local numbering for a list of integers
459:     specified with a global numbering.

461:     Not collective

463:     Input Parameters:
464: +   mapping - mapping between local and global numbering
465: .   type - IS_GTOLM_MASK - replaces global indices with no local value with -1
466:            IS_GTOLM_DROP - drops the indices with no local value from the output list
467: .   n - number of global indices to map
468: -   idx - global indices to map

470:     Output Parameters:
471: +   nout - number of indices in output array (if type == IS_GTOLM_MASK then nout = n)
472: -   idxout - local index of each global index, one must pass in an array long enough 
473:              to hold all the indices. You can call ISGlobalToLocalMappingApply() with 
474:              idxout == PETSC_NULL to determine the required length (returned in nout)
475:              and then allocate the required space and call ISGlobalToLocalMappingApply()
476:              a second time to set the values.

478:     Notes:
479:     Either nout or idxout may be PETSC_NULL. idx and idxout may be identical.

481:     This is not scalable in memory usage. Each processor requires O(Nglobal) size 
482:     array to compute these.

484:     Level: advanced

486:     Developer Note: The manual page states that idx and idxout may be identical but the calling
487:        sequence declares idx as const so it cannot be the same as idxout. 

489:     Concepts: mapping^global to local

491: .seealso: ISLocalToGlobalMappingApply(), ISLocalToGlobalMappingCreate(),
492:           ISLocalToGlobalMappingDestroy()
493: @*/
494: PetscErrorCode  ISGlobalToLocalMappingApply(ISLocalToGlobalMapping mapping,ISGlobalToLocalMappingType type,
495:                                   PetscInt n,const PetscInt idx[],PetscInt *nout,PetscInt idxout[])
496: {
497:   PetscInt       i,*globals,nf = 0,tmp,start,end;

502:   if (!mapping->globals) {
503:     ISGlobalToLocalMappingSetUp_Private(mapping);
504:   }
505:   globals = mapping->globals;
506:   start   = mapping->globalstart;
507:   end     = mapping->globalend;

509:   if (type == IS_GTOLM_MASK) {
510:     if (idxout) {
511:       for (i=0; i<n; i++) {
512:         if (idx[i] < 0) idxout[i] = idx[i];
513:         else if (idx[i] < start) idxout[i] = -1;
514:         else if (idx[i] > end)   idxout[i] = -1;
515:         else                     idxout[i] = globals[idx[i] - start];
516:       }
517:     }
518:     if (nout) *nout = n;
519:   } else {
520:     if (idxout) {
521:       for (i=0; i<n; i++) {
522:         if (idx[i] < 0) continue;
523:         if (idx[i] < start) continue;
524:         if (idx[i] > end) continue;
525:         tmp = globals[idx[i] - start];
526:         if (tmp < 0) continue;
527:         idxout[nf++] = tmp;
528:       }
529:     } else {
530:       for (i=0; i<n; i++) {
531:         if (idx[i] < 0) continue;
532:         if (idx[i] < start) continue;
533:         if (idx[i] > end) continue;
534:         tmp = globals[idx[i] - start];
535:         if (tmp < 0) continue;
536:         nf++;
537:       }
538:     }
539:     if (nout) *nout = nf;
540:   }

542:   return(0);
543: }
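
/*
   Usage sketch (illustrative only, not taken from the PETSc sources): the two-pass pattern
   described above for IS_GTOLM_DROP; ltog, nglobal and gidx[] are assumed to exist in the
   caller.

     PetscInt       nout,*local;
     PetscErrorCode ierr;

     ierr = ISGlobalToLocalMappingApply(ltog,IS_GTOLM_DROP,nglobal,gidx,&nout,PETSC_NULL);CHKERRQ(ierr);
     ierr = PetscMalloc(nout*sizeof(PetscInt),&local);CHKERRQ(ierr);
     ierr = ISGlobalToLocalMappingApply(ltog,IS_GTOLM_DROP,nglobal,gidx,&nout,local);CHKERRQ(ierr);
     // ... use local[0..nout-1] ...
     ierr = PetscFree(local);CHKERRQ(ierr);
*/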

547: /*@C
548:     ISLocalToGlobalMappingGetInfo - Gets the neighbor information for each processor and 
549:      each index shared by more than one processor 

551:     Collective on ISLocalToGlobalMapping

553:     Input Parameter:
554: .   mapping - the mapping from local to global indexing

556:     Output Parameters:
557: +   nproc - number of processors that are connected to this one
558: .   procs - neighboring processors
559: .   numprocs - number of indices for each subdomain (processor)
560: -   indices - indices of nodes (in local numbering) shared with neighbors (sorted by global numbering)

562:     Level: advanced

564:     Concepts: mapping^local to global

566:     Fortran Usage: 
567: $        ISLocalToGlobalMpngGetInfoSize(ISLocalToGlobalMapping,PetscInt nproc,PetscInt numprocmax,ierr) followed by 
568: $        ISLocalToGlobalMappingGetInfo(ISLocalToGlobalMapping,PetscInt nproc, PetscInt procs[nproc],PetscInt numprocs[nproc],
569:           PetscInt indices[nproc][numprocmax],ierr)
570:         There is no ISLocalToGlobalMappingRestoreInfo() in Fortran. You must make sure that procs[], numprocs[] and 
571:         indices[][] are large enough arrays, either by allocating them dynamically or defining static ones large enough.


574: .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreateIS(), ISLocalToGlobalMappingCreate(),
575:           ISLocalToGlobalMappingRestoreInfo()
576: @*/
577: PetscErrorCode  ISLocalToGlobalMappingGetInfo(ISLocalToGlobalMapping mapping,PetscInt *nproc,PetscInt *procs[],PetscInt *numprocs[],PetscInt **indices[])
578: {
580:   PetscMPIInt    size,rank,tag1,tag2,tag3,*len,*source,imdex;
581:   PetscInt       i,n = mapping->n,Ng,ng,max = 0,*lindices = mapping->indices;
582:   PetscInt       *nprocs,*owner,nsends,*sends,j,*starts,nmax,nrecvs,*recvs,proc;
583:   PetscInt       cnt,scale,*ownedsenders,*nownedsenders,rstart,nowned;
584:   PetscInt       node,nownedm,nt,*sends2,nsends2,*starts2,*lens2,*dest,nrecvs2,*starts3,*recvs2,k,*bprocs,*tmp;
585:   PetscInt       first_procs,first_numprocs,*first_indices;
586:   MPI_Request    *recv_waits,*send_waits;
587:   MPI_Status     recv_status,*send_status,*recv_statuses;
588:   MPI_Comm       comm = ((PetscObject)mapping)->comm;
589:   PetscBool      debug = PETSC_FALSE;

593:   MPI_Comm_size(comm,&size);
594:   MPI_Comm_rank(comm,&rank);
595:   if (size == 1) {
596:     *nproc         = 0;
597:     *procs         = PETSC_NULL;
598:     PetscMalloc(sizeof(PetscInt),numprocs);
599:     (*numprocs)[0] = 0;
600:     PetscMalloc(sizeof(PetscInt*),indices);
601:     (*indices)[0]  = PETSC_NULL;
602:     return(0);
603:   }

605:   PetscOptionsGetBool(PETSC_NULL,"-islocaltoglobalmappinggetinfo_debug",&debug,PETSC_NULL);

607:   /*
608:     Notes on ISLocalToGlobalMappingGetInfo

610:     globally owned node - a node that has been assigned to this processor in the global
611:            numbering, just for this routine.

613:     nontrivial globally owned node - node assigned to this processor that is on a subdomain
614:            boundary (i.e. it has more than one local owner)

616:     locally owned node - node that exists on this processor's subdomain

618:     nontrivial locally owned node - node that is not in the interior (i.e. belongs to more
619:            than one local subdomain)
620:   */
621:   PetscObjectGetNewTag((PetscObject)mapping,&tag1);
622:   PetscObjectGetNewTag((PetscObject)mapping,&tag2);
623:   PetscObjectGetNewTag((PetscObject)mapping,&tag3);

625:   for (i=0; i<n; i++) {
626:     if (lindices[i] > max) max = lindices[i];
627:   }
628:   MPI_Allreduce(&max,&Ng,1,MPIU_INT,MPI_MAX,comm);
629:   Ng++;
630:   MPI_Comm_size(comm,&size);
631:   MPI_Comm_rank(comm,&rank);
632:   scale  = Ng/size + 1;
633:   ng     = scale; if (rank == size-1) ng = Ng - scale*(size-1); ng = PetscMax(1,ng);
634:   rstart = scale*rank;

636:   /* determine ownership ranges of global indices */
637:   PetscMalloc(2*size*sizeof(PetscInt),&nprocs);
638:   PetscMemzero(nprocs,2*size*sizeof(PetscInt));

640:   /* determine owners of each local node  */
641:   PetscMalloc(n*sizeof(PetscInt),&owner);
642:   for (i=0; i<n; i++) {
643:     proc             = lindices[i]/scale; /* processor that globally owns this index */
644:     nprocs[2*proc+1] = 1;                 /* processor globally owns at least one of ours */
645:     owner[i]         = proc;
646:     nprocs[2*proc]++;                     /* count of how many that processor globally owns of ours */
647:   }
648:   nsends = 0; for (i=0; i<size; i++) nsends += nprocs[2*i+1];
649:   PetscInfo1(mapping,"Number of global owners for my local data %d\n",nsends);

651:   /* inform other processors of number of messages and max length*/
652:   PetscMaxSum(comm,nprocs,&nmax,&nrecvs);
653:   PetscInfo1(mapping,"Number of local owners for my global data %d\n",nrecvs);

655:   /* post receives for owned rows */
656:   PetscMalloc((2*nrecvs+1)*(nmax+1)*sizeof(PetscInt),&recvs);
657:   PetscMalloc((nrecvs+1)*sizeof(MPI_Request),&recv_waits);
658:   for (i=0; i<nrecvs; i++) {
659:     MPI_Irecv(recvs+2*nmax*i,2*nmax,MPIU_INT,MPI_ANY_SOURCE,tag1,comm,recv_waits+i);
660:   }

662:   /* pack messages containing lists of local nodes to owners */
663:   PetscMalloc((2*n+1)*sizeof(PetscInt),&sends);
664:   PetscMalloc((size+1)*sizeof(PetscInt),&starts);
665:   starts[0]  = 0;
666:   for (i=1; i<size; i++) { starts[i] = starts[i-1] + 2*nprocs[2*i-2];}
667:   for (i=0; i<n; i++) {
668:     sends[starts[owner[i]]++] = lindices[i];
669:     sends[starts[owner[i]]++] = i;
670:   }
671:   PetscFree(owner);
672:   starts[0]  = 0;
673:   for (i=1; i<size; i++) { starts[i] = starts[i-1] + 2*nprocs[2*i-2];}

675:   /* send the messages */
676:   PetscMalloc((nsends+1)*sizeof(MPI_Request),&send_waits);
677:   PetscMalloc((nsends+1)*sizeof(PetscInt),&dest);
678:   cnt = 0;
679:   for (i=0; i<size; i++) {
680:     if (nprocs[2*i]) {
681:       MPI_Isend(sends+starts[i],2*nprocs[2*i],MPIU_INT,i,tag1,comm,send_waits+cnt);
682:       dest[cnt] = i;
683:       cnt++;
684:     }
685:   }
686:   PetscFree(starts);

688:   /* wait on receives */
689:   PetscMalloc((nrecvs+1)*sizeof(PetscMPIInt),&source);
690:   PetscMalloc((nrecvs+1)*sizeof(PetscMPIInt),&len);
691:   cnt  = nrecvs;
692:   PetscMalloc((ng+1)*sizeof(PetscInt),&nownedsenders);
693:   PetscMemzero(nownedsenders,ng*sizeof(PetscInt));
694:   while (cnt) {
695:     MPI_Waitany(nrecvs,recv_waits,&imdex,&recv_status);
696:     /* unpack receives into our local space */
697:     MPI_Get_count(&recv_status,MPIU_INT,&len[imdex]);
698:     source[imdex]  = recv_status.MPI_SOURCE;
699:     len[imdex]     = len[imdex]/2;
700:     /* count how many local owners for each of my global owned indices */
701:     for (i=0; i<len[imdex]; i++) nownedsenders[recvs[2*imdex*nmax+2*i]-rstart]++;
702:     cnt--;
703:   }
704:   PetscFree(recv_waits);

706:   /* count how many globally owned indices are shared by more than one processor (nowned) and the total number of local owners over all such indices (nownedm) */
707:   nowned  = 0;
708:   nownedm = 0;
709:   for (i=0; i<ng; i++) {
710:     if (nownedsenders[i] > 1) {nownedm += nownedsenders[i]; nowned++;}
711:   }

713:   /* create single array to contain rank of all local owners of each globally owned index */
714:   PetscMalloc((nownedm+1)*sizeof(PetscInt),&ownedsenders);
715:   PetscMalloc((ng+1)*sizeof(PetscInt),&starts);
716:   starts[0] = 0;
717:   for (i=1; i<ng; i++) {
718:     if (nownedsenders[i-1] > 1) starts[i] = starts[i-1] + nownedsenders[i-1];
719:     else starts[i] = starts[i-1];
720:   }

722:   /* for each nontrivial globally owned node list all arriving processors */
723:   for (i=0; i<nrecvs; i++) {
724:     for (j=0; j<len[i]; j++) {
725:       node = recvs[2*i*nmax+2*j]-rstart;
726:       if (nownedsenders[node] > 1) {
727:         ownedsenders[starts[node]++] = source[i];
728:       }
729:     }
730:   }

732:   if (debug) { /* -----------------------------------  */
733:     starts[0]    = 0;
734:     for (i=1; i<ng; i++) {
735:       if (nownedsenders[i-1] > 1) starts[i] = starts[i-1] + nownedsenders[i-1];
736:       else starts[i] = starts[i-1];
737:     }
738:     for (i=0; i<ng; i++) {
739:       if (nownedsenders[i] > 1) {
740:         PetscSynchronizedPrintf(comm,"[%d] global node %d local owner processors: ",rank,i+rstart);
741:         for (j=0; j<nownedsenders[i]; j++) {
742:           PetscSynchronizedPrintf(comm,"%d ",ownedsenders[starts[i]+j]);
743:         }
744:         PetscSynchronizedPrintf(comm,"\n");
745:       }
746:     }
747:     PetscSynchronizedFlush(comm);
748:   }/* -----------------------------------  */

750:   /* wait on original sends */
751:   if (nsends) {
752:     PetscMalloc(nsends*sizeof(MPI_Status),&send_status);
753:     MPI_Waitall(nsends,send_waits,send_status);
754:     PetscFree(send_status);
755:   }
756:   PetscFree(send_waits);
757:   PetscFree(sends);
758:   PetscFree(nprocs);

760:   /* pack messages to send back to local owners */
761:   starts[0]    = 0;
762:   for (i=1; i<ng; i++) {
763:     if (nownedsenders[i-1] > 1) starts[i] = starts[i-1] + nownedsenders[i-1];
764:     else starts[i] = starts[i-1];
765:   }
766:   nsends2 = nrecvs;
767:   PetscMalloc((nsends2+1)*sizeof(PetscInt),&nprocs); /* length of each message */
768:   for (i=0; i<nrecvs; i++) {
769:     nprocs[i] = 1;
770:     for (j=0; j<len[i]; j++) {
771:       node = recvs[2*i*nmax+2*j]-rstart;
772:       if (nownedsenders[node] > 1) {
773:         nprocs[i] += 2 + nownedsenders[node];
774:       }
775:     }
776:   }
777:   nt = 0; for (i=0; i<nsends2; i++) nt += nprocs[i];
778:   PetscMalloc((nt+1)*sizeof(PetscInt),&sends2);
779:   PetscMalloc((nsends2+1)*sizeof(PetscInt),&starts2);
780:   starts2[0] = 0; for (i=1; i<nsends2; i++) starts2[i] = starts2[i-1] + nprocs[i-1];
781:   /*
782:      Each message is 1 + nprocs[i] long, and consists of 
783:        (0) the number of nodes being sent back 
784:        (1) the local node number,
785:        (2) the number of processors sharing it,
786:        (3) the processors sharing it
787:   */
788:   for (i=0; i<nsends2; i++) {
789:     cnt = 1;
790:     sends2[starts2[i]] = 0;
791:     for (j=0; j<len[i]; j++) {
792:       node = recvs[2*i*nmax+2*j]-rstart;
793:       if (nownedsenders[node] > 1) {
794:         sends2[starts2[i]]++;
795:         sends2[starts2[i]+cnt++] = recvs[2*i*nmax+2*j+1];
796:         sends2[starts2[i]+cnt++] = nownedsenders[node];
797:         PetscMemcpy(&sends2[starts2[i]+cnt],&ownedsenders[starts[node]],nownedsenders[node]*sizeof(PetscInt));
798:         cnt += nownedsenders[node];
799:       }
800:     }
801:   }

803:   /* receive the message lengths */
804:   nrecvs2 = nsends;
805:   PetscMalloc((nrecvs2+1)*sizeof(PetscInt),&lens2);
806:   PetscMalloc((nrecvs2+1)*sizeof(PetscInt),&starts3);
807:   PetscMalloc((nrecvs2+1)*sizeof(MPI_Request),&recv_waits);
808:   for (i=0; i<nrecvs2; i++) {
809:     MPI_Irecv(&lens2[i],1,MPIU_INT,dest[i],tag2,comm,recv_waits+i);
810:   }

812:   /* send the message lengths */
813:   for (i=0; i<nsends2; i++) {
814:     MPI_Send(&nprocs[i],1,MPIU_INT,source[i],tag2,comm);
815:   }

817:   /* wait on receives of lens */
818:   if (nrecvs2) {
819:     PetscMalloc(nrecvs2*sizeof(MPI_Status),&recv_statuses);
820:     MPI_Waitall(nrecvs2,recv_waits,recv_statuses);
821:     PetscFree(recv_statuses);
822:   }
823:   PetscFree(recv_waits);

825:   starts3[0] = 0;
826:   nt         = 0;
827:   for (i=0; i<nrecvs2-1; i++) {
828:     starts3[i+1] = starts3[i] + lens2[i];
829:     nt          += lens2[i];
830:   }
831:   nt += lens2[nrecvs2-1];

833:   PetscMalloc((nt+1)*sizeof(PetscInt),&recvs2);
834:   PetscMalloc((nrecvs2+1)*sizeof(MPI_Request),&recv_waits);
835:   for (i=0; i<nrecvs2; i++) {
836:     MPI_Irecv(recvs2+starts3[i],lens2[i],MPIU_INT,dest[i],tag3,comm,recv_waits+i);
837:   }
838: 
839:   /* send the messages */
840:   PetscMalloc((nsends2+1)*sizeof(MPI_Request),&send_waits);
841:   for (i=0; i<nsends2; i++) {
842:     MPI_Isend(sends2+starts2[i],nprocs[i],MPIU_INT,source[i],tag3,comm,send_waits+i);
843:   }

845:   /* wait on receives */
846:   if (nrecvs2) {
847:     PetscMalloc(nrecvs2*sizeof(MPI_Status),&recv_statuses);
848:     MPI_Waitall(nrecvs2,recv_waits,recv_statuses);
849:     PetscFree(recv_statuses);
850:   }
851:   PetscFree(recv_waits);
852:   PetscFree(nprocs);

854:   if (debug) { /* -----------------------------------  */
855:     cnt = 0;
856:     for (i=0; i<nrecvs2; i++) {
857:       nt = recvs2[cnt++];
858:       for (j=0; j<nt; j++) {
859:         PetscSynchronizedPrintf(comm,"[%d] local node %d number of subdomains %d: ",rank,recvs2[cnt],recvs2[cnt+1]);
860:         for (k=0; k<recvs2[cnt+1]; k++) {
861:           PetscSynchronizedPrintf(comm,"%d ",recvs2[cnt+2+k]);
862:         }
863:         cnt += 2 + recvs2[cnt+1];
864:         PetscSynchronizedPrintf(comm,"\n");
865:       }
866:     }
867:     PetscSynchronizedFlush(comm);
868:   } /* -----------------------------------  */

870:   /* count the number of subdomains for each local node */
871:   PetscMalloc(size*sizeof(PetscInt),&nprocs);
872:   PetscMemzero(nprocs,size*sizeof(PetscInt));
873:   cnt  = 0;
874:   for (i=0; i<nrecvs2; i++) {
875:     nt = recvs2[cnt++];
876:     for (j=0; j<nt; j++) {
877:       for (k=0; k<recvs2[cnt+1]; k++) {
878:         nprocs[recvs2[cnt+2+k]]++;
879:       }
880:       cnt += 2 + recvs2[cnt+1];
881:     }
882:   }
883:   nt = 0; for (i=0; i<size; i++) nt += (nprocs[i] > 0);
884:   *nproc    = nt;
885:   PetscMalloc((nt+1)*sizeof(PetscInt),procs);
886:   PetscMalloc((nt+1)*sizeof(PetscInt),numprocs);
887:   PetscMalloc((nt+1)*sizeof(PetscInt*),indices);
888:   PetscMalloc(size*sizeof(PetscInt),&bprocs);
889:   cnt       = 0;
890:   for (i=0; i<size; i++) {
891:     if (nprocs[i] > 0) {
892:       bprocs[i]        = cnt;
893:       (*procs)[cnt]    = i;
894:       (*numprocs)[cnt] = nprocs[i];
895:       PetscMalloc(nprocs[i]*sizeof(PetscInt),&(*indices)[cnt]);
896:       cnt++;
897:     }
898:   }

900:   /* make the list of subdomains for each nontrivial local node */
901:   PetscMemzero(*numprocs,nt*sizeof(PetscInt));
902:   cnt  = 0;
903:   for (i=0; i<nrecvs2; i++) {
904:     nt = recvs2[cnt++];
905:     for (j=0; j<nt; j++) {
906:       for (k=0; k<recvs2[cnt+1]; k++) {
907:         (*indices)[bprocs[recvs2[cnt+2+k]]][(*numprocs)[bprocs[recvs2[cnt+2+k]]]++] = recvs2[cnt];
908:       }
909:       cnt += 2 + recvs2[cnt+1];
910:     }
911:   }
912:   PetscFree(bprocs);
913:   PetscFree(recvs2);

915:   /* sort the node indices by their global numbers */
916:   nt = *nproc;
917:   for (i=0; i<nt; i++) {
918:     PetscMalloc(((*numprocs)[i])*sizeof(PetscInt),&tmp);
919:     for (j=0; j<(*numprocs)[i]; j++) {
920:       tmp[j] = lindices[(*indices)[i][j]];
921:     }
922:     PetscSortIntWithArray((*numprocs)[i],tmp,(*indices)[i]);
923:     PetscFree(tmp);
924:   }

926:   if (debug) { /* -----------------------------------  */
927:     nt = *nproc;
928:     for (i=0; i<nt; i++) {
929:       PetscSynchronizedPrintf(comm,"[%d] subdomain %d number of indices %d: ",rank,(*procs)[i],(*numprocs)[i]);
930:       for (j=0; j<(*numprocs)[i]; j++) {
931:         PetscSynchronizedPrintf(comm,"%d ",(*indices)[i][j]);
932:       }
933:       PetscSynchronizedPrintf(comm,"\n");
934:     }
935:     PetscSynchronizedFlush(comm);
936:   } /* -----------------------------------  */

938:   /* wait on sends */
939:   if (nsends2) {
940:     PetscMalloc(nsends2*sizeof(MPI_Status),&send_status);
941:     MPI_Waitall(nsends2,send_waits,send_status);
942:     PetscFree(send_status);
943:   }

945:   PetscFree(starts3);
946:   PetscFree(dest);
947:   PetscFree(send_waits);

949:   PetscFree(nownedsenders);
950:   PetscFree(ownedsenders);
951:   PetscFree(starts);
952:   PetscFree(starts2);
953:   PetscFree(lens2);

955:   PetscFree(source);
956:   PetscFree(len);
957:   PetscFree(recvs);
958:   PetscFree(nprocs);
959:   PetscFree(sends2);

961:   /* put the information about myself as the first entry in the list */
962:   first_procs    = (*procs)[0];
963:   first_numprocs = (*numprocs)[0];
964:   first_indices  = (*indices)[0];
965:   for (i=0; i<*nproc; i++) {
966:     if ((*procs)[i] == rank) {
967:       (*procs)[0]    = (*procs)[i];
968:       (*numprocs)[0] = (*numprocs)[i];
969:       (*indices)[0]  = (*indices)[i];
970:       (*procs)[i]    = first_procs;
971:       (*numprocs)[i] = first_numprocs;
972:       (*indices)[i]  = first_indices;
973:       break;
974:     }
975:   }
976:   return(0);
977: }
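
/*
   Usage sketch (illustrative only, not taken from the PETSc sources): retrieving and then
   releasing the neighbor information; ltog is an existing parallel mapping, and since the
   routine is collective every process in its communicator must make the same calls.

     PetscInt       nproc,*procs,*numprocs,**indices;
     PetscErrorCode ierr;

     ierr = ISLocalToGlobalMappingGetInfo(ltog,&nproc,&procs,&numprocs,&indices);CHKERRQ(ierr);
     // in the typical parallel case procs[0] refers to this process itself (see the swap
     // at the end of the routine above); each procs[i] has numprocs[i] shared local
     // indices listed in indices[i][]
     ierr = ISLocalToGlobalMappingRestoreInfo(ltog,&nproc,&procs,&numprocs,&indices);CHKERRQ(ierr);
*/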

981: /*@C
982:     ISLocalToGlobalMappingRestoreInfo - Frees the memory allocated by ISLocalToGlobalMappingGetInfo()

984:     Collective on ISLocalToGlobalMapping

986:     Input Parameter:
987: .   mapping - the mapping from local to global indexing

989:     Output Parameters:
990: +   nproc - number of processors that are connected to this one
991: .   procs - neighboring processors
992: .   numprocs - number of indices for each processor
993: -   indices - indices of local nodes shared with neighbors (sorted by global numbering)

995:     Level: advanced

997: .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreateIS(), ISLocalToGlobalMappingCreate(),
998:           ISLocalToGlobalMappingGetInfo()
999: @*/
1000: PetscErrorCode  ISLocalToGlobalMappingRestoreInfo(ISLocalToGlobalMapping mapping,PetscInt *nproc,PetscInt *procs[],PetscInt *numprocs[],PetscInt **indices[])
1001: {
1003:   PetscInt       i;

1006:   PetscFree(*procs);
1007:   PetscFree(*numprocs);
1008:   if (*indices) {
1009:     PetscFree((*indices)[0]);
1010:     for (i=1; i<*nproc; i++) {
1011:       PetscFree((*indices)[i]);
1012:     }
1013:     PetscFree(*indices);
1014:   }
1015:   return(0);
1016: }

1020: /*@C
1021:    ISLocalToGlobalMappingGetIndices - Get global indices for every local point

1023:    Not Collective

1025:    Input Arguments:
1026: . ltog - local to global mapping

1028:    Output Arguments:
1029: . array - array of indices

1031:    Level: advanced

1033: .seealso: ISLocalToGlobalMappingCreate(), ISLocalToGlobalMappingApply(), ISLocalToGlobalMappingRestoreIndices()
1034: @*/
1035: PetscErrorCode  ISLocalToGlobalMappingGetIndices(ISLocalToGlobalMapping ltog,const PetscInt **array)
1036: {

1041:   *array = ltog->indices;
1042:   return(0);
1043: }
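
/*
   Usage sketch (illustrative only, not taken from the PETSc sources): read-only access to
   the raw index array of a mapping ltog, paired with the matching restore call.

     const PetscInt *idx;
     PetscInt        i,n;
     PetscErrorCode  ierr;

     ierr = ISLocalToGlobalMappingGetSize(ltog,&n);CHKERRQ(ierr);
     ierr = ISLocalToGlobalMappingGetIndices(ltog,&idx);CHKERRQ(ierr);
     for (i=0; i<n; i++) {
       // idx[i] is the global index of local point i
     }
     ierr = ISLocalToGlobalMappingRestoreIndices(ltog,&idx);CHKERRQ(ierr);
*/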

1047: /*@C
1048:    ISLocalToGlobalMappingRestoreIndices - Restore indices obtained with ISLocalToGlobalMappingGetIndices()

1050:    Not Collective

1052:    Input Arguments:
1053: + ltog - local to global mapping
1054: - array - array of indices

1056:    Level: advanced

1058: .seealso: ISLocalToGlobalMappingCreate(), ISLocalToGlobalMappingApply(), ISLocalToGlobalMappingGetIndices()
1059: @*/
1060: PetscErrorCode  ISLocalToGlobalMappingRestoreIndices(ISLocalToGlobalMapping ltog,const PetscInt **array)
1061: {

1066:   if (*array != ltog->indices) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_BADPTR,"Trying to return mismatched pointer");
1067:   *array = PETSC_NULL;
1068:   return(0);
1069: }

1073: /*@C
1074:    ISLocalToGlobalMappingConcatenate - Create a new mapping that concatenates a list of mappings

1076:    Not Collective

1078:    Input Arguments:
1079: + comm - communicator for the new mapping, must contain the communicator of every mapping to concatenate
1080: . n - number of mappings to concatenate
1081: - ltogs - local to global mappings

1083:    Output Arguments:
1084: . ltogcat - new mapping

1086:    Level: advanced

1088: .seealso: ISLocalToGlobalMappingCreate()
1089: @*/
1090: PetscErrorCode ISLocalToGlobalMappingConcatenate(MPI_Comm comm,PetscInt n,const ISLocalToGlobalMapping ltogs[],ISLocalToGlobalMapping *ltogcat)
1091: {
1092:   PetscInt       i,cnt,m,*idx;

1096:   if (n < 0) SETERRQ1(comm,PETSC_ERR_ARG_OUTOFRANGE,"Must have a non-negative number of mappings, given %D",n);
1100:   for (cnt=0,i=0; i<n; i++) {
1101:     ISLocalToGlobalMappingGetSize(ltogs[i],&m);
1102:     cnt += m;
1103:   }
1104:   PetscMalloc(cnt*sizeof(PetscInt),&idx);
1105:   for (cnt=0,i=0; i<n; i++) {
1106:     const PetscInt *subidx;
1107:     ISLocalToGlobalMappingGetSize(ltogs[i],&m);
1108:     ISLocalToGlobalMappingGetIndices(ltogs[i],&subidx);
1109:     PetscMemcpy(&idx[cnt],subidx,m*sizeof(PetscInt));
1110:     ISLocalToGlobalMappingRestoreIndices(ltogs[i],&subidx);
1111:     cnt += m;
1112:   }
1113:   ISLocalToGlobalMappingCreate(comm,cnt,idx,PETSC_OWN_POINTER,ltogcat);
1114:   return(0);
1115: }
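
/*
   Usage sketch (illustrative only, not taken from the PETSc sources): concatenating two
   existing mappings ltogA and ltogB, assumed to live on PETSC_COMM_WORLD.

     ISLocalToGlobalMapping parts[2],cat;
     PetscErrorCode         ierr;

     parts[0] = ltogA; parts[1] = ltogB;
     ierr = ISLocalToGlobalMappingConcatenate(PETSC_COMM_WORLD,2,parts,&cat);CHKERRQ(ierr);
     // local index i of ltogA keeps position i in cat, and local index j of ltogB
     // becomes position sizeA+j, exactly as the copy loop above lays them out
     ierr = ISLocalToGlobalMappingDestroy(&cat);CHKERRQ(ierr);
*/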