Actual source code: iscoloring.c

petsc-3.7.1 2016-05-15
  2: #include <petsc/private/isimpl.h>    /*I "petscis.h"  I*/
  3: #include <petscviewer.h>
  4: #include <petscsf.h>

  6: const char *const ISColoringTypes[] = {"global","ghosted","ISColoringType","IS_COLORING_",0};

 10: PetscErrorCode ISColoringReference(ISColoring coloring)
 11: {
 13:   (coloring)->refct++;
 14:   return(0);
 15: }

 19: PetscErrorCode ISColoringSetType(ISColoring coloring,ISColoringType type)
 20: {
 22:   (coloring)->ctype = type;
 23:   return(0);
 24: }

 28: /*@
 29:    ISColoringDestroy - Destroys a coloring context.

 31:    Collective on ISColoring

 33:    Input Parameter:
 34: .  iscoloring - the coloring context

 36:    Level: advanced

 38: .seealso: ISColoringView(), MatColoring
 39: @*/
 40: PetscErrorCode  ISColoringDestroy(ISColoring *iscoloring)
 41: {
 42:   PetscInt       i;

 46:   if (!*iscoloring) return(0);
 48:   if (--(*iscoloring)->refct > 0) {*iscoloring = 0; return(0);}

 50:   if ((*iscoloring)->is) {
 51:     for (i=0; i<(*iscoloring)->n; i++) {
 52:       ISDestroy(&(*iscoloring)->is[i]);
 53:     }
 54:     PetscFree((*iscoloring)->is);
 55:   }
 56:   if ((*iscoloring)->allocated) {PetscFree((*iscoloring)->colors);}
 57:   PetscCommDestroy(&(*iscoloring)->comm);
 58:   PetscFree((*iscoloring));
 59:   return(0);
 60: }
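
/*
   A minimal usage sketch of the reference counting, assuming an ISColoring "coloring"
   obtained from ISColoringCreate() (hypothetical variable names, error checking omitted):

     ISColoring coloring2 = coloring;
     ISColoringReference(coloring);
     ISColoringDestroy(&coloring);       decrements the count and nulls this pointer
     ISColoringDestroy(&coloring2);      count reaches zero, the coloring is freed
*/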

 64: /*
 65:   ISColoringViewFromOptions - Processes command line options to determine if/how an ISColoring object is to be viewed. 

 67:   Collective on ISColoring

 69:   Input Parameters:
 70: + obj   - the ISColoring object
 71: .  bobj - optional object that provides the options prefix to use for viewing, or NULL
 72: - optionname - option to activate viewing

 74:   Level: intermediate

 76:   Developer Note: This cannot use PetscObjectViewFromOptions() because ISColoring is not a PetscObject

 78: */
 79: PetscErrorCode ISColoringViewFromOptions(ISColoring obj,PetscObject bobj,const char optionname[])
 80: {
 81:   PetscErrorCode    ierr;
 82:   PetscViewer       viewer;
 83:   PetscBool         flg;
 84:   PetscViewerFormat format;
 85:   char              *prefix;

 88:   prefix = bobj ? bobj->prefix : NULL;
 89:   PetscOptionsGetViewer(obj->comm,prefix,optionname,&viewer,&format,&flg);
 90:   if (flg) {
 91:     PetscViewerPushFormat(viewer,format);
 92:     ISColoringView(obj,viewer);
 93:     PetscViewerPopFormat(viewer);
 94:     PetscViewerDestroy(&viewer);
 95:   }
 96:   return(0);
 97: }
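
/*
   A minimal usage sketch, assuming an existing ISColoring "coloring" (hypothetical name,
   error checking omitted):

     ISColoringViewFromOptions(coloring,NULL,"-is_coloring_view");

   If the program is run with -is_coloring_view, the coloring is printed with the viewer and
   format selected by that option; this is what ISColoringCreate() does internally.
*/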

101: /*@C
102:    ISColoringView - Views a coloring context.

104:    Collective on ISColoring

106:    Input Parameters:
107: +  iscoloring - the coloring context
108: -  viewer - the viewer

110:    Level: advanced

112: .seealso: ISColoringDestroy(), ISColoringGetIS(), MatColoring
113: @*/
114: PetscErrorCode  ISColoringView(ISColoring iscoloring,PetscViewer viewer)
115: {
116:   PetscInt       i;
118:   PetscBool      iascii;
119:   IS             *is;

123:   if (!viewer) {
124:     PetscViewerASCIIGetStdout(iscoloring->comm,&viewer);
125:   }

128:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);
129:   if (iascii) {
130:     MPI_Comm    comm;
131:     PetscMPIInt size,rank;

133:     PetscObjectGetComm((PetscObject)viewer,&comm);
134:     MPI_Comm_size(comm,&size);
135:     MPI_Comm_rank(comm,&rank);
136:     PetscViewerASCIIPrintf(viewer,"ISColoring Object: %d MPI processes\n",size);
137:     PetscViewerASCIIPushSynchronized(viewer);
138:     PetscViewerASCIISynchronizedPrintf(viewer,"[%d] Number of colors %d\n",rank,iscoloring->n);
139:     PetscViewerFlush(viewer);
140:     PetscViewerASCIIPopSynchronized(viewer);
141:   }

143:   ISColoringGetIS(iscoloring,PETSC_IGNORE,&is);
144:   for (i=0; i<iscoloring->n; i++) {
145:     ISView(iscoloring->is[i],viewer);
146:   }
147:   ISColoringRestoreIS(iscoloring,&is);
148:   return(0);
149: }
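
/*
   A minimal usage sketch, assuming an existing ISColoring "coloring" that lives on
   PETSC_COMM_WORLD (hypothetical name, error checking omitted):

     ISColoringView(coloring,PETSC_VIEWER_STDOUT_WORLD);
*/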

153: /*@C
154:    ISColoringGetIS - Extracts index sets from the coloring context

156:    Collective on ISColoring

158:    Input Parameter:
159: .  iscoloring - the coloring context

161:    Output Parameters:
162: +  nn - number of index sets in the coloring context
163: -  is - array of index sets

165:    Level: advanced

167: .seealso: ISColoringRestoreIS(), ISColoringView()
168: @*/
169: PetscErrorCode  ISColoringGetIS(ISColoring iscoloring,PetscInt *nn,IS *isis[])
170: {


176:   if (nn) *nn = iscoloring->n;
177:   if (isis) {
178:     if (!iscoloring->is) {
179:       PetscInt        *mcolors,**ii,nc = iscoloring->n,i,base, n = iscoloring->N;
180:       ISColoringValue *colors = iscoloring->colors;
181:       IS              *is;

183: #if defined(PETSC_USE_DEBUG)
184:       for (i=0; i<n; i++) {
185:         if (((PetscInt)colors[i]) >= nc) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Coloring is out of range: index %d value %d number of colors %d",(int)i,(int)colors[i],(int)nc);
186:       }
187: #endif

189:       /* generate the lists of nodes for each color */
190:       PetscCalloc1(nc,&mcolors);
191:       for (i=0; i<n; i++) mcolors[colors[i]]++;

193:       PetscMalloc1(nc,&ii);
194:       PetscMalloc1(n,&ii[0]);
195:       for (i=1; i<nc; i++) ii[i] = ii[i-1] + mcolors[i-1];
196:       PetscMemzero(mcolors,nc*sizeof(PetscInt));

198:       if (iscoloring->ctype == IS_COLORING_GLOBAL) {
199:         MPI_Scan(&iscoloring->N,&base,1,MPIU_INT,MPI_SUM,iscoloring->comm);
200:         base -= iscoloring->N;
201:         for (i=0; i<n; i++) ii[colors[i]][mcolors[colors[i]]++] = i + base; /* global idx */
202:       } else if (iscoloring->ctype == IS_COLORING_GHOSTED) {
203:         for (i=0; i<n; i++) ii[colors[i]][mcolors[colors[i]]++] = i;   /* local idx */
204:       } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not provided for this ISColoringType");

206:       PetscMalloc1(nc,&is);
207:       for (i=0; i<nc; i++) {
208:         ISCreateGeneral(iscoloring->comm,mcolors[i],ii[i],PETSC_COPY_VALUES,is+i);
209:       }

211:       iscoloring->is = is;
212:       PetscFree(ii[0]);
213:       PetscFree(ii);
214:       PetscFree(mcolors);
215:     }
216:     *isis = iscoloring->is;
217:   }
218:   return(0);
219: }
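
/*
   A minimal usage sketch, assuming an existing ISColoring "coloring" (hypothetical name,
   error checking omitted); there is one index set per color and the array remains owned
   by the coloring, so ISColoringRestoreIS() currently does no work:

     PetscInt nis,i;
     IS       *isa;

     ISColoringGetIS(coloring,&nis,&isa);
     for (i=0; i<nis; i++) {
       ISView(isa[i],PETSC_VIEWER_STDOUT_WORLD);
     }
     ISColoringRestoreIS(coloring,&isa);
*/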

223: /*@C
224:    ISColoringRestoreIS - Restores the index sets extracted from the coloring context

226:    Collective on ISColoring

228:    Input Parameters:
229: +  iscoloring - the coloring context
230: -  is - array of index sets

232:    Level: advanced

234: .seealso: ISColoringGetIS(), ISColoringView()
235: @*/
236: PetscErrorCode  ISColoringRestoreIS(ISColoring iscoloring,IS *is[])
237: {

241:   /* currently nothing is done here */
242:   return(0);
243: }


248: /*@
249:     ISColoringCreate - Generates an ISColoring context from lists (provided
250:     by each processor) of colors for each node.

252:     Collective on MPI_Comm

254:     Input Parameters:
255: +   comm - communicator for the processors creating the coloring
256: .   ncolors - max color value
257: .   n - number of nodes on this processor
258: .   colors - array containing the colors for this processor, color numbers begin at 0.
259: -   mode - see PetscCopyMode for meaning of this flag.

261:     Output Parameter:
262: .   iscoloring - the resulting coloring data structure

264:     Options Database Key:
265: .   -is_coloring_view - Activates ISColoringView()

267:    Level: advanced

269:     Notes: By default sets the coloring type to IS_COLORING_GLOBAL

271: .seealso: MatColoringCreate(), ISColoringView(), ISColoringDestroy(), ISColoringSetType()

273: @*/
274: PetscErrorCode  ISColoringCreate(MPI_Comm comm,PetscInt ncolors,PetscInt n,const ISColoringValue colors[],PetscCopyMode mode,ISColoring *iscoloring)
275: {
277:   PetscMPIInt    size,rank,tag;
278:   PetscInt       base,top,i;
279:   PetscInt       nc,ncwork;
280:   MPI_Status     status;

283:   if (ncolors != PETSC_DECIDE && ncolors > IS_COLORING_MAX) {
284:     if (ncolors > 65535) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Max color value exceeds 65535 limit. This number is unrealistic. Perhaps a bug in the code?\nCurrent max: %d user requested: %d",IS_COLORING_MAX,ncolors);
285:     else                 SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Max color value exceeds limit. Perhaps reconfigure PETSc with --with-is-color-value-type=short?\n Current max: %d user requested: %d",IS_COLORING_MAX,ncolors);
286:   }
287:   PetscNew(iscoloring);
288:   PetscCommDuplicate(comm,&(*iscoloring)->comm,&tag);
289:   comm = (*iscoloring)->comm;

291:   /* compute the number of the first node on my processor */
292:   MPI_Comm_size(comm,&size);

294:   /* should use MPI_Scan() */
295:   MPI_Comm_rank(comm,&rank);
296:   if (!rank) {
297:     base = 0;
298:     top  = n;
299:   } else {
300:     MPI_Recv(&base,1,MPIU_INT,rank-1,tag,comm,&status);
301:     top  = base+n;
302:   }
303:   if (rank < size-1) {
304:     MPI_Send(&top,1,MPIU_INT,rank+1,tag,comm);
305:   }

307:   /* compute the total number of colors */
308:   ncwork = 0;
309:   for (i=0; i<n; i++) {
310:     if (ncwork < colors[i]) ncwork = colors[i];
311:   }
312:   ncwork++;
313:   MPIU_Allreduce(&ncwork,&nc,1,MPIU_INT,MPI_MAX,comm);
314:   if (nc > ncolors) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Number of colors passed in %D is less than the actual number of colors in array %D",ncolors,nc);
315:   (*iscoloring)->n      = nc;
316:   (*iscoloring)->is     = 0;
317:   (*iscoloring)->N      = n;
318:   (*iscoloring)->refct  = 1;
319:   (*iscoloring)->ctype  = IS_COLORING_GLOBAL;
320:   if (mode == PETSC_COPY_VALUES) {
321:     PetscMalloc1(n,&(*iscoloring)->colors);
322:     PetscLogObjectMemory((PetscObject)(*iscoloring),n*sizeof(ISColoringValue));
323:     PetscMemcpy((*iscoloring)->colors,colors,n*sizeof(ISColoringValue));
324:     (*iscoloring)->allocated = PETSC_TRUE;
325:   } else if (mode == PETSC_OWN_POINTER) {
326:     (*iscoloring)->colors    = (ISColoringValue*)colors;
327:     (*iscoloring)->allocated = PETSC_TRUE;
328:   } else {
329:     (*iscoloring)->colors    = (ISColoringValue*)colors;
330:     (*iscoloring)->allocated = PETSC_FALSE;
331:   }
332:   ISColoringViewFromOptions(*iscoloring,NULL,"-is_coloring_view");
333:   PetscInfo1(0,"Number of colors %D\n",nc);
334:   return(0);
335: }
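
/*
   A minimal usage sketch: a two-color coloring of four local nodes on each process
   (assumes PetscInitialize() has been called; error checking omitted):

     PetscInt        n = 4;
     ISColoringValue colors[4] = {0,1,0,1};
     ISColoring      coloring;

     ISColoringCreate(PETSC_COMM_WORLD,2,n,colors,PETSC_COPY_VALUES,&coloring);
     ISColoringView(coloring,PETSC_VIEWER_STDOUT_WORLD);
     ISColoringDestroy(&coloring);

   With PETSC_COPY_VALUES the colors array stays owned by the caller; with PETSC_OWN_POINTER
   it must have been obtained with PetscMalloc1() and is freed by ISColoringDestroy().
*/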

339: /*@
340:     ISBuildTwoSided - Takes an IS that describes, for each local entry, the process it will be sent to, and generates
341:     an IS that contains the index values this process receives from remote or local senders.

343:     Collective on IS

345:     Input Parameters:
346: +   ito - an IS giving the target process rank for each local entry; entries with a negative rank are ignored
347: -   toindx - an IS giving the index values to send, or NULL to send the natural (global) numbering

349:     Output Parameter:
350: .   rows - the sorted index values this process receives, from remote or local senders

352:    Level: advanced

354: .seealso: MatPartitioningCreate(), ISPartitioningToNumbering(), ISPartitioningCount()

356: @*/
357: PetscErrorCode  ISBuildTwoSided(IS ito,IS toindx, IS *rows)
358: {
359:    const PetscInt       *ito_indices,*toindx_indices;
360:    PetscInt             *send_indices,rstart,*recv_indices,nrecvs,nsends;
361:    PetscInt             *tosizes,*fromsizes,i,j,*tosizes_tmp,*tooffsets_tmp,ito_ln;
362:    PetscMPIInt          *toranks,*fromranks,size,target_rank,*fromperm_newtoold,nto,nfrom;
363:    PetscLayout           isrmap;
364:    MPI_Comm              comm;
365:    PetscSF               sf;
366:    PetscSFNode          *iremote;
367:    PetscErrorCode        ierr;

370:    PetscObjectGetComm((PetscObject)ito,&comm);
371:    MPI_Comm_size(comm,&size);
372:    ISGetLocalSize(ito,&ito_ln);
373:    /* why do we not have ISGetLayout? */
374:    isrmap = ito->map;
375:    PetscLayoutGetRange(isrmap,&rstart,PETSC_NULL);
376:    ISGetIndices(ito,&ito_indices);
377:    PetscCalloc2(size,&tosizes_tmp,size+1,&tooffsets_tmp);
378:    for(i=0; i<ito_ln; i++){
379:      if(ito_indices[i]<0) continue;
380: #if defined(PETSC_USE_DEBUG)
381:      if(ito_indices[i]>=size) SETERRQ2(comm,PETSC_ERR_ARG_OUTOFRANGE,"target rank %d is larger than communicator size %d ",ito_indices[i],size);
382: #endif
383:      tosizes_tmp[ito_indices[i]]++;
384:    }
385:    nto = 0;
386:    for (i=0; i<size; i++) {
387:      tooffsets_tmp[i+1] = tooffsets_tmp[i]+tosizes_tmp[i];
388:      if (tosizes_tmp[i]>0) nto++;
389:    }
390:    PetscCalloc2(nto,&toranks,2*nto,&tosizes);
391:    nto = 0;
392:    for(i=0; i<size; i++){
393:      if(tosizes_tmp[i]>0){
394:         toranks[nto]      = i;
395:         tosizes[2*nto]    = tosizes_tmp[i];/* size */
396:         tosizes[2*nto+1]  = tooffsets_tmp[i];/* offset */
397:         nto++;
398:      }
399:    }
400:    nsends = tooffsets_tmp[size];
401:    PetscCalloc1(nsends,&send_indices);
402:    if (toindx) {
403:      ISGetIndices(toindx,&toindx_indices);
404:    }
405:    for (i=0; i<ito_ln; i++) {
406:      if (ito_indices[i]<0) continue;
407:      target_rank = ito_indices[i];
408:      send_indices[tooffsets_tmp[target_rank]] = toindx ? toindx_indices[i] : (i+rstart);
409:      tooffsets_tmp[target_rank]++;
410:    }
411:    if (toindx) {
412:      ISRestoreIndices(toindx,&toindx_indices);
413:    }
414:    ISRestoreIndices(ito,&ito_indices);
415:    PetscFree2(tosizes_tmp,tooffsets_tmp);
416:    PetscCommBuildTwoSided(comm,2,MPIU_INT,nto,toranks,tosizes,&nfrom,&fromranks,&fromsizes);
417:    PetscFree2(toranks,tosizes);
418:    PetscCalloc1(nfrom,&fromperm_newtoold);
419:    for (i=0; i<nfrom; i++) {
420:      fromperm_newtoold[i] = i;
421:    }
422:    PetscSortMPIIntWithArray(nfrom,fromranks,fromperm_newtoold);
423:    nrecvs   = 0;
424:    for (i=0; i<nfrom; i++) {
425:      nrecvs += fromsizes[i*2];
426:    }
427:    PetscCalloc1(nrecvs,&recv_indices);
428:    PetscCalloc1(nrecvs,&iremote);
429:    nrecvs = 0;
430:    for(i=0; i<nfrom; i++){
431:      for(j=0; j<fromsizes[2*fromperm_newtoold[i]]; j++){
432:        iremote[nrecvs].rank    = fromranks[i];
433:        iremote[nrecvs++].index = fromsizes[2*fromperm_newtoold[i]+1]+j;
434:      }
435:    }
436:    PetscSFCreate(comm,&sf);
437:    PetscSFSetGraph(sf,nsends,nrecvs,PETSC_NULL,PETSC_OWN_POINTER,iremote,PETSC_OWN_POINTER);
438:    PetscSFSetType(sf,PETSCSFBASIC);
439:    /* how to put a prefix ? */
440:    PetscSFSetFromOptions(sf);
441:    PetscSFBcastBegin(sf,MPIU_INT,send_indices,recv_indices);
442:    PetscSFBcastEnd(sf,MPIU_INT,send_indices,recv_indices);
443:    PetscSFDestroy(&sf);
444:    PetscFree(fromranks);
445:    PetscFree(fromsizes);
446:    PetscFree(fromperm_newtoold);
447:    PetscFree(send_indices);
448:    if (rows) {
449:      PetscSortInt(nrecvs,recv_indices);
450:      ISCreateGeneral(comm,nrecvs,recv_indices,PETSC_OWN_POINTER,rows);
451:    } else {
452:      PetscFree(recv_indices);
453:    }
454:    return(0);
455: }
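
/*
   A minimal usage sketch on exactly two processes, each sending its two local entries to the
   other process (assumes PetscInitialize() has been called; error checking omitted):

     IS          ito,rows;
     PetscInt    targets[2];
     PetscMPIInt rank;

     MPI_Comm_rank(PETSC_COMM_WORLD,&rank);
     targets[0] = targets[1] = (rank+1)%2;
     ISCreateGeneral(PETSC_COMM_WORLD,2,targets,PETSC_COPY_VALUES,&ito);
     ISBuildTwoSided(ito,NULL,&rows);
     ISDestroy(&ito);
     ISDestroy(&rows);

   Since toindx is NULL the natural global numbering is sent, so rows on process 0 is {2,3}
   and on process 1 is {0,1}.
*/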


460: /*@
461:     ISPartitioningToNumbering - Takes an ISPartitioning and on each processor
462:     generates an IS that contains a new global node number for each index based
463:     on the partitioning.

465:     Collective on IS

467:     Input Parameter:
468: .   partitioning - a partitioning as generated by MatPartitioningApply()

470:     Output Parameter:
471: .   is - on each processor the index set that defines the global numbers
472:          (in the new numbering) for all the nodes currently (before the partitioning)
473:          on that processor

475:    Level: advanced

477: .seealso: MatPartitioningCreate(), AOCreateBasic(), ISPartitioningCount()

479: @*/
480: PetscErrorCode  ISPartitioningToNumbering(IS part,IS *is)
481: {
482:   MPI_Comm       comm;
483:   PetscInt       i,np,npt,n,*starts = NULL,*sums = NULL,*lsizes = NULL,*newi = NULL;
484:   const PetscInt *indices = NULL;

488:   PetscObjectGetComm((PetscObject)part,&comm);

490:   /* count the number of partitions, i.e., virtual processors */
491:   ISGetLocalSize(part,&n);
492:   ISGetIndices(part,&indices);
493:   np   = 0;
494:   for (i=0; i<n; i++) np = PetscMax(np,indices[i]);
495:   MPIU_Allreduce(&np,&npt,1,MPIU_INT,MPI_MAX,comm);
496:   np   = npt+1; /* so that it looks like a MPI_Comm_size output */

498:   /*
499:         lsizes - number of elements of each partition on this particular processor
500:         sums - total number of "previous" nodes for any particular partition
501:         starts - global number of first element in each partition on this processor
502:   */
503:   PetscMalloc3(np,&lsizes,np,&starts,np,&sums);
504:   PetscMemzero(lsizes,np*sizeof(PetscInt));
505:   for (i=0; i<n; i++) lsizes[indices[i]]++;
506:   MPIU_Allreduce(lsizes,sums,np,MPIU_INT,MPI_SUM,comm);
507:   MPI_Scan(lsizes,starts,np,MPIU_INT,MPI_SUM,comm);
508:   for (i=0; i<np; i++) starts[i] -= lsizes[i];
509:   for (i=1; i<np; i++) {
510:     sums[i]   += sums[i-1];
511:     starts[i] += sums[i-1];
512:   }

514:   /*
515:       For each local index give it the new global number
516:   */
517:   PetscMalloc1(n,&newi);
518:   for (i=0; i<n; i++) newi[i] = starts[indices[i]]++;
519:   PetscFree3(lsizes,starts,sums);

521:   ISRestoreIndices(part,&indices);
522:   ISCreateGeneral(comm,n,newi,PETSC_OWN_POINTER,is);
523:   ISSetPermutation(*is);
524:   return(0);
525: }
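
/*
   A minimal usage sketch, assuming "part" is the IS produced by MatPartitioningApply()
   (hypothetical name, error checking omitted):

     IS newnumbering;

     ISPartitioningToNumbering(part,&newnumbering);
     ISView(newnumbering,PETSC_VIEWER_STDOUT_WORLD);
     ISDestroy(&newnumbering);

   Entry i of newnumbering is the global number, in the post-partitioning ordering, of the
   node that currently has local index i.
*/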

529: /*@
530:     ISPartitioningCount - Takes an ISPartitioning and determines the number of
531:     resulting elements on each (partition) process

533:     Collective on IS

535:     Input Parameters:
536: +   partitioning - a partitioning as generated by MatPartitioningApply()
537: -   len - length of the array count, this is the total number of partitions

539:     Output Parameter:
540: .   count - array of length size, to contain the number of elements assigned
541:         to each partition, where size is the number of partitions generated
542:          (see notes below).

544:    Level: advanced

546:     Notes:
547:         By default the number of partitions generated (and thus the length
548:         of count) is the size of the communicator associated with IS,
549:         but it can be set by MatPartitioningSetNParts. The resulting array
550:         of lengths can for instance serve as input of PCBJacobiSetTotalBlocks.


553: .seealso: MatPartitioningCreate(), AOCreateBasic(), ISPartitioningToNumbering(),
554:         MatPartitioningSetNParts(), MatPartitioningApply()

556: @*/
557: PetscErrorCode  ISPartitioningCount(IS part,PetscInt len,PetscInt count[])
558: {
559:   MPI_Comm       comm;
560:   PetscInt       i,n,*lsizes;
561:   const PetscInt *indices;
563:   PetscMPIInt    npp;

566:   PetscObjectGetComm((PetscObject)part,&comm);
567:   if (len == PETSC_DEFAULT) {
568:     PetscMPIInt size;
569:     MPI_Comm_size(comm,&size);
570:     len  = (PetscInt) size;
571:   }

573:   /* count the number of partitions */
574:   ISGetLocalSize(part,&n);
575:   ISGetIndices(part,&indices);
576: #if defined(PETSC_USE_DEBUG)
577:   {
578:     PetscInt np = 0,npt;
579:     for (i=0; i<n; i++) np = PetscMax(np,indices[i]);
580:     MPIU_Allreduce(&np,&npt,1,MPIU_INT,MPI_MAX,comm);
581:     np   = npt+1; /* so that it looks like a MPI_Comm_size output */
582:     if (np > len) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Length of count array %D is less than number of partitions %D",len,np);
583:   }
584: #endif

586:   /*
587:         lsizes - number of elements of each partition on this particular processor
588:   */
591:   PetscCalloc1(len,&lsizes);
592:   for (i=0; i<n; i++) lsizes[indices[i]]++;
593:   ISRestoreIndices(part,&indices);
594:   PetscMPIIntCast(len,&npp);
595:   MPIU_Allreduce(lsizes,count,npp,MPIU_INT,MPI_SUM,comm);
596:   PetscFree(lsizes);
597:   return(0);
598: }
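
/*
   A minimal usage sketch, assuming "part" is the IS produced by MatPartitioningApply() and the
   default number of partitions (the communicator size) is used (error checking omitted):

     PetscMPIInt size;
     PetscInt    *count;

     MPI_Comm_size(PETSC_COMM_WORLD,&size);
     PetscMalloc1(size,&count);
     ISPartitioningCount(part,size,count);
     PetscFree(count);

   Afterwards count[p] is the number of elements assigned to partition p.
*/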

602: /*@
603:     ISAllGather - Given an index set (IS) on each processor, generates a large
604:     index set (same on each processor) by concatenating together each
605:     processor's index set.

607:     Collective on IS

609:     Input Parameter:
610: .   is - the distributed index set

612:     Output Parameter:
613: .   isout - the concatenated index set (same on all processors)

615:     Notes:
616:     ISAllGather() is clearly not scalable for large index sets.

618:     The IS created on each processor must be created with a common
619:     communicator (e.g., PETSC_COMM_WORLD). If the index sets were created
620:     with PETSC_COMM_SELF, this routine will not work as expected, since
621:     each process will generate its own new IS that consists only of
622:     itself.

624:     The communicator for this new IS is PETSC_COMM_SELF

626:     Level: intermediate

628:     Concepts: gather^index sets
629:     Concepts: index sets^gathering to all processors
630:     Concepts: IS^gathering to all processors

632: .seealso: ISCreateGeneral(), ISCreateStride(), ISCreateBlock()
633: @*/
634: PetscErrorCode  ISAllGather(IS is,IS *isout)
635: {
637:   PetscInt       *indices,n,i,N,step,first;
638:   const PetscInt *lindices;
639:   MPI_Comm       comm;
640:   PetscMPIInt    size,*sizes = NULL,*offsets = NULL,nn;
641:   PetscBool      stride;


647:   PetscObjectGetComm((PetscObject)is,&comm);
648:   MPI_Comm_size(comm,&size);
649:   ISGetLocalSize(is,&n);
650:   PetscObjectTypeCompare((PetscObject)is,ISSTRIDE,&stride);
651:   if (size == 1 && stride) { /* should handle parallel ISStride also */
652:     ISStrideGetInfo(is,&first,&step);
653:     ISCreateStride(PETSC_COMM_SELF,n,first,step,isout);
654:   } else {
655:     PetscMalloc2(size,&sizes,size,&offsets);

657:     PetscMPIIntCast(n,&nn);
658:     MPI_Allgather(&nn,1,MPI_INT,sizes,1,MPI_INT,comm);
659:     offsets[0] = 0;
660:     for (i=1; i<size; i++) {
661:       PetscInt s = offsets[i-1] + sizes[i-1];
662:       PetscMPIIntCast(s,&offsets[i]);
663:     }
664:     N = offsets[size-1] + sizes[size-1];

666:     PetscMalloc1(N,&indices);
667:     ISGetIndices(is,&lindices);
668:     MPI_Allgatherv((void*)lindices,nn,MPIU_INT,indices,sizes,offsets,MPIU_INT,comm);
669:     ISRestoreIndices(is,&lindices);
670:     PetscFree2(sizes,offsets);

672:     ISCreateGeneral(PETSC_COMM_SELF,N,indices,PETSC_OWN_POINTER,isout);
673:   }
674:   return(0);
675: }
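
/*
   A minimal usage sketch in which each process contributes two consecutive indices
   (assumes PetscInitialize() has been called; error checking omitted):

     IS          is,isall;
     PetscMPIInt rank;

     MPI_Comm_rank(PETSC_COMM_WORLD,&rank);
     ISCreateStride(PETSC_COMM_WORLD,2,2*rank,1,&is);
     ISAllGather(is,&isall);
     ISDestroy(&is);
     ISDestroy(&isall);

   isall is a sequential IS (communicator PETSC_COMM_SELF) containing all indices from all
   processes in rank order.
*/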

679: /*@C
680:     ISAllGatherColors - Given a set of colors on each processor, generates a large
681:     set (same on each processor) by concatenating together each processor's colors

683:     Collective on MPI_Comm

685:     Input Parameters:
686: +   comm - communicator to share the indices
687: .   n - local size of set
688: -   lindices - local colors

690:     Output Parameters:
691: +   outN - total number of indices
692: -   outindices - all of the colors

694:     Notes:
695:     ISAllGatherColors() is clearly not scalable for large index sets.


698:     Level: intermediate

700:     Concepts: gather^index sets
701:     Concepts: index sets^gathering to all processors
702:     Concepts: IS^gathering to all processors

704: .seealso: ISCreateGeneral(), ISCreateStride(), ISCreateBlock(), ISAllGather()
705: @*/
706: PetscErrorCode  ISAllGatherColors(MPI_Comm comm,PetscInt n,ISColoringValue *lindices,PetscInt *outN,ISColoringValue *outindices[])
707: {
708:   ISColoringValue *indices;
709:   PetscErrorCode  ierr;
710:   PetscInt        i,N;
711:   PetscMPIInt     size,*offsets = NULL,*sizes = NULL, nn = n;

714:   MPI_Comm_size(comm,&size);
715:   PetscMalloc2(size,&sizes,size,&offsets);

717:   MPI_Allgather(&nn,1,MPI_INT,sizes,1,MPI_INT,comm);
718:   offsets[0] = 0;
719:   for (i=1; i<size; i++) offsets[i] = offsets[i-1] + sizes[i-1];
720:   N    = offsets[size-1] + sizes[size-1];
721:   PetscMalloc1(N+1,&indices);
722:   MPI_Allgatherv(lindices,(PetscMPIInt)n,MPIU_COLORING_VALUE,indices,sizes,offsets,MPIU_COLORING_VALUE,comm);
723:   PetscFree2(sizes,offsets);

726:   *outindices = indices;
727:   if (outN) *outN = N;
728:   return(0);
729: }
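
/*
   A minimal usage sketch, each process contributing two local colors
   (assumes PetscInitialize() has been called; error checking omitted):

     ISColoringValue local[2] = {0,1};
     ISColoringValue *all;
     PetscInt        N;

     ISAllGatherColors(PETSC_COMM_WORLD,2,local,&N,&all);
     PetscFree(all);

   N is twice the number of processes, and the gathered array must be freed with PetscFree().
*/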

733: /*@
734:     ISComplement - Given an index set (IS) generates the complement index set, that is, all
735:        indices that are NOT in the given set.

737:     Collective on IS

739:     Input Parameters:
740: +   is - the index set
741: .   nmin - the first index desired in the local part of the complement
742: -   nmax - the largest index desired in the local part of the complement (note that all indices in is must be greater than or equal to nmin and less than nmax)

744:     Output Parameter:
745: .   isout - the complement

747:     Notes:  The communicator for this new IS is the same as for the input IS

749:       For a parallel IS, this will generate the local part of the complement on each process

751:       To generate the entire complement (on each process) of a parallel IS, first call ISAllGather() and then
752:     call this routine.

754:     Level: intermediate

756:     Concepts: gather^index sets
757:     Concepts: index sets^gathering to all processors
758:     Concepts: IS^gathering to all processors

760: .seealso: ISCreateGeneral(), ISCreateStride(), ISCreateBlock(), ISAllGather()
761: @*/
762: PetscErrorCode  ISComplement(IS is,PetscInt nmin,PetscInt nmax,IS *isout)
763: {
765:   const PetscInt *indices;
766:   PetscInt       n,i,j,unique,cnt,*nindices;
767:   PetscBool      sorted;

772:   if (nmin < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"nmin %D cannot be negative",nmin);
773:   if (nmin > nmax) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"nmin %D cannot be greater than nmax %D",nmin,nmax);
774:   ISSorted(is,&sorted);
775:   if (!sorted) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Index set must be sorted");

777:   ISGetLocalSize(is,&n);
778:   ISGetIndices(is,&indices);
779: #if defined(PETSC_USE_DEBUG)
780:   for (i=0; i<n; i++) {
781:     if (indices[i] <  nmin) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Index %D's value %D is smaller than minimum given %D",i,indices[i],nmin);
782:     if (indices[i] >= nmax) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Index %D's value %D is larger than maximum given %D",i,indices[i],nmax);
783:   }
784: #endif
785:   /* Count number of unique entries */
786:   unique = (n>0);
787:   for (i=0; i<n-1; i++) {
788:     if (indices[i+1] != indices[i]) unique++;
789:   }
790:   PetscMalloc1(nmax-nmin-unique,&nindices);
791:   cnt  = 0;
792:   for (i=nmin,j=0; i<nmax; i++) {
793:     if (j<n && i==indices[j]) do { j++; } while (j<n && i==indices[j]);
794:     else nindices[cnt++] = i;
795:   }
796:   if (cnt != nmax-nmin-unique) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Number of entries found in complement %D does not match expected %D",cnt,nmax-nmin-unique);
797:   ISCreateGeneral(PetscObjectComm((PetscObject)is),cnt,nindices,PETSC_OWN_POINTER,isout);
798:   ISRestoreIndices(is,&indices);
799:   return(0);
800: }
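
/*
   A minimal usage sketch on a sequential, sorted index set (assumes PetscInitialize() has been
   called; error checking omitted):

     IS       is,isc;
     PetscInt idx[3] = {1,3,4};

     ISCreateGeneral(PETSC_COMM_SELF,3,idx,PETSC_COPY_VALUES,&is);
     ISComplement(is,0,6,&isc);
     ISDestroy(&is);
     ISDestroy(&isc);

   Here isc contains the indices 0, 2, and 5, i.e. everything in [0,6) that is not in is.
*/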