ParallelComm.cpp
00001 #include "moab/Interface.hpp"
00002 #include "moab/ParallelComm.hpp"
00003 #include "moab/WriteUtilIface.hpp"
00004 #include "moab/ReadUtilIface.hpp"
00005 #include "SequenceManager.hpp"
00006 #include "moab/Error.hpp"
00007 #include "EntitySequence.hpp"
00008 #include "MBTagConventions.hpp"
00009 #include "moab/Skinner.hpp"
00010 #include "MBParallelConventions.h"
00011 #include "moab/Core.hpp"
00012 #include "ElementSequence.hpp"
00013 #include "moab/CN.hpp"
00014 #include "moab/RangeMap.hpp"
00015 #include "moab/MeshTopoUtil.hpp"
00016 #include "TagInfo.hpp"
00017 #include "DebugOutput.hpp"
00018 #include "SharedSetData.hpp"
00019 #include "moab/ScdInterface.hpp"
00020 #include "moab/TupleList.hpp"
00021 #include "moab/gs.hpp"
00022 
00023 #include <iostream>
00024 #include <sstream>
00025 #include <algorithm>
00026 #include <functional>
00027 #include <numeric>
00028 
00029 #include <math.h>
00030 #include <assert.h>
00031 
00032 #ifdef USE_MPI
00033 #include "moab_mpi.h"
00034 #endif
00035 #ifdef USE_MPE
00036 #  include "mpe.h"
00037 int IFACE_START, IFACE_END;
00038 int GHOST_START, GHOST_END;
00039 int SHAREDV_START, SHAREDV_END;
00040 int RESOLVE_START, RESOLVE_END;
00041 int ENTITIES_START, ENTITIES_END;
00042 int RHANDLES_START, RHANDLES_END;
00043 int OWNED_START, OWNED_END;
00044 #endif
00045 
00046 namespace moab {
00047 
00048   const unsigned int ParallelComm::INITIAL_BUFF_SIZE = 1024;
00049 
00050   const int MAX_BCAST_SIZE = (1<<28);
00051 
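  // MAX_BCAST_SIZE caps the number of bytes handed to a single MPI_Bcast;
  // broadcast_entities() below sends larger packed buffers in chunks of at
  // most this size.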
00052   std::vector<ParallelComm::Buffer*> msgs;
00053   unsigned int __PACK_num = 0, __UNPACK_num = 0, __PACK_count = 0, __UNPACK_count = 0;
00054   std::string __PACK_string, __UNPACK_string;
00055 
00056 #ifdef DEBUG_PACKING_TIMES
00057 #  define PC(n, m) {                                                        \
00058     if (__PACK_num == (unsigned int)n && __PACK_string == m) __PACK_count++; \
00059     else {                                                                \
00060       if (__PACK_count > 1) std::cerr << " (" << __PACK_count << "x)";        \
00061       __PACK_count = 1; __PACK_string = m; __PACK_num = n;                \
00062       std::cerr << std::endl << "PACK: " << n << m;                        \
00063     }}
00064 #  define UPC(n, m) {                                                        \
00065     if (__UNPACK_num == (unsigned int)n && __UNPACK_string == m) __UNPACK_count++; \
00066     else {                                                                \
00067       if (__UNPACK_count > 1) std::cerr << "(" << __UNPACK_count << "x)"; \
00068       __UNPACK_count = 1; __UNPACK_string = m; __UNPACK_num = n;        \
00069       std::cerr << std::endl << "UNPACK: " << n << m;                        \
00070     }}
00071 #else
00072 #  define PC(n, m)
00073 #  define UPC(n, m)
00074 #endif
00075 
00076   template <typename T> static inline
00077   void UNPACK( unsigned char*& buff, T* val, size_t count )
00078   {
00079     memcpy( val, buff, count*sizeof(T) );
00080     buff += count*sizeof(T);
00081   }
00082 
00083   template <typename T> static inline
00084   void PACK( unsigned char*& buff, const T* val, size_t count )
00085   {
00086     memcpy( buff, val, count*sizeof(T) );
00087     buff += count*sizeof(T);
00088   }
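  // Both helpers serialize by raw memcpy: 'count' values of type T are copied
  // to or from the byte buffer and the buffer pointer is advanced past them,
  // so successive PACK/UNPACK calls must be issued in matching order.  A
  // minimal sketch (local names are illustrative only):
  //   unsigned char scratch[16], *w = scratch, *r = scratch;
  //   int n = 7;
  //   PACK(w, &n, 1);    // writes sizeof(int) bytes, advances w
  //   int m;
  //   UNPACK(r, &m, 1);  // reads them back, advances r; m == 7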
00089 
00090   static inline
00091   void PACK_INTS( unsigned char*& buff, const int* int_val, size_t num )
00092   { PACK( buff, int_val, num ); PC(num, " ints"); }
00093 
00094   static inline
00095   void PACK_INT( unsigned char*& buff, int int_val )
00096   { PACK_INTS( buff, &int_val, 1 ); }
00097 
00098   static inline
00099   void PACK_DBLS( unsigned char*& buff, const double* dbl_val, size_t num )
00100   { PACK( buff, dbl_val, num ); PC(num, " doubles"); }
00101 
00102   //static inline
00103   //void PACK_DBL( unsigned char*& buff, const double dbl_val)
00104   //{ PACK_DBLS( buff, &dbl_val, 1 ); }
00105 
00106   static inline
00107   void PACK_EH( unsigned char*& buff, const EntityHandle* eh_val, size_t num )
00108   { PACK( buff, eh_val, num ); PC(num, " handles"); }
00109 
00110   //static inline
00111   //void PACK_CHAR_64( unsigned char*& buff, const char* str )
00112   //{
00113   //  memcpy(buff, str, 64 );
00114   //  buff += 64;
00115   //  PC(64, " chars");
00116   //}
00117 
00118   static inline
00119   void PACK_VOID( unsigned char*& buff, const void* val, size_t num )
00120   {
00121     PACK( buff, reinterpret_cast<const unsigned char*>(val), num );
00122     PC(num, " void");
00123   }
00124 
00125   static inline
00126   void PACK_BYTES( unsigned char*& buff, const void* val, int num )
00127   { PACK_INT(buff, num); PACK_VOID(buff, val, num); }
00128 
00129   static inline
00130   void PACK_RANGE( unsigned char*& buff, const Range& rng )
00131   {
00132     PACK_INT( buff, rng.psize() );
00133     Range::const_pair_iterator cit;
00134     for (cit = rng.const_pair_begin(); cit != rng.const_pair_end(); ++cit) {
00135       EntityHandle eh[2] = { cit->first, cit->second };
00136       PACK_EH(buff, eh, 2);
00137     }
00138     PC(rng.psize(), "-subranged range");
00139   }
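  // Wire format for a Range: the pair count (psize) followed by the
  // (first, last) handle of each contiguous subrange; UNPACK_RANGE and
  // RANGE_SIZE below assume the same layout.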
00140 
00141   static inline
00142   void UNPACK_INTS( unsigned char*& buff, int* int_val, size_t num )
00143   { UNPACK(buff, int_val, num); UPC(num, " ints"); }
00144 
00145   static inline
00146   void UNPACK_INT( unsigned char*& buff, int& int_val )
00147   { UNPACK_INTS( buff, &int_val, 1 ); }
00148 
00149   static inline
00150   void UNPACK_DBLS( unsigned char*& buff, double* dbl_val, size_t num )
00151   { UNPACK(buff, dbl_val, num); UPC(num, " doubles"); }
00152 
00153   static inline
00154   void UNPACK_DBL( unsigned char*& buff, double &dbl_val)
00155   { UNPACK_DBLS(buff, &dbl_val, 1); }
00156 
00157   static inline
00158   void UNPACK_EH( unsigned char*& buff, EntityHandle* eh_val, size_t num )
00159   { UNPACK(buff, eh_val, num); UPC(num, " handles"); }
00160 
00161   //static inline
00162   //void UNPACK_CHAR_64( unsigned char*& buff, char* char_val )
00163   //{
00164   //  memcpy( buff, char_val, 64 );
00165   //  buff += 64;
00166   //  UPC(64, " chars");
00167   //}
00168 
00169   static inline
00170   void UNPACK_VOID( unsigned char*& buff, void* val, size_t num )
00171   {
00172     UNPACK(buff, reinterpret_cast<unsigned char*>(val), num);
00173     UPC(num, " void");
00174   }
00175 
00176   static inline
00177   void UNPACK_TYPE( unsigned char*& buff, EntityType& type )
00178   {
00179     int int_type = MBMAXTYPE;
00180     UNPACK_INT(buff, int_type);
00181     type = static_cast<EntityType>(int_type);
00182     assert(type >= MBVERTEX && type <= MBMAXTYPE);
00183   }
00184 
00185   static inline
00186   void UNPACK_RANGE( unsigned char*& buff, Range& rng )
00187   {
00188     int num_subs;
00189     EntityHandle eh[2];
00190     UNPACK_INT( buff, num_subs );
00191     for (int i = 0; i < num_subs; ++i) {
00192       UPC(num_subs, "-subranged range"); 
00193       UNPACK_EH(buff, eh, 2); 
00194       rng.insert(eh[0], eh[1]);
00195     }
00196   }    
00197 
00198   enum MBMessageTag {MB_MESG_ANY=MPI_ANY_TAG, 
00199                      MB_MESG_ENTS_ACK,
00200                      MB_MESG_ENTS_SIZE,
00201                      MB_MESG_ENTS_LARGE,
00202                      MB_MESG_REMOTEH_ACK,
00203                      MB_MESG_REMOTEH_SIZE,
00204                      MB_MESG_REMOTEH_LARGE,
00205                      MB_MESG_TAGS_ACK,
00206                      MB_MESG_TAGS_SIZE,
00207                      MB_MESG_TAGS_LARGE
00208   };
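  // Message tags fall into three families (entity exchange, remote-handle
  // exchange, and tag exchange), each with ACK/SIZE/LARGE variants.  The
  // debug helpers below compare a tag against the family boundaries
  // (MB_MESG_REMOTEH_ACK and MB_MESG_TAGS_ACK) to decide which class of
  // request it refers to.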
00209 
00210   static inline size_t RANGE_SIZE(const Range& rng)
00211   { return 2*sizeof(EntityHandle)*rng.psize()+sizeof(int); }
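  // RANGE_SIZE is the packed size produced by PACK_RANGE: one int for the
  // pair count plus two handles per subrange.  For example, a range holding
  // handles 1-10 and 21-25 has psize() == 2, so with 8-byte handles and
  // 4-byte ints it packs into 2*8*2 + 4 = 36 bytes.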
00212 
00213 #define PRINT_DEBUG_ISEND(A,B,C,D,E)   print_debug_isend((A),(B),(C),(D),(E))
00214 #define PRINT_DEBUG_IRECV(A,B,C,D,E,F) print_debug_irecv((A),(B),(C),(D),(E),(F))
00215 #define PRINT_DEBUG_RECD(A)            print_debug_recd((A))
00216 #define PRINT_DEBUG_WAITANY(A,B,C)     print_debug_waitany((A),(B),(C))
00217 
00218   void ParallelComm::print_debug_isend(int from, int to, unsigned char *buff,
00219                                        int tag, int sz) 
00220   {
00221     myDebug->tprintf(3, "Isend, %d->%d, buffer ptr = %p, tag=%d, size=%d\n",
00222                      from, to, (void*)buff, tag, sz);
00223   }
00224 
00225   void ParallelComm::print_debug_irecv(int to, int from, unsigned char *buff, int sz,
00226                                        int tag, int incoming) 
00227   {
00228     myDebug->tprintf(3, "Irecv, %d<-%d, buffer ptr = %p, tag=%d, size=%d",
00229                      to, from, (void*)buff, tag, sz);
00230     if (tag < MB_MESG_REMOTEH_ACK) myDebug->printf(3, ", incoming1=%d\n", incoming);
00231     else if (tag < MB_MESG_TAGS_ACK) myDebug->printf(3, ", incoming2=%d\n", incoming);
00232     else myDebug->printf(3, ", incoming=%d\n", incoming);
00233   }
00234 
00235   void ParallelComm::print_debug_recd(MPI_Status status) 
00236   {
00237     if (myDebug->get_verbosity() == 3) {
00238       int this_count;
00239       int success = MPI_Get_count(&status, MPI_UNSIGNED_CHAR, &this_count);
00240       if (MPI_SUCCESS != success) this_count = -1;
00241       myDebug->tprintf(3, "Received from %d, count = %d, tag = %d\n", 
00242                        status.MPI_SOURCE, this_count , status.MPI_TAG);
00243     }
00244   }
00245 
00246   void ParallelComm::print_debug_waitany(std::vector<MPI_Request> &reqs, int tag, int proc) 
00247   {
00248     if (myDebug->get_verbosity() == 3) {
00249       myDebug->tprintf(3, "Waitany, p=%d, ", proc);
00250       if (tag < MB_MESG_REMOTEH_ACK) myDebug->print(3, ", recv_ent_reqs=");
00251       else if (tag < MB_MESG_TAGS_ACK) myDebug->print(3, ", recv_remoteh_reqs=");
00252       else myDebug->print(3, ", recv_tag_reqs=");
00253       for (unsigned int i = 0; i < reqs.size(); i++) myDebug->printf(3, " %p", (void*)(intptr_t)reqs[i]);
00254       myDebug->print(3, "\n");
00255     }
00256   }
00257 
00258 #define RR(a) if (MB_SUCCESS != result) {        \
00259     errorHandler->set_last_error(a);                \
00260     return result;}
00261 
00262 #define RRA(a) if (MB_SUCCESS != result) {                        \
00263     std::string tmp_str; mbImpl->get_last_error(tmp_str);        \
00264     tmp_str.append("\n"); tmp_str.append(a);                        \
00265     errorHandler->set_last_error(tmp_str);                        \
00266     return result;}
00267 
00268 #define RRAI(i, e, a) if (MB_SUCCESS != result) {        \
00269     std::string tmp_str; i->get_last_error(tmp_str);        \
00270     tmp_str.append("\n"); tmp_str.append(a);                \
00271     e->set_last_error(tmp_str);                                \
00272     return result;}
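  // The RR/RRA/RRAI macros all test a local ErrorCode named 'result' and, on
  // failure, record the supplied message and return early.  Typical usage in
  // the methods below:
  //   ErrorCode result = mbImpl->get_entities_by_handle(set, ents);
  //   RRA("Failed to get entities.");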
00273 
00275   const char* PARTITIONING_PCOMM_TAG_NAME = "__PRTN_PCOMM";
00276  
00290 #define PARALLEL_COMM_TAG_NAME "__PARALLEL_COMM"
00291 
00292   ParallelComm::ParallelComm(Interface *impl, MPI_Comm cm, int* id ) 
00293     : mbImpl(impl), procConfig(cm),
00294       sharedpTag(0), sharedpsTag(0), 
00295       sharedhTag(0), sharedhsTag(0), pstatusTag(0), ifaceSetsTag(0),
00296       partitionTag(0), globalPartCount(-1), partitioningSet(0), 
00297       myDebug(NULL), 
00298       sharedSetData( new SharedSetData(*impl,procConfig.proc_rank()) )
00299   {
00300     initialize();
00301   
00302     if (id)
00303       *id = pcommID;
00304   }
00305 
00306   ParallelComm::ParallelComm(Interface *impl,
00307                              std::vector<unsigned char> &/*tmp_buff*/, 
00308                              MPI_Comm cm,
00309                              int* id) 
00310     : mbImpl(impl), procConfig(cm),
00311       sharedpTag(0), sharedpsTag(0), 
00312       sharedhTag(0), sharedhsTag(0), pstatusTag(0), ifaceSetsTag(0),
00313       partitionTag(0), globalPartCount(-1), partitioningSet(0),
00314       myDebug(NULL),
00315       sharedSetData( new SharedSetData(*impl,procConfig.proc_rank()) )
00316   {
00317     initialize();
00318   
00319     if (id)
00320       *id = pcommID;
00321   }
00322 
00323   ParallelComm::~ParallelComm() 
00324   {
00325     remove_pcomm(this);
00326     delete_all_buffers();
00327     delete myDebug;
00328     delete sharedSetData;
00329   }
00330 
00331   void ParallelComm::initialize() 
00332   {
00333     Core* core = dynamic_cast<Core*>(mbImpl);
00334     sequenceManager = core->sequence_manager();
00335     mbImpl->query_interface(errorHandler);
00336   
00337     // initialize MPI, if necessary
00338     int flag = 1;
00339     int retval = MPI_Initialized(&flag);
00340     if (MPI_SUCCESS != retval || !flag) {
00341       int argc = 0;
00342       char **argv = NULL;
00343     
00344       // mpi not initialized yet - initialize here
00345       retval = MPI_Init(&argc, &argv);
00346     }
00347 
00348     // reserve space for vectors
00349     buffProcs.reserve(MAX_SHARING_PROCS);
00350     localOwnedBuffs.reserve(MAX_SHARING_PROCS);
00351     remoteOwnedBuffs.reserve(MAX_SHARING_PROCS);
00352 
00353     pcommID = add_pcomm(this);
00354 
00355     if (!myDebug) myDebug = new DebugOutput("ParallelComm", std::cerr);
00356   }
00357 
00358   int ParallelComm::add_pcomm(ParallelComm *pc) 
00359   {
00360     // add this pcomm to instance tag
00361     std::vector<ParallelComm *> pc_array(MAX_SHARING_PROCS, 
00362                                          (ParallelComm*)NULL);
00363     Tag pc_tag = pcomm_tag(mbImpl, true);
00364     assert(0 != pc_tag);
00365   
00366     const EntityHandle root = 0;
00367     ErrorCode result = mbImpl->tag_get_data(pc_tag, &root, 1, (void*)&pc_array[0]);
00368     if (MB_SUCCESS != result && MB_TAG_NOT_FOUND != result) 
00369       return -1;
00370     int index = 0;
00371     while (index < MAX_SHARING_PROCS && pc_array[index]) index++;
00372     if (index == MAX_SHARING_PROCS) {
00373       index = -1;
00374       assert(false);
00375     }
00376     else {
00377       pc_array[index] = pc;
00378       mbImpl->tag_set_data(pc_tag, &root, 1, (void*)&pc_array[0]);
00379     }
00380     return index;
00381   }
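  // Every ParallelComm instance on an Interface is registered in the tag
  // returned by pcomm_tag(): an array of MAX_SHARING_PROCS ParallelComm
  // pointers stored on the root set (handle 0).  add_pcomm places 'pc' in the
  // first empty slot and returns that index, which becomes the pcommID;
  // remove_pcomm below clears the slot again.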
00382 
00383   void ParallelComm::remove_pcomm(ParallelComm *pc) 
00384   {
00385     // remove this pcomm from instance tag
00386     std::vector<ParallelComm *> pc_array(MAX_SHARING_PROCS);
00387     Tag pc_tag = pcomm_tag(mbImpl, true);
00388   
00389     const EntityHandle root = 0;
00390     ErrorCode result = mbImpl->tag_get_data(pc_tag, &root, 1, (void*)&pc_array[0]);
00391     std::vector<ParallelComm*>::iterator pc_it = 
00392       std::find(pc_array.begin(), pc_array.end(), pc);
00393     assert(MB_SUCCESS == result && 
00394            pc_it != pc_array.end());
00395     // empty if test to get around compiler warning about unused var
00396     if (MB_SUCCESS == result) {}
00397     
00398     *pc_it = NULL;
00399     mbImpl->tag_set_data(pc_tag, &root, 1, (void*)&pc_array[0]);
00400   }
00401 
00404   ErrorCode ParallelComm::assign_global_ids( EntityHandle this_set,
00405                                              const int dimension, 
00406                                              const int start_id,
00407                                              const bool largest_dim_only,
00408                                              const bool parallel,
00409                                              const bool owned_only) 
00410   {
00411     Range entities[4];
00412     ErrorCode result;
00413     std::vector<unsigned char> pstatus;
00414     for (int dim = 0; dim <= dimension; dim++) {
00415       if (dim == 0 || !largest_dim_only || dim == dimension) {
00416         result = mbImpl->get_entities_by_dimension(this_set, dim, entities[dim]); 
00417         RRA("Failed to get entities in assign_global_ids.");
00418       }
00419 
00420       // need to filter out non-locally-owned entities!!!
00421       pstatus.resize(entities[dim].size());
00422       result = mbImpl->tag_get_data(pstatus_tag(), entities[dim], &pstatus[0]);
00423       RRA("Failed to get pstatus in assign_global_ids.");
00424     
00425       Range dum_range;
00426       Range::iterator rit;
00427       unsigned int i;
00428       for (rit = entities[dim].begin(), i = 0; rit != entities[dim].end(); rit++, i++)
00429         if (pstatus[i] & PSTATUS_NOT_OWNED)
00430           dum_range.insert(*rit);
00431       entities[dim] = subtract( entities[dim], dum_range);
00432     }
00433     
00434     return assign_global_ids(entities, dimension, start_id, parallel, owned_only);
00435   }
00436     
00439   ErrorCode ParallelComm::assign_global_ids( Range entities[],
00440                                              const int dimension, 
00441                                              const int start_id,
00442                                              const bool parallel,
00443                                              const bool owned_only) 
00444   {
00445     int local_num_elements[4];
00446     ErrorCode result;
00447     std::vector<unsigned char> pstatus;
00448     for (int dim = 0; dim <= dimension; dim++) {
00449       local_num_elements[dim] = entities[dim].size();
00450     }
00451   
00452     // communicate numbers
00453     std::vector<int> num_elements(procConfig.proc_size()*4);
00454 #ifdef USE_MPI
00455     if (procConfig.proc_size() > 1 && parallel) {
00456       int retval = MPI_Allgather(local_num_elements, 4, MPI_INT,
00457                                  &num_elements[0], 4, 
00458                                  MPI_INT, procConfig.proc_comm());
00459       if (0 != retval) return MB_FAILURE;
00460     }
00461     else
00462 #endif
00463       for (int dim = 0; dim < 4; dim++) num_elements[dim] = local_num_elements[dim];
00464   
00465     // my entities get IDs starting at total_elems[dim]: start_id plus the counts from all lower-ranked procs
00466     int total_elems[4] = {start_id, start_id, start_id, start_id};
00467   
00468     for (unsigned int proc = 0; proc < procConfig.proc_rank(); proc++) {
00469       for (int dim = 0; dim < 4; dim++) total_elems[dim] += num_elements[4*proc + dim];
00470     }
00471   
00472     //assign global ids now
00473     Tag gid_tag;
00474     int zero = 0;
00475     result = mbImpl->tag_get_handle(GLOBAL_ID_TAG_NAME, 1, MB_TYPE_INTEGER, 
00476                                     gid_tag, MB_TAG_DENSE|MB_TAG_CREAT, &zero);
00477     if (MB_SUCCESS != result) return result;
00478   
00479     for (int dim = 0; dim < 4; dim++) {
00480       if (entities[dim].empty()) continue;
00481       num_elements.resize(entities[dim].size());
00482       int i = 0;
00483       for (Range::iterator rit = entities[dim].begin(); rit != entities[dim].end(); rit++)
00484         num_elements[i++] = total_elems[dim]++;
00485     
00486       result = mbImpl->tag_set_data(gid_tag, entities[dim], &num_elements[0]); 
00487       RRA("Failed to set global id tag in assign_global_ids.");
00488     }
00489   
00490     if (owned_only)
00491       return MB_SUCCESS;
00492   
00493     // Exchange tags
00494     for (int dim = 1; dim < 4; dim++) 
00495       entities[0].merge( entities[dim] );
00496     return exchange_tags( gid_tag, entities[0] );
00497   }
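  // IDs are assigned by a prefix sum over processor ranks: every rank gathers
  // all per-dimension entity counts with MPI_Allgather, then starts numbering
  // its own entities at start_id plus the counts of all lower-ranked procs.
  // For example, with start_id = 1 and owned vertex counts {10, 12, 8} on
  // ranks 0-2, rank 0 assigns vertex IDs 1-10, rank 1 assigns 11-22, and
  // rank 2 assigns 23-30.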
00498 
00499   int ParallelComm::get_buffers(int to_proc, bool *is_new) 
00500   {
00501     int ind = -1;
00502     std::vector<unsigned int>::iterator vit = 
00503       std::find(buffProcs.begin(), buffProcs.end(), to_proc);
00504     if (vit == buffProcs.end()) {
00505       assert("shouldn't need buffer to myself" && to_proc != (int)procConfig.proc_rank());
00506       ind = buffProcs.size();
00507       buffProcs.push_back((unsigned int)to_proc);
00508       localOwnedBuffs.push_back(new Buffer(INITIAL_BUFF_SIZE));
00509       remoteOwnedBuffs.push_back(new Buffer(INITIAL_BUFF_SIZE));
00510       if (is_new) *is_new = true;
00511     }
00512     else {
00513       ind = vit - buffProcs.begin();
00514       if (is_new) *is_new = false;
00515     }
00516     assert(ind < MAX_SHARING_PROCS);
00517     return ind;
00518   }
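  // get_buffers returns the index of to_proc in buffProcs, creating a pair of
  // INITIAL_BUFF_SIZE buffers (localOwnedBuffs/remoteOwnedBuffs) the first
  // time a processor is seen; callers use the same index (times two) into the
  // send/receive request vectors.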
00519 
00520   ErrorCode ParallelComm::broadcast_entities( const int from_proc,
00521                                               Range &entities,
00522                                               const bool adjacencies,
00523                                               const bool tags) 
00524   {
00525 #ifndef USE_MPI
00526     return MB_FAILURE;
00527 #else
00528   
00529     ErrorCode result = MB_SUCCESS;
00530     int success;
00531     int buff_size;
00532 
00533     Buffer buff(INITIAL_BUFF_SIZE);
00534     buff.reset_ptr(sizeof(int));
00535     if ((int)procConfig.proc_rank() == from_proc) {
00536       result = add_verts(entities);
00537       RRA("Failed to add adj vertices.");
00538 
00539       buff.reset_ptr(sizeof(int));
00540       result = pack_buffer( entities, adjacencies, tags, 
00541                             false, -1, &buff); 
00542       RRA("Failed to compute buffer size in broadcast_entities.");
00543       buff.set_stored_size();
00544       buff_size = buff.buff_ptr - buff.mem_ptr;
00545     }
00546 
00547     success = MPI_Bcast( &buff_size, 1, MPI_INT, from_proc, procConfig.proc_comm() );
00548     if (MPI_SUCCESS != success) {
00549       result = MB_FAILURE;
00550       RRA("MPI_Bcast of buffer size failed.");
00551     }
00552   
00553     if (!buff_size) // no data
00554       return MB_SUCCESS;
00555 
00556     if ((int)procConfig.proc_rank() != from_proc) 
00557       buff.reserve(buff_size);
00558 
00559     size_t offset = 0;
00560     while (buff_size) {
00561       int sz = std::min( buff_size, MAX_BCAST_SIZE );
00562       success = MPI_Bcast(buff.mem_ptr+offset, sz, MPI_UNSIGNED_CHAR, from_proc, procConfig.proc_comm() );
00563       if (MPI_SUCCESS != success) {
00564         result = MB_FAILURE;
00565         RRA("MPI_Bcast of buffer failed.");
00566       }
00567     
00568       offset += sz;
00569       buff_size -= sz;
00570     }
00571 
00572     if ((int)procConfig.proc_rank() != from_proc) {
00573       std::vector<std::vector<EntityHandle> > dum1a, dum1b;
00574       std::vector<std::vector<int> > dum1p;
00575       std::vector<EntityHandle> dum2, dum4;
00576       std::vector<unsigned int> dum3;
00577       buff.reset_ptr(sizeof(int));
00578       result = unpack_buffer(buff.buff_ptr, false, from_proc, -1, 
00579                              dum1a, dum1b, dum1p, dum2, dum2, dum3, dum4);
00580       RRA("Failed to unpack buffer in broadcast_entities.");
00581       std::copy(dum4.begin(), dum4.end(), range_inserter(entities));
00582     }
00583 
00584     return MB_SUCCESS;
00585 #endif
00586   }
00587 
00588   ErrorCode ParallelComm::scatter_entities( const int from_proc,
00589                                             std::vector<Range> &entities,
00590                                             const bool adjacencies,
00591                                             const bool tags)
00592   {
00593 #ifndef USE_MPI
00594     return MB_FAILURE;
00595 #else
00596     ErrorCode result = MB_SUCCESS;
00597     int i, success, buff_size, prev_size;
00598     int nProcs = (int)procConfig.proc_size();
00599     int* sendCounts = new int[nProcs];
00600     int* displacements = new int[nProcs];
00601     sendCounts[0] = sizeof(int);
00602     displacements[0] = 0;
00603     Buffer buff(INITIAL_BUFF_SIZE);
00604     buff.reset_ptr(sizeof(int));
00605     buff.set_stored_size();
00606     unsigned int my_proc = procConfig.proc_rank();
00607 
00608     // get buffer size array for each remote processor
00609     if (my_proc == (unsigned int) from_proc) {
00610       for (i = 1; i < nProcs; i++) {
00611         prev_size = buff.buff_ptr - buff.mem_ptr;
00612         buff.reset_ptr(prev_size + sizeof(int));
00613         result = add_verts(entities[i]);
00614       
00615         result = pack_buffer(entities[i], adjacencies, tags, 
00616                              false, -1, &buff); 
00617         if (MB_SUCCESS != result) {
00618           delete[] sendCounts;
00619           delete[] displacements;
00620         }
00621         RRA("Failed to compute buffer size in scatter_entities.");
00622 
00623         buff_size = buff.buff_ptr - buff.mem_ptr - prev_size;
00624         *((int*)(buff.mem_ptr + prev_size)) = buff_size;
00625         sendCounts[i] = buff_size;
00626       }
00627     }
00628   
00629     // broadcast buffer size array
00630     success = MPI_Bcast(sendCounts, nProcs, MPI_INT, from_proc, procConfig.proc_comm());
00631     if (MPI_SUCCESS != success) {
00632       result = MB_FAILURE;
00633       delete[] sendCounts;
00634       delete[] displacements;
00635       RRA("MPI_Bcast of buffer size failed.");
00636     }
00637   
00638     for (i = 1; i < nProcs; i++) {
00639       displacements[i] = displacements[i-1] + sendCounts[i-1];
00640     }
00641 
00642     Buffer rec_buff;
00643     rec_buff.reserve(sendCounts[my_proc]);
00644 
00645     // scatter actual geometry
00646     success = MPI_Scatterv(buff.mem_ptr, sendCounts, displacements,
00647                            MPI_UNSIGNED_CHAR, rec_buff.mem_ptr, sendCounts[my_proc],
00648                            MPI_UNSIGNED_CHAR, from_proc, procConfig.proc_comm());
00649   
00650     if (MPI_SUCCESS != success) {
00651       result = MB_FAILURE;
00652       delete[] sendCounts;
00653       delete[] displacements;
00654       RRA("MPI_Scatterv of buffer failed.");
00655     }
00656 
00657     // unpack in remote processors
00658     if (my_proc != (unsigned int) from_proc) {
00659       std::vector<std::vector<EntityHandle> > dum1a, dum1b;
00660       std::vector<std::vector<int> > dum1p;
00661       std::vector<EntityHandle> dum2, dum4;
00662       std::vector<unsigned int> dum3;
00663       rec_buff.reset_ptr(sizeof(int));
00664       result = unpack_buffer(rec_buff.buff_ptr, false, from_proc, -1, 
00665                              dum1a, dum1b, dum1p, dum2, dum2, dum3, dum4);
00666       if (MB_SUCCESS != result) {
00667         delete[] sendCounts;
00668         delete[] displacements;
00669       }
00670       RRA("Failed to unpack buffer in scatter_entities.");
00671       std::copy(dum4.begin(), dum4.end(), range_inserter(entities[my_proc]));
00672     }
00673 
00674     delete[] sendCounts;
00675     delete[] displacements;
00676 
00677     return MB_SUCCESS;
00678 #endif
00679   }
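  // In scatter_entities the root packs one buffer per destination back to
  // back, prefixing each with its byte size; sendCounts/displacements drive
  // the MPI_Scatterv, and every non-root rank then unpacks the slice it
  // received into entities[my_proc].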
00680 
00681   ErrorCode ParallelComm::send_entities(const int to_proc,
00682                                         Range &orig_ents,
00683                                         const bool adjs,
00684                                         const bool tags,
00685                                         const bool store_remote_handles,
00686                                         const bool is_iface,
00687                                         Range &/*final_ents*/,
00688                                         int &incoming1,
00689                                         int &incoming2,
00690                                         TupleList& entprocs,
00691                                         std::vector<MPI_Request> &recv_remoteh_reqs,
00692                                         bool /*wait_all*/)
00693   {
00694 #ifndef USE_MPI
00695     return MB_FAILURE;
00696 #else
00697     // pack entities to local buffer
00698     ErrorCode result = MB_SUCCESS;
00699     int ind = get_buffers(to_proc);
00700     localOwnedBuffs[ind]->reset_ptr(sizeof(int));
00701 
00702     // add vertices
00703     result = add_verts(orig_ents);
00704     RRA("Failed to add vertex in send_entities.");
00705 
00706     // filter out entities already shared with destination
00707     Range tmp_range;
00708     result = filter_pstatus(orig_ents, PSTATUS_SHARED, PSTATUS_AND,
00709                             to_proc, &tmp_range);
00710     RRA("Couldn't filter on owner.");
00711     if (!tmp_range.empty()) {
00712       orig_ents = subtract(orig_ents, tmp_range);
00713     }
00714 
00715     result = pack_buffer(orig_ents, adjs, tags, store_remote_handles,
00716                          to_proc, localOwnedBuffs[ind], &entprocs);
00717     RRA("Failed to pack buffer in send_entities.");
00718 
00719     // send buffer
00720     result = send_buffer(to_proc, localOwnedBuffs[ind], MB_MESG_ENTS_SIZE,
00721                          sendReqs[2*ind], recvReqs[2*ind + 1],
00722                          (int*)(remoteOwnedBuffs[ind]->mem_ptr),
00723                          //&ackbuff,
00724                          incoming1,
00725                          MB_MESG_REMOTEH_SIZE,
00726                          (!is_iface && store_remote_handles ? 
00727                           localOwnedBuffs[ind] : NULL),
00728                          &recv_remoteh_reqs[2*ind], &incoming2);
00729     RRA("Failed to send buffer.");
00730 
00731     return MB_SUCCESS;
00732 
00733 #endif
00734   }
00735 
00736 ErrorCode ParallelComm::send_entities(std::vector<unsigned int>& send_procs,
00737                                       std::vector<Range*>& send_ents,
00738                                       int& incoming1, int& incoming2,
00739                                       const bool store_remote_handles)
00740 {
00741 #ifdef USE_MPE
00742   if (myDebug->get_verbosity() == 2) {
00743     MPE_Log_event(OWNED_START, procConfig.proc_rank(), "Starting send entities.");
00744   }
00745 #endif
00746   myDebug->tprintf(1, "Entering send_entities\n");
00747   if (myDebug->get_verbosity() == 4) {
00748     msgs.clear();
00749     msgs.reserve(MAX_SHARING_PROCS);
00750   }
00751 
00752   unsigned int i;
00753   int ind;
00754   ErrorCode result = MB_SUCCESS;
00755 
00756   // set buffProcs with communicating procs
00757   unsigned int n_proc = send_procs.size();
00758   for (i = 0; i < n_proc; i++) {
00759     ind = get_buffers(send_procs[i]);
00760     result = add_verts(*send_ents[i]);
00761     RRA("Couldn't add verts.");
00762 
00763     // filter out entities already shared with destination
00764     Range tmp_range;
00765     result = filter_pstatus(*send_ents[i], PSTATUS_SHARED, PSTATUS_AND,
00766                             buffProcs[ind], &tmp_range);
00767     RRA("Couldn't filter on owner.");
00768     if (!tmp_range.empty()) {
00769       *send_ents[i] = subtract(*send_ents[i], tmp_range);
00770     }
00771   }
00772  
00773   //===========================================
00774   // get entities to be sent to neighbors
00775   // need to get procs each entity is sent to
00776   //===========================================  
00777   Range allsent, tmp_range;
00778   int npairs = 0;
00779   TupleList entprocs;
00780   for (i = 0; i < n_proc; i++) {
00781     int n_ents = send_ents[i]->size();
00782     if (n_ents > 0) {
00783       npairs += n_ents; // get the total # of proc/handle pairs
00784       allsent.merge(*send_ents[i]);
00785     }
00786   }
00787 
00788   // allocate a TupleList of that size
00789   entprocs.initialize(1, 0, 1, 0, npairs); 
00790   entprocs.enableWriteAccess();
00791 
00792   // put the proc/handle pairs in the list
00793   for (i = 0; i < n_proc; i++) {
00794     for (Range::iterator rit = send_ents[i]->begin(); rit != send_ents[i]->end(); rit++) { 
00795       entprocs.vi_wr[entprocs.get_n()] = send_procs[i];
00796       entprocs.vul_wr[entprocs.get_n()] = *rit; 
00797       entprocs.inc_n(); 
00798     } 
00799   }
00800   
00801   // sort by handle
00802   moab::TupleList::buffer sort_buffer; 
00803   sort_buffer.buffer_init(npairs); 
00804   entprocs.sort(1, &sort_buffer);
00805   entprocs.disableWriteAccess();
00806   sort_buffer.reset(); 
00807 
00808   myDebug->tprintf(1, "allsent ents compactness (size) = %f (%lu)\n", allsent.compactness(),
00809                   (unsigned long)allsent.size());
00810 
00811   //===========================================
00812   // pack and send ents from this proc to others
00813   //===========================================
00814   for (i = 0; i < n_proc; i++) {
00815     if (send_ents[i]->size() > 0) {
00816       ind = get_buffers(send_procs[i]);
00817       myDebug->tprintf(1, "Sent ents compactness (size) = %f (%lu)\n", send_ents[i]->compactness(),
00818                        (unsigned long)send_ents[i]->size());
00819       // reserve space on front for size and for initial buff size
00820       localOwnedBuffs[ind]->reset_buffer(sizeof(int));
00821       result = pack_buffer(*send_ents[i], false, true,
00822                            store_remote_handles, buffProcs[ind],
00823                            localOwnedBuffs[ind], &entprocs, &allsent);
00824       
00825       if (myDebug->get_verbosity() == 4) {
00826         msgs.resize(msgs.size()+1);
00827         msgs.back() = new Buffer(*localOwnedBuffs[ind]);
00828       }
00829       
00830       // send the buffer (size stored in front in send_buffer)
00831       result = send_buffer(send_procs[i], localOwnedBuffs[ind], 
00832                            MB_MESG_ENTS_SIZE, sendReqs[2*ind], 
00833                            recvReqs[2*ind+1],
00834                            &ackbuff,
00835                            incoming1,
00836                            MB_MESG_REMOTEH_SIZE, 
00837                            (store_remote_handles ? 
00838                             localOwnedBuffs[ind] : NULL),
00839                            &recvRemotehReqs[2*ind], &incoming2);
00840       RRA("Failed to Isend in ghost send.");
00841     }
00842   }
00843   entprocs.reset();
00844 
00845 #ifdef USE_MPE
00846   if (myDebug->get_verbosity() == 2) {
00847     MPE_Log_event(ENTITIES_END, procConfig.proc_rank(), "Ending send_entities.");
00848   }
00849 #endif
00850 
00851   return MB_SUCCESS;
00852 }
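// Before packing, the sender builds a TupleList of (proc, handle) pairs, one
// per entity per destination, and sorts it by handle; the list and the union
// of all sent entities (allsent) are passed to pack_buffer so that per-entity
// sharing information can be packed (see the layout comment at the top of
// pack_entities).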
00853 
00854   ErrorCode ParallelComm::recv_entities(const int from_proc,
00855                                         const bool store_remote_handles,
00856                                         const bool is_iface,
00857                                         Range &final_ents,
00858                                         int& incoming1,
00859                                         int& incoming2,
00860                                         std::vector<std::vector<EntityHandle> > &L1hloc,
00861                                         std::vector<std::vector<EntityHandle> > &L1hrem,
00862                                         std::vector<std::vector<int> > &L1p,
00863                                         std::vector<EntityHandle> &L2hloc,
00864                                         std::vector<EntityHandle> &L2hrem,
00865                                         std::vector<unsigned int> &L2p,
00866                                         std::vector<MPI_Request> &recv_remoteh_reqs,
00867                                         bool /*wait_all*/)
00868   {
00869 #ifndef USE_MPI
00870     return MB_FAILURE;
00871 #else
00872     // non-blocking receive for the first message (having size info)
00873     ErrorCode result;
00874     int ind1 = get_buffers(from_proc);
00875     incoming1++;
00876     PRINT_DEBUG_IRECV(procConfig.proc_rank(), from_proc,
00877                       remoteOwnedBuffs[ind1]->mem_ptr, INITIAL_BUFF_SIZE,
00878                       MB_MESG_ENTS_SIZE, incoming1);
00879     int success = MPI_Irecv(remoteOwnedBuffs[ind1]->mem_ptr, INITIAL_BUFF_SIZE,
00880                             MPI_UNSIGNED_CHAR, from_proc,
00881                             MB_MESG_ENTS_SIZE, procConfig.proc_comm(),
00882                             &recvReqs[2*ind1]);
00883     if (success != MPI_SUCCESS) {
00884       result = MB_FAILURE;
00885       RRA("Failed to post irecv in ghost exchange.");
00886     }
00887   
00888     // receive messages in while loop
00889     return recv_messages(from_proc, store_remote_handles, is_iface, final_ents,
00890                          incoming1, incoming2, L1hloc, L1hrem, L1p, L2hloc,
00891                          L2hrem, L2p, recv_remoteh_reqs);
00892   
00893 #endif
00894   }
00895 
00896 ErrorCode ParallelComm::recv_entities(std::set<unsigned int>& recv_procs,
00897                                       int incoming1, int incoming2,
00898                                       const bool store_remote_handles,
00899                                       const bool migrate)
00900 {
00901   //===========================================
00902   // receive/unpack new entities
00903   //===========================================
00904   // number of incoming messages is the number of procs we communicate with
00905   int success, ind, i;
00906   ErrorCode result;
00907   MPI_Status status;
00908   std::vector<std::vector<EntityHandle> > recd_ents(buffProcs.size());
00909   std::vector<std::vector<EntityHandle> > L1hloc(buffProcs.size()), L1hrem(buffProcs.size());
00910   std::vector<std::vector<int> > L1p(buffProcs.size());
00911   std::vector<EntityHandle> L2hloc, L2hrem;
00912   std::vector<unsigned int> L2p;
00913   std::vector<EntityHandle> new_ents;
00914 
00915   while (incoming1) {
00916     // wait for all recvs of ents before proceeding to sending remote handles,
00917     // b/c some procs may have sent to a 3rd proc ents owned by me;
00918     PRINT_DEBUG_WAITANY(recvReqs, MB_MESG_ENTS_SIZE, procConfig.proc_rank());
00919     
00920     success = MPI_Waitany(2*buffProcs.size(), &recvReqs[0], &ind, &status);
00921     if (MPI_SUCCESS != success) {
00922       result = MB_FAILURE;
00923       RRA("Failed in waitany in owned entity exchange.");
00924     }
00925     
00926     PRINT_DEBUG_RECD(status);
00927     
00928     // ok, received something; decrement incoming counter
00929     incoming1--;
00930     bool done = false;
00931 
00932     // In case ind is for ack, we need index of one before it
00933     unsigned int base_ind = 2*(ind/2);
00934     result = recv_buffer(MB_MESG_ENTS_SIZE,
00935                          status,
00936                          remoteOwnedBuffs[ind/2],
00937                          recvReqs[ind], recvReqs[ind+1],
00938                          incoming1,
00939                          localOwnedBuffs[ind/2], sendReqs[base_ind], sendReqs[base_ind+1],
00940                          done,
00941                          (store_remote_handles ? 
00942                           localOwnedBuffs[ind/2] : NULL),
00943                          MB_MESG_REMOTEH_SIZE,
00944                          &recvRemotehReqs[base_ind], &incoming2);
00945     RRA("Failed to receive buffer.");
00946 
00947     if (done) {
00948       if (myDebug->get_verbosity() == 4) {
00949         msgs.resize(msgs.size()+1);
00950         msgs.back() = new Buffer(*remoteOwnedBuffs[ind/2]);
00951       }
00952       
00953       // message completely received - process buffer that was sent
00954       remoteOwnedBuffs[ind/2]->reset_ptr(sizeof(int));
00955       result = unpack_buffer(remoteOwnedBuffs[ind/2]->buff_ptr,
00956                              store_remote_handles, buffProcs[ind/2], ind/2,
00957                              L1hloc, L1hrem, L1p, L2hloc, L2hrem, L2p,
00958                              new_ents, true);
00959       if (MB_SUCCESS != result) {
00960         std::cout << "Failed to unpack entities.  Buffer contents:" << std::endl;
00961         print_buffer(remoteOwnedBuffs[ind/2]->mem_ptr, MB_MESG_ENTS_SIZE, buffProcs[ind/2], false);
00962         return result;
00963       }
00964 
00965       if (recvReqs.size() != 2*buffProcs.size()) {
00966         // post irecv's for remote handles from new proc
00967         recvRemotehReqs.resize(2*buffProcs.size(), MPI_REQUEST_NULL);
00968         for (i = recvReqs.size(); i < (int)(2*buffProcs.size()); i+=2) {
00969           localOwnedBuffs[i/2]->reset_buffer();
00970           incoming2++;
00971           PRINT_DEBUG_IRECV(procConfig.proc_rank(), buffProcs[i/2], 
00972                             localOwnedBuffs[i/2]->mem_ptr, INITIAL_BUFF_SIZE,
00973                             MB_MESG_REMOTEH_SIZE, incoming2);
00974           success = MPI_Irecv(localOwnedBuffs[i/2]->mem_ptr, INITIAL_BUFF_SIZE, 
00975                               MPI_UNSIGNED_CHAR, buffProcs[i/2],
00976                               MB_MESG_REMOTEH_SIZE, procConfig.proc_comm(), 
00977                               &recvRemotehReqs[i]);
00978           if (success != MPI_SUCCESS) {
00979             result = MB_FAILURE;
00980             RRA("Failed to post irecv for remote handles in ghost exchange.");
00981           }
00982         }
00983         recvReqs.resize(2*buffProcs.size(), MPI_REQUEST_NULL);
00984         sendReqs.resize(2*buffProcs.size(), MPI_REQUEST_NULL);
00985       }
00986     }
00987   }
00988 
00989   // assign newly created entities to the receiving processor's part
00990   result = assign_entities_part(new_ents, procConfig.proc_rank());
00991   RRA("Failed to assign entities to part.");
00992   if (migrate) {
00993     //result = remove_entities_part(allsent, procConfig.proc_rank());
00994     RRA("Failed to remove entities from part.");
00995   }
00996 
00997   // sanity check: the request vectors should already match the number of buffer procs
00998   if (recvReqs.size() != 2*buffProcs.size()) {
00999     // shouldn't get here...
01000     result = MB_FAILURE;
01001     RRA("Requests length doesn't match proc count in entity exchange.");
01002   }
01003 
01004 #ifdef USE_MPE
01005   if (myDebug->get_verbosity() == 2) {
01006     MPE_Log_event(ENTITIES_END, procConfig.proc_rank(), "Ending recv entities.");
01007   }
01008 #endif
01009 
01010   //===========================================
01011   // send local handles for new entity to owner
01012   //===========================================
01013   std::set<unsigned int>::iterator it = recv_procs.begin();
01014   std::set<unsigned int>::iterator eit = recv_procs.end();
01015   for (; it != eit; it++) {
01016     ind = get_buffers(*it);
01017     // reserve space on front for size and for initial buff size
01018     remoteOwnedBuffs[ind]->reset_buffer(sizeof(int));
01019     
01020     result = pack_remote_handles(L1hloc[ind], L1hrem[ind], L1p[ind],
01021                                  buffProcs[ind], remoteOwnedBuffs[ind]);
01022     RRA("Failed to pack remote handles.");
01023     remoteOwnedBuffs[ind]->set_stored_size();
01024 
01025     if (myDebug->get_verbosity() == 4) {
01026       msgs.resize(msgs.size()+1);
01027       msgs.back() = new Buffer(*remoteOwnedBuffs[ind]);
01028     }
01029     result = send_buffer(buffProcs[ind], remoteOwnedBuffs[ind], 
01030                          MB_MESG_REMOTEH_SIZE, 
01031                          sendReqs[2*ind], recvRemotehReqs[2*ind+1],
01032                          &ackbuff,
01033                          incoming2);
01034     RRA("Failed to send remote handles.");
01035   }
01036 
01037   //===========================================
01038   // process remote handles of my ghosteds
01039   //===========================================
01040   while (incoming2) {
01041     PRINT_DEBUG_WAITANY(recvRemotehReqs, MB_MESG_REMOTEH_SIZE, procConfig.proc_rank());
01042     success = MPI_Waitany(2*buffProcs.size(), &recvRemotehReqs[0], &ind, &status);
01043     if (MPI_SUCCESS != success) {
01044       result = MB_FAILURE;
01045       RRA("Failed in waitany in owned entity exchange.");
01046     }
01047 
01048     // ok, received something; decrement incoming counter
01049     incoming2--;
01050     
01051     PRINT_DEBUG_RECD(status);
01052     bool done = false;
01053     unsigned int base_ind = 2*(ind/2);
01054     result = recv_buffer(MB_MESG_REMOTEH_SIZE, status, 
01055                          localOwnedBuffs[ind/2], 
01056                          recvRemotehReqs[ind], recvRemotehReqs[ind+1], incoming2,
01057                          remoteOwnedBuffs[ind/2], 
01058                          sendReqs[base_ind], sendReqs[base_ind+1],
01059                          done);
01060     RRA("Failed to receive remote handles.");
01061 
01062     if (done) {
01063       // incoming remote handles
01064       if (myDebug->get_verbosity() == 4) {
01065         msgs.resize(msgs.size()+1);
01066         msgs.back() = new Buffer(*localOwnedBuffs[ind]);
01067       }
01068     
01069       localOwnedBuffs[ind/2]->reset_ptr(sizeof(int));
01070       result = unpack_remote_handles(buffProcs[ind/2], 
01071                                      localOwnedBuffs[ind/2]->buff_ptr,
01072                                      L2hloc, L2hrem, L2p);
01073       RRA("Failed to unpack remote handles.");
01074     }
01075   }
01076 
01077 #ifdef USE_MPE
01078   if (myDebug->get_verbosity() == 2) {
01079     MPE_Log_event(RHANDLES_END, procConfig.proc_rank(), "Ending remote handles.");
01080     MPE_Log_event(OWNED_END, procConfig.proc_rank(), 
01081                   "Ending recv entities (still doing checks).");
01082   }
01083 #endif
01084   myDebug->tprintf(1, "Exiting recv_entities.\n");
01085 
01086   return MB_SUCCESS;
01087 }
01088 
01089   ErrorCode ParallelComm::recv_messages(const int from_proc,
01090                                         const bool store_remote_handles,
01091                                         const bool is_iface,
01092                                         Range &final_ents,
01093                                         int& incoming1,
01094                                         int& incoming2,
01095                                         std::vector<std::vector<EntityHandle> > &L1hloc,
01096                                         std::vector<std::vector<EntityHandle> > &L1hrem,
01097                                         std::vector<std::vector<int> > &L1p,
01098                                         std::vector<EntityHandle> &L2hloc,
01099                                         std::vector<EntityHandle> &L2hrem,
01100                                         std::vector<unsigned int> &L2p,
01101                                         std::vector<MPI_Request> &recv_remoteh_reqs)
01102   {
01103 #ifndef USE_MPI
01104     return MB_FAILURE;
01105 #else
01106     MPI_Status status;
01107     ErrorCode result;
01108     int ind1 = get_buffers(from_proc);
01109     int success, ind2;
01110     std::vector<EntityHandle> new_ents;
01111 
01112     // wait and receive messages
01113     while (incoming1) {
01114       PRINT_DEBUG_WAITANY(recvReqs, MB_MESG_TAGS_SIZE, procConfig.proc_rank());
01115       success = MPI_Waitany(2, &recvReqs[2*ind1], &ind2, &status);
01116       if (MPI_SUCCESS != success) {
01117         result = MB_FAILURE;
01118         RRA("Failed in waitany in recv_messages.");
01119       }
01120     
01121       PRINT_DEBUG_RECD(status);
01122     
01123       // ok, received something; decrement incoming counter
01124       incoming1--;
01125       bool done = false;
01126     
01127       // In case ind is for ack, we need index of one before it
01128       ind2 += 2*ind1;
01129       unsigned int base_ind = 2*(ind2/2);
01130 
01131       result = recv_buffer(MB_MESG_ENTS_SIZE, status,
01132                            remoteOwnedBuffs[ind2/2],
01133                            //recvbuff,
01134                            recvReqs[ind2], recvReqs[ind2+1],
01135                            incoming1, localOwnedBuffs[ind2/2],
01136                            sendReqs[base_ind], sendReqs[base_ind+1],
01137                            done,
01138                            (!is_iface && store_remote_handles ? 
01139                             localOwnedBuffs[ind2/2] : NULL),
01140                            MB_MESG_REMOTEH_SIZE,
01141                            &recv_remoteh_reqs[base_ind], &incoming2);
01142       RRA("Failed to receive buffer.");
01143     
01144       if (done) {
01145         // if it is done, unpack buffer
01146         remoteOwnedBuffs[ind2/2]->reset_ptr(sizeof(int));
01147         result = unpack_buffer(remoteOwnedBuffs[ind2/2]->buff_ptr,
01148                                store_remote_handles, from_proc, ind2/2,
01149                                L1hloc, L1hrem, L1p, L2hloc, L2hrem,
01150                                L2p, new_ents);
01151         RRA("Failed to unpack buffer in recv_messages.");
01152 
01153         std::copy(new_ents.begin(), new_ents.end(), range_inserter(final_ents));
01154 
01155         // send local handles for new elements to owner
01156         // reserve space on front for size and for initial buff size
01157         remoteOwnedBuffs[ind2/2]->reset_buffer(sizeof(int));
01158       
01159         result = pack_remote_handles(L1hloc[ind2/2], L1hrem[ind2/2], L1p[ind2/2],
01160                                      from_proc, remoteOwnedBuffs[ind2/2]);
01161         RRA("Failed to pack remote handles.");
01162         remoteOwnedBuffs[ind2/2]->set_stored_size();
01163       
01164         result = send_buffer(buffProcs[ind2/2], remoteOwnedBuffs[ind2/2], 
01165                              MB_MESG_REMOTEH_SIZE, 
01166                              sendReqs[ind2], recv_remoteh_reqs[ind2+1], 
01167                              (int*)(localOwnedBuffs[ind2/2]->mem_ptr),
01168                              //&ackbuff,
01169                              incoming2);
01170         RRA("Failed to send remote handles.");
01171       }
01172     }
01173 
01174     return MB_SUCCESS;
01175 #endif
01176   }
01177 
01178   ErrorCode ParallelComm::recv_remote_handle_messages(const int from_proc,
01179                                                       int& incoming2,
01180                                                       std::vector<EntityHandle> &L2hloc,
01181                                                       std::vector<EntityHandle> &L2hrem,
01182                                                       std::vector<unsigned int> &L2p,
01183                                                       std::vector<MPI_Request> &recv_remoteh_reqs)
01184   {
01185 #ifndef USE_MPI
01186     return MB_FAILURE;
01187 #else
01188     MPI_Status status;
01189     ErrorCode result;
01190     int ind1 = get_buffers(from_proc);
01191     int success, ind2;
01192 
01193     while (incoming2) {
01194       PRINT_DEBUG_WAITANY(recv_remoteh_reqs, MB_MESG_REMOTEH_SIZE,
01195                           procConfig.proc_rank());
01196       success = MPI_Waitany(2, &recv_remoteh_reqs[2*ind1],
01197                             &ind2, &status);
01198       if (MPI_SUCCESS != success) {
01199         result = MB_FAILURE;
01200         RRA("Failed in waitany in recv_remote_handle_messages.");
01201       }
01202     
01203       // ok, received something; decrement incoming counter
01204       incoming2--;
01205 
01206       PRINT_DEBUG_RECD(status);
01207     
01208       bool done = false;
01209       ind2 += 2*ind1;
01210       unsigned int base_ind = 2*(ind2/2);
01211       result = recv_buffer(MB_MESG_REMOTEH_SIZE, status, 
01212                            localOwnedBuffs[ind2/2], 
01213                            recv_remoteh_reqs[ind2], recv_remoteh_reqs[ind2+1], incoming2,
01214                            remoteOwnedBuffs[ind2/2], 
01215                            sendReqs[base_ind], sendReqs[base_ind+1],
01216                            done);
01217       RRA("Failed to receive remote handles.");
01218       if (done) {
01219         // incoming remote handles
01220         localOwnedBuffs[ind2/2]->reset_ptr(sizeof(int));
01221         result = unpack_remote_handles(buffProcs[ind2/2], 
01222                                        localOwnedBuffs[ind2/2]->buff_ptr,
01223                                        L2hloc, L2hrem, L2p);
01224         RRA("Failed to unpack remote handles.");
01225       }
01226     }
01227 
01228     return MB_SUCCESS;
01229 #endif
01230   }
01231 
01232   ErrorCode ParallelComm::pack_buffer(Range &orig_ents, 
01233                                       const bool /*adjacencies*/,
01234                                       const bool tags,
01235                                       const bool store_remote_handles,
01236                                       const int to_proc,
01237                                       Buffer *buff,
01238                                       TupleList *entprocs,
01239                                       Range *allsent)
01240   {
01241     // pack the buffer with the entities, sets, and tags sections
01242     // 
01243     // Note: new entities used in subsequent connectivity lists, sets, or tags, 
01244     //   are referred to as (MBMAXTYPE + index), where index is into vector 
01245     //   of new entities, 0-based
01246     ErrorCode result;
01247 
01248     Range set_range;
01249     std::vector<Range> set_ranges;
01250     std::vector<Tag> all_tags;
01251     std::vector<Range> tag_ranges;
01252     std::vector<int> set_sizes;
01253     std::vector<unsigned int> options_vec;
01254 
01255     Range::const_iterator rit;
01256 
01257     // entities
01258     result = pack_entities(orig_ents, buff,
01259                            store_remote_handles, to_proc, false,
01260                            entprocs, allsent);
01261     RRA("Packing entities failed.");
01262   
01263     // sets
01264     result = pack_sets(orig_ents, buff,
01265                        store_remote_handles, to_proc); 
01266     RRA("Packing sets (count) failed.");
01267 
01268     // tags
01269     Range final_ents;
01270     if (tags) {
01271       result = get_tag_send_list(orig_ents, all_tags, tag_ranges );
01272       RRA("Failed to get tagged entities.");
01273       result = pack_tags(orig_ents, all_tags, all_tags, tag_ranges, 
01274                          buff, store_remote_handles, to_proc);
01275       RRA("Packing tags (count) failed.");
01276     }
01277     else { // set tag size to 0
01278       buff->check_space(sizeof(int));
01279       PACK_INT(buff->buff_ptr, 0);
01280       buff->set_stored_size();
01281     }
01282 
01283     return result;
01284   }
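  // The packed buffer thus has three sections in fixed order: entities, then
  // sets, then tags (a lone 0 int when tags are not requested); unpack_buffer
  // below consumes them in the same order.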
01285 
01286   ErrorCode ParallelComm::unpack_buffer(unsigned char *buff_ptr,
01287                                         const bool store_remote_handles,
01288                                         const int from_proc,
01289                                         const int ind,
01290                                         std::vector<std::vector<EntityHandle> > &L1hloc,
01291                                         std::vector<std::vector<EntityHandle> > &L1hrem,
01292                                         std::vector<std::vector<int> > &L1p,
01293                                         std::vector<EntityHandle> &L2hloc, 
01294                                         std::vector<EntityHandle> &L2hrem,
01295                                         std::vector<unsigned int> &L2p,
01296                                         std::vector<EntityHandle> &new_ents,
01297                                         const bool created_iface) 
01298   {
01299     unsigned char *tmp_buff = buff_ptr;
01300     ErrorCode result;
01301     result = unpack_entities(buff_ptr, store_remote_handles,
01302                              ind, false, L1hloc, L1hrem, L1p, L2hloc, L2hrem, L2p, new_ents, created_iface);
01303     RRA("Unpacking entities failed.");
01304     if (myDebug->get_verbosity() == 3) {
01305       myDebug->tprintf(4, "unpack_entities buffer space: %ld bytes.\n", (long int)(buff_ptr-tmp_buff));
01306       tmp_buff = buff_ptr;
01307     }
01308     result = unpack_sets(buff_ptr, new_ents, store_remote_handles, from_proc);
01309     RRA("Unpacking sets failed.");
01310     if (myDebug->get_verbosity() == 3) {
01311       myDebug->tprintf(4, "unpack_sets buffer space: %ld bytes.\n", (long int)(buff_ptr - tmp_buff));
01312       tmp_buff = buff_ptr;
01313     }
01314     result = unpack_tags(buff_ptr, new_ents, store_remote_handles, from_proc);
01315     RRA("Unpacking tags failed.");
01316     if (myDebug->get_verbosity() == 3) {
01317       myDebug->tprintf(4, "unpack_tags buffer space: %ld bytes.\n", (long int)(buff_ptr - tmp_buff));
01318       tmp_buff = buff_ptr;
01319     }
01320 
01321     if (myDebug->get_verbosity() == 3)
01322       myDebug->print(4, "\n");
01323   
01324     return MB_SUCCESS;
01325   }
01326 
01327   int ParallelComm::num_subranges(const Range &this_range)
01328   {
01329     // ok, have all the ranges we'll pack; count the subranges
01330     int num_sub_ranges = 0;
01331     for (Range::const_pair_iterator pit = this_range.const_pair_begin(); 
01332          pit != this_range.const_pair_end(); pit++)
01333       num_sub_ranges++;
01334 
01335     return num_sub_ranges;
01336   }
01337 
01338   int ParallelComm::estimate_ents_buffer_size(Range &entities,
01339                                               const bool store_remote_handles) 
01340   {
01341     int buff_size = 0;
01342     std::vector<EntityHandle> dum_connect_vec;
01343     const EntityHandle *connect;
01344     int num_connect;
01345 
01346     int num_verts = entities.num_of_type(MBVERTEX);
01347     // # verts + coords + handles
01348     buff_size += 2*sizeof(int) + 3*sizeof(double)*num_verts;
01349     if (store_remote_handles) buff_size += sizeof(EntityHandle)*num_verts;
01350 
01351     // do a rough count by looking at first entity of each type
01352     for (EntityType t = MBEDGE; t < MBENTITYSET; t++) {
01353       const Range::iterator rit = entities.lower_bound(t);
01354       if (rit == entities.end() || TYPE_FROM_HANDLE(*rit) != t) continue;
01355     
01356       ErrorCode result = mbImpl->get_connectivity(*rit, connect, num_connect, 
01357                                                   false, &dum_connect_vec);
01358       RRA("Failed to get connectivity to estimate buffer size.");
01359 
01360       // number, type, nodes per entity
01361       buff_size += 3*sizeof(int);
01362       int num_ents = entities.num_of_type(t);
01363       // connectivity, handle for each ent
01364       buff_size += (num_connect+1)*sizeof(EntityHandle)*num_ents;
01365     }
01366 
01367     // extra entity type at end, passed as int
01368     buff_size += sizeof(int);
01369 
01370     return buff_size;
01371   }
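  // Rough worked example, assuming a 4-byte int and 8-byte double/EntityHandle:
  // packing 1000 vertices plus 500 hexes with remote handles stored gives
  //   vertices: 2*4 + 3*8*1000 + 8*1000 = 32008 bytes
  //   hexes:    3*4 + (8+1)*8*500       = 36012 bytes
  //   end-of-types marker:                    4 bytes
  // i.e. roughly 68 KB; actual space is still re-checked with check_space()
  // as the buffer is packed.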
01372 
01373   int ParallelComm::estimate_sets_buffer_size(Range &entities,
01374                                               const bool /*store_remote_handles*/) 
01375   {
01376     // number of sets
01377     int buff_size = sizeof(int);
01378   
01379     // do a rough count by looking at first entity of each type
01380     Range::iterator rit = entities.lower_bound(MBENTITYSET);
01381     ErrorCode result;
01382   
01383     for (; rit != entities.end(); rit++) {
01384       unsigned int options;
01385       result = mbImpl->get_meshset_options(*rit, options);
01386       RRA("Failed to get meshset options.");
01387 
01388       buff_size += sizeof(int);
01389     
01390       Range set_range;
01391       if (options & MESHSET_SET) {
01392         // range-based set; count the subranges
01393         result = mbImpl->get_entities_by_handle(*rit, set_range);
01394         RRA("Failed to get set entities.");
01395 
01396         // set range
01397         buff_size += RANGE_SIZE(set_range);
01398       }
01399       else if (options & MESHSET_ORDERED) {
01400         // just get the number of entities in the set
01401         int num_ents;
01402         result = mbImpl->get_number_entities_by_handle(*rit, num_ents);
01403         RRA("Failed to get number entities in ordered set.");
01404 
01405         // set vec
01406         buff_size += sizeof(EntityHandle) * num_ents + sizeof(int);
01407       }
01408 
01409       // get numbers of parents/children
01410       int num_par, num_ch;
01411       result = mbImpl->num_child_meshsets(*rit, &num_ch);
01412       RRA("Failed to get num children.");
01413 
01414       result = mbImpl->num_parent_meshsets(*rit, &num_par);
01415       RRA("Failed to get num parents.");
01416 
01417       buff_size += (num_ch + num_par) * sizeof(EntityHandle) + 2*sizeof(int);
01418     }
01419 
01420     return buff_size;
01421   }
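  // Worked example, assuming a 4-byte int and 8-byte EntityHandle: a single
  // MESHSET_ORDERED set containing 100 entities, with 1 parent and 2 children,
  // contributes 4 (options) + 100*8 + 4 (vector length) + 3*8 + 2*4 = 840
  // bytes, in addition to the leading int that counts the sets.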
01422 
01423   ErrorCode ParallelComm::pack_entities(Range &entities,
01424                                         Buffer *buff,
01425                                         const bool store_remote_handles,
01426                                         const int to_proc,
01427                                         const bool /*is_iface*/,
01428                                         TupleList *entprocs,
01429                                         Range */*allsent*/) 
01430   {
01431     // packed information:
01432     // 1. # entities = E
01433     // 2. for e in E
01434     //   a. # procs sharing e, incl. sender and receiver = P
01435     //   b. for p in P (procs sharing e)
01436     //   c. for p in P (handle for e on p) (Note1)
01437     // 3. vertex/entity info
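    // illustrative stream for item 2, for one entity shared among three procs,
    // owner listed first and a zero handle wherever the remote handle is not
    // yet known:  [E=1] [P=3] [p_owner p_1 p_2] [h_owner h_1 0]
    // (zero handles get filled in later, when the receiving procs send their
    // handles back)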
01438 
01439     // get an estimate of the buffer size & pre-allocate buffer size
01440     unsigned int buff_size = estimate_ents_buffer_size(entities, 
01441                                                        store_remote_handles);
01442     buff->check_space(buff_size);
01443   
01444     WriteUtilIface *wu;
01445     ErrorCode result = mbImpl->query_interface(wu);
01446     RRA("Couldn't get WriteUtilIface.");
01447 
01448     unsigned int num_ents;
01449 
01450     std::vector<EntityHandle> entities_vec(entities.size());
01451     std::copy(entities.begin(), entities.end(), entities_vec.begin());
01452 
01453     // first pack procs/handles sharing this ent, not including this dest but including
01454     // others (with zero handles)
01455     if (store_remote_handles) {
01456 
01457       // buff space is at least proc+handle for each entity; use avg of 4 other procs
01458       // to estimate buff size, but check later
01459       buff->check_space(sizeof(int) + (5*sizeof(int) + sizeof(EntityHandle))*entities.size());
01460 
01461       // 1. # entities = E
01462       PACK_INT(buff->buff_ptr, entities.size());
01463   
01464       Range::iterator rit;
01465   
01466       // pre-fetch sharedp and pstatus
01467       std::vector<int> sharedp_vals(entities.size());
01468       result = mbImpl->tag_get_data(sharedp_tag(), entities, &sharedp_vals[0]);
01469       RRA("Failed to get sharedp_tag.");
01470       std::vector<char> pstatus_vals(entities.size());
01471       result = mbImpl->tag_get_data(pstatus_tag(), entities, &pstatus_vals[0]);
01472       RRA("Failed to get sharedp_tag.");
01473   
01474       unsigned int i;
01475       int tmp_procs[MAX_SHARING_PROCS];
01476       EntityHandle tmp_handles[MAX_SHARING_PROCS];
01477       std::set<unsigned int> dumprocs;
01478 
01479       // 2. for e in E
01480       for (rit = entities.begin(), i = 0; 
01481            rit != entities.end(); rit++, i++) {
01482         unsigned int ind = std::lower_bound(entprocs->vul_rd, entprocs->vul_rd+entprocs->get_n(), *rit) - entprocs->vul_rd;
01483         assert(ind < entprocs->get_n());
01484       
01485         while (ind < entprocs->get_n() && entprocs->vul_rd[ind] == *rit)
01486           dumprocs.insert(entprocs->vi_rd[ind++]);
01487       
01488         result = build_sharedhps_list(*rit, pstatus_vals[i], sharedp_vals[i],
01489                                       dumprocs, num_ents, tmp_procs, tmp_handles);
01490         RRA("Failed to build sharedhps.");
01491 
01492         dumprocs.clear();
01493 
01494         // now pack them
01495         buff->check_space((num_ents+1)*sizeof(int) + 
01496                           num_ents*sizeof(EntityHandle));
01497         PACK_INT(buff->buff_ptr, num_ents);
01498         PACK_INTS(buff->buff_ptr, tmp_procs, num_ents);
01499         PACK_EH(buff->buff_ptr, tmp_handles, num_ents);
01500 
01501 #ifndef NDEBUG
01502         // check for duplicates in proc list
01503         unsigned int dp = 0;
01504         for (; dp < MAX_SHARING_PROCS && -1 != tmp_procs[dp]; dp++)
01505           dumprocs.insert(tmp_procs[dp]);
01506         assert(dumprocs.size() == dp);
01507         dumprocs.clear();
01508 #endif      
01509       }
01510     }
01511   
01512     // pack vertices
01513     Range these_ents = entities.subset_by_type(MBVERTEX);
01514     num_ents = these_ents.size();
01515 
01516     if (num_ents) {
01517       buff_size = 2*sizeof(int) + 3*num_ents*sizeof(double);
01518       buff->check_space(buff_size);
01519 
01520       // type, # ents
01521       PACK_INT(buff->buff_ptr, ((int) MBVERTEX));
01522       PACK_INT(buff->buff_ptr, ((int) num_ents));
01523 
01524       std::vector<double> tmp_coords(3*num_ents);
01525       result = mbImpl->get_coords(these_ents, &tmp_coords[0]);
01526       RRA("Couldn't get vertex coordinates.");
01527       PACK_DBLS(buff->buff_ptr, &tmp_coords[0], 3*num_ents);
01528 
01529       myDebug->tprintf(4, "Packed %lu ents of type %s\n", (unsigned long)these_ents.size(),
01530                        CN::EntityTypeName(TYPE_FROM_HANDLE(*these_ents.begin())));
01531     }
01532 
01533     // now entities; go through range, packing by type and equal # verts per element
01534     Range::iterator start_rit = entities.find(*these_ents.rbegin());
01535     start_rit++;
01536     int last_nodes = -1;
01537     EntityType last_type = MBMAXTYPE;
01538     these_ents.clear();
01539     Range::iterator end_rit = start_rit;
01540     EntitySequence *seq;
01541     ElementSequence *eseq;
01542   
01543     while (start_rit != entities.end() || !these_ents.empty()) {
01544       // cases:
01545       // A: !end, last_type == MBMAXTYPE, seq: save contig sequence in these_ents
01546       // B: !end, last type & nodes same, seq: save contig sequence in these_ents
01547       // C: !end, last type & nodes different: pack these_ents, then save contig sequence in these_ents
01548       // D: end: pack these_ents
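      // e.g. a range of 100 tets followed by 50 hexes is emitted as two batches:
      // reaching the hex sequence ends the tet accumulation (case C), and the
      // hexes themselves are flushed when the end of the range is reached (case D)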
01549 
01550       // find the sequence holding current start entity, if we're not at end
01551       eseq = NULL;
01552       if (start_rit != entities.end()) {
01553         result = sequenceManager->find(*start_rit, seq);
01554         RRA("Couldn't find entity sequence.");
01555         if (NULL == seq) return MB_FAILURE;
01556         eseq = dynamic_cast<ElementSequence*>(seq);
01557       }
01558 
01559       // pack the last batch if at end or next one is different
01560       if (!these_ents.empty() &&
01561           (!eseq || eseq->type() != last_type ||
01562            last_nodes != (int) eseq->nodes_per_element())) {
01563         result = pack_entity_seq(last_nodes, store_remote_handles,
01564                                  to_proc, these_ents, entities_vec, buff);
01565         RRA("Failed to pack entities from a sequence.");
01566         these_ents.clear();
01567       }
01568 
01569       if (eseq) {
01570         // continuation of current range, just save these entities
01571         // get position in entities list one past end of this sequence
01572         end_rit = entities.lower_bound(start_rit, entities.end(), eseq->end_handle()+1);
01573 
01574         // put these entities in the range
01575         std::copy(start_rit, end_rit, range_inserter(these_ents));
01576 
01577         last_type = eseq->type();
01578         last_nodes = eseq->nodes_per_element();
01579       }
01580       else if (start_rit != entities.end() &&
01581                TYPE_FROM_HANDLE(*start_rit) == MBENTITYSET)
01582         break;
01583 
01584       start_rit = end_rit;
01585     }
01586 
01587     // pack MBMAXTYPE to indicate end of ranges
01588     buff->check_space(sizeof(int));
01589     PACK_INT(buff->buff_ptr, ((int)MBMAXTYPE));
01590 
01591     buff->set_stored_size();
01592     return MB_SUCCESS;
01593   }
01594 
01595   ErrorCode ParallelComm::build_sharedhps_list(const EntityHandle entity,
01596                                                const unsigned char pstatus,
01597                                                const int 
01598 #ifndef NDEBUG
01599                                                sharedp
01600 #endif
01601                                                , 
01602                                                const std::set<unsigned int> &procs,
01603                                                unsigned int &num_ents,
01604                                                int *tmp_procs,
01605                                                EntityHandle *tmp_handles)
01606   {
01607     num_ents = 0;
01608     unsigned char pstat;
01609     ErrorCode result = get_sharing_data(entity, tmp_procs, tmp_handles,
01610                                         pstat, num_ents);
01611     RRA("Failed in get_sharing_data.");
01612     assert(pstat == pstatus);
01613   
01614     // build shared proc/handle lists
01615     // start with multi-shared, since if it is the owner will be first
01616     if (pstatus & PSTATUS_MULTISHARED) {
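      // nothing more to do: for multi-shared entities get_sharing_data already
      // returned the full proc/handle lists, with the owner first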
01617     }
01618     else if (pstatus & PSTATUS_NOT_OWNED) {
01619       // if not multishared and not owned, other sharing proc is owner, put that
01620       // one first
01621       assert("If not owned, I should be shared too" &&
01622              pstatus & PSTATUS_SHARED &&
01623              num_ents == 1);
01624       tmp_procs[1] = procConfig.proc_rank();
01625       tmp_handles[1] = entity;
01626       num_ents = 2;
01627     }
01628     else if (pstatus & PSTATUS_SHARED) {
01629       // if not multishared and owned, I'm owner
01630       assert("shared and owned, should be only 1 sharing proc" &&
01631              1 == num_ents);
01632       tmp_procs[1] = tmp_procs[0];
01633       tmp_procs[0] = procConfig.proc_rank();
01634       tmp_handles[1] = tmp_handles[0];
01635       tmp_handles[0] = entity;
01636       num_ents = 2;
01637     }
01638     else {
01639       // not shared yet, just add owner (me)
01640       tmp_procs[0] = procConfig.proc_rank();
01641       tmp_handles[0] = entity;
01642       num_ents = 1;
01643     }
01644 
01645 #ifndef NDEBUG
01646     int tmp_ps = num_ents;
01647 #endif
01648   
01649     // now add others, with zero handle for now
01650     for (std::set<unsigned int>::iterator sit = procs.begin();
01651          sit != procs.end(); sit++) {
01652 #ifndef NDEBUG
01653       if (tmp_ps && std::find(tmp_procs, tmp_procs+tmp_ps, *sit) != tmp_procs+tmp_ps) {
01654         std::cerr << "Trouble with something already in shared list on proc " << procConfig.proc_rank()
01655                   << ".  Entity:" << std::endl;
01656         list_entities(&entity, 1);
01657         std::cerr << "pstatus = " << (int) pstatus << ", sharedp = " << sharedp << std::endl;
01658         std::cerr << "tmp_ps = ";
01659         for (int i = 0; i < tmp_ps; i++) std::cerr << tmp_procs[i] << " ";
01660         std::cerr << std::endl;
01661         std::cerr << "procs = ";
01662         for (std::set<unsigned int>::iterator sit2 = procs.begin(); sit2 != procs.end(); sit2++) 
01663           std::cerr << *sit2 << " ";
01664         assert(false);
01665       }
01666 #endif    
01667       tmp_procs[num_ents] = *sit;
01668       tmp_handles[num_ents] = 0;
01669       num_ents++;
01670     }
01671 
01672     // put -1 after procs and 0 after handles
01673     if (MAX_SHARING_PROCS > num_ents) {
01674       tmp_procs[num_ents] = -1;
01675       tmp_handles[num_ents] = 0;
01676     }
01677   
01678     return MB_SUCCESS;
01679   }
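  // Example of the lists built above: if this proc (say rank 3) owns an entity
  // already shared with proc 7 and is now also sending it to proc 9, the
  // routine returns num_ents = 3 with
  //   tmp_procs   = { 3, 7, 9, -1, ... }
  //   tmp_handles = { local_handle, handle_on_7, 0, 0, ... }
  // the zero handle for proc 9 being a placeholder until that proc reports its
  // handle back.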
01680 
01681   ErrorCode ParallelComm::pack_entity_seq(const int nodes_per_entity,
01682                                           const bool store_remote_handles,
01683                                           const int to_proc,
01684                                           Range &these_ents,
01685                                           std::vector<EntityHandle> &entities_vec,
01686                                           Buffer *buff) 
01687   {
01688     int tmp_space = 3*sizeof(int) + nodes_per_entity*these_ents.size()*sizeof(EntityHandle);
01689     buff->check_space(tmp_space);
01690   
01691     // pack the entity type
01692     PACK_INT(buff->buff_ptr, ((int)TYPE_FROM_HANDLE(*these_ents.begin())));
01693 
01694     // pack # ents
01695     PACK_INT(buff->buff_ptr, these_ents.size());
01696       
01697     // pack the nodes per entity
01698     PACK_INT(buff->buff_ptr, nodes_per_entity);
01699       
01700     // pack the connectivity
01701     std::vector<EntityHandle> connect;
01702     ErrorCode result = MB_SUCCESS;
01703     for (Range::const_iterator rit = these_ents.begin(); rit != these_ents.end(); rit++) {
01704       connect.clear();
01705       result = mbImpl->get_connectivity(&(*rit), 1, connect, false);
01706       RRA("Failed to get connectivity.");
01707       assert((int)connect.size() == nodes_per_entity);
01708       result = get_remote_handles(store_remote_handles, &connect[0], &connect[0],
01709                                   connect.size(), to_proc, entities_vec);
01710       RRA("Failed in get_remote_handles.");
01711       PACK_EH(buff->buff_ptr, &connect[0], connect.size());
01712     }
01713 
01714     // substitute destination handles
01715     RRA("Trouble getting remote handles when packing entities.");
01716 
01717     myDebug->tprintf(4, "Packed %lu ents of type %s\n", (unsigned long)these_ents.size(),
01718                      CN::EntityTypeName(TYPE_FROM_HANDLE(*these_ents.begin())));
01719 
01720     return result;
01721   }
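  // The batch packed above has the layout
  //   [type, e.g. MBHEX] [# ents] [nodes per ent] [connectivity of ent 0] ...
  // with every connectivity handle already translated by get_remote_handles to
  // the destination proc's handle, or to an index placeholder when that handle
  // is not known.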
01722 
01723 
01724   ErrorCode ParallelComm::get_remote_handles(const bool store_remote_handles,
01725                                              EntityHandle *from_vec, 
01726                                              EntityHandle *to_vec_tmp,
01727                                              int num_ents, int to_proc,
01728                                              const std::vector<EntityHandle> &new_ents) 
01729   {
01730     // NOTE: THIS IMPLEMENTATION IS JUST LIKE THE RANGE-BASED VERSION, NO REUSE
01731     // AT THIS TIME, SO IF YOU FIX A BUG IN THIS VERSION, IT MAY BE IN THE
01732     // OTHER VERSION TOO!!!
01733     if (0 == num_ents) return MB_SUCCESS;
01734   
01735     // use a local destination ptr in case we're doing an in-place copy
01736     std::vector<EntityHandle> tmp_vector;
01737     EntityHandle *to_vec = to_vec_tmp;
01738     if (to_vec == from_vec) {
01739       tmp_vector.resize(num_ents);
01740       to_vec = &tmp_vector[0];
01741     }
01742 
01743     if (!store_remote_handles) {
01744       int err;
01745       // in this case, substitute position in new_ents list
01746       for (int i = 0; i < num_ents; i++) {
01747         int ind = std::lower_bound(new_ents.begin(), new_ents.end(), from_vec[i]) - new_ents.begin();
01748         assert(new_ents[ind] == from_vec[i]);
01749         to_vec[i] = CREATE_HANDLE(MBMAXTYPE, ind, err);
01750         assert(to_vec[i] != 0 && !err && -1 != ind);
01751       }
01752     }
01753     else {
01754       Tag shp_tag, shps_tag, shh_tag, shhs_tag, pstat_tag;
01755       ErrorCode result = get_shared_proc_tags(shp_tag, shps_tag, 
01756                                               shh_tag, shhs_tag, pstat_tag);
01757   
01758       // get single-proc destination handles and shared procs
01759       std::vector<int> sharing_procs(num_ents);
01760       result = mbImpl->tag_get_data(shh_tag, from_vec, num_ents,
01761                                     to_vec);
01762       RRA("Failed to get shared handle tag for remote_handles.");
01763       result = mbImpl->tag_get_data(shp_tag, from_vec, num_ents, &sharing_procs[0]);
01764       RRA("Failed to get sharing proc tag in remote_handles.");
01765       for (int j = 0; j < num_ents; j++) {
01766         if (to_vec[j] && sharing_procs[j] != to_proc)
01767           to_vec[j] = 0;
01768       }
01769     
01770       EntityHandle tmp_handles[MAX_SHARING_PROCS];
01771       int tmp_procs[MAX_SHARING_PROCS];
01772       int i;
01773       // go through results, and for 0-valued ones, look for multiple shared proc
01774       for (i = 0; i < num_ents; i++) {
01775         if (!to_vec[i]) {
01776           result = mbImpl->tag_get_data(shps_tag, from_vec+i, 1, tmp_procs);
01777           if (MB_SUCCESS == result) {
01778             for (int j = 0; j < MAX_SHARING_PROCS; j++) {
01779               if (-1 == tmp_procs[j]) break;
01780               else if (tmp_procs[j] == to_proc) {
01781                 result = mbImpl->tag_get_data(shhs_tag, from_vec+i, 1, tmp_handles);
01782                 RRA("Trouble getting sharedhs tag.");
01783                 to_vec[i] = tmp_handles[j];
01784                 assert(to_vec[i]);
01785                 break;
01786               }
01787             }
01788           }
01789           if (!to_vec[i]) {
01790             int j = std::lower_bound(new_ents.begin(), new_ents.end(), from_vec[i]) - new_ents.begin();
01791             if ((int)new_ents.size() == j) {
01792               result = MB_FAILURE;
01793               std::cout << "Failed to find new entity in send list, proc " 
01794                         << procConfig.proc_rank() << std::endl;
01795               for (int k = 0; k < num_ents; k++) 
01796                 std::cout << k << ": " << from_vec[k] << " " << to_vec[k] 
01797                           << std::endl;
01798               RRA("Failed to find new entity in send list.");
01799             }
01800             int err;
01801             to_vec[i] = CREATE_HANDLE(MBMAXTYPE, j, err);
01802             if (err) {
01803               result = MB_FAILURE;
01804               RRA("Failed to create handle in remote_handles.");
01805             }
01806           }
01807         }
01808       }
01809     }
01810   
01811     // memcpy over results if from_vec and to_vec are the same
01812     if (to_vec_tmp == from_vec) 
01813       memcpy(from_vec, to_vec, num_ents * sizeof(EntityHandle));
01814   
01815     return MB_SUCCESS;
01816   }
01817 
01818   ErrorCode ParallelComm::get_remote_handles(const bool store_remote_handles,
01819                                              const Range &from_range, 
01820                                              EntityHandle *to_vec,
01821                                              int to_proc,
01822                                              const std::vector<EntityHandle> &new_ents) 
01823   {
01824     // NOTE: THIS IMPLEMENTATION IS JUST LIKE THE VECTOR-BASED VERSION, NO REUSE
01825     // AT THIS TIME, SO IF YOU FIX A BUG IN THIS VERSION, IT MAY BE IN THE
01826     // OTHER VERSION TOO!!!
01827     if (from_range.empty()) return MB_SUCCESS;
01828   
01829     if (!store_remote_handles) {
01830       int err;
01831       // in this case, substitute position in new_ents list
01832       Range::iterator rit;
01833       unsigned int i;
01834       for (rit = from_range.begin(), i = 0; rit != from_range.end(); rit++, i++) {
01835         int ind = std::lower_bound(new_ents.begin(), new_ents.end(), *rit) - new_ents.begin();
01836         assert(new_ents[ind] == *rit);
01837         to_vec[i] = CREATE_HANDLE(MBMAXTYPE, ind, err);
01838         assert(to_vec[i] != 0 && !err && -1 != ind);
01839       }
01840     }
01841     else {
01842       Tag shp_tag, shps_tag, shh_tag, shhs_tag, pstat_tag;
01843       ErrorCode result = get_shared_proc_tags(shp_tag, shps_tag, 
01844                                               shh_tag, shhs_tag, pstat_tag);
01845   
01846       // get single-proc destination handles and shared procs
01847       std::vector<int> sharing_procs(from_range.size());
01848       result = mbImpl->tag_get_data(shh_tag, from_range, to_vec);
01849       RRA("Failed to get shared handle tag for remote_handles.");
01850       result = mbImpl->tag_get_data(shp_tag, from_range, &sharing_procs[0]);
01851       RRA("Failed to get sharing proc tag in remote_handles.");
01852       for (unsigned int j = 0; j < from_range.size(); j++) {
01853         if (to_vec[j] && sharing_procs[j] != to_proc)
01854           to_vec[j] = 0;
01855       }
01856     
01857       EntityHandle tmp_handles[MAX_SHARING_PROCS];
01858       int tmp_procs[MAX_SHARING_PROCS];
01859       // go through results, and for 0-valued ones, look for multiple shared proc
01860       Range::iterator rit;
01861       unsigned int i;
01862       for (rit = from_range.begin(), i = 0; rit != from_range.end(); rit++, i++) {
01863         if (!to_vec[i]) {
01864           result = mbImpl->tag_get_data(shhs_tag, &(*rit), 1, tmp_handles);
01865           if (MB_SUCCESS == result) {
01866             result = mbImpl->tag_get_data(shps_tag, &(*rit), 1, tmp_procs);
01867             RRA("Trouble getting sharedps tag.");
01868             for (int j = 0; j < MAX_SHARING_PROCS; j++)
01869               if (tmp_procs[j] == to_proc) {
01870                 to_vec[i] = tmp_handles[j];
01871                 break;
01872               }
01873           }
01874       
01875           if (!to_vec[i]) {
01876             int j = std::lower_bound(new_ents.begin(), new_ents.end(), *rit) - new_ents.begin();
01877             if ((int)new_ents.size() == j) {
01878               result = MB_FAILURE;
01879               RRA("Failed to find new entity in send list.");
01880             }
01881             int err;
01882             to_vec[i] = CREATE_HANDLE(MBMAXTYPE, j, err);
01883             if (err) {
01884               result = MB_FAILURE;
01885               RRA("Failed to create handle in remote_handles.");
01886             }
01887           }
01888         }
01889       }
01890     }
01891   
01892     return MB_SUCCESS;
01893   }
01894 
01895   ErrorCode ParallelComm::get_remote_handles(const bool store_remote_handles,
01896                                              const Range &from_range, 
01897                                              Range &to_range,
01898                                              int to_proc,
01899                                              const std::vector<EntityHandle> &new_ents) 
01900   {
01901     std::vector<EntityHandle> to_vector(from_range.size());
01902 
01903     ErrorCode result =
01904       get_remote_handles(store_remote_handles, from_range, &to_vector[0],
01905                          to_proc, new_ents);
01906     RRA("Trouble getting remote handles.");
01907     std::copy(to_vector.begin(), to_vector.end(), range_inserter(to_range));
01908     return result;
01909   }
01910 
01911   ErrorCode ParallelComm::unpack_entities(unsigned char *&buff_ptr,
01912                                           const bool store_remote_handles,
01913                                           const int /*from_ind*/,
01914                                           const bool is_iface,
01915                                           std::vector<std::vector<EntityHandle> > &L1hloc,
01916                                           std::vector<std::vector<EntityHandle> > &L1hrem,
01917                                           std::vector<std::vector<int> > &L1p,
01918                                           std::vector<EntityHandle> &L2hloc, 
01919                                           std::vector<EntityHandle> &L2hrem,
01920                                           std::vector<unsigned int> &L2p,
01921                                           std::vector<EntityHandle> &new_ents,
01922                                           const bool created_iface) 
01923   {
01924     // general algorithm:
01925     // - unpack # entities
01926     // - save start of remote handle info, then scan forward to entity definition data
01927     // - for all vertices or entities w/ same # verts:
01928     //   . get entity type, num ents, and (if !vert) # verts 
01929     //   . for each ent:
01930     //      o get # procs/handles in remote handle info
01931     //      o if # procs/handles > 2, check for already-created entity:
01932     //        x get index of owner proc (1st in proc list), resize L1 list if nec
01933     //        x look for already-arrived entity in L2 by owner handle
01934     //      o if no existing entity:
01935     //        x if iface, look for existing entity with same connect & type
01936     //        x if none found, create vertex or element
01937     //        x if !iface & multi-shared, save on L2
01938     //        x if !iface, put new entity on new_ents list
01939     //      o update proc/handle, pstatus tags, adjusting to put owner first if iface
01940     //      o if !iface, save new handle on L1 for all sharing procs
01941 
01942     // lists of handles/procs to return to sending/other procs
01943     // L1hloc[p], L1hrem[p]: handle pairs [h, h'], where h is the local proc handle
01944     //         and h' is either the remote proc handle (if that is known) or
01945     //         the owner proc handle (otherwise);
01946     // L1p[p]: indicates whether h is remote handle (= -1) or owner (rank of owner)
01947     // L2hloc, L2hrem: local/remote handles for entities shared by > 2 procs;
01948     //         remote handles are on owning proc
01949     // L2p: owning procs for handles in L2hrem
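    // for instance, if this proc creates a ghost vertex h_loc that proc 2 sent
    // with its handle h2, and proc 5 also shares it but its handle is unknown
    // here, the pair (h_loc, h2) goes on proc 2's L1 lists with L1p = -1, while
    // the pair (h_loc, owner's handle) goes on proc 5's lists with L1p = owner
    // rank, so proc 5 can look the entity up through its owner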
01950 
01951     ErrorCode result;
01952     bool done = false;
01953     ReadUtilIface *ru = NULL;
01954 
01955     result = mbImpl->query_interface(ru);
01956     RRA("Failed to get ReadUtilIface.");
01957 
01958     // procs the sending proc is telling me I'll be receiving from
01959     std::set<unsigned int> comm_procs;
01960 
01961     // 1. # entities = E
01962     int num_ents = 0;
01963     unsigned char *buff_save = buff_ptr;
01964     int i, j;
01965 
01966     if (store_remote_handles) {
01967       UNPACK_INT(buff_ptr, num_ents);
01968 
01969       buff_save = buff_ptr;
01970     
01971       // save place where remote handle info starts, then scan forward to ents
01972       for (i = 0; i < num_ents; i++) {
01973         UNPACK_INT(buff_ptr, j);
01974         if (j < 0) {
01975           std::cout << "Should be a non-negative # of procs/handles." << std::endl;
01976           return MB_FAILURE;
01977         }
01978       
01979         buff_ptr += j * (sizeof(int)+sizeof(EntityHandle));
01980       }
01981     }
01982 
01983     std::vector<EntityHandle> msg_ents;
01984   
01985     while (!done) {
01986       EntityType this_type = MBMAXTYPE;
01987       UNPACK_TYPE(buff_ptr, this_type);
01988       assert(this_type != MBENTITYSET);
01989 
01990       // MBMAXTYPE signifies end of entities data
01991       if (MBMAXTYPE == this_type) break;
01992 
01993       // get the number of ents
01994       int num_ents2, verts_per_entity = 0;
01995       UNPACK_INT(buff_ptr, num_ents2);
01996 
01997       // unpack the nodes per entity
01998       if (MBVERTEX != this_type && num_ents2) {
01999         UNPACK_INT(buff_ptr, verts_per_entity);
02000       }
02001       
02002       std::vector<int> ps(MAX_SHARING_PROCS, -1);
02003       std::vector<EntityHandle> hs(MAX_SHARING_PROCS, 0);
02004       for (int e = 0; e < num_ents2; e++) {
02005         // check for existing entity, otherwise make new one
02006         EntityHandle new_h = 0;
02007 
02008         EntityHandle connect[CN::MAX_NODES_PER_ELEMENT];
02009         double coords[3];
02010         int num_ps = -1;
02011 
02012         //=======================================
02013         // unpack all the data at once, to make sure the buffer pointers
02014         // are tracked correctly
02015         //=======================================
02016         if (store_remote_handles) {
02017           // pointers to other procs/handles
02018           UNPACK_INT(buff_save, num_ps);
02019           if (0 >= num_ps) {
02020             std::cout << "Shouldn't ever be fewer than 1 proc here." << std::endl;
02021             return MB_FAILURE;
02022           }
02023         
02024           UNPACK_INTS(buff_save, &ps[0], num_ps);
02025           UNPACK_EH(buff_save, &hs[0], num_ps);
02026         }
02027 
02028         if (MBVERTEX == this_type) {
02029           UNPACK_DBLS(buff_ptr, coords, 3);
02030         }
02031         else {
02032           assert(verts_per_entity <= CN::MAX_NODES_PER_ELEMENT);
02033           UNPACK_EH(buff_ptr, connect, verts_per_entity);
02034 
02035           // update connectivity to local handles
02036           result = get_local_handles(connect, verts_per_entity, msg_ents);
02037           RRA("Couldn't get local handles.");
02038         }
02039 
02040         //=======================================
02041         // now, process that data; begin by finding an identical 
02042         // entity, if there is one
02043         //=======================================
02044         if (store_remote_handles) {
02045           result = find_existing_entity(is_iface, ps[0], hs[0], num_ps, 
02046                                         connect, verts_per_entity,
02047                                         this_type,
02048                                         L2hloc, L2hrem, L2p,
02049                                         new_h);
02050           RRA("Trouble getting existing entity.");
02051         }
02052 
02053         //=======================================
02054         // if we didn't find one, we'll have to create one
02055         //=======================================
02056         bool created_here = false;
02057         if (!new_h && !is_iface) {
02058 
02059           if (MBVERTEX == this_type) {
02060             // create a vertex
02061             result = mbImpl->create_vertex(coords, new_h);
02062             RRA("Couldn't make new vertex.");
02063           }
02064           else {
02065             // create the element
02066             result = mbImpl->create_element(this_type, connect, verts_per_entity, new_h);
02067             RRA("Couldn't make new vertex.");
02068 
02069             // update adjacencies
02070             result = ru->update_adjacencies(new_h, 1, 
02071                                             verts_per_entity, connect);
02072             RRA("Failed to update adjacencies.");
02073           }
02074 
02075           // should have a new handle now
02076           assert(new_h);
02077         
02078           created_here = true;
02079         }
02080 
02081         //=======================================
02082         // take care of sharing data
02083         //=======================================
02084 
02085         // need to save entities found in order, for interpretation of
02086         // later parts of this message
02087         if (!is_iface) {
02088           assert(new_h);
02089           msg_ents.push_back(new_h);
02090         }
02091 
02092         if (created_here) new_ents.push_back(new_h);
02093 
02094         if (new_h && store_remote_handles) {
02095           unsigned char new_pstat = 0x0;
02096           if (is_iface) {
02097             new_pstat = PSTATUS_INTERFACE;
02098               // here, lowest rank proc should be first
02099             int idx = std::min_element(&ps[0], &ps[0]+num_ps) - &ps[0];
02100             if (idx) {
02101               std::swap(ps[0], ps[idx]);
02102               std::swap(hs[0], hs[idx]);
02103             }
02104               // set ownership based on lowest rank; can't be in update_remote_data, because
02105               // there we don't know whether it resulted from ghosting or not
02106             if ((num_ps > 1 && ps[0] != (int) rank()))
02107               new_pstat |= PSTATUS_NOT_OWNED;
02108           }
02109           else if (created_here) {
02110             if (created_iface) new_pstat = PSTATUS_NOT_OWNED;
02111             else new_pstat = PSTATUS_GHOST | PSTATUS_NOT_OWNED;
02112           }
02113         
02114           // update sharing data and pstatus, adjusting order if iface
02115           result = update_remote_data(new_h, &ps[0], &hs[0], num_ps, new_pstat);
02116           RRA("unpack_entities");
02117         
02118           // if a new multi-shared entity, save owner for subsequent lookup in L2 lists
02119           if (store_remote_handles && !is_iface && num_ps > 2) {
02120             L2hrem.push_back(hs[0]);
02121             L2hloc.push_back(new_h);
02122             L2p.push_back(ps[0]);
02123           }
02124 
02125           // need to send this new handle to all sharing procs
02126           if (!is_iface) {
02127             for (j = 0; j < num_ps; j++) {
02128               if (ps[j] == (int)procConfig.proc_rank()) continue;
02129               int idx = get_buffers(ps[j]);
02130               if (idx == (int)L1hloc.size()) {
02131                 L1hloc.resize(idx+1);
02132                 L1hrem.resize(idx+1);
02133                 L1p.resize(idx+1);
02134               }
02135             
02136               // don't bother adding if it's already in the list
02137               std::vector<EntityHandle>::iterator vit = 
02138                 std::find(L1hloc[idx].begin(), L1hloc[idx].end(), new_h);
02139               if (vit != L1hloc[idx].end()) {
02140                 // if it's in the list but remote handle isn't known but we know
02141                 // it, replace in the list
02142                 if (L1p[idx][vit-L1hloc[idx].begin()] != -1 && hs[j]) {
02143                   L1hrem[idx][vit-L1hloc[idx].begin()] = hs[j];
02144                   L1p[idx][vit-L1hloc[idx].begin()] = -1;
02145                 }
02146                 else continue;
02147               }
02148               else {
02149                 if (!hs[j]) {
02150                   assert(-1 != ps[0] && num_ps > 2);
02151                   L1p[idx].push_back(ps[0]);
02152                   L1hrem[idx].push_back(hs[0]);
02153                 }
02154                 else {
02155                   assert("either this remote handle isn't in the remote list, or it's for another proc" &&
02156                          (std::find(L1hrem[idx].begin(), L1hrem[idx].end(), hs[j]) == 
02157                           L1hrem[idx].end() ||
02158                           L1p[idx][std::find(L1hrem[idx].begin(), L1hrem[idx].end(), hs[j]) - 
02159                                    L1hrem[idx].begin()] != -1));
02160                   L1p[idx].push_back(-1);
02161                   L1hrem[idx].push_back(hs[j]);
02162                 }
02163                 L1hloc[idx].push_back(new_h);
02164               }
02165             }
02166           }
02167 
02168           assert("Shouldn't be here for non-shared entities" &&
02169                  -1 != num_ps);
02170           std::fill(&ps[0], &ps[num_ps], -1);
02171           std::fill(&hs[0], &hs[num_ps], 0);
02172         }
02173       }
02174     
02175     
02176       myDebug->tprintf(4, "Unpacked %d ents of type %s", num_ents2,
02177                        CN::EntityTypeName(TYPE_FROM_HANDLE(this_type)));
02178     }
02179 
02180     myDebug->tprintf(4, "Done unpacking entities.\n");
02181 
02182     // need to sort here, to enable searching
02183     std::sort(new_ents.begin(), new_ents.end());
02184   
02185     return MB_SUCCESS;
02186   }
02187 
02188   ErrorCode ParallelComm::print_buffer(unsigned char *buff_ptr, 
02189                                        int mesg_tag, 
02190                                        int from_proc, bool sent) 
02191   {
02192     std::cerr << procConfig.proc_rank();
02193     if (sent) std::cerr << " sent";
02194     else std::cerr << " received";
02195     std::cerr << " message type " << mesg_tag 
02196               << " to/from proc " << from_proc << "; contents:" << std::endl;
02197 
02198     int msg_length, num_ents;
02199     unsigned char *orig_ptr = buff_ptr;
02200     UNPACK_INT(buff_ptr, msg_length);
02201     std::cerr << msg_length << " bytes..." << std::endl;
02202 
02203     if (MB_MESG_ENTS_SIZE == mesg_tag || MB_MESG_ENTS_LARGE == mesg_tag) {
02204 
02205       // 1. # entities = E
02206       int i, j, k;
02207       std::vector<int> ps;
02208       std::vector<EntityHandle> hs;
02209 
02210       UNPACK_INT(buff_ptr, num_ents);
02211       std::cerr << num_ents << " entities..." << std::endl;
02212 
02213       // save place where remote handle info starts, then scan forward to ents
02214       for (i = 0; i < num_ents; i++) {
02215         UNPACK_INT(buff_ptr, j);
02216         if (0 > j) return MB_FAILURE;
02217         ps.resize(j);
02218         hs.resize(j);
02219         std::cerr << "Entity " << i << ", # procs = " << j << std::endl;
02220         UNPACK_INTS(buff_ptr, &ps[0], j);
02221         UNPACK_EH(buff_ptr, &hs[0], j);
02222         std::cerr << "   Procs: ";
02223         for (k = 0; k < j; k++) std::cerr << ps[k] << " ";
02224         std::cerr << std::endl;
02225         std::cerr << "   Handles: ";
02226         for (k = 0; k < j; k++) std::cerr << hs[k] << " ";
02227         std::cerr << std::endl;
02228 
02229         if (buff_ptr-orig_ptr > msg_length) {
02230           std::cerr << "End of buffer..." << std::endl;
02231           std::cerr.flush();
02232           return MB_FAILURE;
02233         }
02234       }
02235   
02236       while (true) {
02237         EntityType this_type = MBMAXTYPE;
02238         UNPACK_TYPE(buff_ptr, this_type);
02239         assert(this_type != MBENTITYSET);
02240 
02241         // MBMAXTYPE signifies end of entities data
02242         if (MBMAXTYPE == this_type) break;
02243 
02244         // get the number of ents
02245         int num_ents2, verts_per_entity = 0;
02246         UNPACK_INT(buff_ptr, num_ents2);
02247 
02248         // unpack the nodes per entity
02249         if (MBVERTEX != this_type && num_ents2) {
02250           UNPACK_INT(buff_ptr, verts_per_entity);
02251         }
02252 
02253         std::cerr << "Type: " << CN::EntityTypeName(this_type)
02254                   << "; num_ents = " << num_ents2;
02255         if (MBVERTEX != this_type) std::cerr << "; verts_per_ent = " << verts_per_entity;
02256         std::cerr << std::endl;
02257         if (num_ents2 < 0 || num_ents2 > msg_length) {
02258           std::cerr << "Wrong number of entities, returning." << std::endl;
02259           return MB_FAILURE;
02260         }
02261     
02262         for (int e = 0; e < num_ents2; e++) {
02263           // check for existing entity, otherwise make new one
02264 
02265           if (MBVERTEX == this_type) {
02266             double coords[3];
02267             UNPACK_DBLS(buff_ptr, coords, 3);
02268             std::cerr << "xyz = " << coords[0] << ", " << coords[1] << ", " 
02269                       << coords[2] << std::endl;
02270           }
02271           else {
02272             EntityHandle connect[CN::MAX_NODES_PER_ELEMENT];
02273             assert(verts_per_entity <= CN::MAX_NODES_PER_ELEMENT);
02274             UNPACK_EH(buff_ptr, connect, verts_per_entity);
02275 
02276             // update connectivity to local handles
02277             std::cerr << "Connectivity: ";
02278             for (k = 0; k < verts_per_entity; k++) std::cerr << connect[k] << " ";
02279             std::cerr << std::endl;
02280           }
02281 
02282           if (buff_ptr-orig_ptr > msg_length) {
02283             std::cerr << "End of buffer..." << std::endl;
02284             std::cerr.flush();
02285             return MB_FAILURE;
02286           }
02287         }
02288       }
02289     }
02290   
02291     else if (MB_MESG_REMOTEH_SIZE == mesg_tag || MB_MESG_REMOTEH_LARGE == mesg_tag) {
02292       UNPACK_INT(buff_ptr, num_ents);
02293       std::cerr << num_ents << " entities..." << std::endl;
02294       if (0 > num_ents || num_ents > msg_length) {
02295         std::cerr << "Wrong number of entities, returning." << std::endl;
02296         return MB_FAILURE;
02297       }
02298       std::vector<EntityHandle> L1hloc(num_ents), L1hrem(num_ents);
02299       std::vector<int> L1p(num_ents);
02300       UNPACK_INTS(buff_ptr, &L1p[0], num_ents);
02301       UNPACK_EH(buff_ptr, &L1hrem[0], num_ents);
02302       UNPACK_EH(buff_ptr, &L1hloc[0], num_ents);
02303       std::cerr << num_ents << " Entity pairs; hremote/hlocal/proc: " << std::endl;
02304       for (int i = 0; i < num_ents; i++) {
02305         EntityType etype = TYPE_FROM_HANDLE(L1hloc[i]);
02306         std::cerr << CN::EntityTypeName(etype) << ID_FROM_HANDLE(L1hrem[i])  << ", " 
02307                   << CN::EntityTypeName(etype) << ID_FROM_HANDLE(L1hloc[i])  << ", " 
02308                   << L1p[i] << std::endl;
02309       }
02310 
02311       if (buff_ptr-orig_ptr > msg_length) {
02312         std::cerr << "End of buffer..." << std::endl;
02313         std::cerr.flush();
02314         return MB_FAILURE;
02315       }
02316 
02317     }
02318     else if (mesg_tag == MB_MESG_TAGS_SIZE || mesg_tag == MB_MESG_TAGS_LARGE) {
02319       int num_tags, dum1, data_type, tag_size;
02320       UNPACK_INT(buff_ptr, num_tags);
02321       std::cerr << "Number of tags = " << num_tags << std::endl;
02322       for (int i = 0; i < num_tags; i++) {
02323         std::cerr << "Tag " << i << ":" << std::endl;
02324         UNPACK_INT(buff_ptr, tag_size);
02325         UNPACK_INT(buff_ptr, dum1);
02326         UNPACK_INT(buff_ptr, data_type);
02327         std::cerr << "Tag size, type, data type = " << tag_size << ", " 
02328                   << dum1 << ", " << data_type << std::endl;
02329         UNPACK_INT(buff_ptr, dum1);
02330         std::cerr << "Default value size = " << dum1 << std::endl;
02331         buff_ptr += dum1;
02332         UNPACK_INT(buff_ptr, dum1);
02333         std::string name((char*)buff_ptr, dum1);
02334         std::cerr << "Tag name = " << name.c_str() << std::endl;
02335         buff_ptr += dum1;
02336         UNPACK_INT(buff_ptr, num_ents);
02337         std::cerr << "Number of ents = " << num_ents << std::endl;
02338         std::vector<EntityHandle> tmp_buff(num_ents);
02339         UNPACK_EH(buff_ptr, &tmp_buff[0], num_ents);
02340         int tot_length = 0;
02341         for (int j = 0; j < num_ents; j++) {
02342           EntityType etype = TYPE_FROM_HANDLE(tmp_buff[j]);
02343           std::cerr << CN::EntityTypeName(etype) << " " 
02344                     << ID_FROM_HANDLE(tmp_buff[j])
02345                     << ", tag = ";
02346           if (tag_size == MB_VARIABLE_LENGTH) {
02347             UNPACK_INT(buff_ptr, dum1);
02348             tot_length += dum1;
02349             std::cerr << "(variable, length = " << dum1 << ")" << std::endl;
02350           }
02351           else if (data_type == MB_TYPE_DOUBLE) {
02352             double dum_dbl;
02353             UNPACK_DBL(buff_ptr, dum_dbl);
02354             std::cerr << dum_dbl << std::endl;
02355           }
02356           else if (data_type == MB_TYPE_INTEGER) {
02357             int dum_int;
02358             UNPACK_INT(buff_ptr, dum_int);
02359             std::cerr << dum_int << std::endl;
02360           }
02361           else if (data_type == MB_TYPE_OPAQUE) {
02362             std::cerr << "(opaque)" << std::endl;
02363             buff_ptr += tag_size;
02364           }
02365           else if (data_type == MB_TYPE_HANDLE) {
02366             EntityHandle dum_eh;
02367             UNPACK_EH(buff_ptr, &dum_eh, 1);
02368             std::cerr <<  dum_eh << std::endl;
02369           }
02370           else if (data_type == MB_TYPE_BIT) {
02371             std::cerr << "(bit)" << std::endl;
02372             buff_ptr += tag_size;
02373           }
02374         }
02375         if (tag_size == MB_VARIABLE_LENGTH) buff_ptr += tot_length;
02376       }
02377     }
02378     else {
02379       assert(false);
02380       return MB_FAILURE;
02381     }
02382 
02383     std::cerr.flush();
02384   
02385     return MB_SUCCESS;
02386   }
02387 
02388   ErrorCode ParallelComm::list_entities(const EntityHandle *ents, int num_ents) 
02389   {
02390     if (NULL == ents && 0 == num_ents) {
02391       Range shared_ents;
02392       std::copy(sharedEnts.begin(), sharedEnts.end(), range_inserter(shared_ents));
02393       shared_ents.print("Shared entities:\n");
02394       return MB_SUCCESS;
02395     }
02396   
02397     else if (NULL == ents && 0 != num_ents) {
02398       return list_entities(&sharedEnts[0], sharedEnts.size());
02399     }
02400     
02401     unsigned char pstat;
02402     EntityHandle tmp_handles[MAX_SHARING_PROCS];
02403     int tmp_procs[MAX_SHARING_PROCS];
02404     unsigned int num_ps;
02405     ErrorCode result;
02406 
02407     for (int i = 0; i < num_ents; i++) {
02408       result = mbImpl->list_entities(ents+i, 1);
02409 
02410       result = get_sharing_data(ents[i], tmp_procs, tmp_handles, pstat, num_ps);
02411       RRA("Failed to get sharing data.");
02412 
02413       std::cout << "Pstatus: ";
02414       if (!num_ps)
02415         std::cout << "local " << std::endl;
02416       else {
02417         if (pstat & PSTATUS_NOT_OWNED) std::cout << "NOT_OWNED; ";
02418         if (pstat & PSTATUS_SHARED) std::cout << "SHARED; ";
02419         if (pstat & PSTATUS_MULTISHARED) std::cout << "MULTISHARED; ";
02420         if (pstat & PSTATUS_INTERFACE) std::cout << "INTERFACE; ";
02421         if (pstat & PSTATUS_GHOST) std::cout << "GHOST; ";
02422         std::cout << std::endl;
02423         for (unsigned int j = 0; j < num_ps; j++) {
02424           std::cout << "  proc " << tmp_procs[j] << " id (handle) " 
02425                     << mbImpl->id_from_handle(tmp_handles[j]) 
02426                     << "(" << tmp_handles[j] << ")" << std::endl;
02427         }
02428       }
02429       std::cout << std::endl;
02430     }
02431 
02432     return MB_SUCCESS;
02433   }
02434   
02435   ErrorCode ParallelComm::list_entities(const Range &ents) 
02436   {
02437     for (Range::iterator rit = ents.begin(); rit != ents.end(); rit++)
02438       list_entities(&(*rit), 1);
02439     return MB_SUCCESS;
02440   }
02441 
02442   ErrorCode ParallelComm::update_remote_data(Range &local_range,
02443                                              Range &remote_range,
02444                                              int other_proc,
02445                                              const unsigned char add_pstat) 
02446   {
02447     Range::iterator rit, rit2;
02448     ErrorCode result = MB_SUCCESS;
02449 
02450     // for each pair of local/remote handles:
02451     for (rit = local_range.begin(), rit2 = remote_range.begin(); 
02452          rit != local_range.end(); rit++, rit2++) {
02453 
02454       result = update_remote_data(*rit, &other_proc, &(*rit2), 1, add_pstat);
02455       RRA(" ");
02456     }
02457 
02458     return result;
02459   }
02460   
02461   ErrorCode ParallelComm::update_remote_data(const EntityHandle new_h,
02462                                                  const int *ps,
02463                                                  const EntityHandle *hs,
02464                                                  const int num_ps,
02465                                                  const unsigned char add_pstat
02466 // the following lines left in for future debugging, at least until I trust this function; tjt, 10/4/2013
02467 //                                             ,int *new_ps,
02468 //                                             EntityHandle *new_hs,
02469 //                                             int &new_numps,
02470 //                                             unsigned char &new_pstat
02471                                              ) 
02472   {
02473       // get initial sharing data; tag_ps and tag_hs get terminated with -1 and 0
02474       // in this function, so no need to initialize; sharing data does not include
02475       // this proc if shared with only one other
02476 
02477       // following variables declared here to avoid compiler errors
02478     int new_numps;
02479     unsigned char new_pstat;
02480     std::vector<int> new_ps(MAX_SHARING_PROCS, -1);
02481     std::vector<EntityHandle> new_hs(MAX_SHARING_PROCS, 0);
02482     
02483     new_numps = 0;
02484     ErrorCode result = get_sharing_data(new_h, &new_ps[0], &new_hs[0], new_pstat, new_numps);
02485     RRA("update_remote_data");
02486     int num_exist = new_numps;
02487 
02488       // add new pstat info to the flag
02489     new_pstat |= add_pstat;
02490     
02491 /*
02492 #define plist(str, lst, siz)                                          \
02493     std::cout << str << "(";                                          \
02494     for (int i = 0; i < (int)siz; i++) std::cout << lst[i] << " ";    \
02495     std::cout << ") ";                                                \
02496     
02497     std::cout << "update_remote_data: rank = " << rank() << ", new_h = " << new_h << std::endl;
02498     std::string ostr;
02499     plist("ps", ps, num_ps);
02500     plist("hs", hs, num_ps);
02501     print_pstatus(add_pstat, ostr);
02502     std::cout << ", add_pstat = " << ostr.c_str() << std::endl;
02503     plist("tag_ps", new_ps, new_numps);
02504     plist("tag_hs", new_hs, new_numps);
02505     assert(new_numps <= size());
02506     print_pstatus(new_pstat, ostr);
02507     std::cout << ", tag_pstat=" << ostr.c_str() << std::endl;
02508 */
02509 
02510 #ifndef NDEBUG
02511     {
02512         // check for duplicates in proc list
02513       std::set<unsigned int> dumprocs;
02514       unsigned int dp = 0;
02515       for (; (int) dp < num_ps && -1 != ps[dp]; dp++)
02516         dumprocs.insert(ps[dp]);
02517       assert(dp == dumprocs.size());
02518     }
02519 #endif      
02520 
02521       // if only one sharer and I'm the owner, insert myself in the list;
02522       // otherwise, my data is checked at the end
02523     if (1 == new_numps && !(new_pstat & PSTATUS_NOT_OWNED)) {
02524       new_hs[1] = new_hs[0];
02525       new_ps[1] = new_ps[0];
02526       new_hs[0] = new_h;
02527       new_ps[0] = rank();
02528       new_numps = 2;
02529     }
02530     
02531       // now put passed-in data onto lists
02532     int idx;
02533     for (int i = 0; i < num_ps; i++) {
02534       idx = std::find(&new_ps[0], &new_ps[0] + new_numps, ps[i]) - &new_ps[0];
02535       if (idx < new_numps) {
02536         if (!new_hs[idx] && hs[i])
02537             // h on list is 0 and passed-in h is non-zero, replace it
02538           new_hs[idx] = hs[i];
02539         else
02540           assert(!hs[i] || new_hs[idx] == hs[i]);
02541       }
02542       else {
02543         if (new_numps+1 == MAX_SHARING_PROCS) {
02544           result = MB_FAILURE;
02545           std::ostringstream str;
02546           str << "Exceeded MAX_SHARING_PROCS for " << CN::EntityTypeName( TYPE_FROM_HANDLE(new_h) )
02547               << ' ' << ID_FROM_HANDLE(new_h) << " in process " << rank() << std::endl;
02548           RRA(str.str().c_str());
02549         }
02550         new_ps[new_numps] = ps[i];
02551         new_hs[new_numps] = hs[i];
02552         new_numps++;
02553       }
02554     }
02555 
02556       // add myself, if it isn't there already
02557     idx = std::find(&new_ps[0], &new_ps[0] + new_numps, rank()) - &new_ps[0];
02558     if (idx == new_numps) {
02559       new_ps[new_numps] = rank();
02560       new_hs[new_numps] = new_h;
02561       new_numps++;
02562     }
02563     else if (!new_hs[idx] && new_numps > 2)
02564       new_hs[idx] = new_h;
02565 
02566       // proc list is complete; update for shared, multishared
02567     if (new_numps > 1) {
02568       if (new_numps > 2) new_pstat |= PSTATUS_MULTISHARED;
02569       new_pstat |= PSTATUS_SHARED;
02570     }
02571 
02572 /*    
02573     plist("new_ps", new_ps, new_numps);
02574     plist("new_hs", new_hs, new_numps);
02575     print_pstatus(new_pstat, ostr);
02576     std::cout << ", new_pstat=" << ostr.c_str() << std::endl;
02577     std::cout << std::endl;
02578 */
02579 
02580     result = set_sharing_data(new_h, new_pstat, num_exist, new_numps, &new_ps[0], &new_hs[0]);
02581     RRA("update_remote_data: setting sharing data");
02582 
02583     if (new_pstat & PSTATUS_SHARED) sharedEnts.push_back(new_h);
02584 
02585     return MB_SUCCESS;
02586   }
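  // Example of the merge above: if the existing sharing data lists only proc 7
  // (entity owned here, so new_numps comes back as 1) and the caller passes
  // proc 9 with an as-yet-unknown handle, the entity ends up with
  //   ps = { rank(), 7, 9 }   hs = { new_h, handle_on_7, 0 }
  // pstatus gains PSTATUS_SHARED | PSTATUS_MULTISHARED, and new_h is appended
  // to sharedEnts.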
02587 
02588 ErrorCode ParallelComm::update_remote_data_old(const EntityHandle new_h,
02589                                                const int *ps,
02590                                                const EntityHandle *hs,
02591                                                const int num_ps,
02592                                                const unsigned char add_pstat) 
02593   {
02594     EntityHandle tag_hs[MAX_SHARING_PROCS];
02595     int tag_ps[MAX_SHARING_PROCS];
02596     unsigned char pstat;
02597     // get initial sharing data; tag_ps and tag_hs get terminated with -1 and 0
02598     // in this function, so no need to initialize
02599     unsigned int num_exist;
02600     ErrorCode result = get_sharing_data(new_h, tag_ps, tag_hs, pstat, num_exist);
02601     RRA("");
02602   
02603 #ifndef NDEBUG
02604     {
02605       // check for duplicates in proc list
02606       std::set<unsigned int> dumprocs;
02607       unsigned int dp = 0;
02608       for (; (int) dp < num_ps && -1 != ps[dp]; dp++)
02609         dumprocs.insert(ps[dp]);
02610       assert(dp == dumprocs.size());
02611     }
02612 #endif      
02613 
02614     // add any new sharing data
02615     bool changed = false;
02616     int idx;
02617     if (!num_exist) {
02618       // just take what caller passed
02619       memcpy(tag_ps, ps, num_ps*sizeof(int));
02620       memcpy(tag_hs, hs, num_ps*sizeof(EntityHandle));
02621       num_exist = num_ps;
02622       // if it's only one, hopefully I'm not there yet...
02623       assert("I shouldn't be the only proc there." &&
02624              (1 != num_exist || ps[0] != (int)procConfig.proc_rank()));
02625       changed = true;
02626     }
02627     else {
02628       for (int i = 0; i < num_ps; i++) {
02629         idx = std::find(tag_ps, tag_ps+num_exist, ps[i]) - tag_ps;
02630         if (idx == (int) num_exist) {
02631       
02632           if (num_exist == MAX_SHARING_PROCS) {
02633             std::cerr << "Exceeded MAX_SHARING_PROCS for "
02634                       << CN::EntityTypeName( TYPE_FROM_HANDLE(new_h) )
02635                       << ' ' << ID_FROM_HANDLE(new_h) 
02636                       << " in process " << proc_config().proc_rank()
02637                       << std::endl;
02638             std::cerr.flush();
02639             MPI_Abort( proc_config().proc_comm(), 66 );
02640           }
02641       
02642           // if there's only 1 sharing proc, and it's not me, then
02643           // we'll end up with 3; add me to the front
02644           if (!i && num_ps == 1 && num_exist == 1 &&
02645               ps[0] != (int)procConfig.proc_rank()) {
02646             int j = 1;
02647             // if I own this entity, put me at front, otherwise after first
02648             if (!(pstat & PSTATUS_NOT_OWNED)) {
02649               tag_ps[1] = tag_ps[0];
02650               tag_hs[1] = tag_hs[0];
02651               j = 0;
02652             }
02653             tag_ps[j] = procConfig.proc_rank();
02654             tag_hs[j] = new_h;
02655             num_exist++;
02656           }
02657         
02658           tag_ps[num_exist] = ps[i];
02659           tag_hs[num_exist] = hs[i];
02660           num_exist++;
02661           changed = true;
02662         }
02663         else if (0 == tag_hs[idx]) {
02664           tag_hs[idx] = hs[i];
02665           changed = true;
02666         }
02667         else if (0 != hs[i]) {
02668           assert(hs[i] == tag_hs[idx]);
02669         }
02670       }
02671     }
02672   
02673     // adjust for interface layer if necessary
02674     if (add_pstat & PSTATUS_INTERFACE) {
02675       idx = std::min_element(tag_ps, tag_ps+num_exist) - tag_ps;
02676       if (idx) {
02677         int tag_proc = tag_ps[idx];
02678         tag_ps[idx] = tag_ps[0];
02679         tag_ps[0] = tag_proc;
02680         EntityHandle tag_h = tag_hs[idx];
02681         tag_hs[idx] = tag_hs[0];
02682         tag_hs[0] = tag_h;
02683         changed = true;
02684         if (tag_ps[0] != (int)procConfig.proc_rank()) pstat |= PSTATUS_NOT_OWNED;
02685       }
02686     }
02687     
02688     if (!changed) return MB_SUCCESS;
02689   
02690     assert("interface entities should have > 1 proc" &&
02691            (!(add_pstat & PSTATUS_INTERFACE) || num_exist > 1));
02692     assert("ghost entities should have > 1 proc" &&
02693            (!(add_pstat & PSTATUS_GHOST) || num_exist > 1));
02694   
02695     // if it's multi-shared and we created the entity in this unpack,
02696     // local handle probably isn't in handle list yet
02697     if (num_exist > 2) {
02698       idx = std::find(tag_ps, tag_ps+num_exist, procConfig.proc_rank()) - tag_ps;
02699       assert(idx < (int) num_exist);
02700       if (!tag_hs[idx])
02701         tag_hs[idx] = new_h;
02702     }
02703       
02704     int tag_p;
02705     EntityHandle tag_h;
02706 
02707     // update pstat
02708     pstat |= add_pstat;
02709 
02710     if (num_exist > 2) 
02711       pstat |= (PSTATUS_MULTISHARED | PSTATUS_SHARED);
02712     else if (num_exist > 0)
02713       pstat |= PSTATUS_SHARED;
02714 
02715 //    compare_remote_data(new_h, num_ps, hs, ps, add_pstat,
02716 //                        num_exist, tag_hs, tag_ps, pstat);
02717     
02718     // reset single shared proc/handle if was shared and moving to multi-shared
02719     if (num_exist > 2 && !(pstat & PSTATUS_MULTISHARED) &&
02720         (pstat & PSTATUS_SHARED)) {
02721       // must remove sharedp/h first, which really means set to default value
02722       tag_p = -1;
02723       result = mbImpl->tag_set_data(sharedp_tag(), &new_h, 1, &tag_p);
02724       RRA("Couldn't set sharedp tag.");
02725       tag_h = 0;
02726       result = mbImpl->tag_set_data(sharedh_tag(), &new_h, 1, &tag_h);
02727       RRA("Couldn't set sharedh tag.");
02728     }
02729 
02730     // set sharing tags
02731     if (num_exist > 2) {
02732       std::fill(tag_ps+num_exist, tag_ps+MAX_SHARING_PROCS, -1);
02733       std::fill(tag_hs+num_exist, tag_hs+MAX_SHARING_PROCS, 0);
02734       result = mbImpl->tag_set_data(sharedps_tag(), &new_h, 1, tag_ps);
02735       RRA("Couldn't set sharedps tag.");
02736       result = mbImpl->tag_set_data(sharedhs_tag(), &new_h, 1, tag_hs);
02737       RRA("Couldn't set sharedhs tag.");
02738 
02739 #ifndef NDEBUG
02740       {
02741         // check for duplicates in proc list
02742         std::set<unsigned int> dumprocs;
02743         unsigned int dp = 0;
02744         for (; dp < num_exist && -1 != tag_ps[dp]; dp++)
02745           dumprocs.insert(tag_ps[dp]);
02746         assert(dp == dumprocs.size());
02747       }
02748 #endif      
02749     }
02750     else if (num_exist == 2 || num_exist == 1) {
02751       if (tag_ps[0] == (int) procConfig.proc_rank()) {
02752         assert(2 == num_exist && tag_ps[1] != (int) procConfig.proc_rank());
02753         tag_ps[0] = tag_ps[1];
02754         tag_hs[0] = tag_hs[1];
02755       }
02756       assert(tag_ps[0] != -1 && tag_hs[0] != 0);
02757       result = mbImpl->tag_set_data(sharedp_tag(), &new_h, 1, tag_ps);
02758       RRA("Couldn't set sharedp tag.");
02759       result = mbImpl->tag_set_data(sharedh_tag(), &new_h, 1, tag_hs);
02760       RRA("Couldn't set sharedh tag.");
02761     }
02762 
02763     // now set new pstatus
02764     result = mbImpl->tag_set_data(pstatus_tag(), &new_h, 1, &pstat);
02765     RRA("Couldn't set pstatus tag.");
02766 
02767     if (pstat & PSTATUS_SHARED) sharedEnts.push_back(new_h);
02768   
02769     return MB_SUCCESS;
02770   }
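  /* Descriptive note (sketch of the tag convention used above and queried in
     get_sharing_data below): an entity shared with exactly one other processor
     stores that processor and its remote handle in the single-valued
     sharedp/sharedh tags, while an entity shared among more than two
     processors (PSTATUS_MULTISHARED) stores full lists in the
     MAX_SHARING_PROCS-long sharedps/sharedhs tags, padded with -1 and 0
     respectively. */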
02771 
02772   ErrorCode ParallelComm::get_sharing_data(const Range &entities,
02773                                            std::set<int> &procs,
02774                                            int operation)
02775   {
02776     // get the union or intersection of sharing data for multiple entities
02777 
02778     ErrorCode result;
02779     int sp2[MAX_SHARING_PROCS];
02780     int num_ps;
02781     unsigned char pstat;
02782     std::set<int> tmp_procs;
02783     procs.clear();
02784   
02785     for (Range::const_iterator rit = entities.begin(); rit != entities.end(); rit++) {
02786         
02787       // get sharing procs
02788       result = get_sharing_data(*rit, sp2, NULL, pstat, num_ps);
02789       RRA("Problem getting sharing data in get_sharing_data.");
02790       if (!(pstat & PSTATUS_SHARED) && Interface::INTERSECT == operation) {
02791         procs.clear();
02792         return MB_SUCCESS;
02793       }
02794         
02795       if (rit == entities.begin()) {
02796         std::copy(sp2, sp2+num_ps, std::inserter(procs, procs.begin()));
02797       }
02798       else {
02799         std::sort(sp2, sp2+num_ps);
02800         tmp_procs.clear();
02801         if (Interface::UNION == operation) 
02802           std::set_union(procs.begin(), procs.end(), 
02803                          sp2, sp2+num_ps, std::inserter(tmp_procs, tmp_procs.end()));
02804         else if (Interface::INTERSECT == operation)
02805           std::set_intersection(procs.begin(), procs.end(), 
02806                                 sp2, sp2+num_ps, std::inserter(tmp_procs, tmp_procs.end()));
02807         else {
02808           assert("Unknown operation." && false);
02809           return MB_FAILURE;
02810         }
02811         procs.swap(tmp_procs);
02812       }
02813       if (Interface::INTERSECT == operation && procs.empty()) 
02814         return MB_SUCCESS;
02815     }
02816 
02817     return MB_SUCCESS;
02818   }
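  /* Illustrative use (sketch; 'pcomm' and 'my_ents' are hypothetical caller-side
     names): collect every processor that shares at least one entity in a range.

       std::set<int> sharing_procs;
       ErrorCode rval = pcomm->get_sharing_data(my_ents, sharing_procs, Interface::UNION);
       // with Interface::INTERSECT instead, sharing_procs would end up holding
       // only the processors that share every entity in my_ents
  */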
02819   
02820   ErrorCode ParallelComm::get_sharing_data(const EntityHandle entity,
02821                                            int *ps, 
02822                                            EntityHandle *hs,
02823                                            unsigned char &pstat,
02824                                            unsigned int &num_ps)
02825   {
02826     ErrorCode result = mbImpl->tag_get_data(pstatus_tag(), &entity, 1, &pstat);
02827     RRA("Couldn't get pstatus tag.");
02828     if (pstat & PSTATUS_MULTISHARED) {
02829       result = mbImpl->tag_get_data(sharedps_tag(), &entity, 1, ps);
02830       RRA("Couldn't get sharedps tag.");
02831       if (hs) {
02832         result = mbImpl->tag_get_data(sharedhs_tag(), &entity, 1, hs);
02833         RRA("Couldn't get sharedhs tag.");
02834       }
02835       num_ps = std::find(ps, ps+MAX_SHARING_PROCS, -1) - ps;
02836     }
02837     else if (pstat & PSTATUS_SHARED) {
02838       result = mbImpl->tag_get_data(sharedp_tag(), &entity, 1, ps);
02839       RRA("Couldn't get sharedp tag.");
02840       if (hs) {
02841         result = mbImpl->tag_get_data(sharedh_tag(), &entity, 1, hs);
02842         RRA("Couldn't get sharedh tag.");
02843         hs[1] = 0;
02844       }
02845       // initialize past end of data
02846       ps[1] = -1;
02847       num_ps = 1;
02848     }
02849     else {
02850       ps[0] = -1;
02851       if (hs) hs[0] = 0;
02852       num_ps = 0;
02853     }
02854 
02855     assert(MAX_SHARING_PROCS >= num_ps);
02856   
02857     return MB_SUCCESS;
02858   }
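  /* Illustrative use (sketch; 'pcomm' and 'ent' are hypothetical): query the
     per-entity sharing data stored in the tags described above.

       int ps[MAX_SHARING_PROCS];
       EntityHandle hs[MAX_SHARING_PROCS];
       unsigned char pstat;
       unsigned int nps;
       ErrorCode rval = pcomm->get_sharing_data(ent, ps, hs, pstat, nps);
       // nps == 0: not shared; nps == 1: shared with the single proc in ps[0];
       // nps > 1 (PSTATUS_MULTISHARED): ps/hs hold the full sharing lists
  */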
02859   
02860   ErrorCode ParallelComm::find_existing_entity(const bool is_iface,
02861                                                const int owner_p,
02862                                                const EntityHandle owner_h,
02863                                                const int num_ps,
02864                                                const EntityHandle *connect,
02865                                                const int num_connect,
02866                                                const EntityType this_type,
02867                                                std::vector<EntityHandle> &L2hloc,
02868                                                std::vector<EntityHandle> &L2hrem,
02869                                                std::vector<unsigned int> &L2p,
02870                                                EntityHandle &new_h) 
02871   {
02872     new_h = 0;
02873     if (!is_iface && num_ps > 2) {
02874       for (unsigned int i = 0; i < L2hrem.size(); i++) {
02875         if (L2hrem[i] == owner_h && owner_p == (int) L2p[i]) {
02876           new_h = L2hloc[i];
02877           return MB_SUCCESS;
02878         }
02879       }        
02880     }
02881 
02882     // if we got here and it's a vertex, we don't need to look further
02883     if (MBVERTEX == this_type || !connect || !num_connect) return MB_SUCCESS;
02884   
02885     Range tmp_range;
02886     ErrorCode result = mbImpl->get_adjacencies(connect, num_connect, 
02887                                                CN::Dimension(this_type), false, 
02888                                                tmp_range);
02889     RRA("Problem getting existing entity.");
02890     if (!tmp_range.empty()) {
02891       // found a corresponding entity - return target
02892       new_h = *tmp_range.begin();
02893     }  
02894     else {
02895       new_h = 0;
02896     }
02897 
02898     return MB_SUCCESS;
02899   }
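  /* Descriptive note (sketch): for non-interface entities shared by more than
     two procs, the L2 vectors (local handle / remote handle / owner proc
     triples accumulated earlier in the unpack) are searched first; failing
     that, and for anything with connectivity, an adjacency query on the
     supplied connectivity is used to locate an already-existing local entity
     of the requested dimension. */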
02900 
02901   ErrorCode ParallelComm::get_local_handles(const Range &remote_handles,
02902                                             Range &local_handles,
02903                                             const std::vector<EntityHandle> &new_ents) 
02904   {
02905     std::vector<EntityHandle> rh_vec;
02906     rh_vec.reserve(remote_handles.size());
02907     std::copy(remote_handles.begin(), remote_handles.end(), std::back_inserter(rh_vec));
02908     ErrorCode result = get_local_handles(&rh_vec[0], remote_handles.size(), new_ents);
02909     std::copy(rh_vec.begin(), rh_vec.end(), range_inserter(local_handles));
02910     return result;
02911   }
02912   
02913   ErrorCode ParallelComm::get_local_handles(EntityHandle *from_vec, 
02914                                             int num_ents,
02915                                             const Range &new_ents) 
02916   {
02917     std::vector<EntityHandle> tmp_ents;
02918     std::copy(new_ents.begin(), new_ents.end(), std::back_inserter(tmp_ents));
02919     return get_local_handles(from_vec, num_ents, tmp_ents);
02920   }
02921 
02922   ErrorCode ParallelComm::get_local_handles(EntityHandle *from_vec,
02923                                             int num_ents,
02924                                             const std::vector<EntityHandle> &new_ents) 
02925   {
02926     for (int i = 0; i < num_ents; i++) {
02927       if (TYPE_FROM_HANDLE(from_vec[i]) == MBMAXTYPE) {
02928         assert(ID_FROM_HANDLE(from_vec[i]) < (int) new_ents.size());
02929         from_vec[i] = new_ents[ID_FROM_HANDLE(from_vec[i])];
02930       }
02931     }
02932   
02933     return MB_SUCCESS;
02934   }
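  /* Descriptive note (sketch of the handle encoding handled above): remote
     handles referring to entities created during this unpack are packed with
     type MBMAXTYPE and an id that is an index into new_ents, so a packed
     handle of type MBMAXTYPE with id 5 is replaced by new_ents[5]; handles of
     any other type are assumed to already be valid local handles and are left
     untouched. */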
02935 
02936   /*
02937     template <typename T> void
02938     insert_in_array( T* array, size_t array_size, size_t location, T value )
02939     {
02940     assert( location+1 < array_size );
02941     for (size_t i = array_size-1; i > location; --i)
02942     array[i] = array[i-1];
02943     array[location] = value;
02944     }
02945   */
02946 
02947   ErrorCode ParallelComm::pack_range_map(Range &key_range, EntityHandle val_start,
02948                                          HandleMap &handle_map) 
02949   {
02950     for (Range::const_pair_iterator key_it = key_range.const_pair_begin(); 
02951          key_it != key_range.const_pair_end(); key_it++) {
02952       int tmp_num = (*key_it).second - (*key_it).first + 1;
02953       handle_map.insert((*key_it).first, val_start, tmp_num);
02954       val_start += tmp_num;
02955     }
02956 
02957     return MB_SUCCESS;
02958   }
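  /* Worked example (sketch): if key_range holds the handle pairs [10,14] and
     [20,22] and val_start is 100, the map built above records 10..14 -> 100..104
     and 20..22 -> 105..107, i.e. values are assigned contiguously in the order
     the key pairs are traversed. */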
02959 
02960   ErrorCode ParallelComm::pack_sets(Range &entities,
02961                                     Buffer *buff,
02962                                     const bool store_remote_handles,
02963                                     const int to_proc)
02964   {
02965     // SETS:
02966     // . #sets
02967     // . for each set:
02968     //   - options[#sets] (unsigned int)
02969     //   - if (unordered) set range 
02970     //   - else if ordered
02971     //     . #ents in set
02972     //     . handles[#ents]
02973     //   - #parents
02974     //   - if (#parents) handles[#parents]
02975     //   - #children
02976     //   - if (#children) handles[#children]
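    //
    // Example (sketch): as packed by the code below, which writes every set's
    // contents as a count followed by a handle vector, a single set with
    // members {h1, h2, h3}, no parents/children, and store_remote_handles ==
    // false would produce
    //   [1][options[0]][0 (no unique ids)][3][h1 h2 h3][0][0]
    // where the two trailing zeros are the parent and child counts.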
02977   
02978     // now the sets; assume any sets the application wants to pass are in the entities list
02979     ErrorCode result;
02980     Range all_sets = entities.subset_by_type(MBENTITYSET);
02981 
02982     int buff_size = estimate_sets_buffer_size(all_sets, store_remote_handles);
02983     buff->check_space(buff_size);
02984 
02985     // number of sets
02986     PACK_INT(buff->buff_ptr, all_sets.size());
02987 
02988     // options for all sets
02989     std::vector<unsigned int> options(all_sets.size());
02990     Range::iterator rit;
02991     std::vector<EntityHandle> members;
02992     int i;
02993     for (rit = all_sets.begin(), i = 0; rit != all_sets.end(); rit++, i++) {
02994       result = mbImpl->get_meshset_options(*rit, options[i]);
02995       RRA("Failed to get meshset options.");
02996     }
02997     buff->check_space(all_sets.size()*sizeof(unsigned int));
02998     PACK_VOID(buff->buff_ptr, &options[0], all_sets.size()*sizeof(unsigned int));
02999 
03000     // pack parallel geometry unique id
03001     if (!all_sets.empty()) {
03002       Tag uid_tag;
03003       int n_sets = all_sets.size();
03004       bool b_pack = false;
03005       std::vector<int> id_data(n_sets);
03006       result = mbImpl->tag_get_handle("PARALLEL_UNIQUE_ID", 1, MB_TYPE_INTEGER, 
03007                                       uid_tag, MB_TAG_SPARSE|MB_TAG_CREAT);
03008       if (MB_SUCCESS != result) {
03009         RRA("Trouble creating parallel geometry unique id tag.");
03010       }
03011       result = mbImpl->tag_get_data(uid_tag, all_sets, &id_data[0]);
03012       if (MB_TAG_NOT_FOUND != result) {
03013         RRA("Trouble getting parallel geometry unique ids.");
03014         for (i = 0; i < n_sets; i++) {
03015           if (id_data[i] != 0) {
03016             b_pack = true;
03017             break;
03018           }
03019         }
03020       }
03021     
03022       if (b_pack) { // if you find
03023         buff->check_space((n_sets + 1)*sizeof(int));
03024         PACK_INT(buff->buff_ptr, n_sets);
03025         PACK_INTS(buff->buff_ptr, &id_data[0], n_sets);
03026       }
03027       else {
03028         buff->check_space(sizeof(int));
03029         PACK_INT(buff->buff_ptr, 0);
03030       }
03031     }
03032   
03033     // vectors/ranges
03034     std::vector<EntityHandle> entities_vec(entities.size());
03035     std::copy(entities.begin(), entities.end(), entities_vec.begin());
03036     for (rit = all_sets.begin(), i = 0; rit != all_sets.end(); rit++, i++) {
03037       members.clear();
03038       result = mbImpl->get_entities_by_handle(*rit, members);
03039       RRA("Failed to get entities in ordered set.");
03040       result = get_remote_handles(store_remote_handles, &members[0],
03041                                   &members[0], members.size(),
03042                                   to_proc, entities_vec);
03043       RRA("Failed in get_remote_handles.");
03044       buff->check_space(members.size()*sizeof(EntityHandle)+sizeof(int));
03045       PACK_INT(buff->buff_ptr, members.size());
03046       PACK_EH(buff->buff_ptr, &members[0], members.size());
03047     }
03048 
03049     // pack parent/child sets
03050     if (!store_remote_handles) { // parent/child links are only packed when not storing remote handles
03051       // pack numbers of parents/children
03052       unsigned int tot_pch = 0;
03053       int num_pch;
03054       buff->check_space(2*all_sets.size()*sizeof(int));
03055       for (rit = all_sets.begin(), i = 0; rit != all_sets.end(); rit++, i++) {
03056         // pack parents
03057         result = mbImpl->num_parent_meshsets(*rit, &num_pch);
03058         RRA("Failed to get num parents.");
03059         PACK_INT(buff->buff_ptr, num_pch);
03060         tot_pch += num_pch;
03061         result = mbImpl->num_child_meshsets(*rit, &num_pch);
03062         RRA("Failed to get num children.");
03063         PACK_INT(buff->buff_ptr, num_pch);
03064         tot_pch += num_pch;
03065       }
03066 
03067       // now pack actual parents/children
03068       members.clear();
03069       members.reserve(tot_pch);
03070       std::vector<EntityHandle> tmp_pch;
03071       for (rit = all_sets.begin(), i = 0; rit != all_sets.end(); rit++, i++) {
03072 
03073         result = mbImpl->get_parent_meshsets(*rit, tmp_pch);
03074         RRA("Failed to get parents.");
03075         std::copy(tmp_pch.begin(), tmp_pch.end(), std::back_inserter(members));
03076         tmp_pch.clear();
03077         result = mbImpl->get_child_meshsets(*rit, tmp_pch);
03078         RRA("Failed to get children.");
03079         std::copy(tmp_pch.begin(), tmp_pch.end(), std::back_inserter(members));
03080         tmp_pch.clear();
03081       }
03082       assert(members.size() == tot_pch);
03083       if (!members.empty()) {
03084         result = get_remote_handles(store_remote_handles,
03085                                     &members[0], &members[0], 
03086                                     members.size(), to_proc,
03087                                     entities_vec);
03088         RRA("Trouble getting remote handles for set parent/child sets.");
03089 #ifndef NDEBUG
03090         // check that all handles are either sets or maxtype
03091         for (unsigned int __j = 0; __j < members.size(); __j++)
03092           assert((TYPE_FROM_HANDLE(members[__j]) == MBMAXTYPE &&
03093                   ID_FROM_HANDLE(members[__j]) < (int)entities.size()) ||
03094                  TYPE_FROM_HANDLE(members[__j]) == MBENTITYSET);
03095 #endif        
03096         buff->check_space(members.size()*sizeof(EntityHandle));
03097         PACK_EH(buff->buff_ptr, &members[0], members.size());
03098       }
03099     }
03100     else {
03101       buff->check_space(2*all_sets.size()*sizeof(int));
03102       for (rit = all_sets.begin(); rit != all_sets.end(); rit++) {
03103         PACK_INT(buff->buff_ptr, 0);
03104         PACK_INT(buff->buff_ptr, 0);
03105       }
03106     }
03107 
03108     // pack the handles
03109     if (store_remote_handles && !all_sets.empty()) {
03110       buff_size = RANGE_SIZE(all_sets);
03111       buff->check_space(buff_size);
03112       PACK_RANGE(buff->buff_ptr, all_sets);
03113     }
03114 
03115     myDebug->tprintf(4, "Done packing sets.\n");
03116 
03117     buff->set_stored_size();
03118   
03119     return MB_SUCCESS;
03120   }
03121 
03122   ErrorCode ParallelComm::unpack_sets(unsigned char *&buff_ptr,
03123                                       std::vector<EntityHandle> &entities,
03124                                       const bool store_remote_handles,
03125                                       const int from_proc)
03126   {
03127   
03128     // now the sets; assume any sets the application wants to pass are in the entities list
03129     ErrorCode result;
03130 
03131     bool no_sets = (entities.empty() || (mbImpl->type_from_handle(*entities.rbegin()) == MBENTITYSET));
03132 
03133     Range new_sets;
03134     int num_sets;
03135     UNPACK_INT(buff_ptr, num_sets);
03136 
03137     if (!num_sets) return MB_SUCCESS;
03138 
03139     int i;
03140     Range::const_iterator rit;
03141     std::vector<EntityHandle> members;
03142     int num_ents;
03143     std::vector<unsigned int> options_vec(num_sets);
03144     // option value
03145     if (num_sets)
03146       UNPACK_VOID(buff_ptr, &options_vec[0], num_sets*sizeof(unsigned int));
03147 
03148     // unpack parallel geometry unique id
03149     int n_uid;
03150     UNPACK_INT(buff_ptr, n_uid);
03151     if (n_uid > 0 && n_uid != num_sets) {
03152       std::cerr << "The number of parallel geometry unique ids should match the number of sets."
03153                 << std::endl;
03154     }
03155 
03156     if (n_uid > 0) { // if parallel geometry unique id is packed
03157       std::vector<int> uids(n_uid);
03158       UNPACK_INTS(buff_ptr, &uids[0], n_uid);
03159 
03160       Tag uid_tag;
03161       result = mbImpl->tag_get_handle("PARALLEL_UNIQUE_ID", 1, MB_TYPE_INTEGER,
03162                                       uid_tag, MB_TAG_SPARSE|MB_TAG_CREAT);
03163       if (MB_SUCCESS != result) {
03164         RRA("Trouble creating parallel geometry unique id tag.");
03165       }
03166 
03167       // find existing sets
03168       for (i = 0; i < n_uid; i++) {
03169         EntityHandle set_handle;
03170         Range temp_sets;
03171         void* tag_vals[] = { &uids[i] };
03172         if (uids[i] > 0) { 
03173           result = mbImpl->get_entities_by_type_and_tag(0, MBENTITYSET,
03174                                                         &uid_tag, tag_vals,
03175                                                         1, temp_sets);
03176         }
03177         if (!temp_sets.empty()) { // existing set
03178           set_handle = *temp_sets.begin();
03179         }
03180         else { // create a new set
03181           result = mbImpl->create_meshset(options_vec[i], set_handle);
03182           RRA("Failed to create set in unpack.");
03183         
03184           result = mbImpl->tag_set_data(uid_tag, &set_handle, 1, &uids[i]);
03185           RRA("Couldn't set parallel geometry unique ids.");
03186         }
03187         new_sets.insert(set_handle);
03188       }
03189     }
03190     else {
03191       // create sets
03192       for (i = 0; i < num_sets; i++) {
03193         EntityHandle set_handle;
03194         result = mbImpl->create_meshset(options_vec[i], set_handle);
03195         RRA("Failed to create set in unpack.");
03196       
03197         // make sure new sets handles are monotonically increasing
03198         assert(new_sets.empty() || set_handle > *new_sets.rbegin());
03199       
03200         new_sets.insert(set_handle);
03201       }
03202     }
03203 
03204     std::copy(new_sets.begin(), new_sets.end(), std::back_inserter(entities));
03205     // only need to sort if we came in with no sets on the end
03206     if (!no_sets) std::sort(entities.begin(), entities.end());
03207   
03208     for (rit = new_sets.begin(), i = 0; rit != new_sets.end(); rit++, i++) {
03209       // unpack entities as vector, with length
03210       UNPACK_INT(buff_ptr, num_ents);
03211       members.resize(num_ents);
03212       if (num_ents) UNPACK_EH(buff_ptr, &members[0], num_ents);
03213       result = get_local_handles(&members[0], num_ents, entities);
03214       RRA("Failed to get local handles for ordered set contents.");
03215       result = mbImpl->add_entities(*rit, &members[0], num_ents);
03216       RRA("Failed to add ents to ordered set in unpack.");
03217     }
03218 
03219     std::vector<int> num_pch(2*new_sets.size());
03220     std::vector<int>::iterator vit;
03221     int tot_pch = 0;
03222     for (vit = num_pch.begin(); vit != num_pch.end(); vit++) {
03223       UNPACK_INT(buff_ptr, *vit);
03224       tot_pch += *vit;
03225     }
03226   
03227     members.resize(tot_pch);
03228     UNPACK_EH(buff_ptr, &members[0], tot_pch);
03229     result = get_local_handles(&members[0], tot_pch, entities);
03230     RRA("Couldn't get local handle for parent/child sets.");
03231 
03232     int num = 0;
03233     EntityHandle *mem_ptr = &members[0];
03234     for (rit = new_sets.begin(); rit != new_sets.end(); rit++) {
03235       // unpack parents/children
03236       int num_par = num_pch[num++], num_child = num_pch[num++];
03237       if (num_par+num_child) {
03238         for (i = 0; i < num_par; i++) {
03239           assert(0 != mem_ptr[i]);
03240           result = mbImpl->add_parent_meshset(*rit, mem_ptr[i]);
03241           RRA("Failed to add parent to set in unpack.");
03242         }
03243         mem_ptr += num_par;
03244         for (i = 0; i < num_child; i++) {
03245           assert(0 != mem_ptr[i]);
03246           result = mbImpl->add_child_meshset(*rit, mem_ptr[i]);
03247           RRA("Failed to add child to set in unpack.");
03248         }
03249         mem_ptr += num_child;
03250       }
03251     }
03252 
03253     // unpack source handles
03254     Range dum_range;
03255     if (store_remote_handles && !new_sets.empty()) {
03256       UNPACK_RANGE(buff_ptr, dum_range);
03257       result = update_remote_data(new_sets, dum_range, from_proc, 0);
03258       RRA("Couldn't set sharing data for sets");
03259     }
03260 
03261     myDebug->tprintf(4, "Done unpacking sets.\n");
03262 
03263     return MB_SUCCESS;
03264   }
03265 
03266   ErrorCode ParallelComm::pack_adjacencies(Range& /*entities*/,
03267                                            Range::const_iterator& /*start_rit*/,
03268                                            Range& /*whole_range*/,
03269                                            unsigned char*& /*buff_ptr*/,
03270                                            int& /*count*/,
03271                                            const bool /*just_count*/,
03272                                            const bool /*store_handles*/,
03273                                            const int /*to_proc*/)
03274   {
03275     return MB_FAILURE;
03276   }
03277 
03278   ErrorCode ParallelComm::unpack_adjacencies(unsigned char*& /*buff_ptr*/,
03279                                              Range& /*entities*/,
03280                                              const bool /*store_handles*/,
03281                                              const int /*from_proc*/)
03282   {
03283     return MB_FAILURE;
03284   }
03285 
03286   ErrorCode ParallelComm::pack_tags(Range &entities,
03287                                     const std::vector<Tag> &src_tags,
03288                                     const std::vector<Tag> &dst_tags,
03289                                     const std::vector<Range> &tag_ranges,
03290                                     Buffer *buff,
03291                                     const bool store_remote_handles,
03292                                     const int to_proc)
03293   {
03294   
03295 
03296     ErrorCode result;
03297     std::vector<Tag>::const_iterator tag_it, dst_it;
03298     std::vector<Range>::const_iterator rit;
03299     int count = 0;
03300   
03301     for (tag_it = src_tags.begin(), rit = tag_ranges.begin(); 
03302          tag_it != src_tags.end(); tag_it++, rit++) {
03303 
03304       result = packed_tag_size( *tag_it, *rit, count );
03305       if (MB_SUCCESS != result)
03306         return result;
03307     }
03308     
03309     // number of tags
03310     count += sizeof(int);
03311 
03312     buff->check_space(count);
03313   
03314     PACK_INT(buff->buff_ptr, src_tags.size());
03315 
03316     std::vector<EntityHandle> entities_vec(entities.size());
03317     std::copy(entities.begin(), entities.end(), entities_vec.begin());
03318     
03319     for (tag_it = src_tags.begin(), dst_it = dst_tags.begin(), rit = tag_ranges.begin(); 
03320          tag_it != src_tags.end(); tag_it++, dst_it++, rit++) {
03321     
03322       result = pack_tag( *tag_it, *dst_it, *rit, entities_vec, buff,
03323                          store_remote_handles, to_proc );
03324       if (MB_SUCCESS != result)
03325         return result;
03326     }
03327   
03328     myDebug->tprintf(4, "Done packing tags.\n");
03329 
03330     buff->set_stored_size();
03331   
03332     return MB_SUCCESS;
03333   }
03334          
03335 
03336   ErrorCode ParallelComm::packed_tag_size( Tag tag,
03337                                            const Range &tagged_entities,
03338                                            int &count )
03339   {
03340     // for dense tags, compute size assuming all entities have that tag
03341     // for sparse tags, get number of entities w/ that tag to compute size
03342 
03343     std::vector<int> var_len_sizes;
03344     std::vector<const void*> var_len_values;
03345     
03346     // default value
03347     count += sizeof(int);
03348     if (NULL != tag->get_default_value()) 
03349       count += tag->get_default_value_size();
03350 
03351     // size, type, data type
03352     count += 3*sizeof(int);
03353 
03354     // name
03355     count += sizeof(int);
03356     count += tag->get_name().size();
03357 
03358     // range of tag
03359     count += sizeof(int) + tagged_entities.size() * sizeof(EntityHandle);
03360 
03361     if (tag->get_size() == MB_VARIABLE_LENGTH) {
03362       const int num_ent = tagged_entities.size();
03363       // send a tag size for each entity
03364       count += num_ent * sizeof(int);
03365       // send tag data for each entity
03366       var_len_sizes.resize( num_ent );
03367       var_len_values.resize( num_ent );
03368       ErrorCode result = tag->get_data( sequenceManager,
03369                                         errorHandler,
03370                                         tagged_entities, 
03371                                         &var_len_values[0], 
03372                                         &var_len_sizes[0] );
03373       RRA("Failed to get lengths of variable-length tag values.");
03374       count += std::accumulate( var_len_sizes.begin(), var_len_sizes.end(), 0 );
03375     }
03376     else {
03377       // tag data values for range or vector
03378       count += tagged_entities.size() * tag->get_size();
03379     }
03380   
03381     return MB_SUCCESS;
03382   }
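  /* Worked example (sketch, hypothetical tag): for a fixed-size dense tag named
     "MATL" (4 characters) storing one int per entity, with no default value and
     100 tagged entities, the count computed above is
       sizeof(int)                              (default-value size)
     + 3*sizeof(int)                            (size, type, data type)
     + sizeof(int) + 4                          (name length + name)
     + sizeof(int) + 100*sizeof(EntityHandle)   (entity handle list)
     + 100*4                                    (fixed-size tag values)
  */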
03383 
03384 
03385   ErrorCode ParallelComm::pack_tag( Tag src_tag,
03386                                     Tag dst_tag,
03387                                     const Range &tagged_entities,
03388                                     const std::vector<EntityHandle> &whole_vec,
03389                                     Buffer *buff,
03390                                     const bool store_remote_handles,
03391                                     const int to_proc )
03392   {
03393     ErrorCode result;
03394     std::vector<int> var_len_sizes;
03395     std::vector<const void*> var_len_values;
03396 
03397     if (src_tag != dst_tag) {
03398       if (dst_tag->get_size() != src_tag->get_size())
03399         return MB_TYPE_OUT_OF_RANGE;
03400       if (dst_tag->get_data_type() != src_tag->get_data_type() && 
03401           dst_tag->get_data_type() != MB_TYPE_OPAQUE &&
03402           src_tag->get_data_type() != MB_TYPE_OPAQUE)
03403         return MB_TYPE_OUT_OF_RANGE;
03404     }
03405     
03406     // size, type, data type
03407     buff->check_space(3*sizeof(int));
03408     PACK_INT(buff->buff_ptr, src_tag->get_size());
03409     TagType this_type;
03410     result = mbImpl->tag_get_type(dst_tag, this_type);
03411     PACK_INT(buff->buff_ptr, (int)this_type);
03412     DataType data_type = src_tag->get_data_type();
03413     PACK_INT(buff->buff_ptr, (int)data_type);
03414     int type_size = TagInfo::size_from_data_type(data_type);
03415 
03416     // default value
03417     if (NULL == src_tag->get_default_value()) {
03418       buff->check_space(sizeof(int));
03419       PACK_INT(buff->buff_ptr, 0);
03420     }
03421     else {
03422       buff->check_space(src_tag->get_default_value_size());
03423       PACK_BYTES(buff->buff_ptr, src_tag->get_default_value(), src_tag->get_default_value_size());
03424     }
03425 
03426     // name
03427     buff->check_space(src_tag->get_name().size());
03428     PACK_BYTES(buff->buff_ptr, dst_tag->get_name().c_str(), dst_tag->get_name().size());
03429 
03430     myDebug->tprintf(4, "Packing tag \"%s\"", src_tag->get_name().c_str());
03431     if (src_tag != dst_tag)
03432       myDebug->tprintf(4, " (as tag \"%s\")", dst_tag->get_name().c_str());
03433     myDebug->tprintf(4, "\n");
03434 
03435     // pack entities
03436     buff->check_space(tagged_entities.size()*sizeof(EntityHandle)+sizeof(int));
03437     PACK_INT(buff->buff_ptr, tagged_entities.size());
03438     std::vector<EntityHandle> dum_tagged_entities(tagged_entities.size());
03439     result = get_remote_handles(store_remote_handles,
03440                                 tagged_entities, &dum_tagged_entities[0], to_proc,
03441                                 whole_vec);
03442     if (myDebug->get_verbosity() == 3) {
03443       if (MB_SUCCESS != result) {
03444         std::cerr << "Trouble getting remote handles for tagged entities:" << std::endl;
03445         tagged_entities.print("  ");
03446       }
03447     }
03448     else
03449       RRA("Trouble getting remote handles for tagged entities.");
03450 
03451     PACK_EH(buff->buff_ptr, &dum_tagged_entities[0], dum_tagged_entities.size());
03452 
03453     const size_t num_ent = tagged_entities.size();
03454     if (src_tag->get_size() == MB_VARIABLE_LENGTH) {
03455       var_len_sizes.resize( num_ent, 0 );
03456       var_len_values.resize( num_ent, 0 );
03457       result = mbImpl->tag_get_by_ptr(src_tag, tagged_entities, &var_len_values[0], 
03458                                       &var_len_sizes[0] );
03459       RRA("Failed to get variable-length tag data in pack_tags.");
03460       buff->check_space(num_ent*sizeof(int));
03461       PACK_INTS(buff->buff_ptr, &var_len_sizes[0], num_ent);
03462       for (unsigned int i = 0; i < num_ent; ++i) {
03463         buff->check_space(var_len_sizes[i]);
03464         PACK_VOID(buff->buff_ptr, var_len_values[i], type_size*var_len_sizes[i]);
03465       }
03466     }
03467     else {
03468       buff->check_space(num_ent * src_tag->get_size());
03469       // should be ok to read directly into buffer, since tags are untyped and
03470       // handled by memcpy
03471       result = mbImpl->tag_get_data(src_tag, tagged_entities, buff->buff_ptr);
03472       RRA("Failed to get tag data in pack_tags.");
03473       buff->buff_ptr += num_ent * src_tag->get_size();
03474       PC(num_ent*src_tag->get_size(), " void");
03475     }
03476 
03477     return MB_SUCCESS;
03478   }
03479 
03480   ErrorCode ParallelComm::get_tag_send_list( const Range& whole_range,
03481                                              std::vector<Tag>& all_tags,
03482                                              std::vector<Range>& tag_ranges )
03483   {
03484     std::vector<Tag> tmp_tags;
03485     ErrorCode result = mbImpl->tag_get_tags(tmp_tags);
03486     RRA("Failed to get tags in pack_tags.");
03487 
03488     std::vector<Tag>::iterator tag_it;
03489     for (tag_it = tmp_tags.begin(); tag_it != tmp_tags.end(); tag_it++) {
03490       std::string tag_name;
03491       result = mbImpl->tag_get_name(*tag_it, tag_name);
03492       if (tag_name.c_str()[0] == '_' && tag_name.c_str()[1] == '_')
03493         continue;
03494 
03495       Range tmp_range;
03496       result = (*tag_it)->get_tagged_entities(sequenceManager, tmp_range);
03497       RRA("Failed to get entities for tag in pack_tags.");
03498       tmp_range = intersect( tmp_range, whole_range);
03499 
03500       if (tmp_range.empty()) continue;
03501         
03502       // ok, we'll be sending this tag
03503       all_tags.push_back( *tag_it );
03504       tag_ranges.push_back( Range() );
03505       tag_ranges.back().swap( tmp_range );
03506     }
03507   
03508     return MB_SUCCESS;
03509   }
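  /* Illustrative call (sketch; 'send_ents' is a hypothetical range, and the
     call is written as it would appear inside ParallelComm): build the list of
     tags to send, and the subset of send_ents actually tagged with each; tags
     whose names start with "__" are treated as internal and skipped above.

       std::vector<Tag> send_tags;
       std::vector<Range> send_tag_ranges;
       ErrorCode rval = get_tag_send_list(send_ents, send_tags, send_tag_ranges);
  */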
03510 
03511 
03512 
03513   ErrorCode ParallelComm::unpack_tags(unsigned char *&buff_ptr,
03514                                       std::vector<EntityHandle> &entities,
03515                                       const bool /*store_remote_handles*/,
03516                                       const int /*from_proc*/,
03517                                       const MPI_Op * const mpi_op)
03518   {
03519     // tags
03520     // get all the tags
03521     // for dense tags, compute size assuming all entities have that tag
03522     // for sparse tags, get number of entities w/ that tag to compute size
03523 
03524     ErrorCode result;
03525   
03526     int num_tags;
03527     UNPACK_INT(buff_ptr, num_tags);
03528     std::vector<EntityHandle> tag_ents;
03529     std::vector<const void*> var_len_vals;
03530     std::vector<unsigned char*> dum_vals;
03531     std::vector<EntityHandle> dum_ehvals;
03532 
03533     for (int i = 0; i < num_tags; i++) {
03534     
03535       // tag handle
03536       Tag tag_handle;
03537 
03538       // size, data type
03539       int tag_size, tag_data_type, tag_type;
03540       UNPACK_INT(buff_ptr, tag_size);
03541       UNPACK_INT(buff_ptr, tag_type);
03542       UNPACK_INT(buff_ptr, tag_data_type);
03543       
03544       // default value
03545       int def_val_size;
03546       UNPACK_INT(buff_ptr, def_val_size);
03547       void *def_val_ptr = NULL;
03548       if (def_val_size) {
03549         def_val_ptr = buff_ptr;
03550         buff_ptr += def_val_size;
03551         UPC(def_val_size, " void");
03552       }
03553     
03554       // name
03555       int name_len;
03556       UNPACK_INT(buff_ptr, name_len);
03557       std::string tag_name( reinterpret_cast<char*>(buff_ptr), name_len );
03558       buff_ptr += name_len;
03559       UPC(name_len, " chars");
03560     
03561       myDebug->tprintf(4, "Unpacking tag %s\n", tag_name.c_str());
03562 
03563       // create the tag
03564       if (tag_size == MB_VARIABLE_LENGTH) 
03565         result = mbImpl->tag_get_handle( tag_name.c_str(), def_val_size, (DataType)tag_data_type,
03566                                          tag_handle, MB_TAG_VARLEN|MB_TAG_CREAT|MB_TAG_BYTES|tag_type, 
03567                                          def_val_ptr );
03568       else
03569         result = mbImpl->tag_get_handle( tag_name.c_str(), tag_size, (DataType) tag_data_type,
03570                                          tag_handle, MB_TAG_CREAT|MB_TAG_BYTES|tag_type, 
03571                                          def_val_ptr);
03572       if (MB_SUCCESS != result) return result;
03573 
03574       // get handles and convert to local handles
03575       int num_ents;
03576       UNPACK_INT(buff_ptr, num_ents);
03577       std::vector<EntityHandle> dum_ents(num_ents);
03578       UNPACK_EH(buff_ptr, &dum_ents[0], num_ents);
03579 
03580       // in this case handles are indices into new entity range; need to convert
03581       // to local handles
03582       result = get_local_handles(&dum_ents[0], num_ents, entities);
03583       RRA("Unable to convert to local handles.");
03584 
03585       // if it's a handle type, also convert tag vals in-place in buffer
03586       if (MB_TYPE_HANDLE == tag_type) {
03587         dum_ehvals.resize(num_ents);
03588         UNPACK_EH(buff_ptr, &dum_ehvals[0], num_ents);
03589         result = get_local_handles(&dum_ehvals[0], num_ents, entities);
03590         RRA("Failed to get local handles for tag vals.");
03591       }
03592 
03593       DataType data_type;
03594       mbImpl->tag_get_data_type( tag_handle, data_type );
03595       int type_size = TagInfo::size_from_data_type(data_type);
03596 
03597       if (!dum_ents.empty()) {
03598         if (tag_size == MB_VARIABLE_LENGTH) {
03599           // Be careful of alignment here.  If the integers are aligned
03600           // in the buffer, we can use them directly.  Otherwise we must
03601           // copy them.
03602           std::vector<int> var_lengths(num_ents);
03603           UNPACK_INTS(buff_ptr, &var_lengths[0], num_ents);
03604           UPC(sizeof(int) * num_ents, " void");
03605 
03606           // get pointers into buffer for each tag value
03607           var_len_vals.resize(num_ents);
03608           for (std::vector<EntityHandle>::size_type j = 0; 
03609                j < (std::vector<EntityHandle>::size_type) num_ents; ++j) {
03610             var_len_vals[j] = buff_ptr;
03611             buff_ptr += var_lengths[j]*type_size;
03612             UPC(var_lengths[j], " void");
03613           }
03614           result = mbImpl->tag_set_by_ptr( tag_handle, &dum_ents[0], num_ents,
03615                                            &var_len_vals[0], &var_lengths[0]);
03616           RRA("Trouble setting tag data when unpacking variable-length tag.");
03617         }
03618         else {
03619           // get existing values of dst tag
03620           dum_vals.resize(tag_size*num_ents);
03621           if (mpi_op) {
03622             int tag_length;
03623             result = mbImpl->tag_get_length(tag_handle, tag_length);
03624             RRA("Couldn't get tag length");
03625             result = mbImpl->tag_get_data(tag_handle, &dum_ents[0], num_ents, &dum_vals[0]);
03626             RRA("Couldn't get existing value of dst tag on entities.");
03627             result = reduce_void(tag_data_type, *mpi_op, tag_length*num_ents, &dum_vals[0], buff_ptr);
03628             RRA("Failed to perform mpi op on dst tags.");
03629           }
03630           result = mbImpl->tag_set_data(tag_handle, &dum_ents[0],
03631                                         num_ents, buff_ptr);
03632           RRA("Trouble setting range-based tag data when unpacking tag.");
03633           buff_ptr += num_ents * tag_size;
03634           UPC(num_ents * tag_size, " void");
03635         }
03636       }
03637     }
03638   
03639     myDebug->tprintf(4, "Done unpacking tags.\n");
03640 
03641     return MB_SUCCESS;
03642   }
03643 
03644 template<class T> T LAND(const T &arg1, const T &arg2) {return arg1&&arg2;}
03645 template<class T> T LOR(const T& arg1, const T& arg2) {return arg1||arg2;}
03646 template<class T> T LXOR(const T& arg1, const T& arg2) {return ((arg1&&!arg2)||(!arg1&&arg2));}
03647 template<class T> T MAX(const T& arg1, const T& arg2) {return (arg1 > arg2 ? arg1 : arg2);}
03648 template<class T> T MIN(const T& arg1, const T& arg2) {return (arg1 < arg2 ? arg1 : arg2);}
03649 template<class T> T ADD(const T &arg1, const T &arg2) {return arg1 + arg2;}
03650 template<class T> T MULT(const T &arg1, const T &arg2) {return arg1 * arg2;}
03651 
03652 template <class T>
03653 ErrorCode ParallelComm::reduce(const MPI_Op mpi_op, int num_ents, void *old_vals, void *new_vals) 
03654 {
03655   T *old_tmp = reinterpret_cast<T*>(old_vals);
03656   T *new_tmp = reinterpret_cast<T*>(new_vals);
03657   
03658   if (mpi_op == MPI_SUM) 
03659     std::transform(old_tmp, old_tmp + num_ents, new_tmp, new_tmp, ADD<T>);
03660   else if (mpi_op == MPI_PROD) 
03661     std::transform(old_tmp, old_tmp + num_ents, new_tmp, new_tmp, MULT<T>);
03662   else if (mpi_op == MPI_MAX) 
03663     std::transform(old_tmp, old_tmp + num_ents, new_tmp, new_tmp, MAX<T>);
03664   else if (mpi_op == MPI_MIN) 
03665     std::transform(old_tmp, old_tmp + num_ents, new_tmp, new_tmp, MIN<T>);
03666   else if (mpi_op == MPI_LAND) 
03667     std::transform(old_tmp, old_tmp + num_ents, new_tmp, new_tmp, LAND<T>);
03668   else if (mpi_op == MPI_LOR) 
03669     std::transform(old_tmp, old_tmp + num_ents, new_tmp, new_tmp, LOR<T>);
03670   else if (mpi_op == MPI_LXOR) 
03671     std::transform(old_tmp, old_tmp + num_ents, new_tmp, new_tmp, LXOR<T>);
03672   else if (mpi_op == MPI_BAND || mpi_op == MPI_BOR || mpi_op == MPI_BXOR) {
03673     std::cerr << "Bitwise operations not allowed in tag reductions." << std::endl;
03674     return MB_FAILURE;
03675   }
03676   else if (mpi_op != MPI_OP_NULL) {
03677     std::cerr << "Unknown MPI operation type." << std::endl;
03678     return MB_TYPE_OUT_OF_RANGE;
03679   }
03680 
03681   return MB_SUCCESS;
03682 }
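/* Worked example (sketch): with T == int, old_vals == {1, 2, 3},
   new_vals == {10, 20, 30} and mpi_op == MPI_SUM, the transform above
   overwrites new_vals with {11, 22, 33}; old_vals is left unchanged. */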
03683 
03684 ErrorCode ParallelComm::reduce_void(int tag_data_type, const MPI_Op mpi_op, int num_ents, void *old_vals, void *new_vals) 
03685 {
03686   ErrorCode result;
03687   switch (tag_data_type) {
03688     case MB_TYPE_INTEGER:
03689         result = reduce<int>(mpi_op, num_ents, old_vals, new_vals);
03690         break;
03691     case MB_TYPE_DOUBLE:
03692         result = reduce<double>(mpi_op, num_ents, old_vals, new_vals);
03693         break;
03694     case MB_TYPE_BIT:
03695         result = reduce<unsigned char>(mpi_op, num_ents, old_vals, new_vals);
03696         break;
03697     default:
03698         result = MB_SUCCESS;
03699         break;
03700   }
03701   
03702   return result;
03703 }
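/* Illustrative call (sketch, hypothetical buffers): element-wise accumulation
   of just-unpacked integer tag values into the existing local ones, as done in
   unpack_tags when an MPI_Op is supplied:

     // old_vals: existing local tag data, new_vals: values just unpacked
     ErrorCode rval = reduce_void(MB_TYPE_INTEGER, MPI_SUM, num_vals, old_vals, new_vals);
     // new_vals now holds the element-wise results and is what subsequently
     // gets written back with tag_set_data; data types other than integer,
     // double, and bit are passed through untouched
*/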
03704 
03705 ErrorCode ParallelComm::resolve_shared_ents(EntityHandle this_set,
03706                                               int resolve_dim,
03707                                               int shared_dim,
03708                                               const Tag* id_tag) 
03709   {
03710     ErrorCode result;
03711     Range proc_ents;
03712 
03713     // check for structured mesh, and do it differently if it is
03714     ScdInterface *scdi;
03715     result = mbImpl->query_interface(scdi);
03716     if (scdi) {
03717       result = scdi->tag_shared_vertices(this, this_set);
03718       if (MB_SUCCESS == result) {
03719         myDebug->tprintf(1, "Total number of shared entities = %lu.\n", (unsigned long)sharedEnts.size());
03720         return result;
03721       }
03722     }
03723 
03724     if (0 == this_set) {
03725         // get the entities in the partition sets
03726       for (Range::iterator rit = partitionSets.begin(); rit != partitionSets.end(); rit++) {
03727         Range tmp_ents;
03728         result = mbImpl->get_entities_by_handle(*rit, tmp_ents, true);
03729         if (MB_SUCCESS != result) return result;
03730         proc_ents.merge(tmp_ents);
03731       }
03732     }
03733     else {
03734       result = mbImpl->get_entities_by_handle(this_set, proc_ents, true);
03735       if (MB_SUCCESS != result) return result;
03736     }
03737       
03738     // resolve dim is maximal dim of entities in proc_ents
03739     if (-1 == resolve_dim) {
03740       if (proc_ents.empty()) 
03741         return MB_ENTITY_NOT_FOUND;
03742 
03743       resolve_dim = mbImpl->dimension_from_handle(*proc_ents.rbegin()); 
03744     }
03745 
03746     // proc_ents should all be of same dimension
03747     if (resolve_dim > shared_dim &&
03748         mbImpl->dimension_from_handle(*proc_ents.rbegin()) !=
03749         mbImpl->dimension_from_handle(*proc_ents.begin())) {
03750       Range::iterator lower = proc_ents.lower_bound(CN::TypeDimensionMap[0].first),
03751         upper = proc_ents.upper_bound(CN::TypeDimensionMap[resolve_dim-1].second);
03752       proc_ents.erase(lower, upper);
03753     }
03754   
03755     // must call even if we don't have any entities, to make sure
03756     // collective comm'n works
03757     return resolve_shared_ents(this_set, proc_ents, resolve_dim, shared_dim, NULL, id_tag);
03758   }
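  /* Illustrative use (sketch; 'pcomm' and 'fileset' are hypothetical): resolve
     shared entities for a 3D, element-based partition, sharing entities of
     dimension 2 and below across processors and using the GLOBAL_ID tag since
     no id tag is supplied:

       ErrorCode rval = pcomm->resolve_shared_ents(fileset, 3, 2, NULL);
  */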
03759   
03760   ErrorCode ParallelComm::resolve_shared_ents(EntityHandle this_set,
03761                                               Range &proc_ents,
03762                                               int resolve_dim,
03763                                               int shared_dim,
03764                                               Range *skin_ents,
03765                                               const Tag* id_tag) 
03766   {
03767 #ifdef USE_MPE
03768     if (myDebug->get_verbosity() == 2) {
03769       define_mpe();
03770       MPE_Log_event(RESOLVE_START, procConfig.proc_rank(), "Entering resolve_shared_ents.");
03771     }
03772 #endif
03773 
03774     ErrorCode result;
03775     myDebug->tprintf(1, "Resolving shared entities.\n");
03776 
03777     if (resolve_dim < shared_dim) {
03778       result = MB_FAILURE;
03779       RRA("MOAB does not support vertex-based partitions, only element-based ones.");
03780     }
03781     
03782     if (-1 == shared_dim) {
03783       if (!proc_ents.empty())
03784         shared_dim = mbImpl->dimension_from_handle(*proc_ents.begin())-1;
03785       else if (resolve_dim == 3)
03786         shared_dim = 2;
03787     }
03788     
03789     if (shared_dim < 0 || resolve_dim < 0) {
03790       result = MB_FAILURE;
03791       RRA("Unable to guess shared_dim or resolve_dim.");
03792     }
03793   
03794     // get the skin entities by dimension
03795     Range tmp_skin_ents[4];
03796 
03797     // get the entities to be skinned
03798     // find the skin
03799     int skin_dim = resolve_dim-1;
03800     if (!skin_ents) {
03801       skin_ents = tmp_skin_ents;
03802       skin_ents[resolve_dim] = proc_ents;
03803       Skinner skinner(mbImpl);
03804       result = skinner.find_skin(this_set, skin_ents[skin_dim+1], false, skin_ents[skin_dim],
03805                                  NULL, true, true, true);
03806       RRA("Failed to find skin.");
03807       myDebug->tprintf(1, "Found skin, now resolving.\n");
03808 
03809         // get entities adjacent to skin ents from shared_dim down to zero
03810       for (int this_dim = skin_dim-1; this_dim >= 0; this_dim--) {
03811         result = mbImpl->get_adjacencies(skin_ents[skin_dim], this_dim,
03812                                          true, skin_ents[this_dim],
03813                                          Interface::UNION);
03814         RRA("Failed getting skin adjacencies.");
03815       }
03816     }
03817     else if (skin_ents[resolve_dim].empty()) skin_ents[resolve_dim] = proc_ents;
03818     
03819     // global id tag
03820     Tag gid_tag; 
03821     if (id_tag)
03822       gid_tag = *id_tag;
03823     else {
03824       bool tag_created = false;
03825       int def_val = -1;
03826       result = mbImpl->tag_get_handle(GLOBAL_ID_TAG_NAME, 1, MB_TYPE_INTEGER,
03827                                       gid_tag, MB_TAG_DENSE|MB_TAG_CREAT, 
03828                                       &def_val, &tag_created );
03829       if (MB_FAILURE == result) return result;
03830 
03831       else if (tag_created) {
03832         // just created it, so we need global ids
03833         result = assign_global_ids(this_set, skin_dim+1,true,true,true);
03834         RRA("Failed assigning global ids.");
03835       }
03836     }
03837   
03838     // get gids for skin ents in a vector, to pass to gs
03839     std::vector<int> gid_data(skin_ents[0].size());
03840     result = mbImpl->tag_get_data(gid_tag, skin_ents[0], &gid_data[0]);
03841     RRA("Couldn't get gid tag for skin vertices.");
03842 
03843     // put handles in vector for passing to gs setup
03844     std::vector<EntityHandle> handle_vec;
03845     std::copy(skin_ents[0].begin(), skin_ents[0].end(), 
03846               std::back_inserter(handle_vec));
03847 
03848 #ifdef USE_MPE
03849     if (myDebug->get_verbosity() == 2) {
03850       MPE_Log_event(SHAREDV_START, procConfig.proc_rank(), "Creating crystal router.");
03851     }
03852 #endif
03853   
03854     // get a crystal router
03855     gs_data::crystal_data *cd = procConfig.crystal_router();
03856 
03857     /*  
03858     // get total number of entities; will overshoot highest global id, but
03859     // that's ok
03860     int num_total[2] = {0, 0}, num_local[2] = {0, 0};
03861     result = mbImpl->get_number_entities_by_dimension(this_set, 0, num_local);
03862     if (MB_SUCCESS != result) return result;
03863     int failure = MPI_Allreduce(num_local, num_total, 1,
03864     MPI_INTEGER, MPI_SUM, procConfig.proc_comm());
03865     if (failure) {
03866     result = MB_FAILURE;
03867     RRA("Allreduce for total number of shared ents failed.");
03868     }
03869   
03870     */
03871     // call gather-scatter to get shared ids & procs
03872     gs_data *gsd = new gs_data();
03873     assert(sizeof(ulong_) == sizeof(EntityHandle));
03874     if (sizeof(int) != sizeof(ulong_)) {
03875       std::vector<long> lgid_data(gid_data.size());
03876       std::copy(gid_data.begin(), gid_data.end(), lgid_data.begin());
03877       result = gsd->initialize(skin_ents[0].size(), &lgid_data[0], 
03878                                (ulong_*)&handle_vec[0], 2, 1, 1, cd);
03879     }
03880     else {
03881       result = gsd->initialize(skin_ents[0].size(), (long*)&gid_data[0], 
03882                                (ulong_*)&handle_vec[0], 2, 1, 1, cd);
03883     }
03884   
03885     RRA("Couldn't create gs data.");
03886 
03887     // get shared proc tags
03888     Tag shp_tag, shps_tag, shh_tag, shhs_tag, pstat_tag;
03889     result = get_shared_proc_tags(shp_tag, shps_tag, 
03890                                   shh_tag, shhs_tag, pstat_tag);
03891     RRA("Couldn't get shared proc tags.");
03892   
03893     // load shared verts into a tuple, then sort by index
03894     TupleList shared_verts;
03895     shared_verts.initialize(2, 0, 1, 0, 
03896                             skin_ents[0].size()*(MAX_SHARING_PROCS+1));
03897     shared_verts.enableWriteAccess();
03898 
03899     unsigned int i = 0, j = 0;
03900     for (unsigned int p = 0; p < gsd->nlinfo->_np; p++)
03901       for (unsigned int np = 0; np < gsd->nlinfo->_nshared[p]; np++) {
03902         shared_verts.vi_wr[i++] = gsd->nlinfo->_sh_ind[j];
03903         shared_verts.vi_wr[i++] = gsd->nlinfo->_target[p];
03904         shared_verts.vul_wr[j] = gsd->nlinfo->_ulabels[j];
03905         j++;
03906         shared_verts.inc_n();
03907       }
03908   
03909     int max_size = skin_ents[0].size()*(MAX_SHARING_PROCS+1);
03910     moab::TupleList::buffer sort_buffer;
03911     sort_buffer.buffer_init(max_size);
03912     shared_verts.sort(0, &sort_buffer);
03913     sort_buffer.reset();
03914 
03915     // set sharing procs and handles tags on skin ents
03916     int maxp = -1;
03917     std::vector<int> sharing_procs(MAX_SHARING_PROCS);
03918     std::fill(sharing_procs.begin(), sharing_procs.end(), maxp);
03919     j = 0; i = 0;
03920 
03921     // get ents shared by 1 or n procs
03922     std::map<std::vector<int>, std::vector<EntityHandle> > proc_nvecs;
03923     Range proc_verts;
03924     result = mbImpl->get_adjacencies(proc_ents, 0, false, proc_verts,
03925                                      Interface::UNION);
03926     RRA("Couldn't get proc_verts.");
03927   
03928     result = tag_shared_verts(shared_verts, skin_ents,
03929                               proc_nvecs, proc_verts);
03930     RRA("Trouble tagging shared verts.");
03931 
03932 #ifdef USE_MPE
03933     if (myDebug->get_verbosity() == 2) {
03934       MPE_Log_event(SHAREDV_END, procConfig.proc_rank(), "Finished tag_shared_verts.");
03935     }
03936 #endif
03937 
03938     // get entities shared by 1 or n procs
03939     result = get_proc_nvecs(resolve_dim, shared_dim, skin_ents, proc_nvecs);
03940     RRA("Trouble tagging shared entities.");
03941 
03942     shared_verts.reset();
03943   
03944     if (myDebug->get_verbosity() > 0) {
03945       for (std::map<std::vector<int>, std::vector<EntityHandle> >::const_iterator mit = proc_nvecs.begin();
03946            mit != proc_nvecs.end(); mit++) {
03947         myDebug->tprintf(1, "Iface: ");
03948         for (std::vector<int>::const_iterator vit = (mit->first).begin();
03949              vit != (mit->first).end(); vit++) myDebug->printf(1, " %d", *vit);
03950         myDebug->print(1, "\n");
03951       }
03952     }
03953   
03954     // create the sets for each interface; store them as tags on
03955     // the interface instance
03956     Range iface_sets;
03957     result = create_interface_sets(proc_nvecs);
03958     RRA("Trouble creating iface sets.");
03959 
03960     // establish comm procs and buffers for them
03961     std::set<unsigned int> procs;
03962     result = get_interface_procs(procs, true);
03963     RRA("Trouble getting iface procs.");
03964 
03965 #ifndef NDEBUG
03966     result = check_all_shared_handles(true);
03967     RRA("Shared handle check failed after iface vertex exchange.");
03968 #endif  
03969 
03970     // resolve shared entity remote handles; implemented in ghost cell exchange
03971     // code because it's so similar
03972     result = exchange_ghost_cells(-1, -1, 0, 0, true, true);
03973     RRA("Trouble resolving shared entity remote handles.");
03974 
03975     // now build parent/child links for interface sets
03976     result = create_iface_pc_links();
03977     RRA("Trouble creating interface parent/child links.");
03978 
03979     gsd->reset();
03980     delete gsd;
03981 
03982 #ifdef USE_MPE
03983     if (myDebug->get_verbosity() == 2) {
03984       MPE_Log_event(RESOLVE_END, procConfig.proc_rank(), "Exiting resolve_shared_ents.");
03985     }
03986 #endif
03987   
03988     //  std::ostringstream ent_str;
03989     //  ent_str << "mesh." << procConfig.proc_rank() << ".h5m";
03990     //  mbImpl->write_mesh(ent_str.str().c_str());
03991 
03992     // done
03993     return result;
03994   }
03995 
03996   void ParallelComm::define_mpe() 
03997   {
03998 #ifdef USE_MPE
03999     if (myDebug->get_verbosity() == 2) {
04000       // define mpe states used for logging
04001       int success;
04002       MPE_Log_get_state_eventIDs( &IFACE_START, &IFACE_END);
04003       MPE_Log_get_state_eventIDs( &GHOST_START, &GHOST_END);
04004       MPE_Log_get_state_eventIDs( &SHAREDV_START, &SHAREDV_END);
04005       MPE_Log_get_state_eventIDs( &RESOLVE_START, &RESOLVE_END);
04006       MPE_Log_get_state_eventIDs( &ENTITIES_START, &ENTITIES_END);
04007       MPE_Log_get_state_eventIDs( &RHANDLES_START, &RHANDLES_END);
04008       MPE_Log_get_state_eventIDs( &OWNED_START, &OWNED_END);
04009       success = MPE_Describe_state(IFACE_START, IFACE_END, "Resolve interface ents", "green");
04010       success = MPE_Describe_state(GHOST_START, GHOST_END, "Exchange ghost ents", "red");
04011       success = MPE_Describe_state(SHAREDV_START, SHAREDV_END, "Resolve interface vertices", "blue");
04012       success = MPE_Describe_state(RESOLVE_START, RESOLVE_END, "Resolve shared ents", "purple");
04013       success = MPE_Describe_state(ENTITIES_START, ENTITIES_END, "Exchange shared ents", "yellow");
04014       success = MPE_Describe_state(RHANDLES_START, RHANDLES_END, "Remote handles", "cyan");
04015       success = MPE_Describe_state(OWNED_START, OWNED_END, "Exchange owned ents", "black");
04016     }
04017 #endif
04018   }
04019 
04020   ErrorCode ParallelComm::resolve_shared_ents(ParallelComm **pc, 
04021                                               const unsigned int np, 
04022                                               EntityHandle this_set,
04023                                               const int part_dim) 
04024   {
04025     std::vector<Range> verts(np);
04026     int tot_verts = 0;
04027     unsigned int p, i, j, v;
04028     ErrorCode rval;
04029     for (p = 0; p < np; p++) {
04030       Skinner skinner(pc[p]->get_moab());
04031       Range part_ents, skin_ents;
04032       rval = pc[p]->get_moab()->get_entities_by_dimension(this_set, part_dim, part_ents);
04033       if (MB_SUCCESS != rval) return rval;
04034       rval = skinner.find_skin(this_set, part_ents, false, skin_ents, 0, true, true, true);
04035       if (MB_SUCCESS != rval) return rval;
04036       rval = pc[p]->get_moab()->get_adjacencies(skin_ents, 0, true, verts[p],
04037                                                 Interface::UNION);
04038       if (MB_SUCCESS != rval) return rval;
04039       tot_verts += verts[p].size();
04040     }
04041   
04042     TupleList shared_ents;
04043     shared_ents.initialize(2, 0, 1, 0, tot_verts);
04044     shared_ents.enableWriteAccess();
04045 
04046     i = 0; j = 0;
04047     std::vector<int> gids;
04048     Range::iterator rit;
04049     Tag gid_tag;
04050     int dum_default = 0;
04051     for (p = 0; p < np; p++) {
04052       rval = pc[p]->get_moab()->tag_get_handle(GLOBAL_ID_TAG_NAME, 1, MB_TYPE_INTEGER,
04053                                                gid_tag, MB_TAG_DENSE|MB_TAG_CREAT,
04054                                                &dum_default );
04055       if (MB_SUCCESS != rval) return rval;
04056       gids.resize(verts[p].size());
04057       rval = pc[p]->get_moab()->tag_get_data(gid_tag, verts[p], &gids[0]);
04058       if (MB_SUCCESS != rval) return rval;
04059     
04060       for (v = 0, rit = verts[p].begin(); v < gids.size(); v++, rit++) {
04061         shared_ents.vi_wr[i++] = gids[v];
04062         shared_ents.vi_wr[i++] = p;
04063         shared_ents.vul_wr[j] = *rit;
04064         j++;
04065         shared_ents.inc_n();
04066       }
04067     }
04068  
04069     moab::TupleList::buffer sort_buffer;
04070     sort_buffer.buffer_init(tot_verts);
04071     shared_ents.sort(0, &sort_buffer);
04072     sort_buffer.reset();
04073 
04074     j = 0; i = 0;
04075     std::vector<EntityHandle> handles;
04076     std::vector<int> procs;
04077   
04078     while (i < shared_ents.get_n()) {
04079       handles.clear();
04080       procs.clear();
04081     
04082       // count & accumulate sharing procs
04083       int this_gid = shared_ents.vi_rd[j];
04084       while (i < shared_ents.get_n() && shared_ents.vi_rd[j] == this_gid) {
04085         j++;
04086         procs.push_back( shared_ents.vi_rd[j++] );
04087         handles.push_back( shared_ents.vul_rd[i++] );
04088       }
04089       if (1 == procs.size()) continue;
04090     
04091       for (v = 0; v < procs.size(); v++) {
04092         rval = pc[procs[v]]->update_remote_data(handles[v], 
04093                                                 &procs[0], &handles[0], procs.size(),
04094                                                 (procs[0] == (int)pc[procs[v]]->rank() ? PSTATUS_INTERFACE : (PSTATUS_NOT_OWNED|PSTATUS_INTERFACE)));
04095         if (MB_SUCCESS != rval) return rval;
04096       }
04097     }
04098 
04099     std::set<unsigned int> psets;
04100     for (p = 0; p < np; p++) {
04101       rval = pc[p]->create_interface_sets(this_set, part_dim, part_dim-1);
04102       if (MB_SUCCESS != rval) return rval;
04103       // establish comm procs and buffers for them
04104       psets.clear();
04105       rval = pc[p]->get_interface_procs(psets, true);
04106       if (MB_SUCCESS != rval) return rval;
04107     }
04108 
04109     shared_ents.reset();
04110   
04111     return MB_SUCCESS;
04112   }
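  // Illustrative sketch (not MOAB code): the routine above works by sorting
  // (gid, proc, handle) triples on gid so that all copies of one vertex become
  // adjacent, then scanning each run of equal gids.  A minimal sketch of that
  // grouping idea with a plain struct and std::sort; the names below are
  // hypothetical and only <vector>/<algorithm> (already included) are assumed.
#if 0  // sketch only; kept out of compilation
  struct VertCopy { int gid; int proc; unsigned long handle; };
  static bool by_gid( const VertCopy& a, const VertCopy& b ) { return a.gid < b.gid; }

  static void group_shared_copies( std::vector<VertCopy>& copies )
  {
    std::sort( copies.begin(), copies.end(), by_gid );
    for (size_t i = 0; i < copies.size(); ) {
      size_t run = i;
      while (run < copies.size() && copies[run].gid == copies[i].gid) run++;
      if (run - i > 1) {
        // copies[i..run) all describe the same vertex on different procs;
        // this is the point where update_remote_data is invoked above
      }
      i = run;
    }
  }
#endif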
04113 
04114   ErrorCode ParallelComm::tag_iface_entities() 
04115   {
04116     ErrorCode result = MB_SUCCESS;
04117     Range iface_ents, tmp_ents, rmv_ents;
04118     std::vector<unsigned char> pstat;
04119     unsigned char set_pstat;
04120     Range::iterator rit2;
04121     unsigned int i;
04122   
04123     for (Range::iterator rit = interfaceSets.begin(); rit != interfaceSets.end(); rit++) {
04124       iface_ents.clear();
04125     
04126       result = mbImpl->get_entities_by_handle(*rit, iface_ents);
04127       RRA("Couldn't get iface set contents.");
04128       pstat.resize(iface_ents.size());
04129       result = mbImpl->tag_get_data(pstatus_tag(), iface_ents, &pstat[0]);
04130       RRA("Couldn't get pstatus values for set ents.");
04131       result = mbImpl->tag_get_data(pstatus_tag(), &(*rit), 1, &set_pstat);
04132       RRA("Couldn't get pstatus values for set.");
04133       rmv_ents.clear();
04134       for (rit2 = iface_ents.begin(), i = 0; rit2 != iface_ents.end(); rit2++, i++) {
04135         if (!(pstat[i] & PSTATUS_INTERFACE)) {
04136           rmv_ents.insert(*rit2);
04137           pstat[i] = 0x0;
04138         }
04139       }
04140       result = mbImpl->remove_entities(*rit, rmv_ents);
04141       RRA("Couldn't remove entities from set.");
04142 
04143       if (!(set_pstat & PSTATUS_NOT_OWNED)) continue;
04144       // if we're here, we need to set the not_owned status on (remaining) set contents
04145 
04146       // remove rmv_ents from the contents list
04147       iface_ents = subtract(iface_ents, rmv_ents);
04148       // compress the pstat vector (removing 0x0's) with the erase-remove idiom
04149       pstat.erase(std::remove_if(pstat.begin(), pstat.end(), 
04150                      std::bind2nd(std::equal_to<unsigned char>(), 0x0)), pstat.end());
04151       // fold the not_owned bit into remaining values
04152       unsigned int sz = iface_ents.size();
04153       for (i = 0; i < sz; i++)
04154         pstat[i] |= PSTATUS_NOT_OWNED;
04155 
04156       // set the tag on the entities
04157       result = mbImpl->tag_set_data(pstatus_tag(), iface_ents, &pstat[0]);
04158       RRA("Couldn't set pstatus values for set ents.");
04159     }
04160   
04161     return MB_SUCCESS;
04162   }
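  // Illustrative sketch (not MOAB code): the pstat compression above relies on
  // the erase-remove idiom; std::remove_if only moves the kept values to the
  // front and returns the new logical end, so that iterator must be handed to
  // erase() to actually shrink the vector.  Hypothetical standalone helper:
#if 0  // sketch only; kept out of compilation
  static void drop_zero_flags( std::vector<unsigned char>& flags )
  {
    flags.erase( std::remove_if( flags.begin(), flags.end(),
                                 std::bind2nd( std::equal_to<unsigned char>(), 0x0 ) ),
                 flags.end() );
  }
#endif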
04163 
04164   ErrorCode ParallelComm::set_pstatus_entities(Range &pstatus_ents,
04165                                                unsigned char pstatus_val,
04166                                                bool lower_dim_ents,
04167                                                bool verts_too,
04168                                                int operation) 
04169   {
04170     std::vector<unsigned char> pstatus_vals(pstatus_ents.size());
04171     Range all_ents, *range_ptr = &pstatus_ents;
04172     ErrorCode result;
04173     if (lower_dim_ents || verts_too) {
04174       all_ents = pstatus_ents;
04175       range_ptr = &all_ents;
04176       int start_dim = (lower_dim_ents ? mbImpl->dimension_from_handle(*pstatus_ents.rbegin())-1 : 0);
04177       for (; start_dim >= 0; start_dim--) {
04178         result = mbImpl->get_adjacencies(all_ents, start_dim, true, all_ents,
04179                                          Interface::UNION);
04180         RRA(" ");
04181       }
04182     }
04183     if (Interface::UNION == operation) {
04184       result = mbImpl->tag_get_data(pstatus_tag(), *range_ptr, &pstatus_vals[0]);
04185       RRA("Couldn't get pstatus tag value.");
04186       for (unsigned int i = 0; i < pstatus_vals.size(); i++)
04187         pstatus_vals[i] |= pstatus_val;
04188     }
04189     else {
04190       for (unsigned int i = 0; i < pstatus_vals.size(); i++)
04191         pstatus_vals[i] = pstatus_val;
04192     }
04193     result = mbImpl->tag_set_data(pstatus_tag(), *range_ptr, &pstatus_vals[0]);
04194     RRA("Couldn't set pstatus tag value.");
04195   
04196     return MB_SUCCESS;
04197   }
04198   
04199   ErrorCode ParallelComm::set_pstatus_entities(EntityHandle *pstatus_ents,
04200                                                int num_ents,
04201                                                unsigned char pstatus_val,
04202                                                bool lower_dim_ents,
04203                                                bool verts_too,
04204                                                int operation) 
04205   {
04206     std::vector<unsigned char> pstatus_vals(num_ents);
04207     ErrorCode result;
04208     if (lower_dim_ents || verts_too) {
04209       // in this case, call the range-based version
04210       Range tmp_range;
04211       std::copy(pstatus_ents, pstatus_ents+num_ents, range_inserter(tmp_range));
04212       return set_pstatus_entities(tmp_range, pstatus_val, lower_dim_ents, 
04213                                   verts_too, operation);
04214     }
04215 
04216     if (Interface::UNION == operation) {
04217       result = mbImpl->tag_get_data(pstatus_tag(), pstatus_ents, num_ents, &pstatus_vals[0]);
04218       RRA("Couldn't get pstatus tag value.");
04219       for (unsigned int i = 0; i < (unsigned int) num_ents; i++)
04220         pstatus_vals[i] |= pstatus_val;
04221     }
04222     else {
04223       for (unsigned int i = 0; i < (unsigned int) num_ents; i++)
04224         pstatus_vals[i] = pstatus_val;
04225     }
04226     result = mbImpl->tag_set_data(pstatus_tag(), pstatus_ents, num_ents, &pstatus_vals[0]);
04227     RRA("Couldn't set pstatus tag value.");
04228   
04229     return MB_SUCCESS;
04230   }
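  // Illustrative sketch (not MOAB code): both set_pstatus_entities overloads
  // update the per-entity pstatus byte in one of two modes; Interface::UNION
  // ORs the new bits into the existing value, anything else overwrites it.
  // Hypothetical helper showing just that choice:
#if 0  // sketch only; kept out of compilation
  static unsigned char updated_pstatus( unsigned char current,
                                        unsigned char new_bits,
                                        bool union_op )
  {
    // UNION preserves bits already set (e.g. PSTATUS_SHARED) and adds new ones
    return union_op ? (unsigned char)(current | new_bits) : new_bits;
  }
#endif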
04231 
04232   static size_t choose_owner_idx( const std::vector<unsigned>& proc_list )
04233   {
04234     // Try to assign owners randomly so we get a good distribution
04235     // (note: using the same seed on all procs is essential, so they agree on the owner)
04236     unsigned val = 0;
04237     for (size_t i = 0; i < proc_list.size(); ++i)
04238       val ^= proc_list[i];
04239     return rand_r(&val) % proc_list.size();   
04240   }
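  // Illustrative sketch (not MOAB code): choose_owner_idx only works because
  // every sharing proc calls it with the identical sorted proc list; the XOR
  // of the list yields the same rand_r seed everywhere, so all procs compute
  // the same owner index without communicating.  Sketch of that agreement
  // property, assuming POSIX rand_r is available (as the code above assumes):
#if 0  // sketch only; kept out of compilation
  static void check_owner_agreement( const std::vector<unsigned>& sorted_procs )
  {
    // simulate two ranks making the decision independently from the same list
    unsigned seed_a = 0, seed_b = 0;
    for (size_t i = 0; i < sorted_procs.size(); ++i) {
      seed_a ^= sorted_procs[i];
      seed_b ^= sorted_procs[i];
    }
    size_t pick_a = rand_r( &seed_a ) % sorted_procs.size();
    size_t pick_b = rand_r( &seed_b ) % sorted_procs.size();
    assert( pick_a == pick_b );  // same list => same seed => same owner
  }
#endif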
04241 
04242   struct set_tuple
04243   {
04244     unsigned idx;
04245     unsigned proc;
04246     EntityHandle handle;
04247     inline bool operator<(set_tuple other) const
04248     { return (idx == other.idx) ? (proc < other.proc) : (idx < other.idx); }
04249   };
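  // Illustrative sketch (not MOAB code): set_tuple's operator< orders tuples
  // primarily by local set index and secondarily by remote proc, so after
  // std::sort all entries for one set are contiguous with their procs already
  // ascending (which the is_sorted_unique assert below relies on).  Example:
#if 0  // sketch only; kept out of compilation
  static void set_tuple_sort_example()
  {
    set_tuple a = { 1, 7, 0 }, b = { 0, 9, 0 }, c = { 1, 3, 0 };
    std::vector<set_tuple> v;
    v.push_back( a ); v.push_back( b ); v.push_back( c );
    std::sort( v.begin(), v.end() );
    // order is now b (idx 0), then c (idx 1, proc 3), then a (idx 1, proc 7)
    assert( v[0].idx == 0 && v[1].proc == 3 && v[2].proc == 7 );
  }
#endif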
04250 
04251   ErrorCode ParallelComm::resolve_shared_sets(EntityHandle file, const Tag* idtag)
04252   {
04253 
04254     // find all sets with any of the following tags:
04255 
04256     const char* const shared_set_tag_names[] = { GEOM_DIMENSION_TAG_NAME,
04257                                                  MATERIAL_SET_TAG_NAME,
04258                                                  DIRICHLET_SET_TAG_NAME,
04259                                                  NEUMANN_SET_TAG_NAME,
04260                                                  PARALLEL_PARTITION_TAG_NAME };
04261     int num_tags = sizeof(shared_set_tag_names)/sizeof(shared_set_tag_names[0]);
04262     Range candidate_sets;
04263     ErrorCode result;
04264 
04265     // If we're not given an ID tag to use to globally identify sets,
04266     // then fall back to using known tag values
04267     if (!idtag) {
04268       Tag gid, tag;
04269       result = mbImpl->tag_get_handle( GLOBAL_ID_TAG_NAME, 1, MB_TYPE_INTEGER, gid );
04270       if (MB_SUCCESS == result) 
04271         result = mbImpl->tag_get_handle( GEOM_DIMENSION_TAG_NAME, 1, MB_TYPE_INTEGER, tag );
04272       if (MB_SUCCESS == result) {
04273         for (int d = 0; d < 4; ++d) {
04274           candidate_sets.clear();
04275           const void* vals[] = { &d };
04276           result = mbImpl->get_entities_by_type_and_tag( file, MBENTITYSET, &tag, vals, 1, candidate_sets );
04277           if (MB_SUCCESS == result)
04278             resolve_shared_sets( candidate_sets, gid );
04279         }
04280       }
04281     
04282       for (int i = 1; i < num_tags; ++i) {
04283         result = mbImpl->tag_get_handle( shared_set_tag_names[i], 1, MB_TYPE_INTEGER, tag );
04284         if (MB_SUCCESS == result) {
04285           candidate_sets.clear();
04286           result = mbImpl->get_entities_by_type_and_tag( file, MBENTITYSET, &tag, 0, 1, candidate_sets );
04287           if (MB_SUCCESS == result)
04288             resolve_shared_sets( candidate_sets, tag );
04289         }
04290       }
04291       return MB_SUCCESS;
04292     } 
04293 
04294 
04295     for (int i = 0; i < num_tags; ++i) {
04296       Tag tag;
04297       result = mbImpl->tag_get_handle( shared_set_tag_names[i], 1, MB_TYPE_INTEGER,
04298                                        tag, MB_TAG_ANY );
04299       if (MB_SUCCESS != result)
04300         continue;
04301     
04302       mbImpl->get_entities_by_type_and_tag( file, MBENTITYSET, &tag, 0, 1, candidate_sets, Interface::UNION );
04303     }
04304   
04305     // find any additional sets that contain shared entities
04306     Range::iterator hint = candidate_sets.begin();
04307     Range all_sets;
04308     mbImpl->get_entities_by_type( file, MBENTITYSET, all_sets );
04309     all_sets = subtract( all_sets, candidate_sets );
04310     Range::iterator it = all_sets.begin();
04311     while (it != all_sets.end()) {
04312       Range contents;
04313       mbImpl->get_entities_by_handle( *it, contents );
04314       contents.erase( contents.lower_bound( MBENTITYSET ), contents.end() );
04315       filter_pstatus( contents, PSTATUS_SHARED, PSTATUS_OR );
04316       if (contents.empty()) {
04317         ++it;
04318       }
04319       else {
04320         hint = candidate_sets.insert( hint, *it );
04321         it = all_sets.erase( it );
04322       }
04323     }
04324   
04325     // find any additional sets that contain or are parents of potential shared sets
04326     Range prev_list = candidate_sets;
04327     while (!prev_list.empty()) {
04328       it = all_sets.begin();
04329       Range new_list;
04330       hint = new_list.begin();
04331       while (it != all_sets.end()) {
04332         Range contents;
04333         mbImpl->get_entities_by_type( *it, MBENTITYSET, contents );
04334         if (!intersect(prev_list,contents).empty()) {
04335           hint = new_list.insert( hint, *it );
04336           it = all_sets.erase(it);
04337         }
04338         else {
04339           new_list.clear();
04340           mbImpl->get_child_meshsets( *it, contents );
04341           if (!intersect(prev_list,contents).empty()) {
04342             hint = new_list.insert( hint, *it );
04343             it = all_sets.erase(it);
04344           }
04345           else {
04346             ++it;
04347           }
04348         }
04349       }
04350     
04351       candidate_sets.merge( new_list );
04352       prev_list.swap(new_list);
04353     }
04354   
04355     return resolve_shared_sets( candidate_sets, *idtag );
04356   }
04357 
04358 #ifndef NDEBUG
04359   bool is_sorted_unique( std::vector<unsigned>& v )
04360   {
04361     for (size_t i = 1; i < v.size(); ++i)
04362       if (v[i-1] >= v[i])
04363         return false;
04364     return true;
04365   }
04366 #endif
04367 
04368   ErrorCode ParallelComm::resolve_shared_sets(Range& sets, Tag idtag)
04369   {
04370     ErrorCode result;
04371     const unsigned rk = proc_config().proc_rank();
04372     MPI_Comm cm = proc_config().proc_comm();
04373 
04374     // build sharing list for all sets
04375   
04376     // get ids for sets in a vector, to pass to gs
04377     std::vector<long> larray; // allocate sufficient space for longs
04378     std::vector<unsigned long> handles;
04379     Range tmp_sets;
04380     for (Range::iterator rit = sets.begin(); rit != sets.end(); rit++) {
04381       int dum;
04382       result = mbImpl->tag_get_data(idtag, &(*rit), 1, &dum);
04383       if (MB_SUCCESS == result) {
04384         larray.push_back(dum);
04385         handles.push_back(*rit);
04386         tmp_sets.insert(tmp_sets.end(), *rit);
04387       }
04388     }
04389   
04390     const size_t nsets = handles.size();
04391     
04392     // get handle array for sets
04393     assert(sizeof(EntityHandle) <= sizeof(unsigned long));
04394 
04395     // do communication of data
04396     gs_data::crystal_data *cd = procConfig.crystal_router();
04397     gs_data *gsd = new gs_data();
04398     result = gsd->initialize( nsets, &larray[0], &handles[0], 2, 1, 1, cd );
04399     RRA("Couldn't create gs data.");
04400  
04401     // convert from global IDs grouped by process rank to list
04402     // of <idx,rank> pairs so that we can sort primarily
04403     // by idx and secondarily by rank (we want lists of procs for each
04404     // idx, not lists of indices for each proc).
04405     size_t ntuple = 0;
04406     for (unsigned p = 0; p < gsd->nlinfo->_np; p++)
04407       ntuple += gsd->nlinfo->_nshared[p];
04408     std::vector< set_tuple > tuples;
04409     tuples.reserve( ntuple );
04410     size_t j = 0;
04411     for (unsigned p = 0; p < gsd->nlinfo->_np; p++) {
04412       for (unsigned np = 0; np < gsd->nlinfo->_nshared[p]; np++) {
04413         set_tuple t;
04414         t.idx = gsd->nlinfo->_sh_ind[j];
04415         t.proc = gsd->nlinfo->_target[p];
04416         t.handle = gsd->nlinfo->_ulabels[j];
04417         tuples.push_back( t );
04418         ++j;
04419       }
04420     }
04421     std::sort( tuples.begin(), tuples.end() );
04422   
04423     // release crystal router stuff
04424     gsd->reset();
04425     delete gsd;
04426 
04427     // store sharing data for each set
04428     size_t ti = 0;
04429     unsigned idx = 0;
04430     std::vector<unsigned> procs;
04431     Range::iterator si = tmp_sets.begin();
04432     while (si != tmp_sets.end() && ti < tuples.size()) {
04433       assert(idx <= tuples[ti].idx);
04434       if (idx < tuples[ti].idx) 
04435         si += (tuples[ti].idx - idx);
04436       idx = tuples[ti].idx;
04437     
04438       procs.clear();
04439       size_t ti_init = ti;
04440       while (ti < tuples.size() && tuples[ti].idx == idx) {
04441         procs.push_back( tuples[ti].proc );
04442         ++ti;
04443       }
04444       assert( is_sorted_unique( procs ) );
04445     
04446       result = sharedSetData->set_sharing_procs( *si, procs );
04447       if (MB_SUCCESS != result) {
04448         std::cerr << "Failure at " __FILE__ ":" << __LINE__ << std::endl;
04449         std::cerr.flush();
04450         MPI_Abort( cm, 1 );
04451       }
04452     
04453       // add this proc to list of sharing procs in correct position
04454       // so that all procs select owner based on same list
04455       std::vector<unsigned>::iterator it = std::lower_bound( procs.begin(), procs.end(), rk );
04456       assert(it == procs.end() || *it > rk);
04457       procs.insert( it, rk );
04458       size_t owner_idx = choose_owner_idx(procs);
04459       EntityHandle owner_handle;
04460       if (procs[owner_idx] == rk)
04461         owner_handle = *si;
04462       else if (procs[owner_idx] > rk)
04463         owner_handle = tuples[ti_init+owner_idx-1].handle;
04464       else
04465         owner_handle = tuples[ti_init+owner_idx].handle;
04466       result = sharedSetData->set_owner( *si, procs[owner_idx], owner_handle );
04467       if (MB_SUCCESS != result) {
04468         std::cerr << "Failure at " __FILE__ ":" << __LINE__ << std::endl;
04469         std::cerr.flush();
04470         MPI_Abort( cm, 1 );
04471       }
04472 
04473       ++si;
04474       ++idx;
04475     }
04476   
04477     return MB_SUCCESS;
04478   }
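  // Illustrative sketch (not MOAB code): before picking an owner, the routine
  // above splices the local rank into the already-sorted list of remote
  // sharing procs with std::lower_bound, so every proc ranks the identical
  // list.  Hypothetical helper for that sorted-insert step:
#if 0  // sketch only; kept out of compilation
  static void insert_rank_sorted( std::vector<unsigned>& procs, unsigned my_rank )
  {
    // lower_bound returns the first element >= my_rank; inserting there keeps
    // the vector sorted (my_rank must not already be present)
    std::vector<unsigned>::iterator it =
      std::lower_bound( procs.begin(), procs.end(), my_rank );
    assert( it == procs.end() || *it > my_rank );
    procs.insert( it, my_rank );
  }
#endif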
04479 
04480 
04481 
04482   ErrorCode ParallelComm::create_interface_sets(EntityHandle this_set, int resolve_dim, int shared_dim)
04483   {
04484     std::map<std::vector<int>, std::vector<EntityHandle> > proc_nvecs;
04485   
04486     // build up the list of shared entities
04487     int procs[MAX_SHARING_PROCS];
04488     EntityHandle handles[MAX_SHARING_PROCS];
04489     ErrorCode result;
04490     int nprocs;
04491     unsigned char pstat;
04492     for (std::vector<EntityHandle>::iterator vit = sharedEnts.begin(); vit != sharedEnts.end(); vit++) {
04493       if (shared_dim != -1 && mbImpl->dimension_from_handle(*vit) > shared_dim)
04494         continue;
04495       result = get_sharing_data(*vit, procs, handles, pstat, nprocs);
04496       RRA("");
04497       std::sort(procs, procs+nprocs);
04498       std::vector<int> tmp_procs(procs, procs + nprocs);
04499       assert(tmp_procs.size() != 2);
04500       proc_nvecs[tmp_procs].push_back(*vit);
04501     }
04502                                                   
04503     Skinner skinner(mbImpl);
04504     Range skin_ents[4];
04505     result = mbImpl->get_entities_by_dimension(this_set, resolve_dim, skin_ents[resolve_dim]);
04506     RRA("");
04507     result = skinner.find_skin(this_set, skin_ents[resolve_dim], false,
04508                                skin_ents[resolve_dim-1], 0, true, true, true);
04509     RRA("Failed to find skin.");
04510     if (shared_dim > 1) {
04511       result = mbImpl->get_adjacencies(skin_ents[resolve_dim-1], resolve_dim-2, true,
04512                                        skin_ents[resolve_dim-2], Interface::UNION);
04513       RRA("");
04514     }
04515 
04516     result = get_proc_nvecs(resolve_dim, shared_dim, skin_ents, proc_nvecs);
04517     
04518     return create_interface_sets(proc_nvecs);
04519   }
04520   
04521   ErrorCode ParallelComm::create_interface_sets(std::map<std::vector<int>, std::vector<EntityHandle> > &proc_nvecs) 
04522   {
04523     if (proc_nvecs.empty()) return MB_SUCCESS;
04524   
04525     int proc_ids[MAX_SHARING_PROCS];
04526     EntityHandle proc_handles[MAX_SHARING_PROCS];
04527     Tag shp_tag, shps_tag, shh_tag, shhs_tag, pstat_tag;
04528     ErrorCode result = get_shared_proc_tags(shp_tag, shps_tag, 
04529                                             shh_tag, shhs_tag,
04530                                             pstat_tag);
04531     RRA("Trouble getting shared proc tags in create_interface_sets.");
04532     Range::iterator rit;
04533 
04534     // create interface sets, tag them, and tag their contents with iface set tag
04535     std::vector<EntityHandle> tag_vals;
04536     std::vector<unsigned char> pstatus;
04537     for (std::map<std::vector<int>,std::vector<EntityHandle> >::iterator vit = proc_nvecs.begin();
04538          vit != proc_nvecs.end(); vit++) {
04539       // create the set
04540       EntityHandle new_set;
04541       result = mbImpl->create_meshset(MESHSET_SET, new_set); 
04542       RRA("Failed to create interface set.");
04543       interfaceSets.insert(new_set);
04544 
04545       // add entities
04546       assert(!vit->second.empty());
04547       result = mbImpl->add_entities(new_set, &(vit->second)[0], (vit->second).size()); 
04548       RRA("Failed to add entities to interface set.");
04549       // tag set with the proc rank(s)
04550       if (vit->first.size() == 1) {
04551         assert((vit->first)[0] != (int)procConfig.proc_rank());
04552         result = mbImpl->tag_set_data(shp_tag, &new_set, 1, 
04553                                       &(vit->first)[0]); 
04554         proc_handles[0] = 0;
04555         result = mbImpl->tag_set_data(shh_tag, &new_set, 1, 
04556                                       proc_handles); 
04557       }
04558       else {
04559         // pad tag data out to MAX_SHARING_PROCS with -1
04560         if (vit->first.size() > MAX_SHARING_PROCS) {
04561           std::cerr << "Exceeded MAX_SHARING_PROCS for "
04562                     << CN::EntityTypeName(TYPE_FROM_HANDLE(new_set))
04563                     << ' ' << ID_FROM_HANDLE(new_set) 
04564                     << " on process " << proc_config().proc_rank()
04565                     << std::endl;
04566           std::cerr.flush();
04567           MPI_Abort(proc_config().proc_comm(), 66);
04568         }
04569         //assert( vit->first.size() <= MAX_SHARING_PROCS );
04570         std::copy( vit->first.begin(), vit->first.end(), proc_ids );
04571         std::fill( proc_ids + vit->first.size(), proc_ids + MAX_SHARING_PROCS, -1 );
04572         result = mbImpl->tag_set_data(shps_tag, &new_set, 1, proc_ids );
04573         unsigned int ind = std::find(proc_ids, proc_ids+vit->first.size(), procConfig.proc_rank())
04574           - proc_ids;
04575         assert(ind < vit->first.size());
04576         std::fill( proc_handles, proc_handles + MAX_SHARING_PROCS, 0);
04577         proc_handles[ind] = new_set;
04578         result = mbImpl->tag_set_data(shhs_tag, &new_set, 1, proc_handles); 
04579       }
04580       RRA("Failed to tag interface set with procs.");
04581     
04582       // get the owning proc, then set the pstatus tag on iface set
04583       int min_proc = (vit->first)[0];
04584       unsigned char pval = (PSTATUS_SHARED | PSTATUS_INTERFACE);
04585       if (min_proc < (int) procConfig.proc_rank()) pval |= PSTATUS_NOT_OWNED;
04586       if (vit->first.size() > 1) pval |= PSTATUS_MULTISHARED;
04587       result = mbImpl->tag_set_data(pstat_tag, &new_set, 1, &pval); 
04588       RRA("Failed to tag interface set with pstatus.");
04589 
04590       // tag the vertices with the same thing
04591       pstatus.clear();
04592       std::vector<EntityHandle> verts;
04593       for (std::vector<EntityHandle>::iterator v2it = (vit->second).begin(); v2it != (vit->second).end(); v2it++)
04594         if (mbImpl->type_from_handle(*v2it) == MBVERTEX) verts.push_back(*v2it);
04595       pstatus.resize(verts.size(), pval);
04596       if (!verts.empty()) {
04597         result = mbImpl->tag_set_data(pstat_tag, &verts[0], verts.size(), &pstatus[0]); 
04598         RRA("Failed to tag interface set vertices with pstatus.");
04599       }
04600     }
04601 
04602     return MB_SUCCESS;
04603   }
04604 
04605   ErrorCode ParallelComm::create_iface_pc_links() 
04606   {
04607     // now that we've resolved the entities in the iface sets, 
04608     // set parent/child links between the iface sets
04609 
04610     // first tag all entities in the iface sets
04611     Tag tmp_iface_tag;
04612     EntityHandle tmp_iface_set = 0;
04613     ErrorCode result = mbImpl->tag_get_handle("__tmp_iface", 1, MB_TYPE_HANDLE,
04614                                               tmp_iface_tag, MB_TAG_DENSE|MB_TAG_CREAT,
04615                                               &tmp_iface_set);
04616     if (MB_SUCCESS != result) 
04617       RRA("Failed to create temporary iface set tag.");
04618 
04619     Range iface_ents;
04620     std::vector<EntityHandle> tag_vals;
04621     Range::iterator rit;
04622   
04623     for (rit = interfaceSets.begin(); rit != interfaceSets.end(); rit++) {
04624       // tag entities with interface set
04625       iface_ents.clear();
04626       result = mbImpl->get_entities_by_handle(*rit, iface_ents);
04627       RRA("Couldn't get entities in iface set.");
04628     
04629       if (iface_ents.empty()) continue;
04630     
04631       tag_vals.resize(iface_ents.size());
04632       std::fill(tag_vals.begin(), tag_vals.end(), *rit);
04633       result = mbImpl->tag_set_data(tmp_iface_tag, iface_ents, &tag_vals[0]); 
04634       RRA("Failed to tag iface entities with interface set.");
04635     }
04636   
04637     // now go back through interface sets and add parent/child links
04638     Range tmp_ents2;
04639     for (int d = 2; d >= 0; d--) {
04640       for (rit = interfaceSets.begin(); rit != interfaceSets.end(); rit++) {
04641         // get entities on this interface
04642         iface_ents.clear();
04643         result = mbImpl->get_entities_by_handle(*rit, iface_ents, true);
04644         RRA("Couldn't get entities by dimension.");
04645         if (iface_ents.empty() ||
04646             mbImpl->dimension_from_handle(*iface_ents.rbegin()) != d) continue;
04647 
04648         // get higher-dimensional entities and their interface sets
04649         result = mbImpl->get_adjacencies(&(*iface_ents.begin()), 1, d+1,
04650                                          false, tmp_ents2);
04651         RRA("Couldn't get adjacencies for interface sets.");
04652         tag_vals.resize(tmp_ents2.size());
04653         result = mbImpl->tag_get_data(tmp_iface_tag, tmp_ents2, &tag_vals[0]);
04654         RRA("Couldn't get iface set tag for interface sets.");
04655       
04656         // go through them; any that are in an interface set make that set a parent
04657         EntityHandle last_set = 0;
04658         for (unsigned int i = 0; i < tag_vals.size(); i++) {
04659           if (tag_vals[i] && tag_vals[i] != last_set) {
04660             result = mbImpl->add_parent_child(tag_vals[i], *rit);
04661             RRA("Couldn't add parent/child link for interface set.");
04662             last_set = tag_vals[i];
04663           }
04664         }
04665       }
04666     }
04667   
04668     // delete the temporary tag
04669     result = mbImpl->tag_delete(tmp_iface_tag);
04670     RRA("Couldn't delete tmp iface tag.");
04671 
04672     return MB_SUCCESS;
04673   }
04674 
04675   ErrorCode ParallelComm::get_proc_nvecs(int resolve_dim,
04676                                          int shared_dim,
04677                                          Range *skin_ents,
04678                                          std::map<std::vector<int>, std::vector<EntityHandle> > &proc_nvecs) 
04679   {
04680     // set sharing procs tags on other skin ents
04681     ErrorCode result;
04682     const EntityHandle *connect; int num_connect;
04683     std::set<int> sharing_procs;
04684     std::vector<EntityHandle> dum_connect;
04685     std::vector<int> sp_vec;
04686 
04687     for (int d = 3; d > 0; d--) {
04688       if (resolve_dim == d) continue;
04689     
04690       for (Range::iterator rit = skin_ents[d].begin();
04691            rit != skin_ents[d].end(); rit++) {
04692         // get connectivity
04693         result = mbImpl->get_connectivity(*rit, connect, num_connect, false,
04694                                           &dum_connect);
04695         RRA("Failed to get connectivity on non-vertex skin entities.");
04696  
04697         int op = (resolve_dim < shared_dim ? Interface::UNION : Interface::INTERSECT);      
04698         result = get_sharing_data(connect, num_connect, sharing_procs, op);
04699         RRA("Failed to get sharing data in get_proc_nvecs");
04700         if (sharing_procs.empty() ||
04701             (sharing_procs.size() == 1 && *sharing_procs.begin() == (int)procConfig.proc_rank())) continue;
04702 
04703         // Need to specify sharing data correctly for entities or they will
04704         // end up in a different interface set than corresponding vertices
04705         if (sharing_procs.size() == 2) {
04706           std::set<int>::iterator it = sharing_procs.find( proc_config().proc_rank() );
04707           assert(it != sharing_procs.end());
04708           sharing_procs.erase( it );
04709         }
04710 
04711         // intersection is the owning proc(s) for this skin ent
04712         sp_vec.clear();
04713         std::copy(sharing_procs.begin(), sharing_procs.end(), std::back_inserter(sp_vec));
04714         assert(sp_vec.size() != 2);
04715         proc_nvecs[sp_vec].push_back(*rit);
04716       }
04717     }
04718 
04719 #ifndef NDEBUG
04720     // shouldn't be any repeated entities in any of the vectors in proc_nvecs
04721     for (std::map<std::vector<int>, std::vector<EntityHandle> >::iterator mit = proc_nvecs.begin();
04722          mit != proc_nvecs.end(); mit++) {
04723       std::vector<EntityHandle> tmp_vec = (mit->second);
04724       std::sort(tmp_vec.begin(), tmp_vec.end());
04725       std::vector<EntityHandle>::iterator vit = std::unique(tmp_vec.begin(), tmp_vec.end());
04726       assert(vit == tmp_vec.end());
04727     }
04728 #endif
04729   
04730     return MB_SUCCESS;
04731   }
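  // Illustrative sketch (not MOAB code): the debug block above checks each
  // proc_nvecs vector for repeated handles by sorting a copy and asserting
  // that std::unique finds nothing to collapse; the same idiom recurs after
  // tag_shared_verts.  Hypothetical standalone form of the check:
#if 0  // sketch only; kept out of compilation
  static bool has_no_duplicates( std::vector<EntityHandle> v )  // copy by value on purpose
  {
    std::sort( v.begin(), v.end() );
    // unique() returns end() iff no two equal values ended up adjacent
    return std::unique( v.begin(), v.end() ) == v.end();
  }
#endif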
04732 
04733   // Overloaded form of tag_shared_verts
04734   // Tuple coming in is of form (arbitrary value, remoteProc, localHandle, remoteHandle)
04735   // Also will check for doubles in the list if the list is sorted
04736   ErrorCode ParallelComm::tag_shared_verts(TupleList &shared_ents,
04737                                            std::map<std::vector<int>, std::vector<EntityHandle> > &proc_nvecs,
04738                                            Range& /*proc_verts*/,
04739                                            unsigned int i_extra) 
04740   {
04741     Tag shp_tag, shps_tag, shh_tag, shhs_tag, pstat_tag;
04742     ErrorCode result = get_shared_proc_tags(shp_tag, shps_tag, 
04743                                             shh_tag, shhs_tag, pstat_tag);
04744     RRA("Trouble getting shared proc tags in tag_shared_verts.");
04745   
04746     unsigned int j = 0, i = 0;
04747     std::vector<int> sharing_procs, sharing_procs2, tag_procs;
04748     std::vector<EntityHandle> sharing_handles, sharing_handles2, tag_lhandles, tag_rhandles;
04749     std::vector<unsigned char> pstatus;
04750   
04751     // We're on tuple j/2
04752     if (i_extra) i += i_extra;
04753     while (j < 2*shared_ents.get_n()) {
04754       // count & accumulate sharing procs
04755       EntityHandle this_ent = shared_ents.vul_rd[j], other_ent = 0;
04756       int other_proc = -1;
04757       while (j < 2*shared_ents.get_n() && shared_ents.vul_rd[j] == this_ent) {
04758         j++;
04759         // shouldn't have same proc
04760         assert(shared_ents.vi_rd[i] != (int)procConfig.proc_rank());
04761         // Grab the remote data if it's not a duplicate
04762         if(shared_ents.vul_rd[j] != other_ent || shared_ents.vi_rd[i] != other_proc){
04763           assert(0 != shared_ents.vul_rd[j]);
04764           sharing_procs.push_back( shared_ents.vi_rd[i] );
04765           sharing_handles.push_back( shared_ents.vul_rd[j] );
04766         }
04767         other_proc = shared_ents.vi_rd[i];
04768         other_ent = shared_ents.vul_rd[j];
04769         j++; i += 1 + i_extra;
04770       }
04771 
04772       if (sharing_procs.size() > 1) {
04773         // add current proc/handle to list
04774         sharing_procs.push_back(procConfig.proc_rank());
04775         sharing_handles.push_back(this_ent);
04776       
04777         // sort sharing_procs and sharing_handles such that
04778         // sharing_procs is in ascending order.  Use temporary
04779         // lists and binary search to re-order sharing_handles.
04780         sharing_procs2 = sharing_procs;
04781         std::sort( sharing_procs2.begin(), sharing_procs2.end() );
04782         sharing_handles2.resize( sharing_handles.size() );
04783         for (size_t k = 0; k < sharing_handles.size(); ++k) {
04784           size_t idx = std::lower_bound( sharing_procs2.begin(), 
04785                                          sharing_procs2.end(), 
04786                                          sharing_procs[k] ) - sharing_procs2.begin();
04787           sharing_handles2[idx] = sharing_handles[k];
04788         }
04789         sharing_procs.swap( sharing_procs2 );
04790         sharing_handles.swap( sharing_handles2 );
04791       }
04792     
04793       assert(sharing_procs.size() != 2);
04794       proc_nvecs[sharing_procs].push_back(this_ent);
04795 
04796       unsigned char share_flag = PSTATUS_SHARED, 
04797         ms_flag = (PSTATUS_SHARED | PSTATUS_MULTISHARED);
04798       if (sharing_procs.size() == 1) {
04799         tag_procs.push_back(sharing_procs[0]);
04800         tag_lhandles.push_back(this_ent);
04801         tag_rhandles.push_back(sharing_handles[0]);
04802         pstatus.push_back(share_flag);
04803       }
04804       else {
04805         // pad lists 
04806         //assert( sharing_procs.size() <= MAX_SHARING_PROCS );
04807         if (sharing_procs.size() > MAX_SHARING_PROCS) {
04808           std::cerr << "MAX_SHARING_PROCS exceeded for vertex " << this_ent <<
04809             " on process " << proc_config().proc_rank() <<  std::endl;
04810           std::cerr.flush();
04811           MPI_Abort(proc_config().proc_comm(), 66);
04812         }
04813         sharing_procs.resize( MAX_SHARING_PROCS, -1 );
04814         sharing_handles.resize( MAX_SHARING_PROCS, 0 );
04815         result = mbImpl->tag_set_data(shps_tag, &this_ent, 1,
04816                                       &sharing_procs[0]);
04817         result = mbImpl->tag_set_data(shhs_tag, &this_ent, 1,
04818                                       &sharing_handles[0]);
04819         result = mbImpl->tag_set_data(pstat_tag, &this_ent, 1, &ms_flag);
04820         RRA("Couldn't set multi-shared tag on shared vertex.");
04821         sharedEnts.push_back(this_ent);
04822       }
04823       RRA("Failed setting shared_procs tag on skin vertices.");
04824 
04825       // reset sharing proc(s) tags
04826       sharing_procs.clear();
04827       sharing_handles.clear();
04828     }
04829 
04830     if (!tag_procs.empty()) {
04831       result = mbImpl->tag_set_data(shp_tag, &tag_lhandles[0], tag_procs.size(),
04832                                     &tag_procs[0]);
04833       result = mbImpl->tag_set_data(shh_tag, &tag_lhandles[0], tag_procs.size(),
04834                                     &tag_rhandles[0]);
04835       result = mbImpl->tag_set_data(pstat_tag, &tag_lhandles[0], tag_procs.size(), &pstatus[0]);
04836       RRA("Couldn't set shared tag on shared vertex.");
04837       std::copy(tag_lhandles.begin(), tag_lhandles.end(), std::back_inserter(sharedEnts));
04838     }
04839   
04840 #ifndef NDEBUG
04841     // shouldn't be any repeated entities in any of the vectors in proc_nvecs
04842     for (std::map<std::vector<int>, std::vector<EntityHandle> >::iterator mit = proc_nvecs.begin();
04843          mit != proc_nvecs.end(); mit++) {
04844       std::vector<EntityHandle> tmp_vec = (mit->second);
04845       std::sort(tmp_vec.begin(), tmp_vec.end());
04846       std::vector<EntityHandle>::iterator vit = std::unique(tmp_vec.begin(), tmp_vec.end());
04847       assert(vit == tmp_vec.end());
04848     }
04849 #endif
04850   
04851     return MB_SUCCESS;
04852   }
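  // Illustrative sketch (not MOAB code): the handle reordering above permutes
  // sharing_handles to follow the ascending order of sharing_procs; each
  // original proc is located in the sorted copy with std::lower_bound and its
  // handle written to that slot.  Hypothetical helper (assumes the procs are
  // distinct, as they are above):
#if 0  // sketch only; kept out of compilation
  static void reorder_handles_by_proc( std::vector<int>& procs,
                                       std::vector<EntityHandle>& handles )
  {
    std::vector<int> sorted_procs( procs );
    std::sort( sorted_procs.begin(), sorted_procs.end() );
    std::vector<EntityHandle> reordered( handles.size() );
    for (size_t k = 0; k < handles.size(); ++k) {
      // destination of handles[k] = position of procs[k] in the sorted list
      size_t idx = std::lower_bound( sorted_procs.begin(), sorted_procs.end(),
                                     procs[k] ) - sorted_procs.begin();
      reordered[idx] = handles[k];
    }
    procs.swap( sorted_procs );
    handles.swap( reordered );
  }
#endif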
04853  
04854   ErrorCode ParallelComm::tag_shared_verts(TupleList &shared_ents,
04855                                            Range *skin_ents,
04856                                            std::map<std::vector<int>, std::vector<EntityHandle> > &proc_nvecs,
04857                                            Range& /*proc_verts*/) 
04858   {
04859     Tag shp_tag, shps_tag, shh_tag, shhs_tag, pstat_tag;
04860     ErrorCode result = get_shared_proc_tags(shp_tag, shps_tag, 
04861                                             shh_tag, shhs_tag, pstat_tag);
04862     RRA("Trouble getting shared proc tags in tag_shared_verts.");
04863   
04864     unsigned int j = 0, i = 0;
04865     std::vector<int> sharing_procs, sharing_procs2;
04866     std::vector<EntityHandle> sharing_handles, sharing_handles2, skin_verts(skin_ents[0].size());
04867     for (Range::iterator rit = skin_ents[0].begin(); rit != skin_ents[0].end(); rit++, i++)
04868       skin_verts[i] = *rit;
04869     i = 0;
04870   
04871     while (j < 2*shared_ents.get_n()) {
04872       // count & accumulate sharing procs
04873       int this_idx = shared_ents.vi_rd[j];
04874       EntityHandle this_ent = skin_verts[this_idx];
04875       while (j < 2*shared_ents.get_n() && shared_ents.vi_rd[j] == this_idx) {
04876         j++;
04877         // shouldn't have same proc
04878         assert(shared_ents.vi_rd[j] != (int)procConfig.proc_rank());
04879         sharing_procs.push_back( shared_ents.vi_rd[j++] );
04880         sharing_handles.push_back( shared_ents.vul_rd[i++] );
04881       }
04882 
04883       if (sharing_procs.size() > 1) {
04884         // add current proc/handle to list
04885         sharing_procs.push_back(procConfig.proc_rank());
04886         sharing_handles.push_back(this_ent);
04887       }
04888       
04889       // sort sharing_procs and sharing_handles such that
04890       // sharing_procs is in ascending order.  Use temporary
04891       // lists and binary search to re-order sharing_handles.
04892       sharing_procs2 = sharing_procs;
04893       std::sort( sharing_procs2.begin(), sharing_procs2.end() );
04894       sharing_handles2.resize( sharing_handles.size() );
04895       for (size_t k = 0; k < sharing_handles.size(); ++k) {
04896         size_t idx = std::lower_bound( sharing_procs2.begin(), 
04897                                        sharing_procs2.end(), 
04898                                        sharing_procs[k] ) - sharing_procs2.begin();
04899         sharing_handles2[idx] = sharing_handles[k];
04900       }
04901       sharing_procs.swap( sharing_procs2 );
04902       sharing_handles.swap( sharing_handles2 );
04903     
04904       assert(sharing_procs.size() != 2);
04905       proc_nvecs[sharing_procs].push_back(this_ent);
04906 
04907       unsigned char share_flag = PSTATUS_SHARED, 
04908         ms_flag = (PSTATUS_SHARED | PSTATUS_MULTISHARED);
04909       if (sharing_procs.size() == 1) {
04910         result = mbImpl->tag_set_data(shp_tag, &this_ent, 1,
04911                                       &sharing_procs[0]);
04912         result = mbImpl->tag_set_data(shh_tag, &this_ent, 1,
04913                                       &sharing_handles[0]);
04914         result = mbImpl->tag_set_data(pstat_tag, &this_ent, 1, &share_flag);
04915         RRA("Couldn't set shared tag on shared vertex.");
04916         sharedEnts.push_back(this_ent);
04917       }
04918       else {
04919         // pad lists 
04920         //assert( sharing_procs.size() <= MAX_SHARING_PROCS );
04921         if (sharing_procs.size() > MAX_SHARING_PROCS) {
04922           std::cerr << "MAX_SHARING_PROCS exceeded for vertex " << this_ent <<
04923             " on process " << proc_config().proc_rank() <<  std::endl;
04924           std::cerr.flush();
04925           MPI_Abort(proc_config().proc_comm(), 66);
04926         }
04927         sharing_procs.resize( MAX_SHARING_PROCS, -1 );
04928         sharing_handles.resize( MAX_SHARING_PROCS, 0 );
04929         result = mbImpl->tag_set_data(shps_tag, &this_ent, 1,
04930                                       &sharing_procs[0]);
04931         result = mbImpl->tag_set_data(shhs_tag, &this_ent, 1,
04932                                       &sharing_handles[0]);
04933         result = mbImpl->tag_set_data(pstat_tag, &this_ent, 1, &ms_flag);
04934         RRA("Couldn't set multi-shared tag on shared vertex.");
04935         sharedEnts.push_back(this_ent);
04936       }
04937       RRA("Failed setting shared_procs tag on skin vertices.");
04938 
04939       // reset sharing proc(s) tags
04940       sharing_procs.clear();
04941       sharing_handles.clear();
04942     }
04943 
04944 #ifndef NDEBUG
04945     // shouldn't be any repeated entities in any of the vectors in proc_nvecs
04946     for (std::map<std::vector<int>, std::vector<EntityHandle> >::iterator mit = proc_nvecs.begin();
04947          mit != proc_nvecs.end(); mit++) {
04948       std::vector<EntityHandle> tmp_vec = (mit->second);
04949       std::sort(tmp_vec.begin(), tmp_vec.end());
04950       std::vector<EntityHandle>::iterator vit = std::unique(tmp_vec.begin(), tmp_vec.end());
04951       assert(vit == tmp_vec.end());
04952     }
04953 #endif
04954   
04955     return MB_SUCCESS;
04956   }
04957   
04959   ErrorCode ParallelComm::get_interface_procs(std::set<unsigned int> &procs_set,
04960                                               bool get_buffs)
04961   {
04962     // make sure the set of sharing procs is empty
04963     procs_set.clear();
04964 
04965     // pre-load vector of single-proc tag values
04966     unsigned int i, j;
04967     std::vector<int> iface_proc(interfaceSets.size());
04968     ErrorCode result = mbImpl->tag_get_data(sharedp_tag(), interfaceSets, &iface_proc[0]);
04969     RRA("Failed to get iface_proc for iface sets.");
04970 
04971     // get sharing procs either from single-proc vector or by getting
04972     // multi-proc tag value
04973     int tmp_iface_procs[MAX_SHARING_PROCS];
04974     std::fill(tmp_iface_procs, tmp_iface_procs+MAX_SHARING_PROCS, -1);
04975     Range::iterator rit;
04976     for (rit = interfaceSets.begin(), i = 0; rit != interfaceSets.end(); rit++, i++) {
04977       if (-1 != iface_proc[i]) {
04978         assert(iface_proc[i] != (int)procConfig.proc_rank());
04979         procs_set.insert((unsigned int) iface_proc[i]);
04980       }    
04981       else {
04982         // get the sharing_procs tag
04983         result = mbImpl->tag_get_data(sharedps_tag(), &(*rit), 1,
04984                                       tmp_iface_procs);
04985         RRA("Failed to get iface_procs for iface set.");
04986         for (j = 0; j < MAX_SHARING_PROCS; j++) {
04987           if (-1 != tmp_iface_procs[j] && tmp_iface_procs[j] != (int)procConfig.proc_rank()) 
04988             procs_set.insert((unsigned int) tmp_iface_procs[j]);
04989           else if (-1 == tmp_iface_procs[j]) {
04990             std::fill(tmp_iface_procs, tmp_iface_procs+j, -1);
04991             break;
04992           }
04993         }
04994       }
04995     }
04996 
04997     if (get_buffs) {
04998       for (std::set<unsigned int>::iterator sit = procs_set.begin(); sit != procs_set.end(); sit++)
04999         get_buffers(*sit);
05000     }
05001   
05002     return MB_SUCCESS;
05003   }
05004   
05005   ErrorCode ParallelComm::get_pstatus(EntityHandle entity,
05006                                       unsigned char &pstatus_val)
05007   {
05008     ErrorCode result = mbImpl->tag_get_data(pstatus_tag(), &entity, 1, &pstatus_val);
05009     RRA("Couldn't get pstatus tag.");
05010     return result;
05011   }
05012 
05013   ErrorCode ParallelComm::get_pstatus_entities(int dim,
05014                                                unsigned char pstatus_val,
05015                                                Range &pstatus_ents)
05016   {
05017     Range ents;
05018     ErrorCode result;
05019   
05020     if (-1 == dim) result = mbImpl->get_entities_by_handle(0, ents);
05021     else result = mbImpl->get_entities_by_dimension(0, dim, ents);
05022     RRA(" ");
05023   
05024     std::vector<unsigned char> pstatus(ents.size());
05025     result = mbImpl->tag_get_data(pstatus_tag(), ents, &pstatus[0]);
05026     RRA("Couldn't get pstatus tag.");
05027     Range::iterator rit = ents.begin();
05028     int i = 0;
05029     if (pstatus_val) {
05030       for (; rit != ents.end(); i++, rit++)
05031         if (pstatus[i]&pstatus_val &&
05032             (-1 == dim || mbImpl->dimension_from_handle(*rit) == dim)) 
05033           pstatus_ents.insert(*rit);
05034     }
05035     else {
05036       for (; rit != ents.end(); i++, rit++)
05037         if (!pstatus[i] &&
05038             (-1 == dim || mbImpl->dimension_from_handle(*rit) == dim)) 
05039           pstatus_ents.insert(*rit);
05040     }
05041   
05042     return MB_SUCCESS;
05043   }
05044 
05045   ErrorCode ParallelComm::check_global_ids(EntityHandle this_set,
05046                                            const int dimension, 
05047                                            const int start_id,
05048                                            const bool largest_dim_only,
05049                                            const bool parallel,
05050                                            const bool owned_only)
05051   {
05052     // global id tag
05053     Tag gid_tag; int def_val = -1;
05054     ErrorCode result = mbImpl->tag_get_handle(GLOBAL_ID_TAG_NAME, 1, MB_TYPE_INTEGER,
05055                                               gid_tag, MB_TAG_DENSE|MB_TAG_CREAT, &def_val);
05056     if (MB_ALREADY_ALLOCATED != result &&
05057         MB_SUCCESS != result) {
05058       RRA("Failed to create/get gid tag handle.");
05059     }
05060 
05061     Range dum_range;
05062     if (MB_ALREADY_ALLOCATED == result) {
05063       void *tag_ptr = &def_val;
05064       ErrorCode tmp_result = mbImpl->get_entities_by_type_and_tag(this_set, MBVERTEX, 
05065                                                                   &gid_tag, &tag_ptr, 1,
05066                                                                   dum_range);
05067       if (MB_SUCCESS != tmp_result) {
05068         result = tmp_result;
05069         RRA("Failed to get gid tag.");
05070       }
05071     }
05072   
05073     if (MB_ALREADY_ALLOCATED != result || !dum_range.empty()) {
05074       // tag was just created, or some vertices still have the default id, so assign global ids
05075       result = assign_global_ids(this_set, dimension, start_id, largest_dim_only,
05076                                  parallel,owned_only);
05077       RRA("Failed assigning global ids.");
05078     }
05079 
05080     return MB_SUCCESS;
05081   }
05082 
05083   bool ParallelComm::is_iface_proc(EntityHandle this_set,
05084                                    int to_proc) 
05085   {
05086     int sharing_procs[MAX_SHARING_PROCS];
05087     std::fill(sharing_procs, sharing_procs+MAX_SHARING_PROCS, -1);
05088     ErrorCode result = mbImpl->tag_get_data(sharedp_tag(), &this_set, 1,
05089                                             sharing_procs);
05090     if (MB_SUCCESS == result && to_proc == sharing_procs[0]) return true;
05091   
05092     result = mbImpl->tag_get_data(sharedps_tag(), &this_set, 1,
05093                                   sharing_procs);
05094     if (MB_SUCCESS != result) return false;
05095 
05096     for (int i = 0; i < MAX_SHARING_PROCS; i++) {
05097       if (to_proc == sharing_procs[i]) return true;
05098       else if (-1 == sharing_procs[i]) return false;
05099     }
05100   
05101     return false;
05102   }
05103 
05104   ErrorCode ParallelComm::filter_pstatus( Range &ents,
05105                                           unsigned char pstat,
05106                                           unsigned char op,
05107                                           int to_proc,
05108                                           Range *returned_ents)
05109   {
05110     Range tmp_ents;
05111 
05112     //assert(!ents.empty());
05113     if (ents.empty()) {
05114       if (returned_ents)
05115         returned_ents->clear();
05116       return MB_SUCCESS;
05117     }
05118 
05119     // Put into tmp_ents any entities which are not owned locally or
05120     // which are already shared with to_proc
05121     std::vector<unsigned char> shared_flags(ents.size()), shared_flags2;
05122     ErrorCode result = mbImpl->tag_get_data(pstatus_tag(), ents,
05123                                             &shared_flags[0]);
05124     RRA("Failed to get pstatus flag.");
05125     Range::const_iterator rit, hint = tmp_ents.begin();
05126     int i;
05127     if (op == PSTATUS_OR) {
05128       for (rit = ents.begin(), i = 0; rit != ents.end(); rit++, i++) 
05129         if (((shared_flags[i] & ~pstat)^shared_flags[i]) & pstat) {
05130           hint = tmp_ents.insert(hint,*rit);
05131           if (-1 != to_proc) shared_flags2.push_back(shared_flags[i]);
05132         }
05133     }
05134     else if (op == PSTATUS_AND) {
05135       for (rit = ents.begin(), i = 0; rit != ents.end(); rit++, i++)
05136         if ((shared_flags[i] & pstat) == pstat) {
05137           hint = tmp_ents.insert(hint,*rit);
05138           if (-1 != to_proc) shared_flags2.push_back(shared_flags[i]);
05139         }
05140     }
05141     else if (op == PSTATUS_NOT) {
05142       for (rit = ents.begin(), i = 0; rit != ents.end(); rit++, i++)
05143         if (!(shared_flags[i] & pstat)) {
05144           hint = tmp_ents.insert(hint,*rit);
05145           if (-1 != to_proc) shared_flags2.push_back(shared_flags[i]);
05146         }
05147     }
05148     else {
05149       assert(false);
05150       return MB_FAILURE;
05151     }
05152 
05153     if (-1 != to_proc) {
05154 
05155       int sharing_procs[MAX_SHARING_PROCS];
05156       std::fill(sharing_procs, sharing_procs+MAX_SHARING_PROCS, -1);
05157       Range tmp_ents2;
05158       hint = tmp_ents2.begin();
05159 
05160       for (rit = tmp_ents.begin(), i = 0; rit != tmp_ents.end(); rit++, i++) {
05161         // we need to check sharing procs
05162         if (shared_flags2[i] & PSTATUS_MULTISHARED) {
05163           result = mbImpl->tag_get_data(sharedps_tag(), &(*rit), 1,
05164                                         sharing_procs);
05165           assert(-1 != sharing_procs[0]);
05166           RRA(" ");
05167           for (unsigned int j = 0; j < MAX_SHARING_PROCS; j++) {
05168             // if to_proc shares this entity, add it to list
05169             if (sharing_procs[j] == to_proc) {
05170               hint = tmp_ents2.insert(hint, *rit);
05171             }
05172             else if (sharing_procs[j] == -1) break;
05173 
05174             sharing_procs[j] = -1;
05175           }
05176         }
05177         else if (shared_flags2[i] & PSTATUS_SHARED) {
05178           result = mbImpl->tag_get_data(sharedp_tag(), &(*rit), 1,
05179                                         sharing_procs);
05180           RRA(" ");
05181           assert(-1 != sharing_procs[0]);
05182           if (sharing_procs[0] == to_proc) 
05183             hint = tmp_ents2.insert(hint,*rit);
05184           sharing_procs[0] = -1;
05185         }
05186         else
05187           assert("should never get here" && false);
05188       }
05189 
05190       tmp_ents.swap(tmp_ents2);
05191     }
05192   
05193     if (returned_ents)
05194       returned_ents->swap(tmp_ents);
05195     else
05196       ents.swap(tmp_ents);
05197   
05198     return MB_SUCCESS;
05199   }
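  // Illustrative sketch (not MOAB code): the PSTATUS_OR branch above tests
  // ((flags & ~pstat) ^ flags) & pstat, which reduces to flags & pstat:
  // clearing the requested bits and XORing with the original isolates exactly
  // the requested bits that were set.  Exhaustive check of the equivalence:
#if 0  // sketch only; kept out of compilation
  static void check_or_test_equivalence()
  {
    for (unsigned f = 0; f < 256; ++f) {
      for (unsigned p = 0; p < 256; ++p) {
        unsigned char flags = (unsigned char)f, pstat = (unsigned char)p;
        unsigned char original   = ((flags & ~pstat) ^ flags) & pstat;
        unsigned char simplified = flags & pstat;
        assert( original == simplified );
      }
    }
  }
#endif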
05200 
05201   ErrorCode ParallelComm::exchange_ghost_cells(int ghost_dim, int bridge_dim,
05202                                                int num_layers, int addl_ents,
05203                                                bool store_remote_handles,
05204                                                bool wait_all,
05205                                                EntityHandle *file_set)
05206   {
05207 #ifdef USE_MPE
05208     if (myDebug->get_verbosity() == 2) {
05209       if (!num_layers)
05210         MPE_Log_event(IFACE_START, procConfig.proc_rank(), "Starting interface exchange.");
05211       else
05212         MPE_Log_event(GHOST_START, procConfig.proc_rank(), "Starting ghost exchange.");
05213     }
05214 #endif
05215 
05216     myDebug->tprintf(1, "Entering exchange_ghost_cells with num_layers = %d\n", num_layers);
05217     if (myDebug->get_verbosity() == 4) {
05218       msgs.clear();
05219       msgs.reserve(MAX_SHARING_PROCS);
05220     }
05221   
05222     // if we're only finding out about existing ents, we have to be storing
05223     // remote handles too
05224     assert(num_layers > 0 || store_remote_handles);
05225   
05226     const bool is_iface = !num_layers;
05227 
05228     // get the b-dimensional interface(s) with each communicating proc, where b = bridge_dim
05229   
05230     int success;
05231     ErrorCode result = MB_SUCCESS;
05232     int incoming1 = 0, incoming2 = 0;
05233 
05234     reset_all_buffers();
05235   
05236     // when this function is called, buffProcs should already have any 
05237     // communicating procs
05238 
05239     //===========================================
05240     // post ghost irecv's for ghost entities from all communicating procs
05241     //===========================================
05242 #ifdef USE_MPE
05243     if (myDebug->get_verbosity() == 2) {
05244       MPE_Log_event(ENTITIES_START, procConfig.proc_rank(), "Starting entity exchange.");
05245     }
05246 #endif
05247   
05248     // index reqs the same as buffer/sharing procs indices
05249     std::vector<MPI_Request> recv_ent_reqs(2*buffProcs.size(), MPI_REQUEST_NULL),
05250       recv_remoteh_reqs(2*buffProcs.size(), MPI_REQUEST_NULL);
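    // Request vectors hold two slots per communicating proc: the even slot (2*ind)
    // receives the initial INITIAL_BUFF_SIZE message (and is reused for the second
    // part of a large incoming message); the odd slot (2*ind+1) receives the ack
    // that must arrive before the second part of a large outgoing message is sent.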
05251     std::vector<unsigned int>::iterator proc_it;
05252     int ind, p;
05253     sendReqs.resize(2*buffProcs.size(), MPI_REQUEST_NULL);
05254     for (ind = 0, proc_it = buffProcs.begin(); 
05255          proc_it != buffProcs.end(); proc_it++, ind++) {
05256       incoming1++;
05257       PRINT_DEBUG_IRECV(procConfig.proc_rank(), buffProcs[ind], 
05258                         remoteOwnedBuffs[ind]->mem_ptr, INITIAL_BUFF_SIZE, 
05259                         MB_MESG_ENTS_SIZE, incoming1);
05260       success = MPI_Irecv(remoteOwnedBuffs[ind]->mem_ptr, INITIAL_BUFF_SIZE, 
05261                           MPI_UNSIGNED_CHAR, buffProcs[ind],
05262                           MB_MESG_ENTS_SIZE, procConfig.proc_comm(), 
05263                           &recv_ent_reqs[2*ind]);
05264       if (success != MPI_SUCCESS) {
05265         result = MB_FAILURE;
05266         RRA("Failed to post irecv in ghost exchange.");
05267       }
05268     }
05269   
05270     //===========================================
05271     // get entities to be sent to neighbors
05272     //===========================================
05273 
05274     Range sent_ents[MAX_SHARING_PROCS], allsent, tmp_range;
05275     TupleList entprocs;
05276     int dum_ack_buff;
05277     result = get_sent_ents(is_iface, bridge_dim, ghost_dim, num_layers,
05278                            addl_ents, sent_ents, allsent, entprocs);
05279     RRA("get_sent_ents failed.");
05280 
05281     myDebug->tprintf(1, "allsent ents compactness (size) = %f (%lu)\n", allsent.compactness(),
05282                      (unsigned long)allsent.size());
05283 
05284     //===========================================
05285     // pack and send ents from this proc to others
05286     //===========================================
05287     for (p = 0, proc_it = buffProcs.begin(); 
05288          proc_it != buffProcs.end(); proc_it++, p++) {
05289 
05290       myDebug->tprintf(1, "Sent ents compactness (size) = %f (%lu)\n", sent_ents[p].compactness(),
05291                        (unsigned long)sent_ents[p].size());
05292     
05293       // reserve space on front for size and for initial buff size
05294       localOwnedBuffs[p]->reset_buffer(sizeof(int));
05295 
05296       // entities
05297       result = pack_entities(sent_ents[p], localOwnedBuffs[p], 
05298                              store_remote_handles, buffProcs[p], is_iface,
05299                              &entprocs, &allsent); 
05300       RRA("Packing entities failed.");
05301 
05302       if (myDebug->get_verbosity() == 4) {
05303         msgs.resize(msgs.size()+1);
05304         msgs.back() = new Buffer(*localOwnedBuffs[p]);
05305       }
05306 
05307       // send the buffer (size stored in front in send_buffer)
05308       result = send_buffer(*proc_it, localOwnedBuffs[p], 
05309                            MB_MESG_ENTS_SIZE, sendReqs[2*p], 
05310                            recv_ent_reqs[2*p+1], &dum_ack_buff,
05311                            incoming1,
05312                            MB_MESG_REMOTEH_SIZE, 
05313                            (!is_iface && store_remote_handles ? 
05314                             localOwnedBuffs[p] : NULL),
05315                            &recv_remoteh_reqs[2*p], &incoming2);
05316       RRA("Failed to Isend in ghost exchange.");
05317     }
05318 
05319     entprocs.reset();
05320 
05321     //===========================================
05322     // receive/unpack new entities
05323     //===========================================
05324     // number of incoming messages for ghosts is the number of procs we 
05325     // communicate with; for iface, it's the number of those with lower rank
05326     MPI_Status status;
05327     std::vector<std::vector<EntityHandle> > recd_ents(buffProcs.size());
05328     std::vector<std::vector<EntityHandle> > L1hloc(buffProcs.size()), L1hrem(buffProcs.size());
05329     std::vector<std::vector<int> > L1p(buffProcs.size());
05330     std::vector<EntityHandle> L2hloc, L2hrem;
05331     std::vector<unsigned int> L2p;
05332     std::vector<EntityHandle> new_ents;
05333   
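    // Each completed receive maps back to its proc via ind/2 (two request slots per
    // proc); recv_buffer sets done == true only once an entire message, possibly
    // delivered in two parts, has arrived and is ready to unpack.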
05334     while (incoming1) {
05335       // wait for all recvs of ghost ents before proceeding to sending remote handles,
05336       // b/c some procs may have sent to a 3rd proc ents owned by me;
05337       PRINT_DEBUG_WAITANY(recv_ent_reqs, MB_MESG_ENTS_SIZE, procConfig.proc_rank());
05338     
05339       success = MPI_Waitany(2*buffProcs.size(), &recv_ent_reqs[0], &ind, &status);
05340       if (MPI_SUCCESS != success) {
05341         result = MB_FAILURE;
05342         RRA("Failed in waitany in ghost exchange.");
05343       }
05344 
05345       PRINT_DEBUG_RECD(status);
05346     
05347       // ok, received something; decrement incoming counter
05348       incoming1--;
05349       bool done = false;
05350 
05351       // In case ind is for ack, we need index of one before it
05352       unsigned int base_ind = 2*(ind/2);
05353       result = recv_buffer(MB_MESG_ENTS_SIZE,
05354                            status,
05355                            remoteOwnedBuffs[ind/2],
05356                            recv_ent_reqs[ind], recv_ent_reqs[ind+1],
05357                            incoming1,
05358                            localOwnedBuffs[ind/2], sendReqs[base_ind], sendReqs[base_ind+1],
05359                            done,
05360                            (!is_iface && store_remote_handles ? 
05361                             localOwnedBuffs[ind/2] : NULL),
05362                            MB_MESG_REMOTEH_SIZE,
05363                            &recv_remoteh_reqs[base_ind], &incoming2);
05364       RRA("Failed to receive buffer.");
05365 
05366       if (done) {
05367         if (myDebug->get_verbosity() == 4) {
05368           msgs.resize(msgs.size()+1);
05369           msgs.back() = new Buffer(*remoteOwnedBuffs[ind/2]);
05370         }
05371 
05372         // message completely received - process buffer that was sent
05373         remoteOwnedBuffs[ind/2]->reset_ptr(sizeof(int));
05374         result = unpack_entities(remoteOwnedBuffs[ind/2]->buff_ptr,
05375                                  store_remote_handles, ind/2, is_iface,
05376                                  L1hloc, L1hrem, L1p, L2hloc, L2hrem, L2p, new_ents);
05377         if (MB_SUCCESS != result) {
05378           std::cout << "Failed to unpack entities.  Buffer contents:" << std::endl;
05379           print_buffer(remoteOwnedBuffs[ind/2]->mem_ptr, MB_MESG_ENTS_SIZE, buffProcs[ind/2], false);
05380           return result;
05381         }
05382 
05383         if (recv_ent_reqs.size() != 2*buffProcs.size()) {
05384           // post irecv's for remote handles from new proc; shouldn't be iface, 
05385           // since we know about all procs we share with
05386           assert(!is_iface);
05387           recv_remoteh_reqs.resize(2*buffProcs.size(), MPI_REQUEST_NULL);
05388           for (unsigned int i = recv_ent_reqs.size(); i < 2*buffProcs.size(); i+=2) {
05389             localOwnedBuffs[i/2]->reset_buffer();
05390             incoming2++;
05391             PRINT_DEBUG_IRECV(procConfig.proc_rank(), buffProcs[i/2], 
05392                               localOwnedBuffs[i/2]->mem_ptr, INITIAL_BUFF_SIZE,
05393                               MB_MESG_REMOTEH_SIZE, incoming2);
05394             success = MPI_Irecv(localOwnedBuffs[i/2]->mem_ptr, INITIAL_BUFF_SIZE, 
05395                                 MPI_UNSIGNED_CHAR, buffProcs[i/2],
05396                                 MB_MESG_REMOTEH_SIZE, procConfig.proc_comm(), 
05397                                 &recv_remoteh_reqs[i]);
05398             if (success != MPI_SUCCESS) {
05399               result = MB_FAILURE;
05400               RRA("Failed to post irecv for remote handles in ghost exchange.");
05401             }
05402           }
05403           recv_ent_reqs.resize(2*buffProcs.size(), MPI_REQUEST_NULL);
05404           sendReqs.resize(2*buffProcs.size(), MPI_REQUEST_NULL);
05405         }
05406       }
05407     }
05408   
05409     // add requests for any new addl procs
05410     if (recv_ent_reqs.size() != 2*buffProcs.size()) {
05411       // shouldn't get here...
05412       result = MB_FAILURE;
05413       RRA("Requests length doesn't match proc count in ghost exchange.");
05414     }
05415     
05416 #ifdef USE_MPE
05417     if (myDebug->get_verbosity() == 2) {
05418       MPE_Log_event(ENTITIES_END, procConfig.proc_rank(), "Ending entity exchange.");
05419     }
05420 #endif
05421   
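    // Interface exchange (num_layers == 0) finishes once the interface sets are
    // cleaned and tagged below; only the ghost-exchange path continues on to trade
    // remote handles for the newly created ghost entities.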
05422     if (is_iface) {
05423       // need to check over entities I sent and make sure I received 
05424       // handles for them from all expected procs; if not, need to clean
05425       // them up
05426       result = check_clean_iface(allsent);
05427       if (MB_SUCCESS != result) std::cout << "Failed check." << std::endl;
05428     
05429       // now set the shared/interface tag on non-vertex entities on interface
05430       result = tag_iface_entities();
05431       RRA("Failed to tag iface entities.");
05432 
05433 #ifndef NDEBUG
05434       result = check_sent_ents(allsent);
05435       if (MB_SUCCESS != result) std::cout << "Failed check." << std::endl;
05436       result = check_all_shared_handles(true);
05437       if (MB_SUCCESS != result) std::cout << "Failed check." << std::endl;
05438 #endif
05439 
05440 #ifdef USE_MPE
05441       if (myDebug->get_verbosity() == 2) {
05442         MPE_Log_event(IFACE_END, procConfig.proc_rank(), "Ending interface exchange.");
05443       }
05444 #endif
05445 
05446       //===========================================
05447       // wait if requested
05448       //===========================================
05449       if (wait_all) {
05450         if (myDebug->get_verbosity() == 5) {
05451           success = MPI_Barrier(procConfig.proc_comm());
05452         }
05453         else {
05454           MPI_Status mult_status[2*MAX_SHARING_PROCS];
05455           success = MPI_Waitall(2*buffProcs.size(), &recv_ent_reqs[0], mult_status);
05456           success = MPI_Waitall(2*buffProcs.size(), &sendReqs[0], mult_status);
05457         }
05458         if (MPI_SUCCESS != success) {
05459           result = MB_FAILURE;
05460           RRA("Failed in waitall in ghost exchange.");
05461         }
05462       }
05463 
05464       myDebug->tprintf(1, "Total number of shared entities = %lu.\n", (unsigned long)sharedEnts.size());
05465       myDebug->tprintf(1, "Exiting exchange_ghost_cells\n");
05466 
05467       return MB_SUCCESS;
05468     }
05469 
05470     //===========================================
05471     // send local handles for new ghosts to owner, then add
05472     // those to ghost list for that owner
05473     //===========================================
05474     for (p = 0, proc_it = buffProcs.begin(); 
05475          proc_it != buffProcs.end(); proc_it++, p++) {
05476 
05477       // reserve space on front for size and for initial buff size
05478       remoteOwnedBuffs[p]->reset_buffer(sizeof(int));
05479 
05480       result = pack_remote_handles(L1hloc[p], L1hrem[p], L1p[p], *proc_it,
05481                                    remoteOwnedBuffs[p]);
05482       RRA("Failed to pack remote handles.");
05483       remoteOwnedBuffs[p]->set_stored_size();
05484 
05485       if (myDebug->get_verbosity() == 4) {
05486         msgs.resize(msgs.size()+1);
05487         msgs.back() = new Buffer(*remoteOwnedBuffs[p]);
05488       }
05489       result = send_buffer(buffProcs[p], remoteOwnedBuffs[p], 
05490                            MB_MESG_REMOTEH_SIZE, 
05491                            sendReqs[2*p], recv_remoteh_reqs[2*p+1], 
05492                            &dum_ack_buff, incoming2);
05493       RRA("Failed to send remote handles.");
05494     }
05495   
05496     //===========================================
05497     // process remote handles of my ghosteds
05498     //===========================================
05499     while (incoming2) {
05500       PRINT_DEBUG_WAITANY(recv_remoteh_reqs, MB_MESG_REMOTEH_SIZE, procConfig.proc_rank());
05501       success = MPI_Waitany(2*buffProcs.size(), &recv_remoteh_reqs[0], &ind, &status);
05502       if (MPI_SUCCESS != success) {
05503         result = MB_FAILURE;
05504         RRA("Failed in waitany in ghost exchange.");
05505       }
05506     
05507       // ok, received something; decrement incoming counter
05508       incoming2--;
05509 
05510       PRINT_DEBUG_RECD(status);
05511     
05512       bool done = false;
05513       unsigned int base_ind = 2*(ind/2);
05514       result = recv_buffer(MB_MESG_REMOTEH_SIZE, status, 
05515                            localOwnedBuffs[ind/2], 
05516                            recv_remoteh_reqs[ind], recv_remoteh_reqs[ind+1], incoming2,
05517                            remoteOwnedBuffs[ind/2], 
05518                            sendReqs[base_ind], sendReqs[base_ind+1],
05519                            done);
05520       RRA("Failed to receive remote handles.");
05521       if (done) {
05522         // incoming remote handles
05523         if (myDebug->get_verbosity() == 4) {
05524           msgs.resize(msgs.size()+1);
05525           msgs.back() = new Buffer(*localOwnedBuffs[ind/2]);
05526         }
05527         localOwnedBuffs[ind/2]->reset_ptr(sizeof(int));
05528         result = unpack_remote_handles(buffProcs[ind/2], 
05529                                        localOwnedBuffs[ind/2]->buff_ptr,
05530                                        L2hloc, L2hrem, L2p);
05531         RRA("Failed to unpack remote handles.");
05532       }
05533     }
05534     
05535 #ifdef USE_MPE
05536     if (myDebug->get_verbosity() == 2) {
05537       MPE_Log_event(RHANDLES_END, procConfig.proc_rank(), "Ending remote handles.");
05538       MPE_Log_event(GHOST_END, procConfig.proc_rank(), 
05539                     "Ending ghost exchange (still doing checks).");
05540     }
05541 #endif
05542   
05543     //===========================================
05544     // wait if requested
05545     //===========================================
05546     if (wait_all) {
05547       if (myDebug->get_verbosity() == 5) {
05548         success = MPI_Barrier(procConfig.proc_comm());
05549       }
05550       else {
05551         MPI_Status mult_status[2*MAX_SHARING_PROCS];
05552         success = MPI_Waitall(2*buffProcs.size(), &recv_remoteh_reqs[0], mult_status);
05553         success = MPI_Waitall(2*buffProcs.size(), &sendReqs[0], mult_status);
05554       }
05555       if (MPI_SUCCESS != success) {
05556         result = MB_FAILURE;
05557         RRA("Failed in waitall in ghost exchange.");
05558       }
05559     }
05560 
05561 #ifndef NDEBUG
05562     result = check_sent_ents(allsent);
05563     RRA("Failed check on shared entities.");
05564     result = check_all_shared_handles(true);
05565     RRA("Failed check on all shared handles.");
05566 #endif
05567 
05568     if (file_set && !new_ents.empty()) {
05569       result = mbImpl->add_entities(*file_set, &new_ents[0], new_ents.size());
05570       RRA("Failed to add new entities to set.");
05571     }
05572 
05573     myDebug->tprintf(1, "Total number of shared entities = %lu.\n", (unsigned long)sharedEnts.size());
05574     myDebug->tprintf(1, "Exiting exchange_ghost_cells\n");
05575 
05576     return MB_SUCCESS;
05577   }
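  // Illustrative call sequence (a minimal sketch, not part of this file): an
  // application typically resolves shared entities and then requests one layer of
  // 3-d ghost elements bridged through vertices, e.g.
  //
  //   moab::ParallelComm pcomm(&mb, MPI_COMM_WORLD);
  //   ErrorCode rval = pcomm.resolve_shared_ents(0, 3, 2);
  //   rval = pcomm.exchange_ghost_cells(3 /*ghost_dim*/, 0 /*bridge_dim*/,
  //                                     1 /*num_layers*/, 0 /*addl_ents*/,
  //                                     true /*store_remote_handles*/);
  //
  // Argument defaults and the exact resolve_shared_ents signature differ between
  // MOAB versions; treat the setup above as an assumption, not a verbatim reference.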
05578 
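  // Message protocol shared by send_buffer/recv_buffer below: the first fragment of
  // every message is at most INITIAL_BUFF_SIZE bytes and carries the total stored
  // size at its front.  If the full buffer is larger, the receiver posts a second
  // receive on tag mesg_tag+1 and replies with an ack on tag mesg_tag-1; only after
  // receiving that ack does the sender transmit the remainder of the buffer.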
05579   ErrorCode ParallelComm::send_buffer(const unsigned int to_proc,
05580                                       Buffer *send_buff,
05581                                       int mesg_tag,
05582                                       MPI_Request &send_req,
05583                                       MPI_Request &ack_req,
05584                                       int *ack_buff,
05585                                       int &this_incoming,
05586                                       int next_mesg_tag,
05587                                       Buffer *next_recv_buff,
05588                                       MPI_Request *next_recv_req,
05589                                       int *next_incoming) 
05590   {
05591     ErrorCode result = MB_SUCCESS;
05592     int success;
05593 
05594     // if small message, post recv for remote handle message
05595     if (send_buff->get_stored_size() <= (int)INITIAL_BUFF_SIZE && next_recv_buff) {
05596       (*next_incoming)++;
05597       PRINT_DEBUG_IRECV(procConfig.proc_rank(), to_proc, next_recv_buff->mem_ptr,
05598                         INITIAL_BUFF_SIZE, next_mesg_tag, *next_incoming);
05599       success = MPI_Irecv(next_recv_buff->mem_ptr, INITIAL_BUFF_SIZE, 
05600                           MPI_UNSIGNED_CHAR, to_proc,
05601                           next_mesg_tag, procConfig.proc_comm(), 
05602                           next_recv_req);
05603       if (success != MPI_SUCCESS) {
05604         result = MB_FAILURE;
05605         RRA("Failed to post irecv for next message in ghost exchange.");
05606       }
05607     }
05608     // if large, we'll need an ack before sending the rest
05609     else if (send_buff->get_stored_size() > (int)INITIAL_BUFF_SIZE) {
05610       this_incoming++;
05611       PRINT_DEBUG_IRECV(procConfig.proc_rank(), to_proc, (unsigned char*)ack_buff,
05612                         sizeof(int), mesg_tag-1, this_incoming);
05613       success = MPI_Irecv(ack_buff, sizeof(int), 
05614                           MPI_UNSIGNED_CHAR, to_proc,
05615                           mesg_tag-1, procConfig.proc_comm(), 
05616                           &ack_req);
05617       if (success != MPI_SUCCESS) {
05618         result = MB_FAILURE;
05619         RRA("Failed to post irecv for entity ack in ghost exchange.");
05620       }
05621     }
05622 
05623     // send the buffer
05624     PRINT_DEBUG_ISEND(procConfig.proc_rank(), to_proc, send_buff->mem_ptr, mesg_tag,
05625                       std::min(send_buff->get_stored_size(), (int)INITIAL_BUFF_SIZE));
05626     assert(0 <= send_buff->get_stored_size() && 
05627            send_buff->get_stored_size() <= (int)send_buff->alloc_size);
05628     success = MPI_Isend(send_buff->mem_ptr, 
05629                         std::min(send_buff->get_stored_size(), 
05630                                  (int)INITIAL_BUFF_SIZE),
05631                         MPI_UNSIGNED_CHAR, to_proc, 
05632                         mesg_tag, procConfig.proc_comm(), &send_req);
05633     if (success != MPI_SUCCESS) return MB_FAILURE;
05634 
05635     return result;
05636   }
05637 
05638   ErrorCode ParallelComm::recv_buffer(int mesg_tag_expected,
05639                                       const MPI_Status &mpi_status,
05640                                       Buffer *recv_buff,
05641                                       MPI_Request &recv_req,
05642                                       MPI_Request & /*ack_recvd_req*/,
05643                                       int &this_incoming,
05644                                       Buffer *send_buff,
05645                                       MPI_Request &send_req,
05646                                       MPI_Request &sent_ack_req,
05647                                       bool &done,
05648                                       Buffer *next_buff,
05649                                       int next_tag,
05650                                       MPI_Request *next_req,
05651                                       int *next_incoming) 
05652   {
05653     // process a received message; if there will be more coming, 
05654     // post a receive for 2nd part then send an ack message
05655     //
05656     int from_proc = mpi_status.MPI_SOURCE;
05657     int success;
05658     ErrorCode result = MB_SUCCESS;
05659 
05660     // set the buff_ptr on the recv_buffer; needs to point beyond any
05661     // valid data already in the buffer
05662     recv_buff->reset_ptr(std::min(recv_buff->get_stored_size(), 
05663                                   (int)recv_buff->alloc_size));
05664   
05665     if (mpi_status.MPI_TAG == mesg_tag_expected &&
05666         recv_buff->get_stored_size() > (int)INITIAL_BUFF_SIZE) {
05667       // 1st message & large - allocate buffer, post irecv for 2nd message,
05668       // then send ack
05669       recv_buff->reserve(recv_buff->get_stored_size());
05670       assert(recv_buff->alloc_size > INITIAL_BUFF_SIZE);
05671 
05672       // will expect a 2nd message
05673       this_incoming++;
05674 
05675       PRINT_DEBUG_IRECV(procConfig.proc_rank(), from_proc, 
05676                         recv_buff->mem_ptr+INITIAL_BUFF_SIZE,
05677                         recv_buff->get_stored_size() - INITIAL_BUFF_SIZE,
05678                         mesg_tag_expected+1, this_incoming);
05679       success = MPI_Irecv(recv_buff->mem_ptr+INITIAL_BUFF_SIZE, 
05680                           recv_buff->get_stored_size() - INITIAL_BUFF_SIZE, 
05681                           MPI_UNSIGNED_CHAR, from_proc,
05682                           mesg_tag_expected+1, procConfig.proc_comm(), 
05683                           &recv_req);
05684       if (success != MPI_SUCCESS) {
05685         result = MB_FAILURE;
05686         RRA("Failed to post 2nd iRecv in ghost exchange.");
05687       }
05688 
05689       // send ack, doesn't matter what data actually is
05690       PRINT_DEBUG_ISEND(procConfig.proc_rank(), from_proc, recv_buff->mem_ptr, 
05691                         mesg_tag_expected-1, sizeof(int));
05692       success = MPI_Isend(recv_buff->mem_ptr, sizeof(int),
05693                           MPI_UNSIGNED_CHAR, from_proc, 
05694                           mesg_tag_expected-1, procConfig.proc_comm(), &sent_ack_req);
05695       if (success != MPI_SUCCESS) {
05696         result = MB_FAILURE;
05697         RRA("Failed to send ack in ghost exchange.");
05698       }
05699     }
05700 
05701     else if (mpi_status.MPI_TAG == mesg_tag_expected-1) {
05702       // got an ack back, send the 2nd half of message
05703 
05704       // should be a large message if we got this
05705       assert(*((size_t*)send_buff->mem_ptr) > INITIAL_BUFF_SIZE);
05706 
05707       // post irecv for next message, then send 2nd message
05708       if (next_buff) {
05709         // we'll expect a return message
05710         (*next_incoming)++;
05711         PRINT_DEBUG_IRECV(procConfig.proc_rank(), from_proc, next_buff->mem_ptr,
05712                           INITIAL_BUFF_SIZE, next_tag, *next_incoming);
05713 
05714         success = MPI_Irecv(next_buff->mem_ptr, 
05715                             INITIAL_BUFF_SIZE, 
05716                             MPI_UNSIGNED_CHAR, from_proc,
05717                             next_tag, procConfig.proc_comm(), 
05718                             next_req);
05719         if (success != MPI_SUCCESS) {
05720           result = MB_FAILURE;
05721           RRA("Failed to post next irecv in ghost exchange.");
05722         }
05723 
05724       }
05725 
05726       // send 2nd message
05727       PRINT_DEBUG_ISEND(procConfig.proc_rank(), from_proc, 
05728                         send_buff->mem_ptr+INITIAL_BUFF_SIZE,
05729                         mesg_tag_expected+1,
05730                         send_buff->get_stored_size() - INITIAL_BUFF_SIZE);
05731     
05732       assert(send_buff->get_stored_size()-INITIAL_BUFF_SIZE < send_buff->alloc_size &&
05733              0 <= send_buff->get_stored_size());
05734       success = MPI_Isend(send_buff->mem_ptr+INITIAL_BUFF_SIZE, 
05735                           send_buff->get_stored_size() - INITIAL_BUFF_SIZE,
05736                           MPI_UNSIGNED_CHAR, from_proc, mesg_tag_expected+1, 
05737                           procConfig.proc_comm(), &send_req);
05738       if (success != MPI_SUCCESS) {
05739         result = MB_FAILURE;
05740         RRA("Failed to send 2nd message in ghost exchange.");
05741       }
05742     }
05743     else if ((mpi_status.MPI_TAG == mesg_tag_expected && 
05744               recv_buff->get_stored_size() <= (int)INITIAL_BUFF_SIZE) ||
05745              mpi_status.MPI_TAG == mesg_tag_expected+1) {
05746       // message completely received - signal that we're done
05747       done = true;
05748     }
05749 
05750     return MB_SUCCESS;
05751   }
05752 
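  // ProcList wraps the fixed-size sharing-proc array so it can serve as a std::map
  // key in check_clean_iface below; operator< compares entries lexicographically and
  // stops at the first -1 terminator.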
05753   struct ProcList {
05754     int procs[MAX_SHARING_PROCS];
05755   };
05756   static bool operator<( const ProcList& a, const ProcList& b ) {
05757     for (int i = 0; i < MAX_SHARING_PROCS; ++i) {
05758       if (a.procs[i] < b.procs[i]) 
05759         return true;
05760       else if (b.procs[i] < a.procs[i])
05761         return false;
05762       else if (a.procs[i] < 0)
05763         return false; 
05764     }
05765     return false;
05766   }
05767 
05768   ErrorCode ParallelComm::check_clean_iface(Range &allsent) 
05769   {
05770     // allsent is all entities I think are on interface; go over them, looking
05771     // for zero-valued handles, and fix any I find
05772 
05773     // Keep lists of entities for which the sharing data changed, grouped
05774     // by set of sharing procs.
05775     typedef std::map< ProcList, Range > procmap_t;
05776     procmap_t old_procs, new_procs;
05777 
05778     ErrorCode result = MB_SUCCESS;
05779     Range::iterator rit;
05780     Range::reverse_iterator rvit;
05781     unsigned char pstatus;
05782     int nump;
05783     ProcList sharedp;
05784     EntityHandle sharedh[MAX_SHARING_PROCS];
05785     for (rvit = allsent.rbegin(); rvit != allsent.rend(); rvit++) {
05786       result = get_sharing_data(*rvit, sharedp.procs, sharedh, pstatus, nump);
05787       RRA("");
05788       assert("Should be shared with at least one other proc" && 
05789              (nump > 1 || sharedp.procs[0] != (int)procConfig.proc_rank()));
05790       assert(nump == MAX_SHARING_PROCS || sharedp.procs[nump] == -1);
05791 
05792       // look for first null handle in list
05793       int idx = std::find( sharedh, sharedh+nump, (EntityHandle)0 ) - sharedh;
05794       if (idx == nump)
05795         continue; // all handles are valid
05796     
05797       ProcList old_list( sharedp );
05798       std::sort( old_list.procs, old_list.procs + nump );
05799       old_procs[old_list].insert( *rvit );
05800     
05801       // remove null handles and corresponding proc ranks from lists
05802       int new_nump = idx;
05803       bool removed_owner = !idx;
05804       for (++idx; idx < nump; ++idx) {
05805         if (sharedh[idx]) {
05806           sharedh[new_nump] = sharedh[idx];
05807           sharedp.procs[new_nump] = sharedp.procs[idx];
05808           ++new_nump;
05809         }
05810       }
05811       sharedp.procs[new_nump] = -1;
05812  
05813       if (removed_owner && new_nump > 1) {
05814         // The proc that we choose as the entity owner isn't sharing the
05815         // entity (doesn't have a copy of it).  We need to pick a different
05816         // owner.  Choose the proc with lowest rank.
05817         idx = std::min_element( sharedp.procs, sharedp.procs+new_nump ) - sharedp.procs;
05818         std::swap( sharedp.procs[0], sharedp.procs[idx] );
05819         std::swap( sharedh[0], sharedh[idx] );
05820         if (sharedp.procs[0] == (int)proc_config().proc_rank())
05821           pstatus &= ~PSTATUS_NOT_OWNED;
05822       }
05823 
05824       result = set_sharing_data(*rvit, pstatus, nump, new_nump, sharedp.procs, sharedh);
05825       RRA("");
05826 
05827       if (new_nump > 1) {
05828         if (new_nump == 2) {
05829           if (sharedp.procs[1] != (int)proc_config().proc_rank()) {
05830             assert(sharedp.procs[0] == (int)proc_config().proc_rank());
05831             sharedp.procs[0] = sharedp.procs[1];
05832           }
05833           sharedp.procs[1] = -1;
05834         } 
05835         else {
05836           std::sort( sharedp.procs, sharedp.procs + new_nump );
05837         }
05838         new_procs[sharedp].insert( *rvit );
05839       }
05840     }
05841   
05842     if (old_procs.empty()) {
05843       assert(new_procs.empty());
05844       return MB_SUCCESS;
05845     }
05846   
05847     // update interface sets
05848     procmap_t::iterator pmit;
05849     //std::vector<unsigned char> pstatus_list;
05850     rit = interface_sets().begin();
05851     while (rit != interface_sets().end()) {
05852       result = get_sharing_data( *rit, sharedp.procs, sharedh, pstatus, nump );
05853       RRA("problems getting sharing data for interface set");
05854       assert( nump != 2 );
05855       std::sort( sharedp.procs, sharedp.procs + nump );
05856       assert(nump == MAX_SHARING_PROCS || sharedp.procs[nump] == -1);
05857     
05858       pmit = old_procs.find( sharedp );
05859       if (pmit != old_procs.end()) {
05860         result = mbImpl->remove_entities( *rit, pmit->second ); RRA("");
05861       }
05862   
05863       pmit = new_procs.find( sharedp );
05864       if (pmit == new_procs.end()) {
05865         int count;
05866         result = mbImpl->get_number_entities_by_handle( *rit, count ); RRA("");
05867         if (!count) {
05868           result = mbImpl->delete_entities( &*rit, 1 ); RRA("");
05869           rit = interface_sets().erase( rit );
05870         }
05871         else {  
05872           ++rit;
05873         }
05874       }
05875       else {
05876         result = mbImpl->add_entities( *rit, pmit->second ); RRA("");
05877 
05878         // remove those that we've processed so that we know which ones
05879         // are new.
05880         new_procs.erase( pmit );
05881         ++rit;
05882       }
05883     }
05884   
05885     // create interface sets for new proc id combinations
05886     std::fill( sharedh, sharedh + MAX_SHARING_PROCS, 0);
05887     for (pmit = new_procs.begin(); pmit != new_procs.end(); ++pmit) {
05888       EntityHandle new_set;
05889       result = mbImpl->create_meshset(MESHSET_SET, new_set); 
05890       RRA("Failed to create interface set.");
05891       interfaceSets.insert(new_set);
05892 
05893       // add entities
05894       result = mbImpl->add_entities(new_set, pmit->second); 
05895       RRA("Failed to add entities to interface set.");
05896       // tag set with the proc rank(s)
05897       assert(pmit->first.procs[0] >= 0);
05898       pstatus = PSTATUS_SHARED|PSTATUS_INTERFACE;
05899       if (pmit->first.procs[1] == -1) {
05900         int other = pmit->first.procs[0];
05901         assert(other != (int)procConfig.proc_rank());
05902         result = mbImpl->tag_set_data(sharedp_tag(), &new_set, 1, pmit->first.procs); 
05903         RRA("Failed to tag interface set with procs.");
05904         sharedh[0] = 0;
05905         result = mbImpl->tag_set_data(sharedh_tag(), &new_set, 1, sharedh); 
05906         RRA("Failed to tag interface set with procs.");
05907         if (other < (int)proc_config().proc_rank())
05908           pstatus |= PSTATUS_NOT_OWNED;
05909       }
05910       else {
05911         result = mbImpl->tag_set_data(sharedps_tag(), &new_set, 1, pmit->first.procs );
05912         RRA("Failed to tag interface set with procs.");
05913         result = mbImpl->tag_set_data(sharedhs_tag(), &new_set, 1, sharedh); 
05914         RRA("Failed to tag interface set with procs.");
05915         pstatus |= PSTATUS_MULTISHARED;
05916         if (pmit->first.procs[0] < (int)proc_config().proc_rank())
05917           pstatus |= PSTATUS_NOT_OWNED;
05918       }
05919     
05920       result = mbImpl->tag_set_data(pstatus_tag(), &new_set, 1, &pstatus); 
05921       RRA("Failed to tag interface set with pstatus.");
05922     
05923       // set pstatus on all interface entities in set
05924       result = mbImpl->tag_clear_data(pstatus_tag(), pmit->second, &pstatus );
05925       RRA("Failed to tag interface entities with pstatus.");
05926     }
05927   
05928     return MB_SUCCESS;
05929   }
05930 
05931   ErrorCode ParallelComm::set_sharing_data(EntityHandle ent, unsigned char pstatus,
05932                                            int old_nump, int new_nump,
05933                                            int *ps, EntityHandle *hs) 
05934   {
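    // Sharing data lives in two tag families: sharedp_tag()/sharedh_tag() hold a
    // single remote proc/handle for entities shared with exactly one other proc,
    // while sharedps_tag()/sharedhs_tag() hold MAX_SHARING_PROCS-sized arrays for
    // multishared entities; pstatus_tag() carries the PSTATUS_* bit flags.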
05935     // If new_nump is less than 3, the entity is no longer multishared
05936     if (old_nump > 2 && (pstatus & PSTATUS_MULTISHARED) && new_nump < 3) {
05937       // Unset multishared flag
05938       pstatus ^= PSTATUS_MULTISHARED;
05939     }
05940 
05941     // check for consistency in input data
05942     assert(new_nump > 1 &&
05943            ((new_nump == 2 && pstatus&PSTATUS_SHARED && !(pstatus&PSTATUS_MULTISHARED)) || // if <= 2 must not be multishared
05944             (new_nump > 2 && pstatus&PSTATUS_SHARED && pstatus&PSTATUS_MULTISHARED)) && // if > 2 procs, must be multishared
05945            (!(pstatus&PSTATUS_GHOST) || pstatus&PSTATUS_SHARED) && // if ghost, it must also be shared
05946            (new_nump < 3 || (pstatus&PSTATUS_NOT_OWNED && ps[0] != (int)rank()) || // I'm not owner and first proc not me
05947             (!(pstatus&PSTATUS_NOT_OWNED) && ps[0] == (int)rank())) // I'm owner and first proc is me
05948            );
05949     
05950 #ifndef NDEBUG
05951       {
05952         // check for duplicates in proc list
05953         std::set<unsigned int> dumprocs;
05954         int dp = 0;
05955         for (; dp < old_nump && -1 != ps[dp]; dp++)
05956           dumprocs.insert(ps[dp]);
05957         assert(dp == (int)dumprocs.size());
05958       }
05959 #endif
05960 
05961     ErrorCode result;
05962     // reset any old data that needs to be cleared
05963     if (old_nump > 2 && new_nump < 3) {
05964       // need to remove multishared tags
05965       result = mbImpl->tag_delete_data(sharedps_tag(), &ent, 1);
05966       RRA("set_sharing_data:1");
05967       result = mbImpl->tag_delete_data(sharedhs_tag(), &ent, 1);
05968       RRA("set_sharing_data:2");
05969 //      if (new_nump < 2) 
05970 //        pstatus = 0x0;
05971 //      else if (ps[0] != (int)proc_config().proc_rank())
05972 //        pstatus |= PSTATUS_NOT_OWNED;
05973     }
05974     else if ((old_nump < 3 && new_nump > 2) || (old_nump > 1 && new_nump == 1)) {
05975         // reset sharedp and sharedh tags
05976       int tmp_p = -1;
05977       EntityHandle tmp_h = 0;
05978       result = mbImpl->tag_set_data(sharedp_tag(), &ent, 1, &tmp_p);
05979       RRA("set_sharing_data:3");
05980       result = mbImpl->tag_set_data(sharedh_tag(), &ent, 1, &tmp_h);
05981       RRA("set_sharing_data:4");
05982     }
05983 
05984     assert("check for multishared/owner I'm first proc" &&
05985            (!(pstatus & PSTATUS_MULTISHARED) || (pstatus & (PSTATUS_NOT_OWNED|PSTATUS_GHOST)) || (ps[0] == (int)rank())) &&
05986            "interface entities should have > 1 proc" &&
05987            (!(pstatus & PSTATUS_INTERFACE) || new_nump > 1) &&
05988            "ghost entities should have > 1 proc" &&
05989            (!(pstatus & PSTATUS_GHOST) || new_nump > 1)
05990            );
05991 
05992       // now set new data
05993     if (new_nump > 2) {
05994       result = mbImpl->tag_set_data(sharedps_tag(), &ent, 1, ps);
05995       RRA("set_sharing_data:5");
05996       result = mbImpl->tag_set_data(sharedhs_tag(), &ent, 1, hs);
05997       RRA("set_sharing_data:6");
05998     }
05999     else {
06000       unsigned int j = (ps[0] == (int)procConfig.proc_rank() ? 1 : 0);
06001       assert(-1 != ps[j]);
06002       result = mbImpl->tag_set_data(sharedp_tag(), &ent, 1, ps+j);
06003       RRA("set_sharing_data:7");
06004       result = mbImpl->tag_set_data(sharedh_tag(), &ent, 1, hs+j);
06005       RRA("set_sharing_data:8");
06006     }
06007   
06008     result = mbImpl->tag_set_data(pstatus_tag(), &ent, 1, &pstatus);
06009     RRA("set_sharing_data:9");
06010 
06011     if (old_nump > 1 && new_nump < 2) 
06012       sharedEnts.erase(std::find(sharedEnts.begin(), sharedEnts.end(), ent));
06013 
06014     return result;
06015   }
06016 
06017   ErrorCode ParallelComm::get_sent_ents(const bool is_iface, 
06018                                         const int bridge_dim, const int ghost_dim,
06019                                         const int num_layers, const int addl_ents,
06020                                         Range *sent_ents, Range &allsent,
06021                                         TupleList &entprocs) 
06022   {
06023     ErrorCode result;
06024     unsigned int ind;
06025     std::vector<unsigned int>::iterator proc_it;
06026     Range tmp_range;
06027   
06028     // done in a separate loop over procs because sometimes later procs 
06029     // need to add info to earlier procs' messages
06030     for (ind = 0, proc_it = buffProcs.begin(); 
06031          proc_it != buffProcs.end(); proc_it++, ind++) {
06032       if (!is_iface) {
06033         result = get_ghosted_entities(bridge_dim, ghost_dim, buffProcs[ind],
06034                                       num_layers, addl_ents, sent_ents[ind]);
06035         RRA("Failed to get ghost layers.");
06036       }
06037       else {
06038         result = get_iface_entities(buffProcs[ind], -1, sent_ents[ind]);
06039         RRA("Failed to get interface layers.");
06040       }
06041 
06042       // filter out entities already shared with destination
06043       tmp_range.clear();
06044       result = filter_pstatus(sent_ents[ind], PSTATUS_SHARED, PSTATUS_AND,
06045                               buffProcs[ind], &tmp_range);
06046       RRA("Couldn't filter on owner.");
06047       if (!tmp_range.empty()) 
06048         sent_ents[ind] = subtract( sent_ents[ind], tmp_range);
06049 
06050       allsent.merge(sent_ents[ind]);
06051     }
06052 
06053     //===========================================
06054     // need to get procs each entity is sent to
06055     //===========================================
06056 
06057     // get the total # of proc/handle pairs
06058     int npairs = 0;
06059     for (ind = 0; ind < buffProcs.size(); ind++)
06060       npairs += sent_ents[ind].size();
06061   
06062     // allocate a TupleList of that size
06063     entprocs.initialize(1, 0, 1, 0, npairs);
06064     entprocs.enableWriteAccess();
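    // entprocs holds one tuple per (destination proc, entity) pair: the single int
    // component is the proc rank and the single handle component is the entity.
    // Sorting by handle below groups all destinations of a given entity together
    // for use by pack_entities.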
06065 
06066     // put the proc/handle pairs in the list
06067     for (ind = 0, proc_it = buffProcs.begin(); 
06068          proc_it != buffProcs.end(); proc_it++, ind++) {
06069       for (Range::iterator rit = sent_ents[ind].begin(); rit != sent_ents[ind].end(); rit++) {
06070         entprocs.vi_wr[entprocs.get_n()] = *proc_it;
06071         entprocs.vul_wr[entprocs.get_n()] = *rit;
06072         entprocs.inc_n();
06073       }
06074     }
06075     // sort by handle
06076     moab::TupleList::buffer sort_buffer;
06077     sort_buffer.buffer_init(npairs);
06078     entprocs.sort(1, &sort_buffer);
06079 
06080     entprocs.disableWriteAccess();
06081     sort_buffer.reset();
06082 
06083     return MB_SUCCESS;
06084   }
06085 
06086   ErrorCode ParallelComm::exchange_ghost_cells(ParallelComm **pcs,
06087                                                unsigned int num_procs,
06088                                                int ghost_dim, int bridge_dim,
06089                                                int num_layers, int addl_ents,
06090                                                bool store_remote_handles,
06091                                                EntityHandle *file_sets
06092                                                )
06093   {
06094     // static version of function, exchanging info through buffers rather 
06095     // than through messages
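    // each entry of pcs represents one participating "processor"; all instances
    // live in the same address space, so packed buffers are handed directly from
    // pc->localOwnedBuffs to the destination instance instead of being sent with MPI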
06096 
06097     // if we're only finding out about existing ents, we have to be storing
06098     // remote handles too
06099     assert(num_layers > 0 || store_remote_handles);
06100   
06101     const bool is_iface = !num_layers;
06102   
06103     unsigned int ind;
06104     ParallelComm *pc;
06105     ErrorCode result = MB_SUCCESS;
06106 
06107     std::vector<Error*> ehs(num_procs);
06108     for (unsigned int i = 0; i < num_procs; i++) {
06109       result = pcs[i]->get_moab()->query_interface(ehs[i]);
06110       assert (MB_SUCCESS == result);
06111     }
06112   
06113     // when this function is called, buffProcs should already have any 
06114     // communicating procs
06115 
06116     //===========================================
06117     // get entities to be sent to neighbors
06118     //===========================================
06119 
06120     // done in a separate loop over procs because sometimes later procs 
06121     // need to add info to earlier procs' messages
06122     Range sent_ents[MAX_SHARING_PROCS][MAX_SHARING_PROCS], 
06123       allsent[MAX_SHARING_PROCS];
06124 
06129     TupleList entprocs[MAX_SHARING_PROCS];
06130     for (unsigned int p = 0; p < num_procs; p++) {
06131       pc = pcs[p];
06132       result = pc->get_sent_ents(is_iface, bridge_dim, ghost_dim, num_layers, addl_ents,
06133                                  sent_ents[p], allsent[p], entprocs[p]);
06134       RRAI(pc->get_moab(), ehs[p], "get_sent_ents failed.");
06135   
06136       //===========================================
06137       // pack entities into buffers
06138       //===========================================
06139 
06140       for (ind = 0; ind < pc->buffProcs.size(); ind++) {
06141         // entities
06142         pc->localOwnedBuffs[ind]->reset_ptr(sizeof(int));
06143         result = pc->pack_entities(sent_ents[p][ind], pc->localOwnedBuffs[ind],
06144                                    store_remote_handles, pc->buffProcs[ind], is_iface,
06145                                    &entprocs[p], &allsent[p]); 
06146         RRAI(pc->get_moab(), ehs[p], "Packing entities failed.");
06147       }
06148 
06149       entprocs[p].reset();
06150     }
06151 
06152     //===========================================
06153     // receive/unpack new entities
06154     //===========================================
06155     // number of incoming messages for ghosts is the number of procs we 
06156     // communicate with; for iface, it's the number of those with lower rank
06157     std::vector<std::vector<EntityHandle> > L1hloc[MAX_SHARING_PROCS], L1hrem[MAX_SHARING_PROCS];
06158     std::vector<std::vector<int> > L1p[MAX_SHARING_PROCS];
06159     std::vector<EntityHandle> L2hloc[MAX_SHARING_PROCS], L2hrem[MAX_SHARING_PROCS];
06160     std::vector<unsigned int> L2p[MAX_SHARING_PROCS];
06161     std::vector<EntityHandle> new_ents[MAX_SHARING_PROCS];
06162   
06163     for (unsigned int p = 0; p < num_procs; p++) {
06164       L1hloc[p].resize(pcs[p]->buffProcs.size());
06165       L1hrem[p].resize(pcs[p]->buffProcs.size());
06166       L1p[p].resize(pcs[p]->buffProcs.size());
06167     }
06168   
06169     for (unsigned int p = 0; p < num_procs; p++) {
06170   
06171       pc = pcs[p];
06172     
06173       for (ind = 0; ind < pc->buffProcs.size(); ind++) {
06174         // incoming ghost entities; unpack; returns entities received
06175         // both from sending proc and from owning proc (which may be different)
06176 
06177         // buffer could be empty, which means there isn't any message to
06178         // unpack (due to this comm proc getting added as a result of indirect
06179         // communication); just skip this unpack
06180         if (pc->localOwnedBuffs[ind]->get_stored_size() == 0) continue;
06181 
06182         unsigned int to_p = pc->buffProcs[ind];
06183         pc->localOwnedBuffs[ind]->reset_ptr(sizeof(int));
06184         result = pcs[to_p]->unpack_entities(pc->localOwnedBuffs[ind]->buff_ptr,
06185                                             store_remote_handles, ind, is_iface,
06186                                             L1hloc[to_p], L1hrem[to_p], L1p[to_p], L2hloc[to_p], 
06187                                             L2hrem[to_p], L2p[to_p], new_ents[to_p]);
06188         RRAI(pc->get_moab(), ehs[p], "Failed to unpack entities.");
06189       }
06190     }
06191 
06192     if (is_iface) {
06193       // need to check over entities I sent and make sure I received 
06194       // handles for them from all expected procs; if not, need to clean
06195       // them up
06196       for (unsigned int p = 0; p < num_procs; p++) {
06197         result = pcs[p]->check_clean_iface(allsent[p]);
06198         RRAI(pcs[p]->get_moab(), ehs[p], "Failed check on shared entities.");
06199       }
06200 
06201 #ifndef NDEBUG
06202       for (unsigned int p = 0; p < num_procs; p++) {
06203         result = pcs[p]->check_sent_ents(allsent[p]);
06204         RRAI(pcs[p]->get_moab(), ehs[p], "Failed check on shared entities.");
06205       }
06206       result = check_all_shared_handles(pcs, num_procs);
06207       RRAI(pcs[0]->get_moab(), ehs[0], "Failed check on all shared handles.");
06208 #endif
06209       return MB_SUCCESS;
06210     }
06211   
06212     //===========================================
06213     // send local handles for new ghosts to owner, then add
06214     // those to ghost list for that owner
06215     //===========================================
06216     std::vector<unsigned int>::iterator proc_it;
06217     for (unsigned int p = 0; p < num_procs; p++) {
06218       pc = pcs[p];
06219   
06220       for (ind = 0, proc_it = pc->buffProcs.begin(); 
06221            proc_it != pc->buffProcs.end(); proc_it++, ind++) {
06222         // skip if iface layer and higher-rank proc
06223         pc->localOwnedBuffs[ind]->reset_ptr(sizeof(int));
06224         result = pc->pack_remote_handles(L1hloc[p][ind], L1hrem[p][ind], L1p[p][ind], *proc_it,
06225                                          pc->localOwnedBuffs[ind]);
06226         RRAI(pc->get_moab(), ehs[p], "Failed to pack remote handles.");
06227       }
06228     }
06229   
06230     //===========================================
06231     // process remote handles of my ghosteds
06232     //===========================================
06233     for (unsigned int p = 0; p < num_procs; p++) {
06234       pc = pcs[p];
06235   
06236       for (ind = 0, proc_it = pc->buffProcs.begin(); 
06237            proc_it != pc->buffProcs.end(); proc_it++, ind++) {
06238         // incoming remote handles
06239         unsigned int to_p = pc->buffProcs[ind];
06240         pc->localOwnedBuffs[ind]->reset_ptr(sizeof(int));
06241         result = pcs[to_p]->unpack_remote_handles(p, 
06242                                                   pc->localOwnedBuffs[ind]->buff_ptr,
06243                                                   L2hloc[to_p], L2hrem[to_p], L2p[to_p]);
06244         RRAI(pc->get_moab(), ehs[p], "Failed to unpack remote handles.");
06245       }
06246     }
06247     
06248 #ifndef NDEBUG
06249     for (unsigned int p = 0; p < num_procs; p++) {
06250       result = pcs[p]->check_sent_ents(allsent[p]);
06251       RRAI(pcs[p]->get_moab(), ehs[p], "Failed check on shared entities.");
06252     }
06253   
06254     result = ParallelComm::check_all_shared_handles(pcs, num_procs);
06255     RRAI(pcs[0]->get_moab(), ehs[0], "Failed check on all shared handles.");
06256 #endif
06257 
06258     if (file_sets) {
06259       for (unsigned int p = 0; p < num_procs; p++) {
06260         if (new_ents[p].empty()) continue;
06261         result = pcs[p]->get_moab()->add_entities(file_sets[p], &new_ents[p][0], new_ents[p].size());
06262         RRAI(pcs[p]->get_moab(), ehs[p], "Failed to add new entities to set.");
06263       }
06264     }
06265     return MB_SUCCESS;
06266   }
06267 
06268   ErrorCode ParallelComm::post_irecv(std::vector<unsigned int>& exchange_procs)
06269   {
06270     // set buffers
06271     int n_proc = exchange_procs.size();
06272     for (int i = 0; i < n_proc; i++) get_buffers(exchange_procs[i]);
06273     reset_all_buffers();
06274 
06275     // post ghost irecv's for entities from all communicating procs
06276     // index reqs the same as buffer/sharing procs indices
06277     int success;
06278     ErrorCode result;
06279     recvReqs.resize(2*buffProcs.size(), MPI_REQUEST_NULL);
06280     recvRemotehReqs.resize(2*buffProcs.size(), MPI_REQUEST_NULL);
06281     sendReqs.resize(2*buffProcs.size(), MPI_REQUEST_NULL);
06282 
06283     int incoming = 0;
06284     for (int i = 0; i < n_proc; i++) {
06285       int ind = get_buffers(exchange_procs[i]);
06286       incoming++;
06287       PRINT_DEBUG_IRECV(procConfig.proc_rank(), buffProcs[ind], 
06288                         remoteOwnedBuffs[ind]->mem_ptr, INITIAL_BUFF_SIZE, 
06289                         MB_MESG_ENTS_SIZE, incoming);
06290       success = MPI_Irecv(remoteOwnedBuffs[ind]->mem_ptr, INITIAL_BUFF_SIZE, 
06291                           MPI_UNSIGNED_CHAR, buffProcs[ind],
06292                           MB_MESG_ENTS_SIZE, procConfig.proc_comm(), 
06293                           &recvReqs[2*ind]);
06294       if (success != MPI_SUCCESS) {
06295         result = MB_FAILURE;
06296         RRA("Failed to post irecv in owned entity exchange.");
06297       }
06298     }
06299 
06300     return MB_SUCCESS;
06301   }
06302 
06303   ErrorCode ParallelComm::post_irecv(std::vector<unsigned int>& shared_procs,
06304                                      std::set<unsigned int>& recv_procs)
06305   {
06306     // set buffers
06307     int num = shared_procs.size();
06308     for (int i = 0; i < num; i++) get_buffers(shared_procs[i]);
06309     reset_all_buffers();
06310     num = remoteOwnedBuffs.size();
06311     for (int i = 0; i < num; i++) remoteOwnedBuffs[i]->set_stored_size();
06312     num = localOwnedBuffs.size();
06313     for (int i = 0; i < num; i++) localOwnedBuffs[i]->set_stored_size();
06314 
06315     // post ghost irecv's for entities from all communicating procs
06316     // index reqs the same as buffer/sharing procs indices
06317     int success;
06318     ErrorCode result;
06319     recvReqs.resize(2*buffProcs.size(), MPI_REQUEST_NULL);
06320     recvRemotehReqs.resize(2*buffProcs.size(), MPI_REQUEST_NULL);
06321     sendReqs.resize(2*buffProcs.size(), MPI_REQUEST_NULL);
06322 
06323     int incoming = 0;
06324     std::set<unsigned int>::iterator it = recv_procs.begin();
06325     std::set<unsigned int>::iterator eit = recv_procs.end();
06326     for (; it != eit; it++) {
06327       int ind = get_buffers(*it);
06328       incoming++;
06329       PRINT_DEBUG_IRECV(procConfig.proc_rank(), buffProcs[ind], 
06330                         remoteOwnedBuffs[ind]->mem_ptr, INITIAL_BUFF_SIZE, 
06331                         MB_MESG_ENTS_SIZE, incoming);
06332       success = MPI_Irecv(remoteOwnedBuffs[ind]->mem_ptr, INITIAL_BUFF_SIZE, 
06333                           MPI_UNSIGNED_CHAR, buffProcs[ind],
06334                           MB_MESG_ENTS_SIZE, procConfig.proc_comm(), 
06335                           &recvReqs[2*ind]);
06336       if (success != MPI_SUCCESS) {
06337         result = MB_FAILURE;
06338         RRA("Failed to post irecv in owned entity exchange.");
06339       }
06340     }
06341 
06342     return MB_SUCCESS;
06343   }
06344 
06345   ErrorCode ParallelComm::exchange_owned_meshs(std::vector<unsigned int>& exchange_procs,
06346                                                std::vector<Range*>& exchange_ents,
06347                                                std::vector<MPI_Request>& recv_ent_reqs,
06348                                                std::vector<MPI_Request>& recv_remoteh_reqs,
06349                                                bool store_remote_handles,
06350                                                bool wait_all,
06351                                                bool migrate,
06352                                                int dim)
06353   {
06354     // filter out entities already shared with destination
06355     // exchange twice for entities and sets
06356     ErrorCode result;
06357     std::vector<unsigned int> exchange_procs_sets;
06358     std::vector<Range*> exchange_sets;
06359     int n_proc = exchange_procs.size();
06360     for (int i = 0; i < n_proc; i++) {
06361       Range set_range = exchange_ents[i]->subset_by_type(MBENTITYSET);
06362       *exchange_ents[i] = subtract(*exchange_ents[i], set_range);
06363       Range* tmp_range = new Range(set_range);
06364       exchange_sets.push_back(tmp_range);
06365       exchange_procs_sets.push_back(exchange_procs[i]);
06366     }
06367 
06368   
06369     if (dim == 2) {
06370       // exchange entities first
06371       result = exchange_owned_mesh(exchange_procs, exchange_ents,
06372                                    recvReqs, recvRemotehReqs, true,
06373                                    store_remote_handles, wait_all, migrate);
06374       RRA("Couldn't exchange owned mesh entities.");
06375     
06376       // exchange sets
06377       result = exchange_owned_mesh(exchange_procs_sets, exchange_sets,
06378                                    recvReqs, recvRemotehReqs, false,
06379                                    store_remote_handles, wait_all, migrate);
06380     }
06381     else {
06382       // exchange entities first
06383       result = exchange_owned_mesh(exchange_procs, exchange_ents,
06384                                    recv_ent_reqs, recv_remoteh_reqs, false,
06385                                    store_remote_handles, wait_all, migrate);
06386       RRA("Couldn't exchange owned mesh entities.");
06387     
06388       // exchange sets
06389       result = exchange_owned_mesh(exchange_procs_sets, exchange_sets,
06390                                    recv_ent_reqs, recv_remoteh_reqs, false,
06391                                    store_remote_handles, wait_all, migrate);
06392       RRA("Couldn't exchange owned mesh sets.");
06393     }
06394 
06395     for (int i = 0; i < n_proc; i++) delete exchange_sets[i];
06396 
06397     // build up the list of shared entities
06398     std::map<std::vector<int>, std::vector<EntityHandle> > proc_nvecs;
06399     int procs[MAX_SHARING_PROCS];
06400     EntityHandle handles[MAX_SHARING_PROCS];
06401     int nprocs;
06402     unsigned char pstat;
06403     for (std::vector<EntityHandle>::iterator vit = sharedEnts.begin(); vit != sharedEnts.end(); vit++) {
06404       if (mbImpl->dimension_from_handle(*vit) > 2)
06405         continue;
06406       result = get_sharing_data(*vit, procs, handles, pstat, nprocs);
06407       RRA("");
06408       std::sort(procs, procs+nprocs);
06409       std::vector<int> tmp_procs(procs, procs + nprocs);
06410       assert(tmp_procs.size() != 2);
06411       proc_nvecs[tmp_procs].push_back(*vit);
06412     }
06413 
06414     // create interface sets from shared entities
06415     result = create_interface_sets(proc_nvecs);
06416     RRA("Trouble creating iface sets.");
06417 
06418     return MB_SUCCESS;
06419   }
06420 
06421   ErrorCode ParallelComm::exchange_owned_mesh(std::vector<unsigned int>& exchange_procs,
06422                                               std::vector<Range*>& exchange_ents,
06423                                               std::vector<MPI_Request>& recv_ent_reqs,
06424                                               std::vector<MPI_Request>& recv_remoteh_reqs,
06425                                               const bool recv_posted,
06426                                               bool store_remote_handles,
06427                                               bool wait_all,
06428                                               bool migrate)
06429   {
06430 #ifdef USE_MPE
06431     if (myDebug->get_verbosity() == 2) {
06432       MPE_Log_event(OWNED_START, procConfig.proc_rank(), "Starting owned ents exchange.");
06433     }
06434 #endif
06435 
06436     myDebug->tprintf(1, "Entering exchange_owned_mesh\n");
06437     if (myDebug->get_verbosity() == 4) {
06438       msgs.clear();
06439       msgs.reserve(MAX_SHARING_PROCS);
06440     }
06441     unsigned int i;
06442     int ind, success;
06443     ErrorCode result = MB_SUCCESS;
06444     int incoming1 = 0, incoming2 = 0;
06445 
06446 
06447     // set buffProcs with communicating procs
06448     unsigned int n_proc = exchange_procs.size();
06449     for (i = 0; i < n_proc; i++) {
06450       ind = get_buffers(exchange_procs[i]);
06451       result = add_verts(*exchange_ents[i]);
06452       RRA("Couldn't add verts.");
06453 
06454       // filter out entities already shared with destination
06455       Range tmp_range;
06456       result = filter_pstatus(*exchange_ents[i], PSTATUS_SHARED, PSTATUS_AND,
06457                               buffProcs[ind], &tmp_range);
06458       RRA("Couldn't filter on shared status.");
06459       if (!tmp_range.empty()) {
06460         *exchange_ents[i] = subtract(*exchange_ents[i], tmp_range);
06461       }
06462     }
06463 
06464     //===========================================
06465     // post ghost irecv's for entities from all communicating procs
06466     //===========================================
06467 #ifdef USE_MPE
06468     if (myDebug->get_verbosity() == 2) {
06469       MPE_Log_event(ENTITIES_START, procConfig.proc_rank(), "Starting entity exchange.");
06470     }
06471 #endif
06472   
06473     // index reqs the same as buffer/sharing procs indices
06474     if (!recv_posted) {
06475       reset_all_buffers();
06476       recv_ent_reqs.resize(2*buffProcs.size(), MPI_REQUEST_NULL);
06477       recv_remoteh_reqs.resize(2*buffProcs.size(), MPI_REQUEST_NULL);
06478       sendReqs.resize(2*buffProcs.size(), MPI_REQUEST_NULL);
06479 
06480       for (i = 0; i < n_proc; i++) {
06481         ind = get_buffers(exchange_procs[i]);
06482         incoming1++;
06483         PRINT_DEBUG_IRECV(procConfig.proc_rank(), buffProcs[ind], 
06484                           remoteOwnedBuffs[ind]->mem_ptr, INITIAL_BUFF_SIZE, 
06485                           MB_MESG_ENTS_SIZE, incoming1);
06486         success = MPI_Irecv(remoteOwnedBuffs[ind]->mem_ptr, INITIAL_BUFF_SIZE, 
06487                             MPI_UNSIGNED_CHAR, buffProcs[ind],
06488                             MB_MESG_ENTS_SIZE, procConfig.proc_comm(), 
06489                             &recv_ent_reqs[2*ind]);
06490         if (success != MPI_SUCCESS) {
06491           result = MB_FAILURE;
06492           RRA("Failed to post irecv in owned entity exchange.");
06493         }
06494       }
06495     }
06496     else incoming1 += n_proc;
06497  
06498     //===========================================
06499     // get entities to be sent to neighbors
06500     // need to get procs each entity is sent to
06501     //===========================================  
06502     Range allsent, tmp_range;
06503     int dum_ack_buff;
06504     int npairs = 0;
06505     TupleList entprocs;
06506     for (i = 0; i < n_proc; i++) {
06507       int n_ents = exchange_ents[i]->size();
06508       if (n_ents > 0) {
06509         npairs += n_ents; // get the total # of proc/handle pairs
06510         allsent.merge(*exchange_ents[i]);
06511       }
06512     }
06513 
06514     // allocate a TupleList of that size
06515     entprocs.initialize(1, 0, 1, 0, npairs);
06516     entprocs.enableWriteAccess();
06517 
06518     // put the proc/handle pairs in the list
06519     for (i = 0; i < n_proc; i++) {
06520       for (Range::iterator rit = exchange_ents[i]->begin(); rit != exchange_ents[i]->end(); rit++) {
06521         entprocs.vi_wr[entprocs.get_n()] = exchange_procs[i];
06522         entprocs.vul_wr[entprocs.get_n()] = *rit;
06523         entprocs.inc_n();
06524       }
06525     }
06526 
06527     // sort by handle
06528     moab::TupleList::buffer sort_buffer;
06529     sort_buffer.buffer_init(npairs);
06530     entprocs.sort(1, &sort_buffer);
06531     sort_buffer.reset();
06532 
06533     myDebug->tprintf(1, "allsent ents compactness (size) = %f (%lu)\n", allsent.compactness(),
06534                      (unsigned long)allsent.size());
06535 
06536     //===========================================
06537     // pack and send ents from this proc to others
06538     //===========================================
06539     for (i = 0; i < n_proc; i++) {
06540       ind = get_buffers(exchange_procs[i]);
06541       myDebug->tprintf(1, "Sent ents compactness (size) = %f (%lu)\n", exchange_ents[i]->compactness(),
06542                        (unsigned long)exchange_ents[i]->size());
06543       // reserve space on front for size and for initial buff size
06544       localOwnedBuffs[ind]->reset_buffer(sizeof(int));
06545       result = pack_buffer(*exchange_ents[i], false, true,
06546                            store_remote_handles, buffProcs[ind],
06547                            localOwnedBuffs[ind], &entprocs, &allsent);
      RRA("Failed to pack buffer in owned entity exchange.");
06548 
06549       if (myDebug->get_verbosity() == 4) {
06550         msgs.resize(msgs.size()+1);
06551         msgs.back() = new Buffer(*localOwnedBuffs[ind]);
06552       }
06553     
06554       // send the buffer (size stored in front in send_buffer)
06555       result = send_buffer(exchange_procs[i], localOwnedBuffs[ind], 
06556                            MB_MESG_ENTS_SIZE, sendReqs[2*ind], 
06557                            recv_ent_reqs[2*ind+1], &dum_ack_buff,
06558                            incoming1,
06559                            MB_MESG_REMOTEH_SIZE, 
06560                            (store_remote_handles ? 
06561                             localOwnedBuffs[ind] : NULL),
06562                            &recv_remoteh_reqs[2*ind], &incoming2);
06563       RRA("Failed to Isend in owned entity exchange.");
06564     }
06565 
06566     entprocs.reset();
06567 
06568     //===========================================
06569     // receive/unpack new entities
06570     //===========================================
06571     // number of incoming messages is the number of procs we communicate with
06572     MPI_Status status;
06573     std::vector<std::vector<EntityHandle> > recd_ents(buffProcs.size());
06574     std::vector<std::vector<EntityHandle> > L1hloc(buffProcs.size()), L1hrem(buffProcs.size());
06575     std::vector<std::vector<int> > L1p(buffProcs.size());
06576     std::vector<EntityHandle> L2hloc, L2hrem;
06577     std::vector<unsigned int> L2p;
06578     std::vector<EntityHandle> new_ents;
06579 
06580     while (incoming1) {
06581       // wait for all recvs of ents before proceeding to sending remote handles,
06582       // b/c some procs may have sent to a 3rd proc ents owned by me;
06583       PRINT_DEBUG_WAITANY(recv_ent_reqs, MB_MESG_ENTS_SIZE, procConfig.proc_rank());
06584     
06585       success = MPI_Waitany(2*buffProcs.size(), &recv_ent_reqs[0], &ind, &status);
06586       if (MPI_SUCCESS != success) {
06587         result = MB_FAILURE;
06588         RRA("Failed in waitany in owned entity exchange.");
06589       }
06590     
06591       PRINT_DEBUG_RECD(status);
06592     
06593       // ok, received something; decrement incoming counter
06594       incoming1--;
06595       bool done = false;
06596     
06597       // In case ind is for ack, we need index of one before it
06598       unsigned int base_ind = 2*(ind/2);
06599       result = recv_buffer(MB_MESG_ENTS_SIZE,
06600                            status,
06601                            remoteOwnedBuffs[ind/2],
06602                            recv_ent_reqs[ind], recv_ent_reqs[ind+1],
06603                            incoming1,
06604                            localOwnedBuffs[ind/2], sendReqs[base_ind], sendReqs[base_ind+1],
06605                            done,
06606                            (store_remote_handles ? 
06607                             localOwnedBuffs[ind/2] : NULL),
06608                            MB_MESG_REMOTEH_SIZE,
06609                            &recv_remoteh_reqs[base_ind], &incoming2);
06610       RRA("Failed to receive buffer.");
06611 
06612       if (done) {
06613         if (myDebug->get_verbosity() == 4) {
06614           msgs.resize(msgs.size()+1);
06615           msgs.back() = new Buffer(*remoteOwnedBuffs[ind/2]);
06616         }
06617       
06618         // message completely received - process buffer that was sent
06619         remoteOwnedBuffs[ind/2]->reset_ptr(sizeof(int));
06620         result = unpack_buffer(remoteOwnedBuffs[ind/2]->buff_ptr,
06621                                store_remote_handles, buffProcs[ind/2], ind/2,
06622                                L1hloc, L1hrem, L1p, L2hloc, L2hrem, L2p,
06623                                new_ents, true);
06624         if (MB_SUCCESS != result) {
06625           std::cout << "Failed to unpack entities.  Buffer contents:" << std::endl;
06626           print_buffer(remoteOwnedBuffs[ind/2]->mem_ptr, MB_MESG_ENTS_SIZE, buffProcs[ind/2], false);
06627           return result;
06628         }
06629 
06630         if (recv_ent_reqs.size() != 2*buffProcs.size()) {
06631           // post irecv's for remote handles from new proc
06632           recv_remoteh_reqs.resize(2*buffProcs.size(), MPI_REQUEST_NULL);
06633           for (i = recv_ent_reqs.size(); i < 2*buffProcs.size(); i+=2) {
06634             localOwnedBuffs[i/2]->reset_buffer();
06635             incoming2++;
06636             PRINT_DEBUG_IRECV(procConfig.proc_rank(), buffProcs[i/2], 
06637                               localOwnedBuffs[i/2]->mem_ptr, INITIAL_BUFF_SIZE,
06638                               MB_MESG_REMOTEH_SIZE, incoming2);
06639             success = MPI_Irecv(localOwnedBuffs[i/2]->mem_ptr, INITIAL_BUFF_SIZE, 
06640                                 MPI_UNSIGNED_CHAR, buffProcs[i/2],
06641                                 MB_MESG_REMOTEH_SIZE, procConfig.proc_comm(), 
06642                                 &recv_remoteh_reqs[i]);
06643             if (success != MPI_SUCCESS) {
06644               result = MB_FAILURE;
06645               RRA("Failed to post irecv for remote handles in owned entity exchange.");
06646             }
06647           }
06648           recv_ent_reqs.resize(2*buffProcs.size(), MPI_REQUEST_NULL);
06649           sendReqs.resize(2*buffProcs.size(), MPI_REQUEST_NULL);
06650         }
06651       }
06652     }
06653 
06654     // add newly received entities to this processor's part; if migrating, remove the entities that were sent away
06655     result = assign_entities_part(new_ents, procConfig.proc_rank());
06656     RRA("Failed to assign entities to part.");
06657     if (migrate) {
06658       result = remove_entities_part(allsent, procConfig.proc_rank());
06659       RRA("Failed to remove entities from part.");
06660     }
06661 
06662     // add requests for any new addl procs
06663     if (recv_ent_reqs.size() != 2*buffProcs.size()) {
06664       // shouldn't get here...
06665       result = MB_FAILURE;
06666       RRA("Requests length doesn't match proc count in entity exchange.");
06667     }
06668 
06669 #ifdef USE_MPE
06670     if (myDebug->get_verbosity() == 2) {
06671       MPE_Log_event(ENTITIES_END, procConfig.proc_rank(), "Ending entity exchange.");
06672     }
06673 #endif
06674 
06675     //===========================================
06676     // send local handles for new entity to owner
06677     //===========================================
06678     for (i = 0; i < n_proc; i++) {
06679       ind = get_buffers(exchange_procs[i]);
06680       // reserve space on front for size and for initial buff size
06681       remoteOwnedBuffs[ind]->reset_buffer(sizeof(int));
06682     
06683       result = pack_remote_handles(L1hloc[ind], L1hrem[ind], L1p[ind],
06684                                    buffProcs[ind], remoteOwnedBuffs[ind]);
06685       RRA("Failed to pack remote handles.");
06686       remoteOwnedBuffs[ind]->set_stored_size();
06687 
06688       if (myDebug->get_verbosity() == 4) {
06689         msgs.resize(msgs.size()+1);
06690         msgs.back() = new Buffer(*remoteOwnedBuffs[ind]);
06691       }
06692       result = send_buffer(buffProcs[ind], remoteOwnedBuffs[ind], 
06693                            MB_MESG_REMOTEH_SIZE, 
06694                            sendReqs[2*ind], recv_remoteh_reqs[2*ind+1], 
06695                            &dum_ack_buff, incoming2);
06696       RRA("Failed to send remote handles.");
06697     }
06698 
06699     //===========================================
06700     // process remote handles of my ghosteds
06701     //===========================================
06702     while (incoming2) {
06703       PRINT_DEBUG_WAITANY(recv_remoteh_reqs, MB_MESG_REMOTEH_SIZE, procConfig.proc_rank());
06704       success = MPI_Waitany(2*buffProcs.size(), &recv_remoteh_reqs[0], &ind, &status);
06705       if (MPI_SUCCESS != success) {
06706         result = MB_FAILURE;
06707         RRA("Failed in waitany in owned entity exchange.");
06708       }
06709 
06710       // ok, received something; decrement incoming counter
06711       incoming2--;
06712     
06713       PRINT_DEBUG_RECD(status);
06714     
06715       bool done = false;
06716       unsigned int base_ind = 2*(ind/2);
06717       result = recv_buffer(MB_MESG_REMOTEH_SIZE, status, 
06718                            localOwnedBuffs[ind/2], 
06719                            recv_remoteh_reqs[ind], recv_remoteh_reqs[ind+1], incoming2,
06720                            remoteOwnedBuffs[ind/2], 
06721                            sendReqs[base_ind], sendReqs[base_ind+1],
06722                            done);
06723       RRA("Failed to receive remote handles.");
06724 
06725       if (done) {
06726         // incoming remote handles
06727         if (myDebug->get_verbosity() == 4) {
06728           msgs.resize(msgs.size()+1);
06729           msgs.back() = new Buffer(*localOwnedBuffs[ind/2]);
06730         }
06731     
06732         localOwnedBuffs[ind/2]->reset_ptr(sizeof(int));
06733         result = unpack_remote_handles(buffProcs[ind/2], 
06734                                        localOwnedBuffs[ind/2]->buff_ptr,
06735                                        L2hloc, L2hrem, L2p);
06736         RRA("Failed to unpack remote handles.");
06737       }
06738     }
06739 
06740 #ifdef USE_MPE
06741     if (myDebug->get_verbosity() == 2) {
06742       MPE_Log_event(RHANDLES_END, procConfig.proc_rank(), "Ending remote handles.");
06743       MPE_Log_event(OWNED_END, procConfig.proc_rank(), 
06744                     "Ending ghost exchange (still doing checks).");
06745     }
06746 #endif
06747   
06748     //===========================================
06749     // wait if requested
06750     //===========================================
06751     if (wait_all) {
06752       if (myDebug->get_verbosity() == 5) {
06753         success = MPI_Barrier(procConfig.proc_comm());
06754       }
06755       else {
06756         MPI_Status mult_status[2*MAX_SHARING_PROCS];
06757         success = MPI_Waitall(2*buffProcs.size(), &recv_remoteh_reqs[0], mult_status);
06758         success = MPI_Waitall(2*buffProcs.size(), &sendReqs[0], mult_status);
06759       }
06760       if (MPI_SUCCESS != success) {
06761         result = MB_FAILURE;
06762         RRA("Failed in waitall in owned entity exchange.");
06763       }
06764     }
06765 
06766 #ifndef NDEBUG
06767     result = check_sent_ents(allsent);
06768     RRA("Failed check on shared entities.");
06769 #endif
06770     myDebug->tprintf(1, "Exiting exchange_owned_mesh\n");
06771 
06772     return MB_SUCCESS;
06773   }
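
  // Illustrative sketch (not MOAB code): the request-indexing convention used
  // in exchange_owned_mesh() keeps two receive requests per neighbor, slot
  // 2*p for the message itself and slot 2*p+1 for the follow-up, so the index
  // returned by MPI_Waitany maps back to the neighbor as index/2.
#if 0
#include <mpi.h>
#include <vector>

// Returns the neighbor slot whose request completed, or -1 if none was active.
int wait_any_neighbor(std::vector<MPI_Request>& reqs /* size == 2 * nprocs */)
{
  int idx;
  MPI_Status status;
  MPI_Waitany((int)reqs.size(), &reqs[0], &idx, &status);
  if (idx == MPI_UNDEFINED) return -1;   // all requests were null/inactive
  return idx / 2;                        // neighbor index; idx % 2 says which slot fired
}
#endif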
06774 
06775   ErrorCode ParallelComm::get_iface_entities(int other_proc,
06776                                              int dim,
06777                                              Range &iface_ents) 
06778   {
06779     Range iface_sets;
06780     ErrorCode result = MB_SUCCESS;
06781   
06782     for (Range::iterator rit = interfaceSets.begin(); rit != interfaceSets.end(); rit++) {
06783       if (-1 != other_proc && !is_iface_proc(*rit, other_proc)) continue;
06784     
06785       if (-1 == dim) result = mbImpl->get_entities_by_handle(*rit, iface_ents);
06786       else result = mbImpl->get_entities_by_dimension(*rit, dim, iface_ents);
06787       RRA(" Failed to get entities in iface set.");
06788     }
06789   
06790     return MB_SUCCESS;
06791   }
06792 
06793   ErrorCode ParallelComm::assign_entities_part(std::vector<EntityHandle> &entities, const int proc)
06794   {
06795     EntityHandle part_set;
06796     ErrorCode result = get_part_handle(proc, part_set);
06797     RRA(" Failed to get part handle.");
06798 
06799     if (part_set > 0) {
06800       result = mbImpl->add_entities(part_set, &entities[0], entities.size());
06801       RRA(" Failed to add entities to part set.");
06802     }
06803 
06804     return MB_SUCCESS;
06805   }
06806 
06807   ErrorCode ParallelComm::remove_entities_part(Range &entities, const int proc)
06808   {
06809     EntityHandle part_set;
06810     ErrorCode result = get_part_handle(proc, part_set);
06811     RRA(" Failed to get part handle.");
06812 
06813     if (part_set > 0) {
06814       result = mbImpl->remove_entities(part_set, entities);
06815       RRA(" Failed to remove entities from part set.");
06816     }
06817 
06818     return MB_SUCCESS;
06819   }
06820 
06821   ErrorCode ParallelComm::check_sent_ents(Range &allsent) 
06822   {
06823     // check entities to make sure there are no zero-valued remote handles
06824     // where they shouldn't be
06825     std::vector<unsigned char> pstat(allsent.size());
06826     ErrorCode result = mbImpl->tag_get_data(pstatus_tag(), allsent, &pstat[0]);
06827     RRA("Trouble getting pstatus.");
06828     std::vector<EntityHandle> handles(allsent.size());
06829     result = mbImpl->tag_get_data(sharedh_tag(), allsent, &handles[0]);
06830     RRA("Trouble getting shared handles.");
06831     std::vector<int> procs(allsent.size());
06832     result = mbImpl->tag_get_data(sharedp_tag(), allsent, &procs[0]);
06833     RRA("Trouble getting shared procs.");
06834 
06835     Range bad_entities;
06836   
06837     Range::iterator rit;
06838     unsigned int i;
06839     EntityHandle dum_hs[MAX_SHARING_PROCS];
06840     int dum_ps[MAX_SHARING_PROCS];
06841   
06842     for (rit = allsent.begin(), i = 0; rit != allsent.end(); rit++, i++) {
06843       if (-1 != procs[i] && 0 == handles[i]) bad_entities.insert(*rit);
06844       else {
06845         // might be multi-shared...
06846         result = mbImpl->tag_get_data(sharedps_tag(), &(*rit), 1, dum_ps);
06847         if (MB_TAG_NOT_FOUND == result) continue;
06848         RRA("Trouble getting sharedps.");
06849         result = mbImpl->tag_get_data(sharedhs_tag(), &(*rit), 1, dum_hs);
06850         RRA("Trouble getting sharedhs.");
06851 
06852         // find first non-set proc
06853         int *ns_proc = std::find(dum_ps, dum_ps+MAX_SHARING_PROCS, -1);
06854         int num_procs = ns_proc-dum_ps;
06855         assert(num_procs <= MAX_SHARING_PROCS);
06856         // now look for zero handles in active part of dum_hs
06857         EntityHandle *ns_handle = std::find(dum_hs, dum_hs+num_procs, 0);
06858         int num_handles = ns_handle-dum_hs;
06859         assert(num_handles <= num_procs);
06860         if (num_handles != num_procs) bad_entities.insert(*rit);
06861       }
06862     }
06863   
    assert(bad_entities.empty());
06864     return MB_SUCCESS;
06865   }
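
  // Illustrative sketch (not MOAB code): the sentinel scan used in
  // check_sent_ents().  The sharedps/sharedhs tag arrays have a fixed length
  // and are padded with -1 (procs) and 0 (handles); std::find locates the
  // first pad value, and its distance from the start is the active count.
#if 0
#include <algorithm>

int count_active_entries(const int* procs, int max_sharing_procs)
{
  const int* first_pad = std::find(procs, procs + max_sharing_procs, -1);
  return (int)(first_pad - procs); // number of entries before the -1 padding
}
#endif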
06866 
06867   ErrorCode ParallelComm::pack_remote_handles(std::vector<EntityHandle> &L1hloc,
06868                                               std::vector<EntityHandle> &L1hrem,
06869                                               std::vector<int> &L1p,
06870                                               unsigned int /*to_proc*/,
06871                                               Buffer *buff) 
06872   {
06873     assert(std::find(L1hloc.begin(), L1hloc.end(), (EntityHandle)0) == L1hloc.end());
06874     
06875     // 2 vectors of handles plus ints
06876     buff->check_space(((L1p.size()+1)*sizeof(int) + 
06877                        (L1hloc.size()+1)*sizeof(EntityHandle) + 
06878                        (L1hrem.size()+1)*sizeof(EntityHandle)));
06879   
06880     // should be in pairs of handles
06881     PACK_INT(buff->buff_ptr, L1hloc.size());
06882     PACK_INTS(buff->buff_ptr, &L1p[0], L1p.size());
06883       // pack handles in reverse order, (remote, local), so on destination they
06884       // are ordered (local, remote)
06885     PACK_EH(buff->buff_ptr, &L1hrem[0], L1hrem.size());
06886     PACK_EH(buff->buff_ptr, &L1hloc[0], L1hloc.size());
06887   
06888     buff->set_stored_size();
06889   
06890     return MB_SUCCESS;
06891   }
06892 
06893   ErrorCode ParallelComm::unpack_remote_handles(unsigned int from_proc,
06894                                                 unsigned char *&buff_ptr,
06895                                                 std::vector<EntityHandle> &L2hloc,
06896                                                 std::vector<EntityHandle> &L2hrem,
06897                                                 std::vector<unsigned int> &L2p)
06898   {
06899     // incoming remote handles; use to set remote handles
06900     int num_eh;
06901     UNPACK_INT(buff_ptr, num_eh);
06902 
06903     unsigned char *buff_proc = buff_ptr;
06904     buff_ptr += num_eh * sizeof(int);
06905     unsigned char *buff_rem = buff_ptr + num_eh * sizeof(EntityHandle);
06906     ErrorCode result;
06907     EntityHandle hpair[2], new_h;
06908     int proc;
06909     for (int i = 0; i < num_eh; i++) {
06910       UNPACK_INT(buff_proc, proc);
06911         // handles packed (local, remote), though here local is either on this
06912         // proc or owner proc, depending on value of proc (-1 = here, otherwise owner);
06913         // this is decoded in find_existing_entity
06914       UNPACK_EH(buff_ptr, hpair, 1);
06915       UNPACK_EH(buff_rem, hpair+1, 1);
06916 
06917       if (-1 != proc) {
06918         result = find_existing_entity(false, proc, hpair[0], 3, NULL, 0,
06919                                       mbImpl->type_from_handle(hpair[1]),
06920                                       L2hloc, L2hrem, L2p, new_h);
06921         RRA("Didn't get existing entity.");
06922         if (new_h) hpair[0] = new_h;
06923         else hpair[0] = 0;
06924       }
06925       if (!(hpair[0] && hpair[1])) return MB_FAILURE;
06926       int this_proc = from_proc;
06927       result = update_remote_data(hpair[0], &this_proc, hpair+1, 1, 0);
06928       RRA("Trouble setting remote data range on sent entities in ghost exchange.");
06929     }
06930   
06931     return MB_SUCCESS;
06932   }
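
  // Illustrative sketch (not MOAB code): the wire format produced by
  // pack_remote_handles() and consumed by unpack_remote_handles() is
  // [count][count ints: owner proc or -1][count remote handles]
  // [count local handles]; packing remote before local lets the receiver read
  // the pairs back as (local, remote).  'put' is a simplified
  // memcpy-and-advance helper, and handles are shown as 64-bit integers.
#if 0
#include <cstring>
#include <stdint.h>
#include <vector>

static void put(unsigned char*& p, const void* v, size_t n)
{ std::memcpy(p, v, n); p += n; }

void pack_handle_pairs(unsigned char* buff,
                       const std::vector<int>& procs,
                       const std::vector<uint64_t>& remote,
                       const std::vector<uint64_t>& local)
{
  int n = (int)procs.size();
  put(buff, &n, sizeof(int));
  if (n) {
    put(buff, &procs[0],  n * sizeof(int));
    put(buff, &remote[0], n * sizeof(uint64_t)); // remote handles first ...
    put(buff, &local[0],  n * sizeof(uint64_t)); // ... then local handles
  }
}
#endif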
06933 
06934   ErrorCode ParallelComm::get_ghosted_entities(int bridge_dim,
06935                                                int ghost_dim,
06936                                                int to_proc, 
06937                                                int num_layers,
06938                                                int addl_ents,
06939                                                Range &ghosted_ents) 
06940   {
06941     // get bridge ents on interface(s)
06942     Range from_ents;
06943     ErrorCode result = MB_SUCCESS;
06944     assert(0 < num_layers);
06945     for (Range::iterator rit = interfaceSets.begin(); rit != interfaceSets.end();
06946          rit++) {
06947       if (!is_iface_proc(*rit, to_proc)) continue;
06948       
06949       // get starting "from" entities
06950       if (bridge_dim == -1)
06951         result = mbImpl->get_entities_by_handle(*rit, from_ents);
06952       else
06953         result = mbImpl->get_entities_by_dimension(*rit, bridge_dim, from_ents);
06954       RRA("Couldn't get bridge ents in the set.");
06955 
06956       // need to get layers of bridge-adj entities
06957       if (from_ents.empty()) continue;
06958       result = MeshTopoUtil(mbImpl).get_bridge_adjacencies(from_ents, bridge_dim,
06959                                                            ghost_dim, ghosted_ents, 
06960                                                            num_layers);
06961       RRA("Couldn't get bridge adjacencies.");
06962     }
06963   
06964     result = add_verts(ghosted_ents);
06965     RRA("Couldn't add verts.");
06966 
06967     if (addl_ents) {
06968       // first get the ents of ghost_dim
06969       Range tmp_ents, tmp_owned, tmp_notowned;
06970       tmp_owned = ghosted_ents.subset_by_dimension(ghost_dim);
06971       if (tmp_owned.empty()) return result;
06972 
06973       tmp_notowned = tmp_owned;
06974     
06975       // next, filter by pstatus; can only create adj entities for entities I own
06976       result = filter_pstatus(tmp_owned, PSTATUS_NOT_OWNED, PSTATUS_NOT, -1, &tmp_owned);
06977       RRA("Problem filtering owned entities.");
06978 
06979       tmp_notowned -= tmp_owned;
06980     
06981       // get edges first
06982       if (1 == addl_ents || 3 == addl_ents) {
06983         result = mbImpl->get_adjacencies(tmp_owned, 1, true, tmp_ents, Interface::UNION);
06984         RRA("Couldn't get edge adjacencies for owned ghost entities.");
06985         result = mbImpl->get_adjacencies(tmp_notowned, 1, false, tmp_ents, Interface::UNION);
06986         RRA("Couldn't get edge adjacencies for notowned ghost entities.");
06987       }
06988       if (2 == addl_ents || 3 == addl_ents) {
06989         result = mbImpl->get_adjacencies(tmp_owned, 2, true, tmp_ents, Interface::UNION);
06990         RRA("Couldn't get face adjacencies for owned ghost entities.");
06991         result = mbImpl->get_adjacencies(tmp_notowned, 2, false, tmp_ents, Interface::UNION);
06992         RRA("Couldn't get face adjacencies for notowned ghost entities.");
06993       }
06994 
06995       ghosted_ents.merge(tmp_ents);
06996     }
06997   
06998     return result;
06999   }
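
  // Illustrative sketch (not MOAB code): the addl_ents encoding interpreted
  // above -- 0 = no additional entities, 1 = also ghost adjacent edges,
  // 2 = also ghost adjacent faces, 3 = both edges and faces.
#if 0
#include <cassert>

struct AddlEnts { bool edges; bool faces; };

AddlEnts decode_addl_ents(int addl_ents)
{
  assert(addl_ents >= 0 && addl_ents <= 3);
  AddlEnts a;
  a.edges = (addl_ents == 1 || addl_ents == 3);
  a.faces = (addl_ents == 2 || addl_ents == 3);
  return a;
}
#endif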
07000 
07001   ErrorCode ParallelComm::add_verts(Range &sent_ents) 
07002   {
07003     // get the verts adj to these entities, since we'll have to send those too
07004 
07005     // first check sets
07006     std::pair<Range::const_iterator, Range::const_iterator>
07007       set_range = sent_ents.equal_range(MBENTITYSET);
07008     ErrorCode result = MB_SUCCESS, tmp_result;
07009     for (Range::const_iterator rit = set_range.first; rit != set_range.second; rit++) {
07010       tmp_result = mbImpl->get_entities_by_type(*rit, MBVERTEX, sent_ents);
07011       if (MB_SUCCESS != tmp_result) result = tmp_result;
07012     }
07013     RRA("Failed to get contained verts.");
07014   
07015     // now non-sets
07016     Range tmp_ents;
07017     std::copy(sent_ents.begin(), set_range.first, range_inserter(tmp_ents));
07018     result = mbImpl->get_adjacencies(tmp_ents, 0, false, sent_ents,
07019                                      Interface::UNION);
07020     RRA("Couldn't get vertices adj to ghosted ents.");
07021 
07022     return result;
07023   }
07024 
07025 
07026   ErrorCode ParallelComm::exchange_tags( const std::vector<Tag> &src_tags,
07027                                          const std::vector<Tag> &dst_tags,
07028                                          const Range &entities_in)
07029   {
07030     ErrorCode result;
07031     int success;
07032 
07033     myDebug->tprintf(1, "Entering exchange_tags\n");
07034 
07035     // get all procs interfacing to this proc
07036     std::set<unsigned int> exch_procs;
07037     result = get_comm_procs(exch_procs);  
07038 
07039     // post ghost irecv's for all interface procs
07040     // index greqs the same as buffer/sharing procs indices
07041     std::vector<MPI_Request> recv_tag_reqs(2*buffProcs.size(), MPI_REQUEST_NULL),
07042       sent_ack_reqs(buffProcs.size(), MPI_REQUEST_NULL);
07043     std::vector<unsigned int>::iterator sit;
07044     int ind;
07045 
07046     reset_all_buffers();
07047     int incoming = 0;
07048 
07049     for (ind = 0, sit = buffProcs.begin(); sit != buffProcs.end(); sit++, ind++) {
07050       incoming++;
07051       PRINT_DEBUG_IRECV(*sit, procConfig.proc_rank(), remoteOwnedBuffs[ind]->mem_ptr,
07052                         INITIAL_BUFF_SIZE, MB_MESG_TAGS_SIZE, incoming);
07053 
07054       success = MPI_Irecv(remoteOwnedBuffs[ind]->mem_ptr, INITIAL_BUFF_SIZE,
07055                           MPI_UNSIGNED_CHAR, *sit,
07056                           MB_MESG_TAGS_SIZE, procConfig.proc_comm(), 
07057                           &recv_tag_reqs[2*ind]);
07058       if (success != MPI_SUCCESS) {
07059         result = MB_FAILURE;
07060         RRA("Failed to post irecv in tag exchange.");
07061       }
07062 
07063     }
07064   
07065     // pack and send tags from this proc to others
07066     // make sendReqs vector to simplify initialization
07067     sendReqs.resize(2*buffProcs.size(), MPI_REQUEST_NULL);
07068   
07069     // take all shared entities if incoming list is empty
07070     Range entities;
07071     if (entities_in.empty()) 
07072       std::copy(sharedEnts.begin(), sharedEnts.end(), range_inserter(entities));
07073     else
07074       entities = entities_in;
07075 
07076     int dum_ack_buff;
07077 
07078     for (ind = 0, sit = buffProcs.begin(); sit != buffProcs.end(); sit++, ind++) {
07079     
07080       Range tag_ents = entities;
07081     
07082       // get ents shared by proc *sit
07083       result = filter_pstatus(tag_ents, PSTATUS_SHARED, PSTATUS_AND, *sit);
07084       RRA("Failed pstatus AND check.");
07085     
07086       // remote nonowned entities
07087       if (!tag_ents.empty()) {
07088         result = filter_pstatus(tag_ents, PSTATUS_NOT_OWNED, PSTATUS_NOT);
07089         RRA("Failed pstatus NOT check.");
07090       }
07091     
07092       // pack-send; this also posts receives if store_remote_handles is true
07093       std::vector<Range> tag_ranges;
07094       for (std::vector<Tag>::const_iterator vit = src_tags.begin(); vit != src_tags.end(); vit++) {
07095         const void* ptr;
07096         int sz;
07097         if (mbImpl->tag_get_default_value( *vit, ptr, sz ) != MB_SUCCESS) {
07098           Range tagged_ents;
07099           mbImpl->get_entities_by_type_and_tag( 0, MBMAXTYPE, &*vit, 0, 1, tagged_ents );
07100           tag_ranges.push_back( intersect( tag_ents, tagged_ents ) );
07101         } 
07102         else {
07103           tag_ranges.push_back(tag_ents);
07104         }
07105       }
07106     
07107       // pack the data
07108       // reserve space on front for size and for initial buff size
07109       localOwnedBuffs[ind]->reset_ptr(sizeof(int));
07110     
07111       result = pack_tags(tag_ents,
07112                          src_tags, dst_tags, tag_ranges, 
07113                          localOwnedBuffs[ind], true, *sit);
07114       RRA("Failed to pack tags.");
07115 
07116       // now send it
07117       result = send_buffer(*sit, localOwnedBuffs[ind], MB_MESG_TAGS_SIZE, sendReqs[2*ind],
07118                            recv_tag_reqs[2*ind+1], &dum_ack_buff, incoming);
07119       RRA("Failed to send buffer.");
07120                          
07121     }
07122   
07123     // receive/unpack tags
07124     while (incoming) {
07125       MPI_Status status;
07126       PRINT_DEBUG_WAITANY(recv_tag_reqs, MB_MESG_TAGS_SIZE, procConfig.proc_rank());
07127       success = MPI_Waitany(2*buffProcs.size(), &recv_tag_reqs[0], &ind, &status);
07128       if (MPI_SUCCESS != success) {
07129         result = MB_FAILURE;
07130         RRA("Failed in waitany in tag exchange.");
07131       }
07132     
07133       PRINT_DEBUG_RECD(status);
07134 
07135       // ok, received something; decrement incoming counter
07136       incoming--;
07137     
07138       bool done = false;
07139       std::vector<EntityHandle> dum_vec;
07140       result = recv_buffer(MB_MESG_TAGS_SIZE,
07141                            status,
07142                            remoteOwnedBuffs[ind/2],
07143                            recv_tag_reqs[ind/2 * 2], recv_tag_reqs[ind/2 * 2 + 1],
07144                            incoming,
07145                            localOwnedBuffs[ind/2], sendReqs[ind/2*2], sendReqs[ind/2*2+1],
07146                            done);
07147       RRA("Failed to resize recv buffer.");
07148       if (done) {
07149         remoteOwnedBuffs[ind/2]->reset_ptr(sizeof(int));
07150         result = unpack_tags(remoteOwnedBuffs[ind/2]->buff_ptr,
07151                              dum_vec, true, buffProcs[ind/2]);
07152         RRA("Failed to recv-unpack-tag message.");
07153       }
07154     }
07155   
07156     // ok, now wait
07157     if (myDebug->get_verbosity() == 5) {
07158       success = MPI_Barrier(procConfig.proc_comm());
07159     }
07160     else {
07161       MPI_Status status[2*MAX_SHARING_PROCS];
07162       success = MPI_Waitall(2*buffProcs.size(), &sendReqs[0], status);
07163     }
07164     if (MPI_SUCCESS != success) {
07165       result = MB_FAILURE;
07166       RRA("Failure in waitall in tag exchange.");
07167     }
07168   
07169     // If source tag is not equal to destination tag, then
07170     // do local copy for owned entities (communicate w/ self)
07171     assert(src_tags.size() == dst_tags.size());
07172     if (src_tags != dst_tags) {
07173       std::vector<unsigned char> data;
07174       Range owned_ents;
07175       if (entities_in.empty()) 
07176         std::copy(sharedEnts.begin(), sharedEnts.end(), range_inserter(owned_ents));
07177       else
07178         owned_ents = entities_in;
07179       result = filter_pstatus(owned_ents, PSTATUS_NOT_OWNED, PSTATUS_NOT);
07180       RRA("Failure to get subset of owned entities");
07181   
07182       if (!owned_ents.empty()) { // check this here, otherwise we get 
07183         // unexpected results from get_entities_by_type_and_tag w/ Interface::INTERSECT
07184   
07185         for (size_t i = 0; i < src_tags.size(); ++i) {
07186           if (src_tags[i] == dst_tags[i])
07187             continue;
07188 
07189           Range tagged_ents(owned_ents);
07190           result = mbImpl->get_entities_by_type_and_tag( 0, MBMAXTYPE,
07191                                                          &src_tags[i], 0, 1, tagged_ents, Interface::INTERSECT );
07192           RRA("get_entities_by_type_and_tag(type == MBMAXTYPE) failed.");
07193 
07194           int sz, size2;
07195           result = mbImpl->tag_get_bytes( src_tags[i], sz );
07196           RRA("tag_get_bytes failed.");
07197           result = mbImpl->tag_get_bytes( dst_tags[i], size2 );
07198           RRA("tag_get_bytes failed.");
07199           if (sz != size2) {
07200             result = MB_FAILURE;
07201             RRA("Tag sizes don't match.");
07202           }
07203 
07204           data.resize( sz * tagged_ents.size() );
07205           result = mbImpl->tag_get_data( src_tags[i], tagged_ents, &data[0] );
07206           RRA("tag_get_data failed.");
07207           result = mbImpl->tag_set_data( dst_tags[i], tagged_ents, &data[0] );
07208           RRA("tag_set_data failed.");
07209         }
07210       }
07211     }
07212 
07213     myDebug->tprintf(1, "Exiting exchange_tags\n");
07214 
07215     return MB_SUCCESS;
07216   }
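
  // Usage sketch (hypothetical application code, not part of this file):
  // exchange one dense double tag across shared entities.  The tag name
  // "density" and the function name are made up; passing an empty Range
  // selects all shared entities, as handled above.  Error checking is elided.
#if 0
#include "moab/Core.hpp"
#include "moab/ParallelComm.hpp"
#include "moab/Range.hpp"
#include <vector>

void sync_density(moab::Interface* mb, moab::ParallelComm* pcomm)
{
  double def_val = 0.0;
  moab::Tag density;
  mb->tag_get_handle("density", 1, moab::MB_TYPE_DOUBLE, density,
                     moab::MB_TAG_DENSE | moab::MB_TAG_CREAT, &def_val);

  std::vector<moab::Tag> src(1, density), dst(1, density);
  moab::Range all_shared;                      // empty -> all shared entities
  pcomm->exchange_tags(src, dst, all_shared);
}
#endif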
07217 
07218   ErrorCode ParallelComm::reduce_tags( const std::vector<Tag> &src_tags,
07219                                        const std::vector<Tag> &dst_tags,
07220                                        const MPI_Op mpi_op,
07221                                        const Range &entities_in)
07222   {
07223     ErrorCode result;
07224     int success;
07225 
07226     myDebug->tprintf(1, "Entering reduce_tags\n");
07227 
07228       // check that restrictions are met: number of source/dst tags...
07229     if (src_tags.size() != dst_tags.size()) {
07230       result = MB_FAILURE;
07231       RRA("Source and destination tag handles must be specified for reduce_tags.");
07232     }
07233 
07234       // ... tag data types
07235     std::vector<Tag>::const_iterator vits, vitd;
07236     int tags_size, tagd_size;
07237     DataType tags_type, tagd_type;
07238     std::vector<unsigned char> vals;
07239     std::vector<int> tags_sizes;
07240     for (vits = src_tags.begin(), vitd = dst_tags.begin(); vits != src_tags.end(); vits++, vitd++) {
07241         // checks on tag characteristics
07242       result = mbImpl->tag_get_data_type(*vits, tags_type);
07243       RRA("Couldn't get src tag data type.");
07244       if (tags_type != MB_TYPE_INTEGER && tags_type != MB_TYPE_DOUBLE &&
07245           tags_type != MB_TYPE_BIT) {
07246         result = MB_FAILURE;
07247         RRA("Src/dst tags must have integer, double, or bit data type.");
07248       }
07249 
07250       result = mbImpl->tag_get_bytes(*vits, tags_size);
07251       RRA("Couldn't get src tag bytes.");
07252       vals.resize(tags_size);
07253       result = mbImpl->tag_get_default_value(*vits, &vals[0]);
07254       RRA("Src tag must have default value.");
07255 
07256       tags_sizes.push_back(tags_size);
07257 
07258         // ok, those passed; now check whether dest tags, if specified, agree with src tags
07259       if (*vits == *vitd) continue;
07260       
07261       result = mbImpl->tag_get_bytes(*vitd, tagd_size);
07262       RRA("Couldn't get dst tag bytes.");
07263       if (tags_size != tagd_size) {
07264         result = MB_FAILURE;
07265         RRA("Sizes between src and dst tags don't match.");
07266       }
07267       result = mbImpl->tag_get_data_type(*vitd, tagd_type);
07268       RRA("Couldn't get dst tag data type.");
07269       if (tags_type != tagd_type) {
07270         result = MB_FAILURE;
07271         RRA("Src and dst tags must be of same data type.");
07272       }
07273 
07274     }
07275 
07276     // get all procs interfacing to this proc
07277     std::set<unsigned int> exch_procs;
07278     result = get_comm_procs(exch_procs);  
07279 
07280     // post ghost irecv's for all interface procs
07281     // index greqs the same as buffer/sharing procs indices
07282     std::vector<MPI_Request> recv_tag_reqs(2*buffProcs.size(), MPI_REQUEST_NULL),
07283       sent_ack_reqs(buffProcs.size(), MPI_REQUEST_NULL);
07284     std::vector<unsigned int>::iterator sit;
07285     int ind;
07286 
07287     reset_all_buffers();
07288     int incoming = 0;
07289 
07290     for (ind = 0, sit = buffProcs.begin(); sit != buffProcs.end(); sit++, ind++) {
07291       incoming++;
07292       PRINT_DEBUG_IRECV(*sit, procConfig.proc_rank(), remoteOwnedBuffs[ind]->mem_ptr,
07293                         INITIAL_BUFF_SIZE, MB_MESG_TAGS_SIZE, incoming);
07294 
07295       success = MPI_Irecv(remoteOwnedBuffs[ind]->mem_ptr, INITIAL_BUFF_SIZE,
07296                           MPI_UNSIGNED_CHAR, *sit,
07297                           MB_MESG_TAGS_SIZE, procConfig.proc_comm(), 
07298                           &recv_tag_reqs[2*ind]);
07299       if (success != MPI_SUCCESS) {
07300         result = MB_FAILURE;
07301         RRA("Failed to post irecv in tag reduction.");
07302       }
07303 
07304     }
07305   
07306     // pack and send tags from this proc to others
07307     // make sendReqs vector to simplify initialization
07308     sendReqs.resize(2*buffProcs.size(), MPI_REQUEST_NULL);
07309   
07310     // take all shared entities if incoming list is empty
07311     Range entities;
07312     if (entities_in.empty()) {
07313       std::copy(sharedEnts.begin(), sharedEnts.end(), range_inserter(entities));
07314     }
07315     else 
07316       entities = entities_in;
07317 
07318       // if the tags are different, copy the source to the dest tag locally
07319     std::vector<Tag>::const_iterator vit = src_tags.begin(), vit2 = dst_tags.begin();
07320     std::vector<int>::const_iterator vsizes = tags_sizes.begin();
07321     for (; vit != src_tags.end(); vit++, vit2++, vsizes++) {
07322       if (*vit == *vit2) continue;
07323       vals.resize(entities.size()*(*vsizes));
07324       result = mbImpl->tag_get_data(*vit, entities, &vals[0]); 
07325       RRA("Didn't get data properly.");
07326       result = mbImpl->tag_set_data(*vit2, entities, &vals[0]); 
07327       RRA("Didn't set data properly.");
07328     }
07329       
07330     int dum_ack_buff;
07331 
07332     for (ind = 0, sit = buffProcs.begin(); sit != buffProcs.end(); sit++, ind++) {
07333     
07334       Range tag_ents = entities;
07335     
07336       // get ents shared by proc *sit
07337       result = filter_pstatus(tag_ents, PSTATUS_SHARED, PSTATUS_AND, *sit);
07338       RRA("Failed pstatus AND check.");
07339     
07340       // pack-send
07341       std::vector<Range> tag_ranges;
07342       for (vit = src_tags.begin(); vit != src_tags.end(); vit++) {
07343         const void* ptr;
07344         int sz;
07345         if (mbImpl->tag_get_default_value( *vit, ptr, sz ) != MB_SUCCESS) {
07346           Range tagged_ents;
07347           mbImpl->get_entities_by_type_and_tag( 0, MBMAXTYPE, &*vit, 0, 1, tagged_ents );
07348           tag_ranges.push_back( intersect( tag_ents, tagged_ents ) );
07349         } 
07350         else {
07351           tag_ranges.push_back(tag_ents);
07352         }
07353       }
07354     
07355       // pack the data
07356       // reserve space on front for size and for initial buff size
07357       localOwnedBuffs[ind]->reset_ptr(sizeof(int));
07358     
07359       result = pack_tags(tag_ents,
07360                          src_tags, dst_tags, tag_ranges, 
07361                          localOwnedBuffs[ind], true, *sit);
07362       RRA("Failed to pack tags.");
07363 
07364       // now send it
07365       result = send_buffer(*sit, localOwnedBuffs[ind], MB_MESG_TAGS_SIZE, sendReqs[2*ind],
07366                            recv_tag_reqs[2*ind+1], &dum_ack_buff, incoming);
07367       RRA("Failed to send buffer.");
07368                          
07369     }
07370 
07371     // receive/unpack tags
07372     while (incoming) {
07373       MPI_Status status;
07374       PRINT_DEBUG_WAITANY(recv_tag_reqs, MB_MESG_TAGS_SIZE, procConfig.proc_rank());
07375       success = MPI_Waitany(2*buffProcs.size(), &recv_tag_reqs[0], &ind, &status);
07376       if (MPI_SUCCESS != success) {
07377         result = MB_FAILURE;
07378         RRA("Failed in waitany in tag reduction.");
07379       }
07380     
07381       PRINT_DEBUG_RECD(status);
07382 
07383       // ok, received something; decrement incoming counter
07384       incoming--;
07385     
07386       bool done = false;
07387       std::vector<EntityHandle> dum_vec;
07388       result = recv_buffer(MB_MESG_TAGS_SIZE, status, remoteOwnedBuffs[ind/2],
07389                            recv_tag_reqs[ind/2 * 2], recv_tag_reqs[ind/2 * 2 + 1],
07390                            incoming, localOwnedBuffs[ind/2], sendReqs[ind/2*2], sendReqs[ind/2*2+1], 
07391                            done);
07392       RRA("Failed to resize recv buffer.");
07393       if (done) {
07394         remoteOwnedBuffs[ind/2]->reset_ptr(sizeof(int));
07395         result = unpack_tags(remoteOwnedBuffs[ind/2]->buff_ptr,
07396                                dum_vec, true, buffProcs[ind/2], &mpi_op);
07397         RRA("Failed to recv-unpack-tag message.");
07398       }
07399     }
07400   
07401     // ok, now wait
07402     if (myDebug->get_verbosity() == 5) {
07403       success = MPI_Barrier(procConfig.proc_comm());
07404     }
07405     else {
07406       MPI_Status status[2*MAX_SHARING_PROCS];
07407       success = MPI_Waitall(2*buffProcs.size(), &sendReqs[0], status);
07408     }
07409     if (MPI_SUCCESS != success) {
07410       result = MB_FAILURE;
07411       RRA("Failure in waitall in tag exchange.");
07412     }
07413   
07414     myDebug->tprintf(1, "Exiting reduce_tags\n");
07415 
07416     return MB_SUCCESS;
07417   }
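
  // Usage sketch (hypothetical, not part of this file): sum a shared integer
  // tag across processors with MPI_SUM.  As checked above, reduce_tags()
  // requires the source tag to have a default value and an integer, double,
  // or bit data type.  The tag name "counts" is made up; errors are elided.
#if 0
#include "moab/Core.hpp"
#include "moab/ParallelComm.hpp"
#include "moab/Range.hpp"
#include <vector>

void sum_counts(moab::Interface* mb, moab::ParallelComm* pcomm)
{
  int def_val = 0;
  moab::Tag counts;
  mb->tag_get_handle("counts", 1, moab::MB_TYPE_INTEGER, counts,
                     moab::MB_TAG_DENSE | moab::MB_TAG_CREAT, &def_val);

  std::vector<moab::Tag> src(1, counts), dst(1, counts);
  moab::Range all_shared;                      // empty -> all shared entities
  pcomm->reduce_tags(src, dst, MPI_SUM, all_shared);
}
#endif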
07418 
07419   /*
07420     ErrorCode ParallelComm::exchange_tags( Tag src_tag, 
07421     Tag dst_tag, 
07422     const Range& entities )
07423     {
07424     ErrorCode result;
07425     int success;
07426 
07427     // get all procs interfacing to this proc
07428     std::set<unsigned int> exch_procs;
07429     result = get_comm_procs(exch_procs);  
07430 
07431     // post ghost irecv's for all interface procs
07432     // index greqs the same as buffer/sharing procs indices
07433     std::vector<MPI_Request> recv_reqs(MAX_SHARING_PROCS, MPI_REQUEST_NULL);
07434     std::vector<MPI_Status> gstatus(MAX_SHARING_PROCS);
07435     std::vector<unsigned int>::iterator sit;
07436     int ind;
07437     for (ind = 0, sit = buffProcs.begin(); sit != buffProcs.end(); sit++, ind++) {
07438     success = MPI_Irecv(&ghostRBuffs[ind][0], ghostRBuffs[ind].size(), 
07439     MPI_UNSIGNED_CHAR, *sit,
07440     MB_MESG_ANY, procConfig.proc_comm(), 
07441     &recv_reqs[ind]);
07442     if (success != MPI_SUCCESS) {
07443     result = MB_FAILURE;
07444     RRA("Failed to post irecv in ghost exchange.");
07445     }
07446     }
07447   
07448     // figure out which entities are shared with which processors
07449     std::map<int,Range> proc_ents;
07450     int other_procs[MAX_SHARING_PROCS], num_sharing;
07451     for (Range::const_iterator i = entities.begin(); i != entities.end(); ++i) {
07452     int owner;
07453     result = get_owner( *i, owner );
07454     RRA("Failed to get entity owner.");
07455 
07456     // only send entities that this proc owns
07457     if ((unsigned)owner != proc_config().proc_rank()) 
07458     continue;
07459     
07460     result = get_sharing_parts( *i, other_procs, num_sharing );
07461     RRA("Failed to get procs sharing entity.");
07462     if (num_sharing == 0) // keep track of non-shared entities for later
07463     proc_ents[proc_config().proc_rank()].insert( *i );
07464     for (int j = 0; j < num_sharing; ++j)
07465     proc_ents[other_procs[j]].insert( *i );
07466     }
07467   
07468     // pack and send tags from this proc to others
07469     // make sendReqs vector to simplify initialization
07470     std::fill(sendReqs, sendReqs+MAX_SHARING_PROCS, MPI_REQUEST_NULL);
07471     std::map<unsigned int,Range>::const_iterator mit;
07472   
07473     for (ind = 0, sit = buffProcs.begin(); sit != buffProcs.end(); sit++, ind++) {
07474     
07475     // count first
07476     // buffer needs to begin with the number of tags (one)
07477     int buff_size = sizeof(int);
07478     result = packed_tag_size( src_tag, proc_ents[*sit], buff_size );
07479     RRA("Failed to count buffer in pack_send_tag.");
07480 
07481     unsigned char *buff_ptr = &ownerSBuffs[ind][0];
07482     buff->check_space(ownerSBuffs[ind], buff_ptr, buff_size);
07483     PACK_INT( buff_ptr, 1 ); // number of tags
07484     result = pack_tag( src_tag, dst_tag, proc_ents[*sit], proc_ents[*sit],
07485     ownerSBuffs[ind], buff_ptr, true, *sit );
07486     RRA("Failed to pack buffer in pack_send_tag.");
07487 
07488     // if the message is large, send a first message to tell how large
07489     if (INITIAL_BUFF_SIZE < buff_size) {
07490     int tmp_buff_size = -buff_size;
07491     int success = MPI_Send(&tmp_buff_size, sizeof(int), MPI_UNSIGNED_CHAR, 
07492     *sit, MB_MESG_SIZE, procConfig.proc_comm());
07493     if (success != MPI_SUCCESS) return MB_FAILURE;
07494     }
07495     
07496     // send the buffer
07497     success = MPI_Isend(&ownerSBuffs[ind][0], buff_size, MPI_UNSIGNED_CHAR, *sit, 
07498     MB_MESG_TAGS, procConfig.proc_comm(), &sendReqs[ind]);
07499     if (success != MPI_SUCCESS) return MB_FAILURE;
07500     }
07501   
07502     // receive/unpack tags
07503     int num_incoming = exch_procs.size();
07504   
07505     while (num_incoming) {
07506     int ind;
07507     MPI_Status status;
07508     success = MPI_Waitany(MAX_SHARING_PROCS, &recv_reqs[0], &ind, &status);
07509     if (MPI_SUCCESS != success) {
07510     result = MB_FAILURE;
07511     RRA("Failed in waitany in ghost exchange.");
07512     }
07513     
07514     // ok, received something; decrement incoming counter
07515     num_incoming--;
07516     
07517     int new_size;
07518     unsigned char *buff_ptr;
07519     Range dum_range;
07520     
07521     // branch on message type
07522     switch (status.MPI_TAG) {
07523     case MB_MESG_SIZE:
07524     // incoming message just has size; resize buffer and re-call recv,
07525     // then re-increment incoming count
07526     assert(ind < MAX_SHARING_PROCS);
07527     new_size = *((int*)&ghostRBuffs[ind][0]);
07528     assert(0 > new_size);
07529     result = recv_size_buff(buffProcs[ind], ghostRBuffs[ind], recv_reqs[ind],
07530     MB_MESG_TAGS);
07531     RRA("Failed to resize recv buffer.");
07532     num_incoming++;
07533     break;
07534     case MB_MESG_TAGS:
07535     // incoming ghost entities; process
07536     buff_ptr = &ghostRBuffs[ind][0];
07537     result = unpack_tags(buff_ptr, dum_range, true,
07538     buffProcs[ind]);
07539     RRA("Failed to recv-unpack-tag message.");
07540     break;
07541     default:
07542     result = MB_FAILURE;
07543     RRA("Failed to get message of correct type in exch_tags.");
07544     break;
07545     }
07546     }
07547   
07548     // ok, now wait
07549     MPI_Status status[MAX_SHARING_PROCS];
07550     success = MPI_Waitall(MAX_SHARING_PROCS, &sendReqs[0], status);
07551     if (MPI_SUCCESS != success) {
07552     result = MB_FAILURE;
07553     RRA("Failure in waitall in tag exchange.");
07554     }
07555   
07556     // if src and destination tags aren't the same, need to copy 
07557     // values for local entities
07558     if (src_tag != dst_tag) {
07559     const Range& myents = proc_ents[proc_config().proc_rank()];
07560     std::vector<const void*> data_ptrs(myents.size());
07561     std::vector<int> data_sizes(myents.size());
07562     result = get_moab()->tag_get_data( src_tag, myents, &data_ptrs[0], &data_sizes[0] );
07563     RRA("Failure to get pointers to local data.");
07564     result = get_moab()->tag_set_data( dst_tag, myents, &data_ptrs[0], &data_sizes[0] );
07565     RRA("Failure to get pointers to local data.");
07566     }  
07567   
07568     return MB_SUCCESS;
07569     }
07570   */
07571 
07573   Tag ParallelComm::sharedp_tag()
07574   {
07575     if (!sharedpTag) {
07576       int def_val = -1;
07577       ErrorCode result = mbImpl->tag_get_handle(PARALLEL_SHARED_PROC_TAG_NAME, 
07578                                                 1, MB_TYPE_INTEGER, sharedpTag,
07579                                                 MB_TAG_DENSE|MB_TAG_CREAT, &def_val);
07580       if (MB_SUCCESS != result) 
07581         return 0;
07582     }
07583   
07584     return sharedpTag;
07585   }
07586 
07588   Tag ParallelComm::sharedps_tag()
07589   {
07590     if (!sharedpsTag) {
07591       ErrorCode result = mbImpl->tag_get_handle(PARALLEL_SHARED_PROCS_TAG_NAME, 
07592                                                 MAX_SHARING_PROCS, MB_TYPE_INTEGER, 
07593                                                 sharedpsTag, MB_TAG_SPARSE|MB_TAG_CREAT );
07594       if (MB_SUCCESS != result) 
07595         return 0;
07596     }
07597   
07598     return sharedpsTag;
07599   }
07600   
07602   Tag ParallelComm::sharedh_tag()
07603   {
07604     if (!sharedhTag) {
07605       EntityHandle def_val = 0;
07606       ErrorCode result = mbImpl->tag_get_handle(PARALLEL_SHARED_HANDLE_TAG_NAME, 
07607                                                 1, MB_TYPE_HANDLE, sharedhTag,
07608                                                 MB_TAG_DENSE|MB_TAG_CREAT, &def_val);
07609       if (MB_SUCCESS != result)
07610         return 0;
07611     }
07612   
07613     return sharedhTag;
07614   }
07615   
07617   Tag ParallelComm::sharedhs_tag()
07618   {  
07619     if (!sharedhsTag) {
07620       ErrorCode result = mbImpl->tag_get_handle(PARALLEL_SHARED_HANDLES_TAG_NAME, 
07621                                                 MAX_SHARING_PROCS, MB_TYPE_HANDLE, 
07622                                                 sharedhsTag, MB_TAG_SPARSE|MB_TAG_CREAT);
07623       if (MB_SUCCESS != result) 
07624         return 0;
07625     }
07626 
07627     return sharedhsTag;
07628   }
07629   
07631   Tag ParallelComm::pstatus_tag()
07632   {  
07633     if (!pstatusTag) {
07634       unsigned char tmp_pstatus = 0;
07635       ErrorCode result = mbImpl->tag_get_handle(PARALLEL_STATUS_TAG_NAME, 
07636                                                 1, MB_TYPE_OPAQUE, pstatusTag,
07637                                                 MB_TAG_DENSE|MB_TAG_CREAT,
07638                                                 &tmp_pstatus);
07639       if (MB_SUCCESS != result)
07640         return 0;
07641     }
07642   
07643     return pstatusTag;
07644   }
07645   
07647   Tag ParallelComm::partition_tag()
07648   {  
07649     if (!partitionTag) {
07650       int dum_id = -1;
07651       ErrorCode result = mbImpl->tag_get_handle(PARALLEL_PARTITION_TAG_NAME, 
07652                                                 1, MB_TYPE_INTEGER, partitionTag,
07653                                                 MB_TAG_SPARSE|MB_TAG_CREAT, &dum_id);
07654       if (MB_SUCCESS != result)
07655         return 0;
07656     }
07657   
07658     return partitionTag;
07659   }
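
  // Illustrative sketch (not MOAB code): the lazy-accessor pattern shared by
  // the tag handle getters above -- cache the handle, create the tag on first
  // use with MB_TAG_CREAT and a default value, and return 0 on failure.
#if 0
#include "moab/Interface.hpp"

moab::Tag get_or_create_int_tag(moab::Interface* mb, moab::Tag& cached,
                                const char* name, int default_value)
{
  if (!cached) {
    moab::ErrorCode rval =
      mb->tag_get_handle(name, 1, moab::MB_TYPE_INTEGER, cached,
                         moab::MB_TAG_DENSE | moab::MB_TAG_CREAT, &default_value);
    if (moab::MB_SUCCESS != rval) return 0;
  }
  return cached;
}
#endif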
07660   
07662   Tag ParallelComm::pcomm_tag(Interface *impl,
07663                               bool create_if_missing)
07664   {
07665     Tag this_tag = 0;
07666     ErrorCode result;
07667     if (create_if_missing) {
07668       result = impl->tag_get_handle(PARALLEL_COMM_TAG_NAME, 
07669                                     MAX_SHARING_PROCS*sizeof(ParallelComm*),
07670                                     MB_TYPE_OPAQUE, this_tag, MB_TAG_SPARSE|MB_TAG_CREAT);
07671     }
07672     else {
07673       result = impl->tag_get_handle(PARALLEL_COMM_TAG_NAME, 
07674                                     MAX_SHARING_PROCS*sizeof(ParallelComm*),
07675                                     MB_TYPE_OPAQUE, this_tag, MB_TAG_SPARSE);
07676     }
07677     
07678     if (MB_SUCCESS != result)
07679       return 0;
07680   
07681     return this_tag;
07682   }
07683 
07685   ParallelComm *ParallelComm::get_pcomm(Interface *impl, const int index) 
07686   {
07687     Tag pc_tag = pcomm_tag(impl, false);
07688     if (0 == pc_tag) return NULL;
07689   
07690     const EntityHandle root = 0;
07691     ParallelComm *pc_array[MAX_SHARING_PROCS];
07692     ErrorCode result = impl->tag_get_data(pc_tag, &root, 1, (void*)pc_array);
07693     if (MB_SUCCESS != result) return NULL;
07694   
07695     return pc_array[index];
07696   }
07697 
07698   ErrorCode ParallelComm::get_all_pcomm( Interface* impl, std::vector<ParallelComm*>& list )
07699   {
07700     Tag pc_tag = pcomm_tag(impl, false);
07701     if (0 == pc_tag)
07702       return MB_TAG_NOT_FOUND;
07703   
07704     const EntityHandle root = 0;
07705     ParallelComm *pc_array[MAX_SHARING_PROCS];
07706     ErrorCode rval = impl->tag_get_data( pc_tag, &root, 1, pc_array );
07707     if (MB_SUCCESS != rval)
07708       return rval;
07709   
07710     for (int i = 0; i < MAX_SHARING_PROCS; ++i)
07711       if (pc_array[i])
07712         list.push_back( pc_array[i] );
07713   
07714     return MB_SUCCESS;
07715   }
07716   
07717 
07719   ParallelComm *ParallelComm::get_pcomm( Interface *impl, 
07720                                          EntityHandle prtn,
07721                                          const MPI_Comm* comm ) 
07722   {
07723     ErrorCode rval;
07724     ParallelComm* result = 0;
07725   
07726     Tag prtn_tag;
07727     rval = impl->tag_get_handle( PARTITIONING_PCOMM_TAG_NAME, 
07728                                  1, MB_TYPE_INTEGER, prtn_tag,
07729                                  MB_TAG_SPARSE|MB_TAG_CREAT );
07730     if (MB_SUCCESS != rval)
07731       return 0;
07732   
07733     int pcomm_id;
07734     rval = impl->tag_get_data( prtn_tag, &prtn, 1, &pcomm_id );
07735     if (MB_SUCCESS == rval) {
07736       result= get_pcomm( impl, pcomm_id );
07737     }
07738     else if (MB_TAG_NOT_FOUND == rval && comm) {
07739       result = new ParallelComm( impl, *comm, &pcomm_id );
07740       if (!result)
07741         return 0;
07742       result->set_partitioning( prtn );
07743     
07744       rval = impl->tag_set_data( prtn_tag, &prtn, 1, &pcomm_id );
07745       if (MB_SUCCESS != rval) {
07746         delete result;
07747         result = 0;
07748       }
07749     }
07750   
07751     return result;
07752   }
07753 
07754   ErrorCode ParallelComm::set_partitioning( EntityHandle set) 
07755   {
07756     ErrorCode rval;
07757     Tag prtn_tag;
07758     rval = mbImpl->tag_get_handle( PARTITIONING_PCOMM_TAG_NAME, 
07759                                    1, MB_TYPE_INTEGER, prtn_tag,
07760                                    MB_TAG_SPARSE|MB_TAG_CREAT );
07761     if (MB_SUCCESS != rval)
07762       return rval;
07763 
07764     // get my id
07765     ParallelComm* pcomm_arr[MAX_SHARING_PROCS];
07766     Tag pc_tag = pcomm_tag(mbImpl, false);
07767     if (0 == pc_tag) 
07768       return MB_FAILURE;
07769     const EntityHandle root = 0;
07770     ErrorCode result = mbImpl->tag_get_data(pc_tag, &root, 1, pcomm_arr);
07771     if (MB_SUCCESS != result) 
07772       return MB_FAILURE;  
07773     int id = std::find(pcomm_arr,pcomm_arr+MAX_SHARING_PROCS,this) - pcomm_arr;
07774     if (id == MAX_SHARING_PROCS)
07775       return MB_FAILURE;
07776 
07777     EntityHandle old = partitioningSet;
07778     if (old) {
07779       rval = mbImpl->tag_delete_data( prtn_tag, &old, 1 );
07780       if (MB_SUCCESS != rval)
07781         return rval;
07782       partitioningSet = 0;
07783     }
07784   
07785     if (!set) 
07786       return MB_SUCCESS;
07787   
07788     Range contents;
07789     if (old) {
07790       rval = mbImpl->get_entities_by_handle( old, contents );
07791       if (MB_SUCCESS != rval)
07792         return rval;
07793     }
07794     else {
07795       contents = partition_sets();
07796     }
07797 
07798     rval = mbImpl->add_entities( set, contents );
07799     if (MB_SUCCESS != rval)
07800       return rval;
07801   
07802     // store pcomm id on new partition set
07803     rval = mbImpl->tag_set_data( prtn_tag, &set, 1, &id );
07804     if (MB_SUCCESS != rval)
07805       return rval;
07806   
07807     partitioningSet = set;
07808     return MB_SUCCESS;
07809   }
07810   
07811 
07813   ErrorCode ParallelComm::get_part_entities(Range &ents, int dim) 
07814   {
07815     ErrorCode result;
07816   
07817     for (Range::iterator rit = partitionSets.begin(); 
07818          rit != partitionSets.end(); rit++) {
07819       Range tmp_ents;
07820       if (-1 == dim) 
07821         result = mbImpl->get_entities_by_handle(*rit, tmp_ents, true);
07822       else
07823         result = mbImpl->get_entities_by_dimension(*rit, dim, tmp_ents, true);
07824 
07825       if (MB_SUCCESS != result) return result;
07826       ents.merge(tmp_ents);
07827     }
07828   
07829     return MB_SUCCESS;
07830   }
07831 
07834   ErrorCode ParallelComm::get_owner_handle(EntityHandle entity,
07835                                            int &owner,
07836                                            EntityHandle &handle) 
07837   {
07838     unsigned char pstat;
07839     int sharing_procs[MAX_SHARING_PROCS];
07840     EntityHandle sharing_handles[MAX_SHARING_PROCS];
07841 
07842     ErrorCode result = mbImpl->tag_get_data(pstatus_tag(), &entity, 1,
07843                                             &pstat);
07844     if (!(pstat & PSTATUS_NOT_OWNED)) {
07845       owner = proc_config().proc_rank();
07846       handle = entity;
07847     }
07848   
07849     else if (pstat & PSTATUS_MULTISHARED) {
07850       result = mbImpl->tag_get_data(sharedps_tag(), &entity, 1,
07851                                     sharing_procs);
07852       owner = sharing_procs[0];
07853       result = mbImpl->tag_get_data(sharedhs_tag(), &entity, 1,
07854                                     sharing_handles);
07855       handle = sharing_handles[0];
07856     }
07857     else if (pstat & PSTATUS_SHARED) {
07858       result = mbImpl->tag_get_data(sharedp_tag(), &entity, 1,
07859                                     sharing_procs);
07860       RRA(" ");
07861       owner = sharing_procs[0];
07862       result = mbImpl->tag_get_data(sharedh_tag(), &entity, 1,
07863                                     sharing_handles);
07864       handle = sharing_handles[0];
07865     }
07866     else {
07867       owner = -1;
07868       handle = 0;
07869     }
07870   
07871     return MB_SUCCESS;
07872   }
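  // Illustrative usage sketch (added commentary, not part of the original source);
  // 'pc' is an assumed ParallelComm* and 'ent' an assumed shared entity handle.
  //
  //   int owner_rank;
  //   EntityHandle owner_handle;
  //   ErrorCode rval = pc->get_owner_handle( ent, owner_rank, owner_handle );
  //   if (MB_SUCCESS == rval)
  //     std::cout << "owned on rank " << owner_rank << " as handle " << owner_handle << std::endl;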
07873 
07874   ErrorCode ParallelComm::get_global_part_count( int& count_out ) const
07875   {
07876     count_out = globalPartCount;
07877     return count_out < 0 ? MB_FAILURE : MB_SUCCESS;
07878   }
07879 
07880   ErrorCode ParallelComm::get_part_owner( int part_id, int& owner ) const
07881   {
07882     // FIXME: assumes only 1 local part
07883     owner = part_id;
07884     return MB_SUCCESS;
07885   }
07886 
07887   ErrorCode ParallelComm::get_part_id( EntityHandle /*part*/, int& id_out ) const
07888   {
07889     // FIXME: assumes only 1 local part
07890     id_out = proc_config().proc_rank();
07891     return MB_SUCCESS;
07892   }
07893 
07894   ErrorCode ParallelComm::get_part_handle( int id, EntityHandle& handle_out ) const
07895   {
07896     // FIXME: assumes only 1 local part
07897     if ((unsigned)id != proc_config().proc_rank())
07898       return MB_ENTITY_NOT_FOUND;
07899     handle_out = partition_sets().front();
07900     return MB_SUCCESS;
07901   }
07902 
07903   ErrorCode ParallelComm::create_part( EntityHandle& set_out )
07904   {
07905     // mark as invalid so we know that it needs to be updated
07906     globalPartCount = -1;
07907   
07908     // create set representing part
07909     ErrorCode rval = mbImpl->create_meshset( MESHSET_SET, set_out );
07910     if (MB_SUCCESS != rval)
07911       return rval;
07912   
07913     // set tag on set
07914     // FIXME: need to assign valid global id
07915     int val = 0;
07916     rval = mbImpl->tag_set_data( part_tag(), &set_out, 1, &val );
07917     if (MB_SUCCESS != rval) {
07918       mbImpl->delete_entities( &set_out, 1 );
07919       return rval;
07920     }
07921   
07922     if (get_partitioning()) {
07923       rval = mbImpl->add_entities( get_partitioning(), &set_out, 1 );
07924       if (MB_SUCCESS != rval) {
07925         mbImpl->delete_entities( &set_out, 1 );
07926         return rval;
07927       }
07928     }
07929   
07930     return MB_SUCCESS;
07931   }
07932 
07933   ErrorCode ParallelComm::destroy_part( EntityHandle part_id )
07934   {
07935     // mark as invalid so we know that it needs to be updated
07936     globalPartCount = -1;
07937   
07938     ErrorCode rval;
07939     if (get_partitioning()) {
07940       rval = mbImpl->remove_entities( get_partitioning(), &part_id, 1 );
07941       if (MB_SUCCESS != rval)
07942         return rval;
07943     }
07944     return mbImpl->delete_entities( &part_id, 1 );
07945   }
07946 
07947   ErrorCode ParallelComm::collective_sync_partition()
07948   {
07949     int count = partition_sets().size();
07950     globalPartCount = 0;
07951     int err = MPI_Allreduce( &count, &globalPartCount, 1, MPI_INT, MPI_SUM, 
07952                              proc_config().proc_comm() );
07953     return err ? MB_FAILURE : MB_SUCCESS;
07954   }
07955 
07956   ErrorCode ParallelComm::get_part_neighbor_ids( EntityHandle part,
07957                                                  int neighbors_out[MAX_SHARING_PROCS],
07958                                                  int& num_neighbors_out )
07959   {
07960     ErrorCode rval;
07961     Range iface;
07962     rval = get_interface_sets( part, iface );
07963     if (MB_SUCCESS != rval)
07964       return rval;
07965   
07966     num_neighbors_out = 0;
07967     int n, j = 0;
07968     int tmp[MAX_SHARING_PROCS], curr[MAX_SHARING_PROCS];
07969     int *parts[2] = { neighbors_out, tmp };
07970     for (Range::iterator i = iface.begin(); i != iface.end(); ++i) {
07971       unsigned char pstat;
07972       rval = get_sharing_data( *i, curr, NULL, pstat, n);
07973       if (MB_SUCCESS != rval)
07974         return rval;
07975       std::sort( curr, curr+n );
07976       assert( num_neighbors_out < MAX_SHARING_PROCS );
07977       int* k = std::set_union( parts[j], parts[j]+num_neighbors_out,
07978                                curr, curr + n, parts[1-j] );
07979       j = 1-j;
07980       num_neighbors_out = k - parts[j];
07981     }
07982     if (parts[j] != neighbors_out)
07983       std::copy( parts[j], parts[j]+num_neighbors_out, neighbors_out );
07984     
07985     
07986     // remove input part from list
07987     int id;
07988     rval = get_part_id( part, id );
07989     if (MB_SUCCESS == rval) 
07990       num_neighbors_out = std::remove( neighbors_out, neighbors_out+num_neighbors_out, id ) - neighbors_out;
07991     return rval;
07992   }
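  // Illustrative usage sketch (added commentary, not part of the original source);
  // 'pc' is an assumed ParallelComm* whose partition sets are already populated.
  //
  //   int neighbors[MAX_SHARING_PROCS], num_neighbors = 0;
  //   EntityHandle my_part = pc->partition_sets().front();
  //   ErrorCode rval = pc->get_part_neighbor_ids( my_part, neighbors, num_neighbors );
  //   if (MB_SUCCESS == rval)
  //     for (int i = 0; i < num_neighbors; ++i)
  //       std::cout << "neighbor part " << neighbors[i] << std::endl;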
07993 
07994   ErrorCode ParallelComm::get_interface_sets( EntityHandle ,
07995                                               Range& iface_sets_out,
07996                                               int* adj_part_id )
07997   {
07998     // FIXME : assumes one part per processor.
07999     // Need to store part iface sets as children to implement
08000     // this correctly.
08001     iface_sets_out = interface_sets();
08002 
08003     if (adj_part_id) {
08004       int part_ids[MAX_SHARING_PROCS], num_parts;
08005       Range::iterator i = iface_sets_out.begin();
08006       while (i != iface_sets_out.end()) {
08007         unsigned char pstat;
08008         ErrorCode rval = get_sharing_data( *i, part_ids, NULL, pstat, num_parts );
08009         if (MB_SUCCESS != rval)
08010           return rval;
08011       
08012         if (std::find(part_ids, part_ids+num_parts, *adj_part_id) - part_ids != num_parts)
08013           ++i;
08014         else
08015           i = iface_sets_out.erase( i );
08016       }
08017     }
08018     
08019     return MB_SUCCESS;
08020   }
08021 
08022   ErrorCode ParallelComm::get_owning_part( EntityHandle handle,
08023                                            int& owning_part_id,
08024                                            EntityHandle* remote_handle )
08025   {
08026 
08027     // FIXME : assumes one part per proc, and therefore part_id == rank
08028   
08029     // If entity is not shared, then we're the owner.
08030     unsigned char pstat;
08031     ErrorCode result = mbImpl->tag_get_data(pstatus_tag(), &handle, 1,
08032                                             &pstat);
08033     if (!(pstat & PSTATUS_NOT_OWNED)) {
08034       owning_part_id = proc_config().proc_rank();
08035       if (remote_handle)
08036         *remote_handle = handle;
08037       return MB_SUCCESS;
08038     }
08039   
08040     // If entity is shared with one other proc, then
08041     // sharedp_tag will contain a positive value.
08042     result = mbImpl->tag_get_data( sharedp_tag(), &handle, 1, &owning_part_id );
08043     if (MB_SUCCESS != result)
08044       return result;
08045     if (owning_part_id != -1) {
08046       // done?
08047       if (!remote_handle)
08048         return MB_SUCCESS;
08049       
08050       // get handles on remote processors (and this one)
08051       return mbImpl->tag_get_data( sharedh_tag(), &handle, 1, remote_handle );
08052     }
08053   
08054     // If here, then the entity is shared with at least two other processors.
08055     // Get the list from the sharedps_tag
08056     const void* part_id_list = 0;
08057     result = mbImpl->tag_get_by_ptr( sharedps_tag(), &handle, 1, &part_id_list );
08058     if (MB_SUCCESS != result)
08059       return result;
08060     owning_part_id = ((const int*)part_id_list)[0];
08061  
08062     // done?
08063     if (!remote_handle)
08064       return MB_SUCCESS;
08065   
08066     // get remote handles
08067     const void* handle_list = 0;
08068     result = mbImpl->tag_get_by_ptr( sharedhs_tag(), &handle, 1, &handle_list );
08069     if (MB_SUCCESS != result)
08070       return result;
08071   
08072     *remote_handle = ((const EntityHandle*)handle_list)[0];
08073     return MB_SUCCESS;
08074   }    
08075 
08076   ErrorCode ParallelComm::get_sharing_parts( EntityHandle entity,
08077                                              int part_ids_out[MAX_SHARING_PROCS],
08078                                              int& num_part_ids_out,
08079                                              EntityHandle remote_handles[MAX_SHARING_PROCS] )
08080   {
08081 
08082     // FIXME : assumes one part per proc, and therefore part_id == rank
08083   
08084     // If entity is not shared, then we're the owner.
08085     unsigned char pstat;
08086     ErrorCode result = mbImpl->tag_get_data(pstatus_tag(), &entity, 1,
08087                                             &pstat);
08088     if (!(pstat & PSTATUS_SHARED)) {
08089       part_ids_out[0] = proc_config().proc_rank();
08090       if (remote_handles)
08091         remote_handles[0] = entity;
08092       num_part_ids_out = 1;
08093       return MB_SUCCESS;
08094     }
08095   
08096     // If entity is shared with one other proc, then
08097     // sharedp_tag will contain a positive value.
08098     result = mbImpl->tag_get_data( sharedp_tag(), &entity, 1, part_ids_out );
08099     if (MB_SUCCESS != result)
08100       return result;
08101     if (part_ids_out[0] != -1) {
08102     
08103       num_part_ids_out = 2;
08104       part_ids_out[1] = proc_config().proc_rank();
08105 
08106       // done?
08107       if (!remote_handles)
08108         return MB_SUCCESS;
08109       
08110       // get handles on remote processors (and this one)
08111       remote_handles[1] = entity;
08112       return mbImpl->tag_get_data( sharedh_tag(), &entity, 1, remote_handles );
08113     }
08114   
08115     // If here, then the entity is shared with at least two other processors.
08116     // Get the list from the sharedps_tag
08117     result = mbImpl->tag_get_data( sharedps_tag(), &entity, 1, part_ids_out );
08118     if (MB_SUCCESS != result)
08119       return result;
08120     // Count number of valid (positive) entries in sharedps_tag
08121     for (num_part_ids_out = 0; num_part_ids_out < MAX_SHARING_PROCS &&
08122            part_ids_out[num_part_ids_out] >= 0; ++num_part_ids_out);
08123     //part_ids_out[num_part_ids_out++] = proc_config().proc_rank();
08124 #ifndef NDEBUG
08125     int my_idx = std::find(part_ids_out, part_ids_out+num_part_ids_out, proc_config().proc_rank()) - part_ids_out;
08126     assert(my_idx < num_part_ids_out);
08127 #endif
08128   
08129     // done?
08130     if (!remote_handles)
08131       return MB_SUCCESS;
08132   
08133     // get remote handles
08134     result = mbImpl->tag_get_data( sharedhs_tag(), &entity, 1, remote_handles );
08135     //remote_handles[num_part_ids_out-1] = entity;
08136     assert(remote_handles[my_idx] == entity);
08137 
08138     return result;
08139   }
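  // Illustrative usage sketch (added commentary, not part of the original source);
  // 'pc' is an assumed ParallelComm* and 'ent' an assumed shared entity handle.
  //
  //   int parts[MAX_SHARING_PROCS], num_parts = 0;
  //   EntityHandle handles[MAX_SHARING_PROCS];
  //   ErrorCode rval = pc->get_sharing_parts( ent, parts, num_parts, handles );
  //   for (int i = 0; MB_SUCCESS == rval && i < num_parts; ++i)
  //     std::cout << "part " << parts[i] << ", remote handle " << handles[i] << std::endl;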
08140 
08141   ErrorCode ParallelComm::pack_shared_handles(
08142                                               std::vector<std::vector<SharedEntityData> > &send_data) 
08143   {
08144     // build up send buffers
08145     ErrorCode rval = MB_SUCCESS;
08146     int ent_procs[MAX_SHARING_PROCS];
08147     EntityHandle handles[MAX_SHARING_PROCS];
08148     int num_sharing, tmp_int;
08149     SharedEntityData tmp;
08150     send_data.resize(buffProcs.size());
08151     for (std::vector<EntityHandle>::iterator i = sharedEnts.begin(); i != sharedEnts.end(); ++i) {
08152       tmp.remote = *i; // swap local/remote so they're correct on the remote proc.
08153       rval = get_owner( *i, tmp_int );
08154       tmp.owner = tmp_int;
08155       if (MB_SUCCESS != rval)
08156         return rval;
08157 
08158       unsigned char pstat;
08159       rval = get_sharing_data( *i, ent_procs, handles, pstat, num_sharing );
08160       if (MB_SUCCESS != rval)
08161         return rval;
08162       for (int j = 0; j < num_sharing; ++j) {
08163         if (ent_procs[j] == (int)proc_config().proc_rank())
08164           continue;
08165         tmp.local = handles[j];
08166         int ind = get_buffers(ent_procs[j]);
08167         assert(-1 != ind);
08168         if ((int)send_data.size() < ind+1) send_data.resize(ind+1);
08169         send_data[ind].push_back( tmp );
08170       }
08171     }
08172 
08173     return MB_SUCCESS;
08174   }
08175 
08176   ErrorCode ParallelComm::exchange_all_shared_handles(  
08177                                                       std::vector<std::vector<SharedEntityData> > &send_data, 
08178                                                       std::vector<std::vector<SharedEntityData> > &result)
08179   {
08180     int ierr;
08181     const int tag = 0;
08182     const MPI_Comm cm = procConfig.proc_comm();
08183     const int num_proc = buffProcs.size();
08184     const std::vector<int> procs( buffProcs.begin(), buffProcs.end() );
08185     std::vector<MPI_Request> recv_req(buffProcs.size(), MPI_REQUEST_NULL);
08186   
08187     // set up to receive sizes
08188     std::vector<int> sizes_send(num_proc), sizes_recv(num_proc);
08189     for (int i = 0; i < num_proc; ++i) {
08190       ierr = MPI_Irecv( &sizes_recv[i], 1, MPI_INT, procs[i], tag, cm, &recv_req[i] );
08191       if (ierr) 
08192         return MB_FILE_WRITE_ERROR;
08193     }
08194   
08195     // send sizes
08196     assert(num_proc == (int)send_data.size());
08197   
08198     sendReqs.resize(buffProcs.size(), MPI_REQUEST_NULL);
08199     result.resize(num_proc);
08200     for (int i = 0; i < num_proc; ++i) {
08201       sizes_send[i] = send_data[i].size();
08202       ierr = MPI_Isend( &sizes_send[i], 1, MPI_INT, buffProcs[i], tag, cm, &sendReqs[i] );
08203       if (ierr) 
08204         return MB_FILE_WRITE_ERROR;
08205     }
08206   
08207     // receive sizes
08208     std::vector<MPI_Status> stat(num_proc);
08209     ierr = MPI_Waitall( num_proc, &recv_req[0], &stat[0] );
08210     if (ierr)
08211       return MB_FILE_WRITE_ERROR;
08212   
08213     // wait until all sizes are sent (clean up pending req's)
08214     ierr = MPI_Waitall( num_proc, &sendReqs[0], &stat[0] );
08215     if (ierr)
08216       return MB_FILE_WRITE_ERROR;
08217   
08218     // set up to receive data
08219     for (int i = 0; i < num_proc; ++i) {
08220       result[i].resize( sizes_recv[i] );
08221       ierr = MPI_Irecv( &result[i][0], 
08222                         sizeof(SharedEntityData)*sizes_recv[i], 
08223                         MPI_UNSIGNED_CHAR, 
08224                         buffProcs[i], tag, cm, &recv_req[i] );
08225       if (ierr) 
08226         return MB_FILE_WRITE_ERROR;
08227     }
08228   
08229     // send data
08230     for (int i = 0; i < num_proc; ++i) {
08231       ierr = MPI_Isend( &send_data[i][0], 
08232                         sizeof(SharedEntityData)*sizes_send[i], 
08233                         MPI_UNSIGNED_CHAR, 
08234                         buffProcs[i], tag, cm, &sendReqs[i] );
08235       if (ierr) 
08236         return MB_FILE_WRITE_ERROR;
08237     }
08238   
08239     // receive data
08240     ierr = MPI_Waitall( num_proc, &recv_req[0], &stat[0] );
08241     if (ierr)
08242       return MB_FILE_WRITE_ERROR;
08243   
08244     // wait until everything is sent to release send buffers
08245     ierr = MPI_Waitall( num_proc, &sendReqs[0], &stat[0] );
08246     if (ierr)
08247       return MB_FILE_WRITE_ERROR;
08248   
08249     return MB_SUCCESS;
08250   }
08251 
08252   ErrorCode ParallelComm::check_all_shared_handles(bool print_em) 
08253   {
08254     // get all shared ent data from other procs
08255     std::vector<std::vector<SharedEntityData> > shents(buffProcs.size()),
08256       send_data(buffProcs.size());
08257  
08258     ErrorCode result;
08259     bool done = false;
08260   
08261     while (!done) {
08262       result = check_local_shared();
08263       if (MB_SUCCESS != result) {
08264         done = true;
08265         continue;
08266       }
08267  
08268       result = pack_shared_handles(send_data);
08269       if (MB_SUCCESS != result) {
08270         done = true;
08271         continue;
08272       }
08273    
08274       result = exchange_all_shared_handles(send_data, shents);
08275       if (MB_SUCCESS != result) {
08276         done = true;
08277         continue;
08278       }
08279  
08280       if (!shents.empty()) 
08281         result = check_my_shared_handles(shents);
08282       done = true;
08283     }
08284   
08285     if (MB_SUCCESS != result && print_em) {
08286       std::ostringstream ent_str;
08287       ent_str << "mesh." << procConfig.proc_rank() << ".h5m";
08288       mbImpl->write_mesh(ent_str.str().c_str());
08289     }
08290   
08291     return result;
08292   }
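  // Illustrative usage sketch (added commentary, not part of the original source);
  // 'pc' is an assumed ParallelComm*. The exchange involves all procs that share
  // entities with this one; passing true writes a per-rank "mesh.<rank>.h5m" file
  // when an inconsistency is found.
  //
  //   ErrorCode rval = pc->check_all_shared_handles( true );
  //   if (MB_SUCCESS != rval)
  //     std::cerr << "shared-handle check failed on rank " << pc->rank() << std::endl;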
08293 
08294   ErrorCode ParallelComm::check_local_shared() 
08295   {
08296     // do some local checks on shared entities to make sure things look
08297     // consistent
08298 
08299     // check that non-vertex shared entities are shared by same procs as all
08300     // their vertices
08301     //std::pair<Range::const_iterator,Range::const_iterator> vert_it =
08302     //    sharedEnts.equal_range(MBVERTEX);
08303     std::vector<EntityHandle> dum_connect;
08304     const EntityHandle *connect;
08305     int num_connect;
08306     int tmp_procs[MAX_SHARING_PROCS];
08307     EntityHandle tmp_hs[MAX_SHARING_PROCS];
08308     std::set<int> tmp_set, vset;
08309     int num_ps;
08310     ErrorCode result;
08311     unsigned char pstat;
08312     Range bad_ents;
08313     std::vector<std::string> errors;
08314     std::string dum_err;
08315   
08316     std::vector<EntityHandle>::const_iterator vit;
08317     for (vit = sharedEnts.begin(); vit != sharedEnts.end(); vit++) {
08318 
08319       // get sharing procs for this ent
08320       result = get_sharing_data(*vit, tmp_procs, tmp_hs, pstat, num_ps);
08321       if (MB_SUCCESS != result) {
08322         bad_ents.insert(*vit);
08323         errors.push_back(std::string("Failure getting sharing data."));
08324         continue;
08325       }
08326 
08327       bool bad = false;
08328       // entity must be shared
08329       if (!(pstat & PSTATUS_SHARED))
08330         errors.push_back(std::string("Entity should be shared but isn't.")), bad = true;
08331 
08332       // if entity is not owned this must not be first proc
08333       if (pstat & PSTATUS_NOT_OWNED && tmp_procs[0] == (int)procConfig.proc_rank())
08334         errors.push_back(std::string("Entity not owned but is first proc.")), bad = true;
08335 
08336       // if entity is owned and multishared, this must be first proc
08337       if (!(pstat & PSTATUS_NOT_OWNED) && pstat & PSTATUS_MULTISHARED && 
08338           (tmp_procs[0] != (int)procConfig.proc_rank() || tmp_hs[0] != *vit))
08339         errors.push_back(std::string("Entity owned and multishared but not first proc or not first handle.")), bad = true;
08340 
08341       if (bad) {
08342         bad_ents.insert(*vit);
08343         continue;
08344       }
08345     
08346       EntityType type = mbImpl->type_from_handle(*vit);
08347       if (type == MBVERTEX || type == MBENTITYSET) continue;
08348 
08349       // copy element's procs to vset and save size
08350       int orig_ps = num_ps; vset.clear(); 
08351       std::copy(tmp_procs, tmp_procs+num_ps, std::inserter(vset, vset.begin()));
08352     
08353       // get vertices for this ent and intersection of sharing procs
08354       result = mbImpl->get_connectivity(*vit, connect, num_connect, false, &dum_connect);
08355       if (MB_SUCCESS != result) {
08356         bad_ents.insert(*vit); 
08357         errors.push_back(std::string("Couldn't get connectivity."));
08358         continue;
08359       }
08360     
08361       for (int i = 0; i < num_connect; i++) {
08362         result = get_sharing_data(connect[i], tmp_procs, NULL, pstat, num_ps);
08363         if (MB_SUCCESS != result) {bad_ents.insert(*vit); continue;}
08364         if (!num_ps) {vset.clear(); break;}
08365         std::sort(tmp_procs, tmp_procs+num_ps);
08366         tmp_set.clear();
08367         std::set_intersection(tmp_procs, tmp_procs+num_ps,
08368                               vset.begin(), vset.end(), std::inserter(tmp_set, tmp_set.end()));
08369         vset.swap(tmp_set);
08370         if (vset.empty()) break;
08371       }
08372     
08373       // intersect them; should be the same size as orig_ps
08374       tmp_set.clear();
08375       std::set_intersection(tmp_procs, tmp_procs+num_ps,
08376                             vset.begin(), vset.end(), std::inserter(tmp_set, tmp_set.end()));
08377       if (orig_ps != (int)tmp_set.size()) {
08378         errors.push_back(std::string("Vertex proc set not same size as entity proc set."));
08379         bad_ents.insert(*vit);
08380       }
08381     }
08382   
08383     if (!bad_ents.empty()) {
08384       std::cout << "Found bad entities in check_local_shared, proc rank "
08385                 << procConfig.proc_rank() << "," << std::endl;
08386       std::vector<std::string>::iterator sit;
08387       Range::iterator rit;
08388       for (rit = bad_ents.begin(), sit = errors.begin(); rit != bad_ents.end(); rit++, sit++) {
08389         list_entities(&(*rit), 1);
08390         std::cout << "Reason: " << *sit << std::endl;
08391       }
08392       return MB_FAILURE;
08393     }
08394 
08395     // to do: check interface sets
08396 
08397     return MB_SUCCESS;
08398   }
08399 
08400   ErrorCode ParallelComm::check_all_shared_handles(ParallelComm **pcs,
08401                                                    int num_pcs) 
08402   {
08403     std::vector<std::vector<std::vector<SharedEntityData> > > shents, send_data;
08404     ErrorCode result = MB_SUCCESS, tmp_result;
08405 
08406     // get all shared ent data from each proc to all other procs
08407     send_data.resize(num_pcs);
08408     for (int p = 0; p < num_pcs; p++) {
08409       tmp_result = pcs[p]->pack_shared_handles(send_data[p]);
08410       if (MB_SUCCESS != tmp_result) result = tmp_result;
08411     }
08412     if (MB_SUCCESS != result) return result;
08413 
08414     // move the data sorted by sending proc to data sorted by receiving proc
08415     shents.resize(num_pcs);
08416     for (int p = 0; p < num_pcs; p++)
08417       shents[p].resize(pcs[p]->buffProcs.size());
08418     
08419     for (int p = 0; p < num_pcs; p++) {
08420       for (unsigned int idx_p = 0; idx_p < pcs[p]->buffProcs.size(); idx_p++) {
08421         // move send_data[p][to_p] to shents[to_p][idx_p]
08422         int to_p = pcs[p]->buffProcs[idx_p];
08423         int top_idx_p = pcs[to_p]->get_buffers(p);
08424         assert(-1 != top_idx_p);
08425         shents[to_p][top_idx_p] = send_data[p][idx_p];
08426       }
08427     }
08428   
08429     for (int p = 0; p < num_pcs; p++) {
08430       std::ostringstream ostr;
08431       ostr << "Processor " << p << " bad entities:";
08432       tmp_result = pcs[p]->check_my_shared_handles(shents[p], ostr.str().c_str());
08433       if (MB_SUCCESS != tmp_result) result = tmp_result;
08434     }
08435   
08436     return result;
08437   }
08438 
08439   ErrorCode ParallelComm::check_my_shared_handles(
08440                                                   std::vector<std::vector<SharedEntityData> > &shents,
08441                                                   const char *prefix) 
08442   {
08443     // now check against what I think data should be
08444     // get all shared entities
08445     ErrorCode result;
08446     Range all_shared;
08447     std::copy(sharedEnts.begin(), sharedEnts.end(), range_inserter(all_shared));
08448     std::vector<EntityHandle> dum_vec;
08449     all_shared.erase(all_shared.upper_bound(MBPOLYHEDRON), all_shared.end());
08450 
08451     Range bad_ents, local_shared;
08452     std::vector<SharedEntityData>::iterator vit;
08453     unsigned char tmp_pstat;
08454     for (unsigned int i = 0; i < shents.size(); i++) {
08455       int other_proc = buffProcs[i];
08456       result = get_shared_entities(other_proc, local_shared);
08457       if (MB_SUCCESS != result) return result;
08458       for (vit = shents[i].begin(); vit != shents[i].end(); vit++) {
08459         EntityHandle localh = vit->local, remoteh = vit->remote, dumh;
08460         local_shared.erase(localh);
08461         result = get_remote_handles(true, &localh, &dumh, 1, other_proc, dum_vec);
08462         if (MB_SUCCESS != result || dumh != remoteh) 
08463           bad_ents.insert(localh);
08464         result = get_pstatus(localh, tmp_pstat);
08465         if (MB_SUCCESS != result ||
08466             (!(tmp_pstat & PSTATUS_NOT_OWNED) && vit->owner != rank()) ||
08467             (tmp_pstat&PSTATUS_NOT_OWNED && vit->owner == rank()))
08468           bad_ents.insert(localh);
08469       }
08470 
08471       if (!local_shared.empty()) 
08472         bad_ents.merge(local_shared);
08473     }
08474   
08475     if (!bad_ents.empty()) {
08476       if (prefix)
08477         std::cout << prefix << std::endl;
08478       list_entities(bad_ents);
08479       return MB_FAILURE;
08480     }
08481 
08482     else return MB_SUCCESS;
08483   }
08484 
08485   ErrorCode ParallelComm::get_shared_entities(int other_proc,
08486                                               Range &shared_ents,
08487                                               int dim,
08488                                               const bool iface,
08489                                               const bool owned_filter) 
08490   {
08491     shared_ents.clear();
08492     ErrorCode result = MB_SUCCESS;
08493   
08494     // dimension
08495     if (-1 != dim) {
08496       DimensionPair dp = CN::TypeDimensionMap[dim];
08497       Range dum_range;
08498       std::copy(sharedEnts.begin(), sharedEnts.end(), range_inserter(dum_range));
08499       shared_ents.merge(dum_range.lower_bound(dp.first), 
08500                         dum_range.upper_bound(dp.second));
08501     }
08502     else std::copy(sharedEnts.begin(), sharedEnts.end(), range_inserter(shared_ents));
08503 
08504     // filter by iface
08505     if (iface) {
08506       result = filter_pstatus(shared_ents, PSTATUS_INTERFACE, PSTATUS_AND);
08507       RRA("");
08508     }
08509   
08510     // filter by owned
08511     if (owned_filter) {
08512       result = filter_pstatus(shared_ents, PSTATUS_NOT_OWNED, PSTATUS_NOT);
08513       RRA("");
08514     }
08515 
08516     // filter by proc
08517     if (-1 != other_proc) {
08518       result = filter_pstatus(shared_ents, PSTATUS_SHARED, PSTATUS_AND, other_proc);
08519       RRA("");
08520     }
08521   
08522     return result;
08523   }
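  // Illustrative usage sketch (added commentary, not part of the original source);
  // 'pc' is an assumed ParallelComm*. All arguments are passed explicitly here:
  // other_proc = -1 means "shared with anyone", dim = 0 selects vertices, and the
  // last two flags restrict the result to interface and locally owned entities.
  //
  //   Range owned_iface_verts;
  //   ErrorCode rval = pc->get_shared_entities( -1, owned_iface_verts, 0, true, true );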
08524 
08525   ErrorCode ParallelComm::clean_shared_tags(std::vector<Range*>& exchange_ents)
08526   {
08527     for (unsigned int i = 0; i < exchange_ents.size(); i++) {
08528       Range* ents = exchange_ents[i];
08529       int num_ents = ents->size();
08530       Range::iterator it = ents->begin();
08531 
08532       for ( int n = 0; n < num_ents; n++ ) {
08533         int sharing_proc;
08534         ErrorCode result = mbImpl->tag_get_data(sharedp_tag(), &(*it), 1,
08535                                                 &sharing_proc);
08536         if (result != MB_TAG_NOT_FOUND && sharing_proc == -1) {
08537           result = mbImpl->tag_delete_data(sharedp_tag(), &(*it), 1);
08538           RRA("Couldn't delete shared processor tag data.");
08539           result = mbImpl->tag_delete_data(sharedh_tag(), &(*it), 1);
08540           RRA("Couldn't delete shared handle tag data.");
08541           result = mbImpl->tag_delete_data(pstatus_tag(), &(*it), 1);
08542           RRA("Couldn't delete pstatus tag data.");
08543         }
08544         it++;
08545       }
08546     }
08547 
08548     return MB_SUCCESS;
08549   }
08550 
08551   void ParallelComm::set_debug_verbosity(int verb) 
08552   {
08553     myDebug->set_verbosity(verb);
08554   }
08555 
08556   int ParallelComm::get_debug_verbosity() 
08557   {
08558     return myDebug->get_verbosity();
08559   }
08560 
08561   ErrorCode ParallelComm::get_entityset_procs( EntityHandle set,
08562                                                std::vector<unsigned>& ranks ) const
08563   {
08564     return sharedSetData->get_sharing_procs( set, ranks );
08565   }
08566 
08567   ErrorCode ParallelComm::get_entityset_owner( EntityHandle entity_set,
08568                                                unsigned& owner_rank,
08569                                                EntityHandle* remote_handle ) const
08570   {
08571     if (remote_handle)
08572       return sharedSetData->get_owner( entity_set, owner_rank, *remote_handle );
08573     else
08574       return sharedSetData->get_owner( entity_set, owner_rank );
08575   }
08576 
08577   ErrorCode ParallelComm::get_entityset_local_handle( unsigned owning_rank,
08578                                                       EntityHandle remote_handle,
08579                                                       EntityHandle& local_handle ) const
08580   {
08581     return sharedSetData->get_local_handle( owning_rank, remote_handle, local_handle );
08582   }
08583 
08584   ErrorCode ParallelComm::get_shared_sets( Range& result ) const
08585   {
08586     return sharedSetData->get_shared_sets( result );
08587   }
08588 
08589   ErrorCode ParallelComm::get_entityset_owners( std::vector<unsigned>& ranks ) const
08590   {
08591     return sharedSetData->get_owning_procs( ranks );
08592   }
08593 
08594   ErrorCode ParallelComm::get_owned_sets( unsigned owning_rank, Range& sets_out ) const
08595   {
08596     return sharedSetData->get_shared_sets( owning_rank, sets_out );
08597   }
08598 
08599   ErrorCode ParallelComm::gather_data(Range &gather_ents, Tag &tag_handle, 
08600                       Tag id_tag, EntityHandle gather_set, int root_proc_rank)
08601   {
08602     int dim = mbImpl->dimension_from_handle(*gather_ents.begin());
08603     int bytes_per_tag = 0;
08604     ErrorCode rval = mbImpl->tag_get_bytes(tag_handle, bytes_per_tag);
08605     if (rval != MB_SUCCESS) return rval;
08606 
08607     int sz_buffer = sizeof(int) + gather_ents.size()*(sizeof(int) + bytes_per_tag);
08608     void* senddata = malloc(sz_buffer);
08609     ((int*)senddata)[0] = (int) gather_ents.size();    
08610     int* ptr_int = (int*)senddata + 1;
08611     rval = mbImpl->tag_get_data(id_tag, gather_ents, (void*)ptr_int);
08612     ptr_int = (int*)(senddata) + 1 + gather_ents.size();
08613     rval = mbImpl->tag_get_data(tag_handle, gather_ents, (void*)ptr_int);
08614     std::vector<int> displs(proc_config().proc_size(), 0);
08615     MPI_Gather(&sz_buffer, 1, MPI_INT, &displs[0], 1, MPI_INT, root_proc_rank, comm());
08616     std::vector<int> recvcnts(proc_config().proc_size(), 0);
08617     std::copy(displs.begin(), displs.end(), recvcnts.begin());
08618     std::partial_sum(displs.begin(), displs.end(), displs.begin());
08619     std::vector<int>::iterator lastM1 = displs.end() - 1;
08620     std::copy_backward(displs.begin(), lastM1, displs.end());
08621     //std::copy_backward(displs.begin(), --displs.end(), displs.end());
08622     displs[0] = 0;
08623 
08624     if ((int)rank() != root_proc_rank)
08625       MPI_Gatherv(senddata, sz_buffer, MPI_BYTE, NULL, NULL, NULL, MPI_BYTE, root_proc_rank, comm());
08626     else {
08627       Range gents;
08628       mbImpl->get_entities_by_dimension(gather_set, dim, gents);
08629       int recvbuffsz = gents.size() * (bytes_per_tag + sizeof(int)) + proc_config().proc_size() * sizeof(int);
08630       void* recvbuf = malloc(recvbuffsz);
08631       MPI_Gatherv(senddata, sz_buffer, MPI_BYTE, recvbuf, &recvcnts[0], &displs[0], MPI_BYTE, root_proc_rank, comm());
08632 
08633       void* gvals = NULL;
08634 
08635       // Test whether gents has multiple sequences
08636       bool multiple_sequences = false;
08637       if (gents.psize() > 1)
08638         multiple_sequences = true;
08639       else {
08640         int count;
08641         rval = mbImpl->tag_iterate(tag_handle, gents.begin(), gents.end(), count, gvals);
08642         assert(NULL != gvals);
08643         assert(count > 0);
08644         if ((size_t)count != gents.size()) {
08645           multiple_sequences = true;
08646           gvals = NULL;
08647         }
08648       }
08649 
08650       // If gents has multiple sequences, create a temp buffer for gathered values
08651       if (multiple_sequences) {
08652         gvals = malloc(gents.size() * bytes_per_tag);
08653         assert(NULL != gvals);
08654       }
08655 
08656       for (int i = 0; i != (int)size(); ++i) {
08657         int numents = *(int*)(((char*)recvbuf) + displs[i]);
08658         int* id_ptr = (int*)(((char*)recvbuf) + displs[i] + sizeof(int));
08659         char* val_ptr = (char*)(id_ptr + numents);
08660         for (int j = 0; j != numents; ++j) {
08661           int idx = id_ptr[j];
08662           memcpy((char*)gvals + (idx - 1)*bytes_per_tag, val_ptr + j*bytes_per_tag, bytes_per_tag);
08663         }
08664       }
08665 
08666       // If gents has multiple sequences, copy tag data (stored in the temp buffer) to each sequence separately
08667       if (multiple_sequences) {
08668         Range::iterator iter = gents.begin();
08669         size_t start_idx = 0;
08670         while (iter != gents.end()) {
08671           int count;
08672           void* ptr;
08673           rval = mbImpl->tag_iterate(tag_handle, iter, gents.end(), count, ptr);
08674           assert(NULL != ptr);
08675           assert(count > 0);
08676           memcpy((char*)ptr, (char*)gvals + start_idx * bytes_per_tag, bytes_per_tag * count);
08677 
08678           iter += count;
08679           start_idx += count;
08680         }
08681         assert(start_idx == gents.size());
08682 
08683         // Free the temp buffer
08684         free(gvals);
08685       }
08686     }
08687 
08688     return MB_SUCCESS;
08689   }
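  // Illustrative usage sketch (added commentary, not part of the original source);
  // 'pc', 'owned_ents', 'data_tag', 'gid_tag' and 'gather_set' are assumed to be set
  // up by the caller, with the id tag holding 1-based indices into the gather set.
  //
  //   ErrorCode rval = pc->gather_data( owned_ents, data_tag, gid_tag, gather_set, 0 );
  //   // on rank 0, the entities in gather_set then carry the gathered tag values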
08690 
08691   /*
08692    * this call is collective, so we reuse the message ids used for tag communication;
08693    * the exchange is similar to a tag exchange, but simpler.
08694    * pack the number of edges, the remote edge handles, then for each edge the number
08695    *    of intersection points, and then 3 doubles for each intersection point.
08696    * on average there is one intx point per edge, in some cases 2, in some cases 0,
08697    *   so on average the message size is num_edges * ( sizeof(eh) + sizeof(int) + 1*3*sizeof(double) )
08698    *          = num_edges * (8+4+24)
08699    */
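 /*
  * Illustrative layout sketch (added commentary, not part of the original source)
  * for one send buffer, matching the packing code below; byte counts assume
  * 8-byte handles, 4-byte ints and 8-byte doubles:
  *
  *   [int num_edges]
  *   [EntityHandle remote_edge_1 ... remote_edge_N]
  *   for each edge: [int num_intx_points][3 doubles per intersection point]
  *
  * with one intersection point per edge on average, this is roughly
  *   4 + N*(8 + 4 + 24) = 36*N + 4 bytes.
  */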
08700 
08701 ErrorCode ParallelComm::settle_intersection_points(Range & edges, Range & shared_edges_owned,
08702                                                    std::vector<std::vector<EntityHandle> *> & extraNodesVec, double tolerance)
08703 {
08704   // the index of an edge in the edges Range gives its index into extraNodesVec;
08705   // the communication strategy follows the exchange_tags strategy:
08706   ErrorCode result;
08707   int success;
08708 
08709   myDebug->tprintf(1, "Entering settle_intersection_points\n");
08710 
08711   // get all procs interfacing to this proc
08712   std::set<unsigned int> exch_procs;
08713   result = get_comm_procs(exch_procs);
08714 
08715   // post irecv's for all interface procs;
08716   // requests are indexed the same as the buffer/sharing procs
08717   std::vector<MPI_Request>  recv_intx_reqs(2 * buffProcs.size(), MPI_REQUEST_NULL),
08718       sent_ack_reqs(buffProcs.size(), MPI_REQUEST_NULL);
08719   std::vector<unsigned int>::iterator sit;
08720   int ind;
08721 
08722   reset_all_buffers();
08723   int incoming = 0;
08724 
08725   for (ind = 0, sit = buffProcs.begin(); sit != buffProcs.end(); sit++, ind++) {
08726     incoming++;
08727     PRINT_DEBUG_IRECV(*sit, procConfig.proc_rank(), remoteOwnedBuffs[ind]->mem_ptr,
08728         INITIAL_BUFF_SIZE, MB_MESG_TAGS_SIZE, incoming);
08729 
08730     success = MPI_Irecv(remoteOwnedBuffs[ind]->mem_ptr, INITIAL_BUFF_SIZE,
08731         MPI_UNSIGNED_CHAR, *sit, MB_MESG_TAGS_SIZE, procConfig.proc_comm(),
08732         &recv_intx_reqs[2 * ind]);
08733     if (success != MPI_SUCCESS) {
08734       result = MB_FAILURE;
08735       RRA("Failed to post irecv in settle_intersection_points.");
08736     }
08737 
08738   }
08739 
08740   // pack and send intersection points from this proc to others
08741   // make sendReqs vector to simplify initialization
08742   sendReqs.resize(2 * buffProcs.size(), MPI_REQUEST_NULL);
08743 
08744   // send the owned shared edges passed in by the caller
08745   Range & entities = shared_edges_owned;
08746 
08747   int dum_ack_buff;
08748 
08749   for (ind = 0, sit = buffProcs.begin(); sit != buffProcs.end(); sit++, ind++) {
08750 
08751     Range edges_to_send = entities;
08752 
08753     // get ents shared by proc *sit
08754     result = filter_pstatus(edges_to_send, PSTATUS_SHARED, PSTATUS_AND, *sit);
08755     RRA("Failed pstatus AND check.");
08756 
08757     // remote nonowned entities; not needed, edges are already owned by this proc
08758 
08759     // pack the data
08760     // reserve space on front for size and for initial buff size
08761     Buffer * buff=localOwnedBuffs[ind];
08762     buff->reset_ptr(sizeof(int));
08763 
08764     /*result = pack_intx_points(edges_to_send, edges, extraNodesVec,
08765         localOwnedBuffs[ind], *sit);*/
08766 
08767     // count the data first, to make sure there is enough room in the buffer
08768     // (the remote handles are sent as well)
08769     std::vector<EntityHandle> dum_remote_edges(edges_to_send.size());
08770     /*
08771      *  get_remote_handles(const bool store_remote_handles,
08772                                EntityHandle *from_vec,
08773                                EntityHandle *to_vec_tmp,
08774                                int num_ents, int to_proc,
08775                                const std::vector<EntityHandle> &new_ents);
08776      */
08777     // we are sending the number of edges, the remote edge handles, and then, for each edge:
08778     //          -- the number of intx points, followed by 3 doubles per intersection point
08779     std::vector<EntityHandle> dum_vec;
08780     result = get_remote_handles(true,
08781         edges_to_send, &dum_remote_edges[0], *sit,
08782                                     dum_vec);
08783     RRA("Failed remote handles");
08784     int count = 4; // leading int: the number of edges
08785     count += sizeof(int)*(int)edges_to_send.size();
08786     count += sizeof(EntityHandle)*(int)edges_to_send.size(); // we will send the remote handles
08787     for (Range::iterator eit=edges_to_send.begin(); eit!=edges_to_send.end(); eit++)
08788     {
08789       EntityHandle edge = *eit;
08790       unsigned int indx = edges.find(edge)-edges.begin();
08791       std::vector<EntityHandle> & intx_nodes = *(extraNodesVec[indx]);
08792       count += (int)intx_nodes.size() * 3 * sizeof(double); // 3 doubles (coordinates) per intersection node
08793     }
08794     //
08795     buff->check_space(count);
08796     PACK_INT(buff->buff_ptr, edges_to_send.size());
08797     PACK_EH(buff->buff_ptr, &dum_remote_edges[0], dum_remote_edges.size());
08798     for (Range::iterator eit=edges_to_send.begin(); eit!=edges_to_send.end(); eit++)
08799     {
08800       EntityHandle edge = *eit;
08801       // pack the remote edge
08802 
08803       unsigned int indx = edges.find(edge)-edges.begin();
08804       std::vector<EntityHandle> & intx_nodes = *(extraNodesVec[indx]);
08805       PACK_INT(buff->buff_ptr, intx_nodes.size());
08806 
08807       result = mbImpl->get_coords(&intx_nodes[0], intx_nodes.size(), (double*)buff->buff_ptr);
08808       buff->buff_ptr += 3*sizeof(double) * intx_nodes.size();
08810     }
08811 
08812     // done packing the intx points and remote edges
08813     RRA("Failed to get coordinates of intersection points.");
08814     buff->set_stored_size();
08815 
08816     // now send it
08817     result = send_buffer(*sit, localOwnedBuffs[ind], MB_MESG_TAGS_SIZE,
08818         sendReqs[2 * ind], recv_intx_reqs[2 * ind + 1], &dum_ack_buff, incoming);
08819     RRA("Failed to send buffer.");
08820 
08821   }
08822 
08823   // receive/unpack intx points
08824   while (incoming) {
08825     MPI_Status status;
08826     PRINT_DEBUG_WAITANY(recv_intx_reqs, MB_MESG_TAGS_SIZE, procConfig.proc_rank());
08827     success = MPI_Waitany(2 * buffProcs.size(), &recv_intx_reqs[0], &ind,
08828         &status);
08829     if (MPI_SUCCESS != success) {
08830       result = MB_FAILURE;
08831       RRA("Failed in waitany in settle_intersection_points.");
08832     }
08833 
08834     PRINT_DEBUG_RECD(status);
08835 
08836     // ok, received something; decrement incoming counter
08837     incoming--;
08838 
08839     bool done = false;
08840     std::vector<EntityHandle> dum_vec;
08841     result = recv_buffer(MB_MESG_TAGS_SIZE, status, remoteOwnedBuffs[ind / 2],
08842         recv_intx_reqs[ind / 2 * 2], recv_intx_reqs[ind / 2 * 2 + 1], incoming,
08843         localOwnedBuffs[ind / 2], sendReqs[ind / 2 * 2],
08844         sendReqs[ind / 2 * 2 + 1], done);
08845     RRA("Failed to resize recv buffer.");
08846     if (done) {
08847       Buffer * buff = remoteOwnedBuffs[ind / 2];
08848       buff->reset_ptr(sizeof(int));
08849       /*result = unpack_tags(remoteOwnedBuffs[ind / 2]->buff_ptr, dum_vec, true,
08850           buffProcs[ind / 2]);*/
08851       // now unpack the edges and vertex info, and compare with the existing vertex positions
08852 
08853       int num_edges;
08854 
08855       UNPACK_INT(buff->buff_ptr, num_edges);
08856       std::vector<EntityHandle> rec_edges;
08857       rec_edges.resize(num_edges);
08858       UNPACK_EH(buff->buff_ptr, &rec_edges[0], num_edges );
08859       for (int i=0; i<num_edges; i++)
08860       {
08861         EntityHandle edge=rec_edges[i];
08862         unsigned int indx = edges.find(edge)-edges.begin();
08863         std::vector<EntityHandle> & intx_nodes = *(extraNodesVec[indx]);
08864         // now get the number of nodes on this (now local) edge
08865         int nverts;
08866         UNPACK_INT(buff->buff_ptr, nverts);
08867         assert(nverts==(int)intx_nodes.size());
08868         // get the positions communicated
08869         std::vector<double> pos_from_owner;
08870         pos_from_owner.resize(3*nverts);
08871         UNPACK_DBLS(buff->buff_ptr, &pos_from_owner[0], 3*nverts);
08872         std::vector<double> current_positions(3*nverts);
08873         result = mbImpl->get_coords(&intx_nodes[0], nverts, &current_positions[0]  );
08874         RRA("Failed to get current positions");
08875         // now, look at what we have in current pos, compare to pos from owner, and reset
08876         for (int k=0; k<nverts; k++)
08877         {
08878           double * pk = &current_positions[3*k];
08879           // take the current pos k, and settle among the ones from owner:
08880           bool found=false;
08881           for (int j=0; j<nverts&&!found; j++)
08882           {
08883             double * pj = &pos_from_owner[3*j];
08884             double dist2 = (pk[0]-pj[0])*(pk[0]-pj[0])+(pk[1]-pj[1])*(pk[1]-pj[1]) + (pk[2]-pj[2])*(pk[2]-pj[2]);
08885             if (dist2<tolerance)
08886             {
08887               pk[0]=pj[0]; pk[1]=pj[1]; pk[2]= pj[2];// correct it!
08888               found =true;
08889               break;
08890             }
08891           }
08892           if (!found)
08893           {
08894             //
08895             std::cout<<" pk:" << pk[0] << " " << pk[1] << " " << pk[2] << " not found \n";
08896             result = MB_FAILURE;
08897           }
08898 
08899         }
08900         // after we are done resetting, we can set the new positions of nodes:
08901         result = mbImpl->set_coords(&intx_nodes[0], nverts, &current_positions[0]  );
08902         RRA("Failed to set new current positions");
08903 
08904       }
08905       //RRA("Failed to recv-unpack-tag message.");
08906     }
08907   }
08908 
08909   // ok, now wait
08910   if (myDebug->get_verbosity() == 5) {
08911     success = MPI_Barrier(procConfig.proc_comm());
08912   } else {
08913     MPI_Status status[2 * MAX_SHARING_PROCS];
08914     success = MPI_Waitall(2 * buffProcs.size(), &sendReqs[0], status);
08915   }
08916   if (MPI_SUCCESS != success) {
08917     result = MB_FAILURE;
08918     RRA("Failure in waitall in settle_intersection_points.");
08919   }
08920 
08921   myDebug->tprintf(1, "Exiting settle_intersection_points\n");
08922 
08923   return MB_SUCCESS;
08925 }
08926 
08927 void ParallelComm::print_pstatus(unsigned char pstat, std::string &ostr) 
08928 {
08929   std::ostringstream str;
08930   int num = 0;
08931 #define ppstat(a, b) {if (pstat & a) {if (num) str << ", "; str << b; num++;};}
08932     
08933   ppstat(PSTATUS_NOT_OWNED, "NOT_OWNED");
08934   ppstat(PSTATUS_SHARED, "SHARED");
08935   ppstat(PSTATUS_MULTISHARED, "MULTISHARED");
08936   ppstat(PSTATUS_INTERFACE, "INTERFACE");
08937   ppstat(PSTATUS_GHOST, "GHOST");
08938 
08939   ostr = str.str();
08940 }
08941 
08942 void ParallelComm::print_pstatus(unsigned char pstat) 
08943 {
08944   std::string str;
08945   print_pstatus(pstat, str);
08946   std::cout << str.c_str() << std::endl;
08947 }
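// Illustrative usage sketch (added commentary, not part of the original source);
// 'pc' is an assumed ParallelComm* and 'ent' an assumed entity handle.
//
//   unsigned char pstat;
//   if (MB_SUCCESS == pc->get_pstatus( ent, pstat ))
//     pc->print_pstatus( pstat );   // prints e.g. "SHARED, INTERFACE"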
08948 
08949 } // namespace moab