
//-------------------------------------------------------------------------
// Filename      : WriteHDF5.cpp
//
// Purpose       : TSTT HDF5 Writer
//
// Special Notes : WriteSLAC used as template for this
//
// Creator       : Jason Kraftcheck
//
// Creation Date : 04/01/04
//-------------------------------------------------------------------------

#ifndef HDF5_FILE
#  error Attempt to compile WriteHDF5 with HDF5 support disabled
#endif

#include <assert.h>
#if defined(_MSC_VER) || defined(__MINGW32__)
#include <sys/time.h>
#endif
#include <time.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <limits>
#include <cstdio>
#include <iostream>
#include "WriteHDF5.hpp"
#include <H5Tpublic.h>
#include <H5Ppublic.h>
#include <H5Epublic.h>
#include "moab/Interface.hpp"
#include "Internals.hpp"
#include "MBTagConventions.hpp"
#include "moab/CN.hpp"
#include "moab/FileOptions.hpp"
#include "moab/Version.h"
#include "moab/CpuTimer.hpp"
#include "IODebugTrack.hpp"
#include "mhdf.h"
/* Access HDF5 file handle for debugging
#include <H5Fpublic.h>
struct file { uint32_t magic; hid_t handle; };
*/
#undef DEBUG


#undef BLOCKED_COORD_IO


#ifdef DEBUG
/*
# include <H5Epublic.h>
  extern "C" herr_t hdf_error_handler( void* )
  {
    H5Eprint( stderr );
    assert( 0 );
  }
*/
# define myassert(A) assert(A)
#else
# define myassert(A)
#endif


#ifdef VALGRIND
#  include <valgrind/memcheck.h>
#else
#  ifndef VALGRIND_CHECK_MEM_IS_DEFINED
#    define VALGRIND_CHECK_MEM_IS_DEFINED(a, b)
#  endif
#  ifndef VALGRIND_CHECK_MEM_IS_ADDRESSABLE
#    define VALGRIND_CHECK_MEM_IS_ADDRESSABLE(a, b)
#  endif
#  ifndef VALGRIND_MAKE_MEM_UNDEFINED
#    define VALGRIND_MAKE_MEM_UNDEFINED(a, b)
#  endif
#endif

namespace moab {

template <typename T> inline 
void VALGRIND_MAKE_VEC_UNDEFINED( std::vector<T>& v ) {
    // &v[0] is undefined behavior for an empty vector, so guard the call.
    if (!v.empty())
      VALGRIND_MAKE_MEM_UNDEFINED( &v[0], v.size() * sizeof(T) );
}

#define WRITE_HDF5_BUFFER_SIZE (40*1024*1024)

static hid_t get_id_type()
{
  if (8 == sizeof(WriteHDF5::id_t)) {
    if (8 == sizeof(long))
      return H5T_NATIVE_ULONG;
    else
      return H5T_NATIVE_UINT64;
  }
  else if (4 == sizeof(WriteHDF5::id_t)) {
    if (4 == sizeof(int))
      return H5T_NATIVE_UINT;
    else
      return H5T_NATIVE_UINT32;
  }
  else {
    assert(0);
    return (hid_t)-1;
  }
}
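// Selection note: id_t is expected to be either 4 or 8 bytes, so the
// branches above cover every supported configuration.  The native HDF5
// types (H5T_NATIVE_ULONG / H5T_NATIVE_UINT) are preferred when their
// size matches, with the fixed-width aliases used as the fallback.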

  // This is the HDF5 type used to store file IDs
const hid_t WriteHDF5::id_type = get_id_type();

// This function doesn't do anything useful.  It's just a nice
// place to set a break point to determine why the writer fails.
static inline ErrorCode error( ErrorCode rval )
  { return rval; }

// Call \c error function during HDF5 library errors to make
// it easier to trap such errors in the debugger.  This function
// gets registered with the HDF5 library as a callback.  It
// works the same as the default (H5Eprint), except that it
// also calls the \c error function as a no-op.
#if defined(H5E_auto_t_vers) && H5E_auto_t_vers > 1
static herr_t handle_hdf5_error( hid_t stack, void* data )
{
  WriteHDF5::HDF5ErrorHandler* h = reinterpret_cast<WriteHDF5::HDF5ErrorHandler*>(data);
  herr_t result = 0;
  if (h->func)
    result = (*h->func)(stack,h->data);
  error(MB_FAILURE);
  return result;
}
#else
static herr_t handle_hdf5_error( void* data )
{
  WriteHDF5::HDF5ErrorHandler* h = reinterpret_cast<WriteHDF5::HDF5ErrorHandler*>(data);
  herr_t result = 0;
  if (h->func)
    result = (*h->func)(h->data);
  error(MB_FAILURE);
  return result;
}
#endif

  // Some macros to handle error checking.  The
  // CHK_MHDF_ERR_* macros check the value of an mhdf_Status
  // object.  The CHK_MB_ERR_* macros check the value of an ErrorCode.
  // The *_0 macros accept no other arguments. The *_1
  // macros accept a single hdf5 handle to close on error.
  // The *_2 macros accept an array of two hdf5 handles to
  // close on error.  The *_2C macros accept one hdf5 handle
  // to close on error and a bool and an hdf5 handle where
  // the latter handle is conditionally closed depending on
  // the value of the bool.  All macros contain a "return"
  // statement.
#define CHK_MHDF_ERR_0( A )                                 \
do if ( mhdf_isError( &(A) )) {                             \
    writeUtil->report_error( "%s\n", mhdf_message( &(A) ) );\
    myassert(0);                                            \
    return error(MB_FAILURE);                               \
} while(false)

#define CHK_MHDF_ERR_1( A, B )                              \
do if ( mhdf_isError( &(A) )) {                             \
    writeUtil->report_error( "%s\n", mhdf_message( &(A) ) );\
    myassert(0);                                            \
    mhdf_closeData( filePtr, (B), &(A) );                   \
    return error(MB_FAILURE);                               \
} while(false)

#define CHK_MHDF_ERR_2( A, B )                              \
do if ( mhdf_isError( &(A) )) {                             \
    writeUtil->report_error( "%s\n", mhdf_message( &(A) ) );\
    myassert(0);                                            \
    mhdf_closeData( filePtr, (B)[0], &(A) );                \
    mhdf_closeData( filePtr, (B)[1], &(A) );                \
    return error(MB_FAILURE);                               \
} while(false)

#define CHK_MHDF_ERR_3( A, B )                              \
do if ( mhdf_isError( &(A) )) {                             \
    writeUtil->report_error( "%s\n", mhdf_message( &(A) ) );\
    myassert(0);                                            \
    mhdf_closeData( filePtr, (B)[0], &(A) );                \
    mhdf_closeData( filePtr, (B)[1], &(A) );                \
    mhdf_closeData( filePtr, (B)[2], &(A) );                \
    return error(MB_FAILURE);                               \
} while(false)

#define CHK_MHDF_ERR_2C( A, B, C, D )                       \
do if ( mhdf_isError( &(A) )) {                             \
    writeUtil->report_error( "%s\n", mhdf_message( &(A) ) );\
    myassert(0);                                            \
    mhdf_closeData( filePtr, (B), &(A) );                   \
    if (C) mhdf_closeData( filePtr, (D), &(A) );            \
    return error(MB_FAILURE);                               \
} while(false)


#define CHK_MB_ERR_0( A ) \
do if (MB_SUCCESS != (A)) return error(A); while(false)

#define CHK_MB_ERR_1( A, B, C )         \
do if (MB_SUCCESS != (A)) {             \
  mhdf_closeData( filePtr, (B), &(C) ); \
  myassert(0);                          \
  return error(A);                      \
} while(false)

#define CHK_MB_ERR_2( A, B, C )            \
do if (MB_SUCCESS != (A)) {                \
  mhdf_closeData( filePtr, (B)[0], &(C) ); \
  mhdf_closeData( filePtr, (B)[1], &(C) ); \
  write_finished();                        \
  myassert(0);                             \
  return error(A);                         \
} while(false)

#define CHK_MB_ERR_3( A, B, C )            \
do if (MB_SUCCESS != (A)) {                \
  mhdf_closeData( filePtr, (B)[0], &(C) ); \
  mhdf_closeData( filePtr, (B)[1], &(C) ); \
  mhdf_closeData( filePtr, (B)[2], &(C) ); \
  write_finished();                        \
  myassert(0);                             \
  return error(A);                         \
} while(false)

#define CHK_MB_ERR_2C( A, B, C, D, E )          \
do if (MB_SUCCESS != (A)) {                     \
  mhdf_closeData( filePtr, (B), &(E) );         \
  if (C) mhdf_closeData( filePtr, (D), &(E) );  \
  write_finished();                             \
  myassert(0);                                  \
  return error(A);                              \
} while(false)


#define debug_barrier() debug_barrier_line(__LINE__)
void WriteHDF5::debug_barrier_line(int )
{
}
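// Intentionally a no-op in this serial writer: debug_barrier() marks the
// points where a synchronization barrier is useful when debugging parallel
// writes, and a parallel build can hook an MPI barrier in here.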


class CheckOpenWriteHDF5Handles
{
  int fileline;
  mhdf_FileHandle handle;
  int enter_count;
public:
  CheckOpenWriteHDF5Handles(mhdf_FileHandle file, int line)
    : fileline(line), handle(file),
      enter_count(mhdf_countOpenHandles(file))
  {}
  ~CheckOpenWriteHDF5Handles()
  {
    int new_count = mhdf_countOpenHandles(handle);
    if (new_count != enter_count) {
      std::cout << "Leaked HDF5 object handle in function at "
                << __FILE__ << ":" << fileline << std::endl
                << "Open at entrance: " << enter_count << std::endl
                << "Open at exit:     " << new_count << std::endl;
    }
  }
};

MPEState WriteHDF5::topState;
MPEState WriteHDF5::subState;


#ifdef NDEBUG
#define CHECK_OPEN_HANDLES
#else
#define CHECK_OPEN_HANDLES \
  CheckOpenWriteHDF5Handles check_open_handles_(filePtr,__LINE__)
#endif

bool WriteHDF5::convert_handle_tag( const EntityHandle* source,
                                    EntityHandle* dest, size_t count ) const
{
  bool some_valid = false;
  for (size_t i = 0; i < count; ++i) {
    if (!source[i])
      dest[i] = 0;
    else {
      dest[i] = idMap.find( source[i] );
      if (dest[i])
        some_valid = true;
    }
  }
  return some_valid;
}

bool WriteHDF5::convert_handle_tag( EntityHandle* data, size_t count ) const
{
  assert( sizeof(EntityHandle) == sizeof(id_t) );
  return convert_handle_tag( data, data, count );
}
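// Conversion note: handle-valued tag data is rewritten in terms of file IDs,
// with handles that have no file ID (entities not being exported) mapped to
// zero.  The in-place overload relies on EntityHandle and id_t having the
// same size, hence the assert above.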

ErrorCode WriteHDF5::assign_ids( const Range& entities, id_t id )
{
  Range::const_pair_iterator pi;
  for (pi = entities.const_pair_begin(); pi != entities.const_pair_end(); ++pi) {
    const EntityHandle n = pi->second - pi->first + 1;
    dbgOut.printf( 3, "Assigning %s [%lu,%lu] to file IDs [%lu,%lu]\n",
      CN::EntityTypeName(TYPE_FROM_HANDLE(pi->first)),
      (unsigned long)(ID_FROM_HANDLE(pi->first)),
      (unsigned long)(ID_FROM_HANDLE(pi->first)+n-1),
      (unsigned long)id,
      (unsigned long)(id+n-1));
    if (!idMap.insert( pi->first, id, n ).second)
      return error(MB_FAILURE);
    id += n;
  }
  return MB_SUCCESS;
}

const char* WriteHDF5::ExportSet::name() const
{
  static char buffer[128];
  switch (type) {
    case MBVERTEX:
      return mhdf_node_type_handle();
    case MBENTITYSET:
      return mhdf_set_type_handle();
    default:
      // use snprintf so an unexpectedly long type name cannot overflow buffer
      snprintf( buffer, sizeof(buffer), "%s%d", CN::EntityTypeName( type ), num_nodes );
      return buffer;
  }
}


WriterIface* WriteHDF5::factory( Interface* iface )
  { return new WriteHDF5( iface ); }
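// Usage sketch (illustrative, not part of this file): applications normally
// reach this writer indirectly through the Interface, e.g.
//
//   moab::Core mb;
//   // ... build or load mesh ...
//   mb.write_file( "mesh.h5m" );   // dispatches to WriteHDF5 for .h5m output
//
// Direct construction via factory() is the hook MOAB's reader/writer
// registry uses to instantiate the writer on demand.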

WriteHDF5::WriteHDF5( Interface* iface )
  : bufferSize( WRITE_HDF5_BUFFER_SIZE ),
    dataBuffer( 0 ),
    iFace( iface ),
    writeUtil( 0 ),
    filePtr( 0 ),
    setContentsOffset( 0 ),
    setChildrenOffset( 0 ),
    setParentsOffset( 0 ),
    maxNumSetContents( 0 ),
    maxNumSetChildren( 0 ),
    maxNumSetParents( 0 ),
    writeSets(false),
    writeSetContents(false),
    writeSetChildren(false),
    writeSetParents(false),
    parallelWrite(false),
    collectiveIO(false),
    writeTagDense(false),
    writeProp( H5P_DEFAULT ),
    dbgOut("H5M ", stderr)
{
}

ErrorCode WriteHDF5::init()
{
  ErrorCode rval;

  if (writeUtil) // init has already been called
    return MB_SUCCESS;
/*
#ifdef DEBUG
  H5Eset_auto( &hdf_error_handler, writeUtil );  // HDF5 callback for errors
#endif
*/
    // For known tag types, store the corresponding HDF5 type in which
    // the tag data is to be written in the file.
  //register_known_tag_types( iFace );

    // Get the util interface
  rval = iFace->query_interface( writeUtil );
  CHK_MB_ERR_0(rval);

  idMap.clear();

#if defined(H5Eget_auto_vers) && H5Eget_auto_vers > 1
  herr_t err = H5Eget_auto( H5E_DEFAULT, &errorHandler.func, &errorHandler.data );
#else
  herr_t err = H5Eget_auto( &errorHandler.func, &errorHandler.data );
#endif
  if (err < 0) {
    errorHandler.func = 0;
    errorHandler.data = 0;
  }
  else {
#if defined(H5Eset_auto_vers) && H5Eset_auto_vers > 1
    err = H5Eset_auto( H5E_DEFAULT, &handle_hdf5_error, &errorHandler );
#else
    err = H5Eset_auto( &handle_hdf5_error, &errorHandler );
#endif
    if (err < 0) {
      errorHandler.func = 0;
      errorHandler.data = 0;
    }
  }

  if (!topState.valid())
    topState = MPEState( "WriteHDF5", "yellow" );
  if (!subState.valid())
    subState = MPEState( "WriteHDF5 subevent", "cyan" );

  return MB_SUCCESS;
}

ErrorCode WriteHDF5::write_finished()
{
    // release memory allocated in lists
  exportList.clear();
  nodeSet.range.clear();
  setSet.range.clear();
  tagList.clear();
  idMap.clear();

  HDF5ErrorHandler handler;
#if defined(H5Eget_auto_vers) && H5Eget_auto_vers > 1
  herr_t err = H5Eget_auto( H5E_DEFAULT, &handler.func, &handler.data );
#else
  herr_t err = H5Eget_auto( &handler.func, &handler.data );
#endif
  if (err >= 0 && handler.func == &handle_hdf5_error) {
    assert(handler.data == &errorHandler);
#if defined(H5Eget_auto_vers) && H5Eget_auto_vers > 1
    H5Eset_auto( H5E_DEFAULT, errorHandler.func, errorHandler.data );
#else
    H5Eset_auto( errorHandler.func, errorHandler.data );
#endif
  }

  return MB_SUCCESS;
}

WriteHDF5::~WriteHDF5()
{
  if (!writeUtil) // init() failed.
    return;

  iFace->release_interface( writeUtil );
}


ErrorCode WriteHDF5::write_file( const char* filename,
                                   bool overwrite,
                                   const FileOptions& opts,
                                   const EntityHandle* set_array,
                                   const int num_sets,
                                   const std::vector<std::string>& qa_records,
                                   const Tag* tag_list,
                                   int num_tags,
                                   int user_dimension )
{
  mhdf_Status status;

  parallelWrite = false;
  collectiveIO = false;

  // Enable debug output
  int tmpval = 0;
  if (MB_SUCCESS == opts.get_int_option("DEBUG_IO", 1, tmpval))
    dbgOut.set_verbosity(tmpval);

  //writeTagDense = (MB_SUCCESS == opts.get_null_option("DENSE_TAGS"));
  writeTagDense = true;

  // Enable some extra consistency checks on the I/O.  Note: amongst other
  // things this will print errors if the entire file is not written, so if
  // doing a partial write that is not a parallel write, this should be
  // disabled.
  debugTrack = (MB_SUCCESS == opts.get_null_option("DEBUG_BINIO"));

  bufferSize = WRITE_HDF5_BUFFER_SIZE;
  int buf_size;
  ErrorCode rval = opts.get_int_option( "BUFFER_SIZE", buf_size );
  if (MB_SUCCESS == rval && buf_size >= 24)
    bufferSize = buf_size;

    // Allocate internal buffer to use when gathering data to write.
  dataBuffer = (char*)malloc( bufferSize );
  if (!dataBuffer)
    return error(MB_MEMORY_ALLOCATION_FAILED);

    // Clear filePtr so we know if it is open upon failure
  filePtr = 0;

    // Do actual write.
  writeProp = H5P_DEFAULT;
  ErrorCode result = write_file_impl( filename, overwrite, opts,
                                        set_array, num_sets,
                                        qa_records,
                                        tag_list, num_tags,
                                        user_dimension );
    // close writeProp if it was opened
  if (writeProp != H5P_DEFAULT)
    H5Pclose(writeProp);

    // Free memory buffer
  free( dataBuffer );
  dataBuffer = 0;

    // Close file
  bool created_file = false;
  if (filePtr) {
    created_file = true;
    mhdf_closeFile( filePtr, &status );
    filePtr = 0;
    if (mhdf_isError( &status )) {
      writeUtil->report_error( "%s\n", mhdf_message( &status ) );
      if (MB_SUCCESS == result)
        result = MB_FAILURE;
    }
  }

    // Release other resources
  if (MB_SUCCESS == result)
    result = write_finished();
  else
    write_finished();

    // If write failed, remove file unless KEEP option was specified
  if (MB_SUCCESS != result && created_file &&
      MB_ENTITY_NOT_FOUND == opts.get_null_option( "KEEP" ))
    remove( filename );

  return result;
}
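// write_file() is a thin wrapper: it allocates the I/O buffer, delegates the
// real work to write_file_impl(), and then unconditionally releases the
// property list, the buffer, and the file handle, so every exit path cleans
// up the same way.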


ErrorCode WriteHDF5::write_file_impl( const char* filename,
                                        bool overwrite,
                                        const FileOptions& opts,
                                        const EntityHandle* set_array,
                                        const int num_sets,
                                        const std::vector<std::string>& qa_records,
                                        const Tag* tag_list,
                                        int num_tags,
                                        int user_dimension )
{
  ErrorCode result;
  std::list<TagDesc>::const_iterator t_itor;
  std::list<ExportSet>::iterator ex_itor;
  EntityHandle elem_count, max_id;
  double times[NUM_TIMES] = {0};

  if (MB_SUCCESS != init())
    return error(MB_FAILURE);

    // see if we need to report times
  bool cputime = false;
  result = opts.get_null_option("CPUTIME");
  if (MB_SUCCESS == result)
    cputime = true;

  CpuTimer timer;

  dbgOut.tprint(1,"Gathering Mesh\n");
  topState.start("gathering mesh");

    // Gather mesh to export
  exportList.clear();
  if (0 == num_sets || (1 == num_sets && set_array[0] == 0))
  {
    result = gather_all_mesh( );
  }
  else
  {
    std::vector<EntityHandle> passed_export_list(set_array, set_array+num_sets);
    result = gather_mesh_info( passed_export_list );
  }
  topState.end(result);
  CHK_MB_ERR_0(result);

  times[GATHER_TIME] = timer.time_elapsed();

  //if (nodeSet.range.size() == 0)
  //  return error(MB_ENTITY_NOT_FOUND);

  dbgOut.tprint(1,"Checking ID space\n");

    // Make sure ID space is sufficient
  elem_count = nodeSet.range.size() + setSet.range.size();
  for (ex_itor = exportList.begin(); ex_itor != exportList.end(); ++ex_itor)
    elem_count += ex_itor->range.size();
  max_id = (EntityHandle)1 << (8*sizeof(id_t)-1);
  if (elem_count > max_id)
  {
    writeUtil->report_error("ID space insufficient for mesh size.\n");
      // 'result' is MB_SUCCESS at this point, so return an explicit failure code
    return error(MB_FAILURE);
  }

  dbgOut.tprint(1, "Creating File\n" );

    // Figure out the dimension in which to write the mesh.
  int mesh_dim;
  result = iFace->get_dimension( mesh_dim );
  CHK_MB_ERR_0(result);

  if (user_dimension < 1)
    user_dimension = mesh_dim;
  user_dimension = user_dimension > mesh_dim ? mesh_dim : user_dimension;

    // Create the file layout, including all tables (zero-ed) and
    // all structure and meta information.
  const char* optnames[] = { "WRITE_PART", "FORMAT", 0 };
  int junk;
  parallelWrite = (MB_SUCCESS == opts.match_option( "PARALLEL", optnames, junk ));
  if (parallelWrite) {
      // Just store Boolean value based on string option here.
      // parallel_create_file will set writeProp accordingly.
    //collectiveIO =  (MB_SUCCESS == opts.get_null_option("COLLECTIVE"));
    //dbgOut.printf(2,"'COLLECTIVE' option = %s\n", collectiveIO ? "YES" : "NO" );
      // Do this all the time, as it appears to be much faster than indep in some cases
    collectiveIO = true;
    result = parallel_create_file( filename, overwrite, qa_records, opts, tag_list, num_tags, user_dimension, times );
  }
  else {
    result = serial_create_file( filename, overwrite, qa_records, tag_list, num_tags, user_dimension );
  }
  if (MB_SUCCESS != result)
    return error(result);

  times[CREATE_TIME] = timer.time_elapsed();

  dbgOut.tprint(1,"Writing Nodes.\n");
    // Write node coordinates
  if (!nodeSet.range.empty() || parallelWrite) {
    topState.start( "writing coords" );
    result = write_nodes();
    topState.end(result);
    if (MB_SUCCESS != result)
      return error(result);
  }

  times[COORD_TIME] = timer.time_elapsed();

  dbgOut.tprint(1,"Writing connectivity.\n");

    // Write element connectivity
  for (ex_itor = exportList.begin(); ex_itor != exportList.end(); ++ex_itor) {
    topState.start( "writing connectivity for ", ex_itor->name() );
    result = write_elems( *ex_itor );
    topState.end(result);
    if (MB_SUCCESS != result)
      return error(result);
  }
  times[CONN_TIME] = timer.time_elapsed();

  dbgOut.tprint(1,"Writing sets.\n");

    // Write meshsets
  result = write_sets(times);
  if (MB_SUCCESS != result)
    return error(result);
  debug_barrier();

  times[SET_TIME] = timer.time_elapsed();
  dbgOut.tprint(1,"Writing adjacencies.\n");

    // Write adjacencies
  // Tim says don't save node adjacencies!
#ifdef MB_H5M_WRITE_NODE_ADJACENCIES
  result = write_adjacencies( nodeSet );
  if (MB_SUCCESS != result)
    return error(result);
#endif
  for (ex_itor = exportList.begin(); ex_itor != exportList.end(); ++ex_itor) {
    topState.start( "writing adjacencies for ", ex_itor->name() );
    result = write_adjacencies( *ex_itor );
    topState.end(result);
    if (MB_SUCCESS != result)
      return error(result);
  }
  times[ADJ_TIME] = timer.time_elapsed();

  dbgOut.tprint(1,"Writing tags.\n");


    // Write tags
  for (t_itor = tagList.begin(); t_itor != tagList.end(); ++t_itor) {
    std::string name;
    iFace->tag_get_name( t_itor->tag_id, name );
    topState.start( "writing tag: ", name.c_str() );
    result = write_tag( *t_itor, times );
    topState.end(result);
    if (MB_SUCCESS != result)
      return error(result);
  }
  times[TAG_TIME] = timer.time_elapsed();

  times[TOTAL_TIME] = timer.time_since_birth();

  if (cputime) {
    print_times( times );
  }

  return MB_SUCCESS;
}

ErrorCode WriteHDF5::initialize_mesh( const Range ranges[5] )
{
  ErrorCode rval;

  if (!ranges[0].all_of_type(MBVERTEX))
    return error(MB_FAILURE);
  nodeSet.range = ranges[0];
  nodeSet.type = MBVERTEX;
  nodeSet.num_nodes = 1;
  nodeSet.max_num_ents = nodeSet.max_num_adjs = 0;

  if (!ranges[4].all_of_type(MBENTITYSET))
    return error(MB_FAILURE);
  setSet.range = ranges[4];
  setSet.type = MBENTITYSET;
  setSet.num_nodes = 0;
  setSet.max_num_ents = setSet.max_num_adjs = 0;
  maxNumSetContents = maxNumSetChildren = maxNumSetParents = 0;

  exportList.clear();
  std::vector<Range> bins(1024); // sort entities by connectivity length;
                                 // resize is expensive due to Range copy, so start big
  for (EntityType type = MBEDGE; type < MBENTITYSET; ++type)
  {
    ExportSet set;
    set.max_num_ents = set.max_num_adjs = 0;
    const int dim = CN::Dimension(type);

      // Group entities by connectivity length
    bins.clear();
    assert(dim >= 0 && dim <= 4);
    std::pair<Range::const_iterator,Range::const_iterator> p = ranges[dim].equal_range(type);
    Range::const_iterator i = p.first;
    while (i != p.second) {
      Range::const_iterator first = i;
      EntityHandle const* conn;
      int len, firstlen;

      // dummy storage vector for structured mesh "get_connectivity" function
      std::vector<EntityHandle> storage;

      rval = iFace->get_connectivity( *i, conn, firstlen, false, &storage );
      if (MB_SUCCESS != rval)
        return error(rval);

      for (++i; i != p.second; ++i) {
        rval = iFace->get_connectivity( *i, conn, len, false, &storage );
        if (MB_SUCCESS != rval)
          return error(rval);

        if (len != firstlen)
          break;
      }

      if (firstlen >= (int)bins.size())
        bins.resize(firstlen+1);
      bins[firstlen].merge( first, i );
    }
      // Create ExportSet for each group
    for (std::vector<Range>::iterator j = bins.begin(); j != bins.end(); ++j) {
      if (j->empty())
        continue;

      set.range.clear();
      set.type = type;
      set.num_nodes = j - bins.begin();
      exportList.push_back( set );
      exportList.back().range.swap( *j );
    }
  }
  return MB_SUCCESS;
}
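// Grouping example: with the binning above, 8-node and 27-node MBHEX
// elements land in two distinct ExportSets, each written to its own
// fixed-width connectivity table (named "Hex8" and "Hex27" style via
// ExportSet::name(), given CN's type-name strings).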


  // Gather the mesh to be written from a list of owning meshsets.
ErrorCode WriteHDF5::gather_mesh_info(
                           const std::vector<EntityHandle>& export_sets )
{
  ErrorCode rval;

  int dim;
  Range range;      // temporary storage
  Range ranges[5];  // lists of entities to export, grouped by dimension

    // Gather list of all related sets
    // (the constructor already copies export_sets; no extra copy needed)
  std::vector<EntityHandle> stack(export_sets);
  std::vector<EntityHandle> set_children;
  while( !stack.empty() )
  {
    EntityHandle meshset = stack.back(); stack.pop_back();
    ranges[4].insert( meshset );

      // Get contained sets
    range.clear();
    rval = iFace->get_entities_by_type( meshset, MBENTITYSET, range );
    CHK_MB_ERR_0(rval);
    for (Range::iterator ritor = range.begin(); ritor != range.end(); ++ritor)
      if (ranges[4].find( *ritor ) == ranges[4].end())
        stack.push_back( *ritor );

      // Get child sets
    set_children.clear();
    rval = iFace->get_child_meshsets( meshset, set_children, 1 );
    CHK_MB_ERR_0(rval);
    for (std::vector<EntityHandle>::iterator vitor = set_children.begin();
         vitor != set_children.end(); ++vitor )
      if (ranges[4].find( *vitor ) == ranges[4].end())
        stack.push_back( *vitor );
  }

    // Gather list of all mesh entities from list of sets,
    // grouped by dimension.
  for (Range::iterator setitor = ranges[4].begin();
       setitor != ranges[4].end(); ++setitor)
  {
    for (dim = 0; dim < 4; ++dim)
    {
      range.clear();
      rval = iFace->get_entities_by_dimension( *setitor, dim, range, false );
      CHK_MB_ERR_0(rval);

      ranges[dim].merge(range);
    }
  }

    // For each list of elements, append adjacent children and
    // nodes to lists.
  for (dim = 3; dim > 0; --dim)
  {
    for (int cdim = 1; cdim < dim; ++cdim)
    {
      range.clear();
      rval = iFace->get_adjacencies( ranges[dim], cdim, false, range );
      CHK_MB_ERR_0(rval);
      ranges[cdim].merge( range );
    }
    range.clear();
    rval = writeUtil->gather_nodes_from_elements( ranges[dim], 0, range );
    CHK_MB_ERR_0(rval);
    ranges[0].merge( range );
  }

  return initialize_mesh( ranges );
}

  // Gather all the mesh and related information to be written.
ErrorCode WriteHDF5::gather_all_mesh( )
{
  ErrorCode rval;
  Range ranges[5];

  rval = iFace->get_entities_by_type( 0, MBVERTEX, ranges[0] );
  if (MB_SUCCESS != rval)
    return error(rval);

  rval = iFace->get_entities_by_dimension( 0, 1, ranges[1] );
  if (MB_SUCCESS != rval)
    return error(rval);

  rval = iFace->get_entities_by_dimension( 0, 2, ranges[2] );
  if (MB_SUCCESS != rval)
    return error(rval);

  rval = iFace->get_entities_by_dimension( 0, 3, ranges[3] );
  if (MB_SUCCESS != rval)
    return error(rval);

  rval = iFace->get_entities_by_type( 0, MBENTITYSET, ranges[4] );
  if (MB_SUCCESS != rval)
    return error(rval);

  return initialize_mesh( ranges );
}

ErrorCode WriteHDF5::write_nodes( )
{
  mhdf_Status status;
  int dim, mesh_dim;
  ErrorCode rval;
  hid_t node_table;
  long first_id, num_nodes;

  if (!nodeSet.total_num_ents)
    return MB_SUCCESS; // no nodes!

  CHECK_OPEN_HANDLES;

  rval = iFace->get_dimension( mesh_dim );
  CHK_MB_ERR_0(rval);

  debug_barrier();
  dbgOut.print(3, "Opening Node Coords\n");
  node_table = mhdf_openNodeCoords( filePtr, &num_nodes, &dim, &first_id, &status );
  CHK_MHDF_ERR_0(status);
  IODebugTrack track( debugTrack, "nodes", num_nodes );

  double* buffer = (double*)dataBuffer;
#ifdef BLOCKED_COORD_IO
  int chunk_size = bufferSize / sizeof(double);
#else
  int chunk_size = bufferSize / (3*sizeof(double));
#endif

  long remaining = nodeSet.range.size();
  long num_writes = (remaining+chunk_size-1) / chunk_size;
  if (nodeSet.max_num_ents) {
    assert( nodeSet.max_num_ents >= remaining );
    num_writes = (nodeSet.max_num_ents+chunk_size-1) / chunk_size;
  }
  long remaining_writes = num_writes;

  long offset = nodeSet.offset;
  Range::const_iterator iter = nodeSet.range.begin();
  dbgOut.printf(3, "Writing %ld nodes in %ld blocks of %d\n", remaining, num_writes, chunk_size);
  while (remaining)
  {
    VALGRIND_MAKE_MEM_UNDEFINED( dataBuffer, bufferSize );
    long count = chunk_size < remaining ? chunk_size : remaining;
    remaining -= count;
    Range::const_iterator end = iter;
    end += count;

#ifdef BLOCKED_COORD_IO
    for (int d = 0; d < dim; d++)
    {
      if (d < mesh_dim)
      {
        rval = writeUtil->get_node_coords( d, iter, end, count, buffer );
        CHK_MB_ERR_1(rval, node_table, status);
      }
      else
      {
        memset( buffer, 0, count * sizeof(double) );
      }

      dbgOut.printf(3,"  writing %c node chunk %ld of %ld, %ld values at %ld\n",
             (char)('X'+d), num_writes - remaining_writes + 1, num_writes, count, offset );
      mhdf_writeNodeCoordWithOpt( node_table, offset, count, d, buffer, writeProp, &status );
      CHK_MHDF_ERR_1(status, node_table);
    }
#else
    rval = writeUtil->get_node_coords( -1, iter, end, 3*count, buffer );
    CHK_MB_ERR_1(rval, node_table, status);
    dbgOut.printf(3,"  writing node chunk %ld of %ld, %ld values at %ld\n",
           num_writes - remaining_writes + 1, num_writes, count, offset );
    mhdf_writeNodeCoordsWithOpt( node_table, offset, count, buffer, writeProp, &status );
    CHK_MHDF_ERR_1(status, node_table);
#endif
    track.record_io( offset, count );

    iter = end;
    offset += count;
    --remaining_writes;
  }

  // Do empty writes if necessary for parallel collective IO
  if (collectiveIO) {
    while (remaining_writes--) {
      assert(writeProp != H5P_DEFAULT);
#ifdef BLOCKED_COORD_IO
      for (int d = 0; d < dim; ++d) {
        dbgOut.printf(3,"  writing (empty) %c node chunk %ld of %ld.\n",
               (char)('X'+d), num_writes - remaining_writes, num_writes );
        mhdf_writeNodeCoordWithOpt( node_table, offset, 0, d, 0, writeProp, &status );
        CHK_MHDF_ERR_1(status, node_table);
      }
#else
      dbgOut.printf(3,"  writing (empty) node chunk %ld of %ld.\n",
             num_writes - remaining_writes, num_writes );
      mhdf_writeNodeCoordsWithOpt( node_table, offset, 0, 0, writeProp, &status );
      CHK_MHDF_ERR_1(status, node_table);
#endif
    }
  }

  mhdf_closeData( filePtr, node_table, &status );
  CHK_MHDF_ERR_0(status);

  track.all_reduce();
  return MB_SUCCESS;
}
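// Layout note: with BLOCKED_COORD_IO defined, coordinates are written one
// dimension at a time (all x values, then all y, then all z) via the
// per-dimension mhdf_writeNodeCoordWithOpt call; the default path instead
// writes interleaved xyz triples with a single call per chunk.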

ErrorCode WriteHDF5::write_elems( ExportSet& elems )
{
  mhdf_Status status;
  ErrorCode rval;
  long first_id;
  int nodes_per_elem;
  long table_size;

  CHECK_OPEN_HANDLES;

  debug_barrier();
  dbgOut.printf(2,"Writing %lu elements of type %s%d\n",
    (unsigned long)elems.range.size(),
    CN::EntityTypeName(elems.type), elems.num_nodes );
  dbgOut.print(3,"Writing elements",elems.range);

  hid_t elem_table = mhdf_openConnectivity( filePtr,
                                            elems.name(),
                                            &nodes_per_elem,
                                            &table_size,
                                            &first_id,
                                            &status );
  IODebugTrack track( debugTrack, elems.name() && strlen(elems.name())
    ? elems.name() : "<ANONYMOUS ELEM SET?>", table_size );

  CHK_MHDF_ERR_0(status);
  assert ((unsigned long)first_id <= elems.first_id);
  assert ((unsigned long)table_size >= elems.offset + elems.range.size());


  EntityHandle* buffer = (EntityHandle*)dataBuffer;
  int chunk_size = bufferSize / (elems.num_nodes * sizeof(id_t));
  long offset = elems.offset;
  long remaining = elems.range.size();
  long num_writes = (remaining+chunk_size-1) / chunk_size;
  if (elems.max_num_ents) {
    assert( elems.max_num_ents >= remaining );
    num_writes = (elems.max_num_ents+chunk_size-1) / chunk_size;
  }
  long remaining_writes = num_writes;
  Range::iterator iter = elems.range.begin();

  while (remaining)
  {
    VALGRIND_MAKE_MEM_UNDEFINED( dataBuffer, bufferSize );
    long count = chunk_size < remaining ? chunk_size : remaining;
    remaining -= count;

    Range::iterator next = iter;
    next += count;
    rval = writeUtil->get_element_connect( iter, next, elems.num_nodes,
                                         count * elems.num_nodes, buffer );
    CHK_MB_ERR_1(rval, elem_table, status);
    iter = next;

    for (long i = 0; i < count*nodes_per_elem; ++i) {
      buffer[i] = idMap.find( buffer[i] );
      if (0 == buffer[i]) {
        writeUtil->report_error("Invalid %s element connectivity. Write Aborted\n", elems.name() );
        CHK_MB_ERR_1(MB_FAILURE,elem_table,status);
      }
    }

    dbgOut.printf(3,"  writing node connectivity %ld of %ld, %ld values at %ld\n",
           num_writes - remaining_writes + 1, num_writes, count, offset );
    track.record_io( offset, count );
    mhdf_writeConnectivityWithOpt( elem_table, offset, count,
                                   id_type, buffer, writeProp, &status );
    CHK_MHDF_ERR_1(status, elem_table);

    offset += count;
    --remaining_writes;
  }

  // Do empty writes if necessary for parallel collective IO
  if (collectiveIO) {
    while (remaining_writes--) {
      assert(writeProp != H5P_DEFAULT);
      dbgOut.printf(3,"  writing (empty) connectivity chunk %ld of %ld.\n",
             num_writes - remaining_writes + 1, num_writes );
      mhdf_writeConnectivityWithOpt( elem_table, offset, 0, id_type, 0, writeProp, &status );
      CHK_MHDF_ERR_1(status, elem_table);
    }
  }

  mhdf_closeData( filePtr, elem_table, &status );
  CHK_MHDF_ERR_0(status);

  track.all_reduce();
  return MB_SUCCESS;
}

ErrorCode WriteHDF5::get_set_info( EntityHandle set,
                                     long& num_entities,
                                     long& num_children,
                                     long& num_parents,
                                     unsigned long& flags )
{
  ErrorCode rval;
  int i;
  unsigned int u;

  rval = iFace->get_number_entities_by_handle( set, i, false );
  CHK_MB_ERR_0(rval);
  num_entities = i;

  rval = iFace->num_child_meshsets( set, &i );
  CHK_MB_ERR_0(rval);
  num_children = i;

  rval = iFace->num_parent_meshsets( set, &i );
  CHK_MB_ERR_0(rval);
  num_parents = i;

  rval = iFace->get_meshset_options( set, u );
  CHK_MB_ERR_0(rval);
  flags = u;

  return MB_SUCCESS;
}

ErrorCode WriteHDF5::write_set_data( const WriteUtilIface::EntityListType which_data,
                                     const hid_t handle,
                                     IODebugTrack& track,
                                     Range* ranged,
                                     Range* null_stripped,
                                     std::vector<long>* set_sizes )
{
  // ranged must be non-null for CONTENTS and null for anything else
  assert((which_data == WriteUtilIface::CONTENTS) == (0 != ranged));
  ErrorCode rval;
  mhdf_Status status;

  debug_barrier();

  // Function pointer used to write set data
  void (*write_func)( hid_t, long, long, hid_t, const void*, hid_t, mhdf_Status* );
  long max_vals; // max over all procs of number of values to write to data set
  long offset;   // offset in HDF5 dataset at which to write next block of data
  switch (which_data) {
    case WriteUtilIface::CONTENTS:
      assert(ranged != 0 && null_stripped != 0 && set_sizes != 0);
      write_func = &mhdf_writeSetDataWithOpt;
      max_vals = maxNumSetContents;
      offset = setContentsOffset;
      dbgOut.print(2, "Writing set contents\n" );
      break;
    case WriteUtilIface::CHILDREN:
      assert(!ranged && !null_stripped && !set_sizes);
      write_func = &mhdf_writeSetParentsChildrenWithOpt;
      max_vals = maxNumSetChildren;
      offset = setChildrenOffset;
      dbgOut.print(2, "Writing set child lists\n" );
      break;
    case WriteUtilIface::PARENTS:
      assert(!ranged && !null_stripped && !set_sizes);
      write_func = &mhdf_writeSetParentsChildrenWithOpt;
      max_vals = maxNumSetParents;
      offset = setParentsOffset;
      dbgOut.print(2, "Writing set parent lists\n" );
      break;
    default:
      assert(false);
      return MB_FAILURE;
  }
  //assert(max_vals > 0); // should have skipped this function otherwise

  // buffer to use for IO
  id_t* buffer = reinterpret_cast<id_t*>(dataBuffer);
  // number of handles that will fit in the buffer
  const size_t buffer_size = bufferSize / sizeof(EntityHandle);
  // the total number of write calls that must be made, including no-ops for collective io
  const size_t num_total_writes = (max_vals + buffer_size-1)/buffer_size;

  std::vector<SpecialSetData>::iterator si = specialSets.begin();

  std::vector<id_t> remaining; // data left over from prev iteration because it didn't fit in buffer
  size_t remaining_offset = 0; // avoid erasing from front of 'remaining'
  const EntityHandle* remaining_ptr = 0; // remaining for non-ranged data
  size_t remaining_count = 0;
  const id_t* special_rem_ptr = 0;
  Range::const_iterator i = setSet.range.begin(), j, rhint, nshint;
  if (ranged) rhint = ranged->begin();
  if (null_stripped) nshint = null_stripped->begin();
  for (size_t w = 0; w < num_total_writes; ++w) {
    if (i == setSet.range.end() && !remaining.empty() && !remaining_ptr) {
        // If here, then we've written everything but we need to
        // make more write calls because we're doing collective IO
        // in parallel
      (*write_func)( handle, 0, 0, id_type, 0, writeProp, &status );
      CHK_MHDF_ERR_0( status );
      continue;
    }

      // If we had some left-over data from a range-compacted set
      // from the last iteration, add it to the buffer now
    size_t count = 0;
    if (!remaining.empty()) {
      count = remaining.size() - remaining_offset;
      if (count > buffer_size) {
        memcpy( buffer, &remaining[remaining_offset], buffer_size*sizeof(id_t) );
        count = buffer_size;
        remaining_offset += buffer_size;
      }
      else {
        memcpy( buffer, &remaining[remaining_offset], count*sizeof(id_t) );
        remaining_offset = 0;
        remaining.clear();
      }
    }
      // If we had some left-over data from a non-range-compacted set
      // from the last iteration, add it to the buffer now
    else if (remaining_ptr) {
      if (remaining_count > buffer_size) {
        rval = vector_to_id_list( remaining_ptr, buffer, buffer_size );
        CHK_MB_ERR_0(rval);
        count = buffer_size;
        remaining_ptr += count;
        remaining_count -= count;
      }
      else {
        rval = vector_to_id_list( remaining_ptr, buffer, remaining_count );
        CHK_MB_ERR_0(rval);
        count = remaining_count;
        remaining_ptr = 0;
        remaining_count = 0;
      }
    }
      // If we had some left-over data from a "special" (i.e. parallel shared)
      // set.
    else if (special_rem_ptr) {
      if (remaining_count > buffer_size) {
        memcpy( buffer, special_rem_ptr, buffer_size*sizeof(id_t) );
        count = buffer_size;
        special_rem_ptr += count;
        remaining_count -= count;
      }
      else {
        memcpy( buffer, special_rem_ptr, remaining_count*sizeof(id_t) );
        count = remaining_count;
        special_rem_ptr = 0;
        remaining_count = 0;
      }
    }

      // While there is both space remaining in the buffer and
      // more sets to write, append more set data to buffer.

    while (count < buffer_size && i != setSet.range.end()) {

        // Special case for "special" (i.e. parallel shared) sets:
        // we already have the data in a vector, just copy it.
      if (si != specialSets.end() && si->setHandle == *i) {
        std::vector<id_t>& list =
          (which_data == WriteUtilIface::CONTENTS) ? si->contentIds :
          (which_data == WriteUtilIface::PARENTS ) ? si->parentIds  :
                                                     si->childIds   ;
        size_t append = list.size();
        if (count + list.size() > buffer_size) {
          append = buffer_size - count;
          special_rem_ptr = &list[append];
          remaining_count = list.size() - append;
        }
        memcpy( buffer+count, &list[0], append*sizeof(id_t) );
        ++i;
        ++si;
        count += append;
        continue;
      }

      j = i; ++i;
      const EntityHandle* ptr;
      int len;
      unsigned char flags;
      rval = writeUtil->get_entity_list_pointers( j, i, &ptr, which_data, &len, &flags );
      if (MB_SUCCESS != rval) return rval;
      if (which_data == WriteUtilIface::CONTENTS && !(flags&MESHSET_ORDERED)) {
        bool compacted;
        remaining.clear();
        if (len == 0) {
          compacted = false;
        }
        else {
          assert(!(len%2));
          rval = range_to_blocked_list( ptr, len/2, remaining, compacted );
          if (MB_SUCCESS != rval) return rval;
        }
        if (compacted) {
          rhint = ranged->insert( rhint, *j );
          set_sizes->push_back( remaining.size() );
        }
        else if (remaining.size() != (unsigned)len) {
          nshint = null_stripped->insert( nshint, *j );
          set_sizes->push_back( remaining.size() );
        }

        if (count + remaining.size() <= buffer_size) {
          memcpy( buffer + count, &remaining[0], sizeof(id_t)*remaining.size() );
          count += remaining.size();
          remaining.clear();
          remaining_offset = 0;
        }
        else {
          remaining_offset = buffer_size - count;
          memcpy( buffer + count, &remaining[0], sizeof(id_t)*remaining_offset );
          count += remaining_offset;
        }
      }
      else {
        if (count + len > buffer_size) {
          size_t append = buffer_size - count;
          remaining_ptr = ptr + append;
          remaining_count = len - append;
          len = append;
        }

        rval = vector_to_id_list( ptr, buffer+count, len );
        CHK_MB_ERR_0(rval);
        count += len;
      }
    }

      // Write the buffer.
    (*write_func)( handle, offset, count, id_type, buffer, writeProp, &status );
    CHK_MHDF_ERR_0( status );
    track.record_io( offset, count );
    offset += count;
  }

  return MB_SUCCESS;
}
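// Contents encoding note: unordered sets whose contents compact well are
// written in mhdf's "ranged" format, a sequence of (start id, count) pairs
// flagged with mhdf_SET_RANGE_BIT in the set description table; everything
// else is written as a flat ID list with null handles stripped out.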
01330 
01331 ErrorCode WriteHDF5::write_sets( double* times )
01332 {
01333   mhdf_Status status;
01334   ErrorCode rval;
01335   long first_id, size;
01336   hid_t table;
01337   CpuTimer timer;
01338 
01339   CHECK_OPEN_HANDLES;
01340   /* If no sets, just return success */
01341   if (!writeSets)
01342     return MB_SUCCESS;
01343 
01344   debug_barrier();
01345   dbgOut.printf(2,"Writing %lu non-shared sets\n", (unsigned long)setSet.range.size() );
01346   dbgOut.print(3,"Non-shared sets", setSet.range );
01347   
01348     /* Write set parents */
01349   if (writeSetParents)
01350   {
01351     topState.start( "writing parent lists for local sets" );
01352     table = mhdf_openSetParents( filePtr, &size, &status );
01353     CHK_MHDF_ERR_0(status);
01354     IODebugTrack track( debugTrack, "SetParents", size );
01355     
01356     rval = write_set_data( WriteUtilIface::PARENTS, table, track );
01357     topState.end(rval);
01358     CHK_MB_ERR_1(rval,table,status);
01359     
01360     mhdf_closeData( filePtr, table, &status );
01361     CHK_MHDF_ERR_0(status);
01362    
01363     times[SET_PARENT] = timer.time_elapsed();
01364     track.all_reduce();
01365   }
01366   
01367     /* Write set children */
01368   if (writeSetChildren)
01369   {
01370     topState.start( "writing child lists for local sets" );
01371     table = mhdf_openSetChildren( filePtr, &size, &status );
01372     CHK_MHDF_ERR_0(status);
01373     IODebugTrack track( debugTrack, "SetChildren", size );
01374     
01375     rval = write_set_data( WriteUtilIface::CHILDREN, table, track );
01376     topState.end(rval);
01377     CHK_MB_ERR_1(rval,table,status);
01378     
01379     mhdf_closeData( filePtr, table, &status );
01380     CHK_MHDF_ERR_0(status);
01381    
01382     times[SET_CHILD] = timer.time_elapsed();
01383     track.all_reduce();
01384   }
01385   
01386     /* Write set contents */
01387   Range ranged_sets, null_stripped_sets;
01388   std::vector<long> set_sizes;
01389   if (writeSetContents) 
01390   {
01391     topState.start( "writing content lists for local sets" );
01392     table = mhdf_openSetData( filePtr, &size, &status );
01393     CHK_MHDF_ERR_0(status);
01394     IODebugTrack track( debugTrack, "SetContents", size );
01395     
01396     rval = write_set_data( WriteUtilIface::CONTENTS, table, track, 
01397                            &ranged_sets, &null_stripped_sets, &set_sizes );
01398     topState.end(rval);
01399     CHK_MB_ERR_1(rval,table,status);
01400     
01401     mhdf_closeData( filePtr, table, &status );
01402     CHK_MHDF_ERR_0(status);
01403    
01404     times[SET_CONTENT] = timer.time_elapsed();
01405     track.all_reduce();
01406   }
01407   assert( ranged_sets.size() + null_stripped_sets.size() == set_sizes.size() );
01408     
01409     /* Write set description table */
01410   
01411   debug_barrier();
01412   topState.start( "writing descriptions of local sets" );
01413   dbgOut.printf(2,"Writing %lu non-shared sets\n", (unsigned long)setSet.range.size() );
01414   dbgOut.print(3,"Non-shared sets", setSet.range );
01415   
01416     /* Open the table */
01417   table = mhdf_openSetMeta( filePtr, &size, &first_id, &status );
01418   CHK_MHDF_ERR_0(status);
01419   IODebugTrack track_meta( debugTrack, "SetMeta", size );
01420 
01421     /* Some debug stuff */
01422   debug_barrier();
01423   dbgOut.printf(2,"Writing %lu non-shared sets\n", (unsigned long)setSet.range.size() );
01424   dbgOut.print(3,"Non-shared sets", setSet.range );
01425    
01426     /* counts and buffers and such */
01427   mhdf_index_t* const buffer = reinterpret_cast<mhdf_index_t*>(dataBuffer);
01428   const size_t buffer_size = bufferSize / (4*sizeof(mhdf_index_t));
01429   const size_t num_local_writes = (setSet.range.size() + buffer_size - 1) / buffer_size;
01430   const size_t num_global_writes = (setSet.max_num_ents + buffer_size-1) / buffer_size;
01431   assert(num_local_writes <= num_global_writes);
01432   assert(num_global_writes > 0);
01433 
01434     /* data about sets for which number of handles written is
01435      * not the same as the number of handles in the set
01436      * (range-compacted or null handles stripped out)
01437      */
01438   Range::const_iterator i = setSet.range.begin();
01439   Range::const_iterator r = ranged_sets.begin();
01440   Range::const_iterator s = null_stripped_sets.begin();
01441   std::vector<mhdf_index_t>::const_iterator n = set_sizes.begin();
01442   assert(ranged_sets.size() + null_stripped_sets.size() == set_sizes.size());
01443 
01444     /* we write the end index for each list, rather than the count */
01445   mhdf_index_t prev_contents_end = setContentsOffset - 1;
01446   mhdf_index_t prev_children_end = setChildrenOffset - 1;
01447   mhdf_index_t prev_parents_end = setParentsOffset - 1;
01448   
01449     /* while there is more data to write */
01450   size_t offset = setSet.offset;
01451   std::vector<SpecialSetData>::const_iterator si = specialSets.begin();
01452   for (size_t w = 0; w < num_local_writes; ++w) {
01453       // get a buffer full of data
01454     size_t count = 0;
01455     while (count < buffer_size && i != setSet.range.end()) {
01456         // get set properties
01457       long num_ent, num_child, num_parent;
01458       unsigned long flags;
01459       if (si != specialSets.end() && si->setHandle == *i) {
01460         flags = si->setFlags;
01461         num_ent = si->contentIds.size();
01462         num_child = si->childIds.size();
01463         num_parent = si->parentIds.size();
01464         ++si;
01465         if (r != ranged_sets.end() && *i == *r) {
01466           assert(flags & mhdf_SET_RANGE_BIT);
01467           ++r;
01468           ++n;
01469         }
01470         else if (s != null_stripped_sets.end() && *i == *s) {
01471            ++s;
01472           ++n;
01473         }
01474       }
01475       else {
01476         assert(si == specialSets.end() || si->setHandle > *i);
01477       
01478           // get set properties
01479         rval = get_set_info( *i, num_ent, num_child, num_parent, flags );
01480         CHK_MB_ERR_1(rval, table,status);
01481 
01482           // check if size is something other than num handles in set
01483         if (r != ranged_sets.end() && *i == *r) {
01484           num_ent = *n;
01485           ++r;
01486           ++n;
01487           flags |= mhdf_SET_RANGE_BIT;
01488         }
01489         else if (s != null_stripped_sets.end() && *i == *s) {
01490           num_ent = *n;
01491           ++s;
01492           ++n;
01493         }
01494       }
01495 
01496         // put data in buffer
01497       mhdf_index_t* local = buffer + 4*count;
01498       prev_contents_end += num_ent;
01499       prev_children_end += num_child;
01500       prev_parents_end += num_parent;
01501       local[0] = prev_contents_end;
01502       local[1] = prev_children_end;
01503       local[2] = prev_parents_end;
01504       local[3] = flags;
01505       
01506         // iterate
01507       ++count;
01508       ++i;
01509     }
01510     
01511       // write the data
01512     mhdf_writeSetMetaWithOpt( table, offset, count, MHDF_INDEX_TYPE, buffer, writeProp, &status );
01513     CHK_MHDF_ERR_1(status, table);
01514     track_meta.record_io( offset, count );
01515     offset += count;
01516   }
01517   assert( r == ranged_sets.end() );
01518   assert( s == null_stripped_sets.end() );
01519   assert( n == set_sizes.end() );
01520 
01521     /* If this is a parallel write with collective IO, issue empty write
01522      * calls until the global maximum number of writes is reached, because
01523      * collective write calls must be made by every process. */
01524   for (size_t w = num_local_writes; w != num_global_writes; ++w) {
01525     mhdf_writeSetMetaWithOpt( table, 0, 0, MHDF_INDEX_TYPE, 0, writeProp, &status );
01526     CHK_MHDF_ERR_1(status, table);    
01527   }
01528   
01529   topState.end();
01530   mhdf_closeData( filePtr, table, &status );
01531   CHK_MHDF_ERR_0(status);
01532 
01533   times[SET_META] = timer.time_elapsed();
01534   track_meta.all_reduce();
01535 
01536   return MB_SUCCESS;
01537 }
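/* Illustrative sketch (not part of the writer): the pattern above - compute
 * both a local and a global write count, then issue empty writes until the
 * global count is reached - is what makes collective parallel I/O work when
 * processes hold different amounts of data.  Here write_chunk() is a
 * hypothetical stand-in for the mhdf_write* calls:
 *
 *   size_t num_local  = (local_count + chunk - 1) / chunk;  // ceil division
 *   size_t num_global = (global_max  + chunk - 1) / chunk;  // max over all procs
 *   for (size_t w = 0; w < num_global; ++w) {
 *     size_t n = (w < num_local) ? std::min(chunk, local_count - w*chunk) : 0;
 *     write_chunk(buffer, n);  // collective: every process must make this call
 *   }
 */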
01538 
01539 template <class HandleRangeIter> inline
01540 size_t count_num_handles( HandleRangeIter iter, HandleRangeIter end )
01541 {
01542   size_t result = 0;
01543   for (; iter != end; ++iter)
01544     result += iter->second - iter->first + 1;
01545   return result;
01546 }
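/* For example, given the handle pairs {(10,14), (20,20)}, count_num_handles
 * returns (14 - 10 + 1) + (20 - 20 + 1) = 6. */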
01547 
01548 template <class HandleRangeIter> inline
01549 ErrorCode range_to_id_list_templ( HandleRangeIter begin,
01550                             HandleRangeIter end,
01551                             const RangeMap<EntityHandle,WriteHDF5::id_t>& idMap,
01552                             WriteHDF5::id_t* array )
01553 {
01554   ErrorCode rval = MB_SUCCESS;
01555   RangeMap<EntityHandle,WriteHDF5::id_t>::iterator ri = idMap.begin();
01556   WriteHDF5::id_t* i = array;
01557   for (HandleRangeIter pi = begin; pi != end; ++pi) {
01558     EntityHandle h = pi->first;
01559     while (h <= pi->second) {
01560       ri = idMap.lower_bound( ri, idMap.end(), h );
01561       if (ri == idMap.end() || ri->begin > h) {
01562         rval = MB_ENTITY_NOT_FOUND;
01563         *i = 0; 
01564         ++i;
01565         ++h;
01566         continue;
01567       }
01568 
01569       id_t n = pi->second - h + 1;
01570       if (n > ri->count)
01571         n = ri->count;
01572 
01573       id_t id = ri->value + (h - ri->begin);
01574       for (id_t j = 0; j < n; ++i, ++j)
01575         *i = id + j;
01576       h += n;
01577     }
01578   }
01579   assert( i == array + count_num_handles(begin,end) );
01580   return rval;
01581 }
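/* Illustration of the idMap lookup above: an idMap entry {begin=100,
 * count=5, value=1} maps handles 100..104 to file ids 1..5, so a request
 * for handles 102..103 yields ids 3 and 4.  Handles with no idMap entry
 * are emitted as 0 and reported via MB_ENTITY_NOT_FOUND. */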
01582 
01583 template <class HandleRangeIter> inline
01584 ErrorCode range_to_blocked_list_templ( HandleRangeIter begin,
01585                                  HandleRangeIter end,
01586                                  const RangeMap<EntityHandle,WriteHDF5::id_t>& idMap,
01587                                  std::vector<WriteHDF5::id_t>& output_id_list,
01588                                  bool& ranged_list )
01589 {
01590   output_id_list.clear();
01591   if (begin == end) {
01592     ranged_list = false;
01593     return MB_SUCCESS;
01594   }
01595 
01596     // first try ranged format, but give up if we reach the 
01597     // non-range format size.
01598   RangeMap<EntityHandle,WriteHDF5::id_t>::iterator ri = idMap.begin();
01599   
01600   const size_t num_handles = count_num_handles( begin, end );
01601     // if we end up with more than this many range blocks, then
01602     // we're better off just writing the set as a simple list
01603   size_t pairs_remaining = num_handles / 2;
01604   for (HandleRangeIter pi = begin; pi != end; ++pi) {
01605     EntityHandle h = pi->first;
01606     while (h <= pi->second) {
01607       ri = idMap.lower_bound( ri, idMap.end(), h );
01608       if (ri == idMap.end() || ri->begin > h) {
01609         ++h;
01610         continue;
01611       }
01612 
01613       id_t n = pi->second - h + 1;
01614       if (n > ri->count)
01615         n = ri->count;
01616   
01617         // see if we can append it to the previous range
01618       id_t id = ri->value + (h - ri->begin);
01619       if (!output_id_list.empty() &&
01620           output_id_list[output_id_list.size()-2] + output_id_list.back() == id) {
01621         output_id_list.back() += n;
01622       }
01623   
01624         // If the allowed number of pairs is used up, the ranged format
01625         // cannot be smaller than a simple list, so fall back to list format.
01626       else if (!pairs_remaining) {
01627         ranged_list = false;
01628         output_id_list.resize( num_handles );
01629         range_to_id_list_templ( begin, end, idMap, &output_id_list[0] );
01630         output_id_list.erase( std::remove( output_id_list.begin(), 
01631                                            output_id_list.end(), 
01632                                            0u ), 
01633                               output_id_list.end() );
01634         return MB_SUCCESS;
01635       }
01636 
01637         // otherwise start a new (start_id, count) pair
01638       else {
01639         --pairs_remaining;
01640         output_id_list.push_back(id);
01641         output_id_list.push_back(n);
01642       }
01643       h += n;
01644     }
01645   }
01646   
01647   ranged_list = true;
01648   return MB_SUCCESS;
01649 }
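/* Worked example of the blocked ("ranged") format above: a set whose
 * contents map to file ids 7,8,9,10,42 is encoded as the (start, count)
 * pairs [7,4, 42,1] - four values instead of five.  Because each pair
 * costs two values, the encoding is abandoned once it would need more
 * than num_handles/2 pairs (pairs_remaining exhausted), and the set is
 * rewritten as a plain id list with unmapped (zero) ids stripped out. */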
01650 
01651 
01652 ErrorCode WriteHDF5::range_to_blocked_list( const Range& input_range,
01653                                             std::vector<id_t>& output_id_list, 
01654                                             bool& ranged_list )
01655 {
01656   return range_to_blocked_list_templ( input_range.const_pair_begin(),
01657                                       input_range.const_pair_end(),
01658                                       idMap, output_id_list, ranged_list );
01659 }
01660 
01661 ErrorCode WriteHDF5::range_to_blocked_list( const EntityHandle* array,
01662                                             size_t num_input_ranges,
01663                                             std::vector<id_t>& output_id_list, 
01664                                             bool& ranged_list )
01665 {
01666   // we assume this in the cast on the following line
01667   typedef std::pair<EntityHandle,EntityHandle> mtype;
01668   assert(sizeof(mtype) == 2*sizeof(EntityHandle));
01669   const mtype* arr = reinterpret_cast<const mtype*>(array);
01670   return range_to_blocked_list_templ( arr, arr+num_input_ranges,
01671                                       idMap, output_id_list, ranged_list );
01672 }
01673   
01674 
01675 ErrorCode WriteHDF5::range_to_id_list( const Range& range,
01676                                          id_t* array )
01677 {
01678   return range_to_id_list_templ( range.const_pair_begin(),
01679                                  range.const_pair_end(),
01680                                  idMap, array );
01681 }
01682 
01683 ErrorCode WriteHDF5::vector_to_id_list( const EntityHandle* input,
01684                                         size_t input_len,
01685                                         id_t* output,
01686                                         size_t& output_len,
01687                                         bool remove_zeros )
01688 {
01689   const EntityHandle* i_iter = input;
01690   const EntityHandle* i_end = input + input_len;
01691   id_t* o_iter = output;
01692   for (; i_iter != i_end; ++i_iter) {
01693     id_t id = idMap.find( *i_iter );
01694     if (!remove_zeros || id != 0) {
01695       *o_iter = id;
01696       ++o_iter;
01697     }
01698   }
01699   output_len = o_iter - output;
01700   return MB_SUCCESS;
01701 }
01702 
01703 ErrorCode WriteHDF5::vector_to_id_list( 
01704                                  const std::vector<EntityHandle>& input,
01705                                  std::vector<id_t>& output,
01706                                  bool remove_zeros )
01707 {
01708   output.resize( input.size() );
01709   size_t output_size = 0;
01710   ErrorCode rval = vector_to_id_list( &input[0], input.size(),
01711                                       &output[0], output_size,
01712                                       remove_zeros );
01713   output.resize( output_size );
01714   return rval;
01715 }
01716 
01717 ErrorCode WriteHDF5::vector_to_id_list( const EntityHandle* input,
01718                                         id_t* output,
01719                                         size_t count )
01720 {
01721   size_t output_len;
01722   return vector_to_id_list( input, count, output, output_len, false );
01723 }
01724 
01725 
01726 inline ErrorCode WriteHDF5::get_adjacencies( EntityHandle entity,
01727                                         std::vector<id_t>& adj )
01728 {
01729   const EntityHandle* adj_array;
01730   int num_adj;
01731   ErrorCode rval = writeUtil->get_adjacencies( entity, adj_array, num_adj );
01732   if (MB_SUCCESS != rval)
01733     return error(rval);
01734   
01735   size_t j = 0;
01736   adj.resize( num_adj );
01737   for (int i = 0; i < num_adj; ++i) 
01738     if (id_t id = idMap.find( adj_array[i] ))
01739       adj[j++] = id;
01740   adj.resize( j );
01741   return MB_SUCCESS;
01742 }
01743 
01744 
01745 ErrorCode WriteHDF5::write_adjacencies( const ExportSet& elements )
01746 {
01747   ErrorCode rval;
01748   mhdf_Status status;
01749   Range::const_iterator iter;
01750   const Range::const_iterator end = elements.range.end();
01751   std::vector<id_t> adj_list;
01752   
01753   CHECK_OPEN_HANDLES;
01754 
01755   debug_barrier();
01756   
01757   /* Count adjacencies (the count is returned by mhdf_openAdjacency below) */
01758   long count = 0;
01759   //for (iter = elements.range.begin(); iter != end; ++iter)
01760   //{
01761   //  adj_list.clear();
01762   //  rval = get_adjacencies( *iter, adj_list);
01763   //  CHK_MB_ERR_0(rval);
01764   //
01765   //  if (adj_list.size() > 0)
01766   //    count += adj_list.size() + 2;
01767   //}
01768   
01769   //if (count == 0)
01770   //  return MB_SUCCESS;
01771 
01772   long offset = elements.adj_offset;
01773   if (elements.max_num_adjs == 0)
01774     return MB_SUCCESS;
01775   
01776   /* Create data list */
01777   hid_t table = mhdf_openAdjacency( filePtr, elements.name(), &count, &status );
01778   CHK_MHDF_ERR_0(status);
01779   IODebugTrack track( debugTrack, "Adjacencies", count );
01780   
01781   /* Write data */
01782   id_t* buffer = (id_t*)dataBuffer;
01783   long chunk_size = bufferSize / sizeof(id_t); 
01784   long num_writes = (elements.max_num_adjs + chunk_size - 1)/chunk_size;
01785   VALGRIND_MAKE_MEM_UNDEFINED( dataBuffer, bufferSize );
01786   count = 0;
01787   for (iter = elements.range.begin(); iter != end; ++iter)
01788   {
01789     adj_list.clear();
01790     rval = get_adjacencies( *iter, adj_list );
01791     CHK_MB_ERR_1(rval, table, status);
01792     if (adj_list.size() == 0)
01793       continue;
01794     
01795       // If buffer is full, flush it
01796     if (count + adj_list.size() + 2 > (unsigned long)chunk_size)
01797     {
01798       dbgOut.print(3,"  writing adjacency chunk.\n");
01799       track.record_io( offset, count );
01800       mhdf_writeAdjacencyWithOpt( table, offset, count, id_type, buffer, writeProp, &status );
01801       CHK_MHDF_ERR_1(status, table);
01802       VALGRIND_MAKE_MEM_UNDEFINED( dataBuffer, bufferSize );
01803       
01804       offset += count;
01805       count = 0;
01806     }
01807     
01808     buffer[count++] = idMap.find( *iter );
01809     buffer[count++] = adj_list.size();
01810     
01811     assert(adj_list.size() + 2 <= (unsigned long)chunk_size);
01812     memcpy( buffer + count, &adj_list[0], adj_list.size() * sizeof(id_t) );
01813     count += adj_list.size();
01814   }
01815   
01816   if (count)
01817   {
01818     dbgOut.print(2,"  writing final adjacency chunk.\n");
01819     mhdf_writeAdjacencyWithOpt( table, offset, count, id_type, buffer, writeProp, &status );
01820     CHK_MHDF_ERR_1(status, table);
01821 
01822     offset += count;
01823     count = 0;
01824     --num_writes;
01825   }
01826 
01827   // Do empty writes if necessary for parallel collective IO
01828   if (collectiveIO) {
01829     while (num_writes > 0) {
01830       --num_writes;
01831       assert(writeProp != H5P_DEFAULT);
01832       dbgOut.print(2,"  writing empty adjacency chunk.\n");
01833       mhdf_writeAdjacencyWithOpt( table, offset, 0, id_type, 0, writeProp, &status );
01834       CHK_MHDF_ERR_1(status, table );
01835     }
01836   }
01837   
01838   mhdf_closeData( filePtr, table, &status );
01839   CHK_MHDF_ERR_0(status);
01840   
01841   track.all_reduce();
01842   return MB_SUCCESS;
01843 }
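/* File layout produced above: the adjacency table is a flat stream of
 * id_t values holding one variable-length record per entity that has
 * adjacencies:
 *
 *   [ entity_file_id, n, adj_id_1, ..., adj_id_n ][ next record ]...
 *
 * For example, an element with file id 12 adjacent to entities with file
 * ids 3 and 9 contributes the four values 12, 2, 3, 9; entities with no
 * adjacencies contribute nothing. */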
01844 
01845 ErrorCode WriteHDF5::write_tag( const TagDesc& tag_data,
01846                                 double* times )
01847 {
01848   std::string name;
01849   ErrorCode rval = iFace->tag_get_name( tag_data.tag_id, name );
01850   if (MB_SUCCESS != rval)
01851     return error(rval);
01852 
01853   CHECK_OPEN_HANDLES;
01854   debug_barrier();
01855   dbgOut.tprintf( 1, "Writing tag: \"%s\"\n", name.c_str() );
01856  
01857   int moab_size, elem_size, array_len;
01858   DataType moab_type;
01859   mhdf_TagDataType mhdf_type;
01860   hid_t hdf5_type;
01861   rval = get_tag_size( tag_data.tag_id, moab_type, moab_size, elem_size,
01862                        array_len, mhdf_type, hdf5_type );
01863   if (MB_SUCCESS != rval)
01864     return error(rval);
01865 
01866   CpuTimer timer;
01867   if (array_len == MB_VARIABLE_LENGTH && tag_data.write_sparse) {
01868     dbgOut.printf( 2, "Writing sparse data for var-len tag: \"%s\"\n", name.c_str() );
01869     rval = write_var_len_tag( tag_data, name, moab_type, hdf5_type, elem_size );
01870     times[VARLEN_TAG_TIME] += timer.time_elapsed();
01871   }
01872   else {
01873     int data_len = elem_size;
01874     if (moab_type != MB_TYPE_BIT)
01875       data_len *= array_len;
01876     if (tag_data.write_sparse) {
01877       dbgOut.printf( 2, "Writing sparse data for tag: \"%s\"\n", name.c_str() );
01878       rval = write_sparse_tag( tag_data, name, moab_type, hdf5_type, data_len );
01879       times[SPARSE_TAG_TIME] += timer.time_elapsed();
01880     }
01881     for (size_t i = 0; MB_SUCCESS == rval && i < tag_data.dense_list.size(); ++i) {
01882       const ExportSet* set = find( tag_data.dense_list[i] );
01883       assert(0 != set);
01884       debug_barrier();
01885       dbgOut.printf( 2, "Writing dense data for tag: \"%s\" on group \"%s\"\n", name.c_str(), set->name() );
01886       subState.start( "writing dense data for tag: ", (name + ":" + set->name()).c_str() );
01887       rval = write_dense_tag( tag_data, *set, name, moab_type, hdf5_type, data_len );
01888       subState.end(rval);
01889     }
01890     times[DENSE_TAG_TIME] += timer.time_elapsed();
01891   }
01892  
01893   H5Tclose( hdf5_type );
01894   return MB_SUCCESS == rval ? MB_SUCCESS : error(rval);
01895 }
01896 
01897 ErrorCode WriteHDF5::write_sparse_ids( const TagDesc& tag_data,
01898                                        const Range& range,
01899                                        hid_t id_table,
01900                                        size_t table_size,
01901                                        const char* name )
01902 {
01903   ErrorCode rval;
01904   mhdf_Status status;
01905 
01906   CHECK_OPEN_HANDLES;
01907 
01908   std::string tname(name ? name : "<UNKNOWN TAG?>");
01909   tname += " - Ids";
01910   IODebugTrack track( debugTrack, tname, table_size );
01911 
01912     // Set up data buffer for writing IDs
01913   size_t chunk_size = bufferSize / sizeof(id_t);
01914   id_t* id_buffer = (id_t*)dataBuffer;
01915   
01916     // Write IDs of tagged entities.
01917   long remaining = range.size();
01918   long offset = tag_data.sparse_offset;
01919   long num_writes = (remaining + chunk_size - 1)/chunk_size;
01920   if (tag_data.max_num_ents) {
01921     assert(tag_data.max_num_ents >= (unsigned long)remaining);
01922     num_writes = (tag_data.max_num_ents + chunk_size - 1)/chunk_size;
01923   }
01924   Range::const_iterator iter = range.begin();
01925   while (remaining)
01926   {
01927     VALGRIND_MAKE_MEM_UNDEFINED( dataBuffer, bufferSize );
01928 
01929       // write "chunk_size" blocks of data
01930     long count = (unsigned long)remaining > chunk_size ? chunk_size : remaining;
01931     remaining -= count;
01932     Range::const_iterator stop = iter;
01933     stop += count;
01934     Range tmp;
01935     tmp.merge( iter, stop );
01936     iter = stop;
01937     assert(tmp.size() == (unsigned)count);
01938     
01939     rval = range_to_id_list( tmp, id_buffer );
01940     CHK_MB_ERR_0( rval );
01941     
01942       // write the data
01943     dbgOut.print(3,"  writing sparse tag entity chunk.\n");
01944     track.record_io( offset, count );
01945     mhdf_writeSparseTagEntitiesWithOpt( id_table, offset, count, id_type, 
01946                                         id_buffer, writeProp, &status );
01947     CHK_MHDF_ERR_0( status );
01948    
01949     offset += count;
01950     --num_writes;
01951   } // while (remaining)
01952 
01953   // Do empty writes if necessary for parallel collective IO
01954   if (collectiveIO) {
01955     while (num_writes--) {
01956       assert(writeProp != H5P_DEFAULT);
01957       dbgOut.print(3,"  writing empty sparse tag entity chunk.\n");
01958       mhdf_writeSparseTagEntitiesWithOpt( id_table, offset, 0, id_type, 
01959                                           0, writeProp, &status );
01960       CHK_MHDF_ERR_0( status );
01961     }
01962   }
01963   
01964   track.all_reduce();
01965   return MB_SUCCESS;
01966 }
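/* Example of the num_writes bookkeeping above: with room for 1000 ids per
 * chunk, a process holding 1500 tagged entities performs
 * ceil(1500/1000) = 2 data writes.  If the largest per-process count is
 * max_num_ents = 3500, all processes must participate in
 * ceil(3500/1000) = 4 collective calls, so this process issues 2 empty
 * writes in the trailing collectiveIO loop. */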
01967 
01968 ErrorCode WriteHDF5::write_sparse_tag( const TagDesc& tag_data,
01969                                        const std::string& name,
01970                                        DataType mb_data_type,
01971                                        hid_t value_type,
01972                                        int   value_type_size )
01973 {
01974   ErrorCode rval;
01975   mhdf_Status status;
01976   hid_t tables[3];
01977   long table_size, data_size;
01978   
01979   CHECK_OPEN_HANDLES;
01980 
01981     // get entities for which to write tag values
01982   Range range;
01983   rval = get_sparse_tagged_entities( tag_data, range );
01984   CHK_MB_ERR_0(rval);
01985     //open tables to write info
01986   mhdf_openSparseTagData( filePtr,
01987                           name.c_str(),
01988                           &table_size,
01989                           &data_size,
01990                           tables,
01991                           &status);
01992   CHK_MHDF_ERR_0(status);
01993   assert( range.size() + tag_data.sparse_offset <= (unsigned long)table_size );
01994     // fixed-length tag
01995   assert( table_size == data_size );
01996 
01997     // Write IDs for tagged entities
01998   subState.start( "writing sparse ids for tag: ", name.c_str() );
01999   rval = write_sparse_ids( tag_data, range, tables[0], table_size, name.c_str() );
02000   subState.end(rval);
02001   CHK_MB_ERR_2( rval, tables, status );
02002   mhdf_closeData( filePtr, tables[0], &status );
02003   CHK_MHDF_ERR_1(status, tables[1]);
02004   
02005     // Set up data buffer for writing tag values
02006   IODebugTrack track( debugTrack, name + " Data", data_size );
02007   subState.start( "writing sparse values for tag: ", name.c_str() );
02008   rval = write_tag_values( tag_data.tag_id,
02009                            tables[1], 
02010                            tag_data.sparse_offset,
02011                            range,
02012                            mb_data_type,
02013                            value_type,
02014                            value_type_size,
02015                            tag_data.max_num_ents,
02016                            track );
02017   subState.end(rval);
02018   mhdf_closeData( filePtr, tables[1], &status );
02019   CHK_MB_ERR_0(rval);
02020   CHK_MHDF_ERR_0(status);
02021   
02022   track.all_reduce();
02023   return MB_SUCCESS;
02024 }
02025 
02026 
02027 ErrorCode WriteHDF5::write_var_len_indices( const TagDesc& tag_data,
02028                                             const Range& range,
02029                                             hid_t idx_table,
02030                                             size_t table_size,
02031                                             int /*type_size*/,
02032                                             const char* name )
02033 {
02034   ErrorCode rval;
02035   mhdf_Status status;
02036 
02037   CHECK_OPEN_HANDLES;
02038 
02039   std::string tname(name ? name : "<UNKNOWN TAG?>");
02040   tname += " - End Indices";
02041   IODebugTrack track( debugTrack, tname, table_size );
02042 
02043     // Set up data buffer for writing indices
02044   size_t chunk_size = bufferSize / (std::max(sizeof(void*),sizeof(long)) + sizeof(int));
02045   mhdf_index_t* idx_buffer = (mhdf_index_t*)dataBuffer;
02046   const void** junk = (const void**)dataBuffer;
02047   int* size_buffer = (int*)(dataBuffer + chunk_size*std::max(sizeof(void*),sizeof(mhdf_index_t)));
02048   
02049     // Write IDs of tagged entities.
02050   long data_offset = tag_data.var_data_offset - 1; // running end index: one before the end of the last value written
02051   size_t remaining = range.size();
02052   size_t offset = tag_data.sparse_offset;
02053   size_t num_writes = (remaining + chunk_size - 1)/chunk_size;
02054   if (tag_data.max_num_ents) {
02055     assert(tag_data.max_num_ents >= (unsigned long)remaining);
02056     num_writes = (tag_data.max_num_ents + chunk_size - 1)/chunk_size;
02057   }
02058   Range::const_iterator iter = range.begin();
02059   while (remaining)
02060   {
02061     VALGRIND_MAKE_MEM_UNDEFINED( dataBuffer, bufferSize );
02062 
02063       // write "chunk_size" blocks of data
02064     size_t count = remaining > chunk_size ? chunk_size : remaining;
02065     remaining -= count;
02066     Range::const_iterator stop = iter;
02067     stop += count;
02068     Range tmp;
02069     tmp.merge( iter, stop );
02070     iter = stop;
02071     assert(tmp.size() == (unsigned)count);
02072     
02073     rval = iFace->tag_get_by_ptr( tag_data.tag_id, tmp, junk, size_buffer );
02074     CHK_MB_ERR_0( rval );
02075     
02076       // calculate end indices
02077     dbgOut.print(3,"  writing var-len tag offset chunk.\n");
02078     track.record_io( offset, count );
02079     for (size_t i = 0; i < count; ++i) {
02080       data_offset += size_buffer[i];
02081       idx_buffer[i] = data_offset;
02082     }
02083     
02084       // write
02085     mhdf_writeSparseTagIndicesWithOpt( idx_table, offset, count, MHDF_INDEX_TYPE, 
02086                                        idx_buffer, writeProp, &status );
02087     CHK_MHDF_ERR_0( status );
02088    
02089     offset += count;
02090     --num_writes;
02091   } // while (remaining)
02092 
02093   // Do empty writes if necessary for parallel collective IO
02094   if (collectiveIO) {
02095     while (num_writes--) {
02096       assert(writeProp != H5P_DEFAULT);
02097       dbgOut.print(3,"  writing empty var-len tag index chunk.\n");
02098       mhdf_writeSparseTagIndicesWithOpt( idx_table, offset, 0, MHDF_INDEX_TYPE, 
02099                                          0, writeProp, &status );
02100       CHK_MHDF_ERR_0( status );
02101     }
02102   }
02103   
02104   track.all_reduce();
02105   return MB_SUCCESS;
02106 }
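/* Worked example of the end indices written above: if this process writes
 * values starting at var_data_offset = 10 and its tagged entities have
 * value counts 3, 1, and 4, then data_offset starts at 9 and the stored
 * end indices are 12, 13, and 17.  A reader recovers each count by
 * subtracting the preceding end index. */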
02107 
02108 ErrorCode WriteHDF5::write_var_len_data( const TagDesc& tag_data,
02109                                          const Range& range,
02110                                          hid_t table,
02111                                          size_t table_size,
02112                                          bool handle_tag,
02113                                          hid_t hdf_type,
02114                                          int type_size,
02115                                          const char* name )
02116 {
02117   ErrorCode rval;
02118   mhdf_Status status;
02119 
02120   CHECK_OPEN_HANDLES;
02121   assert(!handle_tag || sizeof(EntityHandle) == type_size);
02122   
02123   std::string tname(name ? name : "<UNKNOWN TAG?>");
02124   tname += " - Values";
02125   IODebugTrack track( debugTrack, tname, table_size );
02126   
02127   const size_t buffer_size = bufferSize / type_size;
02128   
02129   size_t num_writes = (table_size + buffer_size - 1) / buffer_size;
02130   if (collectiveIO) {
02131     assert(tag_data.max_num_vals > 0);
02132     num_writes = (tag_data.max_num_vals + buffer_size - 1) / buffer_size;
02133   }
02134   
02135   unsigned char* buffer = (unsigned char*)dataBuffer;
02136   const void* prev_data = 0; // data left over from prev iteration
02137   size_t prev_len = 0;
02138   Range::const_iterator iter = range.begin();
02139   long offset = tag_data.var_data_offset;    
02140   while (prev_data || iter != range.end()) {
02141     size_t count = 0;
02142     if (prev_data) {
02143       size_t len;
02144       const void* ptr = prev_data;
02145       if (prev_len <= buffer_size) {
02146         len = prev_len;
02147         prev_data = 0;
02148         prev_len = 0;
02149       }
02150       else {
02151         len = buffer_size;
02152         prev_data = ((const char*)prev_data) + buffer_size*type_size;
02153         prev_len -= buffer_size;
02154       }
02155       if (handle_tag) 
02156         convert_handle_tag( (const EntityHandle*)ptr, (EntityHandle*)buffer, len );
02157       else
02158         memcpy( buffer, ptr, len * type_size );
02159       count = len; // carried-over data now occupies the front of the buffer
02160     }
02161   
02162     for ( ; count < buffer_size && iter != range.end(); ++iter) {
02163       int len;
02164       const void* ptr;
02165       rval = iFace->tag_get_by_ptr( tag_data.tag_id, &*iter, 1, &ptr, &len );
02166       CHK_MB_ERR_0(rval);
02167         // if the value does not fit, copy what fits and save the tail for the next pass
02168       if (len + count > buffer_size) {
02169         prev_len = len + count - buffer_size;
02170         len = buffer_size - count;
02171         prev_data = ((const char*)ptr) + len*type_size;
02172       }
02173 
02174       if (handle_tag) 
02175         convert_handle_tag( (const EntityHandle*)ptr, ((EntityHandle*)buffer) + count, len );
02176       else
02177         memcpy( buffer + count*type_size, ptr, len * type_size );
02178       count += len;
02179     }
02180     
02181     track.record_io( offset, count );
02182     mhdf_writeTagValuesWithOpt( table, offset, count, hdf_type, buffer, writeProp, &status );
02183     CHK_MHDF_ERR_0(status);
02184     offset += count; --num_writes;
02185   }
02186 
02187   // Do empty writes if necessary for parallel collective IO
02188   if (collectiveIO) {
02189     while (num_writes--) {
02190       assert(writeProp != H5P_DEFAULT);
02191       dbgOut.print(3,"  writing empty var-len tag data chunk.\n");
02192       mhdf_writeTagValuesWithOpt( table, 0, 0, hdf_type, 0, writeProp, &status );
02193       CHK_MHDF_ERR_0( status );
02194     }
02195   }
02196   
02197   track.all_reduce();
02198   return MB_SUCCESS;
02199 }
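/* The prev_data / prev_len pair above lets a single variable-length value
 * span buffer flushes.  Sketch of the hand-off, assuming a buffer that
 * holds 100 values: if 95 slots are filled and the next value has 12
 * entries, then 5 entries are copied now (len = 100 - 95), prev_len
 * becomes 7, and prev_data points at the 6th entry; the next pass through
 * the outer loop drains prev_data before fetching more entities. */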
02200 
02201 ErrorCode WriteHDF5::write_var_len_tag( const TagDesc& tag_data,
02202                                         const std::string& name,
02203                                         DataType mb_data_type,
02204                                         hid_t hdf_type,
02205                                         int type_size )
02206 {
02207   ErrorCode rval;
02208   mhdf_Status status;
02209   hid_t tables[3];
02210   long table_size;
02211   long data_table_size;
02212   
02213   CHECK_OPEN_HANDLES;
02214 
02215     // get entities for which to write tag values
02216   Range range;
02217   rval = get_sparse_tagged_entities( tag_data, range );
02218   CHK_MB_ERR_0(rval);
02219     //open tables to write info
02220   mhdf_openSparseTagData( filePtr,
02221                           name.c_str(),
02222                           &table_size,
02223                           &data_table_size,
02224                           tables,
02225                           &status);
02226   CHK_MHDF_ERR_0(status);
02227   assert( range.size() + tag_data.sparse_offset <= (unsigned long)table_size );
02228 
02229     // Write IDs for tagged entities
02230   subState.start( "writing ids for var-len tag: ", name.c_str() );
02231   rval = write_sparse_ids( tag_data, range, tables[0], table_size, name.c_str() );
02232   subState.end(rval);
02233   CHK_MB_ERR_2( rval, tables, status );
02234   mhdf_closeData( filePtr, tables[0], &status );
02235   CHK_MHDF_ERR_2(status, tables + 1);
02236 
02237     // Write offsets for tagged entities
02238   subState.start( "writing indices for var-len tag: ", name.c_str() );
02239   rval = write_var_len_indices( tag_data, range, tables[2], table_size, type_size, name.c_str() );
02240   subState.end(rval);
02241   mhdf_closeData( filePtr, tables[2], &status );
02242   CHK_MB_ERR_1( rval, tables[1], status );
02243   CHK_MHDF_ERR_1(status, tables[1]);
02244 
02245     // Write the actual tag data
02246   subState.start( "writing values for var-len tag: ", name.c_str() );
02247   rval = write_var_len_data( tag_data, range, tables[1], data_table_size, 
02248                              mb_data_type == MB_TYPE_HANDLE,
02249                              hdf_type, type_size, name.c_str() );
02250   subState.end(rval);
02251   mhdf_closeData( filePtr, tables[1], &status );
02252   CHK_MB_ERR_0( rval );
02253   CHK_MHDF_ERR_0(status);
02254   
02255   return MB_SUCCESS;
02256 }
02257 
02258 
02259 ErrorCode WriteHDF5::write_dense_tag( const TagDesc& tag_data,
02260                                       const ExportSet& elem_data,
02261                                       const std::string& name,
02262                                       DataType mb_data_type,
02263                                       hid_t value_type,
02264                                       int value_type_size )
02265 {
02266   CHECK_OPEN_HANDLES;
02267 
02268     //open tables to write info
02269   mhdf_Status status;
02270   long table_size;
02271   hid_t table = mhdf_openDenseTagData( filePtr,
02272                                        name.c_str(),
02273                                        elem_data.name(),
02274                                        &table_size,
02275                                        &status);
02276   CHK_MHDF_ERR_0(status);
02277   assert( elem_data.range.size() + elem_data.offset <= (unsigned long)table_size );
02278  
02279   IODebugTrack track( debugTrack, name + " " + elem_data.name() + " Data", table_size );
02280   ErrorCode rval = write_tag_values( tag_data.tag_id, 
02281                                      table, 
02282                                      elem_data.offset,
02283                                      elem_data.range,
02284                                      mb_data_type,
02285                                      value_type,
02286                                      value_type_size,
02287                                      elem_data.max_num_ents,
02288                                      track );
02289   mhdf_closeData( filePtr, table, &status );
02290   CHK_MB_ERR_0(rval);
02291   CHK_MHDF_ERR_0(status);
02292   return MB_SUCCESS;
02293 }
02294   
02295 ErrorCode WriteHDF5::write_tag_values( Tag tag_id,
02296                                        hid_t data_table,
02297                                        unsigned long offset_in,
02298                                        const Range& range_in,
02299                                        DataType mb_data_type,
02300                                        hid_t value_type,
02301                                        int   value_type_size,
02302                                        unsigned long max_num_ents,
02303                                        IODebugTrack& track )
02304 {
02305   mhdf_Status status;
02306 
02307   CHECK_OPEN_HANDLES;
02308  
02309     // Set up data buffer for writing tag values
02310   size_t chunk_size = bufferSize / value_type_size;
02311   assert( chunk_size > 0 );
02312   char* tag_buffer = (char*)dataBuffer;
02313   
02314     // Write the tag values
02315   size_t remaining = range_in.size();
02316   size_t offset = offset_in;
02317   Range::const_iterator iter = range_in.begin();
02318   long num_writes = (remaining + chunk_size - 1)/chunk_size;
02319   if (max_num_ents) {
02320     assert( max_num_ents >= remaining );
02321     num_writes = (max_num_ents + chunk_size - 1)/chunk_size;
02322   }
02323   while (remaining)
02324   {
02325     VALGRIND_MAKE_MEM_UNDEFINED( dataBuffer, bufferSize );
02326  
02327       // write "chunk_size" blocks of data
02328     long count = (unsigned long)remaining > chunk_size ? chunk_size : remaining;
02329     remaining -= count;
02330     memset( tag_buffer, 0, count * value_type_size );
02331     Range::const_iterator stop = iter;
02332     stop += count;
02333     Range range;
02334     range.merge( iter, stop );
02335     iter = stop;
02336     assert(range.size() == (unsigned)count);
02337  
02338     ErrorCode rval = iFace->tag_get_data( tag_id, range, tag_buffer );
02339     CHK_MB_ERR_0(rval);
02340     
02341       // Convert EntityHandles to file ids
02342     if (mb_data_type == MB_TYPE_HANDLE)
02343       convert_handle_tag( reinterpret_cast<EntityHandle*>(tag_buffer), 
02344                           count * value_type_size / sizeof(EntityHandle) );
02345     
02346       // write the data
02347     dbgOut.print(2,"  writing tag value chunk.\n");
02348     track.record_io( offset, count );
02349     assert(value_type > 0);
02350     mhdf_writeTagValuesWithOpt( data_table, offset, count,
02351                                 value_type, tag_buffer, writeProp, &status );
02352     CHK_MHDF_ERR_0(status);
02353    
02354     offset += count;
02355     --num_writes;
02356   } // while (remaining)
02357 
02358   // Do empty writes if necessary for parallel collective IO
02359   if (collectiveIO) {
02360     while (num_writes--) {
02361       assert(writeProp != H5P_DEFAULT);
02362       dbgOut.print(2,"  writing empty tag value chunk.\n");
02363       assert(value_type > 0);
02364       mhdf_writeTagValuesWithOpt( data_table, offset, 0,
02365                                   value_type, 0, writeProp, &status );
02366       CHK_MHDF_ERR_0( status );
02367     }
02368   }
02369   
02370   track.all_reduce();
02371   return MB_SUCCESS;
02372 }
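/* Note on the MB_TYPE_HANDLE branch above: a handle tag stores
 * EntityHandle values, so a chunk of `count` entities contains
 * count * value_type_size / sizeof(EntityHandle) handles, each of which
 * convert_handle_tag remaps from an internal handle to its file id
 * before the chunk is written. */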
02373 
02374 ErrorCode WriteHDF5::write_qa( const std::vector<std::string>& list )
02375 {
02376   const char* app = "MOAB";
02377   const char* vers = MB_VERSION;
02378   char date_str[64];
02379   char time_str[64];
02380   
02381   CHECK_OPEN_HANDLES;
02382 
02383   std::vector<const char*> strs(list.size() ? list.size() : 4);
02384   if (list.size() == 0)
02385   {
02386     time_t t = time(NULL);
02387     tm* lt = localtime( &t );
02388     strftime( date_str, sizeof(date_str), "%D", lt );
02389     strftime( time_str, sizeof(time_str), "%T", lt );
02390     
02391     strs[0] = app;
02392     strs[1] = vers;
02393     strs[2] = date_str;
02394     strs[3] = time_str;
02395   }
02396   else
02397   {
02398     for (unsigned int i = 0; i < list.size(); ++i)
02399       strs[i] = list[i].c_str();
02400   }
02401   
02402   mhdf_Status status;
02403   dbgOut.print(2,"  writing QA history.\n");
02404   mhdf_writeHistory( filePtr, &strs[0], strs.size(), &status );
02405   CHK_MHDF_ERR_0(status);
02406   
02407   return MB_SUCCESS;
02408 }
02409 
02410 /*
02411 ErrorCode WriteHDF5::register_known_tag_types( Interface* iface )
02412 {
02413   hid_t int4, double16;
02414   hsize_t dim[1];
02415   int error = 0;
02416   ErrorCode rval;
02417   
02418   dim[0] = 4;
02419   int4 = H5Tarray_create( H5T_NATIVE_INT, 1, dim, NULL );
02420   
02421   dim[0] = 16;
02422   double16 = H5Tarray_create( H5T_NATIVE_DOUBLE, 1, dim, NULL );
02423   
02424   if (int4 < 0 || double16 < 0)
02425     error = 1;
02426   
02427   struct { const char* name; hid_t type; } list[] = {
02428     { GLOBAL_ID_TAG_NAME, H5T_NATIVE_INT } ,
02429     { MATERIAL_SET_TAG_NAME, H5T_NATIVE_INT },
02430     { DIRICHLET_SET_TAG_NAME, H5T_NATIVE_INT },
02431     { NEUMANN_SET_TAG_NAME, H5T_NATIVE_INT },
02432     { HAS_MID_NODES_TAG_NAME, int4 },
02433     { GEOM_DIMENSION_TAG_NAME, H5T_NATIVE_INT },
02434     { MESH_TRANSFORM_TAG_NAME, double16 },
02435     { 0, 0 } };
02436   
02437   for (int i = 0; list[i].name; ++i)
02438   {
02439     if (list[i].type < 1)
02440       { ++error; continue; }
02441     
02442     Tag handle;
02443     
02444     std::string name("__hdf5_tag_type_");
02445     name += list[i].name;
02446     
02447     rval = iface->tag_get_handle( name.c_str(), handle );
02448     if (MB_TAG_NOT_FOUND == rval)
02449     {
02450       rval = iface->tag_create( name.c_str(), sizeof(hid_t), MB_TAG_SPARSE, handle, NULL );
02451       if (MB_SUCCESS != rval)
02452         { ++error; continue; }
02453       
02454       hid_t copy_id = H5Tcopy( list[i].type );
02455       const EntityHandle mesh = 0;
02456       rval = iface->tag_set_data( handle, &mesh, 1, &copy_id );
02457       if (MB_SUCCESS != rval)
02458         { ++error; continue; }
02459     }
02460   }
02461   
02462   H5Tclose( int4 );
02463   H5Tclose( double16 );
02464   return error ? MB_FAILURE : MB_SUCCESS;
02465 }
02466 */
02467 
02468 ErrorCode WriteHDF5::gather_tags( const Tag* user_tag_list, int num_tags )
02469 {
02470   ErrorCode result;
02471   std::string tagname;
02472   std::vector<Tag> tag_list;
02473   std::vector<Tag>::iterator t_itor;
02474   Range range;
02475     
02476     // Get list of Tags to write
02477   result = writeUtil->get_tag_list( tag_list, user_tag_list, num_tags );
02478   CHK_MB_ERR_0(result);
02479 
02480     // Get list of tags
02481   for (t_itor = tag_list.begin(); t_itor != tag_list.end(); ++t_itor)
02482   {
02483       // Add tag to export list
02484     TagDesc tag_data; tag_data.write_sparse = false;
02485     tag_data.tag_id = *t_itor;
02486     tag_data.sparse_offset = 0;
02487     tag_data.var_data_offset = 0;
02488     tag_data.max_num_ents = 0;
02489     tag_data.max_num_vals = 0;
02490     tagList.push_back( tag_data );
02491   }
02492 
02493   return MB_SUCCESS;
02494 }
02495 
02496   // If MOAB was built with parallel support, this function is
02497   // overridden by an alternate version in WriteHDF5Parallel
02498   // that supports parallel I/O.  If we get here, then MOAB
02499   // was not built with support for parallel HDF5 I/O.
02500 ErrorCode WriteHDF5::parallel_create_file( const char* ,
02501                                     bool ,
02502                                     const std::vector<std::string>& ,
02503                                     const FileOptions&,
02504                                     const Tag*,
02505                                     int ,
02506                                     int,
02507                                     double*  )
02508 {
02509   writeUtil->report_error("WriteHDF5 does not support parallel writing.\n");
02510   return error(MB_NOT_IMPLEMENTED);
02511 }
02512 
02513 ErrorCode WriteHDF5::serial_create_file( const char* filename,
02514                                     bool overwrite,
02515                                     const std::vector<std::string>& qa_records,
02516                                     const Tag* user_tag_list,
02517                                     int num_user_tags,
02518                                     int dimension )
02519 {
02520   long first_id;
02521   mhdf_Status status;
02522   hid_t handle;
02523   std::list<ExportSet>::iterator ex_itor;
02524   ErrorCode rval;
02525   
02526   topState.start( "creating file" );
02527   
02528   const char* type_names[MBMAXTYPE];
02529   memset( type_names, 0, MBMAXTYPE * sizeof(char*) );
02530   for (EntityType i = MBEDGE; i < MBENTITYSET; ++i)
02531     type_names[i] = CN::EntityTypeName( i );
02532  
02533     // Create the file
02534   filePtr = mhdf_createFile( filename, overwrite, type_names, MBMAXTYPE, id_type, &status );
02535   CHK_MHDF_ERR_0(status);
02536   assert(!!filePtr);
02537 
02538   rval = write_qa( qa_records );
02539   CHK_MB_ERR_0(rval);
02540   
02541     // Create node table
02542   if (nodeSet.range.size()) {
02543     nodeSet.total_num_ents = nodeSet.range.size();
02544     handle = mhdf_createNodeCoords( filePtr, dimension, nodeSet.total_num_ents,
02545                                     &first_id, &status );
02546     CHK_MHDF_ERR_0(status);
02547     mhdf_closeData( filePtr, handle, &status );
02548     CHK_MHDF_ERR_0(status);
02549     nodeSet.first_id = (id_t)first_id;
02550     rval = assign_ids( nodeSet.range, nodeSet.first_id );
02551     CHK_MB_ERR_0(rval);
02552   }
02553   else {
02554     nodeSet.first_id = std::numeric_limits<id_t>::max();
02555   } 
02556   nodeSet.offset = 0;
02557 
02558     // Create element tables
02559   for (ex_itor = exportList.begin(); ex_itor != exportList.end(); ++ex_itor)
02560   {
02561     ex_itor->total_num_ents = ex_itor->range.size();
02562     rval = create_elem_table( *ex_itor, ex_itor->total_num_ents, first_id );
02563     CHK_MB_ERR_0(rval);
02564       
02565     ex_itor->first_id = (id_t)first_id;
02566     ex_itor->offset = 0;
02567     rval = assign_ids( ex_itor->range, ex_itor->first_id );
02568     CHK_MB_ERR_0(rval);
02569   }
02570 
02571     // create node adjacency table
02572   id_t num_adjacencies;
02573 #ifdef MB_H5M_WRITE_NODE_ADJACENCIES  
02574   rval = count_adjacencies( nodeSet.range, num_adjacencies );
02575   CHK_MB_ERR_0(rval);
02576   nodeSet.adj_offset = 0;
02577   nodeSet.max_num_adjs = num_adjacencies;
02578   if (num_adjacencies > 0)
02579   {
02580     handle = mhdf_createAdjacency( filePtr,
02581                                    mhdf_node_type_handle(),
02582                                    num_adjacencies,
02583                                    &status );
02584     CHK_MHDF_ERR_0(status);
02585     mhdf_closeData( filePtr, handle, &status );
02586   }
02587 #endif
02588   
02589     // create element adjacency tables
02590   for (ex_itor = exportList.begin(); ex_itor != exportList.end(); ++ex_itor)
02591   {
02592     rval = count_adjacencies( ex_itor->range, num_adjacencies );
02593     CHK_MB_ERR_0(rval);
02594     
02595     ex_itor->adj_offset = 0;
02596     ex_itor->max_num_adjs = num_adjacencies;
02597     if (num_adjacencies > 0)
02598     {
02599       handle = mhdf_createAdjacency( filePtr,
02600                                      ex_itor->name(),
02601                                      num_adjacencies,
02602                                      &status );
02603       CHK_MHDF_ERR_0(status);
02604       mhdf_closeData( filePtr, handle, &status );
02605     }
02606   }
02607   
02608     // create set tables
02609   writeSets = !setSet.range.empty();
02610   if (writeSets)
02611   {
02612     long contents_len, children_len, parents_len;
02613     
02614     setSet.total_num_ents = setSet.range.size();
02615     setSet.max_num_ents = setSet.total_num_ents;
02616     rval = create_set_meta( setSet.total_num_ents, first_id );
02617     CHK_MB_ERR_0(rval);
02618 
02619     setSet.first_id = (id_t)first_id;
02620     rval = assign_ids( setSet.range, setSet.first_id );
02621     CHK_MB_ERR_0(rval);
02622     
02623     rval = count_set_size( setSet.range, contents_len, children_len, parents_len );
02624     CHK_MB_ERR_0(rval);
02625     
02626     rval = create_set_tables( contents_len, children_len, parents_len );
02627     CHK_MB_ERR_0(rval);
02628    
02629     setSet.offset = 0;
02630     setContentsOffset = 0;
02631     setChildrenOffset = 0;
02632     setParentsOffset = 0;
02633     writeSetContents = !!contents_len;
02634     writeSetChildren = !!children_len;
02635     writeSetParents = !!parents_len;
02636     
02637     maxNumSetContents = contents_len;
02638     maxNumSetChildren = children_len;
02639     maxNumSetParents = parents_len;
02640   } // if(!setSet.range.empty())
02641   
02642   
02643   dbgOut.tprint( 1, "Gathering Tags\n" );
02644   
02645   rval = gather_tags( user_tag_list, num_user_tags );
02646   CHK_MB_ERR_0(rval);
02647 
02648     // Create the tags and tag data tables
02649   std::list<TagDesc>::iterator tag_iter = tagList.begin();
02650   for ( ; tag_iter != tagList.end(); ++tag_iter)
02651   {
02652       // As we haven't yet added any ExportSets for which to write
02653       // dense tag data to the TagDesc struct pointed to by
02654       // tag_iter, this call will initially return all tagged entities
02655       // in the set of entities to be written.
02656     Range range;
02657     rval = get_sparse_tagged_entities( *tag_iter, range );
02658     CHK_MB_ERR_0(rval);
02659     
02660     int s;
02661     bool var_len = (MB_VARIABLE_DATA_LENGTH == iFace->tag_get_length( tag_iter->tag_id, s ));
02662     
02663       // Determine which ExportSets we want to write dense
02664       // data for. We never write dense data for variable-length
02665       // tag data.
02666     if (!var_len && writeTagDense) {
02667       // Check if we want to write this tag in dense format even if not
02668       // all of the entities have a tag value.  The criteria for this
02669       // are that the tag be dense, have a default value, and have at
02670       // least 2/3 of the entities tagged.
02671       bool prefer_dense = false;
02672       TagType type;
02673       rval = iFace->tag_get_type( tag_iter->tag_id, type );
02674       CHK_MB_ERR_0(rval);
02675       if (MB_TAG_DENSE == type) {
02676         const void* defval = 0;
02677         rval = iFace->tag_get_default_value( tag_iter->tag_id, defval, s );
02678         if (MB_SUCCESS == rval)
02679           prefer_dense = true;
02680       } 
02681     
02682       if (check_dense_format_tag( nodeSet, range, prefer_dense )) {
02683         range -= nodeSet.range;
02684         tag_iter->dense_list.push_back( nodeSet );
02685       }
02686 
02687       std::list<ExportSet>::const_iterator ex = exportList.begin();
02688       for ( ; ex != exportList.end(); ++ex) {
02689         if (check_dense_format_tag( *ex, range, prefer_dense )) {
02690           range -= ex->range;
02691           tag_iter->dense_list.push_back( *ex );
02692         }
02693       }
02694 
02695       if (check_dense_format_tag( setSet, range, prefer_dense )) {
02696         range -= setSet.range;
02697         tag_iter->dense_list.push_back( setSet );
02698       }
02699     }
02700     
02701     tag_iter->write_sparse = !range.empty();
02702   
02703     unsigned long var_len_total = 0;
02704     if (var_len) {
02705       rval = get_tag_data_length( *tag_iter, range, var_len_total ); 
02706       CHK_MB_ERR_0(rval);
02707     }
02708   
02709     rval = create_tag( *tag_iter, range.size(), var_len_total );
02710     CHK_MB_ERR_0(rval);
02711   } // for(tags)
02712   
02713   topState.end();
02714   return MB_SUCCESS;
02715 }
02716 
02717 
02718 bool WriteHDF5::check_dense_format_tag( const ExportSet& ents, 
02719                                         const Range& all_tagged, 
02720                                         bool prefer_dense )
02721 {
02722     // if there are no tagged entities, then don't write anything
02723   if (ents.range.empty())
02724     return false;
02725   
02726     // if all of the entities are tagged, then write in dense format
02727   if (all_tagged.contains(ents.range))
02728     return true;
02729   
02730     // unless asked for more lenient choice of dense format, return false
02731   if (!prefer_dense)
02732     return false;
02733   
02734     // if we're being lenient about choosing dense format, then
02735     // return true if at least 2/3 of the entities are tagged.
02736   Range xsect = intersect( ents.range, all_tagged );
02737   if (3*xsect.size() >= 2*ents.range.size())
02738     return true;
02739   
02740   return false;
02741 }
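/* The lenient test above is integer arithmetic for "at least 2/3 tagged":
 * 3*tagged >= 2*total avoids floating point.  For example, 700 of 1000
 * entities tagged gives 2100 >= 2000, so the tag is written densely, with
 * the default value standing in for the untagged entities. */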
02742 
02743 ErrorCode WriteHDF5::count_adjacencies( const Range& set, id_t& result )
02744 {
02745   ErrorCode rval;
02746   std::vector<id_t> adj_list;
02747   Range::const_iterator iter = set.begin();
02748   const Range::const_iterator end = set.end();
02749   result = 0;
02750   for ( ; iter != end; ++iter )
02751   {
02752     adj_list.clear();
02753     rval = get_adjacencies( *iter, adj_list );
02754     CHK_MB_ERR_0(rval);
02755     
02756     if (adj_list.size() > 0)
02757       result += 2 + adj_list.size();
02758   }
02759   return MB_SUCCESS;
02760 }
02761 
02762 ErrorCode WriteHDF5::create_elem_table( const ExportSet& block,
02763                                         long num_entities,
02764                                         long& first_id_out )
02765 {
02766   mhdf_Status status;
02767   hid_t handle;
02768 
02769   CHECK_OPEN_HANDLES;
02770   
02771   mhdf_addElement( filePtr, block.name(), block.type, &status );
02772   CHK_MHDF_ERR_0(status);
02773   
02774   handle = mhdf_createConnectivity( filePtr, 
02775                                     block.name(),
02776                                     block.num_nodes,
02777                                     num_entities,
02778                                     &first_id_out,
02779                                     &status );
02780   CHK_MHDF_ERR_0(status);
02781   mhdf_closeData( filePtr, handle, &status );
02782   CHK_MHDF_ERR_0(status);
02783   
02784   return MB_SUCCESS;
02785 }
02786 
02787 
02788 ErrorCode WriteHDF5::count_set_size( const Range& sets, 
02789                                      long& contents_length_out,
02790                                      long& children_length_out,
02791                                      long& parents_length_out )
02792 {
02793   ErrorCode rval;
02794   Range set_contents;
02795   long contents_length_set, children_length_set, parents_length_set;
02796   unsigned long flags;
02797   std::vector<id_t> set_contents_ids;
02798   std::vector<SpecialSetData>::const_iterator si = specialSets.begin();
02799   
02800   contents_length_out = 0;
02801   children_length_out = 0;
02802   parents_length_out = 0;
02803   
02804   for (Range::const_iterator iter = sets.begin(); iter != sets.end(); ++iter)
02805   {
02806     while (si != specialSets.end() && si->setHandle < *iter)
02807       ++si;
02808       
02809     if (si != specialSets.end() && si->setHandle == *iter) {
02810       contents_length_out += si->contentIds.size();
02811       children_length_out += si->childIds.size();
02812       parents_length_out += si->parentIds.size();
02813       ++si;
02814       continue;
02815     }
02816   
02817     rval = get_set_info( *iter, contents_length_set, children_length_set,
02818                          parents_length_set, flags );
02819     CHK_MB_ERR_0(rval);
02820     
02821       // check if can and should compress as ranges
02822     if (!(flags&MESHSET_ORDERED) && contents_length_set)
02823     {
02824       set_contents.clear();
02825       rval = iFace->get_entities_by_handle( *iter, set_contents, false );
02826       CHK_MB_ERR_0(rval);
02827       
02828       bool blocked_list;
02829       rval = range_to_blocked_list( set_contents, set_contents_ids, blocked_list );
02830       CHK_MB_ERR_0(rval);
02831       
02832       if (blocked_list)
02833       {
02834         assert (set_contents_ids.size() % 2 == 0);
02835         contents_length_set = set_contents_ids.size();
02836       }
02837     }
02838 
02839     contents_length_out += contents_length_set;
02840     children_length_out += children_length_set;
02841     parents_length_out += parents_length_set;
02842   }
02843   
02844   return MB_SUCCESS;
02845 }
02846 
02847 ErrorCode WriteHDF5::create_set_meta( long num_sets, long& first_id_out )
02848 {
02849   hid_t handle;
02850   mhdf_Status status;
02851   
02852   CHECK_OPEN_HANDLES;
02853 
02854   handle = mhdf_createSetMeta( filePtr, num_sets, &first_id_out, &status );
02855   CHK_MHDF_ERR_0(status);
02856   mhdf_closeData( filePtr, handle, &status );
02857   
02858   return MB_SUCCESS;
02859 }
02860 
02861 WriteHDF5::SpecialSetData* WriteHDF5::find_set_data( EntityHandle h )
02862 {
02863   std::vector<SpecialSetData>::iterator i;
02864   i = std::lower_bound( specialSets.begin(), specialSets.end(), h, SpecSetLess() );
02865   return (i == specialSets.end() || i->setHandle != h) ? 0 : &*i;
02866 }
02867 
02868 ErrorCode WriteHDF5::create_set_tables( long num_set_contents,
02869                                         long num_set_children,
02870                                         long num_set_parents )
02871 {
02872   hid_t handle;
02873   mhdf_Status status;
02874   
02875   CHECK_OPEN_HANDLES;
02876 
02877   if (num_set_contents > 0)
02878   {
02879     handle = mhdf_createSetData( filePtr, num_set_contents, &status );
02880     CHK_MHDF_ERR_0(status);
02881     mhdf_closeData( filePtr, handle, &status );
02882   }
02883   
02884   if (num_set_children > 0)
02885   {
02886     handle = mhdf_createSetChildren( filePtr, num_set_children, &status );
02887     CHK_MHDF_ERR_0(status);
02888     mhdf_closeData( filePtr, handle, &status );
02889   }
02890   
02891   if (num_set_parents > 0)
02892   {
02893     handle = mhdf_createSetParents( filePtr, num_set_parents, &status );
02894     CHK_MHDF_ERR_0(status);
02895     mhdf_closeData( filePtr, handle, &status );
02896   }
02897   
02898   return MB_SUCCESS;
02899 }
02900 
02901 ErrorCode WriteHDF5::get_tag_size( Tag tag,
02902                                    DataType& moab_type,
02903                                    int& num_bytes,
02904                                    int& type_size,
02905                                    int& array_length,
02906                                    mhdf_TagDataType& file_type,
02907                                    hid_t& hdf_type )
02908 {
02909   ErrorCode rval;
02910   Tag type_handle;
02911   std::string tag_name, tag_type_name;
02912    
02913   CHECK_OPEN_HANDLES;
02914 
02915     // hdf_type is normally derived from the MOAB data type below.
02916     // The exception is opaque data, for which the user may have
02917     // specified an explicit HDF5 type via a mesh tag.
02918   hdf_type = (hid_t)0;
02919   bool close_hdf_type = false;
02920   
02921   rval = iFace->tag_get_data_type( tag, moab_type ); CHK_MB_ERR_0(rval);
02922   rval = iFace->tag_get_length( tag, array_length );     
02923   if (MB_VARIABLE_DATA_LENGTH == rval) {
02924     array_length = MB_VARIABLE_LENGTH;
02925   }
02926   else if (MB_SUCCESS != rval)
02927     return error(rval);
02928   rval = iFace->tag_get_bytes( tag, num_bytes );     
02929   if (MB_VARIABLE_DATA_LENGTH == rval) {
02930     num_bytes = MB_VARIABLE_LENGTH;
02931   }
02932   else if (MB_SUCCESS != rval)
02933     return error(rval);
02934 
02935   switch (moab_type)
02936   {
02937   case MB_TYPE_INTEGER:
02938     type_size = sizeof(int);
02939     file_type = mhdf_INTEGER;
02940     hdf_type = H5T_NATIVE_INT;
02941     close_hdf_type = false;
02942     break;
02943   case MB_TYPE_DOUBLE:
02944     type_size = sizeof(double);
02945     file_type = mhdf_FLOAT;
02946     hdf_type = H5T_NATIVE_DOUBLE;
02947     close_hdf_type = false;
02948     break;
02949   case MB_TYPE_BIT:
02950     type_size = sizeof(bool);
02951     file_type = mhdf_BITFIELD;
02952     assert(array_length <= 8);
02953     hdf_type = H5Tcopy( H5T_NATIVE_B8 );
02954     H5Tset_precision( hdf_type, array_length );
02955     close_hdf_type = true;
02956     break;
02957   case MB_TYPE_HANDLE:
02958     type_size = sizeof(EntityHandle);
02959     file_type = mhdf_ENTITY_ID;
02960     hdf_type = id_type;
02961     close_hdf_type = false;
02962     break;
02963   case MB_TYPE_OPAQUE:
02964     file_type = mhdf_OPAQUE;
02965 
02966     rval = iFace->tag_get_name( tag, tag_name ); CHK_MB_ERR_0(rval);
02967     tag_type_name = "__hdf5_tag_type_";
02968     tag_type_name += tag_name;
02969     rval = iFace->tag_get_handle( tag_type_name.c_str(), 0, MB_TYPE_OPAQUE, type_handle, MB_TAG_ANY );
02970     if (MB_TAG_NOT_FOUND == rval) {
02971       if (num_bytes == MB_VARIABLE_LENGTH)
02972         type_size = 1;
02973       else
02974         type_size = num_bytes;
02975       hdf_type = H5Tcreate( H5T_OPAQUE, type_size );
02976       close_hdf_type = true;
02977     }
02978     else if (MB_SUCCESS == rval) {
02979       int hsize;
02980       rval = iFace->tag_get_bytes( type_handle, hsize );
02981       if (hsize != sizeof(hid_t))
02982         return error(MB_FAILURE);
02983       
02984       const EntityHandle root = 0;
02985       rval = iFace->tag_get_data( type_handle, &root, 1, &hdf_type );
02986       if (rval != MB_SUCCESS)
02987         return error(rval);
02988         
02989       type_size = H5Tget_size(hdf_type);
02990       if (type_size != num_bytes)
02991         return error(MB_FAILURE);
02992         
02993       close_hdf_type = false;
02994     }
02995     else {
02996       return error(rval);
02997     }
02998     num_bytes = array_length;
02999     array_length = (num_bytes == MB_VARIABLE_LENGTH) ? MB_VARIABLE_LENGTH : 1;
03000   }
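/* For bit tags, the type built above is an n-bit field packed into a single
   byte.  For example, a 3-bit tag (array_length == 3) is described as:

     hid_t t = H5Tcopy( H5T_NATIVE_B8 );   // 8-bit bitfield base type
     H5Tset_precision( t, 3 );             // only 3 bits are significant

   This is why the assertion below accepts num_bytes == 1 for MB_TYPE_BIT
   regardless of array_length. */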
03001   
03002   assert(num_bytes == MB_VARIABLE_LENGTH || 
03003          (moab_type == MB_TYPE_BIT && num_bytes == 1) ||
03004          array_length * type_size == num_bytes );
03005   
03006   if (num_bytes == MB_VARIABLE_LENGTH) {
03007     array_length = MB_VARIABLE_LENGTH;
03008     if (!close_hdf_type) {
03009       hdf_type = H5Tcopy( hdf_type );
03010       close_hdf_type = true;
03011     }
03012   }
03013   else if (array_length > 1 && moab_type != MB_TYPE_BIT) {
03014     hsize_t len = array_length;
03015 #if defined(H5Tarray_create_vers) && (H5Tarray_create_vers > 1)
03016     hid_t temp_id = H5Tarray_create2( hdf_type, 1, &len);
03017 #else
03018     hid_t temp_id = H5Tarray_create( hdf_type, 1, &len, NULL );
03019 #endif
03020     if (close_hdf_type)
03021       H5Tclose( hdf_type );
03022     hdf_type = temp_id;
03023   }
03024   else if (!close_hdf_type) {
03025     hdf_type = H5Tcopy( hdf_type );
03026     close_hdf_type = true;
03027   }
03028   
03029   return MB_SUCCESS;
03030 }
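/* A minimal sketch (not code from this file) of the "__hdf5_tag_type_"
   convention handled in the MB_TYPE_OPAQUE case above.  An application can
   register an explicit HDF5 datatype for an opaque tag by storing the hid_t
   in a companion mesh tag on the root set (handle 0).  Here 'iface' is an
   assumed moab::Interface*, and the tag name "MY_DATA" and 16-byte size are
   illustrative:

     hid_t my_type = H5Tcreate( H5T_OPAQUE, 16 ); // app-defined 16-byte type
     Tag data_tag, type_tag;
     iface->tag_get_handle( "MY_DATA", 16, MB_TYPE_OPAQUE, data_tag,
                            MB_TAG_SPARSE|MB_TAG_CREAT );
     iface->tag_get_handle( "__hdf5_tag_type_MY_DATA", sizeof(hid_t),
                            MB_TYPE_OPAQUE, type_tag,
                            MB_TAG_MESH|MB_TAG_CREAT );
     const EntityHandle root = 0;
     iface->tag_set_data( type_tag, &root, 1, &my_type );

   Note also that every successful return from get_tag_size() leaves an open
   type object in hdf_type: the caller owns it and must release it with
   H5Tclose(), as create_tag() below does. */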
03031 
03032 ErrorCode WriteHDF5::get_tag_data_length( const TagDesc& tag_info, 
03033                                           const Range& range,
03034                                           unsigned long& result )
03035 {
03036   ErrorCode rval;
03037   result = 0;
03038 
03039     // split buffer into two pieces, one for pointers and one for sizes
03040   size_t step, remaining;
03041   step = bufferSize / (sizeof(int) + sizeof(void*));
03042   const void** ptr_buffer = reinterpret_cast<const void**>(dataBuffer);
03043   int* size_buffer = reinterpret_cast<int*>(ptr_buffer + step); 
03044   Range subrange;
03045   Range::const_iterator iter = range.begin();
03046   for (remaining = range.size(); remaining >= step; remaining -= step) {
02947       // get subset of range containing 'step' entities
03048     Range::const_iterator end = iter; end += step;
03049     subrange.clear();
03050     subrange.merge( iter, end );
03051     iter = end;
03052       // get tag sizes for entities
03053     rval = iFace->tag_get_by_ptr( tag_info.tag_id, subrange, ptr_buffer, size_buffer );
03054     if (MB_SUCCESS != rval)
03055       return error(rval);
03056       // sum lengths
03057     for (size_t i = 0; i < step; ++i)
03058       result += size_buffer[i];
03059   }
03060     // process remaining
03061   subrange.clear();
03062   subrange.merge( iter, range.end() );
03063   assert( subrange.size() == remaining );
03064   rval = iFace->tag_get_by_ptr( tag_info.tag_id, subrange, ptr_buffer, size_buffer );
03065   if (MB_SUCCESS != rval)
03066     return error(rval);
03067   for (size_t i = 0; i < remaining; ++i)
03068     result += size_buffer[i];
03069     
03070   return MB_SUCCESS;
03071 }
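/* A worked example of the buffer split above, assuming the default 40 MiB
   buffer (WRITE_HDF5_BUFFER_SIZE), 8-byte pointers, and 4-byte ints:

     step = bufferSize / (sizeof(int) + sizeof(void*))
          = 41943040 / 12
          = 3495253 entities per pass

   so each pass queries roughly 3.5 million entities: the first
   step*sizeof(void*) bytes of dataBuffer hold the value pointers and the
   next step*sizeof(int) bytes hold the per-entity sizes.  Only the sizes
   are summed; the pointers are not dereferenced. */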
03072     
03073                                      
03074 
03075 ErrorCode WriteHDF5::create_tag( const TagDesc& tag_data,
03076                                  unsigned long num_sparse_entities,
03077                                  unsigned long data_table_size )
03078 {
03079   TagType mb_storage;
03080   DataType mb_type;
03081   mhdf_TagDataType mhdf_type;
03082   int tag_bytes, type_size, num_vals, storage;
03083   hid_t hdf_type = (hid_t)0;
03084   hid_t handles[3];
03085   std::string tag_name;
03086   ErrorCode rval;
03087   mhdf_Status status;
03088   
03089   CHECK_OPEN_HANDLES;
03090 
03091 
03092     // get tag properties
03093   rval = iFace->tag_get_type( tag_data.tag_id, mb_storage  ); CHK_MB_ERR_0(rval);
03094   switch (mb_storage) {
03095     case MB_TAG_DENSE :  storage = mhdf_DENSE_TYPE ; break;
03096     case MB_TAG_SPARSE:  storage = mhdf_SPARSE_TYPE; break;
03097     case MB_TAG_BIT:     storage = mhdf_BIT_TYPE;    break;
03098     case MB_TAG_MESH:    storage = mhdf_MESH_TYPE;   break;
03099     default: return error(MB_FAILURE);
03100   }
03101   rval = iFace->tag_get_name( tag_data.tag_id, tag_name ); CHK_MB_ERR_0(rval);
03102   rval = get_tag_size( tag_data.tag_id, mb_type, tag_bytes, type_size, num_vals, mhdf_type, hdf_type );
03103   CHK_MB_ERR_0(rval);
03104   
03105     // get default value
03106   const void *def_value, *mesh_value;
03107   int def_val_len, mesh_val_len;
03108   rval = iFace->tag_get_default_value( tag_data.tag_id, def_value, def_val_len );
03109   if (MB_ENTITY_NOT_FOUND == rval) {
03110     def_value = 0;
03111     def_val_len = 0;
03112   }
03113   else if (MB_SUCCESS != rval) {
03114     H5Tclose( hdf_type );
03115     return error(rval);
03116   }
03117     
03118     // get mesh value
03119   unsigned char byte;
03120   const EntityHandle root = 0;
03121   if (mb_storage == MB_TAG_BIT) {
03122     rval = iFace->tag_get_data( tag_data.tag_id, &root, 1, &byte );
03123     mesh_value = &byte;
03124     mesh_val_len = 1;
03125   }
03126   else {
03127     rval = iFace->tag_get_by_ptr( tag_data.tag_id, &root, 1, &mesh_value, &mesh_val_len );
03128   }
03129   if (MB_TAG_NOT_FOUND == rval) {
03130     mesh_value = 0;
03131     mesh_val_len = 0;
03132   }
03133   else if (MB_SUCCESS != rval) {
03134     H5Tclose( hdf_type );
03135     return error(rval);
03136   }
03137   
03138     // for handle-type tags, need to convert from handles to file ids
03139   if (MB_TYPE_HANDLE == mb_type) {
03140       // make sure there's room in the buffer for both
03141     assert( (def_val_len + mesh_val_len) * sizeof(EntityHandle) < (size_t)bufferSize );
03142 
03143       // convert default value
03144     if (def_value) {
03145       memcpy( dataBuffer, def_value, def_val_len*sizeof(EntityHandle) );
03146       convert_handle_tag( reinterpret_cast<EntityHandle*>(dataBuffer), def_val_len );
03147       def_value = dataBuffer;
03148     }
03149     
03150       // convert mesh value
03151     if (mesh_value) {
03152       EntityHandle* ptr = reinterpret_cast<EntityHandle*>(dataBuffer) + def_val_len;
03153       memcpy( ptr, mesh_value, mesh_val_len*sizeof(EntityHandle) );
03154       if (convert_handle_tag( ptr, mesh_val_len ))
03155         mesh_value = ptr;
03156       else
03157         mesh_value = 0;
03158     }
03159   }
03160      
03161  
03162   if (MB_VARIABLE_LENGTH != tag_bytes) {
03163       // write the tag description to the file
03164     mhdf_createTag( filePtr,
03165                     tag_name.c_str(),
03166                     mhdf_type,
03167                     num_vals,
03168                     storage,
03169                     def_value,
03170                     mesh_value,
03171                     hdf_type,
03172                     mb_type == MB_TYPE_HANDLE ? id_type : 0,
03173                     &status );
03174     H5Tclose(hdf_type);
03175     CHK_MHDF_ERR_0(status);
03176 
03177 
03178       // create empty table for tag data
03179     if (num_sparse_entities)
03180     {
03181       mhdf_createSparseTagData( filePtr, 
03182                                 tag_name.c_str(), 
03183                                 num_sparse_entities,
03184                                 handles,
03185                                 &status );
03186       CHK_MHDF_ERR_0(status);
03187       mhdf_closeData( filePtr, handles[0], &status );
03188       mhdf_closeData( filePtr, handles[1], &status );
03189     }
03190     
03191     for (size_t i = 0; i < tag_data.dense_list.size(); ++i) {
03192       const ExportSet* ex = find( tag_data.dense_list[i] );
03193       assert(0 != ex);
03194       handles[0] = mhdf_createDenseTagData( filePtr,
03195                                             tag_name.c_str(),
03196                                             ex->name(),
03197                                             ex->total_num_ents,
03198                                             &status );
03199       CHK_MHDF_ERR_0(status);
03200       mhdf_closeData( filePtr, handles[0], &status );
03201     }
03202   }
03203   else {
03204     mhdf_createVarLenTag( filePtr,
03205                           tag_name.c_str(),
03206                           mhdf_type,
03207                           storage,
03208                           def_value, def_val_len,
03209                           mesh_value, mesh_val_len,
03210                           hdf_type, mb_type == MB_TYPE_HANDLE ? id_type : 0,
03211                           &status );
03212     H5Tclose(hdf_type);
03213     CHK_MHDF_ERR_0(status);
03214     
03215       // create empty table for tag data
03216     if (num_sparse_entities) {
03217       mhdf_createVarLenTagData( filePtr, 
03218                                 tag_name.c_str(),
03219                                 num_sparse_entities,
03220                                 data_table_size,
03221                                 handles,
03222                                 &status );
03223       CHK_MHDF_ERR_0(status);
03224       mhdf_closeData( filePtr, handles[0], &status );
03225       mhdf_closeData( filePtr, handles[1], &status );
03226       mhdf_closeData( filePtr, handles[2], &status );
03227     }
03228   }
03229     
03230   return MB_SUCCESS;
03231 }
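/* create_tag() only reserves file space: each dataset is created and then
   closed immediately, to be reopened by the later passes that stream the
   actual tag values.  A hedged sketch of the per-tag call order, using the
   functions defined in this file ('tagged_range' and 'is_var_len' are
   hypothetical placeholders):

     size_t count;
     unsigned long var_len_total = 0;
     get_num_sparse_tagged_entities( tag, count );       // size the tables
     if (is_var_len)
       get_tag_data_length( tag, tagged_range, var_len_total );
     create_tag( tag, count, var_len_total );            // reserve datasets
     // ... the data itself is written during the subsequent write phase.
*/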
03232 
03233 ErrorCode WriteHDF5::get_num_sparse_tagged_entities( const TagDesc& tag, 
03234                                                      size_t& count )
03235 {
03236   Range tmp;
03237   ErrorCode rval = get_sparse_tagged_entities( tag, tmp );
03238   count = tmp.size();
03239   return rval;
03240 }
03241 
03242 ErrorCode WriteHDF5::get_sparse_tagged_entities( const TagDesc& tag,
03243                                                  Range& results )
03244 {
03245   results.clear();
03246   if (!tag.have_dense(setSet))
03247     results.merge( setSet.range );
03248   std::list<ExportSet>::reverse_iterator e;
03249   for (e = exportList.rbegin(); e != exportList.rend(); ++e) 
03250     if (!tag.have_dense(*e))
03251       results.merge( e->range );
03252   if (!tag.have_dense(nodeSet))
03253     results.merge( nodeSet.range );
03254   if (results.empty())
03255     return MB_SUCCESS;  
03256     
03257   return iFace->get_entities_by_type_and_tag( 0, MBMAXTYPE, 
03258                                               &tag.tag_id, 0, 1, 
03259                                               results, Interface::INTERSECT );
03260 }
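/* Note on the query above: 'results' is passed in pre-populated with every
   entity being written that is not covered by a dense table for this tag,
   and Interface::INTERSECT narrows that set to the entities which actually
   have a value stored.  Passing 0 for the value array matches any value.
   The early return guards against the empty-input case, where INTERSECT
   would otherwise return *all* tagged entities.  A standalone sketch of the
   same idiom ('iface' and 'tag_handle' assumed):

     Range tagged;  // empty input range: match every entity with the tag
     iface->get_entities_by_type_and_tag( 0, MBMAXTYPE, &tag_handle,
                                          0, 1, tagged,
                                          Interface::INTERSECT );
*/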
03261 
03262 void WriteHDF5::get_write_entities( Range& range )
03263 {
03264   range.clear();
03265   range.merge( setSet.range );
03266   std::list<ExportSet>::reverse_iterator e;
03267   for (e = exportList.rbegin(); e != exportList.rend(); ++e) 
03268     range.merge( e->range );
03269   range.merge( nodeSet.range );
03270 }
03271 
03272 void WriteHDF5::print_id_map( ) const
03273 {
03274   print_id_map( std::cout, "" ) ;
03275 }
03276 
03277 void WriteHDF5::print_id_map( std::ostream& s, const char* pfx ) const
03278 {
03279   RangeMap<EntityHandle,id_t>::const_iterator i;
03280   for (i = idMap.begin(); i != idMap.end(); ++i) {
03281     const char* n1 = CN::EntityTypeName(TYPE_FROM_HANDLE(i->begin));
03282     EntityID id = ID_FROM_HANDLE(i->begin);
03283     if (i->count == 1) {
03284       s << pfx << n1 << " " << id << " -> " << i->value << std::endl;
03285     }
03286     else {
03287       const char* n2 = CN::EntityTypeName(TYPE_FROM_HANDLE(i->begin + i->count - 1));
03288       if (n1 == n2) {
03289         s << pfx << n1 << " " << id << "-" << id + i->count-1
03290           << " -> " << i->value << "-" << i->value + i->count-1 << std::endl;
03291       }
03292       else {
03293         s << pfx << n1 << " " << id << "-" 
03294           << n1 << " " << ID_FROM_HANDLE(i->begin + i->count-1)
03295           << " -> " << i->value << "-" << i->value + i->count-1 << std::endl;
03296       }
03297     }
03298   }
03299 }
03300 
03301 void WriteHDF5::print_times( const double* t ) const
03302 {
03303   std::cout << "WriteHDF5:           " << t[TOTAL_TIME] << std::endl
03304             << "  gather mesh:       " << t[GATHER_TIME] << std::endl
03305             << "  create file:       " << t[CREATE_TIME] << std::endl
03306             << "    create nodes:    " << t[CREATE_NODE_TIME] << std::endl
03307             << "    negotiate types: " << t[NEGOTIATE_TYPES_TIME] << std::endl
03308             << "    craete elem:     " << t[CREATE_ELEM_TIME] << std::endl
03309             << "    file id exch:    " << t[FILEID_EXCHANGE_TIME] << std::endl
03310             << "    create adj:      " << t[CREATE_ADJ_TIME] << std::endl
03311             << "    create set:      " << t[CREATE_SET_TIME] << std::endl
03312             << "      shared ids:    " << t[SHARED_SET_IDS] << std::endl
03313             << "      shared data:   " << t[SHARED_SET_CONTENTS] << std::endl
03314             << "      set offsets:   " << t[SET_OFFSET_TIME] << std::endl
03315             << "    create tags:     " << t[CREATE_TAG_TIME] << std::endl
03316             << "  coordinates:       " << t[COORD_TIME] << std::endl
03317             << "  connectivity:      " << t[CONN_TIME] << std::endl
03318             << "  sets:              " << t[SET_TIME] << std::endl
03319             << "    set descrip:     " << t[SET_META] << std::endl
03320             << "    set content:     " << t[SET_CONTENT] << std::endl
03321             << "    set parent:      " << t[SET_PARENT] << std::endl
03322             << "    set child:       " << t[SET_CHILD] << std::endl
03323             << "  adjacencies:       " << t[ADJ_TIME] << std::endl
03324             << "  tags:              " << t[TAG_TIME] << std::endl
03325             << "    dense data:      " << t[DENSE_TAG_TIME] << std::endl
03326             << "    sparse data:     " << t[SPARSE_TAG_TIME] << std::endl
03327             << "    var-len data:    " << t[VARLEN_TAG_TIME] << std::endl;
03328 }
03329 
03330 } // namespace moab
03331 