moab::ReadHDF5 Class Reference

Read mesh from MOAB HDF5 (.h5m) file. More...

#include <ReadHDF5.hpp>

Inheritance diagram for moab::ReadHDF5:
moab::ReaderIface

List of all members.

Classes

struct  HDF5ErrorHandler
 Store old HDF5 error handling function. More...
struct  IDConnectivity

Public Types

enum  SetMode { CONTENT = 0, CHILD = 1, PARENT = 2 }
typedef int Comm

Public Member Functions

 ReadHDF5 (Interface *iface)
virtual ~ReadHDF5 ()
ErrorCode load_file (const char *file_name, const EntityHandle *file_set, const FileOptions &opts, const SubsetList *subset_list=0, const Tag *file_id_tag=0)
ErrorCode read_tag_values (const char *file_name, const char *tag_name, const FileOptions &opts, std::vector< int > &tag_values_out, const SubsetList *subset_list=0)
 Read tag values from a file.
Interface * moab () const
ErrorCode convert_id_to_handle (EntityHandle *in_out_array, size_t array_length)
void convert_id_to_handle (EntityHandle *in_out_array, size_t array_length, size_t &array_length_out) const
ErrorCode convert_range_to_handle (const EntityHandle *ranges, size_t num_ranges, Range &merge)
ErrorCode insert_in_id_map (const Range &file_ids, EntityHandle start_id)
ErrorCode insert_in_id_map (long file_id, EntityHandle handle)

Static Public Member Functions

static ReaderIface * factory (Interface *)
static void convert_id_to_handle (EntityHandle *in_out_array, size_t array_length, const RangeMap< long, EntityHandle > &id_map)
static void convert_id_to_handle (EntityHandle *in_out_array, size_t array_length, size_t &array_length_out, const RangeMap< long, EntityHandle > &id_map)
static void convert_range_to_handle (const EntityHandle *ranges, size_t num_ranges, const RangeMap< long, EntityHandle > &id_map, Range &merge)

Protected Member Functions

ErrorCode load_file_impl (const FileOptions &opts)
ErrorCode load_file_partial (const ReaderIface::IDTag *subset_list, int subset_list_length, int num_parts, int part_number, const FileOptions &opts)
ErrorCode read_tag_values_all (int tag_index, std::vector< int > &results)
ErrorCode read_tag_values_partial (int tag_index, const Range &file_ids, std::vector< int > &results)

Private Types

typedef RangeMap< long, EntityHandle > IDMap
 Map from File ID to MOAB handle.

Private Member Functions

ErrorCode init ()
int is_error (mhdf_Status &status)
ErrorCode read_all_set_meta ()
ErrorCode set_up_read (const char *file_name, const FileOptions &opts)
ErrorCode clean_up_read (const FileOptions &opts)
ErrorCode get_subset_ids (const ReaderIface::IDTag *subset_list, int subset_list_length, Range &file_ids_out)
ErrorCode get_partition (Range &tmp_file_ids, int num_parts, int part_number)
 Remove all but the specified fraction of sets from the passed range.
ErrorCode read_nodes (const Range &node_file_ids)
ErrorCode read_elems (int index)
ErrorCode read_elems (int index, const Range &file_ids, Range *node_ids=0)
ErrorCode read_elems (const mhdf_ElemDesc &elems, const Range &file_ids, Range *node_ids=0)
ErrorCode update_connectivity ()
ErrorCode read_elems (int index, const Range &element_file_ids, Range &node_file_ids)
ErrorCode read_node_adj_elems (const mhdf_ElemDesc &group, Range *read_entities=0)
ErrorCode read_node_adj_elems (const mhdf_ElemDesc &group, hid_t connectivity_handle, Range *read_entities=0)
ErrorCode read_poly (const mhdf_ElemDesc &elems, const Range &file_ids)
 Read poly(gons|hedra)
ErrorCode delete_non_side_elements (const Range &side_ents)
ErrorCode read_sets (const Range &set_file_ids)
 Read sets.
ErrorCode read_adjacencies (hid_t adjacency_table, long table_length)
ErrorCode read_tag (int index)
 Create tag and read all data.
ErrorCode create_tag (const mhdf_TagDesc &info, Tag &handle, hid_t &type)
 Create new tag or verify type matches existing tag.
ErrorCode read_dense_tag (Tag tag_handle, const char *ent_name, hid_t hdf_read_type, hid_t data_table, long start_id, long count)
 Read dense tag for all entities.
ErrorCode read_sparse_tag (Tag tag_handle, hid_t hdf_read_type, hid_t ent_table, hid_t val_table, long num_entities)
 Read sparse tag for all entities.
ErrorCode read_var_len_tag (Tag tag_handle, hid_t hdf_read_type, hid_t ent_table, hid_t val_table, hid_t off_table, long num_entities, long num_values)
 Read variable-length tag for all entities.
ErrorCode read_sparse_tag_indices (const char *name, hid_t id_table, EntityHandle start_offset, Range &offset_range, Range &handle_range, std::vector< EntityHandle > &handle_vect)
 Read index table for sparse tag.
ErrorCode read_qa (EntityHandle file_set)
ErrorCode search_tag_values (int tag_index, const std::vector< int > &sorted_values, Range &file_ids_out, bool sets_only=false)
 Search for entities with specified tag values.
ErrorCode get_tagged_entities (int tag_index, Range &file_ids_out)
 Search for entities with specified tag.
ErrorCode search_tag_values (hid_t tag_table, unsigned long table_size, const std::vector< int > &sorted_values, std::vector< EntityHandle > &value_indices)
 Search a table of tag data for a specified set of values.
ErrorCode get_set_contents (const Range &sets, Range &file_ids)
 Get the file IDs for nodes and elements contained in sets.
ErrorCode read_set_ids_recursive (Range &sets_in_out, bool contained_sets, bool child_sets)
ErrorCode find_sets_containing (Range &sets_out, bool read_set_containing_parents)
ErrorCode read_sets_partial (const Range &sets_in)
 Read sets from file into MOAB for partial read of file.
ErrorCode find_sets_containing (hid_t content_handle, hid_t content_type, long content_len, bool read_set_containing_parents, Range &file_ids)
ErrorCode read_set_data (const Range &set_file_ids, EntityHandle set_start_handle, ReadHDF5Dataset &set_data_set, SetMode which_data, Range *file_ids_out=0)
ErrorCode store_file_ids (Tag tag)
 Store file IDS in tag values.
ErrorCode find_int_tag (const char *name, int &index_out)
 Find index in mhdf_FileDesc* fileInfo for specified tag name.
void debug_barrier_line (int lineno)

Private Attributes

int bufferSize
 The size of the data buffer (dataBuffer).
char * dataBuffer
 A memory buffer to use for all I/O operations.
Interface * iFace
 Interface pointer passed to constructor.
mhdf_FileHandle filePtr
 The file handle from the mhdf library.
mhdf_FileDesc * fileInfo
 File summary.
IDMap idMap
ReadUtilIface * readUtil
 Cache pointer to read util.
hid_t handleType
 The type of an EntityHandle.
std::vector< IDConnectivity > idConnectivityList
hid_t indepIO
hid_t collIO
ParallelComm * myPcomm
bool debugTrack
DebugOutput dbgOut
 Debug output. Verbosity controlled with DEBUG_FORMAT option.
bool nativeParallel
 Doing true parallel read (PARALLEL=READ_PART)
Comm * mpiComm
 MPI_Comm value (unused if !nativeParallel)
bool blockedCoordinateIO
bool bcastSummary
bool bcastDuplicateReads
HDF5ErrorHandler errorHandler
 Store old HDF5 error handling function.
long(* setMeta )[4]

Detailed Description

Read mesh from MOAB HDF5 (.h5m) file.

Author:
Jason Kraftcheck
Date:
18 April 2004

Definition at line 45 of file ReadHDF5.hpp.
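
A minimal usage sketch (the file name "mesh.h5m" and the exact include paths are assumptions; error handling is elided):

  #include "moab/Core.hpp"
  #include "ReadHDF5.hpp"
  #include "FileOptions.hpp"

  using namespace moab;

  int main()
  {
    Core core;                   // MOAB instance implementing Interface
    ReadHDF5 reader( &core );    // construct the reader against it
    FileOptions opts( "" );      // no reader options
    ErrorCode rval = reader.load_file( "mesh.h5m", 0, opts );
    return (MB_SUCCESS == rval) ? 0 : 1;
  }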


Member Typedef Documentation

typedef int moab::ReadHDF5::Comm

Definition at line 52 of file ReadHDF5.hpp.
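
This typedef apparently stands in for MPI_Comm when MOAB is built without MPI support; compare the mpiComm member, which is noted as unused unless doing a true parallel read.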

typedef RangeMap< long, EntityHandle > moab::ReadHDF5::IDMap [private]

Map from File ID to MOAB handle.

Definition at line 126 of file ReadHDF5.hpp.


Member Enumeration Documentation

enum moab::ReadHDF5::SetMode

Enumerator:
CONTENT 
CHILD 
PARENT 

Definition at line 438 of file ReadHDF5.hpp.

{ CONTENT = 0, CHILD = 1, PARENT = 2 };
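
Judging from the uses below (e.g. setMeta[i][CONTENT]), these enumerators index the first three columns of the setMeta table (the end offsets of each set's content, child, and parent lists); the fourth column (index 3) holds the set flags tested against mhdf_SET_RANGE_BIT.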

Constructor & Destructor Documentation

moab::ReadHDF5::~ReadHDF5 ( ) [virtual]

Definition at line 262 of file ReadHDF5.cpp.

{
  if (!readUtil) // init() failed.
    return;

  delete [] setMeta;
  setMeta = 0;
  iFace->release_interface( readUtil );
  H5Tclose( handleType );
}

Member Function Documentation

ErrorCode moab::ReadHDF5::clean_up_read ( const FileOptions &  opts ) [private]

Definition at line 534 of file ReadHDF5.cpp.

{
  HDF5ErrorHandler handler;
#if defined(H5Eget_auto_vers) && H5Eget_auto_vers > 1
  herr_t err = H5Eget_auto( H5E_DEFAULT, &handler.func, &handler.data );
#else
  herr_t err = H5Eget_auto( &handler.func, &handler.data );
#endif
  if (err >= 0 && handler.func == &handle_hdf5_error) {
    assert(handler.data == &errorHandler); // comparison, not assignment
#if defined(H5Eget_auto_vers) && H5Eget_auto_vers > 1
    H5Eset_auto( H5E_DEFAULT, errorHandler.func, errorHandler.data );
#else
    H5Eset_auto( errorHandler.func, errorHandler.data );
#endif
  }

  free( dataBuffer );
  dataBuffer = NULL;
  free( fileInfo );
  fileInfo = NULL;
  delete mpiComm;
  mpiComm = 0;

  if (indepIO != H5P_DEFAULT)
    H5Pclose( indepIO );
  if (collIO != indepIO)
    H5Pclose( collIO );
  collIO = indepIO = H5P_DEFAULT;

  delete [] setMeta;
  setMeta = 0;

  mhdf_Status status;
  mhdf_closeFile( filePtr, &status );
  filePtr = 0;
  return is_error(status) ? MB_FAILURE : MB_SUCCESS;
}
ErrorCode moab::ReadHDF5::convert_id_to_handle ( EntityHandle *  in_out_array,
size_t  array_length 
)

Definition at line 3402 of file ReadHDF5.cpp.

{
  convert_id_to_handle( array, size, idMap );
  return MB_SUCCESS;
}
void moab::ReadHDF5::convert_id_to_handle ( EntityHandle *  in_out_array,
size_t  array_length,
size_t &  array_length_out 
) const [inline]

Definition at line 324 of file ReadHDF5.hpp.

    { return convert_id_to_handle( in_out_array, array_length, array_length_out, idMap ); }
void moab::ReadHDF5::convert_id_to_handle ( EntityHandle *  in_out_array,
size_t  array_length,
const RangeMap< long, EntityHandle > &  id_map 
) [static]

Definition at line 3409 of file ReadHDF5.cpp.

{
  for (EntityHandle* const end = array + size; array != end; ++array)
    *array = id_map.find( *array );
}
void moab::ReadHDF5::convert_id_to_handle ( EntityHandle *  in_out_array,
size_t  array_length,
size_t &  array_length_out,
const RangeMap< long, EntityHandle > &  id_map 
) [static]

Definition at line 3417 of file ReadHDF5.cpp.

{
  RangeMap<long,EntityHandle>::const_iterator it;
  new_size = 0;
  for (size_t i = 0; i < size; ++i) {
    it = id_map.lower_bound( array[i] );
    if (it != id_map.end() && it->begin <= (long)array[i])
      array[new_size++] = it->value + (array[i] - it->begin);
  }
}
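
A short sketch of this compacting overload; the map contents and IDs below are hypothetical:

  RangeMap< long, EntityHandle > id_map;
  id_map.insert( 1, 1001, 10 );        // file IDs 1..10 -> handles 1001..1010
  EntityHandle ids[3] = { 3, 42, 7 };  // 42 is not in the map
  size_t new_len;
  ReadHDF5::convert_id_to_handle( ids, 3, new_len, id_map );
  // new_len == 2; ids[0] == 1003, ids[1] == 1007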
ErrorCode moab::ReadHDF5::convert_range_to_handle ( const EntityHandle *  ranges,
size_t  num_ranges,
Range &  merge 
)

Definition at line 3470 of file ReadHDF5.cpp.

{
  convert_range_to_handle( array, num_ranges, idMap, range );
  return MB_SUCCESS;
}
void moab::ReadHDF5::convert_range_to_handle ( const EntityHandle *  ranges,
size_t  num_ranges,
const RangeMap< long, EntityHandle > &  id_map,
Range &  merge 
) [static]

Definition at line 3430 of file ReadHDF5.cpp.

{
  RangeMap<long,EntityHandle>::iterator it = id_map.begin();
  Range::iterator hint = merge.begin();
  for (size_t i = 0; i < num_ranges; ++i) {
    long id = ranges[2*i];
    const long end = id + ranges[2*i+1];
      // we assume that 'ranges' is sorted, but check just in case it isn't.
    if (it == id_map.end() || it->begin > id)
      it = id_map.begin();
    it = id_map.lower_bound( it, id_map.end(), id );
    if (it == id_map.end())
      continue;
    if (id < it->begin)
      id = it->begin;
    while (id < end) {
      if (id < it->begin) id = it->begin;
      const long off = id - it->begin;
      long count = std::min( it->count - off,  end - id );
      // it is possible that this new subrange is starting after the end
      // it will result in negative count, which does not make sense
      // we are done with this range, go to the next one
      if (count <= 0)
        break;
      hint = merge.insert( hint, it->value + off, it->value + off + count - 1 );
      id += count;
      if (id < end)
      {
        if (++it == id_map.end())
          break;
        if (it->begin > end)
          break; //
      }
    }
  }
}
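
A sketch of the range-pair form, where the input array holds (start, count) pairs of file IDs; the values are hypothetical:

  RangeMap< long, EntityHandle > id_map;
  id_map.insert( 10, 500, 20 );       // file IDs 10..29 -> handles 500..519
  EntityHandle pairs[2] = { 12, 5 };  // one pair: file IDs 12..16
  Range out;
  ReadHDF5::convert_range_to_handle( pairs, 1, id_map, out );
  // out now holds handles 502..506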
ErrorCode moab::ReadHDF5::create_tag ( const mhdf_TagDesc &  info,
Tag &  handle,
hid_t &  type 
) [private]

Create new tag or verify type matches existing tag.

Definition at line 2840 of file ReadHDF5.cpp.

{

  CHECK_OPEN_HANDLES;

  ErrorCode rval;
  mhdf_Status status;
  TagType storage;
  DataType mb_type;
  bool re_read_default = false;

  switch (info.storage) {
    case mhdf_DENSE_TYPE : storage = MB_TAG_DENSE ; break;
    case mhdf_SPARSE_TYPE: storage = MB_TAG_SPARSE; break;
    case mhdf_BIT_TYPE   : storage = MB_TAG_BIT;    break;
    case mhdf_MESH_TYPE  : storage = MB_TAG_MESH;   break;
    default:
      readUtil->report_error( "Invalid storage type for tag '%s': %d\n", info.name, info.storage );
      return error(MB_FAILURE);
  }

    // Type-specific stuff
  if (info.type == mhdf_BITFIELD) {
    if (info.size < 1 || info.size > 8)
    {
      readUtil->report_error( "Invalid bit tag:  class is MB_TAG_BIT, num bits = %d\n", info.size );
      return error(MB_FAILURE);
    }
    hdf_type = H5Tcopy(H5T_NATIVE_B8);
    mb_type = MB_TYPE_BIT;
    if (hdf_type < 0)
      return error(MB_FAILURE);
  }
  else if (info.type == mhdf_OPAQUE) {
    mb_type = MB_TYPE_OPAQUE;

      // Check for user-provided type
    Tag type_handle;
    std::string tag_type_name = "__hdf5_tag_type_";
    tag_type_name += info.name;
    rval = iFace->tag_get_handle( tag_type_name.c_str(), sizeof(hid_t), MB_TYPE_OPAQUE, type_handle );
    if (MB_SUCCESS == rval) {
      EntityHandle root = 0;
      rval = iFace->tag_get_data( type_handle, &root, 1, &hdf_type );
      if (MB_SUCCESS != rval)
        return error(rval);
      hdf_type = H5Tcopy( hdf_type );
      re_read_default = true;
    }
    else if (MB_TAG_NOT_FOUND == rval) {
      hdf_type = 0;
    }
    else
      return error(rval);
      
    if (hdf_type < 0)
      return error(MB_FAILURE);
  }
  else {
    switch (info.type)
    {
      case mhdf_INTEGER:
        hdf_type = H5T_NATIVE_INT;
        mb_type = MB_TYPE_INTEGER;
        break;

      case mhdf_FLOAT:
        hdf_type = H5T_NATIVE_DOUBLE;
        mb_type = MB_TYPE_DOUBLE;
        break;

      case mhdf_BOOLEAN:
        hdf_type = H5T_NATIVE_UINT;
        mb_type = MB_TYPE_INTEGER;
        break;

      case mhdf_ENTITY_ID:
        hdf_type = handleType;
        mb_type = MB_TYPE_HANDLE;
        break;

      default:
        return error(MB_FAILURE);
    }
    
    if (info.size > 1) { // array
        hsize_t tmpsize = info.size;
#if defined(H5Tarray_create_vers) && H5Tarray_create_vers > 1  
        hdf_type = H5Tarray_create2( hdf_type, 1, &tmpsize );
#else
        hdf_type = H5Tarray_create( hdf_type, 1, &tmpsize, NULL );
#endif
    }
    else {
      hdf_type = H5Tcopy( hdf_type );
    }
    if (hdf_type < 0)
      return error(MB_FAILURE);
  }

  
    // If default or global/mesh value in file, read it.
  if (info.default_value || info.global_value)
  {
    if (re_read_default) {
      mhdf_getTagValues( filePtr, info.name, hdf_type, info.default_value, info.global_value, &status );
      if (mhdf_isError( &status ))
      {
        readUtil->report_error( "%s", mhdf_message( &status ) );
        if (hdf_type) H5Tclose( hdf_type );
        return error(MB_FAILURE);
      }
    }
    
    if (MB_TYPE_HANDLE == mb_type) {
      if (info.default_value) {
        rval = convert_id_to_handle( (EntityHandle*)info.default_value, info.default_value_size );
        if (MB_SUCCESS != rval) {
          if (hdf_type) H5Tclose( hdf_type );
          return error(rval);
        }
      }
      if (info.global_value) {
        rval = convert_id_to_handle( (EntityHandle*)info.global_value, info.global_value_size );
        if (MB_SUCCESS != rval) {
          if (hdf_type) H5Tclose( hdf_type );
          return error(rval);
        }
      }
    }
  }
  
    // get tag handle, creating if necessary
  if (info.size < 0) 
    rval = iFace->tag_get_handle( info.name, info.default_value_size, 
                                  mb_type, handle, storage|MB_TAG_CREAT|MB_TAG_VARLEN|MB_TAG_DFTOK,
                                  info.default_value );
  else
    rval = iFace->tag_get_handle( info.name, info.size, mb_type, handle,
                                  storage|MB_TAG_CREAT|MB_TAG_DFTOK, info.default_value );
  if (MB_SUCCESS != rval)
  {
    readUtil->report_error( "Tag type in file does not match type in "
                            "database for \"%s\"\n", info.name );
    if (hdf_type) H5Tclose( hdf_type );
    return error(MB_FAILURE);
  }
    
  if (info.global_value) {
    EntityHandle root = 0;
    if (info.size > 0) { // fixed-length tag
      rval = iFace->tag_set_data( handle, &root, 1, info.global_value );
    }
    else {
      int tag_size = info.global_value_size;
      rval = iFace->tag_set_by_ptr( handle, &root, 1, &info.global_value, &tag_size );
    }
    if (MB_SUCCESS != rval) {
      if (hdf_type) H5Tclose( hdf_type );
      return error(rval);
    }
  }
  
  return MB_SUCCESS;
}
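
The opaque branch above looks for a user-registered HDF5 type stored on the root set (handle 0) under the tag name "__hdf5_tag_type_" + tag name. A hedged sketch of registering such a type before reading; the tag name "my_data" and the 16-byte opaque type are hypothetical:

  hid_t my_type = H5Tcreate( H5T_OPAQUE, 16 );  // hypothetical file type
  Tag type_tag;
  iface->tag_get_handle( "__hdf5_tag_type_my_data", sizeof(hid_t),
                         MB_TYPE_OPAQUE, type_tag,
                         MB_TAG_SPARSE | MB_TAG_CREAT );
  EntityHandle root = 0;                        // the type handle lives on the root set
  iface->tag_set_data( type_tag, &root, 1, &my_type );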
void moab::ReadHDF5::debug_barrier_line ( int  lineno) [private]

Definition at line 158 of file ReadHDF5.cpp.

{
#ifdef USE_MPI
  if (mpiComm) {
    const unsigned threshold = 2;
    static unsigned long count = 0;
    if (dbgOut.get_verbosity() >= threshold) {
      dbgOut.printf( threshold, "*********** Debug Barrier %lu (@%d)***********\n", ++count, lineno);
      MPI_Barrier( *mpiComm );
    }
  }
#else
  if (lineno) {}
#endif
}
ErrorCode moab::ReadHDF5::delete_non_side_elements ( const Range &  side_ents) [private]

Clean up elements that were a) read because we had read all of the nodes and b) weren't actually sides of the top-dimension elements we were trying to read.

Definition at line 1853 of file ReadHDF5.cpp.

{
  ErrorCode rval;

  // build list of entities that we need to find the sides of
  Range explicit_ents;
  Range::iterator hint = explicit_ents.begin();
  for (IDMap::iterator i = idMap.begin(); i != idMap.end(); ++i) {
    EntityHandle start = i->value;
    EntityHandle end = i->value + i->count - 1;
    EntityType type = TYPE_FROM_HANDLE(start);
    assert( type == TYPE_FROM_HANDLE(end) ); // otherwise handle space entirely full!!
    if (type != MBVERTEX && type != MBENTITYSET)
      hint = explicit_ents.insert( hint, start, end );
  }
  explicit_ents = subtract( explicit_ents, side_ents );
  
    // figure out which entities we want to delete
  Range dead_ents( side_ents );
  Range::iterator ds, de, es;
  ds = dead_ents.lower_bound( CN::TypeDimensionMap[1].first );
  de = dead_ents.lower_bound( CN::TypeDimensionMap[2].first, ds );
  if (ds != de) {
    // get subset of explicit ents of dimension greater than 1
    es = explicit_ents.lower_bound( CN::TypeDimensionMap[2].first );
    Range subset, adj;
    subset.insert( es, explicit_ents.end() );
    rval = iFace->get_adjacencies( subset, 1, false, adj, Interface::UNION );
    if (MB_SUCCESS != rval)
      return rval;
    dead_ents = subtract( dead_ents, adj );
  }
  ds = dead_ents.lower_bound( CN::TypeDimensionMap[2].first );
  de = dead_ents.lower_bound( CN::TypeDimensionMap[3].first, ds );
  assert(de == dead_ents.end());
  if (ds != de) {
    // get subset of explicit ents of dimension 3
    es = explicit_ents.lower_bound( CN::TypeDimensionMap[3].first );
    Range subset, adj;
    subset.insert( es, explicit_ents.end() );
    rval = iFace->get_adjacencies( subset, 2, false, adj, Interface::UNION );
    if (MB_SUCCESS != rval)
      return rval;
    dead_ents = subtract( dead_ents, adj );
  }
  
    // now delete anything remaining in dead_ents
  dbgOut.printf( 2, "Deleting %lu elements\n", (unsigned long)dead_ents.size() );
  dbgOut.print( 4, "\tDead entities: ", dead_ents );
  rval = iFace->delete_entities( dead_ents );
  if (MB_SUCCESS != rval)
    return error(rval);
  
    // remove dead entities from ID map
  while (!dead_ents.empty()) {
    EntityHandle start = dead_ents.front();
    EntityID count = dead_ents.const_pair_begin()->second - start + 1;
    IDMap::iterator rit;
    for (rit = idMap.begin(); rit != idMap.end(); ++rit) 
      if (rit->value <= start && (long)(start - rit->value) < rit->count)
        break;
    if (rit == idMap.end())
      return error(MB_FAILURE);
  
    EntityID offset = start - rit->value;
    EntityID avail = rit->count - offset;
    if (avail < count)
      count = avail;
    
    dead_ents.erase( dead_ents.begin(), dead_ents.begin() + count );
    idMap.erase( rit->begin + offset, count );
  }
  
  return MB_SUCCESS;
}

ReaderIface * moab::ReadHDF5::factory ( Interface *  iface ) [static]

Definition at line 204 of file ReadHDF5.cpp.

  { return new ReadHDF5( iface ); }
ErrorCode moab::ReadHDF5::find_int_tag ( const char *  name,
int &  index_out 
) [private]

Find index in mhdf_FileDesc* fileInfo for specified tag name.

Given a tag name, find its index in fileInfo and verify that each tag value is a single integer.

Definition at line 717 of file ReadHDF5.cpp.

{
  for (index = 0; index < fileInfo->num_tag_desc; ++index) 
    if (!strcmp( name, fileInfo->tags[index].name))
      break;

  if (index == fileInfo->num_tag_desc) {
    readUtil->report_error( "File does not contain subset tag '%s'", name );
    return error(MB_TAG_NOT_FOUND);
  }

  if (fileInfo->tags[index].type != mhdf_INTEGER ||
      fileInfo->tags[index].size != 1) {
    readUtil->report_error( "Tag '%s' does not containa single integer value", name );
    return error(MB_TYPE_OUT_OF_RANGE);
  }
  
  return MB_SUCCESS;
}
ErrorCode moab::ReadHDF5::find_sets_containing ( Range &  sets_out,
bool  read_set_containing_parents 
) [private]

Find all sets containing one or more entities read from the file and added to idMap.

Definition at line 2149 of file ReadHDF5.cpp.

{
  ErrorCode rval;
  mhdf_Status status;

  CHECK_OPEN_HANDLES;

  if (!fileInfo->have_set_contents)
    return MB_SUCCESS;
  assert( fileInfo->sets.count );

    // open data tables
  long content_len = 0;
  hid_t content_handle = mhdf_openSetData( filePtr, &content_len, &status );
  if (is_error(status))
    return error(MB_FAILURE);

  hid_t data_type = H5Dget_type( content_handle );

  rval = find_sets_containing( content_handle, data_type, content_len, 
                               read_set_containing_parents, sets_out );
  
  H5Tclose( data_type );

  mhdf_closeData( filePtr, content_handle, &status );
  if(MB_SUCCESS == rval && is_error(status))
    return error(MB_FAILURE);
    
  return rval;
}
ErrorCode moab::ReadHDF5::find_sets_containing ( hid_t  contents_handle,
hid_t  content_type,
long  contents_len,
bool  read_set_containing_parents,
Range &  file_ids 
) [private]

Find file IDs of sets containing any entities in the passed id_map.

Definition at line 2215 of file ReadHDF5.cpp.

{

  CHECK_OPEN_HANDLES;
  
  // Scan all set contents data
  
  const size_t content_size = H5Tget_size( content_type );
  const long num_sets = fileInfo->sets.count;
  dbgOut.printf( 2, "Searching contents of %ld\n", num_sets ); 
  mhdf_Status status;

  int rank = 0;
  bool bcast = false;
#ifdef USE_MPI
  MPI_Comm comm = 0;
  if (nativeParallel) {
    rank = myPcomm->proc_config().proc_rank();
    comm = myPcomm->proc_config().proc_comm();
    bcast = bcastDuplicateReads;
  }
#endif
  
  // check offsets so that we don't read past end of table or
  // walk off end of array.
  long prev = -1;
  for (long  i = 0; i < num_sets; ++i) {
    if (setMeta[i][CONTENT] < prev) {
      std::cerr << "Invalid data in set contents offsets at position "
                << i << ": index " << setMeta[i][CONTENT] 
                << " is less than previous index " << prev << std::endl;
      std::cerr.flush();
      return error(MB_FAILURE);
    }
    prev = setMeta[i][CONTENT];
  }
  if (setMeta[num_sets-1][CONTENT] >= contents_len) {
    std::cerr << "Maximum set content index " << setMeta[num_sets-1][CONTENT]
              << " exceeds contents table length of " << contents_len
              << std::endl;
    std::cerr.flush();
    return error(MB_FAILURE);
  }

  // set up buffer for reading set contents 
  long* const content_buffer = (long*)dataBuffer;
  const long content_len = bufferSize / std::max( content_size, sizeof(long) );

    // scan set table  
  Range::iterator hint = file_ids.begin();
  Range tmp_range;
  long prev_idx = -1;
  int mm = 0;
  long sets_offset = 0;
  while (sets_offset < num_sets) {
    long sets_count = std::lower_bound( setMeta + sets_offset, 
                                        setMeta + num_sets,
                                        content_len + prev_idx,
                                        SetContOffComp() 
                                       ) - setMeta - sets_offset;
    assert(sets_count >= 0 && sets_offset + sets_count <= num_sets);
    if (!sets_count) { // contents of single set don't fit in buffer
      long content_remaining = setMeta[sets_offset][CONTENT] - prev_idx;
      long content_offset = prev_idx+1;
      while (content_remaining) {
        long content_count = content_len < content_remaining ?
                             2*(content_len/2) : content_remaining;
        assert_range( content_buffer, content_count );
        dbgOut.printf( 3, "Reading chunk %d (%ld values) from set contents table\n", ++mm, content_count);
        if (!bcast || 0 == rank) {
          if (!bcast)
            mhdf_readSetDataWithOpt( contents_handle, content_offset,
                                     content_count, content_type, 
                                     content_buffer, collIO, &status );
          else 
            mhdf_readSetData( contents_handle, content_offset,
                              content_count, content_type, 
                              content_buffer, &status );
          if (is_error(status))
            return error(MB_FAILURE);

          H5Tconvert( content_type, H5T_NATIVE_LONG, content_count, content_buffer, 0, H5P_DEFAULT );
        }
        if (bcast) {
          #ifdef USE_MPI
            int ierr = MPI_Bcast( content_buffer, content_count, MPI_LONG, 0, comm );
            if (MPI_SUCCESS != ierr)
              return error(MB_FAILURE);
          #else
            assert(rank == 0); // if not MPI, then only one proc
          #endif
        }
        
        if (read_set_containing_parents) {
          tmp_range.clear();
          if (setMeta[sets_offset][3] & mhdf_SET_RANGE_BIT) tmp_range.insert(*content_buffer, *(content_buffer+1));
          else std::copy(content_buffer, content_buffer+content_count, range_inserter(tmp_range));
          tmp_range = intersect(tmp_range, file_ids);
        }

        if (!tmp_range.empty() ||
            set_map_intersect( setMeta[sets_offset][3] & mhdf_SET_RANGE_BIT,
                               content_buffer, content_count, idMap )) {
          long id = fileInfo->sets.start_id + sets_offset;
          hint = file_ids.insert( hint, id, id );
          if (!nativeParallel) // don't stop if doing READ_PART because we need to read collectively
            break;
        }
        content_remaining -= content_count;
        content_offset += content_count;
      }
      prev_idx = setMeta[sets_offset][CONTENT];
      sets_count = 1;
    }
    else if (long read_num = setMeta[sets_offset + sets_count - 1][CONTENT] - prev_idx) {
      assert(sets_count > 0);
      assert_range( content_buffer, read_num );
      dbgOut.printf( 3, "Reading chunk %d (%ld values) from set contents table\n", ++mm, read_num);
      if (!bcast || 0 == rank) {
        if (!bcast)
          mhdf_readSetDataWithOpt( contents_handle, prev_idx+1, read_num, 
                                   content_type, content_buffer, collIO, &status );
        else
          mhdf_readSetData( contents_handle, prev_idx+1, read_num, 
                            content_type, content_buffer, &status );
        if (is_error(status))
          return error(MB_FAILURE);
       
        H5Tconvert( content_type, H5T_NATIVE_LONG, read_num, content_buffer, 0, H5P_DEFAULT );
      }
      if (bcast) {
        #ifdef USE_MPI
          int ierr = MPI_Bcast( content_buffer, read_num, MPI_LONG, 0, comm );
          if (MPI_SUCCESS != ierr)
            return error(MB_FAILURE);
        #else
          assert(rank == 0); // if not MPI, then only one proc
        #endif
      }

      long* buff_iter = content_buffer;
      for (long i = 0; i < sets_count; ++i) {
        long set_size = setMeta[i+sets_offset][CONTENT] - prev_idx;
        prev_idx += set_size;

          // check whether contents include set already being loaded
        if (read_set_containing_parents) {
          tmp_range.clear();
          if (setMeta[sets_offset+i][3] & mhdf_SET_RANGE_BIT) tmp_range.insert(*buff_iter, *(buff_iter+1));
          else std::copy(buff_iter, buff_iter+set_size, range_inserter(tmp_range));
          tmp_range = intersect(tmp_range, file_ids);
        }
        
        if (!tmp_range.empty() ||
            set_map_intersect( setMeta[sets_offset+i][3] & mhdf_SET_RANGE_BIT,
                               buff_iter, set_size, idMap )) {
          long id = fileInfo->sets.start_id + sets_offset + i;
          hint = file_ids.insert( hint, id, id );
        }
        buff_iter += set_size;
      }
    }

    sets_offset += sets_count;
  }
  
  return MB_SUCCESS;
}
ErrorCode moab::ReadHDF5::get_partition ( Range &  tmp_file_ids,
int  num_parts,
int  part_number 
) [private]

Remove all but the specified fraction of sets from the passed range.

Select a subset of the gathered set of file ids to read in based on communicator size and rank.

Parameters:
tmp_file_ids  As input: sets to be read on all procs. As output: sets to read on this proc.
num_parts  Communicator size.
part_number  Communicator rank.

Definition at line 775 of file ReadHDF5.cpp.

{    

  CHECK_OPEN_HANDLES;

     // check that the tag only identified sets
   if ((unsigned long)fileInfo->sets.start_id > tmp_file_ids.front()) {
     dbgOut.print(2,"Ignoreing non-set entities with partition set tag\n");
     tmp_file_ids.erase( tmp_file_ids.begin(), 
                         tmp_file_ids.lower_bound( 
                           (EntityHandle)fileInfo->sets.start_id ) );
   }
   unsigned long set_end = (unsigned long)fileInfo->sets.start_id + fileInfo->sets.count;
   if (tmp_file_ids.back() >= set_end) {
     dbgOut.print(2,"Ignoreing non-set entities with partition set tag\n");
     tmp_file_ids.erase( tmp_file_ids.upper_bound( (EntityHandle)set_end ),
                         tmp_file_ids.end() );
   }
      
  Range::iterator s = tmp_file_ids.begin();
  size_t num_per_proc = tmp_file_ids.size() / num_parts;
  size_t num_extra = tmp_file_ids.size() % num_parts;
  Range::iterator e;
  if (part_number < (long)num_extra) {
    s += (num_per_proc+1) * part_number;
    e = s;
    e += (num_per_proc+1);
  }
  else {
    s += num_per_proc * part_number + num_extra;
    e = s;
    e += num_per_proc;
  }
  tmp_file_ids.erase(e, tmp_file_ids.end());
  tmp_file_ids.erase(tmp_file_ids.begin(), s);

  return MB_SUCCESS;
}
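
For example, with 10 sets and num_parts = 4: num_per_proc = 2 and num_extra = 2, so parts 0 and 1 each keep 3 sets while parts 2 and 3 each keep 2.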
ErrorCode moab::ReadHDF5::get_set_contents ( const Range &  sets,
Range &  file_ids 
) [private]

Get the file IDs for nodes and elements contained in sets.

Read the contents for the specified sets and return the file IDs of all nodes and elements contained within those sets.

Parameters:
sets  Container of file IDs designating entity sets.
file_ids  Output: File IDs of entities contained in sets.

Definition at line 2582 of file ReadHDF5.cpp.

{

  CHECK_OPEN_HANDLES;
  
  if (!fileInfo->have_set_contents)
    return MB_SUCCESS;
  dbgOut.tprint(2,"Reading set contained file IDs\n");
  try {
    mhdf_Status status;
    long content_len;
    hid_t contents = mhdf_openSetData( filePtr, &content_len, &status );
    if (is_error(status)) 
      return error(MB_FAILURE);
    ReadHDF5Dataset data( "set contents", contents, nativeParallel, mpiComm, true );
    

    return read_set_data( sets, 0, data, CONTENT, &file_ids );
  }
  catch (ReadHDF5Dataset::Exception) {
    return error(MB_FAILURE);
  }
}
ErrorCode moab::ReadHDF5::get_subset_ids ( const ReaderIface::IDTag *  subset_list,
int  subset_list_length,
Range &  file_ids_out 
) [private]

Given a list of tags and values, get the file ids for the corresponding entities in the file.

Definition at line 737 of file ReadHDF5.cpp.

{
  ErrorCode rval;

  for (int i = 0; i < subset_list_length; ++i) {  
    
    int tag_index;
    rval = find_int_tag( subset_list[i].tag_name, tag_index );
    if (MB_SUCCESS != rval)
      return error(rval);
  
    Range tmp_file_ids;
    if (!subset_list[i].num_tag_values) {
      rval = get_tagged_entities( tag_index, tmp_file_ids );
    }
    else {
      std::vector<int> ids( subset_list[i].tag_values, 
                            subset_list[i].tag_values + subset_list[i].num_tag_values );
      std::sort( ids.begin(), ids.end() );
      rval = search_tag_values( tag_index, ids, tmp_file_ids );
      if (MB_SUCCESS != rval)
        return error(rval);
    }
    
    if (tmp_file_ids.empty())
      return error(MB_ENTITY_NOT_FOUND);
    
    if (i == 0) 
      file_ids.swap( tmp_file_ids );
    else 
      file_ids = intersect( tmp_file_ids, file_ids );
  }
  
  return MB_SUCCESS;
}
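
A sketch of driving this path through load_file, reusing the reader from the earlier example; the tag name "MATERIAL_SET" and value 3 are hypothetical:

  int values[1] = { 3 };
  ReaderIface::IDTag tag;
  tag.tag_name = "MATERIAL_SET";
  tag.tag_values = values;
  tag.num_tag_values = 1;
  ReaderIface::SubsetList subset;
  subset.tag_list = &tag;
  subset.tag_list_length = 1;
  subset.num_parts = 0;          // no partitioning
  subset.part_number = 0;
  FileOptions opts( "" );
  ErrorCode rval = reader.load_file( "mesh.h5m", 0, opts, &subset );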
ErrorCode moab::ReadHDF5::get_tagged_entities ( int  tag_index,
Range &  file_ids_out 
) [private]

Search for entities with specified tag.

For parallel reads, this function does collective IO.

Parameters:
tag_index  Index into fileInfo->tags specifying which tag to search.
file_ids_out  File IDs for entities with specified tag values.

Definition at line 1285 of file ReadHDF5.cpp.

{
  const mhdf_TagDesc& tag = fileInfo->tags[tag_index];

  CHECK_OPEN_HANDLES;
   
    // do dense data
  Range::iterator hint = file_ids.begin();
  for (int i = 0; i < tag.num_dense_indices; ++i)
  {
    int idx = tag.dense_elem_indices[i];
    mhdf_EntDesc* ents;
    if (idx == -2)
      ents = &fileInfo->sets;
    else if (idx == -1) 
      ents = &fileInfo->nodes;
    else {
      if (idx < 0 || idx >= fileInfo->num_elem_desc) 
        return error(MB_FAILURE);
      ents = &(fileInfo->elems[idx].desc);
    }
    
    EntityHandle h = (EntityHandle)ents->start_id;
    hint = file_ids.insert( hint, h, h + ents->count - 1 );
  }
  
  if (!tag.have_sparse)
    return MB_SUCCESS;
  
    // do sparse data
    
  mhdf_Status status;
  hid_t tables[2]; 
  long size, junk; 
  mhdf_openSparseTagData( filePtr, tag.name, &size, &junk, tables, &status );
  if (is_error(status))
    return error(MB_FAILURE);
  mhdf_closeData( filePtr, tables[1], &status );
  if (is_error(status)) {
    mhdf_closeData( filePtr, tables[0], &status );
    return error(MB_FAILURE);
  }
  
  hid_t file_type = H5Dget_type( tables[0] );
  if (file_type < 0) 
    return error(MB_FAILURE);
  
  hint = file_ids.begin();
  EntityHandle* buffer = reinterpret_cast<EntityHandle*>(dataBuffer);
  const long buffer_size = bufferSize / std::max(sizeof(EntityHandle),H5Tget_size(file_type));
  long remaining = size, offset = 0;
  while (remaining) {
    long count = std::min( buffer_size, remaining );
    assert_range( buffer, count );
    mhdf_readSparseTagEntitiesWithOpt( *tables, offset, count, 
                                file_type, buffer, collIO, &status );
    if (is_error(status)) {
      H5Tclose(file_type);
      mhdf_closeData( filePtr, *tables, &status );
      return error(MB_FAILURE);
    }
    H5Tconvert( file_type, handleType, count, buffer, NULL, H5P_DEFAULT );
    
    std::sort( buffer, buffer + count );
    for (long i = 0; i < count; ++i)
      hint = file_ids.insert( hint, buffer[i], buffer[i] );
    
    remaining -= count;
    offset += count;
  }

  H5Tclose(file_type);
  mhdf_closeData( filePtr, *tables, &status );
  if (is_error(status))
    return error(MB_FAILURE);
  
  return MB_SUCCESS;  
}

ErrorCode moab::ReadHDF5::init ( ) [private]

Definition at line 226 of file ReadHDF5.cpp.

{
  ErrorCode rval;

  if (readUtil) 
    return MB_SUCCESS;
  
  indepIO = collIO = H5P_DEFAULT;
  //WriteHDF5::register_known_tag_types( iFace );
  
  handleType = H5Tcopy( H5T_NATIVE_ULONG );
  if (handleType < 0)
    return error(MB_FAILURE);
  
  if (H5Tset_size( handleType, sizeof(EntityHandle)) < 0)
  {
    H5Tclose( handleType );
    return error(MB_FAILURE);
  }
  
  rval = iFace->query_interface( readUtil );
  if (MB_SUCCESS != rval)
  {
    H5Tclose( handleType );
    return error(rval);
  }
  
  idMap.clear();
  fileInfo = 0;
  debugTrack = false;
  myPcomm = 0;
  
  return MB_SUCCESS;
}
ErrorCode moab::ReadHDF5::insert_in_id_map ( const Range &  file_ids,
EntityHandle  start_id 
)

Definition at line 3479 of file ReadHDF5.cpp.

{
  IDMap tmp_map;
  bool merge = !idMap.empty() && !file_ids.empty() && idMap.back().begin > (long)file_ids.front();
  IDMap& map = merge ? tmp_map : idMap;
  Range::const_pair_iterator p;
  for (p = file_ids.const_pair_begin(); p != file_ids.const_pair_end(); ++p) {
    size_t count = p->second - p->first + 1;
    if (!map.insert( p->first, start_id, count ).second) 
      return error(MB_FAILURE);
    start_id += count;
  }
  if (merge && !idMap.merge( tmp_map ))
    return error(MB_FAILURE);
  
  return MB_SUCCESS;
}
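
A sketch of this overload: each contiguous block of file IDs maps to handles starting at start_id, which advances by the block size (values hypothetical):

  Range file_ids;
  file_ids.insert( 1, 4 );    // file IDs 1..4
  file_ids.insert( 10, 11 );  // file IDs 10..11
  reader.insert_in_id_map( file_ids, (EntityHandle)100 );
  // file IDs 1..4 -> handles 100..103; 10..11 -> handles 104..105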

ErrorCode moab::ReadHDF5::insert_in_id_map ( long  file_id,
EntityHandle  handle 
)

Definition at line 3498 of file ReadHDF5.cpp.

{
  if (!idMap.insert( file_id, handle, 1 ).second) 
      return error(MB_FAILURE);
  return MB_SUCCESS;
}
int moab::ReadHDF5::is_error ( mhdf_Status &  status) [inline, private]

Definition at line 104 of file ReadHDF5.hpp.

                                             {
    int i;
    if ((i = mhdf_isError(&status))) 
      readUtil->report_error( "%s", mhdf_message(&status) );
    return i;
  }
ErrorCode moab::ReadHDF5::load_file ( const char *  file_name,
const EntityHandle *  file_set,
const FileOptions &  opts,
const SubsetList *  subset_list = 0,
const Tag *  file_id_tag = 0 
) [virtual]

Read mesh from the specified file.

Parameters:
file_name  The name of the file to read.
file_set  Optional set into which QA records are read, or NULL.
subset_list  Optional list of tags and values used to restrict the read to a subset of the file.
file_id_tag  Optional tag in which to store the file ID of each entity read.

Implements moab::ReaderIface.

Definition at line 573 of file ReadHDF5.cpp.

{
  ErrorCode rval;
 
  rval = set_up_read( filename, opts );
  if (MB_SUCCESS != rval) {
    clean_up_read(opts);
    return rval;
  }
    
    // We read the entire set description table regardless of partial
    // or complete reads or serial vs parallel reads
  rval = read_all_set_meta();
 
  if (subset_list && MB_SUCCESS == rval) 
    rval = load_file_partial( subset_list->tag_list, 
                              subset_list->tag_list_length, 
                              subset_list->num_parts,
                              subset_list->part_number,
                              opts );
  else
    rval = load_file_impl( opts );
    
  if (MB_SUCCESS == rval && file_id_tag) {
    dbgOut.tprint( 1, "Storing file IDs in tag\n" );
    rval = store_file_ids( *file_id_tag );
  }
  
  if (MB_SUCCESS == rval && 0 != file_set) {
    dbgOut.tprint( 1, "Reading QA records\n" );
    rval = read_qa( *file_set );
  }
  
  
  dbgOut.tprint( 1, "Cleaning up\n" );
  ErrorCode rval2 = clean_up_read( opts );
  if (rval == MB_SUCCESS && rval2 != MB_SUCCESS)
    rval = rval2;
  
  if (MB_SUCCESS == rval)
    dbgOut.tprint(1, "Read finished.\n");
  else {
    std::string msg;
    iFace->get_last_error(msg);
    dbgOut.tprintf(1,"READ FAILED (ERROR CODE %s): %s\n", ErrorCodeStr[rval], msg.c_str());
  }
  
  if (H5P_DEFAULT != collIO)
    H5Pclose( collIO );
  if (H5P_DEFAULT != indepIO)
    H5Pclose( indepIO );
  collIO = indepIO = H5P_DEFAULT;
  
  return rval;
}
ErrorCode moab::ReadHDF5::load_file_impl ( const FileOptions &  opts) [protected]

Definition at line 635 of file ReadHDF5.cpp.

{
  ErrorCode rval;
  mhdf_Status status;
  std::string tagname;
  int i;

  CHECK_OPEN_HANDLES;

  dbgOut.tprint(1, "Reading all nodes...\n");
  Range ids;
  if (fileInfo->nodes.count) {
    ids.insert( fileInfo->nodes.start_id,
                fileInfo->nodes.start_id + fileInfo->nodes.count - 1);
    rval = read_nodes( ids );
    if (MB_SUCCESS != rval)
      return error(rval);
  }


  dbgOut.tprint(1, "Reading all element connectivity...\n");
  std::vector<int> polyhedra; // need to do these last so that faces are loaded
  for (i = 0; i < fileInfo->num_elem_desc; ++i) {
    if (CN::EntityTypeFromName(fileInfo->elems[i].type) == MBPOLYHEDRON) {
      polyhedra.push_back(i);
      continue;
    }
    
    rval = read_elems( i );
    if (MB_SUCCESS != rval)
      return error(rval);
  }
  for (std::vector<int>::iterator it = polyhedra.begin();
       it != polyhedra.end(); ++it) {
    rval = read_elems( *it );
    if (MB_SUCCESS != rval)
      return error(rval);
  }
  
  dbgOut.tprint(1, "Reading all sets...\n");
  ids.clear();
  if (fileInfo->sets.count) {
    ids.insert( fileInfo->sets.start_id,
                fileInfo->sets.start_id + fileInfo->sets.count - 1);
    rval = read_sets( ids );
    if (rval != MB_SUCCESS) {
      return error(rval);
    }
  }
  
  dbgOut.tprint(1, "Reading all adjacencies...\n");
  for (i = 0; i < fileInfo->num_elem_desc; ++i) {
    if (!fileInfo->elems[i].have_adj)
      continue;
    
    long table_len;
    hid_t table = mhdf_openAdjacency( filePtr, 
                                      fileInfo->elems[i].handle,
                                      &table_len,
                                      &status );
    if (is_error(status))
      return error(MB_FAILURE);
      
    rval = read_adjacencies( table, table_len );
    mhdf_closeData( filePtr, table, &status );
    if (MB_SUCCESS != rval)
      return error(rval);
    if (is_error(status))
      return error(MB_FAILURE);
  }

  dbgOut.tprint(1, "Reading all tags...\n");
  for (i = 0; i < fileInfo->num_tag_desc; ++i) {
    rval = read_tag( i );
    if (MB_SUCCESS != rval)
      return error(rval);
  }
  
  dbgOut.tprint(1, "Core read finished.  Cleaning up...\n");
  return MB_SUCCESS;
}
ErrorCode moab::ReadHDF5::load_file_partial ( const ReaderIface::IDTag *  subset_list,
int  subset_list_length,
int  num_parts,
int  part_number,
const FileOptions &  opts 
) [protected]

Definition at line 815 of file ReadHDF5.cpp.

{
  mhdf_Status status;

  static MPEState mpe_event( "ReadHDF5", "yellow" );
 
  mpe_event.start( "gather parts" );

  CHECK_OPEN_HANDLES;
  
  for (int i = 0; i < subset_list_length; ++i) {
    dbgOut.printf( 2, "Select by \"%s\" with num_tag_values = %d\n",
                   subset_list[i].tag_name, subset_list[i].num_tag_values );
    if (subset_list[i].num_tag_values) {
      assert(0 != subset_list[i].tag_values);
      dbgOut.printf( 2, "  \"%s\" values = { %d",
        subset_list[i].tag_name, subset_list[i].tag_values[0] );
      for (int j = 1; j < subset_list[i].num_tag_values; ++j)
        dbgOut.printf( 2, ", %d", subset_list[i].tag_values[j] );
      dbgOut.printf(2," }\n");
    }
  }
  if (num_parts) 
    dbgOut.printf( 2, "Partition with num_parts = %d and part_number = %d\n", 
                   num_parts, part_number );
  
  dbgOut.tprint( 1, "RETRIEVING TAGGED ENTITIES\n" );
    
  Range file_ids;
  ErrorCode rval = get_subset_ids( subset_list, subset_list_length, file_ids );
  if (MB_SUCCESS != rval)
    return error(rval);
  
  if (num_parts) {
    rval = get_partition( file_ids, num_parts, part_number );
    if (MB_SUCCESS != rval)
      return error(rval);
  }

  dbgOut.print_ints( 4, "Set file IDs for partial read: ", file_ids );
  mpe_event.end();
  mpe_event.start( "gather related sets" );
  dbgOut.tprint( 1, "GATHERING ADDITIONAL ENTITIES\n" );
  
  enum RecusiveSetMode { RSM_NONE, RSM_SETS, RSM_CONTENTS };
  const char* const set_opts[] = { "NONE", "SETS", "CONTENTS", NULL };
  int child_mode;
  rval = opts.match_option( "CHILDREN", set_opts, child_mode );
  if (MB_ENTITY_NOT_FOUND == rval)
    child_mode = RSM_CONTENTS;
  else if (MB_SUCCESS != rval) {
    readUtil->report_error( "Invalid value for 'CHILDREN' option" );
    return error(rval);
  }
  int content_mode;
  rval = opts.match_option( "SETS", set_opts, content_mode );
  if (MB_ENTITY_NOT_FOUND == rval)
    content_mode = RSM_CONTENTS;
  else if (MB_SUCCESS != rval) {
    readUtil->report_error( "Invalid value for 'SETS' option" );
    return error(rval);
  }
  
    // If we want the contents of contained/child sets, 
    // search for them now (before gathering the non-set contents
    // of the sets.)
  Range sets;
  intersect( fileInfo->sets, file_ids, sets );
  if (content_mode == RSM_CONTENTS || child_mode == RSM_CONTENTS) {
    dbgOut.tprint( 1, "  doing read_set_ids_recursive\n" );
    rval = read_set_ids_recursive( sets, content_mode == RSM_CONTENTS, child_mode == RSM_CONTENTS );
    if (MB_SUCCESS != rval)
      return error(rval);
  }
  
  debug_barrier();
  
    // get elements and vertices contained in sets
  dbgOut.tprint( 1, "  doing get_set_contents\n" );
  rval = get_set_contents( sets, file_ids );
  if (MB_SUCCESS != rval)
    return error(rval);

  dbgOut.print_ints( 5, "File IDs for partial read: ", file_ids );
  debug_barrier();
  mpe_event.end();
  dbgOut.tprint( 1, "GATHERING NODE IDS\n" );
  
    // Figure out the maximum dimension of entity to be read
  int max_dim = 0;
  for (int i = 0; i < fileInfo->num_elem_desc; ++i) {
    EntityType type = CN::EntityTypeFromName( fileInfo->elems[i].type );
    if (type <= MBVERTEX || type >= MBENTITYSET) {
      assert( false ); // for debug code die for unknown element types
      continue; // for release code, skip unknown element types
    }
    int dim = CN::Dimension(type);
    if (dim > max_dim) {
      Range subset;
      intersect( fileInfo->elems[i].desc, file_ids, subset );
      if (!subset.empty())
        max_dim = dim;
    }
  }
#ifdef USE_MPI
  if (nativeParallel) {
    int send = max_dim;
    MPI_Allreduce( &send, &max_dim, 1, MPI_INT, MPI_MAX, *mpiComm );
  }
#endif
  
    // if input contained any polyhedra, then need to get faces
    // of the polyhedra before the next loop because we need to 
    // read said faces in that loop.
  for (int i = 0; i < fileInfo->num_elem_desc; ++i) {
    EntityType type = CN::EntityTypeFromName( fileInfo->elems[i].type );
    if (type != MBPOLYHEDRON)
      continue;
    
    debug_barrier();
    dbgOut.print( 2, "    Getting polyhedra faces\n" );
    mpe_event.start( "reading connectivity for ", fileInfo->elems[i].handle );
    
    Range polyhedra;
    intersect( fileInfo->elems[i].desc, file_ids, polyhedra );
    rval = read_elems( i, polyhedra, &file_ids );
    mpe_event.end(rval);
    if (MB_SUCCESS != rval)
      return error(rval);
  }
  
    // get node file ids for all elements
  Range nodes;
  intersect( fileInfo->nodes, file_ids, nodes );
  for (int i = 0; i < fileInfo->num_elem_desc; ++i) {
    EntityType type = CN::EntityTypeFromName( fileInfo->elems[i].type );
    if (type <= MBVERTEX || type >= MBENTITYSET) {
      assert( false ); // for debug code die for unknown element types
      continue; // for release code, skip unknown element types
    }
    if (MBPOLYHEDRON == type)
      continue;
      
    debug_barrier();
    dbgOut.printf( 2, "    Getting element node IDs for: %s\n", fileInfo->elems[i].handle );
    
    Range subset;
    intersect( fileInfo->elems[i].desc, file_ids, subset );
    mpe_event.start( "reading connectivity for ", fileInfo->elems[i].handle );
    
      // If dimension is max_dim, then we can create the elements now
      // so we don't have to read the table again later (connectivity 
      // will be fixed up after nodes are created when update_connectivity())
      // is called.  For elements of a smaller dimension, we just build
      // the node ID range now because a) we'll have to read the whole 
      // connectivity table again later, and b) we don't want to worry
      // about accidentally creating multiple copies of the same element.
    if (CN::Dimension(type) == max_dim)
      rval = read_elems( i, subset, &nodes );
    else
      rval = read_elems( i, subset, nodes );
    mpe_event.end(rval);
    if (MB_SUCCESS != rval)
      return error(rval);
  }
    
  debug_barrier();
  mpe_event.start( "read coords" );
  dbgOut.tprintf( 1, "READING NODE COORDINATES (%lu nodes in %lu selects)\n", 
                     (unsigned long)nodes.size(), (unsigned long)nodes.psize() );
  
    // Read node coordinates and create vertices in MOAB
    // NOTE:  This populates the RangeMap with node file ids,
    //        which is expected by read_node_adj_elems.
  rval = read_nodes( nodes );
  mpe_event.end(rval);
  if (MB_SUCCESS != rval)
    return error(rval);
 
  debug_barrier();
  dbgOut.tprint( 1, "READING ELEMENTS\n" );
 
    // decide if we need to read additional elements
  enum SideMode { SM_EXPLICIT, SM_NODES, SM_SIDES };
  int side_mode;
  const char* const options[] = { "EXPLICIT", "NODES", "SIDES", 0 };
  rval = opts.match_option( "ELEMENTS", options, side_mode );
  if (MB_ENTITY_NOT_FOUND == rval) {
      // If only nodes were specified, then default to "NODES", otherwise
      // default to "SIDES".
    if (0 == max_dim)
      side_mode = SM_NODES;
    else
      side_mode = SM_SIDES;
  }
  else if (MB_SUCCESS != rval) {
    readUtil->report_error( "Invalid value for 'ELEMENTS' option" );
    return error(rval);
  }
  
  if (side_mode == SM_SIDES /*ELEMENTS=SIDES*/ && max_dim == 0 /*node-based*/) {
      // Read elements until we find something.  Once we find something,
      // read only elements of the same dimension.  NOTE: loop termination
      // criterion changes on both sides (max_dim can be changed in loop
      // body).
    for (int dim = 3; dim >= max_dim; --dim) {
      for (int i = 0; i < fileInfo->num_elem_desc; ++i) {
        EntityType type = CN::EntityTypeFromName( fileInfo->elems[i].type );
        if (CN::Dimension(type) == dim) {
          debug_barrier();
          dbgOut.tprintf( 2, "    Reading node-adjacent elements for: %s\n", fileInfo->elems[i].handle );
          mpe_event.start( "reading connectivity for ", fileInfo->elems[i].handle );
          Range ents;
          rval = read_node_adj_elems( fileInfo->elems[i], &ents ); // pass &ents so the emptiness check below is meaningful
          mpe_event.end(rval);
          if (MB_SUCCESS != rval)
            return error(rval);
          if (!ents.empty())
            max_dim = 3;
        }
      }
    }
  }

  Range side_entities;
  if (side_mode != SM_EXPLICIT /*ELEMENTS=NODES || ELEMENTS=SIDES*/) {
    if (0 == max_dim)
      max_dim = 4;
      // now read any additional elements for which we've already read all
      // of the nodes.
    for (int dim = max_dim - 1; dim > 0; --dim) {
      for (int i = 0; i < fileInfo->num_elem_desc; ++i) {
        EntityType type = CN::EntityTypeFromName( fileInfo->elems[i].type );
        if (CN::Dimension(type) == dim) {
          debug_barrier();
          dbgOut.tprintf( 2, "    Reading node-adjacent elements for: %s\n", fileInfo->elems[i].handle );
          mpe_event.start( "reading connectivity for ", fileInfo->elems[i].handle );
          rval = read_node_adj_elems( fileInfo->elems[i], &side_entities );
          mpe_event.end(rval);
          if (MB_SUCCESS != rval)
            return error(rval);
        }
      }
    }
  }

    // We need to do this here for polyhedra to be handled correctly.
    // We have to wait until the faces are read in the above code block,
    // but need to create the connectivity before doing update_connectivity, 
    // which might otherwise delete polyhedra faces.
  debug_barrier();
  dbgOut.tprint( 1, "UPDATING CONNECTIVITY ARRAYS FOR READ ELEMENTS\n" );
  mpe_event.start( "updating connectivity for elements read before vertices");
  rval = update_connectivity();
  mpe_event.end();
  if (MB_SUCCESS != rval)
    return error(rval);

  
  dbgOut.tprint( 1, "READING ADJACENCIES\n" );
  for (int i = 0; i < fileInfo->num_elem_desc; ++i) {
    if (fileInfo->elems[i].have_adj &&
        idMap.intersects( fileInfo->elems[i].desc.start_id, fileInfo->elems[i].desc.count )) {
      mpe_event.start( "reading adjacencies for ", fileInfo->elems[i].handle );
      long len;
      hid_t th = mhdf_openAdjacency( filePtr, fileInfo->elems[i].handle, &len, &status );
      if (is_error(status))
        return error(MB_FAILURE);

      rval = read_adjacencies( th, len );
      mhdf_closeData( filePtr, th, &status );
      mpe_event.end(rval);
      if (MB_SUCCESS != rval)
        return error(rval);
    }
  }
  
    // If doing ELEMENTS=SIDES then we need to delete any entities
    // that we read that aren't actually sides (e.g. an interior face
    // that connects two disjoint portions of the part).  Both
    // update_connectivity and reading of any explicit adjacencies must
    // happen before this.
  if (side_mode == SM_SIDES) {
    debug_barrier();
    mpe_event.start( "cleaning up non-side lower-dim elements" );
    dbgOut.tprint( 1, "CHECKING FOR AND DELETING NON-SIDE ELEMENTS\n" );
    rval = delete_non_side_elements( side_entities );
    mpe_event.end(rval);
    if (MB_SUCCESS != rval)
      return error(rval);
  }
  
  debug_barrier();
  dbgOut.tprint( 1, "READING SETS\n" );
    
    // If reading contained/child sets but not their contents then find
    // them now. If we were also reading their contents we would
    // have found them already.
  if (content_mode == RSM_SETS || child_mode == RSM_SETS) {
    dbgOut.tprint( 1, "  doing read_set_ids_recursive\n" );
    mpe_event.start( "finding recursively contained sets" );
    rval = read_set_ids_recursive( sets, content_mode == RSM_SETS, child_mode == RSM_SETS );
    mpe_event.end(rval);
    if (MB_SUCCESS != rval)
      return error(rval);
  }
  
  dbgOut.tprint( 1, "  doing find_sets_containing\n" );
  mpe_event.start( "finding sets containing any read entities" );

    // decide whether to read set-containing parents
  bool read_set_containing_parents = true;
  std::string tmp_opt;
  rval = opts.get_option( "NO_SET_CONTAINING_PARENTS", tmp_opt );
  if (MB_SUCCESS == rval)
    read_set_containing_parents = false;
  
    // Append file IDs of sets containing any of the nodes or elements
    // we've read up to this point.
  rval = find_sets_containing( sets, read_set_containing_parents );
  mpe_event.end(rval);
  if (MB_SUCCESS != rval)
    return error(rval);
    // Now actually read all set data and instantiate sets in MOAB.
    // Get any contained sets out of file_ids.
  mpe_event.start( "reading set contents/parents/children" );
  EntityHandle first_set = fileInfo->sets.start_id;
  sets.merge( file_ids.lower_bound( first_set ),
              file_ids.lower_bound( first_set + fileInfo->sets.count ) );
  dbgOut.tprint( 1, "  doing read_sets\n" );
  rval = read_sets( sets );
  mpe_event.end(rval);
  if (MB_SUCCESS != rval)
    return error(rval);
  
  dbgOut.tprint( 1, "READING TAGS\n" );
  
  for (int i = 0; i < fileInfo->num_tag_desc; ++i) {
    mpe_event.start( "reading tag: ", fileInfo->tags[i].name );
    rval = read_tag( i );
    if (MB_SUCCESS != rval)
      return error(rval);
  }
  
  dbgOut.tprint( 1, "PARTIAL READ COMPLETE.\n" );
  
  return MB_SUCCESS;
}
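
The options recognized above are CHILDREN and SETS (each NONE, SETS, or CONTENTS), ELEMENTS (EXPLICIT, NODES, or SIDES), and the flag NO_SET_CONTAINING_PARENTS. A sketch of an options string, assuming FileOptions' usual ';' separator:

  FileOptions opts( "ELEMENTS=SIDES;CHILDREN=CONTENTS;SETS=CONTENTS" );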
Interface* moab::ReadHDF5::moab ( ) const [inline]

Definition at line 77 of file ReadHDF5.hpp.

    { return iFace; }
ErrorCode moab::ReadHDF5::read_adjacencies ( hid_t  table,
long  table_len 
) [private]

Definition at line 2606 of file ReadHDF5.cpp.

{

  CHECK_OPEN_HANDLES;

  ErrorCode rval;
  mhdf_Status status;

  debug_barrier();

  hid_t read_type = H5Dget_type( table );
  if (read_type < 0) 
    return error(MB_FAILURE);
  const bool convert = !H5Tequal( read_type, handleType );
  
  EntityHandle* buffer = (EntityHandle*)dataBuffer;
  size_t chunk_size = bufferSize / H5Tget_size(read_type);
  size_t remaining = table_len;
  size_t left_over = 0;
  size_t offset = 0;
  dbgOut.printf( 3, "Reading adjacency list in %lu chunks\n",
    (unsigned long)(remaining + chunk_size - 1)/chunk_size );
  int nn = 0;
  while (remaining)
  {
    dbgOut.printf( 3, "Reading chunk %d of adjacency list\n", ++nn );
  
    size_t count = std::min( chunk_size, remaining );
    count -= left_over;
    remaining -= count;
    
    assert_range( buffer + left_over, count );
    mhdf_readAdjacencyWithOpt( table, offset, count, read_type, buffer + left_over,
                               collIO, &status );
    if (is_error(status))
      return error(MB_FAILURE);
    
    if (convert) {
      herr_t err = H5Tconvert( read_type, handleType, count, buffer + left_over, 0, H5P_DEFAULT );
      if (err < 0)
        return error(MB_FAILURE);
    }
    
    EntityHandle* iter = buffer;
    EntityHandle* end = buffer + count + left_over;
    while (end - iter >= 3)
    {
      EntityHandle h = idMap.find( *iter++ );
      EntityHandle count2 = *iter++;
      if (!h) {
        iter += count2;
        continue;
      }

      if (count2 < 1)
        return error(MB_FAILURE);

      if (end < count2 + iter)
      {
        iter -= 2;
        break;
      }
      
      size_t valid;
      convert_id_to_handle( iter, count2, valid, idMap );
      rval = iFace->add_adjacencies( h, iter, valid, false );
      if (MB_SUCCESS != rval)
        return error(rval);
     
      iter += count2;
    }
    
    left_over = end - iter;
    assert_range( (char*)buffer, left_over );
    assert_range( (char*)iter, left_over );
    memmove( buffer, iter, left_over );
  }
  
  assert(!left_over);  // unexpected truncation of data
  
  return MB_SUCCESS;  
}
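
The loop above walks a flat stream of records of the form (entity file ID, adjacency count, adjacency IDs...), with special handling for records that straddle buffer boundaries (the left_over/memmove logic). A minimal plain-C++ sketch of that record layout, with made-up values and no MOAB dependencies, for illustration only:

#include <cstdio>
#include <vector>

typedef unsigned long Handle;  // stand-in for moab::EntityHandle

// Walk records of the form [entity_id, n, id_1, ..., id_n].
static void parse_adjacency_records( const std::vector<Handle>& table )
{
  size_t i = 0;
  while (table.size() - i >= 3) {      // smallest complete record is 3 values
    Handle entity = table[i++];
    Handle n      = table[i++];
    if (n < 1 || i + n > table.size())
      break;                           // malformed or truncated record
    std::printf( "entity %lu: %lu adjacencies\n", entity, n );
    i += n;                            // skip over the adjacency IDs
  }
}

int main()
{
  // two records: entity 10 adjacent to {7,8}; entity 11 adjacent to {9}
  Handle raw[] = { 10, 2, 7, 8, 11, 1, 9 };
  parse_adjacency_records( std::vector<Handle>( raw, raw + 7 ) );
  return 0;
}
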

ErrorCode moab::ReadHDF5::read_all_set_meta ( ) [private]

Definition at line 1998 of file ReadHDF5.cpp.

{
  CHECK_OPEN_HANDLES;

  assert(!setMeta);
  const long num_sets = fileInfo->sets.count;
  if (!num_sets)
    return MB_SUCCESS;
  
  mhdf_Status status;
  hid_t handle = mhdf_openSetMetaSimple( filePtr, &status );
  if (is_error(status)) {
    return error(MB_FAILURE);
  }
  
    // Allocate extra space if we need it for data conversion
  hid_t meta_type = H5Dget_type( handle );
  size_t size = H5Tget_size( meta_type );
  if (size > sizeof(long)) 
    setMeta = new long[(num_sets * size + (sizeof(long)-1)) / sizeof(long)][4];
   else
    setMeta = new long[num_sets][4];

    // set some parameters based on whether or not each proc reads the
    // table or only the root reads it and bcasts it to the others
  int rank = 0;
  bool bcast = false;
  hid_t ioprop = H5P_DEFAULT;
#ifdef USE_MPI
  MPI_Comm comm = 0;
  if (nativeParallel) {
    rank = myPcomm->proc_config().proc_rank();
    comm = myPcomm->proc_config().proc_comm();
    bcast = bcastDuplicateReads;
    if (!bcast)
      ioprop = collIO;
  }
#endif

  if (!bcast || 0 == rank) {
    mhdf_readSetMetaWithOpt( handle, 0, num_sets, meta_type, setMeta, ioprop, &status );
    if (is_error(status)) {
      H5Tclose( meta_type );
      mhdf_closeData( filePtr, handle, &status );
      return error(MB_FAILURE);
    }
  
     H5Tconvert( meta_type, H5T_NATIVE_LONG, num_sets*4, setMeta, 0, H5P_DEFAULT );
  }
  mhdf_closeData( filePtr, handle, &status );
  if (is_error(status))
    return error(MB_FAILURE);
  H5Tclose( meta_type );
  
  if (bcast) {
#ifdef USE_MPI
    int ierr = MPI_Bcast( setMeta, num_sets*4, MPI_LONG, 0, comm );
    if (MPI_SUCCESS != ierr)
      return error(MB_FAILURE);
#else
    assert(rank == 0); // if not MPI, then only one proc
#endif
  }
  
  return MB_SUCCESS;
}
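
The rank/bcast logic above avoids duplicate reads: when bcastDuplicateReads is set, only the root issues the HDF5 read and then broadcasts the table to the other ranks. A minimal MPI sketch of that pattern; fill_table() is a hypothetical stand-in for the mhdf read:

#include <mpi.h>
#include <vector>

static void fill_table( std::vector<long>& t ) // placeholder for the file read
{
  for (size_t i = 0; i < t.size(); ++i)
    t[i] = (long)i;
}

int main( int argc, char* argv[] )
{
  MPI_Init( &argc, &argv );
  int rank;
  MPI_Comm_rank( MPI_COMM_WORLD, &rank );

  const long num_sets = 1000;
  std::vector<long> set_meta( num_sets * 4 ); // 4 longs of metadata per set

  if (rank == 0)              // only the root touches the file
    fill_table( set_meta );
  MPI_Bcast( &set_meta[0], num_sets * 4, MPI_LONG, 0, MPI_COMM_WORLD );

  MPI_Finalize();
  return 0;
}
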
ErrorCode moab::ReadHDF5::read_dense_tag ( Tag  tag_handle,
const char *  ent_name,
hid_t  hdf_read_type,
hid_t  data_table,
long  start_id,
long  count 
) [private]

Read dense tag for all entities.

Definition at line 3008 of file ReadHDF5.cpp.

{

  CHECK_OPEN_HANDLES;

  ErrorCode rval;
  DataType mb_type;
  
  rval = iFace->tag_get_data_type( tag_handle, mb_type );
  if (MB_SUCCESS != rval) 
    return error(rval);

  
  int read_size;
  rval = iFace->tag_get_bytes( tag_handle, read_size );
  if (MB_SUCCESS != rval) // wrong function for variable-length tags
    return error(rval);
  //if (MB_TYPE_BIT == mb_type) 
  //  read_size = (read_size + 7)/8; // convert bits to bytes, plus 7 for ceiling
    
  if (hdf_read_type) { // if not opaque
    hsize_t hdf_size = H5Tget_size( hdf_read_type );
    if (hdf_size != (hsize_t)read_size) 
      return error(MB_FAILURE);
  }
  
    // get actual entities read from file
  Range file_ids, handles;
  Range::iterator f_ins = file_ids.begin(), h_ins = handles.begin();
  IDMap::iterator l, u;
  l = idMap.lower_bound( start_id );
  u = idMap.lower_bound( start_id + num_values - 1 );
  if (l != idMap.end() && start_id + num_values > l->begin) {
    if (l == u) {
      size_t beg = std::max(start_id, l->begin);
      size_t end = std::min(start_id + num_values, u->begin + u->count) - 1;
      f_ins = file_ids.insert( f_ins, beg, end );
      h_ins = handles.insert( h_ins, l->value + (beg - l->begin),
                                     l->value + (end - l->begin) );
    }
    else {
      size_t beg = std::max(start_id, l->begin);
      f_ins = file_ids.insert( f_ins, beg, l->begin + l->count - 1 );
      h_ins = handles.insert( h_ins, l->value + (beg - l->begin), l->value + l->count - 1 );
      for (++l; l != u; ++l) {
        f_ins = file_ids.insert( f_ins, l->begin, l->begin + l->count - 1 );
        h_ins = handles.insert( h_ins, l->value, l->value + l->count - 1 );
      }
      if (u != idMap.end() && u->begin < start_id + num_values) {
        size_t end = std::min( start_id + num_values, u->begin + u->count - 1 );
        f_ins = file_ids.insert( f_ins, u->begin, end );
        h_ins = handles.insert( h_ins, u->value, u->value + end - u->begin );
      }
    }
  }
  
    // Given that all of the entities for this dense tag data should
    // have been created as a single contiguous block, the resulting
    // MOAB handle range should be contiguous. 
    // THE ABOVE IS NOT NECESSARILY TRUE.  SOMETIMES LOWER-DIMENSION
    // ENTS ARE READ AND THEN DELETED FOR PARTIAL READS.
  //assert( handles.empty() || handles.size() == (handles.back() - handles.front() + 1));
  
  std::string tn("<error>");
  iFace->tag_get_name( tag_handle, tn );
  tn += " data for ";
  tn += ent_name;
  try {
    h_ins = handles.begin();
    ReadHDF5Dataset reader( tn.c_str(), data, nativeParallel, mpiComm, false );
    long buffer_size = bufferSize / read_size;
    reader.set_file_ids( file_ids, start_id, buffer_size, hdf_read_type );
    dbgOut.printf( 3, "Reading dense data for tag \"%s\" and group \"%s\" in %lu chunks\n",
                       tn.c_str(), ent_name, reader.get_read_count() );
    int nn = 0;
    while (!reader.done()) {
      dbgOut.printf( 3, "Reading chunk %d of \"%s\" data\n", ++nn, tn.c_str() );
    
      size_t count;
      reader.read( dataBuffer, count );

      if (MB_TYPE_HANDLE == mb_type) {
        rval = convert_id_to_handle( (EntityHandle*)dataBuffer, count * read_size / sizeof(EntityHandle) );
        if (MB_SUCCESS != rval)
          return error(rval);
      }

      Range ents;
      Range::iterator end = h_ins;
      end += count;
      ents.insert( h_ins, end );
      h_ins = end;

      rval = iFace->tag_set_data( tag_handle, ents, dataBuffer );
      if (MB_SUCCESS != rval) {
        dbgOut.printf(1,"Internal error setting data for tag \"%s\"\n", tn.c_str());
        return error(rval);
      }
    }
  }
  catch (ReadHDF5Dataset::Exception) {
    dbgOut.printf(1,"Internal error reading dense data for tag \"%s\"\n",tn.c_str());
    return error(MB_FAILURE);
  }
    
  return MB_SUCCESS;
}
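
The idMap intersection at the top of read_dense_tag builds parallel ranges of file IDs and handles from the overlap of the dense table's ID block with the map of already-created entities. A simplified sketch of that intersection; a sorted vector of blocks stands in for moab::RangeMap, and the values are made up:

#include <algorithm>
#include <cstdio>
#include <vector>

struct Block { long begin; long count; long value; }; // file IDs -> handles

int main()
{
  std::vector<Block> id_map;            // sorted by .begin, non-overlapping
  Block b1 = { 10, 5, 100 };            // file IDs 10..14 -> handles 100..104
  Block b2 = { 20, 3, 200 };            // file IDs 20..22 -> handles 200..202
  id_map.push_back( b1 );
  id_map.push_back( b2 );

  const long start_id = 12, num_values = 10;  // table covers IDs 12..21
  for (size_t i = 0; i < id_map.size(); ++i) {
    long lo = std::max( start_id, id_map[i].begin );
    long hi = std::min( start_id + num_values,
                        id_map[i].begin + id_map[i].count );
    for (long id = lo; id < hi; ++id)   // overlap of table block and map block
      std::printf( "file ID %ld -> handle %ld\n",
                   id, id_map[i].value + (id - id_map[i].begin) );
  }
  return 0;
}
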
ErrorCode moab::ReadHDF5::read_elems ( int  index) [private]

Definition at line 1500 of file ReadHDF5.cpp.

{
  Range ids;
  ids.insert( fileInfo->elems[i].desc.start_id,
              fileInfo->elems[i].desc.start_id + fileInfo->elems[i].desc.count - 1);
  return read_elems( i, ids );
}
ErrorCode moab::ReadHDF5::read_elems ( int  index,
const Range file_ids,
Range node_ids = 0 
) [private]

Definition at line 1508 of file ReadHDF5.cpp.

{
  if (fileInfo->elems[i].desc.vals_per_ent < 0) {
    if (node_ids != 0) // not implemented for version 3 format of poly data
      return error(MB_TYPE_OUT_OF_RANGE);
    return read_poly( fileInfo->elems[i], file_ids );
  }
  else
    return read_elems( fileInfo->elems[i], file_ids, node_ids );
}
ErrorCode moab::ReadHDF5::read_elems ( const mhdf_ElemDesc elems,
const Range file_ids,
Range node_ids = 0 
) [private]

Read element connectivity.

Parameters:
node_ids  If this is non-null, the union of the connectivity list for all elements is passed back as FILE IDS in this range AND the connectivity list is left in the form of file IDs (NOT NODE HANDLES).

Definition at line 1519 of file ReadHDF5.cpp.

{

  CHECK_OPEN_HANDLES;

  debug_barrier();
  dbgOut.tprintf( 1, "READING %s CONNECTIVITY (%lu elems in %lu selects)\n", 
                     elems.handle, (unsigned long)file_ids.size(), (unsigned long)file_ids.psize() );

  ErrorCode rval = MB_SUCCESS;
  mhdf_Status status;
  
  EntityType type = CN::EntityTypeFromName( elems.type );
  if (type == MBMAXTYPE)
  {
    readUtil->report_error( "Unknown element type: \"%s\".\n", elems.type );
    return error(MB_FAILURE);
  }
  
  const int nodes_per_elem = elems.desc.vals_per_ent;
  const size_t count = file_ids.size();
  hid_t data_id = mhdf_openConnectivitySimple( filePtr, elems.handle, &status );
  if (is_error(status))
    return error(MB_FAILURE);

  EntityHandle handle;
  EntityHandle* array = 0;
  if (count>0)
    rval = readUtil->get_element_connect( count, nodes_per_elem, type,
                                        0, handle, array );
  if (MB_SUCCESS != rval)
    return error(rval);
  
  try {
    EntityHandle* buffer = reinterpret_cast<EntityHandle*>(dataBuffer);
    const size_t buffer_size = bufferSize/(sizeof(EntityHandle)*nodes_per_elem);
    ReadHDF5Dataset reader( elems.handle, data_id, nativeParallel, mpiComm );
    reader.set_file_ids( file_ids, elems.desc.start_id, buffer_size, handleType ); 
    dbgOut.printf( 3, "Reading connectivity in %lu chunks for element group \"%s\"\n",
      reader.get_read_count(), elems.handle );
    EntityHandle* iter = array;
    int nn = 0;
    while (!reader.done()) {
      dbgOut.printf( 3, "Reading chunk %d for \"%s\"\n", ++nn, elems.handle );
    
      size_t num_read;
      reader.read( buffer, num_read );
      iter = std::copy( buffer, buffer+num_read*nodes_per_elem, iter );
      
      if (node_ids) {
        std::sort( buffer, buffer + num_read*nodes_per_elem );
        num_read = std::unique( buffer, buffer + num_read*nodes_per_elem ) - buffer;
        copy_sorted_file_ids( buffer, num_read, *node_ids );
      }
    }
    assert(iter - array == (ptrdiff_t)count * nodes_per_elem);
  }
  catch (ReadHDF5Dataset::Exception) {
    return error(MB_FAILURE);
  }
  
  if (!node_ids) {
    rval = convert_id_to_handle( array, count*nodes_per_elem );
    if (MB_SUCCESS != rval)
      return error(rval);

    rval = readUtil->update_adjacencies( handle, count, nodes_per_elem, array );
    if (MB_SUCCESS != rval)
      return error(rval);
  }
  else {
    IDConnectivity t;
    t.handle = handle;
    t.count = count;
    t.nodes_per_elem = nodes_per_elem;
    t.array = array;
    idConnectivityList.push_back(t);
  }
  
  return insert_in_id_map( file_ids, handle );
}
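
When node_ids is requested, each chunk of connectivity is sorted and deduplicated in place before the surviving file IDs are appended to the output range. A minimal sketch of that step; a std::set stands in for the moab::Range filled by copy_sorted_file_ids:

#include <algorithm>
#include <set>

typedef unsigned long Handle;

static void collect_node_ids( Handle* buffer, size_t n, std::set<Handle>& nodes )
{
  std::sort( buffer, buffer + n );
  size_t unique_count = std::unique( buffer, buffer + n ) - buffer;
  nodes.insert( buffer, buffer + unique_count );
}

int main()
{
  Handle conn[] = { 4, 2, 3, 2, 5, 4 };  // connectivity of two triangles
  std::set<Handle> nodes;
  collect_node_ids( conn, 6, nodes );    // nodes == {2, 3, 4, 5}
  return 0;
}
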
ErrorCode moab::ReadHDF5::read_elems ( int  index,
const Range element_file_ids,
Range node_file_ids 
) [private]

Definition at line 1744 of file ReadHDF5.cpp.

{

  CHECK_OPEN_HANDLES;

  debug_barrier();
  dbgOut.tprintf( 1, "READING %s CONNECTIVITY (%lu elems in %lu selects)\n", fileInfo->elems[i].handle, 
                     (unsigned long)elems_in.size(), (unsigned long)elems_in.psize() );

  EntityHandle* const buffer = reinterpret_cast<EntityHandle*>(dataBuffer);
  const int node_per_elem = fileInfo->elems[i].desc.vals_per_ent;
  const size_t buffer_size = bufferSize / (node_per_elem*sizeof(EntityHandle));
  
  if (elems_in.empty())
    return MB_SUCCESS;
    
  assert( (long)elems_in.front() >= fileInfo->elems[i].desc.start_id );
  assert( (long)elems_in.back() - fileInfo->elems[i].desc.start_id < fileInfo->elems[i].desc.count );
  
    // we don't support version 3 style poly element data
  if (fileInfo->elems[i].desc.vals_per_ent <= 0)
    return error(MB_TYPE_OUT_OF_RANGE);
  
  mhdf_Status status;
  hid_t table = mhdf_openConnectivitySimple( filePtr, fileInfo->elems[i].handle, &status );
  if (is_error(status))
    return error(MB_FAILURE);
  
  try {
    ReadHDF5Dataset reader( fileInfo->elems[i].handle, table, nativeParallel, mpiComm );
    reader.set_file_ids( elems_in, fileInfo->elems[i].desc.start_id, 
                         buffer_size, handleType );
    dbgOut.printf( 3, "Reading node list in %lu chunks for \"%s\"\n", reader.get_read_count(), fileInfo->elems[i].handle );
    int nn = 0;
    while (!reader.done()) {
      dbgOut.printf( 3, "Reading chunk %d of \"%s\" connectivity\n", ++nn, fileInfo->elems[i].handle );
      size_t num_read;
      reader.read( buffer, num_read );
      std::sort( buffer, buffer + num_read*node_per_elem );
      num_read = std::unique( buffer, buffer + num_read*node_per_elem ) - buffer;
      copy_sorted_file_ids( buffer, num_read, nodes );
    }
  } 
  catch (ReadHDF5Dataset::Exception) {
    return error(MB_FAILURE);
  }
  
  return MB_SUCCESS;
}
ErrorCode moab::ReadHDF5::read_node_adj_elems ( const mhdf_ElemDesc group,
Range read_entities = 0 
) [private]

Definition at line 1618 of file ReadHDF5.cpp.

{
  mhdf_Status status;
  ErrorCode rval;

  CHECK_OPEN_HANDLES;
  
  hid_t table = mhdf_openConnectivitySimple( filePtr, group.handle, &status );
  if (is_error(status))
    return error(MB_FAILURE);
    
  rval = read_node_adj_elems( group, table, handles_out );
  
  mhdf_closeData( filePtr, table, &status );
  if (MB_SUCCESS == rval && is_error(status))
    return error(rval = MB_FAILURE);
  return rval;
}
ErrorCode moab::ReadHDF5::read_node_adj_elems ( const mhdf_ElemDesc group,
hid_t  connectivity_handle,
Range read_entities = 0 
) [private]

Definition at line 1637 of file ReadHDF5.cpp.

{

  CHECK_OPEN_HANDLES;

  debug_barrier();

  mhdf_Status status;
  ErrorCode rval;
  IODebugTrack debug_track( debugTrack, std::string(group.handle) );

    // copy data to local variables (makes other code clearer)
  const int node_per_elem = group.desc.vals_per_ent;
  long start_id = group.desc.start_id;
  long remaining = group.desc.count;
  const EntityType type = CN::EntityTypeFromName( group.type );
  
    // figure out how many elements we can read in each pass
  long* const buffer = reinterpret_cast<long*>( dataBuffer );
  const long buffer_size = bufferSize / (node_per_elem * sizeof(buffer[0]));
    // read all element connectivity in buffer_size blocks
  long offset = 0;
  dbgOut.printf( 3, "Reading node-adjacent elements from \"%s\" in %ld chunks\n",
    group.handle, (remaining + buffer_size - 1) / buffer_size );
  int nn = 0;
  Range::iterator hint;
  if (handles_out)
    hint = handles_out->begin();
  while (remaining) {
    dbgOut.printf( 3, "Reading chunk %d of connectivity data for \"%s\"\n", ++nn, group.handle );
  
      // read a block of connectivity data
    const long count = std::min( remaining, buffer_size );
    debug_track.record_io( offset, count );
    assert_range( buffer, count*node_per_elem );
    mhdf_readConnectivityWithOpt( table_handle, offset, count, H5T_NATIVE_LONG, buffer, collIO, &status );
    if (is_error(status))
      return error(MB_FAILURE);
    offset += count;
    remaining -= count;
    
      // count the number of elements in the block that we want,
      // zero connectivity for other elements
    long num_elem = 0;
    long* iter = buffer;
    for (long i = 0; i < count; ++i) {
      for (int j = 0; j < node_per_elem; ++j) {
        iter[j] = (long)idMap.find( iter[j] );
        if (!iter[j]) {
          iter[0] = 0;
          break;
        }
      }
      if (iter[0])
        ++num_elem;
      iter += node_per_elem;
    }
    
    if (!num_elem) {
      start_id += count;
      continue;
    }
    
      // create elements
    EntityHandle handle;
    EntityHandle* array;
    rval = readUtil->get_element_connect( (int)num_elem,
                                         node_per_elem,
                                         type,
                                         0,
                                         handle, 
                                         array );
    if (MB_SUCCESS != rval)
      return error(rval);
   
      // copy all non-zero connectivity values
    iter = buffer;
    EntityHandle* iter2 = array;
    EntityHandle h = handle;
    for (long i = 0; i < count; ++i) {
      if (!*iter) {
        iter += node_per_elem;
        continue;
      }
      if (!idMap.insert( start_id + i, h++, 1 ).second) 
        return error(MB_FAILURE);
        
      long* const end = iter + node_per_elem;
      for (; iter != end; ++iter, ++iter2)
        *iter2 = (EntityHandle)*iter;
    }
    assert( iter2 - array == num_elem * node_per_elem );
    start_id += count;
    
    rval = readUtil->update_adjacencies( handle, num_elem, node_per_elem, array );
    if (MB_SUCCESS != rval) return error(rval);
    if (handles_out)
      hint = handles_out->insert( hint, handle, handle + num_elem - 1 );
   }
  
  debug_track.all_reduce();
  return MB_SUCCESS;
}
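
The filtering pass above maps every connectivity entry through idMap and zeroes out the whole element if any of its nodes was not read, then creates only the surviving elements. A minimal sketch of that filter; std::map stands in for the RangeMap and the values are made up:

#include <map>

typedef long FileId;
typedef unsigned long Handle;

int main()
{
  std::map<FileId, Handle> id_map;
  id_map[1] = 101; id_map[2] = 102; id_map[3] = 103;  // node 4 was not read

  const int node_per_elem = 3;
  long conn[] = { 1, 2, 3,   2, 3, 4 };  // second element references node 4
  long num_elem = 0;
  for (int e = 0; e < 2; ++e) {
    long* elem = conn + e*node_per_elem;
    for (int j = 0; j < node_per_elem; ++j) {
      std::map<FileId, Handle>::iterator it = id_map.find( elem[j] );
      elem[j] = (it == id_map.end()) ? 0 : (long)it->second;
      if (!elem[j]) { elem[0] = 0; break; }  // reject the whole element
    }
    if (elem[0])
      ++num_elem;
  }
  // num_elem == 1: only the first element had all of its nodes present
  return 0;
}
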
ErrorCode moab::ReadHDF5::read_nodes ( const Range node_file_ids) [private]

Definition at line 1398 of file ReadHDF5.cpp.

{
  ErrorCode rval;
  mhdf_Status status;
  const int dim = fileInfo->nodes.vals_per_ent;
  Range range;

  CHECK_OPEN_HANDLES;
  
  if (node_file_ids.empty())
    return MB_SUCCESS;
  
  int cdim;
  rval = iFace->get_dimension( cdim );
  if (MB_SUCCESS != rval)
    return error(rval);
  
  if (cdim < dim)
  {
    rval = iFace->set_dimension( dim );
    if (MB_SUCCESS != rval)
      return error(rval);
  }
  
  hid_t data_id = mhdf_openNodeCoordsSimple( filePtr, &status );
  if (is_error(status))
    return error(MB_FAILURE);

  EntityHandle handle;
  std::vector<double*> arrays(dim);
  const size_t num_nodes = node_file_ids.size();
  rval = readUtil->get_node_coords( dim, (int)num_nodes, 0, handle, arrays );
  if (MB_SUCCESS != rval)
  {
    mhdf_closeData( filePtr, data_id, &status );
    return error(rval);
  }

  if (blockedCoordinateIO) {
    try {
      for (int d = 0; d < dim; ++d) {
        ReadHDF5Dataset reader( "blocked coords", data_id, nativeParallel, mpiComm, false );
        reader.set_column( d );
        reader.set_file_ids( node_file_ids, fileInfo->nodes.start_id, num_nodes, H5T_NATIVE_DOUBLE );
        dbgOut.printf( 3, "Reading %lu chunks for coordinate dimension %d\n", reader.get_read_count(), d );
        // should normally only have one read call, unless sparse nature
        // of file_ids caused reader to do something strange
        size_t count, offset = 0;
        int nn = 0;
        while (!reader.done()) {
          dbgOut.printf(3,"Reading chunk %d for dimension %d\n", ++nn, d );
          reader.read( arrays[d]+offset, count );
          offset += count;
        }
        if (offset != num_nodes) {
          mhdf_closeData( filePtr, data_id, &status );
          assert(false);
          return MB_FAILURE;
        }
      }
    }
    catch (ReadHDF5Dataset::Exception) {
      mhdf_closeData( filePtr, data_id, &status );
      return error(MB_FAILURE);
    }
  }
  else { // !blockedCoordinateIO
    double* buffer = (double*)dataBuffer;
    long chunk_size = bufferSize / (3*sizeof(double));
    long coffset = 0;
    int nn = 0;
    try {
      ReadHDF5Dataset reader( "interleaved coords", data_id, nativeParallel, mpiComm, false );
      reader.set_file_ids( node_file_ids, fileInfo->nodes.start_id, chunk_size, H5T_NATIVE_DOUBLE );
      dbgOut.printf( 3, "Reading %lu chunks for coordinate coordinates\n", reader.get_read_count() );
      while (!reader.done()) {
        dbgOut.tprintf(3,"Reading chunk %d of node coords\n", ++nn);

        size_t count;
        reader.read( buffer, count );

        for (size_t i = 0; i < count; ++i)
          for (int d = 0; d < dim; ++d) 
            arrays[d][coffset+i] = buffer[dim*i+d];
        coffset += count;
      }
    }
    catch (ReadHDF5Dataset::Exception) {
      mhdf_closeData( filePtr, data_id, &status );
      return error(MB_FAILURE);
    }
  }

  dbgOut.print(3,"Closing node coordinate table\n");
  mhdf_closeData( filePtr, data_id, &status );
  for (int d = dim; d < cdim; ++d)
    memset( arrays[d], 0, num_nodes*sizeof(double) );
    
  dbgOut.printf(3,"Updating ID to handle map for %lu nodes\n", (unsigned long)node_file_ids.size());
  return insert_in_id_map( node_file_ids, handle );
}
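
The two branches above differ only in file layout: blocked coordinates are read one dimension (HDF5 column) at a time directly into the destination arrays, while interleaved coordinates arrive as (x,y,z, x,y,z, ...) and must be scattered per dimension. A minimal sketch of the interleaved scatter, matching the arrays[d][coffset+i] = buffer[dim*i+d] loop:

#include <vector>

int main()
{
  const int dim = 3;
  const size_t num_nodes = 2;
  double interleaved[] = { 1.0, 2.0, 3.0,   4.0, 5.0, 6.0 };

  std::vector< std::vector<double> > arrays( dim, std::vector<double>( num_nodes ) );
  for (size_t i = 0; i < num_nodes; ++i)
    for (int d = 0; d < dim; ++d)
      arrays[d][i] = interleaved[dim*i + d];  // scatter into per-dimension arrays
  // arrays[0] == {1,4}, arrays[1] == {2,5}, arrays[2] == {3,6}
  return 0;
}
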
ErrorCode moab::ReadHDF5::read_poly ( const mhdf_ElemDesc elems,
const Range file_ids 
) [private]

Read poly(gons|hedra)

Definition at line 1795 of file ReadHDF5.cpp.

{
  class PolyReader : public ReadHDF5VarLen {
    private:
      const EntityType type;
      ReadHDF5* readHDF5;
    public:
    PolyReader( EntityType elem_type, void* buffer, size_t buffer_size,
                ReadHDF5* owner, DebugOutput& dbg )
               : ReadHDF5VarLen( dbg, buffer, buffer_size ),
                 type(elem_type), readHDF5(owner) 
               {}
    virtual ~PolyReader() {}
    ErrorCode store_data( EntityHandle file_id, void* data, long len, bool )
    {
      size_t valid;
      EntityHandle* conn = reinterpret_cast<EntityHandle*>(data);
      readHDF5->convert_id_to_handle( conn, len, valid );
      if (valid != (size_t)len)
        return error(MB_ENTITY_NOT_FOUND);
      EntityHandle handle;
      ErrorCode rval = readHDF5->moab()->create_element( type, conn, len, handle );
      if (MB_SUCCESS != rval)
        return error(rval);
      
      rval = readHDF5->insert_in_id_map( file_id, handle );
      return rval;
    }
  };

  CHECK_OPEN_HANDLES;

  debug_barrier();
  
  EntityType type = CN::EntityTypeFromName( elems.type );
  if (type == MBMAXTYPE)
  {
    readUtil->report_error( "Unknown element type: \"%s\".\n", elems.type );
    return error(MB_FAILURE);
  }
  
  hid_t handles[2];
  mhdf_Status status;
  long num_poly, num_conn, first_id;
  mhdf_openPolyConnectivity( filePtr, elems.handle, &num_poly, &num_conn, &first_id, 
                             handles, &status );
  if (is_error(status))
    return error(MB_FAILURE);

  std::string nm(elems.handle);
  ReadHDF5Dataset offset_reader( (nm + " offsets").c_str(), handles[0], nativeParallel, mpiComm, true );
  ReadHDF5Dataset connect_reader( (nm + " data").c_str(), handles[1], nativeParallel, mpiComm, true );
  
  PolyReader tool( type, dataBuffer, bufferSize, this, dbgOut );
  return tool.read( offset_reader, connect_reader, file_ids, first_id, handleType );
}
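
PolyReader consumes the variable-length layout used for poly data: an offsets table paired with a flat connectivity table, so each entity owns a contiguous slice of the data. A minimal sketch of that layout; treating each offset as the inclusive end index of its entity's slice is an assumption of this sketch, and the values are made up:

#include <cstdio>

int main()
{
  long offsets[] = { 2, 6 };                  // end index per polygon
  long conn[]    = { 1, 2, 3,   3, 2, 4, 5 }; // flat connectivity table
  long prev = -1;
  for (int k = 0; k < 2; ++k) {
    std::printf( "poly %d:", k );
    for (long j = prev + 1; j <= offsets[k]; ++j)
      std::printf( " %ld", conn[j] );         // this polygon's slice
    std::printf( "\n" );
    prev = offsets[k];
  }
  return 0;
}
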

ErrorCode moab::ReadHDF5::read_qa ( EntityHandle  file_set ) [private]

FIX ME - how to put QA list on set??

Definition at line 3507 of file ReadHDF5.cpp.

{

  CHECK_OPEN_HANDLES;

  mhdf_Status status;
  std::vector<std::string> qa_list;
  
  int qa_len;
  char** qa = mhdf_readHistory( filePtr, &qa_len, &status );
  if (mhdf_isError( &status ))
  {
    readUtil->report_error( "%s", mhdf_message( &status ) );
    return error(MB_FAILURE);
  }
  qa_list.resize(qa_len);
  for (int i = 0; i < qa_len; i++)
  {
    qa_list[i] = qa[i];
    free( qa[i] );
  }
  free( qa );
  
  return MB_SUCCESS;
}
ErrorCode moab::ReadHDF5::read_set_data ( const Range set_file_ids,
EntityHandle  set_start_handle,
ReadHDF5Dataset set_data_set,
SetMode  which_data,
Range file_ids_out = 0 
) [private]

Definition at line 2408 of file ReadHDF5.cpp.

{
  ErrorCode rval;
  Range::const_pair_iterator pi;
  Range::iterator out_hint;
  if (file_ids_out)
    out_hint = file_ids_out->begin();

    // Construct range of offsets into data table at which to read
    // Note: all offsets are incremented by TWEAK because Range cannot
    // store zeros.
  const long TWEAK = 1;
  Range data_offsets;
  Range::iterator hint = data_offsets.begin();
  pi = set_file_ids.const_pair_begin();
  if ((long)pi->first == fileInfo->sets.start_id) {
    long second = pi->second - fileInfo->sets.start_id;
    if  (setMeta[second][mode] >= 0)
      hint = data_offsets.insert( hint, TWEAK, setMeta[second][mode]+TWEAK );
    ++pi;
  }
  for ( ; pi != set_file_ids.const_pair_end(); ++pi) {
    long first = pi->first - fileInfo->sets.start_id;
    long second = pi->second - fileInfo->sets.start_id;
    long idx1 = setMeta[first-1][mode]+1;
    long idx2 = setMeta[second][mode];
    if (idx2 >= idx1)
      hint = data_offsets.insert( hint, idx1+TWEAK, idx2+TWEAK );
  }
  try { 
    data.set_file_ids( data_offsets, TWEAK, bufferSize/sizeof(EntityHandle), handleType );
  }
  catch (ReadHDF5Dataset::Exception ) {
    return MB_FAILURE;
  }
  
    // we need to increment this for each processed set because
    // the sets were created in the order of the ids in file_ids.
  EntityHandle h = start_handle;
  
  const long ranged_flag = (mode == CONTENT) ? mhdf_SET_RANGE_BIT : 0;
  
  std::vector<EntityHandle> partial; // for when we read only part of the contents of a set/entity
  Range::const_iterator fileid_iter = set_file_ids.begin();
  EntityHandle* buffer = reinterpret_cast<EntityHandle*>(dataBuffer);
  size_t count, offset;

  int nn = 0;  
  while (!data.done()) {
    dbgOut.printf( 3, "Reading chunk %d of %s\n", ++nn, data.get_debug_desc() );
    try { 
      data.read( buffer, count );
    }
    catch (ReadHDF5Dataset::Exception ) {
      return MB_FAILURE;
    }
    
    // An assert is not appropriate here: this rank may have processed all of
    // its file ids while another rank has not. In that case count will be
    // zero here, but we still need to go through the motions so the
    // collective read succeeds.
    
      // Handle 'special' case where we read some, but not all
      // of the data for an entity during the last iteration.
    offset = 0;
    if (!partial.empty()) { // didn't read all of previous entity
      assert( fileid_iter != set_file_ids.end() );
      size_t num_prev = partial.size();
      size_t idx = *fileid_iter - fileInfo->sets.start_id;
      size_t len = idx ? setMeta[idx][mode] - setMeta[idx-1][mode] : setMeta[idx][mode] + 1;
      offset = len - num_prev;
      if (offset > count) { // still don't have all
        partial.insert( partial.end(), buffer, buffer+count );
        continue;
      }
      
      partial.insert( partial.end(), buffer, buffer+offset );
      if (file_ids_out) {
        out_hint = copy_set_contents( out_hint, setMeta[idx][3] & ranged_flag,
                         &partial[0], partial.size(), *file_ids_out);
      }
      else {
        switch (mode) {
          size_t valid;
          case CONTENT:
            if (setMeta[idx][3] & ranged_flag) {
              if (len % 2) 
                return error(MB_INDEX_OUT_OF_RANGE);
              Range range;
              convert_range_to_handle( &partial[0], len/2, range );
              rval = moab()->add_entities( h, range );
            }
            else {
              convert_id_to_handle( &partial[0], len, valid );
              rval = moab()->add_entities( h, &partial[0], valid );
            }
            break;
          case CHILD:
            convert_id_to_handle( &partial[0], len, valid );
            rval = moab()->add_child_meshsets( h, &partial[0], valid );
            break;
          case PARENT:
            convert_id_to_handle( &partial[0], len, valid );
            rval = moab()->add_parent_meshsets( h, &partial[0], valid );
            break;
        }
        if (MB_SUCCESS != rval)
          return error(rval);
      }

      ++fileid_iter;
      ++h;
      partial.clear();
    }
    
      // Process contents for all entities for which we 
      // have read the complete list
    while (offset < count) {
      assert( fileid_iter != set_file_ids.end() );
      size_t idx = *fileid_iter - fileInfo->sets.start_id;
      size_t len = idx ? setMeta[idx][mode] - setMeta[idx-1][mode] : setMeta[idx][mode] + 1;
        // If we did not read all of the final entity,
        // store what we did read to be processed in the
        // next iteration
      if (offset + len > count) {
        partial.insert( partial.end(), buffer + offset, buffer + count );
        break;
      }
      
      if (file_ids_out) {
        out_hint = copy_set_contents( out_hint, setMeta[idx][3] & ranged_flag,
                         buffer + offset, len, *file_ids_out);
      }
      else {
        switch (mode) {
          size_t valid;
          case CONTENT:
            if (setMeta[idx][3] & ranged_flag) {
              if (len % 2) 
                return error(MB_INDEX_OUT_OF_RANGE);
              Range range;
              convert_range_to_handle( buffer+offset, len/2, range );
              rval = moab()->add_entities( h, range );
            }
            else {
              convert_id_to_handle( buffer+offset, len, valid );
              rval = moab()->add_entities( h, buffer+offset, valid );
            }
            break;
          case CHILD:
            convert_id_to_handle( buffer+offset, len, valid );
            rval = moab()->add_child_meshsets( h, buffer+offset, valid );
            break;
          case PARENT:
            convert_id_to_handle( buffer+offset, len, valid );
            rval = moab()->add_parent_meshsets( h, buffer+offset, valid );
            break;
        }
        if (MB_SUCCESS != rval)
          return error(rval);
      }

      ++fileid_iter;
      ++h;
      offset += len;
    }
  }

  return MB_SUCCESS;
}
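
The TWEAK constant above exists because moab::Range cannot store the value zero, while table offsets legitimately start at 0; offsets are therefore stored shifted up by one and shifted back when the read is issued (set_file_ids is given TWEAK as the start ID). A trivial sketch of the round trip:

#include <cassert>

int main()
{
  const long TWEAK = 1;
  long raw_offset = 0;                   // first row of the data table
  long stored     = raw_offset + TWEAK;  // what goes into the Range
  assert( stored > 0 );                  // now representable in a Range
  long read_row   = stored - TWEAK;      // recovered when the read is issued
  assert( read_row == raw_offset );
  return 0;
}
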
ErrorCode moab::ReadHDF5::read_set_ids_recursive ( Range sets_in_out,
bool  contained_sets,
bool  child_sets 
) [private]

Given a list of file IDs for entity sets, find all contained or child sets (at any depth) and append them to the Range of file IDs.

Definition at line 2066 of file ReadHDF5.cpp.

{

  CHECK_OPEN_HANDLES;
  mhdf_Status status;

  if (!fileInfo->have_set_children)
    child_sets = false;
  if (!fileInfo->have_set_contents)
    contained_sets = false;
  if (!child_sets && !contained_sets)
    return MB_SUCCESS;

    // open data tables
  if (fileInfo->sets.count == 0) {
    assert( sets_in_out.empty() );
    return MB_SUCCESS;
  }
  
  if (!contained_sets && !child_sets)
    return MB_SUCCESS;
  
  ReadHDF5Dataset cont( "set contents", false, mpiComm );
  ReadHDF5Dataset child( "set children", false, mpiComm );
  
  if (contained_sets) {
    long content_len = 0;
    hid_t content_handle = mhdf_openSetData( filePtr, &content_len, &status );
    if (is_error(status))
       return error(MB_FAILURE);
    try {
      cont.init( content_handle, true );
    }
    catch ( ReadHDF5Dataset::Exception ) {
      return error(MB_FAILURE);
    }
  }
  
  if (child_sets) {
    long child_len = 0;
    hid_t child_handle = mhdf_openSetChildren( filePtr, &child_len, &status );
    if (is_error(status))
      return error(MB_FAILURE);
    try {
      child.init( child_handle, true );
    }
    catch ( ReadHDF5Dataset::Exception ) {
      return error(MB_FAILURE);
    }
  }
  
  ErrorCode rval = MB_SUCCESS;
  Range children, new_children(sets_in_out);
  int iteration_count = 0;
  do {
    ++iteration_count;
    dbgOut.tprintf(2,"Iteration %d of read_set_ids_recursive\n",iteration_count);
    children.clear();
    if (child_sets) {
      rval = read_set_data( new_children, 0, child, CHILD, &children );
      if (MB_SUCCESS != rval)
        break;
    }
    if (contained_sets) {
      rval = read_set_data( new_children, 0, cont, CONTENT, &children );
        // remove any non-set values
      Range::iterator it = children.lower_bound( fileInfo->sets.start_id );
      children.erase( children.begin(), it );
      it = children.lower_bound( fileInfo->sets.start_id + fileInfo->sets.count );
      children.erase( it, children.end() );
      if (MB_SUCCESS != rval)
        break;
    }
    new_children = subtract( children,  sets_in_out );
    dbgOut.print_ints( 2, "Adding additional contained/child sets", new_children );
    sets_in_out.merge( new_children );
  } while (!new_children.empty());
  
  return rval; // MB_SUCCESS unless a read_set_data call failed
}
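
The do/while loop above is a fixed-point (transitive-closure) iteration: each pass reads the children of only the newly discovered sets, and the loop stops when a pass discovers nothing new. A minimal sketch of the same iteration over an in-memory child map, with made-up values:

#include <map>
#include <set>
#include <vector>

typedef long SetId;

int main()
{
  std::map< SetId, std::vector<SetId> > children;
  children[1].push_back( 2 );
  children[2].push_back( 3 );           // chain: 1 -> 2 -> 3

  std::set<SetId> all;                  // plays the role of sets_in_out
  all.insert( 1 );
  std::set<SetId> frontier( all );      // plays the role of new_children
  while (!frontier.empty()) {
    std::set<SetId> found;
    for (std::set<SetId>::iterator it = frontier.begin();
         it != frontier.end(); ++it) {
      const std::vector<SetId>& kids = children[*it];
      for (size_t i = 0; i < kids.size(); ++i)
        if (!all.count( kids[i] ))      // the equivalent of subtract()
          found.insert( kids[i] );
    }
    all.insert( found.begin(), found.end() );
    frontier.swap( found );             // next pass reads only the new sets
  }
  // all == {1, 2, 3}
  return 0;
}
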
ErrorCode moab::ReadHDF5::read_sets ( const Range set_file_ids) [private]

Read sets.

Definition at line 1929 of file ReadHDF5.cpp.

{

  CHECK_OPEN_HANDLES;

  debug_barrier();

  mhdf_Status status;
  ErrorCode rval;

  const size_t num_sets = fileInfo->sets.count;
  if (!num_sets) // If no sets at all!
    return MB_SUCCESS;

    // create sets 
  std::vector<unsigned> flags(file_ids.size());
  Range::iterator si = file_ids.begin();
  for (size_t i = 0; i < flags.size(); ++i, ++si) 
    flags[i] = setMeta[*si - fileInfo->sets.start_id][3] & ~(long)mhdf_SET_RANGE_BIT;
  EntityHandle start_handle;
  rval = readUtil->create_entity_sets( flags.size(), &flags[0], 0, start_handle );
  if (MB_SUCCESS != rval)
    return error(rval);
  rval = insert_in_id_map( file_ids, start_handle );
  if (MB_SUCCESS != rval)
    return error(rval);
    
    // read contents
  if (fileInfo->have_set_contents) {
    long len = 0;
    hid_t handle = mhdf_openSetData( filePtr, &len, &status );
    if (is_error(status))
      return error(MB_FAILURE);

    ReadHDF5Dataset dat( "set contents", handle, nativeParallel, mpiComm, true );
    rval = read_set_data( file_ids, start_handle, dat, CONTENT );
    if (MB_SUCCESS != rval)
      return error(rval);
  }
  
    // read set child lists
  if (fileInfo->have_set_children) {
    long len = 0;
    hid_t handle = mhdf_openSetChildren( filePtr, &len, &status );
    if (is_error(status))
      return error(MB_FAILURE);
    
    ReadHDF5Dataset dat( "set children", handle, nativeParallel, mpiComm, true );
    rval = read_set_data( file_ids, start_handle, dat, CHILD );
    if (MB_SUCCESS != rval)
      return error(rval);
  }
  
    // read set parent lists
  if (fileInfo->have_set_parents) {
    long len = 0;
    hid_t handle = mhdf_openSetParents( filePtr, &len, &status );
    if (is_error(status))
      return error(MB_FAILURE);
    
    ReadHDF5Dataset dat( "set parents", handle, nativeParallel, mpiComm, true );
    rval = read_set_data( file_ids, start_handle, dat, PARENT );
    if (MB_SUCCESS != rval)
      return error(rval);
  }
    
  return MB_SUCCESS;
}
ErrorCode moab::ReadHDF5::read_sets_partial ( const Range sets_in) [private]

Read sets from file into MOAB for partial read of file.

Given the file IDs for entity sets (sets_in) and elements and nodes (id_map), read in all sets containing any of the elements or nodes and all sets that are (recursively) children of any other set to be read (those in sets_in or those containing any already-read element or node).

Parameters:
sets_in  File IDs for sets to read (unconditionally)
ErrorCode moab::ReadHDF5::read_sparse_tag ( Tag  tag_handle,
hid_t  hdf_read_type,
hid_t  ent_table,
hid_t  val_table,
long  num_entities 
) [private]

Read sparse tag for all entities.

Definition at line 3200 of file ReadHDF5.cpp.

{

  CHECK_OPEN_HANDLES;

    // Read entire ID table and for those file IDs corresponding
    // to entities that we have read from the file add both the
    // offset into the offset range and the handle into the handle 
    // range.  If handles are not ordered, switch to using a vector.
  const EntityHandle base_offset = 1; // can't put zero in a Range
  std::vector<EntityHandle> handle_vect;
  Range handle_range, offset_range;
  std::string tn("<error>");
  iFace->tag_get_name( tag_handle, tn );
  ErrorCode rval = read_sparse_tag_indices( tn.c_str(),
                                            id_table, base_offset,
                                            offset_range, handle_range,
                                            handle_vect );
  if (MB_SUCCESS != rval)
    return error(rval);
  
  DataType mbtype;
  rval = iFace->tag_get_data_type( tag_handle, mbtype );
  if (MB_SUCCESS != rval) 
    return error(rval);
  
  int read_size;
  rval = iFace->tag_get_bytes( tag_handle, read_size );
  if (MB_SUCCESS != rval) // wrong function for variable-length tags
    return error(rval);
  //if (MB_TYPE_BIT == mbtype) 
  //  read_size = (read_size + 7)/8; // convert bits to bytes, plus 7 for ceiling
    
  if (hdf_read_type) { // if not opaque
    hsize_t hdf_size = H5Tget_size( hdf_read_type );
    if (hdf_size != (hsize_t)read_size) 
      return error(MB_FAILURE);
  }

  const int handles_per_tag = read_size/sizeof(EntityHandle);

    // Now read data values
  size_t chunk_size = bufferSize / read_size;
  try {
    ReadHDF5Dataset val_reader( (tn + " values").c_str(), value_table, nativeParallel, mpiComm, false );
    val_reader.set_file_ids( offset_range, base_offset, chunk_size, hdf_read_type );
    dbgOut.printf( 3, "Reading sparse values for tag \"%s\" in %lu chunks\n", tn.c_str(), val_reader.get_read_count() );
    int nn = 0;
    size_t offset = 0;
    while (!val_reader.done()) {
      dbgOut.printf( 3, "Reading chunk %d of \"%s\" values\n", ++nn, tn.c_str() );
      size_t count;
      val_reader.read( dataBuffer, count );
      if (MB_TYPE_HANDLE == mbtype) {
        rval = convert_id_to_handle( (EntityHandle*)dataBuffer, count*handles_per_tag );
        if (MB_SUCCESS != rval)
          return error(rval);
      }
    
      if (!handle_vect.empty()) {
        rval = iFace->tag_set_data( tag_handle, &handle_vect[offset], count, dataBuffer );
        offset += count;
      }
      else {
        Range r;
        r.merge( handle_range.begin(), handle_range.begin() + count );
        handle_range.erase( handle_range.begin(), handle_range.begin() + count );
        rval = iFace->tag_set_data( tag_handle, r, dataBuffer );
      }
      if (MB_SUCCESS != rval)
        return error(rval);
    }
  }
  catch (ReadHDF5Dataset::Exception) {
    return error(MB_FAILURE);
  }
  
  return MB_SUCCESS;
}
ErrorCode moab::ReadHDF5::read_sparse_tag_indices ( const char *  name,
hid_t  id_table,
EntityHandle  start_offset,
Range offset_range,
Range handle_range,
std::vector< EntityHandle > &  handle_vect 
) [private]

Read index table for sparse tag.

Read the ID table for a sparse or variable-length tag, returning the handles and offsets within the table for each file ID that corresponds to an entity we've read from the file (an entity that is in idMap).

Parameters:
id_table        The HDF5 handle for the tag's ID table.
start_offset    Some non-zero value, because ranges (in this case the offset_range) cannot contain zeros.
offset_range    Output: The offsets in the ID table at which IDs that occur in idMap were found. All values are increased by start_offset to avoid putting zeros in the range.
handle_range    Output: For each valid ID read from the table, the corresponding entity handle. Note: if the IDs did not occur in handle order, then this will be empty. Use handle_vect instead.
handle_vect     Output: For each valid ID read from the table, the corresponding entity handle. Note: if the IDs occurred in handle order, then this will be empty. Use handle_range instead.

Definition at line 3126 of file ReadHDF5.cpp.

{

  CHECK_OPEN_HANDLES;

  offset_range.clear();
  handle_range.clear();
  handle_vect.clear();

  ErrorCode rval;
  Range::iterator handle_hint = handle_range.begin();
  Range::iterator offset_hint = offset_range.begin();
  
  EntityHandle* idbuf = (EntityHandle*)dataBuffer;
  size_t idbuf_size = bufferSize / sizeof(EntityHandle);

  std::string tn(name);
  tn += " indices";

  assert(start_offset > 0); // can't put zero in a Range
  try {
    ReadHDF5Dataset id_reader( tn.c_str(), id_table, nativeParallel, mpiComm, false );
    id_reader.set_all_file_ids( idbuf_size, handleType );
    size_t offset = start_offset;
    dbgOut.printf( 3, "Reading file ids for sparse tag \"%s\" in %lu chunks\n", name, id_reader.get_read_count() );
    int nn = 0;
    while (!id_reader.done()) {
      dbgOut.printf( 3, "Reading chunk %d of \"%s\" IDs\n", ++nn, name );
      size_t count;
      id_reader.read( idbuf, count ); 

      rval = convert_id_to_handle( idbuf, count );
      if (MB_SUCCESS != rval)
        return error(rval);

        // idbuf will now contain zero-valued handles for those
        // tag values that correspond to entities we are not reading
        // from the file.
      for (size_t i = 0; i < count; ++i) {
        if (idbuf[i]) {
          offset_hint = offset_range.insert( offset_hint, offset+i );
          if (!handle_vect.empty()) {
            handle_vect.push_back( idbuf[i] );
          }
          else if (handle_range.empty() || idbuf[i] > handle_range.back()) {
            handle_hint = handle_range.insert( handle_hint, idbuf[i] );
          }
          else {
            handle_vect.resize( handle_range.size() );
            std::copy( handle_range.begin(), handle_range.end(), handle_vect.begin() );
            handle_range.clear();
            handle_vect.push_back( idbuf[i] );
            dbgOut.print(2,"Switching to unordered list for tag handle list\n");
          }
        }
      }
      
      offset += count;
    }  
  }
  catch (ReadHDF5Dataset::Exception) {
    return error(MB_FAILURE);
  }

  return MB_SUCCESS;
}
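
The insertion logic above starts with a compact ordered container and permanently falls back to a plain vector the first time a handle arrives out of order, preserving arrival order from then on. A minimal sketch of that fallback; a std::set stands in for moab::Range:

#include <set>
#include <vector>

typedef unsigned long Handle;

int main()
{
  Handle incoming[] = { 5, 9, 12, 7, 20 };  // 7 breaks the ordering
  std::set<Handle> ordered;                 // used while input stays sorted
  std::vector<Handle> unordered;            // used after the first violation

  for (int i = 0; i < 5; ++i) {
    Handle h = incoming[i];
    if (!unordered.empty())
      unordered.push_back( h );             // already fell back
    else if (ordered.empty() || h > *ordered.rbegin())
      ordered.insert( h );                  // still in ascending order
    else {
      unordered.assign( ordered.begin(), ordered.end() );
      ordered.clear();                      // switch containers exactly once
      unordered.push_back( h );
    }
  }
  // unordered == {5, 9, 12, 7, 20}; ordered is empty
  return 0;
}
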
ErrorCode moab::ReadHDF5::read_tag ( int  index) [private]

Create tag and read all data.

Definition at line 2690 of file ReadHDF5.cpp.

{

  CHECK_OPEN_HANDLES;

  dbgOut.tprintf(2, "Reading tag \"%s\"\n", fileInfo->tags[tag_index].name );

  debug_barrier();


  ErrorCode rval;
  mhdf_Status status;
  Tag tag = 0;
  hid_t read_type = -1;
  bool table_type;
  rval = create_tag( fileInfo->tags[tag_index], tag, read_type ); 
  if (MB_SUCCESS != rval)
    return error(rval);

  if (fileInfo->tags[tag_index].have_sparse) {
    hid_t handles[3];
    long num_ent, num_val;
    mhdf_openSparseTagData( filePtr, 
                            fileInfo->tags[tag_index].name,
                            &num_ent, &num_val,
                            handles, &status );
    if (is_error(status)) {
      if (read_type) H5Tclose( read_type );
      return error(MB_FAILURE);
    }
    
    table_type = false;
    if (read_type == 0) {
      read_type = H5Dget_type( handles[1] );
      if (read_type == 0) {
        mhdf_closeData( filePtr, handles[0], &status );
        mhdf_closeData( filePtr, handles[1], &status );
        if (fileInfo->tags[tag_index].size <= 0) 
          mhdf_closeData( filePtr, handles[2], &status );
        return error(MB_FAILURE);
      }
      table_type = true;
    }

    if (fileInfo->tags[tag_index].size > 0) {
      dbgOut.printf(2, "Reading sparse data for tag \"%s\"\n", fileInfo->tags[tag_index].name );
      rval = read_sparse_tag( tag, read_type, handles[0], handles[1], num_ent );
    }
    else {
      dbgOut.printf(2, "Reading var-len sparse data for tag \"%s\"\n", fileInfo->tags[tag_index].name );
      rval = read_var_len_tag( tag, read_type, handles[0], handles[1], handles[2], num_ent, num_val );
    }

    if (table_type) {
      H5Tclose(read_type);
      read_type = 0;
    }
    
    mhdf_closeData( filePtr, handles[0], &status );
    if (MB_SUCCESS == rval && is_error(status))
      rval = MB_FAILURE;
    mhdf_closeData( filePtr, handles[1], &status );
    if (MB_SUCCESS == rval && is_error(status))
      rval = MB_FAILURE;
    if (fileInfo->tags[tag_index].size <= 0) {
      mhdf_closeData( filePtr, handles[2], &status );
      if (MB_SUCCESS == rval && is_error(status))
        rval = MB_FAILURE;
    }
    if (MB_SUCCESS != rval) {
      if (read_type) H5Tclose( read_type );
      return error(rval);
    }
  }
  
  for (int j = 0; j < fileInfo->tags[tag_index].num_dense_indices; ++j) {
    long count;
    const char* name = 0;
    mhdf_EntDesc* desc;
    int elem_idx = fileInfo->tags[tag_index].dense_elem_indices[j];
    if (elem_idx == -2) {
      desc = &fileInfo->sets;
      name = mhdf_set_type_handle();
    }
    else if (elem_idx == -1) {
      desc = &fileInfo->nodes;
      name = mhdf_node_type_handle();
    }
    else if (elem_idx >= 0 && elem_idx < fileInfo->num_elem_desc) {
      desc = &fileInfo->elems[elem_idx].desc;
      name = fileInfo->elems[elem_idx].handle;
    }
    else {
      return error(MB_FAILURE);
    }
    
    dbgOut.printf(2, "Read dense data block for tag \"%s\" on \"%s\"\n", fileInfo->tags[tag_index].name, name );
    
    hid_t handle = mhdf_openDenseTagData( filePtr, 
                                          fileInfo->tags[tag_index].name,
                                          name,
                                          &count, &status );
    if (is_error(status)) {
      rval = error(MB_FAILURE);
      break;
    }
    
    if (count > desc->count) {
      readUtil->report_error( "Invalid data length for dense tag data: %s/%s\n",
                              name, fileInfo->tags[tag_index].name );
      mhdf_closeData( filePtr, handle, &status );
      rval = error(MB_FAILURE);
      break;
    }
    
    table_type = false;
    if (read_type == 0) {
      read_type = H5Dget_type( handle );
      if (read_type == 0) {
        mhdf_closeData( filePtr, handle, &status );
        return error(MB_FAILURE);
      }
      table_type = true;
    }

    rval = read_dense_tag( tag, name, read_type, handle, desc->start_id, count );
    
    if (table_type) {
      H5Tclose( read_type );
      read_type = 0;
    }
    
    mhdf_closeData( filePtr, handle, &status );
    if (MB_SUCCESS != rval)
      break;
    if (is_error(status)) {
      rval = error(MB_FAILURE);
      break;
    }
  }
  
  if (read_type) 
    H5Tclose( read_type );
  return rval;
}
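
The dense_elem_indices convention decoded in the loop above is: -2 selects the set table, -1 the node table, and a non-negative value indexes fileInfo->elems. A minimal sketch of the decoding:

#include <cstdio>

static const char* group_kind( int elem_idx, int num_elem_desc )
{
  if (elem_idx == -2) return "sets";
  if (elem_idx == -1) return "nodes";
  if (elem_idx >= 0 && elem_idx < num_elem_desc) return "element group";
  return 0;                                 // invalid index
}

int main()
{
  int indices[] = { -2, -1, 0, 7 };
  for (int i = 0; i < 4; ++i) {
    const char* kind = group_kind( indices[i], 3 );
    std::printf( "%d -> %s\n", indices[i], kind ? kind : "<error>" );
  }
  return 0;
}
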
ErrorCode moab::ReadHDF5::read_tag_values ( const char *  file_name,
const char *  tag_name,
const FileOptions opts,
std::vector< int > &  tag_values_out,
const SubsetList subset_list = 0 
) [virtual]

Read tag values from a file.

Read the list of all integer tag values from the file for a tag that is a single integer value per entity.

Parameters:
file_name           The file to read.
tag_name            The tag for which to read values.
tag_values_out      Output: The list of tag values.
subset_list         An array of tag name and value sets specifying the subset of the file to read. If multiple tags are specified, the sets that match all tags (intersection) should be read.
subset_list_length  The length of the 'subset_list' array.

Implements moab::ReaderIface.

Definition at line 3572 of file ReadHDF5.cpp.

{
  ErrorCode rval;
  
  rval = set_up_read( file_name, opts );
  if (MB_SUCCESS != rval)
    return error(rval);

  int tag_index;
  rval = find_int_tag( tag_name, tag_index );
  if (MB_SUCCESS != rval) {
    clean_up_read( opts );
    return error(rval);
  }
  
  if (subset_list) {
    Range file_ids;
    rval = get_subset_ids( subset_list->tag_list, subset_list->tag_list_length, file_ids );
    if (MB_SUCCESS != rval) {
      clean_up_read( opts );
      return error(rval);
    }
    
    rval = read_tag_values_partial( tag_index, file_ids, tag_values_out );
    if (MB_SUCCESS != rval) {
      clean_up_read( opts );
      return error(rval);
    }
  }
  else {
    rval = read_tag_values_all( tag_index, tag_values_out );
    if (MB_SUCCESS != rval) {
      clean_up_read( opts );
      return error(rval);
    }
  }
    
  return clean_up_read( opts );
}
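
As a usage illustration only: the sketch below asks the reader for the distinct values of an integer tag without instantiating any mesh. The file name "mesh.h5m" and tag name "MATERIAL_SET" are placeholders, header paths vary between MOAB versions (ReadHDF5.hpp is an internal header), and applications would normally reach this method through the ReaderIface factory rather than by constructing ReadHDF5 directly.

#include <vector>
#include "moab/Core.hpp"
#include "moab/FileOptions.hpp"
#include "ReadHDF5.hpp"

int main()
{
  moab::Core core;
  moab::ReadHDF5 reader( &core );        // direct construction; see caveat above
  moab::FileOptions opts( "" );          // no file options
  std::vector<int> values;
  moab::ErrorCode rval =
    reader.read_tag_values( "mesh.h5m", "MATERIAL_SET", opts, values );
  return (moab::MB_SUCCESS == rval) ? 0 : 1;
}
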
ErrorCode moab::ReadHDF5::read_tag_values_all ( int  tag_index,
std::vector< int > &  results 
) [protected]

Definition at line 3762 of file ReadHDF5.cpp.

{

  CHECK_OPEN_HANDLES;

  mhdf_Status status;
  const mhdf_TagDesc& tag = fileInfo->tags[tag_index];
  long junk, num_val;
  
    // read sparse values
  if (tag.have_sparse) {
    hid_t handles[3];
    mhdf_openSparseTagData( filePtr, tag.name, &junk, &num_val, handles, &status );
    if (mhdf_isError( &status )) {
      readUtil->report_error( "%s", mhdf_message( &status ) );
      return error(MB_FAILURE);
    }
    
    mhdf_closeData( filePtr, handles[0], &status );
    if (mhdf_isError( &status )) {
      readUtil->report_error( "%s", mhdf_message( &status ) );
      mhdf_closeData( filePtr, handles[1], &status );
      return error(MB_FAILURE);
    }
    
    hid_t file_type = H5Dget_type( handles[1] );
    tag_values.resize( num_val );
    mhdf_readTagValuesWithOpt( handles[1], 0, num_val, file_type,
                               &tag_values[0], collIO, &status );
    if (mhdf_isError( &status )) {
      readUtil->report_error( "%s", mhdf_message( &status ) );
      H5Tclose( file_type );
      mhdf_closeData( filePtr, handles[1], &status );
      return error(MB_FAILURE);
    }
    H5Tconvert( file_type, H5T_NATIVE_INT, num_val, &tag_values[0], 0, H5P_DEFAULT );
    H5Tclose( file_type );
    
    mhdf_closeData( filePtr, handles[1], &status );
    if (mhdf_isError( &status )) {
      readUtil->report_error( "%s", mhdf_message( &status ) );
      return error(MB_FAILURE);
    }
  }
  
  std::sort( tag_values.begin(), tag_values.end() );
  tag_values.erase( std::unique(tag_values.begin(), tag_values.end()), tag_values.end() );
  
    // read dense values
  std::vector<int> prev_data, curr_data;
  for (int i = 0; i < tag.num_dense_indices; ++i) {
    int grp = tag.dense_elem_indices[i];
    const char* gname = 0;
    if (grp == -1)
      gname = mhdf_node_type_handle();
    else if (grp == -2)
      gname = mhdf_set_type_handle();
    else
      gname = fileInfo->elems[grp].handle;
    hid_t handle = mhdf_openDenseTagData( filePtr, tag.name, gname, &num_val, &status );
    if (mhdf_isError( &status )) {
      readUtil->report_error( "%s", mhdf_message( &status ) );
      return error(MB_FAILURE);
    }
    
    hid_t file_type = H5Dget_type( handle );
    curr_data.resize( num_val );
    mhdf_readTagValuesWithOpt( handle, 0, num_val, file_type, &curr_data[0], collIO, &status );
    if (mhdf_isError( &status )) {
      readUtil->report_error( "%s", mhdf_message( &status ) );
      H5Tclose( file_type );
      mhdf_closeData( filePtr, handle, &status );
      return error(MB_FAILURE);
    }
    
    H5Tconvert( file_type, H5T_NATIVE_INT, num_val, &curr_data[0], 0, H5P_DEFAULT );
    H5Tclose( file_type );
    mhdf_closeData( filePtr, handle, &status );
    if (mhdf_isError( &status )) {
      readUtil->report_error( "%s", mhdf_message( &status ) );
      return error(MB_FAILURE);
    }
 
    std::sort( curr_data.begin(), curr_data.end() );
    curr_data.erase( std::unique(curr_data.begin(), curr_data.end()), curr_data.end() );
    
    prev_data.clear();
    tag_values.swap( prev_data );
    std::set_union( prev_data.begin(), prev_data.end(),
                    curr_data.begin(), curr_data.end(),
                    std::back_inserter( tag_values ) );
  }
  
  return MB_SUCCESS;
}
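
The accumulation pattern above keeps the result sorted and duplicate-free throughout: each group's values are sorted and uniqued, then merged into the running result with std::set_union. A self-contained sketch of that pattern:

#include <algorithm>
#include <iterator>
#include <vector>

int main()
{
  std::vector<int> tag_values;              // running result
  int group_a[] = { 3, 1, 3, 2 };
  int group_b[] = { 2, 5, 4 };
  int* groups[] = { group_a, group_b };
  size_t sizes[] = { 4, 3 };

  for (int g = 0; g < 2; ++g) {
    std::vector<int> curr( groups[g], groups[g] + sizes[g] );
    std::sort( curr.begin(), curr.end() );
    curr.erase( std::unique( curr.begin(), curr.end() ), curr.end() );

    std::vector<int> prev;                  // swap trick from the code above
    tag_values.swap( prev );
    std::set_union( prev.begin(), prev.end(),
                    curr.begin(), curr.end(),
                    std::back_inserter( tag_values ) );
  }
  // tag_values == {1, 2, 3, 4, 5}
  return 0;
}
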
ErrorCode moab::ReadHDF5::read_tag_values_partial ( int  tag_index,
const Range file_ids,
std::vector< int > &  results 
) [protected]

Definition at line 3616 of file ReadHDF5.cpp.

{

  CHECK_OPEN_HANDLES;

  mhdf_Status status;
  const mhdf_TagDesc& tag = fileInfo->tags[tag_index];
  long num_ent, num_val;
  size_t count;
  std::string tn(tag.name);
  
    // read sparse values
  if (tag.have_sparse) {
    hid_t handles[3];
    mhdf_openSparseTagData( filePtr, tag.name, &num_ent, &num_val, handles, &status );
    if (mhdf_isError( &status )) {
      readUtil->report_error( "%s", mhdf_message( &status ) );
      return error(MB_FAILURE);
    }
    
    try {
        // read all entity handles and fill 'offsets' with ranges of
        // offsets into the data table for entities that we want.
      Range offsets;
      long* buffer = reinterpret_cast<long*>(dataBuffer);
      const long buffer_size = bufferSize/sizeof(long);
      ReadHDF5Dataset ids( (tn + " ids").c_str(), handles[0], nativeParallel, mpiComm );
      ids.set_all_file_ids( buffer_size, H5T_NATIVE_LONG );
      size_t offset = 0;
      dbgOut.printf( 3, "Reading sparse IDs for tag \"%s\" in %lu chunks\n",
                     tag.name, ids.get_read_count() );
      int nn = 0;
      while (!ids.done()) {
        dbgOut.printf( 3, "Reading chunk %d of IDs for \"%s\"\n", ++nn, tag.name );
        ids.read( buffer, count );

        std::sort( buffer, buffer+count );
        Range::iterator ins = offsets.begin();
        Range::const_iterator i = file_ids.begin();
        for (size_t j = 0; j < count; ++j) {
          while (i != file_ids.end() && (long)*i < buffer[j])
            ++i;
          if (i == file_ids.end())
            break;
          if ((long)*i == buffer[j]) {
            ins = offsets.insert( ins, j+offset, j+offset );
          }
        }
        
        offset += count;
      }

      tag_values.clear();
      tag_values.reserve( offsets.size() );
      const size_t data_buffer_size = bufferSize/sizeof(int);
      int* data_buffer = reinterpret_cast<int*>(dataBuffer);
      ReadHDF5Dataset vals( (tn + " sparse vals").c_str(), handles[1], nativeParallel, mpiComm );
      vals.set_file_ids( offsets, 0, data_buffer_size, H5T_NATIVE_INT );
      dbgOut.printf( 3, "Reading sparse values for tag \"%s\" in %lu chunks\n",
                     tag.name, vals.get_read_count() );
      nn = 0;
      // should normally only have one read call, unless sparse nature
      // of file_ids caused reader to do something strange
      while (!vals.done()) {
        dbgOut.printf( 3, "Reading chunk %d of values for \"%s\"\n", ++nn, tag.name );
        vals.read( data_buffer, count );
        tag_values.insert( tag_values.end(), data_buffer, data_buffer+count );
      }
    }
    catch (ReadHDF5Dataset::Exception) {
      return error(MB_FAILURE);
    }
  }
  
  std::sort( tag_values.begin(), tag_values.end() );
  tag_values.erase( std::unique(tag_values.begin(), tag_values.end()), tag_values.end() );
  
    // read dense values
  std::vector<int> prev_data, curr_data;
  for (int i = 0; i < tag.num_dense_indices; ++i) {
    int grp = tag.dense_elem_indices[i];
    const char* gname = 0;
    mhdf_EntDesc* desc = 0;
    if (grp == -1) {
      gname = mhdf_node_type_handle();
      desc = &fileInfo->nodes;
    }
    else if (grp == -2) {
      gname = mhdf_set_type_handle();
      desc = &fileInfo->sets;
    }
    else {
      assert(grp >= 0 && grp < fileInfo->num_elem_desc);
      gname = fileInfo->elems[grp].handle;
      desc = &fileInfo->elems[grp].desc;
    }
    
    Range::iterator s = file_ids.lower_bound( (EntityHandle)(desc->start_id) );
    Range::iterator e = Range::lower_bound( s, file_ids.end(),  
                                   (EntityHandle)(desc->start_id) + desc->count );
    Range subset;
    subset.merge( s, e );
    
    hid_t handle = mhdf_openDenseTagData( filePtr, tag.name, gname, &num_val, &status );
    if (mhdf_isError( &status )) {
      readUtil->report_error( "%s", mhdf_message( &status ) );
      return error(MB_FAILURE);
    }
    
    try {
      curr_data.clear();
      tag_values.reserve( subset.size() );
      const size_t data_buffer_size = bufferSize/sizeof(int);
      int* data_buffer = reinterpret_cast<int*>(dataBuffer);

      ReadHDF5Dataset reader( (tn + " dense vals").c_str(), handle, nativeParallel, mpiComm );
      reader.set_file_ids( subset, desc->start_id, data_buffer_size, H5T_NATIVE_INT );
      dbgOut.printf( 3, "Reading dense data for tag \"%s\" and group \"%s\" in %lu chunks\n",
        tag.name, gname, reader.get_read_count() );
      int nn = 0;
      // should normally only have one read call, unless sparse nature
      // of file_ids caused reader to do something strange
      while (!reader.done()) {
        dbgOut.printf( 3, "Reading chunk %d of \"%s\"/\"%s\"\n", ++nn, tag.name, fileInfo->elems[grp].handle );
        reader.read( data_buffer, count );
        curr_data.insert( curr_data.end(), data_buffer, data_buffer + count );
      }
    }
    catch (ReadHDF5Dataset::Exception) {
      return error(MB_FAILURE);
    }
    
    std::sort( curr_data.begin(), curr_data.end() );
    curr_data.erase( std::unique(curr_data.begin(), curr_data.end()), curr_data.end() );
    prev_data.clear();
    tag_values.swap( prev_data );
    std::set_union( prev_data.begin(), prev_data.end(),
                    curr_data.begin(), curr_data.end(),
                    std::back_inserter( tag_values ) );
  }
  
  return MB_SUCCESS;
}
ErrorCode moab::ReadHDF5::read_var_len_tag ( Tag  tag_handle,
hid_t  hdf_read_type,
hid_t  ent_table,
hid_t  val_table,
hid_t  off_table,
long  num_entities,
long  num_values 
) [private]

Read variable-length tag for all entities.

Definition at line 3282 of file ReadHDF5.cpp.

{

  CHECK_OPEN_HANDLES;

  ErrorCode rval;
  DataType mbtype;
  
  rval = iFace->tag_get_data_type( tag_handle, mbtype );
  if (MB_SUCCESS != rval) 
    return error(rval);
    
    // can't do variable-length bit tags
  if (MB_TYPE_BIT == mbtype)
    return error(MB_VARIABLE_DATA_LENGTH);

    // if here, MOAB tag must be variable-length
  int mbsize;
  if (MB_VARIABLE_DATA_LENGTH != iFace->tag_get_bytes( tag_handle, mbsize )) {
    assert(false);
    return error(MB_VARIABLE_DATA_LENGTH);
  }
  
  int read_size;
  if (hdf_read_type) {
    hsize_t hdf_size = H5Tget_size( hdf_read_type );
    if (hdf_size < 1)
      return error(MB_FAILURE);
    read_size = hdf_size;
  }
  else {
    // opaque
    read_size = 1;
  }
  
  std::string tn("<error>");
  iFace->tag_get_name( tag_handle, tn );

    // Read entire ID table and for those file IDs corresponding
    // to entities that we have read from the file add both the
    // offset into the offset range and the handle into the handle 
    // range.  If handles are not ordered, switch to using a vector.
  const EntityHandle base_offset = 1; // can't put zero in a Range
  std::vector<EntityHandle> handle_vect;
  Range handle_range, offset_range;
  rval = read_sparse_tag_indices( tn.c_str(),
                                  ent_table, base_offset,
                                  offset_range, handle_range,
                                  handle_vect );

    // This code only works if the id_table is an ordered list.
    // This assumption was also true for the previous iteration
    // of this code, but wasn't checked.  MOAB's file writer
    // always writes an ordered list for id_table.
  if (!handle_vect.empty()) {
    readUtil->report_error("Unordered file ids for variable length tag not supported.\n");
    return MB_FAILURE;
  }
  
  class VTReader : public ReadHDF5VarLen {
      Tag tagHandle;
      bool isHandle;
      size_t readSize;
      ReadHDF5* readHDF5;
    public:
      ErrorCode store_data( EntityHandle file_id, void* data, long count, bool )
      {
        ErrorCode rval1;
        if (isHandle) {
          assert(readSize == sizeof(EntityHandle));
          rval1 = readHDF5->convert_id_to_handle( (EntityHandle*)data, count );
          if (MB_SUCCESS != rval1)
            return error(rval1);
        }
        int n = count;
        return readHDF5->moab()->tag_set_by_ptr( tagHandle, &file_id, 1, &data, &n );
      }
      VTReader( DebugOutput& debug_output, void* buffer, size_t buffer_size,
                Tag tag, bool is_handle_tag, size_t read_size1, ReadHDF5* owner )
        : ReadHDF5VarLen( debug_output, buffer, buffer_size ),
          tagHandle(tag),
          isHandle(is_handle_tag),
          readSize(read_size1),
          readHDF5(owner)
      {}
  };
  
  VTReader tool( dbgOut, dataBuffer, bufferSize, tag_handle, 
                 MB_TYPE_HANDLE == mbtype, read_size, this );
  try {
      // Read offsets into value table.
    std::vector<unsigned> counts;
    Range offsets;
    ReadHDF5Dataset off_reader( (tn + " offsets").c_str(), off_table, nativeParallel, mpiComm, false );
    rval = tool.read_offsets( off_reader, offset_range, base_offset,
                              base_offset, offsets, counts );
    if (MB_SUCCESS != rval)
      return error(rval);
  
      // Read tag values
    Range empty;
    ReadHDF5Dataset val_reader( (tn + " values").c_str(), val_table, nativeParallel, mpiComm, false );
    rval = tool.read_data( val_reader, offsets, base_offset, hdf_read_type,
                           handle_range, counts, empty );
    if (MB_SUCCESS != rval)
      return error(rval);
  }
  catch (ReadHDF5Dataset::Exception) {
    return error(MB_FAILURE);
  }
  
  return MB_SUCCESS;
}
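
VTReader::store_data above hands each entity's values to MOAB through Interface::tag_set_by_ptr. A hedged usage sketch for storing one variable-length tag value (the tag is assumed to have been created variable-length; the units of the size argument are assumed to follow the tag's data-type convention):

#include "moab/Core.hpp"
using namespace moab;

// Hedged sketch: store three doubles as the (variable-length) tag value
// of a single entity, as VTReader::store_data does for each file entity.
ErrorCode set_varlen_value( Interface& mb, Tag tag, EntityHandle ent )
{
  double values[3] = { 1.0, 2.0, 3.0 };
  const void* ptr = values;
  int size = 3;  // assumed to be in multiples of the tag's data type
  return mb.tag_set_by_ptr( tag, &ent, 1, &ptr, &size );
}
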
ErrorCode moab::ReadHDF5::search_tag_values ( int  tag_index,
const std::vector< int > &  sorted_values,
Range file_ids_out,
bool  sets_only = false 
) [private]

Search for entities with specified tag values.

For parallel reads, this function does collective IO.

Parameters:
  tag_index      Index into fileInfo->tags specifying which tag to search.
  sorted_values  List of tag values to check for, in ascending sorted order.
  file_ids_out   File IDs for entities with the specified tag values.
Definition at line 1168 of file ReadHDF5.cpp.

{
  ErrorCode rval;
  mhdf_Status status;
  std::vector<EntityHandle>::iterator iter;
  const mhdf_TagDesc& tag = fileInfo->tags[tag_index];
  long size;
  long start_id;

  CHECK_OPEN_HANDLES;

  debug_barrier();
   
    // do dense data
    
  hid_t table;
  const char* name;
  std::vector<EntityHandle> indices;
    // These are probably in order of dimension, so iterate
    // in reverse order to make Range insertions more efficient.
  std::vector<int> grp_indices( tag.dense_elem_indices, tag.dense_elem_indices+tag.num_dense_indices );
  for (std::vector<int>::reverse_iterator i = grp_indices.rbegin(); i != grp_indices.rend(); ++i)
  {
    int idx = *i;
    if (idx == -2) {
      name = mhdf_set_type_handle();
      start_id = fileInfo->sets.start_id;
    }
    else if (sets_only) {
      continue;
    }
    else if (idx == -1) {
      name = mhdf_node_type_handle();
      start_id = fileInfo->nodes.start_id;
    }
    else {
      if (idx < 0 || idx >= fileInfo->num_elem_desc) 
        return error(MB_FAILURE);
      name = fileInfo->elems[idx].handle;
      start_id = fileInfo->elems[idx].desc.start_id;
    }
    table = mhdf_openDenseTagData( filePtr, tag.name, name, &size, &status );
    if (is_error(status))
      return error(MB_FAILURE);
    rval = search_tag_values( table, size, sorted_values, indices );
    mhdf_closeData( filePtr, table, &status );
    if (MB_SUCCESS != rval || is_error(status))
      return error(MB_FAILURE);
      // Convert from table indices to file IDs and add to result list
    std::sort( indices.begin(), indices.end(), std::greater<EntityHandle>() );
    std::transform( indices.begin(), indices.end(), range_inserter(file_ids),
                    std::bind1st( std::plus<long>(), start_id ) );
    indices.clear();
  }
  
  if (!tag.have_sparse)
    return MB_SUCCESS;
  
    // do sparse data
    
  hid_t tables[2]; 
  long junk; // redundant value for non-variable-length tags
  mhdf_openSparseTagData( filePtr, tag.name, &size, &junk, tables, &status );
  if (is_error(status))
    return error(MB_FAILURE);
  rval = search_tag_values( tables[1], size, sorted_values, indices );
  mhdf_closeData( filePtr, tables[1], &status );
  if (MB_SUCCESS != rval || is_error(status)) {
    mhdf_closeData( filePtr, tables[0], &status );
    return error(MB_FAILURE);
  }
    // convert to ranges
  std::sort( indices.begin(), indices.end() );
  std::vector<EntityHandle> ranges;
  iter = indices.begin();
  while (iter != indices.end()) {
    ranges.push_back( *iter );
    EntityHandle last = *iter;
    for (++iter; iter != indices.end() && (last + 1) == *iter; ++iter, ++last);
    ranges.push_back( last );
  }
    // read file ids
  iter = ranges.begin();
  unsigned long offset = 0;
  while (iter != ranges.end()) {
    long begin = *iter; ++iter;
    long end   = *iter; ++iter;
    mhdf_readSparseTagEntitiesWithOpt( tables[0], begin, end - begin + 1, 
                                handleType, &indices[offset], indepIO, &status );
    if (is_error(status)) {
      mhdf_closeData( filePtr, tables[0], &status );
      return error(MB_FAILURE);
    }
    offset += end - begin + 1;
  }
  mhdf_closeData( filePtr, tables[0], &status );
  if (is_error(status))
    return error(MB_FAILURE);
  assert( offset == indices.size() );
  std::sort( indices.begin(), indices.end() );
  
  if (sets_only) {
    iter = std::lower_bound( indices.begin(), indices.end(), 
              (EntityHandle)(fileInfo->sets.start_id + fileInfo->sets.count) );
    indices.erase( iter, indices.end() );
    iter = std::lower_bound( indices.begin(), indices.end(), 
                             fileInfo->sets.start_id );
    indices.erase( indices.begin(), iter );
  }
  copy_sorted_file_ids( &indices[0], indices.size(), file_ids );
  
  return MB_SUCCESS;  
}
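
The while loop above that builds the ranges vector collapses the sorted index list into inclusive [begin, end] pairs, so that one read can be issued per contiguous run. The same grouping as a standalone sketch:

#include <vector>

// Collapse a sorted list of indices into inclusive [begin, end] pairs.
std::vector<long> compress_to_ranges( const std::vector<long>& sorted )
{
  std::vector<long> ranges;
  std::vector<long>::const_iterator iter = sorted.begin();
  while (iter != sorted.end()) {
    ranges.push_back( *iter );   // begin of a contiguous run
    long last = *iter;
    for (++iter; iter != sorted.end() && last + 1 == *iter; ++iter, ++last)
      ;
    ranges.push_back( last );    // inclusive end of the run
  }
  return ranges;
}

For input {3, 4, 5, 9, 11, 12} this yields {3, 5, 9, 9, 11, 12}.
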
ErrorCode moab::ReadHDF5::search_tag_values ( hid_t  tag_table,
unsigned long  table_size,
const std::vector< int > &  sorted_values,
std::vector< EntityHandle > &  value_indices 
) [private]

Search a table of tag data for a specified set of values.

Search a table of tag values, returning the indices into the table at which matches were found. For parallel reads, this function does collective IO.

Parameters:
  tag_table      HDF5/mhdf handle for the tag values.
  table_size     Number of values in the table.
  sorted_values  Sorted list of values to search for.
  value_indices  Output: offsets into the table at which matching values were found.

Definition at line 1364 of file ReadHDF5.cpp.

{

  debug_barrier();

  CHECK_OPEN_HANDLES;

  mhdf_Status status;
  size_t chunk_size = bufferSize / sizeof(int);
  int * buffer = reinterpret_cast<int*>(dataBuffer);
  size_t remaining = table_size, offset = 0;
  while (remaining) {
      // Get a block of tag values
    size_t count = std::min( chunk_size, remaining );
    assert_range( buffer, count );
    mhdf_readTagValuesWithOpt( tag_table, offset, count, H5T_NATIVE_INT, buffer, collIO, &status );
    if (is_error(status))
      return error(MB_FAILURE);
    
      // search tag values
    for (size_t i = 0; i < count; ++i)
      if (std::binary_search( sorted_values.begin(), sorted_values.end(), (int)buffer[i] ))
        value_indices.push_back( i + offset );
    
    offset += count;
    remaining -= count;
  }

  return MB_SUCCESS;
}
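
A standalone sketch of the per-chunk filtering step above; note that std::binary_search requires sorted_values to be in ascending order, which is part of the caller's contract:

#include <algorithm>
#include <cstddef>
#include <vector>

// Record the global index (chunk offset + local index) of every buffered
// value that appears in the sorted search list.
void filter_chunk( const int* buffer, size_t count, size_t offset,
                   const std::vector<int>& sorted_values,
                   std::vector<size_t>& value_indices )
{
  for (size_t i = 0; i < count; ++i)
    if (std::binary_search( sorted_values.begin(), sorted_values.end(), buffer[i] ))
      value_indices.push_back( i + offset );
}
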
ErrorCode moab::ReadHDF5::set_up_read ( const char *  file_name,
const FileOptions opts 
) [private]

Definition at line 273 of file ReadHDF5.cpp.

{
  ErrorCode rval;
  mhdf_Status status;
  indepIO = collIO = H5P_DEFAULT;
  mpiComm = 0;

  if (MB_SUCCESS != init())
    return error(MB_FAILURE);
  
#if defined(H5Eget_auto_vers) && H5Eget_auto_vers > 1
  herr_t err = H5Eget_auto( H5E_DEFAULT, &errorHandler.func, &errorHandler.data );
#else
  herr_t err = H5Eget_auto( &errorHandler.func, &errorHandler.data );
#endif
  if (err < 0) {
    errorHandler.func = 0;
    errorHandler.data = 0;
  }
  else {
#if defined(H5Eset_auto_vers) && H5Eset_auto_vers > 1
    err = H5Eset_auto( H5E_DEFAULT, &handle_hdf5_error, &errorHandler );
#else
    err = H5Eset_auto( &handle_hdf5_error, &errorHandler );
#endif
    if (err < 0) {
      errorHandler.func = 0;
      errorHandler.data = 0;
    }
  }
      
  
  // Set up debug output
  int tmpval;
  if (MB_SUCCESS == opts.get_int_option("DEBUG_IO", 1, tmpval)) {
    dbgOut.set_verbosity(tmpval);
    dbgOut.set_prefix("H5M ");
  }
  dbgOut.limit_output_to_first_N_procs( 32 );
  
  // Enable some extra checks for reads.  Note: amongst other things this
  // will print errors if the entire file is not read, so if doing a 
  // partial read that is not a parallel read, this should be disabled.
  debugTrack = (MB_SUCCESS == opts.get_null_option("DEBUG_BINIO"));
  
  opts.get_toggle_option("BLOCKED_COORDINATE_IO",DEFAULT_BLOCKED_COORDINATE_IO,blockedCoordinateIO);
  opts.get_toggle_option("BCAST_SUMMARY",        DEFAULT_BCAST_SUMMARY,        bcastSummary);
  opts.get_toggle_option("BCAST_DUPLICATE_READS",DEFAULT_BCAST_DUPLICATE_READS,bcastDuplicateReads);
  bool bglockless = (MB_SUCCESS == opts.get_null_option("BGLOCKLESS"));
    
    // Handle parallel options
  std::string junk;
  bool use_mpio = (MB_SUCCESS == opts.get_null_option("USE_MPIO"));
  rval = opts.match_option("PARALLEL", "READ_PART");
  bool parallel = (rval != MB_ENTITY_NOT_FOUND);
  nativeParallel = (rval == MB_SUCCESS);
  if (use_mpio && !parallel) {
    readUtil->report_error( "'USE_MPIO' option specified w/out 'PARALLEL' option" );
    return MB_NOT_IMPLEMENTED;
  }

  // This option is intended for testing purposes only, and thus
  // is not documented anywhere.  Decreasing the buffer size can
  // expose bugs that would otherwise only be seen when reading
  // very large files.
  rval = opts.get_int_option( "BUFFER_SIZE", bufferSize );
  if (MB_SUCCESS != rval) {
    bufferSize = READ_HDF5_BUFFER_SIZE;
  }
  else if (bufferSize < (int)std::max( sizeof(EntityHandle), sizeof(void*) )) {
    return error(MB_INVALID_SIZE);
  }
  
  dataBuffer = (char*)malloc( bufferSize );
  if (!dataBuffer)
    return error(MB_MEMORY_ALLOCATION_FAILED);
  
  if (use_mpio || nativeParallel) {
  
      // lockless file IO on IBM BlueGene
    std::string pfilename(filename);
#ifdef BLUEGENE
    if (!bglockless && 0 != pfilename.find("bglockless:")) {
        // check for GPFS file system
      struct statfs fsdata;
      statfs( filename, &fsdata );
      if (fsdata.f_type == BG_LOCKLESS_GPFS) {
        bglockless = true;
      }
    }
#endif
    if (bglockless) {
      pfilename = std::string("bglockless:") + pfilename;
    }    
    
#ifndef HDF5_PARALLEL
    readUtil->report_error("MOAB not configured with parallel HDF5 support");
    free(dataBuffer);
    dataBuffer = NULL;
    return MB_NOT_IMPLEMENTED;
#else

    MPI_Info info = MPI_INFO_NULL;
    std::string cb_size;
    rval = opts.get_str_option("CB_BUFFER_SIZE", cb_size);
    if (MB_SUCCESS == rval) {
      MPI_Info_create (&info);
      MPI_Info_set (info, const_cast<char*>("cb_buffer_size"), const_cast<char*>(cb_size.c_str()));
    }
  
    int pcomm_no = 0;
    rval = opts.get_int_option("PARALLEL_COMM", pcomm_no);
    if (rval == MB_TYPE_OUT_OF_RANGE) {
      readUtil->report_error("Invalid value for PARALLEL_COMM option");
      return rval;
    }
    myPcomm = ParallelComm::get_pcomm(iFace, pcomm_no);
    if (0 == myPcomm) {
      myPcomm = new ParallelComm(iFace, MPI_COMM_WORLD);
    }
    const int rank = myPcomm->proc_config().proc_rank();
    dbgOut.set_rank(rank);
    dbgOut.limit_output_to_first_N_procs( 32 );
    mpiComm = new MPI_Comm(myPcomm->proc_config().proc_comm());
    if (bglockless) {
      dbgOut.printf( 1, "Enabling lockless IO for BlueGene (filename: \"%s\")\n", pfilename.c_str() );
    }    

#ifndef H5_MPI_COMPLEX_DERIVED_DATATYPE_WORKS 
    dbgOut.print(1,"H5_MPI_COMPLEX_DERIVED_DATATYPE_WORKS is not defined\n");
#endif

      // Open the file in serial on root to read summary
    dbgOut.tprint( 1, "Getting file summary\n" );
    fileInfo = 0;


    hid_t file_prop;
    if (bcastSummary) {
      unsigned long size = 0;
      if (rank == 0) {

        file_prop = H5Pcreate(H5P_FILE_ACCESS);
        err = H5Pset_fapl_mpio(file_prop, MPI_COMM_SELF, MPI_INFO_NULL);
        assert(file_prop >= 0);
        assert(err >= 0);
        filePtr = mhdf_openFileWithOpt( pfilename.c_str(), 0, NULL, handleType, file_prop, &status );
        H5Pclose( file_prop );

        if (filePtr) {  
          fileInfo = mhdf_getFileSummary( filePtr, handleType, &status );
          if (!is_error(status)) {
            size = fileInfo->total_size;
            fileInfo->offset = (unsigned char*)fileInfo;
          }
        }
        mhdf_closeFile( filePtr, &status );
        if (fileInfo && mhdf_isError(&status)) {
          free(fileInfo);
          fileInfo = NULL;
        }
      }

      dbgOut.tprint( 1, "Communicating file summary\n" );
      int mpi_err = MPI_Bcast( &size, 1, MPI_UNSIGNED_LONG, 0, myPcomm->proc_config().proc_comm() );
      if (mpi_err || !size)
        return MB_FAILURE;


      if (rank != 0) 
        fileInfo = reinterpret_cast<mhdf_FileDesc*>( malloc( size ) );

      MPI_Bcast( fileInfo, size, MPI_BYTE, 0, myPcomm->proc_config().proc_comm() );

      if (rank != 0)
        mhdf_fixFileDesc( fileInfo, reinterpret_cast<mhdf_FileDesc*>(fileInfo->offset) );
    }
  
    file_prop = H5Pcreate(H5P_FILE_ACCESS);
    err = H5Pset_fapl_mpio(file_prop, myPcomm->proc_config().proc_comm(), info);
    assert(file_prop >= 0);
    assert(err >= 0);

    collIO = H5Pcreate(H5P_DATASET_XFER);
    assert(collIO > 0);
    err = H5Pset_dxpl_mpio(collIO, H5FD_MPIO_COLLECTIVE);
    assert(err >= 0);
    indepIO = nativeParallel ? H5P_DEFAULT : collIO;

      // re-open file in parallel
    dbgOut.tprintf( 1, "Opening \"%s\" for parallel IO\n", pfilename.c_str() );
    filePtr = mhdf_openFileWithOpt( pfilename.c_str(), 0, NULL, handleType, file_prop, &status );

    H5Pclose( file_prop );
    if (!filePtr)
    {
      readUtil->report_error("%s", mhdf_message( &status ));
      free( dataBuffer );
      dataBuffer = NULL;
      H5Pclose( indepIO ); 
      if (collIO != indepIO)
        H5Pclose( collIO );
      collIO = indepIO = H5P_DEFAULT;
      return error(MB_FAILURE);
    }
    
    if (!bcastSummary) {
      fileInfo = mhdf_getFileSummary( filePtr, handleType, &status );
      if (is_error(status)) {
        readUtil->report_error( "%s", mhdf_message( &status ) );
        free( dataBuffer );
        dataBuffer = NULL;
        mhdf_closeFile( filePtr, &status );
        return error(MB_FAILURE);
      }
    }

#endif // HDF5_PARALLEL
  }
  else {
  
      // Open the file
    filePtr = mhdf_openFile( filename, 0, NULL, handleType, &status );
    if (!filePtr)
    {
      readUtil->report_error( "%s", mhdf_message( &status ));
      free( dataBuffer );
      dataBuffer = NULL;
      return error(MB_FAILURE);
    }

      // get file info
    fileInfo = mhdf_getFileSummary( filePtr, handleType, &status );
    if (is_error(status)) {
      free( dataBuffer );
      dataBuffer = NULL;
      mhdf_closeFile( filePtr, &status );
      return error(MB_FAILURE);
    }
  }
  
  ReadHDF5Dataset::default_hyperslab_selection_limit();
  int hslimit;
  rval = opts.get_int_option( "HYPERSLAB_SELECT_LIMIT", hslimit );
  if (MB_SUCCESS == rval && hslimit > 0)
    ReadHDF5Dataset::set_hyperslab_selection_limit( hslimit );
  else
    ReadHDF5Dataset::default_hyperslab_selection_limit();
  if (MB_SUCCESS != opts.get_null_option("HYPERSLAB_OR") &&
     (MB_SUCCESS == opts.get_null_option( "HYPERSLAB_APPEND" )
      || HDF5_can_append_hyperslabs())) {
    ReadHDF5Dataset::append_hyperslabs();
    if (MB_SUCCESS != opts.get_int_option( "HYPERSLAB_SELECT_LIMIT", hslimit ))
      ReadHDF5Dataset::set_hyperslab_selection_limit( std::numeric_limits<int>::max() );
    dbgOut.print(1,"Using H5S_APPEND for hyperslab selection\n");
  }
  
  return MB_SUCCESS;
}
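
The parallel branch above configures two HDF5 property lists: a file-access list that selects the MPI-IO driver, and a dataset-transfer list that requests collective reads. A hedged sketch of that setup in plain HDF5 (not MOAB code; requires HDF5 built with parallel support):

#include <hdf5.h>
#include <mpi.h>

// Open a file for parallel access and build a collective transfer list,
// mirroring the H5Pset_fapl_mpio / H5Pset_dxpl_mpio calls above.
hid_t open_parallel( const char* path, MPI_Comm comm, hid_t* collective_xfer_out )
{
  hid_t fapl = H5Pcreate( H5P_FILE_ACCESS );
  H5Pset_fapl_mpio( fapl, comm, MPI_INFO_NULL );
  hid_t file = H5Fopen( path, H5F_ACC_RDONLY, fapl );
  H5Pclose( fapl );

  hid_t dxpl = H5Pcreate( H5P_DATASET_XFER );
  H5Pset_dxpl_mpio( dxpl, H5FD_MPIO_COLLECTIVE );
  *collective_xfer_out = dxpl;
  return file;  // negative on failure
}

From user code, the reader options parsed here are passed as a semicolon-separated string, e.g. mb.load_file( "mesh.h5m", 0, "PARALLEL=READ_PART;DEBUG_IO=1" ); the option names come from the listing above, but exact usage depends on the application.
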

ErrorCode moab::ReadHDF5::store_file_ids ( Tag  tag ) [private]

Store file IDs in tag values.

Copy the file ID from the IDMap for each entity read from the file into a tag value on the entity.

Definition at line 3535 of file ReadHDF5.cpp.

{

  CHECK_OPEN_HANDLES;

  typedef int tag_type;
  tag_type* buffer = reinterpret_cast<tag_type*>(dataBuffer);
  const long buffer_size = bufferSize / sizeof(tag_type);
  for (IDMap::iterator i = idMap.begin(); i != idMap.end(); ++i) {
    IDMap::Range range = *i;
    
      // make sure the values will fit in the tag type
    IDMap::key_type rv = range.begin + (range.count - 1);
    tag_type tv = (tag_type)rv;
    if ((IDMap::key_type)tv != rv) {
      assert(false);
      return MB_INDEX_OUT_OF_RANGE;
    }
    
    while (range.count) {
      long count = buffer_size < range.count ? buffer_size : range.count;

      Range handles;
      handles.insert( range.value, range.value + count - 1 );
      range.value += count;
      range.count -= count;
      for (long j = 0; j < count; ++j) 
        buffer[j] = (tag_type)range.begin++;

      ErrorCode rval = iFace->tag_set_data( tag, handles, buffer );
      if (MB_SUCCESS != rval)
        return rval;
    }
  }
  return MB_SUCCESS;
}
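
Each IDMap entry maps a contiguous block of file IDs starting at begin to a contiguous block of handles starting at value, count entities long; the loop above streams each block through the fixed-size data buffer. A simplified sketch of that chunking pattern (types hypothetical, not the actual RangeMap interface):

#include <vector>

struct SimpleRange { long begin; long count; };  // stand-in for IDMap::Range

// Emit 'range.count' consecutive IDs through a fixed-size staging buffer.
void write_chunked( SimpleRange range, long buffer_size, std::vector<int>& out )
{
  std::vector<int> buffer( buffer_size );
  while (range.count) {
    long n = buffer_size < range.count ? buffer_size : range.count;
    for (long j = 0; j < n; ++j)
      buffer[j] = (int)range.begin++;
    range.count -= n;
    out.insert( out.end(), buffer.begin(), buffer.begin() + n );
  }
}
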

ErrorCode moab::ReadHDF5::update_connectivity ( ) [private]

Update connectivity data for all element groups for which read_elems was called with a non-null node_ids argument.

Definition at line 1601 of file ReadHDF5.cpp.

{
  ErrorCode rval;
  std::vector<IDConnectivity>::iterator i;
  for (i = idConnectivityList.begin(); i != idConnectivityList.end(); ++i) {
    rval = convert_id_to_handle( i->array, i->count * i->nodes_per_elem );
    if (MB_SUCCESS != rval)
      return error(rval);
    
    rval = readUtil->update_adjacencies( i->handle, i->count, i->nodes_per_elem, i->array );
    if (MB_SUCCESS != rval)
      return error(rval);
  }
  idConnectivityList.clear();
  return MB_SUCCESS;    
}
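
The deferral exists presumably because connectivity can reference nodes not yet read when an element group is processed; arrays are stored as raw file IDs and translated to handles in place once the ID map is complete. A hedged illustration of the in-place translation (simplified types, not the MOAB RangeMap):

#include <map>
#include <vector>

typedef unsigned long Handle;

// Translate a connectivity array from file IDs to handles in place,
// once the id-to-handle map is fully populated.
void convert_in_place( std::vector<Handle>& connectivity,
                       const std::map<long, Handle>& id_map )
{
  for (size_t i = 0; i < connectivity.size(); ++i) {
    std::map<long, Handle>::const_iterator it =
        id_map.find( (long)connectivity[i] );
    if (it != id_map.end())
      connectivity[i] = it->second;  // unmapped IDs are left untouched here
  }
}
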

Member Data Documentation

bool moab::ReadHDF5::bcastDuplicateReads [private]

Definition at line 170 of file ReadHDF5.hpp.

bool moab::ReadHDF5::bcastSummary [private]

Definition at line 169 of file ReadHDF5.hpp.

bool moab::ReadHDF5::blockedCoordinateIO [private]

Flags for some behavior that can be changed through reader options.

Definition at line 168 of file ReadHDF5.hpp.

int moab::ReadHDF5::bufferSize [private]

The size of the data buffer (dataBuffer).

Definition at line 112 of file ReadHDF5.hpp.

hid_t moab::ReadHDF5::collIO [private]

Definition at line 152 of file ReadHDF5.hpp.

char* moab::ReadHDF5::dataBuffer [private]

A memory buffer to use for all I/O operations.

Definition at line 114 of file ReadHDF5.hpp.

DebugOutput moab::ReadHDF5::dbgOut [private]

Debug output. Verbosity controlled with the DEBUG_IO option.

Definition at line 160 of file ReadHDF5.hpp.

bool moab::ReadHDF5::debugTrack [private]

Use IODebugTrack instances to verify reads. Enabled with the DEBUG_BINIO option.

Definition at line 158 of file ReadHDF5.hpp.

HDF5ErrorHandler moab::ReadHDF5::errorHandler [private]

Store old HDF5 error handling function.

Definition at line 173 of file ReadHDF5.hpp.

mhdf_FileDesc* moab::ReadHDF5::fileInfo [private]

File summary.

Definition at line 123 of file ReadHDF5.hpp.

mhdf_FileHandle moab::ReadHDF5::filePtr [private]

The file handle from the mhdf library.

Definition at line 120 of file ReadHDF5.hpp.

hid_t moab::ReadHDF5::handleType [private]

The type of an EntityHandle.

Definition at line 133 of file ReadHDF5.hpp.

std::vector<IDConnectivity> moab::ReadHDF5::idConnectivityList [private]

List of connectivity arrays for which conversion from file ID to handle was deferred until later.

Definition at line 145 of file ReadHDF5.hpp.

IDMap moab::ReadHDF5::idMap [private]

Definition at line 127 of file ReadHDF5.hpp.

Interface* moab::ReadHDF5::iFace [private]

Interface pointer passed to constructor.

Definition at line 117 of file ReadHDF5.hpp.

hid_t moab::ReadHDF5::indepIO [private]

Read/write property handles: indepIO -> independent IO during a true parallel read; collIO -> collective IO during a true parallel read. Both are H5P_DEFAULT for serial IO, and both are collective when reading the entire file on all processors.

Definition at line 152 of file ReadHDF5.hpp.

MPI_Comm* moab::ReadHDF5::mpiComm [private]

MPI_Comm value (unused if !nativeParallel).

Definition at line 164 of file ReadHDF5.hpp.

ParallelComm* moab::ReadHDF5::myPcomm [private]

Definition at line 154 of file ReadHDF5.hpp.

bool moab::ReadHDF5::nativeParallel [private]

Doing true parallel read (PARALLEL=READ_PART).

Definition at line 162 of file ReadHDF5.hpp.

ReadUtilIface* moab::ReadHDF5::readUtil [private]

Cache pointer to read util.

Definition at line 130 of file ReadHDF5.hpp.

long(* moab::ReadHDF5::setMeta)[4] [private]

Definition at line 175 of file ReadHDF5.hpp.


The documentation for this class was generated from the following files: ReadHDF5.hpp and ReadHDF5.cpp.