moab
Write mesh database to MOAB's native HDF5-based file format.
#include <WriteHDF5.hpp>
Classes | |
struct | ExportSet |
Range of entities, grouped by type, to export. | |
struct | ExportType |
struct | HDF5ErrorHandler |
Store old HDF5 error handling function. | |
struct | SpecialSetData |
struct | SpecSetLess |
struct | TagDesc |
Tag to write to file. | |
Public Types | |
typedef EntityHandle | id_t |
Public Member Functions | |
WriteHDF5 (Interface *iface) | |
virtual | ~WriteHDF5 () |
ErrorCode | write_file (const char *filename, const bool overwrite, const FileOptions &opts, const EntityHandle *export_sets, const int export_set_count, const std::vector< std::string > &qa_records, const Tag *tag_list=NULL, int num_tags=0, int user_dimension=3) |
mhdf_FileHandle | file_ptr () |
WriteUtilIface * | write_util () |
ErrorCode | create_elem_table (const ExportSet &block, long num_ents, long &first_id_out) |
ErrorCode | create_set_meta (long num_sets, long &first_id_out) |
Static Public Member Functions | |
static WriterIface * | factory (Interface *) |
Static Public Attributes | |
static const hid_t | id_type = get_id_type() |
Protected Types | |
enum | TimingValues { TOTAL_TIME = 0, GATHER_TIME, CREATE_TIME, CREATE_NODE_TIME, NEGOTIATE_TYPES_TIME, CREATE_ELEM_TIME, FILEID_EXCHANGE_TIME, CREATE_ADJ_TIME, CREATE_SET_TIME, SHARED_SET_IDS, SHARED_SET_CONTENTS, SET_OFFSET_TIME, CREATE_TAG_TIME, COORD_TIME, CONN_TIME, SET_TIME, SET_META, SET_CONTENT, SET_PARENT, SET_CHILD, ADJ_TIME, TAG_TIME, DENSE_TAG_TIME, SPARSE_TAG_TIME, VARLEN_TAG_TIME, NUM_TIMES } |
Protected Member Functions | |
virtual ErrorCode | parallel_create_file (const char *filename, bool overwrite, const std::vector< std::string > &qa_records, const FileOptions &opts, const Tag *tag_list, int num_tags, int dimension=3, double *times=0) |
virtual ErrorCode | write_finished () |
virtual void | debug_barrier_line (int lineno) |
ErrorCode | gather_tags (const Tag *user_tag_list, int user_tag_list_length) |
Gather tags. | |
bool | check_dense_format_tag (const ExportSet &ents, const Range &all_tagged, bool prefer_dense) |
ErrorCode | count_adjacencies (const Range &elements, id_t &result) |
ErrorCode | count_set_size (const Range &sets, long &contents_length_out, long &children_length_out, long &parents_length_out) |
ErrorCode | get_set_info (EntityHandle set, long &num_entities, long &num_children, long &num_parents, unsigned long &flags) |
Get information about a meshset. | |
ErrorCode | create_set_tables (long contents_length, long children_length, long parents_length) |
ErrorCode | write_qa (const std::vector< std::string > &list) |
Write exodus-type QA info. | |
ErrorCode | get_num_sparse_tagged_entities (const TagDesc &tag, size_t &count) |
Get the number of tagged entities for which to write tag values. | |
ErrorCode | get_sparse_tagged_entities (const TagDesc &tag, Range &range) |
Get tagged entities for which to write tag values. | |
void | get_write_entities (Range &range) |
Get entities that will be written to file. | |
const ExportSet * | find (ExportType type) const |
const SpecialSetData * | find_set_data (EntityHandle h) const |
SpecialSetData * | find_set_data (EntityHandle h) |
void | print_id_map () const |
void | print_id_map (std::ostream &str, const char *prefix="") const |
ErrorCode | create_tag (const TagDesc &tag_data, unsigned long num_entities, unsigned long var_len_total) |
ErrorCode | assign_ids (const Range &entities, id_t first_id) |
Add entities to idMap. | |
ErrorCode | range_to_blocked_list (const Range &input_range, std::vector< id_t > &output_id_list, bool &ranged_list) |
ErrorCode | range_to_blocked_list (const EntityHandle *input_ranges, size_t num_input_ranges, std::vector< id_t > &output_id_list, bool &ranged_list) |
ErrorCode | range_to_id_list (const Range &input_range, id_t *array) |
ErrorCode | vector_to_id_list (const std::vector< EntityHandle > &input, std::vector< id_t > &output, bool remove_non_written=false) |
Get IDs for entities. | |
ErrorCode | vector_to_id_list (const EntityHandle *input, id_t *output, size_t num_entities) |
Get IDs for entities. | |
ErrorCode | vector_to_id_list (const EntityHandle *input, size_t input_len, id_t *output, size_t &output_len, bool remove_non_written) |
Get IDs for entities. | |
bool | convert_handle_tag (EntityHandle *data, size_t count) const |
bool | convert_handle_tag (const EntityHandle *source, EntityHandle *dest, size_t count) const |
ErrorCode | get_adjacencies (EntityHandle entity, std::vector< id_t > &adj) |
ErrorCode | get_tag_data_length (const TagDesc &tag_info, const Range &range, unsigned long &result) |
virtual void | print_times (const double times[NUM_TIMES]) const |
Protected Attributes | |
HDF5ErrorHandler | errorHandler |
Store old HDF5 error handling function. | |
size_t | bufferSize |
The size of the data buffer (dataBuffer). | |
char * | dataBuffer |
A memory buffer to use for all I/O operations. | |
Interface * | iFace |
Interface pointer passed to constructor. | |
WriteUtilIface * | writeUtil |
Cached pointer to writeUtil interface. | |
mhdf_FileHandle | filePtr |
The file handle from the mhdf library. | |
RangeMap< EntityHandle, id_t > | idMap |
Map from entity handles to file IDs. | |
std::list< ExportSet > | exportList |
The list of elements to export. | |
ExportSet | nodeSet |
The list of nodes to export. | |
ExportSet | setSet |
The list of sets to export. | |
unsigned long | setContentsOffset |
Offset into set contents table (zero except for parallel) | |
unsigned long | setChildrenOffset |
Offset into set children table (zero except for parallel) | |
unsigned long | setParentsOffset |
long | maxNumSetContents |
long | maxNumSetChildren |
long | maxNumSetParents |
bool | writeSets |
bool | writeSetContents |
bool | writeSetChildren |
bool | writeSetParents |
std::vector< SpecialSetData > | specialSets |
Array of special/shared sets, in order of handle value. | |
std::list< TagDesc > | tagList |
The list of tags to export. | |
bool | parallelWrite |
True if doing parallel write. | |
bool | collectiveIO |
True if using collective IO calls for parallel write. | |
bool | writeTagDense |
True if writing dense-formatted tag data. | |
hid_t | writeProp |
DebugOutput | dbgOut |
Utility to log debug output. | |
bool | debugTrack |
Look for overlapping and/or missing writes. | |
Static Protected Attributes | |
static MPEState | topState |
static MPEState | subState |
Private Member Functions | |
ErrorCode | write_file_impl (const char *filename, const bool overwrite, const FileOptions &opts, const EntityHandle *export_sets, const int export_set_count, const std::vector< std::string > &qa_records, const Tag *tag_list, int num_tags, int user_dimension=3) |
ErrorCode | init () |
ErrorCode | serial_create_file (const char *filename, bool overwrite, const std::vector< std::string > &qa_records, const Tag *tag_list, int num_tags, int dimension=3) |
ErrorCode | gather_mesh_info (const std::vector< EntityHandle > &export_sets) |
ErrorCode | gather_all_mesh () |
Same as gather_mesh_info, except for entire mesh. | |
ErrorCode | initialize_mesh (const Range entities_by_dim[5]) |
Initialize internal data structures from gathered mesh. | |
ErrorCode | write_nodes () |
ErrorCode | write_elems (ExportSet &elemset) |
ErrorCode | write_sets (double *times) |
ErrorCode | write_set_data (const WriteUtilIface::EntityListType which_data, const hid_t handle, IODebugTrack &track, Range *ranged=0, Range *null_stripped=0, std::vector< long > *set_sizes=0) |
ErrorCode | write_adjacencies (const ExportSet &export_set) |
ErrorCode | write_tag (const TagDesc &tag_data, double *times) |
Write tag for all entities. | |
ErrorCode | get_connectivity (Range::const_iterator begin, Range::const_iterator end, int nodes_per_element, id_t *id_data_out) |
Get element connectivity. | |
ErrorCode | get_tag_size (Tag tag, DataType &moab_type, int &num_bytes, int &elem_size, int &file_size, mhdf_TagDataType &file_type, hid_t &hdf_type) |
ErrorCode | write_sparse_ids (const TagDesc &tag_data, const Range &range, hid_t table_handle, size_t table_size, const char *name=0) |
Write ID table for sparse tag. | |
ErrorCode | write_sparse_tag (const TagDesc &tag_data, const std::string &tag_name, DataType tag_data_type, hid_t hdf5_data_type, int hdf5_type_size) |
Write fixed-length tag data in sparse format. | |
ErrorCode | write_var_len_indices (const TagDesc &tag_data, const Range &range, hid_t idx_table, size_t table_size, int type_size, const char *name=0) |
Write the end-index dataset for a variable-length tag. | |
ErrorCode | write_var_len_data (const TagDesc &tag_data, const Range &range, hid_t table, size_t table_size, bool handle_tag, hid_t hdf_type, int type_size, const char *name=0) |
Write the tag-value dataset for a variable-length tag. | |
ErrorCode | write_var_len_tag (const TagDesc &tag_info, const std::string &tag_name, DataType tag_data_type, hid_t hdf5_type, int hdf5_type_size) |
Write variable-length tag data. | |
ErrorCode | write_dense_tag (const TagDesc &tag_data, const ExportSet &elem_data, const std::string &tag_name, DataType tag_data_type, hid_t hdf5_data_type, int hdf5_type_size) |
Write dense-formatted tag data. | |
ErrorCode | write_tag_values (Tag tag_id, hid_t data_table, unsigned long data_offset, const Range &range, DataType tag_data_type, hid_t hdf5_data_type, int hdf5_type_size, unsigned long max_num_ents, IODebugTrack &debug_track) |
Write data for fixed-size tag. |
Write mesh database to MOAB's native HDF5-based file format.
Definition at line 46 of file WriteHDF5.hpp.
typedef EntityHandle moab::WriteHDF5::id_t |
The type to use for entity IDs within the file.
NOTE: If this is changed, the value of id_type MUST be changed accordingly.
Definition at line 77 of file WriteHDF5.hpp.
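The coupling between id_t and id_type that the note above describes can be illustrated with a small sketch. This is an illustration only, not MOAB's actual get_id_type() implementation; the EntityHandle typedef below is a self-contained stand-in for moab::EntityHandle.

    #include <hdf5.h>

    typedef unsigned long long EntityHandle;  // stand-in for moab::EntityHandle
    typedef EntityHandle id_t;                // in-file entity ID type (as above)

    static hid_t get_id_type()
    {
        // The HDF5 type used for I/O must match sizeof(id_t); if the
        // typedef above changes, this selection must change with it.
        return (sizeof(id_t) == 8) ? H5T_NATIVE_UINT64 : H5T_NATIVE_UINT32;
    }

    static const hid_t id_type = get_id_type();  // mirrors WriteHDF5::id_type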
enum moab::WriteHDF5::TimingValues [protected] |
Definition at line 681 of file WriteHDF5.hpp.
{ TOTAL_TIME = 0, GATHER_TIME, CREATE_TIME, CREATE_NODE_TIME, NEGOTIATE_TYPES_TIME, CREATE_ELEM_TIME, FILEID_EXCHANGE_TIME, CREATE_ADJ_TIME, CREATE_SET_TIME, SHARED_SET_IDS, SHARED_SET_CONTENTS, SET_OFFSET_TIME, CREATE_TAG_TIME, COORD_TIME, CONN_TIME, SET_TIME, SET_META, SET_CONTENT, SET_PARENT, SET_CHILD, ADJ_TIME, TAG_TIME, DENSE_TAG_TIME, SPARSE_TAG_TIME, VARLEN_TAG_TIME, NUM_TIMES };
moab::WriteHDF5::WriteHDF5 | ( | Interface * | iface | ) |
Definition at line 351 of file WriteHDF5.cpp.
: bufferSize( WRITE_HDF5_BUFFER_SIZE ),
  dataBuffer( 0 ),
  iFace( iface ),
  writeUtil( 0 ),
  filePtr( 0 ),
  setContentsOffset( 0 ),
  setChildrenOffset( 0 ),
  setParentsOffset( 0 ),
  maxNumSetContents( 0 ),
  maxNumSetChildren( 0 ),
  maxNumSetParents( 0 ),
  writeSets( false ),
  writeSetContents( false ),
  writeSetChildren( false ),
  writeSetParents( false ),
  parallelWrite( false ),
  collectiveIO( false ),
  writeTagDense( false ),
  writeProp( H5P_DEFAULT ),
  dbgOut( "H5M ", stderr )
{ }
moab::WriteHDF5::~WriteHDF5 | ( | ) | [virtual] |
Definition at line 452 of file WriteHDF5.cpp.
{
    if (!writeUtil) // init() failed
        return;
    iFace->release_interface( writeUtil );
}
ErrorCode moab::WriteHDF5::assign_ids | ( | const Range & | entities, |
id_t | first_id | ||
) | [protected] |
Add entities to idMap.
Definition at line 315 of file WriteHDF5.cpp.
{
    id_t id = first_id; // running file ID counter, starting at first_id
    Range::const_pair_iterator pi;
    for (pi = entities.const_pair_begin(); pi != entities.const_pair_end(); ++pi) {
        const EntityHandle n = pi->second - pi->first + 1;
        dbgOut.printf( 3, "Assigning %s %lu to %lu to file IDs [%lu,%lu]\n",
                       CN::EntityTypeName(TYPE_FROM_HANDLE(pi->first)),
                       (unsigned long)(ID_FROM_HANDLE(pi->first)),
                       (unsigned long)(ID_FROM_HANDLE(pi->first)+n-1),
                       (unsigned long)id,
                       (unsigned long)(id+n-1) );
        if (!idMap.insert( pi->first, id, n ).second)
            return error(MB_FAILURE);
        id += n;
    }
    return MB_SUCCESS;
}
bool moab::WriteHDF5::check_dense_format_tag | ( | const ExportSet & | ents, |
const Range & | all_tagged, | ||
bool | prefer_dense | ||
) | [protected] |
Check if tag values for a given ExportSet should be written in dense format
ents | ExportSet to consider |
all_tagged | Range containing all the entities in ents.range for which an explicit tag value is stored. Range may also contain entities not in ents.range, but may not contain entities in ents.range for which no tag value is stored. |
prefer_dense | If true, will return true if at least 2/3 of the entities are tagged. This should not be passed as true if the tag does not have a default value, as tag values must be stored for all entities in the ExportSet for dense-formatted data. |
Definition at line 2718 of file WriteHDF5.cpp.
{
    // if there are no tagged entities, then don't write anything
    if (ents.range.empty())
        return false;

    // if all of the entities are tagged, then write in dense format
    if (all_tagged.contains(ents.range))
        return true;

    // unless asked for more lenient choice of dense format, return false
    if (!prefer_dense)
        return false;

    // if we're being lenient about choosing dense format, then
    // return true if at least 2/3 of the entities are tagged.
    Range xsect = intersect( setSet.range, all_tagged );
    if (3*xsect.size() >= 2*setSet.range.size())
        return true;

    return false;
}
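As a concrete illustration of the 2/3 criterion (the numbers are hypothetical): for an export group of 1200 entities of which 900 carry an explicit tag value, 3*900 = 2700 >= 2*1200 = 2400, so with prefer_dense the tag is written in dense format and the 300 untagged entities receive the tag's default value; with only 700 tagged, 2100 < 2400 and the tag falls back to sparse format.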
bool moab::WriteHDF5::convert_handle_tag | ( | EntityHandle * | data, |
size_t | count | ||
) | const [protected] |
When writing tags containing EntityHandles to file, the tag data must be converted from EntityHandles to file IDs. This function does that conversion.
If the handle is not valid or does not correspond to an entity that will be written to the file, the file ID is set to zero.
data | The data buffer. As input, an array of EntityHandles. As output, an array of file IDs, where each integral file ID has the same size as an EntityHandle. |
count | The number of handles in the buffer. |
Definition at line 309 of file WriteHDF5.cpp.
{
    assert( sizeof(EntityHandle) == sizeof(id_t) );
    return convert_handle_tag( data, data, count );
}
bool moab::WriteHDF5::convert_handle_tag | ( | const EntityHandle * | source, |
EntityHandle * | dest, | ||
size_t | count | ||
) | const [protected] |
Definition at line 293 of file WriteHDF5.cpp.
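Doxygen does not reproduce the body of this two-argument overload. Based on the one-argument wrapper above and on how idMap is used elsewhere in this class, a hedged sketch of the conversion it performs might look like the following (the function name is hypothetical, and RangeMap is assumed to be the moab::RangeMap used by this class):

    // Sketch only -- not MOAB's verbatim implementation.
    bool convert_handle_tag_sketch( const EntityHandle* source, EntityHandle* dest,
                                    size_t count,
                                    const RangeMap<EntityHandle,id_t>& idMap )
    {
        bool some_valid = false;
        for (size_t i = 0; i < count; ++i) {
            // idMap.find returns 0 for handles that will not be written
            dest[i] = idMap.find( source[i] );
            if (dest[i])
                some_valid = true;
        }
        return some_valid;
    }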
ErrorCode moab::WriteHDF5::count_adjacencies | ( | const Range & | elements, |
id_t & | result | ||
) | [protected] |
Helper function for create-file
Calculate the sum of the number of non-set adjacencies of all entities in the passed range. Each entity with at least one such adjacency contributes two header values (its file ID and the adjacency count) plus one value per adjacency, hence the 2 + adj_list.size() accumulation in the definition below.
Definition at line 2743 of file WriteHDF5.cpp.
{
    ErrorCode rval;
    std::vector<id_t> adj_list;
    Range::const_iterator iter = elements.begin();
    const Range::const_iterator end = elements.end();
    result = 0;
    for ( ; iter != end; ++iter ) {
        adj_list.clear();
        rval = get_adjacencies( *iter, adj_list );
        CHK_MB_ERR_0(rval);

        if (adj_list.size() > 0)
            result += 2 + adj_list.size(); // entity ID + count + adjacency IDs
    }
    return MB_SUCCESS;
}
ErrorCode moab::WriteHDF5::count_set_size | ( | const Range & | sets, |
long & | contents_length_out, | ||
long & | children_length_out, | ||
long & | parents_length_out | ||
) | [protected] |
Helper function for create-file
Calculate the total lengths of the set contents, children, and parents tables.
Definition at line 2788 of file WriteHDF5.cpp.
{ ErrorCode rval; Range set_contents; long contents_length_set, children_length_set, parents_length_set; unsigned long flags; std::vector<id_t> set_contents_ids; std::vector<SpecialSetData>::const_iterator si = specialSets.begin(); contents_length_out = 0; children_length_out = 0; parents_length_out = 0; for (Range::const_iterator iter = sets.begin(); iter != sets.end(); ++iter) { while (si != specialSets.end() && si->setHandle < *iter) ++si; if (si != specialSets.end() && si->setHandle == *iter) { contents_length_out += si->contentIds.size(); children_length_out += si->childIds.size(); parents_length_out += si->parentIds.size(); ++si; continue; } rval = get_set_info( *iter, contents_length_set, children_length_set, parents_length_set, flags ); CHK_MB_ERR_0(rval); // check if can and should compress as ranges if (!(flags&MESHSET_ORDERED) && contents_length_set) { set_contents.clear(); rval = iFace->get_entities_by_handle( *iter, set_contents, false ); CHK_MB_ERR_0(rval); bool blocked_list; rval = range_to_blocked_list( set_contents, set_contents_ids, blocked_list ); CHK_MB_ERR_0(rval); if (blocked_list) { assert (set_contents_ids.size() % 2 == 0); contents_length_set = set_contents_ids.size(); } } contents_length_out += contents_length_set; children_length_out += children_length_set; parents_length_out += parents_length_set; } return MB_SUCCESS; }
ErrorCode moab::WriteHDF5::create_elem_table | ( | const ExportSet & | block, |
long | num_ents, | ||
long & | first_id_out | ||
) |
Helper function for create-file
Create zero-ed tables where element connectivity and adjacency data will be stored.
Definition at line 2762 of file WriteHDF5.cpp.
{
    mhdf_Status status;
    hid_t handle;

    CHECK_OPEN_HANDLES;

    mhdf_addElement( filePtr, block.name(), block.type, &status );
    CHK_MHDF_ERR_0(status);

    handle = mhdf_createConnectivity( filePtr, block.name(), block.num_nodes,
                                      num_ents, &first_id_out, &status );
    CHK_MHDF_ERR_0(status);
    mhdf_closeData( filePtr, handle, &status );
    CHK_MHDF_ERR_0(status);

    return MB_SUCCESS;
}
ErrorCode moab::WriteHDF5::create_set_meta | ( | long | num_sets, |
long & | first_id_out | ||
) |
Helper function for create-file
Create zero-ed table where set descriptions will be written
Definition at line 2847 of file WriteHDF5.cpp.
{
    hid_t handle;
    mhdf_Status status;

    CHECK_OPEN_HANDLES;

    handle = mhdf_createSetMeta( filePtr, num_sets, &first_id_out, &status );
    CHK_MHDF_ERR_0(status);
    mhdf_closeData( filePtr, handle, &status );

    return MB_SUCCESS;
}
ErrorCode moab::WriteHDF5::create_set_tables | ( | long | contents_length, |
long | children_length, | ||
long | parents_length | ||
) | [protected] |
Helper function for create-file
Create zero-ed tables where set data will be written.
Definition at line 2868 of file WriteHDF5.cpp.
{
    hid_t handle;
    mhdf_Status status;

    CHECK_OPEN_HANDLES;

    if (contents_length > 0) {
        handle = mhdf_createSetData( filePtr, contents_length, &status );
        CHK_MHDF_ERR_0(status);
        mhdf_closeData( filePtr, handle, &status );
    }

    if (children_length > 0) {
        handle = mhdf_createSetChildren( filePtr, children_length, &status );
        CHK_MHDF_ERR_0(status);
        mhdf_closeData( filePtr, handle, &status );
    }

    if (parents_length > 0) {
        handle = mhdf_createSetParents( filePtr, parents_length, &status );
        CHK_MHDF_ERR_0(status);
        mhdf_closeData( filePtr, handle, &status );
    }

    return MB_SUCCESS;
}
ErrorCode moab::WriteHDF5::create_tag | ( | const TagDesc & | tag_data, |
unsigned long | num_entities, | ||
unsigned long | var_len_total | ||
) | [protected] |
Helper function for create-file
Write tag meta-info and create zero-ed table where tag values will be written.
num_entities | Number of entities for which to write tag data. |
var_len_total | For variable-length tags, the total number of values in the data table. |
Definition at line 3075 of file WriteHDF5.cpp.
{ TagType mb_storage; DataType mb_type; mhdf_TagDataType mhdf_type; int tag_bytes, type_size, num_vals, storage; hid_t hdf_type = (hid_t)0; hid_t handles[3]; std::string tag_name; ErrorCode rval; mhdf_Status status; CHECK_OPEN_HANDLES; // get tag properties rval = iFace->tag_get_type( tag_data.tag_id, mb_storage ); CHK_MB_ERR_0(rval); switch (mb_storage) { case MB_TAG_DENSE : storage = mhdf_DENSE_TYPE ; break; case MB_TAG_SPARSE: storage = mhdf_SPARSE_TYPE; break; case MB_TAG_BIT: storage = mhdf_BIT_TYPE; break; case MB_TAG_MESH: storage = mhdf_MESH_TYPE; break; default: return error(MB_FAILURE); } rval = iFace->tag_get_name( tag_data.tag_id, tag_name ); CHK_MB_ERR_0(rval); rval = get_tag_size( tag_data.tag_id, mb_type, tag_bytes, type_size, num_vals, mhdf_type, hdf_type ); CHK_MB_ERR_0(rval); // get default value const void *def_value, *mesh_value; int def_val_len, mesh_val_len; rval = iFace->tag_get_default_value( tag_data.tag_id, def_value, def_val_len ); if (MB_ENTITY_NOT_FOUND == rval) { def_value = 0; def_val_len = 0; } else if (MB_SUCCESS != rval) { H5Tclose( hdf_type ); return error(rval); } // get mesh value unsigned char byte; const EntityHandle root = 0; if (mb_storage == MB_TAG_BIT) { rval = iFace->tag_get_data( tag_data.tag_id, &root, 1, &byte ); mesh_value = &byte; mesh_val_len = 1; } else { rval = iFace->tag_get_by_ptr( tag_data.tag_id, &root, 1, &mesh_value, &mesh_val_len ); } if (MB_TAG_NOT_FOUND == rval) { mesh_value = 0; mesh_val_len = 0; } else if (MB_SUCCESS != rval) { H5Tclose( hdf_type ); return error(rval); } // for handle-type tags, need to convert from handles to file ids if (MB_TYPE_HANDLE == mb_type) { // make sure there's room in the buffer for both assert( (def_val_len + mesh_val_len) * sizeof(long) < (size_t)bufferSize ); // convert default value if (def_value) { memcpy( dataBuffer, def_value, def_val_len*sizeof(EntityHandle) ); convert_handle_tag( reinterpret_cast<EntityHandle*>(dataBuffer), def_val_len ); def_value = dataBuffer; } // convert mesh value if (mesh_value) { EntityHandle* ptr = reinterpret_cast<EntityHandle*>(dataBuffer) + def_val_len; memcpy( ptr, mesh_value, mesh_val_len*sizeof(EntityHandle) ); if (convert_handle_tag( ptr, mesh_val_len )) mesh_value = ptr; else mesh_value = 0; } } if (MB_VARIABLE_LENGTH != tag_bytes) { // write the tag description to the file mhdf_createTag( filePtr, tag_name.c_str(), mhdf_type, num_vals, storage, def_value, mesh_value, hdf_type, mb_type == MB_TYPE_HANDLE ? id_type : 0, &status ); H5Tclose(hdf_type); CHK_MHDF_ERR_0(status); // create empty table for tag data if (num_sparse_entities) { mhdf_createSparseTagData( filePtr, tag_name.c_str(), num_sparse_entities, handles, &status ); CHK_MHDF_ERR_0(status); mhdf_closeData( filePtr, handles[0], &status ); mhdf_closeData( filePtr, handles[1], &status ); } for (size_t i = 0; i < tag_data.dense_list.size(); ++i) { const ExportSet* ex = find( tag_data.dense_list[i] ); assert(0 != ex); handles[0] = mhdf_createDenseTagData( filePtr, tag_name.c_str(), ex->name(), ex->total_num_ents, &status ); CHK_MHDF_ERR_0(status); mhdf_closeData( filePtr, handles[0], &status ); } } else { mhdf_createVarLenTag( filePtr, tag_name.c_str(), mhdf_type, storage, def_value, def_val_len, mesh_value, mesh_val_len, hdf_type, mb_type == MB_TYPE_HANDLE ? 
id_type : 0, &status ); H5Tclose(hdf_type); CHK_MHDF_ERR_0(status); // create empty table for tag data if (num_sparse_entities) { mhdf_createVarLenTagData( filePtr, tag_name.c_str(), num_sparse_entities, data_table_size, handles, &status ); CHK_MHDF_ERR_0(status); mhdf_closeData( filePtr, handles[0], &status ); mhdf_closeData( filePtr, handles[1], &status ); mhdf_closeData( filePtr, handles[2], &status ); } } return MB_SUCCESS; }
void moab::WriteHDF5::debug_barrier_line | ( | int | lineno | ) | [protected, virtual] |
WriterIface * moab::WriteHDF5::factory | ( | Interface * | iface | ) | [static] |
Reimplemented in moab::WriteHDF5Parallel.
Definition at line 348 of file WriteHDF5.cpp.
mhdf_FileHandle moab::WriteHDF5::file_ptr | ( | ) | [inline] |
Definition at line 183 of file WriteHDF5.hpp.
{ return filePtr; }
const ExportSet* moab::WriteHDF5::find | ( | ExportType | type | ) | const [inline, protected] |
Definition at line 308 of file WriteHDF5.hpp.
{
    if (type.type == MBVERTEX)
        return &nodeSet;
    else if (type.type == MBENTITYSET)
        return &setSet;
    else {
        std::list<ExportSet>::const_iterator it;
        it = std::find( exportList.begin(), exportList.end(), type );
        return it == exportList.end() ? 0 : &*it;
    }
}
const SpecialSetData* moab::WriteHDF5::find_set_data | ( | EntityHandle | h | ) | const [inline, protected] |
Definition at line 354 of file WriteHDF5.hpp.
{ return const_cast<WriteHDF5*>(this)->find_set_data(h); }
WriteHDF5::SpecialSetData * moab::WriteHDF5::find_set_data | ( | EntityHandle | h | ) | [protected] |
Definition at line 2861 of file WriteHDF5.cpp.
{
    std::vector<SpecialSetData>::iterator i;
    i = std::lower_bound( specialSets.begin(), specialSets.end(), h, SpecSetLess() );
    return (i == specialSets.end() || i->setHandle != h) ? 0 : &*i;
}
ErrorCode moab::WriteHDF5::gather_all_mesh | ( | ) | [private] |
Same as gather_mesh_info, except for entire mesh.
Definition at line 863 of file WriteHDF5.cpp.
{
    ErrorCode rval;
    Range ranges[5];

    rval = iFace->get_entities_by_type( 0, MBVERTEX, ranges[0] );
    if (MB_SUCCESS != rval)
        return error(rval);

    rval = iFace->get_entities_by_dimension( 0, 1, ranges[1] );
    if (MB_SUCCESS != rval)
        return error(rval);

    rval = iFace->get_entities_by_dimension( 0, 2, ranges[2] );
    if (MB_SUCCESS != rval)
        return error(rval);

    rval = iFace->get_entities_by_dimension( 0, 3, ranges[3] );
    if (MB_SUCCESS != rval)
        return error(rval);

    rval = iFace->get_entities_by_type( 0, MBENTITYSET, ranges[4] );
    if (MB_SUCCESS != rval)
        return error(rval);

    return initialize_mesh( ranges );
}
ErrorCode moab::WriteHDF5::gather_mesh_info | ( | const std::vector< EntityHandle > & | export_sets | ) | [private] |
Get all mesh to export from given list of sets.
Populate exportSets, nodeSet and setSet with lists of entities to write.
export_sets | The list of meshsets to export |
Definition at line 791 of file WriteHDF5.cpp.
{ ErrorCode rval; int dim; Range range; // temporary storage Range ranges[5]; // lists of entities to export, grouped by dimension // Gather list of all related sets std::vector<EntityHandle> stack(export_sets); std::copy( export_sets.begin(), export_sets.end(), stack.begin() ); std::vector<EntityHandle> set_children; while( !stack.empty() ) { EntityHandle meshset = stack.back(); stack.pop_back(); ranges[4].insert( meshset ); // Get contained sets range.clear(); rval = iFace->get_entities_by_type( meshset, MBENTITYSET, range ); CHK_MB_ERR_0(rval); for (Range::iterator ritor = range.begin(); ritor != range.end(); ++ritor) if (ranges[4].find( *ritor ) == ranges[4].end()) stack.push_back( *ritor ); // Get child sets set_children.clear(); rval = iFace->get_child_meshsets( meshset, set_children, 1 ); CHK_MB_ERR_0(rval); for (std::vector<EntityHandle>::iterator vitor = set_children.begin(); vitor != set_children.end(); ++vitor ) if (ranges[4].find( *vitor ) == ranges[4].end()) stack.push_back( *vitor ); } // Gather list of all mesh entities from list of sets, // grouped by dimension. for (Range::iterator setitor = ranges[4].begin(); setitor != ranges[4].end(); ++setitor) { for (dim = 0; dim < 4; ++dim) { range.clear(); rval = iFace->get_entities_by_dimension( *setitor, dim, range, false ); CHK_MB_ERR_0(rval); ranges[dim].merge(range); } } // For each list of elements, append adjacent children and // nodes to lists. for (dim = 3; dim > 0; --dim) { for (int cdim = 1; cdim < dim; ++cdim) { range.clear(); rval = iFace->get_adjacencies( ranges[dim], cdim, false, range ); CHK_MB_ERR_0(rval); ranges[cdim].merge( range ); } range.clear(); rval = writeUtil->gather_nodes_from_elements( ranges[dim], 0, range ); CHK_MB_ERR_0(rval); ranges[0].merge( range ); } return initialize_mesh( ranges ); }
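In outline, the definition above proceeds in three steps: (1) compute the transitive closure of the passed sets over both contained sets and child sets, using an explicit stack; (2) gather the contained entities of each dimension from every set in that closure; (3) for each element dimension, add the lower-dimensional adjacencies and the referenced nodes, so that the exported mesh is closed under connectivity.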
ErrorCode moab::WriteHDF5::gather_tags | ( | const Tag * | user_tag_list, |
int | user_tag_list_length | ||
) | [protected] |
Gather tags.
Definition at line 2468 of file WriteHDF5.cpp.
{
    ErrorCode result;
    std::string tagname;
    std::vector<Tag> tag_list;
    std::vector<Tag>::iterator t_itor;
    Range range;

    // Get list of Tags to write
    result = writeUtil->get_tag_list( tag_list, user_tag_list, user_tag_list_length );
    CHK_MB_ERR_0(result);

    // Add each tag to the export list
    for (t_itor = tag_list.begin(); t_itor != tag_list.end(); ++t_itor) {
        TagDesc tag_data;
        tag_data.write_sparse = false;
        tag_data.tag_id = *t_itor;
        tag_data.sparse_offset = 0;
        tag_data.var_data_offset = 0;
        tag_data.max_num_ents = 0;
        tag_data.max_num_vals = 0;
        tagList.push_back( tag_data );
    }

    return MB_SUCCESS;
}
ErrorCode moab::WriteHDF5::get_adjacencies | ( | EntityHandle | entity, |
std::vector< id_t > & | adj | ||
) | [inline, protected] |
Get IDs of adjacent entities.
For all entities adjacent to the passed entity, if the adjacent entity is to be exported (ID is not zero), append the ID to the passed list.
Definition at line 1726 of file WriteHDF5.cpp.
{
    const EntityHandle* adj_array;
    int num_adj;
    ErrorCode rval = writeUtil->get_adjacencies( entity, adj_array, num_adj );
    if (MB_SUCCESS != rval)
        return error(rval);

    size_t j = 0;
    adj.resize( num_adj );
    for (int i = 0; i < num_adj; ++i)
        if (id_t id = idMap.find( adj_array[i] )) // skip entities not written (ID == 0)
            adj[j++] = id;
    adj.resize( j );
    return MB_SUCCESS;
}
ErrorCode moab::WriteHDF5::get_connectivity | ( | Range::const_iterator | begin, |
Range::const_iterator | end, | ||
int | nodes_per_element, | ||
id_t * | id_data_out | ||
) | [private] |
Get element connectivity.
ErrorCode moab::WriteHDF5::get_num_sparse_tagged_entities | ( | const TagDesc & | tag, |
size_t & | count | ||
) | [protected] |
Get the number of tagged entities for which to write tag values.
Definition at line 3233 of file WriteHDF5.cpp.
{
    Range tmp;
    ErrorCode rval = get_sparse_tagged_entities( tag, tmp );
    count = tmp.size();
    return rval;
}
ErrorCode moab::WriteHDF5::get_set_info | ( | EntityHandle | set, |
long & | num_entities, | ||
long & | num_children, | ||
long & | num_parents, | ||
unsigned long & | flags | ||
) | [protected] |
Get information about a meshset.
Definition at line 1091 of file WriteHDF5.cpp.
{
    ErrorCode rval;
    int i;
    unsigned int u;

    rval = iFace->get_number_entities_by_handle( set, i, false );
    CHK_MB_ERR_0(rval);
    num_entities = i;

    rval = iFace->num_child_meshsets( set, &i );
    CHK_MB_ERR_0(rval);
    num_children = i;

    rval = iFace->num_parent_meshsets( set, &i );
    CHK_MB_ERR_0(rval);
    num_parents = i;

    rval = iFace->get_meshset_options( set, u );
    CHK_MB_ERR_0(rval);
    flags = u;

    return MB_SUCCESS;
}
ErrorCode moab::WriteHDF5::get_sparse_tagged_entities | ( | const TagDesc & | tag, |
Range & | range | ||
) | [protected] |
Get tagged entities for which to write tag values.
Definition at line 3242 of file WriteHDF5.cpp.
{
    range.clear();
    if (!tag.have_dense(setSet))
        range.merge( setSet.range );
    std::list<ExportSet>::reverse_iterator e;
    for (e = exportList.rbegin(); e != exportList.rend(); ++e)
        if (!tag.have_dense(*e))
            range.merge( e->range );
    if (!tag.have_dense(nodeSet))
        range.merge( nodeSet.range );
    if (range.empty())
        return MB_SUCCESS;

    return iFace->get_entities_by_type_and_tag( 0, MBMAXTYPE, &tag.tag_id, 0, 1,
                                                range, Interface::INTERSECT );
}
ErrorCode moab::WriteHDF5::get_tag_data_length | ( | const TagDesc & | tag_info, |
const Range & | range, | ||
unsigned long & | result | ||
) | [protected] |
Get the sum of the lengths of all tag values (counted in values of the tag's base type) for a variable-length tag over the entities in the passed range.
Definition at line 3032 of file WriteHDF5.cpp.
{ ErrorCode rval; result = 0; // split buffer into two pieces, one for pointers and one for sizes size_t step, remaining; step = bufferSize / (sizeof(int) + sizeof(void*)); const void** ptr_buffer = reinterpret_cast<const void**>(dataBuffer); int* size_buffer = reinterpret_cast<int*>(ptr_buffer + step); Range subrange; Range::const_iterator iter = range.begin(); for (remaining = range.size(); remaining >= step; remaining -= step) { // get subset of range containing 'count' entities Range::const_iterator end = iter; end += step; subrange.clear(); subrange.merge( iter, end ); iter = end; // get tag sizes for entities rval = iFace->tag_get_by_ptr( tag_info.tag_id, subrange, ptr_buffer, size_buffer ); if (MB_SUCCESS != rval) return error(rval); // sum lengths for (size_t i = 0; i < step; ++i) result += size_buffer[i]; } // process remaining subrange.clear(); subrange.merge( iter, range.end() ); assert( subrange.size() == remaining ); rval = iFace->tag_get_by_ptr( tag_info.tag_id, subrange, ptr_buffer, size_buffer ); if (MB_SUCCESS != rval) return error(rval); for (size_t i= 0; i < remaining; ++i) result += size_buffer[i]; return MB_SUCCESS; }
ErrorCode moab::WriteHDF5::get_tag_size | ( | Tag | tag, |
DataType & | moab_type, | ||
int & | num_bytes, | ||
int & | elem_size, | ||
int & | file_size, | ||
mhdf_TagDataType & | file_type, | ||
hid_t & | hdf_type | ||
) | [private] |
Get size data for tag
tag | MOAB tag ID |
moab_type | Output: DataType for tag |
num_bytes | Output: MOAB tag size (bits for bit tags). MB_VARIABLE_LENGTH for variable-length tags. |
elem_size | Output: Size of the base data type of the tag data (e.g. sizeof(double) if moab_type == MB_TYPE_DOUBLE). One for bit and opaque tags. |
array_size | Output: The number of values of size elem_size for each tag value. Always 1 for opaque data. Number of bits for bit tags. |
file_type | Output: mhdf type enumeration |
hdf_type | Output: Handle to HDF5 type object. Caller is responsible for releasing this object (calling H5Tclose). |
Definition at line 2901 of file WriteHDF5.cpp.
{ ErrorCode rval; Tag type_handle; std::string tag_name, tag_type_name; CHECK_OPEN_HANDLES; // We return NULL for hdf_type if it can be determined from // the file_type. The only case where it is non-zero is // if the user specified a specific type via a mesh tag. hdf_type = (hid_t)0; bool close_hdf_type = false; rval = iFace->tag_get_data_type( tag, moab_type ); CHK_MB_ERR_0(rval); rval = iFace->tag_get_length( tag, array_length ); if (MB_VARIABLE_DATA_LENGTH == rval) { array_length = MB_VARIABLE_LENGTH; } else if (MB_SUCCESS != rval) return error(rval); rval = iFace->tag_get_bytes( tag, num_bytes ); if (MB_VARIABLE_DATA_LENGTH == rval) { num_bytes = MB_VARIABLE_LENGTH; } else if (MB_SUCCESS != rval) return error(rval); switch (moab_type) { case MB_TYPE_INTEGER: type_size = sizeof(int); file_type = mhdf_INTEGER; hdf_type = H5T_NATIVE_INT; close_hdf_type = false; break; case MB_TYPE_DOUBLE: type_size = sizeof(double); file_type = mhdf_FLOAT; hdf_type = H5T_NATIVE_DOUBLE; close_hdf_type = false; break; case MB_TYPE_BIT: type_size = sizeof(bool); file_type = mhdf_BITFIELD; assert(array_length <= 8); hdf_type = H5Tcopy( H5T_NATIVE_B8 ); H5Tset_precision( hdf_type, array_length ); close_hdf_type = true; break; case MB_TYPE_HANDLE: type_size = sizeof(EntityHandle); file_type = mhdf_ENTITY_ID; hdf_type = id_type; close_hdf_type = false; break; case MB_TYPE_OPAQUE: file_type = mhdf_OPAQUE; rval = iFace->tag_get_name( tag, tag_name ); CHK_MB_ERR_0(rval); tag_type_name = "__hdf5_tag_type_"; tag_type_name += tag_name; rval = iFace->tag_get_handle( tag_type_name.c_str(), 0, MB_TYPE_OPAQUE, type_handle, MB_TAG_ANY ); if (MB_TAG_NOT_FOUND == rval) { if (num_bytes == MB_VARIABLE_LENGTH) type_size = 1; else type_size = num_bytes; hdf_type = H5Tcreate( H5T_OPAQUE, type_size ); close_hdf_type = true; } else if (MB_SUCCESS == rval) { int hsize; rval = iFace->tag_get_bytes( type_handle, hsize ); if (hsize != sizeof(hid_t)) return error(MB_FAILURE); const EntityHandle root = 0; rval = iFace->tag_get_data( type_handle, &root, 1, &hdf_type ); if (rval != MB_SUCCESS) return error(rval); type_size = H5Tget_size(hdf_type); if (type_size != num_bytes) return error(MB_FAILURE); close_hdf_type = false; } else { return error(rval); } num_bytes = array_length; array_length = (num_bytes == MB_VARIABLE_LENGTH) ? MB_VARIABLE_LENGTH : 1; } assert(num_bytes == MB_VARIABLE_LENGTH || (moab_type == MB_TYPE_BIT && num_bytes == 1) || array_length * type_size == num_bytes ); if (num_bytes == MB_VARIABLE_LENGTH) { array_length = MB_VARIABLE_LENGTH; if (!close_hdf_type) { hdf_type = H5Tcopy( hdf_type ); close_hdf_type = true; } } else if (array_length > 1 && moab_type != MB_TYPE_BIT) { hsize_t len = array_length; #if defined(H5Tarray_create_vers) && (H5Tarray_create_vers > 1) hid_t temp_id = H5Tarray_create2( hdf_type, 1, &len); #else hid_t temp_id = H5Tarray_create( hdf_type, 1, &len, NULL ); #endif if (close_hdf_type) H5Tclose( hdf_type ); hdf_type = temp_id; } else if (!close_hdf_type) { hdf_type = H5Tcopy( hdf_type ); close_hdf_type = true; } return MB_SUCCESS; }
void moab::WriteHDF5::get_write_entities | ( | Range & | range | ) | [protected] |
Get entities that will be written to file.
Definition at line 3262 of file WriteHDF5.cpp.
{
    range.clear();
    range.merge( setSet.range );
    std::list<ExportSet>::reverse_iterator e;
    for (e = exportList.rbegin(); e != exportList.rend(); ++e)
        range.merge( e->range );
    range.merge( nodeSet.range );
}
ErrorCode moab::WriteHDF5::init | ( | ) | [private] |
Definition at line 375 of file WriteHDF5.cpp.
{ ErrorCode rval; if (writeUtil) // init has already been called return MB_SUCCESS; /* #ifdef DEBUG H5Eset_auto( &hdf_error_handler, writeUtil ); // HDF5 callback for errors #endif */ // For known tag types, store the corresponding HDF5 in which // the tag data is to be written in the file. //register_known_tag_types( iFace ); // Get the util interface rval = iFace->query_interface( writeUtil ); CHK_MB_ERR_0(rval); idMap.clear(); #if defined(H5Eget_auto_vers) && H5Eget_auto_vers > 1 herr_t err = H5Eget_auto( H5E_DEFAULT, &errorHandler.func, &errorHandler.data ); #else herr_t err = H5Eget_auto( &errorHandler.func, &errorHandler.data ); #endif if (err < 0) { errorHandler.func = 0; errorHandler.data = 0; } else { #if defined(H5Eset_auto_vers) && H5Eset_auto_vers > 1 err = H5Eset_auto( H5E_DEFAULT, &handle_hdf5_error, &errorHandler ); #else err = H5Eset_auto( &handle_hdf5_error, &errorHandler ); #endif if (err < 0) { errorHandler.func = 0; errorHandler.data = 0; } } if (!topState.valid()) topState = MPEState( "WriteHDF5", "yellow" ); if (!subState.valid()) subState = MPEState( "WriteHDF5 subevent", "cyan" ); return MB_SUCCESS; }
ErrorCode moab::WriteHDF5::initialize_mesh | ( | const Range | entities_by_dim[5] | ) | [private] |
Initialize internal data structures from gathered mesh.
Definition at line 716 of file WriteHDF5.cpp.
{ ErrorCode rval; if (!ranges[0].all_of_type(MBVERTEX)) return error(MB_FAILURE); nodeSet.range = ranges[0]; nodeSet.type = MBVERTEX; nodeSet.num_nodes = 1; nodeSet.max_num_ents = nodeSet.max_num_adjs = 0; if (!ranges[4].all_of_type(MBENTITYSET)) return error(MB_FAILURE); setSet.range = ranges[4]; setSet.type = MBENTITYSET; setSet.num_nodes = 0; setSet.max_num_ents = setSet.max_num_adjs = 0; maxNumSetContents = maxNumSetChildren = maxNumSetParents = 0; exportList.clear(); std::vector<Range> bins(1024); // sort entities by connectivity length // resize is expensive due to Range copy, so start big for (EntityType type = MBEDGE; type < MBENTITYSET; ++type) { ExportSet set; set.max_num_ents = set.max_num_adjs = 0; const int dim = CN::Dimension(type); // Group entities by connectivity length bins.clear(); assert(dim >= 0 && dim <= 4); std::pair<Range::const_iterator,Range::const_iterator> p = ranges[dim].equal_range(type); Range::const_iterator i = p.first; while (i != p.second) { Range::const_iterator first = i; EntityHandle const* conn; int len, firstlen; // dummy storage vector for structured mesh "get_connectivity" function std::vector<EntityHandle> storage; rval = iFace->get_connectivity( *i, conn, firstlen, false, &storage ); if (MB_SUCCESS != rval) return error(rval); for (++i; i != p.second; ++i) { rval = iFace->get_connectivity( *i, conn, len, false, &storage ); if (MB_SUCCESS != rval) return error(rval); if (len != firstlen) break; } if (firstlen >= (int)bins.size()) bins.resize(firstlen+1); bins[firstlen].merge( first, i ); } // Create ExportSet for each group for (std::vector<Range>::iterator j = bins.begin(); j != bins.end(); ++j) { if (j->empty()) continue; set.range.clear(); set.type = type; set.num_nodes = j - bins.begin(); exportList.push_back( set ); exportList.back().range.swap( *j ); } } return MB_SUCCESS; }
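In outline: entities_by_dim[0] becomes nodeSet, entities_by_dim[4] becomes setSet, and the remaining entities are binned by element type and connectivity length, with one ExportSet created per non-empty bin. For example, 4-node and 9-node quadrilaterals of the same type would end up in separate export groups.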
ErrorCode moab::WriteHDF5::parallel_create_file | ( | const char * | filename, |
bool | overwrite, | ||
const std::vector< std::string > & | qa_records, | ||
const FileOptions & | opts, | ||
const Tag * | tag_list, | ||
int | num_tags, | ||
int | dimension = 3 , |
||
double * | times = 0 |
||
) | [protected, virtual] |
Function to create the file. Virtual to allow override for parallel version.
Reimplemented in moab::WriteHDF5Parallel.
Definition at line 2500 of file WriteHDF5.cpp.
{
    writeUtil->report_error( "WriteHDF5 does not support parallel writing.\n" );
    return error(MB_NOT_IMPLEMENTED);
}
void moab::WriteHDF5::print_id_map | ( | ) | const [protected] |
Definition at line 3272 of file WriteHDF5.cpp.
{ print_id_map( std::cout, "" ) ; }
void moab::WriteHDF5::print_id_map | ( | std::ostream & | str, |
const char * | prefix = "" |
||
) | const [protected] |
Definition at line 3277 of file WriteHDF5.cpp.
{
    RangeMap<EntityHandle,id_t>::const_iterator i;
    for (i = idMap.begin(); i != idMap.end(); ++i) {
        const char* n1 = CN::EntityTypeName(TYPE_FROM_HANDLE(i->begin));
        EntityID id = ID_FROM_HANDLE(i->begin);
        if (i->count == 1) {
            str << prefix << n1 << " " << id << " -> " << i->value << std::endl;
        }
        else {
            const char* n2 = CN::EntityTypeName(TYPE_FROM_HANDLE(i->begin + i->count - 1));
            if (n1 == n2) {
                str << prefix << n1 << " " << id << "-" << id + i->count-1
                    << " -> " << i->value << "-" << i->value + i->count-1 << std::endl;
            }
            else {
                str << prefix << n1 << " " << id << "-"
                    << n1 << " " << ID_FROM_HANDLE(i->begin + i->count-1)
                    << " -> " << i->value << "-" << i->value + i->count-1 << std::endl;
            }
        }
    }
}
void moab::WriteHDF5::print_times | ( | const double | times[NUM_TIMES] | ) | const [protected, virtual] |
Definition at line 3301 of file WriteHDF5.cpp.
{
    std::cout << "WriteHDF5: "           << times[TOTAL_TIME] << std::endl
              << " gather mesh: "        << times[GATHER_TIME] << std::endl
              << " create file: "        << times[CREATE_TIME] << std::endl
              << " create nodes: "       << times[CREATE_NODE_TIME] << std::endl
              << " negotiate types: "    << times[NEGOTIATE_TYPES_TIME] << std::endl
              << " create elem: "        << times[CREATE_ELEM_TIME] << std::endl
              << " file id exch: "       << times[FILEID_EXCHANGE_TIME] << std::endl
              << " create adj: "         << times[CREATE_ADJ_TIME] << std::endl
              << " create set: "         << times[CREATE_SET_TIME] << std::endl
              << " shared ids: "         << times[SHARED_SET_IDS] << std::endl
              << " shared data: "        << times[SHARED_SET_CONTENTS] << std::endl
              << " set offsets: "        << times[SET_OFFSET_TIME] << std::endl
              << " create tags: "        << times[CREATE_TAG_TIME] << std::endl
              << " coordinates: "        << times[COORD_TIME] << std::endl
              << " connectivity: "       << times[CONN_TIME] << std::endl
              << " sets: "               << times[SET_TIME] << std::endl
              << " set descrip: "        << times[SET_META] << std::endl
              << " set content: "        << times[SET_CONTENT] << std::endl
              << " set parent: "         << times[SET_PARENT] << std::endl
              << " set child: "          << times[SET_CHILD] << std::endl
              << " adjacencies: "        << times[ADJ_TIME] << std::endl
              << " tags: "               << times[TAG_TIME] << std::endl
              << " dense data: "         << times[DENSE_TAG_TIME] << std::endl
              << " sparse data: "        << times[SPARSE_TAG_TIME] << std::endl
              << " var-len data: "       << times[VARLEN_TAG_TIME] << std::endl;
}
ErrorCode moab::WriteHDF5::range_to_blocked_list | ( | const Range & | input_range, |
std::vector< id_t > & | output_id_list, | ||
bool & | ranged_list | ||
) | [protected] |
Get possibly compacted list of IDs for passed entities
For the passed range of entities, determine if IDs can be compacted and write IDs to passed list.
If the IDs are not compacted, the output list will contain a simple ordered list of IDs.
If IDs are compacted, the output list will contain {start,count} pairs.
If the ID list is compacted, ranged_list will be 'true'. Otherwise it will be 'false'.
Definition at line 1652 of file WriteHDF5.cpp.
{
    return range_to_blocked_list_templ( input_range.const_pair_begin(),
                                        input_range.const_pair_end(),
                                        idMap, output_id_list, ranged_list );
}
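For example (hypothetical IDs): entities with file IDs 3, 4, 5, 6, 10, 11 compact to the (start,count) pairs {3,4} and {10,2}, so output_id_list holds [3, 4, 10, 2] and ranged_list is true; for scattered IDs such as 3, 7, 12 the output is simply [3, 7, 12] with ranged_list false, since compaction would not save space.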
ErrorCode moab::WriteHDF5::range_to_blocked_list | ( | const EntityHandle * | input_ranges, |
size_t | num_input_ranges, | ||
std::vector< id_t > & | output_id_list, | ||
bool & | ranged_list | ||
) | [protected] |
Get possibly compacted list of IDs for passed entities
For the passed range of entities, determine if IDs can be compacted and write IDs to passed list.
If the IDs are not compacted, the output list will contain a simple ordered list of IDs.
If IDs are compacted, the output list will contain {start,count} pairs.
If the ID list is compacted, ranged_list will be 'true'. Otherwise it will be 'false'.
Definition at line 1661 of file WriteHDF5.cpp.
{
    // we assume this in the cast on the following line
    typedef std::pair<EntityHandle,EntityHandle> mtype;
    assert(sizeof(mtype) == 2*sizeof(EntityHandle));
    const mtype* arr = reinterpret_cast<const mtype*>(input_ranges);
    return range_to_blocked_list_templ( arr, arr + num_input_ranges,
                                        idMap, output_id_list, ranged_list );
}
ErrorCode moab::WriteHDF5::range_to_id_list | ( | const Range & | input_range, |
id_t * | array | ||
) | [protected] |
Definition at line 1675 of file WriteHDF5.cpp.
{
    return range_to_id_list_templ( input_range.const_pair_begin(),
                                   input_range.const_pair_end(),
                                   idMap, array );
}
ErrorCode moab::WriteHDF5::serial_create_file | ( | const char * | filename, |
bool | overwrite, | ||
const std::vector< std::string > & | qa_records, | ||
const Tag * | tag_list, | ||
int | num_tags, | ||
int | dimension = 3 |
||
) | [private] |
Definition at line 2513 of file WriteHDF5.cpp.
{ long first_id; mhdf_Status status; hid_t handle; std::list<ExportSet>::iterator ex_itor; ErrorCode rval; topState.start( "creating file" ); const char* type_names[MBMAXTYPE]; memset( type_names, 0, MBMAXTYPE * sizeof(char*) ); for (EntityType i = MBEDGE; i < MBENTITYSET; ++i) type_names[i] = CN::EntityTypeName( i ); // Create the file filePtr = mhdf_createFile( filename, overwrite, type_names, MBMAXTYPE, id_type, &status ); CHK_MHDF_ERR_0(status); assert(!!filePtr); rval = write_qa( qa_records ); CHK_MB_ERR_0(rval); // Create node table if (nodeSet.range.size()) { nodeSet.total_num_ents = nodeSet.range.size(); handle = mhdf_createNodeCoords( filePtr, dimension, nodeSet.total_num_ents, &first_id, &status ); CHK_MHDF_ERR_0(status); mhdf_closeData( filePtr, handle, &status ); CHK_MHDF_ERR_0(status); nodeSet.first_id = (id_t)first_id; rval = assign_ids( nodeSet.range, nodeSet.first_id ); CHK_MB_ERR_0(rval); } else { nodeSet.first_id = std::numeric_limits<id_t>::max(); } nodeSet.offset = 0; // Create element tables for (ex_itor = exportList.begin(); ex_itor != exportList.end(); ++ex_itor) { ex_itor->total_num_ents = ex_itor->range.size(); rval = create_elem_table( *ex_itor, ex_itor->total_num_ents, first_id ); CHK_MB_ERR_0(rval); ex_itor->first_id = (id_t)first_id; ex_itor->offset = 0; rval = assign_ids( ex_itor->range, ex_itor->first_id ); CHK_MB_ERR_0(rval); } // create node adjacency table id_t num_adjacencies; #ifdef MB_H5M_WRITE_NODE_ADJACENCIES rval = count_adjacencies( nodeSet.range, num_adjacencies ); CHK_MB_ERR_0(rval); nodeSet.adj_offset = 0; nodeSet.max_num_adjs = num_adjacencies; if (num_adjacencies > 0) { handle = mhdf_createAdjacency( filePtr, mhdf_node_type_handle(), num_adjacencies, &status ); CHK_MHDF_ERR_0(status); mhdf_closeData( filePtr, handle, &status ); } #endif // create element adjacency tables for (ex_itor = exportList.begin(); ex_itor != exportList.end(); ++ex_itor) { rval = count_adjacencies( ex_itor->range, num_adjacencies ); CHK_MB_ERR_0(rval); ex_itor->adj_offset = 0; ex_itor->max_num_adjs = num_adjacencies; if (num_adjacencies > 0) { handle = mhdf_createAdjacency( filePtr, ex_itor->name(), num_adjacencies, &status ); CHK_MHDF_ERR_0(status); mhdf_closeData( filePtr, handle, &status ); } } // create set tables writeSets = !setSet.range.empty(); if (writeSets) { long contents_len, children_len, parents_len; setSet.total_num_ents = setSet.range.size(); setSet.max_num_ents = setSet.total_num_ents; rval = create_set_meta( setSet.total_num_ents, first_id ); CHK_MB_ERR_0(rval); setSet.first_id = (id_t)first_id; rval = assign_ids( setSet.range, setSet.first_id ); CHK_MB_ERR_0(rval); rval = count_set_size( setSet.range, contents_len, children_len, parents_len ); CHK_MB_ERR_0(rval); rval = create_set_tables( contents_len, children_len, parents_len ); CHK_MB_ERR_0(rval); setSet.offset = 0; setContentsOffset = 0; setChildrenOffset = 0; setParentsOffset = 0; writeSetContents = !!contents_len; writeSetChildren = !!children_len; writeSetParents = !!parents_len; maxNumSetContents = contents_len; maxNumSetChildren = children_len; maxNumSetParents = parents_len; } // if(!setSet.range.empty()) dbgOut.tprint( 1, "Gathering Tags\n" ); rval = gather_tags( user_tag_list, num_user_tags ); CHK_MB_ERR_0(rval); // Create the tags and tag data tables std::list<TagDesc>::iterator tag_iter = tagList.begin(); for ( ; tag_iter != tagList.end(); ++tag_iter) { // As we haven't yet added any ExportSets for which to write // dense tag data to the TagDesc struct pointed to by // tag_iter, this call 
will initially return all tagged entities // in the set of entities to be written. Range range; rval = get_sparse_tagged_entities( *tag_iter, range ); CHK_MB_ERR_0(rval); int s; bool var_len = (MB_VARIABLE_DATA_LENGTH == iFace->tag_get_length( tag_iter->tag_id, s )); // Determine which ExportSets we want to write dense // data for. We never write dense data for variable-length // tag data. if (!var_len && writeTagDense) { // Check if we want to write this tag in dense format even if not // all of the entities have a tag value. The criterion of this // is that the tag be dense, have a default value, and have at // least 2/3 of the entities tagged. bool prefer_dense = false; TagType type; rval = iFace->tag_get_type( tag_iter->tag_id, type ); CHK_MB_ERR_0(rval); if (MB_TAG_DENSE == type) { const void* defval = 0; rval = iFace->tag_get_default_value( tag_iter->tag_id, defval, s ); if (MB_SUCCESS == rval) prefer_dense = true; } if (check_dense_format_tag( nodeSet, range, prefer_dense )) { range -= nodeSet.range; tag_iter->dense_list.push_back( nodeSet ); } std::list<ExportSet>::const_iterator ex = exportList.begin(); for ( ; ex != exportList.end(); ++ex) { if (check_dense_format_tag( *ex, range, prefer_dense )) { range -= ex->range; tag_iter->dense_list.push_back( *ex ); } } if (check_dense_format_tag( setSet, range, prefer_dense )) { range -= setSet.range; tag_iter->dense_list.push_back( setSet ); } } tag_iter->write_sparse = !range.empty(); unsigned long var_len_total = 0; if (var_len) { rval = get_tag_data_length( *tag_iter, range, var_len_total ); CHK_MB_ERR_0(rval); } rval = create_tag( *tag_iter, range.size(), var_len_total ); CHK_MB_ERR_0(rval); } // for(tags) topState.end(); return MB_SUCCESS; }
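In outline, the definition above creates the complete file layout before any bulk data is written: it creates the file and writes the QA records, creates the node coordinate table and assigns node file IDs, creates one connectivity table per ExportSet and assigns element IDs, creates the (optional) adjacency tables, creates the set description, contents, children, and parents tables and assigns set IDs, and finally gathers the tags and creates a data table for each, choosing per tag between dense and sparse storage via check_dense_format_tag.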
ErrorCode moab::WriteHDF5::vector_to_id_list | ( | const std::vector< EntityHandle > & | input, |
std::vector< id_t > & | output, | ||
bool | remove_non_written = false |
||
) | [protected] |
Get IDs for entities.
Definition at line 1703 of file WriteHDF5.cpp.
{
    output.resize( input.size() );
    size_t output_size = 0;
    ErrorCode rval = vector_to_id_list( &input[0], input.size(),
                                        &output[0], output_size,
                                        remove_non_written );
    output.resize( output_size );
    return rval;
}
ErrorCode moab::WriteHDF5::vector_to_id_list | ( | const EntityHandle * | input, |
id_t * | output, | ||
size_t | num_entities | ||
) | [protected] |
Get IDs for entities.
Definition at line 1717 of file WriteHDF5.cpp.
{
    size_t output_len;
    return vector_to_id_list( input, num_entities, output, output_len, false );
}
ErrorCode moab::WriteHDF5::vector_to_id_list | ( | const EntityHandle * | input, |
size_t | input_len, | ||
id_t * | output, | ||
size_t & | output_len, | ||
bool | remove_non_written | ||
) | [protected] |
Get IDs for entities.
Definition at line 1683 of file WriteHDF5.cpp.
{
    const EntityHandle* i_iter = input;
    const EntityHandle* i_end = input + input_len;
    id_t* o_iter = output;
    for (; i_iter != i_end; ++i_iter) {
        id_t id = idMap.find( *i_iter );
        if (!remove_non_written || id != 0) {
            *o_iter = id;
            ++o_iter;
        }
    }
    output_len = o_iter - output;
    return MB_SUCCESS;
}
ErrorCode moab::WriteHDF5::write_adjacencies | ( | const ExportSet & | export_set | ) | [private] |
Write adjacency info for passed set of elements
Note: Must have written element connectivity so elements have IDs assigned.
Definition at line 1745 of file WriteHDF5.cpp.
{ ErrorCode rval; mhdf_Status status; Range::const_iterator iter; const Range::const_iterator end = elements.range.end(); std::vector<id_t> adj_list; CHECK_OPEN_HANDLES; debug_barrier(); /* Count Adjacencies */ long count = 0; //for (iter = elements.range.begin(); iter != end; ++iter) //{ // adj_list.clear(); // rval = get_adjacencies( *iter, adj_list); // CHK_MB_ERR_0(rval); // // if (adj_list.size() > 0) // count += adj_list.size() + 2; //} //if (count == 0) // return MB_SUCCESS; long offset = elements.adj_offset; if (elements.max_num_adjs == 0) return MB_SUCCESS; /* Create data list */ hid_t table = mhdf_openAdjacency( filePtr, elements.name(), &count, &status ); CHK_MHDF_ERR_0(status); IODebugTrack track( debugTrack, "Adjacencies", count ); /* Write data */ id_t* buffer = (id_t*)dataBuffer; long chunk_size = bufferSize / sizeof(id_t); long num_writes = (elements.max_num_adjs + chunk_size - 1)/chunk_size; VALGRIND_MAKE_MEM_UNDEFINED( dataBuffer, bufferSize ); count = 0; for (iter = elements.range.begin(); iter != end; ++iter) { adj_list.clear(); rval = get_adjacencies( *iter, adj_list ); CHK_MB_ERR_1(rval, table, status); if (adj_list.size() == 0) continue; // If buffer is full, flush it if (count + adj_list.size() + 2 > (unsigned long)chunk_size) { dbgOut.print(3," writing adjacency chunk.\n"); track.record_io( offset, count ); mhdf_writeAdjacencyWithOpt( table, offset, count, id_type, buffer, writeProp, &status ); CHK_MHDF_ERR_1(status, table); VALGRIND_MAKE_MEM_UNDEFINED( dataBuffer, bufferSize ); offset += count; count = 0; } buffer[count++] = idMap.find( *iter ); buffer[count++] = adj_list.size(); assert (adj_list.size()+2 < (unsigned long)chunk_size); memcpy( buffer + count, &adj_list[0], adj_list.size() * sizeof(id_t) ); count += adj_list.size(); } if (count) { dbgOut.print(2," writing final adjacency chunk.\n"); mhdf_writeAdjacencyWithOpt( table, offset, count, id_type, buffer, writeProp, &status ); CHK_MHDF_ERR_1(status, table); offset += count; count = 0; --num_writes; } // Do empty writes if necessary for parallel collective IO if (collectiveIO) { while (num_writes > 0) { --num_writes; assert(writeProp != H5P_DEFAULT); dbgOut.print(2," writing empty adjacency chunk.\n"); mhdf_writeAdjacencyWithOpt( table, offset, 0, id_type, 0, writeProp, &status ); CHK_MHDF_ERR_1(status, table ); } } mhdf_closeData( filePtr, table, &status ); CHK_MHDF_ERR_0(status); track.all_reduce(); return MB_SUCCESS; }
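As the write loop above shows, the adjacency table is a flat sequence of records of the form [entity file ID, adjacency count N, N adjacent-entity file IDs], with one record for each element that has at least one written adjacency.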
ErrorCode moab::WriteHDF5::write_dense_tag | ( | const TagDesc & | tag_data, |
const ExportSet & | elem_data, | ||
const std::string & | tag_name, | ||
DataType | tag_data_type, | ||
hid_t | hdf5_data_type, | ||
int | hdf5_type_size | ||
) | [private] |
Write dense-formatted tag data.
Definition at line 2259 of file WriteHDF5.cpp.
{
    CHECK_OPEN_HANDLES;

    // open table to write info
    mhdf_Status status;
    long table_size;
    hid_t table = mhdf_openDenseTagData( filePtr, tag_name.c_str(),
                                         elem_data.name(),
                                         &table_size, &status );
    CHK_MHDF_ERR_0(status);
    assert( elem_data.range.size() + elem_data.offset <= (unsigned long)table_size );

    IODebugTrack track( debugTrack, tag_name + " " + elem_data.name() + " Data",
                        table_size );
    ErrorCode rval = write_tag_values( tag_data.tag_id, table, elem_data.offset,
                                       elem_data.range, tag_data_type,
                                       hdf5_data_type, hdf5_type_size,
                                       elem_data.max_num_ents, track );
    mhdf_closeData( filePtr, table, &status );
    CHK_MB_ERR_0(rval);
    CHK_MHDF_ERR_0(status);

    return MB_SUCCESS;
}
ErrorCode moab::WriteHDF5::write_elems | ( | ExportSet & | elemset | ) | [private] |
Write out element connectivity.
Write connectivity for passed set of elements.
Note: Assigns element IDs; write_nodes must be called first so that node IDs are already assigned.
Definition at line 999 of file WriteHDF5.cpp.
{ mhdf_Status status; ErrorCode rval; long first_id; int nodes_per_elem; long table_size; CHECK_OPEN_HANDLES; debug_barrier(); dbgOut.printf(2,"Writing %lu elements of type %s%d\n", (unsigned long)elems.range.size(), CN::EntityTypeName(elems.type), elems.num_nodes ); dbgOut.print(3,"Writing elements",elems.range); hid_t elem_table = mhdf_openConnectivity( filePtr, elems.name(), &nodes_per_elem, &table_size, &first_id, &status ); IODebugTrack track( debugTrack, elems.name() && strlen(elems.name()) ? elems.name() : "<ANONYMOUS ELEM SET?>", table_size ); CHK_MHDF_ERR_0(status); assert ((unsigned long)first_id <= elems.first_id); assert ((unsigned long)table_size >= elems.offset + elems.range.size()); EntityHandle* buffer = (EntityHandle*)dataBuffer; int chunk_size = bufferSize / (elems.num_nodes * sizeof(id_t)); long offset = elems.offset; long remaining = elems.range.size(); long num_writes = (remaining+chunk_size-1) / chunk_size; if (elems.max_num_ents) { assert( elems.max_num_ents >= remaining ); num_writes = (elems.max_num_ents+chunk_size-1) / chunk_size; } long remaining_writes = num_writes; Range::iterator iter = elems.range.begin(); while (remaining) { VALGRIND_MAKE_MEM_UNDEFINED( dataBuffer, bufferSize ); long count = chunk_size < remaining ? chunk_size : remaining; remaining -= count; Range::iterator next = iter; next += count; rval = writeUtil->get_element_connect( iter, next, elems.num_nodes, count * elems.num_nodes, buffer ); CHK_MB_ERR_1(rval, elem_table, status); iter = next; for (long i = 0; i < count*nodes_per_elem; ++i) { buffer[i] = idMap.find( buffer[i] ); if (0 == buffer[i]) { writeUtil->report_error("Invalid %s element connectivity. Write Aborted\n", elems.name() ); CHK_MB_ERR_1(MB_FAILURE,elem_table,status); } } dbgOut.printf(3," writing node connectivity %ld of %ld, %ld values at %ld\n", num_writes - remaining_writes + 1, num_writes, count, offset ); track.record_io( offset, count ); mhdf_writeConnectivityWithOpt( elem_table, offset, count, id_type, buffer, writeProp, &status ); CHK_MHDF_ERR_1(status, elem_table); offset += count; --remaining_writes; } // Do empty writes if necessary for parallel collective IO if (collectiveIO) { while (remaining_writes--) { assert(writeProp != H5P_DEFAULT); dbgOut.printf(3," writing (empty) connectivity chunk %ld of %ld.\n", num_writes - remaining_writes + 1, num_writes ); mhdf_writeConnectivityWithOpt( elem_table, offset, 0, id_type, 0, writeProp, &status ); CHK_MHDF_ERR_1(status, elem_table); } } mhdf_closeData( filePtr, elem_table, &status ); CHK_MHDF_ERR_0(status); track.all_reduce(); return MB_SUCCESS; }
ErrorCode moab::WriteHDF5::write_file | ( | const char * | filename, |
const bool | overwrite, | ||
const FileOptions & | opts, | ||
const EntityHandle * | export_sets, | ||
const int | export_set_count, | ||
const std::vector< std::string > & | qa_records, | ||
const Tag * | tag_list = NULL,
int | num_tags = 0,
int | user_dimension = 3
) | [virtual] |
Export the specified meshsets to a file.
filename | The filename to export. |
export_sets | Array of handles to sets to export, or NULL to export all. |
export_set_count | Length of export_sets array. |
Implements moab::WriterIface.
Definition at line 461 of file WriteHDF5.cpp.
{ mhdf_Status status; parallelWrite = false; collectiveIO = false; // Enable debug output int tmpval = 0; if (MB_SUCCESS == opts.get_int_option("DEBUG_IO", 1, tmpval)) dbgOut.set_verbosity(tmpval); //writeTagDense = (MB_SUCCESS == opts.get_null_option("DENSE_TAGS")); writeTagDense = true; // Enable some extra checks for reads. Note: amongst other things this // will print errors if the entire file is not read, so if doing a // partial read that is not a parallel read, this should be disabled. debugTrack = (MB_SUCCESS == opts.get_null_option("DEBUG_BINIO")); bufferSize = WRITE_HDF5_BUFFER_SIZE; int buf_size; ErrorCode rval = opts.get_int_option( "BUFFER_SIZE", buf_size ); if (MB_SUCCESS == rval && buf_size >= 24) bufferSize = buf_size; // Allocate internal buffer to use when gathering data to write. dataBuffer = (char*)malloc( bufferSize ); if (!dataBuffer) return error(MB_MEMORY_ALLOCATION_FAILED); // Clear filePtr so we know if it is open upon failure filePtr = 0; // Do actual write. writeProp = H5P_DEFAULT; ErrorCode result = write_file_impl( filename, overwrite, opts, set_array, num_sets, qa_records, tag_list, num_tags, user_dimension ); // close writeProp if it was opened if (writeProp != H5P_DEFAULT) H5Pclose(writeProp); // Free memory buffer free( dataBuffer ); dataBuffer = 0; // Close file bool created_file = false; if (filePtr) { created_file = true; mhdf_closeFile( filePtr, &status ); filePtr = 0; if (mhdf_isError( &status )) { writeUtil->report_error( "%s\n", mhdf_message( &status ) ); if (MB_SUCCESS == result) result = MB_FAILURE; } } // Release other resources if (MB_SUCCESS == result) result = write_finished(); else write_finished(); // If write failed, remove file unless KEEP option was specified if (MB_SUCCESS != result && created_file && MB_ENTITY_NOT_FOUND == opts.get_null_option( "KEEP" )) remove( filename ); return result; }
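A hedged usage sketch showing how this writer is typically reached through the MOAB Interface. The option names DEBUG_IO and BUFFER_SIZE are the ones parsed above; the file name and option values are illustrative:

#include "moab/Core.hpp"

int main()
{
    moab::Core mb;
    // ... load or construct a mesh here ...
    // A null set list exports the entire mesh; the ".h5m" extension
    // selects this HDF5 writer.
    moab::ErrorCode rval = mb.write_file( "out.h5m", 0,
                                          "DEBUG_IO=2;BUFFER_SIZE=1048576" );
    return ( moab::MB_SUCCESS == rval ) ? 0 : 1;
}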
ErrorCode moab::WriteHDF5::write_file_impl | ( | const char * | filename, |
const bool | overwrite, | ||
const FileOptions & | opts, | ||
const EntityHandle * | export_sets, | ||
const int | export_set_count, | ||
const std::vector< std::string > & | qa_records, | ||
const Tag * | tag_list, | ||
int | num_tags, | ||
int | user_dimension = 3
) | [private] |
Do the actual work of write_file. Separated from write_file for easier resource cleanup.
Definition at line 546 of file WriteHDF5.cpp.
{ ErrorCode result; std::list<TagDesc>::const_iterator t_itor; std::list<ExportSet>::iterator ex_itor; EntityHandle elem_count, max_id; double times[NUM_TIMES] = {0}; if (MB_SUCCESS != init()) return error(MB_FAILURE); // see if we need to report times bool cputime = false; result = opts.get_null_option("CPUTIME"); if (MB_SUCCESS == result) cputime = true; CpuTimer timer; dbgOut.tprint(1,"Gathering Mesh\n"); topState.start("gathering mesh"); // Gather mesh to export exportList.clear(); if (0 == num_sets || (1 == num_sets && set_array[0] == 0)) { result = gather_all_mesh( ); } else { std::vector<EntityHandle> passed_export_list(set_array, set_array+num_sets); result = gather_mesh_info( passed_export_list ); } topState.end(result); CHK_MB_ERR_0(result); times[GATHER_TIME] = timer.time_elapsed(); //if (nodeSet.range.size() == 0) // return error(MB_ENTITY_NOT_FOUND); dbgOut.tprint(1,"Checking ID space\n"); // Make sure ID space is sufficient elem_count = nodeSet.range.size() + setSet.range.size(); for (ex_itor = exportList.begin(); ex_itor != exportList.end(); ++ex_itor) elem_count += ex_itor->range.size(); max_id = (EntityHandle)1 << (8*sizeof(id_t)-1); if (elem_count > max_id) { writeUtil->report_error("ID space insufficient for mesh size.\n"); return error(result); } dbgOut.tprint(1, "Creating File\n" ); // Figure out the dimension in which to write the mesh. int mesh_dim; result = iFace->get_dimension( mesh_dim ); CHK_MB_ERR_0(result); if (user_dimension < 1) user_dimension = mesh_dim; user_dimension = user_dimension > mesh_dim ? mesh_dim : user_dimension; // Create the file layout, including all tables (zero-ed) and // all structure and meta information. const char* optnames[] = { "WRITE_PART", "FORMAT", 0 }; int junk; parallelWrite = (MB_SUCCESS == opts.match_option( "PARALLEL", optnames, junk )); if (parallelWrite) { // Just store Boolean value based on string option here. // parallel_create_file will set writeProp accordingly. //collectiveIO = (MB_SUCCESS == opts.get_null_option("COLLECTIVE")); //dbgOut.printf(2,"'COLLECTIVE' option = %s\n", collectiveIO ? "YES" : "NO" ); // Do this all the time, as it appears to be much faster than indep in some cases collectiveIO = true; result = parallel_create_file( filename, overwrite, qa_records, opts, tag_list, num_tags, user_dimension, times ); } else { result = serial_create_file( filename, overwrite, qa_records, tag_list, num_tags, user_dimension ); } if (MB_SUCCESS != result) return error(result); times[CREATE_TIME] = timer.time_elapsed(); dbgOut.tprint(1,"Writing Nodes.\n"); // Write node coordinates if (!nodeSet.range.empty() || parallelWrite) { topState.start( "writing coords" ); result = write_nodes(); topState.end(result); if (MB_SUCCESS != result) return error(result); } times[COORD_TIME] = timer.time_elapsed(); dbgOut.tprint(1,"Writing connectivity.\n"); // Write element connectivity for (ex_itor = exportList.begin(); ex_itor != exportList.end(); ++ex_itor) { topState.start( "writing connectivity for ", ex_itor->name() ); result = write_elems( *ex_itor ); topState.end(result); if (MB_SUCCESS != result) return error(result); } times[CONN_TIME] = timer.time_elapsed(); dbgOut.tprint(1,"Writing sets.\n"); // Write meshsets result = write_sets(times); if (MB_SUCCESS != result) return error(result); debug_barrier(); times[SET_TIME] = timer.time_elapsed(); dbgOut.tprint(1,"Writing adjacencies.\n"); // Write adjacencies // Tim says don't save node adjacencies! 
#ifdef MB_H5M_WRITE_NODE_ADJACENCIES result = write_adjacencies( nodeSet ); if (MB_SUCCESS != result) return error(result); #endif for (ex_itor = exportList.begin(); ex_itor != exportList.end(); ++ex_itor) { topState.start( "writing adjacencies for ", ex_itor->name() ); result = write_adjacencies( *ex_itor ); topState.end(result); if (MB_SUCCESS != result) return error(result); } times[ADJ_TIME] = timer.time_elapsed(); dbgOut.tprint(1,"Writing tags.\n"); // Write tags for (t_itor = tagList.begin(); t_itor != tagList.end(); ++t_itor) { std::string name; iFace->tag_get_name( t_itor->tag_id, name ); topState.start( "writing tag: ", name.c_str() ); result = write_tag( *t_itor, times ); topState.end(result); if (MB_SUCCESS != result) return error(result); } times[TAG_TIME] = timer.time_elapsed(); times[TOTAL_TIME] = timer.time_since_birth(); if (cputime) { print_times( times ); } return MB_SUCCESS; }
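The ID-space check above refuses to write a mesh whose total entity count cannot be represented as a positive id_t. A sketch of that test, with illustrative names:

#include <cstddef>

// Returns true if 'total_entities' file IDs fit in an id_t of
// 'id_bytes' bytes (IDs are positive, hence the width minus one bit).
bool id_space_sufficient( unsigned long long total_entities, std::size_t id_bytes )
{
    const unsigned long long max_id = 1ull << ( 8 * id_bytes - 1 );
    return total_entities <= max_id;
}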
ErrorCode moab::WriteHDF5::write_finished | ( | ) | [protected, virtual] |
Definition at line 425 of file WriteHDF5.cpp.
{ // release memory allocated in lists exportList.clear(); nodeSet.range.clear(); setSet.range.clear(); tagList.clear(); idMap.clear(); HDF5ErrorHandler handler; #if defined(H5Eget_auto_vers) && H5Eget_auto_vers > 1 herr_t err = H5Eget_auto( H5E_DEFAULT, &handler.func, &handler.data ); #else herr_t err = H5Eget_auto( &handler.func, &handler.data ); #endif if (err >= 0 && handler.func == &handle_hdf5_error) { assert(handler.data == &errorHandler); #if defined(H5Eget_auto_vers) && H5Eget_auto_vers > 1 H5Eset_auto( H5E_DEFAULT, errorHandler.func, errorHandler.data ); #else H5Eset_auto( errorHandler.func, errorHandler.data ); #endif } return MB_SUCCESS; }
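The body above restores the HDF5 error handler saved when the write began, but only if this writer's handler is still the one installed. A minimal sketch of the same save/restore idiom, assuming the HDF5 1.8+ error-stack API (the writer additionally compares the current handler against its own before restoring):

#include <hdf5.h>

// Restore a previously saved handler on the default error stack.
void restore_error_handler( H5E_auto2_t saved_func, void* saved_data )
{
    H5E_auto2_t current;
    void* current_data;
    if ( H5Eget_auto2( H5E_DEFAULT, &current, &current_data ) >= 0 )
        H5Eset_auto2( H5E_DEFAULT, saved_func, saved_data );
}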
ErrorCode moab::WriteHDF5::write_nodes | ( | ) | [private] |
Write out the nodes.
Note: Assigns IDs to nodes.
Definition at line 891 of file WriteHDF5.cpp.
{ mhdf_Status status; int dim, mesh_dim; ErrorCode rval; hid_t node_table; long first_id, num_nodes; if (!nodeSet.total_num_ents) return MB_SUCCESS; // no nodes! CHECK_OPEN_HANDLES; rval = iFace->get_dimension( mesh_dim ); CHK_MB_ERR_0(rval); debug_barrier(); dbgOut.print(3, "Opening Node Coords\n"); node_table = mhdf_openNodeCoords( filePtr, &num_nodes, &dim, &first_id, &status ); CHK_MHDF_ERR_0(status); IODebugTrack track( debugTrack, "nodes", num_nodes ); double* buffer = (double*)dataBuffer; #ifdef BLOCKED_COORD_IO int chunk_size = bufferSize / sizeof(double); #else int chunk_size = bufferSize / (3*sizeof(double)); #endif long remaining = nodeSet.range.size(); long num_writes = (remaining+chunk_size-1) / chunk_size; if (nodeSet.max_num_ents) { assert( nodeSet.max_num_ents >= remaining ); num_writes = (nodeSet.max_num_ents+chunk_size-1) / chunk_size; } long remaining_writes = num_writes; long offset = nodeSet.offset; Range::const_iterator iter = nodeSet.range.begin(); dbgOut.printf(3, "Writing %ld nodes in %ld blocks of %d\n", remaining, (remaining+chunk_size-1)/chunk_size, chunk_size); while (remaining) { VALGRIND_MAKE_MEM_UNDEFINED( dataBuffer, bufferSize ); long count = chunk_size < remaining ? chunk_size : remaining; remaining -= count; Range::const_iterator end = iter; end += count; #ifdef BLOCKED_COORD_IO for (int d = 0; d < dim; d++) { if (d < mesh_dim) { rval = writeUtil->get_node_coords( d, iter, end, count, buffer ); CHK_MB_ERR_1(rval, node_table, status); } else { memset( buffer, 0, count * sizeof(double) ); } dbgOut.printf(3," writing %c node chunk %ld of %ld, %ld values at %ld\n", (char)('X'+d), num_writes - remaining_writes + 1, num_writes, count, offset ); mhdf_writeNodeCoordWithOpt( node_table, offset, count, d, buffer, writeProp, &status ); CHK_MHDF_ERR_1(status, node_table); } #else rval = writeUtil->get_node_coords( -1, iter, end, 3*count, buffer ); CHK_MB_ERR_1(rval, node_table, status); dbgOut.printf(3," writing node chunk %ld of %ld, %ld values at %ld\n", num_writes - remaining_writes + 1, num_writes, count, offset ); mhdf_writeNodeCoordsWithOpt( node_table, offset, count, buffer, writeProp, &status ); CHK_MHDF_ERR_1(status, node_table); #endif track.record_io( offset, count ); iter = end; offset += count; --remaining_writes; } // Do empty writes if necessary for parallel collective IO if (collectiveIO) { while (remaining_writes--) { assert(writeProp != H5P_DEFAULT); #ifdef BLOCKED_COORD_IO for (int d = 0; d < dim; ++d) { dbgOut.printf(3," writing (empty) %c node chunk %ld of %ld.\n", (char)('X'+d), num_writes - remaining_writes, num_writes ); mhdf_writeNodeCoordWithOpt( node_table, offset, 0, d, 0, writeProp, &status ); CHK_MHDF_ERR_1(status, node_table); } #else dbgOut.printf(3," writing (empty) node chunk %ld of %ld.\n", num_writes - remaining_writes, num_writes ); mhdf_writeNodeCoordsWithOpt( node_table, offset, 0, 0, writeProp, &status ); CHK_MHDF_ERR_1(status, node_table); #endif } } mhdf_closeData( filePtr, node_table, &status ); CHK_MHDF_ERR_0(status); track.all_reduce(); return MB_SUCCESS; }
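Note the two buffer layouts selected by the BLOCKED_COORD_IO macro above: blocked IO writes one coordinate dimension per call (all x values, then y, then z), while interleaved IO writes complete xyz triples per call, so the chunk capacity differs. A sketch with illustrative names:

#include <cstddef>

// Nodes that fit in one buffered write under each layout.
std::size_t nodes_per_chunk( std::size_t buffer_bytes, bool blocked, int dim )
{
    return blocked ? buffer_bytes / sizeof( double )            // one coordinate per node
                   : buffer_bytes / ( dim * sizeof( double ) ); // dim coordinates per node
}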
ErrorCode moab::WriteHDF5::write_qa | ( | const std::vector< std::string > & | list | ) | [protected] |
Write exodus-type QA info.
Definition at line 2374 of file WriteHDF5.cpp.
{ const char* app = "MOAB"; const char* vers = MB_VERSION; char date_str[64]; char time_str[64]; CHECK_OPEN_HANDLES; std::vector<const char*> strs(list.size() ? list.size() : 4); if (list.size() == 0) { time_t t = time(NULL); tm* lt = localtime( &t ); strftime( date_str, sizeof(date_str), "%D", lt ); strftime( time_str, sizeof(time_str), "%T", lt ); strs[0] = app; strs[1] = vers; strs[2] = date_str; strs[3] = time_str; } else { for (unsigned int i = 0; i < list.size(); ++i) strs[i] = list[i].c_str(); } mhdf_Status status; dbgOut.print(2," writing QA history.\n"); mhdf_writeHistory( filePtr, &strs[0], strs.size(), &status ); CHK_MHDF_ERR_0(status); return MB_SUCCESS; }
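When the caller supplies no QA records, the writer synthesizes a default four-entry history of application, version, date, and time. A self-contained sketch of that default ("version" stands in for the real MB_VERSION macro):

#include <ctime>
#include <string>
#include <vector>

std::vector<std::string> default_qa_records()
{
    char date_str[64], time_str[64];
    std::time_t t = std::time( 0 );
    std::tm* lt = std::localtime( &t );
    std::strftime( date_str, sizeof(date_str), "%D", lt ); // e.g. "06/30/24"
    std::strftime( time_str, sizeof(time_str), "%T", lt ); // e.g. "14:03:22"
    std::vector<std::string> qa;
    qa.push_back( "MOAB" );
    qa.push_back( "version" ); // MB_VERSION in the writer itself
    qa.push_back( date_str );
    qa.push_back( time_str );
    return qa;
}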
ErrorCode moab::WriteHDF5::write_set_data | ( | const WriteUtilIface::EntityListType | which_data, |
const hid_t | handle, | ||
IODebugTrack & | track, | ||
Range * | ranged = 0,
Range * | null_stripped = 0,
std::vector< long > * | set_sizes = 0
) | [private] |
Write set contents/parents/children lists.
which_data | Which set data to write (contents, parents, or children) |
handle | HDF5 handle for data set in which to write data |
track | Debugging tool |
ranged | Will be populated with handles of sets whose contents were written in a range-compacted format (mhdf_SET_RANGE_BIT). Should be null for parents/children. |
null_stripped | Will be populated with handles of sets for which invalid or null handles were stripped from the contents list. This is only done for unordered sets. This argument should be null if writing parents/children because those lists are always ordered. |
set_sizes | Will be populated with the length of the data written for those sets for which the handles were added to either ranged or null_stripped . Values are in handle order. |
Definition at line 1120 of file WriteHDF5.cpp.
{ // ranged must be non-null for CONTENTS and null for anything else assert((which_data == WriteUtilIface::CONTENTS) == (0 != ranged)); ErrorCode rval; mhdf_Status status; debug_barrier(); // Function pointer type used to write set data void (*write_func)( hid_t, long, long, hid_t, const void*, hid_t, mhdf_Status* ); long max_vals; // max over all procs of number of values to write to data set long offset; // offset in HDF5 dataset at which to write next block of data switch (which_data) { case WriteUtilIface::CONTENTS: assert(ranged != 0 && null_stripped != 0 && set_sizes != 0); write_func = &mhdf_writeSetDataWithOpt; max_vals = maxNumSetContents; offset = setContentsOffset; dbgOut.print(2, "Writing set contents\n" ); break; case WriteUtilIface::CHILDREN: assert(!ranged && !null_stripped && !set_sizes); write_func = &mhdf_writeSetParentsChildrenWithOpt; max_vals = maxNumSetChildren; offset = setChildrenOffset; dbgOut.print(2, "Writing set child lists\n" ); break; case WriteUtilIface::PARENTS: assert(!ranged && !null_stripped && !set_sizes); write_func = &mhdf_writeSetParentsChildrenWithOpt; max_vals = maxNumSetParents; offset = setParentsOffset; dbgOut.print(2, "Writing set parent lists\n" ); break; default: assert(false); return MB_FAILURE; } //assert(max_vals > 0); // should have skipped this function otherwise // buffer to use for IO id_t* buffer = reinterpret_cast<id_t*>(dataBuffer); // number of handles that will fit in the buffer const size_t buffer_size = bufferSize / sizeof(EntityHandle); // the total number of write calls that must be made, including no-ops for collective io const size_t num_total_writes = (max_vals + buffer_size-1)/buffer_size; std::vector<SpecialSetData>::iterator si = specialSets.begin(); std::vector<id_t> remaining; // data left over from prev iteration because it didn't fit in buffer size_t remaining_offset = 0; // avoid erasing from front of 'remaining' const EntityHandle* remaining_ptr = 0; // remaining for non-ranged data size_t remaining_count = 0; const id_t* special_rem_ptr = 0; Range::const_iterator i = setSet.range.begin(), j, rhint, nshint; if (ranged) rhint = ranged->begin(); if (null_stripped) nshint = null_stripped->begin(); for (size_t w = 0; w < num_total_writes; ++w) { if (i == setSet.range.end() && remaining.empty() && !remaining_ptr) { // If here, then we've written everything but we need to make more write calls because we're doing collective IO in parallel (*write_func)( handle, 0, 0, id_type, 0, writeProp, &status ); CHK_MHDF_ERR_0( status ); continue; } // If we had some left-over data from a range-compacted set from the last iteration, add it to the buffer now size_t count = 0; if (!remaining.empty()) { count = remaining.size() - remaining_offset; if (count > buffer_size) { memcpy( buffer, &remaining[remaining_offset], buffer_size*sizeof(id_t) ); count = buffer_size; remaining_offset += buffer_size; } else { memcpy( buffer, &remaining[remaining_offset], count*sizeof(id_t) ); remaining_offset = 0; remaining.clear(); } } // If we had some left-over data from a non-range-compacted set from the last iteration, add it to the buffer now else if (remaining_ptr) { if (remaining_count > buffer_size) { rval = vector_to_id_list( remaining_ptr, buffer, buffer_size ); CHK_MB_ERR_0(rval); count = buffer_size; remaining_ptr += count; remaining_count -= count; } else { rval = vector_to_id_list( remaining_ptr, buffer, remaining_count ); CHK_MB_ERR_0(rval); count = remaining_count; remaining_ptr = 0; remaining_count = 0; } } // If we had some left-over data from a "special" (i.e. parallel shared) set else if (special_rem_ptr) { if (remaining_count > buffer_size) { memcpy( buffer, special_rem_ptr, buffer_size*sizeof(id_t) ); count = buffer_size; special_rem_ptr += count; remaining_count -= count; } else { memcpy( buffer, special_rem_ptr, remaining_count*sizeof(id_t) ); count = remaining_count; special_rem_ptr = 0; remaining_count = 0; } } // While there is both space remaining in the buffer and more sets to write, append more set data to buffer. while (count < buffer_size && i != setSet.range.end()) { // Special case for "special" (i.e. parallel shared) sets: we already have the data in a vector, just copy it. if (si != specialSets.end() && si->setHandle == *i) { std::vector<id_t>& list = (which_data == WriteUtilIface::CONTENTS) ? si->contentIds : (which_data == WriteUtilIface::PARENTS) ? si->parentIds : si->childIds; size_t append = list.size(); if (count + list.size() > buffer_size) { append = buffer_size - count; special_rem_ptr = &list[append]; remaining_count = list.size() - append; } memcpy( buffer+count, &list[0], append*sizeof(id_t) ); ++i; ++si; count += append; continue; } j = i; ++i; const EntityHandle* ptr; int len; unsigned char flags; rval = writeUtil->get_entity_list_pointers( j, i, &ptr, which_data, &len, &flags ); if (MB_SUCCESS != rval) return rval; if (which_data == WriteUtilIface::CONTENTS && !(flags&MESHSET_ORDERED)) { bool compacted; remaining.clear(); if (len == 0) { compacted = false; } else { assert(!(len%2)); rval = range_to_blocked_list( ptr, len/2, remaining, compacted ); if (MB_SUCCESS != rval) return rval; } if (compacted) { rhint = ranged->insert( rhint, *j ); set_sizes->push_back( remaining.size() ); } else if (remaining.size() != (unsigned)len) { nshint = null_stripped->insert( nshint, *j ); set_sizes->push_back( remaining.size() ); } if (count + remaining.size() <= buffer_size) { memcpy( buffer + count, &remaining[0], sizeof(id_t)*remaining.size() ); count += remaining.size(); remaining.clear(); remaining_offset = 0; } else { remaining_offset = buffer_size - count; memcpy( buffer + count, &remaining[0], sizeof(id_t)*remaining_offset ); count += remaining_offset; } } else { if (count + len > buffer_size) { size_t append = buffer_size - count; remaining_ptr = ptr + append; remaining_count = len - append; len = append; } rval = vector_to_id_list( ptr, buffer+count, len ); CHK_MB_ERR_0(rval); count += len; } } // Write the buffer. (*write_func)( handle, offset, count, id_type, buffer, writeProp, &status ); CHK_MHDF_ERR_0( status ); track.record_io( offset, count ); offset += count; } return MB_SUCCESS; }
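For unordered sets, range_to_blocked_list produces the range-compacted contents encoding referenced above: pairs of (start_id, run_length), flagged in the file with mhdf_SET_RANGE_BIT when the pairs are shorter than the flat handle list. A hedged sketch of that encoding over already-sorted file IDs (illustrative, not the MOAB routine itself):

#include <cstddef>
#include <vector>

std::vector<long> to_blocked_list( const std::vector<long>& sorted_ids )
{
    std::vector<long> blocked; // (start, count) pairs
    std::size_t i = 0;
    while ( i < sorted_ids.size() )
    {
        long start = sorted_ids[i];
        long count = 1;
        while ( i + count < sorted_ids.size() &&
                sorted_ids[i + count] == start + count )
            ++count;
        blocked.push_back( start );
        blocked.push_back( count );
        i += count;
    }
    return blocked; // e.g. {1,2,3,7,8} -> {1,3, 7,2}
}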
ErrorCode moab::WriteHDF5::write_sets | ( | double * | times | ) | [private] |
Write out meshsets.
Write the passed set of meshsets, including parent/child relations.
Note: Must have written nodes and element connectivity so entities have assigned IDs.
Definition at line 1331 of file WriteHDF5.cpp.
{ mhdf_Status status; ErrorCode rval; long first_id, size; hid_t table; CpuTimer timer; CHECK_OPEN_HANDLES; /* If no sets, just return success */ if (!writeSets) return MB_SUCCESS; debug_barrier(); dbgOut.printf(2,"Writing %lu non-shared sets\n", (unsigned long)setSet.range.size() ); dbgOut.print(3,"Non-shared sets", setSet.range ); /* Write set parents */ if (writeSetParents) { topState.start( "writing parent lists for local sets" ); table = mhdf_openSetParents( filePtr, &size, &status ); CHK_MHDF_ERR_0(status); IODebugTrack track( debugTrack, "SetParents", size ); rval = write_set_data( WriteUtilIface::PARENTS, table, track ); topState.end(rval); CHK_MB_ERR_1(rval,table,status); mhdf_closeData( filePtr, table, &status ); CHK_MHDF_ERR_0(status); times[SET_PARENT] = timer.time_elapsed(); track.all_reduce(); } /* Write set children */ if (writeSetChildren) { topState.start( "writing child lists for local sets" ); table = mhdf_openSetChildren( filePtr, &size, &status ); CHK_MHDF_ERR_0(status); IODebugTrack track( debugTrack, "SetChildren", size ); rval = write_set_data( WriteUtilIface::CHILDREN, table, track ); topState.end(rval); CHK_MB_ERR_1(rval,table,status); mhdf_closeData( filePtr, table, &status ); CHK_MHDF_ERR_0(status); times[SET_CHILD] = timer.time_elapsed(); track.all_reduce(); } /* Write set contents */ Range ranged_sets, null_stripped_sets; std::vector<long> set_sizes; if (writeSetContents) { topState.start( "writing content lists for local sets" ); table = mhdf_openSetData( filePtr, &size, &status ); CHK_MHDF_ERR_0(status); IODebugTrack track( debugTrack, "SetContents", size ); rval = write_set_data( WriteUtilIface::CONTENTS, table, track, &ranged_sets, &null_stripped_sets, &set_sizes ); topState.end(rval); CHK_MB_ERR_1(rval,table,status); mhdf_closeData( filePtr, table, &status ); CHK_MHDF_ERR_0(status); times[SET_CONTENT] = timer.time_elapsed(); track.all_reduce(); } assert( ranged_sets.size() + null_stripped_sets.size() == set_sizes.size() ); /* Write set description table */ debug_barrier(); topState.start( "writing descriptions of local sets" ); dbgOut.printf(2,"Writing %lu non-shared sets\n", (unsigned long)setSet.range.size() ); dbgOut.print(3,"Non-shared sets", setSet.range ); /* Open the table */ table = mhdf_openSetMeta( filePtr, &size, &first_id, &status ); CHK_MHDF_ERR_0(status); IODebugTrack track_meta( debugTrack, "SetMeta", size ); /* Some debug stuff */ debug_barrier(); dbgOut.printf(2,"Writing %lu non-shared sets\n", (unsigned long)setSet.range.size() ); dbgOut.print(3,"Non-shared sets", setSet.range ); /* counts and buffers and such */ mhdf_index_t* const buffer = reinterpret_cast<mhdf_index_t*>(dataBuffer); const size_t buffer_size = bufferSize / (4*sizeof(mhdf_index_t)); const size_t num_local_writes = (setSet.range.size() + buffer_size - 1) / buffer_size; const size_t num_global_writes = (setSet.max_num_ents + buffer_size-1) / buffer_size; assert(num_local_writes <= num_global_writes); assert(num_global_writes > 0); /* data about sets for which number of handles written is not the same as the number of handles in the set (range-compacted or null handles stripped out) */ Range::const_iterator i = setSet.range.begin(); Range::const_iterator r = ranged_sets.begin(); Range::const_iterator s = null_stripped_sets.begin(); std::vector<long>::const_iterator n = set_sizes.begin(); assert(ranged_sets.size() + null_stripped_sets.size() == set_sizes.size()); /* we write the end index for each list, rather than the count */ mhdf_index_t prev_contents_end = setContentsOffset - 1; mhdf_index_t prev_children_end = setChildrenOffset - 1; mhdf_index_t prev_parents_end = setParentsOffset - 1; /* while there is more data to write */ size_t offset = setSet.offset; std::vector<SpecialSetData>::const_iterator si = specialSets.begin(); for (size_t w = 0; w < num_local_writes; ++w) { // get a buffer full of data size_t count = 0; while (count < buffer_size && i != setSet.range.end()) { // get set properties long num_ent, num_child, num_parent; unsigned long flags; if (si != specialSets.end() && si->setHandle == *i) { flags = si->setFlags; num_ent = si->contentIds.size(); num_child = si->childIds.size(); num_parent = si->parentIds.size(); ++si; if (r != ranged_sets.end() && *i == *r) { assert(flags & mhdf_SET_RANGE_BIT); ++r; ++n; } else if (s != null_stripped_sets.end() && *i == *s) { ++s; ++n; } } else { assert(si == specialSets.end() || si->setHandle > *i); // get set properties rval = get_set_info( *i, num_ent, num_child, num_parent, flags ); CHK_MB_ERR_1(rval, table, status); // check if size is something other than num handles in set if (r != ranged_sets.end() && *i == *r) { num_ent = *n; ++r; ++n; flags |= mhdf_SET_RANGE_BIT; } else if (s != null_stripped_sets.end() && *i == *s) { num_ent = *n; ++s; ++n; } } // put data in buffer mhdf_index_t* local = buffer + 4*count; prev_contents_end += num_ent; prev_children_end += num_child; prev_parents_end += num_parent; local[0] = prev_contents_end; local[1] = prev_children_end; local[2] = prev_parents_end; local[3] = flags; // iterate ++count; ++i; } // write the data mhdf_writeSetMetaWithOpt( table, offset, count, MHDF_INDEX_TYPE, buffer, writeProp, &status ); CHK_MHDF_ERR_1(status, table); track_meta.record_io( offset, count ); offset += count; } assert( r == ranged_sets.end() ); assert( s == null_stripped_sets.end() ); assert( n == set_sizes.end() ); /* if doing parallel write with collective IO, do null write calls because other procs aren't done yet and write calls are collective */ for (size_t w = num_local_writes; w != num_global_writes; ++w) { mhdf_writeSetMetaWithOpt( table, 0, 0, MHDF_INDEX_TYPE, 0, writeProp, &status ); CHK_MHDF_ERR_1(status, table); } topState.end(); mhdf_closeData( filePtr, table, &status ); CHK_MHDF_ERR_0(status); times[SET_META] = timer.time_elapsed(); track_meta.all_reduce(); return MB_SUCCESS; }
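Each row of the set-description table written above stores the end index of the set's contents, children, and parents lists plus its flags; readers recover list lengths by differencing consecutive rows. A sketch of the row computation (names illustrative):

struct SetMetaRow { long contents_end, children_end, parents_end, flags; };

// Advance the previous row's end indices by this set's list lengths.
SetMetaRow next_meta_row( const SetMetaRow& prev, long num_ent,
                          long num_child, long num_parent, long flags )
{
    SetMetaRow row;
    row.contents_end = prev.contents_end + num_ent;
    row.children_end = prev.children_end + num_child;
    row.parents_end  = prev.parents_end + num_parent;
    row.flags        = flags;
    return row;
}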
ErrorCode moab::WriteHDF5::write_sparse_ids | ( | const TagDesc & | tag_data, |
const Range & | range, | ||
hid_t | table_handle, | ||
size_t | table_size, | ||
const char * | name = 0
) | [private] |
Write ID table for sparse tag.
Definition at line 1897 of file WriteHDF5.cpp.
{ ErrorCode rval; mhdf_Status status; CHECK_OPEN_HANDLES; std::string tname(name ? name : "<UNKNOWN TAG?>"); tname += " - Ids"; IODebugTrack track( debugTrack, tname, table_size ); // Set up data buffer for writing IDs size_t chunk_size = bufferSize / sizeof(id_t); id_t* id_buffer = (id_t*)dataBuffer; // Write IDs of tagged entities. long remaining = range.size(); long offset = tag_data.sparse_offset; long num_writes = (remaining + chunk_size - 1)/chunk_size; if (tag_data.max_num_ents) { assert(tag_data.max_num_ents >= (unsigned long)remaining); num_writes = (tag_data.max_num_ents + chunk_size - 1)/chunk_size; } Range::const_iterator iter = range.begin(); while (remaining) { VALGRIND_MAKE_MEM_UNDEFINED( dataBuffer, bufferSize ); // write "chunk_size" blocks of data long count = (unsigned long)remaining > chunk_size ? chunk_size : remaining; remaining -= count; Range::const_iterator stop = iter; stop += count; Range tmp; tmp.merge( iter, stop ); iter = stop; assert(tmp.size() == (unsigned)count); rval = range_to_id_list( tmp, id_buffer ); CHK_MB_ERR_0( rval ); // write the data dbgOut.print(3," writing sparse tag entity chunk.\n"); track.record_io( offset, count ); mhdf_writeSparseTagEntitiesWithOpt( id_table, offset, count, id_type, id_buffer, writeProp, &status ); CHK_MHDF_ERR_0( status ); offset += count; --num_writes; } // while (remaining) // Do empty writes if necessary for parallel collective IO if (collectiveIO) { while (num_writes--) { assert(writeProp != H5P_DEFAULT); dbgOut.print(3," writing empty sparse tag entity chunk.\n"); mhdf_writeSparseTagEntitiesWithOpt( id_table, offset, 0, id_type, 0, writeProp, &status ); CHK_MHDF_ERR_0( status ); } } track.all_reduce(); return MB_SUCCESS; }
ErrorCode moab::WriteHDF5::write_sparse_tag | ( | const TagDesc & | tag_data, |
const std::string & | tag_name, | ||
DataType | tag_data_type, | ||
hid_t | hdf5_data_type, | ||
int | hdf5_type_size | ||
) | [private] |
Write fixed-length tag data in sparse format.
Definition at line 1968 of file WriteHDF5.cpp.
{ ErrorCode rval; mhdf_Status status; hid_t tables[3]; long table_size, data_size; CHECK_OPEN_HANDLES; // get entities for which to write tag values Range range; rval = get_sparse_tagged_entities( tag_data, range ); //open tables to write info mhdf_openSparseTagData( filePtr, name.c_str(), &table_size, &data_size, tables, &status); CHK_MHDF_ERR_0(status); assert( range.size() + tag_data.sparse_offset <= (unsigned long)table_size ); // fixed-length tag assert( table_size == data_size ); // Write IDs for tagged entities subState.start( "writing sparse ids for tag: ", name.c_str() ); rval = write_sparse_ids( tag_data, range, tables[0], table_size, name.c_str() ); subState.end(rval); CHK_MB_ERR_2( rval, tables, status ); mhdf_closeData( filePtr, tables[0], &status ); CHK_MHDF_ERR_1(status, tables[1]); // Set up data buffer for writing tag values IODebugTrack track( debugTrack, name + " Data", data_size ); subState.start( "writing sparse values for tag: ", name.c_str() ); rval = write_tag_values( tag_data.tag_id, tables[1], tag_data.sparse_offset, range, mb_data_type, value_type, value_type_size, tag_data.max_num_ents, track ); subState.end(rval); mhdf_closeData( filePtr, tables[1], &status ); CHK_MB_ERR_0(rval); CHK_MHDF_ERR_0(status); track.all_reduce(); return MB_SUCCESS; }
ErrorCode moab::WriteHDF5::write_tag | ( | const TagDesc & | tag_data, |
double * | times | ||
) | [private] |
Write tag for all entities.
Write tag information and data.
Note: Must have already written nodes, elem connectivity and sets so that entities have IDs assigned.
Definition at line 1845 of file WriteHDF5.cpp.
{ std::string name; ErrorCode rval = iFace->tag_get_name( tag_data.tag_id, name ); if (MB_SUCCESS != rval) return error(rval); CHECK_OPEN_HANDLES; debug_barrier(); dbgOut.tprintf( 1, "Writing tag: \"%s\"\n", name.c_str() ); int moab_size, elem_size, array_len; DataType moab_type; mhdf_TagDataType mhdf_type; hid_t hdf5_type; rval = get_tag_size( tag_data.tag_id, moab_type, moab_size, elem_size, array_len, mhdf_type, hdf5_type ); if (MB_SUCCESS != rval) return error(rval); CpuTimer timer; if (array_len == MB_VARIABLE_LENGTH && tag_data.write_sparse) { dbgOut.printf( 2, "Writing sparse data for var-len tag: \"%s\"\n", name.c_str() ); rval = write_var_len_tag( tag_data, name, moab_type, hdf5_type, elem_size ); times[VARLEN_TAG_TIME] += timer.time_elapsed(); } else { int data_len = elem_size; if (moab_type != MB_TYPE_BIT) data_len *= array_len; if (tag_data.write_sparse) { dbgOut.printf( 2, "Writing sparse data for tag: \"%s\"\n", name.c_str() ); rval = write_sparse_tag( tag_data, name, moab_type, hdf5_type, data_len ); times[SPARSE_TAG_TIME] += timer.time_elapsed(); } for (size_t i = 0; MB_SUCCESS == rval && i < tag_data.dense_list.size(); ++i) { const ExportSet* set = find( tag_data.dense_list[i] ); assert(0 != set); debug_barrier(); dbgOut.printf( 2, "Writing dense data for tag: \"%s\" on group \"%s\"\n", name.c_str(), set->name() ); subState.start( "writing dense data for tag: ", (name + ":" + set->name()).c_str() ); rval = write_dense_tag( tag_data, *set, name, moab_type, hdf5_type, data_len ); subState.end(rval); } times[DENSE_TAG_TIME] += timer.time_elapsed(); } H5Tclose( hdf5_type ); return MB_SUCCESS == rval ? MB_SUCCESS : error(rval); }
ErrorCode moab::WriteHDF5::write_tag_values | ( | Tag | tag_id, |
hid_t | data_table, | ||
unsigned long | data_offset, | ||
const Range & | range, | ||
DataType | tag_data_type, | ||
hid_t | hdf5_data_type, | ||
int | hdf5_type_size, | ||
unsigned long | max_num_ents, | ||
IODebugTrack & | debug_track | ||
) | [private] |
Write data for fixed-size tag.
Definition at line 2295 of file WriteHDF5.cpp.
{ mhdf_Status status; CHECK_OPEN_HANDLES; // Set up data buffer for writing tag values size_t chunk_size = bufferSize / value_type_size; assert( chunk_size > 0 ); char* tag_buffer = (char*)dataBuffer; // Write the tag values size_t remaining = range_in.size(); size_t offset = offset_in; Range::const_iterator iter = range_in.begin(); long num_writes = (remaining + chunk_size - 1)/chunk_size; if (max_num_ents) { assert( max_num_ents >= remaining ); num_writes = (max_num_ents + chunk_size - 1)/chunk_size; } while (remaining) { VALGRIND_MAKE_MEM_UNDEFINED( dataBuffer, bufferSize ); // write "chunk_size" blocks of data long count = (unsigned long)remaining > chunk_size ? chunk_size : remaining; remaining -= count; memset( tag_buffer, 0, count * value_type_size ); Range::const_iterator stop = iter; stop += count; Range range; range.merge( iter, stop ); iter = stop; assert(range.size() == (unsigned)count); ErrorCode rval = iFace->tag_get_data( tag_id, range, tag_buffer ); CHK_MB_ERR_0(rval); // Convert EntityHandles to file ids if (mb_data_type == MB_TYPE_HANDLE) convert_handle_tag( reinterpret_cast<EntityHandle*>(tag_buffer), count * value_type_size / sizeof(EntityHandle) ); // write the data dbgOut.print(2," writing tag value chunk.\n"); track.record_io( offset, count ); assert(value_type > 0); mhdf_writeTagValuesWithOpt( data_table, offset, count, value_type, tag_buffer, writeProp, &status ); CHK_MHDF_ERR_0(status); offset += count; --num_writes; } // while (remaining) // Do empty writes if necessary for parallel collective IO if (collectiveIO) { while (num_writes--) { assert(writeProp != H5P_DEFAULT); dbgOut.print(2," writing empty tag value chunk.\n"); assert(value_type > 0); mhdf_writeTagValuesWithOpt( data_table, offset, 0, value_type, 0, writeProp, &status ); CHK_MHDF_ERR_0( status ); } } track.all_reduce(); return MB_SUCCESS; }
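For MB_TYPE_HANDLE tags, the buffer is rewritten in place so that entity handles become file IDs, with 0 marking entities that are not being written. A hedged sketch of that conversion, using a plain std::map in place of the writer's RangeMap:

#include <cstddef>
#include <map>

typedef long FileId; // stand-in for the writer's id_t

void convert_handles( FileId* values, std::size_t count,
                      const std::map<FileId, FileId>& id_map )
{
    for ( std::size_t i = 0; i < count; ++i )
    {
        std::map<FileId, FileId>::const_iterator it = id_map.find( values[i] );
        values[i] = ( it == id_map.end() ) ? 0 : it->second; // 0 == not written
    }
}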
WriteUtilIface* moab::WriteHDF5::write_util | ( | ) | [inline] |
Definition at line 185 of file WriteHDF5.hpp.
{ return writeUtil; }
ErrorCode moab::WriteHDF5::write_var_len_data | ( | const TagDesc & | tag_data, |
const Range & | range, | ||
hid_t | table, | ||
size_t | table_size, | ||
bool | handle_tag, | ||
hid_t | hdf_type, | ||
int | type_size, | ||
const char * | name = 0
) | [private] |
Write the tag value data set for a variable-length tag.
Definition at line 2108 of file WriteHDF5.cpp.
{ ErrorCode rval; mhdf_Status status; CHECK_OPEN_HANDLES; assert(!handle_tag || sizeof(EntityHandle) == type_size); std::string tname(name ? name : "<UNKNOWN TAG?>"); tname += " - Values"; IODebugTrack track( debugTrack, tname, table_size ); const size_t buffer_size = bufferSize / type_size; size_t num_writes = (table_size + buffer_size - 1) / buffer_size; if (collectiveIO) { assert(tag_data.max_num_vals > 0); num_writes = (tag_data.max_num_vals + buffer_size - 1) / buffer_size; } unsigned char* buffer = (unsigned char*)dataBuffer; const void* prev_data = 0; // data left over from prev iteration size_t prev_len = 0; Range::const_iterator iter = range.begin(); long offset = tag_data.var_data_offset; while (prev_data || iter != range.end()) { size_t count = 0; if (prev_data) { size_t len; const void* ptr = prev_data; if (prev_len <= buffer_size) { len = prev_len; prev_data = 0; prev_len = 0; } else { len = buffer_size; prev_data = ((const char*)prev_data) + buffer_size*type_size; prev_len -= buffer_size; } if (handle_tag) convert_handle_tag( (const EntityHandle*)ptr, (EntityHandle*)buffer, len ); else memcpy( buffer, ptr, len * type_size ); count = len; } for ( ; count < buffer_size && iter != range.end(); ++iter) { int len; const void* ptr; rval = iFace->tag_get_by_ptr( tag_data.tag_id, &*iter, 1, &ptr, &len ); CHK_MB_ERR_0(rval); if (len+count > buffer_size) { prev_len = len + count - buffer_size; len = buffer_size - count; prev_data = ((const char*)ptr) + len*type_size; } if (handle_tag) convert_handle_tag( (const EntityHandle*)ptr, ((EntityHandle*)buffer) + count, len ); else memcpy( buffer + count*type_size, ptr, len * type_size ); count += len; } track.record_io( offset, count ); mhdf_writeTagValuesWithOpt( table, offset, count, hdf_type, buffer, writeProp, &status ); CHK_MHDF_ERR_0(status); offset += count; --num_writes; } // Do empty writes if necessary for parallel collective IO if (collectiveIO) { while (num_writes--) { assert(writeProp != H5P_DEFAULT); dbgOut.print(3," writing empty var-len tag data chunk.\n"); mhdf_writeTagValuesWithOpt( table, 0, 0, hdf_type, 0, writeProp, &status ); CHK_MHDF_ERR_0( status ); } } track.all_reduce(); return MB_SUCCESS; }
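The trickiest part of the loop above is the carry-over: a value list that does not fit in the remaining buffer space is split, and its tail is emitted at the start of the next chunk. A simplified sketch of that split, with illustrative names and byte-oriented sizes:

#include <cstddef>
#include <cstring>

struct Carry { const char* ptr; std::size_t len; }; // unwritten tail

// Copy as much of 'data' as fits; record the remainder in 'carry'.
std::size_t append_values( char* buffer, std::size_t capacity, std::size_t used,
                           const char* data, std::size_t len, Carry& carry )
{
    const std::size_t room = capacity - used;
    if ( len > room )
    {
        carry.ptr = data + room;
        carry.len = len - room;
        len = room;
    }
    std::memcpy( buffer + used, data, len );
    return used + len;
}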
ErrorCode moab::WriteHDF5::write_var_len_indices | ( | const TagDesc & | tag_data, |
const Range & | range, | ||
hid_t | idx_table, | ||
size_t | table_size, | ||
int | type_size, | ||
const char * | name = 0
) | [private] |
Write the end-index data set for a variable-length tag.
Definition at line 2027 of file WriteHDF5.cpp.
{ ErrorCode rval; mhdf_Status status; CHECK_OPEN_HANDLES; std::string tname(name ? name : "<UNKNOWN TAG?>"); tname += " - End Indices"; IODebugTrack track( debugTrack, tname, table_size ); // Set up data buffer for writing indices size_t chunk_size = bufferSize / (std::max(sizeof(void*),sizeof(long)) + sizeof(int)); mhdf_index_t* idx_buffer = (mhdf_index_t*)dataBuffer; const void** junk = (const void**)dataBuffer; int* size_buffer = (int*)(dataBuffer + chunk_size*std::max(sizeof(void*),sizeof(mhdf_index_t))); // Write IDs of tagged entities. long data_offset = tag_data.var_data_offset - 1; // offset at which to write data buffer size_t remaining = range.size(); size_t offset = tag_data.sparse_offset; size_t num_writes = (remaining + chunk_size - 1)/chunk_size; if (tag_data.max_num_ents) { assert(tag_data.max_num_ents >= (unsigned long)remaining); num_writes = (tag_data.max_num_ents + chunk_size - 1)/chunk_size; } Range::const_iterator iter = range.begin(); while (remaining) { VALGRIND_MAKE_MEM_UNDEFINED( dataBuffer, bufferSize ); // write "chunk_size" blocks of data size_t count = remaining > chunk_size ? chunk_size : remaining; remaining -= count; Range::const_iterator stop = iter; stop += count; Range tmp; tmp.merge( iter, stop ); iter = stop; assert(tmp.size() == (unsigned)count); rval = iFace->tag_get_by_ptr( tag_data.tag_id, tmp, junk, size_buffer ); CHK_MB_ERR_0( rval ); // calculate end indices dbgOut.print(3," writing var-len tag offset chunk.\n"); track.record_io( offset, count ); for (size_t i = 0; i < count; ++i) { data_offset += size_buffer[i]; idx_buffer[i] = data_offset; } // write mhdf_writeSparseTagIndicesWithOpt( idx_table, offset, count, MHDF_INDEX_TYPE, idx_buffer, writeProp, &status ); CHK_MHDF_ERR_0( status ); offset += count; --num_writes; } // while (remaining) // Do empty writes if necessary for parallel collective IO if (collectiveIO) { while (num_writes--) { assert(writeProp != H5P_DEFAULT); dbgOut.print(3," writing empty sparse tag entity chunk.\n"); mhdf_writeSparseTagIndicesWithOpt( idx_table, offset, 0, id_type, 0, writeProp, &status ); CHK_MHDF_ERR_0( status ); } } track.all_reduce(); return MB_SUCCESS; }
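A worked sketch of the end-index computation above: for per-entity value counts {3, 1, 4} and var_data_offset == 10, the indices written are the inclusive end positions {12, 13, 17} in the data table:

#include <cstddef>
#include <vector>

std::vector<long> end_indices( const std::vector<int>& counts, long var_data_offset )
{
    std::vector<long> idx;
    long end = var_data_offset - 1; // index of the last value written so far
    for ( std::size_t i = 0; i < counts.size(); ++i )
    {
        end += counts[i];
        idx.push_back( end );
    }
    return idx; // {12, 13, 17} for the example above
}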
ErrorCode moab::WriteHDF5::write_var_len_tag | ( | const TagDesc & | tag_info, |
const std::string & | tag_name, | ||
DataType | tag_data_type, | ||
hid_t | hdf5_type, | ||
int | hdf5_type_size | ||
) | [private] |
Write variable-length tag data.
Definition at line 2201 of file WriteHDF5.cpp.
{ ErrorCode rval; mhdf_Status status; hid_t tables[3]; long table_size; long data_table_size; CHECK_OPEN_HANDLES; // get entities for which to write tag values Range range; rval = get_sparse_tagged_entities( tag_data, range ); //open tables to write info mhdf_openSparseTagData( filePtr, name.c_str(), &table_size, &data_table_size, tables, &status); CHK_MHDF_ERR_0(status); assert( range.size() + tag_data.sparse_offset <= (unsigned long)table_size ); // Write IDs for tagged entities subState.start( "writing ids for var-len tag: ", name.c_str() ); rval = write_sparse_ids( tag_data, range, tables[0], table_size, name.c_str() ); subState.end(rval); CHK_MB_ERR_2( rval, tables, status ); mhdf_closeData( filePtr, tables[0], &status ); CHK_MHDF_ERR_2(status, tables + 1); // Write offsets for tagged entities subState.start( "writing indices for var-len tag: ", name.c_str() ); rval = write_var_len_indices( tag_data, range, tables[2], table_size, type_size, name.c_str() ); subState.end(rval); mhdf_closeData( filePtr, tables[2], &status ); CHK_MB_ERR_1( rval, tables[1], status ); CHK_MHDF_ERR_1(status, tables[1]); // Write the actual tag data subState.start( "writing values for var-len tag: ", name.c_str() ); rval = write_var_len_data( tag_data, range, tables[1], data_table_size, mb_data_type == MB_TYPE_HANDLE, hdf_type, type_size, name.c_str() ); subState.end(rval); mhdf_closeData( filePtr, tables[1], &status ); CHK_MB_ERR_0( rval ); CHK_MHDF_ERR_0(status); return MB_SUCCESS; }
size_t moab::WriteHDF5::bufferSize [protected] |
The size of the data buffer (dataBuffer).
Definition at line 286 of file WriteHDF5.hpp.
bool moab::WriteHDF5::collectiveIO [protected] |
True if using collective IO calls for parallel write.
Definition at line 364 of file WriteHDF5.hpp.
char* moab::WriteHDF5::dataBuffer [protected] |
A memory buffer to use for all I/O operations.
Definition at line 288 of file WriteHDF5.hpp.
DebugOutput moab::WriteHDF5::dbgOut [protected] |
Utility to log debug output.
Definition at line 374 of file WriteHDF5.hpp.
bool moab::WriteHDF5::debugTrack [protected] |
Look for overlapping and/or missing writes.
Definition at line 380 of file WriteHDF5.hpp.
HDF5ErrorHandler moab::WriteHDF5::errorHandler [protected] |
Store old HDF5 error handling function.
Definition at line 190 of file WriteHDF5.hpp.
std::list<ExportSet> moab::WriteHDF5::exportList [protected] |
The list of elements to export.
Definition at line 302 of file WriteHDF5.hpp.
mhdf_FileHandle moab::WriteHDF5::filePtr [protected] |
The file handle from the mhdf library.
Definition at line 296 of file WriteHDF5.hpp.
const hid_t moab::WriteHDF5::id_type = get_id_type() [static] |
HDF5 type corresponding to the type of id_t.
Definition at line 80 of file WriteHDF5.hpp.
RangeMap<EntityHandle,id_t> moab::WriteHDF5::idMap [protected] |
Map from entity handles to file IDs.
Definition at line 299 of file WriteHDF5.hpp.
Interface* moab::WriteHDF5::iFace [protected] |
Interface pointer passed to constructor.
Definition at line 291 of file WriteHDF5.hpp.
long moab::WriteHDF5::maxNumSetChildren [protected] |
Definition at line 326 of file WriteHDF5.hpp.
long moab::WriteHDF5::maxNumSetContents [protected] |
The largest number of values to write for any processor (needed to do collective IO).
Definition at line 326 of file WriteHDF5.hpp.
long moab::WriteHDF5::maxNumSetParents [protected] |
Definition at line 326 of file WriteHDF5.hpp.
ExportSet moab::WriteHDF5::nodeSet [protected] |
The list of nodes to export.
Definition at line 304 of file WriteHDF5.hpp.
bool moab::WriteHDF5::parallelWrite [protected] |
True if doing parallel write.
Definition at line 362 of file WriteHDF5.hpp.
unsigned long moab::WriteHDF5::setChildrenOffset [protected] |
Offset into set children table (zero except for parallel)
Definition at line 323 of file WriteHDF5.hpp.
unsigned long moab::WriteHDF5::setContentsOffset [protected] |
Offset into set contents table (zero except for parallel)
Definition at line 321 of file WriteHDF5.hpp.
unsigned long moab::WriteHDF5::setParentsOffset [protected] |
Definition at line 323 of file WriteHDF5.hpp.
ExportSet moab::WriteHDF5::setSet [protected] |
The list of sets to export.
Definition at line 306 of file WriteHDF5.hpp.
std::vector<SpecialSetData> moab::WriteHDF5::specialSets [protected] |
Array of special/shared sets, in order of handle value.
Definition at line 353 of file WriteHDF5.hpp.
MPEState moab::WriteHDF5::subState [static, protected] |
Definition at line 377 of file WriteHDF5.hpp.
std::list<TagDesc> moab::WriteHDF5::tagList [protected] |
The list of tags to export.
Definition at line 359 of file WriteHDF5.hpp.
MPEState moab::WriteHDF5::topState [static, protected] |
Definition at line 376 of file WriteHDF5.hpp.
hid_t moab::WriteHDF5::writeProp [protected] |
Property set to pass to H5Dwrite calls. For serial writes this should be H5P_DEFAULT. For parallel writes it may request collective IO.
Definition at line 371 of file WriteHDF5.hpp.
bool moab::WriteHDF5::writeSetChildren [protected] |
Definition at line 334 of file WriteHDF5.hpp.
bool moab::WriteHDF5::writeSetContents [protected] |
Definition at line 334 of file WriteHDF5.hpp.
bool moab::WriteHDF5::writeSetParents [protected] |
Definition at line 334 of file WriteHDF5.hpp.
bool moab::WriteHDF5::writeSets [protected] |
Flags indicating whether set data should be written. For the normal (non-parallel) case, these values depend only on whether there is any data to be written. For parallel meshes, opening the data table is collective, so the values must depend on whether any processor has meshsets to be written.
Definition at line 334 of file WriteHDF5.hpp.
bool moab::WriteHDF5::writeTagDense [protected] |
True if writing dense-formatted tag data.
Definition at line 366 of file WriteHDF5.hpp.
WriteUtilIface* moab::WriteHDF5::writeUtil [protected] |
Cached pointer to writeUtil interface.
Definition at line 293 of file WriteHDF5.hpp.