Relocated several functions from src/hdf5_lib.py into src/hdf5_ops.py, deleted a few others, and kept a copy of the previous version as src/hdf5_lib_part2.py
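Call sites that used the relocated helpers now reach them through src/hdf5_ops.py, as the annotate_root_dir hunks below show. A minimal sketch of an updated call site, reusing the output file and annotation keys that appear in main_mtable_h5_from_dataframe (the values shown are illustrative):

    import src.hdf5_ops as hdf5_ops

    # annotate_root_dir opens the file in 'r+' mode and merges the given
    # dictionary into the root group's attributes.
    hdf5_ops.annotate_root_dir(
        'output_files/thorsten_file_list.h5',
        {'1-Campaign name': '**SLS-Campaign-2023**', '2-Users': 'Thorsten, Luca, Zoe'},
    )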
src/hdf5_lib.py | 292
@@ -11,146 +11,8 @@ import logging
 
 import utils.g5505_utils as utils
 import instruments.readers.filereader_registry as filereader_registry
 
-def read_mtable_as_dataframe(filename):
-
-    """ Reconstruct a Matlab Table encoded in a .h5 file as a Pandas DataFrame. The input .h5 file
-    contains as many groups as rows in the Matlab Table, and each group stores dataset-like variables of the Table as
-    Datasets, while categorical and numerical variables in the table are represented as attributes of each group.
-
-    Note: the DataFrame is constructed columnwise to ensure homogeneous data columns.
-
-    Parameters:
-        filename (str): the .h5 file's name. It may include location-path information.
-
-    Returns:
-        output_dataframe (pd.DataFrame): Matlab's Table as a Pandas DataFrame.
-    """
-
-    # Construct the dataframe by filling out entries columnwise. This way we can ensure homogeneous data columns.
-    with h5py.File(filename,'r') as file:
-
-        # Define group's attributes and datasets. This should hold
-        # for all groups. TODO: implement verification and noncompliance error if needed.
-        group_list = list(file.keys())
-        group_attrs = list(file[group_list[0]].attrs.keys())
-
-        column_attr_names = [item[item.find('_')+1::] for item in group_attrs]
-        column_attr_names_idx = [int(item[4:(item.find('_'))]) for item in group_attrs]
-
-        group_datasets = list(file[group_list[0]].keys()) if not 'DS_EMPTY' in file[group_list[0]].keys() else []
-
-        column_dataset_names = [file[group_list[0]][item].attrs['column_name'] for item in group_datasets]
-        column_dataset_names_idx = [int(item[2:]) for item in group_datasets]
-
-        # Define data_frame as group_attrs + group_datasets
-        #pd_series_index = group_attrs + group_datasets
-        pd_series_index = column_attr_names + column_dataset_names
-
-        output_dataframe = pd.DataFrame(columns=pd_series_index,index=group_list)
-
-        tmp_col = []
-
-        for meas_prop in group_attrs + group_datasets:
-            if meas_prop in group_attrs:
-                column_label = meas_prop[meas_prop.find('_')+1:]
-                # Create numerical or categorical column from group's attributes
-                tmp_col = [file[group_key].attrs[meas_prop][()][0] for group_key in group_list]
-            else:
-                # Create dataset column from group's datasets
-                column_label = file[group_list[0] + '/' + meas_prop].attrs['column_name']
-                #tmp_col = [file[group_key + '/' + meas_prop][()][0] for group_key in group_list]
-                tmp_col = [file[group_key + '/' + meas_prop][()] for group_key in group_list]
-
-            output_dataframe.loc[:,column_label] = tmp_col
-
-    return output_dataframe
-
-def create_group_hierarchy(obj, df, columns):
-
-    """
-    Input:
-        obj (h5py.File or h5py.Group)
-        columns (list of strs): denote categorical columns in df to be used to define the hdf5 file's group hierarchy
-    """
-
-    if not columns:
-        return
-
-    # Determine categories associated with the first categorical column
-    unique_values = df[columns[0]].unique()
-
-    if obj.name == '/':
-        obj.attrs.create('count',df.shape[0])
-        obj.attrs.create('file_list',df['filename'].tolist())
-
-    for group_name in unique_values:
-
-        group = obj.require_group(group_name)
-        group.attrs.create('column_name', columns[0])
-
-        sub_df = df[df[columns[0]]==group_name] # same as df.loc[df[columns[0]]==group_name,:]
-        group.attrs.create('count',sub_df.shape[0])
-        group.attrs.create('file_list',sub_df['filename'].tolist())
-
-        # if group_name == 'MgO powder,H2O,HCl':
-        #     print('Here:',sub_df.shape)
-        create_group_hierarchy(group, sub_df, columns[1::])
-
-def is_nested_hierarchy(df) -> bool:
-    """Receives a dataframe with categorical columns and checks whether its rows form a nested group hierarchy.
-    That is, from bottom to top, subsequent hierarchical levels contain nested groups, and the lower-level groups belong to exactly one group in the higher level.
-    """
-    # TODO: generalize the code to check for deeper group hierarchies.
-    def are_nested(df, col, col_nxt):
-        """ Checks whether low-level (LL) groups can be separated in terms of high-level (HL) groups.
-        That is, elements of low-level groups do not belong to more than one HL group."""
-
-        # Compute higher-level group names/categories
-        memberships = df[col_nxt].unique().tolist()
-
-        # Compute upper-level group memberships of low-level groups
-        col_avg_memberships = df.groupby(col).mean()[col_nxt].unique()
-
-        # Check whether all low-level groups have an actual HL-group membership. That is, their avg. HL-group membership is in the HL-group memberships.
-        return all([col_avg_memberships[group_idx] in memberships for group_idx in range(len(col_avg_memberships))])
-
-    df_tmp = df.copy()
-
-    # Create relabeling map
-    for column_name in df_tmp.columns:
-        category_index = pd.Series(np.arange(len(df_tmp[column_name].unique())), index=df_tmp[column_name].unique())
-        df_tmp[column_name] = category_index[df_tmp[column_name].tolist()].tolist()
-
-    df_tmp.plot()
-
-    return all([are_nested(df_tmp,'level_'+str(i)+'_groups','level_'+str(i+1)+'_groups') for i in range(len(df_tmp.columns)-1)])
-
-def get_groups_at_a_level(file: h5py.File, level: int):
-
-    groups = []
-    def node_selector(name, obj):
-        if name.count('/') == level:
-            print(name)
-            groups.append(obj.name)
-
-    file.visititems(node_selector)
-    return groups
-
-def annotate_root_dir(filename,annotation_dict: dict):
-    with h5py.File(filename,'r+') as file:
-        file.attrs.update(annotation_dict)
-        #for key in annotation_dict:
-        #    file.attrs.create('metadata_'+key, annotation_dict[key])
-
 def transfer_file_dict_to_hdf5(h5file, group_name, file_dict):
     """
     Transfers data from a file_dict to an HDF5 file.
@@ -374,7 +236,7 @@ def save_processed_dataframe_to_hdf5(df, annotator, output_filename): # src_hdf5
         df[datetime_cols] = df[datetime_cols].map(str)
 
     # Convert dataframe to structured array
-    icad_data_table = utils.dataframe_to_np_structured_array(df)
+    icad_data_table = utils.convert_dataframe_to_np_structured_array(df)
 
     # Get metadata
     metadata_dict = annotator.get_metadata()
@@ -396,7 +258,7 @@ def save_processed_dataframe_to_hdf5(df, annotator, output_filename): # src_hdf5
 
     for key, value in data_level_attributes.items():
         if isinstance(value,dict):
-            data_level_attributes[key] = utils.convert_dict_to_np_structured_array(value)
+            data_level_attributes[key] = utils.convert_attrdict_to_np_structured_array(value)
 
 
     # Prepare file dictionary
@@ -426,148 +288,4 @@ def save_processed_dataframe_to_hdf5(df, annotator, output_filename): # src_hdf5
         h5file.attrs.update(project_level_attributes)
         transfer_file_dict_to_hdf5(h5file, '/', file_dict)
 
-def create_hdf5_file_from_dataframe(ofilename, input_data, approach : str, group_by_funcs : list, extract_attrs_func = None):
-
-    """ Creates an hdf5 file with as many levels as indicated by len(group_by_funcs).
-    Top level denotes the root group/directory and bottom level denotes measurement-level groups.
-
-    Parameters:
-        input_data (pd.DataFrame) :
-        group_by_funcs (list of callables or strs) : contains a list of callables or dataframe column names that will be used
-        to partition or group files from top to bottom.
-
-        Callables in the list must assign a categorical value to each file in a file list, internally represented as a DataFrame,
-        and they thus return a pd.Series of categorical values.
-
-        On the other hand, strings in the list refer to the names of categorical columns in input_data (when this is a DataFrame).
-
-    Returns:
-
-    """
-
-    # Check whether input_data is a valid file-system path or a DataFrame
-    is_valid_path = lambda x : os.path.exists(input_data) if isinstance(input_data,str) else False
-
-    if is_valid_path(input_data):
-
-        file_list = os.listdir(input_data)
-
-        # Navigates file-system folders/directories from top to bottom.
-        #for dirpath, dirnames, filenames in os.walk(input_data,topdown=True):
-
-        df = pd.DataFrame(file_list,columns=['filename'])
-        df = utils.augment_with_filetype(df)
-
-    elif isinstance(input_data,pd.DataFrame):
-        df = input_data.copy()
-    else:
-        raise ValueError("input_data must be either a valid file-system path or a dataframe.")
-
-    # Create group columns to form paths
-    if utils.is_callable_list(group_by_funcs):
-        grouping_cols = []
-        for i, func in enumerate(group_by_funcs):
-            grouping_cols.append('level_'+str(i)+'_groups')
-            df['level_'+str(i)+'_groups'] = func(df)
-    elif utils.is_str_list(group_by_funcs) and all([item in df.columns for item in group_by_funcs]):
-        grouping_cols = group_by_funcs
-    else:
-        raise ValueError("'group_by_funcs' must be a list of callables (or str) that takes input_data as input and returns a valid categorical output.")
-
-    # Concatenate group columns to form paths
-    df['group_path'] = df[grouping_cols].apply(lambda row: '/'.join(row.values.astype(str)), axis=1)
-
-    if approach == 'bottom-up':
-        # TODO: implement bottom-up approach
-        if is_nested_hierarchy(df.loc[:,grouping_cols]):
-            print('Do something')
-        else:
-            raise ValueError("group_by_funcs do not define a valid group hierarchy. Please reprocess the input_data or choose different grouping functions.")
-
-    elif approach == 'top-down':
-        # Check the length of group_by_funcs list is at most 2
-        #if len(group_by_funcs) > 2:
-        #    # TODO: extend to more than 2 callable elements.
-        #    raise ValueError("group_by_funcs can only contain at most two grouping elements.")
-
-        with h5py.File(ofilename, 'w') as file:
-
-            # Create groups based on concatenated paths
-            for path in df['group_path'].unique():
-                file.create_group(path)
-                # TODO: incorporate remaining cols (i.e., excluding the group columns) as either metadata or datasets
-
-            #create_group_hierarchy(file, df, grouping_cols)
-
-            file.attrs.create(name='depth', data=len(grouping_cols)-1)
-
-        print(':)')
-
-    else:
-        raise ValueError("'approach' must take values in ['top-down','bottom-up']")
-
-    #for i, value in enumerate(df['level_'+str(0)+'_groups'].unique().tolist()):
-
-    # 2. Validate group hierarchy: lower-level groups must be embedded in higher-level groups
-
-    # 3. Create hdf5 file with groups defined by the 'file_group' column
-    #
-    #    Add datasets to groups and set the groups' attributes
-
-    #return 0
-
-def main_mtable_h5_from_dataframe():
-
-    #import os
-    ROOT_DIR = os.path.abspath(os.curdir)
-    # Read BeamTimeMetaData.h5, containing Thorsten's Matlab Table
-    input_data_df = read_mtable_as_dataframe(os.path.join(ROOT_DIR,'input_files\\BeamTimeMetaData.h5'))
-
-    # Preprocess Thorsten's input_data dataframe so that it can be used to create a newer .h5 file
-    # under certain grouping specifications.
-    input_data_df = input_data_df.rename(columns = {'name':'filename'})
-    input_data_df = utils.augment_with_filenumber(input_data_df)
-    input_data_df = utils.augment_with_filetype(input_data_df)
-    input_data_df = utils.split_sample_col_into_sample_and_data_quality_cols(input_data_df)
-    input_data_df['lastModifiedDatestr'] = input_data_df['lastModifiedDatestr'].astype('datetime64[s]')
-
-    # Define grouping functions to be passed into create_hdf5_file function. These can also be set
-    # as strings referring to categorical columns in input_data_df.
-
-    test_grouping_funcs = True
-    if test_grouping_funcs:
-        group_by_sample = lambda x : utils.group_by_df_column(x,'sample')
-        group_by_type = lambda x : utils.group_by_df_column(x,'filetype')
-        #group_by_filenumber = lambda x : utils.group_by_df_column(x,'filenumber')
-    else:
-        group_by_sample = 'sample'
-        group_by_type = 'filetype'
-        group_by_filenumber = 'filenumber'
-
-    output_filename_path = os.path.join('output_files','thorsten_file_list.h5')
-
-    create_hdf5_file_from_dataframe(output_filename_path,input_data_df, 'top-down', group_by_funcs = [group_by_sample, group_by_type])
-    #create_hdf5_file_from_dataframe('test.h5',input_data_df, 'top-down', group_by_funcs = [group_by_sample, group_by_type, group_by_filenumber])
-
-    annotation_dict = {'1-Campaign name': '**SLS-Campaign-2023**',
-                       '2-Users':'Thorsten, Luca, Zoe',
-                       '3-Startdate': str(input_data_df['lastModifiedDatestr'].min()),
-                       '4-Enddate': str(input_data_df['lastModifiedDatestr'].max())
-                       }
-    annotate_root_dir(output_filename_path, annotation_dict)
-
-    #display_group_hierarchy_on_a_treemap(output_filename_path)
-
-    print(':)')
-
-if __name__ == '__main__':
-
-    #main()
-    main_mtable_h5_from_dataframe()
-    #main_5505()
-
-    print(':)')
-
+#if __name__ == '__main__':
@@ -10,6 +10,7 @@ import logging
 
 import utils.g5505_utils as utils
 import instruments.readers.filereader_registry as filereader_registry
+import src.hdf5_ops as hdf5_ops
 
 def read_mtable_as_dataframe(filename):
 
@@ -131,26 +132,7 @@ def is_nested_hierarchy(df) -> bool:
     return all([are_nested(df_tmp,'level_'+str(i)+'_groups','level_'+str(i+1)+'_groups') for i in range(len(df_tmp.columns)-1)])
 
 
-def get_groups_at_a_level(file: h5py.File, level: int):
-
-    groups = []
-    def node_selector(name, obj):
-        if name.count('/') == level:
-            print(name)
-            groups.append(obj.name)
-
-    file.visititems(node_selector)
-    return groups
-
-
-def annotate_root_dir(filename,annotation_dict: dict):
-    with h5py.File(filename,'r+') as file:
-        file.attrs.update(annotation_dict)
-        #for key in annotation_dict:
-        #    file.attrs.create('metadata_'+key, annotation_dict[key])
-
-
 def transfer_file_dict_to_hdf5(h5file, group_name, file_dict):
     """
     Transfers data from a file_dict to an HDF5 file.
@@ -351,7 +333,6 @@ def create_hdf5_file_from_filesystem_path(path_to_input_directory: str,
             #if key in h5file.attrs:
             #    del h5file.attrs[key]
             h5file.attrs.create(key, value)
-    #annotate_root_dir(output_filename,root_metadata_dict)
 
 
     #output_yml_filename_path = hdf5_vis.take_yml_snapshot_of_hdf5_file(output_filename)
@@ -557,7 +538,7 @@ def main_mtable_h5_from_dataframe():
                        '3-Startdate': str(input_data_df['lastModifiedDatestr'].min()),
                        '4-Enddate': str(input_data_df['lastModifiedDatestr'].max())
                        }
-    annotate_root_dir(output_filename_path, annotation_dict)
+    hdf5_ops.annotate_root_dir(output_filename_path, annotation_dict)
 
     #display_group_hierarchy_on_a_treemap(output_filename_path)
 
@@ -323,5 +323,24 @@ def to_yaml(input_filename_path,folder_depth: int = 4):
     return output_filename_tail+".yaml"
 
 
+def get_groups_at_a_level(file: h5py.File, level: int):
+
+    groups = []
+    def node_selector(name, obj):
+        if name.count('/') == level:
+            print(name)
+            groups.append(obj.name)
+
+    file.visititems(node_selector)
+    return groups
+
+
+def annotate_root_dir(filename,annotation_dict: dict):
+    with h5py.File(filename,'r+') as file:
+        file.attrs.update(annotation_dict)
+        #for key in annotation_dict:
+        #    file.attrs.create('metadata_'+key, annotation_dict[key])
+