# dima/src/hdf5_lib.py
import sys
import os
import shutil

root_dir = os.path.abspath(os.curdir)
sys.path.append(root_dir)

import pandas as pd
import numpy as np
import h5py
import yaml

import src.g5505_utils as utils
import src.hdf5_vis as hdf5_vis
import input_files.config_file as config_file
def progressBar(count_value, total, suffix=''):
    """Print a simple in-place progress bar to stdout."""
    bar_length = 100
    filled_up_length = int(round(bar_length * count_value / float(total)))
    percentage = round(100.0 * count_value / float(total), 1)
    bar = '=' * filled_up_length + '-' * (bar_length - filled_up_length)
    sys.stdout.write('[%s] %s%s ...%s\r' % (bar, percentage, '%', suffix))
    sys.stdout.flush()
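
# Minimal usage sketch for progressBar (illustrative values):
#   for i in range(1, 101):
#       progressBar(i, 100, 'copying files')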
def read_mtable_as_dataframe(filename):
""" Reconstruct a Matlab Table encoded in a .h5 file as a Pandas DataFrame. The input .h5 file
contains as many groups as rows in the Matlab Table, and each group stores dataset-like variables in the Table as
Datasets while categorical and numerical variables in the table are represented as attributes of each group.
Note: DataFrame is constructed columnwise to ensure homogenous data columns.
Parameters:
filename (str): .h5 file's name. It may include location-path information.
Returns:
output_dataframe (pd.DataFrame): Matlab's Table as a Pandas DataFrame
"""
#contructs dataframe by filling out entries columnwise. This way we can ensure homogenous data columns"""
with h5py.File(filename,'r') as file:
# Define group's attributes and datasets. This should hold
# for all groups. TODO: implement verification and noncompliance error if needed.
group_list = list(file.keys())
group_attrs = list(file[group_list[0]].attrs.keys())
#
column_attr_names = [item[item.find('_')+1::] for item in group_attrs]
column_attr_names_idx = [int(item[4:(item.find('_'))]) for item in group_attrs]
group_datasets = list(file[group_list[0]].keys()) if not 'DS_EMPTY' in file[group_list[0]].keys() else []
#
column_dataset_names = [file[group_list[0]][item].attrs['column_name'] for item in group_datasets]
column_dataset_names_idx = [int(item[2:]) for item in group_datasets]
        # Define the dataframe columns as group attrs + group datasets
        pd_series_index = column_attr_names + column_dataset_names
output_dataframe = pd.DataFrame(columns=pd_series_index,index=group_list)
tmp_col = []
for meas_prop in group_attrs + group_datasets:
if meas_prop in group_attrs:
column_label = meas_prop[meas_prop.find('_')+1:]
# Create numerical or categorical column from group's attributes
tmp_col = [file[group_key].attrs[meas_prop][()][0] for group_key in group_list]
else:
# Create dataset column from group's datasets
column_label = file[group_list[0] + '/' + meas_prop].attrs['column_name']
tmp_col = [file[group_key + '/' + meas_prop][()] for group_key in group_list]
output_dataframe.loc[:,column_label] = tmp_col
return output_dataframe
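
# Usage sketch for read_mtable_as_dataframe (path illustrative; any .h5 export of a
# Matlab Table with the layout described in the docstring should work):
#   df = read_mtable_as_dataframe(os.path.join('input_files', 'BeamTimeMetaData.h5'))
#   print(df.shape, list(df.columns))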
def create_group_hierarchy(obj, df, columns):
"""
Input:
obj (h5py.File or h5py.Group)
columns (list of strs): denote categorical columns in df to be used to define hdf5 file group hierarchy
"""
if not columns:
return
# Determine categories associated with first categorical column
unique_values = df[columns[0]].unique()
if obj.name == '/':
obj.attrs.create('count',df.shape[0])
obj.attrs.create('file_list',df['filename'].tolist())
for group_name in unique_values:
group = obj.require_group(group_name)
group.attrs.create('column_name', columns[0])
sub_df = df[df[columns[0]]==group_name] # same as df.loc[df[columns[0]]==group_name,:]
group.attrs.create('count',sub_df.shape[0])
group.attrs.create('file_list',sub_df['filename'].tolist())
create_group_hierarchy(group, sub_df, columns[1::])
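
# Sketch of create_group_hierarchy on a toy dataframe (column names illustrative):
#   df = pd.DataFrame({'sample':   ['A', 'A', 'B'],
#                      'filetype': ['txt', 'dat', 'txt'],
#                      'filename': ['f1.txt', 'f2.dat', 'f3.txt']})
#   with h5py.File('toy.h5', 'w') as f:
#       create_group_hierarchy(f, df, ['sample', 'filetype'])
#   # Resulting groups: /A/txt, /A/dat, /B/txt, each annotated with 'count' and 'file_list'.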
def is_nested_hierarchy(df) -> bool:
"""receives a dataframe with categorical columns and checks whether rows form a nested group hierarchy.
That is, from bottom to top, subsequent hierarchical levels contain nested groups. The lower level groups belong to exactly one group in the higher level group.
"""
# TODO: generalize the code to check for deeper group hierachies.
def are_nested(df, col, col_nxt):
""" Checks whether low level LL groups can be separated in terms of high level HL groups.
That is, elements of low-level groups do not belong to more than one HL group."""
# Compute higher level group names/categories
memberships = df[col_nxt].unique().tolist()
# Compute upper-level group memberships of low-level groups
col_avg_memberships = df.groupby(col).mean()[col_nxt].unique()
# Check whether all low-level groups have an actual hlg membership. That is, their avg. hlg membership is in the hlg membership.
return all([col_avg_memberships[group_idx] in memberships for group_idx in range(len(col_avg_memberships))])
df_tmp = df.copy()
# Create relabeling map
for column_name in df_tmp.columns:
category_index = pd.Series(np.arange(len(df_tmp[column_name].unique())), index=df_tmp[column_name].unique())
df_tmp[column_name] = category_index[df_tmp[column_name].tolist()].tolist()
return all([are_nested(df_tmp,'level_'+str(i)+'_groups','level_'+str(i+1)+'_groups') for i in range(len(df_tmp.columns)-1)])
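
# Sketch: in the toy dataframe below each level_0 group maps to exactly one level_1
# group, so is_nested_hierarchy returns True (values illustrative):
#   df = pd.DataFrame({'level_0_groups': ['A', 'A', 'B', 'B'],
#                      'level_1_groups': ['x', 'x', 'y', 'y']})
#   is_nested_hierarchy(df)  # True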
def get_attr_names(input_data):
# TODO: extend this to file-system paths
if not isinstance(input_data,pd.DataFrame):
raise ValueError("input_data must be a pd.DataFrame")
return input_data.columns
def get_parent_child_relationships(file: h5py.File):
nodes = ['/']
parent = ['']
    # TODO: maybe we should make this more general and not dependent on the 'file_list' attribute?
    values = [len(file.keys())]
def node_visitor(name,obj):
if name.count('/') <=2:
nodes.append(obj.name)
parent.append(obj.parent.name)
            if isinstance(obj, h5py.Dataset):
                values.append(1)
            else:
                try:
                    values.append(len(obj.keys()))
                except Exception:
                    values.append(0)
file.visititems(node_visitor)
return nodes, parent, values
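
# Usage sketch: the three returned lists line up with treemap-style plotting inputs,
# e.g., plotly.express.treemap (plotly is illustrative and not imported here):
#   with h5py.File('output.h5', 'r') as f:
#       nodes, parents, values = get_parent_child_relationships(f)
#   # fig = px.treemap(names=nodes, parents=parents, values=values)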
def get_groups_at_a_level(file: h5py.File, level: int):

    groups = []
    def node_selector(name, obj):
        if name.count('/') == level:
            groups.append(obj.name)

    file.visititems(node_selector)

    return groups
def format_group_names(names: list):

    formatted_names = []
    for name in names:
        idx = name.rfind('/')
        if len(name) > 1:
            formatted_names.append(name[idx+1::])
        else:
            formatted_names.append(name)

    return pd.DataFrame(formatted_names, columns=['formatted_names'], index=names)
def annotate_root_dir(filename,annotation_dict: dict):
with h5py.File(filename,'r+') as file:
for key in annotation_dict:
file.attrs.create('metadata_'+key, annotation_dict[key])
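
# Usage sketch for annotate_root_dir (keys/values illustrative): each dictionary entry
# becomes a root-level attribute prefixed with 'metadata_':
#   annotate_root_dir('output.h5', {'campaign': 'SLS-Campaign-2023', 'users': 'Thorsten, Luca, Zoe'})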
def create_hdf5_file_from_filesystem_path(config_param : dict ,
input_file_system_path : str,
select_dir_keywords = [],
select_file_keywords =[],
top_sub_dir_mask : bool = True):
"""
Creates an .h5 file with name "output_filename" that preserves the directory tree (or folder structure) of given a filesystem path.
When file and directory keywords are non-empty, the keywords enable filtering of directories and files that do not contain the specified keywords.
In the .h5 file, only files that are admissible file formats will be stored in the form of datasets and attributes.
Parameters:
ofilename (str):
input_file_system_path (str) : path to root directory, specified with forwards slashes, e.g., path/to/root
select_dir_keywords (list): default value [],
list of string elements to consider or select only directory paths that contain a word in 'select_dir_keywords'.
When empty, all directory paths are considered to be included in the hdf5 file group hierarchy.
select_file_keywords (list): default value [],
list of string elements to consider or select only files that contain a word in 'select_file_keywords'.
When empty, all files are considered to be stored in the hdf5 file.
Returns:
"""
# Ensure OS compliant paths and keywords
# TODO: validate config_param dict, make sure output_filename is a valid file_path
group_id = config_param['group_id']
user_initials = config_param['user_initials']
created_at = config_file.created_at()
output_dir = config_param['output_dir']
output_filename = output_dir + config_file.output_filename_tempate(group_id,created_at,user_initials)
admissible_file_ext_list = list(config_file.select_file_readers(group_id).keys())
if '/' in input_file_system_path:
input_file_system_path = input_file_system_path.replace('/',os.sep)
else:
raise ValueError('input_file_system_path needs to be specified using forward slashes "/".' )
for i, keyword in enumerate(select_dir_keywords):
select_dir_keywords[i] = keyword.replace('/',os.sep)
    # Visit each subdirectory, from the root directory defined by input_file_system_path down to the
    # lower-level directories.
    # Constrain walkable paths on the specified directory tree by allowing walks that start from the root
    # through subdirectories specified by select_dir_keywords. This improves efficiency, especially in
    # deep directory trees with many leaves.
paths = []
if top_sub_dir_mask:
for item in os.listdir(input_file_system_path):
if any([item in keyword for keyword in select_dir_keywords]):
paths.append(os.path.join(input_file_system_path,item))
else:
paths.append(input_file_system_path)
with h5py.File(output_filename, 'w') as h5file:
for item in paths:
root_dir = input_file_system_path
            # Create a dictionary with directory-files pairs, where files satisfy keyword and admissible-type constraints.
            # This requires an extra pass over the directory tree and additional memory for the dictionary, but it may be
            # useful to speed up the subsequent step and prune the resulting directory tree.
            # For each directory and/or subdirectory, keep files that satisfy the file_keyword constraints, and store
            # (directory_path, suitable files) relationships in a dictionary.
file_paths_dict = {}
check_file_ext = lambda filename: any([ext in filename for ext in admissible_file_ext_list])
for dirpath, _, filenames in os.walk(item,topdown=False):
file_paths_dict[dirpath] = []
# Check files that have an admissible extension and store them in admissible_filenames list
admissible_filenames = []
for fn in filenames:
if check_file_ext(fn):
admissible_filenames.append(fn)
if select_file_keywords: # when select_file_keywords = [], all files are considered
for filename in admissible_filenames:
                        # TODO: extend the file_reader library to cover more file types.
                        # Keep files whose name contains any of the file keywords
if any([keyword in filename for keyword in select_file_keywords]):
file_paths_dict[dirpath].append(filename)
else:
file_paths_dict[dirpath] = admissible_filenames
for node_number, node in enumerate(os.walk(item, topdown=True)):
dirpath, dirnames, filenames_list = node
                # Filter out files whose names do not contain a keyword in 'select_file_keywords'.
                # When select_file_keywords is empty, i.e., [], no filename filter is applied.
filtered_filename_list = file_paths_dict.get(dirpath,filenames_list.copy())
# Skip subdirectories that do not contain a keyword in the parameter 'select_dir_keywords' when it is nonempty
if select_dir_keywords:
if not any([item in dirpath for item in select_dir_keywords]):
continue
group_name = dirpath.replace(os.sep,'/')
group_name = group_name.replace(root_dir.replace(os.sep,'/') + '/', '/')
# flatten group name to one level
tmp_list = group_name.split('/')
if len(tmp_list)>2:
group_name = '/'.join([tmp_list[0],tmp_list[1]])
# Group hierarchy is implicitly defined by the forward slashes
if not group_name in h5file.keys():
h5file.create_group(group_name)
h5file[group_name].attrs.create(name='filtered_file_list',data=filtered_filename_list)
h5file[group_name].attrs.create(name='file_list',data=filenames_list)
else:
print(group_name,' was already created.')
# TODO: for each "admissible" file in filenames, create an associated dataset in the corresponding group (subdirectory)
for filenumber, filename in enumerate(filtered_filename_list):
# Get file extension (or file type)
file_name, file_ext = os.path.splitext(filename)
                    if 'h5' not in filename:
file_dict = config_file.select_file_readers(group_id)[file_ext](os.path.join(dirpath,filename))
if not file_dict:
continue
try:
# Create group and add their attributes
h5file[group_name].create_group(name=file_dict['name'])
for key in file_dict['attributes_dict'].keys():
# Represent string values as fixed length strings in the HDF5 file, which need
# to be decoded as string when we read them. It provides better control than variable strings,
# at the expense of flexibility.
# https://docs.h5py.org/en/stable/strings.html
value = file_dict['attributes_dict'][key]
if isinstance(value,str):
utf8_type = h5py.string_dtype('utf-8', len(value))
value = np.array(value.encode('utf-8'),dtype=utf8_type)
h5file[group_name][file_dict['name']].attrs.create(name=key,
data=value)
# Add datasets to just created group
for dataset in file_dict['datasets']:
                                h5file[group_name][file_dict['name']].create_dataset(name=dataset['name'],
                                                                                     data=dataset['data'],
                                                                                     shape=dataset['shape'])
except Exception as inst:
# TODO: log when a file could not be stored as a dataset
print(inst)
else:
config_file.select_file_readers(group_id)[file_ext](source_file_path = os.path.join(dirpath,filename),
dest_file_obj = h5file,
dest_group_name = group_name +'/'+filename)
                    progressBar(filenumber + 1, len(filtered_filename_list), 'Uploading files in ' + dirpath)
output_yml_filename_path = hdf5_vis.take_yml_snapshot_of_hdf5_file(output_filename)
return output_filename, output_yml_filename_path
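
# Usage sketch (config values illustrative; the real ones live in input_files/config_file.py):
#   config_param = {'group_id': 5505, 'user_initials': 'NG', 'output_dir': 'output_files/'}
#   h5_path, yml_path = create_hdf5_file_from_filesystem_path(config_param,
#                                                             'path/to/root',
#                                                             select_dir_keywords=['gas'],
#                                                             select_file_keywords=['20220726'])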
def create_hdf5_file_from_dataframe(ofilename, input_data, approach : str, group_by_funcs : list, extract_attrs_func = None):
""" Creates an hdf5 file with as many levels as indicated by len(group_by_funcs).
Top level denotes the root group/directory and bottom level denotes measurement level groups.
    Parameters:
        ofilename (str): output .h5 file name.
        input_data (pd.DataFrame | file-system path):
        approach (str): either 'top-down' or 'bottom-up'.
        group_by_funcs (list of callables or strs): contains a list of callables or dataframe column names that will be used
            to partition or group files from top to bottom.
            Callables in the list must assign a categorical value to each file in a file list, internally represented as a DataFrame,
            and thus return a pd.Series of categorical values.
            On the other hand, strings in the list refer to the names of categorical columns in input_data (when this is a DataFrame).

    Returns:

    """
    # Check whether input_data is a valid file-system path or a DataFrame
    is_valid_path = lambda x: os.path.exists(x) if isinstance(x, str) else False

    if is_valid_path(input_data):
        file_list = os.listdir(input_data)
        # TODO: navigate file-system folders/directories from top to bottom, e.g., with os.walk(input_data, topdown=True)
        df = pd.DataFrame(file_list, columns=['filename'])
        df = utils.augment_with_filetype(df)
elif isinstance(input_data,pd.DataFrame):
df = input_data.copy()
else:
raise ValueError("input_data must be either a valid file-system path or a dataframe.")
#
if utils.is_callable_list(group_by_funcs):
grouping_cols = []
for i, func in enumerate(group_by_funcs):
grouping_cols.append('level_'+str(i)+'_groups')
df['level_'+str(i)+'_groups'] = func(df)
elif utils.is_str_list(group_by_funcs) and all([item in df.columns for item in group_by_funcs]):
grouping_cols = group_by_funcs
else:
raise ValueError("'group_by_funcs' must be a list of callables (or str) that takes input_data as input an returns a valid categorical output.")
    if approach == 'bottom-up':
        # TODO: implement the bottom-up approach
if is_nested_hierarchy(df.loc[:,grouping_cols]):
print('Do something')
else:
raise ValueError("group_by_funcs do not define a valid group hierarchy. Please reprocess the input_data or choose different grouping functions.")
elif approach == 'top-down':
with h5py.File(ofilename, 'w') as file:
create_group_hierarchy(file, df, grouping_cols)
file.attrs.create(name='depth', data=len(grouping_cols)-1)
else:
raise ValueError("'approach' must take values in ['top-down','bottom-up']")
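
# Usage sketch for create_hdf5_file_from_dataframe (toy dataframe; in practice
# input_data comes from, e.g., read_mtable_as_dataframe):
#   df = pd.DataFrame({'filename': ['f1.txt', 'f2.dat', 'f3.txt'],
#                      'sample':   ['A', 'A', 'B'],
#                      'filetype': ['txt', 'dat', 'txt']})
#   create_hdf5_file_from_dataframe('toy.h5', df, 'top-down', group_by_funcs=['sample', 'filetype'])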
def main():

    inputfile_dir = config_file.inputfile_dir                 # e.g., '\\\\fs03\\Iron_Sulphate'
    select_dir_keywords = config_file.select_dir_keywords     # e.g., ['gas','smps\\20220726','htof\\2022.07.26','ptr\\2022.07.26','ams\\2022.07.26']
    select_file_keywords = config_file.select_file_keywords   # e.g., ['20220726','2022.07.26']

    output_filename_path = os.path.join(config_file.outputfile_dir, config_file.output_filename)

    if not os.path.exists(output_filename_path):
        # TODO: update this call; create_hdf5_file_from_filesystem_path now derives the output
        # filename from a config_param dict (group_id, user_initials, output_dir) passed as its
        # first argument, so passing output_filename_path here no longer matches the signature.
        create_hdf5_file_from_filesystem_path(output_filename_path, inputfile_dir, select_dir_keywords, select_file_keywords)

    # hdf5_vis.display_group_hierarchy_on_a_treemap(output_filename_path)
    output_yml_filename_path = hdf5_vis.take_yml_snapshot_of_hdf5_file(output_filename_path)

    return output_filename_path, output_yml_filename_path
def main_mtable_h5_from_dataframe():

    ROOT_DIR = os.path.abspath(os.curdir)
    # Read BeamTimeMetaData.h5, containing Thorsten's Matlab Table
    input_data_df = read_mtable_as_dataframe(os.path.join(ROOT_DIR, 'input_files', 'BeamTimeMetaData.h5'))

    # Preprocess Thorsten's input_data dataframe so that it can be used to create a new .h5 file
    # under certain grouping specifications.
    input_data_df = input_data_df.rename(columns={'name': 'filename'})
    input_data_df = utils.augment_with_filenumber(input_data_df)
    input_data_df = utils.augment_with_filetype(input_data_df)
    input_data_df = utils.split_sample_col_into_sample_and_data_quality_cols(input_data_df)
    input_data_df['lastModifiedDatestr'] = input_data_df['lastModifiedDatestr'].astype('datetime64[s]')

    # Define grouping functions to be passed into the create_hdf5_file function. These can also be set
    # as strings referring to categorical columns in input_data_df.
test_grouping_funcs = True
if test_grouping_funcs:
group_by_sample = lambda x : utils.group_by_df_column(x,'sample')
group_by_type = lambda x : utils.group_by_df_column(x,'filetype')
#group_by_filenumber = lambda x : utils.group_by_df_column(x,'filenumber')
else:
group_by_sample = 'sample'
group_by_type = 'filetype'
group_by_filenumber = 'filenumber'
output_filename_path = os.path.join(config_file.outputfile_dir,'thorsten_file_list.h5')
create_hdf5_file_from_dataframe(output_filename_path,input_data_df, 'top-down', group_by_funcs = [group_by_sample, group_by_type])
#create_hdf5_file_from_dataframe('test.h5',input_data_df, 'top-down', group_by_funcs = [group_by_sample, group_by_type, group_by_filenumber])
annotation_dict = {'1-Campaign name': '**SLS-Campaign-2023**',
'2-Users':'Thorsten, Luca, Zoe',
'3-Startdate': str(input_data_df['lastModifiedDatestr'].min()),
'4-Enddate': str(input_data_df['lastModifiedDatestr'].max())
}
annotate_root_dir(output_filename_path, annotation_dict)
#display_group_hierarchy_on_a_treemap(output_filename_path)
if __name__ == '__main__':

    main()
    #main_mtable_h5_from_dataframe()