# dima/src/hdf5_lib.py
import sys
import os
root_dir = os.path.abspath(os.curdir)
sys.path.append(root_dir)
import pandas as pd
import numpy as np
#import g5505_file_reader
import src.g5505_utils as utils
#import input_files.config_file as config_file
import src.hdf5_vis as hdf5_vis
import src.g5505_file_reader as g5505f_reader
import h5py
import yaml
import shutil
import logging
# Define the mapping from file extension to its file-reader function
ext_to_reader_dict = {'.ibw': g5505f_reader.read_xps_ibw_file_as_dict,
'.txt': lambda a1: g5505f_reader.read_txt_files_as_dict(a1,False),
'.TXT': lambda a1: g5505f_reader.read_txt_files_as_dict(a1,False),
'.dat': lambda a1: g5505f_reader.read_txt_files_as_dict(a1,False),
'.h5': lambda a1,a2,a3: g5505f_reader.copy_file_in_group(a1,a2,a3,False)}
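# Illustrative dispatch (the file path below is a placeholder): the extension selects the reader, e.g.
#   file_dict = ext_to_reader_dict['.txt']('path/to/file.txt')
# Non-'.h5' readers return a dictionary with 'name', 'attributes_dict', and 'datasets' keys that
# transfer_file_dict_to_hdf5() writes into an open HDF5 file; the '.h5' entry instead copies the
# source HDF5 file into a destination group of an open destination file.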
def progressBar(count_value, total, suffix=''):
    """Display a simple in-place progress bar on stdout."""
    bar_length = 100
filled_up_Length = int(round(bar_length* count_value / float(total)))
percentage = round(100.0 * count_value/float(total),1)
bar = '=' * filled_up_Length + '-' * (bar_length - filled_up_Length)
sys.stdout.write('[%s] %s%s ...%s\r' %(bar, percentage, '%', suffix))
sys.stdout.flush()
def read_mtable_as_dataframe(filename):
""" Reconstruct a Matlab Table encoded in a .h5 file as a Pandas DataFrame. The input .h5 file
contains as many groups as rows in the Matlab Table, and each group stores dataset-like variables in the Table as
Datasets while categorical and numerical variables in the table are represented as attributes of each group.
Note: DataFrame is constructed columnwise to ensure homogenous data columns.
Parameters:
filename (str): .h5 file's name. It may include location-path information.
Returns:
output_dataframe (pd.DataFrame): Matlab's Table as a Pandas DataFrame
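
    Example (illustrative; the path matches the file used by main_mtable_h5_from_dataframe at the bottom of this module):
        df = read_mtable_as_dataframe(os.path.join('input_files', 'BeamTimeMetaData.h5'))
        print(df.shape, list(df.columns))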
"""
    # Constructs the DataFrame by filling out entries column-wise. This way we can ensure homogeneous data columns.
with h5py.File(filename,'r') as file:
# Define group's attributes and datasets. This should hold
# for all groups. TODO: implement verification and noncompliance error if needed.
group_list = list(file.keys())
group_attrs = list(file[group_list[0]].attrs.keys())
#
column_attr_names = [item[item.find('_')+1::] for item in group_attrs]
column_attr_names_idx = [int(item[4:(item.find('_'))]) for item in group_attrs]
group_datasets = list(file[group_list[0]].keys()) if not 'DS_EMPTY' in file[group_list[0]].keys() else []
#
column_dataset_names = [file[group_list[0]][item].attrs['column_name'] for item in group_datasets]
column_dataset_names_idx = [int(item[2:]) for item in group_datasets]
# Define data_frame as group_attrs + group_datasets
#pd_series_index = group_attrs + group_datasets
pd_series_index = column_attr_names + column_dataset_names
output_dataframe = pd.DataFrame(columns=pd_series_index,index=group_list)
tmp_col = []
for meas_prop in group_attrs + group_datasets:
if meas_prop in group_attrs:
column_label = meas_prop[meas_prop.find('_')+1:]
# Create numerical or categorical column from group's attributes
tmp_col = [file[group_key].attrs[meas_prop][()][0] for group_key in group_list]
else:
# Create dataset column from group's datasets
column_label = file[group_list[0] + '/' + meas_prop].attrs['column_name']
#tmp_col = [file[group_key + '/' + meas_prop][()][0] for group_key in group_list]
tmp_col = [file[group_key + '/' + meas_prop][()] for group_key in group_list]
output_dataframe.loc[:,column_label] = tmp_col
return output_dataframe
def create_group_hierarchy(obj, df, columns):
"""
    Parameters:
        obj (h5py.File or h5py.Group): file or group under which the group hierarchy is created.
        df (pd.DataFrame): table of files; must contain a 'filename' column as well as the columns listed in 'columns'.
        columns (list of str): categorical columns in df used to define the HDF5 group hierarchy, ordered from top to bottom level.
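
    Example (illustrative; assumes an open h5py.File and a DataFrame containing 'filename', 'sample', and 'filetype' columns):
        with h5py.File('example.h5', 'w') as f:
            create_group_hierarchy(f, df, ['sample', 'filetype'])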
"""
if not columns:
return
# Determine categories associated with first categorical column
unique_values = df[columns[0]].unique()
if obj.name == '/':
obj.attrs.create('count',df.shape[0])
obj.attrs.create('file_list',df['filename'].tolist())
for group_name in unique_values:
group = obj.require_group(group_name)
group.attrs.create('column_name', columns[0])
sub_df = df[df[columns[0]]==group_name] # same as df.loc[df[columns[0]]==group_name,:]
group.attrs.create('count',sub_df.shape[0])
group.attrs.create('file_list',sub_df['filename'].tolist())
# if group_name == 'MgO powder,H2O,HCl':
# print('Here:',sub_df.shape)
create_group_hierarchy(group, sub_df, columns[1::])
def is_nested_hierarchy(df) -> bool:
"""receives a dataframe with categorical columns and checks whether rows form a nested group hierarchy.
That is, from bottom to top, subsequent hierarchical levels contain nested groups. The lower level groups belong to exactly one group in the higher level group.
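
    Illustrative check (column names follow the 'level_<i>_groups' convention used by create_hdf5_file_from_dataframe):
        df = pd.DataFrame({'level_0_groups': ['a', 'a', 'b', 'b'],
                           'level_1_groups': ['x', 'x', 'y', 'y']})
        is_nested_hierarchy(df)  # True: each level-0 group belongs to exactly one level-1 group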
"""
    # TODO: generalize the code to check for deeper group hierarchies.
def are_nested(df, col, col_nxt):
""" Checks whether low level LL groups can be separated in terms of high level HL groups.
That is, elements of low-level groups do not belong to more than one HL group."""
# Compute higher level group names/categories
memberships = df[col_nxt].unique().tolist()
# Compute upper-level group memberships of low-level groups
col_avg_memberships = df.groupby(col).mean()[col_nxt].unique()
# Check whether all low-level groups have an actual hlg membership. That is, their avg. hlg membership is in the hlg membership.
return all([col_avg_memberships[group_idx] in memberships for group_idx in range(len(col_avg_memberships))])
df_tmp = df.copy()
# Create relabeling map
for column_name in df_tmp.columns:
category_index = pd.Series(np.arange(len(df_tmp[column_name].unique())), index=df_tmp[column_name].unique())
df_tmp[column_name] = category_index[df_tmp[column_name].tolist()].tolist()
    # df_tmp.plot()  # debugging aid; not needed for the nestedness check
return all([are_nested(df_tmp,'level_'+str(i)+'_groups','level_'+str(i+1)+'_groups') for i in range(len(df_tmp.columns)-1)])
def get_attr_names(input_data):
# TODO: extend this to file-system paths
if not isinstance(input_data,pd.DataFrame):
raise ValueError("input_data must be a pd.DataFrame")
return input_data.columns
def get_parent_child_relationships(file: h5py.File):
nodes = ['/']
parent = ['']
#values = [file.attrs['count']]
# TODO: maybe we should make this more general and not dependent on file_list attribute?
#if 'file_list' in file.attrs.keys():
# values = [len(file.attrs['file_list'])]
#else:
# values = [1]
values = [len(file.keys())]
def node_visitor(name,obj):
if name.count('/') <=2:
nodes.append(obj.name)
parent.append(obj.parent.name)
#nodes.append(os.path.split(obj.name)[1])
#parent.append(os.path.split(obj.parent.name)[1])
if isinstance(obj,h5py.Dataset):# or not 'file_list' in obj.attrs.keys():
values.append(1)
else:
print(obj.name)
try:
values.append(len(obj.keys()))
                except Exception:
values.append(0)
file.visititems(node_visitor)
return nodes, parent, values
def get_groups_at_a_level(file: h5py.File, level: int):
groups = []
def node_selector(name, obj):
if name.count('/') == level:
print(name)
groups.append(obj.name)
file.visititems(node_selector)
#file.visititems()
return groups
def format_group_names(names: list):
formated_names = []
for name in names:
idx = name.rfind('/')
if len(name) > 1:
formated_names.append(name[idx+1::])
else:
formated_names.append(name)
return pd.DataFrame(formated_names,columns=['formated_names'],index=names)
def annotate_root_dir(filename,annotation_dict: dict):
with h5py.File(filename,'r+') as file:
file.attrs.update(annotation_dict)
#for key in annotation_dict:
# file.attrs.create('metadata_'+key, annotation_dict[key])
def is_valid_directory_path(dirpath,select_dir_keywords):
    """Return True if dirpath matches at least one keyword in select_dir_keywords, or if the keyword list is empty.
    Multi-component keywords (containing os.sep) match when all of their components appear in dirpath."""
    activated_keywords = []
if select_dir_keywords:
for item in select_dir_keywords:
if len(item.split(os.sep))>1:
is_sublist = all([x in dirpath.split(os.sep) for x in item.split(os.sep)])
activated_keywords.append(is_sublist)
else:
activated_keywords.append(item in dirpath)
else:
activated_keywords.append(True)
return any(activated_keywords)
def copy_directory_with_contraints(input_dir_path, output_dir_path, select_dir_keywords, select_file_keywords, allowed_file_extensions):
"""
Copies files from input_dir_path to output_dir_path based on specified constraints.
Parameters:
input_dir_path (str): Path to the input directory.
output_dir_path (str): Path to the output directory.
select_dir_keywords (list): List of keywords for selecting directories.
select_file_keywords (list): List of keywords for selecting files.
allowed_file_extensions (list): List of allowed file extensions.
Returns:
        path_to_files_dict (dict): dictionary mapping each target directory path to the list of file names copied into it.
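
    Example (illustrative paths; the keywords mirror the commented-out main() at the bottom of this module):
        path_to_files_dict = copy_directory_with_contraints(
            'path/to/input_dir', 'output_files/experiment', ['gas', 'smps'], ['20220726'], ['.txt', '.dat'])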
"""
date = utils.created_at()
log_dir='logs/'
utils.setup_logging(log_dir, f"copy_directory_with_contraints_{date}.log")
def has_allowed_extension(filename):
return os.path.splitext(filename)[1] in allowed_file_extensions
def file_is_selected(filename):
return any(keyword in filename for keyword in select_file_keywords) if select_file_keywords else True
# Collect paths of directories, which are directly connected to the root dir and match select_dir_keywords
paths = []
if select_dir_keywords:
for item in os.listdir(input_dir_path): #Path(input_dir_path).iterdir():
if any([item in keyword for keyword in select_dir_keywords]):
paths.append(os.path.join(input_dir_path,item))
else:
paths.append(input_dir_path) #paths.append(Path(input_dir_path))
ROOT_DIR = input_dir_path
path_to_files_dict = {} # Dictionary to store directory-file pairs satisfying constraints
for subpath in paths:
for dirpath, _, filenames in os.walk(subpath,topdown=False):
# Reduce filenames to those that are admissible
admissible_filenames = [filename for filename in filenames if has_allowed_extension(filename) and file_is_selected(filename)]
if admissible_filenames: # Only create directory if there are files to copy
relative_dirpath = os.path.relpath(dirpath, ROOT_DIR)
target_dirpath = os.path.join(output_dir_path, relative_dirpath)
#path_to_files_dict[dirpath] = admissible_filenames
path_to_files_dict[target_dirpath] = admissible_filenames
os.makedirs(target_dirpath, exist_ok=True)
for filename in admissible_filenames:
src_file_path = os.path.join(dirpath, filename)
dest_file_path = os.path.join(target_dirpath, filename)
try:
shutil.copy2(src_file_path, dest_file_path)
except Exception as e:
logging.error("Failed to copy %s: %s", src_file_path, e)
return path_to_files_dict
def transfer_file_dict_to_hdf5(h5file, group_name, file_dict):
"""
Transfers data from a file_dict to an HDF5 file.
Parameters:
h5file (h5py.File): HDF5 file object where the data will be written.
group_name (str): Name of the HDF5 group where data will be stored.
file_dict (dict): Dictionary containing file data to be transferred.
Required structure:
{
'name': str,
'attributes_dict': dict,
'datasets': [
{
'name': str,
'data': array-like,
'shape': tuple,
'attributes': dict (optional)
},
...
]
}
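
    Example (illustrative; assumes 'h5file' is an h5py.File opened in write or append mode):
        file_dict = {'name': 'example_file',
                     'attributes_dict': {'instrument': 'unknown'},
                     'datasets': [{'name': 'data_table',
                                   'data': np.zeros((3, 2)),
                                   'shape': (3, 2)}]}
        transfer_file_dict_to_hdf5(h5file, '/', file_dict)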
"""
if not file_dict:
return
try:
        # Create the group and add its attributes
group = h5file[group_name].create_group(name=file_dict['name'])
# Add group attributes
group.attrs.update(file_dict['attributes_dict'])
# Add datasets to the just created group
for dataset in file_dict['datasets']:
dataset_obj = group.create_dataset(
name=dataset['name'],
data=dataset['data'],
shape=dataset['shape']
)
# Add dataset's attributes
attributes = dataset.get('attributes', {})
dataset_obj.attrs.update(attributes)
except Exception as inst:
print(inst)
logging.error('Failed to transfer data into HDF5: %s', inst)
def create_hdf5_file_from_filesystem_path(output_filename : str,
input_file_system_path : str,
select_dir_keywords = [],
select_file_keywords =[],
top_sub_dir_mask : bool = True,
root_metadata_dict : dict = {}):
"""
Creates an .h5 file with name "output_filename" that preserves the directory tree (or folder structure)
of a given filesystem path. When file and directory keywords are non-empty, the keywords enable filtering
of directory paths and file paths that do not contain the specified keywords.
    The data integration capabilities are limited by our file readers, which can only access data from a list of
    admissible file formats. This list, however, can be extended. Directories become groups in the resulting HDF5 file,
    and each file is stored as a composite object: a group holding the file's datasets and attributes.
Parameters:
output_filename (str): Name of the output HDF5 file.
input_file_system_path (str): Path to root directory, specified with forward slashes, e.g., path/to/root.
select_dir_keywords (list): List of string elements to consider or select only directory paths that contain
a word in 'select_dir_keywords'. When empty, all directory paths are considered
to be included in the HDF5 file group hierarchy.
select_file_keywords (list): List of string elements to consider or select only files that contain a word in
'select_file_keywords'. When empty, all files are considered to be stored in the HDF5 file.
top_sub_dir_mask (bool): Mask for top-level subdirectories.
root_metadata_dict (dict): Metadata to include at the root level of the HDF5 file.
Returns:
str: Path to the created HDF5 file.
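
    Example (illustrative; the keywords mirror the commented-out main() at the bottom of this module):
        create_hdf5_file_from_filesystem_path('output_files/experiment.h5',
                                              'path/to/root',
                                              select_dir_keywords=['gas', 'smps/20220726'],
                                              select_file_keywords=['20220726'])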
"""
allowed_file_extensions = list(ext_to_reader_dict.keys()) # list(config_file.select_file_readers(group_id).keys())
if '/' in input_file_system_path:
input_file_system_path = input_file_system_path.replace('/',os.sep)
else:
raise ValueError('input_file_system_path needs to be specified using forward slashes "/".' )
    # Normalize keyword separators without mutating the caller's list
    select_dir_keywords = [keyword.replace('/', os.sep) for keyword in select_dir_keywords]
# Copy input_directory into the output_dir_path, and work with it from now on
output_dir_path = os.path.splitext(output_filename)[0].replace('/',os.sep)
path_to_filenames_dict = copy_directory_with_contraints(input_file_system_path,
output_dir_path,
select_dir_keywords,
select_file_keywords,
allowed_file_extensions)
# Set input_directory as copied input directory
root_dir = output_dir_path
with h5py.File(output_filename, 'w') as h5file:
number_of_dirs = len(path_to_filenames_dict.keys())
dir_number = 1
for dirpath, filtered_filenames_list in path_to_filenames_dict.items():
start_message = f'Starting to transfer files in directory: {dirpath}'
end_message = f'\nCompleted transferring files in directory: {dirpath}'
# Print and log the start message
print(start_message)
logging.info(start_message)
# Check if dirpath is valid. TODO: This is perhaps redundant by design of path_to_filenames_dict.
if not is_valid_directory_path(dirpath,select_dir_keywords):
continue
# Check if filtered_filenames_list is nonempty. TODO: This is perhaps redundant by design of path_to_filenames_dict.
if not filtered_filenames_list:
continue
group_name = dirpath.replace(os.sep,'/')
group_name = group_name.replace(root_dir.replace(os.sep,'/') + '/', '/')
# Flatten group name to one level
offset = sum([len(i.split(os.sep)) if i in dirpath else 0 for i in select_dir_keywords])
tmp_list = group_name.split('/')
if len(tmp_list) > offset+1:
group_name = '/'.join([tmp_list[i] for i in range(offset+1)])
# Group hierarchy is implicitly defined by the forward slashes
if not group_name in h5file.keys():
h5file.create_group(group_name)
#h5file[group_name].attrs.create(name='filtered_file_list',data=convert_string_to_bytes(filtered_filename_list))
#h5file[group_name].attrs.create(name='file_list',data=convert_string_to_bytes(filenames_list))
else:
print(group_name,' was already created.')
for filenumber, filename in enumerate(filtered_filenames_list):
file_ext = os.path.splitext(filename)[1]
#try:
                if file_ext != '.h5':
#file_dict = config_file.select_file_readers(group_id)[file_ext](os.path.join(dirpath,filename))
file_dict = ext_to_reader_dict[file_ext](os.path.join(dirpath,filename))
transfer_file_dict_to_hdf5(h5file, group_name, file_dict)
else:
source_file_path = os.path.join(dirpath,filename)
dest_file_obj = h5file
dest_group_name = f'{group_name}/{filename}' #group_name +'/'+filename
ext_to_reader_dict[file_ext](source_file_path, dest_file_obj, dest_group_name)
# Update the progress bar and log the end message
progressBar(dir_number, number_of_dirs, end_message)
logging.info(end_message)
dir_number = dir_number + 1
if len(root_metadata_dict.keys())>0:
annotate_root_dir(output_filename,root_metadata_dict)
#output_yml_filename_path = hdf5_vis.take_yml_snapshot_of_hdf5_file(output_filename)
return output_filename #, output_yml_filename_path
import src.metadata_review_lib as metadata_lib
def save_processed_dataframe_to_hdf5(df, annotator, output_filename): # src_hdf5_path, script_date, script_name):
"""
Save processed dataframe columns with annotations to an HDF5 file.
Parameters:
df (pd.DataFrame): DataFrame containing processed time series.
        annotator (object): Annotator object exposing a get_metadata() method.
        output_filename (str): Path to the output HDF5 file.
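
    Example (illustrative; 'annotator' stands for any object whose get_metadata() returns the nested metadata dictionary expected below):
        save_processed_dataframe_to_hdf5(processed_df, annotator, 'output_files/processed_data.h5')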
"""
# Convert datetime columns to string
datetime_cols = df.select_dtypes(include=['datetime64']).columns
if list(datetime_cols):
df[datetime_cols] = df[datetime_cols].map(str)
# Convert dataframe to structured array
icad_data_table = utils.dataframe_to_np_structured_array(df)
# Get metadata
metadata_dict = annotator.get_metadata()
# Prepare project level attributes to be added at the root level
project_level_attributes = metadata_dict['metadata']['project']
# Prepare high-level attributes
high_level_attributes = {
'parent_files': metadata_dict['parent_files'],
**metadata_dict['metadata']['sample'],
**metadata_dict['metadata']['environment'],
**metadata_dict['metadata']['instruments']
}
# Prepare data level attributes
data_level_attributes = metadata_dict['metadata']['datasets']
for key, value in data_level_attributes.items():
if isinstance(value,dict):
data_level_attributes[key] = metadata_lib.parse_attribute(value)
# Prepare file dictionary
file_dict = {
'name': project_level_attributes['processing_file'],
'attributes_dict': high_level_attributes,
'datasets': [{
'name': "data_table",
'data': icad_data_table,
'shape': icad_data_table.shape,
'attributes': data_level_attributes
}]
}
# Check if the file exists
if os.path.exists(output_filename):
mode = "a"
print(f"File {output_filename} exists. Opening in append mode.")
else:
mode = "w"
print(f"File {output_filename} does not exist. Creating a new file.")
# Write to HDF5
with h5py.File(output_filename, mode) as h5file:
# Add project level attributes at the root/top level
h5file.attrs.update(project_level_attributes)
transfer_file_dict_to_hdf5(h5file, '/', file_dict)
def create_hdf5_file_from_dataframe(ofilename, input_data, approach : str, group_by_funcs : list, extract_attrs_func = None):
""" Creates an hdf5 file with as many levels as indicated by len(group_by_funcs).
Top level denotes the root group/directory and bottom level denotes measurement level groups.
    Parameters:
        ofilename (str): name of the output HDF5 file.
        input_data (pd.DataFrame | file-system path):
        approach (str): either 'top-down' or 'bottom-up'.
        group_by_funcs (list of callables or strs): a list of callables or of the dataframe's column names, used
                        to partition or group files from top to bottom.
                        Callables in the list must assign a categorical value to each file in a file list, internally represented as a DataFrame,
                        and thus return a pd.Series of categorical values.
                        Strings in the list, on the other hand, refer to names of categorical columns in input_data (when this is a DataFrame).
    Returns:
        None. In the 'top-down' approach, the group hierarchy is written to 'ofilename'.
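
    Example (illustrative; mirrors main_mtable_h5_from_dataframe at the bottom of this module):
        group_by_sample = lambda x: utils.group_by_df_column(x, 'sample')
        group_by_type = lambda x: utils.group_by_df_column(x, 'filetype')
        create_hdf5_file_from_dataframe('output_files/thorsten_file_list.h5', input_data_df,
                                        'top-down', group_by_funcs=[group_by_sample, group_by_type])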
"""
# Check whether input_data is a valid file-system path or a DataFrame
    is_valid_path = lambda x: os.path.exists(x) if isinstance(x, str) else False
if is_valid_path(input_data):
file_list = os.listdir(input_data)
        # Navigates file-system folders/directories from top to bottom.
        #for dirpath, dirnames, filenames in os.walk(input_data,topdown=True):
        df = pd.DataFrame(file_list, columns=['filename'])
        df = utils.augment_with_filetype(df)
elif isinstance(input_data,pd.DataFrame):
df = input_data.copy()
else:
raise ValueError("input_data must be either a valid file-system path or a dataframe.")
#
if utils.is_callable_list(group_by_funcs):
grouping_cols = []
for i, func in enumerate(group_by_funcs):
grouping_cols.append('level_'+str(i)+'_groups')
df['level_'+str(i)+'_groups'] = func(df)
elif utils.is_str_list(group_by_funcs) and all([item in df.columns for item in group_by_funcs]):
grouping_cols = group_by_funcs
else:
raise ValueError("'group_by_funcs' must be a list of callables (or str) that takes input_data as input an returns a valid categorical output.")
    if approach == 'bottom-up':
        # TODO: implement the bottom-up approach
if is_nested_hierarchy(df.loc[:,grouping_cols]):
print('Do something')
else:
raise ValueError("group_by_funcs do not define a valid group hierarchy. Please reprocess the input_data or choose different grouping functions.")
elif approach == 'top-down':
# Check the length of group_by_funcs list is at most 2
#if len(group_by_funcs) > 2:
# # TODO: extend to more than 2 callable elements.
# raise ValueError("group_by_funcs can only contain at most two grouping elements.")
with h5py.File(ofilename, 'w') as file:
create_group_hierarchy(file, df, grouping_cols)
file.attrs.create(name='depth', data=len(grouping_cols)-1)
#join_path = lambda x,y: '/' + x + '/' + y
#for group_name in df[grouping_cols[0]].unique():
# group_filter = df[grouping_cols[0]]==group_name
# for subgroup_name in df.loc[group_filter,grouping_cols[1]].unique():
# # Create group subgroup folder structure implicitly.
# # Explicitly, grp = f.create_group(group_name), subgrp = grp.create_group(subgroup_name)
# print(join_path(group_name,subgroup_name))
# f.create_group(join_path(group_name,subgroup_name))
# Get groups at the bottom of the hierarchy
#bottom_level_groups = get_groups_at_a_level(file, file.attrs['depth'])
#nodes, parents, values = get_parent_child_relationships(file)
print(':)')
#fig = px.treemap(values=values,names=nodes, parents= parents)
#fig.update_traces(root_color="lightgrey")
#fig.update_layout(width = 800, height=600, margin = dict(t=50, l=25, r=25, b=25))
#fig.show()
else:
raise ValueError("'approach' must take values in ['top-down','bottom-up']")
#for i, value in enumerate(df['level_'+str(0)+'_groups'].unique().tolist()):
# 2. Validate group hierarchy, lower level groups must be embedded in higher level groups
# 3. Create hdf5 file with groups defined by the 'file_group' column
#
# Add datasets to groups and the groups and the group's attributes
#return 0
#def main():
# inputfile_dir = config_file.inputfile_dir #'\\\\fs03\\Iron_Sulphate'
# select_dir_keywords = config_file.select_dir_keywords #['gas','smps\\20220726','htof\\2022.07.26','ptr\\2022.07.26','ams\\2022.07.26']
# select_file_keywords = config_file.select_file_keywords #['20220726','2022.07.26']
# output_filename_path = os.path.join(config_file.outputfile_dir,config_file.output_filename)
# if not os.path.exists(output_filename_path):
# create_hdf5_file_from_filesystem_path(output_filename_path,inputfile_dir,select_dir_keywords,select_file_keywords)
# # hdf5_vis.display_group_hierarchy_on_a_treemap(output_filename_path)
# output_yml_filename_path = hdf5_vis.take_yml_snapshot_of_hdf5_file(output_filename_path)
# return output_filename_path, output_yml_filename_path
def main_mtable_h5_from_dataframe():
#import os
ROOT_DIR = os.path.abspath(os.curdir)
# Read BeamTimeMetaData.h5, containing Thorsten's Matlab Table
    input_data_df = read_mtable_as_dataframe(os.path.join(ROOT_DIR, 'input_files', 'BeamTimeMetaData.h5'))
    # Preprocess Thorsten's input_data dataframe so that it can be used to create a newer .h5 file
    # under certain grouping specifications.
input_data_df = input_data_df.rename(columns = {'name':'filename'})
input_data_df = utils.augment_with_filenumber(input_data_df)
input_data_df = utils.augment_with_filetype(input_data_df)
input_data_df = utils.split_sample_col_into_sample_and_data_quality_cols(input_data_df)
input_data_df['lastModifiedDatestr'] = input_data_df['lastModifiedDatestr'].astype('datetime64[s]')
    # Define grouping functions to be passed into create_hdf5_file function. These can also be set
    # as strings referring to categorical columns in input_data_df.
test_grouping_funcs = True
if test_grouping_funcs:
group_by_sample = lambda x : utils.group_by_df_column(x,'sample')
group_by_type = lambda x : utils.group_by_df_column(x,'filetype')
#group_by_filenumber = lambda x : utils.group_by_df_column(x,'filenumber')
else:
group_by_sample = 'sample'
group_by_type = 'filetype'
group_by_filenumber = 'filenumber'
output_filename_path = os.path.join('output_files','thorsten_file_list.h5')
create_hdf5_file_from_dataframe(output_filename_path,input_data_df, 'top-down', group_by_funcs = [group_by_sample, group_by_type])
#create_hdf5_file_from_dataframe('test.h5',input_data_df, 'top-down', group_by_funcs = [group_by_sample, group_by_type, group_by_filenumber])
annotation_dict = {'1-Campaign name': '**SLS-Campaign-2023**',
'2-Users':'Thorsten, Luca, Zoe',
'3-Startdate': str(input_data_df['lastModifiedDatestr'].min()),
'4-Enddate': str(input_data_df['lastModifiedDatestr'].max())
}
annotate_root_dir(output_filename_path, annotation_dict)
#display_group_hierarchy_on_a_treemap(output_filename_path)
print(':)')
if __name__ == '__main__':
#main()
main_mtable_h5_from_dataframe()
#main_5505()
print(':)')