152 lines
5.4 KiB
Plaintext
152 lines
5.4 KiB
Plaintext
{
|
|
"cells": [
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"import os\n",
"\n",
"from nbutils import add_project_path_to_sys_path\n",
"\n",
"# Make project-local packages importable from this notebook's location.\n",
"add_project_path_to_sys_path()\n",
"\n",
"import pandas as pd\n",
"import numpy as np\n",
"import matplotlib.pyplot as plt\n",
"\n",
"# Project-local imports; report a readable message instead of a traceback\n",
"# if the project path setup above did not work.\n",
"try:\n",
"    import src.hdf5_writer as hdf5_writer\n",
"    import src.hdf5_ops as hdf5_ops\n",
"    import visualization.hdf5_vis as h5vis\n",
"    import visualization.napp_plotlib as napp\n",
"    import utils.g5505_utils as utils\n",
"\n",
"    print(\"Imports successful!\")\n",
"except ImportError as e:\n",
"    print(f\"Import error: {e}\")\n"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "markdown",
|
|
"metadata": {},
|
|
"source": [
|
|
"Read the above specified input_file_path as a dataframe.\n",
"\n",
"Since we know this file was created from a Thorsten Table's format, we can use h5lib.read_mtable_as_dataframe() to read it.\n",
"\n",
"Then, we rename the 'name' column as 'filename', as this is the column name used to identify files in subsequent functions.\n",
"Also, we augment the dataframe with a few categorical columns to be used as grouping variables when creating the HDF5 file's group hierarchy."
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"# Input/output locations; create the output directory on first run.\n",
"input_file_path = '../input_files/BeamTimeMetaData.h5'\n",
"output_dir_path = '../output_files'\n",
"if not os.path.exists(output_dir_path):\n",
"    os.makedirs(output_dir_path)\n",
"\n",
"# Load BeamTimeMetaData.h5, which stores Thorsten's Matlab table.\n",
"input_data_df = hdf5_ops.read_mtable_as_dataframe(input_file_path)\n",
"\n",
"# Preprocess the dataframe so it can be used to create a new .h5 file\n",
"# under the grouping specifications applied in the next code cell.\n",
"input_data_df = input_data_df.rename(columns={'name': 'filename'})\n",
"input_data_df = utils.augment_with_filenumber(input_data_df)\n",
"input_data_df = utils.augment_with_filetype(input_data_df)\n",
"input_data_df = utils.split_sample_col_into_sample_and_data_quality_cols(input_data_df)\n",
"input_data_df['lastModifiedDatestr'] = input_data_df['lastModifiedDatestr'].astype('datetime64[s]')\n",
"\n",
"# Show the resulting columns (bare last expression -> rich display).\n",
"input_data_df.columns\n"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "markdown",
|
|
"metadata": {},
|
|
"source": [
|
|
"We now create an HDF5 file with a 3-level group hierarchy based on the input_data and three grouping functions. Then\n",
"we visualize the group hierarchy of the created file as a treemap."
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"# Define grouping functions to be passed into create_hdf5_file function. These can also be set\n",
"# as strings referring to categorical columns in input_data_df.\n",
"test_grouping_funcs = True\n",
"if test_grouping_funcs:\n",
"    group_by_sample = lambda x: utils.group_by_df_column(x, 'sample')\n",
"    group_by_type = lambda x: utils.group_by_df_column(x, 'filetype')\n",
"    group_by_filenumber = lambda x: utils.group_by_df_column(x, 'filenumber')\n",
"else:\n",
"    group_by_sample = 'sample'\n",
"    group_by_type = 'filetype'\n",
"    group_by_filenumber = 'filenumber'\n",
"\n",
"path_to_output_filename = os.path.normpath(os.path.join(output_dir_path, 'test.h5'))\n",
"\n",
"# NOTE(review): the group_by_* definitions above are currently unused;\n",
"# grouping is driven by this list of column names. TODO: confirm and drop one of the two.\n",
"grouping_by_vars = ['sample', 'filenumber']\n",
"\n",
"path_to_output_filename = hdf5_writer.create_hdf5_file_from_dataframe(path_to_output_filename,\n",
"                                                                      input_data_df,\n",
"                                                                      grouping_by_vars)\n",
"\n",
"# Root-level metadata describing the campaign; date range is derived from the data.\n",
"annotation_dict = {'Campaign name': 'SLS-Campaign-2023',\n",
"                   'Producers': 'Thorsten, Luca, Zoe',\n",
"                   'Startdate': str(input_data_df['lastModifiedDatestr'].min()),\n",
"                   'Enddate': str(input_data_df['lastModifiedDatestr'].max())}\n",
"\n",
"# Annotate the file's root group with the campaign metadata, then release the handle.\n",
"dataOpsObj = hdf5_ops.HDF5DataOpsManager(path_to_output_filename)\n",
"dataOpsObj.load_file_obj()\n",
"dataOpsObj.append_metadata('/', annotation_dict)\n",
"dataOpsObj.unload_file_obj()\n",
"\n",
"h5vis.display_group_hierarchy_on_a_treemap(path_to_output_filename)\n"
|
|
]
|
|
}
|
|
],
|
|
"metadata": {
|
|
"kernelspec": {
|
|
"display_name": "multiphase_chemistry_env",
|
|
"language": "python",
|
|
"name": "python3"
|
|
},
|
|
"language_info": {
|
|
"codemirror_mode": {
|
|
"name": "ipython",
|
|
"version": 3
|
|
},
|
|
"file_extension": ".py",
|
|
"mimetype": "text/x-python",
|
|
"name": "python",
|
|
"nbconvert_exporter": "python",
|
|
"pygments_lexer": "ipython3",
|
|
"version": "3.11.9"
|
|
}
|
|
},
|
|
"nbformat": 4,
|
|
"nbformat_minor": 2
|
|
}
|