Created a file reader for ACSM TOFWARE files, updated the file reader registry, and updated the YAML file with instrument-specific terms and reader configuration parameters.

2024-10-07 16:18:14 +02:00
parent c321a17943
commit 9a3bf77f37
3 changed files with 278 additions and 9 deletions

instruments/dictionaries/ACSM_TOFWARE.yaml

@@ -1,10 +1,47 @@
table_header:
  t_start_Buf:
    desc: Date and time of the measurement
    unit: YYYY-MM-DD HH:MM:SS
    rename_as: date_time
    description: Start time of the buffer (includes milliseconds)
    datetime_format: "%d.%m.%Y %H:%M:%S.%f"
    data_type: 'datetime'
  t_base:
    description: Base time of the measurement (without milliseconds)
    datetime_format: "%d.%m.%Y %H:%M:%S"
    data_type: 'datetime'
  tseries:
    description: Time series reference (without milliseconds)
    datetime_format: "%d.%m.%Y %H:%M:%S"
    data_type: 'datetime'
config_text_reader:
  table_header:
    #txt:
    - 't_base VaporizerTemp_C HeaterBias_V FlowRefWave FlowRate_mb FlowRate_ccs FilamentEmission_mA Detector_V AnalogInput06_V ABRefWave ABsamp ABCorrFact'
    - 't_start_Buf,Chl_11000,NH4_11000,SO4_11000,NO3_11000,Org_11000,SO4_48_11000,SO4_62_11000,SO4_82_11000,SO4_81_11000,SO4_98_11000,NO3_30_11000,Org_60_11000,Org_43_11000,Org_44_11000'
    #csv:
    - "X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 X32 X33 X34 X35 X36 X37 X38 X39 X40 X41 X42 X43 X44 X45 X46 X47 X48 X49 X50 X51 X52 X53 X54 X55 X56 X57 X58 X59 X60 X61 X62 X63 X64 X65 X66 X67 X68 X69 X70 X71 X72 X73 X74 X75 X76 X77 X78 X79 X80 X81 X82 X83 X84 X85 X86 X87 X88 X89 X90 X91 X92 X93 X94 X95 X96 X97 X98 X99 X100 X101 X102 X103 X104 X105 X106 X107 X108 X109 X110 X111 X112 X113 X114 X115 X116 X117 X118 X119 X120 X121 X122 X123 X124 X125 X126 X127 X128 X129 X130 X131 X132 X133 X134 X135 X136 X137 X138 X139 X140 X141 X142 X143 X144 X145 X146 X147 X148 X149 X150 X151 X152 X153 X154 X155 X156 X157 X158 X159 X160 X161 X162 X163 X164 X165 X166 X167 X168 X169 X170 X171 X172 X173 X174 X175 X176 X177 X178 X179 X180 X181 X182 X183 X184 X185 X186 X187 X188 X189 X190 X191 X192 X193 X194 X195 X196 X197 X198 X199 X200 X201 X202 X203 X204 X205 X206 X207 X208 X209 X210 X211 X212 X213 X214 X215 X216 X217 X218 X219"
    - "X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 X32 X33 X34 X35 X36 X37 X38 X39 X40 X41 X42 X43 X44 X45 X46 X47 X48 X49 X50 X51 X52 X53 X54 X55 X56 X57 X58 X59 X60 X61 X62 X63 X64 X65 X66 X67 X68 X69 X70 X71 X72 X73 X74 X75 X76 X77 X78 X79 X80 X81 X82 X83 X84 X85 X86 X87 X88 X89 X90 X91 X92 X93 X94 X95 X96 X97 X98 X99 X100 X101 X102 X103 X104 X105 X106 X107 X108 X109 X110 X111 X112 X113 X114 X115 X116 X117 X118 X119 X120 X121 X122 X123 X124 X125 X126 X127 X128 X129 X130 X131 X132 X133 X134 X135 X136 X137 X138 X139 X140 X141 X142 X143 X144 X145 X146 X147 X148 X149 X150 X151 X152 X153 X154 X155 X156 X157 X158 X159 X160 X161 X162 X163 X164 X165 X166 X167 X168 X169 X170 X171 X172 X173 X174 X175 X176 X177 X178 X179 X180 X181 X182 X183 X184 X185 X186 X187 X188 X189 X190 X191 X192 X193 X194 X195 X196 X197 X198 X199 X200 X201 X202 X203 X204 X205 X206 X207 X208 X209 X210 X211 X212 X213 X214 X215 X216 X217 X218 X219"
    - 'MSS_base'
    - 'tseries'
  separator:
    #txt:
    - "\t"
    - ","
    #csv:
    - "\t"
    - "\t"
    - "None"
    - "None"
  file_encoding:
    #txt:
    - "utf-8"
    - "utf-8"
    #csv:
    - "utf-8"
    - "utf-8"
    - "utf-8"
    - "utf-8"
# Developer notes on parsing the timestamp columns with pandas:
# df1['t_start_Buf'] = pd.to_datetime(df1['t_start_Buf'].apply(lambda a: a.decode()), format='%d.%m.%Y %H:%M:%S.%f', errors='coerce')
# df2['t_base'] = pd.to_datetime(df2['t_base'].apply(lambda a: a.decode()), format='%d.%m.%Y %H:%M:%S', errors='coerce')
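For illustration, a minimal sketch (not part of the commit) of how the reader consumes these parallel lists; entry i of table_header is decoded with file_encoding[i] and split on separator[i]:

import yaml

# Path as constructed by the reader module; adjust to your checkout location
with open('instruments/dictionaries/ACSM_TOFWARE.yaml') as stream:
    cfg = yaml.load(stream, Loader=yaml.FullLoader)

reader_cfg = cfg['config_text_reader']
for tb, sep, enc in zip(reader_cfg['table_header'],
                        reader_cfg['separator'],
                        reader_cfg['file_encoding']):
    print(repr(tb[:40]), repr(sep), enc)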

instruments/readers/acsm_tofware_reader.py

@@ -0,0 +1,225 @@
import sys
import os

root_dir = os.path.abspath(os.curdir)
sys.path.append(root_dir)

import collections
import pandas as pd
import yaml

import utils.g5505_utils as utils
#import src.metadata_review_lib as metadata
#from src.metadata_review_lib import parse_attribute

def read_acsm_files_as_dict(filename: str, instruments_dir: str = None, work_with_copy: bool = True):

    # If instruments_dir is not provided, use the default path relative to the module directory
    if not instruments_dir:
        # Assume the instruments folder is one level up from the source module directory
        module_dir = os.path.dirname(__file__)
        instruments_dir = os.path.join(module_dir, '..')

    # Normalize the path (resolves any '..' in the path)
    instrument_configs_path = os.path.abspath(os.path.join(instruments_dir, 'dictionaries', 'ACSM_TOFWARE.yaml'))

    with open(instrument_configs_path, 'r') as stream:
        try:
            config_dict = yaml.load(stream, Loader=yaml.FullLoader)
        except yaml.YAMLError as exc:
            print(exc)
    # Verify that the file can be read by the available instrument configurations.
    #if not any(key in filename.replace(os.sep, '/') for key in config_dict.keys()):
    #    return {}
    # TODO: this may be prone to error if the assumed folder structure is not compliant

    description_dict = config_dict.get('table_header', {})

    file_encoding = config_dict['config_text_reader'].get('file_encoding', 'utf-8')
    separator = config_dict['config_text_reader'].get('separator', None)
    table_header = config_dict['config_text_reader'].get('table_header', None)
    timestamp_variables = config_dict['config_text_reader'].get('timestamp', [])
    datetime_format = config_dict['config_text_reader'].get('datetime_format', [])
    # Read the header as a dictionary and detect where the data table starts
    header_dict = {}
    data_start = False

    # Work with a copy of the file for safety
    if work_with_copy:
        tmp_filename = utils.make_file_copy(source_file_path=filename)
    else:
        tmp_filename = filename

    # Normalize scalar config values into parallel one-element lists so the scanning
    # loop below can index table_header, file_encoding, and separator uniformly
    if not isinstance(table_header, list):
        table_header = [table_header]
        file_encoding = [file_encoding]
        separator = [separator]
    with open(tmp_filename, 'rb') as f:
        table_preamble = []
        for line_number, line in enumerate(f):

            # Find the first configured table header that occurs in this line
            for tb_idx, tb in enumerate(table_header):
                if tb in line.decode(file_encoding[tb_idx]):
                    break

            if tb in line.decode(file_encoding[tb_idx]):
                list_of_substrings = line.decode(file_encoding[tb_idx]).split(separator[tb_idx].replace('\\t', '\t'))

                # Count occurrences of each substring
                substring_counts = collections.Counter(list_of_substrings)
                data_start = True

                # Generate column names, appending the column index only for repeated substrings,
                # e.g. two 'Org' columns at positions 3 and 7 become '3_Org' and '7_Org'
                column_names = [f"{i}_{name.strip()}" if substring_counts[name] > 1 else name.strip()
                                for i, name in enumerate(list_of_substrings)]
                break

            # Subdivide the line into words and join them with a single space. I assume this
            # produces a cleaner line with no stray separator characters (\t, \r) or extra spaces.
            # TODO: ideally we would use a multiline string, but the yaml parser does not recognize \n as a special character
            list_of_substrings = line.decode(file_encoding[tb_idx]).split()
            table_preamble.append(' '.join(list_of_substrings))
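
    # Note on behavior as written: if no configured table header matches any line of the
    # file, column_names is never assigned, pd.read_csv below raises a NameError, and the
    # except clause turns that into an empty dict return.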
    # TODO: the reader does not work when the separator is None; fix for RGA
    try:
        df = pd.read_csv(tmp_filename,
                         delimiter=separator[tb_idx].replace('\\t', '\t'),
                         header=line_number,
                         encoding=file_encoding[tb_idx],
                         names=column_names,
                         skip_blank_lines=True)

        df_numerical_attrs = df.select_dtypes(include='number')
        df_categorical_attrs = df.select_dtypes(exclude='number')
        numerical_variables = [item for item in df_numerical_attrs.columns]
        # Consolidate the separate 'date' and 'time' columns specified in text_data_source.yaml
        # into a single timestamp column
        if timestamp_variables:
            timestamps_name = ' '.join(timestamp_variables)
            df_categorical_attrs[timestamps_name] = df_categorical_attrs[timestamp_variables].astype(str).agg(' '.join, axis=1)

            valid_indices = []
            if datetime_format:
                # Parse timestamps and drop rows whose timestamp cannot be parsed
                df_categorical_attrs[timestamps_name] = pd.to_datetime(df_categorical_attrs[timestamps_name], format=datetime_format, errors='coerce')
                valid_indices = df_categorical_attrs.dropna(subset=[timestamps_name]).index
                df_categorical_attrs = df_categorical_attrs.loc[valid_indices, :]
                df_numerical_attrs = df_numerical_attrs.loc[valid_indices, :]

                df_categorical_attrs[timestamps_name] = df_categorical_attrs[timestamps_name].dt.strftime(config_dict['default']['desired_format'])
                startdate = df_categorical_attrs[timestamps_name].min()
                enddate = df_categorical_attrs[timestamps_name].max()

                df_categorical_attrs[timestamps_name] = df_categorical_attrs[timestamps_name].astype(str)
                header_dict['startdate'] = str(startdate)
                header_dict['enddate'] = str(enddate)

            if len(timestamp_variables) > 1:
                df_categorical_attrs = df_categorical_attrs.drop(columns=timestamp_variables)

        categorical_variables = [item for item in df_categorical_attrs.columns]
        #elif 'RGA' in filename:
        #    df_categorical_attrs = df_categorical_attrs.rename(columns={'0_Time(s)': 'timestamps'})

        file_dict = {}
        path_tail, path_head = os.path.split(tmp_filename)

        file_dict['name'] = path_head
        # TODO: review this header dictionary; it may not be the best way to represent header data
        file_dict['attributes_dict'] = header_dict
        file_dict['datasets'] = []

        df = pd.concat((df_categorical_attrs, df_numerical_attrs), axis=1)

        dataset = {}
        dataset['name'] = 'data_table'
        dataset['data'] = utils.convert_dataframe_to_np_structured_array(df)
        dataset['shape'] = dataset['data'].shape
        dataset['dtype'] = type(dataset['data'])

        # Create attribute descriptions based on description_dict
        dataset['attributes'] = {}

        # Annotate column headers if description_dict is not empty
        if description_dict:
            for column_name in df.columns:
                column_attr_dict = description_dict.get(column_name,
                                                        {'note': 'there was no description available. Review instrument files.'})
                dataset['attributes'].update({column_name: utils.convert_attrdict_to_np_structured_array(column_attr_dict)})
        # Legacy annotation approach, kept for reference:
        #try:
        #    dataset['attributes'] = description_dict['table_header'].copy()
        #    for key in description_dict['table_header'].keys():
        #        if not key in numerical_variables:
        #            dataset['attributes'].pop(key)  # delete key
        #        else:
        #            dataset['attributes'][key] = utils.parse_attribute(dataset['attributes'][key])
        #    if timestamps_name in categorical_variables:
        #        dataset['attributes'][timestamps_name] = utils.parse_attribute({'unit': 'YYYY-MM-DD HH:MM:SS.ffffff'})
        #except ValueError as err:
        #    print(err)

        # Represent string values as fixed-length strings in the HDF5 file. They need to be
        # decoded back to str when read, but this gives better control than variable-length
        # strings, at the expense of flexibility.
        # https://docs.h5py.org/en/stable/strings.html
        if table_preamble:
            tp_dataset = {}
            tp_dataset['name'] = "table_preamble"
            tp_dataset['data'] = utils.convert_string_to_bytes(table_preamble)
            tp_dataset['shape'] = tp_dataset['data'].shape
            tp_dataset['dtype'] = type(tp_dataset['data'])
            tp_dataset['attributes'] = {}
            file_dict['datasets'].append(tp_dataset)

        file_dict['datasets'].append(dataset)
        #if categorical_variables:
        #    dataset = {}
        #    dataset['name'] = 'table_categorical_variables'
        #    dataset['data'] = dataframe_to_np_structured_array(df_categorical_attrs)
        #    dataset['shape'] = dataset['data'].shape
        #    dataset['dtype'] = type(dataset['data'])
        #    if timestamps_name in categorical_variables:
        #        dataset['attributes'] = {timestamps_name: utils.parse_attribute({'unit': 'YYYY-MM-DD HH:MM:SS.ffffff'})}
        #    file_dict['datasets'].append(dataset)

    except Exception:
        # Any parsing problem results in an empty dict so the caller can skip the file
        return {}

    return file_dict
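For illustration, a minimal usage sketch (not part of the commit; the file path below is hypothetical):

from instruments.readers.acsm_tofware_reader import read_acsm_files_as_dict

file_dict = read_acsm_files_as_dict('data/ACSM_TOFWARE/example.txt', work_with_copy=False)
if file_dict:
    print(file_dict['name'])             # source file name
    print(file_dict['attributes_dict'])  # e.g. {'startdate': ..., 'enddate': ...}
    for ds in file_dict['datasets']:     # 'table_preamble' and/or 'data_table'
        print(ds['name'], ds['shape'])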

(file reader registry)

@@ -17,12 +17,19 @@ file_readers = {
    'txt': lambda a1: read_txt_files_as_dict(a1, instruments_dir=default_instruments_dir, work_with_copy=False),
    'TXT': lambda a1: read_txt_files_as_dict(a1, instruments_dir=default_instruments_dir, work_with_copy=False),
    'dat': lambda a1: read_txt_files_as_dict(a1, instruments_dir=default_instruments_dir, work_with_copy=False),
    #'h5': lambda a1, a2, a3: copy_file_in_group(a1, a2, a3, work_with_copy=False),
    #'ACSM_TOFWARE_txt': lambda a1: read_txt_files_as_dict(a1, instruments_dir=default_instruments_dir, work_with_copy=False),
    #'ACSM_TOFWARE_csv': lambda a1: read_txt_files_as_dict(a1, instruments_dir=default_instruments_dir, work_with_copy=False)
}

# Register the new ACSM TOFWARE reader for .txt and .csv files
from instruments.readers.acsm_tofware_reader import read_acsm_files_as_dict

file_extensions.append('.txt')
file_readers.update({'ACSM_TOFWARE_txt': lambda x: read_acsm_files_as_dict(x, instruments_dir=default_instruments_dir, work_with_copy=False)})
file_extensions.append('.csv')
file_readers.update({'ACSM_TOFWARE_csv': lambda x: read_acsm_files_as_dict(x, instruments_dir=default_instruments_dir, work_with_copy=False)})

# Register the reader for data-flagging app output (.json flag files)
from instruments.readers.flag_reader import read_jsonflag_as_dict
file_extensions.append('.json')
file_readers.update({'ACSM_TOFWARE_flags_json' : lambda x: read_jsonflag_as_dict(x)})
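
For illustration, a hypothetical dispatch sketch (the key-selection logic is not shown in this diff; the path and key construction are assumptions):

import os

path = 'data/ACSM_TOFWARE/example.txt'  # hypothetical path
key = 'ACSM_TOFWARE_' + os.path.splitext(path)[1].lstrip('.')  # -> 'ACSM_TOFWARE_txt'
if key in file_readers:
    file_dict = file_readers[key](path)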