diff --git a/pyzebra/anatric.py b/pyzebra/anatric.py
index 885c120..0f6d0d1 100644
--- a/pyzebra/anatric.py
+++ b/pyzebra/anatric.py
@@ -1,6 +1,9 @@
+import logging
import subprocess
import xml.etree.ElementTree as ET
+logger = logging.getLogger(__name__)
+
DATA_FACTORY_IMPLEMENTATION = ["trics", "morph", "d10"]
REFLECTION_PRINTER_FORMATS = [
@@ -20,7 +23,7 @@ ANATRIC_PATH = "/afs/psi.ch/project/sinq/rhel7/bin/anatric"
ALGORITHMS = ["adaptivemaxcog", "adaptivedynamic"]
-def anatric(config_file, anatric_path=ANATRIC_PATH, cwd=None):
+def anatric(config_file, anatric_path=ANATRIC_PATH, cwd=None, log=logger):
comp_proc = subprocess.run(
[anatric_path, config_file],
stdout=subprocess.PIPE,
@@ -29,8 +32,8 @@ def anatric(config_file, anatric_path=ANATRIC_PATH, cwd=None):
check=True,
text=True,
)
- print(" ".join(comp_proc.args))
- print(comp_proc.stdout)
+ log.info(" ".join(comp_proc.args))
+ log.info(comp_proc.stdout)
class AnatricConfig:
diff --git a/pyzebra/app/app_hooks.py b/pyzebra/app/app_hooks.py
deleted file mode 100644
index 38bf798..0000000
--- a/pyzebra/app/app_hooks.py
+++ /dev/null
@@ -1,6 +0,0 @@
-import sys
-from io import StringIO
-
-
-def on_server_loaded(_server_context):
- sys.stdout = StringIO()
diff --git a/pyzebra/app/fit_controls.py b/pyzebra/app/fit_controls.py
index 4eebdea..e28a413 100644
--- a/pyzebra/app/fit_controls.py
+++ b/pyzebra/app/fit_controls.py
@@ -1,5 +1,6 @@
import types
+from bokeh.io import curdoc
from bokeh.models import (
Button,
CellEditor,
@@ -51,6 +52,7 @@ def _params_factory(function):
class FitControls:
def __init__(self):
+ self.log = curdoc().logger
self.params = {}
def add_function_button_callback(click):
@@ -145,7 +147,11 @@ class FitControls:
def _process_scan(self, scan):
pyzebra.fit_scan(
- scan, self.params, fit_from=self.from_spinner.value, fit_to=self.to_spinner.value
+ scan,
+ self.params,
+ fit_from=self.from_spinner.value,
+ fit_to=self.to_spinner.value,
+ log=self.log,
)
pyzebra.get_area(
scan,
diff --git a/pyzebra/app/input_controls.py b/pyzebra/app/input_controls.py
index a74f59f..7e374ec 100644
--- a/pyzebra/app/input_controls.py
+++ b/pyzebra/app/input_controls.py
@@ -11,6 +11,7 @@ import pyzebra
class InputControls:
def __init__(self, dataset, dlfiles, on_file_open=lambda: None, on_monitor_change=lambda: None):
doc = curdoc()
+ log = doc.logger
def filelist_select_update_for_proposal():
proposal_path = proposal_textinput.name
@@ -45,19 +46,19 @@ class InputControls:
f_name = os.path.basename(f_path)
base, ext = os.path.splitext(f_name)
try:
- file_data = pyzebra.parse_1D(file, ext)
- except:
- print(f"Error loading {f_name}")
+ file_data = pyzebra.parse_1D(file, ext, log=log)
+ except Exception as e:
+ log.exception(e)
continue
pyzebra.normalize_dataset(file_data, monitor_spinner.value)
if not new_data: # first file
new_data = file_data
- pyzebra.merge_duplicates(new_data)
+ pyzebra.merge_duplicates(new_data, log=log)
dlfiles.set_names([base] * dlfiles.n_files)
else:
- pyzebra.merge_datasets(new_data, file_data)
+ pyzebra.merge_datasets(new_data, file_data, log=log)
if new_data:
dataset.clear()
@@ -76,13 +77,13 @@ class InputControls:
f_name = os.path.basename(f_path)
_, ext = os.path.splitext(f_name)
try:
- file_data = pyzebra.parse_1D(file, ext)
- except:
- print(f"Error loading {f_name}")
+ file_data = pyzebra.parse_1D(file, ext, log=log)
+ except Exception as e:
+ log.exception(e)
continue
pyzebra.normalize_dataset(file_data, monitor_spinner.value)
- pyzebra.merge_datasets(dataset, file_data)
+ pyzebra.merge_datasets(dataset, file_data, log=log)
if file_data:
on_file_open()
@@ -97,19 +98,19 @@ class InputControls:
with io.StringIO(base64.b64decode(f_str).decode()) as file:
base, ext = os.path.splitext(f_name)
try:
- file_data = pyzebra.parse_1D(file, ext)
- except:
- print(f"Error loading {f_name}")
+ file_data = pyzebra.parse_1D(file, ext, log=log)
+ except Exception as e:
+ log.exception(e)
continue
pyzebra.normalize_dataset(file_data, monitor_spinner.value)
if not new_data: # first file
new_data = file_data
- pyzebra.merge_duplicates(new_data)
+ pyzebra.merge_duplicates(new_data, log=log)
dlfiles.set_names([base] * dlfiles.n_files)
else:
- pyzebra.merge_datasets(new_data, file_data)
+ pyzebra.merge_datasets(new_data, file_data, log=log)
if new_data:
dataset.clear()
@@ -129,13 +130,13 @@ class InputControls:
with io.StringIO(base64.b64decode(f_str).decode()) as file:
_, ext = os.path.splitext(f_name)
try:
- file_data = pyzebra.parse_1D(file, ext)
- except:
- print(f"Error loading {f_name}")
+ file_data = pyzebra.parse_1D(file, ext, log=log)
+ except Exception as e:
+ log.exception(e)
continue
pyzebra.normalize_dataset(file_data, monitor_spinner.value)
- pyzebra.merge_datasets(dataset, file_data)
+ pyzebra.merge_datasets(dataset, file_data, log=log)
if file_data:
on_file_open()
diff --git a/pyzebra/app/main.py b/pyzebra/app/main.py
index 9d0c050..8fd4fdd 100644
--- a/pyzebra/app/main.py
+++ b/pyzebra/app/main.py
@@ -1,5 +1,6 @@
import argparse
-import sys
+import logging
+from io import StringIO
from bokeh.io import curdoc
from bokeh.layouts import column, row
@@ -42,6 +43,16 @@ doc.anatric_path = args.anatric_path
doc.spind_path = args.spind_path
doc.sxtal_refgen_path = args.sxtal_refgen_path
+stream = StringIO()
+handler = logging.StreamHandler(stream)
+handler.setFormatter(
+ logging.Formatter(fmt="%(asctime)s %(levelname)s: %(message)s", datefmt="%Y-%m-%d %H:%M:%S")
+)
+logger = logging.getLogger(str(id(doc)))
+logger.setLevel(logging.INFO)
+logger.addHandler(handler)
+doc.logger = logger
+
log_textareainput = TextAreaInput(title="Logging output:")
@@ -60,7 +71,7 @@ def apply_button_callback():
try:
proposal_path = pyzebra.find_proposal_path(proposal)
except ValueError as e:
- print(e)
+ logger.exception(e)
return
apply_button.disabled = True
else:
@@ -95,7 +106,7 @@ doc.add_root(
def update_stdout():
- log_textareainput.value = sys.stdout.getvalue()
+ log_textareainput.value = stream.getvalue()
doc.add_periodic_callback(update_stdout, 1000)
diff --git a/pyzebra/app/panel_ccl_compare.py b/pyzebra/app/panel_ccl_compare.py
index 75a00ad..0ea7fa3 100644
--- a/pyzebra/app/panel_ccl_compare.py
+++ b/pyzebra/app/panel_ccl_compare.py
@@ -33,6 +33,7 @@ from pyzebra import EXPORT_TARGETS, app
def create():
doc = curdoc()
+ log = doc.logger
dataset1 = []
dataset2 = []
app_dlfiles = app.DownloadFiles(n_files=2)
@@ -94,7 +95,7 @@ def create():
def file_open_button_callback():
if len(file_select.value) != 2:
- print("WARNING: Select exactly 2 .ccl files.")
+ log.warning("Select exactly 2 .ccl files.")
return
new_data1 = []
@@ -104,13 +105,13 @@ def create():
f_name = os.path.basename(f_path)
base, ext = os.path.splitext(f_name)
try:
- file_data = pyzebra.parse_1D(file, ext)
- except:
- print(f"Error loading {f_name}")
+ file_data = pyzebra.parse_1D(file, ext, log=log)
+ except Exception as e:
+ log.exception(e)
return
pyzebra.normalize_dataset(file_data, monitor_spinner.value)
- pyzebra.merge_duplicates(file_data)
+ pyzebra.merge_duplicates(file_data, log=log)
if ind == 0:
app_dlfiles.set_names([base, base])
@@ -133,7 +134,7 @@ def create():
def upload_button_callback(_attr, _old, _new):
if len(upload_button.filename) != 2:
- print("WARNING: Upload exactly 2 .ccl files.")
+ log.warning("Upload exactly 2 .ccl files.")
return
new_data1 = []
@@ -142,13 +143,13 @@ def create():
with io.StringIO(base64.b64decode(f_str).decode()) as file:
base, ext = os.path.splitext(f_name)
try:
- file_data = pyzebra.parse_1D(file, ext)
- except:
- print(f"Error loading {f_name}")
+ file_data = pyzebra.parse_1D(file, ext, log=log)
+ except Exception as e:
+ log.exception(e)
return
pyzebra.normalize_dataset(file_data, monitor_spinner.value)
- pyzebra.merge_duplicates(file_data)
+ pyzebra.merge_duplicates(file_data, log=log)
if ind == 0:
app_dlfiles.set_names([base, base])
@@ -377,11 +378,11 @@ def create():
scan_from2 = dataset2[int(merge_from_select.value)]
if scan_into1 is scan_from1:
- print("WARNING: Selected scans for merging are identical")
+ log.warning("Selected scans for merging are identical")
return
- pyzebra.merge_scans(scan_into1, scan_from1)
- pyzebra.merge_scans(scan_into2, scan_from2)
+ pyzebra.merge_scans(scan_into1, scan_from1, log=log)
+ pyzebra.merge_scans(scan_into2, scan_from2, log=log)
_update_table()
_update_plot()
diff --git a/pyzebra/app/panel_ccl_integrate.py b/pyzebra/app/panel_ccl_integrate.py
index eb95e7d..a067a30 100644
--- a/pyzebra/app/panel_ccl_integrate.py
+++ b/pyzebra/app/panel_ccl_integrate.py
@@ -2,6 +2,7 @@ import os
import tempfile
import numpy as np
+from bokeh.io import curdoc
from bokeh.layouts import column, row
from bokeh.models import (
Button,
@@ -25,6 +26,8 @@ from pyzebra import EXPORT_TARGETS, app
def create():
+ doc = curdoc()
+ log = doc.logger
dataset = []
app_dlfiles = app.DownloadFiles(n_files=2)
@@ -214,10 +217,10 @@ def create():
scan_from = dataset[int(merge_from_select.value)]
if scan_into is scan_from:
- print("WARNING: Selected scans for merging are identical")
+ log.warning("Selected scans for merging are identical")
return
- pyzebra.merge_scans(scan_into, scan_from)
+ pyzebra.merge_scans(scan_into, scan_from, log=log)
_update_table()
_update_plot()
diff --git a/pyzebra/app/panel_ccl_prepare.py b/pyzebra/app/panel_ccl_prepare.py
index a58fbe6..d1ec1c2 100644
--- a/pyzebra/app/panel_ccl_prepare.py
+++ b/pyzebra/app/panel_ccl_prepare.py
@@ -5,6 +5,7 @@ import subprocess
import tempfile
import numpy as np
+from bokeh.io import curdoc
from bokeh.layouts import column, row
from bokeh.models import (
Arrow,
@@ -39,6 +40,8 @@ SORT_OPT_NB = ["gamma", "nu", "omega"]
def create():
+ doc = curdoc()
+ log = doc.logger
ang_lims = {}
cif_data = {}
params = {}
@@ -132,7 +135,11 @@ def create():
params = dict()
params["SPGR"] = cryst_space_group.value
params["CELL"] = cryst_cell.value
- ub = pyzebra.calc_ub_matrix(params)
+ try:
+ ub = pyzebra.calc_ub_matrix(params, log=log)
+ except Exception as e:
+ log.exception(e)
+ return
ub_matrix.value = " ".join(ub)
ub_matrix_calc = Button(label="UB matrix:", button_type="primary", width=100)
@@ -221,9 +228,9 @@ def create():
geom_template = None
pyzebra.export_geom_file(geom_path, ang_lims, geom_template)
- print(f"Content of {geom_path}:")
+ log.info(f"Content of {geom_path}:")
with open(geom_path) as f:
- print(f.read())
+ log.info(f.read())
priority = [sorting_0.value, sorting_1.value, sorting_2.value]
chunks = [sorting_0_dt.value, sorting_1_dt.value, sorting_2_dt.value]
@@ -248,9 +255,9 @@ def create():
cfl_template = None
pyzebra.export_cfl_file(cfl_path, params, cfl_template)
- print(f"Content of {cfl_path}:")
+ log.info(f"Content of {cfl_path}:")
with open(cfl_path) as f:
- print(f.read())
+ log.info(f.read())
comp_proc = subprocess.run(
[pyzebra.SXTAL_REFGEN_PATH, cfl_path],
@@ -260,8 +267,8 @@ def create():
stderr=subprocess.STDOUT,
text=True,
)
- print(" ".join(comp_proc.args))
- print(comp_proc.stdout)
+ log.info(" ".join(comp_proc.args))
+ log.info(comp_proc.stdout)
if i == 1: # all hkl files are identical, so keep only one
hkl_fname = base_fname + ".hkl"
@@ -591,8 +598,8 @@ def create():
_, ext = os.path.splitext(fname)
try:
file_data = pyzebra.parse_hkl(file, ext)
- except:
- print(f"Error loading {fname}")
+ except Exception as e:
+ log.exception(e)
return
fnames.append(fname)
diff --git a/pyzebra/app/panel_hdf_anatric.py b/pyzebra/app/panel_hdf_anatric.py
index 592255c..e0db141 100644
--- a/pyzebra/app/panel_hdf_anatric.py
+++ b/pyzebra/app/panel_hdf_anatric.py
@@ -24,6 +24,7 @@ from pyzebra import DATA_FACTORY_IMPLEMENTATION, REFLECTION_PRINTER_FORMATS
def create():
doc = curdoc()
+ log = doc.logger
config = pyzebra.AnatricConfig()
def _load_config_file(file):
@@ -347,7 +348,11 @@ def create():
with tempfile.TemporaryDirectory() as temp_dir:
temp_file = temp_dir + "/config.xml"
config.save_as(temp_file)
- pyzebra.anatric(temp_file, anatric_path=doc.anatric_path, cwd=temp_dir)
+ try:
+ pyzebra.anatric(temp_file, anatric_path=doc.anatric_path, cwd=temp_dir, log=log)
+ except Exception as e:
+ log.exception(e)
+ return
with open(os.path.join(temp_dir, config.logfile)) as f_log:
output_log.value = f_log.read()
diff --git a/pyzebra/app/panel_hdf_param_study.py b/pyzebra/app/panel_hdf_param_study.py
index 6a1bcdd..1fcf567 100644
--- a/pyzebra/app/panel_hdf_param_study.py
+++ b/pyzebra/app/panel_hdf_param_study.py
@@ -36,6 +36,7 @@ IMAGE_PLOT_H = int(IMAGE_H * 2.4) + 27
def create():
doc = curdoc()
+ log = doc.logger
dataset = []
cami_meta = {}
@@ -133,8 +134,8 @@ def create():
for f_name in file_select.value:
try:
new_data.append(pyzebra.read_detector_data(f_name))
- except KeyError:
- print("Could not read data from the file.")
+ except KeyError as e:
+ log.exception(e)
return
dataset.extend(new_data)
diff --git a/pyzebra/app/panel_hdf_viewer.py b/pyzebra/app/panel_hdf_viewer.py
index a6cc92a..cc6381e 100644
--- a/pyzebra/app/panel_hdf_viewer.py
+++ b/pyzebra/app/panel_hdf_viewer.py
@@ -43,6 +43,7 @@ IMAGE_PLOT_H = int(IMAGE_H * 2.4) + 27
def create():
doc = curdoc()
+ log = doc.logger
dataset = []
cami_meta = {}
@@ -102,8 +103,8 @@ def create():
nonlocal dataset
try:
scan = pyzebra.read_detector_data(io.BytesIO(base64.b64decode(new)), None)
- except KeyError:
- print("Could not read data from the file.")
+ except Exception as e:
+ log.exception(e)
return
dataset = [scan]
@@ -137,8 +138,8 @@ def create():
f_name = os.path.basename(f_path)
try:
file_data = [pyzebra.read_detector_data(f_path, cm)]
- except:
- print(f"Error loading {f_name}")
+ except Exception as e:
+ log.exception(e)
continue
pyzebra.normalize_dataset(file_data, monitor_spinner.value)
@@ -146,7 +147,7 @@ def create():
if not new_data: # first file
new_data = file_data
else:
- pyzebra.merge_datasets(new_data, file_data)
+ pyzebra.merge_datasets(new_data, file_data, log=log)
if new_data:
dataset = new_data
@@ -161,12 +162,12 @@ def create():
f_name = os.path.basename(f_path)
try:
file_data = [pyzebra.read_detector_data(f_path, None)]
- except:
- print(f"Error loading {f_name}")
+ except Exception as e:
+ log.exception(e)
continue
pyzebra.normalize_dataset(file_data, monitor_spinner.value)
- pyzebra.merge_datasets(dataset, file_data)
+ pyzebra.merge_datasets(dataset, file_data, log=log)
if file_data:
_init_datatable()
@@ -292,10 +293,10 @@ def create():
scan_from = dataset[int(merge_from_select.value)]
if scan_into is scan_from:
- print("WARNING: Selected scans for merging are identical")
+ log.warning("Selected scans for merging are identical")
return
- pyzebra.merge_h5_scans(scan_into, scan_from)
+ pyzebra.merge_h5_scans(scan_into, scan_from, log=log)
_update_table()
_update_image()
_update_proj_plots()
diff --git a/pyzebra/app/panel_param_study.py b/pyzebra/app/panel_param_study.py
index 40e9c88..5a8fe6f 100644
--- a/pyzebra/app/panel_param_study.py
+++ b/pyzebra/app/panel_param_study.py
@@ -3,6 +3,7 @@ import os
import tempfile
import numpy as np
+from bokeh.io import curdoc
from bokeh.layouts import column, row
from bokeh.models import (
Button,
@@ -38,6 +39,8 @@ def color_palette(n_colors):
def create():
+ doc = curdoc()
+ log = doc.logger
dataset = []
app_dlfiles = app.DownloadFiles(n_files=1)
@@ -361,10 +364,10 @@ def create():
scan_from = dataset[int(merge_from_select.value)]
if scan_into is scan_from:
- print("WARNING: Selected scans for merging are identical")
+ log.warning("Selected scans for merging are identical")
return
- pyzebra.merge_scans(scan_into, scan_from)
+ pyzebra.merge_scans(scan_into, scan_from, log=log)
_update_table()
_update_single_scan_plot()
_update_overview()
diff --git a/pyzebra/app/panel_plot_data.py b/pyzebra/app/panel_plot_data.py
index 67c8241..cc40f78 100644
--- a/pyzebra/app/panel_plot_data.py
+++ b/pyzebra/app/panel_plot_data.py
@@ -3,6 +3,7 @@ import io
import os
import numpy as np
+from bokeh.io import curdoc
from bokeh.layouts import column, row
from bokeh.models import (
Button,
@@ -31,6 +32,8 @@ from pyzebra.app.panel_hdf_viewer import calculate_hkl
def create():
+ doc = curdoc()
+ log = doc.logger
_update_slice = None
measured_data_div = Div(text="Measured HDF data:")
measured_data = FileInput(accept=".hdf", multiple=True, width=200)
@@ -59,8 +62,8 @@ def create():
# Read data
try:
det_data = pyzebra.read_detector_data(io.BytesIO(base64.b64decode(fdata)))
- except:
- print(f"Error loading {fname}")
+ except Exception as e:
+ log.exception(e)
return None
if ind == 0:
@@ -179,8 +182,8 @@ def create():
_, ext = os.path.splitext(fname)
try:
fdata = pyzebra.parse_hkl(file, ext)
- except:
- print(f"Error loading {fname}")
+ except Exception as e:
+ log.exception(e)
return
for ind in range(len(fdata["counts"])):
diff --git a/pyzebra/app/panel_spind.py b/pyzebra/app/panel_spind.py
index b44f7ff..b3f2c22 100644
--- a/pyzebra/app/panel_spind.py
+++ b/pyzebra/app/panel_spind.py
@@ -21,6 +21,7 @@ import pyzebra
def create():
doc = curdoc()
+ log = doc.logger
events_data = doc.events_data
npeaks_spinner = Spinner(title="Number of peaks from hdf_view panel:", disabled=True)
@@ -63,8 +64,8 @@ def create():
stderr=subprocess.STDOUT,
text=True,
)
- print(" ".join(comp_proc.args))
- print(comp_proc.stdout)
+ log.info(" ".join(comp_proc.args))
+ log.info(comp_proc.stdout)
# prepare an event file
diff_vec = []
@@ -94,9 +95,9 @@ def create():
f"{x_pos} {y_pos} {intensity} {snr_cnts} {dv1} {dv2} {dv3} {d_spacing}\n"
)
- print(f"Content of {temp_event_file}:")
+ log.info(f"Content of {temp_event_file}:")
with open(temp_event_file) as f:
- print(f.read())
+ log.info(f.read())
comp_proc = subprocess.run(
[
@@ -123,8 +124,8 @@ def create():
stderr=subprocess.STDOUT,
text=True,
)
- print(" ".join(comp_proc.args))
- print(comp_proc.stdout)
+ log.info(" ".join(comp_proc.args))
+ log.info(comp_proc.stdout)
spind_out_file = os.path.join(temp_dir, "spind.txt")
spind_res = dict(
@@ -146,12 +147,12 @@ def create():
ub_matrices.append(ub_matrix_spind)
spind_res["ub_matrix"].append(str(ub_matrix_spind * 1e-10))
- print(f"Content of {spind_out_file}:")
+ log.info(f"Content of {spind_out_file}:")
with open(spind_out_file) as f:
- print(f.read())
+ log.info(f.read())
except FileNotFoundError:
- print("No results from spind")
+ log.warning("No results from spind")
results_table_source.data.update(spind_res)
diff --git a/pyzebra/app/plot_hkl.py b/pyzebra/app/plot_hkl.py
index 444a80a..f4349df 100644
--- a/pyzebra/app/plot_hkl.py
+++ b/pyzebra/app/plot_hkl.py
@@ -3,6 +3,7 @@ import io
import os
import numpy as np
+from bokeh.io import curdoc
from bokeh.layouts import column, row
from bokeh.models import (
Arrow,
@@ -30,6 +31,9 @@ import pyzebra
class PlotHKL:
def __init__(self):
+ doc = curdoc()
+ log = doc.logger
+
_update_slice = None
measured_data_div = Div(text="Measured CCL data:")
measured_data = FileInput(accept=".ccl", multiple=True, width=200)
@@ -62,9 +66,9 @@ class PlotHKL:
with io.StringIO(base64.b64decode(md_fdata[0]).decode()) as file:
_, ext = os.path.splitext(md_fnames[0])
try:
- file_data = pyzebra.parse_1D(file, ext)
- except:
- print(f"Error loading {md_fnames[0]}")
+ file_data = pyzebra.parse_1D(file, ext, log=log)
+ except Exception as e:
+ log.exception(e)
return None
alpha = file_data[0]["alpha_cell"] * np.pi / 180.0
@@ -144,9 +148,9 @@ class PlotHKL:
with io.StringIO(base64.b64decode(md_fdata[j]).decode()) as file:
_, ext = os.path.splitext(md_fname)
try:
- file_data = pyzebra.parse_1D(file, ext)
- except:
- print(f"Error loading {md_fname}")
+ file_data = pyzebra.parse_1D(file, ext, log=log)
+ except Exception as e:
+ log.exception(e)
return None
pyzebra.normalize_dataset(file_data)
@@ -291,8 +295,8 @@ class PlotHKL:
_, ext = os.path.splitext(fname)
try:
fdata = pyzebra.parse_hkl(file, ext)
- except:
- print(f"Error loading {fname}")
+ except Exception as e:
+ log.exception(e)
return
for ind in range(len(fdata["counts"])):
diff --git a/pyzebra/ccl_io.py b/pyzebra/ccl_io.py
index b6d001f..bc0e2b5 100644
--- a/pyzebra/ccl_io.py
+++ b/pyzebra/ccl_io.py
@@ -1,3 +1,4 @@
+import logging
import os
import re
from ast import literal_eval
@@ -5,6 +6,8 @@ from collections import defaultdict
import numpy as np
+logger = logging.getLogger(__name__)
+
META_VARS_STR = (
"instrument",
"title",
@@ -110,7 +113,7 @@ def load_1D(filepath):
return dataset
-def parse_1D(fileobj, data_type):
+def parse_1D(fileobj, data_type, log=logger):
metadata = {"data_type": data_type}
# read metadata
@@ -156,7 +159,7 @@ def parse_1D(fileobj, data_type):
metadata["ub"][row, :] = list(map(float, value.split()))
except Exception:
- print(f"Error reading {var_name} with value '{value}'")
+ log.error(f"Error reading {var_name} with value '{value}'")
metadata[var_name] = 0
# handle older files that don't contain "zebra_mode" metadata
@@ -294,7 +297,7 @@ def parse_1D(fileobj, data_type):
dataset.append({**metadata, **scan})
else:
- print("Unknown file extention")
+        log.error("Unknown file extension")
return dataset
diff --git a/pyzebra/ccl_process.py b/pyzebra/ccl_process.py
index 6557164..435d2d0 100644
--- a/pyzebra/ccl_process.py
+++ b/pyzebra/ccl_process.py
@@ -1,3 +1,4 @@
+import logging
import os
import numpy as np
@@ -6,6 +7,8 @@ from scipy.integrate import simpson, trapezoid
from pyzebra import CCL_ANGLES
+logger = logging.getLogger(__name__)
+
PARAM_PRECISIONS = {
"twotheta": 0.1,
"chi": 0.1,
@@ -33,12 +36,12 @@ def normalize_dataset(dataset, monitor=100_000):
scan["monitor"] = monitor
-def merge_duplicates(dataset):
+def merge_duplicates(dataset, log=logger):
merged = np.zeros(len(dataset), dtype=bool)
for ind_into, scan_into in enumerate(dataset):
for ind_from, scan_from in enumerate(dataset[ind_into + 1 :], start=ind_into + 1):
if _parameters_match(scan_into, scan_from) and not merged[ind_from]:
- merge_scans(scan_into, scan_from)
+ merge_scans(scan_into, scan_from, log=log)
merged[ind_from] = True
@@ -75,11 +78,13 @@ def _parameters_match(scan1, scan2):
return True
-def merge_datasets(dataset_into, dataset_from):
+def merge_datasets(dataset_into, dataset_from, log=logger):
scan_motors_into = dataset_into[0]["scan_motors"]
scan_motors_from = dataset_from[0]["scan_motors"]
if scan_motors_into != scan_motors_from:
- print(f"Scan motors mismatch between datasets: {scan_motors_into} vs {scan_motors_from}")
+ log.warning(
+ f"Scan motors mismatch between datasets: {scan_motors_into} vs {scan_motors_from}"
+ )
return
merged = np.zeros(len(dataset_from), dtype=bool)
@@ -96,7 +101,7 @@ def merge_datasets(dataset_into, dataset_from):
dataset_into.append(scan_from)
-def merge_scans(scan_into, scan_from):
+def merge_scans(scan_into, scan_from, log=logger):
if "init_scan" not in scan_into:
scan_into["init_scan"] = scan_into.copy()
@@ -148,10 +153,10 @@ def merge_scans(scan_into, scan_from):
fname1 = os.path.basename(scan_into["original_filename"])
fname2 = os.path.basename(scan_from["original_filename"])
- print(f'Merging scans: {scan_into["idx"]} ({fname1}) <-- {scan_from["idx"]} ({fname2})')
+ log.info(f'Merging scans: {scan_into["idx"]} ({fname1}) <-- {scan_from["idx"]} ({fname2})')
-def merge_h5_scans(scan_into, scan_from):
+def merge_h5_scans(scan_into, scan_from, log=logger):
if "init_scan" not in scan_into:
scan_into["init_scan"] = scan_into.copy()
@@ -160,7 +165,7 @@ def merge_h5_scans(scan_into, scan_from):
for scan in scan_into["merged_scans"]:
if scan_from is scan:
- print("Already merged scan")
+ log.warning("Already merged scan")
return
scan_into["merged_scans"].append(scan_from)
@@ -212,7 +217,7 @@ def merge_h5_scans(scan_into, scan_from):
fname1 = os.path.basename(scan_into["original_filename"])
fname2 = os.path.basename(scan_from["original_filename"])
- print(f'Merging scans: {scan_into["idx"]} ({fname1}) <-- {scan_from["idx"]} ({fname2})')
+ log.info(f'Merging scans: {scan_into["idx"]} ({fname1}) <-- {scan_from["idx"]} ({fname2})')
def restore_scan(scan):
@@ -230,7 +235,7 @@ def restore_scan(scan):
scan["export"] = True
-def fit_scan(scan, model_dict, fit_from=None, fit_to=None):
+def fit_scan(scan, model_dict, fit_from=None, fit_to=None, log=logger):
if fit_from is None:
fit_from = -np.inf
if fit_to is None:
@@ -243,7 +248,7 @@ def fit_scan(scan, model_dict, fit_from=None, fit_to=None):
# apply fitting range
fit_ind = (fit_from <= x_fit) & (x_fit <= fit_to)
if not np.any(fit_ind):
- print(f"No data in fit range for scan {scan['idx']}")
+ log.warning(f"No data in fit range for scan {scan['idx']}")
return
y_fit = y_fit[fit_ind]
diff --git a/pyzebra/sxtal_refgen.py b/pyzebra/sxtal_refgen.py
index 6058b7a..44cdb4e 100644
--- a/pyzebra/sxtal_refgen.py
+++ b/pyzebra/sxtal_refgen.py
@@ -1,4 +1,5 @@
import io
+import logging
import os
import subprocess
import tempfile
@@ -6,6 +7,8 @@ from math import ceil, floor
import numpy as np
+logger = logging.getLogger(__name__)
+
SXTAL_REFGEN_PATH = "/afs/psi.ch/project/sinq/rhel7/bin/Sxtal_Refgen"
_zebraBI_default_geom = """GEOM 2 Bissecting - HiCHI
@@ -144,7 +147,7 @@ def export_geom_file(path, ang_lims, template=None):
out_file.write(f"{'':<8}{ang:<10}{vals[0]:<10}{vals[1]:<10}{vals[2]:<10}\n")
-def calc_ub_matrix(params):
+def calc_ub_matrix(params, log=logger):
with tempfile.TemporaryDirectory() as temp_dir:
cfl_file = os.path.join(temp_dir, "ub_matrix.cfl")
@@ -160,8 +163,8 @@ def calc_ub_matrix(params):
stderr=subprocess.STDOUT,
text=True,
)
- print(" ".join(comp_proc.args))
- print(comp_proc.stdout)
+ log.info(" ".join(comp_proc.args))
+ log.info(comp_proc.stdout)
sfa_file = os.path.join(temp_dir, "ub_matrix.sfa")
ub_matrix = []