Isolate loggers per document

usov_i committed on 2023-11-21 18:54:59 +01:00
commit 9b48fb7a24 (parent 14d122b947)

18 changed files with 163 additions and 108 deletions
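
In short: instead of print()ing to a process-wide stdout, each Bokeh document now builds its own logging.Logger backed by an in-memory StringIO stream, publishes it as doc.logger, and the library modules accept an optional log argument that falls back to a module-level logger. This replaces the earlier approach of redirecting sys.stdout into a single StringIO shared by all sessions (removed below). A minimal self-contained sketch of the pattern (make_doc_logger and parse_file are illustrative names, not pyzebra API):

```python
import logging
from io import StringIO


def make_doc_logger(doc_key):
    """Build an isolated logger whose output is captured in a StringIO buffer."""
    stream = StringIO()
    handler = logging.StreamHandler(stream)
    handler.setFormatter(
        logging.Formatter(fmt="%(asctime)s %(levelname)s: %(message)s", datefmt="%Y-%m-%d %H:%M:%S")
    )
    logger = logging.getLogger(doc_key)  # unique name -> unique logger instance
    logger.setLevel(logging.INFO)
    logger.addHandler(handler)
    return logger, stream


# Library side: a module-level fallback keeps functions usable without a document.
logger = logging.getLogger(__name__)


def parse_file(name, log=logger):
    log.info("parsing %s", name)


if __name__ == "__main__":
    doc_log, stream = make_doc_logger("doc-1")
    parse_file("scan1.ccl", log=doc_log)  # routed to this session's buffer
    parse_file("scan2.ccl")               # falls back to the module logger
    print(stream.getvalue())              # only the scan1.ccl record appears here
```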

View File

@@ -1,6 +1,9 @@
+import logging
 import subprocess
 import xml.etree.ElementTree as ET
 
+logger = logging.getLogger(__name__)
+
 DATA_FACTORY_IMPLEMENTATION = ["trics", "morph", "d10"]
 
 REFLECTION_PRINTER_FORMATS = [
@@ -20,7 +23,7 @@ ANATRIC_PATH = "/afs/psi.ch/project/sinq/rhel7/bin/anatric"
 ALGORITHMS = ["adaptivemaxcog", "adaptivedynamic"]
 
 
-def anatric(config_file, anatric_path=ANATRIC_PATH, cwd=None):
+def anatric(config_file, anatric_path=ANATRIC_PATH, cwd=None, log=logger):
     comp_proc = subprocess.run(
         [anatric_path, config_file],
         stdout=subprocess.PIPE,
@@ -29,8 +32,8 @@ def anatric(config_file, anatric_path=ANATRIC_PATH, cwd=None):
         check=True,
         text=True,
     )
-    print(" ".join(comp_proc.args))
-    print(comp_proc.stdout)
+    log.info(" ".join(comp_proc.args))
+    log.info(comp_proc.stdout)
 
 
 class AnatricConfig:

View File

@@ -1,6 +0,0 @@
-import sys
-from io import StringIO
-
-
-def on_server_loaded(_server_context):
-    sys.stdout = StringIO()

View File

@@ -1,5 +1,6 @@
 import types
 
+from bokeh.io import curdoc
 from bokeh.models import (
     Button,
     CellEditor,
@@ -51,6 +52,7 @@ def _params_factory(function):
 class FitControls:
     def __init__(self):
+        self.log = curdoc().logger
         self.params = {}
 
         def add_function_button_callback(click):
@@ -145,7 +147,11 @@ class FitControls:
     def _process_scan(self, scan):
         pyzebra.fit_scan(
-            scan, self.params, fit_from=self.from_spinner.value, fit_to=self.to_spinner.value
+            scan,
+            self.params,
+            fit_from=self.from_spinner.value,
+            fit_to=self.to_spinner.value,
+            log=self.log,
         )
         pyzebra.get_area(
             scan,

View File

@@ -11,6 +11,7 @@ import pyzebra
 class InputControls:
     def __init__(self, dataset, dlfiles, on_file_open=lambda: None, on_monitor_change=lambda: None):
         doc = curdoc()
+        log = doc.logger
 
         def filelist_select_update_for_proposal():
             proposal_path = proposal_textinput.name
@@ -45,19 +46,19 @@
                     f_name = os.path.basename(f_path)
                     base, ext = os.path.splitext(f_name)
                     try:
-                        file_data = pyzebra.parse_1D(file, ext)
-                    except:
-                        print(f"Error loading {f_name}")
+                        file_data = pyzebra.parse_1D(file, ext, log=log)
+                    except Exception as e:
+                        log.exception(e)
                         continue
 
                     pyzebra.normalize_dataset(file_data, monitor_spinner.value)
 
                     if not new_data:  # first file
                         new_data = file_data
-                        pyzebra.merge_duplicates(new_data)
+                        pyzebra.merge_duplicates(new_data, log=log)
                         dlfiles.set_names([base] * dlfiles.n_files)
                     else:
-                        pyzebra.merge_datasets(new_data, file_data)
+                        pyzebra.merge_datasets(new_data, file_data, log=log)
 
             if new_data:
                 dataset.clear()
@@ -76,13 +77,13 @@ class InputControls:
                     f_name = os.path.basename(f_path)
                     _, ext = os.path.splitext(f_name)
                     try:
-                        file_data = pyzebra.parse_1D(file, ext)
-                    except:
-                        print(f"Error loading {f_name}")
+                        file_data = pyzebra.parse_1D(file, ext, log=log)
+                    except Exception as e:
+                        log.exception(e)
                         continue
 
                     pyzebra.normalize_dataset(file_data, monitor_spinner.value)
-                    pyzebra.merge_datasets(dataset, file_data)
+                    pyzebra.merge_datasets(dataset, file_data, log=log)
 
             if file_data:
                 on_file_open()
@@ -97,19 +98,19 @@ class InputControls:
                 with io.StringIO(base64.b64decode(f_str).decode()) as file:
                     base, ext = os.path.splitext(f_name)
                     try:
-                        file_data = pyzebra.parse_1D(file, ext)
-                    except:
-                        print(f"Error loading {f_name}")
+                        file_data = pyzebra.parse_1D(file, ext, log=log)
+                    except Exception as e:
+                        log.exception(e)
                         continue
 
                     pyzebra.normalize_dataset(file_data, monitor_spinner.value)
 
                     if not new_data:  # first file
                         new_data = file_data
-                        pyzebra.merge_duplicates(new_data)
+                        pyzebra.merge_duplicates(new_data, log=log)
                         dlfiles.set_names([base] * dlfiles.n_files)
                     else:
-                        pyzebra.merge_datasets(new_data, file_data)
+                        pyzebra.merge_datasets(new_data, file_data, log=log)
 
             if new_data:
                 dataset.clear()
@@ -129,13 +130,13 @@ class InputControls:
                 with io.StringIO(base64.b64decode(f_str).decode()) as file:
                     _, ext = os.path.splitext(f_name)
                     try:
-                        file_data = pyzebra.parse_1D(file, ext)
-                    except:
-                        print(f"Error loading {f_name}")
+                        file_data = pyzebra.parse_1D(file, ext, log=log)
+                    except Exception as e:
+                        log.exception(e)
                         continue
 
                     pyzebra.normalize_dataset(file_data, monitor_spinner.value)
-                    pyzebra.merge_datasets(dataset, file_data)
+                    pyzebra.merge_datasets(dataset, file_data, log=log)
 
             if file_data:
                 on_file_open()

View File

@@ -1,5 +1,6 @@
 import argparse
-import sys
+import logging
+from io import StringIO
 
 from bokeh.io import curdoc
 from bokeh.layouts import column, row
@@ -42,6 +43,16 @@ doc.anatric_path = args.anatric_path
 doc.spind_path = args.spind_path
 doc.sxtal_refgen_path = args.sxtal_refgen_path
 
+stream = StringIO()
+handler = logging.StreamHandler(stream)
+handler.setFormatter(
+    logging.Formatter(fmt="%(asctime)s %(levelname)s: %(message)s", datefmt="%Y-%m-%d %H:%M:%S")
+)
+logger = logging.getLogger(str(id(doc)))
+logger.setLevel(logging.INFO)
+logger.addHandler(handler)
+doc.logger = logger
+
 log_textareainput = TextAreaInput(title="Logging output:")
@@ -60,7 +71,7 @@ def apply_button_callback():
         try:
             proposal_path = pyzebra.find_proposal_path(proposal)
         except ValueError as e:
-            print(e)
+            logger.exception(e)
             return
         apply_button.disabled = True
     else:
@@ -95,7 +106,7 @@ doc.add_root(
 
 
 def update_stdout():
-    log_textareainput.value = sys.stdout.getvalue()
+    log_textareainput.value = stream.getvalue()
 
 
 doc.add_periodic_callback(update_stdout, 1000)
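
A detail worth noting: logging.getLogger(name) returns the same process-wide object for a given name, so keying the name on id(doc) yields exactly one logger per live document, and the periodic callback above mirrors only that session's buffer into its TextAreaInput. A tiny illustration with stand-in objects (not Bokeh documents):

```python
import logging

doc_a, doc_b = object(), object()  # stand-ins for two open documents
log_a = logging.getLogger(str(id(doc_a)))
log_b = logging.getLogger(str(id(doc_b)))

assert log_a is not log_b                          # distinct documents, distinct loggers
assert log_a is logging.getLogger(str(id(doc_a)))  # same document, same logger
```

One trade-off of this scheme: loggers live in a process-wide registry and are never garbage-collected, so entries accumulate as sessions come and go.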

View File

@@ -33,6 +33,7 @@ from pyzebra import EXPORT_TARGETS, app
 def create():
     doc = curdoc()
+    log = doc.logger
     dataset1 = []
     dataset2 = []
     app_dlfiles = app.DownloadFiles(n_files=2)
@@ -94,7 +95,7 @@ def create():
     def file_open_button_callback():
         if len(file_select.value) != 2:
-            print("WARNING: Select exactly 2 .ccl files.")
+            log.warning("Select exactly 2 .ccl files.")
             return
 
         new_data1 = []
@@ -104,13 +105,13 @@ def create():
                 f_name = os.path.basename(f_path)
                 base, ext = os.path.splitext(f_name)
                 try:
-                    file_data = pyzebra.parse_1D(file, ext)
-                except:
-                    print(f"Error loading {f_name}")
+                    file_data = pyzebra.parse_1D(file, ext, log=log)
+                except Exception as e:
+                    log.exception(e)
                     return
 
                 pyzebra.normalize_dataset(file_data, monitor_spinner.value)
-                pyzebra.merge_duplicates(file_data)
+                pyzebra.merge_duplicates(file_data, log=log)
 
                 if ind == 0:
                     app_dlfiles.set_names([base, base])
@@ -133,7 +134,7 @@ def create():
     def upload_button_callback(_attr, _old, _new):
         if len(upload_button.filename) != 2:
-            print("WARNING: Upload exactly 2 .ccl files.")
+            log.warning("Upload exactly 2 .ccl files.")
             return
 
         new_data1 = []
@@ -142,13 +143,13 @@ def create():
             with io.StringIO(base64.b64decode(f_str).decode()) as file:
                 base, ext = os.path.splitext(f_name)
                 try:
-                    file_data = pyzebra.parse_1D(file, ext)
-                except:
-                    print(f"Error loading {f_name}")
+                    file_data = pyzebra.parse_1D(file, ext, log=log)
+                except Exception as e:
+                    log.exception(e)
                     return
 
                 pyzebra.normalize_dataset(file_data, monitor_spinner.value)
-                pyzebra.merge_duplicates(file_data)
+                pyzebra.merge_duplicates(file_data, log=log)
 
                 if ind == 0:
                     app_dlfiles.set_names([base, base])
@@ -377,11 +378,11 @@ def create():
         scan_from2 = dataset2[int(merge_from_select.value)]
 
         if scan_into1 is scan_from1:
-            print("WARNING: Selected scans for merging are identical")
+            log.warning("Selected scans for merging are identical")
             return
 
-        pyzebra.merge_scans(scan_into1, scan_from1)
-        pyzebra.merge_scans(scan_into2, scan_from2)
+        pyzebra.merge_scans(scan_into1, scan_from1, log=log)
+        pyzebra.merge_scans(scan_into2, scan_from2, log=log)
 
         _update_table()
         _update_plot()

View File

@@ -2,6 +2,7 @@ import os
 import tempfile
 
 import numpy as np
+from bokeh.io import curdoc
 from bokeh.layouts import column, row
 from bokeh.models import (
     Button,
@@ -25,6 +26,8 @@ from pyzebra import EXPORT_TARGETS, app
 def create():
+    doc = curdoc()
+    log = doc.logger
     dataset = []
     app_dlfiles = app.DownloadFiles(n_files=2)
@@ -214,10 +217,10 @@ def create():
         scan_from = dataset[int(merge_from_select.value)]
 
         if scan_into is scan_from:
-            print("WARNING: Selected scans for merging are identical")
+            log.warning("Selected scans for merging are identical")
             return
 
-        pyzebra.merge_scans(scan_into, scan_from)
+        pyzebra.merge_scans(scan_into, scan_from, log=log)
 
         _update_table()
         _update_plot()

View File

@@ -5,6 +5,7 @@ import subprocess
 import tempfile
 
 import numpy as np
+from bokeh.io import curdoc
 from bokeh.layouts import column, row
 from bokeh.models import (
     Arrow,
@@ -39,6 +40,8 @@ SORT_OPT_NB = ["gamma", "nu", "omega"]
 def create():
+    doc = curdoc()
+    log = doc.logger
     ang_lims = {}
     cif_data = {}
     params = {}
@@ -132,7 +135,11 @@ def create():
         params = dict()
         params["SPGR"] = cryst_space_group.value
         params["CELL"] = cryst_cell.value
-        ub = pyzebra.calc_ub_matrix(params)
+        try:
+            ub = pyzebra.calc_ub_matrix(params, log=log)
+        except Exception as e:
+            log.exception(e)
+            return
         ub_matrix.value = " ".join(ub)
 
     ub_matrix_calc = Button(label="UB matrix:", button_type="primary", width=100)
@@ -221,9 +228,9 @@ def create():
             geom_template = None
         pyzebra.export_geom_file(geom_path, ang_lims, geom_template)
 
-        print(f"Content of {geom_path}:")
+        log.info(f"Content of {geom_path}:")
         with open(geom_path) as f:
-            print(f.read())
+            log.info(f.read())
 
         priority = [sorting_0.value, sorting_1.value, sorting_2.value]
         chunks = [sorting_0_dt.value, sorting_1_dt.value, sorting_2_dt.value]
@@ -248,9 +255,9 @@ def create():
                 cfl_template = None
             pyzebra.export_cfl_file(cfl_path, params, cfl_template)
 
-            print(f"Content of {cfl_path}:")
+            log.info(f"Content of {cfl_path}:")
             with open(cfl_path) as f:
-                print(f.read())
+                log.info(f.read())
 
             comp_proc = subprocess.run(
                 [pyzebra.SXTAL_REFGEN_PATH, cfl_path],
@@ -260,8 +267,8 @@ def create():
                 stderr=subprocess.STDOUT,
                 text=True,
             )
-            print(" ".join(comp_proc.args))
-            print(comp_proc.stdout)
+            log.info(" ".join(comp_proc.args))
+            log.info(comp_proc.stdout)
 
             if i == 1:  # all hkl files are identical, so keep only one
                 hkl_fname = base_fname + ".hkl"
@@ -591,8 +598,8 @@ def create():
             _, ext = os.path.splitext(fname)
             try:
                 file_data = pyzebra.parse_hkl(file, ext)
-            except:
-                print(f"Error loading {fname}")
+            except Exception as e:
+                log.exception(e)
                 return
 
             fnames.append(fname)

View File

@@ -24,6 +24,7 @@ from pyzebra import DATA_FACTORY_IMPLEMENTATION, REFLECTION_PRINTER_FORMATS
 def create():
     doc = curdoc()
+    log = doc.logger
     config = pyzebra.AnatricConfig()
 
     def _load_config_file(file):
@@ -347,7 +348,11 @@ def create():
         with tempfile.TemporaryDirectory() as temp_dir:
             temp_file = temp_dir + "/config.xml"
             config.save_as(temp_file)
-            pyzebra.anatric(temp_file, anatric_path=doc.anatric_path, cwd=temp_dir)
+            try:
+                pyzebra.anatric(temp_file, anatric_path=doc.anatric_path, cwd=temp_dir, log=log)
+            except Exception as e:
+                log.exception(e)
+                return
 
             with open(os.path.join(temp_dir, config.logfile)) as f_log:
                 output_log.value = f_log.read()
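
The try/except added here pairs with check=True in the anatric() helper from the first file: a nonzero exit status now surfaces as subprocess.CalledProcessError instead of passing silently, and the callback logs it to the session rather than crashing. A sketch of that failure path, assuming a POSIX system with the false command (run_tool is an illustrative name):

```python
import logging
import subprocess

log = logging.getLogger("demo")


def run_tool(cmd, log=log):
    # check=True turns a nonzero exit status into CalledProcessError
    comp_proc = subprocess.run(cmd, stdout=subprocess.PIPE, check=True, text=True)
    log.info(comp_proc.stdout)


try:
    run_tool(["false"])  # exits with status 1, so run() raises
except Exception as e:   # mirrors the panel callback above
    log.exception(e)
```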

View File

@@ -36,6 +36,7 @@ IMAGE_PLOT_H = int(IMAGE_H * 2.4) + 27
 def create():
     doc = curdoc()
+    log = doc.logger
     dataset = []
     cami_meta = {}
@@ -133,8 +134,8 @@ def create():
         for f_name in file_select.value:
             try:
                 new_data.append(pyzebra.read_detector_data(f_name))
-            except KeyError:
-                print("Could not read data from the file.")
+            except KeyError as e:
+                log.exception(e)
                 return
 
         dataset.extend(new_data)

View File

@@ -43,6 +43,7 @@ IMAGE_PLOT_H = int(IMAGE_H * 2.4) + 27
 def create():
     doc = curdoc()
+    log = doc.logger
     dataset = []
     cami_meta = {}
@@ -102,8 +103,8 @@ def create():
         nonlocal dataset
         try:
             scan = pyzebra.read_detector_data(io.BytesIO(base64.b64decode(new)), None)
-        except KeyError:
-            print("Could not read data from the file.")
+        except Exception as e:
+            log.exception(e)
             return
 
         dataset = [scan]
@@ -137,8 +138,8 @@ def create():
             f_name = os.path.basename(f_path)
             try:
                 file_data = [pyzebra.read_detector_data(f_path, cm)]
-            except:
-                print(f"Error loading {f_name}")
+            except Exception as e:
+                log.exception(e)
                 continue
 
             pyzebra.normalize_dataset(file_data, monitor_spinner.value)
@@ -146,7 +147,7 @@ def create():
             if not new_data:  # first file
                 new_data = file_data
             else:
-                pyzebra.merge_datasets(new_data, file_data)
+                pyzebra.merge_datasets(new_data, file_data, log=log)
 
         if new_data:
             dataset = new_data
@@ -161,12 +162,12 @@ def create():
             f_name = os.path.basename(f_path)
             try:
                 file_data = [pyzebra.read_detector_data(f_path, None)]
-            except:
-                print(f"Error loading {f_name}")
+            except Exception as e:
+                log.exception(e)
                 continue
 
             pyzebra.normalize_dataset(file_data, monitor_spinner.value)
-            pyzebra.merge_datasets(dataset, file_data)
+            pyzebra.merge_datasets(dataset, file_data, log=log)
 
         if file_data:
             _init_datatable()
@@ -292,10 +293,10 @@ def create():
         scan_from = dataset[int(merge_from_select.value)]
 
         if scan_into is scan_from:
-            print("WARNING: Selected scans for merging are identical")
+            log.warning("Selected scans for merging are identical")
            return
 
-        pyzebra.merge_h5_scans(scan_into, scan_from)
+        pyzebra.merge_h5_scans(scan_into, scan_from, log=log)
 
         _update_table()
         _update_image()
         _update_proj_plots()

View File

@@ -3,6 +3,7 @@ import os
 import tempfile
 
 import numpy as np
+from bokeh.io import curdoc
 from bokeh.layouts import column, row
 from bokeh.models import (
     Button,
@@ -38,6 +39,8 @@ def color_palette(n_colors):
 def create():
+    doc = curdoc()
+    log = doc.logger
     dataset = []
     app_dlfiles = app.DownloadFiles(n_files=1)
@@ -361,10 +364,10 @@ def create():
         scan_from = dataset[int(merge_from_select.value)]
 
         if scan_into is scan_from:
-            print("WARNING: Selected scans for merging are identical")
+            log.warning("Selected scans for merging are identical")
             return
 
-        pyzebra.merge_scans(scan_into, scan_from)
+        pyzebra.merge_scans(scan_into, scan_from, log=log)
 
         _update_table()
         _update_single_scan_plot()
        _update_overview()

View File

@@ -3,6 +3,7 @@ import io
 import os
 
 import numpy as np
+from bokeh.io import curdoc
 from bokeh.layouts import column, row
 from bokeh.models import (
     Button,
@@ -31,6 +32,8 @@ from pyzebra.app.panel_hdf_viewer import calculate_hkl
 def create():
+    doc = curdoc()
+    log = doc.logger
     _update_slice = None
     measured_data_div = Div(text="Measured <b>HDF</b> data:")
     measured_data = FileInput(accept=".hdf", multiple=True, width=200)
@@ -59,8 +62,8 @@ def create():
         # Read data
         try:
             det_data = pyzebra.read_detector_data(io.BytesIO(base64.b64decode(fdata)))
-        except:
-            print(f"Error loading {fname}")
+        except Exception as e:
+            log.exception(e)
             return None
 
         if ind == 0:
@@ -179,8 +182,8 @@ def create():
             _, ext = os.path.splitext(fname)
             try:
                 fdata = pyzebra.parse_hkl(file, ext)
-            except:
-                print(f"Error loading {fname}")
+            except Exception as e:
+                log.exception(e)
                 return
 
             for ind in range(len(fdata["counts"])):

View File

@@ -21,6 +21,7 @@ import pyzebra
 def create():
     doc = curdoc()
+    log = doc.logger
     events_data = doc.events_data
 
     npeaks_spinner = Spinner(title="Number of peaks from hdf_view panel:", disabled=True)
@@ -63,8 +64,8 @@ def create():
             stderr=subprocess.STDOUT,
             text=True,
         )
-        print(" ".join(comp_proc.args))
-        print(comp_proc.stdout)
+        log.info(" ".join(comp_proc.args))
+        log.info(comp_proc.stdout)
 
         # prepare an event file
         diff_vec = []
@@ -94,9 +95,9 @@ def create():
                     f"{x_pos} {y_pos} {intensity} {snr_cnts} {dv1} {dv2} {dv3} {d_spacing}\n"
                 )
 
-        print(f"Content of {temp_event_file}:")
+        log.info(f"Content of {temp_event_file}:")
         with open(temp_event_file) as f:
-            print(f.read())
+            log.info(f.read())
 
         comp_proc = subprocess.run(
             [
@@ -123,8 +124,8 @@ def create():
             stderr=subprocess.STDOUT,
            text=True,
         )
-        print(" ".join(comp_proc.args))
-        print(comp_proc.stdout)
+        log.info(" ".join(comp_proc.args))
+        log.info(comp_proc.stdout)
 
         spind_out_file = os.path.join(temp_dir, "spind.txt")
         spind_res = dict(
@@ -146,12 +147,12 @@ def create():
                     ub_matrices.append(ub_matrix_spind)
                     spind_res["ub_matrix"].append(str(ub_matrix_spind * 1e-10))
 
-            print(f"Content of {spind_out_file}:")
+            log.info(f"Content of {spind_out_file}:")
             with open(spind_out_file) as f:
-                print(f.read())
+                log.info(f.read())
 
         except FileNotFoundError:
-            print("No results from spind")
+            log.warning("No results from spind")
 
         results_table_source.data.update(spind_res)

View File

@@ -3,6 +3,7 @@ import io
 import os
 
 import numpy as np
+from bokeh.io import curdoc
 from bokeh.layouts import column, row
 from bokeh.models import (
     Arrow,
@@ -30,6 +31,9 @@ import pyzebra
 class PlotHKL:
     def __init__(self):
+        doc = curdoc()
+        log = doc.logger
+
         _update_slice = None
         measured_data_div = Div(text="Measured <b>CCL</b> data:")
         measured_data = FileInput(accept=".ccl", multiple=True, width=200)
@@ -62,9 +66,9 @@ class PlotHKL:
             with io.StringIO(base64.b64decode(md_fdata[0]).decode()) as file:
                 _, ext = os.path.splitext(md_fnames[0])
                 try:
-                    file_data = pyzebra.parse_1D(file, ext)
-                except:
-                    print(f"Error loading {md_fnames[0]}")
+                    file_data = pyzebra.parse_1D(file, ext, log=log)
+                except Exception as e:
+                    log.exception(e)
                     return None
 
             alpha = file_data[0]["alpha_cell"] * np.pi / 180.0
@@ -144,9 +148,9 @@ class PlotHKL:
                 with io.StringIO(base64.b64decode(md_fdata[j]).decode()) as file:
                     _, ext = os.path.splitext(md_fname)
                     try:
-                        file_data = pyzebra.parse_1D(file, ext)
-                    except:
-                        print(f"Error loading {md_fname}")
+                        file_data = pyzebra.parse_1D(file, ext, log=log)
+                    except Exception as e:
+                        log.exception(e)
                         return None
 
                 pyzebra.normalize_dataset(file_data)
@@ -291,8 +295,8 @@ class PlotHKL:
             _, ext = os.path.splitext(fname)
             try:
                 fdata = pyzebra.parse_hkl(file, ext)
-            except:
-                print(f"Error loading {fname}")
+            except Exception as e:
+                log.exception(e)
                 return
 
             for ind in range(len(fdata["counts"])):

View File

@@ -1,3 +1,4 @@
+import logging
 import os
 import re
 from ast import literal_eval
@@ -5,6 +6,8 @@ from collections import defaultdict
 
 import numpy as np
 
+logger = logging.getLogger(__name__)
+
 META_VARS_STR = (
     "instrument",
     "title",
@@ -110,7 +113,7 @@ def load_1D(filepath):
     return dataset
 
 
-def parse_1D(fileobj, data_type):
+def parse_1D(fileobj, data_type, log=logger):
     metadata = {"data_type": data_type}
 
     # read metadata
@@ -156,7 +159,7 @@ def parse_1D(fileobj, data_type):
                     metadata["ub"][row, :] = list(map(float, value.split()))
 
             except Exception:
-                print(f"Error reading {var_name} with value '{value}'")
+                log.error(f"Error reading {var_name} with value '{value}'")
                 metadata[var_name] = 0
 
     # handle older files that don't contain "zebra_mode" metadata
@@ -294,7 +297,7 @@ def parse_1D(fileobj, data_type):
             dataset.append({**metadata, **scan})
 
     else:
-        print("Unknown file extention")
+        log.error("Unknown file extention")
 
     return dataset

View File

@@ -1,3 +1,4 @@
+import logging
 import os
 
 import numpy as np
@@ -6,6 +7,8 @@ from scipy.integrate import simpson, trapezoid
 
 from pyzebra import CCL_ANGLES
 
+logger = logging.getLogger(__name__)
+
 PARAM_PRECISIONS = {
     "twotheta": 0.1,
     "chi": 0.1,
@@ -33,12 +36,12 @@ def normalize_dataset(dataset, monitor=100_000):
         scan["monitor"] = monitor
 
 
-def merge_duplicates(dataset):
+def merge_duplicates(dataset, log=logger):
     merged = np.zeros(len(dataset), dtype=bool)
     for ind_into, scan_into in enumerate(dataset):
         for ind_from, scan_from in enumerate(dataset[ind_into + 1 :], start=ind_into + 1):
             if _parameters_match(scan_into, scan_from) and not merged[ind_from]:
-                merge_scans(scan_into, scan_from)
+                merge_scans(scan_into, scan_from, log=log)
                 merged[ind_from] = True
@@ -75,11 +78,13 @@ def _parameters_match(scan1, scan2):
     return True
 
 
-def merge_datasets(dataset_into, dataset_from):
+def merge_datasets(dataset_into, dataset_from, log=logger):
     scan_motors_into = dataset_into[0]["scan_motors"]
     scan_motors_from = dataset_from[0]["scan_motors"]
     if scan_motors_into != scan_motors_from:
-        print(f"Scan motors mismatch between datasets: {scan_motors_into} vs {scan_motors_from}")
+        log.warning(
+            f"Scan motors mismatch between datasets: {scan_motors_into} vs {scan_motors_from}"
+        )
         return
 
     merged = np.zeros(len(dataset_from), dtype=bool)
@@ -96,7 +101,7 @@ def merge_datasets(dataset_into, dataset_from):
         dataset_into.append(scan_from)
 
 
-def merge_scans(scan_into, scan_from):
+def merge_scans(scan_into, scan_from, log=logger):
     if "init_scan" not in scan_into:
         scan_into["init_scan"] = scan_into.copy()
@@ -148,10 +153,10 @@ def merge_scans(scan_into, scan_from):
     fname1 = os.path.basename(scan_into["original_filename"])
     fname2 = os.path.basename(scan_from["original_filename"])
-    print(f'Merging scans: {scan_into["idx"]} ({fname1}) <-- {scan_from["idx"]} ({fname2})')
+    log.info(f'Merging scans: {scan_into["idx"]} ({fname1}) <-- {scan_from["idx"]} ({fname2})')
 
 
-def merge_h5_scans(scan_into, scan_from):
+def merge_h5_scans(scan_into, scan_from, log=logger):
     if "init_scan" not in scan_into:
         scan_into["init_scan"] = scan_into.copy()
@@ -160,7 +165,7 @@ def merge_h5_scans(scan_into, scan_from):
     for scan in scan_into["merged_scans"]:
         if scan_from is scan:
-            print("Already merged scan")
+            log.warning("Already merged scan")
             return
 
     scan_into["merged_scans"].append(scan_from)
@@ -212,7 +217,7 @@ def merge_h5_scans(scan_into, scan_from):
     fname1 = os.path.basename(scan_into["original_filename"])
     fname2 = os.path.basename(scan_from["original_filename"])
-    print(f'Merging scans: {scan_into["idx"]} ({fname1}) <-- {scan_from["idx"]} ({fname2})')
+    log.info(f'Merging scans: {scan_into["idx"]} ({fname1}) <-- {scan_from["idx"]} ({fname2})')
 
 
 def restore_scan(scan):
@@ -230,7 +235,7 @@ def restore_scan(scan):
     scan["export"] = True
 
 
-def fit_scan(scan, model_dict, fit_from=None, fit_to=None):
+def fit_scan(scan, model_dict, fit_from=None, fit_to=None, log=logger):
     if fit_from is None:
         fit_from = -np.inf
     if fit_to is None:
@@ -243,7 +248,7 @@ def fit_scan(scan, model_dict, fit_from=None, fit_to=None):
     # apply fitting range
     fit_ind = (fit_from <= x_fit) & (x_fit <= fit_to)
     if not np.any(fit_ind):
-        print(f"No data in fit range for scan {scan['idx']}")
+        log.warning(f"No data in fit range for scan {scan['idx']}")
        return
 
     y_fit = y_fit[fit_ind]
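
These signature changes spell out the library-side half of the pattern: each module keeps a logging.getLogger(__name__) fallback so its functions still log sensibly when used outside the Bokeh app, while app panels pass their per-document logger explicitly. Note that the default log=logger binds the module-level logger once, at definition time, which is fine here because that fallback object never changes. A minimal sketch of the convention:

```python
import logging

logger = logging.getLogger(__name__)  # module-level fallback


def merge_scans(scan_into, scan_from, log=logger):
    # Inside the app this is called with log=doc.logger, so the message lands
    # in that session's buffer; standalone use falls back to the module logger.
    log.info("Merging scans: %s <-- %s", scan_into["idx"], scan_from["idx"])
```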

View File

@@ -1,4 +1,5 @@
 import io
+import logging
 import os
 import subprocess
 import tempfile
@@ -6,6 +7,8 @@ from math import ceil, floor
 
 import numpy as np
 
+logger = logging.getLogger(__name__)
+
 SXTAL_REFGEN_PATH = "/afs/psi.ch/project/sinq/rhel7/bin/Sxtal_Refgen"
 
 _zebraBI_default_geom = """GEOM 2 Bissecting - HiCHI
@@ -144,7 +147,7 @@ def export_geom_file(path, ang_lims, template=None):
             out_file.write(f"{'':<8}{ang:<10}{vals[0]:<10}{vals[1]:<10}{vals[2]:<10}\n")
 
 
-def calc_ub_matrix(params):
+def calc_ub_matrix(params, log=logger):
     with tempfile.TemporaryDirectory() as temp_dir:
         cfl_file = os.path.join(temp_dir, "ub_matrix.cfl")
@@ -160,8 +163,8 @@ def calc_ub_matrix(params):
             stderr=subprocess.STDOUT,
             text=True,
         )
-        print(" ".join(comp_proc.args))
-        print(comp_proc.stdout)
+        log.info(" ".join(comp_proc.args))
+        log.info(comp_proc.stdout)
 
         sfa_file = os.path.join(temp_dir, "ub_matrix.sfa")
         ub_matrix = []