Compare commits


No commits in common. "main" and "0.7.3" have entirely different histories.
main...0.7.3

29 changed files with 280 additions and 354 deletions


@@ -1,53 +0,0 @@
-name: pyzebra CI/CD pipeline
-
-on:
-  push:
-    branches:
-      - main
-    tags:
-      - '*'
-
-env:
-  CONDA: /opt/miniforge3
-
-jobs:
-  prepare:
-    runs-on: pyzebra
-    steps:
-      - run: $CONDA/bin/conda config --add channels conda-forge
-      - run: $CONDA/bin/conda config --set solver libmamba
-
-  test-env:
-    runs-on: pyzebra
-    needs: prepare
-    if: github.ref == 'refs/heads/main'
-    env:
-      BUILD_DIR: ${{ runner.temp }}/conda_build
-    steps:
-      - name: Checkout repository
-        uses: actions/checkout@v4
-      - run: $CONDA/bin/conda build --no-anaconda-upload --output-folder $BUILD_DIR ./conda-recipe
-      - run: $CONDA/bin/conda remove --name test --all --keep-env -y
-      - run: $CONDA/bin/conda install --name test --channel $BUILD_DIR python=3.8 pyzebra -y
-      - run: sudo systemctl restart pyzebra-test.service
-
-  prod-env:
-    runs-on: pyzebra
-    needs: prepare
-    if: startsWith(github.ref, 'refs/tags/')
-    env:
-      BUILD_DIR: ${{ runner.temp }}/conda_build
-    steps:
-      - name: Checkout repository
-        uses: actions/checkout@v4
-      - run: $CONDA/bin/conda build --token ${{ secrets.ANACONDA_TOKEN }} --output-folder $BUILD_DIR ./conda-recipe
-      - run: $CONDA/bin/conda remove --name prod --all --keep-env -y
-      - run: $CONDA/bin/conda install --name prod --channel $BUILD_DIR python=3.8 pyzebra -y
-      - run: sudo systemctl restart pyzebra-prod.service
-
-  cleanup:
-    runs-on: pyzebra
-    needs: [test-env, prod-env]
-    if: always()
-    steps:
-      - run: $CONDA/bin/conda build purge-all

.github/workflows/deployment.yaml (new file)

@@ -0,0 +1,25 @@
+name: Deployment
+
+on:
+  push:
+    tags:
+      - '*'
+
+jobs:
+  publish-conda-package:
+    runs-on: ubuntu-latest
+
+    steps:
+      - uses: actions/checkout@v2
+
+      - name: Prepare
+        run: |
+          $CONDA/bin/conda install --quiet --yes conda-build anaconda-client
+          $CONDA/bin/conda config --append channels conda-forge
+          $CONDA/bin/conda config --set anaconda_upload yes
+
+      - name: Build and upload
+        env:
+          ANACONDA_TOKEN: ${{ secrets.ANACONDA_TOKEN }}
+        run: |
+          $CONDA/bin/conda build --token $ANACONDA_TOKEN conda-recipe


@@ -15,10 +15,10 @@ build:
 
 requirements:
   build:
-    - python >=3.8
+    - python >=3.7
     - setuptools
   run:
-    - python >=3.8
+    - python >=3.7
    - numpy
     - scipy
     - h5py
@@ -28,7 +28,7 @@ requirements:
 
 about:
-  home: https://gitlab.psi.ch/zebra/pyzebra
+  home: https://github.com/paulscherrerinstitute/pyzebra
   summary: {{ data['description'] }}
   license: GNU GPLv3
   license_file: LICENSE


@@ -7,19 +7,18 @@ import subprocess
 def main():
-    default_branch = "main"
     branch = subprocess.check_output("git rev-parse --abbrev-ref HEAD", shell=True).decode().strip()
-    if branch != default_branch:
-        print(f"Aborting, not on '{default_branch}' branch.")
+    if branch != "master":
+        print("Aborting, not on 'master' branch.")
         return
 
-    version_filepath = os.path.join(os.path.basename(os.path.dirname(__file__)), "__init__.py")
+    filepath = "pyzebra/__init__.py"
 
     parser = argparse.ArgumentParser()
     parser.add_argument("level", type=str, choices=["patch", "minor", "major"])
     args = parser.parse_args()
 
-    with open(version_filepath) as f:
+    with open(filepath) as f:
         file_content = f.read()
 
     version = re.search(r'__version__ = "(.*?)"', file_content).group(1)
@@ -37,12 +36,11 @@ def main():
     new_version = f"{major}.{minor}.{patch}"
 
-    with open(version_filepath, "w") as f:
+    with open(filepath, "w") as f:
         f.write(re.sub(r'__version__ = "(.*?)"', f'__version__ = "{new_version}"', file_content))
 
-    os.system(f"git commit {version_filepath} -m 'Updating for version {new_version}'")
+    os.system(f"git commit {filepath} -m 'Updating for version {new_version}'")
     os.system(f"git tag -a {new_version} -m 'Release {new_version}'")
-    os.system("git push --follow-tags")
 
 
 if __name__ == "__main__":
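
Note: both sides of this release script share the same bump arithmetic. A standalone sketch
of that logic (the reset-to-zero of lower components is the usual semver convention assumed
here; bump() is an illustrative name, not a function in the script):

    def bump(version, level):
        # level is one of the script's argparse choices: "patch", "minor", "major"
        major, minor, patch = map(int, version.split("."))
        if level == "patch":
            patch += 1
        elif level == "minor":
            minor, patch = minor + 1, 0
        else:  # "major"
            major, minor, patch = major + 1, 0, 0
        return f"{major}.{minor}.{patch}"

    assert bump("0.7.3", "patch") == "0.7.4"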


@@ -6,4 +6,4 @@ from pyzebra.sxtal_refgen import *
 from pyzebra.utils import *
 from pyzebra.xtal import *
 
-__version__ = "0.7.11"
+__version__ = "0.7.3"


@@ -1,9 +1,6 @@
-import logging
 import subprocess
 import xml.etree.ElementTree as ET
 
-logger = logging.getLogger(__name__)
-
 DATA_FACTORY_IMPLEMENTATION = ["trics", "morph", "d10"]
 
 REFLECTION_PRINTER_FORMATS = [
@@ -19,11 +16,11 @@ REFLECTION_PRINTER_FORMATS = [
     "oksana",
 ]
 
-ANATRIC_PATH = "/afs/psi.ch/project/sinq/rhel8/bin/anatric"
+ANATRIC_PATH = "/afs/psi.ch/project/sinq/rhel7/bin/anatric"
 ALGORITHMS = ["adaptivemaxcog", "adaptivedynamic"]
 
-def anatric(config_file, anatric_path=ANATRIC_PATH, cwd=None, log=logger):
+def anatric(config_file, anatric_path=ANATRIC_PATH, cwd=None):
     comp_proc = subprocess.run(
         [anatric_path, config_file],
         stdout=subprocess.PIPE,
@@ -32,8 +29,8 @@ def anatric(config_file, anatric_path=ANATRIC_PATH, cwd=None, log=logger):
         check=True,
         text=True,
     )
-    log.info(" ".join(comp_proc.args))
-    log.info(comp_proc.stdout)
+    print(" ".join(comp_proc.args))
+    print(comp_proc.stdout)
 
 
 class AnatricConfig:
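
Note: the main side threads a logger through anatric() instead of printing. A minimal
runnable sketch of that log=logger default-parameter pattern (run_step is an illustrative
name):

    import logging

    logger = logging.getLogger(__name__)  # module-level default, as in anatric.py

    def run_step(args, log=logger):
        # callers (e.g. the bokeh panels) can inject a per-document logger;
        # standalone use falls back to the module logger
        log.info(" ".join(args))

    if __name__ == "__main__":
        logging.basicConfig(level=logging.INFO)
        run_step(["anatric", "config.xml"])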

pyzebra/app/app_hooks.py (new file)

@@ -0,0 +1,17 @@
+import logging
+import sys
+from io import StringIO
+
+
+def on_server_loaded(_server_context):
+    formatter = logging.Formatter(
+        fmt="%(asctime)s %(levelname)s: %(message)s", datefmt="%Y-%m-%d %H:%M:%S"
+    )
+
+    sys.stdout = StringIO()
+
+    bokeh_handler = logging.StreamHandler(StringIO())
+    bokeh_handler.setFormatter(formatter)
+    bokeh_logger = logging.getLogger("bokeh")
+    bokeh_logger.setLevel(logging.WARNING)
+    bokeh_logger.addHandler(bokeh_handler)
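
Note: on_server_loaded above redirects the process-wide stdout into a StringIO and gives the
"bokeh" logger a StringIO-backed handler, so both streams can later be polled into text
widgets (see app.py further down). A condensed sketch of the capture-and-read-back cycle
outside of bokeh:

    import logging
    import sys
    from io import StringIO

    # capture side (what on_server_loaded does)
    sys.stdout = StringIO()
    handler = logging.StreamHandler(StringIO())
    logging.getLogger("bokeh").addHandler(handler)

    # read-back side (what app.py polls once per second)
    print("hello from a panel")  # goes into the StringIO, not the terminal
    captured = sys.stdout.getvalue()
    bokeh_captured = logging.getLogger("bokeh").handlers[0].stream.getvalue()
    sys.__stdout__.write(captured)  # the real terminal stays reachable via __stdout__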


@@ -4,7 +4,7 @@ import sys
 
 
 def main():
-    app_path = os.path.dirname(os.path.abspath(__file__))
+    app_path = os.path.join(os.path.dirname(os.path.abspath(__file__)))
 
     subprocess.run(["bokeh", "serve", app_path, *sys.argv[1:]], check=True)
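
Note: cli.py simply shells out to bokeh serve with the app directory. An equivalent direct
invocation (port and websocket origin taken from the scripts at the bottom of this diff):

    import subprocess

    # what `pyzebra --port=5010 --allow-websocket-origin=pyzebra.psi.ch:5010` boils down to
    subprocess.run(
        ["bokeh", "serve", "pyzebra/app",
         "--port=5010", "--allow-websocket-origin=pyzebra.psi.ch:5010"],
        check=True,
    )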


@@ -1,6 +1,5 @@
 import types
 
-from bokeh.io import curdoc
 from bokeh.models import (
     Button,
     CellEditor,
@@ -52,7 +51,6 @@ def _params_factory(function):
 class FitControls:
     def __init__(self):
-        self.log = curdoc().logger
         self.params = {}
 
         def add_function_button_callback(click):
@@ -147,11 +145,7 @@ class FitControls:
     def _process_scan(self, scan):
         pyzebra.fit_scan(
-            scan,
-            self.params,
-            fit_from=self.from_spinner.value,
-            fit_to=self.to_spinner.value,
-            log=self.log,
+            scan, self.params, fit_from=self.from_spinner.value, fit_to=self.to_spinner.value
         )
         pyzebra.get_area(
             scan,


@@ -11,7 +11,6 @@ import pyzebra
 class InputControls:
     def __init__(self, dataset, dlfiles, on_file_open=lambda: None, on_monitor_change=lambda: None):
         doc = curdoc()
-        log = doc.logger
 
         def filelist_select_update_for_proposal():
             proposal_path = proposal_textinput.name
@@ -46,19 +45,19 @@ class InputControls:
                 f_name = os.path.basename(f_path)
                 base, ext = os.path.splitext(f_name)
                 try:
-                    file_data = pyzebra.parse_1D(file, ext, log=log)
-                except Exception as e:
-                    log.exception(e)
+                    file_data = pyzebra.parse_1D(file, ext)
+                except:
+                    print(f"Error loading {f_name}")
                     continue
 
                 pyzebra.normalize_dataset(file_data, monitor_spinner.value)
 
                 if not new_data:  # first file
                     new_data = file_data
-                    pyzebra.merge_duplicates(new_data, log=log)
+                    pyzebra.merge_duplicates(new_data)
                     dlfiles.set_names([base] * dlfiles.n_files)
                 else:
-                    pyzebra.merge_datasets(new_data, file_data, log=log)
+                    pyzebra.merge_datasets(new_data, file_data)
 
             if new_data:
                 dataset.clear()
@@ -77,13 +76,13 @@ class InputControls:
                 f_name = os.path.basename(f_path)
                 _, ext = os.path.splitext(f_name)
                 try:
-                    file_data = pyzebra.parse_1D(file, ext, log=log)
-                except Exception as e:
-                    log.exception(e)
+                    file_data = pyzebra.parse_1D(file, ext)
+                except:
+                    print(f"Error loading {f_name}")
                     continue
 
                 pyzebra.normalize_dataset(file_data, monitor_spinner.value)
-                pyzebra.merge_datasets(dataset, file_data, log=log)
+                pyzebra.merge_datasets(dataset, file_data)
 
             if file_data:
                 on_file_open()
@@ -98,19 +97,19 @@ class InputControls:
             with io.StringIO(base64.b64decode(f_str).decode()) as file:
                 base, ext = os.path.splitext(f_name)
                 try:
-                    file_data = pyzebra.parse_1D(file, ext, log=log)
-                except Exception as e:
-                    log.exception(e)
+                    file_data = pyzebra.parse_1D(file, ext)
+                except:
+                    print(f"Error loading {f_name}")
                     continue
 
                 pyzebra.normalize_dataset(file_data, monitor_spinner.value)
 
                 if not new_data:  # first file
                     new_data = file_data
-                    pyzebra.merge_duplicates(new_data, log=log)
+                    pyzebra.merge_duplicates(new_data)
                     dlfiles.set_names([base] * dlfiles.n_files)
                 else:
-                    pyzebra.merge_datasets(new_data, file_data, log=log)
+                    pyzebra.merge_datasets(new_data, file_data)
 
             if new_data:
                 dataset.clear()
@@ -130,13 +129,13 @@ class InputControls:
             with io.StringIO(base64.b64decode(f_str).decode()) as file:
                 _, ext = os.path.splitext(f_name)
                 try:
-                    file_data = pyzebra.parse_1D(file, ext, log=log)
-                except Exception as e:
-                    log.exception(e)
+                    file_data = pyzebra.parse_1D(file, ext)
+                except:
+                    print(f"Error loading {f_name}")
                     continue
 
                 pyzebra.normalize_dataset(file_data, monitor_spinner.value)
-                pyzebra.merge_datasets(dataset, file_data, log=log)
+                pyzebra.merge_datasets(dataset, file_data)
 
             if file_data:
                 on_file_open()


@@ -1,6 +1,6 @@
 import argparse
 import logging
-from io import StringIO
+import sys
 
 from bokeh.io import curdoc
 from bokeh.layouts import column, row
@@ -43,17 +43,11 @@ doc.anatric_path = args.anatric_path
 doc.spind_path = args.spind_path
 doc.sxtal_refgen_path = args.sxtal_refgen_path
 
-stream = StringIO()
-handler = logging.StreamHandler(stream)
-handler.setFormatter(
-    logging.Formatter(fmt="%(asctime)s %(levelname)s: %(message)s", datefmt="%Y-%m-%d %H:%M:%S")
-)
-logger = logging.getLogger(str(id(doc)))
-logger.setLevel(logging.INFO)
-logger.addHandler(handler)
-doc.logger = logger
+# In app_hooks.py a StreamHandler was added to "bokeh" logger
+bokeh_stream = logging.getLogger("bokeh").handlers[0].stream
 
-log_textareainput = TextAreaInput(title="Logging output:")
+log_textareainput = TextAreaInput(title="logging output:")
+bokeh_log_textareainput = TextAreaInput(title="server output:")
 
 
 def proposal_textinput_callback(_attr, _old, _new):
@@ -71,7 +65,7 @@ def apply_button_callback():
         try:
             proposal_path = pyzebra.find_proposal_path(proposal)
         except ValueError as e:
-            logger.exception(e)
+            print(e)
             return
         apply_button.disabled = True
     else:
@@ -100,13 +94,14 @@ doc.add_root(
                 panel_spind.create(),
             ]
         ),
-        row(log_textareainput, sizing_mode="scale_both"),
+        row(log_textareainput, bokeh_log_textareainput, sizing_mode="scale_both"),
     )
 )
 
 
 def update_stdout():
-    log_textareainput.value = stream.getvalue()
+    log_textareainput.value = sys.stdout.getvalue()
+    bokeh_log_textareainput.value = bokeh_stream.getvalue()
 
 
 doc.add_periodic_callback(update_stdout, 1000)


@@ -33,7 +33,6 @@ from pyzebra import EXPORT_TARGETS, app
 def create():
     doc = curdoc()
-    log = doc.logger
     dataset1 = []
     dataset2 = []
     app_dlfiles = app.DownloadFiles(n_files=2)
@@ -95,7 +94,7 @@ def create():
     def file_open_button_callback():
         if len(file_select.value) != 2:
-            log.warning("Select exactly 2 .ccl files.")
+            print("WARNING: Select exactly 2 .ccl files.")
             return
 
         new_data1 = []
@@ -105,13 +104,13 @@ def create():
                 f_name = os.path.basename(f_path)
                 base, ext = os.path.splitext(f_name)
                 try:
-                    file_data = pyzebra.parse_1D(file, ext, log=log)
-                except Exception as e:
-                    log.exception(e)
+                    file_data = pyzebra.parse_1D(file, ext)
+                except:
+                    print(f"Error loading {f_name}")
                     return
 
                 pyzebra.normalize_dataset(file_data, monitor_spinner.value)
-                pyzebra.merge_duplicates(file_data, log=log)
+                pyzebra.merge_duplicates(file_data)
 
                 if ind == 0:
                     app_dlfiles.set_names([base, base])
@@ -134,7 +133,7 @@ def create():
     def upload_button_callback(_attr, _old, _new):
         if len(upload_button.filename) != 2:
-            log.warning("Upload exactly 2 .ccl files.")
+            print("WARNING: Upload exactly 2 .ccl files.")
             return
 
         new_data1 = []
@@ -143,13 +142,13 @@ def create():
             with io.StringIO(base64.b64decode(f_str).decode()) as file:
                 base, ext = os.path.splitext(f_name)
                 try:
-                    file_data = pyzebra.parse_1D(file, ext, log=log)
-                except Exception as e:
-                    log.exception(e)
+                    file_data = pyzebra.parse_1D(file, ext)
+                except:
+                    print(f"Error loading {f_name}")
                     return
 
                 pyzebra.normalize_dataset(file_data, monitor_spinner.value)
-                pyzebra.merge_duplicates(file_data, log=log)
+                pyzebra.merge_duplicates(file_data)
 
                 if ind == 0:
                     app_dlfiles.set_names([base, base])
@@ -244,8 +243,8 @@ def create():
     plot = figure(
         x_axis_label="Scan motor",
         y_axis_label="Counts",
-        height=470,
-        width=700,
+        plot_height=470,
+        plot_width=700,
         tools="pan,wheel_zoom,reset",
     )
@@ -378,11 +377,11 @@ def create():
         scan_from2 = dataset2[int(merge_from_select.value)]
 
         if scan_into1 is scan_from1:
-            log.warning("Selected scans for merging are identical")
+            print("WARNING: Selected scans for merging are identical")
             return
 
-        pyzebra.merge_scans(scan_into1, scan_from1, log=log)
-        pyzebra.merge_scans(scan_into2, scan_from2, log=log)
+        pyzebra.merge_scans(scan_into1, scan_from1)
+        pyzebra.merge_scans(scan_into2, scan_from2)
 
         _update_table()
         _update_plot()


@@ -2,7 +2,6 @@ import os
 import tempfile
 
 import numpy as np
-from bokeh.io import curdoc
 from bokeh.layouts import column, row
 from bokeh.models import (
     Button,
@@ -26,8 +25,6 @@ from pyzebra import EXPORT_TARGETS, app
 def create():
-    doc = curdoc()
-    log = doc.logger
     dataset = []
     app_dlfiles = app.DownloadFiles(n_files=2)
@@ -115,8 +112,8 @@ def create():
     plot = figure(
         x_axis_label="Scan motor",
         y_axis_label="Counts",
-        height=470,
-        width=700,
+        plot_height=470,
+        plot_width=700,
         tools="pan,wheel_zoom,reset",
     )
@@ -217,10 +214,10 @@ def create():
         scan_from = dataset[int(merge_from_select.value)]
 
         if scan_into is scan_from:
-            log.warning("Selected scans for merging are identical")
+            print("WARNING: Selected scans for merging are identical")
             return
 
-        pyzebra.merge_scans(scan_into, scan_from, log=log)
+        pyzebra.merge_scans(scan_into, scan_from)
 
         _update_table()
         _update_plot()


@@ -5,7 +5,6 @@ import subprocess
 import tempfile
 
 import numpy as np
-from bokeh.io import curdoc
 from bokeh.layouts import column, row
 from bokeh.models import (
     Arrow,
@@ -40,8 +39,6 @@ SORT_OPT_NB = ["gamma", "nu", "omega"]
 def create():
-    doc = curdoc()
-    log = doc.logger
     ang_lims = {}
     cif_data = {}
     params = {}
@@ -135,11 +132,7 @@ def create():
         params = dict()
         params["SPGR"] = cryst_space_group.value
         params["CELL"] = cryst_cell.value
-        try:
-            ub = pyzebra.calc_ub_matrix(params, log=log)
-        except Exception as e:
-            log.exception(e)
-            return
+        ub = pyzebra.calc_ub_matrix(params)
         ub_matrix.value = " ".join(ub)
 
     ub_matrix_calc = Button(label="UB matrix:", button_type="primary", width=100)
@@ -228,9 +221,9 @@ def create():
             geom_template = None
         pyzebra.export_geom_file(geom_path, ang_lims, geom_template)
 
-        log.info(f"Content of {geom_path}:")
+        print(f"Content of {geom_path}:")
         with open(geom_path) as f:
-            log.info(f.read())
+            print(f.read())
 
         priority = [sorting_0.value, sorting_1.value, sorting_2.value]
         chunks = [sorting_0_dt.value, sorting_1_dt.value, sorting_2_dt.value]
@@ -255,9 +248,9 @@ def create():
             cfl_template = None
         pyzebra.export_cfl_file(cfl_path, params, cfl_template)
 
-        log.info(f"Content of {cfl_path}:")
+        print(f"Content of {cfl_path}:")
         with open(cfl_path) as f:
-            log.info(f.read())
+            print(f.read())
 
         comp_proc = subprocess.run(
             [pyzebra.SXTAL_REFGEN_PATH, cfl_path],
@@ -267,8 +260,8 @@ def create():
             stderr=subprocess.STDOUT,
             text=True,
         )
-        log.info(" ".join(comp_proc.args))
-        log.info(comp_proc.stdout)
+        print(" ".join(comp_proc.args))
+        print(comp_proc.stdout)
 
         if i == 1:  # all hkl files are identical, so keep only one
             hkl_fname = base_fname + ".hkl"
@@ -598,8 +591,8 @@ def create():
             _, ext = os.path.splitext(fname)
             try:
                 file_data = pyzebra.parse_hkl(file, ext)
-            except Exception as e:
-                log.exception(e)
+            except:
+                print(f"Error loading {fname}")
                 return
 
             fnames.append(fname)
@@ -611,7 +604,7 @@ def create():
     plot_file = Button(label="Plot selected file(s)", button_type="primary", width=200)
     plot_file.on_click(plot_file_callback)
 
-    plot = figure(height=550, width=550 + 32, tools="pan,wheel_zoom,reset")
+    plot = figure(plot_height=550, plot_width=550 + 32, tools="pan,wheel_zoom,reset")
     plot.toolbar.logo = None
     plot.xaxis.visible = False


@@ -24,7 +24,6 @@ from pyzebra import DATA_FACTORY_IMPLEMENTATION, REFLECTION_PRINTER_FORMATS
 def create():
     doc = curdoc()
-    log = doc.logger
     config = pyzebra.AnatricConfig()
 
     def _load_config_file(file):
@@ -348,11 +347,7 @@ def create():
         with tempfile.TemporaryDirectory() as temp_dir:
             temp_file = temp_dir + "/config.xml"
             config.save_as(temp_file)
-            try:
-                pyzebra.anatric(temp_file, anatric_path=doc.anatric_path, cwd=temp_dir, log=log)
-            except Exception as e:
-                log.exception(e)
-                return
+            pyzebra.anatric(temp_file, anatric_path=doc.anatric_path, cwd=temp_dir)
 
             with open(os.path.join(temp_dir, config.logfile)) as f_log:
                 output_log.value = f_log.read()


@@ -36,7 +36,6 @@ IMAGE_PLOT_H = int(IMAGE_H * 2.4) + 27
 def create():
     doc = curdoc()
-    log = doc.logger
     dataset = []
     cami_meta = {}
@@ -134,8 +133,8 @@ def create():
         for f_name in file_select.value:
             try:
                 new_data.append(pyzebra.read_detector_data(f_name))
-            except KeyError as e:
-                log.exception(e)
+            except KeyError:
+                print("Could not read data from the file.")
                 return
 
         dataset.extend(new_data)
@@ -276,7 +275,7 @@ def create():
         frame_range.bounds = (0, n_im)
 
         scan_motor = scan["scan_motor"]
-        proj_y_plot.yaxis.axis_label = f"Scanning motor, {scan_motor}"
+        proj_y_plot.axis[1].axis_label = f"Scanning motor, {scan_motor}"
 
         var = scan[scan_motor]
         var_start = var[0]
@@ -302,8 +301,8 @@ def create():
         x_range=det_x_range,
         y_range=frame_range,
         extra_y_ranges={"scanning_motor": scanning_motor_range},
-        height=540,
-        width=IMAGE_PLOT_W - 3,
+        plot_height=540,
+        plot_width=IMAGE_PLOT_W - 3,
         tools="pan,box_zoom,wheel_zoom,reset",
         active_scroll="wheel_zoom",
     )
@@ -326,8 +325,8 @@ def create():
         x_range=det_y_range,
         y_range=frame_range,
         extra_y_ranges={"scanning_motor": scanning_motor_range},
-        height=540,
-        width=IMAGE_PLOT_H + 22,
+        plot_height=540,
+        plot_width=IMAGE_PLOT_H + 22,
         tools="pan,box_zoom,wheel_zoom,reset",
         active_scroll="wheel_zoom",
     )
@@ -353,8 +352,8 @@ def create():
     colormap_select.on_change("value", colormap_select_callback)
     colormap_select.value = "Plasma256"
 
-    def proj_auto_checkbox_callback(_attr, _old, new):
-        if 0 in new:
+    def proj_auto_checkbox_callback(state):
+        if state:
             proj_display_min_spinner.disabled = True
             proj_display_max_spinner.disabled = True
         else:
@@ -366,7 +365,7 @@ def create():
     proj_auto_checkbox = CheckboxGroup(
         labels=["Projections Intensity Range"], active=[0], width=145, margin=[10, 5, 0, 5]
     )
-    proj_auto_checkbox.on_change("active", proj_auto_checkbox_callback)
+    proj_auto_checkbox.on_click(proj_auto_checkbox_callback)
 
     def proj_display_max_spinner_callback(_attr, _old, new):
         color_mapper_proj.high = new
@@ -412,8 +411,8 @@ def create():
     param_plot = figure(
         x_axis_label="Parameter",
         y_axis_label="Fit parameter",
-        height=400,
-        width=700,
+        plot_height=400,
+        plot_width=700,
         tools="pan,wheel_zoom,reset",
     )
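
Note: many hunks in this and the surrounding panels are one and the same Bokeh API
migration: the 0.7.3 side uses plot_height/plot_width (deprecated in Bokeh 2.4, removed in
3.0) and the widget .on_click callbacks that Bokeh 3 dropped, while main uses height/width
and registers widget callbacks via on_change("active", ...). A minimal sketch of the
new-style wiring, assuming a current Bokeh is installed:

    from bokeh.models import CheckboxGroup
    from bokeh.plotting import figure

    plot = figure(height=470, width=700)  # instead of plot_height/plot_width

    def auto_checkbox_callback(_attr, _old, new):
        # "new" is the list of active (checked) indices;
        # bokeh 2's on_click passed that list directly
        print("auto range", "on" if 0 in new else "off")

    auto_checkbox = CheckboxGroup(labels=["Frame Intensity Range"], active=[0])
    auto_checkbox.on_change("active", auto_checkbox_callback)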


@@ -43,7 +43,6 @@ IMAGE_PLOT_H = int(IMAGE_H * 2.4) + 27
 def create():
     doc = curdoc()
-    log = doc.logger
     dataset = []
     cami_meta = {}
@@ -103,8 +102,8 @@ def create():
         nonlocal dataset
         try:
             scan = pyzebra.read_detector_data(io.BytesIO(base64.b64decode(new)), None)
-        except Exception as e:
-            log.exception(e)
+        except KeyError:
+            print("Could not read data from the file.")
             return
 
         dataset = [scan]
@@ -138,8 +137,8 @@ def create():
             f_name = os.path.basename(f_path)
             try:
                 file_data = [pyzebra.read_detector_data(f_path, cm)]
-            except Exception as e:
-                log.exception(e)
+            except:
+                print(f"Error loading {f_name}")
                 continue
 
             pyzebra.normalize_dataset(file_data, monitor_spinner.value)
@@ -147,7 +146,7 @@ def create():
             if not new_data:  # first file
                 new_data = file_data
             else:
-                pyzebra.merge_datasets(new_data, file_data, log=log)
+                pyzebra.merge_datasets(new_data, file_data)
 
         if new_data:
             dataset = new_data
@@ -162,12 +161,12 @@ def create():
             f_name = os.path.basename(f_path)
             try:
                 file_data = [pyzebra.read_detector_data(f_path, None)]
-            except Exception as e:
-                log.exception(e)
+            except:
+                print(f"Error loading {f_name}")
                 continue
 
             pyzebra.normalize_dataset(file_data, monitor_spinner.value)
-            pyzebra.merge_datasets(dataset, file_data, log=log)
+            pyzebra.merge_datasets(dataset, file_data)
 
         if file_data:
             _init_datatable()
@@ -293,10 +292,10 @@ def create():
         scan_from = dataset[int(merge_from_select.value)]
 
         if scan_into is scan_from:
-            log.warning("Selected scans for merging are identical")
+            print("WARNING: Selected scans for merging are identical")
             return
 
-        pyzebra.merge_h5_scans(scan_into, scan_from, log=log)
+        pyzebra.merge_h5_scans(scan_into, scan_from)
 
         _update_table()
         _update_image()
         _update_proj_plots()
@@ -357,8 +356,8 @@ def create():
             gamma_c = gamma[det_c_y, det_c_x]
             nu_c = nu[det_c_y, det_c_x]
             omega_c = omega[det_c_y, det_c_x]
-            chi_c = scan["chi"][index]
-            phi_c = scan["phi"][index]
+            chi_c = None
+            phi_c = None
 
         else:  # zebra_mode == "bi"
             wave = scan["wave"]
@@ -407,7 +406,7 @@ def create():
         frame_range.bounds = (0, n_im)
 
         scan_motor = scan["scan_motor"]
-        proj_y_plot.yaxis.axis_label = f"Scanning motor, {scan_motor}"
+        proj_y_plot.axis[1].axis_label = f"Scanning motor, {scan_motor}"
 
         var = scan[scan_motor]
         var_start = var[0]
@@ -459,8 +458,8 @@ def create():
         y_range=Range1d(0, IMAGE_H, bounds=(0, IMAGE_H)),
         x_axis_location="above",
         y_axis_location="right",
-        height=IMAGE_PLOT_H,
-        width=IMAGE_PLOT_W,
+        plot_height=IMAGE_PLOT_H,
+        plot_width=IMAGE_PLOT_W,
         toolbar_location="left",
         tools="pan,box_zoom,wheel_zoom,reset",
         active_scroll="wheel_zoom",
@@ -510,8 +509,8 @@ def create():
     proj_v = figure(
         x_range=plot.x_range,
         y_axis_location="right",
-        height=150,
-        width=IMAGE_PLOT_W,
+        plot_height=150,
+        plot_width=IMAGE_PLOT_W,
         tools="",
         toolbar_location=None,
     )
@@ -525,8 +524,8 @@ def create():
     proj_h = figure(
         x_axis_location="above",
         y_range=plot.y_range,
-        height=IMAGE_PLOT_H,
-        width=150,
+        plot_height=IMAGE_PLOT_H,
+        plot_width=150,
         tools="",
         toolbar_location=None,
     )
@@ -590,8 +589,8 @@ def create():
         y_range=frame_range,
         extra_x_ranges={"gamma": gamma_range},
         extra_y_ranges={"scanning_motor": scanning_motor_range},
-        height=540,
-        width=IMAGE_PLOT_W - 3,
+        plot_height=540,
+        plot_width=IMAGE_PLOT_W - 3,
         tools="pan,box_zoom,wheel_zoom,reset",
         active_scroll="wheel_zoom",
     )
@@ -618,8 +617,8 @@ def create():
         y_range=frame_range,
         extra_x_ranges={"nu": nu_range},
         extra_y_ranges={"scanning_motor": scanning_motor_range},
-        height=540,
-        width=IMAGE_PLOT_H + 22,
+        plot_height=540,
+        plot_width=IMAGE_PLOT_H + 22,
         tools="pan,box_zoom,wheel_zoom,reset",
         active_scroll="wheel_zoom",
     )
@@ -637,7 +636,7 @@ def create():
     proj_y_image = proj_y_plot.image(source=proj_y_image_source, color_mapper=lin_color_mapper_proj)
 
     # ROI slice plot
-    roi_avg_plot = figure(height=150, width=IMAGE_PLOT_W, tools="", toolbar_location=None)
+    roi_avg_plot = figure(plot_height=150, plot_width=IMAGE_PLOT_W, tools="", toolbar_location=None)
 
     roi_avg_plot_line_source = ColumnDataSource(dict(x=[], y=[]))
     roi_avg_plot.line(source=roi_avg_plot_line_source, line_color="steelblue")
@@ -656,8 +655,8 @@ def create():
     colormap_select.on_change("value", colormap_select_callback)
     colormap_select.value = "Plasma256"
 
-    def colormap_scale_rg_callback(_attr, _old, new):
-        if new == 0:  # Linear
+    def colormap_scale_rg_callback(selection):
+        if selection == 0:  # Linear
             plot_image.glyph.color_mapper = lin_color_mapper
             proj_x_image.glyph.color_mapper = lin_color_mapper_proj
             proj_y_image.glyph.color_mapper = lin_color_mapper_proj
@@ -676,10 +675,10 @@ def create():
             colormap_scale_rg.active = 0
 
     colormap_scale_rg = RadioGroup(labels=["Linear", "Logarithmic"], active=0, width=100)
-    colormap_scale_rg.on_change("active", colormap_scale_rg_callback)
+    colormap_scale_rg.on_click(colormap_scale_rg_callback)
 
-    def main_auto_checkbox_callback(_attr, _old, new):
-        if 0 in new:
+    def main_auto_checkbox_callback(state):
+        if state:
             display_min_spinner.disabled = True
             display_max_spinner.disabled = True
         else:
@@ -691,7 +690,7 @@ def create():
     main_auto_checkbox = CheckboxGroup(
         labels=["Frame Intensity Range"], active=[0], width=145, margin=[10, 5, 0, 5]
     )
-    main_auto_checkbox.on_change("active", main_auto_checkbox_callback)
+    main_auto_checkbox.on_click(main_auto_checkbox_callback)
 
     def display_max_spinner_callback(_attr, _old, new):
         lin_color_mapper.high = new
@@ -710,8 +709,8 @@ def create():
     display_min_spinner = Spinner(value=0, disabled=bool(main_auto_checkbox.active), width=100)
     display_min_spinner.on_change("value", display_min_spinner_callback)
 
-    def proj_auto_checkbox_callback(_attr, _old, new):
-        if 0 in new:
+    def proj_auto_checkbox_callback(state):
+        if state:
             proj_display_min_spinner.disabled = True
             proj_display_max_spinner.disabled = True
         else:
@@ -723,7 +722,7 @@ def create():
     proj_auto_checkbox = CheckboxGroup(
         labels=["Projections Intensity Range"], active=[0], width=145, margin=[10, 5, 0, 5]
    )
-    proj_auto_checkbox.on_change("active", proj_auto_checkbox_callback)
+    proj_auto_checkbox.on_click(proj_auto_checkbox_callback)
 
    def proj_display_max_spinner_callback(_attr, _old, new):
         lin_color_mapper_proj.high = new
@@ -812,7 +811,7 @@ def create():
         gamma = scan["gamma"][0]
         omega = scan["omega"][0]
-        nu = scan["nu"]
+        nu = scan["nu"][0]
         chi = scan["chi"][0]
         phi = scan["phi"][0]
@@ -843,6 +842,10 @@ def create():
         x_pos = scan["fit"]["x_pos"]
         y_pos = scan["fit"]["y_pos"]
 
+        if scan["zebra_mode"] == "nb":
+            chi = None
+            phi = None
+
         events_data["wave"].append(wave)
         events_data["ddist"].append(ddist)
         events_data["cell"].append(cell)


@@ -3,7 +3,6 @@ import os
 import tempfile
 
 import numpy as np
-from bokeh.io import curdoc
 from bokeh.layouts import column, row
 from bokeh.models import (
     Button,
@@ -39,8 +38,6 @@ def color_palette(n_colors):
 def create():
-    doc = curdoc()
-    log = doc.logger
     dataset = []
     app_dlfiles = app.DownloadFiles(n_files=1)
@@ -212,8 +209,8 @@ def create():
     plot = figure(
         x_axis_label="Scan motor",
         y_axis_label="Counts",
-        height=450,
-        width=700,
+        plot_height=450,
+        plot_width=700,
         tools="pan,wheel_zoom,reset",
     )
@@ -246,8 +243,8 @@ def create():
     ov_plot = figure(
         x_axis_label="Scan motor",
         y_axis_label="Counts",
-        height=450,
-        width=700,
+        plot_height=450,
+        plot_width=700,
         tools="pan,wheel_zoom,reset",
     )
@@ -264,8 +261,8 @@ def create():
         y_axis_label="Param",
         x_range=Range1d(),
         y_range=Range1d(),
-        height=450,
-        width=700,
+        plot_height=450,
+        plot_width=700,
         tools="pan,wheel_zoom,reset",
     )
@@ -282,8 +279,8 @@ def create():
     param_plot = figure(
         x_axis_label="Parameter",
         y_axis_label="Fit parameter",
-        height=400,
-        width=700,
+        plot_height=400,
+        plot_width=700,
         tools="pan,wheel_zoom,reset",
     )
@@ -364,10 +361,10 @@ def create():
         scan_from = dataset[int(merge_from_select.value)]
 
         if scan_into is scan_from:
-            log.warning("Selected scans for merging are identical")
+            print("WARNING: Selected scans for merging are identical")
             return
 
-        pyzebra.merge_scans(scan_into, scan_from, log=log)
+        pyzebra.merge_scans(scan_into, scan_from)
 
         _update_table()
         _update_single_scan_plot()
         _update_overview()


@@ -3,7 +3,6 @@ import io
 import os
 
 import numpy as np
-from bokeh.io import curdoc
 from bokeh.layouts import column, row
 from bokeh.models import (
     Button,
@@ -32,8 +31,6 @@ from pyzebra.app.panel_hdf_viewer import calculate_hkl
 def create():
-    doc = curdoc()
-    log = doc.logger
     _update_slice = None
     measured_data_div = Div(text="Measured <b>HDF</b> data:")
     measured_data = FileInput(accept=".hdf", multiple=True, width=200)
@@ -62,8 +59,8 @@ def create():
         # Read data
         try:
             det_data = pyzebra.read_detector_data(io.BytesIO(base64.b64decode(fdata)))
-        except Exception as e:
-            log.exception(e)
+        except:
+            print(f"Error loading {fname}")
             return None
 
         if ind == 0:
@@ -182,8 +179,8 @@ def create():
             _, ext = os.path.splitext(fname)
             try:
                 fdata = pyzebra.parse_hkl(file, ext)
-            except Exception as e:
-                log.exception(e)
+            except:
+                print(f"Error loading {fname}")
                 return
 
             for ind in range(len(fdata["counts"])):
@@ -293,8 +290,8 @@ def create():
     plot = figure(
         x_range=DataRange1d(),
         y_range=DataRange1d(),
-        height=550 + 27,
-        width=550 + 117,
+        plot_height=550 + 27,
+        plot_width=550 + 117,
         tools="pan,wheel_zoom,reset",
     )
     plot.toolbar.logo = None
@@ -327,7 +324,7 @@ def create():
     hkl_in_plane_y = TextInput(title="in-plane Y", value="", width=100, disabled=True)
 
     def redef_lattice_cb_callback(_attr, _old, new):
-        if 0 in new:
+        if new:
             redef_lattice_ti.disabled = False
         else:
             redef_lattice_ti.disabled = True
@@ -337,7 +334,7 @@ def create():
     redef_lattice_ti = TextInput(width=490, disabled=True)
 
     def redef_ub_cb_callback(_attr, _old, new):
-        if 0 in new:
+        if new:
             redef_ub_ti.disabled = False
         else:
             redef_ub_ti.disabled = True
@@ -372,8 +369,8 @@ def create():
     display_max_ni = NumericInput(title="max:", value=1, mode="float", width=70)
     display_max_ni.on_change("value", display_max_ni_callback)
 
-    def colormap_scale_rg_callback(_attr, _old, new):
-        if new == 0:  # Linear
+    def colormap_scale_rg_callback(selection):
+        if selection == 0:  # Linear
             plot_image.glyph.color_mapper = lin_color_mapper
             lin_color_bar.visible = True
             log_color_bar.visible = False
@@ -387,7 +384,7 @@ def create():
             colormap_scale_rg.active = 0
 
     colormap_scale_rg = RadioGroup(labels=["Linear", "Logarithmic"], active=0, width=100)
-    colormap_scale_rg.on_change("active", colormap_scale_rg_callback)
+    colormap_scale_rg.on_click(colormap_scale_rg_callback)
 
     xrange_min_ni = NumericInput(title="x range min:", value=0, mode="float", width=70)
     xrange_max_ni = NumericInput(title="max:", value=1, mode="float", width=70)
@@ -398,7 +395,7 @@ def create():
     yrange_step_ni = NumericInput(title="y mesh:", value=0.01, mode="float", width=70)
 
     def auto_range_cb_callback(_attr, _old, new):
-        if 0 in new:
+        if new:
             xrange_min_ni.disabled = True
             xrange_max_ni.disabled = True
             yrange_min_ni.disabled = True


@@ -21,7 +21,6 @@ import pyzebra
 def create():
     doc = curdoc()
-    log = doc.logger
     events_data = doc.events_data
 
     npeaks_spinner = Spinner(title="Number of peaks from hdf_view panel:", disabled=True)
@@ -64,8 +63,8 @@ def create():
             stderr=subprocess.STDOUT,
             text=True,
         )
-        log.info(" ".join(comp_proc.args))
-        log.info(comp_proc.stdout)
+        print(" ".join(comp_proc.args))
+        print(comp_proc.stdout)
 
         # prepare an event file
         diff_vec = []
@@ -95,9 +94,9 @@ def create():
                     f"{x_pos} {y_pos} {intensity} {snr_cnts} {dv1} {dv2} {dv3} {d_spacing}\n"
                 )
 
-        log.info(f"Content of {temp_event_file}:")
+        print(f"Content of {temp_event_file}:")
         with open(temp_event_file) as f:
-            log.info(f.read())
+            print(f.read())
 
         comp_proc = subprocess.run(
             [
@@ -124,8 +123,8 @@ def create():
            stderr=subprocess.STDOUT,
             text=True,
         )
-        log.info(" ".join(comp_proc.args))
-        log.info(comp_proc.stdout)
+        print(" ".join(comp_proc.args))
+        print(comp_proc.stdout)
 
         spind_out_file = os.path.join(temp_dir, "spind.txt")
         spind_res = dict(
@@ -147,12 +146,12 @@ def create():
                     ub_matrices.append(ub_matrix_spind)
                     spind_res["ub_matrix"].append(str(ub_matrix_spind * 1e-10))
 
-            log.info(f"Content of {spind_out_file}:")
+            print(f"Content of {spind_out_file}:")
             with open(spind_out_file) as f:
-                log.info(f.read())
+                print(f.read())
 
         except FileNotFoundError:
-            log.warning("No results from spind")
+            print("No results from spind")
 
         results_table_source.data.update(spind_res)


@@ -3,7 +3,6 @@ import io
 import os
 
 import numpy as np
-from bokeh.io import curdoc
 from bokeh.layouts import column, row
 from bokeh.models import (
     Arrow,
@@ -31,9 +30,6 @@ import pyzebra
 class PlotHKL:
     def __init__(self):
-        doc = curdoc()
-        log = doc.logger
-
         _update_slice = None
         measured_data_div = Div(text="Measured <b>CCL</b> data:")
         measured_data = FileInput(accept=".ccl", multiple=True, width=200)
@@ -66,9 +62,9 @@ class PlotHKL:
             with io.StringIO(base64.b64decode(md_fdata[0]).decode()) as file:
                 _, ext = os.path.splitext(md_fnames[0])
                 try:
-                    file_data = pyzebra.parse_1D(file, ext, log=log)
-                except Exception as e:
-                    log.exception(e)
+                    file_data = pyzebra.parse_1D(file, ext)
+                except:
+                    print(f"Error loading {md_fnames[0]}")
                     return None
 
             alpha = file_data[0]["alpha_cell"] * np.pi / 180.0
@@ -148,9 +144,9 @@ class PlotHKL:
                 with io.StringIO(base64.b64decode(md_fdata[j]).decode()) as file:
                     _, ext = os.path.splitext(md_fname)
                     try:
-                        file_data = pyzebra.parse_1D(file, ext, log=log)
-                    except Exception as e:
-                        log.exception(e)
+                        file_data = pyzebra.parse_1D(file, ext)
+                    except:
+                        print(f"Error loading {md_fname}")
                         return None
 
                 pyzebra.normalize_dataset(file_data)
@@ -295,8 +291,8 @@ class PlotHKL:
                 _, ext = os.path.splitext(fname)
                 try:
                     fdata = pyzebra.parse_hkl(file, ext)
-                except Exception as e:
-                    log.exception(e)
+                except:
+                    print(f"Error loading {fname}")
                     return
 
                 for ind in range(len(fdata["counts"])):
@@ -445,7 +441,7 @@ class PlotHKL:
         plot_file = Button(label="Plot selected file(s)", button_type="primary", width=200)
         plot_file.on_click(plot_file_callback)
 
-        plot = figure(height=550, width=550 + 32, tools="pan,wheel_zoom,reset")
+        plot = figure(plot_height=550, plot_width=550 + 32, tools="pan,wheel_zoom,reset")
         plot.toolbar.logo = None
         plot.xaxis.visible = False
@@ -521,7 +517,7 @@ class PlotHKL:
         tol_k_ni = NumericInput(title="k tolerance:", value=0.01, mode="float", width=100)
 
         def show_legend_cb_callback(_attr, _old, new):
-            plot.legend.visible = 0 in new
+            plot.legend.visible = bool(new)
 
         show_legend_cb = CheckboxGroup(labels=["Show legend"], active=[0])
         show_legend_cb.on_change("active", show_legend_cb_callback)


@@ -1,4 +1,3 @@
-import logging
 import os
 import re
 from ast import literal_eval
@@ -6,51 +5,44 @@ from collections import defaultdict
 import numpy as np
 
-logger = logging.getLogger(__name__)
-
 META_VARS_STR = (
     "instrument",
     "title",
-    "comment",
+    "sample",
     "user",
-    "proposal_id",
+    "ProposalID",
     "original_filename",
     "date",
     "zebra_mode",
-    "zebramode",
-    "sample_name",
+    "proposal",
+    "proposal_user",
+    "proposal_title",
+    "proposal_email",
+    "detectorDistance",
 )
 
 META_VARS_FLOAT = (
+    "omega",
+    "mf",
+    "2-theta",
+    "chi",
+    "phi",
+    "nu",
+    "temp",
+    "wavelength",
     "a",
     "b",
     "c",
     "alpha",
     "beta",
     "gamma",
-    "omega",
-    "chi",
-    "phi",
-    "temp",
-    "mf",
-    "temperature",
-    "magnetic_field",
     "cex1",
     "cex2",
-    "wavelength",
     "mexz",
     "moml",
     "mcvl",
     "momu",
     "mcvu",
-    "2-theta",
-    "twotheta",
-    "nu",
-    "gamma_angle",
-    "polar_angle",
-    "tilt_angle",
-    "distance",
-    "distance_an",
     "snv",
     "snh",
     "snvm",
@@ -63,13 +55,6 @@ META_VARS_FLOAT = (
     "s2vb",
     "s2hr",
     "s2hl",
-    "a5",
-    "a6",
-    "a4t",
-    "s2ant",
-    "s2anb",
-    "s2anl",
-    "s2anr",
 )
 
 META_UB_MATRIX = ("ub1j", "ub2j", "ub3j", "UB")
@@ -114,7 +99,7 @@ def load_1D(filepath):
     return dataset
 
-def parse_1D(fileobj, data_type, log=logger):
+def parse_1D(fileobj, data_type):
     metadata = {"data_type": data_type}
 
     # read metadata
@@ -131,23 +116,13 @@ def parse_1D(fileobj, data_type, log=logger):
             var_name = var_name.strip()
             value = value.strip()
 
-            if value == "UNKNOWN":
-                metadata[var_name] = None
-                continue
-
             try:
                 if var_name in META_VARS_STR:
-                    if var_name == "zebramode":
-                        var_name = "zebra_mode"
                     metadata[var_name] = value
 
                 elif var_name in META_VARS_FLOAT:
                     if var_name == "2-theta":  # fix that angle name not to be an expression
                         var_name = "twotheta"
-                    if var_name == "temperature":
-                        var_name = "temp"
-                    if var_name == "magnetic_field":
-                        var_name = "mf"
                     if var_name in ("a", "b", "c", "alpha", "beta", "gamma"):
                         var_name += "_cell"
                     metadata[var_name] = float(value)
@@ -162,7 +137,7 @@ def parse_1D(fileobj, data_type, log=logger):
                     metadata["ub"][row, :] = list(map(float, value.split()))
 
             except Exception:
-                log.error(f"Error reading {var_name} with value '{value}'")
+                print(f"Error reading {var_name} with value '{value}'")
                 metadata[var_name] = 0
 
     # handle older files that don't contain "zebra_mode" metadata
@@ -227,18 +202,15 @@ def parse_1D(fileobj, data_type, log=logger):
         dataset.append({**metadata, **scan})
 
     elif data_type == ".dat":
-        # TODO: this might need to be adapted in the future, when "gamma" will be added to dat files
         if metadata["zebra_mode"] == "nb":
-            if "gamma_angle" in metadata:
-                # support for the new format
-                metadata["gamma"] = metadata["gamma_angle"]
-            else:
-                metadata["gamma"] = metadata["twotheta"]
+            metadata["gamma"] = metadata["twotheta"]
 
         scan = defaultdict(list)
         scan["export"] = True
 
         match = re.search("Scanning Variables: (.*), Steps: (.*)", next(fileobj))
-        motors = [motor.strip().lower() for motor in match.group(1).split(",")]
+        motors = [motor.lower() for motor in match.group(1).split(", ")]
         # Steps can be separated by " " or ", "
        steps = [float(step.strip(",")) for step in match.group(2).split()]
@@ -300,7 +272,7 @@ def parse_1D(fileobj, data_type, log=logger):
         dataset.append({**metadata, **scan})
 
     else:
-        log.error("Unknown file extention")
+        print("Unknown file extention")
 
     return dataset


@@ -1,4 +1,3 @@
-import logging
 import os
 
 import numpy as np
@@ -7,8 +6,6 @@ from scipy.integrate import simpson, trapezoid
 from pyzebra import CCL_ANGLES
 
-logger = logging.getLogger(__name__)
-
 PARAM_PRECISIONS = {
     "twotheta": 0.1,
     "chi": 0.1,
@@ -36,12 +33,12 @@ def normalize_dataset(dataset, monitor=100_000):
         scan["monitor"] = monitor
 
-def merge_duplicates(dataset, log=logger):
-    merged = np.zeros(len(dataset), dtype=bool)
+def merge_duplicates(dataset):
+    merged = np.zeros(len(dataset), dtype=np.bool)
     for ind_into, scan_into in enumerate(dataset):
         for ind_from, scan_from in enumerate(dataset[ind_into + 1 :], start=ind_into + 1):
             if _parameters_match(scan_into, scan_from) and not merged[ind_from]:
-                merge_scans(scan_into, scan_from, log=log)
+                merge_scans(scan_into, scan_from)
                 merged[ind_from] = True
@@ -78,30 +75,28 @@ def _parameters_match(scan1, scan2):
     return True
 
-def merge_datasets(dataset_into, dataset_from, log=logger):
+def merge_datasets(dataset_into, dataset_from):
     scan_motors_into = dataset_into[0]["scan_motors"]
     scan_motors_from = dataset_from[0]["scan_motors"]
     if scan_motors_into != scan_motors_from:
-        log.warning(
-            f"Scan motors mismatch between datasets: {scan_motors_into} vs {scan_motors_from}"
-        )
+        print(f"Scan motors mismatch between datasets: {scan_motors_into} vs {scan_motors_from}")
         return
 
-    merged = np.zeros(len(dataset_from), dtype=bool)
+    merged = np.zeros(len(dataset_from), dtype=np.bool)
     for scan_into in dataset_into:
         for ind, scan_from in enumerate(dataset_from):
             if _parameters_match(scan_into, scan_from) and not merged[ind]:
                 if scan_into["counts"].ndim == 3:
-                    merge_h5_scans(scan_into, scan_from, log=log)
+                    merge_h5_scans(scan_into, scan_from)
                 else:  # scan_into["counts"].ndim == 1
-                    merge_scans(scan_into, scan_from, log=log)
+                    merge_scans(scan_into, scan_from)
                 merged[ind] = True
 
     for scan_from in dataset_from:
         dataset_into.append(scan_from)
 
-def merge_scans(scan_into, scan_from, log=logger):
+def merge_scans(scan_into, scan_from):
     if "init_scan" not in scan_into:
         scan_into["init_scan"] = scan_into.copy()
@@ -153,10 +148,10 @@ def merge_scans(scan_into, scan_from, log=logger):
     fname1 = os.path.basename(scan_into["original_filename"])
     fname2 = os.path.basename(scan_from["original_filename"])
-    log.info(f'Merging scans: {scan_into["idx"]} ({fname1}) <-- {scan_from["idx"]} ({fname2})')
+    print(f'Merging scans: {scan_into["idx"]} ({fname1}) <-- {scan_from["idx"]} ({fname2})')
 
-def merge_h5_scans(scan_into, scan_from, log=logger):
+def merge_h5_scans(scan_into, scan_from):
     if "init_scan" not in scan_into:
         scan_into["init_scan"] = scan_into.copy()
@@ -165,7 +160,7 @@ def merge_h5_scans(scan_into, scan_from, log=logger):
     for scan in scan_into["merged_scans"]:
         if scan_from is scan:
-            log.warning("Already merged scan")
+            print("Already merged scan")
             return
 
     scan_into["merged_scans"].append(scan_from)
@@ -217,7 +212,7 @@ def merge_h5_scans(scan_into, scan_from, log=logger):
     fname1 = os.path.basename(scan_into["original_filename"])
     fname2 = os.path.basename(scan_from["original_filename"])
-    log.info(f'Merging scans: {scan_into["idx"]} ({fname1}) <-- {scan_from["idx"]} ({fname2})')
+    print(f'Merging scans: {scan_into["idx"]} ({fname1}) <-- {scan_from["idx"]} ({fname2})')
 
 
 def restore_scan(scan):
@@ -235,7 +230,7 @@ def restore_scan(scan):
     scan["export"] = True
 
-def fit_scan(scan, model_dict, fit_from=None, fit_to=None, log=logger):
+def fit_scan(scan, model_dict, fit_from=None, fit_to=None):
     if fit_from is None:
         fit_from = -np.inf
     if fit_to is None:
@@ -248,7 +243,7 @@ def fit_scan(scan, model_dict, fit_from=None, fit_to=None, log=logger):
     # apply fitting range
     fit_ind = (fit_from <= x_fit) & (x_fit <= fit_to)
     if not np.any(fit_ind):
-        log.warning(f"No data in fit range for scan {scan['idx']}")
+        print(f"No data in fit range for scan {scan['idx']}")
         return
 
     y_fit = y_fit[fit_ind]
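
Note: the dtype edits in this file and in the next one track NumPy's alias removal: np.bool
and np.float were deprecated in NumPy 1.20 and removed in 1.24, so the main side uses the
builtins (or an explicit np.float64). For illustration:

    import numpy as np

    merged = np.zeros(3, dtype=bool)                  # was: dtype=np.bool
    cell = np.array("1,2,3".split(","), dtype=float)  # was: dtype=np.float
    counts = np.arange(6).astype(np.float64)          # explicit width where it matters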


@@ -47,9 +47,9 @@ def parse_h5meta(file):
             if variable in META_STR:
                 pass
             elif variable in META_CELL:
-                value = np.array(value.split(",")[:6], dtype=float)
+                value = np.array(value.split(",")[:6], dtype=np.float)
             elif variable in META_MATRIX:
-                value = np.array(value.split(",")[:9], dtype=float).reshape(3, 3)
+                value = np.array(value.split(",")[:9], dtype=np.float).reshape(3, 3)
             else:  # default is a single float number
                 value = float(value)
             content[section][variable] = value
@@ -69,7 +69,7 @@ def read_detector_data(filepath, cami_meta=None):
         ndarray: A 3D array of data, omega, gamma, nu.
     """
     with h5py.File(filepath, "r") as h5f:
-        counts = h5f["/entry1/area_detector2/data"][:].astype(float)
+        counts = h5f["/entry1/area_detector2/data"][:].astype(np.float64)
         n, cols, rows = counts.shape
 
         if "/entry1/experiment_identifier" in h5f:  # old format
@@ -114,14 +114,10 @@ def read_detector_data(filepath, cami_meta=None):
         scan["nu"] = h5f["/entry1/ZEBRA/area_detector2/tilt_angle"][0]
         scan["ddist"] = h5f["/entry1/ZEBRA/area_detector2/distance"][0]
         scan["wave"] = h5f["/entry1/ZEBRA/monochromator/wavelength"][0]
-        if scan["zebra_mode"] == "nb":
-            scan["chi"] = np.array([180])
-            scan["phi"] = np.array([0])
-        elif scan["zebra_mode"] == "bi":
-            scan["chi"] = h5f["/entry1/sample/chi"][:]
-            scan["phi"] = h5f["/entry1/sample/phi"][:]
+        scan["chi"] = h5f["/entry1/sample/chi"][:]
         if len(scan["chi"]) == 1:
             scan["chi"] = np.ones(n) * scan["chi"]
+        scan["phi"] = h5f["/entry1/sample/phi"][:]
         if len(scan["phi"]) == 1:
             scan["phi"] = np.ones(n) * scan["phi"]
 
         if h5f["/entry1/sample/UB"].size == 0:
@@ -148,21 +144,11 @@ def read_detector_data(filepath, cami_meta=None):
         if "/entry1/sample/magnetic_field" in h5f:
             scan["mf"] = h5f["/entry1/sample/magnetic_field"][:]
 
-        if "mf" in scan:
-            # TODO: NaNs are not JSON compliant, so replace them with None
-            # this is not a great solution, but makes it safe to use the array in bokeh
-            scan["mf"] = np.where(np.isnan(scan["mf"]), None, scan["mf"])
-
         if "/entry1/sample/temperature" in h5f:
             scan["temp"] = h5f["/entry1/sample/temperature"][:]
         elif "/entry1/sample/Ts/value" in h5f:
             scan["temp"] = h5f["/entry1/sample/Ts/value"][:]
 
-        if "temp" in scan:
-            # TODO: NaNs are not JSON compliant, so replace them with None
-            # this is not a great solution, but makes it safe to use the array in bokeh
-            scan["temp"] = np.where(np.isnan(scan["temp"]), None, scan["temp"])
-
         # overwrite metadata from .cami
         if cami_meta is not None:
             if "crystal" in cami_meta:


@@ -1,5 +1,4 @@
 import io
-import logging
 import os
 import subprocess
 import tempfile
@@ -7,9 +6,7 @@ from math import ceil, floor
 import numpy as np
 
-logger = logging.getLogger(__name__)
-
-SXTAL_REFGEN_PATH = "/afs/psi.ch/project/sinq/rhel8/bin/Sxtal_Refgen"
+SXTAL_REFGEN_PATH = "/afs/psi.ch/project/sinq/rhel7/bin/Sxtal_Refgen"
 
 _zebraBI_default_geom = """GEOM 2 Bissecting - HiCHI
 BLFR z-up
@@ -147,7 +144,7 @@ def export_geom_file(path, ang_lims, template=None):
                 out_file.write(f"{'':<8}{ang:<10}{vals[0]:<10}{vals[1]:<10}{vals[2]:<10}\n")
 
-def calc_ub_matrix(params, log=logger):
+def calc_ub_matrix(params):
     with tempfile.TemporaryDirectory() as temp_dir:
         cfl_file = os.path.join(temp_dir, "ub_matrix.cfl")
@@ -163,8 +160,8 @@ def calc_ub_matrix(params, log=logger):
             stderr=subprocess.STDOUT,
             text=True,
         )
-        log.info(" ".join(comp_proc.args))
-        log.info(comp_proc.stdout)
+        print(" ".join(comp_proc.args))
+        print(comp_proc.stdout)
 
         sfa_file = os.path.join(temp_dir, "ub_matrix.sfa")
         ub_matrix = []

scripts/pyzebra-test.service (new file)

@@ -0,0 +1,11 @@
+[Unit]
+Description=pyzebra-test web server
+
+[Service]
+Type=simple
+User=pyzebra
+ExecStart=/bin/bash /usr/local/sbin/pyzebra-test.sh
+Restart=always
+
+[Install]
+WantedBy=multi-user.target

scripts/pyzebra-test.sh (new file)

@@ -0,0 +1,4 @@
+source /opt/miniconda3/etc/profile.d/conda.sh
+conda activate test
+
+python /opt/pyzebra/pyzebra/app/cli.py --port=5010 --allow-websocket-origin=pyzebra.psi.ch:5010 --args --spind-path=/opt/spind

scripts/pyzebra.service (new file)

@@ -0,0 +1,10 @@
+[Unit]
+Description=pyzebra web server
+
+[Service]
+Type=simple
+ExecStart=/bin/bash /usr/local/sbin/pyzebra.sh
+Restart=always
+
+[Install]
+WantedBy=multi-user.target

scripts/pyzebra.sh (new file)

@@ -0,0 +1,4 @@
+source /opt/miniconda3/etc/profile.d/conda.sh
+conda activate prod
+
+pyzebra --port=80 --allow-websocket-origin=pyzebra.psi.ch:80 --args --spind-path=/opt/spind