Compare commits


No commits in common. "main" and "0.7.5" have entirely different histories.
main ... 0.7.5

29 changed files with 196 additions and 247 deletions

@@ -1,53 +0,0 @@
-name: pyzebra CI/CD pipeline
-
-on:
-  push:
-    branches:
-      - main
-    tags:
-      - '*'
-
-env:
-  CONDA: /opt/miniforge3
-
-jobs:
-  prepare:
-    runs-on: pyzebra
-    steps:
-      - run: $CONDA/bin/conda config --add channels conda-forge
-      - run: $CONDA/bin/conda config --set solver libmamba
-
-  test-env:
-    runs-on: pyzebra
-    needs: prepare
-    if: github.ref == 'refs/heads/main'
-    env:
-      BUILD_DIR: ${{ runner.temp }}/conda_build
-    steps:
-      - name: Checkout repository
-        uses: actions/checkout@v4
-      - run: $CONDA/bin/conda build --no-anaconda-upload --output-folder $BUILD_DIR ./conda-recipe
-      - run: $CONDA/bin/conda remove --name test --all --keep-env -y
-      - run: $CONDA/bin/conda install --name test --channel $BUILD_DIR python=3.8 pyzebra -y
-      - run: sudo systemctl restart pyzebra-test.service
-
-  prod-env:
-    runs-on: pyzebra
-    needs: prepare
-    if: startsWith(github.ref, 'refs/tags/')
-    env:
-      BUILD_DIR: ${{ runner.temp }}/conda_build
-    steps:
-      - name: Checkout repository
-        uses: actions/checkout@v4
-      - run: $CONDA/bin/conda build --token ${{ secrets.ANACONDA_TOKEN }} --output-folder $BUILD_DIR ./conda-recipe
-      - run: $CONDA/bin/conda remove --name prod --all --keep-env -y
-      - run: $CONDA/bin/conda install --name prod --channel $BUILD_DIR python=3.8 pyzebra -y
-      - run: sudo systemctl restart pyzebra-prod.service
-
-  cleanup:
-    runs-on: pyzebra
-    needs: [test-env, prod-env]
-    if: always()
-    steps:
-      - run: $CONDA/bin/conda build purge-all

.github/workflows/deployment.yaml (new file)

@@ -0,0 +1,26 @@
+name: Deployment
+
+on:
+  push:
+    tags:
+      - '*'
+
+jobs:
+  publish-conda-package:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+
+      - name: Prepare
+        run: |
+          $CONDA/bin/conda install --quiet --yes conda-build anaconda-client conda-libmamba-solver
+          $CONDA/bin/conda config --append channels conda-forge
+          $CONDA/bin/conda config --set solver libmamba
+          $CONDA/bin/conda config --set anaconda_upload yes
+
+      - name: Build and upload
+        env:
+          ANACONDA_TOKEN: ${{ secrets.ANACONDA_TOKEN }}
+        run: |
+          $CONDA/bin/conda build --token $ANACONDA_TOKEN conda-recipe

@@ -28,7 +28,7 @@ requirements:
 about:
-  home: https://gitlab.psi.ch/zebra/pyzebra
+  home: https://github.com/paulscherrerinstitute/pyzebra
   summary: {{ data['description'] }}
   license: GNU GPLv3
   license_file: LICENSE

@@ -7,19 +7,18 @@ import subprocess

 def main():
-    default_branch = "main"
     branch = subprocess.check_output("git rev-parse --abbrev-ref HEAD", shell=True).decode().strip()
-    if branch != default_branch:
-        print(f"Aborting, not on '{default_branch}' branch.")
+    if branch != "master":
+        print("Aborting, not on 'master' branch.")
         return

-    version_filepath = os.path.join(os.path.basename(os.path.dirname(__file__)), "__init__.py")
+    filepath = "pyzebra/__init__.py"

     parser = argparse.ArgumentParser()
     parser.add_argument("level", type=str, choices=["patch", "minor", "major"])
     args = parser.parse_args()

-    with open(version_filepath) as f:
+    with open(filepath) as f:
         file_content = f.read()

     version = re.search(r'__version__ = "(.*?)"', file_content).group(1)
@@ -37,12 +36,11 @@ def main():
     new_version = f"{major}.{minor}.{patch}"

-    with open(version_filepath, "w") as f:
+    with open(filepath, "w") as f:
         f.write(re.sub(r'__version__ = "(.*?)"', f'__version__ = "{new_version}"', file_content))

-    os.system(f"git commit {version_filepath} -m 'Updating for version {new_version}'")
+    os.system(f"git commit {filepath} -m 'Updating for version {new_version}'")
     os.system(f"git tag -a {new_version} -m 'Release {new_version}'")
-    os.system("git push --follow-tags")


 if __name__ == "__main__":
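
Note: on the 0.7.5 side the release helper no longer runs "git push --follow-tags", so the tag that triggers the Deployment workflow above has to be pushed by hand. A minimal sketch, with the script and remote names assumed rather than taken from this diff:

  # bump the version and create the annotated tag (helper name assumed)
  python make_release.py patch
  # push commit and tag; the pushed tag fires the Deployment workflow
  git push origin master --follow-tags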

@@ -6,4 +6,4 @@ from pyzebra.sxtal_refgen import *
 from pyzebra.utils import *
 from pyzebra.xtal import *

-__version__ = "0.7.11"
+__version__ = "0.7.5"

@@ -1,9 +1,6 @@
-import logging
 import subprocess
 import xml.etree.ElementTree as ET

-logger = logging.getLogger(__name__)
-
 DATA_FACTORY_IMPLEMENTATION = ["trics", "morph", "d10"]

 REFLECTION_PRINTER_FORMATS = [
@@ -19,11 +16,11 @@ REFLECTION_PRINTER_FORMATS = [
     "oksana",
 ]

-ANATRIC_PATH = "/afs/psi.ch/project/sinq/rhel8/bin/anatric"
+ANATRIC_PATH = "/afs/psi.ch/project/sinq/rhel7/bin/anatric"
 ALGORITHMS = ["adaptivemaxcog", "adaptivedynamic"]


-def anatric(config_file, anatric_path=ANATRIC_PATH, cwd=None, log=logger):
+def anatric(config_file, anatric_path=ANATRIC_PATH, cwd=None):
     comp_proc = subprocess.run(
         [anatric_path, config_file],
         stdout=subprocess.PIPE,
@@ -32,8 +29,8 @@ def anatric(config_file, anatric_path=ANATRIC_PATH, cwd=None, log=logger):
         check=True,
         text=True,
     )
-    log.info(" ".join(comp_proc.args))
-    log.info(comp_proc.stdout)
+    print(" ".join(comp_proc.args))
+    print(comp_proc.stdout)


 class AnatricConfig:

pyzebra/app/app_hooks.py (new file)

@@ -0,0 +1,17 @@
+import logging
+import sys
+from io import StringIO
+
+
+def on_server_loaded(_server_context):
+    formatter = logging.Formatter(
+        fmt="%(asctime)s %(levelname)s: %(message)s", datefmt="%Y-%m-%d %H:%M:%S"
+    )
+
+    sys.stdout = StringIO()
+
+    bokeh_handler = logging.StreamHandler(StringIO())
+    bokeh_handler.setFormatter(formatter)
+    bokeh_logger = logging.getLogger("bokeh")
+    bokeh_logger.setLevel(logging.WARNING)
+    bokeh_logger.addHandler(bokeh_handler)

@@ -4,7 +4,7 @@ import sys

 def main():
-    app_path = os.path.dirname(os.path.abspath(__file__))
+    app_path = os.path.join(os.path.dirname(os.path.abspath(__file__)))
     subprocess.run(["bokeh", "serve", app_path, *sys.argv[1:]], check=True)

@@ -1,6 +1,5 @@
 import types

-from bokeh.io import curdoc
 from bokeh.models import (
     Button,
     CellEditor,
@@ -52,7 +51,6 @@ def _params_factory(function):
 class FitControls:
     def __init__(self):
-        self.log = curdoc().logger
         self.params = {}

         def add_function_button_callback(click):
@@ -147,11 +145,7 @@ class FitControls:
     def _process_scan(self, scan):
         pyzebra.fit_scan(
-            scan,
-            self.params,
-            fit_from=self.from_spinner.value,
-            fit_to=self.to_spinner.value,
-            log=self.log,
+            scan, self.params, fit_from=self.from_spinner.value, fit_to=self.to_spinner.value
         )
         pyzebra.get_area(
             scan,

@@ -11,7 +11,6 @@ import pyzebra

 class InputControls:
     def __init__(self, dataset, dlfiles, on_file_open=lambda: None, on_monitor_change=lambda: None):
         doc = curdoc()
-        log = doc.logger

         def filelist_select_update_for_proposal():
             proposal_path = proposal_textinput.name
@@ -46,19 +45,19 @@ class InputControls:
                 f_name = os.path.basename(f_path)
                 base, ext = os.path.splitext(f_name)
                 try:
-                    file_data = pyzebra.parse_1D(file, ext, log=log)
-                except Exception as e:
-                    log.exception(e)
+                    file_data = pyzebra.parse_1D(file, ext)
+                except:
+                    print(f"Error loading {f_name}")
                     continue

                 pyzebra.normalize_dataset(file_data, monitor_spinner.value)

                 if not new_data:  # first file
                     new_data = file_data
-                    pyzebra.merge_duplicates(new_data, log=log)
+                    pyzebra.merge_duplicates(new_data)
                     dlfiles.set_names([base] * dlfiles.n_files)
                 else:
-                    pyzebra.merge_datasets(new_data, file_data, log=log)
+                    pyzebra.merge_datasets(new_data, file_data)

             if new_data:
                 dataset.clear()
@@ -77,13 +76,13 @@ class InputControls:
                 f_name = os.path.basename(f_path)
                 _, ext = os.path.splitext(f_name)
                 try:
-                    file_data = pyzebra.parse_1D(file, ext, log=log)
-                except Exception as e:
-                    log.exception(e)
+                    file_data = pyzebra.parse_1D(file, ext)
+                except:
+                    print(f"Error loading {f_name}")
                     continue

                 pyzebra.normalize_dataset(file_data, monitor_spinner.value)
-                pyzebra.merge_datasets(dataset, file_data, log=log)
+                pyzebra.merge_datasets(dataset, file_data)

             if file_data:
                 on_file_open()
@@ -98,19 +97,19 @@ class InputControls:
             with io.StringIO(base64.b64decode(f_str).decode()) as file:
                 base, ext = os.path.splitext(f_name)
                 try:
-                    file_data = pyzebra.parse_1D(file, ext, log=log)
-                except Exception as e:
-                    log.exception(e)
+                    file_data = pyzebra.parse_1D(file, ext)
+                except:
+                    print(f"Error loading {f_name}")
                     continue

                 pyzebra.normalize_dataset(file_data, monitor_spinner.value)

                 if not new_data:  # first file
                     new_data = file_data
-                    pyzebra.merge_duplicates(new_data, log=log)
+                    pyzebra.merge_duplicates(new_data)
                     dlfiles.set_names([base] * dlfiles.n_files)
                 else:
-                    pyzebra.merge_datasets(new_data, file_data, log=log)
+                    pyzebra.merge_datasets(new_data, file_data)

             if new_data:
                 dataset.clear()
@@ -130,13 +129,13 @@ class InputControls:
             with io.StringIO(base64.b64decode(f_str).decode()) as file:
                 _, ext = os.path.splitext(f_name)
                 try:
-                    file_data = pyzebra.parse_1D(file, ext, log=log)
-                except Exception as e:
-                    log.exception(e)
+                    file_data = pyzebra.parse_1D(file, ext)
+                except:
+                    print(f"Error loading {f_name}")
                     continue

                 pyzebra.normalize_dataset(file_data, monitor_spinner.value)
-                pyzebra.merge_datasets(dataset, file_data, log=log)
+                pyzebra.merge_datasets(dataset, file_data)

             if file_data:
                 on_file_open()

@@ -1,6 +1,6 @@
 import argparse
 import logging
-from io import StringIO
+import sys

 from bokeh.io import curdoc
 from bokeh.layouts import column, row
@@ -43,17 +43,11 @@ doc.anatric_path = args.anatric_path
 doc.spind_path = args.spind_path
 doc.sxtal_refgen_path = args.sxtal_refgen_path

-stream = StringIO()
-handler = logging.StreamHandler(stream)
-handler.setFormatter(
-    logging.Formatter(fmt="%(asctime)s %(levelname)s: %(message)s", datefmt="%Y-%m-%d %H:%M:%S")
-)
-logger = logging.getLogger(str(id(doc)))
-logger.setLevel(logging.INFO)
-logger.addHandler(handler)
-doc.logger = logger
-
-log_textareainput = TextAreaInput(title="Logging output:")
+# In app_hooks.py a StreamHandler was added to "bokeh" logger
+bokeh_stream = logging.getLogger("bokeh").handlers[0].stream
+
+log_textareainput = TextAreaInput(title="logging output:")
+bokeh_log_textareainput = TextAreaInput(title="server output:")


 def proposal_textinput_callback(_attr, _old, _new):
@@ -71,7 +65,7 @@ def apply_button_callback():
         try:
             proposal_path = pyzebra.find_proposal_path(proposal)
         except ValueError as e:
-            logger.exception(e)
+            print(e)
             return
         apply_button.disabled = True
     else:
@@ -100,13 +94,14 @@ doc.add_root(
                 panel_spind.create(),
             ]
         ),
-        row(log_textareainput, sizing_mode="scale_both"),
+        row(log_textareainput, bokeh_log_textareainput, sizing_mode="scale_both"),
     )
 )


 def update_stdout():
-    log_textareainput.value = stream.getvalue()
+    log_textareainput.value = sys.stdout.getvalue()
+    bokeh_log_textareainput.value = bokeh_stream.getvalue()


 doc.add_periodic_callback(update_stdout, 1000)

@@ -33,7 +33,6 @@ from pyzebra import EXPORT_TARGETS, app

 def create():
     doc = curdoc()
-    log = doc.logger
     dataset1 = []
     dataset2 = []
     app_dlfiles = app.DownloadFiles(n_files=2)
@@ -95,7 +94,7 @@ def create():
     def file_open_button_callback():
         if len(file_select.value) != 2:
-            log.warning("Select exactly 2 .ccl files.")
+            print("WARNING: Select exactly 2 .ccl files.")
             return

         new_data1 = []
@@ -105,13 +104,13 @@ def create():
             f_name = os.path.basename(f_path)
             base, ext = os.path.splitext(f_name)
             try:
-                file_data = pyzebra.parse_1D(file, ext, log=log)
-            except Exception as e:
-                log.exception(e)
+                file_data = pyzebra.parse_1D(file, ext)
+            except:
+                print(f"Error loading {f_name}")
                 return

             pyzebra.normalize_dataset(file_data, monitor_spinner.value)
-            pyzebra.merge_duplicates(file_data, log=log)
+            pyzebra.merge_duplicates(file_data)

             if ind == 0:
                 app_dlfiles.set_names([base, base])
@@ -134,7 +133,7 @@ def create():
     def upload_button_callback(_attr, _old, _new):
         if len(upload_button.filename) != 2:
-            log.warning("Upload exactly 2 .ccl files.")
+            print("WARNING: Upload exactly 2 .ccl files.")
             return

         new_data1 = []
@@ -143,13 +142,13 @@ def create():
         with io.StringIO(base64.b64decode(f_str).decode()) as file:
             base, ext = os.path.splitext(f_name)
             try:
-                file_data = pyzebra.parse_1D(file, ext, log=log)
-            except Exception as e:
-                log.exception(e)
+                file_data = pyzebra.parse_1D(file, ext)
+            except:
+                print(f"Error loading {f_name}")
                 return

             pyzebra.normalize_dataset(file_data, monitor_spinner.value)
-            pyzebra.merge_duplicates(file_data, log=log)
+            pyzebra.merge_duplicates(file_data)

             if ind == 0:
                 app_dlfiles.set_names([base, base])
@@ -378,11 +377,11 @@ def create():
         scan_from2 = dataset2[int(merge_from_select.value)]

         if scan_into1 is scan_from1:
-            log.warning("Selected scans for merging are identical")
+            print("WARNING: Selected scans for merging are identical")
             return

-        pyzebra.merge_scans(scan_into1, scan_from1, log=log)
-        pyzebra.merge_scans(scan_into2, scan_from2, log=log)
+        pyzebra.merge_scans(scan_into1, scan_from1)
+        pyzebra.merge_scans(scan_into2, scan_from2)

         _update_table()
         _update_plot()

@@ -2,7 +2,6 @@ import os
 import tempfile

 import numpy as np
-from bokeh.io import curdoc
 from bokeh.layouts import column, row
 from bokeh.models import (
     Button,
@@ -26,8 +25,6 @@ from pyzebra import EXPORT_TARGETS, app

 def create():
-    doc = curdoc()
-    log = doc.logger
     dataset = []
     app_dlfiles = app.DownloadFiles(n_files=2)
@@ -217,10 +214,10 @@ def create():
         scan_from = dataset[int(merge_from_select.value)]

         if scan_into is scan_from:
-            log.warning("Selected scans for merging are identical")
+            print("WARNING: Selected scans for merging are identical")
             return

-        pyzebra.merge_scans(scan_into, scan_from, log=log)
+        pyzebra.merge_scans(scan_into, scan_from)

         _update_table()
         _update_plot()

@@ -5,7 +5,6 @@ import subprocess
 import tempfile

 import numpy as np
-from bokeh.io import curdoc
 from bokeh.layouts import column, row
 from bokeh.models import (
     Arrow,
@@ -40,8 +39,6 @@ SORT_OPT_NB = ["gamma", "nu", "omega"]

 def create():
-    doc = curdoc()
-    log = doc.logger
     ang_lims = {}
     cif_data = {}
     params = {}
@@ -135,11 +132,7 @@ def create():
         params = dict()
         params["SPGR"] = cryst_space_group.value
         params["CELL"] = cryst_cell.value
-        try:
-            ub = pyzebra.calc_ub_matrix(params, log=log)
-        except Exception as e:
-            log.exception(e)
-            return
+        ub = pyzebra.calc_ub_matrix(params)
         ub_matrix.value = " ".join(ub)

     ub_matrix_calc = Button(label="UB matrix:", button_type="primary", width=100)
@@ -228,9 +221,9 @@ def create():
             geom_template = None
         pyzebra.export_geom_file(geom_path, ang_lims, geom_template)

-        log.info(f"Content of {geom_path}:")
+        print(f"Content of {geom_path}:")
         with open(geom_path) as f:
-            log.info(f.read())
+            print(f.read())

         priority = [sorting_0.value, sorting_1.value, sorting_2.value]
         chunks = [sorting_0_dt.value, sorting_1_dt.value, sorting_2_dt.value]
@@ -255,9 +248,9 @@ def create():
             cfl_template = None
         pyzebra.export_cfl_file(cfl_path, params, cfl_template)

-        log.info(f"Content of {cfl_path}:")
+        print(f"Content of {cfl_path}:")
         with open(cfl_path) as f:
-            log.info(f.read())
+            print(f.read())

         comp_proc = subprocess.run(
             [pyzebra.SXTAL_REFGEN_PATH, cfl_path],
@@ -267,8 +260,8 @@ def create():
             stderr=subprocess.STDOUT,
             text=True,
         )
-        log.info(" ".join(comp_proc.args))
-        log.info(comp_proc.stdout)
+        print(" ".join(comp_proc.args))
+        print(comp_proc.stdout)

         if i == 1:  # all hkl files are identical, so keep only one
             hkl_fname = base_fname + ".hkl"
@@ -598,8 +591,8 @@ def create():
         _, ext = os.path.splitext(fname)
         try:
             file_data = pyzebra.parse_hkl(file, ext)
-        except Exception as e:
-            log.exception(e)
+        except:
+            print(f"Error loading {fname}")
             return

         fnames.append(fname)

@@ -24,7 +24,6 @@ from pyzebra import DATA_FACTORY_IMPLEMENTATION, REFLECTION_PRINTER_FORMATS

 def create():
     doc = curdoc()
-    log = doc.logger
     config = pyzebra.AnatricConfig()

     def _load_config_file(file):
@@ -348,11 +347,7 @@ def create():
         with tempfile.TemporaryDirectory() as temp_dir:
             temp_file = temp_dir + "/config.xml"
             config.save_as(temp_file)
-            try:
-                pyzebra.anatric(temp_file, anatric_path=doc.anatric_path, cwd=temp_dir, log=log)
-            except Exception as e:
-                log.exception(e)
-                return
+            pyzebra.anatric(temp_file, anatric_path=doc.anatric_path, cwd=temp_dir)

             with open(os.path.join(temp_dir, config.logfile)) as f_log:
                 output_log.value = f_log.read()

@@ -36,7 +36,6 @@ IMAGE_PLOT_H = int(IMAGE_H * 2.4) + 27

 def create():
     doc = curdoc()
-    log = doc.logger
     dataset = []
     cami_meta = {}
@@ -134,8 +133,8 @@ def create():
         for f_name in file_select.value:
             try:
                 new_data.append(pyzebra.read_detector_data(f_name))
-            except KeyError as e:
-                log.exception(e)
+            except KeyError:
+                print("Could not read data from the file.")
                 return

         dataset.extend(new_data)
@@ -276,7 +275,7 @@ def create():
         frame_range.bounds = (0, n_im)

         scan_motor = scan["scan_motor"]
-        proj_y_plot.yaxis.axis_label = f"Scanning motor, {scan_motor}"
+        proj_y_plot.axis[1].axis_label = f"Scanning motor, {scan_motor}"

         var = scan[scan_motor]
         var_start = var[0]

@@ -43,7 +43,6 @@ IMAGE_PLOT_H = int(IMAGE_H * 2.4) + 27

 def create():
     doc = curdoc()
-    log = doc.logger
     dataset = []
     cami_meta = {}
@@ -103,8 +102,8 @@ def create():
         nonlocal dataset
         try:
             scan = pyzebra.read_detector_data(io.BytesIO(base64.b64decode(new)), None)
-        except Exception as e:
-            log.exception(e)
+        except KeyError:
+            print("Could not read data from the file.")
             return

         dataset = [scan]
@@ -138,8 +137,8 @@ def create():
             f_name = os.path.basename(f_path)
             try:
                 file_data = [pyzebra.read_detector_data(f_path, cm)]
-            except Exception as e:
-                log.exception(e)
+            except:
+                print(f"Error loading {f_name}")
                 continue

             pyzebra.normalize_dataset(file_data, monitor_spinner.value)
@@ -147,7 +146,7 @@ def create():
             if not new_data:  # first file
                 new_data = file_data
             else:
-                pyzebra.merge_datasets(new_data, file_data, log=log)
+                pyzebra.merge_datasets(new_data, file_data)

         if new_data:
             dataset = new_data
@@ -162,12 +161,12 @@ def create():
             f_name = os.path.basename(f_path)
             try:
                 file_data = [pyzebra.read_detector_data(f_path, None)]
-            except Exception as e:
-                log.exception(e)
+            except:
+                print(f"Error loading {f_name}")
                 continue

             pyzebra.normalize_dataset(file_data, monitor_spinner.value)
-            pyzebra.merge_datasets(dataset, file_data, log=log)
+            pyzebra.merge_datasets(dataset, file_data)

         if file_data:
             _init_datatable()
@@ -293,10 +292,10 @@ def create():
         scan_from = dataset[int(merge_from_select.value)]

         if scan_into is scan_from:
-            log.warning("Selected scans for merging are identical")
+            print("WARNING: Selected scans for merging are identical")
             return

-        pyzebra.merge_h5_scans(scan_into, scan_from, log=log)
+        pyzebra.merge_h5_scans(scan_into, scan_from)
         _update_table()
         _update_image()
         _update_proj_plots()
@@ -407,7 +406,7 @@ def create():
         frame_range.bounds = (0, n_im)

         scan_motor = scan["scan_motor"]
-        proj_y_plot.yaxis.axis_label = f"Scanning motor, {scan_motor}"
+        proj_y_plot.axis[1].axis_label = f"Scanning motor, {scan_motor}"

         var = scan[scan_motor]
         var_start = var[0]

@@ -3,7 +3,6 @@ import os
 import tempfile

 import numpy as np
-from bokeh.io import curdoc
 from bokeh.layouts import column, row
 from bokeh.models import (
     Button,
@@ -39,8 +38,6 @@ def color_palette(n_colors):

 def create():
-    doc = curdoc()
-    log = doc.logger
     dataset = []
     app_dlfiles = app.DownloadFiles(n_files=1)
@@ -364,10 +361,10 @@ def create():
         scan_from = dataset[int(merge_from_select.value)]

         if scan_into is scan_from:
-            log.warning("Selected scans for merging are identical")
+            print("WARNING: Selected scans for merging are identical")
             return

-        pyzebra.merge_scans(scan_into, scan_from, log=log)
+        pyzebra.merge_scans(scan_into, scan_from)
         _update_table()
         _update_single_scan_plot()
         _update_overview()

@@ -3,7 +3,6 @@ import io
 import os

 import numpy as np
-from bokeh.io import curdoc
 from bokeh.layouts import column, row
 from bokeh.models import (
     Button,
@@ -32,8 +31,6 @@ from pyzebra.app.panel_hdf_viewer import calculate_hkl

 def create():
-    doc = curdoc()
-    log = doc.logger
     _update_slice = None
     measured_data_div = Div(text="Measured <b>HDF</b> data:")
     measured_data = FileInput(accept=".hdf", multiple=True, width=200)
@@ -62,8 +59,8 @@ def create():
             # Read data
             try:
                 det_data = pyzebra.read_detector_data(io.BytesIO(base64.b64decode(fdata)))
-            except Exception as e:
-                log.exception(e)
+            except:
+                print(f"Error loading {fname}")
                 return None

             if ind == 0:
@@ -182,8 +179,8 @@ def create():
             _, ext = os.path.splitext(fname)
             try:
                 fdata = pyzebra.parse_hkl(file, ext)
-            except Exception as e:
-                log.exception(e)
+            except:
+                print(f"Error loading {fname}")
                 return

             for ind in range(len(fdata["counts"])):

@@ -21,7 +21,6 @@ import pyzebra

 def create():
     doc = curdoc()
-    log = doc.logger
     events_data = doc.events_data

     npeaks_spinner = Spinner(title="Number of peaks from hdf_view panel:", disabled=True)
@@ -64,8 +63,8 @@ def create():
                 stderr=subprocess.STDOUT,
                 text=True,
             )
-            log.info(" ".join(comp_proc.args))
-            log.info(comp_proc.stdout)
+            print(" ".join(comp_proc.args))
+            print(comp_proc.stdout)

             # prepare an event file
             diff_vec = []
@@ -95,9 +94,9 @@ def create():
                         f"{x_pos} {y_pos} {intensity} {snr_cnts} {dv1} {dv2} {dv3} {d_spacing}\n"
                     )

-            log.info(f"Content of {temp_event_file}:")
+            print(f"Content of {temp_event_file}:")
             with open(temp_event_file) as f:
-                log.info(f.read())
+                print(f.read())

             comp_proc = subprocess.run(
                 [
@@ -124,8 +123,8 @@ def create():
                 stderr=subprocess.STDOUT,
                 text=True,
             )
-            log.info(" ".join(comp_proc.args))
-            log.info(comp_proc.stdout)
+            print(" ".join(comp_proc.args))
+            print(comp_proc.stdout)

             spind_out_file = os.path.join(temp_dir, "spind.txt")
             spind_res = dict(
@@ -147,12 +146,12 @@ def create():
                     ub_matrices.append(ub_matrix_spind)
                     spind_res["ub_matrix"].append(str(ub_matrix_spind * 1e-10))

-                log.info(f"Content of {spind_out_file}:")
+                print(f"Content of {spind_out_file}:")
                 with open(spind_out_file) as f:
-                    log.info(f.read())
+                    print(f.read())

             except FileNotFoundError:
-                log.warning("No results from spind")
+                print("No results from spind")

             results_table_source.data.update(spind_res)

@@ -3,7 +3,6 @@ import io
 import os

 import numpy as np
-from bokeh.io import curdoc
 from bokeh.layouts import column, row
 from bokeh.models import (
     Arrow,
@@ -31,9 +30,6 @@ import pyzebra

 class PlotHKL:
     def __init__(self):
-        doc = curdoc()
-        log = doc.logger
-
         _update_slice = None
         measured_data_div = Div(text="Measured <b>CCL</b> data:")
         measured_data = FileInput(accept=".ccl", multiple=True, width=200)
@@ -66,9 +62,9 @@ class PlotHKL:
             with io.StringIO(base64.b64decode(md_fdata[0]).decode()) as file:
                 _, ext = os.path.splitext(md_fnames[0])
                 try:
-                    file_data = pyzebra.parse_1D(file, ext, log=log)
-                except Exception as e:
-                    log.exception(e)
+                    file_data = pyzebra.parse_1D(file, ext)
+                except:
+                    print(f"Error loading {md_fnames[0]}")
                     return None

             alpha = file_data[0]["alpha_cell"] * np.pi / 180.0
@@ -148,9 +144,9 @@ class PlotHKL:
                 with io.StringIO(base64.b64decode(md_fdata[j]).decode()) as file:
                     _, ext = os.path.splitext(md_fname)
                     try:
-                        file_data = pyzebra.parse_1D(file, ext, log=log)
-                    except Exception as e:
-                        log.exception(e)
+                        file_data = pyzebra.parse_1D(file, ext)
+                    except:
+                        print(f"Error loading {md_fname}")
                         return None

                 pyzebra.normalize_dataset(file_data)
@@ -295,8 +291,8 @@ class PlotHKL:
             _, ext = os.path.splitext(fname)
             try:
                 fdata = pyzebra.parse_hkl(file, ext)
-            except Exception as e:
-                log.exception(e)
+            except:
+                print(f"Error loading {fname}")
                 return

             for ind in range(len(fdata["counts"])):

@@ -1,4 +1,3 @@
-import logging
 import os
 import re
 from ast import literal_eval
@@ -6,8 +5,6 @@ from collections import defaultdict

 import numpy as np

-logger = logging.getLogger(__name__)
-
 META_VARS_STR = (
     "instrument",
     "title",
@@ -17,7 +14,6 @@ META_VARS_STR = (
     "original_filename",
     "date",
     "zebra_mode",
-    "zebramode",
     "sample_name",
 )
@@ -114,7 +110,7 @@ def load_1D(filepath):
     return dataset

-def parse_1D(fileobj, data_type, log=logger):
+def parse_1D(fileobj, data_type):
     metadata = {"data_type": data_type}

     # read metadata
@@ -137,8 +133,6 @@ def parse_1D(fileobj, data_type, log=logger):
         try:
             if var_name in META_VARS_STR:
-                if var_name == "zebramode":
-                    var_name = "zebra_mode"
                 metadata[var_name] = value

             elif var_name in META_VARS_FLOAT:
@@ -162,7 +156,7 @@ def parse_1D(fileobj, data_type, log=logger):
                 metadata["ub"][row, :] = list(map(float, value.split()))

         except Exception:
-            log.error(f"Error reading {var_name} with value '{value}'")
+            print(f"Error reading {var_name} with value '{value}'")
             metadata[var_name] = 0

     # handle older files that don't contain "zebra_mode" metadata
@@ -238,7 +232,7 @@ def parse_1D(fileobj, data_type, log=logger):
             scan["export"] = True

             match = re.search("Scanning Variables: (.*), Steps: (.*)", next(fileobj))
-            motors = [motor.strip().lower() for motor in match.group(1).split(",")]
+            motors = [motor.lower() for motor in match.group(1).split(", ")]
             # Steps can be separated by " " or ", "
             steps = [float(step.strip(",")) for step in match.group(2).split()]
@@ -300,7 +294,7 @@ def parse_1D(fileobj, data_type, log=logger):
         dataset.append({**metadata, **scan})

     else:
-        log.error("Unknown file extention")
+        print("Unknown file extention")

     return dataset

@@ -1,4 +1,3 @@
-import logging
 import os

 import numpy as np
@@ -7,8 +6,6 @@ from scipy.integrate import simpson, trapezoid

 from pyzebra import CCL_ANGLES

-logger = logging.getLogger(__name__)
-
 PARAM_PRECISIONS = {
     "twotheta": 0.1,
     "chi": 0.1,
@@ -36,12 +33,12 @@ def normalize_dataset(dataset, monitor=100_000):
         scan["monitor"] = monitor


-def merge_duplicates(dataset, log=logger):
+def merge_duplicates(dataset):
     merged = np.zeros(len(dataset), dtype=bool)
     for ind_into, scan_into in enumerate(dataset):
         for ind_from, scan_from in enumerate(dataset[ind_into + 1 :], start=ind_into + 1):
             if _parameters_match(scan_into, scan_from) and not merged[ind_from]:
-                merge_scans(scan_into, scan_from, log=log)
+                merge_scans(scan_into, scan_from)
                 merged[ind_from] = True
@@ -78,13 +75,11 @@ def _parameters_match(scan1, scan2):
     return True


-def merge_datasets(dataset_into, dataset_from, log=logger):
+def merge_datasets(dataset_into, dataset_from):
     scan_motors_into = dataset_into[0]["scan_motors"]
     scan_motors_from = dataset_from[0]["scan_motors"]
     if scan_motors_into != scan_motors_from:
-        log.warning(
-            f"Scan motors mismatch between datasets: {scan_motors_into} vs {scan_motors_from}"
-        )
+        print(f"Scan motors mismatch between datasets: {scan_motors_into} vs {scan_motors_from}")
         return

     merged = np.zeros(len(dataset_from), dtype=bool)
@@ -92,16 +87,16 @@ def merge_datasets(dataset_into, dataset_from):
     for ind, scan_from in enumerate(dataset_from):
         if _parameters_match(scan_into, scan_from) and not merged[ind]:
             if scan_into["counts"].ndim == 3:
-                merge_h5_scans(scan_into, scan_from, log=log)
+                merge_h5_scans(scan_into, scan_from)
             else:  # scan_into["counts"].ndim == 1
-                merge_scans(scan_into, scan_from, log=log)
+                merge_scans(scan_into, scan_from)
             merged[ind] = True

     for scan_from in dataset_from:
         dataset_into.append(scan_from)


-def merge_scans(scan_into, scan_from, log=logger):
+def merge_scans(scan_into, scan_from):
     if "init_scan" not in scan_into:
         scan_into["init_scan"] = scan_into.copy()
@@ -153,10 +148,10 @@ def merge_scans(scan_into, scan_from):
     fname1 = os.path.basename(scan_into["original_filename"])
     fname2 = os.path.basename(scan_from["original_filename"])

-    log.info(f'Merging scans: {scan_into["idx"]} ({fname1}) <-- {scan_from["idx"]} ({fname2})')
+    print(f'Merging scans: {scan_into["idx"]} ({fname1}) <-- {scan_from["idx"]} ({fname2})')


-def merge_h5_scans(scan_into, scan_from, log=logger):
+def merge_h5_scans(scan_into, scan_from):
     if "init_scan" not in scan_into:
         scan_into["init_scan"] = scan_into.copy()
@@ -165,7 +160,7 @@ def merge_h5_scans(scan_into, scan_from):
     for scan in scan_into["merged_scans"]:
         if scan_from is scan:
-            log.warning("Already merged scan")
+            print("Already merged scan")
             return

     scan_into["merged_scans"].append(scan_from)
@@ -217,7 +212,7 @@ def merge_h5_scans(scan_into, scan_from):
     fname1 = os.path.basename(scan_into["original_filename"])
     fname2 = os.path.basename(scan_from["original_filename"])

-    log.info(f'Merging scans: {scan_into["idx"]} ({fname1}) <-- {scan_from["idx"]} ({fname2})')
+    print(f'Merging scans: {scan_into["idx"]} ({fname1}) <-- {scan_from["idx"]} ({fname2})')


 def restore_scan(scan):
@@ -235,7 +230,7 @@ def restore_scan(scan):
     scan["export"] = True


-def fit_scan(scan, model_dict, fit_from=None, fit_to=None, log=logger):
+def fit_scan(scan, model_dict, fit_from=None, fit_to=None):
     if fit_from is None:
         fit_from = -np.inf
     if fit_to is None:
@@ -248,7 +243,7 @@ def fit_scan(scan, model_dict, fit_from=None, fit_to=None):
     # apply fitting range
     fit_ind = (fit_from <= x_fit) & (x_fit <= fit_to)
     if not np.any(fit_ind):
-        log.warning(f"No data in fit range for scan {scan['idx']}")
+        print(f"No data in fit range for scan {scan['idx']}")
         return

     y_fit = y_fit[fit_ind]

@@ -148,21 +148,11 @@ def read_detector_data(filepath, cami_meta=None):
         if "/entry1/sample/magnetic_field" in h5f:
             scan["mf"] = h5f["/entry1/sample/magnetic_field"][:]

-        if "mf" in scan:
-            # TODO: NaNs are not JSON compliant, so replace them with None
-            # this is not a great solution, but makes it safe to use the array in bokeh
-            scan["mf"] = np.where(np.isnan(scan["mf"]), None, scan["mf"])
-
         if "/entry1/sample/temperature" in h5f:
             scan["temp"] = h5f["/entry1/sample/temperature"][:]
         elif "/entry1/sample/Ts/value" in h5f:
             scan["temp"] = h5f["/entry1/sample/Ts/value"][:]

-        if "temp" in scan:
-            # TODO: NaNs are not JSON compliant, so replace them with None
-            # this is not a great solution, but makes it safe to use the array in bokeh
-            scan["temp"] = np.where(np.isnan(scan["temp"]), None, scan["temp"])
-
         # overwrite metadata from .cami
         if cami_meta is not None:
             if "crystal" in cami_meta:

@@ -1,5 +1,4 @@
 import io
-import logging
 import os
 import subprocess
 import tempfile
@@ -7,9 +6,7 @@ from math import ceil, floor

 import numpy as np

-logger = logging.getLogger(__name__)
-
-SXTAL_REFGEN_PATH = "/afs/psi.ch/project/sinq/rhel8/bin/Sxtal_Refgen"
+SXTAL_REFGEN_PATH = "/afs/psi.ch/project/sinq/rhel7/bin/Sxtal_Refgen"

 _zebraBI_default_geom = """GEOM 2 Bissecting - HiCHI
 BLFR z-up
@@ -147,7 +144,7 @@ def export_geom_file(path, ang_lims, template=None):
         out_file.write(f"{'':<8}{ang:<10}{vals[0]:<10}{vals[1]:<10}{vals[2]:<10}\n")

-def calc_ub_matrix(params, log=logger):
+def calc_ub_matrix(params):
     with tempfile.TemporaryDirectory() as temp_dir:
         cfl_file = os.path.join(temp_dir, "ub_matrix.cfl")
@@ -163,8 +160,8 @@ def calc_ub_matrix(params, log=logger):
             stderr=subprocess.STDOUT,
             text=True,
         )
-        log.info(" ".join(comp_proc.args))
-        log.info(comp_proc.stdout)
+        print(" ".join(comp_proc.args))
+        print(comp_proc.stdout)

         sfa_file = os.path.join(temp_dir, "ub_matrix.sfa")
         ub_matrix = []

@@ -0,0 +1,11 @@
+[Unit]
+Description=pyzebra-test web server
+
+[Service]
+Type=simple
+User=pyzebra
+ExecStart=/bin/bash /usr/local/sbin/pyzebra-test.sh
+Restart=always
+
+[Install]
+WantedBy=multi-user.target

scripts/pyzebra-test.sh (new file)

@@ -0,0 +1,4 @@
+source /opt/miniconda3/etc/profile.d/conda.sh
+conda activate test
+
+python /opt/pyzebra/pyzebra/app/cli.py --port=5010 --allow-websocket-origin=pyzebra.psi.ch:5010 --args --spind-path=/opt/spind

scripts/pyzebra.service (new file)

@@ -0,0 +1,10 @@
+[Unit]
+Description=pyzebra web server
+
+[Service]
+Type=simple
+ExecStart=/bin/bash /usr/local/sbin/pyzebra.sh
+Restart=always
+
+[Install]
+WantedBy=multi-user.target

scripts/pyzebra.sh (new file)

@@ -0,0 +1,4 @@
+source /opt/miniconda3/etc/profile.d/conda.sh
+conda activate prod
+
+pyzebra --port=80 --allow-websocket-origin=pyzebra.psi.ch:80 --args --spind-path=/opt/spind
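
For reference, a minimal sketch of wiring the new prod unit up on a server; the diff only adds the files, so the copy/enable steps below are an assumption, with paths taken from the unit's ExecStart line:

  # assumed deployment steps, not part of this diff
  sudo cp scripts/pyzebra.service /etc/systemd/system/
  sudo install -m 755 scripts/pyzebra.sh /usr/local/sbin/pyzebra.sh
  sudo systemctl daemon-reload
  sudo systemctl enable --now pyzebra.service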