Compare commits
49 Commits
108c1aae2f
b82184b9e7
b6a43c3f3b
18b692a62e
c2d6f6b259
60b90ec9e5
1fc30ae3e1
dfa6bfe926
ed3f58436b
68f7b429f7
e5030902c7
60f01d9dd8
bd429393a5
9e3ffd6230
bdc71f15c1
c3398ef4e5
9b33f1152b
dc1f2a92cc
e9ae52bb60
982887ab85
19e934e873
8604d695c6
a55295829f
4181d597a8
c4869fb0cd
80e75d9ef9
eb2177215b
89fb4f054f
019a36bbb7
58a704764a
144b37ba09
48114a0dd9
0fee06f2d6
31b4b0bb5f
a6611976e1
9b48fb7a24
14d122b947
bff44a7461
eae8a1bde4
a1c1de4adf
9e6fc04d63
779426f4bb
1a5d61a9f7
b41ab102b1
bc791b1028
3ab4420912
90552cee2c
6164be16f0
07f03a2a04
.gitea/workflows/deploy.yaml (new file, 53 lines)
@@ -0,0 +1,53 @@
+name: pyzebra CI/CD pipeline
+
+on:
+  push:
+    branches:
+      - main
+    tags:
+      - '*'
+
+env:
+  CONDA: /opt/miniforge3
+
+jobs:
+  prepare:
+    runs-on: pyzebra
+    steps:
+      - run: $CONDA/bin/conda config --add channels conda-forge
+      - run: $CONDA/bin/conda config --set solver libmamba
+
+  test-env:
+    runs-on: pyzebra
+    needs: prepare
+    if: github.ref == 'refs/heads/main'
+    env:
+      BUILD_DIR: ${{ runner.temp }}/conda_build
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v4
+      - run: $CONDA/bin/conda build --no-anaconda-upload --output-folder $BUILD_DIR ./conda-recipe
+      - run: $CONDA/bin/conda remove --name test --all --keep-env -y
+      - run: $CONDA/bin/conda install --name test --channel $BUILD_DIR python=3.8 pyzebra -y
+      - run: sudo systemctl restart pyzebra-test.service
+
+  prod-env:
+    runs-on: pyzebra
+    needs: prepare
+    if: startsWith(github.ref, 'refs/tags/')
+    env:
+      BUILD_DIR: ${{ runner.temp }}/conda_build
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v4
+      - run: $CONDA/bin/conda build --token ${{ secrets.ANACONDA_TOKEN }} --output-folder $BUILD_DIR ./conda-recipe
+      - run: $CONDA/bin/conda remove --name prod --all --keep-env -y
+      - run: $CONDA/bin/conda install --name prod --channel $BUILD_DIR python=3.8 pyzebra -y
+      - run: sudo systemctl restart pyzebra-prod.service
+
+  cleanup:
+    runs-on: pyzebra
+    needs: [test-env, prod-env]
+    if: always()
+    steps:
+      - run: $CONDA/bin/conda build purge-all
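The new pipeline keys everything off the triggering ref: a push to main refreshes the test deployment, a tag push publishes and refreshes prod, and cleanup runs regardless. A minimal Python sketch of that gating logic (`pick_jobs` is a hypothetical helper written only for this note; it is not part of the repository or of the workflow itself):

```python
def pick_jobs(ref):
    """Mirror the `if:` conditions of the deploy.yaml jobs for a given git ref."""
    jobs = ["prepare"]  # no condition, always scheduled
    if ref == "refs/heads/main":
        jobs.append("test-env")  # rebuild and reinstall the 'test' conda env
    if ref.startswith("refs/tags/"):
        jobs.append("prod-env")  # upload to Anaconda and refresh the 'prod' env
    jobs.append("cleanup")  # if: always()
    return jobs


assert pick_jobs("refs/heads/main") == ["prepare", "test-env", "cleanup"]
assert pick_jobs("refs/tags/0.7.11") == ["prepare", "prod-env", "cleanup"]
```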
.github/workflows/deployment.yaml (vendored, deleted, 26 lines)
@@ -1,26 +0,0 @@
-name: Deployment
-
-on:
-  push:
-    tags:
-      - '*'
-
-jobs:
-  publish-conda-package:
-    runs-on: ubuntu-latest
-
-    steps:
-      - uses: actions/checkout@v2
-
-      - name: Prepare
-        run: |
-          $CONDA/bin/conda install --quiet --yes conda-build anaconda-client conda-libmamba-solver
-          $CONDA/bin/conda config --append channels conda-forge
-          $CONDA/bin/conda config --set solver libmamba
-          $CONDA/bin/conda config --set anaconda_upload yes
-
-      - name: Build and upload
-        env:
-          ANACONDA_TOKEN: ${{ secrets.ANACONDA_TOKEN }}
-        run: |
-          $CONDA/bin/conda build --token $ANACONDA_TOKEN conda-recipe
@@ -28,7 +28,7 @@ requirements:
 
 
 about:
-  home: https://github.com/paulscherrerinstitute/pyzebra
+  home: https://gitlab.psi.ch/zebra/pyzebra
   summary: {{ data['description'] }}
   license: GNU GPLv3
   license_file: LICENSE
@@ -7,18 +7,19 @@ import subprocess
 
 
 def main():
+    default_branch = "main"
     branch = subprocess.check_output("git rev-parse --abbrev-ref HEAD", shell=True).decode().strip()
-    if branch != "master":
-        print("Aborting, not on 'master' branch.")
+    if branch != default_branch:
+        print(f"Aborting, not on '{default_branch}' branch.")
         return
 
-    filepath = "pyzebra/__init__.py"
+    version_filepath = os.path.join(os.path.basename(os.path.dirname(__file__)), "__init__.py")
 
    parser = argparse.ArgumentParser()
    parser.add_argument("level", type=str, choices=["patch", "minor", "major"])
    args = parser.parse_args()
 
-    with open(filepath) as f:
+    with open(version_filepath) as f:
        file_content = f.read()
 
    version = re.search(r'__version__ = "(.*?)"', file_content).group(1)

@@ -36,11 +37,12 @@ def main():
 
    new_version = f"{major}.{minor}.{patch}"
 
-    with open(filepath, "w") as f:
+    with open(version_filepath, "w") as f:
        f.write(re.sub(r'__version__ = "(.*?)"', f'__version__ = "{new_version}"', file_content))
 
-    os.system(f"git commit {filepath} -m 'Updating for version {new_version}'")
+    os.system(f"git commit {version_filepath} -m 'Updating for version {new_version}'")
    os.system(f"git tag -a {new_version} -m 'Release {new_version}'")
    os.system("git push --follow-tags")
 
 
 if __name__ == "__main__":
@@ -6,4 +6,4 @@ from pyzebra.sxtal_refgen import *
 from pyzebra.utils import *
 from pyzebra.xtal import *
 
-__version__ = "0.7.5"
+__version__ = "0.7.11"
@@ -1,6 +1,9 @@
+import logging
 import subprocess
 import xml.etree.ElementTree as ET
 
+logger = logging.getLogger(__name__)
+
 DATA_FACTORY_IMPLEMENTATION = ["trics", "morph", "d10"]
 
 REFLECTION_PRINTER_FORMATS = [

@@ -16,11 +19,11 @@ REFLECTION_PRINTER_FORMATS = [
     "oksana",
 ]
 
-ANATRIC_PATH = "/afs/psi.ch/project/sinq/rhel7/bin/anatric"
+ANATRIC_PATH = "/afs/psi.ch/project/sinq/rhel8/bin/anatric"
 ALGORITHMS = ["adaptivemaxcog", "adaptivedynamic"]
 
 
-def anatric(config_file, anatric_path=ANATRIC_PATH, cwd=None):
+def anatric(config_file, anatric_path=ANATRIC_PATH, cwd=None, log=logger):
     comp_proc = subprocess.run(
         [anatric_path, config_file],
         stdout=subprocess.PIPE,

@@ -29,8 +32,8 @@ def anatric(config_file, anatric_path=ANATRIC_PATH, cwd=None):
         check=True,
         text=True,
     )
-    print(" ".join(comp_proc.args))
-    print(comp_proc.stdout)
+    log.info(" ".join(comp_proc.args))
+    log.info(comp_proc.stdout)
 
 
 class AnatricConfig:
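The anatric wrapper now reports the executed command line and its stdout through a logger supplied by the caller instead of printing to the server console; when nothing is passed, it falls back to the module-level logger. A minimal sketch of capturing that output in memory (the config path is a placeholder, and actually running this requires pyzebra installed plus the anatric binary from the AFS path above):

```python
import logging
from io import StringIO

import pyzebra

stream = StringIO()
log = logging.getLogger("anatric-session")
log.setLevel(logging.INFO)
log.addHandler(logging.StreamHandler(stream))

# The command line and stdout of the anatric run end up in `stream`
# instead of on the console; "config.xml" is a placeholder path.
pyzebra.anatric("config.xml", log=log)

print(stream.getvalue())
```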
@@ -1,17 +0,0 @@
-import logging
-import sys
-from io import StringIO
-
-
-def on_server_loaded(_server_context):
-    formatter = logging.Formatter(
-        fmt="%(asctime)s %(levelname)s: %(message)s", datefmt="%Y-%m-%d %H:%M:%S"
-    )
-
-    sys.stdout = StringIO()
-
-    bokeh_handler = logging.StreamHandler(StringIO())
-    bokeh_handler.setFormatter(formatter)
-    bokeh_logger = logging.getLogger("bokeh")
-    bokeh_logger.setLevel(logging.WARNING)
-    bokeh_logger.addHandler(bokeh_handler)
@@ -4,7 +4,7 @@ import sys
 
 
 def main():
-    app_path = os.path.join(os.path.dirname(os.path.abspath(__file__)))
+    app_path = os.path.dirname(os.path.abspath(__file__))
     subprocess.run(["bokeh", "serve", app_path, *sys.argv[1:]], check=True)
 
 
@@ -1,5 +1,6 @@
 import types
 
+from bokeh.io import curdoc
 from bokeh.models import (
     Button,
     CellEditor,

@@ -51,6 +52,7 @@ def _params_factory(function):
 
 class FitControls:
     def __init__(self):
+        self.log = curdoc().logger
         self.params = {}
 
         def add_function_button_callback(click):

@@ -145,7 +147,11 @@ class FitControls:
 
     def _process_scan(self, scan):
         pyzebra.fit_scan(
-            scan, self.params, fit_from=self.from_spinner.value, fit_to=self.to_spinner.value
+            scan,
+            self.params,
+            fit_from=self.from_spinner.value,
+            fit_to=self.to_spinner.value,
+            log=self.log,
         )
         pyzebra.get_area(
             scan,
@@ -11,6 +11,7 @@ import pyzebra
 class InputControls:
     def __init__(self, dataset, dlfiles, on_file_open=lambda: None, on_monitor_change=lambda: None):
         doc = curdoc()
+        log = doc.logger
 
         def filelist_select_update_for_proposal():
             proposal_path = proposal_textinput.name

@@ -45,19 +46,19 @@ class InputControls:
                     f_name = os.path.basename(f_path)
                     base, ext = os.path.splitext(f_name)
                     try:
-                        file_data = pyzebra.parse_1D(file, ext)
-                    except:
-                        print(f"Error loading {f_name}")
+                        file_data = pyzebra.parse_1D(file, ext, log=log)
+                    except Exception as e:
+                        log.exception(e)
                         continue
 
                     pyzebra.normalize_dataset(file_data, monitor_spinner.value)
 
                     if not new_data:  # first file
                         new_data = file_data
-                        pyzebra.merge_duplicates(new_data)
+                        pyzebra.merge_duplicates(new_data, log=log)
                         dlfiles.set_names([base] * dlfiles.n_files)
                     else:
-                        pyzebra.merge_datasets(new_data, file_data)
+                        pyzebra.merge_datasets(new_data, file_data, log=log)
 
             if new_data:
                 dataset.clear()

@@ -76,13 +77,13 @@ class InputControls:
                     f_name = os.path.basename(f_path)
                     _, ext = os.path.splitext(f_name)
                     try:
-                        file_data = pyzebra.parse_1D(file, ext)
-                    except:
-                        print(f"Error loading {f_name}")
+                        file_data = pyzebra.parse_1D(file, ext, log=log)
+                    except Exception as e:
+                        log.exception(e)
                         continue
 
                     pyzebra.normalize_dataset(file_data, monitor_spinner.value)
-                    pyzebra.merge_datasets(dataset, file_data)
+                    pyzebra.merge_datasets(dataset, file_data, log=log)
 
             if file_data:
                 on_file_open()

@@ -97,19 +98,19 @@ class InputControls:
                 with io.StringIO(base64.b64decode(f_str).decode()) as file:
                     base, ext = os.path.splitext(f_name)
                     try:
-                        file_data = pyzebra.parse_1D(file, ext)
-                    except:
-                        print(f"Error loading {f_name}")
+                        file_data = pyzebra.parse_1D(file, ext, log=log)
+                    except Exception as e:
+                        log.exception(e)
                         continue
 
                     pyzebra.normalize_dataset(file_data, monitor_spinner.value)
 
                     if not new_data:  # first file
                         new_data = file_data
-                        pyzebra.merge_duplicates(new_data)
+                        pyzebra.merge_duplicates(new_data, log=log)
                         dlfiles.set_names([base] * dlfiles.n_files)
                     else:
-                        pyzebra.merge_datasets(new_data, file_data)
+                        pyzebra.merge_datasets(new_data, file_data, log=log)
 
             if new_data:
                 dataset.clear()

@@ -129,13 +130,13 @@ class InputControls:
                 with io.StringIO(base64.b64decode(f_str).decode()) as file:
                     _, ext = os.path.splitext(f_name)
                     try:
-                        file_data = pyzebra.parse_1D(file, ext)
-                    except:
-                        print(f"Error loading {f_name}")
+                        file_data = pyzebra.parse_1D(file, ext, log=log)
+                    except Exception as e:
+                        log.exception(e)
                         continue
 
                     pyzebra.normalize_dataset(file_data, monitor_spinner.value)
-                    pyzebra.merge_datasets(dataset, file_data)
+                    pyzebra.merge_datasets(dataset, file_data, log=log)
 
             if file_data:
                 on_file_open()
@@ -1,6 +1,6 @@
 import argparse
 import logging
-import sys
+from io import StringIO
 
 from bokeh.io import curdoc
 from bokeh.layouts import column, row

@@ -43,11 +43,17 @@ doc.anatric_path = args.anatric_path
 doc.spind_path = args.spind_path
 doc.sxtal_refgen_path = args.sxtal_refgen_path
 
-# In app_hooks.py a StreamHandler was added to "bokeh" logger
-bokeh_stream = logging.getLogger("bokeh").handlers[0].stream
+stream = StringIO()
+handler = logging.StreamHandler(stream)
+handler.setFormatter(
+    logging.Formatter(fmt="%(asctime)s %(levelname)s: %(message)s", datefmt="%Y-%m-%d %H:%M:%S")
+)
+logger = logging.getLogger(str(id(doc)))
+logger.setLevel(logging.INFO)
+logger.addHandler(handler)
+doc.logger = logger
 
-log_textareainput = TextAreaInput(title="logging output:")
-bokeh_log_textareainput = TextAreaInput(title="server output:")
+log_textareainput = TextAreaInput(title="Logging output:")
 
 
 def proposal_textinput_callback(_attr, _old, _new):

@@ -65,7 +71,7 @@ def apply_button_callback():
         try:
             proposal_path = pyzebra.find_proposal_path(proposal)
         except ValueError as e:
-            print(e)
+            logger.exception(e)
             return
         apply_button.disabled = True
     else:

@@ -94,14 +100,13 @@ doc.add_root(
                 panel_spind.create(),
             ]
         ),
-        row(log_textareainput, bokeh_log_textareainput, sizing_mode="scale_both"),
+        row(log_textareainput, sizing_mode="scale_both"),
     )
 )
 
 
 def update_stdout():
-    log_textareainput.value = sys.stdout.getvalue()
-    bokeh_log_textareainput.value = bokeh_stream.getvalue()
+    log_textareainput.value = stream.getvalue()
 
 
 doc.add_periodic_callback(update_stdout, 1000)
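Taken together with the removal of the server hook above, the pattern is: each Bokeh document builds its own logger, keyed by `id(doc)` and backed by an in-memory `StringIO`, attaches it as `doc.logger`, and a periodic callback copies the buffer into the log text area; the panels simply pick it up via `curdoc().logger`. A stripped-down sketch of the same mechanism outside Bokeh (`FakeDoc` is only a stand-in for the Bokeh document object):

```python
import logging
from io import StringIO


class FakeDoc:
    """Stand-in for a bokeh Document; it only needs to carry a .logger attribute."""


doc = FakeDoc()

stream = StringIO()
handler = logging.StreamHandler(stream)
handler.setFormatter(
    logging.Formatter(fmt="%(asctime)s %(levelname)s: %(message)s", datefmt="%Y-%m-%d %H:%M:%S")
)
logger = logging.getLogger(str(id(doc)))  # unique name per document/session
logger.setLevel(logging.INFO)
logger.addHandler(handler)
doc.logger = logger

# A panel would do `log = curdoc().logger` and then:
doc.logger.warning("Select exactly 2 .ccl files.")

# The periodic callback just copies the buffer into the TextAreaInput widget.
print(stream.getvalue())
```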
@@ -33,6 +33,7 @@ from pyzebra import EXPORT_TARGETS, app
 
 def create():
     doc = curdoc()
+    log = doc.logger
     dataset1 = []
     dataset2 = []
     app_dlfiles = app.DownloadFiles(n_files=2)

@@ -94,7 +95,7 @@ def create():
 
     def file_open_button_callback():
         if len(file_select.value) != 2:
-            print("WARNING: Select exactly 2 .ccl files.")
+            log.warning("Select exactly 2 .ccl files.")
             return
 
         new_data1 = []

@@ -104,13 +105,13 @@ def create():
             f_name = os.path.basename(f_path)
             base, ext = os.path.splitext(f_name)
             try:
-                file_data = pyzebra.parse_1D(file, ext)
-            except:
-                print(f"Error loading {f_name}")
+                file_data = pyzebra.parse_1D(file, ext, log=log)
+            except Exception as e:
+                log.exception(e)
                 return
 
             pyzebra.normalize_dataset(file_data, monitor_spinner.value)
-            pyzebra.merge_duplicates(file_data)
+            pyzebra.merge_duplicates(file_data, log=log)
 
             if ind == 0:
                 app_dlfiles.set_names([base, base])

@@ -133,7 +134,7 @@ def create():
 
     def upload_button_callback(_attr, _old, _new):
         if len(upload_button.filename) != 2:
-            print("WARNING: Upload exactly 2 .ccl files.")
+            log.warning("Upload exactly 2 .ccl files.")
             return
 
         new_data1 = []

@@ -142,13 +143,13 @@ def create():
         with io.StringIO(base64.b64decode(f_str).decode()) as file:
             base, ext = os.path.splitext(f_name)
             try:
-                file_data = pyzebra.parse_1D(file, ext)
-            except:
-                print(f"Error loading {f_name}")
+                file_data = pyzebra.parse_1D(file, ext, log=log)
+            except Exception as e:
+                log.exception(e)
                 return
 
             pyzebra.normalize_dataset(file_data, monitor_spinner.value)
-            pyzebra.merge_duplicates(file_data)
+            pyzebra.merge_duplicates(file_data, log=log)
 
             if ind == 0:
                 app_dlfiles.set_names([base, base])

@@ -377,11 +378,11 @@ def create():
         scan_from2 = dataset2[int(merge_from_select.value)]
 
         if scan_into1 is scan_from1:
-            print("WARNING: Selected scans for merging are identical")
+            log.warning("Selected scans for merging are identical")
             return
 
-        pyzebra.merge_scans(scan_into1, scan_from1)
-        pyzebra.merge_scans(scan_into2, scan_from2)
+        pyzebra.merge_scans(scan_into1, scan_from1, log=log)
+        pyzebra.merge_scans(scan_into2, scan_from2, log=log)
         _update_table()
         _update_plot()
 
@@ -2,6 +2,7 @@ import os
 import tempfile
 
 import numpy as np
+from bokeh.io import curdoc
 from bokeh.layouts import column, row
 from bokeh.models import (
     Button,

@@ -25,6 +26,8 @@ from pyzebra import EXPORT_TARGETS, app
 
 
 def create():
+    doc = curdoc()
+    log = doc.logger
     dataset = []
     app_dlfiles = app.DownloadFiles(n_files=2)
 

@@ -214,10 +217,10 @@ def create():
         scan_from = dataset[int(merge_from_select.value)]
 
         if scan_into is scan_from:
-            print("WARNING: Selected scans for merging are identical")
+            log.warning("Selected scans for merging are identical")
             return
 
-        pyzebra.merge_scans(scan_into, scan_from)
+        pyzebra.merge_scans(scan_into, scan_from, log=log)
         _update_table()
         _update_plot()
 
@@ -5,6 +5,7 @@ import subprocess
 import tempfile
 
 import numpy as np
+from bokeh.io import curdoc
 from bokeh.layouts import column, row
 from bokeh.models import (
     Arrow,

@@ -39,6 +40,8 @@ SORT_OPT_NB = ["gamma", "nu", "omega"]
 
 
 def create():
+    doc = curdoc()
+    log = doc.logger
     ang_lims = {}
     cif_data = {}
     params = {}

@@ -132,7 +135,11 @@ def create():
         params = dict()
         params["SPGR"] = cryst_space_group.value
         params["CELL"] = cryst_cell.value
-        ub = pyzebra.calc_ub_matrix(params)
+        try:
+            ub = pyzebra.calc_ub_matrix(params, log=log)
+        except Exception as e:
+            log.exception(e)
+            return
         ub_matrix.value = " ".join(ub)
 
     ub_matrix_calc = Button(label="UB matrix:", button_type="primary", width=100)

@@ -221,9 +228,9 @@ def create():
             geom_template = None
         pyzebra.export_geom_file(geom_path, ang_lims, geom_template)
 
-        print(f"Content of {geom_path}:")
+        log.info(f"Content of {geom_path}:")
         with open(geom_path) as f:
-            print(f.read())
+            log.info(f.read())
 
         priority = [sorting_0.value, sorting_1.value, sorting_2.value]
         chunks = [sorting_0_dt.value, sorting_1_dt.value, sorting_2_dt.value]

@@ -248,9 +255,9 @@ def create():
             cfl_template = None
         pyzebra.export_cfl_file(cfl_path, params, cfl_template)
 
-        print(f"Content of {cfl_path}:")
+        log.info(f"Content of {cfl_path}:")
         with open(cfl_path) as f:
-            print(f.read())
+            log.info(f.read())
 
         comp_proc = subprocess.run(
             [pyzebra.SXTAL_REFGEN_PATH, cfl_path],

@@ -260,8 +267,8 @@ def create():
             stderr=subprocess.STDOUT,
             text=True,
         )
-        print(" ".join(comp_proc.args))
-        print(comp_proc.stdout)
+        log.info(" ".join(comp_proc.args))
+        log.info(comp_proc.stdout)
 
         if i == 1:  # all hkl files are identical, so keep only one
             hkl_fname = base_fname + ".hkl"

@@ -591,8 +598,8 @@ def create():
         _, ext = os.path.splitext(fname)
         try:
             file_data = pyzebra.parse_hkl(file, ext)
-        except:
-            print(f"Error loading {fname}")
+        except Exception as e:
+            log.exception(e)
             return
 
         fnames.append(fname)
@@ -24,6 +24,7 @@ from pyzebra import DATA_FACTORY_IMPLEMENTATION, REFLECTION_PRINTER_FORMATS
 
 def create():
     doc = curdoc()
+    log = doc.logger
     config = pyzebra.AnatricConfig()
 
     def _load_config_file(file):

@@ -347,7 +348,11 @@ def create():
         with tempfile.TemporaryDirectory() as temp_dir:
             temp_file = temp_dir + "/config.xml"
             config.save_as(temp_file)
-            pyzebra.anatric(temp_file, anatric_path=doc.anatric_path, cwd=temp_dir)
+            try:
+                pyzebra.anatric(temp_file, anatric_path=doc.anatric_path, cwd=temp_dir, log=log)
+            except Exception as e:
+                log.exception(e)
+                return
 
             with open(os.path.join(temp_dir, config.logfile)) as f_log:
                 output_log.value = f_log.read()
@@ -36,6 +36,7 @@ IMAGE_PLOT_H = int(IMAGE_H * 2.4) + 27
 
 def create():
     doc = curdoc()
+    log = doc.logger
     dataset = []
     cami_meta = {}
 

@@ -133,8 +134,8 @@ def create():
         for f_name in file_select.value:
             try:
                 new_data.append(pyzebra.read_detector_data(f_name))
-            except KeyError:
-                print("Could not read data from the file.")
+            except KeyError as e:
+                log.exception(e)
                 return
 
         dataset.extend(new_data)

@@ -275,7 +276,7 @@ def create():
         frame_range.bounds = (0, n_im)
 
         scan_motor = scan["scan_motor"]
-        proj_y_plot.axis[1].axis_label = f"Scanning motor, {scan_motor}"
+        proj_y_plot.yaxis.axis_label = f"Scanning motor, {scan_motor}"
 
         var = scan[scan_motor]
         var_start = var[0]
@@ -43,6 +43,7 @@ IMAGE_PLOT_H = int(IMAGE_H * 2.4) + 27
 
 def create():
     doc = curdoc()
+    log = doc.logger
     dataset = []
     cami_meta = {}
 

@@ -102,8 +103,8 @@ def create():
         nonlocal dataset
         try:
             scan = pyzebra.read_detector_data(io.BytesIO(base64.b64decode(new)), None)
-        except KeyError:
-            print("Could not read data from the file.")
+        except Exception as e:
+            log.exception(e)
             return
 
         dataset = [scan]

@@ -137,8 +138,8 @@ def create():
             f_name = os.path.basename(f_path)
             try:
                 file_data = [pyzebra.read_detector_data(f_path, cm)]
-            except:
-                print(f"Error loading {f_name}")
+            except Exception as e:
+                log.exception(e)
                 continue
 
             pyzebra.normalize_dataset(file_data, monitor_spinner.value)

@@ -146,7 +147,7 @@ def create():
             if not new_data:  # first file
                 new_data = file_data
             else:
-                pyzebra.merge_datasets(new_data, file_data)
+                pyzebra.merge_datasets(new_data, file_data, log=log)
 
         if new_data:
             dataset = new_data

@@ -161,12 +162,12 @@ def create():
             f_name = os.path.basename(f_path)
             try:
                 file_data = [pyzebra.read_detector_data(f_path, None)]
-            except:
-                print(f"Error loading {f_name}")
+            except Exception as e:
+                log.exception(e)
                 continue
 
             pyzebra.normalize_dataset(file_data, monitor_spinner.value)
-            pyzebra.merge_datasets(dataset, file_data)
+            pyzebra.merge_datasets(dataset, file_data, log=log)
 
         if file_data:
             _init_datatable()

@@ -292,10 +293,10 @@ def create():
         scan_from = dataset[int(merge_from_select.value)]
 
         if scan_into is scan_from:
-            print("WARNING: Selected scans for merging are identical")
+            log.warning("Selected scans for merging are identical")
             return
 
-        pyzebra.merge_h5_scans(scan_into, scan_from)
+        pyzebra.merge_h5_scans(scan_into, scan_from, log=log)
         _update_table()
         _update_image()
         _update_proj_plots()

@@ -406,7 +407,7 @@ def create():
         frame_range.bounds = (0, n_im)
 
         scan_motor = scan["scan_motor"]
-        proj_y_plot.axis[1].axis_label = f"Scanning motor, {scan_motor}"
+        proj_y_plot.yaxis.axis_label = f"Scanning motor, {scan_motor}"
 
         var = scan[scan_motor]
         var_start = var[0]
@@ -3,6 +3,7 @@ import os
 import tempfile
 
 import numpy as np
+from bokeh.io import curdoc
 from bokeh.layouts import column, row
 from bokeh.models import (
     Button,

@@ -38,6 +39,8 @@ def color_palette(n_colors):
 
 
 def create():
+    doc = curdoc()
+    log = doc.logger
     dataset = []
     app_dlfiles = app.DownloadFiles(n_files=1)
 

@@ -361,10 +364,10 @@ def create():
         scan_from = dataset[int(merge_from_select.value)]
 
         if scan_into is scan_from:
-            print("WARNING: Selected scans for merging are identical")
+            log.warning("Selected scans for merging are identical")
             return
 
-        pyzebra.merge_scans(scan_into, scan_from)
+        pyzebra.merge_scans(scan_into, scan_from, log=log)
         _update_table()
         _update_single_scan_plot()
         _update_overview()
@@ -3,6 +3,7 @@ import io
 import os
 
 import numpy as np
+from bokeh.io import curdoc
 from bokeh.layouts import column, row
 from bokeh.models import (
     Button,

@@ -31,6 +32,8 @@ from pyzebra.app.panel_hdf_viewer import calculate_hkl
 
 
 def create():
+    doc = curdoc()
+    log = doc.logger
     _update_slice = None
     measured_data_div = Div(text="Measured <b>HDF</b> data:")
     measured_data = FileInput(accept=".hdf", multiple=True, width=200)

@@ -59,8 +62,8 @@ def create():
         # Read data
         try:
             det_data = pyzebra.read_detector_data(io.BytesIO(base64.b64decode(fdata)))
-        except:
-            print(f"Error loading {fname}")
+        except Exception as e:
+            log.exception(e)
             return None
 
         if ind == 0:

@@ -179,8 +182,8 @@ def create():
         _, ext = os.path.splitext(fname)
         try:
             fdata = pyzebra.parse_hkl(file, ext)
-        except:
-            print(f"Error loading {fname}")
+        except Exception as e:
+            log.exception(e)
             return
 
         for ind in range(len(fdata["counts"])):
@@ -21,6 +21,7 @@ import pyzebra
 
 def create():
     doc = curdoc()
+    log = doc.logger
     events_data = doc.events_data
 
     npeaks_spinner = Spinner(title="Number of peaks from hdf_view panel:", disabled=True)

@@ -63,8 +64,8 @@ def create():
             stderr=subprocess.STDOUT,
             text=True,
         )
-        print(" ".join(comp_proc.args))
-        print(comp_proc.stdout)
+        log.info(" ".join(comp_proc.args))
+        log.info(comp_proc.stdout)
 
         # prepare an event file
         diff_vec = []

@@ -94,9 +95,9 @@ def create():
                     f"{x_pos} {y_pos} {intensity} {snr_cnts} {dv1} {dv2} {dv3} {d_spacing}\n"
                 )
 
-            print(f"Content of {temp_event_file}:")
+            log.info(f"Content of {temp_event_file}:")
             with open(temp_event_file) as f:
-                print(f.read())
+                log.info(f.read())
 
             comp_proc = subprocess.run(
                 [

@@ -123,8 +124,8 @@ def create():
                 stderr=subprocess.STDOUT,
                 text=True,
             )
-            print(" ".join(comp_proc.args))
-            print(comp_proc.stdout)
+            log.info(" ".join(comp_proc.args))
+            log.info(comp_proc.stdout)
 
             spind_out_file = os.path.join(temp_dir, "spind.txt")
             spind_res = dict(

@@ -146,12 +147,12 @@ def create():
                     ub_matrices.append(ub_matrix_spind)
                     spind_res["ub_matrix"].append(str(ub_matrix_spind * 1e-10))
 
-                print(f"Content of {spind_out_file}:")
+                log.info(f"Content of {spind_out_file}:")
                 with open(spind_out_file) as f:
-                    print(f.read())
+                    log.info(f.read())
 
             except FileNotFoundError:
-                print("No results from spind")
+                log.warning("No results from spind")
 
             results_table_source.data.update(spind_res)
 
@@ -3,6 +3,7 @@ import io
 import os
 
 import numpy as np
+from bokeh.io import curdoc
 from bokeh.layouts import column, row
 from bokeh.models import (
     Arrow,

@@ -30,6 +31,9 @@ import pyzebra
 
 class PlotHKL:
     def __init__(self):
+        doc = curdoc()
+        log = doc.logger
+
         _update_slice = None
         measured_data_div = Div(text="Measured <b>CCL</b> data:")
         measured_data = FileInput(accept=".ccl", multiple=True, width=200)

@@ -62,9 +66,9 @@ class PlotHKL:
         with io.StringIO(base64.b64decode(md_fdata[0]).decode()) as file:
             _, ext = os.path.splitext(md_fnames[0])
             try:
-                file_data = pyzebra.parse_1D(file, ext)
-            except:
-                print(f"Error loading {md_fnames[0]}")
+                file_data = pyzebra.parse_1D(file, ext, log=log)
+            except Exception as e:
+                log.exception(e)
                 return None
 
             alpha = file_data[0]["alpha_cell"] * np.pi / 180.0

@@ -144,9 +148,9 @@ class PlotHKL:
             with io.StringIO(base64.b64decode(md_fdata[j]).decode()) as file:
                 _, ext = os.path.splitext(md_fname)
                 try:
-                    file_data = pyzebra.parse_1D(file, ext)
-                except:
-                    print(f"Error loading {md_fname}")
+                    file_data = pyzebra.parse_1D(file, ext, log=log)
+                except Exception as e:
+                    log.exception(e)
                     return None
 
                 pyzebra.normalize_dataset(file_data)

@@ -291,8 +295,8 @@ class PlotHKL:
         _, ext = os.path.splitext(fname)
         try:
             fdata = pyzebra.parse_hkl(file, ext)
-        except:
-            print(f"Error loading {fname}")
+        except Exception as e:
+            log.exception(e)
             return
 
         for ind in range(len(fdata["counts"])):
@@ -1,3 +1,4 @@
+import logging
 import os
 import re
 from ast import literal_eval

@@ -5,6 +6,8 @@ from collections import defaultdict
 
 import numpy as np
 
+logger = logging.getLogger(__name__)
+
 META_VARS_STR = (
     "instrument",
     "title",

@@ -14,6 +17,7 @@ META_VARS_STR = (
     "original_filename",
     "date",
     "zebra_mode",
+    "zebramode",
     "sample_name",
 )
 

@@ -110,7 +114,7 @@ def load_1D(filepath):
     return dataset
 
 
-def parse_1D(fileobj, data_type):
+def parse_1D(fileobj, data_type, log=logger):
     metadata = {"data_type": data_type}
 
     # read metadata

@@ -133,6 +137,8 @@ def parse_1D(fileobj, data_type):
 
         try:
             if var_name in META_VARS_STR:
+                if var_name == "zebramode":
+                    var_name = "zebra_mode"
                 metadata[var_name] = value
 
             elif var_name in META_VARS_FLOAT:

@@ -156,7 +162,7 @@ def parse_1D(fileobj, data_type):
                 metadata["ub"][row, :] = list(map(float, value.split()))
 
         except Exception:
-            print(f"Error reading {var_name} with value '{value}'")
+            log.error(f"Error reading {var_name} with value '{value}'")
             metadata[var_name] = 0
 
     # handle older files that don't contain "zebra_mode" metadata

@@ -232,7 +238,7 @@ def parse_1D(fileobj, data_type):
            scan["export"] = True
 
            match = re.search("Scanning Variables: (.*), Steps: (.*)", next(fileobj))
-            motors = [motor.lower() for motor in match.group(1).split(", ")]
+            motors = [motor.strip().lower() for motor in match.group(1).split(",")]
            # Steps can be separated by " " or ", "
            steps = [float(step.strip(",")) for step in match.group(2).split()]
 

@@ -294,7 +300,7 @@ def parse_1D(fileobj, data_type):
            dataset.append({**metadata, **scan})
 
    else:
-        print("Unknown file extention")
+        log.error("Unknown file extention")
 
    return dataset
 
@@ -1,3 +1,4 @@
+import logging
 import os
 
 import numpy as np

@@ -6,6 +7,8 @@ from scipy.integrate import simpson, trapezoid
 
 from pyzebra import CCL_ANGLES
 
+logger = logging.getLogger(__name__)
+
 PARAM_PRECISIONS = {
     "twotheta": 0.1,
     "chi": 0.1,

@@ -33,12 +36,12 @@ def normalize_dataset(dataset, monitor=100_000):
         scan["monitor"] = monitor
 
 
-def merge_duplicates(dataset):
+def merge_duplicates(dataset, log=logger):
     merged = np.zeros(len(dataset), dtype=bool)
     for ind_into, scan_into in enumerate(dataset):
         for ind_from, scan_from in enumerate(dataset[ind_into + 1 :], start=ind_into + 1):
             if _parameters_match(scan_into, scan_from) and not merged[ind_from]:
-                merge_scans(scan_into, scan_from)
+                merge_scans(scan_into, scan_from, log=log)
                 merged[ind_from] = True
 
 

@@ -75,11 +78,13 @@ def _parameters_match(scan1, scan2):
     return True
 
 
-def merge_datasets(dataset_into, dataset_from):
+def merge_datasets(dataset_into, dataset_from, log=logger):
     scan_motors_into = dataset_into[0]["scan_motors"]
     scan_motors_from = dataset_from[0]["scan_motors"]
     if scan_motors_into != scan_motors_from:
-        print(f"Scan motors mismatch between datasets: {scan_motors_into} vs {scan_motors_from}")
+        log.warning(
+            f"Scan motors mismatch between datasets: {scan_motors_into} vs {scan_motors_from}"
+        )
         return
 
     merged = np.zeros(len(dataset_from), dtype=bool)

@@ -87,16 +92,16 @@
         for ind, scan_from in enumerate(dataset_from):
             if _parameters_match(scan_into, scan_from) and not merged[ind]:
                 if scan_into["counts"].ndim == 3:
-                    merge_h5_scans(scan_into, scan_from)
+                    merge_h5_scans(scan_into, scan_from, log=log)
                 else:  # scan_into["counts"].ndim == 1
-                    merge_scans(scan_into, scan_from)
+                    merge_scans(scan_into, scan_from, log=log)
                 merged[ind] = True
 
     for scan_from in dataset_from:
         dataset_into.append(scan_from)
 
 
-def merge_scans(scan_into, scan_from):
+def merge_scans(scan_into, scan_from, log=logger):
     if "init_scan" not in scan_into:
         scan_into["init_scan"] = scan_into.copy()
 

@@ -148,10 +153,10 @@ def merge_scans(scan_into, scan_from):
 
     fname1 = os.path.basename(scan_into["original_filename"])
     fname2 = os.path.basename(scan_from["original_filename"])
-    print(f'Merging scans: {scan_into["idx"]} ({fname1}) <-- {scan_from["idx"]} ({fname2})')
+    log.info(f'Merging scans: {scan_into["idx"]} ({fname1}) <-- {scan_from["idx"]} ({fname2})')
 
 
-def merge_h5_scans(scan_into, scan_from):
+def merge_h5_scans(scan_into, scan_from, log=logger):
     if "init_scan" not in scan_into:
         scan_into["init_scan"] = scan_into.copy()
 

@@ -160,7 +165,7 @@
 
     for scan in scan_into["merged_scans"]:
         if scan_from is scan:
-            print("Already merged scan")
+            log.warning("Already merged scan")
             return
 
     scan_into["merged_scans"].append(scan_from)

@@ -212,7 +217,7 @@
 
     fname1 = os.path.basename(scan_into["original_filename"])
     fname2 = os.path.basename(scan_from["original_filename"])
-    print(f'Merging scans: {scan_into["idx"]} ({fname1}) <-- {scan_from["idx"]} ({fname2})')
+    log.info(f'Merging scans: {scan_into["idx"]} ({fname1}) <-- {scan_from["idx"]} ({fname2})')
 
 
 def restore_scan(scan):

@@ -230,7 +235,7 @@
     scan["export"] = True
 
 
-def fit_scan(scan, model_dict, fit_from=None, fit_to=None):
+def fit_scan(scan, model_dict, fit_from=None, fit_to=None, log=logger):
     if fit_from is None:
         fit_from = -np.inf
     if fit_to is None:

@@ -243,7 +248,7 @@ def fit_scan(scan, model_dict, fit_from=None, fit_to=None):
     # apply fitting range
     fit_ind = (fit_from <= x_fit) & (x_fit <= fit_to)
     if not np.any(fit_ind):
-        print(f"No data in fit range for scan {scan['idx']}")
+        log.warning(f"No data in fit range for scan {scan['idx']}")
         return
 
     y_fit = y_fit[fit_ind]
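Across the processing functions the new log keyword defaults to the module-level logger, so plain library use keeps working unchanged while the web app injects its per-session logger. A small, self-contained illustration of that default-argument pattern (merge_example is a toy function written for this note, not the real merge_scans):

```python
import logging
from io import StringIO

module_logger = logging.getLogger(__name__)


def merge_example(name_into, name_from, log=module_logger):
    # Library callers get the module logger; the Bokeh app passes log=doc.logger.
    log.info(f"Merging scans: {name_into} <-- {name_from}")


# Default: the message goes to the module logger (and whatever handlers are configured).
merge_example("scan_1", "scan_2")

# Injected: captured per session, the way the panels use doc.logger.
buf = StringIO()
session_log = logging.getLogger("session-42")
session_log.setLevel(logging.INFO)
session_log.addHandler(logging.StreamHandler(buf))
merge_example("scan_1", "scan_2", log=session_log)
assert "Merging scans" in buf.getvalue()
```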
@@ -148,11 +148,21 @@ def read_detector_data(filepath, cami_meta=None):
     if "/entry1/sample/magnetic_field" in h5f:
         scan["mf"] = h5f["/entry1/sample/magnetic_field"][:]
 
+    if "mf" in scan:
+        # TODO: NaNs are not JSON compliant, so replace them with None
+        # this is not a great solution, but makes it safe to use the array in bokeh
+        scan["mf"] = np.where(np.isnan(scan["mf"]), None, scan["mf"])
+
     if "/entry1/sample/temperature" in h5f:
         scan["temp"] = h5f["/entry1/sample/temperature"][:]
     elif "/entry1/sample/Ts/value" in h5f:
         scan["temp"] = h5f["/entry1/sample/Ts/value"][:]
 
+    if "temp" in scan:
+        # TODO: NaNs are not JSON compliant, so replace them with None
+        # this is not a great solution, but makes it safe to use the array in bokeh
+        scan["temp"] = np.where(np.isnan(scan["temp"]), None, scan["temp"])
+
     # overwrite metadata from .cami
     if cami_meta is not None:
         if "crystal" in cami_meta:
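The magnetic-field and temperature arrays can contain NaN, which is not valid JSON for the browser, so the reader replaces NaNs with None before the data reaches Bokeh. Note that np.where with None promotes the result to an object array, which is what makes it serializable at the cost of fast numerics; a quick check of that behaviour:

```python
import numpy as np

temp = np.array([1.5, np.nan, 2.0])
safe = np.where(np.isnan(temp), None, temp)

print(safe)        # [1.5 None 2.0]
print(safe.dtype)  # object -- mixed float/None, safe to hand to bokeh as JSON
```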
@@ -1,4 +1,5 @@
 import io
+import logging
 import os
 import subprocess
 import tempfile

@@ -6,7 +7,9 @@ from math import ceil, floor
 
 import numpy as np
 
-SXTAL_REFGEN_PATH = "/afs/psi.ch/project/sinq/rhel7/bin/Sxtal_Refgen"
+logger = logging.getLogger(__name__)
+
+SXTAL_REFGEN_PATH = "/afs/psi.ch/project/sinq/rhel8/bin/Sxtal_Refgen"
 
 _zebraBI_default_geom = """GEOM 2 Bissecting - HiCHI
 BLFR z-up

@@ -144,7 +147,7 @@ def export_geom_file(path, ang_lims, template=None):
                 out_file.write(f"{'':<8}{ang:<10}{vals[0]:<10}{vals[1]:<10}{vals[2]:<10}\n")
 
 
-def calc_ub_matrix(params):
+def calc_ub_matrix(params, log=logger):
     with tempfile.TemporaryDirectory() as temp_dir:
         cfl_file = os.path.join(temp_dir, "ub_matrix.cfl")
 

@@ -160,8 +163,8 @@
             stderr=subprocess.STDOUT,
             text=True,
         )
-        print(" ".join(comp_proc.args))
-        print(comp_proc.stdout)
+        log.info(" ".join(comp_proc.args))
+        log.info(comp_proc.stdout)
 
         sfa_file = os.path.join(temp_dir, "ub_matrix.sfa")
         ub_matrix = []
@@ -1,11 +0,0 @@
-[Unit]
-Description=pyzebra-test web server
-
-[Service]
-Type=simple
-User=pyzebra
-ExecStart=/bin/bash /usr/local/sbin/pyzebra-test.sh
-Restart=always
-
-[Install]
-WantedBy=multi-user.target
@@ -1,4 +0,0 @@
-source /opt/miniconda3/etc/profile.d/conda.sh
-
-conda activate test
-python /opt/pyzebra/pyzebra/app/cli.py --port=5010 --allow-websocket-origin=pyzebra.psi.ch:5010 --args --spind-path=/opt/spind
@@ -1,10 +0,0 @@
-[Unit]
-Description=pyzebra web server
-
-[Service]
-Type=simple
-ExecStart=/bin/bash /usr/local/sbin/pyzebra.sh
-Restart=always
-
-[Install]
-WantedBy=multi-user.target
@@ -1,4 +0,0 @@
-source /opt/miniconda3/etc/profile.d/conda.sh
-
-conda activate prod
-pyzebra --port=80 --allow-websocket-origin=pyzebra.psi.ch:80 --args --spind-path=/opt/spind