Compare commits

No commits in common. "main" and "0.7.2" have entirely different histories.
main ... 0.7.2

33 changed files with 2286 additions and 2599 deletions


@@ -1,53 +0,0 @@
name: pyzebra CI/CD pipeline
on:
push:
branches:
- main
tags:
- '*'
env:
CONDA: /opt/miniforge3
jobs:
prepare:
runs-on: pyzebra
steps:
- run: $CONDA/bin/conda config --add channels conda-forge
- run: $CONDA/bin/conda config --set solver libmamba
test-env:
runs-on: pyzebra
needs: prepare
if: github.ref == 'refs/heads/main'
env:
BUILD_DIR: ${{ runner.temp }}/conda_build
steps:
- name: Checkout repository
uses: actions/checkout@v4
- run: $CONDA/bin/conda build --no-anaconda-upload --output-folder $BUILD_DIR ./conda-recipe
- run: $CONDA/bin/conda remove --name test --all --keep-env -y
- run: $CONDA/bin/conda install --name test --channel $BUILD_DIR python=3.8 pyzebra -y
- run: sudo systemctl restart pyzebra-test.service
prod-env:
runs-on: pyzebra
needs: prepare
if: startsWith(github.ref, 'refs/tags/')
env:
BUILD_DIR: ${{ runner.temp }}/conda_build
steps:
- name: Checkout repository
uses: actions/checkout@v4
- run: $CONDA/bin/conda build --token ${{ secrets.ANACONDA_TOKEN }} --output-folder $BUILD_DIR ./conda-recipe
- run: $CONDA/bin/conda remove --name prod --all --keep-env -y
- run: $CONDA/bin/conda install --name prod --channel $BUILD_DIR python=3.8 pyzebra -y
- run: sudo systemctl restart pyzebra-prod.service
cleanup:
runs-on: pyzebra
needs: [test-env, prod-env]
if: always()
steps:
- run: $CONDA/bin/conda build purge-all

.github/workflows/deployment.yaml (new file)

@@ -0,0 +1,25 @@
name: Deployment
on:
push:
tags:
- '*'
jobs:
publish-conda-package:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Prepare
run: |
$CONDA/bin/conda install --quiet --yes conda-build anaconda-client
$CONDA/bin/conda config --append channels conda-forge
$CONDA/bin/conda config --set anaconda_upload yes
- name: Build and upload
env:
ANACONDA_TOKEN: ${{ secrets.ANACONDA_TOKEN }}
run: |
$CONDA/bin/conda build --token $ANACONDA_TOKEN conda-recipe


@@ -15,10 +15,10 @@ build:
requirements:
build:
- python >=3.8
- python >=3.7
- setuptools
run:
- python >=3.8
- python >=3.7
- numpy
- scipy
- h5py
@@ -28,7 +28,7 @@ requirements:
about:
home: https://gitlab.psi.ch/zebra/pyzebra
home: https://github.com/paulscherrerinstitute/pyzebra
summary: {{ data['description'] }}
license: GNU GPLv3
license_file: LICENSE


@@ -7,19 +7,18 @@ import subprocess
def main():
default_branch = "main"
branch = subprocess.check_output("git rev-parse --abbrev-ref HEAD", shell=True).decode().strip()
if branch != default_branch:
print(f"Aborting, not on '{default_branch}' branch.")
if branch != "master":
print("Aborting, not on 'master' branch.")
return
version_filepath = os.path.join(os.path.basename(os.path.dirname(__file__)), "__init__.py")
filepath = "pyzebra/__init__.py"
parser = argparse.ArgumentParser()
parser.add_argument("level", type=str, choices=["patch", "minor", "major"])
args = parser.parse_args()
with open(version_filepath) as f:
with open(filepath) as f:
file_content = f.read()
version = re.search(r'__version__ = "(.*?)"', file_content).group(1)
@@ -37,12 +36,11 @@ def main():
new_version = f"{major}.{minor}.{patch}"
with open(version_filepath, "w") as f:
with open(filepath, "w") as f:
f.write(re.sub(r'__version__ = "(.*?)"', f'__version__ = "{new_version}"', file_content))
os.system(f"git commit {version_filepath} -m 'Updating for version {new_version}'")
os.system(f"git commit {filepath} -m 'Updating for version {new_version}'")
os.system(f"git tag -a {new_version} -m 'Release {new_version}'")
os.system("git push --follow-tags")
if __name__ == "__main__":
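
The script above is the release helper: it rewrites __version__ in pyzebra's __init__.py with a regular expression and then commits, tags, and pushes. The hunk omits the lines that compute the new number, so the following is only a sketch of the usual patch/minor/major rules; the reset-to-zero behaviour is assumed rather than taken from the diff.

import re

def bump(file_content: str, level: str) -> str:
    # Same regex as the script; the reset rules below are an assumption, not shown in the hunk.
    version = re.search(r'__version__ = "(.*?)"', file_content).group(1)
    major, minor, patch = map(int, version.split("."))
    if level == "major":
        major, minor, patch = major + 1, 0, 0
    elif level == "minor":
        minor, patch = minor + 1, 0
    else:  # "patch"
        patch += 1
    new_version = f"{major}.{minor}.{patch}"
    return re.sub(r'__version__ = "(.*?)"', f'__version__ = "{new_version}"', file_content)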


@@ -2,8 +2,8 @@ from pyzebra.anatric import *
from pyzebra.ccl_io import *
from pyzebra.ccl_process import *
from pyzebra.h5 import *
from pyzebra.sxtal_refgen import *
from pyzebra.utils import *
from pyzebra.xtal import *
from pyzebra.sxtal_refgen import *
__version__ = "0.7.11"
__version__ = "0.7.2"


@@ -1,10 +1,12 @@
import logging
import subprocess
import xml.etree.ElementTree as ET
logger = logging.getLogger(__name__)
DATA_FACTORY_IMPLEMENTATION = ["trics", "morph", "d10"]
DATA_FACTORY_IMPLEMENTATION = [
"trics",
"morph",
"d10",
]
REFLECTION_PRINTER_FORMATS = [
"rafin",
@@ -19,11 +21,11 @@ REFLECTION_PRINTER_FORMATS = [
"oksana",
]
ANATRIC_PATH = "/afs/psi.ch/project/sinq/rhel8/bin/anatric"
ANATRIC_PATH = "/afs/psi.ch/project/sinq/rhel7/bin/anatric"
ALGORITHMS = ["adaptivemaxcog", "adaptivedynamic"]
def anatric(config_file, anatric_path=ANATRIC_PATH, cwd=None, log=logger):
def anatric(config_file, anatric_path=ANATRIC_PATH, cwd=None):
comp_proc = subprocess.run(
[anatric_path, config_file],
stdout=subprocess.PIPE,
@@ -32,8 +34,8 @@ def anatric(config_file, anatric_path=ANATRIC_PATH, cwd=None, log=logger):
check=True,
text=True,
)
log.info(" ".join(comp_proc.args))
log.info(comp_proc.stdout)
print(" ".join(comp_proc.args))
print(comp_proc.stdout)
class AnatricConfig:
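
The only change to anatric() is where its output goes: on main the function takes an optional log argument and reports the command line and stdout through a logging.Logger, while 0.7.2 falls back to print(). A minimal usage sketch of the main-branch signature; the config file name and working directory are placeholders, not values from the repository.

import logging

import pyzebra

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("anatric-session")

# Runs the anatric executable on a config file and routes its output to `logger`.
pyzebra.anatric("config.xml", cwd="/tmp", log=logger)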


@@ -1,4 +0,0 @@
from pyzebra.app.download_files import DownloadFiles
from pyzebra.app.fit_controls import FitControls
from pyzebra.app.input_controls import InputControls
from pyzebra.app.plot_hkl import PlotHKL


@@ -1,77 +1,47 @@
import argparse
import logging
import sys
from io import StringIO
import pyzebra
from bokeh.io import curdoc
from bokeh.layouts import column, row
from bokeh.models import Button, Panel, Tabs, TextAreaInput, TextInput
import pyzebra
from pyzebra.app import (
panel_ccl_compare,
panel_ccl_integrate,
panel_ccl_prepare,
panel_hdf_anatric,
panel_hdf_param_study,
panel_hdf_viewer,
panel_param_study,
panel_plot_data,
panel_spind,
)
import panel_ccl_integrate
import panel_ccl_compare
import panel_hdf_anatric
import panel_hdf_param_study
import panel_hdf_viewer
import panel_param_study
import panel_spind
import panel_ccl_prepare
doc = curdoc()
doc.title = "pyzebra"
parser = argparse.ArgumentParser()
parser.add_argument(
"--anatric-path", type=str, default=pyzebra.ANATRIC_PATH, help="path to anatric executable"
)
parser.add_argument(
"--sxtal-refgen-path",
type=str,
default=pyzebra.SXTAL_REFGEN_PATH,
help="path to Sxtal_Refgen executable",
)
parser.add_argument("--spind-path", type=str, default=None, help="path to spind scripts folder")
args = parser.parse_args()
doc.anatric_path = args.anatric_path
doc.spind_path = args.spind_path
doc.sxtal_refgen_path = args.sxtal_refgen_path
stream = StringIO()
handler = logging.StreamHandler(stream)
handler.setFormatter(
logging.Formatter(fmt="%(asctime)s %(levelname)s: %(message)s", datefmt="%Y-%m-%d %H:%M:%S")
)
logger = logging.getLogger(str(id(doc)))
logger.setLevel(logging.INFO)
logger.addHandler(handler)
doc.logger = logger
log_textareainput = TextAreaInput(title="Logging output:")
sys.stdout = StringIO()
stdout_textareainput = TextAreaInput(title="print output:", height=150)
bokeh_stream = StringIO()
bokeh_handler = logging.StreamHandler(bokeh_stream)
bokeh_handler.setFormatter(logging.Formatter(logging.BASIC_FORMAT))
bokeh_logger = logging.getLogger("bokeh")
bokeh_logger.addHandler(bokeh_handler)
bokeh_log_textareainput = TextAreaInput(title="server output:", height=150)
def proposal_textinput_callback(_attr, _old, _new):
apply_button.disabled = False
proposal_textinput = TextInput(title="Proposal number:", name="")
proposal_textinput.on_change("value_input", proposal_textinput_callback)
doc.proposal_textinput = proposal_textinput
def apply_button_callback():
proposal = proposal_textinput.value.strip()
if proposal:
try:
proposal_path = pyzebra.find_proposal_path(proposal)
except ValueError as e:
logger.exception(e)
print(e)
return
apply_button.disabled = True
else:
@@ -79,7 +49,6 @@ def apply_button_callback():
proposal_textinput.name = proposal_path
apply_button = Button(label="Apply", button_type="primary")
apply_button.on_click(apply_button_callback)
@@ -92,7 +61,6 @@ doc.add_root(
panel_hdf_viewer.create(),
panel_hdf_anatric.create(),
panel_ccl_prepare.create(),
panel_plot_data.create(),
panel_ccl_integrate.create(),
panel_ccl_compare.create(),
panel_param_study.create(),
@@ -100,13 +68,14 @@ doc.add_root(
panel_spind.create(),
]
),
row(log_textareainput, sizing_mode="scale_both"),
row(stdout_textareainput, bokeh_log_textareainput, sizing_mode="scale_both"),
)
)
def update_stdout():
log_textareainput.value = stream.getvalue()
stdout_textareainput.value = sys.stdout.getvalue()
bokeh_log_textareainput.value = bokeh_stream.getvalue()
doc.add_periodic_callback(update_stdout, 1000)
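
The app.py rewrite swaps the output-capture strategy: 0.7.2 re-binds sys.stdout and attaches a handler to the "bokeh" logger, while main gives each document its own logging.Logger writing into a StringIO that a periodic callback copies into the text area. A condensed sketch of the main-branch pattern used above:

import logging
from io import StringIO

from bokeh.io import curdoc
from bokeh.models import TextAreaInput

doc = curdoc()

stream = StringIO()
handler = logging.StreamHandler(stream)
handler.setFormatter(logging.Formatter(fmt="%(asctime)s %(levelname)s: %(message)s"))

logger = logging.getLogger(str(id(doc)))  # one logger per open document
logger.setLevel(logging.INFO)
logger.addHandler(handler)
doc.logger = logger  # panels retrieve it via curdoc().logger

log_textareainput = TextAreaInput(title="Logging output:")

def update_log():
    # Mirror everything logged so far into the widget once per second.
    log_textareainput.value = stream.getvalue()

doc.add_periodic_callback(update_log, 1000)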


@@ -1,11 +1,79 @@
import argparse
import logging
import os
import subprocess
import sys
from bokeh.application.application import Application
from bokeh.application.handlers import ScriptHandler
from bokeh.server.server import Server
from pyzebra import ANATRIC_PATH, SXTAL_REFGEN_PATH
from pyzebra.app.handler import PyzebraHandler
logging.basicConfig(format="%(asctime)s %(message)s", level=logging.INFO)
logger = logging.getLogger(__name__)
def main():
app_path = os.path.dirname(os.path.abspath(__file__))
subprocess.run(["bokeh", "serve", app_path, *sys.argv[1:]], check=True)
"""The pyzebra command line interface.
This is a wrapper around a bokeh server that provides an interface to launch the application,
bundled with the pyzebra package.
"""
app_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "app.py")
parser = argparse.ArgumentParser(
prog="pyzebra", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
"--port", type=int, default=5006, help="port to listen on for HTTP requests"
)
parser.add_argument(
"--allow-websocket-origin",
metavar="HOST[:PORT]",
type=str,
action="append",
default=None,
help="hostname that can connect to the server websocket",
)
parser.add_argument(
"--anatric-path", type=str, default=ANATRIC_PATH, help="path to anatric executable",
)
parser.add_argument(
"--sxtal-refgen-path",
type=str,
default=SXTAL_REFGEN_PATH,
help="path to Sxtal_Refgen executable",
)
parser.add_argument(
"--spind-path", type=str, default=None, help="path to spind scripts folder",
)
parser.add_argument(
"--args",
nargs=argparse.REMAINDER,
default=[],
help="command line arguments for the pyzebra application",
)
args = parser.parse_args()
logger.info(app_path)
pyzebra_handler = PyzebraHandler(args.anatric_path, args.spind_path)
handler = ScriptHandler(filename=app_path, argv=args.args)
server = Server(
{"/": Application(pyzebra_handler, handler)},
port=args.port,
allow_websocket_origin=args.allow_websocket_origin,
)
server.start()
server.io_loop.start()
if __name__ == "__main__":


@@ -1,45 +0,0 @@
from bokeh.models import Button, ColumnDataSource, CustomJS
js_code = """
let j = 0;
for (let i = 0; i < source.data['name'].length; i++) {
if (source.data['content'][i] === "") continue;
setTimeout(function() {
const blob = new Blob([source.data['content'][i]], {type: 'text/plain'})
const link = document.createElement('a');
document.body.appendChild(link);
const url = window.URL.createObjectURL(blob);
link.href = url;
link.download = source.data['name'][i] + source.data['ext'][i];
link.click();
window.URL.revokeObjectURL(url);
document.body.removeChild(link);
}, 100 * j)
j++;
}
"""
class DownloadFiles:
def __init__(self, n_files):
self.n_files = n_files
source = ColumnDataSource(
data=dict(content=[""] * n_files, name=[""] * n_files, ext=[""] * n_files)
)
self._source = source
label = "Download File" if n_files == 1 else "Download Files"
button = Button(label=label, button_type="success", width=200)
button.js_on_click(CustomJS(args={"source": source}, code=js_code))
self.button = button
def set_contents(self, contents):
self._source.data.update(content=contents)
def set_names(self, names):
self._source.data.update(name=names)
def set_extensions(self, extensions):
self._source.data.update(ext=extensions)
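
DownloadFiles, removed here because 0.7.2 inlines the same download JavaScript into each panel, bundles the button and the ColumnDataSource it reads from. A usage sketch for a two-file export; the names, extensions, and contents below are purely illustrative.

from pyzebra.app import DownloadFiles  # import path as on the main branch

dlfiles = DownloadFiles(n_files=2)
dlfiles.set_names(["scan42", "scan42"])        # illustrative base file names
dlfiles.set_extensions([".comm", ".incomm"])   # illustrative extensions
dlfiles.set_contents(["first file body", "second file body"])

# dlfiles.button is a regular bokeh Button; its CustomJS callback downloads
# every entry of the shared source whose content is non-empty.
download_button = dlfiles.button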


@@ -1,175 +0,0 @@
import types
from bokeh.io import curdoc
from bokeh.models import (
Button,
CellEditor,
CheckboxEditor,
CheckboxGroup,
ColumnDataSource,
DataTable,
Dropdown,
MultiSelect,
NumberEditor,
RadioGroup,
Spinner,
TableColumn,
TextAreaInput,
)
import pyzebra
def _params_factory(function):
if function == "linear":
param_names = ["slope", "intercept"]
elif function == "gaussian":
param_names = ["amplitude", "center", "sigma"]
elif function == "voigt":
param_names = ["amplitude", "center", "sigma", "gamma"]
elif function == "pvoigt":
param_names = ["amplitude", "center", "sigma", "fraction"]
elif function == "pseudovoigt1":
param_names = ["amplitude", "center", "g_sigma", "l_sigma", "fraction"]
else:
raise ValueError("Unknown fit function")
n = len(param_names)
params = dict(
param=param_names, value=[None] * n, vary=[True] * n, min=[None] * n, max=[None] * n
)
if function == "linear":
params["value"] = [0, 1]
params["vary"] = [False, True]
params["min"] = [None, 0]
elif function == "gaussian":
params["min"] = [0, None, None]
return params
class FitControls:
def __init__(self):
self.log = curdoc().logger
self.params = {}
def add_function_button_callback(click):
# bokeh requires (str, str) for MultiSelect options
new_tag = f"{click.item}-{function_select.tags[0]}"
function_select.options.append((new_tag, click.item))
self.params[new_tag] = _params_factory(click.item)
function_select.tags[0] += 1
add_function_button = Dropdown(
label="Add fit function",
menu=[
("Linear", "linear"),
("Gaussian", "gaussian"),
("Voigt", "voigt"),
("Pseudo Voigt", "pvoigt"),
# ("Pseudo Voigt1", "pseudovoigt1"),
],
width=145,
)
add_function_button.on_click(add_function_button_callback)
self.add_function_button = add_function_button
def function_list_callback(_attr, old, new):
# Avoid selection of multiple indices (via Shift+Click or Ctrl+Click)
if len(new) > 1:
# drop selection to the previous one
function_select.value = old
return
if len(old) > 1:
# skip unnecessary update caused by selection drop
return
if new:
params_table_source.data.update(self.params[new[0]])
else:
params_table_source.data.update(dict(param=[], value=[], vary=[], min=[], max=[]))
function_select = MultiSelect(options=[], height=120, width=145)
function_select.tags = [0]
function_select.on_change("value", function_list_callback)
self.function_select = function_select
def remove_function_button_callback():
if function_select.value:
sel_tag = function_select.value[0]
del self.params[sel_tag]
for elem in function_select.options:
if elem[0] == sel_tag:
function_select.options.remove(elem)
break
function_select.value = []
remove_function_button = Button(label="Remove fit function", width=145)
remove_function_button.on_click(remove_function_button_callback)
self.remove_function_button = remove_function_button
params_table_source = ColumnDataSource(dict(param=[], value=[], vary=[], min=[], max=[]))
self.params_table = DataTable(
source=params_table_source,
columns=[
TableColumn(field="param", title="Parameter", editor=CellEditor()),
TableColumn(field="value", title="Value", editor=NumberEditor()),
TableColumn(field="vary", title="Vary", editor=CheckboxEditor()),
TableColumn(field="min", title="Min", editor=NumberEditor()),
TableColumn(field="max", title="Max", editor=NumberEditor()),
],
height=200,
width=350,
index_position=None,
editable=True,
auto_edit=True,
)
# start with `background` and `gauss` fit functions added
add_function_button_callback(types.SimpleNamespace(item="linear"))
add_function_button_callback(types.SimpleNamespace(item="gaussian"))
function_select.value = ["gaussian-1"] # put selection on gauss
self.from_spinner = Spinner(title="Fit from:", width=145)
self.to_spinner = Spinner(title="to:", width=145)
self.area_method_radiogroup = RadioGroup(labels=["Function", "Area"], active=0, width=145)
self.lorentz_checkbox = CheckboxGroup(
labels=["Lorentz Correction"], width=145, margin=(13, 5, 5, 5)
)
self.result_textarea = TextAreaInput(title="Fit results:", width=750, height=200)
def _process_scan(self, scan):
pyzebra.fit_scan(
scan,
self.params,
fit_from=self.from_spinner.value,
fit_to=self.to_spinner.value,
log=self.log,
)
pyzebra.get_area(
scan,
area_method=pyzebra.AREA_METHODS[self.area_method_radiogroup.active],
lorentz=self.lorentz_checkbox.active,
)
def fit_scan(self, scan):
self._process_scan(scan)
def fit_dataset(self, dataset):
for scan in dataset:
if scan["export"]:
self._process_scan(scan)
def update_result_textarea(self, scan):
fit = scan.get("fit")
if fit is None:
self.result_textarea.value = ""
else:
self.result_textarea.value = fit.fit_report()
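
FitControls, also removed because 0.7.2 re-creates the equivalent widgets inside every panel, owns the fit-function list, the parameter table, and the fit range, Lorentz, and area settings. A hedged sketch of how a panel on main wires it in; selected_scan stands for whichever scan the panel currently has selected, and the code assumes a running bokeh document that already defines curdoc().logger.

from pyzebra import app  # as imported by the panels on the main branch

app_fitctrl = app.FitControls()

# The attributes are plain bokeh widgets, so they drop straight into a layout:
# app_fitctrl.add_function_button, .function_select, .remove_function_button,
# .params_table, .from_spinner, .to_spinner, .area_method_radiogroup,
# .lorentz_checkbox, .result_textarea

def process_current(selected_scan):
    app_fitctrl.fit_scan(selected_scan)                # fit with the configured functions
    app_fitctrl.update_result_textarea(selected_scan)  # show fit_report() or clear the box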

pyzebra/app/handler.py (new file)

@@ -0,0 +1,32 @@
from bokeh.application.handlers import Handler
class PyzebraHandler(Handler):
"""Provides a mechanism for generic bokeh applications to build up new streamvis documents.
"""
def __init__(self, anatric_path, spind_path):
"""Initialize a pyzebra handler for bokeh applications.
Args:
anatric_path (str): path to the anatric executable.
spind_path (str): path to the spind scripts folder.
"""
super().__init__() # no-op
self.anatric_path = anatric_path
self.spind_path = spind_path
def modify_document(self, doc):
"""Modify an application document with pyzebra specific features.
Args:
doc (Document) : A bokeh Document to update in-place
Returns:
Document
"""
doc.title = "pyzebra"
doc.anatric_path = self.anatric_path
doc.spind_path = self.spind_path
return doc


@@ -1,159 +0,0 @@
import base64
import io
import os
from bokeh.io import curdoc
from bokeh.models import Button, FileInput, MultiSelect, Spinner
import pyzebra
class InputControls:
def __init__(self, dataset, dlfiles, on_file_open=lambda: None, on_monitor_change=lambda: None):
doc = curdoc()
log = doc.logger
def filelist_select_update_for_proposal():
proposal_path = proposal_textinput.name
if proposal_path:
file_list = []
for file in os.listdir(proposal_path):
if file.endswith((".ccl", ".dat")):
file_list.append((os.path.join(proposal_path, file), file))
filelist_select.options = file_list
open_button.disabled = False
append_button.disabled = False
else:
filelist_select.options = []
open_button.disabled = True
append_button.disabled = True
doc.add_periodic_callback(filelist_select_update_for_proposal, 5000)
def proposal_textinput_callback(_attr, _old, _new):
filelist_select_update_for_proposal()
proposal_textinput = doc.proposal_textinput
proposal_textinput.on_change("name", proposal_textinput_callback)
filelist_select = MultiSelect(title="Available .ccl/.dat files:", width=210, height=250)
self.filelist_select = filelist_select
def open_button_callback():
new_data = []
for f_path in self.filelist_select.value:
with open(f_path) as file:
f_name = os.path.basename(f_path)
base, ext = os.path.splitext(f_name)
try:
file_data = pyzebra.parse_1D(file, ext, log=log)
except Exception as e:
log.exception(e)
continue
pyzebra.normalize_dataset(file_data, monitor_spinner.value)
if not new_data: # first file
new_data = file_data
pyzebra.merge_duplicates(new_data, log=log)
dlfiles.set_names([base] * dlfiles.n_files)
else:
pyzebra.merge_datasets(new_data, file_data, log=log)
if new_data:
dataset.clear()
dataset.extend(new_data)
on_file_open()
append_upload_button.disabled = False
open_button = Button(label="Open New", width=100, disabled=True)
open_button.on_click(open_button_callback)
self.open_button = open_button
def append_button_callback():
file_data = []
for f_path in self.filelist_select.value:
with open(f_path) as file:
f_name = os.path.basename(f_path)
_, ext = os.path.splitext(f_name)
try:
file_data = pyzebra.parse_1D(file, ext, log=log)
except Exception as e:
log.exception(e)
continue
pyzebra.normalize_dataset(file_data, monitor_spinner.value)
pyzebra.merge_datasets(dataset, file_data, log=log)
if file_data:
on_file_open()
append_button = Button(label="Append", width=100, disabled=True)
append_button.on_click(append_button_callback)
self.append_button = append_button
def upload_button_callback(_attr, _old, _new):
new_data = []
for f_str, f_name in zip(upload_button.value, upload_button.filename):
with io.StringIO(base64.b64decode(f_str).decode()) as file:
base, ext = os.path.splitext(f_name)
try:
file_data = pyzebra.parse_1D(file, ext, log=log)
except Exception as e:
log.exception(e)
continue
pyzebra.normalize_dataset(file_data, monitor_spinner.value)
if not new_data: # first file
new_data = file_data
pyzebra.merge_duplicates(new_data, log=log)
dlfiles.set_names([base] * dlfiles.n_files)
else:
pyzebra.merge_datasets(new_data, file_data, log=log)
if new_data:
dataset.clear()
dataset.extend(new_data)
on_file_open()
append_upload_button.disabled = False
upload_button = FileInput(accept=".ccl,.dat", multiple=True, width=200)
# for on_change("value", ...) or on_change("filename", ...),
# see https://github.com/bokeh/bokeh/issues/11461
upload_button.on_change("filename", upload_button_callback)
self.upload_button = upload_button
def append_upload_button_callback(_attr, _old, _new):
file_data = []
for f_str, f_name in zip(append_upload_button.value, append_upload_button.filename):
with io.StringIO(base64.b64decode(f_str).decode()) as file:
_, ext = os.path.splitext(f_name)
try:
file_data = pyzebra.parse_1D(file, ext, log=log)
except Exception as e:
log.exception(e)
continue
pyzebra.normalize_dataset(file_data, monitor_spinner.value)
pyzebra.merge_datasets(dataset, file_data, log=log)
if file_data:
on_file_open()
append_upload_button = FileInput(
accept=".ccl,.dat", multiple=True, width=200, disabled=True
)
# for on_change("value", ...) or on_change("filename", ...),
# see https://github.com/bokeh/bokeh/issues/11461
append_upload_button.on_change("filename", append_upload_button_callback)
self.append_upload_button = append_upload_button
def monitor_spinner_callback(_attr, _old, new):
if dataset:
pyzebra.normalize_dataset(dataset, new)
on_monitor_change()
monitor_spinner = Spinner(title="Monitor:", mode="int", value=100_000, low=1, width=145)
monitor_spinner.on_change("value", monitor_spinner_callback)
self.monitor_spinner = monitor_spinner
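
InputControls goes the same way: the file list, open/append buttons, upload widgets, and monitor spinner move back into each panel on 0.7.2. On main a panel constructs it roughly as below (mirroring the ccl integrate hunk later in this diff); the stub callbacks are placeholders for the panel's own table and plot refreshers, and the code assumes a running bokeh document that provides doc.logger and doc.proposal_textinput.

from pyzebra import app  # main-branch import path

dataset = []
app_dlfiles = app.DownloadFiles(n_files=1)

def _init_datatable():
    ...  # panel-specific: rebuild the scan table from dataset

def _update_plot():
    ...  # panel-specific: redraw the currently selected scan

app_inputctrl = app.InputControls(
    dataset, app_dlfiles, on_file_open=_init_datatable, on_monitor_change=_update_plot
)
# Layout pieces: app_inputctrl.filelist_select, .open_button, .append_button,
# .upload_button, .append_upload_button, .monitor_spinner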


@@ -2,41 +2,80 @@ import base64
import io
import os
import tempfile
import types
import numpy as np
from bokeh.io import curdoc
from bokeh.layouts import column, row
from bokeh.models import (
BasicTicker,
Button,
CellEditor,
CheckboxEditor,
CheckboxGroup,
ColumnDataSource,
CustomJS,
DataRange1d,
DataTable,
Div,
Dropdown,
FileInput,
Grid,
Legend,
Line,
LinearAxis,
MultiLine,
MultiSelect,
NumberEditor,
Panel,
PanTool,
Plot,
RadioGroup,
ResetTool,
Scatter,
Select,
Spacer,
Span,
Spinner,
TableColumn,
TextAreaInput,
WheelZoomTool,
Whisker,
)
from bokeh.plotting import figure
import pyzebra
from pyzebra import EXPORT_TARGETS, app
from pyzebra.ccl_io import EXPORT_TARGETS
from pyzebra.ccl_process import AREA_METHODS
javaScript = """
let j = 0;
for (let i = 0; i < js_data.data['fname'].length; i++) {
if (js_data.data['content'][i] === "") continue;
setTimeout(function() {
const blob = new Blob([js_data.data['content'][i]], {type: 'text/plain'})
const link = document.createElement('a');
document.body.appendChild(link);
const url = window.URL.createObjectURL(blob);
link.href = url;
link.download = js_data.data['fname'][i] + js_data.data['ext'][i];
link.click();
window.URL.revokeObjectURL(url);
document.body.removeChild(link);
}, 100 * j)
j++;
}
"""
def create():
doc = curdoc()
log = doc.logger
dataset1 = []
dataset2 = []
app_dlfiles = app.DownloadFiles(n_files=2)
fit_params = {}
js_data = ColumnDataSource(data=dict(content=["", ""], fname=["", ""], ext=["", ""]))
def file_select_update_for_proposal():
proposal_path = proposal_textinput.name
@@ -95,7 +134,7 @@ def create():
def file_open_button_callback():
if len(file_select.value) != 2:
log.warning("Select exactly 2 .ccl files.")
print("WARNING: Select exactly 2 .ccl files.")
return
new_data1 = []
@@ -105,16 +144,16 @@ def create():
f_name = os.path.basename(f_path)
base, ext = os.path.splitext(f_name)
try:
file_data = pyzebra.parse_1D(file, ext, log=log)
except Exception as e:
log.exception(e)
file_data = pyzebra.parse_1D(file, ext)
except:
print(f"Error loading {f_name}")
return
pyzebra.normalize_dataset(file_data, monitor_spinner.value)
pyzebra.merge_duplicates(file_data, log=log)
pyzebra.merge_duplicates(file_data)
if ind == 0:
app_dlfiles.set_names([base, base])
js_data.data.update(fname=[base, base])
new_data1 = file_data
else: # ind = 1
new_data2 = file_data
@@ -134,7 +173,7 @@ def create():
def upload_button_callback(_attr, _old, _new):
if len(upload_button.filename) != 2:
log.warning("Upload exactly 2 .ccl files.")
print("WARNING: Upload exactly 2 .ccl files.")
return
new_data1 = []
@@ -143,16 +182,16 @@ def create():
with io.StringIO(base64.b64decode(f_str).decode()) as file:
base, ext = os.path.splitext(f_name)
try:
file_data = pyzebra.parse_1D(file, ext, log=log)
except Exception as e:
log.exception(e)
file_data = pyzebra.parse_1D(file, ext)
except:
print(f"Error loading {f_name}")
return
pyzebra.normalize_dataset(file_data, monitor_spinner.value)
pyzebra.merge_duplicates(file_data, log=log)
pyzebra.merge_duplicates(file_data)
if ind == 0:
app_dlfiles.set_names([base, base])
js_data.data.update(fname=[base, base])
new_data1 = file_data
else: # ind = 1
new_data2 = file_data
@@ -188,17 +227,17 @@ def create():
scan_table_source.data.update(fit=fit_ok, export=export)
def _update_plot():
scatter_sources = [scatter1_source, scatter2_source]
fit_sources = [fit1_source, fit2_source]
bkg_sources = [bkg1_source, bkg2_source]
peak_sources = [peak1_source, peak2_source]
plot_scatter_source = [plot_scatter1_source, plot_scatter2_source]
plot_fit_source = [plot_fit1_source, plot_fit2_source]
plot_bkg_source = [plot_bkg1_source, plot_bkg2_source]
plot_peak_source = [plot_peak1_source, plot_peak2_source]
fit_output = ""
for ind, scan in enumerate(_get_selected_scan()):
scatter_source = scatter_sources[ind]
fit_source = fit_sources[ind]
bkg_source = bkg_sources[ind]
peak_source = peak_sources[ind]
scatter_source = plot_scatter_source[ind]
fit_source = plot_fit_source[ind]
bkg_source = plot_bkg_source[ind]
peak_source = plot_peak_source[ind]
scan_motor = scan["scan_motor"]
y = scan["counts"]
@@ -218,7 +257,7 @@ def create():
xs_peak = []
ys_peak = []
comps = fit.eval_components(x=x_fit)
for i, model in enumerate(app_fitctrl.params):
for i, model in enumerate(fit_params):
if "linear" in model:
x_bkg = x_fit
y_bkg = comps[f"f{i}_"]
@@ -238,59 +277,62 @@ def create():
bkg_source.data.update(x=[], y=[])
peak_source.data.update(xs=[], ys=[])
app_fitctrl.result_textarea.value = fit_output
fit_output_textinput.value = fit_output
# Main plot
plot = figure(
x_axis_label="Scan motor",
y_axis_label="Counts",
height=470,
width=700,
tools="pan,wheel_zoom,reset",
plot = Plot(
x_range=DataRange1d(),
y_range=DataRange1d(only_visible=True),
plot_height=470,
plot_width=700,
)
scatter1_source = ColumnDataSource(dict(x=[0], y=[0], y_upper=[0], y_lower=[0]))
plot.circle(
source=scatter1_source,
line_color="steelblue",
fill_color="steelblue",
legend_label="data 1",
plot.add_layout(LinearAxis(axis_label="Counts"), place="left")
plot.add_layout(LinearAxis(axis_label="Scan motor"), place="below")
plot.add_layout(Grid(dimension=0, ticker=BasicTicker()))
plot.add_layout(Grid(dimension=1, ticker=BasicTicker()))
plot_scatter1_source = ColumnDataSource(dict(x=[0], y=[0], y_upper=[0], y_lower=[0]))
plot_scatter1 = plot.add_glyph(
plot_scatter1_source, Scatter(x="x", y="y", line_color="steelblue", fill_color="steelblue")
)
plot.add_layout(Whisker(source=scatter1_source, base="x", upper="y_upper", lower="y_lower"))
scatter2_source = ColumnDataSource(dict(x=[0], y=[0], y_upper=[0], y_lower=[0]))
plot.circle(
source=scatter2_source,
line_color="firebrick",
fill_color="firebrick",
legend_label="data 2",
)
plot.add_layout(Whisker(source=scatter2_source, base="x", upper="y_upper", lower="y_lower"))
fit1_source = ColumnDataSource(dict(x=[0], y=[0]))
plot.line(source=fit1_source, legend_label="best fit 1")
fit2_source = ColumnDataSource(dict(x=[0], y=[0]))
plot.line(source=fit2_source, line_color="firebrick", legend_label="best fit 2")
bkg1_source = ColumnDataSource(dict(x=[0], y=[0]))
plot.line(
source=bkg1_source, line_color="steelblue", line_dash="dashed", legend_label="linear 1"
plot.add_layout(
Whisker(source=plot_scatter1_source, base="x", upper="y_upper", lower="y_lower")
)
bkg2_source = ColumnDataSource(dict(x=[0], y=[0]))
plot.line(
source=bkg2_source, line_color="firebrick", line_dash="dashed", legend_label="linear 2"
plot_scatter2_source = ColumnDataSource(dict(x=[0], y=[0], y_upper=[0], y_lower=[0]))
plot_scatter2 = plot.add_glyph(
plot_scatter2_source, Scatter(x="x", y="y", line_color="firebrick", fill_color="firebrick")
)
plot.add_layout(
Whisker(source=plot_scatter2_source, base="x", upper="y_upper", lower="y_lower")
)
peak1_source = ColumnDataSource(dict(xs=[[0]], ys=[[0]]))
plot.multi_line(
source=peak1_source, line_color="steelblue", line_dash="dashed", legend_label="peak 1"
plot_fit1_source = ColumnDataSource(dict(x=[0], y=[0]))
plot_fit1 = plot.add_glyph(plot_fit1_source, Line(x="x", y="y"))
plot_fit2_source = ColumnDataSource(dict(x=[0], y=[0]))
plot_fit2 = plot.add_glyph(plot_fit2_source, Line(x="x", y="y"))
plot_bkg1_source = ColumnDataSource(dict(x=[0], y=[0]))
plot_bkg1 = plot.add_glyph(
plot_bkg1_source, Line(x="x", y="y", line_color="steelblue", line_dash="dashed")
)
peak2_source = ColumnDataSource(dict(xs=[[0]], ys=[[0]]))
plot.multi_line(
source=peak2_source, line_color="firebrick", line_dash="dashed", legend_label="peak 2"
plot_bkg2_source = ColumnDataSource(dict(x=[0], y=[0]))
plot_bkg2 = plot.add_glyph(
plot_bkg2_source, Line(x="x", y="y", line_color="firebrick", line_dash="dashed")
)
plot_peak1_source = ColumnDataSource(dict(xs=[[0]], ys=[[0]]))
plot_peak1 = plot.add_glyph(
plot_peak1_source, MultiLine(xs="xs", ys="ys", line_color="steelblue", line_dash="dashed")
)
plot_peak2_source = ColumnDataSource(dict(xs=[[0]], ys=[[0]]))
plot_peak2 = plot.add_glyph(
plot_peak2_source, MultiLine(xs="xs", ys="ys", line_color="firebrick", line_dash="dashed")
)
fit_from_span = Span(location=None, dimension="height", line_dash="dashed")
@@ -299,9 +341,25 @@ def create():
fit_to_span = Span(location=None, dimension="height", line_dash="dashed")
plot.add_layout(fit_to_span)
plot.y_range.only_visible = True
plot.add_layout(
Legend(
items=[
("data 1", [plot_scatter1]),
("data 2", [plot_scatter2]),
("best fit 1", [plot_fit1]),
("best fit 2", [plot_fit2]),
("peak 1", [plot_peak1]),
("peak 2", [plot_peak2]),
("linear 1", [plot_bkg1]),
("linear 2", [plot_bkg2]),
],
location="top_left",
click_policy="hide",
)
)
plot.add_tools(PanTool(), WheelZoomTool(), ResetTool())
plot.toolbar.logo = None
plot.legend.click_policy = "hide"
# Scan select
def scan_table_select_callback(_attr, old, new):
@@ -378,11 +436,11 @@ def create():
scan_from2 = dataset2[int(merge_from_select.value)]
if scan_into1 is scan_from1:
log.warning("Selected scans for merging are identical")
print("WARNING: Selected scans for merging are identical")
return
pyzebra.merge_scans(scan_into1, scan_from1, log=log)
pyzebra.merge_scans(scan_into2, scan_from2, log=log)
pyzebra.merge_scans(scan_into1, scan_from1)
pyzebra.merge_scans(scan_into2, scan_from2)
_update_table()
_update_plot()
@@ -399,21 +457,136 @@ def create():
restore_button = Button(label="Restore scan", width=145)
restore_button.on_click(restore_button_callback)
app_fitctrl = app.FitControls()
def fit_from_spinner_callback(_attr, _old, new):
fit_from_span.location = new
app_fitctrl.from_spinner.on_change("value", fit_from_spinner_callback)
fit_from_spinner = Spinner(title="Fit from:", width=145)
fit_from_spinner.on_change("value", fit_from_spinner_callback)
def fit_to_spinner_callback(_attr, _old, new):
fit_to_span.location = new
app_fitctrl.to_spinner.on_change("value", fit_to_spinner_callback)
fit_to_spinner = Spinner(title="to:", width=145)
fit_to_spinner.on_change("value", fit_to_spinner_callback)
def fitparams_add_dropdown_callback(click):
# bokeh requires (str, str) for MultiSelect options
new_tag = f"{click.item}-{fitparams_select.tags[0]}"
fitparams_select.options.append((new_tag, click.item))
fit_params[new_tag] = fitparams_factory(click.item)
fitparams_select.tags[0] += 1
fitparams_add_dropdown = Dropdown(
label="Add fit function",
menu=[
("Linear", "linear"),
("Gaussian", "gaussian"),
("Voigt", "voigt"),
("Pseudo Voigt", "pvoigt"),
# ("Pseudo Voigt1", "pseudovoigt1"),
],
width=145,
)
fitparams_add_dropdown.on_click(fitparams_add_dropdown_callback)
def fitparams_select_callback(_attr, old, new):
# Avoid selection of multiple indices (via Shift+Click or Ctrl+Click)
if len(new) > 1:
# drop selection to the previous one
fitparams_select.value = old
return
if len(old) > 1:
# skip unnecessary update caused by selection drop
return
if new:
fitparams_table_source.data.update(fit_params[new[0]])
else:
fitparams_table_source.data.update(dict(param=[], value=[], vary=[], min=[], max=[]))
fitparams_select = MultiSelect(options=[], height=120, width=145)
fitparams_select.tags = [0]
fitparams_select.on_change("value", fitparams_select_callback)
def fitparams_remove_button_callback():
if fitparams_select.value:
sel_tag = fitparams_select.value[0]
del fit_params[sel_tag]
for elem in fitparams_select.options:
if elem[0] == sel_tag:
fitparams_select.options.remove(elem)
break
fitparams_select.value = []
fitparams_remove_button = Button(label="Remove fit function", width=145)
fitparams_remove_button.on_click(fitparams_remove_button_callback)
def fitparams_factory(function):
if function == "linear":
params = ["slope", "intercept"]
elif function == "gaussian":
params = ["amplitude", "center", "sigma"]
elif function == "voigt":
params = ["amplitude", "center", "sigma", "gamma"]
elif function == "pvoigt":
params = ["amplitude", "center", "sigma", "fraction"]
elif function == "pseudovoigt1":
params = ["amplitude", "center", "g_sigma", "l_sigma", "fraction"]
else:
raise ValueError("Unknown fit function")
n = len(params)
fitparams = dict(
param=params, value=[None] * n, vary=[True] * n, min=[None] * n, max=[None] * n,
)
if function == "linear":
fitparams["value"] = [0, 1]
fitparams["vary"] = [False, True]
fitparams["min"] = [None, 0]
elif function == "gaussian":
fitparams["min"] = [0, None, None]
return fitparams
fitparams_table_source = ColumnDataSource(dict(param=[], value=[], vary=[], min=[], max=[]))
fitparams_table = DataTable(
source=fitparams_table_source,
columns=[
TableColumn(field="param", title="Parameter", editor=CellEditor()),
TableColumn(field="value", title="Value", editor=NumberEditor()),
TableColumn(field="vary", title="Vary", editor=CheckboxEditor()),
TableColumn(field="min", title="Min", editor=NumberEditor()),
TableColumn(field="max", title="Max", editor=NumberEditor()),
],
height=200,
width=350,
index_position=None,
editable=True,
auto_edit=True,
)
# start with `background` and `gauss` fit functions added
fitparams_add_dropdown_callback(types.SimpleNamespace(item="linear"))
fitparams_add_dropdown_callback(types.SimpleNamespace(item="gaussian"))
fitparams_select.value = ["gaussian-1"] # add selection to gauss
fit_output_textinput = TextAreaInput(title="Fit results:", width=750, height=200)
def proc_all_button_callback():
app_fitctrl.fit_dataset(dataset1)
app_fitctrl.fit_dataset(dataset2)
for scan in [*dataset1, *dataset2]:
if scan["export"]:
pyzebra.fit_scan(
scan, fit_params, fit_from=fit_from_spinner.value, fit_to=fit_to_spinner.value
)
pyzebra.get_area(
scan,
area_method=AREA_METHODS[area_method_radiobutton.active],
lorentz=lorentz_checkbox.active,
)
_update_plot()
_update_table()
@@ -422,9 +595,15 @@ def create():
proc_all_button.on_click(proc_all_button_callback)
def proc_button_callback():
scan1, scan2 = _get_selected_scan()
app_fitctrl.fit_scan(scan1)
app_fitctrl.fit_scan(scan2)
for scan in _get_selected_scan():
pyzebra.fit_scan(
scan, fit_params, fit_from=fit_from_spinner.value, fit_to=fit_to_spinner.value
)
pyzebra.get_area(
scan,
area_method=AREA_METHODS[area_method_radiobutton.active],
lorentz=lorentz_checkbox.active,
)
_update_plot()
_update_table()
@@ -432,11 +611,16 @@ def create():
proc_button = Button(label="Process Current", width=145)
proc_button.on_click(proc_button_callback)
area_method_div = Div(text="Intensity:", margin=(5, 5, 0, 5))
area_method_radiobutton = RadioGroup(labels=["Function", "Area"], active=0, width=145)
intensity_diff_div = Div(text="Intensity difference:", margin=(5, 5, 0, 5))
intensity_diff_radiobutton = RadioGroup(
labels=["file1 - file2", "file2 - file1"], active=0, width=145
)
lorentz_checkbox = CheckboxGroup(labels=["Lorentz Correction"], width=145, margin=(13, 5, 5, 5))
export_preview_textinput = TextAreaInput(title="Export file(s) preview:", width=500, height=400)
def _update_preview():
@@ -472,18 +656,18 @@ def create():
content = ""
file_content.append(content)
app_dlfiles.set_contents(file_content)
js_data.data.update(content=file_content)
export_preview_textinput.value = exported_content
def export_target_select_callback(_attr, _old, new):
app_dlfiles.set_extensions(EXPORT_TARGETS[new])
js_data.data.update(ext=EXPORT_TARGETS[new])
_update_preview()
export_target_select = Select(
title="Export target:", options=list(EXPORT_TARGETS.keys()), value="fullprof", width=80
)
export_target_select.on_change("value", export_target_select_callback)
app_dlfiles.set_extensions(EXPORT_TARGETS[export_target_select.value])
js_data.data.update(ext=EXPORT_TARGETS[export_target_select.value])
def hkl_precision_select_callback(_attr, _old, _new):
_update_preview()
@@ -493,24 +677,22 @@ def create():
)
hkl_precision_select.on_change("value", hkl_precision_select_callback)
area_method_div = Div(text="Intensity:", margin=(5, 5, 0, 5))
save_button = Button(label="Download File(s)", button_type="success", width=200)
save_button.js_on_click(CustomJS(args={"js_data": js_data}, code=javaScript))
fitpeak_controls = row(
column(
app_fitctrl.add_function_button,
app_fitctrl.function_select,
app_fitctrl.remove_function_button,
),
app_fitctrl.params_table,
column(fitparams_add_dropdown, fitparams_select, fitparams_remove_button),
fitparams_table,
Spacer(width=20),
column(
app_fitctrl.from_spinner,
app_fitctrl.lorentz_checkbox,
fit_from_spinner,
lorentz_checkbox,
area_method_div,
app_fitctrl.area_method_radiogroup,
area_method_radiobutton,
intensity_diff_div,
intensity_diff_radiobutton,
),
column(app_fitctrl.to_spinner, proc_button, proc_all_button),
column(fit_to_spinner, proc_button, proc_all_button),
)
scan_layout = column(
@@ -524,15 +706,13 @@ def create():
export_layout = column(
export_preview_textinput,
row(
export_target_select,
hkl_precision_select,
column(Spacer(height=19), row(app_dlfiles.button)),
export_target_select, hkl_precision_select, column(Spacer(height=19), row(save_button))
),
)
tab_layout = column(
row(import_layout, scan_layout, plot, Spacer(width=30), export_layout),
row(fitpeak_controls, app_fitctrl.result_textarea),
row(fitpeak_controls, fit_output_textinput),
)
return Panel(child=tab_layout, title="ccl compare")


@@ -1,35 +1,103 @@
import base64
import io
import os
import tempfile
import types
import numpy as np
from bokeh.io import curdoc
from bokeh.layouts import column, row
from bokeh.models import (
BasicTicker,
Button,
CellEditor,
CheckboxEditor,
CheckboxGroup,
ColumnDataSource,
CustomJS,
DataRange1d,
DataTable,
Div,
Dropdown,
FileInput,
Grid,
Legend,
Line,
LinearAxis,
MultiLine,
MultiSelect,
NumberEditor,
Panel,
PanTool,
Plot,
RadioGroup,
ResetTool,
Scatter,
Select,
Spacer,
Span,
Spinner,
TableColumn,
TextAreaInput,
WheelZoomTool,
Whisker,
)
from bokeh.plotting import figure
import pyzebra
from pyzebra import EXPORT_TARGETS, app
from pyzebra.ccl_io import EXPORT_TARGETS
from pyzebra.ccl_process import AREA_METHODS
javaScript = """
let j = 0;
for (let i = 0; i < js_data.data['fname'].length; i++) {
if (js_data.data['content'][i] === "") continue;
setTimeout(function() {
const blob = new Blob([js_data.data['content'][i]], {type: 'text/plain'})
const link = document.createElement('a');
document.body.appendChild(link);
const url = window.URL.createObjectURL(blob);
link.href = url;
link.download = js_data.data['fname'][i] + js_data.data['ext'][i];
link.click();
window.URL.revokeObjectURL(url);
document.body.removeChild(link);
}, 100 * j)
j++;
}
"""
def create():
doc = curdoc()
log = doc.logger
dataset = []
app_dlfiles = app.DownloadFiles(n_files=2)
fit_params = {}
js_data = ColumnDataSource(data=dict(content=["", ""], fname=["", ""], ext=["", ""]))
def file_select_update_for_proposal():
proposal_path = proposal_textinput.name
if proposal_path:
file_list = []
for file in os.listdir(proposal_path):
if file.endswith((".ccl", ".dat")):
file_list.append((os.path.join(proposal_path, file), file))
file_select.options = file_list
file_open_button.disabled = False
file_append_button.disabled = False
else:
file_select.options = []
file_open_button.disabled = True
file_append_button.disabled = True
doc.add_periodic_callback(file_select_update_for_proposal, 5000)
def proposal_textinput_callback(_attr, _old, _new):
file_select_update_for_proposal()
proposal_textinput = doc.proposal_textinput
proposal_textinput.on_change("name", proposal_textinput_callback)
def _init_datatable():
scan_list = [s["idx"] for s in dataset]
@@ -62,6 +130,122 @@ def create():
merge_from_select.options = merge_options
merge_from_select.value = merge_options[0][0]
file_select = MultiSelect(title="Available .ccl/.dat files:", width=210, height=250)
def file_open_button_callback():
nonlocal dataset
new_data = []
for f_path in file_select.value:
with open(f_path) as file:
f_name = os.path.basename(f_path)
base, ext = os.path.splitext(f_name)
try:
file_data = pyzebra.parse_1D(file, ext)
except:
print(f"Error loading {f_name}")
continue
pyzebra.normalize_dataset(file_data, monitor_spinner.value)
if not new_data: # first file
new_data = file_data
pyzebra.merge_duplicates(new_data)
js_data.data.update(fname=[base, base])
else:
pyzebra.merge_datasets(new_data, file_data)
if new_data:
dataset = new_data
_init_datatable()
append_upload_button.disabled = False
file_open_button = Button(label="Open New", width=100, disabled=True)
file_open_button.on_click(file_open_button_callback)
def file_append_button_callback():
file_data = []
for f_path in file_select.value:
with open(f_path) as file:
f_name = os.path.basename(f_path)
_, ext = os.path.splitext(f_name)
try:
file_data = pyzebra.parse_1D(file, ext)
except:
print(f"Error loading {f_name}")
continue
pyzebra.normalize_dataset(file_data, monitor_spinner.value)
pyzebra.merge_datasets(dataset, file_data)
if file_data:
_init_datatable()
file_append_button = Button(label="Append", width=100, disabled=True)
file_append_button.on_click(file_append_button_callback)
def upload_button_callback(_attr, _old, _new):
nonlocal dataset
new_data = []
for f_str, f_name in zip(upload_button.value, upload_button.filename):
with io.StringIO(base64.b64decode(f_str).decode()) as file:
base, ext = os.path.splitext(f_name)
try:
file_data = pyzebra.parse_1D(file, ext)
except:
print(f"Error loading {f_name}")
continue
pyzebra.normalize_dataset(file_data, monitor_spinner.value)
if not new_data: # first file
new_data = file_data
pyzebra.merge_duplicates(new_data)
js_data.data.update(fname=[base, base])
else:
pyzebra.merge_datasets(new_data, file_data)
if new_data:
dataset = new_data
_init_datatable()
append_upload_button.disabled = False
upload_div = Div(text="or upload new .ccl/.dat files:", margin=(5, 5, 0, 5))
upload_button = FileInput(accept=".ccl,.dat", multiple=True, width=200)
# for on_change("value", ...) or on_change("filename", ...),
# see https://github.com/bokeh/bokeh/issues/11461
upload_button.on_change("filename", upload_button_callback)
def append_upload_button_callback(_attr, _old, _new):
file_data = []
for f_str, f_name in zip(append_upload_button.value, append_upload_button.filename):
with io.StringIO(base64.b64decode(f_str).decode()) as file:
_, ext = os.path.splitext(f_name)
try:
file_data = pyzebra.parse_1D(file, ext)
except:
print(f"Error loading {f_name}")
continue
pyzebra.normalize_dataset(file_data, monitor_spinner.value)
pyzebra.merge_datasets(dataset, file_data)
if file_data:
_init_datatable()
append_upload_div = Div(text="append extra files:", margin=(5, 5, 0, 5))
append_upload_button = FileInput(accept=".ccl,.dat", multiple=True, width=200, disabled=True)
# for on_change("value", ...) or on_change("filename", ...),
# see https://github.com/bokeh/bokeh/issues/11461
append_upload_button.on_change("filename", append_upload_button_callback)
def monitor_spinner_callback(_attr, old, new):
if dataset:
pyzebra.normalize_dataset(dataset, new)
_update_plot()
monitor_spinner = Spinner(title="Monitor:", mode="int", value=100_000, low=1, width=145)
monitor_spinner.on_change("value", monitor_spinner_callback)
def _update_table():
fit_ok = [(1 if "fit" in scan else 0) for scan in dataset]
export = [scan["export"] for scan in dataset]
@@ -76,19 +260,19 @@ def create():
x = scan[scan_motor]
plot.axis[0].axis_label = scan_motor
scatter_source.data.update(x=x, y=y, y_upper=y + y_err, y_lower=y - y_err)
plot_scatter_source.data.update(x=x, y=y, y_upper=y + y_err, y_lower=y - y_err)
fit = scan.get("fit")
if fit is not None:
x_fit = np.linspace(x[0], x[-1], 100)
fit_source.data.update(x=x_fit, y=fit.eval(x=x_fit))
plot_fit_source.data.update(x=x_fit, y=fit.eval(x=x_fit))
x_bkg = []
y_bkg = []
xs_peak = []
ys_peak = []
comps = fit.eval_components(x=x_fit)
for i, model in enumerate(app_fitctrl.params):
for i, model in enumerate(fit_params):
if "linear" in model:
x_bkg = x_fit
y_bkg = comps[f"f{i}_"]
@@ -97,43 +281,49 @@ def create():
xs_peak.append(x_fit)
ys_peak.append(comps[f"f{i}_"])
bkg_source.data.update(x=x_bkg, y=y_bkg)
peak_source.data.update(xs=xs_peak, ys=ys_peak)
plot_bkg_source.data.update(x=x_bkg, y=y_bkg)
plot_peak_source.data.update(xs=xs_peak, ys=ys_peak)
fit_output_textinput.value = fit.fit_report()
else:
fit_source.data.update(x=[], y=[])
bkg_source.data.update(x=[], y=[])
peak_source.data.update(xs=[], ys=[])
app_fitctrl.update_result_textarea(scan)
app_inputctrl = app.InputControls(
dataset, app_dlfiles, on_file_open=_init_datatable, on_monitor_change=_update_plot
)
plot_fit_source.data.update(x=[], y=[])
plot_bkg_source.data.update(x=[], y=[])
plot_peak_source.data.update(xs=[], ys=[])
fit_output_textinput.value = ""
# Main plot
plot = figure(
x_axis_label="Scan motor",
y_axis_label="Counts",
height=470,
width=700,
tools="pan,wheel_zoom,reset",
plot = Plot(
x_range=DataRange1d(),
y_range=DataRange1d(only_visible=True),
plot_height=470,
plot_width=700,
)
scatter_source = ColumnDataSource(dict(x=[0], y=[0], y_upper=[0], y_lower=[0]))
plot.circle(
source=scatter_source, line_color="steelblue", fill_color="steelblue", legend_label="data"
plot.add_layout(LinearAxis(axis_label="Counts"), place="left")
plot.add_layout(LinearAxis(axis_label="Scan motor"), place="below")
plot.add_layout(Grid(dimension=0, ticker=BasicTicker()))
plot.add_layout(Grid(dimension=1, ticker=BasicTicker()))
plot_scatter_source = ColumnDataSource(dict(x=[0], y=[0], y_upper=[0], y_lower=[0]))
plot_scatter = plot.add_glyph(
plot_scatter_source, Scatter(x="x", y="y", line_color="steelblue", fill_color="steelblue")
)
plot.add_layout(Whisker(source=scatter_source, base="x", upper="y_upper", lower="y_lower"))
plot.add_layout(Whisker(source=plot_scatter_source, base="x", upper="y_upper", lower="y_lower"))
fit_source = ColumnDataSource(dict(x=[0], y=[0]))
plot.line(source=fit_source, legend_label="best fit")
plot_fit_source = ColumnDataSource(dict(x=[0], y=[0]))
plot_fit = plot.add_glyph(plot_fit_source, Line(x="x", y="y"))
bkg_source = ColumnDataSource(dict(x=[0], y=[0]))
plot.line(source=bkg_source, line_color="green", line_dash="dashed", legend_label="linear")
plot_bkg_source = ColumnDataSource(dict(x=[0], y=[0]))
plot_bkg = plot.add_glyph(
plot_bkg_source, Line(x="x", y="y", line_color="green", line_dash="dashed")
)
peak_source = ColumnDataSource(dict(xs=[[0]], ys=[[0]]))
plot.multi_line(source=peak_source, line_color="red", line_dash="dashed", legend_label="peak")
plot_peak_source = ColumnDataSource(dict(xs=[[0]], ys=[[0]]))
plot_peak = plot.add_glyph(
plot_peak_source, MultiLine(xs="xs", ys="ys", line_color="red", line_dash="dashed")
)
fit_from_span = Span(location=None, dimension="height", line_dash="dashed")
plot.add_layout(fit_from_span)
@@ -141,9 +331,21 @@ def create():
fit_to_span = Span(location=None, dimension="height", line_dash="dashed")
plot.add_layout(fit_to_span)
plot.y_range.only_visible = True
plot.add_layout(
Legend(
items=[
("data", [plot_scatter]),
("best fit", [plot_fit]),
("peak", [plot_peak]),
("linear", [plot_bkg]),
],
location="top_left",
click_policy="hide",
)
)
plot.add_tools(PanTool(), WheelZoomTool(), ResetTool())
plot.toolbar.logo = None
plot.legend.click_policy = "hide"
# Scan select
def scan_table_select_callback(_attr, old, new):
@@ -217,10 +419,10 @@ def create():
scan_from = dataset[int(merge_from_select.value)]
if scan_into is scan_from:
log.warning("Selected scans for merging are identical")
print("WARNING: Selected scans for merging are identical")
return
pyzebra.merge_scans(scan_into, scan_from, log=log)
pyzebra.merge_scans(scan_into, scan_from)
_update_table()
_update_plot()
@@ -235,20 +437,136 @@ def create():
restore_button = Button(label="Restore scan", width=145)
restore_button.on_click(restore_button_callback)
app_fitctrl = app.FitControls()
def fit_from_spinner_callback(_attr, _old, new):
fit_from_span.location = new
app_fitctrl.from_spinner.on_change("value", fit_from_spinner_callback)
fit_from_spinner = Spinner(title="Fit from:", width=145)
fit_from_spinner.on_change("value", fit_from_spinner_callback)
def fit_to_spinner_callback(_attr, _old, new):
fit_to_span.location = new
app_fitctrl.to_spinner.on_change("value", fit_to_spinner_callback)
fit_to_spinner = Spinner(title="to:", width=145)
fit_to_spinner.on_change("value", fit_to_spinner_callback)
def fitparams_add_dropdown_callback(click):
# bokeh requires (str, str) for MultiSelect options
new_tag = f"{click.item}-{fitparams_select.tags[0]}"
fitparams_select.options.append((new_tag, click.item))
fit_params[new_tag] = fitparams_factory(click.item)
fitparams_select.tags[0] += 1
fitparams_add_dropdown = Dropdown(
label="Add fit function",
menu=[
("Linear", "linear"),
("Gaussian", "gaussian"),
("Voigt", "voigt"),
("Pseudo Voigt", "pvoigt"),
# ("Pseudo Voigt1", "pseudovoigt1"),
],
width=145,
)
fitparams_add_dropdown.on_click(fitparams_add_dropdown_callback)
def fitparams_select_callback(_attr, old, new):
# Avoid selection of multiple indices (via Shift+Click or Ctrl+Click)
if len(new) > 1:
# drop selection to the previous one
fitparams_select.value = old
return
if len(old) > 1:
# skip unnecessary update caused by selection drop
return
if new:
fitparams_table_source.data.update(fit_params[new[0]])
else:
fitparams_table_source.data.update(dict(param=[], value=[], vary=[], min=[], max=[]))
fitparams_select = MultiSelect(options=[], height=120, width=145)
fitparams_select.tags = [0]
fitparams_select.on_change("value", fitparams_select_callback)
def fitparams_remove_button_callback():
if fitparams_select.value:
sel_tag = fitparams_select.value[0]
del fit_params[sel_tag]
for elem in fitparams_select.options:
if elem[0] == sel_tag:
fitparams_select.options.remove(elem)
break
fitparams_select.value = []
fitparams_remove_button = Button(label="Remove fit function", width=145)
fitparams_remove_button.on_click(fitparams_remove_button_callback)
def fitparams_factory(function):
if function == "linear":
params = ["slope", "intercept"]
elif function == "gaussian":
params = ["amplitude", "center", "sigma"]
elif function == "voigt":
params = ["amplitude", "center", "sigma", "gamma"]
elif function == "pvoigt":
params = ["amplitude", "center", "sigma", "fraction"]
elif function == "pseudovoigt1":
params = ["amplitude", "center", "g_sigma", "l_sigma", "fraction"]
else:
raise ValueError("Unknown fit function")
n = len(params)
fitparams = dict(
param=params, value=[None] * n, vary=[True] * n, min=[None] * n, max=[None] * n,
)
if function == "linear":
fitparams["value"] = [0, 1]
fitparams["vary"] = [False, True]
fitparams["min"] = [None, 0]
elif function == "gaussian":
fitparams["min"] = [0, None, None]
return fitparams
fitparams_table_source = ColumnDataSource(dict(param=[], value=[], vary=[], min=[], max=[]))
fitparams_table = DataTable(
source=fitparams_table_source,
columns=[
TableColumn(field="param", title="Parameter", editor=CellEditor()),
TableColumn(field="value", title="Value", editor=NumberEditor()),
TableColumn(field="vary", title="Vary", editor=CheckboxEditor()),
TableColumn(field="min", title="Min", editor=NumberEditor()),
TableColumn(field="max", title="Max", editor=NumberEditor()),
],
height=200,
width=350,
index_position=None,
editable=True,
auto_edit=True,
)
# start with `background` and `gauss` fit functions added
fitparams_add_dropdown_callback(types.SimpleNamespace(item="linear"))
fitparams_add_dropdown_callback(types.SimpleNamespace(item="gaussian"))
fitparams_select.value = ["gaussian-1"] # add selection to gauss
fit_output_textinput = TextAreaInput(title="Fit results:", width=750, height=200)
def proc_all_button_callback():
app_fitctrl.fit_dataset(dataset)
for scan in dataset:
if scan["export"]:
pyzebra.fit_scan(
scan, fit_params, fit_from=fit_from_spinner.value, fit_to=fit_to_spinner.value
)
pyzebra.get_area(
scan,
area_method=AREA_METHODS[area_method_radiobutton.active],
lorentz=lorentz_checkbox.active,
)
_update_plot()
_update_table()
@@ -257,7 +575,15 @@ def create():
proc_all_button.on_click(proc_all_button_callback)
def proc_button_callback():
app_fitctrl.fit_scan(_get_selected_scan())
scan = _get_selected_scan()
pyzebra.fit_scan(
scan, fit_params, fit_from=fit_from_spinner.value, fit_to=fit_to_spinner.value
)
pyzebra.get_area(
scan,
area_method=AREA_METHODS[area_method_radiobutton.active],
lorentz=lorentz_checkbox.active,
)
_update_plot()
_update_table()
@@ -265,6 +591,11 @@ def create():
proc_button = Button(label="Process Current", width=145)
proc_button.on_click(proc_button_callback)
area_method_div = Div(text="Intensity:", margin=(5, 5, 0, 5))
area_method_radiobutton = RadioGroup(labels=["Function", "Area"], active=0, width=145)
lorentz_checkbox = CheckboxGroup(labels=["Lorentz Correction"], width=145, margin=(13, 5, 5, 5))
export_preview_textinput = TextAreaInput(title="Export file(s) preview:", width=500, height=400)
def _update_preview():
@@ -294,18 +625,18 @@ def create():
content = ""
file_content.append(content)
app_dlfiles.set_contents(file_content)
js_data.data.update(content=file_content)
export_preview_textinput.value = exported_content
def export_target_select_callback(_attr, _old, new):
app_dlfiles.set_extensions(EXPORT_TARGETS[new])
js_data.data.update(ext=EXPORT_TARGETS[new])
_update_preview()
export_target_select = Select(
title="Export target:", options=list(EXPORT_TARGETS.keys()), value="fullprof", width=80
)
export_target_select.on_change("value", export_target_select_callback)
app_dlfiles.set_extensions(EXPORT_TARGETS[export_target_select.value])
js_data.data.update(ext=EXPORT_TARGETS[export_target_select.value])
def hkl_precision_select_callback(_attr, _old, _new):
_update_preview()
@@ -315,53 +646,42 @@ def create():
)
hkl_precision_select.on_change("value", hkl_precision_select_callback)
area_method_div = Div(text="Intensity:", margin=(5, 5, 0, 5))
save_button = Button(label="Download File(s)", button_type="success", width=200)
save_button.js_on_click(CustomJS(args={"js_data": js_data}, code=javaScript))
fitpeak_controls = row(
column(
app_fitctrl.add_function_button,
app_fitctrl.function_select,
app_fitctrl.remove_function_button,
),
app_fitctrl.params_table,
column(fitparams_add_dropdown, fitparams_select, fitparams_remove_button),
fitparams_table,
Spacer(width=20),
column(
app_fitctrl.from_spinner,
app_fitctrl.lorentz_checkbox,
area_method_div,
app_fitctrl.area_method_radiogroup,
),
column(app_fitctrl.to_spinner, proc_button, proc_all_button),
column(fit_from_spinner, lorentz_checkbox, area_method_div, area_method_radiobutton),
column(fit_to_spinner, proc_button, proc_all_button),
)
scan_layout = column(
scan_table,
row(app_inputctrl.monitor_spinner, column(Spacer(height=19), restore_button)),
row(monitor_spinner, column(Spacer(height=19), restore_button)),
row(column(Spacer(height=19), merge_button), merge_from_select),
)
upload_div = Div(text="or upload new .ccl/.dat files:", margin=(5, 5, 0, 5))
append_upload_div = Div(text="append extra files:", margin=(5, 5, 0, 5))
import_layout = column(
app_inputctrl.filelist_select,
row(app_inputctrl.open_button, app_inputctrl.append_button),
file_select,
row(file_open_button, file_append_button),
upload_div,
app_inputctrl.upload_button,
upload_button,
append_upload_div,
app_inputctrl.append_upload_button,
append_upload_button,
)
export_layout = column(
export_preview_textinput,
row(
export_target_select,
hkl_precision_select,
column(Spacer(height=19), row(app_dlfiles.button)),
export_target_select, hkl_precision_select, column(Spacer(height=19), row(save_button))
),
)
tab_layout = column(
row(import_layout, scan_layout, plot, Spacer(width=30), export_layout),
row(fitpeak_controls, app_fitctrl.result_textarea),
row(fitpeak_controls, fit_output_textinput),
)
return Panel(child=tab_layout, title="ccl integrate")
View File
@ -5,34 +5,64 @@ import subprocess
import tempfile
import numpy as np
from bokeh.io import curdoc
from bokeh.layouts import column, row
from bokeh.models import (
Arrow,
BoxZoomTool,
Button,
CheckboxGroup,
ColumnDataSource,
CustomJS,
Div,
Ellipse,
FileInput,
HoverTool,
Legend,
LegendItem,
LinearAxis,
MultiLine,
MultiSelect,
NormalHead,
NumericInput,
Panel,
PanTool,
Plot,
RadioGroup,
Range1d,
ResetTool,
Scatter,
Select,
Spacer,
Spinner,
Text,
TextAreaInput,
TextInput,
WheelZoomTool,
)
from bokeh.palettes import Dark2
from bokeh.plotting import figure
import pyzebra
from pyzebra import app
javaScript = """
let j = 0;
for (let i = 0; i < js_data.data['fname'].length; i++) {
if (js_data.data['content'][i] === "") continue;
setTimeout(function() {
const blob = new Blob([js_data.data['content'][i]], {type: 'text/plain'})
const link = document.createElement('a');
document.body.appendChild(link);
const url = window.URL.createObjectURL(blob);
link.href = url;
link.download = js_data.data['fname'][i];
link.click();
window.URL.revokeObjectURL(url);
document.body.removeChild(link);
}, 100 * j)
j++;
}
"""
ANG_CHUNK_DEFAULTS = {"2theta": 30, "gamma": 30, "omega": 30, "chi": 35, "phi": 35, "nu": 10}
SORT_OPT_BI = ["2theta", "chi", "phi", "omega"]
@ -40,14 +70,11 @@ SORT_OPT_NB = ["gamma", "nu", "omega"]
def create():
doc = curdoc()
log = doc.logger
ang_lims = {}
cif_data = {}
params = {}
ang_lims = None
cif_data = None
params = None
res_files = {}
_update_slice = None
app_dlfiles = app.DownloadFiles(n_files=1)
js_data = ColumnDataSource(data=dict(content=[""], fname=[""]))
anglim_div = Div(text="Angular min/max limits:", margin=(5, 5, 0, 5))
sttgamma_ti = TextInput(title="stt/gamma", width=100)
@ -135,11 +162,7 @@ def create():
params = dict()
params["SPGR"] = cryst_space_group.value
params["CELL"] = cryst_cell.value
try:
ub = pyzebra.calc_ub_matrix(params, log=log)
except Exception as e:
log.exception(e)
return
ub = pyzebra.calc_ub_matrix(params)
ub_matrix.value = " ".join(ub)
ub_matrix_calc = Button(label="UB matrix:", button_type="primary", width=100)
@ -228,9 +251,9 @@ def create():
geom_template = None
pyzebra.export_geom_file(geom_path, ang_lims, geom_template)
log.info(f"Content of {geom_path}:")
print(f"Content of {geom_path}:")
with open(geom_path) as f:
log.info(f.read())
print(f.read())
priority = [sorting_0.value, sorting_1.value, sorting_2.value]
chunks = [sorting_0_dt.value, sorting_1_dt.value, sorting_2_dt.value]
@ -255,9 +278,9 @@ def create():
cfl_template = None
pyzebra.export_cfl_file(cfl_path, params, cfl_template)
log.info(f"Content of {cfl_path}:")
print(f"Content of {cfl_path}:")
with open(cfl_path) as f:
log.info(f.read())
print(f.read())
comp_proc = subprocess.run(
[pyzebra.SXTAL_REFGEN_PATH, cfl_path],
@ -267,8 +290,8 @@ def create():
stderr=subprocess.STDOUT,
text=True,
)
log.info(" ".join(comp_proc.args))
log.info(comp_proc.stdout)
print(" ".join(comp_proc.args))
print(comp_proc.stdout)
if i == 1: # all hkl files are identical, so keep only one
hkl_fname = base_fname + ".hkl"
@ -302,48 +325,78 @@ def create():
sel_file = new[0]
file_text = res_files[sel_file]
preview_lists.value = file_text
app_dlfiles.set_contents([file_text])
app_dlfiles.set_names([sel_file])
js_data.data.update(content=[file_text], fname=[sel_file])
created_lists = MultiSelect(title="Created lists:", width=200, height=150)
created_lists.on_change("value", created_lists_callback)
preview_lists = TextAreaInput(title="Preview selected list:", width=600, height=150)
def plot_list_callback():
nonlocal _update_slice
fname = created_lists.value
with io.StringIO(preview_lists.value) as fileobj:
fdata = pyzebra.parse_hkl(fileobj, fname)
_update_slice = _prepare_plotting(fname, [fdata])
_update_slice()
download_file = Button(label="Download file", button_type="success", width=200)
download_file.js_on_click(CustomJS(args={"js_data": js_data}, code=javaScript))
plot_list = Button(label="Plot selected list", button_type="primary", width=200, disabled=True)
plot_list = Button(label="Plot selected list", button_type="primary", width=200)
plot_list.on_click(plot_list_callback)
# Plot
upload_data_div = Div(text="Open hkl/mhkl data:")
upload_data = FileInput(accept=".hkl,.mhkl", multiple=True, width=200)
measured_data_div = Div(text="Measured data:")
measured_data = FileInput(accept=".ccl", multiple=True, width=200)
min_grid_x = -10
max_grid_x = 10
min_grid_y = -10
max_grid_y = 10
min_grid_y = -5
max_grid_y = 5
cmap = Dark2[8]
syms = ["circle", "inverted_triangle", "square", "diamond", "star", "triangle"]
def _prepare_plotting(filenames, filedata):
# Define resolution function
def _res_fun(stt, wave, res_mult):
expr = np.tan(stt / 2 * np.pi / 180)
fwhm = np.sqrt(0.4639 * expr ** 2 - 0.4452 * expr + 0.1506) * res_mult # res in deg
return fwhm
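# A minimal, self-contained restatement of the resolution function above; the
# 2theta value below is invented for illustration. The polynomial has the
# Caglioti form U*tan^2(theta) + V*tan(theta) + W, evaluated at theta = stt / 2.
import numpy as np

def resolution_fwhm(stt_deg, res_mult=1.0):
    t = np.tan(stt_deg / 2 * np.pi / 180)
    return np.sqrt(0.4639 * t ** 2 - 0.4452 * t + 0.1506) * res_mult

print(resolution_fwhm(30.0))  # ~0.25 deg for a 30 deg scattering angle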
def plot_file_callback():
orth_dir = list(map(float, hkl_normal.value.split()))
cut_tol = hkl_delta.value
cut_or = hkl_cut.value
x_dir = list(map(float, hkl_in_plane_x.value.split()))
y_dir = list(map(float, hkl_in_plane_y.value.split()))
k = np.array(k_vectors.value.split()).astype(float).reshape(-1, 3)
tol_k = tol_k_ni.value
k = np.array(k_vectors.value.split()).astype(float).reshape(3, 3)
tol_k = 0.1
lattice = list(map(float, cryst_cell.value.strip().split()))
alpha = lattice[3] * np.pi / 180.0
beta = lattice[4] * np.pi / 180.0
gamma = lattice[5] * np.pi / 180.0
# Plotting options
grid_flag = 1
grid_minor_flag = 1
grid_div = 2 # Number of minor division lines per unit
# different symbols based on file number
file_flag = 0 in disting_opt_cb.active
# scale marker size according to intensity
intensity_flag = 1 in disting_opt_cb.active
# use color to mark different propagation vectors
prop_legend_flag = 2 in disting_opt_cb.active
# use resolution ellipsis
res_flag = disting_opt_rb.active
# multiplier for resolution function (in case of samples with large mosaicity)
res_mult = res_mult_ni.value
md_fnames = measured_data.filename
md_fdata = measured_data.value
# Load first data file, read angles and define matrices to perform conversion to Cartesian coordinates and back
with io.StringIO(base64.b64decode(md_fdata[0]).decode()) as file:
_, ext = os.path.splitext(md_fnames[0])
try:
file_data = pyzebra.parse_1D(file, ext)
except:
print(f"Error loading {md_fnames[0]}")
return
alpha = file_data[0]["alpha_cell"] * np.pi / 180.0
beta = file_data[0]["beta_cell"] * np.pi / 180.0
gamma = file_data[0]["gamma_cell"] * np.pi / 180.0
# reciprocal angle parameters
alpha_star = np.arccos(
(np.cos(beta) * np.cos(gamma) - np.cos(alpha)) / (np.sin(beta) * np.sin(gamma))
)
beta_star = np.arccos(
(np.cos(alpha) * np.cos(gamma) - np.cos(beta)) / (np.sin(alpha) * np.sin(gamma))
)
@ -351,7 +404,7 @@ def create():
(np.cos(alpha) * np.cos(beta) - np.cos(gamma)) / (np.sin(alpha) * np.sin(beta))
)
# conversion matrix
# conversion matrix:
M = np.array(
[
[1, np.cos(gamma_star), np.cos(beta_star)],
@ -359,45 +412,12 @@ def create():
[0, 0, np.sin(beta_star) * np.sin(alpha)],
]
)
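# Recap of the reciprocal-angle relations and the conversion matrix built above,
# packaged as a stand-alone helper; the cell angles at the bottom are an invented
# example. cos(alpha*) = (cos(beta)cos(gamma) - cos(alpha)) / (sin(beta)sin(gamma)),
# and cyclically for beta* and gamma*. The middle row of M falls on the hunk
# boundary above, so the standard direction-cosine choice used here is an assumption.
import numpy as np

def conversion_matrix(alpha, beta, gamma):
    beta_star = np.arccos((np.cos(alpha) * np.cos(gamma) - np.cos(beta)) / (np.sin(alpha) * np.sin(gamma)))
    gamma_star = np.arccos((np.cos(alpha) * np.cos(beta) - np.cos(gamma)) / (np.sin(alpha) * np.sin(beta)))
    return np.array(
        [
            [1, np.cos(gamma_star), np.cos(beta_star)],
            [0, np.sin(gamma_star), -np.sin(beta_star) * np.cos(alpha)],  # assumed middle row
            [0, 0, np.sin(beta_star) * np.sin(alpha)],
        ]
    )

M_demo = conversion_matrix(np.pi / 2, np.pi / 2, 2 * np.pi / 3)  # alpha = beta = 90 deg, gamma = 120 deg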
M_inv = np.linalg.inv(M)
# Get last lattice vector
y_dir = np.cross(x_dir, orth_dir) # Second axis of the plotting plane
# Rescale such that the smallest nonzero element of the y_dir vector is 1
y_dir2 = y_dir[y_dir != 0]
min_val = np.min(np.abs(y_dir2))
y_dir = y_dir / min_val
# Possibly flip the direction of y_dir:
if y_dir[np.argmax(abs(y_dir))] < 0:
y_dir = -y_dir
# Display the resulting y_dir
hkl_in_plane_y.value = " ".join([f"{val:.1f}" for val in y_dir])
# Save length of lattice vectors
x_length = np.linalg.norm(x_dir)
y_length = np.linalg.norm(y_dir)
# Save str for labels
xlabel_str = " ".join(map(str, x_dir))
ylabel_str = " ".join(map(str, y_dir))
# Normalize lattice vectors
y_dir = y_dir / np.linalg.norm(y_dir)
x_dir = x_dir / np.linalg.norm(x_dir)
orth_dir = orth_dir / np.linalg.norm(orth_dir)
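# Worked example of the in-plane Y derivation above, with invented inputs:
# x_dir = [1 0 0] in the plane and orth_dir = [0 0 1] as the plot normal.
import numpy as np

x_dir_demo = np.array([1.0, 0.0, 0.0])
orth_demo = np.array([0.0, 0.0, 1.0])
y_demo = np.cross(x_dir_demo, orth_demo)                 # -> [0, -1, 0]
y_demo = y_demo / np.min(np.abs(y_demo[y_demo != 0]))    # rescale smallest nonzero |component| to 1
if y_demo[np.argmax(abs(y_demo))] < 0:                   # flip so the dominant component is positive
    y_demo = -y_demo                                     # -> [0, 1, 0]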
# Calculate cartesian equivalents of lattice vectors
x_c = np.matmul(M, x_dir)
y_c = np.matmul(M, y_dir)
o_c = np.matmul(M, orth_dir)
# Calculate the vertical direction in the plotting plane
y_vert = np.cross(x_c, o_c) # vertical direction in plotting plane
if y_vert[np.argmax(abs(y_vert))] < 0:
y_vert = -y_vert
y_vert = y_vert / np.linalg.norm(y_vert)
# Calculate in-plane y-direction
x_c = M @ x_dir
y_c = M @ y_dir
o_c = M @ orth_dir
# Normalize all directions
y_c = y_c / np.linalg.norm(y_c)
@ -409,255 +429,256 @@ def create():
intensity_vec = []
k_flag_vec = []
file_flag_vec = []
res_vec_x = []
res_vec_y = []
res_N = 10
for j in range(len(md_fnames)):
with io.StringIO(base64.b64decode(md_fdata[j]).decode()) as file:
_, ext = os.path.splitext(md_fnames[j])
try:
file_data = pyzebra.parse_1D(file, ext)
except:
print(f"Error loading {md_fnames[j]}")
return
# Loop through all scans in the data file
for scan in file_data:
om = scan["omega"]
gammad = scan["twotheta"]
chi = scan["chi"]
phi = scan["phi"]
nud = 0 # 1d detector
ub = scan["ub"]
ddist = float(scan["detectorDistance"])
counts = scan["counts"]
mon = scan["monitor"]
# Determine the wavelength from the mcvl value (is the wavelength stored anywhere?)
mcvl = scan["mcvl"]
if mcvl == 2.2:
wave = 1.178
elif mcvl == 7.0:
wave = 1.383
else:
wave = 2.3
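# The mcvl -> wavelength branch above, restated as a lookup with the same 2.3 A
# fallback; the values are copied from the branches, not read from a data file.
def wavelength_from_mcvl(mcvl):
    return {2.2: 1.178, 7.0: 1.383}.get(mcvl, 2.3)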
# Calculate resolution in degrees
res = _res_fun(gammad, wave, res_mult)
# convert to resolution in hkl along scan line
ang2hkl_1d = pyzebra.ang2hkl_1d
res_x = []
res_y = []
for _om in np.linspace(om[0], om[-1], num=res_N):
expr1 = ang2hkl_1d(wave, ddist, gammad, _om + res / 2, chi, phi, nud, ub)
expr2 = ang2hkl_1d(wave, ddist, gammad, _om - res / 2, chi, phi, nud, ub)
hkl_temp = M @ (np.abs(expr1 - expr2) / 2)
res_x.append(hkl_temp[0])
res_y.append(hkl_temp[1])
# Get first and final hkl
hkl1 = ang2hkl_1d(wave, ddist, gammad, om[0], chi, phi, nud, ub)
hkl2 = ang2hkl_1d(wave, ddist, gammad, om[-1], chi, phi, nud, ub)
# Get hkl at the maximum-intensity point of the scan
hkl_m = ang2hkl_1d(wave, ddist, gammad, om[np.argmax(counts)], chi, phi, nud, ub)
# Estimate intensity for marker size scaling
y1 = counts[0]
y2 = counts[-1]
x1 = om[0]
x2 = om[-1]
a = (y1 - y2) / (x1 - x2)
b = y1 - a * x1
intensity_exp = np.sum(counts - (a * om + b))
c = int(intensity_exp / mon * 10000)
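# Arithmetic of the endpoint-line background subtraction above, on invented
# numbers: a straight line through the first and last points is removed before
# summing, then the result is scaled by 1e4 / monitor.
import numpy as np

om_demo = np.linspace(10, 12, 5)
counts_demo = np.array([100.0, 120.0, 300.0, 130.0, 110.0])
a_demo = (counts_demo[0] - counts_demo[-1]) / (om_demo[0] - om_demo[-1])
b_demo = counts_demo[0] - a_demo * om_demo[0]
net_demo = np.sum(counts_demo - (a_demo * om_demo + b_demo))  # background-subtracted sum
c_demo = int(net_demo / 100_000 * 10000)                      # with an assumed monitor of 100000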
for j, fdata in enumerate(filedata):
for ind in range(len(fdata["counts"])):
# Assign the propagation-vector flag (k_flag_vec) for this reflection
hkl = np.array([fdata["h"][ind], fdata["k"][ind], fdata["l"][ind]])
reduced_hkl_m = np.minimum(1 - hkl % 1, hkl % 1)
for k_ind, _k in enumerate(k):
if all(np.abs(reduced_hkl_m - _k) < tol_k):
k_flag_vec.append(k_ind)
min_hkl_m = np.minimum(1 - hkl_m % 1, hkl_m % 1)
for j2, _k in enumerate(k):
if all(np.abs(min_hkl_m - _k) < tol_k):
k_flag_vec.append(j2)
break
else:
# not required
continue
k_flag_vec.append(len(k))
# Save data
hkl_coord.append(hkl)
intensity_vec.append(fdata["counts"][ind])
hkl_coord.append([hkl1, hkl2, hkl_m])
intensity_vec.append(c)
file_flag_vec.append(j)
res_vec_x.append(res_x)
res_vec_y.append(res_y)
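# How the propagation-vector flag is assigned above, on invented numbers: the hkl
# is folded to its fractional distance from the nearest integer and compared
# component-wise against each k vector within tol_k.
import numpy as np

k_demo = np.array([[0.0, 0.0, 0.0], [0.5, 0.0, 0.0]])
hkl_demo = np.array([1.5, 2.0, 3.0])
reduced = np.minimum(1 - hkl_demo % 1, hkl_demo % 1)  # -> [0.5, 0.0, 0.0]
flag = next((i for i, _k in enumerate(k_demo) if np.all(np.abs(reduced - _k) < 0.01)), len(k_demo))
# flag == 1, i.e. the second k vector; unmatched reflections get len(k_demo)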
x_spacing = np.dot(M @ x_dir, x_c) * x_length
y_spacing = np.dot(M @ y_dir, y_vert) * y_length
y_spacingx = np.dot(M @ y_dir, x_c) * y_length
plot.x_range.start = plot.x_range.reset_start = -2
plot.x_range.end = plot.x_range.reset_end = 5
plot.y_range.start = plot.y_range.reset_start = -4
plot.y_range.end = plot.y_range.reset_end = 3.5
# Plot coordinate system
arrow1.x_end = x_spacing
arrow1.y_end = 0
arrow2.x_end = y_spacingx
arrow2.y_end = y_spacing
# Add labels
kvect_source.data.update(
x=[x_spacing / 4, -0.1],
y=[x_spacing / 4 - 0.5, y_spacing / 2],
text=[xlabel_str, ylabel_str],
)
# Plot grid lines
xs, ys = [], []
xs_minor, ys_minor = [], []
for yy in np.arange(min_grid_y, max_grid_y, 1):
# Calculate end and start point
hkl1 = min_grid_x * x_dir + yy * y_dir
hkl2 = max_grid_x * x_dir + yy * y_dir
hkl1 = M @ hkl1
hkl2 = M @ hkl2
if grid_flag:
for yy in np.arange(min_grid_y, max_grid_y, 1):
hkl1 = M @ [0, yy, 0]
xs.append([min_grid_y, max_grid_y])
ys.append([hkl1[1], hkl1[1]])
# Project points onto axes
x1 = np.dot(x_c, hkl1) * x_length
y1 = np.dot(y_vert, hkl1) * y_length
x2 = np.dot(x_c, hkl2) * x_length
y2 = np.dot(y_vert, hkl2) * y_length
for xx in np.arange(min_grid_x, max_grid_x, 1):
hkl1 = M @ [xx, min_grid_x, 0]
hkl2 = M @ [xx, max_grid_x, 0]
xs.append([hkl1[0], hkl2[0]])
ys.append([hkl1[1], hkl2[1]])
xs.append([x1, x2])
ys.append([y1, y2])
if grid_minor_flag:
for yy in np.arange(min_grid_y, max_grid_y, 1 / grid_div):
hkl1 = M @ [0, yy, 0]
xs_minor.append([min_grid_y, max_grid_y])
ys_minor.append([hkl1[1], hkl1[1]])
for xx in np.arange(min_grid_x, max_grid_x, 1):
# Calculate end and start point
hkl1 = xx * x_dir + min_grid_y * y_dir
hkl2 = xx * x_dir + max_grid_y * y_dir
hkl1 = M @ hkl1
hkl2 = M @ hkl2
# Project points onto axes
x1 = np.dot(x_c, hkl1) * x_length
y1 = np.dot(y_vert, hkl1) * y_length
x2 = np.dot(x_c, hkl2) * x_length
y2 = np.dot(y_vert, hkl2) * y_length
xs.append([x1, x2])
ys.append([y1, y2])
for yy in np.arange(min_grid_y, max_grid_y, 0.5):
# Calculate end and start point
hkl1 = min_grid_x * x_dir + yy * y_dir
hkl2 = max_grid_x * x_dir + yy * y_dir
hkl1 = M @ hkl1
hkl2 = M @ hkl2
# Project points onto axes
x1 = np.dot(x_c, hkl1) * x_length
y1 = np.dot(y_vert, hkl1) * y_length
x2 = np.dot(x_c, hkl2) * x_length
y2 = np.dot(y_vert, hkl2) * y_length
xs_minor.append([x1, x2])
ys_minor.append([y1, y2])
for xx in np.arange(min_grid_x, max_grid_x, 0.5):
# Calculate end and start point
hkl1 = xx * x_dir + min_grid_y * y_dir
hkl2 = xx * x_dir + max_grid_y * y_dir
hkl1 = M @ hkl1
hkl2 = M @ hkl2
# Project points onto axes
x1 = np.dot(x_c, hkl1) * x_length
y1 = np.dot(y_vert, hkl1) * y_length
x2 = np.dot(x_c, hkl2) * x_length
y2 = np.dot(y_vert, hkl2) * y_length
xs_minor.append([x1, x2])
ys_minor.append([y1, y2])
for xx in np.arange(min_grid_x, max_grid_x, 1 / grid_div):
hkl1 = M @ [xx, min_grid_x, 0]
hkl2 = M @ [xx, max_grid_x, 0]
xs_minor.append([hkl1[0], hkl2[0]])
ys_minor.append([hkl1[1], hkl2[1]])
grid_source.data.update(xs=xs, ys=ys)
minor_grid_source.data.update(xs=xs_minor, ys=ys_minor)
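# One major grid line as built above, with invented directions and an identity
# stand-in for M: endpoints are formed in hkl space, mapped to Cartesian, then
# projected onto the in-plane axis (x_c) and the vertical direction (y_vert).
import numpy as np

M_demo = np.eye(3)
x_dir_demo, y_dir_demo = np.array([1.0, 0, 0]), np.array([0, 1.0, 0])
x_c_demo, y_vert_demo = M_demo @ x_dir_demo, np.array([0, 1.0, 0])
hkl1_demo = M_demo @ (-10 * x_dir_demo + 2 * y_dir_demo)
hkl2_demo = M_demo @ (10 * x_dir_demo + 2 * y_dir_demo)
xs_demo = [np.dot(x_c_demo, hkl1_demo), np.dot(x_c_demo, hkl2_demo)]        # [-10.0, 10.0]
ys_demo = [np.dot(y_vert_demo, hkl1_demo), np.dot(y_vert_demo, hkl2_demo)]  # [2.0, 2.0]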
def _update_slice():
cut_tol = hkl_delta.value
cut_or = hkl_cut.value
el_x, el_y, el_w, el_h, el_c = [], [], [], [], []
scan_xs, scan_ys, scan_x, scan_y = [], [], [], []
scan_m, scan_s, scan_c, scan_l = [], [], [], []
for j in range(len(hkl_coord)):
# Get middle hkl from list
hklm = M @ hkl_coord[j][2]
# different symbols based on file number
file_flag = 0 in disting_opt_cb.active
# scale marker size according to intensity
intensity_flag = 1 in disting_opt_cb.active
# use color to mark different propagation vectors
prop_legend_flag = 2 in disting_opt_cb.active
# Decide if point is in the cut
proj = np.dot(hklm, o_c)
if abs(proj - cut_or) >= cut_tol:
continue
scan_x, scan_y = [], []
scan_m, scan_s, scan_c, scan_l, scan_hkl = [], [], [], [], []
for j in range(len(hkl_coord)):
# Get middle hkl from list
hklm = M @ hkl_coord[j]
hkl1 = M @ hkl_coord[j][0]
hkl2 = M @ hkl_coord[j][1]
# Decide if point is in the cut
proj = np.dot(hklm, o_c)
if abs(proj - cut_or) >= cut_tol:
continue
if intensity_flag:
markersize = max(1, int(intensity_vec[j] / max(intensity_vec) * 20))
else:
markersize = 4
# Project onto axes
hklmx = np.dot(hklm, x_c)
hklmy = np.dot(hklm, y_vert)
if file_flag:
plot_symbol = syms[file_flag_vec[j]]
else:
plot_symbol = "circle"
if intensity_flag and max(intensity_vec) != 0:
markersize = max(6, int(intensity_vec[j] / max(intensity_vec) * 30))
else:
markersize = 6
if prop_legend_flag:
col_value = cmap[k_flag_vec[j]]
else:
col_value = "black"
if file_flag:
plot_symbol = syms[file_flag_vec[j]]
else:
plot_symbol = "circle"
if prop_legend_flag:
col_value = cmap[k_flag_vec[j]]
else:
col_value = "black"
if res_flag:
# Generate series of ellipses along scan line
el_x.extend(np.linspace(hkl1[0], hkl2[0], num=res_N))
el_y.extend(np.linspace(hkl1[1], hkl2[1], num=res_N))
el_w.extend(np.array(res_vec_x[j]) * 2)
el_h.extend(np.array(res_vec_y[j]) * 2)
el_c.extend([col_value] * res_N)
else:
# Plot scan line
scan_xs.append([hkl1[0], hkl2[0]])
scan_ys.append([hkl1[1], hkl2[1]])
# Plot middle point of scan
scan_x.append(hklmx)
scan_y.append(hklmy)
scan_x.append(hklm[0])
scan_y.append(hklm[1])
scan_m.append(plot_symbol)
scan_s.append(markersize)
# Color and legend label
scan_c.append(col_value)
scan_l.append(filenames[file_flag_vec[j]])
scan_hkl.append(hkl_coord[j])
scan_l.append(md_fnames[file_flag_vec[j]])
scatter_source.data.update(
x=scan_x, y=scan_y, m=scan_m, s=scan_s, c=scan_c, l=scan_l, hkl=scan_hkl
)
ellipse_source.data.update(x=el_x, y=el_y, w=el_w, h=el_h, c=el_c)
scan_source.data.update(
xs=scan_xs, ys=scan_ys, x=scan_x, y=scan_y, m=scan_m, s=scan_s, c=scan_c, l=scan_l,
)
# Legend items for different file entries (symbol)
legend_items = []
if file_flag:
labels, inds = np.unique(scatter_source.data["l"], return_index=True)
for label, ind in zip(labels, inds):
legend_items.append(LegendItem(label=label, renderers=[scatter], index=ind))
arrow1.visible = True
arrow1.x_end = x_c[0]
arrow1.y_end = x_c[1]
arrow2.visible = True
arrow2.x_end = y_c[0]
arrow2.y_end = y_c[1]
# Legend items for propagation vector (color)
if prop_legend_flag:
labels, inds = np.unique(scatter_source.data["c"], return_index=True)
for label, ind in zip(labels, inds):
label = f"k={k[cmap.index(label)]}"
legend_items.append(LegendItem(label=label, renderers=[scatter], index=ind))
kvect_source.data.update(
text_x=[x_c[0] / 2, y_c[0] / 2 - 0.1],
text_y=[x_c[1] - 0.1, y_c[1] / 2],
text=["h", "k"],
)
plot.legend.items = legend_items
# Legend items for different file entries (symbol)
legend_items = []
if not res_flag and file_flag:
labels, inds = np.unique(scan_source.data["l"], return_index=True)
for label, ind in zip(labels, inds):
legend_items.append(LegendItem(label=label, renderers=[scatter], index=ind))
return _update_slice
# Legend items for propagation vector (color)
if prop_legend_flag:
if res_flag:
source, render = ellipse_source, ellipse
else:
source, render = scan_source, mline
def plot_file_callback():
nonlocal _update_slice
fnames = []
fdata = []
for j, fname in enumerate(upload_data.filename):
with io.StringIO(base64.b64decode(upload_data.value[j]).decode()) as file:
_, ext = os.path.splitext(fname)
try:
file_data = pyzebra.parse_hkl(file, ext)
except Exception as e:
log.exception(e)
return
labels, inds = np.unique(source.data["c"], return_index=True)
for label, ind in zip(labels, inds):
label = f"k={k[cmap.index(label)]}"
legend_items.append(LegendItem(label=label, renderers=[render], index=ind))
fnames.append(fname)
fdata.append(file_data)
_update_slice = _prepare_plotting(fnames, fdata)
_update_slice()
plot.legend.items = legend_items
plot_file = Button(label="Plot selected file(s)", button_type="primary", width=200)
plot_file.on_click(plot_file_callback)
plot = figure(height=550, width=550 + 32, tools="pan,wheel_zoom,reset")
plot = Plot(x_range=Range1d(), y_range=Range1d(), plot_height=450, plot_width=600)
plot.add_tools(PanTool(), WheelZoomTool(), BoxZoomTool(), ResetTool())
plot.toolbar.logo = None
plot.xaxis.visible = False
plot.xgrid.visible = False
plot.yaxis.visible = False
plot.ygrid.visible = False
plot.add_layout(LinearAxis(), place="left")
plot.add_layout(LinearAxis(), place="below")
arrow1 = Arrow(x_start=0, y_start=0, x_end=0, y_end=0, end=NormalHead(size=10))
arrow1 = Arrow(x_start=0, y_start=0, x_end=0, y_end=0, end=NormalHead(size=10), visible=False)
plot.add_layout(arrow1)
arrow2 = Arrow(x_start=0, y_start=0, x_end=0, y_end=0, end=NormalHead(size=10))
arrow2 = Arrow(x_start=0, y_start=0, x_end=0, y_end=0, end=NormalHead(size=10), visible=False)
plot.add_layout(arrow2)
kvect_source = ColumnDataSource(dict(x=[], y=[], text=[]))
plot.text(source=kvect_source)
kvect_source = ColumnDataSource(dict(text_x=[], text_y=[], text=[]))
plot.add_glyph(kvect_source, Text(x="text_x", y="text_y", text="text"))
grid_source = ColumnDataSource(dict(xs=[], ys=[]))
plot.multi_line(source=grid_source, line_color="gray")
minor_grid_source = ColumnDataSource(dict(xs=[], ys=[]))
plot.multi_line(source=minor_grid_source, line_color="gray", line_dash="dotted")
scatter_source = ColumnDataSource(dict(x=[], y=[], m=[], s=[], c=[], l=[], hkl=[]))
scatter = plot.scatter(
source=scatter_source, marker="m", size="s", fill_color="c", line_color="c"
plot.add_glyph(grid_source, MultiLine(xs="xs", ys="ys", line_color="gray"))
plot.add_glyph(
minor_grid_source, MultiLine(xs="xs", ys="ys", line_color="gray", line_dash="dotted")
)
plot.x_range.renderers = [scatter]
plot.y_range.renderers = [scatter]
ellipse_source = ColumnDataSource(dict(x=[], y=[], w=[], h=[], c=[]))
ellipse = plot.add_glyph(
ellipse_source, Ellipse(x="x", y="y", width="w", height="h", fill_color="c", line_color="c")
)
scan_source = ColumnDataSource(dict(xs=[], ys=[], x=[], y=[], m=[], s=[], c=[], l=[]))
mline = plot.add_glyph(scan_source, MultiLine(xs="xs", ys="ys", line_color="c"))
scatter = plot.add_glyph(
scan_source, Scatter(x="x", y="y", marker="m", size="s", fill_color="c", line_color="c")
)
plot.add_layout(Legend(items=[], location="top_left", click_policy="hide"))
plot.add_tools(HoverTool(renderers=[scatter], tooltips=[("hkl", "@hkl")]))
hkl_div = Div(text="HKL:", margin=(5, 5, 0, 5))
hkl_normal = TextInput(title="normal", value="0 0 1", width=70)
def hkl_cut_callback(_attr, _old, _new):
if _update_slice is not None:
_update_slice()
hkl_cut = Spinner(title="cut", value=0, step=0.1, width=70)
hkl_cut.on_change("value_throttled", hkl_cut_callback)
hkl_delta = NumericInput(title="delta", value=0.1, mode="float", width=70)
hkl_in_plane_x = TextInput(title="in-plane X", value="1 0 0", width=70)
hkl_in_plane_y = TextInput(title="in-plane Y", value="", width=100, disabled=True)
hkl_in_plane_y = TextInput(title="in-plane Y", value="0 1 0", width=70)
disting_opt_div = Div(text="Distinguish options:", margin=(5, 5, 0, 5))
disting_opt_cb = CheckboxGroup(
@ -665,11 +686,14 @@ def create():
active=[0, 1, 2],
width=200,
)
disting_opt_rb = RadioGroup(
labels=["scan direction", "resolution ellipsoid"], active=0, width=200
)
k_vectors = TextAreaInput(
title="k vectors:", value="0.0 0.0 0.0\n0.5 0.0 0.0\n0.5 0.5 0.0", width=150
title="k vectors:", value="0.0 0.0 0.0\n0.5 0.0 0.0\n0.5 0.5 0.0", width=150,
)
tol_k_ni = NumericInput(title="k tolerance:", value=0.01, mode="float", width=100)
res_mult_ni = NumericInput(title="Resolution mult:", value=10, mode="int", width=100)
fileinput_layout = row(open_cfl_div, open_cfl, open_cif_div, open_cif, open_geom_div, open_geom)
@ -677,7 +701,7 @@ def create():
wavelen_layout = column(wavelen_div, row(wavelen_select, wavelen_input))
anglim_layout = column(anglim_div, row(sttgamma_ti, omega_ti, chinu_ti, phi_ti))
cryst_layout = column(cryst_div, row(cryst_space_group, cryst_cell))
ubmat_layout = row(column(Spacer(height=19), ub_matrix_calc), ub_matrix)
ubmat_layout = row(column(Spacer(height=18), ub_matrix_calc), ub_matrix)
ranges_layout = column(ranges_div, row(ranges_hkl, ranges_srang))
magstruct_layout = column(magstruct_div, row(magstruct_lattice, magstruct_kvec))
sorting_layout = row(
@ -698,25 +722,22 @@ def create():
cryst_layout,
ubmat_layout,
row(ranges_layout, Spacer(width=50), magstruct_layout),
row(sorting_layout, Spacer(width=30), column(Spacer(height=19), go_button)),
row(sorting_layout, Spacer(width=30), column(Spacer(height=18), go_button)),
row(created_lists, preview_lists),
row(app_dlfiles.button, plot_list),
row(download_file, plot_list),
)
hkl_layout = column(
hkl_div,
row(hkl_normal, hkl_cut, hkl_delta, Spacer(width=10), hkl_in_plane_x, hkl_in_plane_y),
)
disting_layout = column(disting_opt_div, row(disting_opt_cb, disting_opt_rb))
column2_layout = column(
row(upload_data_div, upload_data, plot_file),
row(
plot,
column(
hkl_div,
row(hkl_normal, hkl_cut, hkl_delta),
row(hkl_in_plane_x, hkl_in_plane_y),
k_vectors,
tol_k_ni,
disting_opt_div,
disting_opt_cb,
),
),
row(measured_data_div, measured_data, plot_file),
plot,
row(hkl_layout, k_vectors),
row(disting_layout, res_mult_ni),
)
tab_layout = row(column1_layout, column2_layout)
View File
@ -19,12 +19,11 @@ from bokeh.models import (
)
import pyzebra
from pyzebra import DATA_FACTORY_IMPLEMENTATION, REFLECTION_PRINTER_FORMATS
from pyzebra.anatric import DATA_FACTORY_IMPLEMENTATION, REFLECTION_PRINTER_FORMATS
def create():
doc = curdoc()
log = doc.logger
config = pyzebra.AnatricConfig()
def _load_config_file(file):
@ -170,7 +169,7 @@ def create():
config.dataFactory_implementation = new
dataFactory_implementation_select = Select(
title="DataFactory implement.:", options=DATA_FACTORY_IMPLEMENTATION, width=145
title="DataFactory implement.:", options=DATA_FACTORY_IMPLEMENTATION, width=145,
)
dataFactory_implementation_select.on_change("value", dataFactory_implementation_select_callback)
@ -201,7 +200,7 @@ def create():
config.reflectionPrinter_format = new
reflectionPrinter_format_select = Select(
title="ReflectionPrinter format:", options=REFLECTION_PRINTER_FORMATS, width=145
title="ReflectionPrinter format:", options=REFLECTION_PRINTER_FORMATS, width=145,
)
reflectionPrinter_format_select.on_change("value", reflectionPrinter_format_select_callback)
@ -348,11 +347,7 @@ def create():
with tempfile.TemporaryDirectory() as temp_dir:
temp_file = temp_dir + "/config.xml"
config.save_as(temp_file)
try:
pyzebra.anatric(temp_file, anatric_path=doc.anatric_path, cwd=temp_dir, log=log)
except Exception as e:
log.exception(e)
return
pyzebra.anatric(temp_file, anatric_path=doc.anatric_path, cwd=temp_dir)
with open(os.path.join(temp_dir, config.logfile)) as f_log:
output_log.value = f_log.read()
View File
@ -6,37 +6,48 @@ import numpy as np
from bokeh.io import curdoc
from bokeh.layouts import column, gridplot, row
from bokeh.models import (
BasicTicker,
BoxZoomTool,
Button,
CellEditor,
CheckboxGroup,
ColumnDataSource,
DataRange1d,
DataTable,
Div,
FileInput,
LinearColorMapper,
Grid,
MultiSelect,
NumberEditor,
NumberFormatter,
Image,
LinearAxis,
LinearColorMapper,
Panel,
PanTool,
Plot,
Range1d,
ResetTool,
Scatter,
Select,
Spinner,
TableColumn,
Tabs,
Title,
WheelZoomTool,
)
from bokeh.plotting import figure
from bokeh.palettes import Cividis256, Greys256, Plasma256 # pylint: disable=E0611
import pyzebra
IMAGE_W = 256
IMAGE_H = 128
IMAGE_PLOT_W = int(IMAGE_W * 2.4) + 52
IMAGE_PLOT_H = int(IMAGE_H * 2.4) + 27
IMAGE_PLOT_W = int(IMAGE_W * 2) + 52
IMAGE_PLOT_H = int(IMAGE_H * 2) + 27
def create():
doc = curdoc()
log = doc.logger
dataset = []
cami_meta = {}
@ -134,8 +145,8 @@ def create():
for f_name in file_select.value:
try:
new_data.append(pyzebra.read_detector_data(f_name))
except KeyError as e:
log.exception(e)
except KeyError:
print("Could not read data from the file.")
return
dataset.extend(new_data)
@ -190,7 +201,7 @@ def create():
else:
metadata_table_source.data.update(temp=[None])
_update_proj_plots()
update_overview_plot()
def scan_table_source_callback(_attr, _old, _new):
pass
@ -247,28 +258,33 @@ def create():
)
param_select.on_change("value", param_select_callback)
def _update_proj_plots():
def update_overview_plot():
scan = _get_selected_scan()
counts = scan["counts"]
n_im, n_y, n_x = counts.shape
im_proj_x = np.mean(counts, axis=1)
im_proj_y = np.mean(counts, axis=2)
overview_x = np.mean(counts, axis=1)
overview_y = np.mean(counts, axis=2)
# normalize for simpler colormapping
im_proj_max_val = max(np.max(im_proj_x), np.max(im_proj_y))
im_proj_x = 1000 * im_proj_x / im_proj_max_val
im_proj_y = 1000 * im_proj_y / im_proj_max_val
overview_max_val = max(np.max(overview_x), np.max(overview_y))
overview_x = 1000 * overview_x / overview_max_val
overview_y = 1000 * overview_y / overview_max_val
proj_x_image_source.data.update(image=[im_proj_x], dw=[n_x], dh=[n_im])
proj_y_image_source.data.update(image=[im_proj_y], dw=[n_y], dh=[n_im])
overview_plot_x_image_source.data.update(image=[overview_x], dw=[n_x], dh=[n_im])
overview_plot_y_image_source.data.update(image=[overview_y], dw=[n_y], dh=[n_im])
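# Shape bookkeeping for the projections above: counts is (frames, y, x), so the
# two maps are (frames, x) and (frames, y); both share a 0..1000 color scale.
# The Poisson stack below is synthetic.
import numpy as np

counts_demo = np.random.poisson(5.0, size=(10, 128, 256)).astype(float)
proj_x_demo = np.mean(counts_demo, axis=1)   # shape (10, 256)
proj_y_demo = np.mean(counts_demo, axis=2)   # shape (10, 128)
scale = 1000 / max(np.max(proj_x_demo), np.max(proj_y_demo))
proj_x_demo, proj_y_demo = proj_x_demo * scale, proj_y_demo * scale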
if proj_auto_checkbox.active:
im_min = min(np.min(im_proj_x), np.min(im_proj_y))
im_max = max(np.max(im_proj_x), np.max(im_proj_y))
im_min = min(np.min(overview_x), np.min(overview_y))
im_max = max(np.max(overview_x), np.max(overview_y))
proj_display_min_spinner.value = im_min
proj_display_max_spinner.value = im_max
overview_plot_x_image_glyph.color_mapper.low = im_min
overview_plot_y_image_glyph.color_mapper.low = im_min
overview_plot_x_image_glyph.color_mapper.high = im_max
overview_plot_y_image_glyph.color_mapper.high = im_max
frame_range.start = 0
frame_range.end = n_im
frame_range.reset_start = 0
@ -276,7 +292,7 @@ def create():
frame_range.bounds = (0, n_im)
scan_motor = scan["scan_motor"]
proj_y_plot.yaxis.axis_label = f"Scanning motor, {scan_motor}"
overview_plot_y.axis[1].axis_label = f"Scanning motor, {scan_motor}"
var = scan[scan_motor]
var_start = var[0]
@ -292,95 +308,148 @@ def create():
# shared frame ranges
frame_range = Range1d(0, 1, bounds=(0, 1))
scanning_motor_range = Range1d(0, 1, bounds=(0, 1))
color_mapper_proj = LinearColorMapper()
det_x_range = Range1d(0, IMAGE_W, bounds=(0, IMAGE_W))
proj_x_plot = figure(
title="Projections on X-axis",
x_axis_label="Coordinate X, pix",
y_axis_label="Frame",
overview_plot_x = Plot(
title=Title(text="Projections on X-axis"),
x_range=det_x_range,
y_range=frame_range,
extra_y_ranges={"scanning_motor": scanning_motor_range},
height=540,
width=IMAGE_PLOT_W - 3,
tools="pan,box_zoom,wheel_zoom,reset",
active_scroll="wheel_zoom",
plot_height=400,
plot_width=IMAGE_PLOT_W - 3,
)
proj_x_plot.yaxis.major_label_orientation = "vertical"
proj_x_plot.toolbar.tools[2].maintain_focus = False
# ---- tools
wheelzoomtool = WheelZoomTool(maintain_focus=False)
overview_plot_x.toolbar.logo = None
overview_plot_x.add_tools(
PanTool(), BoxZoomTool(), wheelzoomtool, ResetTool(),
)
overview_plot_x.toolbar.active_scroll = wheelzoomtool
proj_x_image_source = ColumnDataSource(
# ---- axes
overview_plot_x.add_layout(LinearAxis(axis_label="Coordinate X, pix"), place="below")
overview_plot_x.add_layout(
LinearAxis(axis_label="Frame", major_label_orientation="vertical"), place="left"
)
# ---- grid lines
overview_plot_x.add_layout(Grid(dimension=0, ticker=BasicTicker()))
overview_plot_x.add_layout(Grid(dimension=1, ticker=BasicTicker()))
# ---- rgba image glyph
overview_plot_x_image_source = ColumnDataSource(
dict(image=[np.zeros((1, 1), dtype="float32")], x=[0], y=[0], dw=[IMAGE_W], dh=[1])
)
proj_x_plot.image(source=proj_x_image_source, color_mapper=color_mapper_proj)
overview_plot_x_image_glyph = Image(image="image", x="x", y="y", dw="dw", dh="dh")
overview_plot_x.add_glyph(
overview_plot_x_image_source, overview_plot_x_image_glyph, name="image_glyph"
)
det_y_range = Range1d(0, IMAGE_H, bounds=(0, IMAGE_H))
proj_y_plot = figure(
title="Projections on Y-axis",
x_axis_label="Coordinate Y, pix",
y_axis_label="Scanning motor",
y_axis_location="right",
overview_plot_y = Plot(
title=Title(text="Projections on Y-axis"),
x_range=det_y_range,
y_range=frame_range,
extra_y_ranges={"scanning_motor": scanning_motor_range},
height=540,
width=IMAGE_PLOT_H + 22,
tools="pan,box_zoom,wheel_zoom,reset",
active_scroll="wheel_zoom",
plot_height=400,
plot_width=IMAGE_PLOT_H + 22,
)
proj_y_plot.yaxis.y_range_name = "scanning_motor"
proj_y_plot.yaxis.major_label_orientation = "vertical"
proj_y_plot.toolbar.tools[2].maintain_focus = False
# ---- tools
wheelzoomtool = WheelZoomTool(maintain_focus=False)
overview_plot_y.toolbar.logo = None
overview_plot_y.add_tools(
PanTool(), BoxZoomTool(), wheelzoomtool, ResetTool(),
)
overview_plot_y.toolbar.active_scroll = wheelzoomtool
proj_y_image_source = ColumnDataSource(
# ---- axes
overview_plot_y.add_layout(LinearAxis(axis_label="Coordinate Y, pix"), place="below")
overview_plot_y.add_layout(
LinearAxis(
y_range_name="scanning_motor",
axis_label="Scanning motor",
major_label_orientation="vertical",
),
place="right",
)
# ---- grid lines
overview_plot_y.add_layout(Grid(dimension=0, ticker=BasicTicker()))
overview_plot_y.add_layout(Grid(dimension=1, ticker=BasicTicker()))
# ---- rgba image glyph
overview_plot_y_image_source = ColumnDataSource(
dict(image=[np.zeros((1, 1), dtype="float32")], x=[0], y=[0], dw=[IMAGE_H], dh=[1])
)
proj_y_plot.image(source=proj_y_image_source, color_mapper=color_mapper_proj)
def colormap_select_callback(_attr, _old, new):
color_mapper_proj.palette = new
colormap_select = Select(
title="Colormap:",
options=[("Greys256", "greys"), ("Plasma256", "plasma"), ("Cividis256", "cividis")],
width=210,
overview_plot_y_image_glyph = Image(image="image", x="x", y="y", dw="dw", dh="dh")
overview_plot_y.add_glyph(
overview_plot_y_image_source, overview_plot_y_image_glyph, name="image_glyph"
)
colormap_select.on_change("value", colormap_select_callback)
colormap_select.value = "Plasma256"
def proj_auto_checkbox_callback(_attr, _old, new):
if 0 in new:
cmap_dict = {
"gray": Greys256,
"gray_reversed": Greys256[::-1],
"plasma": Plasma256,
"cividis": Cividis256,
}
def colormap_callback(_attr, _old, new):
overview_plot_x_image_glyph.color_mapper = LinearColorMapper(palette=cmap_dict[new])
overview_plot_y_image_glyph.color_mapper = LinearColorMapper(palette=cmap_dict[new])
colormap = Select(title="Colormap:", options=list(cmap_dict.keys()), width=210)
colormap.on_change("value", colormap_callback)
colormap.value = "plasma"
PROJ_STEP = 1
def proj_auto_checkbox_callback(state):
if state:
proj_display_min_spinner.disabled = True
proj_display_max_spinner.disabled = True
else:
proj_display_min_spinner.disabled = False
proj_display_max_spinner.disabled = False
_update_proj_plots()
update_overview_plot()
proj_auto_checkbox = CheckboxGroup(
labels=["Projections Intensity Range"], active=[0], width=145, margin=[10, 5, 0, 5]
)
proj_auto_checkbox.on_change("active", proj_auto_checkbox_callback)
proj_auto_checkbox.on_click(proj_auto_checkbox_callback)
def proj_display_max_spinner_callback(_attr, _old, new):
color_mapper_proj.high = new
def proj_display_max_spinner_callback(_attr, _old_value, new_value):
proj_display_min_spinner.high = new_value - PROJ_STEP
overview_plot_x_image_glyph.color_mapper.high = new_value
overview_plot_y_image_glyph.color_mapper.high = new_value
proj_display_max_spinner = Spinner(
value=1, disabled=bool(proj_auto_checkbox.active), mode="int", width=100
low=0 + PROJ_STEP,
value=1,
step=PROJ_STEP,
disabled=bool(proj_auto_checkbox.active),
width=100,
height=31,
)
proj_display_max_spinner.on_change("value", proj_display_max_spinner_callback)
def proj_display_min_spinner_callback(_attr, _old, new):
color_mapper_proj.low = new
def proj_display_min_spinner_callback(_attr, _old_value, new_value):
proj_display_max_spinner.low = new_value + PROJ_STEP
overview_plot_x_image_glyph.color_mapper.low = new_value
overview_plot_y_image_glyph.color_mapper.low = new_value
proj_display_min_spinner = Spinner(
value=0, disabled=bool(proj_auto_checkbox.active), mode="int", width=100
low=0,
high=1 - PROJ_STEP,
value=0,
step=PROJ_STEP,
disabled=bool(proj_auto_checkbox.active),
width=100,
height=31,
)
proj_display_min_spinner.on_change("value", proj_display_min_spinner_callback)
@ -406,20 +475,21 @@ def create():
if "fit" in s and fit_param:
x.append(p)
y.append(s["fit"][fit_param])
param_scatter_source.data.update(x=x, y=y)
param_plot_scatter_source.data.update(x=x, y=y)
# Parameter plot
param_plot = figure(
x_axis_label="Parameter",
y_axis_label="Fit parameter",
height=400,
width=700,
tools="pan,wheel_zoom,reset",
)
param_plot = Plot(x_range=DataRange1d(), y_range=DataRange1d(), plot_height=400, plot_width=700)
param_scatter_source = ColumnDataSource(dict(x=[], y=[]))
param_plot.circle(source=param_scatter_source)
param_plot.add_layout(LinearAxis(axis_label="Fit parameter"), place="left")
param_plot.add_layout(LinearAxis(axis_label="Parameter"), place="below")
param_plot.add_layout(Grid(dimension=0, ticker=BasicTicker()))
param_plot.add_layout(Grid(dimension=1, ticker=BasicTicker()))
param_plot_scatter_source = ColumnDataSource(dict(x=[], y=[]))
param_plot.add_glyph(param_plot_scatter_source, Scatter(x="x", y="y"))
param_plot.add_tools(PanTool(), WheelZoomTool(), ResetTool())
param_plot.toolbar.logo = None
def fit_param_select_callback(_attr, _old, _new):
@ -481,15 +551,18 @@ def create():
proc_button.on_click(proc_button_callback)
layout_controls = row(
colormap_select,
colormap,
column(proj_auto_checkbox, row(proj_display_min_spinner, proj_display_max_spinner)),
proc_button,
proc_all_button,
)
layout_proj = column(
layout_overview = column(
gridplot(
[[proj_x_plot, proj_y_plot]], toolbar_options={"logo": None}, toolbar_location="right"
[[overview_plot_x, overview_plot_y]],
toolbar_options=dict(logo=None),
merge_tools=True,
toolbar_location="left",
),
layout_controls,
)
@ -497,7 +570,7 @@ def create():
# Plot tabs
plots = Tabs(
tabs=[
Panel(child=layout_proj, title="single scan"),
Panel(child=layout_overview, title="single scan"),
Panel(child=column(param_plot, row(fit_param_select)), title="parameter plot"),
]
)
View File
@ -7,43 +7,52 @@ from bokeh.events import MouseEnter
from bokeh.io import curdoc
from bokeh.layouts import column, gridplot, row
from bokeh.models import (
BasicTicker,
BoxEditTool,
BoxZoomTool,
Button,
CellEditor,
CheckboxGroup,
ColumnDataSource,
DataRange1d,
DataTable,
Div,
FileInput,
HoverTool,
LinearAxis,
LinearColorMapper,
LogColorMapper,
Grid,
MultiSelect,
NumberFormatter,
HoverTool,
Image,
Line,
LinearAxis,
LinearColorMapper,
Panel,
RadioGroup,
PanTool,
Plot,
Range1d,
Rect,
ResetTool,
Select,
Slider,
Spacer,
Spinner,
TableColumn,
Tabs,
Title,
WheelZoomTool,
)
from bokeh.plotting import figure
from bokeh.palettes import Cividis256, Greys256, Plasma256 # pylint: disable=E0611
import pyzebra
IMAGE_W = 256
IMAGE_H = 128
IMAGE_PLOT_W = int(IMAGE_W * 2.4) + 52
IMAGE_PLOT_H = int(IMAGE_H * 2.4) + 27
IMAGE_PLOT_W = int(IMAGE_W * 2) + 52
IMAGE_PLOT_H = int(IMAGE_H * 2) + 27
def create():
doc = curdoc()
log = doc.logger
dataset = []
cami_meta = {}
@ -103,8 +112,8 @@ def create():
nonlocal dataset
try:
scan = pyzebra.read_detector_data(io.BytesIO(base64.b64decode(new)), None)
except Exception as e:
log.exception(e)
except KeyError:
print("Could not read data from the file.")
return
dataset = [scan]
@ -138,8 +147,8 @@ def create():
f_name = os.path.basename(f_path)
try:
file_data = [pyzebra.read_detector_data(f_path, cm)]
except Exception as e:
log.exception(e)
except:
print(f"Error loading {f_name}")
continue
pyzebra.normalize_dataset(file_data, monitor_spinner.value)
@ -147,7 +156,7 @@ def create():
if not new_data: # first file
new_data = file_data
else:
pyzebra.merge_datasets(new_data, file_data, log=log)
pyzebra.merge_datasets(new_data, file_data)
if new_data:
dataset = new_data
@ -162,12 +171,12 @@ def create():
f_name = os.path.basename(f_path)
try:
file_data = [pyzebra.read_detector_data(f_path, None)]
except Exception as e:
log.exception(e)
except:
print(f"Error loading {f_name}")
continue
pyzebra.normalize_dataset(file_data, monitor_spinner.value)
pyzebra.merge_datasets(dataset, file_data, log=log)
pyzebra.merge_datasets(dataset, file_data)
if file_data:
_init_datatable()
@ -237,7 +246,7 @@ def create():
metadata_table_source.data.update(geom=["bisecting"])
_update_image()
_update_proj_plots()
_update_overview_plot()
def scan_table_source_callback(_attr, _old, new):
# unfortunately, we don't know if the change comes from data update or user input
@ -246,7 +255,7 @@ def create():
scan["export"] = export
scan_table_source = ColumnDataSource(
dict(scan=[], fit=[], export=[], twotheta=[], gamma=[], omega=[], chi=[], phi=[], nu=[])
dict(scan=[], fit=[], export=[], twotheta=[], gamma=[], omega=[], chi=[], phi=[], nu=[],)
)
scan_table_source.on_change("data", scan_table_source_callback)
scan_table_source.selected.on_change("indices", scan_table_select_callback)
@ -277,11 +286,11 @@ def create():
export = [scan["export"] for scan in dataset]
scan_table_source.data.update(export=export)
def monitor_spinner_callback(_attr, _old, new):
def monitor_spinner_callback(_attr, old, new):
if dataset:
pyzebra.normalize_dataset(dataset, new)
_update_image()
_update_proj_plots()
_update_overview_plot()
monitor_spinner = Spinner(title="Monitor:", mode="int", value=100_000, low=1, width=145)
monitor_spinner.on_change("value", monitor_spinner_callback)
@ -293,13 +302,13 @@ def create():
scan_from = dataset[int(merge_from_select.value)]
if scan_into is scan_from:
log.warning("Selected scans for merging are identical")
print("WARNING: Selected scans for merging are identical")
return
pyzebra.merge_h5_scans(scan_into, scan_from, log=log)
pyzebra.merge_h5_scans(scan_into, scan_from)
_update_table()
_update_image()
_update_proj_plots()
_update_overview_plot()
merge_button = Button(label="Merge into current", width=145)
merge_button.on_click(merge_button_callback)
@ -308,7 +317,7 @@ def create():
pyzebra.restore_scan(_get_selected_scan())
_update_table()
_update_image()
_update_proj_plots()
_update_overview_plot()
restore_button = Button(label="Restore scan", width=145)
restore_button.on_click(restore_button_callback)
@ -326,7 +335,9 @@ def create():
x=np.mean(current_image, axis=1), y=np.arange(0, IMAGE_H) + 0.5
)
image_source.data.update(h=[np.zeros((1, 1))], k=[np.zeros((1, 1))], l=[np.zeros((1, 1))])
image_source.data.update(
h=[np.zeros((1, 1))], k=[np.zeros((1, 1))], l=[np.zeros((1, 1))],
)
image_source.data.update(image=[current_image])
if main_auto_checkbox.active:
@ -336,6 +347,9 @@ def create():
display_min_spinner.value = im_min
display_max_spinner.value = im_max
image_glyph.color_mapper.low = im_min
image_glyph.color_mapper.high = im_max
if "mf" in scan:
metadata_table_source.data.update(mf=[scan["mf"][index]])
else:
@ -357,8 +371,8 @@ def create():
gamma_c = gamma[det_c_y, det_c_x]
nu_c = nu[det_c_y, det_c_x]
omega_c = omega[det_c_y, det_c_x]
chi_c = scan["chi"][index]
phi_c = scan["phi"][index]
chi_c = None
phi_c = None
else: # zebra_mode == "bi"
wave = scan["wave"]
@ -375,31 +389,36 @@ def create():
)
detcenter_table_source.data.update(
gamma=[gamma_c], nu=[nu_c], omega=[omega_c], chi=[chi_c], phi=[phi_c]
gamma=[gamma_c], nu=[nu_c], omega=[omega_c], chi=[chi_c], phi=[phi_c],
)
def _update_proj_plots():
def _update_overview_plot():
scan = _get_selected_scan()
counts = scan["counts"]
n_im, n_y, n_x = counts.shape
im_proj_x = np.mean(counts, axis=1)
im_proj_y = np.mean(counts, axis=2)
overview_x = np.mean(counts, axis=1)
overview_y = np.mean(counts, axis=2)
# normalize for simpler colormapping
im_proj_max_val = max(np.max(im_proj_x), np.max(im_proj_y))
im_proj_x = 1000 * im_proj_x / im_proj_max_val
im_proj_y = 1000 * im_proj_y / im_proj_max_val
overview_max_val = max(np.max(overview_x), np.max(overview_y))
overview_x = 1000 * overview_x / overview_max_val
overview_y = 1000 * overview_y / overview_max_val
proj_x_image_source.data.update(image=[im_proj_x], dw=[n_x], dh=[n_im])
proj_y_image_source.data.update(image=[im_proj_y], dw=[n_y], dh=[n_im])
overview_plot_x_image_source.data.update(image=[overview_x], dw=[n_x], dh=[n_im])
overview_plot_y_image_source.data.update(image=[overview_y], dw=[n_y], dh=[n_im])
if proj_auto_checkbox.active:
im_min = min(np.min(im_proj_x), np.min(im_proj_y))
im_max = max(np.max(im_proj_x), np.max(im_proj_y))
im_min = min(np.min(overview_x), np.min(overview_y))
im_max = max(np.max(overview_x), np.max(overview_y))
proj_display_min_spinner.value = im_min
proj_display_max_spinner.value = im_max
overview_plot_x_image_glyph.color_mapper.low = im_min
overview_plot_y_image_glyph.color_mapper.low = im_min
overview_plot_x_image_glyph.color_mapper.high = im_max
overview_plot_y_image_glyph.color_mapper.high = im_max
frame_range.start = 0
frame_range.end = n_im
frame_range.reset_start = 0
@ -407,7 +426,7 @@ def create():
frame_range.bounds = (0, n_im)
scan_motor = scan["scan_motor"]
proj_y_plot.yaxis.axis_label = f"Scanning motor, {scan_motor}"
overview_plot_y.axis[1].axis_label = f"Scanning motor, {scan_motor}"
var = scan[scan_motor]
var_start = var[0]
@ -453,23 +472,26 @@ def create():
index_slider.js_link("value_throttled", index_spinner, "value")
index_spinner.js_link("value", index_slider, "value")
# image viewer figure
plot = figure(
plot = Plot(
x_range=Range1d(0, IMAGE_W, bounds=(0, IMAGE_W)),
y_range=Range1d(0, IMAGE_H, bounds=(0, IMAGE_H)),
x_axis_location="above",
y_axis_location="right",
height=IMAGE_PLOT_H,
width=IMAGE_PLOT_W,
plot_height=IMAGE_PLOT_H,
plot_width=IMAGE_PLOT_W,
toolbar_location="left",
tools="pan,box_zoom,wheel_zoom,reset",
active_scroll="wheel_zoom",
)
plot.yaxis.major_label_orientation = "vertical"
plot.toolbar.tools[2].maintain_focus = False
# ---- tools
plot.toolbar.logo = None
# ---- axes
plot.add_layout(LinearAxis(), place="above")
plot.add_layout(LinearAxis(major_label_orientation="vertical"), place="right")
# ---- grid lines
plot.add_layout(Grid(dimension=0, ticker=BasicTicker()))
plot.add_layout(Grid(dimension=1, ticker=BasicTicker()))
# ---- rgba image glyph
image_source = ColumnDataSource(
dict(
image=[np.zeros((IMAGE_H, IMAGE_W), dtype="float32")],
@ -486,15 +508,22 @@ def create():
)
)
lin_color_mapper = LinearColorMapper(low=0, high=1)
log_color_mapper = LogColorMapper(low=0, high=1)
plot_image = plot.image(source=image_source, color_mapper=lin_color_mapper)
plot.image(source=image_source, image="h", global_alpha=0)
plot.image(source=image_source, image="k", global_alpha=0)
plot.image(source=image_source, image="l", global_alpha=0)
plot.image(source=image_source, image="gamma", global_alpha=0)
plot.image(source=image_source, image="nu", global_alpha=0)
plot.image(source=image_source, image="omega", global_alpha=0)
h_glyph = Image(image="h", x="x", y="y", dw="dw", dh="dh", global_alpha=0)
k_glyph = Image(image="k", x="x", y="y", dw="dw", dh="dh", global_alpha=0)
l_glyph = Image(image="l", x="x", y="y", dw="dw", dh="dh", global_alpha=0)
gamma_glyph = Image(image="gamma", x="x", y="y", dw="dw", dh="dh", global_alpha=0)
nu_glyph = Image(image="nu", x="x", y="y", dw="dw", dh="dh", global_alpha=0)
omega_glyph = Image(image="omega", x="x", y="y", dw="dw", dh="dh", global_alpha=0)
plot.add_glyph(image_source, h_glyph)
plot.add_glyph(image_source, k_glyph)
plot.add_glyph(image_source, l_glyph)
plot.add_glyph(image_source, gamma_glyph)
plot.add_glyph(image_source, nu_glyph)
plot.add_glyph(image_source, omega_glyph)
image_glyph = Image(image="image", x="x", y="y", dw="dw", dh="dh")
plot.add_glyph(image_source, image_glyph, name="image_glyph")
# calculate hkl-indices of first mouse entry
def mouse_enter_callback(_event):
@ -506,37 +535,42 @@ def create():
plot.on_event(MouseEnter, mouse_enter_callback)
# Single frame projection plots
proj_v = figure(
# ---- projections
proj_v = Plot(
x_range=plot.x_range,
y_axis_location="right",
height=150,
width=IMAGE_PLOT_W,
tools="",
y_range=DataRange1d(),
plot_height=150,
plot_width=IMAGE_PLOT_W,
toolbar_location=None,
)
proj_v.yaxis.major_label_orientation = "vertical"
proj_v.xaxis.major_label_text_font_size = "0pt"
proj_v.add_layout(LinearAxis(major_label_orientation="vertical"), place="right")
proj_v.add_layout(LinearAxis(major_label_text_font_size="0pt"), place="below")
proj_v.add_layout(Grid(dimension=0, ticker=BasicTicker()))
proj_v.add_layout(Grid(dimension=1, ticker=BasicTicker()))
proj_v_line_source = ColumnDataSource(dict(x=[], y=[]))
proj_v.line(source=proj_v_line_source, line_color="steelblue")
proj_v.add_glyph(proj_v_line_source, Line(x="x", y="y", line_color="steelblue"))
proj_h = figure(
x_axis_location="above",
proj_h = Plot(
x_range=DataRange1d(),
y_range=plot.y_range,
height=IMAGE_PLOT_H,
width=150,
tools="",
plot_height=IMAGE_PLOT_H,
plot_width=150,
toolbar_location=None,
)
proj_h.yaxis.major_label_text_font_size = "0pt"
proj_h.add_layout(LinearAxis(), place="above")
proj_h.add_layout(LinearAxis(major_label_text_font_size="0pt"), place="left")
proj_h.add_layout(Grid(dimension=0, ticker=BasicTicker()))
proj_h.add_layout(Grid(dimension=1, ticker=BasicTicker()))
proj_h_line_source = ColumnDataSource(dict(x=[], y=[]))
proj_h.line(source=proj_h_line_source, line_color="steelblue")
proj_h.add_glyph(proj_h_line_source, Line(x="x", y="y", line_color="steelblue"))
# extra tools
# add tools
hovertool = HoverTool(
tooltips=[
("intensity", "@image"),
@ -550,7 +584,10 @@ def create():
)
box_edit_source = ColumnDataSource(dict(x=[], y=[], width=[], height=[]))
box_edit_renderer = plot.rect(source=box_edit_source, fill_alpha=0, line_color="red")
box_edit_glyph = Rect(
x="x", y="y", width="width", height="height", fill_alpha=0, line_color="red"
)
box_edit_renderer = plot.add_glyph(box_edit_source, box_edit_glyph)
boxedittool = BoxEditTool(renderers=[box_edit_renderer], num_objects=1)
def box_edit_callback(_attr, _old, new):
@ -571,115 +608,147 @@ def create():
box_edit_source.on_change("data", box_edit_callback)
plot.add_tools(hovertool, boxedittool)
wheelzoomtool = WheelZoomTool(maintain_focus=False)
plot.add_tools(
PanTool(), BoxZoomTool(), wheelzoomtool, ResetTool(), hovertool, boxedittool,
)
plot.toolbar.active_scroll = wheelzoomtool
# Overview projection plots
# shared frame ranges
frame_range = Range1d(0, 1, bounds=(0, 1))
scanning_motor_range = Range1d(0, 1, bounds=(0, 1))
lin_color_mapper_proj = LinearColorMapper(low=0, high=1)
log_color_mapper_proj = LogColorMapper(low=0, high=1)
det_x_range = Range1d(0, IMAGE_W, bounds=(0, IMAGE_W))
gamma_range = Range1d(0, 1, bounds=(0, 1))
proj_x_plot = figure(
title="Projections on X-axis",
x_axis_label="Coordinate X, pix",
y_axis_label="Frame",
overview_plot_x = Plot(
title=Title(text="Projections on X-axis"),
x_range=det_x_range,
y_range=frame_range,
extra_x_ranges={"gamma": gamma_range},
extra_y_ranges={"scanning_motor": scanning_motor_range},
height=540,
width=IMAGE_PLOT_W - 3,
tools="pan,box_zoom,wheel_zoom,reset",
active_scroll="wheel_zoom",
plot_height=450,
plot_width=IMAGE_PLOT_W - 3,
)
proj_x_plot.yaxis.major_label_orientation = "vertical"
proj_x_plot.toolbar.tools[2].maintain_focus = False
# ---- tools
wheelzoomtool = WheelZoomTool(maintain_focus=False)
overview_plot_x.toolbar.logo = None
overview_plot_x.add_tools(
PanTool(), BoxZoomTool(), wheelzoomtool, ResetTool(),
)
overview_plot_x.toolbar.active_scroll = wheelzoomtool
proj_x_plot.add_layout(LinearAxis(x_range_name="gamma", axis_label="Gamma, deg"), place="above")
# ---- axes
overview_plot_x.add_layout(LinearAxis(axis_label="Coordinate X, pix"), place="below")
overview_plot_x.add_layout(
LinearAxis(x_range_name="gamma", axis_label="Gamma, deg"), place="above"
)
overview_plot_x.add_layout(
LinearAxis(axis_label="Frame", major_label_orientation="vertical"), place="left"
)
proj_x_image_source = ColumnDataSource(
# ---- grid lines
overview_plot_x.add_layout(Grid(dimension=0, ticker=BasicTicker()))
overview_plot_x.add_layout(Grid(dimension=1, ticker=BasicTicker()))
# ---- rgba image glyph
overview_plot_x_image_source = ColumnDataSource(
dict(image=[np.zeros((1, 1), dtype="float32")], x=[0], y=[0], dw=[IMAGE_W], dh=[1])
)
proj_x_image = proj_x_plot.image(source=proj_x_image_source, color_mapper=lin_color_mapper_proj)
overview_plot_x_image_glyph = Image(image="image", x="x", y="y", dw="dw", dh="dh")
overview_plot_x.add_glyph(
overview_plot_x_image_source, overview_plot_x_image_glyph, name="image_glyph"
)
det_y_range = Range1d(0, IMAGE_H, bounds=(0, IMAGE_H))
nu_range = Range1d(0, 1, bounds=(0, 1))
proj_y_plot = figure(
title="Projections on Y-axis",
x_axis_label="Coordinate Y, pix",
y_axis_label="Scanning motor",
y_axis_location="right",
overview_plot_y = Plot(
title=Title(text="Projections on Y-axis"),
x_range=det_y_range,
y_range=frame_range,
extra_x_ranges={"nu": nu_range},
extra_y_ranges={"scanning_motor": scanning_motor_range},
height=540,
width=IMAGE_PLOT_H + 22,
tools="pan,box_zoom,wheel_zoom,reset",
active_scroll="wheel_zoom",
plot_height=450,
plot_width=IMAGE_PLOT_H + 22,
)
proj_y_plot.yaxis.y_range_name = "scanning_motor"
proj_y_plot.yaxis.major_label_orientation = "vertical"
proj_y_plot.toolbar.tools[2].maintain_focus = False
# ---- tools
wheelzoomtool = WheelZoomTool(maintain_focus=False)
overview_plot_y.toolbar.logo = None
overview_plot_y.add_tools(
PanTool(), BoxZoomTool(), wheelzoomtool, ResetTool(),
)
overview_plot_y.toolbar.active_scroll = wheelzoomtool
proj_y_plot.add_layout(LinearAxis(x_range_name="nu", axis_label="Nu, deg"), place="above")
# ---- axes
overview_plot_y.add_layout(LinearAxis(axis_label="Coordinate Y, pix"), place="below")
overview_plot_y.add_layout(LinearAxis(x_range_name="nu", axis_label="Nu, deg"), place="above")
overview_plot_y.add_layout(
LinearAxis(
y_range_name="scanning_motor",
axis_label="Scanning motor",
major_label_orientation="vertical",
),
place="right",
)
proj_y_image_source = ColumnDataSource(
# ---- grid lines
overview_plot_y.add_layout(Grid(dimension=0, ticker=BasicTicker()))
overview_plot_y.add_layout(Grid(dimension=1, ticker=BasicTicker()))
# ---- rgba image glyph
overview_plot_y_image_source = ColumnDataSource(
dict(image=[np.zeros((1, 1), dtype="float32")], x=[0], y=[0], dw=[IMAGE_H], dh=[1])
)
proj_y_image = proj_y_plot.image(source=proj_y_image_source, color_mapper=lin_color_mapper_proj)
overview_plot_y_image_glyph = Image(image="image", x="x", y="y", dw="dw", dh="dh")
overview_plot_y.add_glyph(
overview_plot_y_image_source, overview_plot_y_image_glyph, name="image_glyph"
)
# ROI slice plot
roi_avg_plot = figure(height=150, width=IMAGE_PLOT_W, tools="", toolbar_location=None)
roi_avg_plot = Plot(
x_range=DataRange1d(),
y_range=DataRange1d(),
plot_height=150,
plot_width=IMAGE_PLOT_W,
toolbar_location="left",
)
# ---- tools
roi_avg_plot.toolbar.logo = None
# ---- axes
roi_avg_plot.add_layout(LinearAxis(), place="below")
roi_avg_plot.add_layout(LinearAxis(major_label_orientation="vertical"), place="left")
# ---- grid lines
roi_avg_plot.add_layout(Grid(dimension=0, ticker=BasicTicker()))
roi_avg_plot.add_layout(Grid(dimension=1, ticker=BasicTicker()))
roi_avg_plot_line_source = ColumnDataSource(dict(x=[], y=[]))
roi_avg_plot.line(source=roi_avg_plot_line_source, line_color="steelblue")
roi_avg_plot.add_glyph(roi_avg_plot_line_source, Line(x="x", y="y", line_color="steelblue"))
def colormap_select_callback(_attr, _old, new):
lin_color_mapper.palette = new
log_color_mapper.palette = new
lin_color_mapper_proj.palette = new
log_color_mapper_proj.palette = new
cmap_dict = {
"gray": Greys256,
"gray_reversed": Greys256[::-1],
"plasma": Plasma256,
"cividis": Cividis256,
}
colormap_select = Select(
title="Colormap:",
options=[("Greys256", "greys"), ("Plasma256", "plasma"), ("Cividis256", "cividis")],
width=100,
)
colormap_select.on_change("value", colormap_select_callback)
colormap_select.value = "Plasma256"
def colormap_callback(_attr, _old, new):
image_glyph.color_mapper = LinearColorMapper(palette=cmap_dict[new])
overview_plot_x_image_glyph.color_mapper = LinearColorMapper(palette=cmap_dict[new])
overview_plot_y_image_glyph.color_mapper = LinearColorMapper(palette=cmap_dict[new])
def colormap_scale_rg_callback(_attr, _old, new):
if new == 0: # Linear
plot_image.glyph.color_mapper = lin_color_mapper
proj_x_image.glyph.color_mapper = lin_color_mapper_proj
proj_y_image.glyph.color_mapper = lin_color_mapper_proj
colormap = Select(title="Colormap:", options=list(cmap_dict.keys()), width=210)
colormap.on_change("value", colormap_callback)
colormap.value = "plasma"
else: # Logarithmic
if (
display_min_spinner.value > 0
and display_max_spinner.value > 0
and proj_display_min_spinner.value > 0
and proj_display_max_spinner.value > 0
):
plot_image.glyph.color_mapper = log_color_mapper
proj_x_image.glyph.color_mapper = log_color_mapper_proj
proj_y_image.glyph.color_mapper = log_color_mapper_proj
else:
colormap_scale_rg.active = 0
STEP = 1
colormap_scale_rg = RadioGroup(labels=["Linear", "Logarithmic"], active=0, width=100)
colormap_scale_rg.on_change("active", colormap_scale_rg_callback)
def main_auto_checkbox_callback(_attr, _old, new):
if 0 in new:
def main_auto_checkbox_callback(state):
if state:
display_min_spinner.disabled = True
display_max_spinner.disabled = True
else:
@ -691,54 +760,83 @@ def create():
main_auto_checkbox = CheckboxGroup(
labels=["Frame Intensity Range"], active=[0], width=145, margin=[10, 5, 0, 5]
)
main_auto_checkbox.on_change("active", main_auto_checkbox_callback)
main_auto_checkbox.on_click(main_auto_checkbox_callback)
def display_max_spinner_callback(_attr, _old, new):
lin_color_mapper.high = new
log_color_mapper.high = new
# TODO: without this _update_image() call, the log color mapper display update is delayed
_update_image()
def display_max_spinner_callback(_attr, _old_value, new_value):
display_min_spinner.high = new_value - STEP
image_glyph.color_mapper.high = new_value
display_max_spinner = Spinner(value=1, disabled=bool(main_auto_checkbox.active), width=100)
display_max_spinner = Spinner(
low=0 + STEP,
value=1,
step=STEP,
disabled=bool(main_auto_checkbox.active),
width=100,
height=31,
)
display_max_spinner.on_change("value", display_max_spinner_callback)
def display_min_spinner_callback(_attr, _old, new):
lin_color_mapper.low = new
log_color_mapper.low = new
_update_image()
def display_min_spinner_callback(_attr, _old_value, new_value):
display_max_spinner.low = new_value + STEP
image_glyph.color_mapper.low = new_value
display_min_spinner = Spinner(value=0, disabled=bool(main_auto_checkbox.active), width=100)
display_min_spinner = Spinner(
low=0,
high=1 - STEP,
value=0,
step=STEP,
disabled=bool(main_auto_checkbox.active),
width=100,
height=31,
)
display_min_spinner.on_change("value", display_min_spinner_callback)
def proj_auto_checkbox_callback(_attr, _old, new):
if 0 in new:
PROJ_STEP = 1
def proj_auto_checkbox_callback(state):
if state:
proj_display_min_spinner.disabled = True
proj_display_max_spinner.disabled = True
else:
proj_display_min_spinner.disabled = False
proj_display_max_spinner.disabled = False
_update_proj_plots()
_update_overview_plot()
proj_auto_checkbox = CheckboxGroup(
labels=["Projections Intensity Range"], active=[0], width=145, margin=[10, 5, 0, 5]
)
proj_auto_checkbox.on_change("active", proj_auto_checkbox_callback)
proj_auto_checkbox.on_click(proj_auto_checkbox_callback)
def proj_display_max_spinner_callback(_attr, _old, new):
lin_color_mapper_proj.high = new
log_color_mapper_proj.high = new
_update_proj_plots()
def proj_display_max_spinner_callback(_attr, _old_value, new_value):
proj_display_min_spinner.high = new_value - PROJ_STEP
overview_plot_x_image_glyph.color_mapper.high = new_value
overview_plot_y_image_glyph.color_mapper.high = new_value
proj_display_max_spinner = Spinner(value=1, disabled=bool(proj_auto_checkbox.active), width=100)
proj_display_max_spinner = Spinner(
low=0 + PROJ_STEP,
value=1,
step=PROJ_STEP,
disabled=bool(proj_auto_checkbox.active),
width=100,
height=31,
)
proj_display_max_spinner.on_change("value", proj_display_max_spinner_callback)
def proj_display_min_spinner_callback(_attr, _old, new):
lin_color_mapper_proj.low = new
log_color_mapper_proj.low = new
_update_proj_plots()
def proj_display_min_spinner_callback(_attr, _old_value, new_value):
proj_display_max_spinner.low = new_value + PROJ_STEP
overview_plot_x_image_glyph.color_mapper.low = new_value
overview_plot_y_image_glyph.color_mapper.low = new_value
proj_display_min_spinner = Spinner(value=0, disabled=bool(proj_auto_checkbox.active), width=100)
proj_display_min_spinner = Spinner(
low=0,
high=1 - PROJ_STEP,
value=0,
step=PROJ_STEP,
disabled=bool(proj_auto_checkbox.active),
width=100,
height=31,
)
proj_display_min_spinner.on_change("value", proj_display_min_spinner_callback)
events_data = dict(
@ -812,7 +910,7 @@ def create():
gamma = scan["gamma"][0]
omega = scan["omega"][0]
nu = scan["nu"]
nu = scan["nu"][0]
chi = scan["chi"][0]
phi = scan["phi"][0]
@ -843,6 +941,10 @@ def create():
x_pos = scan["fit"]["x_pos"]
y_pos = scan["fit"]["y_pos"]
if scan["zebra_mode"] == "nb":
chi = None
phi = None
events_data["wave"].append(wave)
events_data["ddist"].append(ddist)
events_data["cell"].append(cell)
@ -907,7 +1009,7 @@ def create():
layout_image = column(gridplot([[proj_v, None], [plot, proj_h]], merge_tools=False))
colormap_layout = column(
row(colormap_select, column(Spacer(height=15), colormap_scale_rg)),
colormap,
main_auto_checkbox,
row(display_min_spinner, display_max_spinner),
proj_auto_checkbox,
@ -919,10 +1021,13 @@ def create():
row(column(add_event_button, remove_event_button), peak_tables),
)
layout_proj = column(
layout_overview = column(
gridplot(
[[proj_x_plot, proj_y_plot]], toolbar_options={"logo": None}, toolbar_location="right"
)
[[overview_plot_x, overview_plot_y]],
toolbar_options=dict(logo=None),
merge_tools=True,
toolbar_location="left",
),
)
scan_layout = column(
@ -933,7 +1038,7 @@ def create():
tab_layout = row(
column(import_layout, colormap_layout),
column(row(scan_layout, layout_proj), layout_controls),
column(row(scan_layout, layout_overview), layout_controls),
column(roi_avg_plot, layout_image),
)
@ -950,7 +1055,7 @@ def calculate_hkl(scan, index):
gammad = scan["gamma"][index]
om = scan["omega"][index]
nud = scan["nu"]
ub_inv = np.linalg.inv(scan["ub"])
ub = scan["ub"]
geometry = scan["zebra_mode"]
if geometry == "bi":
@ -962,7 +1067,11 @@ def calculate_hkl(scan, index):
else:
raise ValueError(f"Unknown geometry type '{geometry}'")
h, k, l = pyzebra.ang2hkl_det(wave, ddist, gammad, om, chi, phi, nud, ub_inv)
for xi in np.arange(IMAGE_W):
for yi in np.arange(IMAGE_H):
h[yi, xi], k[yi, xi], l[yi, xi] = pyzebra.ang2hkl(
wave, ddist, gammad, om, chi, phi, nud, ub, xi, yi
)
return h, k, l

View File

@ -1,36 +1,79 @@
import base64
import io
import itertools
import os
import tempfile
import types
import numpy as np
from bokeh.io import curdoc
from bokeh.layouts import column, row
from bokeh.models import (
BasicTicker,
Button,
CellEditor,
CheckboxEditor,
CheckboxGroup,
ColumnDataSource,
CustomJS,
DataRange1d,
DataTable,
Div,
Dropdown,
FileInput,
Grid,
HoverTool,
Image,
Legend,
Line,
LinearAxis,
LinearColorMapper,
MultiLine,
MultiSelect,
NumberEditor,
Panel,
PanTool,
Plot,
RadioGroup,
Range1d,
ResetTool,
Scatter,
Select,
Spacer,
Span,
Spinner,
TableColumn,
Tabs,
TextAreaInput,
WheelZoomTool,
Whisker,
)
from bokeh.palettes import Category10, Plasma256
from bokeh.plotting import figure
from scipy import interpolate
import pyzebra
from pyzebra import app
from pyzebra.ccl_process import AREA_METHODS
javaScript = """
let j = 0;
for (let i = 0; i < js_data.data['fname'].length; i++) {
if (js_data.data['content'][i] === "") continue;
setTimeout(function() {
const blob = new Blob([js_data.data['content'][i]], {type: 'text/plain'})
const link = document.createElement('a');
document.body.appendChild(link);
const url = window.URL.createObjectURL(blob);
link.href = url;
link.download = js_data.data['fname'][i] + js_data.data['ext'][i];
link.click();
window.URL.revokeObjectURL(url);
document.body.removeChild(link);
}, 100 * j)
j++;
}
"""
def color_palette(n_colors):
@ -40,9 +83,32 @@ def color_palette(n_colors):
def create():
doc = curdoc()
log = doc.logger
dataset = []
app_dlfiles = app.DownloadFiles(n_files=1)
fit_params = {}
js_data = ColumnDataSource(data=dict(content=[""], fname=[""], ext=[""]))
def file_select_update_for_proposal():
proposal_path = proposal_textinput.name
if proposal_path:
file_list = []
for file in os.listdir(proposal_path):
if file.endswith((".ccl", ".dat")):
file_list.append((os.path.join(proposal_path, file), file))
file_select.options = file_list
file_open_button.disabled = False
file_append_button.disabled = False
else:
file_select.options = []
file_open_button.disabled = True
file_append_button.disabled = True
doc.add_periodic_callback(file_select_update_for_proposal, 5000)
def proposal_textinput_callback(_attr, _old, _new):
file_select_update_for_proposal()
proposal_textinput = doc.proposal_textinput
proposal_textinput.on_change("name", proposal_textinput_callback)
def _init_datatable():
scan_list = [s["idx"] for s in dataset]
@ -57,7 +123,7 @@ def create():
file_list.append(os.path.basename(scan["original_filename"]))
scan_table_source.data.update(
file=file_list, scan=scan_list, param=param, fit=[0] * len(scan_list), export=export
file=file_list, scan=scan_list, param=param, fit=[0] * len(scan_list), export=export,
)
scan_table_source.selected.indices = []
scan_table_source.selected.indices = [0]
@ -69,6 +135,123 @@ def create():
merge_from_select.options = merge_options
merge_from_select.value = merge_options[0][0]
file_select = MultiSelect(title="Available .ccl/.dat files:", width=210, height=250)
def file_open_button_callback():
nonlocal dataset
new_data = []
for f_path in file_select.value:
with open(f_path) as file:
f_name = os.path.basename(f_path)
base, ext = os.path.splitext(f_name)
try:
file_data = pyzebra.parse_1D(file, ext)
except:
print(f"Error loading {f_name}")
continue
pyzebra.normalize_dataset(file_data, monitor_spinner.value)
if not new_data: # first file
new_data = file_data
pyzebra.merge_duplicates(new_data)
js_data.data.update(fname=[base])
else:
pyzebra.merge_datasets(new_data, file_data)
if new_data:
dataset = new_data
_init_datatable()
append_upload_button.disabled = False
file_open_button = Button(label="Open New", width=100, disabled=True)
file_open_button.on_click(file_open_button_callback)
def file_append_button_callback():
file_data = []
for f_path in file_select.value:
with open(f_path) as file:
f_name = os.path.basename(f_path)
_, ext = os.path.splitext(f_name)
try:
file_data = pyzebra.parse_1D(file, ext)
except:
print(f"Error loading {f_name}")
continue
pyzebra.normalize_dataset(file_data, monitor_spinner.value)
pyzebra.merge_datasets(dataset, file_data)
if file_data:
_init_datatable()
file_append_button = Button(label="Append", width=100, disabled=True)
file_append_button.on_click(file_append_button_callback)
def upload_button_callback(_attr, _old, _new):
nonlocal dataset
new_data = []
for f_str, f_name in zip(upload_button.value, upload_button.filename):
with io.StringIO(base64.b64decode(f_str).decode()) as file:
base, ext = os.path.splitext(f_name)
try:
file_data = pyzebra.parse_1D(file, ext)
except:
print(f"Error loading {f_name}")
continue
pyzebra.normalize_dataset(file_data, monitor_spinner.value)
if not new_data: # first file
new_data = file_data
pyzebra.merge_duplicates(new_data)
js_data.data.update(fname=[base])
else:
pyzebra.merge_datasets(new_data, file_data)
if new_data:
dataset = new_data
_init_datatable()
append_upload_button.disabled = False
upload_div = Div(text="or upload new .ccl/.dat files:", margin=(5, 5, 0, 5))
upload_button = FileInput(accept=".ccl,.dat", multiple=True, width=200)
# for on_change("value", ...) or on_change("filename", ...),
# see https://github.com/bokeh/bokeh/issues/11461
upload_button.on_change("filename", upload_button_callback)
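# Note: Bokeh's FileInput exposes uploaded contents base64-encoded in .value (with the
# matching names in .filename), which is why the callbacks above decode each entry and
# wrap it in io.StringIO so pyzebra.parse_1D sees a text file, e.g.
#     base64.b64decode("aGVsbG8=").decode() == "hello"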
def append_upload_button_callback(_attr, _old, _new):
file_data = []
for f_str, f_name in zip(append_upload_button.value, append_upload_button.filename):
with io.StringIO(base64.b64decode(f_str).decode()) as file:
_, ext = os.path.splitext(f_name)
try:
file_data = pyzebra.parse_1D(file, ext)
except:
print(f"Error loading {f_name}")
continue
pyzebra.normalize_dataset(file_data, monitor_spinner.value)
pyzebra.merge_datasets(dataset, file_data)
if file_data:
_init_datatable()
append_upload_div = Div(text="append extra files:", margin=(5, 5, 0, 5))
append_upload_button = FileInput(accept=".ccl,.dat", multiple=True, width=200, disabled=True)
# for on_change("value", ...) or on_change("filename", ...),
# see https://github.com/bokeh/bokeh/issues/11461
append_upload_button.on_change("filename", append_upload_button_callback)
def monitor_spinner_callback(_attr, _old, new):
if dataset:
pyzebra.normalize_dataset(dataset, new)
_update_single_scan_plot()
_update_overview()
monitor_spinner = Spinner(title="Monitor:", mode="int", value=100_000, low=1, width=145)
monitor_spinner.on_change("value", monitor_spinner_callback)
def scan_motor_select_callback(_attr, _old, new):
if dataset:
for scan in dataset:
@ -98,19 +281,19 @@ def create():
x = scan[scan_motor]
plot.axis[0].axis_label = scan_motor
scatter_source.data.update(x=x, y=y, y_upper=y + y_err, y_lower=y - y_err)
plot_scatter_source.data.update(x=x, y=y, y_upper=y + y_err, y_lower=y - y_err)
fit = scan.get("fit")
if fit is not None:
x_fit = np.linspace(x[0], x[-1], 100)
fit_source.data.update(x=x_fit, y=fit.eval(x=x_fit))
plot_fit_source.data.update(x=x_fit, y=fit.eval(x=x_fit))
x_bkg = []
y_bkg = []
xs_peak = []
ys_peak = []
comps = fit.eval_components(x=x_fit)
for i, model in enumerate(app_fitctrl.params):
for i, model in enumerate(fit_params):
if "linear" in model:
x_bkg = x_fit
y_bkg = comps[f"f{i}_"]
@ -119,15 +302,16 @@ def create():
xs_peak.append(x_fit)
ys_peak.append(comps[f"f{i}_"])
bkg_source.data.update(x=x_bkg, y=y_bkg)
peak_source.data.update(xs=xs_peak, ys=ys_peak)
plot_bkg_source.data.update(x=x_bkg, y=y_bkg)
plot_peak_source.data.update(xs=xs_peak, ys=ys_peak)
fit_output_textinput.value = fit.fit_report()
else:
fit_source.data.update(x=[], y=[])
bkg_source.data.update(x=[], y=[])
peak_source.data.update(xs=[], ys=[])
app_fitctrl.update_result_textarea(scan)
plot_fit_source.data.update(x=[], y=[])
plot_bkg_source.data.update(x=[], y=[])
plot_peak_source.data.update(xs=[], ys=[])
fit_output_textinput.value = ""
def _update_overview():
xs = []
@ -152,9 +336,9 @@ def create():
ov_plot.axis[0].axis_label = scan_motor
ov_param_plot.axis[0].axis_label = scan_motor
ov_mline_source.data.update(xs=xs, ys=ys, param=param, color=color_palette(len(xs)))
ov_plot_mline_source.data.update(xs=xs, ys=ys, param=param, color=color_palette(len(xs)))
ov_param_scatter_source.data.update(x=x, y=y)
ov_param_plot_scatter_source.data.update(x=x, y=y)
if y:
x1, x2 = min(x), max(x)
@ -164,7 +348,7 @@ def create():
np.linspace(y1, y2, ov_param_plot.inner_height),
)
image = interpolate.griddata((x, y), par, (grid_x, grid_y))
ov_param_image_source.data.update(
ov_param_plot_image_source.data.update(
image=[image], x=[x1], y=[y1], dw=[x2 - x1], dh=[y2 - y1]
)
@ -179,7 +363,7 @@ def create():
y_range.bounds = (y1, y2)
else:
ov_param_image_source.data.update(image=[], x=[], y=[], dw=[], dh=[])
ov_param_plot_image_source.data.update(image=[], x=[], y=[], dw=[], dh=[])
def _update_param_plot():
x = []
@ -198,39 +382,40 @@ def create():
y_lower.append(param_fit_val - param_fit_std)
y_upper.append(param_fit_val + param_fit_std)
param_scatter_source.data.update(x=x, y=y, y_lower=y_lower, y_upper=y_upper)
def _monitor_change():
_update_single_scan_plot()
_update_overview()
app_inputctrl = app.InputControls(
dataset, app_dlfiles, on_file_open=_init_datatable, on_monitor_change=_monitor_change
)
param_plot_scatter_source.data.update(x=x, y=y, y_lower=y_lower, y_upper=y_upper)
# Main plot
plot = figure(
x_axis_label="Scan motor",
y_axis_label="Counts",
height=450,
width=700,
tools="pan,wheel_zoom,reset",
plot = Plot(
x_range=DataRange1d(),
y_range=DataRange1d(only_visible=True),
plot_height=450,
plot_width=700,
)
scatter_source = ColumnDataSource(dict(x=[0], y=[0], y_upper=[0], y_lower=[0]))
plot.circle(
source=scatter_source, line_color="steelblue", fill_color="steelblue", legend_label="data"
plot.add_layout(LinearAxis(axis_label="Counts"), place="left")
plot.add_layout(LinearAxis(axis_label="Scan motor"), place="below")
plot.add_layout(Grid(dimension=0, ticker=BasicTicker()))
plot.add_layout(Grid(dimension=1, ticker=BasicTicker()))
plot_scatter_source = ColumnDataSource(dict(x=[0], y=[0], y_upper=[0], y_lower=[0]))
plot_scatter = plot.add_glyph(
plot_scatter_source, Scatter(x="x", y="y", line_color="steelblue", fill_color="steelblue")
)
plot.add_layout(Whisker(source=scatter_source, base="x", upper="y_upper", lower="y_lower"))
plot.add_layout(Whisker(source=plot_scatter_source, base="x", upper="y_upper", lower="y_lower"))
fit_source = ColumnDataSource(dict(x=[0], y=[0]))
plot.line(source=fit_source, legend_label="best fit")
plot_fit_source = ColumnDataSource(dict(x=[0], y=[0]))
plot_fit = plot.add_glyph(plot_fit_source, Line(x="x", y="y"))
bkg_source = ColumnDataSource(dict(x=[0], y=[0]))
plot.line(source=bkg_source, line_color="green", line_dash="dashed", legend_label="linear")
plot_bkg_source = ColumnDataSource(dict(x=[0], y=[0]))
plot_bkg = plot.add_glyph(
plot_bkg_source, Line(x="x", y="y", line_color="green", line_dash="dashed")
)
peak_source = ColumnDataSource(dict(xs=[[0]], ys=[[0]]))
plot.multi_line(source=peak_source, line_color="red", line_dash="dashed", legend_label="peak")
plot_peak_source = ColumnDataSource(dict(xs=[[0]], ys=[[0]]))
plot_peak = plot.add_glyph(
plot_peak_source, MultiLine(xs="xs", ys="ys", line_color="red", line_dash="dashed")
)
fit_from_span = Span(location=None, dimension="height", line_dash="dashed")
plot.add_layout(fit_from_span)
@ -238,61 +423,80 @@ def create():
fit_to_span = Span(location=None, dimension="height", line_dash="dashed")
plot.add_layout(fit_to_span)
plot.y_range.only_visible = True
plot.add_layout(
Legend(
items=[
("data", [plot_scatter]),
("best fit", [plot_fit]),
("peak", [plot_peak]),
("linear", [plot_bkg]),
],
location="top_left",
click_policy="hide",
)
)
plot.add_tools(PanTool(), WheelZoomTool(), ResetTool())
plot.toolbar.logo = None
plot.legend.click_policy = "hide"
# Overview multilines plot
ov_plot = figure(
x_axis_label="Scan motor",
y_axis_label="Counts",
height=450,
width=700,
tools="pan,wheel_zoom,reset",
)
ov_plot = Plot(x_range=DataRange1d(), y_range=DataRange1d(), plot_height=450, plot_width=700)
ov_mline_source = ColumnDataSource(dict(xs=[], ys=[], param=[], color=[]))
ov_plot.multi_line(source=ov_mline_source, line_color="color")
ov_plot.add_layout(LinearAxis(axis_label="Counts"), place="left")
ov_plot.add_layout(LinearAxis(axis_label="Scan motor"), place="below")
ov_plot.add_tools(HoverTool(tooltips=[("param", "@param")]))
ov_plot.add_layout(Grid(dimension=0, ticker=BasicTicker()))
ov_plot.add_layout(Grid(dimension=1, ticker=BasicTicker()))
ov_plot_mline_source = ColumnDataSource(dict(xs=[], ys=[], param=[], color=[]))
ov_plot.add_glyph(ov_plot_mline_source, MultiLine(xs="xs", ys="ys", line_color="color"))
hover_tool = HoverTool(tooltips=[("param", "@param")])
ov_plot.add_tools(PanTool(), WheelZoomTool(), hover_tool, ResetTool())
ov_plot.add_tools(PanTool(), WheelZoomTool(), ResetTool())
ov_plot.toolbar.logo = None
# Overview params plot
ov_param_plot = figure(
x_axis_label="Scan motor",
y_axis_label="Param",
x_range=Range1d(),
y_range=Range1d(),
height=450,
width=700,
tools="pan,wheel_zoom,reset",
)
# Overview params plot
ov_param_plot = Plot(x_range=Range1d(), y_range=Range1d(), plot_height=450, plot_width=700)
ov_param_plot.add_layout(LinearAxis(axis_label="Param"), place="left")
ov_param_plot.add_layout(LinearAxis(axis_label="Scan motor"), place="below")
ov_param_plot.add_layout(Grid(dimension=0, ticker=BasicTicker()))
ov_param_plot.add_layout(Grid(dimension=1, ticker=BasicTicker()))
color_mapper = LinearColorMapper(palette=Plasma256)
ov_param_image_source = ColumnDataSource(dict(image=[], x=[], y=[], dw=[], dh=[]))
ov_param_plot.image(source=ov_param_image_source, color_mapper=color_mapper)
ov_param_plot_image_source = ColumnDataSource(dict(image=[], x=[], y=[], dw=[], dh=[]))
ov_param_plot.add_glyph(
ov_param_plot_image_source,
Image(image="image", x="x", y="y", dw="dw", dh="dh", color_mapper=color_mapper),
)
ov_param_scatter_source = ColumnDataSource(dict(x=[], y=[]))
ov_param_plot.dot(source=ov_param_scatter_source, size=15, color="black")
ov_param_plot_scatter_source = ColumnDataSource(dict(x=[], y=[]))
ov_param_plot.add_glyph(
ov_param_plot_scatter_source, Scatter(x="x", y="y", marker="dot", size=15),
)
ov_param_plot.add_tools(PanTool(), WheelZoomTool(), ResetTool())
ov_param_plot.toolbar.logo = None
# Parameter plot
param_plot = figure(
x_axis_label="Parameter",
y_axis_label="Fit parameter",
height=400,
width=700,
tools="pan,wheel_zoom,reset",
)
param_plot = Plot(x_range=DataRange1d(), y_range=DataRange1d(), plot_height=400, plot_width=700)
param_scatter_source = ColumnDataSource(dict(x=[], y=[], y_upper=[], y_lower=[]))
param_plot.circle(source=param_scatter_source)
param_plot.add_layout(LinearAxis(axis_label="Fit parameter"), place="left")
param_plot.add_layout(LinearAxis(axis_label="Parameter"), place="below")
param_plot.add_layout(Grid(dimension=0, ticker=BasicTicker()))
param_plot.add_layout(Grid(dimension=1, ticker=BasicTicker()))
param_plot_scatter_source = ColumnDataSource(dict(x=[], y=[], y_upper=[], y_lower=[]))
param_plot.add_glyph(param_plot_scatter_source, Scatter(x="x", y="y"))
param_plot.add_layout(
Whisker(source=param_scatter_source, base="x", upper="y_upper", lower="y_lower")
Whisker(source=param_plot_scatter_source, base="x", upper="y_upper", lower="y_lower")
)
param_plot.add_tools(PanTool(), WheelZoomTool(), ResetTool())
param_plot.toolbar.logo = None
def fit_param_select_callback(_attr, _old, _new):
@ -364,10 +568,10 @@ def create():
scan_from = dataset[int(merge_from_select.value)]
if scan_into is scan_from:
log.warning("Selected scans for merging are identical")
print("WARNING: Selected scans for merging are identical")
return
pyzebra.merge_scans(scan_into, scan_from, log=log)
pyzebra.merge_scans(scan_into, scan_from)
_update_table()
_update_single_scan_plot()
_update_overview()
@ -398,20 +602,136 @@ def create():
)
param_select.on_change("value", param_select_callback)
app_fitctrl = app.FitControls()
def fit_from_spinner_callback(_attr, _old, new):
fit_from_span.location = new
app_fitctrl.from_spinner.on_change("value", fit_from_spinner_callback)
fit_from_spinner = Spinner(title="Fit from:", width=145)
fit_from_spinner.on_change("value", fit_from_spinner_callback)
def fit_to_spinner_callback(_attr, _old, new):
fit_to_span.location = new
app_fitctrl.to_spinner.on_change("value", fit_to_spinner_callback)
fit_to_spinner = Spinner(title="to:", width=145)
fit_to_spinner.on_change("value", fit_to_spinner_callback)
def fitparams_add_dropdown_callback(click):
# bokeh requires (str, str) for MultiSelect options
new_tag = f"{click.item}-{fitparams_select.tags[0]}"
fitparams_select.options.append((new_tag, click.item))
fit_params[new_tag] = fitparams_factory(click.item)
fitparams_select.tags[0] += 1
fitparams_add_dropdown = Dropdown(
label="Add fit function",
menu=[
("Linear", "linear"),
("Gaussian", "gaussian"),
("Voigt", "voigt"),
("Pseudo Voigt", "pvoigt"),
# ("Pseudo Voigt1", "pseudovoigt1"),
],
width=145,
)
fitparams_add_dropdown.on_click(fitparams_add_dropdown_callback)
def fitparams_select_callback(_attr, old, new):
# Avoid selection of multiple indices (via Shift+Click or Ctrl+Click)
if len(new) > 1:
# drop selection to the previous one
fitparams_select.value = old
return
if len(old) > 1:
# skip unnecessary update caused by selection drop
return
if new:
fitparams_table_source.data.update(fit_params[new[0]])
else:
fitparams_table_source.data.update(dict(param=[], value=[], vary=[], min=[], max=[]))
fitparams_select = MultiSelect(options=[], height=120, width=145)
fitparams_select.tags = [0]
fitparams_select.on_change("value", fitparams_select_callback)
def fitparams_remove_button_callback():
if fitparams_select.value:
sel_tag = fitparams_select.value[0]
del fit_params[sel_tag]
for elem in fitparams_select.options:
if elem[0] == sel_tag:
fitparams_select.options.remove(elem)
break
fitparams_select.value = []
fitparams_remove_button = Button(label="Remove fit function", width=145)
fitparams_remove_button.on_click(fitparams_remove_button_callback)
def fitparams_factory(function):
if function == "linear":
params = ["slope", "intercept"]
elif function == "gaussian":
params = ["amplitude", "center", "sigma"]
elif function == "voigt":
params = ["amplitude", "center", "sigma", "gamma"]
elif function == "pvoigt":
params = ["amplitude", "center", "sigma", "fraction"]
elif function == "pseudovoigt1":
params = ["amplitude", "center", "g_sigma", "l_sigma", "fraction"]
else:
raise ValueError("Unknown fit function")
n = len(params)
fitparams = dict(
param=params, value=[None] * n, vary=[True] * n, min=[None] * n, max=[None] * n,
)
if function == "linear":
fitparams["value"] = [0, 1]
fitparams["vary"] = [False, True]
fitparams["min"] = [None, 0]
elif function == "gaussian":
fitparams["min"] = [0, None, None]
return fitparams
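# Illustrative example (not part of the original code): fitparams_factory("gaussian")
# returns
#     dict(param=["amplitude", "center", "sigma"], value=[None, None, None],
#          vary=[True, True, True], min=[0, None, None], max=[None, None, None])
# i.e. only the amplitude gets a lower bound of 0 by default; all other bounds stay
# unconstrained until edited in the parameter table below.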
fitparams_table_source = ColumnDataSource(dict(param=[], value=[], vary=[], min=[], max=[]))
fitparams_table = DataTable(
source=fitparams_table_source,
columns=[
TableColumn(field="param", title="Parameter", editor=CellEditor()),
TableColumn(field="value", title="Value", editor=NumberEditor()),
TableColumn(field="vary", title="Vary", editor=CheckboxEditor()),
TableColumn(field="min", title="Min", editor=NumberEditor()),
TableColumn(field="max", title="Max", editor=NumberEditor()),
],
height=200,
width=350,
index_position=None,
editable=True,
auto_edit=True,
)
# start with `linear` and `gaussian` fit functions added
fitparams_add_dropdown_callback(types.SimpleNamespace(item="linear"))
fitparams_add_dropdown_callback(types.SimpleNamespace(item="gaussian"))
fitparams_select.value = ["gaussian-1"] # add selection to gauss
fit_output_textinput = TextAreaInput(title="Fit results:", width=750, height=200)
def proc_all_button_callback():
app_fitctrl.fit_dataset(dataset)
for scan in dataset:
if scan["export"]:
pyzebra.fit_scan(
scan, fit_params, fit_from=fit_from_spinner.value, fit_to=fit_to_spinner.value
)
pyzebra.get_area(
scan,
area_method=AREA_METHODS[area_method_radiobutton.active],
lorentz=lorentz_checkbox.active,
)
_update_single_scan_plot()
_update_overview()
@ -428,7 +748,15 @@ def create():
proc_all_button.on_click(proc_all_button_callback)
def proc_button_callback():
app_fitctrl.fit_scan(_get_selected_scan())
scan = _get_selected_scan()
pyzebra.fit_scan(
scan, fit_params, fit_from=fit_from_spinner.value, fit_to=fit_to_spinner.value
)
pyzebra.get_area(
scan,
area_method=AREA_METHODS[area_method_radiobutton.active],
lorentz=lorentz_checkbox.active,
)
_update_single_scan_plot()
_update_overview()
@ -444,6 +772,11 @@ def create():
proc_button = Button(label="Process Current", width=145)
proc_button.on_click(proc_button_callback)
area_method_div = Div(text="Intensity:", margin=(5, 5, 0, 5))
area_method_radiobutton = RadioGroup(labels=["Function", "Area"], active=0, width=145)
lorentz_checkbox = CheckboxGroup(labels=["Lorentz Correction"], width=145, margin=(13, 5, 5, 5))
export_preview_textinput = TextAreaInput(title="Export file preview:", width=450, height=400)
def _update_preview():
@ -470,49 +803,40 @@ def create():
content = ""
file_content.append(content)
app_dlfiles.set_contents(file_content)
js_data.data.update(content=file_content)
export_preview_textinput.value = exported_content
area_method_div = Div(text="Intensity:", margin=(5, 5, 0, 5))
save_button = Button(label="Download File", button_type="success", width=220)
save_button.js_on_click(CustomJS(args={"js_data": js_data}, code=javaScript))
fitpeak_controls = row(
column(
app_fitctrl.add_function_button,
app_fitctrl.function_select,
app_fitctrl.remove_function_button,
),
app_fitctrl.params_table,
column(fitparams_add_dropdown, fitparams_select, fitparams_remove_button),
fitparams_table,
Spacer(width=20),
column(
app_fitctrl.from_spinner,
app_fitctrl.lorentz_checkbox,
area_method_div,
app_fitctrl.area_method_radiogroup,
),
column(app_fitctrl.to_spinner, proc_button, proc_all_button),
column(fit_from_spinner, lorentz_checkbox, area_method_div, area_method_radiobutton),
column(fit_to_spinner, proc_button, proc_all_button),
)
scan_layout = column(
scan_table,
row(app_inputctrl.monitor_spinner, scan_motor_select, param_select),
row(monitor_spinner, scan_motor_select, param_select),
row(column(Spacer(height=19), row(restore_button, merge_button)), merge_from_select),
)
upload_div = Div(text="or upload new .ccl/.dat files:", margin=(5, 5, 0, 5))
append_upload_div = Div(text="append extra files:", margin=(5, 5, 0, 5))
import_layout = column(
app_inputctrl.filelist_select,
row(app_inputctrl.open_button, app_inputctrl.append_button),
file_select,
row(file_open_button, file_append_button),
upload_div,
app_inputctrl.upload_button,
upload_button,
append_upload_div,
app_inputctrl.append_upload_button,
append_upload_button,
)
export_layout = column(export_preview_textinput, row(app_dlfiles.button))
export_layout = column(export_preview_textinput, row(save_button))
tab_layout = column(
row(import_layout, scan_layout, plots, Spacer(width=30), export_layout),
row(fitpeak_controls, app_fitctrl.result_textarea),
row(fitpeak_controls, fit_output_textinput),
)
return Panel(child=tab_layout, title="param study")

View File

@ -1,442 +0,0 @@
import base64
import io
import os
import numpy as np
from bokeh.io import curdoc
from bokeh.layouts import column, row
from bokeh.models import (
Button,
CheckboxGroup,
ColorBar,
ColumnDataSource,
DataRange1d,
Div,
FileInput,
LinearColorMapper,
LogColorMapper,
NumericInput,
Panel,
RadioGroup,
Select,
Spacer,
Spinner,
TextInput,
)
from bokeh.plotting import figure
from scipy import interpolate
import pyzebra
from pyzebra import app
from pyzebra.app.panel_hdf_viewer import calculate_hkl
def create():
doc = curdoc()
log = doc.logger
_update_slice = None
measured_data_div = Div(text="Measured <b>HDF</b> data:")
measured_data = FileInput(accept=".hdf", multiple=True, width=200)
upload_hkl_div = Div(text="Open hkl/mhkl data:")
upload_hkl_fi = FileInput(accept=".hkl,.mhkl", multiple=True, width=200)
def _prepare_plotting():
flag_ub = bool(redef_ub_cb.active)
flag_lattice = bool(redef_lattice_cb.active)
# Define horizontal direction of plotting plane, vertical direction will be calculated
# automatically
x_dir = list(map(float, hkl_in_plane_x.value.split()))
# Define direction orthogonal to plotting plane. Together with orth_cut, this parameter also
# defines the position of the cut, i.e. the cut is taken at orth_dir = [x,y,z]*orth_cut +- delta,
# where delta is the maximum distance a data point may have from the cut in rlu units
orth_dir = list(map(float, hkl_normal.value.split()))
# Load data files
md_fnames = measured_data.filename
md_fdata = measured_data.value
for ind, (fname, fdata) in enumerate(zip(md_fnames, md_fdata)):
# Read data
try:
det_data = pyzebra.read_detector_data(io.BytesIO(base64.b64decode(fdata)))
except Exception as e:
log.exception(e)
return None
if ind == 0:
if not flag_ub:
redef_ub_ti.value = " ".join(map(str, det_data["ub"].ravel()))
if not flag_lattice:
redef_lattice_ti.value = " ".join(map(str, det_data["cell"]))
num_slices = np.shape(det_data["counts"])[0]
# Override the UB matrix if requested
if flag_ub:
ub = list(map(float, redef_ub_ti.value.strip().split()))
det_data["ub"] = np.array(ub).reshape(3, 3)
# Convert h k l for all images in file
h_temp = np.empty(np.shape(det_data["counts"]))
k_temp = np.empty(np.shape(det_data["counts"]))
l_temp = np.empty(np.shape(det_data["counts"]))
for i in range(num_slices):
h_temp[i], k_temp[i], l_temp[i] = calculate_hkl(det_data, i)
# Append to matrix
if ind == 0:
h = h_temp
k = k_temp
l = l_temp
I_matrix = det_data["counts"]
else:
h = np.append(h, h_temp, axis=0)
k = np.append(k, k_temp, axis=0)
l = np.append(l, l_temp, axis=0)
I_matrix = np.append(I_matrix, det_data["counts"], axis=0)
if flag_lattice:
vals = list(map(float, redef_lattice_ti.value.strip().split()))
lattice = np.array(vals)
else:
lattice = det_data["cell"]
# Define matrix for converting to cartesian coordinates and back
alpha = lattice[3] * np.pi / 180.0
beta = lattice[4] * np.pi / 180.0
gamma = lattice[5] * np.pi / 180.0
# reciprocal angle parameters
beta_star = np.arccos(
(np.cos(alpha) * np.cos(gamma) - np.cos(beta)) / (np.sin(alpha) * np.sin(gamma))
)
gamma_star = np.arccos(
(np.cos(alpha) * np.cos(beta) - np.cos(gamma)) / (np.sin(alpha) * np.sin(beta))
)
# conversion matrix:
M = np.array(
[
[1, 1 * np.cos(gamma_star), 1 * np.cos(beta_star)],
[0, 1 * np.sin(gamma_star), -np.sin(beta_star) * np.cos(alpha)],
[0, 0, 1 * np.sin(beta_star) * np.sin(alpha)],
]
)
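# Illustrative sanity check (not part of the original code): for a cubic cell with
# alpha = beta = gamma = 90 deg both reciprocal angles are 90 deg, so M reduces to
# the identity and hkl coordinates are already cartesian:
#     a = g = np.pi / 2
#     np.allclose(np.array([[1, np.cos(g), np.cos(g)],
#                           [0, np.sin(g), -np.sin(g) * np.cos(a)],
#                           [0, 0, np.sin(g) * np.sin(a)]]), np.eye(3))  # -> True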
# Get last lattice vector
y_dir = np.cross(x_dir, orth_dir)  # Second axis of plotting plane
# Rescale such that smallest element of y-dir vector is 1
y_dir2 = y_dir[y_dir != 0]
min_val = np.min(np.abs(y_dir2))
y_dir = y_dir / min_val
# Possibly flip direction of y_dir:
if y_dir[np.argmax(abs(y_dir))] < 0:
y_dir = -y_dir
# Display the resulting y_dir
hkl_in_plane_y.value = " ".join([f"{val:.1f}" for val in y_dir])
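# Worked example (illustrative) with the widget defaults below, x_dir = [1, 0, 0] and
# orth_dir = [0, 0, 1]: np.cross gives [0, -1, 0]; the smallest non-zero magnitude is
# already 1, and the sign flip makes the largest component positive, so the displayed
# in-plane Y direction becomes "0.0 1.0 0.0".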
# # Save length of lattice vectors
# x_length = np.linalg.norm(x_dir)
# y_length = np.linalg.norm(y_dir)
# # Save str for labels
# xlabel_str = " ".join(map(str, x_dir))
# ylabel_str = " ".join(map(str, y_dir))
# Normalize lattice vectors
y_dir = y_dir / np.linalg.norm(y_dir)
x_dir = x_dir / np.linalg.norm(x_dir)
orth_dir = orth_dir / np.linalg.norm(orth_dir)
# Calculate cartesian equivalents of lattice vectors
x_c = np.matmul(M, x_dir)
y_c = np.matmul(M, y_dir)
o_c = np.matmul(M, orth_dir)
# Calculate vertical direction in plotting plane
y_vert = np.cross(x_c, o_c)  # vertical direction in plotting plane
if y_vert[np.argmax(abs(y_vert))] < 0:
y_vert = -y_vert
y_vert = y_vert / np.linalg.norm(y_vert)
# Normalize all directions
y_c = y_c / np.linalg.norm(y_c)
x_c = x_c / np.linalg.norm(x_c)
o_c = o_c / np.linalg.norm(o_c)
# Convert all hkls to cartesian
hkl = [[h, k, l]]
hkl = np.transpose(hkl)
hkl_c = np.matmul(M, hkl)
# Prepare hkl/mhkl data
hkl_coord = []
for j, fname in enumerate(upload_hkl_fi.filename):
with io.StringIO(base64.b64decode(upload_hkl_fi.value[j]).decode()) as file:
_, ext = os.path.splitext(fname)
try:
fdata = pyzebra.parse_hkl(file, ext)
except Exception as e:
log.exception(e)
return
for ind in range(len(fdata["counts"])):
# Collect the hkl indices of this reflection
hkl = np.array([fdata["h"][ind], fdata["k"][ind], fdata["l"][ind]])
# Save data
hkl_coord.append(hkl)
def _update_slice():
# Where the cut should be along the orthogonal direction (multiplication factor onto orth_dir)
orth_cut = hkl_cut.value
# Width of cut
delta = hkl_delta.value
# Calculate distance of all points to plane
Q = np.array(o_c) * orth_cut
N = o_c / np.sqrt(np.sum(o_c**2))
v = np.empty(np.shape(hkl_c))
v[:, :, :, :, 0] = hkl_c[:, :, :, :, 0] - Q
dist = np.abs(np.dot(N, v))
dist = np.squeeze(dist)
dist = np.transpose(dist)
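# Note: this is the usual point-to-plane distance - for the cut plane through
# Q = orth_cut * o_c with unit normal N, every hkl point P contributes |N . (P - Q)|,
# so dist holds the rlu distance of each voxel from the cut plane. E.g. with
# o_c = [0, 0, 1] and orth_cut = 2, a point at [1, 1, 2.3] lies 0.3 rlu from the cut.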
# Find points within acceptable distance of plane defined by o_c
ind = np.where(abs(dist) < delta)
if ind[0].size == 0:
image_source.data.update(image=[np.zeros((1, 1))])
return
# Project points onto axes
x = np.dot(x_c / np.sqrt(np.sum(x_c**2)), hkl_c)
y = np.dot(y_c / np.sqrt(np.sum(y_c**2)), hkl_c)
# take care of dimensions: drop singleton axes and transpose to match the data layout
x = np.squeeze(x)
x = np.transpose(x)
y = np.squeeze(y)
y = np.transpose(y)
# Get slices:
x_slice = x[ind]
y_slice = y[ind]
I_slice = I_matrix[ind]
# Meshgrid limits for plotting
if auto_range_cb.active:
min_x = np.min(x_slice)
max_x = np.max(x_slice)
min_y = np.min(y_slice)
max_y = np.max(y_slice)
xrange_min_ni.value = min_x
xrange_max_ni.value = max_x
yrange_min_ni.value = min_y
yrange_max_ni.value = max_y
else:
min_x = xrange_min_ni.value
max_x = xrange_max_ni.value
min_y = yrange_min_ni.value
max_y = yrange_max_ni.value
delta_x = xrange_step_ni.value
delta_y = yrange_step_ni.value
# Create interpolated mesh grid for plotting
grid_x, grid_y = np.mgrid[min_x:max_x:delta_x, min_y:max_y:delta_y]
I = interpolate.griddata((x_slice, y_slice), I_slice, (grid_x, grid_y))
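# Note: scipy's griddata (linear by default) returns NaN for grid points outside the
# convex hull of the sliced data; those pixels stay transparent because the color
# mappers further down in this file are created with nan_color=(0, 0, 0, 0).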
# Update plot
display_min_ni.value = 0
display_max_ni.value = np.max(I_slice) * 0.25
image_source.data.update(
image=[I.T], x=[min_x], dw=[max_x - min_x], y=[min_y], dh=[max_y - min_y]
)
scan_x, scan_y = [], []
for j in range(len(hkl_coord)):
# Get middle hkl from list
hklm = M @ hkl_coord[j]
# Decide if point is in the cut
proj = np.dot(hklm, o_c)
if abs(proj - orth_cut) >= delta:
continue
# Project onto axes
hklmx = np.dot(hklm, x_c)
hklmy = np.dot(hklm, y_vert)
# Plot middle point of scan
scan_x.append(hklmx)
scan_y.append(hklmy)
scatter_source.data.update(x=scan_x, y=scan_y)
return _update_slice
def plot_file_callback():
nonlocal _update_slice
_update_slice = _prepare_plotting()
_update_slice()
plot_file = Button(label="Plot selected file(s)", button_type="primary", width=200)
plot_file.on_click(plot_file_callback)
plot = figure(
x_range=DataRange1d(),
y_range=DataRange1d(),
height=550 + 27,
width=550 + 117,
tools="pan,wheel_zoom,reset",
)
plot.toolbar.logo = None
lin_color_mapper = LinearColorMapper(nan_color=(0, 0, 0, 0), low=0, high=1)
log_color_mapper = LogColorMapper(nan_color=(0, 0, 0, 0), low=0, high=1)
image_source = ColumnDataSource(dict(image=[np.zeros((1, 1))], x=[0], y=[0], dw=[1], dh=[1]))
plot_image = plot.image(source=image_source, color_mapper=lin_color_mapper)
lin_color_bar = ColorBar(color_mapper=lin_color_mapper, width=15)
log_color_bar = ColorBar(color_mapper=log_color_mapper, width=15, visible=False)
plot.add_layout(lin_color_bar, "right")
plot.add_layout(log_color_bar, "right")
scatter_source = ColumnDataSource(dict(x=[], y=[]))
plot.scatter(source=scatter_source, size=4, fill_color="green", line_color="green")
hkl_div = Div(text="HKL:", margin=(5, 5, 0, 5))
hkl_normal = TextInput(title="normal", value="0 0 1", width=70)
def hkl_cut_callback(_attr, _old, _new):
if _update_slice is not None:
_update_slice()
hkl_cut = Spinner(title="cut", value=0, step=0.1, width=70)
hkl_cut.on_change("value_throttled", hkl_cut_callback)
hkl_delta = NumericInput(title="delta", value=0.1, mode="float", width=70)
hkl_in_plane_x = TextInput(title="in-plane X", value="1 0 0", width=70)
hkl_in_plane_y = TextInput(title="in-plane Y", value="", width=100, disabled=True)
def redef_lattice_cb_callback(_attr, _old, new):
if 0 in new:
redef_lattice_ti.disabled = False
else:
redef_lattice_ti.disabled = True
redef_lattice_cb = CheckboxGroup(labels=["Redefine lattice:"], width=110)
redef_lattice_cb.on_change("active", redef_lattice_cb_callback)
redef_lattice_ti = TextInput(width=490, disabled=True)
def redef_ub_cb_callback(_attr, _old, new):
if 0 in new:
redef_ub_ti.disabled = False
else:
redef_ub_ti.disabled = True
redef_ub_cb = CheckboxGroup(labels=["Redefine UB:"], width=110)
redef_ub_cb.on_change("active", redef_ub_cb_callback)
redef_ub_ti = TextInput(width=490, disabled=True)
def colormap_select_callback(_attr, _old, new):
lin_color_mapper.palette = new
log_color_mapper.palette = new
colormap_select = Select(
title="Colormap:",
options=[("Greys256", "greys"), ("Plasma256", "plasma"), ("Cividis256", "cividis")],
width=100,
)
colormap_select.on_change("value", colormap_select_callback)
colormap_select.value = "Plasma256"
def display_min_ni_callback(_attr, _old, new):
lin_color_mapper.low = new
log_color_mapper.low = new
display_min_ni = NumericInput(title="Intensity min:", value=0, mode="float", width=70)
display_min_ni.on_change("value", display_min_ni_callback)
def display_max_ni_callback(_attr, _old, new):
lin_color_mapper.high = new
log_color_mapper.high = new
display_max_ni = NumericInput(title="max:", value=1, mode="float", width=70)
display_max_ni.on_change("value", display_max_ni_callback)
def colormap_scale_rg_callback(_attr, _old, new):
if new == 0: # Linear
plot_image.glyph.color_mapper = lin_color_mapper
lin_color_bar.visible = True
log_color_bar.visible = False
else: # Logarithmic
if display_min_ni.value > 0 and display_max_ni.value > 0:
plot_image.glyph.color_mapper = log_color_mapper
lin_color_bar.visible = False
log_color_bar.visible = True
else:
colormap_scale_rg.active = 0
colormap_scale_rg = RadioGroup(labels=["Linear", "Logarithmic"], active=0, width=100)
colormap_scale_rg.on_change("active", colormap_scale_rg_callback)
xrange_min_ni = NumericInput(title="x range min:", value=0, mode="float", width=70)
xrange_max_ni = NumericInput(title="max:", value=1, mode="float", width=70)
xrange_step_ni = NumericInput(title="x mesh:", value=0.01, mode="float", width=70)
yrange_min_ni = NumericInput(title="y range min:", value=0, mode="float", width=70)
yrange_max_ni = NumericInput(title="max:", value=1, mode="float", width=70)
yrange_step_ni = NumericInput(title="y mesh:", value=0.01, mode="float", width=70)
def auto_range_cb_callback(_attr, _old, new):
if 0 in new:
xrange_min_ni.disabled = True
xrange_max_ni.disabled = True
yrange_min_ni.disabled = True
yrange_max_ni.disabled = True
else:
xrange_min_ni.disabled = False
xrange_max_ni.disabled = False
yrange_min_ni.disabled = False
yrange_max_ni.disabled = False
auto_range_cb = CheckboxGroup(labels=["Auto range:"], width=110)
auto_range_cb.on_change("active", auto_range_cb_callback)
auto_range_cb.active = [0]
column1_layout = column(
row(
column(row(measured_data_div, measured_data), row(upload_hkl_div, upload_hkl_fi)),
plot_file,
),
row(
plot,
column(
hkl_div,
row(hkl_normal, hkl_cut, hkl_delta),
row(hkl_in_plane_x, hkl_in_plane_y),
row(colormap_select, column(Spacer(height=15), colormap_scale_rg)),
row(display_min_ni, display_max_ni),
row(column(Spacer(height=19), auto_range_cb)),
row(xrange_min_ni, xrange_max_ni),
row(yrange_min_ni, yrange_max_ni),
row(xrange_step_ni, yrange_step_ni),
),
),
row(column(Spacer(height=7), redef_lattice_cb), redef_lattice_ti),
row(column(Spacer(height=7), redef_ub_cb), redef_ub_ti),
)
column2_layout = app.PlotHKL().layout
tab_layout = row(column1_layout, Spacer(width=50), column2_layout)
return Panel(child=tab_layout, title="plot data")

View File

@ -21,7 +21,6 @@ import pyzebra
def create():
doc = curdoc()
log = doc.logger
events_data = doc.events_data
npeaks_spinner = Spinner(title="Number of peaks from hdf_view panel:", disabled=True)
@ -64,8 +63,8 @@ def create():
stderr=subprocess.STDOUT,
text=True,
)
log.info(" ".join(comp_proc.args))
log.info(comp_proc.stdout)
print(" ".join(comp_proc.args))
print(comp_proc.stdout)
# prepare an event file
diff_vec = []
@ -95,9 +94,9 @@ def create():
f"{x_pos} {y_pos} {intensity} {snr_cnts} {dv1} {dv2} {dv3} {d_spacing}\n"
)
log.info(f"Content of {temp_event_file}:")
print(f"Content of {temp_event_file}:")
with open(temp_event_file) as f:
log.info(f.read())
print(f.read())
comp_proc = subprocess.run(
[
@ -124,12 +123,12 @@ def create():
stderr=subprocess.STDOUT,
text=True,
)
log.info(" ".join(comp_proc.args))
log.info(comp_proc.stdout)
print(" ".join(comp_proc.args))
print(comp_proc.stdout)
spind_out_file = os.path.join(temp_dir, "spind.txt")
spind_res = dict(
label=[], crystal_id=[], match_rate=[], matched_peaks=[], column_5=[], ub_matrix=[]
label=[], crystal_id=[], match_rate=[], matched_peaks=[], column_5=[], ub_matrix=[],
)
try:
with open(spind_out_file) as f_out:
@ -147,12 +146,12 @@ def create():
ub_matrices.append(ub_matrix_spind)
spind_res["ub_matrix"].append(str(ub_matrix_spind * 1e-10))
log.info(f"Content of {spind_out_file}:")
print(f"Content of {spind_out_file}:")
with open(spind_out_file) as f:
log.info(f.read())
print(f.read())
except FileNotFoundError:
log.warning("No results from spind")
print("No results from spind")
results_table_source.data.update(spind_res)

View File

@ -1,549 +0,0 @@
import base64
import io
import os
import numpy as np
from bokeh.io import curdoc
from bokeh.layouts import column, row
from bokeh.models import (
Arrow,
Button,
CheckboxGroup,
ColumnDataSource,
Div,
FileInput,
HoverTool,
Legend,
LegendItem,
NormalHead,
NumericInput,
RadioGroup,
Spinner,
TextAreaInput,
TextInput,
)
from bokeh.palettes import Dark2
from bokeh.plotting import figure
from scipy.integrate import simpson, trapezoid
import pyzebra
class PlotHKL:
def __init__(self):
doc = curdoc()
log = doc.logger
_update_slice = None
measured_data_div = Div(text="Measured <b>CCL</b> data:")
measured_data = FileInput(accept=".ccl", multiple=True, width=200)
upload_hkl_div = Div(text="Open hkl/mhkl data:")
upload_hkl_fi = FileInput(accept=".hkl,.mhkl", multiple=True, width=200)
min_grid_x = -10
max_grid_x = 10
min_grid_y = -10
max_grid_y = 10
cmap = Dark2[8]
syms = ["circle", "inverted_triangle", "square", "diamond", "star", "triangle"]
def _prepare_plotting():
orth_dir = list(map(float, hkl_normal.value.split()))
x_dir = list(map(float, hkl_in_plane_x.value.split()))
k = np.array(k_vectors.value.split()).astype(float).reshape(-1, 3)
tol_k = tol_k_ni.value
# multiplier for resolution function (in case of samples with large mosaicity)
res_mult = res_mult_ni.value
md_fnames = measured_data.filename
md_fdata = measured_data.value
# Load first data file, read angles and define matrices to perform conversion to cartesian
# coordinates and back
with io.StringIO(base64.b64decode(md_fdata[0]).decode()) as file:
_, ext = os.path.splitext(md_fnames[0])
try:
file_data = pyzebra.parse_1D(file, ext, log=log)
except Exception as e:
log.exception(e)
return None
alpha = file_data[0]["alpha_cell"] * np.pi / 180.0
beta = file_data[0]["beta_cell"] * np.pi / 180.0
gamma = file_data[0]["gamma_cell"] * np.pi / 180.0
# reciprocal angle parameters
beta_star = np.arccos(
(np.cos(alpha) * np.cos(gamma) - np.cos(beta)) / (np.sin(alpha) * np.sin(gamma))
)
gamma_star = np.arccos(
(np.cos(alpha) * np.cos(beta) - np.cos(gamma)) / (np.sin(alpha) * np.sin(beta))
)
# conversion matrix
M = np.array(
[
[1, np.cos(gamma_star), np.cos(beta_star)],
[0, np.sin(gamma_star), -np.sin(beta_star) * np.cos(alpha)],
[0, 0, np.sin(beta_star) * np.sin(alpha)],
]
)
# Get last lattice vector
y_dir = np.cross(x_dir, orth_dir)  # Second axis of plotting plane
# Rescale such that smallest element of y-dir vector is 1
y_dir2 = y_dir[y_dir != 0]
min_val = np.min(np.abs(y_dir2))
y_dir = y_dir / min_val
# Possibly flip direction of y_dir:
if y_dir[np.argmax(abs(y_dir))] < 0:
y_dir = -y_dir
# Display the resulting y_dir
hkl_in_plane_y.value = " ".join([f"{val:.1f}" for val in y_dir])
# Save length of lattice vectors
x_length = np.linalg.norm(x_dir)
y_length = np.linalg.norm(y_dir)
# Save str for labels
xlabel_str = " ".join(map(str, x_dir))
ylabel_str = " ".join(map(str, y_dir))
# Normalize lattice vectors
y_dir = y_dir / np.linalg.norm(y_dir)
x_dir = x_dir / np.linalg.norm(x_dir)
orth_dir = orth_dir / np.linalg.norm(orth_dir)
# Calculate cartesian equivalents of lattice vectors
x_c = np.matmul(M, x_dir)
y_c = np.matmul(M, y_dir)
o_c = np.matmul(M, orth_dir)
# Calculate vertical direction in plotting plane
y_vert = np.cross(x_c, o_c)  # vertical direction in plotting plane
if y_vert[np.argmax(abs(y_vert))] < 0:
y_vert = -y_vert
y_vert = y_vert / np.linalg.norm(y_vert)
# Normalize all directions
y_c = y_c / np.linalg.norm(y_c)
x_c = x_c / np.linalg.norm(x_c)
o_c = o_c / np.linalg.norm(o_c)
# Read all data
hkl_coord = []
intensity_vec = []
k_flag_vec = []
file_flag_vec = []
res_vec = []
res_N = 10
for j, md_fname in enumerate(md_fnames):
with io.StringIO(base64.b64decode(md_fdata[j]).decode()) as file:
_, ext = os.path.splitext(md_fname)
try:
file_data = pyzebra.parse_1D(file, ext, log=log)
except Exception as e:
log.exception(e)
return None
pyzebra.normalize_dataset(file_data)
# Loop through all data
for scan in file_data:
om = scan["omega"]
gammad = scan["twotheta"]
chi = scan["chi"]
phi = scan["phi"]
nud = 0 # 1d detector
ub_inv = np.linalg.inv(scan["ub"])
counts = scan["counts"]
wave = scan["wavelength"]
# Calculate resolution in degrees
expr = np.tan(gammad / 2 * np.pi / 180)
fwhm = np.sqrt(0.4639 * expr**2 - 0.4452 * expr + 0.1506) * res_mult
res = 4 * np.pi / wave * np.sin(fwhm * np.pi / 180)
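# Note: this looks like a Caglioti-type resolution parametrization,
# fwhm^2 = U*tan(theta)^2 + V*tan(theta) + W, with hard-coded U, V, W that are
# presumably instrument-specific, converted to a reciprocal-space width via the
# 4*pi/lambda*sin() relation. Illustrative: at gammad = 60 deg the polynomial gives
# roughly 0.22 deg before the res_mult scaling.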
# Get first and final hkl
hkl1 = pyzebra.ang2hkl_1d(wave, gammad, om[0], chi, phi, nud, ub_inv)
hkl2 = pyzebra.ang2hkl_1d(wave, gammad, om[-1], chi, phi, nud, ub_inv)
# Get hkl at best intensity
hkl_m = pyzebra.ang2hkl_1d(
wave, gammad, om[np.argmax(counts)], chi, phi, nud, ub_inv
)
# Estimate intensity for marker size scaling
y_bkg = [counts[0], counts[-1]]
x_bkg = [om[0], om[-1]]
c = int(simpson(counts, x=om) - trapezoid(y_bkg, x=x_bkg))
# Recognize k_flag_vec
reduced_hkl_m = np.minimum(1 - hkl_m % 1, hkl_m % 1)
for ind, _k in enumerate(k):
if all(np.abs(reduced_hkl_m - _k) < tol_k):
k_flag_vec.append(ind)
break
else:
# no propagation vector matched within tolerance - skip this scan
continue
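# Illustrative example with the default k list and tol_k = 0.01:
# hkl_m = [0.498, 1.005, 3.002] -> hkl_m % 1 = [0.498, 0.005, 0.002] and
# reduced_hkl_m = [0.498, 0.005, 0.002], which matches k = [0.5, 0.0, 0.0] within
# tolerance, so index 1 is stored in k_flag_vec; scans that match none of the listed
# propagation vectors are skipped by the for/else above.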
# Save data
hkl_coord.append([hkl1, hkl2, hkl_m])
intensity_vec.append(c)
file_flag_vec.append(j)
res_vec.append(res)
x_spacing = np.dot(M @ x_dir, x_c) * x_length
y_spacing = np.dot(M @ y_dir, y_vert) * y_length
y_spacingx = np.dot(M @ y_dir, x_c) * y_length
# Plot coordinate system
arrow1.x_end = x_spacing
arrow1.y_end = 0
arrow2.x_end = y_spacingx
arrow2.y_end = y_spacing
# Add labels
kvect_source.data.update(
x=[x_spacing / 4, -0.1],
y=[x_spacing / 4 - 0.5, y_spacing / 2],
text=[xlabel_str, ylabel_str],
)
# Plot grid lines
xs, ys = [], []
xs_minor, ys_minor = [], []
for yy in np.arange(min_grid_y, max_grid_y, 1):
# Calculate end and start point
hkl1 = min_grid_x * x_dir + yy * y_dir
hkl2 = max_grid_x * x_dir + yy * y_dir
hkl1 = M @ hkl1
hkl2 = M @ hkl2
# Project points onto axes
x1 = np.dot(x_c, hkl1) * x_length
y1 = np.dot(y_vert, hkl1) * y_length
x2 = np.dot(x_c, hkl2) * x_length
y2 = np.dot(y_vert, hkl2) * y_length
xs.append([x1, x2])
ys.append([y1, y2])
for xx in np.arange(min_grid_x, max_grid_x, 1):
# Calculate end and start point
hkl1 = xx * x_dir + min_grid_y * y_dir
hkl2 = xx * x_dir + max_grid_y * y_dir
hkl1 = M @ hkl1
hkl2 = M @ hkl2
# Project points onto axes
x1 = np.dot(x_c, hkl1) * x_length
y1 = np.dot(y_vert, hkl1) * y_length
x2 = np.dot(x_c, hkl2) * x_length
y2 = np.dot(y_vert, hkl2) * y_length
xs.append([x1, x2])
ys.append([y1, y2])
for yy in np.arange(min_grid_y, max_grid_y, 0.5):
# Calculate end and start point
hkl1 = min_grid_x * x_dir + yy * y_dir
hkl2 = max_grid_x * x_dir + yy * y_dir
hkl1 = M @ hkl1
hkl2 = M @ hkl2
# Project points onto axes
x1 = np.dot(x_c, hkl1) * x_length
y1 = np.dot(y_vert, hkl1) * y_length
x2 = np.dot(x_c, hkl2) * x_length
y2 = np.dot(y_vert, hkl2) * y_length
xs_minor.append([x1, x2])
ys_minor.append([y1, y2])
for xx in np.arange(min_grid_x, max_grid_x, 0.5):
# Calculate end and start point
hkl1 = xx * x_dir + min_grid_y * y_dir
hkl2 = xx * x_dir + max_grid_y * y_dir
hkl1 = M @ hkl1
hkl2 = M @ hkl2
# Project points onto axes
x1 = np.dot(x_c, hkl1) * x_length
y1 = np.dot(y_vert, hkl1) * y_length
x2 = np.dot(x_c, hkl2) * x_length
y2 = np.dot(y_vert, hkl2) * y_length
xs_minor.append([x1, x2])
ys_minor.append([y1, y2])
grid_source.data.update(xs=xs, ys=ys)
minor_grid_source.data.update(xs=xs_minor, ys=ys_minor)
# Prepare hkl/mhkl data
hkl_coord2 = []
for j, fname in enumerate(upload_hkl_fi.filename):
with io.StringIO(base64.b64decode(upload_hkl_fi.value[j]).decode()) as file:
_, ext = os.path.splitext(fname)
try:
fdata = pyzebra.parse_hkl(file, ext)
except Exception as e:
log.exception(e)
return
for ind in range(len(fdata["counts"])):
# Collect the hkl indices of this reflection
hkl = np.array([fdata["h"][ind], fdata["k"][ind], fdata["l"][ind]])
# Save data
hkl_coord2.append(hkl)
def _update_slice():
cut_tol = hkl_delta.value
cut_or = hkl_cut.value
# different symbols based on file number
file_flag = 0 in disting_opt_cb.active
# scale marker size according to intensity
intensity_flag = 1 in disting_opt_cb.active
# use color to mark different propagation vectors
prop_legend_flag = 2 in disting_opt_cb.active
# use resolution ellipses
res_flag = disting_opt_rb.active
el_x, el_y, el_w, el_h, el_c = [], [], [], [], []
scan_xs, scan_ys, scan_x, scan_y = [], [], [], []
scan_m, scan_s, scan_c, scan_l, scan_hkl = [], [], [], [], []
for j in range(len(hkl_coord)):
# Get middle hkl from list
hklm = M @ hkl_coord[j][2]
# Decide if point is in the cut
proj = np.dot(hklm, o_c)
if abs(proj - cut_or) >= cut_tol:
continue
hkl1 = M @ hkl_coord[j][0]
hkl2 = M @ hkl_coord[j][1]
# Project onto axes
hkl1x = np.dot(hkl1, x_c)
hkl1y = np.dot(hkl1, y_vert)
hkl2x = np.dot(hkl2, x_c)
hkl2y = np.dot(hkl2, y_vert)
hklmx = np.dot(hklm, x_c)
hklmy = np.dot(hklm, y_vert)
if intensity_flag:
markersize = max(6, int(intensity_vec[j] / max(intensity_vec) * 30))
else:
markersize = 6
if file_flag:
plot_symbol = syms[file_flag_vec[j]]
else:
plot_symbol = "circle"
if prop_legend_flag:
col_value = cmap[k_flag_vec[j]]
else:
col_value = "black"
if res_flag:
# Generate series of circles along scan line
res = res_vec[j]
el_x.extend(np.linspace(hkl1x, hkl2x, num=res_N))
el_y.extend(np.linspace(hkl1y, hkl2y, num=res_N))
el_w.extend([res / 2] * res_N)
el_h.extend([res / 2] * res_N)
el_c.extend([col_value] * res_N)
else:
# Plot scan line
scan_xs.append([hkl1x, hkl2x])
scan_ys.append([hkl1y, hkl2y])
# Plot middle point of scan
scan_x.append(hklmx)
scan_y.append(hklmy)
scan_m.append(plot_symbol)
scan_s.append(markersize)
# Color and legend label
scan_c.append(col_value)
scan_l.append(md_fnames[file_flag_vec[j]])
scan_hkl.append(hkl_coord[j][2])
ellipse_source.data.update(x=el_x, y=el_y, width=el_w, height=el_h, c=el_c)
scan_source.data.update(
xs=scan_xs,
ys=scan_ys,
x=scan_x,
y=scan_y,
m=scan_m,
s=scan_s,
c=scan_c,
l=scan_l,
hkl=scan_hkl,
)
# Legend items for different file entries (symbol)
legend_items = []
if not res_flag and file_flag:
labels, inds = np.unique(scan_source.data["l"], return_index=True)
for label, ind in zip(labels, inds):
legend_items.append(LegendItem(label=label, renderers=[scatter], index=ind))
# Legend items for propagation vector (color)
if prop_legend_flag:
if res_flag:
source, render = ellipse_source, ellipse
else:
source, render = scan_source, mline
labels, inds = np.unique(source.data["c"], return_index=True)
for label, ind in zip(labels, inds):
label = f"k={k[cmap.index(label)]}"
legend_items.append(LegendItem(label=label, renderers=[render], index=ind))
plot.legend.items = legend_items
scan_x2, scan_y2, scan_hkl2 = [], [], []
for j in range(len(hkl_coord2)):
# Get middle hkl from list
hklm = M @ hkl_coord2[j]
# Decide if point is in the cut
proj = np.dot(hklm, o_c)
if abs(proj - cut_or) >= cut_tol:
continue
# Project onto axes
hklmx = np.dot(hklm, x_c)
hklmy = np.dot(hklm, y_vert)
scan_x2.append(hklmx)
scan_y2.append(hklmy)
scan_hkl2.append(hkl_coord2[j])
scatter_source2.data.update(x=scan_x2, y=scan_y2, hkl=scan_hkl2)
return _update_slice
def plot_file_callback():
nonlocal _update_slice
_update_slice = _prepare_plotting()
_update_slice()
plot_file = Button(label="Plot selected file(s)", button_type="primary", width=200)
plot_file.on_click(plot_file_callback)
plot = figure(height=550, width=550 + 32, tools="pan,wheel_zoom,reset")
plot.toolbar.logo = None
plot.xaxis.visible = False
plot.xgrid.visible = False
plot.yaxis.visible = False
plot.ygrid.visible = False
arrow1 = Arrow(x_start=0, y_start=0, x_end=0, y_end=0, end=NormalHead(size=10))
plot.add_layout(arrow1)
arrow2 = Arrow(x_start=0, y_start=0, x_end=0, y_end=0, end=NormalHead(size=10))
plot.add_layout(arrow2)
kvect_source = ColumnDataSource(dict(x=[], y=[], text=[]))
plot.text(source=kvect_source)
grid_source = ColumnDataSource(dict(xs=[], ys=[]))
plot.multi_line(source=grid_source, line_color="gray")
minor_grid_source = ColumnDataSource(dict(xs=[], ys=[]))
plot.multi_line(source=minor_grid_source, line_color="gray", line_dash="dotted")
ellipse_source = ColumnDataSource(dict(x=[], y=[], width=[], height=[], c=[]))
ellipse = plot.ellipse(source=ellipse_source, fill_color="c", line_color="c")
scan_source = ColumnDataSource(
dict(xs=[], ys=[], x=[], y=[], m=[], s=[], c=[], l=[], hkl=[])
)
mline = plot.multi_line(source=scan_source, line_color="c")
scatter = plot.scatter(
source=scan_source, marker="m", size="s", fill_color="c", line_color="c"
)
scatter_source2 = ColumnDataSource(dict(x=[], y=[], hkl=[]))
scatter2 = plot.scatter(
source=scatter_source2, size=4, fill_color="green", line_color="green"
)
plot.x_range.renderers = [ellipse, mline, scatter, scatter2]
plot.y_range.renderers = [ellipse, mline, scatter, scatter2]
plot.add_layout(Legend(items=[], location="top_left", click_policy="hide"))
plot.add_tools(HoverTool(renderers=[scatter, scatter2], tooltips=[("hkl", "@hkl")]))
hkl_div = Div(text="HKL:", margin=(5, 5, 0, 5))
hkl_normal = TextInput(title="normal", value="0 0 1", width=70)
def hkl_cut_callback(_attr, _old, _new):
if _update_slice is not None:
_update_slice()
hkl_cut = Spinner(title="cut", value=0, step=0.1, width=70)
hkl_cut.on_change("value_throttled", hkl_cut_callback)
hkl_delta = NumericInput(title="delta", value=0.1, mode="float", width=70)
hkl_in_plane_x = TextInput(title="in-plane X", value="1 0 0", width=70)
hkl_in_plane_y = TextInput(title="in-plane Y", value="", width=100, disabled=True)
disting_opt_div = Div(text="Distinguish options:", margin=(5, 5, 0, 5))
disting_opt_cb = CheckboxGroup(
labels=["files (symbols)", "intensities (size)", "k vectors nucl/magn (colors)"],
active=[0, 1, 2],
width=200,
)
disting_opt_rb = RadioGroup(
labels=["scan direction", "resolution ellipsoid"], active=0, width=200
)
k_vectors = TextAreaInput(
title="k vectors:", value="0.0 0.0 0.0\n0.5 0.0 0.0\n0.5 0.5 0.0", width=150
)
res_mult_ni = NumericInput(title="Resolution mult:", value=10, mode="int", width=100)
tol_k_ni = NumericInput(title="k tolerance:", value=0.01, mode="float", width=100)
def show_legend_cb_callback(_attr, _old, new):
plot.legend.visible = 0 in new
show_legend_cb = CheckboxGroup(labels=["Show legend"], active=[0])
show_legend_cb.on_change("active", show_legend_cb_callback)
layout = column(
row(
column(row(measured_data_div, measured_data), row(upload_hkl_div, upload_hkl_fi)),
plot_file,
),
row(
plot,
column(
hkl_div,
row(hkl_normal, hkl_cut, hkl_delta),
row(hkl_in_plane_x, hkl_in_plane_y),
k_vectors,
row(tol_k_ni, res_mult_ni),
disting_opt_div,
disting_opt_cb,
disting_opt_rb,
show_legend_cb,
),
),
)
self.layout = layout

View File

@ -1,4 +1,3 @@
import logging
import os
import re
from ast import literal_eval
@ -6,51 +5,44 @@ from collections import defaultdict
import numpy as np
logger = logging.getLogger(__name__)
META_VARS_STR = (
"instrument",
"title",
"comment",
"sample",
"user",
"proposal_id",
"ProposalID",
"original_filename",
"date",
"zebra_mode",
"zebramode",
"sample_name",
"proposal",
"proposal_user",
"proposal_title",
"proposal_email",
"detectorDistance",
)
META_VARS_FLOAT = (
"omega",
"mf",
"2-theta",
"chi",
"phi",
"nu",
"temp",
"wavelenght",
"a",
"b",
"c",
"alpha",
"beta",
"gamma",
"omega",
"chi",
"phi",
"temp",
"mf",
"temperature",
"magnetic_field",
"cex1",
"cex2",
"wavelength",
"mexz",
"moml",
"mcvl",
"momu",
"mcvu",
"2-theta",
"twotheta",
"nu",
"gamma_angle",
"polar_angle",
"tilt_angle",
"distance",
"distance_an",
"snv",
"snh",
"snvm",
@ -63,13 +55,6 @@ META_VARS_FLOAT = (
"s2vb",
"s2hr",
"s2hl",
"a5",
"a6",
"a4t",
"s2ant",
"s2anb",
"s2anl",
"s2anr",
)
META_UB_MATRIX = ("ub1j", "ub2j", "ub3j", "UB")
@ -114,57 +99,44 @@ def load_1D(filepath):
return dataset
def parse_1D(fileobj, data_type, log=logger):
def parse_1D(fileobj, data_type):
metadata = {"data_type": data_type}
# read metadata
for line in fileobj:
if "=" in line:
variable, value = line.split("=", 1)
variable = variable.strip()
value = value.strip()
try:
if variable in META_VARS_STR:
metadata[variable] = value
elif variable in META_VARS_FLOAT:
if variable == "2-theta": # rename so the angle name is not an arithmetic expression
variable = "twotheta"
if variable in ("a", "b", "c", "alpha", "beta", "gamma"):
variable += "_cell"
metadata[variable] = float(value)
elif variable in META_UB_MATRIX:
if variable == "UB":
metadata["ub"] = np.array(literal_eval(value)).reshape(3, 3)
else:
if "ub" not in metadata:
metadata["ub"] = np.zeros((3, 3))
row = int(variable[-2]) - 1
metadata["ub"][row, :] = list(map(float, value.split()))
except Exception:
print(f"Error reading {variable} with value '{value}'")
metadata[variable] = 0
if "#data" in line:
# this is the end of metadata and the start of data section
break
if "=" not in line:
# skip comments / empty lines
continue
var_name, value = line.split("=", 1)
var_name = var_name.strip()
value = value.strip()
if value == "UNKNOWN":
metadata[var_name] = None
continue
try:
if var_name in META_VARS_STR:
if var_name == "zebramode":
var_name = "zebra_mode"
metadata[var_name] = value
elif var_name in META_VARS_FLOAT:
if var_name == "2-theta": # rename so the angle name is not an arithmetic expression
var_name = "twotheta"
if var_name == "temperature":
var_name = "temp"
if var_name == "magnetic_field":
var_name = "mf"
if var_name in ("a", "b", "c", "alpha", "beta", "gamma"):
var_name += "_cell"
metadata[var_name] = float(value)
elif var_name in META_UB_MATRIX:
if var_name == "UB":
metadata["ub"] = np.array(literal_eval(value)).reshape(3, 3)
else:
if "ub" not in metadata:
metadata["ub"] = np.zeros((3, 3))
row = int(var_name[-2]) - 1
metadata["ub"][row, :] = list(map(float, value.split()))
except Exception:
log.error(f"Error reading {var_name} with value '{value}'")
metadata[var_name] = 0
# handle older files that don't contain "zebra_mode" metadata
if "zebra_mode" not in metadata:
metadata["zebra_mode"] = "nb"
@ -187,10 +159,6 @@ def parse_1D(fileobj, data_type, log=logger):
for param, (param_name, param_type) in zip(line.split(), ccl_first_line):
scan[param_name] = param_type(param)
# rename 0 index scan to 1
if scan["idx"] == 0:
scan["idx"] = 1
# second line
next_line = next(fileobj)
for param, (param_name, param_type) in zip(next_line.split(), ccl_second_line):
@ -207,7 +175,7 @@ def parse_1D(fileobj, data_type, log=logger):
# "om" -> "omega"
scan["scan_motor"] = "omega"
scan["scan_motors"] = ["omega"]
scan["scan_motors"] = ["omega", ]
# overwrite metadata, because it only refers to the scan center
half_dist = (scan["n_points"] - 1) / 2 * scan["angle_step"]
scan["omega"] = np.linspace(
@ -227,18 +195,15 @@ def parse_1D(fileobj, data_type, log=logger):
dataset.append({**metadata, **scan})
elif data_type == ".dat":
# TODO: this might need to be adapted in the future, when "gamma" is added to dat files
if metadata["zebra_mode"] == "nb":
if "gamma_angle" in metadata:
# support for the new format
metadata["gamma"] = metadata["gamma_angle"]
else:
metadata["gamma"] = metadata["twotheta"]
metadata["gamma"] = metadata["twotheta"]
scan = defaultdict(list)
scan["export"] = True
match = re.search("Scanning Variables: (.*), Steps: (.*)", next(fileobj))
motors = [motor.strip().lower() for motor in match.group(1).split(",")]
motors = [motor.lower() for motor in match.group(1).split(", ")]
# Steps can be separated by " " or ", "
steps = [float(step.strip(",")) for step in match.group(2).split()]
@ -300,7 +265,7 @@ def parse_1D(fileobj, data_type, log=logger):
dataset.append({**metadata, **scan})
else:
log.error("Unknown file extension")
print("Unknown file extension")
return dataset
@ -389,7 +354,7 @@ def export_ccl_compare(dataset1, dataset2, path, export_target, hkl_precision=2)
area_n1, area_s1 = scan1["area"]
area_n2, area_s2 = scan2["area"]
area_n = area_n1 - area_n2
area_s = np.sqrt(area_s1**2 + area_s2**2)
area_s = np.sqrt(area_s1 ** 2 + area_s2 ** 2)
area_str = f"{area_n:10.2f}{area_s:10.2f}"
ang_str = ""
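
For reference, a minimal sketch of the ub1j/ub2j/ub3j handling above, which assembles the three metadata rows into a 3x3 UB matrix (the input strings are invented, reusing the test values quoted further down in xtal.py):

import numpy as np

ub_rows = {  # hypothetical "ubNj = ..." metadata lines
    "ub1j": "0.04489 0.02045 -0.2334",
    "ub2j": "-0.06447 0.00129 -0.16356",
    "ub3j": "-0.00328 0.2542 0.0196",
}

ub = np.zeros((3, 3))
for var_name, value in ub_rows.items():
    row = int(var_name[-2]) - 1  # "ub1j" -> row 0, "ub2j" -> row 1, "ub3j" -> row 2
    ub[row, :] = list(map(float, value.split()))

print(ub)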

View File

@ -1,13 +1,10 @@
import logging
import os
import numpy as np
from lmfit.models import GaussianModel, LinearModel, PseudoVoigtModel, VoigtModel
from scipy.integrate import simpson, trapezoid
from pyzebra import CCL_ANGLES
logger = logging.getLogger(__name__)
from .ccl_io import CCL_ANGLES
PARAM_PRECISIONS = {
"twotheta": 0.1,
@ -21,7 +18,9 @@ PARAM_PRECISIONS = {
"ub": 0.01,
}
MAX_RANGE_GAP = {"omega": 0.5}
MAX_RANGE_GAP = {
"omega": 0.5,
}
MOTOR_POS_PRECISION = 0.01
@ -36,12 +35,12 @@ def normalize_dataset(dataset, monitor=100_000):
scan["monitor"] = monitor
def merge_duplicates(dataset, log=logger):
merged = np.zeros(len(dataset), dtype=bool)
def merge_duplicates(dataset):
merged = np.zeros(len(dataset), dtype=np.bool)
for ind_into, scan_into in enumerate(dataset):
for ind_from, scan_from in enumerate(dataset[ind_into + 1 :], start=ind_into + 1):
if _parameters_match(scan_into, scan_from) and not merged[ind_from]:
merge_scans(scan_into, scan_from, log=log)
merge_scans(scan_into, scan_from)
merged[ind_from] = True
@ -78,30 +77,28 @@ def _parameters_match(scan1, scan2):
return True
def merge_datasets(dataset_into, dataset_from, log=logger):
def merge_datasets(dataset_into, dataset_from):
scan_motors_into = dataset_into[0]["scan_motors"]
scan_motors_from = dataset_from[0]["scan_motors"]
if scan_motors_into != scan_motors_from:
log.warning(
f"Scan motors mismatch between datasets: {scan_motors_into} vs {scan_motors_from}"
)
print(f"Scan motors mismatch between datasets: {scan_motors_into} vs {scan_motors_from}")
return
merged = np.zeros(len(dataset_from), dtype=bool)
merged = np.zeros(len(dataset_from), dtype=np.bool)
for scan_into in dataset_into:
for ind, scan_from in enumerate(dataset_from):
if _parameters_match(scan_into, scan_from) and not merged[ind]:
if scan_into["counts"].ndim == 3:
merge_h5_scans(scan_into, scan_from, log=log)
merge_h5_scans(scan_into, scan_from)
else: # scan_into["counts"].ndim == 1
merge_scans(scan_into, scan_from, log=log)
merge_scans(scan_into, scan_from)
merged[ind] = True
for scan_from in dataset_from:
dataset_into.append(scan_from)
def merge_scans(scan_into, scan_from, log=logger):
def merge_scans(scan_into, scan_from):
if "init_scan" not in scan_into:
scan_into["init_scan"] = scan_into.copy()
@ -153,10 +150,10 @@ def merge_scans(scan_into, scan_from, log=logger):
fname1 = os.path.basename(scan_into["original_filename"])
fname2 = os.path.basename(scan_from["original_filename"])
log.info(f'Merging scans: {scan_into["idx"]} ({fname1}) <-- {scan_from["idx"]} ({fname2})')
print(f'Merging scans: {scan_into["idx"]} ({fname1}) <-- {scan_from["idx"]} ({fname2})')
def merge_h5_scans(scan_into, scan_from, log=logger):
def merge_h5_scans(scan_into, scan_from):
if "init_scan" not in scan_into:
scan_into["init_scan"] = scan_into.copy()
@ -165,7 +162,7 @@ def merge_h5_scans(scan_into, scan_from, log=logger):
for scan in scan_into["merged_scans"]:
if scan_from is scan:
log.warning("Already merged scan")
print("Already merged scan")
return
scan_into["merged_scans"].append(scan_from)
@ -217,7 +214,7 @@ def merge_h5_scans(scan_into, scan_from, log=logger):
fname1 = os.path.basename(scan_into["original_filename"])
fname2 = os.path.basename(scan_from["original_filename"])
log.info(f'Merging scans: {scan_into["idx"]} ({fname1}) <-- {scan_from["idx"]} ({fname2})')
print(f'Merging scans: {scan_into["idx"]} ({fname1}) <-- {scan_from["idx"]} ({fname2})')
def restore_scan(scan):
@ -235,7 +232,7 @@ def restore_scan(scan):
scan["export"] = True
def fit_scan(scan, model_dict, fit_from=None, fit_to=None, log=logger):
def fit_scan(scan, model_dict, fit_from=None, fit_to=None):
if fit_from is None:
fit_from = -np.inf
if fit_to is None:
@ -248,7 +245,7 @@ def fit_scan(scan, model_dict, fit_from=None, fit_to=None, log=logger):
# apply fitting range
fit_ind = (fit_from <= x_fit) & (x_fit <= fit_to)
if not np.any(fit_ind):
log.warning(f"No data in fit range for scan {scan['idx']}")
print(f"No data in fit range for scan {scan['idx']}")
return
y_fit = y_fit[fit_ind]
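
A small aside on the dtype change visible in merge_duplicates and merge_datasets: np.bool was deprecated in NumPy 1.20 and removed in 1.24, so the merge mask is now created with the built-in bool. A stripped-down sketch of that loop, with a stand-in matcher instead of _parameters_match and the merge call left as a comment:

import numpy as np

def parameters_match_stub(scan1, scan2):
    # stand-in: the real check compares scan motors, angles, temperature, etc.
    return scan1["idx"] == scan2["idx"]

def merge_duplicates_sketch(dataset):
    merged = np.zeros(len(dataset), dtype=bool)  # formerly dtype=np.bool
    for ind_into, scan_into in enumerate(dataset):
        for ind_from, scan_from in enumerate(dataset[ind_into + 1 :], start=ind_into + 1):
            if parameters_match_stub(scan_into, scan_from) and not merged[ind_from]:
                # the real code calls merge_scans(scan_into, scan_from) here
                merged[ind_from] = True
    return merged

print(merge_duplicates_sketch([{"idx": 1}, {"idx": 1}, {"idx": 2}]))  # [False  True False]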

View File

@ -2,10 +2,9 @@ import h5py
import numpy as np
from lmfit.models import Gaussian2dModel, GaussianModel
META_MATRIX = ("UB",)
META_CELL = ("cell",)
META_STR = ("name",)
META_MATRIX = ("UB", )
META_CELL = ("cell", )
META_STR = ("name", )
def read_h5meta(filepath):
"""Open and parse content of a h5meta file.
@ -47,9 +46,9 @@ def parse_h5meta(file):
if variable in META_STR:
pass
elif variable in META_CELL:
value = np.array(value.split(",")[:6], dtype=float)
value = np.array(value.split(",")[:6], dtype=np.float)
elif variable in META_MATRIX:
value = np.array(value.split(",")[:9], dtype=float).reshape(3, 3)
value = np.array(value.split(",")[:9], dtype=np.float).reshape(3, 3)
else: # default is a single float number
value = float(value)
content[section][variable] = value
@ -69,7 +68,7 @@ def read_detector_data(filepath, cami_meta=None):
ndarray: A 3D array of data, omega, gamma, nu.
"""
with h5py.File(filepath, "r") as h5f:
counts = h5f["/entry1/area_detector2/data"][:].astype(float)
counts = h5f["/entry1/area_detector2/data"][:].astype(np.float64)
n, cols, rows = counts.shape
if "/entry1/experiment_identifier" in h5f: # old format
@ -111,17 +110,13 @@ def read_detector_data(filepath, cami_meta=None):
if len(scan["gamma"]) == 1:
scan["gamma"] = np.ones(n) * scan["gamma"]
scan["twotheta"] = np.ones(n) * scan["twotheta"]
scan["nu"] = h5f["/entry1/ZEBRA/area_detector2/tilt_angle"][0]
scan["ddist"] = h5f["/entry1/ZEBRA/area_detector2/distance"][0]
scan["wave"] = h5f["/entry1/ZEBRA/monochromator/wavelength"][0]
if scan["zebra_mode"] == "nb":
scan["chi"] = np.array([180])
scan["phi"] = np.array([0])
elif scan["zebra_mode"] == "bi":
scan["chi"] = h5f["/entry1/sample/chi"][:]
scan["phi"] = h5f["/entry1/sample/phi"][:]
scan["nu"] = h5f["/entry1/ZEBRA/area_detector2/tilt_angle"][:1]
scan["ddist"] = h5f["/entry1/ZEBRA/area_detector2/distance"][:1]
scan["wave"] = h5f["/entry1/ZEBRA/monochromator/wavelength"][:1]
scan["chi"] = h5f["/entry1/sample/chi"][:]
if len(scan["chi"]) == 1:
scan["chi"] = np.ones(n) * scan["chi"]
scan["phi"] = h5f["/entry1/sample/phi"][:]
if len(scan["phi"]) == 1:
scan["phi"] = np.ones(n) * scan["phi"]
if h5f["/entry1/sample/UB"].size == 0:
@ -135,34 +130,24 @@ def read_detector_data(filepath, cami_meta=None):
# a default motor for a single frame file
scan["scan_motor"] = "omega"
else:
for var in ("omega", "gamma", "chi", "phi"): # TODO: also nu?
for var in ("omega", "gamma", "nu", "chi", "phi"):
if abs(scan[var][0] - scan[var][-1]) > 0.1:
scan["scan_motor"] = var
break
else:
raise ValueError("No angles that vary")
scan["scan_motors"] = [scan["scan_motor"]]
scan["scan_motors"] = [scan["scan_motor"], ]
# optional parameters
if "/entry1/sample/magnetic_field" in h5f:
scan["mf"] = h5f["/entry1/sample/magnetic_field"][:]
if "mf" in scan:
# TODO: NaNs are not JSON compliant, so replace them with None
# this is not a great solution, but makes it safe to use the array in bokeh
scan["mf"] = np.where(np.isnan(scan["mf"]), None, scan["mf"])
if "/entry1/sample/temperature" in h5f:
scan["temp"] = h5f["/entry1/sample/temperature"][:]
elif "/entry1/sample/Ts/value" in h5f:
scan["temp"] = h5f["/entry1/sample/Ts/value"][:]
if "temp" in scan:
# TODO: NaNs are not JSON compliant, so replace them with None
# this is not a great solution, but makes it safe to use the array in bokeh
scan["temp"] = np.where(np.isnan(scan["temp"]), None, scan["temp"])
# overwrite metadata from .cami
if cami_meta is not None:
if "crystal" in cami_meta:
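
To illustrate the NaN handling above: bokeh serializes ColumnDataSource contents to JSON, and NaN is not valid JSON, so the magnetic-field and temperature arrays have their NaNs replaced with None. A tiny sketch with made-up values:

import numpy as np

temp = np.array([1.5, np.nan, 1.7])
temp = np.where(np.isnan(temp), None, temp)  # becomes an object array: [1.5 None 1.7]
print(temp)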

View File

@ -1,5 +1,4 @@
import io
import logging
import os
import subprocess
import tempfile
@ -7,9 +6,7 @@ from math import ceil, floor
import numpy as np
logger = logging.getLogger(__name__)
SXTAL_REFGEN_PATH = "/afs/psi.ch/project/sinq/rhel8/bin/Sxtal_Refgen"
SXTAL_REFGEN_PATH = "/afs/psi.ch/project/sinq/rhel7/bin/Sxtal_Refgen"
_zebraBI_default_geom = """GEOM 2 Bissecting - HiCHI
BLFR z-up
@ -147,7 +144,7 @@ def export_geom_file(path, ang_lims, template=None):
out_file.write(f"{'':<8}{ang:<10}{vals[0]:<10}{vals[1]:<10}{vals[2]:<10}\n")
def calc_ub_matrix(params, log=logger):
def calc_ub_matrix(params):
with tempfile.TemporaryDirectory() as temp_dir:
cfl_file = os.path.join(temp_dir, "ub_matrix.cfl")
@ -163,8 +160,8 @@ def calc_ub_matrix(params, log=logger):
stderr=subprocess.STDOUT,
text=True,
)
log.info(" ".join(comp_proc.args))
log.info(comp_proc.stdout)
print(" ".join(comp_proc.args))
print(comp_proc.stdout)
sfa_file = os.path.join(temp_dir, "ub_matrix.sfa")
ub_matrix = []
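
The print-to-logger change in calc_ub_matrix above keeps the same subprocess pattern: run the external program, merge stderr into stdout, and hand the captured text to a module logger. A generic sketch using a plain echo command (on a POSIX system) instead of the Sxtal_Refgen binary:

import logging
import subprocess

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

comp_proc = subprocess.run(
    ["echo", "hello"],         # stand-in for the Sxtal_Refgen call
    stdout=subprocess.PIPE,
    stderr=subprocess.STDOUT,  # interleave stderr with stdout
    check=True,
    text=True,
)
logger.info(" ".join(comp_proc.args))
logger.info(comp_proc.stdout)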

View File

@ -1,7 +1,5 @@
import os
import numpy as np
SINQ_PATH = "/afs/psi.ch/project/sinqdata"
ZEBRA_PROPOSALS_PATH = os.path.join(SINQ_PATH, "{year}/zebra/{proposal}")
@ -14,23 +12,6 @@ def find_proposal_path(proposal):
# found it
break
else:
raise ValueError(f"Can not find data for proposal '{proposal}'")
raise ValueError(f"Can not find data for proposal '{proposal}'.")
return proposal_path
def parse_hkl(fileobj, data_type):
next(fileobj)
fields = map(str.lower, next(fileobj).strip("!").strip().split())
next(fileobj)
data = np.loadtxt(fileobj, unpack=True)
res = dict(zip(fields, data))
# adapt to .ccl/.dat files naming convention
res["counts"] = res.pop("f2")
if data_type == ".hkl":
for ind in ("h", "k", "l"):
res[ind] = res[ind].astype(int)
return res
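
A quick usage sketch of the column parsing in parse_hkl above, fed with a made-up .hkl-style snippet (one title line, a "!"-prefixed header naming the columns, one more header line, then numeric rows; the column names and values are invented):

import io
import numpy as np

text = (
    "hypothetical title line\n"
    "!   h   k   l   f2   sigma\n"
    "another header line\n"
    "1 0 0 12.3 0.4\n"
    "0 1 1 45.6 0.9\n"
)

fileobj = io.StringIO(text)
next(fileobj)                                                      # skip the title line
fields = map(str.lower, next(fileobj).strip("!").strip().split())  # column names
next(fileobj)                                                      # skip the third header line
data = np.loadtxt(fileobj, unpack=True)
res = dict(zip(fields, data))
res["counts"] = res.pop("f2")  # adapt to the .ccl/.dat naming convention
print(res["h"].astype(int), res["counts"])  # [1 0] [12.3 45.6]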

View File

@ -2,16 +2,8 @@ import numpy as np
from numba import njit
pi_r = 180 / np.pi
IMAGE_W = 256
IMAGE_H = 128
XNORM = 128
YNORM = 64
XPIX = 0.734
YPIX = 1.4809
@njit(cache=True)
def z4frgn(wave, ga, nu):
"""CALCULATES DIFFRACTION VECTOR IN LAB SYSTEM FROM GA AND NU
@ -23,34 +15,36 @@ def z4frgn(wave, ga, nu):
"""
ga_r = ga / pi_r
nu_r = nu / pi_r
z4 = [np.sin(ga_r) * np.cos(nu_r), np.cos(ga_r) * np.cos(nu_r) - 1.0, np.sin(nu_r)]
z4 = [0.0, 0.0, 0.0]
z4[0] = (np.sin(ga_r) * np.cos(nu_r)) / wave
z4[1] = (np.cos(ga_r) * np.cos(nu_r) - 1.0) / wave
z4[2] = (np.sin(nu_r)) / wave
return np.array(z4) / wave
return z4
@njit(cache=True)
def phimat_T(phi):
"""TRANSPOSED BUSING AND LEVY CONVENTION ROTATION MATRIX FOR PHI OR OMEGA
def phimat(phi):
"""BUSING AND LEVY CONVENTION ROTATION MATRIX FOR PHI OR OMEGA
Args:
PHI
Returns:
DUM_T
DUM
"""
ph_r = phi / pi_r
dum = np.zeros((3, 3))
dum = np.zeros(9).reshape(3, 3)
dum[0, 0] = np.cos(ph_r)
dum[1, 0] = np.sin(ph_r)
dum[0, 1] = -dum[1, 0]
dum[0, 1] = np.sin(ph_r)
dum[1, 0] = -dum[0, 1]
dum[1, 1] = dum[0, 0]
dum[2, 2] = 1
return dum
@njit(cache=True)
def z1frnb(wave, ga, nu, om):
"""CALCULATE DIFFRACTION VECTOR Z1 FROM GA, OM, NU, ASSUMING CH=PH=0
@ -61,28 +55,30 @@ def z1frnb(wave, ga, nu, om):
Z1
"""
z4 = z4frgn(wave, ga, nu)
z3 = phimat_T(phi=om).dot(z4)
dum = phimat(phi=om)
dumt = np.transpose(dum)
z3 = dumt.dot(z4)
return z3
@njit(cache=True)
def chimat_T(chi):
"""TRANSPOSED BUSING AND LEVY CONVENTION ROTATION MATRIX FOR CHI
def chimat(chi):
"""BUSING AND LEVY CONVENTION ROTATION MATRIX FOR CHI
Args:
CHI
Returns:
DUM_T
DUM
"""
ch_r = chi / pi_r
dum = np.zeros((3, 3))
dum = np.zeros(9).reshape(3, 3)
dum[0, 0] = np.cos(ch_r)
dum[2, 0] = np.sin(ch_r)
dum[0, 2] = np.sin(ch_r)
dum[1, 1] = 1
dum[0, 2] = -dum[2, 0]
dum[2, 0] = -dum[0, 2]
dum[2, 2] = dum[0, 0]
return dum
@ -98,8 +94,13 @@ def z1frz3(z3, chi, phi):
Returns:
Z1
"""
z2 = chimat_T(chi).dot(z3)
z1 = phimat_T(phi).dot(z2)
dum1 = chimat(chi)
dum2 = np.transpose(dum1)
z2 = dum2.dot(z3)
dum1 = phimat(phi)
dum2 = np.transpose(dum1)
z1 = dum2.dot(z2)
return z1
@ -281,81 +282,125 @@ def fixdnu(wave, z1, ch2, ph2, nu):
return ch, ph, ga, om
def ang2hkl(wave, ddist, gammad, om, ch, ph, nud, ub_inv, x, y):
"""Calculate hkl-indices of a reflection from its position (x,y,angles) at the 2d-detector"""
ga, nu = det2pol(ddist, gammad, nud, x, y)
z1 = z1frmd(wave, ga, om, ch, ph, nu)
hkl = ub_inv @ z1
return hkl
# for test run:
# angtohkl(wave=1.18,ddist=616,gammad=48.66,om=-22.80,ch=0,ph=0,nud=0,x=128,y=64)
def ang2hkl_det(wave, ddist, gammad, om, chi, phi, nud, ub_inv):
"""Calculate hkl-indices of a reflection from its position (x,y,angles) at the 2d-detector"""
xv, yv = np.meshgrid(range(IMAGE_W), range(IMAGE_H))
xobs = (xv.ravel() - XNORM) * XPIX
yobs = (yv.ravel() - YNORM) * YPIX
def angtohkl(wave, ddist, gammad, om, ch, ph, nud, x, y):
"""finds hkl-indices of a reflection from its position (x,y,angles) at the 2d-detector
a = xobs
b = ddist * np.cos(yobs / ddist)
z = ddist * np.sin(yobs / ddist)
d = np.sqrt(a * a + b * b)
Args:
gammad, om, ch, ph, nud, xobs, yobs
gamma = gammad + np.arctan2(a, b) * pi_r
nu = nud + np.arctan2(z, d) * pi_r
Returns:
gamma_r = gamma / pi_r
nu_r = nu / pi_r
z4 = np.vstack(
(
np.sin(gamma_r) * np.cos(nu_r) / wave,
(np.cos(gamma_r) * np.cos(nu_r) - 1) / wave,
np.sin(nu_r) / wave,
)
"""
# define ub matrix if testing angtohkl(wave=1.18,ddist=616,gammad=48.66,om=-22.80,ch=0,ph=0,nud=0,x=128,y=64) against f90:
# ub = np.array([-0.0178803,-0.0749231,0.0282804,-0.0070082,-0.0368001,-0.0577467,0.1609116,-0.0099281,0.0006274]).reshape(3,3)
ub = np.array(
[0.04489, 0.02045, -0.2334, -0.06447, 0.00129, -0.16356, -0.00328, 0.2542, 0.0196]
).reshape(3, 3)
print(
"The input values are: ga=",
gammad,
", om=",
om,
", ch=",
ch,
", ph=",
ph,
", nu=",
nud,
", x=",
x,
", y=",
y,
)
om_r = om / pi_r
dum3 = np.zeros((3, 3))
dum3[0, 0] = np.cos(om_r)
dum3[1, 0] = np.sin(om_r)
dum3[0, 1] = -dum3[1, 0]
dum3[1, 1] = dum3[0, 0]
dum3[2, 2] = 1
ga, nu = det2pol(ddist, gammad, nud, x, y)
chi_r = chi / pi_r
dum2 = np.zeros((3, 3))
dum2[0, 0] = np.cos(chi_r)
dum2[2, 0] = np.sin(chi_r)
dum2[1, 1] = 1
dum2[0, 2] = -dum2[2, 0]
dum2[2, 2] = dum2[0, 0]
print(
"The calculated actual angles are: ga=",
ga,
", om=",
om,
", ch=",
ch,
", ph=",
ph,
", nu=",
nu,
)
phi_r = phi / pi_r
dum1 = np.zeros((3, 3))
dum1[0, 0] = np.cos(phi_r)
dum1[1, 0] = np.sin(phi_r)
dum1[0, 1] = -dum1[1, 0]
dum1[1, 1] = dum1[0, 0]
dum1[2, 2] = 1
z1 = z1frmd(wave, ga, om, ch, ph, nu)
hkl = (ub_inv @ dum1 @ dum2 @ dum3 @ z4).reshape(3, IMAGE_H, IMAGE_W)
print("The diffraction vector is:", z1[0], z1[1], z1[2])
ubinv = np.linalg.inv(ub)
h = ubinv[0, 0] * z1[0] + ubinv[0, 1] * z1[1] + ubinv[0, 2] * z1[2]
k = ubinv[1, 0] * z1[0] + ubinv[1, 1] * z1[1] + ubinv[1, 2] * z1[2]
l = ubinv[2, 0] * z1[0] + ubinv[2, 1] * z1[1] + ubinv[2, 2] * z1[2]
print("The Miller indexes are:", h, k, l)
ch2, ph2 = eqchph(z1)
ch, ph, ga, om = fixdnu(wave, z1, ch2, ph2, nu)
print(
"Bisecting angles to put reflection into the detector center: ga=",
ga,
", om=",
om,
", ch=",
ch,
", ph=",
ph,
", nu=",
nu,
)
def ang2hkl(wave, ddist, gammad, om, ch, ph, nud, ub, x, y):
"""Calculate hkl-indices of a reflection from its position (x,y,angles) at the 2d-detector
"""
ga, nu = det2pol(ddist, gammad, nud, x, y)
z1 = z1frmd(wave, ga, om, ch, ph, nu)
ubinv = np.linalg.inv(ub)
hkl = ubinv @ z1
return hkl
def ang2hkl_1d(wave, ga, om, ch, ph, nu, ub_inv):
"""Calculate hkl-indices of a reflection from its position (angles) at the 1d-detector"""
def ang2hkl_1d(wave, ddist, ga, om, ch, ph, nu, ub):
"""Calculate hkl-indices of a reflection from its position (angles) at the 1d-detector
"""
z1 = z1frmd(wave, ga, om, ch, ph, nu)
hkl = ub_inv @ z1
ubinv = np.linalg.inv(ub)
hkl = ubinv @ z1
return hkl
def ang_proc(wave, ddist, gammad, om, ch, ph, nud, x, y):
"""Utility function to calculate ch, ph, ga, om"""
"""Utility function to calculate ch, ph, ga, om
"""
ga, nu = det2pol(ddist, gammad, nud, x, y)
z1 = z1frmd(wave, ga, om, ch, ph, nu)
ch2, ph2 = eqchph(z1)
ch, ph, ga, om = fixdnu(wave, z1, ch2, ph2, nu)
return ch, ph, ga, om
def gauss(x, *p):
"""Defines Gaussian function
Args:
A - amplitude, mu - position of the center, sigma - width
Returns:
Gaussian function
"""
A, mu, sigma = p
return A * np.exp(-((x - mu) ** 2) / (2.0 * sigma ** 2))
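
In equation form, the geometry implemented by z4frgn, z1frnb/z1frz3 and ang2hkl above reads (Busing-Levy convention; lambda is the wavelength and Omega, X, Phi are the omega-, chi- and phi-rotation matrices):

\[
  \mathbf{z}_4 = \frac{1}{\lambda}
  \begin{pmatrix} \sin\gamma \cos\nu \\ \cos\gamma \cos\nu - 1 \\ \sin\nu \end{pmatrix},
  \qquad
  \mathbf{z}_1 = \Phi^{T}\, \mathrm{X}^{T}\, \Omega^{T}\, \mathbf{z}_4,
  \qquad
  \begin{pmatrix} h \\ k \\ l \end{pmatrix} = (\mathrm{UB})^{-1}\, \mathbf{z}_1,
\]
while the gauss helper is simply \(A \exp\bigl(-(x-\mu)^2 / (2\sigma^2)\bigr)\).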

4
scripts/pyzebra-start.sh Normal file
View File

@ -0,0 +1,4 @@
source /home/pyzebra/miniconda3/etc/profile.d/conda.sh
conda activate prod
pyzebra --port=80 --allow-websocket-origin=pyzebra.psi.ch:80 --spind-path=/home/pyzebra/spind

View File

@ -0,0 +1,4 @@
source /home/pyzebra/miniconda3/etc/profile.d/conda.sh
conda activate test
python ~/pyzebra/pyzebra/app/cli.py --allow-websocket-origin=pyzebra.psi.ch:5006 --spind-path=/home/pyzebra/spind

View File

@ -0,0 +1,11 @@
[Unit]
Description=pyzebra-test web server (runs on port 5006)
[Service]
Type=simple
User=pyzebra
ExecStart=/bin/bash /usr/local/sbin/pyzebra-test-start.sh
Restart=always
[Install]
WantedBy=multi-user.target

10
scripts/pyzebra.service Normal file
View File

@ -0,0 +1,10 @@
[Unit]
Description=pyzebra web server
[Service]
Type=simple
ExecStart=/bin/bash /usr/local/sbin/pyzebra-start.sh
Restart=always
[Install]
WantedBy=multi-user.target