Compare commits
118 Commits
Author | SHA1 | Date | |
---|---|---|---|
45b091f90b | |||
20fc969d60 | |||
bce8cf3120 | |||
56d626e312 | |||
19daa16de7 | |||
4bcfaf8c80 | |||
b8cf76220c | |||
20e43ecce9 | |||
bcb6f7bd3b | |||
c3b198f63a | |||
f044e739a0 | |||
6f83292d69 | |||
7075e0f42b | |||
1d43c86cff | |||
cde2aa1182 | |||
df9607ceb9 | |||
5c5be065be | |||
279dee3935 | |||
4ebb343afb | |||
2810cf3d2b | |||
585d4b3f54 | |||
72fd54d268 | |||
181c359ef9 | |||
00607d94c7 | |||
a192648cc4 | |||
42adda235b | |||
e30035350b | |||
89fffed5d1 | |||
fae90cbeae | |||
7b70d30578 | |||
b26d7005c5 | |||
90021428ee | |||
bf97af1949 | |||
098314e30d | |||
49ff319230 | |||
9b1e396bdb | |||
d72c1d478e | |||
fa73271076 | |||
4d2a8ecad2 | |||
6f1fe1a3d8 | |||
8d4f4a9b50 | |||
c94f784373 | |||
de2a10b507 | |||
e2220760f0 | |||
39b2af60ca | |||
94df4869d4 | |||
4131eb31e7 | |||
4ee1f6fb70 | |||
fbce5de7b9 | |||
636badfba8 | |||
77d6f42e0d | |||
166259d669 | |||
14d65c9590 | |||
48b001180f | |||
21332b3edc | |||
421fc726dd | |||
eb3cc99aeb | |||
d36686177d | |||
7e2c6ad21f | |||
03ed97e063 | |||
234fb1f3b6 | |||
e8ce57b56b | |||
ac6f67cc53 | |||
3234a544de | |||
4bd6c6760e | |||
b401d2f459 | |||
6b8d15234b | |||
5cfa5c176d | |||
bbe7b7d305 | |||
4ba08366df | |||
51c78ad06b | |||
bcd594fa7e | |||
10bcabd7f9 | |||
bee8263184 | |||
98a76ebf63 | |||
12ba0b291b | |||
a719f10f4f | |||
aaff6032c8 | |||
3fb3fe573b | |||
1687337f26 | |||
53ceac21aa | |||
741a09819c | |||
3c619713d5 | |||
3d5a4ed6aa | |||
b2129805dc | |||
92765b5665 | |||
328b71e058 | |||
11ab8485bc | |||
4734b3e50f | |||
dfeeed284b | |||
9adf83ec74 | |||
a299449209 | |||
45a81aa632 | |||
3926e8de39 | |||
d2e2a2c7fd | |||
3934dcdd07 | |||
4c8037af5c | |||
e29b4e7da8 | |||
7189ee8196 | |||
be8417856a | |||
8ba062064a | |||
6557b2f3a4 | |||
7dcd20198f | |||
13a6ff285a | |||
09b6e4fdcf | |||
e7780a2405 | |||
e8b85bcea3 | |||
2482746f14 | |||
3986b8173f | |||
16966b6e3e | |||
e9d3fcc41a | |||
506d70a913 | |||
fc4e9c12cf | |||
c5faa0a55a | |||
c9922bb0cb | |||
813270d6f8 | |||
cf2f8435e7 | |||
380abfb102 |
1
.vscode/launch.json
vendored
1
.vscode/launch.json
vendored
@ -8,6 +8,7 @@
|
||||
"program": "${workspaceFolder}/pyzebra/app/cli.py",
|
||||
"console": "internalConsole",
|
||||
"env": {},
|
||||
"justMyCode": false,
|
||||
},
|
||||
]
|
||||
}
|
||||
|
@ -24,7 +24,7 @@ requirements:
|
||||
- h5py
|
||||
- bokeh =2.4
|
||||
- numba
|
||||
- lmfit
|
||||
- lmfit >=1.0.2
|
||||
|
||||
|
||||
about:
|
||||
|
@ -4,5 +4,6 @@ from pyzebra.ccl_process import *
|
||||
from pyzebra.h5 import *
|
||||
from pyzebra.utils import *
|
||||
from pyzebra.xtal import *
|
||||
from pyzebra.sxtal_refgen import *
|
||||
|
||||
__version__ = "0.5.2"
|
||||
__version__ = "0.7.2"
|
||||
|
@ -8,11 +8,13 @@ from bokeh.layouts import column, row
|
||||
from bokeh.models import Button, Panel, Tabs, TextAreaInput, TextInput
|
||||
|
||||
import panel_ccl_integrate
|
||||
import panel_ccl_compare
|
||||
import panel_hdf_anatric
|
||||
import panel_hdf_param_study
|
||||
import panel_hdf_viewer
|
||||
import panel_param_study
|
||||
import panel_spind
|
||||
import panel_ccl_prepare
|
||||
|
||||
doc = curdoc()
|
||||
|
||||
@ -34,14 +36,18 @@ proposal_textinput.on_change("value_input", proposal_textinput_callback)
|
||||
doc.proposal_textinput = proposal_textinput
|
||||
|
||||
def apply_button_callback():
|
||||
try:
|
||||
proposal_path = pyzebra.find_proposal_path(proposal_textinput.value)
|
||||
except ValueError as e:
|
||||
print(e)
|
||||
return
|
||||
proposal = proposal_textinput.value.strip()
|
||||
if proposal:
|
||||
try:
|
||||
proposal_path = pyzebra.find_proposal_path(proposal)
|
||||
except ValueError as e:
|
||||
print(e)
|
||||
return
|
||||
apply_button.disabled = True
|
||||
else:
|
||||
proposal_path = ""
|
||||
|
||||
proposal_textinput.name = proposal_path
|
||||
apply_button.disabled = True
|
||||
|
||||
apply_button = Button(label="Apply", button_type="primary")
|
||||
apply_button.on_click(apply_button_callback)
|
||||
@ -54,7 +60,9 @@ doc.add_root(
|
||||
Panel(child=column(proposal_textinput, apply_button), title="user config"),
|
||||
panel_hdf_viewer.create(),
|
||||
panel_hdf_anatric.create(),
|
||||
panel_ccl_prepare.create(),
|
||||
panel_ccl_integrate.create(),
|
||||
panel_ccl_compare.create(),
|
||||
panel_param_study.create(),
|
||||
panel_hdf_param_study.create(),
|
||||
panel_spind.create(),
|
||||
|
@ -6,7 +6,7 @@ from bokeh.application.application import Application
|
||||
from bokeh.application.handlers import ScriptHandler
|
||||
from bokeh.server.server import Server
|
||||
|
||||
from pyzebra.anatric import ANATRIC_PATH
|
||||
from pyzebra import ANATRIC_PATH, SXTAL_REFGEN_PATH
|
||||
from pyzebra.app.handler import PyzebraHandler
|
||||
|
||||
logging.basicConfig(format="%(asctime)s %(message)s", level=logging.INFO)
|
||||
@ -42,6 +42,13 @@ def main():
|
||||
"--anatric-path", type=str, default=ANATRIC_PATH, help="path to anatric executable",
|
||||
)
|
||||
|
||||
parser.add_argument(
|
||||
"--sxtal-refgen-path",
|
||||
type=str,
|
||||
default=SXTAL_REFGEN_PATH,
|
||||
help="path to Sxtal_Refgen executable",
|
||||
)
|
||||
|
||||
parser.add_argument(
|
||||
"--spind-path", type=str, default=None, help="path to spind scripts folder",
|
||||
)
|
||||
|
718
pyzebra/app/panel_ccl_compare.py
Normal file
718
pyzebra/app/panel_ccl_compare.py
Normal file
@ -0,0 +1,718 @@
|
||||
import base64
|
||||
import io
|
||||
import os
|
||||
import tempfile
|
||||
import types
|
||||
|
||||
import numpy as np
|
||||
from bokeh.io import curdoc
|
||||
from bokeh.layouts import column, row
|
||||
from bokeh.models import (
|
||||
BasicTicker,
|
||||
Button,
|
||||
CellEditor,
|
||||
CheckboxEditor,
|
||||
CheckboxGroup,
|
||||
ColumnDataSource,
|
||||
CustomJS,
|
||||
DataRange1d,
|
||||
DataTable,
|
||||
Div,
|
||||
Dropdown,
|
||||
FileInput,
|
||||
Grid,
|
||||
Legend,
|
||||
Line,
|
||||
LinearAxis,
|
||||
MultiLine,
|
||||
MultiSelect,
|
||||
NumberEditor,
|
||||
Panel,
|
||||
PanTool,
|
||||
Plot,
|
||||
RadioGroup,
|
||||
ResetTool,
|
||||
Scatter,
|
||||
Select,
|
||||
Spacer,
|
||||
Span,
|
||||
Spinner,
|
||||
TableColumn,
|
||||
TextAreaInput,
|
||||
WheelZoomTool,
|
||||
Whisker,
|
||||
)
|
||||
|
||||
import pyzebra
|
||||
from pyzebra.ccl_io import EXPORT_TARGETS
|
||||
from pyzebra.ccl_process import AREA_METHODS
|
||||
|
||||
|
||||
javaScript = """
|
||||
let j = 0;
|
||||
for (let i = 0; i < js_data.data['fname'].length; i++) {
|
||||
if (js_data.data['content'][i] === "") continue;
|
||||
|
||||
setTimeout(function() {
|
||||
const blob = new Blob([js_data.data['content'][i]], {type: 'text/plain'})
|
||||
const link = document.createElement('a');
|
||||
document.body.appendChild(link);
|
||||
const url = window.URL.createObjectURL(blob);
|
||||
link.href = url;
|
||||
link.download = js_data.data['fname'][i] + js_data.data['ext'][i];
|
||||
link.click();
|
||||
window.URL.revokeObjectURL(url);
|
||||
document.body.removeChild(link);
|
||||
}, 100 * j)
|
||||
|
||||
j++;
|
||||
}
|
||||
"""
|
||||
|
||||
|
||||
def create():
|
||||
doc = curdoc()
|
||||
dataset1 = []
|
||||
dataset2 = []
|
||||
fit_params = {}
|
||||
js_data = ColumnDataSource(data=dict(content=["", ""], fname=["", ""], ext=["", ""]))
|
||||
|
||||
def file_select_update_for_proposal():
|
||||
proposal_path = proposal_textinput.name
|
||||
if proposal_path:
|
||||
file_list = []
|
||||
for file in os.listdir(proposal_path):
|
||||
if file.endswith((".ccl")):
|
||||
file_list.append((os.path.join(proposal_path, file), file))
|
||||
file_select.options = file_list
|
||||
file_open_button.disabled = False
|
||||
else:
|
||||
file_select.options = []
|
||||
file_open_button.disabled = True
|
||||
|
||||
doc.add_periodic_callback(file_select_update_for_proposal, 5000)
|
||||
|
||||
def proposal_textinput_callback(_attr, _old, _new):
|
||||
file_select_update_for_proposal()
|
||||
|
||||
proposal_textinput = doc.proposal_textinput
|
||||
proposal_textinput.on_change("name", proposal_textinput_callback)
|
||||
|
||||
def _init_datatable():
|
||||
# dataset2 should have the same metadata as dataset1
|
||||
scan_list = [s["idx"] for s in dataset1]
|
||||
hkl = [f'{s["h"]} {s["k"]} {s["l"]}' for s in dataset1]
|
||||
export = [s["export"] for s in dataset1]
|
||||
|
||||
twotheta = [np.median(s["twotheta"]) if "twotheta" in s else None for s in dataset1]
|
||||
gamma = [np.median(s["gamma"]) if "gamma" in s else None for s in dataset1]
|
||||
omega = [np.median(s["omega"]) if "omega" in s else None for s in dataset1]
|
||||
chi = [np.median(s["chi"]) if "chi" in s else None for s in dataset1]
|
||||
phi = [np.median(s["phi"]) if "phi" in s else None for s in dataset1]
|
||||
nu = [np.median(s["nu"]) if "nu" in s else None for s in dataset1]
|
||||
|
||||
scan_table_source.data.update(
|
||||
scan=scan_list,
|
||||
hkl=hkl,
|
||||
fit=[0] * len(scan_list),
|
||||
export=export,
|
||||
twotheta=twotheta,
|
||||
gamma=gamma,
|
||||
omega=omega,
|
||||
chi=chi,
|
||||
phi=phi,
|
||||
nu=nu,
|
||||
)
|
||||
scan_table_source.selected.indices = []
|
||||
scan_table_source.selected.indices = [0]
|
||||
|
||||
merge_options = [(str(i), f"{i} ({idx})") for i, idx in enumerate(scan_list)]
|
||||
merge_from_select.options = merge_options
|
||||
merge_from_select.value = merge_options[0][0]
|
||||
|
||||
file_select = MultiSelect(title="Select 2 .ccl files:", width=210, height=250)
|
||||
|
||||
def file_open_button_callback():
|
||||
if len(file_select.value) != 2:
|
||||
print("WARNING: Select exactly 2 .ccl files.")
|
||||
return
|
||||
|
||||
new_data1 = []
|
||||
new_data2 = []
|
||||
for ind, f_path in enumerate(file_select.value):
|
||||
with open(f_path) as file:
|
||||
f_name = os.path.basename(f_path)
|
||||
base, ext = os.path.splitext(f_name)
|
||||
try:
|
||||
file_data = pyzebra.parse_1D(file, ext)
|
||||
except:
|
||||
print(f"Error loading {f_name}")
|
||||
return
|
||||
|
||||
pyzebra.normalize_dataset(file_data, monitor_spinner.value)
|
||||
pyzebra.merge_duplicates(file_data)
|
||||
|
||||
if ind == 0:
|
||||
js_data.data.update(fname=[base, base])
|
||||
new_data1 = file_data
|
||||
else: # ind = 1
|
||||
new_data2 = file_data
|
||||
|
||||
# ignore extra scans at the end of the longest of the two files
|
||||
min_len = min(len(new_data1), len(new_data2))
|
||||
new_data1 = new_data1[:min_len]
|
||||
new_data2 = new_data2[:min_len]
|
||||
|
||||
nonlocal dataset1, dataset2
|
||||
dataset1 = new_data1
|
||||
dataset2 = new_data2
|
||||
_init_datatable()
|
||||
|
||||
file_open_button = Button(label="Open New", width=100, disabled=True)
|
||||
file_open_button.on_click(file_open_button_callback)
|
||||
|
||||
def upload_button_callback(_attr, _old, _new):
|
||||
if len(upload_button.filename) != 2:
|
||||
print("WARNING: Upload exactly 2 .ccl files.")
|
||||
return
|
||||
|
||||
new_data1 = []
|
||||
new_data2 = []
|
||||
for ind, (f_str, f_name) in enumerate(zip(upload_button.value, upload_button.filename)):
|
||||
with io.StringIO(base64.b64decode(f_str).decode()) as file:
|
||||
base, ext = os.path.splitext(f_name)
|
||||
try:
|
||||
file_data = pyzebra.parse_1D(file, ext)
|
||||
except:
|
||||
print(f"Error loading {f_name}")
|
||||
return
|
||||
|
||||
pyzebra.normalize_dataset(file_data, monitor_spinner.value)
|
||||
pyzebra.merge_duplicates(file_data)
|
||||
|
||||
if ind == 0:
|
||||
js_data.data.update(fname=[base, base])
|
||||
new_data1 = file_data
|
||||
else: # ind = 1
|
||||
new_data2 = file_data
|
||||
|
||||
# ignore extra scans at the end of the longest of the two files
|
||||
min_len = min(len(new_data1), len(new_data2))
|
||||
new_data1 = new_data1[:min_len]
|
||||
new_data2 = new_data2[:min_len]
|
||||
|
||||
nonlocal dataset1, dataset2
|
||||
dataset1 = new_data1
|
||||
dataset2 = new_data2
|
||||
_init_datatable()
|
||||
|
||||
upload_div = Div(text="or upload 2 .ccl files:", margin=(5, 5, 0, 5))
|
||||
upload_button = FileInput(accept=".ccl", multiple=True, width=200)
|
||||
# for on_change("value", ...) or on_change("filename", ...),
|
||||
# see https://github.com/bokeh/bokeh/issues/11461
|
||||
upload_button.on_change("filename", upload_button_callback)
|
||||
|
||||
def monitor_spinner_callback(_attr, old, new):
|
||||
if dataset1 and dataset2:
|
||||
pyzebra.normalize_dataset(dataset1, new)
|
||||
pyzebra.normalize_dataset(dataset2, new)
|
||||
_update_plot()
|
||||
|
||||
monitor_spinner = Spinner(title="Monitor:", mode="int", value=100_000, low=1, width=145)
|
||||
monitor_spinner.on_change("value", monitor_spinner_callback)
|
||||
|
||||
def _update_table():
|
||||
fit_ok = [(1 if "fit" in scan else 0) for scan in dataset1]
|
||||
export = [scan["export"] for scan in dataset1]
|
||||
scan_table_source.data.update(fit=fit_ok, export=export)
|
||||
|
||||
def _update_plot():
|
||||
plot_scatter_source = [plot_scatter1_source, plot_scatter2_source]
|
||||
plot_fit_source = [plot_fit1_source, plot_fit2_source]
|
||||
plot_bkg_source = [plot_bkg1_source, plot_bkg2_source]
|
||||
plot_peak_source = [plot_peak1_source, plot_peak2_source]
|
||||
fit_output = ""
|
||||
|
||||
for ind, scan in enumerate(_get_selected_scan()):
|
||||
scatter_source = plot_scatter_source[ind]
|
||||
fit_source = plot_fit_source[ind]
|
||||
bkg_source = plot_bkg_source[ind]
|
||||
peak_source = plot_peak_source[ind]
|
||||
scan_motor = scan["scan_motor"]
|
||||
|
||||
y = scan["counts"]
|
||||
y_err = scan["counts_err"]
|
||||
x = scan[scan_motor]
|
||||
|
||||
plot.axis[0].axis_label = scan_motor
|
||||
scatter_source.data.update(x=x, y=y, y_upper=y + y_err, y_lower=y - y_err)
|
||||
|
||||
fit = scan.get("fit")
|
||||
if fit is not None:
|
||||
x_fit = np.linspace(x[0], x[-1], 100)
|
||||
fit_source.data.update(x=x_fit, y=fit.eval(x=x_fit))
|
||||
|
||||
x_bkg = []
|
||||
y_bkg = []
|
||||
xs_peak = []
|
||||
ys_peak = []
|
||||
comps = fit.eval_components(x=x_fit)
|
||||
for i, model in enumerate(fit_params):
|
||||
if "linear" in model:
|
||||
x_bkg = x_fit
|
||||
y_bkg = comps[f"f{i}_"]
|
||||
|
||||
elif any(val in model for val in ("gaussian", "voigt", "pvoigt")):
|
||||
xs_peak.append(x_fit)
|
||||
ys_peak.append(comps[f"f{i}_"])
|
||||
|
||||
bkg_source.data.update(x=x_bkg, y=y_bkg)
|
||||
peak_source.data.update(xs=xs_peak, ys=ys_peak)
|
||||
if fit_output:
|
||||
fit_output = fit_output + "\n\n"
|
||||
fit_output = fit_output + fit.fit_report()
|
||||
|
||||
else:
|
||||
fit_source.data.update(x=[], y=[])
|
||||
bkg_source.data.update(x=[], y=[])
|
||||
peak_source.data.update(xs=[], ys=[])
|
||||
|
||||
fit_output_textinput.value = fit_output
|
||||
|
||||
# Main plot
|
||||
plot = Plot(
|
||||
x_range=DataRange1d(),
|
||||
y_range=DataRange1d(only_visible=True),
|
||||
plot_height=470,
|
||||
plot_width=700,
|
||||
)
|
||||
|
||||
plot.add_layout(LinearAxis(axis_label="Counts"), place="left")
|
||||
plot.add_layout(LinearAxis(axis_label="Scan motor"), place="below")
|
||||
|
||||
plot.add_layout(Grid(dimension=0, ticker=BasicTicker()))
|
||||
plot.add_layout(Grid(dimension=1, ticker=BasicTicker()))
|
||||
|
||||
plot_scatter1_source = ColumnDataSource(dict(x=[0], y=[0], y_upper=[0], y_lower=[0]))
|
||||
plot_scatter1 = plot.add_glyph(
|
||||
plot_scatter1_source, Scatter(x="x", y="y", line_color="steelblue", fill_color="steelblue")
|
||||
)
|
||||
plot.add_layout(
|
||||
Whisker(source=plot_scatter1_source, base="x", upper="y_upper", lower="y_lower")
|
||||
)
|
||||
|
||||
plot_scatter2_source = ColumnDataSource(dict(x=[0], y=[0], y_upper=[0], y_lower=[0]))
|
||||
plot_scatter2 = plot.add_glyph(
|
||||
plot_scatter2_source, Scatter(x="x", y="y", line_color="firebrick", fill_color="firebrick")
|
||||
)
|
||||
plot.add_layout(
|
||||
Whisker(source=plot_scatter2_source, base="x", upper="y_upper", lower="y_lower")
|
||||
)
|
||||
|
||||
plot_fit1_source = ColumnDataSource(dict(x=[0], y=[0]))
|
||||
plot_fit1 = plot.add_glyph(plot_fit1_source, Line(x="x", y="y"))
|
||||
|
||||
plot_fit2_source = ColumnDataSource(dict(x=[0], y=[0]))
|
||||
plot_fit2 = plot.add_glyph(plot_fit2_source, Line(x="x", y="y"))
|
||||
|
||||
plot_bkg1_source = ColumnDataSource(dict(x=[0], y=[0]))
|
||||
plot_bkg1 = plot.add_glyph(
|
||||
plot_bkg1_source, Line(x="x", y="y", line_color="steelblue", line_dash="dashed")
|
||||
)
|
||||
|
||||
plot_bkg2_source = ColumnDataSource(dict(x=[0], y=[0]))
|
||||
plot_bkg2 = plot.add_glyph(
|
||||
plot_bkg2_source, Line(x="x", y="y", line_color="firebrick", line_dash="dashed")
|
||||
)
|
||||
|
||||
plot_peak1_source = ColumnDataSource(dict(xs=[[0]], ys=[[0]]))
|
||||
plot_peak1 = plot.add_glyph(
|
||||
plot_peak1_source, MultiLine(xs="xs", ys="ys", line_color="steelblue", line_dash="dashed")
|
||||
)
|
||||
|
||||
plot_peak2_source = ColumnDataSource(dict(xs=[[0]], ys=[[0]]))
|
||||
plot_peak2 = plot.add_glyph(
|
||||
plot_peak2_source, MultiLine(xs="xs", ys="ys", line_color="firebrick", line_dash="dashed")
|
||||
)
|
||||
|
||||
fit_from_span = Span(location=None, dimension="height", line_dash="dashed")
|
||||
plot.add_layout(fit_from_span)
|
||||
|
||||
fit_to_span = Span(location=None, dimension="height", line_dash="dashed")
|
||||
plot.add_layout(fit_to_span)
|
||||
|
||||
plot.add_layout(
|
||||
Legend(
|
||||
items=[
|
||||
("data 1", [plot_scatter1]),
|
||||
("data 2", [plot_scatter2]),
|
||||
("best fit 1", [plot_fit1]),
|
||||
("best fit 2", [plot_fit2]),
|
||||
("peak 1", [plot_peak1]),
|
||||
("peak 2", [plot_peak2]),
|
||||
("linear 1", [plot_bkg1]),
|
||||
("linear 2", [plot_bkg2]),
|
||||
],
|
||||
location="top_left",
|
||||
click_policy="hide",
|
||||
)
|
||||
)
|
||||
|
||||
plot.add_tools(PanTool(), WheelZoomTool(), ResetTool())
|
||||
plot.toolbar.logo = None
|
||||
|
||||
# Scan select
|
||||
def scan_table_select_callback(_attr, old, new):
|
||||
if not new:
|
||||
# skip empty selections
|
||||
return
|
||||
|
||||
# Avoid selection of multiple indicies (via Shift+Click or Ctrl+Click)
|
||||
if len(new) > 1:
|
||||
# drop selection to the previous one
|
||||
scan_table_source.selected.indices = old
|
||||
return
|
||||
|
||||
if len(old) > 1:
|
||||
# skip unnecessary update caused by selection drop
|
||||
return
|
||||
|
||||
_update_plot()
|
||||
|
||||
def scan_table_source_callback(_attr, _old, new):
|
||||
# unfortunately, we don't know if the change comes from data update or user input
|
||||
# also `old` and `new` are the same for non-scalars
|
||||
for scan1, scan2, export in zip(dataset1, dataset2, new["export"]):
|
||||
scan1["export"] = export
|
||||
scan2["export"] = export
|
||||
_update_preview()
|
||||
|
||||
scan_table_source = ColumnDataSource(
|
||||
dict(
|
||||
scan=[],
|
||||
hkl=[],
|
||||
fit=[],
|
||||
export=[],
|
||||
twotheta=[],
|
||||
gamma=[],
|
||||
omega=[],
|
||||
chi=[],
|
||||
phi=[],
|
||||
nu=[],
|
||||
)
|
||||
)
|
||||
scan_table_source.on_change("data", scan_table_source_callback)
|
||||
scan_table_source.selected.on_change("indices", scan_table_select_callback)
|
||||
|
||||
scan_table = DataTable(
|
||||
source=scan_table_source,
|
||||
columns=[
|
||||
TableColumn(field="scan", title="Scan", editor=CellEditor(), width=50),
|
||||
TableColumn(field="hkl", title="hkl", editor=CellEditor(), width=100),
|
||||
TableColumn(field="fit", title="Fit", editor=CellEditor(), width=50),
|
||||
TableColumn(field="export", title="Export", editor=CheckboxEditor(), width=50),
|
||||
TableColumn(field="twotheta", title="2theta", editor=CellEditor(), width=50),
|
||||
TableColumn(field="gamma", title="gamma", editor=CellEditor(), width=50),
|
||||
TableColumn(field="omega", title="omega", editor=CellEditor(), width=50),
|
||||
TableColumn(field="chi", title="chi", editor=CellEditor(), width=50),
|
||||
TableColumn(field="phi", title="phi", editor=CellEditor(), width=50),
|
||||
TableColumn(field="nu", title="nu", editor=CellEditor(), width=50),
|
||||
],
|
||||
width=310, # +60 because of the index column, but excluding twotheta onwards
|
||||
height=350,
|
||||
autosize_mode="none",
|
||||
editable=True,
|
||||
)
|
||||
|
||||
def _get_selected_scan():
|
||||
ind = scan_table_source.selected.indices[0]
|
||||
return dataset1[ind], dataset2[ind]
|
||||
|
||||
merge_from_select = Select(title="scan:", width=145)
|
||||
|
||||
def merge_button_callback():
|
||||
scan_into1, scan_into2 = _get_selected_scan()
|
||||
scan_from1 = dataset1[int(merge_from_select.value)]
|
||||
scan_from2 = dataset2[int(merge_from_select.value)]
|
||||
|
||||
if scan_into1 is scan_from1:
|
||||
print("WARNING: Selected scans for merging are identical")
|
||||
return
|
||||
|
||||
pyzebra.merge_scans(scan_into1, scan_from1)
|
||||
pyzebra.merge_scans(scan_into2, scan_from2)
|
||||
_update_table()
|
||||
_update_plot()
|
||||
|
||||
merge_button = Button(label="Merge into current", width=145)
|
||||
merge_button.on_click(merge_button_callback)
|
||||
|
||||
def restore_button_callback():
|
||||
scan1, scan2 = _get_selected_scan()
|
||||
pyzebra.restore_scan(scan1)
|
||||
pyzebra.restore_scan(scan2)
|
||||
_update_table()
|
||||
_update_plot()
|
||||
|
||||
restore_button = Button(label="Restore scan", width=145)
|
||||
restore_button.on_click(restore_button_callback)
|
||||
|
||||
def fit_from_spinner_callback(_attr, _old, new):
|
||||
fit_from_span.location = new
|
||||
|
||||
fit_from_spinner = Spinner(title="Fit from:", width=145)
|
||||
fit_from_spinner.on_change("value", fit_from_spinner_callback)
|
||||
|
||||
def fit_to_spinner_callback(_attr, _old, new):
|
||||
fit_to_span.location = new
|
||||
|
||||
fit_to_spinner = Spinner(title="to:", width=145)
|
||||
fit_to_spinner.on_change("value", fit_to_spinner_callback)
|
||||
|
||||
def fitparams_add_dropdown_callback(click):
|
||||
# bokeh requires (str, str) for MultiSelect options
|
||||
new_tag = f"{click.item}-{fitparams_select.tags[0]}"
|
||||
fitparams_select.options.append((new_tag, click.item))
|
||||
fit_params[new_tag] = fitparams_factory(click.item)
|
||||
fitparams_select.tags[0] += 1
|
||||
|
||||
fitparams_add_dropdown = Dropdown(
|
||||
label="Add fit function",
|
||||
menu=[
|
||||
("Linear", "linear"),
|
||||
("Gaussian", "gaussian"),
|
||||
("Voigt", "voigt"),
|
||||
("Pseudo Voigt", "pvoigt"),
|
||||
# ("Pseudo Voigt1", "pseudovoigt1"),
|
||||
],
|
||||
width=145,
|
||||
)
|
||||
fitparams_add_dropdown.on_click(fitparams_add_dropdown_callback)
|
||||
|
||||
def fitparams_select_callback(_attr, old, new):
|
||||
# Avoid selection of multiple indicies (via Shift+Click or Ctrl+Click)
|
||||
if len(new) > 1:
|
||||
# drop selection to the previous one
|
||||
fitparams_select.value = old
|
||||
return
|
||||
|
||||
if len(old) > 1:
|
||||
# skip unnecessary update caused by selection drop
|
||||
return
|
||||
|
||||
if new:
|
||||
fitparams_table_source.data.update(fit_params[new[0]])
|
||||
else:
|
||||
fitparams_table_source.data.update(dict(param=[], value=[], vary=[], min=[], max=[]))
|
||||
|
||||
fitparams_select = MultiSelect(options=[], height=120, width=145)
|
||||
fitparams_select.tags = [0]
|
||||
fitparams_select.on_change("value", fitparams_select_callback)
|
||||
|
||||
def fitparams_remove_button_callback():
|
||||
if fitparams_select.value:
|
||||
sel_tag = fitparams_select.value[0]
|
||||
del fit_params[sel_tag]
|
||||
for elem in fitparams_select.options:
|
||||
if elem[0] == sel_tag:
|
||||
fitparams_select.options.remove(elem)
|
||||
break
|
||||
|
||||
fitparams_select.value = []
|
||||
|
||||
fitparams_remove_button = Button(label="Remove fit function", width=145)
|
||||
fitparams_remove_button.on_click(fitparams_remove_button_callback)
|
||||
|
||||
def fitparams_factory(function):
|
||||
if function == "linear":
|
||||
params = ["slope", "intercept"]
|
||||
elif function == "gaussian":
|
||||
params = ["amplitude", "center", "sigma"]
|
||||
elif function == "voigt":
|
||||
params = ["amplitude", "center", "sigma", "gamma"]
|
||||
elif function == "pvoigt":
|
||||
params = ["amplitude", "center", "sigma", "fraction"]
|
||||
elif function == "pseudovoigt1":
|
||||
params = ["amplitude", "center", "g_sigma", "l_sigma", "fraction"]
|
||||
else:
|
||||
raise ValueError("Unknown fit function")
|
||||
|
||||
n = len(params)
|
||||
fitparams = dict(
|
||||
param=params, value=[None] * n, vary=[True] * n, min=[None] * n, max=[None] * n,
|
||||
)
|
||||
|
||||
if function == "linear":
|
||||
fitparams["value"] = [0, 1]
|
||||
fitparams["vary"] = [False, True]
|
||||
fitparams["min"] = [None, 0]
|
||||
|
||||
elif function == "gaussian":
|
||||
fitparams["min"] = [0, None, None]
|
||||
|
||||
return fitparams
|
||||
|
||||
fitparams_table_source = ColumnDataSource(dict(param=[], value=[], vary=[], min=[], max=[]))
|
||||
fitparams_table = DataTable(
|
||||
source=fitparams_table_source,
|
||||
columns=[
|
||||
TableColumn(field="param", title="Parameter", editor=CellEditor()),
|
||||
TableColumn(field="value", title="Value", editor=NumberEditor()),
|
||||
TableColumn(field="vary", title="Vary", editor=CheckboxEditor()),
|
||||
TableColumn(field="min", title="Min", editor=NumberEditor()),
|
||||
TableColumn(field="max", title="Max", editor=NumberEditor()),
|
||||
],
|
||||
height=200,
|
||||
width=350,
|
||||
index_position=None,
|
||||
editable=True,
|
||||
auto_edit=True,
|
||||
)
|
||||
|
||||
# start with `background` and `gauss` fit functions added
|
||||
fitparams_add_dropdown_callback(types.SimpleNamespace(item="linear"))
|
||||
fitparams_add_dropdown_callback(types.SimpleNamespace(item="gaussian"))
|
||||
fitparams_select.value = ["gaussian-1"] # add selection to gauss
|
||||
|
||||
fit_output_textinput = TextAreaInput(title="Fit results:", width=750, height=200)
|
||||
|
||||
def proc_all_button_callback():
|
||||
for scan in [*dataset1, *dataset2]:
|
||||
if scan["export"]:
|
||||
pyzebra.fit_scan(
|
||||
scan, fit_params, fit_from=fit_from_spinner.value, fit_to=fit_to_spinner.value
|
||||
)
|
||||
pyzebra.get_area(
|
||||
scan,
|
||||
area_method=AREA_METHODS[area_method_radiobutton.active],
|
||||
lorentz=lorentz_checkbox.active,
|
||||
)
|
||||
|
||||
_update_plot()
|
||||
_update_table()
|
||||
|
||||
proc_all_button = Button(label="Process All", button_type="primary", width=145)
|
||||
proc_all_button.on_click(proc_all_button_callback)
|
||||
|
||||
def proc_button_callback():
|
||||
for scan in _get_selected_scan():
|
||||
pyzebra.fit_scan(
|
||||
scan, fit_params, fit_from=fit_from_spinner.value, fit_to=fit_to_spinner.value
|
||||
)
|
||||
pyzebra.get_area(
|
||||
scan,
|
||||
area_method=AREA_METHODS[area_method_radiobutton.active],
|
||||
lorentz=lorentz_checkbox.active,
|
||||
)
|
||||
|
||||
_update_plot()
|
||||
_update_table()
|
||||
|
||||
proc_button = Button(label="Process Current", width=145)
|
||||
proc_button.on_click(proc_button_callback)
|
||||
|
||||
area_method_div = Div(text="Intensity:", margin=(5, 5, 0, 5))
|
||||
area_method_radiobutton = RadioGroup(labels=["Function", "Area"], active=0, width=145)
|
||||
|
||||
intensity_diff_div = Div(text="Intensity difference:", margin=(5, 5, 0, 5))
|
||||
intensity_diff_radiobutton = RadioGroup(
|
||||
labels=["file1 - file2", "file2 - file1"], active=0, width=145
|
||||
)
|
||||
|
||||
lorentz_checkbox = CheckboxGroup(labels=["Lorentz Correction"], width=145, margin=(13, 5, 5, 5))
|
||||
|
||||
export_preview_textinput = TextAreaInput(title="Export file(s) preview:", width=500, height=400)
|
||||
|
||||
def _update_preview():
|
||||
with tempfile.TemporaryDirectory() as temp_dir:
|
||||
temp_file = temp_dir + "/temp"
|
||||
export_data1 = []
|
||||
export_data2 = []
|
||||
for scan1, scan2 in zip(dataset1, dataset2):
|
||||
if scan1["export"]:
|
||||
export_data1.append(scan1)
|
||||
export_data2.append(scan2)
|
||||
|
||||
if intensity_diff_radiobutton.active:
|
||||
export_data1, export_data2 = export_data2, export_data1
|
||||
|
||||
pyzebra.export_ccl_compare(
|
||||
export_data1,
|
||||
export_data2,
|
||||
temp_file,
|
||||
export_target_select.value,
|
||||
hkl_precision=int(hkl_precision_select.value),
|
||||
)
|
||||
|
||||
exported_content = ""
|
||||
file_content = []
|
||||
for ext in EXPORT_TARGETS[export_target_select.value]:
|
||||
fname = temp_file + ext
|
||||
if os.path.isfile(fname):
|
||||
with open(fname) as f:
|
||||
content = f.read()
|
||||
exported_content += f"{ext} file:\n" + content
|
||||
else:
|
||||
content = ""
|
||||
file_content.append(content)
|
||||
|
||||
js_data.data.update(content=file_content)
|
||||
export_preview_textinput.value = exported_content
|
||||
|
||||
def export_target_select_callback(_attr, _old, new):
|
||||
js_data.data.update(ext=EXPORT_TARGETS[new])
|
||||
_update_preview()
|
||||
|
||||
export_target_select = Select(
|
||||
title="Export target:", options=list(EXPORT_TARGETS.keys()), value="fullprof", width=80
|
||||
)
|
||||
export_target_select.on_change("value", export_target_select_callback)
|
||||
js_data.data.update(ext=EXPORT_TARGETS[export_target_select.value])
|
||||
|
||||
def hkl_precision_select_callback(_attr, _old, _new):
|
||||
_update_preview()
|
||||
|
||||
hkl_precision_select = Select(
|
||||
title="hkl precision:", options=["2", "3", "4"], value="2", width=80
|
||||
)
|
||||
hkl_precision_select.on_change("value", hkl_precision_select_callback)
|
||||
|
||||
save_button = Button(label="Download File(s)", button_type="success", width=200)
|
||||
save_button.js_on_click(CustomJS(args={"js_data": js_data}, code=javaScript))
|
||||
|
||||
fitpeak_controls = row(
|
||||
column(fitparams_add_dropdown, fitparams_select, fitparams_remove_button),
|
||||
fitparams_table,
|
||||
Spacer(width=20),
|
||||
column(
|
||||
fit_from_spinner,
|
||||
lorentz_checkbox,
|
||||
area_method_div,
|
||||
area_method_radiobutton,
|
||||
intensity_diff_div,
|
||||
intensity_diff_radiobutton,
|
||||
),
|
||||
column(fit_to_spinner, proc_button, proc_all_button),
|
||||
)
|
||||
|
||||
scan_layout = column(
|
||||
scan_table,
|
||||
row(monitor_spinner, column(Spacer(height=19), restore_button)),
|
||||
row(column(Spacer(height=19), merge_button), merge_from_select),
|
||||
)
|
||||
|
||||
import_layout = column(file_select, file_open_button, upload_div, upload_button)
|
||||
|
||||
export_layout = column(
|
||||
export_preview_textinput,
|
||||
row(
|
||||
export_target_select, hkl_precision_select, column(Spacer(height=19), row(save_button))
|
||||
),
|
||||
)
|
||||
|
||||
tab_layout = column(
|
||||
row(import_layout, scan_layout, plot, Spacer(width=30), export_layout),
|
||||
row(fitpeak_controls, fit_output_textinput),
|
||||
)
|
||||
|
||||
return Panel(child=tab_layout, title="ccl compare")
|
@ -72,7 +72,7 @@ for (let i = 0; i < js_data.data['fname'].length; i++) {
|
||||
|
||||
def create():
|
||||
doc = curdoc()
|
||||
det_data = {}
|
||||
dataset = []
|
||||
fit_params = {}
|
||||
js_data = ColumnDataSource(data=dict(content=["", ""], fname=["", ""], ext=["", ""]))
|
||||
|
||||
@ -100,11 +100,28 @@ def create():
|
||||
proposal_textinput.on_change("name", proposal_textinput_callback)
|
||||
|
||||
def _init_datatable():
|
||||
scan_list = [s["idx"] for s in det_data]
|
||||
hkl = [f'{s["h"]} {s["k"]} {s["l"]}' for s in det_data]
|
||||
export = [s.get("active", True) for s in det_data]
|
||||
scan_list = [s["idx"] for s in dataset]
|
||||
hkl = [f'{s["h"]} {s["k"]} {s["l"]}' for s in dataset]
|
||||
export = [s["export"] for s in dataset]
|
||||
|
||||
twotheta = [np.median(s["twotheta"]) if "twotheta" in s else None for s in dataset]
|
||||
gamma = [np.median(s["gamma"]) if "gamma" in s else None for s in dataset]
|
||||
omega = [np.median(s["omega"]) if "omega" in s else None for s in dataset]
|
||||
chi = [np.median(s["chi"]) if "chi" in s else None for s in dataset]
|
||||
phi = [np.median(s["phi"]) if "phi" in s else None for s in dataset]
|
||||
nu = [np.median(s["nu"]) if "nu" in s else None for s in dataset]
|
||||
|
||||
scan_table_source.data.update(
|
||||
scan=scan_list, hkl=hkl, fit=[0] * len(scan_list), export=export,
|
||||
scan=scan_list,
|
||||
hkl=hkl,
|
||||
fit=[0] * len(scan_list),
|
||||
export=export,
|
||||
twotheta=twotheta,
|
||||
gamma=gamma,
|
||||
omega=omega,
|
||||
chi=chi,
|
||||
phi=phi,
|
||||
nu=nu,
|
||||
)
|
||||
scan_table_source.selected.indices = []
|
||||
scan_table_source.selected.indices = [0]
|
||||
@ -116,91 +133,122 @@ def create():
|
||||
file_select = MultiSelect(title="Available .ccl/.dat files:", width=210, height=250)
|
||||
|
||||
def file_open_button_callback():
|
||||
nonlocal det_data
|
||||
for f_ind, f_path in enumerate(file_select.value):
|
||||
nonlocal dataset
|
||||
new_data = []
|
||||
for f_path in file_select.value:
|
||||
with open(f_path) as file:
|
||||
base, ext = os.path.splitext(os.path.basename(f_path))
|
||||
file_data = pyzebra.parse_1D(file, ext)
|
||||
f_name = os.path.basename(f_path)
|
||||
base, ext = os.path.splitext(f_name)
|
||||
try:
|
||||
file_data = pyzebra.parse_1D(file, ext)
|
||||
except:
|
||||
print(f"Error loading {f_name}")
|
||||
continue
|
||||
|
||||
pyzebra.normalize_dataset(file_data, monitor_spinner.value)
|
||||
|
||||
if f_ind == 0: # first file
|
||||
det_data = file_data
|
||||
pyzebra.merge_duplicates(det_data)
|
||||
if not new_data: # first file
|
||||
new_data = file_data
|
||||
pyzebra.merge_duplicates(new_data)
|
||||
js_data.data.update(fname=[base, base])
|
||||
else:
|
||||
pyzebra.merge_datasets(det_data, file_data)
|
||||
pyzebra.merge_datasets(new_data, file_data)
|
||||
|
||||
_init_datatable()
|
||||
append_upload_button.disabled = False
|
||||
if new_data:
|
||||
dataset = new_data
|
||||
_init_datatable()
|
||||
append_upload_button.disabled = False
|
||||
|
||||
file_open_button = Button(label="Open New", width=100, disabled=True)
|
||||
file_open_button.on_click(file_open_button_callback)
|
||||
|
||||
def file_append_button_callback():
|
||||
file_data = []
|
||||
for f_path in file_select.value:
|
||||
with open(f_path) as file:
|
||||
_, ext = os.path.splitext(f_path)
|
||||
file_data = pyzebra.parse_1D(file, ext)
|
||||
f_name = os.path.basename(f_path)
|
||||
_, ext = os.path.splitext(f_name)
|
||||
try:
|
||||
file_data = pyzebra.parse_1D(file, ext)
|
||||
except:
|
||||
print(f"Error loading {f_name}")
|
||||
continue
|
||||
|
||||
pyzebra.normalize_dataset(file_data, monitor_spinner.value)
|
||||
pyzebra.merge_datasets(det_data, file_data)
|
||||
pyzebra.merge_datasets(dataset, file_data)
|
||||
|
||||
_init_datatable()
|
||||
if file_data:
|
||||
_init_datatable()
|
||||
|
||||
file_append_button = Button(label="Append", width=100, disabled=True)
|
||||
file_append_button.on_click(file_append_button_callback)
|
||||
|
||||
def upload_button_callback(_attr, _old, new):
|
||||
nonlocal det_data
|
||||
det_data = []
|
||||
for f_str, f_name in zip(new, upload_button.filename):
|
||||
def upload_button_callback(_attr, _old, _new):
|
||||
nonlocal dataset
|
||||
new_data = []
|
||||
for f_str, f_name in zip(upload_button.value, upload_button.filename):
|
||||
with io.StringIO(base64.b64decode(f_str).decode()) as file:
|
||||
base, ext = os.path.splitext(f_name)
|
||||
file_data = pyzebra.parse_1D(file, ext)
|
||||
try:
|
||||
file_data = pyzebra.parse_1D(file, ext)
|
||||
except:
|
||||
print(f"Error loading {f_name}")
|
||||
continue
|
||||
|
||||
pyzebra.normalize_dataset(file_data, monitor_spinner.value)
|
||||
|
||||
if not det_data: # first file
|
||||
det_data = file_data
|
||||
pyzebra.merge_duplicates(det_data)
|
||||
if not new_data: # first file
|
||||
new_data = file_data
|
||||
pyzebra.merge_duplicates(new_data)
|
||||
js_data.data.update(fname=[base, base])
|
||||
else:
|
||||
pyzebra.merge_datasets(det_data, file_data)
|
||||
pyzebra.merge_datasets(new_data, file_data)
|
||||
|
||||
_init_datatable()
|
||||
append_upload_button.disabled = False
|
||||
if new_data:
|
||||
dataset = new_data
|
||||
_init_datatable()
|
||||
append_upload_button.disabled = False
|
||||
|
||||
upload_div = Div(text="or upload new .ccl/.dat files:", margin=(5, 5, 0, 5))
|
||||
upload_button = FileInput(accept=".ccl,.dat", multiple=True, width=200)
|
||||
upload_button.on_change("value", upload_button_callback)
|
||||
# for on_change("value", ...) or on_change("filename", ...),
|
||||
# see https://github.com/bokeh/bokeh/issues/11461
|
||||
upload_button.on_change("filename", upload_button_callback)
|
||||
|
||||
def append_upload_button_callback(_attr, _old, new):
|
||||
for f_str, f_name in zip(new, append_upload_button.filename):
|
||||
def append_upload_button_callback(_attr, _old, _new):
|
||||
file_data = []
|
||||
for f_str, f_name in zip(append_upload_button.value, append_upload_button.filename):
|
||||
with io.StringIO(base64.b64decode(f_str).decode()) as file:
|
||||
_, ext = os.path.splitext(f_name)
|
||||
file_data = pyzebra.parse_1D(file, ext)
|
||||
try:
|
||||
file_data = pyzebra.parse_1D(file, ext)
|
||||
except:
|
||||
print(f"Error loading {f_name}")
|
||||
continue
|
||||
|
||||
pyzebra.normalize_dataset(file_data, monitor_spinner.value)
|
||||
pyzebra.merge_datasets(det_data, file_data)
|
||||
pyzebra.merge_datasets(dataset, file_data)
|
||||
|
||||
_init_datatable()
|
||||
if file_data:
|
||||
_init_datatable()
|
||||
|
||||
append_upload_div = Div(text="append extra files:", margin=(5, 5, 0, 5))
|
||||
append_upload_button = FileInput(accept=".ccl,.dat", multiple=True, width=200, disabled=True)
|
||||
append_upload_button.on_change("value", append_upload_button_callback)
|
||||
# for on_change("value", ...) or on_change("filename", ...),
|
||||
# see https://github.com/bokeh/bokeh/issues/11461
|
||||
append_upload_button.on_change("filename", append_upload_button_callback)
|
||||
|
||||
def monitor_spinner_callback(_attr, old, new):
|
||||
if det_data:
|
||||
pyzebra.normalize_dataset(det_data, new)
|
||||
if dataset:
|
||||
pyzebra.normalize_dataset(dataset, new)
|
||||
_update_plot()
|
||||
|
||||
monitor_spinner = Spinner(title="Monitor:", mode="int", value=100_000, low=1, width=145)
|
||||
monitor_spinner.on_change("value", monitor_spinner_callback)
|
||||
|
||||
def _update_datatable():
|
||||
fit_ok = [(1 if "fit" in scan else 0) for scan in det_data]
|
||||
export = [scan.get("active", True) for scan in det_data]
|
||||
def _update_table():
|
||||
fit_ok = [(1 if "fit" in scan else 0) for scan in dataset]
|
||||
export = [scan["export"] for scan in dataset]
|
||||
scan_table_source.data.update(fit=fit_ok, export=export)
|
||||
|
||||
def _update_plot():
|
||||
@ -260,7 +308,7 @@ def create():
|
||||
|
||||
plot_scatter_source = ColumnDataSource(dict(x=[0], y=[0], y_upper=[0], y_lower=[0]))
|
||||
plot_scatter = plot.add_glyph(
|
||||
plot_scatter_source, Scatter(x="x", y="y", line_color="steelblue")
|
||||
plot_scatter_source, Scatter(x="x", y="y", line_color="steelblue", fill_color="steelblue")
|
||||
)
|
||||
plot.add_layout(Whisker(source=plot_scatter_source, base="x", upper="y_upper", lower="y_lower"))
|
||||
|
||||
@ -317,10 +365,27 @@ def create():
|
||||
|
||||
_update_plot()
|
||||
|
||||
def scan_table_source_callback(_attr, _old, _new):
|
||||
def scan_table_source_callback(_attr, _old, new):
|
||||
# unfortunately, we don't know if the change comes from data update or user input
|
||||
# also `old` and `new` are the same for non-scalars
|
||||
for scan, export in zip(dataset, new["export"]):
|
||||
scan["export"] = export
|
||||
_update_preview()
|
||||
|
||||
scan_table_source = ColumnDataSource(dict(scan=[], hkl=[], fit=[], export=[]))
|
||||
scan_table_source = ColumnDataSource(
|
||||
dict(
|
||||
scan=[],
|
||||
hkl=[],
|
||||
fit=[],
|
||||
export=[],
|
||||
twotheta=[],
|
||||
gamma=[],
|
||||
omega=[],
|
||||
chi=[],
|
||||
phi=[],
|
||||
nu=[],
|
||||
)
|
||||
)
|
||||
scan_table_source.on_change("data", scan_table_source_callback)
|
||||
scan_table_source.selected.on_change("indices", scan_table_select_callback)
|
||||
|
||||
@ -331,28 +396,34 @@ def create():
|
||||
TableColumn(field="hkl", title="hkl", editor=CellEditor(), width=100),
|
||||
TableColumn(field="fit", title="Fit", editor=CellEditor(), width=50),
|
||||
TableColumn(field="export", title="Export", editor=CheckboxEditor(), width=50),
|
||||
TableColumn(field="twotheta", title="2theta", editor=CellEditor(), width=50),
|
||||
TableColumn(field="gamma", title="gamma", editor=CellEditor(), width=50),
|
||||
TableColumn(field="omega", title="omega", editor=CellEditor(), width=50),
|
||||
TableColumn(field="chi", title="chi", editor=CellEditor(), width=50),
|
||||
TableColumn(field="phi", title="phi", editor=CellEditor(), width=50),
|
||||
TableColumn(field="nu", title="nu", editor=CellEditor(), width=50),
|
||||
],
|
||||
width=310, # +60 because of the index column
|
||||
width=310, # +60 because of the index column, but excluding twotheta onwards
|
||||
height=350,
|
||||
autosize_mode="none",
|
||||
editable=True,
|
||||
)
|
||||
|
||||
def _get_selected_scan():
|
||||
return det_data[scan_table_source.selected.indices[0]]
|
||||
return dataset[scan_table_source.selected.indices[0]]
|
||||
|
||||
merge_from_select = Select(title="scan:", width=145)
|
||||
|
||||
def merge_button_callback():
|
||||
scan_into = _get_selected_scan()
|
||||
scan_from = det_data[int(merge_from_select.value)]
|
||||
scan_from = dataset[int(merge_from_select.value)]
|
||||
|
||||
if scan_into is scan_from:
|
||||
print("WARNING: Selected scans for merging are identical")
|
||||
return
|
||||
|
||||
pyzebra.merge_scans(scan_into, scan_from)
|
||||
_update_datatable()
|
||||
_update_table()
|
||||
_update_plot()
|
||||
|
||||
merge_button = Button(label="Merge into current", width=145)
|
||||
@ -360,7 +431,7 @@ def create():
|
||||
|
||||
def restore_button_callback():
|
||||
pyzebra.restore_scan(_get_selected_scan())
|
||||
_update_datatable()
|
||||
_update_table()
|
||||
_update_plot()
|
||||
|
||||
restore_button = Button(label="Restore scan", width=145)
|
||||
@ -486,8 +557,8 @@ def create():
|
||||
fit_output_textinput = TextAreaInput(title="Fit results:", width=750, height=200)
|
||||
|
||||
def proc_all_button_callback():
|
||||
for scan, export in zip(det_data, scan_table_source.data["export"]):
|
||||
if export:
|
||||
for scan in dataset:
|
||||
if scan["export"]:
|
||||
pyzebra.fit_scan(
|
||||
scan, fit_params, fit_from=fit_from_spinner.value, fit_to=fit_to_spinner.value
|
||||
)
|
||||
@ -498,7 +569,7 @@ def create():
|
||||
)
|
||||
|
||||
_update_plot()
|
||||
_update_datatable()
|
||||
_update_table()
|
||||
|
||||
proc_all_button = Button(label="Process All", button_type="primary", width=145)
|
||||
proc_all_button.on_click(proc_all_button_callback)
|
||||
@ -515,7 +586,7 @@ def create():
|
||||
)
|
||||
|
||||
_update_plot()
|
||||
_update_datatable()
|
||||
_update_table()
|
||||
|
||||
proc_button = Button(label="Process Current", width=145)
|
||||
proc_button.on_click(proc_button_callback)
|
||||
@ -531,9 +602,9 @@ def create():
|
||||
with tempfile.TemporaryDirectory() as temp_dir:
|
||||
temp_file = temp_dir + "/temp"
|
||||
export_data = []
|
||||
for s, export in zip(det_data, scan_table_source.data["export"]):
|
||||
if export:
|
||||
export_data.append(s)
|
||||
for scan in dataset:
|
||||
if scan["export"]:
|
||||
export_data.append(scan)
|
||||
|
||||
pyzebra.export_1D(
|
||||
export_data,
|
||||
|
745
pyzebra/app/panel_ccl_prepare.py
Normal file
745
pyzebra/app/panel_ccl_prepare.py
Normal file
@ -0,0 +1,745 @@
|
||||
import base64
|
||||
import io
|
||||
import os
|
||||
import subprocess
|
||||
import tempfile
|
||||
|
||||
import numpy as np
|
||||
from bokeh.layouts import column, row
|
||||
from bokeh.models import (
|
||||
Arrow,
|
||||
BoxZoomTool,
|
||||
Button,
|
||||
CheckboxGroup,
|
||||
ColumnDataSource,
|
||||
CustomJS,
|
||||
Div,
|
||||
Ellipse,
|
||||
FileInput,
|
||||
Legend,
|
||||
LegendItem,
|
||||
LinearAxis,
|
||||
MultiLine,
|
||||
MultiSelect,
|
||||
NormalHead,
|
||||
NumericInput,
|
||||
Panel,
|
||||
PanTool,
|
||||
Plot,
|
||||
RadioGroup,
|
||||
Range1d,
|
||||
ResetTool,
|
||||
Scatter,
|
||||
Select,
|
||||
Spacer,
|
||||
Spinner,
|
||||
Text,
|
||||
TextAreaInput,
|
||||
TextInput,
|
||||
WheelZoomTool,
|
||||
)
|
||||
from bokeh.palettes import Dark2
|
||||
|
||||
import pyzebra
|
||||
|
||||
|
||||
javaScript = """
|
||||
let j = 0;
|
||||
for (let i = 0; i < js_data.data['fname'].length; i++) {
|
||||
if (js_data.data['content'][i] === "") continue;
|
||||
|
||||
setTimeout(function() {
|
||||
const blob = new Blob([js_data.data['content'][i]], {type: 'text/plain'})
|
||||
const link = document.createElement('a');
|
||||
document.body.appendChild(link);
|
||||
const url = window.URL.createObjectURL(blob);
|
||||
link.href = url;
|
||||
link.download = js_data.data['fname'][i];
|
||||
link.click();
|
||||
window.URL.revokeObjectURL(url);
|
||||
document.body.removeChild(link);
|
||||
}, 100 * j)
|
||||
|
||||
j++;
|
||||
}
|
||||
"""
|
||||
|
||||
ANG_CHUNK_DEFAULTS = {"2theta": 30, "gamma": 30, "omega": 30, "chi": 35, "phi": 35, "nu": 10}
|
||||
SORT_OPT_BI = ["2theta", "chi", "phi", "omega"]
|
||||
SORT_OPT_NB = ["gamma", "nu", "omega"]
|
||||
|
||||
|
||||
def create():
|
||||
ang_lims = None
|
||||
cif_data = None
|
||||
params = None
|
||||
res_files = {}
|
||||
js_data = ColumnDataSource(data=dict(content=[""], fname=[""]))
|
||||
|
||||
anglim_div = Div(text="Angular min/max limits:", margin=(5, 5, 0, 5))
|
||||
sttgamma_ti = TextInput(title="stt/gamma", width=100)
|
||||
omega_ti = TextInput(title="omega", width=100)
|
||||
chinu_ti = TextInput(title="chi/nu", width=100)
|
||||
phi_ti = TextInput(title="phi", width=100)
|
||||
|
||||
def _update_ang_lims(ang_lims):
|
||||
sttgamma_ti.value = " ".join(ang_lims["gamma"][:2])
|
||||
omega_ti.value = " ".join(ang_lims["omega"][:2])
|
||||
if ang_lims["geom"] == "nb":
|
||||
chinu_ti.value = " ".join(ang_lims["nu"][:2])
|
||||
phi_ti.value = ""
|
||||
else: # ang_lims["geom"] == "bi"
|
||||
chinu_ti.value = " ".join(ang_lims["chi"][:2])
|
||||
phi_ti.value = " ".join(ang_lims["phi"][:2])
|
||||
|
||||
def _update_params(params):
|
||||
if "WAVE" in params:
|
||||
wavelen_input.value = params["WAVE"]
|
||||
if "SPGR" in params:
|
||||
cryst_space_group.value = params["SPGR"]
|
||||
if "CELL" in params:
|
||||
cryst_cell.value = params["CELL"]
|
||||
if "UBMAT" in params:
|
||||
ub_matrix.value = " ".join(params["UBMAT"])
|
||||
if "HLIM" in params:
|
||||
ranges_hkl.value = params["HLIM"]
|
||||
if "SRANG" in params:
|
||||
ranges_srang.value = params["SRANG"]
|
||||
if "lattiCE" in params:
|
||||
magstruct_lattice.value = params["lattiCE"]
|
||||
if "kvect" in params:
|
||||
magstruct_kvec.value = params["kvect"]
|
||||
|
||||
def open_geom_callback(_attr, _old, new):
|
||||
nonlocal ang_lims
|
||||
with io.StringIO(base64.b64decode(new).decode()) as fileobj:
|
||||
ang_lims = pyzebra.read_geom_file(fileobj)
|
||||
_update_ang_lims(ang_lims)
|
||||
|
||||
open_geom_div = Div(text="Open GEOM:")
|
||||
open_geom = FileInput(accept=".geom", width=200)
|
||||
open_geom.on_change("value", open_geom_callback)
|
||||
|
||||
def open_cfl_callback(_attr, _old, new):
|
||||
nonlocal params
|
||||
with io.StringIO(base64.b64decode(new).decode()) as fileobj:
|
||||
params = pyzebra.read_cfl_file(fileobj)
|
||||
_update_params(params)
|
||||
|
||||
open_cfl_div = Div(text="Open CFL:")
|
||||
open_cfl = FileInput(accept=".cfl", width=200)
|
||||
open_cfl.on_change("value", open_cfl_callback)
|
||||
|
||||
def open_cif_callback(_attr, _old, new):
|
||||
nonlocal cif_data
|
||||
with io.StringIO(base64.b64decode(new).decode()) as fileobj:
|
||||
cif_data = pyzebra.read_cif_file(fileobj)
|
||||
_update_params(cif_data)
|
||||
|
||||
open_cif_div = Div(text="Open CIF:")
|
||||
open_cif = FileInput(accept=".cif", width=200)
|
||||
open_cif.on_change("value", open_cif_callback)
|
||||
|
||||
wavelen_div = Div(text="Wavelength:", margin=(5, 5, 0, 5))
|
||||
wavelen_input = TextInput(title="value", width=70)
|
||||
|
||||
def wavelen_select_callback(_attr, _old, new):
|
||||
if new:
|
||||
wavelen_input.value = new
|
||||
else:
|
||||
wavelen_input.value = ""
|
||||
|
||||
wavelen_select = Select(
|
||||
title="preset", options=["", "0.788", "1.178", "1.383", "2.305"], width=70
|
||||
)
|
||||
wavelen_select.on_change("value", wavelen_select_callback)
|
||||
|
||||
cryst_div = Div(text="Crystal structure:", margin=(5, 5, 0, 5))
|
||||
cryst_space_group = TextInput(title="space group", width=100)
|
||||
cryst_cell = TextInput(title="cell", width=250)
|
||||
|
||||
def ub_matrix_calc_callback():
|
||||
params = dict()
|
||||
params["SPGR"] = cryst_space_group.value
|
||||
params["CELL"] = cryst_cell.value
|
||||
ub = pyzebra.calc_ub_matrix(params)
|
||||
ub_matrix.value = " ".join(ub)
|
||||
|
||||
ub_matrix_calc = Button(label="UB matrix:", button_type="primary", width=100)
|
||||
ub_matrix_calc.on_click(ub_matrix_calc_callback)
|
||||
|
||||
ub_matrix = TextInput(title="\u200B", width=600)
|
||||
|
||||
ranges_div = Div(text="Ranges:", margin=(5, 5, 0, 5))
|
||||
ranges_hkl = TextInput(title="HKL", value="-25 25 -25 25 -25 25", width=250)
|
||||
ranges_srang = TextInput(title="sin(θ)/λ", value="0.0 0.7", width=100)
|
||||
|
||||
magstruct_div = Div(text="Magnetic structure:", margin=(5, 5, 0, 5))
|
||||
magstruct_lattice = TextInput(title="lattice", width=100)
|
||||
magstruct_kvec = TextAreaInput(title="k vector", width=150)
|
||||
|
||||
def sorting0_callback(_attr, _old, new):
|
||||
sorting_0_dt.value = ANG_CHUNK_DEFAULTS[new]
|
||||
|
||||
def sorting1_callback(_attr, _old, new):
|
||||
sorting_1_dt.value = ANG_CHUNK_DEFAULTS[new]
|
||||
|
||||
def sorting2_callback(_attr, _old, new):
|
||||
sorting_2_dt.value = ANG_CHUNK_DEFAULTS[new]
|
||||
|
||||
sorting_0 = Select(title="1st", width=100)
|
||||
sorting_0.on_change("value", sorting0_callback)
|
||||
sorting_0_dt = NumericInput(title="Δ", width=70)
|
||||
sorting_1 = Select(title="2nd", width=100)
|
||||
sorting_1.on_change("value", sorting1_callback)
|
||||
sorting_1_dt = NumericInput(title="Δ", width=70)
|
||||
sorting_2 = Select(title="3rd", width=100)
|
||||
sorting_2.on_change("value", sorting2_callback)
|
||||
sorting_2_dt = NumericInput(title="Δ", width=70)
|
||||
|
||||
def geom_radiogroup_callback(_attr, _old, new):
|
||||
nonlocal ang_lims, params
|
||||
if new == 0:
|
||||
geom_file = pyzebra.get_zebraBI_default_geom_file()
|
||||
sort_opt = SORT_OPT_BI
|
||||
else:
|
||||
geom_file = pyzebra.get_zebraNB_default_geom_file()
|
||||
sort_opt = SORT_OPT_NB
|
||||
cfl_file = pyzebra.get_zebra_default_cfl_file()
|
||||
|
||||
ang_lims = pyzebra.read_geom_file(geom_file)
|
||||
_update_ang_lims(ang_lims)
|
||||
params = pyzebra.read_cfl_file(cfl_file)
|
||||
_update_params(params)
|
||||
|
||||
sorting_0.options = sorting_1.options = sorting_2.options = sort_opt
|
||||
sorting_0.value = sort_opt[0]
|
||||
sorting_1.value = sort_opt[1]
|
||||
sorting_2.value = sort_opt[2]
|
||||
|
||||
geom_radiogroup_div = Div(text="Geometry:", margin=(5, 5, 0, 5))
|
||||
geom_radiogroup = RadioGroup(labels=["bisecting", "normal beam"], width=150)
|
||||
geom_radiogroup.on_change("active", geom_radiogroup_callback)
|
||||
geom_radiogroup.active = 0
|
||||
|
||||
def go_button_callback():
|
||||
ang_lims["gamma"][0], ang_lims["gamma"][1] = sttgamma_ti.value.strip().split()
|
||||
ang_lims["omega"][0], ang_lims["omega"][1] = omega_ti.value.strip().split()
|
||||
if ang_lims["geom"] == "nb":
|
||||
ang_lims["nu"][0], ang_lims["nu"][1] = chinu_ti.value.strip().split()
|
||||
else: # ang_lims["geom"] == "bi"
|
||||
ang_lims["chi"][0], ang_lims["chi"][1] = chinu_ti.value.strip().split()
|
||||
ang_lims["phi"][0], ang_lims["phi"][1] = phi_ti.value.strip().split()
|
||||
|
||||
if cif_data:
|
||||
params.update(cif_data)
|
||||
|
||||
params["WAVE"] = wavelen_input.value
|
||||
params["SPGR"] = cryst_space_group.value
|
||||
params["CELL"] = cryst_cell.value
|
||||
params["UBMAT"] = ub_matrix.value.split()
|
||||
params["HLIM"] = ranges_hkl.value
|
||||
params["SRANG"] = ranges_srang.value
|
||||
params["lattiCE"] = magstruct_lattice.value
|
||||
kvects = magstruct_kvec.value.split("\n")
|
||||
|
||||
with tempfile.TemporaryDirectory() as temp_dir:
|
||||
geom_path = os.path.join(temp_dir, "zebra.geom")
|
||||
if open_geom.value:
|
||||
geom_template = io.StringIO(base64.b64decode(open_geom.value).decode())
|
||||
else:
|
||||
geom_template = None
|
||||
pyzebra.export_geom_file(geom_path, ang_lims, geom_template)
|
||||
|
||||
print(f"Content of {geom_path}:")
|
||||
with open(geom_path) as f:
|
||||
print(f.read())
|
||||
|
||||
priority = [sorting_0.value, sorting_1.value, sorting_2.value]
|
||||
chunks = [sorting_0_dt.value, sorting_1_dt.value, sorting_2_dt.value]
|
||||
if geom_radiogroup.active == 0:
|
||||
sort_hkl_file = pyzebra.sort_hkl_file_bi
|
||||
priority.extend(set(SORT_OPT_BI) - set(priority))
|
||||
else:
|
||||
sort_hkl_file = pyzebra.sort_hkl_file_nb
|
||||
|
||||
# run sxtal_refgen for each kvect provided
|
||||
for i, kvect in enumerate(kvects, start=1):
|
||||
params["kvect"] = kvect
|
||||
if open_cfl.filename:
|
||||
base_fname = f"{os.path.splitext(open_cfl.filename)[0]}_{i}"
|
||||
else:
|
||||
base_fname = f"zebra_{i}"
|
||||
|
||||
cfl_path = os.path.join(temp_dir, base_fname + ".cfl")
|
||||
if open_cfl.value:
|
||||
cfl_template = io.StringIO(base64.b64decode(open_cfl.value).decode())
|
||||
else:
|
||||
cfl_template = None
|
||||
pyzebra.export_cfl_file(cfl_path, params, cfl_template)
|
||||
|
||||
print(f"Content of {cfl_path}:")
|
||||
with open(cfl_path) as f:
|
||||
print(f.read())
|
||||
|
||||
comp_proc = subprocess.run(
|
||||
[pyzebra.SXTAL_REFGEN_PATH, cfl_path],
|
||||
cwd=temp_dir,
|
||||
check=True,
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.STDOUT,
|
||||
text=True,
|
||||
)
|
||||
print(" ".join(comp_proc.args))
|
||||
print(comp_proc.stdout)
|
||||
|
||||
if i == 1: # all hkl files are identical, so keep only one
|
||||
hkl_fname = base_fname + ".hkl"
|
||||
hkl_fpath = os.path.join(temp_dir, hkl_fname)
|
||||
with open(hkl_fpath) as f:
|
||||
res_files[hkl_fname] = f.read()
|
||||
|
||||
hkl_fname_sorted = base_fname + "_sorted.hkl"
|
||||
hkl_fpath_sorted = os.path.join(temp_dir, hkl_fname_sorted)
|
||||
sort_hkl_file(hkl_fpath, hkl_fpath_sorted, priority, chunks)
|
||||
with open(hkl_fpath_sorted) as f:
|
||||
res_files[hkl_fname_sorted] = f.read()
|
||||
|
||||
mhkl_fname = base_fname + ".mhkl"
|
||||
mhkl_fpath = os.path.join(temp_dir, mhkl_fname)
|
||||
with open(mhkl_fpath) as f:
|
||||
res_files[mhkl_fname] = f.read()
|
||||
|
||||
mhkl_fname_sorted = base_fname + "_sorted.mhkl"
|
||||
mhkl_fpath_sorted = os.path.join(temp_dir, hkl_fname_sorted)
|
||||
sort_hkl_file(mhkl_fpath, mhkl_fpath_sorted, priority, chunks)
|
||||
with open(mhkl_fpath_sorted) as f:
|
||||
res_files[mhkl_fname_sorted] = f.read()
|
||||
|
||||
created_lists.options = list(res_files)
|
||||
|
||||
go_button = Button(label="GO", button_type="primary", width=50)
|
||||
go_button.on_click(go_button_callback)
|
||||
|
||||
def created_lists_callback(_attr, _old, new):
|
||||
sel_file = new[0]
|
||||
file_text = res_files[sel_file]
|
||||
preview_lists.value = file_text
|
||||
js_data.data.update(content=[file_text], fname=[sel_file])
|
||||
|
||||
created_lists = MultiSelect(title="Created lists:", width=200, height=150)
|
||||
created_lists.on_change("value", created_lists_callback)
|
||||
preview_lists = TextAreaInput(title="Preview selected list:", width=600, height=150)
|
||||
|
||||
download_file = Button(label="Download file", button_type="success", width=200)
|
||||
download_file.js_on_click(CustomJS(args={"js_data": js_data}, code=javaScript))
|
||||
plot_list = Button(label="Plot selected list", button_type="primary", width=200, disabled=True)
|
||||
|
||||
measured_data_div = Div(text="Measured data:")
|
||||
measured_data = FileInput(accept=".ccl", multiple=True, width=200)
|
||||
|
||||
min_grid_x = -10
|
||||
max_grid_x = 10
|
||||
min_grid_y = -5
|
||||
max_grid_y = 5
|
||||
cmap = Dark2[8]
|
||||
syms = ["circle", "inverted_triangle", "square", "diamond", "star", "triangle"]
|
||||
|
||||
# Define resolution function
|
||||
def _res_fun(stt, wave, res_mult):
|
||||
expr = np.tan(stt / 2 * np.pi / 180)
|
||||
fwhm = np.sqrt(0.4639 * expr ** 2 - 0.4452 * expr + 0.1506) * res_mult # res in deg
|
||||
return fwhm
|
||||
|
||||
def plot_file_callback():
|
||||
orth_dir = list(map(float, hkl_normal.value.split()))
|
||||
cut_tol = hkl_delta.value
|
||||
cut_or = hkl_cut.value
|
||||
x_dir = list(map(float, hkl_in_plane_x.value.split()))
|
||||
y_dir = list(map(float, hkl_in_plane_y.value.split()))
|
||||
|
||||
k = np.array(k_vectors.value.split()).astype(float).reshape(3, 3)
|
||||
tol_k = 0.1
|
||||
|
||||
# Plotting options
|
||||
grid_flag = 1
|
||||
grid_minor_flag = 1
|
||||
grid_div = 2 # Number of minor division lines per unit
|
||||
|
||||
# different symbols based on file number
|
||||
file_flag = 0 in disting_opt_cb.active
|
||||
# scale marker size according to intensity
|
||||
intensity_flag = 1 in disting_opt_cb.active
|
||||
# use color to mark different propagation vectors
|
||||
prop_legend_flag = 2 in disting_opt_cb.active
|
||||
# use resolution ellipsis
|
||||
res_flag = disting_opt_rb.active
|
||||
# multiplier for resolution function (in case of samples with large mosaicity)
|
||||
res_mult = res_mult_ni.value
|
||||
|
||||
md_fnames = measured_data.filename
|
||||
md_fdata = measured_data.value
|
||||
|
||||
# Load first data cile, read angles and define matrices to perform conversion to cartesian coordinates and back
|
||||
with io.StringIO(base64.b64decode(md_fdata[0]).decode()) as file:
|
||||
_, ext = os.path.splitext(md_fnames[0])
|
||||
try:
|
||||
file_data = pyzebra.parse_1D(file, ext)
|
||||
except:
|
||||
print(f"Error loading {md_fnames[0]}")
|
||||
return
|
||||
|
||||
alpha = file_data[0]["alpha_cell"] * np.pi / 180.0
|
||||
beta = file_data[0]["beta_cell"] * np.pi / 180.0
|
||||
gamma = file_data[0]["gamma_cell"] * np.pi / 180.0
|
||||
|
||||
# reciprocal angle parameters
|
||||
alpha_star = np.arccos(
|
||||
(np.cos(beta) * np.cos(gamma) - np.cos(alpha)) / (np.sin(beta) * np.sin(gamma))
|
||||
)
|
||||
beta_star = np.arccos(
|
||||
(np.cos(alpha) * np.cos(gamma) - np.cos(beta)) / (np.sin(alpha) * np.sin(gamma))
|
||||
)
|
||||
gamma_star = np.arccos(
|
||||
(np.cos(alpha) * np.cos(beta) - np.cos(gamma)) / (np.sin(alpha) * np.sin(beta))
|
||||
)
|
||||
|
||||
# conversion matrix:
|
||||
M = np.array(
|
||||
[
|
||||
[1, np.cos(gamma_star), np.cos(beta_star)],
|
||||
[0, np.sin(gamma_star), -np.sin(beta_star) * np.cos(alpha)],
|
||||
[0, 0, np.sin(beta_star) * np.sin(alpha)],
|
||||
]
|
||||
)
|
||||
M_inv = np.linalg.inv(M)
|
||||
|
||||
# Calculate in-plane y-direction
|
||||
x_c = M @ x_dir
|
||||
y_c = M @ y_dir
|
||||
o_c = M @ orth_dir
|
||||
|
||||
# Normalize all directions
|
||||
y_c = y_c / np.linalg.norm(y_c)
|
||||
x_c = x_c / np.linalg.norm(x_c)
|
||||
o_c = o_c / np.linalg.norm(o_c)
|
||||
|
||||
# Read all data
|
||||
hkl_coord = []
|
||||
intensity_vec = []
|
||||
k_flag_vec = []
|
||||
file_flag_vec = []
|
||||
res_vec_x = []
|
||||
res_vec_y = []
|
||||
res_N = 10
|
||||
|
||||
for j in range(len(md_fnames)):
|
||||
with io.StringIO(base64.b64decode(md_fdata[j]).decode()) as file:
|
||||
_, ext = os.path.splitext(md_fnames[j])
|
||||
try:
|
||||
file_data = pyzebra.parse_1D(file, ext)
|
||||
except:
|
||||
print(f"Error loading {md_fnames[j]}")
|
||||
return
|
||||
|
||||
# Loop throguh all data
|
||||
for scan in file_data:
|
||||
om = scan["omega"]
|
||||
gammad = scan["twotheta"]
|
||||
chi = scan["chi"]
|
||||
phi = scan["phi"]
|
||||
nud = 0 # 1d detector
|
||||
ub = scan["ub"]
|
||||
ddist = float(scan["detectorDistance"])
|
||||
counts = scan["counts"]
|
||||
mon = scan["monitor"]
|
||||
|
||||
# Determine wavelength from mcvl value (is wavelength stored anywhere???)
|
||||
mcvl = scan["mcvl"]
|
||||
if mcvl == 2.2:
|
||||
wave = 1.178
|
||||
elif mcvl == 7.0:
|
||||
wave = 1.383
|
||||
else:
|
||||
wave = 2.3
|
||||
|
||||
# Calculate resolution in degrees
|
||||
res = _res_fun(gammad, wave, res_mult)
|
||||
|
||||
# convert to resolution in hkl along scan line
|
||||
ang2hkl_1d = pyzebra.ang2hkl_1d
|
||||
res_x = []
|
||||
res_y = []
|
||||
for _om in np.linspace(om[0], om[-1], num=res_N):
|
||||
expr1 = ang2hkl_1d(wave, ddist, gammad, _om + res / 2, chi, phi, nud, ub)
|
||||
expr2 = ang2hkl_1d(wave, ddist, gammad, _om - res / 2, chi, phi, nud, ub)
|
||||
hkl_temp = M @ (np.abs(expr1 - expr2) / 2)
|
||||
res_x.append(hkl_temp[0])
|
||||
res_y.append(hkl_temp[1])
|
||||
|
||||
# Get first and final hkl
|
||||
hkl1 = ang2hkl_1d(wave, ddist, gammad, om[0], chi, phi, nud, ub)
|
||||
hkl2 = ang2hkl_1d(wave, ddist, gammad, om[-1], chi, phi, nud, ub)
|
||||
|
||||
# Get hkl at best intensity
|
||||
hkl_m = ang2hkl_1d(wave, ddist, gammad, om[np.argmax(counts)], chi, phi, nud, ub)
|
||||
|
||||
# Estimate intensity for marker size scaling
|
||||
y1 = counts[0]
|
||||
y2 = counts[-1]
|
||||
x1 = om[0]
|
||||
x2 = om[-1]
|
||||
a = (y1 - y2) / (x1 - x2)
|
||||
b = y1 - a * x1
|
||||
intensity_exp = np.sum(counts - (a * om + b))
|
||||
c = int(intensity_exp / mon * 10000)
|
||||
|
||||
# Recognize k_flag_vec
|
||||
min_hkl_m = np.minimum(1 - hkl_m % 1, hkl_m % 1)
|
||||
for j2, _k in enumerate(k):
|
||||
if all(np.abs(min_hkl_m - _k) < tol_k):
|
||||
k_flag_vec.append(j2)
|
||||
break
|
||||
else:
|
||||
k_flag_vec.append(len(k))
|
||||
|
||||
# Save data
|
||||
hkl_coord.append([hkl1, hkl2, hkl_m])
|
||||
intensity_vec.append(c)
|
||||
file_flag_vec.append(j)
|
||||
res_vec_x.append(res_x)
|
||||
res_vec_y.append(res_y)
|
||||
|
||||
plot.x_range.start = plot.x_range.reset_start = -2
|
||||
plot.x_range.end = plot.x_range.reset_end = 5
|
||||
plot.y_range.start = plot.y_range.reset_start = -4
|
||||
plot.y_range.end = plot.y_range.reset_end = 3.5
|
||||
|
||||
xs, ys = [], []
|
||||
xs_minor, ys_minor = [], []
|
||||
if grid_flag:
|
||||
for yy in np.arange(min_grid_y, max_grid_y, 1):
|
||||
hkl1 = M @ [0, yy, 0]
|
||||
xs.append([min_grid_y, max_grid_y])
|
||||
ys.append([hkl1[1], hkl1[1]])
|
||||
|
||||
for xx in np.arange(min_grid_x, max_grid_x, 1):
|
||||
hkl1 = M @ [xx, min_grid_x, 0]
|
||||
hkl2 = M @ [xx, max_grid_x, 0]
|
||||
xs.append([hkl1[0], hkl2[0]])
|
||||
ys.append([hkl1[1], hkl2[1]])
|
||||
|
||||
if grid_minor_flag:
|
||||
for yy in np.arange(min_grid_y, max_grid_y, 1 / grid_div):
|
||||
hkl1 = M @ [0, yy, 0]
|
||||
xs_minor.append([min_grid_y, max_grid_y])
|
||||
ys_minor.append([hkl1[1], hkl1[1]])
|
||||
|
||||
for xx in np.arange(min_grid_x, max_grid_x, 1 / grid_div):
|
||||
hkl1 = M @ [xx, min_grid_x, 0]
|
||||
hkl2 = M @ [xx, max_grid_x, 0]
|
||||
xs_minor.append([hkl1[0], hkl2[0]])
|
||||
ys_minor.append([hkl1[1], hkl2[1]])
|
||||
|
||||
grid_source.data.update(xs=xs, ys=ys)
|
||||
minor_grid_source.data.update(xs=xs_minor, ys=ys_minor)
|
||||
|
||||
el_x, el_y, el_w, el_h, el_c = [], [], [], [], []
|
||||
scan_xs, scan_ys, scan_x, scan_y = [], [], [], []
|
||||
scan_m, scan_s, scan_c, scan_l = [], [], [], []
|
||||
for j in range(len(hkl_coord)):
|
||||
# Get middle hkl from list
|
||||
hklm = M @ hkl_coord[j][2]
|
||||
|
||||
# Decide if point is in the cut
|
||||
proj = np.dot(hklm, o_c)
|
||||
if abs(proj - cut_or) >= cut_tol:
|
||||
continue
|
||||
|
||||
hkl1 = M @ hkl_coord[j][0]
|
||||
hkl2 = M @ hkl_coord[j][1]
|
||||
|
||||
if intensity_flag:
|
||||
markersize = max(1, int(intensity_vec[j] / max(intensity_vec) * 20))
|
||||
else:
|
||||
markersize = 4
|
||||
|
||||
if file_flag:
|
||||
plot_symbol = syms[file_flag_vec[j]]
|
||||
else:
|
||||
plot_symbol = "circle"
|
||||
|
||||
if prop_legend_flag:
|
||||
col_value = cmap[k_flag_vec[j]]
|
||||
else:
|
||||
col_value = "black"
|
||||
|
||||
if res_flag:
|
||||
# Generate series of ellipses along scan line
|
||||
el_x.extend(np.linspace(hkl1[0], hkl2[0], num=res_N))
|
||||
el_y.extend(np.linspace(hkl1[1], hkl2[1], num=res_N))
|
||||
el_w.extend(np.array(res_vec_x[j]) * 2)
|
||||
el_h.extend(np.array(res_vec_y[j]) * 2)
|
||||
el_c.extend([col_value] * res_N)
|
||||
else:
|
||||
# Plot scan line
|
||||
scan_xs.append([hkl1[0], hkl2[0]])
|
||||
scan_ys.append([hkl1[1], hkl2[1]])
|
||||
|
||||
# Plot middle point of scan
|
||||
scan_x.append(hklm[0])
|
||||
scan_y.append(hklm[1])
|
||||
scan_m.append(plot_symbol)
|
||||
scan_s.append(markersize)
|
||||
|
||||
# Color and legend label
|
||||
scan_c.append(col_value)
|
||||
scan_l.append(md_fnames[file_flag_vec[j]])
|
||||
|
||||
ellipse_source.data.update(x=el_x, y=el_y, w=el_w, h=el_h, c=el_c)
|
||||
scan_source.data.update(
|
||||
xs=scan_xs, ys=scan_ys, x=scan_x, y=scan_y, m=scan_m, s=scan_s, c=scan_c, l=scan_l,
|
||||
)
|
||||
|
||||
arrow1.visible = True
|
||||
arrow1.x_end = x_c[0]
|
||||
arrow1.y_end = x_c[1]
|
||||
arrow2.visible = True
|
||||
arrow2.x_end = y_c[0]
|
||||
arrow2.y_end = y_c[1]
|
||||
|
||||
kvect_source.data.update(
|
||||
text_x=[x_c[0] / 2, y_c[0] / 2 - 0.1],
|
||||
text_y=[x_c[1] - 0.1, y_c[1] / 2],
|
||||
text=["h", "k"],
|
||||
)
|
||||
|
||||
# Legend items for different file entries (symbol)
|
||||
legend_items = []
|
||||
if not res_flag and file_flag:
|
||||
labels, inds = np.unique(scan_source.data["l"], return_index=True)
|
||||
for label, ind in zip(labels, inds):
|
||||
legend_items.append(LegendItem(label=label, renderers=[scatter], index=ind))
|
||||
|
||||
# Legend items for propagation vector (color)
|
||||
if prop_legend_flag:
|
||||
if res_flag:
|
||||
source, render = ellipse_source, ellipse
|
||||
else:
|
||||
source, render = scan_source, mline
|
||||
|
||||
labels, inds = np.unique(source.data["c"], return_index=True)
|
||||
for label, ind in zip(labels, inds):
|
||||
label = f"k={k[cmap.index(label)]}"
|
||||
legend_items.append(LegendItem(label=label, renderers=[render], index=ind))
|
||||
|
||||
plot.legend.items = legend_items
|
||||
|
||||
plot_file = Button(label="Plot selected file(s)", button_type="primary", width=200)
|
||||
plot_file.on_click(plot_file_callback)
|
||||
|
||||
plot = Plot(x_range=Range1d(), y_range=Range1d(), plot_height=450, plot_width=600)
|
||||
plot.add_tools(PanTool(), WheelZoomTool(), BoxZoomTool(), ResetTool())
|
||||
plot.toolbar.logo = None
|
||||
|
||||
plot.add_layout(LinearAxis(), place="left")
|
||||
plot.add_layout(LinearAxis(), place="below")
|
||||
|
||||
arrow1 = Arrow(x_start=0, y_start=0, x_end=0, y_end=0, end=NormalHead(size=10), visible=False)
|
||||
plot.add_layout(arrow1)
|
||||
arrow2 = Arrow(x_start=0, y_start=0, x_end=0, y_end=0, end=NormalHead(size=10), visible=False)
|
||||
plot.add_layout(arrow2)
|
||||
|
||||
kvect_source = ColumnDataSource(dict(text_x=[], text_y=[], text=[]))
|
||||
plot.add_glyph(kvect_source, Text(x="text_x", y="text_y", text="text"))
|
||||
|
||||
grid_source = ColumnDataSource(dict(xs=[], ys=[]))
|
||||
minor_grid_source = ColumnDataSource(dict(xs=[], ys=[]))
|
||||
plot.add_glyph(grid_source, MultiLine(xs="xs", ys="ys", line_color="gray"))
|
||||
plot.add_glyph(
|
||||
minor_grid_source, MultiLine(xs="xs", ys="ys", line_color="gray", line_dash="dotted")
|
||||
)
|
||||
|
||||
ellipse_source = ColumnDataSource(dict(x=[], y=[], w=[], h=[], c=[]))
|
||||
ellipse = plot.add_glyph(
|
||||
ellipse_source, Ellipse(x="x", y="y", width="w", height="h", fill_color="c", line_color="c")
|
||||
)
|
||||
|
||||
scan_source = ColumnDataSource(dict(xs=[], ys=[], x=[], y=[], m=[], s=[], c=[], l=[]))
|
||||
mline = plot.add_glyph(scan_source, MultiLine(xs="xs", ys="ys", line_color="c"))
|
||||
scatter = plot.add_glyph(
|
||||
scan_source, Scatter(x="x", y="y", marker="m", size="s", fill_color="c", line_color="c")
|
||||
)
|
||||
|
||||
plot.add_layout(Legend(items=[], location="top_left", click_policy="hide"))
|
||||
|
||||
hkl_div = Div(text="HKL:", margin=(5, 5, 0, 5))
|
||||
hkl_normal = TextInput(title="normal", value="0 0 1", width=70)
|
||||
hkl_cut = Spinner(title="cut", value=0, step=0.1, width=70)
|
||||
hkl_delta = NumericInput(title="delta", value=0.1, mode="float", width=70)
|
||||
hkl_in_plane_x = TextInput(title="in-plane X", value="1 0 0", width=70)
|
||||
hkl_in_plane_y = TextInput(title="in-plane Y", value="0 1 0", width=70)
|
||||
|
||||
disting_opt_div = Div(text="Distinguish options:", margin=(5, 5, 0, 5))
|
||||
disting_opt_cb = CheckboxGroup(
|
||||
labels=["files (symbols)", "intensities (size)", "k vectors nucl/magn (colors)"],
|
||||
active=[0, 1, 2],
|
||||
width=200,
|
||||
)
|
||||
disting_opt_rb = RadioGroup(
|
||||
labels=["scan direction", "resolution ellipsoid"], active=0, width=200
|
||||
)
|
||||
|
||||
k_vectors = TextAreaInput(
|
||||
title="k vectors:", value="0.0 0.0 0.0\n0.5 0.0 0.0\n0.5 0.5 0.0", width=150,
|
||||
)
|
||||
res_mult_ni = NumericInput(title="Resolution mult:", value=10, mode="int", width=100)
|
||||
|
||||
fileinput_layout = row(open_cfl_div, open_cfl, open_cif_div, open_cif, open_geom_div, open_geom)
|
||||
|
||||
geom_layout = column(geom_radiogroup_div, geom_radiogroup)
|
||||
wavelen_layout = column(wavelen_div, row(wavelen_select, wavelen_input))
|
||||
anglim_layout = column(anglim_div, row(sttgamma_ti, omega_ti, chinu_ti, phi_ti))
|
||||
cryst_layout = column(cryst_div, row(cryst_space_group, cryst_cell))
|
||||
ubmat_layout = row(column(Spacer(height=18), ub_matrix_calc), ub_matrix)
|
||||
ranges_layout = column(ranges_div, row(ranges_hkl, ranges_srang))
|
||||
magstruct_layout = column(magstruct_div, row(magstruct_lattice, magstruct_kvec))
|
||||
sorting_layout = row(
|
||||
sorting_0,
|
||||
sorting_0_dt,
|
||||
Spacer(width=30),
|
||||
sorting_1,
|
||||
sorting_1_dt,
|
||||
Spacer(width=30),
|
||||
sorting_2,
|
||||
sorting_2_dt,
|
||||
)
|
||||
|
||||
column1_layout = column(
|
||||
fileinput_layout,
|
||||
Spacer(height=10),
|
||||
row(geom_layout, wavelen_layout, Spacer(width=50), anglim_layout),
|
||||
cryst_layout,
|
||||
ubmat_layout,
|
||||
row(ranges_layout, Spacer(width=50), magstruct_layout),
|
||||
row(sorting_layout, Spacer(width=30), column(Spacer(height=18), go_button)),
|
||||
row(created_lists, preview_lists),
|
||||
row(download_file, plot_list),
|
||||
)
|
||||
|
||||
hkl_layout = column(
|
||||
hkl_div,
|
||||
row(hkl_normal, hkl_cut, hkl_delta, Spacer(width=10), hkl_in_plane_x, hkl_in_plane_y),
|
||||
)
|
||||
disting_layout = column(disting_opt_div, row(disting_opt_cb, disting_opt_rb))
|
||||
|
||||
column2_layout = column(
|
||||
row(measured_data_div, measured_data, plot_file),
|
||||
plot,
|
||||
row(hkl_layout, k_vectors),
|
||||
row(disting_layout, res_mult_ni),
|
||||
)
|
||||
|
||||
tab_layout = row(column1_layout, column2_layout)
|
||||
|
||||
return Panel(child=tab_layout, title="ccl prepare")
|
@ -1,6 +1,5 @@
|
||||
import base64
|
||||
import io
|
||||
import math
|
||||
import os
|
||||
|
||||
import numpy as np
|
||||
@ -38,7 +37,6 @@ from bokeh.models import (
|
||||
WheelZoomTool,
|
||||
)
|
||||
from bokeh.palettes import Cividis256, Greys256, Plasma256 # pylint: disable=E0611
|
||||
from scipy.optimize import curve_fit
|
||||
|
||||
import pyzebra
|
||||
|
||||
@ -50,8 +48,7 @@ IMAGE_PLOT_H = int(IMAGE_H * 2) + 27
|
||||
|
||||
def create():
|
||||
doc = curdoc()
|
||||
zebra_data = []
|
||||
det_data = {}
|
||||
dataset = []
|
||||
cami_meta = {}
|
||||
|
||||
num_formatter = NumberFormatter(format="0.00", nan_format="")
|
||||
@ -110,15 +107,15 @@ def create():
|
||||
|
||||
def _init_datatable():
|
||||
file_list = []
|
||||
for scan in zebra_data:
|
||||
for scan in dataset:
|
||||
file_list.append(os.path.basename(scan["original_filename"]))
|
||||
|
||||
scan_table_source.data.update(
|
||||
file=file_list,
|
||||
param=[None] * len(zebra_data),
|
||||
frame=[None] * len(zebra_data),
|
||||
x_pos=[None] * len(zebra_data),
|
||||
y_pos=[None] * len(zebra_data),
|
||||
param=[None] * len(dataset),
|
||||
frame=[None] * len(dataset),
|
||||
x_pos=[None] * len(dataset),
|
||||
y_pos=[None] * len(dataset),
|
||||
)
|
||||
scan_table_source.selected.indices = []
|
||||
scan_table_source.selected.indices = [0]
|
||||
@ -129,7 +126,7 @@ def create():
|
||||
frame = []
|
||||
x_pos = []
|
||||
y_pos = []
|
||||
for scan in zebra_data:
|
||||
for scan in dataset:
|
||||
if "fit" in scan:
|
||||
framei = scan["fit"]["frame"]
|
||||
x_posi = scan["fit"]["x_pos"]
|
||||
@ -143,30 +140,35 @@ def create():
|
||||
|
||||
scan_table_source.data.update(frame=frame, x_pos=x_pos, y_pos=y_pos)
|
||||
|
||||
def file_open_button_callback():
|
||||
nonlocal zebra_data
|
||||
zebra_data = []
|
||||
def _file_open():
|
||||
new_data = []
|
||||
for f_name in file_select.value:
|
||||
zebra_data.append(pyzebra.read_detector_data(f_name))
|
||||
try:
|
||||
new_data.append(pyzebra.read_detector_data(f_name))
|
||||
except KeyError:
|
||||
print("Could not read data from the file.")
|
||||
return
|
||||
|
||||
dataset.extend(new_data)
|
||||
|
||||
_init_datatable()
|
||||
|
||||
def file_open_button_callback():
|
||||
nonlocal dataset
|
||||
dataset = []
|
||||
_file_open()
|
||||
|
||||
file_open_button = Button(label="Open New", width=100)
|
||||
file_open_button.on_click(file_open_button_callback)
|
||||
|
||||
def file_append_button_callback():
|
||||
for f_name in file_select.value:
|
||||
zebra_data.append(pyzebra.read_detector_data(f_name))
|
||||
|
||||
_init_datatable()
|
||||
_file_open()
|
||||
|
||||
file_append_button = Button(label="Append", width=100)
|
||||
file_append_button.on_click(file_append_button_callback)
|
||||
|
||||
# Scan select
|
||||
def scan_table_select_callback(_attr, old, new):
|
||||
nonlocal det_data
|
||||
|
||||
if not new:
|
||||
# skip empty selections
|
||||
return
|
||||
@ -181,21 +183,21 @@ def create():
|
||||
# skip unnecessary update caused by selection drop
|
||||
return
|
||||
|
||||
det_data = zebra_data[new[0]]
|
||||
scan = dataset[new[0]]
|
||||
|
||||
zebra_mode = det_data["zebra_mode"]
|
||||
zebra_mode = scan["zebra_mode"]
|
||||
if zebra_mode == "nb":
|
||||
metadata_table_source.data.update(geom=["normal beam"])
|
||||
else: # zebra_mode == "bi"
|
||||
metadata_table_source.data.update(geom=["bisecting"])
|
||||
|
||||
if "mf" in det_data:
|
||||
metadata_table_source.data.update(mf=[det_data["mf"][0]])
|
||||
if "mf" in scan:
|
||||
metadata_table_source.data.update(mf=[scan["mf"][0]])
|
||||
else:
|
||||
metadata_table_source.data.update(mf=[None])
|
||||
|
||||
if "temp" in det_data:
|
||||
metadata_table_source.data.update(temp=[det_data["temp"][0]])
|
||||
if "temp" in scan:
|
||||
metadata_table_source.data.update(temp=[scan["temp"][0]])
|
||||
else:
|
||||
metadata_table_source.data.update(temp=[None])
|
||||
|
||||
@ -235,12 +237,15 @@ def create():
|
||||
autosize_mode="none",
|
||||
)
|
||||
|
||||
def _get_selected_scan():
|
||||
return dataset[scan_table_source.selected.indices[0]]
|
||||
|
||||
def param_select_callback(_attr, _old, new):
|
||||
if new == "user defined":
|
||||
param = [None] * len(zebra_data)
|
||||
param = [None] * len(dataset)
|
||||
else:
|
||||
# TODO: which value to take?
|
||||
param = [scan[new][0] for scan in zebra_data]
|
||||
param = [scan[new][0] for scan in dataset]
|
||||
|
||||
scan_table_source.data["param"] = param
|
||||
_update_param_plot()
|
||||
@ -254,10 +259,11 @@ def create():
|
||||
param_select.on_change("value", param_select_callback)
|
||||
|
||||
def update_overview_plot():
|
||||
h5_data = det_data["data"]
|
||||
n_im, n_y, n_x = h5_data.shape
|
||||
overview_x = np.mean(h5_data, axis=1)
|
||||
overview_y = np.mean(h5_data, axis=2)
|
||||
scan = _get_selected_scan()
|
||||
counts = scan["counts"]
|
||||
n_im, n_y, n_x = counts.shape
|
||||
overview_x = np.mean(counts, axis=1)
|
||||
overview_y = np.mean(counts, axis=2)
|
||||
|
||||
# normalize for simpler colormapping
|
||||
overview_max_val = max(np.max(overview_x), np.max(overview_y))
|
||||
@ -285,10 +291,10 @@ def create():
|
||||
frame_range.reset_end = n_im
|
||||
frame_range.bounds = (0, n_im)
|
||||
|
||||
scan_motor = det_data["scan_motor"]
|
||||
scan_motor = scan["scan_motor"]
|
||||
overview_plot_y.axis[1].axis_label = f"Scanning motor, {scan_motor}"
|
||||
|
||||
var = det_data[scan_motor]
|
||||
var = scan[scan_motor]
|
||||
var_start = var[0]
|
||||
var_end = var[-1] + (var[-1] - var[0]) / (n_im - 1)
|
||||
|
||||
@ -447,68 +453,6 @@ def create():
|
||||
)
|
||||
proj_display_min_spinner.on_change("value", proj_display_min_spinner_callback)
|
||||
|
||||
def fit_event(scan):
|
||||
p0 = [1.0, 0.0, 1.0]
|
||||
maxfev = 100000
|
||||
|
||||
# wave = scan["wave"]
|
||||
# ddist = scan["ddist"]
|
||||
# cell = scan["cell"]
|
||||
|
||||
# gamma = scan["gamma"][0]
|
||||
# omega = scan["omega"][0]
|
||||
# nu = scan["nu"][0]
|
||||
# chi = scan["chi"][0]
|
||||
# phi = scan["phi"][0]
|
||||
|
||||
scan_motor = scan["scan_motor"]
|
||||
var_angle = scan[scan_motor]
|
||||
|
||||
x0 = int(np.floor(det_x_range.start))
|
||||
xN = int(np.ceil(det_x_range.end))
|
||||
y0 = int(np.floor(det_y_range.start))
|
||||
yN = int(np.ceil(det_y_range.end))
|
||||
fr0 = int(np.floor(frame_range.start))
|
||||
frN = int(np.ceil(frame_range.end))
|
||||
data_roi = scan["data"][fr0:frN, y0:yN, x0:xN]
|
||||
|
||||
cnts = np.sum(data_roi, axis=(1, 2))
|
||||
coeff, _ = curve_fit(gauss, range(len(cnts)), cnts, p0=p0, maxfev=maxfev)
|
||||
|
||||
# m = cnts.mean()
|
||||
# sd = cnts.std()
|
||||
# snr_cnts = np.where(sd == 0, 0, m / sd)
|
||||
|
||||
frC = fr0 + coeff[1]
|
||||
var_F = var_angle[math.floor(frC)]
|
||||
var_C = var_angle[math.ceil(frC)]
|
||||
# frStep = frC - math.floor(frC)
|
||||
var_step = var_C - var_F
|
||||
# var_p = var_F + var_step * frStep
|
||||
|
||||
# if scan_motor == "gamma":
|
||||
# gamma = var_p
|
||||
# elif scan_motor == "omega":
|
||||
# omega = var_p
|
||||
# elif scan_motor == "nu":
|
||||
# nu = var_p
|
||||
# elif scan_motor == "chi":
|
||||
# chi = var_p
|
||||
# elif scan_motor == "phi":
|
||||
# phi = var_p
|
||||
|
||||
intensity = coeff[1] * abs(coeff[2] * var_step) * math.sqrt(2) * math.sqrt(np.pi)
|
||||
|
||||
projX = np.sum(data_roi, axis=(0, 1))
|
||||
coeff, _ = curve_fit(gauss, range(len(projX)), projX, p0=p0, maxfev=maxfev)
|
||||
x_pos = x0 + coeff[1]
|
||||
|
||||
projY = np.sum(data_roi, axis=(0, 2))
|
||||
coeff, _ = curve_fit(gauss, range(len(projY)), projY, p0=p0, maxfev=maxfev)
|
||||
y_pos = y0 + coeff[1]
|
||||
|
||||
scan["fit"] = {"frame": frC, "x_pos": x_pos, "y_pos": y_pos, "intensity": intensity}
|
||||
|
||||
metadata_table_source = ColumnDataSource(dict(geom=[""], temp=[None], mf=[None]))
|
||||
metadata_table = DataTable(
|
||||
source=metadata_table_source,
|
||||
@ -527,7 +471,7 @@ def create():
|
||||
x = []
|
||||
y = []
|
||||
fit_param = fit_param_select.value
|
||||
for s, p in zip(zebra_data, scan_table_source.data["param"]):
|
||||
for s, p in zip(dataset, scan_table_source.data["param"]):
|
||||
if "fit" in s and fit_param:
|
||||
x.append(p)
|
||||
y.append(s["fit"][fit_param])
|
||||
@ -555,12 +499,20 @@ def create():
|
||||
fit_param_select.on_change("value", fit_param_select_callback)
|
||||
|
||||
def proc_all_button_callback():
|
||||
for scan in zebra_data:
|
||||
fit_event(scan)
|
||||
for scan in dataset:
|
||||
pyzebra.fit_event(
|
||||
scan,
|
||||
int(np.floor(frame_range.start)),
|
||||
int(np.ceil(frame_range.end)),
|
||||
int(np.floor(det_y_range.start)),
|
||||
int(np.ceil(det_y_range.end)),
|
||||
int(np.floor(det_x_range.start)),
|
||||
int(np.ceil(det_x_range.end)),
|
||||
)
|
||||
|
||||
_update_table()
|
||||
|
||||
for scan in zebra_data:
|
||||
for scan in dataset:
|
||||
if "fit" in scan:
|
||||
options = list(scan["fit"].keys())
|
||||
fit_param_select.options = options
|
||||
@ -573,11 +525,20 @@ def create():
|
||||
proc_all_button.on_click(proc_all_button_callback)
|
||||
|
||||
def proc_button_callback():
|
||||
fit_event(det_data)
|
||||
scan = _get_selected_scan()
|
||||
pyzebra.fit_event(
|
||||
scan,
|
||||
int(np.floor(frame_range.start)),
|
||||
int(np.ceil(frame_range.end)),
|
||||
int(np.floor(det_y_range.start)),
|
||||
int(np.ceil(det_y_range.end)),
|
||||
int(np.floor(det_x_range.start)),
|
||||
int(np.ceil(det_x_range.end)),
|
||||
)
|
||||
|
||||
_update_table()
|
||||
|
||||
for scan in zebra_data:
|
||||
for scan in dataset:
|
||||
if "fit" in scan:
|
||||
options = list(scan["fit"].keys())
|
||||
fit_param_select.options = options
|
||||
@ -628,14 +589,3 @@ def create():
|
||||
tab_layout = column(row(import_layout, scan_layout, plots))
|
||||
|
||||
return Panel(child=tab_layout, title="hdf param study")
|
||||
|
||||
|
||||
def gauss(x, *p):
|
||||
"""Defines Gaussian function
|
||||
Args:
|
||||
A - amplitude, mu - position of the center, sigma - width
|
||||
Returns:
|
||||
Gaussian function
|
||||
"""
|
||||
A, mu, sigma = p
|
||||
return A * np.exp(-((x - mu) ** 2) / (2.0 * sigma ** 2))
|
||||
|
@ -1,6 +1,5 @@
|
||||
import base64
|
||||
import io
|
||||
import math
|
||||
import os
|
||||
|
||||
import numpy as np
|
||||
@ -12,6 +11,7 @@ from bokeh.models import (
|
||||
BoxEditTool,
|
||||
BoxZoomTool,
|
||||
Button,
|
||||
CellEditor,
|
||||
CheckboxGroup,
|
||||
ColumnDataSource,
|
||||
DataRange1d,
|
||||
@ -37,11 +37,11 @@ from bokeh.models import (
|
||||
Spacer,
|
||||
Spinner,
|
||||
TableColumn,
|
||||
Tabs,
|
||||
Title,
|
||||
WheelZoomTool,
|
||||
)
|
||||
from bokeh.palettes import Cividis256, Greys256, Plasma256 # pylint: disable=E0611
|
||||
from scipy.optimize import curve_fit
|
||||
|
||||
import pyzebra
|
||||
|
||||
@ -53,7 +53,7 @@ IMAGE_PLOT_H = int(IMAGE_H * 2) + 27
|
||||
|
||||
def create():
|
||||
doc = curdoc()
|
||||
det_data = {}
|
||||
dataset = []
|
||||
cami_meta = {}
|
||||
|
||||
num_formatter = NumberFormatter(format="0.00", nan_format="")
|
||||
@ -97,22 +97,237 @@ def create():
|
||||
proposal_textinput = doc.proposal_textinput
|
||||
proposal_textinput.on_change("name", proposal_textinput_callback)
|
||||
|
||||
def upload_button_callback(_attr, _old, new):
|
||||
def upload_cami_button_callback(_attr, _old, new):
|
||||
nonlocal cami_meta
|
||||
with io.StringIO(base64.b64decode(new).decode()) as file:
|
||||
cami_meta = pyzebra.parse_h5meta(file)
|
||||
data_source.value = "cami file"
|
||||
file_select_update()
|
||||
|
||||
upload_div = Div(text="or upload .cami file:", margin=(5, 5, 0, 5))
|
||||
upload_button = FileInput(accept=".cami", width=200)
|
||||
upload_button.on_change("value", upload_button_callback)
|
||||
upload_cami_div = Div(text="or upload .cami file:", margin=(5, 5, 0, 5))
|
||||
upload_cami_button = FileInput(accept=".cami", width=200)
|
||||
upload_cami_button.on_change("value", upload_cami_button_callback)
|
||||
|
||||
def update_image(index=None):
|
||||
def upload_hdf_button_callback(_attr, _old, new):
|
||||
nonlocal dataset
|
||||
try:
|
||||
scan = pyzebra.read_detector_data(io.BytesIO(base64.b64decode(new)), None)
|
||||
except KeyError:
|
||||
print("Could not read data from the file.")
|
||||
return
|
||||
|
||||
dataset = [scan]
|
||||
last_im_index = scan["counts"].shape[0] - 1
|
||||
|
||||
index_spinner.value = 0
|
||||
index_spinner.high = last_im_index
|
||||
if last_im_index == 0:
|
||||
index_slider.disabled = True
|
||||
else:
|
||||
index_slider.disabled = False
|
||||
index_slider.end = last_im_index
|
||||
|
||||
zebra_mode = scan["zebra_mode"]
|
||||
if zebra_mode == "nb":
|
||||
metadata_table_source.data.update(geom=["normal beam"])
|
||||
else: # zebra_mode == "bi"
|
||||
metadata_table_source.data.update(geom=["bisecting"])
|
||||
|
||||
_init_datatable()
|
||||
|
||||
upload_hdf_div = Div(text="or upload .hdf file:", margin=(5, 5, 0, 5))
|
||||
upload_hdf_button = FileInput(accept=".hdf", width=200)
|
||||
upload_hdf_button.on_change("value", upload_hdf_button_callback)
|
||||
|
||||
def file_open_button_callback():
|
||||
nonlocal dataset
|
||||
new_data = []
|
||||
cm = cami_meta if data_source.value == "cami file" else None
|
||||
for f_path in file_select.value:
|
||||
f_name = os.path.basename(f_path)
|
||||
try:
|
||||
file_data = [pyzebra.read_detector_data(f_path, cm)]
|
||||
except:
|
||||
print(f"Error loading {f_name}")
|
||||
continue
|
||||
|
||||
pyzebra.normalize_dataset(file_data, monitor_spinner.value)
|
||||
|
||||
if not new_data: # first file
|
||||
new_data = file_data
|
||||
else:
|
||||
pyzebra.merge_datasets(new_data, file_data)
|
||||
|
||||
if new_data:
|
||||
dataset = new_data
|
||||
_init_datatable()
|
||||
|
||||
file_open_button = Button(label="Open New", width=100)
|
||||
file_open_button.on_click(file_open_button_callback)
|
||||
|
||||
def file_append_button_callback():
|
||||
file_data = []
|
||||
for f_path in file_select.value:
|
||||
f_name = os.path.basename(f_path)
|
||||
try:
|
||||
file_data = [pyzebra.read_detector_data(f_path, None)]
|
||||
except:
|
||||
print(f"Error loading {f_name}")
|
||||
continue
|
||||
|
||||
pyzebra.normalize_dataset(file_data, monitor_spinner.value)
|
||||
pyzebra.merge_datasets(dataset, file_data)
|
||||
|
||||
if file_data:
|
||||
_init_datatable()
|
||||
|
||||
file_append_button = Button(label="Append", width=100)
|
||||
file_append_button.on_click(file_append_button_callback)
|
||||
|
||||
def _init_datatable():
|
||||
scan_list = [s["idx"] for s in dataset]
|
||||
export = [s["export"] for s in dataset]
|
||||
|
||||
twotheta = [np.median(s["twotheta"]) if "twotheta" in s else None for s in dataset]
|
||||
gamma = [np.median(s["gamma"]) if "gamma" in s else None for s in dataset]
|
||||
omega = [np.median(s["omega"]) if "omega" in s else None for s in dataset]
|
||||
chi = [np.median(s["chi"]) if "chi" in s else None for s in dataset]
|
||||
phi = [np.median(s["phi"]) if "phi" in s else None for s in dataset]
|
||||
nu = [np.median(s["nu"]) if "nu" in s else None for s in dataset]
|
||||
|
||||
scan_table_source.data.update(
|
||||
scan=scan_list,
|
||||
fit=[0] * len(scan_list),
|
||||
export=export,
|
||||
twotheta=twotheta,
|
||||
gamma=gamma,
|
||||
omega=omega,
|
||||
chi=chi,
|
||||
phi=phi,
|
||||
nu=nu,
|
||||
)
|
||||
scan_table_source.selected.indices = []
|
||||
scan_table_source.selected.indices = [0]
|
||||
|
||||
merge_options = [(str(i), f"{i} ({idx})") for i, idx in enumerate(scan_list)]
|
||||
merge_from_select.options = merge_options
|
||||
merge_from_select.value = merge_options[0][0]
|
||||
|
||||
def scan_table_select_callback(_attr, old, new):
|
||||
if not new:
|
||||
# skip empty selections
|
||||
return
|
||||
|
||||
# Avoid selection of multiple indicies (via Shift+Click or Ctrl+Click)
|
||||
if len(new) > 1:
|
||||
# drop selection to the previous one
|
||||
scan_table_source.selected.indices = old
|
||||
return
|
||||
|
||||
if len(old) > 1:
|
||||
# skip unnecessary update caused by selection drop
|
||||
return
|
||||
|
||||
scan = _get_selected_scan()
|
||||
last_im_index = scan["counts"].shape[0] - 1
|
||||
|
||||
index_spinner.value = 0
|
||||
index_spinner.high = last_im_index
|
||||
if last_im_index == 0:
|
||||
index_slider.disabled = True
|
||||
else:
|
||||
index_slider.disabled = False
|
||||
index_slider.end = last_im_index
|
||||
|
||||
zebra_mode = scan["zebra_mode"]
|
||||
if zebra_mode == "nb":
|
||||
metadata_table_source.data.update(geom=["normal beam"])
|
||||
else: # zebra_mode == "bi"
|
||||
metadata_table_source.data.update(geom=["bisecting"])
|
||||
|
||||
_update_image()
|
||||
_update_overview_plot()
|
||||
|
||||
def scan_table_source_callback(_attr, _old, new):
|
||||
# unfortunately, we don't know if the change comes from data update or user input
|
||||
# also `old` and `new` are the same for non-scalars
|
||||
for scan, export in zip(dataset, new["export"]):
|
||||
scan["export"] = export
|
||||
|
||||
scan_table_source = ColumnDataSource(
|
||||
dict(scan=[], fit=[], export=[], twotheta=[], gamma=[], omega=[], chi=[], phi=[], nu=[],)
|
||||
)
|
||||
scan_table_source.on_change("data", scan_table_source_callback)
|
||||
scan_table_source.selected.on_change("indices", scan_table_select_callback)
|
||||
|
||||
scan_table = DataTable(
|
||||
source=scan_table_source,
|
||||
columns=[
|
||||
TableColumn(field="scan", title="Scan", editor=CellEditor(), width=50),
|
||||
TableColumn(field="fit", title="Fit", editor=CellEditor(), width=50),
|
||||
TableColumn(field="export", title="Export", editor=CellEditor(), width=50),
|
||||
TableColumn(field="twotheta", title="2theta", editor=CellEditor(), width=50),
|
||||
TableColumn(field="gamma", title="gamma", editor=CellEditor(), width=50),
|
||||
TableColumn(field="omega", title="omega", editor=CellEditor(), width=50),
|
||||
TableColumn(field="chi", title="chi", editor=CellEditor(), width=50),
|
||||
TableColumn(field="phi", title="phi", editor=CellEditor(), width=50),
|
||||
TableColumn(field="nu", title="nu", editor=CellEditor(), width=50),
|
||||
],
|
||||
width=310, # +60 because of the index column, but excluding twotheta onwards
|
||||
height=350,
|
||||
autosize_mode="none",
|
||||
editable=True,
|
||||
)
|
||||
|
||||
def _get_selected_scan():
|
||||
return dataset[scan_table_source.selected.indices[0]]
|
||||
|
||||
def _update_table():
|
||||
export = [scan["export"] for scan in dataset]
|
||||
scan_table_source.data.update(export=export)
|
||||
|
||||
def monitor_spinner_callback(_attr, old, new):
|
||||
if dataset:
|
||||
pyzebra.normalize_dataset(dataset, new)
|
||||
_update_image()
|
||||
_update_overview_plot()
|
||||
|
||||
monitor_spinner = Spinner(title="Monitor:", mode="int", value=100_000, low=1, width=145)
|
||||
monitor_spinner.on_change("value", monitor_spinner_callback)
|
||||
|
||||
merge_from_select = Select(title="scan:", width=145)
|
||||
|
||||
def merge_button_callback():
|
||||
scan_into = _get_selected_scan()
|
||||
scan_from = dataset[int(merge_from_select.value)]
|
||||
|
||||
if scan_into is scan_from:
|
||||
print("WARNING: Selected scans for merging are identical")
|
||||
return
|
||||
|
||||
pyzebra.merge_h5_scans(scan_into, scan_from)
|
||||
_update_table()
|
||||
_update_image()
|
||||
_update_overview_plot()
|
||||
|
||||
merge_button = Button(label="Merge into current", width=145)
|
||||
merge_button.on_click(merge_button_callback)
|
||||
|
||||
def restore_button_callback():
|
||||
pyzebra.restore_scan(_get_selected_scan())
|
||||
_update_table()
|
||||
_update_image()
|
||||
_update_overview_plot()
|
||||
|
||||
restore_button = Button(label="Restore scan", width=145)
|
||||
restore_button.on_click(restore_button_callback)
|
||||
|
||||
def _update_image(index=None):
|
||||
if index is None:
|
||||
index = index_spinner.value
|
||||
|
||||
current_image = det_data["data"][index]
|
||||
scan = _get_selected_scan()
|
||||
current_image = scan["counts"][index]
|
||||
proj_v_line_source.data.update(
|
||||
x=np.arange(0, IMAGE_W) + 0.5, y=np.mean(current_image, axis=0)
|
||||
)
|
||||
@ -135,25 +350,54 @@ def create():
|
||||
image_glyph.color_mapper.low = im_min
|
||||
image_glyph.color_mapper.high = im_max
|
||||
|
||||
if "mf" in det_data:
|
||||
metadata_table_source.data.update(mf=[det_data["mf"][index]])
|
||||
if "mf" in scan:
|
||||
metadata_table_source.data.update(mf=[scan["mf"][index]])
|
||||
else:
|
||||
metadata_table_source.data.update(mf=[None])
|
||||
|
||||
if "temp" in det_data:
|
||||
metadata_table_source.data.update(temp=[det_data["temp"][index]])
|
||||
if "temp" in scan:
|
||||
metadata_table_source.data.update(temp=[scan["temp"][index]])
|
||||
else:
|
||||
metadata_table_source.data.update(temp=[None])
|
||||
|
||||
gamma, nu = calculate_pol(det_data, index)
|
||||
omega = np.ones((IMAGE_H, IMAGE_W)) * det_data["omega"][index]
|
||||
gamma, nu = calculate_pol(scan, index)
|
||||
omega = np.ones((IMAGE_H, IMAGE_W)) * scan["omega"][index]
|
||||
image_source.data.update(gamma=[gamma], nu=[nu], omega=[omega])
|
||||
|
||||
def update_overview_plot():
|
||||
h5_data = det_data["data"]
|
||||
n_im, n_y, n_x = h5_data.shape
|
||||
overview_x = np.mean(h5_data, axis=1)
|
||||
overview_y = np.mean(h5_data, axis=2)
|
||||
# update detector center angles
|
||||
det_c_x = int(IMAGE_W / 2)
|
||||
det_c_y = int(IMAGE_H / 2)
|
||||
if scan["zebra_mode"] == "nb":
|
||||
gamma_c = gamma[det_c_y, det_c_x]
|
||||
nu_c = nu[det_c_y, det_c_x]
|
||||
omega_c = omega[det_c_y, det_c_x]
|
||||
chi_c = None
|
||||
phi_c = None
|
||||
|
||||
else: # zebra_mode == "bi"
|
||||
wave = scan["wave"]
|
||||
ddist = scan["ddist"]
|
||||
gammad = scan["gamma"][index]
|
||||
om = scan["omega"][index]
|
||||
ch = scan["chi"][index]
|
||||
ph = scan["phi"][index]
|
||||
nud = scan["nu"]
|
||||
|
||||
nu_c = 0
|
||||
chi_c, phi_c, gamma_c, omega_c = pyzebra.ang_proc(
|
||||
wave, ddist, gammad, om, ch, ph, nud, det_c_x, det_c_y
|
||||
)
|
||||
|
||||
detcenter_table_source.data.update(
|
||||
gamma=[gamma_c], nu=[nu_c], omega=[omega_c], chi=[chi_c], phi=[phi_c],
|
||||
)
|
||||
|
||||
def _update_overview_plot():
|
||||
scan = _get_selected_scan()
|
||||
counts = scan["counts"]
|
||||
n_im, n_y, n_x = counts.shape
|
||||
overview_x = np.mean(counts, axis=1)
|
||||
overview_y = np.mean(counts, axis=2)
|
||||
|
||||
# normalize for simpler colormapping
|
||||
overview_max_val = max(np.max(overview_x), np.max(overview_y))
|
||||
@ -181,12 +425,12 @@ def create():
|
||||
frame_range.reset_end = n_im
|
||||
frame_range.bounds = (0, n_im)
|
||||
|
||||
scan_motor = det_data["scan_motor"]
|
||||
scan_motor = scan["scan_motor"]
|
||||
overview_plot_y.axis[1].axis_label = f"Scanning motor, {scan_motor}"
|
||||
|
||||
var = det_data[scan_motor]
|
||||
var = scan[scan_motor]
|
||||
var_start = var[0]
|
||||
var_end = var[-1] + (var[-1] - var[0]) / (n_im - 1)
|
||||
var_end = var[-1] + (var[-1] - var[0]) / (n_im - 1) if n_im != 1 else var_start + 1
|
||||
|
||||
scanning_motor_range.start = var_start
|
||||
scanning_motor_range.end = var_end
|
||||
@ -195,42 +439,30 @@ def create():
|
||||
# handle both, ascending and descending sequences
|
||||
scanning_motor_range.bounds = (min(var_start, var_end), max(var_start, var_end))
|
||||
|
||||
def file_select_callback(_attr, old, new):
|
||||
nonlocal det_data
|
||||
if not new:
|
||||
# skip empty selections
|
||||
return
|
||||
gamma = image_source.data["gamma"][0]
|
||||
gamma_start = gamma[0, 0]
|
||||
gamma_end = gamma[0, -1]
|
||||
|
||||
# Avoid selection of multiple indicies (via Shift+Click or Ctrl+Click)
|
||||
if len(new) > 1:
|
||||
# drop selection to the previous one
|
||||
file_select.value = old
|
||||
return
|
||||
gamma_range.start = gamma_start
|
||||
gamma_range.end = gamma_end
|
||||
gamma_range.reset_start = gamma_start
|
||||
gamma_range.reset_end = gamma_end
|
||||
gamma_range.bounds = (min(gamma_start, gamma_end), max(gamma_start, gamma_end))
|
||||
|
||||
if len(old) > 1:
|
||||
# skip unnecessary update caused by selection drop
|
||||
return
|
||||
nu = image_source.data["nu"][0]
|
||||
nu_start = nu[0, 0]
|
||||
nu_end = nu[-1, 0]
|
||||
|
||||
det_data = pyzebra.read_detector_data(new[0], cami_meta)
|
||||
|
||||
index_spinner.value = 0
|
||||
index_spinner.high = det_data["data"].shape[0] - 1
|
||||
index_slider.end = det_data["data"].shape[0] - 1
|
||||
|
||||
zebra_mode = det_data["zebra_mode"]
|
||||
if zebra_mode == "nb":
|
||||
metadata_table_source.data.update(geom=["normal beam"])
|
||||
else: # zebra_mode == "bi"
|
||||
metadata_table_source.data.update(geom=["bisecting"])
|
||||
|
||||
update_image(0)
|
||||
update_overview_plot()
|
||||
nu_range.start = nu_start
|
||||
nu_range.end = nu_end
|
||||
nu_range.reset_start = nu_start
|
||||
nu_range.reset_end = nu_end
|
||||
nu_range.bounds = (min(nu_start, nu_end), max(nu_start, nu_end))
|
||||
|
||||
file_select = MultiSelect(title="Available .hdf files:", width=210, height=250)
|
||||
file_select.on_change("value", file_select_callback)
|
||||
|
||||
def index_callback(_attr, _old, new):
|
||||
update_image(new)
|
||||
_update_image(new)
|
||||
|
||||
index_slider = Slider(value=0, start=0, end=1, show_value=False, width=400)
|
||||
|
||||
@ -295,9 +527,10 @@ def create():
|
||||
|
||||
# calculate hkl-indices of first mouse entry
|
||||
def mouse_enter_callback(_event):
|
||||
if det_data and np.array_equal(image_source.data["h"][0], np.zeros((1, 1))):
|
||||
if dataset and np.array_equal(image_source.data["h"][0], np.zeros((1, 1))):
|
||||
scan = _get_selected_scan()
|
||||
index = index_spinner.value
|
||||
h, k, l = calculate_hkl(det_data, index)
|
||||
h, k, l = calculate_hkl(scan, index)
|
||||
image_source.data.update(h=[h], k=[k], l=[l])
|
||||
|
||||
plot.on_event(MouseEnter, mouse_enter_callback)
|
||||
@ -359,13 +592,14 @@ def create():
|
||||
|
||||
def box_edit_callback(_attr, _old, new):
|
||||
if new["x"]:
|
||||
h5_data = det_data["data"]
|
||||
x_val = np.arange(h5_data.shape[0])
|
||||
scan = _get_selected_scan()
|
||||
counts = scan["counts"]
|
||||
x_val = np.arange(counts.shape[0])
|
||||
left = int(np.floor(new["x"][0]))
|
||||
right = int(np.ceil(new["x"][0] + new["width"][0]))
|
||||
bottom = int(np.floor(new["y"][0]))
|
||||
top = int(np.ceil(new["y"][0] + new["height"][0]))
|
||||
y_val = np.sum(h5_data[:, bottom:top, left:right], axis=(1, 2))
|
||||
y_val = np.sum(counts[:, bottom:top, left:right], axis=(1, 2))
|
||||
else:
|
||||
x_val = []
|
||||
y_val = []
|
||||
@ -385,12 +619,14 @@ def create():
|
||||
scanning_motor_range = Range1d(0, 1, bounds=(0, 1))
|
||||
|
||||
det_x_range = Range1d(0, IMAGE_W, bounds=(0, IMAGE_W))
|
||||
gamma_range = Range1d(0, 1, bounds=(0, 1))
|
||||
overview_plot_x = Plot(
|
||||
title=Title(text="Projections on X-axis"),
|
||||
x_range=det_x_range,
|
||||
y_range=frame_range,
|
||||
extra_x_ranges={"gamma": gamma_range},
|
||||
extra_y_ranges={"scanning_motor": scanning_motor_range},
|
||||
plot_height=400,
|
||||
plot_height=450,
|
||||
plot_width=IMAGE_PLOT_W - 3,
|
||||
)
|
||||
|
||||
@ -404,6 +640,9 @@ def create():
|
||||
|
||||
# ---- axes
|
||||
overview_plot_x.add_layout(LinearAxis(axis_label="Coordinate X, pix"), place="below")
|
||||
overview_plot_x.add_layout(
|
||||
LinearAxis(x_range_name="gamma", axis_label="Gamma, deg"), place="above"
|
||||
)
|
||||
overview_plot_x.add_layout(
|
||||
LinearAxis(axis_label="Frame", major_label_orientation="vertical"), place="left"
|
||||
)
|
||||
@ -423,12 +662,14 @@ def create():
|
||||
)
|
||||
|
||||
det_y_range = Range1d(0, IMAGE_H, bounds=(0, IMAGE_H))
|
||||
nu_range = Range1d(0, 1, bounds=(0, 1))
|
||||
overview_plot_y = Plot(
|
||||
title=Title(text="Projections on Y-axis"),
|
||||
x_range=det_y_range,
|
||||
y_range=frame_range,
|
||||
extra_x_ranges={"nu": nu_range},
|
||||
extra_y_ranges={"scanning_motor": scanning_motor_range},
|
||||
plot_height=400,
|
||||
plot_height=450,
|
||||
plot_width=IMAGE_PLOT_H + 22,
|
||||
)
|
||||
|
||||
@ -442,6 +683,7 @@ def create():
|
||||
|
||||
# ---- axes
|
||||
overview_plot_y.add_layout(LinearAxis(axis_label="Coordinate Y, pix"), place="below")
|
||||
overview_plot_y.add_layout(LinearAxis(x_range_name="nu", axis_label="Nu, deg"), place="above")
|
||||
overview_plot_y.add_layout(
|
||||
LinearAxis(
|
||||
y_range_name="scanning_motor",
|
||||
@ -513,7 +755,7 @@ def create():
|
||||
display_min_spinner.disabled = False
|
||||
display_max_spinner.disabled = False
|
||||
|
||||
update_image()
|
||||
_update_image()
|
||||
|
||||
main_auto_checkbox = CheckboxGroup(
|
||||
labels=["Frame Intensity Range"], active=[0], width=145, margin=[10, 5, 0, 5]
|
||||
@ -559,7 +801,7 @@ def create():
|
||||
proj_display_min_spinner.disabled = False
|
||||
proj_display_max_spinner.disabled = False
|
||||
|
||||
update_overview_plot()
|
||||
_update_overview_plot()
|
||||
|
||||
proj_auto_checkbox = CheckboxGroup(
|
||||
labels=["Projections Intensity Range"], active=[0], width=145, margin=[10, 5, 0, 5]
|
||||
@ -634,42 +876,53 @@ def create():
|
||||
index_position=None,
|
||||
)
|
||||
|
||||
detcenter_table_source = ColumnDataSource(dict(gamma=[], omega=[], chi=[], phi=[], nu=[]))
|
||||
detcenter_table = DataTable(
|
||||
source=detcenter_table_source,
|
||||
columns=[
|
||||
TableColumn(field="gamma", title="Gamma", formatter=num_formatter, width=70),
|
||||
TableColumn(field="omega", title="Omega", formatter=num_formatter, width=70),
|
||||
TableColumn(field="chi", title="Chi", formatter=num_formatter, width=70),
|
||||
TableColumn(field="phi", title="Phi", formatter=num_formatter, width=70),
|
||||
TableColumn(field="nu", title="Nu", formatter=num_formatter, width=70),
|
||||
],
|
||||
height=150,
|
||||
width=350,
|
||||
autosize_mode="none",
|
||||
index_position=None,
|
||||
)
|
||||
|
||||
def add_event_button_callback():
|
||||
p0 = [1.0, 0.0, 1.0]
|
||||
maxfev = 100000
|
||||
scan = _get_selected_scan()
|
||||
pyzebra.fit_event(
|
||||
scan,
|
||||
int(np.floor(frame_range.start)),
|
||||
int(np.ceil(frame_range.end)),
|
||||
int(np.floor(det_y_range.start)),
|
||||
int(np.ceil(det_y_range.end)),
|
||||
int(np.floor(det_x_range.start)),
|
||||
int(np.ceil(det_x_range.end)),
|
||||
)
|
||||
|
||||
wave = det_data["wave"]
|
||||
ddist = det_data["ddist"]
|
||||
cell = det_data["cell"]
|
||||
wave = scan["wave"]
|
||||
ddist = scan["ddist"]
|
||||
cell = scan["cell"]
|
||||
|
||||
gamma = det_data["gamma"][0]
|
||||
omega = det_data["omega"][0]
|
||||
nu = det_data["nu"][0]
|
||||
chi = det_data["chi"][0]
|
||||
phi = det_data["phi"][0]
|
||||
gamma = scan["gamma"][0]
|
||||
omega = scan["omega"][0]
|
||||
nu = scan["nu"][0]
|
||||
chi = scan["chi"][0]
|
||||
phi = scan["phi"][0]
|
||||
|
||||
scan_motor = det_data["scan_motor"]
|
||||
var_angle = det_data[scan_motor]
|
||||
scan_motor = scan["scan_motor"]
|
||||
var_angle = scan[scan_motor]
|
||||
|
||||
x0 = int(np.floor(det_x_range.start))
|
||||
xN = int(np.ceil(det_x_range.end))
|
||||
y0 = int(np.floor(det_y_range.start))
|
||||
yN = int(np.ceil(det_y_range.end))
|
||||
fr0 = int(np.floor(frame_range.start))
|
||||
frN = int(np.ceil(frame_range.end))
|
||||
data_roi = det_data["data"][fr0:frN, y0:yN, x0:xN]
|
||||
snr_cnts = scan["fit"]["snr"]
|
||||
frC = scan["fit"]["frame"]
|
||||
|
||||
cnts = np.sum(data_roi, axis=(1, 2))
|
||||
coeff, _ = curve_fit(gauss, range(len(cnts)), cnts, p0=p0, maxfev=maxfev)
|
||||
|
||||
m = cnts.mean()
|
||||
sd = cnts.std()
|
||||
snr_cnts = np.where(sd == 0, 0, m / sd)
|
||||
|
||||
frC = fr0 + coeff[1]
|
||||
var_F = var_angle[math.floor(frC)]
|
||||
var_C = var_angle[math.ceil(frC)]
|
||||
frStep = frC - math.floor(frC)
|
||||
var_F = var_angle[int(np.floor(frC))]
|
||||
var_C = var_angle[int(np.ceil(frC))]
|
||||
frStep = frC - np.floor(frC)
|
||||
var_step = var_C - var_F
|
||||
var_p = var_F + var_step * frStep
|
||||
|
||||
@ -684,15 +937,13 @@ def create():
|
||||
elif scan_motor == "phi":
|
||||
phi = var_p
|
||||
|
||||
intensity = coeff[1] * abs(coeff[2] * var_step) * math.sqrt(2) * math.sqrt(np.pi)
|
||||
intensity = scan["fit"]["intensity"]
|
||||
x_pos = scan["fit"]["x_pos"]
|
||||
y_pos = scan["fit"]["y_pos"]
|
||||
|
||||
projX = np.sum(data_roi, axis=(0, 1))
|
||||
coeff, _ = curve_fit(gauss, range(len(projX)), projX, p0=p0, maxfev=maxfev)
|
||||
x_pos = x0 + coeff[1]
|
||||
|
||||
projY = np.sum(data_roi, axis=(0, 2))
|
||||
coeff, _ = curve_fit(gauss, range(len(projY)), projY, p0=p0, maxfev=maxfev)
|
||||
y_pos = y0 + coeff[1]
|
||||
if scan["zebra_mode"] == "nb":
|
||||
chi = None
|
||||
phi = None
|
||||
|
||||
events_data["wave"].append(wave)
|
||||
events_data["ddist"].append(ddist)
|
||||
@ -710,7 +961,7 @@ def create():
|
||||
|
||||
events_table_source.data = events_data
|
||||
|
||||
add_event_button = Button(label="Add spind event", width=145)
|
||||
add_event_button = Button(label="Add peak center", width=145)
|
||||
add_event_button.on_click(add_event_button_callback)
|
||||
|
||||
def remove_event_button_callback():
|
||||
@ -721,7 +972,7 @@ def create():
|
||||
|
||||
events_table_source.data = events_data
|
||||
|
||||
remove_event_button = Button(label="Remove spind event", width=145)
|
||||
remove_event_button = Button(label="Remove peak center", width=145)
|
||||
remove_event_button.on_click(remove_event_button_callback)
|
||||
|
||||
metadata_table_source = ColumnDataSource(dict(geom=[""], temp=[None], mf=[None]))
|
||||
@ -739,7 +990,23 @@ def create():
|
||||
)
|
||||
|
||||
# Final layout
|
||||
import_layout = column(data_source, upload_div, upload_button, file_select)
|
||||
peak_tables = Tabs(
|
||||
tabs=[
|
||||
Panel(child=events_table, title="Actual peak center"),
|
||||
Panel(child=detcenter_table, title="Peak in the detector center"),
|
||||
]
|
||||
)
|
||||
|
||||
import_layout = column(
|
||||
data_source,
|
||||
upload_cami_div,
|
||||
upload_cami_button,
|
||||
upload_hdf_div,
|
||||
upload_hdf_button,
|
||||
file_select,
|
||||
row(file_open_button, file_append_button),
|
||||
)
|
||||
|
||||
layout_image = column(gridplot([[proj_v, None], [plot, proj_h]], merge_tools=False))
|
||||
colormap_layout = column(
|
||||
colormap,
|
||||
@ -751,7 +1018,7 @@ def create():
|
||||
|
||||
layout_controls = column(
|
||||
row(metadata_table, index_spinner, column(Spacer(height=25), index_slider)),
|
||||
row(column(add_event_button, remove_event_button), events_table),
|
||||
row(column(add_event_button, remove_event_button), peak_tables),
|
||||
)
|
||||
|
||||
layout_overview = column(
|
||||
@ -763,42 +1030,37 @@ def create():
|
||||
),
|
||||
)
|
||||
|
||||
scan_layout = column(
|
||||
scan_table,
|
||||
row(monitor_spinner, column(Spacer(height=19), restore_button)),
|
||||
row(column(Spacer(height=19), merge_button), merge_from_select),
|
||||
)
|
||||
|
||||
tab_layout = row(
|
||||
column(import_layout, colormap_layout),
|
||||
column(layout_overview, layout_controls),
|
||||
column(row(scan_layout, layout_overview), layout_controls),
|
||||
column(roi_avg_plot, layout_image),
|
||||
)
|
||||
|
||||
return Panel(child=tab_layout, title="hdf viewer")
|
||||
|
||||
|
||||
def gauss(x, *p):
|
||||
"""Defines Gaussian function
|
||||
Args:
|
||||
A - amplitude, mu - position of the center, sigma - width
|
||||
Returns:
|
||||
Gaussian function
|
||||
"""
|
||||
A, mu, sigma = p
|
||||
return A * np.exp(-((x - mu) ** 2) / (2.0 * sigma ** 2))
|
||||
|
||||
|
||||
def calculate_hkl(det_data, index):
|
||||
def calculate_hkl(scan, index):
|
||||
h = np.empty(shape=(IMAGE_H, IMAGE_W))
|
||||
k = np.empty(shape=(IMAGE_H, IMAGE_W))
|
||||
l = np.empty(shape=(IMAGE_H, IMAGE_W))
|
||||
|
||||
wave = det_data["wave"]
|
||||
ddist = det_data["ddist"]
|
||||
gammad = det_data["gamma"][index]
|
||||
om = det_data["omega"][index]
|
||||
nud = det_data["nu"]
|
||||
ub = det_data["ub"]
|
||||
geometry = det_data["zebra_mode"]
|
||||
wave = scan["wave"]
|
||||
ddist = scan["ddist"]
|
||||
gammad = scan["gamma"][index]
|
||||
om = scan["omega"][index]
|
||||
nud = scan["nu"]
|
||||
ub = scan["ub"]
|
||||
geometry = scan["zebra_mode"]
|
||||
|
||||
if geometry == "bi":
|
||||
chi = det_data["chi"][index]
|
||||
phi = det_data["phi"][index]
|
||||
chi = scan["chi"][index]
|
||||
phi = scan["phi"][index]
|
||||
elif geometry == "nb":
|
||||
chi = 0
|
||||
phi = 0
|
||||
@ -814,16 +1076,11 @@ def calculate_hkl(det_data, index):
|
||||
return h, k, l
|
||||
|
||||
|
||||
def calculate_pol(det_data, index):
|
||||
gamma = np.empty(shape=(IMAGE_H, IMAGE_W))
|
||||
nu = np.empty(shape=(IMAGE_H, IMAGE_W))
|
||||
|
||||
ddist = det_data["ddist"]
|
||||
gammad = det_data["gamma"][index]
|
||||
nud = det_data["nu"]
|
||||
|
||||
for xi in np.arange(IMAGE_W):
|
||||
for yi in np.arange(IMAGE_H):
|
||||
gamma[yi, xi], nu[yi, xi] = pyzebra.det2pol(ddist, gammad, nud, xi, yi)
|
||||
def calculate_pol(scan, index):
|
||||
ddist = scan["ddist"]
|
||||
gammad = scan["gamma"][index]
|
||||
nud = scan["nu"]
|
||||
yi, xi = np.ogrid[:IMAGE_H, :IMAGE_W]
|
||||
gamma, nu = pyzebra.det2pol(ddist, gammad, nud, xi, yi)
|
||||
|
||||
return gamma, nu
|
||||
|
@ -27,6 +27,7 @@ from bokeh.models import (
|
||||
Legend,
|
||||
Line,
|
||||
LinearAxis,
|
||||
LinearColorMapper,
|
||||
MultiLine,
|
||||
MultiSelect,
|
||||
NumberEditor,
|
||||
@ -34,6 +35,7 @@ from bokeh.models import (
|
||||
PanTool,
|
||||
Plot,
|
||||
RadioGroup,
|
||||
Range1d,
|
||||
ResetTool,
|
||||
Scatter,
|
||||
Select,
|
||||
@ -46,8 +48,7 @@ from bokeh.models import (
|
||||
WheelZoomTool,
|
||||
Whisker,
|
||||
)
|
||||
from bokeh.palettes import Category10, Turbo256
|
||||
from bokeh.transform import linear_cmap
|
||||
from bokeh.palettes import Category10, Plasma256
|
||||
from scipy import interpolate
|
||||
|
||||
import pyzebra
|
||||
@ -82,7 +83,7 @@ def color_palette(n_colors):
|
||||
|
||||
def create():
|
||||
doc = curdoc()
|
||||
det_data = []
|
||||
dataset = []
|
||||
fit_params = {}
|
||||
js_data = ColumnDataSource(data=dict(content=[""], fname=[""], ext=[""]))
|
||||
|
||||
@ -110,126 +111,166 @@ def create():
|
||||
proposal_textinput.on_change("name", proposal_textinput_callback)
|
||||
|
||||
def _init_datatable():
|
||||
scan_list = [s["idx"] for s in det_data]
|
||||
scan_list = [s["idx"] for s in dataset]
|
||||
export = [s["export"] for s in dataset]
|
||||
if param_select.value == "user defined":
|
||||
param = [None] * len(dataset)
|
||||
else:
|
||||
param = [scan[param_select.value] for scan in dataset]
|
||||
|
||||
file_list = []
|
||||
for scan in det_data:
|
||||
for scan in dataset:
|
||||
file_list.append(os.path.basename(scan["original_filename"]))
|
||||
|
||||
scan_table_source.data.update(
|
||||
file=file_list,
|
||||
scan=scan_list,
|
||||
param=[None] * len(scan_list),
|
||||
fit=[0] * len(scan_list),
|
||||
export=[True] * len(scan_list),
|
||||
file=file_list, scan=scan_list, param=param, fit=[0] * len(scan_list), export=export,
|
||||
)
|
||||
scan_table_source.selected.indices = []
|
||||
scan_table_source.selected.indices = [0]
|
||||
|
||||
scan_motor_select.options = det_data[0]["scan_motors"]
|
||||
scan_motor_select.value = det_data[0]["scan_motor"]
|
||||
param_select.value = "user defined"
|
||||
scan_motor_select.options = dataset[0]["scan_motors"]
|
||||
scan_motor_select.value = dataset[0]["scan_motor"]
|
||||
|
||||
merge_options = [(str(i), f"{i} ({idx})") for i, idx in enumerate(scan_list)]
|
||||
merge_from_select.options = merge_options
|
||||
merge_from_select.value = merge_options[0][0]
|
||||
|
||||
file_select = MultiSelect(title="Available .ccl/.dat files:", width=210, height=250)
|
||||
|
||||
def file_open_button_callback():
|
||||
nonlocal det_data
|
||||
for f_ind, f_path in enumerate(file_select.value):
|
||||
nonlocal dataset
|
||||
new_data = []
|
||||
for f_path in file_select.value:
|
||||
with open(f_path) as file:
|
||||
base, ext = os.path.splitext(os.path.basename(f_path))
|
||||
file_data = pyzebra.parse_1D(file, ext)
|
||||
f_name = os.path.basename(f_path)
|
||||
base, ext = os.path.splitext(f_name)
|
||||
try:
|
||||
file_data = pyzebra.parse_1D(file, ext)
|
||||
except:
|
||||
print(f"Error loading {f_name}")
|
||||
continue
|
||||
|
||||
pyzebra.normalize_dataset(file_data, monitor_spinner.value)
|
||||
|
||||
if f_ind == 0: # first file
|
||||
det_data = file_data
|
||||
pyzebra.merge_duplicates(det_data)
|
||||
if not new_data: # first file
|
||||
new_data = file_data
|
||||
pyzebra.merge_duplicates(new_data)
|
||||
js_data.data.update(fname=[base])
|
||||
else:
|
||||
pyzebra.merge_datasets(det_data, file_data)
|
||||
pyzebra.merge_datasets(new_data, file_data)
|
||||
|
||||
_init_datatable()
|
||||
append_upload_button.disabled = False
|
||||
if new_data:
|
||||
dataset = new_data
|
||||
_init_datatable()
|
||||
append_upload_button.disabled = False
|
||||
|
||||
file_open_button = Button(label="Open New", width=100, disabled=True)
|
||||
file_open_button.on_click(file_open_button_callback)
|
||||
|
||||
def file_append_button_callback():
|
||||
file_data = []
|
||||
for f_path in file_select.value:
|
||||
with open(f_path) as file:
|
||||
_, ext = os.path.splitext(f_path)
|
||||
file_data = pyzebra.parse_1D(file, ext)
|
||||
f_name = os.path.basename(f_path)
|
||||
_, ext = os.path.splitext(f_name)
|
||||
try:
|
||||
file_data = pyzebra.parse_1D(file, ext)
|
||||
except:
|
||||
print(f"Error loading {f_name}")
|
||||
continue
|
||||
|
||||
pyzebra.normalize_dataset(file_data, monitor_spinner.value)
|
||||
pyzebra.merge_datasets(det_data, file_data)
|
||||
pyzebra.merge_datasets(dataset, file_data)
|
||||
|
||||
_init_datatable()
|
||||
if file_data:
|
||||
_init_datatable()
|
||||
|
||||
file_append_button = Button(label="Append", width=100, disabled=True)
|
||||
file_append_button.on_click(file_append_button_callback)
|
||||
|
||||
def upload_button_callback(_attr, _old, new):
|
||||
nonlocal det_data
|
||||
det_data = []
|
||||
for f_str, f_name in zip(new, upload_button.filename):
|
||||
def upload_button_callback(_attr, _old, _new):
|
||||
nonlocal dataset
|
||||
new_data = []
|
||||
for f_str, f_name in zip(upload_button.value, upload_button.filename):
|
||||
with io.StringIO(base64.b64decode(f_str).decode()) as file:
|
||||
base, ext = os.path.splitext(f_name)
|
||||
file_data = pyzebra.parse_1D(file, ext)
|
||||
try:
|
||||
file_data = pyzebra.parse_1D(file, ext)
|
||||
except:
|
||||
print(f"Error loading {f_name}")
|
||||
continue
|
||||
|
||||
pyzebra.normalize_dataset(file_data, monitor_spinner.value)
|
||||
|
||||
if not det_data: # first file
|
||||
det_data = file_data
|
||||
pyzebra.merge_duplicates(det_data)
|
||||
if not new_data: # first file
|
||||
new_data = file_data
|
||||
pyzebra.merge_duplicates(new_data)
|
||||
js_data.data.update(fname=[base])
|
||||
else:
|
||||
pyzebra.merge_datasets(det_data, file_data)
|
||||
pyzebra.merge_datasets(new_data, file_data)
|
||||
|
||||
_init_datatable()
|
||||
append_upload_button.disabled = False
|
||||
if new_data:
|
||||
dataset = new_data
|
||||
_init_datatable()
|
||||
append_upload_button.disabled = False
|
||||
|
||||
upload_div = Div(text="or upload new .ccl/.dat files:", margin=(5, 5, 0, 5))
|
||||
upload_button = FileInput(accept=".ccl,.dat", multiple=True, width=200)
|
||||
upload_button.on_change("value", upload_button_callback)
|
||||
# for on_change("value", ...) or on_change("filename", ...),
|
||||
# see https://github.com/bokeh/bokeh/issues/11461
|
||||
upload_button.on_change("filename", upload_button_callback)
|
||||
|
||||
def append_upload_button_callback(_attr, _old, new):
|
||||
for f_str, f_name in zip(new, append_upload_button.filename):
|
||||
def append_upload_button_callback(_attr, _old, _new):
|
||||
file_data = []
|
||||
for f_str, f_name in zip(append_upload_button.value, append_upload_button.filename):
|
||||
with io.StringIO(base64.b64decode(f_str).decode()) as file:
|
||||
_, ext = os.path.splitext(f_name)
|
||||
file_data = pyzebra.parse_1D(file, ext)
|
||||
try:
|
||||
file_data = pyzebra.parse_1D(file, ext)
|
||||
except:
|
||||
print(f"Error loading {f_name}")
|
||||
continue
|
||||
|
||||
pyzebra.normalize_dataset(file_data, monitor_spinner.value)
|
||||
pyzebra.merge_datasets(det_data, file_data)
|
||||
pyzebra.merge_datasets(dataset, file_data)
|
||||
|
||||
_init_datatable()
|
||||
if file_data:
|
||||
_init_datatable()
|
||||
|
||||
append_upload_div = Div(text="append extra files:", margin=(5, 5, 0, 5))
|
||||
append_upload_button = FileInput(accept=".ccl,.dat", multiple=True, width=200, disabled=True)
|
||||
append_upload_button.on_change("value", append_upload_button_callback)
|
||||
# for on_change("value", ...) or on_change("filename", ...),
|
||||
# see https://github.com/bokeh/bokeh/issues/11461
|
||||
append_upload_button.on_change("filename", append_upload_button_callback)
|
||||
|
||||
def monitor_spinner_callback(_attr, _old, new):
|
||||
if det_data:
|
||||
pyzebra.normalize_dataset(det_data, new)
|
||||
_update_plot()
|
||||
if dataset:
|
||||
pyzebra.normalize_dataset(dataset, new)
|
||||
_update_single_scan_plot()
|
||||
_update_overview()
|
||||
|
||||
monitor_spinner = Spinner(title="Monitor:", mode="int", value=100_000, low=1, width=145)
|
||||
monitor_spinner.on_change("value", monitor_spinner_callback)
|
||||
|
||||
def scan_motor_select_callback(_attr, _old, new):
|
||||
if det_data:
|
||||
for scan in det_data:
|
||||
if dataset:
|
||||
for scan in dataset:
|
||||
scan["scan_motor"] = new
|
||||
_update_plot()
|
||||
_update_single_scan_plot()
|
||||
_update_overview()
|
||||
|
||||
scan_motor_select = Select(title="Scan motor:", options=[], width=145)
|
||||
scan_motor_select.on_change("value", scan_motor_select_callback)
|
||||
|
||||
def _update_table():
|
||||
fit_ok = [(1 if "fit" in scan else 0) for scan in det_data]
|
||||
scan_table_source.data.update(fit=fit_ok)
|
||||
fit_ok = [(1 if "fit" in scan else 0) for scan in dataset]
|
||||
export = [scan["export"] for scan in dataset]
|
||||
if param_select.value == "user defined":
|
||||
param = [None] * len(dataset)
|
||||
else:
|
||||
param = [scan[param_select.value] for scan in dataset]
|
||||
|
||||
def _update_plot():
|
||||
_update_single_scan_plot()
|
||||
_update_overview()
|
||||
scan_table_source.data.update(fit=fit_ok, export=export, param=param)
|
||||
|
||||
def _update_single_scan_plot():
|
||||
scan = _get_selected_scan()
|
||||
@ -281,7 +322,7 @@ def create():
|
||||
par = []
|
||||
for s, p in enumerate(scan_table_source.data["param"]):
|
||||
if p is not None:
|
||||
scan = det_data[s]
|
||||
scan = dataset[s]
|
||||
scan_motor = scan["scan_motor"]
|
||||
xs.append(scan[scan_motor])
|
||||
x.extend(scan[scan_motor])
|
||||
@ -290,30 +331,37 @@ def create():
|
||||
param.append(float(p))
|
||||
par.extend(scan["counts"])
|
||||
|
||||
if det_data:
|
||||
scan_motor = det_data[0]["scan_motor"]
|
||||
if dataset:
|
||||
scan_motor = dataset[0]["scan_motor"]
|
||||
ov_plot.axis[0].axis_label = scan_motor
|
||||
ov_param_plot.axis[0].axis_label = scan_motor
|
||||
|
||||
ov_plot_mline_source.data.update(xs=xs, ys=ys, param=param, color=color_palette(len(xs)))
|
||||
|
||||
if y:
|
||||
mapper["transform"].low = np.min([np.min(y) for y in ys])
|
||||
mapper["transform"].high = np.max([np.max(y) for y in ys])
|
||||
ov_param_plot_scatter_source.data.update(x=x, y=y, param=par)
|
||||
ov_param_plot_scatter_source.data.update(x=x, y=y)
|
||||
|
||||
if y:
|
||||
interp_f = interpolate.interp2d(x, y, par)
|
||||
x1, x2 = min(x), max(x)
|
||||
y1, y2 = min(y), max(y)
|
||||
image = interp_f(
|
||||
np.linspace(x1, x2, ov_param_plot.inner_width // 10),
|
||||
np.linspace(y1, y2, ov_param_plot.inner_height // 10),
|
||||
assume_sorted=True,
|
||||
grid_x, grid_y = np.meshgrid(
|
||||
np.linspace(x1, x2, ov_param_plot.inner_width),
|
||||
np.linspace(y1, y2, ov_param_plot.inner_height),
|
||||
)
|
||||
image = interpolate.griddata((x, y), par, (grid_x, grid_y))
|
||||
ov_param_plot_image_source.data.update(
|
||||
image=[image], x=[x1], y=[y1], dw=[x2 - x1], dh=[y2 - y1]
|
||||
)
|
||||
|
||||
x_range = ov_param_plot.x_range
|
||||
x_range.start, x_range.end = x1, x2
|
||||
x_range.reset_start, x_range.reset_end = x1, x2
|
||||
x_range.bounds = (x1, x2)
|
||||
|
||||
y_range = ov_param_plot.y_range
|
||||
y_range.start, y_range.end = y1, y2
|
||||
y_range.reset_start, y_range.reset_end = y1, y2
|
||||
y_range.bounds = (y1, y2)
|
||||
|
||||
else:
|
||||
ov_param_plot_image_source.data.update(image=[], x=[], y=[], dw=[], dh=[])
|
||||
|
||||
@ -323,11 +371,13 @@ def create():
|
||||
y_lower = []
|
||||
y_upper = []
|
||||
fit_param = fit_param_select.value
|
||||
for s, p in zip(det_data, scan_table_source.data["param"]):
|
||||
for s, p in zip(dataset, scan_table_source.data["param"]):
|
||||
if "fit" in s and fit_param:
|
||||
x.append(p)
|
||||
param_fit_val = s["fit"].params[fit_param].value
|
||||
param_fit_std = s["fit"].params[fit_param].stderr
|
||||
if param_fit_std is None:
|
||||
param_fit_std = 0
|
||||
y.append(param_fit_val)
|
||||
y_lower.append(param_fit_val - param_fit_std)
|
||||
y_upper.append(param_fit_val + param_fit_std)
|
||||
@ -350,7 +400,7 @@ def create():
|
||||
|
||||
plot_scatter_source = ColumnDataSource(dict(x=[0], y=[0], y_upper=[0], y_lower=[0]))
|
||||
plot_scatter = plot.add_glyph(
|
||||
plot_scatter_source, Scatter(x="x", y="y", line_color="steelblue")
|
||||
plot_scatter_source, Scatter(x="x", y="y", line_color="steelblue", fill_color="steelblue")
|
||||
)
|
||||
plot.add_layout(Whisker(source=plot_scatter_source, base="x", upper="y_upper", lower="y_lower"))
|
||||
|
||||
@ -408,9 +458,7 @@ def create():
|
||||
ov_plot.toolbar.logo = None
|
||||
|
||||
# Overview perams plot
|
||||
ov_param_plot = Plot(
|
||||
x_range=DataRange1d(), y_range=DataRange1d(), plot_height=450, plot_width=700
|
||||
)
|
||||
ov_param_plot = Plot(x_range=Range1d(), y_range=Range1d(), plot_height=450, plot_width=700)
|
||||
|
||||
ov_param_plot.add_layout(LinearAxis(axis_label="Param"), place="left")
|
||||
ov_param_plot.add_layout(LinearAxis(axis_label="Scan motor"), place="below")
|
||||
@ -418,16 +466,16 @@ def create():
|
||||
ov_param_plot.add_layout(Grid(dimension=0, ticker=BasicTicker()))
|
||||
ov_param_plot.add_layout(Grid(dimension=1, ticker=BasicTicker()))
|
||||
|
||||
color_mapper = LinearColorMapper(palette=Plasma256)
|
||||
ov_param_plot_image_source = ColumnDataSource(dict(image=[], x=[], y=[], dw=[], dh=[]))
|
||||
ov_param_plot.add_glyph(
|
||||
ov_param_plot_image_source, Image(image="image", x="x", y="y", dw="dw", dh="dh")
|
||||
ov_param_plot_image_source,
|
||||
Image(image="image", x="x", y="y", dw="dw", dh="dh", color_mapper=color_mapper),
|
||||
)
|
||||
|
||||
ov_param_plot_scatter_source = ColumnDataSource(dict(x=[], y=[], param=[]))
|
||||
mapper = linear_cmap(field_name="param", palette=Turbo256, low=0, high=50)
|
||||
ov_param_plot_scatter_source = ColumnDataSource(dict(x=[], y=[]))
|
||||
ov_param_plot.add_glyph(
|
||||
ov_param_plot_scatter_source,
|
||||
Scatter(x="x", y="y", line_color=mapper, fill_color=mapper, size=10),
|
||||
ov_param_plot_scatter_source, Scatter(x="x", y="y", marker="dot", size=15),
|
||||
)
|
||||
|
||||
ov_param_plot.add_tools(PanTool(), WheelZoomTool(), ResetTool())
|
||||
@ -483,9 +531,15 @@ def create():
|
||||
# skip unnecessary update caused by selection drop
|
||||
return
|
||||
|
||||
_update_plot()
|
||||
_update_single_scan_plot()
|
||||
|
||||
def scan_table_source_callback(_attr, _old, _new):
|
||||
def scan_table_source_callback(_attr, _old, new):
|
||||
# unfortunately, we don't know if the change comes from data update or user input
|
||||
# also `old` and `new` are the same for non-scalars
|
||||
for scan, export in zip(dataset, new["export"]):
|
||||
scan["export"] = export
|
||||
_update_overview()
|
||||
_update_param_plot()
|
||||
_update_preview()
|
||||
|
||||
scan_table_source = ColumnDataSource(dict(file=[], scan=[], param=[], fit=[], export=[]))
|
||||
@ -502,21 +556,43 @@ def create():
|
||||
TableColumn(field="export", title="Export", editor=CheckboxEditor(), width=50),
|
||||
],
|
||||
width=410, # +60 because of the index column
|
||||
height=350,
|
||||
editable=True,
|
||||
autosize_mode="none",
|
||||
)
|
||||
|
||||
merge_from_select = Select(title="scan:", width=145)
|
||||
|
||||
def merge_button_callback():
|
||||
scan_into = _get_selected_scan()
|
||||
scan_from = dataset[int(merge_from_select.value)]
|
||||
|
||||
if scan_into is scan_from:
|
||||
print("WARNING: Selected scans for merging are identical")
|
||||
return
|
||||
|
||||
pyzebra.merge_scans(scan_into, scan_from)
|
||||
_update_table()
|
||||
_update_single_scan_plot()
|
||||
_update_overview()
|
||||
|
||||
merge_button = Button(label="Merge into current", width=145)
|
||||
merge_button.on_click(merge_button_callback)
|
||||
|
||||
def restore_button_callback():
|
||||
pyzebra.restore_scan(_get_selected_scan())
|
||||
_update_table()
|
||||
_update_single_scan_plot()
|
||||
_update_overview()
|
||||
|
||||
restore_button = Button(label="Restore scan", width=145)
|
||||
restore_button.on_click(restore_button_callback)
|
||||
|
||||
def _get_selected_scan():
|
||||
return det_data[scan_table_source.selected.indices[0]]
|
||||
return dataset[scan_table_source.selected.indices[0]]
|
||||
|
||||
def param_select_callback(_attr, _old, new):
|
||||
if new == "user defined":
|
||||
param = [None] * len(det_data)
|
||||
else:
|
||||
param = [scan[new] for scan in det_data]
|
||||
|
||||
scan_table_source.data["param"] = param
|
||||
_update_param_plot()
|
||||
def param_select_callback(_attr, _old, _new):
|
||||
_update_table()
|
||||
|
||||
param_select = Select(
|
||||
title="Parameter:",
|
||||
@ -646,8 +722,8 @@ def create():
|
||||
fit_output_textinput = TextAreaInput(title="Fit results:", width=750, height=200)
|
||||
|
||||
def proc_all_button_callback():
|
||||
for scan, export in zip(det_data, scan_table_source.data["export"]):
|
||||
if export:
|
||||
for scan in dataset:
|
||||
if scan["export"]:
|
||||
pyzebra.fit_scan(
|
||||
scan, fit_params, fit_from=fit_from_spinner.value, fit_to=fit_to_spinner.value
|
||||
)
|
||||
@ -657,16 +733,16 @@ def create():
|
||||
lorentz=lorentz_checkbox.active,
|
||||
)
|
||||
|
||||
_update_plot()
|
||||
_update_single_scan_plot()
|
||||
_update_overview()
|
||||
_update_table()
|
||||
|
||||
for scan in det_data:
|
||||
for scan in dataset:
|
||||
if "fit" in scan:
|
||||
options = list(scan["fit"].params.keys())
|
||||
fit_param_select.options = options
|
||||
fit_param_select.value = options[0]
|
||||
break
|
||||
_update_param_plot()
|
||||
|
||||
proc_all_button = Button(label="Process All", button_type="primary", width=145)
|
||||
proc_all_button.on_click(proc_all_button_callback)
|
||||
@ -682,16 +758,16 @@ def create():
|
||||
lorentz=lorentz_checkbox.active,
|
||||
)
|
||||
|
||||
_update_plot()
|
||||
_update_single_scan_plot()
|
||||
_update_overview()
|
||||
_update_table()
|
||||
|
||||
for scan in det_data:
|
||||
for scan in dataset:
|
||||
if "fit" in scan:
|
||||
options = list(scan["fit"].params.keys())
|
||||
fit_param_select.options = options
|
||||
fit_param_select.value = options[0]
|
||||
break
|
||||
_update_param_plot()
|
||||
|
||||
proc_button = Button(label="Process Current", width=145)
|
||||
proc_button.on_click(proc_button_callback)
|
||||
@ -708,12 +784,10 @@ def create():
|
||||
temp_file = temp_dir + "/temp"
|
||||
export_data = []
|
||||
param_data = []
|
||||
for s, p, export in zip(
|
||||
det_data, scan_table_source.data["param"], scan_table_source.data["export"]
|
||||
):
|
||||
if export:
|
||||
export_data.append(s)
|
||||
param_data.append(p)
|
||||
for scan, param in zip(dataset, scan_table_source.data["param"]):
|
||||
if scan["export"] and param:
|
||||
export_data.append(scan)
|
||||
param_data.append(param)
|
||||
|
||||
pyzebra.export_param_study(export_data, param_data, temp_file)
|
||||
|
||||
@ -743,7 +817,11 @@ def create():
|
||||
column(fit_to_spinner, proc_button, proc_all_button),
|
||||
)
|
||||
|
||||
scan_layout = column(scan_table, row(monitor_spinner, scan_motor_select, param_select))
|
||||
scan_layout = column(
|
||||
scan_table,
|
||||
row(monitor_spinner, scan_motor_select, param_select),
|
||||
row(column(Spacer(height=19), row(restore_button, merge_button)), merge_from_select),
|
||||
)
|
||||
|
||||
import_layout = column(
|
||||
file_select,
|
||||
|
@ -143,8 +143,7 @@ def create():
|
||||
# last digits are spind UB matrix
|
||||
vals = list(map(float, c_rest))
|
||||
ub_matrix_spind = np.transpose(np.array(vals).reshape(3, 3))
|
||||
ub_matrix = np.linalg.inv(ub_matrix_spind)
|
||||
ub_matrices.append(ub_matrix)
|
||||
ub_matrices.append(ub_matrix_spind)
|
||||
spind_res["ub_matrix"].append(str(ub_matrix_spind * 1e-10))
|
||||
|
||||
print(f"Content of {spind_out_file}:")
|
||||
@ -168,11 +167,11 @@ def create():
|
||||
def results_table_select_callback(_attr, old, new):
|
||||
if new:
|
||||
ind = new[0]
|
||||
ub_matrix = ub_matrices[ind]
|
||||
ub_matrix_spind = ub_matrices[ind]
|
||||
res = ""
|
||||
for vec in diff_vec:
|
||||
res += f"{ub_matrix @ vec}\n"
|
||||
ub_matrix_textareainput.value = str(ub_matrix * 1e10)
|
||||
res += f"{np.linalg.inv(ub_matrix_spind) @ vec}\n"
|
||||
ub_matrix_textareainput.value = str(ub_matrix_spind * 1e-10)
|
||||
hkl_textareainput.value = res
|
||||
else:
|
||||
ub_matrix_textareainput.value = ""
|
||||
|
@ -1,5 +1,6 @@
|
||||
import os
|
||||
import re
|
||||
from ast import literal_eval
|
||||
from collections import defaultdict
|
||||
|
||||
import numpy as np
|
||||
@ -56,7 +57,7 @@ META_VARS_FLOAT = (
|
||||
"s2hl",
|
||||
)
|
||||
|
||||
META_UB_MATRIX = ("ub1j", "ub2j", "ub3j")
|
||||
META_UB_MATRIX = ("ub1j", "ub2j", "ub3j", "UB")
|
||||
|
||||
CCL_FIRST_LINE = (("idx", int), ("h", float), ("k", float), ("l", float))
|
||||
|
||||
@ -93,9 +94,9 @@ def load_1D(filepath):
|
||||
"""
|
||||
with open(filepath, "r") as infile:
|
||||
_, ext = os.path.splitext(filepath)
|
||||
det_variables = parse_1D(infile, data_type=ext)
|
||||
dataset = parse_1D(infile, data_type=ext)
|
||||
|
||||
return det_variables
|
||||
return dataset
|
||||
|
||||
|
||||
def parse_1D(fileobj, data_type):
|
||||
@ -108,21 +109,29 @@ def parse_1D(fileobj, data_type):
|
||||
variable = variable.strip()
|
||||
value = value.strip()
|
||||
|
||||
if variable in META_VARS_STR:
|
||||
metadata[variable] = value
|
||||
try:
|
||||
if variable in META_VARS_STR:
|
||||
metadata[variable] = value
|
||||
|
||||
elif variable in META_VARS_FLOAT:
|
||||
if variable == "2-theta": # fix that angle name not to be an expression
|
||||
variable = "twotheta"
|
||||
if variable in ("a", "b", "c", "alpha", "beta", "gamma"):
|
||||
variable += "_cell"
|
||||
metadata[variable] = float(value)
|
||||
elif variable in META_VARS_FLOAT:
|
||||
if variable == "2-theta": # fix that angle name not to be an expression
|
||||
variable = "twotheta"
|
||||
if variable in ("a", "b", "c", "alpha", "beta", "gamma"):
|
||||
variable += "_cell"
|
||||
metadata[variable] = float(value)
|
||||
|
||||
elif variable in META_UB_MATRIX:
|
||||
if "ub" not in metadata:
|
||||
metadata["ub"] = np.zeros((3, 3))
|
||||
row = int(variable[-2]) - 1
|
||||
metadata["ub"][row, :] = list(map(float, value.split()))
|
||||
elif variable in META_UB_MATRIX:
|
||||
if variable == "UB":
|
||||
metadata["ub"] = np.array(literal_eval(value)).reshape(3, 3)
|
||||
else:
|
||||
if "ub" not in metadata:
|
||||
metadata["ub"] = np.zeros((3, 3))
|
||||
row = int(variable[-2]) - 1
|
||||
metadata["ub"][row, :] = list(map(float, value.split()))
|
||||
|
||||
except Exception:
|
||||
print(f"Error reading {variable} with value '{value}'")
|
||||
metadata[variable] = 0
|
||||
|
||||
if "#data" in line:
|
||||
# this is the end of metadata and the start of data section
|
||||
@ -133,7 +142,7 @@ def parse_1D(fileobj, data_type):
|
||||
metadata["zebra_mode"] = "nb"
|
||||
|
||||
# read data
|
||||
scan = []
|
||||
dataset = []
|
||||
if data_type == ".ccl":
|
||||
ccl_first_line = CCL_FIRST_LINE + CCL_ANGLES[metadata["zebra_mode"]]
|
||||
ccl_second_line = CCL_SECOND_LINE
|
||||
@ -143,54 +152,66 @@ def parse_1D(fileobj, data_type):
|
||||
if not line or line.isspace():
|
||||
continue
|
||||
|
||||
s = {}
|
||||
scan = {}
|
||||
scan["export"] = True
|
||||
|
||||
# first line
|
||||
for param, (param_name, param_type) in zip(line.split(), ccl_first_line):
|
||||
s[param_name] = param_type(param)
|
||||
scan[param_name] = param_type(param)
|
||||
|
||||
# second line
|
||||
next_line = next(fileobj)
|
||||
for param, (param_name, param_type) in zip(next_line.split(), ccl_second_line):
|
||||
s[param_name] = param_type(param)
|
||||
scan[param_name] = param_type(param)
|
||||
|
||||
if s["scan_motor"] != "om":
|
||||
if "scan_motor" not in scan:
|
||||
scan["scan_motor"] = "om"
|
||||
|
||||
if scan["scan_motor"] == "o2t":
|
||||
scan["scan_motor"] = "om"
|
||||
|
||||
if scan["scan_motor"] != "om":
|
||||
raise Exception("Unsupported variable name in ccl file.")
|
||||
|
||||
# "om" -> "omega"
|
||||
s["scan_motor"] = "omega"
|
||||
s["scan_motors"] = ["omega", ]
|
||||
scan["scan_motor"] = "omega"
|
||||
scan["scan_motors"] = ["omega", ]
|
||||
# overwrite metadata, because it only refers to the scan center
|
||||
half_dist = (s["n_points"] - 1) / 2 * s["angle_step"]
|
||||
s["omega"] = np.linspace(s["omega"] - half_dist, s["omega"] + half_dist, s["n_points"])
|
||||
half_dist = (scan["n_points"] - 1) / 2 * scan["angle_step"]
|
||||
scan["omega"] = np.linspace(
|
||||
scan["omega"] - half_dist, scan["omega"] + half_dist, scan["n_points"]
|
||||
)
|
||||
|
||||
# subsequent lines with counts
|
||||
counts = []
|
||||
while len(counts) < s["n_points"]:
|
||||
while len(counts) < scan["n_points"]:
|
||||
counts.extend(map(float, next(fileobj).split()))
|
||||
s["counts"] = np.array(counts)
|
||||
s["counts_err"] = np.sqrt(s["counts"])
|
||||
scan["counts"] = np.array(counts)
|
||||
scan["counts_err"] = np.sqrt(np.maximum(scan["counts"], 1))
|
||||
|
||||
if s["h"].is_integer() and s["k"].is_integer() and s["l"].is_integer():
|
||||
s["h"], s["k"], s["l"] = map(int, (s["h"], s["k"], s["l"]))
|
||||
if scan["h"].is_integer() and scan["k"].is_integer() and scan["l"].is_integer():
|
||||
scan["h"], scan["k"], scan["l"] = map(int, (scan["h"], scan["k"], scan["l"]))
|
||||
|
||||
scan.append({**metadata, **s})
|
||||
dataset.append({**metadata, **scan})
|
||||
|
||||
elif data_type == ".dat":
|
||||
# TODO: this might need to be adapted in the future, when "gamma" will be added to dat files
|
||||
if metadata["zebra_mode"] == "nb":
|
||||
metadata["gamma"] = metadata["twotheta"]
|
||||
|
||||
s = defaultdict(list)
|
||||
scan = defaultdict(list)
|
||||
scan["export"] = True
|
||||
|
||||
match = re.search("Scanning Variables: (.*), Steps: (.*)", next(fileobj))
|
||||
motors = [motor.lower() for motor in match.group(1).split(", ")]
|
||||
steps = [float(step) for step in match.group(2).split()]
|
||||
# Steps can be separated by " " or ", "
|
||||
steps = [float(step.strip(",")) for step in match.group(2).split()]
|
||||
|
||||
match = re.search("(.*) Points, Mode: (.*), Preset (.*)", next(fileobj))
|
||||
if match.group(2) != "Monitor":
|
||||
raise Exception("Unknown mode in dat file.")
|
||||
s["monitor"] = float(match.group(3))
|
||||
scan["n_points"] = int(match.group(1))
|
||||
scan["monitor"] = float(match.group(3))
|
||||
|
||||
col_names = list(map(str.lower, next(fileobj).split()))
|
||||
|
||||
@ -200,56 +221,56 @@ def parse_1D(fileobj, data_type):
|
||||
break
|
||||
|
||||
for name, val in zip(col_names, line.split()):
|
||||
s[name].append(float(val))
|
||||
scan[name].append(float(val))
|
||||
|
||||
for name in col_names:
|
||||
s[name] = np.array(s[name])
|
||||
scan[name] = np.array(scan[name])
|
||||
|
||||
s["counts_err"] = np.sqrt(s["counts"])
|
||||
scan["counts_err"] = np.sqrt(np.maximum(scan["counts"], 1))
|
||||
|
||||
s["scan_motors"] = []
|
||||
scan["scan_motors"] = []
|
||||
for motor, step in zip(motors, steps):
|
||||
if step == 0:
|
||||
# it's not a scan motor, so keep only the median value
|
||||
s[motor] = np.median(s[motor])
|
||||
scan[motor] = np.median(scan[motor])
|
||||
else:
|
||||
s["scan_motors"].append(motor)
|
||||
scan["scan_motors"].append(motor)
|
||||
|
||||
# "om" -> "omega"
|
||||
if "om" in s["scan_motors"]:
|
||||
s["scan_motors"][s["scan_motors"].index("om")] = "omega"
|
||||
s["omega"] = s["om"]
|
||||
del s["om"]
|
||||
if "om" in scan["scan_motors"]:
|
||||
scan["scan_motors"][scan["scan_motors"].index("om")] = "omega"
|
||||
scan["omega"] = scan["om"]
|
||||
del scan["om"]
|
||||
|
||||
# "tt" -> "temp"
|
||||
if "tt" in s["scan_motors"]:
|
||||
s["scan_motors"][s["scan_motors"].index("tt")] = "temp"
|
||||
s["temp"] = s["tt"]
|
||||
del s["tt"]
|
||||
if "tt" in scan["scan_motors"]:
|
||||
scan["scan_motors"][scan["scan_motors"].index("tt")] = "temp"
|
||||
scan["temp"] = scan["tt"]
|
||||
del scan["tt"]
|
||||
|
||||
# "mf" stays "mf"
|
||||
# "phi" stays "phi"
|
||||
|
||||
s["scan_motor"] = s["scan_motors"][0]
|
||||
scan["scan_motor"] = scan["scan_motors"][0]
|
||||
|
||||
if "h" not in s:
|
||||
s["h"] = s["k"] = s["l"] = float("nan")
|
||||
if "h" not in scan:
|
||||
scan["h"] = scan["k"] = scan["l"] = float("nan")
|
||||
|
||||
for param in ("mf", "temp"):
|
||||
if param not in metadata:
|
||||
s[param] = 0
|
||||
scan[param] = 0
|
||||
|
||||
s["idx"] = 1
|
||||
scan["idx"] = 1
|
||||
|
||||
scan.append({**metadata, **s})
|
||||
dataset.append({**metadata, **scan})
|
||||
|
||||
else:
|
||||
print("Unknown file extention")
|
||||
|
||||
return scan
|
||||
return dataset
|
||||
|
||||
|
||||
def export_1D(data, path, export_target, hkl_precision=2):
|
||||
def export_1D(dataset, path, export_target, hkl_precision=2):
|
||||
"""Exports data in the .comm/.incomm format for fullprof or .col/.incol format for jana.
|
||||
|
||||
Scans with integer/real hkl values are saved in .comm/.incomm or .col/.incol files
|
||||
@ -259,11 +280,11 @@ def export_1D(data, path, export_target, hkl_precision=2):
|
||||
if export_target not in EXPORT_TARGETS:
|
||||
raise ValueError(f"Unknown export target: {export_target}.")
|
||||
|
||||
zebra_mode = data[0]["zebra_mode"]
|
||||
zebra_mode = dataset[0]["zebra_mode"]
|
||||
exts = EXPORT_TARGETS[export_target]
|
||||
file_content = {ext: [] for ext in exts}
|
||||
|
||||
for scan in data:
|
||||
for scan in dataset:
|
||||
if "fit" not in scan:
|
||||
continue
|
||||
|
||||
@ -303,9 +324,66 @@ def export_1D(data, path, export_target, hkl_precision=2):
|
||||
out_file.writelines(content)
|
||||
|
||||
|
||||
def export_param_study(data, param_data, path):
|
||||
def export_ccl_compare(dataset1, dataset2, path, export_target, hkl_precision=2):
|
||||
"""Exports compare data in the .comm/.incomm format for fullprof or .col/.incol format for jana.
|
||||
|
||||
Scans with integer/real hkl values are saved in .comm/.incomm or .col/.incol files
|
||||
correspondingly. If no scans are present for a particular output format, that file won't be
|
||||
created.
|
||||
"""
|
||||
if export_target not in EXPORT_TARGETS:
|
||||
raise ValueError(f"Unknown export target: {export_target}.")
|
||||
|
||||
zebra_mode = dataset1[0]["zebra_mode"]
|
||||
exts = EXPORT_TARGETS[export_target]
|
||||
file_content = {ext: [] for ext in exts}
|
||||
|
||||
for scan1, scan2 in zip(dataset1, dataset2):
|
||||
if "fit" not in scan1:
|
||||
continue
|
||||
|
||||
idx_str = f"{scan1['idx']:6}"
|
||||
|
||||
h, k, l = scan1["h"], scan1["k"], scan1["l"]
|
||||
hkl_are_integers = isinstance(h, int) # if True, other indices are of type 'int' too
|
||||
if hkl_are_integers:
|
||||
hkl_str = f"{h:4}{k:4}{l:4}"
|
||||
else:
|
||||
hkl_str = f"{h:8.{hkl_precision}f}{k:8.{hkl_precision}f}{l:8.{hkl_precision}f}"
|
||||
|
||||
area_n1, area_s1 = scan1["area"]
|
||||
area_n2, area_s2 = scan2["area"]
|
||||
area_n = area_n1 - area_n2
|
||||
area_s = np.sqrt(area_s1 ** 2 + area_s2 ** 2)
|
||||
area_str = f"{area_n:10.2f}{area_s:10.2f}"
|
||||
|
||||
ang_str = ""
|
||||
for angle, _ in CCL_ANGLES[zebra_mode]:
|
||||
if angle == scan1["scan_motor"]:
|
||||
angle_center = (np.min(scan1[angle]) + np.max(scan1[angle])) / 2
|
||||
else:
|
||||
angle_center = scan1[angle]
|
||||
|
||||
if angle == "twotheta" and export_target == "jana":
|
||||
angle_center /= 2
|
||||
|
||||
ang_str = ang_str + f"{angle_center:8g}"
|
||||
|
||||
if export_target == "jana":
|
||||
ang_str = ang_str + f"{scan1['temp']:8}" + f"{scan1['monitor']:8}"
|
||||
|
||||
ref = file_content[exts[0]] if hkl_are_integers else file_content[exts[1]]
|
||||
ref.append(idx_str + hkl_str + area_str + ang_str + "\n")
|
||||
|
||||
for ext, content in file_content.items():
|
||||
if content:
|
||||
with open(path + ext, "w") as out_file:
|
||||
out_file.writelines(content)
|
||||
|
||||
|
||||
def export_param_study(dataset, param_data, path):
|
||||
file_content = []
|
||||
for scan, param in zip(data, param_data):
|
||||
for scan, param in zip(dataset, param_data):
|
||||
if "fit" not in scan:
|
||||
continue
|
||||
|
||||
@ -320,7 +398,11 @@ def export_param_study(data, param_data, path):
|
||||
|
||||
fit_str = ""
|
||||
for fit_param in scan["fit"].params.values():
|
||||
fit_str = fit_str + f"{fit_param.value:<20.2f}" + f"{fit_param.stderr:<20.2f}"
|
||||
fit_param_val = fit_param.value
|
||||
fit_param_std = fit_param.stderr
|
||||
if fit_param_std is None:
|
||||
fit_param_std = 0
|
||||
fit_str = fit_str + f"{fit_param_val:<20.2f}" + f"{fit_param_std:<20.2f}"
|
||||
|
||||
_, fname_str = os.path.split(scan["original_filename"])
|
||||
|
||||
|
@ -22,6 +22,8 @@ MAX_RANGE_GAP = {
|
||||
"omega": 0.5,
|
||||
}
|
||||
|
||||
MOTOR_POS_PRECISION = 0.01
|
||||
|
||||
AREA_METHODS = ("fit_area", "int_area")
|
||||
|
||||
|
||||
@ -47,32 +49,49 @@ def _parameters_match(scan1, scan2):
|
||||
if zebra_mode != scan2["zebra_mode"]:
|
||||
return False
|
||||
|
||||
for param in ("ub", "temp", "mf", *(vars[0] for vars in CCL_ANGLES[zebra_mode])):
|
||||
for param in ("ub", *(vars[0] for vars in CCL_ANGLES[zebra_mode])):
|
||||
if param.startswith("skip"):
|
||||
# ignore skip parameters, like the last angle in 'nb' zebra mode
|
||||
continue
|
||||
|
||||
if param == scan1["scan_motor"] == scan2["scan_motor"]:
|
||||
# check if ranges of variable parameter overlap
|
||||
range1 = scan1[param]
|
||||
range2 = scan2[param]
|
||||
r1_start, r1_end = scan1[param][0], scan1[param][-1]
|
||||
r2_start, r2_end = scan2[param][0], scan2[param][-1]
|
||||
# support reversed ranges
|
||||
if r1_start > r1_end:
|
||||
r1_start, r1_end = r1_end, r1_start
|
||||
if r2_start > r2_end:
|
||||
r2_start, r2_end = r2_end, r2_start
|
||||
# maximum gap between ranges of the scanning parameter (default 0)
|
||||
max_range_gap = MAX_RANGE_GAP.get(param, 0)
|
||||
if max(range1[0] - range2[-1], range2[0] - range1[-1]) > max_range_gap:
|
||||
if max(r1_start - r2_end, r2_start - r1_end) > max_range_gap:
|
||||
return False
|
||||
|
||||
elif np.max(np.abs(scan1[param] - scan2[param])) > PARAM_PRECISIONS[param]:
|
||||
elif (
|
||||
np.max(np.abs(np.median(scan1[param]) - np.median(scan2[param])))
|
||||
> PARAM_PRECISIONS[param]
|
||||
):
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
|
||||
def merge_datasets(dataset_into, dataset_from):
|
||||
scan_motors_into = dataset_into[0]["scan_motors"]
|
||||
scan_motors_from = dataset_from[0]["scan_motors"]
|
||||
if scan_motors_into != scan_motors_from:
|
||||
print(f"Scan motors mismatch between datasets: {scan_motors_into} vs {scan_motors_from}")
|
||||
return
|
||||
|
||||
merged = np.zeros(len(dataset_from), dtype=np.bool)
|
||||
for scan_into in dataset_into:
|
||||
for ind, scan_from in enumerate(dataset_from):
|
||||
if _parameters_match(scan_into, scan_from) and not merged[ind]:
|
||||
merge_scans(scan_into, scan_from)
|
||||
if scan_into["counts"].ndim == 3:
|
||||
merge_h5_scans(scan_into, scan_from)
|
||||
else: # scan_into["counts"].ndim == 1
|
||||
merge_scans(scan_into, scan_from)
|
||||
merged[ind] = True
|
||||
|
||||
for scan_from in dataset_from:
|
||||
@ -80,7 +99,6 @@ def merge_datasets(dataset_into, dataset_from):
|
||||
|
||||
|
||||
def merge_scans(scan_into, scan_from):
|
||||
# TODO: does it need to be "scan_motor" instead of omega for a generalized solution?
|
||||
if "init_scan" not in scan_into:
|
||||
scan_into["init_scan"] = scan_into.copy()
|
||||
|
||||
@ -92,32 +110,107 @@ def merge_scans(scan_into, scan_from):
|
||||
|
||||
scan_into["merged_scans"].append(scan_from)
|
||||
|
||||
if (
|
||||
scan_into["omega"].shape == scan_from["omega"].shape
|
||||
and np.max(np.abs(scan_into["omega"] - scan_from["omega"])) < 0.0005
|
||||
):
|
||||
counts_tmp = 0
|
||||
counts_err_tmp = 0
|
||||
scan_motor = scan_into["scan_motor"] # the same as scan_from["scan_motor"]
|
||||
|
||||
for scan in [scan_into["init_scan"], *scan_into["merged_scans"]]:
|
||||
counts_tmp += scan["counts"]
|
||||
counts_err_tmp += scan["counts_err"] ** 2
|
||||
pos_all = np.array([])
|
||||
val_all = np.array([])
|
||||
err_all = np.array([])
|
||||
for scan in [scan_into["init_scan"], *scan_into["merged_scans"]]:
|
||||
pos_all = np.append(pos_all, scan[scan_motor])
|
||||
val_all = np.append(val_all, scan["counts"])
|
||||
err_all = np.append(err_all, scan["counts_err"] ** 2)
|
||||
|
||||
scan_into["counts"] = counts_tmp / (1 + len(scan_into["merged_scans"]))
|
||||
scan_into["counts_err"] = np.sqrt(counts_err_tmp)
|
||||
sort_index = np.argsort(pos_all)
|
||||
pos_all = pos_all[sort_index]
|
||||
val_all = val_all[sort_index]
|
||||
err_all = err_all[sort_index]
|
||||
|
||||
else:
|
||||
omega = np.concatenate((scan_into["omega"], scan_from["omega"]))
|
||||
counts = np.concatenate((scan_into["counts"], scan_from["counts"]))
|
||||
counts_err = np.concatenate((scan_into["counts_err"], scan_from["counts_err"]))
|
||||
pos_tmp = pos_all[:1]
|
||||
val_tmp = val_all[:1]
|
||||
err_tmp = err_all[:1]
|
||||
num_tmp = np.array([1])
|
||||
for pos, val, err in zip(pos_all[1:], val_all[1:], err_all[1:]):
|
||||
if pos - pos_tmp[-1] < MOTOR_POS_PRECISION:
|
||||
# the repeated motor position
|
||||
val_tmp[-1] += val
|
||||
err_tmp[-1] += err
|
||||
num_tmp[-1] += 1
|
||||
else:
|
||||
# a new motor position
|
||||
pos_tmp = np.append(pos_tmp, pos)
|
||||
val_tmp = np.append(val_tmp, val)
|
||||
err_tmp = np.append(err_tmp, err)
|
||||
num_tmp = np.append(num_tmp, 1)
|
||||
|
||||
index = np.argsort(omega)
|
||||
scan_into[scan_motor] = pos_tmp
|
||||
scan_into["counts"] = val_tmp / num_tmp
|
||||
scan_into["counts_err"] = np.sqrt(err_tmp) / num_tmp
|
||||
|
||||
scan_into["omega"] = omega[index]
|
||||
scan_into["counts"] = counts[index]
|
||||
scan_into["counts_err"] = counts_err[index]
|
||||
scan_from["export"] = False
|
||||
|
||||
scan_from["active"] = False
|
||||
fname1 = os.path.basename(scan_into["original_filename"])
|
||||
fname2 = os.path.basename(scan_from["original_filename"])
|
||||
print(f'Merging scans: {scan_into["idx"]} ({fname1}) <-- {scan_from["idx"]} ({fname2})')
|
||||
|
||||
|
||||
def merge_h5_scans(scan_into, scan_from):
|
||||
if "init_scan" not in scan_into:
|
||||
scan_into["init_scan"] = scan_into.copy()
|
||||
|
||||
if "merged_scans" not in scan_into:
|
||||
scan_into["merged_scans"] = []
|
||||
|
||||
for scan in scan_into["merged_scans"]:
|
||||
if scan_from is scan:
|
||||
print("Already merged scan")
|
||||
return
|
||||
|
||||
scan_into["merged_scans"].append(scan_from)
|
||||
|
||||
scan_motor = scan_into["scan_motor"] # the same as scan_from["scan_motor"]
|
||||
|
||||
pos_all = [scan_into["init_scan"][scan_motor]]
|
||||
val_all = [scan_into["init_scan"]["counts"]]
|
||||
err_all = [scan_into["init_scan"]["counts_err"] ** 2]
|
||||
for scan in scan_into["merged_scans"]:
|
||||
pos_all.append(scan[scan_motor])
|
||||
val_all.append(scan["counts"])
|
||||
err_all.append(scan["counts_err"] ** 2)
|
||||
pos_all = np.concatenate(pos_all)
|
||||
val_all = np.concatenate(val_all)
|
||||
err_all = np.concatenate(err_all)
|
||||
|
||||
sort_index = np.argsort(pos_all)
|
||||
pos_all = pos_all[sort_index]
|
||||
val_all = val_all[sort_index]
|
||||
err_all = err_all[sort_index]
|
||||
|
||||
pos_tmp = [pos_all[0]]
|
||||
val_tmp = [val_all[:1]]
|
||||
err_tmp = [err_all[:1]]
|
||||
num_tmp = [1]
|
||||
for pos, val, err in zip(pos_all[1:], val_all[1:], err_all[1:]):
|
||||
if pos - pos_tmp[-1] < MOTOR_POS_PRECISION:
|
||||
# the repeated motor position
|
||||
val_tmp[-1] += val
|
||||
err_tmp[-1] += err
|
||||
num_tmp[-1] += 1
|
||||
else:
|
||||
# a new motor position
|
||||
pos_tmp.append(pos)
|
||||
val_tmp.append(val[None, :])
|
||||
err_tmp.append(err[None, :])
|
||||
num_tmp.append(1)
|
||||
pos_tmp = np.array(pos_tmp)
|
||||
val_tmp = np.concatenate(val_tmp)
|
||||
err_tmp = np.concatenate(err_tmp)
|
||||
num_tmp = np.array(num_tmp)
|
||||
|
||||
scan_into[scan_motor] = pos_tmp
|
||||
scan_into["counts"] = val_tmp / num_tmp[:, None, None]
|
||||
scan_into["counts_err"] = np.sqrt(err_tmp) / num_tmp[:, None, None]
|
||||
|
||||
scan_from["export"] = False
|
||||
|
||||
fname1 = os.path.basename(scan_into["original_filename"])
|
||||
fname2 = os.path.basename(scan_from["original_filename"])
|
||||
@ -127,12 +220,16 @@ def merge_scans(scan_into, scan_from):
|
||||
def restore_scan(scan):
|
||||
if "merged_scans" in scan:
|
||||
for merged_scan in scan["merged_scans"]:
|
||||
merged_scan["active"] = True
|
||||
merged_scan["export"] = True
|
||||
|
||||
if "init_scan" in scan:
|
||||
tmp = scan["init_scan"]
|
||||
scan.clear()
|
||||
scan.update(tmp)
|
||||
# force scan export to True, otherwise in the sequence of incorrectly merged scans
|
||||
# a <- b <- c the scan b will be restored with scan["export"] = False if restoring executed
|
||||
# in the same order, i.e. restore a -> restore b
|
||||
scan["export"] = True
|
||||
|
||||
|
||||
def fit_scan(scan, model_dict, fit_from=None, fit_to=None):
|
||||
@ -200,8 +297,7 @@ def fit_scan(scan, model_dict, fit_from=None, fit_to=None):
|
||||
else:
|
||||
model += _model
|
||||
|
||||
weights = [1 / y_err if y_err != 0 else 1 for y_err in y_err]
|
||||
scan["fit"] = model.fit(y_fit, x=x_fit, weights=weights)
|
||||
scan["fit"] = model.fit(y_fit, x=x_fit, weights=1 / y_err)
|
||||
|
||||
|
||||
def get_area(scan, area_method, lorentz):
|
||||
@ -216,12 +312,8 @@ def get_area(scan, area_method, lorentz):
|
||||
area_s = 0
|
||||
for name, param in scan["fit"].params.items():
|
||||
if "amplitude" in name:
|
||||
if param.stderr is None:
|
||||
area_v = np.nan
|
||||
area_s = np.nan
|
||||
else:
|
||||
area_v += param.value
|
||||
area_s += param.stderr
|
||||
area_v += np.nan if param.value is None else param.value
|
||||
area_s += np.nan if param.stderr is None else param.stderr
|
||||
|
||||
else: # area_method == "int_area"
|
||||
y_val = scan["counts"]
|
||||
|
141
pyzebra/h5.py
141
pyzebra/h5.py
@ -1,10 +1,10 @@
|
||||
import h5py
|
||||
import numpy as np
|
||||
from lmfit.models import Gaussian2dModel, GaussianModel
|
||||
|
||||
|
||||
META_MATRIX = ("UB")
|
||||
META_CELL = ("cell")
|
||||
META_STR = ("name")
|
||||
META_MATRIX = ("UB", )
|
||||
META_CELL = ("cell", )
|
||||
META_STR = ("name", )
|
||||
|
||||
def read_h5meta(filepath):
|
||||
"""Open and parse content of a h5meta file.
|
||||
@ -68,71 +68,130 @@ def read_detector_data(filepath, cami_meta=None):
|
||||
ndarray: A 3D array of data, omega, gamma, nu.
|
||||
"""
|
||||
with h5py.File(filepath, "r") as h5f:
|
||||
data = h5f["/entry1/area_detector2/data"][:]
|
||||
counts = h5f["/entry1/area_detector2/data"][:].astype(np.float64)
|
||||
|
||||
# reshape data to a correct shape (2006 issue)
|
||||
n, cols, rows = data.shape
|
||||
data = data.reshape(n, rows, cols)
|
||||
n, cols, rows = counts.shape
|
||||
if "/entry1/experiment_identifier" in h5f: # old format
|
||||
# reshape images (counts) to a correct shape (2006 issue)
|
||||
counts = counts.reshape(n, rows, cols)
|
||||
else:
|
||||
counts = counts.swapaxes(1, 2)
|
||||
|
||||
det_data = {"data": data}
|
||||
det_data["original_filename"] = filepath
|
||||
scan = {"counts": counts, "counts_err": np.sqrt(np.maximum(counts, 1))}
|
||||
scan["original_filename"] = filepath
|
||||
scan["export"] = True
|
||||
|
||||
if "/entry1/zebra_mode" in h5f:
|
||||
det_data["zebra_mode"] = h5f["/entry1/zebra_mode"][0].decode()
|
||||
scan["zebra_mode"] = h5f["/entry1/zebra_mode"][0].decode()
|
||||
else:
|
||||
det_data["zebra_mode"] = "nb"
|
||||
scan["zebra_mode"] = "nb"
|
||||
|
||||
# overwrite zebra_mode from cami
|
||||
if cami_meta is not None:
|
||||
if "zebra_mode" in cami_meta:
|
||||
det_data["zebra_mode"] = cami_meta["zebra_mode"][0]
|
||||
scan["zebra_mode"] = cami_meta["zebra_mode"][0]
|
||||
|
||||
# om, sometimes ph
|
||||
if det_data["zebra_mode"] == "nb":
|
||||
det_data["omega"] = h5f["/entry1/area_detector2/rotation_angle"][:]
|
||||
else: # bi
|
||||
det_data["omega"] = h5f["/entry1/sample/rotation_angle"][:]
|
||||
if "/entry1/control/Monitor" in h5f:
|
||||
scan["monitor"] = h5f["/entry1/control/Monitor"][0]
|
||||
else: # old path
|
||||
scan["monitor"] = h5f["/entry1/control/data"][0]
|
||||
|
||||
det_data["gamma"] = h5f["/entry1/ZEBRA/area_detector2/polar_angle"][:] # gammad
|
||||
det_data["nu"] = h5f["/entry1/ZEBRA/area_detector2/tilt_angle"][:] # nud
|
||||
det_data["ddist"] = h5f["/entry1/ZEBRA/area_detector2/distance"][:]
|
||||
det_data["wave"] = h5f["/entry1/ZEBRA/monochromator/wavelength"][:]
|
||||
det_data["chi"] = h5f["/entry1/sample/chi"][:] # ch
|
||||
det_data["phi"] = h5f["/entry1/sample/phi"][:] # ph
|
||||
det_data["ub"] = h5f["/entry1/sample/UB"][:].reshape(3, 3)
|
||||
det_data["name"] = h5f["/entry1/sample/name"][0].decode()
|
||||
det_data["cell"] = h5f["/entry1/sample/cell"][:]
|
||||
scan["idx"] = 1
|
||||
|
||||
for var in ("omega", "gamma", "nu", "chi", "phi"):
|
||||
if abs(det_data[var][0] - det_data[var][-1]) > 0.1:
|
||||
det_data["scan_motor"] = var
|
||||
break
|
||||
if "/entry1/sample/rotation_angle" in h5f:
|
||||
scan["omega"] = h5f["/entry1/sample/rotation_angle"][:]
|
||||
else:
|
||||
raise ValueError("No angles that vary")
|
||||
scan["omega"] = h5f["/entry1/area_detector2/rotation_angle"][:]
|
||||
if len(scan["omega"]) == 1:
|
||||
scan["omega"] = np.ones(n) * scan["omega"]
|
||||
|
||||
scan["gamma"] = h5f["/entry1/ZEBRA/area_detector2/polar_angle"][:]
|
||||
scan["twotheta"] = h5f["/entry1/ZEBRA/area_detector2/polar_angle"][:]
|
||||
if len(scan["gamma"]) == 1:
|
||||
scan["gamma"] = np.ones(n) * scan["gamma"]
|
||||
scan["twotheta"] = np.ones(n) * scan["twotheta"]
|
||||
scan["nu"] = h5f["/entry1/ZEBRA/area_detector2/tilt_angle"][:1]
|
||||
scan["ddist"] = h5f["/entry1/ZEBRA/area_detector2/distance"][:1]
|
||||
scan["wave"] = h5f["/entry1/ZEBRA/monochromator/wavelength"][:1]
|
||||
scan["chi"] = h5f["/entry1/sample/chi"][:]
|
||||
if len(scan["chi"]) == 1:
|
||||
scan["chi"] = np.ones(n) * scan["chi"]
|
||||
scan["phi"] = h5f["/entry1/sample/phi"][:]
|
||||
if len(scan["phi"]) == 1:
|
||||
scan["phi"] = np.ones(n) * scan["phi"]
|
||||
if h5f["/entry1/sample/UB"].size == 0:
|
||||
scan["ub"] = np.eye(3) * 0.177
|
||||
else:
|
||||
scan["ub"] = h5f["/entry1/sample/UB"][:].reshape(3, 3)
|
||||
scan["name"] = h5f["/entry1/sample/name"][0].decode()
|
||||
scan["cell"] = h5f["/entry1/sample/cell"][:]
|
||||
|
||||
if n == 1:
|
||||
# a default motor for a single frame file
|
||||
scan["scan_motor"] = "omega"
|
||||
else:
|
||||
for var in ("omega", "gamma", "nu", "chi", "phi"):
|
||||
if abs(scan[var][0] - scan[var][-1]) > 0.1:
|
||||
scan["scan_motor"] = var
|
||||
break
|
||||
else:
|
||||
raise ValueError("No angles that vary")
|
||||
|
||||
scan["scan_motors"] = [scan["scan_motor"], ]
|
||||
|
||||
# optional parameters
|
||||
if "/entry1/sample/magnetic_field" in h5f:
|
||||
det_data["mf"] = h5f["/entry1/sample/magnetic_field"][:]
|
||||
scan["mf"] = h5f["/entry1/sample/magnetic_field"][:]
|
||||
|
||||
if "/entry1/sample/temperature" in h5f:
|
||||
det_data["temp"] = h5f["/entry1/sample/temperature"][:]
|
||||
scan["temp"] = h5f["/entry1/sample/temperature"][:]
|
||||
elif "/entry1/sample/Ts/value" in h5f:
|
||||
scan["temp"] = h5f["/entry1/sample/Ts/value"][:]
|
||||
|
||||
# overwrite metadata from .cami
|
||||
if cami_meta is not None:
|
||||
if "crystal" in cami_meta:
|
||||
cami_meta_crystal = cami_meta["crystal"]
|
||||
if "name" in cami_meta_crystal:
|
||||
det_data["name"] = cami_meta_crystal["name"]
|
||||
scan["name"] = cami_meta_crystal["name"]
|
||||
if "UB" in cami_meta_crystal:
|
||||
det_data["ub"] = cami_meta_crystal["UB"]
|
||||
scan["ub"] = cami_meta_crystal["UB"]
|
||||
if "cell" in cami_meta_crystal:
|
||||
det_data["cell"] = cami_meta_crystal["cell"]
|
||||
scan["cell"] = cami_meta_crystal["cell"]
|
||||
if "lambda" in cami_meta_crystal:
|
||||
det_data["wave"] = cami_meta_crystal["lambda"]
|
||||
scan["wave"] = cami_meta_crystal["lambda"]
|
||||
|
||||
if "detector parameters" in cami_meta:
|
||||
cami_meta_detparam = cami_meta["detector parameters"]
|
||||
if "dist1" in cami_meta_detparam:
|
||||
det_data["ddist"] = cami_meta_detparam["dist1"]
|
||||
if "dist2" in cami_meta_detparam:
|
||||
scan["ddist"] = cami_meta_detparam["dist2"]
|
||||
|
||||
return det_data
|
||||
return scan
|
||||
|
||||
|
||||
def fit_event(scan, fr_from, fr_to, y_from, y_to, x_from, x_to):
|
||||
data_roi = scan["counts"][fr_from:fr_to, y_from:y_to, x_from:x_to]
|
||||
|
||||
model = GaussianModel()
|
||||
fr = np.arange(fr_from, fr_to)
|
||||
counts_per_fr = np.sum(data_roi, axis=(1, 2))
|
||||
params = model.guess(counts_per_fr, fr)
|
||||
result = model.fit(counts_per_fr, x=fr, params=params)
|
||||
frC = result.params["center"].value
|
||||
intensity = result.params["height"].value
|
||||
|
||||
counts_std = counts_per_fr.std()
|
||||
counts_mean = counts_per_fr.mean()
|
||||
snr = 0 if counts_std == 0 else counts_mean / counts_std
|
||||
|
||||
model = Gaussian2dModel()
|
||||
xs, ys = np.meshgrid(np.arange(x_from, x_to), np.arange(y_from, y_to))
|
||||
xs = xs.flatten()
|
||||
ys = ys.flatten()
|
||||
counts = np.sum(data_roi, axis=0).flatten()
|
||||
params = model.guess(counts, xs, ys)
|
||||
result = model.fit(counts, x=xs, y=ys, params=params)
|
||||
xC = result.params["centerx"].value
|
||||
yC = result.params["centery"].value
|
||||
|
||||
scan["fit"] = {"frame": frC, "x_pos": xC, "y_pos": yC, "intensity": intensity, "snr": snr}
|
||||
|
483
pyzebra/sxtal_refgen.py
Normal file
483
pyzebra/sxtal_refgen.py
Normal file
@ -0,0 +1,483 @@
|
||||
import io
|
||||
import os
|
||||
import subprocess
|
||||
import tempfile
|
||||
from math import ceil, floor
|
||||
|
||||
import numpy as np
|
||||
|
||||
SXTAL_REFGEN_PATH = "/afs/psi.ch/project/sinq/rhel7/bin/Sxtal_Refgen"
|
||||
|
||||
_zebraBI_default_geom = """GEOM 2 Bissecting - HiCHI
|
||||
BLFR z-up
|
||||
DIST_UNITS mm
|
||||
ANGL_UNITS deg
|
||||
DET_TYPE Point ipsd 1
|
||||
DIST_DET 488
|
||||
DIM_XY 1.0 1.0 1 1
|
||||
GAPS_DET 0 0
|
||||
|
||||
SETTING 1 0 0 0 1 0 0 0 1
|
||||
NUM_ANG 4
|
||||
ANG_LIMITS Min Max Offset
|
||||
Gamma 0.0 128.0 0.00
|
||||
Omega 0.0 64.0 0.00
|
||||
Chi 80.0 211.0 0.00
|
||||
Phi 0.0 360.0 0.00
|
||||
|
||||
DET_OFF 0 0 0
|
||||
"""
|
||||
|
||||
_zebraNB_default_geom = """GEOM 3 Normal Beam
|
||||
BLFR z-up
|
||||
DIST_UNITS mm
|
||||
ANGL_UNITS deg
|
||||
DET_TYPE Point ipsd 1
|
||||
DIST_DET 448
|
||||
DIM_XY 1.0 1.0 1 1
|
||||
GAPS_DET 0 0
|
||||
|
||||
SETTING 1 0 0 0 1 0 0 0 1
|
||||
NUM_ANG 3
|
||||
ANG_LIMITS Min Max Offset
|
||||
Gamma 0.0 128.0 0.00
|
||||
Omega -180.0 180.0 0.00
|
||||
Nu -15.0 15.0 0.00
|
||||
|
||||
DET_OFF 0 0 0
|
||||
"""
|
||||
|
||||
_zebra_default_cfl = """TITLE mymaterial
|
||||
SPGR P 63 2 2
|
||||
CELL 5.73 5.73 11.89 90 90 120
|
||||
|
||||
WAVE 1.383
|
||||
|
||||
UBMAT
|
||||
0.000000 0.000000 0.084104
|
||||
0.000000 0.174520 -0.000000
|
||||
0.201518 0.100759 0.000000
|
||||
|
||||
INSTR zebra.geom
|
||||
|
||||
ORDER 1 2 3
|
||||
|
||||
ANGOR gamma
|
||||
|
||||
HLIM -25 25 -25 25 -25 25
|
||||
SRANG 0.0 0.7
|
||||
|
||||
Mag_Structure
|
||||
lattiCE P 1
|
||||
kvect 0.0 0.0 0.0
|
||||
magcent
|
||||
symm x,y,z
|
||||
msym u,v,w, 0.0
|
||||
End_Mag_Structure
|
||||
"""
|
||||
|
||||
|
||||
def get_zebraBI_default_geom_file():
|
||||
return io.StringIO(_zebraBI_default_geom)
|
||||
|
||||
|
||||
def get_zebraNB_default_geom_file():
|
||||
return io.StringIO(_zebraNB_default_geom)
|
||||
|
||||
|
||||
def get_zebra_default_cfl_file():
|
||||
return io.StringIO(_zebra_default_cfl)
|
||||
|
||||
|
||||
def read_geom_file(fileobj):
    """Parse a Sxtal_Refgen .geom file into a dict of angular limits.

    Returns a dict with key "geom" ("bi" for GEOM 2, "nb" otherwise) and one
    entry per angle name (lowercased) mapping to [min, max, offset] as strings.
    A "2theta" angle is renamed to "gamma".  Text after "!" is a comment.
    """
    limits = dict()
    for raw in fileobj:
        # drop everything after a "!" comment marker
        text = raw.partition("!")[0]

        if text.startswith("GEOM"):
            _, geom_val = text.split(maxsplit=1)
            limits["geom"] = "bi" if geom_val.startswith("2") else "nb"

        elif text.startswith("ANG_LIMITS"):
            # the angle table follows immediately, terminated by a blank line
            for ang_line in fileobj:
                if not ang_line or ang_line.isspace():
                    break

                name, lo, hi, offset = ang_line.split()
                limits[name.lower()] = [lo, hi, offset]

    if "2theta" in limits:  # treat 2theta as gamma
        limits["gamma"] = limits.pop("2theta")

    return limits
|
||||
|
||||
|
||||
def export_geom_file(path, ang_lims, template=None):
    """Write a Sxtal_Refgen .geom file at *path*, substituting angular limits.

    The template (an iterable of lines; the built-in BI/NB template when
    *template* is None) is copied through, except that the ANG_LIMITS table is
    rewritten from *ang_lims*: a dict with "geom" ("bi" -> 4 angles, else 3)
    and per-angle [min, max, offset] entries keyed by lowercase angle name.
    A template "2theta" row is written as "Gamma".

    Fix: the default template is now built only when no template is supplied
    (the original eagerly constructed it, then threw it away).
    """
    # number of rows in the ANG_LIMITS table depends on the geometry
    n_ang = 4 if ang_lims["geom"] == "bi" else 3

    if template is not None:
        template_file = template
    elif ang_lims["geom"] == "bi":
        template_file = get_zebraBI_default_geom_file()
    else:  # ang_lims["geom"] == "nb"
        template_file = get_zebraNB_default_geom_file()

    with open(path, "w") as out_file:
        for line in template_file:
            out_file.write(line)

            if line.startswith("ANG_LIMITS"):
                # consume the template's angle rows and emit ours instead
                for _ in range(n_ang):
                    next_line = next(template_file)
                    ang, _, _, _ = next_line.split()

                    if ang == "2theta":  # treat 2theta as gamma
                        ang = "Gamma"
                    vals = ang_lims[ang.lower()]

                    out_file.write(f"{'':<8}{ang:<10}{vals[0]:<10}{vals[1]:<10}{vals[2]:<10}\n")
|
||||
|
||||
|
||||
def calc_ub_matrix(params):
    """Compute a UB matrix by running the external Sxtal_Refgen program.

    params: mapping of .cfl keyword -> value, written one per line to a
    temporary input file.  Returns the 9 matrix entries as strings, read from
    the 3 lines following the "BL_M" marker in the generated .sfa file.
    """
    with tempfile.TemporaryDirectory() as work_dir:
        cfl_path = os.path.join(work_dir, "ub_matrix.cfl")

        with open(cfl_path, "w") as cfl:
            cfl.writelines(f"{key} {value}\n" for key, value in params.items())

        result = subprocess.run(
            [SXTAL_REFGEN_PATH, cfl_path],
            cwd=work_dir,
            check=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            text=True,
        )
        print(" ".join(result.args))
        print(result.stdout)

        ub_matrix = []
        # Sxtal_Refgen writes its output next to the input as ub_matrix.sfa
        with open(os.path.join(work_dir, "ub_matrix.sfa"), "r") as sfa:
            for sfa_line in sfa:
                if "BL_M" in sfa_line:  # next 3 lines contain the matrix
                    for _ in range(3):
                        *row_vals, _ = next(sfa).split(maxsplit=3)
                        ub_matrix.extend(row_vals)

        return ub_matrix
|
||||
|
||||
|
||||
def read_cfl_file(fileobj):
    """Read selected parameters from a .cfl file.

    Returns a dict keyed by the recognised .cfl keywords; unseen keywords stay
    None.  "UBMAT" maps to a flat list of the 9 matrix entries (strings, read
    from the 3 lines after the keyword); every other keyword maps to the rest
    of its line as a single string.  Text after "!" is a comment.
    """
    result = dict.fromkeys(
        ("SPGR", "CELL", "WAVE", "UBMAT", "HLIM", "SRANG", "lattiCE", "kvect")
    )
    keywords = tuple(result)

    for raw in fileobj:
        # strip surrounding whitespace, then drop any "!" comment
        text = raw.strip().partition("!")[0]

        if not text.startswith(keywords):
            continue

        if text.startswith("UBMAT"):  # next 3 lines contain the matrix
            matrix = []
            for _ in range(3):
                matrix.extend(next(fileobj).strip().split(maxsplit=2))
            result["UBMAT"] = matrix
        else:
            name, value = text.split(maxsplit=1)
            result[name] = value

    return result
|
||||
|
||||
|
||||
def read_cif_file(fileobj):
    """Extract space group, cell parameters and atom sites from a CIF file.

    Returns a dict with the .cfl-style keys:
      "SPGR" - space group symbol (quotes stripped) or None,
      "CELL" - the six lattice parameters joined by spaces, or None if any
               of them was missing,
      "ATOM" - list of space-joined per-atom value strings, one per site.
    """
    params = {"SPGR": None, "CELL": None, "ATOM": []}

    # The six lattice parameters, filled in as the corresponding tags appear.
    cell_params = {
        "_cell_length_a": None,
        "_cell_length_b": None,
        "_cell_length_c": None,
        "_cell_angle_alpha": None,
        "_cell_angle_beta": None,
        "_cell_angle_gamma": None,
    }
    cell_param_names = tuple(cell_params)

    # Column position of each atom-site field within a loop_ data row.
    # "_atom_site_label" is assumed to be the first declared field (index 0);
    # the remaining positions are taken from the loop_ header order below.
    atom_param_pos = {
        "_atom_site_label": 0,
        "_atom_site_type_symbol": None,
        "_atom_site_fract_x": None,
        "_atom_site_fract_y": None,
        "_atom_site_fract_z": None,
        "_atom_site_U_iso_or_equiv": None,
        "_atom_site_occupancy": None,
    }
    atom_param_names = tuple(atom_param_pos)

    for line in fileobj:
        line = line.strip()
        if line.startswith("_space_group_name_H-M_alt"):
            _, val = line.split(maxsplit=1)
            params["SPGR"] = val.strip("'")  # drop the CIF quoting around the symbol

        elif line.startswith(cell_param_names):
            param, val = line.split(maxsplit=1)
            cell_params[param] = val

        elif line.startswith("_atom_site_label"):  # assume this is the start of atom data
            # Remaining "_atom_site_*" header lines give each field its column
            # index (enumerate counts every line, so positions stay aligned
            # even for header fields we don't track); then data rows follow.
            for ind, line in enumerate(fileobj, start=1):
                line = line.strip()

                # read fields
                if line.startswith("_atom_site"):
                    if line.startswith(atom_param_names):
                        atom_param_pos[line] = ind
                    continue

                # read data till an empty line
                if not line:
                    break
                vals = line.split()
                # NOTE(review): a tracked field missing from the loop_ header
                # leaves its position as None, making vals[ind] raise TypeError
                # here — confirm all expected CIF inputs declare every field.
                params["ATOM"].append(" ".join([vals[ind] for ind in atom_param_pos.values()]))

    # Only report CELL when all six lattice parameters were present.
    if None not in cell_params.values():
        params["CELL"] = " ".join(cell_params.values())

    return params
|
||||
|
||||
|
||||
def export_cfl_file(path, params, template=None):
    """Write a .cfl file at *path*, substituting values from *params*.

    The template (iterable of lines; the built-in default when *template* is
    None) is copied through with these substitutions:
      - a line starting with a key of *params* is rewritten as "KEY value",
      - "UBMAT" keeps its keyword line but the next 3 template lines are
        replaced by rows of params["UBMAT"] (a flat list of 9 entries),
      - a run of "ATOM" lines is replaced by params["ATOM"] entries,
      - "INSTR" is reset to the default "INSTR zebra.geom".
    If params has "ATOM" but the template had no ATOM lines, the atoms are
    appended at the end.

    Fixes over the previous version: the first non-ATOM line after a replaced
    ATOM block is now re-processed instead of silently dropped, and a template
    that ends inside an ATOM/UBMAT block no longer raises StopIteration.
    """
    param_names = tuple(params)
    template_file = get_zebra_default_cfl_file() if template is None else template

    atom_done = False
    with open(path, "w") as out_file:
        # Pull-based loop so a line consumed while scanning the ATOM block can
        # be handed back for normal processing via `continue`.
        line = next(template_file, None)
        while line is not None:
            if line.startswith(param_names):
                if line.startswith("UBMAT"):  # only UBMAT values are not on the same line
                    out_file.write(line)
                    for i in range(3):
                        next(template_file, None)  # discard template matrix row
                        out_file.write(" ".join(params["UBMAT"][3 * i : 3 * (i + 1)]) + "\n")

                elif line.startswith("ATOM"):
                    if "ATOM" in params:
                        # skip the template's ATOM block, substitute ours
                        while line is not None and line.startswith("ATOM"):
                            line = next(template_file, None)
                        for atom_line in params["ATOM"]:
                            out_file.write(f"ATOM {atom_line}\n")
                        atom_done = True
                        # re-process the first non-ATOM line instead of dropping it
                        continue

                else:
                    param, _ = line.split(maxsplit=1)
                    out_file.write(f"{param} {params[param]}\n")

            elif line.startswith("INSTR"):
                # replace it with a default name
                out_file.write("INSTR zebra.geom\n")

            else:
                out_file.write(line)

            line = next(template_file, None)

        # append ATOM data if it's present and the template did not contain it
        if "ATOM" in params and not atom_done:
            out_file.write("\n")
            for atom_line in params["ATOM"]:
                out_file.write(f"ATOM {atom_line}\n")
|
||||
|
||||
|
||||
def sort_hkl_file_bi(file_in, file_out, priority, chunks):
    """Reorder a bisecting-geometry .hkl reflection list for measurement.

    Reflections are grouped hierarchically into angular chunks by four
    priority levels; each level alternates its scan direction between
    consecutive chunks (boustrophedon) — presumably to minimise motor travel;
    the first 3 header lines are copied through unchanged.

    file_in / file_out: input and output file paths.
    priority: sequence of 4 angle names, slowest- to fastest-varying.
        NOTE(review): level-1 keys are "2theta"/"omega"/"chi"/"phi" but the
        nested levels use "stt" instead of "2theta" — confirm callers pass
        the matching spelling for each level.
    chunks: chunk widths (degrees) for the first three priority levels.
    """
    with open(file_in) as fileobj:
        file_in_data = fileobj.readlines()

    # Columns 4..7 hold the four motor angles of each reflection.
    data = np.genfromtxt(file_in, skip_header=3)
    stt = data[:, 4]
    omega = data[:, 5]
    chi = data[:, 6]
    phi = data[:, 7]

    lines = file_in_data[3:]
    lines_update = []

    angles = {"2theta": stt, "omega": omega, "chi": chi, "phi": phi}

    # Reverse flag per level; each toggles after every chunk of its parent.
    to_reverse = False
    to_reverse_p2 = False
    to_reverse_p3 = False

    # Get indices within first priority
    ang_p1 = angles[priority[0]]
    begin_p1 = floor(min(ang_p1))
    end_p1 = ceil(max(ang_p1))
    delta_p1 = chunks[0]
    for p1 in range(begin_p1, end_p1, delta_p1):
        # reflections whose level-1 angle falls in [p1, p1 + delta_p1)
        ind_p1 = [j for j, x in enumerate(ang_p1) if p1 <= x and x < p1 + delta_p1]

        stt_new = [stt[x] for x in ind_p1]
        omega_new = [omega[x] for x in ind_p1]
        chi_new = [chi[x] for x in ind_p1]
        phi_new = [phi[x] for x in ind_p1]
        lines_new = [lines[x] for x in ind_p1]

        angles_p2 = {"stt": stt_new, "omega": omega_new, "chi": chi_new, "phi": phi_new}

        # Get indices for second priority
        ang_p2 = angles_p2[priority[1]]
        if len(ang_p2) > 0 and to_reverse_p2:
            # scan downwards through the chunk range
            begin_p2 = ceil(max(ang_p2))
            end_p2 = floor(min(ang_p2))
            delta_p2 = -chunks[1]
        elif len(ang_p2) > 0 and not to_reverse_p2:
            end_p2 = ceil(max(ang_p2))
            begin_p2 = floor(min(ang_p2))
            delta_p2 = chunks[1]
        else:
            # empty chunk: make the range below empty
            end_p2 = 0
            begin_p2 = 0
            delta_p2 = 1

        to_reverse_p2 = not to_reverse_p2

        for p2 in range(begin_p2, end_p2, delta_p2):
            # chunk bounds independent of scan direction
            min_p2 = min([p2, p2 + delta_p2])
            max_p2 = max([p2, p2 + delta_p2])
            ind_p2 = [j for j, x in enumerate(ang_p2) if min_p2 <= x and x < max_p2]

            stt_new2 = [stt_new[x] for x in ind_p2]
            omega_new2 = [omega_new[x] for x in ind_p2]
            chi_new2 = [chi_new[x] for x in ind_p2]
            phi_new2 = [phi_new[x] for x in ind_p2]
            lines_new2 = [lines_new[x] for x in ind_p2]

            angles_p3 = {"stt": stt_new2, "omega": omega_new2, "chi": chi_new2, "phi": phi_new2}

            # Get indices for third priority
            ang_p3 = angles_p3[priority[2]]
            if len(ang_p3) > 0 and to_reverse_p3:
                # NOTE(review): level 3 pads its range by one chunk on each
                # side, unlike level 2 — presumably to include boundary
                # values; confirm this asymmetry is intentional.
                begin_p3 = ceil(max(ang_p3)) + chunks[2]
                end_p3 = floor(min(ang_p3)) - chunks[2]
                delta_p3 = -chunks[2]
            elif len(ang_p3) > 0 and not to_reverse_p3:
                end_p3 = ceil(max(ang_p3)) + chunks[2]
                begin_p3 = floor(min(ang_p3)) - chunks[2]
                delta_p3 = chunks[2]
            else:
                end_p3 = 0
                begin_p3 = 0
                delta_p3 = 1

            to_reverse_p3 = not to_reverse_p3

            for p3 in range(begin_p3, end_p3, delta_p3):
                min_p3 = min([p3, p3 + delta_p3])
                max_p3 = max([p3, p3 + delta_p3])
                ind_p3 = [j for j, x in enumerate(ang_p3) if min_p3 <= x and x < max_p3]

                # within the innermost chunk, order by the 4th-priority angle,
                # alternating ascending/descending between chunks
                angle_new3 = [angles_p3[priority[3]][x] for x in ind_p3]

                ind_final = [x for _, x in sorted(zip(angle_new3, ind_p3), reverse=to_reverse)]

                to_reverse = not to_reverse

                for i in ind_final:
                    lines_update.append(lines_new2[i])

    with open(file_out, "w") as fileobj:
        # copy the 3 header lines unchanged, then the re-ordered reflections
        for _ in range(3):
            fileobj.write(file_in_data.pop(0))

        fileobj.writelines(lines_update)
|
||||
|
||||
|
||||
def sort_hkl_file_nb(file_in, file_out, priority, chunks):
    """Reorder a normal-beam-geometry .hkl reflection list for measurement.

    Two-level analogue of sort_hkl_file_bi: reflections are chunked by the
    first two priority angles with alternating (boustrophedon) scan direction,
    then sorted by the third angle inside each chunk; the first 3 header lines
    are copied through unchanged.

    file_in / file_out: input and output file paths.
    priority: sequence of 3 angle names out of "gamma"/"omega"/"nu",
        slowest- to fastest-varying.
    chunks: chunk widths (degrees) for the first two priority levels.
    """
    with open(file_in) as fileobj:
        file_in_data = fileobj.readlines()

    # Columns 4..6 hold the three motor angles of each reflection.
    data = np.genfromtxt(file_in, skip_header=3)
    gamma = data[:, 4]
    omega = data[:, 5]
    nu = data[:, 6]

    lines = file_in_data[3:]
    lines_update = []

    angles = {"gamma": gamma, "omega": omega, "nu": nu}

    # Reverse flag per level; each toggles after every chunk of its parent.
    to_reverse = False
    to_reverse_p2 = False

    # Get indices within first priority
    ang_p1 = angles[priority[0]]
    begin_p1 = floor(min(ang_p1))
    end_p1 = ceil(max(ang_p1))
    delta_p1 = chunks[0]
    for p1 in range(begin_p1, end_p1, delta_p1):
        # reflections whose level-1 angle falls in [p1, p1 + delta_p1)
        ind_p1 = [j for j, x in enumerate(ang_p1) if p1 <= x and x < p1 + delta_p1]

        # Get angles from within nu range
        lines_new = [lines[x] for x in ind_p1]
        gamma_new = [gamma[x] for x in ind_p1]
        omega_new = [omega[x] for x in ind_p1]
        nu_new = [nu[x] for x in ind_p1]

        angles_p2 = {"gamma": gamma_new, "omega": omega_new, "nu": nu_new}

        # Get indices for second priority
        # (len(gamma_new) == len(ang_p2), so this tests the same emptiness)
        ang_p2 = angles_p2[priority[1]]
        if len(gamma_new) > 0 and to_reverse_p2:
            # scan downwards through the chunk range
            begin_p2 = ceil(max(ang_p2))
            end_p2 = floor(min(ang_p2))
            delta_p2 = -chunks[1]
        elif len(gamma_new) > 0 and not to_reverse_p2:
            end_p2 = ceil(max(ang_p2))
            begin_p2 = floor(min(ang_p2))
            delta_p2 = chunks[1]
        else:
            # empty chunk: make the range below empty
            end_p2 = 0
            begin_p2 = 0
            delta_p2 = 1

        to_reverse_p2 = not to_reverse_p2

        for p2 in range(begin_p2, end_p2, delta_p2):
            # chunk bounds independent of scan direction
            min_p2 = min([p2, p2 + delta_p2])
            max_p2 = max([p2, p2 + delta_p2])
            ind_p2 = [j for j, x in enumerate(ang_p2) if min_p2 <= x and x < max_p2]

            # within the chunk, order by the 3rd-priority angle, alternating
            # ascending/descending between chunks
            angle_new2 = [angles_p2[priority[2]][x] for x in ind_p2]

            ind_final = [x for _, x in sorted(zip(angle_new2, ind_p2), reverse=to_reverse)]

            to_reverse = not to_reverse

            for i in ind_final:
                lines_update.append(lines_new[i])

    with open(file_out, "w") as fileobj:
        # copy the 3 header lines unchanged, then the re-ordered reflections
        for _ in range(3):
            fileobj.write(file_in_data.pop(0))

        fileobj.writelines(lines_update)
|
@ -1,20 +1,17 @@
|
||||
import os
|
||||
|
||||
ZEBRA_PROPOSALS_PATHS = [
|
||||
f"/afs/psi.ch/project/sinqdata/{year}/zebra/" for year in (2016, 2017, 2018, 2020, 2021)
|
||||
]
|
||||
SINQ_PATH = "/afs/psi.ch/project/sinqdata"
|
||||
ZEBRA_PROPOSALS_PATH = os.path.join(SINQ_PATH, "{year}/zebra/{proposal}")
|
||||
|
||||
|
||||
def find_proposal_path(proposal):
|
||||
proposal = proposal.strip()
|
||||
if proposal:
|
||||
for zebra_proposals_path in ZEBRA_PROPOSALS_PATHS:
|
||||
proposal_path = os.path.join(zebra_proposals_path, proposal)
|
||||
for entry in os.scandir(SINQ_PATH):
|
||||
if entry.is_dir() and len(entry.name) == 4 and entry.name.isdigit():
|
||||
proposal_path = ZEBRA_PROPOSALS_PATH.format(year=entry.name, proposal=proposal)
|
||||
if os.path.isdir(proposal_path):
|
||||
# found it
|
||||
break
|
||||
else:
|
||||
raise ValueError(f"Can not find data for proposal '{proposal}'.")
|
||||
else:
|
||||
proposal_path = ""
|
||||
raise ValueError(f"Can not find data for proposal '{proposal}'.")
|
||||
|
||||
return proposal_path
|
||||
|
@ -372,6 +372,27 @@ def ang2hkl(wave, ddist, gammad, om, ch, ph, nud, ub, x, y):
|
||||
return hkl
|
||||
|
||||
|
||||
def ang2hkl_1d(wave, ddist, ga, om, ch, ph, nu, ub):
    """Calculate hkl-indices of a reflection from its position (angles) at the 1d-detector.

    Note: ddist is unused here; it is kept for signature parity with ang2hkl.
    """
    scat_vec = z1frmd(wave, ga, om, ch, ph, nu)
    hkl = np.linalg.inv(ub) @ scat_vec

    return hkl
|
||||
|
||||
|
||||
def ang_proc(wave, ddist, gammad, om, ch, ph, nud, x, y):
    """Utility function to calculate ch, ph, ga, om.

    Converts the detector position (x, y) at (gammad, nud) to polar angles,
    forms the scattering vector, and re-expresses it as diffractometer angles.
    """
    gamma_pol, nu_pol = det2pol(ddist, gammad, nud, x, y)
    scat_vec = z1frmd(wave, gamma_pol, om, ch, ph, nu_pol)
    chi_eq, phi_eq = eqchph(scat_vec)
    ch, ph, ga, om = fixdnu(wave, scat_vec, chi_eq, phi_eq, nu_pol)

    return ch, ph, ga, om
|
||||
|
||||
|
||||
def gauss(x, *p):
|
||||
"""Defines Gaussian function
|
||||
|
||||
|
Reference in New Issue
Block a user