125 Commits
0.2.2 ... 0.3.0

Author SHA1 Message Date
0c620d4a08 Updating for version 0.3.0 2021-03-26 09:16:18 +01:00
5d65afa194 Forward stdout of spind subprocs to pyzebra app 2021-03-12 16:59:43 +01:00
d312c99f96 Replace fit_columns with autosize_mode
fit_columns was deprecated in bokeh/2.2
2021-03-12 11:57:50 +01:00
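A minimal sketch of this migration, with illustrative column fields and widths (not the app's actual table): fit_columns=False was deprecated in bokeh/2.2 and autosize_mode="none" is its replacement.

from bokeh.models import ColumnDataSource, DataTable, TableColumn

source = ColumnDataSource(dict(scan=[1, 2], hkl=["1 0 0", "0 1 0"]))
columns = [
    TableColumn(field="scan", title="Scan", width=50),
    TableColumn(field="hkl", title="hkl", width=100),
]
# autosize_mode="none" keeps the explicit column widths, as fit_columns=False did
table = DataTable(source=source, columns=columns, autosize_mode="none", width=310)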
526821e073 Display peak components and allow hiding plots 2021-03-11 18:00:41 +01:00
d606230feb Simplify data provision for js file export 2021-03-11 15:36:58 +01:00
ce5cff05a7 Better default names for exported files 2021-03-11 14:39:29 +01:00
a738f74f88 Do not auto fit non-exported scans (e.g. merged) 2021-03-11 11:07:37 +01:00
4785c97f33 Remove mpi4py from deps
* It is a dependency of an optional spind func
2021-03-10 18:29:33 +01:00
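A common way to keep such an optional dependency out of the hard requirements is an import guard; a hedged sketch (pyzebra's actual handling may differ):

try:
    from mpi4py import MPI  # only needed by the optional spind-based routine
except ImportError:
    MPI = None  # everything else works without MPI installed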
e68fedc7e5 Fix for parameter equal to 0 2021-03-10 18:23:03 +01:00
2581996625 Add h,k,l to a list of possible parameters 2021-03-10 17:56:53 +01:00
d014849abd Allow upload of multiple files in ccl_integrate 2021-03-10 17:42:08 +01:00
aa6e668f97 Minor label fixes 2021-03-10 17:32:03 +01:00
85c74c39f0 Add bulk loading of files from afs in ccl_integrate 2021-03-10 13:21:13 +01:00
0e97f44cc7 Preview/Download export data changes
* Preview button now sets data for downloading
2021-03-10 11:43:07 +01:00
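The pattern, roughly (a sketch mirroring the diffs below; make_export_text is a placeholder for the real export step, and javaScript refers to the download snippet shown later):

from bokeh.models import Button, ColumnDataSource, CustomJS, TextAreaInput

js_data = ColumnDataSource(data=dict(content=["", ""], fname=["", ""]))
export_preview_textinput = TextAreaInput(title="Export preview:")
save_button = Button(label="Download preview")

def preview_button_callback():
    comm, incomm = make_export_text()            # placeholder for the real export
    export_preview_textinput.value = comm + incomm
    js_data.data.update(content=[comm, incomm])  # staged for the CustomJS download

save_button.js_on_click(CustomJS(args={"js_data": js_data}, code=javaScript))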
9c5f7e6284 Fix and simplify export data content 2021-03-10 11:06:17 +01:00
167e5136b1 Add bulk loading of files from afs in param_study 2021-03-09 15:48:01 +01:00
21d642b7d7 Display fit range spans 2021-03-09 14:36:10 +01:00
fe68b1de7e Enable fit range selection 2021-03-09 14:13:34 +01:00
3291a67e7d Reset selected parameter on new data open/append 2021-03-08 17:52:24 +01:00
041c5c0e8b Correctly handle scans of different length 2021-03-08 17:46:34 +01:00
566cebb01a Fix a bug in omega range calculation 2021-03-04 10:57:52 +01:00
a70b4fae57 Disable fit model change in ccl_integrate 2021-03-03 16:55:49 +01:00
97836b2906 Auto range for overview scatter plot 2021-03-03 16:53:06 +01:00
7ee8eba007 Add parameter selection option 2021-03-03 16:40:24 +01:00
5169e08777 Handle dat files with hkl scans 2021-03-03 16:10:51 +01:00
ed8e6d262f Postproc hkl indices only for ccl files 2021-03-03 16:08:47 +01:00
426bb16792 Cleanup 2021-03-03 14:56:55 +01:00
554716fc9a Add HoverTool to param study overview plot 2021-03-01 17:39:04 +01:00
69767da850 Use weights in fit 2021-03-01 15:46:54 +01:00
3e174f22e5 Improve naming consistency with lmfit 2021-03-01 15:18:55 +01:00
7c1e2fdf0c Bump to python/3.7
Add mpi4py dep
2021-03-01 14:38:35 +01:00
b61327b5f2 Remove unused reset button 2021-03-01 13:43:54 +01:00
0e3aea142d Formatting fix for angles 2021-03-01 12:17:37 +01:00
c7b05d252f Enable export function 2021-03-01 12:17:23 +01:00
a65708004b Provide filename in merging info 2021-02-26 18:14:06 +01:00
af1336df78 Plot fit results on denser grid 2021-02-26 17:53:44 +01:00
966d6349df Add Legend to main plots 2021-02-26 13:05:57 +01:00
5dbb208e92 Correctly plot background component 2021-02-26 11:31:18 +01:00
af70302bf3 Disable temporarily unsupported export functionality 2021-02-26 11:12:21 +01:00
8727e25a2d Plot best_fit instead of gauss component 2021-02-26 11:03:01 +01:00
fac3592ab2 Update bokeh/2.3 2021-02-26 11:03:01 +01:00
599e2e1e74 Remove pandas dep 2021-02-26 11:03:01 +01:00
11fae5d47a Drop peakfinding step
The initial guesses via lmfit seem to work just fine
2021-02-26 11:03:01 +01:00
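A self-contained sketch of what lmfit's guessing (together with "Use weights in fit" above) amounts to; the linear + gaussian composition mirrors the app's default model:

import numpy as np
from lmfit.models import GaussianModel, LinearModel

rng = np.random.default_rng(0)
x = np.linspace(-5, 5, 51)
y = 50 * np.exp(-x**2 / 2) + 2 * x + 20 + rng.normal(0, 1, x.size)

gauss, lin = GaussianModel(), LinearModel()
params = gauss.guess(y, x=x)   # data-driven initial center/sigma/amplitude
params += lin.guess(y, x=x)
# Poisson counting statistics: weight each point by 1/sqrt(counts)
result = (gauss + lin).fit(y, params, x=x, weights=1 / np.sqrt(y))
print(result.fit_report())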
7ee4b1c86a Rename fit_result -> fit 2021-02-26 08:48:08 +01:00
e76623e55e Allow adding and removing fit model components 2021-02-25 17:44:03 +01:00
794d5c49d4 Use lmfit parameter guessing 2021-02-25 17:41:04 +01:00
0b4f4b1ce9 Replace offset with intercept in LinearModel 2021-02-25 16:47:56 +01:00
15bd970a72 Disable temporarily unsupported functionality 2021-02-25 16:11:16 +01:00
89ff0b15f5 Basic fit refactoring 2021-02-25 15:41:14 +01:00
9e3466fcaa Rename guess -> value
Keep it consistent with lmfit naming convention
2021-02-24 15:44:23 +01:00
55f3198f9d Add fit model name to tag 2021-02-24 15:42:10 +01:00
747b008d5e Add initial selection on gauss component 2021-02-15 15:26:06 +01:00
1a7cd2a4b5 Support scanning variables other than omega 2021-02-15 15:08:32 +01:00
eb4e30b7e0 Remove "variable" and a utility rename 2021-02-15 14:26:01 +01:00
82f3c53380 Output center of the scan range 2021-02-12 17:12:19 +01:00
ac76d4b2c3 Add monitor spinner 2021-02-12 16:24:48 +01:00
dab52d9508 Update layouts 2021-02-12 15:59:47 +01:00
b63ef90e11 Keep all merged scans, but disable export for them 2021-02-12 13:53:51 +01:00
d5ac2c6d56 Fix export for scanning angle 2021-02-12 12:05:21 +01:00
36faad1d9d Export the same number of angles for nb and bi modes 2021-02-12 11:05:15 +01:00
9140798c74 Adjust the name of cell parameters
This way we avoid overwriting gamma in the case of nb zebra mode
2021-02-11 16:36:42 +01:00
96eaa27e55 Add manual scan merging 2021-02-11 14:47:37 +01:00
b418fb8300 Assume gamma = 2-theta for 'nb' zebra mode 2021-02-11 13:52:31 +01:00
699c5f3d11 Improve scan merging feedback 2021-02-10 16:18:38 +01:00
6a822c4c85 Allow a small gap between ranges for scans to merge 2021-02-10 16:14:20 +01:00
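Together with "Merge based on variable parameter range overlap" below, the merge criterion is roughly (a hedged sketch; the tolerance is illustrative):

import numpy as np

def ranges_overlap(x1, x2, max_gap=0.1):
    # scans qualify for merging if their scan-variable ranges overlap,
    # or miss each other by no more than max_gap
    return min(x1.max(), x2.max()) - max(x1.min(), x2.min()) >= -max_gap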
c2e1f1def1 Fix original_filename read in param_study 2021-02-10 14:06:39 +01:00
2b9775faed Skip empty/whitespace lines before any scan in ccl 2021-02-10 13:39:53 +01:00
ea7d611819 Handle older files that don't contain "zebra_mode" 2021-02-10 13:20:22 +01:00
9ef07cff19 Split on the first occurrence of the "=" sign
Fix cases where string values also contain "=" signs
2021-02-10 12:45:04 +01:00
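In Python this is simply maxsplit=1:

line = "title = sample: T = 1.5 K"
name, value = line.split("=", 1)           # later "=" signs stay in the value
name, value = name.strip(), value.strip()  # -> "title", "sample: T = 1.5 K"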
12077dd5f3 Merge based on variable parameter range overlap 2021-02-09 18:19:05 +01:00
3b1a2b1a0b Remove param_study_moduls.py 2021-02-09 17:43:16 +01:00
8eee968344 Simplify hkl type handling 2021-02-09 16:26:52 +01:00
97936d6c2a Save variable_name, fix dat file monitor reading 2021-02-09 16:13:45 +01:00
91d3a0ac9e Update DataTable width 2021-02-09 13:54:42 +01:00
4f1f03224c Do not try to read hkl values for dat files 2021-02-09 13:41:44 +01:00
16a47cf3b3 Refactor dataset merging procedure 2021-02-09 13:07:51 +01:00
e3de0f7217 Fix datatable selection 2021-02-09 12:17:37 +01:00
983e0dab42 Flatten structure of metadata in scans 2021-02-09 12:17:30 +01:00
a6f97f59e8 Convert Counts to float ndarray 2021-02-09 11:32:55 +01:00
bf3b44405d Skip reading unused angle in nb zebra mode 2021-02-09 11:28:27 +01:00
93a557fea9 Assign default value of 0 to temp and mf 2021-02-08 21:24:47 +01:00
b31c0b413c Store metadata in each scan 2021-02-08 21:21:13 +01:00
20527e8d2b Consolidate hkl and ub 2021-02-05 15:41:10 +01:00
e09538eaeb Consolidate temp, mf and zebra_mode naming 2021-02-05 14:48:48 +01:00
239949b7c0 Consolidate angle names between data formats 2021-02-05 14:48:48 +01:00
7e6df95c49 Track the original scan index 2021-02-05 14:48:48 +01:00
e38993e69d Keep scans in a list instead of a dict 2021-02-05 14:48:41 +01:00
6bf401aba8 Show hkl values for the peaks in spind panel
Fix #22
2021-02-02 17:59:45 +01:00
0c4fffff0d Show spind results in a DataTable
For #22
2021-02-02 17:59:18 +01:00
2c727d34bd Normalize data on import 2021-02-02 15:35:37 +01:00
5e979eb9e3 Reapply commits that add extra_meta 2021-02-02 10:38:05 +01:00
ba6a99b912 Remove area_method from metadata 2021-02-01 17:07:20 +01:00
315b025341 Remove redundant string conversions 2021-02-01 17:07:20 +01:00
d0b67b8565 Update merge_function.py
Fixed the normalization, changed the structure so we can merge duplicates without merging two dictionaries.
2021-01-24 15:14:25 +01:00
1761003d8a Fix hkl precision not affecting file export 2021-01-06 14:20:54 +01:00
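The precision comes from the hkl precision Select (options "2", "3", "4" in the diffs below); a sketch of the export formatting it controls (the exact field layout is an assumption):

prec = int("3")  # value of the hkl precision Select
h, k, l = 1.0, 0.0, 0.3333
hkl_str = f"{h:.{prec}f} {k:.{prec}f} {l:.{prec}f}"  # "1.000 0.000 0.333"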
d42a53ed47 Move indices from metadata to scan data 2021-01-06 11:39:03 +01:00
cd1e5c42c0 Do not provide user files with empty content 2021-01-05 18:15:26 +01:00
c2215e9b6d Utility renames 2021-01-05 17:45:08 +01:00
c5ec09a5e3 Split scans between .comm/.incomm files on export
For #21
2021-01-05 17:44:59 +01:00
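A hedged sketch of the routing (is_integer_hkl and format_scan are hypothetical placeholders for the real checks):

comm_lines, incomm_lines = [], []
for scan in export_data:
    # commensurate (integer hkl) scans go to .comm, the rest to .incomm
    target = comm_lines if is_integer_hkl(scan) else incomm_lines
    target.append(format_scan(scan))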
b0a4e35d3d Optimize locations of Div and FileInput widgets 2021-01-05 14:37:51 +01:00
db85eee329 Add append option for files from AFS
Allow opening both .ccl and .dat files
For #21
2021-01-05 14:37:42 +01:00
67d0af292b Fix hkl precision select title
For #21
2021-01-05 12:05:09 +01:00
241c2a9779 Correction of normalization
suggested by Dr. Zolliker
2020-12-18 16:23:14 +01:00
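For reference, monitor normalization as used throughout the app; a sketch, not pyzebra's exact code (the 100 000 default matches the Monitor spinner in the diffs):

import numpy as np

def normalize_scan(counts, monitor, monitor_target=100_000):
    factor = monitor_target / monitor  # scale each scan to a common monitor count
    return counts * factor, np.sqrt(counts) * factor  # counts and Poisson errors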
8c265027bf Found a better place to put the num integration fix 2020-12-18 15:52:09 +01:00
f5c405bee8 Numerically integrate whole area
Added two lines to integrate the full area at the request of Oksana and Romain. This might be changed back after some more testing, so I did not delete the previous code and only changed the final result regardless of what the code produces. I find this the least invasive option, but a bit messy.
2020-12-18 15:40:37 +01:00
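Numerically, integrating the whole range is a trapezoidal sum over all measured points; a minimal self-contained sketch (the flat background is an assumption):

import numpy as np

omega = np.linspace(10.0, 12.0, 41)
counts = 100 * np.exp(-((omega - 11.0) / 0.1) ** 2) + 5.0
background = np.full_like(omega, 5.0)              # assumed background estimate
int_area = np.trapz(counts - background, x=omega)  # integrate the full scan range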
ae33ee825e Support any variable angle 2020-12-16 17:23:23 +01:00
f694249298 Fix magnitude of the resulting UB matrix 2020-12-16 15:11:14 +01:00
b7ac6d4359 Fix UB matrix calculation 2020-12-16 11:15:57 +01:00
e27ba7c711 Fix temp_dir 2020-12-16 11:04:33 +01:00
ddaeda35c7 Fix issue parsing input on spind panel 2020-12-16 09:10:46 +01:00
dac1237009 Output UB matrix 2020-12-15 14:49:52 +01:00
999ceba3b7 Fix missing file extension 2020-12-15 14:38:54 +01:00
0a634ca8da Handle a case when spind doesn't return results 2020-12-15 14:28:33 +01:00
1f1762063d Add basic spind processing 2020-12-14 16:54:13 +01:00
fdb2027fd4 Add spind panel 2020-12-14 15:04:55 +01:00
559e18ed5d Add option to open hdf files via proposal number 2020-12-10 22:49:54 +01:00
d6202d4a6b Use pyzebra lib default anatric location 2020-12-10 22:07:04 +01:00
4b3b14e4ac Disable adding and removing fit funcs in param study 2020-12-10 16:46:48 +01:00
b4db41d672 Make geometry readonly on gui
Data files already contain that information
2020-12-10 16:35:30 +01:00
16174f5cc4 Fix zebra_mode dataset read 2020-12-10 16:27:00 +01:00
b50783f6be Correct auto detection of variable angles
Fix #20
2020-12-10 16:02:01 +01:00
f830acf70d Increase delay before transferring file to clients 2020-12-10 15:54:16 +01:00
ea41301605 Auto detect varying angle in h5 viewer 2020-12-10 14:36:58 +01:00
6a341e5001 Add experimental overview map plot 2020-12-10 11:24:27 +01:00
9bbed3b55d Cleanup after transitioning plots to Tabs 2020-12-10 10:13:41 +01:00
18 changed files with 1274 additions and 2119 deletions

View File

@ -15,15 +15,14 @@ build:
requirements:
build:
- python >=3.6
- python >=3.7
- setuptools
run:
- python >=3.6
- python >=3.7
- numpy
- scipy
- pandas
- h5py
- bokeh =2.2
- bokeh =2.3
- matplotlib
- numba
- lmfit

View File

@ -1,9 +1,7 @@
from pyzebra.anatric import *
from pyzebra.ccl_findpeaks import ccl_findpeaks
from pyzebra.ccl_io import export_comm, load_1D, parse_1D
from pyzebra.fit2 import fitccl
from pyzebra.ccl_io import *
from pyzebra.h5 import *
from pyzebra.merge_function import add_dict, unified_merge
from pyzebra.xtal import *
from pyzebra.ccl_process import *
__version__ = "0.2.2"
__version__ = "0.3.0"

View File

@ -10,6 +10,7 @@ import panel_ccl_integrate
import panel_hdf_anatric
import panel_hdf_viewer
import panel_param_study
import panel_spind
doc = curdoc()
@ -29,10 +30,11 @@ tab_hdf_viewer = panel_hdf_viewer.create()
tab_hdf_anatric = panel_hdf_anatric.create()
tab_ccl_integrate = panel_ccl_integrate.create()
tab_param_study = panel_param_study.create()
tab_spind = panel_spind.create()
doc.add_root(
column(
Tabs(tabs=[tab_hdf_viewer, tab_hdf_anatric, tab_ccl_integrate, tab_param_study]),
Tabs(tabs=[tab_hdf_viewer, tab_hdf_anatric, tab_ccl_integrate, tab_param_study, tab_spind]),
row(stdout_textareainput, bokeh_log_textareainput, sizing_mode="scale_both"),
)
)

View File

@ -40,7 +40,7 @@ def main():
parser.add_argument(
"--anatric-path",
type=str,
default="/afs/psi.ch/project/sinq/rhel7/bin/anatric",
default=None,
help="path to anatric executable",
)

View File

@ -3,12 +3,10 @@ import io
import os
import tempfile
import types
from copy import deepcopy
import numpy as np
from bokeh.layouts import column, row
from bokeh.models import (
Asterisk,
BasicTicker,
Button,
CheckboxEditor,
@ -20,8 +18,10 @@ from bokeh.models import (
Dropdown,
FileInput,
Grid,
Legend,
Line,
LinearAxis,
MultiLine,
MultiSelect,
NumberEditor,
Panel,
@ -47,18 +47,19 @@ from pyzebra.ccl_io import AREA_METHODS
javaScript = """
setTimeout(function() {
const filename = 'output' + js_data.data['ext']
const blob = new Blob([js_data.data['cont']], {type: 'text/plain'})
for (let i = 0; i < js_data.data['fname'].length; i++) {
if (js_data.data['content'][i] === "") continue;
const blob = new Blob([js_data.data['content'][i]], {type: 'text/plain'})
const link = document.createElement('a');
document.body.appendChild(link);
const url = window.URL.createObjectURL(blob);
link.href = url;
link.download = filename;
link.download = js_data.data['fname'][i];
link.click();
window.URL.revokeObjectURL(url);
document.body.removeChild(link);
}, 500);
}
"""
PROPOSAL_PATH = "/afs/psi.ch/project/sinqdata/2020/zebra/"
@ -67,180 +68,215 @@ PROPOSAL_PATH = "/afs/psi.ch/project/sinqdata/2020/zebra/"
def create():
det_data = {}
fit_params = {}
peak_pos_textinput_lock = False
js_data = ColumnDataSource(data=dict(cont=[], ext=[]))
js_data = ColumnDataSource(data=dict(content=["", ""], fname=["", ""]))
def proposal_textinput_callback(_attr, _old, new):
ccl_path = os.path.join(PROPOSAL_PATH, new.strip())
ccl_file_list = []
for file in os.listdir(ccl_path):
if file.endswith(".ccl"):
if file.endswith((".ccl", ".dat")):
ccl_file_list.append((os.path.join(ccl_path, file), file))
ccl_file_select.options = ccl_file_list
ccl_file_select.value = ccl_file_list[0][0]
file_select.options = ccl_file_list
proposal_textinput = TextInput(title="Enter proposal number:", default_size=145)
proposal_textinput = TextInput(title="Proposal number:", default_size=145)
proposal_textinput.on_change("value", proposal_textinput_callback)
def _init_datatable():
scan_list = list(det_data["scan"].keys())
hkl = [
f'{int(m["h_index"])} {int(m["k_index"])} {int(m["l_index"])}'
if det_data["meta"]["indices"] == "hkl"
else f'{m["h_index"]} {m["k_index"]} {m["l_index"]}'
for m in det_data["scan"].values()
]
scan_list = [s["idx"] for s in det_data]
hkl = [f'{s["h"]} {s["k"]} {s["l"]}' for s in det_data]
export = [s.get("active", True) for s in det_data]
scan_table_source.data.update(
scan=scan_list,
hkl=hkl,
peaks=[0] * len(scan_list),
fit=[0] * len(scan_list),
export=[True] * len(scan_list),
scan=scan_list, hkl=hkl, fit=[0] * len(scan_list), export=export,
)
scan_table_source.selected.indices = []
scan_table_source.selected.indices = [0]
def ccl_file_select_callback(_attr, _old, new):
merge_options = [(str(i), f"{i} ({idx})") for i, idx in enumerate(scan_list)]
merge_source_select.options = merge_options
merge_source_select.value = merge_options[0][0]
merge_dest_select.options = merge_options
merge_dest_select.value = merge_options[0][0]
def ccl_file_select_callback(_attr, _old, _new):
pass
file_select = MultiSelect(title="Available .ccl/.dat files:", default_size=200, height=250)
file_select.on_change("value", ccl_file_select_callback)
def file_open_button_callback():
nonlocal det_data
with open(new) as file:
_, ext = os.path.splitext(new)
det_data = pyzebra.parse_1D(file, ext)
det_data = []
for f_name in file_select.value:
with open(f_name) as file:
base, ext = os.path.splitext(f_name)
if det_data:
append_data = pyzebra.parse_1D(file, ext)
pyzebra.normalize_dataset(append_data, monitor_spinner.value)
pyzebra.merge_datasets(det_data, append_data)
else:
det_data = pyzebra.parse_1D(file, ext)
pyzebra.normalize_dataset(det_data, monitor_spinner.value)
pyzebra.merge_duplicates(det_data)
js_data.data.update(fname=[base + ".comm", base + ".incomm"])
_init_datatable()
ccl_file_select = Select(title="Available .ccl files")
ccl_file_select.on_change("value", ccl_file_select_callback)
file_open_button = Button(label="Open New", default_size=100)
file_open_button.on_click(file_open_button_callback)
def file_append_button_callback():
for f_name in file_select.value:
with open(f_name) as file:
_, ext = os.path.splitext(f_name)
append_data = pyzebra.parse_1D(file, ext)
pyzebra.normalize_dataset(append_data, monitor_spinner.value)
pyzebra.merge_datasets(det_data, append_data)
_init_datatable()
file_append_button = Button(label="Append", default_size=100)
file_append_button.on_click(file_append_button_callback)
def upload_button_callback(_attr, _old, new):
nonlocal det_data
with io.StringIO(base64.b64decode(new).decode()) as file:
_, ext = os.path.splitext(upload_button.filename)
det_data = pyzebra.parse_1D(file, ext)
det_data = []
for f_str, f_name in zip(new, upload_button.filename):
with io.StringIO(base64.b64decode(f_str).decode()) as file:
base, ext = os.path.splitext(f_name)
if det_data:
append_data = pyzebra.parse_1D(file, ext)
pyzebra.normalize_dataset(append_data, monitor_spinner.value)
pyzebra.merge_datasets(det_data, append_data)
else:
det_data = pyzebra.parse_1D(file, ext)
pyzebra.normalize_dataset(det_data, monitor_spinner.value)
pyzebra.merge_duplicates(det_data)
js_data.data.update(fname=[base + ".comm", base + ".incomm"])
_init_datatable()
upload_button = FileInput(accept=".ccl")
upload_div = Div(text="or upload new .ccl/.dat files:", margin=(5, 5, 0, 5))
upload_button = FileInput(accept=".ccl,.dat", multiple=True, default_size=200)
upload_button.on_change("value", upload_button_callback)
def append_upload_button_callback(_attr, _old, new):
nonlocal det_data
with io.StringIO(base64.b64decode(new).decode()) as file:
_, ext = os.path.splitext(append_upload_button.filename)
append_data = pyzebra.parse_1D(file, ext)
for f_str, f_name in zip(new, append_upload_button.filename):
with io.StringIO(base64.b64decode(f_str).decode()) as file:
_, ext = os.path.splitext(f_name)
append_data = pyzebra.parse_1D(file, ext)
pyzebra.unified_merge(det_data, append_data)
pyzebra.normalize_dataset(append_data, monitor_spinner.value)
pyzebra.merge_datasets(det_data, append_data)
_init_datatable()
append_upload_button = FileInput(accept=".ccl,.dat")
append_upload_div = Div(text="append extra files:", margin=(5, 5, 0, 5))
append_upload_button = FileInput(accept=".ccl,.dat", multiple=True, default_size=200)
append_upload_button.on_change("value", append_upload_button_callback)
def monitor_spinner_callback(_attr, old, new):
if det_data:
pyzebra.normalize_dataset(det_data, new)
_update_plot(_get_selected_scan())
monitor_spinner = Spinner(title="Monitor:", mode="int", value=100_000, low=1, width=145)
monitor_spinner.on_change("value", monitor_spinner_callback)
def _update_table():
num_of_peaks = [len(scan.get("peak_indexes", [])) for scan in det_data["scan"].values()]
fit_ok = [(1 if "fit" in scan else 0) for scan in det_data["scan"].values()]
scan_table_source.data.update(peaks=num_of_peaks, fit=fit_ok)
fit_ok = [(1 if "fit" in scan else 0) for scan in det_data]
scan_table_source.data.update(fit=fit_ok)
def _update_plot(scan):
nonlocal peak_pos_textinput_lock
peak_pos_textinput_lock = True
scan_motor = scan["scan_motor"]
y = scan["Counts"]
x = scan["om"]
x = scan[scan_motor]
plot.axis[0].axis_label = scan_motor
plot_scatter_source.data.update(x=x, y=y, y_upper=y + np.sqrt(y), y_lower=y - np.sqrt(y))
num_of_peaks = len(scan.get("peak_indexes", []))
if num_of_peaks is not None and num_of_peaks > 0:
peak_indexes = scan["peak_indexes"]
if len(peak_indexes) == 1:
peak_pos_textinput.value = str(scan["om"][peak_indexes[0]])
else:
peak_pos_textinput.value = str([scan["om"][ind] for ind in peak_indexes])
plot_peak_source.data.update(x=scan["om"][peak_indexes], y=scan["peak_heights"])
plot_line_smooth_source.data.update(x=x, y=scan["smooth_peaks"])
else:
peak_pos_textinput.value = None
plot_peak_source.data.update(x=[], y=[])
plot_line_smooth_source.data.update(x=[], y=[])
peak_pos_textinput_lock = False
fit = scan.get("fit")
if fit is not None:
x = scan["fit"]["x_fit"]
plot_gauss_source.data.update(x=x, y=scan["fit"]["comps"]["gaussian"])
plot_bkg_source.data.update(x=x, y=scan["fit"]["comps"]["background"])
params = fit["result"].params
fit_output_textinput.value = (
"Gaussian: centre = %9.4f, sigma = %9.4f, area = %9.4f \n"
"background: slope = %9.4f, intercept = %9.4f \n"
"Int. area = %9.4f +/- %9.4f \n"
"fit area = %9.4f +/- %9.4f \n"
"ratio((fit-int)/fit) = %9.4f"
% (
params["g_cen"].value,
params["g_width"].value,
params["g_amp"].value,
params["slope"].value,
params["intercept"].value,
fit["int_area"].n,
fit["int_area"].s,
params["g_amp"].value,
params["g_amp"].stderr,
(params["g_amp"].value - fit["int_area"].n) / params["g_amp"].value,
)
)
numfit_min, numfit_max = fit["numfit"]
if numfit_min is None:
numfit_min_span.location = None
else:
numfit_min_span.location = x[numfit_min]
x_fit = np.linspace(x[0], x[-1], 100)
plot_fit_source.data.update(x=x_fit, y=fit.eval(x=x_fit))
if numfit_max is None:
numfit_max_span.location = None
else:
numfit_max_span.location = x[numfit_max]
x_bkg = []
y_bkg = []
xs_peak = []
ys_peak = []
comps = fit.eval_components(x=x_fit)
for i, model in enumerate(fit_params):
if "linear" in model:
x_bkg = x_fit
y_bkg = comps[f"f{i}_"]
elif any(val in model for val in ("gaussian", "voigt", "pvoigt")):
xs_peak.append(x_fit)
ys_peak.append(comps[f"f{i}_"])
plot_bkg_source.data.update(x=x_bkg, y=y_bkg)
plot_peak_source.data.update(xs=xs_peak, ys=ys_peak)
fit_output_textinput.value = fit.fit_report()
else:
plot_gauss_source.data.update(x=[], y=[])
plot_fit_source.data.update(x=[], y=[])
plot_bkg_source.data.update(x=[], y=[])
plot_peak_source.data.update(xs=[], ys=[])
fit_output_textinput.value = ""
numfit_min_span.location = None
numfit_max_span.location = None
# Main plot
plot = Plot(x_range=DataRange1d(), y_range=DataRange1d(), plot_height=400, plot_width=700)
plot = Plot(
x_range=DataRange1d(),
y_range=DataRange1d(only_visible=True),
plot_height=470,
plot_width=700,
)
plot.add_layout(LinearAxis(axis_label="Counts"), place="left")
plot.add_layout(LinearAxis(axis_label="Omega"), place="below")
plot.add_layout(LinearAxis(axis_label="Scan motor"), place="below")
plot.add_layout(Grid(dimension=0, ticker=BasicTicker()))
plot.add_layout(Grid(dimension=1, ticker=BasicTicker()))
plot_scatter_source = ColumnDataSource(dict(x=[0], y=[0], y_upper=[0], y_lower=[0]))
plot.add_glyph(plot_scatter_source, Scatter(x="x", y="y", line_color="steelblue"))
plot_scatter = plot.add_glyph(
plot_scatter_source, Scatter(x="x", y="y", line_color="steelblue")
)
plot.add_layout(Whisker(source=plot_scatter_source, base="x", upper="y_upper", lower="y_lower"))
plot_line_smooth_source = ColumnDataSource(dict(x=[0], y=[0]))
plot.add_glyph(
plot_line_smooth_source, Line(x="x", y="y", line_color="steelblue", line_dash="dashed")
)
plot_gauss_source = ColumnDataSource(dict(x=[0], y=[0]))
plot.add_glyph(plot_gauss_source, Line(x="x", y="y", line_color="red", line_dash="dashed"))
plot_fit_source = ColumnDataSource(dict(x=[0], y=[0]))
plot_fit = plot.add_glyph(plot_fit_source, Line(x="x", y="y"))
plot_bkg_source = ColumnDataSource(dict(x=[0], y=[0]))
plot.add_glyph(plot_bkg_source, Line(x="x", y="y", line_color="green", line_dash="dashed"))
plot_bkg = plot.add_glyph(
plot_bkg_source, Line(x="x", y="y", line_color="green", line_dash="dashed")
)
plot_peak_source = ColumnDataSource(dict(x=[], y=[]))
plot.add_glyph(plot_peak_source, Asterisk(x="x", y="y", size=10, line_color="red"))
plot_peak_source = ColumnDataSource(dict(xs=[0], ys=[0]))
plot_peak = plot.add_glyph(
plot_peak_source, MultiLine(xs="xs", ys="ys", line_color="red", line_dash="dashed")
)
numfit_min_span = Span(location=None, dimension="height", line_dash="dashed")
plot.add_layout(numfit_min_span)
fit_from_span = Span(location=None, dimension="height", line_dash="dashed")
plot.add_layout(fit_from_span)
numfit_max_span = Span(location=None, dimension="height", line_dash="dashed")
plot.add_layout(numfit_max_span)
fit_to_span = Span(location=None, dimension="height", line_dash="dashed")
plot.add_layout(fit_to_span)
plot.add_layout(
Legend(
items=[
("data", [plot_scatter]),
("best fit", [plot_fit]),
("peak", [plot_peak]),
("linear", [plot_bkg]),
],
location="top_left",
click_policy="hide",
)
)
plot.add_tools(PanTool(), WheelZoomTool(), ResetTool())
plot.toolbar.logo = None
@ -261,62 +297,60 @@ def create():
# skip unnecessary update caused by selection drop
return
_update_plot(det_data["scan"][scan_table_source.data["scan"][new[0]]])
_update_plot(det_data[new[0]])
scan_table_source = ColumnDataSource(dict(scan=[], hkl=[], peaks=[], fit=[], export=[]))
scan_table_source = ColumnDataSource(dict(scan=[], hkl=[], fit=[], export=[]))
scan_table = DataTable(
source=scan_table_source,
columns=[
TableColumn(field="scan", title="scan"),
TableColumn(field="hkl", title="hkl"),
TableColumn(field="peaks", title="Peaks"),
TableColumn(field="fit", title="Fit"),
TableColumn(field="export", title="Export", editor=CheckboxEditor()),
TableColumn(field="scan", title="Scan", width=50),
TableColumn(field="hkl", title="hkl", width=100),
TableColumn(field="fit", title="Fit", width=50),
TableColumn(field="export", title="Export", editor=CheckboxEditor(), width=50),
],
width=250,
index_position=None,
width=310, # +60 because of the index column
height=350,
autosize_mode="none",
editable=True,
)
scan_table_source.selected.on_change("indices", scan_table_select_callback)
def _get_selected_scan():
selected_index = scan_table_source.selected.indices[0]
selected_scan_id = scan_table_source.data["scan"][selected_index]
return det_data["scan"][selected_scan_id]
return det_data[scan_table_source.selected.indices[0]]
def peak_pos_textinput_callback(_attr, _old, new):
if new is not None and not peak_pos_textinput_lock:
scan = _get_selected_scan()
merge_dest_select = Select(title="destination:", width=100)
merge_source_select = Select(title="source:", width=100)
peak_ind = (np.abs(scan["om"] - float(new))).argmin()
scan["peak_indexes"] = np.array([peak_ind], dtype=np.int64)
scan["peak_heights"] = np.array([scan["smooth_peaks"][peak_ind]])
_update_table()
_update_plot(scan)
def merge_button_callback():
scan_dest_ind = int(merge_dest_select.value)
scan_source_ind = int(merge_source_select.value)
peak_pos_textinput = TextInput(title="Peak position:", default_size=145)
peak_pos_textinput.on_change("value", peak_pos_textinput_callback)
if scan_dest_ind == scan_source_ind:
print("WARNING: Selected scans for merging are identical")
return
peak_int_ratio_spinner = Spinner(
title="Peak intensity ratio:", value=0.8, step=0.01, low=0, high=1, default_size=145
)
peak_prominence_spinner = Spinner(title="Peak prominence:", value=50, low=0, default_size=145)
smooth_toggle = Toggle(label="Smooth curve", default_size=145)
window_size_spinner = Spinner(title="Window size:", value=7, step=2, low=1, default_size=145)
poly_order_spinner = Spinner(title="Poly order:", value=3, low=0, default_size=145)
pyzebra.merge_scans(det_data[scan_dest_ind], det_data[scan_source_ind])
_update_plot(_get_selected_scan())
integ_from = Spinner(title="Integrate from:", default_size=145)
integ_to = Spinner(title="to:", default_size=145)
merge_button = Button(label="Merge scans", width=145)
merge_button.on_click(merge_button_callback)
def fitparam_reset_button_callback():
...
def fit_from_spinner_callback(_attr, _old, new):
fit_from_span.location = new
fitparam_reset_button = Button(label="Reset to defaults", default_size=145, disabled=True)
fitparam_reset_button.on_click(fitparam_reset_button_callback)
fit_from_spinner = Spinner(title="Fit from:", default_size=145)
fit_from_spinner.on_change("value", fit_from_spinner_callback)
def fit_to_spinner_callback(_attr, _old, new):
fit_to_span.location = new
fit_to_spinner = Spinner(title="to:", default_size=145)
fit_to_spinner.on_change("value", fit_to_spinner_callback)
def fitparams_add_dropdown_callback(click):
new_tag = str(fitparams_select.tags[0]) # bokeh requires (str, str) for MultiSelect options
# bokeh requires (str, str) for MultiSelect options
new_tag = f"{click.item}-{fitparams_select.tags[0]}"
fitparams_select.options.append((new_tag, click.item))
fit_params[new_tag] = fitparams_factory(click.item)
fitparams_select.tags[0] += 1
@ -324,11 +358,11 @@ def create():
fitparams_add_dropdown = Dropdown(
label="Add fit function",
menu=[
("Background", "background"),
("Gauss", "gauss"),
("Linear", "linear"),
("Gaussian", "gaussian"),
("Voigt", "voigt"),
("Pseudo Voigt", "pseudovoigt"),
("Pseudo Voigt1", "pseudovoigt1"),
("Pseudo Voigt", "pvoigt"),
# ("Pseudo Voigt1", "pseudovoigt1"),
],
default_size=145,
disabled=True,
@ -349,7 +383,7 @@ def create():
if new:
fitparams_table_source.data.update(fit_params[new[0]])
else:
fitparams_table_source.data.update(dict(param=[], guess=[], vary=[], min=[], max=[]))
fitparams_table_source.data.update(dict(param=[], value=[], vary=[], min=[], max=[]))
fitparams_select = MultiSelect(options=[], height=120, default_size=145)
fitparams_select.tags = [0]
@ -370,32 +404,32 @@ def create():
fitparams_remove_button.on_click(fitparams_remove_button_callback)
def fitparams_factory(function):
if function == "background":
params = ["slope", "offset"]
elif function == "gauss":
params = ["center", "sigma", "amplitude"]
if function == "linear":
params = ["slope", "intercept"]
elif function == "gaussian":
params = ["amplitude", "center", "sigma"]
elif function == "voigt":
params = ["center", "sigma", "amplitude", "gamma"]
elif function == "pseudovoigt":
params = ["center", "sigma", "amplitude", "fraction"]
params = ["amplitude", "center", "sigma", "gamma"]
elif function == "pvoigt":
params = ["amplitude", "center", "sigma", "fraction"]
elif function == "pseudovoigt1":
params = ["center", "g_sigma", "l_sigma", "amplitude", "fraction"]
params = ["amplitude", "center", "g_sigma", "l_sigma", "fraction"]
else:
raise ValueError("Unknown fit function")
n = len(params)
fitparams = dict(
param=params, guess=[None] * n, vary=[True] * n, min=[None] * n, max=[None] * n,
param=params, value=[None] * n, vary=[True] * n, min=[None] * n, max=[None] * n,
)
return fitparams
fitparams_table_source = ColumnDataSource(dict(param=[], guess=[], vary=[], min=[], max=[]))
fitparams_table_source = ColumnDataSource(dict(param=[], value=[], vary=[], min=[], max=[]))
fitparams_table = DataTable(
source=fitparams_table_source,
columns=[
TableColumn(field="param", title="Parameter"),
TableColumn(field="guess", title="Guess", editor=NumberEditor()),
TableColumn(field="value", title="Value", editor=NumberEditor()),
TableColumn(field="vary", title="Vary", editor=CheckboxEditor()),
TableColumn(field="min", title="Min", editor=NumberEditor()),
TableColumn(field="max", title="Max", editor=NumberEditor()),
@ -408,57 +442,18 @@ def create():
)
# start with `linear` and `gaussian` fit functions added
fitparams_add_dropdown_callback(types.SimpleNamespace(item="background"))
fitparams_add_dropdown_callback(types.SimpleNamespace(item="gauss"))
fitparams_add_dropdown_callback(types.SimpleNamespace(item="linear"))
fitparams_add_dropdown_callback(types.SimpleNamespace(item="gaussian"))
fitparams_select.value = ["gaussian-1"] # add selection to gauss
fit_output_textinput = TextAreaInput(title="Fit results:", width=450, height=400)
def _get_peakfind_params():
return dict(
int_threshold=peak_int_ratio_spinner.value,
prominence=peak_prominence_spinner.value,
smooth=smooth_toggle.active,
window_size=window_size_spinner.value,
poly_order=poly_order_spinner.value,
)
def peakfind_all_button_callback():
peakfind_params = _get_peakfind_params()
for scan in det_data["scan"].values():
pyzebra.ccl_findpeaks(scan, **peakfind_params)
_update_table()
_update_plot(_get_selected_scan())
peakfind_all_button = Button(label="Peak Find All", button_type="primary", default_size=145)
peakfind_all_button.on_click(peakfind_all_button_callback)
def peakfind_button_callback():
scan = _get_selected_scan()
pyzebra.ccl_findpeaks(scan, **_get_peakfind_params())
_update_table()
_update_plot(scan)
peakfind_button = Button(label="Peak Find Current", default_size=145)
peakfind_button.on_click(peakfind_button_callback)
def _get_fit_params():
return dict(
guess=fit_params["1"]["guess"] + fit_params["0"]["guess"],
vary=fit_params["1"]["vary"] + fit_params["0"]["vary"],
constraints_min=fit_params["1"]["min"] + fit_params["0"]["min"],
constraints_max=fit_params["1"]["max"] + fit_params["0"]["max"],
numfit_min=integ_from.value,
numfit_max=integ_to.value,
binning=bin_size_spinner.value,
)
fit_output_textinput = TextAreaInput(title="Fit results:", width=750, height=200)
def fit_all_button_callback():
fit_params = _get_fit_params()
for scan in det_data["scan"].values():
# fit_params are updated inplace within `fitccl`
pyzebra.fitccl(scan, **deepcopy(fit_params))
for scan, export in zip(det_data, scan_table_source.data["export"]):
if export:
pyzebra.fit_scan(
scan, fit_params, fit_from=fit_from_spinner.value, fit_to=fit_to_spinner.value
)
_update_plot(_get_selected_scan())
_update_table()
@ -468,7 +463,9 @@ def create():
def fit_button_callback():
scan = _get_selected_scan()
pyzebra.fitccl(scan, **_get_fit_params())
pyzebra.fit_scan(
scan, fit_params, fit_from=fit_from_spinner.value, fit_to=fit_to_spinner.value
)
_update_plot(scan)
_update_table()
@ -476,115 +473,95 @@ def create():
fit_button = Button(label="Fit Current", default_size=145)
fit_button.on_click(fit_button_callback)
def area_method_radiobutton_callback(_attr, _old, new):
det_data["meta"]["area_method"] = AREA_METHODS[new]
area_method_radiobutton = RadioButtonGroup(
labels=["Fit area", "Int area"], active=0, default_size=145
labels=["Fit area", "Int area"], active=0, default_size=145, disabled=True
)
area_method_radiobutton.on_change("active", area_method_radiobutton_callback)
bin_size_spinner = Spinner(title="Bin size:", value=1, low=1, step=1, default_size=145)
bin_size_spinner = Spinner(
title="Bin size:", value=1, low=1, step=1, default_size=145, disabled=True
)
lorentz_toggle = Toggle(label="Lorentz Correction", default_size=145)
preview_output_textinput = TextAreaInput(title="Export file preview:", width=450, height=400)
def preview_output_button_callback():
if det_data["meta"]["indices"] == "hkl":
ext = ".comm"
elif det_data["meta"]["indices"] == "real":
ext = ".incomm"
export_preview_textinput = TextAreaInput(title="Export preview:", width=500, height=400)
def preview_button_callback():
with tempfile.TemporaryDirectory() as temp_dir:
temp_file = temp_dir + "/temp"
export_data = deepcopy(det_data)
for s, export in zip(scan_table_source.data["scan"], scan_table_source.data["export"]):
if not export:
del export_data["scan"][s]
pyzebra.export_comm(
export_data = []
for s, export in zip(det_data, scan_table_source.data["export"]):
if export:
export_data.append(s)
pyzebra.export_1D(
export_data,
temp_file,
area_method=AREA_METHODS[int(area_method_radiobutton.active)],
lorentz=lorentz_toggle.active,
hkl_precision=int(hkl_precision_select.value),
)
with open(f"{temp_file}{ext}") as f:
preview_output_textinput.value = f.read()
exported_content = ""
file_content = []
for ext in (".comm", ".incomm"):
fname = temp_file + ext
if os.path.isfile(fname):
with open(fname) as f:
content = f.read()
exported_content += f"{ext} file:\n" + content
else:
content = ""
file_content.append(content)
preview_output_button = Button(label="Preview file", default_size=220)
preview_output_button.on_click(preview_output_button_callback)
js_data.data.update(content=file_content)
export_preview_textinput.value = exported_content
hkl_precision_select = Select(options=["2", "3", "4"], value="2", default_size=220)
preview_button = Button(label="Preview", default_size=200)
preview_button.on_click(preview_button_callback)
def export_results(det_data):
if det_data["meta"]["indices"] == "hkl":
ext = ".comm"
elif det_data["meta"]["indices"] == "real":
ext = ".incomm"
with tempfile.TemporaryDirectory() as temp_dir:
temp_file = temp_dir + "/temp"
export_data = deepcopy(det_data)
for s, export in zip(scan_table_source.data["scan"], scan_table_source.data["export"]):
if not export:
del export_data["scan"][s]
pyzebra.export_comm(
export_data,
temp_file,
lorentz=lorentz_toggle.active,
hkl_precision=int(hkl_precision_select.value),
)
with open(f"{temp_file}{ext}") as f:
output_content = f.read()
return output_content, ext
def save_button_callback():
cont, ext = export_results(det_data)
js_data.data.update(cont=[cont], ext=[ext])
save_button = Button(label="Download file", button_type="success", default_size=220)
save_button.on_click(save_button_callback)
save_button.js_on_click(CustomJS(args={"js_data": js_data}, code=javaScript))
findpeak_controls = column(
row(peak_pos_textinput, column(Spacer(height=19), smooth_toggle)),
row(peak_int_ratio_spinner, peak_prominence_spinner),
row(window_size_spinner, poly_order_spinner),
row(peakfind_button, peakfind_all_button),
hkl_precision_select = Select(
title="hkl precision:", options=["2", "3", "4"], value="2", default_size=80
)
save_button = Button(label="Download preview", button_type="success", default_size=200)
save_button.js_on_click(CustomJS(args={"js_data": js_data}, code=javaScript))
fitpeak_controls = row(
column(fitparams_add_dropdown, fitparams_select, fitparams_remove_button),
fitparams_table,
Spacer(width=20),
column(
row(integ_from, integ_to),
row(fit_from_spinner, fit_to_spinner),
row(bin_size_spinner, column(Spacer(height=19), lorentz_toggle)),
row(fitparam_reset_button, area_method_radiobutton),
row(area_method_radiobutton),
row(fit_button, fit_all_button),
),
)
export_layout = column(
preview_output_textinput,
row(column(preview_output_button, hkl_precision_select), save_button),
scan_layout = column(
scan_table,
monitor_spinner,
row(column(Spacer(height=19), merge_button), merge_dest_select, merge_source_select),
)
import_layout = column(
proposal_textinput,
file_select,
row(file_open_button, file_append_button),
upload_div,
upload_button,
append_upload_div,
append_upload_button,
)
export_layout = column(
export_preview_textinput,
row(hkl_precision_select, column(Spacer(height=19), row(preview_button, save_button))),
)
upload_div = Div(text="Or upload .ccl file:")
append_upload_div = Div(text="append extra .ccl/.dat files:")
tab_layout = column(
row(proposal_textinput, ccl_file_select),
row(
column(Spacer(height=5), upload_div),
upload_button,
column(Spacer(height=5), append_upload_div),
append_upload_button,
),
row(scan_table, plot, Spacer(width=30), fit_output_textinput, export_layout),
row(findpeak_controls, Spacer(width=30), fitpeak_controls),
row(import_layout, scan_layout, plot, Spacer(width=30), export_layout),
row(fitpeak_controls, fit_output_textinput),
)
return Panel(child=tab_layout, title="ccl integrate")

View File

@ -12,6 +12,7 @@ from bokeh.models import (
Panel,
RadioButtonGroup,
Select,
Spacer,
TextAreaInput,
TextInput,
)
@ -98,12 +99,11 @@ def create():
minPeakCount_textinput.disabled = disable_adaptivedynamic
displacementCurve_textinput.disabled = disable_adaptivedynamic
upload_div = Div(text="Open XML configuration file:")
def upload_button_callback(_attr, _old, new):
with io.BytesIO(base64.b64decode(new)) as file:
_load_config_file(file)
upload_div = Div(text="Open XML configuration file:")
upload_button = FileInput(accept=".xml")
upload_button.on_change("value", upload_button_callback)
@ -112,7 +112,7 @@ def create():
def logfile_textinput_callback(_attr, _old, new):
config.logfile = new
logfile_textinput = TextInput(title="Logfile:", value="logfile.log", width=520)
logfile_textinput = TextInput(title="Logfile:", value="logfile.log", width=320)
logfile_textinput.on_change("value", logfile_textinput_callback)
def logfile_verbosity_select_callback(_attr, _old, new):
@ -133,7 +133,7 @@ def create():
def filelist_format_textinput_callback(_attr, _old, new):
config.filelist_format = new
filelist_format_textinput = TextInput(title="format:", width=490)
filelist_format_textinput = TextInput(title="format:", width=290)
filelist_format_textinput.on_change("value", filelist_format_textinput_callback)
def filelist_datapath_textinput_callback(_attr, _old, new):
@ -161,7 +161,7 @@ def create():
def lambda_textinput_callback(_attr, _old, new):
config.crystal_lambda = new
lambda_textinput = TextInput(title="lambda:", width=140)
lambda_textinput = TextInput(title="lambda:", width=145)
lambda_textinput.on_change("value", lambda_textinput_callback)
def ub_textareainput_callback(_attr, _old, new):
@ -173,19 +173,19 @@ def create():
def zeroOM_textinput_callback(_attr, _old, new):
config.crystal_zeroOM = new
zeroOM_textinput = TextInput(title="zeroOM:", width=140)
zeroOM_textinput = TextInput(title="zeroOM:", width=145)
zeroOM_textinput.on_change("value", zeroOM_textinput_callback)
def zeroSTT_textinput_callback(_attr, _old, new):
config.crystal_zeroSTT = new
zeroSTT_textinput = TextInput(title="zeroSTT:", width=140)
zeroSTT_textinput = TextInput(title="zeroSTT:", width=145)
zeroSTT_textinput.on_change("value", zeroSTT_textinput_callback)
def zeroCHI_textinput_callback(_attr, _old, new):
config.crystal_zeroCHI = new
zeroCHI_textinput = TextInput(title="zeroCHI:", width=140)
zeroCHI_textinput = TextInput(title="zeroCHI:", width=145)
zeroCHI_textinput.on_change("value", zeroCHI_textinput_callback)
# ---- DataFactory
@ -193,14 +193,14 @@ def create():
config.dataFactory_implementation = new
dataFactory_implementation_select = Select(
title="DataFactory implementation:", options=DATA_FACTORY_IMPLEMENTATION, width=300,
title="DataFactory implement.:", options=DATA_FACTORY_IMPLEMENTATION, width=145,
)
dataFactory_implementation_select.on_change("value", dataFactory_implementation_select_callback)
def dataFactory_dist1_textinput_callback(_attr, _old, new):
config.dataFactory_dist1 = new
dataFactory_dist1_textinput = TextInput(title="dist1:", width=290)
dataFactory_dist1_textinput = TextInput(title="dist1:", width=145)
dataFactory_dist1_textinput.on_change("value", dataFactory_dist1_textinput_callback)
# ---- BackgroundProcessor
@ -212,7 +212,7 @@ def create():
config.reflectionPrinter_format = new
reflectionPrinter_format_select = Select(
title="ReflectionPrinter format:", options=REFLECTION_PRINTER_FORMATS, width=300,
title="ReflectionPrinter format:", options=REFLECTION_PRINTER_FORMATS, width=145,
)
reflectionPrinter_format_select.on_change("value", reflectionPrinter_format_select_callback)
@ -346,7 +346,10 @@ def create():
with tempfile.TemporaryDirectory() as temp_dir:
temp_file = temp_dir + "/temp.xml"
config.save_as(temp_file)
pyzebra.anatric(temp_file, anatric_path=doc.anatric_path)
if doc.anatric_path:
pyzebra.anatric(temp_file, anatric_path=doc.anatric_path)
else:
pyzebra.anatric(temp_file)
with open(config.logfile) as f_log:
output_log.value = f_log.read()
@ -354,50 +357,54 @@ def create():
process_button = Button(label="Process", button_type="primary")
process_button.on_click(process_button_callback)
output_log = TextAreaInput(title="Logfile output:", height=700, disabled=True)
output_config = TextAreaInput(title="Current config:", height=700, width=400, disabled=True)
output_log = TextAreaInput(title="Logfile output:", height=600, disabled=True)
output_config = TextAreaInput(title="Current config:", height=600, width=400, disabled=True)
tab_layout = row(
column(
upload_div,
upload_button,
row(logfile_textinput, logfile_verbosity_select),
row(filelist_type, filelist_format_textinput),
filelist_datapath_textinput,
filelist_ranges_textareainput,
crystal_sample_textinput,
row(lambda_textinput, zeroOM_textinput, zeroSTT_textinput, zeroCHI_textinput),
ub_textareainput,
row(dataFactory_implementation_select, dataFactory_dist1_textinput),
reflectionPrinter_format_select,
process_button,
),
column(
mode_radio_button_group,
row(
column(
threshold_textinput,
shell_textinput,
steepness_textinput,
duplicateDistance_textinput,
maxequal_textinput,
aps_window_textinput,
),
column(
adm_window_textinput,
border_textinput,
minWindow_textinput,
reflectionFile_textinput,
targetMonitor_textinput,
smoothSize_textinput,
loop_textinput,
minPeakCount_textinput,
displacementCurve_textinput,
),
general_params_layout = column(
row(logfile_textinput, logfile_verbosity_select),
row(filelist_type, filelist_format_textinput),
filelist_datapath_textinput,
filelist_ranges_textareainput,
crystal_sample_textinput,
row(lambda_textinput, zeroOM_textinput),
row(zeroSTT_textinput, zeroCHI_textinput),
ub_textareainput,
row(dataFactory_implementation_select, dataFactory_dist1_textinput),
reflectionPrinter_format_select,
)
algorithm_params_layout = column(
mode_radio_button_group,
row(
column(
threshold_textinput,
shell_textinput,
steepness_textinput,
duplicateDistance_textinput,
maxequal_textinput,
aps_window_textinput,
),
column(
adm_window_textinput,
border_textinput,
minWindow_textinput,
reflectionFile_textinput,
targetMonitor_textinput,
smoothSize_textinput,
loop_textinput,
minPeakCount_textinput,
displacementCurve_textinput,
),
),
output_config,
output_log,
)
tab_layout = column(
row(column(Spacer(height=2), upload_div), upload_button),
row(
general_params_layout,
algorithm_params_layout,
column(row(output_config, output_log), row(process_button)),
),
)
async def update_config():

View File

@ -30,6 +30,7 @@ from bokeh.models import (
Spacer,
Spinner,
TextAreaInput,
TextInput,
Title,
Toggle,
WheelZoomTool,
@ -40,12 +41,28 @@ import pyzebra
IMAGE_W = 256
IMAGE_H = 128
IMAGE_PLOT_W = int(IMAGE_W * 2.5)
IMAGE_PLOT_H = int(IMAGE_H * 2.5)
PROPOSAL_PATH = "/afs/psi.ch/project/sinqdata/2020/zebra/"
def create():
det_data = {}
roi_selection = {}
def proposal_textinput_callback(_attr, _old, new):
full_proposal_path = os.path.join(PROPOSAL_PATH, new.strip())
file_list = []
for file in os.listdir(full_proposal_path):
if file.endswith(".hdf"):
file_list.append((os.path.join(full_proposal_path, file), file))
filelist.options = file_list
filelist.value = file_list[0][0]
proposal_textinput = TextInput(title="Enter proposal number:", default_size=145)
proposal_textinput.on_change("value", proposal_textinput_callback)
def upload_button_callback(_attr, _old, new):
with io.StringIO(base64.b64decode(new).decode()) as file:
h5meta_list = pyzebra.parse_h5meta(file)
@ -53,6 +70,7 @@ def create():
filelist.options = [(entry, os.path.basename(entry)) for entry in file_list]
filelist.value = file_list[0]
upload_div = Div(text="or upload .cami file:", margin=(5, 5, 0, 5))
upload_button = FileInput(accept=".cami")
upload_button.on_change("value", upload_button_callback)
@ -83,18 +101,18 @@ def create():
image_glyph.color_mapper.low = im_min
image_glyph.color_mapper.high = im_max
if "magnetic_field" in det_data:
magnetic_field_spinner.value = det_data["magnetic_field"][index]
if "mf" in det_data:
mf_spinner.value = det_data["mf"][index]
else:
magnetic_field_spinner.value = None
mf_spinner.value = None
if "temperature" in det_data:
temperature_spinner.value = det_data["temperature"][index]
if "temp" in det_data:
temp_spinner.value = det_data["temp"][index]
else:
temperature_spinner.value = None
temp_spinner.value = None
gamma, nu = calculate_pol(det_data, index)
omega = np.ones((IMAGE_H, IMAGE_W)) * det_data["rot_angle"][index]
omega = np.ones((IMAGE_H, IMAGE_W)) * det_data["omega"][index]
image_source.data.update(gamma=[gamma], nu=[nu], omega=[omega])
def update_overview_plot():
@ -125,15 +143,16 @@ def create():
overview_plot_x_image_source.data.update(y=[0], dh=[n_im])
overview_plot_y_image_source.data.update(y=[0], dh=[n_im])
elif frame_button_group.active == 1: # Omega
overview_plot_x.axis[1].axis_label = "Omega"
overview_plot_y.axis[1].axis_label = "Omega"
elif frame_button_group.active == 1: # Variable angle
scan_motor = det_data["scan_motor"]
overview_plot_x.axis[1].axis_label = scan_motor
overview_plot_y.axis[1].axis_label = scan_motor
om = det_data["rot_angle"]
om_start = om[0]
om_end = (om[-1] - om[0]) * n_im / (n_im - 1)
overview_plot_x_image_source.data.update(y=[om_start], dh=[om_end])
overview_plot_y_image_source.data.update(y=[om_start], dh=[om_end])
var = det_data[scan_motor]
var_start = var[0]
var_end = (var[-1] - var[0]) * n_im / (n_im - 1)
overview_plot_x_image_source.data.update(y=[var_start], dh=[var_end])
overview_plot_y_image_source.data.update(y=[var_start], dh=[var_end])
def filelist_callback(_attr, _old, new):
nonlocal det_data
@ -141,10 +160,17 @@ def create():
index_spinner.value = 0
index_spinner.high = det_data["data"].shape[0] - 1
zebra_mode = det_data["zebra_mode"]
if zebra_mode == "nb":
geometry_textinput.value = "normal beam"
else: # zebra_mode == "bi"
geometry_textinput.value = "bisecting"
update_image(0)
update_overview_plot()
filelist = Select()
filelist = Select(title="Available .hdf files:")
filelist.on_change("value", filelist_callback)
def index_spinner_callback(_attr, _old, new):
@ -156,8 +182,8 @@ def create():
plot = Plot(
x_range=Range1d(0, IMAGE_W, bounds=(0, IMAGE_W)),
y_range=Range1d(0, IMAGE_H, bounds=(0, IMAGE_H)),
plot_height=IMAGE_H * 3,
plot_width=IMAGE_W * 3,
plot_height=IMAGE_PLOT_H,
plot_width=IMAGE_PLOT_W,
toolbar_location="left",
)
@ -210,8 +236,8 @@ def create():
proj_v = Plot(
x_range=plot.x_range,
y_range=DataRange1d(),
plot_height=200,
plot_width=IMAGE_W * 3,
plot_height=150,
plot_width=IMAGE_PLOT_W,
toolbar_location=None,
)
@ -227,8 +253,8 @@ def create():
proj_h = Plot(
x_range=DataRange1d(),
y_range=plot.y_range,
plot_height=IMAGE_H * 3,
plot_width=200,
plot_height=IMAGE_PLOT_H,
plot_width=150,
toolbar_location=None,
)
@ -291,8 +317,8 @@ def create():
title=Title(text="Projections on X-axis"),
x_range=det_x_range,
y_range=frame_range,
plot_height=500,
plot_width=IMAGE_W * 3,
plot_height=400,
plot_width=IMAGE_PLOT_W,
)
# ---- tools
@ -328,8 +354,8 @@ def create():
title=Title(text="Projections on Y-axis"),
x_range=det_y_range,
y_range=frame_range,
plot_height=500,
plot_width=IMAGE_H * 3,
plot_height=400,
plot_width=IMAGE_PLOT_H,
)
# ---- tools
@ -363,14 +389,14 @@ def create():
def frame_button_group_callback(_active):
update_overview_plot()
frame_button_group = RadioButtonGroup(labels=["Frames", "Omega"], active=0)
frame_button_group = RadioButtonGroup(labels=["Frames", "Variable Angle"], active=0)
frame_button_group.on_click(frame_button_group_callback)
roi_avg_plot = Plot(
x_range=DataRange1d(),
y_range=DataRange1d(),
plot_height=200,
plot_width=IMAGE_W * 3,
plot_width=IMAGE_PLOT_W,
toolbar_location="left",
)
@ -404,8 +430,6 @@ def create():
colormap.on_change("value", colormap_callback)
colormap.value = "plasma"
radio_button_group = RadioButtonGroup(labels=["normal beam", "bisecting"], active=0)
STEP = 1
# ---- colormap auto toggle button
def auto_toggle_callback(state):
@ -506,8 +530,7 @@ def create():
def hkl_button_callback():
index = index_spinner.value
geometry = "bi" if radio_button_group.active else "nb"
h, k, l = calculate_hkl(det_data, index, geometry)
h, k, l = calculate_hkl(det_data, index)
image_source.data.update(h=[h], k=[k], l=[l])
hkl_button = Button(label="Calculate hkl (slow)")
@ -537,10 +560,11 @@ def create():
selection_button = Button(label="Add selection")
selection_button.on_click(selection_button_callback)
magnetic_field_spinner = Spinner(
mf_spinner = Spinner(
title="Magnetic field:", format="0.00", width=145, disabled=True
)
temperature_spinner = Spinner(title="Temperature:", format="0.00", width=145, disabled=True)
temp_spinner = Spinner(title="Temperature:", format="0.00", width=145, disabled=True)
geometry_textinput = TextInput(title="Geometry:", disabled=True)
# Final layout
layout_image = column(gridplot([[proj_v, None], [plot, proj_h]], merge_tools=False))
@ -553,9 +577,8 @@ def create():
proj_display_min_spinner,
),
)
geometry_div = Div(text="Geometry:", margin=[5, 5, -5, 5])
hkl_layout = column(column(geometry_div, radio_button_group), hkl_button)
params_layout = row(magnetic_field_spinner, temperature_spinner)
hkl_layout = column(geometry_textinput, hkl_button)
params_layout = row(mf_spinner, temp_spinner)
layout_controls = row(
column(selection_button, selection_list),
@ -574,10 +597,11 @@ def create():
),
)
upload_div = Div(text="Upload .cami file:")
tab_layout = row(
column(
row(column(Spacer(height=5), upload_div), upload_button, filelist),
row(
proposal_textinput, filelist, Spacer(width=100), column(upload_div, upload_button),
),
layout_overview,
layout_controls,
),
@ -587,31 +611,32 @@ def create():
return Panel(child=tab_layout, title="hdf viewer")
def calculate_hkl(det_data, index, geometry):
def calculate_hkl(det_data, index):
h = np.empty(shape=(IMAGE_H, IMAGE_W))
k = np.empty(shape=(IMAGE_H, IMAGE_W))
l = np.empty(shape=(IMAGE_H, IMAGE_W))
wave = det_data["wave"]
ddist = det_data["ddist"]
gammad = det_data["pol_angle"][index]
om = det_data["rot_angle"][index]
nud = det_data["tlt_angle"]
ub = det_data["UB"]
gammad = det_data["gamma"][index]
om = det_data["omega"][index]
nud = det_data["nu"]
ub = det_data["ub"]
geometry = det_data["zebra_mode"]
if geometry == "bi":
ch = det_data["chi_angle"][index]
ph = det_data["phi_angle"][index]
chi = det_data["chi"][index]
phi = det_data["phi"][index]
elif geometry == "nb":
ch = 0
ph = 0
chi = 0
phi = 0
else:
raise ValueError(f"Unknown geometry type '{geometry}'")
for xi in np.arange(IMAGE_W):
for yi in np.arange(IMAGE_H):
h[yi, xi], k[yi, xi], l[yi, xi] = pyzebra.ang2hkl(
wave, ddist, gammad, om, ch, ph, nud, ub, xi, yi
wave, ddist, gammad, om, chi, phi, nud, ub, xi, yi
)
return h, k, l
@ -622,8 +647,8 @@ def calculate_pol(det_data, index):
nu = np.empty(shape=(IMAGE_H, IMAGE_W))
ddist = det_data["ddist"]
gammad = det_data["pol_angle"][index]
nud = det_data["tlt_angle"]
gammad = det_data["gamma"][index]
nud = det_data["nu"]
for xi in np.arange(IMAGE_W):
for yi in np.arange(IMAGE_H):

View File

@ -4,12 +4,10 @@ import itertools
import os
import tempfile
import types
from copy import deepcopy
import numpy as np
from bokeh.layouts import column, row
from bokeh.models import (
Asterisk,
BasicTicker,
Button,
CheckboxEditor,
@ -21,6 +19,8 @@ from bokeh.models import (
Dropdown,
FileInput,
Grid,
HoverTool,
Legend,
Line,
LinearAxis,
MultiLine,
@ -44,28 +44,29 @@ from bokeh.models import (
WheelZoomTool,
Whisker,
)
from bokeh.palettes import Category10
from bokeh.palettes import Category10, Turbo256
from bokeh.transform import linear_cmap
import pyzebra
from pyzebra.ccl_io import AREA_METHODS
javaScript = """
setTimeout(function() {
const filename = 'output' + js_data.data['ext']
const blob = new Blob([js_data.data['cont']], {type: 'text/plain'})
for (let i = 0; i < js_data.data['fname'].length; i++) {
if (js_data.data['content'][i] === "") continue;
const blob = new Blob([js_data.data['content'][i]], {type: 'text/plain'})
const link = document.createElement('a');
document.body.appendChild(link);
const url = window.URL.createObjectURL(blob);
link.href = url;
link.download = filename;
link.download = js_data.data['fname'][i];
link.click();
window.URL.revokeObjectURL(url);
document.body.removeChild(link);
}, 3000);
}
"""
PROPOSAL_PATH = "/afs/psi.ch/project/sinqdata/2020/zebra/"
PLOT_TYPES = ("single scan", "overview")
def color_palette(n_colors):
@ -74,10 +75,9 @@ def color_palette(n_colors):
def create():
det_data = {}
det_data = []
fit_params = {}
peak_pos_textinput_lock = False
js_data = ColumnDataSource(data=dict(cont=[], ext=[]))
js_data = ColumnDataSource(data=dict(content=["", ""], fname=["", ""]))
def proposal_textinput_callback(_attr, _old, new):
full_proposal_path = os.path.join(PROPOSAL_PATH, new.strip())
@ -86,57 +86,62 @@ def create():
if file.endswith(".dat"):
dat_file_list.append((os.path.join(full_proposal_path, file), file))
file_select.options = dat_file_list
file_select.value = dat_file_list[0][0]
proposal_textinput = TextInput(title="Enter proposal number:", default_size=145)
proposal_textinput = TextInput(title="Proposal number:", default_size=200)
proposal_textinput.on_change("value", proposal_textinput_callback)
def _init_datatable():
scan_list = list(det_data["scan"].keys())
scan_list = [s["idx"] for s in det_data]
file_list = []
extra_meta = det_data.get("extra_meta", {})
for scan_id in scan_list:
if scan_id in extra_meta:
f_path = extra_meta[scan_id]["original_filename"]
else:
f_path = det_data["meta"]["original_filename"]
_, f_name = os.path.split(f_path)
file_list.append(f_name)
for scan in det_data:
file_list.append(os.path.basename(scan["original_filename"]))
scan_table_source.data.update(
file=file_list,
scan=scan_list,
param=[""] * len(scan_list),
peaks=[0] * len(scan_list),
param=[None] * len(scan_list),
fit=[0] * len(scan_list),
export=[True] * len(scan_list),
)
scan_table_source.selected.indices = []
scan_table_source.selected.indices = [0]
param_select.value = "user defined"
def file_select_callback(_attr, _old, _new):
pass
file_select = Select(title="Available .dat files")
file_select = MultiSelect(title="Available .dat files:", default_size=200, height=250)
file_select.on_change("value", file_select_callback)
def file_open_button_callback():
nonlocal det_data
with open(file_select.value) as file:
_, ext = os.path.splitext(file_select.value)
det_data = pyzebra.parse_1D(file, ext)
det_data = []
for f_name in file_select.value:
with open(f_name) as file:
base, ext = os.path.splitext(f_name)
if det_data:
append_data = pyzebra.parse_1D(file, ext)
pyzebra.normalize_dataset(append_data, monitor_spinner.value)
det_data.extend(append_data)
else:
det_data = pyzebra.parse_1D(file, ext)
pyzebra.normalize_dataset(det_data, monitor_spinner.value)
js_data.data.update(fname=[base + ".comm", base + ".incomm"])
_init_datatable()
file_open_button = Button(label="Open", default_size=100)
file_open_button = Button(label="Open New", default_size=100)
file_open_button.on_click(file_open_button_callback)
def file_append_button_callback():
with open(file_select.value) as file:
_, ext = os.path.splitext(file_select.value)
append_data = pyzebra.parse_1D(file, ext)
pyzebra.add_dict(det_data, append_data)
for f_name in file_select.value:
with open(f_name) as file:
_, ext = os.path.splitext(f_name)
append_data = pyzebra.parse_1D(file, ext)
pyzebra.normalize_dataset(append_data, monitor_spinner.value)
det_data.extend(append_data)
_init_datatable()
@@ -145,19 +150,23 @@ def create():
def upload_button_callback(_attr, _old, new):
nonlocal det_data
det_data = {}
det_data = []
for f_str, f_name in zip(new, upload_button.filename):
with io.StringIO(base64.b64decode(f_str).decode()) as file:
_, ext = os.path.splitext(f_name)
base, ext = os.path.splitext(f_name)
if det_data:
append_data = pyzebra.parse_1D(file, ext)
pyzebra.add_dict(det_data, append_data)
pyzebra.normalize_dataset(append_data, monitor_spinner.value)
det_data.extend(append_data)
else:
det_data = pyzebra.parse_1D(file, ext)
pyzebra.normalize_dataset(det_data, monitor_spinner.value)
js_data.data.update(fname=[base + ".comm", base + ".incomm"])
_init_datatable()
upload_button = FileInput(accept=".dat", multiple=True)
upload_div = Div(text="or upload new .dat files:", margin=(5, 5, 0, 5))
upload_button = FileInput(accept=".dat", multiple=True, default_size=200)
upload_button.on_change("value", upload_button_callback)
def append_upload_button_callback(_attr, _old, new):
@@ -165,159 +174,152 @@ def create():
with io.StringIO(base64.b64decode(f_str).decode()) as file:
_, ext = os.path.splitext(f_name)
append_data = pyzebra.parse_1D(file, ext)
pyzebra.add_dict(det_data, append_data)
pyzebra.normalize_dataset(append_data, monitor_spinner.value)
det_data.extend(append_data)
_init_datatable()
append_upload_button = FileInput(accept=".dat", multiple=True)
append_upload_div = Div(text="append extra files:", margin=(5, 5, 0, 5))
append_upload_button = FileInput(accept=".dat", multiple=True, default_size=200)
append_upload_button.on_change("value", append_upload_button_callback)
def monitor_spinner_callback(_attr, _old, new):
if det_data:
pyzebra.normalize_dataset(det_data, new)
_update_plot()
monitor_spinner = Spinner(title="Monitor:", mode="int", value=100_000, low=1, width=145)
monitor_spinner.on_change("value", monitor_spinner_callback)
def _update_table():
num_of_peaks = [len(scan.get("peak_indexes", [])) for scan in det_data["scan"].values()]
fit_ok = [(1 if "fit" in scan else 0) for scan in det_data["scan"].values()]
scan_table_source.data.update(peaks=num_of_peaks, fit=fit_ok)
fit_ok = [(1 if "fit" in scan else 0) for scan in det_data]
scan_table_source.data.update(fit=fit_ok)
def _update_plot():
_update_single_scan_plot(_get_selected_scan())
_update_overview()
def _update_single_scan_plot(scan):
nonlocal peak_pos_textinput_lock
peak_pos_textinput_lock = True
scan_motor = scan["scan_motor"]
y = scan["Counts"]
x = scan["om"]
x = scan[scan_motor]
plot.axis[0].axis_label = scan_motor
plot_scatter_source.data.update(x=x, y=y, y_upper=y + np.sqrt(y), y_lower=y - np.sqrt(y))
num_of_peaks = len(scan.get("peak_indexes", []))
if num_of_peaks is not None and num_of_peaks > 0:
peak_indexes = scan["peak_indexes"]
if len(peak_indexes) == 1:
peak_pos_textinput.value = str(x[peak_indexes[0]])
else:
peak_pos_textinput.value = str([x[ind] for ind in peak_indexes])
plot_peak_source.data.update(x=x[peak_indexes], y=scan["peak_heights"])
plot_line_smooth_source.data.update(x=x, y=scan["smooth_peaks"])
else:
peak_pos_textinput.value = None
plot_peak_source.data.update(x=[], y=[])
plot_line_smooth_source.data.update(x=[], y=[])
peak_pos_textinput_lock = False
fit = scan.get("fit")
if fit is not None:
x = scan["fit"]["x_fit"]
plot_gauss_source.data.update(x=x, y=scan["fit"]["comps"]["gaussian"])
plot_bkg_source.data.update(x=x, y=scan["fit"]["comps"]["background"])
params = fit["result"].params
fit_output_textinput.value = (
"Gaussian: centre = %9.4f, sigma = %9.4f, area = %9.4f \n"
"background: slope = %9.4f, intercept = %9.4f \n"
"Int. area = %9.4f +/- %9.4f \n"
"fit area = %9.4f +/- %9.4f \n"
"ratio((fit-int)/fit) = %9.4f"
% (
params["g_cen"].value,
params["g_width"].value,
params["g_amp"].value,
params["slope"].value,
params["intercept"].value,
fit["int_area"].n,
fit["int_area"].s,
params["g_amp"].value,
params["g_amp"].stderr,
(params["g_amp"].value - fit["int_area"].n) / params["g_amp"].value,
)
)
numfit_min, numfit_max = fit["numfit"]
if numfit_min is None:
numfit_min_span.location = None
else:
numfit_min_span.location = x[numfit_min]
x_fit = np.linspace(x[0], x[-1], 100)
plot_fit_source.data.update(x=x_fit, y=fit.eval(x=x_fit))
if numfit_max is None:
numfit_max_span.location = None
else:
numfit_max_span.location = x[numfit_max]
x_bkg = []
y_bkg = []
xs_peak = []
ys_peak = []
comps = fit.eval_components(x=x_fit)
for i, model in enumerate(fit_params):
if "linear" in model:
x_bkg = x_fit
y_bkg = comps[f"f{i}_"]
elif any(val in model for val in ("gaussian", "voigt", "pvoigt")):
xs_peak.append(x_fit)
ys_peak.append(comps[f"f{i}_"])
plot_bkg_source.data.update(x=x_bkg, y=y_bkg)
plot_peak_source.data.update(xs=xs_peak, ys=ys_peak)
fit_output_textinput.value = fit.fit_report()
else:
plot_gauss_source.data.update(x=[], y=[])
plot_fit_source.data.update(x=[], y=[])
plot_bkg_source.data.update(x=[], y=[])
plot_peak_source.data.update(xs=[], ys=[])
fit_output_textinput.value = ""
numfit_min_span.location = None
numfit_max_span.location = None
def _update_overview():
xs = []
ys = []
param = []
for ind, p in enumerate(scan_table_source.data["param"]):
if p:
s = scan_table_source.data["scan"][ind]
xs.append(np.array(det_data["scan"][s]["om"]))
ys.append(np.array(det_data["scan"][s]["Counts"]))
x = []
y = []
par = []
for s, p in enumerate(scan_table_source.data["param"]):
if p is not None:
scan = det_data[s]
scan_motor = scan["scan_motor"]
xs.append(scan[scan_motor])
x.extend(scan[scan_motor])
ys.append(scan["Counts"])
y.extend([float(p)] * len(scan[scan_motor]))
param.append(float(p))
par.extend(scan["Counts"])
if det_data:
scan_motor = det_data[0]["scan_motor"]
ov_plot.axis[0].axis_label = scan_motor
ov_param_plot.axis[0].axis_label = scan_motor
ov_plot_mline_source.data.update(xs=xs, ys=ys, param=param, color=color_palette(len(xs)))
if y:
mapper["transform"].low = np.min([np.min(y) for y in ys])
mapper["transform"].high = np.max([np.max(y) for y in ys])
ov_param_plot_scatter_source.data.update(x=x, y=y, param=par)
# Main plot
plot = Plot(x_range=DataRange1d(), y_range=DataRange1d(), plot_height=400, plot_width=700)
plot = Plot(
x_range=DataRange1d(),
y_range=DataRange1d(only_visible=True),
plot_height=450,
plot_width=700,
)
plot.add_layout(LinearAxis(axis_label="Counts"), place="left")
plot.add_layout(LinearAxis(axis_label="Omega"), place="below")
plot.add_layout(LinearAxis(axis_label="Scan motor"), place="below")
plot.add_layout(Grid(dimension=0, ticker=BasicTicker()))
plot.add_layout(Grid(dimension=1, ticker=BasicTicker()))
plot_scatter_source = ColumnDataSource(dict(x=[0], y=[0], y_upper=[0], y_lower=[0]))
plot.add_glyph(
plot_scatter_source, Scatter(x="x", y="y", line_color="steelblue", name="single scan")
)
plot.add_layout(
Whisker(
source=plot_scatter_source,
base="x",
upper="y_upper",
lower="y_lower",
name="single scan",
)
plot_scatter = plot.add_glyph(
plot_scatter_source, Scatter(x="x", y="y", line_color="steelblue")
)
plot.add_layout(Whisker(source=plot_scatter_source, base="x", upper="y_upper", lower="y_lower"))
plot_line_smooth_source = ColumnDataSource(dict(x=[0], y=[0]))
plot.add_glyph(
plot_line_smooth_source,
Line(x="x", y="y", line_color="steelblue", line_dash="dashed", name="single scan"),
)
plot_gauss_source = ColumnDataSource(dict(x=[0], y=[0]))
plot.add_glyph(
plot_gauss_source,
Line(x="x", y="y", line_color="red", line_dash="dashed", name="single scan"),
)
plot_fit_source = ColumnDataSource(dict(x=[0], y=[0]))
plot_fit = plot.add_glyph(plot_fit_source, Line(x="x", y="y"))
plot_bkg_source = ColumnDataSource(dict(x=[0], y=[0]))
plot.add_glyph(
plot_bkg_source,
Line(x="x", y="y", line_color="green", line_dash="dashed", name="single scan"),
plot_bkg = plot.add_glyph(
plot_bkg_source, Line(x="x", y="y", line_color="green", line_dash="dashed")
)
plot_peak_source = ColumnDataSource(dict(x=[], y=[]))
plot.add_glyph(
plot_peak_source, Asterisk(x="x", y="y", size=10, line_color="red", name="single scan")
plot_peak_source = ColumnDataSource(dict(xs=[0], ys=[0]))
plot_peak = plot.add_glyph(
plot_peak_source, MultiLine(xs="xs", ys="ys", line_color="red", line_dash="dashed")
)
numfit_min_span = Span(
location=None, dimension="height", line_dash="dashed", name="single scan"
)
plot.add_layout(numfit_min_span)
fit_from_span = Span(location=None, dimension="height", line_dash="dashed")
plot.add_layout(fit_from_span)
numfit_max_span = Span(
location=None, dimension="height", line_dash="dashed", name="single scan"
fit_to_span = Span(location=None, dimension="height", line_dash="dashed")
plot.add_layout(fit_to_span)
plot.add_layout(
Legend(
items=[
("data", [plot_scatter]),
("best fit", [plot_fit]),
("peak", [plot_peak]),
("linear", [plot_bkg]),
],
location="top_left",
click_policy="hide",
)
)
plot.add_layout(numfit_max_span)
plot.add_tools(PanTool(), WheelZoomTool(), ResetTool())
plot.toolbar.logo = None
@@ -326,22 +328,48 @@ def create():
ov_plot = Plot(x_range=DataRange1d(), y_range=DataRange1d(), plot_height=400, plot_width=700)
ov_plot.add_layout(LinearAxis(axis_label="Counts"), place="left")
ov_plot.add_layout(LinearAxis(axis_label="Omega"), place="below")
ov_plot.add_layout(LinearAxis(axis_label="Scan motor"), place="below")
ov_plot.add_layout(Grid(dimension=0, ticker=BasicTicker()))
ov_plot.add_layout(Grid(dimension=1, ticker=BasicTicker()))
ov_plot_mline_source = ColumnDataSource(dict(xs=[], ys=[], param=[], color=[]))
ov_plot.add_glyph(
ov_plot_mline_source, MultiLine(xs="xs", ys="ys", line_color="color", name="overview")
)
ov_plot.add_glyph(ov_plot_mline_source, MultiLine(xs="xs", ys="ys", line_color="color"))
hover_tool = HoverTool(tooltips=[("param", "@param")])
ov_plot.add_tools(PanTool(), WheelZoomTool(), hover_tool, ResetTool())
ov_plot.add_tools(PanTool(), WheelZoomTool(), ResetTool())
ov_plot.toolbar.logo = None
# Overview params plot
ov_param_plot = Plot(
x_range=DataRange1d(), y_range=DataRange1d(), plot_height=400, plot_width=700
)
ov_param_plot.add_layout(LinearAxis(axis_label="Param"), place="left")
ov_param_plot.add_layout(LinearAxis(axis_label="Scan motor"), place="below")
ov_param_plot.add_layout(Grid(dimension=0, ticker=BasicTicker()))
ov_param_plot.add_layout(Grid(dimension=1, ticker=BasicTicker()))
ov_param_plot_scatter_source = ColumnDataSource(dict(x=[], y=[], param=[]))
mapper = linear_cmap(field_name="param", palette=Turbo256, low=0, high=50)
ov_param_plot.add_glyph(
ov_param_plot_scatter_source,
Scatter(x="x", y="y", line_color=mapper, fill_color=mapper, size=10),
)
ov_param_plot.add_tools(PanTool(), WheelZoomTool(), ResetTool())
ov_param_plot.toolbar.logo = None
# Plot tabs
plots = Tabs(
tabs=[Panel(child=plot, title="single scan"), Panel(child=ov_plot, title="overview")]
tabs=[
Panel(child=plot, title="single scan"),
Panel(child=ov_plot, title="overview"),
Panel(child=ov_param_plot, title="overview map"),
]
)
# Scan select
@@ -362,23 +390,19 @@ def create():
_update_plot()
scan_table_source = ColumnDataSource(
dict(file=[], scan=[], param=[], peaks=[], fit=[], export=[])
)
scan_table_source = ColumnDataSource(dict(file=[], scan=[], param=[], fit=[], export=[]))
scan_table = DataTable(
source=scan_table_source,
columns=[
TableColumn(field="file", title="file", width=150),
TableColumn(field="scan", title="scan", width=50),
TableColumn(field="param", title="param", width=50),
TableColumn(field="peaks", title="Peaks", width=50),
TableColumn(field="param", title="param", editor=NumberEditor(), width=50),
TableColumn(field="fit", title="Fit", width=50),
TableColumn(field="export", title="Export", editor=CheckboxEditor(), width=50),
],
width=400,
index_position=None,
width=410, # +60 because of the index column
editable=True,
fit_columns=False,
autosize_mode="none",
)
def scan_table_source_callback(_attr, _old, _new):
@@ -389,42 +413,39 @@ def create():
scan_table_source.on_change("data", scan_table_source_callback)
def _get_selected_scan():
selected_index = scan_table_source.selected.indices[0]
selected_scan_id = scan_table_source.data["scan"][selected_index]
return det_data["scan"][selected_scan_id]
return det_data[scan_table_source.selected.indices[0]]
def peak_pos_textinput_callback(_attr, _old, new):
if new is not None and not peak_pos_textinput_lock:
scan = _get_selected_scan()
def param_select_callback(_attr, _old, new):
if new == "user defined":
param = [None] * len(det_data)
else:
param = [scan[new] for scan in det_data]
peak_ind = (np.abs(scan["om"] - float(new))).argmin()
scan["peak_indexes"] = np.array([peak_ind], dtype=np.int64)
scan["peak_heights"] = np.array([scan["smooth_peaks"][peak_ind]])
_update_table()
_update_plot()
scan_table_source.data["param"] = param
peak_pos_textinput = TextInput(title="Peak position:", default_size=145)
peak_pos_textinput.on_change("value", peak_pos_textinput_callback)
peak_int_ratio_spinner = Spinner(
title="Peak intensity ratio:", value=0.8, step=0.01, low=0, high=1, default_size=145
param_select = Select(
title="Parameter:",
options=["user defined", "temp", "mf", "h", "k", "l"],
value="user defined",
default_size=145,
)
peak_prominence_spinner = Spinner(title="Peak prominence:", value=50, low=0, default_size=145)
smooth_toggle = Toggle(label="Smooth curve", default_size=145)
window_size_spinner = Spinner(title="Window size:", value=7, step=2, low=1, default_size=145)
poly_order_spinner = Spinner(title="Poly order:", value=3, low=0, default_size=145)
param_select.on_change("value", param_select_callback)
integ_from = Spinner(title="Integrate from:", default_size=145)
integ_to = Spinner(title="to:", default_size=145)
def fit_from_spinner_callback(_attr, _old, new):
fit_from_span.location = new
def fitparam_reset_button_callback():
...
fit_from_spinner = Spinner(title="Fit from:", default_size=145)
fit_from_spinner.on_change("value", fit_from_spinner_callback)
fitparam_reset_button = Button(label="Reset to defaults", default_size=145, disabled=True)
fitparam_reset_button.on_click(fitparam_reset_button_callback)
def fit_to_spinner_callback(_attr, _old, new):
fit_to_span.location = new
fit_to_spinner = Spinner(title="to:", default_size=145)
fit_to_spinner.on_change("value", fit_to_spinner_callback)
def fitparams_add_dropdown_callback(click):
new_tag = str(fitparams_select.tags[0]) # bokeh requires (str, str) for MultiSelect options
# bokeh requires (str, str) for MultiSelect options
new_tag = f"{click.item}-{fitparams_select.tags[0]}"
fitparams_select.options.append((new_tag, click.item))
fit_params[new_tag] = fitparams_factory(click.item)
fitparams_select.tags[0] += 1
@@ -432,11 +453,11 @@ def create():
fitparams_add_dropdown = Dropdown(
label="Add fit function",
menu=[
("Background", "background"),
("Gauss", "gauss"),
("Linear", "linear"),
("Gaussian", "gaussian"),
("Voigt", "voigt"),
("Pseudo Voigt", "pseudovoigt"),
("Pseudo Voigt1", "pseudovoigt1"),
("Pseudo Voigt", "pvoigt"),
# ("Pseudo Voigt1", "pseudovoigt1"),
],
default_size=145,
)
@@ -456,7 +477,7 @@ def create():
if new:
fitparams_table_source.data.update(fit_params[new[0]])
else:
fitparams_table_source.data.update(dict(param=[], guess=[], vary=[], min=[], max=[]))
fitparams_table_source.data.update(dict(param=[], value=[], vary=[], min=[], max=[]))
fitparams_select = MultiSelect(options=[], height=120, default_size=145)
fitparams_select.tags = [0]
@@ -477,32 +498,32 @@ def create():
fitparams_remove_button.on_click(fitparams_remove_button_callback)
def fitparams_factory(function):
if function == "background":
params = ["slope", "offset"]
elif function == "gauss":
params = ["center", "sigma", "amplitude"]
if function == "linear":
params = ["slope", "intercept"]
elif function == "gaussian":
params = ["amplitude", "center", "sigma"]
elif function == "voigt":
params = ["center", "sigma", "amplitude", "gamma"]
elif function == "pseudovoigt":
params = ["center", "sigma", "amplitude", "fraction"]
params = ["amplitude", "center", "sigma", "gamma"]
elif function == "pvoigt":
params = ["amplitude", "center", "sigma", "fraction"]
elif function == "pseudovoigt1":
params = ["center", "g_sigma", "l_sigma", "amplitude", "fraction"]
params = ["amplitude", "center", "g_sigma", "l_sigma", "fraction"]
else:
raise ValueError("Unknown fit function")
n = len(params)
fitparams = dict(
param=params, guess=[None] * n, vary=[True] * n, min=[None] * n, max=[None] * n,
param=params, value=[None] * n, vary=[True] * n, min=[None] * n, max=[None] * n,
)
return fitparams
fitparams_table_source = ColumnDataSource(dict(param=[], guess=[], vary=[], min=[], max=[]))
fitparams_table_source = ColumnDataSource(dict(param=[], value=[], vary=[], min=[], max=[]))
fitparams_table = DataTable(
source=fitparams_table_source,
columns=[
TableColumn(field="param", title="Parameter"),
TableColumn(field="guess", title="Guess", editor=NumberEditor()),
TableColumn(field="value", title="Value", editor=NumberEditor()),
TableColumn(field="vary", title="Vary", editor=CheckboxEditor()),
TableColumn(field="min", title="Min", editor=NumberEditor()),
TableColumn(field="max", title="Max", editor=NumberEditor()),
@@ -515,57 +536,18 @@ def create():
)
# start with `linear` and `gaussian` fit functions added
fitparams_add_dropdown_callback(types.SimpleNamespace(item="background"))
fitparams_add_dropdown_callback(types.SimpleNamespace(item="gauss"))
fitparams_add_dropdown_callback(types.SimpleNamespace(item="linear"))
fitparams_add_dropdown_callback(types.SimpleNamespace(item="gaussian"))
fitparams_select.value = ["gaussian-1"]  # select the gaussian component
fit_output_textinput = TextAreaInput(title="Fit results:", width=450, height=400)
def _get_peakfind_params():
return dict(
int_threshold=peak_int_ratio_spinner.value,
prominence=peak_prominence_spinner.value,
smooth=smooth_toggle.active,
window_size=window_size_spinner.value,
poly_order=poly_order_spinner.value,
)
def peakfind_all_button_callback():
peakfind_params = _get_peakfind_params()
for scan in det_data["scan"].values():
pyzebra.ccl_findpeaks(scan, **peakfind_params)
_update_table()
_update_plot()
peakfind_all_button = Button(label="Peak Find All", button_type="primary", default_size=145)
peakfind_all_button.on_click(peakfind_all_button_callback)
def peakfind_button_callback():
scan = _get_selected_scan()
pyzebra.ccl_findpeaks(scan, **_get_peakfind_params())
_update_table()
_update_plot()
peakfind_button = Button(label="Peak Find Current", default_size=145)
peakfind_button.on_click(peakfind_button_callback)
def _get_fit_params():
return dict(
guess=fit_params["1"]["guess"] + fit_params["0"]["guess"],
vary=fit_params["1"]["vary"] + fit_params["0"]["vary"],
constraints_min=fit_params["1"]["min"] + fit_params["0"]["min"],
constraints_max=fit_params["1"]["max"] + fit_params["0"]["max"],
numfit_min=integ_from.value,
numfit_max=integ_to.value,
binning=bin_size_spinner.value,
)
fit_output_textinput = TextAreaInput(title="Fit results:", width=750, height=200)
def fit_all_button_callback():
fit_params = _get_fit_params()
for scan in det_data["scan"].values():
# fit_params are updated inplace within `fitccl`
pyzebra.fitccl(scan, **deepcopy(fit_params))
for scan, export in zip(det_data, scan_table_source.data["export"]):
if export:
pyzebra.fit_scan(
scan, fit_params, fit_from=fit_from_spinner.value, fit_to=fit_to_spinner.value
)
_update_plot()
_update_table()
@@ -575,7 +557,9 @@ def create():
def fit_button_callback():
scan = _get_selected_scan()
pyzebra.fitccl(scan, **_get_fit_params())
pyzebra.fit_scan(
scan, fit_params, fit_from=fit_from_spinner.value, fit_to=fit_to_spinner.value
)
_update_plot()
_update_table()
@@ -583,104 +567,83 @@ def create():
fit_button = Button(label="Fit Current", default_size=145)
fit_button.on_click(fit_button_callback)
def area_method_radiobutton_callback(_attr, _old, new):
det_data["meta"]["area_method"] = AREA_METHODS[new]
area_method_radiobutton = RadioButtonGroup(
labels=["Fit area", "Int area"], active=0, default_size=145,
labels=["Fit area", "Int area"], active=0, default_size=145, disabled=True
)
area_method_radiobutton.on_change("active", area_method_radiobutton_callback)
bin_size_spinner = Spinner(title="Bin size:", value=1, low=1, step=1, default_size=145)
bin_size_spinner = Spinner(
title="Bin size:", value=1, low=1, step=1, default_size=145, disabled=True
)
lorentz_toggle = Toggle(label="Lorentz Correction", default_size=145)
preview_output_textinput = TextAreaInput(title="Export file preview:", width=450, height=400)
def preview_output_button_callback():
if det_data["meta"]["indices"] == "hkl":
ext = ".comm"
elif det_data["meta"]["indices"] == "real":
ext = ".incomm"
export_preview_textinput = TextAreaInput(title="Export preview:", width=450, height=400)
def preview_button_callback():
with tempfile.TemporaryDirectory() as temp_dir:
temp_file = temp_dir + "/temp"
export_data = deepcopy(det_data)
for s, export in zip(scan_table_source.data["scan"], scan_table_source.data["export"]):
if not export:
del export_data["scan"][s]
pyzebra.export_comm(export_data, temp_file, lorentz=lorentz_toggle.active)
export_data = []
for s, export in zip(det_data, scan_table_source.data["export"]):
if export:
export_data.append(s)
with open(f"{temp_file}{ext}") as f:
preview_output_textinput.value = f.read()
pyzebra.export_1D(
export_data,
temp_file,
area_method=AREA_METHODS[int(area_method_radiobutton.active)],
lorentz=lorentz_toggle.active,
)
preview_output_button = Button(label="Preview file", default_size=220)
preview_output_button.on_click(preview_output_button_callback)
exported_content = ""
file_content = []
for ext in (".comm", ".incomm"):
fname = temp_file + ext
if os.path.isfile(fname):
with open(fname) as f:
content = f.read()
exported_content += f"{ext} file:\n" + content
else:
content = ""
file_content.append(content)
def export_results(det_data):
if det_data["meta"]["indices"] == "hkl":
ext = ".comm"
elif det_data["meta"]["indices"] == "real":
ext = ".incomm"
js_data.data.update(content=file_content)
export_preview_textinput.value = exported_content
with tempfile.TemporaryDirectory() as temp_dir:
temp_file = temp_dir + "/temp"
export_data = deepcopy(det_data)
for s, export in zip(scan_table_source.data["scan"], scan_table_source.data["export"]):
if not export:
del export_data["scan"][s]
pyzebra.export_comm(export_data, temp_file, lorentz=lorentz_toggle.active)
preview_button = Button(label="Preview", default_size=220)
preview_button.on_click(preview_button_callback)
with open(f"{temp_file}{ext}") as f:
output_content = f.read()
return output_content, ext
def save_button_callback():
cont, ext = export_results(det_data)
js_data.data.update(cont=[cont], ext=[ext])
save_button = Button(label="Download file", button_type="success", default_size=220)
save_button.on_click(save_button_callback)
save_button = Button(label="Download preview", button_type="success", default_size=220)
save_button.js_on_click(CustomJS(args={"js_data": js_data}, code=javaScript))
findpeak_controls = column(
row(peak_pos_textinput, column(Spacer(height=19), smooth_toggle)),
row(peak_int_ratio_spinner, peak_prominence_spinner),
row(window_size_spinner, poly_order_spinner),
row(peakfind_button, peakfind_all_button),
)
fitpeak_controls = row(
column(fitparams_add_dropdown, fitparams_select, fitparams_remove_button),
fitparams_table,
Spacer(width=20),
column(
row(integ_from, integ_to),
row(fit_from_spinner, fit_to_spinner),
row(bin_size_spinner, column(Spacer(height=19), lorentz_toggle)),
row(fitparam_reset_button, area_method_radiobutton),
row(area_method_radiobutton),
row(fit_button, fit_all_button),
),
)
export_layout = column(preview_output_textinput, row(preview_output_button, save_button))
scan_layout = column(scan_table, row(monitor_spinner, param_select))
import_layout = column(
proposal_textinput,
file_select,
row(file_open_button, file_append_button),
upload_div,
upload_button,
append_upload_div,
append_upload_button,
)
export_layout = column(export_preview_textinput, row(preview_button, save_button))
upload_div = Div(text="Or upload .dat files:")
append_upload_div = Div(text="append extra .dat files:")
tab_layout = column(
row(
proposal_textinput,
file_select,
column(Spacer(height=19), row(file_open_button, file_append_button)),
),
row(
column(Spacer(height=5), upload_div),
upload_button,
column(Spacer(height=5), append_upload_div),
append_upload_button,
),
row(scan_table, plots, Spacer(width=30), fit_output_textinput, export_layout),
row(findpeak_controls, Spacer(width=30), fitpeak_controls),
row(import_layout, scan_layout, plots, Spacer(width=30), export_layout),
row(fitpeak_controls, fit_output_textinput),
)
return Panel(child=tab_layout, title="param study")
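The export tab above downloads files entirely client-side: a ColumnDataSource carries parallel lists of file contents and file names, and a CustomJS callback turns each non-empty entry into a Blob download. A condensed, self-contained sketch of that pattern (identifiers mirror the panel code above; the sample content is illustrative):
from bokeh.models import Button, ColumnDataSource, CustomJS
js_data = ColumnDataSource(data=dict(content=["", ""], fname=["", ""]))
# server-side code fills the source, e.g. from the preview callback
js_data.data.update(content=["dummy line\n", ""], fname=["output.comm", "output.incomm"])
download_js = """
for (let i = 0; i < js_data.data['fname'].length; i++) {
    if (js_data.data['content'][i] === "") continue;
    const blob = new Blob([js_data.data['content'][i]], {type: 'text/plain'})
    const link = document.createElement('a');
    document.body.appendChild(link);
    const url = window.URL.createObjectURL(blob);
    link.href = url;
    link.download = js_data.data['fname'][i];
    link.click();
    window.URL.revokeObjectURL(url);
    document.body.removeChild(link);
}
"""
save_button = Button(label="Download preview", button_type="success")
save_button.js_on_click(CustomJS(args={"js_data": js_data}, code=download_js))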

pyzebra/app/panel_spind.py Normal file

@@ -0,0 +1,253 @@
import ast
import math
import os
import subprocess
import tempfile
from collections import defaultdict
import numpy as np
from bokeh.layouts import column, row
from bokeh.models import (
Button,
ColumnDataSource,
DataTable,
Panel,
Spinner,
TableColumn,
TextAreaInput,
TextInput,
)
from scipy.optimize import curve_fit
import pyzebra
def create():
path_prefix_textinput = TextInput(title="Path prefix:", value="")
selection_list = TextAreaInput(title="ROIs:", rows=7)
lattice_const_textinput = TextInput(
title="Lattice constants:", value="8.3211,8.3211,8.3211,90.00,90.00,90.00"
)
max_res_spinner = Spinner(title="max-res", value=2, step=0.01)
seed_pool_size_spinner = Spinner(title="seed-pool-size", value=5, step=0.01)
seed_len_tol_spinner = Spinner(title="seed-len-tol", value=0.02, step=0.01)
seed_angle_tol_spinner = Spinner(title="seed-angle-tol", value=1, step=0.01)
eval_hkl_tol_spinner = Spinner(title="eval-hkl-tol", value=0.15, step=0.01)
diff_vec = []
def process_button_callback():
nonlocal diff_vec
with tempfile.TemporaryDirectory() as temp_dir:
temp_peak_list_dir = os.path.join(temp_dir, "peak_list")
os.mkdir(temp_peak_list_dir)
temp_event_file = os.path.join(temp_peak_list_dir, "event-0.txt")
temp_hkl_file = os.path.join(temp_dir, "hkl.h5")
roi_dict = ast.literal_eval(selection_list.value)
comp_proc = subprocess.run(
[
"mpiexec",
"-n",
"2",
"python",
"spind/gen_hkl_table.py",
lattice_const_textinput.value,
"--max-res",
str(max_res_spinner.value),
"-o",
temp_hkl_file,
],
check=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
text=True,
)
print(" ".join(comp_proc.args))
print(comp_proc.stdout)
diff_vec = prepare_event_file(temp_event_file, roi_dict, path_prefix_textinput.value)
comp_proc = subprocess.run(
[
"mpiexec",
"-n",
"2",
"python",
"spind/SPIND.py",
temp_peak_list_dir,
temp_hkl_file,
"-o",
temp_dir,
"--seed-pool-size",
str(seed_pool_size_spinner.value),
"--seed-len-tol",
str(seed_len_tol_spinner.value),
"--seed-angle-tol",
str(seed_angle_tol_spinner.value),
"--eval-hkl-tol",
str(eval_hkl_tol_spinner.value),
],
check=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
text=True,
)
print(" ".join(comp_proc.args))
print(comp_proc.stdout)
try:
with open(os.path.join(temp_dir, "spind.txt")) as f_out:
spind_res = defaultdict(list)
for line in f_out:
c1, c2, c3, c4, c5, *c_rest = line.split()
spind_res["label"].append(c1)
spind_res["crystal_id"].append(c2)
spind_res["match_rate"].append(c3)
spind_res["matched_peaks"].append(c4)
spind_res["column_5"].append(c5)
# the remaining values are the spind UB matrix
vals = list(map(float, c_rest))
ub_matrix_spind = np.array(vals).reshape(3, 3)
ub_matrix = np.linalg.inv(np.transpose(ub_matrix_spind)) * 1e10
spind_res["ub_matrix"].append(ub_matrix)
results_table_source.data.update(spind_res)
except FileNotFoundError:
print("No results from spind")
process_button = Button(label="Process", button_type="primary")
process_button.on_click(process_button_callback)
hkl_textareainput = TextAreaInput(title="hkl values:", rows=7)
def results_table_select_callback(_attr, old, new):
if new:
ind = new[0]
ub_matrix = results_table_source.data["ub_matrix"][ind]
res = ""
for vec in diff_vec:
res += f"{vec @ ub_matrix}\n"
hkl_textareainput.value = res
else:
hkl_textareainput.value = None
results_table_source = ColumnDataSource(dict())
results_table = DataTable(
source=results_table_source,
columns=[
TableColumn(field="label", title="Label", width=50),
TableColumn(field="crystal_id", title="Crystal ID", width=100),
TableColumn(field="match_rate", title="Match Rate", width=100),
TableColumn(field="matched_peaks", title="Matched Peaks", width=100),
TableColumn(field="column_5", title="", width=100),
TableColumn(field="ub_matrix", title="UB Matrix", width=250),
],
height=300,
width=700,
autosize_mode="none",
index_position=None,
)
results_table_source.selected.on_change("indices", results_table_select_callback)
tab_layout = row(
column(
path_prefix_textinput,
selection_list,
lattice_const_textinput,
max_res_spinner,
seed_pool_size_spinner,
seed_len_tol_spinner,
seed_angle_tol_spinner,
eval_hkl_tol_spinner,
process_button,
),
column(results_table, row(hkl_textareainput)),
)
return Panel(child=tab_layout, title="spind")
def gauss(x, *p):
"""Defines Gaussian function
Args:
A - amplitude, mu - position of the center, sigma - width
Returns:
Gaussian function
"""
A, mu, sigma = p
return A * np.exp(-((x - mu) ** 2) / (2.0 * sigma ** 2))
def prepare_event_file(export_filename, roi_dict, path_prefix=""):
diff_vec = []
p0 = [1.0, 0.0, 1.0]
maxfev = 100000
with open(export_filename, "w") as f:
for file, rois in roi_dict.items():
dat = pyzebra.read_detector_data(path_prefix + file + ".hdf")
wave = dat["wave"]
ddist = dat["ddist"]
gamma = dat["gamma"][0]
omega = dat["omega"][0]
nu = dat["nu"][0]
chi = dat["chi"][0]
phi = dat["phi"][0]
scan_motor = dat["scan_motor"]
var_angle = dat[scan_motor]
for roi in rois:
x0, xN, y0, yN, fr0, frN = roi
data_roi = dat["data"][fr0:frN, y0:yN, x0:xN]
cnts = np.sum(data_roi, axis=(1, 2))
coeff, _ = curve_fit(gauss, range(len(cnts)), cnts, p0=p0, maxfev=maxfev)
m = cnts.mean()
sd = cnts.std()
snr_cnts = np.where(sd == 0, 0, m / sd)
frC = fr0 + coeff[1]
var_F = var_angle[math.floor(frC)]
var_C = var_angle[math.ceil(frC)]
frStep = frC - math.floor(frC)
var_step = var_C - var_F
var_p = var_F + var_step * frStep
if scan_motor == "gamma":
gamma = var_p
elif scan_motor == "omega":
omega = var_p
elif scan_motor == "nu":
nu = var_p
elif scan_motor == "chi":
chi = var_p
elif scan_motor == "phi":
phi = var_p
intensity = coeff[1] * abs(coeff[2] * var_step) * math.sqrt(2) * math.sqrt(np.pi)
projX = np.sum(data_roi, axis=(0, 1))
coeff, _ = curve_fit(gauss, range(len(projX)), projX, p0=p0, maxfev=maxfev)
x_pos = x0 + coeff[1]
projY = np.sum(data_roi, axis=(0, 2))
coeff, _ = curve_fit(gauss, range(len(projY)), projY, p0=p0, maxfev=maxfev)
y_pos = y0 + coeff[1]
ga, nu = pyzebra.det2pol(ddist, gamma, nu, x_pos, y_pos)
diff_vector = pyzebra.z1frmd(wave, ga, omega, chi, phi, nu)
d_spacing = float(pyzebra.dandth(wave, diff_vector)[0])
dv1, dv2, dv3 = diff_vector.flatten() * 1e10
diff_vec.append(diff_vector.flatten())
f.write(f"{x_pos} {y_pos} {intensity} {snr_cnts} {dv1} {dv2} {dv3} {d_spacing}\n")
return diff_vec
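Because the fitted Gaussian centre frC usually falls between two detector frames, the scan angle at the peak is linearly interpolated between the angles of the neighbouring frames. A standalone sketch of that step, with names matching the code above and illustrative values:
import math
def angle_at_fractional_frame(var_angle, frC):
    # linearly interpolate the scan angle at a fractional frame index
    var_F = var_angle[math.floor(frC)]
    var_C = var_angle[math.ceil(frC)]
    frStep = frC - math.floor(frC)
    return var_F + (var_C - var_F) * frStep
# frames measured at omega = [10.0, 10.1, 10.2], peak centre at frame 1.25
print(angle_at_fractional_frame([10.0, 10.1, 10.2], 1.25))  # -> 10.125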


@@ -1,75 +0,0 @@
import numpy as np
import scipy as sc
from scipy.interpolate import interp1d
from scipy.signal import savgol_filter
def ccl_findpeaks(
scan,
int_threshold=0.8,
prominence=50,
smooth=False,
window_size=7,
poly_order=3,
variable="om",
):
"""function iterates through the dictionary created by load_cclv2 and locates peaks for each scan
args: scan - a single scan,
int_threshold - fraction of threshold_intensity/max_intensity, must be positive num between 0 and 1
i.e. will only detect peaks above 75% of max intensity
prominence - defines a drop of values that must be between two peaks, must be positive number
i.e. if promimence is 20, it will detect two neigbouring peaks of 300 and 310 intesities,
if none of the itermediate values are lower that 290
smooth - if true, smooths data by savitzky golay filter, if false - no smoothing
window_size - window size for savgol filter, must be odd positive integer
poly_order = order of the polynomial used in savgol filter, must be positive integer smaller than
"""
if not 0 <= int_threshold <= 1:
int_threshold = 0.8
print(
"Invalid value for int_threshold, select value between 0 and 1, new value set to:",
int_threshold,
)
if not isinstance(window_size, int) or (window_size % 2) == 0 or window_size <= 1:
window_size = 7
print(
"Invalid value for window_size, select positive odd integer, new value set to!:",
window_size,
)
if not isinstance(poly_order, int) or window_size < poly_order:
poly_order = 3
print(
"Invalid value for poly_order, select positive integer smaller than window_size, new value set to:",
poly_order,
)
if not isinstance(prominence, (int, float)) and prominence < 0:
prominence = 50
print("Invalid value for prominence, select positive number, new value set to:", prominence)
omega = scan[variable]
counts = np.array(scan["Counts"])
if smooth:
itp = interp1d(omega, counts, kind="linear")
absintensity = [abs(number) for number in counts]
lowest_intensity = min(absintensity)
counts[counts < 0] = lowest_intensity
smooth_peaks = savgol_filter(itp(omega), window_size, poly_order)
else:
smooth_peaks = counts
peaks, properties = sc.signal.find_peaks(
smooth_peaks, height=int_threshold * max(smooth_peaks), prominence=prominence
)
scan["peak_indexes"] = peaks
scan["peak_heights"] = properties["peak_heights"]
scan["smooth_peaks"] = smooth_peaks # smoothed curve


@@ -58,37 +58,22 @@ META_VARS_FLOAT = (
META_UB_MATRIX = ("ub1j", "ub2j", "ub3j")
CCL_FIRST_LINE = (
("scan_number", int),
("h_index", float),
("k_index", float),
("l_index", float),
)
CCL_FIRST_LINE = (("idx", int), ("h", float), ("k", float), ("l", float))
CCL_ANGLES = {
"bi": (
("twotheta_angle", float),
("omega_angle", float),
("chi_angle", float),
("phi_angle", float),
),
"nb": (
("gamma_angle", float),
("omega_angle", float),
("nu_angle", float),
("unkwn_angle", float),
),
"bi": (("twotheta", float), ("omega", float), ("chi", float), ("phi", float)),
"nb": (("gamma", float), ("omega", float), ("nu", float), ("skip_angle", float)),
}
CCL_SECOND_LINE = (
("n_points", int),
("angle_step", float),
("monitor", float),
("temperature", float),
("mag_field", float),
("temp", float),
("mf", float),
("date", str),
("time", str),
("scan_type", str),
("scan_motor", str),
)
AREA_METHODS = ("fit_area", "int_area")
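The constant tuples above drive a zip-based parse of each scan's two header lines: tokens are paired with (name, type) entries and coerced in one pass. A toy sketch of the first-line parse, with an illustrative input line:
CCL_FIRST_LINE = (("idx", int), ("h", float), ("k", float), ("l", float))
line = "  1   2.0   0.0   1.0"
s = {name: typ(tok) for tok, (name, typ) in zip(line.split(), CCL_FIRST_LINE)}
print(s)  # -> {'idx': 1, 'h': 2.0, 'k': 0.0, 'l': 1.0}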
@@ -114,61 +99,106 @@ def load_1D(filepath):
def parse_1D(fileobj, data_type):
metadata = {"data_type": data_type}
# read metadata
metadata = {}
for line in fileobj:
if "=" in line:
variable, value = line.split("=")
variable, value = line.split("=", 1)
variable = variable.strip()
if variable in META_VARS_FLOAT:
value = value.strip()
if variable in META_VARS_STR:
metadata[variable] = value
elif variable in META_VARS_FLOAT:
if variable == "2-theta": # fix that angle name not to be an expression
variable = "twotheta"
if variable in ("a", "b", "c", "alpha", "beta", "gamma"):
variable += "_cell"
metadata[variable] = float(value)
elif variable in META_VARS_STR:
metadata[variable] = str(value)[:-1].strip()
elif variable in META_UB_MATRIX:
metadata[variable] = re.findall(r"[-+]?\d*\.\d+|\d+", str(value))
if "ub" not in metadata:
metadata["ub"] = np.zeros((3, 3))
row = int(variable[-2]) - 1
metadata["ub"][row, :] = list(map(float, value.split()))
if "#data" in line:
# this is the end of metadata and the start of data section
break
# handle older files that don't contain "zebra_mode" metadata
if "zebra_mode" not in metadata:
metadata["zebra_mode"] = "nb"
# read data
scan = {}
scan = []
if data_type == ".ccl":
ccl_first_line = (*CCL_FIRST_LINE, *CCL_ANGLES[metadata["zebra_mode"]])
ccl_first_line = CCL_FIRST_LINE + CCL_ANGLES[metadata["zebra_mode"]]
ccl_second_line = CCL_SECOND_LINE
for line in fileobj:
d = {}
# skip empty/whitespace lines before start of any scan
if not line or line.isspace():
continue
s = {}
# first line
for param, (param_name, param_type) in zip(line.split(), ccl_first_line):
d[param_name] = param_type(param)
s[param_name] = param_type(param)
# second line
next_line = next(fileobj)
for param, (param_name, param_type) in zip(next_line.split(), ccl_second_line):
d[param_name] = param_type(param)
s[param_name] = param_type(param)
d["om"] = np.linspace(
d["omega_angle"] - (d["n_points"] / 2) * d["angle_step"],
d["omega_angle"] + (d["n_points"] / 2) * d["angle_step"],
d["n_points"],
)
if s["scan_motor"] != "om":
raise Exception("Unsupported variable name in ccl file.")
# "om" -> "omega"
s["scan_motor"] = "omega"
# overwrite metadata, because it only refers to the scan center
half_dist = (s["n_points"] - 1) / 2 * s["angle_step"]
s["omega"] = np.linspace(s["omega"] - half_dist, s["omega"] + half_dist, s["n_points"])
# subsequent lines with counts
counts = []
while len(counts) < d["n_points"]:
counts.extend(map(int, next(fileobj).split()))
d["Counts"] = counts
while len(counts) < s["n_points"]:
counts.extend(map(float, next(fileobj).split()))
s["Counts"] = np.array(counts)
scan[d["scan_number"]] = d
if s["h"].is_integer() and s["k"].is_integer() and s["l"].is_integer():
s["h"], s["k"], s["l"] = map(int, (s["h"], s["k"], s["l"]))
scan.append({**metadata, **s})
elif data_type == ".dat":
# skip the first 2 rows, the third row contains the column names
next(fileobj)
next(fileobj)
# TODO: this might need to be adapted in the future, when "gamma" is added to dat files
if metadata["zebra_mode"] == "nb":
metadata["gamma"] = metadata["twotheta"]
s = defaultdict(list)
match = re.search("Scanning Variables: (.*), Steps: (.*)", next(fileobj))
if match.group(1) == "h, k, l":
steps = match.group(2).split()
for step, ind in zip(steps, "hkl"):
if float(step) != 0:
scan_motor = ind
break
else:
scan_motor = match.group(1)
s["scan_motor"] = scan_motor
match = re.search("(.*) Points, Mode: (.*), Preset (.*)", next(fileobj))
if match.group(2) != "Monitor":
raise Exception("Unknown mode in dat file.")
s["monitor"] = float(match.group(3))
col_names = next(fileobj).split()
data_cols = defaultdict(list)
for line in fileobj:
if "END-OF-DATA" in line:
@@ -176,100 +206,105 @@ def parse_1D(fileobj, data_type):
break
for name, val in zip(col_names, line.split()):
data_cols[name].append(float(val))
s[name].append(float(val))
try:
data_cols["h_index"] = float(metadata["title"].split()[-3])
data_cols["k_index"] = float(metadata["title"].split()[-2])
data_cols["l_index"] = float(metadata["title"].split()[-1])
except (ValueError, IndexError):
print("seems hkl is not in title")
for name in col_names:
s[name] = np.array(s[name])
data_cols["om"] = np.array(data_cols["om"])
# "om" -> "omega"
if s["scan_motor"] == "om":
s["scan_motor"] = "omega"
s["omega"] = s["om"]
del s["om"]
data_cols["temperature"] = metadata["temp"]
try:
data_cols["mag_field"] = metadata["mf"]
except KeyError:
print("Mag_field not present in dat file")
# "tt" -> "temp"
elif s["scan_motor"] == "tt":
s["scan_motor"] = "temp"
s["temp"] = s["tt"]
del s["tt"]
data_cols["omega_angle"] = metadata["omega"]
data_cols["n_points"] = len(data_cols["om"])
data_cols["monitor"] = data_cols["Monitor1"][0]
data_cols["twotheta_angle"] = metadata["2-theta"]
data_cols["chi_angle"] = metadata["chi"]
data_cols["phi_angle"] = metadata["phi"]
data_cols["nu_angle"] = metadata["nu"]
# "mf" stays "mf"
# "phi" stays "phi"
data_cols["scan_number"] = 1
scan[data_cols["scan_number"]] = dict(data_cols)
if "h" not in s:
s["h"] = s["k"] = s["l"] = float("nan")
for param in ("mf", "temp"):
if param not in metadata:
s[param] = 0
s["idx"] = 1
scan.append({**metadata, **s})
else:
print("Unknown file extention")
# utility information
if all(
s["h_index"].is_integer() and s["k_index"].is_integer() and s["l_index"].is_integer()
for s in scan.values()
):
metadata["indices"] = "hkl"
else:
metadata["indices"] = "real"
metadata["data_type"] = data_type
metadata["area_method"] = AREA_METHODS[0]
return {"meta": metadata, "scan": scan}
return scan
def export_comm(data, path, lorentz=False, hkl_precision=2):
"""exports data in the *.comm format
:param lorentz: perform Lorentz correction
:param path: path to file + name
:param data: data to export, a dict after peak fitting
def export_1D(data, path, area_method=AREA_METHODS[0], lorentz=False, hkl_precision=2):
"""Exports data in the .comm/.incomm format
Scans with integer/real hkl values are saved in .comm/.incomm files correspondingly. If no scans
are present for a particular output format, that file won't be created.
"""
zebra_mode = data["meta"]["zebra_mode"]
if data["meta"]["indices"] == "hkl":
extension = ".comm"
else: # data["meta"]["indices"] == "real":
extension = ".incomm"
zebra_mode = data[0]["zebra_mode"]
file_content = {".comm": [], ".incomm": []}
with open(str(path + extension), "w") as out_file:
for key, scan in data["scan"].items():
if "fit" not in scan:
print("Scan skipped - no fit value for:", key)
continue
for scan in data:
if "fit" not in scan:
continue
scan_str = f"{key:6}"
idx_str = f"{scan['idx']:6}"
h, k, l = scan["h_index"], scan["k_index"], scan["l_index"]
if data["meta"]["indices"] == "hkl":
hkl_str = f"{int(h):6}{int(k):6}{int(l):6}"
else: # data["meta"]["indices"] == "real"
hkl_str = f"{h:8.{hkl_precision}f}{k:8.{hkl_precision}f}{l:8.{hkl_precision}f}"
h, k, l = scan["h"], scan["k"], scan["l"]
hkl_are_integers = isinstance(h, int) # if True, other indices are of type 'int' too
if hkl_are_integers:
hkl_str = f"{h:6}{k:6}{l:6}"
else:
hkl_str = f"{h:8.{hkl_precision}f}{k:8.{hkl_precision}f}{l:8.{hkl_precision}f}"
area_method = data["meta"]["area_method"]
area_n = scan["fit"][area_method].n
area_s = scan["fit"][area_method].s
for name, param in scan["fit"].params.items():
if "amplitude" in name:
area_n = param.value
area_s = param.stderr
break
else:
area_n = 0
area_s = 0
# apply lorentz correction to area
if lorentz:
if zebra_mode == "bi":
twotheta_angle = np.deg2rad(scan["twotheta_angle"])
corr_factor = np.sin(twotheta_angle)
else: # zebra_mode == "nb":
gamma_angle = np.deg2rad(scan["gamma_angle"])
nu_angle = np.deg2rad(scan["nu_angle"])
corr_factor = np.sin(gamma_angle) * np.cos(nu_angle)
if area_n is None or area_s is None:
print(f"Couldn't export scan: {scan['idx']}")
continue
area_n = np.abs(area_n * corr_factor)
area_s = np.abs(area_s * corr_factor)
# apply lorentz correction to area
if lorentz:
if zebra_mode == "bi":
twotheta = np.deg2rad(scan["twotheta"])
corr_factor = np.sin(twotheta)
else: # zebra_mode == "nb":
gamma = np.deg2rad(scan["gamma"])
nu = np.deg2rad(scan["nu"])
corr_factor = np.sin(gamma) * np.cos(nu)
area_str = f"{area_n:10.2f}{area_s:10.2f}"
area_n = np.abs(area_n * corr_factor)
area_s = np.abs(area_s * corr_factor)
ang_str = ""
for angle, _ in CCL_ANGLES[zebra_mode]:
ang_str = ang_str + f"{scan[angle]:8}"
area_str = f"{area_n:10.2f}{area_s:10.2f}"
out_file.write(scan_str + hkl_str + area_str + ang_str + "\n")
ang_str = ""
for angle, _ in CCL_ANGLES[zebra_mode]:
if angle == scan["scan_motor"]:
angle_center = (np.min(scan[angle]) + np.max(scan[angle])) / 2
else:
angle_center = scan[angle]
ang_str = ang_str + f"{angle_center:8g}"
ref = file_content[".comm"] if hkl_are_integers else file_content[".incomm"]
ref.append(idx_str + hkl_str + area_str + ang_str + "\n")
for ext, content in file_content.items():
if content:
with open(path + ext, "w") as out_file:
out_file.writelines(content)
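A usage sketch of the exporter above, assuming a dataset parsed with parse_1D and fitted with fit_scan (the input file name and the model dict are illustrative; the hints layout matches fitparams_factory in the panel code):
import pyzebra
from pyzebra.ccl_io import AREA_METHODS
def hints(params):
    n = len(params)
    return dict(param=params, value=[None] * n, vary=[True] * n, min=[None] * n, max=[None] * n)
fit_params = {"linear-0": hints(["slope", "intercept"]), "gaussian-1": hints(["amplitude", "center", "sigma"])}
with open("example.ccl") as f:  # hypothetical input file
    dataset = pyzebra.parse_1D(f, ".ccl")
pyzebra.normalize_dataset(dataset)
for scan in dataset:
    pyzebra.fit_scan(scan, fit_params)
# writes example.comm and/or example.incomm, depending on whether hkl are integers
pyzebra.export_1D(dataset, "example", area_method=AREA_METHODS[0], lorentz=False)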

pyzebra/ccl_process.py Normal file

@@ -0,0 +1,139 @@
import itertools
import os
import numpy as np
from lmfit.models import GaussianModel, LinearModel, PseudoVoigtModel, VoigtModel
from .ccl_io import CCL_ANGLES
PARAM_PRECISIONS = {
"twotheta": 0.1,
"chi": 0.1,
"nu": 0.1,
"phi": 0.05,
"omega": 0.05,
"gamma": 0.05,
"temp": 1,
"mf": 0.001,
"ub": 0.01,
}
MAX_RANGE_GAP = {
"omega": 0.5,
}
def normalize_dataset(dataset, monitor=100_000):
for scan in dataset:
monitor_ratio = monitor / scan["monitor"]
scan["Counts"] *= monitor_ratio
scan["monitor"] = monitor
def merge_duplicates(dataset):
for scan_i, scan_j in itertools.combinations(dataset, 2):
if _parameters_match(scan_i, scan_j):
merge_scans(scan_i, scan_j)
def _parameters_match(scan1, scan2):
zebra_mode = scan1["zebra_mode"]
if zebra_mode != scan2["zebra_mode"]:
return False
for param in ("ub", "temp", "mf", *(vars[0] for vars in CCL_ANGLES[zebra_mode])):
if param.startswith("skip"):
# ignore skip parameters, like the last angle in 'nb' zebra mode
continue
if param == scan1["scan_motor"] == scan2["scan_motor"]:
# check if ranges of variable parameter overlap
range1 = scan1[param]
range2 = scan2[param]
# maximum gap between ranges of the scanning parameter (default 0)
max_range_gap = MAX_RANGE_GAP.get(param, 0)
if max(range1[0] - range2[-1], range2[0] - range1[-1]) > max_range_gap:
return False
elif np.max(np.abs(scan1[param] - scan2[param])) > PARAM_PRECISIONS[param]:
return False
return True
def merge_datasets(dataset1, dataset2):
for scan_j in dataset2:
for scan_i in dataset1:
if _parameters_match(scan_i, scan_j):
merge_scans(scan_i, scan_j)
break
dataset1.append(scan_j)
def merge_scans(scan1, scan2):
omega = np.concatenate((scan1["omega"], scan2["omega"]))
counts = np.concatenate((scan1["Counts"], scan2["Counts"]))
index = np.argsort(omega)
scan1["omega"] = omega[index]
scan1["Counts"] = counts[index]
scan2["active"] = False
fname1 = os.path.basename(scan1["original_filename"])
fname2 = os.path.basename(scan2["original_filename"])
print(f'Merging scans: {scan1["idx"]} ({fname1}) <-- {scan2["idx"]} ({fname2})')
def fit_scan(scan, model_dict, fit_from=None, fit_to=None):
if fit_from is None:
fit_from = -np.inf
if fit_to is None:
fit_to = np.inf
y_fit = scan["Counts"]
x_fit = scan[scan["scan_motor"]]
# apply fitting range
fit_ind = (fit_from <= x_fit) & (x_fit <= fit_to)
y_fit = y_fit[fit_ind]
x_fit = x_fit[fit_ind]
model = None
for model_index, (model_name, model_param) in enumerate(model_dict.items()):
model_name, _ = model_name.split("-")
prefix = f"f{model_index}_"
if model_name == "linear":
_model = LinearModel(prefix=prefix)
elif model_name == "gaussian":
_model = GaussianModel(prefix=prefix)
elif model_name == "voigt":
_model = VoigtModel(prefix=prefix)
elif model_name == "pvoigt":
_model = PseudoVoigtModel(prefix=prefix)
else:
raise ValueError(f"Unknown model name: '{model_name}'")
_init_guess = _model.guess(y_fit, x=x_fit)
for param_index, param_name in enumerate(model_param["param"]):
param_hints = {}
for hint_name in ("value", "vary", "min", "max"):
tmp = model_param[hint_name][param_index]
if tmp is None:
param_hints[hint_name] = getattr(_init_guess[prefix + param_name], hint_name)
else:
param_hints[hint_name] = tmp
_model.set_param_hint(param_name, **param_hints)
if model is None:
model = _model
else:
model += _model
weights = [1 / np.sqrt(val) if val != 0 else 1 for val in y_fit]
scan["fit"] = model.fit(y_fit, x=x_fit, weights=weights)


@@ -1,228 +0,0 @@
import numpy as np
import uncertainties as u
from lmfit import Model, Parameters
from scipy.integrate import simps
def bin_data(array, binsize):
if isinstance(binsize, int) and 0 < binsize < len(array):
return [
np.mean(array[binsize * i : binsize * i + binsize])
for i in range(int(np.ceil(len(array) / binsize)))
]
else:
print("Binsize need to be positive integer smaller than lenght of array")
return array
def find_nearest(array, value):
# find nearest value and return index
array = np.asarray(array)
idx = (np.abs(array - value)).argmin()
return idx
def create_uncertanities(y, y_err):
# create array with uncertainties for error propagation
combined = np.array([])
for i in range(len(y)):
part = u.ufloat(y[i], y_err[i])
combined = np.append(combined, part)
return combined
def fitccl(
scan,
guess,
vary,
constraints_min,
constraints_max,
numfit_min=None,
numfit_max=None,
binning=None,
):
"""Made for fitting of ccl date where 1 peak is expected. Allows for combination of gaussian and linear model combination
:param scan: scan in the data dict (i.e. M123)
:param guess: initial guess for the fitting, if none, some values are added automatically in order (see below)
:param vary: True if parameter can vary during fitting, False if it to be fixed
:param numfit_min: minimal value on x axis for numerical integration - if none is centre of gaussian minus 3 sigma
:param numfit_max: maximal value on x axis for numerical integration - if none is centre of gaussian plus 3 sigma
:param constraints_min: min constranits value for fit
:param constraints_max: max constranits value for fit
:param binning : binning of the data
:return data dict with additional values
order for guess, vary, constraints_min, constraints_max:
[Gaussian centre, Gaussian sigma, Gaussian amplitude, background slope, background intercept]
examples:
guess = [None, None, 100, 0, None]
vary = [True, True, True, True, True]
constraints_min = [23, None, 50, 0, 0]
constraints_min = [80, None, 1000, 0, 100]
"""
if "peak_indexes" not in scan:
scan["peak_indexes"] = []
if len(scan["peak_indexes"]) > 1:
# return in case of more than 1 peaks
return
if binning is None or binning == 0 or binning == 1:
x = list(scan["om"])
y = list(scan["Counts"])
y_err = list(np.sqrt(y)) if scan.get("sigma", None) is None else list(scan["sigma"])
if not scan["peak_indexes"]:
centre = np.mean(x)
else:
centre = x[int(scan["peak_indexes"])]
else:
x = list(scan["om"])
if not scan["peak_indexes"]:
centre = np.mean(x)
else:
centre = x[int(scan["peak_indexes"])]
x = bin_data(x, binning)
y = list(scan["Counts"])
y_err = list(np.sqrt(y)) if scan.get("sigma", None) is None else list(scan["sigma"])
combined = bin_data(create_uncertanities(y, y_err), binning)
y = [combined[i].n for i in range(len(combined))]
y_err = [combined[i].s for i in range(len(combined))]
if len(scan["peak_indexes"]) == 0:
# Case for no peak, gaussian in centre, sigma as 20% of range
peak_index = find_nearest(x, np.mean(x))
guess[0] = centre if guess[0] is None else guess[0]
guess[1] = (x[-1] - x[0]) / 5 if guess[1] is None else guess[1]
guess[2] = 50 if guess[2] is None else guess[2]
guess[3] = 0 if guess[3] is None else guess[3]
guess[4] = np.mean(y) if guess[4] is None else guess[4]
constraints_min[2] = 0
elif len(scan["peak_indexes"]) == 1:
# case for one peak, takes into account the user's guesses
peak_height = scan["peak_heights"]
guess[0] = centre if guess[0] is None else guess[0]
guess[1] = 0.1 if guess[1] is None else guess[1]
guess[2] = float(peak_height / 10) if guess[2] is None else float(guess[2])
guess[3] = 0 if guess[3] is None else guess[3]
guess[4] = np.median(x) if guess[4] is None else guess[4]
constraints_min[0] = np.min(x) if constraints_min[0] is None else constraints_min[0]
constraints_max[0] = np.max(x) if constraints_max[0] is None else constraints_max[0]
def gaussian(x, g_cen, g_width, g_amp):
"""1-d gaussian: gaussian(x, amp, cen, wid)"""
return (g_amp / (np.sqrt(2 * np.pi) * g_width)) * np.exp(
-((x - g_cen) ** 2) / (2 * g_width ** 2)
)
def background(x, slope, intercept):
"""background"""
return slope * (x - centre) + intercept
mod = Model(gaussian) + Model(background)
params = Parameters()
params.add_many(
("g_cen", guess[0], bool(vary[0]), np.min(x), np.max(x), None, None),
("g_width", guess[1], bool(vary[1]), constraints_min[1], constraints_max[1], None, None),
("g_amp", guess[2], bool(vary[2]), constraints_min[2], constraints_max[2], None, None),
("slope", guess[3], bool(vary[3]), constraints_min[3], constraints_max[3], None, None),
("intercept", guess[4], bool(vary[4]), constraints_min[4], constraints_max[4], None, None),
)
# the weighted fit
weights = [np.abs(1 / val) if val != 0 else 1 for val in y_err]
try:
result = mod.fit(y, params, weights=weights, x=x, calc_covar=True)
except ValueError:
print(f"Couldn't fit scan {scan['scan_number']}")
return
if result.params["g_amp"].stderr is None:
result.params["g_amp"].stderr = result.params["g_amp"].value
elif result.params["g_amp"].stderr > result.params["g_amp"].value:
result.params["g_amp"].stderr = result.params["g_amp"].value
# u.ufloat to work with uncertainties
fit_area = u.ufloat(result.params["g_amp"].value, result.params["g_amp"].stderr)
comps = result.eval_components()
if len(scan["peak_indexes"]) == 0:
# for case of no peak, there is no reason to integrate, therefore fit and int are equal
int_area = fit_area
elif len(scan["peak_indexes"]) == 1:
gauss_3sigmamin = find_nearest(
x, result.params["g_cen"].value - 3 * result.params["g_width"].value
)
gauss_3sigmamax = find_nearest(
x, result.params["g_cen"].value + 3 * result.params["g_width"].value
)
numfit_min = gauss_3sigmamin if numfit_min is None else find_nearest(x, numfit_min)
numfit_max = gauss_3sigmamax if numfit_max is None else find_nearest(x, numfit_max)
it = -1
while abs(numfit_max - numfit_min) < 3:
# in the case the peak is very thin and numerical integration would be on zero omega
# difference, finds the closest values
it = it + 1
numfit_min = find_nearest(
x,
result.params["g_cen"].value - 3 * (1 + it / 10) * result.params["g_width"].value,
)
numfit_max = find_nearest(
x,
result.params["g_cen"].value + 3 * (1 + it / 10) * result.params["g_width"].value,
)
if x[numfit_min] < np.min(x):
# makes sure that the values supplied by the user lie in the omega range
# can be omitted by users who know what they're doing
numfit_min = gauss_3sigmamin
print("Minimal integration value outside of x range")
elif x[numfit_min] >= x[numfit_max]:
numfit_min = gauss_3sigmamin
print("Minimal integration value higher than maximal")
else:
pass
if x[numfit_max] > np.max(x):
numfit_max = gauss_3sigmamax
print("Maximal integration value outside of x range")
elif x[numfit_max] <= x[numfit_min]:
numfit_max = gauss_3sigmamax
print("Maximal integration value lower than minimal")
else:
pass
count_errors = create_uncertanities(y, y_err)
# create error vector for numerical integration propagation
num_int_area = simps(count_errors[numfit_min:numfit_max], x[numfit_min:numfit_max])
slope_err = u.ufloat(result.params["slope"].value, result.params["slope"].stderr)
# pulls the nominal and error values from fit (slope)
intercept_err = u.ufloat(
result.params["intercept"].value, result.params["intercept"].stderr
)
# pulls the nominal and error values from fit (intercept)
background_errors = np.array([])
for j in range(len(x[numfit_min:numfit_max])):
# creates nominal and error vector for numerical integration of background
bg = slope_err * (x[j] - centre) + intercept_err
background_errors = np.append(background_errors, bg)
num_int_background = simps(background_errors, x[numfit_min:numfit_max])
int_area = num_int_area - num_int_background
d = {}
for pars in result.params:
d[str(pars)] = (result.params[str(pars)].value, result.params[str(pars)].vary)
print("Scan", scan["scan_number"])
print(result.fit_report())
d["ratio"] = (result.params["g_amp"].value - int_area.n) / result.params["g_amp"].value
d["int_area"] = int_area
d["fit_area"] = u.ufloat(result.params["g_amp"].value, result.params["g_amp"].stderr)
d["full_report"] = result.fit_report()
d["result"] = result
d["comps"] = comps
d["numfit"] = [numfit_min, numfit_max]
d["x_fit"] = x
scan["fit"] = d


@@ -1,167 +0,0 @@
import numpy as np
from lmfit import Model, Parameters
from scipy.integrate import simps
import matplotlib.pyplot as plt
import uncertainties as u
from lmfit.models import GaussianModel
from lmfit.models import VoigtModel
from lmfit.models import PseudoVoigtModel
def bin_data(array, binsize):
if isinstance(binsize, int) and 0 < binsize < len(array):
return [
np.mean(array[binsize * i : binsize * i + binsize])
for i in range(int(np.ceil(len(array) / binsize)))
]
else:
print("Binsize need to be positive integer smaller than lenght of array")
return array
def create_uncertanities(y, y_err):
# create array with uncertainties for error propagation
combined = np.array([])
for i in range(len(y)):
part = u.ufloat(y[i], y_err[i])
combined = np.append(combined, part)
return combined
def find_nearest(array, value):
# find nearest value and return index
array = np.asarray(array)
idx = (np.abs(array - value)).argmin()
return idx
# predefined peak positions
# peaks = [6.2, 8.1, 9.9, 11.5]
peaks = [23.5, 24.5]
# peaks = [24]
def fitccl(scan, variable="om", peak_type="gauss", binning=None):
x = list(scan[variable])
y = list(scan["Counts"])
peak_centre = np.mean(x)
if binning is None or binning == 0 or binning == 1:
x = list(scan["om"])
y = list(scan["Counts"])
y_err = list(np.sqrt(y)) if scan.get("sigma", None) is None else list(scan["sigma"])
print(scan["peak_indexes"])
if not scan["peak_indexes"]:
peak_centre = np.mean(x)
else:
peak_centre = x[int(scan["peak_indexes"])]
else:
x = list(scan["om"])
if not scan["peak_indexes"]:
peak_centre = np.mean(x)
else:
peak_centre = x[int(scan["peak_indexes"])]
x = bin_data(x, binning)
y = list(scan["Counts"])
y_err = list(np.sqrt(y)) if scan.get("sigma", None) is None else list(scan["sigma"])
combined = bin_data(create_uncertanities(y, y_err), binning)
y = [combined[i].n for i in range(len(combined))]
y_err = [combined[i].s for i in range(len(combined))]
def background(x, slope, intercept):
"""background"""
return slope * (x - peak_centre) + intercept
def gaussian(x, center, g_sigma, amplitude):
"""1-d gaussian: gaussian(x, amp, cen, wid)"""
return (amplitude / (np.sqrt(2.0 * np.pi) * g_sigma)) * np.exp(
-((x - center) ** 2) / (2 * g_sigma ** 2)
)
def lorentzian(x, center, l_sigma, amplitude):
"""1d lorentzian"""
return (amplitude / (1 + ((x - center) / l_sigma) ** 2)) / (np.pi * l_sigma)
def pseudoVoigt1(x, center, g_sigma, amplitude, l_sigma, fraction):
"""PseudoVoight peak with different widths of lorenzian and gaussian"""
return (1 - fraction) * gaussian(x, center, g_sigma, amplitude) + fraction * (
lorentzian(x, center, l_sigma, amplitude)
)
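# note: unlike lmfit's PseudoVoigtModel, which derives both component widths from a
# single sigma, pseudoVoigt1 lets the Gaussian and Lorentzian widths (g_sigma, l_sigma)
# vary independently; fraction mixes the two normalized profiles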
mod = Model(background)
params = Parameters()
params.add_many(
("slope", 0, True, None, None, None, None), ("intercept", 0, False, None, None, None, None)
)
for i in range(len(peaks)):
if peak_type == "gauss":
mod = mod + GaussianModel(prefix="p%d_" % (i + 1))
params.add(str("p%d_" % (i + 1) + "amplitude"), 20, True, 0, None, None)
params.add(str("p%d_" % (i + 1) + "center"), peaks[i], True, None, None, None)
params.add(str("p%d_" % (i + 1) + "sigma"), 0.2, True, 0, 5, None)
elif peak_type == "voigt":
mod = mod + VoigtModel(prefix="p%d_" % (i + 1))
params.add(str("p%d_" % (i + 1) + "amplitude"), 20, True, 0, None, None)
params.add(str("p%d_" % (i + 1) + "center"), peaks[i], True, None, None, None)
params.add(str("p%d_" % (i + 1) + "sigma"), 0.2, True, 0, 3, None)
params.add(str("p%d_" % (i + 1) + "gamma"), 0.2, True, 0, 5, None)
elif peak_type == "pseudovoigt":
mod = mod + PseudoVoigtModel(prefix="p%d_" % (i + 1))
params.add(str("p%d_" % (i + 1) + "amplitude"), 20, True, 0, None, None)
params.add(str("p%d_" % (i + 1) + "center"), peaks[i], True, None, None, None)
params.add(str("p%d_" % (i + 1) + "sigma"), 0.2, True, 0, 5, None)
params.add(str("p%d_" % (i + 1) + "fraction"), 0.5, True, -5, 5, None)
elif peak_type == "pseudovoigt1":
mod = mod + Model(pseudoVoigt1, prefix="p%d_" % (i + 1))
params.add(str("p%d_" % (i + 1) + "amplitude"), 20, True, 0, None, None)
params.add(str("p%d_" % (i + 1) + "center"), peaks[i], True, None, None, None)
params.add(str("p%d_" % (i + 1) + "g_sigma"), 0.2, True, 0, 5, None)
params.add(str("p%d_" % (i + 1) + "l_sigma"), 0.2, True, 0, 5, None)
params.add(str("p%d_" % (i + 1) + "fraction"), 0.5, True, 0, 1, None)
# add parameters
result = mod.fit(
y, params, weights=[np.abs(1 / y_err[i]) for i in range(len(y_err))], x=x, calc_covar=True
)
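# with weights = 1/sigma the fit minimizes the standard chi-square,
# sum(((y - model) / sigma)**2), and calc_covar=True keeps the covariance
# matrix so that result.params[...].stderr is available afterwards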
comps = result.eval_components()
reportstring = list()
for keys in result.params:
if result.params[keys].value is not None:
str2 = np.around(result.params[keys].value, 3)
else:
str2 = 0
if result.params[keys].stderr is not None:
str3 = np.around(result.params[keys].stderr, 3)
else:
str3 = 0
reportstring.append("%s = %2.3f +/- %2.3f" % (keys, str2, str3))
reportstring = "\n".join(reportstring)
plt.figure(figsize=(20, 10))
plt.plot(x, result.best_fit, "k-", label="Best fit")
plt.plot(x, y, "b-", label="Original data")
plt.plot(x, comps["background"], "g--", label="Line component")
for i in range(len(peaks)):
plt.plot(
x,
comps[str("p%d_" % (i + 1))],
"r--",
)
plt.fill_between(x, comps[str("p%d_" % (i + 1))], alpha=0.4, label=str("p%d_" % (i + 1)))
plt.legend()
plt.text(
np.min(x),
np.max(y),
reportstring,
fontsize=9,
verticalalignment="top",
)
plt.title(str(peak_type))
plt.xlabel("Omega [deg]")
plt.ylabel("Counts [a.u.]")
plt.show()
print(result.fit_report())
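fitccl expects a scan dictionary with at least om, Counts and peak_indexes entries (keys inferred from the code above); a minimal call on synthetic data might look like:

import numpy as np

om = np.linspace(23.0, 25.0, 41)
counts = 100 * np.exp(-((om - 24.0) ** 2) / 0.02) + 5
scan = {"om": list(om), "Counts": list(counts), "peak_indexes": []}  # [] -> centre falls back to mean(om)
fitccl(scan, variable="om", peak_type="gauss", binning=None)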


@@ -41,7 +41,7 @@ def read_detector_data(filepath):
filepath (str): File path of an h5 file.
Returns:
ndarray: A 3D array of data, rot_angle, pol_angle, tilt_angle.
ndarray: A 3D array of data, omega, gamma, nu.
"""
with h5py.File(filepath, "r") as h5f:
data = h5f["/entry1/area_detector2/data"][:]
@@ -52,20 +52,37 @@ def read_detector_data(filepath):
det_data = {"data": data}
det_data["rot_angle"] = h5f["/entry1/area_detector2/rotation_angle"][:] # om, sometimes ph
det_data["pol_angle"] = h5f["/entry1/ZEBRA/area_detector2/polar_angle"][:] # gammad
det_data["tlt_angle"] = h5f["/entry1/ZEBRA/area_detector2/tilt_angle"][:] # nud
if "/entry1/zebra_mode" in h5f:
det_data["zebra_mode"] = h5f["/entry1/zebra_mode"][0].decode()
else:
det_data["zebra_mode"] = "nb"
# om, sometimes ph
if det_data["zebra_mode"] == "nb":
det_data["omega"] = h5f["/entry1/area_detector2/rotation_angle"][:]
else: # bi
det_data["omega"] = h5f["/entry1/sample/rotation_angle"][:]
det_data["gamma"] = h5f["/entry1/ZEBRA/area_detector2/polar_angle"][:] # gammad
det_data["nu"] = h5f["/entry1/ZEBRA/area_detector2/tilt_angle"][:] # nud
det_data["ddist"] = h5f["/entry1/ZEBRA/area_detector2/distance"][:]
det_data["wave"] = h5f["/entry1/ZEBRA/monochromator/wavelength"][:]
det_data["chi_angle"] = h5f["/entry1/sample/chi"][:] # ch
det_data["phi_angle"] = h5f["/entry1/sample/phi"][:] # ph
det_data["UB"] = h5f["/entry1/sample/UB"][:].reshape(3, 3)
det_data["chi"] = h5f["/entry1/sample/chi"][:] # ch
det_data["phi"] = h5f["/entry1/sample/phi"][:] # ph
det_data["ub"] = h5f["/entry1/sample/UB"][:].reshape(3, 3)
for var in ("omega", "gamma", "nu", "chi", "phi"):
if abs(det_data[var][0] - det_data[var][-1]) > 0.1:
det_data["scan_motor"] = var
break
else:
raise ValueError("No angles that vary")
# optional parameters
if "/entry1/sample/magnetic_field" in h5f:
det_data["magnetic_field"] = h5f["/entry1/sample/magnetic_field"][:]
det_data["mf"] = h5f["/entry1/sample/magnetic_field"][:]
if "/entry1/sample/temperature" in h5f:
det_data["temperature"] = h5f["/entry1/sample/temperature"][:]
det_data["temp"] = h5f["/entry1/sample/temperature"][:]
return det_data
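The scan-motor detection above relies on Python's for/else: the else branch runs only if no break happened, i.e. no angle changed by more than 0.1 degree over the scan. The same pattern in isolation, on toy data:

det_data = {"omega": [10.0, 10.0], "gamma": [30.0, 35.0], "nu": [5.0, 5.0],
            "chi": [0.0, 0.0], "phi": [0.0, 0.0]}
for var in ("omega", "gamma", "nu", "chi", "phi"):
    if abs(det_data[var][0] - det_data[var][-1]) > 0.1:
        det_data["scan_motor"] = var
        break
else:
    raise ValueError("No angles that vary")
print(det_data["scan_motor"])  # -> gamma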


@@ -1,302 +0,0 @@
import numpy as np
import uncertainties as u
def create_tuples(x, y, y_err):
"""creates tuples for sorting and merginng of the data
Counts need to be normalized to monitor before"""
t = list()
for i in range(len(x)):
tup = (x[i], y[i], y_err[i])
t.append(tup)
return t
def normalize(scan, monitor):
"""Normalizes the measurement to monitor, checks if sigma exists, otherwise creates it
:arg dict : dictionary to from which to tkae the scan
:arg key : which scan to normalize from dict1
:arg monitor : final monitor
:return counts - normalized counts
:return sigma - normalized sigma"""
counts = np.array(scan["Counts"])
sigma = np.sqrt(counts) if "sigma" not in scan else scan["sigma"]
monitor_ratio = monitor / scan["monitor"]
scaled_counts = counts * monitor_ratio
scaled_sigma = np.array(sigma) * monitor_ratio
return scaled_counts, scaled_sigma
def merge(scan1, scan2, keep=True, monitor=100000):
"""merges the two tuples and sorts them, if om value is same, Counts value is average
averaging is propagated into sigma if dict1 == dict2, key[1] is deleted after merging
:arg dict1 : dictionary to which measurement will be merged
:arg dict2 : dictionary from which measurement will be merged
:arg scand_dict_result : result of scan_dict after auto function
:arg keep : if true, when monitors are same, does not change it, if flase, takes monitor
always
:arg monitor : final monitor after merging
note: dict1 and dict2 can be same dict
:return dict1 with merged scan"""
if keep:
if scan1["monitor"] == scan2["monitor"]:
monitor = scan1["monitor"]
# load om and Counts
x1, x2 = scan1["om"], scan2["om"]
cor_y1, y_err1 = normalize(scan1, monitor=monitor)
cor_y2, y_err2 = normalize(scan2, monitor=monitor)
# creates tuples (om, Counts, sigma) for sorting and further processing
tuple_list = create_tuples(x1, cor_y1, y_err1) + create_tuples(x2, cor_y2, y_err2)
# Sort the list on om and add 0 0 0 tuple to the last position
sorted_t = sorted(tuple_list, key=lambda tup: tup[0])
sorted_t.append((0, 0, 0))
om, Counts, sigma = [], [], []
seen = list()
for i in range(len(sorted_t) - 1):
if sorted_t[i][0] not in seen:
if sorted_t[i][0] != sorted_t[i + 1][0]:
om = np.append(om, sorted_t[i][0])
Counts = np.append(Counts, sorted_t[i][1])
sigma = np.append(sigma, sorted_t[i][2])
else:
om = np.append(om, sorted_t[i][0])
counts1, counts2 = sorted_t[i][1], sorted_t[i + 1][1]
sigma1, sigma2 = sorted_t[i][2], sorted_t[i + 1][2]
count_err1 = u.ufloat(counts1, sigma1)
count_err2 = u.ufloat(counts2, sigma2)
avg = (count_err1 + count_err2) / 2
Counts = np.append(Counts, avg.n)
sigma = np.append(sigma, avg.s)
seen.append(sorted_t[i][0])
else:
continue
scan1["om"] = om
scan1["Counts"] = Counts
scan1["sigma"] = sigma
scan1["monitor"] = monitor
print("merging done")
def check_UB(dict1, dict2, precision=0.01):
truth_list = list()
for i in ["ub1j", "ub2j", "ub3j"]:
for j in range(3):
if abs(abs(float(dict1["meta"][i][j])) - abs(float(dict2["meta"][i][j]))) < precision:
truth_list.append(True)
else:
truth_list.append(False)
# print(truth_list)
if all(truth_list):
return True
else:
return False
def check_zebramode(dict1, dict2):
if dict1["meta"]["zebra_mode"] == dict2["meta"]["zebra_mode"]:
return True
else:
return False
def check_angles(scan1, scan2, angles, precision):
truth_list = list()
for item in angles:
if abs(abs(scan1[item]) - abs(scan2[item])) <= precision[item]:
truth_list.append(True)
else:
truth_list.append(False)
if all(truth_list):
return True
else:
return False
def check_temp_mag(scan1, scan2):
temp_diff = 1
mag_diff = 0.001
truth_list = list()
try:
if abs(abs(scan1["mag_field"]) - abs(scan2["mag_field"])) <= mag_diff:
truth_list.append(True)
else:
truth_list.append(False)
except KeyError:
print("mag_field missing")
try:
if abs(abs(scan1["temperature"]) - abs(scan2["temperature"])) <= temp_diff:
truth_list.append(True)
else:
truth_list.append(False)
except KeyError:
print("temperature missing")
if all(truth_list):
return True
else:
return False
def merge_dups(dictionary, angles):
precision = {
"twotheta_angle": 0.1,
"chi_angle": 0.1,
"nu_angle": 0.1,
"phi_angle": 0.05,
"omega_angle": 0.05,
"gamma_angle": 0.05,
}
for i in list(dictionary["scan"]):
for j in list(dictionary["scan"]):
if i == j:
continue
else:
# print(i, j)
if check_angles(dictionary["scan"][i], dictionary["scan"][j], angles, precision):
merge(dictionary["scan"][i], dictionary["scan"][j])
print("merged %d with %d" % (i, j))
del dictionary["scan"][j]
merge_dups(dictionary, angles)
break
else:
continue
break
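# note: each successful merge mutates the dictionary while it is being scanned,
# so the function recurses to restart the scan and only falls through once a
# full pass finds no more duplicates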
def add_scan(dict1, dict2, scan_to_add):
max_scan = np.max(list(dict1["scan"]))
dict1["scan"][max_scan + 1] = dict2["scan"][scan_to_add]
if dict1.get("extra_meta") is None:
dict1["extra_meta"] = {}
dict1["extra_meta"][max_scan + 1] = dict2["meta"]
del dict2["scan"][scan_to_add]
def process(dict1, dict2, angles, precision):
# stop when the second dict is empty
# print(dict2["scan"])
if dict2["scan"]:
print("doing something")
# check UB matrices
if check_UB(dict1, dict2):
# iterate over second dict and check for matches
for i in list(dict2["scan"]):
for j in list(dict1["scan"]):
if check_angles(dict1["scan"][j], dict2["scan"][i], angles, precision):
# angles good, see the mag and temp
if check_temp_mag(dict1["scan"][j], dict2["scan"][i]):
merge(dict1["scan"][j], dict2["scan"][i])
print("merged")
del dict2["scan"][i]
process(dict1, dict2, angles, precision)
break
else:
add_scan(dict1, dict2, i)
print("scan added r")
process(dict1, dict2, angles, precision)
break
else:
add_scan(dict1, dict2, i)
print("scan added l")
process(dict1, dict2, angles, precision)
break
else:
continue
break
else:
# ask the user if they really want to add (currently the datasets are added unconditionally)
print("UBs are different, do you really wish to add datasets? Y/N")
dict1 = add_dict(dict1, dict2)
return
"""
1. check for bisecting or normal beam geometry in data files; select stt, om, chi, phi for bisecting; select stt, om, nu for normal beam
2. in the ccl files, check for identical stt, chi and nu within 0.1 degree, and, at the same time, for identical om and phi within 0.05 degree;
3. in the dat files, check for identical stt, chi and nu within 0.1 degree, and, at the same time,
for identical phi within 0.05 degree, and, at the same time, for identical om within 5 degrees."""
def unified_merge(dict1, dict2):
if not check_zebramode(dict1, dict2):
print("You are trying to add two files with different zebra mdoe")
return
# decide angles
if dict1["meta"]["zebra_mode"] == "bi":
angles = ["twotheta_angle", "omega_angle", "chi_angle", "phi_angle"]
elif dict1["meta"]["zebra_mode"] == "nb":
angles = ["gamma_angle", "omega_angle", "nu_angle"]
# precision of angles to check
precision = {
"twotheta_angle": 0.1,
"chi_angle": 0.1,
"nu_angle": 0.1,
"phi_angle": 0.05,
"omega_angle": 5,
"gamma_angle": 0.05,
}
if (dict1["meta"]["data_type"] == "ccl") and (dict2["meta"]["data_type"] == "ccl"):
precision["omega_angle"] = 0.05
# check for duplicates in original files
for d in dict1, dict2:
# no duplicates in dats
if d["meta"]["data_type"] == "dat":
continue
else:
merge_dups(d, angles)
process(dict1, dict2, angles, precision)
def add_dict(dict1, dict2):
"""adds two dictionaries, meta of the new is saved as meata+original_filename and
measurements are shifted to continue with numbering of first dict
:arg dict1 : dictionarry to add to
:arg dict2 : dictionarry from which to take the measurements
:return dict1 : combined dictionary
Note: dict1 must be made from ccl, otherwise we would have to change the structure of loaded
dat file"""
try:
if dict1["meta"]["zebra_mode"] != dict2["meta"]["zebra_mode"]:
print("You are trying to add scans measured with different zebra modes")
return
# this is for the qscan case
except KeyError:
print("Zebra mode not specified")
max_measurement_dict1 = max([keys for keys in dict1["scan"]])
new_filenames = np.arange(
max_measurement_dict1 + 1, max_measurement_dict1 + 1 + len(dict2["scan"])
)
if dict1.get("extra_meta") is None:
dict1["extra_meta"] = {}
new_meta_name = "meta" + str(dict2["meta"]["original_filename"])
if new_meta_name not in dict1:
for keys, name in zip(dict2["scan"], new_filenames):
dict2["scan"][keys]["file_of_origin"] = str(dict2["meta"]["original_filename"])
dict1["scan"][name] = dict2["scan"][keys]
dict1["extra_meta"][name] = dict2["meta"]
dict1[new_meta_name] = dict2["meta"]
else:
raise KeyError(
str(
"The file %s has alredy been added to %s"
% (dict2["meta"]["original_filename"], dict1["meta"]["original_filename"])
)
)
return dict1

View File

@ -1,488 +0,0 @@
import pickle
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.io as sio
import uncertainties as u
from mpl_toolkits.mplot3d import Axes3D  # don't delete, otherwise waterfall won't work
import collections
from .ccl_io import load_1D
def create_tuples(x, y, y_err):
"""creates tuples for sorting and merginng of the data
Counts need to be normalized to monitor before"""
t = list()
for i in range(len(x)):
tup = (x[i], y[i], y_err[i])
t.append(tup)
return t
def load_dats(filepath):
"""reads the txt file, get headers and data
:arg filepath to txt file or list of filepaths to the files
:return ccl like dictionary"""
if isinstance(filepath, str):
data_type = "txt"
file_list = list()
with open(filepath, "r") as infile:
col_names = next(infile).split(",")
col_names = [col_names[i].rstrip() for i in range(len(col_names))]
for line in infile:
if "END" in line:
break
file_list.append(tuple(line.split(",")))
elif isinstance(filepath, list):
data_type = "list"
file_list = filepath
dict1 = {}
for i in range(len(file_list)):
if not dict1:
if data_type == "txt":
dict1 = load_1D(file_list[0][0])
else:
dict1 = load_1D(file_list[0])
else:
if data_type == "txt":
dict1 = add_dict(dict1, load_1D(file_list[i][0]))
else:
dict1 = add_dict(dict1, load_1D(file_list[i]))
dict1["scan"][i + 1]["params"] = {}
if data_type == "txt":
for x in range(len(col_names) - 1):
dict1["scan"][i + 1]["params"][col_names[x + 1]] = float(file_list[i][x + 1])
return dict1
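# expected layout of the txt list file (inferred from the parser above; the paths
# and column names are illustrative):
#   path,temp,mf
#   /afs/.../scan1.dat,1.5,0.0
#   /afs/.../scan2.dat,4.2,0.0
#   END
# the first column is a .dat file path; the remaining columns end up in
# scan["params"], keyed by the header names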
def create_dataframe(dict1, variables):
"""Creates pandas dataframe from the dictionary
:arg ccl like dictionary
:return pandas dataframe"""
# create a dictionary into which we pull only the wanted items before turning it into a pd.DataFrame
pull_dict = {}
pull_dict["filenames"] = list()
for keys in variables:
for item in variables[keys]:
pull_dict[item] = list()
pull_dict["fit_area"] = list()
pull_dict["int_area"] = list()
pull_dict["Counts"] = list()
for keys in pull_dict:
print(keys)
# populate the dict
for keys in dict1["scan"]:
if "file_of_origin" in dict1["scan"][keys]:
pull_dict["filenames"].append(dict1["scan"][keys]["file_of_origin"].split("/")[-1])
else:
pull_dict["filenames"].append(dict1["meta"]["original_filename"].split("/")[-1])
pull_dict["fit_area"].append(dict1["scan"][keys]["fit"]["fit_area"])
pull_dict["int_area"].append(dict1["scan"][keys]["fit"]["int_area"])
pull_dict["Counts"].append(dict1["scan"][keys]["Counts"])
for key in variables:
for i in variables[key]:
pull_dict[i].append(_finditem(dict1["scan"][keys], i))
return pd.DataFrame(data=pull_dict)
def sort_dataframe(dataframe, sorting_parameter):
"""sorts the data frame and resets index"""
data = dataframe.sort_values(by=sorting_parameter)
data = data.reset_index(drop=True)
return data
def make_graph(data, sorting_parameter, style):
"""Makes the graph from the data based on style and sorting parameter
:arg data : pandas dataframe with data after sorting
:arg sorting_parameter to pull the correct variable and name
:arg style of the graph - waterfall, scatter, heatmap
:return matplotlib figure"""
if style == "waterfall":
mpl.rcParams["legend.fontsize"] = 10
fig = plt.figure()
ax = fig.gca(projection="3d")
for i in range(len(data)):
x = data["om"][i]
z = data["Counts"][i]
yy = [data[sorting_parameter][i]] * len(x)
ax.plot(x, yy, z, label="%s = %f" % (sorting_parameter, data[sorting_parameter][i]))
ax.legend()
ax.set_xlabel("Omega")
ax.set_ylabel(sorting_parameter)
ax.set_zlabel("counts")
elif style == "scatter":
fig = plt.figure()
plt.errorbar(
data[sorting_parameter],
[data["fit_area"][i].n for i in range(len(data["fit_area"]))],
[data["fit_area"][i].s for i in range(len(data["fit_area"]))],
capsize=5,
ecolor="green",
)
plt.xlabel(str(sorting_parameter))
plt.ylabel("Intesity")
elif style == "heat":
new_om = list()
for i in range(len(data)):
new_om = np.append(new_om, np.around(data["om"][i], 2), axis=0)
unique_om = np.unique(new_om)
color_matrix = np.zeros(shape=(len(data), len(unique_om)))
for i in range(len(data)):
for j in range(len(data["om"][i])):
if np.around(data["om"][i][j], 2) in np.unique(new_om):
color_matrix[i, j] = data["Counts"][i][j]
else:
continue
fig = plt.figure()
plt.pcolormesh(unique_om, data[sorting_parameter], color_matrix, shading="gouraud")
plt.xlabel("omega")
plt.ylabel(sorting_parameter)
plt.colorbar()
plt.clim(color_matrix.mean(), color_matrix.max())
return fig
def save_dict(obj, name):
"""saves dictionary as pickle file in binary format
:arg obj - object to save
:arg name - name of the file
NOTE: path should be added later"""
with open(name + ".pkl", "wb") as f:
pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
def load_dict(name):
"""load dictionary from picle file
:arg name - name of the file to load
NOTE: expect the file in the same folder, path should be added later
:return dictionary"""
with open(name + ".pkl", "rb") as f:
return pickle.load(f)
# pickle, mat, h5, txt, csv, json
def save_table(data, filetype, name, path=None):
print("Saving: ", filetype)
path = "" if path is None else path
if filetype == "pickle":
# to work with uncertainties, see the uncertainties module
with open(path + name + ".pkl", "wb") as f:
pickle.dump(data, f, pickle.HIGHEST_PROTOCOL)
if filetype == "mat":
# matlab doesn't allow certain special characters in variable names, and names can't
# start with numbers; if needed, add more entries to the remove_characters list
data["fit_area_nom"] = [data["fit_area"][i].n for i in range(len(data["fit_area"]))]
data["fit_area_err"] = [data["fit_area"][i].s for i in range(len(data["fit_area"]))]
data["int_area_nom"] = [data["int_area"][i].n for i in range(len(data["int_area"]))]
data["int_area_err"] = [data["int_area"][i].s for i in range(len(data["int_area"]))]
data = data.drop(columns=["fit_area", "int_area"])
remove_characters = [" ", "[", "]", "{", "}", "(", ")"]
for character in remove_characters:
data.columns = [
data.columns[i].replace(character, "") for i in range(len(data.columns))
]
sio.savemat((path + name + ".mat"), {name: col.values for name, col in data.items()})
if filetype == "csv" or "txt":
data["fit_area_nom"] = [data["fit_area"][i].n for i in range(len(data["fit_area"]))]
data["fit_area_err"] = [data["fit_area"][i].s for i in range(len(data["fit_area"]))]
data["int_area_nom"] = [data["int_area"][i].n for i in range(len(data["int_area"]))]
data["int_area_err"] = [data["int_area"][i].s for i in range(len(data["int_area"]))]
data = data.drop(columns=["fit_area", "int_area", "om", "Counts"])
if filetype == "csv":
data.to_csv(path + name + ".csv")
if filetype == "txt":
with open((path + name + ".txt"), "w") as outfile:
data.to_string(outfile)
if filetype == "h5":
hdf = pd.HDFStore((path + name + ".h5"))
hdf.put("data", data)
hdf.close()
if filetype == "json":
data.to_json((path + name + ".json"))
def normalize(scan, monitor):
"""Normalizes the measurement to monitor, checks if sigma exists, otherwise creates it
:arg dict : dictionary to from which to tkae the scan
:arg key : which scan to normalize from dict1
:arg monitor : final monitor
:return counts - normalized counts
:return sigma - normalized sigma"""
counts = np.array(scan["Counts"])
sigma = np.sqrt(counts) if "sigma" not in scan else scan["sigma"]
monitor_ratio = monitor / scan["monitor"]
scaled_counts = counts * monitor_ratio
scaled_sigma = np.array(sigma) * monitor_ratio
return scaled_counts, scaled_sigma
def merge(scan1, scan2, keep=True, monitor=100000):
"""merges the two tuples and sorts them, if om value is same, Counts value is average
averaging is propagated into sigma if dict1 == dict2, key[1] is deleted after merging
:arg dict1 : dictionary to which measurement will be merged
:arg dict2 : dictionary from which measurement will be merged
:arg scand_dict_result : result of scan_dict after auto function
:arg keep : if true, when monitors are same, does not change it, if flase, takes monitor
always
:arg monitor : final monitor after merging
note: dict1 and dict2 can be same dict
:return dict1 with merged scan"""
if keep:
if scan1["monitor"] == scan2["monitor"]:
monitor = scan1["monitor"]
# load om and Counts
x1, x2 = scan1["om"], scan2["om"]
cor_y1, y_err1 = normalize(scan1, monitor=monitor)
cor_y2, y_err2 = normalize(scan2, monitor=monitor)
# creates tuples (om, Counts, sigma) for sorting and further processing
tuple_list = create_tuples(x1, cor_y1, y_err1) + create_tuples(x2, cor_y2, y_err2)
# Sort the list on om and add 0 0 0 tuple to the last position
sorted_t = sorted(tuple_list, key=lambda tup: tup[0])
sorted_t.append((0, 0, 0))
om, Counts, sigma = [], [], []
seen = list()
for i in range(len(sorted_t) - 1):
if sorted_t[i][0] not in seen:
if sorted_t[i][0] != sorted_t[i + 1][0]:
om = np.append(om, sorted_t[i][0])
Counts = np.append(Counts, sorted_t[i][1])
sigma = np.append(sigma, sorted_t[i][2])
else:
om = np.append(om, sorted_t[i][0])
counts1, counts2 = sorted_t[i][1], sorted_t[i + 1][1]
sigma1, sigma2 = sorted_t[i][2], sorted_t[i + 1][2]
count_err1 = u.ufloat(counts1, sigma1)
count_err2 = u.ufloat(counts2, sigma2)
avg = (count_err1 + count_err2) / 2
Counts = np.append(Counts, avg.n)
sigma = np.append(sigma, avg.s)
seen.append(sorted_t[i][0])
else:
continue
scan1["om"] = om
scan1["Counts"] = Counts
scan1["sigma"] = sigma
scan1["monitor"] = monitor
print("merging done")
def add_dict(dict1, dict2):
"""adds two dictionaries, meta of the new is saved as meata+original_filename and
measurements are shifted to continue with numbering of first dict
:arg dict1 : dictionarry to add to
:arg dict2 : dictionarry from which to take the measurements
:return dict1 : combined dictionary
Note: dict1 must be made from ccl, otherwise we would have to change the structure of loaded
dat file"""
try:
if dict1["meta"]["zebra_mode"] != dict2["meta"]["zebra_mode"]:
print("You are trying to add scans measured with different zebra modes")
return
# this is for the qscan case
except KeyError:
print("Zebra mode not specified")
max_measurement_dict1 = max([keys for keys in dict1["scan"]])
new_filenames = np.arange(
max_measurement_dict1 + 1, max_measurement_dict1 + 1 + len(dict2["scan"])
)
new_meta_name = "meta" + str(dict2["meta"]["original_filename"])
if new_meta_name not in dict1:
for keys, name in zip(dict2["scan"], new_filenames):
dict2["scan"][keys]["file_of_origin"] = str(dict2["meta"]["original_filename"])
dict1["scan"][name] = dict2["scan"][keys]
dict1[new_meta_name] = dict2["meta"]
else:
raise KeyError(
str(
"The file %s has alredy been added to %s"
% (dict2["meta"]["original_filename"], dict1["meta"]["original_filename"])
)
)
return dict1
def auto(dict):
"""takes just unique tuples from all tuples in dictionary returend by scan_dict
intendet for automatic merge if you doesent want to specify what scans to merge together
args: dict - dictionary from scan_dict function
:return dict - dict without repetitions"""
for keys in dict:
tuple_list = dict[keys]
new = list()
for i in range(len(tuple_list)):
if tuple_list[0][0] == tuple_list[i][0]:
new.append(tuple_list[i])
dict[keys] = new
return dict
def scan_dict(dict, precision=0.5):
"""scans dictionary for duplicate angles indexes
:arg dict : dictionary to scan
:arg precision : in degrees; sometimes the angles are zero, so this is easier than
checking for zero division
:return dictionary with matching scans, if there are none, the dict is empty
note: can be checked by "not d", true if empty
"""
if dict["meta"]["zebra_mode"] == "bi":
angles = ["twotheta_angle", "omega_angle", "chi_angle", "phi_angle"]
elif dict["meta"]["zebra_mode"] == "nb":
angles = ["gamma_angle", "omega_angle", "nu_angle"]
else:
print("Unknown zebra mode")
return
d = {}
for i in dict["scan"]:
for j in dict["scan"]:
if dict["scan"][i] != dict["scan"][j]:
itup = list()
for k in angles:
itup.append(abs(abs(dict["scan"][i][k]) - abs(dict["scan"][j][k])))
if all(i <= precision for i in itup):
print(itup)
print([dict["scan"][i][k] for k in angles])
print([dict["scan"][j][k] for k in angles])
if str([np.around(dict["scan"][i][k], 0) for k in angles]) not in d:
d[str([np.around(dict["scan"][i][k], 0) for k in angles])] = list()
d[str([np.around(dict["scan"][i][k], 0) for k in angles])].append((i, j))
else:
d[str([np.around(dict["scan"][i][k], 0) for k in angles])].append((i, j))
else:
pass
else:
continue
return d
def _finditem(obj, key):
if key in obj:
return obj[key]
for k, v in obj.items():
if isinstance(v, dict):
item = _finditem(v, key)
if item is not None:
return item
def most_common(lst):
return max(set(lst), key=lst.count)
def variables(dictionary):
"""Funcrion to guess what variables will be used in the param study
i call pripary variable the one the array like variable, usually omega
and secondary the slicing variable, different for each scan,for example temperature"""
# find all variables that are in all scans
stdev_precision = 0.05
all_vars = list()
for keys in dictionary["scan"]:
all_vars.append([key for key in dictionary["scan"][keys] if key != "params"])
if dictionary["scan"][keys]["params"]:
all_vars.append(list(dictionary["scan"][keys]["params"]))
all_vars = [i for sublist in all_vars for i in sublist]
# get the ones that are in all scans
b = collections.Counter(all_vars)
inall = [key for key in b if b[key] == len(dictionary["scan"])]
# delete those that are obviously wrong
wrong = [
"NP",
"Counts",
"Monitor1",
"Monitor2",
"Monitor3",
"h_index",
"l_index",
"k_index",
"n_points",
"monitor",
"Time",
"omega_angle",
"twotheta_angle",
"chi_angle",
"phi_angle",
"nu_angle",
]
inall_red = [i for i in inall if i not in wrong]
# check for the primary variable; it needs to be a list, and we don't expect the
# primary variable to appear as a parameter (i.e. in scan["params"])
primary_candidates = list()
for key in dictionary["scan"]:
for i in inall_red:
if isinstance(_finditem(dictionary["scan"][key], i), list):
if np.std(_finditem(dictionary["scan"][key], i)) > stdev_precision:
primary_candidates.append(i)
# check which of the primary candidates are in every scan
primary_candidates = collections.Counter(primary_candidates)
second_round_primary_candidates = [
key for key in primary_candidates if primary_candidates[key] == len(dictionary["scan"])
]
if len(second_round_primary_candidates) == 1:
print("We've got a primary winner!", second_round_primary_candidates)
else:
print("Still not sure with primary:(", second_round_primary_candidates)
# check for the secondary variable; we expect a float/int or a non-changing array
# we don't need to check the primary ones
secondary_candidates = [i for i in inall_red if i not in second_round_primary_candidates]
# print("secondary candidates", secondary_candidates)
# select arrays and floats and ints
second_round_secondary_candidates = list()
for key in dictionary["scan"]:
for i in secondary_candidates:
if isinstance(_finditem(dictionary["scan"][key], i), float):
second_round_secondary_candidates.append(i)
elif isinstance(_finditem(dictionary["scan"][key], i), int):
second_round_secondary_candidates.append(i)
elif isinstance(_finditem(dictionary["scan"][key], i), list):
if np.std(_finditem(dictionary["scan"][key], i)) < stdev_precision:
second_round_secondary_candidates.append(i)
second_round_secondary_candidates = collections.Counter(second_round_secondary_candidates)
second_round_secondary_candidates = [
key
for key in second_round_secondary_candidates
if second_round_secondary_candidates[key] == len(dictionary["scan"])
]
# print("secondary candidates after second round", second_round_secondary_candidates)
# now we check if they vary between the scans
third_round_sec_candidates = list()
for i in second_round_secondary_candidates:
check_array = list()
for keys in dictionary["scan"]:
check_array.append(np.average(_finditem(dictionary["scan"][keys], i)))
# print(i, check_array, np.std(check_array))
if np.std(check_array) > stdev_precision:
third_round_sec_candidates.append(i)
if len(third_round_sec_candidates) == 1:
print("We've got a secondary winner!", third_round_sec_candidates)
else:
print("Still not sure with secondary :(", third_round_sec_candidates)
return {"primary": second_round_primary_candidates, "secondary": third_round_sec_candidates}


@@ -407,24 +407,24 @@ def box_int(file, box):
dat = pyzebra.read_detector_data(file)
sttC = dat["pol_angle"][0]
om = dat["rot_angle"]
nuC = dat["tlt_angle"][0]
sttC = dat["gamma"][0]
om = dat["omega"]
nuC = dat["nu"][0]
ddist = dat["ddist"]
# defining indices
x0, xN, y0, yN, fr0, frN = box
# omega fit
om = dat["rot_angle"][fr0:frN]
om = dat["omega"][fr0:frN]
cnts = np.sum(dat["data"][fr0:frN, y0:yN, x0:xN], axis=(1, 2))
p0 = [1.0, 0.0, 1.0]
coeff, var_matrix = curve_fit(gauss, range(len(cnts)), cnts, p0=p0)
frC = fr0 + coeff[1]
omF = dat["rot_angle"][math.floor(frC)]
omC = dat["rot_angle"][math.ceil(frC)]
omF = dat["omega"][math.floor(frC)]
omC = dat["omega"][math.ceil(frC)]
frStep = frC - math.floor(frC)
omStep = omC - omF
omP = omF + omStep * frStep
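The last lines above interpolate omega at the fitted fractional frame index frC between the two neighbouring frames. The same interpolation in isolation, with illustrative numbers:

import math

omega = [12.0, 12.2, 12.4, 12.6]  # omega value per detector frame (illustrative)
frC = 1.6                         # fitted fractional frame index
omF, omC = omega[math.floor(frC)], omega[math.ceil(frC)]
omP = omF + (omC - omF) * (frC - math.floor(frC))
print(omP)  # -> ~12.32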