168 Commits
0.2.1 ... 0.3.2

Author SHA1 Message Date
a2fceffc1b Updating for version 0.3.2 2021-05-10 17:50:15 +02:00
415d68b4dc Return empty string for non-present anatric param 2021-05-10 17:29:36 +02:00
00ff4117ea Isolate anatric subprocesses 2021-05-10 17:06:20 +02:00
67853b8db4 Avoid using temp_dir for anatric xml config preview 2021-05-10 16:34:58 +02:00
60787bccb7 Clarify path to spind 2021-05-10 15:13:38 +02:00
880d86d750 Fix spind output update issues
Fix #27
2021-05-06 18:22:56 +02:00
7a88e5e254 Adapt spind results display according to #27 2021-05-06 17:43:20 +02:00
20f2a8ada4 Update preview on datatable content change 2021-05-04 17:14:59 +02:00
42c092fc14 Fix Safari browser double file download
Introduce a small time delay between .comm/.incomm file downloads

For #24
2021-05-04 16:50:51 +02:00
8153db9f67 Add an extra index spinner 2021-05-04 12:08:39 +02:00
62c969d6ad Allow .ccl files in param study
Fix #28
2021-05-04 11:09:48 +02:00
085620abae Set conda channel_priority to 'strict' 2021-04-23 11:16:57 +02:00
9ebe290966 Export nan for area value/error in case of a bad fit 2021-04-21 12:41:19 +02:00
c9cd96c521 Set lower and upper bounds for center and sigma 2021-04-20 18:25:14 +02:00
d745cda4a5 Reduce hkl columns width to 4 in comm files
Fix #26
2021-04-20 15:07:36 +02:00
1b5f70afa0 Set f0_intercept and f1_amplitude >= 0
For #26
2021-04-20 15:03:27 +02:00
a034065a09 Use Slider for image index 2021-04-12 18:35:58 +02:00
2a60c86b48 Display experiment conditions in DataTable 2021-04-12 18:18:59 +02:00
ccc075975f Unify data files discovery via proposal number 2021-04-12 17:28:25 +02:00
4982b05de0 Updating for version 0.3.1 2021-04-12 09:10:35 +02:00
2b0c392a3e Vary intercept by default 2021-04-12 09:09:47 +02:00
099842b2bd Use TextInput for verbosity value 2021-04-09 17:01:55 +02:00
bd3efd698a Add results output widget for anatric 2021-04-09 14:52:40 +02:00
24f083e585 Treat the first 4 letters of proposal as a year 2021-04-09 10:14:09 +02:00
f43488af34 Layout improvements 2021-04-09 09:03:50 +02:00
1b90d53466 Use Tabs for algorithm params on anatric panel 2021-04-08 17:43:53 +02:00
c1b3a28351 Replace toggles with checkboxes
The CheckboxGroup widget's state handling is more readable
2021-04-08 16:56:50 +02:00
5b45685257 Remove bin size spinner
Binning will be replaced by a running average in the future
2021-04-08 15:31:44 +02:00
e7b28a4e75 Auto update export file preview 2021-04-08 15:23:53 +02:00
83a7d607a5 Forward stdout of anatric subprocs to pyzebra app 2021-04-07 17:01:01 +02:00
5eedd14b3f Handle DataFactory for 3 possible detectors 2021-04-07 16:47:48 +02:00
3db7dca7ba Add linear model with fixed default values of 0 2021-04-07 14:59:04 +02:00
b2d1a0be02 Add an extra y-axis for scanning_motor to overview
Also, fix #25
2021-04-07 14:07:51 +02:00
69d22dd067 Layout fixes on the spind tab 2021-04-07 09:59:53 +02:00
242da76c59 Adaptations to the displayed UB matrix 2021-04-07 09:54:31 +02:00
0c812a5dd5 Add TextAreaInput for UB matrix on spind panel 2021-04-06 17:13:18 +02:00
4cfcb3d396 Fix incorrect spind results handling 2021-04-06 17:03:33 +02:00
8018783eb5 Switch from DataRange1d to Range1d in overviews 2021-04-06 15:19:01 +02:00
fdb1609a41 Print the content of spind result file 2021-04-06 11:27:13 +02:00
e7dda3cda8 Print the content of spind event file 2021-03-26 16:32:01 +01:00
f788d74f15 Fix rendering on chrome 2021-03-26 16:03:34 +01:00
0c620d4a08 Updating for version 0.3.0 2021-03-26 09:16:18 +01:00
5d65afa194 Forward stdout of spind subprocs to pyzebra app 2021-03-12 16:59:43 +01:00
d312c99f96 Replace fit_columns with autosize_mode
fit_columns was deprecated in bokeh/2.2
2021-03-12 11:57:50 +01:00
526821e073 Display peak components and allow to hide plots 2021-03-11 18:00:41 +01:00
d606230feb Simplify data provision for js file export 2021-03-11 15:36:58 +01:00
ce5cff05a7 Better default names for exported files 2021-03-11 14:39:29 +01:00
a738f74f88 Do not auto fit non-exported scans (e.g. merged) 2021-03-11 11:07:37 +01:00
4785c97f33 Remove mpi4py from deps
* It is a dependency of an optional spind func
2021-03-10 18:29:33 +01:00
e68fedc7e5 Fix for parameter equal to 0 2021-03-10 18:23:03 +01:00
2581996625 Add h,k,l to a list of possible parameters 2021-03-10 17:56:53 +01:00
d014849abd Allow upload of multiple files in ccl_integrate 2021-03-10 17:42:08 +01:00
aa6e668f97 Minor label fixes 2021-03-10 17:32:03 +01:00
85c74c39f0 Add bulk loading of files from afs in ccl_integrate 2021-03-10 13:21:13 +01:00
0e97f44cc7 Preview/Download export data changes
* Preview button now sets data for downloading
2021-03-10 11:43:07 +01:00
9c5f7e6284 Fix and simplify export data content 2021-03-10 11:06:17 +01:00
167e5136b1 Add bulk loading of files from afs in param_study 2021-03-09 15:48:01 +01:00
21d642b7d7 Display fit range spans 2021-03-09 14:36:10 +01:00
fe68b1de7e Enable fit range selection 2021-03-09 14:13:34 +01:00
3291a67e7d Reset selected parameter on new data open/append 2021-03-08 17:52:24 +01:00
041c5c0e8b Correctly handle scans of different length 2021-03-08 17:46:34 +01:00
566cebb01a Fix a bug in omega range calculation 2021-03-04 10:57:52 +01:00
a70b4fae57 Disable fit model change in ccl_integrate 2021-03-03 16:55:49 +01:00
97836b2906 Auto range for overview scatter plot 2021-03-03 16:53:06 +01:00
7ee8eba007 Add parameter selection option 2021-03-03 16:40:24 +01:00
5169e08777 Handle dat files with hkl scans 2021-03-03 16:10:51 +01:00
ed8e6d262f Postproc hkl indices only for ccl files 2021-03-03 16:08:47 +01:00
426bb16792 Cleanup 2021-03-03 14:56:55 +01:00
554716fc9a Add HoverTool to param study overview plot 2021-03-01 17:39:04 +01:00
69767da850 Use weights in fit 2021-03-01 15:46:54 +01:00
3e174f22e5 Improve naming consistency with lmfit 2021-03-01 15:18:55 +01:00
7c1e2fdf0c Bump to python/3.7
Add mpi4py dep
2021-03-01 14:38:35 +01:00
b61327b5f2 Remove unused reset button 2021-03-01 13:43:54 +01:00
0e3aea142d Formatting fix for angles 2021-03-01 12:17:37 +01:00
c7b05d252f Enable export function 2021-03-01 12:17:23 +01:00
a65708004b Provide filename in merging info 2021-02-26 18:14:06 +01:00
af1336df78 Plot fit results on denser grid 2021-02-26 17:53:44 +01:00
966d6349df Add Legend to main plots 2021-02-26 13:05:57 +01:00
5dbb208e92 Correctly plot background component 2021-02-26 11:31:18 +01:00
af70302bf3 Disable temporarily unsupported export functionality 2021-02-26 11:12:21 +01:00
8727e25a2d Plot best_fit instead of gauss component 2021-02-26 11:03:01 +01:00
fac3592ab2 Update bokeh/2.3 2021-02-26 11:03:01 +01:00
599e2e1e74 Remove pandas dep 2021-02-26 11:03:01 +01:00
11fae5d47a Drop peakfinding step
The initial guesses via lmfit seem to work just fine
2021-02-26 11:03:01 +01:00
7ee4b1c86a Rename fit_result -> fit 2021-02-26 08:48:08 +01:00
e76623e55e Allow adding and removing fit model components 2021-02-25 17:44:03 +01:00
794d5c49d4 Use lmfit parameter guessing 2021-02-25 17:41:04 +01:00
0b4f4b1ce9 Replace offset with intercept in LinearModel 2021-02-25 16:47:56 +01:00
15bd970a72 Disable temporarily unsupported functionality 2021-02-25 16:11:16 +01:00
89ff0b15f5 Basic fit refactoring 2021-02-25 15:41:14 +01:00
9e3466fcaa Rename guess -> value
Keep it consistent with the lmfit naming convention
2021-02-24 15:44:23 +01:00
55f3198f9d Add fit model name to tag 2021-02-24 15:42:10 +01:00
747b008d5e Add initial selection on gauss component 2021-02-15 15:26:06 +01:00
1a7cd2a4b5 Support other than omega scanning variables 2021-02-15 15:08:32 +01:00
eb4e30b7e0 Remove "variable" and a utility rename 2021-02-15 14:26:01 +01:00
82f3c53380 Output center of the scan range 2021-02-12 17:12:19 +01:00
ac76d4b2c3 Add monitor spinner 2021-02-12 16:24:48 +01:00
dab52d9508 Update layouts 2021-02-12 15:59:47 +01:00
b63ef90e11 Keep all merged scans, but disable export for them 2021-02-12 13:53:51 +01:00
d5ac2c6d56 Fix export for scanning angle 2021-02-12 12:05:21 +01:00
36faad1d9d Export the same number of angles for nb and bi modes 2021-02-12 11:05:15 +01:00
9140798c74 Adjust the name of cell parameters
This way we avoid overwriting gamma in case of nb zebra mode
2021-02-11 16:36:42 +01:00
96eaa27e55 Add manual scan merging 2021-02-11 14:47:37 +01:00
b418fb8300 Assume gamma = 2-theta for 'nb' zebra mode 2021-02-11 13:52:31 +01:00
699c5f3d11 Improve scan merging feedback 2021-02-10 16:18:38 +01:00
6a822c4c85 Allow a small gap between ranges for scans to merge 2021-02-10 16:14:20 +01:00
c2e1f1def1 Fix original_filename read in param_study 2021-02-10 14:06:39 +01:00
2b9775faed Skip empty/whitespace lines before any scan in ccl 2021-02-10 13:39:53 +01:00
ea7d611819 Handle older files that don't contain "zebra_mode" 2021-02-10 13:20:22 +01:00
9ef07cff19 Split on the first occurrence of the "=" sign
Fix cases where string values also contain "=" signs
2021-02-10 12:45:04 +01:00
12077dd5f3 Merge based on variable parameter range overlap 2021-02-09 18:19:05 +01:00
3b1a2b1a0b Remove param_study_moduls.py 2021-02-09 17:43:16 +01:00
8eee968344 Simplify hkl type handling 2021-02-09 16:26:52 +01:00
97936d6c2a Save variable_name, fix dat file monitor reading 2021-02-09 16:13:45 +01:00
91d3a0ac9e Update DataTable width 2021-02-09 13:54:42 +01:00
4f1f03224c Do not try to read hkl values for dat files 2021-02-09 13:41:44 +01:00
16a47cf3b3 Refactor dataset merging procedure 2021-02-09 13:07:51 +01:00
e3de0f7217 Fix datatable selection 2021-02-09 12:17:37 +01:00
983e0dab42 Flatten structure of metadata in scans 2021-02-09 12:17:30 +01:00
a6f97f59e8 Convert Counts to float ndarray 2021-02-09 11:32:55 +01:00
bf3b44405d Skip reading unused angle in nb zebra mode 2021-02-09 11:28:27 +01:00
93a557fea9 Assign default value of 0 to temp and mf 2021-02-08 21:24:47 +01:00
b31c0b413c Store metadata in each scan 2021-02-08 21:21:13 +01:00
20527e8d2b Consolidate hkl and ub 2021-02-05 15:41:10 +01:00
e09538eaeb Consolidate temp, mf and zebra_mode naming 2021-02-05 14:48:48 +01:00
239949b7c0 Consolidate angle names between data formats 2021-02-05 14:48:48 +01:00
7e6df95c49 Track the original scan index 2021-02-05 14:48:48 +01:00
e38993e69d Keep scans in a list instead of a dict 2021-02-05 14:48:41 +01:00
6bf401aba8 Show hkl values for the peaks in spind panel
Fix #22
2021-02-02 17:59:45 +01:00
0c4fffff0d Show spind results in a DataTable
For #22
2021-02-02 17:59:18 +01:00
2c727d34bd Normalize data on import 2021-02-02 15:35:37 +01:00
5e979eb9e3 Reapply commits that add extra_meta 2021-02-02 10:38:05 +01:00
ba6a99b912 Remove area_method from metadata 2021-02-01 17:07:20 +01:00
315b025341 Remove redundant string conversions 2021-02-01 17:07:20 +01:00
d0b67b8565 Update merge_function.py
Fixed the normalization and changed the structure so we can merge duplicates without merging two dictionaries.
2021-01-24 15:14:25 +01:00
1761003d8a Fix hkl precision not affecting file export 2021-01-06 14:20:54 +01:00
d42a53ed47 Move indices from metadata to scan data 2021-01-06 11:39:03 +01:00
cd1e5c42c0 Do not provide user files with empty content 2021-01-05 18:15:26 +01:00
c2215e9b6d Utility renames 2021-01-05 17:45:08 +01:00
c5ec09a5e3 Split scans between .comm/.incomm files on export
For #21
2021-01-05 17:44:59 +01:00
b0a4e35d3d Optimize locations of Div and FileInput widgets 2021-01-05 14:37:51 +01:00
db85eee329 Add append option for files from AFS
Allow opening both .ccl and .dat files
For #21
2021-01-05 14:37:42 +01:00
67d0af292b Fix hkl precision select title
For #21
2021-01-05 12:05:09 +01:00
241c2a9779 Correction of normalization
suggested by Dr. Zolliker
2020-12-18 16:23:14 +01:00
8c265027bf Found a better place to put the numerical integration fix 2020-12-18 15:52:09 +01:00
f5c405bee8 Numerically integrate whole area
Added two lines to integrate the full area at the request of Oksana and Romain. This might be changed back after more testing, so I did not delete the previous code and only changed the final result, regardless of what the code produces. I find this the least invasive approach, if a bit messy.
2020-12-18 15:40:37 +01:00
ae33ee825e Support any variable angle 2020-12-16 17:23:23 +01:00
f694249298 Fix magnitude of the resulting UB matrix 2020-12-16 15:11:14 +01:00
b7ac6d4359 Fix UB matrix calculation 2020-12-16 11:15:57 +01:00
e27ba7c711 Fix temp_dir 2020-12-16 11:04:33 +01:00
ddaeda35c7 Fix issue parsing input on spind panel 2020-12-16 09:10:46 +01:00
dac1237009 Output UB matrix 2020-12-15 14:49:52 +01:00
999ceba3b7 Fix missing file extension 2020-12-15 14:38:54 +01:00
0a634ca8da Handle a case when spind doesn't return results 2020-12-15 14:28:33 +01:00
1f1762063d Add basic spind processing 2020-12-14 16:54:13 +01:00
fdb2027fd4 Add spind panel 2020-12-14 15:04:55 +01:00
559e18ed5d Add option to open hdf files via proposal number 2020-12-10 22:49:54 +01:00
d6202d4a6b Use pyzebra lib default anatric location 2020-12-10 22:07:04 +01:00
4b3b14e4ac Disable adding and removing fit funcs in param study 2020-12-10 16:46:48 +01:00
b4db41d672 Make geometry readonly on gui
Data files already contain that information
2020-12-10 16:35:30 +01:00
16174f5cc4 Fix zebra_mode dataset read 2020-12-10 16:27:00 +01:00
b50783f6be Correct auto detection of variable angles
Fix #20
2020-12-10 16:02:01 +01:00
f830acf70d Increase delay before transferring file to clients 2020-12-10 15:54:16 +01:00
ea41301605 Auto detect varying angle in h5 viewer 2020-12-10 14:36:58 +01:00
6a341e5001 Add experimental overview map plot 2020-12-10 11:24:27 +01:00
9bbed3b55d Cleanup after transitioning plots to Tabs 2020-12-10 10:13:41 +01:00
6bd2398f5e Updating for version 0.2.2 2020-12-09 16:51:47 +01:00
768dd77ef5 Increase delay before transferring file to clients 2020-12-09 16:51:21 +01:00
20 changed files with 1572 additions and 2311 deletions

View File

@@ -16,6 +16,7 @@ jobs:
run: |
$CONDA/bin/conda install --quiet --yes conda-build anaconda-client
$CONDA/bin/conda config --append channels conda-forge
$CONDA/bin/conda config --set channel_priority strict
$CONDA/bin/conda config --set anaconda_upload yes
- name: Build and upload

View File

@@ -15,15 +15,14 @@ build:
requirements:
build:
- python >=3.6
- python >=3.7
- setuptools
run:
- python >=3.6
- python >=3.7
- numpy
- scipy
- pandas
- h5py
- bokeh =2.2
- bokeh =2.3
- matplotlib
- numba
- lmfit

View File

@@ -1,9 +1,7 @@
from pyzebra.anatric import *
from pyzebra.ccl_findpeaks import ccl_findpeaks
from pyzebra.ccl_io import export_comm, load_1D, parse_1D
from pyzebra.fit2 import fitccl
from pyzebra.ccl_io import *
from pyzebra.h5 import *
from pyzebra.merge_function import add_dict, unified_merge
from pyzebra.xtal import *
from pyzebra.ccl_process import *
__version__ = "0.2.1"
__version__ = "0.3.2"

View File

@@ -23,8 +23,17 @@ REFLECTION_PRINTER_FORMATS = [
ALGORITHMS = ["adaptivemaxcog", "adaptivedynamic"]
def anatric(config_file, anatric_path="/afs/psi.ch/project/sinq/rhel7/bin/anatric"):
subprocess.run([anatric_path, config_file], check=True)
def anatric(config_file, anatric_path="/afs/psi.ch/project/sinq/rhel7/bin/anatric", cwd=None):
comp_proc = subprocess.run(
[anatric_path, config_file],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
cwd=cwd,
check=True,
text=True,
)
print(" ".join(comp_proc.args))
print(comp_proc.stdout)
class AnatricConfig:
@@ -51,10 +60,13 @@ class AnatricConfig:
def save_as(self, filename):
self._tree.write(filename)
def tostring(self):
return ET.tostring(self._tree.getroot(), encoding="unicode")
def _get_attr(self, name, tag, attr):
elem = self._tree.find(name).find(tag)
if elem is None:
return None
return ""
return elem.attrib[attr]
def _set_attr(self, name, tag, attr, value):
@@ -217,7 +229,7 @@ class AnatricConfig:
elem = self._tree.find("crystal").find("UB")
if elem is not None:
return elem.text
return None
return ""
@crystal_UB.setter
def crystal_UB(self, value):
@@ -236,12 +248,37 @@ class AnatricConfig:
@property
def dataFactory_dist1(self):
return self._tree.find("DataFactory").find("dist1").attrib["value"]
elem = self._tree.find("DataFactory").find("dist1")
if elem is not None:
return elem.attrib["value"]
return ""
@dataFactory_dist1.setter
def dataFactory_dist1(self, value):
self._tree.find("DataFactory").find("dist1").attrib["value"] = value
@property
def dataFactory_dist2(self):
elem = self._tree.find("DataFactory").find("dist2")
if elem is not None:
return elem.attrib["value"]
return ""
@dataFactory_dist2.setter
def dataFactory_dist2(self, value):
self._tree.find("DataFactory").find("dist2").attrib["value"] = value
@property
def dataFactory_dist3(self):
elem = self._tree.find("DataFactory").find("dist3")
if elem is not None:
return elem.attrib["value"]
return ""
@dataFactory_dist3.setter
def dataFactory_dist3(self, value):
self._tree.find("DataFactory").find("dist3").attrib["value"] = value
@property
def reflectionPrinter_format(self):
return self._tree.find("ReflectionPrinter").attrib["format"]
@@ -253,6 +290,14 @@ class AnatricConfig:
self._tree.find("ReflectionPrinter").attrib["format"] = value
@property
def reflectionPrinter_file(self):
return self._tree.find("ReflectionPrinter").attrib["file"]
@reflectionPrinter_file.setter
def reflectionPrinter_file(self, value):
self._tree.find("ReflectionPrinter").attrib["file"] = value
@property
def algorithm(self):
return self._tree.find("Algorithm").attrib["implementation"]
@@ -269,7 +314,7 @@ class AnatricConfig:
def _get_alg_attr(self, alg, tag, attr):
param_elem = self._alg_elems[alg].find(tag)
if param_elem is None:
return None
return ""
return param_elem.attrib[attr]
def _set_alg_attr(self, alg, tag, attr, value):
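The subprocess change above runs anatric with captured, interleaved output and an optional working directory, so concurrent runs no longer step on each other's files. A standalone sketch of the same pattern, assuming only the standard library:

import subprocess
import tempfile

def run_isolated(executable, config_file):
    # Each run gets a throwaway working directory so that parallel
    # invocations cannot clobber each other's intermediate files.
    with tempfile.TemporaryDirectory() as cwd:
        comp_proc = subprocess.run(
            [executable, config_file],
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,  # fold stderr into stdout
            cwd=cwd,
            check=True,                # raise on a non-zero exit code
            text=True,
        )
    # Forward the captured output, as the pyzebra app does for its
    # stdout widget.
    print(" ".join(comp_proc.args))
    print(comp_proc.stdout)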

View File

@@ -10,6 +10,7 @@ import panel_ccl_integrate
import panel_hdf_anatric
import panel_hdf_viewer
import panel_param_study
import panel_spind
doc = curdoc()
@@ -29,10 +30,11 @@ tab_hdf_viewer = panel_hdf_viewer.create()
tab_hdf_anatric = panel_hdf_anatric.create()
tab_ccl_integrate = panel_ccl_integrate.create()
tab_param_study = panel_param_study.create()
tab_spind = panel_spind.create()
doc.add_root(
column(
Tabs(tabs=[tab_hdf_viewer, tab_hdf_anatric, tab_ccl_integrate, tab_param_study]),
Tabs(tabs=[tab_hdf_viewer, tab_hdf_anatric, tab_ccl_integrate, tab_param_study, tab_spind]),
row(stdout_textareainput, bokeh_log_textareainput, sizing_mode="scale_both"),
)
)
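Every panel module here follows the same contract: a create() function that returns a bokeh Panel, which app.py stacks into Tabs. A minimal sketch of that contract, using only standard bokeh 2.x models:

from bokeh.io import curdoc
from bokeh.models import Div, Panel, Tabs

def create():
    # A real panel (e.g. panel_spind) builds its widget layout here.
    layout = Div(text="panel content")
    return Panel(child=layout, title="spind")

curdoc().add_root(Tabs(tabs=[create()]))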

View File

@@ -40,7 +40,7 @@ def main():
parser.add_argument(
"--anatric-path",
type=str,
default="/afs/psi.ch/project/sinq/rhel7/bin/anatric",
default=None,
help="path to anatric executable",
)

View File

@@ -3,15 +3,14 @@ import io
import os
import tempfile
import types
from copy import deepcopy
import numpy as np
from bokeh.layouts import column, row
from bokeh.models import (
Asterisk,
BasicTicker,
Button,
CheckboxEditor,
CheckboxGroup,
ColumnDataSource,
CustomJS,
DataRange1d,
@@ -20,8 +19,10 @@ from bokeh.models import (
Dropdown,
FileInput,
Grid,
Legend,
Line,
LinearAxis,
MultiLine,
MultiSelect,
NumberEditor,
Panel,
@@ -37,7 +38,6 @@ from bokeh.models import (
TableColumn,
TextAreaInput,
TextInput,
Toggle,
WheelZoomTool,
Whisker,
)
@@ -47,200 +47,237 @@ from pyzebra.ccl_io import AREA_METHODS
javaScript = """
setTimeout(function() {
const filename = 'output' + js_data.data['ext']
const blob = new Blob([js_data.data['cont']], {type: 'text/plain'})
const link = document.createElement('a');
document.body.appendChild(link);
const url = window.URL.createObjectURL(blob);
link.href = url;
link.download = filename;
link.click();
window.URL.revokeObjectURL(url);
document.body.removeChild(link);
}, 500);
"""
let j = 0;
for (let i = 0; i < js_data.data['fname'].length; i++) {
if (js_data.data['content'][i] === "") continue;
PROPOSAL_PATH = "/afs/psi.ch/project/sinqdata/2020/zebra/"
setTimeout(function() {
const blob = new Blob([js_data.data['content'][i]], {type: 'text/plain'})
const link = document.createElement('a');
document.body.appendChild(link);
const url = window.URL.createObjectURL(blob);
link.href = url;
link.download = js_data.data['fname'][i];
link.click();
window.URL.revokeObjectURL(url);
document.body.removeChild(link);
}, 100 * j)
j++;
}
"""
def create():
det_data = {}
fit_params = {}
peak_pos_textinput_lock = False
js_data = ColumnDataSource(data=dict(cont=[], ext=[]))
js_data = ColumnDataSource(data=dict(content=["", ""], fname=["", ""]))
def proposal_textinput_callback(_attr, _old, new):
ccl_path = os.path.join(PROPOSAL_PATH, new.strip())
ccl_file_list = []
for file in os.listdir(ccl_path):
if file.endswith(".ccl"):
ccl_file_list.append((os.path.join(ccl_path, file), file))
ccl_file_select.options = ccl_file_list
ccl_file_select.value = ccl_file_list[0][0]
proposal = new.strip()
year = new[:4]
proposal_path = f"/afs/psi.ch/project/sinqdata/{year}/zebra/{proposal}"
file_list = []
for file in os.listdir(proposal_path):
if file.endswith((".ccl", ".dat")):
file_list.append((os.path.join(proposal_path, file), file))
file_select.options = file_list
proposal_textinput = TextInput(title="Enter proposal number:", default_size=145)
proposal_textinput = TextInput(title="Proposal number:", width=210)
proposal_textinput.on_change("value", proposal_textinput_callback)
def _init_datatable():
scan_list = list(det_data["scan"].keys())
hkl = [
f'{int(m["h_index"])} {int(m["k_index"])} {int(m["l_index"])}'
if det_data["meta"]["indices"] == "hkl"
else f'{m["h_index"]} {m["k_index"]} {m["l_index"]}'
for m in det_data["scan"].values()
]
scan_list = [s["idx"] for s in det_data]
hkl = [f'{s["h"]} {s["k"]} {s["l"]}' for s in det_data]
export = [s.get("active", True) for s in det_data]
scan_table_source.data.update(
scan=scan_list,
hkl=hkl,
peaks=[0] * len(scan_list),
fit=[0] * len(scan_list),
export=[True] * len(scan_list),
scan=scan_list, hkl=hkl, fit=[0] * len(scan_list), export=export,
)
scan_table_source.selected.indices = []
scan_table_source.selected.indices = [0]
def ccl_file_select_callback(_attr, _old, new):
merge_options = [(str(i), f"{i} ({idx})") for i, idx in enumerate(scan_list)]
merge_source_select.options = merge_options
merge_source_select.value = merge_options[0][0]
merge_dest_select.options = merge_options
merge_dest_select.value = merge_options[0][0]
file_select = MultiSelect(title="Available .ccl/.dat files:", width=210, height=250)
def file_open_button_callback():
nonlocal det_data
with open(new) as file:
_, ext = os.path.splitext(new)
det_data = pyzebra.parse_1D(file, ext)
det_data = []
for f_name in file_select.value:
with open(f_name) as file:
base, ext = os.path.splitext(f_name)
if det_data:
append_data = pyzebra.parse_1D(file, ext)
pyzebra.normalize_dataset(append_data, monitor_spinner.value)
pyzebra.merge_datasets(det_data, append_data)
else:
det_data = pyzebra.parse_1D(file, ext)
pyzebra.normalize_dataset(det_data, monitor_spinner.value)
pyzebra.merge_duplicates(det_data)
js_data.data.update(fname=[base + ".comm", base + ".incomm"])
_init_datatable()
ccl_file_select = Select(title="Available .ccl files")
ccl_file_select.on_change("value", ccl_file_select_callback)
file_open_button = Button(label="Open New", width=100)
file_open_button.on_click(file_open_button_callback)
def file_append_button_callback():
for f_name in file_select.value:
with open(f_name) as file:
_, ext = os.path.splitext(f_name)
append_data = pyzebra.parse_1D(file, ext)
pyzebra.normalize_dataset(append_data, monitor_spinner.value)
pyzebra.merge_datasets(det_data, append_data)
_init_datatable()
file_append_button = Button(label="Append", width=100)
file_append_button.on_click(file_append_button_callback)
def upload_button_callback(_attr, _old, new):
nonlocal det_data
with io.StringIO(base64.b64decode(new).decode()) as file:
_, ext = os.path.splitext(upload_button.filename)
det_data = pyzebra.parse_1D(file, ext)
det_data = []
for f_str, f_name in zip(new, upload_button.filename):
with io.StringIO(base64.b64decode(f_str).decode()) as file:
base, ext = os.path.splitext(f_name)
if det_data:
append_data = pyzebra.parse_1D(file, ext)
pyzebra.normalize_dataset(append_data, monitor_spinner.value)
pyzebra.merge_datasets(det_data, append_data)
else:
det_data = pyzebra.parse_1D(file, ext)
pyzebra.normalize_dataset(det_data, monitor_spinner.value)
pyzebra.merge_duplicates(det_data)
js_data.data.update(fname=[base + ".comm", base + ".incomm"])
_init_datatable()
upload_button = FileInput(accept=".ccl")
upload_div = Div(text="or upload new .ccl/.dat files:", margin=(5, 5, 0, 5))
upload_button = FileInput(accept=".ccl,.dat", multiple=True, width=200)
upload_button.on_change("value", upload_button_callback)
def append_upload_button_callback(_attr, _old, new):
nonlocal det_data
with io.StringIO(base64.b64decode(new).decode()) as file:
_, ext = os.path.splitext(append_upload_button.filename)
append_data = pyzebra.parse_1D(file, ext)
for f_str, f_name in zip(new, append_upload_button.filename):
with io.StringIO(base64.b64decode(f_str).decode()) as file:
_, ext = os.path.splitext(f_name)
append_data = pyzebra.parse_1D(file, ext)
pyzebra.unified_merge(det_data, append_data)
pyzebra.normalize_dataset(append_data, monitor_spinner.value)
pyzebra.merge_datasets(det_data, append_data)
_init_datatable()
append_upload_button = FileInput(accept=".ccl,.dat")
append_upload_div = Div(text="append extra files:", margin=(5, 5, 0, 5))
append_upload_button = FileInput(accept=".ccl,.dat", multiple=True, width=200)
append_upload_button.on_change("value", append_upload_button_callback)
def monitor_spinner_callback(_attr, old, new):
if det_data:
pyzebra.normalize_dataset(det_data, new)
_update_plot(_get_selected_scan())
monitor_spinner = Spinner(title="Monitor:", mode="int", value=100_000, low=1, width=145)
monitor_spinner.on_change("value", monitor_spinner_callback)
def _update_table():
num_of_peaks = [len(scan.get("peak_indexes", [])) for scan in det_data["scan"].values()]
fit_ok = [(1 if "fit" in scan else 0) for scan in det_data["scan"].values()]
scan_table_source.data.update(peaks=num_of_peaks, fit=fit_ok)
fit_ok = [(1 if "fit" in scan else 0) for scan in det_data]
scan_table_source.data.update(fit=fit_ok)
def _update_plot(scan):
nonlocal peak_pos_textinput_lock
peak_pos_textinput_lock = True
scan_motor = scan["scan_motor"]
y = scan["Counts"]
x = scan["om"]
x = scan[scan_motor]
plot.axis[0].axis_label = scan_motor
plot_scatter_source.data.update(x=x, y=y, y_upper=y + np.sqrt(y), y_lower=y - np.sqrt(y))
num_of_peaks = len(scan.get("peak_indexes", []))
if num_of_peaks is not None and num_of_peaks > 0:
peak_indexes = scan["peak_indexes"]
if len(peak_indexes) == 1:
peak_pos_textinput.value = str(scan["om"][peak_indexes[0]])
else:
peak_pos_textinput.value = str([scan["om"][ind] for ind in peak_indexes])
plot_peak_source.data.update(x=scan["om"][peak_indexes], y=scan["peak_heights"])
plot_line_smooth_source.data.update(x=x, y=scan["smooth_peaks"])
else:
peak_pos_textinput.value = None
plot_peak_source.data.update(x=[], y=[])
plot_line_smooth_source.data.update(x=[], y=[])
peak_pos_textinput_lock = False
fit = scan.get("fit")
if fit is not None:
x = scan["fit"]["x_fit"]
plot_gauss_source.data.update(x=x, y=scan["fit"]["comps"]["gaussian"])
plot_bkg_source.data.update(x=x, y=scan["fit"]["comps"]["background"])
params = fit["result"].params
fit_output_textinput.value = (
"Gaussian: centre = %9.4f, sigma = %9.4f, area = %9.4f \n"
"background: slope = %9.4f, intercept = %9.4f \n"
"Int. area = %9.4f +/- %9.4f \n"
"fit area = %9.4f +/- %9.4f \n"
"ratio((fit-int)/fit) = %9.4f"
% (
params["g_cen"].value,
params["g_width"].value,
params["g_amp"].value,
params["slope"].value,
params["intercept"].value,
fit["int_area"].n,
fit["int_area"].s,
params["g_amp"].value,
params["g_amp"].stderr,
(params["g_amp"].value - fit["int_area"].n) / params["g_amp"].value,
)
)
numfit_min, numfit_max = fit["numfit"]
if numfit_min is None:
numfit_min_span.location = None
else:
numfit_min_span.location = x[numfit_min]
x_fit = np.linspace(x[0], x[-1], 100)
plot_fit_source.data.update(x=x_fit, y=fit.eval(x=x_fit))
if numfit_max is None:
numfit_max_span.location = None
else:
numfit_max_span.location = x[numfit_max]
x_bkg = []
y_bkg = []
xs_peak = []
ys_peak = []
comps = fit.eval_components(x=x_fit)
for i, model in enumerate(fit_params):
if "linear" in model:
x_bkg = x_fit
y_bkg = comps[f"f{i}_"]
elif any(val in model for val in ("gaussian", "voigt", "pvoigt")):
xs_peak.append(x_fit)
ys_peak.append(comps[f"f{i}_"])
plot_bkg_source.data.update(x=x_bkg, y=y_bkg)
plot_peak_source.data.update(xs=xs_peak, ys=ys_peak)
fit_output_textinput.value = fit.fit_report()
else:
plot_gauss_source.data.update(x=[], y=[])
plot_fit_source.data.update(x=[], y=[])
plot_bkg_source.data.update(x=[], y=[])
plot_peak_source.data.update(xs=[], ys=[])
fit_output_textinput.value = ""
numfit_min_span.location = None
numfit_max_span.location = None
# Main plot
plot = Plot(x_range=DataRange1d(), y_range=DataRange1d(), plot_height=400, plot_width=700)
plot = Plot(
x_range=DataRange1d(),
y_range=DataRange1d(only_visible=True),
plot_height=470,
plot_width=700,
)
plot.add_layout(LinearAxis(axis_label="Counts"), place="left")
plot.add_layout(LinearAxis(axis_label="Omega"), place="below")
plot.add_layout(LinearAxis(axis_label="Scan motor"), place="below")
plot.add_layout(Grid(dimension=0, ticker=BasicTicker()))
plot.add_layout(Grid(dimension=1, ticker=BasicTicker()))
plot_scatter_source = ColumnDataSource(dict(x=[0], y=[0], y_upper=[0], y_lower=[0]))
plot.add_glyph(plot_scatter_source, Scatter(x="x", y="y", line_color="steelblue"))
plot_scatter = plot.add_glyph(
plot_scatter_source, Scatter(x="x", y="y", line_color="steelblue")
)
plot.add_layout(Whisker(source=plot_scatter_source, base="x", upper="y_upper", lower="y_lower"))
plot_line_smooth_source = ColumnDataSource(dict(x=[0], y=[0]))
plot.add_glyph(
plot_line_smooth_source, Line(x="x", y="y", line_color="steelblue", line_dash="dashed")
)
plot_gauss_source = ColumnDataSource(dict(x=[0], y=[0]))
plot.add_glyph(plot_gauss_source, Line(x="x", y="y", line_color="red", line_dash="dashed"))
plot_fit_source = ColumnDataSource(dict(x=[0], y=[0]))
plot_fit = plot.add_glyph(plot_fit_source, Line(x="x", y="y"))
plot_bkg_source = ColumnDataSource(dict(x=[0], y=[0]))
plot.add_glyph(plot_bkg_source, Line(x="x", y="y", line_color="green", line_dash="dashed"))
plot_bkg = plot.add_glyph(
plot_bkg_source, Line(x="x", y="y", line_color="green", line_dash="dashed")
)
plot_peak_source = ColumnDataSource(dict(x=[], y=[]))
plot.add_glyph(plot_peak_source, Asterisk(x="x", y="y", size=10, line_color="red"))
plot_peak_source = ColumnDataSource(dict(xs=[[0]], ys=[[0]]))
plot_peak = plot.add_glyph(
plot_peak_source, MultiLine(xs="xs", ys="ys", line_color="red", line_dash="dashed")
)
numfit_min_span = Span(location=None, dimension="height", line_dash="dashed")
plot.add_layout(numfit_min_span)
fit_from_span = Span(location=None, dimension="height", line_dash="dashed")
plot.add_layout(fit_from_span)
numfit_max_span = Span(location=None, dimension="height", line_dash="dashed")
plot.add_layout(numfit_max_span)
fit_to_span = Span(location=None, dimension="height", line_dash="dashed")
plot.add_layout(fit_to_span)
plot.add_layout(
Legend(
items=[
("data", [plot_scatter]),
("best fit", [plot_fit]),
("peak", [plot_peak]),
("linear", [plot_bkg]),
],
location="top_left",
click_policy="hide",
)
)
plot.add_tools(PanTool(), WheelZoomTool(), ResetTool())
plot.toolbar.logo = None
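The open/append/upload callbacks above all share one pipeline: parse each file, normalize it to the monitor value, then either merge it into the already-loaded dataset or de-duplicate it against itself. A condensed sketch (function names from this diff; merge semantics per the commit log, which merges scans on overlapping ranges):

import os
import pyzebra

def load_files(filenames, monitor=100_000):
    dataset = []
    for f_name in filenames:
        _, ext = os.path.splitext(f_name)
        with open(f_name) as file:
            new_data = pyzebra.parse_1D(file, ext)
        pyzebra.normalize_dataset(new_data, monitor)
        if dataset:
            # Fold the new scans into the scans loaded so far.
            pyzebra.merge_datasets(dataset, new_data)
        else:
            dataset = new_data
            # First file: merge duplicate scans within the file itself.
            pyzebra.merge_duplicates(dataset)
    return dataset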
@@ -261,62 +298,65 @@ def create():
# skip unnecessary update caused by selection drop
return
_update_plot(det_data["scan"][scan_table_source.data["scan"][new[0]]])
_update_plot(det_data[new[0]])
def scan_table_source_callback(_attr, _old, _new):
_update_preview()
scan_table_source = ColumnDataSource(dict(scan=[], hkl=[], fit=[], export=[]))
scan_table_source.on_change("data", scan_table_source_callback)
scan_table_source = ColumnDataSource(dict(scan=[], hkl=[], peaks=[], fit=[], export=[]))
scan_table = DataTable(
source=scan_table_source,
columns=[
TableColumn(field="scan", title="scan"),
TableColumn(field="hkl", title="hkl"),
TableColumn(field="peaks", title="Peaks"),
TableColumn(field="fit", title="Fit"),
TableColumn(field="export", title="Export", editor=CheckboxEditor()),
TableColumn(field="scan", title="Scan", width=50),
TableColumn(field="hkl", title="hkl", width=100),
TableColumn(field="fit", title="Fit", width=50),
TableColumn(field="export", title="Export", editor=CheckboxEditor(), width=50),
],
width=250,
index_position=None,
width=310, # +60 because of the index column
height=350,
autosize_mode="none",
editable=True,
)
scan_table_source.selected.on_change("indices", scan_table_select_callback)
def _get_selected_scan():
selected_index = scan_table_source.selected.indices[0]
selected_scan_id = scan_table_source.data["scan"][selected_index]
return det_data["scan"][selected_scan_id]
return det_data[scan_table_source.selected.indices[0]]
def peak_pos_textinput_callback(_attr, _old, new):
if new is not None and not peak_pos_textinput_lock:
scan = _get_selected_scan()
merge_dest_select = Select(title="destination:", width=100)
merge_source_select = Select(title="source:", width=100)
peak_ind = (np.abs(scan["om"] - float(new))).argmin()
scan["peak_indexes"] = np.array([peak_ind], dtype=np.int64)
scan["peak_heights"] = np.array([scan["smooth_peaks"][peak_ind]])
_update_table()
_update_plot(scan)
def merge_button_callback():
scan_dest_ind = int(merge_dest_select.value)
scan_source_ind = int(merge_source_select.value)
peak_pos_textinput = TextInput(title="Peak position:", default_size=145)
peak_pos_textinput.on_change("value", peak_pos_textinput_callback)
if scan_dest_ind == scan_source_ind:
print("WARNING: Selected scans for merging are identical")
return
peak_int_ratio_spinner = Spinner(
title="Peak intensity ratio:", value=0.8, step=0.01, low=0, high=1, default_size=145
)
peak_prominence_spinner = Spinner(title="Peak prominence:", value=50, low=0, default_size=145)
smooth_toggle = Toggle(label="Smooth curve", default_size=145)
window_size_spinner = Spinner(title="Window size:", value=7, step=2, low=1, default_size=145)
poly_order_spinner = Spinner(title="Poly order:", value=3, low=0, default_size=145)
pyzebra.merge_scans(det_data[scan_dest_ind], det_data[scan_source_ind])
_update_plot(_get_selected_scan())
integ_from = Spinner(title="Integrate from:", default_size=145)
integ_to = Spinner(title="to:", default_size=145)
merge_button = Button(label="Merge scans", width=145)
merge_button.on_click(merge_button_callback)
def fitparam_reset_button_callback():
...
def fit_from_spinner_callback(_attr, _old, new):
fit_from_span.location = new
fitparam_reset_button = Button(label="Reset to defaults", default_size=145, disabled=True)
fitparam_reset_button.on_click(fitparam_reset_button_callback)
fit_from_spinner = Spinner(title="Fit from:", width=145)
fit_from_spinner.on_change("value", fit_from_spinner_callback)
def fit_to_spinner_callback(_attr, _old, new):
fit_to_span.location = new
fit_to_spinner = Spinner(title="to:", width=145)
fit_to_spinner.on_change("value", fit_to_spinner_callback)
def fitparams_add_dropdown_callback(click):
new_tag = str(fitparams_select.tags[0]) # bokeh requires (str, str) for MultiSelect options
# bokeh requires (str, str) for MultiSelect options
new_tag = f"{click.item}-{fitparams_select.tags[0]}"
fitparams_select.options.append((new_tag, click.item))
fit_params[new_tag] = fitparams_factory(click.item)
fitparams_select.tags[0] += 1
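The fit-range selection above is just two widgets mirrored onto two plot annotations: each Spinner writes its value into the location of a dashed Span. A standalone sketch of that wiring:

from bokeh.models import Span, Spinner
from bokeh.plotting import figure

plot = figure()
fit_from_span = Span(location=None, dimension="height", line_dash="dashed")
plot.add_layout(fit_from_span)

def fit_from_spinner_callback(_attr, _old, new):
    # Move the dashed marker to the new lower bound of the fit range.
    fit_from_span.location = new

fit_from_spinner = Spinner(title="Fit from:", width=145)
fit_from_spinner.on_change("value", fit_from_spinner_callback)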
@@ -324,13 +364,13 @@ def create():
fitparams_add_dropdown = Dropdown(
label="Add fit function",
menu=[
("Background", "background"),
("Gauss", "gauss"),
("Linear", "linear"),
("Gaussian", "gaussian"),
("Voigt", "voigt"),
("Pseudo Voigt", "pseudovoigt"),
("Pseudo Voigt1", "pseudovoigt1"),
("Pseudo Voigt", "pvoigt"),
# ("Pseudo Voigt1", "pseudovoigt1"),
],
default_size=145,
width=145,
disabled=True,
)
fitparams_add_dropdown.on_click(fitparams_add_dropdown_callback)
@@ -349,9 +389,9 @@ def create():
if new:
fitparams_table_source.data.update(fit_params[new[0]])
else:
fitparams_table_source.data.update(dict(param=[], guess=[], vary=[], min=[], max=[]))
fitparams_table_source.data.update(dict(param=[], value=[], vary=[], min=[], max=[]))
fitparams_select = MultiSelect(options=[], height=120, default_size=145)
fitparams_select = MultiSelect(options=[], height=120, width=145)
fitparams_select.tags = [0]
fitparams_select.on_change("value", fitparams_select_callback)
@@ -366,36 +406,44 @@ def create():
fitparams_select.value = []
fitparams_remove_button = Button(label="Remove fit function", default_size=145, disabled=True)
fitparams_remove_button = Button(label="Remove fit function", width=145, disabled=True)
fitparams_remove_button.on_click(fitparams_remove_button_callback)
def fitparams_factory(function):
if function == "background":
params = ["slope", "offset"]
elif function == "gauss":
params = ["center", "sigma", "amplitude"]
if function == "linear":
params = ["slope", "intercept"]
elif function == "gaussian":
params = ["amplitude", "center", "sigma"]
elif function == "voigt":
params = ["center", "sigma", "amplitude", "gamma"]
elif function == "pseudovoigt":
params = ["center", "sigma", "amplitude", "fraction"]
params = ["amplitude", "center", "sigma", "gamma"]
elif function == "pvoigt":
params = ["amplitude", "center", "sigma", "fraction"]
elif function == "pseudovoigt1":
params = ["center", "g_sigma", "l_sigma", "amplitude", "fraction"]
params = ["amplitude", "center", "g_sigma", "l_sigma", "fraction"]
else:
raise ValueError("Unknown fit function")
n = len(params)
fitparams = dict(
param=params, guess=[None] * n, vary=[True] * n, min=[None] * n, max=[None] * n,
param=params, value=[None] * n, vary=[True] * n, min=[None] * n, max=[None] * n,
)
if function == "linear":
fitparams["value"] = [0, 1]
fitparams["vary"] = [False, True]
fitparams["min"] = [None, 0]
elif function == "gaussian":
fitparams["min"] = [0, None, None]
return fitparams
fitparams_table_source = ColumnDataSource(dict(param=[], guess=[], vary=[], min=[], max=[]))
fitparams_table_source = ColumnDataSource(dict(param=[], value=[], vary=[], min=[], max=[]))
fitparams_table = DataTable(
source=fitparams_table_source,
columns=[
TableColumn(field="param", title="Parameter"),
TableColumn(field="guess", title="Guess", editor=NumberEditor()),
TableColumn(field="value", title="Value", editor=NumberEditor()),
TableColumn(field="vary", title="Vary", editor=CheckboxEditor()),
TableColumn(field="min", title="Min", editor=NumberEditor()),
TableColumn(field="max", title="Max", editor=NumberEditor()),
@@ -408,183 +456,130 @@ def create():
)
# start with `background` and `gauss` fit functions added
fitparams_add_dropdown_callback(types.SimpleNamespace(item="background"))
fitparams_add_dropdown_callback(types.SimpleNamespace(item="gauss"))
fitparams_add_dropdown_callback(types.SimpleNamespace(item="linear"))
fitparams_add_dropdown_callback(types.SimpleNamespace(item="gaussian"))
fitparams_select.value = ["gaussian-1"] # add selection to gauss
fit_output_textinput = TextAreaInput(title="Fit results:", width=450, height=400)
def _get_peakfind_params():
return dict(
int_threshold=peak_int_ratio_spinner.value,
prominence=peak_prominence_spinner.value,
smooth=smooth_toggle.active,
window_size=window_size_spinner.value,
poly_order=poly_order_spinner.value,
)
def peakfind_all_button_callback():
peakfind_params = _get_peakfind_params()
for scan in det_data["scan"].values():
pyzebra.ccl_findpeaks(scan, **peakfind_params)
_update_table()
_update_plot(_get_selected_scan())
peakfind_all_button = Button(label="Peak Find All", button_type="primary", default_size=145)
peakfind_all_button.on_click(peakfind_all_button_callback)
def peakfind_button_callback():
scan = _get_selected_scan()
pyzebra.ccl_findpeaks(scan, **_get_peakfind_params())
_update_table()
_update_plot(scan)
peakfind_button = Button(label="Peak Find Current", default_size=145)
peakfind_button.on_click(peakfind_button_callback)
def _get_fit_params():
return dict(
guess=fit_params["1"]["guess"] + fit_params["0"]["guess"],
vary=fit_params["1"]["vary"] + fit_params["0"]["vary"],
constraints_min=fit_params["1"]["min"] + fit_params["0"]["min"],
constraints_max=fit_params["1"]["max"] + fit_params["0"]["max"],
numfit_min=integ_from.value,
numfit_max=integ_to.value,
binning=bin_size_spinner.value,
)
fit_output_textinput = TextAreaInput(title="Fit results:", width=750, height=200)
def fit_all_button_callback():
fit_params = _get_fit_params()
for scan in det_data["scan"].values():
# fit_params are updated inplace within `fitccl`
pyzebra.fitccl(scan, **deepcopy(fit_params))
for scan, export in zip(det_data, scan_table_source.data["export"]):
if export:
pyzebra.fit_scan(
scan, fit_params, fit_from=fit_from_spinner.value, fit_to=fit_to_spinner.value
)
_update_plot(_get_selected_scan())
_update_table()
fit_all_button = Button(label="Fit All", button_type="primary", default_size=145)
fit_all_button = Button(label="Fit All", button_type="primary", width=145)
fit_all_button.on_click(fit_all_button_callback)
def fit_button_callback():
scan = _get_selected_scan()
pyzebra.fitccl(scan, **_get_fit_params())
pyzebra.fit_scan(
scan, fit_params, fit_from=fit_from_spinner.value, fit_to=fit_to_spinner.value
)
_update_plot(scan)
_update_table()
fit_button = Button(label="Fit Current", default_size=145)
fit_button = Button(label="Fit Current", width=145)
fit_button.on_click(fit_button_callback)
def area_method_radiobutton_callback(_attr, _old, new):
det_data["meta"]["area_method"] = AREA_METHODS[new]
def area_method_radiobutton_callback(_handler):
_update_preview()
area_method_radiobutton = RadioButtonGroup(
labels=["Fit area", "Int area"], active=0, default_size=145
labels=["Fit area", "Int area"], active=0, width=145, disabled=True
)
area_method_radiobutton.on_change("active", area_method_radiobutton_callback)
area_method_radiobutton.on_click(area_method_radiobutton_callback)
bin_size_spinner = Spinner(title="Bin size:", value=1, low=1, step=1, default_size=145)
def lorentz_checkbox_callback(_handler):
_update_preview()
lorentz_toggle = Toggle(label="Lorentz Correction", default_size=145)
lorentz_checkbox = CheckboxGroup(labels=["Lorentz Correction"], width=145, margin=[13, 5, 5, 5])
lorentz_checkbox.on_click(lorentz_checkbox_callback)
preview_output_textinput = TextAreaInput(title="Export file preview:", width=450, height=400)
def preview_output_button_callback():
if det_data["meta"]["indices"] == "hkl":
ext = ".comm"
elif det_data["meta"]["indices"] == "real":
ext = ".incomm"
export_preview_textinput = TextAreaInput(title="Export file preview:", width=500, height=400)
def _update_preview():
with tempfile.TemporaryDirectory() as temp_dir:
temp_file = temp_dir + "/temp"
export_data = deepcopy(det_data)
for s, export in zip(scan_table_source.data["scan"], scan_table_source.data["export"]):
if not export:
del export_data["scan"][s]
pyzebra.export_comm(
export_data = []
for s, export in zip(det_data, scan_table_source.data["export"]):
if export:
export_data.append(s)
pyzebra.export_1D(
export_data,
temp_file,
lorentz=lorentz_toggle.active,
area_method=AREA_METHODS[int(area_method_radiobutton.active)],
lorentz=bool(lorentz_checkbox.active),
hkl_precision=int(hkl_precision_select.value),
)
with open(f"{temp_file}{ext}") as f:
preview_output_textinput.value = f.read()
exported_content = ""
file_content = []
for ext in (".comm", ".incomm"):
fname = temp_file + ext
if os.path.isfile(fname):
with open(fname) as f:
content = f.read()
exported_content += f"{ext} file:\n" + content
else:
content = ""
file_content.append(content)
preview_output_button = Button(label="Preview file", default_size=220)
preview_output_button.on_click(preview_output_button_callback)
js_data.data.update(content=file_content)
export_preview_textinput.value = exported_content
hkl_precision_select = Select(options=["2", "3", "4"], value="2", default_size=220)
def hkl_precision_select_callback(_attr, _old, _new):
_update_preview()
def export_results(det_data):
if det_data["meta"]["indices"] == "hkl":
ext = ".comm"
elif det_data["meta"]["indices"] == "real":
ext = ".incomm"
hkl_precision_select = Select(
title="hkl precision:", options=["2", "3", "4"], value="2", width=80
)
hkl_precision_select.on_change("value", hkl_precision_select_callback)
with tempfile.TemporaryDirectory() as temp_dir:
temp_file = temp_dir + "/temp"
export_data = deepcopy(det_data)
for s, export in zip(scan_table_source.data["scan"], scan_table_source.data["export"]):
if not export:
del export_data["scan"][s]
pyzebra.export_comm(
export_data,
temp_file,
lorentz=lorentz_toggle.active,
hkl_precision=int(hkl_precision_select.value),
)
with open(f"{temp_file}{ext}") as f:
output_content = f.read()
return output_content, ext
def save_button_callback():
cont, ext = export_results(det_data)
js_data.data.update(cont=[cont], ext=[ext])
save_button = Button(label="Download file", button_type="success", default_size=220)
save_button.on_click(save_button_callback)
save_button = Button(label="Download File", button_type="success", width=200)
save_button.js_on_click(CustomJS(args={"js_data": js_data}, code=javaScript))
findpeak_controls = column(
row(peak_pos_textinput, column(Spacer(height=19), smooth_toggle)),
row(peak_int_ratio_spinner, peak_prominence_spinner),
row(window_size_spinner, poly_order_spinner),
row(peakfind_button, peakfind_all_button),
)
fitpeak_controls = row(
column(fitparams_add_dropdown, fitparams_select, fitparams_remove_button),
fitparams_table,
Spacer(width=20),
column(
row(integ_from, integ_to),
row(bin_size_spinner, column(Spacer(height=19), lorentz_toggle)),
row(fitparam_reset_button, area_method_radiobutton),
row(fit_from_spinner, fit_to_spinner),
row(area_method_radiobutton, lorentz_checkbox),
row(fit_button, fit_all_button),
),
)
export_layout = column(
preview_output_textinput,
row(column(preview_output_button, hkl_precision_select), save_button),
scan_layout = column(
scan_table,
monitor_spinner,
row(column(Spacer(height=19), merge_button), merge_dest_select, merge_source_select),
)
import_layout = column(
proposal_textinput,
file_select,
row(file_open_button, file_append_button),
upload_div,
upload_button,
append_upload_div,
append_upload_button,
)
export_layout = column(
export_preview_textinput,
row(hkl_precision_select, column(Spacer(height=19), row(save_button))),
)
upload_div = Div(text="Or upload .ccl file:")
append_upload_div = Div(text="append extra .ccl/.dat files:")
tab_layout = column(
row(proposal_textinput, ccl_file_select),
row(
column(Spacer(height=5), upload_div),
upload_button,
column(Spacer(height=5), append_upload_div),
append_upload_button,
),
row(scan_table, plot, Spacer(width=30), fit_output_textinput, export_layout),
row(findpeak_controls, Spacer(width=30), fitpeak_controls),
row(import_layout, scan_layout, plot, Spacer(width=30), export_layout),
row(fitpeak_controls, fit_output_textinput),
)
return Panel(child=tab_layout, title="ccl integrate")
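The export path above always writes through a temporary basename and reads back whichever of the two files pyzebra produced; scans are split between .comm and .incomm on export (see #21 in the commit log). A condensed sketch of that round trip (the area_method keyword from the callback is elided here):

import os
import tempfile
import pyzebra

def preview_export(export_data, lorentz=False, hkl_precision=2):
    with tempfile.TemporaryDirectory() as temp_dir:
        temp_file = temp_dir + "/temp"
        pyzebra.export_1D(
            export_data, temp_file, lorentz=lorentz, hkl_precision=hkl_precision,
        )
        content = {}
        for ext in (".comm", ".incomm"):
            fname = temp_file + ext
            # pyzebra only writes a file when it has scans of that kind.
            if os.path.isfile(fname):
                with open(fname) as f:
                    content[ext] = f.read()
    return content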

View File

@@ -1,5 +1,6 @@
import base64
import io
import os
import re
import tempfile
@@ -10,8 +11,9 @@ from bokeh.models import (
Div,
FileInput,
Panel,
RadioButtonGroup,
Select,
Spacer,
Tabs,
TextAreaInput,
TextInput,
)
@@ -28,7 +30,7 @@ def create():
config.load_from_file(file)
logfile_textinput.value = config.logfile
logfile_verbosity_select.value = config.logfile_verbosity
logfile_verbosity.value = config.logfile_verbosity
filelist_type.value = config.filelist_type
filelist_format_textinput.value = config.filelist_format
@@ -43,11 +45,16 @@ def create():
ub_textareainput.value = config.crystal_UB
dataFactory_implementation_select.value = config.dataFactory_implementation
dataFactory_dist1_textinput.value = config.dataFactory_dist1
if config.dataFactory_dist1 is not None:
dataFactory_dist1_textinput.value = config.dataFactory_dist1
if config.dataFactory_dist2 is not None:
dataFactory_dist2_textinput.value = config.dataFactory_dist2
if config.dataFactory_dist3 is not None:
dataFactory_dist3_textinput.value = config.dataFactory_dist3
reflectionPrinter_format_select.value = config.reflectionPrinter_format
set_active_widgets(config.algorithm)
if config.algorithm == "adaptivemaxcog":
algorithm_params.active = 0
threshold_textinput.value = config.threshold
shell_textinput.value = config.shell
steepness_textinput.value = config.steepness
@@ -56,6 +63,7 @@ def create():
aps_window_textinput.value = str(tuple(map(int, config.aps_window.values())))
elif config.algorithm == "adaptivedynamic":
algorithm_params.active = 1
adm_window_textinput.value = str(tuple(map(int, config.adm_window.values())))
border_textinput.value = str(tuple(map(int, config.border.values())))
minWindow_textinput.value = str(tuple(map(int, config.minWindow.values())))
@@ -65,46 +73,16 @@ def create():
loop_textinput.value = config.loop
minPeakCount_textinput.value = config.minPeakCount
displacementCurve_textinput.value = "\n".join(map(str, config.displacementCurve))
else:
raise ValueError("Unknown processing mode.")
def set_active_widgets(implementation):
if implementation == "adaptivemaxcog":
mode_radio_button_group.active = 0
disable_adaptivemaxcog = False
disable_adaptivedynamic = True
elif implementation == "adaptivedynamic":
mode_radio_button_group.active = 1
disable_adaptivemaxcog = True
disable_adaptivedynamic = False
else:
raise ValueError("Implementation can be either 'adaptivemaxcog' or 'adaptivedynamic'")
threshold_textinput.disabled = disable_adaptivemaxcog
shell_textinput.disabled = disable_adaptivemaxcog
steepness_textinput.disabled = disable_adaptivemaxcog
duplicateDistance_textinput.disabled = disable_adaptivemaxcog
maxequal_textinput.disabled = disable_adaptivemaxcog
aps_window_textinput.disabled = disable_adaptivemaxcog
adm_window_textinput.disabled = disable_adaptivedynamic
border_textinput.disabled = disable_adaptivedynamic
minWindow_textinput.disabled = disable_adaptivedynamic
reflectionFile_textinput.disabled = disable_adaptivedynamic
targetMonitor_textinput.disabled = disable_adaptivedynamic
smoothSize_textinput.disabled = disable_adaptivedynamic
loop_textinput.disabled = disable_adaptivedynamic
minPeakCount_textinput.disabled = disable_adaptivedynamic
displacementCurve_textinput.disabled = disable_adaptivedynamic
upload_div = Div(text="Open XML configuration file:")
def upload_button_callback(_attr, _old, new):
with io.BytesIO(base64.b64decode(new)) as file:
_load_config_file(file)
upload_button = FileInput(accept=".xml")
upload_div = Div(text="Open .xml config:")
upload_button = FileInput(accept=".xml", width=200)
upload_button.on_change("value", upload_button_callback)
# General parameters
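The widget values set in _load_config_file come straight from AnatricConfig properties; per the anatric.py changes earlier in this comparison, getters for absent XML elements now return an empty string (the is-not-None guards above predate that change). A minimal sketch of the underlying ElementTree pattern:

# Sketch of the AnatricConfig attribute pattern from anatric.py above.
import xml.etree.ElementTree as ET

tree = ET.ElementTree(ET.fromstring("<anatric><DataFactory/></anatric>"))

def get_attr(parent, tag, attr):
    elem = tree.find(parent).find(tag)
    if elem is None:
        return ""  # absent parameter -> empty string, not an exception
    return elem.attrib[attr]

print(repr(get_attr("DataFactory", "dist1", "value")))  # -> ''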
@@ -112,16 +90,14 @@ def create():
def logfile_textinput_callback(_attr, _old, new):
config.logfile = new
logfile_textinput = TextInput(title="Logfile:", value="logfile.log", width=520)
logfile_textinput = TextInput(title="Logfile:", value="logfile.log")
logfile_textinput.on_change("value", logfile_textinput_callback)
def logfile_verbosity_select_callback(_attr, _old, new):
def logfile_verbosity_callback(_attr, _old, new):
config.logfile_verbosity = new
logfile_verbosity_select = Select(
title="verbosity:", options=["0", "5", "10", "15", "30"], width=70
)
logfile_verbosity_select.on_change("value", logfile_verbosity_select_callback)
logfile_verbosity = TextInput(title="verbosity:", width=70)
logfile_verbosity.on_change("value", logfile_verbosity_callback)
# ---- FileList
def filelist_type_callback(_attr, _old, new):
@@ -133,7 +109,7 @@ def create():
def filelist_format_textinput_callback(_attr, _old, new):
config.filelist_format = new
filelist_format_textinput = TextInput(title="format:", width=490)
filelist_format_textinput = TextInput(title="format:", width=290)
filelist_format_textinput.on_change("value", filelist_format_textinput_callback)
def filelist_datapath_textinput_callback(_attr, _old, new):
@@ -148,20 +124,20 @@ def create():
ranges.append(re.findall(r"\b\d+\b", line))
config.filelist_ranges = ranges
filelist_ranges_textareainput = TextAreaInput(title="ranges:", height=100)
filelist_ranges_textareainput = TextAreaInput(title="ranges:", rows=1)
filelist_ranges_textareainput.on_change("value", filelist_ranges_textareainput_callback)
# ---- crystal
def crystal_sample_textinput_callback(_attr, _old, new):
config.crystal_sample = new
crystal_sample_textinput = TextInput(title="Sample Name:")
crystal_sample_textinput = TextInput(title="Sample Name:", width=290)
crystal_sample_textinput.on_change("value", crystal_sample_textinput_callback)
def lambda_textinput_callback(_attr, _old, new):
config.crystal_lambda = new
lambda_textinput = TextInput(title="lambda:", width=140)
lambda_textinput = TextInput(title="lambda:", width=100)
lambda_textinput.on_change("value", lambda_textinput_callback)
def ub_textareainput_callback(_attr, _old, new):
@@ -173,19 +149,19 @@ def create():
def zeroOM_textinput_callback(_attr, _old, new):
config.crystal_zeroOM = new
zeroOM_textinput = TextInput(title="zeroOM:", width=140)
zeroOM_textinput = TextInput(title="zeroOM:", width=100)
zeroOM_textinput.on_change("value", zeroOM_textinput_callback)
def zeroSTT_textinput_callback(_attr, _old, new):
config.crystal_zeroSTT = new
zeroSTT_textinput = TextInput(title="zeroSTT:", width=140)
zeroSTT_textinput = TextInput(title="zeroSTT:", width=100)
zeroSTT_textinput.on_change("value", zeroSTT_textinput_callback)
def zeroCHI_textinput_callback(_attr, _old, new):
config.crystal_zeroCHI = new
zeroCHI_textinput = TextInput(title="zeroCHI:", width=140)
zeroCHI_textinput = TextInput(title="zeroCHI:", width=100)
zeroCHI_textinput.on_change("value", zeroCHI_textinput_callback)
# ---- DataFactory
@@ -193,16 +169,28 @@ def create():
config.dataFactory_implementation = new
dataFactory_implementation_select = Select(
title="DataFactory implementation:", options=DATA_FACTORY_IMPLEMENTATION, width=300,
title="DataFactory implement.:", options=DATA_FACTORY_IMPLEMENTATION, width=145,
)
dataFactory_implementation_select.on_change("value", dataFactory_implementation_select_callback)
def dataFactory_dist1_textinput_callback(_attr, _old, new):
config.dataFactory_dist1 = new
dataFactory_dist1_textinput = TextInput(title="dist1:", width=290)
dataFactory_dist1_textinput = TextInput(title="dist1:", width=75)
dataFactory_dist1_textinput.on_change("value", dataFactory_dist1_textinput_callback)
def dataFactory_dist2_textinput_callback(_attr, _old, new):
config.dataFactory_dist2 = new
dataFactory_dist2_textinput = TextInput(title="dist2:", width=75)
dataFactory_dist2_textinput.on_change("value", dataFactory_dist2_textinput_callback)
def dataFactory_dist3_textinput_callback(_attr, _old, new):
config.dataFactory_dist3 = new
dataFactory_dist3_textinput = TextInput(title="dist3:", width=75)
dataFactory_dist3_textinput.on_change("value", dataFactory_dist3_textinput_callback)
# ---- BackgroundProcessor
# ---- DetectorEfficency
@@ -212,7 +200,7 @@ def create():
config.reflectionPrinter_format = new
reflectionPrinter_format_select = Select(
title="ReflectionPrinter format:", options=REFLECTION_PRINTER_FORMATS, width=300,
title="ReflectionPrinter format:", options=REFLECTION_PRINTER_FORMATS, width=145,
)
reflectionPrinter_format_select.on_change("value", reflectionPrinter_format_select_callback)
@@ -221,42 +209,42 @@ def create():
def threshold_textinput_callback(_attr, _old, new):
config.threshold = new
threshold_textinput = TextInput(title="Threshold:")
threshold_textinput = TextInput(title="Threshold:", width=145)
threshold_textinput.on_change("value", threshold_textinput_callback)
# ---- shell
def shell_textinput_callback(_attr, _old, new):
config.shell = new
shell_textinput = TextInput(title="Shell:")
shell_textinput = TextInput(title="Shell:", width=145)
shell_textinput.on_change("value", shell_textinput_callback)
# ---- steepness
def steepness_textinput_callback(_attr, _old, new):
config.steepness = new
steepness_textinput = TextInput(title="Steepness:")
steepness_textinput = TextInput(title="Steepness:", width=145)
steepness_textinput.on_change("value", steepness_textinput_callback)
# ---- duplicateDistance
def duplicateDistance_textinput_callback(_attr, _old, new):
config.duplicateDistance = new
duplicateDistance_textinput = TextInput(title="Duplicate Distance:")
duplicateDistance_textinput = TextInput(title="Duplicate Distance:", width=145)
duplicateDistance_textinput.on_change("value", duplicateDistance_textinput_callback)
# ---- maxequal
def maxequal_textinput_callback(_attr, _old, new):
config.maxequal = new
maxequal_textinput = TextInput(title="Max Equal:")
maxequal_textinput = TextInput(title="Max Equal:", width=145)
maxequal_textinput.on_change("value", maxequal_textinput_callback)
# ---- window
def aps_window_textinput_callback(_attr, _old, new):
config.aps_window = dict(zip(("x", "y", "z"), re.findall(r"\b\d+\b", new)))
aps_window_textinput = TextInput(title="Window (x, y, z):")
aps_window_textinput = TextInput(title="Window (x, y, z):", width=145)
aps_window_textinput.on_change("value", aps_window_textinput_callback)
# Adaptive Dynamic Mask Integration (adaptivedynamic)
@@ -264,56 +252,56 @@ def create():
def adm_window_textinput_callback(_attr, _old, new):
config.adm_window = dict(zip(("x", "y", "z"), re.findall(r"\b\d+\b", new)))
adm_window_textinput = TextInput(title="Window (x, y, z):")
adm_window_textinput = TextInput(title="Window (x, y, z):", width=145)
adm_window_textinput.on_change("value", adm_window_textinput_callback)
# ---- border
def border_textinput_callback(_attr, _old, new):
config.border = dict(zip(("x", "y", "z"), re.findall(r"\b\d+\b", new)))
border_textinput = TextInput(title="Border (x, y, z):")
border_textinput = TextInput(title="Border (x, y, z):", width=145)
border_textinput.on_change("value", border_textinput_callback)
# ---- minWindow
def minWindow_textinput_callback(_attr, _old, new):
config.minWindow = dict(zip(("x", "y", "z"), re.findall(r"\b\d+\b", new)))
minWindow_textinput = TextInput(title="Min Window (x, y, z):")
minWindow_textinput = TextInput(title="Min Window (x, y, z):", width=145)
minWindow_textinput.on_change("value", minWindow_textinput_callback)
# ---- reflectionFile
def reflectionFile_textinput_callback(_attr, _old, new):
config.reflectionFile = new
reflectionFile_textinput = TextInput(title="Reflection File:")
reflectionFile_textinput = TextInput(title="Reflection File:", width=145)
reflectionFile_textinput.on_change("value", reflectionFile_textinput_callback)
# ---- targetMonitor
def targetMonitor_textinput_callback(_attr, _old, new):
config.targetMonitor = new
targetMonitor_textinput = TextInput(title="Target Monitor:")
targetMonitor_textinput = TextInput(title="Target Monitor:", width=145)
targetMonitor_textinput.on_change("value", targetMonitor_textinput_callback)
# ---- smoothSize
def smoothSize_textinput_callback(_attr, _old, new):
config.smoothSize = new
smoothSize_textinput = TextInput(title="Smooth Size:")
smoothSize_textinput = TextInput(title="Smooth Size:", width=145)
smoothSize_textinput.on_change("value", smoothSize_textinput_callback)
# ---- loop
def loop_textinput_callback(_attr, _old, new):
config.loop = new
loop_textinput = TextInput(title="Loop:")
loop_textinput = TextInput(title="Loop:", width=145)
loop_textinput.on_change("value", loop_textinput_callback)
# ---- minPeakCount
def minPeakCount_textinput_callback(_attr, _old, new):
config.minPeakCount = new
minPeakCount_textinput = TextInput(title="Min Peak Count:")
minPeakCount_textinput = TextInput(title="Min Peak Count:", width=145)
minPeakCount_textinput.on_change("value", minPeakCount_textinput_callback)
# ---- displacementCurve
@@ -324,88 +312,85 @@ def create():
config.displacementCurve = maps
displacementCurve_textinput = TextAreaInput(
title="Displacement Curve (twotheta, x, y):", height=100
title="Displ. Curve (, x, y):", width=145, height=100
)
displacementCurve_textinput.on_change("value", displacementCurve_textinput_callback)
def mode_radio_button_group_callback(active):
if active == 0:
def algorithm_tabs_callback(_attr, _old, new):
if new == 0:
config.algorithm = "adaptivemaxcog"
set_active_widgets("adaptivemaxcog")
else:
config.algorithm = "adaptivedynamic"
set_active_widgets("adaptivedynamic")
mode_radio_button_group = RadioButtonGroup(
labels=["Adaptive Peak Detection", "Adaptive Dynamic Integration"], active=0
algorithm_params = Tabs(
tabs=[
Panel(
child=column(
row(threshold_textinput, shell_textinput, steepness_textinput),
row(duplicateDistance_textinput, maxequal_textinput, aps_window_textinput),
),
title="Peak Search",
),
Panel(
child=column(
row(adm_window_textinput, border_textinput, minWindow_textinput),
row(reflectionFile_textinput, targetMonitor_textinput, smoothSize_textinput),
row(loop_textinput, minPeakCount_textinput, displacementCurve_textinput),
),
title="Dynamic Integration",
),
]
)
mode_radio_button_group.on_click(mode_radio_button_group_callback)
set_active_widgets("adaptivemaxcog")
algorithm_params.on_change("active", algorithm_tabs_callback)
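The move from a RadioButtonGroup to Tabs also changes the callback contract: on_change("active", ...) receives the new tab index rather than on_click's active flag. A minimal sketch of the new wiring, with the parameter widgets stubbed out as empty columns:

from bokeh.layouts import column
from bokeh.models import Panel, Tabs

def algorithm_tabs_callback(_attr, _old, new):
    # `new` is the integer index of the newly active tab
    algorithm = "adaptivemaxcog" if new == 0 else "adaptivedynamic"
    print("selected algorithm:", algorithm)

algorithm_params = Tabs(tabs=[
    Panel(child=column(), title="Peak Search"),
    Panel(child=column(), title="Dynamic Integration"),
])
algorithm_params.on_change("active", algorithm_tabs_callback)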
def process_button_callback():
with tempfile.TemporaryDirectory() as temp_dir:
temp_file = temp_dir + "/temp.xml"
temp_file = temp_dir + "/config.xml"
config.save_as(temp_file)
pyzebra.anatric(temp_file, anatric_path=doc.anatric_path)
if doc.anatric_path:
pyzebra.anatric(temp_file, anatric_path=doc.anatric_path, cwd=temp_dir)
else:
pyzebra.anatric(temp_file, cwd=temp_dir)
with open(config.logfile) as f_log:
with open(os.path.join(temp_dir, config.logfile)) as f_log:
output_log.value = f_log.read()
with open(os.path.join(temp_dir, config.reflectionPrinter_file)) as f_res:
output_res.value = f_res.read()
process_button = Button(label="Process", button_type="primary")
process_button.on_click(process_button_callback)
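The reworked process step writes the config into a per-run temporary directory and executes anatric with cwd pointed there, so logs and result files from concurrent sessions cannot collide. The bare pattern, sketched with a hypothetical `mytool` binary and log name:

import os
import subprocess
import tempfile

def run_isolated(config_xml):
    with tempfile.TemporaryDirectory() as temp_dir:
        cfg = os.path.join(temp_dir, "config.xml")
        with open(cfg, "w") as f:
            f.write(config_xml)
        # cwd=temp_dir -> any relative output paths land in the throwaway dir
        subprocess.run(["mytool", cfg], cwd=temp_dir, check=True)
        with open(os.path.join(temp_dir, "mytool.log")) as f:  # hypothetical log file
            return f.read()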
output_log = TextAreaInput(title="Logfile output:", height=700, disabled=True)
output_config = TextAreaInput(title="Current config:", height=700, width=400, disabled=True)
output_log = TextAreaInput(title="Logfile output:", height=320, width=465, disabled=True)
output_res = TextAreaInput(title="Result output:", height=320, width=465, disabled=True)
output_config = TextAreaInput(title="Current config:", height=320, width=465, disabled=True)
general_params_layout = column(
row(column(Spacer(height=2), upload_div), upload_button),
row(logfile_textinput, logfile_verbosity),
row(filelist_type, filelist_format_textinput),
filelist_datapath_textinput,
filelist_ranges_textareainput,
row(crystal_sample_textinput, lambda_textinput),
ub_textareainput,
row(zeroOM_textinput, zeroSTT_textinput, zeroCHI_textinput),
row(
dataFactory_implementation_select,
dataFactory_dist1_textinput,
dataFactory_dist2_textinput,
dataFactory_dist3_textinput,
),
row(reflectionPrinter_format_select),
)
tab_layout = row(
column(
upload_div,
upload_button,
row(logfile_textinput, logfile_verbosity_select),
row(filelist_type, filelist_format_textinput),
filelist_datapath_textinput,
filelist_ranges_textareainput,
crystal_sample_textinput,
row(lambda_textinput, zeroOM_textinput, zeroSTT_textinput, zeroCHI_textinput),
ub_textareainput,
row(dataFactory_implementation_select, dataFactory_dist1_textinput),
reflectionPrinter_format_select,
process_button,
),
column(
mode_radio_button_group,
row(
column(
threshold_textinput,
shell_textinput,
steepness_textinput,
duplicateDistance_textinput,
maxequal_textinput,
aps_window_textinput,
),
column(
adm_window_textinput,
border_textinput,
minWindow_textinput,
reflectionFile_textinput,
targetMonitor_textinput,
smoothSize_textinput,
loop_textinput,
minPeakCount_textinput,
displacementCurve_textinput,
),
),
),
output_config,
output_log,
general_params_layout,
column(output_config, algorithm_params, row(process_button)),
column(output_log, output_res),
)
async def update_config():
with tempfile.TemporaryDirectory() as temp_dir:
temp_file = temp_dir + "/debug.xml"
config.save_as(temp_file)
with open(temp_file) as f_config:
output_config.value = f_config.read()
output_config.value = config.tostring()
doc.add_periodic_callback(update_config, 1000)
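Disentangled from the removed debug.xml variant above, the new preview refresh serializes the config in memory once a second. Sketch of the periodic-callback idiom, assuming a `config` object exposing `tostring()` as in the diff:

from bokeh.io import curdoc
from bokeh.models import TextAreaInput

output_config = TextAreaInput(title="Current config:", disabled=True)

async def update_config():
    output_config.value = config.tostring()  # `config` assumed from the surrounding app

curdoc().add_periodic_callback(update_config, 1000)  # period in milliseconds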


@@ -9,11 +9,15 @@ from bokeh.models import (
BoxEditTool,
BoxZoomTool,
Button,
CheckboxGroup,
ColumnDataSource,
DataRange1d,
DataTable,
Div,
FileInput,
Grid,
MultiSelect,
NumberFormatter,
HoverTool,
Image,
Line,
@@ -22,16 +26,17 @@ from bokeh.models import (
Panel,
PanTool,
Plot,
RadioButtonGroup,
Range1d,
Rect,
ResetTool,
Select,
Slider,
Spacer,
Spinner,
TableColumn,
TextAreaInput,
TextInput,
Title,
Toggle,
WheelZoomTool,
)
from bokeh.palettes import Cividis256, Greys256, Plasma256 # pylint: disable=E0611
@@ -40,20 +45,35 @@ import pyzebra
IMAGE_W = 256
IMAGE_H = 128
IMAGE_PLOT_W = int(IMAGE_W * 2) + 52
IMAGE_PLOT_H = int(IMAGE_H * 2) + 27
def create():
det_data = {}
roi_selection = {}
def proposal_textinput_callback(_attr, _old, new):
proposal = new.strip()
year = new[:4]
proposal_path = f"/afs/psi.ch/project/sinqdata/{year}/zebra/{proposal}"
file_list = []
for file in os.listdir(proposal_path):
if file.endswith(".hdf"):
file_list.append((os.path.join(proposal_path, file), file))
file_select.options = file_list
proposal_textinput = TextInput(title="Proposal number:", width=210)
proposal_textinput.on_change("value", proposal_textinput_callback)
def upload_button_callback(_attr, _old, new):
with io.StringIO(base64.b64decode(new).decode()) as file:
h5meta_list = pyzebra.parse_h5meta(file)
file_list = h5meta_list["filelist"]
filelist.options = [(entry, os.path.basename(entry)) for entry in file_list]
filelist.value = file_list[0]
file_select.options = [(entry, os.path.basename(entry)) for entry in file_list]
upload_button = FileInput(accept=".cami")
upload_div = Div(text="or upload .cami file:", margin=(5, 5, 0, 5))
upload_button = FileInput(accept=".cami", width=200)
upload_button.on_change("value", upload_button_callback)
def update_image(index=None):
@@ -73,7 +93,7 @@ def create():
)
image_source.data.update(image=[current_image])
if auto_toggle.active:
if main_auto_checkbox.active:
im_min = np.min(current_image)
im_max = np.max(current_image)
@@ -83,18 +103,18 @@ def create():
image_glyph.color_mapper.low = im_min
image_glyph.color_mapper.high = im_max
if "magnetic_field" in det_data:
magnetic_field_spinner.value = det_data["magnetic_field"][index]
if "mf" in det_data:
metadata_table_source.data.update(mf=[det_data["mf"][index]])
else:
magnetic_field_spinner.value = None
metadata_table_source.data.update(mf=[None])
if "temperature" in det_data:
temperature_spinner.value = det_data["temperature"][index]
if "temp" in det_data:
metadata_table_source.data.update(temp=[det_data["temp"][index]])
else:
temperature_spinner.value = None
metadata_table_source.data.update(temp=[None])
gamma, nu = calculate_pol(det_data, index)
omega = np.ones((IMAGE_H, IMAGE_W)) * det_data["rot_angle"][index]
omega = np.ones((IMAGE_H, IMAGE_W)) * det_data["omega"][index]
image_source.data.update(gamma=[gamma], nu=[nu], omega=[omega])
def update_overview_plot():
@@ -103,10 +123,10 @@ def create():
overview_x = np.mean(h5_data, axis=1)
overview_y = np.mean(h5_data, axis=2)
overview_plot_x_image_source.data.update(image=[overview_x], dw=[n_x])
overview_plot_y_image_source.data.update(image=[overview_y], dw=[n_y])
overview_plot_x_image_source.data.update(image=[overview_x], dw=[n_x], dh=[n_im])
overview_plot_y_image_source.data.update(image=[overview_y], dw=[n_y], dh=[n_im])
if proj_auto_toggle.active:
if proj_auto_checkbox.active:
im_min = min(np.min(overview_x), np.min(overview_y))
im_max = max(np.max(overview_x), np.max(overview_y))
@@ -118,46 +138,75 @@ def create():
overview_plot_x_image_glyph.color_mapper.high = im_max
overview_plot_y_image_glyph.color_mapper.high = im_max
if frame_button_group.active == 0: # Frame
overview_plot_x.axis[1].axis_label = "Frame"
overview_plot_y.axis[1].axis_label = "Frame"
frame_range.start = 0
frame_range.end = n_im
frame_range.reset_start = 0
frame_range.reset_end = n_im
frame_range.bounds = (0, n_im)
overview_plot_x_image_source.data.update(y=[0], dh=[n_im])
overview_plot_y_image_source.data.update(y=[0], dh=[n_im])
scan_motor = det_data["scan_motor"]
overview_plot_y.axis[1].axis_label = f"Scanning motor, {scan_motor}"
elif frame_button_group.active == 1: # Omega
overview_plot_x.axis[1].axis_label = "Omega"
overview_plot_y.axis[1].axis_label = "Omega"
var = det_data[scan_motor]
var_start = var[0]
var_end = var[-1] + (var[-1] - var[0]) / (n_im - 1)
om = det_data["rot_angle"]
om_start = om[0]
om_end = (om[-1] - om[0]) * n_im / (n_im - 1)
overview_plot_x_image_source.data.update(y=[om_start], dh=[om_end])
overview_plot_y_image_source.data.update(y=[om_start], dh=[om_end])
scanning_motor_range.start = var_start
scanning_motor_range.end = var_end
scanning_motor_range.reset_start = var_start
scanning_motor_range.reset_end = var_end
scanning_motor_range.bounds = (var_start, var_end)
def filelist_callback(_attr, _old, new):
def file_select_callback(_attr, old, new):
nonlocal det_data
det_data = pyzebra.read_detector_data(new)
if not new:
# skip empty selections
return
# Avoid selection of multiple indices (via Shift+Click or Ctrl+Click)
if len(new) > 1:
# drop selection to the previous one
file_select.value = old
return
if len(old) > 1:
# skip unnecessary update caused by selection drop
return
det_data = pyzebra.read_detector_data(new[0])
index_spinner.value = 0
index_spinner.high = det_data["data"].shape[0] - 1
index_slider.end = det_data["data"].shape[0] - 1
zebra_mode = det_data["zebra_mode"]
if zebra_mode == "nb":
metadata_table_source.data.update(geom=["normal beam"])
else: # zebra_mode == "bi"
metadata_table_source.data.update(geom=["bisecting"])
update_image(0)
update_overview_plot()
filelist = Select()
filelist.on_change("value", filelist_callback)
file_select = MultiSelect(title="Available .hdf files:", width=210, height=250)
file_select.on_change("value", file_select_callback)
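Although file_select is now a MultiSelect (matching the other panels), the hdf viewer displays one file at a time, so the callback above reverts Shift/Ctrl multi-picks and swallows the echo update that the revert itself triggers. The guard in isolation:

from bokeh.models import MultiSelect

select = MultiSelect(options=[("a.hdf", "a.hdf"), ("b.hdf", "b.hdf")])

def select_callback(_attr, old, new):
    if not new:
        return  # skip empty selections
    if len(new) > 1:
        select.value = old  # drop a multi-selection back to the previous pick
        return
    if len(old) > 1:
        return  # skip the update caused by the revert above
    print("selected:", new[0])

select.on_change("value", select_callback)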
def index_spinner_callback(_attr, _old, new):
def index_callback(_attr, _old, new):
update_image(new)
index_spinner = Spinner(title="Image index:", value=0, low=0)
index_spinner.on_change("value", index_spinner_callback)
index_slider = Slider(value=0, start=0, end=1, show_value=False, width=400)
index_spinner = Spinner(title="Image index:", value=0, low=0, width=100)
index_spinner.on_change("value", index_callback)
index_slider.js_link("value_throttled", index_spinner, "value")
index_spinner.js_link("value", index_slider, "value")
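The slider/spinner pair is synchronized entirely in the browser: js_link forwards the throttled slider value to the spinner and the spinner value back, with no server round-trip per drag tick. Standalone sketch:

from bokeh.models import Slider, Spinner

index_slider = Slider(value=0, start=0, end=10, show_value=False)
index_spinner = Spinner(title="Image index:", value=0, low=0)

index_slider.js_link("value_throttled", index_spinner, "value")  # slider -> spinner
index_spinner.js_link("value", index_slider, "value")            # spinner -> slider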
plot = Plot(
x_range=Range1d(0, IMAGE_W, bounds=(0, IMAGE_W)),
y_range=Range1d(0, IMAGE_H, bounds=(0, IMAGE_H)),
plot_height=IMAGE_H * 3,
plot_width=IMAGE_W * 3,
plot_height=IMAGE_PLOT_H,
plot_width=IMAGE_PLOT_W,
toolbar_location="left",
)
@@ -210,8 +259,8 @@ def create():
proj_v = Plot(
x_range=plot.x_range,
y_range=DataRange1d(),
plot_height=200,
plot_width=IMAGE_W * 3,
plot_height=150,
plot_width=IMAGE_PLOT_W,
toolbar_location=None,
)
@@ -227,8 +276,8 @@ def create():
proj_h = Plot(
x_range=DataRange1d(),
y_range=plot.y_range,
plot_height=IMAGE_H * 3,
plot_width=200,
plot_height=IMAGE_PLOT_H,
plot_width=150,
toolbar_location=None,
)
@@ -284,15 +333,18 @@ def create():
)
plot.toolbar.active_scroll = wheelzoomtool
# shared frame range
frame_range = DataRange1d()
# shared frame ranges
frame_range = Range1d(0, 1, bounds=(0, 1))
scanning_motor_range = Range1d(0, 1, bounds=(0, 1))
det_x_range = Range1d(0, IMAGE_W, bounds=(0, IMAGE_W))
overview_plot_x = Plot(
title=Title(text="Projections on X-axis"),
x_range=det_x_range,
y_range=frame_range,
plot_height=500,
plot_width=IMAGE_W * 3,
extra_y_ranges={"scanning_motor": scanning_motor_range},
plot_height=400,
plot_width=IMAGE_PLOT_W - 3,
)
# ---- tools
@@ -328,8 +380,9 @@ def create():
title=Title(text="Projections on Y-axis"),
x_range=det_y_range,
y_range=frame_range,
plot_height=500,
plot_width=IMAGE_H * 3,
extra_y_ranges={"scanning_motor": scanning_motor_range},
plot_height=400,
plot_width=IMAGE_PLOT_H + 22,
)
# ---- tools
@@ -343,7 +396,12 @@ def create():
# ---- axes
overview_plot_y.add_layout(LinearAxis(axis_label="Coordinate Y, pix"), place="below")
overview_plot_y.add_layout(
LinearAxis(axis_label="Frame", major_label_orientation="vertical"), place="left"
LinearAxis(
y_range_name="scanning_motor",
axis_label="Scanning motor",
major_label_orientation="vertical",
),
place="right",
)
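The right-hand scanning-motor axis rides on a named extra y-range while the glyphs keep using the frame range; this is the standard Bokeh twin-axis idiom, sketched here with figure():

from bokeh.models import LinearAxis, Range1d
from bokeh.plotting import figure

p = figure(height=300, width=400, y_range=Range1d(0, 100))
p.extra_y_ranges = {"scanning_motor": Range1d(0.0, 1.0)}
p.add_layout(
    LinearAxis(y_range_name="scanning_motor", axis_label="Scanning motor"), "right"
)
# renderers created with y_range_name="scanning_motor" scale to the right axis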
# ---- grid lines
@@ -360,17 +418,11 @@ def create():
overview_plot_y_image_source, overview_plot_y_image_glyph, name="image_glyph"
)
def frame_button_group_callback(_active):
update_overview_plot()
frame_button_group = RadioButtonGroup(labels=["Frames", "Omega"], active=0)
frame_button_group.on_click(frame_button_group_callback)
roi_avg_plot = Plot(
x_range=DataRange1d(),
y_range=DataRange1d(),
plot_height=200,
plot_width=IMAGE_W * 3,
plot_height=150,
plot_width=IMAGE_PLOT_W,
toolbar_location="left",
)
@@ -400,15 +452,13 @@ def create():
overview_plot_x_image_glyph.color_mapper = LinearColorMapper(palette=cmap_dict[new])
overview_plot_y_image_glyph.color_mapper = LinearColorMapper(palette=cmap_dict[new])
colormap = Select(title="Colormap:", options=list(cmap_dict.keys()), default_size=145)
colormap = Select(title="Colormap:", options=list(cmap_dict.keys()), width=210)
colormap.on_change("value", colormap_callback)
colormap.value = "plasma"
radio_button_group = RadioButtonGroup(labels=["normal beam", "bisecting"], active=0)
STEP = 1
# ---- colormap auto toggle button
def auto_toggle_callback(state):
def main_auto_checkbox_callback(state):
if state:
display_min_spinner.disabled = True
display_max_spinner.disabled = True
@@ -418,45 +468,43 @@ def create():
update_image()
auto_toggle = Toggle(
label="Main Auto Range", active=True, button_type="default", default_size=125
main_auto_checkbox = CheckboxGroup(
labels=["Main Auto Range"], active=[0], width=145, margin=[10, 5, 0, 5]
)
auto_toggle.on_click(auto_toggle_callback)
main_auto_checkbox.on_click(main_auto_checkbox_callback)
# ---- colormap display max value
def display_max_spinner_callback(_attr, _old_value, new_value):
display_min_spinner.high = new_value - STEP
image_glyph.color_mapper.high = new_value
display_max_spinner = Spinner(
title="Max Value:",
low=0 + STEP,
value=1,
step=STEP,
disabled=auto_toggle.active,
default_size=80,
disabled=bool(main_auto_checkbox.active),
width=100,
height=31,
)
display_max_spinner.on_change("value", display_max_spinner_callback)
# ---- colormap display min value
def display_min_spinner_callback(_attr, _old_value, new_value):
display_max_spinner.low = new_value + STEP
image_glyph.color_mapper.low = new_value
display_min_spinner = Spinner(
title="Min Value:",
low=0,
high=1 - STEP,
value=0,
step=STEP,
disabled=auto_toggle.active,
default_size=80,
disabled=bool(main_auto_checkbox.active),
width=100,
height=31,
)
display_min_spinner.on_change("value", display_min_spinner_callback)
PROJ_STEP = 0.1
# ---- proj colormap auto toggle button
def proj_auto_toggle_callback(state):
def proj_auto_checkbox_callback(state):
if state:
proj_display_min_spinner.disabled = True
proj_display_max_spinner.disabled = True
@@ -466,51 +514,48 @@ def create():
update_overview_plot()
proj_auto_toggle = Toggle(
label="Proj Auto Range", active=True, button_type="default", default_size=125
proj_auto_checkbox = CheckboxGroup(
labels=["Projections Auto Range"], active=[0], width=145, margin=[10, 5, 0, 5]
)
proj_auto_toggle.on_click(proj_auto_toggle_callback)
proj_auto_checkbox.on_click(proj_auto_checkbox_callback)
# ---- proj colormap display max value
def proj_display_max_spinner_callback(_attr, _old_value, new_value):
proj_display_min_spinner.high = new_value - PROJ_STEP
overview_plot_x_image_glyph.color_mapper.high = new_value
overview_plot_y_image_glyph.color_mapper.high = new_value
proj_display_max_spinner = Spinner(
title="Max Value:",
low=0 + PROJ_STEP,
value=1,
step=PROJ_STEP,
disabled=proj_auto_toggle.active,
default_size=80,
disabled=bool(proj_auto_checkbox.active),
width=100,
height=31,
)
proj_display_max_spinner.on_change("value", proj_display_max_spinner_callback)
# ---- proj colormap display min value
def proj_display_min_spinner_callback(_attr, _old_value, new_value):
proj_display_max_spinner.low = new_value + PROJ_STEP
overview_plot_x_image_glyph.color_mapper.low = new_value
overview_plot_y_image_glyph.color_mapper.low = new_value
proj_display_min_spinner = Spinner(
title="Min Value:",
low=0,
high=1 - PROJ_STEP,
value=0,
step=PROJ_STEP,
disabled=proj_auto_toggle.active,
default_size=80,
disabled=bool(proj_auto_checkbox.active),
width=100,
height=31,
)
proj_display_min_spinner.on_change("value", proj_display_min_spinner_callback)
def hkl_button_callback():
index = index_spinner.value
geometry = "bi" if radio_button_group.active else "nb"
h, k, l = calculate_hkl(det_data, index, geometry)
h, k, l = calculate_hkl(det_data, index)
image_source.data.update(h=[h], k=[k], l=[l])
hkl_button = Button(label="Calculate hkl (slow)")
hkl_button = Button(label="Calculate hkl (slow)", width=210)
hkl_button.on_click(hkl_button_callback)
selection_list = TextAreaInput(rows=7)
@@ -526,7 +571,7 @@ def create():
int(np.ceil(frame_range.end)),
]
filename_id = filelist.value[-8:-4]
filename_id = file_select.value[0][-8:-4]
if filename_id in roi_selection:
roi_selection[f"{filename_id}"].append(selection)
else:
@@ -537,32 +582,38 @@ def create():
selection_button = Button(label="Add selection")
selection_button.on_click(selection_button_callback)
magnetic_field_spinner = Spinner(
title="Magnetic field:", format="0.00", width=145, disabled=True
metadata_table_source = ColumnDataSource(dict(geom=[""], temp=[None], mf=[None]))
num_formatter = NumberFormatter(format="0.00", nan_format="")
metadata_table = DataTable(
source=metadata_table_source,
columns=[
TableColumn(field="geom", title="Geometry", width=100),
TableColumn(field="temp", title="Temperature", formatter=num_formatter, width=100),
TableColumn(field="mf", title="Magnetic Field", formatter=num_formatter, width=100),
],
width=300,
height=50,
autosize_mode="none",
index_position=None,
)
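The disabled spinners for temperature/field give way to a one-row DataTable whose NumberFormatter(nan_format="") renders missing values as blank cells. A self-contained sketch of the same construction:

from bokeh.models import ColumnDataSource, DataTable, NumberFormatter, TableColumn

source = ColumnDataSource(dict(geom=[""], temp=[None], mf=[None]))
num_formatter = NumberFormatter(format="0.00", nan_format="")  # blank instead of NaN
table = DataTable(
    source=source,
    columns=[
        TableColumn(field="geom", title="Geometry"),
        TableColumn(field="temp", title="Temperature", formatter=num_formatter),
        TableColumn(field="mf", title="Magnetic Field", formatter=num_formatter),
    ],
    height=50,
    index_position=None,  # hide the row-index column
)
source.data.update(temp=[1.5], mf=[0.75])  # refresh the single metadata row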
temperature_spinner = Spinner(title="Temperature:", format="0.00", width=145, disabled=True)
# Final layout
import_layout = column(proposal_textinput, upload_div, upload_button, file_select)
layout_image = column(gridplot([[proj_v, None], [plot, proj_h]], merge_tools=False))
colormap_layout = column(
row(colormap),
row(column(Spacer(height=19), auto_toggle), display_max_spinner, display_min_spinner),
row(
column(Spacer(height=19), proj_auto_toggle),
proj_display_max_spinner,
proj_display_min_spinner,
),
colormap,
main_auto_checkbox,
row(display_min_spinner, display_max_spinner),
proj_auto_checkbox,
row(proj_display_min_spinner, proj_display_max_spinner),
)
geometry_div = Div(text="Geometry:", margin=[5, 5, -5, 5])
hkl_layout = column(column(geometry_div, radio_button_group), hkl_button)
params_layout = row(magnetic_field_spinner, temperature_spinner)
layout_controls = row(
column(selection_button, selection_list),
Spacer(width=20),
column(frame_button_group, colormap_layout),
Spacer(width=20),
column(index_spinner, params_layout, hkl_layout),
column(
row(index_spinner, column(Spacer(height=25), index_slider)), metadata_table, hkl_button
),
)
layout_overview = column(
@@ -574,44 +625,41 @@ def create():
),
)
upload_div = Div(text="Upload .cami file:")
tab_layout = row(
column(
row(column(Spacer(height=5), upload_div), upload_button, filelist),
layout_overview,
layout_controls,
),
column(import_layout, colormap_layout),
column(layout_overview, layout_controls),
column(roi_avg_plot, layout_image),
)
return Panel(child=tab_layout, title="hdf viewer")
def calculate_hkl(det_data, index, geometry):
def calculate_hkl(det_data, index):
h = np.empty(shape=(IMAGE_H, IMAGE_W))
k = np.empty(shape=(IMAGE_H, IMAGE_W))
l = np.empty(shape=(IMAGE_H, IMAGE_W))
wave = det_data["wave"]
ddist = det_data["ddist"]
gammad = det_data["pol_angle"][index]
om = det_data["rot_angle"][index]
nud = det_data["tlt_angle"]
ub = det_data["UB"]
gammad = det_data["gamma"][index]
om = det_data["omega"][index]
nud = det_data["nu"]
ub = det_data["ub"]
geometry = det_data["zebra_mode"]
if geometry == "bi":
ch = det_data["chi_angle"][index]
ph = det_data["phi_angle"][index]
chi = det_data["chi"][index]
phi = det_data["phi"][index]
elif geometry == "nb":
ch = 0
ph = 0
chi = 0
phi = 0
else:
raise ValueError(f"Unknown geometry type '{geometry}'")
for xi in np.arange(IMAGE_W):
for yi in np.arange(IMAGE_H):
h[yi, xi], k[yi, xi], l[yi, xi] = pyzebra.ang2hkl(
wave, ddist, gammad, om, ch, ph, nud, ub, xi, yi
wave, ddist, gammad, om, chi, phi, nud, ub, xi, yi
)
return h, k, l
@@ -622,8 +670,8 @@ def calculate_pol(det_data, index):
nu = np.empty(shape=(IMAGE_H, IMAGE_W))
ddist = det_data["ddist"]
gammad = det_data["pol_angle"][index]
nud = det_data["tlt_angle"]
gammad = det_data["gamma"][index]
nud = det_data["nu"]
for xi in np.arange(IMAGE_W):
for yi in np.arange(IMAGE_H):


@@ -4,15 +4,14 @@ import itertools
import os
import tempfile
import types
from copy import deepcopy
import numpy as np
from bokeh.layouts import column, row
from bokeh.models import (
Asterisk,
BasicTicker,
Button,
CheckboxEditor,
CheckboxGroup,
ColumnDataSource,
CustomJS,
DataRange1d,
@@ -21,6 +20,8 @@ from bokeh.models import (
Dropdown,
FileInput,
Grid,
HoverTool,
Legend,
Line,
LinearAxis,
MultiLine,
@@ -40,33 +41,31 @@ from bokeh.models import (
Tabs,
TextAreaInput,
TextInput,
Toggle,
WheelZoomTool,
Whisker,
)
from bokeh.palettes import Category10
from bokeh.palettes import Category10, Turbo256
from bokeh.transform import linear_cmap
import pyzebra
from pyzebra.ccl_io import AREA_METHODS
javaScript = """
setTimeout(function() {
const filename = 'output' + js_data.data['ext']
const blob = new Blob([js_data.data['cont']], {type: 'text/plain'})
for (let i = 0; i < js_data.data['fname'].length; i++) {
if (js_data.data['content'][i] === "") continue;
const blob = new Blob([js_data.data['content'][i]], {type: 'text/plain'})
const link = document.createElement('a');
document.body.appendChild(link);
const url = window.URL.createObjectURL(blob);
link.href = url;
link.download = filename;
link.download = js_data.data['fname'][i];
link.click();
window.URL.revokeObjectURL(url);
document.body.removeChild(link);
}, 500);
}
"""
PROPOSAL_PATH = "/afs/psi.ch/project/sinqdata/2020/zebra/"
PLOT_TYPES = ("single scan", "overview")
def color_palette(n_colors):
palette = itertools.cycle(Category10[10])
@@ -74,90 +73,96 @@ def create():
def create():
det_data = {}
det_data = []
fit_params = {}
peak_pos_textinput_lock = False
js_data = ColumnDataSource(data=dict(cont=[], ext=[]))
js_data = ColumnDataSource(data=dict(content=["", ""], fname=["", ""]))
def proposal_textinput_callback(_attr, _old, new):
full_proposal_path = os.path.join(PROPOSAL_PATH, new.strip())
dat_file_list = []
for file in os.listdir(full_proposal_path):
if file.endswith(".dat"):
dat_file_list.append((os.path.join(full_proposal_path, file), file))
file_select.options = dat_file_list
file_select.value = dat_file_list[0][0]
proposal = new.strip()
year = new[:4]
proposal_path = f"/afs/psi.ch/project/sinqdata/{year}/zebra/{proposal}"
file_list = []
for file in os.listdir(proposal_path):
if file.endswith((".ccl", ".dat")):
file_list.append((os.path.join(proposal_path, file), file))
file_select.options = file_list
proposal_textinput = TextInput(title="Enter proposal number:", default_size=145)
proposal_textinput = TextInput(title="Proposal number:", width=210)
proposal_textinput.on_change("value", proposal_textinput_callback)
def _init_datatable():
scan_list = list(det_data["scan"].keys())
scan_list = [s["idx"] for s in det_data]
file_list = []
extra_meta = det_data.get("extra_meta", {})
for scan_id in scan_list:
if scan_id in extra_meta:
f_path = extra_meta[scan_id]["original_filename"]
else:
f_path = det_data["meta"]["original_filename"]
_, f_name = os.path.split(f_path)
file_list.append(f_name)
for scan in det_data:
file_list.append(os.path.basename(scan["original_filename"]))
scan_table_source.data.update(
file=file_list,
scan=scan_list,
param=[""] * len(scan_list),
peaks=[0] * len(scan_list),
param=[None] * len(scan_list),
fit=[0] * len(scan_list),
export=[True] * len(scan_list),
)
scan_table_source.selected.indices = []
scan_table_source.selected.indices = [0]
def file_select_callback(_attr, _old, _new):
pass
param_select.value = "user defined"
file_select = Select(title="Available .dat files")
file_select.on_change("value", file_select_callback)
file_select = MultiSelect(title="Available .ccl/.dat files:", width=210, height=250)
def file_open_button_callback():
nonlocal det_data
with open(file_select.value) as file:
_, ext = os.path.splitext(file_select.value)
det_data = pyzebra.parse_1D(file, ext)
det_data = []
for f_name in file_select.value:
with open(f_name) as file:
base, ext = os.path.splitext(f_name)
if det_data:
append_data = pyzebra.parse_1D(file, ext)
pyzebra.normalize_dataset(append_data, monitor_spinner.value)
det_data.extend(append_data)
else:
det_data = pyzebra.parse_1D(file, ext)
pyzebra.normalize_dataset(det_data, monitor_spinner.value)
js_data.data.update(fname=[base + ".comm", base + ".incomm"])
_init_datatable()
file_open_button = Button(label="Open", default_size=100)
file_open_button = Button(label="Open New", width=100)
file_open_button.on_click(file_open_button_callback)
def file_append_button_callback():
with open(file_select.value) as file:
_, ext = os.path.splitext(file_select.value)
append_data = pyzebra.parse_1D(file, ext)
pyzebra.add_dict(det_data, append_data)
for f_name in file_select.value:
with open(f_name) as file:
_, ext = os.path.splitext(f_name)
append_data = pyzebra.parse_1D(file, ext)
pyzebra.normalize_dataset(append_data, monitor_spinner.value)
det_data.extend(append_data)
_init_datatable()
file_append_button = Button(label="Append", default_size=100)
file_append_button = Button(label="Append", width=100)
file_append_button.on_click(file_append_button_callback)
def upload_button_callback(_attr, _old, new):
nonlocal det_data
det_data = {}
det_data = []
for f_str, f_name in zip(new, upload_button.filename):
with io.StringIO(base64.b64decode(f_str).decode()) as file:
_, ext = os.path.splitext(f_name)
base, ext = os.path.splitext(f_name)
if det_data:
append_data = pyzebra.parse_1D(file, ext)
pyzebra.add_dict(det_data, append_data)
pyzebra.normalize_dataset(append_data, monitor_spinner.value)
det_data.extend(append_data)
else:
det_data = pyzebra.parse_1D(file, ext)
pyzebra.normalize_dataset(det_data, monitor_spinner.value)
js_data.data.update(fname=[base + ".comm", base + ".incomm"])
_init_datatable()
upload_button = FileInput(accept=".dat", multiple=True)
upload_div = Div(text="or upload new .ccl/.dat files:", margin=(5, 5, 0, 5))
upload_button = FileInput(accept=".ccl,.dat", multiple=True, width=200)
upload_button.on_change("value", upload_button_callback)
def append_upload_button_callback(_attr, _old, new):
@@ -165,159 +170,152 @@ def create():
with io.StringIO(base64.b64decode(f_str).decode()) as file:
_, ext = os.path.splitext(f_name)
append_data = pyzebra.parse_1D(file, ext)
pyzebra.add_dict(det_data, append_data)
pyzebra.normalize_dataset(append_data, monitor_spinner.value)
det_data.extend(append_data)
_init_datatable()
append_upload_button = FileInput(accept=".dat", multiple=True)
append_upload_div = Div(text="append extra files:", margin=(5, 5, 0, 5))
append_upload_button = FileInput(accept=".ccl,.dat", multiple=True, width=200)
append_upload_button.on_change("value", append_upload_button_callback)
def monitor_spinner_callback(_attr, _old, new):
if det_data:
pyzebra.normalize_dataset(det_data, new)
_update_plot()
monitor_spinner = Spinner(title="Monitor:", mode="int", value=100_000, low=1, width=145)
monitor_spinner.on_change("value", monitor_spinner_callback)
def _update_table():
num_of_peaks = [len(scan.get("peak_indexes", [])) for scan in det_data["scan"].values()]
fit_ok = [(1 if "fit" in scan else 0) for scan in det_data["scan"].values()]
scan_table_source.data.update(peaks=num_of_peaks, fit=fit_ok)
fit_ok = [(1 if "fit" in scan else 0) for scan in det_data]
scan_table_source.data.update(fit=fit_ok)
def _update_plot():
_update_single_scan_plot(_get_selected_scan())
_update_overview()
def _update_single_scan_plot(scan):
nonlocal peak_pos_textinput_lock
peak_pos_textinput_lock = True
scan_motor = scan["scan_motor"]
y = scan["Counts"]
x = scan["om"]
x = scan[scan_motor]
plot.axis[0].axis_label = scan_motor
plot_scatter_source.data.update(x=x, y=y, y_upper=y + np.sqrt(y), y_lower=y - np.sqrt(y))
num_of_peaks = len(scan.get("peak_indexes", []))
if num_of_peaks is not None and num_of_peaks > 0:
peak_indexes = scan["peak_indexes"]
if len(peak_indexes) == 1:
peak_pos_textinput.value = str(x[peak_indexes[0]])
else:
peak_pos_textinput.value = str([x[ind] for ind in peak_indexes])
plot_peak_source.data.update(x=x[peak_indexes], y=scan["peak_heights"])
plot_line_smooth_source.data.update(x=x, y=scan["smooth_peaks"])
else:
peak_pos_textinput.value = None
plot_peak_source.data.update(x=[], y=[])
plot_line_smooth_source.data.update(x=[], y=[])
peak_pos_textinput_lock = False
fit = scan.get("fit")
if fit is not None:
x = scan["fit"]["x_fit"]
plot_gauss_source.data.update(x=x, y=scan["fit"]["comps"]["gaussian"])
plot_bkg_source.data.update(x=x, y=scan["fit"]["comps"]["background"])
params = fit["result"].params
fit_output_textinput.value = (
"Gaussian: centre = %9.4f, sigma = %9.4f, area = %9.4f \n"
"background: slope = %9.4f, intercept = %9.4f \n"
"Int. area = %9.4f +/- %9.4f \n"
"fit area = %9.4f +/- %9.4f \n"
"ratio((fit-int)/fit) = %9.4f"
% (
params["g_cen"].value,
params["g_width"].value,
params["g_amp"].value,
params["slope"].value,
params["intercept"].value,
fit["int_area"].n,
fit["int_area"].s,
params["g_amp"].value,
params["g_amp"].stderr,
(params["g_amp"].value - fit["int_area"].n) / params["g_amp"].value,
)
)
numfit_min, numfit_max = fit["numfit"]
if numfit_min is None:
numfit_min_span.location = None
else:
numfit_min_span.location = x[numfit_min]
x_fit = np.linspace(x[0], x[-1], 100)
plot_fit_source.data.update(x=x_fit, y=fit.eval(x=x_fit))
if numfit_max is None:
numfit_max_span.location = None
else:
numfit_max_span.location = x[numfit_max]
x_bkg = []
y_bkg = []
xs_peak = []
ys_peak = []
comps = fit.eval_components(x=x_fit)
for i, model in enumerate(fit_params):
if "linear" in model:
x_bkg = x_fit
y_bkg = comps[f"f{i}_"]
elif any(val in model for val in ("gaussian", "voigt", "pvoigt")):
xs_peak.append(x_fit)
ys_peak.append(comps[f"f{i}_"])
plot_bkg_source.data.update(x=x_bkg, y=y_bkg)
plot_peak_source.data.update(xs=xs_peak, ys=ys_peak)
fit_output_textinput.value = fit.fit_report()
else:
plot_gauss_source.data.update(x=[], y=[])
plot_fit_source.data.update(x=[], y=[])
plot_bkg_source.data.update(x=[], y=[])
plot_peak_source.data.update(xs=[], ys=[])
fit_output_textinput.value = ""
numfit_min_span.location = None
numfit_max_span.location = None
def _update_overview():
xs = []
ys = []
param = []
for ind, p in enumerate(scan_table_source.data["param"]):
if p:
s = scan_table_source.data["scan"][ind]
xs.append(np.array(det_data["scan"][s]["om"]))
ys.append(np.array(det_data["scan"][s]["Counts"]))
x = []
y = []
par = []
for s, p in enumerate(scan_table_source.data["param"]):
if p is not None:
scan = det_data[s]
scan_motor = scan["scan_motor"]
xs.append(scan[scan_motor])
x.extend(scan[scan_motor])
ys.append(scan["Counts"])
y.extend([float(p)] * len(scan[scan_motor]))
param.append(float(p))
par.extend(scan["Counts"])
if det_data:
scan_motor = det_data[0]["scan_motor"]
ov_plot.axis[0].axis_label = scan_motor
ov_param_plot.axis[0].axis_label = scan_motor
ov_plot_mline_source.data.update(xs=xs, ys=ys, param=param, color=color_palette(len(xs)))
if y:
mapper["transform"].low = np.min([np.min(y) for y in ys])
mapper["transform"].high = np.max([np.max(y) for y in ys])
ov_param_plot_scatter_source.data.update(x=x, y=y, param=par)
# Main plot
plot = Plot(x_range=DataRange1d(), y_range=DataRange1d(), plot_height=400, plot_width=700)
plot = Plot(
x_range=DataRange1d(),
y_range=DataRange1d(only_visible=True),
plot_height=450,
plot_width=700,
)
plot.add_layout(LinearAxis(axis_label="Counts"), place="left")
plot.add_layout(LinearAxis(axis_label="Omega"), place="below")
plot.add_layout(LinearAxis(axis_label="Scan motor"), place="below")
plot.add_layout(Grid(dimension=0, ticker=BasicTicker()))
plot.add_layout(Grid(dimension=1, ticker=BasicTicker()))
plot_scatter_source = ColumnDataSource(dict(x=[0], y=[0], y_upper=[0], y_lower=[0]))
plot.add_glyph(
plot_scatter_source, Scatter(x="x", y="y", line_color="steelblue", name="single scan")
)
plot.add_layout(
Whisker(
source=plot_scatter_source,
base="x",
upper="y_upper",
lower="y_lower",
name="single scan",
)
plot_scatter = plot.add_glyph(
plot_scatter_source, Scatter(x="x", y="y", line_color="steelblue")
)
plot.add_layout(Whisker(source=plot_scatter_source, base="x", upper="y_upper", lower="y_lower"))
plot_line_smooth_source = ColumnDataSource(dict(x=[0], y=[0]))
plot.add_glyph(
plot_line_smooth_source,
Line(x="x", y="y", line_color="steelblue", line_dash="dashed", name="single scan"),
)
plot_gauss_source = ColumnDataSource(dict(x=[0], y=[0]))
plot.add_glyph(
plot_gauss_source,
Line(x="x", y="y", line_color="red", line_dash="dashed", name="single scan"),
)
plot_fit_source = ColumnDataSource(dict(x=[0], y=[0]))
plot_fit = plot.add_glyph(plot_fit_source, Line(x="x", y="y"))
plot_bkg_source = ColumnDataSource(dict(x=[0], y=[0]))
plot.add_glyph(
plot_bkg_source,
Line(x="x", y="y", line_color="green", line_dash="dashed", name="single scan"),
plot_bkg = plot.add_glyph(
plot_bkg_source, Line(x="x", y="y", line_color="green", line_dash="dashed")
)
plot_peak_source = ColumnDataSource(dict(x=[], y=[]))
plot.add_glyph(
plot_peak_source, Asterisk(x="x", y="y", size=10, line_color="red", name="single scan")
plot_peak_source = ColumnDataSource(dict(xs=[[0]], ys=[[0]]))
plot_peak = plot.add_glyph(
plot_peak_source, MultiLine(xs="xs", ys="ys", line_color="red", line_dash="dashed")
)
numfit_min_span = Span(
location=None, dimension="height", line_dash="dashed", name="single scan"
)
plot.add_layout(numfit_min_span)
fit_from_span = Span(location=None, dimension="height", line_dash="dashed")
plot.add_layout(fit_from_span)
numfit_max_span = Span(
location=None, dimension="height", line_dash="dashed", name="single scan"
fit_to_span = Span(location=None, dimension="height", line_dash="dashed")
plot.add_layout(fit_to_span)
plot.add_layout(
Legend(
items=[
("data", [plot_scatter]),
("best fit", [plot_fit]),
("peak", [plot_peak]),
("linear", [plot_bkg]),
],
location="top_left",
click_policy="hide",
)
)
plot.add_layout(numfit_max_span)
plot.add_tools(PanTool(), WheelZoomTool(), ResetTool())
plot.toolbar.logo = None
@@ -326,22 +324,48 @@ def create():
ov_plot = Plot(x_range=DataRange1d(), y_range=DataRange1d(), plot_height=400, plot_width=700)
ov_plot.add_layout(LinearAxis(axis_label="Counts"), place="left")
ov_plot.add_layout(LinearAxis(axis_label="Omega"), place="below")
ov_plot.add_layout(LinearAxis(axis_label="Scan motor"), place="below")
ov_plot.add_layout(Grid(dimension=0, ticker=BasicTicker()))
ov_plot.add_layout(Grid(dimension=1, ticker=BasicTicker()))
ov_plot_mline_source = ColumnDataSource(dict(xs=[], ys=[], param=[], color=[]))
ov_plot.add_glyph(
ov_plot_mline_source, MultiLine(xs="xs", ys="ys", line_color="color", name="overview")
)
ov_plot.add_glyph(ov_plot_mline_source, MultiLine(xs="xs", ys="ys", line_color="color"))
hover_tool = HoverTool(tooltips=[("param", "@param")])
ov_plot.add_tools(PanTool(), WheelZoomTool(), hover_tool, ResetTool())
ov_plot.add_tools(PanTool(), WheelZoomTool(), ResetTool())
ov_plot.toolbar.logo = None
# Overview params plot
ov_param_plot = Plot(
x_range=DataRange1d(), y_range=DataRange1d(), plot_height=400, plot_width=700
)
ov_param_plot.add_layout(LinearAxis(axis_label="Param"), place="left")
ov_param_plot.add_layout(LinearAxis(axis_label="Scan motor"), place="below")
ov_param_plot.add_layout(Grid(dimension=0, ticker=BasicTicker()))
ov_param_plot.add_layout(Grid(dimension=1, ticker=BasicTicker()))
ov_param_plot_scatter_source = ColumnDataSource(dict(x=[], y=[], param=[]))
mapper = linear_cmap(field_name="param", palette=Turbo256, low=0, high=50)
ov_param_plot.add_glyph(
ov_param_plot_scatter_source,
Scatter(x="x", y="y", line_color=mapper, fill_color=mapper, size=10),
)
ov_param_plot.add_tools(PanTool(), WheelZoomTool(), ResetTool())
ov_param_plot.toolbar.logo = None
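In the new overview map, each (scan-motor position, parameter) point is colored by its measured counts through a shared linear_cmap (initialized to 0-50 and later rescaled to the data), so fill and outline follow one scale. The glyph setup in isolation:

from bokeh.models import ColumnDataSource, Scatter
from bokeh.palettes import Turbo256
from bokeh.transform import linear_cmap

source = ColumnDataSource(dict(x=[1.0, 2.0, 3.0], y=[0.5, 0.5, 1.0], param=[10, 25, 40]))
mapper = linear_cmap(field_name="param", palette=Turbo256, low=0, high=50)
scatter = Scatter(x="x", y="y", line_color=mapper, fill_color=mapper, size=10)
# plot.add_glyph(source, scatter) attaches it to a Plot, as above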
# Plot tabs
plots = Tabs(
tabs=[Panel(child=plot, title="single scan"), Panel(child=ov_plot, title="overview")]
tabs=[
Panel(child=plot, title="single scan"),
Panel(child=ov_plot, title="overview"),
Panel(child=ov_param_plot, title="overview map"),
]
)
# Scan select
@@ -362,23 +386,24 @@ def create():
_update_plot()
scan_table_source = ColumnDataSource(
dict(file=[], scan=[], param=[], peaks=[], fit=[], export=[])
)
def scan_table_source_callback(_attr, _old, _new):
_update_preview()
scan_table_source = ColumnDataSource(dict(file=[], scan=[], param=[], fit=[], export=[]))
scan_table_source.on_change("data", scan_table_source_callback)
scan_table = DataTable(
source=scan_table_source,
columns=[
TableColumn(field="file", title="file", width=150),
TableColumn(field="scan", title="scan", width=50),
TableColumn(field="param", title="param", width=50),
TableColumn(field="peaks", title="Peaks", width=50),
TableColumn(field="param", title="param", editor=NumberEditor(), width=50),
TableColumn(field="fit", title="Fit", width=50),
TableColumn(field="export", title="Export", editor=CheckboxEditor(), width=50),
],
width=400,
index_position=None,
width=410, # +60 because of the index column
editable=True,
fit_columns=False,
autosize_mode="none",
)
def scan_table_source_callback(_attr, _old, _new):
@@ -389,42 +414,39 @@ def create():
scan_table_source.on_change("data", scan_table_source_callback)
def _get_selected_scan():
selected_index = scan_table_source.selected.indices[0]
selected_scan_id = scan_table_source.data["scan"][selected_index]
return det_data["scan"][selected_scan_id]
return det_data[scan_table_source.selected.indices[0]]
def peak_pos_textinput_callback(_attr, _old, new):
if new is not None and not peak_pos_textinput_lock:
scan = _get_selected_scan()
def param_select_callback(_attr, _old, new):
if new == "user defined":
param = [None] * len(det_data)
else:
param = [scan[new] for scan in det_data]
peak_ind = (np.abs(scan["om"] - float(new))).argmin()
scan["peak_indexes"] = np.array([peak_ind], dtype=np.int64)
scan["peak_heights"] = np.array([scan["smooth_peaks"][peak_ind]])
_update_table()
_update_plot()
scan_table_source.data["param"] = param
peak_pos_textinput = TextInput(title="Peak position:", default_size=145)
peak_pos_textinput.on_change("value", peak_pos_textinput_callback)
peak_int_ratio_spinner = Spinner(
title="Peak intensity ratio:", value=0.8, step=0.01, low=0, high=1, default_size=145
param_select = Select(
title="Parameter:",
options=["user defined", "temp", "mf", "h", "k", "l"],
value="user defined",
width=145,
)
peak_prominence_spinner = Spinner(title="Peak prominence:", value=50, low=0, default_size=145)
smooth_toggle = Toggle(label="Smooth curve", default_size=145)
window_size_spinner = Spinner(title="Window size:", value=7, step=2, low=1, default_size=145)
poly_order_spinner = Spinner(title="Poly order:", value=3, low=0, default_size=145)
param_select.on_change("value", param_select_callback)
integ_from = Spinner(title="Integrate from:", default_size=145)
integ_to = Spinner(title="to:", default_size=145)
def fit_from_spinner_callback(_attr, _old, new):
fit_from_span.location = new
def fitparam_reset_button_callback():
...
fit_from_spinner = Spinner(title="Fit from:", width=145)
fit_from_spinner.on_change("value", fit_from_spinner_callback)
fitparam_reset_button = Button(label="Reset to defaults", default_size=145, disabled=True)
fitparam_reset_button.on_click(fitparam_reset_button_callback)
def fit_to_spinner_callback(_attr, _old, new):
fit_to_span.location = new
fit_to_spinner = Spinner(title="to:", width=145)
fit_to_spinner.on_change("value", fit_to_spinner_callback)
def fitparams_add_dropdown_callback(click):
new_tag = str(fitparams_select.tags[0]) # bokeh requires (str, str) for MultiSelect options
# bokeh requires (str, str) for MultiSelect options
new_tag = f"{click.item}-{fitparams_select.tags[0]}"
fitparams_select.options.append((new_tag, click.item))
fit_params[new_tag] = fitparams_factory(click.item)
fitparams_select.tags[0] += 1
@@ -432,13 +454,13 @@ def create():
fitparams_add_dropdown = Dropdown(
label="Add fit function",
menu=[
("Background", "background"),
("Gauss", "gauss"),
("Linear", "linear"),
("Gaussian", "gaussian"),
("Voigt", "voigt"),
("Pseudo Voigt", "pseudovoigt"),
("Pseudo Voigt1", "pseudovoigt1"),
("Pseudo Voigt", "pvoigt"),
# ("Pseudo Voigt1", "pseudovoigt1"),
],
default_size=145,
width=145,
)
fitparams_add_dropdown.on_click(fitparams_add_dropdown_callback)
@@ -456,9 +478,9 @@ def create():
if new:
fitparams_table_source.data.update(fit_params[new[0]])
else:
fitparams_table_source.data.update(dict(param=[], guess=[], vary=[], min=[], max=[]))
fitparams_table_source.data.update(dict(param=[], value=[], vary=[], min=[], max=[]))
fitparams_select = MultiSelect(options=[], height=120, default_size=145)
fitparams_select = MultiSelect(options=[], height=120, width=145)
fitparams_select.tags = [0]
fitparams_select.on_change("value", fitparams_select_callback)
@@ -473,36 +495,44 @@ def create():
fitparams_select.value = []
fitparams_remove_button = Button(label="Remove fit function", default_size=145)
fitparams_remove_button = Button(label="Remove fit function", width=145)
fitparams_remove_button.on_click(fitparams_remove_button_callback)
def fitparams_factory(function):
if function == "background":
params = ["slope", "offset"]
elif function == "gauss":
params = ["center", "sigma", "amplitude"]
if function == "linear":
params = ["slope", "intercept"]
elif function == "gaussian":
params = ["amplitude", "center", "sigma"]
elif function == "voigt":
params = ["center", "sigma", "amplitude", "gamma"]
elif function == "pseudovoigt":
params = ["center", "sigma", "amplitude", "fraction"]
params = ["amplitude", "center", "sigma", "gamma"]
elif function == "pvoigt":
params = ["amplitude", "center", "sigma", "fraction"]
elif function == "pseudovoigt1":
params = ["center", "g_sigma", "l_sigma", "amplitude", "fraction"]
params = ["amplitude", "center", "g_sigma", "l_sigma", "fraction"]
else:
raise ValueError("Unknown fit function")
n = len(params)
fitparams = dict(
param=params, guess=[None] * n, vary=[True] * n, min=[None] * n, max=[None] * n,
param=params, value=[None] * n, vary=[True] * n, min=[None] * n, max=[None] * n,
)
if function == "linear":
fitparams["value"] = [0, 1]
fitparams["vary"] = [False, True]
fitparams["min"] = [None, 0]
elif function == "gaussian":
fitparams["min"] = [0, None, None]
return fitparams
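A quick look at what fitparams_factory above seeds for the linear background (slope fixed at 0, intercept varying and bounded below by 0, per the defaults just defined):

params = fitparams_factory("linear")
print(params["param"])  # ['slope', 'intercept']
print(params["value"])  # [0, 1]
print(params["vary"])   # [False, True]  -> slope is fixed by default
print(params["min"])    # [None, 0]      -> intercept constrained to >= 0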
fitparams_table_source = ColumnDataSource(dict(param=[], guess=[], vary=[], min=[], max=[]))
fitparams_table_source = ColumnDataSource(dict(param=[], value=[], vary=[], min=[], max=[]))
fitparams_table = DataTable(
source=fitparams_table_source,
columns=[
TableColumn(field="param", title="Parameter"),
TableColumn(field="guess", title="Guess", editor=NumberEditor()),
TableColumn(field="value", title="Value", editor=NumberEditor()),
TableColumn(field="vary", title="Vary", editor=CheckboxEditor()),
TableColumn(field="min", title="Min", editor=NumberEditor()),
TableColumn(field="max", title="Max", editor=NumberEditor()),
@@ -515,172 +545,114 @@ def create():
)
# start with `background` and `gauss` fit functions added
fitparams_add_dropdown_callback(types.SimpleNamespace(item="background"))
fitparams_add_dropdown_callback(types.SimpleNamespace(item="gauss"))
fitparams_add_dropdown_callback(types.SimpleNamespace(item="linear"))
fitparams_add_dropdown_callback(types.SimpleNamespace(item="gaussian"))
fitparams_select.value = ["gaussian-1"] # add selection to gauss
fit_output_textinput = TextAreaInput(title="Fit results:", width=450, height=400)
def _get_peakfind_params():
return dict(
int_threshold=peak_int_ratio_spinner.value,
prominence=peak_prominence_spinner.value,
smooth=smooth_toggle.active,
window_size=window_size_spinner.value,
poly_order=poly_order_spinner.value,
)
def peakfind_all_button_callback():
peakfind_params = _get_peakfind_params()
for scan in det_data["scan"].values():
pyzebra.ccl_findpeaks(scan, **peakfind_params)
_update_table()
_update_plot()
peakfind_all_button = Button(label="Peak Find All", button_type="primary", default_size=145)
peakfind_all_button.on_click(peakfind_all_button_callback)
def peakfind_button_callback():
scan = _get_selected_scan()
pyzebra.ccl_findpeaks(scan, **_get_peakfind_params())
_update_table()
_update_plot()
peakfind_button = Button(label="Peak Find Current", default_size=145)
peakfind_button.on_click(peakfind_button_callback)
def _get_fit_params():
return dict(
guess=fit_params["1"]["guess"] + fit_params["0"]["guess"],
vary=fit_params["1"]["vary"] + fit_params["0"]["vary"],
constraints_min=fit_params["1"]["min"] + fit_params["0"]["min"],
constraints_max=fit_params["1"]["max"] + fit_params["0"]["max"],
numfit_min=integ_from.value,
numfit_max=integ_to.value,
binning=bin_size_spinner.value,
)
fit_output_textinput = TextAreaInput(title="Fit results:", width=750, height=200)
def fit_all_button_callback():
fit_params = _get_fit_params()
for scan in det_data["scan"].values():
# fit_params are updated inplace within `fitccl`
pyzebra.fitccl(scan, **deepcopy(fit_params))
for scan, export in zip(det_data, scan_table_source.data["export"]):
if export:
pyzebra.fit_scan(
scan, fit_params, fit_from=fit_from_spinner.value, fit_to=fit_to_spinner.value
)
_update_plot()
_update_table()
fit_all_button = Button(label="Fit All", button_type="primary", default_size=145)
fit_all_button = Button(label="Fit All", button_type="primary", width=145)
fit_all_button.on_click(fit_all_button_callback)
def fit_button_callback():
scan = _get_selected_scan()
pyzebra.fitccl(scan, **_get_fit_params())
pyzebra.fit_scan(
scan, fit_params, fit_from=fit_from_spinner.value, fit_to=fit_to_spinner.value
)
_update_plot()
_update_table()
fit_button = Button(label="Fit Current", default_size=145)
fit_button = Button(label="Fit Current", width=145)
fit_button.on_click(fit_button_callback)
def area_method_radiobutton_callback(_attr, _old, new):
det_data["meta"]["area_method"] = AREA_METHODS[new]
def area_method_radiobutton_callback(_handler):
_update_preview()
area_method_radiobutton = RadioButtonGroup(
labels=["Fit area", "Int area"], active=0, default_size=145,
labels=["Fit area", "Int area"], active=0, width=145, disabled=True
)
area_method_radiobutton.on_change("active", area_method_radiobutton_callback)
area_method_radiobutton.on_click(area_method_radiobutton_callback)
bin_size_spinner = Spinner(title="Bin size:", value=1, low=1, step=1, default_size=145)
def lorentz_checkbox_callback(_handler):
_update_preview()
lorentz_toggle = Toggle(label="Lorentz Correction", default_size=145)
lorentz_checkbox = CheckboxGroup(labels=["Lorentz Correction"], width=145, margin=[13, 5, 5, 5])
lorentz_checkbox.on_click(lorentz_checkbox_callback)
preview_output_textinput = TextAreaInput(title="Export file preview:", width=450, height=400)
def preview_output_button_callback():
if det_data["meta"]["indices"] == "hkl":
ext = ".comm"
elif det_data["meta"]["indices"] == "real":
ext = ".incomm"
export_preview_textinput = TextAreaInput(title="Export file preview:", width=450, height=400)
def _update_preview():
with tempfile.TemporaryDirectory() as temp_dir:
temp_file = temp_dir + "/temp"
export_data = deepcopy(det_data)
for s, export in zip(scan_table_source.data["scan"], scan_table_source.data["export"]):
if not export:
del export_data["scan"][s]
pyzebra.export_comm(export_data, temp_file, lorentz=lorentz_toggle.active)
export_data = []
for s, export in zip(det_data, scan_table_source.data["export"]):
if export:
export_data.append(s)
with open(f"{temp_file}{ext}") as f:
preview_output_textinput.value = f.read()
pyzebra.export_1D(
export_data,
temp_file,
area_method=AREA_METHODS[int(area_method_radiobutton.active)],
lorentz=bool(lorentz_checkbox.active),
)
preview_output_button = Button(label="Preview file", default_size=220)
preview_output_button.on_click(preview_output_button_callback)
exported_content = ""
file_content = []
for ext in (".comm", ".incomm"):
fname = temp_file + ext
if os.path.isfile(fname):
with open(fname) as f:
content = f.read()
exported_content += f"{ext} file:\n" + content
else:
content = ""
file_content.append(content)
def export_results(det_data):
if det_data["meta"]["indices"] == "hkl":
ext = ".comm"
elif det_data["meta"]["indices"] == "real":
ext = ".incomm"
js_data.data.update(content=file_content)
export_preview_textinput.value = exported_content
with tempfile.TemporaryDirectory() as temp_dir:
temp_file = temp_dir + "/temp"
export_data = deepcopy(det_data)
for s, export in zip(scan_table_source.data["scan"], scan_table_source.data["export"]):
if not export:
del export_data["scan"][s]
pyzebra.export_comm(export_data, temp_file, lorentz=lorentz_toggle.active)
with open(f"{temp_file}{ext}") as f:
output_content = f.read()
return output_content, ext
def save_button_callback():
cont, ext = export_results(det_data)
js_data.data.update(cont=[cont], ext=[ext])
save_button = Button(label="Download file", button_type="success", default_size=220)
save_button.on_click(save_button_callback)
save_button = Button(label="Download File", button_type="success", width=220)
save_button.js_on_click(CustomJS(args={"js_data": js_data}, code=javaScript))
findpeak_controls = column(
row(peak_pos_textinput, column(Spacer(height=19), smooth_toggle)),
row(peak_int_ratio_spinner, peak_prominence_spinner),
row(window_size_spinner, poly_order_spinner),
row(peakfind_button, peakfind_all_button),
)
fitpeak_controls = row(
column(fitparams_add_dropdown, fitparams_select, fitparams_remove_button),
fitparams_table,
Spacer(width=20),
column(
row(integ_from, integ_to),
row(bin_size_spinner, column(Spacer(height=19), lorentz_toggle)),
row(fitparam_reset_button, area_method_radiobutton),
row(fit_from_spinner, fit_to_spinner),
row(area_method_radiobutton, lorentz_checkbox),
row(fit_button, fit_all_button),
),
)
export_layout = column(preview_output_textinput, row(preview_output_button, save_button))
scan_layout = column(scan_table, row(monitor_spinner, param_select))
import_layout = column(
proposal_textinput,
file_select,
row(file_open_button, file_append_button),
upload_div,
upload_button,
append_upload_div,
append_upload_button,
)
export_layout = column(export_preview_textinput, row(save_button))
upload_div = Div(text="Or upload .dat files:")
append_upload_div = Div(text="append extra .dat files:")
tab_layout = column(
row(
proposal_textinput,
file_select,
column(Spacer(height=19), row(file_open_button, file_append_button)),
),
row(
column(Spacer(height=5), upload_div),
upload_button,
column(Spacer(height=5), append_upload_div),
append_upload_button,
),
row(scan_table, plots, Spacer(width=30), fit_output_textinput, export_layout),
row(findpeak_controls, Spacer(width=30), fitpeak_controls),
row(import_layout, scan_layout, plots, Spacer(width=30), export_layout),
row(fitpeak_controls, fit_output_textinput),
)
return Panel(child=tab_layout, title="param study")

pyzebra/app/panel_spind.py (new file, 272 lines)

@@ -0,0 +1,272 @@
import ast
import math
import os
import subprocess
import tempfile
import numpy as np
from bokeh.layouts import column, row
from bokeh.models import (
Button,
ColumnDataSource,
DataTable,
Panel,
Spinner,
TableColumn,
TextAreaInput,
TextInput,
)
from scipy.optimize import curve_fit
import pyzebra
def create():
path_prefix_textinput = TextInput(title="Path prefix:", value="")
selection_list = TextAreaInput(title="ROIs:", rows=7)
lattice_const_textinput = TextInput(
title="Lattice constants:", value="8.3211,8.3211,8.3211,90.00,90.00,90.00"
)
max_res_spinner = Spinner(title="max-res:", value=2, step=0.01, width=145)
seed_pool_size_spinner = Spinner(title="seed-pool-size:", value=5, step=0.01, width=145)
seed_len_tol_spinner = Spinner(title="seed-len-tol:", value=0.02, step=0.01, width=145)
seed_angle_tol_spinner = Spinner(title="seed-angle-tol:", value=1, step=0.01, width=145)
eval_hkl_tol_spinner = Spinner(title="eval-hkl-tol:", value=0.15, step=0.01, width=145)
diff_vec = []
ub_matrices = []
def process_button_callback():
# drop table selection to clear result fields
results_table_source.selected.indices = []
nonlocal diff_vec
with tempfile.TemporaryDirectory() as temp_dir:
temp_peak_list_dir = os.path.join(temp_dir, "peak_list")
os.mkdir(temp_peak_list_dir)
temp_event_file = os.path.join(temp_peak_list_dir, "event-0.txt")
temp_hkl_file = os.path.join(temp_dir, "hkl.h5")
roi_dict = ast.literal_eval(selection_list.value)
comp_proc = subprocess.run(
[
"mpiexec",
"-n",
"2",
"python",
os.path.expanduser("~/spind/gen_hkl_table.py"),
lattice_const_textinput.value,
"--max-res",
str(max_res_spinner.value),
"-o",
temp_hkl_file,
],
check=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
text=True,
)
print(" ".join(comp_proc.args))
print(comp_proc.stdout)
diff_vec = prepare_event_file(temp_event_file, roi_dict, path_prefix_textinput.value)
print(f"Content of {temp_event_file}:")
with open(temp_event_file) as f:
print(f.read())
comp_proc = subprocess.run(
[
"mpiexec",
"-n",
"2",
"python",
os.path.expanduser("~/spind/SPIND.py"),
temp_peak_list_dir,
temp_hkl_file,
"-o",
temp_dir,
"--seed-pool-size",
str(seed_pool_size_spinner.value),
"--seed-len-tol",
str(seed_len_tol_spinner.value),
"--seed-angle-tol",
str(seed_angle_tol_spinner.value),
"--eval-hkl-tol",
str(eval_hkl_tol_spinner.value),
],
check=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
text=True,
)
print(" ".join(comp_proc.args))
print(comp_proc.stdout)
spind_out_file = os.path.join(temp_dir, "spind.txt")
spind_res = dict(
label=[], crystal_id=[], match_rate=[], matched_peaks=[], column_5=[], ub_matrix=[],
)
try:
with open(spind_out_file) as f_out:
for line in f_out:
c1, c2, c3, c4, c5, *c_rest = line.split()
spind_res["label"].append(c1)
spind_res["crystal_id"].append(c2)
spind_res["match_rate"].append(c3)
spind_res["matched_peaks"].append(c4)
spind_res["column_5"].append(c5)
# the remaining values form the spind UB matrix
vals = list(map(float, c_rest))
ub_matrix_spind = np.transpose(np.array(vals).reshape(3, 3))
ub_matrix = np.linalg.inv(ub_matrix_spind)
ub_matrices.append(ub_matrix)
spind_res["ub_matrix"].append(str(ub_matrix_spind * 1e-10))
print(f"Content of {spind_out_file}:")
with open(spind_out_file) as f:
print(f.read())
except FileNotFoundError:
print("No results from spind")
results_table_source.data.update(spind_res)
process_button = Button(label="Process", button_type="primary")
process_button.on_click(process_button_callback)
ub_matrix_textareainput = TextAreaInput(title="UB matrix:", rows=7, width=400)
hkl_textareainput = TextAreaInput(title="hkl values:", rows=7, width=400)
def results_table_select_callback(_attr, old, new):
if new:
ind = new[0]
ub_matrix = ub_matrices[ind]
res = ""
for vec in diff_vec:
res += f"{ub_matrix @ vec}\n"
ub_matrix_textareainput.value = str(ub_matrix * 1e10)
hkl_textareainput.value = res
else:
ub_matrix_textareainput.value = ""
hkl_textareainput.value = ""
results_table_source = ColumnDataSource(
dict(label=[], crystal_id=[], match_rate=[], matched_peaks=[], column_5=[], ub_matrix=[])
)
results_table = DataTable(
source=results_table_source,
columns=[
TableColumn(field="label", title="Label", width=50),
TableColumn(field="crystal_id", title="Crystal ID", width=100),
TableColumn(field="match_rate", title="Match Rate", width=100),
TableColumn(field="matched_peaks", title="Matched Peaks", width=100),
TableColumn(field="column_5", title="", width=100),
TableColumn(field="ub_matrix", title="UB Matrix", width=700),
],
height=300,
width=1200,
autosize_mode="none",
index_position=None,
)
results_table_source.selected.on_change("indices", results_table_select_callback)
tab_layout = row(
column(
path_prefix_textinput,
selection_list,
lattice_const_textinput,
row(max_res_spinner, seed_pool_size_spinner),
row(seed_len_tol_spinner, seed_angle_tol_spinner),
row(eval_hkl_tol_spinner),
process_button,
),
column(results_table, row(ub_matrix_textareainput, hkl_textareainput)),
)
return Panel(child=tab_layout, title="spind")
def gauss(x, *p):
"""Defines Gaussian function
Args:
A - amplitude, mu - position of the center, sigma - width
Returns:
Gaussian function
"""
A, mu, sigma = p
return A * np.exp(-((x - mu) ** 2) / (2.0 * sigma ** 2))
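# A quick standalone check of this model (the sample values below are
# invented for illustration, not taken from the repository):
#
#   import numpy as np
#   from scipy.optimize import curve_fit
#   x = np.arange(20, dtype=float)
#   y = gauss(x, 100.0, 10.0, 2.0)                 # A=100, mu=10, sigma=2
#   coeff, _ = curve_fit(gauss, x, y, p0=[50.0, 8.0, 3.0])
#   print(coeff)                                   # approximately [100., 10., 2.]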
def prepare_event_file(export_filename, roi_dict, path_prefix=""):
diff_vec = []
p0 = [1.0, 0.0, 1.0]
maxfev = 100000
with open(export_filename, "w") as f:
for file, rois in roi_dict.items():
dat = pyzebra.read_detector_data(path_prefix + file + ".hdf")
wave = dat["wave"]
ddist = dat["ddist"]
gamma = dat["gamma"][0]
omega = dat["omega"][0]
nu = dat["nu"][0]
chi = dat["chi"][0]
phi = dat["phi"][0]
scan_motor = dat["scan_motor"]
var_angle = dat[scan_motor]
for roi in rois:
x0, xN, y0, yN, fr0, frN = roi
data_roi = dat["data"][fr0:frN, y0:yN, x0:xN]
cnts = np.sum(data_roi, axis=(1, 2))
coeff, _ = curve_fit(gauss, range(len(cnts)), cnts, p0=p0, maxfev=maxfev)
m = cnts.mean()
sd = cnts.std()
snr_cnts = np.where(sd == 0, 0, m / sd)
frC = fr0 + coeff[1]
var_F = var_angle[math.floor(frC)]
var_C = var_angle[math.ceil(frC)]
frStep = frC - math.floor(frC)
var_step = var_C - var_F
var_p = var_F + var_step * frStep
if scan_motor == "gamma":
gamma = var_p
elif scan_motor == "omega":
omega = var_p
elif scan_motor == "nu":
nu = var_p
elif scan_motor == "chi":
chi = var_p
elif scan_motor == "phi":
phi = var_p
# Gaussian area: amplitude * |sigma| * sqrt(2 * pi); coeff[0] is the fitted amplitude
intensity = coeff[0] * abs(coeff[2] * var_step) * math.sqrt(2) * math.sqrt(np.pi)
projX = np.sum(data_roi, axis=(0, 1))
coeff, _ = curve_fit(gauss, range(len(projX)), projX, p0=p0, maxfev=maxfev)
x_pos = x0 + coeff[1]
projY = np.sum(data_roi, axis=(0, 2))
coeff, _ = curve_fit(gauss, range(len(projY)), projY, p0=p0, maxfev=maxfev)
y_pos = y0 + coeff[1]
ga, nu = pyzebra.det2pol(ddist, gamma, nu, x_pos, y_pos)
diff_vector = pyzebra.z1frmd(wave, ga, omega, chi, phi, nu)
d_spacing = float(pyzebra.dandth(wave, diff_vector)[0])
diff_vector = diff_vector.flatten() * 1e10
dv1, dv2, dv3 = diff_vector
diff_vec.append(diff_vector)
f.write(f"{x_pos} {y_pos} {intensity} {snr_cnts} {dv1} {dv2} {dv3} {d_spacing}\n")
return diff_vec
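For reference, the ROIs text area above is parsed with ast.literal_eval into a dict mapping a file stem (the ".hdf" extension is appended in prepare_event_file) to a list of [x0, xN, y0, yN, fr0, frN] boxes. A minimal sketch of a valid value; the stem and coordinates are invented for illustration:

rois = {
    "zebra_scan_001": [
        [100, 140, 80, 120, 0, 30],   # x0, xN, y0, yN, fr0, frN
        [200, 240, 60, 100, 10, 40],
    ],
}
print(repr(rois))  # a literal like this goes into the "ROIs:" text area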

@@ -1,75 +0,0 @@
import numpy as np
import scipy as sc
from scipy.interpolate import interp1d
from scipy.signal import savgol_filter
def ccl_findpeaks(
scan,
int_threshold=0.8,
prominence=50,
smooth=False,
window_size=7,
poly_order=3,
variable="om",
):
"""function iterates through the dictionary created by load_cclv2 and locates peaks for each scan
args: scan - a single scan,
int_threshold - fraction of threshold_intensity/max_intensity, must be positive num between 0 and 1
i.e. will only detect peaks above 75% of max intensity
prominence - defines a drop of values that must be between two peaks, must be positive number
i.e. if promimence is 20, it will detect two neigbouring peaks of 300 and 310 intesities,
if none of the itermediate values are lower that 290
smooth - if true, smooths data by savitzky golay filter, if false - no smoothing
window_size - window size for savgol filter, must be odd positive integer
poly_order = order of the polynomial used in savgol filter, must be positive integer smaller than
"""
if not 0 <= int_threshold <= 1:
int_threshold = 0.8
print(
"Invalid value for int_threshold, select value between 0 and 1, new value set to:",
int_threshold,
)
if not isinstance(window_size, int) or (window_size % 2) == 0 or window_size <= 1:
window_size = 7
print(
"Invalid value for window_size, select positive odd integer, new value set to!:",
window_size,
)
if not isinstance(poly_order, int) or window_size < poly_order:
poly_order = 3
print(
"Invalid value for poly_order, select positive integer smaller than window_size, new value set to:",
poly_order,
)
if not isinstance(prominence, (int, float)) or prominence < 0:
prominence = 50
print("Invalid value for prominence, select positive number, new value set to:", prominence)
omega = scan[variable]
counts = np.array(scan["Counts"])
if smooth:
itp = interp1d(omega, counts, kind="linear")
absintensity = [abs(number) for number in counts]
lowest_intensity = min(absintensity)
counts[counts < 0] = lowest_intensity
smooth_peaks = savgol_filter(itp(omega), window_size, poly_order)
else:
smooth_peaks = counts
peaks, properties = sc.signal.find_peaks(
smooth_peaks, height=int_threshold * max(smooth_peaks), prominence=prominence
)
scan["peak_indexes"] = peaks
scan["peak_heights"] = properties["peak_heights"]
scan["smooth_peaks"] = smooth_peaks # smoothed curve

@@ -58,37 +58,22 @@ META_VARS_FLOAT = (
META_UB_MATRIX = ("ub1j", "ub2j", "ub3j")
CCL_FIRST_LINE = (
("scan_number", int),
("h_index", float),
("k_index", float),
("l_index", float),
)
CCL_FIRST_LINE = (("idx", int), ("h", float), ("k", float), ("l", float))
CCL_ANGLES = {
"bi": (
("twotheta_angle", float),
("omega_angle", float),
("chi_angle", float),
("phi_angle", float),
),
"nb": (
("gamma_angle", float),
("omega_angle", float),
("nu_angle", float),
("unkwn_angle", float),
),
"bi": (("twotheta", float), ("omega", float), ("chi", float), ("phi", float)),
"nb": (("gamma", float), ("omega", float), ("nu", float), ("skip_angle", float)),
}
CCL_SECOND_LINE = (
("n_points", int),
("angle_step", float),
("monitor", float),
("temperature", float),
("mag_field", float),
("temp", float),
("mf", float),
("date", str),
("time", str),
("scan_type", str),
("scan_motor", str),
)
AREA_METHODS = ("fit_area", "int_area")
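For orientation, parse_1D below zips the whitespace-split first line of each scan against CCL_FIRST_LINE extended with the angle set of the current zebra_mode. A self-contained sketch of that mapping, using an invented 'bi'-mode line:

# Invented example line; the field spec mirrors the constants above ('bi' mode).
spec = (("idx", int), ("h", float), ("k", float), ("l", float),
        ("twotheta", float), ("omega", float), ("chi", float), ("phi", float))
line = "1 2 0 0 35.10 17.55 45.00 120.00"
scan = {name: typ(val) for val, (name, typ) in zip(line.split(), spec)}
print(scan)  # {'idx': 1, 'h': 2.0, ..., 'phi': 120.0}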
@@ -114,61 +99,106 @@ def load_1D(filepath):
def parse_1D(fileobj, data_type):
metadata = {"data_type": data_type}
# read metadata
metadata = {}
for line in fileobj:
if "=" in line:
variable, value = line.split("=")
variable, value = line.split("=", 1)
variable = variable.strip()
if variable in META_VARS_FLOAT:
value = value.strip()
if variable in META_VARS_STR:
metadata[variable] = value
elif variable in META_VARS_FLOAT:
if variable == "2-theta": # fix that angle name not to be an expression
variable = "twotheta"
if variable in ("a", "b", "c", "alpha", "beta", "gamma"):
variable += "_cell"
metadata[variable] = float(value)
elif variable in META_VARS_STR:
metadata[variable] = str(value)[:-1].strip()
elif variable in META_UB_MATRIX:
metadata[variable] = re.findall(r"[-+]?\d*\.\d+|\d+", str(value))
if "ub" not in metadata:
metadata["ub"] = np.zeros((3, 3))
row = int(variable[-2]) - 1
metadata["ub"][row, :] = list(map(float, value.split()))
if "#data" in line:
# this is the end of metadata and the start of data section
break
# handle older files that don't contain "zebra_mode" metadata
if "zebra_mode" not in metadata:
metadata["zebra_mode"] = "nb"
# read data
scan = {}
scan = []
if data_type == ".ccl":
ccl_first_line = (*CCL_FIRST_LINE, *CCL_ANGLES[metadata["zebra_mode"]])
ccl_first_line = CCL_FIRST_LINE + CCL_ANGLES[metadata["zebra_mode"]]
ccl_second_line = CCL_SECOND_LINE
for line in fileobj:
d = {}
# skip empty/whitespace lines before start of any scan
if not line or line.isspace():
continue
s = {}
# first line
for param, (param_name, param_type) in zip(line.split(), ccl_first_line):
d[param_name] = param_type(param)
s[param_name] = param_type(param)
# second line
next_line = next(fileobj)
for param, (param_name, param_type) in zip(next_line.split(), ccl_second_line):
d[param_name] = param_type(param)
s[param_name] = param_type(param)
d["om"] = np.linspace(
d["omega_angle"] - (d["n_points"] / 2) * d["angle_step"],
d["omega_angle"] + (d["n_points"] / 2) * d["angle_step"],
d["n_points"],
)
if s["scan_motor"] != "om":
raise Exception("Unsupported variable name in ccl file.")
# "om" -> "omega"
s["scan_motor"] = "omega"
# overwrite metadata, because it only refers to the scan center
half_dist = (s["n_points"] - 1) / 2 * s["angle_step"]
s["omega"] = np.linspace(s["omega"] - half_dist, s["omega"] + half_dist, s["n_points"])
# subsequent lines with counts
counts = []
while len(counts) < d["n_points"]:
counts.extend(map(int, next(fileobj).split()))
d["Counts"] = counts
while len(counts) < s["n_points"]:
counts.extend(map(float, next(fileobj).split()))
s["Counts"] = np.array(counts)
scan[d["scan_number"]] = d
if s["h"].is_integer() and s["k"].is_integer() and s["l"].is_integer():
s["h"], s["k"], s["l"] = map(int, (s["h"], s["k"], s["l"]))
scan.append({**metadata, **s})
elif data_type == ".dat":
# skip the first 2 rows, the third row contains the column names
next(fileobj)
next(fileobj)
# TODO: this might need to be adapted in the future, when "gamma" is added to dat files
if metadata["zebra_mode"] == "nb":
metadata["gamma"] = metadata["twotheta"]
s = defaultdict(list)
match = re.search("Scanning Variables: (.*), Steps: (.*)", next(fileobj))
if match.group(1) == "h, k, l":
steps = match.group(2).split()
for step, ind in zip(steps, "hkl"):
if float(step) != 0:
scan_motor = ind
break
else:
scan_motor = match.group(1)
s["scan_motor"] = scan_motor
match = re.search("(.*) Points, Mode: (.*), Preset (.*)", next(fileobj))
if match.group(2) != "Monitor":
raise Exception("Unknown mode in dat file.")
s["monitor"] = float(match.group(3))
col_names = next(fileobj).split()
data_cols = defaultdict(list)
for line in fileobj:
if "END-OF-DATA" in line:
@@ -176,100 +206,107 @@ def parse_1D(fileobj, data_type):
break
for name, val in zip(col_names, line.split()):
data_cols[name].append(float(val))
s[name].append(float(val))
try:
data_cols["h_index"] = float(metadata["title"].split()[-3])
data_cols["k_index"] = float(metadata["title"].split()[-2])
data_cols["l_index"] = float(metadata["title"].split()[-1])
except (ValueError, IndexError):
print("seems hkl is not in title")
for name in col_names:
s[name] = np.array(s[name])
data_cols["om"] = np.array(data_cols["om"])
# "om" -> "omega"
if s["scan_motor"] == "om":
s["scan_motor"] = "omega"
s["omega"] = s["om"]
del s["om"]
data_cols["temperature"] = metadata["temp"]
try:
data_cols["mag_field"] = metadata["mf"]
except KeyError:
print("Mag_field not present in dat file")
# "tt" -> "temp"
elif s["scan_motor"] == "tt":
s["scan_motor"] = "temp"
s["temp"] = s["tt"]
del s["tt"]
data_cols["omega_angle"] = metadata["omega"]
data_cols["n_points"] = len(data_cols["om"])
data_cols["monitor"] = data_cols["Monitor1"][0]
data_cols["twotheta_angle"] = metadata["2-theta"]
data_cols["chi_angle"] = metadata["chi"]
data_cols["phi_angle"] = metadata["phi"]
data_cols["nu_angle"] = metadata["nu"]
# "mf" stays "mf"
# "phi" stays "phi"
data_cols["scan_number"] = 1
scan[data_cols["scan_number"]] = dict(data_cols)
if "h" not in s:
s["h"] = s["k"] = s["l"] = float("nan")
for param in ("mf", "temp"):
if param not in metadata:
s[param] = 0
s["idx"] = 1
scan.append({**metadata, **s})
else:
print("Unknown file extention")
# utility information
if all(
s["h_index"].is_integer() and s["k_index"].is_integer() and s["l_index"].is_integer()
for s in scan.values()
):
metadata["indices"] = "hkl"
else:
metadata["indices"] = "real"
metadata["data_type"] = data_type
metadata["area_method"] = AREA_METHODS[0]
return {"meta": metadata, "scan": scan}
return scan
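With this change a loaded file is a flat list of scan dicts, each carrying the file metadata merged in, so downstream code can iterate directly. A hedged usage sketch; the path is a placeholder, and it assumes load_1D is re-exported at package level, as other pyzebra helpers in this diff are:

import pyzebra

dataset = pyzebra.load_1D("/path/to/data/example.ccl")  # placeholder path
for scan in dataset:
    print(scan["idx"], scan["h"], scan["k"], scan["l"], scan["scan_motor"])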
def export_comm(data, path, lorentz=False, hkl_precision=2):
"""exports data in the *.comm format
:param lorentz: perform Lorentz correction
:param path: path to file + name
:arg data - data to export, is dict after peak fitting
def export_1D(data, path, area_method=AREA_METHODS[0], lorentz=False, hkl_precision=2):
"""Exports data in the .comm/.incomm format
Scans with integer/real hkl values are saved in .comm/.incomm files respectively. If no scans
are present for a particular output format, that file won't be created.
"""
zebra_mode = data["meta"]["zebra_mode"]
if data["meta"]["indices"] == "hkl":
extension = ".comm"
else: # data["meta"]["indices"] == "real":
extension = ".incomm"
zebra_mode = data[0]["zebra_mode"]
file_content = {".comm": [], ".incomm": []}
with open(str(path + extension), "w") as out_file:
for key, scan in data["scan"].items():
if "fit" not in scan:
print("Scan skipped - no fit value for:", key)
continue
for scan in data:
if "fit" not in scan:
continue
scan_str = f"{key:6}"
idx_str = f"{scan['idx']:6}"
h, k, l = scan["h_index"], scan["k_index"], scan["l_index"]
if data["meta"]["indices"] == "hkl":
hkl_str = f"{int(h):6}{int(k):6}{int(l):6}"
else: # data["meta"]["indices"] == "real"
hkl_str = f"{h:8.{hkl_precision}f}{k:8.{hkl_precision}f}{l:8.{hkl_precision}f}"
h, k, l = scan["h"], scan["k"], scan["l"]
hkl_are_integers = isinstance(h, int) # if True, other indices are of type 'int' too
if hkl_are_integers:
hkl_str = f"{h:4}{k:4}{l:4}"
else:
hkl_str = f"{h:8.{hkl_precision}f}{k:8.{hkl_precision}f}{l:8.{hkl_precision}f}"
area_method = data["meta"]["area_method"]
area_n = scan["fit"][area_method].n
area_s = scan["fit"][area_method].s
for name, param in scan["fit"].params.items():
if "amplitude" in name:
if param.stderr is None:
area_n = np.nan
area_s = np.nan
else:
area_n = param.value
area_s = param.stderr
# TODO: take into account multiple peaks
break
else:
# no peak functions in a fit model
area_n = np.nan
area_s = np.nan
# apply lorentz correction to area
if lorentz:
if zebra_mode == "bi":
twotheta_angle = np.deg2rad(scan["twotheta_angle"])
corr_factor = np.sin(twotheta_angle)
else: # zebra_mode == "nb":
gamma_angle = np.deg2rad(scan["gamma_angle"])
nu_angle = np.deg2rad(scan["nu_angle"])
corr_factor = np.sin(gamma_angle) * np.cos(nu_angle)
# apply lorentz correction to area
if lorentz:
if zebra_mode == "bi":
twotheta = np.deg2rad(scan["twotheta"])
corr_factor = np.sin(twotheta)
else: # zebra_mode == "nb":
gamma = np.deg2rad(scan["gamma"])
nu = np.deg2rad(scan["nu"])
corr_factor = np.sin(gamma) * np.cos(nu)
area_n = np.abs(area_n * corr_factor)
area_s = np.abs(area_s * corr_factor)
area_n = np.abs(area_n * corr_factor)
area_s = np.abs(area_s * corr_factor)
area_str = f"{area_n:10.2f}{area_s:10.2f}"
area_str = f"{area_n:10.2f}{area_s:10.2f}"
ang_str = ""
for angle, _ in CCL_ANGLES[zebra_mode]:
ang_str = ang_str + f"{scan[angle]:8}"
ang_str = ""
for angle, _ in CCL_ANGLES[zebra_mode]:
if angle == scan["scan_motor"]:
angle_center = (np.min(scan[angle]) + np.max(scan[angle])) / 2
else:
angle_center = scan[angle]
ang_str = ang_str + f"{angle_center:8g}"
out_file.write(scan_str + hkl_str + area_str + ang_str + "\n")
ref = file_content[".comm"] if hkl_are_integers else file_content[".incomm"]
ref.append(idx_str + hkl_str + area_str + ang_str + "\n")
for ext, content in file_content.items():
if content:
with open(path + ext, "w") as out_file:
out_file.writelines(content)
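Because the extension is now chosen per scan, a single export_1D call can emit both files. A sketch, assuming export_1D is importable from pyzebra and with a placeholder output stem:

# 'dataset' is a list of scan dicts that already carry lmfit results in
# scan["fit"]; the stem gets ".comm"/".incomm" appended as needed.
pyzebra.export_1D(dataset, "/tmp/experiment_42", lorentz=True, hkl_precision=2)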

pyzebra/ccl_process.py Normal file
@@ -0,0 +1,150 @@
import itertools
import os
import numpy as np
from lmfit.models import GaussianModel, LinearModel, PseudoVoigtModel, VoigtModel
from .ccl_io import CCL_ANGLES
PARAM_PRECISIONS = {
"twotheta": 0.1,
"chi": 0.1,
"nu": 0.1,
"phi": 0.05,
"omega": 0.05,
"gamma": 0.05,
"temp": 1,
"mf": 0.001,
"ub": 0.01,
}
MAX_RANGE_GAP = {
"omega": 0.5,
}
def normalize_dataset(dataset, monitor=100_000):
for scan in dataset:
monitor_ratio = monitor / scan["monitor"]
scan["Counts"] *= monitor_ratio
scan["monitor"] = monitor
def merge_duplicates(dataset):
for scan_i, scan_j in itertools.combinations(dataset, 2):
if _parameters_match(scan_i, scan_j):
merge_scans(scan_i, scan_j)
def _parameters_match(scan1, scan2):
zebra_mode = scan1["zebra_mode"]
if zebra_mode != scan2["zebra_mode"]:
return False
for param in ("ub", "temp", "mf", *(vars[0] for vars in CCL_ANGLES[zebra_mode])):
if param.startswith("skip"):
# ignore skip parameters, like the last angle in 'nb' zebra mode
continue
if param == scan1["scan_motor"] == scan2["scan_motor"]:
# check if ranges of variable parameter overlap
range1 = scan1[param]
range2 = scan2[param]
# maximum gap between ranges of the scanning parameter (default 0)
max_range_gap = MAX_RANGE_GAP.get(param, 0)
if max(range1[0] - range2[-1], range2[0] - range1[-1]) > max_range_gap:
return False
elif np.max(np.abs(scan1[param] - scan2[param])) > PARAM_PRECISIONS[param]:
return False
return True
def merge_datasets(dataset1, dataset2):
for scan_j in dataset2:
for scan_i in dataset1:
if _parameters_match(scan_i, scan_j):
merge_scans(scan_i, scan_j)
break
dataset1.append(scan_j)
def merge_scans(scan1, scan2):
omega = np.concatenate((scan1["omega"], scan2["omega"]))
counts = np.concatenate((scan1["Counts"], scan2["Counts"]))
index = np.argsort(omega)
scan1["omega"] = omega[index]
scan1["Counts"] = counts[index]
scan2["active"] = False
fname1 = os.path.basename(scan1["original_filename"])
fname2 = os.path.basename(scan2["original_filename"])
print(f'Merging scans: {scan1["idx"]} ({fname1}) <-- {scan2["idx"]} ({fname2})')
def fit_scan(scan, model_dict, fit_from=None, fit_to=None):
if fit_from is None:
fit_from = -np.inf
if fit_to is None:
fit_to = np.inf
y_fit = scan["Counts"]
x_fit = scan[scan["scan_motor"]]
# apply fitting range
fit_ind = (fit_from <= x_fit) & (x_fit <= fit_to)
y_fit = y_fit[fit_ind]
x_fit = x_fit[fit_ind]
model = None
for model_index, (model_name, model_param) in enumerate(model_dict.items()):
model_name, _ = model_name.split("-")
prefix = f"f{model_index}_"
if model_name == "linear":
_model = LinearModel(prefix=prefix)
elif model_name == "gaussian":
_model = GaussianModel(prefix=prefix)
elif model_name == "voigt":
_model = VoigtModel(prefix=prefix)
elif model_name == "pvoigt":
_model = PseudoVoigtModel(prefix=prefix)
else:
raise ValueError(f"Unknown model name: '{model_name}'")
_init_guess = _model.guess(y_fit, x=x_fit)
for param_index, param_name in enumerate(model_param["param"]):
param_hints = {}
for hint_name in ("value", "vary", "min", "max"):
tmp = model_param[hint_name][param_index]
if tmp is None:
param_hints[hint_name] = getattr(_init_guess[prefix + param_name], hint_name)
else:
param_hints[hint_name] = tmp
if "center" in param_name:
if np.isneginf(param_hints["min"]):
param_hints["min"] = np.min(x_fit)
if np.isposinf(param_hints["max"]):
param_hints["max"] = np.max(x_fit)
if "sigma" in param_name:
if np.isposinf(param_hints["max"]):
param_hints["max"] = np.max(x_fit) - np.min(x_fit)
_model.set_param_hint(param_name, **param_hints)
if model is None:
model = _model
else:
model += _model
weights = [1 / np.sqrt(val) if val != 0 else 1 for val in y_fit]
scan["fit"] = model.fit(y_fit, x=x_fit, weights=weights)

@@ -1,228 +0,0 @@
import numpy as np
import uncertainties as u
from lmfit import Model, Parameters
from scipy.integrate import simps
def bin_data(array, binsize):
if isinstance(binsize, int) and 0 < binsize < len(array):
return [
np.mean(array[binsize * i : binsize * i + binsize])
for i in range(int(np.ceil(len(array) / binsize)))
]
else:
print("Binsize need to be positive integer smaller than lenght of array")
return array
def find_nearest(array, value):
# find nearest value and return index
array = np.asarray(array)
idx = (np.abs(array - value)).argmin()
return idx
def create_uncertanities(y, y_err):
# create array with uncertainties for error propagation
combined = np.array([])
for i in range(len(y)):
part = u.ufloat(y[i], y_err[i])
combined = np.append(combined, part)
return combined
def fitccl(
scan,
guess,
vary,
constraints_min,
constraints_max,
numfit_min=None,
numfit_max=None,
binning=None,
):
"""Made for fitting of ccl date where 1 peak is expected. Allows for combination of gaussian and linear model combination
:param scan: scan in the data dict (i.e. M123)
:param guess: initial guess for the fitting, if none, some values are added automatically in order (see below)
:param vary: True if parameter can vary during fitting, False if it to be fixed
:param numfit_min: minimal value on x axis for numerical integration - if none is centre of gaussian minus 3 sigma
:param numfit_max: maximal value on x axis for numerical integration - if none is centre of gaussian plus 3 sigma
:param constraints_min: min constranits value for fit
:param constraints_max: max constranits value for fit
:param binning : binning of the data
:return data dict with additional values
order for guess, vary, constraints_min, constraints_max:
[Gaussian centre, Gaussian sigma, Gaussian amplitude, background slope, background intercept]
examples:
guess = [None, None, 100, 0, None]
vary = [True, True, True, True, True]
constraints_min = [23, None, 50, 0, 0]
constraints_min = [80, None, 1000, 0, 100]
"""
if "peak_indexes" not in scan:
scan["peak_indexes"] = []
if len(scan["peak_indexes"]) > 1:
# return in case of more than 1 peak
return
if binning is None or binning == 0 or binning == 1:
x = list(scan["om"])
y = list(scan["Counts"])
y_err = list(np.sqrt(y)) if scan.get("sigma", None) is None else list(scan["sigma"])
if not scan["peak_indexes"]:
centre = np.mean(x)
else:
centre = x[int(scan["peak_indexes"])]
else:
x = list(scan["om"])
if not scan["peak_indexes"]:
centre = np.mean(x)
else:
centre = x[int(scan["peak_indexes"])]
x = bin_data(x, binning)
y = list(scan["Counts"])
y_err = list(np.sqrt(y)) if scan.get("sigma", None) is None else list(scan["sigma"])
combined = bin_data(create_uncertanities(y, y_err), binning)
y = [combined[i].n for i in range(len(combined))]
y_err = [combined[i].s for i in range(len(combined))]
if len(scan["peak_indexes"]) == 0:
# Case for no peak, gaussian in centre, sigma as 20% of range
peak_index = find_nearest(x, np.mean(x))
guess[0] = centre if guess[0] is None else guess[0]
guess[1] = (x[-1] - x[0]) / 5 if guess[1] is None else guess[1]
guess[2] = 50 if guess[2] is None else guess[2]
guess[3] = 0 if guess[3] is None else guess[3]
guess[4] = np.mean(y) if guess[4] is None else guess[4]
constraints_min[2] = 0
elif len(scan["peak_indexes"]) == 1:
# case for one peak, takes into account the user's guesses
peak_height = scan["peak_heights"]
guess[0] = centre if guess[0] is None else guess[0]
guess[1] = 0.1 if guess[1] is None else guess[1]
guess[2] = float(peak_height / 10) if guess[2] is None else float(guess[2])
guess[3] = 0 if guess[3] is None else guess[3]
guess[4] = np.median(x) if guess[4] is None else guess[4]
constraints_min[0] = np.min(x) if constraints_min[0] is None else constraints_min[0]
constraints_max[0] = np.max(x) if constraints_max[0] is None else constraints_max[0]
def gaussian(x, g_cen, g_width, g_amp):
"""1-d gaussian: gaussian(x, amp, cen, wid)"""
return (g_amp / (np.sqrt(2 * np.pi) * g_width)) * np.exp(
-((x - g_cen) ** 2) / (2 * g_width ** 2)
)
def background(x, slope, intercept):
"""background"""
return slope * (x - centre) + intercept
mod = Model(gaussian) + Model(background)
params = Parameters()
params.add_many(
("g_cen", guess[0], bool(vary[0]), np.min(x), np.max(x), None, None),
("g_width", guess[1], bool(vary[1]), constraints_min[1], constraints_max[1], None, None),
("g_amp", guess[2], bool(vary[2]), constraints_min[2], constraints_max[2], None, None),
("slope", guess[3], bool(vary[3]), constraints_min[3], constraints_max[3], None, None),
("intercept", guess[4], bool(vary[4]), constraints_min[4], constraints_max[4], None, None),
)
# the weighted fit
weights = [np.abs(1 / val) if val != 0 else 1 for val in y_err]
try:
result = mod.fit(y, params, weights=weights, x=x, calc_covar=True)
except ValueError:
print(f"Couldn't fit scan {scan['scan_number']}")
return
if result.params["g_amp"].stderr is None:
result.params["g_amp"].stderr = result.params["g_amp"].value
elif result.params["g_amp"].stderr > result.params["g_amp"].value:
result.params["g_amp"].stderr = result.params["g_amp"].value
# u.ufloat to work with uncertainties
fit_area = u.ufloat(result.params["g_amp"].value, result.params["g_amp"].stderr)
comps = result.eval_components()
if len(scan["peak_indexes"]) == 0:
# for case of no peak, there is no reason to integrate, therefore fit and int are equal
int_area = fit_area
elif len(scan["peak_indexes"]) == 1:
gauss_3sigmamin = find_nearest(
x, result.params["g_cen"].value - 3 * result.params["g_width"].value
)
gauss_3sigmamax = find_nearest(
x, result.params["g_cen"].value + 3 * result.params["g_width"].value
)
numfit_min = gauss_3sigmamin if numfit_min is None else find_nearest(x, numfit_min)
numfit_max = gauss_3sigmamax if numfit_max is None else find_nearest(x, numfit_max)
it = -1
while abs(numfit_max - numfit_min) < 3:
# in the case the peak is very thin and numerical integration would be on zero omega
# difference, finds closest values
it = it + 1
numfit_min = find_nearest(
x,
result.params["g_cen"].value - 3 * (1 + it / 10) * result.params["g_width"].value,
)
numfit_max = find_nearest(
x,
result.params["g_cen"].value + 3 * (1 + it / 10) * result.params["g_width"].value,
)
if x[numfit_min] < np.min(x):
# makes sure that the values supplied by the user lie in the omega range
# can be omitted for users who know what they're doing
numfit_min = gauss_3sigmamin
print("Minimal integration value outside of x range")
elif x[numfit_min] >= x[numfit_max]:
numfit_min = gauss_3sigmamin
print("Minimal integration value higher than maximal")
else:
pass
if x[numfit_max] > np.max(x):
numfit_max = gauss_3sigmamax
print("Maximal integration value outside of x range")
elif x[numfit_max] <= x[numfit_min]:
numfit_max = gauss_3sigmamax
print("Maximal integration value lower than minimal")
else:
pass
count_errors = create_uncertanities(y, y_err)
# create error vector for numerical integration propagation
num_int_area = simps(count_errors[numfit_min:numfit_max], x[numfit_min:numfit_max])
slope_err = u.ufloat(result.params["slope"].value, result.params["slope"].stderr)
# pulls the nominal and error values from fit (slope)
intercept_err = u.ufloat(
result.params["intercept"].value, result.params["intercept"].stderr
)
# pulls the nominal and error values from fit (intercept)
background_errors = np.array([])
for j in range(len(x[numfit_min:numfit_max])):
# creates nominal and error vector for numerical integration of background
bg = slope_err * (x[j] - centre) + intercept_err
background_errors = np.append(background_errors, bg)
num_int_background = simps(background_errors, x[numfit_min:numfit_max])
int_area = num_int_area - num_int_background
d = {}
for pars in result.params:
d[str(pars)] = (result.params[str(pars)].value, result.params[str(pars)].vary)
print("Scan", scan["scan_number"])
print(result.fit_report())
d["ratio"] = (result.params["g_amp"].value - int_area.n) / result.params["g_amp"].value
d["int_area"] = int_area
d["fit_area"] = u.ufloat(result.params["g_amp"].value, result.params["g_amp"].stderr)
d["full_report"] = result.fit_report()
d["result"] = result
d["comps"] = comps
d["numfit"] = [numfit_min, numfit_max]
d["x_fit"] = x
scan["fit"] = d

@@ -1,167 +0,0 @@
import numpy as np
from lmfit import Model, Parameters
from scipy.integrate import simps
import matplotlib.pyplot as plt
import uncertainties as u
from lmfit.models import GaussianModel
from lmfit.models import VoigtModel
from lmfit.models import PseudoVoigtModel
def bin_data(array, binsize):
if isinstance(binsize, int) and 0 < binsize < len(array):
return [
np.mean(array[binsize * i : binsize * i + binsize])
for i in range(int(np.ceil(len(array) / binsize)))
]
else:
print("Binsize need to be positive integer smaller than lenght of array")
return array
def create_uncertanities(y, y_err):
# create array with uncertainties for error propagation
combined = np.array([])
for i in range(len(y)):
part = u.ufloat(y[i], y_err[i])
combined = np.append(combined, part)
return combined
def find_nearest(array, value):
# find nearest value and return index
array = np.asarray(array)
idx = (np.abs(array - value)).argmin()
return idx
# predefined peak positions
# peaks = [6.2, 8.1, 9.9, 11.5]
peaks = [23.5, 24.5]
# peaks = [24]
def fitccl(scan, variable="om", peak_type="gauss", binning=None):
x = list(scan[variable])
y = list(scan["Counts"])
peak_centre = np.mean(x)
if binning is None or binning == 0 or binning == 1:
x = list(scan["om"])
y = list(scan["Counts"])
y_err = list(np.sqrt(y)) if scan.get("sigma", None) is None else list(scan["sigma"])
print(scan["peak_indexes"])
if not scan["peak_indexes"]:
peak_centre = np.mean(x)
else:
peak_centre = x[int(scan["peak_indexes"])]
else:
x = list(scan["om"])
if not scan["peak_indexes"]:
peak_centre = np.mean(x)
else:
peak_centre = x[int(scan["peak_indexes"])]
x = bin_data(x, binning)
y = list(scan["Counts"])
y_err = list(np.sqrt(y)) if scan.get("sigma", None) is None else list(scan["sigma"])
combined = bin_data(create_uncertanities(y, y_err), binning)
y = [combined[i].n for i in range(len(combined))]
y_err = [combined[i].s for i in range(len(combined))]
def background(x, slope, intercept):
"""background"""
return slope * (x - peak_centre) + intercept
def gaussian(x, center, g_sigma, amplitude):
"""1-d gaussian: gaussian(x, amp, cen, wid)"""
return (amplitude / (np.sqrt(2.0 * np.pi) * g_sigma)) * np.exp(
-((x - center) ** 2) / (2 * g_sigma ** 2)
)
def lorentzian(x, center, l_sigma, amplitude):
"""1d lorentzian"""
return (amplitude / (1 + ((1 * x - center) / l_sigma) ** 2)) / (np.pi * l_sigma)
def pseudoVoigt1(x, center, g_sigma, amplitude, l_sigma, fraction):
"""PseudoVoight peak with different widths of lorenzian and gaussian"""
return (1 - fraction) * gaussian(x, center, g_sigma, amplitude) + fraction * (
lorentzian(x, center, l_sigma, amplitude)
)
mod = Model(background)
params = Parameters()
params.add_many(
("slope", 0, True, None, None, None, None), ("intercept", 0, False, None, None, None, None)
)
for i in range(len(peaks)):
if peak_type == "gauss":
mod = mod + GaussianModel(prefix="p%d_" % (i + 1))
params.add(str("p%d_" % (i + 1) + "amplitude"), 20, True, 0, None, None)
params.add(str("p%d_" % (i + 1) + "center"), peaks[i], True, None, None, None)
params.add(str("p%d_" % (i + 1) + "sigma"), 0.2, True, 0, 5, None)
elif peak_type == "voigt":
mod = mod + VoigtModel(prefix="p%d_" % (i + 1))
params.add(str("p%d_" % (i + 1) + "amplitude"), 20, True, 0, None, None)
params.add(str("p%d_" % (i + 1) + "center"), peaks[i], True, None, None, None)
params.add(str("p%d_" % (i + 1) + "sigma"), 0.2, True, 0, 3, None)
params.add(str("p%d_" % (i + 1) + "gamma"), 0.2, True, 0, 5, None)
elif peak_type == "pseudovoigt":
mod = mod + PseudoVoigtModel(prefix="p%d_" % (i + 1))
params.add(str("p%d_" % (i + 1) + "amplitude"), 20, True, 0, None, None)
params.add(str("p%d_" % (i + 1) + "center"), peaks[i], True, None, None, None)
params.add(str("p%d_" % (i + 1) + "sigma"), 0.2, True, 0, 5, None)
params.add(str("p%d_" % (i + 1) + "fraction"), 0.5, True, -5, 5, None)
elif peak_type == "pseudovoigt1":
mod = mod + Model(pseudoVoigt1, prefix="p%d_" % (i + 1))
params.add(str("p%d_" % (i + 1) + "amplitude"), 20, True, 0, None, None)
params.add(str("p%d_" % (i + 1) + "center"), peaks[i], True, None, None, None)
params.add(str("p%d_" % (i + 1) + "g_sigma"), 0.2, True, 0, 5, None)
params.add(str("p%d_" % (i + 1) + "l_sigma"), 0.2, True, 0, 5, None)
params.add(str("p%d_" % (i + 1) + "fraction"), 0.5, True, 0, 1, None)
# add parameters
result = mod.fit(
y, params, weights=[np.abs(1 / y_err[i]) for i in range(len(y_err))], x=x, calc_covar=True
)
comps = result.eval_components()
reportstring = list()
for keys in result.params:
if result.params[keys].value is not None:
str2 = np.around(result.params[keys].value, 3)
else:
str2 = 0
if result.params[keys].stderr is not None:
str3 = np.around(result.params[keys].stderr, 3)
else:
str3 = 0
reportstring.append("%s = %2.3f +/- %2.3f" % (keys, str2, str3))
reportstring = "\n".join(reportstring)
plt.figure(figsize=(20, 10))
plt.plot(x, result.best_fit, "k-", label="Best fit")
plt.plot(x, y, "b-", label="Original data")
plt.plot(x, comps["background"], "g--", label="Line component")
for i in range(len(peaks)):
plt.plot(
x,
comps[str("p%d_" % (i + 1))],
"r--",
)
plt.fill_between(x, comps[str("p%d_" % (i + 1))], alpha=0.4, label=str("p%d_" % (i + 1)))
plt.legend()
plt.text(
np.min(x),
np.max(y),
reportstring,
fontsize=9,
verticalalignment="top",
)
plt.title(str(peak_type))
plt.xlabel("Omega [deg]")
plt.ylabel("Counts [a.u.]")
plt.show()
print(result.fit_report())

@@ -41,7 +41,7 @@ def read_detector_data(filepath):
filepath (str): File path of an h5 file.
Returns:
ndarray: A 3D array of data, rot_angle, pol_angle, tilt_angle.
ndarray: A 3D array of data, omega, gamma, nu.
"""
with h5py.File(filepath, "r") as h5f:
data = h5f["/entry1/area_detector2/data"][:]
@@ -52,20 +52,37 @@
det_data = {"data": data}
det_data["rot_angle"] = h5f["/entry1/area_detector2/rotation_angle"][:] # om, sometimes ph
det_data["pol_angle"] = h5f["/entry1/ZEBRA/area_detector2/polar_angle"][:] # gammad
det_data["tlt_angle"] = h5f["/entry1/ZEBRA/area_detector2/tilt_angle"][:] # nud
if "/entry1/zebra_mode" in h5f:
det_data["zebra_mode"] = h5f["/entry1/zebra_mode"][0].decode()
else:
det_data["zebra_mode"] = "nb"
# om, sometimes ph
if det_data["zebra_mode"] == "nb":
det_data["omega"] = h5f["/entry1/area_detector2/rotation_angle"][:]
else: # bi
det_data["omega"] = h5f["/entry1/sample/rotation_angle"][:]
det_data["gamma"] = h5f["/entry1/ZEBRA/area_detector2/polar_angle"][:] # gammad
det_data["nu"] = h5f["/entry1/ZEBRA/area_detector2/tilt_angle"][:] # nud
det_data["ddist"] = h5f["/entry1/ZEBRA/area_detector2/distance"][:]
det_data["wave"] = h5f["/entry1/ZEBRA/monochromator/wavelength"][:]
det_data["chi_angle"] = h5f["/entry1/sample/chi"][:] # ch
det_data["phi_angle"] = h5f["/entry1/sample/phi"][:] # ph
det_data["UB"] = h5f["/entry1/sample/UB"][:].reshape(3, 3)
det_data["chi"] = h5f["/entry1/sample/chi"][:] # ch
det_data["phi"] = h5f["/entry1/sample/phi"][:] # ph
det_data["ub"] = h5f["/entry1/sample/UB"][:].reshape(3, 3)
for var in ("omega", "gamma", "nu", "chi", "phi"):
if abs(det_data[var][0] - det_data[var][-1]) > 0.1:
det_data["scan_motor"] = var
break
else:
raise ValueError("No angles that vary")
# optional parameters
if "/entry1/sample/magnetic_field" in h5f:
det_data["magnetic_field"] = h5f["/entry1/sample/magnetic_field"][:]
det_data["mf"] = h5f["/entry1/sample/magnetic_field"][:]
if "/entry1/sample/temperature" in h5f:
det_data["temperature"] = h5f["/entry1/sample/temperature"][:]
det_data["temp"] = h5f["/entry1/sample/temperature"][:]
return det_data
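A short usage sketch of the renamed fields; the .hdf path is a placeholder:

import pyzebra

det_data = pyzebra.read_detector_data("/path/to/zebra_scan.hdf")  # placeholder
print(det_data["scan_motor"])   # whichever of omega/gamma/nu/chi/phi varies
print(det_data["data"].shape)   # 3D detector stack (frames, y, x)
print(det_data["ub"])           # 3x3 UB matrix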

@@ -1,302 +0,0 @@
import numpy as np
import uncertainties as u
def create_tuples(x, y, y_err):
"""creates tuples for sorting and merginng of the data
Counts need to be normalized to monitor before"""
t = list()
for i in range(len(x)):
tup = (x[i], y[i], y_err[i])
t.append(tup)
return t
def normalize(scan, monitor):
"""Normalizes the measurement to monitor, checks if sigma exists, otherwise creates it
:arg dict : dictionary from which to take the scan
:arg key : which scan to normalize from dict1
:arg monitor : final monitor
:return counts - normalized counts
:return sigma - normalized sigma"""
counts = np.array(scan["Counts"])
sigma = np.sqrt(counts) if "sigma" not in scan else scan["sigma"]
monitor_ratio = monitor / scan["monitor"]
scaled_counts = counts * monitor_ratio
scaled_sigma = np.array(sigma) * monitor_ratio
return scaled_counts, scaled_sigma
def merge(scan1, scan2, keep=True, monitor=100000):
"""merges the two tuples and sorts them, if om value is same, Counts value is average
averaging is propagated into sigma if dict1 == dict2, key[1] is deleted after merging
:arg dict1 : dictionary to which measurement will be merged
:arg dict2 : dictionary from which measurement will be merged
:arg scand_dict_result : result of scan_dict after auto function
:arg keep : if true, when monitors are same, does not change it, if false, takes monitor
always
:arg monitor : final monitor after merging
note: dict1 and dict2 can be same dict
:return dict1 with merged scan"""
if keep:
if scan1["monitor"] == scan2["monitor"]:
monitor = scan1["monitor"]
# load om and Counts
x1, x2 = scan1["om"], scan2["om"]
cor_y1, y_err1 = normalize(scan1, monitor=monitor)
cor_y2, y_err2 = normalize(scan2, monitor=monitor)
# creates tuples (om, Counts, sigma) for sorting and further processing
tuple_list = create_tuples(x1, cor_y1, y_err1) + create_tuples(x2, cor_y2, y_err2)
# Sort the list on om and add 0 0 0 tuple to the last position
sorted_t = sorted(tuple_list, key=lambda tup: tup[0])
sorted_t.append((0, 0, 0))
om, Counts, sigma = [], [], []
seen = list()
for i in range(len(sorted_t) - 1):
if sorted_t[i][0] not in seen:
if sorted_t[i][0] != sorted_t[i + 1][0]:
om = np.append(om, sorted_t[i][0])
Counts = np.append(Counts, sorted_t[i][1])
sigma = np.append(sigma, sorted_t[i][2])
else:
om = np.append(om, sorted_t[i][0])
counts1, counts2 = sorted_t[i][1], sorted_t[i + 1][1]
sigma1, sigma2 = sorted_t[i][2], sorted_t[i + 1][2]
count_err1 = u.ufloat(counts1, sigma1)
count_err2 = u.ufloat(counts2, sigma2)
avg = (count_err1 + count_err2) / 2
Counts = np.append(Counts, avg.n)
sigma = np.append(sigma, avg.s)
seen.append(sorted_t[i][0])
else:
continue
scan1["om"] = om
scan1["Counts"] = Counts
scan1["sigma"] = sigma
scan1["monitor"] = monitor
print("merging done")
def check_UB(dict1, dict2, precision=0.01):
truth_list = list()
for i in ["ub1j", "ub2j", "ub3j"]:
for j in range(3):
if abs(abs(float(dict1["meta"][i][j])) - abs(float(dict2["meta"][i][j]))) < precision:
truth_list.append(True)
else:
truth_list.append(False)
# print(truth_list)
if all(truth_list):
return True
else:
return False
def check_zebramode(dict1, dict2):
if dict1["meta"]["zebra_mode"] == dict2["meta"]["zebra_mode"]:
return True
else:
return False
def check_angles(scan1, scan2, angles, precision):
truth_list = list()
for item in angles:
if abs(abs(scan1[item]) - abs(scan2[item])) <= precision[item]:
truth_list.append(True)
else:
truth_list.append(False)
if all(truth_list):
return True
else:
return False
def check_temp_mag(scan1, scan2):
temp_diff = 1
mag_diff = 0.001
truth_list = list()
try:
if abs(abs(scan1["mag_field"]) - abs(scan2["mag_field"])) <= mag_diff:
truth_list.append(True)
else:
truth_list.append(False)
except KeyError:
print("mag_field missing")
try:
if abs(abs(scan1["temperature"]) - abs(scan2["temperature"])) <= temp_diff:
truth_list.append(True)
else:
truth_list.append(False)
except KeyError:
print("temperature missing")
if all(truth_list):
return True
else:
return False
def merge_dups(dictionary, angles):
precision = {
"twotheta_angle": 0.1,
"chi_angle": 0.1,
"nu_angle": 0.1,
"phi_angle": 0.05,
"omega_angle": 0.05,
"gamma_angle": 0.05,
}
for i in list(dictionary["scan"]):
for j in list(dictionary["scan"]):
if i == j:
continue
else:
# print(i, j)
if check_angles(dictionary["scan"][i], dictionary["scan"][j], angles, precision):
merge(dictionary["scan"][i], dictionary["scan"][j])
print("merged %d with %d" % (i, j))
del dictionary["scan"][j]
merge_dups(dictionary, angles)
break
else:
continue
break
def add_scan(dict1, dict2, scan_to_add):
max_scan = np.max(list(dict1["scan"]))
dict1["scan"][max_scan + 1] = dict2["scan"][scan_to_add]
if dict1.get("extra_meta") is None:
dict1["extra_meta"] = {}
dict1["extra_meta"][max_scan + 1] = dict2["meta"]
del dict2["scan"][scan_to_add]
def process(dict1, dict2, angles, precision):
# stop when the second dict is empty
# print(dict2["scan"])
if dict2["scan"]:
print("doing something")
# check UB matrixes
if check_UB(dict1, dict2):
# iterate over second dict and check for matches
for i in list(dict2["scan"]):
for j in list(dict1["scan"]):
if check_angles(dict1["scan"][j], dict2["scan"][i], angles, precision):
# angles good, see the mag and temp
if check_temp_mag(dict1["scan"][j], dict2["scan"][i]):
merge(dict1["scan"][j], dict2["scan"][i])
print("merged")
del dict2["scan"][i]
process(dict1, dict2, angles, precision)
break
else:
add_scan(dict1, dict2, i)
print("scan added r")
process(dict1, dict2, angles, precision)
break
else:
add_scan(dict1, dict2, i)
print("scan added l")
process(dict1, dict2, angles, precision)
break
else:
continue
break
else:
# ask the user if they really want to add
print("UBs are different, do you really wish to add datasets? Y/N")
dict1 = add_dict(dict1, dict2)
return
"""
1. check for bisecting or normal beam geometry in data files; select stt, om, chi, phi for bisecting; select stt, om, nu for normal beam
2. in the ccl files, check for identical stt, chi and nu within 0.1 degree, and, at the same time, for identical om and phi within 0.05 degree;
3. in the dat files, check for identical stt, chi and nu within 0.1 degree, and, at the same time,
for identical phi within 0.05 degree, and, at the same time, for identical om within 5 degree."""
def unified_merge(dict1, dict2):
if not check_zebramode(dict1, dict2):
print("You are trying to add two files with different zebra mdoe")
return
# decide angles
if dict1["meta"]["zebra_mode"] == "bi":
angles = ["twotheta_angle", "omega_angle", "chi_angle", "phi_angle"]
elif dict1["meta"]["zebra_mode"] == "nb":
angles = ["gamma_angle", "omega_angle", "nu_angle"]
# precision of angles to check
precision = {
"twotheta_angle": 0.1,
"chi_angle": 0.1,
"nu_angle": 0.1,
"phi_angle": 0.05,
"omega_angle": 5,
"gamma_angle": 0.05,
}
if (dict1["meta"]["data_type"] == "ccl") and (dict2["meta"]["data_type"] == "ccl"):
precision["omega_angle"] = 0.05
# check for duplicates in original files
for d in dict1, dict2:
# no duplicates in dats
if d["meta"]["data_type"] == "dat":
continue
else:
merge_dups(d, angles)
process(dict1, dict2, angles, precision)
def add_dict(dict1, dict2):
"""adds two dictionaries, meta of the new is saved as meata+original_filename and
measurements are shifted to continue with numbering of first dict
:arg dict1 : dictionarry to add to
:arg dict2 : dictionarry from which to take the measurements
:return dict1 : combined dictionary
Note: dict1 must be made from ccl, otherwise we would have to change the structure of loaded
dat file"""
try:
if dict1["meta"]["zebra_mode"] != dict2["meta"]["zebra_mode"]:
print("You are trying to add scans measured with different zebra modes")
return
# this is for the qscan case
except KeyError:
print("Zebra mode not specified")
max_measurement_dict1 = max([keys for keys in dict1["scan"]])
new_filenames = np.arange(
max_measurement_dict1 + 1, max_measurement_dict1 + 1 + len(dict2["scan"])
)
if dict1.get("extra_meta") is None:
dict1["extra_meta"] = {}
new_meta_name = "meta" + str(dict2["meta"]["original_filename"])
if new_meta_name not in dict1:
for keys, name in zip(dict2["scan"], new_filenames):
dict2["scan"][keys]["file_of_origin"] = str(dict2["meta"]["original_filename"])
dict1["scan"][name] = dict2["scan"][keys]
dict1["extra_meta"][name] = dict2["meta"]
dict1[new_meta_name] = dict2["meta"]
else:
raise KeyError(
str(
"The file %s has alredy been added to %s"
% (dict2["meta"]["original_filename"], dict1["meta"]["original_filename"])
)
)
return dict1

@@ -1,488 +0,0 @@
import pickle
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.io as sio
import uncertainties as u
from mpl_toolkits.mplot3d import Axes3D  # don't delete, otherwise waterfall won't work
import collections
from .ccl_io import load_1D
def create_tuples(x, y, y_err):
"""creates tuples for sorting and merginng of the data
Counts need to be normalized to monitor before"""
t = list()
for i in range(len(x)):
tup = (x[i], y[i], y_err[i])
t.append(tup)
return t
def load_dats(filepath):
"""reads the txt file, get headers and data
:arg filepath to txt file or list of filepaths to the files
:return ccl like dictionary"""
if isinstance(filepath, str):
data_type = "txt"
file_list = list()
with open(filepath, "r") as infile:
col_names = next(infile).split(",")
col_names = [col_names[i].rstrip() for i in range(len(col_names))]
for line in infile:
if "END" in line:
break
file_list.append(tuple(line.split(",")))
elif isinstance(filepath, list):
data_type = "list"
file_list = filepath
dict1 = {}
for i in range(len(file_list)):
if not dict1:
if data_type == "txt":
dict1 = load_1D(file_list[0][0])
else:
dict1 = load_1D(file_list[0])
else:
if data_type == "txt":
dict1 = add_dict(dict1, load_1D(file_list[i][0]))
else:
dict1 = add_dict(dict1, load_1D(file_list[i]))
dict1["scan"][i + 1]["params"] = {}
if data_type == "txt":
for x in range(len(col_names) - 1):
dict1["scan"][i + 1]["params"][col_names[x + 1]] = float(file_list[i][x + 1])
return dict1
def create_dataframe(dict1, variables):
"""Creates pandas dataframe from the dictionary
:arg ccl like dictionary
:return pandas dataframe"""
# create dictionary to which we pull only wanted items before transforming it to pd.dataframe
pull_dict = {}
pull_dict["filenames"] = list()
for keys in variables:
for item in variables[keys]:
pull_dict[item] = list()
pull_dict["fit_area"] = list()
pull_dict["int_area"] = list()
pull_dict["Counts"] = list()
for keys in pull_dict:
print(keys)
# populate the dict
for keys in dict1["scan"]:
if "file_of_origin" in dict1["scan"][keys]:
pull_dict["filenames"].append(dict1["scan"][keys]["file_of_origin"].split("/")[-1])
else:
pull_dict["filenames"].append(dict1["meta"]["original_filename"].split("/")[-1])
pull_dict["fit_area"].append(dict1["scan"][keys]["fit"]["fit_area"])
pull_dict["int_area"].append(dict1["scan"][keys]["fit"]["int_area"])
pull_dict["Counts"].append(dict1["scan"][keys]["Counts"])
for key in variables:
for i in variables[key]:
pull_dict[i].append(_finditem(dict1["scan"][keys], i))
return pd.DataFrame(data=pull_dict)
def sort_dataframe(dataframe, sorting_parameter):
"""sorts the data frame and resets index"""
data = dataframe.sort_values(by=sorting_parameter)
data = data.reset_index(drop=True)
return data
def make_graph(data, sorting_parameter, style):
"""Makes the graph from the data based on style and sorting parameter
:arg data : pandas dataframe with data after sorting
:arg sorting_parameter to pull the correct variable and name
:arg style of the graph - waterfall, scatter, heatmap
:return matplotlib figure"""
if style == "waterfall":
mpl.rcParams["legend.fontsize"] = 10
fig = plt.figure()
ax = fig.gca(projection="3d")
for i in range(len(data)):
x = data["om"][i]
z = data["Counts"][i]
yy = [data[sorting_parameter][i]] * len(x)
ax.plot(x, yy, z, label=str("%s = %f" % (sorting_parameter, yy[i])))
ax.legend()
ax.set_xlabel("Omega")
ax.set_ylabel(sorting_parameter)
ax.set_zlabel("counts")
elif style == "scatter":
fig = plt.figure()
plt.errorbar(
data[sorting_parameter],
[data["fit_area"][i].n for i in range(len(data["fit_area"]))],
[data["fit_area"][i].s for i in range(len(data["fit_area"]))],
capsize=5,
ecolor="green",
)
plt.xlabel(str(sorting_parameter))
plt.ylabel("Intesity")
elif style == "heat":
new_om = list()
for i in range(len(data)):
new_om = np.append(new_om, np.around(data["om"][i], 2), axis=0)
unique_om = np.unique(new_om)
color_matrix = np.zeros(shape=(len(data), len(unique_om)))
for i in range(len(data)):
for j in range(len(data["om"][i])):
if np.around(data["om"][i][j], 2) in np.unique(new_om):
color_matrix[i, j] = data["Counts"][i][j]
else:
continue
fig = plt.figure()
plt.pcolormesh(unique_om, data[sorting_parameter], color_matrix, shading="gouraud")
plt.xlabel("omega")
plt.ylabel(sorting_parameter)
plt.colorbar()
plt.clim(color_matrix.mean(), color_matrix.max())
return fig
def save_dict(obj, name):
"""saves dictionary as pickle file in binary format
:arg obj - object to save
:arg name - name of the file
NOTE: path should be added later"""
with open(name + ".pkl", "wb") as f:
pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
def load_dict(name):
"""load dictionary from picle file
:arg name - name of the file to load
NOTE: expect the file in the same folder, path should be added later
:return dictionary"""
with open(name + ".pkl", "rb") as f:
return pickle.load(f)
# pickle, mat, h5, txt, csv, json
def save_table(data, filetype, name, path=None):
print("Saving: ", filetype)
path = "" if path is None else path
if filetype == "pickle":
# to work with uncertainties, see the uncertainties module
with open(path + name + ".pkl", "wb") as f:
pickle.dump(data, f, pickle.HIGHEST_PROTOCOL)
if filetype == "mat":
# MATLAB doesn't allow some special characters in var names, and they can't
# start with numbers; if needed, add more to the remove_characters list
data["fit_area_nom"] = [data["fit_area"][i].n for i in range(len(data["fit_area"]))]
data["fit_area_err"] = [data["fit_area"][i].s for i in range(len(data["fit_area"]))]
data["int_area_nom"] = [data["int_area"][i].n for i in range(len(data["int_area"]))]
data["int_area_err"] = [data["int_area"][i].s for i in range(len(data["int_area"]))]
data = data.drop(columns=["fit_area", "int_area"])
remove_characters = [" ", "[", "]", "{", "}", "(", ")"]
for character in remove_characters:
data.columns = [
data.columns[i].replace(character, "") for i in range(len(data.columns))
]
sio.savemat((path + name + ".mat"), {name: col.values for name, col in data.items()})
if filetype == "csv" or "txt":
data["fit_area_nom"] = [data["fit_area"][i].n for i in range(len(data["fit_area"]))]
data["fit_area_err"] = [data["fit_area"][i].s for i in range(len(data["fit_area"]))]
data["int_area_nom"] = [data["int_area"][i].n for i in range(len(data["int_area"]))]
data["int_area_err"] = [data["int_area"][i].s for i in range(len(data["int_area"]))]
data = data.drop(columns=["fit_area", "int_area", "om", "Counts"])
if filetype == "csv":
data.to_csv(path + name + ".csv")
if filetype == "txt":
with open((path + name + ".txt"), "w") as outfile:
data.to_string(outfile)
if filetype == "h5":
hdf = pd.HDFStore((path + name + ".h5"))
hdf.put("data", data)
hdf.close()
if filetype == "json":
data.to_json((path + name + ".json"))
def normalize(scan, monitor):
"""Normalizes the measurement to monitor, checks if sigma exists, otherwise creates it
:arg dict : dictionary from which to take the scan
:arg key : which scan to normalize from dict1
:arg monitor : final monitor
:return counts - normalized counts
:return sigma - normalized sigma"""
counts = np.array(scan["Counts"])
sigma = np.sqrt(counts) if "sigma" not in scan else scan["sigma"]
monitor_ratio = monitor / scan["monitor"]
scaled_counts = counts * monitor_ratio
scaled_sigma = np.array(sigma) * monitor_ratio
return scaled_counts, scaled_sigma
def merge(scan1, scan2, keep=True, monitor=100000):
"""merges the two tuples and sorts them, if om value is same, Counts value is average
averaging is propagated into sigma if dict1 == dict2, key[1] is deleted after merging
:arg dict1 : dictionary to which measurement will be merged
:arg dict2 : dictionary from which measurement will be merged
:arg scand_dict_result : result of scan_dict after auto function
:arg keep : if true, when monitors are same, does not change it, if false, takes monitor
always
:arg monitor : final monitor after merging
note: dict1 and dict2 can be same dict
:return dict1 with merged scan"""
if keep:
if scan1["monitor"] == scan2["monitor"]:
monitor = scan1["monitor"]
# load om and Counts
x1, x2 = scan1["om"], scan2["om"]
cor_y1, y_err1 = normalize(scan1, monitor=monitor)
cor_y2, y_err2 = normalize(scan2, monitor=monitor)
# creates tuples (om, Counts, sigma) for sorting and further processing
tuple_list = create_tuples(x1, cor_y1, y_err1) + create_tuples(x2, cor_y2, y_err2)
# Sort the list on om and add 0 0 0 tuple to the last position
sorted_t = sorted(tuple_list, key=lambda tup: tup[0])
sorted_t.append((0, 0, 0))
om, Counts, sigma = [], [], []
seen = list()
for i in range(len(sorted_t) - 1):
if sorted_t[i][0] not in seen:
if sorted_t[i][0] != sorted_t[i + 1][0]:
om = np.append(om, sorted_t[i][0])
Counts = np.append(Counts, sorted_t[i][1])
sigma = np.append(sigma, sorted_t[i][2])
else:
om = np.append(om, sorted_t[i][0])
counts1, counts2 = sorted_t[i][1], sorted_t[i + 1][1]
sigma1, sigma2 = sorted_t[i][2], sorted_t[i + 1][2]
count_err1 = u.ufloat(counts1, sigma1)
count_err2 = u.ufloat(counts2, sigma2)
avg = (count_err1 + count_err2) / 2
Counts = np.append(Counts, avg.n)
sigma = np.append(sigma, avg.s)
seen.append(sorted_t[i][0])
else:
continue
scan1["om"] = om
scan1["Counts"] = Counts
scan1["sigma"] = sigma
scan1["monitor"] = monitor
print("merging done")
def add_dict(dict1, dict2):
"""adds two dictionaries, meta of the new is saved as meata+original_filename and
measurements are shifted to continue with numbering of first dict
:arg dict1 : dictionarry to add to
:arg dict2 : dictionarry from which to take the measurements
:return dict1 : combined dictionary
Note: dict1 must be made from ccl, otherwise we would have to change the structure of loaded
dat file"""
try:
if dict1["meta"]["zebra_mode"] != dict2["meta"]["zebra_mode"]:
print("You are trying to add scans measured with different zebra modes")
return
# this is for the qscan case
except KeyError:
print("Zebra mode not specified")
max_measurement_dict1 = max([keys for keys in dict1["scan"]])
new_filenames = np.arange(
max_measurement_dict1 + 1, max_measurement_dict1 + 1 + len(dict2["scan"])
)
new_meta_name = "meta" + str(dict2["meta"]["original_filename"])
if new_meta_name not in dict1:
for keys, name in zip(dict2["scan"], new_filenames):
dict2["scan"][keys]["file_of_origin"] = str(dict2["meta"]["original_filename"])
dict1["scan"][name] = dict2["scan"][keys]
dict1[new_meta_name] = dict2["meta"]
else:
raise KeyError(
str(
"The file %s has alredy been added to %s"
% (dict2["meta"]["original_filename"], dict1["meta"]["original_filename"])
)
)
return dict1
def auto(dict):
"""takes just unique tuples from all tuples in dictionary returend by scan_dict
intendet for automatic merge if you doesent want to specify what scans to merge together
args: dict - dictionary from scan_dict function
:return dict - dict without repetitions"""
for keys in dict:
tuple_list = dict[keys]
new = list()
for i in range(len(tuple_list)):
if tuple_list[0][0] == tuple_list[i][0]:
new.append(tuple_list[i])
dict[keys] = new
return dict
def scan_dict(dict, precision=0.5):
"""scans dictionary for duplicate angles indexes
:arg dict : dictionary to scan
:arg precision : in deg; sometimes angles are zero, so it's easier this way instead of
checking zero division
:return dictionary with matching scans, if there are none, the dict is empty
note: can be checked by "not d", true if empty
"""
if dict["meta"]["zebra_mode"] == "bi":
angles = ["twotheta_angle", "omega_angle", "chi_angle", "phi_angle"]
elif dict["meta"]["zebra_mode"] == "nb":
angles = ["gamma_angle", "omega_angle", "nu_angle"]
else:
print("Unknown zebra mode")
return
d = {}
for i in dict["scan"]:
for j in dict["scan"]:
if dict["scan"][i] != dict["scan"][j]:
itup = list()
for k in angles:
itup.append(abs(abs(dict["scan"][i][k]) - abs(dict["scan"][j][k])))
if all(i <= precision for i in itup):
print(itup)
print([dict["scan"][i][k] for k in angles])
print([dict["scan"][j][k] for k in angles])
if str([np.around(dict["scan"][i][k], 0) for k in angles]) not in d:
d[str([np.around(dict["scan"][i][k], 0) for k in angles])] = list()
d[str([np.around(dict["scan"][i][k], 0) for k in angles])].append((i, j))
else:
d[str([np.around(dict["scan"][i][k], 0) for k in angles])].append((i, j))
else:
pass
else:
continue
return d
def _finditem(obj, key):
if key in obj:
return obj[key]
for k, v in obj.items():
if isinstance(v, dict):
item = _finditem(v, key)
if item is not None:
return item
def most_common(lst):
return max(set(lst), key=lst.count)
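# Hedged sketch of the two helpers above: _finditem does a depth-first
# search for a key anywhere in a nested dict, and most_common returns
# the mode of a list.
def _demo_helpers():
    nested = {"a": {"b": {"temperature": 1.5}}}
    t = _finditem(nested, "temperature")  # -> 1.5
    m = most_common([1, 2, 2, 3])  # -> 2
    return t, m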
def variables(dictionary):
"""Funcrion to guess what variables will be used in the param study
i call pripary variable the one the array like variable, usually omega
and secondary the slicing variable, different for each scan,for example temperature"""
# find all variables that are in all scans
stdev_precision = 0.05
all_vars = list()
for keys in dictionary["scan"]:
all_vars.append([key for key in dictionary["scan"][keys] if key != "params"])
if dictionary["scan"][keys]["params"]:
            all_vars.append(list(dictionary["scan"][keys]["params"]))  # materialize keys, not a one-shot generator
all_vars = [i for sublist in all_vars for i in sublist]
# get the ones that are in all scans
b = collections.Counter(all_vars)
inall = [key for key in b if b[key] == len(dictionary["scan"])]
# delete those that are obviously wrong
wrong = [
"NP",
"Counts",
"Monitor1",
"Monitor2",
"Monitor3",
"h_index",
"l_index",
"k_index",
"n_points",
"monitor",
"Time",
"omega_angle",
"twotheta_angle",
"chi_angle",
"phi_angle",
"nu_angle",
]
inall_red = [i for i in inall if i not in wrong]
    # check for the primary variable; it needs to be a list, and we don't expect
    # the primary variable to appear as a parameter (i.e. in scan["params"])
primary_candidates = list()
for key in dictionary["scan"]:
for i in inall_red:
if isinstance(_finditem(dictionary["scan"][key], i), list):
if np.std(_finditem(dictionary["scan"][key], i)) > stdev_precision:
primary_candidates.append(i)
# check which of the primary are in every scan
primary_candidates = collections.Counter(primary_candidates)
second_round_primary_candidates = [
key for key in primary_candidates if primary_candidates[key] == len(dictionary["scan"])
]
if len(second_round_primary_candidates) == 1:
print("We've got a primary winner!", second_round_primary_candidates)
else:
print("Still not sure with primary:(", second_round_primary_candidates)
    # check for the secondary variable; we expect a float/int or a non-changing array
    # (the primary candidates don't need to be checked again)
secondary_candidates = [i for i in inall_red if i not in second_round_primary_candidates]
# print("secondary candidates", secondary_candidates)
# select arrays and floats and ints
second_round_secondary_candidates = list()
for key in dictionary["scan"]:
for i in secondary_candidates:
if isinstance(_finditem(dictionary["scan"][key], i), float):
second_round_secondary_candidates.append(i)
elif isinstance(_finditem(dictionary["scan"][key], i), int):
second_round_secondary_candidates.append(i)
elif isinstance(_finditem(dictionary["scan"][key], i), list):
if np.std(_finditem(dictionary["scan"][key], i)) < stdev_precision:
second_round_secondary_candidates.append(i)
second_round_secondary_candidates = collections.Counter(second_round_secondary_candidates)
second_round_secondary_candidates = [
key
for key in second_round_secondary_candidates
if second_round_secondary_candidates[key] == len(dictionary["scan"])
]
# print("secondary candidates after second round", second_round_secondary_candidates)
# now we check if they vary between the scans
third_round_sec_candidates = list()
for i in second_round_secondary_candidates:
check_array = list()
for keys in dictionary["scan"]:
check_array.append(np.average(_finditem(dictionary["scan"][keys], i)))
# print(i, check_array, np.std(check_array))
if np.std(check_array) > stdev_precision:
third_round_sec_candidates.append(i)
if len(third_round_sec_candidates) == 1:
print("We've got a secondary winner!", third_round_sec_candidates)
else:
print("Still not sure with secondary :(", third_round_sec_candidates)
return {"primary": second_round_primary_candidates, "secondary": third_round_sec_candidates}


@@ -407,24 +407,24 @@ def box_int(file, box):
     dat = pyzebra.read_detector_data(file)
-    sttC = dat["pol_angle"][0]
-    om = dat["rot_angle"]
-    nuC = dat["tlt_angle"][0]
+    sttC = dat["gamma"][0]
+    om = dat["omega"]
+    nuC = dat["nu"][0]
     ddist = dat["ddist"]
     # defining indices
     x0, xN, y0, yN, fr0, frN = box
     # omega fit
-    om = dat["rot_angle"][fr0:frN]
+    om = dat["omega"][fr0:frN]
     cnts = np.sum(dat["data"][fr0:frN, y0:yN, x0:xN], axis=(1, 2))
     p0 = [1.0, 0.0, 1.0]
     coeff, var_matrix = curve_fit(gauss, range(len(cnts)), cnts, p0=p0)
     frC = fr0 + coeff[1]
-    omF = dat["rot_angle"][math.floor(frC)]
-    omC = dat["rot_angle"][math.ceil(frC)]
+    omF = dat["omega"][math.floor(frC)]
+    omC = dat["omega"][math.ceil(frC)]
     frStep = frC - math.floor(frC)
     omStep = omC - omF
     omP = omF + omStep * frStep
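# Hedged sketch of the fractional-frame interpolation shown in the hunk
# above: the Gaussian fit yields a sub-frame peak centre frC, and the
# peak omega is obtained by linear interpolation between the omega
# values of the two neighbouring integer frames. Values are made up.
def _demo_omega_interpolation():
    import math

    om = [4.8, 5.0, 5.2, 5.4]  # omega per detector frame (toy values)
    frC = 1.6  # fitted fractional frame centre
    omF = om[math.floor(frC)]  # omega at frame 1 -> 5.0
    omC = om[math.ceil(frC)]  # omega at frame 2 -> 5.2
    frStep = frC - math.floor(frC)  # 0.6
    omStep = omC - omF  # 0.2
    return omF + omStep * frStep  # 5.0 + 0.2 * 0.6 = 5.12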