138 Commits
0.4.0 ... 0.7.0

Author SHA1 Message Date
b8cf76220c Updating for version 0.7.0 2022-05-23 09:16:20 +02:00
20e43ecce9 Remove temp and mf from parameters match 2022-05-20 14:56:55 +02:00
bcb6f7bd3b Add a workaround for single motor positions 2022-05-20 14:20:01 +02:00
c3b198f63a Adapt 'temp' to new h5 file format 2022-05-20 12:48:54 +02:00
f044e739a0 Adapt counts to new h5 file format 2022-05-06 16:57:06 +02:00
6f83292d69 Temporary resolve issues with new format metadata 2022-05-06 15:54:12 +02:00
7075e0f42b Assume scan_motor to be "om" if it's not present 2022-05-06 15:29:45 +02:00
1d43c86cff Fix UB matrix read 2022-05-06 15:29:09 +02:00
cde2aa1182 Fix letter case for UB matrix metadata 2022-05-06 14:46:37 +02:00
df9607ceb9 Adapt 'wave' to new h5 file format 2022-05-06 14:20:26 +02:00
5c5be065be Adapt 'monitor' and 'omega' to new h5 file format 2022-05-06 12:27:31 +02:00
279dee3935 Temporary fix for new h5 files
Datasets have changed from a single number to arrays
2022-05-06 12:14:27 +02:00
4ebb343afb Support new UB matrix format in ccl files 2022-05-06 11:19:25 +02:00
2810cf3d2b Fix legend when res_flag is True 2022-05-05 19:04:14 +02:00
585d4b3f54 Code refactoring 2022-05-04 20:01:53 +02:00
72fd54d268 Add legend items for propagation vectors 2022-05-04 17:20:24 +02:00
181c359ef9 Add legend items for file entries 2022-05-04 17:10:18 +02:00
00607d94c7 Add a widget for orthogonal cut value 2022-05-03 17:27:45 +02:00
a192648cc4 Add ccl_prepare plotting functionality
* Based on Camilla's notebook
2022-05-03 17:18:53 +02:00
42adda235b Update ccl_prepare plot widgets 2022-04-30 20:12:24 +02:00
e30035350b Generalize search for proposals
Avoid hardcoding specific years in proposal paths
2022-04-28 16:37:54 +02:00
89fffed5d1 Add 2022 data folder 2022-04-28 15:04:18 +02:00
fae90cbeae Initial version of (m)hkl sorting 2022-04-26 16:37:20 +02:00
7b70d30578 Use uploaded cfl filename as a default for outputs 2022-04-21 18:15:33 +02:00
b26d7005c5 Optimize layout 2022-04-21 17:50:54 +02:00
90021428ee Add support for lattiCE and multiple kvect values 2022-04-21 17:50:50 +02:00
bf97af1949 Treat 2theta as gamma in geom files 2022-04-21 12:00:50 +02:00
098314e30d Performance optimization
Use lists as intermediate data structure to avoid lots of numpy array
allocations
2022-04-20 13:15:48 +02:00
49ff319230 Use TextInput for wavelength widget 2022-04-14 16:14:27 +02:00
9b1e396bdb Disabled currently no-op buttons 2022-04-13 10:15:44 +02:00
d72c1d478e Add download option for hkl/mhkl files 2022-04-13 10:12:30 +02:00
fa73271076 Display created hkl and mhkl lists 2022-04-13 09:16:14 +02:00
4d2a8ecad2 Run sxtal_refgen on GO press 2022-04-12 20:12:11 +02:00
6f1fe1a3d8 Correctly strip lines in read_cfl_file 2022-04-12 19:45:45 +02:00
8d4f4a9b50 Add export_cfl_file function 2022-04-12 19:45:00 +02:00
c94f784373 Keep params as nonlocal var 2022-04-12 18:44:23 +02:00
de2a10b507 Keep ang_lims as nonlocal var 2022-04-12 17:01:21 +02:00
e2220760f0 Save geometry in ang_lims 2022-04-12 16:09:22 +02:00
39b2af60ca Allow export of geom file based on template 2022-04-12 15:11:42 +02:00
94df4869d4 Add ability to load cif data 2022-04-12 12:05:42 +02:00
4131eb31e7 Activate cfl file upload 2022-04-11 16:06:19 +02:00
4ee1f6fb70 Ignore comments in geom and cfl files 2022-04-11 16:04:10 +02:00
fbce5de7b9 Handle default cfl file 2022-04-11 15:16:37 +02:00
636badfba8 Handle different number of angles for geoms 2022-04-11 14:15:45 +02:00
77d6f42e0d Activate initially the first geom selection 2022-04-11 12:29:01 +02:00
166259d669 Merge min/max widgets of ang limits 2022-04-11 12:13:21 +02:00
14d65c9590 Update default geometry files 2022-04-11 11:33:35 +02:00
48b001180f Add default values for ranges 2022-04-07 19:48:06 +02:00
21332b3edc Calculate ub matrix via Sxtal_Refgen 2022-04-07 19:13:04 +02:00
421fc726dd Handle wavelength-related widgets 2022-04-07 17:05:09 +02:00
eb3cc99aeb Implement angular limit updates from geom files 2022-04-06 17:51:41 +02:00
d36686177d Add optional --sxtal-refgen-path cli argument 2022-04-06 17:38:46 +02:00
7e2c6ad21f Add basic functions to handle geom files 2022-04-06 17:38:14 +02:00
03ed97e063 Add panel_ccl_prepare
Basic layout only
2022-03-11 17:05:45 +01:00
234fb1f3b6 Update hdf_view with 2D detector data merging 2022-02-14 11:04:56 +01:00
e8ce57b56b Add functions for 2D detector data merging 2022-02-14 11:04:16 +01:00
ac6f67cc53 Reduce scan motor position precision for merging 2022-02-08 15:23:03 +01:00
3234a544de Cast counts to float64 in h5 data
For correct calculation of count_err
2022-02-03 17:15:42 +01:00
4bd6c6760e Fix parse_h5meta 2022-02-03 17:12:48 +01:00
b401d2f459 Fix param_study for an unconverged fit
For #49
2022-02-02 10:42:44 +01:00
6b8d15234b Fix check for reversed ranges upon scan merging 2022-01-28 15:06:28 +01:00
5cfa5c176d Add counts_err, idx and scan_motors to hdf data 2022-01-28 13:47:57 +01:00
bbe7b7d305 Rename data into counts for hdf5 zebra data 2022-01-28 10:08:27 +01:00
4ba08366df Assign twotheta to polar_angle 2022-01-28 09:55:43 +01:00
51c78ad06b Read monitor value in hdf 2022-01-27 17:45:01 +01:00
bcd594fa7e Utility renames and hdf_param_study simplification 2022-01-27 16:37:55 +01:00
10bcabd7f9 Updating for version 0.6.5 2022-01-21 10:25:01 +01:00
bee8263184 Use dist2 in cami for detector distance 2022-01-21 10:24:30 +01:00
98a76ebf63 Updating for version 0.6.4 2022-01-03 17:34:23 +01:00
12ba0b291b Update overview map plotting 2022-01-03 17:33:37 +01:00
a719f10f4f Replace interp2d with griddata 2022-01-03 16:12:48 +01:00
aaff6032c8 Allow single frame in hdf_viewer 2022-01-03 10:25:19 +01:00
3fb3fe573b Handle single frame h5 files 2021-12-23 20:05:58 +01:00
1687337f26 Report in user output if reading h5 file fails 2021-12-23 17:41:15 +01:00
53ceac21aa Suppress RuntimeWarning in interp2d 2021-12-23 15:37:05 +01:00
741a09819c Fix unpacking from enumerate 2021-12-20 13:10:05 +01:00
3c619713d5 Updating for version 0.6.3 2021-12-07 09:49:09 +01:00
3d5a4ed6aa Fix error calculation for 0-count data points
Fix #47
2021-12-01 12:18:24 +01:00
b2129805dc Updating for version 0.6.2 2021-11-22 16:00:44 +01:00
92765b5665 Fix error calculation when merging data
Fix #46
2021-11-22 14:23:50 +01:00
328b71e058 Updating for version 0.6.1 2021-11-19 16:20:19 +01:00
11ab8485bc Avoid crash on failed interp2d
There is often not enough data for 2d interpolation at intermediate data
analysis steps
2021-11-16 18:55:30 +01:00
4734b3e50f Do not export data without a specified parameter 2021-11-15 16:32:16 +01:00
dfeeed284b Updating for version 0.6.0 2021-11-15 09:21:26 +01:00
9adf83ec74 Minor visual tweaks 2021-11-12 17:04:13 +01:00
a299449209 Add ccl_compare panel
Fix #41
2021-11-12 16:47:01 +01:00
45a81aa632 Set chi and phi to None for peaks in nb geometry
Fix #44
2021-11-10 15:59:54 +01:00
3926e8de39 Add gamma column
Fix #43
2021-11-10 14:44:10 +01:00
d2e2a2c7fd Do not reset Parameter on opening new data
Fix #45
2021-11-09 17:49:08 +01:00
3934dcdd07 Update overview plot on parameter change
For #45
2021-11-09 17:12:15 +01:00
4c8037af5c Convenience fix for restoring chain-merged scans 2021-11-09 16:39:12 +01:00
e29b4e7da8 Add Restore and Merge buttons to param study
For #45
2021-11-09 16:38:16 +01:00
7189ee8196 Fix gamma and nu axes
For #44
2021-11-09 15:04:23 +01:00
be8417856a Open hdf file on file selection
For #44
2021-11-09 11:00:47 +01:00
8ba062064a Rename column (twotheta -> 2theta)
For #43
2021-11-08 16:39:03 +01:00
6557b2f3a4 Average counts per scan position upon scan merging
Fix #42
2021-11-08 16:04:03 +01:00
7dcd20198f Do not set area_v to nan if the fit is not good
For #42
2021-11-08 13:52:08 +01:00
13a6ff285a Check for scan motors upon dataset merging 2021-10-22 16:11:24 +02:00
09b6e4fdcf Skip unreadable files
For #41
2021-10-22 15:36:42 +02:00
e7780a2405 Add gamma and nu axes
For #41
2021-10-20 15:27:47 +02:00
e8b85bcea3 Add extra angle columns for scan center
For #41
2021-10-20 10:39:03 +02:00
2482746f14 Fix for FileInput props not updating simultaneously 2021-10-20 09:28:03 +02:00
3986b8173f Use scan_motor instead of "omega" 2021-10-20 09:15:01 +02:00
16966b6e3e Fix export flag change
For #41
2021-10-20 09:12:04 +02:00
e9d3fcc41a Fix lmfit >=1.0.2
Needed for two-dimensional Gaussian model
2021-10-19 18:18:38 +02:00
506d70a913 Add Open New button to panel_hdf_viewer
For #41
2021-10-19 18:07:20 +02:00
fc4e9c12cf Calculate angles in the detector center
For #41
2021-10-19 17:33:19 +02:00
c5faa0a55a Update button labels
For #41
2021-10-19 14:51:35 +02:00
c9922bb0cb Reuse fit_event in panel_hdf_viewer 2021-10-19 14:50:56 +02:00
813270d6f8 Refactor fit_event 2021-10-19 10:59:06 +02:00
cf2f8435e7 Allow debug of external libs 2021-10-18 13:28:12 +02:00
380abfb102 Add support for direct hdf file upload
Fix #38
2021-10-08 16:42:56 +02:00
c8502a3b93 Updating for version 0.5.2 2021-10-05 22:04:36 +02:00
b84fc632aa Restrict DataTable column editing
Setting editor to CellEditor makes the column read-only for user even
if "editable=True" for the entire DataTable
2021-10-05 16:34:15 +02:00
3acd57adb9 Abort fit if there is no data in range 2021-10-05 16:34:05 +02:00
960ce0a534 Updating for version 0.5.1 2021-10-01 16:15:46 +02:00
1d43a952e6 Remove strict channel priority 2021-10-01 16:14:06 +02:00
9f7a7b8bbf Bump bokeh=2.4 2021-10-01 15:54:04 +02:00
8129b5e683 Fix scan_motors renaming 2021-10-01 15:44:22 +02:00
eaa6c4a2ad Correctly merge multiple scans in one 2021-09-30 20:12:19 +02:00
c2be907113 Fix error values calculation
Fix #40
2021-09-29 16:49:43 +02:00
4dae756b3e Add error bars to parameter plot
Fix #39
2021-09-21 14:56:57 +02:00
a77a40618d Add Apply button to proposal selection 2021-09-08 17:17:17 +02:00
a73c34b06f Utility cleanup 2021-09-08 16:07:04 +02:00
4b9f0a8c36 Fix upload data button 2021-09-08 14:58:22 +02:00
9f56921072 Average counts in case of almost identical scans
Fix #37
2021-09-08 14:30:15 +02:00
49a6bd22ae Merge datasets in param_study 2021-09-08 14:05:57 +02:00
5b502b31eb Refactor file reads 2021-08-25 17:18:33 +02:00
20e99c35ba Update export column on scan merge/restore
For #37
2021-08-25 15:13:37 +02:00
abf4750030 Unify proposal id for all tabs
For #36
2021-08-24 18:07:04 +02:00
5de09d16ca Normalize projection images to max value of 1000 2021-08-24 14:30:16 +02:00
5c4362d984 Updating for version 0.5.0 2021-08-24 09:24:27 +02:00
8d065b85a4 Fix filenames for download 2021-08-20 17:00:11 +02:00
c86466b470 Add export to param_study 2021-08-20 16:35:55 +02:00
b8968192ca Enable editing lattice constants
Fix #35
2021-08-19 15:14:48 +02:00
4745f0f401 Minor formatting fix 2021-07-15 09:19:22 +02:00
9f6e7230fa Initial implementation of hdf param study panel 2021-07-15 08:41:24 +02:00
089a0cf5ac Add hdf param study panel (based on hdf viewer) 2021-07-06 16:31:30 +02:00
19 changed files with 3915 additions and 592 deletions

View File

@ -16,7 +16,6 @@ jobs:
run: |
$CONDA/bin/conda install --quiet --yes conda-build anaconda-client
$CONDA/bin/conda config --append channels conda-forge
$CONDA/bin/conda config --set channel_priority strict
$CONDA/bin/conda config --set anaconda_upload yes
- name: Build and upload

1
.vscode/launch.json vendored
View File

@ -8,6 +8,7 @@
"program": "${workspaceFolder}/pyzebra/app/cli.py",
"console": "internalConsole",
"env": {},
"justMyCode": false,
},
]
}

View File

@ -22,9 +22,9 @@ requirements:
- numpy
- scipy
- h5py
- bokeh =2.3
- bokeh =2.4
- numba
- lmfit
- lmfit >=1.0.2
about:

View File

@ -1,11 +1,9 @@
from pyzebra.anatric import *
from pyzebra.ccl_io import *
from pyzebra.h5 import *
from pyzebra.xtal import *
from pyzebra.ccl_process import *
from pyzebra.h5 import *
from pyzebra.utils import *
from pyzebra.xtal import *
from pyzebra.sxtal_refgen import *
ZEBRA_PROPOSALS_PATHS = [
f"/afs/psi.ch/project/sinqdata/{year}/zebra/" for year in (2016, 2017, 2018, 2020, 2021)
]
__version__ = "0.4.0"
__version__ = "0.7.0"

View File

@ -2,16 +2,19 @@ import logging
import sys
from io import StringIO
import pyzebra
from bokeh.io import curdoc
from bokeh.layouts import column, row
from bokeh.models import Tabs, TextAreaInput
from bokeh.models import Button, Panel, Tabs, TextAreaInput, TextInput
import panel_ccl_integrate
import panel_ccl_compare
import panel_hdf_anatric
import panel_hdf_param_study
import panel_hdf_viewer
import panel_param_study
import panel_spind
import panel_ccl_prepare
doc = curdoc()
@ -25,16 +28,46 @@ bokeh_logger = logging.getLogger("bokeh")
bokeh_logger.addHandler(bokeh_handler)
bokeh_log_textareainput = TextAreaInput(title="server output:", height=150)
# Final layout
tab_hdf_viewer = panel_hdf_viewer.create()
tab_hdf_anatric = panel_hdf_anatric.create()
tab_ccl_integrate = panel_ccl_integrate.create()
tab_param_study = panel_param_study.create()
tab_spind = panel_spind.create()
def proposal_textinput_callback(_attr, _old, _new):
apply_button.disabled = False
proposal_textinput = TextInput(title="Proposal number:", name="")
proposal_textinput.on_change("value_input", proposal_textinput_callback)
doc.proposal_textinput = proposal_textinput
def apply_button_callback():
proposal = proposal_textinput.value.strip()
if proposal:
try:
proposal_path = pyzebra.find_proposal_path(proposal)
except ValueError as e:
print(e)
return
apply_button.disabled = True
else:
proposal_path = ""
proposal_textinput.name = proposal_path
apply_button = Button(label="Apply", button_type="primary")
apply_button.on_click(apply_button_callback)
# Final layout
doc.add_root(
column(
Tabs(tabs=[tab_hdf_viewer, tab_hdf_anatric, tab_ccl_integrate, tab_param_study, tab_spind]),
Tabs(
tabs=[
Panel(child=column(proposal_textinput, apply_button), title="user config"),
panel_hdf_viewer.create(),
panel_hdf_anatric.create(),
panel_ccl_prepare.create(),
panel_ccl_integrate.create(),
panel_ccl_compare.create(),
panel_param_study.create(),
panel_hdf_param_study.create(),
panel_spind.create(),
]
),
row(stdout_textareainput, bokeh_log_textareainput, sizing_mode="scale_both"),
)
)

View File

@ -6,7 +6,7 @@ from bokeh.application.application import Application
from bokeh.application.handlers import ScriptHandler
from bokeh.server.server import Server
from pyzebra.anatric import ANATRIC_PATH
from pyzebra import ANATRIC_PATH, SXTAL_REFGEN_PATH
from pyzebra.app.handler import PyzebraHandler
logging.basicConfig(format="%(asctime)s %(message)s", level=logging.INFO)
@ -42,6 +42,13 @@ def main():
"--anatric-path", type=str, default=ANATRIC_PATH, help="path to anatric executable",
)
parser.add_argument(
"--sxtal-refgen-path",
type=str,
default=SXTAL_REFGEN_PATH,
help="path to Sxtal_Refgen executable",
)
parser.add_argument(
"--spind-path", type=str, default=None, help="path to spind scripts folder",
)

View File

@ -0,0 +1,718 @@
import base64
import io
import os
import tempfile
import types
import numpy as np
from bokeh.io import curdoc
from bokeh.layouts import column, row
from bokeh.models import (
BasicTicker,
Button,
CellEditor,
CheckboxEditor,
CheckboxGroup,
ColumnDataSource,
CustomJS,
DataRange1d,
DataTable,
Div,
Dropdown,
FileInput,
Grid,
Legend,
Line,
LinearAxis,
MultiLine,
MultiSelect,
NumberEditor,
Panel,
PanTool,
Plot,
RadioGroup,
ResetTool,
Scatter,
Select,
Spacer,
Span,
Spinner,
TableColumn,
TextAreaInput,
WheelZoomTool,
Whisker,
)
import pyzebra
from pyzebra.ccl_io import EXPORT_TARGETS
from pyzebra.ccl_process import AREA_METHODS
# JavaScript executed in the browser by the "Download File(s)" button:
# for each non-empty entry in js_data it builds a Blob, creates a temporary
# <a> element named fname+ext and clicks it, staggering downloads by 100 ms
# so the browser does not drop any of them.
javaScript = """
let j = 0;
for (let i = 0; i < js_data.data['fname'].length; i++) {
if (js_data.data['content'][i] === "") continue;
setTimeout(function() {
const blob = new Blob([js_data.data['content'][i]], {type: 'text/plain'})
const link = document.createElement('a');
document.body.appendChild(link);
const url = window.URL.createObjectURL(blob);
link.href = url;
link.download = js_data.data['fname'][i] + js_data.data['ext'][i];
link.click();
window.URL.revokeObjectURL(url);
document.body.removeChild(link);
}, 100 * j)
j++;
}
"""
def create():
doc = curdoc()
dataset1 = []
dataset2 = []
fit_params = {}
js_data = ColumnDataSource(data=dict(content=["", ""], fname=["", ""], ext=["", ""]))
def file_select_update_for_proposal():
proposal_path = proposal_textinput.name
if proposal_path:
file_list = []
for file in os.listdir(proposal_path):
if file.endswith((".ccl")):
file_list.append((os.path.join(proposal_path, file), file))
file_select.options = file_list
file_open_button.disabled = False
else:
file_select.options = []
file_open_button.disabled = True
doc.add_periodic_callback(file_select_update_for_proposal, 5000)
def proposal_textinput_callback(_attr, _old, _new):
file_select_update_for_proposal()
proposal_textinput = doc.proposal_textinput
proposal_textinput.on_change("name", proposal_textinput_callback)
def _init_datatable():
# dataset2 should have the same metadata as dataset1
scan_list = [s["idx"] for s in dataset1]
hkl = [f'{s["h"]} {s["k"]} {s["l"]}' for s in dataset1]
export = [s["export"] for s in dataset1]
twotheta = [np.median(s["twotheta"]) if "twotheta" in s else None for s in dataset1]
gamma = [np.median(s["gamma"]) if "gamma" in s else None for s in dataset1]
omega = [np.median(s["omega"]) if "omega" in s else None for s in dataset1]
chi = [np.median(s["chi"]) if "chi" in s else None for s in dataset1]
phi = [np.median(s["phi"]) if "phi" in s else None for s in dataset1]
nu = [np.median(s["nu"]) if "nu" in s else None for s in dataset1]
scan_table_source.data.update(
scan=scan_list,
hkl=hkl,
fit=[0] * len(scan_list),
export=export,
twotheta=twotheta,
gamma=gamma,
omega=omega,
chi=chi,
phi=phi,
nu=nu,
)
scan_table_source.selected.indices = []
scan_table_source.selected.indices = [0]
merge_options = [(str(i), f"{i} ({idx})") for i, idx in enumerate(scan_list)]
merge_from_select.options = merge_options
merge_from_select.value = merge_options[0][0]
file_select = MultiSelect(title="Select 2 .ccl files:", width=210, height=250)
def file_open_button_callback():
if len(file_select.value) != 2:
print("WARNING: Select exactly 2 .ccl files.")
return
new_data1 = []
new_data2 = []
for ind, f_path in enumerate(file_select.value):
with open(f_path) as file:
f_name = os.path.basename(f_path)
base, ext = os.path.splitext(f_name)
try:
file_data = pyzebra.parse_1D(file, ext)
except:
print(f"Error loading {f_name}")
return
pyzebra.normalize_dataset(file_data, monitor_spinner.value)
pyzebra.merge_duplicates(file_data)
if ind == 0:
js_data.data.update(fname=[base, base])
new_data1 = file_data
else: # ind = 1
new_data2 = file_data
# ignore extra scans at the end of the longest of the two files
min_len = min(len(new_data1), len(new_data2))
new_data1 = new_data1[:min_len]
new_data2 = new_data2[:min_len]
nonlocal dataset1, dataset2
dataset1 = new_data1
dataset2 = new_data2
_init_datatable()
file_open_button = Button(label="Open New", width=100, disabled=True)
file_open_button.on_click(file_open_button_callback)
def upload_button_callback(_attr, _old, _new):
if len(upload_button.filename) != 2:
print("WARNING: Upload exactly 2 .ccl files.")
return
new_data1 = []
new_data2 = []
for ind, (f_str, f_name) in enumerate(zip(upload_button.value, upload_button.filename)):
with io.StringIO(base64.b64decode(f_str).decode()) as file:
base, ext = os.path.splitext(f_name)
try:
file_data = pyzebra.parse_1D(file, ext)
except:
print(f"Error loading {f_name}")
return
pyzebra.normalize_dataset(file_data, monitor_spinner.value)
pyzebra.merge_duplicates(file_data)
if ind == 0:
js_data.data.update(fname=[base, base])
new_data1 = file_data
else: # ind = 1
new_data2 = file_data
# ignore extra scans at the end of the longest of the two files
min_len = min(len(new_data1), len(new_data2))
new_data1 = new_data1[:min_len]
new_data2 = new_data2[:min_len]
nonlocal dataset1, dataset2
dataset1 = new_data1
dataset2 = new_data2
_init_datatable()
upload_div = Div(text="or upload 2 .ccl files:", margin=(5, 5, 0, 5))
upload_button = FileInput(accept=".ccl", multiple=True, width=200)
# for on_change("value", ...) or on_change("filename", ...),
# see https://github.com/bokeh/bokeh/issues/11461
upload_button.on_change("filename", upload_button_callback)
def monitor_spinner_callback(_attr, old, new):
if dataset1 and dataset2:
pyzebra.normalize_dataset(dataset1, new)
pyzebra.normalize_dataset(dataset2, new)
_update_plot()
monitor_spinner = Spinner(title="Monitor:", mode="int", value=100_000, low=1, width=145)
monitor_spinner.on_change("value", monitor_spinner_callback)
def _update_table():
fit_ok = [(1 if "fit" in scan else 0) for scan in dataset1]
export = [scan["export"] for scan in dataset1]
scan_table_source.data.update(fit=fit_ok, export=export)
def _update_plot():
plot_scatter_source = [plot_scatter1_source, plot_scatter2_source]
plot_fit_source = [plot_fit1_source, plot_fit2_source]
plot_bkg_source = [plot_bkg1_source, plot_bkg2_source]
plot_peak_source = [plot_peak1_source, plot_peak2_source]
fit_output = ""
for ind, scan in enumerate(_get_selected_scan()):
scatter_source = plot_scatter_source[ind]
fit_source = plot_fit_source[ind]
bkg_source = plot_bkg_source[ind]
peak_source = plot_peak_source[ind]
scan_motor = scan["scan_motor"]
y = scan["counts"]
y_err = scan["counts_err"]
x = scan[scan_motor]
plot.axis[0].axis_label = scan_motor
scatter_source.data.update(x=x, y=y, y_upper=y + y_err, y_lower=y - y_err)
fit = scan.get("fit")
if fit is not None:
x_fit = np.linspace(x[0], x[-1], 100)
fit_source.data.update(x=x_fit, y=fit.eval(x=x_fit))
x_bkg = []
y_bkg = []
xs_peak = []
ys_peak = []
comps = fit.eval_components(x=x_fit)
for i, model in enumerate(fit_params):
if "linear" in model:
x_bkg = x_fit
y_bkg = comps[f"f{i}_"]
elif any(val in model for val in ("gaussian", "voigt", "pvoigt")):
xs_peak.append(x_fit)
ys_peak.append(comps[f"f{i}_"])
bkg_source.data.update(x=x_bkg, y=y_bkg)
peak_source.data.update(xs=xs_peak, ys=ys_peak)
if fit_output:
fit_output = fit_output + "\n\n"
fit_output = fit_output + fit.fit_report()
else:
fit_source.data.update(x=[], y=[])
bkg_source.data.update(x=[], y=[])
peak_source.data.update(xs=[], ys=[])
fit_output_textinput.value = fit_output
# Main plot
plot = Plot(
x_range=DataRange1d(),
y_range=DataRange1d(only_visible=True),
plot_height=470,
plot_width=700,
)
plot.add_layout(LinearAxis(axis_label="Counts"), place="left")
plot.add_layout(LinearAxis(axis_label="Scan motor"), place="below")
plot.add_layout(Grid(dimension=0, ticker=BasicTicker()))
plot.add_layout(Grid(dimension=1, ticker=BasicTicker()))
plot_scatter1_source = ColumnDataSource(dict(x=[0], y=[0], y_upper=[0], y_lower=[0]))
plot_scatter1 = plot.add_glyph(
plot_scatter1_source, Scatter(x="x", y="y", line_color="steelblue", fill_color="steelblue")
)
plot.add_layout(
Whisker(source=plot_scatter1_source, base="x", upper="y_upper", lower="y_lower")
)
plot_scatter2_source = ColumnDataSource(dict(x=[0], y=[0], y_upper=[0], y_lower=[0]))
plot_scatter2 = plot.add_glyph(
plot_scatter2_source, Scatter(x="x", y="y", line_color="firebrick", fill_color="firebrick")
)
plot.add_layout(
Whisker(source=plot_scatter2_source, base="x", upper="y_upper", lower="y_lower")
)
plot_fit1_source = ColumnDataSource(dict(x=[0], y=[0]))
plot_fit1 = plot.add_glyph(plot_fit1_source, Line(x="x", y="y"))
plot_fit2_source = ColumnDataSource(dict(x=[0], y=[0]))
plot_fit2 = plot.add_glyph(plot_fit2_source, Line(x="x", y="y"))
plot_bkg1_source = ColumnDataSource(dict(x=[0], y=[0]))
plot_bkg1 = plot.add_glyph(
plot_bkg1_source, Line(x="x", y="y", line_color="steelblue", line_dash="dashed")
)
plot_bkg2_source = ColumnDataSource(dict(x=[0], y=[0]))
plot_bkg2 = plot.add_glyph(
plot_bkg2_source, Line(x="x", y="y", line_color="firebrick", line_dash="dashed")
)
plot_peak1_source = ColumnDataSource(dict(xs=[[0]], ys=[[0]]))
plot_peak1 = plot.add_glyph(
plot_peak1_source, MultiLine(xs="xs", ys="ys", line_color="steelblue", line_dash="dashed")
)
plot_peak2_source = ColumnDataSource(dict(xs=[[0]], ys=[[0]]))
plot_peak2 = plot.add_glyph(
plot_peak2_source, MultiLine(xs="xs", ys="ys", line_color="firebrick", line_dash="dashed")
)
fit_from_span = Span(location=None, dimension="height", line_dash="dashed")
plot.add_layout(fit_from_span)
fit_to_span = Span(location=None, dimension="height", line_dash="dashed")
plot.add_layout(fit_to_span)
plot.add_layout(
Legend(
items=[
("data 1", [plot_scatter1]),
("data 2", [plot_scatter2]),
("best fit 1", [plot_fit1]),
("best fit 2", [plot_fit2]),
("peak 1", [plot_peak1]),
("peak 2", [plot_peak2]),
("linear 1", [plot_bkg1]),
("linear 2", [plot_bkg2]),
],
location="top_left",
click_policy="hide",
)
)
plot.add_tools(PanTool(), WheelZoomTool(), ResetTool())
plot.toolbar.logo = None
# Scan select
def scan_table_select_callback(_attr, old, new):
    """Handle row selection in the scan table.

    Enforces a single selected row (multi-select via Shift/Ctrl+Click is
    reverted) and refreshes the plot for the newly selected scan.
    """
    if not new:
        # ignore empty selections
        return

    if len(new) > 1:
        # multi-row selection is not supported -- revert to previous selection
        scan_table_source.selected.indices = old
        return

    if len(old) > 1:
        # this change is just the revert above settling; nothing to replot
        return

    _update_plot()
def scan_table_source_callback(_attr, _old, new):
# unfortunately, we don't know if the change comes from data update or user input
# also `old` and `new` are the same for non-scalars
for scan1, scan2, export in zip(dataset1, dataset2, new["export"]):
scan1["export"] = export
scan2["export"] = export
_update_preview()
scan_table_source = ColumnDataSource(
dict(
scan=[],
hkl=[],
fit=[],
export=[],
twotheta=[],
gamma=[],
omega=[],
chi=[],
phi=[],
nu=[],
)
)
scan_table_source.on_change("data", scan_table_source_callback)
scan_table_source.selected.on_change("indices", scan_table_select_callback)
scan_table = DataTable(
source=scan_table_source,
columns=[
TableColumn(field="scan", title="Scan", editor=CellEditor(), width=50),
TableColumn(field="hkl", title="hkl", editor=CellEditor(), width=100),
TableColumn(field="fit", title="Fit", editor=CellEditor(), width=50),
TableColumn(field="export", title="Export", editor=CheckboxEditor(), width=50),
TableColumn(field="twotheta", title="2theta", editor=CellEditor(), width=50),
TableColumn(field="gamma", title="gamma", editor=CellEditor(), width=50),
TableColumn(field="omega", title="omega", editor=CellEditor(), width=50),
TableColumn(field="chi", title="chi", editor=CellEditor(), width=50),
TableColumn(field="phi", title="phi", editor=CellEditor(), width=50),
TableColumn(field="nu", title="nu", editor=CellEditor(), width=50),
],
width=310, # +60 because of the index column, but excluding twotheta onwards
height=350,
autosize_mode="none",
editable=True,
)
def _get_selected_scan():
    """Return the (dataset1, dataset2) scan pair for the selected table row."""
    sel_ind = scan_table_source.selected.indices[0]
    return dataset1[sel_ind], dataset2[sel_ind]
merge_from_select = Select(title="scan:", width=145)
def merge_button_callback():
scan_into1, scan_into2 = _get_selected_scan()
scan_from1 = dataset1[int(merge_from_select.value)]
scan_from2 = dataset2[int(merge_from_select.value)]
if scan_into1 is scan_from1:
print("WARNING: Selected scans for merging are identical")
return
pyzebra.merge_scans(scan_into1, scan_from1)
pyzebra.merge_scans(scan_into2, scan_from2)
_update_table()
_update_plot()
merge_button = Button(label="Merge into current", width=145)
merge_button.on_click(merge_button_callback)
def restore_button_callback():
scan1, scan2 = _get_selected_scan()
pyzebra.restore_scan(scan1)
pyzebra.restore_scan(scan2)
_update_table()
_update_plot()
restore_button = Button(label="Restore scan", width=145)
restore_button.on_click(restore_button_callback)
def fit_from_spinner_callback(_attr, _old, new):
fit_from_span.location = new
fit_from_spinner = Spinner(title="Fit from:", width=145)
fit_from_spinner.on_change("value", fit_from_spinner_callback)
def fit_to_spinner_callback(_attr, _old, new):
fit_to_span.location = new
fit_to_spinner = Spinner(title="to:", width=145)
fit_to_spinner.on_change("value", fit_to_spinner_callback)
def fitparams_add_dropdown_callback(click):
# bokeh requires (str, str) for MultiSelect options
new_tag = f"{click.item}-{fitparams_select.tags[0]}"
fitparams_select.options.append((new_tag, click.item))
fit_params[new_tag] = fitparams_factory(click.item)
fitparams_select.tags[0] += 1
fitparams_add_dropdown = Dropdown(
label="Add fit function",
menu=[
("Linear", "linear"),
("Gaussian", "gaussian"),
("Voigt", "voigt"),
("Pseudo Voigt", "pvoigt"),
# ("Pseudo Voigt1", "pseudovoigt1"),
],
width=145,
)
fitparams_add_dropdown.on_click(fitparams_add_dropdown_callback)
def fitparams_select_callback(_attr, old, new):
# Avoid selection of multiple indicies (via Shift+Click or Ctrl+Click)
if len(new) > 1:
# drop selection to the previous one
fitparams_select.value = old
return
if len(old) > 1:
# skip unnecessary update caused by selection drop
return
if new:
fitparams_table_source.data.update(fit_params[new[0]])
else:
fitparams_table_source.data.update(dict(param=[], value=[], vary=[], min=[], max=[]))
fitparams_select = MultiSelect(options=[], height=120, width=145)
fitparams_select.tags = [0]
fitparams_select.on_change("value", fitparams_select_callback)
def fitparams_remove_button_callback():
if fitparams_select.value:
sel_tag = fitparams_select.value[0]
del fit_params[sel_tag]
for elem in fitparams_select.options:
if elem[0] == sel_tag:
fitparams_select.options.remove(elem)
break
fitparams_select.value = []
fitparams_remove_button = Button(label="Remove fit function", width=145)
fitparams_remove_button.on_click(fitparams_remove_button_callback)
def fitparams_factory(function):
    """Build a fresh fit-parameter table for the given fit-function name.

    Returns a dict with columns `param`, `value`, `vary`, `min` and `max`
    (one entry per parameter), suitable for a DataTable source. Raises
    ValueError for an unknown function name.
    """
    param_names = {
        "linear": ["slope", "intercept"],
        "gaussian": ["amplitude", "center", "sigma"],
        "voigt": ["amplitude", "center", "sigma", "gamma"],
        "pvoigt": ["amplitude", "center", "sigma", "fraction"],
        "pseudovoigt1": ["amplitude", "center", "g_sigma", "l_sigma", "fraction"],
    }
    if function not in param_names:
        raise ValueError("Unknown fit function")

    # copy so every call hands out independent, mutable column lists
    params = list(param_names[function])
    n = len(params)
    fitparams = dict(
        param=params, value=[None] * n, vary=[True] * n, min=[None] * n, max=[None] * n,
    )

    # sensible defaults: fixed zero intercept offset for linear, and
    # non-negative amplitude for gaussian
    if function == "linear":
        fitparams["value"] = [0, 1]
        fitparams["vary"] = [False, True]
        fitparams["min"] = [None, 0]
    elif function == "gaussian":
        fitparams["min"] = [0, None, None]

    return fitparams
fitparams_table_source = ColumnDataSource(dict(param=[], value=[], vary=[], min=[], max=[]))
fitparams_table = DataTable(
source=fitparams_table_source,
columns=[
TableColumn(field="param", title="Parameter", editor=CellEditor()),
TableColumn(field="value", title="Value", editor=NumberEditor()),
TableColumn(field="vary", title="Vary", editor=CheckboxEditor()),
TableColumn(field="min", title="Min", editor=NumberEditor()),
TableColumn(field="max", title="Max", editor=NumberEditor()),
],
height=200,
width=350,
index_position=None,
editable=True,
auto_edit=True,
)
# start with `background` and `gauss` fit functions added
fitparams_add_dropdown_callback(types.SimpleNamespace(item="linear"))
fitparams_add_dropdown_callback(types.SimpleNamespace(item="gaussian"))
fitparams_select.value = ["gaussian-1"] # add selection to gauss
fit_output_textinput = TextAreaInput(title="Fit results:", width=750, height=200)
def proc_all_button_callback():
for scan in [*dataset1, *dataset2]:
if scan["export"]:
pyzebra.fit_scan(
scan, fit_params, fit_from=fit_from_spinner.value, fit_to=fit_to_spinner.value
)
pyzebra.get_area(
scan,
area_method=AREA_METHODS[area_method_radiobutton.active],
lorentz=lorentz_checkbox.active,
)
_update_plot()
_update_table()
proc_all_button = Button(label="Process All", button_type="primary", width=145)
proc_all_button.on_click(proc_all_button_callback)
def proc_button_callback():
for scan in _get_selected_scan():
pyzebra.fit_scan(
scan, fit_params, fit_from=fit_from_spinner.value, fit_to=fit_to_spinner.value
)
pyzebra.get_area(
scan,
area_method=AREA_METHODS[area_method_radiobutton.active],
lorentz=lorentz_checkbox.active,
)
_update_plot()
_update_table()
proc_button = Button(label="Process Current", width=145)
proc_button.on_click(proc_button_callback)
area_method_div = Div(text="Intensity:", margin=(5, 5, 0, 5))
area_method_radiobutton = RadioGroup(labels=["Function", "Area"], active=0, width=145)
intensity_diff_div = Div(text="Intensity difference:", margin=(5, 5, 0, 5))
intensity_diff_radiobutton = RadioGroup(
labels=["file1 - file2", "file2 - file1"], active=0, width=145
)
lorentz_checkbox = CheckboxGroup(labels=["Lorentz Correction"], width=145, margin=(13, 5, 5, 5))
export_preview_textinput = TextAreaInput(title="Export file(s) preview:", width=500, height=400)
def _update_preview():
    """Export the selected scan pairs to a temp dir and refresh preview/download buffers."""
    with tempfile.TemporaryDirectory() as tmp_dir:
        out_base = tmp_dir + "/temp"

        # keep only pairs whose first-dataset scan is flagged for export
        pairs = [(s1, s2) for s1, s2 in zip(dataset1, dataset2) if s1["export"]]
        data_a = [p[0] for p in pairs]
        data_b = [p[1] for p in pairs]

        # "file2 - file1" selected -> swap the operands of the difference
        if intensity_diff_radiobutton.active:
            data_a, data_b = data_b, data_a

        pyzebra.export_ccl_compare(
            data_a,
            data_b,
            out_base,
            export_target_select.value,
            hkl_precision=int(hkl_precision_select.value),
        )

        preview_chunks = []
        file_content = []
        for ext in EXPORT_TARGETS[export_target_select.value]:
            path = out_base + ext
            text = ""
            if os.path.isfile(path):
                with open(path) as f:
                    text = f.read()
                preview_chunks.append(f"{ext} file:\n" + text)
            # a missing output still contributes an empty slot for the JS download
            file_content.append(text)

        js_data.data.update(content=file_content)
        export_preview_textinput.value = "".join(preview_chunks)
def export_target_select_callback(_attr, _old, new):
    """Push the chosen target's file extensions to the JS side and refresh the preview."""
    exts = EXPORT_TARGETS[new]
    js_data.data.update(ext=exts)
    _update_preview()
# dropdown selecting the export file format (keys of EXPORT_TARGETS)
export_target_select = Select(
title="Export target:", options=list(EXPORT_TARGETS.keys()), value="fullprof", width=80
)
export_target_select.on_change("value", export_target_select_callback)
# seed the JS download metadata with the default target's extensions
js_data.data.update(ext=EXPORT_TARGETS[export_target_select.value])
def hkl_precision_select_callback(_attr, _old, _new):
    # precision only changes the exported text, so regenerating the preview suffices
    _update_preview()
# number of decimal places used for hkl values in the exported files
hkl_precision_select = Select(
title="hkl precision:", options=["2", "3", "4"], value="2", width=80
)
hkl_precision_select.on_change("value", hkl_precision_select_callback)
# browser-side download of the prepared file(s) via the CustomJS snippet
save_button = Button(label="Download File(s)", button_type="success", width=200)
save_button.js_on_click(CustomJS(args={"js_data": js_data}, code=javaScript))
# left-to-right: fit-parameter controls, parameter table, processing options
fitpeak_controls = row(
column(fitparams_add_dropdown, fitparams_select, fitparams_remove_button),
fitparams_table,
Spacer(width=20),
column(
fit_from_spinner,
lorentz_checkbox,
area_method_div,
area_method_radiobutton,
intensity_diff_div,
intensity_diff_radiobutton,
),
column(fit_to_spinner, proc_button, proc_all_button),
)
# scan table with monitor/restore and merge controls underneath
scan_layout = column(
scan_table,
row(monitor_spinner, column(Spacer(height=19), restore_button)),
row(column(Spacer(height=19), merge_button), merge_from_select),
)
# file selection / upload controls
import_layout = column(file_select, file_open_button, upload_div, upload_button)
# preview area plus export-format and download controls
export_layout = column(
export_preview_textinput,
row(
export_target_select, hkl_precision_select, column(Spacer(height=19), row(save_button))
),
)
# full panel layout for the "ccl compare" tab
tab_layout = column(
row(import_layout, scan_layout, plot, Spacer(width=30), export_layout),
row(fitpeak_controls, fit_output_textinput),
)
return Panel(child=tab_layout, title="ccl compare")

View File

@ -10,6 +10,7 @@ from bokeh.layouts import column, row
from bokeh.models import (
BasicTicker,
Button,
CellEditor,
CheckboxEditor,
CheckboxGroup,
ColumnDataSource,
@ -38,7 +39,6 @@ from bokeh.models import (
Spinner,
TableColumn,
TextAreaInput,
TextInput,
WheelZoomTool,
Whisker,
)
@ -72,48 +72,56 @@ for (let i = 0; i < js_data.data['fname'].length; i++) {
def create():
doc = curdoc()
det_data = {}
dataset = []
fit_params = {}
js_data = ColumnDataSource(data=dict(content=["", ""], fname=["", ""], ext=["", ""]))
def file_select_update_for_proposal():
proposal = proposal_textinput.value.strip()
if not proposal:
proposal_path = proposal_textinput.name
if proposal_path:
file_list = []
for file in os.listdir(proposal_path):
if file.endswith((".ccl", ".dat")):
file_list.append((os.path.join(proposal_path, file), file))
file_select.options = file_list
file_open_button.disabled = False
file_append_button.disabled = False
else:
file_select.options = []
file_open_button.disabled = True
file_append_button.disabled = True
return
for zebra_proposals_path in pyzebra.ZEBRA_PROPOSALS_PATHS:
proposal_path = os.path.join(zebra_proposals_path, proposal)
if os.path.isdir(proposal_path):
# found it
break
else:
raise ValueError(f"Can not find data for proposal '{proposal}'.")
file_list = []
for file in os.listdir(proposal_path):
if file.endswith((".ccl", ".dat")):
file_list.append((os.path.join(proposal_path, file), file))
file_select.options = file_list
file_open_button.disabled = False
file_append_button.disabled = False
doc.add_periodic_callback(file_select_update_for_proposal, 5000)
def proposal_textinput_callback(_attr, _old, _new):
file_select_update_for_proposal()
proposal_textinput = TextInput(title="Proposal number:", width=210)
proposal_textinput.on_change("value", proposal_textinput_callback)
proposal_textinput = doc.proposal_textinput
proposal_textinput.on_change("name", proposal_textinput_callback)
def _init_datatable():
scan_list = [s["idx"] for s in det_data]
hkl = [f'{s["h"]} {s["k"]} {s["l"]}' for s in det_data]
export = [s.get("active", True) for s in det_data]
scan_list = [s["idx"] for s in dataset]
hkl = [f'{s["h"]} {s["k"]} {s["l"]}' for s in dataset]
export = [s["export"] for s in dataset]
twotheta = [np.median(s["twotheta"]) if "twotheta" in s else None for s in dataset]
gamma = [np.median(s["gamma"]) if "gamma" in s else None for s in dataset]
omega = [np.median(s["omega"]) if "omega" in s else None for s in dataset]
chi = [np.median(s["chi"]) if "chi" in s else None for s in dataset]
phi = [np.median(s["phi"]) if "phi" in s else None for s in dataset]
nu = [np.median(s["nu"]) if "nu" in s else None for s in dataset]
scan_table_source.data.update(
scan=scan_list, hkl=hkl, fit=[0] * len(scan_list), export=export,
scan=scan_list,
hkl=hkl,
fit=[0] * len(scan_list),
export=export,
twotheta=twotheta,
gamma=gamma,
omega=omega,
chi=chi,
phi=phi,
nu=nu,
)
scan_table_source.selected.indices = []
scan_table_source.selected.indices = [0]
@ -125,100 +133,134 @@ def create():
file_select = MultiSelect(title="Available .ccl/.dat files:", width=210, height=250)
def file_open_button_callback():
nonlocal det_data
det_data = []
for f_name in file_select.value:
with open(f_name) as file:
nonlocal dataset
new_data = []
for f_path in file_select.value:
with open(f_path) as file:
f_name = os.path.basename(f_path)
base, ext = os.path.splitext(f_name)
if det_data:
append_data = pyzebra.parse_1D(file, ext)
pyzebra.normalize_dataset(append_data, monitor_spinner.value)
pyzebra.merge_datasets(det_data, append_data)
else:
det_data = pyzebra.parse_1D(file, ext)
pyzebra.normalize_dataset(det_data, monitor_spinner.value)
pyzebra.merge_duplicates(det_data)
js_data.data.update(fname=[base, base])
try:
file_data = pyzebra.parse_1D(file, ext)
except:
print(f"Error loading {f_name}")
continue
_init_datatable()
append_upload_button.disabled = False
pyzebra.normalize_dataset(file_data, monitor_spinner.value)
if not new_data: # first file
new_data = file_data
pyzebra.merge_duplicates(new_data)
js_data.data.update(fname=[base, base])
else:
pyzebra.merge_datasets(new_data, file_data)
if new_data:
dataset = new_data
_init_datatable()
append_upload_button.disabled = False
file_open_button = Button(label="Open New", width=100, disabled=True)
file_open_button.on_click(file_open_button_callback)
def file_append_button_callback():
for f_name in file_select.value:
with open(f_name) as file:
file_data = []
for f_path in file_select.value:
with open(f_path) as file:
f_name = os.path.basename(f_path)
_, ext = os.path.splitext(f_name)
append_data = pyzebra.parse_1D(file, ext)
try:
file_data = pyzebra.parse_1D(file, ext)
except:
print(f"Error loading {f_name}")
continue
pyzebra.normalize_dataset(append_data, monitor_spinner.value)
pyzebra.merge_datasets(det_data, append_data)
pyzebra.normalize_dataset(file_data, monitor_spinner.value)
pyzebra.merge_datasets(dataset, file_data)
_init_datatable()
if file_data:
_init_datatable()
file_append_button = Button(label="Append", width=100, disabled=True)
file_append_button.on_click(file_append_button_callback)
def upload_button_callback(_attr, _old, new):
nonlocal det_data
det_data = []
proposal_textinput.value = ""
for f_str, f_name in zip(new, upload_button.filename):
def upload_button_callback(_attr, _old, _new):
nonlocal dataset
new_data = []
for f_str, f_name in zip(upload_button.value, upload_button.filename):
with io.StringIO(base64.b64decode(f_str).decode()) as file:
base, ext = os.path.splitext(f_name)
if det_data:
append_data = pyzebra.parse_1D(file, ext)
pyzebra.normalize_dataset(append_data, monitor_spinner.value)
pyzebra.merge_datasets(det_data, append_data)
else:
det_data = pyzebra.parse_1D(file, ext)
pyzebra.normalize_dataset(det_data, monitor_spinner.value)
pyzebra.merge_duplicates(det_data)
js_data.data.update(fname=[base, base])
try:
file_data = pyzebra.parse_1D(file, ext)
except:
print(f"Error loading {f_name}")
continue
_init_datatable()
append_upload_button.disabled = False
pyzebra.normalize_dataset(file_data, monitor_spinner.value)
if not new_data: # first file
new_data = file_data
pyzebra.merge_duplicates(new_data)
js_data.data.update(fname=[base, base])
else:
pyzebra.merge_datasets(new_data, file_data)
if new_data:
dataset = new_data
_init_datatable()
append_upload_button.disabled = False
upload_div = Div(text="or upload new .ccl/.dat files:", margin=(5, 5, 0, 5))
upload_button = FileInput(accept=".ccl,.dat", multiple=True, width=200)
upload_button.on_change("value", upload_button_callback)
# for on_change("value", ...) or on_change("filename", ...),
# see https://github.com/bokeh/bokeh/issues/11461
upload_button.on_change("filename", upload_button_callback)
def append_upload_button_callback(_attr, _old, new):
for f_str, f_name in zip(new, append_upload_button.filename):
def append_upload_button_callback(_attr, _old, _new):
file_data = []
for f_str, f_name in zip(append_upload_button.value, append_upload_button.filename):
with io.StringIO(base64.b64decode(f_str).decode()) as file:
_, ext = os.path.splitext(f_name)
append_data = pyzebra.parse_1D(file, ext)
try:
file_data = pyzebra.parse_1D(file, ext)
except:
print(f"Error loading {f_name}")
continue
pyzebra.normalize_dataset(append_data, monitor_spinner.value)
pyzebra.merge_datasets(det_data, append_data)
pyzebra.normalize_dataset(file_data, monitor_spinner.value)
pyzebra.merge_datasets(dataset, file_data)
_init_datatable()
if file_data:
_init_datatable()
append_upload_div = Div(text="append extra files:", margin=(5, 5, 0, 5))
append_upload_button = FileInput(accept=".ccl,.dat", multiple=True, width=200, disabled=True)
append_upload_button.on_change("value", append_upload_button_callback)
# for on_change("value", ...) or on_change("filename", ...),
# see https://github.com/bokeh/bokeh/issues/11461
append_upload_button.on_change("filename", append_upload_button_callback)
def monitor_spinner_callback(_attr, old, new):
if det_data:
pyzebra.normalize_dataset(det_data, new)
_update_plot(_get_selected_scan())
if dataset:
pyzebra.normalize_dataset(dataset, new)
_update_plot()
monitor_spinner = Spinner(title="Monitor:", mode="int", value=100_000, low=1, width=145)
monitor_spinner.on_change("value", monitor_spinner_callback)
def _update_table():
fit_ok = [(1 if "fit" in scan else 0) for scan in det_data]
scan_table_source.data.update(fit=fit_ok)
fit_ok = [(1 if "fit" in scan else 0) for scan in dataset]
export = [scan["export"] for scan in dataset]
scan_table_source.data.update(fit=fit_ok, export=export)
def _update_plot(scan):
def _update_plot():
scan = _get_selected_scan()
scan_motor = scan["scan_motor"]
y = scan["counts"]
y_err = scan["counts_err"]
x = scan[scan_motor]
plot.axis[0].axis_label = scan_motor
plot_scatter_source.data.update(x=x, y=y, y_upper=y + np.sqrt(y), y_lower=y - np.sqrt(y))
plot_scatter_source.data.update(x=x, y=y, y_upper=y + y_err, y_lower=y - y_err)
fit = scan.get("fit")
if fit is not None:
@ -266,7 +308,7 @@ def create():
plot_scatter_source = ColumnDataSource(dict(x=[0], y=[0], y_upper=[0], y_lower=[0]))
plot_scatter = plot.add_glyph(
plot_scatter_source, Scatter(x="x", y="y", line_color="steelblue")
plot_scatter_source, Scatter(x="x", y="y", line_color="steelblue", fill_color="steelblue")
)
plot.add_layout(Whisker(source=plot_scatter_source, base="x", upper="y_upper", lower="y_lower"))
@ -321,52 +363,76 @@ def create():
# skip unnecessary update caused by selection drop
return
_update_plot(det_data[new[0]])
_update_plot()
def scan_table_source_callback(_attr, _old, _new):
def scan_table_source_callback(_attr, _old, new):
# unfortunately, we don't know if the change comes from data update or user input
# also `old` and `new` are the same for non-scalars
for scan, export in zip(dataset, new["export"]):
scan["export"] = export
_update_preview()
scan_table_source = ColumnDataSource(dict(scan=[], hkl=[], fit=[], export=[]))
scan_table_source = ColumnDataSource(
dict(
scan=[],
hkl=[],
fit=[],
export=[],
twotheta=[],
gamma=[],
omega=[],
chi=[],
phi=[],
nu=[],
)
)
scan_table_source.on_change("data", scan_table_source_callback)
scan_table_source.selected.on_change("indices", scan_table_select_callback)
scan_table = DataTable(
source=scan_table_source,
columns=[
TableColumn(field="scan", title="Scan", width=50),
TableColumn(field="hkl", title="hkl", width=100),
TableColumn(field="fit", title="Fit", width=50),
TableColumn(field="scan", title="Scan", editor=CellEditor(), width=50),
TableColumn(field="hkl", title="hkl", editor=CellEditor(), width=100),
TableColumn(field="fit", title="Fit", editor=CellEditor(), width=50),
TableColumn(field="export", title="Export", editor=CheckboxEditor(), width=50),
TableColumn(field="twotheta", title="2theta", editor=CellEditor(), width=50),
TableColumn(field="gamma", title="gamma", editor=CellEditor(), width=50),
TableColumn(field="omega", title="omega", editor=CellEditor(), width=50),
TableColumn(field="chi", title="chi", editor=CellEditor(), width=50),
TableColumn(field="phi", title="phi", editor=CellEditor(), width=50),
TableColumn(field="nu", title="nu", editor=CellEditor(), width=50),
],
width=310, # +60 because of the index column
width=310, # +60 because of the index column, but excluding twotheta onwards
height=350,
autosize_mode="none",
editable=True,
)
scan_table_source.selected.on_change("indices", scan_table_select_callback)
def _get_selected_scan():
return det_data[scan_table_source.selected.indices[0]]
return dataset[scan_table_source.selected.indices[0]]
merge_from_select = Select(title="scan:", width=145)
def merge_button_callback():
scan_into = _get_selected_scan()
scan_from = det_data[int(merge_from_select.value)]
scan_from = dataset[int(merge_from_select.value)]
if scan_into is scan_from:
print("WARNING: Selected scans for merging are identical")
return
pyzebra.merge_scans(scan_into, scan_from)
_update_plot(_get_selected_scan())
_update_table()
_update_plot()
merge_button = Button(label="Merge into current", width=145)
merge_button.on_click(merge_button_callback)
def restore_button_callback():
pyzebra.restore_scan(_get_selected_scan())
_update_plot(_get_selected_scan())
_update_table()
_update_plot()
restore_button = Button(label="Restore scan", width=145)
restore_button.on_click(restore_button_callback)
@ -470,7 +536,7 @@ def create():
fitparams_table = DataTable(
source=fitparams_table_source,
columns=[
TableColumn(field="param", title="Parameter"),
TableColumn(field="param", title="Parameter", editor=CellEditor()),
TableColumn(field="value", title="Value", editor=NumberEditor()),
TableColumn(field="vary", title="Vary", editor=CheckboxEditor()),
TableColumn(field="min", title="Min", editor=NumberEditor()),
@ -491,8 +557,8 @@ def create():
fit_output_textinput = TextAreaInput(title="Fit results:", width=750, height=200)
def proc_all_button_callback():
for scan, export in zip(det_data, scan_table_source.data["export"]):
if export:
for scan in dataset:
if scan["export"]:
pyzebra.fit_scan(
scan, fit_params, fit_from=fit_from_spinner.value, fit_to=fit_to_spinner.value
)
@ -502,7 +568,7 @@ def create():
lorentz=lorentz_checkbox.active,
)
_update_plot(_get_selected_scan())
_update_plot()
_update_table()
proc_all_button = Button(label="Process All", button_type="primary", width=145)
@ -519,7 +585,7 @@ def create():
lorentz=lorentz_checkbox.active,
)
_update_plot(scan)
_update_plot()
_update_table()
proc_button = Button(label="Process Current", width=145)
@ -536,9 +602,9 @@ def create():
with tempfile.TemporaryDirectory() as temp_dir:
temp_file = temp_dir + "/temp"
export_data = []
for s, export in zip(det_data, scan_table_source.data["export"]):
if export:
export_data.append(s)
for scan in dataset:
if scan["export"]:
export_data.append(scan)
pyzebra.export_1D(
export_data,
@ -598,7 +664,6 @@ def create():
)
import_layout = column(
proposal_textinput,
file_select,
row(file_open_button, file_append_button),
upload_div,

View File

@ -0,0 +1,745 @@
import base64
import io
import os
import subprocess
import tempfile
import numpy as np
from bokeh.layouts import column, row
from bokeh.models import (
Arrow,
BoxZoomTool,
Button,
CheckboxGroup,
ColumnDataSource,
CustomJS,
Div,
Ellipse,
FileInput,
Legend,
LegendItem,
LinearAxis,
MultiLine,
MultiSelect,
NormalHead,
NumericInput,
Panel,
PanTool,
Plot,
RadioGroup,
Range1d,
ResetTool,
Scatter,
Select,
Spacer,
Spinner,
Text,
TextAreaInput,
TextInput,
WheelZoomTool,
)
from bokeh.palettes import Dark2
import pyzebra
# CustomJS snippet: for each non-empty entry in js_data, create a Blob and a
# temporary <a> element to trigger a staggered browser download (100 ms apart)
javaScript = """
let j = 0;
for (let i = 0; i < js_data.data['fname'].length; i++) {
if (js_data.data['content'][i] === "") continue;
setTimeout(function() {
const blob = new Blob([js_data.data['content'][i]], {type: 'text/plain'})
const link = document.createElement('a');
document.body.appendChild(link);
const url = window.URL.createObjectURL(blob);
link.href = url;
link.download = js_data.data['fname'][i];
link.click();
window.URL.revokeObjectURL(url);
document.body.removeChild(link);
}, 100 * j)
j++;
}
"""
# default angular chunk size (degrees) per sortable angle
ANG_CHUNK_DEFAULTS = {"2theta": 30, "gamma": 30, "omega": 30, "chi": 35, "phi": 35, "nu": 10}
# angles available for sorting in bisecting / normal-beam geometry
SORT_OPT_BI = ["2theta", "chi", "phi", "omega"]
SORT_OPT_NB = ["gamma", "nu", "omega"]
def create():
ang_lims = None
cif_data = None
params = None
res_files = {}
js_data = ColumnDataSource(data=dict(content=[""], fname=[""]))
anglim_div = Div(text="Angular min/max limits:", margin=(5, 5, 0, 5))
sttgamma_ti = TextInput(title="stt/gamma", width=100)
omega_ti = TextInput(title="omega", width=100)
chinu_ti = TextInput(title="chi/nu", width=100)
phi_ti = TextInput(title="phi", width=100)
def _update_ang_lims(ang_lims):
    """Populate the angular-limit text inputs from a parsed geom-file dict."""
    def first_two(key):
        return " ".join(ang_lims[key][:2])

    sttgamma_ti.value = first_two("gamma")
    omega_ti.value = first_two("omega")
    if ang_lims["geom"] == "nb":
        # normal-beam geometry: nu is shown in the chi/nu field, phi is unused
        chinu_ti.value = first_two("nu")
        phi_ti.value = ""
    else:
        # bisecting geometry
        chinu_ti.value = first_two("chi")
        phi_ti.value = first_two("phi")
def _update_params(params):
    """Copy known cfl/cif parameters into the corresponding UI widgets (missing keys skipped)."""
    # keys that map 1:1 onto a single text widget
    direct_targets = (
        ("WAVE", wavelen_input),
        ("SPGR", cryst_space_group),
        ("CELL", cryst_cell),
        ("HLIM", ranges_hkl),
        ("SRANG", ranges_srang),
        ("lattiCE", magstruct_lattice),
        ("kvect", magstruct_kvec),
    )
    for key, widget in direct_targets:
        if key in params:
            widget.value = params[key]

    # UB matrix is a sequence of values -> display space-separated
    if "UBMAT" in params:
        ub_matrix.value = " ".join(params["UBMAT"])
def open_geom_callback(_attr, _old, new):
nonlocal ang_lims
with io.StringIO(base64.b64decode(new).decode()) as fileobj:
ang_lims = pyzebra.read_geom_file(fileobj)
_update_ang_lims(ang_lims)
# upload widgets for geometry / cfl / cif input files
open_geom_div = Div(text="Open GEOM:")
open_geom = FileInput(accept=".geom", width=200)
open_geom.on_change("value", open_geom_callback)
# parse an uploaded .cfl file (FileInput value is base64 encoded) into `params`
def open_cfl_callback(_attr, _old, new):
nonlocal params
with io.StringIO(base64.b64decode(new).decode()) as fileobj:
params = pyzebra.read_cfl_file(fileobj)
_update_params(params)
open_cfl_div = Div(text="Open CFL:")
open_cfl = FileInput(accept=".cfl", width=200)
open_cfl.on_change("value", open_cfl_callback)
# parse an uploaded .cif file into `cif_data` and mirror its values in the UI
def open_cif_callback(_attr, _old, new):
nonlocal cif_data
with io.StringIO(base64.b64decode(new).decode()) as fileobj:
cif_data = pyzebra.read_cif_file(fileobj)
_update_params(cif_data)
open_cif_div = Div(text="Open CIF:")
open_cif = FileInput(accept=".cif", width=200)
open_cif.on_change("value", open_cif_callback)
# wavelength: free-text value plus a preset dropdown that fills it in
wavelen_div = Div(text="Wavelength:", margin=(5, 5, 0, 5))
wavelen_input = TextInput(title="value", width=70)
def wavelen_select_callback(_attr, _old, new):
if new:
wavelen_input.value = new
else:
wavelen_input.value = ""
wavelen_select = Select(
title="preset", options=["", "0.788", "1.178", "1.383", "2.305"], width=70
)
wavelen_select.on_change("value", wavelen_select_callback)
# crystal structure inputs
cryst_div = Div(text="Crystal structure:", margin=(5, 5, 0, 5))
cryst_space_group = TextInput(title="space group", width=100)
cryst_cell = TextInput(title="cell", width=250)
def ub_matrix_calc_callback():
    """Compute the UB matrix from the space group and cell inputs and display it."""
    calc_input = {"SPGR": cryst_space_group.value, "CELL": cryst_cell.value}
    ub = pyzebra.calc_ub_matrix(calc_input)
    ub_matrix.value = " ".join(ub)
# button computes UB from space group + cell; result shown in `ub_matrix`
ub_matrix_calc = Button(label="UB matrix:", button_type="primary", width=100)
ub_matrix_calc.on_click(ub_matrix_calc_callback)
# title is a zero-width space so the input aligns with the titled widgets
ub_matrix = TextInput(title="\u200B", width=600)
# hkl and sin(theta)/lambda range inputs
ranges_div = Div(text="Ranges:", margin=(5, 5, 0, 5))
ranges_hkl = TextInput(title="HKL", value="-25 25 -25 25 -25 25", width=250)
ranges_srang = TextInput(title="sin(​θ​)/λ", value="0.0 0.7", width=100)
# magnetic structure inputs; k vectors are entered one per line
magstruct_div = Div(text="Magnetic structure:", margin=(5, 5, 0, 5))
magstruct_lattice = TextInput(title="lattice", width=100)
magstruct_kvec = TextAreaInput(title="k vector", width=150)
# choosing a sort angle resets its chunk size to the per-angle default
def sorting0_callback(_attr, _old, new):
sorting_0_dt.value = ANG_CHUNK_DEFAULTS[new]
def sorting1_callback(_attr, _old, new):
sorting_1_dt.value = ANG_CHUNK_DEFAULTS[new]
def sorting2_callback(_attr, _old, new):
sorting_2_dt.value = ANG_CHUNK_DEFAULTS[new]
# three sort-priority selectors, each paired with a chunk-size (Δ) input
sorting_0 = Select(title="1st", width=100)
sorting_0.on_change("value", sorting0_callback)
sorting_0_dt = NumericInput(title="Δ", width=70)
sorting_1 = Select(title="2nd", width=100)
sorting_1.on_change("value", sorting1_callback)
sorting_1_dt = NumericInput(title="Δ", width=70)
sorting_2 = Select(title="3rd", width=100)
sorting_2.on_change("value", sorting2_callback)
sorting_2_dt = NumericInput(title="Δ", width=70)
def geom_radiogroup_callback(_attr, _old, new):
nonlocal ang_lims, params
if new == 0:
geom_file = pyzebra.get_zebraBI_default_geom_file()
sort_opt = SORT_OPT_BI
else:
geom_file = pyzebra.get_zebraNB_default_geom_file()
sort_opt = SORT_OPT_NB
cfl_file = pyzebra.get_zebra_default_cfl_file()
ang_lims = pyzebra.read_geom_file(geom_file)
_update_ang_lims(ang_lims)
params = pyzebra.read_cfl_file(cfl_file)
_update_params(params)
sorting_0.options = sorting_1.options = sorting_2.options = sort_opt
sorting_0.value = sort_opt[0]
sorting_1.value = sort_opt[1]
sorting_2.value = sort_opt[2]
# geometry choice; setting `active` here fires the callback and loads defaults
geom_radiogroup_div = Div(text="Geometry:", margin=(5, 5, 0, 5))
geom_radiogroup = RadioGroup(labels=["bisecting", "normal beam"], width=150)
geom_radiogroup.on_change("active", geom_radiogroup_callback)
geom_radiogroup.active = 0
def go_button_callback():
    """Generate reflection lists with Sxtal_Refgen for every entered k vector.

    Collects angular limits and crystal/magnetic parameters from the UI,
    writes temporary .geom/.cfl files, runs the external Sxtal_Refgen binary
    once per k vector, sorts the produced .hkl/.mhkl files, and stores all
    result-file contents in ``res_files`` for preview and download.
    """
    # push UI values back into the angular-limits dict
    ang_lims["gamma"][0], ang_lims["gamma"][1] = sttgamma_ti.value.strip().split()
    ang_lims["omega"][0], ang_lims["omega"][1] = omega_ti.value.strip().split()
    if ang_lims["geom"] == "nb":
        ang_lims["nu"][0], ang_lims["nu"][1] = chinu_ti.value.strip().split()
    else:  # ang_lims["geom"] == "bi"
        ang_lims["chi"][0], ang_lims["chi"][1] = chinu_ti.value.strip().split()
        ang_lims["phi"][0], ang_lims["phi"][1] = phi_ti.value.strip().split()

    # cif values take precedence over any previously loaded cfl values
    if cif_data:
        params.update(cif_data)

    params["WAVE"] = wavelen_input.value
    params["SPGR"] = cryst_space_group.value
    params["CELL"] = cryst_cell.value
    params["UBMAT"] = ub_matrix.value.split()
    params["HLIM"] = ranges_hkl.value
    params["SRANG"] = ranges_srang.value
    params["lattiCE"] = magstruct_lattice.value
    kvects = magstruct_kvec.value.split("\n")

    with tempfile.TemporaryDirectory() as temp_dir:
        geom_path = os.path.join(temp_dir, "zebra.geom")
        if open_geom.value:
            geom_template = io.StringIO(base64.b64decode(open_geom.value).decode())
        else:
            geom_template = None
        pyzebra.export_geom_file(geom_path, ang_lims, geom_template)

        print(f"Content of {geom_path}:")
        with open(geom_path) as f:
            print(f.read())

        # sort priority: user-selected angles first; remaining bi angles appended
        priority = [sorting_0.value, sorting_1.value, sorting_2.value]
        chunks = [sorting_0_dt.value, sorting_1_dt.value, sorting_2_dt.value]
        if geom_radiogroup.active == 0:
            sort_hkl_file = pyzebra.sort_hkl_file_bi
            priority.extend(set(SORT_OPT_BI) - set(priority))
        else:
            sort_hkl_file = pyzebra.sort_hkl_file_nb

        # run sxtal_refgen for each kvect provided
        for i, kvect in enumerate(kvects, start=1):
            params["kvect"] = kvect

            # name outputs after the uploaded cfl file, if any
            if open_cfl.filename:
                base_fname = f"{os.path.splitext(open_cfl.filename)[0]}_{i}"
            else:
                base_fname = f"zebra_{i}"

            cfl_path = os.path.join(temp_dir, base_fname + ".cfl")
            if open_cfl.value:
                cfl_template = io.StringIO(base64.b64decode(open_cfl.value).decode())
            else:
                cfl_template = None
            pyzebra.export_cfl_file(cfl_path, params, cfl_template)

            print(f"Content of {cfl_path}:")
            with open(cfl_path) as f:
                print(f.read())

            comp_proc = subprocess.run(
                [pyzebra.SXTAL_REFGEN_PATH, cfl_path],
                cwd=temp_dir,
                check=True,
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT,
                text=True,
            )
            print(" ".join(comp_proc.args))
            print(comp_proc.stdout)

            if i == 1:  # all hkl files are identical, so keep only one
                hkl_fname = base_fname + ".hkl"
                hkl_fpath = os.path.join(temp_dir, hkl_fname)
                with open(hkl_fpath) as f:
                    res_files[hkl_fname] = f.read()

                hkl_fname_sorted = base_fname + "_sorted.hkl"
                hkl_fpath_sorted = os.path.join(temp_dir, hkl_fname_sorted)
                sort_hkl_file(hkl_fpath, hkl_fpath_sorted, priority, chunks)
                with open(hkl_fpath_sorted) as f:
                    res_files[hkl_fname_sorted] = f.read()

            mhkl_fname = base_fname + ".mhkl"
            mhkl_fpath = os.path.join(temp_dir, mhkl_fname)
            with open(mhkl_fpath) as f:
                res_files[mhkl_fname] = f.read()

            mhkl_fname_sorted = base_fname + "_sorted.mhkl"
            # BUG FIX: previously joined with `hkl_fname_sorted`, which overwrote
            # the sorted .hkl file and stored its content under the .mhkl key
            mhkl_fpath_sorted = os.path.join(temp_dir, mhkl_fname_sorted)
            sort_hkl_file(mhkl_fpath, mhkl_fpath_sorted, priority, chunks)
            with open(mhkl_fpath_sorted) as f:
                res_files[mhkl_fname_sorted] = f.read()

    created_lists.options = list(res_files)
# triggers reflection-list generation via Sxtal_Refgen
go_button = Button(label="GO", button_type="primary", width=50)
go_button.on_click(go_button_callback)
def created_lists_callback(_attr, _old, new):
    """Preview the selected result file and stage it for browser download."""
    fname = new[0]
    text = res_files[fname]
    preview_lists.value = text
    js_data.data.update(content=[text], fname=[fname])
# list of generated result files; selecting one shows it in the preview area
created_lists = MultiSelect(title="Created lists:", width=200, height=150)
created_lists.on_change("value", created_lists_callback)
preview_lists = TextAreaInput(title="Preview selected list:", width=600, height=150)
# browser-side download of the selected list via the CustomJS snippet
download_file = Button(label="Download file", button_type="success", width=200)
download_file.js_on_click(CustomJS(args={"js_data": js_data}, code=javaScript))
plot_list = Button(label="Plot selected list", button_type="primary", width=200, disabled=True)
# measured-data upload used by the plotting section
measured_data_div = Div(text="Measured data:")
measured_data = FileInput(accept=".ccl", multiple=True, width=200)
# plot grid extents and per-file marker styling
min_grid_x = -10
max_grid_x = 10
min_grid_y = -5
max_grid_y = 5
cmap = Dark2[8]
syms = ["circle", "inverted_triangle", "square", "diamond", "star", "triangle"]
# Define resolution function
def _res_fun(stt, wave, res_mult):
expr = np.tan(stt / 2 * np.pi / 180)
fwhm = np.sqrt(0.4639 * expr ** 2 - 0.4452 * expr + 0.1506) * res_mult # res in deg
return fwhm
def plot_file_callback():
    """Parse the uploaded .ccl file(s) and redraw the reciprocal-space map.

    Reads widget state (cut plane, in-plane axes, k vectors, display options),
    converts each scan to cartesian coordinates and updates grid, scan/ellipse
    glyphs, direction arrows and the legend of `plot`.
    """
    orth_dir = list(map(float, hkl_normal.value.split()))
    cut_tol = hkl_delta.value
    cut_or = hkl_cut.value
    x_dir = list(map(float, hkl_in_plane_x.value.split()))
    y_dir = list(map(float, hkl_in_plane_y.value.split()))

    # exactly 3 propagation vectors are expected (reshape(3, 3))
    k = np.array(k_vectors.value.split()).astype(float).reshape(3, 3)
    tol_k = 0.1  # match tolerance between fractional hkl and a k vector

    # Plotting options
    grid_flag = 1
    grid_minor_flag = 1
    grid_div = 2  # Number of minor division lines per unit

    # different symbols based on file number
    file_flag = 0 in disting_opt_cb.active
    # scale marker size according to intensity
    intensity_flag = 1 in disting_opt_cb.active
    # use color to mark different propagation vectors
    prop_legend_flag = 2 in disting_opt_cb.active
    # use resolution ellipsis (truthy when "resolution ellipsoid" is selected)
    res_flag = disting_opt_rb.active
    # multiplier for resolution function (in case of samples with large mosaicity)
    res_mult = res_mult_ni.value

    md_fnames = measured_data.filename
    md_fdata = measured_data.value

    # Load first data file, read angles and define matrices to perform conversion to cartesian coordinates and back
    with io.StringIO(base64.b64decode(md_fdata[0]).decode()) as file:
        _, ext = os.path.splitext(md_fnames[0])
        try:
            file_data = pyzebra.parse_1D(file, ext)
        except:  # NOTE(review): bare except also swallows SystemExit/KeyboardInterrupt
            print(f"Error loading {md_fnames[0]}")
            return

    # Unit-cell angles of the first scan, in radians
    alpha = file_data[0]["alpha_cell"] * np.pi / 180.0
    beta = file_data[0]["beta_cell"] * np.pi / 180.0
    gamma = file_data[0]["gamma_cell"] * np.pi / 180.0

    # reciprocal angle parameters
    alpha_star = np.arccos(
        (np.cos(beta) * np.cos(gamma) - np.cos(alpha)) / (np.sin(beta) * np.sin(gamma))
    )
    beta_star = np.arccos(
        (np.cos(alpha) * np.cos(gamma) - np.cos(beta)) / (np.sin(alpha) * np.sin(gamma))
    )
    gamma_star = np.arccos(
        (np.cos(alpha) * np.cos(beta) - np.cos(gamma)) / (np.sin(alpha) * np.sin(beta))
    )

    # conversion matrix:
    M = np.array(
        [
            [1, np.cos(gamma_star), np.cos(beta_star)],
            [0, np.sin(gamma_star), -np.sin(beta_star) * np.cos(alpha)],
            [0, 0, np.sin(beta_star) * np.sin(alpha)],
        ]
    )
    M_inv = np.linalg.inv(M)  # NOTE(review): not used below in this view

    # Calculate in-plane y-direction
    x_c = M @ x_dir
    y_c = M @ y_dir
    o_c = M @ orth_dir

    # Normalize all directions
    y_c = y_c / np.linalg.norm(y_c)
    x_c = x_c / np.linalg.norm(x_c)
    o_c = o_c / np.linalg.norm(o_c)

    # Read all data
    hkl_coord = []
    intensity_vec = []
    k_flag_vec = []
    file_flag_vec = []
    res_vec_x = []
    res_vec_y = []
    res_N = 10  # number of resolution samples per scan line

    for j in range(len(md_fnames)):
        with io.StringIO(base64.b64decode(md_fdata[j]).decode()) as file:
            _, ext = os.path.splitext(md_fnames[j])
            try:
                file_data = pyzebra.parse_1D(file, ext)
            except:  # NOTE(review): bare except, see note above on first load
                print(f"Error loading {md_fnames[j]}")
                return

        # Loop through all data
        for scan in file_data:
            om = scan["omega"]
            gammad = scan["twotheta"]
            chi = scan["chi"]
            phi = scan["phi"]
            nud = 0  # 1d detector
            ub = scan["ub"]
            ddist = float(scan["detectorDistance"])
            counts = scan["counts"]
            mon = scan["monitor"]

            # Determine wavelength from mcvl value (is wavelength stored anywhere???)
            mcvl = scan["mcvl"]
            if mcvl == 2.2:
                wave = 1.178
            elif mcvl == 7.0:
                wave = 1.383
            else:
                wave = 2.3

            # Calculate resolution in degrees
            res = _res_fun(gammad, wave, res_mult)

            # convert to resolution in hkl along scan line
            ang2hkl_1d = pyzebra.ang2hkl_1d
            res_x = []
            res_y = []
            for _om in np.linspace(om[0], om[-1], num=res_N):
                # half-width in hkl: difference of positions at +-res/2 in omega
                expr1 = ang2hkl_1d(wave, ddist, gammad, _om + res / 2, chi, phi, nud, ub)
                expr2 = ang2hkl_1d(wave, ddist, gammad, _om - res / 2, chi, phi, nud, ub)
                hkl_temp = M @ (np.abs(expr1 - expr2) / 2)
                res_x.append(hkl_temp[0])
                res_y.append(hkl_temp[1])

            # Get first and final hkl
            hkl1 = ang2hkl_1d(wave, ddist, gammad, om[0], chi, phi, nud, ub)
            hkl2 = ang2hkl_1d(wave, ddist, gammad, om[-1], chi, phi, nud, ub)

            # Get hkl at best intensity
            hkl_m = ang2hkl_1d(wave, ddist, gammad, om[np.argmax(counts)], chi, phi, nud, ub)

            # Estimate intensity for marker size scaling:
            # subtract a linear background through the scan end points
            y1 = counts[0]
            y2 = counts[-1]
            x1 = om[0]
            x2 = om[-1]
            a = (y1 - y2) / (x1 - x2)
            b = y1 - a * x1
            intensity_exp = np.sum(counts - (a * om + b))
            c = int(intensity_exp / mon * 10000)  # monitor-normalized integral

            # Recognize k_flag_vec: distance of fractional hkl to each k vector
            min_hkl_m = np.minimum(1 - hkl_m % 1, hkl_m % 1)
            for j2, _k in enumerate(k):
                if all(np.abs(min_hkl_m - _k) < tol_k):
                    k_flag_vec.append(j2)
                    break
            else:
                # no k vector matched -> extra "unmatched" color index
                k_flag_vec.append(len(k))

            # Save data
            hkl_coord.append([hkl1, hkl2, hkl_m])
            intensity_vec.append(c)
            file_flag_vec.append(j)
            res_vec_x.append(res_x)
            res_vec_y.append(res_y)

    # Fixed default view window
    plot.x_range.start = plot.x_range.reset_start = -2
    plot.x_range.end = plot.x_range.reset_end = 5
    plot.y_range.start = plot.y_range.reset_start = -4
    plot.y_range.end = plot.y_range.reset_end = 3.5

    # Build major/minor grid segments in cartesian coordinates
    xs, ys = [], []
    xs_minor, ys_minor = [], []
    if grid_flag:
        for yy in np.arange(min_grid_y, max_grid_y, 1):
            hkl1 = M @ [0, yy, 0]
            # NOTE(review): the x-extent uses the y-limits here (and the loops
            # below use x-limits as y coordinates) — looks like a copy/paste
            # slip; confirm the intended grid extents
            xs.append([min_grid_y, max_grid_y])
            ys.append([hkl1[1], hkl1[1]])

        for xx in np.arange(min_grid_x, max_grid_x, 1):
            hkl1 = M @ [xx, min_grid_x, 0]
            hkl2 = M @ [xx, max_grid_x, 0]
            xs.append([hkl1[0], hkl2[0]])
            ys.append([hkl1[1], hkl2[1]])

    if grid_minor_flag:
        for yy in np.arange(min_grid_y, max_grid_y, 1 / grid_div):
            hkl1 = M @ [0, yy, 0]
            xs_minor.append([min_grid_y, max_grid_y])
            ys_minor.append([hkl1[1], hkl1[1]])

        for xx in np.arange(min_grid_x, max_grid_x, 1 / grid_div):
            hkl1 = M @ [xx, min_grid_x, 0]
            hkl2 = M @ [xx, max_grid_x, 0]
            xs_minor.append([hkl1[0], hkl2[0]])
            ys_minor.append([hkl1[1], hkl2[1]])

    grid_source.data.update(xs=xs, ys=ys)
    minor_grid_source.data.update(xs=xs_minor, ys=ys_minor)

    # Accumulators for ellipse glyphs and scan line/marker glyphs
    el_x, el_y, el_w, el_h, el_c = [], [], [], [], []
    scan_xs, scan_ys, scan_x, scan_y = [], [], [], []
    scan_m, scan_s, scan_c, scan_l = [], [], [], []
    for j in range(len(hkl_coord)):
        # Get middle hkl from list
        hklm = M @ hkl_coord[j][2]

        # Decide if point is in the cut
        proj = np.dot(hklm, o_c)
        if abs(proj - cut_or) >= cut_tol:
            continue

        hkl1 = M @ hkl_coord[j][0]
        hkl2 = M @ hkl_coord[j][1]

        if intensity_flag:
            markersize = max(1, int(intensity_vec[j] / max(intensity_vec) * 20))
        else:
            markersize = 4

        if file_flag:
            plot_symbol = syms[file_flag_vec[j]]
        else:
            plot_symbol = "circle"

        if prop_legend_flag:
            col_value = cmap[k_flag_vec[j]]
        else:
            col_value = "black"

        if res_flag:
            # Generate series of ellipses along scan line
            el_x.extend(np.linspace(hkl1[0], hkl2[0], num=res_N))
            el_y.extend(np.linspace(hkl1[1], hkl2[1], num=res_N))
            el_w.extend(np.array(res_vec_x[j]) * 2)
            el_h.extend(np.array(res_vec_y[j]) * 2)
            el_c.extend([col_value] * res_N)
        else:
            # Plot scan line
            scan_xs.append([hkl1[0], hkl2[0]])
            scan_ys.append([hkl1[1], hkl2[1]])

            # Plot middle point of scan
            scan_x.append(hklm[0])
            scan_y.append(hklm[1])
            scan_m.append(plot_symbol)
            scan_s.append(markersize)

            # Color and legend label
            scan_c.append(col_value)
            scan_l.append(md_fnames[file_flag_vec[j]])

    ellipse_source.data.update(x=el_x, y=el_y, w=el_w, h=el_h, c=el_c)
    scan_source.data.update(
        xs=scan_xs, ys=scan_ys, x=scan_x, y=scan_y, m=scan_m, s=scan_s, c=scan_c, l=scan_l,
    )

    # Direction arrows and their "h"/"k" labels
    arrow1.visible = True
    arrow1.x_end = x_c[0]
    arrow1.y_end = x_c[1]
    arrow2.visible = True
    arrow2.x_end = y_c[0]
    arrow2.y_end = y_c[1]
    kvect_source.data.update(
        text_x=[x_c[0] / 2, y_c[0] / 2 - 0.1],
        text_y=[x_c[1] - 0.1, y_c[1] / 2],
        text=["h", "k"],
    )

    # Legend items for different file entries (symbol)
    legend_items = []
    if not res_flag and file_flag:
        labels, inds = np.unique(scan_source.data["l"], return_index=True)
        for label, ind in zip(labels, inds):
            legend_items.append(LegendItem(label=label, renderers=[scatter], index=ind))

    # Legend items for propagation vector (color)
    if prop_legend_flag:
        if res_flag:
            source, render = ellipse_source, ellipse
        else:
            source, render = scan_source, mline

        labels, inds = np.unique(source.data["c"], return_index=True)
        for label, ind in zip(labels, inds):
            label = f"k={k[cmap.index(label)]}"
            legend_items.append(LegendItem(label=label, renderers=[render], index=ind))

    plot.legend.items = legend_items
plot_file = Button(label="Plot selected file(s)", button_type="primary", width=200)
plot_file.on_click(plot_file_callback)

# Main reciprocal-space figure; ranges are set by plot_file_callback
plot = Plot(x_range=Range1d(), y_range=Range1d(), plot_height=450, plot_width=600)
plot.add_tools(PanTool(), WheelZoomTool(), BoxZoomTool(), ResetTool())
plot.toolbar.logo = None

plot.add_layout(LinearAxis(), place="left")
plot.add_layout(LinearAxis(), place="below")

# Arrows marking the in-plane x/y directions (positions set in the callback)
arrow1 = Arrow(x_start=0, y_start=0, x_end=0, y_end=0, end=NormalHead(size=10), visible=False)
plot.add_layout(arrow1)
arrow2 = Arrow(x_start=0, y_start=0, x_end=0, y_end=0, end=NormalHead(size=10), visible=False)
plot.add_layout(arrow2)

# "h"/"k" text labels next to the arrows
kvect_source = ColumnDataSource(dict(text_x=[], text_y=[], text=[]))
plot.add_glyph(kvect_source, Text(x="text_x", y="text_y", text="text"))

# Major (solid) and minor (dotted) reciprocal-lattice grid
grid_source = ColumnDataSource(dict(xs=[], ys=[]))
minor_grid_source = ColumnDataSource(dict(xs=[], ys=[]))
plot.add_glyph(grid_source, MultiLine(xs="xs", ys="ys", line_color="gray"))
plot.add_glyph(
    minor_grid_source, MultiLine(xs="xs", ys="ys", line_color="gray", line_dash="dotted")
)

# Resolution ellipses (used when "resolution ellipsoid" option is active)
ellipse_source = ColumnDataSource(dict(x=[], y=[], w=[], h=[], c=[]))
ellipse = plot.add_glyph(
    ellipse_source, Ellipse(x="x", y="y", width="w", height="h", fill_color="c", line_color="c")
)

# Scan lines (MultiLine) plus a marker at each scan's peak position (Scatter)
scan_source = ColumnDataSource(dict(xs=[], ys=[], x=[], y=[], m=[], s=[], c=[], l=[]))
mline = plot.add_glyph(scan_source, MultiLine(xs="xs", ys="ys", line_color="c"))
scatter = plot.add_glyph(
    scan_source, Scatter(x="x", y="y", marker="m", size="s", fill_color="c", line_color="c")
)

plot.add_layout(Legend(items=[], location="top_left", click_policy="hide"))

# Cut-plane controls
hkl_div = Div(text="HKL:", margin=(5, 5, 0, 5))
hkl_normal = TextInput(title="normal", value="0 0 1", width=70)
hkl_cut = Spinner(title="cut", value=0, step=0.1, width=70)
hkl_delta = NumericInput(title="delta", value=0.1, mode="float", width=70)
hkl_in_plane_x = TextInput(title="in-plane X", value="1 0 0", width=70)
hkl_in_plane_y = TextInput(title="in-plane Y", value="0 1 0", width=70)

# Display-option controls read by plot_file_callback
disting_opt_div = Div(text="Distinguish options:", margin=(5, 5, 0, 5))
disting_opt_cb = CheckboxGroup(
    labels=["files (symbols)", "intensities (size)", "k vectors nucl/magn (colors)"],
    active=[0, 1, 2],
    width=200,
)
disting_opt_rb = RadioGroup(
    labels=["scan direction", "resolution ellipsoid"], active=0, width=200
)

k_vectors = TextAreaInput(
    title="k vectors:", value="0.0 0.0 0.0\n0.5 0.0 0.0\n0.5 0.5 0.0", width=150,
)
res_mult_ni = NumericInput(title="Resolution mult:", value=10, mode="int", width=100)

# ---- final tab layout (widgets referenced here are defined earlier in create)
fileinput_layout = row(open_cfl_div, open_cfl, open_cif_div, open_cif, open_geom_div, open_geom)

geom_layout = column(geom_radiogroup_div, geom_radiogroup)
wavelen_layout = column(wavelen_div, row(wavelen_select, wavelen_input))
anglim_layout = column(anglim_div, row(sttgamma_ti, omega_ti, chinu_ti, phi_ti))
cryst_layout = column(cryst_div, row(cryst_space_group, cryst_cell))
ubmat_layout = row(column(Spacer(height=18), ub_matrix_calc), ub_matrix)
ranges_layout = column(ranges_div, row(ranges_hkl, ranges_srang))
magstruct_layout = column(magstruct_div, row(magstruct_lattice, magstruct_kvec))
sorting_layout = row(
    sorting_0,
    sorting_0_dt,
    Spacer(width=30),
    sorting_1,
    sorting_1_dt,
    Spacer(width=30),
    sorting_2,
    sorting_2_dt,
)

column1_layout = column(
    fileinput_layout,
    Spacer(height=10),
    row(geom_layout, wavelen_layout, Spacer(width=50), anglim_layout),
    cryst_layout,
    ubmat_layout,
    row(ranges_layout, Spacer(width=50), magstruct_layout),
    row(sorting_layout, Spacer(width=30), column(Spacer(height=18), go_button)),
    row(created_lists, preview_lists),
    row(download_file, plot_list),
)

hkl_layout = column(
    hkl_div,
    row(hkl_normal, hkl_cut, hkl_delta, Spacer(width=10), hkl_in_plane_x, hkl_in_plane_y),
)
disting_layout = column(disting_opt_div, row(disting_opt_cb, disting_opt_rb))
column2_layout = column(
    row(measured_data_div, measured_data, plot_file),
    plot,
    row(hkl_layout, k_vectors),
    row(disting_layout, res_mult_ni),
)

tab_layout = row(column1_layout, column2_layout)

return Panel(child=tab_layout, title="ccl prepare")

View File

@ -0,0 +1,591 @@
import base64
import io
import os
import numpy as np
from bokeh.io import curdoc
from bokeh.layouts import column, gridplot, row
from bokeh.models import (
BasicTicker,
BoxZoomTool,
Button,
CellEditor,
CheckboxGroup,
ColumnDataSource,
DataRange1d,
DataTable,
Div,
FileInput,
Grid,
MultiSelect,
NumberEditor,
NumberFormatter,
Image,
LinearAxis,
LinearColorMapper,
Panel,
PanTool,
Plot,
Range1d,
ResetTool,
Scatter,
Select,
Spinner,
TableColumn,
Tabs,
Title,
WheelZoomTool,
)
from bokeh.palettes import Cividis256, Greys256, Plasma256 # pylint: disable=E0611
import pyzebra
# Detector image dimensions (pixels) and derived plot sizes; the constant
# offsets pad for axes/toolbar chrome.
IMAGE_W = 256
IMAGE_H = 128
IMAGE_PLOT_W = int(IMAGE_W * 2) + 52
IMAGE_PLOT_H = int(IMAGE_H * 2) + 27
def create():
    """Build and return the "hdf param study" tab Panel."""
    doc = curdoc()
    dataset = []  # scans loaded from .hdf files (list of dicts)
    cami_meta = {}  # metadata parsed from an uploaded .cami file
    num_formatter = NumberFormatter(format="0.00", nan_format="")
def file_select_update():
    """Refresh file_select options from the proposal directory or the
    uploaded .cami file list, depending on the selected data source."""
    if data_source.value == "proposal number":
        proposal_path = proposal_textinput.name
        if not proposal_path:
            file_select.options = []
            return

        file_select.options = [
            (os.path.join(proposal_path, fname), fname)
            for fname in os.listdir(proposal_path)
            if fname.endswith(".hdf")
        ]
    else:  # "cami file"
        if not cami_meta:
            file_select.options = []
            return

        file_select.options = [
            (entry, os.path.basename(entry)) for entry in cami_meta["filelist"]
        ]
def data_source_callback(_attr, _old, _new):
    file_select_update()

data_source = Select(
    title="Data Source:",
    value="proposal number",
    options=["proposal number", "cami file"],
    width=210,
)
data_source.on_change("value", data_source_callback)

# Re-scan the source periodically so newly written files appear
doc.add_periodic_callback(file_select_update, 5000)

def proposal_textinput_callback(_attr, _old, _new):
    file_select_update()

# Shared proposal input owned by the document; "name" holds the resolved path
proposal_textinput = doc.proposal_textinput
proposal_textinput.on_change("name", proposal_textinput_callback)

def upload_button_callback(_attr, _old, new):
    """Parse an uploaded (base64-encoded) .cami file and switch to it."""
    nonlocal cami_meta
    with io.StringIO(base64.b64decode(new).decode()) as file:
        cami_meta = pyzebra.parse_h5meta(file)

    data_source.value = "cami file"
    file_select_update()

upload_div = Div(text="or upload .cami file:", margin=(5, 5, 0, 5))
upload_button = FileInput(accept=".cami", width=200)
upload_button.on_change("value", upload_button_callback)

file_select = MultiSelect(title="Available .hdf files:", width=210, height=320)
def _init_datatable():
    """Populate the scan table from *dataset* and select the first row."""
    n_scans = len(dataset)
    scan_table_source.data.update(
        file=[os.path.basename(scan["original_filename"]) for scan in dataset],
        param=[None] * n_scans,
        frame=[None] * n_scans,
        x_pos=[None] * n_scans,
        y_pos=[None] * n_scans,
    )
    # clear first so selecting row 0 always fires the selection callback
    scan_table_source.selected.indices = []
    scan_table_source.selected.indices = [0]

    param_select.value = "user defined"
def _update_table():
    """Push fitted frame/x/y positions of every scan into the table source.

    Scans without a "fit" entry get None in all three columns.
    """
    frame, x_pos, y_pos = [], [], []
    for scan in dataset:
        if "fit" in scan:
            fit = scan["fit"]
            frame.append(fit["frame"])
            x_pos.append(fit["x_pos"])
            y_pos.append(fit["y_pos"])
        else:
            frame.append(None)
            x_pos.append(None)
            y_pos.append(None)

    scan_table_source.data.update(frame=frame, x_pos=x_pos, y_pos=y_pos)
def _file_open():
    """Read every file selected in file_select and append to *dataset*.

    Aborts (without modifying dataset) on the first unreadable file.
    """
    new_data = []
    for f_name in file_select.value:
        try:
            new_data.append(pyzebra.read_detector_data(f_name))
        except KeyError:
            print("Could not read data from the file.")
            return

    dataset.extend(new_data)
    _init_datatable()

def file_open_button_callback():
    # "Open New" replaces the dataset before loading
    nonlocal dataset
    dataset = []
    _file_open()

file_open_button = Button(label="Open New", width=100)
file_open_button.on_click(file_open_button_callback)

def file_append_button_callback():
    # "Append" keeps already-loaded scans
    _file_open()

file_append_button = Button(label="Append", width=100)
file_append_button.on_click(file_append_button_callback)
# Scan select
def scan_table_select_callback(_attr, old, new):
    """Update metadata display and overview plots for the newly selected scan."""
    if not new:
        # skip empty selections
        return

    # Avoid selection of multiple indices (via Shift+Click or Ctrl+Click)
    if len(new) > 1:
        # drop selection to the previous one
        scan_table_source.selected.indices = old
        return

    if len(old) > 1:
        # skip unnecessary update caused by selection drop
        return

    scan = dataset[new[0]]

    zebra_mode = scan["zebra_mode"]
    if zebra_mode == "nb":
        metadata_table_source.data.update(geom=["normal beam"])
    else:  # zebra_mode == "bi"
        metadata_table_source.data.update(geom=["bisecting"])

    # mf/temp are arrays in the h5 format; display the first value
    if "mf" in scan:
        metadata_table_source.data.update(mf=[scan["mf"][0]])
    else:
        metadata_table_source.data.update(mf=[None])

    if "temp" in scan:
        metadata_table_source.data.update(temp=[scan["temp"][0]])
    else:
        metadata_table_source.data.update(temp=[None])

    update_overview_plot()

def scan_table_source_callback(_attr, _old, _new):
    # placeholder: table edits (param column) currently need no extra handling
    pass

scan_table_source = ColumnDataSource(dict(file=[], param=[], frame=[], x_pos=[], y_pos=[]))
scan_table_source.selected.on_change("indices", scan_table_select_callback)
scan_table_source.on_change("data", scan_table_source_callback)
# Scan list; only the "param" column is user-editable
scan_table = DataTable(
    source=scan_table_source,
    columns=[
        TableColumn(field="file", title="file", editor=CellEditor(), width=150),
        TableColumn(
            field="param",
            title="param",
            formatter=num_formatter,
            editor=NumberEditor(),
            width=50,
        ),
        TableColumn(
            field="frame", title="Frame", formatter=num_formatter, editor=CellEditor(), width=70
        ),
        TableColumn(
            field="x_pos", title="X", formatter=num_formatter, editor=CellEditor(), width=70
        ),
        TableColumn(
            field="y_pos", title="Y", formatter=num_formatter, editor=CellEditor(), width=70
        ),
    ],
    width=470,  # +60 because of the index column
    height=420,
    editable=True,
    autosize_mode="none",
)
def _get_selected_scan():
    """Return the dataset entry for the currently selected table row."""
    selected_row = scan_table_source.selected.indices[0]
    return dataset[selected_row]
def param_select_callback(_attr, _old, new):
    """Fill the editable "param" column from the chosen scan metadata field."""
    if new == "user defined":
        param = [None] * len(dataset)
    else:
        # TODO: which value to take?
        param = [scan[new][0] for scan in dataset]

    scan_table_source.data["param"] = param
    _update_param_plot()

param_select = Select(
    title="Parameter:",
    options=["user defined", "temp", "mf", "h", "k", "l"],
    value="user defined",
    width=145,
)
param_select.on_change("value", param_select_callback)
def update_overview_plot():
    """Recompute X/Y detector projections of the selected scan and update
    both overview images, color ranges and the frame/motor axis ranges."""
    scan = _get_selected_scan()
    counts = scan["counts"]
    # counts unpacks as (frames, det_y, det_x) — inferred from the axes below
    n_im, n_y, n_x = counts.shape
    overview_x = np.mean(counts, axis=1)  # project out detector y
    overview_y = np.mean(counts, axis=2)  # project out detector x

    # normalize for simpler colormapping
    overview_max_val = max(np.max(overview_x), np.max(overview_y))
    overview_x = 1000 * overview_x / overview_max_val
    overview_y = 1000 * overview_y / overview_max_val

    overview_plot_x_image_source.data.update(image=[overview_x], dw=[n_x], dh=[n_im])
    overview_plot_y_image_source.data.update(image=[overview_y], dw=[n_y], dh=[n_im])

    if proj_auto_checkbox.active:
        # auto-ranging: span the common min/max of both projections
        im_min = min(np.min(overview_x), np.min(overview_y))
        im_max = max(np.max(overview_x), np.max(overview_y))

        proj_display_min_spinner.value = im_min
        proj_display_max_spinner.value = im_max

        overview_plot_x_image_glyph.color_mapper.low = im_min
        overview_plot_y_image_glyph.color_mapper.low = im_min
        overview_plot_x_image_glyph.color_mapper.high = im_max
        overview_plot_y_image_glyph.color_mapper.high = im_max

    frame_range.start = 0
    frame_range.end = n_im
    frame_range.reset_start = 0
    frame_range.reset_end = n_im
    frame_range.bounds = (0, n_im)

    scan_motor = scan["scan_motor"]
    overview_plot_y.axis[1].axis_label = f"Scanning motor, {scan_motor}"

    var = scan[scan_motor]
    var_start = var[0]
    # extend the end by one motor step so the last frame has full extent
    var_end = var[-1] + (var[-1] - var[0]) / (n_im - 1)

    scanning_motor_range.start = var_start
    scanning_motor_range.end = var_end
    scanning_motor_range.reset_start = var_start
    scanning_motor_range.reset_end = var_end
    # handle both, ascending and descending sequences
    scanning_motor_range.bounds = (min(var_start, var_end), max(var_start, var_end))
# shared frame ranges (linked between the X- and Y-projection plots)
frame_range = Range1d(0, 1, bounds=(0, 1))
scanning_motor_range = Range1d(0, 1, bounds=(0, 1))

det_x_range = Range1d(0, IMAGE_W, bounds=(0, IMAGE_W))
overview_plot_x = Plot(
    title=Title(text="Projections on X-axis"),
    x_range=det_x_range,
    y_range=frame_range,
    extra_y_ranges={"scanning_motor": scanning_motor_range},
    plot_height=400,
    plot_width=IMAGE_PLOT_W - 3,
)

# ---- tools
wheelzoomtool = WheelZoomTool(maintain_focus=False)
overview_plot_x.toolbar.logo = None
overview_plot_x.add_tools(
    PanTool(), BoxZoomTool(), wheelzoomtool, ResetTool(),
)
overview_plot_x.toolbar.active_scroll = wheelzoomtool

# ---- axes
overview_plot_x.add_layout(LinearAxis(axis_label="Coordinate X, pix"), place="below")
overview_plot_x.add_layout(
    LinearAxis(axis_label="Frame", major_label_orientation="vertical"), place="left"
)

# ---- grid lines
overview_plot_x.add_layout(Grid(dimension=0, ticker=BasicTicker()))
overview_plot_x.add_layout(Grid(dimension=1, ticker=BasicTicker()))

# ---- rgba image glyph
overview_plot_x_image_source = ColumnDataSource(
    dict(image=[np.zeros((1, 1), dtype="float32")], x=[0], y=[0], dw=[IMAGE_W], dh=[1])
)

overview_plot_x_image_glyph = Image(image="image", x="x", y="y", dw="dw", dh="dh")
overview_plot_x.add_glyph(
    overview_plot_x_image_source, overview_plot_x_image_glyph, name="image_glyph"
)

det_y_range = Range1d(0, IMAGE_H, bounds=(0, IMAGE_H))
overview_plot_y = Plot(
    title=Title(text="Projections on Y-axis"),
    x_range=det_y_range,
    y_range=frame_range,
    extra_y_ranges={"scanning_motor": scanning_motor_range},
    plot_height=400,
    plot_width=IMAGE_PLOT_H + 22,
)

# ---- tools
wheelzoomtool = WheelZoomTool(maintain_focus=False)
overview_plot_y.toolbar.logo = None
overview_plot_y.add_tools(
    PanTool(), BoxZoomTool(), wheelzoomtool, ResetTool(),
)
overview_plot_y.toolbar.active_scroll = wheelzoomtool

# ---- axes
overview_plot_y.add_layout(LinearAxis(axis_label="Coordinate Y, pix"), place="below")
overview_plot_y.add_layout(
    LinearAxis(
        y_range_name="scanning_motor",
        axis_label="Scanning motor",
        major_label_orientation="vertical",
    ),
    place="right",
)

# ---- grid lines
overview_plot_y.add_layout(Grid(dimension=0, ticker=BasicTicker()))
overview_plot_y.add_layout(Grid(dimension=1, ticker=BasicTicker()))

# ---- rgba image glyph
overview_plot_y_image_source = ColumnDataSource(
    dict(image=[np.zeros((1, 1), dtype="float32")], x=[0], y=[0], dw=[IMAGE_H], dh=[1])
)

overview_plot_y_image_glyph = Image(image="image", x="x", y="y", dw="dw", dh="dh")
overview_plot_y.add_glyph(
    overview_plot_y_image_source, overview_plot_y_image_glyph, name="image_glyph"
)
cmap_dict = {
    "gray": Greys256,
    "gray_reversed": Greys256[::-1],
    "plasma": Plasma256,
    "cividis": Cividis256,
}

def colormap_callback(_attr, _old, new):
    # Swap in fresh mappers; low/high get restored on the next overview update
    overview_plot_x_image_glyph.color_mapper = LinearColorMapper(palette=cmap_dict[new])
    overview_plot_y_image_glyph.color_mapper = LinearColorMapper(palette=cmap_dict[new])

colormap = Select(title="Colormap:", options=list(cmap_dict.keys()), width=210)
colormap.on_change("value", colormap_callback)
colormap.value = "plasma"  # assigning triggers the callback once at startup
PROJ_STEP = 1  # spinner step; also the enforced gap between min and max

def proj_auto_checkbox_callback(state):
    """Toggle manual min/max spinners vs automatic intensity range."""
    if state:
        proj_display_min_spinner.disabled = True
        proj_display_max_spinner.disabled = True
    else:
        proj_display_min_spinner.disabled = False
        proj_display_max_spinner.disabled = False

    update_overview_plot()

proj_auto_checkbox = CheckboxGroup(
    labels=["Projections Intensity Range"], active=[0], width=145, margin=[10, 5, 0, 5]
)
proj_auto_checkbox.on_click(proj_auto_checkbox_callback)

def proj_display_max_spinner_callback(_attr, _old_value, new_value):
    # keep min strictly below max and apply to both color mappers
    proj_display_min_spinner.high = new_value - PROJ_STEP
    overview_plot_x_image_glyph.color_mapper.high = new_value
    overview_plot_y_image_glyph.color_mapper.high = new_value

proj_display_max_spinner = Spinner(
    low=0 + PROJ_STEP,
    value=1,
    step=PROJ_STEP,
    disabled=bool(proj_auto_checkbox.active),
    width=100,
    height=31,
)
proj_display_max_spinner.on_change("value", proj_display_max_spinner_callback)

def proj_display_min_spinner_callback(_attr, _old_value, new_value):
    # keep max strictly above min and apply to both color mappers
    proj_display_max_spinner.low = new_value + PROJ_STEP
    overview_plot_x_image_glyph.color_mapper.low = new_value
    overview_plot_y_image_glyph.color_mapper.low = new_value

proj_display_min_spinner = Spinner(
    low=0,
    high=1 - PROJ_STEP,
    value=0,
    step=PROJ_STEP,
    disabled=bool(proj_auto_checkbox.active),
    width=100,
    height=31,
)
proj_display_min_spinner.on_change("value", proj_display_min_spinner_callback)
# Single-row read-only table showing metadata of the selected scan
metadata_table_source = ColumnDataSource(dict(geom=[""], temp=[None], mf=[None]))
metadata_table = DataTable(
    source=metadata_table_source,
    columns=[
        TableColumn(field="geom", title="Geometry", width=100),
        TableColumn(field="temp", title="Temperature", formatter=num_formatter, width=100),
        TableColumn(field="mf", title="Magnetic Field", formatter=num_formatter, width=100),
    ],
    width=300,
    height=50,
    autosize_mode="none",
    index_position=None,
)
def _update_param_plot():
    """Scatter the selected fit parameter against the user/table parameter,
    including only scans that have been fitted."""
    x = []
    y = []
    fit_param = fit_param_select.value
    for s, p in zip(dataset, scan_table_source.data["param"]):
        if "fit" in s and fit_param:
            x.append(p)
            y.append(s["fit"][fit_param])

    param_plot_scatter_source.data.update(x=x, y=y)

# Parameter plot
param_plot = Plot(x_range=DataRange1d(), y_range=DataRange1d(), plot_height=400, plot_width=700)

param_plot.add_layout(LinearAxis(axis_label="Fit parameter"), place="left")
param_plot.add_layout(LinearAxis(axis_label="Parameter"), place="below")

param_plot.add_layout(Grid(dimension=0, ticker=BasicTicker()))
param_plot.add_layout(Grid(dimension=1, ticker=BasicTicker()))

param_plot_scatter_source = ColumnDataSource(dict(x=[], y=[]))
param_plot.add_glyph(param_plot_scatter_source, Scatter(x="x", y="y"))

param_plot.add_tools(PanTool(), WheelZoomTool(), ResetTool())
param_plot.toolbar.logo = None

def fit_param_select_callback(_attr, _old, _new):
    _update_param_plot()

fit_param_select = Select(title="Fit parameter", options=[], width=145)
fit_param_select.on_change("value", fit_param_select_callback)
def _fit_roi():
    """Current fit region of interest as six ints:
    (frame_from, frame_to, y_from, y_to, x_from, x_to)."""
    return (
        int(np.floor(frame_range.start)),
        int(np.ceil(frame_range.end)),
        int(np.floor(det_y_range.start)),
        int(np.ceil(det_y_range.end)),
        int(np.floor(det_x_range.start)),
        int(np.ceil(det_x_range.end)),
    )

def _post_process_fits():
    """Refresh the table, fit-parameter selector and parameter plot after
    fitting (previously duplicated verbatim in both proc callbacks)."""
    _update_table()

    # expose fit-result keys of the first fitted scan as selectable parameters
    for scan in dataset:
        if "fit" in scan:
            options = list(scan["fit"].keys())
            fit_param_select.options = options
            fit_param_select.value = options[0]
            break

    _update_param_plot()

def proc_all_button_callback():
    """Fit the event in every loaded scan over the current ROI."""
    roi = _fit_roi()
    for scan in dataset:
        pyzebra.fit_event(scan, *roi)

    _post_process_fits()

proc_all_button = Button(label="Process All", button_type="primary", width=145)
proc_all_button.on_click(proc_all_button_callback)

def proc_button_callback():
    """Fit the event in the currently selected scan only."""
    pyzebra.fit_event(_get_selected_scan(), *_fit_roi())

    _post_process_fits()

proc_button = Button(label="Process Current", width=145)
proc_button.on_click(proc_button_callback)
# ---- final tab layout
layout_controls = row(
    colormap,
    column(proj_auto_checkbox, row(proj_display_min_spinner, proj_display_max_spinner)),
    proc_button,
    proc_all_button,
)

layout_overview = column(
    gridplot(
        [[overview_plot_x, overview_plot_y]],
        toolbar_options=dict(logo=None),
        merge_tools=True,
        toolbar_location="left",
    ),
    layout_controls,
)

# Plot tabs
plots = Tabs(
    tabs=[
        Panel(child=layout_overview, title="single scan"),
        Panel(child=column(param_plot, row(fit_param_select)), title="parameter plot"),
    ]
)

# Final layout
import_layout = column(
    data_source,
    upload_div,
    upload_button,
    file_select,
    row(file_open_button, file_append_button),
)

scan_layout = column(scan_table, row(param_select, metadata_table))

tab_layout = column(row(import_layout, scan_layout, plots))

return Panel(child=tab_layout, title="hdf param study")

View File

@ -1,6 +1,5 @@
import base64
import io
import math
import os
import numpy as np
@ -12,6 +11,7 @@ from bokeh.models import (
BoxEditTool,
BoxZoomTool,
Button,
CellEditor,
CheckboxGroup,
ColumnDataSource,
DataRange1d,
@ -37,12 +37,11 @@ from bokeh.models import (
Spacer,
Spinner,
TableColumn,
TextInput,
Tabs,
Title,
WheelZoomTool,
)
from bokeh.palettes import Cividis256, Greys256, Plasma256 # pylint: disable=E0611
from scipy.optimize import curve_fit
import pyzebra
@ -54,57 +53,281 @@ IMAGE_PLOT_H = int(IMAGE_H * 2) + 27
def create():
doc = curdoc()
det_data = {}
dataset = []
cami_meta = {}
num_formatter = NumberFormatter(format="0.00", nan_format="")
def file_select_update_for_proposal():
proposal = proposal_textinput.value.strip()
if not proposal:
return
def file_select_update():
if data_source.value == "proposal number":
proposal_path = proposal_textinput.name
if proposal_path:
file_list = []
for file in os.listdir(proposal_path):
if file.endswith(".hdf"):
file_list.append((os.path.join(proposal_path, file), file))
file_select.options = file_list
else:
file_select.options = []
for zebra_proposals_path in pyzebra.ZEBRA_PROPOSALS_PATHS:
proposal_path = os.path.join(zebra_proposals_path, proposal)
if os.path.isdir(proposal_path):
# found it
break
else:
raise ValueError(f"Can not find data for proposal '{proposal}'.")
else: # "cami file"
if not cami_meta:
file_select.options = []
return
file_list = []
for file in os.listdir(proposal_path):
if file.endswith(".hdf"):
file_list.append((os.path.join(proposal_path, file), file))
file_select.options = file_list
doc.add_periodic_callback(file_select_update_for_proposal, 5000)
def proposal_textinput_callback(_attr, _old, _new):
nonlocal cami_meta
cami_meta = {}
file_select_update_for_proposal()
proposal_textinput = TextInput(title="Proposal number:", width=210)
proposal_textinput.on_change("value", proposal_textinput_callback)
def upload_button_callback(_attr, _old, new):
nonlocal cami_meta
proposal_textinput.value = ""
with io.StringIO(base64.b64decode(new).decode()) as file:
cami_meta = pyzebra.parse_h5meta(file)
file_list = cami_meta["filelist"]
file_select.options = [(entry, os.path.basename(entry)) for entry in file_list]
upload_div = Div(text="or upload .cami file:", margin=(5, 5, 0, 5))
upload_button = FileInput(accept=".cami", width=200)
upload_button.on_change("value", upload_button_callback)
def data_source_callback(_attr, _old, _new):
file_select_update()
def update_image(index=None):
data_source = Select(
title="Data Source:",
value="proposal number",
options=["proposal number", "cami file"],
width=210,
)
data_source.on_change("value", data_source_callback)
doc.add_periodic_callback(file_select_update, 5000)
def proposal_textinput_callback(_attr, _old, _new):
file_select_update()
proposal_textinput = doc.proposal_textinput
proposal_textinput.on_change("name", proposal_textinput_callback)
def upload_cami_button_callback(_attr, _old, new):
nonlocal cami_meta
with io.StringIO(base64.b64decode(new).decode()) as file:
cami_meta = pyzebra.parse_h5meta(file)
data_source.value = "cami file"
file_select_update()
upload_cami_div = Div(text="or upload .cami file:", margin=(5, 5, 0, 5))
upload_cami_button = FileInput(accept=".cami", width=200)
upload_cami_button.on_change("value", upload_cami_button_callback)
def upload_hdf_button_callback(_attr, _old, new):
nonlocal dataset
try:
scan = pyzebra.read_detector_data(io.BytesIO(base64.b64decode(new)), None)
except KeyError:
print("Could not read data from the file.")
return
dataset = [scan]
last_im_index = scan["counts"].shape[0] - 1
index_spinner.value = 0
index_spinner.high = last_im_index
if last_im_index == 0:
index_slider.disabled = True
else:
index_slider.disabled = False
index_slider.end = last_im_index
zebra_mode = scan["zebra_mode"]
if zebra_mode == "nb":
metadata_table_source.data.update(geom=["normal beam"])
else: # zebra_mode == "bi"
metadata_table_source.data.update(geom=["bisecting"])
_init_datatable()
upload_hdf_div = Div(text="or upload .hdf file:", margin=(5, 5, 0, 5))
upload_hdf_button = FileInput(accept=".hdf", width=200)
upload_hdf_button.on_change("value", upload_hdf_button_callback)
def file_open_button_callback():
nonlocal dataset
new_data = []
cm = cami_meta if data_source.value == "cami file" else None
for f_path in file_select.value:
f_name = os.path.basename(f_path)
try:
file_data = [pyzebra.read_detector_data(f_path, cm)]
except:
print(f"Error loading {f_name}")
continue
pyzebra.normalize_dataset(file_data, monitor_spinner.value)
if not new_data: # first file
new_data = file_data
else:
pyzebra.merge_datasets(new_data, file_data)
if new_data:
dataset = new_data
_init_datatable()
file_open_button = Button(label="Open New", width=100)
file_open_button.on_click(file_open_button_callback)
def file_append_button_callback():
    """Append the selected .hdf files to the current dataset.

    Each file is read, normalized to the monitor value and merged into
    `dataset`. Files that fail to load are reported and skipped; the scan
    table is refreshed only if the last attempted file loaded.
    """
    file_data = []
    for f_path in file_select.value:
        f_name = os.path.basename(f_path)
        try:
            file_data = [pyzebra.read_detector_data(f_path, None)]
        except Exception:
            # was a bare `except:`; keep the best-effort skip but let
            # KeyboardInterrupt/SystemExit propagate
            print(f"Error loading {f_name}")
            continue

        pyzebra.normalize_dataset(file_data, monitor_spinner.value)
        pyzebra.merge_datasets(dataset, file_data)

    if file_data:
        _init_datatable()

file_append_button = Button(label="Append", width=100)
file_append_button.on_click(file_append_button_callback)
def _init_datatable():
    """(Re)populate the scan table and the merge-source select from `dataset`."""

    def _median_or_none(scan, motor):
        # some scans lack a given motor; show an empty cell for those
        return np.median(scan[motor]) if motor in scan else None

    scan_indices = [s["idx"] for s in dataset]

    scan_table_source.data.update(
        scan=scan_indices,
        fit=[0] * len(scan_indices),
        export=[s["export"] for s in dataset],
        twotheta=[_median_or_none(s, "twotheta") for s in dataset],
        gamma=[_median_or_none(s, "gamma") for s in dataset],
        omega=[_median_or_none(s, "omega") for s in dataset],
        chi=[_median_or_none(s, "chi") for s in dataset],
        phi=[_median_or_none(s, "phi") for s in dataset],
        nu=[_median_or_none(s, "nu") for s in dataset],
    )
    # clear first so selecting row 0 always fires a selection-change event
    scan_table_source.selected.indices = []
    scan_table_source.selected.indices = [0]

    merge_options = [(str(pos), f"{pos} ({idx})") for pos, idx in enumerate(scan_indices)]
    merge_from_select.options = merge_options
    merge_from_select.value = merge_options[0][0]
def scan_table_select_callback(_attr, old, new):
    """React to a scan-table row selection: refresh frame widgets and plots."""
    if not new:
        # skip empty selections
        return

    # Only single selection is supported (Shift/Ctrl+Click yields several)
    if len(new) > 1:
        # revert to the previous selection
        scan_table_source.selected.indices = old
        return

    if len(old) > 1:
        # this change is just the revert above settling in; ignore it
        return

    scan = _get_selected_scan()

    last_im_index = scan["counts"].shape[0] - 1
    index_spinner.value = 0
    index_spinner.high = last_im_index
    index_slider.disabled = last_im_index == 0
    if last_im_index != 0:
        index_slider.end = last_im_index

    geom_label = "normal beam" if scan["zebra_mode"] == "nb" else "bisecting"
    metadata_table_source.data.update(geom=[geom_label])

    _update_image()
    _update_overview_plot()
def scan_table_source_callback(_attr, _old, new):
    """Propagate edits of the "export" column back into the dataset."""
    # unfortunately, we don't know if the change comes from data update or user input
    # also `old` and `new` are the same for non-scalars
    for scan, export in zip(dataset, new["export"]):
        scan["export"] = export

scan_table_source = ColumnDataSource(
    dict(scan=[], fit=[], export=[], twotheta=[], gamma=[], omega=[], chi=[], phi=[], nu=[],)
)
scan_table_source.on_change("data", scan_table_source_callback)
scan_table_source.selected.on_change("indices", scan_table_select_callback)

scan_table = DataTable(
    source=scan_table_source,
    # CellEditor() makes the cells effectively read-only while keeping the
    # table itself editable (needed for the export checkboxes to work)
    columns=[
        TableColumn(field="scan", title="Scan", editor=CellEditor(), width=50),
        TableColumn(field="fit", title="Fit", editor=CellEditor(), width=50),
        TableColumn(field="export", title="Export", editor=CellEditor(), width=50),
        TableColumn(field="twotheta", title="2theta", editor=CellEditor(), width=50),
        TableColumn(field="gamma", title="gamma", editor=CellEditor(), width=50),
        TableColumn(field="omega", title="omega", editor=CellEditor(), width=50),
        TableColumn(field="chi", title="chi", editor=CellEditor(), width=50),
        TableColumn(field="phi", title="phi", editor=CellEditor(), width=50),
        TableColumn(field="nu", title="nu", editor=CellEditor(), width=50),
    ],
    width=310,  # +60 because of the index column, but excluding twotheta onwards
    height=350,
    autosize_mode="none",
    editable=True,
)
def _get_selected_scan():
    """Return the scan behind the currently selected scan-table row."""
    selected_row = scan_table_source.selected.indices[0]
    return dataset[selected_row]

def _update_table():
    """Sync the table's export column with the dataset."""
    scan_table_source.data.update(export=[s["export"] for s in dataset])
def monitor_spinner_callback(_attr, old, new):
    """Renormalize every scan to the new monitor value and redraw."""
    if not dataset:
        return
    pyzebra.normalize_dataset(dataset, new)
    _update_image()
    _update_overview_plot()

monitor_spinner = Spinner(title="Monitor:", mode="int", value=100_000, low=1, width=145)
monitor_spinner.on_change("value", monitor_spinner_callback)

merge_from_select = Select(title="scan:", width=145)
def merge_button_callback():
    """Merge the scan chosen in `merge_from_select` into the selected scan."""
    target = _get_selected_scan()
    source = dataset[int(merge_from_select.value)]
    if target is source:
        print("WARNING: Selected scans for merging are identical")
        return

    pyzebra.merge_h5_scans(target, source)
    _update_table()
    _update_image()
    _update_overview_plot()

merge_button = Button(label="Merge into current", width=145)
merge_button.on_click(merge_button_callback)
def restore_button_callback():
    """Undo merges on the currently selected scan and redraw everything."""
    pyzebra.restore_scan(_get_selected_scan())
    _update_table()
    _update_image()
    _update_overview_plot()

restore_button = Button(label="Restore scan", width=145)
restore_button.on_click(restore_button_callback)
def _update_image(index=None):
if index is None:
index = index_spinner.value
current_image = det_data["data"][index]
scan = _get_selected_scan()
current_image = scan["counts"][index]
proj_v_line_source.data.update(
x=np.arange(0, IMAGE_W) + 0.5, y=np.mean(current_image, axis=0)
)
@ -127,25 +350,59 @@ def create():
image_glyph.color_mapper.low = im_min
image_glyph.color_mapper.high = im_max
if "mf" in det_data:
metadata_table_source.data.update(mf=[det_data["mf"][index]])
if "mf" in scan:
metadata_table_source.data.update(mf=[scan["mf"][index]])
else:
metadata_table_source.data.update(mf=[None])
if "temp" in det_data:
metadata_table_source.data.update(temp=[det_data["temp"][index]])
if "temp" in scan:
metadata_table_source.data.update(temp=[scan["temp"][index]])
else:
metadata_table_source.data.update(temp=[None])
gamma, nu = calculate_pol(det_data, index)
omega = np.ones((IMAGE_H, IMAGE_W)) * det_data["omega"][index]
gamma, nu = calculate_pol(scan, index)
omega = np.ones((IMAGE_H, IMAGE_W)) * scan["omega"][index]
image_source.data.update(gamma=[gamma], nu=[nu], omega=[omega])
def update_overview_plot():
h5_data = det_data["data"]
n_im, n_y, n_x = h5_data.shape
overview_x = np.mean(h5_data, axis=1)
overview_y = np.mean(h5_data, axis=2)
# update detector center angles
det_c_x = int(IMAGE_W / 2)
det_c_y = int(IMAGE_H / 2)
if scan["zebra_mode"] == "nb":
gamma_c = gamma[det_c_y, det_c_x]
nu_c = nu[det_c_y, det_c_x]
omega_c = omega[det_c_y, det_c_x]
chi_c = None
phi_c = None
else: # zebra_mode == "bi"
wave = scan["wave"]
ddist = scan["ddist"]
gammad = scan["gamma"][index]
om = scan["omega"][index]
ch = scan["chi"][index]
ph = scan["phi"][index]
nud = scan["nu"]
nu_c = 0
chi_c, phi_c, gamma_c, omega_c = pyzebra.ang_proc(
wave, ddist, gammad, om, ch, ph, nud, det_c_x, det_c_y
)
detcenter_table_source.data.update(
gamma=[gamma_c], nu=[nu_c], omega=[omega_c], chi=[chi_c], phi=[phi_c],
)
def _update_overview_plot():
scan = _get_selected_scan()
counts = scan["counts"]
n_im, n_y, n_x = counts.shape
overview_x = np.mean(counts, axis=1)
overview_y = np.mean(counts, axis=2)
# normalize for simpler colormapping
overview_max_val = max(np.max(overview_x), np.max(overview_y))
overview_x = 1000 * overview_x / overview_max_val
overview_y = 1000 * overview_y / overview_max_val
overview_plot_x_image_source.data.update(image=[overview_x], dw=[n_x], dh=[n_im])
overview_plot_y_image_source.data.update(image=[overview_y], dw=[n_y], dh=[n_im])
@ -168,12 +425,12 @@ def create():
frame_range.reset_end = n_im
frame_range.bounds = (0, n_im)
scan_motor = det_data["scan_motor"]
scan_motor = scan["scan_motor"]
overview_plot_y.axis[1].axis_label = f"Scanning motor, {scan_motor}"
var = det_data[scan_motor]
var = scan[scan_motor]
var_start = var[0]
var_end = var[-1] + (var[-1] - var[0]) / (n_im - 1)
var_end = var[-1] + (var[-1] - var[0]) / (n_im - 1) if n_im != 1 else var_start + 1
scanning_motor_range.start = var_start
scanning_motor_range.end = var_end
@ -182,42 +439,30 @@ def create():
# handle both, ascending and descending sequences
scanning_motor_range.bounds = (min(var_start, var_end), max(var_start, var_end))
def file_select_callback(_attr, old, new):
nonlocal det_data
if not new:
# skip empty selections
return
gamma = image_source.data["gamma"][0]
gamma_start = gamma[0, 0]
gamma_end = gamma[0, -1]
# Avoid selection of multiple indicies (via Shift+Click or Ctrl+Click)
if len(new) > 1:
# drop selection to the previous one
file_select.value = old
return
gamma_range.start = gamma_start
gamma_range.end = gamma_end
gamma_range.reset_start = gamma_start
gamma_range.reset_end = gamma_end
gamma_range.bounds = (min(gamma_start, gamma_end), max(gamma_start, gamma_end))
if len(old) > 1:
# skip unnecessary update caused by selection drop
return
nu = image_source.data["nu"][0]
nu_start = nu[0, 0]
nu_end = nu[-1, 0]
det_data = pyzebra.read_detector_data(new[0], cami_meta)
index_spinner.value = 0
index_spinner.high = det_data["data"].shape[0] - 1
index_slider.end = det_data["data"].shape[0] - 1
zebra_mode = det_data["zebra_mode"]
if zebra_mode == "nb":
metadata_table_source.data.update(geom=["normal beam"])
else: # zebra_mode == "bi"
metadata_table_source.data.update(geom=["bisecting"])
update_image(0)
update_overview_plot()
nu_range.start = nu_start
nu_range.end = nu_end
nu_range.reset_start = nu_start
nu_range.reset_end = nu_end
nu_range.bounds = (min(nu_start, nu_end), max(nu_start, nu_end))
file_select = MultiSelect(title="Available .hdf files:", width=210, height=250)
file_select.on_change("value", file_select_callback)
def index_callback(_attr, _old, new):
update_image(new)
_update_image(new)
index_slider = Slider(value=0, start=0, end=1, show_value=False, width=400)
@ -282,9 +527,10 @@ def create():
# calculate hkl-indices of first mouse entry
def mouse_enter_callback(_event):
if det_data and np.array_equal(image_source.data["h"][0], np.zeros((1, 1))):
if dataset and np.array_equal(image_source.data["h"][0], np.zeros((1, 1))):
scan = _get_selected_scan()
index = index_spinner.value
h, k, l = calculate_hkl(det_data, index)
h, k, l = calculate_hkl(scan, index)
image_source.data.update(h=[h], k=[k], l=[l])
plot.on_event(MouseEnter, mouse_enter_callback)
@ -346,13 +592,14 @@ def create():
def box_edit_callback(_attr, _old, new):
if new["x"]:
h5_data = det_data["data"]
x_val = np.arange(h5_data.shape[0])
scan = _get_selected_scan()
counts = scan["counts"]
x_val = np.arange(counts.shape[0])
left = int(np.floor(new["x"][0]))
right = int(np.ceil(new["x"][0] + new["width"][0]))
bottom = int(np.floor(new["y"][0]))
top = int(np.ceil(new["y"][0] + new["height"][0]))
y_val = np.sum(h5_data[:, bottom:top, left:right], axis=(1, 2))
y_val = np.sum(counts[:, bottom:top, left:right], axis=(1, 2))
else:
x_val = []
y_val = []
@ -372,12 +619,14 @@ def create():
scanning_motor_range = Range1d(0, 1, bounds=(0, 1))
det_x_range = Range1d(0, IMAGE_W, bounds=(0, IMAGE_W))
gamma_range = Range1d(0, 1, bounds=(0, 1))
overview_plot_x = Plot(
title=Title(text="Projections on X-axis"),
x_range=det_x_range,
y_range=frame_range,
extra_x_ranges={"gamma": gamma_range},
extra_y_ranges={"scanning_motor": scanning_motor_range},
plot_height=400,
plot_height=450,
plot_width=IMAGE_PLOT_W - 3,
)
@ -391,6 +640,9 @@ def create():
# ---- axes
overview_plot_x.add_layout(LinearAxis(axis_label="Coordinate X, pix"), place="below")
overview_plot_x.add_layout(
LinearAxis(x_range_name="gamma", axis_label="Gamma, deg"), place="above"
)
overview_plot_x.add_layout(
LinearAxis(axis_label="Frame", major_label_orientation="vertical"), place="left"
)
@ -410,12 +662,14 @@ def create():
)
det_y_range = Range1d(0, IMAGE_H, bounds=(0, IMAGE_H))
nu_range = Range1d(0, 1, bounds=(0, 1))
overview_plot_y = Plot(
title=Title(text="Projections on Y-axis"),
x_range=det_y_range,
y_range=frame_range,
extra_x_ranges={"nu": nu_range},
extra_y_ranges={"scanning_motor": scanning_motor_range},
plot_height=400,
plot_height=450,
plot_width=IMAGE_PLOT_H + 22,
)
@ -429,6 +683,7 @@ def create():
# ---- axes
overview_plot_y.add_layout(LinearAxis(axis_label="Coordinate Y, pix"), place="below")
overview_plot_y.add_layout(LinearAxis(x_range_name="nu", axis_label="Nu, deg"), place="above")
overview_plot_y.add_layout(
LinearAxis(
y_range_name="scanning_motor",
@ -500,7 +755,7 @@ def create():
display_min_spinner.disabled = False
display_max_spinner.disabled = False
update_image()
_update_image()
main_auto_checkbox = CheckboxGroup(
labels=["Frame Intensity Range"], active=[0], width=145, margin=[10, 5, 0, 5]
@ -536,7 +791,7 @@ def create():
)
display_min_spinner.on_change("value", display_min_spinner_callback)
PROJ_STEP = 0.1
PROJ_STEP = 1
def proj_auto_checkbox_callback(state):
if state:
@ -546,7 +801,7 @@ def create():
proj_display_min_spinner.disabled = False
proj_display_max_spinner.disabled = False
update_overview_plot()
_update_overview_plot()
proj_auto_checkbox = CheckboxGroup(
labels=["Projections Intensity Range"], active=[0], width=145, margin=[10, 5, 0, 5]
@ -621,42 +876,53 @@ def create():
index_position=None,
)
detcenter_table_source = ColumnDataSource(dict(gamma=[], omega=[], chi=[], phi=[], nu=[]))
detcenter_table = DataTable(
source=detcenter_table_source,
columns=[
TableColumn(field="gamma", title="Gamma", formatter=num_formatter, width=70),
TableColumn(field="omega", title="Omega", formatter=num_formatter, width=70),
TableColumn(field="chi", title="Chi", formatter=num_formatter, width=70),
TableColumn(field="phi", title="Phi", formatter=num_formatter, width=70),
TableColumn(field="nu", title="Nu", formatter=num_formatter, width=70),
],
height=150,
width=350,
autosize_mode="none",
index_position=None,
)
def add_event_button_callback():
p0 = [1.0, 0.0, 1.0]
maxfev = 100000
scan = _get_selected_scan()
pyzebra.fit_event(
scan,
int(np.floor(frame_range.start)),
int(np.ceil(frame_range.end)),
int(np.floor(det_y_range.start)),
int(np.ceil(det_y_range.end)),
int(np.floor(det_x_range.start)),
int(np.ceil(det_x_range.end)),
)
wave = det_data["wave"]
ddist = det_data["ddist"]
cell = det_data["cell"]
wave = scan["wave"]
ddist = scan["ddist"]
cell = scan["cell"]
gamma = det_data["gamma"][0]
omega = det_data["omega"][0]
nu = det_data["nu"][0]
chi = det_data["chi"][0]
phi = det_data["phi"][0]
gamma = scan["gamma"][0]
omega = scan["omega"][0]
nu = scan["nu"][0]
chi = scan["chi"][0]
phi = scan["phi"][0]
scan_motor = det_data["scan_motor"]
var_angle = det_data[scan_motor]
scan_motor = scan["scan_motor"]
var_angle = scan[scan_motor]
x0 = int(np.floor(det_x_range.start))
xN = int(np.ceil(det_x_range.end))
y0 = int(np.floor(det_y_range.start))
yN = int(np.ceil(det_y_range.end))
fr0 = int(np.floor(frame_range.start))
frN = int(np.ceil(frame_range.end))
data_roi = det_data["data"][fr0:frN, y0:yN, x0:xN]
snr_cnts = scan["fit"]["snr"]
frC = scan["fit"]["frame"]
cnts = np.sum(data_roi, axis=(1, 2))
coeff, _ = curve_fit(gauss, range(len(cnts)), cnts, p0=p0, maxfev=maxfev)
m = cnts.mean()
sd = cnts.std()
snr_cnts = np.where(sd == 0, 0, m / sd)
frC = fr0 + coeff[1]
var_F = var_angle[math.floor(frC)]
var_C = var_angle[math.ceil(frC)]
frStep = frC - math.floor(frC)
var_F = var_angle[int(np.floor(frC))]
var_C = var_angle[int(np.ceil(frC))]
frStep = frC - np.floor(frC)
var_step = var_C - var_F
var_p = var_F + var_step * frStep
@ -671,15 +937,13 @@ def create():
elif scan_motor == "phi":
phi = var_p
intensity = coeff[1] * abs(coeff[2] * var_step) * math.sqrt(2) * math.sqrt(np.pi)
intensity = scan["fit"]["intensity"]
x_pos = scan["fit"]["x_pos"]
y_pos = scan["fit"]["y_pos"]
projX = np.sum(data_roi, axis=(0, 1))
coeff, _ = curve_fit(gauss, range(len(projX)), projX, p0=p0, maxfev=maxfev)
x_pos = x0 + coeff[1]
projY = np.sum(data_roi, axis=(0, 2))
coeff, _ = curve_fit(gauss, range(len(projY)), projY, p0=p0, maxfev=maxfev)
y_pos = y0 + coeff[1]
if scan["zebra_mode"] == "nb":
chi = None
phi = None
events_data["wave"].append(wave)
events_data["ddist"].append(ddist)
@ -697,7 +961,7 @@ def create():
events_table_source.data = events_data
add_event_button = Button(label="Add spind event", width=145)
add_event_button = Button(label="Add peak center", width=145)
add_event_button.on_click(add_event_button_callback)
def remove_event_button_callback():
@ -708,7 +972,7 @@ def create():
events_table_source.data = events_data
remove_event_button = Button(label="Remove spind event", width=145)
remove_event_button = Button(label="Remove peak center", width=145)
remove_event_button.on_click(remove_event_button_callback)
metadata_table_source = ColumnDataSource(dict(geom=[""], temp=[None], mf=[None]))
@ -726,7 +990,23 @@ def create():
)
# Final layout
import_layout = column(proposal_textinput, upload_div, upload_button, file_select)
peak_tables = Tabs(
tabs=[
Panel(child=events_table, title="Actual peak center"),
Panel(child=detcenter_table, title="Peak in the detector center"),
]
)
import_layout = column(
data_source,
upload_cami_div,
upload_cami_button,
upload_hdf_div,
upload_hdf_button,
file_select,
row(file_open_button, file_append_button),
)
layout_image = column(gridplot([[proj_v, None], [plot, proj_h]], merge_tools=False))
colormap_layout = column(
colormap,
@ -738,7 +1018,7 @@ def create():
layout_controls = column(
row(metadata_table, index_spinner, column(Spacer(height=25), index_slider)),
row(column(add_event_button, remove_event_button), events_table),
row(column(add_event_button, remove_event_button), peak_tables),
)
layout_overview = column(
@ -750,42 +1030,37 @@ def create():
),
)
scan_layout = column(
scan_table,
row(monitor_spinner, column(Spacer(height=19), restore_button)),
row(column(Spacer(height=19), merge_button), merge_from_select),
)
tab_layout = row(
column(import_layout, colormap_layout),
column(layout_overview, layout_controls),
column(row(scan_layout, layout_overview), layout_controls),
column(roi_avg_plot, layout_image),
)
return Panel(child=tab_layout, title="hdf viewer")
def gauss(x, *p):
    """Gaussian A * exp(-(x - mu)^2 / (2 * sigma^2)).

    Args:
        x: point(s) at which to evaluate the function
        *p: (A, mu, sigma) - amplitude, center position, width

    Returns:
        Value of the Gaussian at x.
    """
    amplitude, center, width = p
    return amplitude * np.exp(-((x - center) ** 2) / (2.0 * width ** 2))
def calculate_hkl(det_data, index):
def calculate_hkl(scan, index):
h = np.empty(shape=(IMAGE_H, IMAGE_W))
k = np.empty(shape=(IMAGE_H, IMAGE_W))
l = np.empty(shape=(IMAGE_H, IMAGE_W))
wave = det_data["wave"]
ddist = det_data["ddist"]
gammad = det_data["gamma"][index]
om = det_data["omega"][index]
nud = det_data["nu"]
ub = det_data["ub"]
geometry = det_data["zebra_mode"]
wave = scan["wave"]
ddist = scan["ddist"]
gammad = scan["gamma"][index]
om = scan["omega"][index]
nud = scan["nu"]
ub = scan["ub"]
geometry = scan["zebra_mode"]
if geometry == "bi":
chi = det_data["chi"][index]
phi = det_data["phi"][index]
chi = scan["chi"][index]
phi = scan["phi"][index]
elif geometry == "nb":
chi = 0
phi = 0
@ -801,16 +1076,11 @@ def calculate_hkl(det_data, index):
return h, k, l
def calculate_pol(det_data, index):
gamma = np.empty(shape=(IMAGE_H, IMAGE_W))
nu = np.empty(shape=(IMAGE_H, IMAGE_W))
ddist = det_data["ddist"]
gammad = det_data["gamma"][index]
nud = det_data["nu"]
for xi in np.arange(IMAGE_W):
for yi in np.arange(IMAGE_H):
gamma[yi, xi], nu[yi, xi] = pyzebra.det2pol(ddist, gammad, nud, xi, yi)
def calculate_pol(scan, index):
    """Compute per-pixel polar angles (gamma, nu) for detector frame `index`.

    Vectorized over the full IMAGE_H x IMAGE_W detector using an open grid,
    so `pyzebra.det2pol` is called once with broadcast pixel coordinates.
    """
    ddist = scan["ddist"]  # presumably detector distance — confirm in pyzebra.det2pol
    gammad = scan["gamma"][index]  # detector gamma angle for this frame
    nud = scan["nu"]
    # open grid: yi is (IMAGE_H, 1), xi is (1, IMAGE_W); broadcasting yields
    # (IMAGE_H, IMAGE_W) outputs
    yi, xi = np.ogrid[:IMAGE_H, :IMAGE_W]
    gamma, nu = pyzebra.det2pol(ddist, gammad, nud, xi, yi)

    return gamma, nu

View File

@ -11,6 +11,7 @@ from bokeh.layouts import column, row
from bokeh.models import (
BasicTicker,
Button,
CellEditor,
CheckboxEditor,
CheckboxGroup,
ColumnDataSource,
@ -26,6 +27,7 @@ from bokeh.models import (
Legend,
Line,
LinearAxis,
LinearColorMapper,
MultiLine,
MultiSelect,
NumberEditor,
@ -33,6 +35,7 @@ from bokeh.models import (
PanTool,
Plot,
RadioGroup,
Range1d,
ResetTool,
Scatter,
Select,
@ -42,30 +45,33 @@ from bokeh.models import (
TableColumn,
Tabs,
TextAreaInput,
TextInput,
WheelZoomTool,
Whisker,
)
from bokeh.palettes import Category10, Turbo256
from bokeh.transform import linear_cmap
from bokeh.palettes import Category10, Plasma256
from scipy import interpolate
import pyzebra
from pyzebra.ccl_process import AREA_METHODS
javaScript = """
let j = 0;
for (let i = 0; i < js_data.data['fname'].length; i++) {
if (js_data.data['content'][i] === "") continue;
const blob = new Blob([js_data.data['content'][i]], {type: 'text/plain'})
const link = document.createElement('a');
document.body.appendChild(link);
const url = window.URL.createObjectURL(blob);
link.href = url;
link.download = js_data.data['fname'][i];
link.click();
window.URL.revokeObjectURL(url);
document.body.removeChild(link);
setTimeout(function() {
const blob = new Blob([js_data.data['content'][i]], {type: 'text/plain'})
const link = document.createElement('a');
document.body.appendChild(link);
const url = window.URL.createObjectURL(blob);
link.href = url;
link.download = js_data.data['fname'][i] + js_data.data['ext'][i];
link.click();
window.URL.revokeObjectURL(url);
document.body.removeChild(link);
}, 100 * j)
j++;
}
"""
@ -77,170 +83,205 @@ def color_palette(n_colors):
def create():
doc = curdoc()
det_data = []
dataset = []
fit_params = {}
js_data = ColumnDataSource(data=dict(content=["", ""], fname=["", ""]))
js_data = ColumnDataSource(data=dict(content=[""], fname=[""], ext=[""]))
def file_select_update_for_proposal():
proposal = proposal_textinput.value.strip()
if not proposal:
proposal_path = proposal_textinput.name
if proposal_path:
file_list = []
for file in os.listdir(proposal_path):
if file.endswith((".ccl", ".dat")):
file_list.append((os.path.join(proposal_path, file), file))
file_select.options = file_list
file_open_button.disabled = False
file_append_button.disabled = False
else:
file_select.options = []
file_open_button.disabled = True
file_append_button.disabled = True
return
for zebra_proposals_path in pyzebra.ZEBRA_PROPOSALS_PATHS:
proposal_path = os.path.join(zebra_proposals_path, proposal)
if os.path.isdir(proposal_path):
# found it
break
else:
raise ValueError(f"Can not find data for proposal '{proposal}'.")
file_list = []
for file in os.listdir(proposal_path):
if file.endswith((".ccl", ".dat")):
file_list.append((os.path.join(proposal_path, file), file))
file_select.options = file_list
file_open_button.disabled = False
file_append_button.disabled = False
doc.add_periodic_callback(file_select_update_for_proposal, 5000)
def proposal_textinput_callback(_attr, _old, _new):
file_select_update_for_proposal()
proposal_textinput = TextInput(title="Proposal number:", width=210)
proposal_textinput.on_change("value", proposal_textinput_callback)
proposal_textinput = doc.proposal_textinput
proposal_textinput.on_change("name", proposal_textinput_callback)
def _init_datatable():
scan_list = [s["idx"] for s in det_data]
scan_list = [s["idx"] for s in dataset]
export = [s["export"] for s in dataset]
if param_select.value == "user defined":
param = [None] * len(dataset)
else:
param = [scan[param_select.value] for scan in dataset]
file_list = []
for scan in det_data:
for scan in dataset:
file_list.append(os.path.basename(scan["original_filename"]))
scan_table_source.data.update(
file=file_list,
scan=scan_list,
param=[None] * len(scan_list),
fit=[0] * len(scan_list),
export=[True] * len(scan_list),
file=file_list, scan=scan_list, param=param, fit=[0] * len(scan_list), export=export,
)
scan_table_source.selected.indices = []
scan_table_source.selected.indices = [0]
scan_motor_select.options = det_data[0]["scan_motors"]
scan_motor_select.value = det_data[0]["scan_motor"]
param_select.value = "user defined"
scan_motor_select.options = dataset[0]["scan_motors"]
scan_motor_select.value = dataset[0]["scan_motor"]
merge_options = [(str(i), f"{i} ({idx})") for i, idx in enumerate(scan_list)]
merge_from_select.options = merge_options
merge_from_select.value = merge_options[0][0]
file_select = MultiSelect(title="Available .ccl/.dat files:", width=210, height=250)
def file_open_button_callback():
nonlocal det_data
det_data = []
for f_name in file_select.value:
with open(f_name) as file:
nonlocal dataset
new_data = []
for f_path in file_select.value:
with open(f_path) as file:
f_name = os.path.basename(f_path)
base, ext = os.path.splitext(f_name)
if det_data:
append_data = pyzebra.parse_1D(file, ext)
pyzebra.normalize_dataset(append_data, monitor_spinner.value)
det_data.extend(append_data)
else:
det_data = pyzebra.parse_1D(file, ext)
pyzebra.normalize_dataset(det_data, monitor_spinner.value)
js_data.data.update(fname=[base + ".comm", base + ".incomm"])
try:
file_data = pyzebra.parse_1D(file, ext)
except:
print(f"Error loading {f_name}")
continue
_init_datatable()
append_upload_button.disabled = False
pyzebra.normalize_dataset(file_data, monitor_spinner.value)
if not new_data: # first file
new_data = file_data
pyzebra.merge_duplicates(new_data)
js_data.data.update(fname=[base])
else:
pyzebra.merge_datasets(new_data, file_data)
if new_data:
dataset = new_data
_init_datatable()
append_upload_button.disabled = False
file_open_button = Button(label="Open New", width=100, disabled=True)
file_open_button.on_click(file_open_button_callback)
def file_append_button_callback():
for f_name in file_select.value:
with open(f_name) as file:
file_data = []
for f_path in file_select.value:
with open(f_path) as file:
f_name = os.path.basename(f_path)
_, ext = os.path.splitext(f_name)
append_data = pyzebra.parse_1D(file, ext)
try:
file_data = pyzebra.parse_1D(file, ext)
except:
print(f"Error loading {f_name}")
continue
pyzebra.normalize_dataset(append_data, monitor_spinner.value)
det_data.extend(append_data)
pyzebra.normalize_dataset(file_data, monitor_spinner.value)
pyzebra.merge_datasets(dataset, file_data)
_init_datatable()
if file_data:
_init_datatable()
file_append_button = Button(label="Append", width=100, disabled=True)
file_append_button.on_click(file_append_button_callback)
def upload_button_callback(_attr, _old, new):
nonlocal det_data
det_data = []
proposal_textinput.value = ""
for f_str, f_name in zip(new, upload_button.filename):
def upload_button_callback(_attr, _old, _new):
nonlocal dataset
new_data = []
for f_str, f_name in zip(upload_button.value, upload_button.filename):
with io.StringIO(base64.b64decode(f_str).decode()) as file:
base, ext = os.path.splitext(f_name)
if det_data:
append_data = pyzebra.parse_1D(file, ext)
pyzebra.normalize_dataset(append_data, monitor_spinner.value)
det_data.extend(append_data)
else:
det_data = pyzebra.parse_1D(file, ext)
pyzebra.normalize_dataset(det_data, monitor_spinner.value)
js_data.data.update(fname=[base + ".comm", base + ".incomm"])
try:
file_data = pyzebra.parse_1D(file, ext)
except:
print(f"Error loading {f_name}")
continue
_init_datatable()
append_upload_button.disabled = False
pyzebra.normalize_dataset(file_data, monitor_spinner.value)
if not new_data: # first file
new_data = file_data
pyzebra.merge_duplicates(new_data)
js_data.data.update(fname=[base])
else:
pyzebra.merge_datasets(new_data, file_data)
if new_data:
dataset = new_data
_init_datatable()
append_upload_button.disabled = False
upload_div = Div(text="or upload new .ccl/.dat files:", margin=(5, 5, 0, 5))
upload_button = FileInput(accept=".ccl,.dat", multiple=True, width=200)
upload_button.on_change("value", upload_button_callback)
# for on_change("value", ...) or on_change("filename", ...),
# see https://github.com/bokeh/bokeh/issues/11461
upload_button.on_change("filename", upload_button_callback)
def append_upload_button_callback(_attr, _old, new):
for f_str, f_name in zip(new, append_upload_button.filename):
def append_upload_button_callback(_attr, _old, _new):
file_data = []
for f_str, f_name in zip(append_upload_button.value, append_upload_button.filename):
with io.StringIO(base64.b64decode(f_str).decode()) as file:
_, ext = os.path.splitext(f_name)
append_data = pyzebra.parse_1D(file, ext)
try:
file_data = pyzebra.parse_1D(file, ext)
except:
print(f"Error loading {f_name}")
continue
pyzebra.normalize_dataset(append_data, monitor_spinner.value)
det_data.extend(append_data)
pyzebra.normalize_dataset(file_data, monitor_spinner.value)
pyzebra.merge_datasets(dataset, file_data)
_init_datatable()
if file_data:
_init_datatable()
append_upload_div = Div(text="append extra files:", margin=(5, 5, 0, 5))
append_upload_button = FileInput(accept=".ccl,.dat", multiple=True, width=200, disabled=True)
append_upload_button.on_change("value", append_upload_button_callback)
# for on_change("value", ...) or on_change("filename", ...),
# see https://github.com/bokeh/bokeh/issues/11461
append_upload_button.on_change("filename", append_upload_button_callback)
def monitor_spinner_callback(_attr, _old, new):
if det_data:
pyzebra.normalize_dataset(det_data, new)
_update_plot()
if dataset:
pyzebra.normalize_dataset(dataset, new)
_update_single_scan_plot()
_update_overview()
monitor_spinner = Spinner(title="Monitor:", mode="int", value=100_000, low=1, width=145)
monitor_spinner.on_change("value", monitor_spinner_callback)
def scan_motor_select_callback(_attr, _old, new):
if det_data:
for scan in det_data:
if dataset:
for scan in dataset:
scan["scan_motor"] = new
_update_plot()
_update_single_scan_plot()
_update_overview()
scan_motor_select = Select(title="Scan motor:", options=[], width=145)
scan_motor_select.on_change("value", scan_motor_select_callback)
def _update_table():
fit_ok = [(1 if "fit" in scan else 0) for scan in det_data]
scan_table_source.data.update(fit=fit_ok)
fit_ok = [(1 if "fit" in scan else 0) for scan in dataset]
export = [scan["export"] for scan in dataset]
if param_select.value == "user defined":
param = [None] * len(dataset)
else:
param = [scan[param_select.value] for scan in dataset]
def _update_plot():
_update_single_scan_plot(_get_selected_scan())
_update_overview()
scan_table_source.data.update(fit=fit_ok, export=export, param=param)
def _update_single_scan_plot(scan):
def _update_single_scan_plot():
scan = _get_selected_scan()
scan_motor = scan["scan_motor"]
y = scan["counts"]
y_err = scan["counts_err"]
x = scan[scan_motor]
plot.axis[0].axis_label = scan_motor
plot_scatter_source.data.update(x=x, y=y, y_upper=y + np.sqrt(y), y_lower=y - np.sqrt(y))
plot_scatter_source.data.update(x=x, y=y, y_upper=y + y_err, y_lower=y - y_err)
fit = scan.get("fit")
if fit is not None:
@ -281,7 +322,7 @@ def create():
par = []
for s, p in enumerate(scan_table_source.data["param"]):
if p is not None:
scan = det_data[s]
scan = dataset[s]
scan_motor = scan["scan_motor"]
xs.append(scan[scan_motor])
x.extend(scan[scan_motor])
@ -290,43 +331,58 @@ def create():
param.append(float(p))
par.extend(scan["counts"])
if det_data:
scan_motor = det_data[0]["scan_motor"]
if dataset:
scan_motor = dataset[0]["scan_motor"]
ov_plot.axis[0].axis_label = scan_motor
ov_param_plot.axis[0].axis_label = scan_motor
ov_plot_mline_source.data.update(xs=xs, ys=ys, param=param, color=color_palette(len(xs)))
if y:
mapper["transform"].low = np.min([np.min(y) for y in ys])
mapper["transform"].high = np.max([np.max(y) for y in ys])
ov_param_plot_scatter_source.data.update(x=x, y=y, param=par)
ov_param_plot_scatter_source.data.update(x=x, y=y)
if y:
interp_f = interpolate.interp2d(x, y, par)
x1, x2 = min(x), max(x)
y1, y2 = min(y), max(y)
image = interp_f(
np.linspace(x1, x2, ov_param_plot.inner_width // 10),
np.linspace(y1, y2, ov_param_plot.inner_height // 10),
assume_sorted=True,
grid_x, grid_y = np.meshgrid(
np.linspace(x1, x2, ov_param_plot.inner_width),
np.linspace(y1, y2, ov_param_plot.inner_height),
)
image = interpolate.griddata((x, y), par, (grid_x, grid_y))
ov_param_plot_image_source.data.update(
image=[image], x=[x1], y=[y1], dw=[x2 - x1], dh=[y2 - y1]
)
x_range = ov_param_plot.x_range
x_range.start, x_range.end = x1, x2
x_range.reset_start, x_range.reset_end = x1, x2
x_range.bounds = (x1, x2)
y_range = ov_param_plot.y_range
y_range.start, y_range.end = y1, y2
y_range.reset_start, y_range.reset_end = y1, y2
y_range.bounds = (y1, y2)
else:
ov_param_plot_image_source.data.update(image=[], x=[], y=[], dw=[], dh=[])
def _update_param_plot():
x = []
y = []
y_lower = []
y_upper = []
fit_param = fit_param_select.value
for s, p in zip(det_data, scan_table_source.data["param"]):
for s, p in zip(dataset, scan_table_source.data["param"]):
if "fit" in s and fit_param:
x.append(p)
y.append(s["fit"].values[fit_param])
param_fit_val = s["fit"].params[fit_param].value
param_fit_std = s["fit"].params[fit_param].stderr
if param_fit_std is None:
param_fit_std = 0
y.append(param_fit_val)
y_lower.append(param_fit_val - param_fit_std)
y_upper.append(param_fit_val + param_fit_std)
param_plot_scatter_source.data.update(x=x, y=y)
param_plot_scatter_source.data.update(x=x, y=y, y_lower=y_lower, y_upper=y_upper)
# Main plot
plot = Plot(
@ -344,7 +400,7 @@ def create():
plot_scatter_source = ColumnDataSource(dict(x=[0], y=[0], y_upper=[0], y_lower=[0]))
plot_scatter = plot.add_glyph(
plot_scatter_source, Scatter(x="x", y="y", line_color="steelblue")
plot_scatter_source, Scatter(x="x", y="y", line_color="steelblue", fill_color="steelblue")
)
plot.add_layout(Whisker(source=plot_scatter_source, base="x", upper="y_upper", lower="y_lower"))
@ -402,9 +458,7 @@ def create():
ov_plot.toolbar.logo = None
# Overview perams plot
ov_param_plot = Plot(
x_range=DataRange1d(), y_range=DataRange1d(), plot_height=450, plot_width=700
)
ov_param_plot = Plot(x_range=Range1d(), y_range=Range1d(), plot_height=450, plot_width=700)
ov_param_plot.add_layout(LinearAxis(axis_label="Param"), place="left")
ov_param_plot.add_layout(LinearAxis(axis_label="Scan motor"), place="below")
@ -412,16 +466,16 @@ def create():
ov_param_plot.add_layout(Grid(dimension=0, ticker=BasicTicker()))
ov_param_plot.add_layout(Grid(dimension=1, ticker=BasicTicker()))
color_mapper = LinearColorMapper(palette=Plasma256)
ov_param_plot_image_source = ColumnDataSource(dict(image=[], x=[], y=[], dw=[], dh=[]))
ov_param_plot.add_glyph(
ov_param_plot_image_source, Image(image="image", x="x", y="y", dw="dw", dh="dh")
ov_param_plot_image_source,
Image(image="image", x="x", y="y", dw="dw", dh="dh", color_mapper=color_mapper),
)
ov_param_plot_scatter_source = ColumnDataSource(dict(x=[], y=[], param=[]))
mapper = linear_cmap(field_name="param", palette=Turbo256, low=0, high=50)
ov_param_plot_scatter_source = ColumnDataSource(dict(x=[], y=[]))
ov_param_plot.add_glyph(
ov_param_plot_scatter_source,
Scatter(x="x", y="y", line_color=mapper, fill_color=mapper, size=10),
ov_param_plot_scatter_source, Scatter(x="x", y="y", marker="dot", size=15),
)
ov_param_plot.add_tools(PanTool(), WheelZoomTool(), ResetTool())
@ -436,8 +490,11 @@ def create():
param_plot.add_layout(Grid(dimension=0, ticker=BasicTicker()))
param_plot.add_layout(Grid(dimension=1, ticker=BasicTicker()))
param_plot_scatter_source = ColumnDataSource(dict(x=[], y=[]))
param_plot_scatter_source = ColumnDataSource(dict(x=[], y=[], y_upper=[], y_lower=[]))
param_plot.add_glyph(param_plot_scatter_source, Scatter(x="x", y="y"))
param_plot.add_layout(
Whisker(source=param_plot_scatter_source, base="x", upper="y_upper", lower="y_lower")
)
param_plot.add_tools(PanTool(), WheelZoomTool(), ResetTool())
param_plot.toolbar.logo = None
@ -474,46 +531,68 @@ def create():
# skip unnecessary update caused by selection drop
return
_update_plot()
_update_single_scan_plot()
def scan_table_source_callback(_attr, _old, _new):
def scan_table_source_callback(_attr, _old, new):
# unfortunately, we don't know if the change comes from data update or user input
# also `old` and `new` are the same for non-scalars
for scan, export in zip(dataset, new["export"]):
scan["export"] = export
_update_overview()
_update_param_plot()
_update_preview()
scan_table_source = ColumnDataSource(dict(file=[], scan=[], param=[], fit=[], export=[]))
scan_table_source.on_change("data", scan_table_source_callback)
scan_table_source.selected.on_change("indices", scan_table_select_callback)
scan_table = DataTable(
source=scan_table_source,
columns=[
TableColumn(field="file", title="file", width=150),
TableColumn(field="scan", title="scan", width=50),
TableColumn(field="file", title="file", editor=CellEditor(), width=150),
TableColumn(field="scan", title="scan", editor=CellEditor(), width=50),
TableColumn(field="param", title="param", editor=NumberEditor(), width=50),
TableColumn(field="fit", title="Fit", width=50),
TableColumn(field="fit", title="Fit", editor=CellEditor(), width=50),
TableColumn(field="export", title="Export", editor=CheckboxEditor(), width=50),
],
width=410, # +60 because of the index column
height=350,
editable=True,
autosize_mode="none",
)
def scan_table_source_callback(_attr, _old, _new):
if scan_table_source.selected.indices:
_update_plot()
merge_from_select = Select(title="scan:", width=145)
scan_table_source.selected.on_change("indices", scan_table_select_callback)
scan_table_source.on_change("data", scan_table_source_callback)
def merge_button_callback():
scan_into = _get_selected_scan()
scan_from = dataset[int(merge_from_select.value)]
if scan_into is scan_from:
print("WARNING: Selected scans for merging are identical")
return
pyzebra.merge_scans(scan_into, scan_from)
_update_table()
_update_single_scan_plot()
_update_overview()
merge_button = Button(label="Merge into current", width=145)
merge_button.on_click(merge_button_callback)
def restore_button_callback():
pyzebra.restore_scan(_get_selected_scan())
_update_table()
_update_single_scan_plot()
_update_overview()
restore_button = Button(label="Restore scan", width=145)
restore_button.on_click(restore_button_callback)
def _get_selected_scan():
return det_data[scan_table_source.selected.indices[0]]
return dataset[scan_table_source.selected.indices[0]]
def param_select_callback(_attr, _old, new):
if new == "user defined":
param = [None] * len(det_data)
else:
param = [scan[new] for scan in det_data]
scan_table_source.data["param"] = param
_update_param_plot()
def param_select_callback(_attr, _old, _new):
_update_table()
param_select = Select(
title="Parameter:",
@ -622,7 +701,7 @@ def create():
fitparams_table = DataTable(
source=fitparams_table_source,
columns=[
TableColumn(field="param", title="Parameter"),
TableColumn(field="param", title="Parameter", editor=CellEditor()),
TableColumn(field="value", title="Value", editor=NumberEditor()),
TableColumn(field="vary", title="Vary", editor=CheckboxEditor()),
TableColumn(field="min", title="Min", editor=NumberEditor()),
@ -643,8 +722,8 @@ def create():
fit_output_textinput = TextAreaInput(title="Fit results:", width=750, height=200)
def proc_all_button_callback():
for scan, export in zip(det_data, scan_table_source.data["export"]):
if export:
for scan in dataset:
if scan["export"]:
pyzebra.fit_scan(
scan, fit_params, fit_from=fit_from_spinner.value, fit_to=fit_to_spinner.value
)
@ -654,16 +733,16 @@ def create():
lorentz=lorentz_checkbox.active,
)
_update_plot()
_update_single_scan_plot()
_update_overview()
_update_table()
for scan in det_data:
for scan in dataset:
if "fit" in scan:
options = list(scan["fit"].params.keys())
fit_param_select.options = options
fit_param_select.value = options[0]
break
_update_param_plot()
proc_all_button = Button(label="Process All", button_type="primary", width=145)
proc_all_button.on_click(proc_all_button_callback)
@ -679,16 +758,16 @@ def create():
lorentz=lorentz_checkbox.active,
)
_update_plot()
_update_single_scan_plot()
_update_overview()
_update_table()
for scan in det_data:
for scan in dataset:
if "fit" in scan:
options = list(scan["fit"].params.keys())
fit_param_select.options = options
fit_param_select.value = options[0]
break
_update_param_plot()
proc_button = Button(label="Process Current", width=145)
proc_button.on_click(proc_button_callback)
@ -698,34 +777,36 @@ def create():
lorentz_checkbox = CheckboxGroup(labels=["Lorentz Correction"], width=145, margin=(13, 5, 5, 5))
export_preview_textinput = TextAreaInput(title="Export file(s) preview:", width=450, height=400)
export_preview_textinput = TextAreaInput(title="Export file preview:", width=450, height=400)
def _update_preview():
with tempfile.TemporaryDirectory() as temp_dir:
temp_file = temp_dir + "/temp"
export_data = []
for s, export in zip(det_data, scan_table_source.data["export"]):
if export:
export_data.append(s)
param_data = []
for scan, param in zip(dataset, scan_table_source.data["param"]):
if scan["export"] and param:
export_data.append(scan)
param_data.append(param)
# pyzebra.export_1D(export_data, temp_file, "fullprof")
pyzebra.export_param_study(export_data, param_data, temp_file)
exported_content = ""
file_content = []
for ext in (".comm", ".incomm"):
fname = temp_file + ext
if os.path.isfile(fname):
with open(fname) as f:
content = f.read()
exported_content += f"{ext} file:\n" + content
else:
content = ""
file_content.append(content)
fname = temp_file
if os.path.isfile(fname):
with open(fname) as f:
content = f.read()
exported_content += content
else:
content = ""
file_content.append(content)
js_data.data.update(content=file_content)
export_preview_textinput.value = exported_content
save_button = Button(label="Download File(s)", button_type="success", width=220)
save_button = Button(label="Download File", button_type="success", width=220)
save_button.js_on_click(CustomJS(args={"js_data": js_data}, code=javaScript))
fitpeak_controls = row(
@ -736,10 +817,13 @@ def create():
column(fit_to_spinner, proc_button, proc_all_button),
)
scan_layout = column(scan_table, row(monitor_spinner, scan_motor_select, param_select))
scan_layout = column(
scan_table,
row(monitor_spinner, scan_motor_select, param_select),
row(column(Spacer(height=19), row(restore_button, merge_button)), merge_from_select),
)
import_layout = column(
proposal_textinput,
file_select,
row(file_open_button, file_append_button),
upload_div,

View File

@ -24,7 +24,7 @@ def create():
events_data = doc.events_data
npeaks_spinner = Spinner(title="Number of peaks from hdf_view panel:", disabled=True)
lattice_const_textinput = TextInput(title="Lattice constants:", disabled=True)
lattice_const_textinput = TextInput(title="Lattice constants:")
max_res_spinner = Spinner(title="max-res:", value=2, step=0.01, width=145)
seed_pool_size_spinner = Spinner(title="seed-pool-size:", value=5, step=0.01, width=145)
seed_len_tol_spinner = Spinner(title="seed-len-tol:", value=0.02, step=0.01, width=145)

View File

@ -1,5 +1,6 @@
import os
import re
from ast import literal_eval
from collections import defaultdict
import numpy as np
@ -56,7 +57,7 @@ META_VARS_FLOAT = (
"s2hl",
)
META_UB_MATRIX = ("ub1j", "ub2j", "ub3j")
META_UB_MATRIX = ("ub1j", "ub2j", "ub3j", "UB")
CCL_FIRST_LINE = (("idx", int), ("h", float), ("k", float), ("l", float))
@ -93,9 +94,9 @@ def load_1D(filepath):
"""
with open(filepath, "r") as infile:
_, ext = os.path.splitext(filepath)
det_variables = parse_1D(infile, data_type=ext)
dataset = parse_1D(infile, data_type=ext)
return det_variables
return dataset
def parse_1D(fileobj, data_type):
@ -108,21 +109,29 @@ def parse_1D(fileobj, data_type):
variable = variable.strip()
value = value.strip()
if variable in META_VARS_STR:
metadata[variable] = value
try:
if variable in META_VARS_STR:
metadata[variable] = value
elif variable in META_VARS_FLOAT:
if variable == "2-theta": # fix that angle name not to be an expression
variable = "twotheta"
if variable in ("a", "b", "c", "alpha", "beta", "gamma"):
variable += "_cell"
metadata[variable] = float(value)
elif variable in META_VARS_FLOAT:
if variable == "2-theta": # fix that angle name not to be an expression
variable = "twotheta"
if variable in ("a", "b", "c", "alpha", "beta", "gamma"):
variable += "_cell"
metadata[variable] = float(value)
elif variable in META_UB_MATRIX:
if "ub" not in metadata:
metadata["ub"] = np.zeros((3, 3))
row = int(variable[-2]) - 1
metadata["ub"][row, :] = list(map(float, value.split()))
elif variable in META_UB_MATRIX:
if variable == "UB":
metadata["ub"] = np.array(literal_eval(value)).reshape(3, 3)
else:
if "ub" not in metadata:
metadata["ub"] = np.zeros((3, 3))
row = int(variable[-2]) - 1
metadata["ub"][row, :] = list(map(float, value.split()))
except Exception:
print(f"Error reading {variable} with value '{value}'")
metadata[variable] = 0
if "#data" in line:
# this is the end of metadata and the start of data section
@ -133,7 +142,7 @@ def parse_1D(fileobj, data_type):
metadata["zebra_mode"] = "nb"
# read data
scan = []
dataset = []
if data_type == ".ccl":
ccl_first_line = CCL_FIRST_LINE + CCL_ANGLES[metadata["zebra_mode"]]
ccl_second_line = CCL_SECOND_LINE
@ -143,44 +152,52 @@ def parse_1D(fileobj, data_type):
if not line or line.isspace():
continue
s = {}
scan = {}
scan["export"] = True
# first line
for param, (param_name, param_type) in zip(line.split(), ccl_first_line):
s[param_name] = param_type(param)
scan[param_name] = param_type(param)
# second line
next_line = next(fileobj)
for param, (param_name, param_type) in zip(next_line.split(), ccl_second_line):
s[param_name] = param_type(param)
scan[param_name] = param_type(param)
if s["scan_motor"] != "om":
if "scan_motor" not in scan:
scan["scan_motor"] = "om"
if scan["scan_motor"] != "om":
raise Exception("Unsupported variable name in ccl file.")
# "om" -> "omega"
s["scan_motor"] = "omega"
s["scan_motors"] = ["omega", ]
scan["scan_motor"] = "omega"
scan["scan_motors"] = ["omega", ]
# overwrite metadata, because it only refers to the scan center
half_dist = (s["n_points"] - 1) / 2 * s["angle_step"]
s["omega"] = np.linspace(s["omega"] - half_dist, s["omega"] + half_dist, s["n_points"])
half_dist = (scan["n_points"] - 1) / 2 * scan["angle_step"]
scan["omega"] = np.linspace(
scan["omega"] - half_dist, scan["omega"] + half_dist, scan["n_points"]
)
# subsequent lines with counts
counts = []
while len(counts) < s["n_points"]:
while len(counts) < scan["n_points"]:
counts.extend(map(float, next(fileobj).split()))
s["counts"] = np.array(counts)
scan["counts"] = np.array(counts)
scan["counts_err"] = np.sqrt(np.maximum(scan["counts"], 1))
if s["h"].is_integer() and s["k"].is_integer() and s["l"].is_integer():
s["h"], s["k"], s["l"] = map(int, (s["h"], s["k"], s["l"]))
if scan["h"].is_integer() and scan["k"].is_integer() and scan["l"].is_integer():
scan["h"], scan["k"], scan["l"] = map(int, (scan["h"], scan["k"], scan["l"]))
scan.append({**metadata, **s})
dataset.append({**metadata, **scan})
elif data_type == ".dat":
# TODO: this might need to be adapted in the future, when "gamma" will be added to dat files
if metadata["zebra_mode"] == "nb":
metadata["gamma"] = metadata["twotheta"]
s = defaultdict(list)
scan = defaultdict(list)
scan["export"] = True
match = re.search("Scanning Variables: (.*), Steps: (.*)", next(fileobj))
motors = [motor.lower() for motor in match.group(1).split(", ")]
@ -189,7 +206,8 @@ def parse_1D(fileobj, data_type):
match = re.search("(.*) Points, Mode: (.*), Preset (.*)", next(fileobj))
if match.group(2) != "Monitor":
raise Exception("Unknown mode in dat file.")
s["monitor"] = float(match.group(3))
scan["n_points"] = int(match.group(1))
scan["monitor"] = float(match.group(3))
col_names = list(map(str.lower, next(fileobj).split()))
@ -199,57 +217,56 @@ def parse_1D(fileobj, data_type):
break
for name, val in zip(col_names, line.split()):
s[name].append(float(val))
scan[name].append(float(val))
for name in col_names:
s[name] = np.array(s[name])
scan[name] = np.array(scan[name])
s["scan_motors"] = []
scan["counts_err"] = np.sqrt(np.maximum(scan["counts"], 1))
scan["scan_motors"] = []
for motor, step in zip(motors, steps):
if step == 0:
# it's not a scan motor, so keep only the median value
s[motor] = np.median(s[motor])
scan[motor] = np.median(scan[motor])
else:
s["scan_motors"].append(motor)
s["scan_motor"] = s["scan_motors"][0]
scan["scan_motors"].append(motor)
# "om" -> "omega"
if "om" in s["scan_motors"]:
s["scan_motors"][s["scan_motors"].index("om")] = "omega"
if s["scan_motor"] == "om":
s["scan_motor"] = "omega"
s["omega"] = s["om"]
del s["om"]
if "om" in scan["scan_motors"]:
scan["scan_motors"][scan["scan_motors"].index("om")] = "omega"
scan["omega"] = scan["om"]
del scan["om"]
# "tt" -> "temp"
elif "tt" in s["scan_motors"]:
s["scan_motors"][s["scan_motors"].index("tt")] = "temp"
if s["scan_motor"] == "tt":
s["scan_motor"] = "temp"
s["temp"] = s["tt"]
del s["tt"]
if "tt" in scan["scan_motors"]:
scan["scan_motors"][scan["scan_motors"].index("tt")] = "temp"
scan["temp"] = scan["tt"]
del scan["tt"]
# "mf" stays "mf"
# "phi" stays "phi"
if "h" not in s:
s["h"] = s["k"] = s["l"] = float("nan")
scan["scan_motor"] = scan["scan_motors"][0]
if "h" not in scan:
scan["h"] = scan["k"] = scan["l"] = float("nan")
for param in ("mf", "temp"):
if param not in metadata:
s[param] = 0
scan[param] = 0
s["idx"] = 1
scan["idx"] = 1
scan.append({**metadata, **s})
dataset.append({**metadata, **scan})
else:
print("Unknown file extention")
return scan
return dataset
def export_1D(data, path, export_target, hkl_precision=2):
def export_1D(dataset, path, export_target, hkl_precision=2):
"""Exports data in the .comm/.incomm format for fullprof or .col/.incol format for jana.
Scans with integer/real hkl values are saved in .comm/.incomm or .col/.incol files
@ -259,11 +276,11 @@ def export_1D(data, path, export_target, hkl_precision=2):
if export_target not in EXPORT_TARGETS:
raise ValueError(f"Unknown export target: {export_target}.")
zebra_mode = data[0]["zebra_mode"]
zebra_mode = dataset[0]["zebra_mode"]
exts = EXPORT_TARGETS[export_target]
file_content = {ext: [] for ext in exts}
for scan in data:
for scan in dataset:
if "fit" not in scan:
continue
@ -301,3 +318,92 @@ def export_1D(data, path, export_target, hkl_precision=2):
if content:
with open(path + ext, "w") as out_file:
out_file.writelines(content)
def export_ccl_compare(dataset1, dataset2, path, export_target, hkl_precision=2):
"""Exports compare data in the .comm/.incomm format for fullprof or .col/.incol format for jana.
Scans with integer/real hkl values are saved in .comm/.incomm or .col/.incol files
correspondingly. If no scans are present for a particular output format, that file won't be
created.
"""
if export_target not in EXPORT_TARGETS:
raise ValueError(f"Unknown export target: {export_target}.")
zebra_mode = dataset1[0]["zebra_mode"]
exts = EXPORT_TARGETS[export_target]
file_content = {ext: [] for ext in exts}
for scan1, scan2 in zip(dataset1, dataset2):
if "fit" not in scan1:
continue
idx_str = f"{scan1['idx']:6}"
h, k, l = scan1["h"], scan1["k"], scan1["l"]
hkl_are_integers = isinstance(h, int) # if True, other indices are of type 'int' too
if hkl_are_integers:
hkl_str = f"{h:4}{k:4}{l:4}"
else:
hkl_str = f"{h:8.{hkl_precision}f}{k:8.{hkl_precision}f}{l:8.{hkl_precision}f}"
area_n1, area_s1 = scan1["area"]
area_n2, area_s2 = scan2["area"]
area_n = area_n1 - area_n2
area_s = np.sqrt(area_s1 ** 2 + area_s2 ** 2)
area_str = f"{area_n:10.2f}{area_s:10.2f}"
ang_str = ""
for angle, _ in CCL_ANGLES[zebra_mode]:
if angle == scan1["scan_motor"]:
angle_center = (np.min(scan1[angle]) + np.max(scan1[angle])) / 2
else:
angle_center = scan1[angle]
if angle == "twotheta" and export_target == "jana":
angle_center /= 2
ang_str = ang_str + f"{angle_center:8g}"
if export_target == "jana":
ang_str = ang_str + f"{scan1['temp']:8}" + f"{scan1['monitor']:8}"
ref = file_content[exts[0]] if hkl_are_integers else file_content[exts[1]]
ref.append(idx_str + hkl_str + area_str + ang_str + "\n")
for ext, content in file_content.items():
if content:
with open(path + ext, "w") as out_file:
out_file.writelines(content)
def export_param_study(dataset, param_data, path):
file_content = []
for scan, param in zip(dataset, param_data):
if "fit" not in scan:
continue
if not file_content:
title_str = f"{'param':12}"
for fit_param_name in scan["fit"].params:
title_str = title_str + f"{fit_param_name:20}" + f"{'std_' + fit_param_name:20}"
title_str = title_str + "file"
file_content.append(title_str + "\n")
param_str = f"{param:<12.2f}"
fit_str = ""
for fit_param in scan["fit"].params.values():
fit_param_val = fit_param.value
fit_param_std = fit_param.stderr
if fit_param_std is None:
fit_param_std = 0
fit_str = fit_str + f"{fit_param_val:<20.2f}" + f"{fit_param_std:<20.2f}"
_, fname_str = os.path.split(scan["original_filename"])
file_content.append(param_str + fit_str + fname_str + "\n")
if file_content:
with open(path, "w") as out_file:
out_file.writelines(file_content)

View File

@ -1,4 +1,3 @@
import itertools
import os
import numpy as np
@ -23,6 +22,8 @@ MAX_RANGE_GAP = {
"omega": 0.5,
}
MOTOR_POS_PRECISION = 0.01
AREA_METHODS = ("fit_area", "int_area")
@ -30,6 +31,7 @@ def normalize_dataset(dataset, monitor=100_000):
for scan in dataset:
monitor_ratio = monitor / scan["monitor"]
scan["counts"] *= monitor_ratio
scan["counts_err"] *= monitor_ratio
scan["monitor"] = monitor
@ -47,32 +49,49 @@ def _parameters_match(scan1, scan2):
if zebra_mode != scan2["zebra_mode"]:
return False
for param in ("ub", "temp", "mf", *(vars[0] for vars in CCL_ANGLES[zebra_mode])):
for param in ("ub", *(vars[0] for vars in CCL_ANGLES[zebra_mode])):
if param.startswith("skip"):
# ignore skip parameters, like the last angle in 'nb' zebra mode
continue
if param == scan1["scan_motor"] == scan2["scan_motor"]:
# check if ranges of variable parameter overlap
range1 = scan1[param]
range2 = scan2[param]
r1_start, r1_end = scan1[param][0], scan1[param][-1]
r2_start, r2_end = scan2[param][0], scan2[param][-1]
# support reversed ranges
if r1_start > r1_end:
r1_start, r1_end = r1_end, r1_start
if r2_start > r2_end:
r2_start, r2_end = r2_end, r2_start
# maximum gap between ranges of the scanning parameter (default 0)
max_range_gap = MAX_RANGE_GAP.get(param, 0)
if max(range1[0] - range2[-1], range2[0] - range1[-1]) > max_range_gap:
if max(r1_start - r2_end, r2_start - r1_end) > max_range_gap:
return False
elif np.max(np.abs(scan1[param] - scan2[param])) > PARAM_PRECISIONS[param]:
elif (
np.max(np.abs(np.median(scan1[param]) - np.median(scan2[param])))
> PARAM_PRECISIONS[param]
):
return False
return True
def merge_datasets(dataset_into, dataset_from):
scan_motors_into = dataset_into[0]["scan_motors"]
scan_motors_from = dataset_from[0]["scan_motors"]
if scan_motors_into != scan_motors_from:
print(f"Scan motors mismatch between datasets: {scan_motors_into} vs {scan_motors_from}")
return
merged = np.zeros(len(dataset_from), dtype=np.bool)
for scan_into in dataset_into:
for ind, scan_from in enumerate(dataset_from):
if _parameters_match(scan_into, scan_from) and not merged[ind]:
merge_scans(scan_into, scan_from)
if scan_into["counts"].ndim == 3:
merge_h5_scans(scan_into, scan_from)
else: # scan_into["counts"].ndim == 1
merge_scans(scan_into, scan_from)
merged[ind] = True
for scan_from in dataset_from:
@ -80,20 +99,118 @@ def merge_datasets(dataset_into, dataset_from):
def merge_scans(scan_into, scan_from):
# TODO: does it need to be "scan_motor" instead of omega for a generalized solution?
if "init_omega" not in scan_into:
scan_into["init_omega"] = scan_into["omega"]
scan_into["init_counts"] = scan_into["counts"]
if "init_scan" not in scan_into:
scan_into["init_scan"] = scan_into.copy()
omega = np.concatenate((scan_into["omega"], scan_from["omega"]))
counts = np.concatenate((scan_into["counts"], scan_from["counts"]))
if "merged_scans" not in scan_into:
scan_into["merged_scans"] = []
index = np.argsort(omega)
if scan_from in scan_into["merged_scans"]:
return
scan_into["omega"] = omega[index]
scan_into["counts"] = counts[index]
scan_into["merged_scans"].append(scan_from)
scan_from["active"] = False
scan_motor = scan_into["scan_motor"] # the same as scan_from["scan_motor"]
pos_all = np.array([])
val_all = np.array([])
err_all = np.array([])
for scan in [scan_into["init_scan"], *scan_into["merged_scans"]]:
pos_all = np.append(pos_all, scan[scan_motor])
val_all = np.append(val_all, scan["counts"])
err_all = np.append(err_all, scan["counts_err"] ** 2)
sort_index = np.argsort(pos_all)
pos_all = pos_all[sort_index]
val_all = val_all[sort_index]
err_all = err_all[sort_index]
pos_tmp = pos_all[:1]
val_tmp = val_all[:1]
err_tmp = err_all[:1]
num_tmp = np.array([1])
for pos, val, err in zip(pos_all[1:], val_all[1:], err_all[1:]):
if pos - pos_tmp[-1] < MOTOR_POS_PRECISION:
# the repeated motor position
val_tmp[-1] += val
err_tmp[-1] += err
num_tmp[-1] += 1
else:
# a new motor position
pos_tmp = np.append(pos_tmp, pos)
val_tmp = np.append(val_tmp, val)
err_tmp = np.append(err_tmp, err)
num_tmp = np.append(num_tmp, 1)
scan_into[scan_motor] = pos_tmp
scan_into["counts"] = val_tmp / num_tmp
scan_into["counts_err"] = np.sqrt(err_tmp) / num_tmp
scan_from["export"] = False
fname1 = os.path.basename(scan_into["original_filename"])
fname2 = os.path.basename(scan_from["original_filename"])
print(f'Merging scans: {scan_into["idx"]} ({fname1}) <-- {scan_from["idx"]} ({fname2})')
def merge_h5_scans(scan_into, scan_from):
if "init_scan" not in scan_into:
scan_into["init_scan"] = scan_into.copy()
if "merged_scans" not in scan_into:
scan_into["merged_scans"] = []
for scan in scan_into["merged_scans"]:
if scan_from is scan:
print("Already merged scan")
return
scan_into["merged_scans"].append(scan_from)
scan_motor = scan_into["scan_motor"] # the same as scan_from["scan_motor"]
pos_all = [scan_into["init_scan"][scan_motor]]
val_all = [scan_into["init_scan"]["counts"]]
err_all = [scan_into["init_scan"]["counts_err"] ** 2]
for scan in scan_into["merged_scans"]:
pos_all.append(scan[scan_motor])
val_all.append(scan["counts"])
err_all.append(scan["counts_err"] ** 2)
pos_all = np.concatenate(pos_all)
val_all = np.concatenate(val_all)
err_all = np.concatenate(err_all)
sort_index = np.argsort(pos_all)
pos_all = pos_all[sort_index]
val_all = val_all[sort_index]
err_all = err_all[sort_index]
pos_tmp = [pos_all[0]]
val_tmp = [val_all[:1]]
err_tmp = [err_all[:1]]
num_tmp = [1]
for pos, val, err in zip(pos_all[1:], val_all[1:], err_all[1:]):
if pos - pos_tmp[-1] < MOTOR_POS_PRECISION:
# the repeated motor position
val_tmp[-1] += val
err_tmp[-1] += err
num_tmp[-1] += 1
else:
# a new motor position
pos_tmp.append(pos)
val_tmp.append(val[None, :])
err_tmp.append(err[None, :])
num_tmp.append(1)
pos_tmp = np.array(pos_tmp)
val_tmp = np.concatenate(val_tmp)
err_tmp = np.concatenate(err_tmp)
num_tmp = np.array(num_tmp)
scan_into[scan_motor] = pos_tmp
scan_into["counts"] = val_tmp / num_tmp[:, None, None]
scan_into["counts_err"] = np.sqrt(err_tmp) / num_tmp[:, None, None]
scan_from["export"] = False
fname1 = os.path.basename(scan_into["original_filename"])
fname2 = os.path.basename(scan_from["original_filename"])
@ -101,11 +218,18 @@ def merge_scans(scan_into, scan_from):
def restore_scan(scan):
if "init_omega" in scan:
scan["omega"] = scan["init_omega"]
scan["counts"] = scan["init_counts"]
del scan["init_omega"]
del scan["init_counts"]
if "merged_scans" in scan:
for merged_scan in scan["merged_scans"]:
merged_scan["export"] = True
if "init_scan" in scan:
tmp = scan["init_scan"]
scan.clear()
scan.update(tmp)
# force scan export to True, otherwise in the sequence of incorrectly merged scans
# a <- b <- c the scan b will be restored with scan["export"] = False if restoring executed
# in the same order, i.e. restore a -> restore b
scan["export"] = True
def fit_scan(scan, model_dict, fit_from=None, fit_to=None):
@ -115,11 +239,17 @@ def fit_scan(scan, model_dict, fit_from=None, fit_to=None):
fit_to = np.inf
y_fit = scan["counts"]
y_err = scan["counts_err"]
x_fit = scan[scan["scan_motor"]]
# apply fitting range
fit_ind = (fit_from <= x_fit) & (x_fit <= fit_to)
if not np.any(fit_ind):
print(f"No data in fit range for scan {scan['idx']}")
return
y_fit = y_fit[fit_ind]
y_err = y_err[fit_ind]
x_fit = x_fit[fit_ind]
model = None
@ -167,11 +297,13 @@ def fit_scan(scan, model_dict, fit_from=None, fit_to=None):
else:
model += _model
weights = [1 / np.sqrt(val) if val != 0 else 1 for val in y_fit]
scan["fit"] = model.fit(y_fit, x=x_fit, weights=weights)
scan["fit"] = model.fit(y_fit, x=x_fit, weights=1 / y_err)
def get_area(scan, area_method, lorentz):
if "fit" not in scan:
return
if area_method not in AREA_METHODS:
raise ValueError(f"Unknown area method: {area_method}.")
@ -180,12 +312,8 @@ def get_area(scan, area_method, lorentz):
area_s = 0
for name, param in scan["fit"].params.items():
if "amplitude" in name:
if param.stderr is None:
area_v = np.nan
area_s = np.nan
else:
area_v += param.value
area_s += param.stderr
area_v += np.nan if param.value is None else param.value
area_s += np.nan if param.stderr is None else param.stderr
else: # area_method == "int_area"
y_val = scan["counts"]

View File

@ -1,10 +1,10 @@
import h5py
import numpy as np
from lmfit.models import Gaussian2dModel, GaussianModel
META_MATRIX = ("UB")
META_CELL = ("cell")
META_STR = ("name")
META_MATRIX = ("UB", )
META_CELL = ("cell", )
META_STR = ("name", )
def read_h5meta(filepath):
"""Open and parse content of a h5meta file.
@ -68,70 +68,127 @@ def read_detector_data(filepath, cami_meta=None):
ndarray: A 3D array of data, omega, gamma, nu.
"""
with h5py.File(filepath, "r") as h5f:
data = h5f["/entry1/area_detector2/data"][:]
counts = h5f["/entry1/area_detector2/data"][:].astype(np.float64)
# reshape data to a correct shape (2006 issue)
n, cols, rows = data.shape
data = data.reshape(n, rows, cols)
n, cols, rows = counts.shape
if "/entry1/experiment_identifier" in h5f: # old format
# reshape images (counts) to a correct shape (2006 issue)
counts = counts.reshape(n, rows, cols)
else:
counts = counts.swapaxes(1, 2)
det_data = {"data": data}
scan = {"counts": counts, "counts_err": np.sqrt(np.maximum(counts, 1))}
scan["original_filename"] = filepath
scan["export"] = True
if "/entry1/zebra_mode" in h5f:
det_data["zebra_mode"] = h5f["/entry1/zebra_mode"][0].decode()
scan["zebra_mode"] = h5f["/entry1/zebra_mode"][0].decode()
else:
det_data["zebra_mode"] = "nb"
scan["zebra_mode"] = "nb"
# overwrite zebra_mode from cami
if cami_meta is not None:
if "zebra_mode" in cami_meta:
det_data["zebra_mode"] = cami_meta["zebra_mode"][0]
scan["zebra_mode"] = cami_meta["zebra_mode"][0]
# om, sometimes ph
if det_data["zebra_mode"] == "nb":
det_data["omega"] = h5f["/entry1/area_detector2/rotation_angle"][:]
else: # bi
det_data["omega"] = h5f["/entry1/sample/rotation_angle"][:]
if "/entry1/control/Monitor" in h5f:
scan["monitor"] = h5f["/entry1/control/Monitor"][0]
else: # old path
scan["monitor"] = h5f["/entry1/control/data"][0]
det_data["gamma"] = h5f["/entry1/ZEBRA/area_detector2/polar_angle"][:] # gammad
det_data["nu"] = h5f["/entry1/ZEBRA/area_detector2/tilt_angle"][:] # nud
det_data["ddist"] = h5f["/entry1/ZEBRA/area_detector2/distance"][:]
det_data["wave"] = h5f["/entry1/ZEBRA/monochromator/wavelength"][:]
det_data["chi"] = h5f["/entry1/sample/chi"][:] # ch
det_data["phi"] = h5f["/entry1/sample/phi"][:] # ph
det_data["ub"] = h5f["/entry1/sample/UB"][:].reshape(3, 3)
det_data["name"] = h5f["/entry1/sample/name"][0].decode()
det_data["cell"] = h5f["/entry1/sample/cell"][:]
scan["idx"] = 1
for var in ("omega", "gamma", "nu", "chi", "phi"):
if abs(det_data[var][0] - det_data[var][-1]) > 0.1:
det_data["scan_motor"] = var
break
if "/entry1/sample/rotation_angle" in h5f:
scan["omega"] = h5f["/entry1/sample/rotation_angle"][:]
else:
raise ValueError("No angles that vary")
scan["omega"] = h5f["/entry1/area_detector2/rotation_angle"][:]
if len(scan["omega"]) == 1:
scan["omega"] = np.ones(n) * scan["omega"]
scan["gamma"] = h5f["/entry1/ZEBRA/area_detector2/polar_angle"][:]
scan["twotheta"] = h5f["/entry1/ZEBRA/area_detector2/polar_angle"][:]
if len(scan["gamma"]) == 1:
scan["gamma"] = np.ones(n) * scan["gamma"]
scan["twotheta"] = np.ones(n) * scan["twotheta"]
scan["nu"] = h5f["/entry1/ZEBRA/area_detector2/tilt_angle"][:1]
scan["ddist"] = h5f["/entry1/ZEBRA/area_detector2/distance"][:1]
scan["wave"] = h5f["/entry1/ZEBRA/monochromator/wavelength"][:1]
scan["chi"] = h5f["/entry1/sample/chi"][:]
if len(scan["chi"]) == 1:
scan["chi"] = np.ones(n) * scan["chi"]
scan["phi"] = h5f["/entry1/sample/phi"][:]
if len(scan["phi"]) == 1:
scan["phi"] = np.ones(n) * scan["phi"]
scan["ub"] = h5f["/entry1/sample/UB"][:].reshape(3, 3)
scan["name"] = h5f["/entry1/sample/name"][0].decode()
scan["cell"] = h5f["/entry1/sample/cell"][:]
if n == 1:
# a default motor for a single frame file
scan["scan_motor"] = "omega"
else:
for var in ("omega", "gamma", "nu", "chi", "phi"):
if abs(scan[var][0] - scan[var][-1]) > 0.1:
scan["scan_motor"] = var
break
else:
raise ValueError("No angles that vary")
scan["scan_motors"] = [scan["scan_motor"], ]
# optional parameters
if "/entry1/sample/magnetic_field" in h5f:
det_data["mf"] = h5f["/entry1/sample/magnetic_field"][:]
scan["mf"] = h5f["/entry1/sample/magnetic_field"][:]
if "/entry1/sample/temperature" in h5f:
det_data["temp"] = h5f["/entry1/sample/temperature"][:]
scan["temp"] = h5f["/entry1/sample/temperature"][:]
elif "/entry1/sample/Ts/value" in h5f:
scan["temp"] = h5f["/entry1/sample/Ts/value"][:]
# overwrite metadata from .cami
if cami_meta is not None:
if "crystal" in cami_meta:
cami_meta_crystal = cami_meta["crystal"]
if "name" in cami_meta_crystal:
det_data["name"] = cami_meta_crystal["name"]
scan["name"] = cami_meta_crystal["name"]
if "UB" in cami_meta_crystal:
det_data["ub"] = cami_meta_crystal["UB"]
scan["ub"] = cami_meta_crystal["UB"]
if "cell" in cami_meta_crystal:
det_data["cell"] = cami_meta_crystal["cell"]
scan["cell"] = cami_meta_crystal["cell"]
if "lambda" in cami_meta_crystal:
det_data["wave"] = cami_meta_crystal["lambda"]
scan["wave"] = cami_meta_crystal["lambda"]
if "detector parameters" in cami_meta:
cami_meta_detparam = cami_meta["detector parameters"]
if "dist1" in cami_meta_detparam:
det_data["ddist"] = cami_meta_detparam["dist1"]
if "dist2" in cami_meta_detparam:
scan["ddist"] = cami_meta_detparam["dist2"]
return det_data
return scan
def fit_event(scan, fr_from, fr_to, y_from, y_to, x_from, x_to):
    """Fit an event inside a 3D region of interest and store the result in scan["fit"].

    A 1D Gaussian over the frame axis gives the event's frame position and peak
    height; a 2D Gaussian over the frame-integrated image gives its in-plane
    position. Results are written into ``scan["fit"]`` as a dict with keys
    "frame", "x_pos", "y_pos", "intensity" and "snr".
    """
    roi = scan["counts"][fr_from:fr_to, y_from:y_to, x_from:x_to]

    # 1D fit along the frame axis -> center frame and peak height
    frames = np.arange(fr_from, fr_to)
    per_frame_counts = np.sum(roi, axis=(1, 2))
    model_1d = GaussianModel()
    fit_1d = model_1d.fit(
        per_frame_counts, x=frames, params=model_1d.guess(per_frame_counts, frames)
    )
    frame_center = fit_1d.params["center"].value
    peak_height = fit_1d.params["height"].value

    # signal-to-noise of the frame profile; guard against flat (zero-std) data
    counts_std = per_frame_counts.std()
    snr = 0 if counts_std == 0 else per_frame_counts.mean() / counts_std

    # 2D fit on the frame-integrated image -> in-plane peak position
    grid_x, grid_y = np.meshgrid(np.arange(x_from, x_to), np.arange(y_from, y_to))
    grid_x = grid_x.flatten()
    grid_y = grid_y.flatten()
    image_counts = np.sum(roi, axis=0).flatten()
    model_2d = Gaussian2dModel()
    fit_2d = model_2d.fit(
        image_counts, x=grid_x, y=grid_y, params=model_2d.guess(image_counts, grid_x, grid_y)
    )

    scan["fit"] = {
        "frame": frame_center,
        "x_pos": fit_2d.params["centerx"].value,
        "y_pos": fit_2d.params["centery"].value,
        "intensity": peak_height,
        "snr": snr,
    }

483
pyzebra/sxtal_refgen.py Normal file
View File

@ -0,0 +1,483 @@
import io
import os
import subprocess
import tempfile
from math import ceil, floor
import numpy as np
# Location of the external Sxtal_Refgen executable on the PSI SINQ software share.
SXTAL_REFGEN_PATH = "/afs/psi.ch/project/sinq/rhel7/bin/Sxtal_Refgen"

# Default ZEBRA instrument geometry template, bisecting (BI) mode.
# The ANG_LIMITS section lists NUM_ANG rows of "name min max offset" that
# read_geom_file/export_geom_file parse and rewrite.
_zebraBI_default_geom = """GEOM 2 Bissecting - HiCHI
BLFR z-up
DIST_UNITS mm
ANGL_UNITS deg
DET_TYPE Point ipsd 1
DIST_DET 488
DIM_XY 1.0 1.0 1 1
GAPS_DET 0 0
SETTING 1 0 0 0 1 0 0 0 1
NUM_ANG 4
ANG_LIMITS Min Max Offset
Gamma 0.0 128.0 0.00
Omega 0.0 64.0 0.00
Chi 80.0 211.0 0.00
Phi 0.0 360.0 0.00
DET_OFF 0 0 0
"""

# Default ZEBRA instrument geometry template, normal-beam (NB) mode.
_zebraNB_default_geom = """GEOM 3 Normal Beam
BLFR z-up
DIST_UNITS mm
ANGL_UNITS deg
DET_TYPE Point ipsd 1
DIST_DET 448
DIM_XY 1.0 1.0 1 1
GAPS_DET 0 0
SETTING 1 0 0 0 1 0 0 0 1
NUM_ANG 3
ANG_LIMITS Min Max Offset
Gamma 0.0 128.0 0.00
Omega -180.0 180.0 0.00
Nu -15.0 15.0 0.00
DET_OFF 0 0 0
"""

# Default .cfl control file template, used by export_cfl_file when the user
# did not upload a template of their own.
_zebra_default_cfl = """TITLE mymaterial
SPGR P 63 2 2
CELL 5.73 5.73 11.89 90 90 120
WAVE 1.383
UBMAT
0.000000 0.000000 0.084104
0.000000 0.174520 -0.000000
0.201518 0.100759 0.000000
INSTR zebra.geom
ORDER 1 2 3
ANGOR gamma
HLIM -25 25 -25 25 -25 25
SRANG 0.0 0.7
Mag_Structure
lattiCE P 1
kvect 0.0 0.0 0.0
magcent
symm x,y,z
msym u,v,w, 0.0
End_Mag_Structure
"""
def get_zebraBI_default_geom_file():
    """Return the default bisecting-geometry template as an in-memory text stream."""
    template_text = _zebraBI_default_geom
    return io.StringIO(template_text)
def get_zebraNB_default_geom_file():
    """Return the default normal-beam-geometry template as an in-memory text stream."""
    template_text = _zebraNB_default_geom
    return io.StringIO(template_text)
def get_zebra_default_cfl_file():
    """Return the default .cfl template as an in-memory text stream."""
    template_text = _zebra_default_cfl
    return io.StringIO(template_text)
def read_geom_file(fileobj):
    """Parse a Sxtal_Refgen .geom file into a dict of angular limits.

    Returns a dict with key "geom" ("bi" for GEOM 2, otherwise "nb") and one
    entry per angle name (lowercased) mapping to ``[min, max, offset]`` string
    lists read from the ANG_LIMITS section. A "2theta" angle is renamed to
    "gamma".
    """
    ang_lims = {}
    for raw_line in fileobj:
        # drop trailing comments introduced by '!'
        if "!" in raw_line:
            raw_line = raw_line.split("!", 1)[0]

        if raw_line.startswith("GEOM"):
            _, geom_val = raw_line.split(maxsplit=1)
            ang_lims["geom"] = "bi" if geom_val.startswith("2") else "nb"

        elif raw_line.startswith("ANG_LIMITS"):
            # the section runs until the first blank line
            for lim_line in fileobj:
                if not lim_line or lim_line.isspace():
                    break
                name, lim_min, lim_max, lim_offset = lim_line.split()
                ang_lims[name.lower()] = [lim_min, lim_max, lim_offset]

    if "2theta" in ang_lims:  # treat 2theta as gamma
        ang_lims["gamma"] = ang_lims.pop("2theta")

    return ang_lims
def export_geom_file(path, ang_lims, template=None):
    """Write a .geom file to *path*, substituting angular limits from *ang_lims*.

    *ang_lims* is a dict in the shape produced by ``read_geom_file``; its
    "geom" key ("bi" or "nb") selects the default template and the number of
    ANG_LIMITS rows to rewrite. If *template* is given it is used instead of
    the built-in template. A "2theta" row in the template is written back as
    "Gamma" and filled from ``ang_lims["gamma"]``.
    """
    if ang_lims["geom"] == "bi":
        n_ang = 4
        template_file = get_zebraBI_default_geom_file()
    else:  # ang_lims["geom"] == "nb"
        n_ang = 3
        template_file = get_zebraNB_default_geom_file()

    if template is not None:
        template_file = template

    with open(path, "w") as out_file:
        for line in template_file:
            out_file.write(line)
            if not line.startswith("ANG_LIMITS"):
                continue
            # the next n_ang template rows hold the per-angle limits; replace them
            for _ in range(n_ang):
                ang, _, _, _ = next(template_file).split()
                if ang == "2theta":  # treat 2theta as gamma
                    ang = "Gamma"
                vals = ang_lims[ang.lower()]
                out_file.write(f"{'':<8}{ang:<10}{vals[0]:<10}{vals[1]:<10}{vals[2]:<10}\n")
def calc_ub_matrix(params):
    """Compute a UB matrix by running the external Sxtal_Refgen program.

    Writes *params* (mapping of cfl keyword -> value) into a temporary
    "ub_matrix.cfl", runs Sxtal_Refgen on it, and parses the produced
    "ub_matrix.sfa" for the matrix block marked by "BL_M".

    Returns the matrix as a flat list of value strings (row-major).
    Raises subprocess.CalledProcessError if Sxtal_Refgen exits non-zero.
    """
    with tempfile.TemporaryDirectory() as temp_dir:
        cfl_file = os.path.join(temp_dir, "ub_matrix.cfl")
        with open(cfl_file, "w") as fileobj:
            for key, value in params.items():
                fileobj.write(f"{key} {value}\n")

        comp_proc = subprocess.run(
            [SXTAL_REFGEN_PATH, cfl_file],
            cwd=temp_dir,
            check=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,  # merge stderr into stdout for the log below
            text=True,
        )
        # echo the command and its combined output for the caller's log
        print(" ".join(comp_proc.args))
        print(comp_proc.stdout)

        sfa_file = os.path.join(temp_dir, "ub_matrix.sfa")
        ub_matrix = []
        with open(sfa_file, "r") as fileobj:
            for line in fileobj:
                if "BL_M" in line:  # next 3 lines contain the matrix
                    for _ in range(3):
                        next_line = next(fileobj)
                        # NOTE(review): split(maxsplit=3) keeps 3 tokens in *vals
                        # and discards the rest, which assumes each matrix row has
                        # at least one trailing token after the 3 values -- confirm
                        # against the actual .sfa format
                        *vals, _ = next_line.split(maxsplit=3)
                        ub_matrix.extend(vals)

    return ub_matrix
def read_cfl_file(fileobj):
    """Parse a .cfl file, returning a dict of the recognized parameters.

    Only a fixed set of keywords is extracted (SPGR, CELL, WAVE, UBMAT, HLIM,
    SRANG, lattiCE, kvect); unrecognized keywords are ignored and missing ones
    stay None. UBMAT is returned as a flat list of the 9 values found on the 3
    lines following the keyword; every other value is the rest of its line.
    """
    params = dict.fromkeys(
        ("SPGR", "CELL", "WAVE", "UBMAT", "HLIM", "SRANG", "lattiCE", "kvect")
    )
    recognized = tuple(params)

    for raw_line in fileobj:
        raw_line = raw_line.strip()
        # drop trailing comments introduced by '!'
        if "!" in raw_line:
            raw_line = raw_line.split("!", 1)[0]

        if not raw_line.startswith(recognized):
            continue

        if raw_line.startswith("UBMAT"):  # next 3 lines contain the matrix
            key = "UBMAT"
            val = []
            for _ in range(3):
                row = next(fileobj).strip()
                val.extend(row.split(maxsplit=2))
        else:
            key, val = raw_line.split(maxsplit=1)

        params[key] = val

    return params
def read_cif_file(fileobj):
    """Extract SPGR, CELL and ATOM parameters from a CIF file.

    Returns a dict with:
      "SPGR" -- space group from _space_group_name_H-M_alt (quotes stripped)
      "CELL" -- the 6 cell parameters joined into one string (set only if all
                six were found)
      "ATOM" -- one string per atom row with fields reordered as: label, type
                symbol, fract x/y/z, U_iso, occupancy
    """
    params = {"SPGR": None, "CELL": None, "ATOM": []}

    # cell parameters are collected individually and joined into "CELL" at the end
    cell_params = {
        "_cell_length_a": None,
        "_cell_length_b": None,
        "_cell_length_c": None,
        "_cell_angle_alpha": None,
        "_cell_angle_beta": None,
        "_cell_angle_gamma": None,
    }
    cell_param_names = tuple(cell_params)

    # maps each tracked _atom_site field to its column index in the atom loop;
    # _atom_site_label is assumed to be the first declared field (column 0)
    atom_param_pos = {
        "_atom_site_label": 0,
        "_atom_site_type_symbol": None,
        "_atom_site_fract_x": None,
        "_atom_site_fract_y": None,
        "_atom_site_fract_z": None,
        "_atom_site_U_iso_or_equiv": None,
        "_atom_site_occupancy": None,
    }
    atom_param_names = tuple(atom_param_pos)

    for line in fileobj:
        line = line.strip()
        if line.startswith("_space_group_name_H-M_alt"):
            _, val = line.split(maxsplit=1)
            params["SPGR"] = val.strip("'")
        elif line.startswith(cell_param_names):
            param, val = line.split(maxsplit=1)
            cell_params[param] = val
        elif line.startswith("_atom_site_label"):  # assume this is the start of atom data
            # `ind` counts lines after _atom_site_label, so it equals the column
            # index of each field declaration -- this assumes the _atom_site
            # declarations follow contiguously right after _atom_site_label
            for ind, line in enumerate(fileobj, start=1):
                line = line.strip()
                # read fields
                if line.startswith("_atom_site"):
                    if line.startswith(atom_param_names):
                        atom_param_pos[line] = ind
                    continue
                # read data till an empty line
                if not line:
                    break
                vals = line.split()
                # NOTE(review): if any tracked _atom_site field is absent from
                # the file, its position stays None and vals[None] raises
                # TypeError -- confirm input CIFs always declare all 7 fields
                params["ATOM"].append(" ".join([vals[ind] for ind in atom_param_pos.values()]))

    if None not in cell_params.values():
        params["CELL"] = " ".join(cell_params.values())

    return params
def export_cfl_file(path, params, template=None):
    """Write a .cfl file to *path*, overriding template lines with *params*.

    Template lines whose first word matches a key in *params* are replaced by
    "KEY value". Special cases:
      * "UBMAT" -- the 3 template lines after the keyword are replaced by
        params["UBMAT"], a flat sequence of 9 values written 3 per row.
      * "ATOM"  -- the template's ATOM lines are replaced by one "ATOM ..."
        line per entry of params["ATOM"]; if the template has no ATOM lines,
        they are appended at the end.
      * "INSTR" -- always rewritten to the default instrument file name.

    If *template* is None the built-in default .cfl template is used.

    Fixes vs. the previous revision: the first non-ATOM line after a template
    ATOM block is no longer dropped, a template ending in ATOM lines no longer
    raises StopIteration, and a second ATOM block no longer duplicates the
    replacement atoms.
    """
    param_names = tuple(params)

    if template is None:
        template_file = get_zebra_default_cfl_file()
    else:
        template_file = template

    atom_done = False
    with open(path, "w") as out_file:
        for line in template_file:
            if line.startswith("ATOM") and "ATOM" in params:
                # replace the template's ATOM block with params["ATOM"]; skip
                # every original ATOM line but write the replacement only once
                if not atom_done:
                    for atom_line in params["ATOM"]:
                        out_file.write(f"ATOM {atom_line}\n")
                    atom_done = True
                continue

            if line.startswith(param_names):
                if line.startswith("UBMAT"):  # only UBMAT values are not on the same line
                    out_file.write(line)
                    for i in range(3):
                        next(template_file)  # drop the template's matrix row
                        out_file.write(" ".join(params["UBMAT"][3 * i : 3 * (i + 1)]) + "\n")
                else:
                    param, _ = line.split(maxsplit=1)
                    out_file.write(f"{param} {params[param]}\n")
            elif line.startswith("INSTR"):
                # replace it with a default name
                out_file.write("INSTR zebra.geom\n")
            else:
                out_file.write(line)

        # append ATOM data if it's present and the template did not contain it
        if "ATOM" in params and not atom_done:
            out_file.write("\n")
            for atom_line in params["ATOM"]:
                out_file.write(f"ATOM {atom_line}\n")
def sort_hkl_file_bi(file_in, file_out, priority, chunks):
    """Reorder a bisecting-geometry .hkl file to reduce motor travel.

    Reflections are grouped hierarchically: chunks of the first-priority angle,
    then chunks of the second and third priorities inside each group, and a
    final sort by the fourth-priority angle. The traversal direction alternates
    between consecutive chunks at every level (boustrophedon order). The 3
    header lines of the input file are copied unchanged to the output.

    Parameters
    ----------
    file_in, file_out : input/output .hkl file paths (3 header lines assumed).
    priority : sequence of 4 angle names, one per sort level.
        NOTE(review): level 1 uses key "2theta" while levels 2-4 use key "stt"
        for the same angle -- confirm callers pass the matching key per level.
    chunks : sequence of 3 chunk widths (degrees) for the first three levels.
    """
    with open(file_in) as fileobj:
        file_in_data = fileobj.readlines()

    # columns 4-7 hold the motor angles; the first 3 lines are the header
    data = np.genfromtxt(file_in, skip_header=3)
    stt = data[:, 4]
    omega = data[:, 5]
    chi = data[:, 6]
    phi = data[:, 7]

    lines = file_in_data[3:]
    lines_update = []

    angles = {"2theta": stt, "omega": omega, "chi": chi, "phi": phi}

    # Reverse flag
    to_reverse = False
    to_reverse_p2 = False
    to_reverse_p3 = False

    # Get indices within first priority
    ang_p1 = angles[priority[0]]
    begin_p1 = floor(min(ang_p1))
    end_p1 = ceil(max(ang_p1))
    delta_p1 = chunks[0]
    for p1 in range(begin_p1, end_p1, delta_p1):
        # indices of reflections whose level-1 angle falls in [p1, p1 + delta_p1)
        ind_p1 = [j for j, x in enumerate(ang_p1) if p1 <= x and x < p1 + delta_p1]
        stt_new = [stt[x] for x in ind_p1]
        omega_new = [omega[x] for x in ind_p1]
        chi_new = [chi[x] for x in ind_p1]
        phi_new = [phi[x] for x in ind_p1]
        lines_new = [lines[x] for x in ind_p1]

        angles_p2 = {"stt": stt_new, "omega": omega_new, "chi": chi_new, "phi": phi_new}

        # Get indices for second priority
        ang_p2 = angles_p2[priority[1]]
        if len(ang_p2) > 0 and to_reverse_p2:
            # traverse this level downwards
            begin_p2 = ceil(max(ang_p2))
            end_p2 = floor(min(ang_p2))
            delta_p2 = -chunks[1]
        elif len(ang_p2) > 0 and not to_reverse_p2:
            end_p2 = ceil(max(ang_p2))
            begin_p2 = floor(min(ang_p2))
            delta_p2 = chunks[1]
        else:
            # empty group -> empty range below
            end_p2 = 0
            begin_p2 = 0
            delta_p2 = 1
        to_reverse_p2 = not to_reverse_p2

        for p2 in range(begin_p2, end_p2, delta_p2):
            min_p2 = min([p2, p2 + delta_p2])
            max_p2 = max([p2, p2 + delta_p2])
            ind_p2 = [j for j, x in enumerate(ang_p2) if min_p2 <= x and x < max_p2]
            stt_new2 = [stt_new[x] for x in ind_p2]
            omega_new2 = [omega_new[x] for x in ind_p2]
            chi_new2 = [chi_new[x] for x in ind_p2]
            phi_new2 = [phi_new[x] for x in ind_p2]
            lines_new2 = [lines_new[x] for x in ind_p2]

            angles_p3 = {"stt": stt_new2, "omega": omega_new2, "chi": chi_new2, "phi": phi_new2}

            # Get indices for third priority
            ang_p3 = angles_p3[priority[2]]
            if len(ang_p3) > 0 and to_reverse_p3:
                # range widened by one chunk on each side (presumably to keep
                # boundary values -- TODO confirm intent)
                begin_p3 = ceil(max(ang_p3)) + chunks[2]
                end_p3 = floor(min(ang_p3)) - chunks[2]
                delta_p3 = -chunks[2]
            elif len(ang_p3) > 0 and not to_reverse_p3:
                end_p3 = ceil(max(ang_p3)) + chunks[2]
                begin_p3 = floor(min(ang_p3)) - chunks[2]
                delta_p3 = chunks[2]
            else:
                end_p3 = 0
                begin_p3 = 0
                delta_p3 = 1
            to_reverse_p3 = not to_reverse_p3

            for p3 in range(begin_p3, end_p3, delta_p3):
                min_p3 = min([p3, p3 + delta_p3])
                max_p3 = max([p3, p3 + delta_p3])
                ind_p3 = [j for j, x in enumerate(ang_p3) if min_p3 <= x and x < max_p3]
                # final level: plain sort by the fourth-priority angle,
                # direction alternating with every emitted chunk
                angle_new3 = [angles_p3[priority[3]][x] for x in ind_p3]
                ind_final = [x for _, x in sorted(zip(angle_new3, ind_p3), reverse=to_reverse)]
                to_reverse = not to_reverse
                for i in ind_final:
                    lines_update.append(lines_new2[i])

    with open(file_out, "w") as fileobj:
        # copy the 3 header lines, then the reordered reflection lines
        for _ in range(3):
            fileobj.write(file_in_data.pop(0))
        fileobj.writelines(lines_update)
def sort_hkl_file_nb(file_in, file_out, priority, chunks):
    """Reorder a normal-beam-geometry .hkl file to reduce motor travel.

    Two chunked levels (priority[0], priority[1]) followed by a final sort by
    priority[2]; the traversal direction alternates between consecutive chunks
    at each level. The 3 header lines of the input are copied unchanged.

    Parameters
    ----------
    file_in, file_out : input/output .hkl file paths (3 header lines assumed).
    priority : sequence of 3 angle names out of "gamma", "omega", "nu".
    chunks : sequence of 2 chunk widths (degrees) for the first two levels.
    """
    with open(file_in) as fileobj:
        file_in_data = fileobj.readlines()

    # columns 4-6 hold the motor angles; the first 3 lines are the header
    data = np.genfromtxt(file_in, skip_header=3)
    gamma = data[:, 4]
    omega = data[:, 5]
    nu = data[:, 6]

    lines = file_in_data[3:]
    lines_update = []

    angles = {"gamma": gamma, "omega": omega, "nu": nu}

    to_reverse = False
    to_reverse_p2 = False

    # Get indices within first priority
    ang_p1 = angles[priority[0]]
    begin_p1 = floor(min(ang_p1))
    end_p1 = ceil(max(ang_p1))
    delta_p1 = chunks[0]
    for p1 in range(begin_p1, end_p1, delta_p1):
        # indices of reflections whose level-1 angle falls in [p1, p1 + delta_p1)
        ind_p1 = [j for j, x in enumerate(ang_p1) if p1 <= x and x < p1 + delta_p1]
        # Get angles from within nu range
        lines_new = [lines[x] for x in ind_p1]
        gamma_new = [gamma[x] for x in ind_p1]
        omega_new = [omega[x] for x in ind_p1]
        nu_new = [nu[x] for x in ind_p1]

        angles_p2 = {"gamma": gamma_new, "omega": omega_new, "nu": nu_new}

        # Get indices for second priority
        ang_p2 = angles_p2[priority[1]]
        if len(gamma_new) > 0 and to_reverse_p2:
            # traverse this level downwards
            begin_p2 = ceil(max(ang_p2))
            end_p2 = floor(min(ang_p2))
            delta_p2 = -chunks[1]
        elif len(gamma_new) > 0 and not to_reverse_p2:
            end_p2 = ceil(max(ang_p2))
            begin_p2 = floor(min(ang_p2))
            delta_p2 = chunks[1]
        else:
            # empty group -> empty range below
            end_p2 = 0
            begin_p2 = 0
            delta_p2 = 1
        to_reverse_p2 = not to_reverse_p2

        for p2 in range(begin_p2, end_p2, delta_p2):
            min_p2 = min([p2, p2 + delta_p2])
            max_p2 = max([p2, p2 + delta_p2])
            ind_p2 = [j for j, x in enumerate(ang_p2) if min_p2 <= x and x < max_p2]
            # final level: plain sort by the third-priority angle,
            # direction alternating with every emitted chunk
            angle_new2 = [angles_p2[priority[2]][x] for x in ind_p2]
            ind_final = [x for _, x in sorted(zip(angle_new2, ind_p2), reverse=to_reverse)]
            to_reverse = not to_reverse
            for i in ind_final:
                lines_update.append(lines_new[i])

    with open(file_out, "w") as fileobj:
        # copy the 3 header lines, then the reordered reflection lines
        for _ in range(3):
            fileobj.write(file_in_data.pop(0))
        fileobj.writelines(lines_update)

17
pyzebra/utils.py Normal file
View File

@ -0,0 +1,17 @@
import os

# Root of the SINQ data archive on the PSI AFS share.
SINQ_PATH = "/afs/psi.ch/project/sinqdata"
# Template for a ZEBRA proposal directory; fill in with .format(year=..., proposal=...).
ZEBRA_PROPOSALS_PATH = os.path.join(SINQ_PATH, "{year}/zebra/{proposal}")
def find_proposal_path(proposal):
    """Locate the data directory of *proposal* under any year folder of SINQ_PATH.

    Scans SINQ_PATH for 4-digit year directories and returns the first one that
    contains the proposal. Raises ValueError if no year folder contains it.
    """
    for entry in os.scandir(SINQ_PATH):
        # year folders are 4-digit directory names, e.g. "2022"
        if not (entry.is_dir() and len(entry.name) == 4 and entry.name.isdigit()):
            continue
        candidate = ZEBRA_PROPOSALS_PATH.format(year=entry.name, proposal=proposal)
        if os.path.isdir(candidate):
            # found it
            return candidate

    raise ValueError(f"Can not find data for proposal '{proposal}'.")

View File

@ -372,6 +372,27 @@ def ang2hkl(wave, ddist, gammad, om, ch, ph, nud, ub, x, y):
return hkl
def ang2hkl_1d(wave, ddist, ga, om, ch, ph, nu, ub):
    """Calculate hkl-indices of a reflection from its position (angles) at the 1d-detector

    NOTE(review): `ddist` is accepted for signature parity with ang2hkl but is
    not used here -- confirm with callers before removing.
    """
    # scattering vector from the angles, transformed by the inverse UB matrix
    z1 = z1frmd(wave, ga, om, ch, ph, nu)
    return np.linalg.inv(ub) @ z1
def ang_proc(wave, ddist, gammad, om, ch, ph, nud, x, y):
    """Utility function to calculate ch, ph, ga, om"""
    # detector pixel -> polar angles, then the scattering vector
    ga_pol, nu_pol = det2pol(ddist, gammad, nud, x, y)
    z1 = z1frmd(wave, ga_pol, om, ch, ph, nu_pol)
    # equivalent chi/phi, then fix nu to obtain the final angle set
    ch_eq, ph_eq = eqchph(z1)
    ch_out, ph_out, ga_out, om_out = fixdnu(wave, z1, ch_eq, ph_eq, nu_pol)
    return ch_out, ph_out, ga_out, om_out
def gauss(x, *p):
"""Defines Gaussian function