Store metadata in each scan

usov_i 2021-02-08 15:52:54 +01:00
parent 20527e8d2b
commit b31c0b413c
5 changed files with 110 additions and 174 deletions
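For orientation, a minimal sketch (not part of the commit) of the data-structure change these diffs make: parse_1D used to return a dict with one shared "meta" block and a "scan" list, and now returns a plain list of scan dicts, each carrying its own "meta". The values and the file path below are placeholders, not taken from the repository.

    # Illustrative sketch only; values and paths are hypothetical.

    # Before: one metadata dict for the whole file, scans in a nested list.
    det_data_old = {
        "meta": {"zebra_mode": "bi", "original_filename": "/data/example.ccl"},
        "scan": [
            {"idx": 1, "om": [10.0, 10.1], "Counts": [5, 7]},
            {"idx": 2, "om": [20.0, 20.1], "Counts": [3, 4]},
        ],
    }

    # After: a flat list of scans, each storing the metadata it was parsed with.
    meta = {"zebra_mode": "bi", "original_filename": "/data/example.ccl"}
    det_data_new = [
        {"idx": 1, "om": [10.0, 10.1], "Counts": [5, 7], "meta": meta},
        {"idx": 2, "om": [20.0, 20.1], "Counts": [3, 4], "meta": meta},
    ]

    # Callers now iterate over the container directly and read per-scan metadata.
    for scan in det_data_new:
        print(scan["idx"], scan["meta"]["original_filename"])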

View File

@@ -87,8 +87,8 @@ def create():
     proposal_textinput.on_change("value", proposal_textinput_callback)

     def _init_datatable():
-        scan_list = [s["idx"] for s in det_data["scan"]]
-        hkl = [f'{s["h"]} {s["k"]} {s["l"]}' for s in det_data["scan"]]
+        scan_list = [s["idx"] for s in det_data]
+        hkl = [f'{s["h"]} {s["k"]} {s["l"]}' for s in det_data]
         scan_table_source.data.update(
             scan=scan_list,
             hkl=hkl,
@@ -159,8 +159,8 @@ def create():
     append_upload_button.on_change("value", append_upload_button_callback)

     def _update_table():
-        num_of_peaks = [len(scan.get("peak_indexes", [])) for scan in det_data["scan"]]
-        fit_ok = [(1 if "fit" in scan else 0) for scan in det_data["scan"]]
+        num_of_peaks = [len(scan.get("peak_indexes", [])) for scan in det_data]
+        fit_ok = [(1 if "fit" in scan else 0) for scan in det_data]
         scan_table_source.data.update(peaks=num_of_peaks, fit=fit_ok)

     def _update_plot(scan):
@@ -284,7 +284,7 @@ def create():
             # skip unnecessary update caused by selection drop
             return

-        _update_plot(det_data["scan"][scan_table_source.data["scan"][new[0]]])
+        _update_plot(det_data[scan_table_source.data["scan"][new[0]]])

     scan_table_source = ColumnDataSource(dict(scan=[], hkl=[], peaks=[], fit=[], export=[]))
     scan_table = DataTable(
@@ -305,7 +305,7 @@ def create():
     def _get_selected_scan():
         selected_index = scan_table_source.selected.indices[0]
         selected_scan_id = scan_table_source.data["scan"][selected_index]
-        return det_data["scan"][selected_scan_id]
+        return det_data[selected_scan_id]

     def peak_pos_textinput_callback(_attr, _old, new):
         if new is not None and not peak_pos_textinput_lock:
@@ -446,7 +446,7 @@ def create():
     def peakfind_all_button_callback():
         peakfind_params = _get_peakfind_params()
-        for scan in det_data["scan"]:
+        for scan in det_data:
             pyzebra.ccl_findpeaks(scan, **peakfind_params)

         _update_table()
@@ -478,7 +478,7 @@ def create():
     def fit_all_button_callback():
         fit_params = _get_fit_params()
-        for scan in det_data["scan"]:
+        for scan in det_data:
             # fit_params are updated inplace within `fitccl`
             pyzebra.fitccl(scan, **deepcopy(fit_params))
@@ -514,8 +514,8 @@ def create():
         export_data = deepcopy(det_data)
         for s, export in zip(scan_table_source.data["scan"], scan_table_source.data["export"]):
             if not export:
-                if "fit" in export_data["scan"][s]:
-                    del export_data["scan"][s]["fit"]
+                if "fit" in export_data[s]:
+                    del export_data[s]["fit"]

         pyzebra.export_1D(
             export_data,
@@ -547,7 +547,7 @@ def create():
         export_data = deepcopy(det_data)
         for s, export in zip(scan_table_source.data["scan"], scan_table_source.data["export"]):
             if not export:
-                del export_data["scan"][s]
+                del export_data[s]

         pyzebra.export_1D(
             export_data,

View File

@@ -96,16 +96,10 @@ def create():
     proposal_textinput.on_change("value", proposal_textinput_callback)

     def _init_datatable():
-        scan_list = [s["idx"] for s in det_data["scan"]]
+        scan_list = [s["idx"] for s in det_data]
         file_list = []
-        extra_meta = det_data.get("extra_meta", {})
         for scan_id in scan_list:
-            if scan_id in extra_meta:
-                f_path = extra_meta[scan_id]["original_filename"]
-            else:
-                f_path = det_data["meta"]["original_filename"]
-            _, f_name = os.path.split(f_path)
+            _, f_name = os.path.split(det_data[scan_id]["meta"]["original_filename"])
             file_list.append(f_name)

         scan_table_source.data.update(
@@ -184,8 +178,8 @@ def create():
     append_upload_button.on_change("value", append_upload_button_callback)

     def _update_table():
-        num_of_peaks = [len(scan.get("peak_indexes", [])) for scan in det_data["scan"]]
-        fit_ok = [(1 if "fit" in scan else 0) for scan in det_data["scan"]]
+        num_of_peaks = [len(scan.get("peak_indexes", [])) for scan in det_data]
+        fit_ok = [(1 if "fit" in scan else 0) for scan in det_data]
         scan_table_source.data.update(peaks=num_of_peaks, fit=fit_ok)

     def _update_plot():
@@ -271,12 +265,12 @@ def create():
         for ind, p in enumerate(scan_table_source.data["param"]):
             if p:
                 s = scan_table_source.data["scan"][ind]
-                xs.append(np.array(det_data["scan"][s]["om"]))
-                x.extend(det_data["scan"][s]["om"])
-                ys.append(np.array(det_data["scan"][s]["Counts"]))
-                y.extend([float(p)] * len(det_data["scan"][s]["om"]))
+                xs.append(np.array(det_data[s]["om"]))
+                x.extend(det_data[s]["om"])
+                ys.append(np.array(det_data[s]["Counts"]))
+                y.extend([float(p)] * len(det_data[s]["om"]))
                 param.append(float(p))
-                par.extend(det_data["scan"][s]["Counts"])
+                par.extend(det_data[s]["Counts"])

         ov_plot_mline_source.data.update(xs=xs, ys=ys, param=param, color=color_palette(len(xs)))
         ov_param_plot_scatter_source.data.update(x=x, y=y, param=par)
@@ -412,7 +406,7 @@ def create():
     def _get_selected_scan():
         selected_index = scan_table_source.selected.indices[0]
         selected_scan_id = scan_table_source.data["scan"][selected_index]
-        return det_data["scan"][selected_scan_id]
+        return det_data[selected_scan_id]

     def peak_pos_textinput_callback(_attr, _old, new):
         if new is not None and not peak_pos_textinput_lock:
@@ -553,7 +547,7 @@ def create():
     def peakfind_all_button_callback():
         peakfind_params = _get_peakfind_params()
-        for scan in det_data["scan"]:
+        for scan in det_data:
             pyzebra.ccl_findpeaks(scan, **peakfind_params)

         _update_table()
@@ -585,7 +579,7 @@ def create():
     def fit_all_button_callback():
         fit_params = _get_fit_params()
-        for scan in det_data["scan"]:
+        for scan in det_data:
             # fit_params are updated inplace within `fitccl`
             pyzebra.fitccl(scan, **deepcopy(fit_params))
@@ -621,7 +615,7 @@ def create():
         export_data = deepcopy(det_data)
         for s, export in zip(scan_table_source.data["scan"], scan_table_source.data["export"]):
             if not export:
-                del export_data["scan"][s]
+                del export_data[s]

         pyzebra.export_1D(
             export_data,
@@ -648,8 +642,8 @@ def create():
         export_data = deepcopy(det_data)
         for s, export in zip(scan_table_source.data["scan"], scan_table_source.data["export"]):
             if not export:
-                if "fit" in export_data["scan"][s]:
-                    del export_data["scan"][s]["fit"]
+                if "fit" in export_data[s]:
+                    del export_data[s]["fit"]

         pyzebra.export_1D(
             export_data,
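A small sketch of what the simplified filename lookup in _init_datatable above amounts to; the det_data contents and paths are hypothetical.

    import os

    # Hypothetical det_data after this commit: every scan carries its source file in "meta".
    det_data = [
        {"idx": 1, "meta": {"original_filename": "/data/run_001.dat"}},
        {"idx": 2, "meta": {"original_filename": "/data/run_002.dat"}},
    ]

    # The extra_meta fallback is gone; the file name comes straight from each scan.
    file_list = [os.path.split(s["meta"]["original_filename"])[1] for s in det_data]
    print(file_list)  # ['run_001.dat', 'run_002.dat']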

View File

@@ -98,8 +98,9 @@ def load_1D(filepath):
 def parse_1D(fileobj, data_type):
+    metadata = {"data_type": data_type}
+
     # read metadata
-    metadata = {}
     for line in fileobj:
         if "=" in line:
             variable, value = line.split("=")
@@ -154,6 +155,9 @@ def parse_1D(fileobj, data_type):
                 counts.extend(map(int, next(fileobj).split()))
             s["Counts"] = counts

+            # add metadata to each scan
+            s["meta"] = metadata
+
             scan.append(s)

     elif data_type == ".dat":
@@ -179,21 +183,17 @@ def parse_1D(fileobj, data_type):
         s["om"] = np.array(s["om"])

-        s["temp"] = metadata["temp"]
-        try:
-            s["mf"] = metadata["mf"]
-        except KeyError:
+        if "mf" not in metadata:
             print("Magnetic field is not present in dat file")
-        s["omega"] = metadata["omega"]
+
         s["n_points"] = len(s["om"])
         s["monitor"] = s["Monitor1"][0]
-        s["twotheta"] = metadata["twotheta"]
-        s["chi"] = metadata["chi"]
-        s["phi"] = metadata["phi"]
-        s["nu"] = metadata["nu"]
         s["idx"] = 1
+
+        # add metadata to the scan
+        s["meta"] = metadata

         scan.append(dict(s))

     else:
@@ -206,9 +206,7 @@ def parse_1D(fileobj, data_type):
         else:
             s["indices"] = "real"

-    metadata["data_type"] = data_type
-
-    return {"meta": metadata, "scan": scan}
+    return scan


 def export_1D(data, path, area_method=AREA_METHODS[0], lorentz=False, hkl_precision=2):
@@ -217,10 +215,10 @@ def export_1D(data, path, area_method=AREA_METHODS[0], lorentz=False, hkl_precis
     Scans with integer/real hkl values are saved in .comm/.incomm files correspondingly. If no scans
     are present for a particular output format, that file won't be created.
     """
-    zebra_mode = data["meta"]["zebra_mode"]
+    zebra_mode = data[0]["meta"]["zebra_mode"]
     file_content = {".comm": [], ".incomm": []}

-    for scan in data["scan"]:
+    for scan in data:
         if "fit" not in scan:
             continue
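A hedged usage sketch of the reworked loader/exporter interface shown above; the file paths are placeholders, and load_1D/export_1D are assumed to be importable from pyzebra.ccl_io as elsewhere in the codebase.

    from pyzebra.ccl_io import load_1D, export_1D

    scans = load_1D("example.ccl")               # now a list of scan dicts, not {"meta": ..., "scan": ...}
    zebra_mode = scans[0]["meta"]["zebra_mode"]  # file-level metadata travels with each scan
    export_1D(scans, "./example")                # iterates over the list; only scans with a "fit" are written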

View File

@@ -13,14 +13,14 @@ def create_tuples(x, y, y_err):
 def normalize_all(dictionary, monitor=100000):
-    for scan in dictionary["scan"]:
+    for scan in dictionary:
         counts = np.array(scan["Counts"])
         sigma = np.sqrt(counts) if "sigma" not in scan else scan["sigma"]
         monitor_ratio = monitor / scan["monitor"]
         scan["Counts"] = counts * monitor_ratio
         scan["sigma"] = np.array(sigma) * monitor_ratio
         scan["monitor"] = monitor

-    print("Normalized %d scans to monitor %d" % (len(dictionary["scan"]), monitor))
+    print("Normalized %d scans to monitor %d" % (len(dictionary), monitor))


 def merge(scan1, scan2):
@@ -77,11 +77,11 @@ def merge(scan1, scan2):
 def check_UB(dict1, dict2, precision=0.01):
-    return np.max(np.abs(dict1["meta"]["ub"] - dict2["meta"]["ub"])) < precision
+    return np.max(np.abs(dict1[0]["meta"]["ub"] - dict2[0]["meta"]["ub"])) < precision


 def check_zebramode(dict1, dict2):
-    if dict1["meta"]["zebra_mode"] == dict2["meta"]["zebra_mode"]:
+    if dict1[0]["meta"]["zebra_mode"] == dict2[0]["meta"]["zebra_mode"]:
         return True
     else:
         return False
@@ -128,12 +128,12 @@ def check_temp_mag(scan1, scan2):
 def merge_dups(dictionary):
-    if dictionary["meta"]["data_type"] == "dat":
+    if dictionary[0]["meta"]["data_type"] == "dat":
         return

-    if dictionary["meta"]["zebra_mode"] == "bi":
+    if dictionary[0]["meta"]["zebra_mode"] == "bi":
         angles = ["twotheta", "omega", "chi", "phi"]
-    elif dictionary["meta"]["zebra_mode"] == "nb":
+    elif dictionary[0]["meta"]["zebra_mode"] == "nb":
         angles = ["gamma", "omega", "nu"]

     precision = {
@@ -145,19 +145,19 @@ def merge_dups(dictionary):
         "gamma": 0.05,
     }

-    for i in range(len(dictionary["scan"])):
-        for j in range(len(dictionary["scan"])):
+    for i in range(len(dictionary)):
+        for j in range(len(dictionary)):
             if i == j:
                 continue
             else:
                 # print(i, j)
-                if check_angles(
-                    dictionary["scan"][i], dictionary["scan"][j], angles, precision
-                ) and check_temp_mag(dictionary["scan"][i], dictionary["scan"][j]):
-                    merge(dictionary["scan"][i], dictionary["scan"][j])
+                if check_angles(dictionary[i], dictionary[j], angles, precision) and check_temp_mag(
+                    dictionary[i], dictionary[j]
+                ):
+                    merge(dictionary[i], dictionary[j])
                     print("merged %d with %d within the dictionary" % (i, j))
-                    del dictionary["scan"][j]
+                    del dictionary[j]
                     merge_dups(dictionary)
                     break
         else:
@@ -166,29 +166,24 @@ def merge_dups(dictionary):
 def add_scan(dict1, dict2, scan_to_add):
-    max_scan = len(dict1["scan"])
-    dict1["scan"].append(dict2["scan"][scan_to_add])
-    if dict1.get("extra_meta") is None:
-        dict1["extra_meta"] = {}
-    dict1["extra_meta"][max_scan + 1] = dict2["meta"]
-    del dict2["scan"][scan_to_add]
+    dict1.append(dict2[scan_to_add])
+    del dict2[scan_to_add]


 def process(dict1, dict2, angles, precision):
     # stop when the second dict is empty
-    # print(dict2["scan"])
-    if dict2["scan"]:
+    if dict2:
         # check UB matrixes
         if check_UB(dict1, dict2):
             # iterate over second dict and check for matches
-            for i in range(len(dict2["scan"])):
-                for j in range(len(dict1["scan"])):
-                    if check_angles(dict1["scan"][j], dict2["scan"][i], angles, precision):
+            for i in range(len(dict2)):
+                for j in range(len(dict1)):
+                    if check_angles(dict1[j], dict2[i], angles, precision):
                         # angles good, see the mag and temp
-                        if check_temp_mag(dict1["scan"][j], dict2["scan"][i]):
-                            merge(dict1["scan"][j], dict2["scan"][i])
+                        if check_temp_mag(dict1[j], dict2[i]):
+                            merge(dict1[j], dict2[i])
                             print("merged %d with %d from different dictionaries" % (i, j))
-                            del dict2["scan"][i]
+                            del dict2[i]
                             process(dict1, dict2, angles, precision)
                             break
                         else:
@@ -225,9 +220,9 @@ def unified_merge(dict1, dict2):
         return

     # decide angles
-    if dict1["meta"]["zebra_mode"] == "bi":
+    if dict1[0]["meta"]["zebra_mode"] == "bi":
         angles = ["twotheta", "omega", "chi", "phi"]
-    elif dict1["meta"]["zebra_mode"] == "nb":
+    elif dict1[0]["meta"]["zebra_mode"] == "nb":
         angles = ["gamma", "omega", "nu"]

     # precision of angles to check
@@ -239,7 +234,7 @@ def unified_merge(dict1, dict2):
         "omega": 5,
         "gamma": 0.1,
     }
-    if (dict1["meta"]["data_type"] == "ccl") and (dict2["meta"]["data_type"] == "ccl"):
+    if (dict1[0]["meta"]["data_type"] == "ccl") and (dict2[0]["meta"]["data_type"] == "ccl"):
         precision["omega"] = 0.05

     process(dict1, dict2, angles, precision)
@@ -254,33 +249,20 @@ def add_dict(dict1, dict2):
     Note: dict1 must be made from ccl, otherwise we would have to change the structure of loaded
     dat file"""
     try:
-        if dict1["meta"]["zebra_mode"] != dict2["meta"]["zebra_mode"]:
+        if dict1[0]["meta"]["zebra_mode"] != dict2[0]["meta"]["zebra_mode"]:
             print("You are trying to add scans measured with different zebra modes")
             return
     # this is for the qscan case
     except KeyError:
         print("Zebra mode not specified")

-    max_measurement_dict1 = len(dict1["scan"])
-    new_filenames = np.arange(
-        max_measurement_dict1 + 1, max_measurement_dict1 + 1 + len(dict2["scan"])
-    )
-    if dict1.get("extra_meta") is None:
-        dict1["extra_meta"] = {}
-    new_meta_name = "meta" + str(dict2["meta"]["original_filename"])
-    if new_meta_name not in dict1:
-        for keys, name in zip(range(len(dict2["scan"])), new_filenames):
-            dict2["scan"][keys]["file_of_origin"] = str(dict2["meta"]["original_filename"])
-            dict1["scan"].append(dict2["scan"][keys])
-            dict1["extra_meta"][name] = dict2["meta"]
-        dict1[new_meta_name] = dict2["meta"]
-    else:
-        raise KeyError(
-            str(
+    for s in dict2:
+        if s not in dict1:
+            dict1.append(s)
+        else:
+            print(
                 "The file %s has alredy been added to %s"
-                % (dict2["meta"]["original_filename"], dict1["meta"]["original_filename"])
+                % (dict2[0]["meta"]["original_filename"], dict1[0]["meta"]["original_filename"])
             )
-        )

     return dict1
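A sketch of how the list-based merge helpers above are meant to be chained; the file names are placeholders and the imports assume the module layout used in this commit (pyzebra/ccl_io.py, pyzebra/merge_function.py).

    from pyzebra.ccl_io import load_1D
    from pyzebra.merge_function import normalize_all, unified_merge, add_dict

    dict1 = load_1D("first.ccl")     # both files now parse to plain lists of scan dicts
    dict2 = load_1D("second.ccl")

    normalize_all(dict1)             # len(dict1) counts scans directly
    normalize_all(dict2)
    unified_merge(dict1, dict2)      # merges scans measured at matching angles/temperature/field
    dict1 = add_dict(dict1, dict2)   # appends whatever remains of dict2 into dict1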

View File

@@ -10,6 +10,7 @@ from mpl_toolkits.mplot3d import Axes3D  # dont delete, otherwise waterfall wont
 import collections

 from .ccl_io import load_1D
+from .merge_function import add_dict


 def create_tuples(x, y, y_err):
@@ -52,10 +53,10 @@ def load_dats(filepath):
         else:
             dict1 = add_dict(dict1, load_1D(file_list[i]))
-        dict1["scan"].append({})
+        dict1.append({})
         if data_type == "txt":
             for x in range(len(col_names) - 1):
-                dict1["scan"][i + 1]["params"][col_names[x + 1]] = float(file_list[i][x + 1])
+                dict1[i + 1]["params"][col_names[x + 1]] = float(file_list[i][x + 1])

     return dict1
@@ -77,18 +78,15 @@ def create_dataframe(dict1, variables):
         print(keys)

     # populate the dict
-    for keys in range(len(dict1["scan"])):
-        if "file_of_origin" in dict1["scan"][keys]:
-            pull_dict["filenames"].append(dict1["scan"][keys]["file_of_origin"].split("/")[-1])
-        else:
-            pull_dict["filenames"].append(dict1["meta"]["original_filename"].split("/")[-1])
+    for keys in range(len(dict1)):
+        pull_dict["filenames"].append(dict1[0]["meta"]["original_filename"].split("/")[-1])

-        pull_dict["fit_area"].append(dict1["scan"][keys]["fit"]["fit_area"])
-        pull_dict["int_area"].append(dict1["scan"][keys]["fit"]["int_area"])
-        pull_dict["Counts"].append(dict1["scan"][keys]["Counts"])
+        pull_dict["fit_area"].append(dict1[keys]["fit"]["fit_area"])
+        pull_dict["int_area"].append(dict1[keys]["fit"]["int_area"])
+        pull_dict["Counts"].append(dict1[keys]["Counts"])
         for key in variables:
             for i in variables[key]:
-                pull_dict[i].append(_finditem(dict1["scan"][keys], i))
+                pull_dict[i].append(_finditem(dict1[keys], i))

     return pd.DataFrame(data=pull_dict)
@@ -284,42 +282,6 @@ def merge(scan1, scan2, keep=True, monitor=100000):
     print("merging done")


-def add_dict(dict1, dict2):
-    """adds two dictionaries, meta of the new is saved as meata+original_filename and
-    measurements are shifted to continue with numbering of first dict
-    :arg dict1 : dictionarry to add to
-    :arg dict2 : dictionarry from which to take the measurements
-    :return dict1 : combined dictionary
-    Note: dict1 must be made from ccl, otherwise we would have to change the structure of loaded
-    dat file"""
-    try:
-        if dict1["meta"]["zebra_mode"] != dict2["meta"]["zebra_mode"]:
-            print("You are trying to add scans measured with different zebra modes")
-            return
-    # this is for the qscan case
-    except KeyError:
-        print("Zebra mode not specified")
-
-    max_measurement_dict1 = len(dict1["scan"])
-    new_filenames = np.arange(
-        max_measurement_dict1 + 1, max_measurement_dict1 + 1 + len(dict2["scan"])
-    )
-    new_meta_name = "meta" + str(dict2["meta"]["original_filename"])
-    if new_meta_name not in dict1:
-        for keys, name in zip(dict2["scan"], new_filenames):
-            dict2["scan"][keys]["file_of_origin"] = str(dict2["meta"]["original_filename"])
-            dict1["scan"][name] = dict2["scan"][keys]
-        dict1[new_meta_name] = dict2["meta"]
-    else:
-        raise KeyError(
-            str(
-                "The file %s has alredy been added to %s"
-                % (dict2["meta"]["original_filename"], dict1["meta"]["original_filename"])
-            )
-        )
-    return dict1
-
-
 def auto(dict):
     """takes just unique tuples from all tuples in dictionary returend by scan_dict
     intendet for automatic merge if you doesent want to specify what scans to merge together
@@ -344,31 +306,31 @@ def scan_dict(dict, precision=0.5):
     note: can be checked by "not d", true if empty
     """
-    if dict["meta"]["zebra_mode"] == "bi":
+    if dict[0]["meta"]["zebra_mode"] == "bi":
         angles = ["twotheta", "omega", "chi", "phi"]
-    elif dict["meta"]["zebra_mode"] == "nb":
+    elif dict[0]["meta"]["zebra_mode"] == "nb":
         angles = ["gamma", "omega", "nu"]
     else:
         print("Unknown zebra mode")
         return

     d = {}
-    for i in range(len(dict["scan"])):
-        for j in range(len(dict["scan"])):
-            if dict["scan"][i] != dict["scan"][j]:
+    for i in range(len(dict)):
+        for j in range(len(dict)):
+            if dict[i] != dict[j]:
                 itup = list()
                 for k in angles:
-                    itup.append(abs(abs(dict["scan"][i][k]) - abs(dict["scan"][j][k])))
+                    itup.append(abs(abs(dict[i][k]) - abs(dict[j][k])))

                 if all(i <= precision for i in itup):
                     print(itup)
-                    print([dict["scan"][i][k] for k in angles])
-                    print([dict["scan"][j][k] for k in angles])
-                    if str([np.around(dict["scan"][i][k], 0) for k in angles]) not in d:
-                        d[str([np.around(dict["scan"][i][k], 0) for k in angles])] = list()
-                        d[str([np.around(dict["scan"][i][k], 0) for k in angles])].append((i, j))
+                    print([dict[i][k] for k in angles])
+                    print([dict[j][k] for k in angles])
+                    if str([np.around(dict[i][k], 0) for k in angles]) not in d:
+                        d[str([np.around(dict[i][k], 0) for k in angles])] = list()
+                        d[str([np.around(dict[i][k], 0) for k in angles])].append((i, j))
                     else:
-                        d[str([np.around(dict["scan"][i][k], 0) for k in angles])].append((i, j))
+                        d[str([np.around(dict[i][k], 0) for k in angles])].append((i, j))

             else:
                 pass
@@ -400,15 +362,15 @@ def variables(dictionary):
     # find all variables that are in all scans
     stdev_precision = 0.05
     all_vars = list()
-    for keys in range(len(dictionary["scan"])):
-        all_vars.append([key for key in dictionary["scan"][keys] if key != "params"])
-        if dictionary["scan"][keys]["params"]:
-            all_vars.append(key for key in dictionary["scan"][keys]["params"])
+    for keys in range(len(dictionary)):
+        all_vars.append([key for key in dictionary[keys] if key != "params"])
+        if dictionary[keys]["params"]:
+            all_vars.append(key for key in dictionary[keys]["params"])

     all_vars = [i for sublist in all_vars for i in sublist]
     # get the ones that are in all scans
     b = collections.Counter(all_vars)
-    inall = [key for key in b if b[key] == len(dictionary["scan"])]
+    inall = [key for key in b if b[key] == len(dictionary)]
     # delete those that are obviously wrong
     wrong = [
         "NP",
@@ -433,15 +395,15 @@ def variables(dictionary):
     # check for primary variable, needs to be list, we dont suspect the
     # primary variable be as a parameter (be in scan[params])
     primary_candidates = list()
-    for key in range(len(dictionary["scan"])):
+    for key in range(len(dictionary)):
         for i in inall_red:
-            if isinstance(_finditem(dictionary["scan"][key], i), list):
-                if np.std(_finditem(dictionary["scan"][key], i)) > stdev_precision:
+            if isinstance(_finditem(dictionary[key], i), list):
+                if np.std(_finditem(dictionary[key], i)) > stdev_precision:
                     primary_candidates.append(i)

     # check which of the primary are in every scan
     primary_candidates = collections.Counter(primary_candidates)
     second_round_primary_candidates = [
-        key for key in primary_candidates if primary_candidates[key] == len(dictionary["scan"])
+        key for key in primary_candidates if primary_candidates[key] == len(dictionary)
     ]

     if len(second_round_primary_candidates) == 1:
@@ -455,29 +417,29 @@ def variables(dictionary):
     # print("secondary candidates", secondary_candidates)
     # select arrays and floats and ints
     second_round_secondary_candidates = list()
-    for key in range(len(dictionary["scan"])):
+    for key in range(len(dictionary)):
         for i in secondary_candidates:
-            if isinstance(_finditem(dictionary["scan"][key], i), float):
+            if isinstance(_finditem(dictionary[key], i), float):
                 second_round_secondary_candidates.append(i)
-            elif isinstance(_finditem(dictionary["scan"][key], i), int):
+            elif isinstance(_finditem(dictionary[key], i), int):
                 second_round_secondary_candidates.append(i)
-            elif isinstance(_finditem(dictionary["scan"][key], i), list):
-                if np.std(_finditem(dictionary["scan"][key], i)) < stdev_precision:
+            elif isinstance(_finditem(dictionary[key], i), list):
+                if np.std(_finditem(dictionary[key], i)) < stdev_precision:
                     second_round_secondary_candidates.append(i)

     second_round_secondary_candidates = collections.Counter(second_round_secondary_candidates)
     second_round_secondary_candidates = [
         key
         for key in second_round_secondary_candidates
-        if second_round_secondary_candidates[key] == len(dictionary["scan"])
+        if second_round_secondary_candidates[key] == len(dictionary)
     ]
     # print("secondary candidates after second round", second_round_secondary_candidates)

     # now we check if they vary between the scans
     third_round_sec_candidates = list()
     for i in second_round_secondary_candidates:
         check_array = list()
-        for keys in range(len(dictionary["scan"])):
-            check_array.append(np.average(_finditem(dictionary["scan"][keys], i)))
+        for keys in range(len(dictionary)):
+            check_array.append(np.average(_finditem(dictionary[keys], i)))
         # print(i, check_array, np.std(check_array))
         if np.std(check_array) > stdev_precision:
             third_round_sec_candidates.append(i)