Use 'scan' instead of 'meas' for array of counts
parent b204758523
commit 199f8d9f61
@@ -77,16 +77,16 @@ def create():
         _, ext = os.path.splitext(new)
         det_data = pyzebra.parse_1D(file, ext)

-        meas_list = list(det_data["meas"].keys())
+        scan_list = list(det_data["scan"].keys())
         hkl = [
             f'{int(m["h_index"])} {int(m["k_index"])} {int(m["l_index"])}'
-            for m in det_data["meas"].values()
+            for m in det_data["scan"].values()
         ]
-        meas_table_source.data.update(
-            measurement=meas_list, hkl=hkl, peaks=[0] * len(meas_list), fit=[0] * len(meas_list)
+        scan_table_source.data.update(
+            scan=scan_list, hkl=hkl, peaks=[0] * len(scan_list), fit=[0] * len(scan_list)
         )
-        meas_table_source.selected.indices = []
-        meas_table_source.selected.indices = [0]
+        scan_table_source.selected.indices = []
+        scan_table_source.selected.indices = [0]

     ccl_file_select = Select(title="Available .ccl files")
     ccl_file_select.on_change("value", ccl_file_select_callback)
@@ -97,45 +97,45 @@ def create():
         _, ext = os.path.splitext(upload_button.filename)
         det_data = pyzebra.parse_1D(file, ext)

-        meas_list = list(det_data["meas"].keys())
+        scan_list = list(det_data["scan"].keys())
         hkl = [
             f'{int(m["h_index"])} {int(m["k_index"])} {int(m["l_index"])}'
-            for m in det_data["meas"].values()
+            for m in det_data["scan"].values()
         ]
-        meas_table_source.data.update(
-            measurement=meas_list, hkl=hkl, peaks=[0] * len(meas_list), fit=[0] * len(meas_list)
+        scan_table_source.data.update(
+            scan=scan_list, hkl=hkl, peaks=[0] * len(scan_list), fit=[0] * len(scan_list)
         )
-        meas_table_source.selected.indices = []
-        meas_table_source.selected.indices = [0]
+        scan_table_source.selected.indices = []
+        scan_table_source.selected.indices = [0]

     upload_button = FileInput(accept=".ccl")
     upload_button.on_change("value", upload_button_callback)

     def _update_table():
-        num_of_peaks = [meas.get("num_of_peaks", 0) for meas in det_data["meas"].values()]
-        fit_ok = [(1 if "fit" in meas else 0) for meas in det_data["meas"].values()]
-        meas_table_source.data.update(peaks=num_of_peaks, fit=fit_ok)
+        num_of_peaks = [scan.get("num_of_peaks", 0) for scan in det_data["scan"].values()]
+        fit_ok = [(1 if "fit" in scan else 0) for scan in det_data["scan"].values()]
+        scan_table_source.data.update(peaks=num_of_peaks, fit=fit_ok)

     def _update_plot(ind):
         nonlocal peak_pos_textinput_lock
         peak_pos_textinput_lock = True

-        meas = det_data["meas"][ind]
-        y = meas["Counts"]
-        x = meas["om"]
+        scan = det_data["scan"][ind]
+        y = scan["Counts"]
+        x = scan["om"]

         plot_scatter_source.data.update(x=x, y=y, y_upper=y + np.sqrt(y), y_lower=y - np.sqrt(y))

-        num_of_peaks = meas.get("num_of_peaks")
+        num_of_peaks = scan.get("num_of_peaks")
         if num_of_peaks is not None and num_of_peaks > 0:
-            peak_indexes = meas["peak_indexes"]
+            peak_indexes = scan["peak_indexes"]
             if len(peak_indexes) == 1:
-                peak_pos_textinput.value = str(meas["om"][peak_indexes[0]])
+                peak_pos_textinput.value = str(scan["om"][peak_indexes[0]])
             else:
-                peak_pos_textinput.value = str([meas["om"][ind] for ind in peak_indexes])
+                peak_pos_textinput.value = str([scan["om"][ind] for ind in peak_indexes])

-            plot_peak_source.data.update(x=meas["om"][peak_indexes], y=meas["peak_heights"])
-            plot_line_smooth_source.data.update(x=x, y=meas["smooth_peaks"])
+            plot_peak_source.data.update(x=scan["om"][peak_indexes], y=scan["peak_heights"])
+            plot_line_smooth_source.data.update(x=x, y=scan["smooth_peaks"])
         else:
             peak_pos_textinput.value = None
             plot_peak_source.data.update(x=[], y=[])
@@ -143,10 +143,10 @@ def create():

         peak_pos_textinput_lock = False

-        fit = meas.get("fit")
+        fit = scan.get("fit")
         if fit is not None:
-            plot_gauss_source.data.update(x=x, y=meas["fit"]["comps"]["gaussian"])
-            plot_bkg_source.data.update(x=x, y=meas["fit"]["comps"]["background"])
+            plot_gauss_source.data.update(x=x, y=scan["fit"]["comps"]["gaussian"])
+            plot_bkg_source.data.update(x=x, y=scan["fit"]["comps"]["background"])
             params = fit["result"].params
             fit_output_textinput.value = (
                 "%s \n"
@@ -226,16 +226,16 @@ def create():
     numfit_max_span = Span(location=None, dimension="height", line_dash="dashed")
     plot.add_layout(numfit_max_span)

-    # Measurement select
-    def meas_table_callback(_attr, _old, new):
+    # Scan select
+    def scan_table_callback(_attr, _old, new):
         if new:
-            _update_plot(meas_table_source.data["measurement"][new[-1]])
+            _update_plot(scan_table_source.data["scan"][new[-1]])

-    meas_table_source = ColumnDataSource(dict(measurement=[], hkl=[], peaks=[], fit=[]))
-    meas_table = DataTable(
-        source=meas_table_source,
+    scan_table_source = ColumnDataSource(dict(scan=[], hkl=[], peaks=[], fit=[]))
+    scan_table = DataTable(
+        source=scan_table_source,
         columns=[
-            TableColumn(field="measurement", title="Meas"),
+            TableColumn(field="scan", title="scan"),
             TableColumn(field="hkl", title="hkl"),
             TableColumn(field="peaks", title="Peaks"),
             TableColumn(field="fit", title="Fit"),
@@ -244,20 +244,20 @@ def create():
         index_position=None,
     )

-    meas_table_source.selected.on_change("indices", meas_table_callback)
+    scan_table_source.selected.on_change("indices", scan_table_callback)

     def peak_pos_textinput_callback(_attr, _old, new):
         if new is not None and not peak_pos_textinput_lock:
-            sel_ind = meas_table_source.selected.indices[-1]
-            meas_name = meas_table_source.data["measurement"][sel_ind]
-            meas = det_data["meas"][meas_name]
+            sel_ind = scan_table_source.selected.indices[-1]
+            scan_name = scan_table_source.data["scan"][sel_ind]
+            scan = det_data["scan"][scan_name]

-            meas["num_of_peaks"] = 1
-            peak_ind = (np.abs(meas["om"] - float(new))).argmin()
-            meas["peak_indexes"] = np.array([peak_ind], dtype=np.int64)
-            meas["peak_heights"] = np.array([meas["smooth_peaks"][peak_ind]])
+            scan["num_of_peaks"] = 1
+            peak_ind = (np.abs(scan["om"] - float(new))).argmin()
+            scan["peak_indexes"] = np.array([peak_ind], dtype=np.int64)
+            scan["peak_heights"] = np.array([scan["smooth_peaks"][peak_ind]])
             _update_table()
-            _update_plot(meas_name)
+            _update_plot(scan_name)

     peak_pos_textinput = TextInput(title="Peak position:", default_size=145)
     peak_pos_textinput.on_change("value", peak_pos_textinput_callback)
@@ -323,9 +323,9 @@ def create():
     fit_output_textinput = TextAreaInput(title="Fit results:", width=450, height=400)

     def peakfind_all_button_callback():
-        for meas in det_data["meas"].values():
+        for scan in det_data["scan"].values():
             pyzebra.ccl_findpeaks(
-                meas,
+                scan,
                 int_threshold=peak_int_ratio_spinner.value,
                 prominence=peak_prominence_spinner.value,
                 smooth=smooth_toggle.active,
@@ -335,17 +335,17 @@ def create():

         _update_table()

-        sel_ind = meas_table_source.selected.indices[-1]
-        _update_plot(meas_table_source.data["measurement"][sel_ind])
+        sel_ind = scan_table_source.selected.indices[-1]
+        _update_plot(scan_table_source.data["scan"][sel_ind])

     peakfind_all_button = Button(label="Peak Find All", button_type="primary", default_size=145)
     peakfind_all_button.on_click(peakfind_all_button_callback)

     def peakfind_button_callback():
-        sel_ind = meas_table_source.selected.indices[-1]
-        meas = meas_table_source.data["measurement"][sel_ind]
+        sel_ind = scan_table_source.selected.indices[-1]
+        scan = scan_table_source.data["scan"][sel_ind]
         pyzebra.ccl_findpeaks(
-            det_data["meas"][meas],
+            det_data["scan"][scan],
             int_threshold=peak_int_ratio_spinner.value,
             prominence=peak_prominence_spinner.value,
             smooth=smooth_toggle.active,
@@ -354,15 +354,15 @@ def create():
         )

         _update_table()
-        _update_plot(meas)
+        _update_plot(scan)

     peakfind_button = Button(label="Peak Find Current", default_size=145)
     peakfind_button.on_click(peakfind_button_callback)

     def fit_all_button_callback():
-        for meas in det_data["meas"].values():
+        for scan in det_data["scan"].values():
             pyzebra.fitccl(
-                meas,
+                scan,
                 guess=[
                     centre_guess.value,
                     sigma_guess.value,
@@ -395,19 +395,19 @@ def create():
                 numfit_max=integ_to.value,
             )

-        sel_ind = meas_table_source.selected.indices[-1]
-        _update_plot(meas_table_source.data["measurement"][sel_ind])
+        sel_ind = scan_table_source.selected.indices[-1]
+        _update_plot(scan_table_source.data["scan"][sel_ind])
         _update_table()

     fit_all_button = Button(label="Fit All", button_type="primary", default_size=145)
     fit_all_button.on_click(fit_all_button_callback)

     def fit_button_callback():
-        sel_ind = meas_table_source.selected.indices[-1]
-        meas = meas_table_source.data["measurement"][sel_ind]
+        sel_ind = scan_table_source.selected.indices[-1]
+        scan = scan_table_source.data["scan"][sel_ind]

         pyzebra.fitccl(
-            det_data["meas"][meas],
+            det_data["scan"][scan],
             guess=[
                 centre_guess.value,
                 sigma_guess.value,
@@ -440,7 +440,7 @@ def create():
             numfit_max=integ_to.value,
         )

-        _update_plot(meas)
+        _update_plot(scan)
         _update_table()

     fit_button = Button(label="Fit Current", default_size=145)
@@ -541,7 +541,7 @@ def create():
     tab_layout = column(
         row(proposal_textinput, ccl_file_select),
         row(column(Spacer(height=5), upload_div), upload_button),
-        row(meas_table, plot, Spacer(width=30), fit_output_textinput, export_layout),
+        row(scan_table, plot, Spacer(width=30), fit_output_textinput, export_layout),
         row(findpeak_controls, Spacer(width=30), fitpeak_controls),
     )

@@ -12,17 +12,17 @@ def add_dict(dict1, dict2):
     :return dict1 : combined dictionary
     Note: dict1 must be made from ccl, otherwise we would have to change the structure of loaded
     dat file"""
-    max_measurement_dict1 = max([int(str(keys)[1:]) for keys in dict1["meas"]])
+    max_measurement_dict1 = max([int(str(keys)[1:]) for keys in dict1["scan"]])
     if dict2["meta"]["data_type"] == ".ccl":
         new_filenames = [
             "M" + str(x + max_measurement_dict1)
-            for x in [int(str(keys)[1:]) for keys in dict2["meas"]]
+            for x in [int(str(keys)[1:]) for keys in dict2["scan"]]
         ]
         new_meta_name = "meta" + str(dict2["meta"]["original_filename"])
         if new_meta_name not in dict1:
-            for keys, name in zip(dict2["meas"], new_filenames):
-                dict2["meas"][keys]["file_of_origin"] = str(dict2["meta"]["original_filename"])
-                dict1["meas"][name] = dict2["meas"][keys]
+            for keys, name in zip(dict2["scan"], new_filenames):
+                dict2["scan"][keys]["file_of_origin"] = str(dict2["meta"]["original_filename"])
+                dict1["scan"][name] = dict2["scan"][keys]

             dict1[new_meta_name] = dict2["meta"]

@@ -40,14 +40,14 @@ def add_dict(dict1, dict2):
         d["h_index"] = float(hkl.split()[-3])
         d["k_index"] = float(hkl.split()[-2])
         d["l_index"] = float(hkl.split()[-1])
-        d["number_of_measurements"] = len(dict2["meas"]["NP"])
-        d["om"] = dict2["meas"]["om"]
-        d["Counts"] = dict2["meas"]["Counts"]
-        d["monitor"] = dict2["meas"]["Monitor1"][0]
+        d["number_of_measurements"] = len(dict2["scan"]["NP"])
+        d["om"] = dict2["scan"]["om"]
+        d["Counts"] = dict2["scan"]["Counts"]
+        d["monitor"] = dict2["scan"]["Monitor1"][0]
         d["temperature"] = dict2["meta"]["temp"]
         d["mag_field"] = dict2["meta"]["mf"]
         d["omega_angle"] = dict2["meta"]["omega"]
-        dict1["meas"][new_name] = d
+        dict1["scan"][new_name] = d
         print(hkl.split())
         for keys in d:
             print(keys)
@@ -80,18 +80,18 @@ def scan_dict(dict):
     """

     d = {}
-    for i in dict["meas"]:
-        for j in dict["meas"]:
-            if dict["meas"][str(i)] != dict["meas"][str(j)]:
+    for i in dict["scan"]:
+        for j in dict["scan"]:
+            if dict["scan"][str(i)] != dict["scan"][str(j)]:
                 itup = (
-                    dict["meas"][str(i)]["h_index"],
-                    dict["meas"][str(i)]["k_index"],
-                    dict["meas"][str(i)]["l_index"],
+                    dict["scan"][str(i)]["h_index"],
+                    dict["scan"][str(i)]["k_index"],
+                    dict["scan"][str(i)]["l_index"],
                 )
                 jtup = (
-                    dict["meas"][str(j)]["h_index"],
-                    dict["meas"][str(j)]["k_index"],
-                    dict["meas"][str(j)]["l_index"],
+                    dict["scan"][str(j)]["h_index"],
+                    dict["scan"][str(j)]["k_index"],
+                    dict["scan"][str(j)]["l_index"],
                 )
                 if itup != jtup:
                     pass
@@ -109,52 +109,52 @@ def scan_dict(dict):

 def compare_hkl(dict1, dict2):
     """Compares two dictionaries based on hkl indexes and return dictionary with str(h k l) as
-    key and tuple with keys to same measurement in dict1 and dict2
+    key and tuple with keys to same scan in dict1 and dict2
     :arg dict1 : first dictionary
     :arg dict2 : second dictionary
     :return d : dict with matches
-    example of one key: '0.0 0.0 -1.0 : ('M1', 'M9')' meaning that 001 hkl measurement is M1 in
+    example of one key: '0.0 0.0 -1.0 : ('M1', 'M9')' meaning that 001 hkl scan is M1 in
     first dict and M9 in second"""
     d = {}
     dupl = 0
-    for keys in dict1["meas"]:
-        for key in dict2["meas"]:
+    for keys in dict1["scan"]:
+        for key in dict2["scan"]:
             if (
-                dict1["meas"][str(keys)]["h_index"] == dict2["meas"][str(key)]["h_index"]
-                and dict1["meas"][str(keys)]["k_index"] == dict2["meas"][str(key)]["k_index"]
-                and dict1["meas"][str(keys)]["l_index"] == dict2["meas"][str(key)]["l_index"]
+                dict1["scan"][str(keys)]["h_index"] == dict2["scan"][str(key)]["h_index"]
+                and dict1["scan"][str(keys)]["k_index"] == dict2["scan"][str(key)]["k_index"]
+                and dict1["scan"][str(keys)]["l_index"] == dict2["scan"][str(key)]["l_index"]
             ):

                 if (
                     str(
                         (
-                            str(dict1["meas"][str(keys)]["h_index"])
+                            str(dict1["scan"][str(keys)]["h_index"])
                             + " "
-                            + str(dict1["meas"][str(keys)]["k_index"])
+                            + str(dict1["scan"][str(keys)]["k_index"])
                             + " "
-                            + str(dict1["meas"][str(keys)]["l_index"])
+                            + str(dict1["scan"][str(keys)]["l_index"])
                         )
                     )
                     not in d
                 ):
                     d[
                         str(
-                            str(dict1["meas"][str(keys)]["h_index"])
+                            str(dict1["scan"][str(keys)]["h_index"])
                             + " "
-                            + str(dict1["meas"][str(keys)]["k_index"])
+                            + str(dict1["scan"][str(keys)]["k_index"])
                             + " "
-                            + str(dict1["meas"][str(keys)]["l_index"])
+                            + str(dict1["scan"][str(keys)]["l_index"])
                         )
                     ] = (str(keys), str(key))
                 else:
                     dupl = dupl + 1
                     d[
                         str(
-                            str(dict1["meas"][str(keys)]["h_index"])
+                            str(dict1["scan"][str(keys)]["h_index"])
                             + " "
-                            + str(dict1["meas"][str(keys)]["k_index"])
+                            + str(dict1["scan"][str(keys)]["k_index"])
                             + " "
-                            + str(dict1["meas"][str(keys)]["l_index"])
+                            + str(dict1["scan"][str(keys)]["l_index"])
                             + "_dupl"
                             + str(dupl)
                         )
@@ -176,16 +176,16 @@ def create_tuples(x, y, y_err):


 def normalize(dict, key, monitor):
-    """Normalizes the measurement to monitor, checks if sigma exists, otherwise creates it
+    """Normalizes the scan to monitor, checks if sigma exists, otherwise creates it
     :arg dict : dictionary to from which to tkae the scan
     :arg key : which scan to normalize from dict1
     :arg monitor : final monitor
     :return counts - normalized counts
     :return sigma - normalized sigma"""

-    counts = np.array(dict["meas"][key]["Counts"])
-    sigma = np.sqrt(counts) if "sigma" not in dict["meas"][key] else dict["meas"][key]["sigma"]
-    monitor_ratio = monitor / dict["meas"][key]["monitor"]
+    counts = np.array(dict["scan"][key]["Counts"])
+    sigma = np.sqrt(counts) if "sigma" not in dict["scan"][key] else dict["scan"][key]["sigma"]
+    monitor_ratio = monitor / dict["scan"][key]["monitor"]
     scaled_counts = counts * monitor_ratio
     scaled_sigma = np.array(sigma) * monitor_ratio

@@ -195,19 +195,19 @@ def normalize(dict, key, monitor):
 def merge(dict1, dict2, keys, auto=True, monitor=100000):
     """merges the two tuples and sorts them, if om value is same, Counts value is average
     averaging is propagated into sigma if dict1 == dict2, key[1] is deleted after merging
-    :arg dict1 : dictionary to which measurement will be merged
-    :arg dict2 : dictionary from which measurement will be merged
+    :arg dict1 : dictionary to which scan will be merged
+    :arg dict2 : dictionary from which scan will be merged
     :arg keys : tuple with key to dict1 and dict2
     :arg auto : if true, when monitors are same, does not change it, if flase, takes monitor always
     :arg monitor : final monitor after merging
     note: dict1 and dict2 can be same dict
     :return dict1 with merged scan"""
     if auto:
-        if dict1["meas"][keys[0]]["monitor"] == dict2["meas"][keys[1]]["monitor"]:
-            monitor = dict1["meas"][keys[0]]["monitor"]
+        if dict1["scan"][keys[0]]["monitor"] == dict2["scan"][keys[1]]["monitor"]:
+            monitor = dict1["scan"][keys[0]]["monitor"]

     # load om and Counts
-    x1, x2 = dict1["meas"][keys[0]]["om"], dict2["meas"][keys[1]]["om"]
+    x1, x2 = dict1["scan"][keys[0]]["om"], dict2["scan"][keys[1]]["om"]
     cor_y1, y_err1 = normalize(dict1, keys[0], monitor=monitor)
     cor_y2, y_err2 = normalize(dict2, keys[1], monitor=monitor)
     # creates touples (om, Counts, sigma) for sorting and further processing
@@ -237,40 +237,40 @@ def merge(dict1, dict2, keys, auto=True, monitor=100000):
             continue

     if dict1 == dict2:
-        del dict1["meas"][keys[1]]
+        del dict1["scan"][keys[1]]

     note = (
-        f"This measurement was merged with measurement {keys[1]} from "
+        f"This scan was merged with scan {keys[1]} from "
         f'file {dict2["meta"]["original_filename"]} \n'
     )
-    if "notes" not in dict1["meas"][str(keys[0])]:
-        dict1["meas"][str(keys[0])]["notes"] = note
+    if "notes" not in dict1["scan"][str(keys[0])]:
+        dict1["scan"][str(keys[0])]["notes"] = note
     else:
-        dict1["meas"][str(keys[0])]["notes"] += note
+        dict1["scan"][str(keys[0])]["notes"] += note

-    dict1["meas"][keys[0]]["om"] = om
-    dict1["meas"][keys[0]]["Counts"] = Counts
-    dict1["meas"][keys[0]]["sigma"] = sigma
-    dict1["meas"][keys[0]]["monitor"] = monitor
+    dict1["scan"][keys[0]]["om"] = om
+    dict1["scan"][keys[0]]["Counts"] = Counts
+    dict1["scan"][keys[0]]["sigma"] = sigma
+    dict1["scan"][keys[0]]["monitor"] = monitor
     print("merging done")
     return dict1


 def substract_measurement(dict1, dict2, keys, auto=True, monitor=100000):
-    """Substracts two measurement (measurement key2 from dict2 from measurent key1 in dict1), expects om to be same
-    :arg dict1 : dictionary to which measurement will be merged
-    :arg dict2 : dictionary from which measurement will be merged
+    """Substracts two scan (scan key2 from dict2 from measurent key1 in dict1), expects om to be same
+    :arg dict1 : dictionary to which scan will be merged
+    :arg dict2 : dictionary from which scan will be merged
     :arg keys : tuple with key to dict1 and dict2
     :arg auto : if true, when monitors are same, does not change it, if flase, takes monitor always
     :arg monitor : final monitor after merging
     :returns d : dict1 with substracted Counts from dict2 and sigma that comes from the substraction"""

-    if len(dict1["meas"][keys[0]]["om"]) != len(dict2["meas"][keys[1]]["om"]):
+    if len(dict1["scan"][keys[0]]["om"]) != len(dict2["scan"][keys[1]]["om"]):
         raise ValueError("Omegas have different lengths, cannot be substracted")

     if auto:
-        if dict1["meas"][keys[0]]["monitor"] == dict2["meas"][keys[1]]["monitor"]:
-            monitor = dict1["meas"][keys[0]]["monitor"]
+        if dict1["scan"][keys[0]]["monitor"] == dict2["scan"][keys[1]]["monitor"]:
+            monitor = dict1["scan"][keys[0]]["monitor"]

     cor_y1, y_err1 = normalize(dict1, keys[0], monitor=monitor)
     cor_y2, y_err2 = normalize(dict2, keys[1], monitor=monitor)
@@ -288,21 +288,21 @@ def substract_measurement(dict1, dict2, keys, auto=True, monitor=100000):

     if len([num for num in res_nom if num < 0]) >= 0.3 * len(res_nom):
         print(
-            f"Warning! percentage of negative numbers in measurement subsracted {keys[0]} is "
+            f"Warning! percentage of negative numbers in scan subsracted {keys[0]} is "
             f"{len([num for num in res_nom if num < 0]) / len(res_nom)}"
         )

-    dict1["meas"][str(keys[0])]["Counts"] = res_nom
-    dict1["meas"][str(keys[0])]["sigma"] = res_err
-    dict1["meas"][str(keys[0])]["monitor"] = monitor
+    dict1["scan"][str(keys[0])]["Counts"] = res_nom
+    dict1["scan"][str(keys[0])]["sigma"] = res_err
+    dict1["scan"][str(keys[0])]["monitor"] = monitor
     note = (
-        f'Measurement {keys[1]} from file {dict2["meta"]["original_filename"]} '
-        f"was substracted from this measurement \n"
+        f'Scan {keys[1]} from file {dict2["meta"]["original_filename"]} '
+        f"was substracted from this scan \n"
     )
-    if "notes" not in dict1["meas"][str(keys[0])]:
-        dict1["meas"][str(keys[0])]["notes"] = note
+    if "notes" not in dict1["scan"][str(keys[0])]:
+        dict1["scan"][str(keys[0])]["notes"] = note
     else:
-        dict1["meas"][str(keys[0])]["notes"] += note
+        dict1["scan"][str(keys[0])]["notes"] += note
     return dict1


@@ -311,7 +311,7 @@ def compare_dict(dict1, dict2):
     :arg dict1 : dictionary 1 (ccl)
     :arg dict2 : dictionary 2 (ccl)
     :returns warning : dictionary with keys from primary files (if they differ) with
-    information of how many measurement differ and which ones differ
+    information of how many scan differ and which ones differ
     :returns report_string string comparing all different values respecively of measurements"""

     if dict1["meta"]["data_type"] != dict2["meta"]["data_type"]:
@@ -371,48 +371,48 @@ def compare_dict(dict1, dict2):
     # compare Measurements
     S.append(
         "Number of measurements in %s = %s \n"
-        % (dict1["meta"]["original_filename"], len(dict1["meas"]))
+        % (dict1["meta"]["original_filename"], len(dict1["scan"]))
     )
     S.append(
         "Number of measurements in %s = %s \n"
-        % (dict2["meta"]["original_filename"], len(dict2["meas"]))
+        % (dict2["meta"]["original_filename"], len(dict2["scan"]))
     )
     S.append("Different values in Measurements:\n")
     select_set = ["om", "Counts", "sigma"]
     exlude_set = ["time", "Counts", "date", "notes"]
     for keys1 in comp:
-        for key2 in dict1["meas"][str(comp[str(keys1)][0])]:
+        for key2 in dict1["scan"][str(comp[str(keys1)][0])]:
             if key2 in exlude_set:
                 continue
             if key2 not in select_set:
                 try:
                     if (
-                        dict1["meas"][comp[str(keys1)][0]][str(key2)]
-                        != dict2["meas"][str(comp[str(keys1)][1])][str(key2)]
+                        dict1["scan"][comp[str(keys1)][0]][str(key2)]
+                        != dict2["scan"][str(comp[str(keys1)][1])][str(key2)]
                     ):
                         S.append(
-                            "Measurement value "
+                            "Scan value "
                             "%s"
                             ", with hkl %s differs in meausrements %s and %s \n"
                             % (key2, keys1, comp[str(keys1)][0], comp[str(keys1)][1])
                         )
                         S.append(
                             " dict1: %s \n"
-                            % str(dict1["meas"][comp[str(keys1)][0]][str(key2)])
+                            % str(dict1["scan"][comp[str(keys1)][0]][str(key2)])
                         )
                         S.append(
                             " dict2: %s \n"
-                            % str(dict2["meas"][comp[str(keys1)][1]][str(key2)])
+                            % str(dict2["scan"][comp[str(keys1)][1]][str(key2)])
                         )
                         if key2 not in conflicts:
                             conflicts[key2] = {}
                             conflicts[key2]["amount"] = 1
-                            conflicts[key2]["meas"] = str(comp[str(keys1)])
+                            conflicts[key2]["scan"] = str(comp[str(keys1)])
                         else:

                             conflicts[key2]["amount"] = conflicts[key2]["amount"] + 1
-                            conflicts[key2]["meas"] = (
-                                conflicts[key2]["meas"] + " " + (str(comp[str(keys1)]))
+                            conflicts[key2]["scan"] = (
+                                conflicts[key2]["scan"] + " " + (str(comp[str(keys1)]))
                             )
                 except KeyError as e:
                     print("Missing keys, some files were probably merged or substracted")
@@ -420,11 +420,11 @@ def compare_dict(dict1, dict2):

             else:
                 try:
-                    comparison = list(dict1["meas"][comp[str(keys1)][0]][str(key2)]) == list(
-                        dict2["meas"][comp[str(keys1)][1]][str(key2)]
+                    comparison = list(dict1["scan"][comp[str(keys1)][0]][str(key2)]) == list(
+                        dict2["scan"][comp[str(keys1)][1]][str(key2)]
                     )
-                    if len(list(dict1["meas"][comp[str(keys1)][0]][str(key2)])) != len(
-                        list(dict2["meas"][comp[str(keys1)][1]][str(key2)])
+                    if len(list(dict1["scan"][comp[str(keys1)][0]][str(key2)])) != len(
+                        list(dict2["scan"][comp[str(keys1)][1]][str(key2)])
                     ):
                         if str("different length of %s" % key2) not in warnings:
                             warnings[str("different length of %s" % key2)] = list()
@@ -437,27 +437,27 @@ def compare_dict(dict1, dict2):
                     )
                     if not comparison:
                         S.append(
-                            "Measurement value "
+                            "Scan value "
                             "%s"
-                            " differs in measurement %s and %s \n"
+                            " differs in scan %s and %s \n"
                             % (key2, comp[str(keys1)][0], comp[str(keys1)][1])
                         )
                         S.append(
                             " dict1: %s \n"
-                            % str(list(dict1["meas"][comp[str(keys1)][0]][str(key2)]))
+                            % str(list(dict1["scan"][comp[str(keys1)][0]][str(key2)]))
                         )
                         S.append(
                             " dict2: %s \n"
-                            % str(list(dict2["meas"][comp[str(keys1)][1]][str(key2)]))
+                            % str(list(dict2["scan"][comp[str(keys1)][1]][str(key2)]))
                         )
                         if key2 not in conflicts:
                             conflicts[key2] = {}
                             conflicts[key2]["amount"] = 1
-                            conflicts[key2]["meas"] = str(comp[str(keys1)])
+                            conflicts[key2]["scan"] = str(comp[str(keys1)])
                         else:
                             conflicts[key2]["amount"] = conflicts[key2]["amount"] + 1
-                            conflicts[key2]["meas"] = (
-                                conflicts[key2]["meas"] + " " + (str(comp[str(keys1)]))
+                            conflicts[key2]["scan"] = (
+                                conflicts[key2]["scan"] + " " + (str(comp[str(keys1)]))
                             )
                 except KeyError as e:
                     print("Missing keys, some files were probably merged or substracted")
@@ -465,7 +465,7 @@ def compare_dict(dict1, dict2):

     for keys in conflicts:
         try:
-            conflicts[str(keys)]["meas"] = conflicts[str(keys)]["meas"].split(" ")
+            conflicts[str(keys)]["scan"] = conflicts[str(keys)]["scan"].split(" ")
         except:
             continue
     report_string = "".join(S)
@@ -480,18 +480,18 @@ def guess_next(dict1, dict2, comp):
         if (
             abs(
                 (
-                    dict1["meas"][str(comp[keys][0])]["temperature"]
-                    - dict2["meas"][str(comp[keys][1])]["temperature"]
+                    dict1["scan"][str(comp[keys][0])]["temperature"]
+                    - dict2["scan"][str(comp[keys][1])]["temperature"]
                 )
-                / dict2["meas"][str(comp[keys][1])]["temperature"]
+                / dict2["scan"][str(comp[keys][1])]["temperature"]
             )
             < threshold
             and abs(
                 (
-                    dict1["meas"][str(comp[keys][0])]["mag_field"]
-                    - dict2["meas"][str(comp[keys][1])]["mag_field"]
+                    dict1["scan"][str(comp[keys][0])]["mag_field"]
+                    - dict2["scan"][str(comp[keys][1])]["mag_field"]
                 )
-                / dict2["meas"][str(comp[keys][1])]["mag_field"]
+                / dict2["scan"][str(comp[keys][1])]["mag_field"]
             )
             < threshold
         ):
@@ -5,11 +5,11 @@ from scipy.signal import savgol_filter


 def ccl_findpeaks(
-    meas, int_threshold=0.8, prominence=50, smooth=False, window_size=7, poly_order=3
+    scan, int_threshold=0.8, prominence=50, smooth=False, window_size=7, poly_order=3
 ):

-    """function iterates through the dictionary created by load_cclv2 and locates peaks for each measurement
-    args: meas - a single measurement,
+    """function iterates through the dictionary created by load_cclv2 and locates peaks for each scan
+    args: scan - a single scan,

     int_threshold - fraction of threshold_intensity/max_intensity, must be positive num between 0 and 1
     i.e. will only detect peaks above 75% of max intensity
@@ -54,8 +54,8 @@ def ccl_findpeaks(
         prominence = 50
         print("Invalid value for prominence, select positive number, new value set to:", prominence)

-    omega = meas["om"]
-    counts = np.array(meas["Counts"])
+    omega = scan["om"]
+    counts = np.array(scan["Counts"])
     if smooth:
         itp = interp1d(omega, counts, kind="linear")
         absintensity = [abs(number) for number in counts]
@@ -69,7 +69,7 @@ def ccl_findpeaks(
     peaks, properties = sc.signal.find_peaks(
         smooth_peaks, height=int_threshold * max(smooth_peaks), prominence=prominence
     )
-    meas["num_of_peaks"] = len(peaks)
-    meas["peak_indexes"] = peaks
-    meas["peak_heights"] = properties["peak_heights"]
-    meas["smooth_peaks"] = smooth_peaks  # smoothed curve
+    scan["num_of_peaks"] = len(peaks)
+    scan["peak_indexes"] = peaks
+    scan["peak_heights"] = properties["peak_heights"]
+    scan["smooth_peaks"] = smooth_peaks  # smoothed curve
@@ -30,42 +30,42 @@ def export_comm(data, path, lorentz=False):
     padding = [4, 6, 10, 8]

     with open(str(path + extension), "w") as out_file:
-        for key, meas in data["meas"].items():
-            if "fit" not in meas:
-                print("Measurement skipped - no fit value for:", key)
+        for key, scan in data["scan"].items():
+            if "fit" not in scan:
+                print("Scan skipped - no fit value for:", key)
                 continue
-            meas_number_str = f"{key:{align}{padding[0]}}"
-            h_str = f'{int(meas["h_index"]):{padding[1]}}'
-            k_str = f'{int(meas["k_index"]):{padding[1]}}'
-            l_str = f'{int(meas["l_index"]):{padding[1]}}'
+            scan_number_str = f"{key:{align}{padding[0]}}"
+            h_str = f'{int(scan["h_index"]):{padding[1]}}'
+            k_str = f'{int(scan["k_index"]):{padding[1]}}'
+            l_str = f'{int(scan["l_index"]):{padding[1]}}'
             if data["meta"]["area_method"] == "fit":
-                area = float(meas["fit"]["fit_area"].n)
+                area = float(scan["fit"]["fit_area"].n)
                 sigma_str = (
-                    f'{"{:8.2f}".format(float(meas["fit"]["fit_area"].s)):{align}{padding[2]}}'
+                    f'{"{:8.2f}".format(float(scan["fit"]["fit_area"].s)):{align}{padding[2]}}'
                 )
             elif data["meta"]["area_method"] == "integ":
-                area = float(meas["fit"]["int_area"].n)
+                area = float(scan["fit"]["int_area"].n)
                 sigma_str = (
-                    f'{"{:8.2f}".format(float(meas["fit"]["int_area"].s)):{align}{padding[2]}}'
+                    f'{"{:8.2f}".format(float(scan["fit"]["int_area"].s)):{align}{padding[2]}}'
                 )

             if zebra_mode == "bi":
-                area = correction(area, lorentz, zebra_mode, meas["twotheta_angle"])
+                area = correction(area, lorentz, zebra_mode, scan["twotheta_angle"])
                 int_str = f'{"{:8.2f}".format(area):{align}{padding[2]}}'
-                angle_str1 = f'{meas["twotheta_angle"]:{padding[3]}}'
-                angle_str2 = f'{meas["omega_angle"]:{padding[3]}}'
-                angle_str3 = f'{meas["chi_angle"]:{padding[3]}}'
-                angle_str4 = f'{meas["phi_angle"]:{padding[3]}}'
+                angle_str1 = f'{scan["twotheta_angle"]:{padding[3]}}'
+                angle_str2 = f'{scan["omega_angle"]:{padding[3]}}'
+                angle_str3 = f'{scan["chi_angle"]:{padding[3]}}'
+                angle_str4 = f'{scan["phi_angle"]:{padding[3]}}'
             elif zebra_mode == "nb":
-                area = correction(area, lorentz, zebra_mode, meas["gamma_angle"], meas["nu_angle"])
+                area = correction(area, lorentz, zebra_mode, scan["gamma_angle"], scan["nu_angle"])
                 int_str = f'{"{:8.2f}".format(area):{align}{padding[2]}}'
-                angle_str1 = f'{meas["gamma_angle"]:{padding[3]}}'
-                angle_str2 = f'{meas["omega_angle"]:{padding[3]}}'
-                angle_str3 = f'{meas["nu_angle"]:{padding[3]}}'
-                angle_str4 = f'{meas["unkwn_angle"]:{padding[3]}}'
+                angle_str1 = f'{scan["gamma_angle"]:{padding[3]}}'
+                angle_str2 = f'{scan["omega_angle"]:{padding[3]}}'
+                angle_str3 = f'{scan["nu_angle"]:{padding[3]}}'
+                angle_str4 = f'{scan["unkwn_angle"]:{padding[3]}}'

             line = (
-                meas_number_str
+                scan_number_str
                 + h_str
                 + l_str
                 + k_str
@@ -32,7 +32,7 @@ def create_uncertanities(y, y_err):


 def fitccl(
-    meas,
+    scan,
     guess,
     vary,
     constraints_min,
@@ -42,7 +42,7 @@ def fitccl(
     binning=None,
 ):
     """Made for fitting of ccl date where 1 peak is expected. Allows for combination of gaussian and linear model combination
-    :param meas: measurement in the data dict (i.e. M123)
+    :param scan: scan in the data dict (i.e. M123)
     :param guess: initial guess for the fitting, if none, some values are added automatically in order (see below)
     :param vary: True if parameter can vary during fitting, False if it to be fixed
     :param numfit_min: minimal value on x axis for numerical integration - if none is centre of gaussian minus 3 sigma
@@ -59,33 +59,33 @@ def fitccl(
     constraints_min = [23, None, 50, 0, 0]
     constraints_min = [80, None, 1000, 0, 100]
     """
-    if len(meas["peak_indexes"]) > 1:
+    if len(scan["peak_indexes"]) > 1:
         # return in case of more than 1 peaks
-        print("More than 1 peak, measurement skipped")
+        print("More than 1 peak, scan skipped")
         return
     if binning is None or binning == 0 or binning == 1:
-        x = list(meas["om"])
-        y = list(meas["Counts"])
-        y_err = list(np.sqrt(y)) if meas.get("sigma", None) is None else list(meas["sigma"])
-        print(meas["peak_indexes"])
-        if not meas["peak_indexes"]:
+        x = list(scan["om"])
+        y = list(scan["Counts"])
+        y_err = list(np.sqrt(y)) if scan.get("sigma", None) is None else list(scan["sigma"])
+        print(scan["peak_indexes"])
+        if not scan["peak_indexes"]:
             centre = np.mean(x)
         else:
-            centre = x[int(meas["peak_indexes"])]
+            centre = x[int(scan["peak_indexes"])]
     else:
-        x = list(meas["om"])
-        if not meas["peak_indexes"]:
+        x = list(scan["om"])
+        if not scan["peak_indexes"]:
             centre = np.mean(x)
         else:
-            centre = x[int(meas["peak_indexes"])]
+            centre = x[int(scan["peak_indexes"])]
         x = bin_data(x, binning)
-        y = list(meas["Counts"])
-        y_err = list(np.sqrt(y)) if meas.get("sigma", None) is None else list(meas["sigma"])
+        y = list(scan["Counts"])
+        y_err = list(np.sqrt(y)) if scan.get("sigma", None) is None else list(scan["sigma"])
         combined = bin_data(create_uncertanities(y, y_err), binning)
         y = [combined[i].n for i in range(len(combined))]
         y_err = [combined[i].s for i in range(len(combined))]

-    if len(meas["peak_indexes"]) == 0:
+    if len(scan["peak_indexes"]) == 0:
         # Case for no peak, gaussian in centre, sigma as 20% of range
         print("No peak")
         peak_index = find_nearest(x, np.mean(x))
@@ -96,10 +96,10 @@ def fitccl(
         guess[4] = np.mean(y) if guess[4] is None else guess[4]
         constraints_min[2] = 0

-    elif len(meas["peak_indexes"]) == 1:
+    elif len(scan["peak_indexes"]) == 1:
         # case for one peak, takse into account users guesses
         print("one peak")
-        peak_height = meas["peak_heights"]
+        peak_height = scan["peak_heights"]
         guess[0] = centre if guess[0] is None else guess[0]
         guess[1] = 0.1 if guess[1] is None else guess[1]
         guess[2] = float(peak_height / 10) if guess[2] is None else float(guess[2])
@@ -144,11 +144,11 @@ def fitccl(
     fit_area = u.ufloat(result.params["g_amp"].value, result.params["g_amp"].stderr)
     comps = result.eval_components()

-    if len(meas["peak_indexes"]) == 0:
+    if len(scan["peak_indexes"]) == 0:
         # for case of no peak, there is no reason to integrate, therefore fit and int are equal
         int_area = fit_area

-    elif len(meas["peak_indexes"]) == 1:
+    elif len(scan["peak_indexes"]) == 1:
         gauss_3sigmamin = find_nearest(
             x, result.params["g_cen"].value - 3 * result.params["g_width"].value
         )
@@ -224,4 +224,4 @@ def fitccl(
     d["result"] = result
     d["comps"] = comps
     d["numfit"] = [numfit_min, numfit_max]
-    meas["fit"] = d
+    scan["fit"] = d
@@ -58,7 +58,7 @@ META_VARS_FLOAT = (
 META_UB_MATRIX = ("ub1j", "ub2j", "ub3j")

 CCL_FIRST_LINE = (
-    # the first element is `measurement_number`, which we don't save to metadata
+    # the first element is `scan_number`, which we don't save to metadata
     ("h_index", float),
     ("k_index", float),
     ("l_index", float),
@@ -100,8 +100,8 @@ def load_1D(filepath):

     :arg filepath
     :returns det_variables
-    - dictionary of all detector/scan variables and dictinionary for every measurement.
-    Names of these dictionaries are M + measurement number. They include HKL indeces, angles,
+    - dictionary of all detector/scan variables and dictinionary for every scan.
+    Names of these dictionaries are M + scan number. They include HKL indeces, angles,
     monitors, stepsize and array of counts
     """
     with open(filepath, "r") as infile:
@@ -130,7 +130,7 @@ def parse_1D(fileobj, data_type):
             break

     # read data
-    measurements = {}
+    scan = {}
     if data_type == ".ccl":
         decimal = list()

@@ -144,7 +144,7 @@ def parse_1D(fileobj, data_type):
             d = {}

             # first line
-            measurement_number, *params = line.split()
+            scan_number, *params = line.split()
             for param, (param_name, param_type) in zip(params, ccl_first_line):
                 d[param_name] = param_type(param)

@@ -170,7 +170,7 @@ def parse_1D(fileobj, data_type):
                 counts.extend(map(int, next(fileobj).split()))
             d["Counts"] = counts

-            measurements[int(measurement_number)] = d
+            scan[int(scan_number)] = d

         if all(decimal):
             metadata["indices"] = "hkl"
@@ -209,7 +209,7 @@ def parse_1D(fileobj, data_type):
         data_cols["phi_angle"] = metadata["phi"]
         data_cols["nu_angle"] = metadata["nu"]

-        measurements[1] = dict(data_cols)
+        scan[1] = dict(data_cols)

     else:
         print("Unknown file extention")
@@ -218,4 +218,4 @@ def parse_1D(fileobj, data_type):
     metadata["data_type"] = data_type
     metadata["area_method"] = "fit"

-    return {"meta": metadata, "meas": measurements}
+    return {"meta": metadata, "scan": scan}
@@ -38,12 +38,10 @@ def load_dats(filepath):
             dict1 = add_dict(dict1, load_1D(file_list[i][0]))
         else:
             dict1 = add_dict(dict1, load_1D(file_list[i]))
-        dict1["meas"][i + 1]["params"] = {}
+        dict1["scan"][i + 1]["params"] = {}
         if data_type == "txt":
             for x in range(len(col_names) - 1):
-                dict1["meas"][i + 1]["params"][
-                    col_names[x + 1]
-                ] = file_list[i][x + 1]
+                dict1["scan"][i + 1]["params"][col_names[x + 1]] = file_list[i][x + 1]

     return dict1

@@ -55,7 +53,7 @@ def create_dataframe(dict1):
     # create dictionary to which we pull only wanted items before transforming it to pd.dataframe
     pull_dict = {}
     pull_dict["filenames"] = list()
-    for key in dict1["meas"][1]["params"]:
+    for key in dict1["scan"][1]["params"]:
         pull_dict[key] = list()
     pull_dict["temperature"] = list()
     pull_dict["mag_field"] = list()
@@ -65,21 +63,19 @@ def create_dataframe(dict1):
     pull_dict["Counts"] = list()

     # populate the dict
-    for keys in dict1["meas"]:
-        if "file_of_origin" in dict1["meas"][keys]:
-            pull_dict["filenames"].append(
-                dict1["meas"][keys]["file_of_origin"].split("/")[-1]
-            )
+    for keys in dict1["scan"]:
+        if "file_of_origin" in dict1["scan"][keys]:
+            pull_dict["filenames"].append(dict1["scan"][keys]["file_of_origin"].split("/")[-1])
         else:
             pull_dict["filenames"].append(dict1["meta"]["original_filename"].split("/")[-1])
-        for key in dict1["meas"][keys]["params"]:
-            pull_dict[str(key)].append(float(dict1["meas"][keys]["params"][key]))
-        pull_dict["temperature"].append(dict1["meas"][keys]["temperature"])
-        pull_dict["mag_field"].append(dict1["meas"][keys]["mag_field"])
-        pull_dict["fit_area"].append(dict1["meas"][keys]["fit"]["fit_area"])
-        pull_dict["int_area"].append(dict1["meas"][keys]["fit"]["int_area"])
-        pull_dict["om"].append(dict1["meas"][keys]["om"])
-        pull_dict["Counts"].append(dict1["meas"][keys]["Counts"])
+        for key in dict1["scan"][keys]["params"]:
+            pull_dict[str(key)].append(float(dict1["scan"][keys]["params"][key]))
+        pull_dict["temperature"].append(dict1["scan"][keys]["temperature"])
+        pull_dict["mag_field"].append(dict1["scan"][keys]["mag_field"])
+        pull_dict["fit_area"].append(dict1["scan"][keys]["fit"]["fit_area"])
+        pull_dict["int_area"].append(dict1["scan"][keys]["fit"]["int_area"])
+        pull_dict["om"].append(dict1["scan"][keys]["om"])
+        pull_dict["Counts"].append(dict1["scan"][keys]["Counts"])

     return pd.DataFrame(data=pull_dict)

@@ -145,12 +141,14 @@ def make_graph(data, sorting_parameter, style):
     plt.clim(color_matrix.mean(), color_matrix.max())

     return fig


 def save_dict(obj, name):
     """ saves dictionary as pickle file in binary format
     :arg obj - object to save
     :arg name - name of the file
     NOTE: path should be added later"""
-    with open(name + '.pkl', 'wb') as f:
+    with open(name + ".pkl", "wb") as f:
         pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)


@@ -159,15 +157,17 @@ def load_dict(name):
     :arg name - name of the file to load
     NOTE: expect the file in the same folder, path should be added later
     :return dictionary"""
-    with open(name + '.pkl', 'rb') as f:
+    with open(name + ".pkl", "rb") as f:
         return pickle.load(f)


 # pickle, mat, h5, txt, csv, json
 def save_table(data, filetype, name, path=None):
     print("Saving: ", filetype)
     path = "" if path is None else path
     if filetype == "pickle":
         # to work with uncertanities, see uncertanity module
-        with open(path + name + '.pkl', 'wb') as f:
+        with open(path + name + ".pkl", "wb") as f:
             pickle.dump(data, f, pickle.HIGHEST_PROTOCOL)
     if filetype == "mat":
         # matlab doesent allow some special character to be in var names, also cant start with
@@ -176,21 +176,23 @@ def save_table(data, filetype, name, path=None):
         data["fit_area_err"] = [data["fit_area"][i].s for i in range(len(data["fit_area"]))]
         data["int_area_nom"] = [data["int_area"][i].n for i in range(len(data["int_area"]))]
         data["int_area_err"] = [data["int_area"][i].s for i in range(len(data["int_area"]))]
-        data = data.drop(columns=['fit_area', 'int_area'])
-        remove_characters = [" ", "[", "]", "{", "}", "(",")"]
+        data = data.drop(columns=["fit_area", "int_area"])
+        remove_characters = [" ", "[", "]", "{", "}", "(", ")"]
         for character in remove_characters:
-            data.columns = [data.columns[i].replace(character,"") for i in range(len(data.columns))]
-        sio.savemat((path + name + '.mat'), {name: col.values for name, col in data.items()})
+            data.columns = [
+                data.columns[i].replace(character, "") for i in range(len(data.columns))
+            ]
+        sio.savemat((path + name + ".mat"), {name: col.values for name, col in data.items()})
     if filetype == "csv" or "txt":
         data["fit_area_nom"] = [data["fit_area"][i].n for i in range(len(data["fit_area"]))]
         data["fit_area_err"] = [data["fit_area"][i].s for i in range(len(data["fit_area"]))]
         data["int_area_nom"] = [data["int_area"][i].n for i in range(len(data["int_area"]))]
         data["int_area_err"] = [data["int_area"][i].s for i in range(len(data["int_area"]))]
-        data = data.drop(columns=['fit_area', 'int_area', 'om', 'Counts'])
+        data = data.drop(columns=["fit_area", "int_area", "om", "Counts"])
         if filetype == "csv":
-            data.to_csv(path + name + '.csv')
+            data.to_csv(path + name + ".csv")
         if filetype == "txt":
-            with open((path + name + '.txt'), 'w') as outfile:
+            with open((path + name + ".txt"), "w") as outfile:
                 data.to_string(outfile)
     if filetype == "h5":
         hdf = pd.HDFStore((path + name + ".h5"))
@@ -198,9 +200,3 @@ def save_table(data, filetype, name, path=None):
     hdf.close()
 if filetype == "json":
     data.to_json((path + name + ".json"))
-
-
-
-
-
-