From 15d634bb8279f983c3ae930802b0c0938eac5add Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Tue, 30 Jul 2024 13:58:16 +0200 Subject: [PATCH 001/159] moved radial_integration algo out of work function --- dap/algos/__init__.py | 2 +- dap/algos/radprof.py | 46 +++++++++++++++++++++++++++++++++++++++++++ dap/worker.py | 45 +++--------------------------------------- 3 files changed, 50 insertions(+), 43 deletions(-) diff --git a/dap/algos/__init__.py b/dap/algos/__init__.py index 064c713..45f00c7 100644 --- a/dap/algos/__init__.py +++ b/dap/algos/__init__.py @@ -1,4 +1,4 @@ -from .radprof import prepare_radial_profile, radial_profile +from .radprof import calc_radial_integration diff --git a/dap/algos/radprof.py b/dap/algos/radprof.py index bad21f8..1796bac 100644 --- a/dap/algos/radprof.py +++ b/dap/algos/radprof.py @@ -1,6 +1,52 @@ import numpy as np +def calc_radial_integration(results, data, keep_pixels, pixel_mask_pf, center_radial_integration, r_radial_integration): + data_copy_1 = np.copy(data) + + if keep_pixels is None and pixel_mask_pf is not None: + keep_pixels = (pixel_mask_pf != 0) + if center_radial_integration is None: + center_radial_integration = [results["beam_center_x"], results["beam_center_y"]] + r_radial_integration = None + if r_radial_integration is None: + r_radial_integration, nr_radial_integration = prepare_radial_profile(data_copy_1, center_radial_integration, keep_pixels) + r_min_max = [int(np.min(r_radial_integration)), int(np.max(r_radial_integration)) + 1] + + + apply_threshold = results.get("apply_threshold", False) + + if apply_threshold and all(k in results for k in ("threshold_min", "threshold_max")): + threshold_min = float(results["threshold_min"]) + threshold_max = float(results["threshold_max"]) + data_copy_1[data_copy_1 < threshold_min] = np.nan + if threshold_max > threshold_min: + data_copy_1[data_copy_1 > threshold_max] = np.nan + + rp = radial_profile(data_copy_1, r_radial_integration, nr_radial_integration, 
keep_pixels) + + silent_region_min = results.get("radial_integration_silent_min", None) + silent_region_max = results.get("radial_integration_silent_max", None) + + if ( + silent_region_min is not None and + silent_region_max is not None and + silent_region_max > silent_region_min and + silent_region_min > r_min_max[0] and + silent_region_max < r_min_max[1] + ): + + integral_silent_region = np.sum(rp[silent_region_min:silent_region_max]) + rp = rp / integral_silent_region + results["radint_normalised"] = [silent_region_min, silent_region_max] + + results["radint_I"] = list(rp[r_min_max[0]:]) + results["radint_q"] = r_min_max + + return keep_pixels, center_radial_integration, r_radial_integration + + + def radial_profile(data, r, nr, keep_pixels=None): if keep_pixels is not None: tbin = np.bincount(r, data[keep_pixels].ravel()) diff --git a/dap/worker.py b/dap/worker.py index 066d0f9..d98a1c3 100644 --- a/dap/worker.py +++ b/dap/worker.py @@ -10,7 +10,7 @@ import numpy as np import zmq from peakfinder8_extension import peakfinder_8 -from algos import prepare_radial_profile, radial_profile +from algos import calc_radial_integration FLAGS = 0 @@ -250,51 +250,12 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host results["saturated_pixels_x"] = saturated_pixels_coordinates[1].tolist() results["saturated_pixels_y"] = saturated_pixels_coordinates[0].tolist() + # pump probe analysis do_radial_integration = results.get("do_radial_integration", False) - if do_radial_integration: + keep_pixels, center_radial_integration, r_radial_integration = calc_radial_integration(results, data, keep_pixels, pixel_mask_pf, center_radial_integration, r_radial_integration) - data_copy_1 = np.copy(data) - - if keep_pixels is None and pixel_mask_pf is not None: - keep_pixels = pixel_mask_pf!=0 - if center_radial_integration is None: - center_radial_integration = [results["beam_center_x"], results["beam_center_y"]] - r_radial_integration = None - if 
r_radial_integration is None: - r_radial_integration, nr_radial_integration = prepare_radial_profile(data_copy_1, center_radial_integration, keep_pixels) - r_min_max = [int(np.min(r_radial_integration)), int(np.max(r_radial_integration)) + 1] - - - apply_threshold = results.get("apply_threshold", False) - - if apply_threshold and all(k in results for k in ("threshold_min", "threshold_max")): - threshold_min = float(results["threshold_min"]) - threshold_max = float(results["threshold_max"]) - data_copy_1[data_copy_1 < threshold_min] = np.nan - if threshold_max > threshold_min: - data_copy_1[data_copy_1 > threshold_max] = np.nan - - rp = radial_profile(data_copy_1, r_radial_integration, nr_radial_integration, keep_pixels) - - silent_region_min = results.get("radial_integration_silent_min", None) - silent_region_max = results.get("radial_integration_silent_max", None) - - if ( - silent_region_min is not None and - silent_region_max is not None and - silent_region_max > silent_region_min and - silent_region_min > r_min_max[0] and - silent_region_max < r_min_max[1] - ): - - integral_silent_region = np.sum(rp[silent_region_min:silent_region_max]) - rp = rp / integral_silent_region - results["radint_normalised"] = [silent_region_min, silent_region_max] - - results["radint_I"] = list(rp[r_min_max[0]:]) - results["radint_q"] = r_min_max #copy image to work with peakfinder, just in case d = np.copy(data) -- 2.49.0 From 0ec91cd5d94d1cc77c760f82a60e6aaf123bb89a Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Tue, 30 Jul 2024 14:18:37 +0200 Subject: [PATCH 002/159] moved apply_additional_mask out of work function --- dap/algos/__init__.py | 1 + dap/worker.py | 59 +++---------------------------------------- 2 files changed, 4 insertions(+), 56 deletions(-) diff --git a/dap/algos/__init__.py b/dap/algos/__init__.py index 45f00c7..f77c042 100644 --- a/dap/algos/__init__.py +++ b/dap/algos/__init__.py @@ -1,4 +1,5 @@ from .radprof import calc_radial_integration +from .addmask 
import calc_apply_additional_mask diff --git a/dap/worker.py b/dap/worker.py index d98a1c3..4f2b6fc 100644 --- a/dap/worker.py +++ b/dap/worker.py @@ -10,7 +10,7 @@ import numpy as np import zmq from peakfinder8_extension import peakfinder_8 -from algos import calc_radial_integration +from algos import calc_radial_integration, calc_apply_additional_mask FLAGS = 0 @@ -187,61 +187,8 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host # add additional mask at the edge of modules for JF06T08 apply_additional_mask = results.get("apply_additional_mask", False) - if detector == "JF06T08V04" and apply_additional_mask: - # edge pixels - pixel_mask_pf[67:1097,1063] = 0 - pixel_mask_pf[0:1030, 1100] = 0 - - pixel_mask_pf[1106:2136, 1131] = 0 - pixel_mask_pf[1039:2069, 1168] = 0 - - pixel_mask_pf[1039:2069, 1718] = 0 - pixel_mask_pf[1039:2069, 1681] = 0 - - pixel_mask_pf[1106:2136, 618] = 0 - - pixel_mask_pf[1106:2136, 581] = 0 - - pixel_mask_pf[67:1097,513] = 0 - - pixel_mask_pf[67:1097, 550] = 0 - - pixel_mask_pf[0:1030, 1650] = 0 - - pixel_mask_pf[0:1030, 1613] = 0 - - pixel_mask_pf[1106, 68:582] = 0 - - pixel_mask_pf[1096, 550:1064] = 0 - pixel_mask_pf[1106, 618:1132] = 0 - - pixel_mask_pf[1029, 1100:1614] = 0 - pixel_mask_pf[1039, 1168:1682] = 0 - - pixel_mask_pf[1039, 1718:2230] = 0 - - pixel_mask_pf[1096, 0:513] = 0 - - pixel_mask_pf[1029, 1650:2163] = 0 - - pixel_mask_pf[2068, 1168:2232] = 0 - - pixel_mask_pf[67,0:1063] = 0 - - #bad region in left bottom inner module - pixel_mask_pf[842:1097, 669:671] = 0 - - #second bad region in left bottom inner module - pixel_mask_pf[1094, 620:807] = 0 - - # vertical line in upper left bottom module - pixel_mask_pf[842:1072, 87:90] = 0 - - pixel_mask_pf[1794, 1503:1550] = 0 - - if detector == "JF17T16V01" and apply_additional_mask: - # mask module 11 - pixel_mask_pf[2619:3333,1577:2607] = 0 + if apply_additional_mask: + calc_apply_additional_mask(detector, pixel_mask_pf) if pixel_mask_corrected is 
not None: data_s = copy(image) -- 2.49.0 From 49aa4bad143bc1b5b61acbd77bf1ce744fea476e Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Tue, 30 Jul 2024 14:25:27 +0200 Subject: [PATCH 003/159] moved apply_additional_mask out of work function --- dap/algos/addmask.py | 61 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 61 insertions(+) create mode 100644 dap/algos/addmask.py diff --git a/dap/algos/addmask.py b/dap/algos/addmask.py new file mode 100644 index 0000000..bae5302 --- /dev/null +++ b/dap/algos/addmask.py @@ -0,0 +1,61 @@ +#TODO: find a better way to handle this + +def calc_apply_additional_mask(detector, pixel_mask_pf): + if detector == "JF06T08V04": + # edge pixels + pixel_mask_pf[67:1097,1063] = 0 + pixel_mask_pf[0:1030, 1100] = 0 + + pixel_mask_pf[1106:2136, 1131] = 0 + pixel_mask_pf[1039:2069, 1168] = 0 + + pixel_mask_pf[1039:2069, 1718] = 0 + pixel_mask_pf[1039:2069, 1681] = 0 + + pixel_mask_pf[1106:2136, 618] = 0 + + pixel_mask_pf[1106:2136, 581] = 0 + + pixel_mask_pf[67:1097,513] = 0 + + pixel_mask_pf[67:1097, 550] = 0 + + pixel_mask_pf[0:1030, 1650] = 0 + + pixel_mask_pf[0:1030, 1613] = 0 + + pixel_mask_pf[1106, 68:582] = 0 + + pixel_mask_pf[1096, 550:1064] = 0 + pixel_mask_pf[1106, 618:1132] = 0 + + pixel_mask_pf[1029, 1100:1614] = 0 + pixel_mask_pf[1039, 1168:1682] = 0 + + pixel_mask_pf[1039, 1718:2230] = 0 + + pixel_mask_pf[1096, 0:513] = 0 + + pixel_mask_pf[1029, 1650:2163] = 0 + + pixel_mask_pf[2068, 1168:2232] = 0 + + pixel_mask_pf[67,0:1063] = 0 + + #bad region in left bottom inner module + pixel_mask_pf[842:1097, 669:671] = 0 + + #second bad region in left bottom inner module + pixel_mask_pf[1094, 620:807] = 0 + + # vertical line in upper left bottom module + pixel_mask_pf[842:1072, 87:90] = 0 + + pixel_mask_pf[1794, 1503:1550] = 0 + + if detector == "JF17T16V01": + # mask module 11 + pixel_mask_pf[2619:3333,1577:2607] = 0 + + + -- 2.49.0 From e567e3f406cd2e545c54e3a77b0e879034235a70 Mon Sep 17 00:00:00 2001 From: Sven 
Augustin Date: Tue, 30 Jul 2024 14:26:35 +0200 Subject: [PATCH 004/159] tried to group bad masked regions a bit better --- dap/algos/addmask.py | 51 ++++++++++++++++++++------------------------ 1 file changed, 23 insertions(+), 28 deletions(-) diff --git a/dap/algos/addmask.py b/dap/algos/addmask.py index bae5302..d547359 100644 --- a/dap/algos/addmask.py +++ b/dap/algos/addmask.py @@ -3,56 +3,51 @@ def calc_apply_additional_mask(detector, pixel_mask_pf): if detector == "JF06T08V04": # edge pixels - pixel_mask_pf[67:1097,1063] = 0 pixel_mask_pf[0:1030, 1100] = 0 - - pixel_mask_pf[1106:2136, 1131] = 0 - pixel_mask_pf[1039:2069, 1168] = 0 - - pixel_mask_pf[1039:2069, 1718] = 0 - pixel_mask_pf[1039:2069, 1681] = 0 - - pixel_mask_pf[1106:2136, 618] = 0 - - pixel_mask_pf[1106:2136, 581] = 0 - - pixel_mask_pf[67:1097,513] = 0 - - pixel_mask_pf[67:1097, 550] = 0 - + pixel_mask_pf[0:1030, 1613] = 0 pixel_mask_pf[0:1030, 1650] = 0 - pixel_mask_pf[0:1030, 1613] = 0 + pixel_mask_pf[67, 0:1063] = 0 - pixel_mask_pf[1106, 68:582] = 0 - - pixel_mask_pf[1096, 550:1064] = 0 - pixel_mask_pf[1106, 618:1132] = 0 + pixel_mask_pf[67:1097, 513] = 0 + pixel_mask_pf[67:1097, 550] = 0 + pixel_mask_pf[67:1097, 1063] = 0 pixel_mask_pf[1029, 1100:1614] = 0 - pixel_mask_pf[1039, 1168:1682] = 0 + pixel_mask_pf[1029, 1650:2163] = 0 + pixel_mask_pf[1039, 1168:1682] = 0 pixel_mask_pf[1039, 1718:2230] = 0 - pixel_mask_pf[1096, 0:513] = 0 + pixel_mask_pf[1039:2069, 1168] = 0 + pixel_mask_pf[1039:2069, 1681] = 0 + pixel_mask_pf[1039:2069, 1718] = 0 - pixel_mask_pf[1029, 1650:2163] = 0 + pixel_mask_pf[1096, 0:513] = 0 + pixel_mask_pf[1096, 550:1064] = 0 + + pixel_mask_pf[1106, 68:582] = 0 + pixel_mask_pf[1106, 618:1132] = 0 + + pixel_mask_pf[1106:2136, 581] = 0 + pixel_mask_pf[1106:2136, 618] = 0 + pixel_mask_pf[1106:2136, 1131] = 0 pixel_mask_pf[2068, 1168:2232] = 0 - pixel_mask_pf[67,0:1063] = 0 - - #bad region in left bottom inner module + # first bad region in left bottom inner module 
pixel_mask_pf[842:1097, 669:671] = 0 - #second bad region in left bottom inner module + # second bad region in left bottom inner module pixel_mask_pf[1094, 620:807] = 0 # vertical line in upper left bottom module pixel_mask_pf[842:1072, 87:90] = 0 + # horizontal line? pixel_mask_pf[1794, 1503:1550] = 0 + if detector == "JF17T16V01": # mask module 11 pixel_mask_pf[2619:3333,1577:2607] = 0 -- 2.49.0 From b78f4a61fd10a5b4e379e97ab2e1a0ee6f219a26 Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Tue, 30 Jul 2024 14:56:10 +0200 Subject: [PATCH 005/159] de-duplicate sending --- dap/worker.py | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/dap/worker.py b/dap/worker.py index 4f2b6fc..e2434a0 100644 --- a/dap/worker.py +++ b/dap/worker.py @@ -364,24 +364,21 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host if apply_aggregation and "aggregation_max" in results: if forceSendVisualisation: - visualisation_socket.send_json(results, FLAGS | zmq.SNDMORE) - visualisation_socket.send(data, FLAGS, copy=True, track=True) + pass else: data = np.empty((2, 2), dtype=np.uint16) results["type"] = str(data.dtype) results["shape"] = data.shape - visualisation_socket.send_json(results, FLAGS | zmq.SNDMORE) - visualisation_socket.send(data, FLAGS, copy=True, track=True) else: if results["is_good_frame"] and (results["is_hit_frame"] or randint(1, skip_frames_rate) == 1): - visualisation_socket.send_json(results, FLAGS | zmq.SNDMORE) - visualisation_socket.send(data, FLAGS, copy=True, track=True) + pass else: data = np.empty((2, 2), dtype=np.uint16) results["type"] = str(data.dtype) results["shape"] = data.shape - visualisation_socket.send_json(results, FLAGS | zmq.SNDMORE) - visualisation_socket.send(data, FLAGS, copy=True, track=True) + + visualisation_socket.send_json(results, FLAGS | zmq.SNDMORE) + visualisation_socket.send(data, FLAGS, copy=True, track=True) -- 2.49.0 From a96a5d64233254581328603ac21d3c486a767c04 Mon Sep 
17 00:00:00 2001 From: Sven Augustin Date: Tue, 30 Jul 2024 15:05:42 +0200 Subject: [PATCH 006/159] removed unused True path --- dap/worker.py | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/dap/worker.py b/dap/worker.py index e2434a0..6ace835 100644 --- a/dap/worker.py +++ b/dap/worker.py @@ -363,16 +363,12 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host accumulator_socket.send_json(results, FLAGS) if apply_aggregation and "aggregation_max" in results: - if forceSendVisualisation: - pass - else: + if not forceSendVisualisation: data = np.empty((2, 2), dtype=np.uint16) results["type"] = str(data.dtype) results["shape"] = data.shape else: - if results["is_good_frame"] and (results["is_hit_frame"] or randint(1, skip_frames_rate) == 1): - pass - else: + if not results["is_good_frame"] or not (results["is_hit_frame"] or randint(1, skip_frames_rate) == 1): data = np.empty((2, 2), dtype=np.uint16) results["type"] = str(data.dtype) results["shape"] = data.shape -- 2.49.0 From cd76af01faf93d2fb8a351b9b98e146d50f70b6d Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Tue, 30 Jul 2024 15:13:41 +0200 Subject: [PATCH 007/159] DRY --- dap/worker.py | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/dap/worker.py b/dap/worker.py index 6ace835..e3c5b5e 100644 --- a/dap/worker.py +++ b/dap/worker.py @@ -362,16 +362,14 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host accumulator_socket.send_json(results, FLAGS) - if apply_aggregation and "aggregation_max" in results: - if not forceSendVisualisation: - data = np.empty((2, 2), dtype=np.uint16) - results["type"] = str(data.dtype) - results["shape"] = data.shape - else: - if not results["is_good_frame"] or not (results["is_hit_frame"] or randint(1, skip_frames_rate) == 1): - data = np.empty((2, 2), dtype=np.uint16) - results["type"] = str(data.dtype) - results["shape"] = data.shape + + send_empty_cond1 = 
(apply_aggregation and "aggregation_max" in results and not forceSendVisualisation) + send_empty_cond2 = (not results["is_good_frame"] or not (results["is_hit_frame"] or randint(1, skip_frames_rate) == 1)) + + if send_empty_cond1 or send_empty_cond2: + data = np.empty((2, 2), dtype=np.uint16) + results["type"] = str(data.dtype) + results["shape"] = data.shape visualisation_socket.send_json(results, FLAGS | zmq.SNDMORE) visualisation_socket.send(data, FLAGS, copy=True, track=True) -- 2.49.0 From 57e22c01b5f50eab18e73ff63cfae5567045a97e Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Tue, 30 Jul 2024 15:25:25 +0200 Subject: [PATCH 008/159] spaces --- dap/worker.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/dap/worker.py b/dap/worker.py index e3c5b5e..9c7311e 100644 --- a/dap/worker.py +++ b/dap/worker.py @@ -109,7 +109,7 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host if worker == 0: print(f"({pulseid}) update peakfinder parameters {old_peakfinder_parameters}", flush=True) print(f" --> {peakfinder_parameters}", flush=True) - print("",flush=True) + print(flush=True) except Exception as e: print(f"({pulseid}) problem ({e}) to read peakfinder parameters file, worker : {worker}", flush=True) @@ -246,7 +246,7 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host roi_results_normalised[iRoi] = np.nanmean(data_roi) results["roi_intensities_x"].append([roi_x1[iRoi], roi_x2[iRoi]]) - results["roi_intensities_proj_x"].append(np.nansum(data_roi,axis=0).tolist()) + results["roi_intensities_proj_x"].append(np.nansum(data_roi, axis=0).tolist()) results["roi_intensities"] = [float(r) for r in roi_results] results["roi_intensities_normalised"] = [float(r) for r in roi_results_normalised ] -- 2.49.0 From 444bf1de06fda323776cc7619bc9a2af2e0b8e46 Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Tue, 30 Jul 2024 15:57:57 +0200 Subject: [PATCH 009/159] moved zmq receiving/sending into 
separate class --- dap/worker.py | 32 ++++++-------------------------- dap/zmqsocks.py | 49 +++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 55 insertions(+), 26 deletions(-) create mode 100644 dap/zmqsocks.py diff --git a/dap/worker.py b/dap/worker.py index 9c7311e..eec10fa 100644 --- a/dap/worker.py +++ b/dap/worker.py @@ -7,10 +7,10 @@ from time import sleep import jungfrau_utils as ju import numpy as np -import zmq from peakfinder8_extension import peakfinder_8 from algos import calc_radial_integration, calc_apply_additional_mask +from zmqsocks import ZMQSockets FLAGS = 0 @@ -57,27 +57,11 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host ju_stream_adapter = ju.StreamAdapter() - zmq_context = zmq.Context(io_threads=4) - poller = zmq.Poller() + zmq_socks = ZMQSockets(backend_address, accumulator_host, accumulator_port, visualisation_host, visualisation_port) # all the normal workers worker = 1 -# receive from backend: - backend_socket = zmq_context.socket(zmq.PULL) - backend_socket.connect(backend_address) - - poller.register(backend_socket, zmq.POLLIN) - - accumulator_socket = zmq_context.socket(zmq.PUSH) - accumulator_socket.connect(f"tcp://{accumulator_host}:{accumulator_port}") - - visualisation_socket = zmq_context.socket(zmq.PUB) - visualisation_socket.connect(f"tcp://{visualisation_host}:{visualisation_port}") - -# in case of problem with communication to visualisation, keep in 0mq buffer only few messages - visualisation_socket.set_hwm(10) - keep_pixels = None r_radial_integration = None center_radial_integration = None @@ -113,13 +97,10 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host except Exception as e: print(f"({pulseid}) problem ({e}) to read peakfinder parameters file, worker : {worker}", flush=True) - events = dict(poller.poll(2000)) # check every 2 seconds in each worker - if backend_socket not in events: + if not zmq_socks.has_data(): continue - metadata = 
backend_socket.recv_json(FLAGS) - image = backend_socket.recv(FLAGS, copy=False, track=False) - image = np.frombuffer(image, dtype=metadata["type"]).reshape(metadata["shape"]) + image, metadata = zmq_socks.get_data() results = copy(metadata) if results["shape"][0] == 2 and results["shape"][1] == 2: @@ -360,7 +341,7 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host results["shape"] = data.shape - accumulator_socket.send_json(results, FLAGS) + zmq_socks.send_accumulator(results) send_empty_cond1 = (apply_aggregation and "aggregation_max" in results and not forceSendVisualisation) @@ -371,8 +352,7 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host results["type"] = str(data.dtype) results["shape"] = data.shape - visualisation_socket.send_json(results, FLAGS | zmq.SNDMORE) - visualisation_socket.send(data, FLAGS, copy=True, track=True) + zmq_socks.send_visualisation(results, data) diff --git a/dap/zmqsocks.py b/dap/zmqsocks.py new file mode 100644 index 0000000..be25cc8 --- /dev/null +++ b/dap/zmqsocks.py @@ -0,0 +1,49 @@ +import numpy as np +import zmq + + +FLAGS = 0 + + +class ZMQSockets: + + def __init__(self, backend_address, accumulator_host, accumulator_port, visualisation_host, visualisation_port): + zmq_context = zmq.Context(io_threads=4) + self.poller = poller = zmq.Poller() + + # receive from backend: + self.backend_socket = backend_socket = zmq_context.socket(zmq.PULL) + backend_socket.connect(backend_address) + + poller.register(backend_socket, zmq.POLLIN) + + self.accumulator_socket = accumulator_socket = zmq_context.socket(zmq.PUSH) + accumulator_socket.connect(f"tcp://{accumulator_host}:{accumulator_port}") + + self.visualisation_socket = visualisation_socket = zmq_context.socket(zmq.PUB) + visualisation_socket.connect(f"tcp://{visualisation_host}:{visualisation_port}") + + # in case of problem with communication to visualisation, keep in 0mq buffer only few messages + 
visualisation_socket.set_hwm(10) + + + def has_data(self): + events = dict(self.poller.poll(2000)) # check every 2 seconds in each worker + return (self.backend_socket in events) + + def get_data(self): + metadata = self.backend_socket.recv_json(FLAGS) + image = self.backend_socket.recv(FLAGS, copy=False, track=False) + image = np.frombuffer(image, dtype=metadata["type"]).reshape(metadata["shape"]) + return image, metadata + + + def send_accumulator(self, results): + self.accumulator_socket.send_json(results, FLAGS) + + def send_visualisation(self, results, data): + self.visualisation_socket.send_json(results, FLAGS | zmq.SNDMORE) + self.visualisation_socket.send(data, FLAGS, copy=True, track=True) + + + -- 2.49.0 From 1e7d4a0a0245957c03af1e7a26cfad5b8288e89a Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Tue, 30 Jul 2024 15:58:56 +0200 Subject: [PATCH 010/159] moved zmq receiving/sending into separate class --- dap/worker.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/dap/worker.py b/dap/worker.py index eec10fa..fee9e96 100644 --- a/dap/worker.py +++ b/dap/worker.py @@ -13,10 +13,6 @@ from algos import calc_radial_integration, calc_apply_additional_mask from zmqsocks import ZMQSockets -FLAGS = 0 - - - def main(): parser = argparse.ArgumentParser() -- 2.49.0 From 399b8875166a4902152f0d2a195844aa7d318ecc Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Tue, 30 Jul 2024 17:02:57 +0200 Subject: [PATCH 011/159] added/use json_load and read_bit utils --- dap/utils.py | 16 ++++++++++++++++ dap/worker.py | 16 +++++++--------- 2 files changed, 23 insertions(+), 9 deletions(-) create mode 100644 dap/utils.py diff --git a/dap/utils.py b/dap/utils.py new file mode 100644 index 0000000..0e687ca --- /dev/null +++ b/dap/utils.py @@ -0,0 +1,16 @@ +import json + + +def json_load(filename, *args, **kwargs): + with open(filename, "r") as f: + return json.load(f, *args, **kwargs) + + +def read_bit(bits, n): + """ + read the n-th bit from bits as boolean + """ + return 
bool((bits >> n) & 1) + + + diff --git a/dap/worker.py b/dap/worker.py index fee9e96..13c1452 100644 --- a/dap/worker.py +++ b/dap/worker.py @@ -1,5 +1,4 @@ import argparse -import json import os from copy import copy from random import randint @@ -11,6 +10,7 @@ from peakfinder8_extension import peakfinder_8 from algos import calc_radial_integration, calc_apply_additional_mask from zmqsocks import ZMQSockets +from utils import json_load, read_bit def main(): @@ -45,8 +45,7 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host peakfinder_parameters = {} peakfinder_parameters_time = -1 if fn_peakfinder_parameters is not None and os.path.exists(fn_peakfinder_parameters): - with open(fn_peakfinder_parameters, "r") as read_file: - peakfinder_parameters = json.load(read_file) + peakfinder_parameters = json_load(fn_peakfinder_parameters) peakfinder_parameters_time = os.path.getmtime(fn_peakfinder_parameters) pulseid = 0 @@ -82,8 +81,7 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host if time_delta > 2.0: old_peakfinder_parameters = peakfinder_parameters sleep(0.5) - with open(fn_peakfinder_parameters, "r") as read_file: - peakfinder_parameters = json.load(read_file) + peakfinder_parameters = json_load(fn_peakfinder_parameters) peakfinder_parameters_time = new_time center_radial_integration = None if worker == 0: @@ -112,10 +110,10 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host results["is_hit_frame"] = False daq_rec = results.get("daq_rec", 0) - event_laser = bool((daq_rec >> 16) & 1) - event_darkshot = bool((daq_rec >> 17) & 1) -# event_fel = bool((daq_rec >> 18) & 1) - event_ppicker = bool((daq_rec >> 19) & 1) + event_laser = read_bit(daq_rec, 16) + event_darkshot = read_bit(daq_rec, 17) +# event_fel = read_bit(daq_rec, 18) + event_ppicker = read_bit(daq_rec, 19) if not event_darkshot: results["laser_on"] = event_laser -- 2.49.0 From 
a01a6e6ed5ada299945812d73740a280ad55fd38 Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Tue, 30 Jul 2024 17:27:04 +0200 Subject: [PATCH 012/159] renamed variable d to pfdata --- dap/worker.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/dap/worker.py b/dap/worker.py index 13c1452..6dc4f20 100644 --- a/dap/worker.py +++ b/dap/worker.py @@ -180,11 +180,11 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host #copy image to work with peakfinder, just in case - d = np.copy(data) + pfdata = np.copy(data) # make all masked pixels values nans if pixel_mask_pf is not None: - d[pixel_mask_pf != 1] = np.nan + pfdata[pixel_mask_pf != 1] = np.nan apply_threshold = results.get("apply_threshold", False) threshold_value_choice = results.get("threshold_value", "NaN") @@ -192,9 +192,9 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host if apply_threshold and all(k in results for k in ("threshold_min", "threshold_max")): threshold_min = float(results["threshold_min"]) threshold_max = float(results["threshold_max"]) - d[d < threshold_min] = threshold_value + pfdata[pfdata < threshold_min] = threshold_value if threshold_max > threshold_min: - d[d > threshold_max] = threshold_value + pfdata[pfdata > threshold_max] = threshold_value # if roi calculation request is present, make it roi_x1 = results.get("roi_x1", []) @@ -212,7 +212,7 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host results["roi_intensities_proj_x"] = [] for iRoi in range(len(roi_x1)): - data_roi = np.copy(d[roi_y1[iRoi]:roi_y2[iRoi], roi_x1[iRoi]:roi_x2[iRoi]]) + data_roi = np.copy(pfdata[roi_y1[iRoi]:roi_y2[iRoi], roi_x1[iRoi]:roi_x2[iRoi]]) roi_results[iRoi] = np.nansum(data_roi) if threshold_value_choice == "NaN": @@ -252,7 +252,7 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host hitfinder_min_pix_count = int(results["hitfinder_min_pix_count"]) 
hitfinder_adc_thresh = results["hitfinder_adc_thresh"] - asic_ny, asic_nx = d.shape + asic_ny, asic_nx = pfdata.shape nasics_y, nasics_x = 1, 1 hitfinder_max_pix_count = 100 max_num_peaks = 10000 @@ -263,12 +263,12 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host # in case of further modification with the mask, make a new one, independent from real mask maskPr = np.copy(pixel_mask_pf) - y, x = np.indices(d.shape) + y, x = np.indices(pfdata.shape) pix_r = np.sqrt((x-x_beam)**2 + (y-y_beam)**2) peak_list_x, peak_list_y, peak_list_value = peakfinder_8( max_num_peaks, - d.astype(np.float32), + pfdata.astype(np.float32), maskPr.astype(np.int8), pix_r.astype(np.float32), asic_nx, asic_ny, -- 2.49.0 From 6a926bcedee440f1a6bbc78493d60e2ac0d0d962 Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Tue, 30 Jul 2024 17:51:08 +0200 Subject: [PATCH 013/159] moved peakfinder_analysis out of work function --- dap/algos/__init__.py | 1 + dap/algos/peakfind.py | 63 +++++++++++++++++++++++++++++++++++++++++++ dap/worker.py | 58 +++------------------------------------ 3 files changed, 67 insertions(+), 55 deletions(-) create mode 100644 dap/algos/peakfind.py diff --git a/dap/algos/__init__.py b/dap/algos/__init__.py index f77c042..a738167 100644 --- a/dap/algos/__init__.py +++ b/dap/algos/__init__.py @@ -1,5 +1,6 @@ from .radprof import calc_radial_integration from .addmask import calc_apply_additional_mask +from .peakfind import calc_peakfinder_analysis diff --git a/dap/algos/peakfind.py b/dap/algos/peakfind.py new file mode 100644 index 0000000..7ad8e40 --- /dev/null +++ b/dap/algos/peakfind.py @@ -0,0 +1,63 @@ +from copy import copy + +import numpy as np + +from peakfinder8_extension import peakfinder_8 + + +def calc_peakfinder_analysis(results, pfdata, pixel_mask_pf): + x_beam = results["beam_center_x"] - 0.5 # to coordinates where position of first pixel/point is 0.5, 0.5 + y_beam = results["beam_center_y"] - 0.5 # to coordinates where position 
of first pixel/point is 0.5, 0.5 + hitfinder_min_snr = results["hitfinder_min_snr"] + hitfinder_min_pix_count = int(results["hitfinder_min_pix_count"]) + hitfinder_adc_thresh = results["hitfinder_adc_thresh"] + + asic_ny, asic_nx = pfdata.shape + nasics_y, nasics_x = 1, 1 + hitfinder_max_pix_count = 100 + max_num_peaks = 10000 + + # usually don't need to change this value, rather robust + hitfinder_local_bg_radius= 20. + + # in case of further modification with the mask, make a new one, independent from real mask + maskPr = np.copy(pixel_mask_pf) + + y, x = np.indices(pfdata.shape) + pix_r = np.sqrt((x-x_beam)**2 + (y-y_beam)**2) + + peak_list_x, peak_list_y, peak_list_value = peakfinder_8( + max_num_peaks, + pfdata.astype(np.float32), + maskPr.astype(np.int8), + pix_r.astype(np.float32), + asic_nx, asic_ny, + nasics_x, nasics_y, + hitfinder_adc_thresh, + hitfinder_min_snr, + hitfinder_min_pix_count, + hitfinder_max_pix_count, + hitfinder_local_bg_radius + ) + + number_of_spots = len(peak_list_x) + results["number_of_spots"] = number_of_spots + if number_of_spots != 0: + results["spot_x"] = [-1.0] * number_of_spots + results["spot_y"] = [-1.0] * number_of_spots + results["spot_intensity"] = copy(peak_list_value) + for i in range(number_of_spots): + results["spot_x"][i] = peak_list_x[i] + 0.5 + results["spot_y"][i] = peak_list_y[i] + 0.5 + else: + results["spot_x"] = [] + results["spot_y"] = [] + results["spot_intensity"] = [] + + npeaks_threshold_hit = results.get("npeaks_threshold_hit", 15) + + if number_of_spots >= npeaks_threshold_hit: + results["is_hit_frame"] = True + + + diff --git a/dap/worker.py b/dap/worker.py index 6dc4f20..9de6835 100644 --- a/dap/worker.py +++ b/dap/worker.py @@ -6,9 +6,8 @@ from time import sleep import jungfrau_utils as ju import numpy as np -from peakfinder8_extension import peakfinder_8 -from algos import calc_radial_integration, calc_apply_additional_mask +from algos import calc_radial_integration, calc_apply_additional_mask, 
calc_peakfinder_analysis from zmqsocks import ZMQSockets from utils import json_load, read_bit @@ -246,60 +245,9 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host # in case all needed parameters are present, make peakfinding do_peakfinder_analysis = results.get("do_peakfinder_analysis", False) if do_peakfinder_analysis and pixel_mask_pf is not None and all(k in results for k in ("beam_center_x", "beam_center_y", "hitfinder_min_snr", "hitfinder_min_pix_count", "hitfinder_adc_thresh")): - x_beam = results["beam_center_x"] - 0.5 # to coordinates where position of first pixel/point is 0.5, 0.5 - y_beam = results["beam_center_y"] - 0.5 # to coordinates where position of first pixel/point is 0.5, 0.5 - hitfinder_min_snr = results["hitfinder_min_snr"] - hitfinder_min_pix_count = int(results["hitfinder_min_pix_count"]) - hitfinder_adc_thresh = results["hitfinder_adc_thresh"] - - asic_ny, asic_nx = pfdata.shape - nasics_y, nasics_x = 1, 1 - hitfinder_max_pix_count = 100 - max_num_peaks = 10000 - - # usually don't need to change this value, rather robust - hitfinder_local_bg_radius= 20. 
- - # in case of further modification with the mask, make a new one, independent from real mask - maskPr = np.copy(pixel_mask_pf) - - y, x = np.indices(pfdata.shape) - pix_r = np.sqrt((x-x_beam)**2 + (y-y_beam)**2) - - peak_list_x, peak_list_y, peak_list_value = peakfinder_8( - max_num_peaks, - pfdata.astype(np.float32), - maskPr.astype(np.int8), - pix_r.astype(np.float32), - asic_nx, asic_ny, - nasics_x, nasics_y, - hitfinder_adc_thresh, - hitfinder_min_snr, - hitfinder_min_pix_count, - hitfinder_max_pix_count, - hitfinder_local_bg_radius - ) - - - number_of_spots = len(peak_list_x) - results["number_of_spots"] = number_of_spots - if number_of_spots != 0: - results["spot_x"] = [-1.0] * number_of_spots - results["spot_y"] = [-1.0] * number_of_spots - results["spot_intensity"] = copy(peak_list_value) - for i in range(number_of_spots): - results["spot_x"][i] = peak_list_x[i] + 0.5 - results["spot_y"][i] = peak_list_y[i] + 0.5 - else: - results["spot_x"] = [] - results["spot_y"] = [] - results["spot_intensity"] = [] - - npeaks_threshold_hit = results.get("npeaks_threshold_hit", 15) - - if number_of_spots >= npeaks_threshold_hit: - results["is_hit_frame"] = True + calc_peakfinder_analysis(results, pfdata, pixel_mask_pf) +# ??? 
forceSendVisualisation = False if data.dtype != np.uint16: apply_threshold = results.get("apply_threshold", False) -- 2.49.0 From c3044653c2bb6aeffc6eb15e4922ace42832938c Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Tue, 30 Jul 2024 17:51:57 +0200 Subject: [PATCH 014/159] renamed variable pfdata to data --- dap/algos/peakfind.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/dap/algos/peakfind.py b/dap/algos/peakfind.py index 7ad8e40..49510cd 100644 --- a/dap/algos/peakfind.py +++ b/dap/algos/peakfind.py @@ -5,14 +5,14 @@ import numpy as np from peakfinder8_extension import peakfinder_8 -def calc_peakfinder_analysis(results, pfdata, pixel_mask_pf): +def calc_peakfinder_analysis(results, data, pixel_mask_pf): x_beam = results["beam_center_x"] - 0.5 # to coordinates where position of first pixel/point is 0.5, 0.5 y_beam = results["beam_center_y"] - 0.5 # to coordinates where position of first pixel/point is 0.5, 0.5 hitfinder_min_snr = results["hitfinder_min_snr"] hitfinder_min_pix_count = int(results["hitfinder_min_pix_count"]) hitfinder_adc_thresh = results["hitfinder_adc_thresh"] - asic_ny, asic_nx = pfdata.shape + asic_ny, asic_nx = data.shape nasics_y, nasics_x = 1, 1 hitfinder_max_pix_count = 100 max_num_peaks = 10000 @@ -23,12 +23,12 @@ def calc_peakfinder_analysis(results, pfdata, pixel_mask_pf): # in case of further modification with the mask, make a new one, independent from real mask maskPr = np.copy(pixel_mask_pf) - y, x = np.indices(pfdata.shape) + y, x = np.indices(data.shape) pix_r = np.sqrt((x-x_beam)**2 + (y-y_beam)**2) peak_list_x, peak_list_y, peak_list_value = peakfinder_8( max_num_peaks, - pfdata.astype(np.float32), + data.astype(np.float32), maskPr.astype(np.int8), pix_r.astype(np.float32), asic_nx, asic_ny, -- 2.49.0 From 5d714e1fa37114106c85fc42127413eaed86c22a Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Tue, 30 Jul 2024 18:09:56 +0200 Subject: [PATCH 015/159] moved spi_analysis out of work function --- 
dap/algos/__init__.py | 1 + dap/algos/spiana.py | 16 ++++++++++++++++ dap/worker.py | 16 ++-------------- 3 files changed, 19 insertions(+), 14 deletions(-) create mode 100644 dap/algos/spiana.py diff --git a/dap/algos/__init__.py b/dap/algos/__init__.py index a738167..7d98ed2 100644 --- a/dap/algos/__init__.py +++ b/dap/algos/__init__.py @@ -2,5 +2,6 @@ from .radprof import calc_radial_integration from .addmask import calc_apply_additional_mask from .peakfind import calc_peakfinder_analysis +from .spiana import calc_spi_analysis diff --git a/dap/algos/spiana.py b/dap/algos/spiana.py new file mode 100644 index 0000000..9e8235e --- /dev/null +++ b/dap/algos/spiana.py @@ -0,0 +1,16 @@ + +def calc_spi_analysis(results): + if "spi_limit" in results and len(results["spi_limit"]) == 2: + + number_of_spots = 0 + if results["roi_intensities_normalised"][0] >= results["spi_limit"][0]: + number_of_spots += 25 + if results["roi_intensities_normalised"][1] >= results["spi_limit"][1]: + number_of_spots += 50 + + results["number_of_spots"] = number_of_spots + if number_of_spots > 0: + results["is_hit_frame"] = True + + + diff --git a/dap/worker.py b/dap/worker.py index 9de6835..257b31b 100644 --- a/dap/worker.py +++ b/dap/worker.py @@ -7,7 +7,7 @@ from time import sleep import jungfrau_utils as ju import numpy as np -from algos import calc_radial_integration, calc_apply_additional_mask, calc_peakfinder_analysis +from algos import calc_radial_integration, calc_apply_additional_mask, calc_peakfinder_analysis, calc_spi_analysis from zmqsocks import ZMQSockets from utils import json_load, read_bit @@ -227,20 +227,8 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host # SPI analysis do_spi_analysis = results.get("do_spi_analysis", False) - if do_spi_analysis and "roi_intensities_normalised" in results and len(results["roi_intensities_normalised"]) >= 2: - - if "spi_limit" in results and len(results["spi_limit"]) == 2: - - number_of_spots = 0 - if 
results["roi_intensities_normalised"][0] >= results["spi_limit"][0]: - number_of_spots += 25 - if results["roi_intensities_normalised"][1] >= results["spi_limit"][1]: - number_of_spots += 50 - - results["number_of_spots"] = number_of_spots - if number_of_spots > 0: - results["is_hit_frame"] = True + calc_spi_analysis(results) # in case all needed parameters are present, make peakfinding do_peakfinder_analysis = results.get("do_peakfinder_analysis", False) -- 2.49.0 From 56a5f94805a15b2f861cbe5d402d9c65db73f63b Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Tue, 30 Jul 2024 18:11:49 +0200 Subject: [PATCH 016/159] added todo note --- dap/worker.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dap/worker.py b/dap/worker.py index 257b31b..ac9e41a 100644 --- a/dap/worker.py +++ b/dap/worker.py @@ -255,7 +255,7 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host data += data_summed n_aggregated_images += 1 data_summed = data.copy() - data_summed[data == -np.nan] = -np.nan + data_summed[data == -np.nan] = -np.nan #TODO: this does nothing results["aggregated_images"] = n_aggregated_images results["worker"] = worker if n_aggregated_images >= results["aggregation_max"]: -- 2.49.0 From 3e3916b7866087eb398fd5d226bd7795f99f8034 Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Tue, 30 Jul 2024 18:55:02 +0200 Subject: [PATCH 017/159] moved ROI algo out of work function --- dap/algos/__init__.py | 1 + dap/algos/roi.py | 29 +++++++++++++++++++++++++++++ dap/worker.py | 25 ++----------------------- 3 files changed, 32 insertions(+), 23 deletions(-) create mode 100644 dap/algos/roi.py diff --git a/dap/algos/__init__.py b/dap/algos/__init__.py index 7d98ed2..dbd53b6 100644 --- a/dap/algos/__init__.py +++ b/dap/algos/__init__.py @@ -3,5 +3,6 @@ from .radprof import calc_radial_integration from .addmask import calc_apply_additional_mask from .peakfind import calc_peakfinder_analysis from .spiana import calc_spi_analysis +from 
.roi import calc_roi diff --git a/dap/algos/roi.py b/dap/algos/roi.py new file mode 100644 index 0000000..00d764b --- /dev/null +++ b/dap/algos/roi.py @@ -0,0 +1,29 @@ +import numpy as np + + +def calc_roi(results, pfdata, roi_x1, roi_x2, roi_y1, roi_y2, pixel_mask_pf, threshold_value_choice): + roi_results = [0] * len(roi_x1) + roi_results_normalised = [0] * len(roi_x1) + + if pixel_mask_pf is not None: + + results["roi_intensities_x"] = [] + results["roi_intensities_proj_x"] = [] + + for iRoi in range(len(roi_x1)): + data_roi = np.copy(pfdata[roi_y1[iRoi]:roi_y2[iRoi], roi_x1[iRoi]:roi_x2[iRoi]]) + + roi_results[iRoi] = np.nansum(data_roi) + if threshold_value_choice == "NaN": + roi_results_normalised[iRoi] = roi_results[iRoi] / ((roi_y2[iRoi] - roi_y1[iRoi]) * (roi_x2[iRoi] - roi_x1[iRoi])) + else: + roi_results_normalised[iRoi] = np.nanmean(data_roi) + + results["roi_intensities_x"].append([roi_x1[iRoi], roi_x2[iRoi]]) + results["roi_intensities_proj_x"].append(np.nansum(data_roi, axis=0).tolist()) + + results["roi_intensities"] = [float(r) for r in roi_results] + results["roi_intensities_normalised"] = [float(r) for r in roi_results_normalised ] + + + diff --git a/dap/worker.py b/dap/worker.py index ac9e41a..3364f8d 100644 --- a/dap/worker.py +++ b/dap/worker.py @@ -7,7 +7,7 @@ from time import sleep import jungfrau_utils as ju import numpy as np -from algos import calc_radial_integration, calc_apply_additional_mask, calc_peakfinder_analysis, calc_spi_analysis +from algos import calc_radial_integration, calc_apply_additional_mask, calc_peakfinder_analysis, calc_spi_analysis, calc_roi from zmqsocks import ZMQSockets from utils import json_load, read_bit @@ -202,28 +202,7 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host roi_y2 = results.get("roi_y2", []) if len(roi_x1) > 0 and len(roi_x1) == len(roi_x2) and len(roi_x1) == len(roi_y1) and len(roi_x1) == len(roi_y2): - roi_results = [0] * len(roi_x1) - roi_results_normalised = 
[0] * len(roi_x1) - - if pixel_mask_pf is not None: - - results["roi_intensities_x"] = [] - results["roi_intensities_proj_x"] = [] - - for iRoi in range(len(roi_x1)): - data_roi = np.copy(pfdata[roi_y1[iRoi]:roi_y2[iRoi], roi_x1[iRoi]:roi_x2[iRoi]]) - - roi_results[iRoi] = np.nansum(data_roi) - if threshold_value_choice == "NaN": - roi_results_normalised[iRoi] = roi_results[iRoi] / ((roi_y2[iRoi] - roi_y1[iRoi]) * (roi_x2[iRoi] - roi_x1[iRoi])) - else: - roi_results_normalised[iRoi] = np.nanmean(data_roi) - - results["roi_intensities_x"].append([roi_x1[iRoi], roi_x2[iRoi]]) - results["roi_intensities_proj_x"].append(np.nansum(data_roi, axis=0).tolist()) - - results["roi_intensities"] = [float(r) for r in roi_results] - results["roi_intensities_normalised"] = [float(r) for r in roi_results_normalised ] + calc_roi(results, pfdata, roi_x1, roi_x2, roi_y1, roi_y2, pixel_mask_pf, threshold_value_choice) # SPI analysis do_spi_analysis = results.get("do_spi_analysis", False) -- 2.49.0 From d4575a665606ae3b64388585660d738f7db5eb96 Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Tue, 30 Jul 2024 18:56:11 +0200 Subject: [PATCH 018/159] de-arrow --- dap/algos/roi.py | 29 +++++++++++++++-------------- 1 file changed, 15 insertions(+), 14 deletions(-) diff --git a/dap/algos/roi.py b/dap/algos/roi.py index 00d764b..580c33e 100644 --- a/dap/algos/roi.py +++ b/dap/algos/roi.py @@ -5,25 +5,26 @@ def calc_roi(results, pfdata, roi_x1, roi_x2, roi_y1, roi_y2, pixel_mask_pf, thr roi_results = [0] * len(roi_x1) roi_results_normalised = [0] * len(roi_x1) - if pixel_mask_pf is not None: + if pixel_mask_pf is None: + return - results["roi_intensities_x"] = [] - results["roi_intensities_proj_x"] = [] + results["roi_intensities_x"] = [] + results["roi_intensities_proj_x"] = [] - for iRoi in range(len(roi_x1)): - data_roi = np.copy(pfdata[roi_y1[iRoi]:roi_y2[iRoi], roi_x1[iRoi]:roi_x2[iRoi]]) + for iRoi in range(len(roi_x1)): + data_roi = np.copy(pfdata[roi_y1[iRoi]:roi_y2[iRoi], 
roi_x1[iRoi]:roi_x2[iRoi]]) - roi_results[iRoi] = np.nansum(data_roi) - if threshold_value_choice == "NaN": - roi_results_normalised[iRoi] = roi_results[iRoi] / ((roi_y2[iRoi] - roi_y1[iRoi]) * (roi_x2[iRoi] - roi_x1[iRoi])) - else: - roi_results_normalised[iRoi] = np.nanmean(data_roi) + roi_results[iRoi] = np.nansum(data_roi) + if threshold_value_choice == "NaN": + roi_results_normalised[iRoi] = roi_results[iRoi] / ((roi_y2[iRoi] - roi_y1[iRoi]) * (roi_x2[iRoi] - roi_x1[iRoi])) + else: + roi_results_normalised[iRoi] = np.nanmean(data_roi) - results["roi_intensities_x"].append([roi_x1[iRoi], roi_x2[iRoi]]) - results["roi_intensities_proj_x"].append(np.nansum(data_roi, axis=0).tolist()) + results["roi_intensities_x"].append([roi_x1[iRoi], roi_x2[iRoi]]) + results["roi_intensities_proj_x"].append(np.nansum(data_roi, axis=0).tolist()) - results["roi_intensities"] = [float(r) for r in roi_results] - results["roi_intensities_normalised"] = [float(r) for r in roi_results_normalised ] + results["roi_intensities"] = [float(r) for r in roi_results] + results["roi_intensities_normalised"] = [float(r) for r in roi_results_normalised] -- 2.49.0 From 3c52784d489f6b14324fed82d6a966b3b3f7030e Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Tue, 30 Jul 2024 18:57:08 +0200 Subject: [PATCH 019/159] cleanup --- dap/algos/roi.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/dap/algos/roi.py b/dap/algos/roi.py index 580c33e..76775ec 100644 --- a/dap/algos/roi.py +++ b/dap/algos/roi.py @@ -2,12 +2,12 @@ import numpy as np def calc_roi(results, pfdata, roi_x1, roi_x2, roi_y1, roi_y2, pixel_mask_pf, threshold_value_choice): - roi_results = [0] * len(roi_x1) - roi_results_normalised = [0] * len(roi_x1) - if pixel_mask_pf is None: return + roi_results = [0] * len(roi_x1) + roi_results_normalised = [0] * len(roi_x1) + results["roi_intensities_x"] = [] results["roi_intensities_proj_x"] = [] -- 2.49.0 From 8a72928a20b00727117c70a8b22a0b1e4942b87a Mon Sep 17 
00:00:00 2001 From: Sven Augustin Date: Tue, 30 Jul 2024 18:57:40 +0200 Subject: [PATCH 020/159] renamed variable pfdata to data --- dap/algos/roi.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/dap/algos/roi.py b/dap/algos/roi.py index 76775ec..c1d8bb6 100644 --- a/dap/algos/roi.py +++ b/dap/algos/roi.py @@ -1,7 +1,7 @@ import numpy as np -def calc_roi(results, pfdata, roi_x1, roi_x2, roi_y1, roi_y2, pixel_mask_pf, threshold_value_choice): +def calc_roi(results, data, roi_x1, roi_x2, roi_y1, roi_y2, pixel_mask_pf, threshold_value_choice): if pixel_mask_pf is None: return @@ -12,7 +12,7 @@ def calc_roi(results, pfdata, roi_x1, roi_x2, roi_y1, roi_y2, pixel_mask_pf, thr results["roi_intensities_proj_x"] = [] for iRoi in range(len(roi_x1)): - data_roi = np.copy(pfdata[roi_y1[iRoi]:roi_y2[iRoi], roi_x1[iRoi]:roi_x2[iRoi]]) + data_roi = np.copy(data[roi_y1[iRoi]:roi_y2[iRoi], roi_x1[iRoi]:roi_x2[iRoi]]) roi_results[iRoi] = np.nansum(data_roi) if threshold_value_choice == "NaN": -- 2.49.0 From 96878e1b67821732b9471c0df8d63471c0fd4218 Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Tue, 30 Jul 2024 18:59:40 +0200 Subject: [PATCH 021/159] cleanup --- dap/worker.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dap/worker.py b/dap/worker.py index 3364f8d..cb3fb7b 100644 --- a/dap/worker.py +++ b/dap/worker.py @@ -201,7 +201,7 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host roi_y1 = results.get("roi_y1", []) roi_y2 = results.get("roi_y2", []) - if len(roi_x1) > 0 and len(roi_x1) == len(roi_x2) and len(roi_x1) == len(roi_y1) and len(roi_x1) == len(roi_y2): + if len(roi_x1) > 0 and len(roi_x1) == len(roi_x2) == len(roi_y1) == len(roi_y2): calc_roi(results, pfdata, roi_x1, roi_x2, roi_y1, roi_y2, pixel_mask_pf, threshold_value_choice) # SPI analysis -- 2.49.0 From 7b2a1c3935d1e369d49c1cfc7d7a79df1deab98f Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Tue, 30 Jul 2024 19:48:32 +0200 Subject: 
[PATCH 022/159] cleanup; logic also works correctly for number_of_spots == 0 --- dap/algos/peakfind.py | 20 ++++++++------------ 1 file changed, 8 insertions(+), 12 deletions(-) diff --git a/dap/algos/peakfind.py b/dap/algos/peakfind.py index 49510cd..d503178 100644 --- a/dap/algos/peakfind.py +++ b/dap/algos/peakfind.py @@ -8,6 +8,7 @@ from peakfinder8_extension import peakfinder_8 def calc_peakfinder_analysis(results, data, pixel_mask_pf): x_beam = results["beam_center_x"] - 0.5 # to coordinates where position of first pixel/point is 0.5, 0.5 y_beam = results["beam_center_y"] - 0.5 # to coordinates where position of first pixel/point is 0.5, 0.5 + hitfinder_min_snr = results["hitfinder_min_snr"] hitfinder_min_pix_count = int(results["hitfinder_min_pix_count"]) hitfinder_adc_thresh = results["hitfinder_adc_thresh"] @@ -18,7 +19,7 @@ def calc_peakfinder_analysis(results, data, pixel_mask_pf): max_num_peaks = 10000 # usually don't need to change this value, rather robust - hitfinder_local_bg_radius= 20. + hitfinder_local_bg_radius = 20. 
# in case of further modification with the mask, make a new one, independent from real mask maskPr = np.copy(pixel_mask_pf) @@ -42,17 +43,12 @@ def calc_peakfinder_analysis(results, data, pixel_mask_pf): number_of_spots = len(peak_list_x) results["number_of_spots"] = number_of_spots - if number_of_spots != 0: - results["spot_x"] = [-1.0] * number_of_spots - results["spot_y"] = [-1.0] * number_of_spots - results["spot_intensity"] = copy(peak_list_value) - for i in range(number_of_spots): - results["spot_x"][i] = peak_list_x[i] + 0.5 - results["spot_y"][i] = peak_list_y[i] + 0.5 - else: - results["spot_x"] = [] - results["spot_y"] = [] - results["spot_intensity"] = [] + results["spot_x"] = [-1.0] * number_of_spots + results["spot_y"] = [-1.0] * number_of_spots + results["spot_intensity"] = copy(peak_list_value) + for i in range(number_of_spots): + results["spot_x"][i] = peak_list_x[i] + 0.5 + results["spot_y"][i] = peak_list_y[i] + 0.5 npeaks_threshold_hit = results.get("npeaks_threshold_hit", 15) -- 2.49.0 From a4918c989c3f88bee6d2ccf0e413276be372f2e7 Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Tue, 30 Jul 2024 19:53:57 +0200 Subject: [PATCH 023/159] more pythonic --- dap/algos/peakfind.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/dap/algos/peakfind.py b/dap/algos/peakfind.py index d503178..8d57cba 100644 --- a/dap/algos/peakfind.py +++ b/dap/algos/peakfind.py @@ -43,12 +43,9 @@ def calc_peakfinder_analysis(results, data, pixel_mask_pf): number_of_spots = len(peak_list_x) results["number_of_spots"] = number_of_spots - results["spot_x"] = [-1.0] * number_of_spots - results["spot_y"] = [-1.0] * number_of_spots + results["spot_x"] = [x + 0.5 for x in peak_list_x] + results["spot_y"] = [y + 0.5 for y in peak_list_y] results["spot_intensity"] = copy(peak_list_value) - for i in range(number_of_spots): - results["spot_x"][i] = peak_list_x[i] + 0.5 - results["spot_y"][i] = peak_list_y[i] + 0.5 npeaks_threshold_hit = 
results.get("npeaks_threshold_hit", 15) -- 2.49.0 From 629d2c84bb0a701f7e562aceaad47ebc86beb9b1 Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Tue, 30 Jul 2024 19:57:36 +0200 Subject: [PATCH 024/159] de-arrow --- dap/algos/spiana.py | 23 ++++++++++++++--------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/dap/algos/spiana.py b/dap/algos/spiana.py index 9e8235e..65cc56b 100644 --- a/dap/algos/spiana.py +++ b/dap/algos/spiana.py @@ -1,16 +1,21 @@ def calc_spi_analysis(results): - if "spi_limit" in results and len(results["spi_limit"]) == 2: + if "spi_limit" not in results: + return - number_of_spots = 0 - if results["roi_intensities_normalised"][0] >= results["spi_limit"][0]: - number_of_spots += 25 - if results["roi_intensities_normalised"][1] >= results["spi_limit"][1]: - number_of_spots += 50 + if len(results["spi_limit"]) != 2: + return - results["number_of_spots"] = number_of_spots - if number_of_spots > 0: - results["is_hit_frame"] = True + number_of_spots = 0 + if results["roi_intensities_normalised"][0] >= results["spi_limit"][0]: + number_of_spots += 25 + if results["roi_intensities_normalised"][1] >= results["spi_limit"][1]: + number_of_spots += 50 + + results["number_of_spots"] = number_of_spots + + if number_of_spots > 0: + results["is_hit_frame"] = True -- 2.49.0 From 1dd37e1684df834cf76ac021bbaa93e42d7b7d29 Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Tue, 30 Jul 2024 20:06:12 +0200 Subject: [PATCH 025/159] moved additional checks inside function; unpack variables from results dict --- dap/algos/spiana.py | 16 +++++++++++++--- dap/worker.py | 2 +- 2 files changed, 14 insertions(+), 4 deletions(-) diff --git a/dap/algos/spiana.py b/dap/algos/spiana.py index 65cc56b..d68369f 100644 --- a/dap/algos/spiana.py +++ b/dap/algos/spiana.py @@ -3,13 +3,23 @@ def calc_spi_analysis(results): if "spi_limit" not in results: return - if len(results["spi_limit"]) != 2: + spi_limit = results["spi_limit"] + + if len(spi_limit) != 2: + return + 
+ if "roi_intensities_normalised" not in results: + return + + roi_intensities_normalised = results["roi_intensities_normalised"] + + if len(roi_intensities_normalised) < 2: return number_of_spots = 0 - if results["roi_intensities_normalised"][0] >= results["spi_limit"][0]: + if roi_intensities_normalised[0] >= spi_limit[0]: number_of_spots += 25 - if results["roi_intensities_normalised"][1] >= results["spi_limit"][1]: + if roi_intensities_normalised[1] >= spi_limit[1]: number_of_spots += 50 results["number_of_spots"] = number_of_spots diff --git a/dap/worker.py b/dap/worker.py index cb3fb7b..561018d 100644 --- a/dap/worker.py +++ b/dap/worker.py @@ -206,7 +206,7 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host # SPI analysis do_spi_analysis = results.get("do_spi_analysis", False) - if do_spi_analysis and "roi_intensities_normalised" in results and len(results["roi_intensities_normalised"]) >= 2: + if do_spi_analysis: calc_spi_analysis(results) # in case all needed parameters are present, make peakfinding -- 2.49.0 From 26913abbd5927b4ec2bf2816665ee1cb74d40ba6 Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Tue, 30 Jul 2024 20:20:25 +0200 Subject: [PATCH 026/159] removed useless copy --- dap/algos/roi.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dap/algos/roi.py b/dap/algos/roi.py index c1d8bb6..6f585ee 100644 --- a/dap/algos/roi.py +++ b/dap/algos/roi.py @@ -12,7 +12,7 @@ def calc_roi(results, data, roi_x1, roi_x2, roi_y1, roi_y2, pixel_mask_pf, thres results["roi_intensities_proj_x"] = [] for iRoi in range(len(roi_x1)): - data_roi = np.copy(data[roi_y1[iRoi]:roi_y2[iRoi], roi_x1[iRoi]:roi_x2[iRoi]]) + data_roi = data[roi_y1[iRoi]:roi_y2[iRoi], roi_x1[iRoi]:roi_x2[iRoi]] roi_results[iRoi] = np.nansum(data_roi) if threshold_value_choice == "NaN": -- 2.49.0 From 12e008aeba14eb8453160471c14127c1ee284d35 Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Tue, 30 Jul 2024 21:25:30 +0200 Subject: [PATCH 
027/159] moved additional checks inside function --- dap/algos/peakfind.py | 7 +++++++ dap/worker.py | 2 +- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/dap/algos/peakfind.py b/dap/algos/peakfind.py index 8d57cba..2331f0e 100644 --- a/dap/algos/peakfind.py +++ b/dap/algos/peakfind.py @@ -6,6 +6,13 @@ from peakfinder8_extension import peakfinder_8 def calc_peakfinder_analysis(results, data, pixel_mask_pf): + if pixel_mask_pf is None: + return + + for k in ("beam_center_x", "beam_center_y", "hitfinder_min_snr", "hitfinder_min_pix_count", "hitfinder_adc_thresh"): + if k not in results: + return + x_beam = results["beam_center_x"] - 0.5 # to coordinates where position of first pixel/point is 0.5, 0.5 y_beam = results["beam_center_y"] - 0.5 # to coordinates where position of first pixel/point is 0.5, 0.5 diff --git a/dap/worker.py b/dap/worker.py index 561018d..2c9d9f6 100644 --- a/dap/worker.py +++ b/dap/worker.py @@ -211,7 +211,7 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host # in case all needed parameters are present, make peakfinding do_peakfinder_analysis = results.get("do_peakfinder_analysis", False) - if do_peakfinder_analysis and pixel_mask_pf is not None and all(k in results for k in ("beam_center_x", "beam_center_y", "hitfinder_min_snr", "hitfinder_min_pix_count", "hitfinder_adc_thresh")): + if do_peakfinder_analysis: calc_peakfinder_analysis(results, pfdata, pixel_mask_pf) # ??? 
-- 2.49.0 From 1db20f8b8215143ff2e8ccdce8032ae61cda650e Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Tue, 30 Jul 2024 22:23:28 +0200 Subject: [PATCH 028/159] moved additional checks inside function --- dap/algos/roi.py | 17 ++++++++++++++++- dap/worker.py | 10 +++------- 2 files changed, 19 insertions(+), 8 deletions(-) diff --git a/dap/algos/roi.py b/dap/algos/roi.py index 6f585ee..1420fe5 100644 --- a/dap/algos/roi.py +++ b/dap/algos/roi.py @@ -1,10 +1,25 @@ import numpy as np -def calc_roi(results, data, roi_x1, roi_x2, roi_y1, roi_y2, pixel_mask_pf, threshold_value_choice): +def calc_roi(results, data, pixel_mask_pf, threshold_value_choice): if pixel_mask_pf is None: return + for k in ("roi_x1", "roi_x2", "roi_y1", "roi_y2"): + if k not in results: + return + + roi_x1 = results["roi_x1"] + roi_x2 = results["roi_x2"] + roi_y1 = results["roi_y1"] + roi_y2 = results["roi_y2"] + + if len(roi_x1) == 0: + return + + if not (len(roi_x1) == len(roi_x2) == len(roi_y1) == len(roi_y2)): + return + roi_results = [0] * len(roi_x1) roi_results_normalised = [0] * len(roi_x1) diff --git a/dap/worker.py b/dap/worker.py index 2c9d9f6..614b096 100644 --- a/dap/worker.py +++ b/dap/worker.py @@ -196,13 +196,9 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host pfdata[pfdata > threshold_max] = threshold_value # if roi calculation request is present, make it - roi_x1 = results.get("roi_x1", []) - roi_x2 = results.get("roi_x2", []) - roi_y1 = results.get("roi_y1", []) - roi_y2 = results.get("roi_y2", []) - - if len(roi_x1) > 0 and len(roi_x1) == len(roi_x2) == len(roi_y1) == len(roi_y2): - calc_roi(results, pfdata, roi_x1, roi_x2, roi_y1, roi_y2, pixel_mask_pf, threshold_value_choice) + do_roi = ("roi_x1" in results) + if do_roi: + calc_roi(results, pfdata, pixel_mask_pf, threshold_value_choice) # SPI analysis do_spi_analysis = results.get("do_spi_analysis", False) -- 2.49.0 From a25bd23b674e33ee62339880440fcf7a8cf7c108 Mon Sep 17 00:00:00 2001 
From: Sven Augustin Date: Wed, 31 Jul 2024 11:09:46 +0200 Subject: [PATCH 029/159] cleaned/restructured --- dap/algos/roi.py | 33 ++++++++++++++++++++++----------- 1 file changed, 22 insertions(+), 11 deletions(-) diff --git a/dap/algos/roi.py b/dap/algos/roi.py index 1420fe5..9eaea84 100644 --- a/dap/algos/roi.py +++ b/dap/algos/roi.py @@ -20,26 +20,37 @@ def calc_roi(results, data, pixel_mask_pf, threshold_value_choice): if not (len(roi_x1) == len(roi_x2) == len(roi_y1) == len(roi_y2)): return - roi_results = [0] * len(roi_x1) - roi_results_normalised = [0] * len(roi_x1) + roi_results = [] + roi_results_normalised = [] - results["roi_intensities_x"] = [] - results["roi_intensities_proj_x"] = [] + roi_intensities_x = [] + roi_intensities_proj_x = [] - for iRoi in range(len(roi_x1)): - data_roi = data[roi_y1[iRoi]:roi_y2[iRoi], roi_x1[iRoi]:roi_x2[iRoi]] + for ix1, ix2, iy1, iy2 in zip(roi_x1, roi_x2, roi_y1, roi_y2): + data_roi = data[iy1:iy2, ix1:ix2] + + roi_sum = np.nansum(data_roi) + roi_results.append(roi_sum) - roi_results[iRoi] = np.nansum(data_roi) if threshold_value_choice == "NaN": - roi_results_normalised[iRoi] = roi_results[iRoi] / ((roi_y2[iRoi] - roi_y1[iRoi]) * (roi_x2[iRoi] - roi_x1[iRoi])) + roi_area = (y2 - y1) * (x2 - x1) + roi_sum_norm = roi_sum / roi_area else: - roi_results_normalised[iRoi] = np.nanmean(data_roi) + roi_sum_norm = np.nanmean(data_roi) - results["roi_intensities_x"].append([roi_x1[iRoi], roi_x2[iRoi]]) - results["roi_intensities_proj_x"].append(np.nansum(data_roi, axis=0).tolist()) + roi_results_normalised.append(roi_sum_norm) + + roi_intensity_x = [x1, x2] + roi_intensity_proj_x = np.nansum(data_roi, axis=0).tolist() + + roi_intensities_x.append(roi_intensity_x) + roi_intensities_proj_x.append(roi_intensity_proj_x) results["roi_intensities"] = [float(r) for r in roi_results] results["roi_intensities_normalised"] = [float(r) for r in roi_results_normalised] + results["roi_intensities_x"] = roi_intensities_x + 
results["roi_intensities_proj_x"] = roi_intensities_proj_x + -- 2.49.0 From abeab715ddeb0080c56d7f70ce99ca966b2452f4 Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Wed, 31 Jul 2024 11:12:10 +0200 Subject: [PATCH 030/159] consistent naming --- dap/algos/roi.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/dap/algos/roi.py b/dap/algos/roi.py index 9eaea84..4174ae4 100644 --- a/dap/algos/roi.py +++ b/dap/algos/roi.py @@ -20,8 +20,8 @@ def calc_roi(results, data, pixel_mask_pf, threshold_value_choice): if not (len(roi_x1) == len(roi_x2) == len(roi_y1) == len(roi_y2)): return - roi_results = [] - roi_results_normalised = [] + roi_intensities = [] + roi_intensities_normalised = [] roi_intensities_x = [] roi_intensities_proj_x = [] @@ -30,7 +30,7 @@ def calc_roi(results, data, pixel_mask_pf, threshold_value_choice): data_roi = data[iy1:iy2, ix1:ix2] roi_sum = np.nansum(data_roi) - roi_results.append(roi_sum) + roi_intensities.append(roi_sum) if threshold_value_choice == "NaN": roi_area = (y2 - y1) * (x2 - x1) @@ -38,7 +38,7 @@ def calc_roi(results, data, pixel_mask_pf, threshold_value_choice): else: roi_sum_norm = np.nanmean(data_roi) - roi_results_normalised.append(roi_sum_norm) + roi_intensities_normalised.append(roi_sum_norm) roi_intensity_x = [x1, x2] roi_intensity_proj_x = np.nansum(data_roi, axis=0).tolist() @@ -46,8 +46,8 @@ def calc_roi(results, data, pixel_mask_pf, threshold_value_choice): roi_intensities_x.append(roi_intensity_x) roi_intensities_proj_x.append(roi_intensity_proj_x) - results["roi_intensities"] = [float(r) for r in roi_results] - results["roi_intensities_normalised"] = [float(r) for r in roi_results_normalised] + results["roi_intensities"] = [float(r) for r in roi_intensities] + results["roi_intensities_normalised"] = [float(r) for r in roi_intensities_normalised] results["roi_intensities_x"] = roi_intensities_x results["roi_intensities_proj_x"] = roi_intensities_proj_x -- 2.49.0 From 
a834d23f42bb00680cad8b3a03dec36794abaa5e Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Wed, 31 Jul 2024 11:12:35 +0200 Subject: [PATCH 031/159] typos --- dap/algos/roi.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/dap/algos/roi.py b/dap/algos/roi.py index 4174ae4..b486e00 100644 --- a/dap/algos/roi.py +++ b/dap/algos/roi.py @@ -33,14 +33,14 @@ def calc_roi(results, data, pixel_mask_pf, threshold_value_choice): roi_intensities.append(roi_sum) if threshold_value_choice == "NaN": - roi_area = (y2 - y1) * (x2 - x1) + roi_area = (iy2 - iy1) * (ix2 - ix1) roi_sum_norm = roi_sum / roi_area else: roi_sum_norm = np.nanmean(data_roi) roi_intensities_normalised.append(roi_sum_norm) - roi_intensity_x = [x1, x2] + roi_intensity_x = [ix1, ix2] roi_intensity_proj_x = np.nansum(data_roi, axis=0).tolist() roi_intensities_x.append(roi_intensity_x) -- 2.49.0 From 55a719fa6c8361eef23f321b1eef2225786adf09 Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Wed, 31 Jul 2024 11:16:20 +0200 Subject: [PATCH 032/159] removed useless copy/conversion --- dap/algos/roi.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/dap/algos/roi.py b/dap/algos/roi.py index b486e00..f240eff 100644 --- a/dap/algos/roi.py +++ b/dap/algos/roi.py @@ -46,8 +46,8 @@ def calc_roi(results, data, pixel_mask_pf, threshold_value_choice): roi_intensities_x.append(roi_intensity_x) roi_intensities_proj_x.append(roi_intensity_proj_x) - results["roi_intensities"] = [float(r) for r in roi_intensities] - results["roi_intensities_normalised"] = [float(r) for r in roi_intensities_normalised] + results["roi_intensities"] = roi_intensities + results["roi_intensities_normalised"] = roi_intensities_normalised results["roi_intensities_x"] = roi_intensities_x results["roi_intensities_proj_x"] = roi_intensities_proj_x -- 2.49.0 From 7662f8604dbb7cdc0c6f0d01f2db04cc03434b70 Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Wed, 31 Jul 2024 11:21:03 +0200 Subject: [PATCH 033/159] 
naming, cleanup --- dap/algos/roi.py | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/dap/algos/roi.py b/dap/algos/roi.py index f240eff..708789b 100644 --- a/dap/algos/roi.py +++ b/dap/algos/roi.py @@ -22,7 +22,6 @@ def calc_roi(results, data, pixel_mask_pf, threshold_value_choice): roi_intensities = [] roi_intensities_normalised = [] - roi_intensities_x = [] roi_intensities_proj_x = [] @@ -30,25 +29,23 @@ def calc_roi(results, data, pixel_mask_pf, threshold_value_choice): data_roi = data[iy1:iy2, ix1:ix2] roi_sum = np.nansum(data_roi) - roi_intensities.append(roi_sum) if threshold_value_choice == "NaN": - roi_area = (iy2 - iy1) * (ix2 - ix1) + roi_area = (ix2 - ix1) * (iy2 - iy1) roi_sum_norm = roi_sum / roi_area else: roi_sum_norm = np.nanmean(data_roi) + roi_indices_x = [ix1, ix2] + roi_proj_x = np.nansum(data_roi, axis=0).tolist() + + roi_intensities.append(roi_sum) roi_intensities_normalised.append(roi_sum_norm) - - roi_intensity_x = [ix1, ix2] - roi_intensity_proj_x = np.nansum(data_roi, axis=0).tolist() - - roi_intensities_x.append(roi_intensity_x) - roi_intensities_proj_x.append(roi_intensity_proj_x) + roi_intensities_x.append(roi_indices_x) + roi_intensities_proj_x.append(roi_proj_x) results["roi_intensities"] = roi_intensities results["roi_intensities_normalised"] = roi_intensities_normalised - results["roi_intensities_x"] = roi_intensities_x results["roi_intensities_proj_x"] = roi_intensities_proj_x -- 2.49.0 From 41ff6c32040fc4b9fc65e1de2a5b15533c1bc202 Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Wed, 31 Jul 2024 11:23:44 +0200 Subject: [PATCH 034/159] renamed data_copy_1 -> data --- dap/algos/radprof.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/dap/algos/radprof.py b/dap/algos/radprof.py index 1796bac..624635e 100644 --- a/dap/algos/radprof.py +++ b/dap/algos/radprof.py @@ -2,7 +2,7 @@ import numpy as np def calc_radial_integration(results, data, keep_pixels, pixel_mask_pf, 
center_radial_integration, r_radial_integration): - data_copy_1 = np.copy(data) + data = np.copy(data) if keep_pixels is None and pixel_mask_pf is not None: keep_pixels = (pixel_mask_pf != 0) @@ -10,7 +10,7 @@ def calc_radial_integration(results, data, keep_pixels, pixel_mask_pf, center_ra center_radial_integration = [results["beam_center_x"], results["beam_center_y"]] r_radial_integration = None if r_radial_integration is None: - r_radial_integration, nr_radial_integration = prepare_radial_profile(data_copy_1, center_radial_integration, keep_pixels) + r_radial_integration, nr_radial_integration = prepare_radial_profile(data, center_radial_integration, keep_pixels) r_min_max = [int(np.min(r_radial_integration)), int(np.max(r_radial_integration)) + 1] @@ -19,11 +19,11 @@ def calc_radial_integration(results, data, keep_pixels, pixel_mask_pf, center_ra if apply_threshold and all(k in results for k in ("threshold_min", "threshold_max")): threshold_min = float(results["threshold_min"]) threshold_max = float(results["threshold_max"]) - data_copy_1[data_copy_1 < threshold_min] = np.nan + data[data < threshold_min] = np.nan if threshold_max > threshold_min: - data_copy_1[data_copy_1 > threshold_max] = np.nan + data[data > threshold_max] = np.nan - rp = radial_profile(data_copy_1, r_radial_integration, nr_radial_integration, keep_pixels) + rp = radial_profile(data, r_radial_integration, nr_radial_integration, keep_pixels) silent_region_min = results.get("radial_integration_silent_min", None) silent_region_max = results.get("radial_integration_silent_max", None) -- 2.49.0 From 1c129bc5becc8407947cb39055d68202f4f67f79 Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Wed, 31 Jul 2024 17:22:05 +0200 Subject: [PATCH 035/159] removed useless copy (seems unlikely that peakfinder_8 changes the mask AND astype copies anyway unless explicity disabled) --- dap/algos/peakfind.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/dap/algos/peakfind.py 
b/dap/algos/peakfind.py index 2331f0e..67ba59f 100644 --- a/dap/algos/peakfind.py +++ b/dap/algos/peakfind.py @@ -28,16 +28,13 @@ def calc_peakfinder_analysis(results, data, pixel_mask_pf): # usually don't need to change this value, rather robust hitfinder_local_bg_radius = 20. - # in case of further modification with the mask, make a new one, independent from real mask - maskPr = np.copy(pixel_mask_pf) - y, x = np.indices(data.shape) pix_r = np.sqrt((x-x_beam)**2 + (y-y_beam)**2) peak_list_x, peak_list_y, peak_list_value = peakfinder_8( max_num_peaks, data.astype(np.float32), - maskPr.astype(np.int8), + pixel_mask_pf.astype(np.int8), pix_r.astype(np.float32), asic_nx, asic_ny, nasics_x, nasics_y, -- 2.49.0 From 851a186538620479dd9e664c50043f440f61fb8b Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Wed, 31 Jul 2024 17:50:40 +0200 Subject: [PATCH 036/159] readability --- dap/algos/radprof.py | 23 ++++++++++++++--------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/dap/algos/radprof.py b/dap/algos/radprof.py index 624635e..02d5504 100644 --- a/dap/algos/radprof.py +++ b/dap/algos/radprof.py @@ -6,13 +6,18 @@ def calc_radial_integration(results, data, keep_pixels, pixel_mask_pf, center_ra if keep_pixels is None and pixel_mask_pf is not None: keep_pixels = (pixel_mask_pf != 0) + if center_radial_integration is None: - center_radial_integration = [results["beam_center_x"], results["beam_center_y"]] + center_radial_integration = [ + results["beam_center_x"], + results["beam_center_y"] + ] r_radial_integration = None + if r_radial_integration is None: r_radial_integration, nr_radial_integration = prepare_radial_profile(data, center_radial_integration, keep_pixels) - r_min_max = [int(np.min(r_radial_integration)), int(np.max(r_radial_integration)) + 1] - + r_min = int(np.min(r_radial_integration)) + r_max = int(np.max(r_radial_integration)) + 1 apply_threshold = results.get("apply_threshold", False) @@ -32,16 +37,16 @@ def 
calc_radial_integration(results, data, keep_pixels, pixel_mask_pf, center_ra silent_region_min is not None and silent_region_max is not None and silent_region_max > silent_region_min and - silent_region_min > r_min_max[0] and - silent_region_max < r_min_max[1] + silent_region_min > r_min and + silent_region_max < r_max ): - - integral_silent_region = np.sum(rp[silent_region_min:silent_region_max]) + silent_region = rp[silent_region_min:silent_region_max] + integral_silent_region = np.sum(silent_region) rp = rp / integral_silent_region results["radint_normalised"] = [silent_region_min, silent_region_max] - results["radint_I"] = list(rp[r_min_max[0]:]) - results["radint_q"] = r_min_max + results["radint_I"] = rp[r_min:].tolist() + results["radint_q"] = [r_min, r_max] return keep_pixels, center_radial_integration, r_radial_integration -- 2.49.0 From 596a4e7e0a604e588e19dd4d41e7f05b841f4208 Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Wed, 31 Jul 2024 19:42:59 +0200 Subject: [PATCH 037/159] less convoluted --- dap/algos/radprof.py | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/dap/algos/radprof.py b/dap/algos/radprof.py index 02d5504..fa3cec3 100644 --- a/dap/algos/radprof.py +++ b/dap/algos/radprof.py @@ -54,21 +54,21 @@ def calc_radial_integration(results, data, keep_pixels, pixel_mask_pf, center_ra def radial_profile(data, r, nr, keep_pixels=None): if keep_pixels is not None: - tbin = np.bincount(r, data[keep_pixels].ravel()) - else: - tbin = np.bincount(r, data.ravel()) - radialprofile = tbin / nr - return radialprofile + data = data[keep_pixels] + data = data.ravel() + tbin = np.bincount(r, data) + rp = tbin / nr + return rp def prepare_radial_profile(data, center, keep_pixels=None): - y, x = np.indices((data.shape)) - r = np.sqrt((x - center[0])**2 + (y - center[1])**2) + y, x = np.indices(data.shape) + x0, y0 = center + rad = np.sqrt((x - x0)**2 + (y - y0)**2) if keep_pixels is not None: - r = 
r[keep_pixels].astype(int).ravel() - else: - r = r.astype(np.int).ravel() - nr = np.bincount(r) - return r, nr + rad = rad[keep_pixels] + rad = rad.astype(int).ravel() + nr = np.bincount(rad) + return rad, nr -- 2.49.0 From 7ea32d2a4d6344e9b72ca210db26c87205d426e0 Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Wed, 31 Jul 2024 19:43:07 +0200 Subject: [PATCH 038/159] spaces --- dap/algos/peakfind.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dap/algos/peakfind.py b/dap/algos/peakfind.py index 67ba59f..82ea29c 100644 --- a/dap/algos/peakfind.py +++ b/dap/algos/peakfind.py @@ -29,7 +29,7 @@ def calc_peakfinder_analysis(results, data, pixel_mask_pf): hitfinder_local_bg_radius = 20. y, x = np.indices(data.shape) - pix_r = np.sqrt((x-x_beam)**2 + (y-y_beam)**2) + pix_r = np.sqrt((x - x_beam)**2 + (y - y_beam)**2) peak_list_x, peak_list_y, peak_list_value = peakfinder_8( max_num_peaks, -- 2.49.0 From 1af3e9c3ec8f484b91b8cf084e774864733a458d Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Thu, 1 Aug 2024 09:02:58 +0200 Subject: [PATCH 039/159] added todo notes about TBD logic changes --- dap/algos/radprof.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/dap/algos/radprof.py b/dap/algos/radprof.py index fa3cec3..101be6b 100644 --- a/dap/algos/radprof.py +++ b/dap/algos/radprof.py @@ -25,6 +25,7 @@ def calc_radial_integration(results, data, keep_pixels, pixel_mask_pf, center_ra threshold_min = float(results["threshold_min"]) threshold_max = float(results["threshold_max"]) data[data < threshold_min] = np.nan + #TODO: skipping max is a guess, but not obvious/symmetric -- better to ensure the order min < max by switching them if needed if threshold_max > threshold_min: data[data > threshold_max] = np.nan @@ -36,6 +37,7 @@ def calc_radial_integration(results, data, keep_pixels, pixel_mask_pf, center_ra if ( silent_region_min is not None and silent_region_max is not None and + #TODO: skipping entirely is a guess, but not obvious -- better to 
ensure the order min < max by switching them if needed silent_region_max > silent_region_min and silent_region_min > r_min and silent_region_max < r_max -- 2.49.0 From ac8967611dec6b32c4b4f15dca787669412b48a5 Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Thu, 1 Aug 2024 09:05:43 +0200 Subject: [PATCH 040/159] use the same validation logic as in other cases --- dap/algos/spiana.py | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/dap/algos/spiana.py b/dap/algos/spiana.py index d68369f..fb535c2 100644 --- a/dap/algos/spiana.py +++ b/dap/algos/spiana.py @@ -1,18 +1,15 @@ def calc_spi_analysis(results): - if "spi_limit" not in results: - return + for k in ("spi_limit", "roi_intensities_normalised"): + if k not in results: + return spi_limit = results["spi_limit"] + roi_intensities_normalised = results["roi_intensities_normalised"] if len(spi_limit) != 2: return - if "roi_intensities_normalised" not in results: - return - - roi_intensities_normalised = results["roi_intensities_normalised"] - if len(roi_intensities_normalised) < 2: return -- 2.49.0 From 3924abcba4f071863162c715dcf5f4bfa21b9202 Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Thu, 1 Aug 2024 09:16:40 +0200 Subject: [PATCH 041/159] comments, todo note --- dap/algos/peakfind.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/dap/algos/peakfind.py b/dap/algos/peakfind.py index 82ea29c..0ed3932 100644 --- a/dap/algos/peakfind.py +++ b/dap/algos/peakfind.py @@ -13,8 +13,9 @@ def calc_peakfinder_analysis(results, data, pixel_mask_pf): if k not in results: return - x_beam = results["beam_center_x"] - 0.5 # to coordinates where position of first pixel/point is 0.5, 0.5 - y_beam = results["beam_center_y"] - 0.5 # to coordinates where position of first pixel/point is 0.5, 0.5 + # to coordinates where position of first pixel/point is 0.5, 0.5 + x_beam = results["beam_center_x"] - 0.5 + y_beam = results["beam_center_y"] - 0.5 hitfinder_min_snr = 
results["hitfinder_min_snr"] hitfinder_min_pix_count = int(results["hitfinder_min_pix_count"]) @@ -47,9 +48,11 @@ def calc_peakfinder_analysis(results, data, pixel_mask_pf): number_of_spots = len(peak_list_x) results["number_of_spots"] = number_of_spots + + # to coordinates where position of first pixel/point is 1, 1 results["spot_x"] = [x + 0.5 for x in peak_list_x] results["spot_y"] = [y + 0.5 for y in peak_list_y] - results["spot_intensity"] = copy(peak_list_value) + results["spot_intensity"] = copy(peak_list_value) #TODO: why is this copy needed? npeaks_threshold_hit = results.get("npeaks_threshold_hit", 15) -- 2.49.0 From 2facda5d0733013d04a9b9eb7a8801cd71c1deba Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Thu, 1 Aug 2024 09:16:56 +0200 Subject: [PATCH 042/159] comments --- dap/algos/peakfind.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/dap/algos/peakfind.py b/dap/algos/peakfind.py index 0ed3932..639721d 100644 --- a/dap/algos/peakfind.py +++ b/dap/algos/peakfind.py @@ -13,7 +13,7 @@ def calc_peakfinder_analysis(results, data, pixel_mask_pf): if k not in results: return - # to coordinates where position of first pixel/point is 0.5, 0.5 + # to coordinates where position of first pixel/point is (0.5, 0.5) x_beam = results["beam_center_x"] - 0.5 y_beam = results["beam_center_y"] - 0.5 @@ -49,7 +49,7 @@ def calc_peakfinder_analysis(results, data, pixel_mask_pf): number_of_spots = len(peak_list_x) results["number_of_spots"] = number_of_spots - # to coordinates where position of first pixel/point is 1, 1 + # to coordinates where position of first pixel/point is (1, 1) results["spot_x"] = [x + 0.5 for x in peak_list_x] results["spot_y"] = [y + 0.5 for y in peak_list_y] results["spot_intensity"] = copy(peak_list_value) #TODO: why is this copy needed? 
-- 2.49.0 From a34db9287ea93a19c88199b0695cf9d4acdf6240 Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Thu, 1 Aug 2024 09:19:49 +0200 Subject: [PATCH 043/159] isort --- dap/algos/__init__.py | 4 ++-- dap/worker.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/dap/algos/__init__.py b/dap/algos/__init__.py index dbd53b6..b2775e6 100644 --- a/dap/algos/__init__.py +++ b/dap/algos/__init__.py @@ -1,8 +1,8 @@ -from .radprof import calc_radial_integration from .addmask import calc_apply_additional_mask from .peakfind import calc_peakfinder_analysis -from .spiana import calc_spi_analysis +from .radprof import calc_radial_integration from .roi import calc_roi +from .spiana import calc_spi_analysis diff --git a/dap/worker.py b/dap/worker.py index 614b096..6225ee7 100644 --- a/dap/worker.py +++ b/dap/worker.py @@ -7,9 +7,9 @@ from time import sleep import jungfrau_utils as ju import numpy as np -from algos import calc_radial_integration, calc_apply_additional_mask, calc_peakfinder_analysis, calc_spi_analysis, calc_roi -from zmqsocks import ZMQSockets +from algos import calc_apply_additional_mask, calc_peakfinder_analysis, calc_radial_integration, calc_roi, calc_spi_analysis from utils import json_load, read_bit +from zmqsocks import ZMQSockets def main(): -- 2.49.0 From 92149d0614c75c48ebadd1662e59e14445ce053b Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Fri, 2 Aug 2024 12:01:01 +0200 Subject: [PATCH 044/159] moved do_* checks inside function for better overview --- dap/algos/addmask.py | 4 ++++ dap/algos/peakfind.py | 4 ++++ dap/algos/spiana.py | 4 ++++ dap/worker.py | 24 ++++++------------------ 4 files changed, 18 insertions(+), 18 deletions(-) diff --git a/dap/algos/addmask.py b/dap/algos/addmask.py index d547359..41e8576 100644 --- a/dap/algos/addmask.py +++ b/dap/algos/addmask.py @@ -1,6 +1,10 @@ #TODO: find a better way to handle this def calc_apply_additional_mask(detector, pixel_mask_pf): + apply_additional_mask = 
results.get("apply_additional_mask", False) + if not apply_additional_mask: + return + if detector == "JF06T08V04": # edge pixels pixel_mask_pf[0:1030, 1100] = 0 diff --git a/dap/algos/peakfind.py b/dap/algos/peakfind.py index 639721d..6667d87 100644 --- a/dap/algos/peakfind.py +++ b/dap/algos/peakfind.py @@ -6,6 +6,10 @@ from peakfinder8_extension import peakfinder_8 def calc_peakfinder_analysis(results, data, pixel_mask_pf): + do_peakfinder_analysis = results.get("do_peakfinder_analysis", False) + if not do_peakfinder_analysis: + return + if pixel_mask_pf is None: return diff --git a/dap/algos/spiana.py b/dap/algos/spiana.py index fb535c2..593aaf7 100644 --- a/dap/algos/spiana.py +++ b/dap/algos/spiana.py @@ -1,5 +1,9 @@ def calc_spi_analysis(results): + do_spi_analysis = results.get("do_spi_analysis", False) + if not do_spi_analysis: + return + for k in ("spi_limit", "roi_intensities_normalised"): if k not in results: return diff --git a/dap/worker.py b/dap/worker.py index 6225ee7..07fe014 100644 --- a/dap/worker.py +++ b/dap/worker.py @@ -159,10 +159,9 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host else: pixel_mask_pf = None -# add additional mask at the edge of modules for JF06T08 - apply_additional_mask = results.get("apply_additional_mask", False) - if apply_additional_mask: - calc_apply_additional_mask(detector, pixel_mask_pf) + + calc_apply_additional_mask(detector, pixel_mask_pf) + if pixel_mask_corrected is not None: data_s = copy(image) @@ -195,20 +194,9 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host if threshold_max > threshold_min: pfdata[pfdata > threshold_max] = threshold_value - # if roi calculation request is present, make it - do_roi = ("roi_x1" in results) - if do_roi: - calc_roi(results, pfdata, pixel_mask_pf, threshold_value_choice) - -# SPI analysis - do_spi_analysis = results.get("do_spi_analysis", False) - if do_spi_analysis: - calc_spi_analysis(results) - -# in case 
all needed parameters are present, make peakfinding - do_peakfinder_analysis = results.get("do_peakfinder_analysis", False) - if do_peakfinder_analysis: - calc_peakfinder_analysis(results, pfdata, pixel_mask_pf) + calc_roi(results, pfdata, pixel_mask_pf, threshold_value_choice) + calc_spi_analysis(results) + calc_peakfinder_analysis(results, pfdata, pixel_mask_pf) # ??? forceSendVisualisation = False -- 2.49.0 From 53d2d965d945e40fb14eff11b8e0578972040db3 Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Fri, 2 Aug 2024 12:11:19 +0200 Subject: [PATCH 045/159] forgotten results argument --- dap/algos/addmask.py | 2 +- dap/worker.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/dap/algos/addmask.py b/dap/algos/addmask.py index 41e8576..cdfaf3c 100644 --- a/dap/algos/addmask.py +++ b/dap/algos/addmask.py @@ -1,6 +1,6 @@ #TODO: find a better way to handle this -def calc_apply_additional_mask(detector, pixel_mask_pf): +def calc_apply_additional_mask(results, detector, pixel_mask_pf): apply_additional_mask = results.get("apply_additional_mask", False) if not apply_additional_mask: return diff --git a/dap/worker.py b/dap/worker.py index 07fe014..4c701a8 100644 --- a/dap/worker.py +++ b/dap/worker.py @@ -160,7 +160,7 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host pixel_mask_pf = None - calc_apply_additional_mask(detector, pixel_mask_pf) + calc_apply_additional_mask(results, detector, pixel_mask_pf) if pixel_mask_corrected is not None: -- 2.49.0 From a690654343bf6c63739cde30e9389909de1ad36a Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Fri, 2 Aug 2024 12:14:27 +0200 Subject: [PATCH 046/159] moved thresholding out of work function --- dap/algos/__init__.py | 1 + dap/algos/thresh.py | 17 +++++++++++++++++ dap/worker.py | 12 ++---------- 3 files changed, 20 insertions(+), 10 deletions(-) create mode 100644 dap/algos/thresh.py diff --git a/dap/algos/__init__.py b/dap/algos/__init__.py index b2775e6..8b023af 
100644 --- a/dap/algos/__init__.py +++ b/dap/algos/__init__.py @@ -4,5 +4,6 @@ from .peakfind import calc_peakfinder_analysis from .radprof import calc_radial_integration from .roi import calc_roi from .spiana import calc_spi_analysis +from .thresh import calc_apply_threshold diff --git a/dap/algos/thresh.py b/dap/algos/thresh.py new file mode 100644 index 0000000..eb3e345 --- /dev/null +++ b/dap/algos/thresh.py @@ -0,0 +1,17 @@ +import numpy as np + + +def calc_apply_threshold(results, pfdata): + apply_threshold = results.get("apply_threshold", False) + threshold_value_choice = results.get("threshold_value", "NaN") + threshold_value = 0 if threshold_value_choice == "0" else np.nan + if apply_threshold and all(k in results for k in ("threshold_min", "threshold_max")): + threshold_min = float(results["threshold_min"]) + threshold_max = float(results["threshold_max"]) + pfdata[pfdata < threshold_min] = threshold_value + if threshold_max > threshold_min: + pfdata[pfdata > threshold_max] = threshold_value + return threshold_value_choice + + + diff --git a/dap/worker.py b/dap/worker.py index 4c701a8..b252a33 100644 --- a/dap/worker.py +++ b/dap/worker.py @@ -7,7 +7,7 @@ from time import sleep import jungfrau_utils as ju import numpy as np -from algos import calc_apply_additional_mask, calc_peakfinder_analysis, calc_radial_integration, calc_roi, calc_spi_analysis +from algos import calc_apply_additional_mask, calc_apply_threshold, calc_peakfinder_analysis, calc_radial_integration, calc_roi, calc_spi_analysis from utils import json_load, read_bit from zmqsocks import ZMQSockets @@ -184,15 +184,7 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host if pixel_mask_pf is not None: pfdata[pixel_mask_pf != 1] = np.nan - apply_threshold = results.get("apply_threshold", False) - threshold_value_choice = results.get("threshold_value", "NaN") - threshold_value = 0 if threshold_value_choice == "0" else np.nan - if apply_threshold and all(k in results 
for k in ("threshold_min", "threshold_max")): - threshold_min = float(results["threshold_min"]) - threshold_max = float(results["threshold_max"]) - pfdata[pfdata < threshold_min] = threshold_value - if threshold_max > threshold_min: - pfdata[pfdata > threshold_max] = threshold_value + threshold_value_choice = calc_apply_threshold(results, pfdata) calc_roi(results, pfdata, pixel_mask_pf, threshold_value_choice) calc_spi_analysis(results) -- 2.49.0 From e9d187c7129c5e66275703312279d09eb37d5659 Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Fri, 2 Aug 2024 12:15:19 +0200 Subject: [PATCH 047/159] renamed variable pfdata to data --- dap/algos/thresh.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/dap/algos/thresh.py b/dap/algos/thresh.py index eb3e345..64c5d42 100644 --- a/dap/algos/thresh.py +++ b/dap/algos/thresh.py @@ -1,16 +1,18 @@ import numpy as np -def calc_apply_threshold(results, pfdata): +def calc_apply_threshold(results, data): apply_threshold = results.get("apply_threshold", False) threshold_value_choice = results.get("threshold_value", "NaN") threshold_value = 0 if threshold_value_choice == "0" else np.nan + if apply_threshold and all(k in results for k in ("threshold_min", "threshold_max")): threshold_min = float(results["threshold_min"]) threshold_max = float(results["threshold_max"]) - pfdata[pfdata < threshold_min] = threshold_value + data[data < threshold_min] = threshold_value if threshold_max > threshold_min: - pfdata[pfdata > threshold_max] = threshold_value + data[data > threshold_max] = threshold_value + return threshold_value_choice -- 2.49.0 From 40c0a08d7c2d51d909bc045a1c364f7e2a6b8134 Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Fri, 2 Aug 2024 12:18:56 +0200 Subject: [PATCH 048/159] added todo notes --- dap/algos/radprof.py | 1 + dap/algos/thresh.py | 2 ++ 2 files changed, 3 insertions(+) diff --git a/dap/algos/radprof.py b/dap/algos/radprof.py index 101be6b..d9e6617 100644 --- a/dap/algos/radprof.py +++ 
b/dap/algos/radprof.py @@ -21,6 +21,7 @@ def calc_radial_integration(results, data, keep_pixels, pixel_mask_pf, center_ra apply_threshold = results.get("apply_threshold", False) + #TODO: this is duplicated in calc_apply_threshold if apply_threshold and all(k in results for k in ("threshold_min", "threshold_max")): threshold_min = float(results["threshold_min"]) threshold_max = float(results["threshold_max"]) diff --git a/dap/algos/thresh.py b/dap/algos/thresh.py index 64c5d42..c902348 100644 --- a/dap/algos/thresh.py +++ b/dap/algos/thresh.py @@ -6,10 +6,12 @@ def calc_apply_threshold(results, data): threshold_value_choice = results.get("threshold_value", "NaN") threshold_value = 0 if threshold_value_choice == "0" else np.nan + #TODO: this is duplicated in calc_radial_integration if apply_threshold and all(k in results for k in ("threshold_min", "threshold_max")): threshold_min = float(results["threshold_min"]) threshold_max = float(results["threshold_max"]) data[data < threshold_min] = threshold_value + #TODO: skipping max is a guess, but not obvious/symmetric -- better to ensure the order min < max by switching them if needed if threshold_max > threshold_min: data[data > threshold_max] = threshold_value -- 2.49.0 From b1b92e4b5266780f51854cfd1815398bbccfdee0 Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Fri, 2 Aug 2024 12:28:09 +0200 Subject: [PATCH 049/159] simpler logic --- dap/algos/roi.py | 6 ++++-- dap/algos/thresh.py | 2 -- dap/worker.py | 5 ++--- 3 files changed, 6 insertions(+), 7 deletions(-) diff --git a/dap/algos/roi.py b/dap/algos/roi.py index 708789b..efcdcab 100644 --- a/dap/algos/roi.py +++ b/dap/algos/roi.py @@ -1,7 +1,7 @@ import numpy as np -def calc_roi(results, data, pixel_mask_pf, threshold_value_choice): +def calc_roi(results, data, pixel_mask_pf): if pixel_mask_pf is None: return @@ -20,6 +20,8 @@ def calc_roi(results, data, pixel_mask_pf, threshold_value_choice): if not (len(roi_x1) == len(roi_x2) == len(roi_y1) == len(roi_y2)): 
return + threshold_value = results.get("threshold_value", "NaN") + roi_intensities = [] roi_intensities_normalised = [] roi_intensities_x = [] @@ -30,7 +32,7 @@ def calc_roi(results, data, pixel_mask_pf, threshold_value_choice): roi_sum = np.nansum(data_roi) - if threshold_value_choice == "NaN": + if threshold_value == "NaN": roi_area = (ix2 - ix1) * (iy2 - iy1) roi_sum_norm = roi_sum / roi_area else: diff --git a/dap/algos/thresh.py b/dap/algos/thresh.py index c902348..1c18427 100644 --- a/dap/algos/thresh.py +++ b/dap/algos/thresh.py @@ -15,7 +15,5 @@ def calc_apply_threshold(results, data): if threshold_max > threshold_min: data[data > threshold_max] = threshold_value - return threshold_value_choice - diff --git a/dap/worker.py b/dap/worker.py index b252a33..da46b21 100644 --- a/dap/worker.py +++ b/dap/worker.py @@ -184,9 +184,8 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host if pixel_mask_pf is not None: pfdata[pixel_mask_pf != 1] = np.nan - threshold_value_choice = calc_apply_threshold(results, pfdata) - - calc_roi(results, pfdata, pixel_mask_pf, threshold_value_choice) + calc_apply_threshold(results, pfdata) + calc_roi(results, pfdata, pixel_mask_pf) calc_spi_analysis(results) calc_peakfinder_analysis(results, pfdata, pixel_mask_pf) -- 2.49.0 From d9b2ae4ac2bebf084430122f5e8370444a13affb Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Fri, 2 Aug 2024 12:39:45 +0200 Subject: [PATCH 050/159] use the same validation logic as in other cases --- dap/algos/thresh.py | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/dap/algos/thresh.py b/dap/algos/thresh.py index 1c18427..843be71 100644 --- a/dap/algos/thresh.py +++ b/dap/algos/thresh.py @@ -3,17 +3,23 @@ import numpy as np def calc_apply_threshold(results, data): apply_threshold = results.get("apply_threshold", False) + if not apply_threshold: + return + + for k in ("threshold_min", "threshold_max"): + if k not in results: + return + 
threshold_value_choice = results.get("threshold_value", "NaN") - threshold_value = 0 if threshold_value_choice == "0" else np.nan + threshold_value = 0 if threshold_value_choice == "0" else np.nan #TODO #TODO: this is duplicated in calc_radial_integration - if apply_threshold and all(k in results for k in ("threshold_min", "threshold_max")): - threshold_min = float(results["threshold_min"]) - threshold_max = float(results["threshold_max"]) - data[data < threshold_min] = threshold_value - #TODO: skipping max is a guess, but not obvious/symmetric -- better to ensure the order min < max by switching them if needed - if threshold_max > threshold_min: - data[data > threshold_max] = threshold_value + threshold_min = float(results["threshold_min"]) + threshold_max = float(results["threshold_max"]) + data[data < threshold_min] = threshold_value + #TODO: skipping max is a guess, but not obvious/symmetric -- better to ensure the order min < max by switching them if needed + if threshold_max > threshold_min: + data[data > threshold_max] = threshold_value -- 2.49.0 From 61b1d55a9ea529a47a7d4e8ff9bed74e171a824f Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Fri, 2 Aug 2024 13:10:06 +0200 Subject: [PATCH 051/159] added notes on in place changes --- dap/worker.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/dap/worker.py b/dap/worker.py index da46b21..2438dff 100644 --- a/dap/worker.py +++ b/dap/worker.py @@ -160,7 +160,7 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host pixel_mask_pf = None - calc_apply_additional_mask(results, detector, pixel_mask_pf) + calc_apply_additional_mask(results, detector, pixel_mask_pf) # changes pixel_mask_pf in place if pixel_mask_corrected is not None: @@ -184,7 +184,7 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host if pixel_mask_pf is not None: pfdata[pixel_mask_pf != 1] = np.nan - calc_apply_threshold(results, pfdata) + calc_apply_threshold(results, 
pfdata) # changes pfdata in place calc_roi(results, pfdata, pixel_mask_pf) calc_spi_analysis(results) calc_peakfinder_analysis(results, pfdata, pixel_mask_pf) -- 2.49.0 From 1402b08060fed052f5c4cdff221ccd1bf1bee0fb Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Fri, 2 Aug 2024 13:12:30 +0200 Subject: [PATCH 052/159] only copy if there are really in-place changes to be done --- dap/algos/radprof.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/dap/algos/radprof.py b/dap/algos/radprof.py index d9e6617..54f78cb 100644 --- a/dap/algos/radprof.py +++ b/dap/algos/radprof.py @@ -2,8 +2,6 @@ import numpy as np def calc_radial_integration(results, data, keep_pixels, pixel_mask_pf, center_radial_integration, r_radial_integration): - data = np.copy(data) - if keep_pixels is None and pixel_mask_pf is not None: keep_pixels = (pixel_mask_pf != 0) @@ -25,6 +23,7 @@ def calc_radial_integration(results, data, keep_pixels, pixel_mask_pf, center_ra if apply_threshold and all(k in results for k in ("threshold_min", "threshold_max")): threshold_min = float(results["threshold_min"]) threshold_max = float(results["threshold_max"]) + data = np.copy(data) # do the following in-place changes on a copy data[data < threshold_min] = np.nan #TODO: skipping max is a guess, but not obvious/symmetric -- better to ensure the order min < max by switching them if needed if threshold_max > threshold_min: -- 2.49.0 From 6af0166f07bb06945a5b440c36ce0c6857545f18 Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Fri, 2 Aug 2024 13:16:09 +0200 Subject: [PATCH 053/159] added todo notes --- dap/algos/radprof.py | 2 +- dap/worker.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/dap/algos/radprof.py b/dap/algos/radprof.py index 54f78cb..b1b3fcf 100644 --- a/dap/algos/radprof.py +++ b/dap/algos/radprof.py @@ -3,7 +3,7 @@ import numpy as np def calc_radial_integration(results, data, keep_pixels, pixel_mask_pf, center_radial_integration, r_radial_integration): 
if keep_pixels is None and pixel_mask_pf is not None: - keep_pixels = (pixel_mask_pf != 0) + keep_pixels = (pixel_mask_pf != 0) #TODO: boolean mask if center_radial_integration is None: center_radial_integration = [ diff --git a/dap/worker.py b/dap/worker.py index 2438dff..8f7f521 100644 --- a/dap/worker.py +++ b/dap/worker.py @@ -182,7 +182,7 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host # make all masked pixels values nans if pixel_mask_pf is not None: - pfdata[pixel_mask_pf != 1] = np.nan + pfdata[pixel_mask_pf != 1] = np.nan #TODO: boolean mask calc_apply_threshold(results, pfdata) # changes pfdata in place calc_roi(results, pfdata, pixel_mask_pf) @@ -216,7 +216,7 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host forceSendVisualisation = True data_summed = None n_aggregated_images = 1 - data[pixel_mask_pf == 0] = np.NaN + data[pixel_mask_pf == 0] = np.NaN #TODO: boolean mask else: data = image -- 2.49.0 From bf7f62adffd3cfcba5be6844447326efcd136e47 Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Fri, 2 Aug 2024 13:26:02 +0200 Subject: [PATCH 054/159] moved masking pixels out of work function --- dap/algos/__init__.py | 1 + dap/algos/mask.py | 9 +++++++++ dap/worker.py | 7 ++----- 3 files changed, 12 insertions(+), 5 deletions(-) create mode 100644 dap/algos/mask.py diff --git a/dap/algos/__init__.py b/dap/algos/__init__.py index 8b023af..e0d580e 100644 --- a/dap/algos/__init__.py +++ b/dap/algos/__init__.py @@ -1,5 +1,6 @@ from .addmask import calc_apply_additional_mask +from .mask import calc_mask_pixels from .peakfind import calc_peakfinder_analysis from .radprof import calc_radial_integration from .roi import calc_roi diff --git a/dap/algos/mask.py b/dap/algos/mask.py new file mode 100644 index 0000000..1ed79fa --- /dev/null +++ b/dap/algos/mask.py @@ -0,0 +1,9 @@ +import numpy as np + + +def calc_mask_pixels(pfdata, pixel_mask_pf): + if pixel_mask_pf is not None: + 
pfdata[pixel_mask_pf != 1] = np.nan #TODO: boolean mask + + + diff --git a/dap/worker.py b/dap/worker.py index 8f7f521..3f19c4d 100644 --- a/dap/worker.py +++ b/dap/worker.py @@ -7,7 +7,7 @@ from time import sleep import jungfrau_utils as ju import numpy as np -from algos import calc_apply_additional_mask, calc_apply_threshold, calc_peakfinder_analysis, calc_radial_integration, calc_roi, calc_spi_analysis +from algos import calc_apply_additional_mask, calc_apply_threshold, calc_mask_pixels, calc_peakfinder_analysis, calc_radial_integration, calc_roi, calc_spi_analysis from utils import json_load, read_bit from zmqsocks import ZMQSockets @@ -180,10 +180,7 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host #copy image to work with peakfinder, just in case pfdata = np.copy(data) - # make all masked pixels values nans - if pixel_mask_pf is not None: - pfdata[pixel_mask_pf != 1] = np.nan #TODO: boolean mask - + calc_mask_pixels(pfdata, pixel_mask_pf) # changes pfdata in place calc_apply_threshold(results, pfdata) # changes pfdata in place calc_roi(results, pfdata, pixel_mask_pf) calc_spi_analysis(results) -- 2.49.0 From 0d06b7b5edda9a9aa749e3934bfcf9ddfc0b6953 Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Fri, 2 Aug 2024 13:44:54 +0200 Subject: [PATCH 055/159] removed commented code for old beamtime --- dap/worker.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/dap/worker.py b/dap/worker.py index 3f19c4d..09bf17c 100644 --- a/dap/worker.py +++ b/dap/worker.py @@ -122,11 +122,6 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host if select_only_ppicker_events and not event_ppicker: continue -# special settings for p20270, only few shots were opened by pulse-picker -# if detector in ["JF06T32V02"]: -# if event_ppicker: -# results["number_of_spots"] = 50 -# results["is_hit_frame"] = True double_pixels = results.get("double_pixels", "mask") -- 2.49.0 From e5fc7b27501c98514156a5551fdc9a512e26f092 
Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Fri, 2 Aug 2024 13:53:25 +0200 Subject: [PATCH 056/159] cleanup --- dap/worker.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/dap/worker.py b/dap/worker.py index 09bf17c..7e0b8bc 100644 --- a/dap/worker.py +++ b/dap/worker.py @@ -90,15 +90,18 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host except Exception as e: print(f"({pulseid}) problem ({e}) to read peakfinder parameters file, worker : {worker}", flush=True) + if not zmq_socks.has_data(): continue image, metadata = zmq_socks.get_data() - results = copy(metadata) - if results["shape"][0] == 2 and results["shape"][1] == 2: + if metadata["shape"] == [2, 2]: # this is used as marker for empty images continue + results = metadata.copy() + + pulseid = results.get("pulse_id", 0) results.update(peakfinder_parameters) -- 2.49.0 From 87a79a265cf85f088a75c9c8445d58371064c3a0 Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Fri, 2 Aug 2024 13:55:14 +0200 Subject: [PATCH 057/159] naming consistency --- dap/worker.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/dap/worker.py b/dap/worker.py index 7e0b8bc..683c138 100644 --- a/dap/worker.py +++ b/dap/worker.py @@ -47,7 +47,7 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host peakfinder_parameters = json_load(fn_peakfinder_parameters) peakfinder_parameters_time = os.path.getmtime(fn_peakfinder_parameters) - pulseid = 0 + pulse_id = 0 ju_stream_adapter = ju.StreamAdapter() @@ -84,11 +84,11 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host peakfinder_parameters_time = new_time center_radial_integration = None if worker == 0: - print(f"({pulseid}) update peakfinder parameters {old_peakfinder_parameters}", flush=True) + print(f"({pulse_id}) update peakfinder parameters {old_peakfinder_parameters}", flush=True) print(f" --> {peakfinder_parameters}", flush=True) 
print(flush=True) except Exception as e: - print(f"({pulseid}) problem ({e}) to read peakfinder parameters file, worker : {worker}", flush=True) + print(f"({pulse_id}) problem ({e}) to read peakfinder parameters file, worker : {worker}", flush=True) if not zmq_socks.has_data(): @@ -102,10 +102,10 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host results = metadata.copy() - pulseid = results.get("pulse_id", 0) + pulse_id = results.get("pulse_id", 0) results.update(peakfinder_parameters) - detector = results.get("detector_name", "") + detector_name = results.get("detector_name", "") results["laser_on"] = False results["number_of_spots"] = 0 @@ -158,7 +158,7 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host pixel_mask_pf = None - calc_apply_additional_mask(results, detector, pixel_mask_pf) # changes pixel_mask_pf in place + calc_apply_additional_mask(results, detector_name, pixel_mask_pf) # changes pixel_mask_pf in place if pixel_mask_corrected is not None: -- 2.49.0 From 19f1c8e3752e35de9fd56d2b81a08d917de1a6a7 Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Fri, 2 Aug 2024 14:05:05 +0200 Subject: [PATCH 058/159] condensed logic --- dap/worker.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/dap/worker.py b/dap/worker.py index 683c138..0a9a820 100644 --- a/dap/worker.py +++ b/dap/worker.py @@ -107,18 +107,17 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host detector_name = results.get("detector_name", "") - results["laser_on"] = False results["number_of_spots"] = 0 results["is_hit_frame"] = False + daq_rec = results.get("daq_rec", 0) event_laser = read_bit(daq_rec, 16) event_darkshot = read_bit(daq_rec, 17) # event_fel = read_bit(daq_rec, 18) event_ppicker = read_bit(daq_rec, 19) - if not event_darkshot: - results["laser_on"] = event_laser + results["laser_on"] = event_laser and not event_darkshot # Filter only ppicker events, if 
requested; skipping all other events select_only_ppicker_events = results.get("select_only_ppicker_events", False) -- 2.49.0 From d8d984f5a3ba3c468c0ad9587e29a8163d3550c0 Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Fri, 2 Aug 2024 14:12:29 +0200 Subject: [PATCH 059/159] read detector_name only where it is needed --- dap/algos/addmask.py | 10 +++++++--- dap/worker.py | 4 +--- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/dap/algos/addmask.py b/dap/algos/addmask.py index cdfaf3c..ffb187c 100644 --- a/dap/algos/addmask.py +++ b/dap/algos/addmask.py @@ -1,11 +1,15 @@ #TODO: find a better way to handle this -def calc_apply_additional_mask(results, detector, pixel_mask_pf): +def calc_apply_additional_mask(results, pixel_mask_pf): apply_additional_mask = results.get("apply_additional_mask", False) if not apply_additional_mask: return - if detector == "JF06T08V04": + detector_name = results.get("detector_name", None) + if not detector_name: + return + + if detector_name == "JF06T08V04": # edge pixels pixel_mask_pf[0:1030, 1100] = 0 pixel_mask_pf[0:1030, 1613] = 0 @@ -52,7 +56,7 @@ def calc_apply_additional_mask(results, detector, pixel_mask_pf): pixel_mask_pf[1794, 1503:1550] = 0 - if detector == "JF17T16V01": + elif detector_name == "JF17T16V01": # mask module 11 pixel_mask_pf[2619:3333,1577:2607] = 0 diff --git a/dap/worker.py b/dap/worker.py index 0a9a820..558a817 100644 --- a/dap/worker.py +++ b/dap/worker.py @@ -105,8 +105,6 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host pulse_id = results.get("pulse_id", 0) results.update(peakfinder_parameters) - detector_name = results.get("detector_name", "") - results["number_of_spots"] = 0 results["is_hit_frame"] = False @@ -157,7 +155,7 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host pixel_mask_pf = None - calc_apply_additional_mask(results, detector_name, pixel_mask_pf) # changes pixel_mask_pf in place + 
calc_apply_additional_mask(results, pixel_mask_pf) # changes pixel_mask_pf in place if pixel_mask_corrected is not None: -- 2.49.0 From 092479c52d8cda96f89daadecc4d3fde5279580a Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Fri, 2 Aug 2024 14:23:06 +0200 Subject: [PATCH 060/159] cleaned up comment --- dap/worker.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dap/worker.py b/dap/worker.py index 558a817..7c620f9 100644 --- a/dap/worker.py +++ b/dap/worker.py @@ -117,7 +117,7 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host results["laser_on"] = event_laser and not event_darkshot -# Filter only ppicker events, if requested; skipping all other events + # if requested, filter on ppicker events by skipping other events select_only_ppicker_events = results.get("select_only_ppicker_events", False) if select_only_ppicker_events and not event_ppicker: continue -- 2.49.0 From 6dccb5bbfdf405c8d1cac3288a3b440ffd7aa672 Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Fri, 2 Aug 2024 14:30:19 +0200 Subject: [PATCH 061/159] only apply additional mask if the mask has changed --- dap/worker.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/dap/worker.py b/dap/worker.py index 7c620f9..003b001 100644 --- a/dap/worker.py +++ b/dap/worker.py @@ -150,14 +150,10 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host if pixel_mask_corrected is not None: #pixel_mask_corrected = np.ascontiguousarray(pixel_mask_corrected) pixel_mask_pf = np.ascontiguousarray(pixel_mask_corrected).astype(np.int8, copy=False) - + calc_apply_additional_mask(results, pixel_mask_pf) # changes pixel_mask_pf in place else: pixel_mask_pf = None - - calc_apply_additional_mask(results, pixel_mask_pf) # changes pixel_mask_pf in place - - if pixel_mask_corrected is not None: data_s = copy(image) saturated_pixels_coordinates = ju_stream_adapter.handler.get_saturated_pixels(data_s, mask=True, geometry=True, 
gap_pixels=True, double_pixels=double_pixels) -- 2.49.0 From af3eb6853b8c7a22c1d565b0995ddb85fad51927 Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Fri, 2 Aug 2024 14:36:35 +0200 Subject: [PATCH 062/159] naming consistency --- dap/worker.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/dap/worker.py b/dap/worker.py index 003b001..b3fa95b 100644 --- a/dap/worker.py +++ b/dap/worker.py @@ -62,7 +62,7 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host results = {} - pedestal_file_name_saved = None + pedestal_name_saved = None pixel_mask_corrected = None pixel_mask_pf = None @@ -125,11 +125,11 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host double_pixels = results.get("double_pixels", "mask") - pedestal_file_name = metadata.get("pedestal_name", None) - if pedestal_file_name is not None and pedestal_file_name != pedestal_file_name_saved: + pedestal_name = metadata.get("pedestal_name", None) + if pedestal_name is not None and pedestal_name != pedestal_name_saved: pixel_mask_current = ju_stream_adapter.handler.pixel_mask ju_stream_adapter.handler.pixel_mask = pixel_mask_current - pedestal_file_name_saved = pedestal_file_name + pedestal_name_saved = pedestal_name data = ju_stream_adapter.process(image, metadata, double_pixels=double_pixels) -- 2.49.0 From d9d79f3e87e1376a33ae916608413b08c8fca114 Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Fri, 2 Aug 2024 14:55:37 +0200 Subject: [PATCH 063/159] more pythonic check --- dap/worker.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dap/worker.py b/dap/worker.py index b3fa95b..393998b 100644 --- a/dap/worker.py +++ b/dap/worker.py @@ -134,7 +134,7 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host data = ju_stream_adapter.process(image, metadata, double_pixels=double_pixels) # pedestal file is not in stream, skip this frame - if ju_stream_adapter.handler.pedestal_file is 
None or ju_stream_adapter.handler.pedestal_file == "": + if not ju_stream_adapter.handler.pedestal_file: continue data = np.ascontiguousarray(data) -- 2.49.0 From 062cc6055cc718c441a7cce03d2197882aa6b17e Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Fri, 2 Aug 2024 15:10:52 +0200 Subject: [PATCH 064/159] use the same validation logic as in other cases --- dap/algos/mask.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/dap/algos/mask.py b/dap/algos/mask.py index 1ed79fa..0015d49 100644 --- a/dap/algos/mask.py +++ b/dap/algos/mask.py @@ -2,8 +2,10 @@ import numpy as np def calc_mask_pixels(pfdata, pixel_mask_pf): - if pixel_mask_pf is not None: - pfdata[pixel_mask_pf != 1] = np.nan #TODO: boolean mask + if pixel_mask_pf is None: + return + + pfdata[pixel_mask_pf != 1] = np.nan #TODO: boolean mask -- 2.49.0 From dd7eda4834a217d4e7e12a960665c8544a9b1f20 Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Fri, 2 Aug 2024 15:11:13 +0200 Subject: [PATCH 065/159] renamed variable pfdata to data --- dap/algos/mask.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/dap/algos/mask.py b/dap/algos/mask.py index 0015d49..7c73ef1 100644 --- a/dap/algos/mask.py +++ b/dap/algos/mask.py @@ -1,11 +1,11 @@ import numpy as np -def calc_mask_pixels(pfdata, pixel_mask_pf): +def calc_mask_pixels(data, pixel_mask_pf): if pixel_mask_pf is None: return - pfdata[pixel_mask_pf != 1] = np.nan #TODO: boolean mask + data[pixel_mask_pf != 1] = np.nan #TODO: boolean mask -- 2.49.0 From de3ea150b29e34868a811887aa789b96d6b11983 Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Fri, 2 Aug 2024 15:49:47 +0200 Subject: [PATCH 066/159] all mask comparisons are for equality --- dap/algos/mask.py | 2 +- dap/algos/radprof.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/dap/algos/mask.py b/dap/algos/mask.py index 7c73ef1..febd40f 100644 --- a/dap/algos/mask.py +++ b/dap/algos/mask.py @@ -5,7 +5,7 @@ def calc_mask_pixels(data, 
pixel_mask_pf): if pixel_mask_pf is None: return - data[pixel_mask_pf != 1] = np.nan #TODO: boolean mask + data[pixel_mask_pf == 0] = np.nan #TODO: boolean mask diff --git a/dap/algos/radprof.py b/dap/algos/radprof.py index b1b3fcf..5a958e0 100644 --- a/dap/algos/radprof.py +++ b/dap/algos/radprof.py @@ -3,7 +3,7 @@ import numpy as np def calc_radial_integration(results, data, keep_pixels, pixel_mask_pf, center_radial_integration, r_radial_integration): if keep_pixels is None and pixel_mask_pf is not None: - keep_pixels = (pixel_mask_pf != 0) #TODO: boolean mask + keep_pixels = (pixel_mask_pf == 1) #TODO: boolean mask if center_radial_integration is None: center_radial_integration = [ -- 2.49.0 From 8cd2f07d8055fb6e2b4ced35979077fc857cb58a Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Fri, 2 Aug 2024 15:54:31 +0200 Subject: [PATCH 067/159] leave pixel_mask_pf as boolean array (int8 is only needed for peakfinder_8, where it is type casted anyway) --- dap/worker.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dap/worker.py b/dap/worker.py index 393998b..0753be1 100644 --- a/dap/worker.py +++ b/dap/worker.py @@ -149,7 +149,7 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host r_radial_integration = None if pixel_mask_corrected is not None: #pixel_mask_corrected = np.ascontiguousarray(pixel_mask_corrected) - pixel_mask_pf = np.ascontiguousarray(pixel_mask_corrected).astype(np.int8, copy=False) + pixel_mask_pf = np.ascontiguousarray(pixel_mask_corrected) calc_apply_additional_mask(results, pixel_mask_pf) # changes pixel_mask_pf in place else: pixel_mask_pf = None -- 2.49.0 From 88f881fffd89d69a1843ed93a70ed65ba4dbd8d0 Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Fri, 2 Aug 2024 16:01:13 +0200 Subject: [PATCH 068/159] removed keep_pixels and use boolean array pixel_mask_pf instead --- dap/algos/radprof.py | 11 ++++------- dap/worker.py | 5 +---- 2 files changed, 5 insertions(+), 11 deletions(-) diff --git 
a/dap/algos/radprof.py b/dap/algos/radprof.py index 5a958e0..6d2b4db 100644 --- a/dap/algos/radprof.py +++ b/dap/algos/radprof.py @@ -1,10 +1,7 @@ import numpy as np -def calc_radial_integration(results, data, keep_pixels, pixel_mask_pf, center_radial_integration, r_radial_integration): - if keep_pixels is None and pixel_mask_pf is not None: - keep_pixels = (pixel_mask_pf == 1) #TODO: boolean mask - +def calc_radial_integration(results, data, pixel_mask_pf, center_radial_integration, r_radial_integration): if center_radial_integration is None: center_radial_integration = [ results["beam_center_x"], @@ -13,7 +10,7 @@ def calc_radial_integration(results, data, keep_pixels, pixel_mask_pf, center_ra r_radial_integration = None if r_radial_integration is None: - r_radial_integration, nr_radial_integration = prepare_radial_profile(data, center_radial_integration, keep_pixels) + r_radial_integration, nr_radial_integration = prepare_radial_profile(data, center_radial_integration, keep_pixels=pixel_mask_pf) r_min = int(np.min(r_radial_integration)) r_max = int(np.max(r_radial_integration)) + 1 @@ -29,7 +26,7 @@ def calc_radial_integration(results, data, keep_pixels, pixel_mask_pf, center_ra if threshold_max > threshold_min: data[data > threshold_max] = np.nan - rp = radial_profile(data, r_radial_integration, nr_radial_integration, keep_pixels) + rp = radial_profile(data, r_radial_integration, nr_radial_integration, keep_pixels=pixel_mask_pf) silent_region_min = results.get("radial_integration_silent_min", None) silent_region_max = results.get("radial_integration_silent_max", None) @@ -50,7 +47,7 @@ def calc_radial_integration(results, data, keep_pixels, pixel_mask_pf, center_ra results["radint_I"] = rp[r_min:].tolist() results["radint_q"] = [r_min, r_max] - return keep_pixels, center_radial_integration, r_radial_integration + return center_radial_integration, r_radial_integration diff --git a/dap/worker.py b/dap/worker.py index 0753be1..5c1c4b5 100644 --- a/dap/worker.py 
+++ b/dap/worker.py @@ -56,7 +56,6 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host # all the normal workers worker = 1 - keep_pixels = None r_radial_integration = None center_radial_integration = None @@ -145,10 +144,8 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host id_pixel_mask_2 = id(pixel_mask_corrected) if id_pixel_mask_1 != id_pixel_mask_2: - keep_pixels = None r_radial_integration = None if pixel_mask_corrected is not None: - #pixel_mask_corrected = np.ascontiguousarray(pixel_mask_corrected) pixel_mask_pf = np.ascontiguousarray(pixel_mask_corrected) calc_apply_additional_mask(results, pixel_mask_pf) # changes pixel_mask_pf in place else: @@ -165,7 +162,7 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host # pump probe analysis do_radial_integration = results.get("do_radial_integration", False) if do_radial_integration: - keep_pixels, center_radial_integration, r_radial_integration = calc_radial_integration(results, data, keep_pixels, pixel_mask_pf, center_radial_integration, r_radial_integration) + center_radial_integration, r_radial_integration = calc_radial_integration(results, data, pixel_mask_pf, center_radial_integration, r_radial_integration) #copy image to work with peakfinder, just in case -- 2.49.0 From ca3b42a7972e81d1bdc480fcea1c274395d4c319 Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Fri, 2 Aug 2024 16:11:57 +0200 Subject: [PATCH 069/159] use the boolean pixel_mask_pf --- dap/algos/mask.py | 2 +- dap/worker.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/dap/algos/mask.py b/dap/algos/mask.py index febd40f..7ba422f 100644 --- a/dap/algos/mask.py +++ b/dap/algos/mask.py @@ -5,7 +5,7 @@ def calc_mask_pixels(data, pixel_mask_pf): if pixel_mask_pf is None: return - data[pixel_mask_pf == 0] = np.nan #TODO: boolean mask + data[~pixel_mask_pf] = np.nan diff --git a/dap/worker.py b/dap/worker.py index 5c1c4b5..b3bf7f7 
100644 --- a/dap/worker.py +++ b/dap/worker.py @@ -201,7 +201,7 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host forceSendVisualisation = True data_summed = None n_aggregated_images = 1 - data[pixel_mask_pf == 0] = np.NaN #TODO: boolean mask + data[~pixel_mask_pf] = np.nan else: data = image -- 2.49.0 From eb39550983431fd8e5d6114d73057ef1aabe3325 Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Fri, 2 Aug 2024 16:14:24 +0200 Subject: [PATCH 070/159] added todo note --- dap/algos/roi.py | 1 + 1 file changed, 1 insertion(+) diff --git a/dap/algos/roi.py b/dap/algos/roi.py index efcdcab..f36ddb3 100644 --- a/dap/algos/roi.py +++ b/dap/algos/roi.py @@ -2,6 +2,7 @@ import numpy as np def calc_roi(results, data, pixel_mask_pf): + #TODO: why is this checked here? if pixel_mask_pf is None: return -- 2.49.0 From f6c97e9749bba6248fdce3b26c1b24c06f168633 Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Fri, 2 Aug 2024 16:14:42 +0200 Subject: [PATCH 071/159] added check for pixel_mask_pf being None --- dap/worker.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/dap/worker.py b/dap/worker.py index b3bf7f7..591b206 100644 --- a/dap/worker.py +++ b/dap/worker.py @@ -201,7 +201,8 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host forceSendVisualisation = True data_summed = None n_aggregated_images = 1 - data[~pixel_mask_pf] = np.nan + if pixel_mask_pf is not None: + data[~pixel_mask_pf] = np.nan else: data = image -- 2.49.0 From 098146c97b47066b91c788e2dd1414f28fc15dda Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Fri, 2 Aug 2024 16:25:16 +0200 Subject: [PATCH 072/159] use ndarray.copy consistently --- dap/worker.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/dap/worker.py b/dap/worker.py index 591b206..f31d631 100644 --- a/dap/worker.py +++ b/dap/worker.py @@ -1,6 +1,5 @@ import argparse import os -from copy import copy from random import randint from time 
import sleep @@ -152,7 +151,7 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host pixel_mask_pf = None if pixel_mask_corrected is not None: - data_s = copy(image) + data_s = image.copy() saturated_pixels_coordinates = ju_stream_adapter.handler.get_saturated_pixels(data_s, mask=True, geometry=True, gap_pixels=True, double_pixels=double_pixels) results["saturated_pixels"] = len(saturated_pixels_coordinates[0]) results["saturated_pixels_x"] = saturated_pixels_coordinates[1].tolist() @@ -166,7 +165,7 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host #copy image to work with peakfinder, just in case - pfdata = np.copy(data) + pfdata = data.copy() calc_mask_pixels(pfdata, pixel_mask_pf) # changes pfdata in place calc_apply_threshold(results, pfdata) # changes pfdata in place -- 2.49.0 From 26e2a3f61935e4d186aa68dcca983d94dde8fd1c Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Fri, 2 Aug 2024 16:50:04 +0200 Subject: [PATCH 073/159] easier to read/distinguish names --- dap/algos/radprof.py | 48 ++++++++++++++++++++++---------------------- 1 file changed, 24 insertions(+), 24 deletions(-) diff --git a/dap/algos/radprof.py b/dap/algos/radprof.py index 6d2b4db..4b42e6e 100644 --- a/dap/algos/radprof.py +++ b/dap/algos/radprof.py @@ -1,18 +1,18 @@ import numpy as np -def calc_radial_integration(results, data, pixel_mask_pf, center_radial_integration, r_radial_integration): - if center_radial_integration is None: - center_radial_integration = [ +def calc_radial_integration(results, data, pixel_mask_pf, center, rad): + if center is None: + center = [ results["beam_center_x"], results["beam_center_y"] ] - r_radial_integration = None + rad = None - if r_radial_integration is None: - r_radial_integration, nr_radial_integration = prepare_radial_profile(data, center_radial_integration, keep_pixels=pixel_mask_pf) - r_min = int(np.min(r_radial_integration)) - r_max = int(np.max(r_radial_integration)) + 1 + if rad is 
None: + rad, norm = prepare_radial_profile(data, center, keep_pixels=pixel_mask_pf) + r_min = int(np.min(rad)) + r_max = int(np.max(rad)) + 1 apply_threshold = results.get("apply_threshold", False) @@ -26,37 +26,37 @@ def calc_radial_integration(results, data, pixel_mask_pf, center_radial_integrat if threshold_max > threshold_min: data[data > threshold_max] = np.nan - rp = radial_profile(data, r_radial_integration, nr_radial_integration, keep_pixels=pixel_mask_pf) + rp = radial_profile(data, rad, norm, keep_pixels=pixel_mask_pf) - silent_region_min = results.get("radial_integration_silent_min", None) - silent_region_max = results.get("radial_integration_silent_max", None) + silent_min = results.get("radial_integration_silent_min", None) + silent_max = results.get("radial_integration_silent_max", None) if ( - silent_region_min is not None and - silent_region_max is not None and + silent_min is not None and + silent_max is not None and #TODO: skipping entirely is a guess, but not obvious -- better to ensure the order min < max by switching them if needed - silent_region_max > silent_region_min and - silent_region_min > r_min and - silent_region_max < r_max + silent_max > silent_min and + silent_min > r_min and + silent_max < r_max ): - silent_region = rp[silent_region_min:silent_region_max] + silent_region = rp[silent_min:silent_max] integral_silent_region = np.sum(silent_region) rp = rp / integral_silent_region - results["radint_normalised"] = [silent_region_min, silent_region_max] + results["radint_normalised"] = [silent_min, silent_max] results["radint_I"] = rp[r_min:].tolist() results["radint_q"] = [r_min, r_max] - return center_radial_integration, r_radial_integration + return center, rad -def radial_profile(data, r, nr, keep_pixels=None): +def radial_profile(data, rad, norm, keep_pixels=None): if keep_pixels is not None: data = data[keep_pixels] data = data.ravel() - tbin = np.bincount(r, data) - rp = tbin / nr + tbin = np.bincount(rad, data) + rp = tbin / norm 
return rp def prepare_radial_profile(data, center, keep_pixels=None): @@ -66,8 +66,8 @@ def prepare_radial_profile(data, center, keep_pixels=None): if keep_pixels is not None: rad = rad[keep_pixels] rad = rad.astype(int).ravel() - nr = np.bincount(rad) - return rad, nr + norm = np.bincount(rad) + return rad, norm -- 2.49.0 From 46308b16a946f6d4df246993875c3cb38d7a6929 Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Fri, 2 Aug 2024 16:52:34 +0200 Subject: [PATCH 074/159] function order --- dap/algos/radprof.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/dap/algos/radprof.py b/dap/algos/radprof.py index 4b42e6e..c54bc02 100644 --- a/dap/algos/radprof.py +++ b/dap/algos/radprof.py @@ -50,15 +50,6 @@ def calc_radial_integration(results, data, pixel_mask_pf, center, rad): return center, rad - -def radial_profile(data, rad, norm, keep_pixels=None): - if keep_pixels is not None: - data = data[keep_pixels] - data = data.ravel() - tbin = np.bincount(rad, data) - rp = tbin / norm - return rp - def prepare_radial_profile(data, center, keep_pixels=None): y, x = np.indices(data.shape) x0, y0 = center @@ -70,4 +61,13 @@ def prepare_radial_profile(data, center, keep_pixels=None): return rad, norm +def radial_profile(data, rad, norm, keep_pixels=None): + if keep_pixels is not None: + data = data[keep_pixels] + data = data.ravel() + tbin = np.bincount(rad, data) + rp = tbin / norm + return rp + + -- 2.49.0 From ac732cc4b7358133cf79b97076f965093a4ecc13 Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Fri, 2 Aug 2024 17:05:18 +0200 Subject: [PATCH 075/159] easier to read name --- dap/worker.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/dap/worker.py b/dap/worker.py index f31d631..37bf25f 100644 --- a/dap/worker.py +++ b/dap/worker.py @@ -55,7 +55,7 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host # all the normal workers worker = 1 - r_radial_integration = None + 
rad_radial_integration = None center_radial_integration = None results = {} @@ -143,7 +143,7 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host id_pixel_mask_2 = id(pixel_mask_corrected) if id_pixel_mask_1 != id_pixel_mask_2: - r_radial_integration = None + rad_radial_integration = None if pixel_mask_corrected is not None: pixel_mask_pf = np.ascontiguousarray(pixel_mask_corrected) calc_apply_additional_mask(results, pixel_mask_pf) # changes pixel_mask_pf in place @@ -161,7 +161,7 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host # pump probe analysis do_radial_integration = results.get("do_radial_integration", False) if do_radial_integration: - center_radial_integration, r_radial_integration = calc_radial_integration(results, data, pixel_mask_pf, center_radial_integration, r_radial_integration) + center_radial_integration, rad_radial_integration = calc_radial_integration(results, data, pixel_mask_pf, center_radial_integration, rad_radial_integration) #copy image to work with peakfinder, just in case -- 2.49.0 From bbc58ca5e62acf860bcc36a6afa0d305d1cf4ece Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Fri, 2 Aug 2024 17:23:11 +0200 Subject: [PATCH 076/159] need to also store the norm between iterations; added some comments on why the center/rad/norm are reset and where; r_min/r_max need to be re-calclated every time (or also stored); added todo note --- dap/algos/radprof.py | 13 +++++++------ dap/worker.py | 9 +++++---- 2 files changed, 12 insertions(+), 10 deletions(-) diff --git a/dap/algos/radprof.py b/dap/algos/radprof.py index c54bc02..a014ddc 100644 --- a/dap/algos/radprof.py +++ b/dap/algos/radprof.py @@ -1,18 +1,19 @@ import numpy as np -def calc_radial_integration(results, data, pixel_mask_pf, center, rad): +def calc_radial_integration(results, data, pixel_mask_pf, center, rad, norm): if center is None: center = [ results["beam_center_x"], results["beam_center_y"] ] - rad = None + rad = 
norm = None - if rad is None: + if rad is None or norm is None: rad, norm = prepare_radial_profile(data, center, keep_pixels=pixel_mask_pf) - r_min = int(np.min(rad)) - r_max = int(np.max(rad)) + 1 + + r_min = int(np.min(rad)) + r_max = int(np.max(rad)) + 1 apply_threshold = results.get("apply_threshold", False) @@ -44,7 +45,7 @@ def calc_radial_integration(results, data, pixel_mask_pf, center, rad): rp = rp / integral_silent_region results["radint_normalised"] = [silent_min, silent_max] - results["radint_I"] = rp[r_min:].tolist() + results["radint_I"] = rp[r_min:].tolist() #TODO: why not stop at r_max? results["radint_q"] = [r_min, r_max] return center, rad diff --git a/dap/worker.py b/dap/worker.py index 37bf25f..b672838 100644 --- a/dap/worker.py +++ b/dap/worker.py @@ -55,8 +55,9 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host # all the normal workers worker = 1 - rad_radial_integration = None center_radial_integration = None + rad_radial_integration = None + norm_radial_integration = None results = {} @@ -80,7 +81,7 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host sleep(0.5) peakfinder_parameters = json_load(fn_peakfinder_parameters) peakfinder_parameters_time = new_time - center_radial_integration = None + center_radial_integration = None # beam_center_x/beam_center_y might have changed if worker == 0: print(f"({pulse_id}) update peakfinder parameters {old_peakfinder_parameters}", flush=True) print(f" --> {peakfinder_parameters}", flush=True) @@ -143,7 +144,7 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host id_pixel_mask_2 = id(pixel_mask_corrected) if id_pixel_mask_1 != id_pixel_mask_2: - rad_radial_integration = None + rad_radial_integration = norm_radial_integration = None # if the pixel mask changes, the radii and normalization need to be re-calculated if pixel_mask_corrected is not None: pixel_mask_pf = np.ascontiguousarray(pixel_mask_corrected) 
calc_apply_additional_mask(results, pixel_mask_pf) # changes pixel_mask_pf in place @@ -161,7 +162,7 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host # pump probe analysis do_radial_integration = results.get("do_radial_integration", False) if do_radial_integration: - center_radial_integration, rad_radial_integration = calc_radial_integration(results, data, pixel_mask_pf, center_radial_integration, rad_radial_integration) + center_radial_integration, rad_radial_integration, norm_radial_integration = calc_radial_integration(results, data, pixel_mask_pf, center_radial_integration, rad_radial_integration, norm_radial_integration) #copy image to work with peakfinder, just in case -- 2.49.0 From 68619f11812f814a46e1f6acbf36b0311b362903 Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Fri, 2 Aug 2024 17:25:28 +0200 Subject: [PATCH 077/159] rad is already int --- dap/algos/radprof.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/dap/algos/radprof.py b/dap/algos/radprof.py index a014ddc..9cdd601 100644 --- a/dap/algos/radprof.py +++ b/dap/algos/radprof.py @@ -12,8 +12,8 @@ def calc_radial_integration(results, data, pixel_mask_pf, center, rad, norm): if rad is None or norm is None: rad, norm = prepare_radial_profile(data, center, keep_pixels=pixel_mask_pf) - r_min = int(np.min(rad)) - r_max = int(np.max(rad)) + 1 + r_min = min(rad) + r_max = max(rad) + 1 apply_threshold = results.get("apply_threshold", False) -- 2.49.0 From 88c81a905b3bec6c70910cca3cd0c5570b67b2d5 Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Fri, 2 Aug 2024 18:50:52 +0200 Subject: [PATCH 078/159] use ndarray.copy consistently --- dap/algos/radprof.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dap/algos/radprof.py b/dap/algos/radprof.py index 9cdd601..ed7c5be 100644 --- a/dap/algos/radprof.py +++ b/dap/algos/radprof.py @@ -21,7 +21,7 @@ def calc_radial_integration(results, data, pixel_mask_pf, center, rad, norm): if 
apply_threshold and all(k in results for k in ("threshold_min", "threshold_max")): threshold_min = float(results["threshold_min"]) threshold_max = float(results["threshold_max"]) - data = np.copy(data) # do the following in-place changes on a copy + data = data.copy() # do the following in-place changes on a copy data[data < threshold_min] = np.nan #TODO: skipping max is a guess, but not obvious/symmetric -- better to ensure the order min < max by switching them if needed if threshold_max > threshold_min: -- 2.49.0 From 8c11dd8f52bf86e9b2e171577024390ee0fe2bf9 Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Fri, 2 Aug 2024 19:01:04 +0200 Subject: [PATCH 079/159] get_saturated_pixels does not change data in place, hence no copy is needed; use kwargs defaults --- dap/worker.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/dap/worker.py b/dap/worker.py index b672838..7aa6f1f 100644 --- a/dap/worker.py +++ b/dap/worker.py @@ -152,8 +152,7 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host pixel_mask_pf = None if pixel_mask_corrected is not None: - data_s = image.copy() - saturated_pixels_coordinates = ju_stream_adapter.handler.get_saturated_pixels(data_s, mask=True, geometry=True, gap_pixels=True, double_pixels=double_pixels) + saturated_pixels_coordinates = ju_stream_adapter.handler.get_saturated_pixels(image, double_pixels=double_pixels) results["saturated_pixels"] = len(saturated_pixels_coordinates[0]) results["saturated_pixels_x"] = saturated_pixels_coordinates[1].tolist() results["saturated_pixels_y"] = saturated_pixels_coordinates[0].tolist() -- 2.49.0 From 2857d6412b7a670f6b6f2ef679b9445e1e0d1758 Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Fri, 2 Aug 2024 19:02:43 +0200 Subject: [PATCH 080/159] readability --- dap/worker.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/dap/worker.py b/dap/worker.py index 7aa6f1f..fdd1813 100644 --- a/dap/worker.py +++ b/dap/worker.py @@ 
-152,10 +152,10 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host pixel_mask_pf = None if pixel_mask_corrected is not None: - saturated_pixels_coordinates = ju_stream_adapter.handler.get_saturated_pixels(image, double_pixels=double_pixels) - results["saturated_pixels"] = len(saturated_pixels_coordinates[0]) - results["saturated_pixels_x"] = saturated_pixels_coordinates[1].tolist() - results["saturated_pixels_y"] = saturated_pixels_coordinates[0].tolist() + saturated_pixels_y, saturated_pixels_x = ju_stream_adapter.handler.get_saturated_pixels(image, double_pixels=double_pixels) + results["saturated_pixels"] = len(saturated_pixels_x) + results["saturated_pixels_x"] = saturated_pixels_x.tolist() + results["saturated_pixels_y"] = saturated_pixels_y.tolist() # pump probe analysis -- 2.49.0 From 2903203743c73afbe7046c0ee65246772a76648a Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Fri, 2 Aug 2024 19:09:50 +0200 Subject: [PATCH 081/159] use kwargs defaults --- dap/worker.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dap/worker.py b/dap/worker.py index fdd1813..e4fe41b 100644 --- a/dap/worker.py +++ b/dap/worker.py @@ -140,7 +140,7 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host # starting from ju 3.3.1 pedestal file is cached in library, re-calculated only if parameters (and/or pedestal file) are changed id_pixel_mask_1 = id(pixel_mask_corrected) - pixel_mask_corrected = ju_stream_adapter.handler.get_pixel_mask(geometry=True, gap_pixels=True, double_pixels=double_pixels) + pixel_mask_corrected = ju_stream_adapter.handler.get_pixel_mask(double_pixels=double_pixels) id_pixel_mask_2 = id(pixel_mask_corrected) if id_pixel_mask_1 != id_pixel_mask_2: -- 2.49.0 From b2ae7292ab19397d11b9200ac8b61db6b602b1fb Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Fri, 2 Aug 2024 19:17:09 +0200 Subject: [PATCH 082/159] results is always copied from metadata --- dap/worker.py | 2 -- 1 file 
changed, 2 deletions(-) diff --git a/dap/worker.py b/dap/worker.py index e4fe41b..a41c6aa 100644 --- a/dap/worker.py +++ b/dap/worker.py @@ -59,8 +59,6 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host rad_radial_integration = None norm_radial_integration = None - results = {} - pedestal_name_saved = None pixel_mask_corrected = None -- 2.49.0 From fb6ed0d116c3f8a4986e74ce25765b86b850039f Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Fri, 2 Aug 2024 20:28:46 +0200 Subject: [PATCH 083/159] worker is always 1, removed it and commented unreachable print outs --- dap/worker.py | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/dap/worker.py b/dap/worker.py index a41c6aa..2490c7b 100644 --- a/dap/worker.py +++ b/dap/worker.py @@ -52,8 +52,6 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host zmq_socks = ZMQSockets(backend_address, accumulator_host, accumulator_port, visualisation_host, visualisation_port) -# all the normal workers - worker = 1 center_radial_integration = None rad_radial_integration = None @@ -67,6 +65,7 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host n_aggregated_images = 1 data_summed = None + while True: # check if peakfinder parameters changed and then re-read it @@ -75,17 +74,17 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host new_time = os.path.getmtime(fn_peakfinder_parameters) time_delta = new_time - peakfinder_parameters_time if time_delta > 2.0: - old_peakfinder_parameters = peakfinder_parameters +# old_peakfinder_parameters = peakfinder_parameters sleep(0.5) peakfinder_parameters = json_load(fn_peakfinder_parameters) peakfinder_parameters_time = new_time center_radial_integration = None # beam_center_x/beam_center_y might have changed - if worker == 0: - print(f"({pulse_id}) update peakfinder parameters {old_peakfinder_parameters}", flush=True) - print(f" --> 
{peakfinder_parameters}", flush=True) - print(flush=True) +# if worker == 0: +# print(f"({pulse_id}) update peakfinder parameters {old_peakfinder_parameters}", flush=True) +# print(f" --> {peakfinder_parameters}", flush=True) +# print(flush=True) except Exception as e: - print(f"({pulse_id}) problem ({e}) to read peakfinder parameters file, worker : {worker}", flush=True) + print(f"({pulse_id}) problem ({e}) to read peakfinder parameters file", flush=True) if not zmq_socks.has_data(): @@ -193,7 +192,7 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host data_summed = data.copy() data_summed[data == -np.nan] = -np.nan #TODO: this does nothing results["aggregated_images"] = n_aggregated_images - results["worker"] = worker + results["worker"] = 1 #TODO: keep this for backwards compatibility? if n_aggregated_images >= results["aggregation_max"]: forceSendVisualisation = True data_summed = None -- 2.49.0 From 45c6d99f7ad061432e40e021d836355afb2712bb Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Fri, 2 Aug 2024 20:28:58 +0200 Subject: [PATCH 084/159] forgotten return value --- dap/algos/radprof.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dap/algos/radprof.py b/dap/algos/radprof.py index ed7c5be..697aa35 100644 --- a/dap/algos/radprof.py +++ b/dap/algos/radprof.py @@ -48,7 +48,7 @@ def calc_radial_integration(results, data, pixel_mask_pf, center, rad, norm): results["radint_I"] = rp[r_min:].tolist() #TODO: why not stop at r_max? 
results["radint_q"] = [r_min, r_max] - return center, rad + return center, rad, norm def prepare_radial_profile(data, center, keep_pixels=None): -- 2.49.0 From 26f41b5383a251595ce63af3e3ae63e1db1aab81 Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Mon, 5 Aug 2024 18:03:53 +0200 Subject: [PATCH 085/159] made radprof helper memoizable --- dap/algos/radprof.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/dap/algos/radprof.py b/dap/algos/radprof.py index 697aa35..cbea3a3 100644 --- a/dap/algos/radprof.py +++ b/dap/algos/radprof.py @@ -10,7 +10,7 @@ def calc_radial_integration(results, data, pixel_mask_pf, center, rad, norm): rad = norm = None if rad is None or norm is None: - rad, norm = prepare_radial_profile(data, center, keep_pixels=pixel_mask_pf) + rad, norm = prepare_radial_profile(data.shape, center, pixel_mask_pf) r_min = min(rad) r_max = max(rad) + 1 @@ -27,7 +27,7 @@ def calc_radial_integration(results, data, pixel_mask_pf, center, rad, norm): if threshold_max > threshold_min: data[data > threshold_max] = np.nan - rp = radial_profile(data, rad, norm, keep_pixels=pixel_mask_pf) + rp = radial_profile(data, rad, norm, pixel_mask_pf) silent_min = results.get("radial_integration_silent_min", None) silent_max = results.get("radial_integration_silent_max", None) @@ -51,8 +51,8 @@ def calc_radial_integration(results, data, pixel_mask_pf, center, rad, norm): return center, rad, norm -def prepare_radial_profile(data, center, keep_pixels=None): - y, x = np.indices(data.shape) +def prepare_radial_profile(shape, center, keep_pixels): + y, x = np.indices(shape) x0, y0 = center rad = np.sqrt((x - x0)**2 + (y - y0)**2) if keep_pixels is not None: @@ -62,7 +62,7 @@ def prepare_radial_profile(data, center, keep_pixels=None): return rad, norm -def radial_profile(data, rad, norm, keep_pixels=None): +def radial_profile(data, rad, norm, keep_pixels): if keep_pixels is not None: data = data[keep_pixels] data = data.ravel() -- 2.49.0 From 
58c18d9b8ca62af99e9d20dfe3644d796b5bc3d8 Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Mon, 5 Aug 2024 18:04:26 +0200 Subject: [PATCH 086/159] added algos/utils folder and npmemo --- dap/algos/utils/__init__.py | 4 ++ dap/algos/utils/npmemo.py | 77 +++++++++++++++++++++++++++++++++++++ 2 files changed, 81 insertions(+) create mode 100644 dap/algos/utils/__init__.py create mode 100644 dap/algos/utils/npmemo.py diff --git a/dap/algos/utils/__init__.py b/dap/algos/utils/__init__.py new file mode 100644 index 0000000..9efc8a4 --- /dev/null +++ b/dap/algos/utils/__init__.py @@ -0,0 +1,4 @@ + +from .npmemo import npmemo + + diff --git a/dap/algos/utils/npmemo.py b/dap/algos/utils/npmemo.py new file mode 100644 index 0000000..5ea6670 --- /dev/null +++ b/dap/algos/utils/npmemo.py @@ -0,0 +1,77 @@ +import functools +#import hashlib +import numpy as np + + +def npmemo(func): + """ + numpy array aware memoizer + """ + cache = {} + + @functools.wraps(func) + def wrapper(*args): + key = make_key(args) + try: + return cache[key] + except KeyError: + cache[key] = res = func(*args) + return res + +# wrapper.cache = cache + return wrapper + + +def make_key(args): + return tuple(make_key_entry(i) for i in args) + +def make_key_entry(x): + if isinstance(x, np.ndarray): + return np_array_hash(x) + return x + +def np_array_hash(arr): +# return id(arr) # this has been used so far + res = arr.tobytes() +# res = hashlib.sha256(res).hexdigest() # if tobytes was too large, we could hash it +# res = (arr.shape, res) # tobytes does not take shape into account + return res + + + + + +if __name__ == "__main__": + @npmemo + def expensive(arr, offset): + print("recalc", arr, offset) + return np.dot(arr, arr) + offset + + def test(arr, offset): + print("first") + res1 = expensive(arr, offset) + print("second") + res2 = expensive(arr, offset) + print() + assert np.array_equal(res1, res2) + + arrays = ( + [1, 2, 3], + [1, 2, 3, 4], + [1, 2, 3, 4] + ) + + offsets = ( + 2, + 2, + 5 + ) + + for a, 
o in zip(arrays, offsets): + a = np.array(a) + test(a, o) + +# print(expensive.cache) + + + -- 2.49.0 From a6af8ee19cdc1270990017ea57845f2bd8bbd766 Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Mon, 5 Aug 2024 18:15:31 +0200 Subject: [PATCH 087/159] use npmemo --- dap/algos/radprof.py | 19 +++++++++---------- dap/worker.py | 8 +------- 2 files changed, 10 insertions(+), 17 deletions(-) diff --git a/dap/algos/radprof.py b/dap/algos/radprof.py index cbea3a3..e3a4b28 100644 --- a/dap/algos/radprof.py +++ b/dap/algos/radprof.py @@ -1,16 +1,15 @@ import numpy as np +from .utils import npmemo -def calc_radial_integration(results, data, pixel_mask_pf, center, rad, norm): - if center is None: - center = [ - results["beam_center_x"], - results["beam_center_y"] - ] - rad = norm = None - if rad is None or norm is None: - rad, norm = prepare_radial_profile(data.shape, center, pixel_mask_pf) +def calc_radial_integration(results, data, pixel_mask_pf): + center = [ + results["beam_center_x"], + results["beam_center_y"] + ] + + rad, norm = prepare_radial_profile(data.shape, center, pixel_mask_pf) r_min = min(rad) r_max = max(rad) + 1 @@ -48,9 +47,9 @@ def calc_radial_integration(results, data, pixel_mask_pf, center, rad, norm): results["radint_I"] = rp[r_min:].tolist() #TODO: why not stop at r_max? 
results["radint_q"] = [r_min, r_max] - return center, rad, norm +@npmemo def prepare_radial_profile(shape, center, keep_pixels): y, x = np.indices(shape) x0, y0 = center diff --git a/dap/worker.py b/dap/worker.py index 2490c7b..e5b63f7 100644 --- a/dap/worker.py +++ b/dap/worker.py @@ -53,10 +53,6 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host zmq_socks = ZMQSockets(backend_address, accumulator_host, accumulator_port, visualisation_host, visualisation_port) - center_radial_integration = None - rad_radial_integration = None - norm_radial_integration = None - pedestal_name_saved = None pixel_mask_corrected = None @@ -78,7 +74,6 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host sleep(0.5) peakfinder_parameters = json_load(fn_peakfinder_parameters) peakfinder_parameters_time = new_time - center_radial_integration = None # beam_center_x/beam_center_y might have changed # if worker == 0: # print(f"({pulse_id}) update peakfinder parameters {old_peakfinder_parameters}", flush=True) # print(f" --> {peakfinder_parameters}", flush=True) @@ -141,7 +136,6 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host id_pixel_mask_2 = id(pixel_mask_corrected) if id_pixel_mask_1 != id_pixel_mask_2: - rad_radial_integration = norm_radial_integration = None # if the pixel mask changes, the radii and normalization need to be re-calculated if pixel_mask_corrected is not None: pixel_mask_pf = np.ascontiguousarray(pixel_mask_corrected) calc_apply_additional_mask(results, pixel_mask_pf) # changes pixel_mask_pf in place @@ -158,7 +152,7 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host # pump probe analysis do_radial_integration = results.get("do_radial_integration", False) if do_radial_integration: - center_radial_integration, rad_radial_integration, norm_radial_integration = calc_radial_integration(results, data, pixel_mask_pf, center_radial_integration, 
rad_radial_integration, norm_radial_integration) + calc_radial_integration(results, data, pixel_mask_pf) #copy image to work with peakfinder, just in case -- 2.49.0 From 3a63baf90951f1d92bc12d6b2c250cd4a5730272 Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Mon, 5 Aug 2024 18:18:46 +0200 Subject: [PATCH 088/159] dont repack center --- dap/algos/radprof.py | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/dap/algos/radprof.py b/dap/algos/radprof.py index e3a4b28..7adba8a 100644 --- a/dap/algos/radprof.py +++ b/dap/algos/radprof.py @@ -4,12 +4,10 @@ from .utils import npmemo def calc_radial_integration(results, data, pixel_mask_pf): - center = [ - results["beam_center_x"], - results["beam_center_y"] - ] + center_x = results["beam_center_x"] + center_y = results["beam_center_y"] - rad, norm = prepare_radial_profile(data.shape, center, pixel_mask_pf) + rad, norm = prepare_radial_profile(data.shape, center_x, center_y, pixel_mask_pf) r_min = min(rad) r_max = max(rad) + 1 @@ -50,9 +48,8 @@ def calc_radial_integration(results, data, pixel_mask_pf): @npmemo -def prepare_radial_profile(shape, center, keep_pixels): +def prepare_radial_profile(shape, x0, y0, keep_pixels): y, x = np.indices(shape) - x0, y0 = center rad = np.sqrt((x - x0)**2 + (y - y0)**2) if keep_pixels is not None: rad = rad[keep_pixels] -- 2.49.0 From e036e033f9731c60567d8ba6ebc2bf42a6b54ad5 Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Mon, 5 Aug 2024 18:20:35 +0200 Subject: [PATCH 089/159] moved do_radial_integration check inside function for better overview --- dap/algos/radprof.py | 4 ++++ dap/worker.py | 5 +---- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/dap/algos/radprof.py b/dap/algos/radprof.py index 7adba8a..1b90973 100644 --- a/dap/algos/radprof.py +++ b/dap/algos/radprof.py @@ -4,6 +4,10 @@ from .utils import npmemo def calc_radial_integration(results, data, pixel_mask_pf): + do_radial_integration = results.get("do_radial_integration", False) 
+ if not do_radial_integration: + return + center_x = results["beam_center_x"] center_y = results["beam_center_y"] diff --git a/dap/worker.py b/dap/worker.py index e5b63f7..8e754a1 100644 --- a/dap/worker.py +++ b/dap/worker.py @@ -150,10 +150,7 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host # pump probe analysis - do_radial_integration = results.get("do_radial_integration", False) - if do_radial_integration: - calc_radial_integration(results, data, pixel_mask_pf) - + calc_radial_integration(results, data, pixel_mask_pf) #copy image to work with peakfinder, just in case pfdata = data.copy() -- 2.49.0 From 7b56e45cb6904717248e9fbb0cfbe259327e71ac Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Mon, 5 Aug 2024 18:22:36 +0200 Subject: [PATCH 090/159] probably better to limit the cache in size --- dap/algos/utils/npmemo.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/dap/algos/utils/npmemo.py b/dap/algos/utils/npmemo.py index 5ea6670..0711f3e 100644 --- a/dap/algos/utils/npmemo.py +++ b/dap/algos/utils/npmemo.py @@ -5,8 +5,10 @@ import numpy as np def npmemo(func): """ - numpy array aware memoizer + numpy array aware memoizer with size limit """ + maxsize = 10 + order = [] cache = {} @functools.wraps(func) @@ -15,6 +17,10 @@ def npmemo(func): try: return cache[key] except KeyError: + if len(cache) >= maxsize: + oldest = order.pop(0) + cache.pop(oldest) + order.append(key) cache[key] = res = func(*args) return res @@ -71,6 +77,10 @@ if __name__ == "__main__": a = np.array(a) test(a, o) + for a, o in zip(arrays, offsets): + a = np.array(a) + test(a, o) + # print(expensive.cache) -- 2.49.0 From 242ec6ff5105b3546a09ee9c28b397b806a093a7 Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Tue, 6 Aug 2024 14:03:33 +0200 Subject: [PATCH 091/159] moved one half of ju_stream_adapter code into separate class --- dap/algos/__init__.py | 1 + dap/algos/jfdata.py | 42 ++++++++++++++++++++++++++++++++++++++++++ 
dap/worker.py | 25 ++++++------------------- 3 files changed, 49 insertions(+), 19 deletions(-) create mode 100644 dap/algos/jfdata.py diff --git a/dap/algos/__init__.py b/dap/algos/__init__.py index e0d580e..1ca0eec 100644 --- a/dap/algos/__init__.py +++ b/dap/algos/__init__.py @@ -1,5 +1,6 @@ from .addmask import calc_apply_additional_mask +from .jfdata import JFData from .mask import calc_mask_pixels from .peakfind import calc_peakfinder_analysis from .radprof import calc_radial_integration diff --git a/dap/algos/jfdata.py b/dap/algos/jfdata.py new file mode 100644 index 0000000..df520a6 --- /dev/null +++ b/dap/algos/jfdata.py @@ -0,0 +1,42 @@ +import numpy as np + +import jungfrau_utils as ju + +from .addmask import calc_apply_additional_mask + + +class JFData: + + def __init__(self): + self.ju_stream_adapter = ju.StreamAdapter() + self.id_pixel_mask_corrected = None + self.pixel_mask_pf = None + + + def get_pixel_mask(self, results, double_pixels): + pixel_mask_corrected = self.ju_stream_adapter.handler.get_pixel_mask(double_pixels=double_pixels) + if pixel_mask_corrected is None: + self.id_pixel_mask_corrected = None + self.pixel_mask_pf = None + return None + + # starting from ju 3.3.1 pedestal file is cached in library, re-calculated only if parameters (and/or pedestal file) have changed + new_id_pixel_mask_corrected = id(pixel_mask_corrected) + old_id_pixel_mask_corrected = self.id_pixel_mask_corrected + if new_id_pixel_mask_corrected == old_id_pixel_mask_corrected: + return self.pixel_mask_pf + + pixel_mask_pf = np.ascontiguousarray(pixel_mask_corrected) + calc_apply_additional_mask(results, pixel_mask_pf) # changes pixel_mask_pf in place + + self.id_pixel_mask_corrected = new_id_pixel_mask_corrected + self.pixel_mask_pf = pixel_mask_pf + + return pixel_mask_pf + + + def get_saturated_pixels(self, image, double_pixels): + return self.ju_stream_adapter.handler.get_saturated_pixels(image, double_pixels=double_pixels) + + + diff --git a/dap/worker.py 
b/dap/worker.py index 8e754a1..c5825bc 100644 --- a/dap/worker.py +++ b/dap/worker.py @@ -3,10 +3,9 @@ import os from random import randint from time import sleep -import jungfrau_utils as ju import numpy as np -from algos import calc_apply_additional_mask, calc_apply_threshold, calc_mask_pixels, calc_peakfinder_analysis, calc_radial_integration, calc_roi, calc_spi_analysis +from algos import calc_apply_threshold, calc_mask_pixels, calc_peakfinder_analysis, calc_radial_integration, calc_roi, calc_spi_analysis, JFData from utils import json_load, read_bit from zmqsocks import ZMQSockets @@ -48,16 +47,14 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host pulse_id = 0 - ju_stream_adapter = ju.StreamAdapter() + jfdata = JFData() + ju_stream_adapter = jfdata.ju_stream_adapter zmq_socks = ZMQSockets(backend_address, accumulator_host, accumulator_port, visualisation_host, visualisation_port) pedestal_name_saved = None - pixel_mask_corrected = None - pixel_mask_pf = None - n_aggregated_images = 1 data_summed = None @@ -130,20 +127,10 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host data = np.ascontiguousarray(data) - # starting from ju 3.3.1 pedestal file is cached in library, re-calculated only if parameters (and/or pedestal file) are changed - id_pixel_mask_1 = id(pixel_mask_corrected) - pixel_mask_corrected = ju_stream_adapter.handler.get_pixel_mask(double_pixels=double_pixels) - id_pixel_mask_2 = id(pixel_mask_corrected) + pixel_mask_pf = jfdata.get_pixel_mask(results, double_pixels) - if id_pixel_mask_1 != id_pixel_mask_2: - if pixel_mask_corrected is not None: - pixel_mask_pf = np.ascontiguousarray(pixel_mask_corrected) - calc_apply_additional_mask(results, pixel_mask_pf) # changes pixel_mask_pf in place - else: - pixel_mask_pf = None - - if pixel_mask_corrected is not None: - saturated_pixels_y, saturated_pixels_x = ju_stream_adapter.handler.get_saturated_pixels(image, double_pixels=double_pixels) + 
if pixel_mask_pf is not None: + saturated_pixels_y, saturated_pixels_x = jfdata.get_saturated_pixels(image, double_pixels) results["saturated_pixels"] = len(saturated_pixels_x) results["saturated_pixels_x"] = saturated_pixels_x.tolist() results["saturated_pixels_y"] = saturated_pixels_y.tolist() -- 2.49.0 From 2e80fc0e6e559a213480901ad4a57e302f6a83e1 Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Tue, 6 Aug 2024 15:20:09 +0200 Subject: [PATCH 092/159] moved the other half of ju_stream_adapter code into separate class --- dap/algos/jfdata.py | 13 +++++++++++++ dap/worker.py | 11 +++++------ 2 files changed, 18 insertions(+), 6 deletions(-) diff --git a/dap/algos/jfdata.py b/dap/algos/jfdata.py index df520a6..7d6848e 100644 --- a/dap/algos/jfdata.py +++ b/dap/algos/jfdata.py @@ -13,6 +13,19 @@ class JFData: self.pixel_mask_pf = None + def refresh_pixel_mask(self): + pixel_mask_current = self.ju_stream_adapter.handler.pixel_mask + self.ju_stream_adapter.handler.pixel_mask = pixel_mask_current + + + def process(self, image, metadata, double_pixels): + return self.ju_stream_adapter.process(image, metadata, double_pixels=double_pixels) + + + def has_pedestal_file(self): + return bool(self.ju_stream_adapter.handler.pedestal_file) + + def get_pixel_mask(self, results, double_pixels): pixel_mask_corrected = self.ju_stream_adapter.handler.get_pixel_mask(double_pixels=double_pixels) if pixel_mask_corrected is None: diff --git a/dap/worker.py b/dap/worker.py index c5825bc..dfd2839 100644 --- a/dap/worker.py +++ b/dap/worker.py @@ -48,7 +48,6 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host pulse_id = 0 jfdata = JFData() - ju_stream_adapter = jfdata.ju_stream_adapter zmq_socks = ZMQSockets(backend_address, accumulator_host, accumulator_port, visualisation_host, visualisation_port) @@ -114,15 +113,15 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host double_pixels = results.get("double_pixels", "mask") 
pedestal_name = metadata.get("pedestal_name", None) + if pedestal_name is not None and pedestal_name != pedestal_name_saved: - pixel_mask_current = ju_stream_adapter.handler.pixel_mask - ju_stream_adapter.handler.pixel_mask = pixel_mask_current + jfdata.refresh_pixel_mask() pedestal_name_saved = pedestal_name - data = ju_stream_adapter.process(image, metadata, double_pixels=double_pixels) + data = jfdata.process(image, metadata, double_pixels) - # pedestal file is not in stream, skip this frame - if not ju_stream_adapter.handler.pedestal_file: + # the pedestal file is loaded in process(), this check needs to be afterwards + if not jfdata.has_pedestal_file(): continue data = np.ascontiguousarray(data) -- 2.49.0 From 68c7bd78665850bcec19b56b9bfb80eb0f93e2da Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Tue, 6 Aug 2024 15:56:28 +0200 Subject: [PATCH 093/159] moved pedestal_name_saved into JFData --- dap/algos/jfdata.py | 14 ++++++++++++++ dap/worker.py | 6 +----- 2 files changed, 15 insertions(+), 5 deletions(-) diff --git a/dap/algos/jfdata.py b/dap/algos/jfdata.py index 7d6848e..8588415 100644 --- a/dap/algos/jfdata.py +++ b/dap/algos/jfdata.py @@ -9,10 +9,24 @@ class JFData: def __init__(self): self.ju_stream_adapter = ju.StreamAdapter() + self.pedestal_name_saved = None self.id_pixel_mask_corrected = None self.pixel_mask_pf = None + def ensure_current_pixel_mask(self, pedestal_name): + if pedestal_name is None: + return + + new_pedestal_name = pedestal_name + old_pedestal_name = self.pedestal_name_saved + if new_pedestal_name == old_pedestal_name: + return + + self.refresh_pixel_mask() + self.pedestal_name_saved = pedestal_name + + def refresh_pixel_mask(self): pixel_mask_current = self.ju_stream_adapter.handler.pixel_mask self.ju_stream_adapter.handler.pixel_mask = pixel_mask_current diff --git a/dap/worker.py b/dap/worker.py index dfd2839..6bcd0c2 100644 --- a/dap/worker.py +++ b/dap/worker.py @@ -52,8 +52,6 @@ def work(backend_address, accumulator_host, 
accumulator_port, visualisation_host zmq_socks = ZMQSockets(backend_address, accumulator_host, accumulator_port, visualisation_host, visualisation_port) - pedestal_name_saved = None - n_aggregated_images = 1 data_summed = None @@ -114,9 +112,7 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host pedestal_name = metadata.get("pedestal_name", None) - if pedestal_name is not None and pedestal_name != pedestal_name_saved: - jfdata.refresh_pixel_mask() - pedestal_name_saved = pedestal_name + jfdata.ensure_current_pixel_mask(pedestal_name) data = jfdata.process(image, metadata, double_pixels) -- 2.49.0 From 9fb9595c83b3e5aed48a859e5d51758e29ddf3ce Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Tue, 6 Aug 2024 16:07:04 +0200 Subject: [PATCH 094/159] order --- dap/worker.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/dap/worker.py b/dap/worker.py index 6bcd0c2..ddb1f16 100644 --- a/dap/worker.py +++ b/dap/worker.py @@ -108,12 +108,12 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host continue - double_pixels = results.get("double_pixels", "mask") - pedestal_name = metadata.get("pedestal_name", None) jfdata.ensure_current_pixel_mask(pedestal_name) + double_pixels = results.get("double_pixels", "mask") + data = jfdata.process(image, metadata, double_pixels) # the pedestal file is loaded in process(), this check needs to be afterwards -- 2.49.0 From 159574f4e8dff9c671546f13325a4c75ddd6babc Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Tue, 6 Aug 2024 16:37:56 +0200 Subject: [PATCH 095/159] combined a bit more of the logic --- dap/algos/jfdata.py | 9 ++++++--- dap/worker.py | 5 +---- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/dap/algos/jfdata.py b/dap/algos/jfdata.py index 8588415..92bea4d 100644 --- a/dap/algos/jfdata.py +++ b/dap/algos/jfdata.py @@ -33,11 +33,14 @@ class JFData: def process(self, image, metadata, double_pixels): - return 
self.ju_stream_adapter.process(image, metadata, double_pixels=double_pixels) + data = self.ju_stream_adapter.process(image, metadata, double_pixels=double_pixels) + # the pedestal file is loaded in process(), this check needs to be afterwards + if not self.ju_stream_adapter.handler.pedestal_file: + return None - def has_pedestal_file(self): - return bool(self.ju_stream_adapter.handler.pedestal_file) + data = np.ascontiguousarray(data) + return data def get_pixel_mask(self, results, double_pixels): diff --git a/dap/worker.py b/dap/worker.py index ddb1f16..fca3b83 100644 --- a/dap/worker.py +++ b/dap/worker.py @@ -116,12 +116,9 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host data = jfdata.process(image, metadata, double_pixels) - # the pedestal file is loaded in process(), this check needs to be afterwards - if not jfdata.has_pedestal_file(): + if not data: continue - data = np.ascontiguousarray(data) - pixel_mask_pf = jfdata.get_pixel_mask(results, double_pixels) if pixel_mask_pf is not None: -- 2.49.0 From 6639b46afbbd6f74107547a47857fe9f66c9ef76 Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Tue, 6 Aug 2024 17:01:02 +0200 Subject: [PATCH 096/159] order --- dap/worker.py | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/dap/worker.py b/dap/worker.py index fca3b83..dab2485 100644 --- a/dap/worker.py +++ b/dap/worker.py @@ -45,13 +45,14 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host peakfinder_parameters = json_load(fn_peakfinder_parameters) peakfinder_parameters_time = os.path.getmtime(fn_peakfinder_parameters) - pulse_id = 0 jfdata = JFData() zmq_socks = ZMQSockets(backend_address, accumulator_host, accumulator_port, visualisation_host, visualisation_port) + pulse_id = 0 + n_aggregated_images = 1 data_summed = None @@ -84,10 +85,9 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host if metadata["shape"] == [2, 2]: # this is used 
as marker for empty images continue + pulse_id = metadata.get("pulse_id", 0) + results = metadata.copy() - - - pulse_id = results.get("pulse_id", 0) results.update(peakfinder_parameters) results["number_of_spots"] = 0 @@ -128,11 +128,9 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host results["saturated_pixels_y"] = saturated_pixels_y.tolist() -# pump probe analysis calc_radial_integration(results, data, pixel_mask_pf) - #copy image to work with peakfinder, just in case - pfdata = data.copy() + pfdata = data.copy() #TODO: is this copy needed? calc_mask_pixels(pfdata, pixel_mask_pf) # changes pfdata in place calc_apply_threshold(results, pfdata) # changes pfdata in place -- 2.49.0 From 9b2fc0efeaa1d95f6f5985b24a075cf66315e95a Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Tue, 6 Aug 2024 20:51:45 +0200 Subject: [PATCH 097/159] turned utils into folder --- dap/utils/__init__.py | 4 ++++ dap/{ => utils}/utils.py | 0 2 files changed, 4 insertions(+) create mode 100644 dap/utils/__init__.py rename dap/{ => utils}/utils.py (100%) diff --git a/dap/utils/__init__.py b/dap/utils/__init__.py new file mode 100644 index 0000000..30ce09d --- /dev/null +++ b/dap/utils/__init__.py @@ -0,0 +1,4 @@ + +from .utils import * + + diff --git a/dap/utils.py b/dap/utils/utils.py similarity index 100% rename from dap/utils.py rename to dap/utils/utils.py -- 2.49.0 From f1373936c0079a5022697f8b6b6415209bbec20d Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Tue, 6 Aug 2024 20:56:56 +0200 Subject: [PATCH 098/159] added BufferedJSON --- dap/utils/bufjson.py | 35 +++++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) create mode 100644 dap/utils/bufjson.py diff --git a/dap/utils/bufjson.py b/dap/utils/bufjson.py new file mode 100644 index 0000000..2b243cd --- /dev/null +++ b/dap/utils/bufjson.py @@ -0,0 +1,35 @@ +import os +from time import sleep + +from .utils import json_load + + +class BufferedJSON: + + def __init__(self, fname): + 
self.fname = fname + self.last_time = self.get_time() + self.last_data = self.get_data() + + + def load(self): + current_time = self.get_time() + time_delta = current_time - self.last_time + if time_delta <= 2: #TODO: is that a good time? + return self.last_data + + sleep(0.5) #TODO: why? + current_data = self.get_data() + self.last_time = current_time + self.last_data = current_data + return current_data + + + def get_time(self): + return os.path.getmtime(self.fname) + + def get_data(self, *args, **kwargs): + return json_load(self.fname, *args, **kwargs) + + + -- 2.49.0 From 9dbd11d657815d7c86aba31ab576ddeed078710d Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Tue, 6 Aug 2024 20:59:14 +0200 Subject: [PATCH 099/159] added BufferedJSON --- dap/utils/__init__.py | 1 + 1 file changed, 1 insertion(+) diff --git a/dap/utils/__init__.py b/dap/utils/__init__.py index 30ce09d..11637e3 100644 --- a/dap/utils/__init__.py +++ b/dap/utils/__init__.py @@ -1,4 +1,5 @@ from .utils import * +from .bufjson import BufferedJSON -- 2.49.0 From 018fa6cf4ba630a76bc16710e8634b964e953b76 Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Wed, 7 Aug 2024 15:27:13 +0200 Subject: [PATCH 100/159] use BufferedJSON --- dap/worker.py | 18 +++++------------- 1 file changed, 5 insertions(+), 13 deletions(-) diff --git a/dap/worker.py b/dap/worker.py index dab2485..514038e 100644 --- a/dap/worker.py +++ b/dap/worker.py @@ -1,12 +1,11 @@ import argparse import os from random import randint -from time import sleep import numpy as np from algos import calc_apply_threshold, calc_mask_pixels, calc_peakfinder_analysis, calc_radial_integration, calc_roi, calc_spi_analysis, JFData -from utils import json_load, read_bit +from utils import BufferedJSON, read_bit from zmqsocks import ZMQSockets @@ -40,10 +39,9 @@ def main(): def work(backend_address, accumulator_host, accumulator_port, visualisation_host, visualisation_port, fn_peakfinder_parameters, skip_frames_rate): peakfinder_parameters = {} - 
peakfinder_parameters_time = -1 + bj_peakfinder_parameters = None if fn_peakfinder_parameters is not None and os.path.exists(fn_peakfinder_parameters): - peakfinder_parameters = json_load(fn_peakfinder_parameters) - peakfinder_parameters_time = os.path.getmtime(fn_peakfinder_parameters) + bj_peakfinder_parameters = BufferedJSON(fn_peakfinder_parameters) jfdata = JFData() @@ -61,14 +59,8 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host # check if peakfinder parameters changed and then re-read it try: - if peakfinder_parameters_time > 0: - new_time = os.path.getmtime(fn_peakfinder_parameters) - time_delta = new_time - peakfinder_parameters_time - if time_delta > 2.0: -# old_peakfinder_parameters = peakfinder_parameters - sleep(0.5) - peakfinder_parameters = json_load(fn_peakfinder_parameters) - peakfinder_parameters_time = new_time + if bj_peakfinder_parameters: + peakfinder_parameters = bj_peakfinder_parameters.load() # if worker == 0: # print(f"({pulse_id}) update peakfinder parameters {old_peakfinder_parameters}", flush=True) # print(f" --> {peakfinder_parameters}", flush=True) -- 2.49.0 From bff86898e983dbe08dfe923962c244feb1f10dbb Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Wed, 7 Aug 2024 16:41:33 +0200 Subject: [PATCH 101/159] moved json_load to bufjson.py --- dap/utils/bufjson.py | 9 +++++++-- dap/utils/utils.py | 7 ------- 2 files changed, 7 insertions(+), 9 deletions(-) diff --git a/dap/utils/bufjson.py b/dap/utils/bufjson.py index 2b243cd..342e5be 100644 --- a/dap/utils/bufjson.py +++ b/dap/utils/bufjson.py @@ -1,8 +1,7 @@ +import json import os from time import sleep -from .utils import json_load - class BufferedJSON: @@ -33,3 +32,9 @@ class BufferedJSON: +def json_load(filename, *args, **kwargs): + with open(filename, "r") as f: + return json.load(f, *args, **kwargs) + + + diff --git a/dap/utils/utils.py b/dap/utils/utils.py index 0e687ca..070d9ef 100644 --- a/dap/utils/utils.py +++ b/dap/utils/utils.py @@ -1,10 
+1,3 @@ -import json - - -def json_load(filename, *args, **kwargs): - with open(filename, "r") as f: - return json.load(f, *args, **kwargs) - def read_bit(bits, n): """ -- 2.49.0 From 3b599164faa04b204b051f1612f0eb489882b129 Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Wed, 7 Aug 2024 16:42:58 +0200 Subject: [PATCH 102/159] file naming --- dap/utils/__init__.py | 2 +- dap/utils/{utils.py => bits.py} | 0 2 files changed, 1 insertion(+), 1 deletion(-) rename dap/utils/{utils.py => bits.py} (100%) diff --git a/dap/utils/__init__.py b/dap/utils/__init__.py index 11637e3..4474c56 100644 --- a/dap/utils/__init__.py +++ b/dap/utils/__init__.py @@ -1,5 +1,5 @@ -from .utils import * +from .bits import read_bit from .bufjson import BufferedJSON diff --git a/dap/utils/utils.py b/dap/utils/bits.py similarity index 100% rename from dap/utils/utils.py rename to dap/utils/bits.py -- 2.49.0 From 8531f0a28d329be55ab7dd99a055e3a0dbb85f5f Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Wed, 7 Aug 2024 17:02:04 +0200 Subject: [PATCH 103/159] added BufferedJSON.exists() method, use it in get_time/get_data --- dap/utils/bufjson.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/dap/utils/bufjson.py b/dap/utils/bufjson.py index 342e5be..7ac7be6 100644 --- a/dap/utils/bufjson.py +++ b/dap/utils/bufjson.py @@ -25,11 +25,20 @@ class BufferedJSON: def get_time(self): + if not self.exists(): + return -1 return os.path.getmtime(self.fname) def get_data(self, *args, **kwargs): + if not self.exists(): + return {} return json_load(self.fname, *args, **kwargs) + def exists(self): + if not self.fname: + return False + return os.path.exists(self.fname) + def json_load(filename, *args, **kwargs): -- 2.49.0 From a184e49eee303897f4c37ed1c36ba996b52a8485 Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Wed, 7 Aug 2024 17:05:30 +0200 Subject: [PATCH 104/159] use that BufferedJSON works for non-existing files --- dap/worker.py | 10 ++-------- 1 file changed, 2 insertions(+), 8 
deletions(-) diff --git a/dap/worker.py b/dap/worker.py index 514038e..9c982ce 100644 --- a/dap/worker.py +++ b/dap/worker.py @@ -1,5 +1,4 @@ import argparse -import os from random import randint import numpy as np @@ -38,11 +37,7 @@ def main(): def work(backend_address, accumulator_host, accumulator_port, visualisation_host, visualisation_port, fn_peakfinder_parameters, skip_frames_rate): - peakfinder_parameters = {} - bj_peakfinder_parameters = None - if fn_peakfinder_parameters is not None and os.path.exists(fn_peakfinder_parameters): - bj_peakfinder_parameters = BufferedJSON(fn_peakfinder_parameters) - + bj_peakfinder_parameters = BufferedJSON(fn_peakfinder_parameters) jfdata = JFData() @@ -59,8 +54,7 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host # check if peakfinder parameters changed and then re-read it try: - if bj_peakfinder_parameters: - peakfinder_parameters = bj_peakfinder_parameters.load() + peakfinder_parameters = bj_peakfinder_parameters.load() # if worker == 0: # print(f"({pulse_id}) update peakfinder parameters {old_peakfinder_parameters}", flush=True) # print(f" --> {peakfinder_parameters}", flush=True) -- 2.49.0 From 0914b8d1690e0c54dd2617f0536ff1feef092607 Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Wed, 7 Aug 2024 17:27:42 +0200 Subject: [PATCH 105/159] removed commented part and reworded printout; todo notes --- dap/utils/bufjson.py | 1 + dap/worker.py | 8 +------- 2 files changed, 2 insertions(+), 7 deletions(-) diff --git a/dap/utils/bufjson.py b/dap/utils/bufjson.py index 7ac7be6..7e6fcfe 100644 --- a/dap/utils/bufjson.py +++ b/dap/utils/bufjson.py @@ -17,6 +17,7 @@ class BufferedJSON: if time_delta <= 2: #TODO: is that a good time? return self.last_data + #TODO: logging for change? sleep(0.5) #TODO: why? 
current_data = self.get_data() self.last_time = current_time diff --git a/dap/worker.py b/dap/worker.py index 9c982ce..2dfbfd7 100644 --- a/dap/worker.py +++ b/dap/worker.py @@ -51,16 +51,10 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host while True: - -# check if peakfinder parameters changed and then re-read it try: peakfinder_parameters = bj_peakfinder_parameters.load() -# if worker == 0: -# print(f"({pulse_id}) update peakfinder parameters {old_peakfinder_parameters}", flush=True) -# print(f" --> {peakfinder_parameters}", flush=True) -# print(flush=True) except Exception as e: - print(f"({pulse_id}) problem ({e}) to read peakfinder parameters file", flush=True) + print(f"({pulse_id}) cannot read peakfinder parameters file: {e}", flush=True) #TODO: logging? if not zmq_socks.has_data(): -- 2.49.0 From 474df5596965d1fe25bdee6730e7b5cf00691959 Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Wed, 7 Aug 2024 17:30:55 +0200 Subject: [PATCH 106/159] use the current pid, not the one from the iteration before --- dap/worker.py | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/dap/worker.py b/dap/worker.py index 2dfbfd7..8b920dd 100644 --- a/dap/worker.py +++ b/dap/worker.py @@ -44,19 +44,11 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host zmq_socks = ZMQSockets(backend_address, accumulator_host, accumulator_port, visualisation_host, visualisation_port) - pulse_id = 0 - n_aggregated_images = 1 data_summed = None while True: - try: - peakfinder_parameters = bj_peakfinder_parameters.load() - except Exception as e: - print(f"({pulse_id}) cannot read peakfinder parameters file: {e}", flush=True) #TODO: logging? 
- - if not zmq_socks.has_data(): continue @@ -65,8 +57,15 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host if metadata["shape"] == [2, 2]: # this is used as marker for empty images continue + pulse_id = metadata.get("pulse_id", 0) + try: + peakfinder_parameters = bj_peakfinder_parameters.load() + except Exception as e: + print(f"({pulse_id}) cannot read peakfinder parameters file: {e}", flush=True) #TODO: logging? + + results = metadata.copy() results.update(peakfinder_parameters) -- 2.49.0 From f77f109ed28c7009bcde56146b8d303558af7d0f Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Wed, 7 Aug 2024 18:27:21 +0200 Subject: [PATCH 107/159] removed the one case of camelCase --- dap/worker.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/dap/worker.py b/dap/worker.py index 8b920dd..1a402be 100644 --- a/dap/worker.py +++ b/dap/worker.py @@ -118,7 +118,7 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host calc_peakfinder_analysis(results, pfdata, pixel_mask_pf) # ??? - forceSendVisualisation = False + force_send_visualisation = False if data.dtype != np.uint16: apply_threshold = results.get("apply_threshold", False) apply_aggregation = results.get("apply_aggregation", False) @@ -141,7 +141,7 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host results["aggregated_images"] = n_aggregated_images results["worker"] = 1 #TODO: keep this for backwards compatibility? 
if n_aggregated_images >= results["aggregation_max"]: - forceSendVisualisation = True + force_send_visualisation = True data_summed = None n_aggregated_images = 1 if pixel_mask_pf is not None: @@ -157,7 +157,7 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host zmq_socks.send_accumulator(results) - send_empty_cond1 = (apply_aggregation and "aggregation_max" in results and not forceSendVisualisation) + send_empty_cond1 = (apply_aggregation and "aggregation_max" in results and not force_send_visualisation) send_empty_cond2 = (not results["is_good_frame"] or not (results["is_hit_frame"] or randint(1, skip_frames_rate) == 1)) if send_empty_cond1 or send_empty_cond2: -- 2.49.0 From 779d1e1d772446bcd965d6b7c3c5b582b236f0ec Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Thu, 8 Aug 2024 15:40:32 +0200 Subject: [PATCH 108/159] moved calculation of force_send_visualisation out of work function --- dap/algos/__init__.py | 1 + dap/algos/forcesend.py | 40 ++++++++++++++++++++++++++++++++++++++++ dap/worker.py | 36 ++++-------------------------------- 3 files changed, 45 insertions(+), 32 deletions(-) create mode 100644 dap/algos/forcesend.py diff --git a/dap/algos/__init__.py b/dap/algos/__init__.py index 1ca0eec..f30eec4 100644 --- a/dap/algos/__init__.py +++ b/dap/algos/__init__.py @@ -1,6 +1,7 @@ from .addmask import calc_apply_additional_mask from .jfdata import JFData +from .forcesend import calc_force_send from .mask import calc_mask_pixels from .peakfind import calc_peakfinder_analysis from .radprof import calc_radial_integration diff --git a/dap/algos/forcesend.py b/dap/algos/forcesend.py new file mode 100644 index 0000000..f336523 --- /dev/null +++ b/dap/algos/forcesend.py @@ -0,0 +1,40 @@ +import numpy as np + + +def calc_force_send(results, data, pixel_mask_pf, image, n_aggregated_images, data_summed): + force_send_visualisation = False + if data.dtype != np.uint16: + apply_threshold = results.get("apply_threshold", False) + 
apply_aggregation = results.get("apply_aggregation", False) + if not apply_aggregation: + data_summed = None + n_aggregated_images = 1 + if apply_threshold or apply_aggregation: + if apply_threshold and all(k in results for k in ("threshold_min", "threshold_max")): + threshold_min = float(results["threshold_min"]) + threshold_max = float(results["threshold_max"]) + data[data < threshold_min] = 0.0 + if threshold_max > threshold_min: + data[data > threshold_max] = 0.0 + if apply_aggregation and "aggregation_max" in results: + if data_summed is not None: + data += data_summed + n_aggregated_images += 1 + data_summed = data.copy() + data_summed[data == -np.nan] = -np.nan #TODO: this does nothing + results["aggregated_images"] = n_aggregated_images + results["worker"] = 1 #TODO: keep this for backwards compatibility? + if n_aggregated_images >= results["aggregation_max"]: + force_send_visualisation = True + data_summed = None + n_aggregated_images = 1 + if pixel_mask_pf is not None: + data[~pixel_mask_pf] = np.nan + + else: + data = image + + return data, force_send_visualisation, n_aggregated_images, data_summed + + + diff --git a/dap/worker.py b/dap/worker.py index 1a402be..77cae96 100644 --- a/dap/worker.py +++ b/dap/worker.py @@ -3,7 +3,7 @@ from random import randint import numpy as np -from algos import calc_apply_threshold, calc_mask_pixels, calc_peakfinder_analysis, calc_radial_integration, calc_roi, calc_spi_analysis, JFData +from algos import calc_apply_threshold, calc_force_send, calc_mask_pixels, calc_peakfinder_analysis, calc_radial_integration, calc_roi, calc_spi_analysis, JFData from utils import BufferedJSON, read_bit from zmqsocks import ZMQSockets @@ -118,37 +118,7 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host calc_peakfinder_analysis(results, pfdata, pixel_mask_pf) # ??? 
- force_send_visualisation = False - if data.dtype != np.uint16: - apply_threshold = results.get("apply_threshold", False) - apply_aggregation = results.get("apply_aggregation", False) - if not apply_aggregation: - data_summed = None - n_aggregated_images = 1 - if apply_threshold or apply_aggregation: - if apply_threshold and all(k in results for k in ("threshold_min", "threshold_max")): - threshold_min = float(results["threshold_min"]) - threshold_max = float(results["threshold_max"]) - data[data < threshold_min] = 0.0 - if threshold_max > threshold_min: - data[data > threshold_max] = 0.0 - if apply_aggregation and "aggregation_max" in results: - if data_summed is not None: - data += data_summed - n_aggregated_images += 1 - data_summed = data.copy() - data_summed[data == -np.nan] = -np.nan #TODO: this does nothing - results["aggregated_images"] = n_aggregated_images - results["worker"] = 1 #TODO: keep this for backwards compatibility? - if n_aggregated_images >= results["aggregation_max"]: - force_send_visualisation = True - data_summed = None - n_aggregated_images = 1 - if pixel_mask_pf is not None: - data[~pixel_mask_pf] = np.nan - - else: - data = image + data, force_send_visualisation, n_aggregated_images, data_summed = calc_force_send(results, data, pixel_mask_pf, image, n_aggregated_images, data_summed) results["type"] = str(data.dtype) results["shape"] = data.shape @@ -157,6 +127,8 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host zmq_socks.send_accumulator(results) + apply_aggregation = results.get("apply_aggregation", False) + send_empty_cond1 = (apply_aggregation and "aggregation_max" in results and not force_send_visualisation) send_empty_cond2 = (not results["is_good_frame"] or not (results["is_hit_frame"] or randint(1, skip_frames_rate) == 1)) -- 2.49.0 From 7987125bf56b0fccd63ab37cfbbd5914da218550 Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Thu, 8 Aug 2024 16:07:08 +0200 Subject: [PATCH 109/159] some new lines 
--- dap/algos/forcesend.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/dap/algos/forcesend.py b/dap/algos/forcesend.py index f336523..d98f0e8 100644 --- a/dap/algos/forcesend.py +++ b/dap/algos/forcesend.py @@ -3,12 +3,15 @@ import numpy as np def calc_force_send(results, data, pixel_mask_pf, image, n_aggregated_images, data_summed): force_send_visualisation = False + if data.dtype != np.uint16: apply_threshold = results.get("apply_threshold", False) apply_aggregation = results.get("apply_aggregation", False) + if not apply_aggregation: data_summed = None n_aggregated_images = 1 + if apply_threshold or apply_aggregation: if apply_threshold and all(k in results for k in ("threshold_min", "threshold_max")): threshold_min = float(results["threshold_min"]) @@ -16,6 +19,7 @@ def calc_force_send(results, data, pixel_mask_pf, image, n_aggregated_images, da data[data < threshold_min] = 0.0 if threshold_max > threshold_min: data[data > threshold_max] = 0.0 + if apply_aggregation and "aggregation_max" in results: if data_summed is not None: data += data_summed @@ -24,10 +28,12 @@ def calc_force_send(results, data, pixel_mask_pf, image, n_aggregated_images, da data_summed[data == -np.nan] = -np.nan #TODO: this does nothing results["aggregated_images"] = n_aggregated_images results["worker"] = 1 #TODO: keep this for backwards compatibility? 
+ if n_aggregated_images >= results["aggregation_max"]: force_send_visualisation = True data_summed = None n_aggregated_images = 1 + if pixel_mask_pf is not None: data[~pixel_mask_pf] = np.nan -- 2.49.0 From a7327ddbf75c8e8acc2b82717dfb103f80a9940d Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Thu, 8 Aug 2024 16:07:54 +0200 Subject: [PATCH 110/159] early exit 1 --- dap/algos/forcesend.py | 60 ++++++++++++++++++++++-------------------- 1 file changed, 31 insertions(+), 29 deletions(-) diff --git a/dap/algos/forcesend.py b/dap/algos/forcesend.py index d98f0e8..cc60598 100644 --- a/dap/algos/forcesend.py +++ b/dap/algos/forcesend.py @@ -4,41 +4,43 @@ import numpy as np def calc_force_send(results, data, pixel_mask_pf, image, n_aggregated_images, data_summed): force_send_visualisation = False - if data.dtype != np.uint16: - apply_threshold = results.get("apply_threshold", False) - apply_aggregation = results.get("apply_aggregation", False) + if data.dtype == np.uint16: + return data, force_send_visualisation, n_aggregated_images, data_summed - if not apply_aggregation: - data_summed = None - n_aggregated_images = 1 + apply_threshold = results.get("apply_threshold", False) + apply_aggregation = results.get("apply_aggregation", False) - if apply_threshold or apply_aggregation: - if apply_threshold and all(k in results for k in ("threshold_min", "threshold_max")): - threshold_min = float(results["threshold_min"]) - threshold_max = float(results["threshold_max"]) - data[data < threshold_min] = 0.0 - if threshold_max > threshold_min: - data[data > threshold_max] = 0.0 + if not apply_aggregation: + data_summed = None + n_aggregated_images = 1 - if apply_aggregation and "aggregation_max" in results: - if data_summed is not None: - data += data_summed - n_aggregated_images += 1 - data_summed = data.copy() - data_summed[data == -np.nan] = -np.nan #TODO: this does nothing - results["aggregated_images"] = n_aggregated_images - results["worker"] = 1 #TODO: keep this for 
backwards compatibility? + if apply_threshold or apply_aggregation: + if apply_threshold and all(k in results for k in ("threshold_min", "threshold_max")): + threshold_min = float(results["threshold_min"]) + threshold_max = float(results["threshold_max"]) + data[data < threshold_min] = 0.0 + if threshold_max > threshold_min: + data[data > threshold_max] = 0.0 - if n_aggregated_images >= results["aggregation_max"]: - force_send_visualisation = True - data_summed = None - n_aggregated_images = 1 + if apply_aggregation and "aggregation_max" in results: + if data_summed is not None: + data += data_summed + n_aggregated_images += 1 + data_summed = data.copy() + data_summed[data == -np.nan] = -np.nan #TODO: this does nothing + results["aggregated_images"] = n_aggregated_images + results["worker"] = 1 #TODO: keep this for backwards compatibility? - if pixel_mask_pf is not None: - data[~pixel_mask_pf] = np.nan + if n_aggregated_images >= results["aggregation_max"]: + force_send_visualisation = True + data_summed = None + n_aggregated_images = 1 - else: - data = image + if pixel_mask_pf is not None: + data[~pixel_mask_pf] = np.nan + + else: + data = image return data, force_send_visualisation, n_aggregated_images, data_summed -- 2.49.0 From 93f0702c4178cb1ebe7580027644b013e43fe011 Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Thu, 8 Aug 2024 16:09:36 +0200 Subject: [PATCH 111/159] early exit 2 --- dap/algos/forcesend.py | 52 +++++++++++++++++++++--------------------- 1 file changed, 26 insertions(+), 26 deletions(-) diff --git a/dap/algos/forcesend.py b/dap/algos/forcesend.py index cc60598..1262608 100644 --- a/dap/algos/forcesend.py +++ b/dap/algos/forcesend.py @@ -14,33 +14,33 @@ def calc_force_send(results, data, pixel_mask_pf, image, n_aggregated_images, da data_summed = None n_aggregated_images = 1 - if apply_threshold or apply_aggregation: - if apply_threshold and all(k in results for k in ("threshold_min", "threshold_max")): - threshold_min = 
float(results["threshold_min"]) - threshold_max = float(results["threshold_max"]) - data[data < threshold_min] = 0.0 - if threshold_max > threshold_min: - data[data > threshold_max] = 0.0 - - if apply_aggregation and "aggregation_max" in results: - if data_summed is not None: - data += data_summed - n_aggregated_images += 1 - data_summed = data.copy() - data_summed[data == -np.nan] = -np.nan #TODO: this does nothing - results["aggregated_images"] = n_aggregated_images - results["worker"] = 1 #TODO: keep this for backwards compatibility? - - if n_aggregated_images >= results["aggregation_max"]: - force_send_visualisation = True - data_summed = None - n_aggregated_images = 1 - - if pixel_mask_pf is not None: - data[~pixel_mask_pf] = np.nan - - else: + if not apply_threshold and not apply_aggregation: data = image + return data, force_send_visualisation, n_aggregated_images, data_summed + + if apply_threshold and all(k in results for k in ("threshold_min", "threshold_max")): + threshold_min = float(results["threshold_min"]) + threshold_max = float(results["threshold_max"]) + data[data < threshold_min] = 0.0 + if threshold_max > threshold_min: + data[data > threshold_max] = 0.0 + + if apply_aggregation and "aggregation_max" in results: + if data_summed is not None: + data += data_summed + n_aggregated_images += 1 + data_summed = data.copy() + data_summed[data == -np.nan] = -np.nan #TODO: this does nothing + results["aggregated_images"] = n_aggregated_images + results["worker"] = 1 #TODO: keep this for backwards compatibility? 
+ + if n_aggregated_images >= results["aggregation_max"]: + force_send_visualisation = True + data_summed = None + n_aggregated_images = 1 + + if pixel_mask_pf is not None: + data[~pixel_mask_pf] = np.nan return data, force_send_visualisation, n_aggregated_images, data_summed -- 2.49.0 From bfaa80c9d0fdd98e14d90d20201d2004608e37ea Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Thu, 8 Aug 2024 16:30:06 +0200 Subject: [PATCH 112/159] reorder --- dap/algos/forcesend.py | 12 ++++++------ dap/worker.py | 4 ++-- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/dap/algos/forcesend.py b/dap/algos/forcesend.py index 1262608..309a2f7 100644 --- a/dap/algos/forcesend.py +++ b/dap/algos/forcesend.py @@ -1,22 +1,22 @@ import numpy as np -def calc_force_send(results, data, pixel_mask_pf, image, n_aggregated_images, data_summed): +def calc_force_send(results, data, pixel_mask_pf, image, data_summed, n_aggregated_images): force_send_visualisation = False if data.dtype == np.uint16: - return data, force_send_visualisation, n_aggregated_images, data_summed + return data, force_send_visualisation, data_summed, n_aggregated_images - apply_threshold = results.get("apply_threshold", False) apply_aggregation = results.get("apply_aggregation", False) + apply_threshold = results.get("apply_threshold", False) if not apply_aggregation: data_summed = None n_aggregated_images = 1 - if not apply_threshold and not apply_aggregation: + if not apply_aggregation and not apply_threshold: data = image - return data, force_send_visualisation, n_aggregated_images, data_summed + return data, force_send_visualisation, data_summed, n_aggregated_images if apply_threshold and all(k in results for k in ("threshold_min", "threshold_max")): threshold_min = float(results["threshold_min"]) @@ -42,7 +42,7 @@ def calc_force_send(results, data, pixel_mask_pf, image, n_aggregated_images, da if pixel_mask_pf is not None: data[~pixel_mask_pf] = np.nan - return data, force_send_visualisation, 
n_aggregated_images, data_summed + return data, force_send_visualisation, data_summed, n_aggregated_images diff --git a/dap/worker.py b/dap/worker.py index 77cae96..25655bd 100644 --- a/dap/worker.py +++ b/dap/worker.py @@ -44,8 +44,8 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host zmq_socks = ZMQSockets(backend_address, accumulator_host, accumulator_port, visualisation_host, visualisation_port) - n_aggregated_images = 1 data_summed = None + n_aggregated_images = 1 while True: @@ -118,7 +118,7 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host calc_peakfinder_analysis(results, pfdata, pixel_mask_pf) # ??? - data, force_send_visualisation, n_aggregated_images, data_summed = calc_force_send(results, data, pixel_mask_pf, image, n_aggregated_images, data_summed) + data, force_send_visualisation, data_summed, n_aggregated_images = calc_force_send(results, data, pixel_mask_pf, image, data_summed, n_aggregated_images) results["type"] = str(data.dtype) results["shape"] = data.shape -- 2.49.0 From 2a12864feb7458a236fece3ffda2dcf0f44c9b4b Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Thu, 8 Aug 2024 16:38:52 +0200 Subject: [PATCH 113/159] removed line that does nothing (nan has no sign, nan always compares to False -> this never overwrites any element) --- dap/algos/forcesend.py | 1 - 1 file changed, 1 deletion(-) diff --git a/dap/algos/forcesend.py b/dap/algos/forcesend.py index 309a2f7..ace85ad 100644 --- a/dap/algos/forcesend.py +++ b/dap/algos/forcesend.py @@ -30,7 +30,6 @@ def calc_force_send(results, data, pixel_mask_pf, image, data_summed, n_aggregat data += data_summed n_aggregated_images += 1 data_summed = data.copy() - data_summed[data == -np.nan] = -np.nan #TODO: this does nothing results["aggregated_images"] = n_aggregated_images results["worker"] = 1 #TODO: keep this for backwards compatibility? 
-- 2.49.0 From 74e73c1247b3a0e0c746a26ef827dbb543251be7 Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Fri, 9 Aug 2024 11:33:55 +0200 Subject: [PATCH 114/159] split into functions --- dap/algos/forcesend.py | 25 ++++++++++++++++++++++--- 1 file changed, 22 insertions(+), 3 deletions(-) diff --git a/dap/algos/forcesend.py b/dap/algos/forcesend.py index ace85ad..3ee36ff 100644 --- a/dap/algos/forcesend.py +++ b/dap/algos/forcesend.py @@ -18,6 +18,18 @@ def calc_force_send(results, data, pixel_mask_pf, image, data_summed, n_aggregat data = image return data, force_send_visualisation, data_summed, n_aggregated_images + calc_apply_threshold(results, data) # changes data in place + + data, force_send_visualisation, data_summed, n_aggregated_images = calc_apply_aggregation(results, data, data_summed, n_aggregated_images) + + calc_apply_pixel_mask(data, pixel_mask_pf) # changes data in place + + return data, force_send_visualisation, data_summed, n_aggregated_images + + + +def calc_apply_threshold(results, data): + apply_threshold = results.get("apply_threshold", False) if apply_threshold and all(k in results for k in ("threshold_min", "threshold_max")): threshold_min = float(results["threshold_min"]) threshold_max = float(results["threshold_max"]) @@ -25,6 +37,11 @@ def calc_force_send(results, data, pixel_mask_pf, image, data_summed, n_aggregat if threshold_max > threshold_min: data[data > threshold_max] = 0.0 + +def calc_apply_aggregation(results, data, data_summed, n_aggregated_images): + force_send_visualisation = False + + apply_aggregation = results.get("apply_aggregation", False) if apply_aggregation and "aggregation_max" in results: if data_summed is not None: data += data_summed @@ -38,10 +55,12 @@ def calc_force_send(results, data, pixel_mask_pf, image, data_summed, n_aggregat data_summed = None n_aggregated_images = 1 - if pixel_mask_pf is not None: - data[~pixel_mask_pf] = np.nan - return data, force_send_visualisation, data_summed, n_aggregated_images 
+def calc_apply_pixel_mask(data, pixel_mask_pf): + if pixel_mask_pf is not None: + data[~pixel_mask_pf] = np.nan + + -- 2.49.0 From 03b556d61f412f223b49555c14a962fb202a1298 Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Fri, 9 Aug 2024 12:46:00 +0200 Subject: [PATCH 115/159] flatten the code; early exit --- dap/algos/forcesend.py | 46 ++++++++++++++++++++++++++---------------- 1 file changed, 29 insertions(+), 17 deletions(-) diff --git a/dap/algos/forcesend.py b/dap/algos/forcesend.py index 3ee36ff..183c017 100644 --- a/dap/algos/forcesend.py +++ b/dap/algos/forcesend.py @@ -30,30 +30,42 @@ def calc_force_send(results, data, pixel_mask_pf, image, data_summed, n_aggregat def calc_apply_threshold(results, data): apply_threshold = results.get("apply_threshold", False) - if apply_threshold and all(k in results for k in ("threshold_min", "threshold_max")): - threshold_min = float(results["threshold_min"]) - threshold_max = float(results["threshold_max"]) - data[data < threshold_min] = 0.0 - if threshold_max > threshold_min: - data[data > threshold_max] = 0.0 + if not apply_threshold: + return + + for k in ("threshold_min", "threshold_max"): + if k not in results: + return + + threshold_min = float(results["threshold_min"]) + threshold_max = float(results["threshold_max"]) + data[data < threshold_min] = 0.0 + if threshold_max > threshold_min: + data[data > threshold_max] = 0.0 def calc_apply_aggregation(results, data, data_summed, n_aggregated_images): force_send_visualisation = False apply_aggregation = results.get("apply_aggregation", False) - if apply_aggregation and "aggregation_max" in results: - if data_summed is not None: - data += data_summed - n_aggregated_images += 1 - data_summed = data.copy() - results["aggregated_images"] = n_aggregated_images - results["worker"] = 1 #TODO: keep this for backwards compatibility? 
+ if not apply_aggregation: + return data, force_send_visualisation, data_summed, n_aggregated_images - if n_aggregated_images >= results["aggregation_max"]: - force_send_visualisation = True - data_summed = None - n_aggregated_images = 1 + if "aggregation_max" not in results: + return data, force_send_visualisation, data_summed, n_aggregated_images + + if data_summed is not None: + data += data_summed + n_aggregated_images += 1 + data_summed = data.copy() + + results["aggregated_images"] = n_aggregated_images + results["worker"] = 1 #TODO: keep this for backwards compatibility? + + if n_aggregated_images >= results["aggregation_max"]: + force_send_visualisation = True + data_summed = None + n_aggregated_images = 1 return data, force_send_visualisation, data_summed, n_aggregated_images -- 2.49.0 From a7429115d75ca5568d2e80a0f5af6e93a468565e Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Fri, 9 Aug 2024 12:56:15 +0200 Subject: [PATCH 116/159] the usual todo note --- dap/algos/forcesend.py | 2 ++ dap/algos/radprof.py | 2 +- dap/algos/thresh.py | 2 +- 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/dap/algos/forcesend.py b/dap/algos/forcesend.py index 183c017..384cf2e 100644 --- a/dap/algos/forcesend.py +++ b/dap/algos/forcesend.py @@ -37,9 +37,11 @@ def calc_apply_threshold(results, data): if k not in results: return + #TODO: this is duplicated in calc_apply_threshold and calc_radial_integration threshold_min = float(results["threshold_min"]) threshold_max = float(results["threshold_max"]) data[data < threshold_min] = 0.0 + #TODO: skipping max is a guess, but not obvious/symmetric -- better to ensure the order min < max by switching them if needed if threshold_max > threshold_min: data[data > threshold_max] = 0.0 diff --git a/dap/algos/radprof.py b/dap/algos/radprof.py index 1b90973..b48456e 100644 --- a/dap/algos/radprof.py +++ b/dap/algos/radprof.py @@ -18,7 +18,7 @@ def calc_radial_integration(results, data, pixel_mask_pf): apply_threshold = 
results.get("apply_threshold", False) - #TODO: this is duplicated in calc_apply_threshold + #TODO: this is duplicated in calc_apply_threshold and calc_force_send if apply_threshold and all(k in results for k in ("threshold_min", "threshold_max")): threshold_min = float(results["threshold_min"]) threshold_max = float(results["threshold_max"]) diff --git a/dap/algos/thresh.py b/dap/algos/thresh.py index 843be71..6efb896 100644 --- a/dap/algos/thresh.py +++ b/dap/algos/thresh.py @@ -13,7 +13,7 @@ def calc_apply_threshold(results, data): threshold_value_choice = results.get("threshold_value", "NaN") threshold_value = 0 if threshold_value_choice == "0" else np.nan #TODO - #TODO: this is duplicated in calc_radial_integration + #TODO: this is duplicated in calc_radial_integration and calc_force_send threshold_min = float(results["threshold_min"]) threshold_max = float(results["threshold_max"]) data[data < threshold_min] = threshold_value -- 2.49.0 From f12a736355105a76537a6ebb4dd98289cfd2e603 Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Fri, 9 Aug 2024 14:54:30 +0200 Subject: [PATCH 117/159] simpler logic --- dap/algos/forcesend.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/dap/algos/forcesend.py b/dap/algos/forcesend.py index 384cf2e..96aac64 100644 --- a/dap/algos/forcesend.py +++ b/dap/algos/forcesend.py @@ -56,10 +56,14 @@ def calc_apply_aggregation(results, data, data_summed, n_aggregated_images): if "aggregation_max" not in results: return data, force_send_visualisation, data_summed, n_aggregated_images - if data_summed is not None: - data += data_summed + if data_summed is None: + data_summed = data.copy() + n_aggregated_images = 1 + else: + data_summed += data n_aggregated_images += 1 - data_summed = data.copy() + + data = data_summed results["aggregated_images"] = n_aggregated_images results["worker"] = 1 #TODO: keep this for backwards compatibility? 
-- 2.49.0 From 48c6bbe7c70b8e9e7108d2c4a7f60d7195a5fe74 Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Fri, 9 Aug 2024 15:04:22 +0200 Subject: [PATCH 118/159] zero is zero --- dap/algos/forcesend.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/dap/algos/forcesend.py b/dap/algos/forcesend.py index 96aac64..84dd6da 100644 --- a/dap/algos/forcesend.py +++ b/dap/algos/forcesend.py @@ -40,10 +40,10 @@ def calc_apply_threshold(results, data): #TODO: this is duplicated in calc_apply_threshold and calc_radial_integration threshold_min = float(results["threshold_min"]) threshold_max = float(results["threshold_max"]) - data[data < threshold_min] = 0.0 + data[data < threshold_min] = 0 #TODO: skipping max is a guess, but not obvious/symmetric -- better to ensure the order min < max by switching them if needed if threshold_max > threshold_min: - data[data > threshold_max] = 0.0 + data[data > threshold_max] = 0 def calc_apply_aggregation(results, data, data_summed, n_aggregated_images): -- 2.49.0 From 2103583aca1cb7deda00fe4bf3a811dd38b19caf Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Fri, 9 Aug 2024 15:41:46 +0200 Subject: [PATCH 119/159] moved calc_apply_threshold out of calc_radial_integration --- dap/algos/radprof.py | 29 ++++++++++++++++++----------- 1 file changed, 18 insertions(+), 11 deletions(-) diff --git a/dap/algos/radprof.py b/dap/algos/radprof.py index b48456e..a083cd4 100644 --- a/dap/algos/radprof.py +++ b/dap/algos/radprof.py @@ -16,17 +16,7 @@ def calc_radial_integration(results, data, pixel_mask_pf): r_min = min(rad) r_max = max(rad) + 1 - apply_threshold = results.get("apply_threshold", False) - - #TODO: this is duplicated in calc_apply_threshold and calc_force_send - if apply_threshold and all(k in results for k in ("threshold_min", "threshold_max")): - threshold_min = float(results["threshold_min"]) - threshold_max = float(results["threshold_max"]) - data = data.copy() # do the following in-place changes on a copy - 
data[data < threshold_min] = np.nan - #TODO: skipping max is a guess, but not obvious/symmetric -- better to ensure the order min < max by switching them if needed - if threshold_max > threshold_min: - data[data > threshold_max] = np.nan + data = calc_apply_threshold(results, data) rp = radial_profile(data, rad, norm, pixel_mask_pf) @@ -72,3 +62,20 @@ def radial_profile(data, rad, norm, keep_pixels): +def calc_apply_threshold(results, data): + apply_threshold = results.get("apply_threshold", False) + + #TODO: this is duplicated in calc_apply_threshold and calc_force_send + if apply_threshold and all(k in results for k in ("threshold_min", "threshold_max")): + threshold_min = float(results["threshold_min"]) + threshold_max = float(results["threshold_max"]) + data = data.copy() # do the following in-place changes on a copy + data[data < threshold_min] = np.nan + #TODO: skipping max is a guess, but not obvious/symmetric -- better to ensure the order min < max by switching them if needed + if threshold_max > threshold_min: + data[data > threshold_max] = np.nan + + return data + + + -- 2.49.0 From 5f055c8718b3adb116ab9c5c67094255d16b5ae0 Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Fri, 9 Aug 2024 15:54:54 +0200 Subject: [PATCH 120/159] flatten the code; early exit --- dap/algos/radprof.py | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/dap/algos/radprof.py b/dap/algos/radprof.py index a083cd4..e1d877d 100644 --- a/dap/algos/radprof.py +++ b/dap/algos/radprof.py @@ -64,16 +64,21 @@ def radial_profile(data, rad, norm, keep_pixels): def calc_apply_threshold(results, data): apply_threshold = results.get("apply_threshold", False) + if not apply_threshold: + return + + for k in ("threshold_min", "threshold_max"): + if k not in results: + return #TODO: this is duplicated in calc_apply_threshold and calc_force_send - if apply_threshold and all(k in results for k in ("threshold_min", "threshold_max")): - threshold_min = 
float(results["threshold_min"]) - threshold_max = float(results["threshold_max"]) - data = data.copy() # do the following in-place changes on a copy - data[data < threshold_min] = np.nan - #TODO: skipping max is a guess, but not obvious/symmetric -- better to ensure the order min < max by switching them if needed - if threshold_max > threshold_min: - data[data > threshold_max] = np.nan + threshold_min = float(results["threshold_min"]) + threshold_max = float(results["threshold_max"]) + data = data.copy() # do the following in-place changes on a copy + data[data < threshold_min] = np.nan + #TODO: skipping max is a guess, but not obvious/symmetric -- better to ensure the order min < max by switching them if needed + if threshold_max > threshold_min: + data[data > threshold_max] = np.nan return data -- 2.49.0 From 442a7dab2815179c01c442b54cf18bfc89cbbe9e Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Fri, 9 Aug 2024 15:57:17 +0200 Subject: [PATCH 121/159] moved comments out --- dap/algos/forcesend.py | 2 +- dap/algos/radprof.py | 5 +++-- dap/algos/thresh.py | 2 +- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/dap/algos/forcesend.py b/dap/algos/forcesend.py index 84dd6da..c9b1e08 100644 --- a/dap/algos/forcesend.py +++ b/dap/algos/forcesend.py @@ -28,6 +28,7 @@ def calc_force_send(results, data, pixel_mask_pf, image, data_summed, n_aggregat +#TODO: this is duplicated in calc_apply_threshold and calc_radial_integration def calc_apply_threshold(results, data): apply_threshold = results.get("apply_threshold", False) if not apply_threshold: @@ -37,7 +38,6 @@ def calc_apply_threshold(results, data): if k not in results: return - #TODO: this is duplicated in calc_apply_threshold and calc_radial_integration threshold_min = float(results["threshold_min"]) threshold_max = float(results["threshold_max"]) data[data < threshold_min] = 0 diff --git a/dap/algos/radprof.py b/dap/algos/radprof.py index e1d877d..00fc3c8 100644 --- a/dap/algos/radprof.py +++ 
b/dap/algos/radprof.py @@ -62,6 +62,7 @@ def radial_profile(data, rad, norm, keep_pixels): +#TODO: this is duplicated in calc_apply_threshold and calc_force_send def calc_apply_threshold(results, data): apply_threshold = results.get("apply_threshold", False) if not apply_threshold: @@ -71,10 +72,10 @@ def calc_apply_threshold(results, data): if k not in results: return - #TODO: this is duplicated in calc_apply_threshold and calc_force_send + data = data.copy() # do the following in-place changes on a copy + threshold_min = float(results["threshold_min"]) threshold_max = float(results["threshold_max"]) - data = data.copy() # do the following in-place changes on a copy data[data < threshold_min] = np.nan #TODO: skipping max is a guess, but not obvious/symmetric -- better to ensure the order min < max by switching them if needed if threshold_max > threshold_min: diff --git a/dap/algos/thresh.py b/dap/algos/thresh.py index 6efb896..075e8e7 100644 --- a/dap/algos/thresh.py +++ b/dap/algos/thresh.py @@ -1,6 +1,7 @@ import numpy as np +#TODO: this is duplicated in calc_radial_integration and calc_force_send def calc_apply_threshold(results, data): apply_threshold = results.get("apply_threshold", False) if not apply_threshold: @@ -13,7 +14,6 @@ def calc_apply_threshold(results, data): threshold_value_choice = results.get("threshold_value", "NaN") threshold_value = 0 if threshold_value_choice == "0" else np.nan #TODO - #TODO: this is duplicated in calc_radial_integration and calc_force_send threshold_min = float(results["threshold_min"]) threshold_max = float(results["threshold_max"]) data[data < threshold_min] = threshold_value -- 2.49.0 From 34eb466622cafab0dc6690dfe2fcfb7130346288 Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Fri, 9 Aug 2024 17:10:00 +0200 Subject: [PATCH 122/159] fixed inconsistent returns --- dap/algos/radprof.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/dap/algos/radprof.py b/dap/algos/radprof.py index 00fc3c8..8dbc92e 
100644 --- a/dap/algos/radprof.py +++ b/dap/algos/radprof.py @@ -66,11 +66,11 @@ def radial_profile(data, rad, norm, keep_pixels): def calc_apply_threshold(results, data): apply_threshold = results.get("apply_threshold", False) if not apply_threshold: - return + return data for k in ("threshold_min", "threshold_max"): if k not in results: - return + return data data = data.copy() # do the following in-place changes on a copy -- 2.49.0 From dcdacf4bdbd1f502b63050426dea370c259f7a1c Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Fri, 9 Aug 2024 19:18:06 +0200 Subject: [PATCH 123/159] re-use function --- dap/algos/forcesend.py | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/dap/algos/forcesend.py b/dap/algos/forcesend.py index c9b1e08..63ea339 100644 --- a/dap/algos/forcesend.py +++ b/dap/algos/forcesend.py @@ -1,5 +1,7 @@ import numpy as np +from .mask import calc_mask_pixels + def calc_force_send(results, data, pixel_mask_pf, image, data_summed, n_aggregated_images): force_send_visualisation = False @@ -22,7 +24,7 @@ def calc_force_send(results, data, pixel_mask_pf, image, data_summed, n_aggregat data, force_send_visualisation, data_summed, n_aggregated_images = calc_apply_aggregation(results, data, data_summed, n_aggregated_images) - calc_apply_pixel_mask(data, pixel_mask_pf) # changes data in place + calc_mask_pixels(data, pixel_mask_pf) # changes data in place return data, force_send_visualisation, data_summed, n_aggregated_images @@ -76,9 +78,4 @@ def calc_apply_aggregation(results, data, data_summed, n_aggregated_images): return data, force_send_visualisation, data_summed, n_aggregated_images -def calc_apply_pixel_mask(data, pixel_mask_pf): - if pixel_mask_pf is not None: - data[~pixel_mask_pf] = np.nan - - -- 2.49.0 From 2acf11670945d6d24b5e4335a8cb80004893d6ee Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Fri, 9 Aug 2024 19:45:32 +0200 Subject: [PATCH 124/159] re-use thresh function --- dap/algos/forcesend.py | 8 ++++---- 
dap/algos/radprof.py | 7 +++---- dap/algos/thresh.py | 15 ++++++++++++--- 3 files changed, 19 insertions(+), 11 deletions(-) diff --git a/dap/algos/forcesend.py b/dap/algos/forcesend.py index 63ea339..f1f205d 100644 --- a/dap/algos/forcesend.py +++ b/dap/algos/forcesend.py @@ -1,6 +1,7 @@ import numpy as np from .mask import calc_mask_pixels +from .thresh import threshold def calc_force_send(results, data, pixel_mask_pf, image, data_summed, n_aggregated_images): @@ -42,10 +43,9 @@ def calc_apply_threshold(results, data): threshold_min = float(results["threshold_min"]) threshold_max = float(results["threshold_max"]) - data[data < threshold_min] = 0 - #TODO: skipping max is a guess, but not obvious/symmetric -- better to ensure the order min < max by switching them if needed - if threshold_max > threshold_min: - data[data > threshold_max] = 0 + + threshold(data, threshold_min, threshold_max, 0) + def calc_apply_aggregation(results, data, data_summed, n_aggregated_images): diff --git a/dap/algos/radprof.py b/dap/algos/radprof.py index 8dbc92e..42bc268 100644 --- a/dap/algos/radprof.py +++ b/dap/algos/radprof.py @@ -1,5 +1,6 @@ import numpy as np +from .thresh import threshold from .utils import npmemo @@ -76,10 +77,8 @@ def calc_apply_threshold(results, data): threshold_min = float(results["threshold_min"]) threshold_max = float(results["threshold_max"]) - data[data < threshold_min] = np.nan - #TODO: skipping max is a guess, but not obvious/symmetric -- better to ensure the order min < max by switching them if needed - if threshold_max > threshold_min: - data[data > threshold_max] = np.nan + + threshold(data, threshold_min, threshold_max, np.nan) return data diff --git a/dap/algos/thresh.py b/dap/algos/thresh.py index 075e8e7..4cd2ac4 100644 --- a/dap/algos/thresh.py +++ b/dap/algos/thresh.py @@ -16,10 +16,19 @@ def calc_apply_threshold(results, data): threshold_min = float(results["threshold_min"]) threshold_max = float(results["threshold_max"]) - data[data < 
threshold_min] = threshold_value
+
+    threshold(data, threshold_min, threshold_max, threshold_value)
+
+
+
+def threshold(data, vmin, vmax, replacement):
+    """
+    threshold data in place by replacing values < vmin and values > vmax with replacement
+    """
+    data[data < vmin] = replacement
     #TODO: skipping max is a guess, but not obvious/symmetric -- better to ensure the order min < max by switching them if needed
-    if threshold_max > threshold_min:
-        data[data > threshold_max] = threshold_value
+    if vmax > vmin:
+        data[data > vmax] = replacement
 
 
-- 
2.49.0

From a8c73bab0fb729a4b75b91da6cff3817d52a4ed3 Mon Sep 17 00:00:00 2001
From: Sven Augustin
Date: Mon, 12 Aug 2024 10:04:10 +0200
Subject: [PATCH 125/159] count correctly: 1, 1, 2, 3, ... -> 0, 1, 2, 3, ...

---
 dap/algos/forcesend.py | 4 ++--
 dap/worker.py | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/dap/algos/forcesend.py b/dap/algos/forcesend.py
index f1f205d..73d19c2 100644
--- a/dap/algos/forcesend.py
+++ b/dap/algos/forcesend.py
@@ -15,7 +15,7 @@ def calc_force_send(results, data, pixel_mask_pf, image, data_summed, n_aggregat
 
     if not apply_aggregation:
         data_summed = None
-        n_aggregated_images = 1
+        n_aggregated_images = 0
 
     if not apply_aggregation and not apply_threshold:
         data = image
@@ -73,7 +73,7 @@ def calc_apply_aggregation(results, data, data_summed, n_aggregated_images):
     if n_aggregated_images >= results["aggregation_max"]:
         force_send_visualisation = True
         data_summed = None
-        n_aggregated_images = 1
+        n_aggregated_images = 0
 
     return data, force_send_visualisation, data_summed, n_aggregated_images
 
diff --git a/dap/worker.py b/dap/worker.py
index 25655bd..3919c33 100644
--- a/dap/worker.py
+++ b/dap/worker.py
@@ -45,7 +45,7 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host
 
     data_summed = None
-    n_aggregated_images = 1
+    n_aggregated_images = 0
 
 
     while True:
 
-- 
2.49.0

From d8b4931a288c2a9aad565eedbb8500fa2b3f9009 Mon Sep 17 00:00:00 2001
From: Sven Augustin
Date: Tue, 13 Aug 2024 09:52:24 +0200 Subject: [PATCH 126/159] added/use Aggregator to encapsulate data_summed and n_aggregated_images --- dap/algos/forcesend.py | 34 ++++++++++++++-------------------- dap/utils/__init__.py | 1 + dap/utils/aggregator.py | 26 ++++++++++++++++++++++++++ dap/worker.py | 8 +++----- 4 files changed, 44 insertions(+), 25 deletions(-) create mode 100644 dap/utils/aggregator.py diff --git a/dap/algos/forcesend.py b/dap/algos/forcesend.py index 73d19c2..1c243b4 100644 --- a/dap/algos/forcesend.py +++ b/dap/algos/forcesend.py @@ -4,30 +4,29 @@ from .mask import calc_mask_pixels from .thresh import threshold -def calc_force_send(results, data, pixel_mask_pf, image, data_summed, n_aggregated_images): +def calc_force_send(results, data, pixel_mask_pf, image, aggregator): force_send_visualisation = False if data.dtype == np.uint16: - return data, force_send_visualisation, data_summed, n_aggregated_images + return data, force_send_visualisation, aggregator apply_aggregation = results.get("apply_aggregation", False) apply_threshold = results.get("apply_threshold", False) if not apply_aggregation: - data_summed = None - n_aggregated_images = 0 + aggregator.reset() if not apply_aggregation and not apply_threshold: data = image - return data, force_send_visualisation, data_summed, n_aggregated_images + return data, force_send_visualisation, aggregator calc_apply_threshold(results, data) # changes data in place - data, force_send_visualisation, data_summed, n_aggregated_images = calc_apply_aggregation(results, data, data_summed, n_aggregated_images) + data, force_send_visualisation, aggregator = calc_apply_aggregation(results, data, aggregator) calc_mask_pixels(data, pixel_mask_pf) # changes data in place - return data, force_send_visualisation, data_summed, n_aggregated_images + return data, force_send_visualisation, aggregator @@ -48,34 +47,29 @@ def calc_apply_threshold(results, data): -def calc_apply_aggregation(results, data, data_summed, 
n_aggregated_images): +def calc_apply_aggregation(results, data, aggregator): force_send_visualisation = False apply_aggregation = results.get("apply_aggregation", False) if not apply_aggregation: - return data, force_send_visualisation, data_summed, n_aggregated_images + return data, force_send_visualisation, aggregator if "aggregation_max" not in results: - return data, force_send_visualisation, data_summed, n_aggregated_images + return data, force_send_visualisation, aggregator - if data_summed is None: - data_summed = data.copy() - n_aggregated_images = 1 - else: - data_summed += data - n_aggregated_images += 1 + aggregator += data - data = data_summed + data = aggregator.data + n_aggregated_images = aggregator.counter results["aggregated_images"] = n_aggregated_images results["worker"] = 1 #TODO: keep this for backwards compatibility? if n_aggregated_images >= results["aggregation_max"]: force_send_visualisation = True - data_summed = None - n_aggregated_images = 0 + aggregator.reset() - return data, force_send_visualisation, data_summed, n_aggregated_images + return data, force_send_visualisation, aggregator diff --git a/dap/utils/__init__.py b/dap/utils/__init__.py index 4474c56..45f8cbe 100644 --- a/dap/utils/__init__.py +++ b/dap/utils/__init__.py @@ -1,4 +1,5 @@ +from .aggregator import Aggregator from .bits import read_bit from .bufjson import BufferedJSON diff --git a/dap/utils/aggregator.py b/dap/utils/aggregator.py new file mode 100644 index 0000000..bd6d315 --- /dev/null +++ b/dap/utils/aggregator.py @@ -0,0 +1,26 @@ + +class Aggregator: + + def __init__(self): + self.reset() + + def reset(self): + self.data = None + self.counter = 0 + + def add(self, item): + if self.data is None: + self.data = item.copy() + self.counter = 1 + else: + self.data += item + self.counter += 1 + return self + + __iadd__ = add + + def __repr__(self): + return f"{self.data!r} / {self.counter}" + + + diff --git a/dap/worker.py b/dap/worker.py index 3919c33..9f8c9f1 100644 
--- a/dap/worker.py +++ b/dap/worker.py @@ -4,7 +4,7 @@ from random import randint import numpy as np from algos import calc_apply_threshold, calc_force_send, calc_mask_pixels, calc_peakfinder_analysis, calc_radial_integration, calc_roi, calc_spi_analysis, JFData -from utils import BufferedJSON, read_bit +from utils import Aggregator, BufferedJSON, read_bit from zmqsocks import ZMQSockets @@ -43,9 +43,7 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host zmq_socks = ZMQSockets(backend_address, accumulator_host, accumulator_port, visualisation_host, visualisation_port) - - data_summed = None - n_aggregated_images = 0 + aggregator = Aggregator() while True: @@ -118,7 +116,7 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host calc_peakfinder_analysis(results, pfdata, pixel_mask_pf) # ??? - data, force_send_visualisation, data_summed, n_aggregated_images = calc_force_send(results, data, pixel_mask_pf, image, data_summed, n_aggregated_images) + data, force_send_visualisation, aggregator = calc_force_send(results, data, pixel_mask_pf, image, aggregator) results["type"] = str(data.dtype) results["shape"] = data.shape -- 2.49.0 From e0e9cf02d74f567bf487192970959ca729159fc0 Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Tue, 13 Aug 2024 09:52:38 +0200 Subject: [PATCH 127/159] added ignore list --- dap/.gitignore | 162 +++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 162 insertions(+) create mode 100644 dap/.gitignore diff --git a/dap/.gitignore b/dap/.gitignore new file mode 100644 index 0000000..82f9275 --- /dev/null +++ b/dap/.gitignore @@ -0,0 +1,162 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually 
these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# poetry +# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control +#poetry.lock + +# pdm +# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. +#pdm.lock +# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it +# in version control. 
+# https://pdm.fming.dev/latest/usage/project/#working-with-version-control +.pdm.toml +.pdm-python +.pdm-build/ + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +# PyCharm +# JetBrains specific template is maintained in a separate JetBrains.gitignore that can +# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore +# and can be added to the global gitignore or merged into this file. For a more nuclear +# option (not recommended) you can uncomment the following to ignore the entire idea folder. 
+#.idea/ -- 2.49.0 From fd2345c6112dc7e18d9d6a4dc3111dd93be29a94 Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Tue, 13 Aug 2024 13:48:45 +0200 Subject: [PATCH 128/159] aggregator changes are in-place --- dap/algos/forcesend.py | 14 +++++++------- dap/worker.py | 2 +- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/dap/algos/forcesend.py b/dap/algos/forcesend.py index 1c243b4..e0a1866 100644 --- a/dap/algos/forcesend.py +++ b/dap/algos/forcesend.py @@ -8,7 +8,7 @@ def calc_force_send(results, data, pixel_mask_pf, image, aggregator): force_send_visualisation = False if data.dtype == np.uint16: - return data, force_send_visualisation, aggregator + return data, force_send_visualisation apply_aggregation = results.get("apply_aggregation", False) apply_threshold = results.get("apply_threshold", False) @@ -18,15 +18,15 @@ def calc_force_send(results, data, pixel_mask_pf, image, aggregator): if not apply_aggregation and not apply_threshold: data = image - return data, force_send_visualisation, aggregator + return data, force_send_visualisation calc_apply_threshold(results, data) # changes data in place - data, force_send_visualisation, aggregator = calc_apply_aggregation(results, data, aggregator) + data, force_send_visualisation = calc_apply_aggregation(results, data, aggregator) calc_mask_pixels(data, pixel_mask_pf) # changes data in place - return data, force_send_visualisation, aggregator + return data, force_send_visualisation @@ -52,10 +52,10 @@ def calc_apply_aggregation(results, data, aggregator): apply_aggregation = results.get("apply_aggregation", False) if not apply_aggregation: - return data, force_send_visualisation, aggregator + return data, force_send_visualisation if "aggregation_max" not in results: - return data, force_send_visualisation, aggregator + return data, force_send_visualisation aggregator += data @@ -69,7 +69,7 @@ def calc_apply_aggregation(results, data, aggregator): force_send_visualisation = True aggregator.reset() - 
return data, force_send_visualisation, aggregator + return data, force_send_visualisation diff --git a/dap/worker.py b/dap/worker.py index 9f8c9f1..69ed855 100644 --- a/dap/worker.py +++ b/dap/worker.py @@ -116,7 +116,7 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host calc_peakfinder_analysis(results, pfdata, pixel_mask_pf) # ??? - data, force_send_visualisation, aggregator = calc_force_send(results, data, pixel_mask_pf, image, aggregator) + data, force_send_visualisation = calc_force_send(results, data, pixel_mask_pf, image, aggregator) results["type"] = str(data.dtype) results["shape"] = data.shape -- 2.49.0 From c8d94732716e40034b468036586237c5194902f8 Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Thu, 15 Aug 2024 10:00:02 +0200 Subject: [PATCH 129/159] breaking this apart a bit more --- dap/worker.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/dap/worker.py b/dap/worker.py index 69ed855..ae811f4 100644 --- a/dap/worker.py +++ b/dap/worker.py @@ -128,9 +128,10 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host apply_aggregation = results.get("apply_aggregation", False) send_empty_cond1 = (apply_aggregation and "aggregation_max" in results and not force_send_visualisation) - send_empty_cond2 = (not results["is_good_frame"] or not (results["is_hit_frame"] or randint(1, skip_frames_rate) == 1)) + send_empty_cond2 = (not results["is_good_frame"]) + send_empty_cond3 = (not (results["is_hit_frame"] or randint(1, skip_frames_rate) == 1)) - if send_empty_cond1 or send_empty_cond2: + if send_empty_cond1 or send_empty_cond2 or send_empty_cond3: data = np.empty((2, 2), dtype=np.uint16) results["type"] = str(data.dtype) results["shape"] = data.shape -- 2.49.0 From faa774e4dc2fee97bc472975d50f9fcd8f694761 Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Thu, 15 Aug 2024 10:07:50 +0200 Subject: [PATCH 130/159] de morgan --- dap/worker.py | 2 +- 1 file changed, 1 insertion(+), 
1 deletion(-) diff --git a/dap/worker.py b/dap/worker.py index ae811f4..dd2ad74 100644 --- a/dap/worker.py +++ b/dap/worker.py @@ -129,7 +129,7 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host send_empty_cond1 = (apply_aggregation and "aggregation_max" in results and not force_send_visualisation) send_empty_cond2 = (not results["is_good_frame"]) - send_empty_cond3 = (not (results["is_hit_frame"] or randint(1, skip_frames_rate) == 1)) + send_empty_cond3 = (not results["is_hit_frame"] and randint(1, skip_frames_rate) != 1) if send_empty_cond1 or send_empty_cond2 or send_empty_cond3: data = np.empty((2, 2), dtype=np.uint16) -- 2.49.0 From 17e2aa10969ddb3df64f2f1f2bb6cf79c57c5b00 Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Mon, 19 Aug 2024 12:24:21 +0200 Subject: [PATCH 131/159] tried to make logic self-explanatory --- dap/worker.py | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/dap/worker.py b/dap/worker.py index dd2ad74..371ceb4 100644 --- a/dap/worker.py +++ b/dap/worker.py @@ -116,7 +116,7 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host calc_peakfinder_analysis(results, pfdata, pixel_mask_pf) # ??? 
- data, force_send_visualisation = calc_force_send(results, data, pixel_mask_pf, image, aggregator) + data, aggregation_is_ready = calc_force_send(results, data, pixel_mask_pf, image, aggregator) results["type"] = str(data.dtype) results["shape"] = data.shape @@ -126,12 +126,17 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host apply_aggregation = results.get("apply_aggregation", False) + aggregation_is_enabled = (apply_aggregation and "aggregation_max" in results) + aggregation_is_enabled_but_not_ready = (aggregation_is_enabled and not aggregation_is_ready) - send_empty_cond1 = (apply_aggregation and "aggregation_max" in results and not force_send_visualisation) - send_empty_cond2 = (not results["is_good_frame"]) - send_empty_cond3 = (not results["is_hit_frame"] and randint(1, skip_frames_rate) != 1) + is_bad_frame = (not results["is_good_frame"]) - if send_empty_cond1 or send_empty_cond2 or send_empty_cond3: + # hits are sent at full rate, but no-hits are sent at reduced frequency + is_no_hit_frame = (not results["is_hit_frame"]) + random_skip = (randint(1, skip_frames_rate) != 1) + is_no_hit_frame_and_skipped = (is_no_hit_frame and random_skip) + + if aggregation_is_enabled_but_not_ready or is_bad_frame or is_no_hit_frame_and_skipped: data = np.empty((2, 2), dtype=np.uint16) results["type"] = str(data.dtype) results["shape"] = data.shape -- 2.49.0 From cea2b0c741e8c97745268f602ec20ffa375cdd3e Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Mon, 19 Aug 2024 12:41:49 +0200 Subject: [PATCH 132/159] moved random skip logic into separate file, added some comments on how it works --- dap/utils/__init__.py | 1 + dap/utils/randskip.py | 20 ++++++++++++++++++++ dap/worker.py | 5 ++--- 3 files changed, 23 insertions(+), 3 deletions(-) create mode 100644 dap/utils/randskip.py diff --git a/dap/utils/__init__.py b/dap/utils/__init__.py index 45f8cbe..c195111 100644 --- a/dap/utils/__init__.py +++ b/dap/utils/__init__.py @@ -2,5 +2,6 @@ 
from .aggregator import Aggregator from .bits import read_bit from .bufjson import BufferedJSON +from .randskip import randskip diff --git a/dap/utils/randskip.py b/dap/utils/randskip.py new file mode 100644 index 0000000..fcc8758 --- /dev/null +++ b/dap/utils/randskip.py @@ -0,0 +1,20 @@ +from random import randint + + +def randskip(skip_rate): + return (randint(1, skip_rate) != 1) + + +# from randint docs: +# randint(a, b) +# Return random integer in range [a, b], including both end points. + +# thus: +# randskip(1) -> False 100% of times (never skip) +# randskip(10) -> False 10% of times (skip 90%) +# randskip(100) -> False 1% of times (skip 99%) + +#TODO: does this actually make sense? + + + diff --git a/dap/worker.py b/dap/worker.py index 371ceb4..a6c0f06 100644 --- a/dap/worker.py +++ b/dap/worker.py @@ -1,10 +1,9 @@ import argparse -from random import randint import numpy as np from algos import calc_apply_threshold, calc_force_send, calc_mask_pixels, calc_peakfinder_analysis, calc_radial_integration, calc_roi, calc_spi_analysis, JFData -from utils import Aggregator, BufferedJSON, read_bit +from utils import Aggregator, BufferedJSON, randskip, read_bit from zmqsocks import ZMQSockets @@ -133,7 +132,7 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host # hits are sent at full rate, but no-hits are sent at reduced frequency is_no_hit_frame = (not results["is_hit_frame"]) - random_skip = (randint(1, skip_frames_rate) != 1) + random_skip = randskip(skip_frames_rate) is_no_hit_frame_and_skipped = (is_no_hit_frame and random_skip) if aggregation_is_enabled_but_not_ready or is_bad_frame or is_no_hit_frame_and_skipped: -- 2.49.0 From 52e7260fb13636b0c801a680df5f7a61fc4bb479 Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Mon, 19 Aug 2024 12:59:07 +0200 Subject: [PATCH 133/159] added (commented) an alternative idea --- dap/utils/randskip.py | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/dap/utils/randskip.py 
b/dap/utils/randskip.py index fcc8758..1c4f924 100644 --- a/dap/utils/randskip.py +++ b/dap/utils/randskip.py @@ -15,6 +15,26 @@ def randskip(skip_rate): # randskip(100) -> False 1% of times (skip 99%) #TODO: does this actually make sense? +# the following seems much clearer: + + +#from random import random + +#def randskip(percentage): +# """ +# Return True percentage % of times +# Return False (100 - percentage) % of times +# """ +# percentage /= 100 +# return random() < percentage + + +## thus: +# randskip(0) -> False 100% of times (never skip) +# randskip(1) -> False 99% of times (skip 1%) +# randskip(10) -> False 10% of times (skip 90%) +# randskip(99) -> False 1% of times (skip 99%) +# randskip(100) -> False 0% of times (always skip) -- 2.49.0 From d3ac74835e6499592cb22466ed6840878ef80cb6 Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Mon, 19 Aug 2024 13:02:36 +0200 Subject: [PATCH 134/159] simplified --- dap/utils/randskip.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/dap/utils/randskip.py b/dap/utils/randskip.py index 1c4f924..3870b14 100644 --- a/dap/utils/randskip.py +++ b/dap/utils/randskip.py @@ -25,8 +25,7 @@ def randskip(skip_rate): # Return True percentage % of times # Return False (100 - percentage) % of times # """ -# percentage /= 100 -# return random() < percentage +# return 100 * random() < percentage ## thus: -- 2.49.0 From 547b2b7fb1cab4c57cc67d690d4d4b84db1cc2c8 Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Tue, 20 Aug 2024 22:11:16 +0200 Subject: [PATCH 135/159] disentangle --- dap/algos/forcesend.py | 47 ++++++++++++++++++++++++++++++++---------- 1 file changed, 36 insertions(+), 11 deletions(-) diff --git a/dap/algos/forcesend.py b/dap/algos/forcesend.py index e0a1866..dc41ee1 100644 --- a/dap/algos/forcesend.py +++ b/dap/algos/forcesend.py @@ -5,10 +5,15 @@ from .thresh import threshold def calc_force_send(results, data, pixel_mask_pf, image, aggregator): - force_send_visualisation = False + data = 
calc_data(results, data, pixel_mask_pf, image, aggregator) + force_send_visualisation = calc_aggregation_ready(results, data, aggregator) + return data, force_send_visualisation + + +def calc_data(results, data, pixel_mask_pf, image, aggregator): if data.dtype == np.uint16: - return data, force_send_visualisation + return data apply_aggregation = results.get("apply_aggregation", False) apply_threshold = results.get("apply_threshold", False) @@ -18,15 +23,15 @@ def calc_force_send(results, data, pixel_mask_pf, image, aggregator): if not apply_aggregation and not apply_threshold: data = image - return data, force_send_visualisation + return data calc_apply_threshold(results, data) # changes data in place - data, force_send_visualisation = calc_apply_aggregation(results, data, aggregator) + data = calc_apply_aggregation(results, data, aggregator) calc_mask_pixels(data, pixel_mask_pf) # changes data in place - return data, force_send_visualisation + return data @@ -48,14 +53,12 @@ def calc_apply_threshold(results, data): def calc_apply_aggregation(results, data, aggregator): - force_send_visualisation = False - apply_aggregation = results.get("apply_aggregation", False) if not apply_aggregation: - return data, force_send_visualisation + return data if "aggregation_max" not in results: - return data, force_send_visualisation + return data aggregator += data @@ -65,11 +68,33 @@ def calc_apply_aggregation(results, data, aggregator): results["aggregated_images"] = n_aggregated_images results["worker"] = 1 #TODO: keep this for backwards compatibility? 
+ return data + + + +def calc_aggregation_ready(results, data, aggregator): + if data.dtype == np.uint16: + return False + + apply_aggregation = results.get("apply_aggregation", False) + apply_threshold = results.get("apply_threshold", False) + + if not apply_aggregation and not apply_threshold: + return False + + if not apply_aggregation: + return False + + if "aggregation_max" not in results: + return False + + n_aggregated_images = aggregator.counter + if n_aggregated_images >= results["aggregation_max"]: - force_send_visualisation = True aggregator.reset() + return True - return data, force_send_visualisation + return False -- 2.49.0 From 4b01cb0dca5323638db28b5895281a7de8b6dbbb Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Tue, 20 Aug 2024 22:17:56 +0200 Subject: [PATCH 136/159] naming; switch order --- dap/algos/forcesend.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/dap/algos/forcesend.py b/dap/algos/forcesend.py index dc41ee1..2f76b12 100644 --- a/dap/algos/forcesend.py +++ b/dap/algos/forcesend.py @@ -6,8 +6,8 @@ from .thresh import threshold def calc_force_send(results, data, pixel_mask_pf, image, aggregator): data = calc_data(results, data, pixel_mask_pf, image, aggregator) - force_send_visualisation = calc_aggregation_ready(results, data, aggregator) - return data, force_send_visualisation + aggregation_ready = calc_aggregation_ready(results, data, aggregator) + return data, aggregation_ready @@ -90,11 +90,11 @@ def calc_aggregation_ready(results, data, aggregator): n_aggregated_images = aggregator.counter - if n_aggregated_images >= results["aggregation_max"]: - aggregator.reset() - return True + if n_aggregated_images < results["aggregation_max"]: + return False - return False + aggregator.reset() + return True -- 2.49.0 From 18d6dd0b7b25ce55bdb6cd8d1457fdd7029ab8cd Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Tue, 20 Aug 2024 22:23:36 +0200 Subject: [PATCH 137/159] consistent naming with the other algos 
--- dap/algos/__init__.py | 2 +- dap/algos/{forcesend.py => aggregation.py} | 6 +++--- dap/worker.py | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) rename dap/algos/{forcesend.py => aggregation.py} (92%) diff --git a/dap/algos/__init__.py b/dap/algos/__init__.py index f30eec4..2b5f92e 100644 --- a/dap/algos/__init__.py +++ b/dap/algos/__init__.py @@ -1,7 +1,7 @@ from .addmask import calc_apply_additional_mask +from .aggregation import calc_apply_aggregation from .jfdata import JFData -from .forcesend import calc_force_send from .mask import calc_mask_pixels from .peakfind import calc_peakfinder_analysis from .radprof import calc_radial_integration diff --git a/dap/algos/forcesend.py b/dap/algos/aggregation.py similarity index 92% rename from dap/algos/forcesend.py rename to dap/algos/aggregation.py index 2f76b12..eeaf929 100644 --- a/dap/algos/forcesend.py +++ b/dap/algos/aggregation.py @@ -4,7 +4,7 @@ from .mask import calc_mask_pixels from .thresh import threshold -def calc_force_send(results, data, pixel_mask_pf, image, aggregator): +def calc_apply_aggregation(results, data, pixel_mask_pf, image, aggregator): data = calc_data(results, data, pixel_mask_pf, image, aggregator) aggregation_ready = calc_aggregation_ready(results, data, aggregator) return data, aggregation_ready @@ -27,7 +27,7 @@ def calc_data(results, data, pixel_mask_pf, image, aggregator): calc_apply_threshold(results, data) # changes data in place - data = calc_apply_aggregation(results, data, aggregator) + data = calc_aggregate(results, data, aggregator) calc_mask_pixels(data, pixel_mask_pf) # changes data in place @@ -52,7 +52,7 @@ def calc_apply_threshold(results, data): -def calc_apply_aggregation(results, data, aggregator): +def calc_aggregate(results, data, aggregator): apply_aggregation = results.get("apply_aggregation", False) if not apply_aggregation: return data diff --git a/dap/worker.py b/dap/worker.py index a6c0f06..f11fd0d 100644 --- a/dap/worker.py +++ b/dap/worker.py @@ 
-2,7 +2,7 @@ import argparse import numpy as np -from algos import calc_apply_threshold, calc_force_send, calc_mask_pixels, calc_peakfinder_analysis, calc_radial_integration, calc_roi, calc_spi_analysis, JFData +from algos import calc_apply_aggregation, calc_apply_threshold, calc_mask_pixels, calc_peakfinder_analysis, calc_radial_integration, calc_roi, calc_spi_analysis, JFData from utils import Aggregator, BufferedJSON, randskip, read_bit from zmqsocks import ZMQSockets @@ -115,7 +115,7 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host calc_peakfinder_analysis(results, pfdata, pixel_mask_pf) # ??? - data, aggregation_is_ready = calc_force_send(results, data, pixel_mask_pf, image, aggregator) + data, aggregation_is_ready = calc_apply_aggregation(results, data, pixel_mask_pf, image, aggregator) results["type"] = str(data.dtype) results["shape"] = data.shape -- 2.49.0 From b1f104a71d2418e9c4e7c92c2a158c407acf1228 Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Wed, 21 Aug 2024 15:55:23 +0200 Subject: [PATCH 138/159] disentangle --- dap/algos/aggregation.py | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/dap/algos/aggregation.py b/dap/algos/aggregation.py index eeaf929..dae7358 100644 --- a/dap/algos/aggregation.py +++ b/dap/algos/aggregation.py @@ -5,13 +5,14 @@ from .thresh import threshold def calc_apply_aggregation(results, data, pixel_mask_pf, image, aggregator): - data = calc_data(results, data, pixel_mask_pf, image, aggregator) + data = calc_data(results, data, image, aggregator) + calc_mask_pixels(data, pixel_mask_pf) # changes data in place aggregation_ready = calc_aggregation_ready(results, data, aggregator) return data, aggregation_ready -def calc_data(results, data, pixel_mask_pf, image, aggregator): +def calc_data(results, data, image, aggregator): if data.dtype == np.uint16: return data @@ -27,11 +28,7 @@ def calc_data(results, data, pixel_mask_pf, image, aggregator): 
calc_apply_threshold(results, data) # changes data in place - data = calc_aggregate(results, data, aggregator) - - calc_mask_pixels(data, pixel_mask_pf) # changes data in place - - return data + return calc_aggregate(results, data, aggregator) -- 2.49.0 From d0c1621109a65887af93a75f13d8286a583e28c7 Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Wed, 28 Aug 2024 10:08:18 +0200 Subject: [PATCH 139/159] consistency: removed special case that sends the raw data `image` instead of the processed image `data` [special case triggered if apply_aggregation and apply_threshold are off, but both can still be not applied if the respective parameters (aggregation_max, threshold_min, threshold_max) are not given, which would send `data`] --- dap/algos/aggregation.py | 7 +++---- dap/worker.py | 2 +- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/dap/algos/aggregation.py b/dap/algos/aggregation.py index dae7358..ae83523 100644 --- a/dap/algos/aggregation.py +++ b/dap/algos/aggregation.py @@ -4,15 +4,15 @@ from .mask import calc_mask_pixels from .thresh import threshold -def calc_apply_aggregation(results, data, pixel_mask_pf, image, aggregator): - data = calc_data(results, data, image, aggregator) +def calc_apply_aggregation(results, data, pixel_mask_pf, aggregator): + data = calc_data(results, data, aggregator) calc_mask_pixels(data, pixel_mask_pf) # changes data in place aggregation_ready = calc_aggregation_ready(results, data, aggregator) return data, aggregation_ready -def calc_data(results, data, image, aggregator): +def calc_data(results, data, aggregator): if data.dtype == np.uint16: return data @@ -23,7 +23,6 @@ def calc_data(results, data, image, aggregator): aggregator.reset() if not apply_aggregation and not apply_threshold: - data = image return data calc_apply_threshold(results, data) # changes data in place diff --git a/dap/worker.py b/dap/worker.py index f11fd0d..492ce39 100644 --- a/dap/worker.py +++ b/dap/worker.py @@ -115,7 +115,7 @@ def 
work(backend_address, accumulator_host, accumulator_port, visualisation_host calc_peakfinder_analysis(results, pfdata, pixel_mask_pf) # ??? - data, aggregation_is_ready = calc_apply_aggregation(results, data, pixel_mask_pf, image, aggregator) + data, aggregation_is_ready = calc_apply_aggregation(results, data, pixel_mask_pf, aggregator) results["type"] = str(data.dtype) results["shape"] = data.shape -- 2.49.0 From f2f871c880de3ba6589c0fe49ba9fed54972e1c2 Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Wed, 28 Aug 2024 10:13:21 +0200 Subject: [PATCH 140/159] renamed image -> raw_data --- dap/worker.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/dap/worker.py b/dap/worker.py index 492ce39..ecec014 100644 --- a/dap/worker.py +++ b/dap/worker.py @@ -49,7 +49,7 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host if not zmq_socks.has_data(): continue - image, metadata = zmq_socks.get_data() + raw_data, metadata = zmq_socks.get_data() if metadata["shape"] == [2, 2]: # this is used as marker for empty images continue @@ -90,7 +90,7 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host double_pixels = results.get("double_pixels", "mask") - data = jfdata.process(image, metadata, double_pixels) + data = jfdata.process(raw_data, metadata, double_pixels) if not data: continue @@ -98,7 +98,7 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host pixel_mask_pf = jfdata.get_pixel_mask(results, double_pixels) if pixel_mask_pf is not None: - saturated_pixels_y, saturated_pixels_x = jfdata.get_saturated_pixels(image, double_pixels) + saturated_pixels_y, saturated_pixels_x = jfdata.get_saturated_pixels(raw_data, double_pixels) results["saturated_pixels"] = len(saturated_pixels_x) results["saturated_pixels_x"] = saturated_pixels_x.tolist() results["saturated_pixels_y"] = saturated_pixels_y.tolist() -- 2.49.0 From 6ca0ebf945fedde9496e36a59c035c549ab259bc Mon Sep 17 
00:00:00 2001 From: Sven Augustin Date: Wed, 28 Aug 2024 10:16:30 +0200 Subject: [PATCH 141/159] renamed data -> image --- dap/worker.py | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/dap/worker.py b/dap/worker.py index ecec014..13720e4 100644 --- a/dap/worker.py +++ b/dap/worker.py @@ -90,9 +90,9 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host double_pixels = results.get("double_pixels", "mask") - data = jfdata.process(raw_data, metadata, double_pixels) + image = jfdata.process(raw_data, metadata, double_pixels) - if not data: + if not image: continue pixel_mask_pf = jfdata.get_pixel_mask(results, double_pixels) @@ -104,9 +104,9 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host results["saturated_pixels_y"] = saturated_pixels_y.tolist() - calc_radial_integration(results, data, pixel_mask_pf) + calc_radial_integration(results, image, pixel_mask_pf) - pfdata = data.copy() #TODO: is this copy needed? + pfdata = image.copy() #TODO: is this copy needed? calc_mask_pixels(pfdata, pixel_mask_pf) # changes pfdata in place calc_apply_threshold(results, pfdata) # changes pfdata in place @@ -115,10 +115,10 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host calc_peakfinder_analysis(results, pfdata, pixel_mask_pf) # ??? 
- data, aggregation_is_ready = calc_apply_aggregation(results, data, pixel_mask_pf, aggregator) + image, aggregation_is_ready = calc_apply_aggregation(results, image, pixel_mask_pf, aggregator) - results["type"] = str(data.dtype) - results["shape"] = data.shape + results["type"] = str(image.dtype) + results["shape"] = image.shape zmq_socks.send_accumulator(results) @@ -136,11 +136,11 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host is_no_hit_frame_and_skipped = (is_no_hit_frame and random_skip) if aggregation_is_enabled_but_not_ready or is_bad_frame or is_no_hit_frame_and_skipped: - data = np.empty((2, 2), dtype=np.uint16) - results["type"] = str(data.dtype) - results["shape"] = data.shape + image = np.empty((2, 2), dtype=np.uint16) + results["type"] = str(image.dtype) + results["shape"] = image.shape - zmq_socks.send_visualisation(results, data) + zmq_socks.send_visualisation(results, image) -- 2.49.0 From 8981eab522768495a2d04b9d3d4cbd8eaaf41791 Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Wed, 28 Aug 2024 10:19:01 +0200 Subject: [PATCH 142/159] renamed pfdata -> pfimage --- dap/worker.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/dap/worker.py b/dap/worker.py index 13720e4..d79c8cb 100644 --- a/dap/worker.py +++ b/dap/worker.py @@ -106,13 +106,13 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host calc_radial_integration(results, image, pixel_mask_pf) - pfdata = image.copy() #TODO: is this copy needed? + pfimage = image.copy() #TODO: is this copy needed? 
- calc_mask_pixels(pfdata, pixel_mask_pf) # changes pfdata in place - calc_apply_threshold(results, pfdata) # changes pfdata in place - calc_roi(results, pfdata, pixel_mask_pf) + calc_mask_pixels(pfimage, pixel_mask_pf) # changes pfimage in place + calc_apply_threshold(results, pfimage) # changes pfimage in place + calc_roi(results, pfimage, pixel_mask_pf) calc_spi_analysis(results) - calc_peakfinder_analysis(results, pfdata, pixel_mask_pf) + calc_peakfinder_analysis(results, pfimage, pixel_mask_pf) # ??? image, aggregation_is_ready = calc_apply_aggregation(results, image, pixel_mask_pf, aggregator) -- 2.49.0 From b99a32d9d716ba1bb3ac7c0c66b347ab2c221077 Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Wed, 28 Aug 2024 10:21:32 +0200 Subject: [PATCH 143/159] renamed raw_data -> raw_image --- dap/worker.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/dap/worker.py b/dap/worker.py index d79c8cb..4904952 100644 --- a/dap/worker.py +++ b/dap/worker.py @@ -49,7 +49,7 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host if not zmq_socks.has_data(): continue - raw_data, metadata = zmq_socks.get_data() + raw_image, metadata = zmq_socks.get_data() if metadata["shape"] == [2, 2]: # this is used as marker for empty images continue @@ -90,7 +90,7 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host double_pixels = results.get("double_pixels", "mask") - image = jfdata.process(raw_data, metadata, double_pixels) + image = jfdata.process(raw_image, metadata, double_pixels) if not image: continue @@ -98,7 +98,7 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host pixel_mask_pf = jfdata.get_pixel_mask(results, double_pixels) if pixel_mask_pf is not None: - saturated_pixels_y, saturated_pixels_x = jfdata.get_saturated_pixels(raw_data, double_pixels) + saturated_pixels_y, saturated_pixels_x = jfdata.get_saturated_pixels(raw_image, double_pixels) 
results["saturated_pixels"] = len(saturated_pixels_x) results["saturated_pixels_x"] = saturated_pixels_x.tolist() results["saturated_pixels_y"] = saturated_pixels_y.tolist() -- 2.49.0 From 1e6af40d56a72f057ac8d68a241f8234c793498f Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Wed, 28 Aug 2024 10:39:01 +0200 Subject: [PATCH 144/159] moved calc_apply_threshold out of calc_data, removed special case check --- dap/algos/aggregation.py | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/dap/algos/aggregation.py b/dap/algos/aggregation.py index ae83523..3b10789 100644 --- a/dap/algos/aggregation.py +++ b/dap/algos/aggregation.py @@ -5,6 +5,7 @@ from .thresh import threshold def calc_apply_aggregation(results, data, pixel_mask_pf, aggregator): + calc_apply_threshold(results, data) # changes data in place data = calc_data(results, data, aggregator) calc_mask_pixels(data, pixel_mask_pf) # changes data in place aggregation_ready = calc_aggregation_ready(results, data, aggregator) @@ -17,16 +18,10 @@ def calc_data(results, data, aggregator): return data apply_aggregation = results.get("apply_aggregation", False) - apply_threshold = results.get("apply_threshold", False) if not apply_aggregation: aggregator.reset() - if not apply_aggregation and not apply_threshold: - return data - - calc_apply_threshold(results, data) # changes data in place - return calc_aggregate(results, data, aggregator) -- 2.49.0 From cb79be0a3e82af477d1a288597ad1dd9f01126aa Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Wed, 28 Aug 2024 10:53:21 +0200 Subject: [PATCH 145/159] added/use Aggregator.is_ready --- dap/algos/aggregation.py | 4 ++-- dap/utils/aggregator.py | 3 +++ 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/dap/algos/aggregation.py b/dap/algos/aggregation.py index 3b10789..b728db4 100644 --- a/dap/algos/aggregation.py +++ b/dap/algos/aggregation.py @@ -79,9 +79,9 @@ def calc_aggregation_ready(results, data, aggregator): if "aggregation_max" not in 
results: return False - n_aggregated_images = aggregator.counter + aggregation_max = results["aggregation_max"] - if n_aggregated_images < results["aggregation_max"]: + if not aggregator.is_ready(aggregation_max): return False aggregator.reset() diff --git a/dap/utils/aggregator.py b/dap/utils/aggregator.py index bd6d315..1d3cfab 100644 --- a/dap/utils/aggregator.py +++ b/dap/utils/aggregator.py @@ -19,6 +19,9 @@ class Aggregator: __iadd__ = add + def is_ready(self, nmax): + return (self.counter >= nmax) + def __repr__(self): return f"{self.data!r} / {self.counter}" -- 2.49.0 From cc0113507bd6db70fcdf41186ece4db01c9ef009 Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Thu, 29 Aug 2024 10:36:31 +0200 Subject: [PATCH 146/159] removed unreachable special case for raw data (uint16) reaching aggregation [adapter.process calls handler.process with conversion=handler.can_convert(), which checks if gain and pedestal are truthy; with conversion, the dtype is float32 or int32; if handler.pedestal_file is falsy, the image is skipped upon receive -> special case can never trigger] --- dap/algos/aggregation.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/dap/algos/aggregation.py b/dap/algos/aggregation.py index b728db4..18109f0 100644 --- a/dap/algos/aggregation.py +++ b/dap/algos/aggregation.py @@ -14,9 +14,6 @@ def calc_apply_aggregation(results, data, pixel_mask_pf, aggregator): def calc_data(results, data, aggregator): - if data.dtype == np.uint16: - return data - apply_aggregation = results.get("apply_aggregation", False) if not apply_aggregation: @@ -64,9 +61,6 @@ def calc_aggregate(results, data, aggregator): def calc_aggregation_ready(results, data, aggregator): - if data.dtype == np.uint16: - return False - apply_aggregation = results.get("apply_aggregation", False) apply_threshold = results.get("apply_threshold", False) -- 2.49.0 From 470aa52ff96ab9c9923b010e3f96ba3f472ce9f4 Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Thu, 29 Aug 2024 10:56:46 
+0200 Subject: [PATCH 147/159] use the proper check from JU [this includes checking for the gain now, which probably was ommitted before since the gain file rarely changes and always exists] --- dap/algos/jfdata.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/dap/algos/jfdata.py b/dap/algos/jfdata.py index 92bea4d..bd2fa1e 100644 --- a/dap/algos/jfdata.py +++ b/dap/algos/jfdata.py @@ -35,8 +35,8 @@ class JFData: def process(self, image, metadata, double_pixels): data = self.ju_stream_adapter.process(image, metadata, double_pixels=double_pixels) - # the pedestal file is loaded in process(), this check needs to be afterwards - if not self.ju_stream_adapter.handler.pedestal_file: + # pedestal and gain files are loaded in process(), this check needs to be afterwards + if not self.ju_stream_adapter.handler.can_convert(): return None data = np.ascontiguousarray(data) -- 2.49.0 From 1350093d5ac28679259a32b12868e02706086971 Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Fri, 30 Aug 2024 11:52:35 +0200 Subject: [PATCH 148/159] removed apply_threshold check from calc_aggregation_ready as these are independent operations --- dap/algos/aggregation.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/dap/algos/aggregation.py b/dap/algos/aggregation.py index 18109f0..10b733e 100644 --- a/dap/algos/aggregation.py +++ b/dap/algos/aggregation.py @@ -62,11 +62,6 @@ def calc_aggregate(results, data, aggregator): def calc_aggregation_ready(results, data, aggregator): apply_aggregation = results.get("apply_aggregation", False) - apply_threshold = results.get("apply_threshold", False) - - if not apply_aggregation and not apply_threshold: - return False - if not apply_aggregation: return False -- 2.49.0 From 7d57d6c5e0941b7e2a182a8a45ba8697c7851bbd Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Fri, 30 Aug 2024 11:54:10 +0200 Subject: [PATCH 149/159] removed unused data argument and numpy import --- dap/algos/aggregation.py | 6 ++---- 1 file changed, 
2 insertions(+), 4 deletions(-) diff --git a/dap/algos/aggregation.py b/dap/algos/aggregation.py index 10b733e..afff5d7 100644 --- a/dap/algos/aggregation.py +++ b/dap/algos/aggregation.py @@ -1,5 +1,3 @@ -import numpy as np - from .mask import calc_mask_pixels from .thresh import threshold @@ -8,7 +6,7 @@ def calc_apply_aggregation(results, data, pixel_mask_pf, aggregator): calc_apply_threshold(results, data) # changes data in place data = calc_data(results, data, aggregator) calc_mask_pixels(data, pixel_mask_pf) # changes data in place - aggregation_ready = calc_aggregation_ready(results, data, aggregator) + aggregation_ready = calc_aggregation_ready(results, aggregator) return data, aggregation_ready @@ -60,7 +58,7 @@ def calc_aggregate(results, data, aggregator): -def calc_aggregation_ready(results, data, aggregator): +def calc_aggregation_ready(results, aggregator): apply_aggregation = results.get("apply_aggregation", False) if not apply_aggregation: return False -- 2.49.0 From 4db7f4f8cdbad30c5d1ec2764fd3ccb513c9ffcf Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Fri, 30 Aug 2024 14:53:05 +0200 Subject: [PATCH 150/159] moved "not ready" due to missing aggregation_max into Aggregator --- dap/algos/aggregation.py | 5 +---- dap/utils/aggregator.py | 2 ++ 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/dap/algos/aggregation.py b/dap/algos/aggregation.py index afff5d7..baf317e 100644 --- a/dap/algos/aggregation.py +++ b/dap/algos/aggregation.py @@ -63,10 +63,7 @@ def calc_aggregation_ready(results, aggregator): if not apply_aggregation: return False - if "aggregation_max" not in results: - return False - - aggregation_max = results["aggregation_max"] + aggregation_max = results.get("aggregation_max") if not aggregator.is_ready(aggregation_max): return False diff --git a/dap/utils/aggregator.py b/dap/utils/aggregator.py index 1d3cfab..d1f18f7 100644 --- a/dap/utils/aggregator.py +++ b/dap/utils/aggregator.py @@ -20,6 +20,8 @@ class Aggregator: 
__iadd__ = add def is_ready(self, nmax): + if nmax is None: + return False return (self.counter >= nmax) def __repr__(self): -- 2.49.0 From 7a05d8f095c2c36f861512be6829f571a85835ac Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Fri, 30 Aug 2024 15:56:27 +0200 Subject: [PATCH 151/159] moved aggregator.reset() [this now also resets if aggregation_max wasnt given] --- dap/algos/aggregation.py | 14 +++----------- 1 file changed, 3 insertions(+), 11 deletions(-) diff --git a/dap/algos/aggregation.py b/dap/algos/aggregation.py index baf317e..19a741d 100644 --- a/dap/algos/aggregation.py +++ b/dap/algos/aggregation.py @@ -4,23 +4,13 @@ from .thresh import threshold def calc_apply_aggregation(results, data, pixel_mask_pf, aggregator): calc_apply_threshold(results, data) # changes data in place - data = calc_data(results, data, aggregator) + data = calc_aggregate(results, data, aggregator) calc_mask_pixels(data, pixel_mask_pf) # changes data in place aggregation_ready = calc_aggregation_ready(results, aggregator) return data, aggregation_ready -def calc_data(results, data, aggregator): - apply_aggregation = results.get("apply_aggregation", False) - - if not apply_aggregation: - aggregator.reset() - - return calc_aggregate(results, data, aggregator) - - - #TODO: this is duplicated in calc_apply_threshold and calc_radial_integration def calc_apply_threshold(results, data): apply_threshold = results.get("apply_threshold", False) @@ -41,9 +31,11 @@ def calc_apply_threshold(results, data): def calc_aggregate(results, data, aggregator): apply_aggregation = results.get("apply_aggregation", False) if not apply_aggregation: + aggregator.reset() return data if "aggregation_max" not in results: + aggregator.reset() return data aggregator += data -- 2.49.0 From a65553e13ad5aec6df379ed9dddb934767188800 Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Fri, 30 Aug 2024 17:38:16 +0200 Subject: [PATCH 152/159] removed apply_aggregation check [aggregator cannot be ready if 
apply_aggregation is False] --- dap/algos/aggregation.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/dap/algos/aggregation.py b/dap/algos/aggregation.py index 19a741d..87987fd 100644 --- a/dap/algos/aggregation.py +++ b/dap/algos/aggregation.py @@ -51,12 +51,7 @@ def calc_aggregate(results, data, aggregator): def calc_aggregation_ready(results, aggregator): - apply_aggregation = results.get("apply_aggregation", False) - if not apply_aggregation: - return False - aggregation_max = results.get("aggregation_max") - if not aggregator.is_ready(aggregation_max): return False -- 2.49.0 From 35c50b472ffb15f32cffbc84e5dd47caab69e14b Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Fri, 30 Aug 2024 18:01:13 +0200 Subject: [PATCH 153/159] split updating nmax and checking readiness --- dap/algos/aggregation.py | 5 +++-- dap/utils/aggregator.py | 9 +++++---- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/dap/algos/aggregation.py b/dap/algos/aggregation.py index 87987fd..f7d3e71 100644 --- a/dap/algos/aggregation.py +++ b/dap/algos/aggregation.py @@ -51,8 +51,9 @@ def calc_aggregate(results, data, aggregator): def calc_aggregation_ready(results, aggregator): - aggregation_max = results.get("aggregation_max") - if not aggregator.is_ready(aggregation_max): + aggregator.nmax = results.get("aggregation_max") + + if not aggregator.is_ready(): return False aggregator.reset() diff --git a/dap/utils/aggregator.py b/dap/utils/aggregator.py index d1f18f7..03f7ba8 100644 --- a/dap/utils/aggregator.py +++ b/dap/utils/aggregator.py @@ -7,6 +7,7 @@ class Aggregator: def reset(self): self.data = None self.counter = 0 + self.nmax = None def add(self, item): if self.data is None: @@ -19,13 +20,13 @@ class Aggregator: __iadd__ = add - def is_ready(self, nmax): - if nmax is None: + def is_ready(self): + if self.nmax is None: return False - return (self.counter >= nmax) + return (self.counter >= self.nmax) def __repr__(self): - return f"{self.data!r} / {self.counter}" + 
return f"{self.data!r} # ({self.counter} / {self.nmax})" -- 2.49.0 From f18af38dc673d06ac380a345ab425140db2647f7 Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Wed, 4 Sep 2024 17:30:43 +0200 Subject: [PATCH 154/159] reordered operations --- dap/algos/aggregation.py | 22 +++++++++------------- 1 file changed, 9 insertions(+), 13 deletions(-) diff --git a/dap/algos/aggregation.py b/dap/algos/aggregation.py index f7d3e71..6281fbc 100644 --- a/dap/algos/aggregation.py +++ b/dap/algos/aggregation.py @@ -3,11 +3,18 @@ from .thresh import threshold def calc_apply_aggregation(results, data, pixel_mask_pf, aggregator): + # last round was ready, restart + if aggregator.is_ready(): + aggregator.reset() + calc_apply_threshold(results, data) # changes data in place data = calc_aggregate(results, data, aggregator) calc_mask_pixels(data, pixel_mask_pf) # changes data in place - aggregation_ready = calc_aggregation_ready(results, aggregator) - return data, aggregation_ready + + aggregator.nmax = results.get("aggregation_max") + aggregation_is_ready = aggregator.is_ready() + + return data, aggregation_is_ready @@ -50,14 +57,3 @@ def calc_aggregate(results, data, aggregator): -def calc_aggregation_ready(results, aggregator): - aggregator.nmax = results.get("aggregation_max") - - if not aggregator.is_ready(): - return False - - aggregator.reset() - return True - - - -- 2.49.0 From 05905d183c4e088c66a20e631791e63ada5d593b Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Wed, 11 Sep 2024 21:36:14 +0200 Subject: [PATCH 155/159] bumped version of JF06 (4 -> 5) --- dap/algos/addmask.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dap/algos/addmask.py b/dap/algos/addmask.py index ffb187c..42c9842 100644 --- a/dap/algos/addmask.py +++ b/dap/algos/addmask.py @@ -9,7 +9,7 @@ def calc_apply_additional_mask(results, pixel_mask_pf): if not detector_name: return - if detector_name == "JF06T08V04": + if detector_name == "JF06T08V05": # edge pixels pixel_mask_pf[0:1030, 
1100] = 0 pixel_mask_pf[0:1030, 1613] = 0 -- 2.49.0 From 5bf58d1193ab56c518c992273fa51650aa707a0c Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Thu, 3 Oct 2024 11:02:24 +0200 Subject: [PATCH 156/159] fixed dtype of "roi_intensities" --- dap/algos/roi.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dap/algos/roi.py b/dap/algos/roi.py index f36ddb3..e144370 100644 --- a/dap/algos/roi.py +++ b/dap/algos/roi.py @@ -31,7 +31,7 @@ def calc_roi(results, data, pixel_mask_pf): for ix1, ix2, iy1, iy2 in zip(roi_x1, roi_x2, roi_y1, roi_y2): data_roi = data[iy1:iy2, ix1:ix2] - roi_sum = np.nansum(data_roi) + roi_sum = np.nansum(data_roi, dtype=float) # data_roi is np.float32, which cannot be json serialized if threshold_value == "NaN": roi_area = (ix2 - ix1) * (iy2 - iy1) -- 2.49.0 From df17ae536ccbd5e809cd6501b6f80d8d6f8aa937 Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Thu, 3 Oct 2024 11:03:55 +0200 Subject: [PATCH 157/159] numpy arrays do not like to be treated as booleans --- dap/worker.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dap/worker.py b/dap/worker.py index 4904952..63fdca3 100644 --- a/dap/worker.py +++ b/dap/worker.py @@ -92,7 +92,7 @@ def work(backend_address, accumulator_host, accumulator_port, visualisation_host image = jfdata.process(raw_image, metadata, double_pixels) - if not image: + if image is None: continue pixel_mask_pf = jfdata.get_pixel_mask(results, double_pixels) -- 2.49.0 From 98bbe76936d65f4b986e5317d977343d9228bbe0 Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Thu, 3 Oct 2024 11:50:49 +0200 Subject: [PATCH 158/159] typo: vmin instead of vmax --- dap/algos/thresh.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dap/algos/thresh.py b/dap/algos/thresh.py index 4cd2ac4..69da60e 100644 --- a/dap/algos/thresh.py +++ b/dap/algos/thresh.py @@ -28,7 +28,7 @@ def threshold(data, vmin, vmax, replacement): data[data < vmin] = replacement #TODO: skipping max is a guess, but not 
obvious/symmetric -- better to ensure the order min < max by switching them if needed if vmax > vmin: - data[data > vmin] = replacement + data[data > vmax] = replacement -- 2.49.0 From 7a57201ca327ac964375f7b79452f9b8e4e628ab Mon Sep 17 00:00:00 2001 From: Sven Augustin Date: Thu, 3 Oct 2024 12:13:39 +0200 Subject: [PATCH 159/159] fixed dtype of "roi_intensities_normalised" if threshold_value != "NaN" --- dap/algos/roi.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dap/algos/roi.py b/dap/algos/roi.py index e144370..451416b 100644 --- a/dap/algos/roi.py +++ b/dap/algos/roi.py @@ -37,7 +37,7 @@ def calc_roi(results, data, pixel_mask_pf): roi_area = (ix2 - ix1) * (iy2 - iy1) roi_sum_norm = roi_sum / roi_area else: - roi_sum_norm = np.nanmean(data_roi) + roi_sum_norm = np.nanmean(data_roi, dtype=float) # data_roi is np.float32, which cannot be json serialized roi_indices_x = [ix1, ix2] roi_proj_x = np.nansum(data_roi, axis=0).tolist() -- 2.49.0