From 6a150e8d98deaf5e29bf2cbd7d3ef485f5761616 Mon Sep 17 00:00:00 2001 From: froejdh_e Date: Tue, 10 Dec 2024 17:21:05 +0100 Subject: [PATCH 001/120] WIP --- include/aare/ClusterFile.hpp | 2 +- include/aare/ClusterFinder.hpp | 145 +++++++++++++++------------------ include/aare/ClusterVector.hpp | 76 +++++++++++++++++ include/aare/Pedestal.hpp | 2 + include/aare/defs.hpp | 2 +- python/examples/play.py | 56 +++++++++++-- python/src/cluster.hpp | 83 +++++++++++++++++-- 7 files changed, 268 insertions(+), 98 deletions(-) create mode 100644 include/aare/ClusterVector.hpp diff --git a/include/aare/ClusterFile.hpp b/include/aare/ClusterFile.hpp index 2baf0f4..f866dd6 100644 --- a/include/aare/ClusterFile.hpp +++ b/include/aare/ClusterFile.hpp @@ -1,4 +1,4 @@ - +#pragma once #include "aare/defs.hpp" #include diff --git a/include/aare/ClusterFinder.hpp b/include/aare/ClusterFinder.hpp index addb6db..33a00ea 100644 --- a/include/aare/ClusterFinder.hpp +++ b/include/aare/ClusterFinder.hpp @@ -1,4 +1,6 @@ #pragma once +#include "aare/ClusterFile.hpp" +#include "aare/ClusterVector.hpp" #include "aare/Dtype.hpp" #include "aare/NDArray.hpp" #include "aare/NDView.hpp" @@ -9,7 +11,7 @@ namespace aare { /** enum to define the event types */ -enum eventType { +enum class eventType { PEDESTAL, /** pedestal */ NEIGHBOUR, /** neighbour i.e. 
below threshold, but in the cluster of a photon */ @@ -33,118 +35,101 @@ class ClusterFinder { Pedestal m_pedestal; public: - ClusterFinder(Shape<2> image_size, Shape<2>cluster_size, double nSigma = 5.0, - double threshold = 0.0) - : m_image_size(image_size), m_cluster_sizeX(cluster_size[0]), m_cluster_sizeY(cluster_size[1]), - m_threshold(threshold), m_nSigma(nSigma), + ClusterFinder(Shape<2> image_size, Shape<2> cluster_size, + double nSigma = 5.0, double threshold = 0.0) + : m_image_size(image_size), m_cluster_sizeX(cluster_size[0]), + m_cluster_sizeY(cluster_size[1]), m_threshold(threshold), + m_nSigma(nSigma), c2(sqrt((m_cluster_sizeY + 1) / 2 * (m_cluster_sizeX + 1) / 2)), c3(sqrt(m_cluster_sizeX * m_cluster_sizeY)), m_pedestal(image_size[0], image_size[1]) { - - // c2 = sqrt((cluster_sizeY + 1) / 2 * (cluster_sizeX + 1) / 2); - // c3 = sqrt(cluster_sizeX * cluster_sizeY); - }; + fmt::print("TypeIndex: {}\n", sizeof(Dtype)); + }; void push_pedestal_frame(NDView frame) { m_pedestal.push(frame); } - NDArray pedestal() { - return m_pedestal.mean(); - } + NDArray pedestal() { return m_pedestal.mean(); } - std::vector - find_clusters_without_threshold(NDView frame, - // Pedestal &pedestal, - bool late_update = false) { - struct pedestal_update { - int x; - int y; - FRAME_TYPE value; - }; - std::vector pedestal_updates; + NDArray noise() { return m_pedestal.std(); } - std::vector clusters; - std::vector> eventMask; - for (int i = 0; i < frame.shape(0); i++) { - eventMask.push_back(std::vector(frame.shape(1))); - } - long double val; - long double max; + ClusterVector + find_clusters_without_threshold(NDView frame) { + // std::vector clusters; + // std::vector clusters; //Hard coded 3x3 cluster + // clusters.reserve(2000); + ClusterVector clusters(m_cluster_sizeX, m_cluster_sizeY); + eventType event_type = eventType::PEDESTAL; + + // TODO! 
deal with even size clusters + // currently 3,3 -> +/- 1 + // 4,4 -> +/- 2 + short dy = m_cluster_sizeY / 2; + short dx = m_cluster_sizeX / 2; for (int iy = 0; iy < frame.shape(0); iy++) { for (int ix = 0; ix < frame.shape(1); ix++) { - // initialize max and total - max = std::numeric_limits::min(); - long double total = 0; - eventMask[iy][ix] = PEDESTAL; + PEDESTAL_TYPE max = std::numeric_limits::min(); + PEDESTAL_TYPE total = 0; - for (short ir = -(m_cluster_sizeY / 2); - ir < (m_cluster_sizeY / 2) + 1; ir++) { - for (short ic = -(m_cluster_sizeX / 2); - ic < (m_cluster_sizeX / 2) + 1; ic++) { + for (short ir = -dy; ir < dy + 1; ir++) { + for (short ic = -dx; ic < dx + 1; ic++) { if (ix + ic >= 0 && ix + ic < frame.shape(1) && iy + ir >= 0 && iy + ir < frame.shape(0)) { - val = frame(iy + ir, ix + ic) - - m_pedestal.mean(iy + ir, ix + ic); + PEDESTAL_TYPE val = + frame(iy + ir, ix + ic) - + m_pedestal.mean(iy + ir, ix + ic); + total += val; - if (val > max) { - max = val; - } + max = std::max(max, val); } } } - auto rms = m_pedestal.std(iy, ix); + PEDESTAL_TYPE rms = m_pedestal.std(iy, ix); + PEDESTAL_TYPE value = (frame(iy, ix) - m_pedestal.mean(iy, ix)); - if (frame(iy, ix) - m_pedestal.mean(iy, ix) < -m_nSigma * rms) { - eventMask[iy][ix] = NEGATIVE_PEDESTAL; - continue; + if (value < -m_nSigma * rms) { + continue; // NEGATIVE_PEDESTAL go to next pixel + // TODO! No pedestal update??? 
} else if (max > m_nSigma * rms) { - eventMask[iy][ix] = PHOTON; - + event_type = eventType::PHOTON; + if (value < max) + continue; // Not max go to the next pixel } else if (total > c3 * m_nSigma * rms) { - eventMask[iy][ix] = PHOTON; + event_type = eventType::PHOTON; } else { - if (late_update) { - pedestal_updates.push_back({ix, iy, frame(iy, ix)}); - } else { - m_pedestal.push(iy, ix, frame(iy, ix)); - } - continue; + m_pedestal.push(iy, ix, frame(iy, ix)); + continue; // It was a pedestal value nothing to store } - if (eventMask[iy][ix] == PHOTON && - (frame(iy, ix) - m_pedestal.mean(iy, ix)) >= max) { - eventMask[iy][ix] = PHOTON_MAX; - DynamicCluster cluster(m_cluster_sizeX, m_cluster_sizeY, - Dtype(typeid(PEDESTAL_TYPE))); - cluster.x = ix; - cluster.y = iy; - short i = 0; - for (short ir = -(m_cluster_sizeY / 2); - ir < (m_cluster_sizeY / 2) + 1; ir++) { - for (short ic = -(m_cluster_sizeX / 2); - ic < (m_cluster_sizeX / 2) + 1; ic++) { + // Store cluster + if (event_type == eventType::PHOTON && value >= max) { + event_type = eventType::PHOTON_MAX; + + short i = 0; + std::vector cluster_data(m_cluster_sizeX * + m_cluster_sizeY); + + for (short ir = -dy; ir < dy + 1; ir++) { + for (short ic = -dx; ic < dx + 1; ic++) { if (ix + ic >= 0 && ix + ic < frame.shape(1) && iy + ir >= 0 && iy + ir < frame.shape(0)) { PEDESTAL_TYPE tmp = static_cast( frame(iy + ir, ix + ic)) - m_pedestal.mean(iy + ir, ix + ic); - cluster.set(i, tmp); + cluster_data[i] = tmp; i++; } } } - clusters.push_back(cluster); + clusters.push_back( + ix, iy, + reinterpret_cast(cluster_data.data())); } } } - if (late_update) { - for (auto &update : pedestal_updates) { - m_pedestal.push(update.y, update.x, update.value); - } - } return clusters; } @@ -176,7 +161,7 @@ class ClusterFinder { // iterate over frame pixels for (int iy = 0; iy < frame.shape(0); iy++) { for (int ix = 0; ix < frame.shape(1); ix++) { - eventMask[iy][ix] = PEDESTAL; + eventMask[iy][ix] = eventType::PEDESTAL; // initialize 
max and total FRAME_TYPE max = std::numeric_limits::min(); long double total = 0; @@ -184,7 +169,7 @@ class ClusterFinder { pedestal.push(iy, ix, frame(iy, ix)); continue; } - eventMask[iy][ix] = NEIGHBOUR; + eventMask[iy][ix] = eventType::NEIGHBOUR; // iterate over cluster pixels around the current pixel (ix,iy) for (short ir = -(m_cluster_sizeY / 2); ir < (m_cluster_sizeY / 2) + 1; ir++) { @@ -220,18 +205,18 @@ class ClusterFinder { tthr2 = tthr - tthr2; } if (total > tthr1 || max > tthr) { - eventMask[iy][ix] = PHOTON; + eventMask[iy][ix] = eventType::PHOTON; nph(iy, ix) += 1; rest(iy, ix) -= m_threshold; } else { pedestal.push(iy, ix, frame(iy, ix)); continue; } - if (eventMask[iy][ix] == PHOTON && + if (eventMask[iy][ix] == eventType::PHOTON && frame(iy, ix) - pedestal.mean(iy, ix) >= max) { - eventMask[iy][ix] = PHOTON_MAX; + eventMask[iy][ix] = eventType::PHOTON_MAX; DynamicCluster cluster(m_cluster_sizeX, m_cluster_sizeY, - Dtype(typeid(FRAME_TYPE))); + Dtype(typeid(FRAME_TYPE))); cluster.x = ix; cluster.y = iy; short i = 0; diff --git a/include/aare/ClusterVector.hpp b/include/aare/ClusterVector.hpp new file mode 100644 index 0000000..6016e87 --- /dev/null +++ b/include/aare/ClusterVector.hpp @@ -0,0 +1,76 @@ +#pragma once +#include +#include + +template class ClusterVector { + int32_t m_cluster_size_x; + int32_t m_cluster_size_y; + std::byte *m_data{}; + size_t m_size{0}; + size_t m_capacity{10}; + + public: + ClusterVector(int32_t cluster_size_x, int32_t cluster_size_y) + : m_cluster_size_x(cluster_size_x), m_cluster_size_y(cluster_size_y) { + size_t num_bytes = element_offset() * m_capacity; + m_data = new std::byte[num_bytes]{}; + // fmt::print("Allocating {} bytes\n", num_bytes); + } + + // data better hold data of the right size! 
+ void push_back(int32_t x, int32_t y, const std::byte *data) { + if (m_size == m_capacity) { + m_capacity *= 2; + std::byte *new_data = + new std::byte[element_offset()*m_capacity]{}; + std::copy(m_data, + m_data + element_offset()*m_size, + new_data); + delete[] m_data; + m_data = new_data; + } + std::byte *ptr = element_ptr(m_size); + *reinterpret_cast(ptr) = x; + ptr += sizeof(int32_t); + *reinterpret_cast(ptr) = y; + ptr += sizeof(int32_t); + + std::copy(data, data + m_cluster_size_x * m_cluster_size_y * sizeof(T), + ptr); + m_size++; + } + + std::vector sum(){ + std::vector sums(m_size); + for (size_t i = 0; i < m_size; i++) { + T sum = 0; + std::byte *ptr = element_ptr(i) + 2 * sizeof(int32_t); + for (size_t j = 0; j < m_cluster_size_x * m_cluster_size_y; j++) { + sum += *reinterpret_cast(ptr); + ptr += sizeof(T); + } + sums[i] = sum; + } + return sums; + } + + size_t size() const { return m_size; } + size_t element_offset() const { + return sizeof(m_cluster_size_x) + sizeof(m_cluster_size_y) + + m_cluster_size_x * m_cluster_size_y * sizeof(T); + } + size_t element_offset(size_t i) const { + return element_offset() * i; + } + + std::byte* element_ptr(size_t i) { + return m_data + element_offset(i); + } + + int16_t cluster_size_x() const { return m_cluster_size_x; } + int16_t cluster_size_y() const { return m_cluster_size_y; } + + std::byte *data() { return m_data; } + + ~ClusterVector() { delete[] m_data; } +}; \ No newline at end of file diff --git a/include/aare/Pedestal.hpp b/include/aare/Pedestal.hpp index b5f245b..216c204 100644 --- a/include/aare/Pedestal.hpp +++ b/include/aare/Pedestal.hpp @@ -18,6 +18,8 @@ template class Pedestal { uint32_t m_samples; NDArray m_cur_samples; + + //TODO! 
in case of int needs to be changed to uint64_t NDArray m_sum; NDArray m_sum2; diff --git a/include/aare/defs.hpp b/include/aare/defs.hpp index 35b9624..7466410 100644 --- a/include/aare/defs.hpp +++ b/include/aare/defs.hpp @@ -47,7 +47,7 @@ class DynamicCluster { int cluster_sizeY; int16_t x; int16_t y; - Dtype dt; + Dtype dt; // 4 bytes private: std::byte *m_data; diff --git a/python/examples/play.py b/python/examples/play.py index 633b7e2..c5abcbf 100644 --- a/python/examples/play.py +++ b/python/examples/play.py @@ -1,15 +1,53 @@ +import sys +sys.path.append('/home/l_msdetect/erik/aare/build') + +#Our normal python imports +from pathlib import Path import matplotlib.pyplot as plt import numpy as np -plt.ion() -from pathlib import Path -from aare import ClusterFile +import boost_histogram as bh +import time -base = Path('~/data/aare_test_data/clusters').expanduser() +from aare import File, ClusterFinder, VarClusterFinder -f = ClusterFile(base / 'beam_En700eV_-40deg_300V_10us_d0_f0_100.clust') -# f = ClusterFile(base / 'single_frame_97_clustrers.clust') +base = Path('/mnt/sls_det_storage/matterhorn_data/aare_test_data/') + +f = File(base/'Moench03new/cu_half_speed_master_4.json') +cf = ClusterFinder((400,400), (3,3)) +for i in range(1000): + cf.push_pedestal_frame(f.read_frame()) + +fig, ax = plt.subplots() +im = ax.imshow(cf.pedestal()) +cf.pedestal() +cf.noise() + +N = 200 +t0 = time.perf_counter() +hist1 = bh.Histogram(bh.axis.Regular(40, -2, 4000)) +f.seek(0) + +t0 = time.perf_counter() +data = f.read_n(N) +t_elapsed = time.perf_counter()-t0 + +print(f'Reading {N} frames took {t_elapsed:.3f}s {N/t_elapsed:.0f} FPS') + +clusters = [] +for frame in data: + clusters += [cf.find_clusters_without_threshold(frame)] -for i in range(10): - fn, cl = f.read_frame() - print(fn, cl.size) +t_elapsed = time.perf_counter()-t0 +print(f'Clustering {N} frames took {t_elapsed:.2f}s {N/t_elapsed:.0f} FPS') + + +t0 = time.perf_counter() +total_clusters = 0 +for cl in clusters: + 
arr = np.array(cl, copy = False) + hist1.fill(arr['data'].sum(axis = 1).sum(axis = 1)) + total_clusters += cl.size +# t_elapsed = time.perf_counter()-t0 +print(f'Filling histogram with {total_clusters} clusters took: {t_elapsed:.3f}s') +print(f'Cluster per frame {total_clusters/N:.3f}') \ No newline at end of file diff --git a/python/src/cluster.hpp b/python/src/cluster.hpp index 6932281..b9296fd 100644 --- a/python/src/cluster.hpp +++ b/python/src/cluster.hpp @@ -1,4 +1,5 @@ #include "aare/ClusterFinder.hpp" +#include "aare/ClusterVector.hpp" #include "aare/NDView.hpp" #include "aare/Pedestal.hpp" #include "np_helper.hpp" @@ -9,30 +10,98 @@ #include namespace py = pybind11; +using pd_type = double; + +template +void define_cluster_vector(py::module &m, const std::string &typestr) { + auto class_name = fmt::format("ClusterVector_{}", typestr); + py::class_>(m, class_name.c_str(), py::buffer_protocol()) + .def(py::init()) + .def_property_readonly("size", &ClusterVector::size) + .def("element_offset", + py::overload_cast<>(&ClusterVector::element_offset, py::const_)) + .def_property_readonly("fmt", + [typestr](ClusterVector &v) { + return fmt::format( + "i:x:\ni:y:\n({},{}){}:data:", v.cluster_size_x(), + v.cluster_size_y(), typestr); + }) + .def("sum", &ClusterVector::sum) + .def_buffer([typestr](ClusterVector &v) -> py::buffer_info { + return py::buffer_info( + v.data(), /* Pointer to buffer */ + v.element_offset(), /* Size of one scalar */ + fmt::format("i:x:\ni:y:\n({},{}){}:data:", v.cluster_size_x(), + v.cluster_size_y(), + typestr), /* Format descriptor */ + 1, /* Number of dimensions */ + {v.size()}, /* Buffer dimensions */ + {v.element_offset()} /* Strides (in bytes) for each index */ + ); + }); +} void define_cluster_finder_bindings(py::module &m) { - py::class_>(m, "ClusterFinder") + py::class_>(m, "ClusterFinder") .def(py::init, Shape<2>>()) .def("push_pedestal_frame", - [](ClusterFinder &self, + [](ClusterFinder &self, py::array_t frame) { auto view = 
make_view_2d(frame); self.push_pedestal_frame(view); }) .def("pedestal", - [](ClusterFinder &self) { - auto pd = new NDArray{}; + [](ClusterFinder &self) { + auto pd = new NDArray{}; *pd = self.pedestal(); return return_image_data(pd); }) + .def("noise", + [](ClusterFinder &self) { + auto arr = new NDArray{}; + *arr = self.noise(); + return return_image_data(arr); + }) .def("find_clusters_without_threshold", - [](ClusterFinder &self, + [](ClusterFinder &self, py::array_t frame) { auto view = make_view_2d(frame); - auto clusters = self.find_clusters_without_threshold(view); - return clusters; + auto *vec = new ClusterVector( + self.find_clusters_without_threshold(view)); + return vec; }); + m.def("hello", []() { + fmt::print("Hello from C++\n"); + auto v = new ClusterVector(3, 3); + int data[] = {1, 2, 3, 4, 5, 6, 7, 8, 9}; + v->push_back(5, 30, reinterpret_cast(data)); + v->push_back(5, 55, reinterpret_cast(data)); + v->push_back(5, 20, reinterpret_cast(data)); + v->push_back(5, 30, reinterpret_cast(data)); + + return v; + }); + + define_cluster_vector(m, "i"); + define_cluster_vector(m, "d"); + + // py::class_>(m, "ClusterVector", py::buffer_protocol()) + // .def(py::init()) + // .def("size", &ClusterVector::size) + // .def("element_offset", + // py::overload_cast<>(&ClusterVector::element_offset, py::const_)) + // .def_buffer([](ClusterVector &v) -> py::buffer_info { + // return py::buffer_info( + // v.data(), /* Pointer to buffer */ + // v.element_offset(), /* Size of one scalar */ + // fmt::format("h:x:\nh:y:\n({},{})i:data:", v.cluster_size_x(), + // v.cluster_size_y()), /* Format descriptor */ 1, /* Number of + // dimensions */ {v.size()}, /* Buffer dimensions */ + // {v.element_offset()} /* Strides (in bytes) for each index */ + // ); + // }); + py::class_(m, "DynamicCluster", py::buffer_protocol()) .def(py::init()) .def("size", &DynamicCluster::size) From 7f2a23d5b1639e49547c27e4aebdc1a0d95cbd14 Mon Sep 17 00:00:00 2001 From: froejdh_e Date: Tue, 10 Dec 
2024 22:00:12 +0100 Subject: [PATCH 002/120] accumulating clusters in one array --- include/aare/ClusterFinder.hpp | 44 ++++++++------ include/aare/ClusterVector.hpp | 107 ++++++++++++++++++++++++--------- python/examples/play.py | 37 +++++++----- python/src/cluster.hpp | 19 ++++-- 4 files changed, 137 insertions(+), 70 deletions(-) diff --git a/include/aare/ClusterFinder.hpp b/include/aare/ClusterFinder.hpp index 33a00ea..5bd8866 100644 --- a/include/aare/ClusterFinder.hpp +++ b/include/aare/ClusterFinder.hpp @@ -23,7 +23,7 @@ enum class eventType { UNDEFINED_EVENT = -1 /** undefined */ }; -template +template class ClusterFinder { Shape<2> m_image_size; const int m_cluster_sizeX; @@ -33,6 +33,8 @@ class ClusterFinder { const double c2; const double c3; Pedestal m_pedestal; + ClusterVector m_clusters; + public: ClusterFinder(Shape<2> image_size, Shape<2> cluster_size, @@ -42,9 +44,10 @@ class ClusterFinder { m_nSigma(nSigma), c2(sqrt((m_cluster_sizeY + 1) / 2 * (m_cluster_sizeX + 1) / 2)), c3(sqrt(m_cluster_sizeX * m_cluster_sizeY)), - m_pedestal(image_size[0], image_size[1]) { - fmt::print("TypeIndex: {}\n", sizeof(Dtype)); - }; + m_pedestal(image_size[0], image_size[1]), + m_clusters(m_cluster_sizeX, m_cluster_sizeY, 1'000'000) { + // clusters = ClusterVector(m_cluster_sizeX, m_cluster_sizeY, 2000); + }; void push_pedestal_frame(NDView frame) { m_pedestal.push(frame); @@ -54,17 +57,20 @@ class ClusterFinder { NDArray noise() { return m_pedestal.std(); } - ClusterVector - find_clusters_without_threshold(NDView frame) { - // std::vector clusters; - // std::vector clusters; //Hard coded 3x3 cluster - // clusters.reserve(2000); - ClusterVector clusters(m_cluster_sizeX, m_cluster_sizeY); + ClusterVector steal_clusters() { + ClusterVector tmp = std::move(m_clusters); + m_clusters = ClusterVector(m_cluster_sizeX, m_cluster_sizeY, 2000); + return tmp; + } + void + find_clusters(NDView frame) { + // // size_t capacity = 2000; + // // ClusterVector 
clusters(m_cluster_sizeX, m_cluster_sizeY, capacity); eventType event_type = eventType::PEDESTAL; - // TODO! deal with even size clusters - // currently 3,3 -> +/- 1 - // 4,4 -> +/- 2 + // // TODO! deal with even size clusters + // // currently 3,3 -> +/- 1 + // // 4,4 -> +/- 2 short dy = m_cluster_sizeY / 2; short dx = m_cluster_sizeX / 2; @@ -108,29 +114,29 @@ class ClusterFinder { event_type = eventType::PHOTON_MAX; short i = 0; - std::vector cluster_data(m_cluster_sizeX * + std::vector cluster_data(m_cluster_sizeX * m_cluster_sizeY); for (short ir = -dy; ir < dy + 1; ir++) { for (short ic = -dx; ic < dx + 1; ic++) { if (ix + ic >= 0 && ix + ic < frame.shape(1) && iy + ir >= 0 && iy + ir < frame.shape(0)) { - PEDESTAL_TYPE tmp = - static_cast( + CT tmp = + static_cast( frame(iy + ir, ix + ic)) - m_pedestal.mean(iy + ir, ix + ic); - cluster_data[i] = tmp; + cluster_data[i] = tmp; //Watch for out of bounds access i++; } } } - clusters.push_back( + m_clusters.push_back( ix, iy, reinterpret_cast(cluster_data.data())); } } } - return clusters; + // return clusters; } // template diff --git a/include/aare/ClusterVector.hpp b/include/aare/ClusterVector.hpp index 6016e87..5445cbf 100644 --- a/include/aare/ClusterVector.hpp +++ b/include/aare/ClusterVector.hpp @@ -1,33 +1,74 @@ #pragma once #include #include +#include + +#include template class ClusterVector { int32_t m_cluster_size_x; int32_t m_cluster_size_y; std::byte *m_data{}; size_t m_size{0}; - size_t m_capacity{10}; + size_t m_capacity; public: - ClusterVector(int32_t cluster_size_x, int32_t cluster_size_y) - : m_cluster_size_x(cluster_size_x), m_cluster_size_y(cluster_size_y) { - size_t num_bytes = element_offset() * m_capacity; - m_data = new std::byte[num_bytes]{}; - // fmt::print("Allocating {} bytes\n", num_bytes); + ClusterVector(int32_t cluster_size_x, int32_t cluster_size_y, + size_t capacity = 1024) + : m_cluster_size_x(cluster_size_x), m_cluster_size_y(cluster_size_y), + m_capacity(capacity) { + 
allocate_buffer(capacity); + } + ~ClusterVector() { + fmt::print("~ClusterVector - size: {}, capacity: {}\n", m_size, + m_capacity); + delete[] m_data; + } + + // ClusterVector(const ClusterVector & other){ + // m_cluster_size_x = other.m_cluster_size_x; + // m_cluster_size_y = other.m_cluster_size_y; + // m_data = new std::byte[other.m_capacity]; + // std::copy(other.m_data, other.m_data + other.m_capacity, m_data); + // m_size = other.m_size; + // m_capacity = other.m_capacity; + // } + + ClusterVector(ClusterVector &&other) noexcept + : m_cluster_size_x(other.m_cluster_size_x), + m_cluster_size_y(other.m_cluster_size_y), m_data(other.m_data), + m_size(other.m_size), m_capacity(other.m_capacity) { + other.m_data = nullptr; + other.m_size = 0; + other.m_capacity = 0; + } + + //Move assignment operator + ClusterVector& operator=(ClusterVector &&other) noexcept { + if (this != &other) { + delete[] m_data; + m_cluster_size_x = other.m_cluster_size_x; + m_cluster_size_y = other.m_cluster_size_y; + m_data = other.m_data; + m_size = other.m_size; + m_capacity = other.m_capacity; + other.m_data = nullptr; + other.m_size = 0; + other.m_capacity = 0; + } + return *this; + } + + void reserve(size_t capacity) { + if (capacity > m_capacity) { + allocate_buffer(capacity); + } } // data better hold data of the right size! 
void push_back(int32_t x, int32_t y, const std::byte *data) { if (m_size == m_capacity) { - m_capacity *= 2; - std::byte *new_data = - new std::byte[element_offset()*m_capacity]{}; - std::copy(m_data, - m_data + element_offset()*m_size, - new_data); - delete[] m_data; - m_data = new_data; + allocate_buffer(m_capacity * 2); } std::byte *ptr = element_ptr(m_size); *reinterpret_cast(ptr) = x; @@ -40,16 +81,17 @@ template class ClusterVector { m_size++; } - std::vector sum(){ + std::vector sum() { std::vector sums(m_size); + const size_t stride = element_offset(); + const size_t n_pixels = m_cluster_size_x * m_cluster_size_y; + std::byte *ptr = m_data + 2 * sizeof(int32_t); // skip x and y + for (size_t i = 0; i < m_size; i++) { - T sum = 0; - std::byte *ptr = element_ptr(i) + 2 * sizeof(int32_t); - for (size_t j = 0; j < m_cluster_size_x * m_cluster_size_y; j++) { - sum += *reinterpret_cast(ptr); - ptr += sizeof(T); - } - sums[i] = sum; + sums[i] = + std::accumulate(reinterpret_cast(ptr), + reinterpret_cast(ptr) + n_pixels, T{}); + ptr += stride; } return sums; } @@ -59,18 +101,25 @@ template class ClusterVector { return sizeof(m_cluster_size_x) + sizeof(m_cluster_size_y) + m_cluster_size_x * m_cluster_size_y * sizeof(T); } - size_t element_offset(size_t i) const { - return element_offset() * i; - } + size_t element_offset(size_t i) const { return element_offset() * i; } - std::byte* element_ptr(size_t i) { - return m_data + element_offset(i); - } + std::byte *element_ptr(size_t i) { return m_data + element_offset(i); } int16_t cluster_size_x() const { return m_cluster_size_x; } int16_t cluster_size_y() const { return m_cluster_size_y; } std::byte *data() { return m_data; } - ~ClusterVector() { delete[] m_data; } + private: + void allocate_buffer(size_t new_capacity) { + size_t num_bytes = element_offset() * new_capacity; + fmt::print( + "ClusterVector allocating {} elements for a total of {} bytes\n", + new_capacity, num_bytes); + std::byte *new_data = new 
std::byte[num_bytes]{}; + std::copy(m_data, m_data + element_offset() * m_size, new_data); + delete[] m_data; + m_data = new_data; + m_capacity = new_capacity; + } }; \ No newline at end of file diff --git a/python/examples/play.py b/python/examples/play.py index c5abcbf..986b718 100644 --- a/python/examples/play.py +++ b/python/examples/play.py @@ -22,7 +22,9 @@ im = ax.imshow(cf.pedestal()) cf.pedestal() cf.noise() -N = 200 + + +N = 500 t0 = time.perf_counter() hist1 = bh.Histogram(bh.axis.Regular(40, -2, 4000)) f.seek(0) @@ -31,23 +33,26 @@ t0 = time.perf_counter() data = f.read_n(N) t_elapsed = time.perf_counter()-t0 -print(f'Reading {N} frames took {t_elapsed:.3f}s {N/t_elapsed:.0f} FPS') -clusters = [] +n_bytes = data.itemsize*data.size + +print(f'Reading {N} frames took {t_elapsed:.3f}s {N/t_elapsed:.0f} FPS, {n_bytes/1024**2:.4f} GB/s') + + for frame in data: - clusters += [cf.find_clusters_without_threshold(frame)] + a = cf.find_clusters(frame) +clusters = cf.steal_clusters() -t_elapsed = time.perf_counter()-t0 -print(f'Clustering {N} frames took {t_elapsed:.2f}s {N/t_elapsed:.0f} FPS') - - -t0 = time.perf_counter() -total_clusters = 0 -for cl in clusters: - arr = np.array(cl, copy = False) - hist1.fill(arr['data'].sum(axis = 1).sum(axis = 1)) - total_clusters += cl.size # t_elapsed = time.perf_counter()-t0 -print(f'Filling histogram with {total_clusters} clusters took: {t_elapsed:.3f}s') -print(f'Cluster per frame {total_clusters/N:.3f}') \ No newline at end of file +# print(f'Clustering {N} frames took {t_elapsed:.2f}s {N/t_elapsed:.0f} FPS') + + +# t0 = time.perf_counter() +# total_clusters = clusters.size + +# hist1.fill(clusters.sum()) + +# t_elapsed = time.perf_counter()-t0 +# print(f'Filling histogram with the sum of {total_clusters} clusters took: {t_elapsed:.3f}s, {total_clusters/t_elapsed:.3g} clust/s') +# print(f'Average number of clusters per frame {total_clusters/N:.3f}') \ No newline at end of file diff --git a/python/src/cluster.hpp 
b/python/src/cluster.hpp index b9296fd..480aea1 100644 --- a/python/src/cluster.hpp +++ b/python/src/cluster.hpp @@ -26,12 +26,15 @@ void define_cluster_vector(py::module &m, const std::string &typestr) { "i:x:\ni:y:\n({},{}){}:data:", v.cluster_size_x(), v.cluster_size_y(), typestr); }) - .def("sum", &ClusterVector::sum) + .def("sum", [](ClusterVector &self) { + auto *vec = new std::vector(self.sum()); + return return_vector(vec); + }) .def_buffer([typestr](ClusterVector &v) -> py::buffer_info { return py::buffer_info( v.data(), /* Pointer to buffer */ v.element_offset(), /* Size of one scalar */ - fmt::format("i:x:\ni:y:\n({},{}){}:data:", v.cluster_size_x(), + fmt::format("i:x:\ni:y:\n{}{}:data:", v.cluster_size_x()* v.cluster_size_y(), typestr), /* Format descriptor */ 1, /* Number of dimensions */ @@ -62,13 +65,17 @@ void define_cluster_finder_bindings(py::module &m) { *arr = self.noise(); return return_image_data(arr); }) - .def("find_clusters_without_threshold", + .def("steal_clusters", + [](ClusterFinder &self) { + auto v = new ClusterVector(self.steal_clusters()); + return v; + }) + .def("find_clusters", [](ClusterFinder &self, py::array_t frame) { auto view = make_view_2d(frame); - auto *vec = new ClusterVector( - self.find_clusters_without_threshold(view)); - return vec; + self.find_clusters(view); + return; }); m.def("hello", []() { From 60534add92fefbc151cb0d4af571008d3db89426 Mon Sep 17 00:00:00 2001 From: froejdh_e Date: Wed, 11 Dec 2024 09:54:33 +0100 Subject: [PATCH 003/120] WIP --- include/aare/ClusterFile.hpp | 25 +++-- include/aare/ClusterVector.hpp | 46 ++++---- include/aare/File.hpp | 1 + python/src/cluster.hpp | 20 ++-- python/src/cluster_file.hpp | 33 ++++-- python/src/file.hpp | 3 +- src/ClusterFile.cpp | 192 +++++++++++++++++++-------------- src/File.cpp | 2 + 8 files changed, 198 insertions(+), 124 deletions(-) diff --git a/include/aare/ClusterFile.hpp b/include/aare/ClusterFile.hpp index f866dd6..edcb91e 100644 --- 
a/include/aare/ClusterFile.hpp +++ b/include/aare/ClusterFile.hpp @@ -1,6 +1,7 @@ #pragma once #include "aare/defs.hpp" +#include "aare/ClusterVector.hpp" #include #include @@ -38,30 +39,40 @@ struct ClusterAnalysis { double etay; }; - - +/* +Binary cluster file. Expects data to be layed out as: +int32_t frame_number +uint32_t number_of_clusters +int16_t x, int16_t y, int32_t data[9] x number_of_clusters +int32_t frame_number +uint32_t number_of_clusters +.... +*/ class ClusterFile { FILE *fp{}; uint32_t m_num_left{}; size_t m_chunk_size{}; + const std::string m_mode; public: - ClusterFile(const std::filesystem::path &fname, size_t chunk_size = 1000); + ClusterFile(const std::filesystem::path &fname, size_t chunk_size = 1000, + const std::string &mode = "r"); ~ClusterFile(); std::vector read_clusters(size_t n_clusters); std::vector read_frame(int32_t &out_fnum); + void write_frame(int32_t frame_number, const ClusterVector& clusters); std::vector read_cluster_with_cut(size_t n_clusters, double *noise_map, int nx, int ny); int analyze_data(int32_t *data, int32_t *t2, int32_t *t3, char *quad, - double *eta2x, double *eta2y, double *eta3x, double *eta3y); + double *eta2x, double *eta2y, double *eta3x, + double *eta3y); int analyze_cluster(Cluster cl, int32_t *t2, int32_t *t3, char *quad, - double *eta2x, double *eta2y, double *eta3x, - double *eta3y); + double *eta2x, double *eta2y, double *eta3x, + double *eta3y); size_t chunk_size() const { return m_chunk_size; } void close(); - }; } // namespace aare diff --git a/include/aare/ClusterVector.hpp b/include/aare/ClusterVector.hpp index 5445cbf..76e7e21 100644 --- a/include/aare/ClusterVector.hpp +++ b/include/aare/ClusterVector.hpp @@ -6,14 +6,25 @@ #include template class ClusterVector { - int32_t m_cluster_size_x; - int32_t m_cluster_size_y; + using value_type = T; + using coord_t = int16_t; + coord_t m_cluster_size_x; + coord_t m_cluster_size_y; std::byte *m_data{}; size_t m_size{0}; size_t m_capacity; + /* + Format 
string used in the python bindings to create a numpy + array from the buffer + = - native byte order + h - short + d - double + i - int + */ + constexpr static char m_fmt_base[] = "=h:x:\nh:y:\n({},{}){}:data:" ; public: - ClusterVector(int32_t cluster_size_x, int32_t cluster_size_y, + ClusterVector(coord_t cluster_size_x, coord_t cluster_size_y, size_t capacity = 1024) : m_cluster_size_x(cluster_size_x), m_cluster_size_y(cluster_size_y), m_capacity(capacity) { @@ -25,15 +36,8 @@ template class ClusterVector { delete[] m_data; } - // ClusterVector(const ClusterVector & other){ - // m_cluster_size_x = other.m_cluster_size_x; - // m_cluster_size_y = other.m_cluster_size_y; - // m_data = new std::byte[other.m_capacity]; - // std::copy(other.m_data, other.m_data + other.m_capacity, m_data); - // m_size = other.m_size; - // m_capacity = other.m_capacity; - // } - + + //Move constructor ClusterVector(ClusterVector &&other) noexcept : m_cluster_size_x(other.m_cluster_size_x), m_cluster_size_y(other.m_cluster_size_y), m_data(other.m_data), @@ -66,15 +70,15 @@ template class ClusterVector { } // data better hold data of the right size! 
- void push_back(int32_t x, int32_t y, const std::byte *data) { + void push_back(coord_t x, coord_t y, const std::byte *data) { if (m_size == m_capacity) { allocate_buffer(m_capacity * 2); } std::byte *ptr = element_ptr(m_size); - *reinterpret_cast(ptr) = x; - ptr += sizeof(int32_t); - *reinterpret_cast(ptr) = y; - ptr += sizeof(int32_t); + *reinterpret_cast(ptr) = x; + ptr += sizeof(coord_t); + *reinterpret_cast(ptr) = y; + ptr += sizeof(coord_t); std::copy(data, data + m_cluster_size_x * m_cluster_size_y * sizeof(T), ptr); @@ -85,7 +89,7 @@ template class ClusterVector { std::vector sums(m_size); const size_t stride = element_offset(); const size_t n_pixels = m_cluster_size_x * m_cluster_size_y; - std::byte *ptr = m_data + 2 * sizeof(int32_t); // skip x and y + std::byte *ptr = m_data + 2 * sizeof(coord_t); // skip x and y for (size_t i = 0; i < m_size; i++) { sums[i] = @@ -109,6 +113,12 @@ template class ClusterVector { int16_t cluster_size_y() const { return m_cluster_size_y; } std::byte *data() { return m_data; } + const std::byte *data() const { return m_data; } + + const std::string_view fmt_base() const { + //TODO! how do we match on coord_t? 
+ return m_fmt_base; + } private: void allocate_buffer(size_t new_capacity) { diff --git a/include/aare/File.hpp b/include/aare/File.hpp index b29171a..7aa30e1 100644 --- a/include/aare/File.hpp +++ b/include/aare/File.hpp @@ -44,6 +44,7 @@ class File { void read_into(std::byte *image_buf); void read_into(std::byte *image_buf, size_t n_frames); + size_t frame_number(); //!< get the frame number at the current position size_t frame_number(size_t frame_index); //!< get the frame number at the given frame index size_t bytes_per_frame() const; size_t pixels_per_frame() const; diff --git a/python/src/cluster.hpp b/python/src/cluster.hpp index 480aea1..0fef093 100644 --- a/python/src/cluster.hpp +++ b/python/src/cluster.hpp @@ -21,25 +21,25 @@ void define_cluster_vector(py::module &m, const std::string &typestr) { .def("element_offset", py::overload_cast<>(&ClusterVector::element_offset, py::const_)) .def_property_readonly("fmt", - [typestr](ClusterVector &v) { + [typestr](ClusterVector &self) { return fmt::format( - "i:x:\ni:y:\n({},{}){}:data:", v.cluster_size_x(), - v.cluster_size_y(), typestr); + self.fmt_base(), self.cluster_size_x(), + self.cluster_size_y(), typestr); }) .def("sum", [](ClusterVector &self) { auto *vec = new std::vector(self.sum()); return return_vector(vec); }) - .def_buffer([typestr](ClusterVector &v) -> py::buffer_info { + .def_buffer([typestr](ClusterVector &self) -> py::buffer_info { return py::buffer_info( - v.data(), /* Pointer to buffer */ - v.element_offset(), /* Size of one scalar */ - fmt::format("i:x:\ni:y:\n{}{}:data:", v.cluster_size_x()* - v.cluster_size_y(), + self.data(), /* Pointer to buffer */ + self.element_offset(), /* Size of one scalar */ + fmt::format(self.fmt_base(), self.cluster_size_x(), + self.cluster_size_y(), typestr), /* Format descriptor */ 1, /* Number of dimensions */ - {v.size()}, /* Buffer dimensions */ - {v.element_offset()} /* Strides (in bytes) for each index */ + {self.size()}, /* Buffer dimensions */ + 
{self.element_offset()} /* Strides (in bytes) for each index */ ); }); } diff --git a/python/src/cluster_file.hpp b/python/src/cluster_file.hpp index 6f37c3d..543073f 100644 --- a/python/src/cluster_file.hpp +++ b/python/src/cluster_file.hpp @@ -1,7 +1,6 @@ #include "aare/ClusterFile.hpp" #include "aare/defs.hpp" - #include #include #include @@ -18,33 +17,47 @@ void define_cluster_file_io_bindings(py::module &m) { PYBIND11_NUMPY_DTYPE(Cluster, x, y, data); py::class_(m, "ClusterFile") - .def(py::init(), py::arg(), py::arg("chunk_size") = 1000) + .def(py::init(), + py::arg(), py::arg("chunk_size") = 1000, py::arg("mode") = "r") .def("read_clusters", [](ClusterFile &self, size_t n_clusters) { - auto* vec = new std::vector(self.read_clusters(n_clusters)); + auto *vec = + new std::vector(self.read_clusters(n_clusters)); return return_vector(vec); }) .def("read_frame", [](ClusterFile &self) { int32_t frame_number; - auto* vec = new std::vector(self.read_frame(frame_number)); + auto *vec = + new std::vector(self.read_frame(frame_number)); return py::make_tuple(frame_number, return_vector(vec)); }) + .def("write_frame", &ClusterFile::write_frame) .def("read_cluster_with_cut", - [](ClusterFile &self, size_t n_clusters, py::array_t noise_map, int nx, int ny) { + [](ClusterFile &self, size_t n_clusters, + py::array_t noise_map, int nx, int ny) { auto view = make_view_2d(noise_map); - auto* vec = new std::vector(self.read_cluster_with_cut(n_clusters, view.data(), nx, ny)); + auto *vec = + new std::vector(self.read_cluster_with_cut( + n_clusters, view.data(), nx, ny)); return return_vector(vec); }) .def("__enter__", [](ClusterFile &self) { return &self; }) - .def("__exit__", [](ClusterFile &self) { self.close();}) + .def("__exit__", + [](ClusterFile &self, + const std::optional &exc_type, + const std::optional &exc_value, + const std::optional &traceback) { + self.close(); + }) .def("__iter__", [](ClusterFile &self) { return &self; }) .def("__next__", [](ClusterFile &self) { - 
auto vec = new std::vector(self.read_clusters(self.chunk_size())); - if(vec->size() == 0) { + auto vec = + new std::vector(self.read_clusters(self.chunk_size())); + if (vec->size() == 0) { throw py::stop_iteration(); } return return_vector(vec); }); - } \ No newline at end of file diff --git a/python/src/file.hpp b/python/src/file.hpp index 3c44c43..30fa82f 100644 --- a/python/src/file.hpp +++ b/python/src/file.hpp @@ -51,7 +51,8 @@ void define_file_io_bindings(py::module &m) { .def(py::init()) - .def("frame_number", &File::frame_number) + .def("frame_number", py::overload_cast<>(&File::frame_number)) + .def("frame_number", py::overload_cast(&File::frame_number)) .def_property_readonly("bytes_per_frame", &File::bytes_per_frame) .def_property_readonly("pixels_per_frame", &File::pixels_per_frame) .def("seek", &File::seek) diff --git a/src/ClusterFile.cpp b/src/ClusterFile.cpp index 3daa9d6..182726b 100644 --- a/src/ClusterFile.cpp +++ b/src/ClusterFile.cpp @@ -2,25 +2,54 @@ namespace aare { -ClusterFile::ClusterFile(const std::filesystem::path &fname, size_t chunk_size): m_chunk_size(chunk_size) { - fp = fopen(fname.c_str(), "rb"); - if (!fp) { - throw std::runtime_error("Could not open file: " + fname.string()); +ClusterFile::ClusterFile(const std::filesystem::path &fname, size_t chunk_size, + const std::string &mode) + : m_chunk_size(chunk_size), m_mode(mode) { + + if (mode == "r") { + fp = fopen(fname.c_str(), "rb"); + if (!fp) { + throw std::runtime_error("Could not open file for reading: " + fname.string()); + } + } else if (mode == "w") { + fp = fopen(fname.c_str(), "wb"); + if (!fp) { + throw std::runtime_error("Could not open file for writing: " + fname.string()); + } + } else { + throw std::runtime_error("Unsupported mode: " + mode); + } + +} + +ClusterFile::~ClusterFile() { close(); } + +void ClusterFile::close() { + if (fp) { + fclose(fp); + fp = nullptr; } } -ClusterFile::~ClusterFile() { - close(); -} - -void ClusterFile::close(){ - if (fp){ - 
fclose(fp); - fp = nullptr; - } +void ClusterFile::write_frame(int32_t frame_number, const ClusterVector& clusters){ + if (m_mode != "w") { + throw std::runtime_error("File not opened for writing"); + } + if(!(clusters.cluster_size_x()==3) && !(clusters.cluster_size_y()==3)){ + throw std::runtime_error("Only 3x3 clusters are supported"); + } + fwrite(&frame_number, sizeof(frame_number), 1, fp); + uint32_t n_clusters = clusters.size(); + fwrite(&n_clusters, sizeof(n_clusters), 1, fp); + fwrite(clusters.data(), clusters.element_offset(), clusters.size(), fp); + // write clusters + // fwrite(clusters.data(), sizeof(Cluster), clusters.size(), fp); } std::vector ClusterFile::read_clusters(size_t n_clusters) { + if (m_mode != "r") { + throw std::runtime_error("File not opened for reading"); + } std::vector clusters(n_clusters); int32_t iframe = 0; // frame number needs to be 4 bytes! @@ -38,7 +67,8 @@ std::vector ClusterFile::read_clusters(size_t n_clusters) { } else { nn = nph; } - nph_read += fread(reinterpret_cast(buf + nph_read), sizeof(Cluster), nn, fp); + nph_read += fread(reinterpret_cast(buf + nph_read), + sizeof(Cluster), nn, fp); m_num_left = nph - nn; // write back the number of photons left } @@ -52,8 +82,8 @@ std::vector ClusterFile::read_clusters(size_t n_clusters) { else nn = nph; - nph_read += - fread(reinterpret_cast(buf + nph_read), sizeof(Cluster), nn, fp); + nph_read += fread(reinterpret_cast(buf + nph_read), + sizeof(Cluster), nn, fp); m_num_left = nph - nn; } if (nph_read >= n_clusters) @@ -68,8 +98,12 @@ std::vector ClusterFile::read_clusters(size_t n_clusters) { } std::vector ClusterFile::read_frame(int32_t &out_fnum) { + if (m_mode != "r") { + throw std::runtime_error("File not opened for reading"); + } if (m_num_left) { - throw std::runtime_error("There are still photons left in the last frame"); + throw std::runtime_error( + "There are still photons left in the last frame"); } if (fread(&out_fnum, sizeof(out_fnum), 1, fp) != 1) { @@ -82,17 
+116,19 @@ std::vector ClusterFile::read_frame(int32_t &out_fnum) { } std::vector clusters(n_clusters); - if (fread(clusters.data(), sizeof(Cluster), n_clusters, fp) != static_cast(n_clusters)) { + if (fread(clusters.data(), sizeof(Cluster), n_clusters, fp) != + static_cast(n_clusters)) { throw std::runtime_error("Could not read clusters"); } return clusters; - } std::vector ClusterFile::read_cluster_with_cut(size_t n_clusters, double *noise_map, int nx, int ny) { - + if (m_mode != "r") { + throw std::runtime_error("File not opened for reading"); + } std::vector clusters(n_clusters); // size_t read_clusters_with_cut(FILE *fp, size_t n_clusters, Cluster *buf, // uint32_t *n_left, double *noise_map, int @@ -124,7 +160,8 @@ std::vector ClusterFile::read_cluster_with_cut(size_t n_clusters, } for (size_t iph = 0; iph < nn; iph++) { // read photons 1 by 1 - size_t n_read = fread(reinterpret_cast(ptr), sizeof(Cluster), 1, fp); + size_t n_read = + fread(reinterpret_cast(ptr), sizeof(Cluster), 1, fp); if (n_read != 1) { clusters.resize(nph_read); return clusters; @@ -158,71 +195,71 @@ std::vector ClusterFile::read_cluster_with_cut(size_t n_clusters, break; } } - if (nph_read < n_clusters) { - // // keep on reading frames and photons until reaching n_clusters - while (fread(&iframe, sizeof(iframe), 1, fp)) { - // // printf("%d\n",nph_read); + if (nph_read < n_clusters) { + // // keep on reading frames and photons until reaching + // n_clusters + while (fread(&iframe, sizeof(iframe), 1, fp)) { + // // printf("%d\n",nph_read); - if (fread(&nph, sizeof(nph), 1, fp)) { - // // printf("** %d\n",nph); - m_num_left = nph; - for (size_t iph = 0; iph < nph; iph++) { - // // read photons 1 by 1 - size_t n_read = - fread(reinterpret_cast(ptr), sizeof(Cluster), 1, fp); - if (n_read != 1) { - clusters.resize(nph_read); - return clusters; - // return nph_read; - } - good = 1; - if (noise_map) { - if (ptr->x >= 0 && ptr->x < nx && ptr->y >= 0 && - ptr->y < ny) { - tot1 = ptr->data[4]; - 
analyze_cluster(*ptr, &t2max, &tot3, NULL, - NULL, - NULL, NULL, NULL); - // noise = noise_map[ptr->y * nx + ptr->x]; - noise = noise_map[ptr->y + ny * ptr->x]; - if (tot1 > noise || t2max > 2 * noise || - tot3 > 3 * noise) { - ; - } else - good = 0; - } else { - printf("Bad pixel number %d %d\n", ptr->x, - ptr->y); good = 0; - } - } - if (good) { - ptr++; - nph_read++; - } - (m_num_left)--; - if (nph_read >= n_clusters) - break; + if (fread(&nph, sizeof(nph), 1, fp)) { + // // printf("** %d\n",nph); + m_num_left = nph; + for (size_t iph = 0; iph < nph; iph++) { + // // read photons 1 by 1 + size_t n_read = fread(reinterpret_cast(ptr), + sizeof(Cluster), 1, fp); + if (n_read != 1) { + clusters.resize(nph_read); + return clusters; + // return nph_read; } + good = 1; + if (noise_map) { + if (ptr->x >= 0 && ptr->x < nx && ptr->y >= 0 && + ptr->y < ny) { + tot1 = ptr->data[4]; + analyze_cluster(*ptr, &t2max, &tot3, NULL, NULL, + NULL, NULL, NULL); + // noise = noise_map[ptr->y * nx + ptr->x]; + noise = noise_map[ptr->y + ny * ptr->x]; + if (tot1 > noise || t2max > 2 * noise || + tot3 > 3 * noise) { + ; + } else + good = 0; + } else { + printf("Bad pixel number %d %d\n", ptr->x, ptr->y); + good = 0; + } + } + if (good) { + ptr++; + nph_read++; + } + (m_num_left)--; + if (nph_read >= n_clusters) + break; } - if (nph_read >= n_clusters) - break; } + if (nph_read >= n_clusters) + break; } - // printf("%d\n",nph_read); - clusters.resize(nph_read); - return clusters; - + } + // printf("%d\n",nph_read); + clusters.resize(nph_read); + return clusters; } -int ClusterFile::analyze_cluster(Cluster cl, int32_t *t2, int32_t *t3, char *quad, - double *eta2x, double *eta2y, double *eta3x, - double *eta3y) { +int ClusterFile::analyze_cluster(Cluster cl, int32_t *t2, int32_t *t3, + char *quad, double *eta2x, double *eta2y, + double *eta3x, double *eta3y) { return analyze_data(cl.data, t2, t3, quad, eta2x, eta2y, eta3x, eta3y); } -int ClusterFile::analyze_data(int32_t *data, int32_t 
*t2, int32_t *t3, char *quad, - double *eta2x, double *eta2y, double *eta3x, double *eta3y) { +int ClusterFile::analyze_data(int32_t *data, int32_t *t2, int32_t *t3, + char *quad, double *eta2x, double *eta2y, + double *eta3x, double *eta3y) { int ok = 1; @@ -263,7 +300,8 @@ int ClusterFile::analyze_data(int32_t *data, int32_t *t2, int32_t *t3, char *qua c = i; } } - //printf("*** %d %d %d %d -- %d\n",tot2[0],tot2[1],tot2[2],tot2[3],t2max); + // printf("*** %d %d %d %d -- + // %d\n",tot2[0],tot2[1],tot2[2],tot2[3],t2max); if (quad) *quad = c; if (t2) @@ -318,6 +356,4 @@ int ClusterFile::analyze_data(int32_t *data, int32_t *t2, int32_t *t3, char *qua return ok; } - - } // namespace aare \ No newline at end of file diff --git a/src/File.cpp b/src/File.cpp index d45e903..37e4c57 100644 --- a/src/File.cpp +++ b/src/File.cpp @@ -58,6 +58,8 @@ void File::read_into(std::byte *image_buf) { file_impl->read_into(image_buf); } void File::read_into(std::byte *image_buf, size_t n_frames) { file_impl->read_into(image_buf, n_frames); } + +size_t File::frame_number() { return file_impl->frame_number(tell()); } size_t File::frame_number(size_t frame_index) { return file_impl->frame_number(frame_index); } From b3a9e9576b5733e92498dc7f7b036d4e646c6e3b Mon Sep 17 00:00:00 2001 From: froejdh_e Date: Wed, 11 Dec 2024 16:27:36 +0100 Subject: [PATCH 004/120] WIP --- CMakeLists.txt | 1 + docs/conf.py.in | 1 - docs/src/ClusterVector.rst | 6 + docs/src/index.rst | 1 + include/aare/ClusterFinder.hpp | 305 +++++++++++++++++---------------- include/aare/ClusterVector.hpp | 52 +++++- python/src/cluster.hpp | 41 +---- src/ClusterVector.test.cpp | 77 +++++++++ 8 files changed, 301 insertions(+), 183 deletions(-) create mode 100644 docs/src/ClusterVector.rst create mode 100644 src/ClusterVector.test.cpp diff --git a/CMakeLists.txt b/CMakeLists.txt index f67d655..a38e8fd 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -344,6 +344,7 @@ if(AARE_TESTS) 
${CMAKE_CURRENT_SOURCE_DIR}/src/NDArray.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/NDView.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/ClusterFinder.test.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/ClusterVector.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/Pedestal.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyFile.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyHelpers.test.cpp diff --git a/docs/conf.py.in b/docs/conf.py.in index 3702330..ad73575 100644 --- a/docs/conf.py.in +++ b/docs/conf.py.in @@ -29,7 +29,6 @@ version = '@PROJECT_VERSION@' # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = ['breathe', - 'sphinx_rtd_theme', 'sphinx.ext.autodoc', 'sphinx.ext.napoleon', ] diff --git a/docs/src/ClusterVector.rst b/docs/src/ClusterVector.rst new file mode 100644 index 0000000..bb2a0d8 --- /dev/null +++ b/docs/src/ClusterVector.rst @@ -0,0 +1,6 @@ +ClusterVector +============= + +.. doxygenclass:: aare::ClusterVector + :members: + :undoc-members: \ No newline at end of file diff --git a/docs/src/index.rst b/docs/src/index.rst index 228d7c4..4316a2c 100644 --- a/docs/src/index.rst +++ b/docs/src/index.rst @@ -46,6 +46,7 @@ AARE Dtype ClusterFinder ClusterFile + ClusterVector Pedestal RawFile RawSubFile diff --git a/include/aare/ClusterFinder.hpp b/include/aare/ClusterFinder.hpp index 5bd8866..7111cf9 100644 --- a/include/aare/ClusterFinder.hpp +++ b/include/aare/ClusterFinder.hpp @@ -23,62 +23,83 @@ enum class eventType { UNDEFINED_EVENT = -1 /** undefined */ }; -template +template class ClusterFinder { Shape<2> m_image_size; const int m_cluster_sizeX; const int m_cluster_sizeY; - const double m_threshold; - const double m_nSigma; - const double c2; - const double c3; + // const PEDESTAL_TYPE m_threshold; + const PEDESTAL_TYPE m_nSigma; + const PEDESTAL_TYPE c2; + const PEDESTAL_TYPE c3; Pedestal m_pedestal; ClusterVector m_clusters; - public: + /** + * @brief Construct a new ClusterFinder object + * @param image_size size of the image + * 
@param cluster_size size of the cluster (x, y) + * @param nSigma number of sigma above the pedestal to consider a photon + * @param capacity initial capacity of the cluster vector + * + */ ClusterFinder(Shape<2> image_size, Shape<2> cluster_size, - double nSigma = 5.0, double threshold = 0.0) + PEDESTAL_TYPE nSigma = 5.0, size_t capacity = 1000000) : m_image_size(image_size), m_cluster_sizeX(cluster_size[0]), - m_cluster_sizeY(cluster_size[1]), m_threshold(threshold), + m_cluster_sizeY(cluster_size[1]), m_nSigma(nSigma), c2(sqrt((m_cluster_sizeY + 1) / 2 * (m_cluster_sizeX + 1) / 2)), c3(sqrt(m_cluster_sizeX * m_cluster_sizeY)), m_pedestal(image_size[0], image_size[1]), - m_clusters(m_cluster_sizeX, m_cluster_sizeY, 1'000'000) { - // clusters = ClusterVector(m_cluster_sizeX, m_cluster_sizeY, 2000); - }; + m_clusters(m_cluster_sizeX, m_cluster_sizeY, capacity) {}; void push_pedestal_frame(NDView frame) { m_pedestal.push(frame); } NDArray pedestal() { return m_pedestal.mean(); } - NDArray noise() { return m_pedestal.std(); } - ClusterVector steal_clusters() { + /** + * @brief Move the clusters from the ClusterVector in the ClusterFinder to a + * new ClusterVector and return it. + * @param realloc_same_capacity if true the new ClusterVector will have the + * same capacity as the old one + * + */ + ClusterVector steal_clusters(bool realloc_same_capacity = false) { ClusterVector tmp = std::move(m_clusters); - m_clusters = ClusterVector(m_cluster_sizeX, m_cluster_sizeY, 2000); + if (realloc_same_capacity) + m_clusters = ClusterVector(m_cluster_sizeX, m_cluster_sizeY, + tmp.capacity()); + else + m_clusters = ClusterVector(m_cluster_sizeX, m_cluster_sizeY); return tmp; } - void - find_clusters(NDView frame) { - // // size_t capacity = 2000; - // // ClusterVector clusters(m_cluster_sizeX, m_cluster_sizeY, capacity); - eventType event_type = eventType::PEDESTAL; - + void find_clusters(NDView frame) { // // TODO! 
deal with even size clusters // // currently 3,3 -> +/- 1 // // 4,4 -> +/- 2 short dy = m_cluster_sizeY / 2; short dx = m_cluster_sizeX / 2; + std::vector cluster_data(m_cluster_sizeX * m_cluster_sizeY); for (int iy = 0; iy < frame.shape(0); iy++) { for (int ix = 0; ix < frame.shape(1); ix++) { + PEDESTAL_TYPE max = std::numeric_limits::min(); PEDESTAL_TYPE total = 0; + // What can we short circuit here? + PEDESTAL_TYPE rms = m_pedestal.std(iy, ix); + PEDESTAL_TYPE value = (frame(iy, ix) - m_pedestal.mean(iy, ix)); + + if (value < -m_nSigma * rms) + continue; // NEGATIVE_PEDESTAL go to next pixel + // TODO! No pedestal update??? + for (short ir = -dy; ir < dy + 1; ir++) { for (short ic = -dx; ic < dx + 1; ic++) { if (ix + ic >= 0 && ix + ic < frame.shape(1) && @@ -92,159 +113,157 @@ class ClusterFinder { } } } - PEDESTAL_TYPE rms = m_pedestal.std(iy, ix); - PEDESTAL_TYPE value = (frame(iy, ix) - m_pedestal.mean(iy, ix)); - if (value < -m_nSigma * rms) { - continue; // NEGATIVE_PEDESTAL go to next pixel - // TODO! No pedestal update??? 
- } else if (max > m_nSigma * rms) { - event_type = eventType::PHOTON; + if ((max > m_nSigma * rms)) { if (value < max) continue; // Not max go to the next pixel + // but also no pedestal update } else if (total > c3 * m_nSigma * rms) { - event_type = eventType::PHOTON; + // pass } else { m_pedestal.push(iy, ix, frame(iy, ix)); continue; // It was a pedestal value nothing to store } // Store cluster - if (event_type == eventType::PHOTON && value >= max) { - event_type = eventType::PHOTON_MAX; + if (value == max) { + // Zero out the cluster data + std::fill(cluster_data.begin(), cluster_data.end(), 0); - short i = 0; - std::vector cluster_data(m_cluster_sizeX * - m_cluster_sizeY); - - for (short ir = -dy; ir < dy + 1; ir++) { - for (short ic = -dx; ic < dx + 1; ic++) { + // Fill the cluster data since we have a photon to store + // It's worth redoing the look since most of the time we + // don't have a photon + int i = 0; + for (int ir = -dy; ir < dy + 1; ir++) { + for (int ic = -dx; ic < dx + 1; ic++) { if (ix + ic >= 0 && ix + ic < frame.shape(1) && iy + ir >= 0 && iy + ir < frame.shape(0)) { CT tmp = - static_cast( - frame(iy + ir, ix + ic)) - + static_cast(frame(iy + ir, ix + ic)) - m_pedestal.mean(iy + ir, ix + ic); - cluster_data[i] = tmp; //Watch for out of bounds access + cluster_data[i] = + tmp; // Watch for out of bounds access i++; } } } + + // Add the cluster to the output ClusterVector m_clusters.push_back( ix, iy, reinterpret_cast(cluster_data.data())); } } } - // return clusters; } - // template - std::vector - find_clusters_with_threshold(NDView frame, - Pedestal &pedestal) { - assert(m_threshold > 0); - std::vector clusters; - std::vector> eventMask; - for (int i = 0; i < frame.shape(0); i++) { - eventMask.push_back(std::vector(frame.shape(1))); - } - double tthr, tthr1, tthr2; + // // template + // std::vector + // find_clusters_with_threshold(NDView frame, + // Pedestal &pedestal) { + // assert(m_threshold > 0); + // std::vector clusters; + // 
std::vector> eventMask; + // for (int i = 0; i < frame.shape(0); i++) { + // eventMask.push_back(std::vector(frame.shape(1))); + // } + // double tthr, tthr1, tthr2; - NDArray rest({frame.shape(0), frame.shape(1)}); - NDArray nph({frame.shape(0), frame.shape(1)}); - // convert to n photons - // nph = (frame-pedestal.mean()+0.5*m_threshold)/m_threshold; // can be - // optimized with expression templates? - for (int iy = 0; iy < frame.shape(0); iy++) { - for (int ix = 0; ix < frame.shape(1); ix++) { - auto val = frame(iy, ix) - pedestal.mean(iy, ix); - nph(iy, ix) = (val + 0.5 * m_threshold) / m_threshold; - nph(iy, ix) = nph(iy, ix) < 0 ? 0 : nph(iy, ix); - rest(iy, ix) = val - nph(iy, ix) * m_threshold; - } - } - // iterate over frame pixels - for (int iy = 0; iy < frame.shape(0); iy++) { - for (int ix = 0; ix < frame.shape(1); ix++) { - eventMask[iy][ix] = eventType::PEDESTAL; - // initialize max and total - FRAME_TYPE max = std::numeric_limits::min(); - long double total = 0; - if (rest(iy, ix) <= 0.25 * m_threshold) { - pedestal.push(iy, ix, frame(iy, ix)); - continue; - } - eventMask[iy][ix] = eventType::NEIGHBOUR; - // iterate over cluster pixels around the current pixel (ix,iy) - for (short ir = -(m_cluster_sizeY / 2); - ir < (m_cluster_sizeY / 2) + 1; ir++) { - for (short ic = -(m_cluster_sizeX / 2); - ic < (m_cluster_sizeX / 2) + 1; ic++) { - if (ix + ic >= 0 && ix + ic < frame.shape(1) && - iy + ir >= 0 && iy + ir < frame.shape(0)) { - auto val = frame(iy + ir, ix + ic) - - pedestal.mean(iy + ir, ix + ic); - total += val; - if (val > max) { - max = val; - } - } - } - } + // NDArray rest({frame.shape(0), frame.shape(1)}); + // NDArray nph({frame.shape(0), frame.shape(1)}); + // // convert to n photons + // // nph = (frame-pedestal.mean()+0.5*m_threshold)/m_threshold; // can + // be + // // optimized with expression templates? 
+ // for (int iy = 0; iy < frame.shape(0); iy++) { + // for (int ix = 0; ix < frame.shape(1); ix++) { + // auto val = frame(iy, ix) - pedestal.mean(iy, ix); + // nph(iy, ix) = (val + 0.5 * m_threshold) / m_threshold; + // nph(iy, ix) = nph(iy, ix) < 0 ? 0 : nph(iy, ix); + // rest(iy, ix) = val - nph(iy, ix) * m_threshold; + // } + // } + // // iterate over frame pixels + // for (int iy = 0; iy < frame.shape(0); iy++) { + // for (int ix = 0; ix < frame.shape(1); ix++) { + // eventMask[iy][ix] = eventType::PEDESTAL; + // // initialize max and total + // FRAME_TYPE max = std::numeric_limits::min(); + // long double total = 0; + // if (rest(iy, ix) <= 0.25 * m_threshold) { + // pedestal.push(iy, ix, frame(iy, ix)); + // continue; + // } + // eventMask[iy][ix] = eventType::NEIGHBOUR; + // // iterate over cluster pixels around the current pixel + // (ix,iy) for (short ir = -(m_cluster_sizeY / 2); + // ir < (m_cluster_sizeY / 2) + 1; ir++) { + // for (short ic = -(m_cluster_sizeX / 2); + // ic < (m_cluster_sizeX / 2) + 1; ic++) { + // if (ix + ic >= 0 && ix + ic < frame.shape(1) && + // iy + ir >= 0 && iy + ir < frame.shape(0)) { + // auto val = frame(iy + ir, ix + ic) - + // pedestal.mean(iy + ir, ix + ic); + // total += val; + // if (val > max) { + // max = val; + // } + // } + // } + // } - auto rms = pedestal.std(iy, ix); - if (m_nSigma == 0) { - tthr = m_threshold; - tthr1 = m_threshold; - tthr2 = m_threshold; - } else { - tthr = m_nSigma * rms; - tthr1 = m_nSigma * rms * c3; - tthr2 = m_nSigma * rms * c2; + // auto rms = pedestal.std(iy, ix); + // if (m_nSigma == 0) { + // tthr = m_threshold; + // tthr1 = m_threshold; + // tthr2 = m_threshold; + // } else { + // tthr = m_nSigma * rms; + // tthr1 = m_nSigma * rms * c3; + // tthr2 = m_nSigma * rms * c2; - if (m_threshold > 2 * tthr) - tthr = m_threshold - tthr; - if (m_threshold > 2 * tthr1) - tthr1 = tthr - tthr1; - if (m_threshold > 2 * tthr2) - tthr2 = tthr - tthr2; - } - if (total > tthr1 || max > tthr) { - 
eventMask[iy][ix] = eventType::PHOTON; - nph(iy, ix) += 1; - rest(iy, ix) -= m_threshold; - } else { - pedestal.push(iy, ix, frame(iy, ix)); - continue; - } - if (eventMask[iy][ix] == eventType::PHOTON && - frame(iy, ix) - pedestal.mean(iy, ix) >= max) { - eventMask[iy][ix] = eventType::PHOTON_MAX; - DynamicCluster cluster(m_cluster_sizeX, m_cluster_sizeY, - Dtype(typeid(FRAME_TYPE))); - cluster.x = ix; - cluster.y = iy; - short i = 0; - for (short ir = -(m_cluster_sizeY / 2); - ir < (m_cluster_sizeY / 2) + 1; ir++) { - for (short ic = -(m_cluster_sizeX / 2); - ic < (m_cluster_sizeX / 2) + 1; ic++) { - if (ix + ic >= 0 && ix + ic < frame.shape(1) && - iy + ir >= 0 && iy + ir < frame.shape(0)) { - auto tmp = frame(iy + ir, ix + ic) - - pedestal.mean(iy + ir, ix + ic); - cluster.set(i, tmp); - i++; - } - } - } - clusters.push_back(cluster); - } - } - } - return clusters; - } + // if (m_threshold > 2 * tthr) + // tthr = m_threshold - tthr; + // if (m_threshold > 2 * tthr1) + // tthr1 = tthr - tthr1; + // if (m_threshold > 2 * tthr2) + // tthr2 = tthr - tthr2; + // } + // if (total > tthr1 || max > tthr) { + // eventMask[iy][ix] = eventType::PHOTON; + // nph(iy, ix) += 1; + // rest(iy, ix) -= m_threshold; + // } else { + // pedestal.push(iy, ix, frame(iy, ix)); + // continue; + // } + // if (eventMask[iy][ix] == eventType::PHOTON && + // frame(iy, ix) - pedestal.mean(iy, ix) >= max) { + // eventMask[iy][ix] = eventType::PHOTON_MAX; + // DynamicCluster cluster(m_cluster_sizeX, m_cluster_sizeY, + // Dtype(typeid(FRAME_TYPE))); + // cluster.x = ix; + // cluster.y = iy; + // short i = 0; + // for (short ir = -(m_cluster_sizeY / 2); + // ir < (m_cluster_sizeY / 2) + 1; ir++) { + // for (short ic = -(m_cluster_sizeX / 2); + // ic < (m_cluster_sizeX / 2) + 1; ic++) { + // if (ix + ic >= 0 && ix + ic < frame.shape(1) && + // iy + ir >= 0 && iy + ir < frame.shape(0)) { + // auto tmp = frame(iy + ir, ix + ic) - + // pedestal.mean(iy + ir, ix + ic); + // cluster.set(i, tmp); + // 
i++; + // } + // } + // } + // clusters.push_back(cluster); + // } + // } + // } + // return clusters; + // } }; } // namespace aare \ No newline at end of file diff --git a/include/aare/ClusterVector.hpp b/include/aare/ClusterVector.hpp index 76e7e21..53aeed7 100644 --- a/include/aare/ClusterVector.hpp +++ b/include/aare/ClusterVector.hpp @@ -2,9 +2,18 @@ #include #include #include +#include #include +namespace aare { + +/** + * @brief ClusterVector is a container for clusters of various sizes. It uses a + * contiguous memory buffer to store the clusters. + * @note push_back can invalidate pointers to elements in the container + * @tparam T data type of the pixels in the cluster + */ template class ClusterVector { using value_type = T; using coord_t = int16_t; @@ -24,6 +33,12 @@ template class ClusterVector { constexpr static char m_fmt_base[] = "=h:x:\nh:y:\n({},{}){}:data:" ; public: + /** + * @brief Construct a new ClusterVector object + * @param cluster_size_x size of the cluster in x direction + * @param cluster_size_y size of the cluster in y direction + * @param capacity initial capacity of the buffer in number of clusters + */ ClusterVector(coord_t cluster_size_x, coord_t cluster_size_y, size_t capacity = 1024) : m_cluster_size_x(cluster_size_x), m_cluster_size_y(cluster_size_y), @@ -31,8 +46,6 @@ template class ClusterVector { allocate_buffer(capacity); } ~ClusterVector() { - fmt::print("~ClusterVector - size: {}, capacity: {}\n", m_size, - m_capacity); delete[] m_data; } @@ -63,13 +76,24 @@ template class ClusterVector { return *this; } + /** + * @brief Reserve space for at least capacity clusters + * @param capacity number of clusters to reserve space for + * @note If capacity is less than the current capacity, the function does nothing. + */ void reserve(size_t capacity) { if (capacity > m_capacity) { allocate_buffer(capacity); } } - // data better hold data of the right size! 
+ /** + * @brief Add a cluster to the vector + * @param x x-coordinate of the cluster + * @param y y-coordinate of the cluster + * @param data pointer to the data of the cluster + * @warning The data pointer must point to a buffer of size cluster_size_x * cluster_size_y * sizeof(T) + */ void push_back(coord_t x, coord_t y, const std::byte *data) { if (m_size == m_capacity) { allocate_buffer(m_capacity * 2); @@ -85,6 +109,10 @@ template class ClusterVector { m_size++; } + /** + * @brief Sum the pixels in each cluster + * @return std::vector vector of sums for each cluster + */ std::vector sum() { std::vector sums(m_size); const size_t stride = element_offset(); @@ -101,12 +129,23 @@ template class ClusterVector { } size_t size() const { return m_size; } + size_t capacity() const { return m_capacity; } + + /** + * @brief Return the offset in bytes for a single cluster + */ size_t element_offset() const { return sizeof(m_cluster_size_x) + sizeof(m_cluster_size_y) + m_cluster_size_x * m_cluster_size_y * sizeof(T); } + /** + * @brief Return the offset in bytes for the i-th cluster + */ size_t element_offset(size_t i) const { return element_offset() * i; } + /** + * @brief Return a pointer to the i-th cluster + */ std::byte *element_ptr(size_t i) { return m_data + element_offset(i); } int16_t cluster_size_x() const { return m_cluster_size_x; } @@ -123,13 +162,12 @@ template class ClusterVector { private: void allocate_buffer(size_t new_capacity) { size_t num_bytes = element_offset() * new_capacity; - fmt::print( - "ClusterVector allocating {} elements for a total of {} bytes\n", - new_capacity, num_bytes); std::byte *new_data = new std::byte[num_bytes]{}; std::copy(m_data, m_data + element_offset() * m_size, new_data); delete[] m_data; m_data = new_data; m_capacity = new_capacity; } -}; \ No newline at end of file +}; + +} // namespace aare \ No newline at end of file diff --git a/python/src/cluster.hpp b/python/src/cluster.hpp index 0fef093..0467c98 100644 --- 
a/python/src/cluster.hpp +++ b/python/src/cluster.hpp @@ -10,7 +10,7 @@ #include namespace py = pybind11; -using pd_type = double; +using pd_type = float; template void define_cluster_vector(py::module &m, const std::string &typestr) { @@ -46,7 +46,9 @@ void define_cluster_vector(py::module &m, const std::string &typestr) { void define_cluster_finder_bindings(py::module &m) { py::class_>(m, "ClusterFinder") - .def(py::init, Shape<2>>()) + .def(py::init, Shape<2>, pd_type, size_t>(), py::arg("image_size"), + py::arg("cluster_size"), py::arg("n_sigma") = 5.0, + py::arg("capacity") = 1'000'000) .def("push_pedestal_frame", [](ClusterFinder &self, py::array_t frame) { @@ -66,10 +68,10 @@ void define_cluster_finder_bindings(py::module &m) { return return_image_data(arr); }) .def("steal_clusters", - [](ClusterFinder &self) { - auto v = new ClusterVector(self.steal_clusters()); + [](ClusterFinder &self, bool realloc_same_capacity) { + auto v = new ClusterVector(self.steal_clusters(realloc_same_capacity)); return v; - }) + }, py::arg("realloc_same_capacity") = false) .def("find_clusters", [](ClusterFinder &self, py::array_t frame) { @@ -78,36 +80,11 @@ void define_cluster_finder_bindings(py::module &m) { return; }); - m.def("hello", []() { - fmt::print("Hello from C++\n"); - auto v = new ClusterVector(3, 3); - int data[] = {1, 2, 3, 4, 5, 6, 7, 8, 9}; - v->push_back(5, 30, reinterpret_cast(data)); - v->push_back(5, 55, reinterpret_cast(data)); - v->push_back(5, 20, reinterpret_cast(data)); - v->push_back(5, 30, reinterpret_cast(data)); - - return v; - }); - + define_cluster_vector(m, "i"); define_cluster_vector(m, "d"); + define_cluster_vector(m, "f"); - // py::class_>(m, "ClusterVector", py::buffer_protocol()) - // .def(py::init()) - // .def("size", &ClusterVector::size) - // .def("element_offset", - // py::overload_cast<>(&ClusterVector::element_offset, py::const_)) - // .def_buffer([](ClusterVector &v) -> py::buffer_info { - // return py::buffer_info( - // v.data(), /* 
Pointer to buffer */ - // v.element_offset(), /* Size of one scalar */ - // fmt::format("h:x:\nh:y:\n({},{})i:data:", v.cluster_size_x(), - // v.cluster_size_y()), /* Format descriptor */ 1, /* Number of - // dimensions */ {v.size()}, /* Buffer dimensions */ - // {v.element_offset()} /* Strides (in bytes) for each index */ - // ); - // }); py::class_(m, "DynamicCluster", py::buffer_protocol()) .def(py::init()) diff --git a/src/ClusterVector.test.cpp b/src/ClusterVector.test.cpp new file mode 100644 index 0000000..ef4e0ee --- /dev/null +++ b/src/ClusterVector.test.cpp @@ -0,0 +1,77 @@ +#include +#include "aare/ClusterVector.hpp" + +// #include +#include + +using aare::ClusterVector; + +TEST_CASE("ClusterVector 2x2 int32_t capacity 4, push back then read") { + struct Cluster_i2x2 { + int16_t x; + int16_t y; + int32_t data[4]; + }; + + ClusterVector cv(2, 2, 4); + REQUIRE(cv.capacity() == 4); + REQUIRE(cv.size() == 0); + REQUIRE(cv.cluster_size_x() == 2); + REQUIRE(cv.cluster_size_y() == 2); + // int16_t, int16_t, 2x2 int32_t = 20 bytes + REQUIRE(cv.element_offset() == 20); + + //Create a cluster and push back into the vector + Cluster_i2x2 c1 = {1, 2, {3, 4, 5, 6}}; + cv.push_back(c1.x, c1.y, reinterpret_cast(&c1.data[0])); + REQUIRE(cv.size() == 1); + REQUIRE(cv.capacity() == 4); + + //Read the cluster back out using copy. TODO! Can we improve the API? 
+ Cluster_i2x2 c2; + std::byte *ptr = cv.element_ptr(0); + std::copy(ptr, ptr + cv.element_offset(), reinterpret_cast(&c2)); + + //Check that the data is the same + REQUIRE(c1.x == c2.x); + REQUIRE(c1.y == c2.y); + for(size_t i = 0; i < 4; i++) { + REQUIRE(c1.data[i] == c2.data[i]); + } +} + +TEST_CASE("Summing 3x1 clusters of int64"){ + struct Cluster_l3x1{ + int16_t x; + int16_t y; + int64_t data[3]; + }; + + ClusterVector cv(3, 1, 2); + REQUIRE(cv.capacity() == 2); + REQUIRE(cv.size() == 0); + REQUIRE(cv.cluster_size_x() == 3); + REQUIRE(cv.cluster_size_y() == 1); + + //Create a cluster and push back into the vector + Cluster_l3x1 c1 = {1, 2, {3, 4, 5}}; + cv.push_back(c1.x, c1.y, reinterpret_cast(&c1.data[0])); + REQUIRE(cv.capacity() == 2); + REQUIRE(cv.size() == 1); + + Cluster_l3x1 c2 = {6, 7, {8, 9, 10}}; + cv.push_back(c2.x, c2.y, reinterpret_cast(&c2.data[0])); + REQUIRE(cv.capacity() == 2); + REQUIRE(cv.size() == 2); + + Cluster_l3x1 c3 = {11, 12, {13, 14, 15}}; + cv.push_back(c3.x, c3.y, reinterpret_cast(&c3.data[0])); + REQUIRE(cv.capacity() == 4); + REQUIRE(cv.size() == 3); + + auto sums = cv.sum(); + REQUIRE(sums.size() == 3); + REQUIRE(sums[0] == 12); + REQUIRE(sums[1] == 27); + REQUIRE(sums[2] == 42); +} \ No newline at end of file From a0f481c0ee179b4dea5fe7621f1a64287dc463b5 Mon Sep 17 00:00:00 2001 From: froejdh_e Date: Thu, 12 Dec 2024 14:34:10 +0100 Subject: [PATCH 005/120] mod pedestal --- CMakeLists.txt | 27 +++++++++----- include/aare/ClusterFinder.hpp | 8 ++-- include/aare/ClusterVector.hpp | 29 ++++++++------- include/aare/NDArray.hpp | 10 ++--- include/aare/Pedestal.hpp | 67 ++++++++++++++++++---------------- python/aare/__init__.py | 3 +- python/src/cluster.hpp | 21 ++++++++++- python/src/cluster_file.hpp | 10 ++++- python/src/module.cpp | 4 +- src/ClusterVector.test.cpp | 37 +++++++++++++++++-- src/Frame.test.cpp | 9 +++-- tests/CMakeLists.txt | 6 +-- tests/test.cpp | 19 +++++++++- 13 files changed, 170 insertions(+), 80 deletions(-) 
diff --git a/CMakeLists.txt b/CMakeLists.txt index a38e8fd..24b7b30 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -40,7 +40,7 @@ option(AARE_DOCS "Build documentation" OFF) option(AARE_VERBOSE "Verbose output" OFF) option(AARE_CUSTOM_ASSERT "Use custom assert" OFF) option(AARE_INSTALL_PYTHONEXT "Install the python extension in the install tree under CMAKE_INSTALL_PREFIX/aare/" OFF) - +option(AARE_ASAN "Enable AddressSanitizer" OFF) # Configure which of the dependencies to use FetchContent for option(AARE_FETCH_FMT "Use FetchContent to download fmt" ON) @@ -225,13 +225,6 @@ if(CMAKE_BUILD_TYPE STREQUAL "Release") target_compile_options(aare_compiler_flags INTERFACE -O3) else() message(STATUS "Debug build") - target_compile_options( - aare_compiler_flags - INTERFACE - -Og - -ggdb3 - ) - endif() # Common flags for GCC and Clang @@ -256,7 +249,21 @@ target_compile_options( endif() #GCC/Clang specific - +if(AARE_ASAN) + message(STATUS "AddressSanitizer enabled") + target_compile_options( + aare_compiler_flags + INTERFACE + -fsanitize=address,undefined,pointer-compare + -fno-omit-frame-pointer + ) + target_link_libraries( + aare_compiler_flags + INTERFACE + -fsanitize=address,undefined,pointer-compare + -fno-omit-frame-pointer + ) +endif() @@ -316,6 +323,8 @@ target_include_directories(aare_core PUBLIC "$" ) + + target_link_libraries( aare_core PUBLIC diff --git a/include/aare/ClusterFinder.hpp b/include/aare/ClusterFinder.hpp index 7111cf9..a98114d 100644 --- a/include/aare/ClusterFinder.hpp +++ b/include/aare/ClusterFinder.hpp @@ -82,8 +82,8 @@ class ClusterFinder { // // TODO! 
deal with even size clusters // // currently 3,3 -> +/- 1 // // 4,4 -> +/- 2 - short dy = m_cluster_sizeY / 2; - short dx = m_cluster_sizeX / 2; + int dy = m_cluster_sizeY / 2; + int dx = m_cluster_sizeX / 2; std::vector cluster_data(m_cluster_sizeX * m_cluster_sizeY); for (int iy = 0; iy < frame.shape(0); iy++) { @@ -100,8 +100,8 @@ class ClusterFinder { continue; // NEGATIVE_PEDESTAL go to next pixel // TODO! No pedestal update??? - for (short ir = -dy; ir < dy + 1; ir++) { - for (short ic = -dx; ic < dx + 1; ic++) { + for (int ir = -dy; ir < dy + 1; ir++) { + for (int ic = -dx; ic < dx + 1; ic++) { if (ix + ic >= 0 && ix + ic < frame.shape(1) && iy + ir >= 0 && iy + ir < frame.shape(0)) { PEDESTAL_TYPE val = diff --git a/include/aare/ClusterVector.hpp b/include/aare/ClusterVector.hpp index 53aeed7..ce8d935 100644 --- a/include/aare/ClusterVector.hpp +++ b/include/aare/ClusterVector.hpp @@ -13,12 +13,12 @@ namespace aare { * contiguous memory buffer to store the clusters. * @note push_back can invalidate pointers to elements in the container * @tparam T data type of the pixels in the cluster + * @tparam CoordType data type of the x and y coordinates of the cluster (normally int16_t) */ -template class ClusterVector { +template class ClusterVector { using value_type = T; - using coord_t = int16_t; - coord_t m_cluster_size_x; - coord_t m_cluster_size_y; + size_t m_cluster_size_x; + size_t m_cluster_size_y; std::byte *m_data{}; size_t m_size{0}; size_t m_capacity; @@ -39,7 +39,7 @@ template class ClusterVector { * @param cluster_size_y size of the cluster in y direction * @param capacity initial capacity of the buffer in number of clusters */ - ClusterVector(coord_t cluster_size_x, coord_t cluster_size_y, + ClusterVector(size_t cluster_size_x, size_t cluster_size_y, size_t capacity = 1024) : m_cluster_size_x(cluster_size_x), m_cluster_size_y(cluster_size_y), m_capacity(capacity) { @@ -94,21 +94,22 @@ template class ClusterVector { * @param data pointer to the data 
of the cluster * @warning The data pointer must point to a buffer of size cluster_size_x * cluster_size_y * sizeof(T) */ - void push_back(coord_t x, coord_t y, const std::byte *data) { + void push_back(CoordType x, CoordType y, const std::byte *data) { if (m_size == m_capacity) { allocate_buffer(m_capacity * 2); } std::byte *ptr = element_ptr(m_size); - *reinterpret_cast(ptr) = x; - ptr += sizeof(coord_t); - *reinterpret_cast(ptr) = y; - ptr += sizeof(coord_t); + *reinterpret_cast(ptr) = x; + ptr += sizeof(CoordType); + *reinterpret_cast(ptr) = y; + ptr += sizeof(CoordType); std::copy(data, data + m_cluster_size_x * m_cluster_size_y * sizeof(T), ptr); m_size++; } + /** * @brief Sum the pixels in each cluster * @return std::vector vector of sums for each cluster @@ -117,7 +118,7 @@ template class ClusterVector { std::vector sums(m_size); const size_t stride = element_offset(); const size_t n_pixels = m_cluster_size_x * m_cluster_size_y; - std::byte *ptr = m_data + 2 * sizeof(coord_t); // skip x and y + std::byte *ptr = m_data + 2 * sizeof(CoordType); // skip x and y for (size_t i = 0; i < m_size; i++) { sums[i] = @@ -135,7 +136,7 @@ template class ClusterVector { * @brief Return the offset in bytes for a single cluster */ size_t element_offset() const { - return sizeof(m_cluster_size_x) + sizeof(m_cluster_size_y) + + return 2*sizeof(CoordType) + m_cluster_size_x * m_cluster_size_y * sizeof(T); } /** @@ -148,8 +149,8 @@ template class ClusterVector { */ std::byte *element_ptr(size_t i) { return m_data + element_offset(i); } - int16_t cluster_size_x() const { return m_cluster_size_x; } - int16_t cluster_size_y() const { return m_cluster_size_y; } + size_t cluster_size_x() const { return m_cluster_size_x; } + size_t cluster_size_y() const { return m_cluster_size_y; } std::byte *data() { return m_data; } const std::byte *data() const { return m_data; } diff --git a/include/aare/NDArray.hpp b/include/aare/NDArray.hpp index 346646c..15beb02 100644 --- 
a/include/aare/NDArray.hpp +++ b/include/aare/NDArray.hpp @@ -87,7 +87,7 @@ class NDArray : public ArrayExpr, Ndim> { // Conversion operator from array expression to array template NDArray(ArrayExpr &&expr) : NDArray(expr.shape()) { - for (int i = 0; i < size_; ++i) { + for (size_t i = 0; i < size_; ++i) { data_[i] = expr[i]; } } @@ -159,11 +159,11 @@ class NDArray : public ArrayExpr, Ndim> { } // TODO! is int the right type for index? - T &operator()(int i) { return data_[i]; } - const T &operator()(int i) const { return data_[i]; } + T &operator()(int64_t i) { return data_[i]; } + const T &operator()(int64_t i) const { return data_[i]; } - T &operator[](int i) { return data_[i]; } - const T &operator[](int i) const { return data_[i]; } + T &operator[](int64_t i) { return data_[i]; } + const T &operator[](int64_t i) const { return data_[i]; } T *data() { return data_; } std::byte *buffer() { return reinterpret_cast(data_); } diff --git a/include/aare/Pedestal.hpp b/include/aare/Pedestal.hpp index 216c204..bb2ea2c 100644 --- a/include/aare/Pedestal.hpp +++ b/include/aare/Pedestal.hpp @@ -23,31 +23,43 @@ template class Pedestal { NDArray m_sum; NDArray m_sum2; + //Cache mean since it is used over and over in the ClusterFinder + //This optimization is related to the access pattern of the ClusterFinder + //Relies on having more reads than pushes to the pedestal + NDArray m_mean; + public: Pedestal(uint32_t rows, uint32_t cols, uint32_t n_samples = 1000) : m_rows(rows), m_cols(cols), m_samples(n_samples), m_cur_samples(NDArray({rows, cols}, 0)), m_sum(NDArray({rows, cols})), - m_sum2(NDArray({rows, cols})) { + m_sum2(NDArray({rows, cols})), + m_mean(NDArray({rows, cols})) { assert(rows > 0 && cols > 0 && n_samples > 0); m_sum = 0; m_sum2 = 0; + m_mean = 0; } ~Pedestal() = default; NDArray mean() { - NDArray mean_array({m_rows, m_cols}); - for (uint32_t i = 0; i < m_rows * m_cols; i++) { - mean_array(i / m_cols, i % m_cols) = mean(i / m_cols, i % m_cols); - } - return 
mean_array; + return m_mean; } SUM_TYPE mean(const uint32_t row, const uint32_t col) const { + return m_mean(row, col); + } + + SUM_TYPE std(const uint32_t row, const uint32_t col) const { + return std::sqrt(variance(row, col)); + } + + SUM_TYPE variance(const uint32_t row, const uint32_t col) const { if (m_cur_samples(row, col) == 0) { return 0.0; } - return m_sum(row, col) / m_cur_samples(row, col); + return m_sum2(row, col) / m_cur_samples(row, col) - + mean(row, col) * mean(row, col); } NDArray variance() { @@ -59,13 +71,7 @@ template class Pedestal { return variance_array; } - SUM_TYPE variance(const uint32_t row, const uint32_t col) const { - if (m_cur_samples(row, col) == 0) { - return 0.0; - } - return m_sum2(row, col) / m_cur_samples(row, col) - - mean(row, col) * mean(row, col); - } + NDArray std() { NDArray standard_deviation_array({m_rows, m_cols}); @@ -77,14 +83,12 @@ template class Pedestal { return standard_deviation_array; } - SUM_TYPE std(const uint32_t row, const uint32_t col) const { - return std::sqrt(variance(row, col)); - } + void clear() { - for (uint32_t i = 0; i < m_rows * m_cols; i++) { - clear(i / m_cols, i % m_cols); - } + m_sum = 0; + m_sum2 = 0; + m_cur_samples = 0; } @@ -104,8 +108,8 @@ template class Pedestal { "Frame shape does not match pedestal shape"); } - for (uint32_t row = 0; row < m_rows; row++) { - for (uint32_t col = 0; col < m_cols; col++) { + for (size_t row = 0; row < m_rows; row++) { + for (size_t col = 0; col < m_cols; col++) { push(row, col, frame(row, col)); } } @@ -134,18 +138,17 @@ template class Pedestal { template void push(const uint32_t row, const uint32_t col, const T val_) { SUM_TYPE val = static_cast(val_); - const uint32_t idx = index(row, col); - if (m_cur_samples(idx) < m_samples) { - m_sum(idx) += val; - m_sum2(idx) += val * val; - m_cur_samples(idx)++; + if (m_cur_samples(row, col) < m_samples) { + m_sum(row, col) += val; + m_sum2(row, col) += val * val; + m_cur_samples(row, col)++; } else { - 
m_sum(idx) += val - m_sum(idx) / m_cur_samples(idx); - m_sum2(idx) += val * val - m_sum2(idx) / m_cur_samples(idx); + m_sum(row, col) += val - m_sum(row, col) / m_cur_samples(row, col); + m_sum2(row, col) += val * val - m_sum2(row, col) / m_cur_samples(row, col); } + //Since we just did a push we know that m_cur_samples(row, col) is at least 1 + m_mean(row, col) = m_sum(row, col) / m_cur_samples(row, col); } - uint32_t index(const uint32_t row, const uint32_t col) const { - return row * m_cols + col; - }; + }; } // namespace aare \ No newline at end of file diff --git a/python/aare/__init__.py b/python/aare/__init__.py index 5641d85..fb34c7a 100644 --- a/python/aare/__init__.py +++ b/python/aare/__init__.py @@ -3,9 +3,10 @@ from . import _aare from ._aare import File, RawMasterFile, RawSubFile -from ._aare import Pedestal, ClusterFinder, VarClusterFinder +from ._aare import Pedestal_d, Pedestal_f, ClusterFinder, VarClusterFinder from ._aare import DetectorType from ._aare import ClusterFile +from ._aare import hitmap from .CtbRawFile import CtbRawFile from .RawFile import RawFile diff --git a/python/src/cluster.hpp b/python/src/cluster.hpp index 0467c98..d11c706 100644 --- a/python/src/cluster.hpp +++ b/python/src/cluster.hpp @@ -80,7 +80,26 @@ void define_cluster_finder_bindings(py::module &m) { return; }); - + m.def("hitmap", [](std::array image_size, ClusterVector& cv){ + + py::array_t hitmap(image_size); + auto r = hitmap.mutable_unchecked<2>(); + + // Initialize hitmap to 0 + for (py::ssize_t i = 0; i < r.shape(0); i++) + for (py::ssize_t j = 0; j < r.shape(1); j++) + r(i, j) = 0; + + size_t stride = cv.element_offset(); + auto ptr = cv.data(); + for(size_t i=0; i(ptr); + auto y = *reinterpret_cast(ptr+sizeof(int16_t)); + r(y, x) += 1; + ptr += stride; + } + return hitmap; + }); define_cluster_vector(m, "i"); define_cluster_vector(m, "d"); define_cluster_vector(m, "f"); diff --git a/python/src/cluster_file.hpp b/python/src/cluster_file.hpp index 
543073f..aa7fd23 100644 --- a/python/src/cluster_file.hpp +++ b/python/src/cluster_file.hpp @@ -10,6 +10,12 @@ #include #include +//Disable warnings for unused parameters, as we ignore some +//in the __exit__ method +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wunused-parameter" + + namespace py = pybind11; using namespace ::aare; @@ -60,4 +66,6 @@ void define_cluster_file_io_bindings(py::module &m) { } return return_vector(vec); }); -} \ No newline at end of file +} + +#pragma GCC diagnostic pop \ No newline at end of file diff --git a/python/src/module.cpp b/python/src/module.cpp index 7963ac4..14a686a 100644 --- a/python/src/module.cpp +++ b/python/src/module.cpp @@ -22,8 +22,8 @@ PYBIND11_MODULE(_aare, m) { define_raw_master_file_bindings(m); define_var_cluster_finder_bindings(m); define_pixel_map_bindings(m); - define_pedestal_bindings(m, "Pedestal"); - define_pedestal_bindings(m, "Pedestal_float32"); + define_pedestal_bindings(m, "Pedestal_d"); + define_pedestal_bindings(m, "Pedestal_f"); define_cluster_finder_bindings(m); define_cluster_file_io_bindings(m); } \ No newline at end of file diff --git a/src/ClusterVector.test.cpp b/src/ClusterVector.test.cpp index ef4e0ee..24a482b 100644 --- a/src/ClusterVector.test.cpp +++ b/src/ClusterVector.test.cpp @@ -1,7 +1,7 @@ #include #include "aare/ClusterVector.hpp" -// #include +#include #include using aare::ClusterVector; @@ -44,10 +44,10 @@ TEST_CASE("Summing 3x1 clusters of int64"){ struct Cluster_l3x1{ int16_t x; int16_t y; - int64_t data[3]; + int32_t data[3]; }; - ClusterVector cv(3, 1, 2); + ClusterVector cv(3, 1, 2); REQUIRE(cv.capacity() == 2); REQUIRE(cv.size() == 0); REQUIRE(cv.cluster_size_x() == 3); @@ -74,4 +74,35 @@ TEST_CASE("Summing 3x1 clusters of int64"){ REQUIRE(sums[0] == 12); REQUIRE(sums[1] == 27); REQUIRE(sums[2] == 42); +} + +TEST_CASE("Storing floats"){ + struct Cluster_f4x2{ + int16_t x; + int16_t y; + float data[8]; + }; + + ClusterVector cv(2, 4, 2); + 
REQUIRE(cv.capacity() == 2); + REQUIRE(cv.size() == 0); + REQUIRE(cv.cluster_size_x() == 2); + REQUIRE(cv.cluster_size_y() == 4); + + //Create a cluster and push back into the vector + Cluster_f4x2 c1 = {1, 2, {3.0, 4.0, 5.0, 6.0,3.0, 4.0, 5.0, 6.0}}; + cv.push_back(c1.x, c1.y, reinterpret_cast(&c1.data[0])); + REQUIRE(cv.capacity() == 2); + REQUIRE(cv.size() == 1); + + + Cluster_f4x2 c2 = {6, 7, {8.0, 9.0, 10.0, 11.0,8.0, 9.0, 10.0, 11.0}}; + cv.push_back(c2.x, c2.y, reinterpret_cast(&c2.data[0])); + REQUIRE(cv.capacity() == 2); + REQUIRE(cv.size() == 2); + + auto sums = cv.sum(); + REQUIRE(sums.size() == 2); + REQUIRE_THAT(sums[0], Catch::Matchers::WithinAbs(36.0, 1e-6)); + REQUIRE_THAT(sums[1], Catch::Matchers::WithinAbs(76.0, 1e-6)); } \ No newline at end of file diff --git a/src/Frame.test.cpp b/src/Frame.test.cpp index 33bbbb6..4063701 100644 --- a/src/Frame.test.cpp +++ b/src/Frame.test.cpp @@ -19,7 +19,7 @@ TEST_CASE("Construct a frame") { // data should be initialized to 0 for (size_t i = 0; i < rows; i++) { for (size_t j = 0; j < cols; j++) { - uint8_t *data = (uint8_t *)frame.pixel_ptr(i, j); + uint8_t *data = reinterpret_cast(frame.pixel_ptr(i, j)); REQUIRE(data != nullptr); REQUIRE(*data == 0); } @@ -40,7 +40,7 @@ TEST_CASE("Set a value in a 8 bit frame") { // only the value we did set should be non-zero for (size_t i = 0; i < rows; i++) { for (size_t j = 0; j < cols; j++) { - uint8_t *data = (uint8_t *)frame.pixel_ptr(i, j); + uint8_t *data = reinterpret_cast(frame.pixel_ptr(i, j)); REQUIRE(data != nullptr); if (i == 5 && j == 7) { REQUIRE(*data == value); @@ -65,7 +65,7 @@ TEST_CASE("Set a value in a 64 bit frame") { // only the value we did set should be non-zero for (size_t i = 0; i < rows; i++) { for (size_t j = 0; j < cols; j++) { - uint64_t *data = (uint64_t *)frame.pixel_ptr(i, j); + uint64_t *data = reinterpret_cast(frame.pixel_ptr(i, j)); REQUIRE(data != nullptr); if (i == 5 && j == 7) { REQUIRE(*data == value); @@ -149,4 +149,5 @@ 
TEST_CASE("test explicit copy constructor") { REQUIRE(frame2.bitdepth() == bitdepth); REQUIRE(frame2.bytes() == rows * cols * bitdepth / 8); REQUIRE(frame2.data() != data); -} \ No newline at end of file +} + diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 3170f7c..1906508 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -17,8 +17,8 @@ endif() list(APPEND CMAKE_MODULE_PATH ${Catch2_SOURCE_DIR}/extras) add_executable(tests test.cpp) -target_link_libraries(tests PRIVATE Catch2::Catch2WithMain) - +target_link_libraries(tests PRIVATE Catch2::Catch2WithMain aare_core aare_compiler_flags) +# target_compile_options(tests PRIVATE -fno-omit-frame-pointer -fsanitize=address) set_target_properties(tests PROPERTIES RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR} OUTPUT_NAME run_tests @@ -34,7 +34,7 @@ set(TestSources target_sources(tests PRIVATE ${TestSources} ) #Work around to remove, this is not the way to do it =) -target_link_libraries(tests PRIVATE aare_core aare_compiler_flags) +# target_link_libraries(tests PRIVATE aare_core aare_compiler_flags) #configure a header to pass test file paths diff --git a/tests/test.cpp b/tests/test.cpp index 7c638e4..513f690 100644 --- a/tests/test.cpp +++ b/tests/test.cpp @@ -3,6 +3,7 @@ #include #include #include +#include TEST_CASE("Test suite can find data assets", "[.integration]") { auto fpath = test_data_path() / "numpy" / "test_numpy_file.npy"; @@ -18,4 +19,20 @@ TEST_CASE("Test suite can open data assets", "[.integration]") { TEST_CASE("Test float32 and char8") { REQUIRE(sizeof(float) == 4); REQUIRE(CHAR_BIT == 8); -} \ No newline at end of file +} + +/** + * Uncomment the following tests to verify that asan is working + */ + +// TEST_CASE("trigger asan stack"){ +// int arr[5] = {1,2,3,4,5}; +// int val = arr[7]; +// fmt::print("val: {}\n", val); +// } + +// TEST_CASE("trigger asan heap"){ +// auto *ptr = new int[5]; +// ptr[70] = 5; +// fmt::print("ptr: {}\n", ptr[70]); +// } \ No newline at end of file 
From f88b53387faf72acda8e26b069be93b199c20ac0 Mon Sep 17 00:00:00 2001 From: froejdh_e Date: Thu, 12 Dec 2024 17:58:04 +0100 Subject: [PATCH 006/120] WIP --- include/aare/ClusterFinder.hpp | 3 +- include/aare/Pedestal.hpp | 65 ++++++++++++++++++++++++++++++---- python/src/pedestal.hpp | 7 +++- 3 files changed, 67 insertions(+), 8 deletions(-) diff --git a/include/aare/ClusterFinder.hpp b/include/aare/ClusterFinder.hpp index a98114d..8bd77cc 100644 --- a/include/aare/ClusterFinder.hpp +++ b/include/aare/ClusterFinder.hpp @@ -121,7 +121,8 @@ class ClusterFinder { } else if (total > c3 * m_nSigma * rms) { // pass } else { - m_pedestal.push(iy, ix, frame(iy, ix)); + // m_pedestal.push(iy, ix, frame(iy, ix)); + m_pedestal.push_fast(iy, ix, frame(iy, ix)); continue; // It was a pedestal value nothing to store } diff --git a/include/aare/Pedestal.hpp b/include/aare/Pedestal.hpp index bb2ea2c..bda94f2 100644 --- a/include/aare/Pedestal.hpp +++ b/include/aare/Pedestal.hpp @@ -98,7 +98,9 @@ template class Pedestal { m_sum2(row, col) = 0; m_cur_samples(row, col) = 0; } - // frame level operations + + + template void push(NDView frame) { assert(frame.size() == m_rows * m_cols); @@ -113,12 +115,32 @@ template class Pedestal { push(row, col, frame(row, col)); } } - - // // TODO: test the effect of #pragma omp parallel for - // for (uint32_t index = 0; index < m_rows * m_cols; index++) { - // push(index / m_cols, index % m_cols, frame(index)); - // } } + + /** + * Push but don't update the cached mean. Speeds up the process + * when intitializing the pedestal. + * + */ + template void push_no_update(NDView frame) { + assert(frame.size() == m_rows * m_cols); + + // TODO! 
move away from m_rows, m_cols + if (frame.shape() != std::array{m_rows, m_cols}) { + throw std::runtime_error( + "Frame shape does not match pedestal shape"); + } + + for (size_t row = 0; row < m_rows; row++) { + for (size_t col = 0; col < m_cols; col++) { + push_no_update(row, col, frame(row, col)); + } + } + } + + + + template void push(Frame &frame) { assert(frame.rows() == static_cast(m_rows) && frame.cols() == static_cast(m_cols)); @@ -150,5 +172,36 @@ template class Pedestal { m_mean(row, col) = m_sum(row, col) / m_cur_samples(row, col); } + template + void push_no_update(const uint32_t row, const uint32_t col, const T val_) { + SUM_TYPE val = static_cast(val_); + if (m_cur_samples(row, col) < m_samples) { + m_sum(row, col) += val; + m_sum2(row, col) += val * val; + m_cur_samples(row, col)++; + } else { + m_sum(row, col) += val - m_sum(row, col) / m_cur_samples(row, col); + m_sum2(row, col) += val * val - m_sum2(row, col) / m_cur_samples(row, col); + } + } + + /** + * @brief Update the mean of the pedestal. This is used after having done + * push_no_update. It is not necessary to call this function after push. 
+ */ + void update_mean(){ + m_mean = m_sum / m_cur_samples; + } + + template + void push_fast(const uint32_t row, const uint32_t col, const T val_){ + //Assume we reached the steady state where all pixels have + //m_samples samples + SUM_TYPE val = static_cast(val_); + m_sum(row, col) += val - m_sum(row, col) / m_samples; + m_sum2(row, col) += val * val - m_sum2(row, col) / m_samples; + m_mean(row, col) = m_sum(row, col) / m_samples; + } + }; } // namespace aare \ No newline at end of file diff --git a/python/src/pedestal.hpp b/python/src/pedestal.hpp index 4d5d043..77148dc 100644 --- a/python/src/pedestal.hpp +++ b/python/src/pedestal.hpp @@ -43,5 +43,10 @@ template void define_pedestal_bindings(py::module &m, const .def("push", [](Pedestal &pedestal, py::array_t &f) { auto v = make_view_2d(f); pedestal.push(v); - }); + }) + .def("push_no_update", [](Pedestal &pedestal, py::array_t &f) { + auto v = make_view_2d(f); + pedestal.push_no_update(v); + }, py::arg().noconvert()) + .def("update_mean", &Pedestal::update_mean); } \ No newline at end of file From 29b1dc8df3321d2f399068a7b4992863193e450d Mon Sep 17 00:00:00 2001 From: froejdh_e Date: Fri, 13 Dec 2024 14:57:36 +0100 Subject: [PATCH 007/120] missing header --- CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/CMakeLists.txt b/CMakeLists.txt index 24b7b30..cd1cd94 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -282,6 +282,7 @@ set(PUBLICHEADERS include/aare/ClusterFinder.hpp include/aare/ClusterFile.hpp include/aare/CtbRawFile.hpp + include/aare/ClusterVector.hpp include/aare/defs.hpp include/aare/Dtype.hpp include/aare/File.hpp From e6098c02efdad6c006979c3604cc7333f903175f Mon Sep 17 00:00:00 2001 From: froejdh_e Date: Mon, 16 Dec 2024 14:24:46 +0100 Subject: [PATCH 008/120] bumped version --- conda-recipe/meta.yaml | 2 +- include/aare/ClusterFile.hpp | 29 ++++---- include/aare/ClusterVector.hpp | 8 ++- pyproject.toml | 2 +- python/src/cluster_file.hpp | 15 +++-- src/ClusterFile.cpp | 120 
++++++++++++++++++++++++++------- 6 files changed, 129 insertions(+), 47 deletions(-) diff --git a/conda-recipe/meta.yaml b/conda-recipe/meta.yaml index 86fc9a8..c3c823b 100644 --- a/conda-recipe/meta.yaml +++ b/conda-recipe/meta.yaml @@ -1,6 +1,6 @@ package: name: aare - version: 2024.11.28.dev0 #TODO! how to not duplicate this? + version: 2024.12.16.dev0 #TODO! how to not duplicate this? source: diff --git a/include/aare/ClusterFile.hpp b/include/aare/ClusterFile.hpp index edcb91e..a484560 100644 --- a/include/aare/ClusterFile.hpp +++ b/include/aare/ClusterFile.hpp @@ -1,13 +1,14 @@ #pragma once -#include "aare/defs.hpp" #include "aare/ClusterVector.hpp" +#include "aare/NDArray.hpp" +#include "aare/defs.hpp" #include #include namespace aare { -struct Cluster { +struct Cluster3x3 { int16_t x; int16_t y; int32_t data[9]; @@ -58,21 +59,23 @@ class ClusterFile { ClusterFile(const std::filesystem::path &fname, size_t chunk_size = 1000, const std::string &mode = "r"); ~ClusterFile(); - std::vector read_clusters(size_t n_clusters); - std::vector read_frame(int32_t &out_fnum); - void write_frame(int32_t frame_number, const ClusterVector& clusters); - std::vector + std::vector read_clusters(size_t n_clusters); + std::vector read_frame(int32_t &out_fnum); + void write_frame(int32_t frame_number, + const ClusterVector &clusters); + std::vector read_cluster_with_cut(size_t n_clusters, double *noise_map, int nx, int ny); - int analyze_data(int32_t *data, int32_t *t2, int32_t *t3, char *quad, - double *eta2x, double *eta2y, double *eta3x, - double *eta3y); - int analyze_cluster(Cluster cl, int32_t *t2, int32_t *t3, char *quad, - double *eta2x, double *eta2y, double *eta3x, - double *eta3y); - size_t chunk_size() const { return m_chunk_size; } void close(); }; +int analyze_data(int32_t *data, int32_t *t2, int32_t *t3, char *quad, + double *eta2x, double *eta2y, double *eta3x, double *eta3y); +int analyze_cluster(Cluster3x3& cl, int32_t *t2, int32_t *t3, char *quad, + double 
*eta2x, double *eta2y, double *eta3x, double *eta3y); + +NDArray calculate_eta2( ClusterVector& clusters); +std::array calculate_eta2( Cluster3x3& cl); + } // namespace aare diff --git a/include/aare/ClusterVector.hpp b/include/aare/ClusterVector.hpp index ce8d935..98d4b37 100644 --- a/include/aare/ClusterVector.hpp +++ b/include/aare/ClusterVector.hpp @@ -148,12 +148,18 @@ template class ClusterVector { * @brief Return a pointer to the i-th cluster */ std::byte *element_ptr(size_t i) { return m_data + element_offset(i); } + const std::byte * element_ptr(size_t i) const { return m_data + element_offset(i); } size_t cluster_size_x() const { return m_cluster_size_x; } size_t cluster_size_y() const { return m_cluster_size_y; } std::byte *data() { return m_data; } - const std::byte *data() const { return m_data; } + std::byte const *data() const { return m_data; } + + template + V& at(size_t i) { + return *reinterpret_cast(element_ptr(i)); + } const std::string_view fmt_base() const { //TODO! how do we match on coord_t? 
diff --git a/pyproject.toml b/pyproject.toml index f194c68..b839003 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "scikit_build_core.build" [project] name = "aare" -version = "2024.11.28.dev0" +version = "2024.12.16.dev0" [tool.scikit-build] cmake.verbose = true diff --git a/python/src/cluster_file.hpp b/python/src/cluster_file.hpp index aa7fd23..82870c4 100644 --- a/python/src/cluster_file.hpp +++ b/python/src/cluster_file.hpp @@ -20,7 +20,7 @@ namespace py = pybind11; using namespace ::aare; void define_cluster_file_io_bindings(py::module &m) { - PYBIND11_NUMPY_DTYPE(Cluster, x, y, data); + PYBIND11_NUMPY_DTYPE(Cluster3x3, x, y, data); py::class_(m, "ClusterFile") .def(py::init(self.read_clusters(n_clusters)); + new std::vector(self.read_clusters(n_clusters)); return return_vector(vec); }) .def("read_frame", [](ClusterFile &self) { int32_t frame_number; auto *vec = - new std::vector(self.read_frame(frame_number)); + new std::vector(self.read_frame(frame_number)); return py::make_tuple(frame_number, return_vector(vec)); }) .def("write_frame", &ClusterFile::write_frame) @@ -45,7 +45,7 @@ void define_cluster_file_io_bindings(py::module &m) { py::array_t noise_map, int nx, int ny) { auto view = make_view_2d(noise_map); auto *vec = - new std::vector(self.read_cluster_with_cut( + new std::vector(self.read_cluster_with_cut( n_clusters, view.data(), nx, ny)); return return_vector(vec); }) @@ -60,12 +60,17 @@ void define_cluster_file_io_bindings(py::module &m) { .def("__iter__", [](ClusterFile &self) { return &self; }) .def("__next__", [](ClusterFile &self) { auto vec = - new std::vector(self.read_clusters(self.chunk_size())); + new std::vector(self.read_clusters(self.chunk_size())); if (vec->size() == 0) { throw py::stop_iteration(); } return return_vector(vec); }); + + m.def("calculate_eta2", []( aare::ClusterVector &clusters) { + auto eta2 = new NDArray(calculate_eta2(clusters)); + return return_image_data(eta2); + }); } #pragma GCC 
diagnostic pop \ No newline at end of file diff --git a/src/ClusterFile.cpp b/src/ClusterFile.cpp index 182726b..855e0e7 100644 --- a/src/ClusterFile.cpp +++ b/src/ClusterFile.cpp @@ -1,5 +1,7 @@ #include "aare/ClusterFile.hpp" +#include + namespace aare { ClusterFile::ClusterFile(const std::filesystem::path &fname, size_t chunk_size, @@ -9,17 +11,18 @@ ClusterFile::ClusterFile(const std::filesystem::path &fname, size_t chunk_size, if (mode == "r") { fp = fopen(fname.c_str(), "rb"); if (!fp) { - throw std::runtime_error("Could not open file for reading: " + fname.string()); + throw std::runtime_error("Could not open file for reading: " + + fname.string()); } } else if (mode == "w") { fp = fopen(fname.c_str(), "wb"); if (!fp) { - throw std::runtime_error("Could not open file for writing: " + fname.string()); + throw std::runtime_error("Could not open file for writing: " + + fname.string()); } } else { throw std::runtime_error("Unsupported mode: " + mode); } - } ClusterFile::~ClusterFile() { close(); } @@ -31,11 +34,13 @@ void ClusterFile::close() { } } -void ClusterFile::write_frame(int32_t frame_number, const ClusterVector& clusters){ +void ClusterFile::write_frame(int32_t frame_number, + const ClusterVector &clusters) { if (m_mode != "w") { throw std::runtime_error("File not opened for writing"); } - if(!(clusters.cluster_size_x()==3) && !(clusters.cluster_size_y()==3)){ + if (!(clusters.cluster_size_x() == 3) && + !(clusters.cluster_size_y() == 3)) { throw std::runtime_error("Only 3x3 clusters are supported"); } fwrite(&frame_number, sizeof(frame_number), 1, fp); @@ -46,18 +51,18 @@ void ClusterFile::write_frame(int32_t frame_number, const ClusterVector // fwrite(clusters.data(), sizeof(Cluster), clusters.size(), fp); } -std::vector ClusterFile::read_clusters(size_t n_clusters) { +std::vector ClusterFile::read_clusters(size_t n_clusters) { if (m_mode != "r") { throw std::runtime_error("File not opened for reading"); } - std::vector clusters(n_clusters); + 
std::vector clusters(n_clusters); int32_t iframe = 0; // frame number needs to be 4 bytes! size_t nph_read = 0; uint32_t nn = m_num_left; uint32_t nph = m_num_left; // number of clusters in frame needs to be 4 - auto buf = reinterpret_cast(clusters.data()); + auto buf = reinterpret_cast(clusters.data()); // if there are photons left from previous frame read them first if (nph) { if (nph > n_clusters) { @@ -68,7 +73,7 @@ std::vector ClusterFile::read_clusters(size_t n_clusters) { nn = nph; } nph_read += fread(reinterpret_cast(buf + nph_read), - sizeof(Cluster), nn, fp); + sizeof(Cluster3x3), nn, fp); m_num_left = nph - nn; // write back the number of photons left } @@ -83,7 +88,7 @@ std::vector ClusterFile::read_clusters(size_t n_clusters) { nn = nph; nph_read += fread(reinterpret_cast(buf + nph_read), - sizeof(Cluster), nn, fp); + sizeof(Cluster3x3), nn, fp); m_num_left = nph - nn; } if (nph_read >= n_clusters) @@ -97,7 +102,7 @@ std::vector ClusterFile::read_clusters(size_t n_clusters) { return clusters; } -std::vector ClusterFile::read_frame(int32_t &out_fnum) { +std::vector ClusterFile::read_frame(int32_t &out_fnum) { if (m_mode != "r") { throw std::runtime_error("File not opened for reading"); } @@ -114,22 +119,22 @@ std::vector ClusterFile::read_frame(int32_t &out_fnum) { if (fread(&n_clusters, sizeof(n_clusters), 1, fp) != 1) { throw std::runtime_error("Could not read number of clusters"); } - std::vector clusters(n_clusters); + std::vector clusters(n_clusters); - if (fread(clusters.data(), sizeof(Cluster), n_clusters, fp) != + if (fread(clusters.data(), sizeof(Cluster3x3), n_clusters, fp) != static_cast(n_clusters)) { throw std::runtime_error("Could not read clusters"); } return clusters; } -std::vector ClusterFile::read_cluster_with_cut(size_t n_clusters, - double *noise_map, - int nx, int ny) { +std::vector ClusterFile::read_cluster_with_cut(size_t n_clusters, + double *noise_map, + int nx, int ny) { if (m_mode != "r") { throw std::runtime_error("File not 
opened for reading"); } - std::vector clusters(n_clusters); + std::vector clusters(n_clusters); // size_t read_clusters_with_cut(FILE *fp, size_t n_clusters, Cluster *buf, // uint32_t *n_left, double *noise_map, int // nx, int ny) { @@ -143,7 +148,7 @@ std::vector ClusterFile::read_cluster_with_cut(size_t n_clusters, int32_t t2max, tot1; int32_t tot3; // Cluster *ptr = buf; - Cluster *ptr = clusters.data(); + Cluster3x3 *ptr = clusters.data(); int good = 1; double noise; // read photons left from previous frame @@ -161,7 +166,7 @@ std::vector ClusterFile::read_cluster_with_cut(size_t n_clusters, for (size_t iph = 0; iph < nn; iph++) { // read photons 1 by 1 size_t n_read = - fread(reinterpret_cast(ptr), sizeof(Cluster), 1, fp); + fread(reinterpret_cast(ptr), sizeof(Cluster3x3), 1, fp); if (n_read != 1) { clusters.resize(nph_read); return clusters; @@ -207,7 +212,7 @@ std::vector ClusterFile::read_cluster_with_cut(size_t n_clusters, for (size_t iph = 0; iph < nph; iph++) { // // read photons 1 by 1 size_t n_read = fread(reinterpret_cast(ptr), - sizeof(Cluster), 1, fp); + sizeof(Cluster3x3), 1, fp); if (n_read != 1) { clusters.resize(nph_read); return clusters; @@ -250,16 +255,78 @@ std::vector ClusterFile::read_cluster_with_cut(size_t n_clusters, return clusters; } -int ClusterFile::analyze_cluster(Cluster cl, int32_t *t2, int32_t *t3, - char *quad, double *eta2x, double *eta2y, - double *eta3x, double *eta3y) { +NDArray calculate_eta2(ClusterVector &clusters) { + NDArray eta2({clusters.size(), 2}); + for (size_t i = 0; i < clusters.size(); i++) { + // int32_t t2; + // auto* ptr = reinterpret_cast (clusters.element_ptr(i) + 2 * + // sizeof(int16_t)); analyze_cluster(clusters.at(i), &t2, + // nullptr, nullptr, &eta2(i,0), &eta2(i,1) , nullptr, nullptr); + auto [x, y] = calculate_eta2(clusters.at(i)); + eta2(i, 0) = x; + eta2(i, 1) = y; + } + return eta2; +} + +std::array calculate_eta2(Cluster3x3 &cl) { + std::array eta2{}; + + std::array tot2; + tot2[0] = cl.data[0] 
+ cl.data[1] + cl.data[3] + cl.data[4]; + tot2[1] = cl.data[1] + cl.data[2] + cl.data[4] + cl.data[5]; + tot2[2] = cl.data[3] + cl.data[4] + cl.data[6] + cl.data[7]; + tot2[3] = cl.data[4] + cl.data[5] + cl.data[7] + cl.data[8]; + + auto c = std::max_element(tot2.begin(), tot2.end()) - tot2.begin(); + + switch (c) { + case cBottomLeft: + if ((cl.data[3] + cl.data[4]) != 0) + eta2[0] = + static_cast(cl.data[4]) / (cl.data[3] + cl.data[4]); + if ((cl.data[1] + cl.data[4]) != 0) + eta2[1] = + static_cast(cl.data[4]) / (cl.data[1] + cl.data[4]); + break; + case cBottomRight: + if ((cl.data[2] + cl.data[5]) != 0) + eta2[0] = + static_cast(cl.data[5]) / (cl.data[4] + cl.data[5]); + if ((cl.data[1] + cl.data[4]) != 0) + eta2[1] = + static_cast(cl.data[4]) / (cl.data[1] + cl.data[4]); + break; + case cTopLeft: + if ((cl.data[7] + cl.data[4]) != 0) + eta2[0] = + static_cast(cl.data[4]) / (cl.data[3] + cl.data[4]); + if ((cl.data[7] + cl.data[4]) != 0) + eta2[1] = + static_cast(cl.data[7]) / (cl.data[7] + cl.data[4]); + break; + case cTopRight: + if ((cl.data[5] + cl.data[4]) != 0) + eta2[0] = + static_cast(cl.data[5]) / (cl.data[5] + cl.data[4]); + if ((cl.data[7] + cl.data[4]) != 0) + eta2[1] = + static_cast(cl.data[7]) / (cl.data[7] + cl.data[4]); + break; + // default:; + } + return eta2; +} + +int analyze_cluster(Cluster3x3 &cl, int32_t *t2, int32_t *t3, char *quad, + double *eta2x, double *eta2y, double *eta3x, + double *eta3y) { return analyze_data(cl.data, t2, t3, quad, eta2x, eta2y, eta3x, eta3y); } -int ClusterFile::analyze_data(int32_t *data, int32_t *t2, int32_t *t3, - char *quad, double *eta2x, double *eta2y, - double *eta3x, double *eta3y) { +int analyze_data(int32_t *data, int32_t *t2, int32_t *t3, char *quad, + double *eta2x, double *eta2y, double *eta3x, double *eta3y) { int ok = 1; @@ -307,6 +374,7 @@ int ClusterFile::analyze_data(int32_t *data, int32_t *t2, int32_t *t3, if (t2) *t2 = t2max; } + if (t3) *t3 = tot3; From 
d07da42745e6fae65c4e8219eddd9e89cecc2296 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Erik=20Fr=C3=B6jdh?= Date: Tue, 7 Jan 2025 12:27:01 +0100 Subject: [PATCH 009/120] bitdepths --- include/aare/RawSubFile.hpp | 3 +++ src/ClusterFile.cpp | 2 +- src/RawSubFile.cpp | 48 +++++++++++++++++++++++-------------- 3 files changed, 34 insertions(+), 19 deletions(-) diff --git a/include/aare/RawSubFile.hpp b/include/aare/RawSubFile.hpp index 4d78670..89c278e 100644 --- a/include/aare/RawSubFile.hpp +++ b/include/aare/RawSubFile.hpp @@ -66,6 +66,9 @@ class RawSubFile { size_t pixels_per_frame() const { return m_rows * m_cols; } size_t bytes_per_pixel() const { return m_bitdepth / 8; } +private: + template + void read_with_map(std::byte *image_buf); }; diff --git a/src/ClusterFile.cpp b/src/ClusterFile.cpp index 855e0e7..0e5b93e 100644 --- a/src/ClusterFile.cpp +++ b/src/ClusterFile.cpp @@ -256,7 +256,7 @@ std::vector ClusterFile::read_cluster_with_cut(size_t n_clusters, } NDArray calculate_eta2(ClusterVector &clusters) { - NDArray eta2({clusters.size(), 2}); + NDArray eta2({static_cast(clusters.size()), 2}); for (size_t i = 0; i < clusters.size(); i++) { // int32_t t2; // auto* ptr = reinterpret_cast (clusters.element_ptr(i) + 2 * diff --git a/src/RawSubFile.cpp b/src/RawSubFile.cpp index 6fae7ce..4612747 100644 --- a/src/RawSubFile.cpp +++ b/src/RawSubFile.cpp @@ -9,11 +9,13 @@ namespace aare { RawSubFile::RawSubFile(const std::filesystem::path &fname, DetectorType detector, size_t rows, size_t cols, size_t bitdepth, uint32_t pos_row, uint32_t pos_col) - : m_detector_type(detector), m_bitdepth(bitdepth), m_fname(fname), m_rows(rows), m_cols(cols), - m_bytes_per_frame((m_bitdepth / 8) * m_rows * m_cols), m_pos_row(pos_row), m_pos_col(pos_col) { + : m_detector_type(detector), m_bitdepth(bitdepth), m_fname(fname), + m_rows(rows), m_cols(cols), + m_bytes_per_frame((m_bitdepth / 8) * m_rows * m_cols), m_pos_row(pos_row), + m_pos_col(pos_col) { if (m_detector_type == 
DetectorType::Moench03_old) { m_pixel_map = GenerateMoench03PixelMap(); - }else if(m_detector_type == DetectorType::Eiger && m_pos_row % 2 == 0){ + } else if (m_detector_type == DetectorType::Eiger && m_pos_row % 2 == 0) { m_pixel_map = GenerateEigerFlipRowsPixelMap(); } @@ -51,37 +53,48 @@ size_t RawSubFile::tell() { return m_file.tellg() / (sizeof(DetectorHeader) + bytes_per_frame()); } - void RawSubFile::read_into(std::byte *image_buf, DetectorHeader *header) { - if(header){ - m_file.read(reinterpret_cast(header), sizeof(DetectorHeader)); + if (header) { + m_file.read(reinterpret_cast(header), sizeof(DetectorHeader)); } else { m_file.seekg(sizeof(DetectorHeader), std::ios::cur); } - //TODO! expand support for different bitdepths - if(m_pixel_map){ + // TODO! expand support for different bitdepths + if (m_pixel_map) { // read into a temporary buffer and then copy the data to the buffer // in the correct order - // currently this only supports 16 bit data! - auto part_buffer = new std::byte[bytes_per_frame()]; - m_file.read(reinterpret_cast(part_buffer), bytes_per_frame()); - auto *data = reinterpret_cast(image_buf); - auto *part_data = reinterpret_cast(part_buffer); - for (size_t i = 0; i < pixels_per_frame(); i++) { - data[i] = part_data[(*m_pixel_map)(i)]; + // TODO! 
add 4 bit support + if(m_bitdepth == 8){ + read_with_map(image_buf); + }else if (m_bitdepth == 16) { + read_with_map(image_buf); + } else if (m_bitdepth == 32) { + read_with_map(image_buf); + }else{ + throw std::runtime_error("Unsupported bitdepth for read with pixel map"); } - delete[] part_buffer; + } else { // read directly into the buffer m_file.read(reinterpret_cast(image_buf), bytes_per_frame()); } } +template +void RawSubFile::read_with_map(std::byte *image_buf) { + auto part_buffer = new std::byte[bytes_per_frame()]; + m_file.read(reinterpret_cast(part_buffer), bytes_per_frame()); + auto *data = reinterpret_cast(image_buf); + auto *part_data = reinterpret_cast(part_buffer); + for (size_t i = 0; i < pixels_per_frame(); i++) { + data[i] = part_data[(*m_pixel_map)(i)]; + } + delete[] part_buffer; +} size_t RawSubFile::rows() const { return m_rows; } size_t RawSubFile::cols() const { return m_cols; } - void RawSubFile::get_part(std::byte *buffer, size_t frame_index) { seek(frame_index); read_into(buffer, nullptr); @@ -94,5 +107,4 @@ size_t RawSubFile::frame_number(size_t frame_index) { return h.frameNumber; } - } // namespace aare \ No newline at end of file From acdce8454bdf624c7e76df9068eb4c6f66d80ec6 Mon Sep 17 00:00:00 2001 From: froejdh_e Date: Tue, 7 Jan 2025 15:01:43 +0100 Subject: [PATCH 010/120] moved pd to double --- python/src/cluster.hpp | 121 +++++++++++++++++++++-------------------- 1 file changed, 62 insertions(+), 59 deletions(-) diff --git a/python/src/cluster.hpp b/python/src/cluster.hpp index d11c706..5d22091 100644 --- a/python/src/cluster.hpp +++ b/python/src/cluster.hpp @@ -10,7 +10,7 @@ #include namespace py = pybind11; -using pd_type = float; +using pd_type = double; template void define_cluster_vector(py::module &m, const std::string &typestr) { @@ -21,90 +21,93 @@ void define_cluster_vector(py::module &m, const std::string &typestr) { .def("element_offset", py::overload_cast<>(&ClusterVector::element_offset, py::const_)) 
.def_property_readonly("fmt", - [typestr](ClusterVector &self) { - return fmt::format( - self.fmt_base(), self.cluster_size_x(), - self.cluster_size_y(), typestr); + [typestr](ClusterVector &self) { + return fmt::format( + self.fmt_base(), self.cluster_size_x(), + self.cluster_size_y(), typestr); + }) + .def("sum", + [](ClusterVector &self) { + auto *vec = new std::vector(self.sum()); + return return_vector(vec); }) - .def("sum", [](ClusterVector &self) { - auto *vec = new std::vector(self.sum()); - return return_vector(vec); - }) .def_buffer([typestr](ClusterVector &self) -> py::buffer_info { return py::buffer_info( self.data(), /* Pointer to buffer */ self.element_offset(), /* Size of one scalar */ fmt::format(self.fmt_base(), self.cluster_size_x(), self.cluster_size_y(), - typestr), /* Format descriptor */ - 1, /* Number of dimensions */ - {self.size()}, /* Buffer dimensions */ - {self.element_offset()} /* Strides (in bytes) for each index */ + typestr), /* Format descriptor */ + 1, /* Number of dimensions */ + {self.size()}, /* Buffer dimensions */ + {self.element_offset()} /* Strides (in bytes) for each index */ ); }); } void define_cluster_finder_bindings(py::module &m) { py::class_>(m, "ClusterFinder") - .def(py::init, Shape<2>, pd_type, size_t>(), py::arg("image_size"), - py::arg("cluster_size"), py::arg("n_sigma") = 5.0, - py::arg("capacity") = 1'000'000) + .def(py::init, Shape<2>, pd_type, size_t>(), + py::arg("image_size"), py::arg("cluster_size"), + py::arg("n_sigma") = 5.0, py::arg("capacity") = 1'000'000) .def("push_pedestal_frame", [](ClusterFinder &self, py::array_t frame) { auto view = make_view_2d(frame); self.push_pedestal_frame(view); }) - .def("pedestal", - [](ClusterFinder &self) { - auto pd = new NDArray{}; - *pd = self.pedestal(); - return return_image_data(pd); - }) - .def("noise", - [](ClusterFinder &self) { - auto arr = new NDArray{}; - *arr = self.noise(); - return return_image_data(arr); - }) - .def("steal_clusters", - [](ClusterFinder 
&self, bool realloc_same_capacity) { - auto v = new ClusterVector(self.steal_clusters(realloc_same_capacity)); - return v; - }, py::arg("realloc_same_capacity") = false) - .def("find_clusters", - [](ClusterFinder &self, - py::array_t frame) { - auto view = make_view_2d(frame); - self.find_clusters(view); - return; - }); + .def_property_readonly("pedestal", + [](ClusterFinder &self) { + auto pd = new NDArray{}; + *pd = self.pedestal(); + return return_image_data(pd); + }) + .def_property_readonly("noise", + [](ClusterFinder &self) { + auto arr = new NDArray{}; + *arr = self.noise(); + return return_image_data(arr); + }) + .def( + "steal_clusters", + [](ClusterFinder &self, + bool realloc_same_capacity) { + auto v = new ClusterVector( + self.steal_clusters(realloc_same_capacity)); + return v; + }, + py::arg("realloc_same_capacity") = false) + .def("find_clusters", [](ClusterFinder &self, + py::array_t frame) { + auto view = make_view_2d(frame); + self.find_clusters(view); + return; + }); - m.def("hitmap", [](std::array image_size, ClusterVector& cv){ - - py::array_t hitmap(image_size); - auto r = hitmap.mutable_unchecked<2>(); + m.def("hitmap", + [](std::array image_size, ClusterVector &cv) { + py::array_t hitmap(image_size); + auto r = hitmap.mutable_unchecked<2>(); - // Initialize hitmap to 0 - for (py::ssize_t i = 0; i < r.shape(0); i++) - for (py::ssize_t j = 0; j < r.shape(1); j++) - r(i, j) = 0; + // Initialize hitmap to 0 + for (py::ssize_t i = 0; i < r.shape(0); i++) + for (py::ssize_t j = 0; j < r.shape(1); j++) + r(i, j) = 0; - size_t stride = cv.element_offset(); - auto ptr = cv.data(); - for(size_t i=0; i(ptr); - auto y = *reinterpret_cast(ptr+sizeof(int16_t)); - r(y, x) += 1; - ptr += stride; - } - return hitmap; - }); + size_t stride = cv.element_offset(); + auto ptr = cv.data(); + for (size_t i = 0; i < cv.size(); i++) { + auto x = *reinterpret_cast(ptr); + auto y = *reinterpret_cast(ptr + sizeof(int16_t)); + r(y, x) += 1; + ptr += stride; + } + return 
hitmap; + }); define_cluster_vector(m, "i"); define_cluster_vector(m, "d"); define_cluster_vector(m, "f"); - py::class_(m, "DynamicCluster", py::buffer_protocol()) .def(py::init()) .def("size", &DynamicCluster::size) From 21ce7a3efaceac0ce1ad264b052b3f748dda1859 Mon Sep 17 00:00:00 2001 From: froejdh_e Date: Tue, 7 Jan 2025 16:33:16 +0100 Subject: [PATCH 011/120] bumped version --- conda-recipe/meta.yaml | 2 +- include/aare/ClusterFinder.hpp | 112 +-------------------------------- include/aare/Pedestal.hpp | 4 +- pyproject.toml | 2 +- 4 files changed, 6 insertions(+), 114 deletions(-) diff --git a/conda-recipe/meta.yaml b/conda-recipe/meta.yaml index c3c823b..dd2b682 100644 --- a/conda-recipe/meta.yaml +++ b/conda-recipe/meta.yaml @@ -1,6 +1,6 @@ package: name: aare - version: 2024.12.16.dev0 #TODO! how to not duplicate this? + version: 2025.1.7.dev0 #TODO! how to not duplicate this? source: diff --git a/include/aare/ClusterFinder.hpp b/include/aare/ClusterFinder.hpp index 8bd77cc..aa17d19 100644 --- a/include/aare/ClusterFinder.hpp +++ b/include/aare/ClusterFinder.hpp @@ -121,8 +121,8 @@ class ClusterFinder { } else if (total > c3 * m_nSigma * rms) { // pass } else { - // m_pedestal.push(iy, ix, frame(iy, ix)); - m_pedestal.push_fast(iy, ix, frame(iy, ix)); + // m_pedestal.push(iy, ix, frame(iy, ix)); // Safe option + m_pedestal.push_fast(iy, ix, frame(iy, ix)); // Assume we have reached n_samples in the pedestal, slight performance improvement continue; // It was a pedestal value nothing to store } @@ -157,114 +157,6 @@ class ClusterFinder { } } } - - // // template - // std::vector - // find_clusters_with_threshold(NDView frame, - // Pedestal &pedestal) { - // assert(m_threshold > 0); - // std::vector clusters; - // std::vector> eventMask; - // for (int i = 0; i < frame.shape(0); i++) { - // eventMask.push_back(std::vector(frame.shape(1))); - // } - // double tthr, tthr1, tthr2; - - // NDArray rest({frame.shape(0), frame.shape(1)}); - // NDArray 
nph({frame.shape(0), frame.shape(1)}); - // // convert to n photons - // // nph = (frame-pedestal.mean()+0.5*m_threshold)/m_threshold; // can - // be - // // optimized with expression templates? - // for (int iy = 0; iy < frame.shape(0); iy++) { - // for (int ix = 0; ix < frame.shape(1); ix++) { - // auto val = frame(iy, ix) - pedestal.mean(iy, ix); - // nph(iy, ix) = (val + 0.5 * m_threshold) / m_threshold; - // nph(iy, ix) = nph(iy, ix) < 0 ? 0 : nph(iy, ix); - // rest(iy, ix) = val - nph(iy, ix) * m_threshold; - // } - // } - // // iterate over frame pixels - // for (int iy = 0; iy < frame.shape(0); iy++) { - // for (int ix = 0; ix < frame.shape(1); ix++) { - // eventMask[iy][ix] = eventType::PEDESTAL; - // // initialize max and total - // FRAME_TYPE max = std::numeric_limits::min(); - // long double total = 0; - // if (rest(iy, ix) <= 0.25 * m_threshold) { - // pedestal.push(iy, ix, frame(iy, ix)); - // continue; - // } - // eventMask[iy][ix] = eventType::NEIGHBOUR; - // // iterate over cluster pixels around the current pixel - // (ix,iy) for (short ir = -(m_cluster_sizeY / 2); - // ir < (m_cluster_sizeY / 2) + 1; ir++) { - // for (short ic = -(m_cluster_sizeX / 2); - // ic < (m_cluster_sizeX / 2) + 1; ic++) { - // if (ix + ic >= 0 && ix + ic < frame.shape(1) && - // iy + ir >= 0 && iy + ir < frame.shape(0)) { - // auto val = frame(iy + ir, ix + ic) - - // pedestal.mean(iy + ir, ix + ic); - // total += val; - // if (val > max) { - // max = val; - // } - // } - // } - // } - - // auto rms = pedestal.std(iy, ix); - // if (m_nSigma == 0) { - // tthr = m_threshold; - // tthr1 = m_threshold; - // tthr2 = m_threshold; - // } else { - // tthr = m_nSigma * rms; - // tthr1 = m_nSigma * rms * c3; - // tthr2 = m_nSigma * rms * c2; - - // if (m_threshold > 2 * tthr) - // tthr = m_threshold - tthr; - // if (m_threshold > 2 * tthr1) - // tthr1 = tthr - tthr1; - // if (m_threshold > 2 * tthr2) - // tthr2 = tthr - tthr2; - // } - // if (total > tthr1 || max > tthr) { - // 
eventMask[iy][ix] = eventType::PHOTON; - // nph(iy, ix) += 1; - // rest(iy, ix) -= m_threshold; - // } else { - // pedestal.push(iy, ix, frame(iy, ix)); - // continue; - // } - // if (eventMask[iy][ix] == eventType::PHOTON && - // frame(iy, ix) - pedestal.mean(iy, ix) >= max) { - // eventMask[iy][ix] = eventType::PHOTON_MAX; - // DynamicCluster cluster(m_cluster_sizeX, m_cluster_sizeY, - // Dtype(typeid(FRAME_TYPE))); - // cluster.x = ix; - // cluster.y = iy; - // short i = 0; - // for (short ir = -(m_cluster_sizeY / 2); - // ir < (m_cluster_sizeY / 2) + 1; ir++) { - // for (short ic = -(m_cluster_sizeX / 2); - // ic < (m_cluster_sizeX / 2) + 1; ic++) { - // if (ix + ic >= 0 && ix + ic < frame.shape(1) && - // iy + ir >= 0 && iy + ir < frame.shape(0)) { - // auto tmp = frame(iy + ir, ix + ic) - - // pedestal.mean(iy + ir, ix + ic); - // cluster.set(i, tmp); - // i++; - // } - // } - // } - // clusters.push_back(cluster); - // } - // } - // } - // return clusters; - // } }; } // namespace aare \ No newline at end of file diff --git a/include/aare/Pedestal.hpp b/include/aare/Pedestal.hpp index bda94f2..ab73cb9 100644 --- a/include/aare/Pedestal.hpp +++ b/include/aare/Pedestal.hpp @@ -165,8 +165,8 @@ template class Pedestal { m_sum2(row, col) += val * val; m_cur_samples(row, col)++; } else { - m_sum(row, col) += val - m_sum(row, col) / m_cur_samples(row, col); - m_sum2(row, col) += val * val - m_sum2(row, col) / m_cur_samples(row, col); + m_sum(row, col) += val - m_sum(row, col) / m_samples; + m_sum2(row, col) += val * val - m_sum2(row, col) / m_samples; } //Since we just did a push we know that m_cur_samples(row, col) is at least 1 m_mean(row, col) = m_sum(row, col) / m_cur_samples(row, col); diff --git a/pyproject.toml b/pyproject.toml index b839003..35bdefb 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "scikit_build_core.build" [project] name = "aare" -version = "2024.12.16.dev0" +version = "2025.1.7.dev0" [tool.scikit-build] 
cmake.verbose = true From dc9e10016dd76ba4778a3fdf82c8faf5fed016de Mon Sep 17 00:00:00 2001 From: froejdh_e Date: Wed, 8 Jan 2025 16:45:24 +0100 Subject: [PATCH 012/120] WIP --- include/aare/CircularFifo.hpp | 97 ++++++++++++ include/aare/ClusterCollector.hpp | 52 +++++++ include/aare/ClusterFinder.hpp | 14 -- include/aare/ClusterFinderMT.hpp | 189 +++++++++++++++++++++++ include/aare/ClusterVector.hpp | 15 +- include/aare/ProducerConsumerQueue.hpp | 203 +++++++++++++++++++++++++ python/examples/play.py | 66 +++++--- python/src/cluster.hpp | 56 +++++++ python/src/module.cpp | 3 + 9 files changed, 661 insertions(+), 34 deletions(-) create mode 100644 include/aare/CircularFifo.hpp create mode 100644 include/aare/ClusterCollector.hpp create mode 100644 include/aare/ClusterFinderMT.hpp create mode 100644 include/aare/ProducerConsumerQueue.hpp diff --git a/include/aare/CircularFifo.hpp b/include/aare/CircularFifo.hpp new file mode 100644 index 0000000..8098082 --- /dev/null +++ b/include/aare/CircularFifo.hpp @@ -0,0 +1,97 @@ +#pragma once + +#include +#include +#include +#include +#include + +#include "aare/ProducerConsumerQueue.hpp" + +namespace aare { + +template class CircularFifo { + uint32_t fifo_size; + aare::ProducerConsumerQueue free_slots; + aare::ProducerConsumerQueue filled_slots; + + public: + CircularFifo() : CircularFifo(100){}; + CircularFifo(uint32_t size) : fifo_size(size), free_slots(size + 1), filled_slots(size + 1) { + + // TODO! how do we deal with alignment for writing? alignas??? + // Do we give the user a chance to provide memory locations? + // Templated allocator? + for (size_t i = 0; i < fifo_size; ++i) { + free_slots.write(ItemType{}); + } + } + + bool next() { + // TODO! 
avoid default constructing ItemType + ItemType it; + if (!filled_slots.read(it)) + return false; + if (!free_slots.write(std::move(it))) + return false; + return true; + } + + ~CircularFifo() {} + + using value_type = ItemType; + + auto numFilledSlots() const noexcept { return filled_slots.sizeGuess(); } + auto numFreeSlots() const noexcept { return free_slots.sizeGuess(); } + auto isFull() const noexcept { return filled_slots.isFull(); } + + ItemType pop_free() { + ItemType v; + while (!free_slots.read(v)) + ; + return std::move(v); + // return v; + } + + bool try_pop_free(ItemType &v) { return free_slots.read(v); } + + ItemType pop_value(std::chrono::nanoseconds wait, std::atomic &stopped) { + ItemType v; + while (!filled_slots.read(v) && !stopped) { + std::this_thread::sleep_for(wait); + } + return std::move(v); + } + + ItemType pop_value() { + ItemType v; + while (!filled_slots.read(v)) + ; + return std::move(v); + } + + ItemType *frontPtr() { return filled_slots.frontPtr(); } + + // TODO! 
Add function to move item from filled to free to be used + // with the frontPtr function + + template void push_value(Args &&...recordArgs) { + while (!filled_slots.write(std::forward(recordArgs)...)) + ; + } + + template bool try_push_value(Args &&...recordArgs) { + return filled_slots.write(std::forward(recordArgs)...); + } + + template void push_free(Args &&...recordArgs) { + while (!free_slots.write(std::forward(recordArgs)...)) + ; + } + + template bool try_push_free(Args &&...recordArgs) { + return free_slots.write(std::forward(recordArgs)...); + } +}; + +} // namespace aare \ No newline at end of file diff --git a/include/aare/ClusterCollector.hpp b/include/aare/ClusterCollector.hpp new file mode 100644 index 0000000..0738062 --- /dev/null +++ b/include/aare/ClusterCollector.hpp @@ -0,0 +1,52 @@ +#pragma once +#include +#include + +#include "aare/ProducerConsumerQueue.hpp" +#include "aare/ClusterVector.hpp" +#include "aare/ClusterFinderMT.hpp" + +namespace aare { + +class ClusterCollector{ + ProducerConsumerQueue>* m_source; + std::atomic m_stop_requested{false}; + std::atomic m_stopped{true}; + std::chrono::milliseconds m_default_wait{1}; + std::thread m_thread; + std::vector> m_clusters; + + void process(){ + m_stopped = false; + fmt::print("ClusterCollector started\n"); + while (!m_stop_requested || !m_source->isEmpty()) { + if (ClusterVector *clusters = m_source->frontPtr(); + clusters != nullptr) { + m_clusters.push_back(std::move(*clusters)); + m_source->popFront(); + }else{ + std::this_thread::sleep_for(m_default_wait); + } + } + fmt::print("ClusterCollector stopped\n"); + m_stopped = true; + } + + public: + ClusterCollector(ClusterFinderMT* source){ + m_source = source->sink(); + m_thread = std::thread(&ClusterCollector::process, this); + } + void stop(){ + m_stop_requested = true; + m_thread.join(); + } + std::vector> steal_clusters(){ + if(!m_stopped){ + throw std::runtime_error("ClusterCollector is still running"); + } + return 
std::move(m_clusters); + } +}; + +} // namespace aare \ No newline at end of file diff --git a/include/aare/ClusterFinder.hpp b/include/aare/ClusterFinder.hpp index aa17d19..2fe33a7 100644 --- a/include/aare/ClusterFinder.hpp +++ b/include/aare/ClusterFinder.hpp @@ -10,26 +10,12 @@ namespace aare { -/** enum to define the event types */ -enum class eventType { - PEDESTAL, /** pedestal */ - NEIGHBOUR, /** neighbour i.e. below threshold, but in the cluster of a - photon */ - PHOTON, /** photon i.e. above threshold */ - PHOTON_MAX, /** maximum of a cluster satisfying the photon conditions */ - NEGATIVE_PEDESTAL, /** negative value, will not be accounted for as pedestal - in order to avoid drift of the pedestal towards - negative values */ - UNDEFINED_EVENT = -1 /** undefined */ -}; - template class ClusterFinder { Shape<2> m_image_size; const int m_cluster_sizeX; const int m_cluster_sizeY; - // const PEDESTAL_TYPE m_threshold; const PEDESTAL_TYPE m_nSigma; const PEDESTAL_TYPE c2; const PEDESTAL_TYPE c3; diff --git a/include/aare/ClusterFinderMT.hpp b/include/aare/ClusterFinderMT.hpp new file mode 100644 index 0000000..d3cbaf6 --- /dev/null +++ b/include/aare/ClusterFinderMT.hpp @@ -0,0 +1,189 @@ +#pragma once +#include +#include +#include +#include +#include + +#include "aare/NDArray.hpp" +#include "aare/ProducerConsumerQueue.hpp" + +namespace aare { + +enum class FrameType { + DATA, + PEDESTAL, +}; + +struct FrameWrapper { + FrameType type; + uint64_t frame_number; + NDArray data; +}; + + +template +class ClusterFinderMT { + size_t m_current_thread{0}; + size_t m_n_threads{0}; + using Finder = ClusterFinder; + using InputQueue = ProducerConsumerQueue; + using OutputQueue = ProducerConsumerQueue>; + std::vector> m_input_queues; + std::vector> m_output_queues; + + + OutputQueue m_sink{1000}; //All clusters go into this queue + + + std::vector> m_cluster_finders; + std::vector m_threads; + std::thread m_collect_thread; + std::chrono::milliseconds m_default_wait{1}; + + 
std::atomic m_stop_requested{false}; + std::atomic m_processing_threads_stopped{false}; + + void process(int thread_id) { + auto cf = m_cluster_finders[thread_id].get(); + auto q = m_input_queues[thread_id].get(); + // TODO! Avoid indexing into the vector every time + fmt::print("Thread {} started\n", thread_id); + //TODO! is this check enough to make sure we process all the frames? + while (!m_stop_requested || !q->isEmpty()) { + if (FrameWrapper *frame = q->frontPtr(); + frame != nullptr) { + // fmt::print("Thread {} got frame {}, type: {}\n", thread_id, + // frame->frame_number, static_cast(frame->type)); + + switch (frame->type) { + case FrameType::DATA: + cf->find_clusters( + frame->data.view()); + m_output_queues[thread_id]->write(cf->steal_clusters()); + + break; + + case FrameType::PEDESTAL: + m_cluster_finders[thread_id]->push_pedestal_frame( + frame->data.view()); + break; + + default: + break; + } + + // frame is processed now discard it + m_input_queues[thread_id]->popFront(); + } else { + std::this_thread::sleep_for(m_default_wait); + } + + } + fmt::print("Thread {} stopped\n", thread_id); + } + + /** + * @brief Collect all the clusters from the output queues and write them to the sink + */ + void collect(){ + bool empty = true; + while(!m_stop_requested || !empty || !m_processing_threads_stopped){ + empty = true; + for (auto &queue : m_output_queues) { + if (!queue->isEmpty()) { + + while(!m_sink.write(std::move(*queue->frontPtr()))){ + std::this_thread::sleep_for(m_default_wait); + } + queue->popFront(); + empty = false; + } + } + } + } + + public: + ClusterFinderMT(Shape<2> image_size, Shape<2> cluster_size, + PEDESTAL_TYPE nSigma = 5.0, size_t capacity = 2000, + size_t n_threads = 3) + : m_n_threads(n_threads) { + fmt::print("ClusterFinderMT: using {} threads\n", n_threads); + for (size_t i = 0; i < n_threads; i++) { + m_cluster_finders.push_back( + std::make_unique>( + image_size, cluster_size, nSigma, capacity)); + } + for (size_t i = 0; i < 
n_threads; i++) { + m_input_queues.emplace_back(std::make_unique(200)); + m_output_queues.emplace_back(std::make_unique(200)); + + } + + start(); + } + + ProducerConsumerQueue> *sink() { return &m_sink; } + + void start(){ + for (size_t i = 0; i < m_n_threads; i++) { + m_threads.push_back( + std::thread(&ClusterFinderMT::process, this, i)); + } + m_collect_thread = std::thread(&ClusterFinderMT::collect, this); + } + + void stop() { + m_stop_requested = true; + for (auto &thread : m_threads) { + thread.join(); + } + m_processing_threads_stopped = true; + m_collect_thread.join(); + } + + void sync(){ + for (auto &q : m_input_queues) { + while(!q->isEmpty()){ + std::this_thread::sleep_for(m_default_wait); + } + } + } + + void push_pedestal_frame(NDView frame) { + FrameWrapper fw{FrameType::PEDESTAL, 0, NDArray(frame)}; + + for (auto &queue : m_input_queues) { + while (!queue->write(fw)) { + // fmt::print("push_pedestal_frame: queue full\n"); + std::this_thread::sleep_for(m_default_wait); + } + } + } + + void find_clusters(NDView frame) { + FrameWrapper fw{FrameType::DATA, 0, NDArray(frame)}; + while (!m_input_queues[m_current_thread%m_n_threads]->write(fw)) { + std::this_thread::sleep_for(m_default_wait); + } + m_current_thread++; + } + + ClusterVector steal_clusters(bool realloc_same_capacity = false) { + ClusterVector clusters(3,3); + for (auto &finder : m_cluster_finders) { + clusters += finder->steal_clusters(); + } + return clusters; + } + + + // void push(FrameWrapper&& frame) { + // //TODO! 
need to loop until we are successful + // auto rc = m_input_queue.write(std::move(frame)); + // fmt::print("pushed frame {}\n", rc); + // } +}; + +} // namespace aare \ No newline at end of file diff --git a/include/aare/ClusterVector.hpp b/include/aare/ClusterVector.hpp index 98d4b37..a1b3a62 100644 --- a/include/aare/ClusterVector.hpp +++ b/include/aare/ClusterVector.hpp @@ -22,6 +22,7 @@ template class ClusterVector { std::byte *m_data{}; size_t m_size{0}; size_t m_capacity; + uint64_t m_frame_number{0}; //TODO! Check frame number size and type /* Format string used in the python bindings to create a numpy array from the buffer @@ -39,7 +40,7 @@ template class ClusterVector { * @param cluster_size_y size of the cluster in y direction * @param capacity initial capacity of the buffer in number of clusters */ - ClusterVector(size_t cluster_size_x, size_t cluster_size_y, + ClusterVector(size_t cluster_size_x = 3, size_t cluster_size_y = 3, size_t capacity = 1024) : m_cluster_size_x(cluster_size_x), m_cluster_size_y(cluster_size_y), m_capacity(capacity) { @@ -108,7 +109,14 @@ template class ClusterVector { ptr); m_size++; } - + ClusterVector& operator+=(const ClusterVector& other){ + if (m_size + other.m_size > m_capacity) { + allocate_buffer(m_capacity + other.m_size); + } + std::copy(other.m_data, other.m_data + other.m_size * element_offset(), m_data + m_size * element_offset()); + m_size += other.m_size; + return *this; + } /** * @brief Sum the pixels in each cluster @@ -166,6 +174,9 @@ template class ClusterVector { return m_fmt_base; } + uint64_t frame_number() const { return m_frame_number; } + void set_frame_number(uint64_t frame_number) { m_frame_number = frame_number; } + private: void allocate_buffer(size_t new_capacity) { size_t num_bytes = element_offset() * new_capacity; diff --git a/include/aare/ProducerConsumerQueue.hpp b/include/aare/ProducerConsumerQueue.hpp new file mode 100644 index 0000000..426b9e2 --- /dev/null +++ 
b/include/aare/ProducerConsumerQueue.hpp @@ -0,0 +1,203 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// @author Bo Hu (bhu@fb.com) +// @author Jordan DeLong (delong.j@fb.com) + +// Changes made by PSD Detector Group: +// Copied: Line 34 constexpr std::size_t hardware_destructive_interference_size = 128; from folly/lang/Align.h +// Changed extension to .hpp +// Changed namespace to aare + +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +constexpr std::size_t hardware_destructive_interference_size = 128; +namespace aare { + +/* + * ProducerConsumerQueue is a one producer and one consumer queue + * without locks. 
+ */ +template struct ProducerConsumerQueue { + typedef T value_type; + + ProducerConsumerQueue(const ProducerConsumerQueue &) = delete; + ProducerConsumerQueue &operator=(const ProducerConsumerQueue &) = delete; + + + ProducerConsumerQueue(ProducerConsumerQueue &&other){ + size_ = other.size_; + records_ = other.records_; + other.records_ = nullptr; + readIndex_ = other.readIndex_.load(std::memory_order_acquire); + writeIndex_ = other.writeIndex_.load(std::memory_order_acquire); + } + ProducerConsumerQueue &operator=(ProducerConsumerQueue &&other){ + size_ = other.size_; + records_ = other.records_; + other.records_ = nullptr; + readIndex_ = other.readIndex_.load(std::memory_order_acquire); + writeIndex_ = other.writeIndex_.load(std::memory_order_acquire); + return *this; + } + + + ProducerConsumerQueue():ProducerConsumerQueue(2){}; + // size must be >= 2. + // + // Also, note that the number of usable slots in the queue at any + // given time is actually (size-1), so if you start with an empty queue, + // isFull() will return true after size-1 insertions. + explicit ProducerConsumerQueue(uint32_t size) + : size_(size), records_(static_cast(std::malloc(sizeof(T) * size))), readIndex_(0), writeIndex_(0) { + assert(size >= 2); + if (!records_) { + throw std::bad_alloc(); + } + } + + ~ProducerConsumerQueue() { + // We need to destruct anything that may still exist in our queue. + // (No real synchronization needed at destructor time: only one + // thread can be doing this.) 
+ if (!std::is_trivially_destructible::value) { + size_t readIndex = readIndex_; + size_t endIndex = writeIndex_; + while (readIndex != endIndex) { + records_[readIndex].~T(); + if (++readIndex == size_) { + readIndex = 0; + } + } + } + + std::free(records_); + } + + template bool write(Args &&...recordArgs) { + auto const currentWrite = writeIndex_.load(std::memory_order_relaxed); + auto nextRecord = currentWrite + 1; + if (nextRecord == size_) { + nextRecord = 0; + } + if (nextRecord != readIndex_.load(std::memory_order_acquire)) { + new (&records_[currentWrite]) T(std::forward(recordArgs)...); + writeIndex_.store(nextRecord, std::memory_order_release); + return true; + } + + // queue is full + return false; + } + + // move (or copy) the value at the front of the queue to given variable + bool read(T &record) { + auto const currentRead = readIndex_.load(std::memory_order_relaxed); + if (currentRead == writeIndex_.load(std::memory_order_acquire)) { + // queue is empty + return false; + } + + auto nextRecord = currentRead + 1; + if (nextRecord == size_) { + nextRecord = 0; + } + record = std::move(records_[currentRead]); + records_[currentRead].~T(); + readIndex_.store(nextRecord, std::memory_order_release); + return true; + } + + // pointer to the value at the front of the queue (for use in-place) or + // nullptr if empty. 
+ T *frontPtr() { + auto const currentRead = readIndex_.load(std::memory_order_relaxed); + if (currentRead == writeIndex_.load(std::memory_order_acquire)) { + // queue is empty + return nullptr; + } + return &records_[currentRead]; + } + + // queue must not be empty + void popFront() { + auto const currentRead = readIndex_.load(std::memory_order_relaxed); + assert(currentRead != writeIndex_.load(std::memory_order_acquire)); + + auto nextRecord = currentRead + 1; + if (nextRecord == size_) { + nextRecord = 0; + } + records_[currentRead].~T(); + readIndex_.store(nextRecord, std::memory_order_release); + } + + bool isEmpty() const { + return readIndex_.load(std::memory_order_acquire) == writeIndex_.load(std::memory_order_acquire); + } + + bool isFull() const { + auto nextRecord = writeIndex_.load(std::memory_order_acquire) + 1; + if (nextRecord == size_) { + nextRecord = 0; + } + if (nextRecord != readIndex_.load(std::memory_order_acquire)) { + return false; + } + // queue is full + return true; + } + + // * If called by consumer, then true size may be more (because producer may + // be adding items concurrently). + // * If called by producer, then true size may be less (because consumer may + // be removing items concurrently). + // * It is undefined to call this from any other thread. + size_t sizeGuess() const { + int ret = writeIndex_.load(std::memory_order_acquire) - readIndex_.load(std::memory_order_acquire); + if (ret < 0) { + ret += size_; + } + return ret; + } + + // maximum number of items in the queue. 
+ size_t capacity() const { return size_ - 1; } + + private: + using AtomicIndex = std::atomic; + + char pad0_[hardware_destructive_interference_size]; + // const uint32_t size_; + uint32_t size_; + // T *const records_; + T* records_; + + alignas(hardware_destructive_interference_size) AtomicIndex readIndex_; + alignas(hardware_destructive_interference_size) AtomicIndex writeIndex_; + + char pad1_[hardware_destructive_interference_size - sizeof(AtomicIndex)]; +}; + +} // namespace aare \ No newline at end of file diff --git a/python/examples/play.py b/python/examples/play.py index 986b718..9c07c99 100644 --- a/python/examples/play.py +++ b/python/examples/play.py @@ -13,36 +13,66 @@ from aare import File, ClusterFinder, VarClusterFinder base = Path('/mnt/sls_det_storage/matterhorn_data/aare_test_data/') f = File(base/'Moench03new/cu_half_speed_master_4.json') -cf = ClusterFinder((400,400), (3,3)) + + +from aare._aare import ClusterFinderMT, ClusterCollector + + +cf = ClusterFinderMT((400,400), (3,3), n_threads = 3) +collector = ClusterCollector(cf) + for i in range(1000): - cf.push_pedestal_frame(f.read_frame()) + img = f.read_frame() + cf.push_pedestal_frame(img) +print('Pedestal done') +cf.sync() -fig, ax = plt.subplots() -im = ax.imshow(cf.pedestal()) -cf.pedestal() -cf.noise() +for i in range(100): + img = f.read_frame() + cf.find_clusters(img) + + +# time.sleep(1) +cf.stop() +collector.stop() +cv = collector.steal_clusters() +print(f'Processed {len(cv)} frames') + +print('Done') -N = 500 -t0 = time.perf_counter() -hist1 = bh.Histogram(bh.axis.Regular(40, -2, 4000)) -f.seek(0) -t0 = time.perf_counter() -data = f.read_n(N) -t_elapsed = time.perf_counter()-t0 +# cf = ClusterFinder((400,400), (3,3)) +# for i in range(1000): +# cf.push_pedestal_frame(f.read_frame()) + +# fig, ax = plt.subplots() +# im = ax.imshow(cf.pedestal()) +# cf.pedestal() +# cf.noise() -n_bytes = data.itemsize*data.size -print(f'Reading {N} frames took {t_elapsed:.3f}s {N/t_elapsed:.0f} FPS, 
{n_bytes/1024**2:.4f} GB/s') +# N = 500 +# t0 = time.perf_counter() +# hist1 = bh.Histogram(bh.axis.Regular(40, -2, 4000)) +# f.seek(0) + +# t0 = time.perf_counter() +# data = f.read_n(N) +# t_elapsed = time.perf_counter()-t0 -for frame in data: - a = cf.find_clusters(frame) +# n_bytes = data.itemsize*data.size -clusters = cf.steal_clusters() +# print(f'Reading {N} frames took {t_elapsed:.3f}s {N/t_elapsed:.0f} FPS, {n_bytes/1024**2:.4f} GB/s') + + +# for frame in data: +# a = cf.find_clusters(frame) + +# clusters = cf.steal_clusters() # t_elapsed = time.perf_counter()-t0 # print(f'Clustering {N} frames took {t_elapsed:.2f}s {N/t_elapsed:.0f} FPS') diff --git a/python/src/cluster.hpp b/python/src/cluster.hpp index 5d22091..90c9f21 100644 --- a/python/src/cluster.hpp +++ b/python/src/cluster.hpp @@ -1,5 +1,7 @@ #include "aare/ClusterFinder.hpp" +#include "aare/ClusterFinderMT.hpp" #include "aare/ClusterVector.hpp" +#include "aare/ClusterCollector.hpp" #include "aare/NDView.hpp" #include "aare/Pedestal.hpp" #include "np_helper.hpp" @@ -7,11 +9,13 @@ #include #include #include +#include #include namespace py = pybind11; using pd_type = double; + template void define_cluster_vector(py::module &m, const std::string &typestr) { auto class_name = fmt::format("ClusterVector_{}", typestr); @@ -31,6 +35,9 @@ void define_cluster_vector(py::module &m, const std::string &typestr) { auto *vec = new std::vector(self.sum()); return return_vector(vec); }) + .def_property_readonly("capacity", &ClusterVector::capacity) + .def_property("frame_number", &ClusterVector::frame_number, + &ClusterVector::set_frame_number) .def_buffer([typestr](ClusterVector &self) -> py::buffer_info { return py::buffer_info( self.data(), /* Pointer to buffer */ @@ -45,6 +52,55 @@ void define_cluster_vector(py::module &m, const std::string &typestr) { }); } +void define_cluster_finder_mt_bindings(py::module &m) { + py::class_>(m, "ClusterFinderMT") + .def(py::init, Shape<2>, pd_type, size_t, size_t>(), + 
py::arg("image_size"), py::arg("cluster_size"), + py::arg("n_sigma") = 5.0, py::arg("capacity") = 1000, + py::arg("n_threads") = 3) + .def("push_pedestal_frame", + [](ClusterFinderMT &self, + py::array_t frame) { + auto view = make_view_2d(frame); + self.push_pedestal_frame(view); + }) + .def("find_clusters", + [](ClusterFinderMT &self, + py::array_t frame) { + auto view = make_view_2d(frame); + self.find_clusters(view); + return; + }) + .def("sync", &ClusterFinderMT::sync) + .def( + "steal_clusters", + [](ClusterFinderMT &self, + bool realloc_same_capacity) { + auto v = new ClusterVector( + self.steal_clusters(realloc_same_capacity)); + return v; + }, + py::arg("realloc_same_capacity") = false) + .def("stop", &ClusterFinderMT::stop); +} + + +void define_cluster_collector_bindings(py::module &m) { + py::class_(m, "ClusterCollector") + .def(py::init*>()) + .def("stop", &ClusterCollector::stop) + .def("steal_clusters", + [](ClusterCollector &self) { + auto v = new std::vector>( + self.steal_clusters()); + return v; + }, py::return_value_policy::take_ownership); + + + +} + + void define_cluster_finder_bindings(py::module &m) { py::class_>(m, "ClusterFinder") .def(py::init, Shape<2>, pd_type, size_t>(), diff --git a/python/src/module.cpp b/python/src/module.cpp index 14a686a..0ef868b 100644 --- a/python/src/module.cpp +++ b/python/src/module.cpp @@ -25,5 +25,8 @@ PYBIND11_MODULE(_aare, m) { define_pedestal_bindings(m, "Pedestal_d"); define_pedestal_bindings(m, "Pedestal_f"); define_cluster_finder_bindings(m); + define_cluster_finder_mt_bindings(m); define_cluster_file_io_bindings(m); + define_cluster_collector_bindings(m); + } \ No newline at end of file From cc95561eda6e10c015dab7949bb3d8baad98b9a4 Mon Sep 17 00:00:00 2001 From: froejdh_e Date: Thu, 9 Jan 2025 16:53:22 +0100 Subject: [PATCH 013/120] MultiThreaded Cluster finder --- conda-recipe/meta.yaml | 2 +- include/aare/ClusterFile.hpp | 7 +-- include/aare/ClusterFileSink.hpp | 56 +++++++++++++++++ 
include/aare/ClusterFinder.hpp | 4 +- include/aare/ClusterFinderMT.hpp | 101 +++++++++++++++++++++---------- include/aare/ClusterVector.hpp | 20 +++++- include/aare/File.hpp | 2 + pyproject.toml | 2 +- python/aare/__init__.py | 2 + python/examples/play.py | 15 ++--- python/src/cluster.hpp | 82 ++++++++++++++----------- python/src/cluster_file.hpp | 18 +++--- python/src/file.hpp | 33 ++++++++++ python/src/module.cpp | 1 + src/ClusterFile.cpp | 32 +++++----- src/File.cpp | 2 + 16 files changed, 268 insertions(+), 111 deletions(-) create mode 100644 include/aare/ClusterFileSink.hpp diff --git a/conda-recipe/meta.yaml b/conda-recipe/meta.yaml index dd2b682..6eeec38 100644 --- a/conda-recipe/meta.yaml +++ b/conda-recipe/meta.yaml @@ -1,6 +1,6 @@ package: name: aare - version: 2025.1.7.dev0 #TODO! how to not duplicate this? + version: 2025.1.9.dev0 #TODO! how to not duplicate this? source: diff --git a/include/aare/ClusterFile.hpp b/include/aare/ClusterFile.hpp index a484560..8274078 100644 --- a/include/aare/ClusterFile.hpp +++ b/include/aare/ClusterFile.hpp @@ -59,10 +59,9 @@ class ClusterFile { ClusterFile(const std::filesystem::path &fname, size_t chunk_size = 1000, const std::string &mode = "r"); ~ClusterFile(); - std::vector read_clusters(size_t n_clusters); - std::vector read_frame(int32_t &out_fnum); - void write_frame(int32_t frame_number, - const ClusterVector &clusters); + ClusterVector read_clusters(size_t n_clusters); + ClusterVector read_frame(); + void write_frame(const ClusterVector &clusters); std::vector read_cluster_with_cut(size_t n_clusters, double *noise_map, int nx, int ny); diff --git a/include/aare/ClusterFileSink.hpp b/include/aare/ClusterFileSink.hpp new file mode 100644 index 0000000..158fdeb --- /dev/null +++ b/include/aare/ClusterFileSink.hpp @@ -0,0 +1,56 @@ +#pragma once +#include +#include +#include + +#include "aare/ProducerConsumerQueue.hpp" +#include "aare/ClusterVector.hpp" +#include "aare/ClusterFinderMT.hpp" + +namespace aare{ + 
+class ClusterFileSink{ + ProducerConsumerQueue>* m_source; + std::atomic m_stop_requested{false}; + std::atomic m_stopped{true}; + std::chrono::milliseconds m_default_wait{1}; + std::thread m_thread; + std::ofstream m_file; + + + void process(){ + m_stopped = false; + fmt::print("ClusterFileSink started\n"); + while (!m_stop_requested || !m_source->isEmpty()) { + if (ClusterVector *clusters = m_source->frontPtr(); + clusters != nullptr) { + // Write clusters to file + int32_t frame_number = clusters->frame_number(); //TODO! Should we store frame number already as int? + uint32_t num_clusters = clusters->size(); + m_file.write(reinterpret_cast(&frame_number), sizeof(frame_number)); + m_file.write(reinterpret_cast(&num_clusters), sizeof(num_clusters)); + m_file.write(reinterpret_cast(clusters->data()), clusters->size() * clusters->item_size()); + m_source->popFront(); + }else{ + std::this_thread::sleep_for(m_default_wait); + } + } + fmt::print("ClusterFileSink stopped\n"); + m_stopped = true; + } + + public: + ClusterFileSink(ClusterFinderMT* source, const std::filesystem::path& fname){ + m_source = source->sink(); + m_thread = std::thread(&ClusterFileSink::process, this); + m_file.open(fname, std::ios::binary); + } + void stop(){ + m_stop_requested = true; + m_thread.join(); + m_file.close(); + } +}; + + +} // namespace aare \ No newline at end of file diff --git a/include/aare/ClusterFinder.hpp b/include/aare/ClusterFinder.hpp index 2fe33a7..4a06c31 100644 --- a/include/aare/ClusterFinder.hpp +++ b/include/aare/ClusterFinder.hpp @@ -64,13 +64,13 @@ class ClusterFinder { m_clusters = ClusterVector(m_cluster_sizeX, m_cluster_sizeY); return tmp; } - void find_clusters(NDView frame) { + void find_clusters(NDView frame, uint64_t frame_number = 0) { // // TODO! 
deal with even size clusters // // currently 3,3 -> +/- 1 // // 4,4 -> +/- 2 int dy = m_cluster_sizeY / 2; int dx = m_cluster_sizeX / 2; - + m_clusters.set_frame_number(frame_number); std::vector cluster_data(m_cluster_sizeX * m_cluster_sizeY); for (int iy = 0; iy < frame.shape(0); iy++) { for (int ix = 0; ix < frame.shape(1); ix++) { diff --git a/include/aare/ClusterFinderMT.hpp b/include/aare/ClusterFinderMT.hpp index d3cbaf6..9765512 100644 --- a/include/aare/ClusterFinderMT.hpp +++ b/include/aare/ClusterFinderMT.hpp @@ -7,6 +7,7 @@ #include "aare/NDArray.hpp" #include "aare/ProducerConsumerQueue.hpp" +#include "aare/ClusterFinder.hpp" namespace aare { @@ -21,7 +22,6 @@ struct FrameWrapper { NDArray data; }; - template class ClusterFinderMT { @@ -32,10 +32,8 @@ class ClusterFinderMT { using OutputQueue = ProducerConsumerQueue>; std::vector> m_input_queues; std::vector> m_output_queues; - - - OutputQueue m_sink{1000}; //All clusters go into this queue + OutputQueue m_sink{1000}; // All clusters go into this queue std::vector> m_cluster_finders; std::vector m_threads; @@ -43,26 +41,24 @@ class ClusterFinderMT { std::chrono::milliseconds m_default_wait{1}; std::atomic m_stop_requested{false}; - std::atomic m_processing_threads_stopped{false}; + std::atomic m_processing_threads_stopped{true}; void process(int thread_id) { auto cf = m_cluster_finders[thread_id].get(); auto q = m_input_queues[thread_id].get(); // TODO! Avoid indexing into the vector every time fmt::print("Thread {} started\n", thread_id); - //TODO! is this check enough to make sure we process all the frames? - while (!m_stop_requested || !q->isEmpty()) { - if (FrameWrapper *frame = q->frontPtr(); - frame != nullptr) { + // TODO! is this check enough to make sure we process all the frames? 
+ while (!m_stop_requested || !q->isEmpty()) { + if (FrameWrapper *frame = q->frontPtr(); frame != nullptr) { // fmt::print("Thread {} got frame {}, type: {}\n", thread_id, - // frame->frame_number, static_cast(frame->type)); + // frame->frame_number, static_cast(frame->type)); switch (frame->type) { case FrameType::DATA: - cf->find_clusters( - frame->data.view()); + cf->find_clusters(frame->data.view(), frame->frame_number); m_output_queues[thread_id]->write(cf->steal_clusters()); - + break; case FrameType::PEDESTAL: @@ -79,22 +75,22 @@ class ClusterFinderMT { } else { std::this_thread::sleep_for(m_default_wait); } - } fmt::print("Thread {} stopped\n", thread_id); } /** - * @brief Collect all the clusters from the output queues and write them to the sink + * @brief Collect all the clusters from the output queues and write them to + * the sink */ - void collect(){ + void collect() { bool empty = true; - while(!m_stop_requested || !empty || !m_processing_threads_stopped){ + while (!m_stop_requested || !empty || !m_processing_threads_stopped) { empty = true; for (auto &queue : m_output_queues) { if (!queue->isEmpty()) { - while(!m_sink.write(std::move(*queue->frontPtr()))){ + while (!m_sink.write(std::move(*queue->frontPtr()))) { std::this_thread::sleep_for(m_default_wait); } queue->popFront(); @@ -118,7 +114,6 @@ class ClusterFinderMT { for (size_t i = 0; i < n_threads; i++) { m_input_queues.emplace_back(std::make_unique(200)); m_output_queues.emplace_back(std::make_unique(200)); - } start(); @@ -126,14 +121,22 @@ class ClusterFinderMT { ProducerConsumerQueue> *sink() { return &m_sink; } - void start(){ + /** + * @brief Start all threads + */ + + void start() { for (size_t i = 0; i < m_n_threads; i++) { m_threads.push_back( std::thread(&ClusterFinderMT::process, this, i)); } + m_processing_threads_stopped = false; m_collect_thread = std::thread(&ClusterFinderMT::collect, this); } + /** + * @brief Stop all threads + */ void stop() { m_stop_requested = true; for (auto 
&thread : m_threads) { @@ -143,42 +146,74 @@ class ClusterFinderMT { m_collect_thread.join(); } - void sync(){ + /** + * @brief Wait for all the queues to be empty + */ + void sync() { for (auto &q : m_input_queues) { - while(!q->isEmpty()){ + while (!q->isEmpty()) { std::this_thread::sleep_for(m_default_wait); } } + for (auto &q : m_output_queues) { + while (!q->isEmpty()) { + std::this_thread::sleep_for(m_default_wait); + } + } + while (!m_sink.isEmpty()) { + std::this_thread::sleep_for(m_default_wait); + } } + /** + * @brief Push a pedestal frame to all the cluster finders. The frames is + * expected to be dark. No photon finding is done. Just pedestal update. + */ void push_pedestal_frame(NDView frame) { - FrameWrapper fw{FrameType::PEDESTAL, 0, NDArray(frame)}; + FrameWrapper fw{FrameType::PEDESTAL, 0, + NDArray(frame)}; // TODO! copies the data! for (auto &queue : m_input_queues) { while (!queue->write(fw)) { - // fmt::print("push_pedestal_frame: queue full\n"); std::this_thread::sleep_for(m_default_wait); } } } - void find_clusters(NDView frame) { - FrameWrapper fw{FrameType::DATA, 0, NDArray(frame)}; - while (!m_input_queues[m_current_thread%m_n_threads]->write(fw)) { + /** + * @brief Push the frame to the queue of the next available thread. Function + * returns once the frame is in a queue. + * @note Spin locks with a default wait if the queue is full. + */ + void find_clusters(NDView frame, uint64_t frame_number = 0) { + FrameWrapper fw{FrameType::DATA, frame_number, + NDArray(frame)}; // TODO! copies the data! 
+ while (!m_input_queues[m_current_thread % m_n_threads]->write(fw)) { std::this_thread::sleep_for(m_default_wait); } m_current_thread++; } - ClusterVector steal_clusters(bool realloc_same_capacity = false) { - ClusterVector clusters(3,3); - for (auto &finder : m_cluster_finders) { - clusters += finder->steal_clusters(); + auto pedestal() { + if (m_cluster_finders.empty()) { + throw std::runtime_error("No cluster finders available"); } - return clusters; + if(!m_processing_threads_stopped){ + throw std::runtime_error("ClusterFinderMT is still running"); + } + return m_cluster_finders[0]->pedestal(); + } + + auto noise() { + if (m_cluster_finders.empty()) { + throw std::runtime_error("No cluster finders available"); + } + if(!m_processing_threads_stopped){ + throw std::runtime_error("ClusterFinderMT is still running"); + } + return m_cluster_finders[0]->noise(); } - // void push(FrameWrapper&& frame) { // //TODO! need to loop until we are successful // auto rc = m_input_queue.write(std::move(frame)); diff --git a/include/aare/ClusterVector.hpp b/include/aare/ClusterVector.hpp index a1b3a62..efad448 100644 --- a/include/aare/ClusterVector.hpp +++ b/include/aare/ClusterVector.hpp @@ -41,9 +41,9 @@ template class ClusterVector { * @param capacity initial capacity of the buffer in number of clusters */ ClusterVector(size_t cluster_size_x = 3, size_t cluster_size_y = 3, - size_t capacity = 1024) + size_t capacity = 1024, uint64_t frame_number = 0) : m_cluster_size_x(cluster_size_x), m_cluster_size_y(cluster_size_y), - m_capacity(capacity) { + m_capacity(capacity), m_frame_number(frame_number) { allocate_buffer(capacity); } ~ClusterVector() { @@ -55,7 +55,7 @@ template class ClusterVector { ClusterVector(ClusterVector &&other) noexcept : m_cluster_size_x(other.m_cluster_size_x), m_cluster_size_y(other.m_cluster_size_y), m_data(other.m_data), - m_size(other.m_size), m_capacity(other.m_capacity) { + m_size(other.m_size), m_capacity(other.m_capacity), 
m_frame_number(other.m_frame_number) { other.m_data = nullptr; other.m_size = 0; other.m_capacity = 0; @@ -70,9 +70,11 @@ template class ClusterVector { m_data = other.m_data; m_size = other.m_size; m_capacity = other.m_capacity; + m_frame_number = other.m_frame_number; other.m_data = nullptr; other.m_size = 0; other.m_capacity = 0; + other.m_frame_number = 0; } return *this; } @@ -147,6 +149,12 @@ template class ClusterVector { return 2*sizeof(CoordType) + m_cluster_size_x * m_cluster_size_y * sizeof(T); } + + /** + * @brief Return the size in bytes of a single cluster + */ + size_t item_size() const { return element_offset(); } + /** * @brief Return the offset in bytes for the i-th cluster */ @@ -176,6 +184,12 @@ template class ClusterVector { uint64_t frame_number() const { return m_frame_number; } void set_frame_number(uint64_t frame_number) { m_frame_number = frame_number; } + void resize(size_t new_size) { + if (new_size > m_capacity) { + allocate_buffer(new_size); + } + m_size = new_size; + } private: void allocate_buffer(size_t new_capacity) { diff --git a/include/aare/File.hpp b/include/aare/File.hpp index 7aa30e1..1cef898 100644 --- a/include/aare/File.hpp +++ b/include/aare/File.hpp @@ -36,6 +36,8 @@ class File { File(File &&other) noexcept; File& operator=(File &&other) noexcept; ~File() = default; + + // void close(); //!< close the file Frame read_frame(); //!< read one frame from the file at the current position Frame read_frame(size_t frame_index); //!< read one frame at the position given by frame number diff --git a/pyproject.toml b/pyproject.toml index 35bdefb..1b75d02 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "scikit_build_core.build" [project] name = "aare" -version = "2025.1.7.dev0" +version = "2025.1.9.dev0" [tool.scikit-build] cmake.verbose = true diff --git a/python/aare/__init__.py b/python/aare/__init__.py index fb34c7a..b0c9de2 100644 --- a/python/aare/__init__.py +++ b/python/aare/__init__.py @@ 
-8,6 +8,8 @@ from ._aare import DetectorType from ._aare import ClusterFile from ._aare import hitmap +from ._aare import ClusterFinderMT, ClusterCollector, ClusterFileSink + from .CtbRawFile import CtbRawFile from .RawFile import RawFile from .ScanParameters import ScanParameters diff --git a/python/examples/play.py b/python/examples/play.py index 9c07c99..95da2e5 100644 --- a/python/examples/play.py +++ b/python/examples/play.py @@ -8,18 +8,19 @@ import numpy as np import boost_histogram as bh import time -from aare import File, ClusterFinder, VarClusterFinder +from aare import File, ClusterFinder, VarClusterFinder, ClusterFile base = Path('/mnt/sls_det_storage/matterhorn_data/aare_test_data/') f = File(base/'Moench03new/cu_half_speed_master_4.json') -from aare._aare import ClusterFinderMT, ClusterCollector +from aare._aare import ClusterFinderMT, ClusterCollector, ClusterFileSink cf = ClusterFinderMT((400,400), (3,3), n_threads = 3) -collector = ClusterCollector(cf) +# collector = ClusterCollector(cf) +out_file = ClusterFileSink(cf, "test.clust") for i in range(1000): img = f.read_frame() @@ -34,13 +35,13 @@ for i in range(100): # time.sleep(1) cf.stop() -collector.stop() -cv = collector.steal_clusters() -print(f'Processed {len(cv)} frames') - +out_file.stop() print('Done') +cfile = ClusterFile("test.clust") + + # cf = ClusterFinder((400,400), (3,3)) diff --git a/python/src/cluster.hpp b/python/src/cluster.hpp index 90c9f21..459de44 100644 --- a/python/src/cluster.hpp +++ b/python/src/cluster.hpp @@ -1,7 +1,8 @@ +#include "aare/ClusterCollector.hpp" +#include "aare/ClusterFileSink.hpp" #include "aare/ClusterFinder.hpp" #include "aare/ClusterFinderMT.hpp" #include "aare/ClusterVector.hpp" -#include "aare/ClusterCollector.hpp" #include "aare/NDView.hpp" #include "aare/Pedestal.hpp" #include "np_helper.hpp" @@ -9,13 +10,12 @@ #include #include #include -#include #include +#include namespace py = pybind11; using pd_type = double; - template void 
define_cluster_vector(py::module &m, const std::string &typestr) { auto class_name = fmt::format("ClusterVector_{}", typestr); @@ -64,42 +64,51 @@ void define_cluster_finder_mt_bindings(py::module &m) { auto view = make_view_2d(frame); self.push_pedestal_frame(view); }) - .def("find_clusters", - [](ClusterFinderMT &self, - py::array_t frame) { - auto view = make_view_2d(frame); - self.find_clusters(view); - return; - }) - .def("sync", &ClusterFinderMT::sync) .def( - "steal_clusters", + "find_clusters", [](ClusterFinderMT &self, - bool realloc_same_capacity) { - auto v = new ClusterVector( - self.steal_clusters(realloc_same_capacity)); - return v; + py::array_t frame, uint64_t frame_number) { + auto view = make_view_2d(frame); + self.find_clusters(view, frame_number); + return; }, - py::arg("realloc_same_capacity") = false) - .def("stop", &ClusterFinderMT::stop); + py::arg(), py::arg("frame_number") = 0) + .def("sync", &ClusterFinderMT::sync) + .def("stop", &ClusterFinderMT::stop) + .def_property_readonly("pedestal", + [](ClusterFinderMT &self) { + auto pd = new NDArray{}; + *pd = self.pedestal(); + return return_image_data(pd); + }) + .def_property_readonly("noise", + [](ClusterFinderMT &self) { + auto arr = new NDArray{}; + *arr = self.noise(); + return return_image_data(arr); + }); } - void define_cluster_collector_bindings(py::module &m) { py::class_(m, "ClusterCollector") - .def(py::init*>()) + .def(py::init *>()) .def("stop", &ClusterCollector::stop) - .def("steal_clusters", - [](ClusterCollector &self) { - auto v = new std::vector>( - self.steal_clusters()); - return v; - }, py::return_value_policy::take_ownership); - - - + .def( + "steal_clusters", + [](ClusterCollector &self) { + auto v = + new std::vector>(self.steal_clusters()); + return v; + }, + py::return_value_policy::take_ownership); } +void define_cluster_file_sink_bindings(py::module &m) { + py::class_(m, "ClusterFileSink") + .def(py::init *, + const std::filesystem::path &>()) + .def("stop", 
&ClusterFileSink::stop); +} void define_cluster_finder_bindings(py::module &m) { py::class_>(m, "ClusterFinder") @@ -133,12 +142,15 @@ void define_cluster_finder_bindings(py::module &m) { return v; }, py::arg("realloc_same_capacity") = false) - .def("find_clusters", [](ClusterFinder &self, - py::array_t frame) { - auto view = make_view_2d(frame); - self.find_clusters(view); - return; - }); + .def( + "find_clusters", + [](ClusterFinder &self, + py::array_t frame, uint64_t frame_number) { + auto view = make_view_2d(frame); + self.find_clusters(view); + return; + }, + py::arg(), py::arg("frame_number") = 0); m.def("hitmap", [](std::array image_size, ClusterVector &cv) { diff --git a/python/src/cluster_file.hpp b/python/src/cluster_file.hpp index 82870c4..5280e5f 100644 --- a/python/src/cluster_file.hpp +++ b/python/src/cluster_file.hpp @@ -28,16 +28,13 @@ void define_cluster_file_io_bindings(py::module &m) { py::arg(), py::arg("chunk_size") = 1000, py::arg("mode") = "r") .def("read_clusters", [](ClusterFile &self, size_t n_clusters) { - auto *vec = - new std::vector(self.read_clusters(n_clusters)); - return return_vector(vec); + auto v = new ClusterVector(self.read_clusters(n_clusters)); + return v; }) .def("read_frame", [](ClusterFile &self) { - int32_t frame_number; - auto *vec = - new std::vector(self.read_frame(frame_number)); - return py::make_tuple(frame_number, return_vector(vec)); + auto v = new ClusterVector(self.read_frame()); + return v; }) .def("write_frame", &ClusterFile::write_frame) .def("read_cluster_with_cut", @@ -59,12 +56,11 @@ void define_cluster_file_io_bindings(py::module &m) { }) .def("__iter__", [](ClusterFile &self) { return &self; }) .def("__next__", [](ClusterFile &self) { - auto vec = - new std::vector(self.read_clusters(self.chunk_size())); - if (vec->size() == 0) { + auto v = new ClusterVector(self.read_clusters(self.chunk_size())); + if (v->size() == 0) { throw py::stop_iteration(); } - return return_vector(vec); + return v; }); 
m.def("calculate_eta2", []( aare::ClusterVector &clusters) { diff --git a/python/src/file.hpp b/python/src/file.hpp index 30fa82f..f20e0ce 100644 --- a/python/src/file.hpp +++ b/python/src/file.hpp @@ -124,8 +124,41 @@ void define_file_io_bindings(py::module &m) { self.read_into(reinterpret_cast(image.mutable_data()), n_frames); return image; + }) + .def("__enter__", [](File &self) { return &self; }) + .def("__exit__", + [](File &self, + const std::optional &exc_type, + const std::optional &exc_value, + const std::optional &traceback) { + // self.close(); + }) + .def("__iter__", [](File &self) { return &self; }) + .def("__next__", [](File &self) { + + try{ + const uint8_t item_size = self.bytes_per_pixel(); + py::array image; + std::vector shape; + shape.reserve(2); + shape.push_back(self.rows()); + shape.push_back(self.cols()); + if (item_size == 1) { + image = py::array_t(shape); + } else if (item_size == 2) { + image = py::array_t(shape); + } else if (item_size == 4) { + image = py::array_t(shape); + } + self.read_into( + reinterpret_cast(image.mutable_data())); + return image; + }catch(std::runtime_error &e){ + throw py::stop_iteration(); + } }); + py::class_(m, "FileConfig") .def(py::init<>()) .def_readwrite("rows", &FileConfig::rows) diff --git a/python/src/module.cpp b/python/src/module.cpp index 0ef868b..451a6b8 100644 --- a/python/src/module.cpp +++ b/python/src/module.cpp @@ -28,5 +28,6 @@ PYBIND11_MODULE(_aare, m) { define_cluster_finder_mt_bindings(m); define_cluster_file_io_bindings(m); define_cluster_collector_bindings(m); + define_cluster_file_sink_bindings(m); } \ No newline at end of file diff --git a/src/ClusterFile.cpp b/src/ClusterFile.cpp index 0e5b93e..f62b7c9 100644 --- a/src/ClusterFile.cpp +++ b/src/ClusterFile.cpp @@ -34,8 +34,7 @@ void ClusterFile::close() { } } -void ClusterFile::write_frame(int32_t frame_number, - const ClusterVector &clusters) { +void ClusterFile::write_frame(const ClusterVector &clusters) { if (m_mode != "w") { throw 
std::runtime_error("File not opened for writing"); } @@ -43,26 +42,27 @@ void ClusterFile::write_frame(int32_t frame_number, !(clusters.cluster_size_y() == 3)) { throw std::runtime_error("Only 3x3 clusters are supported"); } + int32_t frame_number = clusters.frame_number(); fwrite(&frame_number, sizeof(frame_number), 1, fp); uint32_t n_clusters = clusters.size(); fwrite(&n_clusters, sizeof(n_clusters), 1, fp); fwrite(clusters.data(), clusters.element_offset(), clusters.size(), fp); - // write clusters - // fwrite(clusters.data(), sizeof(Cluster), clusters.size(), fp); } -std::vector ClusterFile::read_clusters(size_t n_clusters) { +ClusterVector ClusterFile::read_clusters(size_t n_clusters) { if (m_mode != "r") { throw std::runtime_error("File not opened for reading"); } - std::vector clusters(n_clusters); + + ClusterVector clusters(3,3, n_clusters); int32_t iframe = 0; // frame number needs to be 4 bytes! size_t nph_read = 0; uint32_t nn = m_num_left; uint32_t nph = m_num_left; // number of clusters in frame needs to be 4 - auto buf = reinterpret_cast(clusters.data()); + // auto buf = reinterpret_cast(clusters.data()); + auto buf = clusters.data(); // if there are photons left from previous frame read them first if (nph) { if (nph > n_clusters) { @@ -73,7 +73,7 @@ std::vector ClusterFile::read_clusters(size_t n_clusters) { nn = nph; } nph_read += fread(reinterpret_cast(buf + nph_read), - sizeof(Cluster3x3), nn, fp); + clusters.item_size(), nn, fp); m_num_left = nph - nn; // write back the number of photons left } @@ -88,7 +88,7 @@ std::vector ClusterFile::read_clusters(size_t n_clusters) { nn = nph; nph_read += fread(reinterpret_cast(buf + nph_read), - sizeof(Cluster3x3), nn, fp); + clusters.item_size(), nn, fp); m_num_left = nph - nn; } if (nph_read >= n_clusters) @@ -102,7 +102,7 @@ std::vector ClusterFile::read_clusters(size_t n_clusters) { return clusters; } -std::vector ClusterFile::read_frame(int32_t &out_fnum) { +ClusterVector ClusterFile::read_frame() { if 
(m_mode != "r") { throw std::runtime_error("File not opened for reading"); } @@ -110,8 +110,8 @@ std::vector ClusterFile::read_frame(int32_t &out_fnum) { throw std::runtime_error( "There are still photons left in the last frame"); } - - if (fread(&out_fnum, sizeof(out_fnum), 1, fp) != 1) { + int32_t frame_number; + if (fread(&frame_number, sizeof(frame_number), 1, fp) != 1) { throw std::runtime_error("Could not read frame number"); } @@ -119,15 +119,19 @@ std::vector ClusterFile::read_frame(int32_t &out_fnum) { if (fread(&n_clusters, sizeof(n_clusters), 1, fp) != 1) { throw std::runtime_error("Could not read number of clusters"); } - std::vector clusters(n_clusters); + // std::vector clusters(n_clusters); + ClusterVector clusters(3, 3, n_clusters); + clusters.set_frame_number(frame_number); - if (fread(clusters.data(), sizeof(Cluster3x3), n_clusters, fp) != + if (fread(clusters.data(), clusters.item_size(), n_clusters, fp) != static_cast(n_clusters)) { throw std::runtime_error("Could not read clusters"); } + clusters.resize(n_clusters); return clusters; } + std::vector ClusterFile::read_cluster_with_cut(size_t n_clusters, double *noise_map, int nx, int ny) { diff --git a/src/File.cpp b/src/File.cpp index 37e4c57..1180967 100644 --- a/src/File.cpp +++ b/src/File.cpp @@ -45,6 +45,8 @@ File& File::operator=(File &&other) noexcept { return *this; } +// void File::close() { file_impl->close(); } + Frame File::read_frame() { return file_impl->read_frame(); } Frame File::read_frame(size_t frame_index) { return file_impl->read_frame(frame_index); From caf7b4ecdb2c3a3716d2472a8cd769310a8dd102 Mon Sep 17 00:00:00 2001 From: froejdh_e Date: Fri, 10 Jan 2025 10:22:04 +0100 Subject: [PATCH 014/120] added docs for ClusterFinderMT --- docs/CMakeLists.txt | 23 +------ docs/src/ClusterFinderMT.rst | 7 ++ docs/src/index.rst | 2 + docs/src/pyClusterVector.rst | 33 +++++++++ include/aare/ClusterFinderMT.hpp | 85 ++++++++++++++++------- include/aare/ClusterVector.hpp | 109 
++++++++++++++++++++---------- python/aare/__init__.py | 2 +- python/examples/play.py | 23 +++++++ python/src/cluster.hpp | 12 ++-- python/src/cluster_file.hpp | 4 +- src/ClusterFile.cpp | 2 +- src/ClusterVector.test.cpp | 112 ++++++++++++++++++++++++++++--- 12 files changed, 309 insertions(+), 105 deletions(-) create mode 100644 docs/src/ClusterFinderMT.rst create mode 100644 docs/src/pyClusterVector.rst diff --git a/docs/CMakeLists.txt b/docs/CMakeLists.txt index 118fd5c..c693f0e 100644 --- a/docs/CMakeLists.txt +++ b/docs/CMakeLists.txt @@ -12,28 +12,7 @@ set(SPHINX_BUILD ${CMAKE_CURRENT_BINARY_DIR}) file(GLOB SPHINX_SOURCE_FILES CONFIGURE_DEPENDS "src/*.rst") -# set(SPHINX_SOURCE_FILES -# src/index.rst -# src/Installation.rst -# src/Requirements.rst -# src/NDArray.rst -# src/NDView.rst -# src/File.rst -# src/Frame.rst -# src/Dtype.rst -# src/ClusterFinder.rst -# src/ClusterFile.rst -# src/Pedestal.rst -# src/RawFile.rst -# src/RawSubFile.rst -# src/RawMasterFile.rst -# src/VarClusterFinder.rst -# src/pyVarClusterFinder.rst -# src/pyFile.rst -# src/pyCtbRawFile.rst -# src/pyRawFile.rst -# src/pyRawMasterFile.rst -# ) + foreach(filename ${SPHINX_SOURCE_FILES}) diff --git a/docs/src/ClusterFinderMT.rst b/docs/src/ClusterFinderMT.rst new file mode 100644 index 0000000..b15eb8b --- /dev/null +++ b/docs/src/ClusterFinderMT.rst @@ -0,0 +1,7 @@ +ClusterFinderMT +================== + + +.. 
doxygenclass:: aare::ClusterFinderMT + :members: + :undoc-members: \ No newline at end of file diff --git a/docs/src/index.rst b/docs/src/index.rst index 4316a2c..e6c927f 100644 --- a/docs/src/index.rst +++ b/docs/src/index.rst @@ -30,6 +30,7 @@ AARE pyFile pyCtbRawFile pyClusterFile + pyClusterVector pyRawFile pyRawMasterFile pyVarClusterFinder @@ -45,6 +46,7 @@ AARE File Dtype ClusterFinder + ClusterFinderMT ClusterFile ClusterVector Pedestal diff --git a/docs/src/pyClusterVector.rst b/docs/src/pyClusterVector.rst new file mode 100644 index 0000000..4277920 --- /dev/null +++ b/docs/src/pyClusterVector.rst @@ -0,0 +1,33 @@ +ClusterVector +================ + +The ClusterVector, holds clusters from the ClusterFinder. Since it is templated +in C++ we use a suffix indicating the data type in python. The suffix is +``_i`` for integer, ``_f`` for float, and ``_d`` for double. + +At the moment the functionality from python is limited and it is not supported +to push_back clusters to the vector. The intended use case is to pass it to +C++ functions that support the ClusterVector or to view it as a numpy array. + +**View ClusterVector as numpy array** + +.. code:: python + + from aare import ClusterFile + with ClusterFile("path/to/file") as f: + cluster_vector = f.read_frame() + + # Create a copy of the cluster data in a numpy array + clusters = np.array(cluster_vector) + + # Avoid copying the data by passing copy=False + clusters = np.array(cluster_vector, copy = False) + + +.. py:currentmodule:: aare + +.. 
autoclass:: ClusterVector_i + :members: + :undoc-members: + :show-inheritance: + :inherited-members: \ No newline at end of file diff --git a/include/aare/ClusterFinderMT.hpp b/include/aare/ClusterFinderMT.hpp index 9765512..6090ca8 100644 --- a/include/aare/ClusterFinderMT.hpp +++ b/include/aare/ClusterFinderMT.hpp @@ -5,9 +5,9 @@ #include #include +#include "aare/ClusterFinder.hpp" #include "aare/NDArray.hpp" #include "aare/ProducerConsumerQueue.hpp" -#include "aare/ClusterFinder.hpp" namespace aare { @@ -22,6 +22,14 @@ struct FrameWrapper { NDArray data; }; +/** + * @brief ClusterFinderMT is a multi-threaded version of ClusterFinder. It uses + * a producer-consumer queue to distribute the frames to the threads. The + * clusters are collected in a single output queue. + * @tparam FRAME_TYPE type of the frame data + * @tparam PEDESTAL_TYPE type of the pedestal data + * @tparam CT type of the cluster data + */ template class ClusterFinderMT { @@ -43,31 +51,28 @@ class ClusterFinderMT { std::atomic m_stop_requested{false}; std::atomic m_processing_threads_stopped{true}; + /** + * @brief Function called by the processing threads. It reads the frames + * from the input queue and processes them. + */ void process(int thread_id) { auto cf = m_cluster_finders[thread_id].get(); auto q = m_input_queues[thread_id].get(); - // TODO! Avoid indexing into the vector every time - fmt::print("Thread {} started\n", thread_id); - // TODO! is this check enough to make sure we process all the frames? 
+ bool realloc_same_capacity = true; + while (!m_stop_requested || !q->isEmpty()) { if (FrameWrapper *frame = q->frontPtr(); frame != nullptr) { - // fmt::print("Thread {} got frame {}, type: {}\n", thread_id, - // frame->frame_number, static_cast(frame->type)); switch (frame->type) { case FrameType::DATA: cf->find_clusters(frame->data.view(), frame->frame_number); - m_output_queues[thread_id]->write(cf->steal_clusters()); - + m_output_queues[thread_id]->write(cf->steal_clusters(realloc_same_capacity)); break; case FrameType::PEDESTAL: m_cluster_finders[thread_id]->push_pedestal_frame( frame->data.view()); break; - - default: - break; } // frame is processed now discard it @@ -76,7 +81,6 @@ class ClusterFinderMT { std::this_thread::sleep_for(m_default_wait); } } - fmt::print("Thread {} stopped\n", thread_id); } /** @@ -101,11 +105,19 @@ class ClusterFinderMT { } public: + /** + * @brief Construct a new ClusterFinderMT object + * @param image_size size of the image + * @param cluster_size size of the cluster + * @param nSigma number of sigma above the pedestal to consider a photon + * @param capacity initial capacity of the cluster vector. Should match + * expected number of clusters in a frame per frame. + * @param n_threads number of threads to use + */ ClusterFinderMT(Shape<2> image_size, Shape<2> cluster_size, PEDESTAL_TYPE nSigma = 5.0, size_t capacity = 2000, size_t n_threads = 3) : m_n_threads(n_threads) { - fmt::print("ClusterFinderMT: using {} threads\n", n_threads); for (size_t i = 0; i < n_threads; i++) { m_cluster_finders.push_back( std::make_unique>( @@ -115,39 +127,48 @@ class ClusterFinderMT { m_input_queues.emplace_back(std::make_unique(200)); m_output_queues.emplace_back(std::make_unique(200)); } - + //TODO! Should we start automatically? 
start(); } + /** + * @brief Return the sink queue where all the clusters are collected + * @warning You need to empty this queue otherwise the cluster finder will wait forever + */ ProducerConsumerQueue> *sink() { return &m_sink; } /** - * @brief Start all threads + * @brief Start all processing threads */ - void start() { + m_processing_threads_stopped = false; + m_stop_requested = false; + for (size_t i = 0; i < m_n_threads; i++) { m_threads.push_back( std::thread(&ClusterFinderMT::process, this, i)); } - m_processing_threads_stopped = false; + m_collect_thread = std::thread(&ClusterFinderMT::collect, this); } /** - * @brief Stop all threads + * @brief Stop all processing threads */ void stop() { m_stop_requested = true; + for (auto &thread : m_threads) { thread.join(); } + m_threads.clear(); + m_processing_threads_stopped = true; m_collect_thread.join(); } /** - * @brief Wait for all the queues to be empty + * @brief Wait for all the queues to be empty. Mostly used for timing tests. */ void sync() { for (auto &q : m_input_queues) { @@ -194,24 +215,38 @@ class ClusterFinderMT { m_current_thread++; } - auto pedestal() { + /** + * @brief Return the pedestal currently used by the cluster finder + * @param thread_index index of the thread + */ + auto pedestal(size_t thread_index = 0) { if (m_cluster_finders.empty()) { throw std::runtime_error("No cluster finders available"); } - if(!m_processing_threads_stopped){ + if (!m_processing_threads_stopped) { throw std::runtime_error("ClusterFinderMT is still running"); } - return m_cluster_finders[0]->pedestal(); + if (thread_index >= m_cluster_finders.size()) { + throw std::runtime_error("Thread index out of range"); + } + return m_cluster_finders[thread_index]->pedestal(); } - auto noise() { + /** + * @brief Return the noise currently used by the cluster finder + * @param thread_index index of the thread + */ + auto noise(size_t thread_index = 0) { if (m_cluster_finders.empty()) { throw std::runtime_error("No cluster 
finders available"); } - if(!m_processing_threads_stopped){ + if (!m_processing_threads_stopped) { throw std::runtime_error("ClusterFinderMT is still running"); } - return m_cluster_finders[0]->noise(); + if (thread_index >= m_cluster_finders.size()) { + throw std::runtime_error("Thread index out of range"); + } + return m_cluster_finders[thread_index]->noise(); } // void push(FrameWrapper&& frame) { diff --git a/include/aare/ClusterVector.hpp b/include/aare/ClusterVector.hpp index efad448..2c3b6c2 100644 --- a/include/aare/ClusterVector.hpp +++ b/include/aare/ClusterVector.hpp @@ -9,20 +9,24 @@ namespace aare { /** - * @brief ClusterVector is a container for clusters of various sizes. It uses a - * contiguous memory buffer to store the clusters. + * @brief ClusterVector is a container for clusters of various sizes. It uses a + * contiguous memory buffer to store the clusters. It is templated on the data + * type and the coordinate type of the clusters. * @note push_back can invalidate pointers to elements in the container + * @warning ClusterVector is currently move only to catch unintended copies, but + * this might change since there are probably use cases where copying is needed. * @tparam T data type of the pixels in the cluster - * @tparam CoordType data type of the x and y coordinates of the cluster (normally int16_t) + * @tparam CoordType data type of the x and y coordinates of the cluster + * (normally int16_t) */ -template class ClusterVector { +template class ClusterVector { using value_type = T; size_t m_cluster_size_x; size_t m_cluster_size_y; std::byte *m_data{}; size_t m_size{0}; size_t m_capacity; - uint64_t m_frame_number{0}; //TODO! Check frame number size and type + uint64_t m_frame_number{0}; // TODO! 
Check frame number size and type /* Format string used in the python bindings to create a numpy array from the buffer @@ -31,7 +35,7 @@ template class ClusterVector { d - double i - int */ - constexpr static char m_fmt_base[] = "=h:x:\nh:y:\n({},{}){}:data:" ; + constexpr static char m_fmt_base[] = "=h:x:\nh:y:\n({},{}){}:data:"; public: /** @@ -39,6 +43,8 @@ template class ClusterVector { * @param cluster_size_x size of the cluster in x direction * @param cluster_size_y size of the cluster in y direction * @param capacity initial capacity of the buffer in number of clusters + * @param frame_number frame number of the clusters. Default is 0, which is + * also used to indicate that the clusters come from many frames */ ClusterVector(size_t cluster_size_x = 3, size_t cluster_size_y = 3, size_t capacity = 1024, uint64_t frame_number = 0) @@ -46,23 +52,22 @@ template class ClusterVector { m_capacity(capacity), m_frame_number(frame_number) { allocate_buffer(capacity); } - ~ClusterVector() { - delete[] m_data; - } - - //Move constructor + ~ClusterVector() { delete[] m_data; } + + // Move constructor ClusterVector(ClusterVector &&other) noexcept : m_cluster_size_x(other.m_cluster_size_x), m_cluster_size_y(other.m_cluster_size_y), m_data(other.m_data), - m_size(other.m_size), m_capacity(other.m_capacity), m_frame_number(other.m_frame_number) { + m_size(other.m_size), m_capacity(other.m_capacity), + m_frame_number(other.m_frame_number) { other.m_data = nullptr; other.m_size = 0; other.m_capacity = 0; } - //Move assignment operator - ClusterVector& operator=(ClusterVector &&other) noexcept { + // Move assignment operator + ClusterVector &operator=(ClusterVector &&other) noexcept { if (this != &other) { delete[] m_data; m_cluster_size_x = other.m_cluster_size_x; @@ -82,7 +87,8 @@ template class ClusterVector { /** * @brief Reserve space for at least capacity clusters * @param capacity number of clusters to reserve space for - * @note If capacity is less than the current 
capacity, the function does nothing. + * @note If capacity is less than the current capacity, the function does + * nothing. */ void reserve(size_t capacity) { if (capacity > m_capacity) { @@ -95,7 +101,8 @@ template class ClusterVector { * @param x x-coordinate of the cluster * @param y y-coordinate of the cluster * @param data pointer to the data of the cluster - * @warning The data pointer must point to a buffer of size cluster_size_x * cluster_size_y * sizeof(T) + * @warning The data pointer must point to a buffer of size cluster_size_x * + * cluster_size_y * sizeof(T) */ void push_back(CoordType x, CoordType y, const std::byte *data) { if (m_size == m_capacity) { @@ -111,11 +118,12 @@ template class ClusterVector { ptr); m_size++; } - ClusterVector& operator+=(const ClusterVector& other){ + ClusterVector &operator+=(const ClusterVector &other) { if (m_size + other.m_size > m_capacity) { allocate_buffer(m_capacity + other.m_size); } - std::copy(other.m_data, other.m_data + other.m_size * element_offset(), m_data + m_size * element_offset()); + std::copy(other.m_data, other.m_data + other.m_size * item_size(), + m_data + m_size * item_size()); m_size += other.m_size; return *this; } @@ -126,7 +134,7 @@ template class ClusterVector { */ std::vector sum() { std::vector sums(m_size); - const size_t stride = element_offset(); + const size_t stride = item_size(); const size_t n_pixels = m_cluster_size_x * m_cluster_size_y; std::byte *ptr = m_data + 2 * sizeof(CoordType); // skip x and y @@ -139,32 +147,41 @@ template class ClusterVector { return sums; } - size_t size() const { return m_size; } - size_t capacity() const { return m_capacity; } - /** - * @brief Return the offset in bytes for a single cluster + * @brief Return the number of clusters in the vector */ - size_t element_offset() const { - return 2*sizeof(CoordType) + - m_cluster_size_x * m_cluster_size_y * sizeof(T); - } + size_t size() const { return m_size; } + + /** + * @brief Return the capacity of the 
buffer in number of clusters. This is + * the number of clusters that can be stored in the current buffer without reallocation. + */ + size_t capacity() const { return m_capacity; } /** * @brief Return the size in bytes of a single cluster */ - size_t item_size() const { return element_offset(); } + size_t item_size() const { + return 2 * sizeof(CoordType) + + m_cluster_size_x * m_cluster_size_y * sizeof(T); + } /** * @brief Return the offset in bytes for the i-th cluster */ - size_t element_offset(size_t i) const { return element_offset() * i; } + size_t element_offset(size_t i) const { return item_size() * i; } /** * @brief Return a pointer to the i-th cluster */ std::byte *element_ptr(size_t i) { return m_data + element_offset(i); } - const std::byte * element_ptr(size_t i) const { return m_data + element_offset(i); } + + /** + * @brief Return a pointer to the i-th cluster + */ + const std::byte *element_ptr(size_t i) const { + return m_data + element_offset(i); + } size_t cluster_size_x() const { return m_cluster_size_x; } size_t cluster_size_y() const { return m_cluster_size_y; } @@ -172,19 +189,37 @@ template class ClusterVector { std::byte *data() { return m_data; } std::byte const *data() const { return m_data; } - template - V& at(size_t i) { - return *reinterpret_cast(element_ptr(i)); + /** + * @brief Return a reference to the i-th cluster casted to type V + * @tparam V type of the cluster + */ + template V &at(size_t i) { + return *reinterpret_cast(element_ptr(i)); } const std::string_view fmt_base() const { - //TODO! how do we match on coord_t? + // TODO! how do we match on coord_t? return m_fmt_base; } + /** + * @brief Return the frame number of the clusters. 
0 is used to indicate that + * the clusters come from many frames + */ uint64_t frame_number() const { return m_frame_number; } - void set_frame_number(uint64_t frame_number) { m_frame_number = frame_number; } + + void set_frame_number(uint64_t frame_number) { + m_frame_number = frame_number; + } + + /** + * @brief Resize the vector to contain new_size clusters. If new_size is greater than the current capacity, a new buffer is allocated. + * If the size is smaller no memory is freed, size is just updated. + * @param new_size new size of the vector + * @warning The additional clusters are not initialized + */ void resize(size_t new_size) { + //TODO! Should we initialize the new clusters? if (new_size > m_capacity) { allocate_buffer(new_size); } @@ -193,9 +228,9 @@ template class ClusterVector { private: void allocate_buffer(size_t new_capacity) { - size_t num_bytes = element_offset() * new_capacity; + size_t num_bytes = item_size() * new_capacity; std::byte *new_data = new std::byte[num_bytes]{}; - std::copy(m_data, m_data + element_offset() * m_size, new_data); + std::copy(m_data, m_data + item_size() * m_size, new_data); delete[] m_data; m_data = new_data; m_capacity = new_capacity; diff --git a/python/aare/__init__.py b/python/aare/__init__.py index b0c9de2..58112a6 100644 --- a/python/aare/__init__.py +++ b/python/aare/__init__.py @@ -8,7 +8,7 @@ from ._aare import DetectorType from ._aare import ClusterFile from ._aare import hitmap -from ._aare import ClusterFinderMT, ClusterCollector, ClusterFileSink +from ._aare import ClusterFinderMT, ClusterCollector, ClusterFileSink, ClusterVector_i from .CtbRawFile import CtbRawFile from .RawFile import RawFile diff --git a/python/examples/play.py b/python/examples/play.py index 95da2e5..ca232ba 100644 --- a/python/examples/play.py +++ b/python/examples/play.py @@ -35,11 +35,34 @@ for i in range(100): # time.sleep(1) cf.stop() +time.sleep(1) +print('Second run') +cf.start() +for i in range(100): + img = f.read_frame() + 
cf.find_clusters(img) + +cf.stop() +print('Third run') +cf.start() +for i in range(129): + img = f.read_frame() + cf.find_clusters(img) + +cf.stop() out_file.stop() print('Done') cfile = ClusterFile("test.clust") +i = 0 +while True: + try: + cv = cfile.read_frame() + i+=1 + except RuntimeError: + break +print(f'Read {i} frames') diff --git a/python/src/cluster.hpp b/python/src/cluster.hpp index 459de44..5b7d20e 100644 --- a/python/src/cluster.hpp +++ b/python/src/cluster.hpp @@ -22,8 +22,7 @@ void define_cluster_vector(py::module &m, const std::string &typestr) { py::class_>(m, class_name.c_str(), py::buffer_protocol()) .def(py::init()) .def_property_readonly("size", &ClusterVector::size) - .def("element_offset", - py::overload_cast<>(&ClusterVector::element_offset, py::const_)) + .def("item_size", &ClusterVector::item_size) .def_property_readonly("fmt", [typestr](ClusterVector &self) { return fmt::format( @@ -41,13 +40,13 @@ void define_cluster_vector(py::module &m, const std::string &typestr) { .def_buffer([typestr](ClusterVector &self) -> py::buffer_info { return py::buffer_info( self.data(), /* Pointer to buffer */ - self.element_offset(), /* Size of one scalar */ + self.item_size(), /* Size of one scalar */ fmt::format(self.fmt_base(), self.cluster_size_x(), self.cluster_size_y(), typestr), /* Format descriptor */ 1, /* Number of dimensions */ {self.size()}, /* Buffer dimensions */ - {self.element_offset()} /* Strides (in bytes) for each index */ + {self.item_size()} /* Strides (in bytes) for each index */ ); }); } @@ -56,7 +55,7 @@ void define_cluster_finder_mt_bindings(py::module &m) { py::class_>(m, "ClusterFinderMT") .def(py::init, Shape<2>, pd_type, size_t, size_t>(), py::arg("image_size"), py::arg("cluster_size"), - py::arg("n_sigma") = 5.0, py::arg("capacity") = 1000, + py::arg("n_sigma") = 5.0, py::arg("capacity") = 2048, py::arg("n_threads") = 3) .def("push_pedestal_frame", [](ClusterFinderMT &self, @@ -75,6 +74,7 @@ void 
define_cluster_finder_mt_bindings(py::module &m) { py::arg(), py::arg("frame_number") = 0) .def("sync", &ClusterFinderMT::sync) .def("stop", &ClusterFinderMT::stop) + .def("start", &ClusterFinderMT::start) .def_property_readonly("pedestal", [](ClusterFinderMT &self) { auto pd = new NDArray{}; @@ -162,7 +162,7 @@ void define_cluster_finder_bindings(py::module &m) { for (py::ssize_t j = 0; j < r.shape(1); j++) r(i, j) = 0; - size_t stride = cv.element_offset(); + size_t stride = cv.item_size(); auto ptr = cv.data(); for (size_t i = 0; i < cv.size(); i++) { auto x = *reinterpret_cast(ptr); diff --git a/python/src/cluster_file.hpp b/python/src/cluster_file.hpp index 5280e5f..82d4453 100644 --- a/python/src/cluster_file.hpp +++ b/python/src/cluster_file.hpp @@ -30,12 +30,12 @@ void define_cluster_file_io_bindings(py::module &m) { [](ClusterFile &self, size_t n_clusters) { auto v = new ClusterVector(self.read_clusters(n_clusters)); return v; - }) + },py::return_value_policy::take_ownership) .def("read_frame", [](ClusterFile &self) { auto v = new ClusterVector(self.read_frame()); return v; - }) + },py::return_value_policy::take_ownership) .def("write_frame", &ClusterFile::write_frame) .def("read_cluster_with_cut", [](ClusterFile &self, size_t n_clusters, diff --git a/src/ClusterFile.cpp b/src/ClusterFile.cpp index f62b7c9..dab4552 100644 --- a/src/ClusterFile.cpp +++ b/src/ClusterFile.cpp @@ -46,7 +46,7 @@ void ClusterFile::write_frame(const ClusterVector &clusters) { fwrite(&frame_number, sizeof(frame_number), 1, fp); uint32_t n_clusters = clusters.size(); fwrite(&n_clusters, sizeof(n_clusters), 1, fp); - fwrite(clusters.data(), clusters.element_offset(), clusters.size(), fp); + fwrite(clusters.data(), clusters.item_size(), clusters.size(), fp); } ClusterVector ClusterFile::read_clusters(size_t n_clusters) { diff --git a/src/ClusterVector.test.cpp b/src/ClusterVector.test.cpp index 24a482b..8ca3b1e 100644 --- a/src/ClusterVector.test.cpp +++ b/src/ClusterVector.test.cpp 
@@ -6,12 +6,14 @@ using aare::ClusterVector; +struct Cluster_i2x2 { + int16_t x; + int16_t y; + int32_t data[4]; +}; + TEST_CASE("ClusterVector 2x2 int32_t capacity 4, push back then read") { - struct Cluster_i2x2 { - int16_t x; - int16_t y; - int32_t data[4]; - }; + ClusterVector cv(2, 2, 4); REQUIRE(cv.capacity() == 4); @@ -19,7 +21,7 @@ TEST_CASE("ClusterVector 2x2 int32_t capacity 4, push back then read") { REQUIRE(cv.cluster_size_x() == 2); REQUIRE(cv.cluster_size_y() == 2); // int16_t, int16_t, 2x2 int32_t = 20 bytes - REQUIRE(cv.element_offset() == 20); + REQUIRE(cv.item_size() == 20); //Create a cluster and push back into the vector Cluster_i2x2 c1 = {1, 2, {3, 4, 5, 6}}; @@ -30,7 +32,7 @@ TEST_CASE("ClusterVector 2x2 int32_t capacity 4, push back then read") { //Read the cluster back out using copy. TODO! Can we improve the API? Cluster_i2x2 c2; std::byte *ptr = cv.element_ptr(0); - std::copy(ptr, ptr + cv.element_offset(), reinterpret_cast(&c2)); + std::copy(ptr, ptr + cv.item_size(), reinterpret_cast(&c2)); //Check that the data is the same REQUIRE(c1.x == c2.x); @@ -83,8 +85,8 @@ TEST_CASE("Storing floats"){ float data[8]; }; - ClusterVector cv(2, 4, 2); - REQUIRE(cv.capacity() == 2); + ClusterVector cv(2, 4, 10); + REQUIRE(cv.capacity() == 10); REQUIRE(cv.size() == 0); REQUIRE(cv.cluster_size_x() == 2); REQUIRE(cv.cluster_size_y() == 4); @@ -92,17 +94,105 @@ TEST_CASE("Storing floats"){ //Create a cluster and push back into the vector Cluster_f4x2 c1 = {1, 2, {3.0, 4.0, 5.0, 6.0,3.0, 4.0, 5.0, 6.0}}; cv.push_back(c1.x, c1.y, reinterpret_cast(&c1.data[0])); - REQUIRE(cv.capacity() == 2); + REQUIRE(cv.capacity() == 10); REQUIRE(cv.size() == 1); Cluster_f4x2 c2 = {6, 7, {8.0, 9.0, 10.0, 11.0,8.0, 9.0, 10.0, 11.0}}; cv.push_back(c2.x, c2.y, reinterpret_cast(&c2.data[0])); - REQUIRE(cv.capacity() == 2); + REQUIRE(cv.capacity() == 10); REQUIRE(cv.size() == 2); auto sums = cv.sum(); REQUIRE(sums.size() == 2); REQUIRE_THAT(sums[0], 
Catch::Matchers::WithinAbs(36.0, 1e-6)); REQUIRE_THAT(sums[1], Catch::Matchers::WithinAbs(76.0, 1e-6)); +} + +TEST_CASE("Push back more than initial capacity"){ + + ClusterVector cv(2, 2, 2); + auto initial_data = cv.data(); + Cluster_i2x2 c1 = {1, 2, {3, 4, 5, 6}}; + cv.push_back(c1.x, c1.y, reinterpret_cast(&c1.data[0])); + REQUIRE(cv.size() == 1); + REQUIRE(cv.capacity() == 2); + + Cluster_i2x2 c2 = {6, 7, {8, 9, 10, 11}}; + cv.push_back(c2.x, c2.y, reinterpret_cast(&c2.data[0])); + REQUIRE(cv.size() == 2); + REQUIRE(cv.capacity() == 2); + + Cluster_i2x2 c3 = {11, 12, {13, 14, 15, 16}}; + cv.push_back(c3.x, c3.y, reinterpret_cast(&c3.data[0])); + REQUIRE(cv.size() == 3); + REQUIRE(cv.capacity() == 4); + + Cluster_i2x2* ptr = reinterpret_cast(cv.data()); + REQUIRE(ptr[0].x == 1); + REQUIRE(ptr[0].y == 2); + REQUIRE(ptr[1].x == 6); + REQUIRE(ptr[1].y == 7); + REQUIRE(ptr[2].x == 11); + REQUIRE(ptr[2].y == 12); + + //We should have allocated a new buffer, since we outgrew the initial capacity + REQUIRE(initial_data != cv.data()); + +} + +TEST_CASE("Concatenate two cluster vectors where the first has enough capacity"){ + ClusterVector cv1(2, 2, 12); + Cluster_i2x2 c1 = {1, 2, {3, 4, 5, 6}}; + cv1.push_back(c1.x, c1.y, reinterpret_cast(&c1.data[0])); + Cluster_i2x2 c2 = {6, 7, {8, 9, 10, 11}}; + cv1.push_back(c2.x, c2.y, reinterpret_cast(&c2.data[0])); + + ClusterVector cv2(2, 2, 2); + Cluster_i2x2 c3 = {11, 12, {13, 14, 15, 16}}; + cv2.push_back(c3.x, c3.y, reinterpret_cast(&c3.data[0])); + Cluster_i2x2 c4 = {16, 17, {18, 19, 20, 21}}; + cv2.push_back(c4.x, c4.y, reinterpret_cast(&c4.data[0])); + + cv1 += cv2; + REQUIRE(cv1.size() == 4); + REQUIRE(cv1.capacity() == 12); + + Cluster_i2x2* ptr = reinterpret_cast(cv1.data()); + REQUIRE(ptr[0].x == 1); + REQUIRE(ptr[0].y == 2); + REQUIRE(ptr[1].x == 6); + REQUIRE(ptr[1].y == 7); + REQUIRE(ptr[2].x == 11); + REQUIRE(ptr[2].y == 12); + REQUIRE(ptr[3].x == 16); + REQUIRE(ptr[3].y == 17); +} + +TEST_CASE("Concatenate two 
cluster vectors where we need to allocate"){ + ClusterVector cv1(2, 2, 2); + Cluster_i2x2 c1 = {1, 2, {3, 4, 5, 6}}; + cv1.push_back(c1.x, c1.y, reinterpret_cast(&c1.data[0])); + Cluster_i2x2 c2 = {6, 7, {8, 9, 10, 11}}; + cv1.push_back(c2.x, c2.y, reinterpret_cast(&c2.data[0])); + + ClusterVector cv2(2, 2, 2); + Cluster_i2x2 c3 = {11, 12, {13, 14, 15, 16}}; + cv2.push_back(c3.x, c3.y, reinterpret_cast(&c3.data[0])); + Cluster_i2x2 c4 = {16, 17, {18, 19, 20, 21}}; + cv2.push_back(c4.x, c4.y, reinterpret_cast(&c4.data[0])); + + cv1 += cv2; + REQUIRE(cv1.size() == 4); + REQUIRE(cv1.capacity() == 4); + + Cluster_i2x2* ptr = reinterpret_cast(cv1.data()); + REQUIRE(ptr[0].x == 1); + REQUIRE(ptr[0].y == 2); + REQUIRE(ptr[1].x == 6); + REQUIRE(ptr[1].y == 7); + REQUIRE(ptr[2].x == 11); + REQUIRE(ptr[2].y == 12); + REQUIRE(ptr[3].x == 16); + REQUIRE(ptr[3].y == 17); } \ No newline at end of file From 7550a2cb9749750a5bc39caaa9267e3eb4708e06 Mon Sep 17 00:00:00 2001 From: froejdh_e Date: Fri, 10 Jan 2025 15:33:56 +0100 Subject: [PATCH 015/120] fixing read bug --- python/src/cluster.hpp | 34 +++++++++++++++++----------------- python/src/cluster_file.hpp | 2 +- src/ClusterFile.cpp | 4 ++-- 3 files changed, 20 insertions(+), 20 deletions(-) diff --git a/python/src/cluster.hpp b/python/src/cluster.hpp index 5b7d20e..768593a 100644 --- a/python/src/cluster.hpp +++ b/python/src/cluster.hpp @@ -39,14 +39,14 @@ void define_cluster_vector(py::module &m, const std::string &typestr) { &ClusterVector::set_frame_number) .def_buffer([typestr](ClusterVector &self) -> py::buffer_info { return py::buffer_info( - self.data(), /* Pointer to buffer */ + self.data(), /* Pointer to buffer */ self.item_size(), /* Size of one scalar */ fmt::format(self.fmt_base(), self.cluster_size_x(), self.cluster_size_y(), - typestr), /* Format descriptor */ - 1, /* Number of dimensions */ - {self.size()}, /* Buffer dimensions */ - {self.item_size()} /* Strides (in bytes) for each index */ + typestr), /* Format 
descriptor */ + 1, /* Number of dimensions */ + {self.size()}, /* Buffer dimensions */ + {self.item_size()} /* Strides (in bytes) for each index */ ); }); } @@ -75,18 +75,18 @@ void define_cluster_finder_mt_bindings(py::module &m) { .def("sync", &ClusterFinderMT::sync) .def("stop", &ClusterFinderMT::stop) .def("start", &ClusterFinderMT::start) - .def_property_readonly("pedestal", - [](ClusterFinderMT &self) { - auto pd = new NDArray{}; - *pd = self.pedestal(); - return return_image_data(pd); - }) - .def_property_readonly("noise", - [](ClusterFinderMT &self) { - auto arr = new NDArray{}; - *arr = self.noise(); - return return_image_data(arr); - }); + .def("pedestal", + [](ClusterFinderMT &self, size_t thread_index) { + auto pd = new NDArray{}; + *pd = self.pedestal(thread_index); + return return_image_data(pd); + },py::arg("thread_index") = 0) + .def("noise", + [](ClusterFinderMT &self, size_t thread_index) { + auto arr = new NDArray{}; + *arr = self.noise(thread_index); + return return_image_data(arr); + },py::arg("thread_index") = 0); } void define_cluster_collector_bindings(py::module &m) { diff --git a/python/src/cluster_file.hpp b/python/src/cluster_file.hpp index 82d4453..baae7a1 100644 --- a/python/src/cluster_file.hpp +++ b/python/src/cluster_file.hpp @@ -35,7 +35,7 @@ void define_cluster_file_io_bindings(py::module &m) { [](ClusterFile &self) { auto v = new ClusterVector(self.read_frame()); return v; - },py::return_value_policy::take_ownership) + }) .def("write_frame", &ClusterFile::write_frame) .def("read_cluster_with_cut", [](ClusterFile &self, size_t n_clusters, diff --git a/src/ClusterFile.cpp b/src/ClusterFile.cpp index dab4552..4d0d6df 100644 --- a/src/ClusterFile.cpp +++ b/src/ClusterFile.cpp @@ -72,7 +72,7 @@ ClusterVector ClusterFile::read_clusters(size_t n_clusters) { } else { nn = nph; } - nph_read += fread(reinterpret_cast(buf + nph_read), + nph_read += fread((buf + nph_read*clusters.item_size()), clusters.item_size(), nn, fp); m_num_left = nph 
- nn; // write back the number of photons left } @@ -87,7 +87,7 @@ ClusterVector ClusterFile::read_clusters(size_t n_clusters) { else nn = nph; - nph_read += fread(reinterpret_cast(buf + nph_read), + nph_read += fread((buf + nph_read*clusters.item_size()), clusters.item_size(), nn, fp); m_num_left = nph - nn; } From 7ce02006f279b84e8782397ea27ebb56e7de519a Mon Sep 17 00:00:00 2001 From: froejdh_e Date: Fri, 10 Jan 2025 17:26:23 +0100 Subject: [PATCH 016/120] clear pedestal --- include/aare/ClusterFile.hpp | 9 ++++++- include/aare/ClusterFinder.hpp | 1 + include/aare/ClusterFinderMT.hpp | 9 +++++++ include/aare/Pedestal.hpp | 4 ++- python/src/cluster.hpp | 2 ++ src/ClusterFile.cpp | 42 +++++++++++++++++--------------- 6 files changed, 46 insertions(+), 21 deletions(-) diff --git a/include/aare/ClusterFile.hpp b/include/aare/ClusterFile.hpp index 8274078..8a0a907 100644 --- a/include/aare/ClusterFile.hpp +++ b/include/aare/ClusterFile.hpp @@ -33,6 +33,12 @@ typedef enum { pTopRight = 8 } pixel; +struct Eta2 { + double x; + double y; + corner c; +}; + struct ClusterAnalysis { uint32_t c; int32_t tot; @@ -74,7 +80,8 @@ int analyze_data(int32_t *data, int32_t *t2, int32_t *t3, char *quad, int analyze_cluster(Cluster3x3& cl, int32_t *t2, int32_t *t3, char *quad, double *eta2x, double *eta2y, double *eta3x, double *eta3y); + NDArray calculate_eta2( ClusterVector& clusters); -std::array calculate_eta2( Cluster3x3& cl); +Eta2 calculate_eta2( Cluster3x3& cl); } // namespace aare diff --git a/include/aare/ClusterFinder.hpp b/include/aare/ClusterFinder.hpp index 4a06c31..84b207b 100644 --- a/include/aare/ClusterFinder.hpp +++ b/include/aare/ClusterFinder.hpp @@ -47,6 +47,7 @@ class ClusterFinder { NDArray pedestal() { return m_pedestal.mean(); } NDArray noise() { return m_pedestal.std(); } + void clear_pedestal() { m_pedestal.clear(); } /** * @brief Move the clusters from the ClusterVector in the ClusterFinder to a diff --git a/include/aare/ClusterFinderMT.hpp 
b/include/aare/ClusterFinderMT.hpp index 6090ca8..1efb843 100644 --- a/include/aare/ClusterFinderMT.hpp +++ b/include/aare/ClusterFinderMT.hpp @@ -215,6 +215,15 @@ class ClusterFinderMT { m_current_thread++; } + void clear_pedestal() { + if (!m_processing_threads_stopped) { + throw std::runtime_error("ClusterFinderMT is still running"); + } + for (auto &cf : m_cluster_finders) { + cf->clear_pedestal(); + } + } + /** * @brief Return the pedestal currently used by the cluster finder * @param thread_index index of the thread diff --git a/include/aare/Pedestal.hpp b/include/aare/Pedestal.hpp index ab73cb9..102d730 100644 --- a/include/aare/Pedestal.hpp +++ b/include/aare/Pedestal.hpp @@ -89,6 +89,7 @@ template class Pedestal { m_sum = 0; m_sum2 = 0; m_cur_samples = 0; + m_mean = 0; } @@ -97,6 +98,7 @@ template class Pedestal { m_sum(row, col) = 0; m_sum2(row, col) = 0; m_cur_samples(row, col) = 0; + m_mean(row, col) = 0; } @@ -119,7 +121,7 @@ template class Pedestal { /** * Push but don't update the cached mean. Speeds up the process - * when intitializing the pedestal. + * when initializing the pedestal. 
* */ template void push_no_update(NDView frame) { diff --git a/python/src/cluster.hpp b/python/src/cluster.hpp index 768593a..e971886 100644 --- a/python/src/cluster.hpp +++ b/python/src/cluster.hpp @@ -72,6 +72,7 @@ void define_cluster_finder_mt_bindings(py::module &m) { return; }, py::arg(), py::arg("frame_number") = 0) + .def("clear_pedestal", &ClusterFinderMT::clear_pedestal) .def("sync", &ClusterFinderMT::sync) .def("stop", &ClusterFinderMT::stop) .def("start", &ClusterFinderMT::start) @@ -121,6 +122,7 @@ void define_cluster_finder_bindings(py::module &m) { auto view = make_view_2d(frame); self.push_pedestal_frame(view); }) + .def("clear_pedestal", &ClusterFinder::clear_pedestal) .def_property_readonly("pedestal", [](ClusterFinder &self) { auto pd = new NDArray{}; diff --git a/src/ClusterFile.cpp b/src/ClusterFile.cpp index 4d0d6df..48f5c9a 100644 --- a/src/ClusterFile.cpp +++ b/src/ClusterFile.cpp @@ -262,19 +262,19 @@ std::vector ClusterFile::read_cluster_with_cut(size_t n_clusters, NDArray calculate_eta2(ClusterVector &clusters) { NDArray eta2({static_cast(clusters.size()), 2}); for (size_t i = 0; i < clusters.size(); i++) { - // int32_t t2; - // auto* ptr = reinterpret_cast (clusters.element_ptr(i) + 2 * - // sizeof(int16_t)); analyze_cluster(clusters.at(i), &t2, - // nullptr, nullptr, &eta2(i,0), &eta2(i,1) , nullptr, nullptr); - auto [x, y] = calculate_eta2(clusters.at(i)); - eta2(i, 0) = x; - eta2(i, 1) = y; + auto e = calculate_eta2(clusters.at(i)); + eta2(i, 0) = e.x; + eta2(i, 1) = e.y; } return eta2; } -std::array calculate_eta2(Cluster3x3 &cl) { - std::array eta2{}; +/** + * @brief Calculate the eta2 values for a 3x3 cluster and return them in a Eta2 struct + * containing etay, etax and the corner of the cluster. 
+*/ +Eta2 calculate_eta2(Cluster3x3 &cl) { + Eta2 eta{}; std::array tot2; tot2[0] = cl.data[0] + cl.data[1] + cl.data[3] + cl.data[4]; @@ -287,39 +287,43 @@ std::array calculate_eta2(Cluster3x3 &cl) { switch (c) { case cBottomLeft: if ((cl.data[3] + cl.data[4]) != 0) - eta2[0] = + eta.x = static_cast(cl.data[4]) / (cl.data[3] + cl.data[4]); if ((cl.data[1] + cl.data[4]) != 0) - eta2[1] = + eta.y = static_cast(cl.data[4]) / (cl.data[1] + cl.data[4]); + eta.c = cBottomLeft; break; case cBottomRight: if ((cl.data[2] + cl.data[5]) != 0) - eta2[0] = + eta.x = static_cast(cl.data[5]) / (cl.data[4] + cl.data[5]); if ((cl.data[1] + cl.data[4]) != 0) - eta2[1] = + eta.y = static_cast(cl.data[4]) / (cl.data[1] + cl.data[4]); + eta.c = cBottomRight; break; case cTopLeft: if ((cl.data[7] + cl.data[4]) != 0) - eta2[0] = + eta.x = static_cast(cl.data[4]) / (cl.data[3] + cl.data[4]); if ((cl.data[7] + cl.data[4]) != 0) - eta2[1] = + eta.y = static_cast(cl.data[7]) / (cl.data[7] + cl.data[4]); + eta.c = cTopLeft; break; case cTopRight: if ((cl.data[5] + cl.data[4]) != 0) - eta2[0] = + eta.x = static_cast(cl.data[5]) / (cl.data[5] + cl.data[4]); if ((cl.data[7] + cl.data[4]) != 0) - eta2[1] = + eta.y = static_cast(cl.data[7]) / (cl.data[7] + cl.data[4]); + eta.c = cTopRight; break; - // default:; + // no default to allow compiler to warn about missing cases } - return eta2; + return eta; } int analyze_cluster(Cluster3x3 &cl, int32_t *t2, int32_t *t3, char *quad, From d0f435a7ab30317e433c9e4e45436729e74dd05a Mon Sep 17 00:00:00 2001 From: froejdh_e Date: Fri, 10 Jan 2025 19:02:50 +0100 Subject: [PATCH 017/120] bounds checking on subfiles --- python/examples/play.py | 154 +++++++++++++++++++++------------------- src/RawFile.cpp | 12 ++++ 2 files changed, 91 insertions(+), 75 deletions(-) diff --git a/python/examples/play.py b/python/examples/play.py index ca232ba..588662d 100644 --- a/python/examples/play.py +++ b/python/examples/play.py @@ -14,99 +14,103 @@ base = 
Path('/mnt/sls_det_storage/matterhorn_data/aare_test_data/') f = File(base/'Moench03new/cu_half_speed_master_4.json') +for i, frame in enumerate(f): + print(f'{i}', end='\r') +print() + from aare._aare import ClusterFinderMT, ClusterCollector, ClusterFileSink -cf = ClusterFinderMT((400,400), (3,3), n_threads = 3) -# collector = ClusterCollector(cf) -out_file = ClusterFileSink(cf, "test.clust") +# cf = ClusterFinderMT((400,400), (3,3), n_threads = 3) +# # collector = ClusterCollector(cf) +# out_file = ClusterFileSink(cf, "test.clust") -for i in range(1000): - img = f.read_frame() - cf.push_pedestal_frame(img) -print('Pedestal done') -cf.sync() - -for i in range(100): - img = f.read_frame() - cf.find_clusters(img) - - -# time.sleep(1) -cf.stop() -time.sleep(1) -print('Second run') -cf.start() -for i in range(100): - img = f.read_frame() - cf.find_clusters(img) - -cf.stop() -print('Third run') -cf.start() -for i in range(129): - img = f.read_frame() - cf.find_clusters(img) - -cf.stop() -out_file.stop() -print('Done') - - -cfile = ClusterFile("test.clust") -i = 0 -while True: - try: - cv = cfile.read_frame() - i+=1 - except RuntimeError: - break -print(f'Read {i} frames') - - - - -# cf = ClusterFinder((400,400), (3,3)) # for i in range(1000): -# cf.push_pedestal_frame(f.read_frame()) +# img = f.read_frame() +# cf.push_pedestal_frame(img) +# print('Pedestal done') +# cf.sync() -# fig, ax = plt.subplots() -# im = ax.imshow(cf.pedestal()) -# cf.pedestal() -# cf.noise() +# for i in range(100): +# img = f.read_frame() +# cf.find_clusters(img) + + +# # time.sleep(1) +# cf.stop() +# time.sleep(1) +# print('Second run') +# cf.start() +# for i in range(100): +# img = f.read_frame() +# cf.find_clusters(img) + +# cf.stop() +# print('Third run') +# cf.start() +# for i in range(129): +# img = f.read_frame() +# cf.find_clusters(img) + +# cf.stop() +# out_file.stop() +# print('Done') + + +# cfile = ClusterFile("test.clust") +# i = 0 +# while True: +# try: +# cv = cfile.read_frame() 
+# i+=1 +# except RuntimeError: +# break +# print(f'Read {i} frames') -# N = 500 -# t0 = time.perf_counter() -# hist1 = bh.Histogram(bh.axis.Regular(40, -2, 4000)) -# f.seek(0) -# t0 = time.perf_counter() -# data = f.read_n(N) -# t_elapsed = time.perf_counter()-t0 +# # cf = ClusterFinder((400,400), (3,3)) +# # for i in range(1000): +# # cf.push_pedestal_frame(f.read_frame()) + +# # fig, ax = plt.subplots() +# # im = ax.imshow(cf.pedestal()) +# # cf.pedestal() +# # cf.noise() -# n_bytes = data.itemsize*data.size -# print(f'Reading {N} frames took {t_elapsed:.3f}s {N/t_elapsed:.0f} FPS, {n_bytes/1024**2:.4f} GB/s') +# # N = 500 +# # t0 = time.perf_counter() +# # hist1 = bh.Histogram(bh.axis.Regular(40, -2, 4000)) +# # f.seek(0) + +# # t0 = time.perf_counter() +# # data = f.read_n(N) +# # t_elapsed = time.perf_counter()-t0 -# for frame in data: -# a = cf.find_clusters(frame) +# # n_bytes = data.itemsize*data.size -# clusters = cf.steal_clusters() - -# t_elapsed = time.perf_counter()-t0 -# print(f'Clustering {N} frames took {t_elapsed:.2f}s {N/t_elapsed:.0f} FPS') +# # print(f'Reading {N} frames took {t_elapsed:.3f}s {N/t_elapsed:.0f} FPS, {n_bytes/1024**2:.4f} GB/s') -# t0 = time.perf_counter() -# total_clusters = clusters.size +# # for frame in data: +# # a = cf.find_clusters(frame) -# hist1.fill(clusters.sum()) +# # clusters = cf.steal_clusters() -# t_elapsed = time.perf_counter()-t0 -# print(f'Filling histogram with the sum of {total_clusters} clusters took: {t_elapsed:.3f}s, {total_clusters/t_elapsed:.3g} clust/s') -# print(f'Average number of clusters per frame {total_clusters/N:.3f}') \ No newline at end of file +# # t_elapsed = time.perf_counter()-t0 +# # print(f'Clustering {N} frames took {t_elapsed:.2f}s {N/t_elapsed:.0f} FPS') + + +# # t0 = time.perf_counter() +# # total_clusters = clusters.size + +# # hist1.fill(clusters.sum()) + +# # t_elapsed = time.perf_counter()-t0 +# # print(f'Filling histogram with the sum of {total_clusters} clusters took: 
{t_elapsed:.3f}s, {total_clusters/t_elapsed:.3g} clust/s') +# # print(f'Average number of clusters per frame {total_clusters/N:.3f}') \ No newline at end of file diff --git a/src/RawFile.cpp b/src/RawFile.cpp index 744064f..b8c49cf 100644 --- a/src/RawFile.cpp +++ b/src/RawFile.cpp @@ -278,6 +278,10 @@ void RawFile::get_frame_into(size_t frame_index, std::byte *frame_buffer, Detect if (n_subfile_parts != 1) { for (size_t part_idx = 0; part_idx != n_subfile_parts; ++part_idx) { auto subfile_id = frame_index / m_master.max_frames_per_file(); + if (subfile_id >= subfiles.size()) { + throw std::runtime_error(LOCATION + + " Subfile out of range. Possible missing data."); + } frame_numbers[part_idx] = subfiles[subfile_id][part_idx]->frame_number( frame_index % m_master.max_frames_per_file()); @@ -311,6 +315,10 @@ void RawFile::get_frame_into(size_t frame_index, std::byte *frame_buffer, Detect for (size_t part_idx = 0; part_idx != n_subfile_parts; ++part_idx) { auto corrected_idx = frame_indices[part_idx]; auto subfile_id = corrected_idx / m_master.max_frames_per_file(); + if (subfile_id >= subfiles.size()) { + throw std::runtime_error(LOCATION + + " Subfile out of range. Possible missing data."); + } // This is where we start writing auto offset = (m_module_pixel_0[part_idx].y * m_cols + @@ -343,6 +351,10 @@ void RawFile::get_frame_into(size_t frame_index, std::byte *frame_buffer, Detect auto pos = m_module_pixel_0[part_idx]; auto corrected_idx = frame_indices[part_idx]; auto subfile_id = corrected_idx / m_master.max_frames_per_file(); + if (subfile_id >= subfiles.size()) { + throw std::runtime_error(LOCATION + + " Subfile out of range. 
Possible missing data."); + } subfiles[subfile_id][part_idx]->seek(corrected_idx % m_master.max_frames_per_file()); subfiles[subfile_id][part_idx]->read_into(part_buffer, header); From f6d736facdacdece5c6cad3e9c3ce0eddd42dfd6 Mon Sep 17 00:00:00 2001 From: froejdh_e Date: Wed, 15 Jan 2025 09:15:41 +0100 Subject: [PATCH 018/120] docs for ClusterFile --- include/aare/ClusterFile.hpp | 66 +++++++-- python/src/cluster_file.hpp | 18 +-- src/ClusterFile.cpp | 255 ++++++++++++++++++----------------- 3 files changed, 197 insertions(+), 142 deletions(-) diff --git a/include/aare/ClusterFile.hpp b/include/aare/ClusterFile.hpp index 8a0a907..b796763 100644 --- a/include/aare/ClusterFile.hpp +++ b/include/aare/ClusterFile.hpp @@ -55,6 +55,19 @@ int32_t frame_number uint32_t number_of_clusters .... */ + +/** + * @brief Class to read and write cluster files + * Expects data to be laid out as: + * + * + * int32_t frame_number + * uint32_t number_of_clusters + * int16_t x, int16_t y, int32_t data[9] x number_of_clusters + * int32_t frame_number + * uint32_t number_of_clusters + * etc. + */ class ClusterFile { FILE *fp{}; uint32_t m_num_left{}; @@ -62,26 +75,61 @@ class ClusterFile { const std::string m_mode; public: + /** + * @brief Construct a new Cluster File object + * @param fname path to the file + * @param chunk_size number of clusters to read at a time when iterating + * over the file + * @param mode mode to open the file in. "r" for reading, "w" for writing, + * "a" for appending + * @throws std::runtime_error if the file could not be opened + */ ClusterFile(const std::filesystem::path &fname, size_t chunk_size = 1000, const std::string &mode = "r"); + + ~ClusterFile(); - ClusterVector read_clusters(size_t n_clusters); - ClusterVector read_frame(); - void write_frame(const ClusterVector &clusters); - std::vector - read_cluster_with_cut(size_t n_clusters, double *noise_map, int nx, int ny); + /** + * @brief Read n_clusters clusters from the file discarding frame numbers. 
+ * If EOF is reached the returned vector will have less than n_clusters + * clusters + */ + ClusterVector read_clusters(size_t n_clusters); + + /** + * @brief Read a single frame from the file and return the clusters. The + * cluster vector will have the frame number set. + * @throws std::runtime_error if the file is not opened for reading or the file pointer not + * at the beginning of a frame + */ + ClusterVector read_frame(); + + + void write_frame(const ClusterVector &clusters); + + // Need to be migrated to support NDArray and return a ClusterVector + // std::vector + // read_cluster_with_cut(size_t n_clusters, double *noise_map, int nx, int ny); + + /** + * @brief Return the chunk size + */ size_t chunk_size() const { return m_chunk_size; } + + + /** + * @brief Close the file. If not closed the file will be closed in the destructor + */ void close(); }; int analyze_data(int32_t *data, int32_t *t2, int32_t *t3, char *quad, double *eta2x, double *eta2y, double *eta3x, double *eta3y); -int analyze_cluster(Cluster3x3& cl, int32_t *t2, int32_t *t3, char *quad, +int analyze_cluster(Cluster3x3 &cl, int32_t *t2, int32_t *t3, char *quad, double *eta2x, double *eta2y, double *eta3x, double *eta3y); - -NDArray calculate_eta2( ClusterVector& clusters); -Eta2 calculate_eta2( Cluster3x3& cl); +NDArray calculate_eta2(ClusterVector &clusters); +Eta2 calculate_eta2(Cluster3x3 &cl); } // namespace aare diff --git a/python/src/cluster_file.hpp b/python/src/cluster_file.hpp index baae7a1..8a431b5 100644 --- a/python/src/cluster_file.hpp +++ b/python/src/cluster_file.hpp @@ -37,15 +37,15 @@ void define_cluster_file_io_bindings(py::module &m) { return v; }) .def("write_frame", &ClusterFile::write_frame) - .def("read_cluster_with_cut", - [](ClusterFile &self, size_t n_clusters, - py::array_t noise_map, int nx, int ny) { - auto view = make_view_2d(noise_map); - auto *vec = - new std::vector(self.read_cluster_with_cut( - n_clusters, view.data(), nx, ny)); - return 
return_vector(vec); - }) + // .def("read_cluster_with_cut", + // [](ClusterFile &self, size_t n_clusters, + // py::array_t noise_map, int nx, int ny) { + // auto view = make_view_2d(noise_map); + // auto *vec = + // new std::vector(self.read_cluster_with_cut( + // n_clusters, view.data(), nx, ny)); + // return return_vector(vec); + // }) .def("__enter__", [](ClusterFile &self) { return &self; }) .def("__exit__", [](ClusterFile &self, diff --git a/src/ClusterFile.cpp b/src/ClusterFile.cpp index 48f5c9a..2928d26 100644 --- a/src/ClusterFile.cpp +++ b/src/ClusterFile.cpp @@ -20,6 +20,12 @@ ClusterFile::ClusterFile(const std::filesystem::path &fname, size_t chunk_size, throw std::runtime_error("Could not open file for writing: " + fname.string()); } + } else if (mode == "a") { + fp = fopen(fname.c_str(), "ab"); + if (!fp) { + throw std::runtime_error("Could not open file for appending: " + + fname.string()); + } } else { throw std::runtime_error("Unsupported mode: " + mode); } @@ -35,7 +41,7 @@ void ClusterFile::close() { } void ClusterFile::write_frame(const ClusterVector &clusters) { - if (m_mode != "w") { + if (m_mode != "w" && m_mode != "a") { throw std::runtime_error("File not opened for writing"); } if (!(clusters.cluster_size_x() == 3) && @@ -132,134 +138,135 @@ ClusterVector ClusterFile::read_frame() { } -std::vector ClusterFile::read_cluster_with_cut(size_t n_clusters, - double *noise_map, - int nx, int ny) { - if (m_mode != "r") { - throw std::runtime_error("File not opened for reading"); - } - std::vector clusters(n_clusters); - // size_t read_clusters_with_cut(FILE *fp, size_t n_clusters, Cluster *buf, - // uint32_t *n_left, double *noise_map, int - // nx, int ny) { - int iframe = 0; - // uint32_t nph = *n_left; - uint32_t nph = m_num_left; - // uint32_t nn = *n_left; - uint32_t nn = m_num_left; - size_t nph_read = 0; +// std::vector ClusterFile::read_cluster_with_cut(size_t n_clusters, +// double *noise_map, +// int nx, int ny) { +// if (m_mode != "r") { 
+// throw std::runtime_error("File not opened for reading"); +// } +// std::vector clusters(n_clusters); +// // size_t read_clusters_with_cut(FILE *fp, size_t n_clusters, Cluster *buf, +// // uint32_t *n_left, double *noise_map, int +// // nx, int ny) { +// int iframe = 0; +// // uint32_t nph = *n_left; +// uint32_t nph = m_num_left; +// // uint32_t nn = *n_left; +// uint32_t nn = m_num_left; +// size_t nph_read = 0; - int32_t t2max, tot1; - int32_t tot3; - // Cluster *ptr = buf; - Cluster3x3 *ptr = clusters.data(); - int good = 1; - double noise; - // read photons left from previous frame - if (noise_map) - printf("Using noise map\n"); +// int32_t t2max, tot1; +// int32_t tot3; +// // Cluster *ptr = buf; +// Cluster3x3 *ptr = clusters.data(); +// int good = 1; +// double noise; +// // read photons left from previous frame +// if (noise_map) +// printf("Using noise map\n"); - if (nph) { - if (nph > n_clusters) { - // if we have more photons left in the frame then photons to - // read we read directly the requested number - nn = n_clusters; - } else { - nn = nph; - } - for (size_t iph = 0; iph < nn; iph++) { - // read photons 1 by 1 - size_t n_read = - fread(reinterpret_cast(ptr), sizeof(Cluster3x3), 1, fp); - if (n_read != 1) { - clusters.resize(nph_read); - return clusters; - } - // TODO! 
error handling on read - good = 1; - if (noise_map) { - if (ptr->x >= 0 && ptr->x < nx && ptr->y >= 0 && ptr->y < ny) { - tot1 = ptr->data[4]; - analyze_cluster(*ptr, &t2max, &tot3, NULL, NULL, NULL, NULL, - NULL); - noise = noise_map[ptr->y * nx + ptr->x]; - if (tot1 > noise || t2max > 2 * noise || tot3 > 3 * noise) { - ; - } else { - good = 0; - printf("%d %d %f %d %d %d\n", ptr->x, ptr->y, noise, - tot1, t2max, tot3); - } - } else { - printf("Bad pixel number %d %d\n", ptr->x, ptr->y); - good = 0; - } - } - if (good) { - ptr++; - nph_read++; - } - (m_num_left)--; - if (nph_read >= n_clusters) - break; - } - } - if (nph_read < n_clusters) { - // // keep on reading frames and photons until reaching - // n_clusters - while (fread(&iframe, sizeof(iframe), 1, fp)) { - // // printf("%d\n",nph_read); +// if (nph) { +// if (nph > n_clusters) { +// // if we have more photons left in the frame then photons to +// // read we read directly the requested number +// nn = n_clusters; +// } else { +// nn = nph; +// } +// for (size_t iph = 0; iph < nn; iph++) { +// // read photons 1 by 1 +// size_t n_read = +// fread(reinterpret_cast(ptr), sizeof(Cluster3x3), 1, fp); +// if (n_read != 1) { +// clusters.resize(nph_read); +// return clusters; +// } +// // TODO! 
error handling on read +// good = 1; +// if (noise_map) { +// if (ptr->x >= 0 && ptr->x < nx && ptr->y >= 0 && ptr->y < ny) { +// tot1 = ptr->data[4]; +// analyze_cluster(*ptr, &t2max, &tot3, NULL, NULL, NULL, NULL, +// NULL); +// noise = noise_map[ptr->y * nx + ptr->x]; +// if (tot1 > noise || t2max > 2 * noise || tot3 > 3 * noise) { +// ; +// } else { +// good = 0; +// printf("%d %d %f %d %d %d\n", ptr->x, ptr->y, noise, +// tot1, t2max, tot3); +// } +// } else { +// printf("Bad pixel number %d %d\n", ptr->x, ptr->y); +// good = 0; +// } +// } +// if (good) { +// ptr++; +// nph_read++; +// } +// (m_num_left)--; +// if (nph_read >= n_clusters) +// break; +// } +// } +// if (nph_read < n_clusters) { +// // // keep on reading frames and photons until reaching +// // n_clusters +// while (fread(&iframe, sizeof(iframe), 1, fp)) { +// // // printf("%d\n",nph_read); - if (fread(&nph, sizeof(nph), 1, fp)) { - // // printf("** %d\n",nph); - m_num_left = nph; - for (size_t iph = 0; iph < nph; iph++) { - // // read photons 1 by 1 - size_t n_read = fread(reinterpret_cast(ptr), - sizeof(Cluster3x3), 1, fp); - if (n_read != 1) { - clusters.resize(nph_read); - return clusters; - // return nph_read; - } - good = 1; - if (noise_map) { - if (ptr->x >= 0 && ptr->x < nx && ptr->y >= 0 && - ptr->y < ny) { - tot1 = ptr->data[4]; - analyze_cluster(*ptr, &t2max, &tot3, NULL, NULL, - NULL, NULL, NULL); - // noise = noise_map[ptr->y * nx + ptr->x]; - noise = noise_map[ptr->y + ny * ptr->x]; - if (tot1 > noise || t2max > 2 * noise || - tot3 > 3 * noise) { - ; - } else - good = 0; - } else { - printf("Bad pixel number %d %d\n", ptr->x, ptr->y); - good = 0; - } - } - if (good) { - ptr++; - nph_read++; - } - (m_num_left)--; - if (nph_read >= n_clusters) - break; - } - } - if (nph_read >= n_clusters) - break; - } - } - // printf("%d\n",nph_read); - clusters.resize(nph_read); - return clusters; -} +// if (fread(&nph, sizeof(nph), 1, fp)) { +// // // printf("** %d\n",nph); +// m_num_left = nph; 
+// for (size_t iph = 0; iph < nph; iph++) { +// // // read photons 1 by 1 +// size_t n_read = fread(reinterpret_cast(ptr), +// sizeof(Cluster3x3), 1, fp); +// if (n_read != 1) { +// clusters.resize(nph_read); +// return clusters; +// // return nph_read; +// } +// good = 1; +// if (noise_map) { +// if (ptr->x >= 0 && ptr->x < nx && ptr->y >= 0 && +// ptr->y < ny) { +// tot1 = ptr->data[4]; +// analyze_cluster(*ptr, &t2max, &tot3, NULL, NULL, +// NULL, NULL, NULL); +// // noise = noise_map[ptr->y * nx + ptr->x]; +// noise = noise_map[ptr->y + ny * ptr->x]; +// if (tot1 > noise || t2max > 2 * noise || +// tot3 > 3 * noise) { +// ; +// } else +// good = 0; +// } else { +// printf("Bad pixel number %d %d\n", ptr->x, ptr->y); +// good = 0; +// } +// } +// if (good) { +// ptr++; +// nph_read++; +// } +// (m_num_left)--; +// if (nph_read >= n_clusters) +// break; +// } +// } +// if (nph_read >= n_clusters) +// break; +// } +// } +// // printf("%d\n",nph_read); +// clusters.resize(nph_read); +// return clusters; +// } NDArray calculate_eta2(ClusterVector &clusters) { + //TOTO! 
make work with 2x2 clusters NDArray eta2({static_cast(clusters.size()), 2}); for (size_t i = 0; i < clusters.size(); i++) { auto e = calculate_eta2(clusters.at(i)); From 6cde968c60792f2124cfe5ef661d1238d57a6d4f Mon Sep 17 00:00:00 2001 From: froejdh_e Date: Wed, 15 Jan 2025 16:12:06 +0100 Subject: [PATCH 019/120] summing 2x2 --- include/aare/ClusterVector.hpp | 48 +++++++++++++++++++++++++++------- python/examples/play.py | 14 +++++----- python/src/cluster.hpp | 4 +++ 3 files changed, 51 insertions(+), 15 deletions(-) diff --git a/include/aare/ClusterVector.hpp b/include/aare/ClusterVector.hpp index 2c3b6c2..60ecd88 100644 --- a/include/aare/ClusterVector.hpp +++ b/include/aare/ClusterVector.hpp @@ -1,4 +1,6 @@ #pragma once +#include +#include #include #include #include @@ -147,24 +149,51 @@ template class ClusterVector { return sums; } + std::vector sum_2x2() { + std::vector sums(m_size); + const size_t stride = item_size(); + + if (m_cluster_size_x != 3 || m_cluster_size_y != 3) { + throw std::runtime_error( + "Only 3x3 clusters are supported for the 2x2 sum."); + } + const size_t n_pixels = m_cluster_size_x * m_cluster_size_y; + std::byte *ptr = m_data + 2 * sizeof(CoordType); // skip x and y + + for (size_t i = 0; i < m_size; i++) { + std::array total; + auto T_ptr = reinterpret_cast(ptr); + total[0] = T_ptr[0] + T_ptr[1] + T_ptr[3] + T_ptr[4]; + total[1] = T_ptr[1] + T_ptr[2] + T_ptr[4] + T_ptr[5]; + total[2] = T_ptr[3] + T_ptr[4] + T_ptr[6] + T_ptr[7]; + total[3] = T_ptr[4] + T_ptr[5] + T_ptr[7] + T_ptr[8]; + + sums[i] = *std::max_element(total.begin(), total.end()); + ptr += stride; + } + + return sums; + } + /** * @brief Return the number of clusters in the vector */ size_t size() const { return m_size; } /** - * @brief Return the capacity of the buffer in number of clusters. This is - * the number of clusters that can be stored in the current buffer without reallocation. + * @brief Return the capacity of the buffer in number of clusters. 
This is + * the number of clusters that can be stored in the current buffer without + * reallocation. */ size_t capacity() const { return m_capacity; } /** * @brief Return the size in bytes of a single cluster */ - size_t item_size() const { + size_t item_size() const { return 2 * sizeof(CoordType) + m_cluster_size_x * m_cluster_size_y * sizeof(T); - } + } /** * @brief Return the offset in bytes for the i-th cluster @@ -203,8 +232,8 @@ template class ClusterVector { } /** - * @brief Return the frame number of the clusters. 0 is used to indicate that - * the clusters come from many frames + * @brief Return the frame number of the clusters. 0 is used to indicate + * that the clusters come from many frames */ uint64_t frame_number() const { return m_frame_number; } @@ -213,13 +242,14 @@ template class ClusterVector { } /** - * @brief Resize the vector to contain new_size clusters. If new_size is greater than the current capacity, a new buffer is allocated. - * If the size is smaller no memory is freed, size is just updated. + * @brief Resize the vector to contain new_size clusters. If new_size is + * greater than the current capacity, a new buffer is allocated. If the size + * is smaller no memory is freed, size is just updated. * @param new_size new size of the vector * @warning The additional clusters are not initialized */ void resize(size_t new_size) { - //TODO! Should we initialize the new clusters? + // TODO! Should we initialize the new clusters? 
if (new_size > m_capacity) { allocate_buffer(new_size); } diff --git a/python/examples/play.py b/python/examples/play.py index 588662d..25bbe12 100644 --- a/python/examples/play.py +++ b/python/examples/play.py @@ -10,16 +10,18 @@ import time from aare import File, ClusterFinder, VarClusterFinder, ClusterFile -base = Path('/mnt/sls_det_storage/matterhorn_data/aare_test_data/') +base = Path('/mnt/sls_det_storage/matterhorn_data/aare_test_data/ci/aare_test_data/clusters/') -f = File(base/'Moench03new/cu_half_speed_master_4.json') +f = ClusterFile(base/'beam_En700eV_-40deg_300V_10us_d0_f0_100.clust') -for i, frame in enumerate(f): - print(f'{i}', end='\r') -print() +c = f.read_clusters(100) + +# for i, frame in enumerate(f): +# print(f'{i}', end='\r') +# print() -from aare._aare import ClusterFinderMT, ClusterCollector, ClusterFileSink +# from aare._aare import ClusterFinderMT, ClusterCollector, ClusterFileSink # cf = ClusterFinderMT((400,400), (3,3), n_threads = 3) diff --git a/python/src/cluster.hpp b/python/src/cluster.hpp index e971886..0e7aac9 100644 --- a/python/src/cluster.hpp +++ b/python/src/cluster.hpp @@ -34,6 +34,10 @@ void define_cluster_vector(py::module &m, const std::string &typestr) { auto *vec = new std::vector(self.sum()); return return_vector(vec); }) + .def("sum_2x2", [](ClusterVector &self) { + auto *vec = new std::vector(self.sum_2x2()); + return return_vector(vec); + }) .def_property_readonly("capacity", &ClusterVector::capacity) .def_property("frame_number", &ClusterVector::frame_number, &ClusterVector::set_frame_number) From 078e5d81ecd3e7ddc9cb66c93a133066169079d6 Mon Sep 17 00:00:00 2001 From: froejdh_e Date: Wed, 15 Jan 2025 16:40:34 +0100 Subject: [PATCH 020/120] docs --- include/aare/ClusterVector.hpp | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/include/aare/ClusterVector.hpp b/include/aare/ClusterVector.hpp index 60ecd88..73257ce 100644 --- a/include/aare/ClusterVector.hpp +++ b/include/aare/ClusterVector.hpp @@ -149,6 
+149,12 @@ template class ClusterVector { return sums; } + /** + * @brief Return the maximum sum of the 2x2 subclusters in each cluster + * @return std::vector vector of sums for each cluster + * @throws std::runtime_error if the cluster size is not 3x3 + * @warning Only 3x3 clusters are supported for the 2x2 sum. + */ std::vector sum_2x2() { std::vector sums(m_size); const size_t stride = item_size(); From 5a3ca2ae2de712cb7015219a53245a9f7cb34581 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Erik=20Fr=C3=B6jdh?= Date: Wed, 5 Feb 2025 14:40:26 +0100 Subject: [PATCH 021/120] Decoding for ADC SAR 05 64->16bit (#124) Co-authored-by: Patrick --- CMakeLists.txt | 2 ++ include/aare/decode.hpp | 12 ++++++++++++ python/aare/transform.py | 7 ++++++- python/src/ctb_raw_file.hpp | 27 +++++++++++++++++++++++++++ src/decode.cpp | 32 ++++++++++++++++++++++++++++++++ 5 files changed, 79 insertions(+), 1 deletion(-) create mode 100644 include/aare/decode.hpp create mode 100644 src/decode.cpp diff --git a/CMakeLists.txt b/CMakeLists.txt index cd1cd94..b8cc777 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -283,6 +283,7 @@ set(PUBLICHEADERS include/aare/ClusterFile.hpp include/aare/CtbRawFile.hpp include/aare/ClusterVector.hpp + include/aare/decode.hpp include/aare/defs.hpp include/aare/Dtype.hpp include/aare/File.hpp @@ -307,6 +308,7 @@ set(SourceFiles ${CMAKE_CURRENT_SOURCE_DIR}/src/ClusterFile.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/defs.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/Dtype.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/decode.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/Frame.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/File.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyFile.cpp diff --git a/include/aare/decode.hpp b/include/aare/decode.hpp new file mode 100644 index 0000000..7ff0963 --- /dev/null +++ b/include/aare/decode.hpp @@ -0,0 +1,12 @@ +#pragma once + +#include +#include +namespace aare { + + +uint16_t adc_sar_05_decode64to16(uint64_t input); + +void adc_sar_05_decode64to16(NDView input, NDView 
output); + +} // namespace aare \ No newline at end of file diff --git a/python/aare/transform.py b/python/aare/transform.py index 414eb27..bbb3b5d 100644 --- a/python/aare/transform.py +++ b/python/aare/transform.py @@ -2,6 +2,10 @@ import numpy as np from . import _aare +class AdcSar05Transform64to16: + def __call__(self, data): + return _aare.adc_sar_05_decode64to16(data) + class Moench05Transform: #Could be moved to C++ without changing the interface def __init__(self): @@ -45,4 +49,5 @@ class Matterhorn02Transform: moench05 = Moench05Transform() moench05_1g = Moench05Transform1g() moench05_old = Moench05TransformOld() -matterhorn02 = Matterhorn02Transform() \ No newline at end of file +matterhorn02 = Matterhorn02Transform() +adc_sar_05_64to16 = AdcSar05Transform64to16() \ No newline at end of file diff --git a/python/src/ctb_raw_file.hpp b/python/src/ctb_raw_file.hpp index 39c1001..5aeb387 100644 --- a/python/src/ctb_raw_file.hpp +++ b/python/src/ctb_raw_file.hpp @@ -7,6 +7,7 @@ #include "aare/RawSubFile.hpp" #include "aare/defs.hpp" +#include "aare/decode.hpp" // #include "aare/fClusterFileV2.hpp" #include @@ -23,6 +24,32 @@ using namespace ::aare; void define_ctb_raw_file_io_bindings(py::module &m) { +m.def("adc_sar_05_decode64to16", [](py::array_t input) { + + + if(input.ndim() != 2){ + throw std::runtime_error("Only 2D arrays are supported at this moment"); + } + + //Create a 2D output array with the same shape as the input + std::vector shape{input.shape(0), input.shape(1)/8}; + py::array_t output(shape); + + //Create a view of the input and output arrays + NDView input_view(reinterpret_cast(input.mutable_data()), {output.shape(0), output.shape(1)}); + + NDView output_view(output.mutable_data(), {output.shape(0), output.shape(1)}); + + adc_sar_05_decode64to16(input_view, output_view); + // for (size_t i=0; i!=input_view.size(); ++i) { + // output_view(i) = decode_adc(input_view(i)); + // } + + + + return output; +}); + py::class_(m, "CtbRawFile") 
.def(py::init()) .def("read_frame", diff --git a/src/decode.cpp b/src/decode.cpp new file mode 100644 index 0000000..a525faa --- /dev/null +++ b/src/decode.cpp @@ -0,0 +1,32 @@ +#include "aare/decode.hpp" + +namespace aare { + +uint16_t adc_sar_05_decode64to16(uint64_t input){ + + //we want bits 29,19,28,18,31,21,27,20,24,23,25,22 and then pad to 16 + uint16_t output = 0; + output |= ((input >> 22) & 1) << 11; + output |= ((input >> 25) & 1) << 10; + output |= ((input >> 23) & 1) << 9; + output |= ((input >> 24) & 1) << 8; + output |= ((input >> 20) & 1) << 7; + output |= ((input >> 27) & 1) << 6; + output |= ((input >> 21) & 1) << 5; + output |= ((input >> 31) & 1) << 4; + output |= ((input >> 18) & 1) << 3; + output |= ((input >> 28) & 1) << 2; + output |= ((input >> 19) & 1) << 1; + output |= ((input >> 29) & 1) << 0; + return output; +} + +void adc_sar_05_decode64to16(NDView input, NDView output){ + for(size_t i = 0; i < input.shape(0); i++){ + for(size_t j = 0; j < input.shape(1); j++){ + output(i,j) = adc_sar_05_decode64to16(input(i,j)); + } + } +} + +} // namespace aare From fff536782bd1dafaee36e8cd1e100b995f7fc069 Mon Sep 17 00:00:00 2001 From: froejdh_e Date: Wed, 5 Feb 2025 15:13:53 +0100 Subject: [PATCH 022/120] disable auto upload --- .github/workflows/deploy.yml | 4 ++++ conda-recipe/meta.yaml | 2 +- pyproject.toml | 2 +- 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/.github/workflows/deploy.yml b/.github/workflows/deploy.yml index 81edde3..f50de2a 100644 --- a/.github/workflows/deploy.yml +++ b/.github/workflows/deploy.yml @@ -36,6 +36,10 @@ jobs: - name: Enable upload if: github.ref == 'refs/heads/main' run: conda config --set anaconda_upload yes + + - name: Disable upload + if: github.ref == 'refs/heads/main' + run: conda config --set anaconda_upload no - name: Build env: diff --git a/conda-recipe/meta.yaml b/conda-recipe/meta.yaml index 6eeec38..978fa18 100644 --- a/conda-recipe/meta.yaml +++ b/conda-recipe/meta.yaml @@ -1,6 +1,6 @@ 
package: name: aare - version: 2025.1.9.dev0 #TODO! how to not duplicate this? + version: 2025.2.5.dev0 #TODO! how to not duplicate this? source: diff --git a/pyproject.toml b/pyproject.toml index 1b75d02..7ffd22a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "scikit_build_core.build" [project] name = "aare" -version = "2025.1.9.dev0" +version = "2025.2.5.dev0" [tool.scikit-build] cmake.verbose = true From 1ba43b69d33ba135070a6f0d8e2747164e66bf5e Mon Sep 17 00:00:00 2001 From: froejdh_e Date: Wed, 5 Feb 2025 15:16:16 +0100 Subject: [PATCH 023/120] fix --- .github/workflows/deploy.yml | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/.github/workflows/deploy.yml b/.github/workflows/deploy.yml index f50de2a..cef2a6c 100644 --- a/.github/workflows/deploy.yml +++ b/.github/workflows/deploy.yml @@ -33,13 +33,15 @@ jobs: - name: Prepare run: conda install conda-build=24.9 conda-verify pytest anaconda-client + + - name: Disable upload + run: conda config --set anaconda_upload no + - name: Enable upload if: github.ref == 'refs/heads/main' run: conda config --set anaconda_upload yes - - - name: Disable upload - if: github.ref == 'refs/heads/main' - run: conda config --set anaconda_upload no + + - name: Build env: From cd5a7386962b0f7c067fa94b534f7880fb95cd2b Mon Sep 17 00:00:00 2001 From: froejdh_e Date: Wed, 5 Feb 2025 15:44:45 +0100 Subject: [PATCH 024/120] disable upload on dev --- ...{deploy.yml => build_and_deploy_conda.yml} | 8 ---- .github/workflows/build_conda.yml | 43 +++++++++++++++++++ 2 files changed, 43 insertions(+), 8 deletions(-) rename .github/workflows/{deploy.yml => build_and_deploy_conda.yml} (87%) create mode 100644 .github/workflows/build_conda.yml diff --git a/.github/workflows/deploy.yml b/.github/workflows/build_and_deploy_conda.yml similarity index 87% rename from .github/workflows/deploy.yml rename to .github/workflows/build_and_deploy_conda.yml index cef2a6c..90e75c1 100644 --- 
a/.github/workflows/deploy.yml +++ b/.github/workflows/build_and_deploy_conda.yml @@ -4,7 +4,6 @@ on: push: branches: - main - - developer jobs: build: @@ -33,16 +32,9 @@ jobs: - name: Prepare run: conda install conda-build=24.9 conda-verify pytest anaconda-client - - - name: Disable upload - run: conda config --set anaconda_upload no - - name: Enable upload - if: github.ref == 'refs/heads/main' run: conda config --set anaconda_upload yes - - - name: Build env: CONDA_TOKEN: ${{ secrets.CONDA_TOKEN }} diff --git a/.github/workflows/build_conda.yml b/.github/workflows/build_conda.yml new file mode 100644 index 0000000..6d67b87 --- /dev/null +++ b/.github/workflows/build_conda.yml @@ -0,0 +1,43 @@ +name: Build pkgs and deploy if on main + +on: + push: + branches: + - main + - developer + +jobs: + build: + strategy: + fail-fast: false + matrix: + platform: [ubuntu-latest, ] # macos-12, windows-2019] + python-version: ["3.12",] + + runs-on: ${{ matrix.platform }} + + # The setup-miniconda action needs this to activate miniconda + defaults: + run: + shell: "bash -l {0}" + + steps: + - uses: actions/checkout@v4 + + - name: Get conda + uses: conda-incubator/setup-miniconda@v3.0.4 + with: + python-version: ${{ matrix.python-version }} + channels: conda-forge + + - name: Prepare + run: conda install conda-build=24.9 conda-verify pytest anaconda-client + + - name: Disable upload + run: conda config --set anaconda_upload no + + - name: Build + env: + CONDA_TOKEN: ${{ secrets.CONDA_TOKEN }} + run: conda build conda-recipe + From e96fe31f114a5683d460661b23c64925110d3446 Mon Sep 17 00:00:00 2001 From: froejdh_e Date: Wed, 5 Feb 2025 15:55:55 +0100 Subject: [PATCH 025/120] removed main and token --- .github/workflows/build_conda.yml | 3 --- 1 file changed, 3 deletions(-) diff --git a/.github/workflows/build_conda.yml b/.github/workflows/build_conda.yml index 6d67b87..0b3e55c 100644 --- a/.github/workflows/build_conda.yml +++ b/.github/workflows/build_conda.yml @@ -3,7 +3,6 @@ 
name: Build pkgs and deploy if on main on: push: branches: - - main - developer jobs: @@ -37,7 +36,5 @@ jobs: run: conda config --set anaconda_upload no - name: Build - env: - CONDA_TOKEN: ${{ secrets.CONDA_TOKEN }} run: conda build conda-recipe From 4c750cc3bea3b46dfc6be3e71cb369922869670a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Erik=20Fr=C3=B6jdh?= Date: Tue, 11 Feb 2025 11:08:22 +0100 Subject: [PATCH 026/120] Fixing ROI read of RawFile (#125) - Bugfixes - New abstraction for detector geometry - Tests for updating geo with ROI --- CMakeLists.txt | 3 + include/aare/RawFile.hpp | 15 ++- include/aare/RawMasterFile.hpp | 11 -- include/aare/defs.hpp | 33 ++++- include/aare/geo_helpers.hpp | 16 +++ python/examples/play.py | 10 +- src/RawFile.cpp | 148 +++++++-------------- src/RawFile.test.cpp | 5 + src/RawSubFile.cpp | 2 +- src/geo_helpers.cpp | 71 ++++++++++ src/geo_helpers.test.cpp | 230 +++++++++++++++++++++++++++++++++ 11 files changed, 422 insertions(+), 122 deletions(-) create mode 100644 include/aare/geo_helpers.hpp create mode 100644 src/geo_helpers.cpp create mode 100644 src/geo_helpers.test.cpp diff --git a/CMakeLists.txt b/CMakeLists.txt index b8cc777..165c435 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -289,6 +289,7 @@ set(PUBLICHEADERS include/aare/File.hpp include/aare/FileInterface.hpp include/aare/Frame.hpp + include/aare/geo_helpers.hpp include/aare/NDArray.hpp include/aare/NDView.hpp include/aare/NumpyFile.hpp @@ -311,6 +312,7 @@ set(SourceFiles ${CMAKE_CURRENT_SOURCE_DIR}/src/decode.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/Frame.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/File.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/geo_helpers.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyFile.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyHelpers.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/PixelMap.cpp @@ -352,6 +354,7 @@ if(AARE_TESTS) ${CMAKE_CURRENT_SOURCE_DIR}/src/defs.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/Dtype.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/Frame.test.cpp + 
${CMAKE_CURRENT_SOURCE_DIR}/src/geo_helpers.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/RawMasterFile.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/NDArray.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/NDView.test.cpp diff --git a/include/aare/RawFile.hpp b/include/aare/RawFile.hpp index eb044e3..f744ac2 100644 --- a/include/aare/RawFile.hpp +++ b/include/aare/RawFile.hpp @@ -34,15 +34,19 @@ class RawFile : public FileInterface { size_t n_subfile_parts{}; // d0,d1...dn //TODO! move to vector of SubFile instead of pointers std::vector> subfiles; //subfiles[f0,f1...fn][d0,d1...dn] - std::vector positions; - std::vector m_module_pixel_0; + // std::vector positions; + ModuleConfig cfg{0, 0}; RawMasterFile m_master; size_t m_current_frame{}; - size_t m_rows{}; - size_t m_cols{}; + + // std::vector m_module_pixel_0; + // size_t m_rows{}; + // size_t m_cols{}; + + DetectorGeometry m_geometry; public: /** @@ -111,11 +115,12 @@ class RawFile : public FileInterface { */ static DetectorHeader read_header(const std::filesystem::path &fname); - void update_geometry_with_roi(); + // void update_geometry_with_roi(); int find_number_of_subfiles(); void open_subfiles(); void find_geometry(); }; + } // namespace aare \ No newline at end of file diff --git a/include/aare/RawMasterFile.hpp b/include/aare/RawMasterFile.hpp index 42c324e..beaeb29 100644 --- a/include/aare/RawMasterFile.hpp +++ b/include/aare/RawMasterFile.hpp @@ -62,17 +62,6 @@ class ScanParameters { }; -struct ROI{ - int64_t xmin{}; - int64_t xmax{}; - int64_t ymin{}; - int64_t ymax{}; - - int64_t height() const { return ymax - ymin; } - int64_t width() const { return xmax - xmin; } -}; - - /** * @brief Class for parsing a master file either in our .json format or the old * .raw format diff --git a/include/aare/defs.hpp b/include/aare/defs.hpp index 7466410..db1a47b 100644 --- a/include/aare/defs.hpp +++ b/include/aare/defs.hpp @@ -179,13 +179,42 @@ template struct t_xy { using xy = t_xy; +/** + * @brief Class to hold the geometry 
of a module. Where pixel 0 is located and the size of the module + */ struct ModuleGeometry{ - int x{}; - int y{}; + int origin_x{}; + int origin_y{}; int height{}; int width{}; + int row_index{}; + int col_index{}; }; +/** + * @brief Class to hold the geometry of a detector. Number of modules, their size and where pixel 0 + * for each module is located + */ +struct DetectorGeometry{ + int modules_x{}; + int modules_y{}; + int pixels_x{}; + int pixels_y{}; + int module_gap_row{}; + int module_gap_col{}; + std::vector module_pixel_0; +}; + +struct ROI{ + int64_t xmin{}; + int64_t xmax{}; + int64_t ymin{}; + int64_t ymax{}; + + int64_t height() const { return ymax - ymin; } + int64_t width() const { return xmax - xmin; } + }; + using dynamic_shape = std::vector; diff --git a/include/aare/geo_helpers.hpp b/include/aare/geo_helpers.hpp new file mode 100644 index 0000000..d0d5d1a --- /dev/null +++ b/include/aare/geo_helpers.hpp @@ -0,0 +1,16 @@ +#pragma once +#include "aare/defs.hpp" +#include "aare/RawMasterFile.hpp" //ROI refactor away +namespace aare{ + +/** + * @brief Update the detector geometry given a region of interest + * + * @param geo + * @param roi + * @return DetectorGeometry + */ +DetectorGeometry update_geometry_with_roi(DetectorGeometry geo, ROI roi); + + +} // namespace aare \ No newline at end of file diff --git a/python/examples/play.py b/python/examples/play.py index 25bbe12..316c196 100644 --- a/python/examples/play.py +++ b/python/examples/play.py @@ -8,13 +8,15 @@ import numpy as np import boost_histogram as bh import time -from aare import File, ClusterFinder, VarClusterFinder, ClusterFile +from aare import RawFile -base = Path('/mnt/sls_det_storage/matterhorn_data/aare_test_data/ci/aare_test_data/clusters/') +f = RawFile('/mnt/sls_det_storage/jungfrau_data1/vadym_tests/jf12_M431/laser_scan/laserScan_pedestal_G0_master_0.json') -f = ClusterFile(base/'beam_En700eV_-40deg_300V_10us_d0_f0_100.clust') +print(f'{f.frame_number(1)}') -c = 
f.read_clusters(100) +for i in range(10): + header, img = f.read_frame() + print(header['frameNumber'], img.shape) # for i, frame in enumerate(f): # print(f'{i}', end='\r') diff --git a/src/RawFile.cpp b/src/RawFile.cpp index b8c49cf..ef622ee 100644 --- a/src/RawFile.cpp +++ b/src/RawFile.cpp @@ -1,6 +1,7 @@ #include "aare/RawFile.hpp" #include "aare/PixelMap.hpp" #include "aare/defs.hpp" +#include "aare/geo_helpers.hpp" #include #include @@ -21,8 +22,11 @@ RawFile::RawFile(const std::filesystem::path &fname, const std::string &mode) find_geometry(); - update_geometry_with_roi(); + if (m_master.roi()){ + m_geometry = update_geometry_with_roi(m_geometry, m_master.roi().value()); + } + open_subfiles(); } else { throw std::runtime_error(LOCATION + @@ -72,9 +76,13 @@ size_t RawFile::n_mod() const { return n_subfile_parts; } size_t RawFile::bytes_per_frame() { - return m_rows * m_cols * m_master.bitdepth() / 8; + // return m_rows * m_cols * m_master.bitdepth() / 8; + return m_geometry.pixels_x * m_geometry.pixels_y * m_master.bitdepth() / 8; +} +size_t RawFile::pixels_per_frame() { + // return m_rows * m_cols; + return m_geometry.pixels_x * m_geometry.pixels_y; } -size_t RawFile::pixels_per_frame() { return m_rows * m_cols; } DetectorType RawFile::detector_type() const { return m_master.detector_type(); } @@ -92,8 +100,8 @@ void RawFile::seek(size_t frame_index) { size_t RawFile::tell() { return m_current_frame; }; size_t RawFile::total_frames() const { return m_master.frames_in_file(); } -size_t RawFile::rows() const { return m_rows; } -size_t RawFile::cols() const { return m_cols; } +size_t RawFile::rows() const { return m_geometry.pixels_y; } +size_t RawFile::cols() const { return m_geometry.pixels_x; } size_t RawFile::bitdepth() const { return m_master.bitdepth(); } xy RawFile::geometry() { return m_master.geometry(); } @@ -102,11 +110,12 @@ void RawFile::open_subfiles() { for (size_t i = 0; i != n_subfiles; ++i) { auto v = std::vector(n_subfile_parts); for (size_t 
j = 0; j != n_subfile_parts; ++j) { - auto pos = m_module_pixel_0[j]; + auto pos = m_geometry.module_pixel_0[j]; + fmt::println("POS: {} {} {} {}", pos.origin_x, pos.origin_y, pos.width, pos.height); v[j] = new RawSubFile(m_master.data_fname(j, i), m_master.detector_type(), pos.height, pos.width, m_master.bitdepth(), - positions[j].row, positions[j].col); + pos.row_index, pos.col_index); } subfiles.push_back(v); @@ -149,112 +158,49 @@ int RawFile::find_number_of_subfiles() { RawMasterFile RawFile::master() const { return m_master; } +/** + * @brief Find the geometry of the detector by opening all the subfiles and + * reading the headers. + */ void RawFile::find_geometry() { + + //Hold the maximal row and column number found + //Later used for calculating the total number of rows and columns uint16_t r{}; uint16_t c{}; for (size_t i = 0; i < n_subfile_parts; i++) { - auto h = this->read_header(m_master.data_fname(i, 0)); + auto h = read_header(m_master.data_fname(i, 0)); r = std::max(r, h.row); c = std::max(c, h.column); - positions.push_back({h.row, h.column}); + // positions.push_back({h.row, h.column}); + ModuleGeometry g; - g.x = h.column * m_master.pixels_x(); - g.y = h.row * m_master.pixels_y(); + g.origin_x = h.column * m_master.pixels_x(); + g.origin_y = h.row * m_master.pixels_y(); + g.row_index = h.row; + g.col_index = h.column; g.width = m_master.pixels_x(); g.height = m_master.pixels_y(); - m_module_pixel_0.push_back(g); + m_geometry.module_pixel_0.push_back(g); } r++; c++; - m_rows = (r * m_master.pixels_y()); - m_cols = (c * m_master.pixels_x()); - - m_rows += static_cast((r - 1) * cfg.module_gap_row); - -#ifdef AARE_VERBOSE - fmt::print("\nRawFile::find_geometry()\n"); - for (size_t i = 0; i < m_module_pixel_0.size(); i++) { - fmt::print("Module {} at position: (r:{},c:{})\n", i, - m_module_pixel_0[i].y, m_module_pixel_0[i].x); - } - fmt::print("Image size: {}x{}\n\n", m_rows, m_cols); -#endif -} - -void RawFile::update_geometry_with_roi() { - // 
TODO! implement this - if (m_master.roi()) { - auto roi = m_master.roi().value(); - - // TODO! can we do this cleaner? - int pos_y = 0; - int pos_y_increment = 0; - for (size_t row = 0; row < m_master.geometry().row; row++) { - int pos_x = 0; - for (size_t col = 0; col < m_master.geometry().col; col++) { - auto &m = m_module_pixel_0[row * m_master.geometry().col + col]; - auto original_height = m.height; - auto original_width = m.width; - - // module is to the left of the roi - if (m.x + m.width < roi.xmin) { - m.width = 0; - - // roi is in module - } else { - // here we only arrive when the roi is in or to the left of - // the module - if (roi.xmin > m.x) { - m.width -= roi.xmin - m.x; - } - if (roi.xmax < m.x + m.width) { - m.width -= m.x + original_width - roi.xmax; - } - m.x = pos_x; - pos_x += m.width; - } - - if (m.y + m.height < roi.ymin) { - m.height = 0; - } else { - if ((roi.ymin > m.y) && (roi.ymin < m.y + m.height)) { - m.height -= roi.ymin - m.y; - - } - if (roi.ymax < m.y + m.height) { - m.height -= m.y + original_height - roi.ymax; - } - m.y = pos_y; - pos_y_increment = m.height; - } - } - // increment pos_y - pos_y += pos_y_increment; - } - - m_rows = roi.height(); - m_cols = roi.width(); - } - -#ifdef AARE_VERBOSE - fmt::print("RawFile::update_geometry_with_roi()\n"); - for (const auto &m : m_module_pixel_0) { - fmt::print("Module at position: (r:{}, c:{}, h:{}, w:{})\n", m.y, m.x, - m.height, m.width); - } - fmt::print("Updated image size: {}x{}\n\n", m_rows, m_cols); - fmt::print("\n"); -#endif + m_geometry.pixels_y = (r * m_master.pixels_y()); + m_geometry.pixels_x = (c * m_master.pixels_x()); + m_geometry.modules_x = c; + m_geometry.modules_y = r; + m_geometry.pixels_y += static_cast((r - 1) * cfg.module_gap_row); } + Frame RawFile::get_frame(size_t frame_index) { - auto f = Frame(m_rows, m_cols, Dtype::from_bitdepth(m_master.bitdepth())); + auto f = Frame(m_geometry.pixels_y, m_geometry.pixels_x, Dtype::from_bitdepth(m_master.bitdepth())); 
std::byte *frame_buffer = f.data(); get_frame_into(frame_index, frame_buffer); return f; @@ -321,10 +267,10 @@ void RawFile::get_frame_into(size_t frame_index, std::byte *frame_buffer, Detect } // This is where we start writing - auto offset = (m_module_pixel_0[part_idx].y * m_cols + - m_module_pixel_0[part_idx].x)*m_master.bitdepth()/8; + auto offset = (m_geometry.module_pixel_0[part_idx].origin_y * m_geometry.pixels_x + + m_geometry.module_pixel_0[part_idx].origin_x)*m_master.bitdepth()/8; - if (m_module_pixel_0[part_idx].x!=0) + if (m_geometry.module_pixel_0[part_idx].origin_x!=0) throw std::runtime_error(LOCATION + "Implementation error. x pos not 0."); //TODO! Risk for out of range access @@ -348,7 +294,7 @@ void RawFile::get_frame_into(size_t frame_index, std::byte *frame_buffer, Detect // level for (size_t part_idx = 0; part_idx != n_subfile_parts; ++part_idx) { - auto pos = m_module_pixel_0[part_idx]; + auto pos = m_geometry.module_pixel_0[part_idx]; auto corrected_idx = frame_indices[part_idx]; auto subfile_id = corrected_idx / m_master.max_frames_per_file(); if (subfile_id >= subfiles.size()) { @@ -364,9 +310,9 @@ void RawFile::get_frame_into(size_t frame_index, std::byte *frame_buffer, Detect for (size_t cur_row = 0; cur_row < static_cast(pos.height); cur_row++) { - auto irow = (pos.y + cur_row); - auto icol = pos.x; - auto dest = (irow * this->m_cols + icol); + auto irow = (pos.origin_y + cur_row); + auto icol = pos.origin_x; + auto dest = (irow * this->m_geometry.pixels_x + icol); dest = dest * m_master.bitdepth() / 8; memcpy(frame_buffer + dest, part_buffer + cur_row * pos.width * @@ -412,4 +358,8 @@ RawFile::~RawFile() { } } + + + + } // namespace aare \ No newline at end of file diff --git a/src/RawFile.test.cpp b/src/RawFile.test.cpp index faefd28..5f9b2e1 100644 --- a/src/RawFile.test.cpp +++ b/src/RawFile.test.cpp @@ -1,10 +1,13 @@ #include "aare/File.hpp" +#include "aare/RawMasterFile.hpp" //needed for ROI +#include "aare/RawFile.hpp" #include 
#include #include "test_config.hpp" + using aare::File; TEST_CASE("Read number of frames from a jungfrau raw file", "[.integration]") { @@ -148,3 +151,5 @@ TEST_CASE("Read file with unordered frames", "[.integration]") { File f(fpath); REQUIRE_THROWS((f.read_frame())); } + + diff --git a/src/RawSubFile.cpp b/src/RawSubFile.cpp index 4612747..a3bb79c 100644 --- a/src/RawSubFile.cpp +++ b/src/RawSubFile.cpp @@ -44,7 +44,7 @@ RawSubFile::RawSubFile(const std::filesystem::path &fname, void RawSubFile::seek(size_t frame_index) { if (frame_index >= n_frames) { - throw std::runtime_error("Frame number out of range"); + throw std::runtime_error(LOCATION + fmt::format("Frame index {} out of range in a file with {} frames", frame_index, n_frames)); } m_file.seekg((sizeof(DetectorHeader) + bytes_per_frame()) * frame_index); } diff --git a/src/geo_helpers.cpp b/src/geo_helpers.cpp new file mode 100644 index 0000000..e823f22 --- /dev/null +++ b/src/geo_helpers.cpp @@ -0,0 +1,71 @@ + +#include "aare/geo_helpers.hpp" +#include "fmt/core.h" + +namespace aare{ + +DetectorGeometry update_geometry_with_roi(DetectorGeometry geo, aare::ROI roi) { + #ifdef AARE_VERBOSE + fmt::println("update_geometry_with_roi() called with ROI: {} {} {} {}", + roi.xmin, roi.xmax, roi.ymin, roi.ymax); + fmt::println("Geometry: {} {} {} {} {} {}", + geo.modules_x, geo.modules_y, geo.pixels_x, geo.pixels_y, geo.module_gap_row, geo.module_gap_col); + #endif + int pos_y = 0; + int pos_y_increment = 0; + for (size_t row = 0; row < geo.modules_y; row++) { + int pos_x = 0; + for (size_t col = 0; col < geo.modules_x; col++) { + auto &m = geo.module_pixel_0[row * geo.modules_x + col]; + auto original_height = m.height; + auto original_width = m.width; + + // module is to the left of the roi + if (m.origin_x + m.width < roi.xmin) { + m.width = 0; + + // roi is in module + } else { + // here we only arrive when the roi is in or to the left of + // the module + if (roi.xmin > m.origin_x) { + m.width -= roi.xmin - 
m.origin_x; + } + if (roi.xmax < m.origin_x + original_width) { + m.width -= m.origin_x + original_width - roi.xmax; + } + m.origin_x = pos_x; + pos_x += m.width; + } + + if (m.origin_y + m.height < roi.ymin) { + m.height = 0; + } else { + if ((roi.ymin > m.origin_y) && (roi.ymin < m.origin_y + m.height)) { + m.height -= roi.ymin - m.origin_y; + + } + if (roi.ymax < m.origin_y + original_height) { + m.height -= m.origin_y + original_height - roi.ymax; + } + m.origin_y = pos_y; + pos_y_increment = m.height; + } + #ifdef AARE_VERBOSE + fmt::println("Module {} {} {} {}", m.origin_x, m.origin_y, m.width, m.height); + #endif + } + // increment pos_y + pos_y += pos_y_increment; + } + + // m_rows = roi.height(); + // m_cols = roi.width(); + geo.pixels_x = roi.width(); + geo.pixels_y = roi.height(); + + return geo; + +} + +} // namespace aare \ No newline at end of file diff --git a/src/geo_helpers.test.cpp b/src/geo_helpers.test.cpp new file mode 100644 index 0000000..08ee96c --- /dev/null +++ b/src/geo_helpers.test.cpp @@ -0,0 +1,230 @@ +#include "aare/File.hpp" +#include "aare/RawMasterFile.hpp" //needed for ROI +#include "aare/RawFile.hpp" + +#include +#include + +#include "aare/geo_helpers.hpp" +#include "test_config.hpp" + +TEST_CASE("Simple ROIs on one module"){ + // DetectorGeometry update_geometry_with_roi(DetectorGeometry geo, aare::ROI roi) + aare::DetectorGeometry geo; + + + aare::ModuleGeometry mod; + mod.origin_x = 0; + mod.origin_y = 0; + mod.width = 1024; + mod.height = 512; + + + + geo.pixels_x = 1024; + geo.pixels_y = 512; + geo.modules_x = 1; + geo.modules_y = 1; + geo.module_pixel_0.push_back(mod); + + SECTION("ROI is the whole module"){ + aare::ROI roi; + roi.xmin = 0; + roi.xmax = 1024; + roi.ymin = 0; + roi.ymax = 512; + auto updated_geo = aare::update_geometry_with_roi(geo, roi); + + REQUIRE(updated_geo.pixels_x == 1024); + REQUIRE(updated_geo.pixels_y == 512); + REQUIRE(updated_geo.modules_x == 1); + REQUIRE(updated_geo.modules_y == 1); + 
REQUIRE(updated_geo.module_pixel_0[0].height == 512); + REQUIRE(updated_geo.module_pixel_0[0].width == 1024); + } + SECTION("ROI is the top left corner of the module"){ + aare::ROI roi; + roi.xmin = 100; + roi.xmax = 200; + roi.ymin = 150; + roi.ymax = 200; + auto updated_geo = aare::update_geometry_with_roi(geo, roi); + + REQUIRE(updated_geo.pixels_x == 100); + REQUIRE(updated_geo.pixels_y == 50); + REQUIRE(updated_geo.modules_x == 1); + REQUIRE(updated_geo.modules_y == 1); + REQUIRE(updated_geo.module_pixel_0[0].height == 50); + REQUIRE(updated_geo.module_pixel_0[0].width == 100); + } + + SECTION("ROI is a small square"){ + aare::ROI roi; + roi.xmin = 1000; + roi.xmax = 1010; + roi.ymin = 500; + roi.ymax = 510; + auto updated_geo = aare::update_geometry_with_roi(geo, roi); + + REQUIRE(updated_geo.pixels_x == 10); + REQUIRE(updated_geo.pixels_y == 10); + REQUIRE(updated_geo.modules_x == 1); + REQUIRE(updated_geo.modules_y == 1); + REQUIRE(updated_geo.module_pixel_0[0].height == 10); + REQUIRE(updated_geo.module_pixel_0[0].width == 10); + } + SECTION("ROI is a few columns"){ + aare::ROI roi; + roi.xmin = 750; + roi.xmax = 800; + roi.ymin = 0; + roi.ymax = 512; + auto updated_geo = aare::update_geometry_with_roi(geo, roi); + + REQUIRE(updated_geo.pixels_x == 50); + REQUIRE(updated_geo.pixels_y == 512); + REQUIRE(updated_geo.modules_x == 1); + REQUIRE(updated_geo.modules_y == 1); + REQUIRE(updated_geo.module_pixel_0[0].height == 512); + REQUIRE(updated_geo.module_pixel_0[0].width == 50); + } +} + + + +TEST_CASE("Two modules side by side"){ + // DetectorGeometry update_geometry_with_roi(DetectorGeometry geo, aare::ROI roi) + aare::DetectorGeometry geo; + + + aare::ModuleGeometry mod; + mod.origin_x = 0; + mod.origin_y = 0; + mod.width = 1024; + mod.height = 512; + + geo.pixels_x = 2048; + geo.pixels_y = 512; + geo.modules_x = 2; + geo.modules_y = 1; + + geo.module_pixel_0.push_back(mod); + mod.origin_x = 1024; + geo.module_pixel_0.push_back(mod); + + SECTION("ROI is 
the whole image"){ + aare::ROI roi; + roi.xmin = 0; + roi.xmax = 2048; + roi.ymin = 0; + roi.ymax = 512; + auto updated_geo = aare::update_geometry_with_roi(geo, roi); + + REQUIRE(updated_geo.pixels_x == 2048); + REQUIRE(updated_geo.pixels_y == 512); + REQUIRE(updated_geo.modules_x == 2); + REQUIRE(updated_geo.modules_y == 1); + } + SECTION("rectangle on both modules"){ + aare::ROI roi; + roi.xmin = 800; + roi.xmax = 1300; + roi.ymin = 200; + roi.ymax = 499; + auto updated_geo = aare::update_geometry_with_roi(geo, roi); + + REQUIRE(updated_geo.pixels_x == 500); + REQUIRE(updated_geo.pixels_y == 299); + REQUIRE(updated_geo.modules_x == 2); + REQUIRE(updated_geo.modules_y == 1); + REQUIRE(updated_geo.module_pixel_0[0].height == 299); + REQUIRE(updated_geo.module_pixel_0[0].width == 224); + REQUIRE(updated_geo.module_pixel_0[1].height == 299); + REQUIRE(updated_geo.module_pixel_0[1].width == 276); + } +} + +TEST_CASE("Three modules side by side"){ + // DetectorGeometry update_geometry_with_roi(DetectorGeometry geo, aare::ROI roi) + aare::DetectorGeometry geo; + aare::ROI roi; + roi.xmin = 700; + roi.xmax = 2500; + roi.ymin = 0; + roi.ymax = 123; + + aare::ModuleGeometry mod; + mod.origin_x = 0; + mod.origin_y = 0; + mod.width = 1024; + mod.height = 512; + + geo.pixels_x = 3072; + geo.pixels_y = 512; + geo.modules_x = 3; + geo.modules_y = 1; + + geo.module_pixel_0.push_back(mod); + mod.origin_x = 1024; + geo.module_pixel_0.push_back(mod); + mod.origin_x = 2048; + geo.module_pixel_0.push_back(mod); + + auto updated_geo = aare::update_geometry_with_roi(geo, roi); + + REQUIRE(updated_geo.pixels_x == 1800); + REQUIRE(updated_geo.pixels_y == 123); + REQUIRE(updated_geo.modules_x == 3); + REQUIRE(updated_geo.modules_y == 1); + REQUIRE(updated_geo.module_pixel_0[0].height == 123); + REQUIRE(updated_geo.module_pixel_0[0].width == 324); + REQUIRE(updated_geo.module_pixel_0[1].height == 123); + REQUIRE(updated_geo.module_pixel_0[1].width == 1024); + 
REQUIRE(updated_geo.module_pixel_0[2].height == 123); + REQUIRE(updated_geo.module_pixel_0[2].width == 452); +} + +TEST_CASE("Four modules as a square"){ + // DetectorGeometry update_geometry_with_roi(DetectorGeometry geo, aare::ROI roi) + aare::DetectorGeometry geo; + aare::ROI roi; + roi.xmin = 500; + roi.xmax = 2000; + roi.ymin = 500; + roi.ymax = 600; + + aare::ModuleGeometry mod; + mod.origin_x = 0; + mod.origin_y = 0; + mod.width = 1024; + mod.height = 512; + + geo.pixels_x = 2048; + geo.pixels_y = 1024; + geo.modules_x = 2; + geo.modules_y = 2; + + geo.module_pixel_0.push_back(mod); + mod.origin_x = 1024; + geo.module_pixel_0.push_back(mod); + mod.origin_x = 0; + mod.origin_y = 512; + geo.module_pixel_0.push_back(mod); + mod.origin_x = 1024; + geo.module_pixel_0.push_back(mod); + + auto updated_geo = aare::update_geometry_with_roi(geo, roi); + + REQUIRE(updated_geo.pixels_x == 1500); + REQUIRE(updated_geo.pixels_y == 100); + REQUIRE(updated_geo.modules_x == 2); + REQUIRE(updated_geo.modules_y == 2); + REQUIRE(updated_geo.module_pixel_0[0].height == 12); + REQUIRE(updated_geo.module_pixel_0[0].width == 524); + REQUIRE(updated_geo.module_pixel_0[1].height == 12); + REQUIRE(updated_geo.module_pixel_0[1].width == 976); + REQUIRE(updated_geo.module_pixel_0[2].height == 88); + REQUIRE(updated_geo.module_pixel_0[2].width == 524); + REQUIRE(updated_geo.module_pixel_0[3].height == 88); + REQUIRE(updated_geo.module_pixel_0[3].width == 976); +} \ No newline at end of file From d86cb533c8d279c63f82f8c929fcda8549de10b8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Erik=20Fr=C3=B6jdh?= Date: Tue, 11 Feb 2025 11:48:01 +0100 Subject: [PATCH 027/120] Fix minor warnings (#126) - Unused variables - signed vs. 
unsigned - added -flto=auto --- CMakeLists.txt | 1 + include/aare/ClusterVector.hpp | 1 - python/src/cluster.hpp | 2 +- python/src/file.hpp | 7 ++++++- src/decode.cpp | 4 ++-- src/geo_helpers.cpp | 4 ++-- 6 files changed, 12 insertions(+), 7 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 165c435..6cab73a 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -244,6 +244,7 @@ target_compile_options( -Wvla -Wdouble-promotion -Werror=return-type #important can cause segfault in optimzed builds + -flto=auto ) endif() #GCC/Clang specific diff --git a/include/aare/ClusterVector.hpp b/include/aare/ClusterVector.hpp index 73257ce..febf06c 100644 --- a/include/aare/ClusterVector.hpp +++ b/include/aare/ClusterVector.hpp @@ -163,7 +163,6 @@ template class ClusterVector { throw std::runtime_error( "Only 3x3 clusters are supported for the 2x2 sum."); } - const size_t n_pixels = m_cluster_size_x * m_cluster_size_y; std::byte *ptr = m_data + 2 * sizeof(CoordType); // skip x and y for (size_t i = 0; i < m_size; i++) { diff --git a/python/src/cluster.hpp b/python/src/cluster.hpp index 0e7aac9..792b7e6 100644 --- a/python/src/cluster.hpp +++ b/python/src/cluster.hpp @@ -153,7 +153,7 @@ void define_cluster_finder_bindings(py::module &m) { [](ClusterFinder &self, py::array_t frame, uint64_t frame_number) { auto view = make_view_2d(frame); - self.find_clusters(view); + self.find_clusters(view, frame_number); return; }, py::arg(), py::arg("frame_number") = 0); diff --git a/python/src/file.hpp b/python/src/file.hpp index f20e0ce..c3c800c 100644 --- a/python/src/file.hpp +++ b/python/src/file.hpp @@ -20,6 +20,11 @@ namespace py = pybind11; using namespace ::aare; +//Disable warnings for unused parameters, as we ignore some +//in the __exit__ method +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wunused-parameter" + void define_file_io_bindings(py::module &m) { @@ -238,7 +243,7 @@ void define_file_io_bindings(py::module &m) { return image; }); - +#pragma GCC 
diagnostic pop // py::class_(m, "ClusterHeader") // .def(py::init<>()) // .def_readwrite("frame_number", &ClusterHeader::frame_number) diff --git a/src/decode.cpp b/src/decode.cpp index a525faa..8af8319 100644 --- a/src/decode.cpp +++ b/src/decode.cpp @@ -22,8 +22,8 @@ uint16_t adc_sar_05_decode64to16(uint64_t input){ } void adc_sar_05_decode64to16(NDView input, NDView output){ - for(size_t i = 0; i < input.shape(0); i++){ - for(size_t j = 0; j < input.shape(1); j++){ + for(int64_t i = 0; i < input.shape(0); i++){ + for(int64_t j = 0; j < input.shape(1); j++){ output(i,j) = adc_sar_05_decode64to16(input(i,j)); } } diff --git a/src/geo_helpers.cpp b/src/geo_helpers.cpp index e823f22..39086ec 100644 --- a/src/geo_helpers.cpp +++ b/src/geo_helpers.cpp @@ -13,9 +13,9 @@ DetectorGeometry update_geometry_with_roi(DetectorGeometry geo, aare::ROI roi) { #endif int pos_y = 0; int pos_y_increment = 0; - for (size_t row = 0; row < geo.modules_y; row++) { + for (int row = 0; row < geo.modules_y; row++) { int pos_x = 0; - for (size_t col = 0; col < geo.modules_x; col++) { + for (int col = 0; col < geo.modules_x; col++) { auto &m = geo.module_pixel_0[row * geo.modules_x + col]; auto original_height = m.height; auto original_width = m.width; From f7031d7f87c0e159fa111c05d87d1d2d2892c734 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Erik=20Fr=C3=B6jdh?= Date: Wed, 12 Feb 2025 10:52:55 +0100 Subject: [PATCH 028/120] Update CMakeLists.txt Removed flto=auto which caused issues with gcc 8.5 --- CMakeLists.txt | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 6cab73a..c068360 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -244,7 +244,6 @@ target_compile_options( -Wvla -Wdouble-promotion -Werror=return-type #important can cause segfault in optimzed builds - -flto=auto ) endif() #GCC/Clang specific @@ -442,4 +441,4 @@ if(AARE_MASTER_PROJECT) set(CMAKE_INSTALL_DIR "share/cmake/${PROJECT_NAME}") set(PROJECT_LIBRARIES aare-core 
aare-compiler-flags ) include(cmake/package_config.cmake) -endif() \ No newline at end of file +endif() From 2faa317bdf942bd1d4b72958e186459ccc1448b2 Mon Sep 17 00:00:00 2001 From: froejdh_e Date: Wed, 12 Feb 2025 10:59:18 +0100 Subject: [PATCH 029/120] removed debug line --- src/RawFile.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/src/RawFile.cpp b/src/RawFile.cpp index ef622ee..e704add 100644 --- a/src/RawFile.cpp +++ b/src/RawFile.cpp @@ -111,7 +111,6 @@ void RawFile::open_subfiles() { auto v = std::vector(n_subfile_parts); for (size_t j = 0; j != n_subfile_parts; ++j) { auto pos = m_geometry.module_pixel_0[j]; - fmt::println("POS: {} {} {} {}", pos.origin_x, pos.origin_y, pos.width, pos.height); v[j] = new RawSubFile(m_master.data_fname(j, i), m_master.detector_type(), pos.height, pos.width, m_master.bitdepth(), From c0c5e07ad8c1f090c5435f0cadc81f408360ce4f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Erik=20Fr=C3=B6jdh?= Date: Wed, 12 Feb 2025 16:17:32 +0100 Subject: [PATCH 030/120] added decoding of adc_sar_04 (#127) --- include/aare/decode.hpp | 3 ++- python/aare/transform.py | 5 +++++ python/src/ctb_raw_file.hpp | 23 +++++++++++++++++++---- src/decode.cpp | 29 +++++++++++++++++++++++++++++ 4 files changed, 55 insertions(+), 5 deletions(-) diff --git a/include/aare/decode.hpp b/include/aare/decode.hpp index 7ff0963..1c3c479 100644 --- a/include/aare/decode.hpp +++ b/include/aare/decode.hpp @@ -6,7 +6,8 @@ namespace aare { uint16_t adc_sar_05_decode64to16(uint64_t input); - +uint16_t adc_sar_04_decode64to16(uint64_t input); void adc_sar_05_decode64to16(NDView input, NDView output); +void adc_sar_04_decode64to16(NDView input, NDView output); } // namespace aare \ No newline at end of file diff --git a/python/aare/transform.py b/python/aare/transform.py index bbb3b5d..2f66942 100644 --- a/python/aare/transform.py +++ b/python/aare/transform.py @@ -2,6 +2,10 @@ import numpy as np from . 
import _aare +class AdcSar04Transform64to16: + def __call__(self, data): + return _aare.adc_sar_04_decode64to16(data) + class AdcSar05Transform64to16: def __call__(self, data): return _aare.adc_sar_05_decode64to16(data) @@ -50,4 +54,5 @@ moench05 = Moench05Transform() moench05_1g = Moench05Transform1g() moench05_old = Moench05TransformOld() matterhorn02 = Matterhorn02Transform() +adc_sar_04_64to16 = AdcSar04Transform64to16() adc_sar_05_64to16 = AdcSar05Transform64to16() \ No newline at end of file diff --git a/python/src/ctb_raw_file.hpp b/python/src/ctb_raw_file.hpp index 5aeb387..9ce656d 100644 --- a/python/src/ctb_raw_file.hpp +++ b/python/src/ctb_raw_file.hpp @@ -37,15 +37,30 @@ m.def("adc_sar_05_decode64to16", [](py::array_t input) { //Create a view of the input and output arrays NDView input_view(reinterpret_cast(input.mutable_data()), {output.shape(0), output.shape(1)}); - NDView output_view(output.mutable_data(), {output.shape(0), output.shape(1)}); adc_sar_05_decode64to16(input_view, output_view); - // for (size_t i=0; i!=input_view.size(); ++i) { - // output_view(i) = decode_adc(input_view(i)); - // } + + return output; +}); +m.def("adc_sar_04_decode64to16", [](py::array_t input) { + + + if(input.ndim() != 2){ + throw std::runtime_error("Only 2D arrays are supported at this moment"); + } + + //Create a 2D output array with the same shape as the input + std::vector shape{input.shape(0), input.shape(1)/8}; + py::array_t output(shape); + + //Create a view of the input and output arrays + NDView input_view(reinterpret_cast(input.mutable_data()), {output.shape(0), output.shape(1)}); + NDView output_view(output.mutable_data(), {output.shape(0), output.shape(1)}); + + adc_sar_04_decode64to16(input_view, output_view); return output; }); diff --git a/src/decode.cpp b/src/decode.cpp index 8af8319..17c033d 100644 --- a/src/decode.cpp +++ b/src/decode.cpp @@ -29,4 +29,33 @@ void adc_sar_05_decode64to16(NDView input, NDView outpu } } +uint16_t 
adc_sar_04_decode64to16(uint64_t input){ + + // bit_map = array([15,17,19,21,23,4,6,8,10,12,14,16] LSB->MSB + uint16_t output = 0; + output |= ((input >> 16) & 1) << 11; + output |= ((input >> 14) & 1) << 10; + output |= ((input >> 12) & 1) << 9; + output |= ((input >> 10) & 1) << 8; + output |= ((input >> 8) & 1) << 7; + output |= ((input >> 6) & 1) << 6; + output |= ((input >> 4) & 1) << 5; + output |= ((input >> 23) & 1) << 4; + output |= ((input >> 21) & 1) << 3; + output |= ((input >> 19) & 1) << 2; + output |= ((input >> 17) & 1) << 1; + output |= ((input >> 15) & 1) << 0; + return output; +} + +void adc_sar_04_decode64to16(NDView input, NDView output){ + for(int64_t i = 0; i < input.shape(0); i++){ + for(int64_t j = 0; j < input.shape(1); j++){ + output(i,j) = adc_sar_04_decode64to16(input(i,j)); + } + } +} + + + } // namespace aare From 7309cff47cf762050a74efb37612765671b316fa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Erik=20Fr=C3=B6jdh?= Date: Wed, 12 Feb 2025 16:35:48 +0100 Subject: [PATCH 031/120] Added fitting with lmfit (#128) - added stand alone fitting using: https://jugit.fz-juelich.de/mlz/lmfit.git - fit_gaus, fit_pol1 with and without errors - multi threaded fitting --------- Co-authored-by: JulianHeymes --- CMakeLists.txt | 40 ++++- docs/src/index.rst | 2 + docs/src/pyFit.rst | 19 +++ include/aare/Fit.hpp | 76 +++++++++ include/aare/utils/task.hpp | 8 + patches/lmfit.patch | 13 ++ python/CMakeLists.txt | 12 +- python/aare/__init__.py | 8 +- python/aare/func.py | 1 + python/aare/utils.py | 6 +- python/examples/fits.py | 79 ++++++++++ python/examples/play.py | 136 +++++----------- python/src/fit.hpp | 223 +++++++++++++++++++++++++++ python/src/module.cpp | 2 + python/src/np_helper.hpp | 66 +------- src/Fit.cpp | 300 ++++++++++++++++++++++++++++++++++++ src/utils/task.cpp | 30 ++++ src/utils/task.test.cpp | 32 ++++ 18 files changed, 893 insertions(+), 160 deletions(-) create mode 100644 docs/src/pyFit.rst create mode 100644 include/aare/Fit.hpp 
create mode 100644 include/aare/utils/task.hpp create mode 100644 patches/lmfit.patch create mode 100644 python/aare/func.py create mode 100644 python/examples/fits.py create mode 100644 python/src/fit.hpp create mode 100644 src/Fit.cpp create mode 100644 src/utils/task.cpp create mode 100644 src/utils/task.test.cpp diff --git a/CMakeLists.txt b/CMakeLists.txt index c068360..62a3878 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -48,6 +48,7 @@ option(AARE_FETCH_PYBIND11 "Use FetchContent to download pybind11" ON) option(AARE_FETCH_CATCH "Use FetchContent to download catch2" ON) option(AARE_FETCH_JSON "Use FetchContent to download nlohmann::json" ON) option(AARE_FETCH_ZMQ "Use FetchContent to download libzmq" ON) +option(AARE_FETCH_LMFIT "Use FetchContent to download lmfit" ON) #Convenience option to use system libraries only (no FetchContent) @@ -76,6 +77,34 @@ endif() set(CMAKE_EXPORT_COMPILE_COMMANDS ON) +if(AARE_FETCH_LMFIT) + set(lmfit_patch git apply ${CMAKE_CURRENT_SOURCE_DIR}/patches/lmfit.patch) + FetchContent_Declare( + lmfit + GIT_REPOSITORY https://jugit.fz-juelich.de/mlz/lmfit.git + GIT_TAG main + PATCH_COMMAND ${lmfit_patch} + UPDATE_DISCONNECTED 1 + EXCLUDE_FROM_ALL + ) + #Disable what we don't need from lmfit + set(BUILD_TESTING OFF CACHE BOOL "") + set(LMFIT_CPPTEST OFF CACHE BOOL "") + set(LIB_MAN OFF CACHE BOOL "") + set(LMFIT_CPPTEST OFF CACHE BOOL "") + set(BUILD_SHARED_LIBS OFF CACHE BOOL "") + + + FetchContent_MakeAvailable(lmfit) + set_property(TARGET lmfit PROPERTY POSITION_INDEPENDENT_CODE ON) + + target_include_directories (lmfit PUBLIC "${libzmq_SOURCE_DIR}/lib") + message(STATUS "lmfit include dir: ${lmfit_SOURCE_DIR}/lib") +else() + find_package(lmfit REQUIRED) +endif() + + if(AARE_FETCH_ZMQ) # Fetchcontent_Declare is deprecated need to find a way to update this # for now setting the policy to old is enough @@ -127,8 +156,8 @@ if (AARE_FETCH_FMT) LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} ARCHIVE DESTINATION 
${CMAKE_INSTALL_LIBDIR} RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} - INCLUDES DESTINATION ${CMAKE_INSTALL_INCLUDEDIR} -) + INCLUDES DESTINATION ${CMAKE_INSTALL_INCLUDEDIR} + ) else() find_package(fmt 6 REQUIRED) endif() @@ -146,7 +175,6 @@ if (AARE_FETCH_JSON) install( TARGETS nlohmann_json EXPORT "${TARGETS_EXPORT_NAME}" - ) message(STATUS "target: ${NLOHMANN_JSON_TARGET_NAME}") else() @@ -287,6 +315,7 @@ set(PUBLICHEADERS include/aare/defs.hpp include/aare/Dtype.hpp include/aare/File.hpp + include/aare/Fit.hpp include/aare/FileInterface.hpp include/aare/Frame.hpp include/aare/geo_helpers.hpp @@ -300,6 +329,7 @@ set(PUBLICHEADERS include/aare/RawMasterFile.hpp include/aare/RawSubFile.hpp include/aare/VarClusterFinder.hpp + include/aare/utils/task.hpp ) @@ -312,6 +342,7 @@ set(SourceFiles ${CMAKE_CURRENT_SOURCE_DIR}/src/decode.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/Frame.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/File.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/Fit.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/geo_helpers.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyFile.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyHelpers.cpp @@ -319,6 +350,7 @@ set(SourceFiles ${CMAKE_CURRENT_SOURCE_DIR}/src/RawFile.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/RawSubFile.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/RawMasterFile.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/utils/task.cpp ) @@ -338,6 +370,7 @@ target_link_libraries( ${STD_FS_LIB} # from helpers.cmake PRIVATE aare_compiler_flags + lmfit ) set_target_properties(aare_core PROPERTIES @@ -364,6 +397,7 @@ if(AARE_TESTS) ${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyFile.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyHelpers.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/RawFile.test.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/utils/task.test.cpp ) target_sources(tests PRIVATE ${TestSources} ) diff --git a/docs/src/index.rst b/docs/src/index.rst index e6c927f..905caea 100644 --- a/docs/src/index.rst +++ b/docs/src/index.rst @@ -35,6 +35,8 @@ AARE pyRawMasterFile pyVarClusterFinder + pyFit + .. 
toctree:: :caption: C++ API diff --git a/docs/src/pyFit.rst b/docs/src/pyFit.rst new file mode 100644 index 0000000..abaa3cf --- /dev/null +++ b/docs/src/pyFit.rst @@ -0,0 +1,19 @@ + +Fit +======== + +.. py:currentmodule:: aare + + +**Functions** + +.. autofunction:: gaus + +.. autofunction:: pol1 + + +**Fitting** + +.. autofunction:: fit_gaus + +.. autofunction:: fit_pol1 \ No newline at end of file diff --git a/include/aare/Fit.hpp b/include/aare/Fit.hpp new file mode 100644 index 0000000..20ef4ef --- /dev/null +++ b/include/aare/Fit.hpp @@ -0,0 +1,76 @@ +#pragma once + +#include +#include +#include + +#include "aare/NDArray.hpp" + +namespace aare { + +namespace func { +double gaus(const double x, const double *par); +NDArray gaus(NDView x, NDView par); + +double pol1(const double x, const double *par); +NDArray pol1(NDView x, NDView par); + +} // namespace func + +static constexpr int DEFAULT_NUM_THREADS = 4; + +/** + * @brief Fit a 1D Gaussian to data. + * @param data data to fit + * @param x x values + */ +NDArray fit_gaus(NDView x, NDView y); + + +/** + * @brief Fit a 1D Gaussian to each pixel. Data layout [row, col, values] + * @param x x values + * @param y y vales, layout [row, col, values] + * @param n_threads number of threads to use + */ +NDArray fit_gaus(NDView x, NDView y, int n_threads = DEFAULT_NUM_THREADS); + + +/** + * @brief Fit a 1D Gaussian with error estimates + * @param x x values + * @param y y vales, layout [row, col, values] + * @param y_err error in y, layout [row, col, values] + * @param par_out output parameters + * @param par_err_out output error parameters + */ +void fit_gaus(NDView x, NDView y, NDView y_err, + NDView par_out, NDView par_err_out); + +/** + * @brief Fit a 1D Gaussian to each pixel with error estimates. 
Data layout [row, col, values] + * @param x x values + * @param y y vales, layout [row, col, values] + * @param y_err error in y, layout [row, col, values] + * @param par_out output parameters, layout [row, col, values] + * @param par_err_out output parameter errors, layout [row, col, values] + * @param n_threads number of threads to use + */ +void fit_gaus(NDView x, NDView y, NDView y_err, + NDView par_out, NDView par_err_out, int n_threads = DEFAULT_NUM_THREADS); + + +NDArray fit_pol1(NDView x, NDView y); + +NDArray fit_pol1(NDView x, NDView y, int n_threads = DEFAULT_NUM_THREADS); + +void fit_pol1(NDView x, NDView y, + NDView y_err, NDView par_out, + NDView par_err_out); + +//TODO! not sure we need to offer the different version in C++ +void fit_pol1(NDView x, NDView y, + NDView y_err, NDView par_out, + NDView par_err_out, int n_threads = DEFAULT_NUM_THREADS); + +} // namespace aare \ No newline at end of file diff --git a/include/aare/utils/task.hpp b/include/aare/utils/task.hpp new file mode 100644 index 0000000..a6ee142 --- /dev/null +++ b/include/aare/utils/task.hpp @@ -0,0 +1,8 @@ + +#include +#include + +namespace aare { +std::vector> split_task(int first, int last, int n_threads); + +} // namespace aare \ No newline at end of file diff --git a/patches/lmfit.patch b/patches/lmfit.patch new file mode 100644 index 0000000..22063bf --- /dev/null +++ b/patches/lmfit.patch @@ -0,0 +1,13 @@ +diff --git a/lib/CMakeLists.txt b/lib/CMakeLists.txt +index 4efb7ed..6533660 100644 +--- a/lib/CMakeLists.txt ++++ b/lib/CMakeLists.txt +@@ -11,7 +11,7 @@ target_compile_definitions(${lib} PRIVATE "LMFIT_EXPORT") # for Windows DLL expo + + target_include_directories(${lib} + PUBLIC +- $ ++ $ + $ + ) + diff --git a/python/CMakeLists.txt b/python/CMakeLists.txt index 89ad5e7..2aaa222 100644 --- a/python/CMakeLists.txt +++ b/python/CMakeLists.txt @@ -28,6 +28,7 @@ target_link_libraries(_aare PRIVATE aare_core aare_compiler_flags) set( PYTHON_FILES aare/__init__.py 
aare/CtbRawFile.py + aare/func.py aare/RawFile.py aare/transform.py aare/ScanParameters.py @@ -43,10 +44,17 @@ set_target_properties(_aare PROPERTIES LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/aare ) +set(PYTHON_EXAMPLES + examples/play.py + examples/fits.py +) -# Copy the examples/scripts to the build directory -configure_file(examples/play.py ${CMAKE_BINARY_DIR}/play.py) + +# Copy the python examples to the build directory +foreach(FILE ${PYTHON_EXAMPLES}) + configure_file(${FILE} ${CMAKE_BINARY_DIR}/${FILE} ) +endforeach(FILE ${PYTHON_EXAMPLES}) if(AARE_INSTALL_PYTHONEXT) diff --git a/python/aare/__init__.py b/python/aare/__init__.py index 58112a6..f4c19cc 100644 --- a/python/aare/__init__.py +++ b/python/aare/__init__.py @@ -10,8 +10,14 @@ from ._aare import hitmap from ._aare import ClusterFinderMT, ClusterCollector, ClusterFileSink, ClusterVector_i +from ._aare import fit_gaus, fit_pol1 + from .CtbRawFile import CtbRawFile from .RawFile import RawFile from .ScanParameters import ScanParameters -from .utils import random_pixels, random_pixel \ No newline at end of file +from .utils import random_pixels, random_pixel, flat_list + + +#make functions available in the top level API +from .func import * diff --git a/python/aare/func.py b/python/aare/func.py new file mode 100644 index 0000000..ca60cf2 --- /dev/null +++ b/python/aare/func.py @@ -0,0 +1 @@ +from ._aare import gaus, pol1 \ No newline at end of file diff --git a/python/aare/utils.py b/python/aare/utils.py index d53f844..4708921 100644 --- a/python/aare/utils.py +++ b/python/aare/utils.py @@ -20,4 +20,8 @@ def random_pixel(xmin=0, xmax=512, ymin=0, ymax=1024): Returns: tuple: (row, col) """ - return random_pixels(1, xmin, xmax, ymin, ymax)[0] \ No newline at end of file + return random_pixels(1, xmin, xmax, ymin, ymax)[0] + +def flat_list(xss): + """Flatten a list of lists.""" + return [x for xs in xss for x in xs] \ No newline at end of file diff --git a/python/examples/fits.py 
b/python/examples/fits.py new file mode 100644 index 0000000..aa3aef6 --- /dev/null +++ b/python/examples/fits.py @@ -0,0 +1,79 @@ +import matplotlib.pyplot as plt +import numpy as np +from aare import fit_gaus, fit_pol1 +from aare import gaus, pol1 + +textpm = f"±" # +textmu = f"μ" # +textsigma = f"σ" # + + + +# ================================= Gauss fit ================================= +# Parameters +mu = np.random.uniform(1, 100) # Mean of Gaussian +sigma = np.random.uniform(4, 20) # Standard deviation +num_points = 10000 # Number of points for smooth distribution +noise_sigma = 100 + +# Generate Gaussian distribution +data = np.random.normal(mu, sigma, num_points) + +# Generate errors for each point +errors = np.abs(np.random.normal(0, sigma, num_points)) # Errors with mean 0, std 0.5 + +# Create subplot +fig0, ax0 = plt.subplots(1, 1, num=0, figsize=(12, 8)) + +x = np.histogram(data, bins=30)[1][:-1] + 0.05 +y = np.histogram(data, bins=30)[0] +yerr = errors[:30] + + +# Add the errors as error bars in the step plot +ax0.errorbar(x, y, yerr=yerr, fmt=". 
", capsize=5) +ax0.grid() + +par, err = fit_gaus(x, y, yerr) +print(par, err) + +x = np.linspace(x[0], x[-1], 1000) +ax0.plot(x, gaus(x, par), marker="") +ax0.set(xlabel="x", ylabel="Counts", title=f"A0 = {par[0]:0.2f}{textpm}{err[0]:0.2f}\n" + f"{textmu} = {par[1]:0.2f}{textpm}{err[1]:0.2f}\n" + f"{textsigma} = {par[2]:0.2f}{textpm}{err[2]:0.2f}\n" + f"(init: {textmu}: {mu:0.2f}, {textsigma}: {sigma:0.2f})") +fig0.tight_layout() + + + +# ================================= pol1 fit ================================= +# Parameters +n_points = 40 + +# Generate random slope and intercept (origin) +slope = np.random.uniform(-10, 10) # Random slope between 0.5 and 2.0 +intercept = np.random.uniform(-10, 10) # Random intercept between -10 and 10 + +# Generate random x values +x_values = np.random.uniform(-10, 10, n_points) + +# Calculate y values based on the linear function y = mx + b + error +errors = np.abs(np.random.normal(0, np.random.uniform(1, 5), n_points)) +var_points = np.random.normal(0, np.random.uniform(0.1, 2), n_points) +y_values = slope * x_values + intercept + var_points + +fig1, ax1 = plt.subplots(1, 1, num=1, figsize=(12, 8)) +ax1.errorbar(x_values, y_values, yerr=errors, fmt=". 
", capsize=5) +par, err = fit_pol1(x_values, y_values, errors) + + +x = np.linspace(np.min(x_values), np.max(x_values), 1000) +ax1.plot(x, pol1(x, par), marker="") +ax1.set(xlabel="x", ylabel="y", title=f"a = {par[0]:0.2f}{textpm}{err[0]:0.2f}\n" + f"b = {par[1]:0.2f}{textpm}{err[1]:0.2f}\n" + f"(init: {slope:0.2f}, {intercept:0.2f})") +fig1.tight_layout() + +plt.show() + diff --git a/python/examples/play.py b/python/examples/play.py index 316c196..f1a869b 100644 --- a/python/examples/play.py +++ b/python/examples/play.py @@ -8,6 +8,28 @@ import numpy as np import boost_histogram as bh import time +<<<<<<< HEAD +from aare import File, ClusterFinder, VarClusterFinder, ClusterFile, CtbRawFile +from aare import gaus, fit_gaus + +base = Path('/mnt/sls_det_storage/moench_data/Julian/MOENCH05/20250113_first_xrays_redo/raw_files/') +cluster_file = Path('/home/l_msdetect/erik/tmp/Cu.clust') + +t0 = time.perf_counter() +offset= -0.5 +hist3d = bh.Histogram( + bh.axis.Regular(160, 0+offset, 160+offset), #x + bh.axis.Regular(150, 0+offset, 150+offset), #y + bh.axis.Regular(200, 0, 6000), #ADU +) + +total_clusters = 0 +with ClusterFile(cluster_file, chunk_size = 1000) as f: + for i, clusters in enumerate(f): + arr = np.array(clusters) + total_clusters += clusters.size + hist3d.fill(arr['y'],arr['x'], clusters.sum_2x2()) #python talks [row, col] cluster finder [x,y] +======= from aare import RawFile f = RawFile('/mnt/sls_det_storage/jungfrau_data1/vadym_tests/jf12_M431/laser_scan/laserScan_pedestal_G0_master_0.json') @@ -17,104 +39,30 @@ print(f'{f.frame_number(1)}') for i in range(10): header, img = f.read_frame() print(header['frameNumber'], img.shape) +>>>>>>> developer -# for i, frame in enumerate(f): -# print(f'{i}', end='\r') -# print() + +t_elapsed = time.perf_counter()-t0 +print(f'Histogram filling took: {t_elapsed:.3f}s {total_clusters/t_elapsed/1e6:.3f}M clusters/s') +histogram_data = hist3d.counts() +x = hist3d.axes[2].edges[:-1] -# from aare._aare import 
ClusterFinderMT, ClusterCollector, ClusterFileSink +y = histogram_data[100,100,:] +xx = np.linspace(x[0], x[-1]) +# fig, ax = plt.subplots() +# ax.step(x, y, where = 'post') +y_err = np.sqrt(y) +y_err = np.zeros(y.size) +y_err += 1 -# cf = ClusterFinderMT((400,400), (3,3), n_threads = 3) -# # collector = ClusterCollector(cf) -# out_file = ClusterFileSink(cf, "test.clust") +# par = fit_gaus2(y,x, y_err) +# ax.plot(xx, gaus(xx,par)) +# print(par) -# for i in range(1000): -# img = f.read_frame() -# cf.push_pedestal_frame(img) -# print('Pedestal done') -# cf.sync() +res = fit_gaus(y,x) +res2 = fit_gaus(y,x, y_err) +print(res) +print(res2) -# for i in range(100): -# img = f.read_frame() -# cf.find_clusters(img) - - -# # time.sleep(1) -# cf.stop() -# time.sleep(1) -# print('Second run') -# cf.start() -# for i in range(100): -# img = f.read_frame() -# cf.find_clusters(img) - -# cf.stop() -# print('Third run') -# cf.start() -# for i in range(129): -# img = f.read_frame() -# cf.find_clusters(img) - -# cf.stop() -# out_file.stop() -# print('Done') - - -# cfile = ClusterFile("test.clust") -# i = 0 -# while True: -# try: -# cv = cfile.read_frame() -# i+=1 -# except RuntimeError: -# break -# print(f'Read {i} frames') - - - - -# # cf = ClusterFinder((400,400), (3,3)) -# # for i in range(1000): -# # cf.push_pedestal_frame(f.read_frame()) - -# # fig, ax = plt.subplots() -# # im = ax.imshow(cf.pedestal()) -# # cf.pedestal() -# # cf.noise() - - - -# # N = 500 -# # t0 = time.perf_counter() -# # hist1 = bh.Histogram(bh.axis.Regular(40, -2, 4000)) -# # f.seek(0) - -# # t0 = time.perf_counter() -# # data = f.read_n(N) -# # t_elapsed = time.perf_counter()-t0 - - -# # n_bytes = data.itemsize*data.size - -# # print(f'Reading {N} frames took {t_elapsed:.3f}s {N/t_elapsed:.0f} FPS, {n_bytes/1024**2:.4f} GB/s') - - -# # for frame in data: -# # a = cf.find_clusters(frame) - -# # clusters = cf.steal_clusters() - -# # t_elapsed = time.perf_counter()-t0 -# # print(f'Clustering {N} frames took 
{t_elapsed:.2f}s {N/t_elapsed:.0f} FPS') - - -# # t0 = time.perf_counter() -# # total_clusters = clusters.size - -# # hist1.fill(clusters.sum()) - -# # t_elapsed = time.perf_counter()-t0 -# # print(f'Filling histogram with the sum of {total_clusters} clusters took: {t_elapsed:.3f}s, {total_clusters/t_elapsed:.3g} clust/s') -# # print(f'Average number of clusters per frame {total_clusters/N:.3f}') \ No newline at end of file diff --git a/python/src/fit.hpp b/python/src/fit.hpp new file mode 100644 index 0000000..60cdecc --- /dev/null +++ b/python/src/fit.hpp @@ -0,0 +1,223 @@ +#include +#include +#include +#include +#include + +#include "aare/Fit.hpp" + +namespace py = pybind11; + +void define_fit_bindings(py::module &m) { + + // TODO! Evaluate without converting to double + m.def( + "gaus", + [](py::array_t x, + py::array_t par) { + auto x_view = make_view_1d(x); + auto par_view = make_view_1d(par); + auto y = new NDArray{aare::func::gaus(x_view, par_view)}; + return return_image_data(y); + }, + R"( + Evaluate a 1D Gaussian function for all points in x using parameters par. + + Parameters + ---------- + x : array_like + The points at which to evaluate the Gaussian function. + par : array_like + The parameters of the Gaussian function. The first element is the amplitude, the second element is the mean, and the third element is the standard deviation. + )", py::arg("x"), py::arg("par")); + + m.def( + "pol1", + [](py::array_t x, + py::array_t par) { + auto x_view = make_view_1d(x); + auto par_view = make_view_1d(par); + auto y = new NDArray{aare::func::pol1(x_view, par_view)}; + return return_image_data(y); + }, + R"( + Evaluate a 1D polynomial function for all points in x using parameters par. (p0+p1*x) + + Parameters + ---------- + x : array_like + The points at which to evaluate the polynomial function. + par : array_like + The parameters of the polynomial function. The first element is the intercept, and the second element is the slope. 
+ )", py::arg("x"), py::arg("par")); + + m.def( + "fit_gaus", + [](py::array_t x, + py::array_t y, + int n_threads) { + if (y.ndim() == 3) { + auto par = new NDArray{}; + auto y_view = make_view_3d(y); + auto x_view = make_view_1d(x); + *par = aare::fit_gaus(x_view, y_view, n_threads); + return return_image_data(par); + } else if (y.ndim() == 1) { + auto par = new NDArray{}; + auto y_view = make_view_1d(y); + auto x_view = make_view_1d(x); + *par = aare::fit_gaus(x_view, y_view); + return return_image_data(par); + } else { + throw std::runtime_error("Data must be 1D or 3D"); + } + }, +R"( +Fit a 1D Gaussian to data. + +Parameters +---------- +x : array_like + The x values. +y : array_like + The y values. +n_threads : int, optional + The number of threads to use. Default is 4. +)", + py::arg("x"), py::arg("y"), py::arg("n_threads") = 4); + + m.def( + "fit_gaus", + [](py::array_t x, + py::array_t y, + py::array_t + y_err, int n_threads) { + if (y.ndim() == 3) { + // Allocate memory for the output + // Need to have pointers to allow python to manage + // the memory + auto par = new NDArray({y.shape(0), y.shape(1), 3}); + auto par_err = + new NDArray({y.shape(0), y.shape(1), 3}); + + auto y_view = make_view_3d(y); + auto y_view_err = make_view_3d(y_err); + auto x_view = make_view_1d(x); + aare::fit_gaus(x_view, y_view, y_view_err, par->view(), + par_err->view(), n_threads); + // return return_image_data(par); + return py::make_tuple(return_image_data(par), + return_image_data(par_err)); + } else if (y.ndim() == 1) { + // Allocate memory for the output + // Need to have pointers to allow python to manage + // the memory + auto par = new NDArray({3}); + auto par_err = new NDArray({3}); + + // Decode the numpy arrays + auto y_view = make_view_1d(y); + auto y_view_err = make_view_1d(y_err); + auto x_view = make_view_1d(x); + + aare::fit_gaus(x_view, y_view, y_view_err, par->view(), + par_err->view()); + return py::make_tuple(return_image_data(par), + 
return_image_data(par_err)); + } else { + throw std::runtime_error("Data must be 1D or 3D"); + } + }, +R"( +Fit a 1D Gaussian to data with error estimates. + +Parameters +---------- +x : array_like + The x values. +y : array_like + The y values. +y_err : array_like + The error in the y values. +n_threads : int, optional + The number of threads to use. Default is 4. +)", + py::arg("x"), py::arg("y"), py::arg("y_err"), py::arg("n_threads") = 4); + + m.def( + "fit_pol1", + [](py::array_t x, + py::array_t y, + int n_threads) { + if (y.ndim() == 3) { + auto par = new NDArray{}; + + auto x_view = make_view_1d(x); + auto y_view = make_view_3d(y); + *par = aare::fit_pol1(x_view, y_view, n_threads); + return return_image_data(par); + } else if (y.ndim() == 1) { + auto par = new NDArray{}; + auto x_view = make_view_1d(x); + auto y_view = make_view_1d(y); + *par = aare::fit_pol1(x_view, y_view); + return return_image_data(par); + } else { + throw std::runtime_error("Data must be 1D or 3D"); + } + }, + py::arg("x"), py::arg("y"), py::arg("n_threads") = 4); + + m.def( + "fit_pol1", + [](py::array_t x, + py::array_t y, + py::array_t + y_err, int n_threads) { + if (y.ndim() == 3) { + auto par = + new NDArray({y.shape(0), y.shape(1), 2}); + auto par_err = + new NDArray({y.shape(0), y.shape(1), 2}); + + auto y_view = make_view_3d(y); + auto y_view_err = make_view_3d(y_err); + auto x_view = make_view_1d(x); + + aare::fit_pol1(x_view, y_view,y_view_err, par->view(), + par_err->view(), n_threads); + return py::make_tuple(return_image_data(par), + return_image_data(par_err)); + + } else if (y.ndim() == 1) { + auto par = new NDArray({2}); + auto par_err = new NDArray({2}); + + auto y_view = make_view_1d(y); + auto y_view_err = make_view_1d(y_err); + auto x_view = make_view_1d(x); + + aare::fit_pol1(x_view, y_view, y_view_err, par->view(), + par_err->view()); + return py::make_tuple(return_image_data(par), + return_image_data(par_err)); + } else { + throw std::runtime_error("Data must be 
1D or 3D"); + } + }, +R"( +Fit a 1D polynomial to data with error estimates. + +Parameters +---------- +x : array_like + The x values. +y : array_like + The y values. +y_err : array_like + The error in the y values. +n_threads : int, optional + The number of threads to use. Default is 4. +)", + py::arg("x"), py::arg("y"), py::arg("y_err"), py::arg("n_threads") = 4); +} \ No newline at end of file diff --git a/python/src/module.cpp b/python/src/module.cpp index 451a6b8..70d143f 100644 --- a/python/src/module.cpp +++ b/python/src/module.cpp @@ -8,6 +8,7 @@ #include "pedestal.hpp" #include "cluster.hpp" #include "cluster_file.hpp" +#include "fit.hpp" //Pybind stuff #include @@ -29,5 +30,6 @@ PYBIND11_MODULE(_aare, m) { define_cluster_file_io_bindings(m); define_cluster_collector_bindings(m); define_cluster_file_sink_bindings(m); + define_fit_bindings(m); } \ No newline at end of file diff --git a/python/src/np_helper.hpp b/python/src/np_helper.hpp index e0c145b..6e92830 100644 --- a/python/src/np_helper.hpp +++ b/python/src/np_helper.hpp @@ -39,65 +39,6 @@ template py::array return_vector(std::vector *vec) { free_when_done); // numpy array references this parent } -// template py::array do_read(Reader &r, size_t n_frames) { -// py::array image; -// if (n_frames == 0) -// n_frames = r.total_frames(); - -// std::array shape{static_cast(n_frames), r.rows(), -// r.cols()}; -// const uint8_t item_size = r.bytes_per_pixel(); -// if (item_size == 1) { -// image = py::array_t( -// shape); -// } else if (item_size == 2) { -// image = -// py::array_t( -// shape); -// } else if (item_size == 4) { -// image = -// py::array_t( -// shape); -// } -// r.read_into(reinterpret_cast(image.mutable_data()), n_frames); -// return image; -// } - -// py::array return_frame(pl::Frame *ptr) { -// py::capsule free_when_done(ptr, [](void *f) { -// pl::Frame *foo = reinterpret_cast(f); -// delete foo; -// }); - -// const uint8_t item_size = ptr->bytes_per_pixel(); -// std::vector shape; -// for 
(auto val : ptr->shape()) -// if (val > 1) -// shape.push_back(val); - -// std::vector strides; -// if (shape.size() == 1) -// strides.push_back(item_size); -// else if (shape.size() == 2) { -// strides.push_back(item_size * shape[1]); -// strides.push_back(item_size); -// } - -// if (item_size == 1) -// return py::array_t( -// shape, strides, -// reinterpret_cast(ptr->data()), free_when_done); -// else if (item_size == 2) -// return py::array_t(shape, strides, -// reinterpret_cast(ptr->data()), -// free_when_done); -// else if (item_size == 4) -// return py::array_t(shape, strides, -// reinterpret_cast(ptr->data()), -// free_when_done); -// return {}; -// } - // todo rewrite generic template auto get_shape_3d(py::array_t arr) { return aare::Shape<3>{arr.shape(0), arr.shape(1), arr.shape(2)}; @@ -111,6 +52,13 @@ template auto get_shape_2d(py::array_t arr) { return aare::Shape<2>{arr.shape(0), arr.shape(1)}; } +template auto get_shape_1d(py::array_t arr) { + return aare::Shape<1>{arr.shape(0)}; +} + template auto make_view_2d(py::array_t arr) { return aare::NDView(arr.mutable_data(), get_shape_2d(arr)); +} +template auto make_view_1d(py::array_t arr) { + return aare::NDView(arr.mutable_data(), get_shape_1d(arr)); } \ No newline at end of file diff --git a/src/Fit.cpp b/src/Fit.cpp new file mode 100644 index 0000000..08ecaec --- /dev/null +++ b/src/Fit.cpp @@ -0,0 +1,300 @@ +#include "aare/Fit.hpp" +#include "aare/utils/task.hpp" + +#include +#include + +#include + +namespace aare { + +namespace func { + +double gaus(const double x, const double *par) { + return par[0] * exp(-pow(x - par[1], 2) / (2 * pow(par[2], 2))); +} + +NDArray gaus(NDView x, NDView par) { + NDArray y({x.shape(0)}, 0); + for (size_t i = 0; i < x.size(); i++) { + y(i) = gaus(x(i), par.data()); + } + return y; +} + +double pol1(const double x, const double *par) { return par[0] * x + par[1]; } + +NDArray pol1(NDView x, NDView par) { + NDArray y({x.shape()}, 0); + for (size_t i = 0; i < x.size(); 
i++) { + y(i) = pol1(x(i), par.data()); + } + return y; +} + +} // namespace func + +NDArray fit_gaus(NDView x, NDView y) { + NDArray result({3}, 0); + lm_control_struct control = lm_control_double; + + // Estimate the initial parameters for the fit + std::vector start_par{0, 0, 0}; + auto e = std::max_element(y.begin(), y.end()); + auto idx = std::distance(y.begin(), e); + + start_par[0] = *e; // For amplitude we use the maximum value + start_par[1] = + x[idx]; // For the mean we use the x value of the maximum value + + // For sigma we estimate the fwhm and divide by 2.35 + // assuming equally spaced x values + auto delta = x[1] - x[0]; + start_par[2] = + std::count_if(y.begin(), y.end(), + [e, delta](double val) { return val > *e / 2; }) * + delta / 2.35; + + lmfit::result_t res(start_par); + lmcurve(res.par.size(), res.par.data(), x.size(), x.data(), y.data(), + aare::func::gaus, &control, &res.status); + + result(0) = res.par[0]; + result(1) = res.par[1]; + result(2) = res.par[2]; + + return result; +} + +NDArray fit_gaus(NDView x, NDView y, + int n_threads) { + NDArray result({y.shape(0), y.shape(1), 3}, 0); + + auto process = [&x, &y, &result](ssize_t first_row, ssize_t last_row) { + for (ssize_t row = first_row; row < last_row; row++) { + for (ssize_t col = 0; col < y.shape(1); col++) { + NDView values(&y(row, col, 0), {y.shape(2)}); + auto res = fit_gaus(x, values); + result(row, col, 0) = res(0); + result(row, col, 1) = res(1); + result(row, col, 2) = res(2); + } + } + }; + auto tasks = split_task(0, y.shape(0), n_threads); + std::vector threads; + for (auto &task : tasks) { + threads.push_back(std::thread(process, task.first, task.second)); + } + for (auto &thread : threads) { + thread.join(); + } + + return result; +} + +void fit_gaus(NDView x, NDView y, NDView y_err, + NDView par_out, NDView par_err_out, + int n_threads) { + + auto process = [&](ssize_t first_row, ssize_t last_row) { + for (ssize_t row = first_row; row < last_row; row++) { + for 
(ssize_t col = 0; col < y.shape(1); col++) { + NDView y_view(&y(row, col, 0), {y.shape(2)}); + NDView y_err_view(&y_err(row, col, 0), + {y_err.shape(2)}); + NDView par_out_view(&par_out(row, col, 0), + {par_out.shape(2)}); + NDView par_err_out_view(&par_err_out(row, col, 0), + {par_err_out.shape(2)}); + fit_gaus(x, y_view, y_err_view, par_out_view, par_err_out_view); + } + } + }; + + auto tasks = split_task(0, y.shape(0), n_threads); + std::vector threads; + for (auto &task : tasks) { + threads.push_back(std::thread(process, task.first, task.second)); + } + for (auto &thread : threads) { + thread.join(); + } +} + +void fit_gaus(NDView x, NDView y, NDView y_err, + NDView par_out, NDView par_err_out) { + // Check that we have the correct sizes + if (y.size() != x.size() || y.size() != y_err.size() || + par_out.size() != 3 || par_err_out.size() != 3) { + throw std::runtime_error("Data, x, data_err must have the same size " + "and par_out, par_err_out must have size 3"); + } + + lm_control_struct control = lm_control_double; + + // Estimate the initial parameters for the fit + std::vector start_par{0, 0, 0}; + std::vector start_par_err{0, 0, 0}; + std::vector start_cov{0, 0, 0, 0, 0, 0, 0, 0, 0}; + + auto e = std::max_element(y.begin(), y.end()); + auto idx = std::distance(y.begin(), e); + start_par[0] = *e; // For amplitude we use the maximum value + start_par[1] = + x[idx]; // For the mean we use the x value of the maximum value + + // For sigma we estimate the fwhm and divide by 2.35 + // assuming equally spaced x values + auto delta = x[1] - x[0]; + start_par[2] = + std::count_if(y.begin(), y.end(), + [e, delta](double val) { return val > *e / 2; }) * + delta / 2.35; + + lmfit::result_t res(start_par); + lmfit::result_t res_err(start_par_err); + lmfit::result_t cov(start_cov); + + // TODO can we make lmcurve write the result directly where is should be? 
+ lmcurve2(res.par.size(), res.par.data(), res_err.par.data(), cov.par.data(), + x.size(), x.data(), y.data(), y_err.data(), aare::func::gaus, + &control, &res.status); + + par_out(0) = res.par[0]; + par_out(1) = res.par[1]; + par_out(2) = res.par[2]; + par_err_out(0) = res_err.par[0]; + par_err_out(1) = res_err.par[1]; + par_err_out(2) = res_err.par[2]; +} + +void fit_pol1(NDView x, NDView y, NDView y_err, + NDView par_out, NDView par_err_out) { + // Check that we have the correct sizes + if (y.size() != x.size() || y.size() != y_err.size() || + par_out.size() != 2 || par_err_out.size() != 2) { + throw std::runtime_error("Data, x, data_err must have the same size " + "and par_out, par_err_out must have size 2"); + } + + lm_control_struct control = lm_control_double; + + // Estimate the initial parameters for the fit + std::vector start_par{0, 0}; + std::vector start_par_err{0, 0}; + std::vector start_cov{0, 0, 0, 0}; + + auto y2 = std::max_element(y.begin(), y.end()); + auto x2 = x[std::distance(y.begin(), y2)]; + auto y1 = std::min_element(y.begin(), y.end()); + auto x1 = x[std::distance(y.begin(), y1)]; + + start_par[0] = + (*y2 - *y1) / (x2 - x1); // For amplitude we use the maximum value + start_par[1] = + *y1 - ((*y2 - *y1) / (x2 - x1)) * + x1; // For the mean we use the x value of the maximum value + + lmfit::result_t res(start_par); + lmfit::result_t res_err(start_par_err); + lmfit::result_t cov(start_cov); + + lmcurve2(res.par.size(), res.par.data(), res_err.par.data(), cov.par.data(), + x.size(), x.data(), y.data(), y_err.data(), aare::func::pol1, + &control, &res.status); + + par_out(0) = res.par[0]; + par_out(1) = res.par[1]; + par_err_out(0) = res_err.par[0]; + par_err_out(1) = res_err.par[1]; +} + +void fit_pol1(NDView x, NDView y, NDView y_err, + NDView par_out, NDView par_err_out, + int n_threads) { + + auto process = [&](ssize_t first_row, ssize_t last_row) { + for (ssize_t row = first_row; row < last_row; row++) { + for (ssize_t col = 0; col < 
y.shape(1); col++) { + NDView y_view(&y(row, col, 0), {y.shape(2)}); + NDView y_err_view(&y_err(row, col, 0), + {y_err.shape(2)}); + NDView par_out_view(&par_out(row, col, 0), + {par_out.shape(2)}); + NDView par_err_out_view(&par_err_out(row, col, 0), + {par_err_out.shape(2)}); + fit_pol1(x, y_view, y_err_view, par_out_view, par_err_out_view); + } + } + }; + + auto tasks = split_task(0, y.shape(0), n_threads); + std::vector threads; + for (auto &task : tasks) { + threads.push_back(std::thread(process, task.first, task.second)); + } + for (auto &thread : threads) { + thread.join(); + } +} + +NDArray fit_pol1(NDView x, NDView y) { + // // Check that we have the correct sizes + // if (y.size() != x.size() || y.size() != y_err.size() || + // par_out.size() != 2 || par_err_out.size() != 2) { + // throw std::runtime_error("Data, x, data_err must have the same size " + // "and par_out, par_err_out must have size 2"); + // } + NDArray par({2}, 0); + + lm_control_struct control = lm_control_double; + + // Estimate the initial parameters for the fit + std::vector start_par{0, 0}; + + auto y2 = std::max_element(y.begin(), y.end()); + auto x2 = x[std::distance(y.begin(), y2)]; + auto y1 = std::min_element(y.begin(), y.end()); + auto x1 = x[std::distance(y.begin(), y1)]; + + start_par[0] = (*y2 - *y1) / (x2 - x1); + start_par[1] = *y1 - ((*y2 - *y1) / (x2 - x1)) * x1; + + lmfit::result_t res(start_par); + + lmcurve(res.par.size(), res.par.data(), x.size(), x.data(), y.data(), + aare::func::pol1, &control, &res.status); + + par(0) = res.par[0]; + par(1) = res.par[1]; + return par; +} + +NDArray fit_pol1(NDView x, NDView y, + int n_threads) { + NDArray result({y.shape(0), y.shape(1), 2}, 0); + + auto process = [&](ssize_t first_row, ssize_t last_row) { + for (ssize_t row = first_row; row < last_row; row++) { + for (ssize_t col = 0; col < y.shape(1); col++) { + NDView values(&y(row, col, 0), {y.shape(2)}); + auto res = fit_pol1(x, values); + result(row, col, 0) = res(0); + 
result(row, col, 1) = res(1); + } + } + }; + + auto tasks = split_task(0, y.shape(0), n_threads); + std::vector threads; + for (auto &task : tasks) { + threads.push_back(std::thread(process, task.first, task.second)); + } + for (auto &thread : threads) { + thread.join(); + } + return result; +} + +} // namespace aare \ No newline at end of file diff --git a/src/utils/task.cpp b/src/utils/task.cpp new file mode 100644 index 0000000..af6756e --- /dev/null +++ b/src/utils/task.cpp @@ -0,0 +1,30 @@ +#include "aare/utils/task.hpp" + +namespace aare { + +std::vector> split_task(int first, int last, + int n_threads) { + std::vector> vec; + vec.reserve(n_threads); + + int n_frames = last - first; + + if (n_threads >= n_frames) { + for (int i = 0; i != n_frames; ++i) { + vec.push_back({i, i + 1}); + } + return vec; + } + + int step = (n_frames) / n_threads; + for (int i = 0; i != n_threads; ++i) { + int start = step * i; + int stop = step * (i + 1); + if (i == n_threads - 1) + stop = last; + vec.push_back({start, stop}); + } + return vec; +} + +} // namespace aare \ No newline at end of file diff --git a/src/utils/task.test.cpp b/src/utils/task.test.cpp new file mode 100644 index 0000000..e19994a --- /dev/null +++ b/src/utils/task.test.cpp @@ -0,0 +1,32 @@ +#include "aare/utils/task.hpp" + +#include +#include + + +TEST_CASE("Split a range into multiple tasks"){ + + auto tasks = aare::split_task(0, 10, 3); + REQUIRE(tasks.size() == 3); + REQUIRE(tasks[0].first == 0); + REQUIRE(tasks[0].second == 3); + REQUIRE(tasks[1].first == 3); + REQUIRE(tasks[1].second == 6); + REQUIRE(tasks[2].first == 6); + REQUIRE(tasks[2].second == 10); + + tasks = aare::split_task(0, 10, 1); + REQUIRE(tasks.size() == 1); + REQUIRE(tasks[0].first == 0); + REQUIRE(tasks[0].second == 10); + + tasks = aare::split_task(0, 10, 10); + REQUIRE(tasks.size() == 10); + for (int i = 0; i < 10; i++){ + REQUIRE(tasks[i].first == i); + REQUIRE(tasks[i].second == i+1); + } + + + +} \ No newline at end of file From 
dcb9a98faaadbeca0799899ceaa44363a6982651 Mon Sep 17 00:00:00 2001 From: froejdh_e Date: Wed, 12 Feb 2025 16:49:30 +0100 Subject: [PATCH 032/120] bumped version --- conda-recipe/meta.yaml | 2 +- pyproject.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/conda-recipe/meta.yaml b/conda-recipe/meta.yaml index 978fa18..c405e90 100644 --- a/conda-recipe/meta.yaml +++ b/conda-recipe/meta.yaml @@ -1,6 +1,6 @@ package: name: aare - version: 2025.2.5.dev0 #TODO! how to not duplicate this? + version: 2025.2.12 #TODO! how to not duplicate this? source: diff --git a/pyproject.toml b/pyproject.toml index 7ffd22a..74e624f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "scikit_build_core.build" [project] name = "aare" -version = "2025.2.5.dev0" +version = "2025.2.12" [tool.scikit-build] cmake.verbose = true From dadf5f48699ca44c72df5c94e0feeb0751346dc9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Erik=20Fr=C3=B6jdh?= Date: Wed, 12 Feb 2025 16:50:31 +0100 Subject: [PATCH 033/120] Added fitting, fixed roi etc (#129) Co-authored-by: Patrick Co-authored-by: JulianHeymes --- ...{deploy.yml => build_and_deploy_conda.yml} | 2 - .github/workflows/build_conda.yml | 40 +++ CMakeLists.txt | 47 ++- conda-recipe/meta.yaml | 2 +- docs/CMakeLists.txt | 23 +- docs/src/ClusterFinderMT.rst | 7 + docs/src/index.rst | 4 + docs/src/pyClusterVector.rst | 33 ++ docs/src/pyFit.rst | 19 + include/aare/CircularFifo.hpp | 97 +++++ include/aare/ClusterCollector.hpp | 52 +++ include/aare/ClusterFile.hpp | 72 +++- include/aare/ClusterFileSink.hpp | 56 +++ include/aare/ClusterFinder.hpp | 131 +------ include/aare/ClusterFinderMT.hpp | 268 ++++++++++++++ include/aare/ClusterVector.hpp | 163 +++++++-- include/aare/File.hpp | 2 + include/aare/Fit.hpp | 76 ++++ include/aare/Pedestal.hpp | 8 +- include/aare/ProducerConsumerQueue.hpp | 203 +++++++++++ include/aare/RawFile.hpp | 15 +- include/aare/RawMasterFile.hpp | 11 - include/aare/RawSubFile.hpp | 3 + 
include/aare/decode.hpp | 13 + include/aare/defs.hpp | 33 +- include/aare/geo_helpers.hpp | 16 + include/aare/utils/task.hpp | 8 + patches/lmfit.patch | 13 + pyproject.toml | 2 +- python/CMakeLists.txt | 12 +- python/aare/__init__.py | 10 +- python/aare/func.py | 1 + python/aare/transform.py | 12 +- python/aare/utils.py | 6 +- python/examples/fits.py | 79 ++++ python/examples/play.py | 86 +++-- python/src/cluster.hpp | 195 +++++++--- python/src/cluster_file.hpp | 38 +- python/src/ctb_raw_file.hpp | 42 +++ python/src/file.hpp | 40 ++- python/src/fit.hpp | 223 ++++++++++++ python/src/module.cpp | 6 + python/src/np_helper.hpp | 66 +--- src/ClusterFile.cpp | 337 +++++++++--------- src/ClusterVector.test.cpp | 112 +++++- src/File.cpp | 2 + src/Fit.cpp | 300 ++++++++++++++++ src/RawFile.cpp | 159 ++++----- src/RawFile.test.cpp | 5 + src/RawSubFile.cpp | 50 ++- src/decode.cpp | 61 ++++ src/geo_helpers.cpp | 71 ++++ src/geo_helpers.test.cpp | 230 ++++++++++++ src/utils/task.cpp | 30 ++ src/utils/task.test.cpp | 32 ++ 55 files changed, 2931 insertions(+), 693 deletions(-) rename .github/workflows/{deploy.yml => build_and_deploy_conda.yml} (94%) create mode 100644 .github/workflows/build_conda.yml create mode 100644 docs/src/ClusterFinderMT.rst create mode 100644 docs/src/pyClusterVector.rst create mode 100644 docs/src/pyFit.rst create mode 100644 include/aare/CircularFifo.hpp create mode 100644 include/aare/ClusterCollector.hpp create mode 100644 include/aare/ClusterFileSink.hpp create mode 100644 include/aare/ClusterFinderMT.hpp create mode 100644 include/aare/Fit.hpp create mode 100644 include/aare/ProducerConsumerQueue.hpp create mode 100644 include/aare/decode.hpp create mode 100644 include/aare/geo_helpers.hpp create mode 100644 include/aare/utils/task.hpp create mode 100644 patches/lmfit.patch create mode 100644 python/aare/func.py create mode 100644 python/examples/fits.py create mode 100644 python/src/fit.hpp create mode 100644 src/Fit.cpp create mode 100644 
src/decode.cpp create mode 100644 src/geo_helpers.cpp create mode 100644 src/geo_helpers.test.cpp create mode 100644 src/utils/task.cpp create mode 100644 src/utils/task.test.cpp diff --git a/.github/workflows/deploy.yml b/.github/workflows/build_and_deploy_conda.yml similarity index 94% rename from .github/workflows/deploy.yml rename to .github/workflows/build_and_deploy_conda.yml index 81edde3..90e75c1 100644 --- a/.github/workflows/deploy.yml +++ b/.github/workflows/build_and_deploy_conda.yml @@ -4,7 +4,6 @@ on: push: branches: - main - - developer jobs: build: @@ -34,7 +33,6 @@ jobs: run: conda install conda-build=24.9 conda-verify pytest anaconda-client - name: Enable upload - if: github.ref == 'refs/heads/main' run: conda config --set anaconda_upload yes - name: Build diff --git a/.github/workflows/build_conda.yml b/.github/workflows/build_conda.yml new file mode 100644 index 0000000..0b3e55c --- /dev/null +++ b/.github/workflows/build_conda.yml @@ -0,0 +1,40 @@ +name: Build pkgs and deploy if on main + +on: + push: + branches: + - developer + +jobs: + build: + strategy: + fail-fast: false + matrix: + platform: [ubuntu-latest, ] # macos-12, windows-2019] + python-version: ["3.12",] + + runs-on: ${{ matrix.platform }} + + # The setup-miniconda action needs this to activate miniconda + defaults: + run: + shell: "bash -l {0}" + + steps: + - uses: actions/checkout@v4 + + - name: Get conda + uses: conda-incubator/setup-miniconda@v3.0.4 + with: + python-version: ${{ matrix.python-version }} + channels: conda-forge + + - name: Prepare + run: conda install conda-build=24.9 conda-verify pytest anaconda-client + + - name: Disable upload + run: conda config --set anaconda_upload no + + - name: Build + run: conda build conda-recipe + diff --git a/CMakeLists.txt b/CMakeLists.txt index cd1cd94..62a3878 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -48,6 +48,7 @@ option(AARE_FETCH_PYBIND11 "Use FetchContent to download pybind11" ON) option(AARE_FETCH_CATCH "Use 
FetchContent to download catch2" ON) option(AARE_FETCH_JSON "Use FetchContent to download nlohmann::json" ON) option(AARE_FETCH_ZMQ "Use FetchContent to download libzmq" ON) +option(AARE_FETCH_LMFIT "Use FetchContent to download lmfit" ON) #Convenience option to use system libraries only (no FetchContent) @@ -76,6 +77,34 @@ endif() set(CMAKE_EXPORT_COMPILE_COMMANDS ON) +if(AARE_FETCH_LMFIT) + set(lmfit_patch git apply ${CMAKE_CURRENT_SOURCE_DIR}/patches/lmfit.patch) + FetchContent_Declare( + lmfit + GIT_REPOSITORY https://jugit.fz-juelich.de/mlz/lmfit.git + GIT_TAG main + PATCH_COMMAND ${lmfit_patch} + UPDATE_DISCONNECTED 1 + EXCLUDE_FROM_ALL + ) + #Disable what we don't need from lmfit + set(BUILD_TESTING OFF CACHE BOOL "") + set(LMFIT_CPPTEST OFF CACHE BOOL "") + set(LIB_MAN OFF CACHE BOOL "") + set(LMFIT_CPPTEST OFF CACHE BOOL "") + set(BUILD_SHARED_LIBS OFF CACHE BOOL "") + + + FetchContent_MakeAvailable(lmfit) + set_property(TARGET lmfit PROPERTY POSITION_INDEPENDENT_CODE ON) + + target_include_directories (lmfit PUBLIC "${libzmq_SOURCE_DIR}/lib") + message(STATUS "lmfit include dir: ${lmfit_SOURCE_DIR}/lib") +else() + find_package(lmfit REQUIRED) +endif() + + if(AARE_FETCH_ZMQ) # Fetchcontent_Declare is deprecated need to find a way to update this # for now setting the policy to old is enough @@ -127,8 +156,8 @@ if (AARE_FETCH_FMT) LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} - INCLUDES DESTINATION ${CMAKE_INSTALL_INCLUDEDIR} -) + INCLUDES DESTINATION ${CMAKE_INSTALL_INCLUDEDIR} + ) else() find_package(fmt 6 REQUIRED) endif() @@ -146,7 +175,6 @@ if (AARE_FETCH_JSON) install( TARGETS nlohmann_json EXPORT "${TARGETS_EXPORT_NAME}" - ) message(STATUS "target: ${NLOHMANN_JSON_TARGET_NAME}") else() @@ -283,11 +311,14 @@ set(PUBLICHEADERS include/aare/ClusterFile.hpp include/aare/CtbRawFile.hpp include/aare/ClusterVector.hpp + include/aare/decode.hpp include/aare/defs.hpp 
include/aare/Dtype.hpp include/aare/File.hpp + include/aare/Fit.hpp include/aare/FileInterface.hpp include/aare/Frame.hpp + include/aare/geo_helpers.hpp include/aare/NDArray.hpp include/aare/NDView.hpp include/aare/NumpyFile.hpp @@ -298,6 +329,7 @@ set(PUBLICHEADERS include/aare/RawMasterFile.hpp include/aare/RawSubFile.hpp include/aare/VarClusterFinder.hpp + include/aare/utils/task.hpp ) @@ -307,14 +339,18 @@ set(SourceFiles ${CMAKE_CURRENT_SOURCE_DIR}/src/ClusterFile.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/defs.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/Dtype.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/decode.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/Frame.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/File.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/Fit.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/geo_helpers.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyFile.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyHelpers.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/PixelMap.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/RawFile.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/RawSubFile.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/RawMasterFile.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/utils/task.cpp ) @@ -334,6 +370,7 @@ target_link_libraries( ${STD_FS_LIB} # from helpers.cmake PRIVATE aare_compiler_flags + lmfit ) set_target_properties(aare_core PROPERTIES @@ -350,6 +387,7 @@ if(AARE_TESTS) ${CMAKE_CURRENT_SOURCE_DIR}/src/defs.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/Dtype.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/Frame.test.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/geo_helpers.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/RawMasterFile.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/NDArray.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/NDView.test.cpp @@ -359,6 +397,7 @@ if(AARE_TESTS) ${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyFile.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyHelpers.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/RawFile.test.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/utils/task.test.cpp ) target_sources(tests PRIVATE ${TestSources} ) @@ -436,4 +475,4 @@ if(AARE_MASTER_PROJECT) 
set(CMAKE_INSTALL_DIR "share/cmake/${PROJECT_NAME}") set(PROJECT_LIBRARIES aare-core aare-compiler-flags ) include(cmake/package_config.cmake) -endif() \ No newline at end of file +endif() diff --git a/conda-recipe/meta.yaml b/conda-recipe/meta.yaml index c3c823b..c405e90 100644 --- a/conda-recipe/meta.yaml +++ b/conda-recipe/meta.yaml @@ -1,6 +1,6 @@ package: name: aare - version: 2024.12.16.dev0 #TODO! how to not duplicate this? + version: 2025.2.12 #TODO! how to not duplicate this? source: diff --git a/docs/CMakeLists.txt b/docs/CMakeLists.txt index 118fd5c..c693f0e 100644 --- a/docs/CMakeLists.txt +++ b/docs/CMakeLists.txt @@ -12,28 +12,7 @@ set(SPHINX_BUILD ${CMAKE_CURRENT_BINARY_DIR}) file(GLOB SPHINX_SOURCE_FILES CONFIGURE_DEPENDS "src/*.rst") -# set(SPHINX_SOURCE_FILES -# src/index.rst -# src/Installation.rst -# src/Requirements.rst -# src/NDArray.rst -# src/NDView.rst -# src/File.rst -# src/Frame.rst -# src/Dtype.rst -# src/ClusterFinder.rst -# src/ClusterFile.rst -# src/Pedestal.rst -# src/RawFile.rst -# src/RawSubFile.rst -# src/RawMasterFile.rst -# src/VarClusterFinder.rst -# src/pyVarClusterFinder.rst -# src/pyFile.rst -# src/pyCtbRawFile.rst -# src/pyRawFile.rst -# src/pyRawMasterFile.rst -# ) + foreach(filename ${SPHINX_SOURCE_FILES}) diff --git a/docs/src/ClusterFinderMT.rst b/docs/src/ClusterFinderMT.rst new file mode 100644 index 0000000..b15eb8b --- /dev/null +++ b/docs/src/ClusterFinderMT.rst @@ -0,0 +1,7 @@ +ClusterFinderMT +================== + + +.. doxygenclass:: aare::ClusterFinderMT + :members: + :undoc-members: \ No newline at end of file diff --git a/docs/src/index.rst b/docs/src/index.rst index 4316a2c..905caea 100644 --- a/docs/src/index.rst +++ b/docs/src/index.rst @@ -30,10 +30,13 @@ AARE pyFile pyCtbRawFile pyClusterFile + pyClusterVector pyRawFile pyRawMasterFile pyVarClusterFinder + pyFit + .. 
toctree:: :caption: C++ API @@ -45,6 +48,7 @@ AARE File Dtype ClusterFinder + ClusterFinderMT ClusterFile ClusterVector Pedestal diff --git a/docs/src/pyClusterVector.rst b/docs/src/pyClusterVector.rst new file mode 100644 index 0000000..4277920 --- /dev/null +++ b/docs/src/pyClusterVector.rst @@ -0,0 +1,33 @@ +ClusterVector +================ + +The ClusterVector, holds clusters from the ClusterFinder. Since it is templated +in C++ we use a suffix indicating the data type in python. The suffix is +``_i`` for integer, ``_f`` for float, and ``_d`` for double. + +At the moment the functionality from python is limited and it is not supported +to push_back clusters to the vector. The intended use case is to pass it to +C++ functions that support the ClusterVector or to view it as a numpy array. + +**View ClusterVector as numpy array** + +.. code:: python + + from aare import ClusterFile + with ClusterFile("path/to/file") as f: + cluster_vector = f.read_frame() + + # Create a copy of the cluster data in a numpy array + clusters = np.array(cluster_vector) + + # Avoid copying the data by passing copy=False + clusters = np.array(cluster_vector, copy = False) + + +.. py:currentmodule:: aare + +.. autoclass:: ClusterVector_i + :members: + :undoc-members: + :show-inheritance: + :inherited-members: \ No newline at end of file diff --git a/docs/src/pyFit.rst b/docs/src/pyFit.rst new file mode 100644 index 0000000..abaa3cf --- /dev/null +++ b/docs/src/pyFit.rst @@ -0,0 +1,19 @@ + +Fit +======== + +.. py:currentmodule:: aare + + +**Functions** + +.. autofunction:: gaus + +.. autofunction:: pol1 + + +**Fitting** + +.. autofunction:: fit_gaus + +.. 
autofunction:: fit_pol1 \ No newline at end of file diff --git a/include/aare/CircularFifo.hpp b/include/aare/CircularFifo.hpp new file mode 100644 index 0000000..8098082 --- /dev/null +++ b/include/aare/CircularFifo.hpp @@ -0,0 +1,97 @@ +#pragma once + +#include +#include +#include +#include +#include + +#include "aare/ProducerConsumerQueue.hpp" + +namespace aare { + +template class CircularFifo { + uint32_t fifo_size; + aare::ProducerConsumerQueue free_slots; + aare::ProducerConsumerQueue filled_slots; + + public: + CircularFifo() : CircularFifo(100){}; + CircularFifo(uint32_t size) : fifo_size(size), free_slots(size + 1), filled_slots(size + 1) { + + // TODO! how do we deal with alignment for writing? alignas??? + // Do we give the user a chance to provide memory locations? + // Templated allocator? + for (size_t i = 0; i < fifo_size; ++i) { + free_slots.write(ItemType{}); + } + } + + bool next() { + // TODO! avoid default constructing ItemType + ItemType it; + if (!filled_slots.read(it)) + return false; + if (!free_slots.write(std::move(it))) + return false; + return true; + } + + ~CircularFifo() {} + + using value_type = ItemType; + + auto numFilledSlots() const noexcept { return filled_slots.sizeGuess(); } + auto numFreeSlots() const noexcept { return free_slots.sizeGuess(); } + auto isFull() const noexcept { return filled_slots.isFull(); } + + ItemType pop_free() { + ItemType v; + while (!free_slots.read(v)) + ; + return std::move(v); + // return v; + } + + bool try_pop_free(ItemType &v) { return free_slots.read(v); } + + ItemType pop_value(std::chrono::nanoseconds wait, std::atomic &stopped) { + ItemType v; + while (!filled_slots.read(v) && !stopped) { + std::this_thread::sleep_for(wait); + } + return std::move(v); + } + + ItemType pop_value() { + ItemType v; + while (!filled_slots.read(v)) + ; + return std::move(v); + } + + ItemType *frontPtr() { return filled_slots.frontPtr(); } + + // TODO! 
Add function to move item from filled to free to be used + // with the frontPtr function + + template void push_value(Args &&...recordArgs) { + while (!filled_slots.write(std::forward(recordArgs)...)) + ; + } + + template bool try_push_value(Args &&...recordArgs) { + return filled_slots.write(std::forward(recordArgs)...); + } + + template void push_free(Args &&...recordArgs) { + while (!free_slots.write(std::forward(recordArgs)...)) + ; + } + + template bool try_push_free(Args &&...recordArgs) { + return free_slots.write(std::forward(recordArgs)...); + } +}; + +} // namespace aare \ No newline at end of file diff --git a/include/aare/ClusterCollector.hpp b/include/aare/ClusterCollector.hpp new file mode 100644 index 0000000..0738062 --- /dev/null +++ b/include/aare/ClusterCollector.hpp @@ -0,0 +1,52 @@ +#pragma once +#include +#include + +#include "aare/ProducerConsumerQueue.hpp" +#include "aare/ClusterVector.hpp" +#include "aare/ClusterFinderMT.hpp" + +namespace aare { + +class ClusterCollector{ + ProducerConsumerQueue>* m_source; + std::atomic m_stop_requested{false}; + std::atomic m_stopped{true}; + std::chrono::milliseconds m_default_wait{1}; + std::thread m_thread; + std::vector> m_clusters; + + void process(){ + m_stopped = false; + fmt::print("ClusterCollector started\n"); + while (!m_stop_requested || !m_source->isEmpty()) { + if (ClusterVector *clusters = m_source->frontPtr(); + clusters != nullptr) { + m_clusters.push_back(std::move(*clusters)); + m_source->popFront(); + }else{ + std::this_thread::sleep_for(m_default_wait); + } + } + fmt::print("ClusterCollector stopped\n"); + m_stopped = true; + } + + public: + ClusterCollector(ClusterFinderMT* source){ + m_source = source->sink(); + m_thread = std::thread(&ClusterCollector::process, this); + } + void stop(){ + m_stop_requested = true; + m_thread.join(); + } + std::vector> steal_clusters(){ + if(!m_stopped){ + throw std::runtime_error("ClusterCollector is still running"); + } + return 
std::move(m_clusters); + } +}; + +} // namespace aare \ No newline at end of file diff --git a/include/aare/ClusterFile.hpp b/include/aare/ClusterFile.hpp index a484560..b796763 100644 --- a/include/aare/ClusterFile.hpp +++ b/include/aare/ClusterFile.hpp @@ -33,6 +33,12 @@ typedef enum { pTopRight = 8 } pixel; +struct Eta2 { + double x; + double y; + corner c; +}; + struct ClusterAnalysis { uint32_t c; int32_t tot; @@ -49,6 +55,19 @@ int32_t frame_number uint32_t number_of_clusters .... */ + +/** + * @brief Class to read and write cluster files + * Expects data to be laid out as: + * + * + * int32_t frame_number + * uint32_t number_of_clusters + * int16_t x, int16_t y, int32_t data[9] x number_of_clusters + * int32_t frame_number + * uint32_t number_of_clusters + * etc. + */ class ClusterFile { FILE *fp{}; uint32_t m_num_left{}; @@ -56,26 +75,61 @@ class ClusterFile { const std::string m_mode; public: + /** + * @brief Construct a new Cluster File object + * @param fname path to the file + * @param chunk_size number of clusters to read at a time when iterating + * over the file + * @param mode mode to open the file in. "r" for reading, "w" for writing, + * "a" for appending + * @throws std::runtime_error if the file could not be opened + */ ClusterFile(const std::filesystem::path &fname, size_t chunk_size = 1000, const std::string &mode = "r"); + + ~ClusterFile(); - std::vector read_clusters(size_t n_clusters); - std::vector read_frame(int32_t &out_fnum); - void write_frame(int32_t frame_number, - const ClusterVector &clusters); - std::vector - read_cluster_with_cut(size_t n_clusters, double *noise_map, int nx, int ny); + /** + * @brief Read n_clusters clusters from the file discarding frame numbers. + * If EOF is reached the returned vector will have less than n_clusters + * clusters + */ + ClusterVector read_clusters(size_t n_clusters); + + /** + * @brief Read a single frame from the file and return the clusters. 
The + * cluster vector will have the frame number set. + * @throws std::runtime_error if the file is not opened for reading or the file pointer not + * at the beginning of a frame + */ + ClusterVector read_frame(); + + + void write_frame(const ClusterVector &clusters); + + // Need to be migrated to support NDArray and return a ClusterVector + // std::vector + // read_cluster_with_cut(size_t n_clusters, double *noise_map, int nx, int ny); + + /** + * @brief Return the chunk size + */ size_t chunk_size() const { return m_chunk_size; } + + + /** + * @brief Close the file. If not closed the file will be closed in the destructor + */ void close(); }; int analyze_data(int32_t *data, int32_t *t2, int32_t *t3, char *quad, double *eta2x, double *eta2y, double *eta3x, double *eta3y); -int analyze_cluster(Cluster3x3& cl, int32_t *t2, int32_t *t3, char *quad, +int analyze_cluster(Cluster3x3 &cl, int32_t *t2, int32_t *t3, char *quad, double *eta2x, double *eta2y, double *eta3x, double *eta3y); -NDArray calculate_eta2( ClusterVector& clusters); -std::array calculate_eta2( Cluster3x3& cl); +NDArray calculate_eta2(ClusterVector &clusters); +Eta2 calculate_eta2(Cluster3x3 &cl); } // namespace aare diff --git a/include/aare/ClusterFileSink.hpp b/include/aare/ClusterFileSink.hpp new file mode 100644 index 0000000..158fdeb --- /dev/null +++ b/include/aare/ClusterFileSink.hpp @@ -0,0 +1,56 @@ +#pragma once +#include +#include +#include + +#include "aare/ProducerConsumerQueue.hpp" +#include "aare/ClusterVector.hpp" +#include "aare/ClusterFinderMT.hpp" + +namespace aare{ + +class ClusterFileSink{ + ProducerConsumerQueue>* m_source; + std::atomic m_stop_requested{false}; + std::atomic m_stopped{true}; + std::chrono::milliseconds m_default_wait{1}; + std::thread m_thread; + std::ofstream m_file; + + + void process(){ + m_stopped = false; + fmt::print("ClusterFileSink started\n"); + while (!m_stop_requested || !m_source->isEmpty()) { + if (ClusterVector *clusters = m_source->frontPtr(); + 
clusters != nullptr) { + // Write clusters to file + int32_t frame_number = clusters->frame_number(); //TODO! Should we store frame number already as int? + uint32_t num_clusters = clusters->size(); + m_file.write(reinterpret_cast(&frame_number), sizeof(frame_number)); + m_file.write(reinterpret_cast(&num_clusters), sizeof(num_clusters)); + m_file.write(reinterpret_cast(clusters->data()), clusters->size() * clusters->item_size()); + m_source->popFront(); + }else{ + std::this_thread::sleep_for(m_default_wait); + } + } + fmt::print("ClusterFileSink stopped\n"); + m_stopped = true; + } + + public: + ClusterFileSink(ClusterFinderMT* source, const std::filesystem::path& fname){ + m_source = source->sink(); + m_thread = std::thread(&ClusterFileSink::process, this); + m_file.open(fname, std::ios::binary); + } + void stop(){ + m_stop_requested = true; + m_thread.join(); + m_file.close(); + } +}; + + +} // namespace aare \ No newline at end of file diff --git a/include/aare/ClusterFinder.hpp b/include/aare/ClusterFinder.hpp index 8bd77cc..84b207b 100644 --- a/include/aare/ClusterFinder.hpp +++ b/include/aare/ClusterFinder.hpp @@ -10,26 +10,12 @@ namespace aare { -/** enum to define the event types */ -enum class eventType { - PEDESTAL, /** pedestal */ - NEIGHBOUR, /** neighbour i.e. below threshold, but in the cluster of a - photon */ - PHOTON, /** photon i.e. 
above threshold */ - PHOTON_MAX, /** maximum of a cluster satisfying the photon conditions */ - NEGATIVE_PEDESTAL, /** negative value, will not be accounted for as pedestal - in order to avoid drift of the pedestal towards - negative values */ - UNDEFINED_EVENT = -1 /** undefined */ -}; - template class ClusterFinder { Shape<2> m_image_size; const int m_cluster_sizeX; const int m_cluster_sizeY; - // const PEDESTAL_TYPE m_threshold; const PEDESTAL_TYPE m_nSigma; const PEDESTAL_TYPE c2; const PEDESTAL_TYPE c3; @@ -61,6 +47,7 @@ class ClusterFinder { NDArray pedestal() { return m_pedestal.mean(); } NDArray noise() { return m_pedestal.std(); } + void clear_pedestal() { m_pedestal.clear(); } /** * @brief Move the clusters from the ClusterVector in the ClusterFinder to a @@ -78,13 +65,13 @@ class ClusterFinder { m_clusters = ClusterVector(m_cluster_sizeX, m_cluster_sizeY); return tmp; } - void find_clusters(NDView frame) { + void find_clusters(NDView frame, uint64_t frame_number = 0) { // // TODO! 
deal with even size clusters // // currently 3,3 -> +/- 1 // // 4,4 -> +/- 2 int dy = m_cluster_sizeY / 2; int dx = m_cluster_sizeX / 2; - + m_clusters.set_frame_number(frame_number); std::vector cluster_data(m_cluster_sizeX * m_cluster_sizeY); for (int iy = 0; iy < frame.shape(0); iy++) { for (int ix = 0; ix < frame.shape(1); ix++) { @@ -121,8 +108,8 @@ class ClusterFinder { } else if (total > c3 * m_nSigma * rms) { // pass } else { - // m_pedestal.push(iy, ix, frame(iy, ix)); - m_pedestal.push_fast(iy, ix, frame(iy, ix)); + // m_pedestal.push(iy, ix, frame(iy, ix)); // Safe option + m_pedestal.push_fast(iy, ix, frame(iy, ix)); // Assume we have reached n_samples in the pedestal, slight performance improvement continue; // It was a pedestal value nothing to store } @@ -157,114 +144,6 @@ class ClusterFinder { } } } - - // // template - // std::vector - // find_clusters_with_threshold(NDView frame, - // Pedestal &pedestal) { - // assert(m_threshold > 0); - // std::vector clusters; - // std::vector> eventMask; - // for (int i = 0; i < frame.shape(0); i++) { - // eventMask.push_back(std::vector(frame.shape(1))); - // } - // double tthr, tthr1, tthr2; - - // NDArray rest({frame.shape(0), frame.shape(1)}); - // NDArray nph({frame.shape(0), frame.shape(1)}); - // // convert to n photons - // // nph = (frame-pedestal.mean()+0.5*m_threshold)/m_threshold; // can - // be - // // optimized with expression templates? - // for (int iy = 0; iy < frame.shape(0); iy++) { - // for (int ix = 0; ix < frame.shape(1); ix++) { - // auto val = frame(iy, ix) - pedestal.mean(iy, ix); - // nph(iy, ix) = (val + 0.5 * m_threshold) / m_threshold; - // nph(iy, ix) = nph(iy, ix) < 0 ? 
0 : nph(iy, ix); - // rest(iy, ix) = val - nph(iy, ix) * m_threshold; - // } - // } - // // iterate over frame pixels - // for (int iy = 0; iy < frame.shape(0); iy++) { - // for (int ix = 0; ix < frame.shape(1); ix++) { - // eventMask[iy][ix] = eventType::PEDESTAL; - // // initialize max and total - // FRAME_TYPE max = std::numeric_limits::min(); - // long double total = 0; - // if (rest(iy, ix) <= 0.25 * m_threshold) { - // pedestal.push(iy, ix, frame(iy, ix)); - // continue; - // } - // eventMask[iy][ix] = eventType::NEIGHBOUR; - // // iterate over cluster pixels around the current pixel - // (ix,iy) for (short ir = -(m_cluster_sizeY / 2); - // ir < (m_cluster_sizeY / 2) + 1; ir++) { - // for (short ic = -(m_cluster_sizeX / 2); - // ic < (m_cluster_sizeX / 2) + 1; ic++) { - // if (ix + ic >= 0 && ix + ic < frame.shape(1) && - // iy + ir >= 0 && iy + ir < frame.shape(0)) { - // auto val = frame(iy + ir, ix + ic) - - // pedestal.mean(iy + ir, ix + ic); - // total += val; - // if (val > max) { - // max = val; - // } - // } - // } - // } - - // auto rms = pedestal.std(iy, ix); - // if (m_nSigma == 0) { - // tthr = m_threshold; - // tthr1 = m_threshold; - // tthr2 = m_threshold; - // } else { - // tthr = m_nSigma * rms; - // tthr1 = m_nSigma * rms * c3; - // tthr2 = m_nSigma * rms * c2; - - // if (m_threshold > 2 * tthr) - // tthr = m_threshold - tthr; - // if (m_threshold > 2 * tthr1) - // tthr1 = tthr - tthr1; - // if (m_threshold > 2 * tthr2) - // tthr2 = tthr - tthr2; - // } - // if (total > tthr1 || max > tthr) { - // eventMask[iy][ix] = eventType::PHOTON; - // nph(iy, ix) += 1; - // rest(iy, ix) -= m_threshold; - // } else { - // pedestal.push(iy, ix, frame(iy, ix)); - // continue; - // } - // if (eventMask[iy][ix] == eventType::PHOTON && - // frame(iy, ix) - pedestal.mean(iy, ix) >= max) { - // eventMask[iy][ix] = eventType::PHOTON_MAX; - // DynamicCluster cluster(m_cluster_sizeX, m_cluster_sizeY, - // Dtype(typeid(FRAME_TYPE))); - // cluster.x = ix; - // 
cluster.y = iy; - // short i = 0; - // for (short ir = -(m_cluster_sizeY / 2); - // ir < (m_cluster_sizeY / 2) + 1; ir++) { - // for (short ic = -(m_cluster_sizeX / 2); - // ic < (m_cluster_sizeX / 2) + 1; ic++) { - // if (ix + ic >= 0 && ix + ic < frame.shape(1) && - // iy + ir >= 0 && iy + ir < frame.shape(0)) { - // auto tmp = frame(iy + ir, ix + ic) - - // pedestal.mean(iy + ir, ix + ic); - // cluster.set(i, tmp); - // i++; - // } - // } - // } - // clusters.push_back(cluster); - // } - // } - // } - // return clusters; - // } }; } // namespace aare \ No newline at end of file diff --git a/include/aare/ClusterFinderMT.hpp b/include/aare/ClusterFinderMT.hpp new file mode 100644 index 0000000..1efb843 --- /dev/null +++ b/include/aare/ClusterFinderMT.hpp @@ -0,0 +1,268 @@ +#pragma once +#include +#include +#include +#include +#include + +#include "aare/ClusterFinder.hpp" +#include "aare/NDArray.hpp" +#include "aare/ProducerConsumerQueue.hpp" + +namespace aare { + +enum class FrameType { + DATA, + PEDESTAL, +}; + +struct FrameWrapper { + FrameType type; + uint64_t frame_number; + NDArray data; +}; + +/** + * @brief ClusterFinderMT is a multi-threaded version of ClusterFinder. It uses + * a producer-consumer queue to distribute the frames to the threads. The + * clusters are collected in a single output queue. 
+ * @tparam FRAME_TYPE type of the frame data + * @tparam PEDESTAL_TYPE type of the pedestal data + * @tparam CT type of the cluster data + */ +template +class ClusterFinderMT { + size_t m_current_thread{0}; + size_t m_n_threads{0}; + using Finder = ClusterFinder; + using InputQueue = ProducerConsumerQueue; + using OutputQueue = ProducerConsumerQueue>; + std::vector> m_input_queues; + std::vector> m_output_queues; + + OutputQueue m_sink{1000}; // All clusters go into this queue + + std::vector> m_cluster_finders; + std::vector m_threads; + std::thread m_collect_thread; + std::chrono::milliseconds m_default_wait{1}; + + std::atomic m_stop_requested{false}; + std::atomic m_processing_threads_stopped{true}; + + /** + * @brief Function called by the processing threads. It reads the frames + * from the input queue and processes them. + */ + void process(int thread_id) { + auto cf = m_cluster_finders[thread_id].get(); + auto q = m_input_queues[thread_id].get(); + bool realloc_same_capacity = true; + + while (!m_stop_requested || !q->isEmpty()) { + if (FrameWrapper *frame = q->frontPtr(); frame != nullptr) { + + switch (frame->type) { + case FrameType::DATA: + cf->find_clusters(frame->data.view(), frame->frame_number); + m_output_queues[thread_id]->write(cf->steal_clusters(realloc_same_capacity)); + break; + + case FrameType::PEDESTAL: + m_cluster_finders[thread_id]->push_pedestal_frame( + frame->data.view()); + break; + } + + // frame is processed now discard it + m_input_queues[thread_id]->popFront(); + } else { + std::this_thread::sleep_for(m_default_wait); + } + } + } + + /** + * @brief Collect all the clusters from the output queues and write them to + * the sink + */ + void collect() { + bool empty = true; + while (!m_stop_requested || !empty || !m_processing_threads_stopped) { + empty = true; + for (auto &queue : m_output_queues) { + if (!queue->isEmpty()) { + + while (!m_sink.write(std::move(*queue->frontPtr()))) { + std::this_thread::sleep_for(m_default_wait); + 
} + queue->popFront(); + empty = false; + } + } + } + } + + public: + /** + * @brief Construct a new ClusterFinderMT object + * @param image_size size of the image + * @param cluster_size size of the cluster + * @param nSigma number of sigma above the pedestal to consider a photon + * @param capacity initial capacity of the cluster vector. Should match + * expected number of clusters in a frame per frame. + * @param n_threads number of threads to use + */ + ClusterFinderMT(Shape<2> image_size, Shape<2> cluster_size, + PEDESTAL_TYPE nSigma = 5.0, size_t capacity = 2000, + size_t n_threads = 3) + : m_n_threads(n_threads) { + for (size_t i = 0; i < n_threads; i++) { + m_cluster_finders.push_back( + std::make_unique>( + image_size, cluster_size, nSigma, capacity)); + } + for (size_t i = 0; i < n_threads; i++) { + m_input_queues.emplace_back(std::make_unique(200)); + m_output_queues.emplace_back(std::make_unique(200)); + } + //TODO! Should we start automatically? + start(); + } + + /** + * @brief Return the sink queue where all the clusters are collected + * @warning You need to empty this queue otherwise the cluster finder will wait forever + */ + ProducerConsumerQueue> *sink() { return &m_sink; } + + /** + * @brief Start all processing threads + */ + void start() { + m_processing_threads_stopped = false; + m_stop_requested = false; + + for (size_t i = 0; i < m_n_threads; i++) { + m_threads.push_back( + std::thread(&ClusterFinderMT::process, this, i)); + } + + m_collect_thread = std::thread(&ClusterFinderMT::collect, this); + } + + /** + * @brief Stop all processing threads + */ + void stop() { + m_stop_requested = true; + + for (auto &thread : m_threads) { + thread.join(); + } + m_threads.clear(); + + m_processing_threads_stopped = true; + m_collect_thread.join(); + } + + /** + * @brief Wait for all the queues to be empty. Mostly used for timing tests. 
+ */ + void sync() { + for (auto &q : m_input_queues) { + while (!q->isEmpty()) { + std::this_thread::sleep_for(m_default_wait); + } + } + for (auto &q : m_output_queues) { + while (!q->isEmpty()) { + std::this_thread::sleep_for(m_default_wait); + } + } + while (!m_sink.isEmpty()) { + std::this_thread::sleep_for(m_default_wait); + } + } + + /** + * @brief Push a pedestal frame to all the cluster finders. The frames is + * expected to be dark. No photon finding is done. Just pedestal update. + */ + void push_pedestal_frame(NDView frame) { + FrameWrapper fw{FrameType::PEDESTAL, 0, + NDArray(frame)}; // TODO! copies the data! + + for (auto &queue : m_input_queues) { + while (!queue->write(fw)) { + std::this_thread::sleep_for(m_default_wait); + } + } + } + + /** + * @brief Push the frame to the queue of the next available thread. Function + * returns once the frame is in a queue. + * @note Spin locks with a default wait if the queue is full. + */ + void find_clusters(NDView frame, uint64_t frame_number = 0) { + FrameWrapper fw{FrameType::DATA, frame_number, + NDArray(frame)}; // TODO! copies the data! 
+ while (!m_input_queues[m_current_thread % m_n_threads]->write(fw)) { + std::this_thread::sleep_for(m_default_wait); + } + m_current_thread++; + } + + void clear_pedestal() { + if (!m_processing_threads_stopped) { + throw std::runtime_error("ClusterFinderMT is still running"); + } + for (auto &cf : m_cluster_finders) { + cf->clear_pedestal(); + } + } + + /** + * @brief Return the pedestal currently used by the cluster finder + * @param thread_index index of the thread + */ + auto pedestal(size_t thread_index = 0) { + if (m_cluster_finders.empty()) { + throw std::runtime_error("No cluster finders available"); + } + if (!m_processing_threads_stopped) { + throw std::runtime_error("ClusterFinderMT is still running"); + } + if (thread_index >= m_cluster_finders.size()) { + throw std::runtime_error("Thread index out of range"); + } + return m_cluster_finders[thread_index]->pedestal(); + } + + /** + * @brief Return the noise currently used by the cluster finder + * @param thread_index index of the thread + */ + auto noise(size_t thread_index = 0) { + if (m_cluster_finders.empty()) { + throw std::runtime_error("No cluster finders available"); + } + if (!m_processing_threads_stopped) { + throw std::runtime_error("ClusterFinderMT is still running"); + } + if (thread_index >= m_cluster_finders.size()) { + throw std::runtime_error("Thread index out of range"); + } + return m_cluster_finders[thread_index]->noise(); + } + + // void push(FrameWrapper&& frame) { + // //TODO! 
need to loop until we are successful + // auto rc = m_input_queue.write(std::move(frame)); + // fmt::print("pushed frame {}\n", rc); + // } +}; + +} // namespace aare \ No newline at end of file diff --git a/include/aare/ClusterVector.hpp b/include/aare/ClusterVector.hpp index 98d4b37..febf06c 100644 --- a/include/aare/ClusterVector.hpp +++ b/include/aare/ClusterVector.hpp @@ -1,4 +1,6 @@ #pragma once +#include +#include #include #include #include @@ -9,19 +11,24 @@ namespace aare { /** - * @brief ClusterVector is a container for clusters of various sizes. It uses a - * contiguous memory buffer to store the clusters. + * @brief ClusterVector is a container for clusters of various sizes. It uses a + * contiguous memory buffer to store the clusters. It is templated on the data + * type and the coordinate type of the clusters. * @note push_back can invalidate pointers to elements in the container + * @warning ClusterVector is currently move only to catch unintended copies, but + * this might change since there are probably use cases where copying is needed. * @tparam T data type of the pixels in the cluster - * @tparam CoordType data type of the x and y coordinates of the cluster (normally int16_t) + * @tparam CoordType data type of the x and y coordinates of the cluster + * (normally int16_t) */ -template class ClusterVector { +template class ClusterVector { using value_type = T; size_t m_cluster_size_x; size_t m_cluster_size_y; std::byte *m_data{}; size_t m_size{0}; size_t m_capacity; + uint64_t m_frame_number{0}; // TODO! 
Check frame number size and type /* Format string used in the python bindings to create a numpy array from the buffer @@ -30,7 +37,7 @@ template class ClusterVector { d - double i - int */ - constexpr static char m_fmt_base[] = "=h:x:\nh:y:\n({},{}){}:data:" ; + constexpr static char m_fmt_base[] = "=h:x:\nh:y:\n({},{}){}:data:"; public: /** @@ -38,30 +45,31 @@ template class ClusterVector { * @param cluster_size_x size of the cluster in x direction * @param cluster_size_y size of the cluster in y direction * @param capacity initial capacity of the buffer in number of clusters + * @param frame_number frame number of the clusters. Default is 0, which is + * also used to indicate that the clusters come from many frames */ - ClusterVector(size_t cluster_size_x, size_t cluster_size_y, - size_t capacity = 1024) + ClusterVector(size_t cluster_size_x = 3, size_t cluster_size_y = 3, + size_t capacity = 1024, uint64_t frame_number = 0) : m_cluster_size_x(cluster_size_x), m_cluster_size_y(cluster_size_y), - m_capacity(capacity) { + m_capacity(capacity), m_frame_number(frame_number) { allocate_buffer(capacity); } - ~ClusterVector() { - delete[] m_data; - } - - //Move constructor + ~ClusterVector() { delete[] m_data; } + + // Move constructor ClusterVector(ClusterVector &&other) noexcept : m_cluster_size_x(other.m_cluster_size_x), m_cluster_size_y(other.m_cluster_size_y), m_data(other.m_data), - m_size(other.m_size), m_capacity(other.m_capacity) { + m_size(other.m_size), m_capacity(other.m_capacity), + m_frame_number(other.m_frame_number) { other.m_data = nullptr; other.m_size = 0; other.m_capacity = 0; } - //Move assignment operator - ClusterVector& operator=(ClusterVector &&other) noexcept { + // Move assignment operator + ClusterVector &operator=(ClusterVector &&other) noexcept { if (this != &other) { delete[] m_data; m_cluster_size_x = other.m_cluster_size_x; @@ -69,9 +77,11 @@ template class ClusterVector { m_data = other.m_data; m_size = other.m_size; m_capacity = 
other.m_capacity; + m_frame_number = other.m_frame_number; other.m_data = nullptr; other.m_size = 0; other.m_capacity = 0; + other.m_frame_number = 0; } return *this; } @@ -79,7 +89,8 @@ template class ClusterVector { /** * @brief Reserve space for at least capacity clusters * @param capacity number of clusters to reserve space for - * @note If capacity is less than the current capacity, the function does nothing. + * @note If capacity is less than the current capacity, the function does + * nothing. */ void reserve(size_t capacity) { if (capacity > m_capacity) { @@ -92,7 +103,8 @@ template class ClusterVector { * @param x x-coordinate of the cluster * @param y y-coordinate of the cluster * @param data pointer to the data of the cluster - * @warning The data pointer must point to a buffer of size cluster_size_x * cluster_size_y * sizeof(T) + * @warning The data pointer must point to a buffer of size cluster_size_x * + * cluster_size_y * sizeof(T) */ void push_back(CoordType x, CoordType y, const std::byte *data) { if (m_size == m_capacity) { @@ -108,7 +120,15 @@ template class ClusterVector { ptr); m_size++; } - + ClusterVector &operator+=(const ClusterVector &other) { + if (m_size + other.m_size > m_capacity) { + allocate_buffer(m_capacity + other.m_size); + } + std::copy(other.m_data, other.m_data + other.m_size * item_size(), + m_data + m_size * item_size()); + m_size += other.m_size; + return *this; + } /** * @brief Sum the pixels in each cluster @@ -116,7 +136,7 @@ template class ClusterVector { */ std::vector sum() { std::vector sums(m_size); - const size_t stride = element_offset(); + const size_t stride = item_size(); const size_t n_pixels = m_cluster_size_x * m_cluster_size_y; std::byte *ptr = m_data + 2 * sizeof(CoordType); // skip x and y @@ -129,26 +149,73 @@ template class ClusterVector { return sums; } - size_t size() const { return m_size; } - size_t capacity() const { return m_capacity; } - /** - * @brief Return the offset in bytes for a single 
cluster + * @brief Return the maximum sum of the 2x2 subclusters in each cluster + * @return std::vector vector of sums for each cluster + * @throws std::runtime_error if the cluster size is not 3x3 + * @warning Only 3x3 clusters are supported for the 2x2 sum. */ - size_t element_offset() const { - return 2*sizeof(CoordType) + + std::vector sum_2x2() { + std::vector sums(m_size); + const size_t stride = item_size(); + + if (m_cluster_size_x != 3 || m_cluster_size_y != 3) { + throw std::runtime_error( + "Only 3x3 clusters are supported for the 2x2 sum."); + } + std::byte *ptr = m_data + 2 * sizeof(CoordType); // skip x and y + + for (size_t i = 0; i < m_size; i++) { + std::array total; + auto T_ptr = reinterpret_cast(ptr); + total[0] = T_ptr[0] + T_ptr[1] + T_ptr[3] + T_ptr[4]; + total[1] = T_ptr[1] + T_ptr[2] + T_ptr[4] + T_ptr[5]; + total[2] = T_ptr[3] + T_ptr[4] + T_ptr[6] + T_ptr[7]; + total[3] = T_ptr[4] + T_ptr[5] + T_ptr[7] + T_ptr[8]; + + sums[i] = *std::max_element(total.begin(), total.end()); + ptr += stride; + } + + return sums; + } + + /** + * @brief Return the number of clusters in the vector + */ + size_t size() const { return m_size; } + + /** + * @brief Return the capacity of the buffer in number of clusters. This is + * the number of clusters that can be stored in the current buffer without + * reallocation. 
+ */ + size_t capacity() const { return m_capacity; } + + /** + * @brief Return the size in bytes of a single cluster + */ + size_t item_size() const { + return 2 * sizeof(CoordType) + m_cluster_size_x * m_cluster_size_y * sizeof(T); } + /** * @brief Return the offset in bytes for the i-th cluster */ - size_t element_offset(size_t i) const { return element_offset() * i; } + size_t element_offset(size_t i) const { return item_size() * i; } /** * @brief Return a pointer to the i-th cluster */ std::byte *element_ptr(size_t i) { return m_data + element_offset(i); } - const std::byte * element_ptr(size_t i) const { return m_data + element_offset(i); } + + /** + * @brief Return a pointer to the i-th cluster + */ + const std::byte *element_ptr(size_t i) const { + return m_data + element_offset(i); + } size_t cluster_size_x() const { return m_cluster_size_x; } size_t cluster_size_y() const { return m_cluster_size_y; } @@ -156,21 +223,49 @@ template class ClusterVector { std::byte *data() { return m_data; } std::byte const *data() const { return m_data; } - template - V& at(size_t i) { - return *reinterpret_cast(element_ptr(i)); + /** + * @brief Return a reference to the i-th cluster casted to type V + * @tparam V type of the cluster + */ + template V &at(size_t i) { + return *reinterpret_cast(element_ptr(i)); } const std::string_view fmt_base() const { - //TODO! how do we match on coord_t? + // TODO! how do we match on coord_t? return m_fmt_base; } + /** + * @brief Return the frame number of the clusters. 0 is used to indicate + * that the clusters come from many frames + */ + uint64_t frame_number() const { return m_frame_number; } + + void set_frame_number(uint64_t frame_number) { + m_frame_number = frame_number; + } + + /** + * @brief Resize the vector to contain new_size clusters. If new_size is + * greater than the current capacity, a new buffer is allocated. If the size + * is smaller no memory is freed, size is just updated. 
+ * @param new_size new size of the vector + * @warning The additional clusters are not initialized + */ + void resize(size_t new_size) { + // TODO! Should we initialize the new clusters? + if (new_size > m_capacity) { + allocate_buffer(new_size); + } + m_size = new_size; + } + private: void allocate_buffer(size_t new_capacity) { - size_t num_bytes = element_offset() * new_capacity; + size_t num_bytes = item_size() * new_capacity; std::byte *new_data = new std::byte[num_bytes]{}; - std::copy(m_data, m_data + element_offset() * m_size, new_data); + std::copy(m_data, m_data + item_size() * m_size, new_data); delete[] m_data; m_data = new_data; m_capacity = new_capacity; diff --git a/include/aare/File.hpp b/include/aare/File.hpp index 7aa30e1..1cef898 100644 --- a/include/aare/File.hpp +++ b/include/aare/File.hpp @@ -36,6 +36,8 @@ class File { File(File &&other) noexcept; File& operator=(File &&other) noexcept; ~File() = default; + + // void close(); //!< close the file Frame read_frame(); //!< read one frame from the file at the current position Frame read_frame(size_t frame_index); //!< read one frame at the position given by frame number diff --git a/include/aare/Fit.hpp b/include/aare/Fit.hpp new file mode 100644 index 0000000..20ef4ef --- /dev/null +++ b/include/aare/Fit.hpp @@ -0,0 +1,76 @@ +#pragma once + +#include +#include +#include + +#include "aare/NDArray.hpp" + +namespace aare { + +namespace func { +double gaus(const double x, const double *par); +NDArray gaus(NDView x, NDView par); + +double pol1(const double x, const double *par); +NDArray pol1(NDView x, NDView par); + +} // namespace func + +static constexpr int DEFAULT_NUM_THREADS = 4; + +/** + * @brief Fit a 1D Gaussian to data. + * @param data data to fit + * @param x x values + */ +NDArray fit_gaus(NDView x, NDView y); + + +/** + * @brief Fit a 1D Gaussian to each pixel. 
Data layout [row, col, values] + * @param x x values + * @param y y vales, layout [row, col, values] + * @param n_threads number of threads to use + */ +NDArray fit_gaus(NDView x, NDView y, int n_threads = DEFAULT_NUM_THREADS); + + +/** + * @brief Fit a 1D Gaussian with error estimates + * @param x x values + * @param y y vales, layout [row, col, values] + * @param y_err error in y, layout [row, col, values] + * @param par_out output parameters + * @param par_err_out output error parameters + */ +void fit_gaus(NDView x, NDView y, NDView y_err, + NDView par_out, NDView par_err_out); + +/** + * @brief Fit a 1D Gaussian to each pixel with error estimates. Data layout [row, col, values] + * @param x x values + * @param y y vales, layout [row, col, values] + * @param y_err error in y, layout [row, col, values] + * @param par_out output parameters, layout [row, col, values] + * @param par_err_out output parameter errors, layout [row, col, values] + * @param n_threads number of threads to use + */ +void fit_gaus(NDView x, NDView y, NDView y_err, + NDView par_out, NDView par_err_out, int n_threads = DEFAULT_NUM_THREADS); + + +NDArray fit_pol1(NDView x, NDView y); + +NDArray fit_pol1(NDView x, NDView y, int n_threads = DEFAULT_NUM_THREADS); + +void fit_pol1(NDView x, NDView y, + NDView y_err, NDView par_out, + NDView par_err_out); + +//TODO! 
not sure we need to offer the different version in C++ +void fit_pol1(NDView x, NDView y, + NDView y_err, NDView par_out, + NDView par_err_out, int n_threads = DEFAULT_NUM_THREADS); + +} // namespace aare \ No newline at end of file diff --git a/include/aare/Pedestal.hpp b/include/aare/Pedestal.hpp index bda94f2..102d730 100644 --- a/include/aare/Pedestal.hpp +++ b/include/aare/Pedestal.hpp @@ -89,6 +89,7 @@ template class Pedestal { m_sum = 0; m_sum2 = 0; m_cur_samples = 0; + m_mean = 0; } @@ -97,6 +98,7 @@ template class Pedestal { m_sum(row, col) = 0; m_sum2(row, col) = 0; m_cur_samples(row, col) = 0; + m_mean(row, col) = 0; } @@ -119,7 +121,7 @@ template class Pedestal { /** * Push but don't update the cached mean. Speeds up the process - * when intitializing the pedestal. + * when initializing the pedestal. * */ template void push_no_update(NDView frame) { @@ -165,8 +167,8 @@ template class Pedestal { m_sum2(row, col) += val * val; m_cur_samples(row, col)++; } else { - m_sum(row, col) += val - m_sum(row, col) / m_cur_samples(row, col); - m_sum2(row, col) += val * val - m_sum2(row, col) / m_cur_samples(row, col); + m_sum(row, col) += val - m_sum(row, col) / m_samples; + m_sum2(row, col) += val * val - m_sum2(row, col) / m_samples; } //Since we just did a push we know that m_cur_samples(row, col) is at least 1 m_mean(row, col) = m_sum(row, col) / m_cur_samples(row, col); diff --git a/include/aare/ProducerConsumerQueue.hpp b/include/aare/ProducerConsumerQueue.hpp new file mode 100644 index 0000000..426b9e2 --- /dev/null +++ b/include/aare/ProducerConsumerQueue.hpp @@ -0,0 +1,203 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// @author Bo Hu (bhu@fb.com) +// @author Jordan DeLong (delong.j@fb.com) + +// Changes made by PSD Detector Group: +// Copied: Line 34 constexpr std::size_t hardware_destructive_interference_size = 128; from folly/lang/Align.h +// Changed extension to .hpp +// Changed namespace to aare + +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +constexpr std::size_t hardware_destructive_interference_size = 128; +namespace aare { + +/* + * ProducerConsumerQueue is a one producer and one consumer queue + * without locks. + */ +template struct ProducerConsumerQueue { + typedef T value_type; + + ProducerConsumerQueue(const ProducerConsumerQueue &) = delete; + ProducerConsumerQueue &operator=(const ProducerConsumerQueue &) = delete; + + + ProducerConsumerQueue(ProducerConsumerQueue &&other){ + size_ = other.size_; + records_ = other.records_; + other.records_ = nullptr; + readIndex_ = other.readIndex_.load(std::memory_order_acquire); + writeIndex_ = other.writeIndex_.load(std::memory_order_acquire); + } + ProducerConsumerQueue &operator=(ProducerConsumerQueue &&other){ + size_ = other.size_; + records_ = other.records_; + other.records_ = nullptr; + readIndex_ = other.readIndex_.load(std::memory_order_acquire); + writeIndex_ = other.writeIndex_.load(std::memory_order_acquire); + return *this; + } + + + ProducerConsumerQueue():ProducerConsumerQueue(2){}; + // size must be >= 2. 
+ // + // Also, note that the number of usable slots in the queue at any + // given time is actually (size-1), so if you start with an empty queue, + // isFull() will return true after size-1 insertions. + explicit ProducerConsumerQueue(uint32_t size) + : size_(size), records_(static_cast(std::malloc(sizeof(T) * size))), readIndex_(0), writeIndex_(0) { + assert(size >= 2); + if (!records_) { + throw std::bad_alloc(); + } + } + + ~ProducerConsumerQueue() { + // We need to destruct anything that may still exist in our queue. + // (No real synchronization needed at destructor time: only one + // thread can be doing this.) + if (!std::is_trivially_destructible::value) { + size_t readIndex = readIndex_; + size_t endIndex = writeIndex_; + while (readIndex != endIndex) { + records_[readIndex].~T(); + if (++readIndex == size_) { + readIndex = 0; + } + } + } + + std::free(records_); + } + + template bool write(Args &&...recordArgs) { + auto const currentWrite = writeIndex_.load(std::memory_order_relaxed); + auto nextRecord = currentWrite + 1; + if (nextRecord == size_) { + nextRecord = 0; + } + if (nextRecord != readIndex_.load(std::memory_order_acquire)) { + new (&records_[currentWrite]) T(std::forward(recordArgs)...); + writeIndex_.store(nextRecord, std::memory_order_release); + return true; + } + + // queue is full + return false; + } + + // move (or copy) the value at the front of the queue to given variable + bool read(T &record) { + auto const currentRead = readIndex_.load(std::memory_order_relaxed); + if (currentRead == writeIndex_.load(std::memory_order_acquire)) { + // queue is empty + return false; + } + + auto nextRecord = currentRead + 1; + if (nextRecord == size_) { + nextRecord = 0; + } + record = std::move(records_[currentRead]); + records_[currentRead].~T(); + readIndex_.store(nextRecord, std::memory_order_release); + return true; + } + + // pointer to the value at the front of the queue (for use in-place) or + // nullptr if empty. 
+ T *frontPtr() { + auto const currentRead = readIndex_.load(std::memory_order_relaxed); + if (currentRead == writeIndex_.load(std::memory_order_acquire)) { + // queue is empty + return nullptr; + } + return &records_[currentRead]; + } + + // queue must not be empty + void popFront() { + auto const currentRead = readIndex_.load(std::memory_order_relaxed); + assert(currentRead != writeIndex_.load(std::memory_order_acquire)); + + auto nextRecord = currentRead + 1; + if (nextRecord == size_) { + nextRecord = 0; + } + records_[currentRead].~T(); + readIndex_.store(nextRecord, std::memory_order_release); + } + + bool isEmpty() const { + return readIndex_.load(std::memory_order_acquire) == writeIndex_.load(std::memory_order_acquire); + } + + bool isFull() const { + auto nextRecord = writeIndex_.load(std::memory_order_acquire) + 1; + if (nextRecord == size_) { + nextRecord = 0; + } + if (nextRecord != readIndex_.load(std::memory_order_acquire)) { + return false; + } + // queue is full + return true; + } + + // * If called by consumer, then true size may be more (because producer may + // be adding items concurrently). + // * If called by producer, then true size may be less (because consumer may + // be removing items concurrently). + // * It is undefined to call this from any other thread. + size_t sizeGuess() const { + int ret = writeIndex_.load(std::memory_order_acquire) - readIndex_.load(std::memory_order_acquire); + if (ret < 0) { + ret += size_; + } + return ret; + } + + // maximum number of items in the queue. 
+ size_t capacity() const { return size_ - 1; } + + private: + using AtomicIndex = std::atomic; + + char pad0_[hardware_destructive_interference_size]; + // const uint32_t size_; + uint32_t size_; + // T *const records_; + T* records_; + + alignas(hardware_destructive_interference_size) AtomicIndex readIndex_; + alignas(hardware_destructive_interference_size) AtomicIndex writeIndex_; + + char pad1_[hardware_destructive_interference_size - sizeof(AtomicIndex)]; +}; + +} // namespace aare \ No newline at end of file diff --git a/include/aare/RawFile.hpp b/include/aare/RawFile.hpp index eb044e3..f744ac2 100644 --- a/include/aare/RawFile.hpp +++ b/include/aare/RawFile.hpp @@ -34,15 +34,19 @@ class RawFile : public FileInterface { size_t n_subfile_parts{}; // d0,d1...dn //TODO! move to vector of SubFile instead of pointers std::vector> subfiles; //subfiles[f0,f1...fn][d0,d1...dn] - std::vector positions; - std::vector m_module_pixel_0; + // std::vector positions; + ModuleConfig cfg{0, 0}; RawMasterFile m_master; size_t m_current_frame{}; - size_t m_rows{}; - size_t m_cols{}; + + // std::vector m_module_pixel_0; + // size_t m_rows{}; + // size_t m_cols{}; + + DetectorGeometry m_geometry; public: /** @@ -111,11 +115,12 @@ class RawFile : public FileInterface { */ static DetectorHeader read_header(const std::filesystem::path &fname); - void update_geometry_with_roi(); + // void update_geometry_with_roi(); int find_number_of_subfiles(); void open_subfiles(); void find_geometry(); }; + } // namespace aare \ No newline at end of file diff --git a/include/aare/RawMasterFile.hpp b/include/aare/RawMasterFile.hpp index 42c324e..beaeb29 100644 --- a/include/aare/RawMasterFile.hpp +++ b/include/aare/RawMasterFile.hpp @@ -62,17 +62,6 @@ class ScanParameters { }; -struct ROI{ - int64_t xmin{}; - int64_t xmax{}; - int64_t ymin{}; - int64_t ymax{}; - - int64_t height() const { return ymax - ymin; } - int64_t width() const { return xmax - xmin; } -}; - - /** * @brief Class for parsing a 
master file either in our .json format or the old * .raw format diff --git a/include/aare/RawSubFile.hpp b/include/aare/RawSubFile.hpp index 4d78670..89c278e 100644 --- a/include/aare/RawSubFile.hpp +++ b/include/aare/RawSubFile.hpp @@ -66,6 +66,9 @@ class RawSubFile { size_t pixels_per_frame() const { return m_rows * m_cols; } size_t bytes_per_pixel() const { return m_bitdepth / 8; } +private: + template + void read_with_map(std::byte *image_buf); }; diff --git a/include/aare/decode.hpp b/include/aare/decode.hpp new file mode 100644 index 0000000..1c3c479 --- /dev/null +++ b/include/aare/decode.hpp @@ -0,0 +1,13 @@ +#pragma once + +#include +#include +namespace aare { + + +uint16_t adc_sar_05_decode64to16(uint64_t input); +uint16_t adc_sar_04_decode64to16(uint64_t input); +void adc_sar_05_decode64to16(NDView input, NDView output); +void adc_sar_04_decode64to16(NDView input, NDView output); + +} // namespace aare \ No newline at end of file diff --git a/include/aare/defs.hpp b/include/aare/defs.hpp index 7466410..db1a47b 100644 --- a/include/aare/defs.hpp +++ b/include/aare/defs.hpp @@ -179,13 +179,42 @@ template struct t_xy { using xy = t_xy; +/** + * @brief Class to hold the geometry of a module. Where pixel 0 is located and the size of the module + */ struct ModuleGeometry{ - int x{}; - int y{}; + int origin_x{}; + int origin_y{}; int height{}; int width{}; + int row_index{}; + int col_index{}; }; +/** + * @brief Class to hold the geometry of a detector. 
Number of modules, their size and where pixel 0 + * for each module is located + */ +struct DetectorGeometry{ + int modules_x{}; + int modules_y{}; + int pixels_x{}; + int pixels_y{}; + int module_gap_row{}; + int module_gap_col{}; + std::vector module_pixel_0; +}; + +struct ROI{ + int64_t xmin{}; + int64_t xmax{}; + int64_t ymin{}; + int64_t ymax{}; + + int64_t height() const { return ymax - ymin; } + int64_t width() const { return xmax - xmin; } + }; + using dynamic_shape = std::vector; diff --git a/include/aare/geo_helpers.hpp b/include/aare/geo_helpers.hpp new file mode 100644 index 0000000..d0d5d1a --- /dev/null +++ b/include/aare/geo_helpers.hpp @@ -0,0 +1,16 @@ +#pragma once +#include "aare/defs.hpp" +#include "aare/RawMasterFile.hpp" //ROI refactor away +namespace aare{ + +/** + * @brief Update the detector geometry given a region of interest + * + * @param geo + * @param roi + * @return DetectorGeometry + */ +DetectorGeometry update_geometry_with_roi(DetectorGeometry geo, ROI roi); + + +} // namespace aare \ No newline at end of file diff --git a/include/aare/utils/task.hpp b/include/aare/utils/task.hpp new file mode 100644 index 0000000..a6ee142 --- /dev/null +++ b/include/aare/utils/task.hpp @@ -0,0 +1,8 @@ + +#include +#include + +namespace aare { +std::vector> split_task(int first, int last, int n_threads); + +} // namespace aare \ No newline at end of file diff --git a/patches/lmfit.patch b/patches/lmfit.patch new file mode 100644 index 0000000..22063bf --- /dev/null +++ b/patches/lmfit.patch @@ -0,0 +1,13 @@ +diff --git a/lib/CMakeLists.txt b/lib/CMakeLists.txt +index 4efb7ed..6533660 100644 +--- a/lib/CMakeLists.txt ++++ b/lib/CMakeLists.txt +@@ -11,7 +11,7 @@ target_compile_definitions(${lib} PRIVATE "LMFIT_EXPORT") # for Windows DLL expo + + target_include_directories(${lib} + PUBLIC +- $ ++ $ + $ + ) + diff --git a/pyproject.toml b/pyproject.toml index b839003..74e624f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend 
= "scikit_build_core.build" [project] name = "aare" -version = "2024.12.16.dev0" +version = "2025.2.12" [tool.scikit-build] cmake.verbose = true diff --git a/python/CMakeLists.txt b/python/CMakeLists.txt index 89ad5e7..2aaa222 100644 --- a/python/CMakeLists.txt +++ b/python/CMakeLists.txt @@ -28,6 +28,7 @@ target_link_libraries(_aare PRIVATE aare_core aare_compiler_flags) set( PYTHON_FILES aare/__init__.py aare/CtbRawFile.py + aare/func.py aare/RawFile.py aare/transform.py aare/ScanParameters.py @@ -43,10 +44,17 @@ set_target_properties(_aare PROPERTIES LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/aare ) +set(PYTHON_EXAMPLES + examples/play.py + examples/fits.py +) -# Copy the examples/scripts to the build directory -configure_file(examples/play.py ${CMAKE_BINARY_DIR}/play.py) + +# Copy the python examples to the build directory +foreach(FILE ${PYTHON_EXAMPLES}) + configure_file(${FILE} ${CMAKE_BINARY_DIR}/${FILE} ) +endforeach(FILE ${PYTHON_EXAMPLES}) if(AARE_INSTALL_PYTHONEXT) diff --git a/python/aare/__init__.py b/python/aare/__init__.py index fb34c7a..f4c19cc 100644 --- a/python/aare/__init__.py +++ b/python/aare/__init__.py @@ -8,8 +8,16 @@ from ._aare import DetectorType from ._aare import ClusterFile from ._aare import hitmap +from ._aare import ClusterFinderMT, ClusterCollector, ClusterFileSink, ClusterVector_i + +from ._aare import fit_gaus, fit_pol1 + from .CtbRawFile import CtbRawFile from .RawFile import RawFile from .ScanParameters import ScanParameters -from .utils import random_pixels, random_pixel \ No newline at end of file +from .utils import random_pixels, random_pixel, flat_list + + +#make functions available in the top level API +from .func import * diff --git a/python/aare/func.py b/python/aare/func.py new file mode 100644 index 0000000..ca60cf2 --- /dev/null +++ b/python/aare/func.py @@ -0,0 +1 @@ +from ._aare import gaus, pol1 \ No newline at end of file diff --git a/python/aare/transform.py b/python/aare/transform.py index 414eb27..2f66942 
100644 --- a/python/aare/transform.py +++ b/python/aare/transform.py @@ -2,6 +2,14 @@ import numpy as np from . import _aare +class AdcSar04Transform64to16: + def __call__(self, data): + return _aare.adc_sar_04_decode64to16(data) + +class AdcSar05Transform64to16: + def __call__(self, data): + return _aare.adc_sar_05_decode64to16(data) + class Moench05Transform: #Could be moved to C++ without changing the interface def __init__(self): @@ -45,4 +53,6 @@ class Matterhorn02Transform: moench05 = Moench05Transform() moench05_1g = Moench05Transform1g() moench05_old = Moench05TransformOld() -matterhorn02 = Matterhorn02Transform() \ No newline at end of file +matterhorn02 = Matterhorn02Transform() +adc_sar_04_64to16 = AdcSar04Transform64to16() +adc_sar_05_64to16 = AdcSar05Transform64to16() \ No newline at end of file diff --git a/python/aare/utils.py b/python/aare/utils.py index d53f844..4708921 100644 --- a/python/aare/utils.py +++ b/python/aare/utils.py @@ -20,4 +20,8 @@ def random_pixel(xmin=0, xmax=512, ymin=0, ymax=1024): Returns: tuple: (row, col) """ - return random_pixels(1, xmin, xmax, ymin, ymax)[0] \ No newline at end of file + return random_pixels(1, xmin, xmax, ymin, ymax)[0] + +def flat_list(xss): + """Flatten a list of lists.""" + return [x for xs in xss for x in xs] \ No newline at end of file diff --git a/python/examples/fits.py b/python/examples/fits.py new file mode 100644 index 0000000..aa3aef6 --- /dev/null +++ b/python/examples/fits.py @@ -0,0 +1,79 @@ +import matplotlib.pyplot as plt +import numpy as np +from aare import fit_gaus, fit_pol1 +from aare import gaus, pol1 + +textpm = f"±" # +textmu = f"μ" # +textsigma = f"σ" # + + + +# ================================= Gauss fit ================================= +# Parameters +mu = np.random.uniform(1, 100) # Mean of Gaussian +sigma = np.random.uniform(4, 20) # Standard deviation +num_points = 10000 # Number of points for smooth distribution +noise_sigma = 100 + +# Generate Gaussian distribution +data = 
np.random.normal(mu, sigma, num_points) + +# Generate errors for each point +errors = np.abs(np.random.normal(0, sigma, num_points)) # Errors with mean 0, std 0.5 + +# Create subplot +fig0, ax0 = plt.subplots(1, 1, num=0, figsize=(12, 8)) + +x = np.histogram(data, bins=30)[1][:-1] + 0.05 +y = np.histogram(data, bins=30)[0] +yerr = errors[:30] + + +# Add the errors as error bars in the step plot +ax0.errorbar(x, y, yerr=yerr, fmt=". ", capsize=5) +ax0.grid() + +par, err = fit_gaus(x, y, yerr) +print(par, err) + +x = np.linspace(x[0], x[-1], 1000) +ax0.plot(x, gaus(x, par), marker="") +ax0.set(xlabel="x", ylabel="Counts", title=f"A0 = {par[0]:0.2f}{textpm}{err[0]:0.2f}\n" + f"{textmu} = {par[1]:0.2f}{textpm}{err[1]:0.2f}\n" + f"{textsigma} = {par[2]:0.2f}{textpm}{err[2]:0.2f}\n" + f"(init: {textmu}: {mu:0.2f}, {textsigma}: {sigma:0.2f})") +fig0.tight_layout() + + + +# ================================= pol1 fit ================================= +# Parameters +n_points = 40 + +# Generate random slope and intercept (origin) +slope = np.random.uniform(-10, 10) # Random slope between 0.5 and 2.0 +intercept = np.random.uniform(-10, 10) # Random intercept between -10 and 10 + +# Generate random x values +x_values = np.random.uniform(-10, 10, n_points) + +# Calculate y values based on the linear function y = mx + b + error +errors = np.abs(np.random.normal(0, np.random.uniform(1, 5), n_points)) +var_points = np.random.normal(0, np.random.uniform(0.1, 2), n_points) +y_values = slope * x_values + intercept + var_points + +fig1, ax1 = plt.subplots(1, 1, num=1, figsize=(12, 8)) +ax1.errorbar(x_values, y_values, yerr=errors, fmt=". 
", capsize=5) +par, err = fit_pol1(x_values, y_values, errors) + + +x = np.linspace(np.min(x_values), np.max(x_values), 1000) +ax1.plot(x, pol1(x, par), marker="") +ax1.set(xlabel="x", ylabel="y", title=f"a = {par[0]:0.2f}{textpm}{err[0]:0.2f}\n" + f"b = {par[1]:0.2f}{textpm}{err[1]:0.2f}\n" + f"(init: {slope:0.2f}, {intercept:0.2f})") +fig1.tight_layout() + +plt.show() + diff --git a/python/examples/play.py b/python/examples/play.py index 986b718..f1a869b 100644 --- a/python/examples/play.py +++ b/python/examples/play.py @@ -8,51 +8,61 @@ import numpy as np import boost_histogram as bh import time -from aare import File, ClusterFinder, VarClusterFinder +<<<<<<< HEAD +from aare import File, ClusterFinder, VarClusterFinder, ClusterFile, CtbRawFile +from aare import gaus, fit_gaus -base = Path('/mnt/sls_det_storage/matterhorn_data/aare_test_data/') - -f = File(base/'Moench03new/cu_half_speed_master_4.json') -cf = ClusterFinder((400,400), (3,3)) -for i in range(1000): - cf.push_pedestal_frame(f.read_frame()) - -fig, ax = plt.subplots() -im = ax.imshow(cf.pedestal()) -cf.pedestal() -cf.noise() - - - -N = 500 -t0 = time.perf_counter() -hist1 = bh.Histogram(bh.axis.Regular(40, -2, 4000)) -f.seek(0) +base = Path('/mnt/sls_det_storage/moench_data/Julian/MOENCH05/20250113_first_xrays_redo/raw_files/') +cluster_file = Path('/home/l_msdetect/erik/tmp/Cu.clust') t0 = time.perf_counter() -data = f.read_n(N) +offset= -0.5 +hist3d = bh.Histogram( + bh.axis.Regular(160, 0+offset, 160+offset), #x + bh.axis.Regular(150, 0+offset, 150+offset), #y + bh.axis.Regular(200, 0, 6000), #ADU +) + +total_clusters = 0 +with ClusterFile(cluster_file, chunk_size = 1000) as f: + for i, clusters in enumerate(f): + arr = np.array(clusters) + total_clusters += clusters.size + hist3d.fill(arr['y'],arr['x'], clusters.sum_2x2()) #python talks [row, col] cluster finder [x,y] +======= +from aare import RawFile + +f = 
RawFile('/mnt/sls_det_storage/jungfrau_data1/vadym_tests/jf12_M431/laser_scan/laserScan_pedestal_G0_master_0.json') + +print(f'{f.frame_number(1)}') + +for i in range(10): + header, img = f.read_frame() + print(header['frameNumber'], img.shape) +>>>>>>> developer + + t_elapsed = time.perf_counter()-t0 +print(f'Histogram filling took: {t_elapsed:.3f}s {total_clusters/t_elapsed/1e6:.3f}M clusters/s') +histogram_data = hist3d.counts() +x = hist3d.axes[2].edges[:-1] -n_bytes = data.itemsize*data.size +y = histogram_data[100,100,:] +xx = np.linspace(x[0], x[-1]) +# fig, ax = plt.subplots() +# ax.step(x, y, where = 'post') -print(f'Reading {N} frames took {t_elapsed:.3f}s {N/t_elapsed:.0f} FPS, {n_bytes/1024**2:.4f} GB/s') +y_err = np.sqrt(y) +y_err = np.zeros(y.size) +y_err += 1 +# par = fit_gaus2(y,x, y_err) +# ax.plot(xx, gaus(xx,par)) +# print(par) -for frame in data: - a = cf.find_clusters(frame) +res = fit_gaus(y,x) +res2 = fit_gaus(y,x, y_err) +print(res) +print(res2) -clusters = cf.steal_clusters() - -# t_elapsed = time.perf_counter()-t0 -# print(f'Clustering {N} frames took {t_elapsed:.2f}s {N/t_elapsed:.0f} FPS') - - -# t0 = time.perf_counter() -# total_clusters = clusters.size - -# hist1.fill(clusters.sum()) - -# t_elapsed = time.perf_counter()-t0 -# print(f'Filling histogram with the sum of {total_clusters} clusters took: {t_elapsed:.3f}s, {total_clusters/t_elapsed:.3g} clust/s') -# print(f'Average number of clusters per frame {total_clusters/N:.3f}') \ No newline at end of file diff --git a/python/src/cluster.hpp b/python/src/cluster.hpp index d11c706..792b7e6 100644 --- a/python/src/cluster.hpp +++ b/python/src/cluster.hpp @@ -1,4 +1,7 @@ +#include "aare/ClusterCollector.hpp" +#include "aare/ClusterFileSink.hpp" #include "aare/ClusterFinder.hpp" +#include "aare/ClusterFinderMT.hpp" #include "aare/ClusterVector.hpp" #include "aare/NDView.hpp" #include "aare/Pedestal.hpp" @@ -8,9 +11,10 @@ #include #include #include +#include namespace py = pybind11; -using 
pd_type = float; +using pd_type = double; template void define_cluster_vector(py::module &m, const std::string &typestr) { @@ -18,93 +22,166 @@ void define_cluster_vector(py::module &m, const std::string &typestr) { py::class_>(m, class_name.c_str(), py::buffer_protocol()) .def(py::init()) .def_property_readonly("size", &ClusterVector::size) - .def("element_offset", - py::overload_cast<>(&ClusterVector::element_offset, py::const_)) + .def("item_size", &ClusterVector::item_size) .def_property_readonly("fmt", - [typestr](ClusterVector &self) { - return fmt::format( - self.fmt_base(), self.cluster_size_x(), - self.cluster_size_y(), typestr); + [typestr](ClusterVector &self) { + return fmt::format( + self.fmt_base(), self.cluster_size_x(), + self.cluster_size_y(), typestr); + }) + .def("sum", + [](ClusterVector &self) { + auto *vec = new std::vector(self.sum()); + return return_vector(vec); }) - .def("sum", [](ClusterVector &self) { - auto *vec = new std::vector(self.sum()); + .def("sum_2x2", [](ClusterVector &self) { + auto *vec = new std::vector(self.sum_2x2()); return return_vector(vec); }) + .def_property_readonly("capacity", &ClusterVector::capacity) + .def_property("frame_number", &ClusterVector::frame_number, + &ClusterVector::set_frame_number) .def_buffer([typestr](ClusterVector &self) -> py::buffer_info { return py::buffer_info( - self.data(), /* Pointer to buffer */ - self.element_offset(), /* Size of one scalar */ + self.data(), /* Pointer to buffer */ + self.item_size(), /* Size of one scalar */ fmt::format(self.fmt_base(), self.cluster_size_x(), self.cluster_size_y(), typestr), /* Format descriptor */ 1, /* Number of dimensions */ - {self.size()}, /* Buffer dimensions */ - {self.element_offset()} /* Strides (in bytes) for each index */ + {self.size()}, /* Buffer dimensions */ + {self.item_size()} /* Strides (in bytes) for each index */ ); }); } +void define_cluster_finder_mt_bindings(py::module &m) { + py::class_>(m, "ClusterFinderMT") + .def(py::init, 
Shape<2>, pd_type, size_t, size_t>(), + py::arg("image_size"), py::arg("cluster_size"), + py::arg("n_sigma") = 5.0, py::arg("capacity") = 2048, + py::arg("n_threads") = 3) + .def("push_pedestal_frame", + [](ClusterFinderMT &self, + py::array_t frame) { + auto view = make_view_2d(frame); + self.push_pedestal_frame(view); + }) + .def( + "find_clusters", + [](ClusterFinderMT &self, + py::array_t frame, uint64_t frame_number) { + auto view = make_view_2d(frame); + self.find_clusters(view, frame_number); + return; + }, + py::arg(), py::arg("frame_number") = 0) + .def("clear_pedestal", &ClusterFinderMT::clear_pedestal) + .def("sync", &ClusterFinderMT::sync) + .def("stop", &ClusterFinderMT::stop) + .def("start", &ClusterFinderMT::start) + .def("pedestal", + [](ClusterFinderMT &self, size_t thread_index) { + auto pd = new NDArray{}; + *pd = self.pedestal(thread_index); + return return_image_data(pd); + },py::arg("thread_index") = 0) + .def("noise", + [](ClusterFinderMT &self, size_t thread_index) { + auto arr = new NDArray{}; + *arr = self.noise(thread_index); + return return_image_data(arr); + },py::arg("thread_index") = 0); +} + +void define_cluster_collector_bindings(py::module &m) { + py::class_(m, "ClusterCollector") + .def(py::init *>()) + .def("stop", &ClusterCollector::stop) + .def( + "steal_clusters", + [](ClusterCollector &self) { + auto v = + new std::vector>(self.steal_clusters()); + return v; + }, + py::return_value_policy::take_ownership); +} + +void define_cluster_file_sink_bindings(py::module &m) { + py::class_(m, "ClusterFileSink") + .def(py::init *, + const std::filesystem::path &>()) + .def("stop", &ClusterFileSink::stop); +} + void define_cluster_finder_bindings(py::module &m) { py::class_>(m, "ClusterFinder") - .def(py::init, Shape<2>, pd_type, size_t>(), py::arg("image_size"), - py::arg("cluster_size"), py::arg("n_sigma") = 5.0, - py::arg("capacity") = 1'000'000) + .def(py::init, Shape<2>, pd_type, size_t>(), + py::arg("image_size"), 
py::arg("cluster_size"), + py::arg("n_sigma") = 5.0, py::arg("capacity") = 1'000'000) .def("push_pedestal_frame", [](ClusterFinder &self, py::array_t frame) { auto view = make_view_2d(frame); self.push_pedestal_frame(view); }) - .def("pedestal", - [](ClusterFinder &self) { - auto pd = new NDArray{}; - *pd = self.pedestal(); - return return_image_data(pd); - }) - .def("noise", - [](ClusterFinder &self) { - auto arr = new NDArray{}; - *arr = self.noise(); - return return_image_data(arr); - }) - .def("steal_clusters", - [](ClusterFinder &self, bool realloc_same_capacity) { - auto v = new ClusterVector(self.steal_clusters(realloc_same_capacity)); - return v; - }, py::arg("realloc_same_capacity") = false) - .def("find_clusters", - [](ClusterFinder &self, - py::array_t frame) { - auto view = make_view_2d(frame); - self.find_clusters(view); - return; - }); + .def("clear_pedestal", &ClusterFinder::clear_pedestal) + .def_property_readonly("pedestal", + [](ClusterFinder &self) { + auto pd = new NDArray{}; + *pd = self.pedestal(); + return return_image_data(pd); + }) + .def_property_readonly("noise", + [](ClusterFinder &self) { + auto arr = new NDArray{}; + *arr = self.noise(); + return return_image_data(arr); + }) + .def( + "steal_clusters", + [](ClusterFinder &self, + bool realloc_same_capacity) { + auto v = new ClusterVector( + self.steal_clusters(realloc_same_capacity)); + return v; + }, + py::arg("realloc_same_capacity") = false) + .def( + "find_clusters", + [](ClusterFinder &self, + py::array_t frame, uint64_t frame_number) { + auto view = make_view_2d(frame); + self.find_clusters(view, frame_number); + return; + }, + py::arg(), py::arg("frame_number") = 0); - m.def("hitmap", [](std::array image_size, ClusterVector& cv){ - - py::array_t hitmap(image_size); - auto r = hitmap.mutable_unchecked<2>(); + m.def("hitmap", + [](std::array image_size, ClusterVector &cv) { + py::array_t hitmap(image_size); + auto r = hitmap.mutable_unchecked<2>(); - // Initialize hitmap to 0 - 
for (py::ssize_t i = 0; i < r.shape(0); i++) - for (py::ssize_t j = 0; j < r.shape(1); j++) - r(i, j) = 0; + // Initialize hitmap to 0 + for (py::ssize_t i = 0; i < r.shape(0); i++) + for (py::ssize_t j = 0; j < r.shape(1); j++) + r(i, j) = 0; - size_t stride = cv.element_offset(); - auto ptr = cv.data(); - for(size_t i=0; i(ptr); - auto y = *reinterpret_cast(ptr+sizeof(int16_t)); - r(y, x) += 1; - ptr += stride; - } - return hitmap; - }); + size_t stride = cv.item_size(); + auto ptr = cv.data(); + for (size_t i = 0; i < cv.size(); i++) { + auto x = *reinterpret_cast(ptr); + auto y = *reinterpret_cast(ptr + sizeof(int16_t)); + r(y, x) += 1; + ptr += stride; + } + return hitmap; + }); define_cluster_vector(m, "i"); define_cluster_vector(m, "d"); define_cluster_vector(m, "f"); - py::class_(m, "DynamicCluster", py::buffer_protocol()) .def(py::init()) .def("size", &DynamicCluster::size) diff --git a/python/src/cluster_file.hpp b/python/src/cluster_file.hpp index 82870c4..8a431b5 100644 --- a/python/src/cluster_file.hpp +++ b/python/src/cluster_file.hpp @@ -28,27 +28,24 @@ void define_cluster_file_io_bindings(py::module &m) { py::arg(), py::arg("chunk_size") = 1000, py::arg("mode") = "r") .def("read_clusters", [](ClusterFile &self, size_t n_clusters) { - auto *vec = - new std::vector(self.read_clusters(n_clusters)); - return return_vector(vec); - }) + auto v = new ClusterVector(self.read_clusters(n_clusters)); + return v; + },py::return_value_policy::take_ownership) .def("read_frame", [](ClusterFile &self) { - int32_t frame_number; - auto *vec = - new std::vector(self.read_frame(frame_number)); - return py::make_tuple(frame_number, return_vector(vec)); + auto v = new ClusterVector(self.read_frame()); + return v; }) .def("write_frame", &ClusterFile::write_frame) - .def("read_cluster_with_cut", - [](ClusterFile &self, size_t n_clusters, - py::array_t noise_map, int nx, int ny) { - auto view = make_view_2d(noise_map); - auto *vec = - new 
std::vector(self.read_cluster_with_cut( - n_clusters, view.data(), nx, ny)); - return return_vector(vec); - }) + // .def("read_cluster_with_cut", + // [](ClusterFile &self, size_t n_clusters, + // py::array_t noise_map, int nx, int ny) { + // auto view = make_view_2d(noise_map); + // auto *vec = + // new std::vector(self.read_cluster_with_cut( + // n_clusters, view.data(), nx, ny)); + // return return_vector(vec); + // }) .def("__enter__", [](ClusterFile &self) { return &self; }) .def("__exit__", [](ClusterFile &self, @@ -59,12 +56,11 @@ void define_cluster_file_io_bindings(py::module &m) { }) .def("__iter__", [](ClusterFile &self) { return &self; }) .def("__next__", [](ClusterFile &self) { - auto vec = - new std::vector(self.read_clusters(self.chunk_size())); - if (vec->size() == 0) { + auto v = new ClusterVector(self.read_clusters(self.chunk_size())); + if (v->size() == 0) { throw py::stop_iteration(); } - return return_vector(vec); + return v; }); m.def("calculate_eta2", []( aare::ClusterVector &clusters) { diff --git a/python/src/ctb_raw_file.hpp b/python/src/ctb_raw_file.hpp index 39c1001..9ce656d 100644 --- a/python/src/ctb_raw_file.hpp +++ b/python/src/ctb_raw_file.hpp @@ -7,6 +7,7 @@ #include "aare/RawSubFile.hpp" #include "aare/defs.hpp" +#include "aare/decode.hpp" // #include "aare/fClusterFileV2.hpp" #include @@ -23,6 +24,47 @@ using namespace ::aare; void define_ctb_raw_file_io_bindings(py::module &m) { +m.def("adc_sar_05_decode64to16", [](py::array_t input) { + + + if(input.ndim() != 2){ + throw std::runtime_error("Only 2D arrays are supported at this moment"); + } + + //Create a 2D output array with the same shape as the input + std::vector shape{input.shape(0), input.shape(1)/8}; + py::array_t output(shape); + + //Create a view of the input and output arrays + NDView input_view(reinterpret_cast(input.mutable_data()), {output.shape(0), output.shape(1)}); + NDView output_view(output.mutable_data(), {output.shape(0), output.shape(1)}); + + 
adc_sar_05_decode64to16(input_view, output_view); + + return output; +}); + + +m.def("adc_sar_04_decode64to16", [](py::array_t input) { + + + if(input.ndim() != 2){ + throw std::runtime_error("Only 2D arrays are supported at this moment"); + } + + //Create a 2D output array with the same shape as the input + std::vector shape{input.shape(0), input.shape(1)/8}; + py::array_t output(shape); + + //Create a view of the input and output arrays + NDView input_view(reinterpret_cast(input.mutable_data()), {output.shape(0), output.shape(1)}); + NDView output_view(output.mutable_data(), {output.shape(0), output.shape(1)}); + + adc_sar_04_decode64to16(input_view, output_view); + + return output; +}); + py::class_(m, "CtbRawFile") .def(py::init()) .def("read_frame", diff --git a/python/src/file.hpp b/python/src/file.hpp index 30fa82f..c3c800c 100644 --- a/python/src/file.hpp +++ b/python/src/file.hpp @@ -20,6 +20,11 @@ namespace py = pybind11; using namespace ::aare; +//Disable warnings for unused parameters, as we ignore some +//in the __exit__ method +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wunused-parameter" + void define_file_io_bindings(py::module &m) { @@ -124,8 +129,41 @@ void define_file_io_bindings(py::module &m) { self.read_into(reinterpret_cast(image.mutable_data()), n_frames); return image; + }) + .def("__enter__", [](File &self) { return &self; }) + .def("__exit__", + [](File &self, + const std::optional &exc_type, + const std::optional &exc_value, + const std::optional &traceback) { + // self.close(); + }) + .def("__iter__", [](File &self) { return &self; }) + .def("__next__", [](File &self) { + + try{ + const uint8_t item_size = self.bytes_per_pixel(); + py::array image; + std::vector shape; + shape.reserve(2); + shape.push_back(self.rows()); + shape.push_back(self.cols()); + if (item_size == 1) { + image = py::array_t(shape); + } else if (item_size == 2) { + image = py::array_t(shape); + } else if (item_size == 4) { + image = 
py::array_t(shape); + } + self.read_into( + reinterpret_cast(image.mutable_data())); + return image; + }catch(std::runtime_error &e){ + throw py::stop_iteration(); + } }); + py::class_(m, "FileConfig") .def(py::init<>()) .def_readwrite("rows", &FileConfig::rows) @@ -205,7 +243,7 @@ void define_file_io_bindings(py::module &m) { return image; }); - +#pragma GCC diagnostic pop // py::class_(m, "ClusterHeader") // .def(py::init<>()) // .def_readwrite("frame_number", &ClusterHeader::frame_number) diff --git a/python/src/fit.hpp b/python/src/fit.hpp new file mode 100644 index 0000000..60cdecc --- /dev/null +++ b/python/src/fit.hpp @@ -0,0 +1,223 @@ +#include +#include +#include +#include +#include + +#include "aare/Fit.hpp" + +namespace py = pybind11; + +void define_fit_bindings(py::module &m) { + + // TODO! Evaluate without converting to double + m.def( + "gaus", + [](py::array_t x, + py::array_t par) { + auto x_view = make_view_1d(x); + auto par_view = make_view_1d(par); + auto y = new NDArray{aare::func::gaus(x_view, par_view)}; + return return_image_data(y); + }, + R"( + Evaluate a 1D Gaussian function for all points in x using parameters par. + + Parameters + ---------- + x : array_like + The points at which to evaluate the Gaussian function. + par : array_like + The parameters of the Gaussian function. The first element is the amplitude, the second element is the mean, and the third element is the standard deviation. + )", py::arg("x"), py::arg("par")); + + m.def( + "pol1", + [](py::array_t x, + py::array_t par) { + auto x_view = make_view_1d(x); + auto par_view = make_view_1d(par); + auto y = new NDArray{aare::func::pol1(x_view, par_view)}; + return return_image_data(y); + }, + R"( + Evaluate a 1D polynomial function for all points in x using parameters par. (p0+p1*x) + + Parameters + ---------- + x : array_like + The points at which to evaluate the polynomial function. + par : array_like + The parameters of the polynomial function. 
The first element is the intercept, and the second element is the slope. + )", py::arg("x"), py::arg("par")); + + m.def( + "fit_gaus", + [](py::array_t x, + py::array_t y, + int n_threads) { + if (y.ndim() == 3) { + auto par = new NDArray{}; + auto y_view = make_view_3d(y); + auto x_view = make_view_1d(x); + *par = aare::fit_gaus(x_view, y_view, n_threads); + return return_image_data(par); + } else if (y.ndim() == 1) { + auto par = new NDArray{}; + auto y_view = make_view_1d(y); + auto x_view = make_view_1d(x); + *par = aare::fit_gaus(x_view, y_view); + return return_image_data(par); + } else { + throw std::runtime_error("Data must be 1D or 3D"); + } + }, +R"( +Fit a 1D Gaussian to data. + +Parameters +---------- +x : array_like + The x values. +y : array_like + The y values. +n_threads : int, optional + The number of threads to use. Default is 4. +)", + py::arg("x"), py::arg("y"), py::arg("n_threads") = 4); + + m.def( + "fit_gaus", + [](py::array_t x, + py::array_t y, + py::array_t + y_err, int n_threads) { + if (y.ndim() == 3) { + // Allocate memory for the output + // Need to have pointers to allow python to manage + // the memory + auto par = new NDArray({y.shape(0), y.shape(1), 3}); + auto par_err = + new NDArray({y.shape(0), y.shape(1), 3}); + + auto y_view = make_view_3d(y); + auto y_view_err = make_view_3d(y_err); + auto x_view = make_view_1d(x); + aare::fit_gaus(x_view, y_view, y_view_err, par->view(), + par_err->view(), n_threads); + // return return_image_data(par); + return py::make_tuple(return_image_data(par), + return_image_data(par_err)); + } else if (y.ndim() == 1) { + // Allocate memory for the output + // Need to have pointers to allow python to manage + // the memory + auto par = new NDArray({3}); + auto par_err = new NDArray({3}); + + // Decode the numpy arrays + auto y_view = make_view_1d(y); + auto y_view_err = make_view_1d(y_err); + auto x_view = make_view_1d(x); + + aare::fit_gaus(x_view, y_view, y_view_err, par->view(), + par_err->view()); 
+ return py::make_tuple(return_image_data(par), + return_image_data(par_err)); + } else { + throw std::runtime_error("Data must be 1D or 3D"); + } + }, +R"( +Fit a 1D Gaussian to data with error estimates. + +Parameters +---------- +x : array_like + The x values. +y : array_like + The y values. +y_err : array_like + The error in the y values. +n_threads : int, optional + The number of threads to use. Default is 4. +)", + py::arg("x"), py::arg("y"), py::arg("y_err"), py::arg("n_threads") = 4); + + m.def( + "fit_pol1", + [](py::array_t x, + py::array_t y, + int n_threads) { + if (y.ndim() == 3) { + auto par = new NDArray{}; + + auto x_view = make_view_1d(x); + auto y_view = make_view_3d(y); + *par = aare::fit_pol1(x_view, y_view, n_threads); + return return_image_data(par); + } else if (y.ndim() == 1) { + auto par = new NDArray{}; + auto x_view = make_view_1d(x); + auto y_view = make_view_1d(y); + *par = aare::fit_pol1(x_view, y_view); + return return_image_data(par); + } else { + throw std::runtime_error("Data must be 1D or 3D"); + } + }, + py::arg("x"), py::arg("y"), py::arg("n_threads") = 4); + + m.def( + "fit_pol1", + [](py::array_t x, + py::array_t y, + py::array_t + y_err, int n_threads) { + if (y.ndim() == 3) { + auto par = + new NDArray({y.shape(0), y.shape(1), 2}); + auto par_err = + new NDArray({y.shape(0), y.shape(1), 2}); + + auto y_view = make_view_3d(y); + auto y_view_err = make_view_3d(y_err); + auto x_view = make_view_1d(x); + + aare::fit_pol1(x_view, y_view,y_view_err, par->view(), + par_err->view(), n_threads); + return py::make_tuple(return_image_data(par), + return_image_data(par_err)); + + } else if (y.ndim() == 1) { + auto par = new NDArray({2}); + auto par_err = new NDArray({2}); + + auto y_view = make_view_1d(y); + auto y_view_err = make_view_1d(y_err); + auto x_view = make_view_1d(x); + + aare::fit_pol1(x_view, y_view, y_view_err, par->view(), + par_err->view()); + return py::make_tuple(return_image_data(par), + return_image_data(par_err)); + 
} else { + throw std::runtime_error("Data must be 1D or 3D"); + } + }, +R"( +Fit a 1D polynomial to data with error estimates. + +Parameters +---------- +x : array_like + The x values. +y : array_like + The y values. +y_err : array_like + The error in the y values. +n_threads : int, optional + The number of threads to use. Default is 4. +)", + py::arg("x"), py::arg("y"), py::arg("y_err"), py::arg("n_threads") = 4); +} \ No newline at end of file diff --git a/python/src/module.cpp b/python/src/module.cpp index 14a686a..70d143f 100644 --- a/python/src/module.cpp +++ b/python/src/module.cpp @@ -8,6 +8,7 @@ #include "pedestal.hpp" #include "cluster.hpp" #include "cluster_file.hpp" +#include "fit.hpp" //Pybind stuff #include @@ -25,5 +26,10 @@ PYBIND11_MODULE(_aare, m) { define_pedestal_bindings(m, "Pedestal_d"); define_pedestal_bindings(m, "Pedestal_f"); define_cluster_finder_bindings(m); + define_cluster_finder_mt_bindings(m); define_cluster_file_io_bindings(m); + define_cluster_collector_bindings(m); + define_cluster_file_sink_bindings(m); + define_fit_bindings(m); + } \ No newline at end of file diff --git a/python/src/np_helper.hpp b/python/src/np_helper.hpp index e0c145b..6e92830 100644 --- a/python/src/np_helper.hpp +++ b/python/src/np_helper.hpp @@ -39,65 +39,6 @@ template py::array return_vector(std::vector *vec) { free_when_done); // numpy array references this parent } -// template py::array do_read(Reader &r, size_t n_frames) { -// py::array image; -// if (n_frames == 0) -// n_frames = r.total_frames(); - -// std::array shape{static_cast(n_frames), r.rows(), -// r.cols()}; -// const uint8_t item_size = r.bytes_per_pixel(); -// if (item_size == 1) { -// image = py::array_t( -// shape); -// } else if (item_size == 2) { -// image = -// py::array_t( -// shape); -// } else if (item_size == 4) { -// image = -// py::array_t( -// shape); -// } -// r.read_into(reinterpret_cast(image.mutable_data()), n_frames); -// return image; -// } - -// py::array 
return_frame(pl::Frame *ptr) { -// py::capsule free_when_done(ptr, [](void *f) { -// pl::Frame *foo = reinterpret_cast(f); -// delete foo; -// }); - -// const uint8_t item_size = ptr->bytes_per_pixel(); -// std::vector shape; -// for (auto val : ptr->shape()) -// if (val > 1) -// shape.push_back(val); - -// std::vector strides; -// if (shape.size() == 1) -// strides.push_back(item_size); -// else if (shape.size() == 2) { -// strides.push_back(item_size * shape[1]); -// strides.push_back(item_size); -// } - -// if (item_size == 1) -// return py::array_t( -// shape, strides, -// reinterpret_cast(ptr->data()), free_when_done); -// else if (item_size == 2) -// return py::array_t(shape, strides, -// reinterpret_cast(ptr->data()), -// free_when_done); -// else if (item_size == 4) -// return py::array_t(shape, strides, -// reinterpret_cast(ptr->data()), -// free_when_done); -// return {}; -// } - // todo rewrite generic template auto get_shape_3d(py::array_t arr) { return aare::Shape<3>{arr.shape(0), arr.shape(1), arr.shape(2)}; @@ -111,6 +52,13 @@ template auto get_shape_2d(py::array_t arr) { return aare::Shape<2>{arr.shape(0), arr.shape(1)}; } +template auto get_shape_1d(py::array_t arr) { + return aare::Shape<1>{arr.shape(0)}; +} + template auto make_view_2d(py::array_t arr) { return aare::NDView(arr.mutable_data(), get_shape_2d(arr)); +} +template auto make_view_1d(py::array_t arr) { + return aare::NDView(arr.mutable_data(), get_shape_1d(arr)); } \ No newline at end of file diff --git a/src/ClusterFile.cpp b/src/ClusterFile.cpp index 855e0e7..2928d26 100644 --- a/src/ClusterFile.cpp +++ b/src/ClusterFile.cpp @@ -20,6 +20,12 @@ ClusterFile::ClusterFile(const std::filesystem::path &fname, size_t chunk_size, throw std::runtime_error("Could not open file for writing: " + fname.string()); } + } else if (mode == "a") { + fp = fopen(fname.c_str(), "ab"); + if (!fp) { + throw std::runtime_error("Could not open file for appending: " + + fname.string()); + } } else { throw 
std::runtime_error("Unsupported mode: " + mode); } @@ -34,35 +40,35 @@ void ClusterFile::close() { } } -void ClusterFile::write_frame(int32_t frame_number, - const ClusterVector &clusters) { - if (m_mode != "w") { +void ClusterFile::write_frame(const ClusterVector &clusters) { + if (m_mode != "w" && m_mode != "a") { throw std::runtime_error("File not opened for writing"); } if (!(clusters.cluster_size_x() == 3) && !(clusters.cluster_size_y() == 3)) { throw std::runtime_error("Only 3x3 clusters are supported"); } + int32_t frame_number = clusters.frame_number(); fwrite(&frame_number, sizeof(frame_number), 1, fp); uint32_t n_clusters = clusters.size(); fwrite(&n_clusters, sizeof(n_clusters), 1, fp); - fwrite(clusters.data(), clusters.element_offset(), clusters.size(), fp); - // write clusters - // fwrite(clusters.data(), sizeof(Cluster), clusters.size(), fp); + fwrite(clusters.data(), clusters.item_size(), clusters.size(), fp); } -std::vector ClusterFile::read_clusters(size_t n_clusters) { +ClusterVector ClusterFile::read_clusters(size_t n_clusters) { if (m_mode != "r") { throw std::runtime_error("File not opened for reading"); } - std::vector clusters(n_clusters); + + ClusterVector clusters(3,3, n_clusters); int32_t iframe = 0; // frame number needs to be 4 bytes! 
size_t nph_read = 0; uint32_t nn = m_num_left; uint32_t nph = m_num_left; // number of clusters in frame needs to be 4 - auto buf = reinterpret_cast(clusters.data()); + // auto buf = reinterpret_cast(clusters.data()); + auto buf = clusters.data(); // if there are photons left from previous frame read them first if (nph) { if (nph > n_clusters) { @@ -72,8 +78,8 @@ std::vector ClusterFile::read_clusters(size_t n_clusters) { } else { nn = nph; } - nph_read += fread(reinterpret_cast(buf + nph_read), - sizeof(Cluster3x3), nn, fp); + nph_read += fread((buf + nph_read*clusters.item_size()), + clusters.item_size(), nn, fp); m_num_left = nph - nn; // write back the number of photons left } @@ -87,8 +93,8 @@ std::vector ClusterFile::read_clusters(size_t n_clusters) { else nn = nph; - nph_read += fread(reinterpret_cast(buf + nph_read), - sizeof(Cluster3x3), nn, fp); + nph_read += fread((buf + nph_read*clusters.item_size()), + clusters.item_size(), nn, fp); m_num_left = nph - nn; } if (nph_read >= n_clusters) @@ -102,7 +108,7 @@ std::vector ClusterFile::read_clusters(size_t n_clusters) { return clusters; } -std::vector ClusterFile::read_frame(int32_t &out_fnum) { +ClusterVector ClusterFile::read_frame() { if (m_mode != "r") { throw std::runtime_error("File not opened for reading"); } @@ -110,8 +116,8 @@ std::vector ClusterFile::read_frame(int32_t &out_fnum) { throw std::runtime_error( "There are still photons left in the last frame"); } - - if (fread(&out_fnum, sizeof(out_fnum), 1, fp) != 1) { + int32_t frame_number; + if (fread(&frame_number, sizeof(frame_number), 1, fp) != 1) { throw std::runtime_error("Could not read frame number"); } @@ -119,158 +125,163 @@ std::vector ClusterFile::read_frame(int32_t &out_fnum) { if (fread(&n_clusters, sizeof(n_clusters), 1, fp) != 1) { throw std::runtime_error("Could not read number of clusters"); } - std::vector clusters(n_clusters); + // std::vector clusters(n_clusters); + ClusterVector clusters(3, 3, n_clusters); + 
clusters.set_frame_number(frame_number); - if (fread(clusters.data(), sizeof(Cluster3x3), n_clusters, fp) != + if (fread(clusters.data(), clusters.item_size(), n_clusters, fp) != static_cast(n_clusters)) { throw std::runtime_error("Could not read clusters"); } + clusters.resize(n_clusters); return clusters; } -std::vector ClusterFile::read_cluster_with_cut(size_t n_clusters, - double *noise_map, - int nx, int ny) { - if (m_mode != "r") { - throw std::runtime_error("File not opened for reading"); - } - std::vector clusters(n_clusters); - // size_t read_clusters_with_cut(FILE *fp, size_t n_clusters, Cluster *buf, - // uint32_t *n_left, double *noise_map, int - // nx, int ny) { - int iframe = 0; - // uint32_t nph = *n_left; - uint32_t nph = m_num_left; - // uint32_t nn = *n_left; - uint32_t nn = m_num_left; - size_t nph_read = 0; - int32_t t2max, tot1; - int32_t tot3; - // Cluster *ptr = buf; - Cluster3x3 *ptr = clusters.data(); - int good = 1; - double noise; - // read photons left from previous frame - if (noise_map) - printf("Using noise map\n"); +// std::vector ClusterFile::read_cluster_with_cut(size_t n_clusters, +// double *noise_map, +// int nx, int ny) { +// if (m_mode != "r") { +// throw std::runtime_error("File not opened for reading"); +// } +// std::vector clusters(n_clusters); +// // size_t read_clusters_with_cut(FILE *fp, size_t n_clusters, Cluster *buf, +// // uint32_t *n_left, double *noise_map, int +// // nx, int ny) { +// int iframe = 0; +// // uint32_t nph = *n_left; +// uint32_t nph = m_num_left; +// // uint32_t nn = *n_left; +// uint32_t nn = m_num_left; +// size_t nph_read = 0; - if (nph) { - if (nph > n_clusters) { - // if we have more photons left in the frame then photons to - // read we read directly the requested number - nn = n_clusters; - } else { - nn = nph; - } - for (size_t iph = 0; iph < nn; iph++) { - // read photons 1 by 1 - size_t n_read = - fread(reinterpret_cast(ptr), sizeof(Cluster3x3), 1, fp); - if (n_read != 1) { - 
clusters.resize(nph_read); - return clusters; - } - // TODO! error handling on read - good = 1; - if (noise_map) { - if (ptr->x >= 0 && ptr->x < nx && ptr->y >= 0 && ptr->y < ny) { - tot1 = ptr->data[4]; - analyze_cluster(*ptr, &t2max, &tot3, NULL, NULL, NULL, NULL, - NULL); - noise = noise_map[ptr->y * nx + ptr->x]; - if (tot1 > noise || t2max > 2 * noise || tot3 > 3 * noise) { - ; - } else { - good = 0; - printf("%d %d %f %d %d %d\n", ptr->x, ptr->y, noise, - tot1, t2max, tot3); - } - } else { - printf("Bad pixel number %d %d\n", ptr->x, ptr->y); - good = 0; - } - } - if (good) { - ptr++; - nph_read++; - } - (m_num_left)--; - if (nph_read >= n_clusters) - break; - } - } - if (nph_read < n_clusters) { - // // keep on reading frames and photons until reaching - // n_clusters - while (fread(&iframe, sizeof(iframe), 1, fp)) { - // // printf("%d\n",nph_read); +// int32_t t2max, tot1; +// int32_t tot3; +// // Cluster *ptr = buf; +// Cluster3x3 *ptr = clusters.data(); +// int good = 1; +// double noise; +// // read photons left from previous frame +// if (noise_map) +// printf("Using noise map\n"); - if (fread(&nph, sizeof(nph), 1, fp)) { - // // printf("** %d\n",nph); - m_num_left = nph; - for (size_t iph = 0; iph < nph; iph++) { - // // read photons 1 by 1 - size_t n_read = fread(reinterpret_cast(ptr), - sizeof(Cluster3x3), 1, fp); - if (n_read != 1) { - clusters.resize(nph_read); - return clusters; - // return nph_read; - } - good = 1; - if (noise_map) { - if (ptr->x >= 0 && ptr->x < nx && ptr->y >= 0 && - ptr->y < ny) { - tot1 = ptr->data[4]; - analyze_cluster(*ptr, &t2max, &tot3, NULL, NULL, - NULL, NULL, NULL); - // noise = noise_map[ptr->y * nx + ptr->x]; - noise = noise_map[ptr->y + ny * ptr->x]; - if (tot1 > noise || t2max > 2 * noise || - tot3 > 3 * noise) { - ; - } else - good = 0; - } else { - printf("Bad pixel number %d %d\n", ptr->x, ptr->y); - good = 0; - } - } - if (good) { - ptr++; - nph_read++; - } - (m_num_left)--; - if (nph_read >= n_clusters) - 
break; - } - } - if (nph_read >= n_clusters) - break; - } - } - // printf("%d\n",nph_read); - clusters.resize(nph_read); - return clusters; -} +// if (nph) { +// if (nph > n_clusters) { +// // if we have more photons left in the frame then photons to +// // read we read directly the requested number +// nn = n_clusters; +// } else { +// nn = nph; +// } +// for (size_t iph = 0; iph < nn; iph++) { +// // read photons 1 by 1 +// size_t n_read = +// fread(reinterpret_cast(ptr), sizeof(Cluster3x3), 1, fp); +// if (n_read != 1) { +// clusters.resize(nph_read); +// return clusters; +// } +// // TODO! error handling on read +// good = 1; +// if (noise_map) { +// if (ptr->x >= 0 && ptr->x < nx && ptr->y >= 0 && ptr->y < ny) { +// tot1 = ptr->data[4]; +// analyze_cluster(*ptr, &t2max, &tot3, NULL, NULL, NULL, NULL, +// NULL); +// noise = noise_map[ptr->y * nx + ptr->x]; +// if (tot1 > noise || t2max > 2 * noise || tot3 > 3 * noise) { +// ; +// } else { +// good = 0; +// printf("%d %d %f %d %d %d\n", ptr->x, ptr->y, noise, +// tot1, t2max, tot3); +// } +// } else { +// printf("Bad pixel number %d %d\n", ptr->x, ptr->y); +// good = 0; +// } +// } +// if (good) { +// ptr++; +// nph_read++; +// } +// (m_num_left)--; +// if (nph_read >= n_clusters) +// break; +// } +// } +// if (nph_read < n_clusters) { +// // // keep on reading frames and photons until reaching +// // n_clusters +// while (fread(&iframe, sizeof(iframe), 1, fp)) { +// // // printf("%d\n",nph_read); + +// if (fread(&nph, sizeof(nph), 1, fp)) { +// // // printf("** %d\n",nph); +// m_num_left = nph; +// for (size_t iph = 0; iph < nph; iph++) { +// // // read photons 1 by 1 +// size_t n_read = fread(reinterpret_cast(ptr), +// sizeof(Cluster3x3), 1, fp); +// if (n_read != 1) { +// clusters.resize(nph_read); +// return clusters; +// // return nph_read; +// } +// good = 1; +// if (noise_map) { +// if (ptr->x >= 0 && ptr->x < nx && ptr->y >= 0 && +// ptr->y < ny) { +// tot1 = ptr->data[4]; +// analyze_cluster(*ptr, 
&t2max, &tot3, NULL, NULL, +// NULL, NULL, NULL); +// // noise = noise_map[ptr->y * nx + ptr->x]; +// noise = noise_map[ptr->y + ny * ptr->x]; +// if (tot1 > noise || t2max > 2 * noise || +// tot3 > 3 * noise) { +// ; +// } else +// good = 0; +// } else { +// printf("Bad pixel number %d %d\n", ptr->x, ptr->y); +// good = 0; +// } +// } +// if (good) { +// ptr++; +// nph_read++; +// } +// (m_num_left)--; +// if (nph_read >= n_clusters) +// break; +// } +// } +// if (nph_read >= n_clusters) +// break; +// } +// } +// // printf("%d\n",nph_read); +// clusters.resize(nph_read); +// return clusters; +// } NDArray calculate_eta2(ClusterVector &clusters) { - NDArray eta2({clusters.size(), 2}); + //TOTO! make work with 2x2 clusters + NDArray eta2({static_cast(clusters.size()), 2}); for (size_t i = 0; i < clusters.size(); i++) { - // int32_t t2; - // auto* ptr = reinterpret_cast (clusters.element_ptr(i) + 2 * - // sizeof(int16_t)); analyze_cluster(clusters.at(i), &t2, - // nullptr, nullptr, &eta2(i,0), &eta2(i,1) , nullptr, nullptr); - auto [x, y] = calculate_eta2(clusters.at(i)); - eta2(i, 0) = x; - eta2(i, 1) = y; + auto e = calculate_eta2(clusters.at(i)); + eta2(i, 0) = e.x; + eta2(i, 1) = e.y; } return eta2; } -std::array calculate_eta2(Cluster3x3 &cl) { - std::array eta2{}; +/** + * @brief Calculate the eta2 values for a 3x3 cluster and return them in a Eta2 struct + * containing etay, etax and the corner of the cluster. 
+*/ +Eta2 calculate_eta2(Cluster3x3 &cl) { + Eta2 eta{}; std::array tot2; tot2[0] = cl.data[0] + cl.data[1] + cl.data[3] + cl.data[4]; @@ -283,39 +294,43 @@ std::array calculate_eta2(Cluster3x3 &cl) { switch (c) { case cBottomLeft: if ((cl.data[3] + cl.data[4]) != 0) - eta2[0] = + eta.x = static_cast(cl.data[4]) / (cl.data[3] + cl.data[4]); if ((cl.data[1] + cl.data[4]) != 0) - eta2[1] = + eta.y = static_cast(cl.data[4]) / (cl.data[1] + cl.data[4]); + eta.c = cBottomLeft; break; case cBottomRight: if ((cl.data[2] + cl.data[5]) != 0) - eta2[0] = + eta.x = static_cast(cl.data[5]) / (cl.data[4] + cl.data[5]); if ((cl.data[1] + cl.data[4]) != 0) - eta2[1] = + eta.y = static_cast(cl.data[4]) / (cl.data[1] + cl.data[4]); + eta.c = cBottomRight; break; case cTopLeft: if ((cl.data[7] + cl.data[4]) != 0) - eta2[0] = + eta.x = static_cast(cl.data[4]) / (cl.data[3] + cl.data[4]); if ((cl.data[7] + cl.data[4]) != 0) - eta2[1] = + eta.y = static_cast(cl.data[7]) / (cl.data[7] + cl.data[4]); + eta.c = cTopLeft; break; case cTopRight: if ((cl.data[5] + cl.data[4]) != 0) - eta2[0] = + eta.x = static_cast(cl.data[5]) / (cl.data[5] + cl.data[4]); if ((cl.data[7] + cl.data[4]) != 0) - eta2[1] = + eta.y = static_cast(cl.data[7]) / (cl.data[7] + cl.data[4]); + eta.c = cTopRight; break; - // default:; + // no default to allow compiler to warn about missing cases } - return eta2; + return eta; } int analyze_cluster(Cluster3x3 &cl, int32_t *t2, int32_t *t3, char *quad, diff --git a/src/ClusterVector.test.cpp b/src/ClusterVector.test.cpp index 24a482b..8ca3b1e 100644 --- a/src/ClusterVector.test.cpp +++ b/src/ClusterVector.test.cpp @@ -6,12 +6,14 @@ using aare::ClusterVector; +struct Cluster_i2x2 { + int16_t x; + int16_t y; + int32_t data[4]; +}; + TEST_CASE("ClusterVector 2x2 int32_t capacity 4, push back then read") { - struct Cluster_i2x2 { - int16_t x; - int16_t y; - int32_t data[4]; - }; + ClusterVector cv(2, 2, 4); REQUIRE(cv.capacity() == 4); @@ -19,7 +21,7 @@ 
TEST_CASE("ClusterVector 2x2 int32_t capacity 4, push back then read") { REQUIRE(cv.cluster_size_x() == 2); REQUIRE(cv.cluster_size_y() == 2); // int16_t, int16_t, 2x2 int32_t = 20 bytes - REQUIRE(cv.element_offset() == 20); + REQUIRE(cv.item_size() == 20); //Create a cluster and push back into the vector Cluster_i2x2 c1 = {1, 2, {3, 4, 5, 6}}; @@ -30,7 +32,7 @@ TEST_CASE("ClusterVector 2x2 int32_t capacity 4, push back then read") { //Read the cluster back out using copy. TODO! Can we improve the API? Cluster_i2x2 c2; std::byte *ptr = cv.element_ptr(0); - std::copy(ptr, ptr + cv.element_offset(), reinterpret_cast(&c2)); + std::copy(ptr, ptr + cv.item_size(), reinterpret_cast(&c2)); //Check that the data is the same REQUIRE(c1.x == c2.x); @@ -83,8 +85,8 @@ TEST_CASE("Storing floats"){ float data[8]; }; - ClusterVector cv(2, 4, 2); - REQUIRE(cv.capacity() == 2); + ClusterVector cv(2, 4, 10); + REQUIRE(cv.capacity() == 10); REQUIRE(cv.size() == 0); REQUIRE(cv.cluster_size_x() == 2); REQUIRE(cv.cluster_size_y() == 4); @@ -92,17 +94,105 @@ TEST_CASE("Storing floats"){ //Create a cluster and push back into the vector Cluster_f4x2 c1 = {1, 2, {3.0, 4.0, 5.0, 6.0,3.0, 4.0, 5.0, 6.0}}; cv.push_back(c1.x, c1.y, reinterpret_cast(&c1.data[0])); - REQUIRE(cv.capacity() == 2); + REQUIRE(cv.capacity() == 10); REQUIRE(cv.size() == 1); Cluster_f4x2 c2 = {6, 7, {8.0, 9.0, 10.0, 11.0,8.0, 9.0, 10.0, 11.0}}; cv.push_back(c2.x, c2.y, reinterpret_cast(&c2.data[0])); - REQUIRE(cv.capacity() == 2); + REQUIRE(cv.capacity() == 10); REQUIRE(cv.size() == 2); auto sums = cv.sum(); REQUIRE(sums.size() == 2); REQUIRE_THAT(sums[0], Catch::Matchers::WithinAbs(36.0, 1e-6)); REQUIRE_THAT(sums[1], Catch::Matchers::WithinAbs(76.0, 1e-6)); +} + +TEST_CASE("Push back more than initial capacity"){ + + ClusterVector cv(2, 2, 2); + auto initial_data = cv.data(); + Cluster_i2x2 c1 = {1, 2, {3, 4, 5, 6}}; + cv.push_back(c1.x, c1.y, reinterpret_cast(&c1.data[0])); + REQUIRE(cv.size() == 1); + 
REQUIRE(cv.capacity() == 2); + + Cluster_i2x2 c2 = {6, 7, {8, 9, 10, 11}}; + cv.push_back(c2.x, c2.y, reinterpret_cast(&c2.data[0])); + REQUIRE(cv.size() == 2); + REQUIRE(cv.capacity() == 2); + + Cluster_i2x2 c3 = {11, 12, {13, 14, 15, 16}}; + cv.push_back(c3.x, c3.y, reinterpret_cast(&c3.data[0])); + REQUIRE(cv.size() == 3); + REQUIRE(cv.capacity() == 4); + + Cluster_i2x2* ptr = reinterpret_cast(cv.data()); + REQUIRE(ptr[0].x == 1); + REQUIRE(ptr[0].y == 2); + REQUIRE(ptr[1].x == 6); + REQUIRE(ptr[1].y == 7); + REQUIRE(ptr[2].x == 11); + REQUIRE(ptr[2].y == 12); + + //We should have allocated a new buffer, since we outgrew the initial capacity + REQUIRE(initial_data != cv.data()); + +} + +TEST_CASE("Concatenate two cluster vectors where the first has enough capacity"){ + ClusterVector cv1(2, 2, 12); + Cluster_i2x2 c1 = {1, 2, {3, 4, 5, 6}}; + cv1.push_back(c1.x, c1.y, reinterpret_cast(&c1.data[0])); + Cluster_i2x2 c2 = {6, 7, {8, 9, 10, 11}}; + cv1.push_back(c2.x, c2.y, reinterpret_cast(&c2.data[0])); + + ClusterVector cv2(2, 2, 2); + Cluster_i2x2 c3 = {11, 12, {13, 14, 15, 16}}; + cv2.push_back(c3.x, c3.y, reinterpret_cast(&c3.data[0])); + Cluster_i2x2 c4 = {16, 17, {18, 19, 20, 21}}; + cv2.push_back(c4.x, c4.y, reinterpret_cast(&c4.data[0])); + + cv1 += cv2; + REQUIRE(cv1.size() == 4); + REQUIRE(cv1.capacity() == 12); + + Cluster_i2x2* ptr = reinterpret_cast(cv1.data()); + REQUIRE(ptr[0].x == 1); + REQUIRE(ptr[0].y == 2); + REQUIRE(ptr[1].x == 6); + REQUIRE(ptr[1].y == 7); + REQUIRE(ptr[2].x == 11); + REQUIRE(ptr[2].y == 12); + REQUIRE(ptr[3].x == 16); + REQUIRE(ptr[3].y == 17); +} + +TEST_CASE("Concatenate two cluster vectors where we need to allocate"){ + ClusterVector cv1(2, 2, 2); + Cluster_i2x2 c1 = {1, 2, {3, 4, 5, 6}}; + cv1.push_back(c1.x, c1.y, reinterpret_cast(&c1.data[0])); + Cluster_i2x2 c2 = {6, 7, {8, 9, 10, 11}}; + cv1.push_back(c2.x, c2.y, reinterpret_cast(&c2.data[0])); + + ClusterVector cv2(2, 2, 2); + Cluster_i2x2 c3 = {11, 12, {13, 14, 15, 
16}}; + cv2.push_back(c3.x, c3.y, reinterpret_cast(&c3.data[0])); + Cluster_i2x2 c4 = {16, 17, {18, 19, 20, 21}}; + cv2.push_back(c4.x, c4.y, reinterpret_cast(&c4.data[0])); + + cv1 += cv2; + REQUIRE(cv1.size() == 4); + REQUIRE(cv1.capacity() == 4); + + Cluster_i2x2* ptr = reinterpret_cast(cv1.data()); + REQUIRE(ptr[0].x == 1); + REQUIRE(ptr[0].y == 2); + REQUIRE(ptr[1].x == 6); + REQUIRE(ptr[1].y == 7); + REQUIRE(ptr[2].x == 11); + REQUIRE(ptr[2].y == 12); + REQUIRE(ptr[3].x == 16); + REQUIRE(ptr[3].y == 17); } \ No newline at end of file diff --git a/src/File.cpp b/src/File.cpp index 37e4c57..1180967 100644 --- a/src/File.cpp +++ b/src/File.cpp @@ -45,6 +45,8 @@ File& File::operator=(File &&other) noexcept { return *this; } +// void File::close() { file_impl->close(); } + Frame File::read_frame() { return file_impl->read_frame(); } Frame File::read_frame(size_t frame_index) { return file_impl->read_frame(frame_index); diff --git a/src/Fit.cpp b/src/Fit.cpp new file mode 100644 index 0000000..08ecaec --- /dev/null +++ b/src/Fit.cpp @@ -0,0 +1,300 @@ +#include "aare/Fit.hpp" +#include "aare/utils/task.hpp" + +#include +#include + +#include + +namespace aare { + +namespace func { + +double gaus(const double x, const double *par) { + return par[0] * exp(-pow(x - par[1], 2) / (2 * pow(par[2], 2))); +} + +NDArray gaus(NDView x, NDView par) { + NDArray y({x.shape(0)}, 0); + for (size_t i = 0; i < x.size(); i++) { + y(i) = gaus(x(i), par.data()); + } + return y; +} + +double pol1(const double x, const double *par) { return par[0] * x + par[1]; } + +NDArray pol1(NDView x, NDView par) { + NDArray y({x.shape()}, 0); + for (size_t i = 0; i < x.size(); i++) { + y(i) = pol1(x(i), par.data()); + } + return y; +} + +} // namespace func + +NDArray fit_gaus(NDView x, NDView y) { + NDArray result({3}, 0); + lm_control_struct control = lm_control_double; + + // Estimate the initial parameters for the fit + std::vector start_par{0, 0, 0}; + auto e = std::max_element(y.begin(), 
y.end()); + auto idx = std::distance(y.begin(), e); + + start_par[0] = *e; // For amplitude we use the maximum value + start_par[1] = + x[idx]; // For the mean we use the x value of the maximum value + + // For sigma we estimate the fwhm and divide by 2.35 + // assuming equally spaced x values + auto delta = x[1] - x[0]; + start_par[2] = + std::count_if(y.begin(), y.end(), + [e, delta](double val) { return val > *e / 2; }) * + delta / 2.35; + + lmfit::result_t res(start_par); + lmcurve(res.par.size(), res.par.data(), x.size(), x.data(), y.data(), + aare::func::gaus, &control, &res.status); + + result(0) = res.par[0]; + result(1) = res.par[1]; + result(2) = res.par[2]; + + return result; +} + +NDArray fit_gaus(NDView x, NDView y, + int n_threads) { + NDArray result({y.shape(0), y.shape(1), 3}, 0); + + auto process = [&x, &y, &result](ssize_t first_row, ssize_t last_row) { + for (ssize_t row = first_row; row < last_row; row++) { + for (ssize_t col = 0; col < y.shape(1); col++) { + NDView values(&y(row, col, 0), {y.shape(2)}); + auto res = fit_gaus(x, values); + result(row, col, 0) = res(0); + result(row, col, 1) = res(1); + result(row, col, 2) = res(2); + } + } + }; + auto tasks = split_task(0, y.shape(0), n_threads); + std::vector threads; + for (auto &task : tasks) { + threads.push_back(std::thread(process, task.first, task.second)); + } + for (auto &thread : threads) { + thread.join(); + } + + return result; +} + +void fit_gaus(NDView x, NDView y, NDView y_err, + NDView par_out, NDView par_err_out, + int n_threads) { + + auto process = [&](ssize_t first_row, ssize_t last_row) { + for (ssize_t row = first_row; row < last_row; row++) { + for (ssize_t col = 0; col < y.shape(1); col++) { + NDView y_view(&y(row, col, 0), {y.shape(2)}); + NDView y_err_view(&y_err(row, col, 0), + {y_err.shape(2)}); + NDView par_out_view(&par_out(row, col, 0), + {par_out.shape(2)}); + NDView par_err_out_view(&par_err_out(row, col, 0), + {par_err_out.shape(2)}); + fit_gaus(x, y_view, 
y_err_view, par_out_view, par_err_out_view); + } + } + }; + + auto tasks = split_task(0, y.shape(0), n_threads); + std::vector threads; + for (auto &task : tasks) { + threads.push_back(std::thread(process, task.first, task.second)); + } + for (auto &thread : threads) { + thread.join(); + } +} + +void fit_gaus(NDView x, NDView y, NDView y_err, + NDView par_out, NDView par_err_out) { + // Check that we have the correct sizes + if (y.size() != x.size() || y.size() != y_err.size() || + par_out.size() != 3 || par_err_out.size() != 3) { + throw std::runtime_error("Data, x, data_err must have the same size " + "and par_out, par_err_out must have size 3"); + } + + lm_control_struct control = lm_control_double; + + // Estimate the initial parameters for the fit + std::vector start_par{0, 0, 0}; + std::vector start_par_err{0, 0, 0}; + std::vector start_cov{0, 0, 0, 0, 0, 0, 0, 0, 0}; + + auto e = std::max_element(y.begin(), y.end()); + auto idx = std::distance(y.begin(), e); + start_par[0] = *e; // For amplitude we use the maximum value + start_par[1] = + x[idx]; // For the mean we use the x value of the maximum value + + // For sigma we estimate the fwhm and divide by 2.35 + // assuming equally spaced x values + auto delta = x[1] - x[0]; + start_par[2] = + std::count_if(y.begin(), y.end(), + [e, delta](double val) { return val > *e / 2; }) * + delta / 2.35; + + lmfit::result_t res(start_par); + lmfit::result_t res_err(start_par_err); + lmfit::result_t cov(start_cov); + + // TODO can we make lmcurve write the result directly where is should be? 
+ lmcurve2(res.par.size(), res.par.data(), res_err.par.data(), cov.par.data(), + x.size(), x.data(), y.data(), y_err.data(), aare::func::gaus, + &control, &res.status); + + par_out(0) = res.par[0]; + par_out(1) = res.par[1]; + par_out(2) = res.par[2]; + par_err_out(0) = res_err.par[0]; + par_err_out(1) = res_err.par[1]; + par_err_out(2) = res_err.par[2]; +} + +void fit_pol1(NDView x, NDView y, NDView y_err, + NDView par_out, NDView par_err_out) { + // Check that we have the correct sizes + if (y.size() != x.size() || y.size() != y_err.size() || + par_out.size() != 2 || par_err_out.size() != 2) { + throw std::runtime_error("Data, x, data_err must have the same size " + "and par_out, par_err_out must have size 2"); + } + + lm_control_struct control = lm_control_double; + + // Estimate the initial parameters for the fit + std::vector start_par{0, 0}; + std::vector start_par_err{0, 0}; + std::vector start_cov{0, 0, 0, 0}; + + auto y2 = std::max_element(y.begin(), y.end()); + auto x2 = x[std::distance(y.begin(), y2)]; + auto y1 = std::min_element(y.begin(), y.end()); + auto x1 = x[std::distance(y.begin(), y1)]; + + start_par[0] = + (*y2 - *y1) / (x2 - x1); // For amplitude we use the maximum value + start_par[1] = + *y1 - ((*y2 - *y1) / (x2 - x1)) * + x1; // For the mean we use the x value of the maximum value + + lmfit::result_t res(start_par); + lmfit::result_t res_err(start_par_err); + lmfit::result_t cov(start_cov); + + lmcurve2(res.par.size(), res.par.data(), res_err.par.data(), cov.par.data(), + x.size(), x.data(), y.data(), y_err.data(), aare::func::pol1, + &control, &res.status); + + par_out(0) = res.par[0]; + par_out(1) = res.par[1]; + par_err_out(0) = res_err.par[0]; + par_err_out(1) = res_err.par[1]; +} + +void fit_pol1(NDView x, NDView y, NDView y_err, + NDView par_out, NDView par_err_out, + int n_threads) { + + auto process = [&](ssize_t first_row, ssize_t last_row) { + for (ssize_t row = first_row; row < last_row; row++) { + for (ssize_t col = 0; col < 
y.shape(1); col++) { + NDView y_view(&y(row, col, 0), {y.shape(2)}); + NDView y_err_view(&y_err(row, col, 0), + {y_err.shape(2)}); + NDView par_out_view(&par_out(row, col, 0), + {par_out.shape(2)}); + NDView par_err_out_view(&par_err_out(row, col, 0), + {par_err_out.shape(2)}); + fit_pol1(x, y_view, y_err_view, par_out_view, par_err_out_view); + } + } + }; + + auto tasks = split_task(0, y.shape(0), n_threads); + std::vector threads; + for (auto &task : tasks) { + threads.push_back(std::thread(process, task.first, task.second)); + } + for (auto &thread : threads) { + thread.join(); + } +} + +NDArray fit_pol1(NDView x, NDView y) { + // // Check that we have the correct sizes + // if (y.size() != x.size() || y.size() != y_err.size() || + // par_out.size() != 2 || par_err_out.size() != 2) { + // throw std::runtime_error("Data, x, data_err must have the same size " + // "and par_out, par_err_out must have size 2"); + // } + NDArray par({2}, 0); + + lm_control_struct control = lm_control_double; + + // Estimate the initial parameters for the fit + std::vector start_par{0, 0}; + + auto y2 = std::max_element(y.begin(), y.end()); + auto x2 = x[std::distance(y.begin(), y2)]; + auto y1 = std::min_element(y.begin(), y.end()); + auto x1 = x[std::distance(y.begin(), y1)]; + + start_par[0] = (*y2 - *y1) / (x2 - x1); + start_par[1] = *y1 - ((*y2 - *y1) / (x2 - x1)) * x1; + + lmfit::result_t res(start_par); + + lmcurve(res.par.size(), res.par.data(), x.size(), x.data(), y.data(), + aare::func::pol1, &control, &res.status); + + par(0) = res.par[0]; + par(1) = res.par[1]; + return par; +} + +NDArray fit_pol1(NDView x, NDView y, + int n_threads) { + NDArray result({y.shape(0), y.shape(1), 2}, 0); + + auto process = [&](ssize_t first_row, ssize_t last_row) { + for (ssize_t row = first_row; row < last_row; row++) { + for (ssize_t col = 0; col < y.shape(1); col++) { + NDView values(&y(row, col, 0), {y.shape(2)}); + auto res = fit_pol1(x, values); + result(row, col, 0) = res(0); + 
result(row, col, 1) = res(1); + } + } + }; + + auto tasks = split_task(0, y.shape(0), n_threads); + std::vector threads; + for (auto &task : tasks) { + threads.push_back(std::thread(process, task.first, task.second)); + } + for (auto &thread : threads) { + thread.join(); + } + return result; +} + +} // namespace aare \ No newline at end of file diff --git a/src/RawFile.cpp b/src/RawFile.cpp index 744064f..e704add 100644 --- a/src/RawFile.cpp +++ b/src/RawFile.cpp @@ -1,6 +1,7 @@ #include "aare/RawFile.hpp" #include "aare/PixelMap.hpp" #include "aare/defs.hpp" +#include "aare/geo_helpers.hpp" #include #include @@ -21,8 +22,11 @@ RawFile::RawFile(const std::filesystem::path &fname, const std::string &mode) find_geometry(); - update_geometry_with_roi(); + if (m_master.roi()){ + m_geometry = update_geometry_with_roi(m_geometry, m_master.roi().value()); + } + open_subfiles(); } else { throw std::runtime_error(LOCATION + @@ -72,9 +76,13 @@ size_t RawFile::n_mod() const { return n_subfile_parts; } size_t RawFile::bytes_per_frame() { - return m_rows * m_cols * m_master.bitdepth() / 8; + // return m_rows * m_cols * m_master.bitdepth() / 8; + return m_geometry.pixels_x * m_geometry.pixels_y * m_master.bitdepth() / 8; +} +size_t RawFile::pixels_per_frame() { + // return m_rows * m_cols; + return m_geometry.pixels_x * m_geometry.pixels_y; } -size_t RawFile::pixels_per_frame() { return m_rows * m_cols; } DetectorType RawFile::detector_type() const { return m_master.detector_type(); } @@ -92,8 +100,8 @@ void RawFile::seek(size_t frame_index) { size_t RawFile::tell() { return m_current_frame; }; size_t RawFile::total_frames() const { return m_master.frames_in_file(); } -size_t RawFile::rows() const { return m_rows; } -size_t RawFile::cols() const { return m_cols; } +size_t RawFile::rows() const { return m_geometry.pixels_y; } +size_t RawFile::cols() const { return m_geometry.pixels_x; } size_t RawFile::bitdepth() const { return m_master.bitdepth(); } xy RawFile::geometry() { 
return m_master.geometry(); } @@ -102,11 +110,11 @@ void RawFile::open_subfiles() { for (size_t i = 0; i != n_subfiles; ++i) { auto v = std::vector(n_subfile_parts); for (size_t j = 0; j != n_subfile_parts; ++j) { - auto pos = m_module_pixel_0[j]; + auto pos = m_geometry.module_pixel_0[j]; v[j] = new RawSubFile(m_master.data_fname(j, i), m_master.detector_type(), pos.height, pos.width, m_master.bitdepth(), - positions[j].row, positions[j].col); + pos.row_index, pos.col_index); } subfiles.push_back(v); @@ -149,112 +157,49 @@ int RawFile::find_number_of_subfiles() { RawMasterFile RawFile::master() const { return m_master; } +/** + * @brief Find the geometry of the detector by opening all the subfiles and + * reading the headers. + */ void RawFile::find_geometry() { + + //Hold the maximal row and column number found + //Later used for calculating the total number of rows and columns uint16_t r{}; uint16_t c{}; for (size_t i = 0; i < n_subfile_parts; i++) { - auto h = this->read_header(m_master.data_fname(i, 0)); + auto h = read_header(m_master.data_fname(i, 0)); r = std::max(r, h.row); c = std::max(c, h.column); - positions.push_back({h.row, h.column}); + // positions.push_back({h.row, h.column}); + ModuleGeometry g; - g.x = h.column * m_master.pixels_x(); - g.y = h.row * m_master.pixels_y(); + g.origin_x = h.column * m_master.pixels_x(); + g.origin_y = h.row * m_master.pixels_y(); + g.row_index = h.row; + g.col_index = h.column; g.width = m_master.pixels_x(); g.height = m_master.pixels_y(); - m_module_pixel_0.push_back(g); + m_geometry.module_pixel_0.push_back(g); } r++; c++; - m_rows = (r * m_master.pixels_y()); - m_cols = (c * m_master.pixels_x()); - - m_rows += static_cast((r - 1) * cfg.module_gap_row); - -#ifdef AARE_VERBOSE - fmt::print("\nRawFile::find_geometry()\n"); - for (size_t i = 0; i < m_module_pixel_0.size(); i++) { - fmt::print("Module {} at position: (r:{},c:{})\n", i, - m_module_pixel_0[i].y, m_module_pixel_0[i].x); - } - fmt::print("Image size: 
{}x{}\n\n", m_rows, m_cols); -#endif -} - -void RawFile::update_geometry_with_roi() { - // TODO! implement this - if (m_master.roi()) { - auto roi = m_master.roi().value(); - - // TODO! can we do this cleaner? - int pos_y = 0; - int pos_y_increment = 0; - for (size_t row = 0; row < m_master.geometry().row; row++) { - int pos_x = 0; - for (size_t col = 0; col < m_master.geometry().col; col++) { - auto &m = m_module_pixel_0[row * m_master.geometry().col + col]; - auto original_height = m.height; - auto original_width = m.width; - - // module is to the left of the roi - if (m.x + m.width < roi.xmin) { - m.width = 0; - - // roi is in module - } else { - // here we only arrive when the roi is in or to the left of - // the module - if (roi.xmin > m.x) { - m.width -= roi.xmin - m.x; - } - if (roi.xmax < m.x + m.width) { - m.width -= m.x + original_width - roi.xmax; - } - m.x = pos_x; - pos_x += m.width; - } - - if (m.y + m.height < roi.ymin) { - m.height = 0; - } else { - if ((roi.ymin > m.y) && (roi.ymin < m.y + m.height)) { - m.height -= roi.ymin - m.y; - - } - if (roi.ymax < m.y + m.height) { - m.height -= m.y + original_height - roi.ymax; - } - m.y = pos_y; - pos_y_increment = m.height; - } - } - // increment pos_y - pos_y += pos_y_increment; - } - - m_rows = roi.height(); - m_cols = roi.width(); - } - -#ifdef AARE_VERBOSE - fmt::print("RawFile::update_geometry_with_roi()\n"); - for (const auto &m : m_module_pixel_0) { - fmt::print("Module at position: (r:{}, c:{}, h:{}, w:{})\n", m.y, m.x, - m.height, m.width); - } - fmt::print("Updated image size: {}x{}\n\n", m_rows, m_cols); - fmt::print("\n"); -#endif + m_geometry.pixels_y = (r * m_master.pixels_y()); + m_geometry.pixels_x = (c * m_master.pixels_x()); + m_geometry.modules_x = c; + m_geometry.modules_y = r; + m_geometry.pixels_y += static_cast((r - 1) * cfg.module_gap_row); } + Frame RawFile::get_frame(size_t frame_index) { - auto f = Frame(m_rows, m_cols, Dtype::from_bitdepth(m_master.bitdepth())); + auto f = 
Frame(m_geometry.pixels_y, m_geometry.pixels_x, Dtype::from_bitdepth(m_master.bitdepth())); std::byte *frame_buffer = f.data(); get_frame_into(frame_index, frame_buffer); return f; @@ -278,6 +223,10 @@ void RawFile::get_frame_into(size_t frame_index, std::byte *frame_buffer, Detect if (n_subfile_parts != 1) { for (size_t part_idx = 0; part_idx != n_subfile_parts; ++part_idx) { auto subfile_id = frame_index / m_master.max_frames_per_file(); + if (subfile_id >= subfiles.size()) { + throw std::runtime_error(LOCATION + + " Subfile out of range. Possible missing data."); + } frame_numbers[part_idx] = subfiles[subfile_id][part_idx]->frame_number( frame_index % m_master.max_frames_per_file()); @@ -311,12 +260,16 @@ void RawFile::get_frame_into(size_t frame_index, std::byte *frame_buffer, Detect for (size_t part_idx = 0; part_idx != n_subfile_parts; ++part_idx) { auto corrected_idx = frame_indices[part_idx]; auto subfile_id = corrected_idx / m_master.max_frames_per_file(); + if (subfile_id >= subfiles.size()) { + throw std::runtime_error(LOCATION + + " Subfile out of range. Possible missing data."); + } // This is where we start writing - auto offset = (m_module_pixel_0[part_idx].y * m_cols + - m_module_pixel_0[part_idx].x)*m_master.bitdepth()/8; + auto offset = (m_geometry.module_pixel_0[part_idx].origin_y * m_geometry.pixels_x + + m_geometry.module_pixel_0[part_idx].origin_x)*m_master.bitdepth()/8; - if (m_module_pixel_0[part_idx].x!=0) + if (m_geometry.module_pixel_0[part_idx].origin_x!=0) throw std::runtime_error(LOCATION + "Implementation error. x pos not 0."); //TODO! 
Risk for out of range access @@ -340,9 +293,13 @@ void RawFile::get_frame_into(size_t frame_index, std::byte *frame_buffer, Detect // level for (size_t part_idx = 0; part_idx != n_subfile_parts; ++part_idx) { - auto pos = m_module_pixel_0[part_idx]; + auto pos = m_geometry.module_pixel_0[part_idx]; auto corrected_idx = frame_indices[part_idx]; auto subfile_id = corrected_idx / m_master.max_frames_per_file(); + if (subfile_id >= subfiles.size()) { + throw std::runtime_error(LOCATION + + " Subfile out of range. Possible missing data."); + } subfiles[subfile_id][part_idx]->seek(corrected_idx % m_master.max_frames_per_file()); subfiles[subfile_id][part_idx]->read_into(part_buffer, header); @@ -352,9 +309,9 @@ void RawFile::get_frame_into(size_t frame_index, std::byte *frame_buffer, Detect for (size_t cur_row = 0; cur_row < static_cast(pos.height); cur_row++) { - auto irow = (pos.y + cur_row); - auto icol = pos.x; - auto dest = (irow * this->m_cols + icol); + auto irow = (pos.origin_y + cur_row); + auto icol = pos.origin_x; + auto dest = (irow * this->m_geometry.pixels_x + icol); dest = dest * m_master.bitdepth() / 8; memcpy(frame_buffer + dest, part_buffer + cur_row * pos.width * @@ -400,4 +357,8 @@ RawFile::~RawFile() { } } + + + + } // namespace aare \ No newline at end of file diff --git a/src/RawFile.test.cpp b/src/RawFile.test.cpp index faefd28..5f9b2e1 100644 --- a/src/RawFile.test.cpp +++ b/src/RawFile.test.cpp @@ -1,10 +1,13 @@ #include "aare/File.hpp" +#include "aare/RawMasterFile.hpp" //needed for ROI +#include "aare/RawFile.hpp" #include #include #include "test_config.hpp" + using aare::File; TEST_CASE("Read number of frames from a jungfrau raw file", "[.integration]") { @@ -148,3 +151,5 @@ TEST_CASE("Read file with unordered frames", "[.integration]") { File f(fpath); REQUIRE_THROWS((f.read_frame())); } + + diff --git a/src/RawSubFile.cpp b/src/RawSubFile.cpp index 6fae7ce..a3bb79c 100644 --- a/src/RawSubFile.cpp +++ b/src/RawSubFile.cpp @@ -9,11 +9,13 @@ 
namespace aare { RawSubFile::RawSubFile(const std::filesystem::path &fname, DetectorType detector, size_t rows, size_t cols, size_t bitdepth, uint32_t pos_row, uint32_t pos_col) - : m_detector_type(detector), m_bitdepth(bitdepth), m_fname(fname), m_rows(rows), m_cols(cols), - m_bytes_per_frame((m_bitdepth / 8) * m_rows * m_cols), m_pos_row(pos_row), m_pos_col(pos_col) { + : m_detector_type(detector), m_bitdepth(bitdepth), m_fname(fname), + m_rows(rows), m_cols(cols), + m_bytes_per_frame((m_bitdepth / 8) * m_rows * m_cols), m_pos_row(pos_row), + m_pos_col(pos_col) { if (m_detector_type == DetectorType::Moench03_old) { m_pixel_map = GenerateMoench03PixelMap(); - }else if(m_detector_type == DetectorType::Eiger && m_pos_row % 2 == 0){ + } else if (m_detector_type == DetectorType::Eiger && m_pos_row % 2 == 0) { m_pixel_map = GenerateEigerFlipRowsPixelMap(); } @@ -42,7 +44,7 @@ RawSubFile::RawSubFile(const std::filesystem::path &fname, void RawSubFile::seek(size_t frame_index) { if (frame_index >= n_frames) { - throw std::runtime_error("Frame number out of range"); + throw std::runtime_error(LOCATION + fmt::format("Frame index {} out of range in a file with {} frames", frame_index, n_frames)); } m_file.seekg((sizeof(DetectorHeader) + bytes_per_frame()) * frame_index); } @@ -51,37 +53,48 @@ size_t RawSubFile::tell() { return m_file.tellg() / (sizeof(DetectorHeader) + bytes_per_frame()); } - void RawSubFile::read_into(std::byte *image_buf, DetectorHeader *header) { - if(header){ - m_file.read(reinterpret_cast(header), sizeof(DetectorHeader)); + if (header) { + m_file.read(reinterpret_cast(header), sizeof(DetectorHeader)); } else { m_file.seekg(sizeof(DetectorHeader), std::ios::cur); } - //TODO! expand support for different bitdepths - if(m_pixel_map){ + // TODO! expand support for different bitdepths + if (m_pixel_map) { // read into a temporary buffer and then copy the data to the buffer // in the correct order - // currently this only supports 16 bit data! 
- auto part_buffer = new std::byte[bytes_per_frame()]; - m_file.read(reinterpret_cast(part_buffer), bytes_per_frame()); - auto *data = reinterpret_cast(image_buf); - auto *part_data = reinterpret_cast(part_buffer); - for (size_t i = 0; i < pixels_per_frame(); i++) { - data[i] = part_data[(*m_pixel_map)(i)]; + // TODO! add 4 bit support + if(m_bitdepth == 8){ + read_with_map(image_buf); + }else if (m_bitdepth == 16) { + read_with_map(image_buf); + } else if (m_bitdepth == 32) { + read_with_map(image_buf); + }else{ + throw std::runtime_error("Unsupported bitdepth for read with pixel map"); } - delete[] part_buffer; + } else { // read directly into the buffer m_file.read(reinterpret_cast(image_buf), bytes_per_frame()); } } +template +void RawSubFile::read_with_map(std::byte *image_buf) { + auto part_buffer = new std::byte[bytes_per_frame()]; + m_file.read(reinterpret_cast(part_buffer), bytes_per_frame()); + auto *data = reinterpret_cast(image_buf); + auto *part_data = reinterpret_cast(part_buffer); + for (size_t i = 0; i < pixels_per_frame(); i++) { + data[i] = part_data[(*m_pixel_map)(i)]; + } + delete[] part_buffer; +} size_t RawSubFile::rows() const { return m_rows; } size_t RawSubFile::cols() const { return m_cols; } - void RawSubFile::get_part(std::byte *buffer, size_t frame_index) { seek(frame_index); read_into(buffer, nullptr); @@ -94,5 +107,4 @@ size_t RawSubFile::frame_number(size_t frame_index) { return h.frameNumber; } - } // namespace aare \ No newline at end of file diff --git a/src/decode.cpp b/src/decode.cpp new file mode 100644 index 0000000..17c033d --- /dev/null +++ b/src/decode.cpp @@ -0,0 +1,61 @@ +#include "aare/decode.hpp" + +namespace aare { + +uint16_t adc_sar_05_decode64to16(uint64_t input){ + + //we want bits 29,19,28,18,31,21,27,20,24,23,25,22 and then pad to 16 + uint16_t output = 0; + output |= ((input >> 22) & 1) << 11; + output |= ((input >> 25) & 1) << 10; + output |= ((input >> 23) & 1) << 9; + output |= ((input >> 24) & 1) << 8; + 
output |= ((input >> 20) & 1) << 7; + output |= ((input >> 27) & 1) << 6; + output |= ((input >> 21) & 1) << 5; + output |= ((input >> 31) & 1) << 4; + output |= ((input >> 18) & 1) << 3; + output |= ((input >> 28) & 1) << 2; + output |= ((input >> 19) & 1) << 1; + output |= ((input >> 29) & 1) << 0; + return output; +} + +void adc_sar_05_decode64to16(NDView input, NDView output){ + for(int64_t i = 0; i < input.shape(0); i++){ + for(int64_t j = 0; j < input.shape(1); j++){ + output(i,j) = adc_sar_05_decode64to16(input(i,j)); + } + } +} + +uint16_t adc_sar_04_decode64to16(uint64_t input){ + + // bit_map = array([15,17,19,21,23,4,6,8,10,12,14,16] LSB->MSB + uint16_t output = 0; + output |= ((input >> 16) & 1) << 11; + output |= ((input >> 14) & 1) << 10; + output |= ((input >> 12) & 1) << 9; + output |= ((input >> 10) & 1) << 8; + output |= ((input >> 8) & 1) << 7; + output |= ((input >> 6) & 1) << 6; + output |= ((input >> 4) & 1) << 5; + output |= ((input >> 23) & 1) << 4; + output |= ((input >> 21) & 1) << 3; + output |= ((input >> 19) & 1) << 2; + output |= ((input >> 17) & 1) << 1; + output |= ((input >> 15) & 1) << 0; + return output; +} + +void adc_sar_04_decode64to16(NDView input, NDView output){ + for(int64_t i = 0; i < input.shape(0); i++){ + for(int64_t j = 0; j < input.shape(1); j++){ + output(i,j) = adc_sar_04_decode64to16(input(i,j)); + } + } +} + + + +} // namespace aare diff --git a/src/geo_helpers.cpp b/src/geo_helpers.cpp new file mode 100644 index 0000000..39086ec --- /dev/null +++ b/src/geo_helpers.cpp @@ -0,0 +1,71 @@ + +#include "aare/geo_helpers.hpp" +#include "fmt/core.h" + +namespace aare{ + +DetectorGeometry update_geometry_with_roi(DetectorGeometry geo, aare::ROI roi) { + #ifdef AARE_VERBOSE + fmt::println("update_geometry_with_roi() called with ROI: {} {} {} {}", + roi.xmin, roi.xmax, roi.ymin, roi.ymax); + fmt::println("Geometry: {} {} {} {} {} {}", + geo.modules_x, geo.modules_y, geo.pixels_x, geo.pixels_y, geo.module_gap_row, 
geo.module_gap_col); + #endif + int pos_y = 0; + int pos_y_increment = 0; + for (int row = 0; row < geo.modules_y; row++) { + int pos_x = 0; + for (int col = 0; col < geo.modules_x; col++) { + auto &m = geo.module_pixel_0[row * geo.modules_x + col]; + auto original_height = m.height; + auto original_width = m.width; + + // module is to the left of the roi + if (m.origin_x + m.width < roi.xmin) { + m.width = 0; + + // roi is in module + } else { + // here we only arrive when the roi is in or to the left of + // the module + if (roi.xmin > m.origin_x) { + m.width -= roi.xmin - m.origin_x; + } + if (roi.xmax < m.origin_x + original_width) { + m.width -= m.origin_x + original_width - roi.xmax; + } + m.origin_x = pos_x; + pos_x += m.width; + } + + if (m.origin_y + m.height < roi.ymin) { + m.height = 0; + } else { + if ((roi.ymin > m.origin_y) && (roi.ymin < m.origin_y + m.height)) { + m.height -= roi.ymin - m.origin_y; + + } + if (roi.ymax < m.origin_y + original_height) { + m.height -= m.origin_y + original_height - roi.ymax; + } + m.origin_y = pos_y; + pos_y_increment = m.height; + } + #ifdef AARE_VERBOSE + fmt::println("Module {} {} {} {}", m.origin_x, m.origin_y, m.width, m.height); + #endif + } + // increment pos_y + pos_y += pos_y_increment; + } + + // m_rows = roi.height(); + // m_cols = roi.width(); + geo.pixels_x = roi.width(); + geo.pixels_y = roi.height(); + + return geo; + +} + +} // namespace aare \ No newline at end of file diff --git a/src/geo_helpers.test.cpp b/src/geo_helpers.test.cpp new file mode 100644 index 0000000..08ee96c --- /dev/null +++ b/src/geo_helpers.test.cpp @@ -0,0 +1,230 @@ +#include "aare/File.hpp" +#include "aare/RawMasterFile.hpp" //needed for ROI +#include "aare/RawFile.hpp" + +#include +#include + +#include "aare/geo_helpers.hpp" +#include "test_config.hpp" + +TEST_CASE("Simple ROIs on one module"){ + // DetectorGeometry update_geometry_with_roi(DetectorGeometry geo, aare::ROI roi) + aare::DetectorGeometry geo; + + + 
aare::ModuleGeometry mod; + mod.origin_x = 0; + mod.origin_y = 0; + mod.width = 1024; + mod.height = 512; + + + + geo.pixels_x = 1024; + geo.pixels_y = 512; + geo.modules_x = 1; + geo.modules_y = 1; + geo.module_pixel_0.push_back(mod); + + SECTION("ROI is the whole module"){ + aare::ROI roi; + roi.xmin = 0; + roi.xmax = 1024; + roi.ymin = 0; + roi.ymax = 512; + auto updated_geo = aare::update_geometry_with_roi(geo, roi); + + REQUIRE(updated_geo.pixels_x == 1024); + REQUIRE(updated_geo.pixels_y == 512); + REQUIRE(updated_geo.modules_x == 1); + REQUIRE(updated_geo.modules_y == 1); + REQUIRE(updated_geo.module_pixel_0[0].height == 512); + REQUIRE(updated_geo.module_pixel_0[0].width == 1024); + } + SECTION("ROI is the top left corner of the module"){ + aare::ROI roi; + roi.xmin = 100; + roi.xmax = 200; + roi.ymin = 150; + roi.ymax = 200; + auto updated_geo = aare::update_geometry_with_roi(geo, roi); + + REQUIRE(updated_geo.pixels_x == 100); + REQUIRE(updated_geo.pixels_y == 50); + REQUIRE(updated_geo.modules_x == 1); + REQUIRE(updated_geo.modules_y == 1); + REQUIRE(updated_geo.module_pixel_0[0].height == 50); + REQUIRE(updated_geo.module_pixel_0[0].width == 100); + } + + SECTION("ROI is a small square"){ + aare::ROI roi; + roi.xmin = 1000; + roi.xmax = 1010; + roi.ymin = 500; + roi.ymax = 510; + auto updated_geo = aare::update_geometry_with_roi(geo, roi); + + REQUIRE(updated_geo.pixels_x == 10); + REQUIRE(updated_geo.pixels_y == 10); + REQUIRE(updated_geo.modules_x == 1); + REQUIRE(updated_geo.modules_y == 1); + REQUIRE(updated_geo.module_pixel_0[0].height == 10); + REQUIRE(updated_geo.module_pixel_0[0].width == 10); + } + SECTION("ROI is a few columns"){ + aare::ROI roi; + roi.xmin = 750; + roi.xmax = 800; + roi.ymin = 0; + roi.ymax = 512; + auto updated_geo = aare::update_geometry_with_roi(geo, roi); + + REQUIRE(updated_geo.pixels_x == 50); + REQUIRE(updated_geo.pixels_y == 512); + REQUIRE(updated_geo.modules_x == 1); + REQUIRE(updated_geo.modules_y == 1); + 
REQUIRE(updated_geo.module_pixel_0[0].height == 512); + REQUIRE(updated_geo.module_pixel_0[0].width == 50); + } +} + + + +TEST_CASE("Two modules side by side"){ + // DetectorGeometry update_geometry_with_roi(DetectorGeometry geo, aare::ROI roi) + aare::DetectorGeometry geo; + + + aare::ModuleGeometry mod; + mod.origin_x = 0; + mod.origin_y = 0; + mod.width = 1024; + mod.height = 512; + + geo.pixels_x = 2048; + geo.pixels_y = 512; + geo.modules_x = 2; + geo.modules_y = 1; + + geo.module_pixel_0.push_back(mod); + mod.origin_x = 1024; + geo.module_pixel_0.push_back(mod); + + SECTION("ROI is the whole image"){ + aare::ROI roi; + roi.xmin = 0; + roi.xmax = 2048; + roi.ymin = 0; + roi.ymax = 512; + auto updated_geo = aare::update_geometry_with_roi(geo, roi); + + REQUIRE(updated_geo.pixels_x == 2048); + REQUIRE(updated_geo.pixels_y == 512); + REQUIRE(updated_geo.modules_x == 2); + REQUIRE(updated_geo.modules_y == 1); + } + SECTION("rectangle on both modules"){ + aare::ROI roi; + roi.xmin = 800; + roi.xmax = 1300; + roi.ymin = 200; + roi.ymax = 499; + auto updated_geo = aare::update_geometry_with_roi(geo, roi); + + REQUIRE(updated_geo.pixels_x == 500); + REQUIRE(updated_geo.pixels_y == 299); + REQUIRE(updated_geo.modules_x == 2); + REQUIRE(updated_geo.modules_y == 1); + REQUIRE(updated_geo.module_pixel_0[0].height == 299); + REQUIRE(updated_geo.module_pixel_0[0].width == 224); + REQUIRE(updated_geo.module_pixel_0[1].height == 299); + REQUIRE(updated_geo.module_pixel_0[1].width == 276); + } +} + +TEST_CASE("Three modules side by side"){ + // DetectorGeometry update_geometry_with_roi(DetectorGeometry geo, aare::ROI roi) + aare::DetectorGeometry geo; + aare::ROI roi; + roi.xmin = 700; + roi.xmax = 2500; + roi.ymin = 0; + roi.ymax = 123; + + aare::ModuleGeometry mod; + mod.origin_x = 0; + mod.origin_y = 0; + mod.width = 1024; + mod.height = 512; + + geo.pixels_x = 3072; + geo.pixels_y = 512; + geo.modules_x = 3; + geo.modules_y = 1; + + geo.module_pixel_0.push_back(mod); + 
mod.origin_x = 1024; + geo.module_pixel_0.push_back(mod); + mod.origin_x = 2048; + geo.module_pixel_0.push_back(mod); + + auto updated_geo = aare::update_geometry_with_roi(geo, roi); + + REQUIRE(updated_geo.pixels_x == 1800); + REQUIRE(updated_geo.pixels_y == 123); + REQUIRE(updated_geo.modules_x == 3); + REQUIRE(updated_geo.modules_y == 1); + REQUIRE(updated_geo.module_pixel_0[0].height == 123); + REQUIRE(updated_geo.module_pixel_0[0].width == 324); + REQUIRE(updated_geo.module_pixel_0[1].height == 123); + REQUIRE(updated_geo.module_pixel_0[1].width == 1024); + REQUIRE(updated_geo.module_pixel_0[2].height == 123); + REQUIRE(updated_geo.module_pixel_0[2].width == 452); +} + +TEST_CASE("Four modules as a square"){ + // DetectorGeometry update_geometry_with_roi(DetectorGeometry geo, aare::ROI roi) + aare::DetectorGeometry geo; + aare::ROI roi; + roi.xmin = 500; + roi.xmax = 2000; + roi.ymin = 500; + roi.ymax = 600; + + aare::ModuleGeometry mod; + mod.origin_x = 0; + mod.origin_y = 0; + mod.width = 1024; + mod.height = 512; + + geo.pixels_x = 2048; + geo.pixels_y = 1024; + geo.modules_x = 2; + geo.modules_y = 2; + + geo.module_pixel_0.push_back(mod); + mod.origin_x = 1024; + geo.module_pixel_0.push_back(mod); + mod.origin_x = 0; + mod.origin_y = 512; + geo.module_pixel_0.push_back(mod); + mod.origin_x = 1024; + geo.module_pixel_0.push_back(mod); + + auto updated_geo = aare::update_geometry_with_roi(geo, roi); + + REQUIRE(updated_geo.pixels_x == 1500); + REQUIRE(updated_geo.pixels_y == 100); + REQUIRE(updated_geo.modules_x == 2); + REQUIRE(updated_geo.modules_y == 2); + REQUIRE(updated_geo.module_pixel_0[0].height == 12); + REQUIRE(updated_geo.module_pixel_0[0].width == 524); + REQUIRE(updated_geo.module_pixel_0[1].height == 12); + REQUIRE(updated_geo.module_pixel_0[1].width == 976); + REQUIRE(updated_geo.module_pixel_0[2].height == 88); + REQUIRE(updated_geo.module_pixel_0[2].width == 524); + REQUIRE(updated_geo.module_pixel_0[3].height == 88); + 
REQUIRE(updated_geo.module_pixel_0[3].width == 976); +} \ No newline at end of file diff --git a/src/utils/task.cpp b/src/utils/task.cpp new file mode 100644 index 0000000..af6756e --- /dev/null +++ b/src/utils/task.cpp @@ -0,0 +1,30 @@ +#include "aare/utils/task.hpp" + +namespace aare { + +std::vector> split_task(int first, int last, + int n_threads) { + std::vector> vec; + vec.reserve(n_threads); + + int n_frames = last - first; + + if (n_threads >= n_frames) { + for (int i = 0; i != n_frames; ++i) { + vec.push_back({i, i + 1}); + } + return vec; + } + + int step = (n_frames) / n_threads; + for (int i = 0; i != n_threads; ++i) { + int start = step * i; + int stop = step * (i + 1); + if (i == n_threads - 1) + stop = last; + vec.push_back({start, stop}); + } + return vec; +} + +} // namespace aare \ No newline at end of file diff --git a/src/utils/task.test.cpp b/src/utils/task.test.cpp new file mode 100644 index 0000000..e19994a --- /dev/null +++ b/src/utils/task.test.cpp @@ -0,0 +1,32 @@ +#include "aare/utils/task.hpp" + +#include +#include + + +TEST_CASE("Split a range into multiple tasks"){ + + auto tasks = aare::split_task(0, 10, 3); + REQUIRE(tasks.size() == 3); + REQUIRE(tasks[0].first == 0); + REQUIRE(tasks[0].second == 3); + REQUIRE(tasks[1].first == 3); + REQUIRE(tasks[1].second == 6); + REQUIRE(tasks[2].first == 6); + REQUIRE(tasks[2].second == 10); + + tasks = aare::split_task(0, 10, 1); + REQUIRE(tasks.size() == 1); + REQUIRE(tasks[0].first == 0); + REQUIRE(tasks[0].second == 10); + + tasks = aare::split_task(0, 10, 10); + REQUIRE(tasks.size() == 10); + for (int i = 0; i < 10; i++){ + REQUIRE(tasks[i].first == i); + REQUIRE(tasks[i].second == i+1); + } + + + +} \ No newline at end of file From 8ff6f9f506efecbe9e8274bdf673f2b850aed1ac Mon Sep 17 00:00:00 2001 From: froejdh_e Date: Tue, 18 Feb 2025 15:49:46 +0100 Subject: [PATCH 034/120] fixed linking to lmfit --- CMakeLists.txt | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git 
a/CMakeLists.txt b/CMakeLists.txt index 62a3878..624259a 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -98,8 +98,6 @@ if(AARE_FETCH_LMFIT) FetchContent_MakeAvailable(lmfit) set_property(TARGET lmfit PROPERTY POSITION_INDEPENDENT_CODE ON) - target_include_directories (lmfit PUBLIC "${libzmq_SOURCE_DIR}/lib") - message(STATUS "lmfit include dir: ${lmfit_SOURCE_DIR}/lib") else() find_package(lmfit REQUIRED) endif() @@ -370,7 +368,7 @@ target_link_libraries( ${STD_FS_LIB} # from helpers.cmake PRIVATE aare_compiler_flags - lmfit + "$" ) set_target_properties(aare_core PROPERTIES From 6a8398848519c307200ad01bb7c4e96b805196f0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Erik=20Fr=C3=B6jdh?= Date: Tue, 18 Feb 2025 21:13:27 +0100 Subject: [PATCH 035/120] Added chi2 to fit results (#131) - fit_gaus and fit_pol1 now return a dict - calculate chi2 after fit - cleaned up code --- CMakeLists.txt | 2 +- include/aare/Fit.hpp | 39 +++-- include/aare/NDArray.hpp | 29 ++++ include/aare/NDView.hpp | 11 +- include/aare/utils/par.hpp | 18 +++ python/CMakeLists.txt | 3 +- python/examples/play.py | 62 ++------ python/src/fit.hpp | 70 +++++---- src/Fit.cpp | 284 ++++++++++++++++--------------------- src/NDArray.test.cpp | 28 ++++ 10 files changed, 291 insertions(+), 255 deletions(-) create mode 100644 include/aare/utils/par.hpp diff --git a/CMakeLists.txt b/CMakeLists.txt index 624259a..a5a576a 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -85,7 +85,7 @@ if(AARE_FETCH_LMFIT) GIT_TAG main PATCH_COMMAND ${lmfit_patch} UPDATE_DISCONNECTED 1 - EXCLUDE_FROM_ALL + EXCLUDE_FROM_ALL 1 ) #Disable what we don't need from lmfit set(BUILD_TESTING OFF CACHE BOOL "") diff --git a/include/aare/Fit.hpp b/include/aare/Fit.hpp index 20ef4ef..6400edd 100644 --- a/include/aare/Fit.hpp +++ b/include/aare/Fit.hpp @@ -17,6 +17,13 @@ NDArray pol1(NDView x, NDView par); } // namespace func +/** + * @brief Estimate the initial parameters for a Gaussian fit + */ +std::array gaus_init_par(const NDView x, 
const NDView y); + +std::array pol1_init_par(const NDView x, const NDView y); + static constexpr int DEFAULT_NUM_THREADS = 4; /** @@ -26,14 +33,15 @@ static constexpr int DEFAULT_NUM_THREADS = 4; */ NDArray fit_gaus(NDView x, NDView y); - /** * @brief Fit a 1D Gaussian to each pixel. Data layout [row, col, values] * @param x x values * @param y y vales, layout [row, col, values] * @param n_threads number of threads to use */ -NDArray fit_gaus(NDView x, NDView y, int n_threads = DEFAULT_NUM_THREADS); +NDArray fit_gaus(NDView x, NDView y, + int n_threads = DEFAULT_NUM_THREADS); + /** @@ -45,10 +53,12 @@ NDArray fit_gaus(NDView x, NDView y, int n_thre * @param par_err_out output error parameters */ void fit_gaus(NDView x, NDView y, NDView y_err, - NDView par_out, NDView par_err_out); + NDView par_out, NDView par_err_out, + double& chi2); /** - * @brief Fit a 1D Gaussian to each pixel with error estimates. Data layout [row, col, values] + * @brief Fit a 1D Gaussian to each pixel with error estimates. Data layout + * [row, col, values] * @param x x values * @param y y vales, layout [row, col, values] * @param y_err error in y, layout [row, col, values] @@ -57,20 +67,21 @@ void fit_gaus(NDView x, NDView y, NDView y_err, * @param n_threads number of threads to use */ void fit_gaus(NDView x, NDView y, NDView y_err, - NDView par_out, NDView par_err_out, int n_threads = DEFAULT_NUM_THREADS); - + NDView par_out, NDView par_err_out, NDView chi2_out, + int n_threads = DEFAULT_NUM_THREADS + ); NDArray fit_pol1(NDView x, NDView y); -NDArray fit_pol1(NDView x, NDView y, int n_threads = DEFAULT_NUM_THREADS); +NDArray fit_pol1(NDView x, NDView y, + int n_threads = DEFAULT_NUM_THREADS); -void fit_pol1(NDView x, NDView y, - NDView y_err, NDView par_out, - NDView par_err_out); +void fit_pol1(NDView x, NDView y, NDView y_err, + NDView par_out, NDView par_err_out, double& chi2); -//TODO! 
not sure we need to offer the different version in C++ -void fit_pol1(NDView x, NDView y, - NDView y_err, NDView par_out, - NDView par_err_out, int n_threads = DEFAULT_NUM_THREADS); +// TODO! not sure we need to offer the different version in C++ +void fit_pol1(NDView x, NDView y, NDView y_err, + NDView par_out, NDView par_err_out,NDView chi2_out, + int n_threads = DEFAULT_NUM_THREADS); } // namespace aare \ No newline at end of file diff --git a/include/aare/NDArray.hpp b/include/aare/NDArray.hpp index 15beb02..cfa5b5c 100644 --- a/include/aare/NDArray.hpp +++ b/include/aare/NDArray.hpp @@ -69,6 +69,11 @@ class NDArray : public ArrayExpr, Ndim> { std::copy(v.begin(), v.end(), begin()); } + template + NDArray(const std::array& arr) : NDArray({Size}) { + std::copy(arr.begin(), arr.end(), begin()); + } + // Move constructor NDArray(NDArray &&other) noexcept : shape_(other.shape_), strides_(c_strides(shape_)), @@ -105,6 +110,20 @@ class NDArray : public ArrayExpr, Ndim> { NDArray &operator-=(const NDArray &other); NDArray &operator*=(const NDArray &other); + //Write directly to the data array, or create a new one + template + NDArray& operator=(const std::array &other){ + if(Size != size_){ + delete[] data_; + size_ = Size; + data_ = new T[size_]; + } + for (size_t i = 0; i < Size; ++i) { + data_[i] = other[i]; + } + return *this; + } + // NDArray& operator/=(const NDArray& other); template NDArray &operator/=(const NDArray &other) { @@ -135,6 +154,11 @@ class NDArray : public ArrayExpr, Ndim> { NDArray &operator&=(const T & /*mask*/); + + + + + void sqrt() { for (int i = 0; i < size_; ++i) { data_[i] = std::sqrt(data_[i]); @@ -318,6 +342,9 @@ NDArray &NDArray::operator+=(const T &value) { return *this; } + + + template NDArray NDArray::operator+(const T &value) { NDArray result = *this; @@ -418,4 +445,6 @@ NDArray load(const std::string &pathname, return img; } + + } // namespace aare \ No newline at end of file diff --git a/include/aare/NDView.hpp 
b/include/aare/NDView.hpp index e3a6d30..f53f758 100644 --- a/include/aare/NDView.hpp +++ b/include/aare/NDView.hpp @@ -1,5 +1,5 @@ #pragma once - +#include "aare/defs.hpp" #include "aare/ArrayExpr.hpp" #include @@ -99,6 +99,15 @@ template class NDView : public ArrayExpr()); } + + template + NDView& operator=(const std::array &arr) { + if(size() != arr.size()) + throw std::runtime_error(LOCATION + "Array and NDView size mismatch"); + std::copy(arr.begin(), arr.end(), begin()); + return *this; + } + NDView &operator=(const T val) { for (auto it = begin(); it != end(); ++it) *it = val; diff --git a/include/aare/utils/par.hpp b/include/aare/utils/par.hpp new file mode 100644 index 0000000..efb1c77 --- /dev/null +++ b/include/aare/utils/par.hpp @@ -0,0 +1,18 @@ +#include +#include +#include + +namespace aare { + + template + void RunInParallel(F func, const std::vector>& tasks) { + // auto tasks = split_task(0, y.shape(0), n_threads); + std::vector threads; + for (auto &task : tasks) { + threads.push_back(std::thread(func, task.first, task.second)); + } + for (auto &thread : threads) { + thread.join(); + } + } +} // namespace aare \ No newline at end of file diff --git a/python/CMakeLists.txt b/python/CMakeLists.txt index 2aaa222..4cbd69d 100644 --- a/python/CMakeLists.txt +++ b/python/CMakeLists.txt @@ -49,11 +49,10 @@ set(PYTHON_EXAMPLES examples/fits.py ) - - # Copy the python examples to the build directory foreach(FILE ${PYTHON_EXAMPLES}) configure_file(${FILE} ${CMAKE_BINARY_DIR}/${FILE} ) + message(STATUS "Copying ${FILE} to ${CMAKE_BINARY_DIR}/${FILE}") endforeach(FILE ${PYTHON_EXAMPLES}) diff --git a/python/examples/play.py b/python/examples/play.py index f1a869b..82aaa7d 100644 --- a/python/examples/play.py +++ b/python/examples/play.py @@ -8,61 +8,15 @@ import numpy as np import boost_histogram as bh import time -<<<<<<< HEAD -from aare import File, ClusterFinder, VarClusterFinder, ClusterFile, CtbRawFile -from aare import gaus, fit_gaus +import aare -base = 
Path('/mnt/sls_det_storage/moench_data/Julian/MOENCH05/20250113_first_xrays_redo/raw_files/') -cluster_file = Path('/home/l_msdetect/erik/tmp/Cu.clust') +data = np.random.normal(10, 1, 1000) -t0 = time.perf_counter() -offset= -0.5 -hist3d = bh.Histogram( - bh.axis.Regular(160, 0+offset, 160+offset), #x - bh.axis.Regular(150, 0+offset, 150+offset), #y - bh.axis.Regular(200, 0, 6000), #ADU -) +hist = bh.Histogram(bh.axis.Regular(10, 0, 20)) +hist.fill(data) -total_clusters = 0 -with ClusterFile(cluster_file, chunk_size = 1000) as f: - for i, clusters in enumerate(f): - arr = np.array(clusters) - total_clusters += clusters.size - hist3d.fill(arr['y'],arr['x'], clusters.sum_2x2()) #python talks [row, col] cluster finder [x,y] -======= -from aare import RawFile - -f = RawFile('/mnt/sls_det_storage/jungfrau_data1/vadym_tests/jf12_M431/laser_scan/laserScan_pedestal_G0_master_0.json') - -print(f'{f.frame_number(1)}') - -for i in range(10): - header, img = f.read_frame() - print(header['frameNumber'], img.shape) ->>>>>>> developer - - -t_elapsed = time.perf_counter()-t0 -print(f'Histogram filling took: {t_elapsed:.3f}s {total_clusters/t_elapsed/1e6:.3f}M clusters/s') - -histogram_data = hist3d.counts() -x = hist3d.axes[2].edges[:-1] - -y = histogram_data[100,100,:] -xx = np.linspace(x[0], x[-1]) -# fig, ax = plt.subplots() -# ax.step(x, y, where = 'post') - -y_err = np.sqrt(y) -y_err = np.zeros(y.size) -y_err += 1 - -# par = fit_gaus2(y,x, y_err) -# ax.plot(xx, gaus(xx,par)) -# print(par) - -res = fit_gaus(y,x) -res2 = fit_gaus(y,x, y_err) -print(res) -print(res2) +x = hist.axes[0].centers +y = hist.values() +y_err = np.sqrt(y)+1 +res = aare.fit_gaus(x, y, y_err, chi2 = True) \ No newline at end of file diff --git a/python/src/fit.hpp b/python/src/fit.hpp index 60cdecc..2506e9b 100644 --- a/python/src/fit.hpp +++ b/python/src/fit.hpp @@ -7,6 +7,7 @@ #include "aare/Fit.hpp" namespace py = pybind11; +using namespace pybind11::literals; void define_fit_bindings(py::module &m) 
{ @@ -29,7 +30,8 @@ void define_fit_bindings(py::module &m) { The points at which to evaluate the Gaussian function. par : array_like The parameters of the Gaussian function. The first element is the amplitude, the second element is the mean, and the third element is the standard deviation. - )", py::arg("x"), py::arg("par")); + )", + py::arg("x"), py::arg("par")); m.def( "pol1", @@ -49,7 +51,8 @@ void define_fit_bindings(py::module &m) { The points at which to evaluate the polynomial function. par : array_like The parameters of the polynomial function. The first element is the intercept, and the second element is the slope. - )", py::arg("x"), py::arg("par")); + )", + py::arg("x"), py::arg("par")); m.def( "fit_gaus", @@ -72,7 +75,7 @@ void define_fit_bindings(py::module &m) { throw std::runtime_error("Data must be 1D or 3D"); } }, -R"( + R"( Fit a 1D Gaussian to data. Parameters @@ -90,8 +93,8 @@ n_threads : int, optional "fit_gaus", [](py::array_t x, py::array_t y, - py::array_t - y_err, int n_threads) { + py::array_t y_err, + int n_threads) { if (y.ndim() == 3) { // Allocate memory for the output // Need to have pointers to allow python to manage @@ -99,15 +102,20 @@ n_threads : int, optional auto par = new NDArray({y.shape(0), y.shape(1), 3}); auto par_err = new NDArray({y.shape(0), y.shape(1), 3}); + auto chi2 = new NDArray({y.shape(0), y.shape(1)}); + // Make views of the numpy arrays auto y_view = make_view_3d(y); auto y_view_err = make_view_3d(y_err); auto x_view = make_view_1d(x); + aare::fit_gaus(x_view, y_view, y_view_err, par->view(), - par_err->view(), n_threads); - // return return_image_data(par); - return py::make_tuple(return_image_data(par), - return_image_data(par_err)); + par_err->view(), chi2->view(), n_threads); + + return py::dict("par"_a = return_image_data(par), + "par_err"_a = return_image_data(par_err), + "chi2"_a = return_image_data(chi2), + "Ndf"_a = y.shape(2) - 3); } else if (y.ndim() == 1) { // Allocate memory for the output // Need 
to have pointers to allow python to manage @@ -120,15 +128,20 @@ n_threads : int, optional auto y_view_err = make_view_1d(y_err); auto x_view = make_view_1d(x); + + double chi2 = 0; aare::fit_gaus(x_view, y_view, y_view_err, par->view(), - par_err->view()); - return py::make_tuple(return_image_data(par), - return_image_data(par_err)); + par_err->view(), chi2); + + return py::dict("par"_a = return_image_data(par), + "par_err"_a = return_image_data(par_err), + "chi2"_a = chi2, "Ndf"_a = y.size() - 3); + } else { throw std::runtime_error("Data must be 1D or 3D"); } }, -R"( + R"( Fit a 1D Gaussian to data with error estimates. Parameters @@ -172,11 +185,10 @@ n_threads : int, optional "fit_pol1", [](py::array_t x, py::array_t y, - py::array_t - y_err, int n_threads) { + py::array_t y_err, + int n_threads) { if (y.ndim() == 3) { - auto par = - new NDArray({y.shape(0), y.shape(1), 2}); + auto par = new NDArray({y.shape(0), y.shape(1), 2}); auto par_err = new NDArray({y.shape(0), y.shape(1), 2}); @@ -184,10 +196,15 @@ n_threads : int, optional auto y_view_err = make_view_3d(y_err); auto x_view = make_view_1d(x); - aare::fit_pol1(x_view, y_view,y_view_err, par->view(), - par_err->view(), n_threads); - return py::make_tuple(return_image_data(par), - return_image_data(par_err)); + auto chi2 = new NDArray({y.shape(0), y.shape(1)}); + + aare::fit_pol1(x_view, y_view, y_view_err, par->view(), + par_err->view(), chi2->view(), n_threads); + return py::dict("par"_a = return_image_data(par), + "par_err"_a = return_image_data(par_err), + "chi2"_a = return_image_data(chi2), + "Ndf"_a = y.shape(2) - 2); + } else if (y.ndim() == 1) { auto par = new NDArray({2}); @@ -197,15 +214,18 @@ n_threads : int, optional auto y_view_err = make_view_1d(y_err); auto x_view = make_view_1d(x); + double chi2 = 0; + aare::fit_pol1(x_view, y_view, y_view_err, par->view(), - par_err->view()); - return py::make_tuple(return_image_data(par), - return_image_data(par_err)); + par_err->view(), chi2); + return 
py::dict("par"_a = return_image_data(par), + "par_err"_a = return_image_data(par_err), + "chi2"_a = chi2, "Ndf"_a = y.size() - 2); } else { throw std::runtime_error("Data must be 1D or 3D"); } }, -R"( + R"( Fit a 1D polynomial to data with error estimates. Parameters diff --git a/src/Fit.cpp b/src/Fit.cpp index 08ecaec..c8ce178 100644 --- a/src/Fit.cpp +++ b/src/Fit.cpp @@ -1,10 +1,12 @@ #include "aare/Fit.hpp" #include "aare/utils/task.hpp" +#include "aare/utils/par.hpp" #include #include #include +#include namespace aare { @@ -35,33 +37,11 @@ NDArray pol1(NDView x, NDView par) { } // namespace func NDArray fit_gaus(NDView x, NDView y) { - NDArray result({3}, 0); - lm_control_struct control = lm_control_double; + NDArray result = gaus_init_par(x, y); + lm_status_struct status; - // Estimate the initial parameters for the fit - std::vector start_par{0, 0, 0}; - auto e = std::max_element(y.begin(), y.end()); - auto idx = std::distance(y.begin(), e); - - start_par[0] = *e; // For amplitude we use the maximum value - start_par[1] = - x[idx]; // For the mean we use the x value of the maximum value - - // For sigma we estimate the fwhm and divide by 2.35 - // assuming equally spaced x values - auto delta = x[1] - x[0]; - start_par[2] = - std::count_if(y.begin(), y.end(), - [e, delta](double val) { return val > *e / 2; }) * - delta / 2.35; - - lmfit::result_t res(start_par); - lmcurve(res.par.size(), res.par.data(), x.size(), x.data(), y.data(), - aare::func::gaus, &control, &res.status); - - result(0) = res.par[0]; - result(1) = res.par[1]; - result(2) = res.par[2]; + lmcurve(result.size(), result.data(), x.size(), x.data(), y.data(), + aare::func::gaus, &lm_control_double, &status); return result; } @@ -81,65 +61,17 @@ NDArray fit_gaus(NDView x, NDView y, } } }; - auto tasks = split_task(0, y.shape(0), n_threads); - std::vector threads; - for (auto &task : tasks) { - threads.push_back(std::thread(process, task.first, task.second)); - } - for (auto &thread : threads) { 
- thread.join(); - } + auto tasks = split_task(0, y.shape(0), n_threads); + RunInParallel(process, tasks); return result; } -void fit_gaus(NDView x, NDView y, NDView y_err, - NDView par_out, NDView par_err_out, - int n_threads) { - - auto process = [&](ssize_t first_row, ssize_t last_row) { - for (ssize_t row = first_row; row < last_row; row++) { - for (ssize_t col = 0; col < y.shape(1); col++) { - NDView y_view(&y(row, col, 0), {y.shape(2)}); - NDView y_err_view(&y_err(row, col, 0), - {y_err.shape(2)}); - NDView par_out_view(&par_out(row, col, 0), - {par_out.shape(2)}); - NDView par_err_out_view(&par_err_out(row, col, 0), - {par_err_out.shape(2)}); - fit_gaus(x, y_view, y_err_view, par_out_view, par_err_out_view); - } - } - }; - - auto tasks = split_task(0, y.shape(0), n_threads); - std::vector threads; - for (auto &task : tasks) { - threads.push_back(std::thread(process, task.first, task.second)); - } - for (auto &thread : threads) { - thread.join(); - } -} - -void fit_gaus(NDView x, NDView y, NDView y_err, - NDView par_out, NDView par_err_out) { - // Check that we have the correct sizes - if (y.size() != x.size() || y.size() != y_err.size() || - par_out.size() != 3 || par_err_out.size() != 3) { - throw std::runtime_error("Data, x, data_err must have the same size " - "and par_out, par_err_out must have size 3"); - } - - lm_control_struct control = lm_control_double; - - // Estimate the initial parameters for the fit - std::vector start_par{0, 0, 0}; - std::vector start_par_err{0, 0, 0}; - std::vector start_cov{0, 0, 0, 0, 0, 0, 0, 0, 0}; - +std::array gaus_init_par(const NDView x, const NDView y) { + std::array start_par{0, 0, 0}; auto e = std::max_element(y.begin(), y.end()); auto idx = std::distance(y.begin(), e); + start_par[0] = *e; // For amplitude we use the maximum value start_par[1] = x[idx]; // For the mean we use the x value of the maximum value @@ -152,66 +84,82 @@ void fit_gaus(NDView x, NDView y, NDView y_err, [e, delta](double val) { return val > 
*e / 2; }) * delta / 2.35; - lmfit::result_t res(start_par); - lmfit::result_t res_err(start_par_err); - lmfit::result_t cov(start_cov); - - // TODO can we make lmcurve write the result directly where is should be? - lmcurve2(res.par.size(), res.par.data(), res_err.par.data(), cov.par.data(), - x.size(), x.data(), y.data(), y_err.data(), aare::func::gaus, - &control, &res.status); - - par_out(0) = res.par[0]; - par_out(1) = res.par[1]; - par_out(2) = res.par[2]; - par_err_out(0) = res_err.par[0]; - par_err_out(1) = res_err.par[1]; - par_err_out(2) = res_err.par[2]; + return start_par; } -void fit_pol1(NDView x, NDView y, NDView y_err, - NDView par_out, NDView par_err_out) { + +std::array pol1_init_par(const NDView x, const NDView y){ + // Estimate the initial parameters for the fit + std::array start_par{0, 0}; + + + auto y2 = std::max_element(y.begin(), y.end()); + auto x2 = x[std::distance(y.begin(), y2)]; + auto y1 = std::min_element(y.begin(), y.end()); + auto x1 = x[std::distance(y.begin(), y1)]; + + start_par[0] = + (*y2 - *y1) / (x2 - x1); // For amplitude we use the maximum value + start_par[1] = + *y1 - ((*y2 - *y1) / (x2 - x1)) * + x1; // For the mean we use the x value of the maximum value + return start_par; +} + +void fit_gaus(NDView x, NDView y, NDView y_err, + NDView par_out, NDView par_err_out, + double &chi2) { + // Check that we have the correct sizes if (y.size() != x.size() || y.size() != y_err.size() || - par_out.size() != 2 || par_err_out.size() != 2) { + par_out.size() != 3 || par_err_out.size() != 3) { throw std::runtime_error("Data, x, data_err must have the same size " - "and par_out, par_err_out must have size 2"); + "and par_out, par_err_out must have size 3"); } - lm_control_struct control = lm_control_double; - // Estimate the initial parameters for the fit - std::vector start_par{0, 0}; - std::vector start_par_err{0, 0}; - std::vector start_cov{0, 0, 0, 0}; + // /* Collection of output parameters for status info. 
*/ + // typedef struct { + // double fnorm; /* norm of the residue vector fvec. */ + // int nfev; /* actual number of iterations. */ + // int outcome; /* Status indicator. Nonnegative values are used as + // index + // for the message text lm_infmsg, set in lmmin.c. */ + // int userbreak; /* Set when function evaluation requests termination. + // */ + // } lm_status_struct; - auto y2 = std::max_element(y.begin(), y.end()); - auto x2 = x[std::distance(y.begin(), y2)]; - auto y1 = std::min_element(y.begin(), y.end()); - auto x1 = x[std::distance(y.begin(), y1)]; - start_par[0] = - (*y2 - *y1) / (x2 - x1); // For amplitude we use the maximum value - start_par[1] = - *y1 - ((*y2 - *y1) / (x2 - x1)) * - x1; // For the mean we use the x value of the maximum value + lm_status_struct status; + par_out = gaus_init_par(x, y); + std::array cov{0, 0, 0, 0, 0, 0, 0 , 0 , 0}; - lmfit::result_t res(start_par); - lmfit::result_t res_err(start_par_err); - lmfit::result_t cov(start_cov); + // void lmcurve2( const int n_par, double *par, double *parerr, double *covar, const int m_dat, const double *t, const double *y, const double *dy, double (*f)( const double ti, const double *par ), const lm_control_struct *control, lm_status_struct *status); + // n_par - Number of free variables. Length of parameter vector par. + // par - Parameter vector. On input, it must contain a reasonable guess. On output, it contains the solution found to minimize ||r||. + // parerr - Parameter uncertainties vector. Array of length n_par or NULL. On output, unless it or covar is NULL, it contains the weighted parameter uncertainties for the found parameters. + // covar - Covariance matrix. Array of length n_par * n_par or NULL. On output, unless it is NULL, it contains the covariance matrix. + // m_dat - Number of data points. Length of vectors t, y, dy. Must statisfy n_par <= m_dat. + // t - Array of length m_dat. Contains the abcissae (time, or "x") for which function f will be evaluated. 
+ // y - Array of length m_dat. Contains the ordinate values that shall be fitted. + // dy - Array of length m_dat. Contains the standard deviations of the values y. + // f - A user-supplied parametric function f(ti;par). + // control - Parameter collection for tuning the fit procedure. In most cases, the default &lm_control_double is adequate. If f is only computed with single-precision accuracy, &lm_control_float should be used. Parameters are explained in lmmin2(3). + // status - A record used to return information about the minimization process: For details, see lmmin2(3). - lmcurve2(res.par.size(), res.par.data(), res_err.par.data(), cov.par.data(), - x.size(), x.data(), y.data(), y_err.data(), aare::func::pol1, - &control, &res.status); + lmcurve2(par_out.size(), par_out.data(), par_err_out.data(), cov.data(), + x.size(), x.data(), y.data(), y_err.data(), aare::func::gaus, + &lm_control_double, &status); - par_out(0) = res.par[0]; - par_out(1) = res.par[1]; - par_err_out(0) = res_err.par[0]; - par_err_out(1) = res_err.par[1]; + // Calculate chi2 + chi2 = 0; + for (size_t i = 0; i < y.size(); i++) { + chi2 += std::pow((y(i) - func::gaus(x(i), par_out.data())) / y_err(i), 2); + } } -void fit_pol1(NDView x, NDView y, NDView y_err, - NDView par_out, NDView par_err_out, +void fit_gaus(NDView x, NDView y, NDView y_err, + NDView par_out, NDView par_err_out, NDView chi2_out, int n_threads) { auto process = [&](ssize_t first_row, ssize_t last_row) { @@ -224,21 +172,64 @@ void fit_pol1(NDView x, NDView y, NDView y_err, {par_out.shape(2)}); NDView par_err_out_view(&par_err_out(row, col, 0), {par_err_out.shape(2)}); - fit_pol1(x, y_view, y_err_view, par_out_view, par_err_out_view); + + fit_gaus(x, y_view, y_err_view, par_out_view, par_err_out_view, + chi2_out(row, col)); } } }; auto tasks = split_task(0, y.shape(0), n_threads); - std::vector threads; - for (auto &task : tasks) { - threads.push_back(std::thread(process, task.first, task.second)); + RunInParallel(process, 
tasks); +} + +void fit_pol1(NDView x, NDView y, NDView y_err, + NDView par_out, NDView par_err_out, double& chi2) { + // Check that we have the correct sizes + if (y.size() != x.size() || y.size() != y_err.size() || + par_out.size() != 2 || par_err_out.size() != 2) { + throw std::runtime_error("Data, x, data_err must have the same size " + "and par_out, par_err_out must have size 2"); } - for (auto &thread : threads) { - thread.join(); + + lm_status_struct status; + par_out = pol1_init_par(x, y); + std::array cov{0, 0, 0, 0}; + + lmcurve2(par_out.size(), par_out.data(), par_err_out.data(), cov.data(), + x.size(), x.data(), y.data(), y_err.data(), aare::func::pol1, + &lm_control_double, &status); + + // Calculate chi2 + chi2 = 0; + for (size_t i = 0; i < y.size(); i++) { + chi2 += std::pow((y(i) - func::pol1(x(i), par_out.data())) / y_err(i), 2); } } +void fit_pol1(NDView x, NDView y, NDView y_err, + NDView par_out, NDView par_err_out, NDView chi2_out, + int n_threads) { + + auto process = [&](ssize_t first_row, ssize_t last_row) { + for (ssize_t row = first_row; row < last_row; row++) { + for (ssize_t col = 0; col < y.shape(1); col++) { + NDView y_view(&y(row, col, 0), {y.shape(2)}); + NDView y_err_view(&y_err(row, col, 0), + {y_err.shape(2)}); + NDView par_out_view(&par_out(row, col, 0), + {par_out.shape(2)}); + NDView par_err_out_view(&par_err_out(row, col, 0), + {par_err_out.shape(2)}); + fit_pol1(x, y_view, y_err_view, par_out_view, par_err_out_view, chi2_out(row, col)); + } + } + }; + + auto tasks = split_task(0, y.shape(0), n_threads); + RunInParallel(process, tasks); +} + NDArray fit_pol1(NDView x, NDView y) { // // Check that we have the correct sizes // if (y.size() != x.size() || y.size() != y_err.size() || @@ -246,28 +237,11 @@ NDArray fit_pol1(NDView x, NDView y) { // throw std::runtime_error("Data, x, data_err must have the same size " // "and par_out, par_err_out must have size 2"); // } - NDArray par({2}, 0); + NDArray par = pol1_init_par(x, y); - 
lm_control_struct control = lm_control_double; - - // Estimate the initial parameters for the fit - std::vector start_par{0, 0}; - - auto y2 = std::max_element(y.begin(), y.end()); - auto x2 = x[std::distance(y.begin(), y2)]; - auto y1 = std::min_element(y.begin(), y.end()); - auto x1 = x[std::distance(y.begin(), y1)]; - - start_par[0] = (*y2 - *y1) / (x2 - x1); - start_par[1] = *y1 - ((*y2 - *y1) / (x2 - x1)) * x1; - - lmfit::result_t res(start_par); - - lmcurve(res.par.size(), res.par.data(), x.size(), x.data(), y.data(), - aare::func::pol1, &control, &res.status); - - par(0) = res.par[0]; - par(1) = res.par[1]; + lm_status_struct status; + lmcurve(par.size(), par.data(), x.size(), x.data(), y.data(), + aare::func::pol1, &lm_control_double, &status); return par; } @@ -287,13 +261,7 @@ NDArray fit_pol1(NDView x, NDView y, }; auto tasks = split_task(0, y.shape(0), n_threads); - std::vector threads; - for (auto &task : tasks) { - threads.push_back(std::thread(process, task.first, task.second)); - } - for (auto &thread : threads) { - thread.join(); - } + RunInParallel(process, tasks); return result; } diff --git a/src/NDArray.test.cpp b/src/NDArray.test.cpp index 54099fd..942481c 100644 --- a/src/NDArray.test.cpp +++ b/src/NDArray.test.cpp @@ -379,4 +379,32 @@ TEST_CASE("Elementwise operations on images") { REQUIRE(A(i) == a_val); } } +} + +TEST_CASE("Assign an std::array to a 1D NDArray") { + NDArray a{{5}, 0}; + std::array b{1, 2, 3, 4, 5}; + a = b; + for (uint32_t i = 0; i < a.size(); ++i) { + REQUIRE(a(i) == b[i]); + } +} + +TEST_CASE("Assign an std::array to a 1D NDArray of a different size") { + NDArray a{{3}, 0}; + std::array b{1, 2, 3, 4, 5}; + a = b; + + REQUIRE(a.size() == 5); + for (uint32_t i = 0; i < a.size(); ++i) { + REQUIRE(a(i) == b[i]); + } +} + +TEST_CASE("Construct an NDArray from an std::array") { + std::array b{1, 2, 3, 4, 5}; + NDArray a(b); + for (uint32_t i = 0; i < a.size(); ++i) { + REQUIRE(a(i) == b[i]); + } } \ No newline at end of file 
From 5d2f25a6e95cf1d0848fc9dbd0a53fd5fb02ddfe Mon Sep 17 00:00:00 2001 From: froejdh_e Date: Tue, 18 Feb 2025 21:44:03 +0100 Subject: [PATCH 036/120] bumped version number --- conda-recipe/meta.yaml | 2 +- pyproject.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/conda-recipe/meta.yaml b/conda-recipe/meta.yaml index c405e90..7add883 100644 --- a/conda-recipe/meta.yaml +++ b/conda-recipe/meta.yaml @@ -1,6 +1,6 @@ package: name: aare - version: 2025.2.12 #TODO! how to not duplicate this? + version: 2025.2.18 #TODO! how to not duplicate this? source: diff --git a/pyproject.toml b/pyproject.toml index 74e624f..9c72fd5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "scikit_build_core.build" [project] name = "aare" -version = "2025.2.12" +version = "2025.2.18" [tool.scikit-build] cmake.verbose = true From b7a47576a15cdb808d30ec67c2b9556510fddc19 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Erik=20Fr=C3=B6jdh?= Date: Wed, 19 Feb 2025 07:19:59 +0100 Subject: [PATCH 037/120] Multi threaded fitting and returning chi2 (#132) Co-authored-by: Patrick Co-authored-by: JulianHeymes Co-authored-by: Dhanya Thattil --- CMakeLists.txt | 8 +- conda-recipe/meta.yaml | 3 +- include/aare/Fit.hpp | 42 ++++-- include/aare/NDArray.hpp | 29 ++++ include/aare/NDView.hpp | 11 +- include/aare/utils/par.hpp | 18 +++ pyproject.toml | 3 +- python/CMakeLists.txt | 2 +- python/examples/play.py | 34 +---- python/src/fit.hpp | 77 ++++++---- src/Fit.cpp | 294 +++++++++++++++++-------------------- src/NDArray.test.cpp | 28 ++++ 12 files changed, 317 insertions(+), 232 deletions(-) create mode 100644 include/aare/utils/par.hpp diff --git a/CMakeLists.txt b/CMakeLists.txt index 62a3878..b93b513 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -85,7 +85,7 @@ if(AARE_FETCH_LMFIT) GIT_TAG main PATCH_COMMAND ${lmfit_patch} UPDATE_DISCONNECTED 1 - EXCLUDE_FROM_ALL + EXCLUDE_FROM_ALL 1 ) #Disable what we don't need from lmfit set(BUILD_TESTING OFF CACHE 
BOOL "") @@ -97,9 +97,6 @@ if(AARE_FETCH_LMFIT) FetchContent_MakeAvailable(lmfit) set_property(TARGET lmfit PROPERTY POSITION_INDEPENDENT_CODE ON) - - target_include_directories (lmfit PUBLIC "${libzmq_SOURCE_DIR}/lib") - message(STATUS "lmfit include dir: ${lmfit_SOURCE_DIR}/lib") else() find_package(lmfit REQUIRED) endif() @@ -370,7 +367,8 @@ target_link_libraries( ${STD_FS_LIB} # from helpers.cmake PRIVATE aare_compiler_flags - lmfit + "$" + ) set_target_properties(aare_core PROPERTIES diff --git a/conda-recipe/meta.yaml b/conda-recipe/meta.yaml index c405e90..ffa95a7 100644 --- a/conda-recipe/meta.yaml +++ b/conda-recipe/meta.yaml @@ -1,6 +1,7 @@ package: name: aare - version: 2025.2.12 #TODO! how to not duplicate this? + version: 2025.2.18 #TODO! how to not duplicate this? + source: diff --git a/include/aare/Fit.hpp b/include/aare/Fit.hpp index 20ef4ef..6fd10aa 100644 --- a/include/aare/Fit.hpp +++ b/include/aare/Fit.hpp @@ -17,6 +17,14 @@ NDArray pol1(NDView x, NDView par); } // namespace func + +/** + * @brief Estimate the initial parameters for a Gaussian fit + */ +std::array gaus_init_par(const NDView x, const NDView y); + +std::array pol1_init_par(const NDView x, const NDView y); + static constexpr int DEFAULT_NUM_THREADS = 4; /** @@ -33,7 +41,11 @@ NDArray fit_gaus(NDView x, NDView y); * @param y y vales, layout [row, col, values] * @param n_threads number of threads to use */ -NDArray fit_gaus(NDView x, NDView y, int n_threads = DEFAULT_NUM_THREADS); + +NDArray fit_gaus(NDView x, NDView y, + int n_threads = DEFAULT_NUM_THREADS); + + /** @@ -45,10 +57,12 @@ NDArray fit_gaus(NDView x, NDView y, int n_thre * @param par_err_out output error parameters */ void fit_gaus(NDView x, NDView y, NDView y_err, - NDView par_out, NDView par_err_out); + NDView par_out, NDView par_err_out, + double& chi2); /** - * @brief Fit a 1D Gaussian to each pixel with error estimates. Data layout [row, col, values] + * @brief Fit a 1D Gaussian to each pixel with error estimates. 
Data layout + * [row, col, values] * @param x x values * @param y y vales, layout [row, col, values] * @param y_err error in y, layout [row, col, values] @@ -57,20 +71,22 @@ void fit_gaus(NDView x, NDView y, NDView y_err, * @param n_threads number of threads to use */ void fit_gaus(NDView x, NDView y, NDView y_err, - NDView par_out, NDView par_err_out, int n_threads = DEFAULT_NUM_THREADS); - + NDView par_out, NDView par_err_out, NDView chi2_out, + int n_threads = DEFAULT_NUM_THREADS + ); NDArray fit_pol1(NDView x, NDView y); -NDArray fit_pol1(NDView x, NDView y, int n_threads = DEFAULT_NUM_THREADS); +NDArray fit_pol1(NDView x, NDView y, + int n_threads = DEFAULT_NUM_THREADS); -void fit_pol1(NDView x, NDView y, - NDView y_err, NDView par_out, - NDView par_err_out); +void fit_pol1(NDView x, NDView y, NDView y_err, + NDView par_out, NDView par_err_out, double& chi2); + +// TODO! not sure we need to offer the different version in C++ +void fit_pol1(NDView x, NDView y, NDView y_err, + NDView par_out, NDView par_err_out,NDView chi2_out, + int n_threads = DEFAULT_NUM_THREADS); -//TODO! 
not sure we need to offer the different version in C++ -void fit_pol1(NDView x, NDView y, - NDView y_err, NDView par_out, - NDView par_err_out, int n_threads = DEFAULT_NUM_THREADS); } // namespace aare \ No newline at end of file diff --git a/include/aare/NDArray.hpp b/include/aare/NDArray.hpp index 15beb02..cfa5b5c 100644 --- a/include/aare/NDArray.hpp +++ b/include/aare/NDArray.hpp @@ -69,6 +69,11 @@ class NDArray : public ArrayExpr, Ndim> { std::copy(v.begin(), v.end(), begin()); } + template + NDArray(const std::array& arr) : NDArray({Size}) { + std::copy(arr.begin(), arr.end(), begin()); + } + // Move constructor NDArray(NDArray &&other) noexcept : shape_(other.shape_), strides_(c_strides(shape_)), @@ -105,6 +110,20 @@ class NDArray : public ArrayExpr, Ndim> { NDArray &operator-=(const NDArray &other); NDArray &operator*=(const NDArray &other); + //Write directly to the data array, or create a new one + template + NDArray& operator=(const std::array &other){ + if(Size != size_){ + delete[] data_; + size_ = Size; + data_ = new T[size_]; + } + for (size_t i = 0; i < Size; ++i) { + data_[i] = other[i]; + } + return *this; + } + // NDArray& operator/=(const NDArray& other); template NDArray &operator/=(const NDArray &other) { @@ -135,6 +154,11 @@ class NDArray : public ArrayExpr, Ndim> { NDArray &operator&=(const T & /*mask*/); + + + + + void sqrt() { for (int i = 0; i < size_; ++i) { data_[i] = std::sqrt(data_[i]); @@ -318,6 +342,9 @@ NDArray &NDArray::operator+=(const T &value) { return *this; } + + + template NDArray NDArray::operator+(const T &value) { NDArray result = *this; @@ -418,4 +445,6 @@ NDArray load(const std::string &pathname, return img; } + + } // namespace aare \ No newline at end of file diff --git a/include/aare/NDView.hpp b/include/aare/NDView.hpp index e3a6d30..f53f758 100644 --- a/include/aare/NDView.hpp +++ b/include/aare/NDView.hpp @@ -1,5 +1,5 @@ #pragma once - +#include "aare/defs.hpp" #include "aare/ArrayExpr.hpp" #include @@ -99,6 
+99,15 @@ template class NDView : public ArrayExpr()); } + + template + NDView& operator=(const std::array &arr) { + if(size() != arr.size()) + throw std::runtime_error(LOCATION + "Array and NDView size mismatch"); + std::copy(arr.begin(), arr.end(), begin()); + return *this; + } + NDView &operator=(const T val) { for (auto it = begin(); it != end(); ++it) *it = val; diff --git a/include/aare/utils/par.hpp b/include/aare/utils/par.hpp new file mode 100644 index 0000000..efb1c77 --- /dev/null +++ b/include/aare/utils/par.hpp @@ -0,0 +1,18 @@ +#include +#include +#include + +namespace aare { + + template + void RunInParallel(F func, const std::vector>& tasks) { + // auto tasks = split_task(0, y.shape(0), n_threads); + std::vector threads; + for (auto &task : tasks) { + threads.push_back(std::thread(func, task.first, task.second)); + } + for (auto &thread : threads) { + thread.join(); + } + } +} // namespace aare \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index 74e624f..6dc941e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,8 @@ build-backend = "scikit_build_core.build" [project] name = "aare" -version = "2025.2.12" +version = "2025.2.18" + [tool.scikit-build] cmake.verbose = true diff --git a/python/CMakeLists.txt b/python/CMakeLists.txt index 2aaa222..09de736 100644 --- a/python/CMakeLists.txt +++ b/python/CMakeLists.txt @@ -50,10 +50,10 @@ set(PYTHON_EXAMPLES ) - # Copy the python examples to the build directory foreach(FILE ${PYTHON_EXAMPLES}) configure_file(${FILE} ${CMAKE_BINARY_DIR}/${FILE} ) + message(STATUS "Copying ${FILE} to ${CMAKE_BINARY_DIR}/${FILE}") endforeach(FILE ${PYTHON_EXAMPLES}) diff --git a/python/examples/play.py b/python/examples/play.py index f1a869b..37754df 100644 --- a/python/examples/play.py +++ b/python/examples/play.py @@ -8,38 +8,20 @@ import numpy as np import boost_histogram as bh import time -<<<<<<< HEAD -from aare import File, ClusterFinder, VarClusterFinder, ClusterFile, CtbRawFile -from 
aare import gaus, fit_gaus -base = Path('/mnt/sls_det_storage/moench_data/Julian/MOENCH05/20250113_first_xrays_redo/raw_files/') -cluster_file = Path('/home/l_msdetect/erik/tmp/Cu.clust') +import aare -t0 = time.perf_counter() -offset= -0.5 -hist3d = bh.Histogram( - bh.axis.Regular(160, 0+offset, 160+offset), #x - bh.axis.Regular(150, 0+offset, 150+offset), #y - bh.axis.Regular(200, 0, 6000), #ADU -) +data = np.random.normal(10, 1, 1000) -total_clusters = 0 -with ClusterFile(cluster_file, chunk_size = 1000) as f: - for i, clusters in enumerate(f): - arr = np.array(clusters) - total_clusters += clusters.size - hist3d.fill(arr['y'],arr['x'], clusters.sum_2x2()) #python talks [row, col] cluster finder [x,y] -======= -from aare import RawFile +hist = bh.Histogram(bh.axis.Regular(10, 0, 20)) +hist.fill(data) -f = RawFile('/mnt/sls_det_storage/jungfrau_data1/vadym_tests/jf12_M431/laser_scan/laserScan_pedestal_G0_master_0.json') -print(f'{f.frame_number(1)}') +x = hist.axes[0].centers +y = hist.values() +y_err = np.sqrt(y)+1 +res = aare.fit_gaus(x, y, y_err, chi2 = True) -for i in range(10): - header, img = f.read_frame() - print(header['frameNumber'], img.shape) ->>>>>>> developer t_elapsed = time.perf_counter()-t0 diff --git a/python/src/fit.hpp b/python/src/fit.hpp index 60cdecc..8e6cfef 100644 --- a/python/src/fit.hpp +++ b/python/src/fit.hpp @@ -7,6 +7,8 @@ #include "aare/Fit.hpp" namespace py = pybind11; +using namespace pybind11::literals; + void define_fit_bindings(py::module &m) { @@ -29,7 +31,8 @@ void define_fit_bindings(py::module &m) { The points at which to evaluate the Gaussian function. par : array_like The parameters of the Gaussian function. The first element is the amplitude, the second element is the mean, and the third element is the standard deviation. 
- )", py::arg("x"), py::arg("par")); + )", + py::arg("x"), py::arg("par")); m.def( "pol1", @@ -49,7 +52,9 @@ void define_fit_bindings(py::module &m) { The points at which to evaluate the polynomial function. par : array_like The parameters of the polynomial function. The first element is the intercept, and the second element is the slope. - )", py::arg("x"), py::arg("par")); + )", + py::arg("x"), py::arg("par")); + m.def( "fit_gaus", @@ -72,7 +77,8 @@ void define_fit_bindings(py::module &m) { throw std::runtime_error("Data must be 1D or 3D"); } }, -R"( + R"( + Fit a 1D Gaussian to data. Parameters @@ -90,8 +96,9 @@ n_threads : int, optional "fit_gaus", [](py::array_t x, py::array_t y, - py::array_t - y_err, int n_threads) { + py::array_t y_err, + int n_threads) { + if (y.ndim() == 3) { // Allocate memory for the output // Need to have pointers to allow python to manage @@ -99,15 +106,20 @@ n_threads : int, optional auto par = new NDArray({y.shape(0), y.shape(1), 3}); auto par_err = new NDArray({y.shape(0), y.shape(1), 3}); + auto chi2 = new NDArray({y.shape(0), y.shape(1)}); + // Make views of the numpy arrays auto y_view = make_view_3d(y); auto y_view_err = make_view_3d(y_err); auto x_view = make_view_1d(x); + aare::fit_gaus(x_view, y_view, y_view_err, par->view(), - par_err->view(), n_threads); - // return return_image_data(par); - return py::make_tuple(return_image_data(par), - return_image_data(par_err)); + par_err->view(), chi2->view(), n_threads); + + return py::dict("par"_a = return_image_data(par), + "par_err"_a = return_image_data(par_err), + "chi2"_a = return_image_data(chi2), + "Ndf"_a = y.shape(2) - 3); } else if (y.ndim() == 1) { // Allocate memory for the output // Need to have pointers to allow python to manage @@ -120,15 +132,21 @@ n_threads : int, optional auto y_view_err = make_view_1d(y_err); auto x_view = make_view_1d(x); + + double chi2 = 0; aare::fit_gaus(x_view, y_view, y_view_err, par->view(), - par_err->view()); - return 
py::make_tuple(return_image_data(par), - return_image_data(par_err)); + par_err->view(), chi2); + + return py::dict("par"_a = return_image_data(par), + "par_err"_a = return_image_data(par_err), + "chi2"_a = chi2, "Ndf"_a = y.size() - 3); + } else { throw std::runtime_error("Data must be 1D or 3D"); } }, -R"( + R"( + Fit a 1D Gaussian to data with error estimates. Parameters @@ -172,11 +190,11 @@ n_threads : int, optional "fit_pol1", [](py::array_t x, py::array_t y, - py::array_t - y_err, int n_threads) { + py::array_t y_err, + int n_threads) { if (y.ndim() == 3) { - auto par = - new NDArray({y.shape(0), y.shape(1), 2}); + auto par = new NDArray({y.shape(0), y.shape(1), 2}); + auto par_err = new NDArray({y.shape(0), y.shape(1), 2}); @@ -184,10 +202,15 @@ n_threads : int, optional auto y_view_err = make_view_3d(y_err); auto x_view = make_view_1d(x); - aare::fit_pol1(x_view, y_view,y_view_err, par->view(), - par_err->view(), n_threads); - return py::make_tuple(return_image_data(par), - return_image_data(par_err)); + auto chi2 = new NDArray({y.shape(0), y.shape(1)}); + + aare::fit_pol1(x_view, y_view, y_view_err, par->view(), + par_err->view(), chi2->view(), n_threads); + return py::dict("par"_a = return_image_data(par), + "par_err"_a = return_image_data(par_err), + "chi2"_a = return_image_data(chi2), + "Ndf"_a = y.shape(2) - 2); + } else if (y.ndim() == 1) { auto par = new NDArray({2}); @@ -197,15 +220,19 @@ n_threads : int, optional auto y_view_err = make_view_1d(y_err); auto x_view = make_view_1d(x); + double chi2 = 0; + aare::fit_pol1(x_view, y_view, y_view_err, par->view(), - par_err->view()); - return py::make_tuple(return_image_data(par), - return_image_data(par_err)); + par_err->view(), chi2); + return py::dict("par"_a = return_image_data(par), + "par_err"_a = return_image_data(par_err), + "chi2"_a = chi2, "Ndf"_a = y.size() - 2); + } else { throw std::runtime_error("Data must be 1D or 3D"); } }, -R"( + R"( Fit a 1D polynomial to data with error estimates. 
Parameters diff --git a/src/Fit.cpp b/src/Fit.cpp index 08ecaec..3001efd 100644 --- a/src/Fit.cpp +++ b/src/Fit.cpp @@ -1,11 +1,13 @@ #include "aare/Fit.hpp" #include "aare/utils/task.hpp" - +#include "aare/utils/par.hpp" #include #include - #include +#include + + namespace aare { namespace func { @@ -35,33 +37,11 @@ NDArray pol1(NDView x, NDView par) { } // namespace func NDArray fit_gaus(NDView x, NDView y) { - NDArray result({3}, 0); - lm_control_struct control = lm_control_double; + NDArray result = gaus_init_par(x, y); + lm_status_struct status; - // Estimate the initial parameters for the fit - std::vector start_par{0, 0, 0}; - auto e = std::max_element(y.begin(), y.end()); - auto idx = std::distance(y.begin(), e); - - start_par[0] = *e; // For amplitude we use the maximum value - start_par[1] = - x[idx]; // For the mean we use the x value of the maximum value - - // For sigma we estimate the fwhm and divide by 2.35 - // assuming equally spaced x values - auto delta = x[1] - x[0]; - start_par[2] = - std::count_if(y.begin(), y.end(), - [e, delta](double val) { return val > *e / 2; }) * - delta / 2.35; - - lmfit::result_t res(start_par); - lmcurve(res.par.size(), res.par.data(), x.size(), x.data(), y.data(), - aare::func::gaus, &control, &res.status); - - result(0) = res.par[0]; - result(1) = res.par[1]; - result(2) = res.par[2]; + lmcurve(result.size(), result.data(), x.size(), x.data(), y.data(), + aare::func::gaus, &lm_control_double, &status); return result; } @@ -81,65 +61,17 @@ NDArray fit_gaus(NDView x, NDView y, } } }; - auto tasks = split_task(0, y.shape(0), n_threads); - std::vector threads; - for (auto &task : tasks) { - threads.push_back(std::thread(process, task.first, task.second)); - } - for (auto &thread : threads) { - thread.join(); - } + auto tasks = split_task(0, y.shape(0), n_threads); + RunInParallel(process, tasks); return result; } -void fit_gaus(NDView x, NDView y, NDView y_err, - NDView par_out, NDView par_err_out, - int n_threads) { - 
- auto process = [&](ssize_t first_row, ssize_t last_row) { - for (ssize_t row = first_row; row < last_row; row++) { - for (ssize_t col = 0; col < y.shape(1); col++) { - NDView y_view(&y(row, col, 0), {y.shape(2)}); - NDView y_err_view(&y_err(row, col, 0), - {y_err.shape(2)}); - NDView par_out_view(&par_out(row, col, 0), - {par_out.shape(2)}); - NDView par_err_out_view(&par_err_out(row, col, 0), - {par_err_out.shape(2)}); - fit_gaus(x, y_view, y_err_view, par_out_view, par_err_out_view); - } - } - }; - - auto tasks = split_task(0, y.shape(0), n_threads); - std::vector threads; - for (auto &task : tasks) { - threads.push_back(std::thread(process, task.first, task.second)); - } - for (auto &thread : threads) { - thread.join(); - } -} - -void fit_gaus(NDView x, NDView y, NDView y_err, - NDView par_out, NDView par_err_out) { - // Check that we have the correct sizes - if (y.size() != x.size() || y.size() != y_err.size() || - par_out.size() != 3 || par_err_out.size() != 3) { - throw std::runtime_error("Data, x, data_err must have the same size " - "and par_out, par_err_out must have size 3"); - } - - lm_control_struct control = lm_control_double; - - // Estimate the initial parameters for the fit - std::vector start_par{0, 0, 0}; - std::vector start_par_err{0, 0, 0}; - std::vector start_cov{0, 0, 0, 0, 0, 0, 0, 0, 0}; - +std::array gaus_init_par(const NDView x, const NDView y) { + std::array start_par{0, 0, 0}; auto e = std::max_element(y.begin(), y.end()); auto idx = std::distance(y.begin(), e); + start_par[0] = *e; // For amplitude we use the maximum value start_par[1] = x[idx]; // For the mean we use the x value of the maximum value @@ -152,66 +84,83 @@ void fit_gaus(NDView x, NDView y, NDView y_err, [e, delta](double val) { return val > *e / 2; }) * delta / 2.35; - lmfit::result_t res(start_par); - lmfit::result_t res_err(start_par_err); - lmfit::result_t cov(start_cov); - - // TODO can we make lmcurve write the result directly where is should be? 
- lmcurve2(res.par.size(), res.par.data(), res_err.par.data(), cov.par.data(), - x.size(), x.data(), y.data(), y_err.data(), aare::func::gaus, - &control, &res.status); - - par_out(0) = res.par[0]; - par_out(1) = res.par[1]; - par_out(2) = res.par[2]; - par_err_out(0) = res_err.par[0]; - par_err_out(1) = res_err.par[1]; - par_err_out(2) = res_err.par[2]; + return start_par; } -void fit_pol1(NDView x, NDView y, NDView y_err, - NDView par_out, NDView par_err_out) { + +std::array pol1_init_par(const NDView x, const NDView y){ + // Estimate the initial parameters for the fit + std::array start_par{0, 0}; + + + auto y2 = std::max_element(y.begin(), y.end()); + auto x2 = x[std::distance(y.begin(), y2)]; + auto y1 = std::min_element(y.begin(), y.end()); + auto x1 = x[std::distance(y.begin(), y1)]; + + start_par[0] = + (*y2 - *y1) / (x2 - x1); // For amplitude we use the maximum value + start_par[1] = + *y1 - ((*y2 - *y1) / (x2 - x1)) * + x1; // For the mean we use the x value of the maximum value + return start_par; +} + +void fit_gaus(NDView x, NDView y, NDView y_err, + NDView par_out, NDView par_err_out, + double &chi2) { + // Check that we have the correct sizes if (y.size() != x.size() || y.size() != y_err.size() || - par_out.size() != 2 || par_err_out.size() != 2) { + par_out.size() != 3 || par_err_out.size() != 3) { throw std::runtime_error("Data, x, data_err must have the same size " - "and par_out, par_err_out must have size 2"); + "and par_out, par_err_out must have size 3"); } - lm_control_struct control = lm_control_double; - // Estimate the initial parameters for the fit - std::vector start_par{0, 0}; - std::vector start_par_err{0, 0}; - std::vector start_cov{0, 0, 0, 0}; + // /* Collection of output parameters for status info. */ + // typedef struct { + // double fnorm; /* norm of the residue vector fvec. */ + // int nfev; /* actual number of iterations. */ + // int outcome; /* Status indicator. 
Nonnegative values are used as + // index + // for the message text lm_infmsg, set in lmmin.c. */ + // int userbreak; /* Set when function evaluation requests termination. + // */ + // } lm_status_struct; - auto y2 = std::max_element(y.begin(), y.end()); - auto x2 = x[std::distance(y.begin(), y2)]; - auto y1 = std::min_element(y.begin(), y.end()); - auto x1 = x[std::distance(y.begin(), y1)]; - start_par[0] = - (*y2 - *y1) / (x2 - x1); // For amplitude we use the maximum value - start_par[1] = - *y1 - ((*y2 - *y1) / (x2 - x1)) * - x1; // For the mean we use the x value of the maximum value + lm_status_struct status; + par_out = gaus_init_par(x, y); + std::array cov{0, 0, 0, 0, 0, 0, 0 , 0 , 0}; - lmfit::result_t res(start_par); - lmfit::result_t res_err(start_par_err); - lmfit::result_t cov(start_cov); + // void lmcurve2( const int n_par, double *par, double *parerr, double *covar, const int m_dat, const double *t, const double *y, const double *dy, double (*f)( const double ti, const double *par ), const lm_control_struct *control, lm_status_struct *status); + // n_par - Number of free variables. Length of parameter vector par. + // par - Parameter vector. On input, it must contain a reasonable guess. On output, it contains the solution found to minimize ||r||. + // parerr - Parameter uncertainties vector. Array of length n_par or NULL. On output, unless it or covar is NULL, it contains the weighted parameter uncertainties for the found parameters. + // covar - Covariance matrix. Array of length n_par * n_par or NULL. On output, unless it is NULL, it contains the covariance matrix. + // m_dat - Number of data points. Length of vectors t, y, dy. Must statisfy n_par <= m_dat. + // t - Array of length m_dat. Contains the abcissae (time, or "x") for which function f will be evaluated. + // y - Array of length m_dat. Contains the ordinate values that shall be fitted. + // dy - Array of length m_dat. Contains the standard deviations of the values y. 
+ // f - A user-supplied parametric function f(ti;par). + // control - Parameter collection for tuning the fit procedure. In most cases, the default &lm_control_double is adequate. If f is only computed with single-precision accuracy, &lm_control_float should be used. Parameters are explained in lmmin2(3). + // status - A record used to return information about the minimization process: For details, see lmmin2(3). - lmcurve2(res.par.size(), res.par.data(), res_err.par.data(), cov.par.data(), - x.size(), x.data(), y.data(), y_err.data(), aare::func::pol1, - &control, &res.status); + lmcurve2(par_out.size(), par_out.data(), par_err_out.data(), cov.data(), + x.size(), x.data(), y.data(), y_err.data(), aare::func::gaus, + &lm_control_double, &status); - par_out(0) = res.par[0]; - par_out(1) = res.par[1]; - par_err_out(0) = res_err.par[0]; - par_err_out(1) = res_err.par[1]; + // Calculate chi2 + chi2 = 0; + for (size_t i = 0; i < y.size(); i++) { + chi2 += std::pow((y(i) - func::gaus(x(i), par_out.data())) / y_err(i), 2); + } } -void fit_pol1(NDView x, NDView y, NDView y_err, - NDView par_out, NDView par_err_out, +void fit_gaus(NDView x, NDView y, NDView y_err, + NDView par_out, NDView par_err_out, NDView chi2_out, + int n_threads) { auto process = [&](ssize_t first_row, ssize_t last_row) { @@ -224,21 +173,69 @@ void fit_pol1(NDView x, NDView y, NDView y_err, {par_out.shape(2)}); NDView par_err_out_view(&par_err_out(row, col, 0), {par_err_out.shape(2)}); - fit_pol1(x, y_view, y_err_view, par_out_view, par_err_out_view); + + fit_gaus(x, y_view, y_err_view, par_out_view, par_err_out_view, + chi2_out(row, col)); + } } }; auto tasks = split_task(0, y.shape(0), n_threads); - std::vector threads; - for (auto &task : tasks) { - threads.push_back(std::thread(process, task.first, task.second)); + RunInParallel(process, tasks); +} + +void fit_pol1(NDView x, NDView y, NDView y_err, + NDView par_out, NDView par_err_out, double& chi2) { + + // Check that we have the correct sizes + 
if (y.size() != x.size() || y.size() != y_err.size() || + par_out.size() != 2 || par_err_out.size() != 2) { + throw std::runtime_error("Data, x, data_err must have the same size " + "and par_out, par_err_out must have size 2"); } - for (auto &thread : threads) { - thread.join(); + + lm_status_struct status; + par_out = pol1_init_par(x, y); + std::array cov{0, 0, 0, 0}; + + lmcurve2(par_out.size(), par_out.data(), par_err_out.data(), cov.data(), + x.size(), x.data(), y.data(), y_err.data(), aare::func::pol1, + &lm_control_double, &status); + + // Calculate chi2 + chi2 = 0; + for (size_t i = 0; i < y.size(); i++) { + chi2 += std::pow((y(i) - func::pol1(x(i), par_out.data())) / y_err(i), 2); } } +void fit_pol1(NDView x, NDView y, NDView y_err, + NDView par_out, NDView par_err_out, NDView chi2_out, + int n_threads) { + + auto process = [&](ssize_t first_row, ssize_t last_row) { + for (ssize_t row = first_row; row < last_row; row++) { + for (ssize_t col = 0; col < y.shape(1); col++) { + NDView y_view(&y(row, col, 0), {y.shape(2)}); + NDView y_err_view(&y_err(row, col, 0), + {y_err.shape(2)}); + NDView par_out_view(&par_out(row, col, 0), + {par_out.shape(2)}); + NDView par_err_out_view(&par_err_out(row, col, 0), + {par_err_out.shape(2)}); + + fit_pol1(x, y_view, y_err_view, par_out_view, par_err_out_view, chi2_out(row, col)); + + } + } + }; + + auto tasks = split_task(0, y.shape(0), n_threads); + RunInParallel(process, tasks); + +} + NDArray fit_pol1(NDView x, NDView y) { // // Check that we have the correct sizes // if (y.size() != x.size() || y.size() != y_err.size() || @@ -246,28 +243,12 @@ NDArray fit_pol1(NDView x, NDView y) { // throw std::runtime_error("Data, x, data_err must have the same size " // "and par_out, par_err_out must have size 2"); // } - NDArray par({2}, 0); + NDArray par = pol1_init_par(x, y); - lm_control_struct control = lm_control_double; + lm_status_struct status; + lmcurve(par.size(), par.data(), x.size(), x.data(), y.data(), + 
aare::func::pol1, &lm_control_double, &status); - // Estimate the initial parameters for the fit - std::vector start_par{0, 0}; - - auto y2 = std::max_element(y.begin(), y.end()); - auto x2 = x[std::distance(y.begin(), y2)]; - auto y1 = std::min_element(y.begin(), y.end()); - auto x1 = x[std::distance(y.begin(), y1)]; - - start_par[0] = (*y2 - *y1) / (x2 - x1); - start_par[1] = *y1 - ((*y2 - *y1) / (x2 - x1)) * x1; - - lmfit::result_t res(start_par); - - lmcurve(res.par.size(), res.par.data(), x.size(), x.data(), y.data(), - aare::func::pol1, &control, &res.status); - - par(0) = res.par[0]; - par(1) = res.par[1]; return par; } @@ -287,13 +268,8 @@ NDArray fit_pol1(NDView x, NDView y, }; auto tasks = split_task(0, y.shape(0), n_threads); - std::vector threads; - for (auto &task : tasks) { - threads.push_back(std::thread(process, task.first, task.second)); - } - for (auto &thread : threads) { - thread.join(); - } + + RunInParallel(process, tasks); return result; } diff --git a/src/NDArray.test.cpp b/src/NDArray.test.cpp index 54099fd..942481c 100644 --- a/src/NDArray.test.cpp +++ b/src/NDArray.test.cpp @@ -379,4 +379,32 @@ TEST_CASE("Elementwise operations on images") { REQUIRE(A(i) == a_val); } } +} + +TEST_CASE("Assign an std::array to a 1D NDArray") { + NDArray a{{5}, 0}; + std::array b{1, 2, 3, 4, 5}; + a = b; + for (uint32_t i = 0; i < a.size(); ++i) { + REQUIRE(a(i) == b[i]); + } +} + +TEST_CASE("Assign an std::array to a 1D NDArray of a different size") { + NDArray a{{3}, 0}; + std::array b{1, 2, 3, 4, 5}; + a = b; + + REQUIRE(a.size() == 5); + for (uint32_t i = 0; i < a.size(); ++i) { + REQUIRE(a(i) == b[i]); + } +} + +TEST_CASE("Construct an NDArray from an std::array") { + std::array b{1, 2, 3, 4, 5}; + NDArray a(b); + for (uint32_t i = 0; i < a.size(); ++i) { + REQUIRE(a(i) == b[i]); + } } \ No newline at end of file From 1d2c38c1d4d48ed03f79fe072f64521c2ee54738 Mon Sep 17 00:00:00 2001 From: Xiangyu Xie <45243914+xiangyuxie@users.noreply.github.com> Date: 
Wed, 19 Feb 2025 16:11:24 +0100 Subject: [PATCH 038/120] Enable VarClusterFinder (#134) Co-authored-by: xiangyu.xie --- include/aare/VarClusterFinder.hpp | 2 +- python/src/var_cluster.hpp | 35 ++++++++++++++++++++++++++++++- 2 files changed, 35 insertions(+), 2 deletions(-) diff --git a/include/aare/VarClusterFinder.hpp b/include/aare/VarClusterFinder.hpp index d4d51cc..ea62a9d 100644 --- a/include/aare/VarClusterFinder.hpp +++ b/include/aare/VarClusterFinder.hpp @@ -7,7 +7,7 @@ #include "aare/NDArray.hpp" -const int MAX_CLUSTER_SIZE = 200; +const int MAX_CLUSTER_SIZE = 50; namespace aare { template class VarClusterFinder { diff --git a/python/src/var_cluster.hpp b/python/src/var_cluster.hpp index f3a5741..0819a44 100644 --- a/python/src/var_cluster.hpp +++ b/python/src/var_cluster.hpp @@ -19,7 +19,7 @@ using namespace::aare; void define_var_cluster_finder_bindings(py::module &m) { PYBIND11_NUMPY_DTYPE(VarClusterFinder::Hit, size, row, col, - reserved, energy, max); + reserved, energy, max, rows, cols, enes); py::class_>(m, "VarClusterFinder") .def(py::init, double>()) @@ -28,6 +28,15 @@ void define_var_cluster_finder_bindings(py::module &m) { auto ptr = new NDArray(self.labeled()); return return_image_data(ptr); }) + .def("set_noiseMap", + [](VarClusterFinder &self, + py::array_t + noise_map) { + auto noise_map_span = make_view_2d(noise_map); + self.set_noiseMap(noise_map_span); + }) + .def("set_peripheralThresholdFactor", + &VarClusterFinder::set_peripheralThresholdFactor) .def("find_clusters", [](VarClusterFinder &self, py::array_t @@ -35,6 +44,30 @@ void define_var_cluster_finder_bindings(py::module &m) { auto view = make_view_2d(img); self.find_clusters(view); }) + .def("find_clusters_X", + [](VarClusterFinder &self, + py::array_t + img) { + auto img_span = make_view_2d(img); + self.find_clusters_X(img_span); + }) + .def("single_pass", + [](VarClusterFinder &self, + py::array_t + img) { + auto img_span = make_view_2d(img); + self.single_pass(img_span); + }) + 
.def("hits", + [](VarClusterFinder &self) { + auto ptr = new std::vector::Hit>( + self.steal_hits()); + return return_vector(ptr); + }) + .def("clear_hits", + [](VarClusterFinder &self) { + self.clear_hits(); + }) .def("steal_hits", [](VarClusterFinder &self) { auto ptr = new std::vector::Hit>( From 8ae6bb76f83b6481bf7e5b1db192d984ea51f577 Mon Sep 17 00:00:00 2001 From: froejdh_e Date: Fri, 21 Feb 2025 11:18:39 +0100 Subject: [PATCH 039/120] removed warnings added clang-tidy --- .clang-tidy | 42 +++++++++++++++++++++++++++++++++++++ CMakeLists.txt | 3 +++ include/aare/NDArray.hpp | 12 +++++------ include/aare/RawSubFile.hpp | 2 +- include/aare/defs.hpp | 2 ++ python/src/ctb_raw_file.hpp | 4 ++-- python/src/np_helper.hpp | 12 +++++------ python/src/var_cluster.hpp | 2 +- src/Dtype.cpp | 2 +- src/File.cpp | 2 +- src/RawFile.cpp | 3 +-- 11 files changed, 66 insertions(+), 20 deletions(-) create mode 100644 .clang-tidy diff --git a/.clang-tidy b/.clang-tidy new file mode 100644 index 0000000..a2ab6c1 --- /dev/null +++ b/.clang-tidy @@ -0,0 +1,42 @@ + +--- +Checks: '*, + -altera-*, + -android-cloexec-fopen, + -cppcoreguidelines-pro-bounds-array-to-pointer-decay, + -cppcoreguidelines-pro-bounds-pointer-arithmetic, + -fuchsia*, + -readability-else-after-return, + -readability-avoid-const-params-in-decls, + -readability-identifier-length, + -cppcoreguidelines-pro-bounds-constant-array-index, + -cppcoreguidelines-pro-type-reinterpret-cast, + -llvm-header-guard, + -modernize-use-nodiscard, + -misc-non-private-member-variables-in-classes, + -readability-static-accessed-through-instance, + -readability-braces-around-statements, + -readability-isolate-declaration, + -readability-implicit-bool-conversion, + -readability-identifier-length, + -readability-identifier-naming, + -hicpp-signed-bitwise, + -hicpp-no-array-decay, + -hicpp-braces-around-statements, + -google-runtime-references, + -google-readability-todo, + -google-readability-braces-around-statements, + 
-modernize-use-trailing-return-type, + -llvmlibc-*' + +HeaderFilterRegex: \.hpp +FormatStyle: none +CheckOptions: + - { key: readability-identifier-naming.NamespaceCase, value: lower_case } + # - { key: readability-identifier-naming.FunctionCase, value: lower_case } + - { key: readability-identifier-naming.ClassCase, value: CamelCase } + # - { key: readability-identifier-naming.MethodCase, value: CamelCase } + # - { key: readability-identifier-naming.StructCase, value: CamelCase } + # - { key: readability-identifier-naming.VariableCase, value: lower_case } + - { key: readability-identifier-naming.GlobalConstantCase, value: UPPER_CASE } +... diff --git a/CMakeLists.txt b/CMakeLists.txt index b93b513..cff4c75 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -60,6 +60,8 @@ if(AARE_SYSTEM_LIBRARIES) set(AARE_FETCH_CATCH OFF CACHE BOOL "Disabled FetchContent for catch2" FORCE) set(AARE_FETCH_JSON OFF CACHE BOOL "Disabled FetchContent for nlohmann::json" FORCE) set(AARE_FETCH_ZMQ OFF CACHE BOOL "Disabled FetchContent for libzmq" FORCE) + # Still fetch lmfit when setting AARE_SYSTEM_LIBRARIES since this is not available + # on conda-forge endif() if(AARE_VERBOSE) @@ -78,6 +80,7 @@ endif() set(CMAKE_EXPORT_COMPILE_COMMANDS ON) if(AARE_FETCH_LMFIT) + #TODO! Should we fetch lmfit from the web or inlcude a tar.gz in the repo? 
set(lmfit_patch git apply ${CMAKE_CURRENT_SOURCE_DIR}/patches/lmfit.patch) FetchContent_Declare( lmfit diff --git a/include/aare/NDArray.hpp b/include/aare/NDArray.hpp index cfa5b5c..310d070 100644 --- a/include/aare/NDArray.hpp +++ b/include/aare/NDArray.hpp @@ -388,12 +388,12 @@ NDArray NDArray::operator*(const T &value) { result *= value; return result; } -template void NDArray::Print() { - if (shape_[0] < 20 && shape_[1] < 20) - Print_all(); - else - Print_some(); -} +// template void NDArray::Print() { +// if (shape_[0] < 20 && shape_[1] < 20) +// Print_all(); +// else +// Print_some(); +// } template std::ostream &operator<<(std::ostream &os, const NDArray &arr) { diff --git a/include/aare/RawSubFile.hpp b/include/aare/RawSubFile.hpp index 89c278e..1d554e8 100644 --- a/include/aare/RawSubFile.hpp +++ b/include/aare/RawSubFile.hpp @@ -64,7 +64,7 @@ class RawSubFile { size_t bytes_per_frame() const { return m_bytes_per_frame; } size_t pixels_per_frame() const { return m_rows * m_cols; } - size_t bytes_per_pixel() const { return m_bitdepth / 8; } + size_t bytes_per_pixel() const { return m_bitdepth / bits_per_byte; } private: template diff --git a/include/aare/defs.hpp b/include/aare/defs.hpp index db1a47b..4559882 100644 --- a/include/aare/defs.hpp +++ b/include/aare/defs.hpp @@ -38,6 +38,8 @@ namespace aare { +inline constexpr size_t bits_per_byte = 8; + void assert_failed(const std::string &msg); diff --git a/python/src/ctb_raw_file.hpp b/python/src/ctb_raw_file.hpp index 9ce656d..56e571b 100644 --- a/python/src/ctb_raw_file.hpp +++ b/python/src/ctb_raw_file.hpp @@ -32,7 +32,7 @@ m.def("adc_sar_05_decode64to16", [](py::array_t input) { } //Create a 2D output array with the same shape as the input - std::vector shape{input.shape(0), input.shape(1)/8}; + std::vector shape{input.shape(0), input.shape(1)/static_cast(bits_per_byte)}; py::array_t output(shape); //Create a view of the input and output arrays @@ -53,7 +53,7 @@ m.def("adc_sar_04_decode64to16", 
[](py::array_t input) { } //Create a 2D output array with the same shape as the input - std::vector shape{input.shape(0), input.shape(1)/8}; + std::vector shape{input.shape(0), input.shape(1)/static_cast(bits_per_byte)}; py::array_t output(shape); //Create a view of the input and output arrays diff --git a/python/src/np_helper.hpp b/python/src/np_helper.hpp index 6e92830..1845196 100644 --- a/python/src/np_helper.hpp +++ b/python/src/np_helper.hpp @@ -40,25 +40,25 @@ template py::array return_vector(std::vector *vec) { } // todo rewrite generic -template auto get_shape_3d(py::array_t arr) { +template auto get_shape_3d(const py::array_t& arr) { return aare::Shape<3>{arr.shape(0), arr.shape(1), arr.shape(2)}; } -template auto make_view_3d(py::array_t arr) { +template auto make_view_3d(py::array_t& arr) { return aare::NDView(arr.mutable_data(), get_shape_3d(arr)); } -template auto get_shape_2d(py::array_t arr) { +template auto get_shape_2d(const py::array_t& arr) { return aare::Shape<2>{arr.shape(0), arr.shape(1)}; } -template auto get_shape_1d(py::array_t arr) { +template auto get_shape_1d(const py::array_t& arr) { return aare::Shape<1>{arr.shape(0)}; } -template auto make_view_2d(py::array_t arr) { +template auto make_view_2d(py::array_t& arr) { return aare::NDView(arr.mutable_data(), get_shape_2d(arr)); } -template auto make_view_1d(py::array_t arr) { +template auto make_view_1d(py::array_t& arr) { return aare::NDView(arr.mutable_data(), get_shape_1d(arr)); } \ No newline at end of file diff --git a/python/src/var_cluster.hpp b/python/src/var_cluster.hpp index 0819a44..f7b373f 100644 --- a/python/src/var_cluster.hpp +++ b/python/src/var_cluster.hpp @@ -25,7 +25,7 @@ void define_var_cluster_finder_bindings(py::module &m) { .def(py::init, double>()) .def("labeled", [](VarClusterFinder &self) { - auto ptr = new NDArray(self.labeled()); + auto *ptr = new NDArray(self.labeled()); return return_image_data(ptr); }) .def("set_noiseMap", diff --git a/src/Dtype.cpp 
b/src/Dtype.cpp index 565d509..b818ea3 100644 --- a/src/Dtype.cpp +++ b/src/Dtype.cpp @@ -70,7 +70,7 @@ uint8_t Dtype::bitdepth() const { /** * @brief Get the number of bytes of the data type */ -size_t Dtype::bytes() const { return bitdepth() / 8; } +size_t Dtype::bytes() const { return bitdepth() / bits_per_byte; } /** * @brief Construct a DType object from a TypeIndex diff --git a/src/File.cpp b/src/File.cpp index 1180967..3c68eff 100644 --- a/src/File.cpp +++ b/src/File.cpp @@ -73,7 +73,7 @@ size_t File::tell() const { return file_impl->tell(); } size_t File::rows() const { return file_impl->rows(); } size_t File::cols() const { return file_impl->cols(); } size_t File::bitdepth() const { return file_impl->bitdepth(); } -size_t File::bytes_per_pixel() const { return file_impl->bitdepth() / 8; } +size_t File::bytes_per_pixel() const { return file_impl->bitdepth() / bits_per_byte; } DetectorType File::detector_type() const { return file_impl->detector_type(); } diff --git a/src/RawFile.cpp b/src/RawFile.cpp index e704add..78cb6c5 100644 --- a/src/RawFile.cpp +++ b/src/RawFile.cpp @@ -76,8 +76,7 @@ size_t RawFile::n_mod() const { return n_subfile_parts; } size_t RawFile::bytes_per_frame() { - // return m_rows * m_cols * m_master.bitdepth() / 8; - return m_geometry.pixels_x * m_geometry.pixels_y * m_master.bitdepth() / 8; + return m_geometry.pixels_x * m_geometry.pixels_y * m_master.bitdepth() / bits_per_byte; } size_t RawFile::pixels_per_frame() { // return m_rows * m_cols; From 5614cb4673e115622267685268bbd409bb28aef2 Mon Sep 17 00:00:00 2001 From: froejdh_e Date: Wed, 5 Mar 2025 17:40:08 +0100 Subject: [PATCH 040/120] WIP --- CMakeLists.txt | 1 + include/aare/ClusterFile.hpp | 10 +++ include/aare/ClusterVector.hpp | 4 + include/aare/Interpolator.hpp | 29 ++++++ python/aare/__init__.py | 1 + python/examples/play.py | 56 +++++------- python/src/aare.code-workspace | 98 ++++++++++++++++++++ python/src/cluster.hpp | 10 ++- python/src/cluster_file.hpp | 5 ++ 
python/src/file.hpp | 2 + python/src/interpolation.hpp | 58 ++++++++++++ python/src/module.cpp | 2 + src/ClusterFile.cpp | 108 ++++++++++++++++++++-- src/Interpolator.cpp | 159 +++++++++++++++++++++++++++++++++ src/NDArray.test.cpp | 19 ++++ 15 files changed, 523 insertions(+), 39 deletions(-) create mode 100644 include/aare/Interpolator.hpp create mode 100644 python/src/aare.code-workspace create mode 100644 python/src/interpolation.hpp create mode 100644 src/Interpolator.cpp diff --git a/CMakeLists.txt b/CMakeLists.txt index cff4c75..bff2afe 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -346,6 +346,7 @@ set(SourceFiles ${CMAKE_CURRENT_SOURCE_DIR}/src/geo_helpers.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyFile.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyHelpers.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/Interpolator.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/PixelMap.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/RawFile.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/RawSubFile.cpp diff --git a/include/aare/ClusterFile.hpp b/include/aare/ClusterFile.hpp index b796763..5bea342 100644 --- a/include/aare/ClusterFile.hpp +++ b/include/aare/ClusterFile.hpp @@ -8,11 +8,17 @@ namespace aare { +//TODO! Template this? struct Cluster3x3 { int16_t x; int16_t y; int32_t data[9]; }; +struct Cluster2x2 { + int16_t x; + int16_t y; + int32_t data[4]; +}; typedef enum { cBottomLeft = 0, @@ -37,6 +43,7 @@ struct Eta2 { double x; double y; corner c; + int32_t sum; }; struct ClusterAnalysis { @@ -97,6 +104,8 @@ class ClusterFile { */ ClusterVector read_clusters(size_t n_clusters); + ClusterVector read_clusters(size_t n_clusters, ROI roi); + /** * @brief Read a single frame from the file and return the clusters. The * cluster vector will have the frame number set. 
@@ -131,5 +140,6 @@ int analyze_cluster(Cluster3x3 &cl, int32_t *t2, int32_t *t3, char *quad, NDArray calculate_eta2(ClusterVector &clusters); Eta2 calculate_eta2(Cluster3x3 &cl); +Eta2 calculate_eta2(Cluster2x2 &cl); } // namespace aare diff --git a/include/aare/ClusterVector.hpp b/include/aare/ClusterVector.hpp index febf06c..1c15a22 100644 --- a/include/aare/ClusterVector.hpp +++ b/include/aare/ClusterVector.hpp @@ -231,6 +231,10 @@ template class ClusterVector { return *reinterpret_cast(element_ptr(i)); } + template const V &at(size_t i) const { + return *reinterpret_cast(element_ptr(i)); + } + const std::string_view fmt_base() const { // TODO! how do we match on coord_t? return m_fmt_base; diff --git a/include/aare/Interpolator.hpp b/include/aare/Interpolator.hpp new file mode 100644 index 0000000..4905bce --- /dev/null +++ b/include/aare/Interpolator.hpp @@ -0,0 +1,29 @@ +#pragma once +#include "aare/NDArray.hpp" +#include "aare/NDView.hpp" +#include "aare/ClusterVector.hpp" +#include "aare/ClusterFile.hpp" //Cluster_3x3 +namespace aare{ + +struct Photon{ + double x; + double y; + double energy; +}; + +class Interpolator{ + NDArray m_ietax; + NDArray m_ietay; + + NDArray m_etabinsx; + NDArray m_etabinsy; + NDArray m_energy_bins; + public: + Interpolator(NDView etacube, NDView xbins, NDView ybins, NDView ebins); + NDArray get_ietax(){return m_ietax;} + NDArray get_ietay(){return m_ietay;} + + std::vector interpolate(const ClusterVector& clusters); +}; + +} // namespace aare \ No newline at end of file diff --git a/python/aare/__init__.py b/python/aare/__init__.py index f4c19cc..41deb6c 100644 --- a/python/aare/__init__.py +++ b/python/aare/__init__.py @@ -7,6 +7,7 @@ from ._aare import Pedestal_d, Pedestal_f, ClusterFinder, VarClusterFinder from ._aare import DetectorType from ._aare import ClusterFile from ._aare import hitmap +from ._aare import ROI from ._aare import ClusterFinderMT, ClusterCollector, ClusterFileSink, ClusterVector_i diff --git 
a/python/examples/play.py b/python/examples/play.py index 37754df..05f3c82 100644 --- a/python/examples/play.py +++ b/python/examples/play.py @@ -1,50 +1,40 @@ import sys sys.path.append('/home/l_msdetect/erik/aare/build') + #Our normal python imports from pathlib import Path import matplotlib.pyplot as plt +from mpl_toolkits.axes_grid1 import make_axes_locatable import numpy as np import boost_histogram as bh import time +import tifffile -import aare - -data = np.random.normal(10, 1, 1000) - -hist = bh.Histogram(bh.axis.Regular(10, 0, 20)) -hist.fill(data) +#Directly import what we need from aare +from aare import File, ClusterFile, hitmap +from aare._aare import calculate_eta2, ClusterFinderMT, ClusterCollector -x = hist.axes[0].centers -y = hist.values() -y_err = np.sqrt(y)+1 -res = aare.fit_gaus(x, y, y_err, chi2 = True) +base = Path('/mnt/sls_det_storage/moench_data/tomcat_nanoscope_21042020/09_Moench_650um/') +# for f in base.glob('*'): +# print(f.name) - -t_elapsed = time.perf_counter()-t0 -print(f'Histogram filling took: {t_elapsed:.3f}s {total_clusters/t_elapsed/1e6:.3f}M clusters/s') +cluster_fname = base/'acq_interp_center_3.8Mfr_200V.clust' +flatfield_fname = base/'flatfield_center_200_d0_f000000000000_0.clust' -histogram_data = hist3d.counts() -x = hist3d.axes[2].edges[:-1] - -y = histogram_data[100,100,:] -xx = np.linspace(x[0], x[-1]) -# fig, ax = plt.subplots() -# ax.step(x, y, where = 'post') - -y_err = np.sqrt(y) -y_err = np.zeros(y.size) -y_err += 1 - -# par = fit_gaus2(y,x, y_err) -# ax.plot(xx, gaus(xx,par)) -# print(par) - -res = fit_gaus(y,x) -res2 = fit_gaus(y,x, y_err) -print(res) -print(res2) +cluster_fname.stat().st_size/1e6/4 +image = np.zeros((400,400)) +with ClusterFile(cluster_fname, chunk_size = 1000000) as f: + for clusters in f: + test = hitmap(image.shape, clusters) + break + # image += hitmap(image.shape, clusters) + # break +print('We are back in python') +# fig, ax = plt.subplots(figsize = (7,7)) +# im = ax.imshow(image) +# 
im.set_clim(0,1) \ No newline at end of file diff --git a/python/src/aare.code-workspace b/python/src/aare.code-workspace new file mode 100644 index 0000000..01045a6 --- /dev/null +++ b/python/src/aare.code-workspace @@ -0,0 +1,98 @@ +{ + "folders": [ + { + "path": "../../.." + }, + { + "path": "../../../../slsDetectorPackage" + } + ], + "settings": { + "files.associations": { + "compare": "cpp", + "cstdint": "cpp", + "cctype": "cpp", + "clocale": "cpp", + "cmath": "cpp", + "csignal": "cpp", + "cstdarg": "cpp", + "cstddef": "cpp", + "cstdio": "cpp", + "cstdlib": "cpp", + "cstring": "cpp", + "ctime": "cpp", + "cwchar": "cpp", + "cwctype": "cpp", + "any": "cpp", + "array": "cpp", + "atomic": "cpp", + "strstream": "cpp", + "bit": "cpp", + "*.tcc": "cpp", + "bitset": "cpp", + "cfenv": "cpp", + "charconv": "cpp", + "chrono": "cpp", + "codecvt": "cpp", + "complex": "cpp", + "concepts": "cpp", + "condition_variable": "cpp", + "deque": "cpp", + "forward_list": "cpp", + "list": "cpp", + "map": "cpp", + "set": "cpp", + "string": "cpp", + "unordered_map": "cpp", + "unordered_set": "cpp", + "vector": "cpp", + "exception": "cpp", + "algorithm": "cpp", + "functional": "cpp", + "iterator": "cpp", + "memory": "cpp", + "memory_resource": "cpp", + "numeric": "cpp", + "optional": "cpp", + "random": "cpp", + "ratio": "cpp", + "source_location": "cpp", + "string_view": "cpp", + "system_error": "cpp", + "tuple": "cpp", + "type_traits": "cpp", + "utility": "cpp", + "format": "cpp", + "fstream": "cpp", + "future": "cpp", + "initializer_list": "cpp", + "iomanip": "cpp", + "iosfwd": "cpp", + "iostream": "cpp", + "istream": "cpp", + "limits": "cpp", + "mutex": "cpp", + "new": "cpp", + "numbers": "cpp", + "ostream": "cpp", + "ranges": "cpp", + "semaphore": "cpp", + "shared_mutex": "cpp", + "span": "cpp", + "sstream": "cpp", + "stdexcept": "cpp", + "stdfloat": "cpp", + "stop_token": "cpp", + "streambuf": "cpp", + "text_encoding": "cpp", + "thread": "cpp", + "cinttypes": "cpp", + "typeindex": 
"cpp", + "typeinfo": "cpp", + "valarray": "cpp", + "variant": "cpp", + "regex": "cpp", + "*.ipp": "cpp" + } + } +} \ No newline at end of file diff --git a/python/src/cluster.hpp b/python/src/cluster.hpp index 792b7e6..3db816a 100644 --- a/python/src/cluster.hpp +++ b/python/src/cluster.hpp @@ -20,7 +20,13 @@ template void define_cluster_vector(py::module &m, const std::string &typestr) { auto class_name = fmt::format("ClusterVector_{}", typestr); py::class_>(m, class_name.c_str(), py::buffer_protocol()) - .def(py::init()) + .def(py::init(), + py::arg("cluster_size_x") = 3, py::arg("cluster_size_y") = 3) + .def("push_back", + [](ClusterVector &self, int x, int y, py::array_t data) { + // auto view = make_view_2d(data); + self.push_back(x, y, reinterpret_cast(data.data())); + }) .def_property_readonly("size", &ClusterVector::size) .def("item_size", &ClusterVector::item_size) .def_property_readonly("fmt", @@ -38,6 +44,8 @@ void define_cluster_vector(py::module &m, const std::string &typestr) { auto *vec = new std::vector(self.sum_2x2()); return return_vector(vec); }) + .def_property_readonly("cluster_size_x", &ClusterVector::cluster_size_x) + .def_property_readonly("cluster_size_y", &ClusterVector::cluster_size_y) .def_property_readonly("capacity", &ClusterVector::capacity) .def_property("frame_number", &ClusterVector::frame_number, &ClusterVector::set_frame_number) diff --git a/python/src/cluster_file.hpp b/python/src/cluster_file.hpp index 8a431b5..f587443 100644 --- a/python/src/cluster_file.hpp +++ b/python/src/cluster_file.hpp @@ -31,6 +31,11 @@ void define_cluster_file_io_bindings(py::module &m) { auto v = new ClusterVector(self.read_clusters(n_clusters)); return v; },py::return_value_policy::take_ownership) + .def("read_clusters", + [](ClusterFile &self, size_t n_clusters, ROI roi) { + auto v = new ClusterVector(self.read_clusters(n_clusters, roi)); + return v; + },py::return_value_policy::take_ownership) .def("read_frame", [](ClusterFile &self) { auto v = new 
ClusterVector(self.read_frame()); diff --git a/python/src/file.hpp b/python/src/file.hpp index c3c800c..0d64e16 100644 --- a/python/src/file.hpp +++ b/python/src/file.hpp @@ -195,6 +195,8 @@ void define_file_io_bindings(py::module &m) { py::class_(m, "ROI") .def(py::init<>()) + .def(py::init(), py::arg("xmin"), + py::arg("xmax"), py::arg("ymin"), py::arg("ymax")) .def_readwrite("xmin", &ROI::xmin) .def_readwrite("xmax", &ROI::xmax) .def_readwrite("ymin", &ROI::ymin) diff --git a/python/src/interpolation.hpp b/python/src/interpolation.hpp new file mode 100644 index 0000000..02742e1 --- /dev/null +++ b/python/src/interpolation.hpp @@ -0,0 +1,58 @@ +#include "aare/Interpolator.hpp" +#include "aare/NDArray.hpp" +#include "aare/NDView.hpp" +#include "np_helper.hpp" +#include +#include +#include +#include + +namespace py = pybind11; +void define_interpolation_bindings(py::module &m) { + + PYBIND11_NUMPY_DTYPE(aare::Photon, x,y,energy); + + py::class_(m, "Interpolator") + .def(py::init([](py::array_t etacube, py::array_t xbins, + py::array_t ybins, py::array_t ebins) { + return Interpolator(make_view_3d(etacube), make_view_1d(xbins), + make_view_1d(ybins), make_view_1d(ebins)); + })) + .def("get_ietax", [](Interpolator& self){ + auto*ptr = new NDArray{}; + *ptr = self.get_ietax(); + return return_image_data(ptr); + }) + .def("get_ietay", [](Interpolator& self){ + auto*ptr = new NDArray{}; + *ptr = self.get_ietay(); + return return_image_data(ptr); + }) + .def("interpolate", [](Interpolator& self, const ClusterVector& clusters){ + auto photons = self.interpolate(clusters); + auto* ptr = new std::vector{photons}; + return return_vector(ptr); + }); + + // TODO! 
Evaluate without converting to double + m.def( + "hej", + []() { + // auto boost_histogram = py::module_::import("boost_histogram"); + // py::object axis = + // boost_histogram.attr("axis").attr("Regular")(10, 0.0, 10.0); + // py::object histogram = boost_histogram.attr("Histogram")(axis); + // return histogram; + // return h; + }, + R"( + Evaluate a 1D Gaussian function for all points in x using parameters par. + + Parameters + ---------- + x : array_like + The points at which to evaluate the Gaussian function. + par : array_like + The parameters of the Gaussian function. The first element is the amplitude, the second element is the mean, and the third element is the standard deviation. + )"); +} \ No newline at end of file diff --git a/python/src/module.cpp b/python/src/module.cpp index 70d143f..43f48ba 100644 --- a/python/src/module.cpp +++ b/python/src/module.cpp @@ -9,6 +9,7 @@ #include "cluster.hpp" #include "cluster_file.hpp" #include "fit.hpp" +#include "interpolation.hpp" //Pybind stuff #include @@ -31,5 +32,6 @@ PYBIND11_MODULE(_aare, m) { define_cluster_collector_bindings(m); define_cluster_file_sink_bindings(m); define_fit_bindings(m); + define_interpolation_bindings(m); } \ No newline at end of file diff --git a/src/ClusterFile.cpp b/src/ClusterFile.cpp index 2928d26..37b5e89 100644 --- a/src/ClusterFile.cpp +++ b/src/ClusterFile.cpp @@ -108,6 +108,79 @@ ClusterVector ClusterFile::read_clusters(size_t n_clusters) { return clusters; } +ClusterVector ClusterFile::read_clusters(size_t n_clusters, ROI roi) { + if (m_mode != "r") { + throw std::runtime_error("File not opened for reading"); + } + + ClusterVector clusters(3,3); + clusters.reserve(n_clusters); + + int32_t iframe = 0; // frame number needs to be 4 bytes! 
+ size_t nph_read = 0; + uint32_t nn = m_num_left; + uint32_t nph = m_num_left; // number of clusters in frame needs to be 4 + + // auto buf = reinterpret_cast(clusters.data()); + // auto buf = clusters.data(); + + Cluster3x3 tmp; //this would break if the cluster size changes + + // if there are photons left from previous frame read them first + if (nph) { + if (nph > n_clusters) { + // if we have more photons left in the frame then photons to read we + // read directly the requested number + nn = n_clusters; + } else { + nn = nph; + } + //Read one cluster, in the ROI push back + // nph_read += fread((buf + nph_read*clusters.item_size()), + // clusters.item_size(), nn, fp); + for(size_t i = 0; i < nn; i++){ + fread(&tmp, sizeof(tmp), 1, fp); + if(tmp.x >= roi.xmin && tmp.x <= roi.xmax && tmp.y >= roi.ymin && tmp.y <= roi.ymax){ + clusters.push_back(tmp.x, tmp.y, reinterpret_cast(tmp.data)); + nph_read++; + } + } + + m_num_left = nph - nn; // write back the number of photons left + } + + if (nph_read < n_clusters) { + // keep on reading frames and photons until reaching n_clusters + while (fread(&iframe, sizeof(iframe), 1, fp)) { + // read number of clusters in frame + if (fread(&nph, sizeof(nph), 1, fp)) { + if (nph > (n_clusters - nph_read)) + nn = n_clusters - nph_read; + else + nn = nph; + + // nph_read += fread((buf + nph_read*clusters.item_size()), + // clusters.item_size(), nn, fp); + for(size_t i = 0; i < nn; i++){ + fread(&tmp, sizeof(tmp), 1, fp); + if(tmp.x >= roi.xmin && tmp.x <= roi.xmax && tmp.y >= roi.ymin && tmp.y <= roi.ymax){ + clusters.push_back(tmp.x, tmp.y, reinterpret_cast(tmp.data)); + nph_read++; + } + } + m_num_left = nph - nn; + } + if (nph_read >= n_clusters) + break; + } + } + + // Resize the vector to the number of clusters. + // No new allocation, only change bounds. 
+ clusters.resize(nph_read); + return clusters; +} + ClusterVector ClusterFile::read_frame() { if (m_mode != "r") { throw std::runtime_error("File not opened for reading"); @@ -268,11 +341,23 @@ ClusterVector ClusterFile::read_frame() { NDArray calculate_eta2(ClusterVector &clusters) { //TOTO! make work with 2x2 clusters NDArray eta2({static_cast(clusters.size()), 2}); - for (size_t i = 0; i < clusters.size(); i++) { - auto e = calculate_eta2(clusters.at(i)); - eta2(i, 0) = e.x; - eta2(i, 1) = e.y; + + if (clusters.cluster_size_x() == 3 || clusters.cluster_size_y() == 3) { + for (size_t i = 0; i < clusters.size(); i++) { + auto e = calculate_eta2(clusters.at(i)); + eta2(i, 0) = e.x; + eta2(i, 1) = e.y; + } + }else if(clusters.cluster_size_x() == 2 || clusters.cluster_size_y() == 2){ + for (size_t i = 0; i < clusters.size(); i++) { + auto e = calculate_eta2(clusters.at(i)); + eta2(i, 0) = e.x; + eta2(i, 1) = e.y; + } + }else{ + throw std::runtime_error("Only 3x3 and 2x2 clusters are supported"); } + return eta2; } @@ -290,7 +375,7 @@ Eta2 calculate_eta2(Cluster3x3 &cl) { tot2[3] = cl.data[4] + cl.data[5] + cl.data[7] + cl.data[8]; auto c = std::max_element(tot2.begin(), tot2.end()) - tot2.begin(); - + eta.sum = tot2[c]; switch (c) { case cBottomLeft: if ((cl.data[3] + cl.data[4]) != 0) @@ -333,6 +418,19 @@ Eta2 calculate_eta2(Cluster3x3 &cl) { return eta; } + +Eta2 calculate_eta2(Cluster2x2 &cl) { + Eta2 eta{}; + + eta.x = static_cast(cl.data[0]) / (cl.data[0] + cl.data[1]); + eta.y = static_cast(cl.data[0]) / (cl.data[0] + cl.data[2]); + eta.sum = cl.data[0] + cl.data[1] + cl.data[2]+ cl.data[3]; + eta.c = cBottomLeft; //TODO! 
This is not correct + return eta; +} + + + int analyze_cluster(Cluster3x3 &cl, int32_t *t2, int32_t *t3, char *quad, double *eta2x, double *eta2y, double *eta3x, double *eta3y) { diff --git a/src/Interpolator.cpp b/src/Interpolator.cpp new file mode 100644 index 0000000..a4ecb94 --- /dev/null +++ b/src/Interpolator.cpp @@ -0,0 +1,159 @@ +#include "aare/Interpolator.hpp" + +namespace aare { + +Interpolator::Interpolator(NDView etacube, NDView xbins, + NDView ybins, NDView ebins) + : m_ietax(etacube), m_ietay(etacube), m_etabinsx(xbins), m_etabinsy(ybins), m_energy_bins(ebins) { + if (etacube.shape(0) != xbins.size() || etacube.shape(1) != ybins.size() || + etacube.shape(2) != ebins.size()) { + throw std::invalid_argument( + "The shape of the etacube does not match the shape of the bins"); + } + + // Cumulative sum in the x direction, can maybe be combined with a copy? + for (ssize_t k = 0; k < m_ietax.shape(2); k++) { + for (ssize_t j = 0; j < m_ietax.shape(1); j++) { + for (ssize_t i = 1; i < m_ietax.shape(0); i++) { + m_ietax(i, j, k) += m_ietax(i - 1, j, k); + } + } + } + + // Normalize by the highest row, if norm less than 1 don't do anything + for (ssize_t k = 0; k < m_ietax.shape(2); k++) { + for (ssize_t j = 0; j < m_ietax.shape(1); j++) { + auto val = m_ietax(m_ietax.shape(0) - 1, j, k); + double norm = val < 1 ? 1 : val; + for (ssize_t i = 0; i < m_ietax.shape(0); i++) { + m_ietax(i, j, k) /= norm; + } + } + } + + // Cumulative sum in the y direction + for (ssize_t k = 0; k < m_ietay.shape(2); k++) { + for (ssize_t i = 0; i < m_ietay.shape(0); i++) { + for (ssize_t j = 1; j < m_ietay.shape(1); j++) { + m_ietay(i, j, k) += m_ietay(i, j - 1, k); + } + } + } + + // Normalize by the highest column, if norm less than 1 don't do anything + for (ssize_t k = 0; k < m_ietay.shape(2); k++) { + for (ssize_t i = 0; i < m_ietay.shape(0); i++) { + auto val = m_ietay(i, m_ietay.shape(1) - 1, k); + double norm = val < 1 ? 
1 : val; + for (ssize_t j = 0; j < m_ietay.shape(1); j++) { + m_ietay(i, j, k) /= norm; + } + } + } + +} + +std::vector Interpolator::interpolate(const ClusterVector& clusters) { + std::vector photons; + photons.reserve(clusters.size()); + + if (clusters.cluster_size_x() == 3 || clusters.cluster_size_y() == 3) { + for (size_t i = 0; i(i); + Eta2 eta= calculate_eta2(cluster); + + Photon photon; + photon.x = cluster.x; + photon.y = cluster.y; + photon.energy = eta.sum; + + //Now do some actual interpolation. + //Find which energy bin the cluster is in + //TODO! Could we use boost-histogram Axis.index here? + ssize_t idx = std::lower_bound(m_energy_bins.begin(), m_energy_bins.end(), photon.energy)-m_energy_bins.begin(); + auto ix = std::lower_bound(m_etabinsx.begin(), m_etabinsx.end(), eta.x)- m_etabinsx.begin(); + auto iy = std::lower_bound(m_etabinsy.begin(), m_etabinsy.end(), eta.y)- m_etabinsy.begin(); + + + // ibx=(np.abs(etabinsx - ex)).argmin() #Find out which bin the eta should land in + // iby=(np.abs(etabinsy - ey)).argmin() + double dX, dY; + int ex, ey; + // cBottomLeft = 0, + // cBottomRight = 1, + // cTopLeft = 2, + // cTopRight = 3 + switch (eta.c) { + case cTopLeft: + dX = -1.; + dY = 0; + break; + case cTopRight:; + dX = 0; + dY = 0; + break; + case cBottomLeft: + dX = -1.; + dY = -1.; + break; + case cBottomRight: + dX = 0; + dY = -1.; + break; + } + photon.x += m_ietax(ix, iy, idx) + dX + 0.5; + photon.y += m_ietay(ix, iy, idx) + dY + 0.5; + + + // fmt::print("x: {}, y: {}, energy: {}\n", photon.x, photon.y, photon.energy); + photons.push_back(photon); + } + }else if(clusters.cluster_size_x() == 2 || clusters.cluster_size_y() == 2){ + //TODO! Implement 2x2 interpolation + for (size_t i = 0; i(i); + Eta2 eta= calculate_eta2(cluster); + + Photon photon; + photon.x = cluster.x; + photon.y = cluster.y; + photon.energy = eta.sum; + + //Now do some actual interpolation. + //Find which energy bin the cluster is in + //TODO! 
Could we use boost-histogram Axis.index here? + ssize_t idx = std::lower_bound(m_energy_bins.begin(), m_energy_bins.end(), photon.energy)-m_energy_bins.begin(); + // auto ix = std::lower_bound(m_etabinsx.begin(), m_etabinsx.end(), eta.x)- m_etabinsx.begin(); + // auto iy = std::lower_bound(m_etabinsy.begin(), m_etabinsy.end(), eta.y)- m_etabinsy.begin(); + // if(ix<0) ix=0; + // if(iy<0) iy=0; + + auto find_index = [](NDArray& etabins, double val){ + auto iter = std::min_element(etabins.begin(), etabins.end(), + [val,etabins](double a, double b) { + return std::abs(a - val) < std::abs(b - val); + }); + return std::distance(etabins.begin(), iter); + }; + auto ix = find_index(m_etabinsx, eta.x)-1; + auto iy = find_index(m_etabinsy, eta.y)-1; + + photon.x += (1-m_ietax(ix, iy, idx))*2; //eta goes between 0 and 1 but we could move the hit anywhere in the 2x2 + photon.y += (1-m_ietay(ix, iy, idx))*2; + + // photon.x = ix; + // photon.y = iy; + photons.push_back(photon); + } + + }else{ + throw std::runtime_error("Only 3x3 and 2x2 clusters are supported for interpolation"); + } + + + return photons; +} + +} // namespace aare \ No newline at end of file diff --git a/src/NDArray.test.cpp b/src/NDArray.test.cpp index 942481c..eff3e2c 100644 --- a/src/NDArray.test.cpp +++ b/src/NDArray.test.cpp @@ -2,6 +2,7 @@ #include #include #include +#include using aare::NDArray; using aare::NDView; @@ -34,6 +35,24 @@ TEST_CASE("Construct from an NDView") { } } +TEST_CASE("3D NDArray from NDView"){ + std::vector data(27); + std::iota(data.begin(), data.end(), 0); + NDView view(data.data(), Shape<3>{3, 3, 3}); + NDArray image(view); + REQUIRE(image.shape() == view.shape()); + REQUIRE(image.size() == view.size()); + REQUIRE(image.data() != view.data()); + + for(int64_t i=0; i shape{{20}}; NDArray img(shape, 3); From 3a987319d4f2233942e994f61f13f9b28c2773fa Mon Sep 17 00:00:00 2001 From: froejdh_e Date: Wed, 5 Mar 2025 21:51:23 +0100 Subject: [PATCH 041/120] WIP --- src/ClusterFile.cpp | 6 
+++--- src/Interpolator.cpp | 8 +++++--- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/src/ClusterFile.cpp b/src/ClusterFile.cpp index 37b5e89..be3f607 100644 --- a/src/ClusterFile.cpp +++ b/src/ClusterFile.cpp @@ -422,10 +422,10 @@ Eta2 calculate_eta2(Cluster3x3 &cl) { Eta2 calculate_eta2(Cluster2x2 &cl) { Eta2 eta{}; - eta.x = static_cast(cl.data[0]) / (cl.data[0] + cl.data[1]); - eta.y = static_cast(cl.data[0]) / (cl.data[0] + cl.data[2]); + eta.x = static_cast(cl.data[1]) / (cl.data[0] + cl.data[1]); + eta.y = static_cast(cl.data[2]) / (cl.data[0] + cl.data[2]); eta.sum = cl.data[0] + cl.data[1] + cl.data[2]+ cl.data[3]; - eta.c = cBottomLeft; //TODO! This is not correct + eta.c = cBottomLeft; //TODO! This is not correct, but need to put something return eta; } diff --git a/src/Interpolator.cpp b/src/Interpolator.cpp index a4ecb94..0e72849 100644 --- a/src/Interpolator.cpp +++ b/src/Interpolator.cpp @@ -139,12 +139,14 @@ std::vector Interpolator::interpolate(const ClusterVector& clus }; auto ix = find_index(m_etabinsx, eta.x)-1; auto iy = find_index(m_etabinsy, eta.y)-1; + if(ix<0) ix=0; + if(iy<0) iy=0; - photon.x += (1-m_ietax(ix, iy, idx))*2; //eta goes between 0 and 1 but we could move the hit anywhere in the 2x2 - photon.y += (1-m_ietay(ix, iy, idx))*2; + photon.x += m_ietax(ix, iy, 0)*2; //eta goes between 0 and 1 but we could move the hit anywhere in the 2x2 + photon.y += m_ietay(ix, iy, 0)*2; // photon.x = ix; - // photon.y = iy; + // photon.y = idx; photons.push_back(photon); } From 332bdeb02ba5ac03fa37fe30b114fb7db1556ba4 Mon Sep 17 00:00:00 2001 From: froejdh_e Date: Fri, 14 Mar 2025 11:07:09 +0100 Subject: [PATCH 042/120] modified algo --- CMakeLists.txt | 1 + include/aare/NDArray.hpp | 3 ++ include/aare/algorithm.hpp | 55 ++++++++++++++++++++++++ python/examples/play.py | 87 +++++++++++++++++++++++++++----------- src/Interpolator.cpp | 60 ++++++++++---------------- src/algorithm.test.cpp | 63 +++++++++++++++++++++++++++ 6 files 
changed, 206 insertions(+), 63 deletions(-) create mode 100644 include/aare/algorithm.hpp create mode 100644 src/algorithm.test.cpp diff --git a/CMakeLists.txt b/CMakeLists.txt index bff2afe..4772f0b 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -386,6 +386,7 @@ endif() if(AARE_TESTS) set(TestSources + ${CMAKE_CURRENT_SOURCE_DIR}/src/algorithm.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/defs.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/Dtype.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/Frame.test.cpp diff --git a/include/aare/NDArray.hpp b/include/aare/NDArray.hpp index 310d070..45d3a83 100644 --- a/include/aare/NDArray.hpp +++ b/include/aare/NDArray.hpp @@ -102,6 +102,9 @@ class NDArray : public ArrayExpr, Ndim> { auto begin() { return data_; } auto end() { return data_ + size_; } + auto begin() const { return data_; } + auto end() const { return data_ + size_; } + using value_type = T; NDArray &operator=(NDArray &&other) noexcept; // Move assign diff --git a/include/aare/algorithm.hpp b/include/aare/algorithm.hpp new file mode 100644 index 0000000..5d6dc57 --- /dev/null +++ b/include/aare/algorithm.hpp @@ -0,0 +1,55 @@ + +#pragma once +#include +#include +#include +#include + +namespace aare { +/** + * @brief Find the index of the last element smaller than val + * assume a sorted array + */ +template +size_t last_smaller(const T* first, const T* last, T val) { + for (auto iter = first+1; iter != last; ++iter) { + if (*iter > val) { + return std::distance(first, iter-1); + } + } + return std::distance(first, last-1); +} + +template +size_t last_smaller(const NDArray& arr, T val) { + return last_smaller(arr.begin(), arr.end(), val); +} + + +template +size_t nearest_index(const T* first, const T* last, T val) { + auto iter = std::min_element(first, last, + [val](T a, T b) { + return std::abs(a - val) < std::abs(b - val); + }); + return std::distance(first, iter); +} + +template +size_t nearest_index(const NDArray& arr, T val) { + return nearest_index(arr.begin(), 
arr.end(), val); +} + +template +size_t nearest_index(const std::vector& vec, T val) { + return nearest_index(vec.data(), vec.data()+vec.size(), val); +} + +template +size_t nearest_index(const std::array& arr, T val) { + return nearest_index(arr.data(), arr.data()+arr.size(), val); +} + + + +} // namespace aare \ No newline at end of file diff --git a/python/examples/play.py b/python/examples/play.py index 05f3c82..b2c368b 100644 --- a/python/examples/play.py +++ b/python/examples/play.py @@ -1,40 +1,77 @@ import sys sys.path.append('/home/l_msdetect/erik/aare/build') +from aare._aare import ClusterVector_i, Interpolator -#Our normal python imports -from pathlib import Path -import matplotlib.pyplot as plt -from mpl_toolkits.axes_grid1 import make_axes_locatable +import pickle import numpy as np +import matplotlib.pyplot as plt import boost_histogram as bh +import torch +import math import time -import tifffile -#Directly import what we need from aare -from aare import File, ClusterFile, hitmap -from aare._aare import calculate_eta2, ClusterFinderMT, ClusterCollector +def gaussian_2d(mx, my, sigma = 1, res=100, grid_size = 2): + """ + Generate a 2D gaussian as position mx, my, with sigma=sigma. + The gaussian is placed on a 2x2 pixel matrix with resolution + res in one dimesion. 
+ """ + x = torch.linspace(0, pixel_size*grid_size, res) + x,y = torch.meshgrid(x,x, indexing="ij") + return 1 / (2*math.pi*sigma**2) * \ + torch.exp(-((x - my)**2 / (2*sigma**2) + (y - mx)**2 / (2*sigma**2))) + +scale = 1000 #Scale factor when converting to integer +pixel_size = 25 #um +grid = 2 +resolution = 100 +sigma_um = 10 +xa = np.linspace(0,grid*pixel_size,resolution) +ticks = [0, 25, 50] + +hit = np.array((20,20)) +etahist_fname = "/home/l_msdetect/erik/tmp/test_hist.pkl" + +local_resolution = 99 +grid_size = 3 +xaxis = np.linspace(0,grid_size*pixel_size, local_resolution) +t = gaussian_2d(hit[0],hit[1], grid_size = grid_size, sigma = 10, res = local_resolution) +pixels = t.reshape(grid_size, t.shape[0] // grid_size, grid_size, t.shape[1] // grid_size).sum(axis = 3).sum(axis = 1) +pixels = pixels.numpy() +pixels = (pixels*scale).astype(np.int32) +v = ClusterVector_i(3,3) +v.push_back(1,1, pixels) + +with open(etahist_fname, "rb") as f: + hist = pickle.load(f) +eta = hist.view().copy() +etabinsx = np.array(hist.axes.edges.T[0].flat) +etabinsy = np.array(hist.axes.edges.T[1].flat) +ebins = np.array(hist.axes.edges.T[2].flat) +p = Interpolator(eta, etabinsx[0:-1], etabinsy[0:-1], ebins[0:-1]) -base = Path('/mnt/sls_det_storage/moench_data/tomcat_nanoscope_21042020/09_Moench_650um/') -# for f in base.glob('*'): -# print(f.name) +#Generate the hit -cluster_fname = base/'acq_interp_center_3.8Mfr_200V.clust' -flatfield_fname = base/'flatfield_center_200_d0_f000000000000_0.clust' -cluster_fname.stat().st_size/1e6/4 -image = np.zeros((400,400)) -with ClusterFile(cluster_fname, chunk_size = 1000000) as f: - for clusters in f: - test = hitmap(image.shape, clusters) - break - # image += hitmap(image.shape, clusters) - # break -print('We are back in python') -# fig, ax = plt.subplots(figsize = (7,7)) -# im = ax.imshow(image) -# im.set_clim(0,1) \ No newline at end of file + +tmp = p.interpolate(v) +print(f'tmp:{tmp}') +pos = np.array((tmp['x'], tmp['y']))*25 + + 
+print(pixels) +fig, ax = plt.subplots(figsize = (7,7)) +ax.pcolormesh(xaxis, xaxis, t) +ax.plot(*pos, 'o') +ax.set_xticks([0,25,50,75]) +ax.set_yticks([0,25,50,75]) +ax.set_xlim(0,75) +ax.set_ylim(0,75) +ax.grid() +print(f'{hit=}') +print(f'{pos=}') \ No newline at end of file diff --git a/src/Interpolator.cpp b/src/Interpolator.cpp index 0e72849..85e0b5d 100644 --- a/src/Interpolator.cpp +++ b/src/Interpolator.cpp @@ -1,4 +1,5 @@ #include "aare/Interpolator.hpp" +#include "aare/algorithm.hpp" namespace aare { @@ -68,16 +69,17 @@ std::vector Interpolator::interpolate(const ClusterVector& clus photon.y = cluster.y; photon.energy = eta.sum; - //Now do some actual interpolation. - //Find which energy bin the cluster is in - //TODO! Could we use boost-histogram Axis.index here? - ssize_t idx = std::lower_bound(m_energy_bins.begin(), m_energy_bins.end(), photon.energy)-m_energy_bins.begin(); - auto ix = std::lower_bound(m_etabinsx.begin(), m_etabinsx.end(), eta.x)- m_etabinsx.begin(); - auto iy = std::lower_bound(m_etabinsy.begin(), m_etabinsy.end(), eta.y)- m_etabinsy.begin(); - + // auto ie = nearest_index(m_energy_bins, photon.energy)-1; + // auto ix = nearest_index(m_etabinsx, eta.x)-1; + // auto iy = nearest_index(m_etabinsy, eta.y)-1; + //Finding the index of the last element that is smaller + //should work fine as long as we have many bins + auto ie = last_smaller(m_energy_bins, photon.energy); + auto ix = last_smaller(m_etabinsx, eta.x); + auto iy = last_smaller(m_etabinsy, eta.y); + + // fmt::print("ex: {}, ix: {}, iy: {}\n", ie, ix, iy); - // ibx=(np.abs(etabinsx - ex)).argmin() #Find out which bin the eta should land in - // iby=(np.abs(etabinsy - ey)).argmin() double dX, dY; int ex, ey; // cBottomLeft = 0, @@ -98,21 +100,16 @@ std::vector Interpolator::interpolate(const ClusterVector& clus dY = -1.; break; case cBottomRight: - dX = 0; + dX = 0.; dY = -1.; break; } - photon.x += m_ietax(ix, iy, idx) + dX + 0.5; - photon.y += m_ietay(ix, iy, idx) + dY + 0.5; 
- - - // fmt::print("x: {}, y: {}, energy: {}\n", photon.x, photon.y, photon.energy); + photon.x += m_ietax(ix, iy, 0)*2 + dX; + photon.y += m_ietay(ix, iy, 0)*2 + dY; photons.push_back(photon); } }else if(clusters.cluster_size_x() == 2 || clusters.cluster_size_y() == 2){ - //TODO! Implement 2x2 interpolation for (size_t i = 0; i(i); Eta2 eta= calculate_eta2(cluster); @@ -123,30 +120,17 @@ std::vector Interpolator::interpolate(const ClusterVector& clus //Now do some actual interpolation. //Find which energy bin the cluster is in - //TODO! Could we use boost-histogram Axis.index here? - ssize_t idx = std::lower_bound(m_energy_bins.begin(), m_energy_bins.end(), photon.energy)-m_energy_bins.begin(); - // auto ix = std::lower_bound(m_etabinsx.begin(), m_etabinsx.end(), eta.x)- m_etabinsx.begin(); - // auto iy = std::lower_bound(m_etabinsy.begin(), m_etabinsy.end(), eta.y)- m_etabinsy.begin(); - // if(ix<0) ix=0; - // if(iy<0) iy=0; - - auto find_index = [](NDArray& etabins, double val){ - auto iter = std::min_element(etabins.begin(), etabins.end(), - [val,etabins](double a, double b) { - return std::abs(a - val) < std::abs(b - val); - }); - return std::distance(etabins.begin(), iter); - }; - auto ix = find_index(m_etabinsx, eta.x)-1; - auto iy = find_index(m_etabinsy, eta.y)-1; - if(ix<0) ix=0; - if(iy<0) iy=0; + // auto ie = nearest_index(m_energy_bins, photon.energy)-1; + // auto ix = nearest_index(m_etabinsx, eta.x)-1; + // auto iy = nearest_index(m_etabinsy, eta.y)-1; + //Finding the index of the last element that is smaller + //should work fine as long as we have many bins + auto ie = last_smaller(m_energy_bins, photon.energy); + auto ix = last_smaller(m_etabinsx, eta.x); + auto iy = last_smaller(m_etabinsy, eta.y); photon.x += m_ietax(ix, iy, 0)*2; //eta goes between 0 and 1 but we could move the hit anywhere in the 2x2 photon.y += m_ietay(ix, iy, 0)*2; - - // photon.x = ix; - // photon.y = idx; photons.push_back(photon); } diff --git a/src/algorithm.test.cpp 
b/src/algorithm.test.cpp new file mode 100644 index 0000000..9e75eb9 --- /dev/null +++ b/src/algorithm.test.cpp @@ -0,0 +1,63 @@ + + +#include +#include + + +TEST_CASE("Find the closed index in a 1D array", "[algorithm]") { + aare::NDArray arr({5}); + for (size_t i = 0; i < arr.size(); i++) { + arr[i] = i; + } + // arr 0, 1, 2, 3, 4 + REQUIRE(aare::nearest_index(arr, 2.3) == 2); + REQUIRE(aare::nearest_index(arr, 2.6) == 3); + REQUIRE(aare::nearest_index(arr, 45.0) == 4); + REQUIRE(aare::nearest_index(arr, 0.0) == 0); + REQUIRE(aare::nearest_index(arr, -1.0) == 0); +} + +TEST_CASE("Passing integers to nearest_index works"){ + aare::NDArray arr({5}); + for (size_t i = 0; i < arr.size(); i++) { + arr[i] = i; + } + // arr 0, 1, 2, 3, 4 + REQUIRE(aare::nearest_index(arr, 2) == 2); + REQUIRE(aare::nearest_index(arr, 3) == 3); + REQUIRE(aare::nearest_index(arr, 45) == 4); + REQUIRE(aare::nearest_index(arr, 0) == 0); + REQUIRE(aare::nearest_index(arr, -1) == 0); +} + + +TEST_CASE("nearest_index works with std::vector"){ + std::vector vec = {0, 1, 2, 3, 4}; + REQUIRE(aare::nearest_index(vec, 2.123) == 2); + REQUIRE(aare::nearest_index(vec, 2.66) == 3); + REQUIRE(aare::nearest_index(vec, 4555555.0) == 4); + REQUIRE(aare::nearest_index(vec, 0.0) == 0); + REQUIRE(aare::nearest_index(vec, -10.0) == 0); +} + +TEST_CASE("nearest index works with std::array"){ + std::array arr = {0, 1, 2, 3, 4}; + REQUIRE(aare::nearest_index(arr, 2.123) == 2); + REQUIRE(aare::nearest_index(arr, 2.501) == 3); + REQUIRE(aare::nearest_index(arr, 4555555.0) == 4); + REQUIRE(aare::nearest_index(arr, 0.0) == 0); + REQUIRE(aare::nearest_index(arr, -10.0) == 0); +} + + +TEST_CASE("last smaller"){ + aare::NDArray arr({5}); + for (size_t i = 0; i < arr.size(); i++) { + arr[i] = i; + } + // arr 0, 1, 2, 3, 4 + REQUIRE(aare::last_smaller(arr, -10.0) == 0); + REQUIRE(aare::last_smaller(arr, 0.0) == 0); + REQUIRE(aare::last_smaller(arr, 2.3) == 2); + REQUIRE(aare::last_smaller(arr, 253.) 
== 4); +} \ No newline at end of file From 1ad362ccfc1679a3c42328deab4804162095b3d2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Erik=20Fr=C3=B6jdh?= Date: Mon, 17 Mar 2025 15:21:59 +0100 Subject: [PATCH 043/120] added action for gitea (#136) --- .gitea/workflows/cmake_build.yml | 58 ++++++++++++++++++++++++++++++++ 1 file changed, 58 insertions(+) create mode 100644 .gitea/workflows/cmake_build.yml diff --git a/.gitea/workflows/cmake_build.yml b/.gitea/workflows/cmake_build.yml new file mode 100644 index 0000000..43a0181 --- /dev/null +++ b/.gitea/workflows/cmake_build.yml @@ -0,0 +1,58 @@ +name: Build the package using cmake then documentation + +on: + workflow_dispatch: + push: + + + +permissions: + contents: read + pages: write + id-token: write + +jobs: + build: + strategy: + fail-fast: false + matrix: + platform: [ubuntu-latest, ] # macos-12, windows-2019] + python-version: ["3.12",] + + runs-on: ${{ matrix.platform }} + + # The setup-miniconda action needs this to activate miniconda + defaults: + run: + shell: "bash -l {0}" + + steps: + - uses: actions/checkout@v4 + + - name: Setup dev env + run: | + sudo apt-get update + sudo apt-get -y install cmake gcc g++ + + - name: Get conda + uses: conda-incubator/setup-miniconda@v3.0.4 + with: + python-version: ${{ matrix.python-version }} + channels: conda-forge + + - name: Prepare + run: conda install doxygen sphinx=7.1.2 breathe pybind11 sphinx_rtd_theme furo nlohmann_json zeromq fmt numpy + + - name: Build library + run: | + mkdir build + cd build + cmake .. 
-DAARE_SYSTEM_LIBRARIES=ON -DAARE_DOCS=ON + make -j 2 + make docs + + + + + + From e59a361b513282d3091d8f2552f64ca9fa876050 Mon Sep 17 00:00:00 2001 From: froejdh_e Date: Mon, 17 Mar 2025 15:23:55 +0100 Subject: [PATCH 044/120] removed workspace --- python/src/aare.code-workspace | 98 ---------------------------------- 1 file changed, 98 deletions(-) delete mode 100644 python/src/aare.code-workspace diff --git a/python/src/aare.code-workspace b/python/src/aare.code-workspace deleted file mode 100644 index 01045a6..0000000 --- a/python/src/aare.code-workspace +++ /dev/null @@ -1,98 +0,0 @@ -{ - "folders": [ - { - "path": "../../.." - }, - { - "path": "../../../../slsDetectorPackage" - } - ], - "settings": { - "files.associations": { - "compare": "cpp", - "cstdint": "cpp", - "cctype": "cpp", - "clocale": "cpp", - "cmath": "cpp", - "csignal": "cpp", - "cstdarg": "cpp", - "cstddef": "cpp", - "cstdio": "cpp", - "cstdlib": "cpp", - "cstring": "cpp", - "ctime": "cpp", - "cwchar": "cpp", - "cwctype": "cpp", - "any": "cpp", - "array": "cpp", - "atomic": "cpp", - "strstream": "cpp", - "bit": "cpp", - "*.tcc": "cpp", - "bitset": "cpp", - "cfenv": "cpp", - "charconv": "cpp", - "chrono": "cpp", - "codecvt": "cpp", - "complex": "cpp", - "concepts": "cpp", - "condition_variable": "cpp", - "deque": "cpp", - "forward_list": "cpp", - "list": "cpp", - "map": "cpp", - "set": "cpp", - "string": "cpp", - "unordered_map": "cpp", - "unordered_set": "cpp", - "vector": "cpp", - "exception": "cpp", - "algorithm": "cpp", - "functional": "cpp", - "iterator": "cpp", - "memory": "cpp", - "memory_resource": "cpp", - "numeric": "cpp", - "optional": "cpp", - "random": "cpp", - "ratio": "cpp", - "source_location": "cpp", - "string_view": "cpp", - "system_error": "cpp", - "tuple": "cpp", - "type_traits": "cpp", - "utility": "cpp", - "format": "cpp", - "fstream": "cpp", - "future": "cpp", - "initializer_list": "cpp", - "iomanip": "cpp", - "iosfwd": "cpp", - "iostream": "cpp", - "istream": "cpp", - 
"limits": "cpp", - "mutex": "cpp", - "new": "cpp", - "numbers": "cpp", - "ostream": "cpp", - "ranges": "cpp", - "semaphore": "cpp", - "shared_mutex": "cpp", - "span": "cpp", - "sstream": "cpp", - "stdexcept": "cpp", - "stdfloat": "cpp", - "stop_token": "cpp", - "streambuf": "cpp", - "text_encoding": "cpp", - "thread": "cpp", - "cinttypes": "cpp", - "typeindex": "cpp", - "typeinfo": "cpp", - "valarray": "cpp", - "variant": "cpp", - "regex": "cpp", - "*.ipp": "cpp" - } - } -} \ No newline at end of file From 11cd2ec654c7aaf3ff3c110ab815625e5594adda Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Erik=20Fr=C3=B6jdh?= Date: Tue, 18 Mar 2025 17:45:38 +0100 Subject: [PATCH 045/120] Interpolate (#137) - added eta based interpolation --- CMakeLists.txt | 2 + include/aare/ClusterFile.hpp | 10 +++ include/aare/ClusterVector.hpp | 4 + include/aare/Interpolator.hpp | 29 +++++++ include/aare/NDArray.hpp | 3 + include/aare/algorithm.hpp | 55 +++++++++++++ python/aare/__init__.py | 3 +- python/examples/play.py | 91 +++++++++++++-------- python/src/cluster.hpp | 10 ++- python/src/cluster_file.hpp | 5 ++ python/src/file.hpp | 2 + python/src/interpolation.hpp | 58 +++++++++++++ python/src/module.cpp | 2 + src/ClusterFile.cpp | 109 +++++++++++++++++++++++-- src/Interpolator.cpp | 144 +++++++++++++++++++++++++++++++++ src/NDArray.test.cpp | 19 +++++ src/algorithm.test.cpp | 73 +++++++++++++++++ 17 files changed, 580 insertions(+), 39 deletions(-) create mode 100644 include/aare/Interpolator.hpp create mode 100644 include/aare/algorithm.hpp create mode 100644 python/src/interpolation.hpp create mode 100644 src/Interpolator.cpp create mode 100644 src/algorithm.test.cpp diff --git a/CMakeLists.txt b/CMakeLists.txt index cff4c75..4772f0b 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -346,6 +346,7 @@ set(SourceFiles ${CMAKE_CURRENT_SOURCE_DIR}/src/geo_helpers.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyFile.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyHelpers.cpp + 
${CMAKE_CURRENT_SOURCE_DIR}/src/Interpolator.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/PixelMap.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/RawFile.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/RawSubFile.cpp @@ -385,6 +386,7 @@ endif() if(AARE_TESTS) set(TestSources + ${CMAKE_CURRENT_SOURCE_DIR}/src/algorithm.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/defs.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/Dtype.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/Frame.test.cpp diff --git a/include/aare/ClusterFile.hpp b/include/aare/ClusterFile.hpp index b796763..5bea342 100644 --- a/include/aare/ClusterFile.hpp +++ b/include/aare/ClusterFile.hpp @@ -8,11 +8,17 @@ namespace aare { +//TODO! Template this? struct Cluster3x3 { int16_t x; int16_t y; int32_t data[9]; }; +struct Cluster2x2 { + int16_t x; + int16_t y; + int32_t data[4]; +}; typedef enum { cBottomLeft = 0, @@ -37,6 +43,7 @@ struct Eta2 { double x; double y; corner c; + int32_t sum; }; struct ClusterAnalysis { @@ -97,6 +104,8 @@ class ClusterFile { */ ClusterVector read_clusters(size_t n_clusters); + ClusterVector read_clusters(size_t n_clusters, ROI roi); + /** * @brief Read a single frame from the file and return the clusters. The * cluster vector will have the frame number set. @@ -131,5 +140,6 @@ int analyze_cluster(Cluster3x3 &cl, int32_t *t2, int32_t *t3, char *quad, NDArray calculate_eta2(ClusterVector &clusters); Eta2 calculate_eta2(Cluster3x3 &cl); +Eta2 calculate_eta2(Cluster2x2 &cl); } // namespace aare diff --git a/include/aare/ClusterVector.hpp b/include/aare/ClusterVector.hpp index febf06c..1c15a22 100644 --- a/include/aare/ClusterVector.hpp +++ b/include/aare/ClusterVector.hpp @@ -231,6 +231,10 @@ template class ClusterVector { return *reinterpret_cast(element_ptr(i)); } + template const V &at(size_t i) const { + return *reinterpret_cast(element_ptr(i)); + } + const std::string_view fmt_base() const { // TODO! how do we match on coord_t? 
return m_fmt_base; diff --git a/include/aare/Interpolator.hpp b/include/aare/Interpolator.hpp new file mode 100644 index 0000000..4905bce --- /dev/null +++ b/include/aare/Interpolator.hpp @@ -0,0 +1,29 @@ +#pragma once +#include "aare/NDArray.hpp" +#include "aare/NDView.hpp" +#include "aare/ClusterVector.hpp" +#include "aare/ClusterFile.hpp" //Cluster_3x3 +namespace aare{ + +struct Photon{ + double x; + double y; + double energy; +}; + +class Interpolator{ + NDArray m_ietax; + NDArray m_ietay; + + NDArray m_etabinsx; + NDArray m_etabinsy; + NDArray m_energy_bins; + public: + Interpolator(NDView etacube, NDView xbins, NDView ybins, NDView ebins); + NDArray get_ietax(){return m_ietax;} + NDArray get_ietay(){return m_ietay;} + + std::vector interpolate(const ClusterVector& clusters); +}; + +} // namespace aare \ No newline at end of file diff --git a/include/aare/NDArray.hpp b/include/aare/NDArray.hpp index 310d070..45d3a83 100644 --- a/include/aare/NDArray.hpp +++ b/include/aare/NDArray.hpp @@ -102,6 +102,9 @@ class NDArray : public ArrayExpr, Ndim> { auto begin() { return data_; } auto end() { return data_ + size_; } + auto begin() const { return data_; } + auto end() const { return data_ + size_; } + using value_type = T; NDArray &operator=(NDArray &&other) noexcept; // Move assign diff --git a/include/aare/algorithm.hpp b/include/aare/algorithm.hpp new file mode 100644 index 0000000..5d6dc57 --- /dev/null +++ b/include/aare/algorithm.hpp @@ -0,0 +1,55 @@ + +#pragma once +#include +#include +#include +#include + +namespace aare { +/** + * @brief Find the index of the last element smaller than val + * assume a sorted array + */ +template +size_t last_smaller(const T* first, const T* last, T val) { + for (auto iter = first+1; iter != last; ++iter) { + if (*iter > val) { + return std::distance(first, iter-1); + } + } + return std::distance(first, last-1); +} + +template +size_t last_smaller(const NDArray& arr, T val) { + return last_smaller(arr.begin(), arr.end(), 
val); +} + + +template +size_t nearest_index(const T* first, const T* last, T val) { + auto iter = std::min_element(first, last, + [val](T a, T b) { + return std::abs(a - val) < std::abs(b - val); + }); + return std::distance(first, iter); +} + +template +size_t nearest_index(const NDArray& arr, T val) { + return nearest_index(arr.begin(), arr.end(), val); +} + +template +size_t nearest_index(const std::vector& vec, T val) { + return nearest_index(vec.data(), vec.data()+vec.size(), val); +} + +template +size_t nearest_index(const std::array& arr, T val) { + return nearest_index(arr.data(), arr.data()+arr.size(), val); +} + + + +} // namespace aare \ No newline at end of file diff --git a/python/aare/__init__.py b/python/aare/__init__.py index f4c19cc..058d7cf 100644 --- a/python/aare/__init__.py +++ b/python/aare/__init__.py @@ -7,11 +7,12 @@ from ._aare import Pedestal_d, Pedestal_f, ClusterFinder, VarClusterFinder from ._aare import DetectorType from ._aare import ClusterFile from ._aare import hitmap +from ._aare import ROI from ._aare import ClusterFinderMT, ClusterCollector, ClusterFileSink, ClusterVector_i from ._aare import fit_gaus, fit_pol1 - +from ._aare import Interpolator from .CtbRawFile import CtbRawFile from .RawFile import RawFile from .ScanParameters import ScanParameters diff --git a/python/examples/play.py b/python/examples/play.py index 37754df..b2c368b 100644 --- a/python/examples/play.py +++ b/python/examples/play.py @@ -1,50 +1,77 @@ import sys sys.path.append('/home/l_msdetect/erik/aare/build') -#Our normal python imports -from pathlib import Path -import matplotlib.pyplot as plt +from aare._aare import ClusterVector_i, Interpolator + +import pickle import numpy as np +import matplotlib.pyplot as plt import boost_histogram as bh +import torch +import math import time -import aare +def gaussian_2d(mx, my, sigma = 1, res=100, grid_size = 2): + """ + Generate a 2D gaussian as position mx, my, with sigma=sigma. 
+ The gaussian is placed on a 2x2 pixel matrix with resolution + res in one dimesion. + """ + x = torch.linspace(0, pixel_size*grid_size, res) + x,y = torch.meshgrid(x,x, indexing="ij") + return 1 / (2*math.pi*sigma**2) * \ + torch.exp(-((x - my)**2 / (2*sigma**2) + (y - mx)**2 / (2*sigma**2))) -data = np.random.normal(10, 1, 1000) +scale = 1000 #Scale factor when converting to integer +pixel_size = 25 #um +grid = 2 +resolution = 100 +sigma_um = 10 +xa = np.linspace(0,grid*pixel_size,resolution) +ticks = [0, 25, 50] -hist = bh.Histogram(bh.axis.Regular(10, 0, 20)) -hist.fill(data) +hit = np.array((20,20)) +etahist_fname = "/home/l_msdetect/erik/tmp/test_hist.pkl" + +local_resolution = 99 +grid_size = 3 +xaxis = np.linspace(0,grid_size*pixel_size, local_resolution) +t = gaussian_2d(hit[0],hit[1], grid_size = grid_size, sigma = 10, res = local_resolution) +pixels = t.reshape(grid_size, t.shape[0] // grid_size, grid_size, t.shape[1] // grid_size).sum(axis = 3).sum(axis = 1) +pixels = pixels.numpy() +pixels = (pixels*scale).astype(np.int32) +v = ClusterVector_i(3,3) +v.push_back(1,1, pixels) + +with open(etahist_fname, "rb") as f: + hist = pickle.load(f) +eta = hist.view().copy() +etabinsx = np.array(hist.axes.edges.T[0].flat) +etabinsy = np.array(hist.axes.edges.T[1].flat) +ebins = np.array(hist.axes.edges.T[2].flat) +p = Interpolator(eta, etabinsx[0:-1], etabinsy[0:-1], ebins[0:-1]) -x = hist.axes[0].centers -y = hist.values() -y_err = np.sqrt(y)+1 -res = aare.fit_gaus(x, y, y_err, chi2 = True) + +#Generate the hit - -t_elapsed = time.perf_counter()-t0 -print(f'Histogram filling took: {t_elapsed:.3f}s {total_clusters/t_elapsed/1e6:.3f}M clusters/s') -histogram_data = hist3d.counts() -x = hist3d.axes[2].edges[:-1] -y = histogram_data[100,100,:] -xx = np.linspace(x[0], x[-1]) -# fig, ax = plt.subplots() -# ax.step(x, y, where = 'post') +tmp = p.interpolate(v) +print(f'tmp:{tmp}') +pos = np.array((tmp['x'], tmp['y']))*25 -y_err = np.sqrt(y) -y_err = np.zeros(y.size) 
-y_err += 1 - -# par = fit_gaus2(y,x, y_err) -# ax.plot(xx, gaus(xx,par)) -# print(par) - -res = fit_gaus(y,x) -res2 = fit_gaus(y,x, y_err) -print(res) -print(res2) +print(pixels) +fig, ax = plt.subplots(figsize = (7,7)) +ax.pcolormesh(xaxis, xaxis, t) +ax.plot(*pos, 'o') +ax.set_xticks([0,25,50,75]) +ax.set_yticks([0,25,50,75]) +ax.set_xlim(0,75) +ax.set_ylim(0,75) +ax.grid() +print(f'{hit=}') +print(f'{pos=}') \ No newline at end of file diff --git a/python/src/cluster.hpp b/python/src/cluster.hpp index 792b7e6..3db816a 100644 --- a/python/src/cluster.hpp +++ b/python/src/cluster.hpp @@ -20,7 +20,13 @@ template void define_cluster_vector(py::module &m, const std::string &typestr) { auto class_name = fmt::format("ClusterVector_{}", typestr); py::class_>(m, class_name.c_str(), py::buffer_protocol()) - .def(py::init()) + .def(py::init(), + py::arg("cluster_size_x") = 3, py::arg("cluster_size_y") = 3) + .def("push_back", + [](ClusterVector &self, int x, int y, py::array_t data) { + // auto view = make_view_2d(data); + self.push_back(x, y, reinterpret_cast(data.data())); + }) .def_property_readonly("size", &ClusterVector::size) .def("item_size", &ClusterVector::item_size) .def_property_readonly("fmt", @@ -38,6 +44,8 @@ void define_cluster_vector(py::module &m, const std::string &typestr) { auto *vec = new std::vector(self.sum_2x2()); return return_vector(vec); }) + .def_property_readonly("cluster_size_x", &ClusterVector::cluster_size_x) + .def_property_readonly("cluster_size_y", &ClusterVector::cluster_size_y) .def_property_readonly("capacity", &ClusterVector::capacity) .def_property("frame_number", &ClusterVector::frame_number, &ClusterVector::set_frame_number) diff --git a/python/src/cluster_file.hpp b/python/src/cluster_file.hpp index 8a431b5..f587443 100644 --- a/python/src/cluster_file.hpp +++ b/python/src/cluster_file.hpp @@ -31,6 +31,11 @@ void define_cluster_file_io_bindings(py::module &m) { auto v = new ClusterVector(self.read_clusters(n_clusters)); return v; 
},py::return_value_policy::take_ownership) + .def("read_clusters", + [](ClusterFile &self, size_t n_clusters, ROI roi) { + auto v = new ClusterVector(self.read_clusters(n_clusters, roi)); + return v; + },py::return_value_policy::take_ownership) .def("read_frame", [](ClusterFile &self) { auto v = new ClusterVector(self.read_frame()); diff --git a/python/src/file.hpp b/python/src/file.hpp index c3c800c..0d64e16 100644 --- a/python/src/file.hpp +++ b/python/src/file.hpp @@ -195,6 +195,8 @@ void define_file_io_bindings(py::module &m) { py::class_(m, "ROI") .def(py::init<>()) + .def(py::init(), py::arg("xmin"), + py::arg("xmax"), py::arg("ymin"), py::arg("ymax")) .def_readwrite("xmin", &ROI::xmin) .def_readwrite("xmax", &ROI::xmax) .def_readwrite("ymin", &ROI::ymin) diff --git a/python/src/interpolation.hpp b/python/src/interpolation.hpp new file mode 100644 index 0000000..02742e1 --- /dev/null +++ b/python/src/interpolation.hpp @@ -0,0 +1,58 @@ +#include "aare/Interpolator.hpp" +#include "aare/NDArray.hpp" +#include "aare/NDView.hpp" +#include "np_helper.hpp" +#include +#include +#include +#include + +namespace py = pybind11; +void define_interpolation_bindings(py::module &m) { + + PYBIND11_NUMPY_DTYPE(aare::Photon, x,y,energy); + + py::class_(m, "Interpolator") + .def(py::init([](py::array_t etacube, py::array_t xbins, + py::array_t ybins, py::array_t ebins) { + return Interpolator(make_view_3d(etacube), make_view_1d(xbins), + make_view_1d(ybins), make_view_1d(ebins)); + })) + .def("get_ietax", [](Interpolator& self){ + auto*ptr = new NDArray{}; + *ptr = self.get_ietax(); + return return_image_data(ptr); + }) + .def("get_ietay", [](Interpolator& self){ + auto*ptr = new NDArray{}; + *ptr = self.get_ietay(); + return return_image_data(ptr); + }) + .def("interpolate", [](Interpolator& self, const ClusterVector& clusters){ + auto photons = self.interpolate(clusters); + auto* ptr = new std::vector{photons}; + return return_vector(ptr); + }); + + // TODO! 
Evaluate without converting to double + m.def( + "hej", + []() { + // auto boost_histogram = py::module_::import("boost_histogram"); + // py::object axis = + // boost_histogram.attr("axis").attr("Regular")(10, 0.0, 10.0); + // py::object histogram = boost_histogram.attr("Histogram")(axis); + // return histogram; + // return h; + }, + R"( + Evaluate a 1D Gaussian function for all points in x using parameters par. + + Parameters + ---------- + x : array_like + The points at which to evaluate the Gaussian function. + par : array_like + The parameters of the Gaussian function. The first element is the amplitude, the second element is the mean, and the third element is the standard deviation. + )"); +} \ No newline at end of file diff --git a/python/src/module.cpp b/python/src/module.cpp index 70d143f..43f48ba 100644 --- a/python/src/module.cpp +++ b/python/src/module.cpp @@ -9,6 +9,7 @@ #include "cluster.hpp" #include "cluster_file.hpp" #include "fit.hpp" +#include "interpolation.hpp" //Pybind stuff #include @@ -31,5 +32,6 @@ PYBIND11_MODULE(_aare, m) { define_cluster_collector_bindings(m); define_cluster_file_sink_bindings(m); define_fit_bindings(m); + define_interpolation_bindings(m); } \ No newline at end of file diff --git a/src/ClusterFile.cpp b/src/ClusterFile.cpp index 2928d26..2e23e09 100644 --- a/src/ClusterFile.cpp +++ b/src/ClusterFile.cpp @@ -108,6 +108,79 @@ ClusterVector ClusterFile::read_clusters(size_t n_clusters) { return clusters; } +ClusterVector ClusterFile::read_clusters(size_t n_clusters, ROI roi) { + if (m_mode != "r") { + throw std::runtime_error("File not opened for reading"); + } + + ClusterVector clusters(3,3); + clusters.reserve(n_clusters); + + int32_t iframe = 0; // frame number needs to be 4 bytes! 
+ size_t nph_read = 0; + uint32_t nn = m_num_left; + uint32_t nph = m_num_left; // number of clusters in frame needs to be 4 + + // auto buf = reinterpret_cast(clusters.data()); + // auto buf = clusters.data(); + + Cluster3x3 tmp; //this would break if the cluster size changes + + // if there are photons left from previous frame read them first + if (nph) { + if (nph > n_clusters) { + // if we have more photons left in the frame then photons to read we + // read directly the requested number + nn = n_clusters; + } else { + nn = nph; + } + //Read one cluster, in the ROI push back + // nph_read += fread((buf + nph_read*clusters.item_size()), + // clusters.item_size(), nn, fp); + for(size_t i = 0; i < nn; i++){ + fread(&tmp, sizeof(tmp), 1, fp); + if(tmp.x >= roi.xmin && tmp.x <= roi.xmax && tmp.y >= roi.ymin && tmp.y <= roi.ymax){ + clusters.push_back(tmp.x, tmp.y, reinterpret_cast(tmp.data)); + nph_read++; + } + } + + m_num_left = nph - nn; // write back the number of photons left + } + + if (nph_read < n_clusters) { + // keep on reading frames and photons until reaching n_clusters + while (fread(&iframe, sizeof(iframe), 1, fp)) { + // read number of clusters in frame + if (fread(&nph, sizeof(nph), 1, fp)) { + if (nph > (n_clusters - nph_read)) + nn = n_clusters - nph_read; + else + nn = nph; + + // nph_read += fread((buf + nph_read*clusters.item_size()), + // clusters.item_size(), nn, fp); + for(size_t i = 0; i < nn; i++){ + fread(&tmp, sizeof(tmp), 1, fp); + if(tmp.x >= roi.xmin && tmp.x <= roi.xmax && tmp.y >= roi.ymin && tmp.y <= roi.ymax){ + clusters.push_back(tmp.x, tmp.y, reinterpret_cast(tmp.data)); + nph_read++; + } + } + m_num_left = nph - nn; + } + if (nph_read >= n_clusters) + break; + } + } + + // Resize the vector to the number of clusters. + // No new allocation, only change bounds. 
+ clusters.resize(nph_read); + return clusters; +} + ClusterVector ClusterFile::read_frame() { if (m_mode != "r") { throw std::runtime_error("File not opened for reading"); @@ -268,11 +341,23 @@ ClusterVector ClusterFile::read_frame() { NDArray calculate_eta2(ClusterVector &clusters) { //TOTO! make work with 2x2 clusters NDArray eta2({static_cast(clusters.size()), 2}); - for (size_t i = 0; i < clusters.size(); i++) { - auto e = calculate_eta2(clusters.at(i)); - eta2(i, 0) = e.x; - eta2(i, 1) = e.y; + + if (clusters.cluster_size_x() == 3 || clusters.cluster_size_y() == 3) { + for (size_t i = 0; i < clusters.size(); i++) { + auto e = calculate_eta2(clusters.at(i)); + eta2(i, 0) = e.x; + eta2(i, 1) = e.y; + } + }else if(clusters.cluster_size_x() == 2 || clusters.cluster_size_y() == 2){ + for (size_t i = 0; i < clusters.size(); i++) { + auto e = calculate_eta2(clusters.at(i)); + eta2(i, 0) = e.x; + eta2(i, 1) = e.y; + } + }else{ + throw std::runtime_error("Only 3x3 and 2x2 clusters are supported"); } + return eta2; } @@ -290,7 +375,7 @@ Eta2 calculate_eta2(Cluster3x3 &cl) { tot2[3] = cl.data[4] + cl.data[5] + cl.data[7] + cl.data[8]; auto c = std::max_element(tot2.begin(), tot2.end()) - tot2.begin(); - + eta.sum = tot2[c]; switch (c) { case cBottomLeft: if ((cl.data[3] + cl.data[4]) != 0) @@ -333,6 +418,20 @@ Eta2 calculate_eta2(Cluster3x3 &cl) { return eta; } + +Eta2 calculate_eta2(Cluster2x2 &cl) { + Eta2 eta{}; + if ((cl.data[0] + cl.data[1]) != 0) + eta.x = static_cast(cl.data[1]) / (cl.data[0] + cl.data[1]); + if ((cl.data[0] + cl.data[2]) != 0) + eta.y = static_cast(cl.data[2]) / (cl.data[0] + cl.data[2]); + eta.sum = cl.data[0] + cl.data[1] + cl.data[2]+ cl.data[3]; + eta.c = cBottomLeft; //TODO! 
This is not correct, but need to put something + return eta; +} + + + int analyze_cluster(Cluster3x3 &cl, int32_t *t2, int32_t *t3, char *quad, double *eta2x, double *eta2y, double *eta3x, double *eta3y) { diff --git a/src/Interpolator.cpp b/src/Interpolator.cpp new file mode 100644 index 0000000..7f82533 --- /dev/null +++ b/src/Interpolator.cpp @@ -0,0 +1,144 @@ +#include "aare/Interpolator.hpp" +#include "aare/algorithm.hpp" + +namespace aare { + +Interpolator::Interpolator(NDView etacube, NDView xbins, + NDView ybins, NDView ebins) + : m_ietax(etacube), m_ietay(etacube), m_etabinsx(xbins), m_etabinsy(ybins), m_energy_bins(ebins) { + if (etacube.shape(0) != xbins.size() || etacube.shape(1) != ybins.size() || + etacube.shape(2) != ebins.size()) { + throw std::invalid_argument( + "The shape of the etacube does not match the shape of the bins"); + } + + // Cumulative sum in the x direction + for (ssize_t i = 1; i < m_ietax.shape(0); i++) { + for (ssize_t j = 0; j < m_ietax.shape(1); j++) { + for (ssize_t k = 0; k < m_ietax.shape(2); k++) { + m_ietax(i, j, k) += m_ietax(i - 1, j, k); + } + } + } + + // Normalize by the highest row, if norm less than 1 don't do anything + for (ssize_t i = 0; i < m_ietax.shape(0); i++) { + for (ssize_t j = 0; j < m_ietax.shape(1); j++) { + for (ssize_t k = 0; k < m_ietax.shape(2); k++) { + auto val = m_ietax(m_ietax.shape(0) - 1, j, k); + double norm = val < 1 ? 
1 : val; + m_ietax(i, j, k) /= norm; + } + } + } + + // Cumulative sum in the y direction + for (ssize_t i = 0; i < m_ietay.shape(0); i++) { + for (ssize_t j = 1; j < m_ietay.shape(1); j++) { + for (ssize_t k = 0; k < m_ietay.shape(2); k++) { + m_ietay(i, j, k) += m_ietay(i, j - 1, k); + } + } + } + + // Normalize by the highest column, if norm less than 1 don't do anything + for (ssize_t i = 0; i < m_ietay.shape(0); i++) { + for (ssize_t j = 0; j < m_ietay.shape(1); j++) { + for (ssize_t k = 0; k < m_ietay.shape(2); k++) { + auto val = m_ietay(i, m_ietay.shape(1) - 1, k); + double norm = val < 1 ? 1 : val; + m_ietay(i, j, k) /= norm; + } + } + } +} + +std::vector Interpolator::interpolate(const ClusterVector& clusters) { + std::vector photons; + photons.reserve(clusters.size()); + + if (clusters.cluster_size_x() == 3 || clusters.cluster_size_y() == 3) { + for (size_t i = 0; i(i); + Eta2 eta= calculate_eta2(cluster); + + Photon photon; + photon.x = cluster.x; + photon.y = cluster.y; + photon.energy = eta.sum; + + // auto ie = nearest_index(m_energy_bins, photon.energy)-1; + // auto ix = nearest_index(m_etabinsx, eta.x)-1; + // auto iy = nearest_index(m_etabinsy, eta.y)-1; + //Finding the index of the last element that is smaller + //should work fine as long as we have many bins + auto ie = last_smaller(m_energy_bins, photon.energy); + auto ix = last_smaller(m_etabinsx, eta.x); + auto iy = last_smaller(m_etabinsy, eta.y); + + // fmt::print("ex: {}, ix: {}, iy: {}\n", ie, ix, iy); + + double dX, dY; + int ex, ey; + // cBottomLeft = 0, + // cBottomRight = 1, + // cTopLeft = 2, + // cTopRight = 3 + switch (eta.c) { + case cTopLeft: + dX = -1.; + dY = 0.; + break; + case cTopRight:; + dX = 0.; + dY = 0.; + break; + case cBottomLeft: + dX = -1.; + dY = -1.; + break; + case cBottomRight: + dX = 0.; + dY = -1.; + break; + } + photon.x += m_ietax(ix, iy, ie)*2 + dX; + photon.y += m_ietay(ix, iy, ie)*2 + dY; + photons.push_back(photon); + } + }else 
if(clusters.cluster_size_x() == 2 || clusters.cluster_size_y() == 2){ + for (size_t i = 0; i(i); + Eta2 eta= calculate_eta2(cluster); + + Photon photon; + photon.x = cluster.x; + photon.y = cluster.y; + photon.energy = eta.sum; + + //Now do some actual interpolation. + //Find which energy bin the cluster is in + // auto ie = nearest_index(m_energy_bins, photon.energy)-1; + // auto ix = nearest_index(m_etabinsx, eta.x)-1; + // auto iy = nearest_index(m_etabinsy, eta.y)-1; + //Finding the index of the last element that is smaller + //should work fine as long as we have many bins + auto ie = last_smaller(m_energy_bins, photon.energy); + auto ix = last_smaller(m_etabinsx, eta.x); + auto iy = last_smaller(m_etabinsy, eta.y); + + photon.x += m_ietax(ix, iy, ie)*2; //eta goes between 0 and 1 but we could move the hit anywhere in the 2x2 + photon.y += m_ietay(ix, iy, ie)*2; + photons.push_back(photon); + } + + }else{ + throw std::runtime_error("Only 3x3 and 2x2 clusters are supported for interpolation"); + } + + + return photons; +} + +} // namespace aare \ No newline at end of file diff --git a/src/NDArray.test.cpp b/src/NDArray.test.cpp index 942481c..eff3e2c 100644 --- a/src/NDArray.test.cpp +++ b/src/NDArray.test.cpp @@ -2,6 +2,7 @@ #include #include #include +#include using aare::NDArray; using aare::NDView; @@ -34,6 +35,24 @@ TEST_CASE("Construct from an NDView") { } } +TEST_CASE("3D NDArray from NDView"){ + std::vector data(27); + std::iota(data.begin(), data.end(), 0); + NDView view(data.data(), Shape<3>{3, 3, 3}); + NDArray image(view); + REQUIRE(image.shape() == view.shape()); + REQUIRE(image.size() == view.size()); + REQUIRE(image.data() != view.data()); + + for(int64_t i=0; i shape{{20}}; NDArray img(shape, 3); diff --git a/src/algorithm.test.cpp b/src/algorithm.test.cpp new file mode 100644 index 0000000..fcfa8d2 --- /dev/null +++ b/src/algorithm.test.cpp @@ -0,0 +1,73 @@ + + +#include +#include + + +TEST_CASE("Find the closed index in a 1D array", 
"[algorithm]") { + aare::NDArray arr({5}); + for (size_t i = 0; i < arr.size(); i++) { + arr[i] = i; + } + // arr 0, 1, 2, 3, 4 + REQUIRE(aare::nearest_index(arr, 2.3) == 2); + REQUIRE(aare::nearest_index(arr, 2.6) == 3); + REQUIRE(aare::nearest_index(arr, 45.0) == 4); + REQUIRE(aare::nearest_index(arr, 0.0) == 0); + REQUIRE(aare::nearest_index(arr, -1.0) == 0); +} + +TEST_CASE("Passing integers to nearest_index works", "[algorithm]"){ + aare::NDArray arr({5}); + for (size_t i = 0; i < arr.size(); i++) { + arr[i] = i; + } + // arr 0, 1, 2, 3, 4 + REQUIRE(aare::nearest_index(arr, 2) == 2); + REQUIRE(aare::nearest_index(arr, 3) == 3); + REQUIRE(aare::nearest_index(arr, 45) == 4); + REQUIRE(aare::nearest_index(arr, 0) == 0); + REQUIRE(aare::nearest_index(arr, -1) == 0); +} + + +TEST_CASE("nearest_index works with std::vector", "[algorithm]"){ + std::vector vec = {0, 1, 2, 3, 4}; + REQUIRE(aare::nearest_index(vec, 2.123) == 2); + REQUIRE(aare::nearest_index(vec, 2.66) == 3); + REQUIRE(aare::nearest_index(vec, 4555555.0) == 4); + REQUIRE(aare::nearest_index(vec, 0.0) == 0); + REQUIRE(aare::nearest_index(vec, -10.0) == 0); +} + +TEST_CASE("nearest index works with std::array", "[algorithm]"){ + std::array arr = {0, 1, 2, 3, 4}; + REQUIRE(aare::nearest_index(arr, 2.123) == 2); + REQUIRE(aare::nearest_index(arr, 2.501) == 3); + REQUIRE(aare::nearest_index(arr, 4555555.0) == 4); + REQUIRE(aare::nearest_index(arr, 0.0) == 0); + REQUIRE(aare::nearest_index(arr, -10.0) == 0); +} + + +TEST_CASE("last smaller", "[algorithm]"){ + aare::NDArray arr({5}); + for (size_t i = 0; i < arr.size(); i++) { + arr[i] = i; + } + // arr 0, 1, 2, 3, 4 + REQUIRE(aare::last_smaller(arr, -10.0) == 0); + REQUIRE(aare::last_smaller(arr, 0.0) == 0); + REQUIRE(aare::last_smaller(arr, 2.3) == 2); + REQUIRE(aare::last_smaller(arr, 253.) 
== 4); +} + +TEST_CASE("returns last bin strictly smaller", "[algorithm]"){ + aare::NDArray arr({5}); + for (size_t i = 0; i < arr.size(); i++) { + arr[i] = i; + } + // arr 0, 1, 2, 3, 4 + REQUIRE(aare::last_smaller(arr, 2.0) == 2); + +} \ No newline at end of file From 602b04e49fd61beea1a9a3c4f0942b4632b64b64 Mon Sep 17 00:00:00 2001 From: froejdh_e Date: Tue, 18 Mar 2025 17:47:05 +0100 Subject: [PATCH 046/120] bumped version number --- conda-recipe/meta.yaml | 2 +- pyproject.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/conda-recipe/meta.yaml b/conda-recipe/meta.yaml index ffa95a7..93c1219 100644 --- a/conda-recipe/meta.yaml +++ b/conda-recipe/meta.yaml @@ -1,6 +1,6 @@ package: name: aare - version: 2025.2.18 #TODO! how to not duplicate this? + version: 2025.3.18 #TODO! how to not duplicate this? diff --git a/pyproject.toml b/pyproject.toml index 6dc941e..8b0b789 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "scikit_build_core.build" [project] name = "aare" -version = "2025.2.18" +version = "2025.3.18" [tool.scikit-build] From 5d8ad27b21af0d6b22390523263b2d48c442eb9d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Erik=20Fr=C3=B6jdh?= Date: Thu, 20 Mar 2025 12:52:04 +0100 Subject: [PATCH 047/120] Developer (#138) - Fully functioning variable size cluster finder - Added interpolation - Bit reordering for ADC SAR 05 --------- Co-authored-by: Patrick Co-authored-by: JulianHeymes Co-authored-by: Dhanya Thattil Co-authored-by: xiangyu.xie --- .clang-tidy | 42 +++++++++ .gitea/workflows/cmake_build.yml | 58 ++++++++++++ CMakeLists.txt | 5 ++ conda-recipe/meta.yaml | 3 +- include/aare/ClusterFile.hpp | 10 +++ include/aare/ClusterVector.hpp | 4 + include/aare/Interpolator.hpp | 29 ++++++ include/aare/NDArray.hpp | 15 ++-- include/aare/RawSubFile.hpp | 2 +- include/aare/VarClusterFinder.hpp | 2 +- include/aare/algorithm.hpp | 55 ++++++++++++ include/aare/defs.hpp | 2 + pyproject.toml | 3 +- python/aare/__init__.py | 
3 +- python/examples/play.py | 89 +++++++++++------- python/src/cluster.hpp | 10 ++- python/src/cluster_file.hpp | 5 ++ python/src/ctb_raw_file.hpp | 4 +- python/src/file.hpp | 2 + python/src/interpolation.hpp | 58 ++++++++++++ python/src/module.cpp | 2 + python/src/np_helper.hpp | 12 +-- python/src/var_cluster.hpp | 37 +++++++- src/ClusterFile.cpp | 109 ++++++++++++++++++++-- src/Dtype.cpp | 2 +- src/File.cpp | 2 +- src/Interpolator.cpp | 144 ++++++++++++++++++++++++++++++ src/NDArray.test.cpp | 19 ++++ src/RawFile.cpp | 3 +- src/algorithm.test.cpp | 73 +++++++++++++++ 30 files changed, 743 insertions(+), 61 deletions(-) create mode 100644 .clang-tidy create mode 100644 .gitea/workflows/cmake_build.yml create mode 100644 include/aare/Interpolator.hpp create mode 100644 include/aare/algorithm.hpp create mode 100644 python/src/interpolation.hpp create mode 100644 src/Interpolator.cpp create mode 100644 src/algorithm.test.cpp diff --git a/.clang-tidy b/.clang-tidy new file mode 100644 index 0000000..a2ab6c1 --- /dev/null +++ b/.clang-tidy @@ -0,0 +1,42 @@ + +--- +Checks: '*, + -altera-*, + -android-cloexec-fopen, + -cppcoreguidelines-pro-bounds-array-to-pointer-decay, + -cppcoreguidelines-pro-bounds-pointer-arithmetic, + -fuchsia*, + -readability-else-after-return, + -readability-avoid-const-params-in-decls, + -readability-identifier-length, + -cppcoreguidelines-pro-bounds-constant-array-index, + -cppcoreguidelines-pro-type-reinterpret-cast, + -llvm-header-guard, + -modernize-use-nodiscard, + -misc-non-private-member-variables-in-classes, + -readability-static-accessed-through-instance, + -readability-braces-around-statements, + -readability-isolate-declaration, + -readability-implicit-bool-conversion, + -readability-identifier-length, + -readability-identifier-naming, + -hicpp-signed-bitwise, + -hicpp-no-array-decay, + -hicpp-braces-around-statements, + -google-runtime-references, + -google-readability-todo, + -google-readability-braces-around-statements, + 
-modernize-use-trailing-return-type, + -llvmlibc-*' + +HeaderFilterRegex: \.hpp +FormatStyle: none +CheckOptions: + - { key: readability-identifier-naming.NamespaceCase, value: lower_case } + # - { key: readability-identifier-naming.FunctionCase, value: lower_case } + - { key: readability-identifier-naming.ClassCase, value: CamelCase } + # - { key: readability-identifier-naming.MethodCase, value: CamelCase } + # - { key: readability-identifier-naming.StructCase, value: CamelCase } + # - { key: readability-identifier-naming.VariableCase, value: lower_case } + - { key: readability-identifier-naming.GlobalConstantCase, value: UPPER_CASE } +... diff --git a/.gitea/workflows/cmake_build.yml b/.gitea/workflows/cmake_build.yml new file mode 100644 index 0000000..43a0181 --- /dev/null +++ b/.gitea/workflows/cmake_build.yml @@ -0,0 +1,58 @@ +name: Build the package using cmake then documentation + +on: + workflow_dispatch: + push: + + + +permissions: + contents: read + pages: write + id-token: write + +jobs: + build: + strategy: + fail-fast: false + matrix: + platform: [ubuntu-latest, ] # macos-12, windows-2019] + python-version: ["3.12",] + + runs-on: ${{ matrix.platform }} + + # The setup-miniconda action needs this to activate miniconda + defaults: + run: + shell: "bash -l {0}" + + steps: + - uses: actions/checkout@v4 + + - name: Setup dev env + run: | + sudo apt-get update + sudo apt-get -y install cmake gcc g++ + + - name: Get conda + uses: conda-incubator/setup-miniconda@v3.0.4 + with: + python-version: ${{ matrix.python-version }} + channels: conda-forge + + - name: Prepare + run: conda install doxygen sphinx=7.1.2 breathe pybind11 sphinx_rtd_theme furo nlohmann_json zeromq fmt numpy + + - name: Build library + run: | + mkdir build + cd build + cmake .. 
-DAARE_SYSTEM_LIBRARIES=ON -DAARE_DOCS=ON + make -j 2 + make docs + + + + + + diff --git a/CMakeLists.txt b/CMakeLists.txt index b93b513..4772f0b 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -60,6 +60,8 @@ if(AARE_SYSTEM_LIBRARIES) set(AARE_FETCH_CATCH OFF CACHE BOOL "Disabled FetchContent for catch2" FORCE) set(AARE_FETCH_JSON OFF CACHE BOOL "Disabled FetchContent for nlohmann::json" FORCE) set(AARE_FETCH_ZMQ OFF CACHE BOOL "Disabled FetchContent for libzmq" FORCE) + # Still fetch lmfit when setting AARE_SYSTEM_LIBRARIES since this is not available + # on conda-forge endif() if(AARE_VERBOSE) @@ -78,6 +80,7 @@ endif() set(CMAKE_EXPORT_COMPILE_COMMANDS ON) if(AARE_FETCH_LMFIT) + #TODO! Should we fetch lmfit from the web or inlcude a tar.gz in the repo? set(lmfit_patch git apply ${CMAKE_CURRENT_SOURCE_DIR}/patches/lmfit.patch) FetchContent_Declare( lmfit @@ -343,6 +346,7 @@ set(SourceFiles ${CMAKE_CURRENT_SOURCE_DIR}/src/geo_helpers.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyFile.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyHelpers.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/Interpolator.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/PixelMap.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/RawFile.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/RawSubFile.cpp @@ -382,6 +386,7 @@ endif() if(AARE_TESTS) set(TestSources + ${CMAKE_CURRENT_SOURCE_DIR}/src/algorithm.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/defs.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/Dtype.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/Frame.test.cpp diff --git a/conda-recipe/meta.yaml b/conda-recipe/meta.yaml index ffa95a7..120854b 100644 --- a/conda-recipe/meta.yaml +++ b/conda-recipe/meta.yaml @@ -1,6 +1,7 @@ package: name: aare - version: 2025.2.18 #TODO! how to not duplicate this? + version: 2025.3.18 #TODO! how to not duplicate this? 
+ diff --git a/include/aare/ClusterFile.hpp b/include/aare/ClusterFile.hpp index b796763..5bea342 100644 --- a/include/aare/ClusterFile.hpp +++ b/include/aare/ClusterFile.hpp @@ -8,11 +8,17 @@ namespace aare { +//TODO! Template this? struct Cluster3x3 { int16_t x; int16_t y; int32_t data[9]; }; +struct Cluster2x2 { + int16_t x; + int16_t y; + int32_t data[4]; +}; typedef enum { cBottomLeft = 0, @@ -37,6 +43,7 @@ struct Eta2 { double x; double y; corner c; + int32_t sum; }; struct ClusterAnalysis { @@ -97,6 +104,8 @@ class ClusterFile { */ ClusterVector read_clusters(size_t n_clusters); + ClusterVector read_clusters(size_t n_clusters, ROI roi); + /** * @brief Read a single frame from the file and return the clusters. The * cluster vector will have the frame number set. @@ -131,5 +140,6 @@ int analyze_cluster(Cluster3x3 &cl, int32_t *t2, int32_t *t3, char *quad, NDArray calculate_eta2(ClusterVector &clusters); Eta2 calculate_eta2(Cluster3x3 &cl); +Eta2 calculate_eta2(Cluster2x2 &cl); } // namespace aare diff --git a/include/aare/ClusterVector.hpp b/include/aare/ClusterVector.hpp index febf06c..1c15a22 100644 --- a/include/aare/ClusterVector.hpp +++ b/include/aare/ClusterVector.hpp @@ -231,6 +231,10 @@ template class ClusterVector { return *reinterpret_cast(element_ptr(i)); } + template const V &at(size_t i) const { + return *reinterpret_cast(element_ptr(i)); + } + const std::string_view fmt_base() const { // TODO! how do we match on coord_t? 
return m_fmt_base; diff --git a/include/aare/Interpolator.hpp b/include/aare/Interpolator.hpp new file mode 100644 index 0000000..4905bce --- /dev/null +++ b/include/aare/Interpolator.hpp @@ -0,0 +1,29 @@ +#pragma once +#include "aare/NDArray.hpp" +#include "aare/NDView.hpp" +#include "aare/ClusterVector.hpp" +#include "aare/ClusterFile.hpp" //Cluster_3x3 +namespace aare{ + +struct Photon{ + double x; + double y; + double energy; +}; + +class Interpolator{ + NDArray m_ietax; + NDArray m_ietay; + + NDArray m_etabinsx; + NDArray m_etabinsy; + NDArray m_energy_bins; + public: + Interpolator(NDView etacube, NDView xbins, NDView ybins, NDView ebins); + NDArray get_ietax(){return m_ietax;} + NDArray get_ietay(){return m_ietay;} + + std::vector interpolate(const ClusterVector& clusters); +}; + +} // namespace aare \ No newline at end of file diff --git a/include/aare/NDArray.hpp b/include/aare/NDArray.hpp index cfa5b5c..45d3a83 100644 --- a/include/aare/NDArray.hpp +++ b/include/aare/NDArray.hpp @@ -102,6 +102,9 @@ class NDArray : public ArrayExpr, Ndim> { auto begin() { return data_; } auto end() { return data_ + size_; } + auto begin() const { return data_; } + auto end() const { return data_ + size_; } + using value_type = T; NDArray &operator=(NDArray &&other) noexcept; // Move assign @@ -388,12 +391,12 @@ NDArray NDArray::operator*(const T &value) { result *= value; return result; } -template void NDArray::Print() { - if (shape_[0] < 20 && shape_[1] < 20) - Print_all(); - else - Print_some(); -} +// template void NDArray::Print() { +// if (shape_[0] < 20 && shape_[1] < 20) +// Print_all(); +// else +// Print_some(); +// } template std::ostream &operator<<(std::ostream &os, const NDArray &arr) { diff --git a/include/aare/RawSubFile.hpp b/include/aare/RawSubFile.hpp index 89c278e..1d554e8 100644 --- a/include/aare/RawSubFile.hpp +++ b/include/aare/RawSubFile.hpp @@ -64,7 +64,7 @@ class RawSubFile { size_t bytes_per_frame() const { return m_bytes_per_frame; } size_t 
pixels_per_frame() const { return m_rows * m_cols; } - size_t bytes_per_pixel() const { return m_bitdepth / 8; } + size_t bytes_per_pixel() const { return m_bitdepth / bits_per_byte; } private: template diff --git a/include/aare/VarClusterFinder.hpp b/include/aare/VarClusterFinder.hpp index d4d51cc..ea62a9d 100644 --- a/include/aare/VarClusterFinder.hpp +++ b/include/aare/VarClusterFinder.hpp @@ -7,7 +7,7 @@ #include "aare/NDArray.hpp" -const int MAX_CLUSTER_SIZE = 200; +const int MAX_CLUSTER_SIZE = 50; namespace aare { template class VarClusterFinder { diff --git a/include/aare/algorithm.hpp b/include/aare/algorithm.hpp new file mode 100644 index 0000000..5d6dc57 --- /dev/null +++ b/include/aare/algorithm.hpp @@ -0,0 +1,55 @@ + +#pragma once +#include +#include +#include +#include + +namespace aare { +/** + * @brief Find the index of the last element smaller than val + * assume a sorted array + */ +template +size_t last_smaller(const T* first, const T* last, T val) { + for (auto iter = first+1; iter != last; ++iter) { + if (*iter > val) { + return std::distance(first, iter-1); + } + } + return std::distance(first, last-1); +} + +template +size_t last_smaller(const NDArray& arr, T val) { + return last_smaller(arr.begin(), arr.end(), val); +} + + +template +size_t nearest_index(const T* first, const T* last, T val) { + auto iter = std::min_element(first, last, + [val](T a, T b) { + return std::abs(a - val) < std::abs(b - val); + }); + return std::distance(first, iter); +} + +template +size_t nearest_index(const NDArray& arr, T val) { + return nearest_index(arr.begin(), arr.end(), val); +} + +template +size_t nearest_index(const std::vector& vec, T val) { + return nearest_index(vec.data(), vec.data()+vec.size(), val); +} + +template +size_t nearest_index(const std::array& arr, T val) { + return nearest_index(arr.data(), arr.data()+arr.size(), val); +} + + + +} // namespace aare \ No newline at end of file diff --git a/include/aare/defs.hpp b/include/aare/defs.hpp 
index db1a47b..4559882 100644 --- a/include/aare/defs.hpp +++ b/include/aare/defs.hpp @@ -38,6 +38,8 @@ namespace aare { +inline constexpr size_t bits_per_byte = 8; + void assert_failed(const std::string &msg); diff --git a/pyproject.toml b/pyproject.toml index 6dc941e..b9bf7d2 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,8 @@ build-backend = "scikit_build_core.build" [project] name = "aare" -version = "2025.2.18" +version = "2025.3.18" + [tool.scikit-build] diff --git a/python/aare/__init__.py b/python/aare/__init__.py index f4c19cc..058d7cf 100644 --- a/python/aare/__init__.py +++ b/python/aare/__init__.py @@ -7,11 +7,12 @@ from ._aare import Pedestal_d, Pedestal_f, ClusterFinder, VarClusterFinder from ._aare import DetectorType from ._aare import ClusterFile from ._aare import hitmap +from ._aare import ROI from ._aare import ClusterFinderMT, ClusterCollector, ClusterFileSink, ClusterVector_i from ._aare import fit_gaus, fit_pol1 - +from ._aare import Interpolator from .CtbRawFile import CtbRawFile from .RawFile import RawFile from .ScanParameters import ScanParameters diff --git a/python/examples/play.py b/python/examples/play.py index 37754df..da469dc 100644 --- a/python/examples/play.py +++ b/python/examples/play.py @@ -1,50 +1,79 @@ import sys sys.path.append('/home/l_msdetect/erik/aare/build') -#Our normal python imports -from pathlib import Path -import matplotlib.pyplot as plt +from aare._aare import ClusterVector_i, Interpolator + +import pickle import numpy as np +import matplotlib.pyplot as plt import boost_histogram as bh +import torch +import math import time -import aare -data = np.random.normal(10, 1, 1000) +def gaussian_2d(mx, my, sigma = 1, res=100, grid_size = 2): + """ + Generate a 2D gaussian as position mx, my, with sigma=sigma. + The gaussian is placed on a 2x2 pixel matrix with resolution + res in one dimesion. 
+ """ + x = torch.linspace(0, pixel_size*grid_size, res) + x,y = torch.meshgrid(x,x, indexing="ij") + return 1 / (2*math.pi*sigma**2) * \ + torch.exp(-((x - my)**2 / (2*sigma**2) + (y - mx)**2 / (2*sigma**2))) -hist = bh.Histogram(bh.axis.Regular(10, 0, 20)) -hist.fill(data) +scale = 1000 #Scale factor when converting to integer +pixel_size = 25 #um +grid = 2 +resolution = 100 +sigma_um = 10 +xa = np.linspace(0,grid*pixel_size,resolution) +ticks = [0, 25, 50] + +hit = np.array((20,20)) +etahist_fname = "/home/l_msdetect/erik/tmp/test_hist.pkl" + +local_resolution = 99 +grid_size = 3 +xaxis = np.linspace(0,grid_size*pixel_size, local_resolution) +t = gaussian_2d(hit[0],hit[1], grid_size = grid_size, sigma = 10, res = local_resolution) +pixels = t.reshape(grid_size, t.shape[0] // grid_size, grid_size, t.shape[1] // grid_size).sum(axis = 3).sum(axis = 1) +pixels = pixels.numpy() +pixels = (pixels*scale).astype(np.int32) +v = ClusterVector_i(3,3) +v.push_back(1,1, pixels) + +with open(etahist_fname, "rb") as f: + hist = pickle.load(f) +eta = hist.view().copy() +etabinsx = np.array(hist.axes.edges.T[0].flat) +etabinsy = np.array(hist.axes.edges.T[1].flat) +ebins = np.array(hist.axes.edges.T[2].flat) +p = Interpolator(eta, etabinsx[0:-1], etabinsy[0:-1], ebins[0:-1]) -x = hist.axes[0].centers -y = hist.values() -y_err = np.sqrt(y)+1 -res = aare.fit_gaus(x, y, y_err, chi2 = True) - -t_elapsed = time.perf_counter()-t0 -print(f'Histogram filling took: {t_elapsed:.3f}s {total_clusters/t_elapsed/1e6:.3f}M clusters/s') +#Generate the hit -histogram_data = hist3d.counts() -x = hist3d.axes[2].edges[:-1] -y = histogram_data[100,100,:] -xx = np.linspace(x[0], x[-1]) -# fig, ax = plt.subplots() -# ax.step(x, y, where = 'post') -y_err = np.sqrt(y) -y_err = np.zeros(y.size) -y_err += 1 -# par = fit_gaus2(y,x, y_err) -# ax.plot(xx, gaus(xx,par)) -# print(par) +tmp = p.interpolate(v) +print(f'tmp:{tmp}') +pos = np.array((tmp['x'], tmp['y']))*25 -res = fit_gaus(y,x) -res2 = 
fit_gaus(y,x, y_err) -print(res) -print(res2) +print(pixels) +fig, ax = plt.subplots(figsize = (7,7)) +ax.pcolormesh(xaxis, xaxis, t) +ax.plot(*pos, 'o') +ax.set_xticks([0,25,50,75]) +ax.set_yticks([0,25,50,75]) +ax.set_xlim(0,75) +ax.set_ylim(0,75) +ax.grid() +print(f'{hit=}') +print(f'{pos=}') \ No newline at end of file diff --git a/python/src/cluster.hpp b/python/src/cluster.hpp index 792b7e6..3db816a 100644 --- a/python/src/cluster.hpp +++ b/python/src/cluster.hpp @@ -20,7 +20,13 @@ template void define_cluster_vector(py::module &m, const std::string &typestr) { auto class_name = fmt::format("ClusterVector_{}", typestr); py::class_>(m, class_name.c_str(), py::buffer_protocol()) - .def(py::init()) + .def(py::init(), + py::arg("cluster_size_x") = 3, py::arg("cluster_size_y") = 3) + .def("push_back", + [](ClusterVector &self, int x, int y, py::array_t data) { + // auto view = make_view_2d(data); + self.push_back(x, y, reinterpret_cast(data.data())); + }) .def_property_readonly("size", &ClusterVector::size) .def("item_size", &ClusterVector::item_size) .def_property_readonly("fmt", @@ -38,6 +44,8 @@ void define_cluster_vector(py::module &m, const std::string &typestr) { auto *vec = new std::vector(self.sum_2x2()); return return_vector(vec); }) + .def_property_readonly("cluster_size_x", &ClusterVector::cluster_size_x) + .def_property_readonly("cluster_size_y", &ClusterVector::cluster_size_y) .def_property_readonly("capacity", &ClusterVector::capacity) .def_property("frame_number", &ClusterVector::frame_number, &ClusterVector::set_frame_number) diff --git a/python/src/cluster_file.hpp b/python/src/cluster_file.hpp index 8a431b5..f587443 100644 --- a/python/src/cluster_file.hpp +++ b/python/src/cluster_file.hpp @@ -31,6 +31,11 @@ void define_cluster_file_io_bindings(py::module &m) { auto v = new ClusterVector(self.read_clusters(n_clusters)); return v; },py::return_value_policy::take_ownership) + .def("read_clusters", + [](ClusterFile &self, size_t n_clusters, ROI roi) 
{ + auto v = new ClusterVector(self.read_clusters(n_clusters, roi)); + return v; + },py::return_value_policy::take_ownership) .def("read_frame", [](ClusterFile &self) { auto v = new ClusterVector(self.read_frame()); diff --git a/python/src/ctb_raw_file.hpp b/python/src/ctb_raw_file.hpp index 9ce656d..56e571b 100644 --- a/python/src/ctb_raw_file.hpp +++ b/python/src/ctb_raw_file.hpp @@ -32,7 +32,7 @@ m.def("adc_sar_05_decode64to16", [](py::array_t input) { } //Create a 2D output array with the same shape as the input - std::vector shape{input.shape(0), input.shape(1)/8}; + std::vector shape{input.shape(0), input.shape(1)/static_cast(bits_per_byte)}; py::array_t output(shape); //Create a view of the input and output arrays @@ -53,7 +53,7 @@ m.def("adc_sar_04_decode64to16", [](py::array_t input) { } //Create a 2D output array with the same shape as the input - std::vector shape{input.shape(0), input.shape(1)/8}; + std::vector shape{input.shape(0), input.shape(1)/static_cast(bits_per_byte)}; py::array_t output(shape); //Create a view of the input and output arrays diff --git a/python/src/file.hpp b/python/src/file.hpp index c3c800c..0d64e16 100644 --- a/python/src/file.hpp +++ b/python/src/file.hpp @@ -195,6 +195,8 @@ void define_file_io_bindings(py::module &m) { py::class_(m, "ROI") .def(py::init<>()) + .def(py::init(), py::arg("xmin"), + py::arg("xmax"), py::arg("ymin"), py::arg("ymax")) .def_readwrite("xmin", &ROI::xmin) .def_readwrite("xmax", &ROI::xmax) .def_readwrite("ymin", &ROI::ymin) diff --git a/python/src/interpolation.hpp b/python/src/interpolation.hpp new file mode 100644 index 0000000..02742e1 --- /dev/null +++ b/python/src/interpolation.hpp @@ -0,0 +1,58 @@ +#include "aare/Interpolator.hpp" +#include "aare/NDArray.hpp" +#include "aare/NDView.hpp" +#include "np_helper.hpp" +#include +#include +#include +#include + +namespace py = pybind11; +void define_interpolation_bindings(py::module &m) { + + PYBIND11_NUMPY_DTYPE(aare::Photon, x,y,energy); + + 
py::class_(m, "Interpolator") + .def(py::init([](py::array_t etacube, py::array_t xbins, + py::array_t ybins, py::array_t ebins) { + return Interpolator(make_view_3d(etacube), make_view_1d(xbins), + make_view_1d(ybins), make_view_1d(ebins)); + })) + .def("get_ietax", [](Interpolator& self){ + auto*ptr = new NDArray{}; + *ptr = self.get_ietax(); + return return_image_data(ptr); + }) + .def("get_ietay", [](Interpolator& self){ + auto*ptr = new NDArray{}; + *ptr = self.get_ietay(); + return return_image_data(ptr); + }) + .def("interpolate", [](Interpolator& self, const ClusterVector& clusters){ + auto photons = self.interpolate(clusters); + auto* ptr = new std::vector{photons}; + return return_vector(ptr); + }); + + // TODO! Evaluate without converting to double + m.def( + "hej", + []() { + // auto boost_histogram = py::module_::import("boost_histogram"); + // py::object axis = + // boost_histogram.attr("axis").attr("Regular")(10, 0.0, 10.0); + // py::object histogram = boost_histogram.attr("Histogram")(axis); + // return histogram; + // return h; + }, + R"( + Evaluate a 1D Gaussian function for all points in x using parameters par. + + Parameters + ---------- + x : array_like + The points at which to evaluate the Gaussian function. + par : array_like + The parameters of the Gaussian function. The first element is the amplitude, the second element is the mean, and the third element is the standard deviation. 
+ )"); +} \ No newline at end of file diff --git a/python/src/module.cpp b/python/src/module.cpp index 70d143f..43f48ba 100644 --- a/python/src/module.cpp +++ b/python/src/module.cpp @@ -9,6 +9,7 @@ #include "cluster.hpp" #include "cluster_file.hpp" #include "fit.hpp" +#include "interpolation.hpp" //Pybind stuff #include @@ -31,5 +32,6 @@ PYBIND11_MODULE(_aare, m) { define_cluster_collector_bindings(m); define_cluster_file_sink_bindings(m); define_fit_bindings(m); + define_interpolation_bindings(m); } \ No newline at end of file diff --git a/python/src/np_helper.hpp b/python/src/np_helper.hpp index 6e92830..1845196 100644 --- a/python/src/np_helper.hpp +++ b/python/src/np_helper.hpp @@ -40,25 +40,25 @@ template py::array return_vector(std::vector *vec) { } // todo rewrite generic -template auto get_shape_3d(py::array_t arr) { +template auto get_shape_3d(const py::array_t& arr) { return aare::Shape<3>{arr.shape(0), arr.shape(1), arr.shape(2)}; } -template auto make_view_3d(py::array_t arr) { +template auto make_view_3d(py::array_t& arr) { return aare::NDView(arr.mutable_data(), get_shape_3d(arr)); } -template auto get_shape_2d(py::array_t arr) { +template auto get_shape_2d(const py::array_t& arr) { return aare::Shape<2>{arr.shape(0), arr.shape(1)}; } -template auto get_shape_1d(py::array_t arr) { +template auto get_shape_1d(const py::array_t& arr) { return aare::Shape<1>{arr.shape(0)}; } -template auto make_view_2d(py::array_t arr) { +template auto make_view_2d(py::array_t& arr) { return aare::NDView(arr.mutable_data(), get_shape_2d(arr)); } -template auto make_view_1d(py::array_t arr) { +template auto make_view_1d(py::array_t& arr) { return aare::NDView(arr.mutable_data(), get_shape_1d(arr)); } \ No newline at end of file diff --git a/python/src/var_cluster.hpp b/python/src/var_cluster.hpp index f3a5741..f7b373f 100644 --- a/python/src/var_cluster.hpp +++ b/python/src/var_cluster.hpp @@ -19,15 +19,24 @@ using namespace::aare; void 
define_var_cluster_finder_bindings(py::module &m) { PYBIND11_NUMPY_DTYPE(VarClusterFinder::Hit, size, row, col, - reserved, energy, max); + reserved, energy, max, rows, cols, enes); py::class_>(m, "VarClusterFinder") .def(py::init, double>()) .def("labeled", [](VarClusterFinder &self) { - auto ptr = new NDArray(self.labeled()); + auto *ptr = new NDArray(self.labeled()); return return_image_data(ptr); }) + .def("set_noiseMap", + [](VarClusterFinder &self, + py::array_t + noise_map) { + auto noise_map_span = make_view_2d(noise_map); + self.set_noiseMap(noise_map_span); + }) + .def("set_peripheralThresholdFactor", + &VarClusterFinder::set_peripheralThresholdFactor) .def("find_clusters", [](VarClusterFinder &self, py::array_t @@ -35,6 +44,30 @@ void define_var_cluster_finder_bindings(py::module &m) { auto view = make_view_2d(img); self.find_clusters(view); }) + .def("find_clusters_X", + [](VarClusterFinder &self, + py::array_t + img) { + auto img_span = make_view_2d(img); + self.find_clusters_X(img_span); + }) + .def("single_pass", + [](VarClusterFinder &self, + py::array_t + img) { + auto img_span = make_view_2d(img); + self.single_pass(img_span); + }) + .def("hits", + [](VarClusterFinder &self) { + auto ptr = new std::vector::Hit>( + self.steal_hits()); + return return_vector(ptr); + }) + .def("clear_hits", + [](VarClusterFinder &self) { + self.clear_hits(); + }) .def("steal_hits", [](VarClusterFinder &self) { auto ptr = new std::vector::Hit>( diff --git a/src/ClusterFile.cpp b/src/ClusterFile.cpp index 2928d26..2e23e09 100644 --- a/src/ClusterFile.cpp +++ b/src/ClusterFile.cpp @@ -108,6 +108,79 @@ ClusterVector ClusterFile::read_clusters(size_t n_clusters) { return clusters; } +ClusterVector ClusterFile::read_clusters(size_t n_clusters, ROI roi) { + if (m_mode != "r") { + throw std::runtime_error("File not opened for reading"); + } + + ClusterVector clusters(3,3); + clusters.reserve(n_clusters); + + int32_t iframe = 0; // frame number needs to be 4 bytes! 
+ size_t nph_read = 0; + uint32_t nn = m_num_left; + uint32_t nph = m_num_left; // number of clusters in frame needs to be 4 + + // auto buf = reinterpret_cast(clusters.data()); + // auto buf = clusters.data(); + + Cluster3x3 tmp; //this would break if the cluster size changes + + // if there are photons left from previous frame read them first + if (nph) { + if (nph > n_clusters) { + // if we have more photons left in the frame then photons to read we + // read directly the requested number + nn = n_clusters; + } else { + nn = nph; + } + //Read one cluster, in the ROI push back + // nph_read += fread((buf + nph_read*clusters.item_size()), + // clusters.item_size(), nn, fp); + for(size_t i = 0; i < nn; i++){ + fread(&tmp, sizeof(tmp), 1, fp); + if(tmp.x >= roi.xmin && tmp.x <= roi.xmax && tmp.y >= roi.ymin && tmp.y <= roi.ymax){ + clusters.push_back(tmp.x, tmp.y, reinterpret_cast(tmp.data)); + nph_read++; + } + } + + m_num_left = nph - nn; // write back the number of photons left + } + + if (nph_read < n_clusters) { + // keep on reading frames and photons until reaching n_clusters + while (fread(&iframe, sizeof(iframe), 1, fp)) { + // read number of clusters in frame + if (fread(&nph, sizeof(nph), 1, fp)) { + if (nph > (n_clusters - nph_read)) + nn = n_clusters - nph_read; + else + nn = nph; + + // nph_read += fread((buf + nph_read*clusters.item_size()), + // clusters.item_size(), nn, fp); + for(size_t i = 0; i < nn; i++){ + fread(&tmp, sizeof(tmp), 1, fp); + if(tmp.x >= roi.xmin && tmp.x <= roi.xmax && tmp.y >= roi.ymin && tmp.y <= roi.ymax){ + clusters.push_back(tmp.x, tmp.y, reinterpret_cast(tmp.data)); + nph_read++; + } + } + m_num_left = nph - nn; + } + if (nph_read >= n_clusters) + break; + } + } + + // Resize the vector to the number of clusters. + // No new allocation, only change bounds. 
+ clusters.resize(nph_read); + return clusters; +} + ClusterVector ClusterFile::read_frame() { if (m_mode != "r") { throw std::runtime_error("File not opened for reading"); @@ -268,11 +341,23 @@ ClusterVector ClusterFile::read_frame() { NDArray calculate_eta2(ClusterVector &clusters) { //TOTO! make work with 2x2 clusters NDArray eta2({static_cast(clusters.size()), 2}); - for (size_t i = 0; i < clusters.size(); i++) { - auto e = calculate_eta2(clusters.at(i)); - eta2(i, 0) = e.x; - eta2(i, 1) = e.y; + + if (clusters.cluster_size_x() == 3 || clusters.cluster_size_y() == 3) { + for (size_t i = 0; i < clusters.size(); i++) { + auto e = calculate_eta2(clusters.at(i)); + eta2(i, 0) = e.x; + eta2(i, 1) = e.y; + } + }else if(clusters.cluster_size_x() == 2 || clusters.cluster_size_y() == 2){ + for (size_t i = 0; i < clusters.size(); i++) { + auto e = calculate_eta2(clusters.at(i)); + eta2(i, 0) = e.x; + eta2(i, 1) = e.y; + } + }else{ + throw std::runtime_error("Only 3x3 and 2x2 clusters are supported"); } + return eta2; } @@ -290,7 +375,7 @@ Eta2 calculate_eta2(Cluster3x3 &cl) { tot2[3] = cl.data[4] + cl.data[5] + cl.data[7] + cl.data[8]; auto c = std::max_element(tot2.begin(), tot2.end()) - tot2.begin(); - + eta.sum = tot2[c]; switch (c) { case cBottomLeft: if ((cl.data[3] + cl.data[4]) != 0) @@ -333,6 +418,20 @@ Eta2 calculate_eta2(Cluster3x3 &cl) { return eta; } + +Eta2 calculate_eta2(Cluster2x2 &cl) { + Eta2 eta{}; + if ((cl.data[0] + cl.data[1]) != 0) + eta.x = static_cast(cl.data[1]) / (cl.data[0] + cl.data[1]); + if ((cl.data[0] + cl.data[2]) != 0) + eta.y = static_cast(cl.data[2]) / (cl.data[0] + cl.data[2]); + eta.sum = cl.data[0] + cl.data[1] + cl.data[2]+ cl.data[3]; + eta.c = cBottomLeft; //TODO! 
This is not correct, but need to put something + return eta; +} + + + int analyze_cluster(Cluster3x3 &cl, int32_t *t2, int32_t *t3, char *quad, double *eta2x, double *eta2y, double *eta3x, double *eta3y) { diff --git a/src/Dtype.cpp b/src/Dtype.cpp index 565d509..b818ea3 100644 --- a/src/Dtype.cpp +++ b/src/Dtype.cpp @@ -70,7 +70,7 @@ uint8_t Dtype::bitdepth() const { /** * @brief Get the number of bytes of the data type */ -size_t Dtype::bytes() const { return bitdepth() / 8; } +size_t Dtype::bytes() const { return bitdepth() / bits_per_byte; } /** * @brief Construct a DType object from a TypeIndex diff --git a/src/File.cpp b/src/File.cpp index 1180967..3c68eff 100644 --- a/src/File.cpp +++ b/src/File.cpp @@ -73,7 +73,7 @@ size_t File::tell() const { return file_impl->tell(); } size_t File::rows() const { return file_impl->rows(); } size_t File::cols() const { return file_impl->cols(); } size_t File::bitdepth() const { return file_impl->bitdepth(); } -size_t File::bytes_per_pixel() const { return file_impl->bitdepth() / 8; } +size_t File::bytes_per_pixel() const { return file_impl->bitdepth() / bits_per_byte; } DetectorType File::detector_type() const { return file_impl->detector_type(); } diff --git a/src/Interpolator.cpp b/src/Interpolator.cpp new file mode 100644 index 0000000..7f82533 --- /dev/null +++ b/src/Interpolator.cpp @@ -0,0 +1,144 @@ +#include "aare/Interpolator.hpp" +#include "aare/algorithm.hpp" + +namespace aare { + +Interpolator::Interpolator(NDView etacube, NDView xbins, + NDView ybins, NDView ebins) + : m_ietax(etacube), m_ietay(etacube), m_etabinsx(xbins), m_etabinsy(ybins), m_energy_bins(ebins) { + if (etacube.shape(0) != xbins.size() || etacube.shape(1) != ybins.size() || + etacube.shape(2) != ebins.size()) { + throw std::invalid_argument( + "The shape of the etacube does not match the shape of the bins"); + } + + // Cumulative sum in the x direction + for (ssize_t i = 1; i < m_ietax.shape(0); i++) { + for (ssize_t j = 0; j < 
m_ietax.shape(1); j++) { + for (ssize_t k = 0; k < m_ietax.shape(2); k++) { + m_ietax(i, j, k) += m_ietax(i - 1, j, k); + } + } + } + + // Normalize by the highest row, if norm less than 1 don't do anything + for (ssize_t i = 0; i < m_ietax.shape(0); i++) { + for (ssize_t j = 0; j < m_ietax.shape(1); j++) { + for (ssize_t k = 0; k < m_ietax.shape(2); k++) { + auto val = m_ietax(m_ietax.shape(0) - 1, j, k); + double norm = val < 1 ? 1 : val; + m_ietax(i, j, k) /= norm; + } + } + } + + // Cumulative sum in the y direction + for (ssize_t i = 0; i < m_ietay.shape(0); i++) { + for (ssize_t j = 1; j < m_ietay.shape(1); j++) { + for (ssize_t k = 0; k < m_ietay.shape(2); k++) { + m_ietay(i, j, k) += m_ietay(i, j - 1, k); + } + } + } + + // Normalize by the highest column, if norm less than 1 don't do anything + for (ssize_t i = 0; i < m_ietay.shape(0); i++) { + for (ssize_t j = 0; j < m_ietay.shape(1); j++) { + for (ssize_t k = 0; k < m_ietay.shape(2); k++) { + auto val = m_ietay(i, m_ietay.shape(1) - 1, k); + double norm = val < 1 ? 
1 : val; + m_ietay(i, j, k) /= norm; + } + } + } +} + +std::vector Interpolator::interpolate(const ClusterVector& clusters) { + std::vector photons; + photons.reserve(clusters.size()); + + if (clusters.cluster_size_x() == 3 || clusters.cluster_size_y() == 3) { + for (size_t i = 0; i(i); + Eta2 eta= calculate_eta2(cluster); + + Photon photon; + photon.x = cluster.x; + photon.y = cluster.y; + photon.energy = eta.sum; + + // auto ie = nearest_index(m_energy_bins, photon.energy)-1; + // auto ix = nearest_index(m_etabinsx, eta.x)-1; + // auto iy = nearest_index(m_etabinsy, eta.y)-1; + //Finding the index of the last element that is smaller + //should work fine as long as we have many bins + auto ie = last_smaller(m_energy_bins, photon.energy); + auto ix = last_smaller(m_etabinsx, eta.x); + auto iy = last_smaller(m_etabinsy, eta.y); + + // fmt::print("ex: {}, ix: {}, iy: {}\n", ie, ix, iy); + + double dX, dY; + int ex, ey; + // cBottomLeft = 0, + // cBottomRight = 1, + // cTopLeft = 2, + // cTopRight = 3 + switch (eta.c) { + case cTopLeft: + dX = -1.; + dY = 0.; + break; + case cTopRight:; + dX = 0.; + dY = 0.; + break; + case cBottomLeft: + dX = -1.; + dY = -1.; + break; + case cBottomRight: + dX = 0.; + dY = -1.; + break; + } + photon.x += m_ietax(ix, iy, ie)*2 + dX; + photon.y += m_ietay(ix, iy, ie)*2 + dY; + photons.push_back(photon); + } + }else if(clusters.cluster_size_x() == 2 || clusters.cluster_size_y() == 2){ + for (size_t i = 0; i(i); + Eta2 eta= calculate_eta2(cluster); + + Photon photon; + photon.x = cluster.x; + photon.y = cluster.y; + photon.energy = eta.sum; + + //Now do some actual interpolation. 
+ //Find which energy bin the cluster is in + // auto ie = nearest_index(m_energy_bins, photon.energy)-1; + // auto ix = nearest_index(m_etabinsx, eta.x)-1; + // auto iy = nearest_index(m_etabinsy, eta.y)-1; + //Finding the index of the last element that is smaller + //should work fine as long as we have many bins + auto ie = last_smaller(m_energy_bins, photon.energy); + auto ix = last_smaller(m_etabinsx, eta.x); + auto iy = last_smaller(m_etabinsy, eta.y); + + photon.x += m_ietax(ix, iy, ie)*2; //eta goes between 0 and 1 but we could move the hit anywhere in the 2x2 + photon.y += m_ietay(ix, iy, ie)*2; + photons.push_back(photon); + } + + }else{ + throw std::runtime_error("Only 3x3 and 2x2 clusters are supported for interpolation"); + } + + + return photons; +} + +} // namespace aare \ No newline at end of file diff --git a/src/NDArray.test.cpp b/src/NDArray.test.cpp index 942481c..eff3e2c 100644 --- a/src/NDArray.test.cpp +++ b/src/NDArray.test.cpp @@ -2,6 +2,7 @@ #include #include #include +#include using aare::NDArray; using aare::NDView; @@ -34,6 +35,24 @@ TEST_CASE("Construct from an NDView") { } } +TEST_CASE("3D NDArray from NDView"){ + std::vector data(27); + std::iota(data.begin(), data.end(), 0); + NDView view(data.data(), Shape<3>{3, 3, 3}); + NDArray image(view); + REQUIRE(image.shape() == view.shape()); + REQUIRE(image.size() == view.size()); + REQUIRE(image.data() != view.data()); + + for(int64_t i=0; i shape{{20}}; NDArray img(shape, 3); diff --git a/src/RawFile.cpp b/src/RawFile.cpp index e704add..78cb6c5 100644 --- a/src/RawFile.cpp +++ b/src/RawFile.cpp @@ -76,8 +76,7 @@ size_t RawFile::n_mod() const { return n_subfile_parts; } size_t RawFile::bytes_per_frame() { - // return m_rows * m_cols * m_master.bitdepth() / 8; - return m_geometry.pixels_x * m_geometry.pixels_y * m_master.bitdepth() / 8; + return m_geometry.pixels_x * m_geometry.pixels_y * m_master.bitdepth() / bits_per_byte; } size_t RawFile::pixels_per_frame() { // return m_rows * m_cols; 
diff --git a/src/algorithm.test.cpp b/src/algorithm.test.cpp new file mode 100644 index 0000000..fcfa8d2 --- /dev/null +++ b/src/algorithm.test.cpp @@ -0,0 +1,73 @@ + + +#include +#include + + +TEST_CASE("Find the closed index in a 1D array", "[algorithm]") { + aare::NDArray arr({5}); + for (size_t i = 0; i < arr.size(); i++) { + arr[i] = i; + } + // arr 0, 1, 2, 3, 4 + REQUIRE(aare::nearest_index(arr, 2.3) == 2); + REQUIRE(aare::nearest_index(arr, 2.6) == 3); + REQUIRE(aare::nearest_index(arr, 45.0) == 4); + REQUIRE(aare::nearest_index(arr, 0.0) == 0); + REQUIRE(aare::nearest_index(arr, -1.0) == 0); +} + +TEST_CASE("Passing integers to nearest_index works", "[algorithm]"){ + aare::NDArray arr({5}); + for (size_t i = 0; i < arr.size(); i++) { + arr[i] = i; + } + // arr 0, 1, 2, 3, 4 + REQUIRE(aare::nearest_index(arr, 2) == 2); + REQUIRE(aare::nearest_index(arr, 3) == 3); + REQUIRE(aare::nearest_index(arr, 45) == 4); + REQUIRE(aare::nearest_index(arr, 0) == 0); + REQUIRE(aare::nearest_index(arr, -1) == 0); +} + + +TEST_CASE("nearest_index works with std::vector", "[algorithm]"){ + std::vector vec = {0, 1, 2, 3, 4}; + REQUIRE(aare::nearest_index(vec, 2.123) == 2); + REQUIRE(aare::nearest_index(vec, 2.66) == 3); + REQUIRE(aare::nearest_index(vec, 4555555.0) == 4); + REQUIRE(aare::nearest_index(vec, 0.0) == 0); + REQUIRE(aare::nearest_index(vec, -10.0) == 0); +} + +TEST_CASE("nearest index works with std::array", "[algorithm]"){ + std::array arr = {0, 1, 2, 3, 4}; + REQUIRE(aare::nearest_index(arr, 2.123) == 2); + REQUIRE(aare::nearest_index(arr, 2.501) == 3); + REQUIRE(aare::nearest_index(arr, 4555555.0) == 4); + REQUIRE(aare::nearest_index(arr, 0.0) == 0); + REQUIRE(aare::nearest_index(arr, -10.0) == 0); +} + + +TEST_CASE("last smaller", "[algorithm]"){ + aare::NDArray arr({5}); + for (size_t i = 0; i < arr.size(); i++) { + arr[i] = i; + } + // arr 0, 1, 2, 3, 4 + REQUIRE(aare::last_smaller(arr, -10.0) == 0); + REQUIRE(aare::last_smaller(arr, 0.0) == 0); + 
REQUIRE(aare::last_smaller(arr, 2.3) == 2); + REQUIRE(aare::last_smaller(arr, 253.) == 4); +} + +TEST_CASE("returns last bin strictly smaller", "[algorithm]"){ + aare::NDArray arr({5}); + for (size_t i = 0; i < arr.size(); i++) { + arr[i] = i; + } + // arr 0, 1, 2, 3, 4 + REQUIRE(aare::last_smaller(arr, 2.0) == 2); + +} \ No newline at end of file From 6e7e81b36ba7ab9276405df004c792d275111fe8 Mon Sep 17 00:00:00 2001 From: AliceMazzoleni99 Date: Fri, 21 Mar 2025 16:32:54 +0100 Subject: [PATCH 048/120] complete mess but need to install RedHat 9 --- include/aare/ClusterFile.hpp | 48 +++++---- include/aare/ClusterVector.hpp | 48 +++++---- src/ClusterFile.cpp | 184 ++++++++++++++++++++------------- 3 files changed, 162 insertions(+), 118 deletions(-) diff --git a/include/aare/ClusterFile.hpp b/include/aare/ClusterFile.hpp index 5bea342..d35f362 100644 --- a/include/aare/ClusterFile.hpp +++ b/include/aare/ClusterFile.hpp @@ -8,16 +8,12 @@ namespace aare { -//TODO! Template this? -struct Cluster3x3 { - int16_t x; - int16_t y; - int32_t data[9]; -}; -struct Cluster2x2 { - int16_t x; - int16_t y; - int32_t data[4]; +template +struct Cluster { + CoordType x; + CoordType y; + T data[ClusterSizeX * ClusterSizeY]; }; typedef enum { @@ -93,8 +89,7 @@ class ClusterFile { */ ClusterFile(const std::filesystem::path &fname, size_t chunk_size = 1000, const std::string &mode = "r"); - - + ~ClusterFile(); /** @@ -109,26 +104,26 @@ class ClusterFile { /** * @brief Read a single frame from the file and return the clusters. The * cluster vector will have the frame number set. 
- * @throws std::runtime_error if the file is not opened for reading or the file pointer not - * at the beginning of a frame + * @throws std::runtime_error if the file is not opened for reading or the + * file pointer not at the beginning of a frame */ ClusterVector read_frame(); - void write_frame(const ClusterVector &clusters); - + // Need to be migrated to support NDArray and return a ClusterVector // std::vector - // read_cluster_with_cut(size_t n_clusters, double *noise_map, int nx, int ny); + // read_cluster_with_cut(size_t n_clusters, double *noise_map, int nx, int + // ny); /** * @brief Return the chunk size */ size_t chunk_size() const { return m_chunk_size; } - - + /** - * @brief Close the file. If not closed the file will be closed in the destructor + * @brief Close the file. If not closed the file will be closed in the + * destructor */ void close(); }; @@ -138,8 +133,17 @@ int analyze_data(int32_t *data, int32_t *t2, int32_t *t3, char *quad, int analyze_cluster(Cluster3x3 &cl, int32_t *t2, int32_t *t3, char *quad, double *eta2x, double *eta2y, double *eta3x, double *eta3y); -NDArray calculate_eta2(ClusterVector &clusters); -Eta2 calculate_eta2(Cluster3x3 &cl); +template +NDArray calculate_eta2(ClusterVector &clusters); + +template Eta2 calculate_eta2(Cluster &cl); + Eta2 calculate_eta2(Cluster2x2 &cl); +template Eta2 calculate_eta2(ClusterType &cl); + +template +Eta2 calculate_eta2(Cluster &cl); + } // namespace aare diff --git a/include/aare/ClusterVector.hpp b/include/aare/ClusterVector.hpp index 1c15a22..c5e66b7 100644 --- a/include/aare/ClusterVector.hpp +++ b/include/aare/ClusterVector.hpp @@ -10,6 +10,8 @@ namespace aare { +template class ClusterVector; // Forward declaration + /** * @brief ClusterVector is a container for clusters of various sizes. It uses a * contiguous memory buffer to store the clusters. 
It is templated on the data @@ -21,10 +23,12 @@ namespace aare { * @tparam CoordType data type of the x and y coordinates of the cluster * (normally int16_t) */ -template class ClusterVector { +template +class ClusterVector> { using value_type = T; - size_t m_cluster_size_x; - size_t m_cluster_size_y; + // size_t m_cluster_size_x; + // size_t m_cluster_size_y; std::byte *m_data{}; size_t m_size{0}; size_t m_capacity; @@ -40,6 +44,8 @@ template class ClusterVector { constexpr static char m_fmt_base[] = "=h:x:\nh:y:\n({},{}){}:data:"; public: + using ClusterType = Cluster; + /** * @brief Construct a new ClusterVector object * @param cluster_size_x size of the cluster in x direction @@ -48,10 +54,8 @@ template class ClusterVector { * @param frame_number frame number of the clusters. Default is 0, which is * also used to indicate that the clusters come from many frames */ - ClusterVector(size_t cluster_size_x = 3, size_t cluster_size_y = 3, - size_t capacity = 1024, uint64_t frame_number = 0) - : m_cluster_size_x(cluster_size_x), m_cluster_size_y(cluster_size_y), - m_capacity(capacity), m_frame_number(frame_number) { + ClusterVector(size_t capacity = 1024, uint64_t frame_number = 0) + : m_capacity(capacity), m_frame_number(frame_number) { allocate_buffer(capacity); } @@ -59,10 +63,8 @@ template class ClusterVector { // Move constructor ClusterVector(ClusterVector &&other) noexcept - : m_cluster_size_x(other.m_cluster_size_x), - m_cluster_size_y(other.m_cluster_size_y), m_data(other.m_data), - m_size(other.m_size), m_capacity(other.m_capacity), - m_frame_number(other.m_frame_number) { + : m_data(other.m_data), m_size(other.m_size), + m_capacity(other.m_capacity), m_frame_number(other.m_frame_number) { other.m_data = nullptr; other.m_size = 0; other.m_capacity = 0; @@ -72,8 +74,6 @@ template class ClusterVector { ClusterVector &operator=(ClusterVector &&other) noexcept { if (this != &other) { delete[] m_data; - m_cluster_size_x = other.m_cluster_size_x; - 
m_cluster_size_y = other.m_cluster_size_y; m_data = other.m_data; m_size = other.m_size; m_capacity = other.m_capacity; @@ -116,8 +116,7 @@ template class ClusterVector { *reinterpret_cast(ptr) = y; ptr += sizeof(CoordType); - std::copy(data, data + m_cluster_size_x * m_cluster_size_y * sizeof(T), - ptr); + std::copy(data, data + ClusterSizeX * ClusterSizeY * sizeof(T), ptr); m_size++; } ClusterVector &operator+=(const ClusterVector &other) { @@ -137,7 +136,7 @@ template class ClusterVector { std::vector sum() { std::vector sums(m_size); const size_t stride = item_size(); - const size_t n_pixels = m_cluster_size_x * m_cluster_size_y; + const size_t n_pixels = ClusterSizeX * ClusterSizeY; std::byte *ptr = m_data + 2 * sizeof(CoordType); // skip x and y for (size_t i = 0; i < m_size; i++) { @@ -159,7 +158,7 @@ template class ClusterVector { std::vector sums(m_size); const size_t stride = item_size(); - if (m_cluster_size_x != 3 || m_cluster_size_y != 3) { + if (ClusterSizeX != 3 || ClusterSizeY != 3) { throw std::runtime_error( "Only 3x3 clusters are supported for the 2x2 sum."); } @@ -196,8 +195,7 @@ template class ClusterVector { * @brief Return the size in bytes of a single cluster */ size_t item_size() const { - return 2 * sizeof(CoordType) + - m_cluster_size_x * m_cluster_size_y * sizeof(T); + return 2 * sizeof(CoordType) + ClusterSizeX * ClusterSizeY * sizeof(T); } /** @@ -217,8 +215,8 @@ template class ClusterVector { return m_data + element_offset(i); } - size_t cluster_size_x() const { return m_cluster_size_x; } - size_t cluster_size_y() const { return m_cluster_size_y; } + // size_t cluster_size_x() const { return m_cluster_size_x; } + // size_t cluster_size_y() const { return m_cluster_size_y; } std::byte *data() { return m_data; } std::byte const *data() const { return m_data; } @@ -227,12 +225,12 @@ template class ClusterVector { * @brief Return a reference to the i-th cluster casted to type V * @tparam V type of the cluster */ - template V &at(size_t i) 
{ - return *reinterpret_cast(element_ptr(i)); + ClusterType &at(size_t i) { + return *reinterpret_cast(element_ptr(i)); } - template const V &at(size_t i) const { - return *reinterpret_cast(element_ptr(i)); + const ClusterType &at(size_t i) const { + return *reinterpret_cast(element_ptr(i)); } const std::string_view fmt_base() const { diff --git a/src/ClusterFile.cpp b/src/ClusterFile.cpp index be3f607..c6ae470 100644 --- a/src/ClusterFile.cpp +++ b/src/ClusterFile.cpp @@ -59,8 +59,8 @@ ClusterVector ClusterFile::read_clusters(size_t n_clusters) { if (m_mode != "r") { throw std::runtime_error("File not opened for reading"); } - - ClusterVector clusters(3,3, n_clusters); + + ClusterVector clusters(3, 3, n_clusters); int32_t iframe = 0; // frame number needs to be 4 bytes! size_t nph_read = 0; @@ -78,7 +78,7 @@ ClusterVector ClusterFile::read_clusters(size_t n_clusters) { } else { nn = nph; } - nph_read += fread((buf + nph_read*clusters.item_size()), + nph_read += fread((buf + nph_read * clusters.item_size()), clusters.item_size(), nn, fp); m_num_left = nph - nn; // write back the number of photons left } @@ -93,7 +93,7 @@ ClusterVector ClusterFile::read_clusters(size_t n_clusters) { else nn = nph; - nph_read += fread((buf + nph_read*clusters.item_size()), + nph_read += fread((buf + nph_read * clusters.item_size()), clusters.item_size(), nn, fp); m_num_left = nph - nn; } @@ -112,8 +112,8 @@ ClusterVector ClusterFile::read_clusters(size_t n_clusters, ROI roi) { if (m_mode != "r") { throw std::runtime_error("File not opened for reading"); } - - ClusterVector clusters(3,3); + + ClusterVector clusters(3, 3); clusters.reserve(n_clusters); int32_t iframe = 0; // frame number needs to be 4 bytes! 
@@ -124,7 +124,7 @@ ClusterVector ClusterFile::read_clusters(size_t n_clusters, ROI roi) { // auto buf = reinterpret_cast(clusters.data()); // auto buf = clusters.data(); - Cluster3x3 tmp; //this would break if the cluster size changes + Cluster3x3 tmp; // this would break if the cluster size changes // if there are photons left from previous frame read them first if (nph) { @@ -135,13 +135,15 @@ ClusterVector ClusterFile::read_clusters(size_t n_clusters, ROI roi) { } else { nn = nph; } - //Read one cluster, in the ROI push back - // nph_read += fread((buf + nph_read*clusters.item_size()), - // clusters.item_size(), nn, fp); - for(size_t i = 0; i < nn; i++){ + // Read one cluster, in the ROI push back + // nph_read += fread((buf + nph_read*clusters.item_size()), + // clusters.item_size(), nn, fp); + for (size_t i = 0; i < nn; i++) { fread(&tmp, sizeof(tmp), 1, fp); - if(tmp.x >= roi.xmin && tmp.x <= roi.xmax && tmp.y >= roi.ymin && tmp.y <= roi.ymax){ - clusters.push_back(tmp.x, tmp.y, reinterpret_cast(tmp.data)); + if (tmp.x >= roi.xmin && tmp.x <= roi.xmax && tmp.y >= roi.ymin && + tmp.y <= roi.ymax) { + clusters.push_back(tmp.x, tmp.y, + reinterpret_cast(tmp.data)); nph_read++; } } @@ -161,10 +163,13 @@ ClusterVector ClusterFile::read_clusters(size_t n_clusters, ROI roi) { // nph_read += fread((buf + nph_read*clusters.item_size()), // clusters.item_size(), nn, fp); - for(size_t i = 0; i < nn; i++){ + for (size_t i = 0; i < nn; i++) { fread(&tmp, sizeof(tmp), 1, fp); - if(tmp.x >= roi.xmin && tmp.x <= roi.xmax && tmp.y >= roi.ymin && tmp.y <= roi.ymax){ - clusters.push_back(tmp.x, tmp.y, reinterpret_cast(tmp.data)); + if (tmp.x >= roi.xmin && tmp.x <= roi.xmax && + tmp.y >= roi.ymin && tmp.y <= roi.ymax) { + clusters.push_back( + tmp.x, tmp.y, + reinterpret_cast(tmp.data)); nph_read++; } } @@ -210,7 +215,6 @@ ClusterVector ClusterFile::read_frame() { return clusters; } - // std::vector ClusterFile::read_cluster_with_cut(size_t n_clusters, // double *noise_map, // 
int nx, int ny) { @@ -218,7 +222,8 @@ ClusterVector ClusterFile::read_frame() { // throw std::runtime_error("File not opened for reading"); // } // std::vector clusters(n_clusters); -// // size_t read_clusters_with_cut(FILE *fp, size_t n_clusters, Cluster *buf, +// // size_t read_clusters_with_cut(FILE *fp, size_t n_clusters, Cluster +// *buf, // // uint32_t *n_left, double *noise_map, int // // nx, int ny) { // int iframe = 0; @@ -249,7 +254,8 @@ ClusterVector ClusterFile::read_frame() { // for (size_t iph = 0; iph < nn; iph++) { // // read photons 1 by 1 // size_t n_read = -// fread(reinterpret_cast(ptr), sizeof(Cluster3x3), 1, fp); +// fread(reinterpret_cast(ptr), sizeof(Cluster3x3), 1, +// fp); // if (n_read != 1) { // clusters.resize(nph_read); // return clusters; @@ -257,12 +263,15 @@ ClusterVector ClusterFile::read_frame() { // // TODO! error handling on read // good = 1; // if (noise_map) { -// if (ptr->x >= 0 && ptr->x < nx && ptr->y >= 0 && ptr->y < ny) { +// if (ptr->x >= 0 && ptr->x < nx && ptr->y >= 0 && ptr->y < ny) +// { // tot1 = ptr->data[4]; -// analyze_cluster(*ptr, &t2max, &tot3, NULL, NULL, NULL, NULL, +// analyze_cluster(*ptr, &t2max, &tot3, NULL, NULL, NULL, +// NULL, // NULL); // noise = noise_map[ptr->y * nx + ptr->x]; -// if (tot1 > noise || t2max > 2 * noise || tot3 > 3 * noise) { +// if (tot1 > noise || t2max > 2 * noise || tot3 > 3 * +// noise) { // ; // } else { // good = 0; @@ -316,8 +325,8 @@ ClusterVector ClusterFile::read_frame() { // } else // good = 0; // } else { -// printf("Bad pixel number %d %d\n", ptr->x, ptr->y); -// good = 0; +// printf("Bad pixel number %d %d\n", ptr->x, +// ptr->y); good = 0; // } // } // if (good) { @@ -338,37 +347,81 @@ ClusterVector ClusterFile::read_frame() { // return clusters; // } -NDArray calculate_eta2(ClusterVector &clusters) { - //TOTO! make work with 2x2 clusters +template +NDArray calculate_eta2(ClusterVector &clusters) { + // TOTO! 
make work with 2x2 clusters NDArray eta2({static_cast(clusters.size()), 2}); - - if (clusters.cluster_size_x() == 3 || clusters.cluster_size_y() == 3) { - for (size_t i = 0; i < clusters.size(); i++) { - auto e = calculate_eta2(clusters.at(i)); - eta2(i, 0) = e.x; - eta2(i, 1) = e.y; - } - }else if(clusters.cluster_size_x() == 2 || clusters.cluster_size_y() == 2){ - for (size_t i = 0; i < clusters.size(); i++) { - auto e = calculate_eta2(clusters.at(i)); - eta2(i, 0) = e.x; - eta2(i, 1) = e.y; - } - }else{ - throw std::runtime_error("Only 3x3 and 2x2 clusters are supported"); + + for (size_t i = 0; i < clusters.size(); i++) { + auto e = calculate_eta2(clusters.at(i)); + eta2(i, 0) = e.x; + eta2(i, 1) = e.y; } - + return eta2; } -/** - * @brief Calculate the eta2 values for a 3x3 cluster and return them in a Eta2 struct - * containing etay, etax and the corner of the cluster. -*/ -Eta2 calculate_eta2(Cluster3x3 &cl) { +/** + * @brief Calculate the eta2 values for a generic sized cluster and return them + * in a Eta2 struct containing etay, etax and the index of the respective 2x2 + * subcluster. 
+ */ +template +Eta2 calculate_eta2(Cluster &cl) { Eta2 eta{}; - std::array tot2; + // TODO loads of overhead for a 2x2 clsuter maybe keep 2x2 calculation + size_t num_2x2_subclusters = (ClusterSizeX - 1) * (ClusterSizeY - 1); + std::array sum_2x2_subcluster; + for (size_t i = 0; i < ClusterSizeY - 1; ++i) { + for (size_t j = 0; j < ClusterSizeX - 1; ++j) + sum_2x2_subcluster[i * (ClusterSizeX - 1) + j] = + cl.data[i * ClusterSizeX + j] + + cl.data[i * ClusterSizeX + j + 1] + + cl.data[(i + 1) * ClusterSizeX + j] + + cl.data[(i + 1) * ClusterSizeX + j + 1]; + } + + auto c = std::max_element(sum_2x2_subclusters.begin(), + sum_2x2_subcluster.end()) - + sum_2x2_subcluster.begin(); + + eta.sum = sum_2x2_subcluster[c]; + + eta.x = static_cast(cl.data[(c + 1) * ClusterSizeX + 1]) / + (cl.data[0] + cl.data[1]); + + size_t index_top_left_2x2_subcluster = + (int(c / (ClusterSizeX - 1)) + 1) * ClusterSizeX + + c % (ClusterSizeX - 1) * 2 + 1; + if ((cl.data[index_top_left_2x2_subcluster] + + cl.data[index_top_left_2x2_subcluster - 1]) != 0) + eta.x = + static_cast(cl.data[index_top_left_2x2_subcluster] / + (cl.data[index_top_left_2x2_subcluster] + + cl.data[index_top_left_2x2_subcluster - 1])); + + if ((cl.data[index_top_left_2x2_subcluster] + + cl.data[index_top_left_2x2_subcluster - ClusterSizeX]) != 0) + eta.y = static_cast( + cl.data[index_top_left_2x2_subcluster] / + (cl.data[index_top_left_2x2_subcluster] + + cl.data[index_top_left_2x2_subcluster - ClusterSizeX])); + + eta.c = c; // TODO only supported for 2x2 and 3x3 clusters -> at least no + // underyling enum class + return eta; +} + +/** + * @brief Calculate the eta2 values for a 3x3 cluster and return them in a Eta2 + * struct containing etay, etax and the corner of the cluster. 
+ */ +template Eta2 calculate_eta2(Cluster &cl) { + Eta2 eta{}; + + std::array tot2; tot2[0] = cl.data[0] + cl.data[1] + cl.data[3] + cl.data[4]; tot2[1] = cl.data[1] + cl.data[2] + cl.data[4] + cl.data[5]; tot2[2] = cl.data[3] + cl.data[4] + cl.data[6] + cl.data[7]; @@ -379,58 +432,47 @@ Eta2 calculate_eta2(Cluster3x3 &cl) { switch (c) { case cBottomLeft: if ((cl.data[3] + cl.data[4]) != 0) - eta.x = - static_cast(cl.data[4]) / (cl.data[3] + cl.data[4]); + eta.x = static_cast(cl.data[4]) / (cl.data[3] + cl.data[4]); if ((cl.data[1] + cl.data[4]) != 0) - eta.y = - static_cast(cl.data[4]) / (cl.data[1] + cl.data[4]); + eta.y = static_cast(cl.data[4]) / (cl.data[1] + cl.data[4]); eta.c = cBottomLeft; break; case cBottomRight: if ((cl.data[2] + cl.data[5]) != 0) - eta.x = - static_cast(cl.data[5]) / (cl.data[4] + cl.data[5]); + eta.x = static_cast(cl.data[5]) / (cl.data[4] + cl.data[5]); if ((cl.data[1] + cl.data[4]) != 0) - eta.y = - static_cast(cl.data[4]) / (cl.data[1] + cl.data[4]); + eta.y = static_cast(cl.data[4]) / (cl.data[1] + cl.data[4]); eta.c = cBottomRight; break; case cTopLeft: if ((cl.data[7] + cl.data[4]) != 0) - eta.x = - static_cast(cl.data[4]) / (cl.data[3] + cl.data[4]); + eta.x = static_cast(cl.data[4]) / (cl.data[3] + cl.data[4]); if ((cl.data[7] + cl.data[4]) != 0) - eta.y = - static_cast(cl.data[7]) / (cl.data[7] + cl.data[4]); + eta.y = static_cast(cl.data[7]) / (cl.data[7] + cl.data[4]); eta.c = cTopLeft; break; case cTopRight: if ((cl.data[5] + cl.data[4]) != 0) - eta.x = - static_cast(cl.data[5]) / (cl.data[5] + cl.data[4]); + eta.x = static_cast(cl.data[5]) / (cl.data[5] + cl.data[4]); if ((cl.data[7] + cl.data[4]) != 0) - eta.y = - static_cast(cl.data[7]) / (cl.data[7] + cl.data[4]); + eta.y = static_cast(cl.data[7]) / (cl.data[7] + cl.data[4]); eta.c = cTopRight; break; - // no default to allow compiler to warn about missing cases + // no default to allow compiler to warn about missing cases } return eta; } - -Eta2 
calculate_eta2(Cluster2x2 &cl) { +template Eta2 calculate_eta2(Cluster &cl) { Eta2 eta{}; eta.x = static_cast(cl.data[1]) / (cl.data[0] + cl.data[1]); eta.y = static_cast(cl.data[2]) / (cl.data[0] + cl.data[2]); - eta.sum = cl.data[0] + cl.data[1] + cl.data[2]+ cl.data[3]; - eta.c = cBottomLeft; //TODO! This is not correct, but need to put something + eta.sum = cl.data[0] + cl.data[1] + cl.data[2] + cl.data[3]; + eta.c = cBottomLeft; // TODO! This is not correct, but need to put something return eta; } - - int analyze_cluster(Cluster3x3 &cl, int32_t *t2, int32_t *t3, char *quad, double *eta2x, double *eta2y, double *eta3x, double *eta3y) { From 6ad76f63c11754444c049c652f54b3d16d3f0586 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Erik=20Fr=C3=B6jdh?= Date: Mon, 24 Mar 2025 14:28:10 +0100 Subject: [PATCH 049/120] Fixed reading clusters with ROI (#142) Fixed incorrect reading of clusters with ROI closes #141 --- src/ClusterFile.cpp | 78 +++++++++++++++++++-------------------------- 1 file changed, 33 insertions(+), 45 deletions(-) diff --git a/src/ClusterFile.cpp b/src/ClusterFile.cpp index 2e23e09..59b8bb8 100644 --- a/src/ClusterFile.cpp +++ b/src/ClusterFile.cpp @@ -115,69 +115,57 @@ ClusterVector ClusterFile::read_clusters(size_t n_clusters, ROI roi) { ClusterVector clusters(3,3); clusters.reserve(n_clusters); - - int32_t iframe = 0; // frame number needs to be 4 bytes! 
- size_t nph_read = 0; - uint32_t nn = m_num_left; - uint32_t nph = m_num_left; // number of clusters in frame needs to be 4 - - // auto buf = reinterpret_cast(clusters.data()); - // auto buf = clusters.data(); - + Cluster3x3 tmp; //this would break if the cluster size changes + // if there are photons left from previous frame read them first - if (nph) { - if (nph > n_clusters) { - // if we have more photons left in the frame then photons to read we - // read directly the requested number - nn = n_clusters; - } else { - nn = nph; - } - //Read one cluster, in the ROI push back - // nph_read += fread((buf + nph_read*clusters.item_size()), - // clusters.item_size(), nn, fp); - for(size_t i = 0; i < nn; i++){ + if (m_num_left) { + size_t nph_read = 0; + while(nph_read < m_num_left && clusters.size() < n_clusters){ fread(&tmp, sizeof(tmp), 1, fp); + nph_read++; if(tmp.x >= roi.xmin && tmp.x <= roi.xmax && tmp.y >= roi.ymin && tmp.y <= roi.ymax){ clusters.push_back(tmp.x, tmp.y, reinterpret_cast(tmp.data)); - nph_read++; } } - - m_num_left = nph - nn; // write back the number of photons left + m_num_left -= nph_read; } - if (nph_read < n_clusters) { - // keep on reading frames and photons until reaching n_clusters - while (fread(&iframe, sizeof(iframe), 1, fp)) { - // read number of clusters in frame - if (fread(&nph, sizeof(nph), 1, fp)) { - if (nph > (n_clusters - nph_read)) - nn = n_clusters - nph_read; - else - nn = nph; - // nph_read += fread((buf + nph_read*clusters.item_size()), - // clusters.item_size(), nn, fp); - for(size_t i = 0; i < nn; i++){ + if (clusters.size() < n_clusters) { + if (m_num_left) { + throw std::runtime_error(LOCATION + "Entered second loop with clusters left\n"); + } + // we did not have enough clusters left in the previous frame + // keep on reading frames until reaching n_clusters + + int32_t frame_number = 0; // frame number needs to be 4 bytes! 
+ while (fread(&frame_number, sizeof(frame_number), 1, fp)) { + uint32_t nph_in_frame = 0; //number of photons we can read until next frame number + size_t nph_read = 0; //number of photons read in this frame + + if (fread(&nph_in_frame, sizeof(nph_in_frame), 1, fp)) { + if(frame_number != 1){ + throw std::runtime_error("Frame number is not 1"); + } + + while(nph_read < nph_in_frame && clusters.size() < n_clusters){ fread(&tmp, sizeof(tmp), 1, fp); + nph_read++; if(tmp.x >= roi.xmin && tmp.x <= roi.xmax && tmp.y >= roi.ymin && tmp.y <= roi.ymax){ clusters.push_back(tmp.x, tmp.y, reinterpret_cast(tmp.data)); - nph_read++; } } - m_num_left = nph - nn; + m_num_left = nph_in_frame - nph_read; } - if (nph_read >= n_clusters) - break; - } - } - // Resize the vector to the number of clusters. - // No new allocation, only change bounds. - clusters.resize(nph_read); + if (clusters.size() >= n_clusters){ + break; + } + } + + } return clusters; } From 0876b6891ad641d534c7b39d6d5dba3256e70af5 Mon Sep 17 00:00:00 2001 From: AliceMazzoleni99 Date: Tue, 25 Mar 2025 21:42:50 +0100 Subject: [PATCH 050/120] cpp Cluster and ClusterVector and ClusterFile are templated now, they support generic cluster types --- include/aare/Cluster.hpp | 38 +++ include/aare/ClusterFile.hpp | 499 +++++++++++++++++++++++++++++++-- include/aare/ClusterVector.hpp | 13 +- include/aare/Interpolator.hpp | 26 +- src/ClusterFile.cpp | 54 ++-- src/Interpolator.cpp | 78 +++--- 6 files changed, 619 insertions(+), 89 deletions(-) create mode 100644 include/aare/Cluster.hpp diff --git a/include/aare/Cluster.hpp b/include/aare/Cluster.hpp new file mode 100644 index 0000000..74f5281 --- /dev/null +++ b/include/aare/Cluster.hpp @@ -0,0 +1,38 @@ + +/************************************************ + * @file Cluster.hpp + * @short definition of cluster, where CoordType (x,y) give + * the cluster center coordinates and data the actual cluster data + * cluster size is given as template parameters + 
***********************************************/ + +#pragma once + +#include +#include + +namespace aare { + +// requires clause c++20 maybe update +template && + std::is_integral_v>> +struct Cluster { + CoordType x; + CoordType y; + T data[ClusterSizeX * ClusterSizeY]; +}; + +// Type Traits for is_cluster_type +template +struct is_cluster : std::false_type {}; // Default case: Not a Cluster + +// TODO: Do i need the require clause here as well? +template +struct is_cluster> : std::true_type {}; // Cluster + +// helper +template constexpr bool is_cluster_v = is_cluster::value; + +} // namespace aare diff --git a/include/aare/ClusterFile.hpp b/include/aare/ClusterFile.hpp index d35f362..d61ee2b 100644 --- a/include/aare/ClusterFile.hpp +++ b/include/aare/ClusterFile.hpp @@ -1,5 +1,6 @@ #pragma once +#include "aare/Cluster.hpp" #include "aare/ClusterVector.hpp" #include "aare/NDArray.hpp" #include "aare/defs.hpp" @@ -8,14 +9,6 @@ namespace aare { -template -struct Cluster { - CoordType x; - CoordType y; - T data[ClusterSizeX * ClusterSizeY]; -}; - typedef enum { cBottomLeft = 0, cBottomRight = 1, @@ -59,6 +52,8 @@ uint32_t number_of_clusters .... */ +// TODO: change to support any type of clusters, e.g. header line with +// clsuter_size_x, cluster_size_y, /** * @brief Class to read and write cluster files * Expects data to be laid out as: @@ -71,6 +66,8 @@ uint32_t number_of_clusters * uint32_t number_of_clusters * etc. */ +template , bool>> class ClusterFile { FILE *fp{}; uint32_t m_num_left{}; @@ -97,9 +94,9 @@ class ClusterFile { * If EOF is reached the returned vector will have less than n_clusters * clusters */ - ClusterVector read_clusters(size_t n_clusters); + ClusterVector read_clusters(size_t n_clusters); - ClusterVector read_clusters(size_t n_clusters, ROI roi); + ClusterVector read_clusters(size_t n_clusters, ROI roi); /** * @brief Read a single frame from the file and return the clusters. 
The @@ -107,9 +104,9 @@ class ClusterFile { * @throws std::runtime_error if the file is not opened for reading or the * file pointer not at the beginning of a frame */ - ClusterVector read_frame(); + ClusterVector read_frame(); - void write_frame(const ClusterVector &clusters); + void write_frame(const ClusterVector &clusters); // Need to be migrated to support NDArray and return a ClusterVector // std::vector @@ -130,20 +127,484 @@ class ClusterFile { int analyze_data(int32_t *data, int32_t *t2, int32_t *t3, char *quad, double *eta2x, double *eta2y, double *eta3x, double *eta3y); -int analyze_cluster(Cluster3x3 &cl, int32_t *t2, int32_t *t3, char *quad, - double *eta2x, double *eta2y, double *eta3x, double *eta3y); +int analyze_cluster(Cluster &cl, int32_t *t2, int32_t *t3, + char *quad, double *eta2x, double *eta2y, double *eta3x, + double *eta3y); -template +template >> NDArray calculate_eta2(ClusterVector &clusters); -template Eta2 calculate_eta2(Cluster &cl); +// TODO: do we need rquire clauses? 
+template Eta2 calculate_eta2(const Cluster &cl); -Eta2 calculate_eta2(Cluster2x2 &cl); +template Eta2 calculate_eta2(const Cluster &cl); -template Eta2 calculate_eta2(ClusterType &cl); +template >> +Eta2 calculate_eta2(const ClusterType &cl); template -Eta2 calculate_eta2(Cluster &cl); +Eta2 calculate_eta2( + const Cluster &cl); + +template +ClusterFile::ClusterFile( + const std::filesystem::path &fname, size_t chunk_size, + const std::string &mode) + : m_chunk_size(chunk_size), m_mode(mode) { + + if (mode == "r") { + fp = fopen(fname.c_str(), "rb"); + if (!fp) { + throw std::runtime_error("Could not open file for reading: " + + fname.string()); + } + } else if (mode == "w") { + fp = fopen(fname.c_str(), "wb"); + if (!fp) { + throw std::runtime_error("Could not open file for writing: " + + fname.string()); + } + } else if (mode == "a") { + fp = fopen(fname.c_str(), "ab"); + if (!fp) { + throw std::runtime_error("Could not open file for appending: " + + fname.string()); + } + } else { + throw std::runtime_error("Unsupported mode: " + mode); + } +} + +template +ClusterFile::~ClusterFile() { + close(); +} + +template +void ClusterFile::close() { + if (fp) { + fclose(fp); + fp = nullptr; + } +} + +// TODO generally supported for all clsuter types +template +void ClusterFile::write_frame( + const ClusterVector &clusters) { + if (m_mode != "w" && m_mode != "a") { + throw std::runtime_error("File not opened for writing"); + } + if (!(clusters.cluster_size_x() == 3) && + !(clusters.cluster_size_y() == 3)) { + throw std::runtime_error("Only 3x3 clusters are supported"); + } + int32_t frame_number = clusters.frame_number(); + fwrite(&frame_number, sizeof(frame_number), 1, fp); + uint32_t n_clusters = clusters.size(); + fwrite(&n_clusters, sizeof(n_clusters), 1, fp); + fwrite(clusters.data(), clusters.item_size(), clusters.size(), fp); +} + +template +ClusterVector +ClusterFile::read_clusters(size_t n_clusters) { + if (m_mode != "r") { + throw std::runtime_error("File not 
opened for reading"); + } + + ClusterVector clusters(n_clusters); + + int32_t iframe = 0; // frame number needs to be 4 bytes! + size_t nph_read = 0; + uint32_t nn = m_num_left; + uint32_t nph = m_num_left; // number of clusters in frame needs to be 4 + + // auto buf = reinterpret_cast(clusters.data()); + auto buf = clusters.data(); + // if there are photons left from previous frame read them first + if (nph) { + if (nph > n_clusters) { + // if we have more photons left in the frame then photons to read we + // read directly the requested number + nn = n_clusters; + } else { + nn = nph; + } + nph_read += fread((buf + nph_read * clusters.item_size()), + clusters.item_size(), nn, fp); + m_num_left = nph - nn; // write back the number of photons left + } + + if (nph_read < n_clusters) { + // keep on reading frames and photons until reaching n_clusters + while (fread(&iframe, sizeof(iframe), 1, fp)) { + // read number of clusters in frame + if (fread(&nph, sizeof(nph), 1, fp)) { + if (nph > (n_clusters - nph_read)) + nn = n_clusters - nph_read; + else + nn = nph; + + nph_read += fread((buf + nph_read * clusters.item_size()), + clusters.item_size(), nn, fp); + m_num_left = nph - nn; + } + if (nph_read >= n_clusters) + break; + } + } + + // Resize the vector to the number of clusters. + // No new allocation, only change bounds. + clusters.resize(nph_read); + return clusters; +} + +template +ClusterVector +ClusterFile::read_clusters(size_t n_clusters, ROI roi) { + if (m_mode != "r") { + throw std::runtime_error("File not opened for reading"); + } + + ClusterVector clusters; + clusters.reserve(n_clusters); + + int32_t iframe = 0; // frame number needs to be 4 bytes! 
+ size_t nph_read = 0; + uint32_t nn = m_num_left; + uint32_t nph = m_num_left; // number of clusters in frame needs to be 4 + + // auto buf = reinterpret_cast(clusters.data()); + // auto buf = clusters.data(); + + ClusterType tmp; // this would break if the cluster size changes + + // if there are photons left from previous frame read them first + if (nph) { + if (nph > n_clusters) { + // if we have more photons left in the frame then photons to read we + // read directly the requested number + nn = n_clusters; + } else { + nn = nph; + } + // Read one cluster, in the ROI push back + // nph_read += fread((buf + nph_read*clusters.item_size()), + // clusters.item_size(), nn, fp); + for (size_t i = 0; i < nn; i++) { + fread(&tmp, sizeof(tmp), 1, fp); + if (tmp.x >= roi.xmin && tmp.x <= roi.xmax && tmp.y >= roi.ymin && + tmp.y <= roi.ymax) { + clusters.push_back(tmp.x, tmp.y, + reinterpret_cast(tmp.data)); + nph_read++; + } + } + + m_num_left = nph - nn; // write back the number of photons left + } + + if (nph_read < n_clusters) { + // keep on reading frames and photons until reaching n_clusters + while (fread(&iframe, sizeof(iframe), 1, fp)) { + // read number of clusters in frame + if (fread(&nph, sizeof(nph), 1, fp)) { + if (nph > (n_clusters - nph_read)) + nn = n_clusters - nph_read; + else + nn = nph; + + // nph_read += fread((buf + nph_read*clusters.item_size()), + // clusters.item_size(), nn, fp); + for (size_t i = 0; i < nn; i++) { + fread(&tmp, sizeof(tmp), 1, fp); + if (tmp.x >= roi.xmin && tmp.x <= roi.xmax && + tmp.y >= roi.ymin && tmp.y <= roi.ymax) { + clusters.push_back( + tmp.x, tmp.y, + reinterpret_cast(tmp.data)); + nph_read++; + } + } + m_num_left = nph - nn; + } + if (nph_read >= n_clusters) + break; + } + } + + // Resize the vector to the number of clusters. + // No new allocation, only change bounds. 
+ clusters.resize(nph_read); + return clusters; +} + +template +ClusterVector ClusterFile::read_frame() { + if (m_mode != "r") { + throw std::runtime_error("File not opened for reading"); + } + if (m_num_left) { + throw std::runtime_error( + "There are still photons left in the last frame"); + } + int32_t frame_number; + if (fread(&frame_number, sizeof(frame_number), 1, fp) != 1) { + throw std::runtime_error("Could not read frame number"); + } + + int32_t n_clusters; // Saved as 32bit integer in the cluster file + if (fread(&n_clusters, sizeof(n_clusters), 1, fp) != 1) { + throw std::runtime_error("Could not read number of clusters"); + } + // std::vector clusters(n_clusters); + ClusterVector clusters(n_clusters); + clusters.set_frame_number(frame_number); + + if (fread(clusters.data(), clusters.item_size(), n_clusters, fp) != + static_cast(n_clusters)) { + throw std::runtime_error("Could not read clusters"); + } + clusters.resize(n_clusters); + return clusters; +} + +template >> +NDArray calculate_eta2(const ClusterVector &clusters) { + // TOTO! make work with 2x2 clusters + NDArray eta2({static_cast(clusters.size()), 2}); + + for (size_t i = 0; i < clusters.size(); i++) { + auto e = calculate_eta2(clusters.at(i)); + eta2(i, 0) = e.x; + eta2(i, 1) = e.y; + } + + return eta2; +} + +/** + * @brief Calculate the eta2 values for a generic sized cluster and return them + * in a Eta2 struct containing etay, etax and the index of the respective 2x2 + * subcluster. 
+ */ +template +Eta2 calculate_eta2( + const Cluster &cl) { + Eta2 eta{}; + + // TODO loads of overhead for a 2x2 clsuter maybe keep 2x2 calculation + constexpr size_t num_2x2_subclusters = + (ClusterSizeX - 1) * (ClusterSizeY - 1); + std::array sum_2x2_subcluster; + for (size_t i = 0; i < ClusterSizeY - 1; ++i) { + for (size_t j = 0; j < ClusterSizeX - 1; ++j) + sum_2x2_subcluster[i * (ClusterSizeX - 1) + j] = + cl.data[i * ClusterSizeX + j] + + cl.data[i * ClusterSizeX + j + 1] + + cl.data[(i + 1) * ClusterSizeX + j] + + cl.data[(i + 1) * ClusterSizeX + j + 1]; + } + + auto c = + std::max_element(sum_2x2_subcluster.begin(), sum_2x2_subcluster.end()) - + sum_2x2_subcluster.begin(); + + eta.sum = sum_2x2_subcluster[c]; + + eta.x = static_cast(cl.data[(c + 1) * ClusterSizeX + 1]) / + (cl.data[0] + cl.data[1]); + + size_t index_top_left_2x2_subcluster = + (int(c / (ClusterSizeX - 1)) + 1) * ClusterSizeX + + c % (ClusterSizeX - 1) * 2 + 1; + if ((cl.data[index_top_left_2x2_subcluster] + + cl.data[index_top_left_2x2_subcluster - 1]) != 0) + eta.x = + static_cast(cl.data[index_top_left_2x2_subcluster] / + (cl.data[index_top_left_2x2_subcluster] + + cl.data[index_top_left_2x2_subcluster - 1])); + + if ((cl.data[index_top_left_2x2_subcluster] + + cl.data[index_top_left_2x2_subcluster - ClusterSizeX]) != 0) + eta.y = static_cast( + cl.data[index_top_left_2x2_subcluster] / + (cl.data[index_top_left_2x2_subcluster] + + cl.data[index_top_left_2x2_subcluster - ClusterSizeX])); + + eta.c = c; // TODO only supported for 2x2 and 3x3 clusters -> at least no + // underyling enum class + return eta; +} + +/** + * @brief Calculate the eta2 values for a 3x3 cluster and return them in a Eta2 + * struct containing etay, etax and the corner of the cluster. 
+ */ +template Eta2 calculate_eta2(const Cluster &cl) { + Eta2 eta{}; + + std::array tot2; + tot2[0] = cl.data[0] + cl.data[1] + cl.data[3] + cl.data[4]; + tot2[1] = cl.data[1] + cl.data[2] + cl.data[4] + cl.data[5]; + tot2[2] = cl.data[3] + cl.data[4] + cl.data[6] + cl.data[7]; + tot2[3] = cl.data[4] + cl.data[5] + cl.data[7] + cl.data[8]; + + auto c = std::max_element(tot2.begin(), tot2.end()) - tot2.begin(); + eta.sum = tot2[c]; + switch (c) { + case cBottomLeft: + if ((cl.data[3] + cl.data[4]) != 0) + eta.x = static_cast(cl.data[4]) / (cl.data[3] + cl.data[4]); + if ((cl.data[1] + cl.data[4]) != 0) + eta.y = static_cast(cl.data[4]) / (cl.data[1] + cl.data[4]); + eta.c = cBottomLeft; + break; + case cBottomRight: + if ((cl.data[2] + cl.data[5]) != 0) + eta.x = static_cast(cl.data[5]) / (cl.data[4] + cl.data[5]); + if ((cl.data[1] + cl.data[4]) != 0) + eta.y = static_cast(cl.data[4]) / (cl.data[1] + cl.data[4]); + eta.c = cBottomRight; + break; + case cTopLeft: + if ((cl.data[7] + cl.data[4]) != 0) + eta.x = static_cast(cl.data[4]) / (cl.data[3] + cl.data[4]); + if ((cl.data[7] + cl.data[4]) != 0) + eta.y = static_cast(cl.data[7]) / (cl.data[7] + cl.data[4]); + eta.c = cTopLeft; + break; + case cTopRight: + if ((cl.data[5] + cl.data[4]) != 0) + eta.x = static_cast(cl.data[5]) / (cl.data[5] + cl.data[4]); + if ((cl.data[7] + cl.data[4]) != 0) + eta.y = static_cast(cl.data[7]) / (cl.data[7] + cl.data[4]); + eta.c = cTopRight; + break; + } + return eta; +} + +template Eta2 calculate_eta2(const Cluster &cl) { + Eta2 eta{}; + + eta.x = static_cast(cl.data[1]) / (cl.data[0] + cl.data[1]); + eta.y = static_cast(cl.data[2]) / (cl.data[0] + cl.data[2]); + eta.sum = cl.data[0] + cl.data[1] + cl.data[2] + cl.data[3]; + eta.c = cBottomLeft; // TODO! This is not correct, but need to put something + return eta; +} + +// TODO complicated API simplify? 
+int analyze_cluster(Cluster &cl, int32_t *t2, int32_t *t3, + char *quad, double *eta2x, double *eta2y, double *eta3x, + double *eta3y) { + + return analyze_data(cl.data, t2, t3, quad, eta2x, eta2y, eta3x, eta3y); +} + +int analyze_data(int32_t *data, int32_t *t2, int32_t *t3, char *quad, + double *eta2x, double *eta2y, double *eta3x, double *eta3y) { + + int ok = 1; + + int32_t tot2[4]; + int32_t t2max = 0; + char c = 0; + int32_t val, tot3; + + tot3 = 0; + for (int i = 0; i < 4; i++) + tot2[i] = 0; + + for (int ix = 0; ix < 3; ix++) { + for (int iy = 0; iy < 3; iy++) { + val = data[iy * 3 + ix]; + // printf ("%d ",data[iy * 3 + ix]); + tot3 += val; + if (ix <= 1 && iy <= 1) + tot2[cBottomLeft] += val; + if (ix >= 1 && iy <= 1) + tot2[cBottomRight] += val; + if (ix <= 1 && iy >= 1) + tot2[cTopLeft] += val; + if (ix >= 1 && iy >= 1) + tot2[cTopRight] += val; + } + // printf ("\n"); + } + // printf ("\n"); + + if (t2 || quad) { + + t2max = tot2[0]; + c = cBottomLeft; + for (int i = 1; i < 4; i++) { + if (tot2[i] > t2max) { + t2max = tot2[i]; + c = i; + } + } + // printf("*** %d %d %d %d -- + // %d\n",tot2[0],tot2[1],tot2[2],tot2[3],t2max); + if (quad) + *quad = c; + if (t2) + *t2 = t2max; + } + + if (t3) + *t3 = tot3; + + if (eta2x || eta2y) { + if (eta2x) + *eta2x = 0; + if (eta2y) + *eta2y = 0; + switch (c) { + case cBottomLeft: + if (eta2x && (data[3] + data[4]) != 0) + *eta2x = static_cast(data[4]) / (data[3] + data[4]); + if (eta2y && (data[1] + data[4]) != 0) + *eta2y = static_cast(data[4]) / (data[1] + data[4]); + break; + case cBottomRight: + if (eta2x && (data[2] + data[5]) != 0) + *eta2x = static_cast(data[5]) / (data[4] + data[5]); + if (eta2y && (data[1] + data[4]) != 0) + *eta2y = static_cast(data[4]) / (data[1] + data[4]); + break; + case cTopLeft: + if (eta2x && (data[7] + data[4]) != 0) + *eta2x = static_cast(data[4]) / (data[3] + data[4]); + if (eta2y && (data[7] + data[4]) != 0) + *eta2y = static_cast(data[7]) / (data[7] + data[4]); + break; + case 
cTopRight: + if (eta2x && t2max != 0) + *eta2x = static_cast(data[5]) / (data[5] + data[4]); + if (eta2y && t2max != 0) + *eta2y = static_cast(data[7]) / (data[7] + data[4]); + break; + default:; + } + } + + if (eta3x || eta3y) { + if (eta3x && (data[3] + data[4] + data[5]) != 0) + *eta3x = static_cast(-data[3] + data[3 + 2]) / + (data[3] + data[4] + data[5]); + if (eta3y && (data[1] + data[4] + data[7]) != 0) + *eta3y = static_cast(-data[1] + data[2 * 3 + 1]) / + (data[1] + data[4] + data[7]); + } + + return ok; +} } // namespace aare diff --git a/include/aare/ClusterVector.hpp b/include/aare/ClusterVector.hpp index c5e66b7..ec0fa40 100644 --- a/include/aare/ClusterVector.hpp +++ b/include/aare/ClusterVector.hpp @@ -1,4 +1,5 @@ #pragma once +#include "aare/Cluster.hpp" //TODO maybe store in seperate file !!! #include #include #include @@ -10,7 +11,9 @@ namespace aare { -template class ClusterVector; // Forward declaration +template >> +class ClusterVector; // Forward declaration /** * @brief ClusterVector is a container for clusters of various sizes. It uses a @@ -44,12 +47,10 @@ class ClusterVector> { constexpr static char m_fmt_base[] = "=h:x:\nh:y:\n({},{}){}:data:"; public: - using ClusterType = Cluster; + using ClusterType = Cluster; /** * @brief Construct a new ClusterVector object - * @param cluster_size_x size of the cluster in x direction - * @param cluster_size_y size of the cluster in y direction * @param capacity initial capacity of the buffer in number of clusters * @param frame_number frame number of the clusters. Default is 0, which is * also used to indicate that the clusters come from many frames @@ -184,6 +185,10 @@ class ClusterVector> { */ size_t size() const { return m_size; } + uint8_t cluster_size_x() const { return ClusterSizeX; } + + uint8_t cluster_size_y() const { return ClusterSizeY; } + /** * @brief Return the capacity of the buffer in number of clusters. 
This is * the number of clusters that can be stored in the current buffer without diff --git a/include/aare/Interpolator.hpp b/include/aare/Interpolator.hpp index 4905bce..5843046 100644 --- a/include/aare/Interpolator.hpp +++ b/include/aare/Interpolator.hpp @@ -1,29 +1,35 @@ #pragma once + +#include "aare/Cluster.hpp" +#include "aare/ClusterFile.hpp" //Cluster_3x3 +#include "aare/ClusterVector.hpp" #include "aare/NDArray.hpp" #include "aare/NDView.hpp" -#include "aare/ClusterVector.hpp" -#include "aare/ClusterFile.hpp" //Cluster_3x3 -namespace aare{ +namespace aare { -struct Photon{ +struct Photon { double x; double y; double energy; }; -class Interpolator{ +class Interpolator { NDArray m_ietax; NDArray m_ietay; NDArray m_etabinsx; NDArray m_etabinsy; NDArray m_energy_bins; - public: - Interpolator(NDView etacube, NDView xbins, NDView ybins, NDView ebins); - NDArray get_ietax(){return m_ietax;} - NDArray get_ietay(){return m_ietay;} - std::vector interpolate(const ClusterVector& clusters); + public: + Interpolator(NDView etacube, NDView xbins, + NDView ybins, NDView ebins); + NDArray get_ietax() { return m_ietax; } + NDArray get_ietay() { return m_ietay; } + + template >> + std::vector interpolate(const ClusterVector &clusters); }; } // namespace aare \ No newline at end of file diff --git a/src/ClusterFile.cpp b/src/ClusterFile.cpp index c6ae470..0fc5764 100644 --- a/src/ClusterFile.cpp +++ b/src/ClusterFile.cpp @@ -4,8 +4,11 @@ namespace aare { -ClusterFile::ClusterFile(const std::filesystem::path &fname, size_t chunk_size, - const std::string &mode) +template >> +ClusterFile::ClusterFile(const std::filesystem::path &fname, + size_t chunk_size, + const std::string &mode) : m_chunk_size(chunk_size), m_mode(mode) { if (mode == "r") { @@ -31,16 +34,21 @@ ClusterFile::ClusterFile(const std::filesystem::path &fname, size_t chunk_size, } } -ClusterFile::~ClusterFile() { close(); } +template ClusterFile::~ClusterFile() { + close(); +} -void ClusterFile::close() { 
+template void ClusterFile::close() { if (fp) { fclose(fp); fp = nullptr; } } -void ClusterFile::write_frame(const ClusterVector &clusters) { +// TODO generally supported for all clsuter types +template +void ClusterFile::write_frame( + const ClusterVector &clusters) { if (m_mode != "w" && m_mode != "a") { throw std::runtime_error("File not opened for writing"); } @@ -55,12 +63,14 @@ void ClusterFile::write_frame(const ClusterVector &clusters) { fwrite(clusters.data(), clusters.item_size(), clusters.size(), fp); } -ClusterVector ClusterFile::read_clusters(size_t n_clusters) { +template +ClusterVector +ClusterFile::read_clusters(size_t n_clusters) { if (m_mode != "r") { throw std::runtime_error("File not opened for reading"); } - ClusterVector clusters(3, 3, n_clusters); + ClusterVector clusters(n_clusters); int32_t iframe = 0; // frame number needs to be 4 bytes! size_t nph_read = 0; @@ -108,12 +118,14 @@ ClusterVector ClusterFile::read_clusters(size_t n_clusters) { return clusters; } -ClusterVector ClusterFile::read_clusters(size_t n_clusters, ROI roi) { +template +ClusterVector +ClusterFile::read_clusters(size_t n_clusters, ROI roi) { if (m_mode != "r") { throw std::runtime_error("File not opened for reading"); } - ClusterVector clusters(3, 3); + ClusterVector clusters; clusters.reserve(n_clusters); int32_t iframe = 0; // frame number needs to be 4 bytes! 
@@ -124,7 +136,7 @@ ClusterVector ClusterFile::read_clusters(size_t n_clusters, ROI roi) { // auto buf = reinterpret_cast(clusters.data()); // auto buf = clusters.data(); - Cluster3x3 tmp; // this would break if the cluster size changes + ClusterType tmp; // this would break if the cluster size changes // if there are photons left from previous frame read them first if (nph) { @@ -186,7 +198,8 @@ ClusterVector ClusterFile::read_clusters(size_t n_clusters, ROI roi) { return clusters; } -ClusterVector ClusterFile::read_frame() { +template +ClusterVector ClusterFile::read_frame() { if (m_mode != "r") { throw std::runtime_error("File not opened for reading"); } @@ -204,7 +217,7 @@ ClusterVector ClusterFile::read_frame() { throw std::runtime_error("Could not read number of clusters"); } // std::vector clusters(n_clusters); - ClusterVector clusters(3, 3, n_clusters); + ClusterVector clusters(n_clusters); clusters.set_frame_number(frame_number); if (fread(clusters.data(), clusters.item_size(), n_clusters, fp) != @@ -372,8 +385,9 @@ Eta2 calculate_eta2(Cluster &cl) { Eta2 eta{}; // TODO loads of overhead for a 2x2 clsuter maybe keep 2x2 calculation - size_t num_2x2_subclusters = (ClusterSizeX - 1) * (ClusterSizeY - 1); - std::array sum_2x2_subcluster; + constexpr size_t num_2x2_subclusters = + (ClusterSizeX - 1) * (ClusterSizeY - 1); + std::array sum_2x2_subcluster; for (size_t i = 0; i < ClusterSizeY - 1; ++i) { for (size_t j = 0; j < ClusterSizeX - 1; ++j) sum_2x2_subcluster[i * (ClusterSizeX - 1) + j] = @@ -383,9 +397,9 @@ Eta2 calculate_eta2(Cluster &cl) { cl.data[(i + 1) * ClusterSizeX + j + 1]; } - auto c = std::max_element(sum_2x2_subclusters.begin(), - sum_2x2_subcluster.end()) - - sum_2x2_subcluster.begin(); + auto c = + std::max_element(sum_2x2_subcluster.begin(), sum_2x2_subcluster.end()) - + sum_2x2_subcluster.begin(); eta.sum = sum_2x2_subcluster[c]; @@ -458,7 +472,6 @@ template Eta2 calculate_eta2(Cluster &cl) { eta.y = static_cast(cl.data[7]) / (cl.data[7] + 
cl.data[4]); eta.c = cTopRight; break; - // no default to allow compiler to warn about missing cases } return eta; } @@ -473,8 +486,9 @@ template Eta2 calculate_eta2(Cluster &cl) { return eta; } -int analyze_cluster(Cluster3x3 &cl, int32_t *t2, int32_t *t3, char *quad, - double *eta2x, double *eta2y, double *eta3x, +// TODO complicated API simplify? +int analyze_cluster(Cluster &cl, int32_t *t2, int32_t *t3, + char *quad, double *eta2x, double *eta2y, double *eta3x, double *eta3y) { return analyze_data(cl.data, t2, t3, quad, eta2x, eta2y, eta3x, eta3y); diff --git a/src/Interpolator.cpp b/src/Interpolator.cpp index 85e0b5d..d95405a 100644 --- a/src/Interpolator.cpp +++ b/src/Interpolator.cpp @@ -5,7 +5,8 @@ namespace aare { Interpolator::Interpolator(NDView etacube, NDView xbins, NDView ybins, NDView ebins) - : m_ietax(etacube), m_ietay(etacube), m_etabinsx(xbins), m_etabinsy(ybins), m_energy_bins(ebins) { + : m_ietax(etacube), m_ietay(etacube), m_etabinsx(xbins), m_etabinsy(ybins), + m_energy_bins(ebins) { if (etacube.shape(0) != xbins.size() || etacube.shape(1) != ybins.size() || etacube.shape(2) != ebins.size()) { throw std::invalid_argument( @@ -51,35 +52,37 @@ Interpolator::Interpolator(NDView etacube, NDView xbins, } } } - } -std::vector Interpolator::interpolate(const ClusterVector& clusters) { +template >> +std::vector +Interpolator::interpolate(const ClusterVector &clusters) { std::vector photons; photons.reserve(clusters.size()); if (clusters.cluster_size_x() == 3 || clusters.cluster_size_y() == 3) { - for (size_t i = 0; i(i); - Eta2 eta= calculate_eta2(cluster); - Photon photon; photon.x = cluster.x; photon.y = cluster.y; photon.energy = eta.sum; - + // auto ie = nearest_index(m_energy_bins, photon.energy)-1; // auto ix = nearest_index(m_etabinsx, eta.x)-1; - // auto iy = nearest_index(m_etabinsy, eta.y)-1; - //Finding the index of the last element that is smaller - //should work fine as long as we have many bins + // auto iy = nearest_index(m_etabinsy, 
eta.y)-1; + // Finding the index of the last element that is smaller + // should work fine as long as we have many bins auto ie = last_smaller(m_energy_bins, photon.energy); auto ix = last_smaller(m_etabinsx, eta.x); - auto iy = last_smaller(m_etabinsy, eta.y); + auto iy = last_smaller(m_etabinsy, eta.y); // fmt::print("ex: {}, ix: {}, iy: {}\n", ie, ix, iy); - + double dX, dY; int ex, ey; // cBottomLeft = 0, @@ -100,44 +103,47 @@ std::vector Interpolator::interpolate(const ClusterVector& clus dY = -1.; break; case cBottomRight: - dX = 0.; + dX = 0.; dY = -1.; break; } - photon.x += m_ietax(ix, iy, 0)*2 + dX; - photon.y += m_ietay(ix, iy, 0)*2 + dY; + photon.x += m_ietax(ix, iy, 0) * 2 + dX; + photon.y += m_ietay(ix, iy, 0) * 2 + dY; photons.push_back(photon); } - }else if(clusters.cluster_size_x() == 2 || clusters.cluster_size_y() == 2){ - for (size_t i = 0; i(i); - Eta2 eta= calculate_eta2(cluster); - + } else if (clusters.cluster_size_x() == 2 || + clusters.cluster_size_y() == 2) { + for (size_t i = 0; i < clusters.size(); i++) { + auto cluster = clusters.at(i); + Eta2 eta = calculate_eta2(cluster); + Photon photon; photon.x = cluster.x; photon.y = cluster.y; photon.energy = eta.sum; - - //Now do some actual interpolation. - //Find which energy bin the cluster is in - // auto ie = nearest_index(m_energy_bins, photon.energy)-1; - // auto ix = nearest_index(m_etabinsx, eta.x)-1; - // auto iy = nearest_index(m_etabinsy, eta.y)-1; - //Finding the index of the last element that is smaller - //should work fine as long as we have many bins + + // Now do some actual interpolation. 
+ // Find which energy bin the cluster is in + // auto ie = nearest_index(m_energy_bins, photon.energy)-1; + // auto ix = nearest_index(m_etabinsx, eta.x)-1; + // auto iy = nearest_index(m_etabinsy, eta.y)-1; + // Finding the index of the last element that is smaller + // should work fine as long as we have many bins auto ie = last_smaller(m_energy_bins, photon.energy); auto ix = last_smaller(m_etabinsx, eta.x); - auto iy = last_smaller(m_etabinsy, eta.y); + auto iy = last_smaller(m_etabinsy, eta.y); - photon.x += m_ietax(ix, iy, 0)*2; //eta goes between 0 and 1 but we could move the hit anywhere in the 2x2 - photon.y += m_ietay(ix, iy, 0)*2; + photon.x += + m_ietax(ix, iy, 0) * 2; // eta goes between 0 and 1 but we could + // move the hit anywhere in the 2x2 + photon.y += m_ietay(ix, iy, 0) * 2; photons.push_back(photon); } - - }else{ - throw std::runtime_error("Only 3x3 and 2x2 clusters are supported for interpolation"); + + } else { + throw std::runtime_error( + "Only 3x3 and 2x2 clusters are supported for interpolation"); } - return photons; } From f8f98b6ec3438902d4c6b23008e01fefe331bc76 Mon Sep 17 00:00:00 2001 From: AliceMazzoleni99 Date: Fri, 28 Mar 2025 14:29:20 +0100 Subject: [PATCH 051/120] Generalized calculate_eta2 function to work with general cluster types --- CMakeLists.txt | 14 ++++---- include/aare/ClusterFile.hpp | 63 +++++++++++++++++----------------- src/Cluster.test.cpp | 65 ++++++++++++++++++++++++++++++++++++ 3 files changed, 106 insertions(+), 36 deletions(-) create mode 100644 src/Cluster.test.cpp diff --git a/CMakeLists.txt b/CMakeLists.txt index 4772f0b..568d868 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -31,7 +31,7 @@ set(CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake" ${CMAKE_MODULE_PATH}) # General options -option(AARE_PYTHON_BINDINGS "Build python bindings" ON) +option(AARE_PYTHON_BINDINGS "Build python bindings" OFF) option(AARE_TESTS "Build tests" OFF) option(AARE_BENCHMARKS "Build benchmarks" OFF) 
option(AARE_EXAMPLES "Build examples" OFF) @@ -307,7 +307,8 @@ endif() set(PUBLICHEADERS include/aare/ArrayExpr.hpp - include/aare/ClusterFinder.hpp + include/aare/Cluster.hpp + #include/aare/ClusterFinder.hpp include/aare/ClusterFile.hpp include/aare/CtbRawFile.hpp include/aare/ClusterVector.hpp @@ -328,7 +329,7 @@ set(PUBLICHEADERS include/aare/RawFile.hpp include/aare/RawMasterFile.hpp include/aare/RawSubFile.hpp - include/aare/VarClusterFinder.hpp + #include/aare/VarClusterFinder.hpp include/aare/utils/task.hpp ) @@ -336,7 +337,7 @@ set(PUBLICHEADERS set(SourceFiles ${CMAKE_CURRENT_SOURCE_DIR}/src/CtbRawFile.cpp - ${CMAKE_CURRENT_SOURCE_DIR}/src/ClusterFile.cpp + #${CMAKE_CURRENT_SOURCE_DIR}/src/ClusterFile.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/defs.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/Dtype.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/decode.cpp @@ -394,8 +395,9 @@ if(AARE_TESTS) ${CMAKE_CURRENT_SOURCE_DIR}/src/RawMasterFile.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/NDArray.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/NDView.test.cpp - ${CMAKE_CURRENT_SOURCE_DIR}/src/ClusterFinder.test.cpp - ${CMAKE_CURRENT_SOURCE_DIR}/src/ClusterVector.test.cpp + #${CMAKE_CURRENT_SOURCE_DIR}/src/ClusterFinder.test.cpp + #${CMAKE_CURRENT_SOURCE_DIR}/src/ClusterVector.test.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/Cluster.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/Pedestal.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyFile.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyHelpers.test.cpp diff --git a/include/aare/ClusterFile.hpp b/include/aare/ClusterFile.hpp index d61ee2b..3a7dab8 100644 --- a/include/aare/ClusterFile.hpp +++ b/include/aare/ClusterFile.hpp @@ -28,10 +28,11 @@ typedef enum { pTopRight = 8 } pixel; +// TODO: maybe template this!!!!!! why int32_t???? 
struct Eta2 { double x; double y; - corner c; + int c; int32_t sum; }; @@ -131,22 +132,22 @@ int analyze_cluster(Cluster &cl, int32_t *t2, int32_t *t3, char *quad, double *eta2x, double *eta2y, double *eta3x, double *eta3y); -template >> -NDArray calculate_eta2(ClusterVector &clusters); +// template >> +// NDArray calculate_eta2(ClusterVector &clusters); // TODO: do we need rquire clauses? -template Eta2 calculate_eta2(const Cluster &cl); +// template Eta2 calculate_eta2(const Cluster &cl); -template Eta2 calculate_eta2(const Cluster &cl); +// template Eta2 calculate_eta2(const Cluster &cl); -template >> -Eta2 calculate_eta2(const ClusterType &cl); +// template >> +// Eta2 calculate_eta2(const ClusterType &cl); -template -Eta2 calculate_eta2( - const Cluster &cl); +// template +// Eta2 calculate_eta2( +// const Cluster &cl); template ClusterFile::ClusterFile( @@ -379,7 +380,7 @@ NDArray calculate_eta2(const ClusterVector &clusters) { NDArray eta2({static_cast(clusters.size()), 2}); for (size_t i = 0; i < clusters.size(); i++) { - auto e = calculate_eta2(clusters.at(i)); + auto e = calculate_eta2(clusters.at(i)); eta2(i, 0) = e.x; eta2(i, 1) = e.y; } @@ -417,25 +418,23 @@ Eta2 calculate_eta2( eta.sum = sum_2x2_subcluster[c]; - eta.x = static_cast(cl.data[(c + 1) * ClusterSizeX + 1]) / - (cl.data[0] + cl.data[1]); + size_t index_bottom_left_max_2x2_subcluster = + (int(c / (ClusterSizeX - 1))) * ClusterSizeX + c % (ClusterSizeX - 1); - size_t index_top_left_2x2_subcluster = - (int(c / (ClusterSizeX - 1)) + 1) * ClusterSizeX + - c % (ClusterSizeX - 1) * 2 + 1; - if ((cl.data[index_top_left_2x2_subcluster] + - cl.data[index_top_left_2x2_subcluster - 1]) != 0) - eta.x = - static_cast(cl.data[index_top_left_2x2_subcluster] / - (cl.data[index_top_left_2x2_subcluster] + - cl.data[index_top_left_2x2_subcluster - 1])); + if ((cl.data[index_bottom_left_max_2x2_subcluster] + + cl.data[index_bottom_left_max_2x2_subcluster + 1]) != 0) + eta.x = static_cast( + 
cl.data[index_bottom_left_max_2x2_subcluster + 1]) / + (cl.data[index_bottom_left_max_2x2_subcluster] + + cl.data[index_bottom_left_max_2x2_subcluster + 1]); - if ((cl.data[index_top_left_2x2_subcluster] + - cl.data[index_top_left_2x2_subcluster - ClusterSizeX]) != 0) - eta.y = static_cast( - cl.data[index_top_left_2x2_subcluster] / - (cl.data[index_top_left_2x2_subcluster] + - cl.data[index_top_left_2x2_subcluster - ClusterSizeX])); + if ((cl.data[index_bottom_left_max_2x2_subcluster] + + cl.data[index_bottom_left_max_2x2_subcluster + ClusterSizeX]) != 0) + eta.y = + static_cast( + cl.data[index_bottom_left_max_2x2_subcluster + ClusterSizeX]) / + (cl.data[index_bottom_left_max_2x2_subcluster] + + cl.data[index_bottom_left_max_2x2_subcluster + ClusterSizeX]); eta.c = c; // TODO only supported for 2x2 and 3x3 clusters -> at least no // underyling enum class @@ -446,6 +445,7 @@ Eta2 calculate_eta2( * @brief Calculate the eta2 values for a 3x3 cluster and return them in a Eta2 * struct containing etay, etax and the corner of the cluster. */ +/* template Eta2 calculate_eta2(const Cluster &cl) { Eta2 eta{}; @@ -489,7 +489,9 @@ template Eta2 calculate_eta2(const Cluster &cl) { } return eta; } +*/ +/* template Eta2 calculate_eta2(const Cluster &cl) { Eta2 eta{}; @@ -499,6 +501,7 @@ template Eta2 calculate_eta2(const Cluster &cl) { eta.c = cBottomLeft; // TODO! This is not correct, but need to put something return eta; } +*/ // TODO complicated API simplify? 
int analyze_cluster(Cluster &cl, int32_t *t2, int32_t *t3, diff --git a/src/Cluster.test.cpp b/src/Cluster.test.cpp new file mode 100644 index 0000000..de53e6e --- /dev/null +++ b/src/Cluster.test.cpp @@ -0,0 +1,65 @@ +/************************************************ + * @file test-Cluster.cpp + * @short test case for generic Cluster, ClusterVector, and calculate_eta2 + ***********************************************/ + +#include "aare/Cluster.hpp" +#include "aare/ClusterFile.hpp" + +// #include "catch.hpp" +#include +#include +#include + +using namespace aare; + +/* +TEST_CASE("Correct Instantiation of Cluster and ClusterVector", + "[.cluster][.instantiation]") { + + + REQUIRE(not std::is_constructible_v>); + + // all 1,2 and 0,4 are not defined!! + std::make_tuple(Cluster, ), + std::make_tuple(Cluster, ) + + +} +*/ + +using ClusterTypes = + std::variant, Cluster, Cluster, + Cluster, Cluster>; + +TEST_CASE("calculate_eta2", "[.cluster][.instantiation]") { + + // weird expect cluster_start to be in bottom_left corner -> row major -> + // check how its used should be an image!! + + auto [cluster, expected_eta] = GENERATE( + std::make_tuple(ClusterTypes{Cluster{0, 0, {1, 2, 3, 1}}}, + Eta2{2. / 3, 3. / 4, corner::cBottomLeft, 7}), + std::make_tuple( + ClusterTypes{Cluster{0, 0, {1, 2, 3, 4, 5, 6, 1, 2, 7}}}, + Eta2{6. / 11, 2. / 7, corner::cTopRight, 20}), + std::make_tuple(ClusterTypes{Cluster{ + 0, 0, {1, 6, 7, 6, 5, 4, 3, 2, 1, 8, 8, 9, 2, + 1, 4, 5, 6, 7, 8, 4, 1, 1, 1, 1, 1}}}, + Eta2{9. / 17, 5. / 13, 8, 28}), + std::make_tuple( + ClusterTypes{Cluster{0, 0, {1, 4, 7, 2, 5, 6, 4, 3}}}, + Eta2{7. / 11, 6. / 10, 1, 21}), + std::make_tuple( + ClusterTypes{Cluster{0, 0, {1, 3, 2, 3, 4, 2}}}, + Eta2{3. / 5, 4. 
/ 6, 1, 11})); + + Eta2 eta = std::visit( + [](const auto &clustertype) { return calculate_eta2(clustertype); }, + cluster); + + CHECK(eta.x == expected_eta.x); + CHECK(eta.y == expected_eta.y); + CHECK(eta.c == expected_eta.c); + CHECK(eta.sum == expected_eta.sum); +} From 57bb6c71ae02ce1b190e72c3be3931d9bf979a5e Mon Sep 17 00:00:00 2001 From: AliceMazzoleni99 Date: Fri, 28 Mar 2025 14:49:55 +0100 Subject: [PATCH 052/120] ClusterSize should be larger than 1 --- include/aare/Cluster.hpp | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/include/aare/Cluster.hpp b/include/aare/Cluster.hpp index 74f5281..90701ea 100644 --- a/include/aare/Cluster.hpp +++ b/include/aare/Cluster.hpp @@ -16,8 +16,9 @@ namespace aare { // requires clause c++20 maybe update template && - std::is_integral_v>> + typename Enable = std::enable_if_t< + std::is_arithmetic_v && std::is_integral_v && + (ClusterSizeX > 1) && (ClusterSizeY > 1)>> struct Cluster { CoordType x; CoordType y; From ed9ef7c600324820ef3f9726d283d235d02cb60c Mon Sep 17 00:00:00 2001 From: Mazzoleni Alice Francesca Date: Mon, 31 Mar 2025 12:26:29 +0200 Subject: [PATCH 053/120] removed analyze_cluster function as not used anymore --- include/aare/ClusterFile.hpp | 109 +++++------------------------------ 1 file changed, 16 insertions(+), 93 deletions(-) diff --git a/include/aare/ClusterFile.hpp b/include/aare/ClusterFile.hpp index 3a7dab8..836565b 100644 --- a/include/aare/ClusterFile.hpp +++ b/include/aare/ClusterFile.hpp @@ -503,111 +503,34 @@ template Eta2 calculate_eta2(const Cluster &cl) { } */ -// TODO complicated API simplify? 
-int analyze_cluster(Cluster &cl, int32_t *t2, int32_t *t3, - char *quad, double *eta2x, double *eta2y, double *eta3x, - double *eta3y) { +// calculates Eta3 for 3x3 cluster based on code from analyze_cluster +// TODO only supported for 3x3 Clusters +template Eta2 calculate_eta3(const Cluster &cl) { - return analyze_data(cl.data, t2, t3, quad, eta2x, eta2y, eta3x, eta3y); -} + Eta2 eta{}; -int analyze_data(int32_t *data, int32_t *t2, int32_t *t3, char *quad, - double *eta2x, double *eta2y, double *eta3x, double *eta3y) { + T sum = 0; - int ok = 1; + std::for_each(std::begin(cl.data), std::end(cl.data), + [&sum](T x) { sum += x; }); - int32_t tot2[4]; - int32_t t2max = 0; - char c = 0; - int32_t val, tot3; + eta.sum = sum; - tot3 = 0; - for (int i = 0; i < 4; i++) - tot2[i] = 0; + eta.c = corner::cBottomLeft; - for (int ix = 0; ix < 3; ix++) { - for (int iy = 0; iy < 3; iy++) { - val = data[iy * 3 + ix]; - // printf ("%d ",data[iy * 3 + ix]); - tot3 += val; - if (ix <= 1 && iy <= 1) - tot2[cBottomLeft] += val; - if (ix >= 1 && iy <= 1) - tot2[cBottomRight] += val; - if (ix <= 1 && iy >= 1) - tot2[cTopLeft] += val; - if (ix >= 1 && iy >= 1) - tot2[cTopRight] += val; - } - // printf ("\n"); - } - // printf ("\n"); + if ((cl.data[3] + cl.data[4] + cl.data[5]) != 0) - if (t2 || quad) { + eta.x = static_cast(-cl.data[3] + cl.data[3 + 2]) / - t2max = tot2[0]; - c = cBottomLeft; - for (int i = 1; i < 4; i++) { - if (tot2[i] > t2max) { - t2max = tot2[i]; - c = i; - } - } - // printf("*** %d %d %d %d -- - // %d\n",tot2[0],tot2[1],tot2[2],tot2[3],t2max); - if (quad) - *quad = c; - if (t2) - *t2 = t2max; - } + (cl.data[3] + cl.data[4] + cl.data[5]); - if (t3) - *t3 = tot3; + if ((cl.data[1] + cl.data[4] + cl.data[7]) != 0) - if (eta2x || eta2y) { - if (eta2x) - *eta2x = 0; - if (eta2y) - *eta2y = 0; - switch (c) { - case cBottomLeft: - if (eta2x && (data[3] + data[4]) != 0) - *eta2x = static_cast(data[4]) / (data[3] + data[4]); - if (eta2y && (data[1] + data[4]) != 0) - *eta2y 
= static_cast(data[4]) / (data[1] + data[4]); - break; - case cBottomRight: - if (eta2x && (data[2] + data[5]) != 0) - *eta2x = static_cast(data[5]) / (data[4] + data[5]); - if (eta2y && (data[1] + data[4]) != 0) - *eta2y = static_cast(data[4]) / (data[1] + data[4]); - break; - case cTopLeft: - if (eta2x && (data[7] + data[4]) != 0) - *eta2x = static_cast(data[4]) / (data[3] + data[4]); - if (eta2y && (data[7] + data[4]) != 0) - *eta2y = static_cast(data[7]) / (data[7] + data[4]); - break; - case cTopRight: - if (eta2x && t2max != 0) - *eta2x = static_cast(data[5]) / (data[5] + data[4]); - if (eta2y && t2max != 0) - *eta2y = static_cast(data[7]) / (data[7] + data[4]); - break; - default:; - } - } + eta.y = static_cast(-cl.data[1] + cl.data[2 * 3 + 1]) / - if (eta3x || eta3y) { - if (eta3x && (data[3] + data[4] + data[5]) != 0) - *eta3x = static_cast(-data[3] + data[3 + 2]) / - (data[3] + data[4] + data[5]); - if (eta3y && (data[1] + data[4] + data[7]) != 0) - *eta3y = static_cast(-data[1] + data[2 * 3 + 1]) / - (data[1] + data[4] + data[7]); - } + (cl.data[1] + cl.data[4] + cl.data[7]); - return ok; + return eta; } } // namespace aare From 7e5f91c6ecf8095117325b960d3e0154535c0bc2 Mon Sep 17 00:00:00 2001 From: Mazzoleni Alice Francesca Date: Mon, 31 Mar 2025 17:04:57 +0200 Subject: [PATCH 054/120] added benchmark to time generalize calculate_eta - twice as long so will keep specific version for 2x2 and 3x3 clusters --- CMakeLists.txt | 10 ++-- benchmarks/CMakeLists.txt | 30 +++++++++--- benchmarks/calculateeta_benchmark.cpp | 66 +++++++++++++++++++++++++++ include/aare/ClusterFile.hpp | 5 +- 4 files changed, 95 insertions(+), 16 deletions(-) create mode 100644 benchmarks/calculateeta_benchmark.cpp diff --git a/CMakeLists.txt b/CMakeLists.txt index 568d868..51ed7f5 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -81,14 +81,14 @@ set(CMAKE_EXPORT_COMPILE_COMMANDS ON) if(AARE_FETCH_LMFIT) #TODO! Should we fetch lmfit from the web or inlcude a tar.gz in the repo? 
- set(lmfit_patch git apply ${CMAKE_CURRENT_SOURCE_DIR}/patches/lmfit.patch) + #set(lmfit_patch git apply ${CMAKE_CURRENT_SOURCE_DIR}/patches/lmfit.patch) FetchContent_Declare( lmfit GIT_REPOSITORY https://jugit.fz-juelich.de/mlz/lmfit.git GIT_TAG main - PATCH_COMMAND ${lmfit_patch} - UPDATE_DISCONNECTED 1 - EXCLUDE_FROM_ALL 1 + #PATCH_COMMAND ${lmfit_patch} + #UPDATE_DISCONNECTED 1 + #EXCLUDE_FROM_ALL 1 ) #Disable what we don't need from lmfit set(BUILD_TESTING OFF CACHE BOOL "") @@ -359,7 +359,7 @@ set(SourceFiles add_library(aare_core STATIC ${SourceFiles}) target_include_directories(aare_core PUBLIC "$" - "$" + "$" PRIVATE ${lmfit_SOURCE_DIR}/lib ) diff --git a/benchmarks/CMakeLists.txt b/benchmarks/CMakeLists.txt index d083bab..699b4c6 100644 --- a/benchmarks/CMakeLists.txt +++ b/benchmarks/CMakeLists.txt @@ -1,11 +1,27 @@ -find_package(benchmark REQUIRED) -add_executable(ndarray_benchmark ndarray_benchmark.cpp) +include(FetchContent) -target_link_libraries(ndarray_benchmark benchmark::benchmark aare_core aare_compiler_flags) -# target_link_libraries(tests PRIVATE aare_core aare_compiler_flags) -set_target_properties(ndarray_benchmark PROPERTIES - RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR} - # OUTPUT_NAME run_tests +FetchContent_Declare( + benchmark + GIT_REPOSITORY https://github.com/google/benchmark.git + GIT_TAG v1.8.3 # Change to the latest version if needed +) + +# Ensure Google Benchmark is built correctly +set(BENCHMARK_ENABLE_TESTING OFF CACHE BOOL "" FORCE) + +FetchContent_MakeAvailable(benchmark) + +add_executable(benchmarks) + +target_sources(benchmarks PRIVATE ndarray_benchmark.cpp calculateeta_benchmark.cpp) + +# Link Google Benchmark and other necessary libraries +target_link_libraries(benchmarks PRIVATE benchmark::benchmark aare_core aare_compiler_flags) + +# Set output properties +set_target_properties(benchmarks PROPERTIES + RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR} + OUTPUT_NAME run_benchmarks ) \ No newline at end of file diff --git 
a/benchmarks/calculateeta_benchmark.cpp b/benchmarks/calculateeta_benchmark.cpp new file mode 100644 index 0000000..dc7cd91 --- /dev/null +++ b/benchmarks/calculateeta_benchmark.cpp @@ -0,0 +1,66 @@ +#include "aare/ClusterFile.hpp" +#include + +using namespace aare; + +class ClusterFixture : public benchmark::Fixture { + public: + Cluster cluster_2x2{}; + Cluster cluster_3x3{}; + + void SetUp(::benchmark::State &state) { + int temp_data[4] = {1, 2, 3, 1}; + std::copy(std::begin(temp_data), std::end(temp_data), + std::begin(cluster_2x2.data)); + + cluster_2x2.x = 0; + cluster_2x2.y = 0; + + int temp_data2[9] = {1, 2, 3, 1, 3, 4, 5, 1, 20}; + std::copy(std::begin(temp_data2), std::end(temp_data2), + std::begin(cluster_3x3.data)); + + cluster_3x3.x = 0; + cluster_3x3.y = 0; + } + + // void TearDown(::benchmark::State& state) { + // } +}; + +BENCHMARK_F(ClusterFixture, Calculate2x2Eta)(benchmark::State &st) { + for (auto _ : st) { + // This code gets timed + Eta2 eta = calculate_eta2(cluster_2x2); + benchmark::DoNotOptimize(eta); + } +} + +// almost takes double the time +BENCHMARK_F(ClusterFixture, + CalculateGeneralEtaFor2x2Cluster)(benchmark::State &st) { + for (auto _ : st) { + // This code gets timed + Eta2 eta = calculate_eta2(cluster_2x2); + benchmark::DoNotOptimize(eta); + } +} + +BENCHMARK_F(ClusterFixture, Calculate3x3Eta)(benchmark::State &st) { + for (auto _ : st) { + // This code gets timed + Eta2 eta = calculate_eta2(cluster_3x3); + benchmark::DoNotOptimize(eta); + } +} + +// almost takes double the time +BENCHMARK_F(ClusterFixture, + CalculateGeneralEtaFor3x3Cluster)(benchmark::State &st) { + for (auto _ : st) { + // This code gets timed + Eta2 eta = calculate_eta2(cluster_3x3); + benchmark::DoNotOptimize(eta); + } +} +// BENCHMARK_MAIN(); \ No newline at end of file diff --git a/include/aare/ClusterFile.hpp b/include/aare/ClusterFile.hpp index 836565b..e9530f6 100644 --- a/include/aare/ClusterFile.hpp +++ b/include/aare/ClusterFile.hpp @@ -445,7 +445,7 
@@ Eta2 calculate_eta2( * @brief Calculate the eta2 values for a 3x3 cluster and return them in a Eta2 * struct containing etay, etax and the corner of the cluster. */ -/* + template Eta2 calculate_eta2(const Cluster &cl) { Eta2 eta{}; @@ -489,9 +489,7 @@ template Eta2 calculate_eta2(const Cluster &cl) { } return eta; } -*/ -/* template Eta2 calculate_eta2(const Cluster &cl) { Eta2 eta{}; @@ -501,7 +499,6 @@ template Eta2 calculate_eta2(const Cluster &cl) { eta.c = cBottomLeft; // TODO! This is not correct, but need to put something return eta; } -*/ // calculates Eta3 for 3x3 cluster based on code from analyze_cluster // TODO only supported for 3x3 Clusters From e038bd16469a41605c0f6e7c0177c3948b83e1a7 Mon Sep 17 00:00:00 2001 From: Mazzoleni Alice Francesca Date: Mon, 31 Mar 2025 17:35:39 +0200 Subject: [PATCH 055/120] refactored and put calculate_eta function in seperate file --- CMakeLists.txt | 4 +- benchmarks/calculateeta_benchmark.cpp | 1 + include/aare/ClusterFile.hpp | 213 -------------------------- include/aare/ClusterVector.hpp | 49 +++--- src/Interpolator.cpp | 1 + 5 files changed, 28 insertions(+), 240 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 51ed7f5..efb3a0c 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -307,6 +307,7 @@ endif() set(PUBLICHEADERS include/aare/ArrayExpr.hpp + include/aare/CalculateEta.hpp include/aare/Cluster.hpp #include/aare/ClusterFinder.hpp include/aare/ClusterFile.hpp @@ -331,7 +332,6 @@ set(PUBLICHEADERS include/aare/RawSubFile.hpp #include/aare/VarClusterFinder.hpp include/aare/utils/task.hpp - ) @@ -362,8 +362,6 @@ target_include_directories(aare_core PUBLIC "$" PRIVATE ${lmfit_SOURCE_DIR}/lib ) - - target_link_libraries( aare_core PUBLIC diff --git a/benchmarks/calculateeta_benchmark.cpp b/benchmarks/calculateeta_benchmark.cpp index dc7cd91..609ce89 100644 --- a/benchmarks/calculateeta_benchmark.cpp +++ b/benchmarks/calculateeta_benchmark.cpp @@ -1,3 +1,4 @@ +#include "aare/CalculateEta.hpp" 
#include "aare/ClusterFile.hpp" #include diff --git a/include/aare/ClusterFile.hpp b/include/aare/ClusterFile.hpp index e9530f6..289647d 100644 --- a/include/aare/ClusterFile.hpp +++ b/include/aare/ClusterFile.hpp @@ -9,40 +9,6 @@ namespace aare { -typedef enum { - cBottomLeft = 0, - cBottomRight = 1, - cTopLeft = 2, - cTopRight = 3 -} corner; - -typedef enum { - pBottomLeft = 0, - pBottom = 1, - pBottomRight = 2, - pLeft = 3, - pCenter = 4, - pRight = 5, - pTopLeft = 6, - pTop = 7, - pTopRight = 8 -} pixel; - -// TODO: maybe template this!!!!!! why int32_t???? -struct Eta2 { - double x; - double y; - int c; - int32_t sum; -}; - -struct ClusterAnalysis { - uint32_t c; - int32_t tot; - double etax; - double etay; -}; - /* Binary cluster file. Expects data to be layed out as: int32_t frame_number @@ -126,29 +92,6 @@ class ClusterFile { void close(); }; -int analyze_data(int32_t *data, int32_t *t2, int32_t *t3, char *quad, - double *eta2x, double *eta2y, double *eta3x, double *eta3y); -int analyze_cluster(Cluster &cl, int32_t *t2, int32_t *t3, - char *quad, double *eta2x, double *eta2y, double *eta3x, - double *eta3y); - -// template >> -// NDArray calculate_eta2(ClusterVector &clusters); - -// TODO: do we need rquire clauses? -// template Eta2 calculate_eta2(const Cluster &cl); - -// template Eta2 calculate_eta2(const Cluster &cl); - -// template >> -// Eta2 calculate_eta2(const ClusterType &cl); - -// template -// Eta2 calculate_eta2( -// const Cluster &cl); - template ClusterFile::ClusterFile( const std::filesystem::path &fname, size_t chunk_size, @@ -374,160 +317,4 @@ ClusterVector ClusterFile::read_frame() { return clusters; } -template >> -NDArray calculate_eta2(const ClusterVector &clusters) { - // TOTO! 
make work with 2x2 clusters - NDArray eta2({static_cast(clusters.size()), 2}); - - for (size_t i = 0; i < clusters.size(); i++) { - auto e = calculate_eta2(clusters.at(i)); - eta2(i, 0) = e.x; - eta2(i, 1) = e.y; - } - - return eta2; -} - -/** - * @brief Calculate the eta2 values for a generic sized cluster and return them - * in a Eta2 struct containing etay, etax and the index of the respective 2x2 - * subcluster. - */ -template -Eta2 calculate_eta2( - const Cluster &cl) { - Eta2 eta{}; - - // TODO loads of overhead for a 2x2 clsuter maybe keep 2x2 calculation - constexpr size_t num_2x2_subclusters = - (ClusterSizeX - 1) * (ClusterSizeY - 1); - std::array sum_2x2_subcluster; - for (size_t i = 0; i < ClusterSizeY - 1; ++i) { - for (size_t j = 0; j < ClusterSizeX - 1; ++j) - sum_2x2_subcluster[i * (ClusterSizeX - 1) + j] = - cl.data[i * ClusterSizeX + j] + - cl.data[i * ClusterSizeX + j + 1] + - cl.data[(i + 1) * ClusterSizeX + j] + - cl.data[(i + 1) * ClusterSizeX + j + 1]; - } - - auto c = - std::max_element(sum_2x2_subcluster.begin(), sum_2x2_subcluster.end()) - - sum_2x2_subcluster.begin(); - - eta.sum = sum_2x2_subcluster[c]; - - size_t index_bottom_left_max_2x2_subcluster = - (int(c / (ClusterSizeX - 1))) * ClusterSizeX + c % (ClusterSizeX - 1); - - if ((cl.data[index_bottom_left_max_2x2_subcluster] + - cl.data[index_bottom_left_max_2x2_subcluster + 1]) != 0) - eta.x = static_cast( - cl.data[index_bottom_left_max_2x2_subcluster + 1]) / - (cl.data[index_bottom_left_max_2x2_subcluster] + - cl.data[index_bottom_left_max_2x2_subcluster + 1]); - - if ((cl.data[index_bottom_left_max_2x2_subcluster] + - cl.data[index_bottom_left_max_2x2_subcluster + ClusterSizeX]) != 0) - eta.y = - static_cast( - cl.data[index_bottom_left_max_2x2_subcluster + ClusterSizeX]) / - (cl.data[index_bottom_left_max_2x2_subcluster] + - cl.data[index_bottom_left_max_2x2_subcluster + ClusterSizeX]); - - eta.c = c; // TODO only supported for 2x2 and 3x3 clusters -> at least no - // underyling 
enum class - return eta; -} - -/** - * @brief Calculate the eta2 values for a 3x3 cluster and return them in a Eta2 - * struct containing etay, etax and the corner of the cluster. - */ - -template Eta2 calculate_eta2(const Cluster &cl) { - Eta2 eta{}; - - std::array tot2; - tot2[0] = cl.data[0] + cl.data[1] + cl.data[3] + cl.data[4]; - tot2[1] = cl.data[1] + cl.data[2] + cl.data[4] + cl.data[5]; - tot2[2] = cl.data[3] + cl.data[4] + cl.data[6] + cl.data[7]; - tot2[3] = cl.data[4] + cl.data[5] + cl.data[7] + cl.data[8]; - - auto c = std::max_element(tot2.begin(), tot2.end()) - tot2.begin(); - eta.sum = tot2[c]; - switch (c) { - case cBottomLeft: - if ((cl.data[3] + cl.data[4]) != 0) - eta.x = static_cast(cl.data[4]) / (cl.data[3] + cl.data[4]); - if ((cl.data[1] + cl.data[4]) != 0) - eta.y = static_cast(cl.data[4]) / (cl.data[1] + cl.data[4]); - eta.c = cBottomLeft; - break; - case cBottomRight: - if ((cl.data[2] + cl.data[5]) != 0) - eta.x = static_cast(cl.data[5]) / (cl.data[4] + cl.data[5]); - if ((cl.data[1] + cl.data[4]) != 0) - eta.y = static_cast(cl.data[4]) / (cl.data[1] + cl.data[4]); - eta.c = cBottomRight; - break; - case cTopLeft: - if ((cl.data[7] + cl.data[4]) != 0) - eta.x = static_cast(cl.data[4]) / (cl.data[3] + cl.data[4]); - if ((cl.data[7] + cl.data[4]) != 0) - eta.y = static_cast(cl.data[7]) / (cl.data[7] + cl.data[4]); - eta.c = cTopLeft; - break; - case cTopRight: - if ((cl.data[5] + cl.data[4]) != 0) - eta.x = static_cast(cl.data[5]) / (cl.data[5] + cl.data[4]); - if ((cl.data[7] + cl.data[4]) != 0) - eta.y = static_cast(cl.data[7]) / (cl.data[7] + cl.data[4]); - eta.c = cTopRight; - break; - } - return eta; -} - -template Eta2 calculate_eta2(const Cluster &cl) { - Eta2 eta{}; - - eta.x = static_cast(cl.data[1]) / (cl.data[0] + cl.data[1]); - eta.y = static_cast(cl.data[2]) / (cl.data[0] + cl.data[2]); - eta.sum = cl.data[0] + cl.data[1] + cl.data[2] + cl.data[3]; - eta.c = cBottomLeft; // TODO! 
This is not correct, but need to put something - return eta; -} - -// calculates Eta3 for 3x3 cluster based on code from analyze_cluster -// TODO only supported for 3x3 Clusters -template Eta2 calculate_eta3(const Cluster &cl) { - - Eta2 eta{}; - - T sum = 0; - - std::for_each(std::begin(cl.data), std::end(cl.data), - [&sum](T x) { sum += x; }); - - eta.sum = sum; - - eta.c = corner::cBottomLeft; - - if ((cl.data[3] + cl.data[4] + cl.data[5]) != 0) - - eta.x = static_cast(-cl.data[3] + cl.data[3 + 2]) / - - (cl.data[3] + cl.data[4] + cl.data[5]); - - if ((cl.data[1] + cl.data[4] + cl.data[7]) != 0) - - eta.y = static_cast(-cl.data[1] + cl.data[2 * 3 + 1]) / - - (cl.data[1] + cl.data[4] + cl.data[7]); - - return eta; -} - } // namespace aare diff --git a/include/aare/ClusterVector.hpp b/include/aare/ClusterVector.hpp index ec0fa40..0e47b51 100644 --- a/include/aare/ClusterVector.hpp +++ b/include/aare/ClusterVector.hpp @@ -30,8 +30,7 @@ template class ClusterVector> { using value_type = T; - // size_t m_cluster_size_x; - // size_t m_cluster_size_y; + std::byte *m_data{}; size_t m_size{0}; size_t m_capacity; @@ -155,30 +154,32 @@ class ClusterVector> { * @throws std::runtime_error if the cluster size is not 3x3 * @warning Only 3x3 clusters are supported for the 2x2 sum. 
*/ - std::vector sum_2x2() { - std::vector sums(m_size); - const size_t stride = item_size(); + /* only needed to calculate eta + std::vector sum_2x2() { + std::vector sums(m_size); + const size_t stride = item_size(); - if (ClusterSizeX != 3 || ClusterSizeY != 3) { - throw std::runtime_error( - "Only 3x3 clusters are supported for the 2x2 sum."); + if (ClusterSizeX != 3 || ClusterSizeY != 3) { + throw std::runtime_error( + "Only 3x3 clusters are supported for the 2x2 sum."); + } + std::byte *ptr = m_data + 2 * sizeof(CoordType); // skip x and y + + for (size_t i = 0; i < m_size; i++) { + std::array total; + auto T_ptr = reinterpret_cast(ptr); + total[0] = T_ptr[0] + T_ptr[1] + T_ptr[3] + T_ptr[4]; + total[1] = T_ptr[1] + T_ptr[2] + T_ptr[4] + T_ptr[5]; + total[2] = T_ptr[3] + T_ptr[4] + T_ptr[6] + T_ptr[7]; + total[3] = T_ptr[4] + T_ptr[5] + T_ptr[7] + T_ptr[8]; + + sums[i] = *std::max_element(total.begin(), total.end()); + ptr += stride; + } + + return sums; } - std::byte *ptr = m_data + 2 * sizeof(CoordType); // skip x and y - - for (size_t i = 0; i < m_size; i++) { - std::array total; - auto T_ptr = reinterpret_cast(ptr); - total[0] = T_ptr[0] + T_ptr[1] + T_ptr[3] + T_ptr[4]; - total[1] = T_ptr[1] + T_ptr[2] + T_ptr[4] + T_ptr[5]; - total[2] = T_ptr[3] + T_ptr[4] + T_ptr[6] + T_ptr[7]; - total[3] = T_ptr[4] + T_ptr[5] + T_ptr[7] + T_ptr[8]; - - sums[i] = *std::max_element(total.begin(), total.end()); - ptr += stride; - } - - return sums; - } + */ /** * @brief Return the number of clusters in the vector diff --git a/src/Interpolator.cpp b/src/Interpolator.cpp index d95405a..1c4a385 100644 --- a/src/Interpolator.cpp +++ b/src/Interpolator.cpp @@ -1,4 +1,5 @@ #include "aare/Interpolator.hpp" +#include "aare/CalculateEta.hpp" #include "aare/algorithm.hpp" namespace aare { From 508adf5016fda7c274279e43d8899ea29e003015 Mon Sep 17 00:00:00 2001 From: Mazzoleni Alice Francesca Date: Tue, 1 Apr 2025 10:01:23 +0200 Subject: [PATCH 056/120] refactoring of remaining files 
--- CMakeLists.txt | 9 +- include/aare/CalculateEta.hpp | 192 +++++++++++++++++++++++++++++++++ include/aare/Cluster.hpp | 11 +- include/aare/ClusterFileV2.hpp | 28 +++-- include/aare/ClusterFinder.hpp | 51 ++++----- src/Cluster.test.cpp | 19 ++-- src/ClusterFinder.test.cpp | 24 ++--- src/ClusterVector.test.cpp | 154 ++++++++++++-------------- 8 files changed, 338 insertions(+), 150 deletions(-) create mode 100644 include/aare/CalculateEta.hpp diff --git a/CMakeLists.txt b/CMakeLists.txt index efb3a0c..0bdc317 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -309,7 +309,7 @@ set(PUBLICHEADERS include/aare/ArrayExpr.hpp include/aare/CalculateEta.hpp include/aare/Cluster.hpp - #include/aare/ClusterFinder.hpp + include/aare/ClusterFinder.hpp include/aare/ClusterFile.hpp include/aare/CtbRawFile.hpp include/aare/ClusterVector.hpp @@ -330,14 +330,13 @@ set(PUBLICHEADERS include/aare/RawFile.hpp include/aare/RawMasterFile.hpp include/aare/RawSubFile.hpp - #include/aare/VarClusterFinder.hpp + include/aare/VarClusterFinder.hpp include/aare/utils/task.hpp ) set(SourceFiles ${CMAKE_CURRENT_SOURCE_DIR}/src/CtbRawFile.cpp - #${CMAKE_CURRENT_SOURCE_DIR}/src/ClusterFile.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/defs.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/Dtype.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/decode.cpp @@ -393,8 +392,8 @@ if(AARE_TESTS) ${CMAKE_CURRENT_SOURCE_DIR}/src/RawMasterFile.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/NDArray.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/NDView.test.cpp - #${CMAKE_CURRENT_SOURCE_DIR}/src/ClusterFinder.test.cpp - #${CMAKE_CURRENT_SOURCE_DIR}/src/ClusterVector.test.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/ClusterFinder.test.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/ClusterVector.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/Cluster.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/Pedestal.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyFile.test.cpp diff --git a/include/aare/CalculateEta.hpp b/include/aare/CalculateEta.hpp new file mode 100644 index 0000000..29c5cc4 --- 
/dev/null +++ b/include/aare/CalculateEta.hpp @@ -0,0 +1,192 @@ +#pragma once + +#include "aare/Cluster.hpp" +#include "aare/ClusterVector.hpp" +#include "aare/NDArray.hpp" + +namespace aare { + +typedef enum { + cBottomLeft = 0, + cBottomRight = 1, + cTopLeft = 2, + cTopRight = 3 +} corner; + +typedef enum { + pBottomLeft = 0, + pBottom = 1, + pBottomRight = 2, + pLeft = 3, + pCenter = 4, + pRight = 5, + pTopLeft = 6, + pTop = 7, + pTopRight = 8 +} pixel; + +// TODO: maybe template this!!!!!! why int32_t???? +struct Eta2 { + double x; + double y; + int c; + int32_t sum; +}; + +/** + * @brief Calculate the eta2 values for all clusters in a Clsutervector + */ +template >> +NDArray calculate_eta2(const ClusterVector &clusters) { + NDArray eta2({static_cast(clusters.size()), 2}); + + for (size_t i = 0; i < clusters.size(); i++) { + auto e = calculate_eta2(clusters.at(i)); + eta2(i, 0) = e.x; + eta2(i, 1) = e.y; + } + + return eta2; +} + +/** + * @brief Calculate the eta2 values for a generic sized cluster and return them + * in a Eta2 struct containing etay, etax and the index of the respective 2x2 + * subcluster. 
+ */ +template +Eta2 calculate_eta2( + const Cluster &cl) { + Eta2 eta{}; + + constexpr size_t num_2x2_subclusters = + (ClusterSizeX - 1) * (ClusterSizeY - 1); + std::array sum_2x2_subcluster; + for (size_t i = 0; i < ClusterSizeY - 1; ++i) { + for (size_t j = 0; j < ClusterSizeX - 1; ++j) + sum_2x2_subcluster[i * (ClusterSizeX - 1) + j] = + cl.data[i * ClusterSizeX + j] + + cl.data[i * ClusterSizeX + j + 1] + + cl.data[(i + 1) * ClusterSizeX + j] + + cl.data[(i + 1) * ClusterSizeX + j + 1]; + } + + auto c = + std::max_element(sum_2x2_subcluster.begin(), sum_2x2_subcluster.end()) - + sum_2x2_subcluster.begin(); + + eta.sum = sum_2x2_subcluster[c]; + + size_t index_bottom_left_max_2x2_subcluster = + (int(c / (ClusterSizeX - 1))) * ClusterSizeX + c % (ClusterSizeX - 1); + + if ((cl.data[index_bottom_left_max_2x2_subcluster] + + cl.data[index_bottom_left_max_2x2_subcluster + 1]) != 0) + eta.x = static_cast( + cl.data[index_bottom_left_max_2x2_subcluster + 1]) / + (cl.data[index_bottom_left_max_2x2_subcluster] + + cl.data[index_bottom_left_max_2x2_subcluster + 1]); + + if ((cl.data[index_bottom_left_max_2x2_subcluster] + + cl.data[index_bottom_left_max_2x2_subcluster + ClusterSizeX]) != 0) + eta.y = + static_cast( + cl.data[index_bottom_left_max_2x2_subcluster + ClusterSizeX]) / + (cl.data[index_bottom_left_max_2x2_subcluster] + + cl.data[index_bottom_left_max_2x2_subcluster + ClusterSizeX]); + + eta.c = c; // TODO only supported for 2x2 and 3x3 clusters -> at least no + // underyling enum class + return eta; +} + +/** + * @brief Calculate the eta2 values for a 3x3 cluster and return them in a Eta2 + * struct containing etay, etax and the corner of the cluster. 
+ */ +template Eta2 calculate_eta2(const Cluster &cl) { + Eta2 eta{}; + + std::array tot2; + tot2[0] = cl.data[0] + cl.data[1] + cl.data[3] + cl.data[4]; + tot2[1] = cl.data[1] + cl.data[2] + cl.data[4] + cl.data[5]; + tot2[2] = cl.data[3] + cl.data[4] + cl.data[6] + cl.data[7]; + tot2[3] = cl.data[4] + cl.data[5] + cl.data[7] + cl.data[8]; + + auto c = std::max_element(tot2.begin(), tot2.end()) - tot2.begin(); + eta.sum = tot2[c]; + switch (c) { + case cBottomLeft: + if ((cl.data[3] + cl.data[4]) != 0) + eta.x = static_cast(cl.data[4]) / (cl.data[3] + cl.data[4]); + if ((cl.data[1] + cl.data[4]) != 0) + eta.y = static_cast(cl.data[4]) / (cl.data[1] + cl.data[4]); + eta.c = cBottomLeft; + break; + case cBottomRight: + if ((cl.data[2] + cl.data[5]) != 0) + eta.x = static_cast(cl.data[5]) / (cl.data[4] + cl.data[5]); + if ((cl.data[1] + cl.data[4]) != 0) + eta.y = static_cast(cl.data[4]) / (cl.data[1] + cl.data[4]); + eta.c = cBottomRight; + break; + case cTopLeft: + if ((cl.data[7] + cl.data[4]) != 0) + eta.x = static_cast(cl.data[4]) / (cl.data[3] + cl.data[4]); + if ((cl.data[7] + cl.data[4]) != 0) + eta.y = static_cast(cl.data[7]) / (cl.data[7] + cl.data[4]); + eta.c = cTopLeft; + break; + case cTopRight: + if ((cl.data[5] + cl.data[4]) != 0) + eta.x = static_cast(cl.data[5]) / (cl.data[5] + cl.data[4]); + if ((cl.data[7] + cl.data[4]) != 0) + eta.y = static_cast(cl.data[7]) / (cl.data[7] + cl.data[4]); + eta.c = cTopRight; + break; + } + return eta; +} + +template Eta2 calculate_eta2(const Cluster &cl) { + Eta2 eta{}; + + eta.x = static_cast(cl.data[1]) / (cl.data[0] + cl.data[1]); + eta.y = static_cast(cl.data[2]) / (cl.data[0] + cl.data[2]); + eta.sum = cl.data[0] + cl.data[1] + cl.data[2] + cl.data[3]; + eta.c = cBottomLeft; // TODO! 
This is not correct, but need to put something + return eta; +} + +// calculates Eta3 for 3x3 cluster based on code from analyze_cluster +// TODO only supported for 3x3 Clusters +template Eta2 calculate_eta3(const Cluster &cl) { + + Eta2 eta{}; + + T sum = 0; + + std::for_each(std::begin(cl.data), std::end(cl.data), + [&sum](T x) { sum += x; }); + + eta.sum = sum; + + eta.c = corner::cBottomLeft; + + if ((cl.data[3] + cl.data[4] + cl.data[5]) != 0) + + eta.x = static_cast(-cl.data[3] + cl.data[3 + 2]) / + + (cl.data[3] + cl.data[4] + cl.data[5]); + + if ((cl.data[1] + cl.data[4] + cl.data[7]) != 0) + + eta.y = static_cast(-cl.data[1] + cl.data[2 * 3 + 1]) / + + (cl.data[1] + cl.data[4] + cl.data[7]); + + return eta; +} + +} // namespace aare \ No newline at end of file diff --git a/include/aare/Cluster.hpp b/include/aare/Cluster.hpp index 90701ea..b2d1d3c 100644 --- a/include/aare/Cluster.hpp +++ b/include/aare/Cluster.hpp @@ -13,12 +13,17 @@ namespace aare { +template +constexpr bool is_valid_cluster = + std::is_arithmetic_v && std::is_integral_v && + (ClusterSizeX > 0) && (ClusterSizeY > 0); + // requires clause c++20 maybe update template && std::is_integral_v && - (ClusterSizeX > 1) && (ClusterSizeY > 1)>> + is_valid_cluster>> struct Cluster { CoordType x; CoordType y; @@ -29,11 +34,9 @@ struct Cluster { template struct is_cluster : std::false_type {}; // Default case: Not a Cluster -// TODO: Do i need the require clause here as well? 
template struct is_cluster> : std::true_type {}; // Cluster -// helper template constexpr bool is_cluster_v = is_cluster::value; } // namespace aare diff --git a/include/aare/ClusterFileV2.hpp b/include/aare/ClusterFileV2.hpp index 99f5976..55b8a2b 100644 --- a/include/aare/ClusterFileV2.hpp +++ b/include/aare/ClusterFileV2.hpp @@ -1,15 +1,16 @@ #pragma once #include "aare/core/defs.hpp" #include -#include #include +#include namespace aare { struct ClusterHeader { int32_t frame_number; int32_t n_clusters; std::string to_string() const { - return "frame_number: " + std::to_string(frame_number) + ", n_clusters: " + std::to_string(n_clusters); + return "frame_number: " + std::to_string(frame_number) + + ", n_clusters: " + std::to_string(n_clusters); } }; @@ -24,7 +25,8 @@ struct ClusterV2_ { data_str += std::to_string(d) + ", "; } data_str += "]"; - return "x: " + std::to_string(x) + ", y: " + std::to_string(y) + ", data: " + data_str; + return "x: " + std::to_string(x) + ", y: " + std::to_string(y) + + ", data: " + data_str; } return "x: " + std::to_string(x) + ", y: " + std::to_string(y); } @@ -34,27 +36,31 @@ struct ClusterV2 { ClusterV2_ cluster; int32_t frame_number; std::string to_string() const { - return "frame_number: " + std::to_string(frame_number) + ", " + cluster.to_string(); + return "frame_number: " + std::to_string(frame_number) + ", " + + cluster.to_string(); } }; /** * @brief - * important not: fp always points to the clusters header and does not point to individual clusters + * important not: fp always points to the clusters header and does not point to + * individual clusters * */ class ClusterFileV2 { - std::filesystem::path m_fpath; + std::filesystem::path m_fpath; std::string m_mode; FILE *fp{nullptr}; - void check_open(){ + void check_open() { if (!fp) - throw std::runtime_error(fmt::format("File: {} not open", m_fpath.string())); + throw std::runtime_error( + fmt::format("File: {} not open", m_fpath.string())); } public: - 
ClusterFileV2(std::filesystem::path const &fpath, std::string const &mode): m_fpath(fpath), m_mode(mode) { + ClusterFileV2(std::filesystem::path const &fpath, std::string const &mode) + : m_fpath(fpath), m_mode(mode) { if (m_mode != "r" && m_mode != "w") throw std::invalid_argument("mode must be 'r' or 'w'"); if (m_mode == "r" && !std::filesystem::exists(m_fpath)) @@ -77,7 +83,7 @@ class ClusterFileV2 { check_open(); ClusterHeader header; - fread(&header, sizeof(ClusterHeader), 1, fp); + fread(&header, sizeof(ClusterHeader), 1, fp); std::vector clusters_(header.n_clusters); fread(clusters_.data(), sizeof(ClusterV2_), header.n_clusters, fp); std::vector clusters; @@ -117,7 +123,7 @@ class ClusterFileV2 { size_t write(std::vector> const &clusters) { check_open(); - if (m_mode != "w") + if (m_mode != "w") throw std::runtime_error("File not opened in write mode"); size_t n_clusters = 0; diff --git a/include/aare/ClusterFinder.hpp b/include/aare/ClusterFinder.hpp index 84b207b..9a15448 100644 --- a/include/aare/ClusterFinder.hpp +++ b/include/aare/ClusterFinder.hpp @@ -10,17 +10,16 @@ namespace aare { -template +template class ClusterFinder { Shape<2> m_image_size; - const int m_cluster_sizeX; - const int m_cluster_sizeY; const PEDESTAL_TYPE m_nSigma; const PEDESTAL_TYPE c2; const PEDESTAL_TYPE c3; Pedestal m_pedestal; - ClusterVector m_clusters; + ClusterVector> m_clusters; public: /** @@ -31,15 +30,12 @@ class ClusterFinder { * @param capacity initial capacity of the cluster vector * */ - ClusterFinder(Shape<2> image_size, Shape<2> cluster_size, - PEDESTAL_TYPE nSigma = 5.0, size_t capacity = 1000000) - : m_image_size(image_size), m_cluster_sizeX(cluster_size[0]), - m_cluster_sizeY(cluster_size[1]), - m_nSigma(nSigma), - c2(sqrt((m_cluster_sizeY + 1) / 2 * (m_cluster_sizeX + 1) / 2)), - c3(sqrt(m_cluster_sizeX * m_cluster_sizeY)), - m_pedestal(image_size[0], image_size[1]), - m_clusters(m_cluster_sizeX, m_cluster_sizeY, capacity) {}; + ClusterFinder(Shape<2> 
image_size, PEDESTAL_TYPE nSigma = 5.0, + size_t capacity = 1000000) + : m_image_size(image_size), m_nSigma(nSigma), + c2(sqrt((ClusterSizeY + 1) / 2 * (ClusterSizeX + 1) / 2)), + c3(sqrt(ClusterSizeX * ClusterSizeY)), + m_pedestal(image_size[0], image_size[1]), m_clusters(capacity) {}; void push_pedestal_frame(NDView frame) { m_pedestal.push(frame); @@ -56,23 +52,26 @@ class ClusterFinder { * same capacity as the old one * */ - ClusterVector steal_clusters(bool realloc_same_capacity = false) { - ClusterVector tmp = std::move(m_clusters); + ClusterVector> + steal_clusters(bool realloc_same_capacity = false) { + ClusterVector> tmp = + std::move(m_clusters); if (realloc_same_capacity) - m_clusters = ClusterVector(m_cluster_sizeX, m_cluster_sizeY, - tmp.capacity()); + m_clusters = ClusterVector>( + tmp.capacity()); else - m_clusters = ClusterVector(m_cluster_sizeX, m_cluster_sizeY); + m_clusters = + ClusterVector>{}; return tmp; } void find_clusters(NDView frame, uint64_t frame_number = 0) { // // TODO! 
deal with even size clusters // // currently 3,3 -> +/- 1 // // 4,4 -> +/- 2 - int dy = m_cluster_sizeY / 2; - int dx = m_cluster_sizeX / 2; + int dy = ClusterSizeY / 2; + int dx = ClusterSizeX / 2; m_clusters.set_frame_number(frame_number); - std::vector cluster_data(m_cluster_sizeX * m_cluster_sizeY); + std::vector cluster_data(ClusterSizeX * ClusterSizeY); for (int iy = 0; iy < frame.shape(0); iy++) { for (int ix = 0; ix < frame.shape(1); ix++) { @@ -109,8 +108,12 @@ class ClusterFinder { // pass } else { // m_pedestal.push(iy, ix, frame(iy, ix)); // Safe option - m_pedestal.push_fast(iy, ix, frame(iy, ix)); // Assume we have reached n_samples in the pedestal, slight performance improvement - continue; // It was a pedestal value nothing to store + m_pedestal.push_fast( + iy, ix, + frame(iy, + ix)); // Assume we have reached n_samples in the + // pedestal, slight performance improvement + continue; // It was a pedestal value nothing to store } // Store cluster diff --git a/src/Cluster.test.cpp b/src/Cluster.test.cpp index de53e6e..20c3948 100644 --- a/src/Cluster.test.cpp +++ b/src/Cluster.test.cpp @@ -4,6 +4,7 @@ ***********************************************/ #include "aare/Cluster.hpp" +#include "aare/CalculateEta.hpp" #include "aare/ClusterFile.hpp" // #include "catch.hpp" @@ -13,26 +14,24 @@ using namespace aare; -/* TEST_CASE("Correct Instantiation of Cluster and ClusterVector", "[.cluster][.instantiation]") { + CHECK(is_valid_cluster); + CHECK(is_valid_cluster); + CHECK(not is_valid_cluster); + CHECK(not is_valid_cluster); + CHECK(not is_valid_cluster); - REQUIRE(not std::is_constructible_v>); - - // all 1,2 and 0,4 are not defined!! 
- std::make_tuple(Cluster, ), - std::make_tuple(Cluster, ) - - + CHECK(not is_cluster_v); + CHECK(is_cluster_v>); } -*/ using ClusterTypes = std::variant, Cluster, Cluster, Cluster, Cluster>; -TEST_CASE("calculate_eta2", "[.cluster][.instantiation]") { +TEST_CASE("calculate_eta2", "[.cluster][.eta_calculation]") { // weird expect cluster_start to be in bottom_left corner -> row major -> // check how its used should be an image!! diff --git a/src/ClusterFinder.test.cpp b/src/ClusterFinder.test.cpp index 768e632..8989581 100644 --- a/src/ClusterFinder.test.cpp +++ b/src/ClusterFinder.test.cpp @@ -1,19 +1,18 @@ #include "aare/ClusterFinder.hpp" #include "aare/Pedestal.hpp" -#include #include +#include #include #include using namespace aare; -//TODO! Find a way to test the cluster finder - - +// TODO! Find a way to test the cluster finder // class ClusterFinderUnitTest : public ClusterFinder { // public: -// ClusterFinderUnitTest(int cluster_sizeX, int cluster_sizeY, double nSigma = 5.0, double threshold = 0.0) +// ClusterFinderUnitTest(int cluster_sizeX, int cluster_sizeY, double nSigma +// = 5.0, double threshold = 0.0) // : ClusterFinder(cluster_sizeX, cluster_sizeY, nSigma, threshold) {} // double get_c2() { return c2; } // double get_c3() { return c3; } @@ -37,8 +36,8 @@ using namespace aare; // REQUIRE_THAT(cf.get_c3(), Catch::Matchers::WithinRel(c3, 1e-9)); // } -TEST_CASE("Construct a cluster finder"){ - ClusterFinder clusterFinder({400,400}, {3,3}); +TEST_CASE("Construct a cluster finder") { + ClusterFinder clusterFinder({400, 400}); // REQUIRE(clusterFinder.get_cluster_sizeX() == 3); // REQUIRE(clusterFinder.get_cluster_sizeY() == 3); // REQUIRE(clusterFinder.get_threshold() == 1); @@ -49,16 +48,17 @@ TEST_CASE("Construct a cluster finder"){ // aare::Pedestal pedestal(10, 10, 5); // NDArray frame({10, 10}); // frame = 0; -// ClusterFinder clusterFinder(3, 3, 1, 1); // 3x3 cluster, 1 nSigma, 1 threshold +// ClusterFinder clusterFinder(3, 3, 1, 1); // 3x3 
cluster, 1 nSigma, 1 +// threshold -// auto clusters = clusterFinder.find_clusters_without_threshold(frame.span(), pedestal); +// auto clusters = +// clusterFinder.find_clusters_without_threshold(frame.span(), pedestal); // REQUIRE(clusters.size() == 0); // frame(5, 5) = 10; -// clusters = clusterFinder.find_clusters_without_threshold(frame.span(), pedestal); -// REQUIRE(clusters.size() == 1); -// REQUIRE(clusters[0].x == 5); +// clusters = clusterFinder.find_clusters_without_threshold(frame.span(), +// pedestal); REQUIRE(clusters.size() == 1); REQUIRE(clusters[0].x == 5); // REQUIRE(clusters[0].y == 5); // for (int i = 0; i < 3; i++) { // for (int j = 0; j < 3; j++) { diff --git a/src/ClusterVector.test.cpp b/src/ClusterVector.test.cpp index 8ca3b1e..acbbf56 100644 --- a/src/ClusterVector.test.cpp +++ b/src/ClusterVector.test.cpp @@ -1,21 +1,15 @@ -#include #include "aare/ClusterVector.hpp" +#include -#include #include +#include +using aare::Cluster; using aare::ClusterVector; -struct Cluster_i2x2 { - int16_t x; - int16_t y; - int32_t data[4]; -}; - TEST_CASE("ClusterVector 2x2 int32_t capacity 4, push back then read") { - - ClusterVector cv(2, 2, 4); + ClusterVector> cv(4); REQUIRE(cv.capacity() == 4); REQUIRE(cv.size() == 0); REQUIRE(cv.cluster_size_x() == 2); @@ -23,51 +17,45 @@ TEST_CASE("ClusterVector 2x2 int32_t capacity 4, push back then read") { // int16_t, int16_t, 2x2 int32_t = 20 bytes REQUIRE(cv.item_size() == 20); - //Create a cluster and push back into the vector - Cluster_i2x2 c1 = {1, 2, {3, 4, 5, 6}}; - cv.push_back(c1.x, c1.y, reinterpret_cast(&c1.data[0])); + // Create a cluster and push back into the vector + Cluster c1 = {1, 2, {3, 4, 5, 6}}; + cv.push_back(c1.x, c1.y, reinterpret_cast(&c1.data[0])); REQUIRE(cv.size() == 1); REQUIRE(cv.capacity() == 4); - //Read the cluster back out using copy. TODO! Can we improve the API? - Cluster_i2x2 c2; + // Read the cluster back out using copy. TODO! Can we improve the API? 
+ Cluster c2; std::byte *ptr = cv.element_ptr(0); - std::copy(ptr, ptr + cv.item_size(), reinterpret_cast(&c2)); + std::copy(ptr, ptr + cv.item_size(), reinterpret_cast(&c2)); - //Check that the data is the same + // Check that the data is the same REQUIRE(c1.x == c2.x); REQUIRE(c1.y == c2.y); - for(size_t i = 0; i < 4; i++) { + for (size_t i = 0; i < 4; i++) { REQUIRE(c1.data[i] == c2.data[i]); } } -TEST_CASE("Summing 3x1 clusters of int64"){ - struct Cluster_l3x1{ - int16_t x; - int16_t y; - int32_t data[3]; - }; - - ClusterVector cv(3, 1, 2); +TEST_CASE("Summing 3x1 clusters of int64") { + ClusterVector> cv(2); REQUIRE(cv.capacity() == 2); REQUIRE(cv.size() == 0); REQUIRE(cv.cluster_size_x() == 3); REQUIRE(cv.cluster_size_y() == 1); - //Create a cluster and push back into the vector - Cluster_l3x1 c1 = {1, 2, {3, 4, 5}}; - cv.push_back(c1.x, c1.y, reinterpret_cast(&c1.data[0])); + // Create a cluster and push back into the vector + Cluster c1 = {1, 2, {3, 4, 5}}; + cv.push_back(c1.x, c1.y, reinterpret_cast(&c1.data[0])); REQUIRE(cv.capacity() == 2); REQUIRE(cv.size() == 1); - Cluster_l3x1 c2 = {6, 7, {8, 9, 10}}; - cv.push_back(c2.x, c2.y, reinterpret_cast(&c2.data[0])); + Cluster c2 = {6, 7, {8, 9, 10}}; + cv.push_back(c2.x, c2.y, reinterpret_cast(&c2.data[0])); REQUIRE(cv.capacity() == 2); REQUIRE(cv.size() == 2); - Cluster_l3x1 c3 = {11, 12, {13, 14, 15}}; - cv.push_back(c3.x, c3.y, reinterpret_cast(&c3.data[0])); + Cluster c3 = {11, 12, {13, 14, 15}}; + cv.push_back(c3.x, c3.y, reinterpret_cast(&c3.data[0])); REQUIRE(cv.capacity() == 4); REQUIRE(cv.size() == 3); @@ -78,28 +66,22 @@ TEST_CASE("Summing 3x1 clusters of int64"){ REQUIRE(sums[2] == 42); } -TEST_CASE("Storing floats"){ - struct Cluster_f4x2{ - int16_t x; - int16_t y; - float data[8]; - }; - - ClusterVector cv(2, 4, 10); +TEST_CASE("Storing floats") { + ClusterVector> cv(10); REQUIRE(cv.capacity() == 10); REQUIRE(cv.size() == 0); REQUIRE(cv.cluster_size_x() == 2); REQUIRE(cv.cluster_size_y() == 4); 
- //Create a cluster and push back into the vector - Cluster_f4x2 c1 = {1, 2, {3.0, 4.0, 5.0, 6.0,3.0, 4.0, 5.0, 6.0}}; - cv.push_back(c1.x, c1.y, reinterpret_cast(&c1.data[0])); + // Create a cluster and push back into the vector + Cluster c1 = {1, 2, {3.0, 4.0, 5.0, 6.0, 3.0, 4.0, 5.0, 6.0}}; + cv.push_back(c1.x, c1.y, reinterpret_cast(&c1.data[0])); REQUIRE(cv.capacity() == 10); REQUIRE(cv.size() == 1); - - Cluster_f4x2 c2 = {6, 7, {8.0, 9.0, 10.0, 11.0,8.0, 9.0, 10.0, 11.0}}; - cv.push_back(c2.x, c2.y, reinterpret_cast(&c2.data[0])); + Cluster c2 = { + 6, 7, {8.0, 9.0, 10.0, 11.0, 8.0, 9.0, 10.0, 11.0}}; + cv.push_back(c2.x, c2.y, reinterpret_cast(&c2.data[0])); REQUIRE(cv.capacity() == 10); REQUIRE(cv.size() == 2); @@ -109,26 +91,27 @@ TEST_CASE("Storing floats"){ REQUIRE_THAT(sums[1], Catch::Matchers::WithinAbs(76.0, 1e-6)); } -TEST_CASE("Push back more than initial capacity"){ - - ClusterVector cv(2, 2, 2); +TEST_CASE("Push back more than initial capacity") { + + ClusterVector> cv(2); auto initial_data = cv.data(); - Cluster_i2x2 c1 = {1, 2, {3, 4, 5, 6}}; - cv.push_back(c1.x, c1.y, reinterpret_cast(&c1.data[0])); + Cluster c1 = {1, 2, {3, 4, 5, 6}}; + cv.push_back(c1.x, c1.y, reinterpret_cast(&c1.data[0])); REQUIRE(cv.size() == 1); REQUIRE(cv.capacity() == 2); - Cluster_i2x2 c2 = {6, 7, {8, 9, 10, 11}}; - cv.push_back(c2.x, c2.y, reinterpret_cast(&c2.data[0])); + Cluster c2 = {6, 7, {8, 9, 10, 11}}; + cv.push_back(c2.x, c2.y, reinterpret_cast(&c2.data[0])); REQUIRE(cv.size() == 2); REQUIRE(cv.capacity() == 2); - Cluster_i2x2 c3 = {11, 12, {13, 14, 15, 16}}; - cv.push_back(c3.x, c3.y, reinterpret_cast(&c3.data[0])); - REQUIRE(cv.size() == 3); + Cluster c3 = {11, 12, {13, 14, 15, 16}}; + cv.push_back(c3.x, c3.y, reinterpret_cast(&c3.data[0])); + REQUIRE(cv.size() == 3); REQUIRE(cv.capacity() == 4); - Cluster_i2x2* ptr = reinterpret_cast(cv.data()); + Cluster *ptr = + reinterpret_cast *>(cv.data()); REQUIRE(ptr[0].x == 1); REQUIRE(ptr[0].y == 2); 
REQUIRE(ptr[1].x == 6); @@ -136,29 +119,31 @@ TEST_CASE("Push back more than initial capacity"){ REQUIRE(ptr[2].x == 11); REQUIRE(ptr[2].y == 12); - //We should have allocated a new buffer, since we outgrew the initial capacity + // We should have allocated a new buffer, since we outgrew the initial + // capacity REQUIRE(initial_data != cv.data()); - } -TEST_CASE("Concatenate two cluster vectors where the first has enough capacity"){ - ClusterVector cv1(2, 2, 12); - Cluster_i2x2 c1 = {1, 2, {3, 4, 5, 6}}; - cv1.push_back(c1.x, c1.y, reinterpret_cast(&c1.data[0])); - Cluster_i2x2 c2 = {6, 7, {8, 9, 10, 11}}; - cv1.push_back(c2.x, c2.y, reinterpret_cast(&c2.data[0])); +TEST_CASE( + "Concatenate two cluster vectors where the first has enough capacity") { + ClusterVector> cv1(12); + Cluster c1 = {1, 2, {3, 4, 5, 6}}; + cv1.push_back(c1.x, c1.y, reinterpret_cast(&c1.data[0])); + Cluster c2 = {6, 7, {8, 9, 10, 11}}; + cv1.push_back(c2.x, c2.y, reinterpret_cast(&c2.data[0])); - ClusterVector cv2(2, 2, 2); - Cluster_i2x2 c3 = {11, 12, {13, 14, 15, 16}}; - cv2.push_back(c3.x, c3.y, reinterpret_cast(&c3.data[0])); - Cluster_i2x2 c4 = {16, 17, {18, 19, 20, 21}}; - cv2.push_back(c4.x, c4.y, reinterpret_cast(&c4.data[0])); + ClusterVector> cv2(2); + Cluster c3 = {11, 12, {13, 14, 15, 16}}; + cv2.push_back(c3.x, c3.y, reinterpret_cast(&c3.data[0])); + Cluster c4 = {16, 17, {18, 19, 20, 21}}; + cv2.push_back(c4.x, c4.y, reinterpret_cast(&c4.data[0])); cv1 += cv2; REQUIRE(cv1.size() == 4); REQUIRE(cv1.capacity() == 12); - Cluster_i2x2* ptr = reinterpret_cast(cv1.data()); + Cluster *ptr = + reinterpret_cast *>(cv1.data()); REQUIRE(ptr[0].x == 1); REQUIRE(ptr[0].y == 2); REQUIRE(ptr[1].x == 6); @@ -169,24 +154,25 @@ TEST_CASE("Concatenate two cluster vectors where the first has enough capacity") REQUIRE(ptr[3].y == 17); } -TEST_CASE("Concatenate two cluster vectors where we need to allocate"){ - ClusterVector cv1(2, 2, 2); - Cluster_i2x2 c1 = {1, 2, {3, 4, 5, 6}}; - 
cv1.push_back(c1.x, c1.y, reinterpret_cast(&c1.data[0])); - Cluster_i2x2 c2 = {6, 7, {8, 9, 10, 11}}; - cv1.push_back(c2.x, c2.y, reinterpret_cast(&c2.data[0])); +TEST_CASE("Concatenate two cluster vectors where we need to allocate") { + ClusterVector> cv1(2); + Cluster c1 = {1, 2, {3, 4, 5, 6}}; + cv1.push_back(c1.x, c1.y, reinterpret_cast(&c1.data[0])); + Cluster c2 = {6, 7, {8, 9, 10, 11}}; + cv1.push_back(c2.x, c2.y, reinterpret_cast(&c2.data[0])); - ClusterVector cv2(2, 2, 2); - Cluster_i2x2 c3 = {11, 12, {13, 14, 15, 16}}; - cv2.push_back(c3.x, c3.y, reinterpret_cast(&c3.data[0])); - Cluster_i2x2 c4 = {16, 17, {18, 19, 20, 21}}; - cv2.push_back(c4.x, c4.y, reinterpret_cast(&c4.data[0])); + ClusterVector> cv2(2); + Cluster c3 = {11, 12, {13, 14, 15, 16}}; + cv2.push_back(c3.x, c3.y, reinterpret_cast(&c3.data[0])); + Cluster c4 = {16, 17, {18, 19, 20, 21}}; + cv2.push_back(c4.x, c4.y, reinterpret_cast(&c4.data[0])); cv1 += cv2; REQUIRE(cv1.size() == 4); REQUIRE(cv1.capacity() == 4); - Cluster_i2x2* ptr = reinterpret_cast(cv1.data()); + Cluster *ptr = + reinterpret_cast *>(cv1.data()); REQUIRE(ptr[0].x == 1); REQUIRE(ptr[0].y == 2); REQUIRE(ptr[1].x == 6); From a42c0d645bc91f9ced8cee98e03010505a742d81 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Erik=20Fr=C3=B6jdh?= Date: Tue, 1 Apr 2025 14:31:25 +0200 Subject: [PATCH 057/120] added roi, noise and gain (#143) - Moved definitions of Cluster_2x2 and Cluster_3x3 to it's own file - Added optional members for ROI, noise_map and gain_map in ClusterFile **API:** After creating the ClusterFile the user can set one or all of: roi, noise_map, gain_map ```python f = ClusterFile(fname) f.set_roi(roi) #aare.ROI f.set_noise_map(noise_map) #numpy array f.set_gain_map(gain_map) #numpy array ``` **When reading clusters they are evaluated in the order:** 1. If ROI is enabled check that the cluster is within the ROI 1. 
If noise_map is enabled check that the cluster meets one of the conditions - Center pixel above noise - Highest 2x2 sum above 2x noise - 3x3 sum above 3x noise 1. If gain_map is set apply the gain map before returning the clusters (not used for noise cut) **Open questions:** 1. Check for out of bounds access in noise and gain map? closes #139 closes #135 closes #90 --- CMakeLists.txt | 45 ++- conda-recipe/meta.yaml | 2 +- include/aare/Cluster.hpp | 36 +++ include/aare/ClusterFile.hpp | 77 +++--- include/aare/ClusterVector.hpp | 25 ++ include/aare/defs.hpp | 6 +- patches/libzmq_cmake_version.patch | 18 ++ pyproject.toml | 2 +- python/src/cluster_file.hpp | 24 +- src/ClusterFile.cpp | 428 ++++++++++------------------- src/ClusterFile.test.cpp | 80 ++++++ tests/test_config.hpp.in | 2 +- 12 files changed, 400 insertions(+), 345 deletions(-) create mode 100644 include/aare/Cluster.hpp create mode 100644 patches/libzmq_cmake_version.patch create mode 100644 src/ClusterFile.test.cpp diff --git a/CMakeLists.txt b/CMakeLists.txt index 4772f0b..4a12fe6 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -81,15 +81,29 @@ set(CMAKE_EXPORT_COMPILE_COMMANDS ON) if(AARE_FETCH_LMFIT) #TODO! Should we fetch lmfit from the web or inlcude a tar.gz in the repo? 
- set(lmfit_patch git apply ${CMAKE_CURRENT_SOURCE_DIR}/patches/lmfit.patch) - FetchContent_Declare( - lmfit - GIT_REPOSITORY https://jugit.fz-juelich.de/mlz/lmfit.git - GIT_TAG main - PATCH_COMMAND ${lmfit_patch} - UPDATE_DISCONNECTED 1 - EXCLUDE_FROM_ALL 1 - ) + set(LMFIT_PATCH_COMMAND git apply ${CMAKE_CURRENT_SOURCE_DIR}/patches/lmfit.patch) + + # For cmake < 3.28 we can't supply EXCLUDE_FROM_ALL to FetchContent_Declare + # so we need this workaround + if (${CMAKE_VERSION} VERSION_LESS "3.28") + FetchContent_Declare( + lmfit + GIT_REPOSITORY https://jugit.fz-juelich.de/mlz/lmfit.git + GIT_TAG main + PATCH_COMMAND ${LMFIT_PATCH_COMMAND} + UPDATE_DISCONNECTED 1 + ) + else() + FetchContent_Declare( + lmfit + GIT_REPOSITORY https://jugit.fz-juelich.de/mlz/lmfit.git + GIT_TAG main + PATCH_COMMAND ${LMFIT_PATCH_COMMAND} + UPDATE_DISCONNECTED 1 + EXCLUDE_FROM_ALL 1 + ) + endif() + #Disable what we don't need from lmfit set(BUILD_TESTING OFF CACHE BOOL "") set(LMFIT_CPPTEST OFF CACHE BOOL "") @@ -97,8 +111,15 @@ if(AARE_FETCH_LMFIT) set(LMFIT_CPPTEST OFF CACHE BOOL "") set(BUILD_SHARED_LIBS OFF CACHE BOOL "") + if (${CMAKE_VERSION} VERSION_LESS "3.28") + if(NOT lmfit_POPULATED) + FetchContent_Populate(lmfit) + add_subdirectory(${lmfit_SOURCE_DIR} ${lmfit_BINARY_DIR} EXCLUDE_FROM_ALL) + endif() + else() + FetchContent_MakeAvailable(lmfit) + endif() - FetchContent_MakeAvailable(lmfit) set_property(TARGET lmfit PROPERTY POSITION_INDEPENDENT_CODE ON) else() find_package(lmfit REQUIRED) @@ -111,10 +132,13 @@ if(AARE_FETCH_ZMQ) if (${CMAKE_VERSION} VERSION_GREATER_EQUAL "3.30") cmake_policy(SET CMP0169 OLD) endif() + set(ZMQ_PATCH_COMMAND git apply ${CMAKE_CURRENT_SOURCE_DIR}/patches/libzmq_cmake_version.patch) FetchContent_Declare( libzmq GIT_REPOSITORY https://github.com/zeromq/libzmq.git GIT_TAG v4.3.4 + PATCH_COMMAND ${ZMQ_PATCH_COMMAND} + UPDATE_DISCONNECTED 1 ) # Disable unwanted options from libzmq set(BUILD_TESTS OFF CACHE BOOL "Switch off libzmq test build") @@ 
-396,6 +420,7 @@ if(AARE_TESTS) ${CMAKE_CURRENT_SOURCE_DIR}/src/NDView.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/ClusterFinder.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/ClusterVector.test.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/ClusterFile.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/Pedestal.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyFile.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyHelpers.test.cpp diff --git a/conda-recipe/meta.yaml b/conda-recipe/meta.yaml index 120854b..3630b29 100644 --- a/conda-recipe/meta.yaml +++ b/conda-recipe/meta.yaml @@ -1,6 +1,6 @@ package: name: aare - version: 2025.3.18 #TODO! how to not duplicate this? + version: 2025.4.1 #TODO! how to not duplicate this? diff --git a/include/aare/Cluster.hpp b/include/aare/Cluster.hpp new file mode 100644 index 0000000..48f9ef0 --- /dev/null +++ b/include/aare/Cluster.hpp @@ -0,0 +1,36 @@ +#pragma once + +#include +#include +#include +#include +#include + +namespace aare { + +//TODO! Template this? +struct Cluster3x3 { + int16_t x; + int16_t y; + int32_t data[9]; + + int32_t sum_2x2() const{ + std::array total; + total[0] = data[0] + data[1] + data[3] + data[4]; + total[1] = data[1] + data[2] + data[4] + data[5]; + total[2] = data[3] + data[4] + data[6] + data[7]; + total[3] = data[4] + data[5] + data[7] + data[8]; + return *std::max_element(total.begin(), total.end()); + } + + int32_t sum() const{ + return std::accumulate(data, data + 9, 0); + } +}; +struct Cluster2x2 { + int16_t x; + int16_t y; + int32_t data[4]; +}; + +} // namespace aare \ No newline at end of file diff --git a/include/aare/ClusterFile.hpp b/include/aare/ClusterFile.hpp index 5bea342..22f4183 100644 --- a/include/aare/ClusterFile.hpp +++ b/include/aare/ClusterFile.hpp @@ -1,25 +1,16 @@ #pragma once +#include "aare/Cluster.hpp" #include "aare/ClusterVector.hpp" #include "aare/NDArray.hpp" #include "aare/defs.hpp" #include #include +#include namespace aare { -//TODO! Template this? 
-struct Cluster3x3 { - int16_t x; - int16_t y; - int32_t data[9]; -}; -struct Cluster2x2 { - int16_t x; - int16_t y; - int32_t data[4]; -}; - +//TODO! Legacy enums, migrate to enum class typedef enum { cBottomLeft = 0, cBottomRight = 1, @@ -53,15 +44,7 @@ struct ClusterAnalysis { double etay; }; -/* -Binary cluster file. Expects data to be layed out as: -int32_t frame_number -uint32_t number_of_clusters -int16_t x, int16_t y, int32_t data[9] x number_of_clusters -int32_t frame_number -uint32_t number_of_clusters -.... -*/ + /** * @brief Class to read and write cluster files @@ -70,16 +53,19 @@ uint32_t number_of_clusters * * int32_t frame_number * uint32_t number_of_clusters - * int16_t x, int16_t y, int32_t data[9] x number_of_clusters + * int16_t x, int16_t y, int32_t data[9] * number_of_clusters * int32_t frame_number * uint32_t number_of_clusters * etc. */ class ClusterFile { FILE *fp{}; - uint32_t m_num_left{}; - size_t m_chunk_size{}; - const std::string m_mode; + uint32_t m_num_left{}; /*Number of photons left in frame*/ + size_t m_chunk_size{}; /*Number of clusters to read at a time*/ + const std::string m_mode; /*Mode to open the file in*/ + std::optional m_roi; /*Region of interest, will be applied if set*/ + std::optional> m_noise_map; /*Noise map to cut photons, will be applied if set*/ + std::optional> m_gain_map; /*Gain map to apply to the clusters, will be applied if set*/ public: /** @@ -104,8 +90,6 @@ class ClusterFile { */ ClusterVector read_clusters(size_t n_clusters); - ClusterVector read_clusters(size_t n_clusters, ROI roi); - /** * @brief Read a single frame from the file and return the clusters. The * cluster vector will have the frame number set. 
@@ -117,29 +101,50 @@ class ClusterFile { void write_frame(const ClusterVector &clusters); - // Need to be migrated to support NDArray and return a ClusterVector - // std::vector - // read_cluster_with_cut(size_t n_clusters, double *noise_map, int nx, int ny); - /** * @brief Return the chunk size */ size_t chunk_size() const { return m_chunk_size; } + + /** + * @brief Set the region of interest to use when reading clusters. If set only clusters within + * the ROI will be read. + */ + void set_roi(ROI roi); + + /** + * @brief Set the noise map to use when reading clusters. If set clusters below the noise + * level will be discarded. Selection criteria one of: Central pixel above noise, highest + * 2x2 sum above 2 * noise, total sum above 3 * noise. + */ + void set_noise_map(const NDView noise_map); + + /** + * @brief Set the gain map to use when reading clusters. If set the gain map will be applied + * to the clusters that pass ROI and noise_map selection. + */ + void set_gain_map(const NDView gain_map); /** * @brief Close the file. If not closed the file will be closed in the destructor */ void close(); + + private: + ClusterVector read_clusters_with_cut(size_t n_clusters); + ClusterVector read_clusters_without_cut(size_t n_clusters); + ClusterVector read_frame_with_cut(); + ClusterVector read_frame_without_cut(); + bool is_selected(Cluster3x3 &cl); + Cluster3x3 read_one_cluster(); }; -int analyze_data(int32_t *data, int32_t *t2, int32_t *t3, char *quad, - double *eta2x, double *eta2y, double *eta3x, double *eta3y); -int analyze_cluster(Cluster3x3 &cl, int32_t *t2, int32_t *t3, char *quad, - double *eta2x, double *eta2y, double *eta3x, double *eta3y); - +//TODO! 
helper functions that doesn't really belong here NDArray calculate_eta2(ClusterVector &clusters); Eta2 calculate_eta2(Cluster3x3 &cl); Eta2 calculate_eta2(Cluster2x2 &cl); + + } // namespace aare diff --git a/include/aare/ClusterVector.hpp b/include/aare/ClusterVector.hpp index 1c15a22..b91278c 100644 --- a/include/aare/ClusterVector.hpp +++ b/include/aare/ClusterVector.hpp @@ -8,6 +8,9 @@ #include +#include "aare/Cluster.hpp" +#include "aare/NDView.hpp" + namespace aare { /** @@ -265,6 +268,28 @@ template class ClusterVector { m_size = new_size; } + void apply_gain_map(const NDView gain_map){ + //in principle we need to know the size of the image for this lookup + //TODO! check orientations + std::array xcorr = {-1, 0, 1, -1, 0, 1, -1, 0, 1}; + std::array ycorr = {-1, -1, -1, 0, 0, 0, 1, 1, 1}; + for (size_t i=0; i(i); + + if (cl.x > 0 && cl.y > 0 && cl.x < gain_map.shape(1)-1 && cl.y < gain_map.shape(0)-1){ + for (size_t j=0; j<9; j++){ + size_t x = cl.x + xcorr[j]; + size_t y = cl.y + ycorr[j]; + cl.data[j] = static_cast(cl.data[j] * gain_map(y, x)); + } + }else{ + memset(cl.data, 0, 9*sizeof(T)); //clear edge clusters + } + + + } + } + private: void allocate_buffer(size_t new_capacity) { size_t num_bytes = item_size() * new_capacity; diff --git a/include/aare/defs.hpp b/include/aare/defs.hpp index 4559882..4d22bd4 100644 --- a/include/aare/defs.hpp +++ b/include/aare/defs.hpp @@ -1,11 +1,9 @@ #pragma once #include "aare/Dtype.hpp" -// #include "aare/utils/logger.hpp" #include #include - #include #include #include @@ -43,6 +41,7 @@ inline constexpr size_t bits_per_byte = 8; void assert_failed(const std::string &msg); + class DynamicCluster { public: int cluster_sizeX; @@ -215,6 +214,9 @@ struct ROI{ int64_t height() const { return ymax - ymin; } int64_t width() const { return xmax - xmin; } + bool contains(int64_t x, int64_t y) const { + return x >= xmin && x < xmax && y >= ymin && y < ymax; + } }; diff --git a/patches/libzmq_cmake_version.patch 
b/patches/libzmq_cmake_version.patch new file mode 100644 index 0000000..4e421d3 --- /dev/null +++ b/patches/libzmq_cmake_version.patch @@ -0,0 +1,18 @@ +diff --git a/CMakeLists.txt b/CMakeLists.txt +index dd3d8eb9..c0187747 100644 +--- a/CMakeLists.txt ++++ b/CMakeLists.txt +@@ -1,11 +1,8 @@ + # CMake build script for ZeroMQ + project(ZeroMQ) + +-if(${CMAKE_SYSTEM_NAME} STREQUAL Darwin) +- cmake_minimum_required(VERSION 3.0.2) +-else() +- cmake_minimum_required(VERSION 2.8.12) +-endif() ++cmake_minimum_required(VERSION 3.15) ++message(STATUS "Patched cmake version") + + include(CheckIncludeFiles) + include(CheckCCompilerFlag) diff --git a/pyproject.toml b/pyproject.toml index b9bf7d2..0b6d2af 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "scikit_build_core.build" [project] name = "aare" -version = "2025.3.18" +version = "2025.4.1" diff --git a/python/src/cluster_file.hpp b/python/src/cluster_file.hpp index f587443..ff46043 100644 --- a/python/src/cluster_file.hpp +++ b/python/src/cluster_file.hpp @@ -31,26 +31,22 @@ void define_cluster_file_io_bindings(py::module &m) { auto v = new ClusterVector(self.read_clusters(n_clusters)); return v; },py::return_value_policy::take_ownership) - .def("read_clusters", - [](ClusterFile &self, size_t n_clusters, ROI roi) { - auto v = new ClusterVector(self.read_clusters(n_clusters, roi)); - return v; - },py::return_value_policy::take_ownership) .def("read_frame", [](ClusterFile &self) { auto v = new ClusterVector(self.read_frame()); return v; }) + .def("set_roi", &ClusterFile::set_roi) + .def("set_noise_map", [](ClusterFile &self, py::array_t noise_map) { + auto view = make_view_2d(noise_map); + self.set_noise_map(view); + }) + .def("set_gain_map", [](ClusterFile &self, py::array_t gain_map) { + auto view = make_view_2d(gain_map); + self.set_gain_map(view); + }) + .def("close", &ClusterFile::close) .def("write_frame", &ClusterFile::write_frame) - // .def("read_cluster_with_cut", - // 
[](ClusterFile &self, size_t n_clusters, - // py::array_t noise_map, int nx, int ny) { - // auto view = make_view_2d(noise_map); - // auto *vec = - // new std::vector(self.read_cluster_with_cut( - // n_clusters, view.data(), nx, ny)); - // return return_vector(vec); - // }) .def("__enter__", [](ClusterFile &self) { return &self; }) .def("__exit__", [](ClusterFile &self, diff --git a/src/ClusterFile.cpp b/src/ClusterFile.cpp index 59b8bb8..f4ef0ae 100644 --- a/src/ClusterFile.cpp +++ b/src/ClusterFile.cpp @@ -31,6 +31,18 @@ ClusterFile::ClusterFile(const std::filesystem::path &fname, size_t chunk_size, } } +void ClusterFile::set_roi(ROI roi){ + m_roi = roi; +} + +void ClusterFile::set_noise_map(const NDView noise_map){ + m_noise_map = NDArray(noise_map); +} + +void ClusterFile::set_gain_map(const NDView gain_map){ + m_gain_map = NDArray(gain_map); +} + ClusterFile::~ClusterFile() { close(); } void ClusterFile::close() { @@ -48,14 +60,37 @@ void ClusterFile::write_frame(const ClusterVector &clusters) { !(clusters.cluster_size_y() == 3)) { throw std::runtime_error("Only 3x3 clusters are supported"); } + //First write the frame number - 4 bytes int32_t frame_number = clusters.frame_number(); - fwrite(&frame_number, sizeof(frame_number), 1, fp); + if(fwrite(&frame_number, sizeof(frame_number), 1, fp)!=1){ + throw std::runtime_error(LOCATION + "Could not write frame number"); + } + + //Then write the number of clusters - 4 bytes uint32_t n_clusters = clusters.size(); - fwrite(&n_clusters, sizeof(n_clusters), 1, fp); - fwrite(clusters.data(), clusters.item_size(), clusters.size(), fp); + if(fwrite(&n_clusters, sizeof(n_clusters), 1, fp)!=1){ + throw std::runtime_error(LOCATION + "Could not write number of clusters"); + } + + //Now write the clusters in the frame + if(fwrite(clusters.data(), clusters.item_size(), clusters.size(), fp)!=clusters.size()){ + throw std::runtime_error(LOCATION + "Could not write clusters"); + } } -ClusterVector ClusterFile::read_clusters(size_t 
n_clusters) { + +ClusterVector ClusterFile::read_clusters(size_t n_clusters){ + if (m_mode != "r") { + throw std::runtime_error("File not opened for reading"); + } + if (m_noise_map || m_roi){ + return read_clusters_with_cut(n_clusters); + }else{ + return read_clusters_without_cut(n_clusters); + } +} + +ClusterVector ClusterFile::read_clusters_without_cut(size_t n_clusters) { if (m_mode != "r") { throw std::runtime_error("File not opened for reading"); } @@ -86,6 +121,7 @@ ClusterVector ClusterFile::read_clusters(size_t n_clusters) { if (nph_read < n_clusters) { // keep on reading frames and photons until reaching n_clusters while (fread(&iframe, sizeof(iframe), 1, fp)) { + clusters.set_frame_number(iframe); // read number of clusters in frame if (fread(&nph, sizeof(nph), 1, fp)) { if (nph > (n_clusters - nph_read)) @@ -105,71 +141,112 @@ ClusterVector ClusterFile::read_clusters(size_t n_clusters) { // Resize the vector to the number of clusters. // No new allocation, only change bounds. 
clusters.resize(nph_read); + if(m_gain_map) + clusters.apply_gain_map(m_gain_map->view()); return clusters; } -ClusterVector ClusterFile::read_clusters(size_t n_clusters, ROI roi) { - if (m_mode != "r") { - throw std::runtime_error("File not opened for reading"); - } - + + +ClusterVector ClusterFile::read_clusters_with_cut(size_t n_clusters) { ClusterVector clusters(3,3); clusters.reserve(n_clusters); - - Cluster3x3 tmp; //this would break if the cluster size changes - // if there are photons left from previous frame read them first if (m_num_left) { - size_t nph_read = 0; - while(nph_read < m_num_left && clusters.size() < n_clusters){ - fread(&tmp, sizeof(tmp), 1, fp); - nph_read++; - if(tmp.x >= roi.xmin && tmp.x <= roi.xmax && tmp.y >= roi.ymin && tmp.y <= roi.ymax){ - clusters.push_back(tmp.x, tmp.y, reinterpret_cast(tmp.data)); + while(m_num_left && clusters.size() < n_clusters){ + Cluster3x3 c = read_one_cluster(); + if(is_selected(c)){ + clusters.push_back(c.x, c.y, reinterpret_cast(c.data)); } } - m_num_left -= nph_read; } - + // we did not have enough clusters left in the previous frame + // keep on reading frames until reaching n_clusters if (clusters.size() < n_clusters) { + // sanity check if (m_num_left) { throw std::runtime_error(LOCATION + "Entered second loop with clusters left\n"); } - // we did not have enough clusters left in the previous frame - // keep on reading frames until reaching n_clusters - + int32_t frame_number = 0; // frame number needs to be 4 bytes! 
while (fread(&frame_number, sizeof(frame_number), 1, fp)) { - uint32_t nph_in_frame = 0; //number of photons we can read until next frame number - size_t nph_read = 0; //number of photons read in this frame - - if (fread(&nph_in_frame, sizeof(nph_in_frame), 1, fp)) { - if(frame_number != 1){ - throw std::runtime_error("Frame number is not 1"); - } - - while(nph_read < nph_in_frame && clusters.size() < n_clusters){ - fread(&tmp, sizeof(tmp), 1, fp); - nph_read++; - if(tmp.x >= roi.xmin && tmp.x <= roi.xmax && tmp.y >= roi.ymin && tmp.y <= roi.ymax){ - clusters.push_back(tmp.x, tmp.y, reinterpret_cast(tmp.data)); + if (fread(&m_num_left, sizeof(m_num_left), 1, fp)) { + clusters.set_frame_number(frame_number); //cluster vector will hold the last frame number + while(m_num_left && clusters.size() < n_clusters){ + Cluster3x3 c = read_one_cluster(); + if(is_selected(c)){ + clusters.push_back(c.x, c.y, reinterpret_cast(c.data)); } } - m_num_left = nph_in_frame - nph_read; } - if (clusters.size() >= n_clusters){ + // we have enough clusters, break out of the outer while loop + if (clusters.size() >= n_clusters) break; - } } } + if(m_gain_map) + clusters.apply_gain_map(m_gain_map->view()); + return clusters; } -ClusterVector ClusterFile::read_frame() { +Cluster3x3 ClusterFile::read_one_cluster(){ + Cluster3x3 c; + auto rc = fread(&c, sizeof(c), 1, fp); + if (rc != 1) { + throw std::runtime_error(LOCATION + "Could not read cluster"); + } + --m_num_left; + return c; +} + +ClusterVector ClusterFile::read_frame(){ + if (m_mode != "r") { + throw std::runtime_error(LOCATION + "File not opened for reading"); + } + if (m_noise_map || m_roi){ + return read_frame_with_cut(); + }else{ + return read_frame_without_cut(); + } +} + +ClusterVector ClusterFile::read_frame_without_cut() { + if (m_mode != "r") { + throw std::runtime_error("File not opened for reading"); + } + if (m_num_left) { + throw std::runtime_error( + "There are still photons left in the last frame"); + } + int32_t 
frame_number; + if (fread(&frame_number, sizeof(frame_number), 1, fp) != 1) { + throw std::runtime_error(LOCATION + "Could not read frame number"); + } + + int32_t n_clusters; // Saved as 32bit integer in the cluster file + if (fread(&n_clusters, sizeof(n_clusters), 1, fp) != 1) { + throw std::runtime_error(LOCATION + "Could not read number of clusters"); + } + + ClusterVector clusters(3, 3, n_clusters); + clusters.set_frame_number(frame_number); + + if (fread(clusters.data(), clusters.item_size(), n_clusters, fp) != + static_cast(n_clusters)) { + throw std::runtime_error(LOCATION + "Could not read clusters"); + } + clusters.resize(n_clusters); + if (m_gain_map) + clusters.apply_gain_map(m_gain_map->view()); + return clusters; +} + +ClusterVector ClusterFile::read_frame_with_cut() { if (m_mode != "r") { throw std::runtime_error("File not opened for reading"); } @@ -182,149 +259,47 @@ ClusterVector ClusterFile::read_frame() { throw std::runtime_error("Could not read frame number"); } - int32_t n_clusters; // Saved as 32bit integer in the cluster file - if (fread(&n_clusters, sizeof(n_clusters), 1, fp) != 1) { + + if (fread(&m_num_left, sizeof(m_num_left), 1, fp) != 1) { throw std::runtime_error("Could not read number of clusters"); } - // std::vector clusters(n_clusters); - ClusterVector clusters(3, 3, n_clusters); + + ClusterVector clusters(3, 3); + clusters.reserve(m_num_left); clusters.set_frame_number(frame_number); - - if (fread(clusters.data(), clusters.item_size(), n_clusters, fp) != - static_cast(n_clusters)) { - throw std::runtime_error("Could not read clusters"); + while(m_num_left){ + Cluster3x3 c = read_one_cluster(); + if(is_selected(c)){ + clusters.push_back(c.x, c.y, reinterpret_cast(c.data)); + } } - clusters.resize(n_clusters); + if (m_gain_map) + clusters.apply_gain_map(m_gain_map->view()); return clusters; } -// std::vector ClusterFile::read_cluster_with_cut(size_t n_clusters, -// double *noise_map, -// int nx, int ny) { -// if (m_mode != "r") { 
-// throw std::runtime_error("File not opened for reading"); -// } -// std::vector clusters(n_clusters); -// // size_t read_clusters_with_cut(FILE *fp, size_t n_clusters, Cluster *buf, -// // uint32_t *n_left, double *noise_map, int -// // nx, int ny) { -// int iframe = 0; -// // uint32_t nph = *n_left; -// uint32_t nph = m_num_left; -// // uint32_t nn = *n_left; -// uint32_t nn = m_num_left; -// size_t nph_read = 0; -// int32_t t2max, tot1; -// int32_t tot3; -// // Cluster *ptr = buf; -// Cluster3x3 *ptr = clusters.data(); -// int good = 1; -// double noise; -// // read photons left from previous frame -// if (noise_map) -// printf("Using noise map\n"); +bool ClusterFile::is_selected(Cluster3x3 &cl) { + //Should fail fast + if (m_roi) { + if (!(m_roi->contains(cl.x, cl.y))) { + return false; + } + } + if (m_noise_map){ + int32_t sum_1x1 = cl.data[4]; // central pixel + int32_t sum_2x2 = cl.sum_2x2(); // highest sum of 2x2 subclusters + int32_t sum_3x3 = cl.sum(); // sum of all pixels -// if (nph) { -// if (nph > n_clusters) { -// // if we have more photons left in the frame then photons to -// // read we read directly the requested number -// nn = n_clusters; -// } else { -// nn = nph; -// } -// for (size_t iph = 0; iph < nn; iph++) { -// // read photons 1 by 1 -// size_t n_read = -// fread(reinterpret_cast(ptr), sizeof(Cluster3x3), 1, fp); -// if (n_read != 1) { -// clusters.resize(nph_read); -// return clusters; -// } -// // TODO! 
error handling on read -// good = 1; -// if (noise_map) { -// if (ptr->x >= 0 && ptr->x < nx && ptr->y >= 0 && ptr->y < ny) { -// tot1 = ptr->data[4]; -// analyze_cluster(*ptr, &t2max, &tot3, NULL, NULL, NULL, NULL, -// NULL); -// noise = noise_map[ptr->y * nx + ptr->x]; -// if (tot1 > noise || t2max > 2 * noise || tot3 > 3 * noise) { -// ; -// } else { -// good = 0; -// printf("%d %d %f %d %d %d\n", ptr->x, ptr->y, noise, -// tot1, t2max, tot3); -// } -// } else { -// printf("Bad pixel number %d %d\n", ptr->x, ptr->y); -// good = 0; -// } -// } -// if (good) { -// ptr++; -// nph_read++; -// } -// (m_num_left)--; -// if (nph_read >= n_clusters) -// break; -// } -// } -// if (nph_read < n_clusters) { -// // // keep on reading frames and photons until reaching -// // n_clusters -// while (fread(&iframe, sizeof(iframe), 1, fp)) { -// // // printf("%d\n",nph_read); - -// if (fread(&nph, sizeof(nph), 1, fp)) { -// // // printf("** %d\n",nph); -// m_num_left = nph; -// for (size_t iph = 0; iph < nph; iph++) { -// // // read photons 1 by 1 -// size_t n_read = fread(reinterpret_cast(ptr), -// sizeof(Cluster3x3), 1, fp); -// if (n_read != 1) { -// clusters.resize(nph_read); -// return clusters; -// // return nph_read; -// } -// good = 1; -// if (noise_map) { -// if (ptr->x >= 0 && ptr->x < nx && ptr->y >= 0 && -// ptr->y < ny) { -// tot1 = ptr->data[4]; -// analyze_cluster(*ptr, &t2max, &tot3, NULL, NULL, -// NULL, NULL, NULL); -// // noise = noise_map[ptr->y * nx + ptr->x]; -// noise = noise_map[ptr->y + ny * ptr->x]; -// if (tot1 > noise || t2max > 2 * noise || -// tot3 > 3 * noise) { -// ; -// } else -// good = 0; -// } else { -// printf("Bad pixel number %d %d\n", ptr->x, ptr->y); -// good = 0; -// } -// } -// if (good) { -// ptr++; -// nph_read++; -// } -// (m_num_left)--; -// if (nph_read >= n_clusters) -// break; -// } -// } -// if (nph_read >= n_clusters) -// break; -// } -// } -// // printf("%d\n",nph_read); -// clusters.resize(nph_read); -// return clusters; -// } 
+ auto noise = (*m_noise_map)(cl.y, cl.x); //TODO! check if this is correct + if (sum_1x1 <= noise || sum_2x2 <= 2 * noise || sum_3x3 <= 3 * noise) { + return false; + } + } + //we passed all checks + return true; +} NDArray calculate_eta2(ClusterVector &clusters) { //TOTO! make work with 2x2 clusters @@ -419,111 +394,4 @@ Eta2 calculate_eta2(Cluster2x2 &cl) { } - -int analyze_cluster(Cluster3x3 &cl, int32_t *t2, int32_t *t3, char *quad, - double *eta2x, double *eta2y, double *eta3x, - double *eta3y) { - - return analyze_data(cl.data, t2, t3, quad, eta2x, eta2y, eta3x, eta3y); -} - -int analyze_data(int32_t *data, int32_t *t2, int32_t *t3, char *quad, - double *eta2x, double *eta2y, double *eta3x, double *eta3y) { - - int ok = 1; - - int32_t tot2[4]; - int32_t t2max = 0; - char c = 0; - int32_t val, tot3; - - tot3 = 0; - for (int i = 0; i < 4; i++) - tot2[i] = 0; - - for (int ix = 0; ix < 3; ix++) { - for (int iy = 0; iy < 3; iy++) { - val = data[iy * 3 + ix]; - // printf ("%d ",data[iy * 3 + ix]); - tot3 += val; - if (ix <= 1 && iy <= 1) - tot2[cBottomLeft] += val; - if (ix >= 1 && iy <= 1) - tot2[cBottomRight] += val; - if (ix <= 1 && iy >= 1) - tot2[cTopLeft] += val; - if (ix >= 1 && iy >= 1) - tot2[cTopRight] += val; - } - // printf ("\n"); - } - // printf ("\n"); - - if (t2 || quad) { - - t2max = tot2[0]; - c = cBottomLeft; - for (int i = 1; i < 4; i++) { - if (tot2[i] > t2max) { - t2max = tot2[i]; - c = i; - } - } - // printf("*** %d %d %d %d -- - // %d\n",tot2[0],tot2[1],tot2[2],tot2[3],t2max); - if (quad) - *quad = c; - if (t2) - *t2 = t2max; - } - - if (t3) - *t3 = tot3; - - if (eta2x || eta2y) { - if (eta2x) - *eta2x = 0; - if (eta2y) - *eta2y = 0; - switch (c) { - case cBottomLeft: - if (eta2x && (data[3] + data[4]) != 0) - *eta2x = static_cast(data[4]) / (data[3] + data[4]); - if (eta2y && (data[1] + data[4]) != 0) - *eta2y = static_cast(data[4]) / (data[1] + data[4]); - break; - case cBottomRight: - if (eta2x && (data[2] + data[5]) != 0) - *eta2x = 
static_cast(data[5]) / (data[4] + data[5]); - if (eta2y && (data[1] + data[4]) != 0) - *eta2y = static_cast(data[4]) / (data[1] + data[4]); - break; - case cTopLeft: - if (eta2x && (data[7] + data[4]) != 0) - *eta2x = static_cast(data[4]) / (data[3] + data[4]); - if (eta2y && (data[7] + data[4]) != 0) - *eta2y = static_cast(data[7]) / (data[7] + data[4]); - break; - case cTopRight: - if (eta2x && t2max != 0) - *eta2x = static_cast(data[5]) / (data[5] + data[4]); - if (eta2y && t2max != 0) - *eta2y = static_cast(data[7]) / (data[7] + data[4]); - break; - default:; - } - } - - if (eta3x || eta3y) { - if (eta3x && (data[3] + data[4] + data[5]) != 0) - *eta3x = static_cast(-data[3] + data[3 + 2]) / - (data[3] + data[4] + data[5]); - if (eta3y && (data[1] + data[4] + data[7]) != 0) - *eta3y = static_cast(-data[1] + data[2 * 3 + 1]) / - (data[1] + data[4] + data[7]); - } - - return ok; -} - } // namespace aare \ No newline at end of file diff --git a/src/ClusterFile.test.cpp b/src/ClusterFile.test.cpp new file mode 100644 index 0000000..a0eed04 --- /dev/null +++ b/src/ClusterFile.test.cpp @@ -0,0 +1,80 @@ +#include "aare/ClusterFile.hpp" +#include "test_config.hpp" + + +#include "aare/defs.hpp" +#include +#include + + + + +using aare::ClusterFile; + +TEST_CASE("Read one frame from a a cluster file", "[.integration]") { + //We know that the frame has 97 clusters + auto fpath = test_data_path() / "clusters" / "single_frame_97_clustrers.clust"; + REQUIRE(std::filesystem::exists(fpath)); + + ClusterFile f(fpath); + auto clusters = f.read_frame(); + REQUIRE(clusters.size() == 97); + REQUIRE(clusters.frame_number() == 135); +} + +TEST_CASE("Read one frame using ROI", "[.integration]") { + //We know that the frame has 97 clusters + auto fpath = test_data_path() / "clusters" / "single_frame_97_clustrers.clust"; + REQUIRE(std::filesystem::exists(fpath)); + + ClusterFile f(fpath); + aare::ROI roi; + roi.xmin = 0; + roi.xmax = 50; + roi.ymin = 200; + roi.ymax = 249; + 
f.set_roi(roi); + auto clusters = f.read_frame(); + REQUIRE(clusters.size() == 49); + REQUIRE(clusters.frame_number() == 135); + + //Check that all clusters are within the ROI + for (size_t i = 0; i < clusters.size(); i++) { + auto c = clusters.at(i); + REQUIRE(c.x >= roi.xmin); + REQUIRE(c.x <= roi.xmax); + REQUIRE(c.y >= roi.ymin); + REQUIRE(c.y <= roi.ymax); + } + +} + + +TEST_CASE("Read clusters from single frame file", "[.integration]") { + + auto fpath = test_data_path() / "clusters" / "single_frame_97_clustrers.clust"; + REQUIRE(std::filesystem::exists(fpath)); + + SECTION("Read fewer clusters than available") { + ClusterFile f(fpath); + auto clusters = f.read_clusters(50); + REQUIRE(clusters.size() == 50); + REQUIRE(clusters.frame_number() == 135); + } + SECTION("Read more clusters than available") { + ClusterFile f(fpath); + // 100 is the maximum number of clusters read + auto clusters = f.read_clusters(100); + REQUIRE(clusters.size() == 97); + REQUIRE(clusters.frame_number() == 135); + } + SECTION("Read all clusters") { + ClusterFile f(fpath); + auto clusters = f.read_clusters(97); + REQUIRE(clusters.size() == 97); + REQUIRE(clusters.frame_number() == 135); + } + + + +} diff --git a/tests/test_config.hpp.in b/tests/test_config.hpp.in index 62993b7..e314b8f 100644 --- a/tests/test_config.hpp.in +++ b/tests/test_config.hpp.in @@ -7,6 +7,6 @@ inline auto test_data_path(){ if(const char* env_p = std::getenv("AARE_TEST_DATA")){ return std::filesystem::path(env_p); }else{ - throw std::runtime_error("AARE_TEST_DATA_PATH not set"); + throw std::runtime_error("Path to test data: $AARE_TEST_DATA not set"); } } \ No newline at end of file From 8cad7a50a6ea69ffa0058c3134afcbc1753b2274 Mon Sep 17 00:00:00 2001 From: froejdh_e Date: Tue, 1 Apr 2025 15:00:03 +0200 Subject: [PATCH 058/120] fixed py --- python/src/cluster_file.hpp | 5 ----- 1 file changed, 5 deletions(-) diff --git a/python/src/cluster_file.hpp b/python/src/cluster_file.hpp index b807712..ff46043 100644 
--- a/python/src/cluster_file.hpp +++ b/python/src/cluster_file.hpp @@ -31,11 +31,6 @@ void define_cluster_file_io_bindings(py::module &m) { auto v = new ClusterVector(self.read_clusters(n_clusters)); return v; },py::return_value_policy::take_ownership) - .def("read_clusters", - [](ClusterFile &self, size_t n_clusters, ROI roi) { - auto v = new ClusterVector(self.read_clusters(n_clusters, roi)); - return v; - },py::return_value_policy::take_ownership) .def("read_frame", [](ClusterFile &self) { auto v = new ClusterVector(self.read_frame()); From e1533282f1103f24c427d89bc94dea083ec6b776 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Erik=20Fr=C3=B6jdh?= Date: Tue, 1 Apr 2025 15:15:54 +0200 Subject: [PATCH 059/120] Cluster cuts (#146) Co-authored-by: Patrick Co-authored-by: JulianHeymes Co-authored-by: Dhanya Thattil Co-authored-by: Xiangyu Xie <45243914+xiangyuxie@users.noreply.github.com> Co-authored-by: xiangyu.xie --- CMakeLists.txt | 46 ++- conda-recipe/meta.yaml | 3 +- include/aare/Cluster.hpp | 36 +++ include/aare/ClusterFile.hpp | 73 ++--- include/aare/ClusterVector.hpp | 25 ++ include/aare/defs.hpp | 6 +- patches/libzmq_cmake_version.patch | 18 ++ pyproject.toml | 3 +- python/src/cluster_file.hpp | 24 +- src/ClusterFile.cpp | 457 ++++++++++------------------- src/ClusterFile.test.cpp | 80 +++++ tests/test_config.hpp.in | 2 +- 12 files changed, 410 insertions(+), 363 deletions(-) create mode 100644 include/aare/Cluster.hpp create mode 100644 patches/libzmq_cmake_version.patch create mode 100644 src/ClusterFile.test.cpp diff --git a/CMakeLists.txt b/CMakeLists.txt index 4772f0b..804b2f6 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -81,15 +81,30 @@ set(CMAKE_EXPORT_COMPILE_COMMANDS ON) if(AARE_FETCH_LMFIT) #TODO! Should we fetch lmfit from the web or inlcude a tar.gz in the repo? 
- set(lmfit_patch git apply ${CMAKE_CURRENT_SOURCE_DIR}/patches/lmfit.patch) - FetchContent_Declare( - lmfit - GIT_REPOSITORY https://jugit.fz-juelich.de/mlz/lmfit.git - GIT_TAG main - PATCH_COMMAND ${lmfit_patch} - UPDATE_DISCONNECTED 1 - EXCLUDE_FROM_ALL 1 - ) + set(LMFIT_PATCH_COMMAND git apply ${CMAKE_CURRENT_SOURCE_DIR}/patches/lmfit.patch) + + # For cmake < 3.28 we can't supply EXCLUDE_FROM_ALL to FetchContent_Declare + # so we need this workaround + if (${CMAKE_VERSION} VERSION_LESS "3.28") + FetchContent_Declare( + lmfit + GIT_REPOSITORY https://jugit.fz-juelich.de/mlz/lmfit.git + GIT_TAG main + PATCH_COMMAND ${LMFIT_PATCH_COMMAND} + UPDATE_DISCONNECTED 1 + ) + else() + FetchContent_Declare( + lmfit + GIT_REPOSITORY https://jugit.fz-juelich.de/mlz/lmfit.git + GIT_TAG main + PATCH_COMMAND ${LMFIT_PATCH_COMMAND} + UPDATE_DISCONNECTED 1 + EXCLUDE_FROM_ALL 1 + ) + endif() + + #Disable what we don't need from lmfit set(BUILD_TESTING OFF CACHE BOOL "") set(LMFIT_CPPTEST OFF CACHE BOOL "") @@ -97,8 +112,15 @@ if(AARE_FETCH_LMFIT) set(LMFIT_CPPTEST OFF CACHE BOOL "") set(BUILD_SHARED_LIBS OFF CACHE BOOL "") + if (${CMAKE_VERSION} VERSION_LESS "3.28") + if(NOT lmfit_POPULATED) + FetchContent_Populate(lmfit) + add_subdirectory(${lmfit_SOURCE_DIR} ${lmfit_BINARY_DIR} EXCLUDE_FROM_ALL) + endif() + else() + FetchContent_MakeAvailable(lmfit) + endif() - FetchContent_MakeAvailable(lmfit) set_property(TARGET lmfit PROPERTY POSITION_INDEPENDENT_CODE ON) else() find_package(lmfit REQUIRED) @@ -111,10 +133,13 @@ if(AARE_FETCH_ZMQ) if (${CMAKE_VERSION} VERSION_GREATER_EQUAL "3.30") cmake_policy(SET CMP0169 OLD) endif() + set(ZMQ_PATCH_COMMAND git apply ${CMAKE_CURRENT_SOURCE_DIR}/patches/libzmq_cmake_version.patch) FetchContent_Declare( libzmq GIT_REPOSITORY https://github.com/zeromq/libzmq.git GIT_TAG v4.3.4 + PATCH_COMMAND ${ZMQ_PATCH_COMMAND} + UPDATE_DISCONNECTED 1 ) # Disable unwanted options from libzmq set(BUILD_TESTS OFF CACHE BOOL "Switch off libzmq test build") @@ 
-396,6 +421,7 @@ if(AARE_TESTS) ${CMAKE_CURRENT_SOURCE_DIR}/src/NDView.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/ClusterFinder.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/ClusterVector.test.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/ClusterFile.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/Pedestal.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyFile.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyHelpers.test.cpp diff --git a/conda-recipe/meta.yaml b/conda-recipe/meta.yaml index 120854b..560e831 100644 --- a/conda-recipe/meta.yaml +++ b/conda-recipe/meta.yaml @@ -1,6 +1,7 @@ package: name: aare - version: 2025.3.18 #TODO! how to not duplicate this? + version: 2025.4.1 #TODO! how to not duplicate this? + diff --git a/include/aare/Cluster.hpp b/include/aare/Cluster.hpp new file mode 100644 index 0000000..48f9ef0 --- /dev/null +++ b/include/aare/Cluster.hpp @@ -0,0 +1,36 @@ +#pragma once + +#include +#include +#include +#include +#include + +namespace aare { + +//TODO! Template this? +struct Cluster3x3 { + int16_t x; + int16_t y; + int32_t data[9]; + + int32_t sum_2x2() const{ + std::array total; + total[0] = data[0] + data[1] + data[3] + data[4]; + total[1] = data[1] + data[2] + data[4] + data[5]; + total[2] = data[3] + data[4] + data[6] + data[7]; + total[3] = data[4] + data[5] + data[7] + data[8]; + return *std::max_element(total.begin(), total.end()); + } + + int32_t sum() const{ + return std::accumulate(data, data + 9, 0); + } +}; +struct Cluster2x2 { + int16_t x; + int16_t y; + int32_t data[4]; +}; + +} // namespace aare \ No newline at end of file diff --git a/include/aare/ClusterFile.hpp b/include/aare/ClusterFile.hpp index 5bea342..bea9f48 100644 --- a/include/aare/ClusterFile.hpp +++ b/include/aare/ClusterFile.hpp @@ -1,25 +1,17 @@ #pragma once +#include "aare/Cluster.hpp" #include "aare/ClusterVector.hpp" #include "aare/NDArray.hpp" #include "aare/defs.hpp" #include #include +#include namespace aare { -//TODO! Template this? 
-struct Cluster3x3 { - int16_t x; - int16_t y; - int32_t data[9]; -}; -struct Cluster2x2 { - int16_t x; - int16_t y; - int32_t data[4]; -}; +//TODO! Legacy enums, migrate to enum class typedef enum { cBottomLeft = 0, cBottomRight = 1, @@ -53,15 +45,7 @@ struct ClusterAnalysis { double etay; }; -/* -Binary cluster file. Expects data to be layed out as: -int32_t frame_number -uint32_t number_of_clusters -int16_t x, int16_t y, int32_t data[9] x number_of_clusters -int32_t frame_number -uint32_t number_of_clusters -.... -*/ + /** * @brief Class to read and write cluster files @@ -70,16 +54,19 @@ uint32_t number_of_clusters * * int32_t frame_number * uint32_t number_of_clusters - * int16_t x, int16_t y, int32_t data[9] x number_of_clusters + * int16_t x, int16_t y, int32_t data[9] * number_of_clusters * int32_t frame_number * uint32_t number_of_clusters * etc. */ class ClusterFile { FILE *fp{}; - uint32_t m_num_left{}; - size_t m_chunk_size{}; - const std::string m_mode; + uint32_t m_num_left{}; /*Number of photons left in frame*/ + size_t m_chunk_size{}; /*Number of clusters to read at a time*/ + const std::string m_mode; /*Mode to open the file in*/ + std::optional m_roi; /*Region of interest, will be applied if set*/ + std::optional> m_noise_map; /*Noise map to cut photons, will be applied if set*/ + std::optional> m_gain_map; /*Gain map to apply to the clusters, will be applied if set*/ public: /** @@ -117,29 +104,49 @@ class ClusterFile { void write_frame(const ClusterVector &clusters); - // Need to be migrated to support NDArray and return a ClusterVector - // std::vector - // read_cluster_with_cut(size_t n_clusters, double *noise_map, int nx, int ny); - /** * @brief Return the chunk size */ size_t chunk_size() const { return m_chunk_size; } + + /** + * @brief Set the region of interest to use when reading clusters. If set only clusters within + * the ROI will be read. + */ + void set_roi(ROI roi); + + /** + * @brief Set the noise map to use when reading clusters. 
If set clusters below the noise + * level will be discarded. Selection criteria one of: Central pixel above noise, highest + * 2x2 sum above 2 * noise, total sum above 3 * noise. + */ + void set_noise_map(const NDView noise_map); + + /** + * @brief Set the gain map to use when reading clusters. If set the gain map will be applied + * to the clusters that pass ROI and noise_map selection. + */ + void set_gain_map(const NDView gain_map); /** * @brief Close the file. If not closed the file will be closed in the destructor */ void close(); + + private: + ClusterVector read_clusters_with_cut(size_t n_clusters); + ClusterVector read_clusters_without_cut(size_t n_clusters); + ClusterVector read_frame_with_cut(); + ClusterVector read_frame_without_cut(); + bool is_selected(Cluster3x3 &cl); + Cluster3x3 read_one_cluster(); }; -int analyze_data(int32_t *data, int32_t *t2, int32_t *t3, char *quad, - double *eta2x, double *eta2y, double *eta3x, double *eta3y); -int analyze_cluster(Cluster3x3 &cl, int32_t *t2, int32_t *t3, char *quad, - double *eta2x, double *eta2y, double *eta3x, double *eta3y); - +//TODO! helper functions that doesn't really belong here NDArray calculate_eta2(ClusterVector &clusters); Eta2 calculate_eta2(Cluster3x3 &cl); Eta2 calculate_eta2(Cluster2x2 &cl); + } // namespace aare diff --git a/include/aare/ClusterVector.hpp b/include/aare/ClusterVector.hpp index 1c15a22..b91278c 100644 --- a/include/aare/ClusterVector.hpp +++ b/include/aare/ClusterVector.hpp @@ -8,6 +8,9 @@ #include +#include "aare/Cluster.hpp" +#include "aare/NDView.hpp" + namespace aare { /** @@ -265,6 +268,28 @@ template class ClusterVector { m_size = new_size; } + void apply_gain_map(const NDView gain_map){ + //in principle we need to know the size of the image for this lookup + //TODO! 
check orientations + std::array xcorr = {-1, 0, 1, -1, 0, 1, -1, 0, 1}; + std::array ycorr = {-1, -1, -1, 0, 0, 0, 1, 1, 1}; + for (size_t i=0; i(i); + + if (cl.x > 0 && cl.y > 0 && cl.x < gain_map.shape(1)-1 && cl.y < gain_map.shape(0)-1){ + for (size_t j=0; j<9; j++){ + size_t x = cl.x + xcorr[j]; + size_t y = cl.y + ycorr[j]; + cl.data[j] = static_cast(cl.data[j] * gain_map(y, x)); + } + }else{ + memset(cl.data, 0, 9*sizeof(T)); //clear edge clusters + } + + + } + } + private: void allocate_buffer(size_t new_capacity) { size_t num_bytes = item_size() * new_capacity; diff --git a/include/aare/defs.hpp b/include/aare/defs.hpp index 4559882..4d22bd4 100644 --- a/include/aare/defs.hpp +++ b/include/aare/defs.hpp @@ -1,11 +1,9 @@ #pragma once #include "aare/Dtype.hpp" -// #include "aare/utils/logger.hpp" #include #include - #include #include #include @@ -43,6 +41,7 @@ inline constexpr size_t bits_per_byte = 8; void assert_failed(const std::string &msg); + class DynamicCluster { public: int cluster_sizeX; @@ -215,6 +214,9 @@ struct ROI{ int64_t height() const { return ymax - ymin; } int64_t width() const { return xmax - xmin; } + bool contains(int64_t x, int64_t y) const { + return x >= xmin && x < xmax && y >= ymin && y < ymax; + } }; diff --git a/patches/libzmq_cmake_version.patch b/patches/libzmq_cmake_version.patch new file mode 100644 index 0000000..4e421d3 --- /dev/null +++ b/patches/libzmq_cmake_version.patch @@ -0,0 +1,18 @@ +diff --git a/CMakeLists.txt b/CMakeLists.txt +index dd3d8eb9..c0187747 100644 +--- a/CMakeLists.txt ++++ b/CMakeLists.txt +@@ -1,11 +1,8 @@ + # CMake build script for ZeroMQ + project(ZeroMQ) + +-if(${CMAKE_SYSTEM_NAME} STREQUAL Darwin) +- cmake_minimum_required(VERSION 3.0.2) +-else() +- cmake_minimum_required(VERSION 2.8.12) +-endif() ++cmake_minimum_required(VERSION 3.15) ++message(STATUS "Patched cmake version") + + include(CheckIncludeFiles) + include(CheckCCompilerFlag) diff --git a/pyproject.toml b/pyproject.toml index 
b9bf7d2..60128c9 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,8 @@ build-backend = "scikit_build_core.build" [project] name = "aare" -version = "2025.3.18" +version = "2025.4.1" + diff --git a/python/src/cluster_file.hpp b/python/src/cluster_file.hpp index f587443..ff46043 100644 --- a/python/src/cluster_file.hpp +++ b/python/src/cluster_file.hpp @@ -31,26 +31,22 @@ void define_cluster_file_io_bindings(py::module &m) { auto v = new ClusterVector(self.read_clusters(n_clusters)); return v; },py::return_value_policy::take_ownership) - .def("read_clusters", - [](ClusterFile &self, size_t n_clusters, ROI roi) { - auto v = new ClusterVector(self.read_clusters(n_clusters, roi)); - return v; - },py::return_value_policy::take_ownership) .def("read_frame", [](ClusterFile &self) { auto v = new ClusterVector(self.read_frame()); return v; }) + .def("set_roi", &ClusterFile::set_roi) + .def("set_noise_map", [](ClusterFile &self, py::array_t noise_map) { + auto view = make_view_2d(noise_map); + self.set_noise_map(view); + }) + .def("set_gain_map", [](ClusterFile &self, py::array_t gain_map) { + auto view = make_view_2d(gain_map); + self.set_gain_map(view); + }) + .def("close", &ClusterFile::close) .def("write_frame", &ClusterFile::write_frame) - // .def("read_cluster_with_cut", - // [](ClusterFile &self, size_t n_clusters, - // py::array_t noise_map, int nx, int ny) { - // auto view = make_view_2d(noise_map); - // auto *vec = - // new std::vector(self.read_cluster_with_cut( - // n_clusters, view.data(), nx, ny)); - // return return_vector(vec); - // }) .def("__enter__", [](ClusterFile &self) { return &self; }) .def("__exit__", [](ClusterFile &self, diff --git a/src/ClusterFile.cpp b/src/ClusterFile.cpp index 2e23e09..f77ac92 100644 --- a/src/ClusterFile.cpp +++ b/src/ClusterFile.cpp @@ -31,6 +31,18 @@ ClusterFile::ClusterFile(const std::filesystem::path &fname, size_t chunk_size, } } +void ClusterFile::set_roi(ROI roi){ + m_roi = roi; +} + +void 
ClusterFile::set_noise_map(const NDView noise_map){ + m_noise_map = NDArray(noise_map); +} + +void ClusterFile::set_gain_map(const NDView gain_map){ + m_gain_map = NDArray(gain_map); +} + ClusterFile::~ClusterFile() { close(); } void ClusterFile::close() { @@ -48,14 +60,37 @@ void ClusterFile::write_frame(const ClusterVector &clusters) { !(clusters.cluster_size_y() == 3)) { throw std::runtime_error("Only 3x3 clusters are supported"); } + //First write the frame number - 4 bytes int32_t frame_number = clusters.frame_number(); - fwrite(&frame_number, sizeof(frame_number), 1, fp); + if(fwrite(&frame_number, sizeof(frame_number), 1, fp)!=1){ + throw std::runtime_error(LOCATION + "Could not write frame number"); + } + + //Then write the number of clusters - 4 bytes uint32_t n_clusters = clusters.size(); - fwrite(&n_clusters, sizeof(n_clusters), 1, fp); - fwrite(clusters.data(), clusters.item_size(), clusters.size(), fp); + if(fwrite(&n_clusters, sizeof(n_clusters), 1, fp)!=1){ + throw std::runtime_error(LOCATION + "Could not write number of clusters"); + } + + //Now write the clusters in the frame + if(fwrite(clusters.data(), clusters.item_size(), clusters.size(), fp)!=clusters.size()){ + throw std::runtime_error(LOCATION + "Could not write clusters"); + } } -ClusterVector ClusterFile::read_clusters(size_t n_clusters) { + +ClusterVector ClusterFile::read_clusters(size_t n_clusters){ + if (m_mode != "r") { + throw std::runtime_error("File not opened for reading"); + } + if (m_noise_map || m_roi){ + return read_clusters_with_cut(n_clusters); + }else{ + return read_clusters_without_cut(n_clusters); + } +} + +ClusterVector ClusterFile::read_clusters_without_cut(size_t n_clusters) { if (m_mode != "r") { throw std::runtime_error("File not opened for reading"); } @@ -86,6 +121,7 @@ ClusterVector ClusterFile::read_clusters(size_t n_clusters) { if (nph_read < n_clusters) { // keep on reading frames and photons until reaching n_clusters while (fread(&iframe, sizeof(iframe), 1, 
fp)) { + clusters.set_frame_number(iframe); // read number of clusters in frame if (fread(&nph, sizeof(nph), 1, fp)) { if (nph > (n_clusters - nph_read)) @@ -105,83 +141,111 @@ ClusterVector ClusterFile::read_clusters(size_t n_clusters) { // Resize the vector to the number of clusters. // No new allocation, only change bounds. clusters.resize(nph_read); + if(m_gain_map) + clusters.apply_gain_map(m_gain_map->view()); return clusters; } -ClusterVector ClusterFile::read_clusters(size_t n_clusters, ROI roi) { - if (m_mode != "r") { - throw std::runtime_error("File not opened for reading"); - } - + +ClusterVector ClusterFile::read_clusters_with_cut(size_t n_clusters) { ClusterVector clusters(3,3); clusters.reserve(n_clusters); - int32_t iframe = 0; // frame number needs to be 4 bytes! - size_t nph_read = 0; - uint32_t nn = m_num_left; - uint32_t nph = m_num_left; // number of clusters in frame needs to be 4 - - // auto buf = reinterpret_cast(clusters.data()); - // auto buf = clusters.data(); - - Cluster3x3 tmp; //this would break if the cluster size changes - // if there are photons left from previous frame read them first - if (nph) { - if (nph > n_clusters) { - // if we have more photons left in the frame then photons to read we - // read directly the requested number - nn = n_clusters; - } else { - nn = nph; - } - //Read one cluster, in the ROI push back - // nph_read += fread((buf + nph_read*clusters.item_size()), - // clusters.item_size(), nn, fp); - for(size_t i = 0; i < nn; i++){ - fread(&tmp, sizeof(tmp), 1, fp); - if(tmp.x >= roi.xmin && tmp.x <= roi.xmax && tmp.y >= roi.ymin && tmp.y <= roi.ymax){ - clusters.push_back(tmp.x, tmp.y, reinterpret_cast(tmp.data)); - nph_read++; + if (m_num_left) { + while(m_num_left && clusters.size() < n_clusters){ + Cluster3x3 c = read_one_cluster(); + if(is_selected(c)){ + clusters.push_back(c.x, c.y, reinterpret_cast(c.data)); } } - - m_num_left = nph - nn; // write back the number of photons left } - if (nph_read < 
n_clusters) { - // keep on reading frames and photons until reaching n_clusters - while (fread(&iframe, sizeof(iframe), 1, fp)) { - // read number of clusters in frame - if (fread(&nph, sizeof(nph), 1, fp)) { - if (nph > (n_clusters - nph_read)) - nn = n_clusters - nph_read; - else - nn = nph; - - // nph_read += fread((buf + nph_read*clusters.item_size()), - // clusters.item_size(), nn, fp); - for(size_t i = 0; i < nn; i++){ - fread(&tmp, sizeof(tmp), 1, fp); - if(tmp.x >= roi.xmin && tmp.x <= roi.xmax && tmp.y >= roi.ymin && tmp.y <= roi.ymax){ - clusters.push_back(tmp.x, tmp.y, reinterpret_cast(tmp.data)); - nph_read++; + // we did not have enough clusters left in the previous frame + // keep on reading frames until reaching n_clusters + if (clusters.size() < n_clusters) { + // sanity check + if (m_num_left) { + throw std::runtime_error(LOCATION + "Entered second loop with clusters left\n"); + } + + int32_t frame_number = 0; // frame number needs to be 4 bytes! + while (fread(&frame_number, sizeof(frame_number), 1, fp)) { + if (fread(&m_num_left, sizeof(m_num_left), 1, fp)) { + clusters.set_frame_number(frame_number); //cluster vector will hold the last frame number + while(m_num_left && clusters.size() < n_clusters){ + Cluster3x3 c = read_one_cluster(); + if(is_selected(c)){ + clusters.push_back(c.x, c.y, reinterpret_cast(c.data)); } } - m_num_left = nph - nn; } - if (nph_read >= n_clusters) + + // we have enough clusters, break out of the outer while loop + if (clusters.size() >= n_clusters) break; } - } - // Resize the vector to the number of clusters. - // No new allocation, only change bounds. 
- clusters.resize(nph_read); + } + if(m_gain_map) + clusters.apply_gain_map(m_gain_map->view()); + return clusters; } -ClusterVector ClusterFile::read_frame() { +Cluster3x3 ClusterFile::read_one_cluster(){ + Cluster3x3 c; + auto rc = fread(&c, sizeof(c), 1, fp); + if (rc != 1) { + throw std::runtime_error(LOCATION + "Could not read cluster"); + } + --m_num_left; + return c; +} + +ClusterVector ClusterFile::read_frame(){ + if (m_mode != "r") { + throw std::runtime_error(LOCATION + "File not opened for reading"); + } + if (m_noise_map || m_roi){ + return read_frame_with_cut(); + }else{ + return read_frame_without_cut(); + } +} + +ClusterVector ClusterFile::read_frame_without_cut() { + if (m_mode != "r") { + throw std::runtime_error("File not opened for reading"); + } + if (m_num_left) { + throw std::runtime_error( + "There are still photons left in the last frame"); + } + int32_t frame_number; + if (fread(&frame_number, sizeof(frame_number), 1, fp) != 1) { + throw std::runtime_error(LOCATION + "Could not read frame number"); + } + + int32_t n_clusters; // Saved as 32bit integer in the cluster file + if (fread(&n_clusters, sizeof(n_clusters), 1, fp) != 1) { + throw std::runtime_error(LOCATION + "Could not read number of clusters"); + } + + ClusterVector clusters(3, 3, n_clusters); + clusters.set_frame_number(frame_number); + + if (fread(clusters.data(), clusters.item_size(), n_clusters, fp) != + static_cast(n_clusters)) { + throw std::runtime_error(LOCATION + "Could not read clusters"); + } + clusters.resize(n_clusters); + if (m_gain_map) + clusters.apply_gain_map(m_gain_map->view()); + return clusters; +} + +ClusterVector ClusterFile::read_frame_with_cut() { if (m_mode != "r") { throw std::runtime_error("File not opened for reading"); } @@ -194,149 +258,47 @@ ClusterVector ClusterFile::read_frame() { throw std::runtime_error("Could not read frame number"); } - int32_t n_clusters; // Saved as 32bit integer in the cluster file - if (fread(&n_clusters, 
sizeof(n_clusters), 1, fp) != 1) { + + if (fread(&m_num_left, sizeof(m_num_left), 1, fp) != 1) { throw std::runtime_error("Could not read number of clusters"); } - // std::vector clusters(n_clusters); - ClusterVector clusters(3, 3, n_clusters); + + ClusterVector clusters(3, 3); + clusters.reserve(m_num_left); clusters.set_frame_number(frame_number); - - if (fread(clusters.data(), clusters.item_size(), n_clusters, fp) != - static_cast(n_clusters)) { - throw std::runtime_error("Could not read clusters"); + while(m_num_left){ + Cluster3x3 c = read_one_cluster(); + if(is_selected(c)){ + clusters.push_back(c.x, c.y, reinterpret_cast(c.data)); + } } - clusters.resize(n_clusters); + if (m_gain_map) + clusters.apply_gain_map(m_gain_map->view()); return clusters; } -// std::vector ClusterFile::read_cluster_with_cut(size_t n_clusters, -// double *noise_map, -// int nx, int ny) { -// if (m_mode != "r") { -// throw std::runtime_error("File not opened for reading"); -// } -// std::vector clusters(n_clusters); -// // size_t read_clusters_with_cut(FILE *fp, size_t n_clusters, Cluster *buf, -// // uint32_t *n_left, double *noise_map, int -// // nx, int ny) { -// int iframe = 0; -// // uint32_t nph = *n_left; -// uint32_t nph = m_num_left; -// // uint32_t nn = *n_left; -// uint32_t nn = m_num_left; -// size_t nph_read = 0; -// int32_t t2max, tot1; -// int32_t tot3; -// // Cluster *ptr = buf; -// Cluster3x3 *ptr = clusters.data(); -// int good = 1; -// double noise; -// // read photons left from previous frame -// if (noise_map) -// printf("Using noise map\n"); +bool ClusterFile::is_selected(Cluster3x3 &cl) { + //Should fail fast + if (m_roi) { + if (!(m_roi->contains(cl.x, cl.y))) { + return false; + } + } + if (m_noise_map){ + int32_t sum_1x1 = cl.data[4]; // central pixel + int32_t sum_2x2 = cl.sum_2x2(); // highest sum of 2x2 subclusters + int32_t sum_3x3 = cl.sum(); // sum of all pixels -// if (nph) { -// if (nph > n_clusters) { -// // if we have more photons left in the frame 
then photons to -// // read we read directly the requested number -// nn = n_clusters; -// } else { -// nn = nph; -// } -// for (size_t iph = 0; iph < nn; iph++) { -// // read photons 1 by 1 -// size_t n_read = -// fread(reinterpret_cast(ptr), sizeof(Cluster3x3), 1, fp); -// if (n_read != 1) { -// clusters.resize(nph_read); -// return clusters; -// } -// // TODO! error handling on read -// good = 1; -// if (noise_map) { -// if (ptr->x >= 0 && ptr->x < nx && ptr->y >= 0 && ptr->y < ny) { -// tot1 = ptr->data[4]; -// analyze_cluster(*ptr, &t2max, &tot3, NULL, NULL, NULL, NULL, -// NULL); -// noise = noise_map[ptr->y * nx + ptr->x]; -// if (tot1 > noise || t2max > 2 * noise || tot3 > 3 * noise) { -// ; -// } else { -// good = 0; -// printf("%d %d %f %d %d %d\n", ptr->x, ptr->y, noise, -// tot1, t2max, tot3); -// } -// } else { -// printf("Bad pixel number %d %d\n", ptr->x, ptr->y); -// good = 0; -// } -// } -// if (good) { -// ptr++; -// nph_read++; -// } -// (m_num_left)--; -// if (nph_read >= n_clusters) -// break; -// } -// } -// if (nph_read < n_clusters) { -// // // keep on reading frames and photons until reaching -// // n_clusters -// while (fread(&iframe, sizeof(iframe), 1, fp)) { -// // // printf("%d\n",nph_read); - -// if (fread(&nph, sizeof(nph), 1, fp)) { -// // // printf("** %d\n",nph); -// m_num_left = nph; -// for (size_t iph = 0; iph < nph; iph++) { -// // // read photons 1 by 1 -// size_t n_read = fread(reinterpret_cast(ptr), -// sizeof(Cluster3x3), 1, fp); -// if (n_read != 1) { -// clusters.resize(nph_read); -// return clusters; -// // return nph_read; -// } -// good = 1; -// if (noise_map) { -// if (ptr->x >= 0 && ptr->x < nx && ptr->y >= 0 && -// ptr->y < ny) { -// tot1 = ptr->data[4]; -// analyze_cluster(*ptr, &t2max, &tot3, NULL, NULL, -// NULL, NULL, NULL); -// // noise = noise_map[ptr->y * nx + ptr->x]; -// noise = noise_map[ptr->y + ny * ptr->x]; -// if (tot1 > noise || t2max > 2 * noise || -// tot3 > 3 * noise) { -// ; -// } else -// good = 
0; -// } else { -// printf("Bad pixel number %d %d\n", ptr->x, ptr->y); -// good = 0; -// } -// } -// if (good) { -// ptr++; -// nph_read++; -// } -// (m_num_left)--; -// if (nph_read >= n_clusters) -// break; -// } -// } -// if (nph_read >= n_clusters) -// break; -// } -// } -// // printf("%d\n",nph_read); -// clusters.resize(nph_read); -// return clusters; -// } + auto noise = (*m_noise_map)(cl.y, cl.x); //TODO! check if this is correct + if (sum_1x1 <= noise || sum_2x2 <= 2 * noise || sum_3x3 <= 3 * noise) { + return false; + } + } + //we passed all checks + return true; +} NDArray calculate_eta2(ClusterVector &clusters) { //TOTO! make work with 2x2 clusters @@ -431,111 +393,4 @@ Eta2 calculate_eta2(Cluster2x2 &cl) { } - -int analyze_cluster(Cluster3x3 &cl, int32_t *t2, int32_t *t3, char *quad, - double *eta2x, double *eta2y, double *eta3x, - double *eta3y) { - - return analyze_data(cl.data, t2, t3, quad, eta2x, eta2y, eta3x, eta3y); -} - -int analyze_data(int32_t *data, int32_t *t2, int32_t *t3, char *quad, - double *eta2x, double *eta2y, double *eta3x, double *eta3y) { - - int ok = 1; - - int32_t tot2[4]; - int32_t t2max = 0; - char c = 0; - int32_t val, tot3; - - tot3 = 0; - for (int i = 0; i < 4; i++) - tot2[i] = 0; - - for (int ix = 0; ix < 3; ix++) { - for (int iy = 0; iy < 3; iy++) { - val = data[iy * 3 + ix]; - // printf ("%d ",data[iy * 3 + ix]); - tot3 += val; - if (ix <= 1 && iy <= 1) - tot2[cBottomLeft] += val; - if (ix >= 1 && iy <= 1) - tot2[cBottomRight] += val; - if (ix <= 1 && iy >= 1) - tot2[cTopLeft] += val; - if (ix >= 1 && iy >= 1) - tot2[cTopRight] += val; - } - // printf ("\n"); - } - // printf ("\n"); - - if (t2 || quad) { - - t2max = tot2[0]; - c = cBottomLeft; - for (int i = 1; i < 4; i++) { - if (tot2[i] > t2max) { - t2max = tot2[i]; - c = i; - } - } - // printf("*** %d %d %d %d -- - // %d\n",tot2[0],tot2[1],tot2[2],tot2[3],t2max); - if (quad) - *quad = c; - if (t2) - *t2 = t2max; - } - - if (t3) - *t3 = tot3; - - if (eta2x || eta2y) { 
- if (eta2x) - *eta2x = 0; - if (eta2y) - *eta2y = 0; - switch (c) { - case cBottomLeft: - if (eta2x && (data[3] + data[4]) != 0) - *eta2x = static_cast(data[4]) / (data[3] + data[4]); - if (eta2y && (data[1] + data[4]) != 0) - *eta2y = static_cast(data[4]) / (data[1] + data[4]); - break; - case cBottomRight: - if (eta2x && (data[2] + data[5]) != 0) - *eta2x = static_cast(data[5]) / (data[4] + data[5]); - if (eta2y && (data[1] + data[4]) != 0) - *eta2y = static_cast(data[4]) / (data[1] + data[4]); - break; - case cTopLeft: - if (eta2x && (data[7] + data[4]) != 0) - *eta2x = static_cast(data[4]) / (data[3] + data[4]); - if (eta2y && (data[7] + data[4]) != 0) - *eta2y = static_cast(data[7]) / (data[7] + data[4]); - break; - case cTopRight: - if (eta2x && t2max != 0) - *eta2x = static_cast(data[5]) / (data[5] + data[4]); - if (eta2y && t2max != 0) - *eta2y = static_cast(data[7]) / (data[7] + data[4]); - break; - default:; - } - } - - if (eta3x || eta3y) { - if (eta3x && (data[3] + data[4] + data[5]) != 0) - *eta3x = static_cast(-data[3] + data[3 + 2]) / - (data[3] + data[4] + data[5]); - if (eta3y && (data[1] + data[4] + data[7]) != 0) - *eta3y = static_cast(-data[1] + data[2 * 3 + 1]) / - (data[1] + data[4] + data[7]); - } - - return ok; -} - } // namespace aare \ No newline at end of file diff --git a/src/ClusterFile.test.cpp b/src/ClusterFile.test.cpp new file mode 100644 index 0000000..a0eed04 --- /dev/null +++ b/src/ClusterFile.test.cpp @@ -0,0 +1,80 @@ +#include "aare/ClusterFile.hpp" +#include "test_config.hpp" + + +#include "aare/defs.hpp" +#include +#include + + + + +using aare::ClusterFile; + +TEST_CASE("Read one frame from a a cluster file", "[.integration]") { + //We know that the frame has 97 clusters + auto fpath = test_data_path() / "clusters" / "single_frame_97_clustrers.clust"; + REQUIRE(std::filesystem::exists(fpath)); + + ClusterFile f(fpath); + auto clusters = f.read_frame(); + REQUIRE(clusters.size() == 97); + REQUIRE(clusters.frame_number() == 
135); +} + +TEST_CASE("Read one frame using ROI", "[.integration]") { + //We know that the frame has 97 clusters + auto fpath = test_data_path() / "clusters" / "single_frame_97_clustrers.clust"; + REQUIRE(std::filesystem::exists(fpath)); + + ClusterFile f(fpath); + aare::ROI roi; + roi.xmin = 0; + roi.xmax = 50; + roi.ymin = 200; + roi.ymax = 249; + f.set_roi(roi); + auto clusters = f.read_frame(); + REQUIRE(clusters.size() == 49); + REQUIRE(clusters.frame_number() == 135); + + //Check that all clusters are within the ROI + for (size_t i = 0; i < clusters.size(); i++) { + auto c = clusters.at(i); + REQUIRE(c.x >= roi.xmin); + REQUIRE(c.x <= roi.xmax); + REQUIRE(c.y >= roi.ymin); + REQUIRE(c.y <= roi.ymax); + } + +} + + +TEST_CASE("Read clusters from single frame file", "[.integration]") { + + auto fpath = test_data_path() / "clusters" / "single_frame_97_clustrers.clust"; + REQUIRE(std::filesystem::exists(fpath)); + + SECTION("Read fewer clusters than available") { + ClusterFile f(fpath); + auto clusters = f.read_clusters(50); + REQUIRE(clusters.size() == 50); + REQUIRE(clusters.frame_number() == 135); + } + SECTION("Read more clusters than available") { + ClusterFile f(fpath); + // 100 is the maximum number of clusters read + auto clusters = f.read_clusters(100); + REQUIRE(clusters.size() == 97); + REQUIRE(clusters.frame_number() == 135); + } + SECTION("Read all clusters") { + ClusterFile f(fpath); + auto clusters = f.read_clusters(97); + REQUIRE(clusters.size() == 97); + REQUIRE(clusters.frame_number() == 135); + } + + + +} diff --git a/tests/test_config.hpp.in b/tests/test_config.hpp.in index 62993b7..e314b8f 100644 --- a/tests/test_config.hpp.in +++ b/tests/test_config.hpp.in @@ -7,6 +7,6 @@ inline auto test_data_path(){ if(const char* env_p = std::getenv("AARE_TEST_DATA")){ return std::filesystem::path(env_p); }else{ - throw std::runtime_error("AARE_TEST_DATA_PATH not set"); + throw std::runtime_error("Path to test data: $AARE_TEST_DATA not set"); } } \ No 
newline at end of file From 745d09fbe901c9c07d5d3e6fc80d7366ff057957 Mon Sep 17 00:00:00 2001 From: Mazzoleni Alice Francesca Date: Tue, 1 Apr 2025 15:30:10 +0200 Subject: [PATCH 060/120] changed push_back to take Cluster as input argument --- CMakeLists.txt | 10 +++---- include/aare/ClusterFile.hpp | 12 ++++---- include/aare/ClusterFinder.hpp | 5 ++++ include/aare/ClusterVector.hpp | 22 ++++++--------- src/ClusterVector.test.cpp | 50 ++++++++++++++++++---------------- src/Interpolator.cpp | 2 ++ 6 files changed, 54 insertions(+), 47 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 0bdc317..9e02a8c 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -81,13 +81,13 @@ set(CMAKE_EXPORT_COMPILE_COMMANDS ON) if(AARE_FETCH_LMFIT) #TODO! Should we fetch lmfit from the web or inlcude a tar.gz in the repo? - #set(lmfit_patch git apply ${CMAKE_CURRENT_SOURCE_DIR}/patches/lmfit.patch) + set(lmfit_patch git apply ${CMAKE_CURRENT_SOURCE_DIR}/patches/lmfit.patch) FetchContent_Declare( lmfit GIT_REPOSITORY https://jugit.fz-juelich.de/mlz/lmfit.git GIT_TAG main - #PATCH_COMMAND ${lmfit_patch} - #UPDATE_DISCONNECTED 1 + PATCH_COMMAND ${lmfit_patch} + UPDATE_DISCONNECTED 1 #EXCLUDE_FROM_ALL 1 ) #Disable what we don't need from lmfit @@ -358,7 +358,7 @@ set(SourceFiles add_library(aare_core STATIC ${SourceFiles}) target_include_directories(aare_core PUBLIC "$" - "$" PRIVATE ${lmfit_SOURCE_DIR}/lib + "$" ) target_link_libraries( @@ -369,7 +369,7 @@ target_link_libraries( ${STD_FS_LIB} # from helpers.cmake PRIVATE aare_compiler_flags - "$" + $ ) diff --git a/include/aare/ClusterFile.hpp b/include/aare/ClusterFile.hpp index 289647d..6bd231e 100644 --- a/include/aare/ClusterFile.hpp +++ b/include/aare/ClusterFile.hpp @@ -243,8 +243,9 @@ ClusterFile::read_clusters(size_t n_clusters, ROI roi) { fread(&tmp, sizeof(tmp), 1, fp); if (tmp.x >= roi.xmin && tmp.x <= roi.xmax && tmp.y >= roi.ymin && tmp.y <= roi.ymax) { - clusters.push_back(tmp.x, tmp.y, - 
reinterpret_cast(tmp.data)); + // clusters.push_back(tmp.x, tmp.y, + // reinterpret_cast(tmp.data)); + clusters.push_back(tmp); nph_read++; } } @@ -268,9 +269,10 @@ ClusterFile::read_clusters(size_t n_clusters, ROI roi) { fread(&tmp, sizeof(tmp), 1, fp); if (tmp.x >= roi.xmin && tmp.x <= roi.xmax && tmp.y >= roi.ymin && tmp.y <= roi.ymax) { - clusters.push_back( - tmp.x, tmp.y, - reinterpret_cast(tmp.data)); + // clusters.push_back( + // tmp.x, tmp.y, + // reinterpret_cast(tmp.data)); + clusters.push_back(tmp); nph_read++; } } diff --git a/include/aare/ClusterFinder.hpp b/include/aare/ClusterFinder.hpp index 9a15448..6a8fec4 100644 --- a/include/aare/ClusterFinder.hpp +++ b/include/aare/ClusterFinder.hpp @@ -140,9 +140,14 @@ class ClusterFinder { } // Add the cluster to the output ClusterVector + /* m_clusters.push_back( ix, iy, reinterpret_cast(cluster_data.data())); + */ + m_clusters.push_back( + Cluster{ + ix, iy, cluster_data.data()}); } } } diff --git a/include/aare/ClusterVector.hpp b/include/aare/ClusterVector.hpp index 0e47b51..073df13 100644 --- a/include/aare/ClusterVector.hpp +++ b/include/aare/ClusterVector.hpp @@ -100,25 +100,22 @@ class ClusterVector> { /** * @brief Add a cluster to the vector - * @param x x-coordinate of the cluster - * @param y y-coordinate of the cluster - * @param data pointer to the data of the cluster - * @warning The data pointer must point to a buffer of size cluster_size_x * - * cluster_size_y * sizeof(T) */ - void push_back(CoordType x, CoordType y, const std::byte *data) { + void push_back(const ClusterType &cluster) { if (m_size == m_capacity) { allocate_buffer(m_capacity * 2); } std::byte *ptr = element_ptr(m_size); - *reinterpret_cast(ptr) = x; + *reinterpret_cast(ptr) = cluster.x; ptr += sizeof(CoordType); - *reinterpret_cast(ptr) = y; + *reinterpret_cast(ptr) = cluster.y; ptr += sizeof(CoordType); - std::copy(data, data + ClusterSizeX * ClusterSizeY * sizeof(T), ptr); + std::memcpy(ptr, cluster.data, ClusterSizeX * 
ClusterSizeY * sizeof(T)); + m_size++; } + ClusterVector &operator+=(const ClusterVector &other) { if (m_size + other.m_size > m_capacity) { allocate_buffer(m_capacity + other.m_size); @@ -154,10 +151,9 @@ class ClusterVector> { * @throws std::runtime_error if the cluster size is not 3x3 * @warning Only 3x3 clusters are supported for the 2x2 sum. */ - /* only needed to calculate eta - std::vector sum_2x2() { - std::vector sums(m_size); - const size_t stride = item_size(); + /* only needed to calculate eta TODO: in previous PR already added calculate + sum in PR std::vector sum_2x2() { std::vector sums(m_size); const + size_t stride = item_size(); if (ClusterSizeX != 3 || ClusterSizeY != 3) { throw std::runtime_error( diff --git a/src/ClusterVector.test.cpp b/src/ClusterVector.test.cpp index acbbf56..b58e88a 100644 --- a/src/ClusterVector.test.cpp +++ b/src/ClusterVector.test.cpp @@ -7,7 +7,8 @@ using aare::Cluster; using aare::ClusterVector; -TEST_CASE("ClusterVector 2x2 int32_t capacity 4, push back then read") { +TEST_CASE("ClusterVector 2x2 int32_t capacity 4, push back then read", + "[.ClusterVector]") { ClusterVector> cv(4); REQUIRE(cv.capacity() == 4); @@ -19,7 +20,7 @@ TEST_CASE("ClusterVector 2x2 int32_t capacity 4, push back then read") { // Create a cluster and push back into the vector Cluster c1 = {1, 2, {3, 4, 5, 6}}; - cv.push_back(c1.x, c1.y, reinterpret_cast(&c1.data[0])); + cv.push_back(c1); REQUIRE(cv.size() == 1); REQUIRE(cv.capacity() == 4); @@ -36,7 +37,7 @@ TEST_CASE("ClusterVector 2x2 int32_t capacity 4, push back then read") { } } -TEST_CASE("Summing 3x1 clusters of int64") { +TEST_CASE("Summing 3x1 clusters of int64", "[.ClusterVector]") { ClusterVector> cv(2); REQUIRE(cv.capacity() == 2); REQUIRE(cv.size() == 0); @@ -45,17 +46,17 @@ TEST_CASE("Summing 3x1 clusters of int64") { // Create a cluster and push back into the vector Cluster c1 = {1, 2, {3, 4, 5}}; - cv.push_back(c1.x, c1.y, reinterpret_cast(&c1.data[0])); + cv.push_back(c1); 
REQUIRE(cv.capacity() == 2); REQUIRE(cv.size() == 1); Cluster c2 = {6, 7, {8, 9, 10}}; - cv.push_back(c2.x, c2.y, reinterpret_cast(&c2.data[0])); + cv.push_back(c2); REQUIRE(cv.capacity() == 2); REQUIRE(cv.size() == 2); Cluster c3 = {11, 12, {13, 14, 15}}; - cv.push_back(c3.x, c3.y, reinterpret_cast(&c3.data[0])); + cv.push_back(c3); REQUIRE(cv.capacity() == 4); REQUIRE(cv.size() == 3); @@ -66,7 +67,7 @@ TEST_CASE("Summing 3x1 clusters of int64") { REQUIRE(sums[2] == 42); } -TEST_CASE("Storing floats") { +TEST_CASE("Storing floats", "[.ClusterVector]") { ClusterVector> cv(10); REQUIRE(cv.capacity() == 10); REQUIRE(cv.size() == 0); @@ -75,13 +76,13 @@ TEST_CASE("Storing floats") { // Create a cluster and push back into the vector Cluster c1 = {1, 2, {3.0, 4.0, 5.0, 6.0, 3.0, 4.0, 5.0, 6.0}}; - cv.push_back(c1.x, c1.y, reinterpret_cast(&c1.data[0])); + cv.push_back(c1); REQUIRE(cv.capacity() == 10); REQUIRE(cv.size() == 1); Cluster c2 = { 6, 7, {8.0, 9.0, 10.0, 11.0, 8.0, 9.0, 10.0, 11.0}}; - cv.push_back(c2.x, c2.y, reinterpret_cast(&c2.data[0])); + cv.push_back(c2); REQUIRE(cv.capacity() == 10); REQUIRE(cv.size() == 2); @@ -91,22 +92,22 @@ TEST_CASE("Storing floats") { REQUIRE_THAT(sums[1], Catch::Matchers::WithinAbs(76.0, 1e-6)); } -TEST_CASE("Push back more than initial capacity") { +TEST_CASE("Push back more than initial capacity", "[.ClusterVector]") { ClusterVector> cv(2); auto initial_data = cv.data(); Cluster c1 = {1, 2, {3, 4, 5, 6}}; - cv.push_back(c1.x, c1.y, reinterpret_cast(&c1.data[0])); + cv.push_back(c1); REQUIRE(cv.size() == 1); REQUIRE(cv.capacity() == 2); Cluster c2 = {6, 7, {8, 9, 10, 11}}; - cv.push_back(c2.x, c2.y, reinterpret_cast(&c2.data[0])); + cv.push_back(c2); REQUIRE(cv.size() == 2); REQUIRE(cv.capacity() == 2); Cluster c3 = {11, 12, {13, 14, 15, 16}}; - cv.push_back(c3.x, c3.y, reinterpret_cast(&c3.data[0])); + cv.push_back(c3); REQUIRE(cv.size() == 3); REQUIRE(cv.capacity() == 4); @@ -124,19 +125,19 @@ TEST_CASE("Push back more than 
initial capacity") { REQUIRE(initial_data != cv.data()); } -TEST_CASE( - "Concatenate two cluster vectors where the first has enough capacity") { +TEST_CASE("Concatenate two cluster vectors where the first has enough capacity", + "[.ClusterVector]") { ClusterVector> cv1(12); Cluster c1 = {1, 2, {3, 4, 5, 6}}; - cv1.push_back(c1.x, c1.y, reinterpret_cast(&c1.data[0])); + cv1.push_back(c1); Cluster c2 = {6, 7, {8, 9, 10, 11}}; - cv1.push_back(c2.x, c2.y, reinterpret_cast(&c2.data[0])); + cv1.push_back(c2); ClusterVector> cv2(2); Cluster c3 = {11, 12, {13, 14, 15, 16}}; - cv2.push_back(c3.x, c3.y, reinterpret_cast(&c3.data[0])); + cv2.push_back(c3); Cluster c4 = {16, 17, {18, 19, 20, 21}}; - cv2.push_back(c4.x, c4.y, reinterpret_cast(&c4.data[0])); + cv2.push_back(c4); cv1 += cv2; REQUIRE(cv1.size() == 4); @@ -154,18 +155,19 @@ TEST_CASE( REQUIRE(ptr[3].y == 17); } -TEST_CASE("Concatenate two cluster vectors where we need to allocate") { +TEST_CASE("Concatenate two cluster vectors where we need to allocate", + "[.ClusterVector]") { ClusterVector> cv1(2); Cluster c1 = {1, 2, {3, 4, 5, 6}}; - cv1.push_back(c1.x, c1.y, reinterpret_cast(&c1.data[0])); + cv1.push_back(c1); Cluster c2 = {6, 7, {8, 9, 10, 11}}; - cv1.push_back(c2.x, c2.y, reinterpret_cast(&c2.data[0])); + cv1.push_back(c2); ClusterVector> cv2(2); Cluster c3 = {11, 12, {13, 14, 15, 16}}; - cv2.push_back(c3.x, c3.y, reinterpret_cast(&c3.data[0])); + cv2.push_back(c3); Cluster c4 = {16, 17, {18, 19, 20, 21}}; - cv2.push_back(c4.x, c4.y, reinterpret_cast(&c4.data[0])); + cv2.push_back(c4); cv1 += cv2; REQUIRE(cv1.size() == 4); diff --git a/src/Interpolator.cpp b/src/Interpolator.cpp index 1c4a385..cfe5b03 100644 --- a/src/Interpolator.cpp +++ b/src/Interpolator.cpp @@ -55,6 +55,8 @@ Interpolator::Interpolator(NDView etacube, NDView xbins, } } +// TODO: generalize to support any clustertype!!! 
otherwise add std::enable_if_t +// to only take Cluster2x2 and Cluster3x3 template >> std::vector From 3083d516998b91a5316efdc81d2ae9a66f11f421 Mon Sep 17 00:00:00 2001 From: Mazzoleni Alice Francesca Date: Tue, 1 Apr 2025 17:50:11 +0200 Subject: [PATCH 061/120] merge conflict --- include/aare/CalculateEta.hpp | 6 ++++-- include/aare/ClusterVector.hpp | 22 +++++++++---------- src/ClusterFile.test.cpp | 39 +++++++++++++++------------------- 3 files changed, 32 insertions(+), 35 deletions(-) diff --git a/include/aare/CalculateEta.hpp b/include/aare/CalculateEta.hpp index 29c5cc4..57c1460 100644 --- a/include/aare/CalculateEta.hpp +++ b/include/aare/CalculateEta.hpp @@ -152,8 +152,10 @@ template Eta2 calculate_eta2(const Cluster &cl) { template Eta2 calculate_eta2(const Cluster &cl) { Eta2 eta{}; - eta.x = static_cast(cl.data[1]) / (cl.data[0] + cl.data[1]); - eta.y = static_cast(cl.data[2]) / (cl.data[0] + cl.data[2]); + if ((cl.data[0] + cl.data[1]) != 0) + eta.x = static_cast(cl.data[1]) / (cl.data[0] + cl.data[1]); + if ((cl.data[0] + cl.data[2]) != 0) + eta.y = static_cast(cl.data[2]) / (cl.data[0] + cl.data[2]); eta.sum = cl.data[0] + cl.data[1] + cl.data[2] + cl.data[3]; eta.c = cBottomLeft; // TODO! This is not correct, but need to put something return eta; diff --git a/include/aare/ClusterVector.hpp b/include/aare/ClusterVector.hpp index 4ab0aa7..188b018 100644 --- a/include/aare/ClusterVector.hpp +++ b/include/aare/ClusterVector.hpp @@ -272,25 +272,25 @@ class ClusterVector> { m_size = new_size; } - void apply_gain_map(const NDView gain_map){ - //in principle we need to know the size of the image for this lookup - //TODO! check orientations + // TODO: Generalize !!!! + void apply_gain_map(const NDView gain_map) { + // in principle we need to know the size of the image for this lookup + // TODO! 
check orientations std::array xcorr = {-1, 0, 1, -1, 0, 1, -1, 0, 1}; std::array ycorr = {-1, -1, -1, 0, 0, 0, 1, 1, 1}; - for (size_t i=0; i(i); + for (size_t i = 0; i < m_size; i++) { + auto &cl = at(i); - if (cl.x > 0 && cl.y > 0 && cl.x < gain_map.shape(1)-1 && cl.y < gain_map.shape(0)-1){ - for (size_t j=0; j<9; j++){ + if (cl.x > 0 && cl.y > 0 && cl.x < gain_map.shape(1) - 1 && + cl.y < gain_map.shape(0) - 1) { + for (size_t j = 0; j < 9; j++) { size_t x = cl.x + xcorr[j]; size_t y = cl.y + ycorr[j]; cl.data[j] = static_cast(cl.data[j] * gain_map(y, x)); } - }else{ - memset(cl.data, 0, 9*sizeof(T)); //clear edge clusters + } else { + memset(cl.data, 0, 9 * sizeof(T)); // clear edge clusters } - - } } diff --git a/src/ClusterFile.test.cpp b/src/ClusterFile.test.cpp index a0eed04..ccc0170 100644 --- a/src/ClusterFile.test.cpp +++ b/src/ClusterFile.test.cpp @@ -1,33 +1,32 @@ #include "aare/ClusterFile.hpp" #include "test_config.hpp" - #include "aare/defs.hpp" #include #include - - - +using aare::Cluster; using aare::ClusterFile; TEST_CASE("Read one frame from a a cluster file", "[.integration]") { - //We know that the frame has 97 clusters - auto fpath = test_data_path() / "clusters" / "single_frame_97_clustrers.clust"; + // We know that the frame has 97 clusters + auto fpath = + test_data_path() / "clusters" / "single_frame_97_clustrers.clust"; REQUIRE(std::filesystem::exists(fpath)); - ClusterFile f(fpath); + ClusterFile> f(fpath); auto clusters = f.read_frame(); REQUIRE(clusters.size() == 97); REQUIRE(clusters.frame_number() == 135); } TEST_CASE("Read one frame using ROI", "[.integration]") { - //We know that the frame has 97 clusters - auto fpath = test_data_path() / "clusters" / "single_frame_97_clustrers.clust"; + // We know that the frame has 97 clusters + auto fpath = + test_data_path() / "clusters" / "single_frame_97_clustrers.clust"; REQUIRE(std::filesystem::exists(fpath)); - ClusterFile f(fpath); + ClusterFile> f(fpath); aare::ROI roi; roi.xmin = 0; 
roi.xmax = 50; @@ -38,43 +37,39 @@ TEST_CASE("Read one frame using ROI", "[.integration]") { REQUIRE(clusters.size() == 49); REQUIRE(clusters.frame_number() == 135); - //Check that all clusters are within the ROI + // Check that all clusters are within the ROI for (size_t i = 0; i < clusters.size(); i++) { - auto c = clusters.at(i); + auto c = clusters.at(i); REQUIRE(c.x >= roi.xmin); REQUIRE(c.x <= roi.xmax); REQUIRE(c.y >= roi.ymin); REQUIRE(c.y <= roi.ymax); } - } - TEST_CASE("Read clusters from single frame file", "[.integration]") { - auto fpath = test_data_path() / "clusters" / "single_frame_97_clustrers.clust"; + auto fpath = + test_data_path() / "clusters" / "single_frame_97_clustrers.clust"; REQUIRE(std::filesystem::exists(fpath)); SECTION("Read fewer clusters than available") { - ClusterFile f(fpath); + ClusterFile> f(fpath); auto clusters = f.read_clusters(50); REQUIRE(clusters.size() == 50); - REQUIRE(clusters.frame_number() == 135); + REQUIRE(clusters.frame_number() == 135); } SECTION("Read more clusters than available") { - ClusterFile f(fpath); + ClusterFile> f(fpath); // 100 is the maximum number of clusters read auto clusters = f.read_clusters(100); REQUIRE(clusters.size() == 97); REQUIRE(clusters.frame_number() == 135); } SECTION("Read all clusters") { - ClusterFile f(fpath); + ClusterFile> f(fpath); auto clusters = f.read_clusters(97); REQUIRE(clusters.size() == 97); REQUIRE(clusters.frame_number() == 135); } - - - } From 04728929cba0a61831748f3179e08f9967011380 Mon Sep 17 00:00:00 2001 From: Mazzoleni Alice Francesca Date: Tue, 1 Apr 2025 18:29:08 +0200 Subject: [PATCH 062/120] implemented sum_2x2() for general clusters, only one calculate_eta2 function for all clusters --- include/aare/CalculateEta.hpp | 80 +--- include/aare/Cluster.hpp | 22 +- include/aare/ClusterFile.hpp | 8 +- include/aare/ClusterVector.hpp | 37 +- src/Cluster.test.cpp | 3 - src/ClusterFile.cpp | 704 --------------------------------- 6 files changed, 22 insertions(+), 832 
deletions(-) delete mode 100644 src/ClusterFile.cpp diff --git a/include/aare/CalculateEta.hpp b/include/aare/CalculateEta.hpp index 57c1460..1a0797a 100644 --- a/include/aare/CalculateEta.hpp +++ b/include/aare/CalculateEta.hpp @@ -60,23 +60,9 @@ Eta2 calculate_eta2( const Cluster &cl) { Eta2 eta{}; - constexpr size_t num_2x2_subclusters = - (ClusterSizeX - 1) * (ClusterSizeY - 1); - std::array sum_2x2_subcluster; - for (size_t i = 0; i < ClusterSizeY - 1; ++i) { - for (size_t j = 0; j < ClusterSizeX - 1; ++j) - sum_2x2_subcluster[i * (ClusterSizeX - 1) + j] = - cl.data[i * ClusterSizeX + j] + - cl.data[i * ClusterSizeX + j + 1] + - cl.data[(i + 1) * ClusterSizeX + j] + - cl.data[(i + 1) * ClusterSizeX + j + 1]; - } - - auto c = - std::max_element(sum_2x2_subcluster.begin(), sum_2x2_subcluster.end()) - - sum_2x2_subcluster.begin(); - - eta.sum = sum_2x2_subcluster[c]; + auto max_sum = cl.max_sum_2x2(); + eta.sum = max_sum.first; + auto c = max_sum.second; size_t index_bottom_left_max_2x2_subcluster = (int(c / (ClusterSizeX - 1))) * ClusterSizeX + c % (ClusterSizeX - 1); @@ -101,66 +87,6 @@ Eta2 calculate_eta2( return eta; } -/** - * @brief Calculate the eta2 values for a 3x3 cluster and return them in a Eta2 - * struct containing etay, etax and the corner of the cluster. 
- */ -template Eta2 calculate_eta2(const Cluster &cl) { - Eta2 eta{}; - - std::array tot2; - tot2[0] = cl.data[0] + cl.data[1] + cl.data[3] + cl.data[4]; - tot2[1] = cl.data[1] + cl.data[2] + cl.data[4] + cl.data[5]; - tot2[2] = cl.data[3] + cl.data[4] + cl.data[6] + cl.data[7]; - tot2[3] = cl.data[4] + cl.data[5] + cl.data[7] + cl.data[8]; - - auto c = std::max_element(tot2.begin(), tot2.end()) - tot2.begin(); - eta.sum = tot2[c]; - switch (c) { - case cBottomLeft: - if ((cl.data[3] + cl.data[4]) != 0) - eta.x = static_cast(cl.data[4]) / (cl.data[3] + cl.data[4]); - if ((cl.data[1] + cl.data[4]) != 0) - eta.y = static_cast(cl.data[4]) / (cl.data[1] + cl.data[4]); - eta.c = cBottomLeft; - break; - case cBottomRight: - if ((cl.data[2] + cl.data[5]) != 0) - eta.x = static_cast(cl.data[5]) / (cl.data[4] + cl.data[5]); - if ((cl.data[1] + cl.data[4]) != 0) - eta.y = static_cast(cl.data[4]) / (cl.data[1] + cl.data[4]); - eta.c = cBottomRight; - break; - case cTopLeft: - if ((cl.data[7] + cl.data[4]) != 0) - eta.x = static_cast(cl.data[4]) / (cl.data[3] + cl.data[4]); - if ((cl.data[7] + cl.data[4]) != 0) - eta.y = static_cast(cl.data[7]) / (cl.data[7] + cl.data[4]); - eta.c = cTopLeft; - break; - case cTopRight: - if ((cl.data[5] + cl.data[4]) != 0) - eta.x = static_cast(cl.data[5]) / (cl.data[5] + cl.data[4]); - if ((cl.data[7] + cl.data[4]) != 0) - eta.y = static_cast(cl.data[7]) / (cl.data[7] + cl.data[4]); - eta.c = cTopRight; - break; - } - return eta; -} - -template Eta2 calculate_eta2(const Cluster &cl) { - Eta2 eta{}; - - if ((cl.data[0] + cl.data[1]) != 0) - eta.x = static_cast(cl.data[1]) / (cl.data[0] + cl.data[1]); - if ((cl.data[0] + cl.data[2]) != 0) - eta.y = static_cast(cl.data[2]) / (cl.data[0] + cl.data[2]); - eta.sum = cl.data[0] + cl.data[1] + cl.data[2] + cl.data[3]; - eta.c = cBottomLeft; // TODO! 
This is not correct, but need to put something - return eta; -} - // calculates Eta3 for 3x3 cluster based on code from analyze_cluster // TODO only supported for 3x3 Clusters template Eta2 calculate_eta3(const Cluster &cl) { diff --git a/include/aare/Cluster.hpp b/include/aare/Cluster.hpp index da756dc..cc102c4 100644 --- a/include/aare/Cluster.hpp +++ b/include/aare/Cluster.hpp @@ -35,7 +35,7 @@ struct Cluster { return std::accumulate(data, data + ClusterSizeX * ClusterSizeY, 0); } - T max_sum_2x2() const { + std::pair max_sum_2x2() const { constexpr size_t num_2x2_subclusters = (ClusterSizeX - 1) * (ClusterSizeY - 1); @@ -49,8 +49,10 @@ struct Cluster { data[(i + 1) * ClusterSizeX + j + 1]; } - return *std::max_element(sum_2x2_subcluster.begin(), - sum_2x2_subcluster.end()); + int index = std::max_element(sum_2x2_subcluster.begin(), + sum_2x2_subcluster.end()) - + sum_2x2_subcluster.begin(); + return std::make_pair(sum_2x2_subcluster[index], index); } }; @@ -62,9 +64,9 @@ template struct Cluster { T sum() const { return std::accumulate(data, data + 4, 0); } - T max_sum_2x2() const { - return data[0] + data[1] + data[2] + - data[3]; // Only one possible 2x2 sum + std::pair max_sum_2x2() const { + return std::make_pair(data[0] + data[1] + data[2] + data[3], + 0); // Only one possible 2x2 sum } }; @@ -76,14 +78,16 @@ template struct Cluster { T sum() const { return std::accumulate(data, data + 9, 0); } - T max_sum_2x2() const { + std::pair max_sum_2x2() const { std::array sum_2x2_subclusters; sum_2x2_subclusters[0] = data[0] + data[1] + data[3] + data[4]; sum_2x2_subclusters[1] = data[1] + data[2] + data[4] + data[5]; sum_2x2_subclusters[2] = data[3] + data[4] + data[6] + data[7]; sum_2x2_subclusters[3] = data[4] + data[5] + data[7] + data[8]; - return *std::max_element(sum_2x2_subclusters.begin(), - sum_2x2_subclusters.end()); + int index = std::max_element(sum_2x2_subclusters.begin(), + sum_2x2_subclusters.end()) - + sum_2x2_subclusters.begin(); + return 
std::make_pair(sum_2x2_subclusters[index], index); } }; diff --git a/include/aare/ClusterFile.hpp b/include/aare/ClusterFile.hpp index 3fc855a..9c43326 100644 --- a/include/aare/ClusterFile.hpp +++ b/include/aare/ClusterFile.hpp @@ -415,10 +415,12 @@ bool ClusterFile::is_selected(ClusterType &cl) { return false; } } + // TODO types are wrong generalize if (m_noise_map) { - int32_t sum_1x1 = cl.data[4]; // central pixel - int32_t sum_2x2 = cl.max_sum_2x2(); // highest sum of 2x2 subclusters - int32_t sum_3x3 = cl.sum(); // sum of all pixels + int32_t sum_1x1 = cl.data[4]; // central pixel + int32_t sum_2x2 = + cl.max_sum_2x2().first; // highest sum of 2x2 subclusters + int32_t sum_3x3 = cl.sum(); // sum of all pixels auto noise = (*m_noise_map)(cl.y, cl.x); // TODO! check if this is correct diff --git a/include/aare/ClusterVector.hpp b/include/aare/ClusterVector.hpp index 188b018..30be5eb 100644 --- a/include/aare/ClusterVector.hpp +++ b/include/aare/ClusterVector.hpp @@ -148,38 +148,6 @@ class ClusterVector> { return sums; } - /** - * @brief Return the maximum sum of the 2x2 subclusters in each cluster - * @return std::vector vector of sums for each cluster - * @throws std::runtime_error if the cluster size is not 3x3 - * @warning Only 3x3 clusters are supported for the 2x2 sum. 
- */ - /* only needed to calculate eta TODO: in previous PR already added calculate - sum in PR std::vector sum_2x2() { std::vector sums(m_size); const - size_t stride = item_size(); - - if (ClusterSizeX != 3 || ClusterSizeY != 3) { - throw std::runtime_error( - "Only 3x3 clusters are supported for the 2x2 sum."); - } - std::byte *ptr = m_data + 2 * sizeof(CoordType); // skip x and y - - for (size_t i = 0; i < m_size; i++) { - std::array total; - auto T_ptr = reinterpret_cast(ptr); - total[0] = T_ptr[0] + T_ptr[1] + T_ptr[3] + T_ptr[4]; - total[1] = T_ptr[1] + T_ptr[2] + T_ptr[4] + T_ptr[5]; - total[2] = T_ptr[3] + T_ptr[4] + T_ptr[6] + T_ptr[7]; - total[3] = T_ptr[4] + T_ptr[5] + T_ptr[7] + T_ptr[8]; - - sums[i] = *std::max_element(total.begin(), total.end()); - ptr += stride; - } - - return sums; - } - */ - /** * @brief Return the number of clusters in the vector */ @@ -220,9 +188,6 @@ class ClusterVector> { return m_data + element_offset(i); } - // size_t cluster_size_x() const { return m_cluster_size_x; } - // size_t cluster_size_y() const { return m_cluster_size_y; } - std::byte *data() { return m_data; } std::byte const *data() const { return m_data; } @@ -272,7 +237,7 @@ class ClusterVector> { m_size = new_size; } - // TODO: Generalize !!!! + // TODO: Generalize !!!! Maybe move somewhere else void apply_gain_map(const NDView gain_map) { // in principle we need to know the size of the image for this lookup // TODO! check orientations diff --git a/src/Cluster.test.cpp b/src/Cluster.test.cpp index 20c3948..7918d72 100644 --- a/src/Cluster.test.cpp +++ b/src/Cluster.test.cpp @@ -33,9 +33,6 @@ using ClusterTypes = TEST_CASE("calculate_eta2", "[.cluster][.eta_calculation]") { - // weird expect cluster_start to be in bottom_left corner -> row major -> - // check how its used should be an image!! - auto [cluster, expected_eta] = GENERATE( std::make_tuple(ClusterTypes{Cluster{0, 0, {1, 2, 3, 1}}}, Eta2{2. / 3, 3. 
/ 4, corner::cBottomLeft, 7}), diff --git a/src/ClusterFile.cpp b/src/ClusterFile.cpp deleted file mode 100644 index 221c15d..0000000 --- a/src/ClusterFile.cpp +++ /dev/null @@ -1,704 +0,0 @@ -#include "aare/ClusterFile.hpp" - -#include - -namespace aare { - -template >> -ClusterFile::ClusterFile(const std::filesystem::path &fname, - size_t chunk_size, - const std::string &mode) - : m_chunk_size(chunk_size), m_mode(mode) { - - if (mode == "r") { - fp = fopen(fname.c_str(), "rb"); - if (!fp) { - throw std::runtime_error("Could not open file for reading: " + - fname.string()); - } - } else if (mode == "w") { - fp = fopen(fname.c_str(), "wb"); - if (!fp) { - throw std::runtime_error("Could not open file for writing: " + - fname.string()); - } - } else if (mode == "a") { - fp = fopen(fname.c_str(), "ab"); - if (!fp) { - throw std::runtime_error("Could not open file for appending: " + - fname.string()); - } - } else { - throw std::runtime_error("Unsupported mode: " + mode); - } -} - -<<<<<<< HEAD -template ClusterFile::~ClusterFile() { - close(); -} -======= -void ClusterFile::set_roi(ROI roi){ - m_roi = roi; -} - -void ClusterFile::set_noise_map(const NDView noise_map){ - m_noise_map = NDArray(noise_map); -} - -void ClusterFile::set_gain_map(const NDView gain_map){ - m_gain_map = NDArray(gain_map); -} - -ClusterFile::~ClusterFile() { close(); } ->>>>>>> developer - -template void ClusterFile::close() { - if (fp) { - fclose(fp); - fp = nullptr; - } -} - -// TODO generally supported for all clsuter types -template -void ClusterFile::write_frame( - const ClusterVector &clusters) { - if (m_mode != "w" && m_mode != "a") { - throw std::runtime_error("File not opened for writing"); - } - if (!(clusters.cluster_size_x() == 3) && - !(clusters.cluster_size_y() == 3)) { - throw std::runtime_error("Only 3x3 clusters are supported"); - } - //First write the frame number - 4 bytes - int32_t frame_number = clusters.frame_number(); - if(fwrite(&frame_number, sizeof(frame_number), 1, 
fp)!=1){ - throw std::runtime_error(LOCATION + "Could not write frame number"); - } - - //Then write the number of clusters - 4 bytes - uint32_t n_clusters = clusters.size(); - if(fwrite(&n_clusters, sizeof(n_clusters), 1, fp)!=1){ - throw std::runtime_error(LOCATION + "Could not write number of clusters"); - } - - //Now write the clusters in the frame - if(fwrite(clusters.data(), clusters.item_size(), clusters.size(), fp)!=clusters.size()){ - throw std::runtime_error(LOCATION + "Could not write clusters"); - } -} - -<<<<<<< HEAD -template -ClusterVector -ClusterFile::read_clusters(size_t n_clusters) { -======= - -ClusterVector ClusterFile::read_clusters(size_t n_clusters){ - if (m_mode != "r") { - throw std::runtime_error("File not opened for reading"); - } - if (m_noise_map || m_roi){ - return read_clusters_with_cut(n_clusters); - }else{ - return read_clusters_without_cut(n_clusters); - } -} - -ClusterVector ClusterFile::read_clusters_without_cut(size_t n_clusters) { ->>>>>>> developer - if (m_mode != "r") { - throw std::runtime_error("File not opened for reading"); - } - - ClusterVector clusters(n_clusters); - - int32_t iframe = 0; // frame number needs to be 4 bytes! 
- size_t nph_read = 0; - uint32_t nn = m_num_left; - uint32_t nph = m_num_left; // number of clusters in frame needs to be 4 - - // auto buf = reinterpret_cast(clusters.data()); - auto buf = clusters.data(); - // if there are photons left from previous frame read them first - if (nph) { - if (nph > n_clusters) { - // if we have more photons left in the frame then photons to read we - // read directly the requested number - nn = n_clusters; - } else { - nn = nph; - } - nph_read += fread((buf + nph_read * clusters.item_size()), - clusters.item_size(), nn, fp); - m_num_left = nph - nn; // write back the number of photons left - } - - if (nph_read < n_clusters) { - // keep on reading frames and photons until reaching n_clusters - while (fread(&iframe, sizeof(iframe), 1, fp)) { - clusters.set_frame_number(iframe); - // read number of clusters in frame - if (fread(&nph, sizeof(nph), 1, fp)) { - if (nph > (n_clusters - nph_read)) - nn = n_clusters - nph_read; - else - nn = nph; - - nph_read += fread((buf + nph_read * clusters.item_size()), - clusters.item_size(), nn, fp); - m_num_left = nph - nn; - } - if (nph_read >= n_clusters) - break; - } - } - - // Resize the vector to the number of clusters. - // No new allocation, only change bounds. - clusters.resize(nph_read); - if(m_gain_map) - clusters.apply_gain_map(m_gain_map->view()); - return clusters; -} - -<<<<<<< HEAD -template -ClusterVector -ClusterFile::read_clusters(size_t n_clusters, ROI roi) { - if (m_mode != "r") { - throw std::runtime_error("File not opened for reading"); - } - - ClusterVector clusters; - clusters.reserve(n_clusters); - - int32_t iframe = 0; // frame number needs to be 4 bytes! 
- size_t nph_read = 0; - uint32_t nn = m_num_left; - uint32_t nph = m_num_left; // number of clusters in frame needs to be 4 - - // auto buf = reinterpret_cast(clusters.data()); - // auto buf = clusters.data(); - - ClusterType tmp; // this would break if the cluster size changes - - // if there are photons left from previous frame read them first - if (nph) { - if (nph > n_clusters) { - // if we have more photons left in the frame then photons to read we - // read directly the requested number - nn = n_clusters; - } else { - nn = nph; - } - // Read one cluster, in the ROI push back - // nph_read += fread((buf + nph_read*clusters.item_size()), - // clusters.item_size(), nn, fp); - for (size_t i = 0; i < nn; i++) { - fread(&tmp, sizeof(tmp), 1, fp); - if (tmp.x >= roi.xmin && tmp.x <= roi.xmax && tmp.y >= roi.ymin && - tmp.y <= roi.ymax) { - clusters.push_back(tmp.x, tmp.y, - reinterpret_cast(tmp.data)); - nph_read++; - } - } - - m_num_left = nph - nn; // write back the number of photons left - } - - if (nph_read < n_clusters) { - // keep on reading frames and photons until reaching n_clusters - while (fread(&iframe, sizeof(iframe), 1, fp)) { - // read number of clusters in frame - if (fread(&nph, sizeof(nph), 1, fp)) { - if (nph > (n_clusters - nph_read)) - nn = n_clusters - nph_read; - else - nn = nph; - - // nph_read += fread((buf + nph_read*clusters.item_size()), - // clusters.item_size(), nn, fp); - for (size_t i = 0; i < nn; i++) { - fread(&tmp, sizeof(tmp), 1, fp); - if (tmp.x >= roi.xmin && tmp.x <= roi.xmax && - tmp.y >= roi.ymin && tmp.y <= roi.ymax) { - clusters.push_back( - tmp.x, tmp.y, - reinterpret_cast(tmp.data)); - nph_read++; - } - } - m_num_left = nph - nn; - } - if (nph_read >= n_clusters) - break; - } - } - - // Resize the vector to the number of clusters. - // No new allocation, only change bounds. 
- clusters.resize(nph_read); - return clusters; -} - -template -ClusterVector ClusterFile::read_frame() { -======= - - -ClusterVector ClusterFile::read_clusters_with_cut(size_t n_clusters) { - ClusterVector clusters(3,3); - clusters.reserve(n_clusters); - - // if there are photons left from previous frame read them first - if (m_num_left) { - while(m_num_left && clusters.size() < n_clusters){ - Cluster3x3 c = read_one_cluster(); - if(is_selected(c)){ - clusters.push_back(c.x, c.y, reinterpret_cast(c.data)); - } - } - } - - // we did not have enough clusters left in the previous frame - // keep on reading frames until reaching n_clusters - if (clusters.size() < n_clusters) { - // sanity check - if (m_num_left) { - throw std::runtime_error(LOCATION + "Entered second loop with clusters left\n"); - } - - int32_t frame_number = 0; // frame number needs to be 4 bytes! - while (fread(&frame_number, sizeof(frame_number), 1, fp)) { - if (fread(&m_num_left, sizeof(m_num_left), 1, fp)) { - clusters.set_frame_number(frame_number); //cluster vector will hold the last frame number - while(m_num_left && clusters.size() < n_clusters){ - Cluster3x3 c = read_one_cluster(); - if(is_selected(c)){ - clusters.push_back(c.x, c.y, reinterpret_cast(c.data)); - } - } - } - - // we have enough clusters, break out of the outer while loop - if (clusters.size() >= n_clusters) - break; - } - - } - if(m_gain_map) - clusters.apply_gain_map(m_gain_map->view()); - - return clusters; -} - -Cluster3x3 ClusterFile::read_one_cluster(){ - Cluster3x3 c; - auto rc = fread(&c, sizeof(c), 1, fp); - if (rc != 1) { - throw std::runtime_error(LOCATION + "Could not read cluster"); - } - --m_num_left; - return c; -} - -ClusterVector ClusterFile::read_frame(){ - if (m_mode != "r") { - throw std::runtime_error(LOCATION + "File not opened for reading"); - } - if (m_noise_map || m_roi){ - return read_frame_with_cut(); - }else{ - return read_frame_without_cut(); - } -} - -ClusterVector 
ClusterFile::read_frame_without_cut() { - if (m_mode != "r") { - throw std::runtime_error("File not opened for reading"); - } - if (m_num_left) { - throw std::runtime_error( - "There are still photons left in the last frame"); - } - int32_t frame_number; - if (fread(&frame_number, sizeof(frame_number), 1, fp) != 1) { - throw std::runtime_error(LOCATION + "Could not read frame number"); - } - - int32_t n_clusters; // Saved as 32bit integer in the cluster file - if (fread(&n_clusters, sizeof(n_clusters), 1, fp) != 1) { - throw std::runtime_error(LOCATION + "Could not read number of clusters"); - } - - ClusterVector clusters(3, 3, n_clusters); - clusters.set_frame_number(frame_number); - - if (fread(clusters.data(), clusters.item_size(), n_clusters, fp) != - static_cast(n_clusters)) { - throw std::runtime_error(LOCATION + "Could not read clusters"); - } - clusters.resize(n_clusters); - if (m_gain_map) - clusters.apply_gain_map(m_gain_map->view()); - return clusters; -} - -ClusterVector ClusterFile::read_frame_with_cut() { ->>>>>>> developer - if (m_mode != "r") { - throw std::runtime_error("File not opened for reading"); - } - if (m_num_left) { - throw std::runtime_error( - "There are still photons left in the last frame"); - } - int32_t frame_number; - if (fread(&frame_number, sizeof(frame_number), 1, fp) != 1) { - throw std::runtime_error("Could not read frame number"); - } - - - if (fread(&m_num_left, sizeof(m_num_left), 1, fp) != 1) { - throw std::runtime_error("Could not read number of clusters"); - } -<<<<<<< HEAD - // std::vector clusters(n_clusters); - ClusterVector clusters(n_clusters); -======= - - ClusterVector clusters(3, 3); - clusters.reserve(m_num_left); ->>>>>>> developer - clusters.set_frame_number(frame_number); - while(m_num_left){ - Cluster3x3 c = read_one_cluster(); - if(is_selected(c)){ - clusters.push_back(c.x, c.y, reinterpret_cast(c.data)); - } - } - if (m_gain_map) - clusters.apply_gain_map(m_gain_map->view()); - return clusters; -} - 
-<<<<<<< HEAD -// std::vector ClusterFile::read_cluster_with_cut(size_t n_clusters, -// double *noise_map, -// int nx, int ny) { -// if (m_mode != "r") { -// throw std::runtime_error("File not opened for reading"); -// } -// std::vector clusters(n_clusters); -// // size_t read_clusters_with_cut(FILE *fp, size_t n_clusters, Cluster -// *buf, -// // uint32_t *n_left, double *noise_map, int -// // nx, int ny) { -// int iframe = 0; -// // uint32_t nph = *n_left; -// uint32_t nph = m_num_left; -// // uint32_t nn = *n_left; -// uint32_t nn = m_num_left; -// size_t nph_read = 0; -======= - ->>>>>>> developer - -bool ClusterFile::is_selected(Cluster3x3 &cl) { - //Should fail fast - if (m_roi) { - if (!(m_roi->contains(cl.x, cl.y))) { - return false; - } - } - if (m_noise_map){ - int32_t sum_1x1 = cl.data[4]; // central pixel - int32_t sum_2x2 = cl.sum_2x2(); // highest sum of 2x2 subclusters - int32_t sum_3x3 = cl.sum(); // sum of all pixels - -<<<<<<< HEAD -// if (nph) { -// if (nph > n_clusters) { -// // if we have more photons left in the frame then photons to -// // read we read directly the requested number -// nn = n_clusters; -// } else { -// nn = nph; -// } -// for (size_t iph = 0; iph < nn; iph++) { -// // read photons 1 by 1 -// size_t n_read = -// fread(reinterpret_cast(ptr), sizeof(Cluster3x3), 1, -// fp); -// if (n_read != 1) { -// clusters.resize(nph_read); -// return clusters; -// } -// // TODO! 
error handling on read -// good = 1; -// if (noise_map) { -// if (ptr->x >= 0 && ptr->x < nx && ptr->y >= 0 && ptr->y < ny) -// { -// tot1 = ptr->data[4]; -// analyze_cluster(*ptr, &t2max, &tot3, NULL, NULL, NULL, -// NULL, -// NULL); -// noise = noise_map[ptr->y * nx + ptr->x]; -// if (tot1 > noise || t2max > 2 * noise || tot3 > 3 * -// noise) { -// ; -// } else { -// good = 0; -// printf("%d %d %f %d %d %d\n", ptr->x, ptr->y, noise, -// tot1, t2max, tot3); -// } -// } else { -// printf("Bad pixel number %d %d\n", ptr->x, ptr->y); -// good = 0; -// } -// } -// if (good) { -// ptr++; -// nph_read++; -// } -// (m_num_left)--; -// if (nph_read >= n_clusters) -// break; -// } -// } -// if (nph_read < n_clusters) { -// // // keep on reading frames and photons until reaching -// // n_clusters -// while (fread(&iframe, sizeof(iframe), 1, fp)) { -// // // printf("%d\n",nph_read); - -// if (fread(&nph, sizeof(nph), 1, fp)) { -// // // printf("** %d\n",nph); -// m_num_left = nph; -// for (size_t iph = 0; iph < nph; iph++) { -// // // read photons 1 by 1 -// size_t n_read = fread(reinterpret_cast(ptr), -// sizeof(Cluster3x3), 1, fp); -// if (n_read != 1) { -// clusters.resize(nph_read); -// return clusters; -// // return nph_read; -// } -// good = 1; -// if (noise_map) { -// if (ptr->x >= 0 && ptr->x < nx && ptr->y >= 0 && -// ptr->y < ny) { -// tot1 = ptr->data[4]; -// analyze_cluster(*ptr, &t2max, &tot3, NULL, NULL, -// NULL, NULL, NULL); -// // noise = noise_map[ptr->y * nx + ptr->x]; -// noise = noise_map[ptr->y + ny * ptr->x]; -// if (tot1 > noise || t2max > 2 * noise || -// tot3 > 3 * noise) { -// ; -// } else -// good = 0; -// } else { -// printf("Bad pixel number %d %d\n", ptr->x, -// ptr->y); good = 0; -// } -// } -// if (good) { -// ptr++; -// nph_read++; -// } -// (m_num_left)--; -// if (nph_read >= n_clusters) -// break; -// } -// } -// if (nph_read >= n_clusters) -// break; -// } -// } -// // printf("%d\n",nph_read); -// clusters.resize(nph_read); -// return 
clusters; -// } -======= - auto noise = (*m_noise_map)(cl.y, cl.x); //TODO! check if this is correct - if (sum_1x1 <= noise || sum_2x2 <= 2 * noise || sum_3x3 <= 3 * noise) { - return false; - } - } - //we passed all checks - return true; -} ->>>>>>> developer - -template -NDArray calculate_eta2(ClusterVector &clusters) { - // TOTO! make work with 2x2 clusters - NDArray eta2({static_cast(clusters.size()), 2}); -<<<<<<< HEAD - - for (size_t i = 0; i < clusters.size(); i++) { - auto e = calculate_eta2(clusters.at(i)); - eta2(i, 0) = e.x; - eta2(i, 1) = e.y; - } - -======= - - if (clusters.cluster_size_x() == 3 || clusters.cluster_size_y() == 3) { - for (size_t i = 0; i < clusters.size(); i++) { - auto e = calculate_eta2(clusters.at(i)); - eta2(i, 0) = e.x; - eta2(i, 1) = e.y; - } - }else if(clusters.cluster_size_x() == 2 || clusters.cluster_size_y() == 2){ - for (size_t i = 0; i < clusters.size(); i++) { - auto e = calculate_eta2(clusters.at(i)); - eta2(i, 0) = e.x; - eta2(i, 1) = e.y; - } - }else{ - throw std::runtime_error("Only 3x3 and 2x2 clusters are supported"); - } - ->>>>>>> developer - return eta2; -} - -/** - * @brief Calculate the eta2 values for a generic sized cluster and return them - * in a Eta2 struct containing etay, etax and the index of the respective 2x2 - * subcluster. 
- */ -template -Eta2 calculate_eta2(Cluster &cl) { - Eta2 eta{}; - - // TODO loads of overhead for a 2x2 clsuter maybe keep 2x2 calculation - constexpr size_t num_2x2_subclusters = - (ClusterSizeX - 1) * (ClusterSizeY - 1); - std::array sum_2x2_subcluster; - for (size_t i = 0; i < ClusterSizeY - 1; ++i) { - for (size_t j = 0; j < ClusterSizeX - 1; ++j) - sum_2x2_subcluster[i * (ClusterSizeX - 1) + j] = - cl.data[i * ClusterSizeX + j] + - cl.data[i * ClusterSizeX + j + 1] + - cl.data[(i + 1) * ClusterSizeX + j] + - cl.data[(i + 1) * ClusterSizeX + j + 1]; - } - - auto c = - std::max_element(sum_2x2_subcluster.begin(), sum_2x2_subcluster.end()) - - sum_2x2_subcluster.begin(); - - eta.sum = sum_2x2_subcluster[c]; - - eta.x = static_cast(cl.data[(c + 1) * ClusterSizeX + 1]) / - (cl.data[0] + cl.data[1]); - - size_t index_top_left_2x2_subcluster = - (int(c / (ClusterSizeX - 1)) + 1) * ClusterSizeX + - c % (ClusterSizeX - 1) * 2 + 1; - if ((cl.data[index_top_left_2x2_subcluster] + - cl.data[index_top_left_2x2_subcluster - 1]) != 0) - eta.x = - static_cast(cl.data[index_top_left_2x2_subcluster] / - (cl.data[index_top_left_2x2_subcluster] + - cl.data[index_top_left_2x2_subcluster - 1])); - - if ((cl.data[index_top_left_2x2_subcluster] + - cl.data[index_top_left_2x2_subcluster - ClusterSizeX]) != 0) - eta.y = static_cast( - cl.data[index_top_left_2x2_subcluster] / - (cl.data[index_top_left_2x2_subcluster] + - cl.data[index_top_left_2x2_subcluster - ClusterSizeX])); - - eta.c = c; // TODO only supported for 2x2 and 3x3 clusters -> at least no - // underyling enum class - return eta; -} - -/** - * @brief Calculate the eta2 values for a 3x3 cluster and return them in a Eta2 - * struct containing etay, etax and the corner of the cluster. 
- */ -template Eta2 calculate_eta2(Cluster &cl) { - Eta2 eta{}; - - std::array tot2; - tot2[0] = cl.data[0] + cl.data[1] + cl.data[3] + cl.data[4]; - tot2[1] = cl.data[1] + cl.data[2] + cl.data[4] + cl.data[5]; - tot2[2] = cl.data[3] + cl.data[4] + cl.data[6] + cl.data[7]; - tot2[3] = cl.data[4] + cl.data[5] + cl.data[7] + cl.data[8]; - - auto c = std::max_element(tot2.begin(), tot2.end()) - tot2.begin(); - eta.sum = tot2[c]; - switch (c) { - case cBottomLeft: - if ((cl.data[3] + cl.data[4]) != 0) - eta.x = static_cast(cl.data[4]) / (cl.data[3] + cl.data[4]); - if ((cl.data[1] + cl.data[4]) != 0) - eta.y = static_cast(cl.data[4]) / (cl.data[1] + cl.data[4]); - eta.c = cBottomLeft; - break; - case cBottomRight: - if ((cl.data[2] + cl.data[5]) != 0) - eta.x = static_cast(cl.data[5]) / (cl.data[4] + cl.data[5]); - if ((cl.data[1] + cl.data[4]) != 0) - eta.y = static_cast(cl.data[4]) / (cl.data[1] + cl.data[4]); - eta.c = cBottomRight; - break; - case cTopLeft: - if ((cl.data[7] + cl.data[4]) != 0) - eta.x = static_cast(cl.data[4]) / (cl.data[3] + cl.data[4]); - if ((cl.data[7] + cl.data[4]) != 0) - eta.y = static_cast(cl.data[7]) / (cl.data[7] + cl.data[4]); - eta.c = cTopLeft; - break; - case cTopRight: - if ((cl.data[5] + cl.data[4]) != 0) - eta.x = static_cast(cl.data[5]) / (cl.data[5] + cl.data[4]); - if ((cl.data[7] + cl.data[4]) != 0) - eta.y = static_cast(cl.data[7]) / (cl.data[7] + cl.data[4]); - eta.c = cTopRight; - break; - } - return eta; -} - -<<<<<<< HEAD -template Eta2 calculate_eta2(Cluster &cl) { - Eta2 eta{}; - - eta.x = static_cast(cl.data[1]) / (cl.data[0] + cl.data[1]); - eta.y = static_cast(cl.data[2]) / (cl.data[0] + cl.data[2]); - eta.sum = cl.data[0] + cl.data[1] + cl.data[2] + cl.data[3]; - eta.c = cBottomLeft; // TODO! This is not correct, but need to put something - return eta; -} - -// TODO complicated API simplify? 
-int analyze_cluster(Cluster &cl, int32_t *t2, int32_t *t3, - char *quad, double *eta2x, double *eta2y, double *eta3x, - double *eta3y) { -======= ->>>>>>> developer - -Eta2 calculate_eta2(Cluster2x2 &cl) { - Eta2 eta{}; - if ((cl.data[0] + cl.data[1]) != 0) - eta.x = static_cast(cl.data[1]) / (cl.data[0] + cl.data[1]); - if ((cl.data[0] + cl.data[2]) != 0) - eta.y = static_cast(cl.data[2]) / (cl.data[0] + cl.data[2]); - eta.sum = cl.data[0] + cl.data[1] + cl.data[2]+ cl.data[3]; - eta.c = cBottomLeft; //TODO! This is not correct, but need to put something - return eta; -} - - -} // namespace aare \ No newline at end of file From 240960d3e7e5c27ad1ba82ffa8a909a92134af97 Mon Sep 17 00:00:00 2001 From: Mazzoleni Alice Francesca Date: Wed, 2 Apr 2025 12:05:16 +0200 Subject: [PATCH 063/120] generalized FindCluster to read in general cluster sizes - assuming that finding cluster center is equal for all clusters --- include/aare/CalculateEta.hpp | 2 +- include/aare/ClusterFinder.hpp | 19 ++++++++++--------- 2 files changed, 11 insertions(+), 10 deletions(-) diff --git a/include/aare/CalculateEta.hpp b/include/aare/CalculateEta.hpp index 1a0797a..088b4e8 100644 --- a/include/aare/CalculateEta.hpp +++ b/include/aare/CalculateEta.hpp @@ -67,7 +67,7 @@ Eta2 calculate_eta2( size_t index_bottom_left_max_2x2_subcluster = (int(c / (ClusterSizeX - 1))) * ClusterSizeX + c % (ClusterSizeX - 1); - if ((cl.data[index_bottom_left_max_2x2_subcluster] + + if ((cl.data[index_bottom_left_max_2x2_subcluster] +s cl.data[index_bottom_left_max_2x2_subcluster + 1]) != 0) eta.x = static_cast( cl.data[index_bottom_left_max_2x2_subcluster + 1]) / diff --git a/include/aare/ClusterFinder.hpp b/include/aare/ClusterFinder.hpp index 6a8fec4..19ada67 100644 --- a/include/aare/ClusterFinder.hpp +++ b/include/aare/ClusterFinder.hpp @@ -70,6 +70,12 @@ class ClusterFinder { // // 4,4 -> +/- 2 int dy = ClusterSizeY / 2; int dx = ClusterSizeX / 2; + int has_center_pixel_x = + ClusterSizeX % + 2; // for even 
sized clusters there is no proper cluster center and + // even amount of pixels around the center + int has_center_pixel_y = ClusterSizeY % 2; + m_clusters.set_frame_number(frame_number); std::vector cluster_data(ClusterSizeX * ClusterSizeY); for (int iy = 0; iy < frame.shape(0); iy++) { @@ -86,8 +92,8 @@ class ClusterFinder { continue; // NEGATIVE_PEDESTAL go to next pixel // TODO! No pedestal update??? - for (int ir = -dy; ir < dy + 1; ir++) { - for (int ic = -dx; ic < dx + 1; ic++) { + for (int ir = -dy; ir < dy + has_center_pixel_y; ir++) { + for (int ic = -dx; ic < dx + has_center_pixel_x; ic++) { if (ix + ic >= 0 && ix + ic < frame.shape(1) && iy + ir >= 0 && iy + ir < frame.shape(0)) { PEDESTAL_TYPE val = @@ -125,8 +131,8 @@ class ClusterFinder { // It's worth redoing the look since most of the time we // don't have a photon int i = 0; - for (int ir = -dy; ir < dy + 1; ir++) { - for (int ic = -dx; ic < dx + 1; ic++) { + for (int ir = -dy; ir < dy + has_center_pixel_y; ir++) { + for (int ic = -dx; ic < dx + has_center_pixel_y; ic++) { if (ix + ic >= 0 && ix + ic < frame.shape(1) && iy + ir >= 0 && iy + ir < frame.shape(0)) { CT tmp = @@ -140,11 +146,6 @@ class ClusterFinder { } // Add the cluster to the output ClusterVector - /* - m_clusters.push_back( - ix, iy, - reinterpret_cast(cluster_data.data())); - */ m_clusters.push_back( Cluster{ ix, iy, cluster_data.data()}); From 61af1105a16b818aa7998ffd2d4312352bf53bc1 Mon Sep 17 00:00:00 2001 From: Mazzoleni Alice Francesca Date: Wed, 2 Apr 2025 14:42:38 +0200 Subject: [PATCH 064/120] templated eta and updated test --- CMakeLists.txt | 1 + include/aare/CalculateEta.hpp | 17 +++++----- src/CalculateEta.test.cpp | 62 +++++++++++++++++++++++++++++++++++ src/Cluster.test.cpp | 33 ------------------- src/Interpolator.cpp | 4 +-- 5 files changed, 73 insertions(+), 44 deletions(-) create mode 100644 src/CalculateEta.test.cpp diff --git a/CMakeLists.txt b/CMakeLists.txt index df41ae8..0ab1e73 100644 --- a/CMakeLists.txt 
+++ b/CMakeLists.txt @@ -419,6 +419,7 @@ if(AARE_TESTS) ${CMAKE_CURRENT_SOURCE_DIR}/src/ClusterFinder.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/ClusterVector.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/Cluster.test.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/CalculateEta.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/ClusterFile.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/Pedestal.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyFile.test.cpp diff --git a/include/aare/CalculateEta.hpp b/include/aare/CalculateEta.hpp index 088b4e8..86871c9 100644 --- a/include/aare/CalculateEta.hpp +++ b/include/aare/CalculateEta.hpp @@ -25,12 +25,11 @@ typedef enum { pTopRight = 8 } pixel; -// TODO: maybe template this!!!!!! why int32_t???? -struct Eta2 { +template struct Eta2 { double x; double y; int c; - int32_t sum; + T sum; }; /** @@ -56,9 +55,9 @@ NDArray calculate_eta2(const ClusterVector &clusters) { */ template -Eta2 calculate_eta2( - const Cluster &cl) { - Eta2 eta{}; +Eta2 +calculate_eta2(const Cluster &cl) { + Eta2 eta{}; auto max_sum = cl.max_sum_2x2(); eta.sum = max_sum.first; @@ -67,7 +66,7 @@ Eta2 calculate_eta2( size_t index_bottom_left_max_2x2_subcluster = (int(c / (ClusterSizeX - 1))) * ClusterSizeX + c % (ClusterSizeX - 1); - if ((cl.data[index_bottom_left_max_2x2_subcluster] +s + if ((cl.data[index_bottom_left_max_2x2_subcluster] + cl.data[index_bottom_left_max_2x2_subcluster + 1]) != 0) eta.x = static_cast( cl.data[index_bottom_left_max_2x2_subcluster + 1]) / @@ -89,9 +88,9 @@ Eta2 calculate_eta2( // calculates Eta3 for 3x3 cluster based on code from analyze_cluster // TODO only supported for 3x3 Clusters -template Eta2 calculate_eta3(const Cluster &cl) { +template Eta2 calculate_eta3(const Cluster &cl) { - Eta2 eta{}; + Eta2 eta{}; T sum = 0; diff --git a/src/CalculateEta.test.cpp b/src/CalculateEta.test.cpp new file mode 100644 index 0000000..2bdf387 --- /dev/null +++ b/src/CalculateEta.test.cpp @@ -0,0 +1,62 @@ +/************************************************ + * @file 
CalculateEta.test.cpp + * @short test case to calculate_eta2 + ***********************************************/ + +#include "aare/CalculateEta.hpp" +#include "aare/Cluster.hpp" +#include "aare/ClusterFile.hpp" + +// #include "catch.hpp" +#include +#include +#include + +using namespace aare; + +using ClusterTypes = + std::variant, Cluster, Cluster, + Cluster, Cluster>; + +auto get_test_parameters() { + return GENERATE( + std::make_tuple(ClusterTypes{Cluster{0, 0, {1, 2, 3, 1}}}, + Eta2{2. / 3, 3. / 4, corner::cBottomLeft, 7}), + std::make_tuple( + ClusterTypes{Cluster{0, 0, {1, 2, 3, 4, 5, 6, 1, 2, 7}}}, + Eta2{6. / 11, 2. / 7, corner::cTopRight, 20}), + std::make_tuple(ClusterTypes{Cluster{ + 0, 0, {1, 6, 7, 6, 5, 4, 3, 2, 1, 8, 8, 9, 2, + 1, 4, 5, 6, 7, 8, 4, 1, 1, 1, 1, 1}}}, + Eta2{9. / 17, 5. / 13, 8, 28}), + std::make_tuple( + ClusterTypes{Cluster{0, 0, {1, 4, 7, 2, 5, 6, 4, 3}}}, + Eta2{7. / 11, 6. / 10, 1, 21}), + std::make_tuple( + ClusterTypes{Cluster{0, 0, {1, 3, 2, 3, 4, 2}}}, + Eta2{3. / 5, 4. 
/ 6, 1, 11})); +} + +TEST_CASE("compute_largest_2x2_subcluster", "[.eta_calculation]") { + auto [cluster, expected_eta] = get_test_parameters(); + + auto [sum, index] = std::visit( + [](const auto &clustertype) { return clustertype.max_sum_2x2(); }, + cluster); + CHECK(expected_eta.c == index); + CHECK(expected_eta.sum == sum); +} + +TEST_CASE("calculate_eta2", "[.eta_calculation]") { + + auto [cluster, expected_eta] = get_test_parameters(); + + auto eta = std::visit( + [](const auto &clustertype) { return calculate_eta2(clustertype); }, + cluster); + + CHECK(eta.x == expected_eta.x); + CHECK(eta.y == expected_eta.y); + CHECK(eta.c == expected_eta.c); + CHECK(eta.sum == expected_eta.sum); +} diff --git a/src/Cluster.test.cpp b/src/Cluster.test.cpp index 7918d72..e502012 100644 --- a/src/Cluster.test.cpp +++ b/src/Cluster.test.cpp @@ -26,36 +26,3 @@ TEST_CASE("Correct Instantiation of Cluster and ClusterVector", CHECK(not is_cluster_v); CHECK(is_cluster_v>); } - -using ClusterTypes = - std::variant, Cluster, Cluster, - Cluster, Cluster>; - -TEST_CASE("calculate_eta2", "[.cluster][.eta_calculation]") { - - auto [cluster, expected_eta] = GENERATE( - std::make_tuple(ClusterTypes{Cluster{0, 0, {1, 2, 3, 1}}}, - Eta2{2. / 3, 3. / 4, corner::cBottomLeft, 7}), - std::make_tuple( - ClusterTypes{Cluster{0, 0, {1, 2, 3, 4, 5, 6, 1, 2, 7}}}, - Eta2{6. / 11, 2. / 7, corner::cTopRight, 20}), - std::make_tuple(ClusterTypes{Cluster{ - 0, 0, {1, 6, 7, 6, 5, 4, 3, 2, 1, 8, 8, 9, 2, - 1, 4, 5, 6, 7, 8, 4, 1, 1, 1, 1, 1}}}, - Eta2{9. / 17, 5. / 13, 8, 28}), - std::make_tuple( - ClusterTypes{Cluster{0, 0, {1, 4, 7, 2, 5, 6, 4, 3}}}, - Eta2{7. / 11, 6. / 10, 1, 21}), - std::make_tuple( - ClusterTypes{Cluster{0, 0, {1, 3, 2, 3, 4, 2}}}, - Eta2{3. / 5, 4. 
/ 6, 1, 11})); - - Eta2 eta = std::visit( - [](const auto &clustertype) { return calculate_eta2(clustertype); }, - cluster); - - CHECK(eta.x == expected_eta.x); - CHECK(eta.y == expected_eta.y); - CHECK(eta.c == expected_eta.c); - CHECK(eta.sum == expected_eta.sum); -} diff --git a/src/Interpolator.cpp b/src/Interpolator.cpp index e434231..e4f8e5c 100644 --- a/src/Interpolator.cpp +++ b/src/Interpolator.cpp @@ -68,7 +68,7 @@ Interpolator::interpolate(const ClusterVector &clusters) { for (size_t i = 0; i < clusters.size(); i++) { auto cluster = clusters.at(i); - Eta2 eta = calculate_eta2(cluster); + auto eta = calculate_eta2(cluster); Photon photon; photon.x = cluster.x; @@ -118,7 +118,7 @@ Interpolator::interpolate(const ClusterVector &clusters) { clusters.cluster_size_y() == 2) { for (size_t i = 0; i < clusters.size(); i++) { auto cluster = clusters.at(i); - Eta2 eta = calculate_eta2(cluster); + auto eta = calculate_eta2(cluster); Photon photon; photon.x = cluster.x; From 98d2d6098e64e767efdb1c3297b649100354f5c9 Mon Sep 17 00:00:00 2001 From: Mazzoleni Alice Francesca Date: Wed, 2 Apr 2025 16:00:46 +0200 Subject: [PATCH 065/120] refactored other cpp files --- include/aare/ClusterCollector.hpp | 56 +++++++++++++++-------------- include/aare/ClusterFileSink.hpp | 58 +++++++++++++++++-------------- 2 files changed, 61 insertions(+), 53 deletions(-) diff --git a/include/aare/ClusterCollector.hpp b/include/aare/ClusterCollector.hpp index 0738062..0a53cd0 100644 --- a/include/aare/ClusterCollector.hpp +++ b/include/aare/ClusterCollector.hpp @@ -2,29 +2,31 @@ #include #include -#include "aare/ProducerConsumerQueue.hpp" -#include "aare/ClusterVector.hpp" #include "aare/ClusterFinderMT.hpp" +#include "aare/ClusterVector.hpp" +#include "aare/ProducerConsumerQueue.hpp" namespace aare { -class ClusterCollector{ - ProducerConsumerQueue>* m_source; - std::atomic m_stop_requested{false}; - std::atomic m_stopped{true}; - std::chrono::milliseconds m_default_wait{1}; - std::thread 
m_thread; - std::vector> m_clusters; +template >> +class ClusterCollector { + ProducerConsumerQueue> *m_source; + std::atomic m_stop_requested{false}; + std::atomic m_stopped{true}; + std::chrono::milliseconds m_default_wait{1}; + std::thread m_thread; + std::vector> m_clusters; - void process(){ + void process() { m_stopped = false; fmt::print("ClusterCollector started\n"); - while (!m_stop_requested || !m_source->isEmpty()) { - if (ClusterVector *clusters = m_source->frontPtr(); + while (!m_stop_requested || !m_source->isEmpty()) { + if (ClusterVector *clusters = m_source->frontPtr(); clusters != nullptr) { m_clusters.push_back(std::move(*clusters)); m_source->popFront(); - }else{ + } else { std::this_thread::sleep_for(m_default_wait); } } @@ -32,21 +34,21 @@ class ClusterCollector{ m_stopped = true; } - public: - ClusterCollector(ClusterFinderMT* source){ - m_source = source->sink(); - m_thread = std::thread(&ClusterCollector::process, this); - } - void stop(){ - m_stop_requested = true; - m_thread.join(); - } - std::vector> steal_clusters(){ - if(!m_stopped){ - throw std::runtime_error("ClusterCollector is still running"); - } - return std::move(m_clusters); + public: + ClusterCollector(ClusterFinderMT *source) { + m_source = source->sink(); + m_thread = std::thread(&ClusterCollector::process, this); + } + void stop() { + m_stop_requested = true; + m_thread.join(); + } + std::vector> steal_clusters() { + if (!m_stopped) { + throw std::runtime_error("ClusterCollector is still running"); } + return std::move(m_clusters); + } }; } // namespace aare \ No newline at end of file diff --git a/include/aare/ClusterFileSink.hpp b/include/aare/ClusterFileSink.hpp index 158fdeb..520fbe3 100644 --- a/include/aare/ClusterFileSink.hpp +++ b/include/aare/ClusterFileSink.hpp @@ -3,35 +3,41 @@ #include #include -#include "aare/ProducerConsumerQueue.hpp" -#include "aare/ClusterVector.hpp" #include "aare/ClusterFinderMT.hpp" +#include "aare/ClusterVector.hpp" +#include 
"aare/ProducerConsumerQueue.hpp" -namespace aare{ +namespace aare { -class ClusterFileSink{ - ProducerConsumerQueue>* m_source; +template class ClusterFileSink { + ProducerConsumerQueue> *m_source; std::atomic m_stop_requested{false}; std::atomic m_stopped{true}; std::chrono::milliseconds m_default_wait{1}; std::thread m_thread; std::ofstream m_file; - - void process(){ + void process() { m_stopped = false; fmt::print("ClusterFileSink started\n"); - while (!m_stop_requested || !m_source->isEmpty()) { - if (ClusterVector *clusters = m_source->frontPtr(); + while (!m_stop_requested || !m_source->isEmpty()) { + if (ClusterVector *clusters = m_source->frontPtr(); clusters != nullptr) { // Write clusters to file - int32_t frame_number = clusters->frame_number(); //TODO! Should we store frame number already as int? + int32_t frame_number = + clusters->frame_number(); // TODO! Should we store frame + // number already as int? uint32_t num_clusters = clusters->size(); - m_file.write(reinterpret_cast(&frame_number), sizeof(frame_number)); - m_file.write(reinterpret_cast(&num_clusters), sizeof(num_clusters)); - m_file.write(reinterpret_cast(clusters->data()), clusters->size() * clusters->item_size()); + m_file.write(reinterpret_cast(&frame_number), + sizeof(frame_number)); + m_file.write(reinterpret_cast(&num_clusters), + sizeof(num_clusters)); + m_file.write(reinterpret_cast(clusters->data()), + clusters->size() * clusters->item_size()); m_source->popFront(); - }else{ + } else { std::this_thread::sleep_for(m_default_wait); } } @@ -39,18 +45,18 @@ class ClusterFileSink{ m_stopped = true; } - public: - ClusterFileSink(ClusterFinderMT* source, const std::filesystem::path& fname){ - m_source = source->sink(); - m_thread = std::thread(&ClusterFileSink::process, this); - m_file.open(fname, std::ios::binary); - } - void stop(){ - m_stop_requested = true; - m_thread.join(); - m_file.close(); - } + public: + ClusterFileSink(ClusterFinderMT *source, + const std::filesystem::path 
&fname) { + m_source = source->sink(); + m_thread = std::thread(&ClusterFileSink::process, this); + m_file.open(fname, std::ios::binary); + } + void stop() { + m_stop_requested = true; + m_thread.join(); + m_file.close(); + } }; - } // namespace aare \ No newline at end of file From 50eeba40059b37a22d8e2cec4138a379f9c98eaa Mon Sep 17 00:00:00 2001 From: Mazzoleni Alice Francesca Date: Wed, 2 Apr 2025 17:58:26 +0200 Subject: [PATCH 066/120] restructured GainMap to have own class and generalized --- CMakeLists.txt | 1 + include/aare/ClusterFile.hpp | 30 ++++++++++++----- include/aare/ClusterVector.hpp | 24 +------------- include/aare/GainMap.hpp | 59 ++++++++++++++++++++++++++++++++++ src/ClusterVector.test.cpp | 46 ++++++++++++++++++++++++++ 5 files changed, 129 insertions(+), 31 deletions(-) create mode 100644 include/aare/GainMap.hpp diff --git a/CMakeLists.txt b/CMakeLists.txt index 0ab1e73..b02303c 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -344,6 +344,7 @@ set(PUBLICHEADERS include/aare/Fit.hpp include/aare/FileInterface.hpp include/aare/Frame.hpp + include/aare/GainMap.hpp include/aare/geo_helpers.hpp include/aare/NDArray.hpp include/aare/NDView.hpp diff --git a/include/aare/ClusterFile.hpp b/include/aare/ClusterFile.hpp index 9c43326..eb6cc86 100644 --- a/include/aare/ClusterFile.hpp +++ b/include/aare/ClusterFile.hpp @@ -2,6 +2,7 @@ #include "aare/Cluster.hpp" #include "aare/ClusterVector.hpp" +#include "aare/GainMap.hpp" #include "aare/NDArray.hpp" #include "aare/defs.hpp" #include @@ -44,9 +45,8 @@ class ClusterFile { std::optional m_roi; /*Region of interest, will be applied if set*/ std::optional> m_noise_map; /*Noise map to cut photons, will be applied if set*/ - std::optional> - m_gain_map; /*Gain map to apply to the clusters, will be applied if - set*/ + std::optional m_gain_map; /*Gain map to apply to the clusters, will + be applied if set*/ public: /** @@ -107,6 +107,10 @@ class ClusterFile { */ void set_gain_map(const NDView gain_map); + 
void set_gain_map(const GainMap &gain_map); + + void set_gain_map(const GainMap &&gain_map); + /** * @brief Close the file. If not closed the file will be closed in the * destructor @@ -175,7 +179,17 @@ void ClusterFile::set_noise_map( template void ClusterFile::set_gain_map( const NDView gain_map) { - m_gain_map = NDArray(gain_map); + m_gain_map = GainMap(gain_map); +} + +template +void ClusterFile::set_gain_map(const GainMap &gain_map) { + m_gain_map = gain_map; +} + +template +void ClusterFile::set_gain_map(const GainMap &&gain_map) { + m_gain_map = gain_map; } // TODO generally supported for all clsuter types @@ -263,7 +277,7 @@ ClusterFile::read_clusters_without_cut(size_t n_clusters) { // No new allocation, only change bounds. clusters.resize(nph_read); if (m_gain_map) - clusters.apply_gain_map(m_gain_map->view()); + m_gain_map->apply_gain_map(clusters); return clusters; } @@ -312,7 +326,7 @@ ClusterFile::read_clusters_with_cut(size_t n_clusters) { } } if (m_gain_map) - clusters.apply_gain_map(m_gain_map->view()); + m_gain_map->apply_gain_map(clusters); return clusters; } @@ -370,7 +384,7 @@ ClusterFile::read_frame_without_cut() { } clusters.resize(n_clusters); if (m_gain_map) - clusters.apply_gain_map(m_gain_map->view()); + m_gain_map->apply_gain_map(clusters); return clusters; } @@ -403,7 +417,7 @@ ClusterFile::read_frame_with_cut() { } } if (m_gain_map) - clusters.apply_gain_map(m_gain_map->view()); + m_gain_map->apply_gain_map(clusters); return clusters; } diff --git a/include/aare/ClusterVector.hpp b/include/aare/ClusterVector.hpp index 30be5eb..ca2fd4d 100644 --- a/include/aare/ClusterVector.hpp +++ b/include/aare/ClusterVector.hpp @@ -32,7 +32,6 @@ class ClusterVector; // Forward declaration template class ClusterVector> { - using value_type = T; std::byte *m_data{}; size_t m_size{0}; @@ -49,6 +48,7 @@ class ClusterVector> { constexpr static char m_fmt_base[] = "=h:x:\nh:y:\n({},{}){}:data:"; public: + using value_type = T; using ClusterType = Cluster; 
/** @@ -237,28 +237,6 @@ class ClusterVector> { m_size = new_size; } - // TODO: Generalize !!!! Maybe move somewhere else - void apply_gain_map(const NDView gain_map) { - // in principle we need to know the size of the image for this lookup - // TODO! check orientations - std::array xcorr = {-1, 0, 1, -1, 0, 1, -1, 0, 1}; - std::array ycorr = {-1, -1, -1, 0, 0, 0, 1, 1, 1}; - for (size_t i = 0; i < m_size; i++) { - auto &cl = at(i); - - if (cl.x > 0 && cl.y > 0 && cl.x < gain_map.shape(1) - 1 && - cl.y < gain_map.shape(0) - 1) { - for (size_t j = 0; j < 9; j++) { - size_t x = cl.x + xcorr[j]; - size_t y = cl.y + ycorr[j]; - cl.data[j] = static_cast(cl.data[j] * gain_map(y, x)); - } - } else { - memset(cl.data, 0, 9 * sizeof(T)); // clear edge clusters - } - } - } - private: void allocate_buffer(size_t new_capacity) { size_t num_bytes = item_size() * new_capacity; diff --git a/include/aare/GainMap.hpp b/include/aare/GainMap.hpp new file mode 100644 index 0000000..9eb7c11 --- /dev/null +++ b/include/aare/GainMap.hpp @@ -0,0 +1,59 @@ +/************************************************ + * @file ApplyGainMap.hpp + * @short function to apply gain map of image size to a vector of clusters + ***********************************************/ + +#pragma once +#include "aare/Cluster.hpp" +#include "aare/ClusterVector.hpp" +#include "aare/NDArray.hpp" +#include "aare/NDView.hpp" +#include + +namespace aare { + +class GainMap { + + public: + explicit GainMap(const NDArray &gain_map) + : m_gain_map(gain_map) {}; + + explicit GainMap(const NDView gain_map) { + m_gain_map = NDArray(gain_map); + } + + template >> + void apply_gain_map(ClusterVector &clustervec) { + // in principle we need to know the size of the image for this lookup + // TODO! 
check orientations + size_t ClusterSizeX = clustervec.cluster_size_x(); + size_t ClusterSizeY = clustervec.cluster_size_y(); + + using T = typename ClusterVector::value_type; + + int64_t index_cluster_center_x = ClusterSizeX / 2; + int64_t index_cluster_center_y = ClusterSizeY / 2; + for (size_t i = 0; i < clustervec.size(); i++) { + auto &cl = clustervec.at(i); + + if (cl.x > 0 && cl.y > 0 && cl.x < m_gain_map.shape(1) - 1 && + cl.y < m_gain_map.shape(0) - 1) { + for (size_t j = 0; j < ClusterSizeX * ClusterSizeY; j++) { + size_t x = cl.x + j % ClusterSizeX - index_cluster_center_x; + size_t y = cl.y + j / ClusterSizeX - index_cluster_center_y; + cl.data[j] = static_cast(cl.data[j] * m_gain_map(y, x)); + } + } else { + memset(cl.data, 0, + ClusterSizeX * ClusterSizeY * + sizeof(T)); // clear edge clusters + } + } + } + + private: + NDArray m_gain_map{}; +}; + +} // end of namespace aare \ No newline at end of file diff --git a/src/ClusterVector.test.cpp b/src/ClusterVector.test.cpp index b58e88a..c354891 100644 --- a/src/ClusterVector.test.cpp +++ b/src/ClusterVector.test.cpp @@ -1,6 +1,7 @@ #include "aare/ClusterVector.hpp" #include +#include #include #include @@ -183,4 +184,49 @@ TEST_CASE("Concatenate two cluster vectors where we need to allocate", REQUIRE(ptr[2].y == 12); REQUIRE(ptr[3].x == 16); REQUIRE(ptr[3].y == 17); +} + +struct ClusterTestData { + int8_t ClusterSizeX; + int8_t ClusterSizeY; + std::vector index_map_x; + std::vector index_map_y; +}; + +TEST_CASE("Gain Map Calculation Index Map", "[.ClusterVector][.gain_map]") { + + auto clustertestdata = GENERATE( + ClusterTestData{3, + 3, + {-1, 0, 1, -1, 0, 1, -1, 0, 1}, + {-1, -1, -1, 0, 0, 0, 1, 1, 1}}, + ClusterTestData{ + 4, + 4, + {-2, -1, 0, 1, -2, -1, 0, 1, -2, -1, 0, 1, -2, -1, 0, 1}, + {-2, -2, -2, -2, -1, -1, -1, -1, 0, 0, 0, 0, 1, 1, 1, 1}}, + ClusterTestData{2, 2, {-1, 0, -1, 0}, {-1, -1, 0, 0}}, + ClusterTestData{5, + 5, + {-2, -1, 0, 1, 2, -2, -1, 0, 1, 2, -2, -1, 0, + 1, 2, -2, -1, 0, 1, 
2, -2, -1, 0, 1, 2}, + {-2, -2, -2, -2, -2, -1, -1, -1, -1, -1, 0, 0, 0, + 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2}}); + + int8_t ClusterSizeX = clustertestdata.ClusterSizeX; + int8_t ClusterSizeY = clustertestdata.ClusterSizeY; + + std::vector index_map_x(ClusterSizeX * ClusterSizeY); + std::vector index_map_y(ClusterSizeX * ClusterSizeY); + + int64_t index_cluster_center_x = ClusterSizeX / 2; + int64_t index_cluster_center_y = ClusterSizeY / 2; + + for (size_t j = 0; j < ClusterSizeX * ClusterSizeY; j++) { + index_map_x[j] = j % ClusterSizeX - index_cluster_center_x; + index_map_y[j] = j / ClusterSizeX - index_cluster_center_y; + } + + CHECK(index_map_x == clustertestdata.index_map_x); + CHECK(index_map_y == clustertestdata.index_map_y); } \ No newline at end of file From 85a6b5b95eabd6724567e4920b7d3e208056ed1d Mon Sep 17 00:00:00 2001 From: Mazzoleni Alice Francesca Date: Thu, 3 Apr 2025 09:28:02 +0200 Subject: [PATCH 067/120] suppress compiler warnings --- benchmarks/calculateeta_benchmark.cpp | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/benchmarks/calculateeta_benchmark.cpp b/benchmarks/calculateeta_benchmark.cpp index 609ce89..a320188 100644 --- a/benchmarks/calculateeta_benchmark.cpp +++ b/benchmarks/calculateeta_benchmark.cpp @@ -9,7 +9,10 @@ class ClusterFixture : public benchmark::Fixture { Cluster cluster_2x2{}; Cluster cluster_3x3{}; - void SetUp(::benchmark::State &state) { + private: + using benchmark::Fixture::SetUp; + + void SetUp([[maybe_unused]] const benchmark::State &state) override { int temp_data[4] = {1, 2, 3, 1}; std::copy(std::begin(temp_data), std::end(temp_data), std::begin(cluster_2x2.data)); From de9fc16e896c439695e4b461104a4c7f057e282f Mon Sep 17 00:00:00 2001 From: Mazzoleni Alice Francesca Date: Thu, 3 Apr 2025 09:28:54 +0200 Subject: [PATCH 068/120] generalize is_selected --- include/aare/Cluster.hpp | 16 ++++++++++++++++ include/aare/ClusterFile.hpp | 20 ++++++++++++++------ src/ClusterVector.test.cpp | 8 
++++---- 3 files changed, 34 insertions(+), 10 deletions(-) diff --git a/include/aare/Cluster.hpp b/include/aare/Cluster.hpp index cc102c4..46be10d 100644 --- a/include/aare/Cluster.hpp +++ b/include/aare/Cluster.hpp @@ -100,4 +100,20 @@ struct is_cluster> : std::true_type {}; // Cluster template constexpr bool is_cluster_v = is_cluster::value; +template >> +struct extract_template_arguments; // Forward declaration + +// helper struct to extract template argument +template +struct extract_template_arguments< + Cluster> { + + using type = T; + static constexpr int cluster_size_x = ClusterSizeX; + static constexpr int cluster_size_y = ClusterSizeY; + using coordtype = CoordType; +}; + } // namespace aare diff --git a/include/aare/ClusterFile.hpp b/include/aare/ClusterFile.hpp index eb6cc86..bc0ebd1 100644 --- a/include/aare/ClusterFile.hpp +++ b/include/aare/ClusterFile.hpp @@ -429,16 +429,24 @@ bool ClusterFile::is_selected(ClusterType &cl) { return false; } } - // TODO types are wrong generalize + + auto cluster_size_x = extract_template_arguments< + std::remove_reference_t>::cluster_size_x; + auto cluster_size_y = extract_template_arguments< + std::remove_reference_t>::cluster_size_y; + + size_t cluster_center_index = + (cluster_size_x / 2) + (cluster_size_y / 2) * cluster_size_x; + if (m_noise_map) { - int32_t sum_1x1 = cl.data[4]; // central pixel - int32_t sum_2x2 = - cl.max_sum_2x2().first; // highest sum of 2x2 subclusters - int32_t sum_3x3 = cl.sum(); // sum of all pixels + auto sum_1x1 = cl.data[cluster_center_index]; // central pixel + auto sum_2x2 = cl.max_sum_2x2().first; // highest sum of 2x2 subclusters + auto total_sum = cl.sum(); // sum of all pixels auto noise = (*m_noise_map)(cl.y, cl.x); // TODO! 
check if this is correct - if (sum_1x1 <= noise || sum_2x2 <= 2 * noise || sum_3x3 <= 3 * noise) { + if (sum_1x1 <= noise || sum_2x2 <= 2 * noise || + total_sum <= 3 * noise) { return false; } } diff --git a/src/ClusterVector.test.cpp b/src/ClusterVector.test.cpp index c354891..c6a36d8 100644 --- a/src/ClusterVector.test.cpp +++ b/src/ClusterVector.test.cpp @@ -187,8 +187,8 @@ TEST_CASE("Concatenate two cluster vectors where we need to allocate", } struct ClusterTestData { - int8_t ClusterSizeX; - int8_t ClusterSizeY; + uint8_t ClusterSizeX; + uint8_t ClusterSizeY; std::vector index_map_x; std::vector index_map_y; }; @@ -213,8 +213,8 @@ TEST_CASE("Gain Map Calculation Index Map", "[.ClusterVector][.gain_map]") { {-2, -2, -2, -2, -2, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2}}); - int8_t ClusterSizeX = clustertestdata.ClusterSizeX; - int8_t ClusterSizeY = clustertestdata.ClusterSizeY; + uint8_t ClusterSizeX = clustertestdata.ClusterSizeX; + uint8_t ClusterSizeY = clustertestdata.ClusterSizeY; std::vector index_map_x(ClusterSizeX * ClusterSizeY); std::vector index_map_y(ClusterSizeX * ClusterSizeY); From d7ef9bb1d8e933a6059bef9236a45f2150cc28d4 Mon Sep 17 00:00:00 2001 From: Mazzoleni Alice Francesca Date: Thu, 3 Apr 2025 11:36:15 +0200 Subject: [PATCH 069/120] missed some refactoring of datatypes --- include/aare/Cluster.hpp | 6 +++--- include/aare/ClusterCollector.hpp | 2 +- include/aare/ClusterFileSink.hpp | 6 +++--- include/aare/ClusterFinder.hpp | 27 ++++++++++++++------------- include/aare/ClusterFinderMT.hpp | 22 ++++++++++++++-------- include/aare/ClusterVector.hpp | 14 ++++++++++++++ include/aare/GainMap.hpp | 1 - 7 files changed, 49 insertions(+), 29 deletions(-) diff --git a/include/aare/Cluster.hpp b/include/aare/Cluster.hpp index 46be10d..ca2e01f 100644 --- a/include/aare/Cluster.hpp +++ b/include/aare/Cluster.hpp @@ -100,8 +100,8 @@ struct is_cluster> : std::true_type {}; // Cluster template constexpr bool is_cluster_v = 
is_cluster::value; -template >> +template +// typename = std::enable_if_t>> struct extract_template_arguments; // Forward declaration // helper struct to extract template argument @@ -110,7 +110,7 @@ template > { - using type = T; + using value_type = T; static constexpr int cluster_size_x = ClusterSizeX; static constexpr int cluster_size_y = ClusterSizeY; using coordtype = CoordType; diff --git a/include/aare/ClusterCollector.hpp b/include/aare/ClusterCollector.hpp index 0a53cd0..cb49f58 100644 --- a/include/aare/ClusterCollector.hpp +++ b/include/aare/ClusterCollector.hpp @@ -35,7 +35,7 @@ class ClusterCollector { } public: - ClusterCollector(ClusterFinderMT *source) { + ClusterCollector(ClusterFinderMT *source) { m_source = source->sink(); m_thread = std::thread(&ClusterCollector::process, this); } diff --git a/include/aare/ClusterFileSink.hpp b/include/aare/ClusterFileSink.hpp index 520fbe3..810e63c 100644 --- a/include/aare/ClusterFileSink.hpp +++ b/include/aare/ClusterFileSink.hpp @@ -10,8 +10,8 @@ namespace aare { template class ClusterFileSink { + typename = std::enable_if_t>> +class ClusterFileSink { ProducerConsumerQueue> *m_source; std::atomic m_stop_requested{false}; std::atomic m_stopped{true}; @@ -46,7 +46,7 @@ template *source, + ClusterFileSink(ClusterFinderMT *source, const std::filesystem::path &fname) { m_source = source->sink(); m_thread = std::thread(&ClusterFileSink::process, this); diff --git a/include/aare/ClusterFinder.hpp b/include/aare/ClusterFinder.hpp index 19ada67..120d39d 100644 --- a/include/aare/ClusterFinder.hpp +++ b/include/aare/ClusterFinder.hpp @@ -10,16 +10,21 @@ namespace aare { -template +template , + typename FRAME_TYPE = uint16_t, typename PEDESTAL_TYPE = double> class ClusterFinder { Shape<2> m_image_size; const PEDESTAL_TYPE m_nSigma; const PEDESTAL_TYPE c2; const PEDESTAL_TYPE c3; Pedestal m_pedestal; - ClusterVector> m_clusters; + ClusterVector m_clusters; + + static const uint8_t ClusterSizeX = + 
extract_template_arguments::cluster_size_x; + static const uint8_t ClusterSizeY = + extract_template_arguments::cluster_size_x; + using CT = typename extract_template_arguments::value_type; public: /** @@ -52,16 +57,13 @@ class ClusterFinder { * same capacity as the old one * */ - ClusterVector> + ClusterVector steal_clusters(bool realloc_same_capacity = false) { - ClusterVector> tmp = - std::move(m_clusters); + ClusterVector tmp = std::move(m_clusters); if (realloc_same_capacity) - m_clusters = ClusterVector>( - tmp.capacity()); + m_clusters = ClusterVector(tmp.capacity()); else - m_clusters = - ClusterVector>{}; + m_clusters = ClusterVector{}; return tmp; } void find_clusters(NDView frame, uint64_t frame_number = 0) { @@ -147,8 +149,7 @@ class ClusterFinder { // Add the cluster to the output ClusterVector m_clusters.push_back( - Cluster{ - ix, iy, cluster_data.data()}); + ClusterType{ix, iy, cluster_data.data()}); } } } diff --git a/include/aare/ClusterFinderMT.hpp b/include/aare/ClusterFinderMT.hpp index 1efb843..62046b7 100644 --- a/include/aare/ClusterFinderMT.hpp +++ b/include/aare/ClusterFinderMT.hpp @@ -30,14 +30,16 @@ struct FrameWrapper { * @tparam PEDESTAL_TYPE type of the pedestal data * @tparam CT type of the cluster data */ -template +template , + typename FRAME_TYPE = uint16_t, typename PEDESTAL_TYPE = double> class ClusterFinderMT { + + using CT = typename extract_template_arguments::value_type; size_t m_current_thread{0}; size_t m_n_threads{0}; - using Finder = ClusterFinder; + using Finder = ClusterFinder; using InputQueue = ProducerConsumerQueue; - using OutputQueue = ProducerConsumerQueue>; + using OutputQueue = ProducerConsumerQueue>; std::vector> m_input_queues; std::vector> m_output_queues; @@ -66,7 +68,8 @@ class ClusterFinderMT { switch (frame->type) { case FrameType::DATA: cf->find_clusters(frame->data.view(), frame->frame_number); - m_output_queues[thread_id]->write(cf->steal_clusters(realloc_same_capacity)); + 
m_output_queues[thread_id]->write( + cf->steal_clusters(realloc_same_capacity)); break; case FrameType::PEDESTAL: @@ -127,15 +130,18 @@ class ClusterFinderMT { m_input_queues.emplace_back(std::make_unique(200)); m_output_queues.emplace_back(std::make_unique(200)); } - //TODO! Should we start automatically? + // TODO! Should we start automatically? start(); } /** * @brief Return the sink queue where all the clusters are collected - * @warning You need to empty this queue otherwise the cluster finder will wait forever + * @warning You need to empty this queue otherwise the cluster finder will + * wait forever */ - ProducerConsumerQueue> *sink() { return &m_sink; } + ProducerConsumerQueue> *sink() { + return &m_sink; + } /** * @brief Start all processing threads diff --git a/include/aare/ClusterVector.hpp b/include/aare/ClusterVector.hpp index ca2fd4d..0beae3d 100644 --- a/include/aare/ClusterVector.hpp +++ b/include/aare/ClusterVector.hpp @@ -148,6 +148,20 @@ class ClusterVector> { return sums; } + /** + * @brief Sum the pixels in the 2x2 subcluster with the biggest pixel sum in + * each cluster + * @return std::vector vector of sums for each cluster + */ //TODO if underlying container is a vector use std::for_each + std::vector sum_2x2() { + std::vector sums_2x2(m_size); + + for (size_t i = 0; i < m_size; i++) { + sums_2x2[i] = at(i).max_sum_2x2; + } + return sums_2x2; + } + /** * @brief Return the number of clusters in the vector */ diff --git a/include/aare/GainMap.hpp b/include/aare/GainMap.hpp index 9eb7c11..a60c131 100644 --- a/include/aare/GainMap.hpp +++ b/include/aare/GainMap.hpp @@ -26,7 +26,6 @@ class GainMap { typename = std::enable_if_t>> void apply_gain_map(ClusterVector &clustervec) { // in principle we need to know the size of the image for this lookup - // TODO! 
check orientations size_t ClusterSizeX = clustervec.cluster_size_x(); size_t ClusterSizeY = clustervec.cluster_size_y(); From a24bbd9cf93961546451094b4a269a0b93289ae4 Mon Sep 17 00:00:00 2001 From: Mazzoleni Alice Francesca Date: Thu, 3 Apr 2025 11:56:25 +0200 Subject: [PATCH 070/120] started to do python refactoring --- python/src/cluster.hpp | 185 ++++++++++++++++++++--------------- python/src/cluster_file.hpp | 89 ++++++++++------- python/src/interpolation.hpp | 51 ++++++---- python/src/module.cpp | 19 ++-- 4 files changed, 198 insertions(+), 146 deletions(-) diff --git a/python/src/cluster.hpp b/python/src/cluster.hpp index 3db816a..5657288 100644 --- a/python/src/cluster.hpp +++ b/python/src/cluster.hpp @@ -16,149 +16,176 @@ namespace py = pybind11; using pd_type = double; -template +using namespace aare; + +template void define_cluster_vector(py::module &m, const std::string &typestr) { + + using T = typename extract_template_arguments::value_type; + auto class_name = fmt::format("ClusterVector_{}", typestr); - py::class_>(m, class_name.c_str(), py::buffer_protocol()) - .def(py::init(), - py::arg("cluster_size_x") = 3, py::arg("cluster_size_y") = 3) + py::class_>(m, class_name.c_str(), + py::buffer_protocol()) + .def(py::init(), py::arg("cluster_size_x") = 3, + py::arg("cluster_size_y") = 3) // TODO change!!! 
.def("push_back", - [](ClusterVector &self, int x, int y, py::array_t data) { - // auto view = make_view_2d(data); - self.push_back(x, y, reinterpret_cast(data.data())); + [](ClusterVector &self, ClusterType &cl) { + // auto view = make_view_2d(data); + self.push_back(cl); }) - .def_property_readonly("size", &ClusterVector::size) - .def("item_size", &ClusterVector::item_size) + .def_property_readonly("size", &ClusterVector::size) + .def("item_size", &ClusterVector::item_size) .def_property_readonly("fmt", - [typestr](ClusterVector &self) { + [typestr](ClusterVector &self) { return fmt::format( self.fmt_base(), self.cluster_size_x(), self.cluster_size_y(), typestr); }) .def("sum", - [](ClusterVector &self) { + [](ClusterVector &self) { auto *vec = new std::vector(self.sum()); return return_vector(vec); }) - .def("sum_2x2", [](ClusterVector &self) { - auto *vec = new std::vector(self.sum_2x2()); - return return_vector(vec); - }) - .def_property_readonly("cluster_size_x", &ClusterVector::cluster_size_x) - .def_property_readonly("cluster_size_y", &ClusterVector::cluster_size_y) - .def_property_readonly("capacity", &ClusterVector::capacity) - .def_property("frame_number", &ClusterVector::frame_number, - &ClusterVector::set_frame_number) - .def_buffer([typestr](ClusterVector &self) -> py::buffer_info { - return py::buffer_info( - self.data(), /* Pointer to buffer */ - self.item_size(), /* Size of one scalar */ - fmt::format(self.fmt_base(), self.cluster_size_x(), - self.cluster_size_y(), - typestr), /* Format descriptor */ - 1, /* Number of dimensions */ - {self.size()}, /* Buffer dimensions */ - {self.item_size()} /* Strides (in bytes) for each index */ - ); - }); + .def("sum_2x2", + [](ClusterVector &self) { + auto *vec = new std::vector(self.sum_2x2()); + return return_vector(vec); + }) + .def_property_readonly("cluster_size_x", + &ClusterVector::cluster_size_x) + .def_property_readonly("cluster_size_y", + &ClusterVector::cluster_size_y) + 
.def_property_readonly("capacity", + &ClusterVector::capacity) + .def_property("frame_number", &ClusterVector::frame_number, + &ClusterVector::set_frame_number) + .def_buffer( + [typestr](ClusterVector &self) -> py::buffer_info { + return py::buffer_info( + self.data(), /* Pointer to buffer */ + self.item_size(), /* Size of one scalar */ + fmt::format(self.fmt_base(), self.cluster_size_x(), + self.cluster_size_y(), + typestr), /* Format descriptor */ + 1, /* Number of dimensions */ + {self.size()}, /* Buffer dimensions */ + {self.item_size()} /* Strides (in bytes) for each index */ + ); + }); } +template void define_cluster_finder_mt_bindings(py::module &m) { - py::class_>(m, "ClusterFinderMT") + py::class_>( + m, "ClusterFinderMT") .def(py::init, Shape<2>, pd_type, size_t, size_t>(), py::arg("image_size"), py::arg("cluster_size"), py::arg("n_sigma") = 5.0, py::arg("capacity") = 2048, py::arg("n_threads") = 3) .def("push_pedestal_frame", - [](ClusterFinderMT &self, + [](ClusterFinderMT &self, py::array_t frame) { auto view = make_view_2d(frame); self.push_pedestal_frame(view); }) .def( "find_clusters", - [](ClusterFinderMT &self, + [](ClusterFinderMT &self, py::array_t frame, uint64_t frame_number) { auto view = make_view_2d(frame); self.find_clusters(view, frame_number); return; }, py::arg(), py::arg("frame_number") = 0) - .def("clear_pedestal", &ClusterFinderMT::clear_pedestal) - .def("sync", &ClusterFinderMT::sync) - .def("stop", &ClusterFinderMT::stop) - .def("start", &ClusterFinderMT::start) - .def("pedestal", - [](ClusterFinderMT &self, size_t thread_index) { - auto pd = new NDArray{}; - *pd = self.pedestal(thread_index); - return return_image_data(pd); - },py::arg("thread_index") = 0) - .def("noise", - [](ClusterFinderMT &self, size_t thread_index) { - auto arr = new NDArray{}; - *arr = self.noise(thread_index); - return return_image_data(arr); - },py::arg("thread_index") = 0); + .def("clear_pedestal", + &ClusterFinderMT::clear_pedestal) + .def("sync", 
&ClusterFinderMT::sync) + .def("stop", &ClusterFinderMT::stop) + .def("start", &ClusterFinderMT::start) + .def( + "pedestal", + [](ClusterFinderMT &self, + size_t thread_index) { + auto pd = new NDArray{}; + *pd = self.pedestal(thread_index); + return return_image_data(pd); + }, + py::arg("thread_index") = 0) + .def( + "noise", + [](ClusterFinderMT &self, + size_t thread_index) { + auto arr = new NDArray{}; + *arr = self.noise(thread_index); + return return_image_data(arr); + }, + py::arg("thread_index") = 0); } +template void define_cluster_collector_bindings(py::module &m) { - py::class_(m, "ClusterCollector") - .def(py::init *>()) - .def("stop", &ClusterCollector::stop) + py::class_>(m, "ClusterCollector") + .def(py::init *>()) + .def("stop", &ClusterCollector::stop) .def( "steal_clusters", - [](ClusterCollector &self) { - auto v = - new std::vector>(self.steal_clusters()); + [](ClusterCollector &self) { + auto v = new std::vector>( + self.steal_clusters()); return v; }, py::return_value_policy::take_ownership); } +template void define_cluster_file_sink_bindings(py::module &m) { - py::class_(m, "ClusterFileSink") - .def(py::init *, + py::class_>(m, "ClusterFileSink") + .def(py::init *, const std::filesystem::path &>()) - .def("stop", &ClusterFileSink::stop); + .def("stop", &ClusterFileSink::stop); } +template void define_cluster_finder_bindings(py::module &m) { - py::class_>(m, "ClusterFinder") + py::class_>(m, + "ClusterFinder") .def(py::init, Shape<2>, pd_type, size_t>(), py::arg("image_size"), py::arg("cluster_size"), py::arg("n_sigma") = 5.0, py::arg("capacity") = 1'000'000) .def("push_pedestal_frame", - [](ClusterFinder &self, + [](ClusterFinder &self, py::array_t frame) { auto view = make_view_2d(frame); self.push_pedestal_frame(view); }) - .def("clear_pedestal", &ClusterFinder::clear_pedestal) - .def_property_readonly("pedestal", - [](ClusterFinder &self) { - auto pd = new NDArray{}; - *pd = self.pedestal(); - return return_image_data(pd); - }) - 
.def_property_readonly("noise", - [](ClusterFinder &self) { - auto arr = new NDArray{}; - *arr = self.noise(); - return return_image_data(arr); - }) + .def("clear_pedestal", + &ClusterFinder::clear_pedestal) + .def_property_readonly( + "pedestal", + [](ClusterFinder &self) { + auto pd = new NDArray{}; + *pd = self.pedestal(); + return return_image_data(pd); + }) + .def_property_readonly( + "noise", + [](ClusterFinder &self) { + auto arr = new NDArray{}; + *arr = self.noise(); + return return_image_data(arr); + }) .def( "steal_clusters", - [](ClusterFinder &self, + [](ClusterFinder &self, bool realloc_same_capacity) { - auto v = new ClusterVector( + auto v = new ClusterVector( self.steal_clusters(realloc_same_capacity)); return v; }, py::arg("realloc_same_capacity") = false) .def( "find_clusters", - [](ClusterFinder &self, + [](ClusterFinder &self, py::array_t frame, uint64_t frame_number) { auto view = make_view_2d(frame); self.find_clusters(view, frame_number); @@ -167,7 +194,7 @@ void define_cluster_finder_bindings(py::module &m) { py::arg(), py::arg("frame_number") = 0); m.def("hitmap", - [](std::array image_size, ClusterVector &cv) { + [](std::array image_size, ClusterVector &cv) { py::array_t hitmap(image_size); auto r = hitmap.mutable_unchecked<2>(); diff --git a/python/src/cluster_file.hpp b/python/src/cluster_file.hpp index b807712..576c3bb 100644 --- a/python/src/cluster_file.hpp +++ b/python/src/cluster_file.hpp @@ -10,69 +10,86 @@ #include #include -//Disable warnings for unused parameters, as we ignore some -//in the __exit__ method +// Disable warnings for unused parameters, as we ignore some +// in the __exit__ method #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" - namespace py = pybind11; using namespace ::aare; +template void define_cluster_file_io_bindings(py::module &m) { PYBIND11_NUMPY_DTYPE(Cluster3x3, x, y, data); - py::class_(m, "ClusterFile") + py::class_>(m, "ClusterFile") .def(py::init(), py::arg(), 
py::arg("chunk_size") = 1000, py::arg("mode") = "r") - .def("read_clusters", - [](ClusterFile &self, size_t n_clusters) { - auto v = new ClusterVector(self.read_clusters(n_clusters)); + .def( + "read_clusters", + [](ClusterFile &self, size_t n_clusters) { + auto v = new ClusterVector( + self.read_clusters(n_clusters)); return v; - },py::return_value_policy::take_ownership) - .def("read_clusters", - [](ClusterFile &self, size_t n_clusters, ROI roi) { - auto v = new ClusterVector(self.read_clusters(n_clusters, roi)); - return v; - },py::return_value_policy::take_ownership) + }, + py::return_value_policy::take_ownership) + .def( + "read_clusters", + [](ClusterFile &self, size_t n_clusters, ROI roi) { + auto v = new ClusterVector( + self.read_clusters(n_clusters, roi)); + return v; + }, + py::return_value_policy::take_ownership) .def("read_frame", - [](ClusterFile &self) { - auto v = new ClusterVector(self.read_frame()); - return v; + [](ClusterFile &self) { + auto v = new ClusterVector(self.read_frame()); + return v; }) - .def("set_roi", &ClusterFile::set_roi) - .def("set_noise_map", [](ClusterFile &self, py::array_t noise_map) { - auto view = make_view_2d(noise_map); - self.set_noise_map(view); - }) - .def("set_gain_map", [](ClusterFile &self, py::array_t gain_map) { - auto view = make_view_2d(gain_map); - self.set_gain_map(view); - }) - .def("close", &ClusterFile::close) - .def("write_frame", &ClusterFile::write_frame) - .def("__enter__", [](ClusterFile &self) { return &self; }) + .def("set_roi", &ClusterFile::set_roi) + .def( + "set_noise_map", + [](ClusterFile &self, py::array_t noise_map) { + auto view = make_view_2d(noise_map); + self.set_noise_map(view); + }) + + .def("set_gain_map", + [](ClusterFile &self, py::array_t gain_map) { + auto view = make_view_2d(gain_map); + self.set_gain_map(view); + }) + + // void set_gain_map(const GainMap &gain_map); //TODO do i need a + // gainmap constructor? 
+ + .def("close", &ClusterFile::close) + .def("write_frame", &ClusterFile::write_frame) + .def("__enter__", [](ClusterFile &self) { return &self; }) .def("__exit__", - [](ClusterFile &self, + [](ClusterFile &self, const std::optional &exc_type, const std::optional &exc_value, const std::optional &traceback) { self.close(); }) - .def("__iter__", [](ClusterFile &self) { return &self; }) - .def("__next__", [](ClusterFile &self) { - auto v = new ClusterVector(self.read_clusters(self.chunk_size())); + .def("__iter__", [](ClusterFile &self) { return &self; }) + .def("__next__", [](ClusterFile &self) { + auto v = new ClusterVector( + self.read_clusters(self.chunk_size())); if (v->size() == 0) { throw py::stop_iteration(); } return v; }); - m.def("calculate_eta2", []( aare::ClusterVector &clusters) { - auto eta2 = new NDArray(calculate_eta2(clusters)); - return return_image_data(eta2); - }); + /* + m.def("calculate_eta2", []( aare::ClusterVector &clusters) { + auto eta2 = new NDArray(calculate_eta2(clusters)); + return return_image_data(eta2); + }); + */ //add in different file } #pragma GCC diagnostic pop \ No newline at end of file diff --git a/python/src/interpolation.hpp b/python/src/interpolation.hpp index 02742e1..47b5203 100644 --- a/python/src/interpolation.hpp +++ b/python/src/interpolation.hpp @@ -8,31 +8,40 @@ #include namespace py = pybind11; + void define_interpolation_bindings(py::module &m) { - PYBIND11_NUMPY_DTYPE(aare::Photon, x,y,energy); + PYBIND11_NUMPY_DTYPE(aare::Photon, x, y, energy); py::class_(m, "Interpolator") - .def(py::init([](py::array_t etacube, py::array_t xbins, - py::array_t ybins, py::array_t ebins) { - return Interpolator(make_view_3d(etacube), make_view_1d(xbins), - make_view_1d(ybins), make_view_1d(ebins)); - })) - .def("get_ietax", [](Interpolator& self){ - auto*ptr = new NDArray{}; - *ptr = self.get_ietax(); - return return_image_data(ptr); - }) - .def("get_ietay", [](Interpolator& self){ - auto*ptr = new NDArray{}; - *ptr = 
self.get_ietay(); - return return_image_data(ptr); - }) - .def("interpolate", [](Interpolator& self, const ClusterVector& clusters){ - auto photons = self.interpolate(clusters); - auto* ptr = new std::vector{photons}; - return return_vector(ptr); - }); + .def(py::init( + [](py::array_t + etacube, + py::array_t xbins, py::array_t ybins, + py::array_t ebins) { + return Interpolator(make_view_3d(etacube), make_view_1d(xbins), + make_view_1d(ybins), make_view_1d(ebins)); + })) + .def("get_ietax", + [](Interpolator &self) { + auto *ptr = new NDArray{}; + *ptr = self.get_ietax(); + return return_image_data(ptr); + }) + .def("get_ietay", + [](Interpolator &self) { + auto *ptr = new NDArray{}; + *ptr = self.get_ietay(); + return return_image_data(ptr); + }) + + // TODO take care of clustertype template + .def("interpolate", + [](Interpolator &self, const ClusterVector &clusters) { + auto photons = self.interpolate(clusters); + auto *ptr = new std::vector{photons}; + return return_vector(ptr); + }); // TODO! 
Evaluate without converting to double m.def( diff --git a/python/src/module.cpp b/python/src/module.cpp index 43f48ba..ba681be 100644 --- a/python/src/module.cpp +++ b/python/src/module.cpp @@ -1,17 +1,17 @@ -//Files with bindings to the different classes -#include "file.hpp" -#include "raw_file.hpp" -#include "ctb_raw_file.hpp" -#include "raw_master_file.hpp" -#include "var_cluster.hpp" -#include "pixel_map.hpp" -#include "pedestal.hpp" +// Files with bindings to the different classes #include "cluster.hpp" #include "cluster_file.hpp" +#include "ctb_raw_file.hpp" +#include "file.hpp" #include "fit.hpp" #include "interpolation.hpp" +#include "pedestal.hpp" +#include "pixel_map.hpp" +#include "raw_file.hpp" +#include "raw_master_file.hpp" +#include "var_cluster.hpp" -//Pybind stuff +// Pybind stuff #include #include @@ -33,5 +33,4 @@ PYBIND11_MODULE(_aare, m) { define_cluster_file_sink_bindings(m); define_fit_bindings(m); define_interpolation_bindings(m); - } \ No newline at end of file From 7db1ae4d942e3ea4a947ca8b081b1e0f90b79b75 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Erik=20Fr=C3=B6jdh?= Date: Thu, 3 Apr 2025 13:18:55 +0200 Subject: [PATCH 071/120] Dev/gitea ci (#151) Build and test on internal PSI gitea --- .gitea/workflows/cmake_build.yml | 18 +++++++++--------- .gitea/workflows/rh8-native.yml | 30 ++++++++++++++++++++++++++++++ .gitea/workflows/rh9-native.yml | 31 +++++++++++++++++++++++++++++++ .github/workflows/build_docs.yml | 12 +++++------- etc/dev-env.yml | 15 +++++++++++++++ 5 files changed, 90 insertions(+), 16 deletions(-) create mode 100644 .gitea/workflows/rh8-native.yml create mode 100644 .gitea/workflows/rh9-native.yml create mode 100644 etc/dev-env.yml diff --git a/.gitea/workflows/cmake_build.yml b/.gitea/workflows/cmake_build.yml index 43a0181..aa7a297 100644 --- a/.gitea/workflows/cmake_build.yml +++ b/.gitea/workflows/cmake_build.yml @@ -2,9 +2,8 @@ name: Build the package using cmake then documentation on: workflow_dispatch: - push: - + 
permissions: contents: read @@ -16,12 +15,12 @@ jobs: strategy: fail-fast: false matrix: - platform: [ubuntu-latest, ] # macos-12, windows-2019] - python-version: ["3.12",] + platform: [ubuntu-latest, ] + python-version: ["3.12", ] runs-on: ${{ matrix.platform }} - # The setup-miniconda action needs this to activate miniconda + defaults: run: shell: "bash -l {0}" @@ -35,13 +34,13 @@ jobs: sudo apt-get -y install cmake gcc g++ - name: Get conda - uses: conda-incubator/setup-miniconda@v3.0.4 + uses: conda-incubator/setup-miniconda@v3 with: python-version: ${{ matrix.python-version }} + environment-file: etc/dev-env.yml + miniforge-version: latest channels: conda-forge - - - name: Prepare - run: conda install doxygen sphinx=7.1.2 breathe pybind11 sphinx_rtd_theme furo nlohmann_json zeromq fmt numpy + conda-remove-defaults: "true" - name: Build library run: | @@ -56,3 +55,4 @@ jobs: + diff --git a/.gitea/workflows/rh8-native.yml b/.gitea/workflows/rh8-native.yml new file mode 100644 index 0000000..02d3dc0 --- /dev/null +++ b/.gitea/workflows/rh8-native.yml @@ -0,0 +1,30 @@ +name: Build on RHEL8 + +on: + workflow_dispatch: + +permissions: + contents: read + +jobs: + buildh: + runs-on: "ubuntu-latest" + container: + image: gitea.psi.ch/images/rhel8-developer-gitea-actions + steps: + - uses: actions/checkout@v4 + + + - name: Install dependencies + run: | + dnf install -y cmake python3.12 python3.12-devel python3.12-pip + + - name: Build library + run: | + mkdir build && cd build + cmake .. 
-DAARE_PYTHON_BINDINGS=ON -DAARE_TESTS=ON + make -j 2 + + - name: C++ unit tests + working-directory: ${{gitea.workspace}}/build + run: ctest \ No newline at end of file diff --git a/.gitea/workflows/rh9-native.yml b/.gitea/workflows/rh9-native.yml new file mode 100644 index 0000000..c1f10ac --- /dev/null +++ b/.gitea/workflows/rh9-native.yml @@ -0,0 +1,31 @@ +name: Build on RHEL9 + +on: + push: + workflow_dispatch: + +permissions: + contents: read + +jobs: + buildh: + runs-on: "ubuntu-latest" + container: + image: gitea.psi.ch/images/rhel9-developer-gitea-actions + steps: + - uses: actions/checkout@v4 + + + - name: Install dependencies + run: | + dnf install -y cmake python3.12 python3.12-devel python3.12-pip + + - name: Build library + run: | + mkdir build && cd build + cmake .. -DAARE_PYTHON_BINDINGS=ON -DAARE_TESTS=ON + make -j 2 + + - name: C++ unit tests + working-directory: ${{gitea.workspace}}/build + run: ctest \ No newline at end of file diff --git a/.github/workflows/build_docs.yml b/.github/workflows/build_docs.yml index 959ab70..24050a3 100644 --- a/.github/workflows/build_docs.yml +++ b/.github/workflows/build_docs.yml @@ -5,7 +5,6 @@ on: push: - permissions: contents: read pages: write @@ -16,12 +15,11 @@ jobs: strategy: fail-fast: false matrix: - platform: [ubuntu-latest, ] # macos-12, windows-2019] + platform: [ubuntu-latest, ] python-version: ["3.12",] runs-on: ${{ matrix.platform }} - # The setup-miniconda action needs this to activate miniconda defaults: run: shell: "bash -l {0}" @@ -30,13 +28,13 @@ jobs: - uses: actions/checkout@v4 - name: Get conda - uses: conda-incubator/setup-miniconda@v3.0.4 + uses: conda-incubator/setup-miniconda@v3 with: python-version: ${{ matrix.python-version }} + environment-file: etc/dev-env.yml + miniforge-version: latest channels: conda-forge - - - name: Prepare - run: conda install doxygen sphinx=7.1.2 breathe pybind11 sphinx_rtd_theme furo nlohmann_json zeromq fmt numpy + conda-remove-defaults: "true" - name: 
Build library run: | diff --git a/etc/dev-env.yml b/etc/dev-env.yml new file mode 100644 index 0000000..25038ee --- /dev/null +++ b/etc/dev-env.yml @@ -0,0 +1,15 @@ +name: dev-environment +channels: + - conda-forge +dependencies: + - anaconda-client + - doxygen + - sphinx=7.1.2 + - breathe + - pybind11 + - sphinx_rtd_theme + - furo + - nlohmann_json + - zeromq + - fmt + - numpy From 248d25486fc0223498742215dbe4bd3d6736512c Mon Sep 17 00:00:00 2001 From: Mazzoleni Alice Francesca Date: Thu, 3 Apr 2025 16:38:12 +0200 Subject: [PATCH 072/120] refactored python files --- include/aare/CalculateEta.hpp | 13 ++++--- include/aare/Cluster.hpp | 4 +- include/aare/ClusterFinder.hpp | 11 ++++-- include/aare/ClusterFinderMT.hpp | 10 ++--- include/aare/GainMap.hpp | 2 +- python/src/cluster.hpp | 53 +++++++++++++++----------- python/src/cluster_file.hpp | 22 ++++++----- python/src/interpolation.hpp | 64 +++++++++++++++++++------------- python/src/module.cpp | 49 +++++++++++++++++++++--- 9 files changed, 151 insertions(+), 77 deletions(-) diff --git a/include/aare/CalculateEta.hpp b/include/aare/CalculateEta.hpp index 86871c9..0aab540 100644 --- a/include/aare/CalculateEta.hpp +++ b/include/aare/CalculateEta.hpp @@ -35,7 +35,8 @@ template struct Eta2 { /** * @brief Calculate the eta2 values for all clusters in a Clsutervector */ -template >> +template >> NDArray calculate_eta2(const ClusterVector &clusters) { NDArray eta2({static_cast(clusters.size()), 2}); @@ -70,16 +71,18 @@ calculate_eta2(const Cluster &cl) { cl.data[index_bottom_left_max_2x2_subcluster + 1]) != 0) eta.x = static_cast( cl.data[index_bottom_left_max_2x2_subcluster + 1]) / - (cl.data[index_bottom_left_max_2x2_subcluster] + - cl.data[index_bottom_left_max_2x2_subcluster + 1]); + static_cast( + (cl.data[index_bottom_left_max_2x2_subcluster] + + cl.data[index_bottom_left_max_2x2_subcluster + 1])); if ((cl.data[index_bottom_left_max_2x2_subcluster] + cl.data[index_bottom_left_max_2x2_subcluster + ClusterSizeX]) != 0) 
eta.y = static_cast( cl.data[index_bottom_left_max_2x2_subcluster + ClusterSizeX]) / - (cl.data[index_bottom_left_max_2x2_subcluster] + - cl.data[index_bottom_left_max_2x2_subcluster + ClusterSizeX]); + static_cast( + (cl.data[index_bottom_left_max_2x2_subcluster] + + cl.data[index_bottom_left_max_2x2_subcluster + ClusterSizeX])); eta.c = c; // TODO only supported for 2x2 and 3x3 clusters -> at least no // underyling enum class diff --git a/include/aare/Cluster.hpp b/include/aare/Cluster.hpp index ca2e01f..a47edf0 100644 --- a/include/aare/Cluster.hpp +++ b/include/aare/Cluster.hpp @@ -100,8 +100,8 @@ struct is_cluster> : std::true_type {}; // Cluster template constexpr bool is_cluster_v = is_cluster::value; -template -// typename = std::enable_if_t>> +template >> struct extract_template_arguments; // Forward declaration // helper struct to extract template argument diff --git a/include/aare/ClusterFinder.hpp b/include/aare/ClusterFinder.hpp index 120d39d..8c3540a 100644 --- a/include/aare/ClusterFinder.hpp +++ b/include/aare/ClusterFinder.hpp @@ -139,7 +139,8 @@ class ClusterFinder { iy + ir >= 0 && iy + ir < frame.shape(0)) { CT tmp = static_cast(frame(iy + ir, ix + ic)) - - m_pedestal.mean(iy + ir, ix + ic); + static_cast( + m_pedestal.mean(iy + ir, ix + ic)); cluster_data[i] = tmp; // Watch for out of bounds access i++; @@ -147,9 +148,13 @@ class ClusterFinder { } } + ClusterType new_cluster{}; + new_cluster.x = ix; + new_cluster.y = iy; + std::copy(cluster_data.begin(), cluster_data.end(), + new_cluster.data); // Add the cluster to the output ClusterVector - m_clusters.push_back( - ClusterType{ix, iy, cluster_data.data()}); + m_clusters.push_back(new_cluster); } } } diff --git a/include/aare/ClusterFinderMT.hpp b/include/aare/ClusterFinderMT.hpp index 62046b7..75b6497 100644 --- a/include/aare/ClusterFinderMT.hpp +++ b/include/aare/ClusterFinderMT.hpp @@ -117,14 +117,14 @@ class ClusterFinderMT { * expected number of clusters in a frame per frame. 
* @param n_threads number of threads to use */ - ClusterFinderMT(Shape<2> image_size, Shape<2> cluster_size, - PEDESTAL_TYPE nSigma = 5.0, size_t capacity = 2000, - size_t n_threads = 3) + ClusterFinderMT(Shape<2> image_size, PEDESTAL_TYPE nSigma = 5.0, + size_t capacity = 2000, size_t n_threads = 3) : m_n_threads(n_threads) { for (size_t i = 0; i < n_threads; i++) { m_cluster_finders.push_back( - std::make_unique>( - image_size, cluster_size, nSigma, capacity)); + std::make_unique< + ClusterFinder>( + image_size, nSigma, capacity)); } for (size_t i = 0; i < n_threads; i++) { m_input_queues.emplace_back(std::make_unique(200)); diff --git a/include/aare/GainMap.hpp b/include/aare/GainMap.hpp index a60c131..41acb33 100644 --- a/include/aare/GainMap.hpp +++ b/include/aare/GainMap.hpp @@ -41,7 +41,7 @@ class GainMap { for (size_t j = 0; j < ClusterSizeX * ClusterSizeY; j++) { size_t x = cl.x + j % ClusterSizeX - index_cluster_center_x; size_t y = cl.y + j / ClusterSizeX - index_cluster_center_y; - cl.data[j] = static_cast(cl.data[j] * m_gain_map(y, x)); + cl.data[j] = cl.data[j] * static_cast(m_gain_map(y, x)); } } else { memset(cl.data, 0, diff --git a/python/src/cluster.hpp b/python/src/cluster.hpp index 5657288..fb3d1da 100644 --- a/python/src/cluster.hpp +++ b/python/src/cluster.hpp @@ -21,9 +21,8 @@ using namespace aare; template void define_cluster_vector(py::module &m, const std::string &typestr) { - using T = typename extract_template_arguments::value_type; - auto class_name = fmt::format("ClusterVector_{}", typestr); + py::class_>(m, class_name.c_str(), py::buffer_protocol()) .def(py::init(), py::arg("cluster_size_x") = 3, @@ -41,6 +40,7 @@ void define_cluster_vector(py::module &m, const std::string &typestr) { self.fmt_base(), self.cluster_size_x(), self.cluster_size_y(), typestr); }) + /* .def("sum", [](ClusterVector &self) { auto *vec = new std::vector(self.sum()); @@ -51,6 +51,7 @@ void define_cluster_vector(py::module &m, const std::string &typestr) { 
auto *vec = new std::vector(self.sum_2x2()); return return_vector(vec); }) + */ .def_property_readonly("cluster_size_x", &ClusterVector::cluster_size_x) .def_property_readonly("cluster_size_y", @@ -75,13 +76,16 @@ void define_cluster_vector(py::module &m, const std::string &typestr) { } template -void define_cluster_finder_mt_bindings(py::module &m) { +void define_cluster_finder_mt_bindings(py::module &m, + const std::string &typestr) { + + auto class_name = fmt::format("ClusterFinderMT_{}", typestr); + py::class_>( - m, "ClusterFinderMT") - .def(py::init, Shape<2>, pd_type, size_t, size_t>(), - py::arg("image_size"), py::arg("cluster_size"), - py::arg("n_sigma") = 5.0, py::arg("capacity") = 2048, - py::arg("n_threads") = 3) + m, class_name.c_str()) + .def(py::init, pd_type, size_t, size_t>(), + py::arg("image_size"), py::arg("n_sigma") = 5.0, + py::arg("capacity") = 2048, py::arg("n_threads") = 3) .def("push_pedestal_frame", [](ClusterFinderMT &self, py::array_t frame) { @@ -123,8 +127,12 @@ void define_cluster_finder_mt_bindings(py::module &m) { } template -void define_cluster_collector_bindings(py::module &m) { - py::class_>(m, "ClusterCollector") +void define_cluster_collector_bindings(py::module &m, + const std::string &typestr) { + + auto class_name = fmt::format("ClusterCollector_{}", typestr); + + py::class_>(m, class_name.c_str()) .def(py::init *>()) .def("stop", &ClusterCollector::stop) .def( @@ -138,19 +146,25 @@ void define_cluster_collector_bindings(py::module &m) { } template -void define_cluster_file_sink_bindings(py::module &m) { - py::class_>(m, "ClusterFileSink") +void define_cluster_file_sink_bindings(py::module &m, + const std::string &typestr) { + + auto class_name = fmt::format("ClusterFileSink_{}", typestr); + + py::class_>(m, class_name.c_str()) .def(py::init *, const std::filesystem::path &>()) .def("stop", &ClusterFileSink::stop); } template -void define_cluster_finder_bindings(py::module &m) { - py::class_>(m, - "ClusterFinder") - 
.def(py::init, Shape<2>, pd_type, size_t>(), - py::arg("image_size"), py::arg("cluster_size"), +void define_cluster_finder_bindings(py::module &m, const std::string &typestr) { + + auto class_name = fmt::format("ClusterFinder_{}", typestr); + + py::class_>( + m, class_name.c_str()) + .def(py::init, pd_type, size_t>(), py::arg("image_size"), py::arg("n_sigma") = 5.0, py::arg("capacity") = 1'000'000) .def("push_pedestal_frame", [](ClusterFinder &self, @@ -213,9 +227,6 @@ void define_cluster_finder_bindings(py::module &m) { } return hitmap; }); - define_cluster_vector(m, "i"); - define_cluster_vector(m, "d"); - define_cluster_vector(m, "f"); py::class_(m, "DynamicCluster", py::buffer_protocol()) .def(py::init()) @@ -233,4 +244,4 @@ void define_cluster_finder_bindings(py::module &m) { return ""; }); -} \ No newline at end of file +} diff --git a/python/src/cluster_file.hpp b/python/src/cluster_file.hpp index 576c3bb..151644c 100644 --- a/python/src/cluster_file.hpp +++ b/python/src/cluster_file.hpp @@ -1,3 +1,4 @@ +#include "aare/CalculateEta.hpp" #include "aare/ClusterFile.hpp" #include "aare/defs.hpp" @@ -19,10 +20,14 @@ namespace py = pybind11; using namespace ::aare; template -void define_cluster_file_io_bindings(py::module &m) { - PYBIND11_NUMPY_DTYPE(Cluster3x3, x, y, data); +void define_cluster_file_io_bindings(py::module &m, + const std::string &typestr) { + // PYBIND11_NUMPY_DTYPE(Cluster, x, y, + // data); // is this used - maybe use as cluster type - py::class_>(m, "ClusterFile") + auto class_name = fmt::format("ClusterFile_{}", typestr); + + py::class_>(m, class_name.c_str()) .def(py::init(), py::arg(), py::arg("chunk_size") = 1000, py::arg("mode") = "r") @@ -84,12 +89,11 @@ void define_cluster_file_io_bindings(py::module &m) { return v; }); - /* - m.def("calculate_eta2", []( aare::ClusterVector &clusters) { - auto eta2 = new NDArray(calculate_eta2(clusters)); - return return_image_data(eta2); - }); - */ //add in different file + m.def("calculate_eta2", + 
[](const aare::ClusterVector &clusters) { + auto eta2 = new NDArray(calculate_eta2(clusters)); + return return_image_data(eta2); + }); } #pragma GCC diagnostic pop \ No newline at end of file diff --git a/python/src/interpolation.hpp b/python/src/interpolation.hpp index 47b5203..cc14553 100644 --- a/python/src/interpolation.hpp +++ b/python/src/interpolation.hpp @@ -9,39 +9,53 @@ namespace py = pybind11; +template +void register_interpolate(py::class_ &interpolator) { + std::string name = + fmt::format("interpolate_{}", typeid(ClusterType).name()); + + interpolator.def(name.c_str(), + [](aare::Interpolator &self, + const ClusterVector &clusters) { + auto photons = self.interpolate(clusters); + auto *ptr = new std::vector{photons}; + return return_vector(ptr); + }); +} + void define_interpolation_bindings(py::module &m) { PYBIND11_NUMPY_DTYPE(aare::Photon, x, y, energy); - py::class_(m, "Interpolator") - .def(py::init( - [](py::array_t - etacube, - py::array_t xbins, py::array_t ybins, - py::array_t ebins) { + auto interpolator = + py::class_(m, "Interpolator") + .def(py::init([](py::array_t + etacube, + py::array_t xbins, + py::array_t ybins, + py::array_t ebins) { return Interpolator(make_view_3d(etacube), make_view_1d(xbins), make_view_1d(ybins), make_view_1d(ebins)); })) - .def("get_ietax", - [](Interpolator &self) { - auto *ptr = new NDArray{}; - *ptr = self.get_ietax(); - return return_image_data(ptr); - }) - .def("get_ietay", - [](Interpolator &self) { - auto *ptr = new NDArray{}; - *ptr = self.get_ietay(); - return return_image_data(ptr); - }) + .def("get_ietax", + [](Interpolator &self) { + auto *ptr = new NDArray{}; + *ptr = self.get_ietax(); + return return_image_data(ptr); + }) + .def("get_ietay", [](Interpolator &self) { + auto *ptr = new NDArray{}; + *ptr = self.get_ietay(); + return return_image_data(ptr); + }); - // TODO take care of clustertype template - .def("interpolate", - [](Interpolator &self, const ClusterVector &clusters) { - auto photons = 
self.interpolate(clusters); - auto *ptr = new std::vector{photons}; - return return_vector(ptr); - }); + register_interpolate>(interpolator); + register_interpolate>(interpolator); + register_interpolate>(interpolator); + register_interpolate>(interpolator); + register_interpolate>(interpolator); + register_interpolate>(interpolator); // TODO! Evaluate without converting to double m.def( diff --git a/python/src/module.cpp b/python/src/module.cpp index ba681be..4df5d77 100644 --- a/python/src/module.cpp +++ b/python/src/module.cpp @@ -26,11 +26,48 @@ PYBIND11_MODULE(_aare, m) { define_pixel_map_bindings(m); define_pedestal_bindings(m, "Pedestal_d"); define_pedestal_bindings(m, "Pedestal_f"); - define_cluster_finder_bindings(m); - define_cluster_finder_mt_bindings(m); - define_cluster_file_io_bindings(m); - define_cluster_collector_bindings(m); - define_cluster_file_sink_bindings(m); define_fit_bindings(m); define_interpolation_bindings(m); -} \ No newline at end of file + + define_cluster_file_io_bindings>(m, "Cluster3x3i"); + define_cluster_file_io_bindings>(m, "Cluster3x3d"); + define_cluster_file_io_bindings>(m, "Cluster3x3f"); + define_cluster_file_io_bindings>(m, "Cluster2x2i"); + define_cluster_file_io_bindings>(m, "Cluster2x2f"); + define_cluster_file_io_bindings>(m, "Cluster2x2d"); + + define_cluster_vector>(m, "Cluster3x3i"); + define_cluster_vector>(m, "Cluster3x3d"); + define_cluster_vector>(m, "Cluster3x3f"); + define_cluster_vector>(m, "Cluster2x2i"); + define_cluster_vector>(m, "Cluster2x2d"); + define_cluster_vector>(m, "Cluster2x2f"); + + define_cluster_finder_bindings>(m, "Cluster3x3i"); + define_cluster_finder_bindings>(m, "Cluster3x3d"); + define_cluster_finder_bindings>(m, "Cluster3x3f"); + define_cluster_finder_bindings>(m, "Cluster2x2i"); + define_cluster_finder_bindings>(m, "Cluster2x2d"); + define_cluster_finder_bindings>(m, "Cluster2x2f"); + + define_cluster_finder_mt_bindings>(m, "Cluster3x3i"); + define_cluster_finder_mt_bindings>(m, 
"Cluster3x3d"); + define_cluster_finder_mt_bindings>(m, "Cluster3x3f"); + define_cluster_finder_mt_bindings>(m, "Cluster2x2i"); + define_cluster_finder_mt_bindings>(m, "Cluster2x2d"); + define_cluster_finder_mt_bindings>(m, "Cluster2x2f"); + + define_cluster_file_sink_bindings>(m, "Cluster3x3i"); + define_cluster_file_sink_bindings>(m, "Cluster3x3d"); + define_cluster_file_sink_bindings>(m, "Cluster3x3f"); + define_cluster_file_sink_bindings>(m, "Cluster2x2i"); + define_cluster_file_sink_bindings>(m, "Cluster2x2d"); + define_cluster_file_sink_bindings>(m, "Cluster2x2f"); + + define_cluster_collector_bindings>(m, "Cluster3x3i"); + define_cluster_collector_bindings>(m, "Cluster3x3f"); + define_cluster_collector_bindings>(m, "Cluster3x3d"); + define_cluster_collector_bindings>(m, "Cluster2x2i"); + define_cluster_collector_bindings>(m, "Cluster2x2f"); + define_cluster_collector_bindings>(m, "Cluster2x2d"); +} From e24ed684166a6c4b927ca65836c72eeec93e40ec Mon Sep 17 00:00:00 2001 From: Mazzoleni Alice Francesca Date: Thu, 3 Apr 2025 16:50:02 +0200 Subject: [PATCH 073/120] fixed include --- include/aare/Cluster.hpp | 1 + 1 file changed, 1 insertion(+) diff --git a/include/aare/Cluster.hpp b/include/aare/Cluster.hpp index a47edf0..e2cfe99 100644 --- a/include/aare/Cluster.hpp +++ b/include/aare/Cluster.hpp @@ -9,6 +9,7 @@ #pragma once #include +#include #include #include #include From 885309d97ce6295b8304533f4a29a96fe253866f Mon Sep 17 00:00:00 2001 From: Mazzoleni Alice Francesca Date: Thu, 3 Apr 2025 17:14:28 +0200 Subject: [PATCH 074/120] fix build --- include/aare/Interpolator.hpp | 4 ++-- src/Interpolator.cpp | 3 +-- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/include/aare/Interpolator.hpp b/include/aare/Interpolator.hpp index 5843046..88f127e 100644 --- a/include/aare/Interpolator.hpp +++ b/include/aare/Interpolator.hpp @@ -5,6 +5,7 @@ #include "aare/ClusterVector.hpp" #include "aare/NDArray.hpp" #include "aare/NDView.hpp" + namespace aare { struct 
Photon { @@ -27,8 +28,7 @@ class Interpolator { NDArray get_ietax() { return m_ietax; } NDArray get_ietay() { return m_ietay; } - template >> + template std::vector interpolate(const ClusterVector &clusters); }; diff --git a/src/Interpolator.cpp b/src/Interpolator.cpp index e4f8e5c..3680522 100644 --- a/src/Interpolator.cpp +++ b/src/Interpolator.cpp @@ -57,8 +57,7 @@ Interpolator::Interpolator(NDView etacube, NDView xbins, // TODO: generalize to support any clustertype!!! otherwise add std::enable_if_t // to only take Cluster2x2 and Cluster3x3 -template >> +template std::vector Interpolator::interpolate(const ClusterVector &clusters) { std::vector photons; From 9de84a7f87b27346c07b0fc69a0d366ca416dd72 Mon Sep 17 00:00:00 2001 From: Mazzoleni Alice Francesca Date: Fri, 4 Apr 2025 17:19:15 +0200 Subject: [PATCH 075/120] added some python tests --- include/aare/Cluster.hpp | 1 + include/aare/ClusterFile.hpp | 4 +- include/aare/ClusterVector.hpp | 4 ++ include/aare/Interpolator.hpp | 99 +++++++++++++++++++++++++++++++++- python/src/cluster.hpp | 94 ++++++++++++++++++++++---------- python/src/cluster_file.hpp | 8 --- python/src/interpolation.hpp | 18 +++---- python/src/module.cpp | 7 +++ python/src/np_helper.hpp | 15 +++--- python/tests/test_Cluster.py | 64 ++++++++++++++++++++++ src/ClusterVector.test.cpp | 4 ++ src/Interpolator.cpp | 97 --------------------------------- 12 files changed, 264 insertions(+), 151 deletions(-) create mode 100644 python/tests/test_Cluster.py diff --git a/include/aare/Cluster.hpp b/include/aare/Cluster.hpp index e2cfe99..a2c9b55 100644 --- a/include/aare/Cluster.hpp +++ b/include/aare/Cluster.hpp @@ -40,6 +40,7 @@ struct Cluster { constexpr size_t num_2x2_subclusters = (ClusterSizeX - 1) * (ClusterSizeY - 1); + std::array sum_2x2_subcluster; for (size_t i = 0; i < ClusterSizeY - 1; ++i) { for (size_t j = 0; j < ClusterSizeX - 1; ++j) diff --git a/include/aare/ClusterFile.hpp b/include/aare/ClusterFile.hpp index bc0ebd1..ff5d338 100644 --- 
a/include/aare/ClusterFile.hpp +++ b/include/aare/ClusterFile.hpp @@ -36,7 +36,7 @@ uint32_t number_of_clusters * etc. */ template , bool>> + typename Enable = std::enable_if_t>> class ClusterFile { FILE *fp{}; uint32_t m_num_left{}; /*Number of photons left in frame*/ @@ -70,8 +70,6 @@ class ClusterFile { */ ClusterVector read_clusters(size_t n_clusters); - ClusterVector read_clusters(size_t n_clusters, ROI roi); - /** * @brief Read a single frame from the file and return the clusters. The * cluster vector will have the frame number set. diff --git a/include/aare/ClusterVector.hpp b/include/aare/ClusterVector.hpp index 0beae3d..f3b55be 100644 --- a/include/aare/ClusterVector.hpp +++ b/include/aare/ClusterVector.hpp @@ -133,6 +133,7 @@ class ClusterVector> { * @brief Sum the pixels in each cluster * @return std::vector vector of sums for each cluster */ + /* std::vector sum() { std::vector sums(m_size); const size_t stride = item_size(); @@ -147,12 +148,14 @@ class ClusterVector> { } return sums; } + */ /** * @brief Sum the pixels in the 2x2 subcluster with the biggest pixel sum in * each cluster * @return std::vector vector of sums for each cluster */ //TODO if underlying container is a vector use std::for_each + /* std::vector sum_2x2() { std::vector sums_2x2(m_size); @@ -161,6 +164,7 @@ class ClusterVector> { } return sums_2x2; } + */ /** * @brief Return the number of clusters in the vector diff --git a/include/aare/Interpolator.hpp b/include/aare/Interpolator.hpp index 88f127e..7e3a1c1 100644 --- a/include/aare/Interpolator.hpp +++ b/include/aare/Interpolator.hpp @@ -1,10 +1,12 @@ #pragma once +#include "aare/CalculateEta.hpp" #include "aare/Cluster.hpp" #include "aare/ClusterFile.hpp" //Cluster_3x3 #include "aare/ClusterVector.hpp" #include "aare/NDArray.hpp" #include "aare/NDView.hpp" +#include "aare/algorithm.hpp" namespace aare { @@ -28,8 +30,103 @@ class Interpolator { NDArray get_ietax() { return m_ietax; } NDArray get_ietay() { return m_ietay; } - 
template + template >> std::vector interpolate(const ClusterVector &clusters); }; +// TODO: generalize to support any clustertype!!! otherwise add std::enable_if_t +// to only take Cluster2x2 and Cluster3x3 +template +std::vector +Interpolator::interpolate(const ClusterVector &clusters) { + std::vector photons; + photons.reserve(clusters.size()); + + if (clusters.cluster_size_x() == 3 || clusters.cluster_size_y() == 3) { + for (size_t i = 0; i < clusters.size(); i++) { + + auto cluster = clusters.at(i); + auto eta = calculate_eta2(cluster); + + Photon photon; + photon.x = cluster.x; + photon.y = cluster.y; + photon.energy = eta.sum; + + // auto ie = nearest_index(m_energy_bins, photon.energy)-1; + // auto ix = nearest_index(m_etabinsx, eta.x)-1; + // auto iy = nearest_index(m_etabinsy, eta.y)-1; + // Finding the index of the last element that is smaller + // should work fine as long as we have many bins + auto ie = last_smaller(m_energy_bins, photon.energy); + auto ix = last_smaller(m_etabinsx, eta.x); + auto iy = last_smaller(m_etabinsy, eta.y); + + // fmt::print("ex: {}, ix: {}, iy: {}\n", ie, ix, iy); + + double dX, dY; + // cBottomLeft = 0, + // cBottomRight = 1, + // cTopLeft = 2, + // cTopRight = 3 + switch (eta.c) { + case cTopLeft: + dX = -1.; + dY = 0; + break; + case cTopRight:; + dX = 0; + dY = 0; + break; + case cBottomLeft: + dX = -1.; + dY = -1.; + break; + case cBottomRight: + dX = 0.; + dY = -1.; + break; + } + photon.x += m_ietax(ix, iy, ie) * 2 + dX; + photon.y += m_ietay(ix, iy, ie) * 2 + dY; + photons.push_back(photon); + } + } else if (clusters.cluster_size_x() == 2 || + clusters.cluster_size_y() == 2) { + for (size_t i = 0; i < clusters.size(); i++) { + auto cluster = clusters.at(i); + auto eta = calculate_eta2(cluster); + + Photon photon; + photon.x = cluster.x; + photon.y = cluster.y; + photon.energy = eta.sum; + + // Now do some actual interpolation. 
+ // Find which energy bin the cluster is in + // auto ie = nearest_index(m_energy_bins, photon.energy)-1; + // auto ix = nearest_index(m_etabinsx, eta.x)-1; + // auto iy = nearest_index(m_etabinsy, eta.y)-1; + // Finding the index of the last element that is smaller + // should work fine as long as we have many bins + auto ie = last_smaller(m_energy_bins, photon.energy); + auto ix = last_smaller(m_etabinsx, eta.x); + auto iy = last_smaller(m_etabinsy, eta.y); + + photon.x += m_ietax(ix, iy, ie) * + 2; // eta goes between 0 and 1 but we could move the hit + // anywhere in the 2x2 + photon.y += m_ietay(ix, iy, ie) * 2; + photons.push_back(photon); + } + + } else { + throw std::runtime_error( + "Only 3x3 and 2x2 clusters are supported for interpolation"); + } + + return photons; +} + } // namespace aare \ No newline at end of file diff --git a/python/src/cluster.hpp b/python/src/cluster.hpp index fb3d1da..7dcb338 100644 --- a/python/src/cluster.hpp +++ b/python/src/cluster.hpp @@ -18,20 +18,81 @@ using pd_type = double; using namespace aare; +template +void define_cluster(py::module &m, const std::string &typestr) { + auto class_name = fmt::format("Cluster{}", typestr); + + using ClusterType = + Cluster; + py::class_>( + m, class_name.c_str()) + + .def(py::init([](uint8_t x, uint8_t y, py::array_t data) { + py::buffer_info buf_info = data.request(); + Type *ptr = static_cast(buf_info.ptr); + Cluster cluster; + cluster.x = x; + cluster.y = y; + std::copy(ptr, ptr + ClusterSizeX * ClusterSizeY, + cluster.data); // Copy array contents + return cluster; + })) + + //.def(py::init<>()) + .def_readwrite("x", &ClusterType::x) + .def_readwrite("y", &ClusterType::y) + .def_property( + "data", + [](ClusterType &c) -> py::array { + return py::array(py::buffer_info( + c.data, sizeof(Type), + py::format_descriptor::format(), // Type + // format + 1, // Number of dimensions + {static_cast(ClusterSizeX * + ClusterSizeY)}, // Shape (flattened) + {sizeof(Type)} // Stride (step size 
between elements) + )); + }, + [](ClusterType &c, py::array_t arr) { + py::buffer_info buf_info = arr.request(); + Type *ptr = static_cast(buf_info.ptr); + std::copy(ptr, ptr + ClusterSizeX * ClusterSizeY, c.data); + }); +} + template void define_cluster_vector(py::module &m, const std::string &typestr) { - auto class_name = fmt::format("ClusterVector_{}", typestr); py::class_>(m, class_name.c_str(), py::buffer_protocol()) - .def(py::init(), py::arg("cluster_size_x") = 3, - py::arg("cluster_size_y") = 3) // TODO change!!! + .def(py::init()) // TODO change!!! + /* + .def("push_back", + [](ClusterVector &self, ClusterType &cl) { + // auto view = make_view_2d(data); + self.push_back(cl); + }) + */ + /* + .def( + "push_back", + [](ClusterVector &self, py::object obj) { + ClusterType &cl = py::cast(obj); + self.push_back(cl); + }, + py::arg("cluster")) + */ + /* .def("push_back", - [](ClusterVector &self, ClusterType &cl) { - // auto view = make_view_2d(data); - self.push_back(cl); + [](ClusterVector &self, const ClusterType &cluster) { + self.push_back(cluster); }) + */ + //.def("push_back", &ClusterVector::push_back) //TODO + //implement push_back .def_property_readonly("size", &ClusterVector::size) .def("item_size", &ClusterVector::item_size) .def_property_readonly("fmt", @@ -78,7 +139,6 @@ void define_cluster_vector(py::module &m, const std::string &typestr) { template void define_cluster_finder_mt_bindings(py::module &m, const std::string &typestr) { - auto class_name = fmt::format("ClusterFinderMT_{}", typestr); py::class_>( @@ -129,7 +189,6 @@ void define_cluster_finder_mt_bindings(py::module &m, template void define_cluster_collector_bindings(py::module &m, const std::string &typestr) { - auto class_name = fmt::format("ClusterCollector_{}", typestr); py::class_>(m, class_name.c_str()) @@ -148,7 +207,6 @@ void define_cluster_collector_bindings(py::module &m, template void define_cluster_file_sink_bindings(py::module &m, const std::string &typestr) { - auto 
class_name = fmt::format("ClusterFileSink_{}", typestr); py::class_>(m, class_name.c_str()) @@ -159,7 +217,6 @@ void define_cluster_file_sink_bindings(py::module &m, template void define_cluster_finder_bindings(py::module &m, const std::string &typestr) { - auto class_name = fmt::format("ClusterFinder_{}", typestr); py::class_>( @@ -227,21 +284,4 @@ void define_cluster_finder_bindings(py::module &m, const std::string &typestr) { } return hitmap; }); - - py::class_(m, "DynamicCluster", py::buffer_protocol()) - .def(py::init()) - .def("size", &DynamicCluster::size) - .def("begin", &DynamicCluster::begin) - .def("end", &DynamicCluster::end) - .def_readwrite("x", &DynamicCluster::x) - .def_readwrite("y", &DynamicCluster::y) - .def_buffer([](DynamicCluster &c) -> py::buffer_info { - return py::buffer_info(c.data(), c.dt.bytes(), c.dt.format_descr(), - 1, {c.size()}, {c.dt.bytes()}); - }) - - .def("__repr__", [](const DynamicCluster &a) { - return ""; - }); } diff --git a/python/src/cluster_file.hpp b/python/src/cluster_file.hpp index 151644c..b41cab8 100644 --- a/python/src/cluster_file.hpp +++ b/python/src/cluster_file.hpp @@ -39,14 +39,6 @@ void define_cluster_file_io_bindings(py::module &m, return v; }, py::return_value_policy::take_ownership) - .def( - "read_clusters", - [](ClusterFile &self, size_t n_clusters, ROI roi) { - auto v = new ClusterVector( - self.read_clusters(n_clusters, roi)); - return v; - }, - py::return_value_policy::take_ownership) .def("read_frame", [](ClusterFile &self) { auto v = new ClusterVector(self.read_frame()); diff --git a/python/src/interpolation.hpp b/python/src/interpolation.hpp index cc14553..08ec98d 100644 --- a/python/src/interpolation.hpp +++ b/python/src/interpolation.hpp @@ -10,9 +10,9 @@ namespace py = pybind11; template -void register_interpolate(py::class_ &interpolator) { - std::string name = - fmt::format("interpolate_{}", typeid(ClusterType).name()); +void register_interpolate(py::class_ &interpolator, + const std::string 
&typestr) { + auto name = fmt::format("interpolate_{}", typestr); interpolator.def(name.c_str(), [](aare::Interpolator &self, @@ -50,12 +50,12 @@ void define_interpolation_bindings(py::module &m) { return return_image_data(ptr); }); - register_interpolate>(interpolator); - register_interpolate>(interpolator); - register_interpolate>(interpolator); - register_interpolate>(interpolator); - register_interpolate>(interpolator); - register_interpolate>(interpolator); + register_interpolate>(interpolator, "Cluster3x3i"); + register_interpolate>(interpolator, "Cluster3x3f"); + register_interpolate>(interpolator, "Cluster3x3d"); + register_interpolate>(interpolator, "Cluster2x2i"); + register_interpolate>(interpolator, "Cluster2x2f"); + register_interpolate>(interpolator, "Cluster2x2d"); // TODO! Evaluate without converting to double m.def( diff --git a/python/src/module.cpp b/python/src/module.cpp index 4df5d77..9d3866e 100644 --- a/python/src/module.cpp +++ b/python/src/module.cpp @@ -70,4 +70,11 @@ PYBIND11_MODULE(_aare, m) { define_cluster_collector_bindings>(m, "Cluster2x2i"); define_cluster_collector_bindings>(m, "Cluster2x2f"); define_cluster_collector_bindings>(m, "Cluster2x2d"); + + define_cluster(m, "3x3i"); + define_cluster(m, "3x3f"); + define_cluster(m, "3x3d"); + define_cluster(m, "2x2i"); + define_cluster(m, "2x2f"); + define_cluster(m, "2x2d"); } diff --git a/python/src/np_helper.hpp b/python/src/np_helper.hpp index 1845196..768efac 100644 --- a/python/src/np_helper.hpp +++ b/python/src/np_helper.hpp @@ -40,25 +40,28 @@ template py::array return_vector(std::vector *vec) { } // todo rewrite generic -template auto get_shape_3d(const py::array_t& arr) { +template +auto get_shape_3d(const py::array_t &arr) { return aare::Shape<3>{arr.shape(0), arr.shape(1), arr.shape(2)}; } -template auto make_view_3d(py::array_t& arr) { +template auto make_view_3d(py::array_t &arr) { return aare::NDView(arr.mutable_data(), get_shape_3d(arr)); } -template auto 
get_shape_2d(const py::array_t& arr) { +template +auto get_shape_2d(const py::array_t &arr) { return aare::Shape<2>{arr.shape(0), arr.shape(1)}; } -template auto get_shape_1d(const py::array_t& arr) { +template +auto get_shape_1d(const py::array_t &arr) { return aare::Shape<1>{arr.shape(0)}; } -template auto make_view_2d(py::array_t& arr) { +template auto make_view_2d(py::array_t &arr) { return aare::NDView(arr.mutable_data(), get_shape_2d(arr)); } -template auto make_view_1d(py::array_t& arr) { +template auto make_view_1d(py::array_t &arr) { return aare::NDView(arr.mutable_data(), get_shape_1d(arr)); } \ No newline at end of file diff --git a/python/tests/test_Cluster.py b/python/tests/test_Cluster.py new file mode 100644 index 0000000..2281e13 --- /dev/null +++ b/python/tests/test_Cluster.py @@ -0,0 +1,64 @@ +import pytest +import numpy as np + +from _aare import ClusterVector_Cluster3x3i, Interpolator, Cluster3x3i, ClusterFinder_Cluster3x3i + +def test_ClusterVector(): + """Test ClusterVector""" + + clustervector = ClusterVector_Cluster3x3i() + assert clustervector.cluster_size_x == 3 + assert clustervector.cluster_size_y == 3 + assert clustervector.item_size() == 4+9*4 + assert clustervector.frame_number == 0 + assert clustervector.capacity == 1024 + assert clustervector.size == 0 + + cluster = Cluster3x3i(0,0,np.ones(9, dtype=np.int32)) + + #clustervector.push_back(cluster) + #assert clustervector.size == 1 + + #push_back - check size + + +def test_Interpolator(): + """Test Interpolator""" + + ebins = np.linspace(0,10, 20, dtype=np.float64) + xbins = np.linspace(0, 5, 30, dtype=np.float64) + ybins = np.linspace(0, 5, 30, dtype=np.float64) + + etacube = np.zeros(shape=[30, 30, 20], dtype=np.float64) + interpolator = Interpolator(etacube, xbins, ybins, ebins) + + assert interpolator.get_ietax().shape == (30,30,20) + assert interpolator.get_ietay().shape == (30,30,20) + clustervector = ClusterVector_Cluster3x3i() + + #TODO clustervector is empty + cluster = 
Cluster3x3i(0,0, np.ones(9, dtype=np.int32)) + #clustervector.push_back(cluster) + num_clusters = 1; + + assert interpolator.interpolate_Cluster3x3i(clustervector).shape == (num_clusters, 3) + + +#def test_cluster_file(): + +#def test_cluster_finder(): + #"""Test ClusterFinder""" + + #clusterfinder = ClusterFinder_Cluster3x3i([100,100]) + + #clusterfinder.find_clusters() + + #clusters = clusterfinder.steal_clusters() + + #print("cluster size: ", clusters.size()) + + + + + + diff --git a/src/ClusterVector.test.cpp b/src/ClusterVector.test.cpp index c6a36d8..096abfa 100644 --- a/src/ClusterVector.test.cpp +++ b/src/ClusterVector.test.cpp @@ -61,11 +61,13 @@ TEST_CASE("Summing 3x1 clusters of int64", "[.ClusterVector]") { REQUIRE(cv.capacity() == 4); REQUIRE(cv.size() == 3); + /* auto sums = cv.sum(); REQUIRE(sums.size() == 3); REQUIRE(sums[0] == 12); REQUIRE(sums[1] == 27); REQUIRE(sums[2] == 42); + */ } TEST_CASE("Storing floats", "[.ClusterVector]") { @@ -87,10 +89,12 @@ TEST_CASE("Storing floats", "[.ClusterVector]") { REQUIRE(cv.capacity() == 10); REQUIRE(cv.size() == 2); + /* auto sums = cv.sum(); REQUIRE(sums.size() == 2); REQUIRE_THAT(sums[0], Catch::Matchers::WithinAbs(36.0, 1e-6)); REQUIRE_THAT(sums[1], Catch::Matchers::WithinAbs(76.0, 1e-6)); + */ } TEST_CASE("Push back more than initial capacity", "[.ClusterVector]") { diff --git a/src/Interpolator.cpp b/src/Interpolator.cpp index 3680522..4bc2b34 100644 --- a/src/Interpolator.cpp +++ b/src/Interpolator.cpp @@ -1,6 +1,4 @@ #include "aare/Interpolator.hpp" -#include "aare/CalculateEta.hpp" -#include "aare/algorithm.hpp" namespace aare { @@ -55,99 +53,4 @@ Interpolator::Interpolator(NDView etacube, NDView xbins, } } -// TODO: generalize to support any clustertype!!! 
otherwise add std::enable_if_t -// to only take Cluster2x2 and Cluster3x3 -template -std::vector -Interpolator::interpolate(const ClusterVector &clusters) { - std::vector photons; - photons.reserve(clusters.size()); - - if (clusters.cluster_size_x() == 3 || clusters.cluster_size_y() == 3) { - for (size_t i = 0; i < clusters.size(); i++) { - - auto cluster = clusters.at(i); - auto eta = calculate_eta2(cluster); - - Photon photon; - photon.x = cluster.x; - photon.y = cluster.y; - photon.energy = eta.sum; - - // auto ie = nearest_index(m_energy_bins, photon.energy)-1; - // auto ix = nearest_index(m_etabinsx, eta.x)-1; - // auto iy = nearest_index(m_etabinsy, eta.y)-1; - // Finding the index of the last element that is smaller - // should work fine as long as we have many bins - auto ie = last_smaller(m_energy_bins, photon.energy); - auto ix = last_smaller(m_etabinsx, eta.x); - auto iy = last_smaller(m_etabinsy, eta.y); - - // fmt::print("ex: {}, ix: {}, iy: {}\n", ie, ix, iy); - - double dX, dY; - int ex, ey; - // cBottomLeft = 0, - // cBottomRight = 1, - // cTopLeft = 2, - // cTopRight = 3 - switch (eta.c) { - case cTopLeft: - dX = -1.; - dY = 0; - break; - case cTopRight:; - dX = 0; - dY = 0; - break; - case cBottomLeft: - dX = -1.; - dY = -1.; - break; - case cBottomRight: - dX = 0.; - dY = -1.; - break; - } - photon.x += m_ietax(ix, iy, ie) * 2 + dX; - photon.y += m_ietay(ix, iy, ie) * 2 + dY; - photons.push_back(photon); - } - } else if (clusters.cluster_size_x() == 2 || - clusters.cluster_size_y() == 2) { - for (size_t i = 0; i < clusters.size(); i++) { - auto cluster = clusters.at(i); - auto eta = calculate_eta2(cluster); - - Photon photon; - photon.x = cluster.x; - photon.y = cluster.y; - photon.energy = eta.sum; - - // Now do some actual interpolation. 
- // Find which energy bin the cluster is in - // auto ie = nearest_index(m_energy_bins, photon.energy)-1; - // auto ix = nearest_index(m_etabinsx, eta.x)-1; - // auto iy = nearest_index(m_etabinsy, eta.y)-1; - // Finding the index of the last element that is smaller - // should work fine as long as we have many bins - auto ie = last_smaller(m_energy_bins, photon.energy); - auto ix = last_smaller(m_etabinsx, eta.x); - auto iy = last_smaller(m_etabinsy, eta.y); - - photon.x += m_ietax(ix, iy, ie) * - 2; // eta goes between 0 and 1 but we could move the hit - // anywhere in the 2x2 - photon.y += m_ietay(ix, iy, ie) * 2; - photons.push_back(photon); - } - - } else { - throw std::runtime_error( - "Only 3x3 and 2x2 clusters are supported for interpolation"); - } - - return photons; -} - } // namespace aare \ No newline at end of file From a12e43b1767e32453662ba56c9ee1380b9c4f02a Mon Sep 17 00:00:00 2001 From: Mazzoleni Alice Francesca Date: Mon, 7 Apr 2025 12:27:44 +0200 Subject: [PATCH 076/120] underlying container of ClusterVcetor is now a std::vector --- include/aare/ClusterVector.hpp | 128 +++++++++++++++++++++++++++++++-- python/src/cluster.hpp | 38 +++++----- python/src/np_helper.hpp | 21 +++++- src/ClusterVector.test.cpp | 5 +- 4 files changed, 162 insertions(+), 30 deletions(-) diff --git a/include/aare/ClusterVector.hpp b/include/aare/ClusterVector.hpp index f3b55be..13ec882 100644 --- a/include/aare/ClusterVector.hpp +++ b/include/aare/ClusterVector.hpp @@ -29,6 +29,7 @@ class ClusterVector; // Forward declaration * @tparam CoordType data type of the x and y coordinates of the cluster * (normally int16_t) */ +#if 0 template class ClusterVector> { @@ -37,7 +38,7 @@ class ClusterVector> { size_t m_size{0}; size_t m_capacity; uint64_t m_frame_number{0}; // TODO! 
Check frame number size and type - /* + /** Format string used in the python bindings to create a numpy array from the buffer = - native byte order @@ -59,7 +60,7 @@ class ClusterVector> { */ ClusterVector(size_t capacity = 1024, uint64_t frame_number = 0) : m_capacity(capacity), m_frame_number(frame_number) { - allocate_buffer(capacity); + allocate_buffer(m_capacity); } ~ClusterVector() { delete[] m_data; } @@ -230,7 +231,7 @@ class ClusterVector> { return m_fmt_base; } - /** + /** * @brief Return the frame number of the clusters. 0 is used to indicate * that the clusters come from many frames */ @@ -240,7 +241,7 @@ class ClusterVector> { m_frame_number = frame_number; } - /** + /** * @brief Resize the vector to contain new_size clusters. If new_size is * greater than the current capacity, a new buffer is allocated. If the size * is smaller no memory is freed, size is just updated. @@ -265,5 +266,124 @@ class ClusterVector> { m_capacity = new_capacity; } }; +#endif + +/** + * @brief ClusterVector is a container for clusters of various sizes. It + * uses a contiguous memory buffer to store the clusters. It is templated on + * the data type and the coordinate type of the clusters. + * @note push_back can invalidate pointers to elements in the container + * @warning ClusterVector is currently move only to catch unintended copies, + * but this might change since there are probably use cases where copying is + * needed. + * @tparam T data type of the pixels in the cluster + * @tparam CoordType data type of the x and y coordinates of the cluster + * (normally int16_t) + */ +template +class ClusterVector> { + + std::vector> m_data{}; + uint64_t m_frame_number{0}; // TODO! Check frame number size and type + + public: + using value_type = T; + using ClusterType = Cluster; + + /** + * @brief Construct a new ClusterVector object + * @param capacity initial capacity of the buffer in number of clusters + * @param frame_number frame number of the clusters. 
Default is 0, which is + * also used to indicate that the clusters come from many frames + */ + ClusterVector(size_t capacity = 1024, uint64_t frame_number = 0) + : m_frame_number(frame_number) { + m_data.reserve(capacity); + } + + // Move constructor + ClusterVector(ClusterVector &&other) noexcept + : m_data(other.m_data), m_frame_number(other.m_frame_number) { + other.m_data.clear(); + } + + // Move assignment operator + ClusterVector &operator=(ClusterVector &&other) noexcept { + if (this != &other) { + m_data = other.m_data; + m_frame_number = other.m_frame_number; + other.m_data.clear(); + other.m_frame_number = 0; + } + return *this; + } + + /** + * @brief Reserve space for at least capacity clusters + * @param capacity number of clusters to reserve space for + * @note If capacity is less than the current capacity, the function does + * nothing. + */ + void reserve(size_t capacity) { m_data.reserve(capacity); } + + void resize(size_t size) { m_data.resize(size); } + + void push_back(const ClusterType &cluster) { m_data.push_back(cluster); } + + ClusterVector &operator+=(const ClusterVector &other) { + m_data.insert(m_data.end(), other.begin(), other.end()); + + return *this; + } + + /** + * @brief Return the number of clusters in the vector + */ + size_t size() const { return m_data.size(); } + + uint8_t cluster_size_x() const { return ClusterSizeX; } + + uint8_t cluster_size_y() const { return ClusterSizeY; } + + /** + * @brief Return the capacity of the buffer in number of clusters. This is + * the number of clusters that can be stored in the current buffer without + * reallocation. 
+ */ + size_t capacity() const { return m_data.capacity(); } + + const auto begin() const { return m_data.begin(); } + + const auto end() const { return m_data.end(); } + + /** + * @brief Return the size in bytes of a single cluster + */ + size_t item_size() const { + return 2 * sizeof(CoordType) + ClusterSizeX * ClusterSizeY * sizeof(T); + } + + ClusterType *data() { return m_data.data(); } + ClusterType const *data() const { return m_data.data(); } + + /** + * @brief Return a reference to the i-th cluster casted to type V + * @tparam V type of the cluster + */ + ClusterType &at(size_t i) { return m_data[i]; } + + const ClusterType &at(size_t i) const { return m_data[i]; } + + /** + * @brief Return the frame number of the clusters. 0 is used to indicate + * that the clusters come from many frames + */ + uint64_t frame_number() const { return m_frame_number; } + + void set_frame_number(uint64_t frame_number) { + m_frame_number = frame_number; + } +}; } // namespace aare \ No newline at end of file diff --git a/python/src/cluster.hpp b/python/src/cluster.hpp index 7dcb338..b414ae1 100644 --- a/python/src/cluster.hpp +++ b/python/src/cluster.hpp @@ -58,7 +58,8 @@ void define_cluster(py::module &m, const std::string &typestr) { [](ClusterType &c, py::array_t arr) { py::buffer_info buf_info = arr.request(); Type *ptr = static_cast(buf_info.ptr); - std::copy(ptr, ptr + ClusterSizeX * ClusterSizeY, c.data); + std::copy(ptr, ptr + ClusterSizeX * ClusterSizeY, + c.data); // TODO dont iterate over centers!!! }); } @@ -68,14 +69,15 @@ void define_cluster_vector(py::module &m, const std::string &typestr) { py::class_>(m, class_name.c_str(), py::buffer_protocol()) + .def(py::init()) // TODO change!!! 
- /* - .def("push_back", - [](ClusterVector &self, ClusterType &cl) { - // auto view = make_view_2d(data); - self.push_back(cl); - }) - */ + /* + .def("push_back", + [](ClusterVector &self, ClusterType &cl) { + // auto view = make_view_2d(data); + self.push_back(cl); + }) + */ /* .def( "push_back", @@ -92,15 +94,11 @@ void define_cluster_vector(py::module &m, const std::string &typestr) { }) */ //.def("push_back", &ClusterVector::push_back) //TODO - //implement push_back + // implement push_back .def_property_readonly("size", &ClusterVector::size) .def("item_size", &ClusterVector::item_size) .def_property_readonly("fmt", - [typestr](ClusterVector &self) { - return fmt::format( - self.fmt_base(), self.cluster_size_x(), - self.cluster_size_y(), typestr); - }) + [typestr]() { return fmt_format; }) /* .def("sum", [](ClusterVector &self) { @@ -124,13 +122,11 @@ void define_cluster_vector(py::module &m, const std::string &typestr) { .def_buffer( [typestr](ClusterVector &self) -> py::buffer_info { return py::buffer_info( - self.data(), /* Pointer to buffer */ - self.item_size(), /* Size of one scalar */ - fmt::format(self.fmt_base(), self.cluster_size_x(), - self.cluster_size_y(), - typestr), /* Format descriptor */ - 1, /* Number of dimensions */ - {self.size()}, /* Buffer dimensions */ + self.data(), /* Pointer to buffer */ + self.item_size(), /* Size of one scalar */ + fmt_format, /* Format descriptor */ + 1, /* Number of dimensions */ + {self.size()}, /* Buffer dimensions */ {self.item_size()} /* Strides (in bytes) for each index */ ); }); diff --git a/python/src/np_helper.hpp b/python/src/np_helper.hpp index 768efac..98be52f 100644 --- a/python/src/np_helper.hpp +++ b/python/src/np_helper.hpp @@ -10,6 +10,7 @@ #include "aare/NDView.hpp" namespace py = pybind11; +using namespace aare; // Pass image data back to python as a numpy array template @@ -64,4 +65,22 @@ template auto make_view_2d(py::array_t &arr) { } template auto make_view_1d(py::array_t &arr) { return 
aare::NDView(arr.mutable_data(), get_shape_1d(arr)); -} \ No newline at end of file +} + +template struct fmt_format_trait; // forward declaration + +template +struct fmt_format_trait> { + + static std::string value() { + return fmt::format("T{{{}:x;{}:y;{}:data;}}", + py::format_descriptor::format(), + py::format_descriptor::format(), + fmt::format("{}{}", ClusterSizeX * ClusterSizeY, + py::format_descriptor::format())); + } +}; + +template +auto fmt_format = fmt_format_trait::value(); \ No newline at end of file diff --git a/src/ClusterVector.test.cpp b/src/ClusterVector.test.cpp index 096abfa..1880355 100644 --- a/src/ClusterVector.test.cpp +++ b/src/ClusterVector.test.cpp @@ -25,10 +25,7 @@ TEST_CASE("ClusterVector 2x2 int32_t capacity 4, push back then read", REQUIRE(cv.size() == 1); REQUIRE(cv.capacity() == 4); - // Read the cluster back out using copy. TODO! Can we improve the API? - Cluster c2; - std::byte *ptr = cv.element_ptr(0); - std::copy(ptr, ptr + cv.item_size(), reinterpret_cast(&c2)); + auto c2 = cv.at(0); // Check that the data is the same REQUIRE(c1.x == c2.x); From 017960d963073634b7bc1cf8317d31962020e058 Mon Sep 17 00:00:00 2001 From: Mazzoleni Alice Francesca Date: Mon, 7 Apr 2025 13:41:14 +0200 Subject: [PATCH 077/120] added push_back property --- python/src/cluster.hpp | 4 ++-- python/tests/test_Cluster.py | 10 +++++----- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/python/src/cluster.hpp b/python/src/cluster.hpp index b414ae1..fda80a7 100644 --- a/python/src/cluster.hpp +++ b/python/src/cluster.hpp @@ -87,12 +87,12 @@ void define_cluster_vector(py::module &m, const std::string &typestr) { }, py::arg("cluster")) */ - /* + .def("push_back", [](ClusterVector &self, const ClusterType &cluster) { self.push_back(cluster); }) - */ + //.def("push_back", &ClusterVector::push_back) //TODO // implement push_back .def_property_readonly("size", &ClusterVector::size) diff --git a/python/tests/test_Cluster.py 
b/python/tests/test_Cluster.py index 2281e13..bd2c482 100644 --- a/python/tests/test_Cluster.py +++ b/python/tests/test_Cluster.py @@ -16,12 +16,13 @@ def test_ClusterVector(): cluster = Cluster3x3i(0,0,np.ones(9, dtype=np.int32)) - #clustervector.push_back(cluster) - #assert clustervector.size == 1 + clustervector.push_back(cluster) + assert clustervector.size == 1 #push_back - check size + def test_Interpolator(): """Test Interpolator""" @@ -36,12 +37,11 @@ def test_Interpolator(): assert interpolator.get_ietay().shape == (30,30,20) clustervector = ClusterVector_Cluster3x3i() - #TODO clustervector is empty cluster = Cluster3x3i(0,0, np.ones(9, dtype=np.int32)) #clustervector.push_back(cluster) - num_clusters = 1; + #num_clusters = 1; - assert interpolator.interpolate_Cluster3x3i(clustervector).shape == (num_clusters, 3) + #assert interpolator.interpolate_Cluster3x3i(clustervector).shape == (num_clusters, 3) #def test_cluster_file(): From 10e4e10431884af9a9f9abdd9084fcebba54247f Mon Sep 17 00:00:00 2001 From: froejdh_e Date: Mon, 7 Apr 2025 15:33:37 +0200 Subject: [PATCH 078/120] function signature for push back --- python/aare/__init__.py | 8 ++++---- python/src/cluster.hpp | 5 ++++- python/src/module.cpp | 12 ++++++------ 3 files changed, 14 insertions(+), 11 deletions(-) diff --git a/python/aare/__init__.py b/python/aare/__init__.py index 058d7cf..c9076cf 100644 --- a/python/aare/__init__.py +++ b/python/aare/__init__.py @@ -2,14 +2,14 @@ from . 
import _aare -from ._aare import File, RawMasterFile, RawSubFile -from ._aare import Pedestal_d, Pedestal_f, ClusterFinder, VarClusterFinder +# from ._aare import File, RawMasterFile, RawSubFile +# from ._aare import Pedestal_d, Pedestal_f, ClusterFinder, VarClusterFinder from ._aare import DetectorType -from ._aare import ClusterFile +from ._aare import ClusterFile_Cluster3x3i as ClusterFile from ._aare import hitmap from ._aare import ROI -from ._aare import ClusterFinderMT, ClusterCollector, ClusterFileSink, ClusterVector_i +# from ._aare import ClusterFinderMT, ClusterCollector, ClusterFileSink, ClusterVector_i from ._aare import fit_gaus, fit_pol1 from ._aare import Interpolator diff --git a/python/src/cluster.hpp b/python/src/cluster.hpp index fda80a7..30b80f0 100644 --- a/python/src/cluster.hpp +++ b/python/src/cluster.hpp @@ -63,8 +63,11 @@ void define_cluster(py::module &m, const std::string &typestr) { }); } -template +template void define_cluster_vector(py::module &m, const std::string &typestr) { + using ClusterType = + Cluster; auto class_name = fmt::format("ClusterVector_{}", typestr); py::class_>(m, class_name.c_str(), diff --git a/python/src/module.cpp b/python/src/module.cpp index 9d3866e..38d3681 100644 --- a/python/src/module.cpp +++ b/python/src/module.cpp @@ -36,12 +36,12 @@ PYBIND11_MODULE(_aare, m) { define_cluster_file_io_bindings>(m, "Cluster2x2f"); define_cluster_file_io_bindings>(m, "Cluster2x2d"); - define_cluster_vector>(m, "Cluster3x3i"); - define_cluster_vector>(m, "Cluster3x3d"); - define_cluster_vector>(m, "Cluster3x3f"); - define_cluster_vector>(m, "Cluster2x2i"); - define_cluster_vector>(m, "Cluster2x2d"); - define_cluster_vector>(m, "Cluster2x2f"); + define_cluster_vector(m, "Cluster3x3i"); + define_cluster_vector(m, "Cluster3x3d"); + define_cluster_vector(m, "Cluster3x3f"); + define_cluster_vector(m, "Cluster2x2i"); + define_cluster_vector(m, "Cluster2x2d"); + define_cluster_vector(m, "Cluster2x2f"); 
define_cluster_finder_bindings>(m, "Cluster3x3i"); define_cluster_finder_bindings>(m, "Cluster3x3d"); From f16273a566a6cfa0a0ce906f0a0a8462a615ff9a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Erik=20Fr=C3=B6jdh?= Date: Tue, 8 Apr 2025 15:31:04 +0200 Subject: [PATCH 079/120] Adding support for Jungfrau .dat files (#152) closes #150 **Not addressed in this PR:** - pixels_per_frame, bytes_per_frame and tell should be made cost in FileInterface --- CMakeLists.txt | 5 + docs/src/JungfrauDataFile.rst | 25 +++ docs/src/Tests.rst | 47 +++++ docs/src/algorithm.rst | 5 + docs/src/index.rst | 12 +- docs/src/pyJungfrauDataFile.rst | 10 + include/aare/FilePtr.hpp | 30 +++ include/aare/JungfrauDataFile.hpp | 112 +++++++++++ include/aare/algorithm.hpp | 62 +++++- pyproject.toml | 7 +- python/aare/__init__.py | 2 +- python/src/jungfrau_data_file.hpp | 116 ++++++++++++ python/src/module.cpp | 3 + python/tests/conftest.py | 29 +++ python/tests/test_jungfrau_dat_files.py | 92 +++++++++ src/ClusterFile.test.cpp | 12 +- src/File.cpp | 3 + src/FilePtr.cpp | 44 +++++ src/JungfrauDataFile.cpp | 242 ++++++++++++++++++++++++ src/JungfrauDataFile.test.cpp | 94 +++++++++ src/algorithm.test.cpp | 90 ++++++++- 21 files changed, 1025 insertions(+), 17 deletions(-) create mode 100644 docs/src/JungfrauDataFile.rst create mode 100644 docs/src/Tests.rst create mode 100644 docs/src/algorithm.rst create mode 100644 docs/src/pyJungfrauDataFile.rst create mode 100644 include/aare/FilePtr.hpp create mode 100644 include/aare/JungfrauDataFile.hpp create mode 100644 python/src/jungfrau_data_file.hpp create mode 100644 python/tests/conftest.py create mode 100644 python/tests/test_jungfrau_dat_files.py create mode 100644 src/FilePtr.cpp create mode 100644 src/JungfrauDataFile.cpp create mode 100644 src/JungfrauDataFile.test.cpp diff --git a/CMakeLists.txt b/CMakeLists.txt index 804b2f6..6db9314 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -342,8 +342,10 @@ set(PUBLICHEADERS include/aare/File.hpp 
include/aare/Fit.hpp include/aare/FileInterface.hpp + include/aare/FilePtr.hpp include/aare/Frame.hpp include/aare/geo_helpers.hpp + include/aare/JungfrauDataFile.hpp include/aare/NDArray.hpp include/aare/NDView.hpp include/aare/NumpyFile.hpp @@ -367,8 +369,10 @@ set(SourceFiles ${CMAKE_CURRENT_SOURCE_DIR}/src/decode.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/Frame.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/File.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/FilePtr.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/Fit.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/geo_helpers.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/JungfrauDataFile.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyFile.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyHelpers.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/Interpolator.cpp @@ -423,6 +427,7 @@ if(AARE_TESTS) ${CMAKE_CURRENT_SOURCE_DIR}/src/ClusterVector.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/ClusterFile.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/Pedestal.test.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/JungfrauDataFile.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyFile.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyHelpers.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/RawFile.test.cpp diff --git a/docs/src/JungfrauDataFile.rst b/docs/src/JungfrauDataFile.rst new file mode 100644 index 0000000..78d473f --- /dev/null +++ b/docs/src/JungfrauDataFile.rst @@ -0,0 +1,25 @@ +JungfrauDataFile +================== + +JungfrauDataFile is a class to read the .dat files that are produced by Aldo's receiver. +It is mostly used for calibration. + +The structure of the file is: + +* JungfrauDataHeader +* Binary data (256x256, 256x1024 or 512x1024) +* JungfrauDataHeader +* ... + +There is no metadata indicating number of frames or the size of the image, but this +will be infered by this reader. + +.. doxygenstruct:: aare::JungfrauDataHeader + :members: + :undoc-members: + :private-members: + +.. 
doxygenclass:: aare::JungfrauDataFile + :members: + :undoc-members: + :private-members: \ No newline at end of file diff --git a/docs/src/Tests.rst b/docs/src/Tests.rst new file mode 100644 index 0000000..da98001 --- /dev/null +++ b/docs/src/Tests.rst @@ -0,0 +1,47 @@ +**************** +Tests +**************** + +We test the code both from the C++ and Python API. By default only tests that does not require image data is run. + +C++ +~~~~~~~~~~~~~~~~~~ + +.. code-block:: bash + + mkdir build + cd build + cmake .. -DAARE_TESTS=ON + make -j 4 + + export AARE_TEST_DATA=/path/to/test/data + ./run_test [.files] #or using ctest, [.files] is the option to include tests needing data + + + +Python +~~~~~~~~~~~~~~~~~~ + +.. code-block:: bash + + #From the root dir of the library + python -m pytest python/tests --files # passing --files will run the tests needing data + + + +Getting the test data +~~~~~~~~~~~~~~~~~~~~~~~~ + +.. attention :: + + The tests needing the test data are not run by default. To make the data available, you need to set the environment variable + AARE_TEST_DATA to the path of the test data directory. Then pass either [.files] for the C++ tests or --files for Python + +The image files needed for the test are large and are not included in the repository. They are stored +using GIT LFS in a separate repository. To get the test data, you need to clone the repository. +To do this, you need to have GIT LFS installed. You can find instructions on how to install it here: https://git-lfs.github.com/ +Once you have GIT LFS installed, you can clone the repository like any normal repo using: + +.. code-block:: bash + + git clone https://gitea.psi.ch/detectors/aare-test-data.git diff --git a/docs/src/algorithm.rst b/docs/src/algorithm.rst new file mode 100644 index 0000000..9b11857 --- /dev/null +++ b/docs/src/algorithm.rst @@ -0,0 +1,5 @@ +algorithm +============= + +.. 
doxygenfile:: algorithm.hpp + diff --git a/docs/src/index.rst b/docs/src/index.rst index 905caea..af5e99a 100644 --- a/docs/src/index.rst +++ b/docs/src/index.rst @@ -20,9 +20,6 @@ AARE Requirements Consume - - - .. toctree:: :caption: Python API :maxdepth: 1 @@ -31,6 +28,7 @@ AARE pyCtbRawFile pyClusterFile pyClusterVector + pyJungfrauDataFile pyRawFile pyRawMasterFile pyVarClusterFinder @@ -42,6 +40,7 @@ AARE :caption: C++ API :maxdepth: 1 + algorithm NDArray NDView Frame @@ -51,6 +50,7 @@ AARE ClusterFinderMT ClusterFile ClusterVector + JungfrauDataFile Pedestal RawFile RawSubFile @@ -59,4 +59,8 @@ AARE - +.. toctree:: + :caption: Developer + :maxdepth: 3 + + Tests \ No newline at end of file diff --git a/docs/src/pyJungfrauDataFile.rst b/docs/src/pyJungfrauDataFile.rst new file mode 100644 index 0000000..2173adf --- /dev/null +++ b/docs/src/pyJungfrauDataFile.rst @@ -0,0 +1,10 @@ +JungfrauDataFile +=================== + +.. py:currentmodule:: aare + +.. autoclass:: JungfrauDataFile + :members: + :undoc-members: + :show-inheritance: + :inherited-members: \ No newline at end of file diff --git a/include/aare/FilePtr.hpp b/include/aare/FilePtr.hpp new file mode 100644 index 0000000..4c88ecb --- /dev/null +++ b/include/aare/FilePtr.hpp @@ -0,0 +1,30 @@ +#pragma once +#include +#include + +namespace aare { + +/** + * \brief RAII wrapper for FILE pointer + */ +class FilePtr { + FILE *fp_{nullptr}; + + public: + FilePtr() = default; + FilePtr(const std::filesystem::path& fname, const std::string& mode); + FilePtr(const FilePtr &) = delete; // we don't want a copy + FilePtr &operator=(const FilePtr &) = delete; // since we handle a resource + FilePtr(FilePtr &&other); + FilePtr &operator=(FilePtr &&other); + FILE *get(); + int64_t tell(); + void seek(int64_t offset, int whence = SEEK_SET) { + if (fseek(fp_, offset, whence) != 0) + throw std::runtime_error("Error seeking in file"); + } + std::string error_msg(); + ~FilePtr(); +}; + +} // namespace aare \ No newline at 
end of file diff --git a/include/aare/JungfrauDataFile.hpp b/include/aare/JungfrauDataFile.hpp new file mode 100644 index 0000000..bba5403 --- /dev/null +++ b/include/aare/JungfrauDataFile.hpp @@ -0,0 +1,112 @@ +#pragma once +#include +#include +#include + +#include "aare/FilePtr.hpp" +#include "aare/defs.hpp" +#include "aare/NDArray.hpp" +#include "aare/FileInterface.hpp" +namespace aare { + + +struct JungfrauDataHeader{ + uint64_t framenum; + uint64_t bunchid; +}; + +class JungfrauDataFile : public FileInterface { + + size_t m_rows{}; //!< number of rows in the image, from find_frame_size(); + size_t m_cols{}; //!< number of columns in the image, from find_frame_size(); + size_t m_bytes_per_frame{}; //!< number of bytes per frame excluding header + size_t m_total_frames{}; //!< total number of frames in the series of files + size_t m_offset{}; //!< file index of the first file, allow starting at non zero file + size_t m_current_file_index{}; //!< The index of the open file + size_t m_current_frame_index{}; //!< The index of the current frame (with reference to all files) + + std::vector m_last_frame_in_file{}; //!< Used for seeking to the correct file + std::filesystem::path m_path; //!< path to the files + std::string m_base_name; //!< base name used for formatting file names + + FilePtr m_fp; //!< RAII wrapper for a FILE* + + + using pixel_type = uint16_t; + static constexpr size_t header_size = sizeof(JungfrauDataHeader); + static constexpr size_t n_digits_in_file_index = 6; //!< to format file names + + public: + JungfrauDataFile(const std::filesystem::path &fname); + + std::string base_name() const; //!< get the base name of the file (without path and extension) + size_t bytes_per_frame() override; + size_t pixels_per_frame() override; + size_t bytes_per_pixel() const; + size_t bitdepth() const override; + void seek(size_t frame_index) override; //!< seek to the given frame index (note not byte offset) + size_t tell() override; //!< get the frame index of 
the file pointer + size_t total_frames() const override; + size_t rows() const override; + size_t cols() const override; + size_t n_files() const; //!< get the number of files in the series. + + // Extra functions needed for FileInterface + Frame read_frame() override; + Frame read_frame(size_t frame_number) override; + std::vector read_n(size_t n_frames=0) override; + void read_into(std::byte *image_buf) override; + void read_into(std::byte *image_buf, size_t n_frames) override; + size_t frame_number(size_t frame_index) override; + DetectorType detector_type() const override; + + /** + * @brief Read a single frame from the file into the given buffer. + * @param image_buf buffer to read the frame into. (Note the caller is responsible for allocating the buffer) + * @param header pointer to a JungfrauDataHeader or nullptr to skip header) + */ + void read_into(std::byte *image_buf, JungfrauDataHeader *header = nullptr); + + /** + * @brief Read a multiple frames from the file into the given buffer. + * @param image_buf buffer to read the frame into. (Note the caller is responsible for allocating the buffer) + * @param n_frames number of frames to read + * @param header pointer to a JungfrauDataHeader or nullptr to skip header) + */ + void read_into(std::byte *image_buf, size_t n_frames, JungfrauDataHeader *header = nullptr); + + /** + * @brief Read a single frame from the file into the given NDArray + * @param image NDArray to read the frame into. + */ + void read_into(NDArray* image, JungfrauDataHeader* header = nullptr); + + /** + * @brief Read a single frame from the file. 
Allocated a new NDArray for the output data + * @param header pointer to a JungfrauDataHeader or nullptr to skip header) + * @return NDArray with the image data + */ + NDArray read_frame(JungfrauDataHeader* header = nullptr); + + JungfrauDataHeader read_header(); + std::filesystem::path current_file() const { return fpath(m_current_file_index+m_offset); } + + + private: + /** + * @brief Find the size of the frame in the file. (256x256, 256x1024, 512x1024) + * @param fname path to the file + * @throws std::runtime_error if the file is empty or the size cannot be determined + */ + void find_frame_size(const std::filesystem::path &fname); + + + void parse_fname(const std::filesystem::path &fname); + void scan_files(); + void open_file(size_t file_index); + std::filesystem::path fpath(size_t frame_index) const; + + + }; + +} // namespace aare \ No newline at end of file diff --git a/include/aare/algorithm.hpp b/include/aare/algorithm.hpp index 5d6dc57..fc7d51f 100644 --- a/include/aare/algorithm.hpp +++ b/include/aare/algorithm.hpp @@ -7,13 +7,20 @@ namespace aare { /** - * @brief Find the index of the last element smaller than val - * assume a sorted array + * @brief Index of the last element that is smaller than val. + * Requires a sorted array. Uses >= for ordering. If all elements + * are smaller it returns the last element and if all elements are + * larger it returns the first element. 
+ * @param first iterator to the first element + * @param last iterator to the last element + * @param val value to compare + * @return index of the last element that is smaller than val + * */ template size_t last_smaller(const T* first, const T* last, T val) { for (auto iter = first+1; iter != last; ++iter) { - if (*iter > val) { + if (*iter >= val) { return std::distance(first, iter-1); } } @@ -25,7 +32,49 @@ size_t last_smaller(const NDArray& arr, T val) { return last_smaller(arr.begin(), arr.end(), val); } +template +size_t last_smaller(const std::vector& vec, T val) { + return last_smaller(vec.data(), vec.data()+vec.size(), val); +} +/** + * @brief Index of the first element that is larger than val. + * Requires a sorted array. Uses > for ordering. If all elements + * are larger it returns the first element and if all elements are + * smaller it returns the last element. + * @param first iterator to the first element + * @param last iterator to the last element + * @param val value to compare + * @return index of the first element that is larger than val + */ +template +size_t first_larger(const T* first, const T* last, T val) { + for (auto iter = first; iter != last; ++iter) { + if (*iter > val) { + return std::distance(first, iter); + } + } + return std::distance(first, last-1); +} + +template +size_t first_larger(const NDArray& arr, T val) { + return first_larger(arr.begin(), arr.end(), val); +} + +template +size_t first_larger(const std::vector& vec, T val) { + return first_larger(vec.data(), vec.data()+vec.size(), val); +} + +/** + * @brief Index of the nearest element to val. + * Requires a sorted array. If there is no difference it takes the first element. 
+ * @param first iterator to the first element + * @param last iterator to the last element + * @param val value to compare + * @return index of the nearest element + */ template size_t nearest_index(const T* first, const T* last, T val) { auto iter = std::min_element(first, last, @@ -50,6 +99,13 @@ size_t nearest_index(const std::array& arr, T val) { return nearest_index(arr.data(), arr.data()+arr.size(), val); } +template +std::vector cumsum(const std::vector& vec) { + std::vector result(vec.size()); + std::partial_sum(vec.begin(), vec.end(), result.begin()); + return result; +} + } // namespace aare \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index 60128c9..470d158 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -15,4 +15,9 @@ cmake.verbose = true [tool.scikit-build.cmake.define] AARE_PYTHON_BINDINGS = "ON" AARE_SYSTEM_LIBRARIES = "ON" -AARE_INSTALL_PYTHONEXT = "ON" \ No newline at end of file +AARE_INSTALL_PYTHONEXT = "ON" + +[tool.pytest.ini_options] +markers = [ + "files: marks tests that need additional data (deselect with '-m \"not files\"')", +] \ No newline at end of file diff --git a/python/aare/__init__.py b/python/aare/__init__.py index 058d7cf..606f958 100644 --- a/python/aare/__init__.py +++ b/python/aare/__init__.py @@ -2,7 +2,7 @@ from . 
import _aare -from ._aare import File, RawMasterFile, RawSubFile +from ._aare import File, RawMasterFile, RawSubFile, JungfrauDataFile from ._aare import Pedestal_d, Pedestal_f, ClusterFinder, VarClusterFinder from ._aare import DetectorType from ._aare import ClusterFile diff --git a/python/src/jungfrau_data_file.hpp b/python/src/jungfrau_data_file.hpp new file mode 100644 index 0000000..942f6a6 --- /dev/null +++ b/python/src/jungfrau_data_file.hpp @@ -0,0 +1,116 @@ + +#include "aare/JungfrauDataFile.hpp" +#include "aare/defs.hpp" + +#include +#include +#include +#include +#include +#include +#include +#include + +namespace py = pybind11; +using namespace ::aare; + +// Disable warnings for unused parameters, as we ignore some +// in the __exit__ method +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wunused-parameter" + +auto read_dat_frame(JungfrauDataFile &self) { + py::array_t header(1); + py::array_t image({ + self.rows(), + self.cols() + }); + + self.read_into(reinterpret_cast(image.mutable_data()), + header.mutable_data()); + + return py::make_tuple(header, image); +} + +auto read_n_dat_frames(JungfrauDataFile &self, size_t n_frames) { + // adjust for actual frames left in the file + n_frames = std::min(n_frames, self.total_frames() - self.tell()); + if (n_frames == 0) { + throw std::runtime_error("No frames left in file"); + } + + py::array_t header(n_frames); + py::array_t image({ + n_frames, self.rows(), + self.cols()}); + + self.read_into(reinterpret_cast(image.mutable_data()), + n_frames, header.mutable_data()); + + return py::make_tuple(header, image); +} + +void define_jungfrau_data_file_io_bindings(py::module &m) { + // Make the JungfrauDataHeader usable from numpy + PYBIND11_NUMPY_DTYPE(JungfrauDataHeader, framenum, bunchid); + + py::class_(m, "JungfrauDataFile") + .def(py::init()) + .def("seek", &JungfrauDataFile::seek, + R"( + Seek to the given frame index. 
+ )") + .def("tell", &JungfrauDataFile::tell, + R"( + Get the current frame index. + )") + .def_property_readonly("rows", &JungfrauDataFile::rows) + .def_property_readonly("cols", &JungfrauDataFile::cols) + .def_property_readonly("base_name", &JungfrauDataFile::base_name) + .def_property_readonly("bytes_per_frame", + &JungfrauDataFile::bytes_per_frame) + .def_property_readonly("pixels_per_frame", + &JungfrauDataFile::pixels_per_frame) + .def_property_readonly("bytes_per_pixel", + &JungfrauDataFile::bytes_per_pixel) + .def_property_readonly("bitdepth", &JungfrauDataFile::bitdepth) + .def_property_readonly("current_file", &JungfrauDataFile::current_file) + .def_property_readonly("total_frames", &JungfrauDataFile::total_frames) + .def_property_readonly("n_files", &JungfrauDataFile::n_files) + .def("read_frame", &read_dat_frame, + R"( + Read a single frame from the file. + )") + .def("read_n", &read_n_dat_frames, + R"( + Read maximum n_frames frames from the file. + )") + .def( + "read", + [](JungfrauDataFile &self) { + self.seek(0); + auto n_frames = self.total_frames(); + return read_n_dat_frames(self, n_frames); + }, + R"( + Read all frames from the file. Seeks to the beginning before reading. 
+ )") + .def("__enter__", [](JungfrauDataFile &self) { return &self; }) + .def("__exit__", + [](JungfrauDataFile &self, + const std::optional &exc_type, + const std::optional &exc_value, + const std::optional &traceback) { + // self.close(); + }) + .def("__iter__", [](JungfrauDataFile &self) { return &self; }) + .def("__next__", [](JungfrauDataFile &self) { + try { + return read_dat_frame(self); + } catch (std::runtime_error &e) { + throw py::stop_iteration(); + } + }); +} + +#pragma GCC diagnostic pop \ No newline at end of file diff --git a/python/src/module.cpp b/python/src/module.cpp index 43f48ba..7a17e78 100644 --- a/python/src/module.cpp +++ b/python/src/module.cpp @@ -11,6 +11,8 @@ #include "fit.hpp" #include "interpolation.hpp" +#include "jungfrau_data_file.hpp" + //Pybind stuff #include #include @@ -33,5 +35,6 @@ PYBIND11_MODULE(_aare, m) { define_cluster_file_sink_bindings(m); define_fit_bindings(m); define_interpolation_bindings(m); + define_jungfrau_data_file_io_bindings(m); } \ No newline at end of file diff --git a/python/tests/conftest.py b/python/tests/conftest.py new file mode 100644 index 0000000..5badf13 --- /dev/null +++ b/python/tests/conftest.py @@ -0,0 +1,29 @@ +import os +from pathlib import Path +import pytest + + + +def pytest_addoption(parser): + parser.addoption( + "--files", action="store_true", default=False, help="run slow tests" + ) + + +def pytest_configure(config): + config.addinivalue_line("markers", "files: mark test as needing image files to run") + + +def pytest_collection_modifyitems(config, items): + if config.getoption("--files"): + return + skip = pytest.mark.skip(reason="need --files option to run") + for item in items: + if "files" in item.keywords: + item.add_marker(skip) + + +@pytest.fixture +def test_data_path(): + return Path(os.environ["AARE_TEST_DATA"]) + diff --git a/python/tests/test_jungfrau_dat_files.py b/python/tests/test_jungfrau_dat_files.py new file mode 100644 index 0000000..5d3fdf8 --- /dev/null +++ 
b/python/tests/test_jungfrau_dat_files.py @@ -0,0 +1,92 @@ +import pytest +import numpy as np +from aare import JungfrauDataFile + +@pytest.mark.files +def test_jfungfrau_dat_read_number_of_frames(test_data_path): + with JungfrauDataFile(test_data_path / "dat/AldoJF500k_000000.dat") as dat_file: + assert dat_file.total_frames == 24 + + with JungfrauDataFile(test_data_path / "dat/AldoJF250k_000000.dat") as dat_file: + assert dat_file.total_frames == 53 + + with JungfrauDataFile(test_data_path / "dat/AldoJF65k_000000.dat") as dat_file: + assert dat_file.total_frames == 113 + + +@pytest.mark.files +def test_jfungfrau_dat_read_number_of_file(test_data_path): + with JungfrauDataFile(test_data_path / "dat/AldoJF500k_000000.dat") as dat_file: + assert dat_file.n_files == 4 + + with JungfrauDataFile(test_data_path / "dat/AldoJF250k_000000.dat") as dat_file: + assert dat_file.n_files == 7 + + with JungfrauDataFile(test_data_path / "dat/AldoJF65k_000000.dat") as dat_file: + assert dat_file.n_files == 7 + + +@pytest.mark.files +def test_read_module(test_data_path): + """ + Read all frames from the series of .dat files. Compare to canned data in npz format. 
+ """ + + # Read all frames from the .dat file + with JungfrauDataFile(test_data_path / "dat/AldoJF500k_000000.dat") as f: + header, data = f.read() + + #Sanity check + n_frames = 24 + assert header.size == n_frames + assert data.shape == (n_frames, 512, 1024) + + # Read reference data using numpy + with np.load(test_data_path / "dat/AldoJF500k.npz") as f: + ref_header = f["headers"] + ref_data = f["frames"] + + # Check that the data is the same + assert np.all(ref_header == header) + assert np.all(ref_data == data) + +@pytest.mark.files +def test_read_half_module(test_data_path): + + # Read all frames from the .dat file + with JungfrauDataFile(test_data_path / "dat/AldoJF250k_000000.dat") as f: + header, data = f.read() + + n_frames = 53 + assert header.size == n_frames + assert data.shape == (n_frames, 256, 1024) + + # Read reference data using numpy + with np.load(test_data_path / "dat/AldoJF250k.npz") as f: + ref_header = f["headers"] + ref_data = f["frames"] + + # Check that the data is the same + assert np.all(ref_header == header) + assert np.all(ref_data == data) + + +@pytest.mark.files +def test_read_single_chip(test_data_path): + + # Read all frames from the .dat file + with JungfrauDataFile(test_data_path / "dat/AldoJF65k_000000.dat") as f: + header, data = f.read() + + n_frames = 113 + assert header.size == n_frames + assert data.shape == (n_frames, 256, 256) + + # Read reference data using numpy + with np.load(test_data_path / "dat/AldoJF65k.npz") as f: + ref_header = f["headers"] + ref_data = f["frames"] + + # Check that the data is the same + assert np.all(ref_header == header) + assert np.all(ref_data == data) \ No newline at end of file diff --git a/src/ClusterFile.test.cpp b/src/ClusterFile.test.cpp index a0eed04..a7fc044 100644 --- a/src/ClusterFile.test.cpp +++ b/src/ClusterFile.test.cpp @@ -11,9 +11,9 @@ using aare::ClusterFile; -TEST_CASE("Read one frame from a a cluster file", "[.integration]") { +TEST_CASE("Read one frame from a a cluster 
file", "[.files]") { //We know that the frame has 97 clusters - auto fpath = test_data_path() / "clusters" / "single_frame_97_clustrers.clust"; + auto fpath = test_data_path() / "clust" / "single_frame_97_clustrers.clust"; REQUIRE(std::filesystem::exists(fpath)); ClusterFile f(fpath); @@ -22,9 +22,9 @@ TEST_CASE("Read one frame from a a cluster file", "[.integration]") { REQUIRE(clusters.frame_number() == 135); } -TEST_CASE("Read one frame using ROI", "[.integration]") { +TEST_CASE("Read one frame using ROI", "[.files]") { //We know that the frame has 97 clusters - auto fpath = test_data_path() / "clusters" / "single_frame_97_clustrers.clust"; + auto fpath = test_data_path() / "clust" / "single_frame_97_clustrers.clust"; REQUIRE(std::filesystem::exists(fpath)); ClusterFile f(fpath); @@ -50,9 +50,9 @@ TEST_CASE("Read one frame using ROI", "[.integration]") { } -TEST_CASE("Read clusters from single frame file", "[.integration]") { +TEST_CASE("Read clusters from single frame file", "[.files]") { - auto fpath = test_data_path() / "clusters" / "single_frame_97_clustrers.clust"; + auto fpath = test_data_path() / "clust" / "single_frame_97_clustrers.clust"; REQUIRE(std::filesystem::exists(fpath)); SECTION("Read fewer clusters than available") { diff --git a/src/File.cpp b/src/File.cpp index 3c68eff..eb04893 100644 --- a/src/File.cpp +++ b/src/File.cpp @@ -1,4 +1,5 @@ #include "aare/File.hpp" +#include "aare/JungfrauDataFile.hpp" #include "aare/NumpyFile.hpp" #include "aare/RawFile.hpp" @@ -27,6 +28,8 @@ File::File(const std::filesystem::path &fname, const std::string &mode, else if (fname.extension() == ".npy") { // file_impl = new NumpyFile(fname, mode, cfg); file_impl = std::make_unique(fname, mode, cfg); + }else if(fname.extension() == ".dat"){ + file_impl = std::make_unique(fname); } else { throw std::runtime_error("Unsupported file type"); } diff --git a/src/FilePtr.cpp b/src/FilePtr.cpp new file mode 100644 index 0000000..4fed3d7 --- /dev/null +++ b/src/FilePtr.cpp 
@@ -0,0 +1,44 @@ + +#include "aare/FilePtr.hpp" +#include +#include +#include + +namespace aare { + +FilePtr::FilePtr(const std::filesystem::path& fname, const std::string& mode = "rb") { + fp_ = fopen(fname.c_str(), mode.c_str()); + if (!fp_) + throw std::runtime_error(fmt::format("Could not open: {}", fname.c_str())); +} + +FilePtr::FilePtr(FilePtr &&other) { std::swap(fp_, other.fp_); } + +FilePtr &FilePtr::operator=(FilePtr &&other) { + std::swap(fp_, other.fp_); + return *this; +} + +FILE *FilePtr::get() { return fp_; } + +int64_t FilePtr::tell() { + auto pos = ftell(fp_); + if (pos == -1) + throw std::runtime_error(fmt::format("Error getting file position: {}", error_msg())); + return pos; +} +FilePtr::~FilePtr() { + if (fp_) + fclose(fp_); // check? +} + +std::string FilePtr::error_msg(){ + if (feof(fp_)) { + return "End of file reached"; + } + if (ferror(fp_)) { + return fmt::format("Error reading file: {}", std::strerror(errno)); + } + return ""; +} +} // namespace aare diff --git a/src/JungfrauDataFile.cpp b/src/JungfrauDataFile.cpp new file mode 100644 index 0000000..6e1ccd6 --- /dev/null +++ b/src/JungfrauDataFile.cpp @@ -0,0 +1,242 @@ +#include "aare/JungfrauDataFile.hpp" +#include "aare/algorithm.hpp" +#include "aare/defs.hpp" + +#include +#include + +namespace aare { + +JungfrauDataFile::JungfrauDataFile(const std::filesystem::path &fname) { + + if (!std::filesystem::exists(fname)) { + throw std::runtime_error(LOCATION + + "File does not exist: " + fname.string()); + } + find_frame_size(fname); + parse_fname(fname); + scan_files(); + open_file(m_current_file_index); +} + + +// FileInterface + +Frame JungfrauDataFile::read_frame(){ + Frame f(rows(), cols(), Dtype::UINT16); + read_into(reinterpret_cast(f.data()), nullptr); + return f; +} + +Frame JungfrauDataFile::read_frame(size_t frame_number){ + seek(frame_number); + Frame f(rows(), cols(), Dtype::UINT16); + read_into(reinterpret_cast(f.data()), nullptr); + return f; +} + +std::vector 
JungfrauDataFile::read_n(size_t n_frames) { + std::vector frames; + throw std::runtime_error(LOCATION + + "Not implemented yet"); + return frames; +} + +void JungfrauDataFile::read_into(std::byte *image_buf) { + read_into(image_buf, nullptr); +} +void JungfrauDataFile::read_into(std::byte *image_buf, size_t n_frames) { + read_into(image_buf, n_frames, nullptr); +} + +size_t JungfrauDataFile::frame_number(size_t frame_index) { + seek(frame_index); + return read_header().framenum; +} + +DetectorType JungfrauDataFile::detector_type() const { return DetectorType::Jungfrau; } + +std::string JungfrauDataFile::base_name() const { return m_base_name; } + +size_t JungfrauDataFile::bytes_per_frame() { return m_bytes_per_frame; } + +size_t JungfrauDataFile::pixels_per_frame() { return m_rows * m_cols; } + +size_t JungfrauDataFile::bytes_per_pixel() const { return sizeof(pixel_type); } + +size_t JungfrauDataFile::bitdepth() const { + return bytes_per_pixel() * bits_per_byte; +} + +void JungfrauDataFile::seek(size_t frame_index) { + if (frame_index >= m_total_frames) { + throw std::runtime_error(LOCATION + "Frame index out of range: " + + std::to_string(frame_index)); + } + m_current_frame_index = frame_index; + auto file_index = first_larger(m_last_frame_in_file, frame_index); + + if (file_index != m_current_file_index) + open_file(file_index); + + auto frame_offset = (file_index) + ? 
frame_index - m_last_frame_in_file[file_index - 1] + : frame_index; + auto byte_offset = frame_offset * (m_bytes_per_frame + header_size); + m_fp.seek(byte_offset); +}; + +size_t JungfrauDataFile::tell() { return m_current_frame_index; } +size_t JungfrauDataFile::total_frames() const { return m_total_frames; } +size_t JungfrauDataFile::rows() const { return m_rows; } +size_t JungfrauDataFile::cols() const { return m_cols; } + +size_t JungfrauDataFile::n_files() const { return m_last_frame_in_file.size(); } + +void JungfrauDataFile::find_frame_size(const std::filesystem::path &fname) { + + static constexpr size_t module_data_size = + header_size + sizeof(pixel_type) * 512 * 1024; + static constexpr size_t half_data_size = + header_size + sizeof(pixel_type) * 256 * 1024; + static constexpr size_t chip_data_size = + header_size + sizeof(pixel_type) * 256 * 256; + + auto file_size = std::filesystem::file_size(fname); + if (file_size == 0) { + throw std::runtime_error(LOCATION + + "Cannot guess frame size: file is empty"); + } + + if (file_size % module_data_size == 0) { + m_rows = 512; + m_cols = 1024; + m_bytes_per_frame = module_data_size - header_size; + } else if (file_size % half_data_size == 0) { + m_rows = 256; + m_cols = 1024; + m_bytes_per_frame = half_data_size - header_size; + } else if (file_size % chip_data_size == 0) { + m_rows = 256; + m_cols = 256; + m_bytes_per_frame = chip_data_size - header_size; + } else { + throw std::runtime_error(LOCATION + + "Cannot find frame size: file size is not a " + "multiple of any known frame size"); + } +} + +void JungfrauDataFile::parse_fname(const std::filesystem::path &fname) { + m_path = fname.parent_path(); + m_base_name = fname.stem(); + + // find file index, then remove if from the base name + if (auto pos = m_base_name.find_last_of('_'); pos != std::string::npos) { + m_offset = std::stoul(m_base_name.substr(pos + 1)); + m_base_name.erase(pos); + } +} + +void JungfrauDataFile::scan_files() { + // find how many 
files we have and the number of frames in each file + m_last_frame_in_file.clear(); + size_t file_index = m_offset; + while (std::filesystem::exists(fpath(file_index))) { + auto n_frames = std::filesystem::file_size(fpath(file_index)) / + (m_bytes_per_frame + header_size); + m_last_frame_in_file.push_back(n_frames); + ++file_index; + } + + // find where we need to open the next file and total number of frames + m_last_frame_in_file = cumsum(m_last_frame_in_file); + m_total_frames = m_last_frame_in_file.back(); +} + +void JungfrauDataFile::read_into(std::byte *image_buf, + JungfrauDataHeader *header) { + + // read header if not passed nullptr + if (header) { + if (auto rc = fread(header, sizeof(JungfrauDataHeader), 1, m_fp.get()); + rc != 1) { + throw std::runtime_error( + LOCATION + + "Could not read header from file:" + m_fp.error_msg()); + } + } else { + m_fp.seek(header_size, SEEK_CUR); + } + + // read data + if (auto rc = fread(image_buf, 1, m_bytes_per_frame, m_fp.get()); + rc != m_bytes_per_frame) { + throw std::runtime_error(LOCATION + "Could not read image from file" + + m_fp.error_msg()); + } + + // prepare for next read + // if we are at the end of the file, open the next file + ++m_current_frame_index; + if (m_current_frame_index >= m_last_frame_in_file[m_current_file_index] && + (m_current_frame_index < m_total_frames)) { + ++m_current_file_index; + open_file(m_current_file_index); + } +} + +void JungfrauDataFile::read_into(std::byte *image_buf, size_t n_frames, + JungfrauDataHeader *header) { + if (header) { + for (size_t i = 0; i < n_frames; ++i) + read_into(image_buf + i * m_bytes_per_frame, header + i); + }else{ + for (size_t i = 0; i < n_frames; ++i) + read_into(image_buf + i * m_bytes_per_frame, nullptr); + } +} + +void JungfrauDataFile::read_into(NDArray* image, JungfrauDataHeader* header) { + if(!(rows() == image->shape(0) && cols() == image->shape(1))){ + throw std::runtime_error(LOCATION + + "Image shape does not match file size: " + 
std::to_string(rows()) + "x" + std::to_string(cols())); + } + read_into(reinterpret_cast(image->data()), header); +} + +NDArray JungfrauDataFile::read_frame(JungfrauDataHeader* header) { + Shape<2> shape{rows(), cols()}; + NDArray image(shape); + + read_into(reinterpret_cast(image.data()), + header); + + return image; +} + +JungfrauDataHeader JungfrauDataFile::read_header() { + JungfrauDataHeader header; + if (auto rc = fread(&header, 1, sizeof(header), m_fp.get()); + rc != sizeof(header)) { + throw std::runtime_error(LOCATION + "Could not read header from file" + + m_fp.error_msg()); + } + m_fp.seek(-header_size, SEEK_CUR); + return header; +} + +void JungfrauDataFile::open_file(size_t file_index) { + // fmt::print(stderr, "Opening file: {}\n", + // fpath(file_index+m_offset).string()); + m_fp = FilePtr(fpath(file_index + m_offset), "rb"); + m_current_file_index = file_index; +} + +std::filesystem::path JungfrauDataFile::fpath(size_t file_index) const { + auto fname = fmt::format("{}_{:0{}}.dat", m_base_name, file_index, + n_digits_in_file_index); + return m_path / fname; +} + +} // namespace aare \ No newline at end of file diff --git a/src/JungfrauDataFile.test.cpp b/src/JungfrauDataFile.test.cpp new file mode 100644 index 0000000..626a318 --- /dev/null +++ b/src/JungfrauDataFile.test.cpp @@ -0,0 +1,94 @@ +#include "aare/JungfrauDataFile.hpp" + +#include +#include "test_config.hpp" + +using aare::JungfrauDataFile; +using aare::JungfrauDataHeader; +TEST_CASE("Open a Jungfrau data file", "[.files]") { + //we know we have 4 files with 7, 7, 7, and 3 frames + //firs frame number if 1 and the bunch id is frame_number**2 + //so we can check the header + auto fpath = test_data_path() / "dat" / "AldoJF500k_000000.dat"; + REQUIRE(std::filesystem::exists(fpath)); + + JungfrauDataFile f(fpath); + REQUIRE(f.rows() == 512); + REQUIRE(f.cols() == 1024); + REQUIRE(f.bytes_per_frame() == 1048576); + REQUIRE(f.pixels_per_frame() == 524288); + REQUIRE(f.bytes_per_pixel() == 2); + 
REQUIRE(f.bitdepth() == 16); + REQUIRE(f.base_name() == "AldoJF500k"); + REQUIRE(f.n_files() == 4); + REQUIRE(f.tell() == 0); + REQUIRE(f.total_frames() == 24); + REQUIRE(f.current_file() == fpath); + + //Check that the frame number and buch id is read correctly + for (size_t i = 0; i < 24; ++i) { + JungfrauDataHeader header; + auto image = f.read_frame(&header); + REQUIRE(header.framenum == i + 1); + REQUIRE(header.bunchid == (i + 1) * (i + 1)); + REQUIRE(image.shape(0) == 512); + REQUIRE(image.shape(1) == 1024); + } +} + +TEST_CASE("Seek in a JungfrauDataFile", "[.files]"){ + auto fpath = test_data_path() / "dat" / "AldoJF65k_000000.dat"; + REQUIRE(std::filesystem::exists(fpath)); + + JungfrauDataFile f(fpath); + + //The file should have 113 frames + f.seek(19); + REQUIRE(f.tell() == 19); + auto h = f.read_header(); + REQUIRE(h.framenum == 19+1); + + //Reading again does not change the file pointer + auto h2 = f.read_header(); + REQUIRE(h2.framenum == 19+1); + + f.seek(59); + REQUIRE(f.tell() == 59); + auto h3 = f.read_header(); + REQUIRE(h3.framenum == 59+1); + + JungfrauDataHeader h4; + auto image = f.read_frame(&h4); + REQUIRE(h4.framenum == 59+1); + + //now we should be on the next frame + REQUIRE(f.tell() == 60); + REQUIRE(f.read_header().framenum == 60+1); + + REQUIRE_THROWS(f.seek(86356)); //out of range +} + +TEST_CASE("Open a Jungfrau data file with non zero file index", "[.files]"){ + + auto fpath = test_data_path() / "dat" / "AldoJF65k_000003.dat"; + REQUIRE(std::filesystem::exists(fpath)); + + JungfrauDataFile f(fpath); + + //18 files per data file, opening the 3rd file we ignore the first 3 + REQUIRE(f.total_frames() == 113-18*3); + REQUIRE(f.tell() == 0); + + //Frame numbers start at 1 in the first file + REQUIRE(f.read_header().framenum == 18*3+1); + + // moving relative to the third file + f.seek(5); + REQUIRE(f.read_header().framenum == 18*3+1+5); + + // ignoring the first 3 files + REQUIRE(f.n_files() == 4); + + REQUIRE(f.current_file().stem() 
== "AldoJF65k_000003"); + +} \ No newline at end of file diff --git a/src/algorithm.test.cpp b/src/algorithm.test.cpp index fcfa8d2..e2ae8fa 100644 --- a/src/algorithm.test.cpp +++ b/src/algorithm.test.cpp @@ -49,6 +49,16 @@ TEST_CASE("nearest index works with std::array", "[algorithm]"){ REQUIRE(aare::nearest_index(arr, -10.0) == 0); } +TEST_CASE("nearest index when there is no different uses the first element", "[algorithm]"){ + std::vector vec = {5, 5, 5, 5, 5}; + REQUIRE(aare::nearest_index(vec, 5) == 0); +} + +TEST_CASE("nearest index when there is no different uses the first element also when all smaller", "[algorithm]"){ + std::vector vec = {5, 5, 5, 5, 5}; + REQUIRE(aare::nearest_index(vec, 10) == 0); +} + TEST_CASE("last smaller", "[algorithm]"){ aare::NDArray arr({5}); @@ -68,6 +78,82 @@ TEST_CASE("returns last bin strictly smaller", "[algorithm]"){ arr[i] = i; } // arr 0, 1, 2, 3, 4 - REQUIRE(aare::last_smaller(arr, 2.0) == 2); + REQUIRE(aare::last_smaller(arr, 2.0) == 1); + +} + +TEST_CASE("last_smaller with all elements smaller returns last element", "[algorithm]"){ + aare::NDArray arr({5}); + for (size_t i = 0; i < arr.size(); i++) { + arr[i] = i; + } + // arr 0, 1, 2, 3, 4 + REQUIRE(aare::last_smaller(arr, 50.) == 4); +} + +TEST_CASE("last_smaller with all elements bigger returns first element", "[algorithm]"){ + aare::NDArray arr({5}); + for (size_t i = 0; i < arr.size(); i++) { + arr[i] = i; + } + // arr 0, 1, 2, 3, 4 + REQUIRE(aare::last_smaller(arr, -50.) == 0); +} + +TEST_CASE("last smaller with all elements equal returns the first element", "[algorithm]"){ + std::vector vec = {5,5,5,5,5,5,5}; + REQUIRE(aare::last_smaller(vec, 5) == 0); +} + + +TEST_CASE("first_lager with vector", "[algorithm]"){ + std::vector vec = {0, 1, 2, 3, 4}; + REQUIRE(aare::first_larger(vec, 2.5) == 3); +} + +TEST_CASE("first_lager with all elements smaller returns last element", "[algorithm]"){ + std::vector vec = {0, 1, 2, 3, 4}; + REQUIRE(aare::first_larger(vec, 50.) 
== 4); +} + +TEST_CASE("first_lager with all elements bigger returns first element", "[algorithm]"){ + std::vector vec = {0, 1, 2, 3, 4}; + REQUIRE(aare::first_larger(vec, -50.) == 0); +} + +TEST_CASE("first_lager with all elements the same as the check returns last", "[algorithm]"){ + std::vector vec = {14, 14, 14, 14, 14}; + REQUIRE(aare::first_larger(vec, 14) == 4); +} + +TEST_CASE("first larger with the same element", "[algorithm]"){ + std::vector vec = {7,8,9,10,11}; + REQUIRE(aare::first_larger(vec, 9) == 3); +} + +TEST_CASE("cumsum works", "[algorithm]"){ + std::vector vec = {0, 1, 2, 3, 4}; + auto result = aare::cumsum(vec); + REQUIRE(result.size() == vec.size()); + REQUIRE(result[0] == 0); + REQUIRE(result[1] == 1); + REQUIRE(result[2] == 3); + REQUIRE(result[3] == 6); + REQUIRE(result[4] == 10); +} +TEST_CASE("cumsum works with empty vector", "[algorithm]"){ + std::vector vec = {}; + auto result = aare::cumsum(vec); + REQUIRE(result.size() == 0); +} +TEST_CASE("cumsum works with negative numbers", "[algorithm]"){ + std::vector vec = {0, -1, -2, -3, -4}; + auto result = aare::cumsum(vec); + REQUIRE(result.size() == vec.size()); + REQUIRE(result[0] == 0); + REQUIRE(result[1] == -1); + REQUIRE(result[2] == -3); + REQUIRE(result[3] == -6); + REQUIRE(result[4] == -10); +} -} \ No newline at end of file From 894065fe9ccc0dd4b63d44af4dcfa21839135a37 Mon Sep 17 00:00:00 2001 From: froejdh_e Date: Wed, 9 Apr 2025 12:19:14 +0200 Subject: [PATCH 080/120] added utility plot --- python/aare/__init__.py | 2 +- python/aare/utils.py | 11 ++++++++++- 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/python/aare/__init__.py b/python/aare/__init__.py index 606f958..98e8c72 100644 --- a/python/aare/__init__.py +++ b/python/aare/__init__.py @@ -17,7 +17,7 @@ from .CtbRawFile import CtbRawFile from .RawFile import RawFile from .ScanParameters import ScanParameters -from .utils import random_pixels, random_pixel, flat_list +from .utils import random_pixels, 
random_pixel, flat_list, add_colorbar #make functions available in the top level API diff --git a/python/aare/utils.py b/python/aare/utils.py index 4708921..a10f54c 100644 --- a/python/aare/utils.py +++ b/python/aare/utils.py @@ -1,4 +1,6 @@ import numpy as np +import matplotlib.pyplot as plt +from mpl_toolkits.axes_grid1 import make_axes_locatable def random_pixels(n_pixels, xmin=0, xmax=512, ymin=0, ymax=1024): """Return a list of random pixels. @@ -24,4 +26,11 @@ def random_pixel(xmin=0, xmax=512, ymin=0, ymax=1024): def flat_list(xss): """Flatten a list of lists.""" - return [x for xs in xss for x in xs] \ No newline at end of file + return [x for xs in xss for x in xs] + +def add_colorbar(ax, im, size="5%", pad=0.05): + """Add a colorbar with the same height as the image.""" + divider = make_axes_locatable(ax) + cax = divider.append_axes("right", size=size, pad=pad) + plt.colorbar(im, cax=cax) + return ax, im, cax \ No newline at end of file From 8b0eee1e66dd7f6273bb4c7ceb7cd3d67a15f52a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Erik=20Fr=C3=B6jdh?= Date: Wed, 9 Apr 2025 17:54:55 +0200 Subject: [PATCH 081/120] fixed warnings and removed ambiguous read_frame (#154) Fixed warnings: - unused variable in Interpolator - Narrowing conversions uint64-->int64 Removed an ambiguous function from JungfrauDataFile - NDarry read_frame(header&=nullptr) - Frame read_frame() NDArray and NDView size() is now signed --- include/aare/JungfrauDataFile.hpp | 8 +------- include/aare/NDArray.hpp | 2 +- include/aare/NDView.hpp | 4 ++-- include/aare/VarClusterFinder.hpp | 4 ++-- src/Fit.cpp | 8 ++++---- src/Interpolator.cpp | 9 ++------- src/JungfrauDataFile.cpp | 20 ++++++++------------ src/JungfrauDataFile.test.cpp | 24 ++++++++++++++++++++++-- src/NDArray.test.cpp | 4 ++-- src/algorithm.test.cpp | 12 ++++++------ 10 files changed, 50 insertions(+), 45 deletions(-) diff --git a/include/aare/JungfrauDataFile.hpp b/include/aare/JungfrauDataFile.hpp index bba5403..9b1bc48 100644 --- 
a/include/aare/JungfrauDataFile.hpp +++ b/include/aare/JungfrauDataFile.hpp @@ -49,6 +49,7 @@ class JungfrauDataFile : public FileInterface { size_t total_frames() const override; size_t rows() const override; size_t cols() const override; + std::array shape() const; size_t n_files() const; //!< get the number of files in the series. // Extra functions needed for FileInterface @@ -81,13 +82,6 @@ class JungfrauDataFile : public FileInterface { */ void read_into(NDArray* image, JungfrauDataHeader* header = nullptr); - /** - * @brief Read a single frame from the file. Allocated a new NDArray for the output data - * @param header pointer to a JungfrauDataHeader or nullptr to skip header) - * @return NDArray with the image data - */ - NDArray read_frame(JungfrauDataHeader* header = nullptr); - JungfrauDataHeader read_header(); std::filesystem::path current_file() const { return fpath(m_current_file_index+m_offset); } diff --git a/include/aare/NDArray.hpp b/include/aare/NDArray.hpp index 45d3a83..ceb1e0b 100644 --- a/include/aare/NDArray.hpp +++ b/include/aare/NDArray.hpp @@ -194,7 +194,7 @@ class NDArray : public ArrayExpr, Ndim> { T *data() { return data_; } std::byte *buffer() { return reinterpret_cast(data_); } - size_t size() const { return size_; } + ssize_t size() const { return static_cast(size_); } size_t total_bytes() const { return size_ * sizeof(T); } std::array shape() const noexcept { return shape_; } int64_t shape(int64_t i) const noexcept { return shape_[i]; } diff --git a/include/aare/NDView.hpp b/include/aare/NDView.hpp index f53f758..55b442b 100644 --- a/include/aare/NDView.hpp +++ b/include/aare/NDView.hpp @@ -71,7 +71,7 @@ template class NDView : public ArrayExpr(size_); } size_t total_bytes() const { return size_ * sizeof(T); } std::array strides() const noexcept { return strides_; } @@ -102,7 +102,7 @@ template class NDView : public ArrayExpr NDView& operator=(const std::array &arr) { - if(size() != arr.size()) + if(size() != 
static_cast(arr.size())) throw std::runtime_error(LOCATION + "Array and NDView size mismatch"); std::copy(arr.begin(), arr.end(), begin()); return *this; diff --git a/include/aare/VarClusterFinder.hpp b/include/aare/VarClusterFinder.hpp index ea62a9d..161941a 100644 --- a/include/aare/VarClusterFinder.hpp +++ b/include/aare/VarClusterFinder.hpp @@ -226,7 +226,7 @@ template void VarClusterFinder::single_pass(NDView img) { template void VarClusterFinder::first_pass() { - for (size_t i = 0; i < original_.size(); ++i) { + for (ssize_t i = 0; i < original_.size(); ++i) { if (use_noise_map) threshold_ = 5 * noiseMap(i); binary_(i) = (original_(i) > threshold_); @@ -250,7 +250,7 @@ template void VarClusterFinder::first_pass() { template void VarClusterFinder::second_pass() { - for (size_t i = 0; i != labeled_.size(); ++i) { + for (ssize_t i = 0; i != labeled_.size(); ++i) { auto cl = labeled_(i); if (cl != 0) { auto it = child.find(cl); diff --git a/src/Fit.cpp b/src/Fit.cpp index 3001efd..9126109 100644 --- a/src/Fit.cpp +++ b/src/Fit.cpp @@ -18,7 +18,7 @@ double gaus(const double x, const double *par) { NDArray gaus(NDView x, NDView par) { NDArray y({x.shape(0)}, 0); - for (size_t i = 0; i < x.size(); i++) { + for (ssize_t i = 0; i < x.size(); i++) { y(i) = gaus(x(i), par.data()); } return y; @@ -28,7 +28,7 @@ double pol1(const double x, const double *par) { return par[0] * x + par[1]; } NDArray pol1(NDView x, NDView par) { NDArray y({x.shape()}, 0); - for (size_t i = 0; i < x.size(); i++) { + for (ssize_t i = 0; i < x.size(); i++) { y(i) = pol1(x(i), par.data()); } return y; @@ -153,7 +153,7 @@ void fit_gaus(NDView x, NDView y, NDView y_err, // Calculate chi2 chi2 = 0; - for (size_t i = 0; i < y.size(); i++) { + for (ssize_t i = 0; i < y.size(); i++) { chi2 += std::pow((y(i) - func::gaus(x(i), par_out.data())) / y_err(i), 2); } } @@ -205,7 +205,7 @@ void fit_pol1(NDView x, NDView y, NDView y_err, // Calculate chi2 chi2 = 0; - for (size_t i = 0; i < y.size(); i++) { + 
for (ssize_t i = 0; i < y.size(); i++) { chi2 += std::pow((y(i) - func::pol1(x(i), par_out.data())) / y_err(i), 2); } } diff --git a/src/Interpolator.cpp b/src/Interpolator.cpp index 7f82533..7034a83 100644 --- a/src/Interpolator.cpp +++ b/src/Interpolator.cpp @@ -68,19 +68,14 @@ std::vector Interpolator::interpolate(const ClusterVector& clus photon.y = cluster.y; photon.energy = eta.sum; - // auto ie = nearest_index(m_energy_bins, photon.energy)-1; - // auto ix = nearest_index(m_etabinsx, eta.x)-1; - // auto iy = nearest_index(m_etabinsy, eta.y)-1; + //Finding the index of the last element that is smaller //should work fine as long as we have many bins auto ie = last_smaller(m_energy_bins, photon.energy); auto ix = last_smaller(m_etabinsx, eta.x); auto iy = last_smaller(m_etabinsy, eta.y); - - // fmt::print("ex: {}, ix: {}, iy: {}\n", ie, ix, iy); - double dX, dY; - int ex, ey; + double dX{}, dY{}; // cBottomLeft = 0, // cBottomRight = 1, // cTopLeft = 2, diff --git a/src/JungfrauDataFile.cpp b/src/JungfrauDataFile.cpp index 6e1ccd6..8f1f904 100644 --- a/src/JungfrauDataFile.cpp +++ b/src/JungfrauDataFile.cpp @@ -37,8 +37,9 @@ Frame JungfrauDataFile::read_frame(size_t frame_number){ std::vector JungfrauDataFile::read_n(size_t n_frames) { std::vector frames; - throw std::runtime_error(LOCATION + - "Not implemented yet"); + for(size_t i = 0; i < n_frames; ++i){ + frames.push_back(read_frame()); + } return frames; } @@ -54,6 +55,10 @@ size_t JungfrauDataFile::frame_number(size_t frame_index) { return read_header().framenum; } +std::array JungfrauDataFile::shape() const { + return {static_cast(rows()), static_cast(cols())}; +} + DetectorType JungfrauDataFile::detector_type() const { return DetectorType::Jungfrau; } std::string JungfrauDataFile::base_name() const { return m_base_name; } @@ -198,22 +203,13 @@ void JungfrauDataFile::read_into(std::byte *image_buf, size_t n_frames, } void JungfrauDataFile::read_into(NDArray* image, JungfrauDataHeader* header) { - 
if(!(rows() == image->shape(0) && cols() == image->shape(1))){ + if(image->shape()!=shape()){ throw std::runtime_error(LOCATION + "Image shape does not match file size: " + std::to_string(rows()) + "x" + std::to_string(cols())); } read_into(reinterpret_cast(image->data()), header); } -NDArray JungfrauDataFile::read_frame(JungfrauDataHeader* header) { - Shape<2> shape{rows(), cols()}; - NDArray image(shape); - - read_into(reinterpret_cast(image.data()), - header); - - return image; -} JungfrauDataHeader JungfrauDataFile::read_header() { JungfrauDataHeader header; diff --git a/src/JungfrauDataFile.test.cpp b/src/JungfrauDataFile.test.cpp index 626a318..ce51168 100644 --- a/src/JungfrauDataFile.test.cpp +++ b/src/JungfrauDataFile.test.cpp @@ -28,7 +28,8 @@ TEST_CASE("Open a Jungfrau data file", "[.files]") { //Check that the frame number and buch id is read correctly for (size_t i = 0; i < 24; ++i) { JungfrauDataHeader header; - auto image = f.read_frame(&header); + aare::NDArray image(f.shape()); + f.read_into(&image, &header); REQUIRE(header.framenum == i + 1); REQUIRE(header.bunchid == (i + 1) * (i + 1)); REQUIRE(image.shape(0) == 512); @@ -58,7 +59,8 @@ TEST_CASE("Seek in a JungfrauDataFile", "[.files]"){ REQUIRE(h3.framenum == 59+1); JungfrauDataHeader h4; - auto image = f.read_frame(&h4); + aare::NDArray image(f.shape()); + f.read_into(&image, &h4); REQUIRE(h4.framenum == 59+1); //now we should be on the next frame @@ -91,4 +93,22 @@ TEST_CASE("Open a Jungfrau data file with non zero file index", "[.files]"){ REQUIRE(f.current_file().stem() == "AldoJF65k_000003"); +} + +TEST_CASE("Read into throws if size doesn't match", "[.files]"){ + auto fpath = test_data_path() / "dat" / "AldoJF65k_000000.dat"; + REQUIRE(std::filesystem::exists(fpath)); + + JungfrauDataFile f(fpath); + + aare::NDArray image({39, 85}); + JungfrauDataHeader header; + + REQUIRE_THROWS(f.read_into(&image, &header)); + REQUIRE_THROWS(f.read_into(&image, nullptr)); + 
REQUIRE_THROWS(f.read_into(&image)); + + REQUIRE(f.tell() == 0); + + } \ No newline at end of file diff --git a/src/NDArray.test.cpp b/src/NDArray.test.cpp index eff3e2c..c37a285 100644 --- a/src/NDArray.test.cpp +++ b/src/NDArray.test.cpp @@ -183,14 +183,14 @@ TEST_CASE("Size and shape matches") { int64_t h = 75; std::array shape{w, h}; NDArray a{shape}; - REQUIRE(a.size() == static_cast(w * h)); + REQUIRE(a.size() == w * h); REQUIRE(a.shape() == shape); } TEST_CASE("Initial value matches for all elements") { double v = 4.35; NDArray a{{5, 5}, v}; - for (uint32_t i = 0; i < a.size(); ++i) { + for (int i = 0; i < a.size(); ++i) { REQUIRE(a(i) == v); } } diff --git a/src/algorithm.test.cpp b/src/algorithm.test.cpp index e2ae8fa..79541a1 100644 --- a/src/algorithm.test.cpp +++ b/src/algorithm.test.cpp @@ -6,7 +6,7 @@ TEST_CASE("Find the closed index in a 1D array", "[algorithm]") { aare::NDArray arr({5}); - for (size_t i = 0; i < arr.size(); i++) { + for (ssize_t i = 0; i < arr.size(); i++) { arr[i] = i; } // arr 0, 1, 2, 3, 4 @@ -19,7 +19,7 @@ TEST_CASE("Find the closed index in a 1D array", "[algorithm]") { TEST_CASE("Passing integers to nearest_index works", "[algorithm]"){ aare::NDArray arr({5}); - for (size_t i = 0; i < arr.size(); i++) { + for (ssize_t i = 0; i < arr.size(); i++) { arr[i] = i; } // arr 0, 1, 2, 3, 4 @@ -62,7 +62,7 @@ TEST_CASE("nearest index when there is no different uses the first element also TEST_CASE("last smaller", "[algorithm]"){ aare::NDArray arr({5}); - for (size_t i = 0; i < arr.size(); i++) { + for (ssize_t i = 0; i < arr.size(); i++) { arr[i] = i; } // arr 0, 1, 2, 3, 4 @@ -74,7 +74,7 @@ TEST_CASE("last smaller", "[algorithm]"){ TEST_CASE("returns last bin strictly smaller", "[algorithm]"){ aare::NDArray arr({5}); - for (size_t i = 0; i < arr.size(); i++) { + for (ssize_t i = 0; i < arr.size(); i++) { arr[i] = i; } // arr 0, 1, 2, 3, 4 @@ -84,7 +84,7 @@ TEST_CASE("returns last bin strictly smaller", "[algorithm]"){ 
TEST_CASE("last_smaller with all elements smaller returns last element", "[algorithm]"){ aare::NDArray arr({5}); - for (size_t i = 0; i < arr.size(); i++) { + for (ssize_t i = 0; i < arr.size(); i++) { arr[i] = i; } // arr 0, 1, 2, 3, 4 @@ -93,7 +93,7 @@ TEST_CASE("last_smaller with all elements smaller returns last element", "[algor TEST_CASE("last_smaller with all elements bigger returns first element", "[algorithm]"){ aare::NDArray arr({5}); - for (size_t i = 0; i < arr.size(); i++) { + for (ssize_t i = 0; i < arr.size(); i++) { arr[i] = i; } // arr 0, 1, 2, 3, 4 From a13affa4d3a3ccb52c16b0259311d4433d7001f9 Mon Sep 17 00:00:00 2001 From: Mazzoleni Alice Francesca Date: Thu, 10 Apr 2025 09:13:58 +0200 Subject: [PATCH 082/120] changed template arguments added tests --- python/src/cluster.hpp | 116 +++++++++++++++-------------------- python/src/cluster_file.hpp | 5 ++ python/src/interpolation.hpp | 23 +++---- python/src/module.cpp | 55 +++++++++-------- python/tests/test_Cluster.py | 96 +++++++++++++++++++++++------ 5 files changed, 175 insertions(+), 120 deletions(-) diff --git a/python/src/cluster.hpp b/python/src/cluster.hpp index 30b80f0..f6d3636 100644 --- a/python/src/cluster.hpp +++ b/python/src/cluster.hpp @@ -23,10 +23,8 @@ template ; py::class_>( - m, class_name.c_str()) + m, class_name.c_str(), py::buffer_protocol()) .def(py::init([](uint8_t x, uint8_t y, py::array_t data) { py::buffer_info buf_info = data.request(); @@ -37,83 +35,57 @@ void define_cluster(py::module &m, const std::string &typestr) { std::copy(ptr, ptr + ClusterSizeX * ClusterSizeY, cluster.data); // Copy array contents return cluster; - })) + })); - //.def(py::init<>()) - .def_readwrite("x", &ClusterType::x) - .def_readwrite("y", &ClusterType::y) - .def_property( - "data", - [](ClusterType &c) -> py::array { - return py::array(py::buffer_info( - c.data, sizeof(Type), - py::format_descriptor::format(), // Type - // format - 1, // Number of dimensions - {static_cast(ClusterSizeX * - 
ClusterSizeY)}, // Shape (flattened) - {sizeof(Type)} // Stride (step size between elements) - )); - }, - [](ClusterType &c, py::array_t arr) { - py::buffer_info buf_info = arr.request(); - Type *ptr = static_cast(buf_info.ptr); - std::copy(ptr, ptr + ClusterSizeX * ClusterSizeY, - c.data); // TODO dont iterate over centers!!! - }); + /* + .def_property( + "data", + [](ClusterType &c) -> py::array { + return py::array(py::buffer_info( + c.data, sizeof(Type), + py::format_descriptor::format(), // Type + // format + 1, // Number of dimensions + {static_cast(ClusterSizeX * + ClusterSizeY)}, // Shape (flattened) + {sizeof(Type)} // Stride (step size between elements) + )); + }, + [](ClusterType &c, py::array_t arr) { + py::buffer_info buf_info = arr.request(); + Type *ptr = static_cast(buf_info.ptr); + std::copy(ptr, ptr + ClusterSizeX * ClusterSizeY, + c.data); // TODO dont iterate over centers!!! + + }); + */ } template void define_cluster_vector(py::module &m, const std::string &typestr) { using ClusterType = - Cluster; + Cluster; auto class_name = fmt::format("ClusterVector_{}", typestr); - py::class_>(m, class_name.c_str(), - py::buffer_protocol()) + py::class_, void>>( + m, class_name.c_str(), + py::buffer_protocol()) .def(py::init()) // TODO change!!! 
- /* - .def("push_back", - [](ClusterVector &self, ClusterType &cl) { - // auto view = make_view_2d(data); - self.push_back(cl); - }) - */ - /* - .def( - "push_back", - [](ClusterVector &self, py::object obj) { - ClusterType &cl = py::cast(obj); - self.push_back(cl); - }, - py::arg("cluster")) - */ .def("push_back", [](ClusterVector &self, const ClusterType &cluster) { self.push_back(cluster); }) - //.def("push_back", &ClusterVector::push_back) //TODO // implement push_back .def_property_readonly("size", &ClusterVector::size) .def("item_size", &ClusterVector::item_size) .def_property_readonly("fmt", [typestr]() { return fmt_format; }) - /* - .def("sum", - [](ClusterVector &self) { - auto *vec = new std::vector(self.sum()); - return return_vector(vec); - }) - .def("sum_2x2", - [](ClusterVector &self) { - auto *vec = new std::vector(self.sum_2x2()); - return return_vector(vec); - }) - */ + .def_property_readonly("cluster_size_x", &ClusterVector::cluster_size_x) .def_property_readonly("cluster_size_y", @@ -135,11 +107,14 @@ void define_cluster_vector(py::module &m, const std::string &typestr) { }); } -template +template void define_cluster_finder_mt_bindings(py::module &m, const std::string &typestr) { auto class_name = fmt::format("ClusterFinderMT_{}", typestr); + using ClusterType = Cluster; + py::class_>( m, class_name.c_str()) .def(py::init, pd_type, size_t, size_t>(), @@ -185,11 +160,14 @@ void define_cluster_finder_mt_bindings(py::module &m, py::arg("thread_index") = 0); } -template +template void define_cluster_collector_bindings(py::module &m, const std::string &typestr) { auto class_name = fmt::format("ClusterCollector_{}", typestr); + using ClusterType = Cluster; + py::class_>(m, class_name.c_str()) .def(py::init *>()) .def("stop", &ClusterCollector::stop) @@ -198,26 +176,32 @@ void define_cluster_collector_bindings(py::module &m, [](ClusterCollector &self) { auto v = new std::vector>( self.steal_clusters()); - return v; + return v; // TODO change!!! 
}, py::return_value_policy::take_ownership); } -template +template void define_cluster_file_sink_bindings(py::module &m, const std::string &typestr) { auto class_name = fmt::format("ClusterFileSink_{}", typestr); + using ClusterType = Cluster; + py::class_>(m, class_name.c_str()) .def(py::init *, const std::filesystem::path &>()) .def("stop", &ClusterFileSink::stop); } -template +template void define_cluster_finder_bindings(py::module &m, const std::string &typestr) { auto class_name = fmt::format("ClusterFinder_{}", typestr); + using ClusterType = Cluster; + py::class_>( m, class_name.c_str()) .def(py::init, pd_type, size_t>(), py::arg("image_size"), @@ -248,9 +232,9 @@ void define_cluster_finder_bindings(py::module &m, const std::string &typestr) { "steal_clusters", [](ClusterFinder &self, bool realloc_same_capacity) { - auto v = new ClusterVector( - self.steal_clusters(realloc_same_capacity)); - return v; + ClusterVector clusters = + self.steal_clusters(realloc_same_capacity); + return clusters; }, py::arg("realloc_same_capacity") = false) .def( diff --git a/python/src/cluster_file.hpp b/python/src/cluster_file.hpp index b41cab8..7ece8e6 100644 --- a/python/src/cluster_file.hpp +++ b/python/src/cluster_file.hpp @@ -80,7 +80,12 @@ void define_cluster_file_io_bindings(py::module &m, } return v; }); +} +template +void register_calculate_eta(py::module &m) { + using ClusterType = Cluster; m.def("calculate_eta2", [](const aare::ClusterVector &clusters) { auto eta2 = new NDArray(calculate_eta2(clusters)); diff --git a/python/src/interpolation.hpp b/python/src/interpolation.hpp index 08ec98d..e667015 100644 --- a/python/src/interpolation.hpp +++ b/python/src/interpolation.hpp @@ -9,12 +9,13 @@ namespace py = pybind11; -template -void register_interpolate(py::class_ &interpolator, - const std::string &typestr) { - auto name = fmt::format("interpolate_{}", typestr); +template +void register_interpolate(py::class_ &interpolator) { - interpolator.def(name.c_str(), + using 
ClusterType = Cluster; + + interpolator.def("interpolate", [](aare::Interpolator &self, const ClusterVector &clusters) { auto photons = self.interpolate(clusters); @@ -50,12 +51,12 @@ void define_interpolation_bindings(py::module &m) { return return_image_data(ptr); }); - register_interpolate>(interpolator, "Cluster3x3i"); - register_interpolate>(interpolator, "Cluster3x3f"); - register_interpolate>(interpolator, "Cluster3x3d"); - register_interpolate>(interpolator, "Cluster2x2i"); - register_interpolate>(interpolator, "Cluster2x2f"); - register_interpolate>(interpolator, "Cluster2x2d"); + register_interpolate(interpolator); + register_interpolate(interpolator); + register_interpolate(interpolator); + register_interpolate(interpolator); + register_interpolate(interpolator); + register_interpolate(interpolator); // TODO! Evaluate without converting to double m.def( diff --git a/python/src/module.cpp b/python/src/module.cpp index 38d3681..bfc1bc1 100644 --- a/python/src/module.cpp +++ b/python/src/module.cpp @@ -43,33 +43,33 @@ PYBIND11_MODULE(_aare, m) { define_cluster_vector(m, "Cluster2x2d"); define_cluster_vector(m, "Cluster2x2f"); - define_cluster_finder_bindings>(m, "Cluster3x3i"); - define_cluster_finder_bindings>(m, "Cluster3x3d"); - define_cluster_finder_bindings>(m, "Cluster3x3f"); - define_cluster_finder_bindings>(m, "Cluster2x2i"); - define_cluster_finder_bindings>(m, "Cluster2x2d"); - define_cluster_finder_bindings>(m, "Cluster2x2f"); + define_cluster_finder_bindings(m, "Cluster3x3i"); + define_cluster_finder_bindings(m, "Cluster3x3d"); + define_cluster_finder_bindings(m, "Cluster3x3f"); + define_cluster_finder_bindings(m, "Cluster2x2i"); + define_cluster_finder_bindings(m, "Cluster2x2d"); + define_cluster_finder_bindings(m, "Cluster2x2f"); - define_cluster_finder_mt_bindings>(m, "Cluster3x3i"); - define_cluster_finder_mt_bindings>(m, "Cluster3x3d"); - define_cluster_finder_mt_bindings>(m, "Cluster3x3f"); - define_cluster_finder_mt_bindings>(m, 
"Cluster2x2i"); - define_cluster_finder_mt_bindings>(m, "Cluster2x2d"); - define_cluster_finder_mt_bindings>(m, "Cluster2x2f"); + define_cluster_finder_mt_bindings(m, "Cluster3x3i"); + define_cluster_finder_mt_bindings(m, "Cluster3x3d"); + define_cluster_finder_mt_bindings(m, "Cluster3x3f"); + define_cluster_finder_mt_bindings(m, "Cluster2x2i"); + define_cluster_finder_mt_bindings(m, "Cluster2x2d"); + define_cluster_finder_mt_bindings(m, "Cluster2x2f"); - define_cluster_file_sink_bindings>(m, "Cluster3x3i"); - define_cluster_file_sink_bindings>(m, "Cluster3x3d"); - define_cluster_file_sink_bindings>(m, "Cluster3x3f"); - define_cluster_file_sink_bindings>(m, "Cluster2x2i"); - define_cluster_file_sink_bindings>(m, "Cluster2x2d"); - define_cluster_file_sink_bindings>(m, "Cluster2x2f"); + define_cluster_file_sink_bindings(m, "Cluster3x3i"); + define_cluster_file_sink_bindings(m, "Cluster3x3d"); + define_cluster_file_sink_bindings(m, "Cluster3x3f"); + define_cluster_file_sink_bindings(m, "Cluster2x2i"); + define_cluster_file_sink_bindings(m, "Cluster2x2d"); + define_cluster_file_sink_bindings(m, "Cluster2x2f"); - define_cluster_collector_bindings>(m, "Cluster3x3i"); - define_cluster_collector_bindings>(m, "Cluster3x3f"); - define_cluster_collector_bindings>(m, "Cluster3x3d"); - define_cluster_collector_bindings>(m, "Cluster2x2i"); - define_cluster_collector_bindings>(m, "Cluster2x2f"); - define_cluster_collector_bindings>(m, "Cluster2x2d"); + define_cluster_collector_bindings(m, "Cluster3x3i"); + define_cluster_collector_bindings(m, "Cluster3x3f"); + define_cluster_collector_bindings(m, "Cluster3x3d"); + define_cluster_collector_bindings(m, "Cluster2x2i"); + define_cluster_collector_bindings(m, "Cluster2x2f"); + define_cluster_collector_bindings(m, "Cluster2x2d"); define_cluster(m, "3x3i"); define_cluster(m, "3x3f"); @@ -77,4 +77,11 @@ PYBIND11_MODULE(_aare, m) { define_cluster(m, "2x2i"); define_cluster(m, "2x2f"); define_cluster(m, "2x2d"); + + 
register_calculate_eta(m); + register_calculate_eta(m); + register_calculate_eta(m); + register_calculate_eta(m); + register_calculate_eta(m); + register_calculate_eta(m); } diff --git a/python/tests/test_Cluster.py b/python/tests/test_Cluster.py index bd2c482..3bb4828 100644 --- a/python/tests/test_Cluster.py +++ b/python/tests/test_Cluster.py @@ -1,12 +1,12 @@ import pytest import numpy as np -from _aare import ClusterVector_Cluster3x3i, Interpolator, Cluster3x3i, ClusterFinder_Cluster3x3i +import aare._aare as aare #import ClusterVector_Cluster3x3i, ClusterVector_Cluster2x2i, Interpolator, Cluster3x3i, ClusterFinder_Cluster3x3i, Cluster2x2i, ClusterFile_Cluster3x3i, Cluster3x3f, calculate_eta2 def test_ClusterVector(): """Test ClusterVector""" - clustervector = ClusterVector_Cluster3x3i() + clustervector = aare.ClusterVector_Cluster3x3i() assert clustervector.cluster_size_x == 3 assert clustervector.cluster_size_y == 3 assert clustervector.item_size() == 4+9*4 @@ -14,14 +14,16 @@ def test_ClusterVector(): assert clustervector.capacity == 1024 assert clustervector.size == 0 - cluster = Cluster3x3i(0,0,np.ones(9, dtype=np.int32)) + cluster = aare.Cluster3x3i(0,0,np.ones(9, dtype=np.int32)) clustervector.push_back(cluster) assert clustervector.size == 1 - #push_back - check size - + with pytest.raises(TypeError): # Or use the appropriate exception type + clustervector.push_back(aare.Cluster2x2i(0,0,np.ones(4, dtype=np.int32))) + with pytest.raises(TypeError): + clustervector.push_back(aare.Cluster3x3f(0,0,np.ones(9, dtype=np.float32))) def test_Interpolator(): """Test Interpolator""" @@ -31,31 +33,87 @@ def test_Interpolator(): ybins = np.linspace(0, 5, 30, dtype=np.float64) etacube = np.zeros(shape=[30, 30, 20], dtype=np.float64) - interpolator = Interpolator(etacube, xbins, ybins, ebins) + interpolator = aare.Interpolator(etacube, xbins, ybins, ebins) assert interpolator.get_ietax().shape == (30,30,20) assert interpolator.get_ietay().shape == (30,30,20) - 
clustervector = ClusterVector_Cluster3x3i() + clustervector = aare.ClusterVector_Cluster3x3i() - cluster = Cluster3x3i(0,0, np.ones(9, dtype=np.int32)) - #clustervector.push_back(cluster) - #num_clusters = 1; + cluster = aare.Cluster3x3i(0,0, np.ones(9, dtype=np.int32)) + clustervector.push_back(cluster) - #assert interpolator.interpolate_Cluster3x3i(clustervector).shape == (num_clusters, 3) + interpolated_photons = interpolator.interpolate(clustervector) + + assert interpolated_photons.size == 1 + + assert interpolated_photons[0]["x"] == -1 + assert interpolated_photons[0]["y"] == -1 + assert interpolated_photons[0]["energy"] == 4 #eta_sum = 4, dx, dy = -1,-1 m_ietax = 0, m_ietay = 0 + + clustervector = aare.ClusterVector_Cluster2x2i() + + cluster = aare.Cluster2x2i(0,0, np.ones(4, dtype=np.int32)) + clustervector.push_back(cluster) + + interpolated_photons = interpolator.interpolate(clustervector) + + assert interpolated_photons.size == 1 + + assert interpolated_photons[0]["x"] == 0 + assert interpolated_photons[0]["y"] == 0 + assert interpolated_photons[0]["energy"] == 4 + +@pytest.mark.files +def test_cluster_file(): + """Test ClusterFile""" + cluster_file = aare.ClusterFile_Cluster3x3i(test_data_path() / "clust/single_frame_97_clustrers.clust") + clustervector = cluster_file.read_clusters() #conversion does not work + + cluster_file.close() + + ###reading with wrong file + cluster_file = ClusterFile_Cluster2x2i(test_data_path() / "clust/single_frame_97_clustrers.clust") #TODO check behavior! 
+ +def test_calculate_eta(): + """Calculate Eta""" + clusters = aare.ClusterVector_Cluster3x3i() + clusters.push_back(aare.Cluster3x3i(0,0, np.ones(9, dtype=np.int32))) + clusters.push_back(aare.Cluster3x3i(0,0, np.array([1,1,1,2,2,2,3,3,3]))) + + eta2 = aare.calculate_eta2(clusters) + + assert eta2.shape == (2,2) + assert eta2[0,0] == 0.5 + assert eta2[0,1] == 0.5 + assert eta2[1,0] == 0.5 + assert eta2[1,1] == 0.6 #1/5 + +def test_cluster_finder(): + """Test ClusterFinder""" + + clusterfinder = aare.ClusterFinder_Cluster3x3i([100,100]) + + #frame = np.random.rand(100,100) + frame = np.zeros(shape=[100,100]) + + clusterfinder.find_clusters(frame) + + clusters = clusterfinder.steal_clusters(False) #conversion does not work + + assert clusters.size == 0 -#def test_cluster_file(): +def test_cluster_collector(): + """Test ClusterCollector""" -#def test_cluster_finder(): - #"""Test ClusterFinder""" + clusterfinder = aare.ClusterFinderMT_Cluster3x3i([100,100]) #TODO: no idea what the data is in InputQueue not zero - #clusterfinder = ClusterFinder_Cluster3x3i([100,100]) + clustercollector = aare.ClusterCollector_Cluster3x3i(clusterfinder) - #clusterfinder.find_clusters() + cluster_vectors = clustercollector.steal_clusters() - #clusters = clusterfinder.steal_clusters() - - #print("cluster size: ", clusters.size()) + assert len(cluster_vectors) == 1 #single thread execution + assert cluster_vectors[0].size == 0 # From 6e4db45b578c7e87c577783ee639ffc89c2d11d8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Erik=20Fr=C3=B6jdh?= Date: Thu, 10 Apr 2025 10:17:16 +0200 Subject: [PATCH 083/120] Activated RH8 build on PSI gitea (#155) --- .gitea/workflows/rh8-native.yml | 12 +++++++++--- .gitea/workflows/rh9-native.yml | 2 +- CMakeLists.txt | 8 ++++++++ python/CMakeLists.txt | 3 ++- 4 files changed, 20 insertions(+), 5 deletions(-) diff --git a/.gitea/workflows/rh8-native.yml b/.gitea/workflows/rh8-native.yml index 02d3dc0..1c64161 100644 --- a/.gitea/workflows/rh8-native.yml +++ 
b/.gitea/workflows/rh8-native.yml @@ -1,18 +1,24 @@ name: Build on RHEL8 on: + push: workflow_dispatch: permissions: contents: read jobs: - buildh: + build: runs-on: "ubuntu-latest" container: image: gitea.psi.ch/images/rhel8-developer-gitea-actions steps: - - uses: actions/checkout@v4 + # workaround until actions/checkout@v4 is available for RH8 + # - uses: actions/checkout@v4 + - name: Clone repository + run: | + echo Cloning ${{ github.ref_name }} + git clone https://${{secrets.GITHUB_TOKEN}}@gitea.psi.ch/${{ github.repository }}.git --branch=${{ github.ref_name }} . - name: Install dependencies @@ -22,7 +28,7 @@ jobs: - name: Build library run: | mkdir build && cd build - cmake .. -DAARE_PYTHON_BINDINGS=ON -DAARE_TESTS=ON + cmake .. -DAARE_PYTHON_BINDINGS=ON -DAARE_TESTS=ON -DPython_FIND_VIRTUALENV=FIRST make -j 2 - name: C++ unit tests diff --git a/.gitea/workflows/rh9-native.yml b/.gitea/workflows/rh9-native.yml index c1f10ac..5027365 100644 --- a/.gitea/workflows/rh9-native.yml +++ b/.gitea/workflows/rh9-native.yml @@ -8,7 +8,7 @@ permissions: contents: read jobs: - buildh: + build: runs-on: "ubuntu-latest" container: image: gitea.psi.ch/images/rhel9-developer-gitea-actions diff --git a/CMakeLists.txt b/CMakeLists.txt index 6db9314..039545e 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -11,6 +11,14 @@ set(CMAKE_CXX_STANDARD 17) set(CMAKE_CXX_STANDARD_REQUIRED ON) set(CMAKE_CXX_EXTENSIONS OFF) +execute_process( + COMMAND git log -1 --format=%h + WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR} + OUTPUT_VARIABLE GIT_HASH + OUTPUT_STRIP_TRAILING_WHITESPACE + ) +message(STATUS "Building from git hash: ${GIT_HASH}") + if (${CMAKE_VERSION} VERSION_GREATER "3.24") cmake_policy(SET CMP0135 NEW) #Fetch content download timestamp endif() diff --git a/python/CMakeLists.txt b/python/CMakeLists.txt index 09de736..75847a7 100644 --- a/python/CMakeLists.txt +++ b/python/CMakeLists.txt @@ -1,12 +1,13 @@ find_package (Python 3.10 COMPONENTS Interpreter Development REQUIRED) 
+set(PYBIND11_FINDPYTHON ON) # Needed for RH8 # Download or find pybind11 depending on configuration if(AARE_FETCH_PYBIND11) FetchContent_Declare( pybind11 GIT_REPOSITORY https://github.com/pybind/pybind11 - GIT_TAG v2.13.0 + GIT_TAG v2.13.6 ) FetchContent_MakeAvailable(pybind11) else() From 53a90e197e2c4c468fbb6e8f84d678cc144f2a0e Mon Sep 17 00:00:00 2001 From: Mazzoleni Alice Francesca Date: Thu, 10 Apr 2025 10:41:58 +0200 Subject: [PATCH 084/120] added additional tests --- include/aare/ClusterFile.hpp | 41 ---- python/aare/__init__.py | 2 +- python/src/cluster_file.hpp | 7 +- python/src/module.cpp | 12 +- python/tests/conftest.py | 7 +- python/tests/test_Cluster.py | 17 +- src/ClusterFile.cpp | 396 ----------------------------------- src/Interpolator.cpp | 86 -------- 8 files changed, 29 insertions(+), 539 deletions(-) delete mode 100644 src/ClusterFile.cpp diff --git a/include/aare/ClusterFile.hpp b/include/aare/ClusterFile.hpp index 1995a16..ff5d338 100644 --- a/include/aare/ClusterFile.hpp +++ b/include/aare/ClusterFile.hpp @@ -11,7 +11,6 @@ namespace aare { -<<<<<<< HEAD /* Binary cluster file. Expects data to be layed out as: int32_t frame_number @@ -21,44 +20,6 @@ int32_t frame_number uint32_t number_of_clusters .... */ -======= - -// TODO! Legacy enums, migrate to enum class -typedef enum { - cBottomLeft = 0, - cBottomRight = 1, - cTopLeft = 2, - cTopRight = 3 -} corner; - -typedef enum { - pBottomLeft = 0, - pBottom = 1, - pBottomRight = 2, - pLeft = 3, - pCenter = 4, - pRight = 5, - pTopLeft = 6, - pTop = 7, - pTopRight = 8 -} pixel; - -struct Eta2 { - double x; - double y; - corner c; - int32_t sum; -}; - -struct ClusterAnalysis { - uint32_t c; - int32_t tot; - double etax; - double etay; -}; - - ->>>>>>> developer // TODO: change to support any type of clusters, e.g. 
header line with // clsuter_size_x, cluster_size_y, @@ -109,8 +70,6 @@ class ClusterFile { */ ClusterVector read_clusters(size_t n_clusters); - ClusterVector read_clusters(size_t n_clusters, ROI roi); - /** * @brief Read a single frame from the file and return the clusters. The * cluster vector will have the frame number set. diff --git a/python/aare/__init__.py b/python/aare/__init__.py index 36aac14..8c51d73 100644 --- a/python/aare/__init__.py +++ b/python/aare/__init__.py @@ -3,7 +3,7 @@ from . import _aare from ._aare import File, RawMasterFile, RawSubFile, JungfrauDataFile -from ._aare import Pedestal_d, Pedestal_f, ClusterFinder, VarClusterFinder +from ._aare import Pedestal_d, Pedestal_f, ClusterFinder_Cluster3x3i, VarClusterFinder from ._aare import DetectorType from ._aare import ClusterFile_Cluster3x3i as ClusterFile from ._aare import hitmap diff --git a/python/src/cluster_file.hpp b/python/src/cluster_file.hpp index 7ece8e6..3e7aa48 100644 --- a/python/src/cluster_file.hpp +++ b/python/src/cluster_file.hpp @@ -19,11 +19,12 @@ namespace py = pybind11; using namespace ::aare; -template +template void define_cluster_file_io_bindings(py::module &m, const std::string &typestr) { - // PYBIND11_NUMPY_DTYPE(Cluster, x, y, - // data); // is this used - maybe use as cluster type + + using ClusterType = Cluster; auto class_name = fmt::format("ClusterFile_{}", typestr); diff --git a/python/src/module.cpp b/python/src/module.cpp index 78fd283..8d5b5ab 100644 --- a/python/src/module.cpp +++ b/python/src/module.cpp @@ -32,12 +32,12 @@ PYBIND11_MODULE(_aare, m) { define_interpolation_bindings(m); define_jungfrau_data_file_io_bindings(m); - define_cluster_file_io_bindings>(m, "Cluster3x3i"); - define_cluster_file_io_bindings>(m, "Cluster3x3d"); - define_cluster_file_io_bindings>(m, "Cluster3x3f"); - define_cluster_file_io_bindings>(m, "Cluster2x2i"); - define_cluster_file_io_bindings>(m, "Cluster2x2f"); - define_cluster_file_io_bindings>(m, "Cluster2x2d"); + 
define_cluster_file_io_bindings(m, "Cluster3x3i"); + define_cluster_file_io_bindings(m, "Cluster3x3d"); + define_cluster_file_io_bindings(m, "Cluster3x3f"); + define_cluster_file_io_bindings(m, "Cluster2x2i"); + define_cluster_file_io_bindings(m, "Cluster2x2f"); + define_cluster_file_io_bindings(m, "Cluster2x2d"); define_cluster_vector(m, "Cluster3x3i"); define_cluster_vector(m, "Cluster3x3d"); diff --git a/python/tests/conftest.py b/python/tests/conftest.py index 5badf13..fbcfeb3 100644 --- a/python/tests/conftest.py +++ b/python/tests/conftest.py @@ -25,5 +25,10 @@ def pytest_collection_modifyitems(config, items): @pytest.fixture def test_data_path(): - return Path(os.environ["AARE_TEST_DATA"]) + env_value = os.environ.get("AARE_TEST_DATA") + if not env_value: + raise RuntimeError("Environment variable AARE_TEST_DATA is not set or is empty") + + return Path(env_value) + diff --git a/python/tests/test_Cluster.py b/python/tests/test_Cluster.py index 3bb4828..29d5ad9 100644 --- a/python/tests/test_Cluster.py +++ b/python/tests/test_Cluster.py @@ -1,7 +1,9 @@ import pytest import numpy as np -import aare._aare as aare #import ClusterVector_Cluster3x3i, ClusterVector_Cluster2x2i, Interpolator, Cluster3x3i, ClusterFinder_Cluster3x3i, Cluster2x2i, ClusterFile_Cluster3x3i, Cluster3x3f, calculate_eta2 +import aare._aare as aare +from conftest import test_data_path + def test_ClusterVector(): """Test ClusterVector""" @@ -64,15 +66,19 @@ def test_Interpolator(): assert interpolated_photons[0]["energy"] == 4 @pytest.mark.files -def test_cluster_file(): +def test_cluster_file(test_data_path): """Test ClusterFile""" - cluster_file = aare.ClusterFile_Cluster3x3i(test_data_path() / "clust/single_frame_97_clustrers.clust") - clustervector = cluster_file.read_clusters() #conversion does not work + cluster_file = aare.ClusterFile_Cluster3x3i(test_data_path / "clust/single_frame_97_clustrers.clust") + clustervector = cluster_file.read_clusters(10) #conversion does not work 
cluster_file.close() + assert clustervector.size == 10 + ###reading with wrong file - cluster_file = ClusterFile_Cluster2x2i(test_data_path() / "clust/single_frame_97_clustrers.clust") #TODO check behavior! + with pytest.raises(TypeError): + cluster_file = aare.ClusterFile_Cluster2x2i(test_data_path / "clust/single_frame_97_clustrers.clust") + cluster_file.close() def test_calculate_eta(): """Calculate Eta""" @@ -103,6 +109,7 @@ def test_cluster_finder(): assert clusters.size == 0 +#TODO dont understand behavior def test_cluster_collector(): """Test ClusterCollector""" diff --git a/src/ClusterFile.cpp b/src/ClusterFile.cpp deleted file mode 100644 index f77ac92..0000000 --- a/src/ClusterFile.cpp +++ /dev/null @@ -1,396 +0,0 @@ -#include "aare/ClusterFile.hpp" - -#include - -namespace aare { - -ClusterFile::ClusterFile(const std::filesystem::path &fname, size_t chunk_size, - const std::string &mode) - : m_chunk_size(chunk_size), m_mode(mode) { - - if (mode == "r") { - fp = fopen(fname.c_str(), "rb"); - if (!fp) { - throw std::runtime_error("Could not open file for reading: " + - fname.string()); - } - } else if (mode == "w") { - fp = fopen(fname.c_str(), "wb"); - if (!fp) { - throw std::runtime_error("Could not open file for writing: " + - fname.string()); - } - } else if (mode == "a") { - fp = fopen(fname.c_str(), "ab"); - if (!fp) { - throw std::runtime_error("Could not open file for appending: " + - fname.string()); - } - } else { - throw std::runtime_error("Unsupported mode: " + mode); - } -} - -void ClusterFile::set_roi(ROI roi){ - m_roi = roi; -} - -void ClusterFile::set_noise_map(const NDView noise_map){ - m_noise_map = NDArray(noise_map); -} - -void ClusterFile::set_gain_map(const NDView gain_map){ - m_gain_map = NDArray(gain_map); -} - -ClusterFile::~ClusterFile() { close(); } - -void ClusterFile::close() { - if (fp) { - fclose(fp); - fp = nullptr; - } -} - -void ClusterFile::write_frame(const ClusterVector &clusters) { - if (m_mode != "w" && m_mode != "a") 
{ - throw std::runtime_error("File not opened for writing"); - } - if (!(clusters.cluster_size_x() == 3) && - !(clusters.cluster_size_y() == 3)) { - throw std::runtime_error("Only 3x3 clusters are supported"); - } - //First write the frame number - 4 bytes - int32_t frame_number = clusters.frame_number(); - if(fwrite(&frame_number, sizeof(frame_number), 1, fp)!=1){ - throw std::runtime_error(LOCATION + "Could not write frame number"); - } - - //Then write the number of clusters - 4 bytes - uint32_t n_clusters = clusters.size(); - if(fwrite(&n_clusters, sizeof(n_clusters), 1, fp)!=1){ - throw std::runtime_error(LOCATION + "Could not write number of clusters"); - } - - //Now write the clusters in the frame - if(fwrite(clusters.data(), clusters.item_size(), clusters.size(), fp)!=clusters.size()){ - throw std::runtime_error(LOCATION + "Could not write clusters"); - } -} - - -ClusterVector ClusterFile::read_clusters(size_t n_clusters){ - if (m_mode != "r") { - throw std::runtime_error("File not opened for reading"); - } - if (m_noise_map || m_roi){ - return read_clusters_with_cut(n_clusters); - }else{ - return read_clusters_without_cut(n_clusters); - } -} - -ClusterVector ClusterFile::read_clusters_without_cut(size_t n_clusters) { - if (m_mode != "r") { - throw std::runtime_error("File not opened for reading"); - } - - ClusterVector clusters(3,3, n_clusters); - - int32_t iframe = 0; // frame number needs to be 4 bytes! 
- size_t nph_read = 0; - uint32_t nn = m_num_left; - uint32_t nph = m_num_left; // number of clusters in frame needs to be 4 - - // auto buf = reinterpret_cast(clusters.data()); - auto buf = clusters.data(); - // if there are photons left from previous frame read them first - if (nph) { - if (nph > n_clusters) { - // if we have more photons left in the frame then photons to read we - // read directly the requested number - nn = n_clusters; - } else { - nn = nph; - } - nph_read += fread((buf + nph_read*clusters.item_size()), - clusters.item_size(), nn, fp); - m_num_left = nph - nn; // write back the number of photons left - } - - if (nph_read < n_clusters) { - // keep on reading frames and photons until reaching n_clusters - while (fread(&iframe, sizeof(iframe), 1, fp)) { - clusters.set_frame_number(iframe); - // read number of clusters in frame - if (fread(&nph, sizeof(nph), 1, fp)) { - if (nph > (n_clusters - nph_read)) - nn = n_clusters - nph_read; - else - nn = nph; - - nph_read += fread((buf + nph_read*clusters.item_size()), - clusters.item_size(), nn, fp); - m_num_left = nph - nn; - } - if (nph_read >= n_clusters) - break; - } - } - - // Resize the vector to the number of clusters. - // No new allocation, only change bounds. 
- clusters.resize(nph_read); - if(m_gain_map) - clusters.apply_gain_map(m_gain_map->view()); - return clusters; -} - - -ClusterVector ClusterFile::read_clusters_with_cut(size_t n_clusters) { - ClusterVector clusters(3,3); - clusters.reserve(n_clusters); - - // if there are photons left from previous frame read them first - if (m_num_left) { - while(m_num_left && clusters.size() < n_clusters){ - Cluster3x3 c = read_one_cluster(); - if(is_selected(c)){ - clusters.push_back(c.x, c.y, reinterpret_cast(c.data)); - } - } - } - - // we did not have enough clusters left in the previous frame - // keep on reading frames until reaching n_clusters - if (clusters.size() < n_clusters) { - // sanity check - if (m_num_left) { - throw std::runtime_error(LOCATION + "Entered second loop with clusters left\n"); - } - - int32_t frame_number = 0; // frame number needs to be 4 bytes! - while (fread(&frame_number, sizeof(frame_number), 1, fp)) { - if (fread(&m_num_left, sizeof(m_num_left), 1, fp)) { - clusters.set_frame_number(frame_number); //cluster vector will hold the last frame number - while(m_num_left && clusters.size() < n_clusters){ - Cluster3x3 c = read_one_cluster(); - if(is_selected(c)){ - clusters.push_back(c.x, c.y, reinterpret_cast(c.data)); - } - } - } - - // we have enough clusters, break out of the outer while loop - if (clusters.size() >= n_clusters) - break; - } - - } - if(m_gain_map) - clusters.apply_gain_map(m_gain_map->view()); - - return clusters; -} - -Cluster3x3 ClusterFile::read_one_cluster(){ - Cluster3x3 c; - auto rc = fread(&c, sizeof(c), 1, fp); - if (rc != 1) { - throw std::runtime_error(LOCATION + "Could not read cluster"); - } - --m_num_left; - return c; -} - -ClusterVector ClusterFile::read_frame(){ - if (m_mode != "r") { - throw std::runtime_error(LOCATION + "File not opened for reading"); - } - if (m_noise_map || m_roi){ - return read_frame_with_cut(); - }else{ - return read_frame_without_cut(); - } -} - -ClusterVector 
ClusterFile::read_frame_without_cut() { - if (m_mode != "r") { - throw std::runtime_error("File not opened for reading"); - } - if (m_num_left) { - throw std::runtime_error( - "There are still photons left in the last frame"); - } - int32_t frame_number; - if (fread(&frame_number, sizeof(frame_number), 1, fp) != 1) { - throw std::runtime_error(LOCATION + "Could not read frame number"); - } - - int32_t n_clusters; // Saved as 32bit integer in the cluster file - if (fread(&n_clusters, sizeof(n_clusters), 1, fp) != 1) { - throw std::runtime_error(LOCATION + "Could not read number of clusters"); - } - - ClusterVector clusters(3, 3, n_clusters); - clusters.set_frame_number(frame_number); - - if (fread(clusters.data(), clusters.item_size(), n_clusters, fp) != - static_cast(n_clusters)) { - throw std::runtime_error(LOCATION + "Could not read clusters"); - } - clusters.resize(n_clusters); - if (m_gain_map) - clusters.apply_gain_map(m_gain_map->view()); - return clusters; -} - -ClusterVector ClusterFile::read_frame_with_cut() { - if (m_mode != "r") { - throw std::runtime_error("File not opened for reading"); - } - if (m_num_left) { - throw std::runtime_error( - "There are still photons left in the last frame"); - } - int32_t frame_number; - if (fread(&frame_number, sizeof(frame_number), 1, fp) != 1) { - throw std::runtime_error("Could not read frame number"); - } - - - if (fread(&m_num_left, sizeof(m_num_left), 1, fp) != 1) { - throw std::runtime_error("Could not read number of clusters"); - } - - ClusterVector clusters(3, 3); - clusters.reserve(m_num_left); - clusters.set_frame_number(frame_number); - while(m_num_left){ - Cluster3x3 c = read_one_cluster(); - if(is_selected(c)){ - clusters.push_back(c.x, c.y, reinterpret_cast(c.data)); - } - } - if (m_gain_map) - clusters.apply_gain_map(m_gain_map->view()); - return clusters; -} - - - -bool ClusterFile::is_selected(Cluster3x3 &cl) { - //Should fail fast - if (m_roi) { - if (!(m_roi->contains(cl.x, cl.y))) { - return false; 
- } - } - if (m_noise_map){ - int32_t sum_1x1 = cl.data[4]; // central pixel - int32_t sum_2x2 = cl.sum_2x2(); // highest sum of 2x2 subclusters - int32_t sum_3x3 = cl.sum(); // sum of all pixels - - auto noise = (*m_noise_map)(cl.y, cl.x); //TODO! check if this is correct - if (sum_1x1 <= noise || sum_2x2 <= 2 * noise || sum_3x3 <= 3 * noise) { - return false; - } - } - //we passed all checks - return true; -} - -NDArray calculate_eta2(ClusterVector &clusters) { - //TOTO! make work with 2x2 clusters - NDArray eta2({static_cast(clusters.size()), 2}); - - if (clusters.cluster_size_x() == 3 || clusters.cluster_size_y() == 3) { - for (size_t i = 0; i < clusters.size(); i++) { - auto e = calculate_eta2(clusters.at(i)); - eta2(i, 0) = e.x; - eta2(i, 1) = e.y; - } - }else if(clusters.cluster_size_x() == 2 || clusters.cluster_size_y() == 2){ - for (size_t i = 0; i < clusters.size(); i++) { - auto e = calculate_eta2(clusters.at(i)); - eta2(i, 0) = e.x; - eta2(i, 1) = e.y; - } - }else{ - throw std::runtime_error("Only 3x3 and 2x2 clusters are supported"); - } - - return eta2; -} - -/** - * @brief Calculate the eta2 values for a 3x3 cluster and return them in a Eta2 struct - * containing etay, etax and the corner of the cluster. 
-*/ -Eta2 calculate_eta2(Cluster3x3 &cl) { - Eta2 eta{}; - - std::array tot2; - tot2[0] = cl.data[0] + cl.data[1] + cl.data[3] + cl.data[4]; - tot2[1] = cl.data[1] + cl.data[2] + cl.data[4] + cl.data[5]; - tot2[2] = cl.data[3] + cl.data[4] + cl.data[6] + cl.data[7]; - tot2[3] = cl.data[4] + cl.data[5] + cl.data[7] + cl.data[8]; - - auto c = std::max_element(tot2.begin(), tot2.end()) - tot2.begin(); - eta.sum = tot2[c]; - switch (c) { - case cBottomLeft: - if ((cl.data[3] + cl.data[4]) != 0) - eta.x = - static_cast(cl.data[4]) / (cl.data[3] + cl.data[4]); - if ((cl.data[1] + cl.data[4]) != 0) - eta.y = - static_cast(cl.data[4]) / (cl.data[1] + cl.data[4]); - eta.c = cBottomLeft; - break; - case cBottomRight: - if ((cl.data[2] + cl.data[5]) != 0) - eta.x = - static_cast(cl.data[5]) / (cl.data[4] + cl.data[5]); - if ((cl.data[1] + cl.data[4]) != 0) - eta.y = - static_cast(cl.data[4]) / (cl.data[1] + cl.data[4]); - eta.c = cBottomRight; - break; - case cTopLeft: - if ((cl.data[7] + cl.data[4]) != 0) - eta.x = - static_cast(cl.data[4]) / (cl.data[3] + cl.data[4]); - if ((cl.data[7] + cl.data[4]) != 0) - eta.y = - static_cast(cl.data[7]) / (cl.data[7] + cl.data[4]); - eta.c = cTopLeft; - break; - case cTopRight: - if ((cl.data[5] + cl.data[4]) != 0) - eta.x = - static_cast(cl.data[5]) / (cl.data[5] + cl.data[4]); - if ((cl.data[7] + cl.data[4]) != 0) - eta.y = - static_cast(cl.data[7]) / (cl.data[7] + cl.data[4]); - eta.c = cTopRight; - break; - // no default to allow compiler to warn about missing cases - } - return eta; -} - - -Eta2 calculate_eta2(Cluster2x2 &cl) { - Eta2 eta{}; - if ((cl.data[0] + cl.data[1]) != 0) - eta.x = static_cast(cl.data[1]) / (cl.data[0] + cl.data[1]); - if ((cl.data[0] + cl.data[2]) != 0) - eta.y = static_cast(cl.data[2]) / (cl.data[0] + cl.data[2]); - eta.sum = cl.data[0] + cl.data[1] + cl.data[2]+ cl.data[3]; - eta.c = cBottomLeft; //TODO! 
This is not correct, but need to put something - return eta; -} - - -} // namespace aare \ No newline at end of file diff --git a/src/Interpolator.cpp b/src/Interpolator.cpp index ecc3e61..4bc2b34 100644 --- a/src/Interpolator.cpp +++ b/src/Interpolator.cpp @@ -53,90 +53,4 @@ Interpolator::Interpolator(NDView etacube, NDView xbins, } } -<<<<<<< HEAD -======= -std::vector Interpolator::interpolate(const ClusterVector& clusters) { - std::vector photons; - photons.reserve(clusters.size()); - - if (clusters.cluster_size_x() == 3 || clusters.cluster_size_y() == 3) { - for (size_t i = 0; i(i); - Eta2 eta= calculate_eta2(cluster); - - Photon photon; - photon.x = cluster.x; - photon.y = cluster.y; - photon.energy = eta.sum; - - - //Finding the index of the last element that is smaller - //should work fine as long as we have many bins - auto ie = last_smaller(m_energy_bins, photon.energy); - auto ix = last_smaller(m_etabinsx, eta.x); - auto iy = last_smaller(m_etabinsy, eta.y); - - double dX{}, dY{}; - // cBottomLeft = 0, - // cBottomRight = 1, - // cTopLeft = 2, - // cTopRight = 3 - switch (eta.c) { - case cTopLeft: - dX = -1.; - dY = 0.; - break; - case cTopRight:; - dX = 0.; - dY = 0.; - break; - case cBottomLeft: - dX = -1.; - dY = -1.; - break; - case cBottomRight: - dX = 0.; - dY = -1.; - break; - } - photon.x += m_ietax(ix, iy, ie)*2 + dX; - photon.y += m_ietay(ix, iy, ie)*2 + dY; - photons.push_back(photon); - } - }else if(clusters.cluster_size_x() == 2 || clusters.cluster_size_y() == 2){ - for (size_t i = 0; i(i); - Eta2 eta= calculate_eta2(cluster); - - Photon photon; - photon.x = cluster.x; - photon.y = cluster.y; - photon.energy = eta.sum; - - //Now do some actual interpolation. 
- //Find which energy bin the cluster is in - // auto ie = nearest_index(m_energy_bins, photon.energy)-1; - // auto ix = nearest_index(m_etabinsx, eta.x)-1; - // auto iy = nearest_index(m_etabinsy, eta.y)-1; - //Finding the index of the last element that is smaller - //should work fine as long as we have many bins - auto ie = last_smaller(m_energy_bins, photon.energy); - auto ix = last_smaller(m_etabinsx, eta.x); - auto iy = last_smaller(m_etabinsy, eta.y); - - photon.x += m_ietax(ix, iy, ie)*2; //eta goes between 0 and 1 but we could move the hit anywhere in the 2x2 - photon.y += m_ietay(ix, iy, ie)*2; - photons.push_back(photon); - } - - }else{ - throw std::runtime_error("Only 3x3 and 2x2 clusters are supported for interpolation"); - } - - - return photons; -} - ->>>>>>> developer } // namespace aare \ No newline at end of file From 113f34cc98457f135456578ccdde3eb9430da261 Mon Sep 17 00:00:00 2001 From: froejdh_e Date: Thu, 10 Apr 2025 16:50:04 +0200 Subject: [PATCH 085/120] fixes --- include/aare/ClusterFile.hpp | 2 +- include/aare/ClusterVector.hpp | 2 +- python/src/cluster.hpp | 3 +- python/src/np_helper.hpp | 4 +- src/ClusterFile.test.cpp | 114 +++++++++++++++++++++++++++++++++ src/ClusterVector.test.cpp | 24 +++++++ 6 files changed, 144 insertions(+), 5 deletions(-) diff --git a/include/aare/ClusterFile.hpp b/include/aare/ClusterFile.hpp index ff5d338..7248dc2 100644 --- a/include/aare/ClusterFile.hpp +++ b/include/aare/ClusterFile.hpp @@ -236,7 +236,7 @@ ClusterFile::read_clusters_without_cut(size_t n_clusters) { uint32_t nph = m_num_left; // number of clusters in frame needs to be 4 // auto buf = reinterpret_cast(clusters.data()); - auto buf = clusters.data(); + auto buf = reinterpret_cast(clusters.data()); // if there are photons left from previous frame read them first if (nph) { if (nph > n_clusters) { diff --git a/include/aare/ClusterVector.hpp b/include/aare/ClusterVector.hpp index 13ec882..eae2118 100644 --- a/include/aare/ClusterVector.hpp +++ 
b/include/aare/ClusterVector.hpp @@ -299,7 +299,7 @@ class ClusterVector> { */ ClusterVector(size_t capacity = 1024, uint64_t frame_number = 0) : m_frame_number(frame_number) { - m_data.reserve(capacity); + m_data.resize(capacity); } // Move constructor diff --git a/python/src/cluster.hpp b/python/src/cluster.hpp index f6d3636..755f595 100644 --- a/python/src/cluster.hpp +++ b/python/src/cluster.hpp @@ -80,11 +80,12 @@ void define_cluster_vector(py::module &m, const std::string &typestr) { self.push_back(cluster); }) + // implement push_back .def_property_readonly("size", &ClusterVector::size) .def("item_size", &ClusterVector::item_size) .def_property_readonly("fmt", - [typestr]() { return fmt_format; }) + [typestr](ClusterVector &self) { return fmt_format; }) .def_property_readonly("cluster_size_x", &ClusterVector::cluster_size_x) diff --git a/python/src/np_helper.hpp b/python/src/np_helper.hpp index 98be52f..3d3ee3c 100644 --- a/python/src/np_helper.hpp +++ b/python/src/np_helper.hpp @@ -74,10 +74,10 @@ template > { static std::string value() { - return fmt::format("T{{{}:x;{}:y;{}:data;}}", + return fmt::format("T{{{}:x:{}:y:{}:data:}}", py::format_descriptor::format(), py::format_descriptor::format(), - fmt::format("{}{}", ClusterSizeX * ClusterSizeY, + fmt::format("({},{}){}", ClusterSizeX, ClusterSizeY, py::format_descriptor::format())); } }; diff --git a/src/ClusterFile.test.cpp b/src/ClusterFile.test.cpp index f689b69..d5fdf7c 100644 --- a/src/ClusterFile.test.cpp +++ b/src/ClusterFile.test.cpp @@ -47,6 +47,106 @@ TEST_CASE("Read one frame using ROI", "[.files]") { TEST_CASE("Read clusters from single frame file", "[.files]") { +// frame_number, num_clusters [135] 97 +// [ 1 200] [0 1 2 3 4 5 6 7 8] +// [ 2 201] [ 9 10 11 12 13 14 15 16 17] +// [ 3 202] [18 19 20 21 22 23 24 25 26] +// [ 4 203] [27 28 29 30 31 32 33 34 35] +// [ 5 204] [36 37 38 39 40 41 42 43 44] +// [ 6 205] [45 46 47 48 49 50 51 52 53] +// [ 7 206] [54 55 56 57 58 59 60 61 62] +// [ 8 
207] [63 64 65 66 67 68 69 70 71] +// [ 9 208] [72 73 74 75 76 77 78 79 80] +// [ 10 209] [81 82 83 84 85 86 87 88 89] +// [ 11 210] [90 91 92 93 94 95 96 97 98] +// [ 12 211] [ 99 100 101 102 103 104 105 106 107] +// [ 13 212] [108 109 110 111 112 113 114 115 116] +// [ 14 213] [117 118 119 120 121 122 123 124 125] +// [ 15 214] [126 127 128 129 130 131 132 133 134] +// [ 16 215] [135 136 137 138 139 140 141 142 143] +// [ 17 216] [144 145 146 147 148 149 150 151 152] +// [ 18 217] [153 154 155 156 157 158 159 160 161] +// [ 19 218] [162 163 164 165 166 167 168 169 170] +// [ 20 219] [171 172 173 174 175 176 177 178 179] +// [ 21 220] [180 181 182 183 184 185 186 187 188] +// [ 22 221] [189 190 191 192 193 194 195 196 197] +// [ 23 222] [198 199 200 201 202 203 204 205 206] +// [ 24 223] [207 208 209 210 211 212 213 214 215] +// [ 25 224] [216 217 218 219 220 221 222 223 224] +// [ 26 225] [225 226 227 228 229 230 231 232 233] +// [ 27 226] [234 235 236 237 238 239 240 241 242] +// [ 28 227] [243 244 245 246 247 248 249 250 251] +// [ 29 228] [252 253 254 255 256 257 258 259 260] +// [ 30 229] [261 262 263 264 265 266 267 268 269] +// [ 31 230] [270 271 272 273 274 275 276 277 278] +// [ 32 231] [279 280 281 282 283 284 285 286 287] +// [ 33 232] [288 289 290 291 292 293 294 295 296] +// [ 34 233] [297 298 299 300 301 302 303 304 305] +// [ 35 234] [306 307 308 309 310 311 312 313 314] +// [ 36 235] [315 316 317 318 319 320 321 322 323] +// [ 37 236] [324 325 326 327 328 329 330 331 332] +// [ 38 237] [333 334 335 336 337 338 339 340 341] +// [ 39 238] [342 343 344 345 346 347 348 349 350] +// [ 40 239] [351 352 353 354 355 356 357 358 359] +// [ 41 240] [360 361 362 363 364 365 366 367 368] +// [ 42 241] [369 370 371 372 373 374 375 376 377] +// [ 43 242] [378 379 380 381 382 383 384 385 386] +// [ 44 243] [387 388 389 390 391 392 393 394 395] +// [ 45 244] [396 397 398 399 400 401 402 403 404] +// [ 46 245] [405 406 407 408 409 410 411 412 413] +// [ 47 246] 
[414 415 416 417 418 419 420 421 422] +// [ 48 247] [423 424 425 426 427 428 429 430 431] +// [ 49 248] [432 433 434 435 436 437 438 439 440] +// [ 50 249] [441 442 443 444 445 446 447 448 449] +// [ 51 250] [450 451 452 453 454 455 456 457 458] +// [ 52 251] [459 460 461 462 463 464 465 466 467] +// [ 53 252] [468 469 470 471 472 473 474 475 476] +// [ 54 253] [477 478 479 480 481 482 483 484 485] +// [ 55 254] [486 487 488 489 490 491 492 493 494] +// [ 56 255] [495 496 497 498 499 500 501 502 503] +// [ 57 256] [504 505 506 507 508 509 510 511 512] +// [ 58 257] [513 514 515 516 517 518 519 520 521] +// [ 59 258] [522 523 524 525 526 527 528 529 530] +// [ 60 259] [531 532 533 534 535 536 537 538 539] +// [ 61 260] [540 541 542 543 544 545 546 547 548] +// [ 62 261] [549 550 551 552 553 554 555 556 557] +// [ 63 262] [558 559 560 561 562 563 564 565 566] +// [ 64 263] [567 568 569 570 571 572 573 574 575] +// [ 65 264] [576 577 578 579 580 581 582 583 584] +// [ 66 265] [585 586 587 588 589 590 591 592 593] +// [ 67 266] [594 595 596 597 598 599 600 601 602] +// [ 68 267] [603 604 605 606 607 608 609 610 611] +// [ 69 268] [612 613 614 615 616 617 618 619 620] +// [ 70 269] [621 622 623 624 625 626 627 628 629] +// [ 71 270] [630 631 632 633 634 635 636 637 638] +// [ 72 271] [639 640 641 642 643 644 645 646 647] +// [ 73 272] [648 649 650 651 652 653 654 655 656] +// [ 74 273] [657 658 659 660 661 662 663 664 665] +// [ 75 274] [666 667 668 669 670 671 672 673 674] +// [ 76 275] [675 676 677 678 679 680 681 682 683] +// [ 77 276] [684 685 686 687 688 689 690 691 692] +// [ 78 277] [693 694 695 696 697 698 699 700 701] +// [ 79 278] [702 703 704 705 706 707 708 709 710] +// [ 80 279] [711 712 713 714 715 716 717 718 719] +// [ 81 280] [720 721 722 723 724 725 726 727 728] +// [ 82 281] [729 730 731 732 733 734 735 736 737] +// [ 83 282] [738 739 740 741 742 743 744 745 746] +// [ 84 283] [747 748 749 750 751 752 753 754 755] +// [ 85 284] [756 757 758 759 760 
761 762 763 764] +// [ 86 285] [765 766 767 768 769 770 771 772 773] +// [ 87 286] [774 775 776 777 778 779 780 781 782] +// [ 88 287] [783 784 785 786 787 788 789 790 791] +// [ 89 288] [792 793 794 795 796 797 798 799 800] +// [ 90 289] [801 802 803 804 805 806 807 808 809] +// [ 91 290] [810 811 812 813 814 815 816 817 818] +// [ 92 291] [819 820 821 822 823 824 825 826 827] +// [ 93 292] [828 829 830 831 832 833 834 835 836] +// [ 94 293] [837 838 839 840 841 842 843 844 845] +// [ 95 294] [846 847 848 849 850 851 852 853 854] +// [ 96 295] [855 856 857 858 859 860 861 862 863] +// [ 97 296] [864 865 866 867 868 869 870 871 872] + + auto fpath = test_data_path() / "clust" / "single_frame_97_clustrers.clust"; REQUIRE(std::filesystem::exists(fpath)); @@ -68,5 +168,19 @@ TEST_CASE("Read clusters from single frame file", "[.files]") { auto clusters = f.read_clusters(97); REQUIRE(clusters.size() == 97); REQUIRE(clusters.frame_number() == 135); + + REQUIRE(clusters.at(0).x == 1); + REQUIRE(clusters.at(0).y == 200); } } + + + +TEST_CASE("Read clusters", "[.files]"){ + // beam_En700eV_-40deg_300V_10us_d0_f0_100.clust + auto fpath = test_data_path() / "clust" / "beam_En700eV_-40deg_300V_10us_d0_f0_100.clust"; + REQUIRE(std::filesystem::exists(fpath)); + + ClusterFile> f(fpath); + auto clusters = f.read_clusters(500); +} diff --git a/src/ClusterVector.test.cpp b/src/ClusterVector.test.cpp index 1880355..5a5abe0 100644 --- a/src/ClusterVector.test.cpp +++ b/src/ClusterVector.test.cpp @@ -8,6 +8,30 @@ using aare::Cluster; using aare::ClusterVector; + +TEST_CASE("item_size return the size of the cluster stored"){ + using C1 = Cluster; + ClusterVector cv(4); + CHECK(cv.item_size() == sizeof(C1)); + + //Sanity check + //2*2*4 = 16 bytes of data for the cluster + // 2*2 = 4 bytes for the x and y coordinates + REQUIRE(cv.item_size() == 20); + + using C2 = Cluster; + ClusterVector cv2(4); + CHECK(cv2.item_size() == sizeof(C2)); + + using C3 = Cluster; + ClusterVector cv3(4); + 
CHECK(cv3.item_size() == sizeof(C3)); + + using C4 = Cluster; + ClusterVector cv4(4); + CHECK(cv4.item_size() == sizeof(C4)); +} + TEST_CASE("ClusterVector 2x2 int32_t capacity 4, push back then read", "[.ClusterVector]") { From 92f5421481c05634fd54e018245d8f5530721f28 Mon Sep 17 00:00:00 2001 From: froejdh_e Date: Thu, 10 Apr 2025 16:58:47 +0200 Subject: [PATCH 086/120] np test --- python/tests/test_Cluster.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/python/tests/test_Cluster.py b/python/tests/test_Cluster.py index 29d5ad9..e24bcf8 100644 --- a/python/tests/test_Cluster.py +++ b/python/tests/test_Cluster.py @@ -5,6 +5,12 @@ import aare._aare as aare from conftest import test_data_path +def test_cluster_vector_can_be_converted_to_numpy(): + cv = aare.ClusterVector_Cluster3x3i() + arr = np.array(cv, copy=False) + assert arr.shape == (0,) # 4 for x, y, size, energy and 9 for the cluster data + + def test_ClusterVector(): """Test ClusterVector""" From e71569b15ec3385212fcbb5b6ec6fc8dea6c0386 Mon Sep 17 00:00:00 2001 From: froejdh_e Date: Fri, 11 Apr 2025 13:38:33 +0200 Subject: [PATCH 087/120] resize before read --- include/aare/ClusterFile.hpp | 3 ++- include/aare/ClusterVector.hpp | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/include/aare/ClusterFile.hpp b/include/aare/ClusterFile.hpp index 7248dc2..f58d2d6 100644 --- a/include/aare/ClusterFile.hpp +++ b/include/aare/ClusterFile.hpp @@ -229,6 +229,7 @@ ClusterFile::read_clusters_without_cut(size_t n_clusters) { } ClusterVector clusters(n_clusters); + clusters.resize(n_clusters); int32_t iframe = 0; // frame number needs to be 4 bytes! 
size_t nph_read = 0; @@ -283,7 +284,7 @@ template ClusterVector ClusterFile::read_clusters_with_cut(size_t n_clusters) { ClusterVector clusters; - clusters.reserve(n_clusters); + clusters.resize(n_clusters); // if there are photons left from previous frame read them first if (m_num_left) { diff --git a/include/aare/ClusterVector.hpp b/include/aare/ClusterVector.hpp index eae2118..13ec882 100644 --- a/include/aare/ClusterVector.hpp +++ b/include/aare/ClusterVector.hpp @@ -299,7 +299,7 @@ class ClusterVector> { */ ClusterVector(size_t capacity = 1024, uint64_t frame_number = 0) : m_frame_number(frame_number) { - m_data.resize(capacity); + m_data.reserve(capacity); } // Move constructor From 15e52565a99aae6be03b41088f7034b2376ac348 Mon Sep 17 00:00:00 2001 From: Mazzoleni Alice Francesca Date: Fri, 11 Apr 2025 14:35:20 +0200 Subject: [PATCH 088/120] dont convert to byte --- include/aare/ClusterFile.hpp | 10 +- include/aare/ClusterVector.hpp | 2 +- python/src/cluster.hpp | 9 +- src/ClusterFile.test.cpp | 204 ++++++++++++++++----------------- 4 files changed, 113 insertions(+), 112 deletions(-) diff --git a/include/aare/ClusterFile.hpp b/include/aare/ClusterFile.hpp index f58d2d6..45df8a0 100644 --- a/include/aare/ClusterFile.hpp +++ b/include/aare/ClusterFile.hpp @@ -236,8 +236,7 @@ ClusterFile::read_clusters_without_cut(size_t n_clusters) { uint32_t nn = m_num_left; uint32_t nph = m_num_left; // number of clusters in frame needs to be 4 - // auto buf = reinterpret_cast(clusters.data()); - auto buf = reinterpret_cast(clusters.data()); + auto buf = clusters.data(); // if there are photons left from previous frame read them first if (nph) { if (nph > n_clusters) { @@ -247,8 +246,7 @@ ClusterFile::read_clusters_without_cut(size_t n_clusters) { } else { nn = nph; } - nph_read += fread((buf + nph_read * clusters.item_size()), - clusters.item_size(), nn, fp); + nph_read += fread((buf + nph_read), clusters.item_size(), nn, fp); m_num_left = nph - nn; // write back the number 
of photons left } @@ -263,8 +261,8 @@ ClusterFile::read_clusters_without_cut(size_t n_clusters) { else nn = nph; - nph_read += fread((buf + nph_read * clusters.item_size()), - clusters.item_size(), nn, fp); + nph_read += + fread((buf + nph_read), clusters.item_size(), nn, fp); m_num_left = nph - nn; } if (nph_read >= n_clusters) diff --git a/include/aare/ClusterVector.hpp b/include/aare/ClusterVector.hpp index 13ec882..22315cc 100644 --- a/include/aare/ClusterVector.hpp +++ b/include/aare/ClusterVector.hpp @@ -297,7 +297,7 @@ class ClusterVector> { * @param frame_number frame number of the clusters. Default is 0, which is * also used to indicate that the clusters come from many frames */ - ClusterVector(size_t capacity = 1024, uint64_t frame_number = 0) + ClusterVector(size_t capacity = 300, uint64_t frame_number = 0) : m_frame_number(frame_number) { m_data.reserve(capacity); } diff --git a/python/src/cluster.hpp b/python/src/cluster.hpp index 755f595..a06bcdd 100644 --- a/python/src/cluster.hpp +++ b/python/src/cluster.hpp @@ -18,6 +18,9 @@ using pd_type = double; using namespace aare; +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wunused-parameter" + template void define_cluster(py::module &m, const std::string &typestr) { @@ -80,12 +83,13 @@ void define_cluster_vector(py::module &m, const std::string &typestr) { self.push_back(cluster); }) - // implement push_back .def_property_readonly("size", &ClusterVector::size) .def("item_size", &ClusterVector::item_size) .def_property_readonly("fmt", - [typestr](ClusterVector &self) { return fmt_format; }) + [typestr](ClusterVector &self) { + return fmt_format; + }) .def_property_readonly("cluster_size_x", &ClusterVector::cluster_size_x) @@ -269,3 +273,4 @@ void define_cluster_finder_bindings(py::module &m, const std::string &typestr) { return hitmap; }); } +#pragma GCC diagnostic pop diff --git a/src/ClusterFile.test.cpp b/src/ClusterFile.test.cpp index d5fdf7c..1ee54e7 100644 --- 
a/src/ClusterFile.test.cpp +++ b/src/ClusterFile.test.cpp @@ -47,105 +47,104 @@ TEST_CASE("Read one frame using ROI", "[.files]") { TEST_CASE("Read clusters from single frame file", "[.files]") { -// frame_number, num_clusters [135] 97 -// [ 1 200] [0 1 2 3 4 5 6 7 8] -// [ 2 201] [ 9 10 11 12 13 14 15 16 17] -// [ 3 202] [18 19 20 21 22 23 24 25 26] -// [ 4 203] [27 28 29 30 31 32 33 34 35] -// [ 5 204] [36 37 38 39 40 41 42 43 44] -// [ 6 205] [45 46 47 48 49 50 51 52 53] -// [ 7 206] [54 55 56 57 58 59 60 61 62] -// [ 8 207] [63 64 65 66 67 68 69 70 71] -// [ 9 208] [72 73 74 75 76 77 78 79 80] -// [ 10 209] [81 82 83 84 85 86 87 88 89] -// [ 11 210] [90 91 92 93 94 95 96 97 98] -// [ 12 211] [ 99 100 101 102 103 104 105 106 107] -// [ 13 212] [108 109 110 111 112 113 114 115 116] -// [ 14 213] [117 118 119 120 121 122 123 124 125] -// [ 15 214] [126 127 128 129 130 131 132 133 134] -// [ 16 215] [135 136 137 138 139 140 141 142 143] -// [ 17 216] [144 145 146 147 148 149 150 151 152] -// [ 18 217] [153 154 155 156 157 158 159 160 161] -// [ 19 218] [162 163 164 165 166 167 168 169 170] -// [ 20 219] [171 172 173 174 175 176 177 178 179] -// [ 21 220] [180 181 182 183 184 185 186 187 188] -// [ 22 221] [189 190 191 192 193 194 195 196 197] -// [ 23 222] [198 199 200 201 202 203 204 205 206] -// [ 24 223] [207 208 209 210 211 212 213 214 215] -// [ 25 224] [216 217 218 219 220 221 222 223 224] -// [ 26 225] [225 226 227 228 229 230 231 232 233] -// [ 27 226] [234 235 236 237 238 239 240 241 242] -// [ 28 227] [243 244 245 246 247 248 249 250 251] -// [ 29 228] [252 253 254 255 256 257 258 259 260] -// [ 30 229] [261 262 263 264 265 266 267 268 269] -// [ 31 230] [270 271 272 273 274 275 276 277 278] -// [ 32 231] [279 280 281 282 283 284 285 286 287] -// [ 33 232] [288 289 290 291 292 293 294 295 296] -// [ 34 233] [297 298 299 300 301 302 303 304 305] -// [ 35 234] [306 307 308 309 310 311 312 313 314] -// [ 36 235] [315 316 317 318 319 320 321 322 323] -// [ 37 
236] [324 325 326 327 328 329 330 331 332] -// [ 38 237] [333 334 335 336 337 338 339 340 341] -// [ 39 238] [342 343 344 345 346 347 348 349 350] -// [ 40 239] [351 352 353 354 355 356 357 358 359] -// [ 41 240] [360 361 362 363 364 365 366 367 368] -// [ 42 241] [369 370 371 372 373 374 375 376 377] -// [ 43 242] [378 379 380 381 382 383 384 385 386] -// [ 44 243] [387 388 389 390 391 392 393 394 395] -// [ 45 244] [396 397 398 399 400 401 402 403 404] -// [ 46 245] [405 406 407 408 409 410 411 412 413] -// [ 47 246] [414 415 416 417 418 419 420 421 422] -// [ 48 247] [423 424 425 426 427 428 429 430 431] -// [ 49 248] [432 433 434 435 436 437 438 439 440] -// [ 50 249] [441 442 443 444 445 446 447 448 449] -// [ 51 250] [450 451 452 453 454 455 456 457 458] -// [ 52 251] [459 460 461 462 463 464 465 466 467] -// [ 53 252] [468 469 470 471 472 473 474 475 476] -// [ 54 253] [477 478 479 480 481 482 483 484 485] -// [ 55 254] [486 487 488 489 490 491 492 493 494] -// [ 56 255] [495 496 497 498 499 500 501 502 503] -// [ 57 256] [504 505 506 507 508 509 510 511 512] -// [ 58 257] [513 514 515 516 517 518 519 520 521] -// [ 59 258] [522 523 524 525 526 527 528 529 530] -// [ 60 259] [531 532 533 534 535 536 537 538 539] -// [ 61 260] [540 541 542 543 544 545 546 547 548] -// [ 62 261] [549 550 551 552 553 554 555 556 557] -// [ 63 262] [558 559 560 561 562 563 564 565 566] -// [ 64 263] [567 568 569 570 571 572 573 574 575] -// [ 65 264] [576 577 578 579 580 581 582 583 584] -// [ 66 265] [585 586 587 588 589 590 591 592 593] -// [ 67 266] [594 595 596 597 598 599 600 601 602] -// [ 68 267] [603 604 605 606 607 608 609 610 611] -// [ 69 268] [612 613 614 615 616 617 618 619 620] -// [ 70 269] [621 622 623 624 625 626 627 628 629] -// [ 71 270] [630 631 632 633 634 635 636 637 638] -// [ 72 271] [639 640 641 642 643 644 645 646 647] -// [ 73 272] [648 649 650 651 652 653 654 655 656] -// [ 74 273] [657 658 659 660 661 662 663 664 665] -// [ 75 274] [666 667 668 669 
670 671 672 673 674] -// [ 76 275] [675 676 677 678 679 680 681 682 683] -// [ 77 276] [684 685 686 687 688 689 690 691 692] -// [ 78 277] [693 694 695 696 697 698 699 700 701] -// [ 79 278] [702 703 704 705 706 707 708 709 710] -// [ 80 279] [711 712 713 714 715 716 717 718 719] -// [ 81 280] [720 721 722 723 724 725 726 727 728] -// [ 82 281] [729 730 731 732 733 734 735 736 737] -// [ 83 282] [738 739 740 741 742 743 744 745 746] -// [ 84 283] [747 748 749 750 751 752 753 754 755] -// [ 85 284] [756 757 758 759 760 761 762 763 764] -// [ 86 285] [765 766 767 768 769 770 771 772 773] -// [ 87 286] [774 775 776 777 778 779 780 781 782] -// [ 88 287] [783 784 785 786 787 788 789 790 791] -// [ 89 288] [792 793 794 795 796 797 798 799 800] -// [ 90 289] [801 802 803 804 805 806 807 808 809] -// [ 91 290] [810 811 812 813 814 815 816 817 818] -// [ 92 291] [819 820 821 822 823 824 825 826 827] -// [ 93 292] [828 829 830 831 832 833 834 835 836] -// [ 94 293] [837 838 839 840 841 842 843 844 845] -// [ 95 294] [846 847 848 849 850 851 852 853 854] -// [ 96 295] [855 856 857 858 859 860 861 862 863] -// [ 97 296] [864 865 866 867 868 869 870 871 872] - + // frame_number, num_clusters [135] 97 + // [ 1 200] [0 1 2 3 4 5 6 7 8] + // [ 2 201] [ 9 10 11 12 13 14 15 16 17] + // [ 3 202] [18 19 20 21 22 23 24 25 26] + // [ 4 203] [27 28 29 30 31 32 33 34 35] + // [ 5 204] [36 37 38 39 40 41 42 43 44] + // [ 6 205] [45 46 47 48 49 50 51 52 53] + // [ 7 206] [54 55 56 57 58 59 60 61 62] + // [ 8 207] [63 64 65 66 67 68 69 70 71] + // [ 9 208] [72 73 74 75 76 77 78 79 80] + // [ 10 209] [81 82 83 84 85 86 87 88 89] + // [ 11 210] [90 91 92 93 94 95 96 97 98] + // [ 12 211] [ 99 100 101 102 103 104 105 106 107] + // [ 13 212] [108 109 110 111 112 113 114 115 116] + // [ 14 213] [117 118 119 120 121 122 123 124 125] + // [ 15 214] [126 127 128 129 130 131 132 133 134] + // [ 16 215] [135 136 137 138 139 140 141 142 143] + // [ 17 216] [144 145 146 147 148 149 150 151 152] + // [ 
18 217] [153 154 155 156 157 158 159 160 161] + // [ 19 218] [162 163 164 165 166 167 168 169 170] + // [ 20 219] [171 172 173 174 175 176 177 178 179] + // [ 21 220] [180 181 182 183 184 185 186 187 188] + // [ 22 221] [189 190 191 192 193 194 195 196 197] + // [ 23 222] [198 199 200 201 202 203 204 205 206] + // [ 24 223] [207 208 209 210 211 212 213 214 215] + // [ 25 224] [216 217 218 219 220 221 222 223 224] + // [ 26 225] [225 226 227 228 229 230 231 232 233] + // [ 27 226] [234 235 236 237 238 239 240 241 242] + // [ 28 227] [243 244 245 246 247 248 249 250 251] + // [ 29 228] [252 253 254 255 256 257 258 259 260] + // [ 30 229] [261 262 263 264 265 266 267 268 269] + // [ 31 230] [270 271 272 273 274 275 276 277 278] + // [ 32 231] [279 280 281 282 283 284 285 286 287] + // [ 33 232] [288 289 290 291 292 293 294 295 296] + // [ 34 233] [297 298 299 300 301 302 303 304 305] + // [ 35 234] [306 307 308 309 310 311 312 313 314] + // [ 36 235] [315 316 317 318 319 320 321 322 323] + // [ 37 236] [324 325 326 327 328 329 330 331 332] + // [ 38 237] [333 334 335 336 337 338 339 340 341] + // [ 39 238] [342 343 344 345 346 347 348 349 350] + // [ 40 239] [351 352 353 354 355 356 357 358 359] + // [ 41 240] [360 361 362 363 364 365 366 367 368] + // [ 42 241] [369 370 371 372 373 374 375 376 377] + // [ 43 242] [378 379 380 381 382 383 384 385 386] + // [ 44 243] [387 388 389 390 391 392 393 394 395] + // [ 45 244] [396 397 398 399 400 401 402 403 404] + // [ 46 245] [405 406 407 408 409 410 411 412 413] + // [ 47 246] [414 415 416 417 418 419 420 421 422] + // [ 48 247] [423 424 425 426 427 428 429 430 431] + // [ 49 248] [432 433 434 435 436 437 438 439 440] + // [ 50 249] [441 442 443 444 445 446 447 448 449] + // [ 51 250] [450 451 452 453 454 455 456 457 458] + // [ 52 251] [459 460 461 462 463 464 465 466 467] + // [ 53 252] [468 469 470 471 472 473 474 475 476] + // [ 54 253] [477 478 479 480 481 482 483 484 485] + // [ 55 254] [486 487 488 489 490 491 492 
493 494] + // [ 56 255] [495 496 497 498 499 500 501 502 503] + // [ 57 256] [504 505 506 507 508 509 510 511 512] + // [ 58 257] [513 514 515 516 517 518 519 520 521] + // [ 59 258] [522 523 524 525 526 527 528 529 530] + // [ 60 259] [531 532 533 534 535 536 537 538 539] + // [ 61 260] [540 541 542 543 544 545 546 547 548] + // [ 62 261] [549 550 551 552 553 554 555 556 557] + // [ 63 262] [558 559 560 561 562 563 564 565 566] + // [ 64 263] [567 568 569 570 571 572 573 574 575] + // [ 65 264] [576 577 578 579 580 581 582 583 584] + // [ 66 265] [585 586 587 588 589 590 591 592 593] + // [ 67 266] [594 595 596 597 598 599 600 601 602] + // [ 68 267] [603 604 605 606 607 608 609 610 611] + // [ 69 268] [612 613 614 615 616 617 618 619 620] + // [ 70 269] [621 622 623 624 625 626 627 628 629] + // [ 71 270] [630 631 632 633 634 635 636 637 638] + // [ 72 271] [639 640 641 642 643 644 645 646 647] + // [ 73 272] [648 649 650 651 652 653 654 655 656] + // [ 74 273] [657 658 659 660 661 662 663 664 665] + // [ 75 274] [666 667 668 669 670 671 672 673 674] + // [ 76 275] [675 676 677 678 679 680 681 682 683] + // [ 77 276] [684 685 686 687 688 689 690 691 692] + // [ 78 277] [693 694 695 696 697 698 699 700 701] + // [ 79 278] [702 703 704 705 706 707 708 709 710] + // [ 80 279] [711 712 713 714 715 716 717 718 719] + // [ 81 280] [720 721 722 723 724 725 726 727 728] + // [ 82 281] [729 730 731 732 733 734 735 736 737] + // [ 83 282] [738 739 740 741 742 743 744 745 746] + // [ 84 283] [747 748 749 750 751 752 753 754 755] + // [ 85 284] [756 757 758 759 760 761 762 763 764] + // [ 86 285] [765 766 767 768 769 770 771 772 773] + // [ 87 286] [774 775 776 777 778 779 780 781 782] + // [ 88 287] [783 784 785 786 787 788 789 790 791] + // [ 89 288] [792 793 794 795 796 797 798 799 800] + // [ 90 289] [801 802 803 804 805 806 807 808 809] + // [ 91 290] [810 811 812 813 814 815 816 817 818] + // [ 92 291] [819 820 821 822 823 824 825 826 827] + // [ 93 292] [828 829 830 
831 832 833 834 835 836] + // [ 94 293] [837 838 839 840 841 842 843 844 845] + // [ 95 294] [846 847 848 849 850 851 852 853 854] + // [ 96 295] [855 856 857 858 859 860 861 862 863] + // [ 97 296] [864 865 866 867 868 869 870 871 872] auto fpath = test_data_path() / "clust" / "single_frame_97_clustrers.clust"; REQUIRE(std::filesystem::exists(fpath)); @@ -174,11 +173,10 @@ TEST_CASE("Read clusters from single frame file", "[.files]") { } } - - -TEST_CASE("Read clusters", "[.files]"){ +TEST_CASE("Read clusters", "[.files]") { // beam_En700eV_-40deg_300V_10us_d0_f0_100.clust - auto fpath = test_data_path() / "clust" / "beam_En700eV_-40deg_300V_10us_d0_f0_100.clust"; + auto fpath = test_data_path() / "clust" / + "beam_En700eV_-40deg_300V_10us_d0_f0_100.clust"; REQUIRE(std::filesystem::exists(fpath)); ClusterFile> f(fpath); From a59e9656be7b68428f7af210d1e913e7906ac0ca Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Erik=20Fr=C3=B6jdh?= Date: Fri, 11 Apr 2025 16:54:21 +0200 Subject: [PATCH 089/120] Making RawSubFile usable from Python (#158) - Removed a printout left from debugging - return also header when reading - added read_n - check for error in ifstream --- CMakeLists.txt | 2 + include/aare/RawSubFile.hpp | 5 +- include/aare/utils/ifstream_helpers.hpp | 12 +++ python/src/file.hpp | 36 ++------ python/src/module.cpp | 2 + python/src/raw_sub_file.hpp | 110 ++++++++++++++++++++++++ python/tests/test_RawSubFile.py | 36 ++++++++ src/RawSubFile.cpp | 31 ++++++- src/utils/ifstream_helpers.cpp | 18 ++++ 9 files changed, 217 insertions(+), 35 deletions(-) create mode 100644 include/aare/utils/ifstream_helpers.hpp create mode 100644 python/src/raw_sub_file.hpp create mode 100644 python/tests/test_RawSubFile.py create mode 100644 src/utils/ifstream_helpers.cpp diff --git a/CMakeLists.txt b/CMakeLists.txt index 039545e..2f2a7b5 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -388,7 +388,9 @@ set(SourceFiles ${CMAKE_CURRENT_SOURCE_DIR}/src/RawFile.cpp 
${CMAKE_CURRENT_SOURCE_DIR}/src/RawSubFile.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/RawMasterFile.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/utils/task.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/utils/ifstream_helpers.cpp ) diff --git a/include/aare/RawSubFile.hpp b/include/aare/RawSubFile.hpp index 1d554e8..350a475 100644 --- a/include/aare/RawSubFile.hpp +++ b/include/aare/RawSubFile.hpp @@ -22,7 +22,7 @@ class RawSubFile { size_t m_rows{}; size_t m_cols{}; size_t m_bytes_per_frame{}; - size_t n_frames{}; + size_t m_num_frames{}; uint32_t m_pos_row{}; uint32_t m_pos_col{}; @@ -53,6 +53,7 @@ class RawSubFile { size_t tell(); void read_into(std::byte *image_buf, DetectorHeader *header = nullptr); + void read_into(std::byte *image_buf, size_t n_frames, DetectorHeader *header= nullptr); void get_part(std::byte *buffer, size_t frame_index); void read_header(DetectorHeader *header); @@ -66,6 +67,8 @@ class RawSubFile { size_t pixels_per_frame() const { return m_rows * m_cols; } size_t bytes_per_pixel() const { return m_bitdepth / bits_per_byte; } + size_t frames_in_file() const { return m_num_frames; } + private: template void read_with_map(std::byte *image_buf); diff --git a/include/aare/utils/ifstream_helpers.hpp b/include/aare/utils/ifstream_helpers.hpp new file mode 100644 index 0000000..0a842ed --- /dev/null +++ b/include/aare/utils/ifstream_helpers.hpp @@ -0,0 +1,12 @@ +#pragma once + +#include +#include +namespace aare { + +/** + * @brief Get the error message from an ifstream object +*/ +std::string ifstream_error_msg(std::ifstream &ifs); + +} // namespace aare \ No newline at end of file diff --git a/python/src/file.hpp b/python/src/file.hpp index 0d64e16..2d0f53e 100644 --- a/python/src/file.hpp +++ b/python/src/file.hpp @@ -20,6 +20,9 @@ namespace py = pybind11; using namespace ::aare; + + + //Disable warnings for unused parameters, as we ignore some //in the __exit__ method #pragma GCC diagnostic push @@ -214,36 +217,9 @@ void define_file_io_bindings(py::module &m) { - 
py::class_(m, "RawSubFile") - .def(py::init()) - .def_property_readonly("bytes_per_frame", &RawSubFile::bytes_per_frame) - .def_property_readonly("pixels_per_frame", - &RawSubFile::pixels_per_frame) - .def("seek", &RawSubFile::seek) - .def("tell", &RawSubFile::tell) - .def_property_readonly("rows", &RawSubFile::rows) - .def_property_readonly("cols", &RawSubFile::cols) - .def("read_frame", - [](RawSubFile &self) { - const uint8_t item_size = self.bytes_per_pixel(); - py::array image; - std::vector shape; - shape.reserve(2); - shape.push_back(self.rows()); - shape.push_back(self.cols()); - if (item_size == 1) { - image = py::array_t(shape); - } else if (item_size == 2) { - image = py::array_t(shape); - } else if (item_size == 4) { - image = py::array_t(shape); - } - fmt::print("item_size: {} rows: {} cols: {}\n", item_size, self.rows(), self.cols()); - self.read_into( - reinterpret_cast(image.mutable_data())); - return image; - }); + + + #pragma GCC diagnostic pop // py::class_(m, "ClusterHeader") diff --git a/python/src/module.cpp b/python/src/module.cpp index 7a17e78..75fe237 100644 --- a/python/src/module.cpp +++ b/python/src/module.cpp @@ -10,6 +10,7 @@ #include "cluster_file.hpp" #include "fit.hpp" #include "interpolation.hpp" +#include "raw_sub_file.hpp" #include "jungfrau_data_file.hpp" @@ -22,6 +23,7 @@ namespace py = pybind11; PYBIND11_MODULE(_aare, m) { define_file_io_bindings(m); define_raw_file_io_bindings(m); + define_raw_sub_file_io_bindings(m); define_ctb_raw_file_io_bindings(m); define_raw_master_file_bindings(m); define_var_cluster_finder_bindings(m); diff --git a/python/src/raw_sub_file.hpp b/python/src/raw_sub_file.hpp new file mode 100644 index 0000000..2cb83fc --- /dev/null +++ b/python/src/raw_sub_file.hpp @@ -0,0 +1,110 @@ +#include "aare/CtbRawFile.hpp" +#include "aare/File.hpp" +#include "aare/Frame.hpp" +#include "aare/RawFile.hpp" +#include "aare/RawMasterFile.hpp" +#include "aare/RawSubFile.hpp" + +#include "aare/defs.hpp" +// #include 
"aare/fClusterFileV2.hpp" + +#include +#include +#include +#include +#include +#include +#include +#include + +namespace py = pybind11; +using namespace ::aare; + +auto read_frame_from_RawSubFile(RawSubFile &self) { + py::array_t header(1); + const uint8_t item_size = self.bytes_per_pixel(); + std::vector shape{static_cast(self.rows()), + static_cast(self.cols())}; + + py::array image; + if (item_size == 1) { + image = py::array_t(shape); + } else if (item_size == 2) { + image = py::array_t(shape); + } else if (item_size == 4) { + image = py::array_t(shape); + } + self.read_into(reinterpret_cast(image.mutable_data()), + header.mutable_data()); + + return py::make_tuple(header, image); +} + +auto read_n_frames_from_RawSubFile(RawSubFile &self, size_t n_frames) { + py::array_t header(n_frames); + const uint8_t item_size = self.bytes_per_pixel(); + std::vector shape{ + static_cast(n_frames), + static_cast(self.rows()), + static_cast(self.cols()) + }; + + py::array image; + if (item_size == 1) { + image = py::array_t(shape); + } else if (item_size == 2) { + image = py::array_t(shape); + } else if (item_size == 4) { + image = py::array_t(shape); + } + self.read_into(reinterpret_cast(image.mutable_data()), n_frames, + header.mutable_data()); + + return py::make_tuple(header, image); +} + + +//Disable warnings for unused parameters, as we ignore some +//in the __exit__ method +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wunused-parameter" + +void define_raw_sub_file_io_bindings(py::module &m) { + py::class_(m, "RawSubFile") + .def(py::init()) + .def_property_readonly("bytes_per_frame", &RawSubFile::bytes_per_frame) + .def_property_readonly("pixels_per_frame", + &RawSubFile::pixels_per_frame) + .def_property_readonly("bytes_per_pixel", &RawSubFile::bytes_per_pixel) + .def("seek", &RawSubFile::seek) + .def("tell", &RawSubFile::tell) + .def_property_readonly("rows", &RawSubFile::rows) + .def_property_readonly("cols", &RawSubFile::cols) + 
.def_property_readonly("frames_in_file", &RawSubFile::frames_in_file) + .def("read_frame", &read_frame_from_RawSubFile) + .def("read_n", &read_n_frames_from_RawSubFile) + .def("read", [](RawSubFile &self){ + self.seek(0); + auto n_frames = self.frames_in_file(); + return read_n_frames_from_RawSubFile(self, n_frames); + }) + .def("__enter__", [](RawSubFile &self) { return &self; }) + .def("__exit__", + [](RawSubFile &self, + const std::optional &exc_type, + const std::optional &exc_value, + const std::optional &traceback) { + }) + .def("__iter__", [](RawSubFile &self) { return &self; }) + .def("__next__", [](RawSubFile &self) { + try { + return read_frame_from_RawSubFile(self); + } catch (std::runtime_error &e) { + throw py::stop_iteration(); + } + }); + +} + +#pragma GCC diagnostic pop \ No newline at end of file diff --git a/python/tests/test_RawSubFile.py b/python/tests/test_RawSubFile.py new file mode 100644 index 0000000..a5eea91 --- /dev/null +++ b/python/tests/test_RawSubFile.py @@ -0,0 +1,36 @@ +import pytest +import numpy as np +from aare import RawSubFile, DetectorType + + +@pytest.mark.files +def test_read_a_jungfrau_RawSubFile(test_data_path): + with RawSubFile(test_data_path / "raw/jungfrau/jungfrau_single_d0_f1_0.raw", DetectorType.Jungfrau, 512, 1024, 16) as f: + assert f.frames_in_file == 3 + + headers, frames = f.read() + + assert headers.size == 3 + assert frames.shape == (3, 512, 1024) + + # Frame numbers in this file should be 4, 5, 6 + for i,h in zip(range(4,7,1), headers): + assert h["frameNumber"] == i + + # Compare to canned data using numpy + data = np.load(test_data_path / "raw/jungfrau/jungfrau_single_0.npy") + assert np.all(data[3:6] == frames) + +@pytest.mark.files +def test_iterate_over_a_jungfrau_RawSubFile(test_data_path): + + data = np.load(test_data_path / "raw/jungfrau/jungfrau_single_0.npy") + + with RawSubFile(test_data_path / "raw/jungfrau/jungfrau_single_d0_f0_0.raw", DetectorType.Jungfrau, 512, 1024, 16) as f: + i = 0 + for 
header, frame in f: + assert header["frameNumber"] == i+1 + assert np.all(frame == data[i]) + i += 1 + assert i == 3 + assert header["frameNumber"] == 3 \ No newline at end of file diff --git a/src/RawSubFile.cpp b/src/RawSubFile.cpp index a3bb79c..9e7a421 100644 --- a/src/RawSubFile.cpp +++ b/src/RawSubFile.cpp @@ -1,9 +1,12 @@ #include "aare/RawSubFile.hpp" #include "aare/PixelMap.hpp" +#include "aare/utils/ifstream_helpers.hpp" #include // memcpy #include #include + + namespace aare { RawSubFile::RawSubFile(const std::filesystem::path &fname, @@ -20,7 +23,7 @@ RawSubFile::RawSubFile(const std::filesystem::path &fname, } if (std::filesystem::exists(fname)) { - n_frames = std::filesystem::file_size(fname) / + m_num_frames = std::filesystem::file_size(fname) / (sizeof(DetectorHeader) + rows * cols * bitdepth / 8); } else { throw std::runtime_error( @@ -35,7 +38,7 @@ RawSubFile::RawSubFile(const std::filesystem::path &fname, } #ifdef AARE_VERBOSE - fmt::print("Opened file: {} with {} frames\n", m_fname.string(), n_frames); + fmt::print("Opened file: {} with {} frames\n", m_fname.string(), m_num_frames); fmt::print("m_rows: {}, m_cols: {}, m_bitdepth: {}\n", m_rows, m_cols, m_bitdepth); fmt::print("file size: {}\n", std::filesystem::file_size(fname)); @@ -43,8 +46,8 @@ RawSubFile::RawSubFile(const std::filesystem::path &fname, } void RawSubFile::seek(size_t frame_index) { - if (frame_index >= n_frames) { - throw std::runtime_error(LOCATION + fmt::format("Frame index {} out of range in a file with {} frames", frame_index, n_frames)); + if (frame_index >= m_num_frames) { + throw std::runtime_error(LOCATION + fmt::format("Frame index {} out of range in a file with {} frames", frame_index, m_num_frames)); } m_file.seekg((sizeof(DetectorHeader) + bytes_per_frame()) * frame_index); } @@ -60,6 +63,10 @@ void RawSubFile::read_into(std::byte *image_buf, DetectorHeader *header) { m_file.seekg(sizeof(DetectorHeader), std::ios::cur); } + if (m_file.fail()){ + throw 
std::runtime_error(LOCATION + ifstream_error_msg(m_file)); + } + // TODO! expand support for different bitdepths if (m_pixel_map) { // read into a temporary buffer and then copy the data to the buffer @@ -79,8 +86,24 @@ void RawSubFile::read_into(std::byte *image_buf, DetectorHeader *header) { // read directly into the buffer m_file.read(reinterpret_cast(image_buf), bytes_per_frame()); } + + if (m_file.fail()){ + throw std::runtime_error(LOCATION + ifstream_error_msg(m_file)); + } } +void RawSubFile::read_into(std::byte *image_buf, size_t n_frames, DetectorHeader *header) { + for (size_t i = 0; i < n_frames; i++) { + read_into(image_buf, header); + image_buf += bytes_per_frame(); + if (header) { + ++header; + } + } +} + + + template void RawSubFile::read_with_map(std::byte *image_buf) { auto part_buffer = new std::byte[bytes_per_frame()]; diff --git a/src/utils/ifstream_helpers.cpp b/src/utils/ifstream_helpers.cpp new file mode 100644 index 0000000..74c56f3 --- /dev/null +++ b/src/utils/ifstream_helpers.cpp @@ -0,0 +1,18 @@ +#include "aare/utils/ifstream_helpers.hpp" + +namespace aare { + +std::string ifstream_error_msg(std::ifstream &ifs) { + std::ios_base::iostate state = ifs.rdstate(); + if (state & std::ios_base::eofbit) { + return " End of file reached"; + } else if (state & std::ios_base::badbit) { + return " Bad file stream"; + } else if (state & std::ios_base::failbit) { + return " File read failed"; + }else{ + return " Unknown/no error"; + } +} + +} // namespace aare From 54def2633439f4ec9adbb2fdabae95f27dae23aa Mon Sep 17 00:00:00 2001 From: Mazzoleni Alice Francesca Date: Mon, 14 Apr 2025 15:48:09 +0200 Subject: [PATCH 090/120] added ClusterFile tests fixed some bugs in ClusterFile --- CMakeLists.txt | 1 - include/aare/CalculateEta.hpp | 2 +- include/aare/ClusterFile.hpp | 68 ++++++-- include/aare/ClusterFileV2.hpp | 154 ------------------ include/aare/ClusterVector.hpp | 286 ++++----------------------------- python/src/cluster.hpp | 11 ++ 
src/Cluster.test.cpp | 6 + src/ClusterFile.test.cpp | 184 +++++++++++++++++++-- src/ClusterVector.test.cpp | 21 ++- 9 files changed, 294 insertions(+), 439 deletions(-) delete mode 100644 include/aare/ClusterFileV2.hpp diff --git a/CMakeLists.txt b/CMakeLists.txt index 8bb7667..b57f05f 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -384,7 +384,6 @@ set(SourceFiles ${CMAKE_CURRENT_SOURCE_DIR}/src/utils/task.cpp ) - add_library(aare_core STATIC ${SourceFiles}) target_include_directories(aare_core PUBLIC "$" diff --git a/include/aare/CalculateEta.hpp b/include/aare/CalculateEta.hpp index 0aab540..2797233 100644 --- a/include/aare/CalculateEta.hpp +++ b/include/aare/CalculateEta.hpp @@ -33,7 +33,7 @@ template struct Eta2 { }; /** - * @brief Calculate the eta2 values for all clusters in a Clsutervector + * @brief Calculate the eta2 values for all clusters in a Clustervector */ template >> diff --git a/include/aare/ClusterFile.hpp b/include/aare/ClusterFile.hpp index 45df8a0..06de985 100644 --- a/include/aare/ClusterFile.hpp +++ b/include/aare/ClusterFile.hpp @@ -39,9 +39,10 @@ template >> class ClusterFile { FILE *fp{}; + const std::string m_filename{}; uint32_t m_num_left{}; /*Number of photons left in frame*/ size_t m_chunk_size{}; /*Number of clusters to read at a time*/ - const std::string m_mode; /*Mode to open the file in*/ + std::string m_mode; /*Mode to open the file in*/ std::optional m_roi; /*Region of interest, will be applied if set*/ std::optional> m_noise_map; /*Noise map to cut photons, will be applied if set*/ @@ -115,6 +116,11 @@ class ClusterFile { */ void close(); + /** @brief Open the file in specific mode + * + */ + void open(const std::string &mode); + private: ClusterVector read_clusters_with_cut(size_t n_clusters); ClusterVector read_clusters_without_cut(size_t n_clusters); @@ -128,25 +134,25 @@ template ClusterFile::ClusterFile( const std::filesystem::path &fname, size_t chunk_size, const std::string &mode) - : m_chunk_size(chunk_size), 
m_mode(mode) { + : m_filename(fname.string()), m_chunk_size(chunk_size), m_mode(mode) { if (mode == "r") { - fp = fopen(fname.c_str(), "rb"); + fp = fopen(m_filename.c_str(), "rb"); if (!fp) { throw std::runtime_error("Could not open file for reading: " + - fname.string()); + m_filename); } } else if (mode == "w") { - fp = fopen(fname.c_str(), "wb"); + fp = fopen(m_filename.c_str(), "wb"); if (!fp) { throw std::runtime_error("Could not open file for writing: " + - fname.string()); + m_filename); } } else if (mode == "a") { - fp = fopen(fname.c_str(), "ab"); + fp = fopen(m_filename.c_str(), "ab"); if (!fp) { throw std::runtime_error("Could not open file for appending: " + - fname.string()); + m_filename); } } else { throw std::runtime_error("Unsupported mode: " + mode); @@ -165,6 +171,39 @@ void ClusterFile::close() { fp = nullptr; } } + +template +void ClusterFile::open(const std::string &mode) { + if (fp) { + close(); + } + + if (mode == "r") { + fp = fopen(m_filename.c_str(), "rb"); + if (!fp) { + throw std::runtime_error("Could not open file for reading: " + + m_filename); + } + m_mode = "r"; + } else if (mode == "w") { + fp = fopen(m_filename.c_str(), "wb"); + if (!fp) { + throw std::runtime_error("Could not open file for writing: " + + m_filename); + } + m_mode = "w"; + } else if (mode == "a") { + fp = fopen(m_filename.c_str(), "ab"); + if (!fp) { + throw std::runtime_error("Could not open file for appending: " + + m_filename); + } + m_mode = "a"; + } else { + throw std::runtime_error("Unsupported mode: " + mode); + } +} + template void ClusterFile::set_roi(ROI roi) { m_roi = roi; @@ -197,10 +236,7 @@ void ClusterFile::write_frame( if (m_mode != "w" && m_mode != "a") { throw std::runtime_error("File not opened for writing"); } - if (!(clusters.cluster_size_x() == 3) && - !(clusters.cluster_size_y() == 3)) { - throw std::runtime_error("Only 3x3 clusters are supported"); - } + int32_t frame_number = clusters.frame_number(); fwrite(&frame_number, 
sizeof(frame_number), 1, fp); uint32_t n_clusters = clusters.size(); @@ -270,7 +306,7 @@ ClusterFile::read_clusters_without_cut(size_t n_clusters) { } } - // Resize the vector to the number of clusters. + // Resize the vector to the number o f clusters. // No new allocation, only change bounds. clusters.resize(nph_read); if (m_gain_map) @@ -282,7 +318,7 @@ template ClusterVector ClusterFile::read_clusters_with_cut(size_t n_clusters) { ClusterVector clusters; - clusters.resize(n_clusters); + clusters.reserve(n_clusters); // if there are photons left from previous frame read them first if (m_num_left) { @@ -375,11 +411,13 @@ ClusterFile::read_frame_without_cut() { ClusterVector clusters(n_clusters); clusters.set_frame_number(frame_number); + clusters.resize(n_clusters); + if (fread(clusters.data(), clusters.item_size(), n_clusters, fp) != static_cast(n_clusters)) { throw std::runtime_error(LOCATION + "Could not read clusters"); } - clusters.resize(n_clusters); + if (m_gain_map) m_gain_map->apply_gain_map(clusters); return clusters; diff --git a/include/aare/ClusterFileV2.hpp b/include/aare/ClusterFileV2.hpp deleted file mode 100644 index 55b8a2b..0000000 --- a/include/aare/ClusterFileV2.hpp +++ /dev/null @@ -1,154 +0,0 @@ -#pragma once -#include "aare/core/defs.hpp" -#include -#include -#include - -namespace aare { -struct ClusterHeader { - int32_t frame_number; - int32_t n_clusters; - std::string to_string() const { - return "frame_number: " + std::to_string(frame_number) + - ", n_clusters: " + std::to_string(n_clusters); - } -}; - -struct ClusterV2_ { - int16_t x; - int16_t y; - std::array data; - std::string to_string(bool detailed = false) const { - if (detailed) { - std::string data_str = "["; - for (auto &d : data) { - data_str += std::to_string(d) + ", "; - } - data_str += "]"; - return "x: " + std::to_string(x) + ", y: " + std::to_string(y) + - ", data: " + data_str; - } - return "x: " + std::to_string(x) + ", y: " + std::to_string(y); - } -}; - -struct 
ClusterV2 { - ClusterV2_ cluster; - int32_t frame_number; - std::string to_string() const { - return "frame_number: " + std::to_string(frame_number) + ", " + - cluster.to_string(); - } -}; - -/** - * @brief - * important not: fp always points to the clusters header and does not point to - * individual clusters - * - */ -class ClusterFileV2 { - std::filesystem::path m_fpath; - std::string m_mode; - FILE *fp{nullptr}; - - void check_open() { - if (!fp) - throw std::runtime_error( - fmt::format("File: {} not open", m_fpath.string())); - } - - public: - ClusterFileV2(std::filesystem::path const &fpath, std::string const &mode) - : m_fpath(fpath), m_mode(mode) { - if (m_mode != "r" && m_mode != "w") - throw std::invalid_argument("mode must be 'r' or 'w'"); - if (m_mode == "r" && !std::filesystem::exists(m_fpath)) - throw std::invalid_argument("File does not exist"); - if (mode == "r") { - fp = fopen(fpath.string().c_str(), "rb"); - } else if (mode == "w") { - if (std::filesystem::exists(fpath)) { - fp = fopen(fpath.string().c_str(), "r+b"); - } else { - fp = fopen(fpath.string().c_str(), "wb"); - } - } - if (fp == nullptr) { - throw std::runtime_error("Failed to open file"); - } - } - ~ClusterFileV2() { close(); } - std::vector read() { - check_open(); - - ClusterHeader header; - fread(&header, sizeof(ClusterHeader), 1, fp); - std::vector clusters_(header.n_clusters); - fread(clusters_.data(), sizeof(ClusterV2_), header.n_clusters, fp); - std::vector clusters; - for (auto &c : clusters_) { - ClusterV2 cluster; - cluster.cluster = std::move(c); - cluster.frame_number = header.frame_number; - clusters.push_back(cluster); - } - - return clusters; - } - std::vector> read(int n_frames) { - std::vector> clusters; - for (int i = 0; i < n_frames; i++) { - clusters.push_back(read()); - } - return clusters; - } - - size_t write(std::vector const &clusters) { - check_open(); - if (m_mode != "w") - throw std::runtime_error("File not opened in write mode"); - if (clusters.empty()) - 
return 0; - - ClusterHeader header; - header.frame_number = clusters[0].frame_number; - header.n_clusters = clusters.size(); - fwrite(&header, sizeof(ClusterHeader), 1, fp); - for (auto &c : clusters) { - fwrite(&c.cluster, sizeof(ClusterV2_), 1, fp); - } - return clusters.size(); - } - - size_t write(std::vector> const &clusters) { - check_open(); - if (m_mode != "w") - throw std::runtime_error("File not opened in write mode"); - - size_t n_clusters = 0; - for (auto &c : clusters) { - n_clusters += write(c); - } - return n_clusters; - } - - int seek_to_begin() { return fseek(fp, 0, SEEK_SET); } - int seek_to_end() { return fseek(fp, 0, SEEK_END); } - - int32_t frame_number() { - auto pos = ftell(fp); - ClusterHeader header; - fread(&header, sizeof(ClusterHeader), 1, fp); - fseek(fp, pos, SEEK_SET); - return header.frame_number; - } - - void close() { - if (fp) { - fclose(fp); - fp = nullptr; - } - } -}; -} // namespace aare \ No newline at end of file diff --git a/include/aare/ClusterVector.hpp b/include/aare/ClusterVector.hpp index 22315cc..cc88256 100644 --- a/include/aare/ClusterVector.hpp +++ b/include/aare/ClusterVector.hpp @@ -18,256 +18,6 @@ template >> class ClusterVector; // Forward declaration -/** - * @brief ClusterVector is a container for clusters of various sizes. It uses a - * contiguous memory buffer to store the clusters. It is templated on the data - * type and the coordinate type of the clusters. - * @note push_back can invalidate pointers to elements in the container - * @warning ClusterVector is currently move only to catch unintended copies, but - * this might change since there are probably use cases where copying is needed. - * @tparam T data type of the pixels in the cluster - * @tparam CoordType data type of the x and y coordinates of the cluster - * (normally int16_t) - */ -#if 0 -template -class ClusterVector> { - - std::byte *m_data{}; - size_t m_size{0}; - size_t m_capacity; - uint64_t m_frame_number{0}; // TODO! 
Check frame number size and type - /** - Format string used in the python bindings to create a numpy - array from the buffer - = - native byte order - h - short - d - double - i - int - */ - constexpr static char m_fmt_base[] = "=h:x:\nh:y:\n({},{}){}:data:"; - - public: - using value_type = T; - using ClusterType = Cluster; - - /** - * @brief Construct a new ClusterVector object - * @param capacity initial capacity of the buffer in number of clusters - * @param frame_number frame number of the clusters. Default is 0, which is - * also used to indicate that the clusters come from many frames - */ - ClusterVector(size_t capacity = 1024, uint64_t frame_number = 0) - : m_capacity(capacity), m_frame_number(frame_number) { - allocate_buffer(m_capacity); - } - - ~ClusterVector() { delete[] m_data; } - - // Move constructor - ClusterVector(ClusterVector &&other) noexcept - : m_data(other.m_data), m_size(other.m_size), - m_capacity(other.m_capacity), m_frame_number(other.m_frame_number) { - other.m_data = nullptr; - other.m_size = 0; - other.m_capacity = 0; - } - - // Move assignment operator - ClusterVector &operator=(ClusterVector &&other) noexcept { - if (this != &other) { - delete[] m_data; - m_data = other.m_data; - m_size = other.m_size; - m_capacity = other.m_capacity; - m_frame_number = other.m_frame_number; - other.m_data = nullptr; - other.m_size = 0; - other.m_capacity = 0; - other.m_frame_number = 0; - } - return *this; - } - - /** - * @brief Reserve space for at least capacity clusters - * @param capacity number of clusters to reserve space for - * @note If capacity is less than the current capacity, the function does - * nothing. 
- */ - void reserve(size_t capacity) { - if (capacity > m_capacity) { - allocate_buffer(capacity); - } - } - - /** - * @brief Add a cluster to the vector - */ - void push_back(const ClusterType &cluster) { - if (m_size == m_capacity) { - allocate_buffer(m_capacity * 2); - } - std::byte *ptr = element_ptr(m_size); - *reinterpret_cast(ptr) = cluster.x; - ptr += sizeof(CoordType); - *reinterpret_cast(ptr) = cluster.y; - ptr += sizeof(CoordType); - - std::memcpy(ptr, cluster.data, ClusterSizeX * ClusterSizeY * sizeof(T)); - - m_size++; - } - - ClusterVector &operator+=(const ClusterVector &other) { - if (m_size + other.m_size > m_capacity) { - allocate_buffer(m_capacity + other.m_size); - } - std::copy(other.m_data, other.m_data + other.m_size * item_size(), - m_data + m_size * item_size()); - m_size += other.m_size; - return *this; - } - - /** - * @brief Sum the pixels in each cluster - * @return std::vector vector of sums for each cluster - */ - /* - std::vector sum() { - std::vector sums(m_size); - const size_t stride = item_size(); - const size_t n_pixels = ClusterSizeX * ClusterSizeY; - std::byte *ptr = m_data + 2 * sizeof(CoordType); // skip x and y - - for (size_t i = 0; i < m_size; i++) { - sums[i] = - std::accumulate(reinterpret_cast(ptr), - reinterpret_cast(ptr) + n_pixels, T{}); - ptr += stride; - } - return sums; - } - */ - - /** - * @brief Sum the pixels in the 2x2 subcluster with the biggest pixel sum in - * each cluster - * @return std::vector vector of sums for each cluster - */ //TODO if underlying container is a vector use std::for_each - /* - std::vector sum_2x2() { - std::vector sums_2x2(m_size); - - for (size_t i = 0; i < m_size; i++) { - sums_2x2[i] = at(i).max_sum_2x2; - } - return sums_2x2; - } - */ - - /** - * @brief Return the number of clusters in the vector - */ - size_t size() const { return m_size; } - - uint8_t cluster_size_x() const { return ClusterSizeX; } - - uint8_t cluster_size_y() const { return ClusterSizeY; } - - /** - * @brief 
Return the capacity of the buffer in number of clusters. This is - * the number of clusters that can be stored in the current buffer without - * reallocation. - */ - size_t capacity() const { return m_capacity; } - - /** - * @brief Return the size in bytes of a single cluster - */ - size_t item_size() const { - return 2 * sizeof(CoordType) + ClusterSizeX * ClusterSizeY * sizeof(T); - } - - /** - * @brief Return the offset in bytes for the i-th cluster - */ - size_t element_offset(size_t i) const { return item_size() * i; } - - /** - * @brief Return a pointer to the i-th cluster - */ - std::byte *element_ptr(size_t i) { return m_data + element_offset(i); } - - /** - * @brief Return a pointer to the i-th cluster - */ - const std::byte *element_ptr(size_t i) const { - return m_data + element_offset(i); - } - - std::byte *data() { return m_data; } - std::byte const *data() const { return m_data; } - - /** - * @brief Return a reference to the i-th cluster casted to type V - * @tparam V type of the cluster - */ - ClusterType &at(size_t i) { - return *reinterpret_cast(element_ptr(i)); - } - - const ClusterType &at(size_t i) const { - return *reinterpret_cast(element_ptr(i)); - } - - template const V &at(size_t i) const { - return *reinterpret_cast(element_ptr(i)); - } - - const std::string_view fmt_base() const { - // TODO! how do we match on coord_t? - return m_fmt_base; - } - - /** - * @brief Return the frame number of the clusters. 0 is used to indicate - * that the clusters come from many frames - */ - uint64_t frame_number() const { return m_frame_number; } - - void set_frame_number(uint64_t frame_number) { - m_frame_number = frame_number; - } - - /** - * @brief Resize the vector to contain new_size clusters. If new_size is - * greater than the current capacity, a new buffer is allocated. If the size - * is smaller no memory is freed, size is just updated. 
- * @param new_size new size of the vector - * @warning The additional clusters are not initialized - */ - void resize(size_t new_size) { - // TODO! Should we initialize the new clusters? - if (new_size > m_capacity) { - allocate_buffer(new_size); - } - m_size = new_size; - } - - private: - void allocate_buffer(size_t new_capacity) { - size_t num_bytes = item_size() * new_capacity; - std::byte *new_data = new std::byte[num_bytes]{}; - std::copy(m_data, m_data + item_size() * m_size, new_data); - delete[] m_data; - m_data = new_data; - m_capacity = new_capacity; - } -}; -#endif - /** * @brief ClusterVector is a container for clusters of various sizes. It * uses a contiguous memory buffer to store the clusters. It is templated on @@ -285,7 +35,7 @@ template > { std::vector> m_data{}; - uint64_t m_frame_number{0}; // TODO! Check frame number size and type + int32_t m_frame_number{0}; // TODO! Check frame number size and type public: using value_type = T; @@ -319,6 +69,33 @@ class ClusterVector> { return *this; } + /** + * @brief Sum the pixels in each cluster + * @return std::vector vector of sums for each cluster + */ + std::vector sum() { + std::vector sums(m_data.size()); + + for (size_t i = 0; i < m_data.size(); i++) { + sums[i] = at(i).sum(); + } + return sums; + } + + /** + * @brief Sum the pixels in the 2x2 subcluster with the biggest pixel sum in + * each cluster + * @return std::vector vector of sums for each cluster + */ //TODO if underlying container is a vector use std::for_each + std::vector sum_2x2() { + std::vector sums_2x2(m_data.size()); + + for (size_t i = 0; i < m_data.size(); i++) { + sums_2x2[i] = at(i).max_sum_2x2().first; + } + return sums_2x2; + } + /** * @brief Reserve space for at least capacity clusters * @param capacity number of clusters to reserve space for @@ -361,7 +138,8 @@ class ClusterVector> { * @brief Return the size in bytes of a single cluster */ size_t item_size() const { - return 2 * sizeof(CoordType) + ClusterSizeX * 
ClusterSizeY * sizeof(T); + return sizeof(ClusterType); // 2 * sizeof(CoordType) + ClusterSizeX * + // ClusterSizeY * sizeof(T); } ClusterType *data() { return m_data.data(); } @@ -379,9 +157,9 @@ class ClusterVector> { * @brief Return the frame number of the clusters. 0 is used to indicate * that the clusters come from many frames */ - uint64_t frame_number() const { return m_frame_number; } + int32_t frame_number() const { return m_frame_number; } - void set_frame_number(uint64_t frame_number) { + void set_frame_number(int32_t frame_number) { m_frame_number = frame_number; } }; diff --git a/python/src/cluster.hpp b/python/src/cluster.hpp index a06bcdd..96a4f0f 100644 --- a/python/src/cluster.hpp +++ b/python/src/cluster.hpp @@ -83,6 +83,17 @@ void define_cluster_vector(py::module &m, const std::string &typestr) { self.push_back(cluster); }) + .def("sum", + [](ClusterVector &self) { + auto *vec = new std::vector(self.sum()); + return return_vector(vec); + }) + .def("sum_2x2", + [](ClusterVector &self) { + auto *vec = new std::vector(self.sum_2x2()); + return return_vector(vec); + }) + // implement push_back .def_property_readonly("size", &ClusterVector::size) .def("item_size", &ClusterVector::item_size) diff --git a/src/Cluster.test.cpp b/src/Cluster.test.cpp index e502012..879a5e7 100644 --- a/src/Cluster.test.cpp +++ b/src/Cluster.test.cpp @@ -26,3 +26,9 @@ TEST_CASE("Correct Instantiation of Cluster and ClusterVector", CHECK(not is_cluster_v); CHECK(is_cluster_v>); } + +TEST_CASE("Test sum of Cluster", "[.cluster]") { + Cluster cluster{0, 0, {1, 2, 3, 4}}; + + CHECK(cluster.sum() == 10); +} \ No newline at end of file diff --git a/src/ClusterFile.test.cpp b/src/ClusterFile.test.cpp index 1ee54e7..024bed4 100644 --- a/src/ClusterFile.test.cpp +++ b/src/ClusterFile.test.cpp @@ -2,21 +2,29 @@ #include "test_config.hpp" #include "aare/defs.hpp" +#include #include #include using aare::Cluster; using aare::ClusterFile; +using aare::ClusterVector; -TEST_CASE("Read one 
frame from a a cluster file", "[.files]") { +TEST_CASE("Read one frame from a cluster file", "[.files]") { // We know that the frame has 97 clusters auto fpath = test_data_path() / "clust" / "single_frame_97_clustrers.clust"; REQUIRE(std::filesystem::exists(fpath)); ClusterFile> f(fpath); auto clusters = f.read_frame(); - REQUIRE(clusters.size() == 97); - REQUIRE(clusters.frame_number() == 135); + CHECK(clusters.size() == 97); + CHECK(clusters.frame_number() == 135); + CHECK(clusters.at(0).x == 1); + CHECK(clusters.at(0).y == 200); + int32_t expected_cluster_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8}; + CHECK(std::equal(std::begin(clusters.at(0).data), + std::end(clusters.at(0).data), + std::begin(expected_cluster_data))); } TEST_CASE("Read one frame using ROI", "[.files]") { @@ -43,6 +51,13 @@ TEST_CASE("Read one frame using ROI", "[.files]") { REQUIRE(c.y >= roi.ymin); REQUIRE(c.y <= roi.ymax); } + + CHECK(clusters.at(0).x == 1); + CHECK(clusters.at(0).y == 200); + int32_t expected_cluster_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8}; + CHECK(std::equal(std::begin(clusters.at(0).data), + std::end(clusters.at(0).data), + std::begin(expected_cluster_data))); } TEST_CASE("Read clusters from single frame file", "[.files]") { @@ -154,6 +169,12 @@ TEST_CASE("Read clusters from single frame file", "[.files]") { auto clusters = f.read_clusters(50); REQUIRE(clusters.size() == 50); REQUIRE(clusters.frame_number() == 135); + int32_t expected_cluster_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8}; + REQUIRE(clusters.at(0).x == 1); + REQUIRE(clusters.at(0).y == 200); + CHECK(std::equal(std::begin(clusters.at(0).data), + std::end(clusters.at(0).data), + std::begin(expected_cluster_data))); } SECTION("Read more clusters than available") { ClusterFile> f(fpath); @@ -161,24 +182,169 @@ TEST_CASE("Read clusters from single frame file", "[.files]") { auto clusters = f.read_clusters(100); REQUIRE(clusters.size() == 97); REQUIRE(clusters.frame_number() == 135); + int32_t expected_cluster_data[] = {0, 1, 2, 
3, 4, 5, 6, 7, 8}; + REQUIRE(clusters.at(0).x == 1); + REQUIRE(clusters.at(0).y == 200); + CHECK(std::equal(std::begin(clusters.at(0).data), + std::end(clusters.at(0).data), + std::begin(expected_cluster_data))); } SECTION("Read all clusters") { ClusterFile> f(fpath); auto clusters = f.read_clusters(97); REQUIRE(clusters.size() == 97); REQUIRE(clusters.frame_number() == 135); - REQUIRE(clusters.at(0).x == 1); REQUIRE(clusters.at(0).y == 200); + int32_t expected_cluster_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8}; + CHECK(std::equal(std::begin(clusters.at(0).data), + std::end(clusters.at(0).data), + std::begin(expected_cluster_data))); } } -TEST_CASE("Read clusters", "[.files]") { - // beam_En700eV_-40deg_300V_10us_d0_f0_100.clust - auto fpath = test_data_path() / "clust" / - "beam_En700eV_-40deg_300V_10us_d0_f0_100.clust"; +TEST_CASE("Read clusters from single frame file with ROI", "[.files]") { + auto fpath = test_data_path() / "clust" / "single_frame_97_clustrers.clust"; REQUIRE(std::filesystem::exists(fpath)); ClusterFile> f(fpath); - auto clusters = f.read_clusters(500); + + aare::ROI roi; + roi.xmin = 0; + roi.xmax = 50; + roi.ymin = 200; + roi.ymax = 249; + f.set_roi(roi); + + auto clusters = f.read_clusters(10); + + CHECK(clusters.size() == 10); + CHECK(clusters.frame_number() == 135); + CHECK(clusters.at(0).x == 1); + CHECK(clusters.at(0).y == 200); + int32_t expected_cluster_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8}; + CHECK(std::equal(std::begin(clusters.at(0).data), + std::end(clusters.at(0).data), + std::begin(expected_cluster_data))); +} + +TEST_CASE("Read cluster from multiple frame file", "[.files]") { + + using ClusterType = Cluster; + + auto fpath = + test_data_path() / "clust" / "Two_frames_2x2double_test_clusters.clust"; + + REQUIRE(std::filesystem::exists(fpath)); + + // Two_frames_2x2double_test_clusters.clust + // frame number, num_clusters 0, 4 + //[10, 20], {0. 
,0., 0., 0.} + //[11, 30], {1., 1., 1., 1.} + //[12, 40], {2., 2., 2., 2.} + //[13, 50], {3., 3., 3., 3.} + // 1,4 + //[10, 20], {4., 4., 4., 4.} + //[11, 30], {5., 5., 5., 5.} + //[12, 40], {6., 6., 6., 6.} + //[13, 50], {7., 7., 7., 7.} + + SECTION("Read clusters from both frames") { + ClusterFile f(fpath); + auto clusters = f.read_clusters(2); + REQUIRE(clusters.size() == 2); + REQUIRE(clusters.frame_number() == 0); + + auto clusters1 = f.read_clusters(3); + + REQUIRE(clusters1.size() == 3); + REQUIRE(clusters1.frame_number() == 1); + } + + SECTION("Read all clusters") { + ClusterFile f(fpath); + auto clusters = f.read_clusters(8); + REQUIRE(clusters.size() == 8); + REQUIRE(clusters.frame_number() == 1); + } + + SECTION("Read clusters from one frame") { + ClusterFile f(fpath); + auto clusters = f.read_clusters(2); + REQUIRE(clusters.size() == 2); + REQUIRE(clusters.frame_number() == 0); + + auto clusters1 = f.read_clusters(1); + + REQUIRE(clusters1.size() == 1); + REQUIRE(clusters1.frame_number() == 0); + } +} + +TEST_CASE("Write cluster with potential padding", "[.files][.ClusterFile]") { + + using ClusterType = Cluster; + + REQUIRE(std::filesystem::exists(test_data_path() / "clust")); + + auto fpath = test_data_path() / "clust" / "single_frame_2_clusters.clust"; + + ClusterFile file(fpath, 1000, "w"); + + ClusterVector clustervec(2); + int16_t coordinate = 5; + clustervec.push_back(ClusterType{ + coordinate, coordinate, {0., 0., 0., 0., 0., 0., 0., 0., 0.}}); + clustervec.push_back(ClusterType{ + coordinate, coordinate, {0., 0., 0., 0., 0., 0., 0., 0., 0.}}); + + file.write_frame(clustervec); + + file.close(); + + file.open("r"); + + auto read_cluster_vector = file.read_frame(); + + CHECK(read_cluster_vector.size() == 2); + CHECK(read_cluster_vector.frame_number() == 0); + + CHECK(read_cluster_vector.at(0).x == clustervec.at(0).x); + CHECK(read_cluster_vector.at(0).y == clustervec.at(0).y); + CHECK(std::equal(clustervec.at(0).data, clustervec.at(0).data + 9, + 
read_cluster_vector.at(0).data, [](double a, double b) { + return std::abs(a - b) < + std::numeric_limits::epsilon(); + })); + + CHECK(read_cluster_vector.at(1).x == clustervec.at(1).x); + CHECK(read_cluster_vector.at(1).y == clustervec.at(1).y); + CHECK(std::equal(clustervec.at(1).data, std::end(clustervec.at(1).data), + read_cluster_vector.at(1).data, [](double a, double b) { + return std::abs(a - b) < + std::numeric_limits::epsilon(); + })); +} + +TEST_CASE("Read frame and modify cluster data", "[.files][.ClusterFile]") { + auto fpath = test_data_path() / "clust" / "single_frame_97_clustrers.clust"; + REQUIRE(std::filesystem::exists(fpath)); + + ClusterFile> f(fpath); + + auto clusters = f.read_frame(); + CHECK(clusters.size() == 97); + CHECK(clusters.frame_number() == 135); + + int32_t expected_cluster_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8}; + clusters.push_back( + Cluster{0, 0, {0, 1, 2, 3, 4, 5, 6, 7, 8}}); + + CHECK(clusters.size() == 98); + CHECK(clusters.at(0).x == 1); + CHECK(clusters.at(0).y == 200); + + CHECK(std::equal(std::begin(clusters.at(0).data), + std::end(clusters.at(0).data), + std::begin(expected_cluster_data))); } diff --git a/src/ClusterVector.test.cpp b/src/ClusterVector.test.cpp index 5a5abe0..468a707 100644 --- a/src/ClusterVector.test.cpp +++ b/src/ClusterVector.test.cpp @@ -8,15 +8,14 @@ using aare::Cluster; using aare::ClusterVector; - -TEST_CASE("item_size return the size of the cluster stored"){ +TEST_CASE("item_size return the size of the cluster stored") { using C1 = Cluster; ClusterVector cv(4); CHECK(cv.item_size() == sizeof(C1)); - //Sanity check - //2*2*4 = 16 bytes of data for the cluster - // 2*2 = 4 bytes for the x and y coordinates + // Sanity check + // 2*2*4 = 16 bytes of data for the cluster + // 2*2 = 4 bytes for the x and y coordinates REQUIRE(cv.item_size() == 20); using C2 = Cluster; @@ -30,6 +29,18 @@ TEST_CASE("item_size return the size of the cluster stored"){ using C4 = Cluster; ClusterVector cv4(4); 
CHECK(cv4.item_size() == sizeof(C4)); + + using C5 = Cluster; + ClusterVector cv5(4); + CHECK(cv5.item_size() == sizeof(C5)); + + using C6 = Cluster; + ClusterVector cv6(4); + CHECK(cv6.item_size() == sizeof(C6)); + + using C7 = Cluster; + ClusterVector cv7(4); + CHECK(cv7.item_size() == sizeof(C7)); } TEST_CASE("ClusterVector 2x2 int32_t capacity 4, push back then read", From 7c93632605794825c328cdc293e38cfbb1b74fae Mon Sep 17 00:00:00 2001 From: froejdh_e Date: Mon, 14 Apr 2025 16:38:25 +0200 Subject: [PATCH 091/120] tests and fix --- include/aare/ClusterFile.hpp | 2 +- include/aare/ClusterVector.hpp | 8 +++ python/CMakeLists.txt | 4 ++ python/aare/ClusterFinder.py | 14 ++++ python/aare/ClusterVector.py | 11 +++ python/aare/__init__.py | 4 ++ python/src/bind_ClusterVector.hpp | 103 +++++++++++++++++++++++++++++ python/src/cluster.hpp | 66 ------------------ python/src/module.cpp | 17 +++-- python/tests/test_Cluster.py | 59 +++++------------ python/tests/test_ClusterFile.py | 64 ++++++++++++++++++ python/tests/test_ClusterVector.py | 54 +++++++++++++++ src/CalculateEta.test.cpp | 70 +++++++++++++++++++- 13 files changed, 359 insertions(+), 117 deletions(-) create mode 100644 python/aare/ClusterFinder.py create mode 100644 python/aare/ClusterVector.py create mode 100644 python/src/bind_ClusterVector.hpp create mode 100644 python/tests/test_ClusterFile.py create mode 100644 python/tests/test_ClusterVector.py diff --git a/include/aare/ClusterFile.hpp b/include/aare/ClusterFile.hpp index 45df8a0..6ec2f2d 100644 --- a/include/aare/ClusterFile.hpp +++ b/include/aare/ClusterFile.hpp @@ -282,7 +282,7 @@ template ClusterVector ClusterFile::read_clusters_with_cut(size_t n_clusters) { ClusterVector clusters; - clusters.resize(n_clusters); + clusters.reserve(n_clusters); // if there are photons left from previous frame read them first if (m_num_left) { diff --git a/include/aare/ClusterVector.hpp b/include/aare/ClusterVector.hpp index 22315cc..e85a6f0 100644 --- 
a/include/aare/ClusterVector.hpp +++ b/include/aare/ClusterVector.hpp @@ -384,6 +384,14 @@ class ClusterVector> { void set_frame_number(uint64_t frame_number) { m_frame_number = frame_number; } + + std::vector sum() { + std::vector sums(m_data.size()); + for (size_t i = 0; i < m_data.size(); i++) { + sums[i] = m_data[i].sum(); + } + return sums; + } }; } // namespace aare \ No newline at end of file diff --git a/python/CMakeLists.txt b/python/CMakeLists.txt index 09de736..9f54049 100644 --- a/python/CMakeLists.txt +++ b/python/CMakeLists.txt @@ -28,6 +28,9 @@ target_link_libraries(_aare PRIVATE aare_core aare_compiler_flags) set( PYTHON_FILES aare/__init__.py aare/CtbRawFile.py + aare/ClusterFinder.py + aare/ClusterVector.py + aare/func.py aare/RawFile.py aare/transform.py @@ -35,6 +38,7 @@ set( PYTHON_FILES aare/utils.py ) + # Copy the python files to the build directory foreach(FILE ${PYTHON_FILES}) configure_file(${FILE} ${CMAKE_BINARY_DIR}/${FILE} ) diff --git a/python/aare/ClusterFinder.py b/python/aare/ClusterFinder.py new file mode 100644 index 0000000..a2042a4 --- /dev/null +++ b/python/aare/ClusterFinder.py @@ -0,0 +1,14 @@ + +from ._aare import ClusterFinder_Cluster3x3i +import numpy as np + +def ClusterFinder(image_size, cluster_size, n_sigma=5, dtype = np.int32, capacity = 1024): + """ + Factory function to create a ClusterFinder object. Provides a cleaner syntax for + the templated ClusterFinder in C++. + """ + if dtype == np.int32 and cluster_size == (3,3): + return ClusterFinder_Cluster3x3i(image_size, n_sigma = n_sigma, capacity=capacity) + else: + #TODO! add the other formats + raise ValueError(f"Unsupported dtype: {dtype}. 
Only np.int32 is supported.") diff --git a/python/aare/ClusterVector.py b/python/aare/ClusterVector.py new file mode 100644 index 0000000..b0dd453 --- /dev/null +++ b/python/aare/ClusterVector.py @@ -0,0 +1,11 @@ + + +from ._aare import ClusterVector_Cluster3x3i +import numpy as np + +def ClusterVector(cluster_size, dtype = np.int32): + + if dtype == np.int32 and cluster_size == (3,3): + return ClusterVector_Cluster3x3i() + else: + raise ValueError(f"Unsupported dtype: {dtype}. Only np.int32 is supported.") diff --git a/python/aare/__init__.py b/python/aare/__init__.py index 8c51d73..b1eb604 100644 --- a/python/aare/__init__.py +++ b/python/aare/__init__.py @@ -11,8 +11,12 @@ from ._aare import ROI # from ._aare import ClusterFinderMT, ClusterCollector, ClusterFileSink, ClusterVector_i +from .ClusterFinder import ClusterFinder +from .ClusterVector import ClusterVector + from ._aare import fit_gaus, fit_pol1 from ._aare import Interpolator +from ._aare import calculate_eta2 from .CtbRawFile import CtbRawFile from .RawFile import RawFile from .ScanParameters import ScanParameters diff --git a/python/src/bind_ClusterVector.hpp b/python/src/bind_ClusterVector.hpp new file mode 100644 index 0000000..f7fa796 --- /dev/null +++ b/python/src/bind_ClusterVector.hpp @@ -0,0 +1,103 @@ +#include "aare/ClusterCollector.hpp" +#include "aare/ClusterFileSink.hpp" +#include "aare/ClusterFinder.hpp" +#include "aare/ClusterFinderMT.hpp" +#include "aare/ClusterVector.hpp" +#include "aare/NDView.hpp" +#include "aare/Pedestal.hpp" +#include "np_helper.hpp" + +#include +#include +#include +#include +#include + +namespace py = pybind11; +using pd_type = double; + +using namespace aare; + +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wunused-parameter" + + +template +void define_ClusterVector(py::module &m, const std::string &typestr) { + using ClusterType = + Cluster; + auto class_name = fmt::format("ClusterVector_{}", typestr); + + py::class_, void>>( + m, 
class_name.c_str(), + py::buffer_protocol()) + + .def(py::init()) // TODO change!!! + + .def("push_back", + [](ClusterVector &self, const ClusterType &cluster) { + self.push_back(cluster); + }) + + .def("sum", [](ClusterVector &self) { + auto *vec = new std::vector(self.sum()); + return return_vector(vec); + }) + .def_property_readonly("size", &ClusterVector::size) + .def("item_size", &ClusterVector::item_size) + .def_property_readonly("fmt", + [typestr](ClusterVector &self) { + return fmt_format; + }) + + .def_property_readonly("cluster_size_x", + &ClusterVector::cluster_size_x) + .def_property_readonly("cluster_size_y", + &ClusterVector::cluster_size_y) + .def_property_readonly("capacity", + &ClusterVector::capacity) + .def_property("frame_number", &ClusterVector::frame_number, + &ClusterVector::set_frame_number) + .def_buffer( + [typestr](ClusterVector &self) -> py::buffer_info { + return py::buffer_info( + self.data(), /* Pointer to buffer */ + self.item_size(), /* Size of one scalar */ + fmt_format, /* Format descriptor */ + 1, /* Number of dimensions */ + {self.size()}, /* Buffer dimensions */ + {self.item_size()} /* Strides (in bytes) for each index */ + ); + }); + + // Free functions using ClusterVector + m.def("hitmap", + [](std::array image_size, ClusterVector &cv) { + + // Create a numpy array to hold the hitmap + // The shape of the array is (image_size[0], image_size[1]) + // note that the python array is passed as [row, col] which + // is the opposite of the clusters [x,y] + py::array_t hitmap(image_size); + auto r = hitmap.mutable_unchecked<2>(); + + // Initialize hitmap to 0 + for (py::ssize_t i = 0; i < r.shape(0); i++) + for (py::ssize_t j = 0; j < r.shape(1); j++) + r(i, j) = 0; + + + // Loop over the clusters and increment the hitmap + // Skip out of bound clusters + for (const auto& cluster : cv) { + auto x = cluster.x; + auto y = cluster.y; + if(x -void define_cluster_vector(py::module &m, const std::string &typestr) { - using ClusterType = - 
Cluster; - auto class_name = fmt::format("ClusterVector_{}", typestr); - py::class_, void>>( - m, class_name.c_str(), - py::buffer_protocol()) - - .def(py::init()) // TODO change!!! - - .def("push_back", - [](ClusterVector &self, const ClusterType &cluster) { - self.push_back(cluster); - }) - - // implement push_back - .def_property_readonly("size", &ClusterVector::size) - .def("item_size", &ClusterVector::item_size) - .def_property_readonly("fmt", - [typestr](ClusterVector &self) { - return fmt_format; - }) - - .def_property_readonly("cluster_size_x", - &ClusterVector::cluster_size_x) - .def_property_readonly("cluster_size_y", - &ClusterVector::cluster_size_y) - .def_property_readonly("capacity", - &ClusterVector::capacity) - .def_property("frame_number", &ClusterVector::frame_number, - &ClusterVector::set_frame_number) - .def_buffer( - [typestr](ClusterVector &self) -> py::buffer_info { - return py::buffer_info( - self.data(), /* Pointer to buffer */ - self.item_size(), /* Size of one scalar */ - fmt_format, /* Format descriptor */ - 1, /* Number of dimensions */ - {self.size()}, /* Buffer dimensions */ - {self.item_size()} /* Strides (in bytes) for each index */ - ); - }); -} template @@ -252,25 +206,5 @@ void define_cluster_finder_bindings(py::module &m, const std::string &typestr) { }, py::arg(), py::arg("frame_number") = 0); - m.def("hitmap", - [](std::array image_size, ClusterVector &cv) { - py::array_t hitmap(image_size); - auto r = hitmap.mutable_unchecked<2>(); - - // Initialize hitmap to 0 - for (py::ssize_t i = 0; i < r.shape(0); i++) - for (py::ssize_t j = 0; j < r.shape(1); j++) - r(i, j) = 0; - - size_t stride = cv.item_size(); - auto ptr = cv.data(); - for (size_t i = 0; i < cv.size(); i++) { - auto x = *reinterpret_cast(ptr); - auto y = *reinterpret_cast(ptr + sizeof(int16_t)); - r(y, x) += 1; - ptr += stride; - } - return hitmap; - }); } #pragma GCC diagnostic pop diff --git a/python/src/module.cpp b/python/src/module.cpp index 8d5b5ab..c2067ed 
100644 --- a/python/src/module.cpp +++ b/python/src/module.cpp @@ -1,4 +1,9 @@ // Files with bindings to the different classes + +//New style file naming +#include "bind_ClusterVector.hpp" + +//TODO! migrate the other names #include "cluster.hpp" #include "cluster_file.hpp" #include "ctb_raw_file.hpp" @@ -39,12 +44,12 @@ PYBIND11_MODULE(_aare, m) { define_cluster_file_io_bindings(m, "Cluster2x2f"); define_cluster_file_io_bindings(m, "Cluster2x2d"); - define_cluster_vector(m, "Cluster3x3i"); - define_cluster_vector(m, "Cluster3x3d"); - define_cluster_vector(m, "Cluster3x3f"); - define_cluster_vector(m, "Cluster2x2i"); - define_cluster_vector(m, "Cluster2x2d"); - define_cluster_vector(m, "Cluster2x2f"); + define_ClusterVector(m, "Cluster3x3i"); + define_ClusterVector(m, "Cluster3x3d"); + define_ClusterVector(m, "Cluster3x3f"); + define_ClusterVector(m, "Cluster2x2i"); + define_ClusterVector(m, "Cluster2x2d"); + define_ClusterVector(m, "Cluster2x2f"); define_cluster_finder_bindings(m, "Cluster3x3i"); define_cluster_finder_bindings(m, "Cluster3x3d"); diff --git a/python/tests/test_Cluster.py b/python/tests/test_Cluster.py index e24bcf8..ddaa6f3 100644 --- a/python/tests/test_Cluster.py +++ b/python/tests/test_Cluster.py @@ -1,12 +1,12 @@ import pytest import numpy as np -import aare._aare as aare +from aare import _aare #import the C++ module from conftest import test_data_path def test_cluster_vector_can_be_converted_to_numpy(): - cv = aare.ClusterVector_Cluster3x3i() + cv = _aare.ClusterVector_Cluster3x3i() arr = np.array(cv, copy=False) assert arr.shape == (0,) # 4 for x, y, size, energy and 9 for the cluster data @@ -14,24 +14,23 @@ def test_cluster_vector_can_be_converted_to_numpy(): def test_ClusterVector(): """Test ClusterVector""" - clustervector = aare.ClusterVector_Cluster3x3i() + clustervector = _aare.ClusterVector_Cluster3x3i() assert clustervector.cluster_size_x == 3 assert clustervector.cluster_size_y == 3 assert clustervector.item_size() == 4+9*4 assert 
clustervector.frame_number == 0 - assert clustervector.capacity == 1024 assert clustervector.size == 0 - cluster = aare.Cluster3x3i(0,0,np.ones(9, dtype=np.int32)) + cluster = _aare.Cluster3x3i(0,0,np.ones(9, dtype=np.int32)) clustervector.push_back(cluster) assert clustervector.size == 1 with pytest.raises(TypeError): # Or use the appropriate exception type - clustervector.push_back(aare.Cluster2x2i(0,0,np.ones(4, dtype=np.int32))) + clustervector.push_back(_aare.Cluster2x2i(0,0,np.ones(4, dtype=np.int32))) with pytest.raises(TypeError): - clustervector.push_back(aare.Cluster3x3f(0,0,np.ones(9, dtype=np.float32))) + clustervector.push_back(_aare.Cluster3x3f(0,0,np.ones(9, dtype=np.float32))) def test_Interpolator(): """Test Interpolator""" @@ -41,13 +40,13 @@ def test_Interpolator(): ybins = np.linspace(0, 5, 30, dtype=np.float64) etacube = np.zeros(shape=[30, 30, 20], dtype=np.float64) - interpolator = aare.Interpolator(etacube, xbins, ybins, ebins) + interpolator = _aare.Interpolator(etacube, xbins, ybins, ebins) assert interpolator.get_ietax().shape == (30,30,20) assert interpolator.get_ietay().shape == (30,30,20) - clustervector = aare.ClusterVector_Cluster3x3i() + clustervector = _aare.ClusterVector_Cluster3x3i() - cluster = aare.Cluster3x3i(0,0, np.ones(9, dtype=np.int32)) + cluster = _aare.Cluster3x3i(0,0, np.ones(9, dtype=np.int32)) clustervector.push_back(cluster) interpolated_photons = interpolator.interpolate(clustervector) @@ -58,9 +57,9 @@ def test_Interpolator(): assert interpolated_photons[0]["y"] == -1 assert interpolated_photons[0]["energy"] == 4 #eta_sum = 4, dx, dy = -1,-1 m_ietax = 0, m_ietay = 0 - clustervector = aare.ClusterVector_Cluster2x2i() + clustervector = _aare.ClusterVector_Cluster2x2i() - cluster = aare.Cluster2x2i(0,0, np.ones(4, dtype=np.int32)) + cluster = _aare.Cluster2x2i(0,0, np.ones(4, dtype=np.int32)) clustervector.push_back(cluster) interpolated_photons = interpolator.interpolate(clustervector) @@ -71,28 +70,15 @@ def 
test_Interpolator(): assert interpolated_photons[0]["y"] == 0 assert interpolated_photons[0]["energy"] == 4 -@pytest.mark.files -def test_cluster_file(test_data_path): - """Test ClusterFile""" - cluster_file = aare.ClusterFile_Cluster3x3i(test_data_path / "clust/single_frame_97_clustrers.clust") - clustervector = cluster_file.read_clusters(10) #conversion does not work - cluster_file.close() - - assert clustervector.size == 10 - - ###reading with wrong file - with pytest.raises(TypeError): - cluster_file = aare.ClusterFile_Cluster2x2i(test_data_path / "clust/single_frame_97_clustrers.clust") - cluster_file.close() def test_calculate_eta(): """Calculate Eta""" - clusters = aare.ClusterVector_Cluster3x3i() - clusters.push_back(aare.Cluster3x3i(0,0, np.ones(9, dtype=np.int32))) - clusters.push_back(aare.Cluster3x3i(0,0, np.array([1,1,1,2,2,2,3,3,3]))) + clusters = _aare.ClusterVector_Cluster3x3i() + clusters.push_back(_aare.Cluster3x3i(0,0, np.ones(9, dtype=np.int32))) + clusters.push_back(_aare.Cluster3x3i(0,0, np.array([1,1,1,2,2,2,3,3,3]))) - eta2 = aare.calculate_eta2(clusters) + eta2 = _aare.calculate_eta2(clusters) assert eta2.shape == (2,2) assert eta2[0,0] == 0.5 @@ -103,7 +89,7 @@ def test_calculate_eta(): def test_cluster_finder(): """Test ClusterFinder""" - clusterfinder = aare.ClusterFinder_Cluster3x3i([100,100]) + clusterfinder = _aare.ClusterFinder_Cluster3x3i([100,100]) #frame = np.random.rand(100,100) frame = np.zeros(shape=[100,100]) @@ -115,18 +101,7 @@ def test_cluster_finder(): assert clusters.size == 0 -#TODO dont understand behavior -def test_cluster_collector(): - """Test ClusterCollector""" - - clusterfinder = aare.ClusterFinderMT_Cluster3x3i([100,100]) #TODO: no idea what the data is in InputQueue not zero - - clustercollector = aare.ClusterCollector_Cluster3x3i(clusterfinder) - - cluster_vectors = clustercollector.steal_clusters() - - assert len(cluster_vectors) == 1 #single thread execution - assert cluster_vectors[0].size == 0 # + diff 
--git a/python/tests/test_ClusterFile.py b/python/tests/test_ClusterFile.py new file mode 100644 index 0000000..4126a6c --- /dev/null +++ b/python/tests/test_ClusterFile.py @@ -0,0 +1,64 @@ + +import pytest +import numpy as np +import boost_histogram as bh +import time +from pathlib import Path +import pickle + +from aare import ClusterFile +from conftest import test_data_path + +@pytest.mark.files +def test_cluster_file(test_data_path): + """Test ClusterFile""" + f = ClusterFile(test_data_path / "clust/single_frame_97_clustrers.clust") + cv = f.read_clusters(10) #conversion does not work + + + assert cv.frame_number == 135 + assert cv.size == 10 + + #Known data + #frame_number, num_clusters [135] 97 + #[ 1 200] [0 1 2 3 4 5 6 7 8] + #[ 2 201] [ 9 10 11 12 13 14 15 16 17] + #[ 3 202] [18 19 20 21 22 23 24 25 26] + #[ 4 203] [27 28 29 30 31 32 33 34 35] + #[ 5 204] [36 37 38 39 40 41 42 43 44] + #[ 6 205] [45 46 47 48 49 50 51 52 53] + #[ 7 206] [54 55 56 57 58 59 60 61 62] + #[ 8 207] [63 64 65 66 67 68 69 70 71] + #[ 9 208] [72 73 74 75 76 77 78 79 80] + #[ 10 209] [81 82 83 84 85 86 87 88 89] + + #conversion to numpy array + arr = np.array(cv, copy = False) + + assert arr.size == 10 + for i in range(10): + assert arr[i]['x'] == i+1 + +@pytest.mark.files +def test_read_clusters_and_fill_histogram(test_data_path): + # Create the histogram + n_bins = 100 + xmin = -100 + xmax = 1e4 + hist_aare = bh.Histogram(bh.axis.Regular(n_bins, xmin, xmax)) + + fname = test_data_path / "clust/beam_En700eV_-40deg_300V_10us_d0_f0_100.clust" + + #Read clusters and fill the histogram with pixel values + with ClusterFile(fname, chunk_size = 10000) as f: + for clusters in f: + arr = np.array(clusters, copy = False) + hist_aare.fill(arr['data'].flat) + + + #Load the histogram from the pickle file + with open(fname.with_suffix('.pkl'), 'rb') as f: + hist_py = pickle.load(f) + + #Compare the two histograms + assert hist_aare == hist_py \ No newline at end of file diff --git 
a/python/tests/test_ClusterVector.py b/python/tests/test_ClusterVector.py new file mode 100644 index 0000000..b64aeef --- /dev/null +++ b/python/tests/test_ClusterVector.py @@ -0,0 +1,54 @@ +import pytest +import numpy as np +import boost_histogram as bh +import time +from pathlib import Path +import pickle + +from aare import ClusterFile +from aare import _aare +from conftest import test_data_path + + +def test_create_cluster_vector(): + cv = _aare.ClusterVector_Cluster3x3i() + assert cv.cluster_size_x == 3 + assert cv.cluster_size_y == 3 + assert cv.size == 0 + + +def test_push_back_on_cluster_vector(): + cv = _aare.ClusterVector_Cluster2x2i() + assert cv.cluster_size_x == 2 + assert cv.cluster_size_y == 2 + assert cv.size == 0 + + cluster = _aare.Cluster2x2i(19, 22, np.ones(4, dtype=np.int32)) + cv.push_back(cluster) + assert cv.size == 1 + + arr = np.array(cv, copy=False) + assert arr[0]['x'] == 19 + assert arr[0]['y'] == 22 + + +def test_make_a_hitmap_from_cluster_vector(): + cv = _aare.ClusterVector_Cluster3x3i() + + # Push back 4 clusters with different positions + cv.push_back(_aare.Cluster3x3i(0, 0, np.ones(9, dtype=np.int32))) + cv.push_back(_aare.Cluster3x3i(1, 1, np.ones(9, dtype=np.int32))) + cv.push_back(_aare.Cluster3x3i(1, 1, np.ones(9, dtype=np.int32))) + cv.push_back(_aare.Cluster3x3i(2, 2, np.ones(9, dtype=np.int32))) + + ref = np.zeros((5, 5), dtype=np.int32) + ref[0,0] = 1 + ref[1,1] = 2 + ref[2,2] = 1 + + + img = _aare.hitmap((5,5), cv) + # print(img) + # print(ref) + assert (img == ref).all() + \ No newline at end of file diff --git a/src/CalculateEta.test.cpp b/src/CalculateEta.test.cpp index 2bdf387..29d7ed3 100644 --- a/src/CalculateEta.test.cpp +++ b/src/CalculateEta.test.cpp @@ -37,7 +37,7 @@ auto get_test_parameters() { Eta2{3. / 5, 4. 
/ 6, 1, 11})); } -TEST_CASE("compute_largest_2x2_subcluster", "[.eta_calculation]") { +TEST_CASE("compute_largest_2x2_subcluster", "[eta_calculation]") { auto [cluster, expected_eta] = get_test_parameters(); auto [sum, index] = std::visit( @@ -47,7 +47,7 @@ TEST_CASE("compute_largest_2x2_subcluster", "[.eta_calculation]") { CHECK(expected_eta.sum == sum); } -TEST_CASE("calculate_eta2", "[.eta_calculation]") { +TEST_CASE("calculate_eta2", "[eta_calculation]") { auto [cluster, expected_eta] = get_test_parameters(); @@ -60,3 +60,69 @@ TEST_CASE("calculate_eta2", "[.eta_calculation]") { CHECK(eta.c == expected_eta.c); CHECK(eta.sum == expected_eta.sum); } + + +//3x3 cluster layout (in case of cBottomLeft etc corner): +// 6, 7, 8 +// 3, 4, 5 +// 0, 1, 2 + + +TEST_CASE("Calculate eta2 for a 3x3 int32 cluster with the largest 2x2 sum in the bottom left", + "[eta_calculation]") { + + // Create a 3x3 cluster + Cluster cl; + cl.x = 0; + cl.y = 0; + cl.data[0] = 30; + cl.data[1] = 23; + cl.data[2] = 5; + cl.data[3] = 20; + cl.data[4] = 50; + cl.data[5] = 3; + cl.data[6] = 8; + cl.data[7] = 2; + cl.data[8] = 3; + + // 8, 2, 3 + // 20, 50, 3 + // 30, 23, 5 + + auto eta = calculate_eta2(cl); + CHECK(eta.c == corner::cBottomLeft); + CHECK(eta.x == 50.0 / (20 + 50)); // 4/(3+4) + CHECK(eta.y == 50.0 / (23 + 50)); // 4/(1+4) + CHECK(eta.sum == 30+23+20+50); + +} + +TEST_CASE("Calculate eta2 for a 3x3 int32 cluster with the largest 2x2 sum in the top left", + "[eta_calculation]") { + +// Create a 3x3 cluster +Cluster cl; +cl.x = 0; +cl.y = 0; +cl.data[0] = 8; +cl.data[1] = 12; +cl.data[2] = 5; +cl.data[3] = 77; +cl.data[4] = 80; +cl.data[5] = 3; +cl.data[6] = 82; +cl.data[7] = 91; +cl.data[8] = 3; + +// 82, 91, 3 +// 77, 80, 3 +// 8, 12, 5 + +auto eta = calculate_eta2(cl); +CHECK(eta.c == corner::cTopLeft); +CHECK(eta.x == 80. 
/ (77 + 80)); // 4/(3+4) +CHECK(eta.y == 91.0 / (91 + 80)); // 7/(7+4) +CHECK(eta.sum == 77+80+82+91); + +} + From 5f34ab6df143d3420c0e934c159b11a53b336a48 Mon Sep 17 00:00:00 2001 From: froejdh_e Date: Tue, 15 Apr 2025 08:05:05 +0200 Subject: [PATCH 092/120] minor comment --- src/CalculateEta.test.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/CalculateEta.test.cpp b/src/CalculateEta.test.cpp index 29d7ed3..59a616e 100644 --- a/src/CalculateEta.test.cpp +++ b/src/CalculateEta.test.cpp @@ -62,7 +62,7 @@ TEST_CASE("calculate_eta2", "[eta_calculation]") { } -//3x3 cluster layout (in case of cBottomLeft etc corner): +//3x3 cluster layout (rotated to match the cBottomLeft enum): // 6, 7, 8 // 3, 4, 5 // 0, 1, 2 From a90e532b21b56a810eaff3958084f4fbe548846f Mon Sep 17 00:00:00 2001 From: froejdh_e Date: Tue, 15 Apr 2025 08:08:59 +0200 Subject: [PATCH 093/120] removed extra sum after merge --- include/aare/ClusterVector.hpp | 8 -------- 1 file changed, 8 deletions(-) diff --git a/include/aare/ClusterVector.hpp b/include/aare/ClusterVector.hpp index 3084c96..cc88256 100644 --- a/include/aare/ClusterVector.hpp +++ b/include/aare/ClusterVector.hpp @@ -162,14 +162,6 @@ class ClusterVector> { void set_frame_number(int32_t frame_number) { m_frame_number = frame_number; } - - std::vector sum() { - std::vector sums(m_data.size()); - for (size_t i = 0; i < m_data.size(); i++) { - sums[i] = m_data[i].sum(); - } - return sums; - } }; } // namespace aare \ No newline at end of file From 1174f7f43472d19fd6d473678265ac688cf5d203 Mon Sep 17 00:00:00 2001 From: Mazzoleni Alice Francesca Date: Tue, 15 Apr 2025 13:14:07 +0200 Subject: [PATCH 094/120] fixed calculate eta --- include/aare/CalculateEta.hpp | 77 +++++++++++++++++++++++++++------- include/aare/ClusterVector.hpp | 2 +- src/CalculateEta.test.cpp | 8 ++-- 3 files changed, 67 insertions(+), 20 deletions(-) diff --git a/include/aare/CalculateEta.hpp b/include/aare/CalculateEta.hpp index 2797233..1566de5 
100644 --- a/include/aare/CalculateEta.hpp +++ b/include/aare/CalculateEta.hpp @@ -64,31 +64,78 @@ calculate_eta2(const Cluster &cl) { eta.sum = max_sum.first; auto c = max_sum.second; + size_t cluster_center_index = + (ClusterSizeX / 2) + (ClusterSizeY / 2) * ClusterSizeX; + size_t index_bottom_left_max_2x2_subcluster = (int(c / (ClusterSizeX - 1))) * ClusterSizeX + c % (ClusterSizeX - 1); - if ((cl.data[index_bottom_left_max_2x2_subcluster] + - cl.data[index_bottom_left_max_2x2_subcluster + 1]) != 0) - eta.x = static_cast( - cl.data[index_bottom_left_max_2x2_subcluster + 1]) / - static_cast( - (cl.data[index_bottom_left_max_2x2_subcluster] + - cl.data[index_bottom_left_max_2x2_subcluster + 1])); + // check that cluster center is in max subcluster + if (cluster_center_index != index_bottom_left_max_2x2_subcluster && + cluster_center_index != index_bottom_left_max_2x2_subcluster + 1 && + cluster_center_index != + index_bottom_left_max_2x2_subcluster + ClusterSizeX && + cluster_center_index != + index_bottom_left_max_2x2_subcluster + ClusterSizeX + 1) + throw std::runtime_error("Photon center is not in max 2x2_subcluster"); - if ((cl.data[index_bottom_left_max_2x2_subcluster] + - cl.data[index_bottom_left_max_2x2_subcluster + ClusterSizeX]) != 0) - eta.y = - static_cast( - cl.data[index_bottom_left_max_2x2_subcluster + ClusterSizeX]) / - static_cast( - (cl.data[index_bottom_left_max_2x2_subcluster] + - cl.data[index_bottom_left_max_2x2_subcluster + ClusterSizeX])); + if ((cluster_center_index - index_bottom_left_max_2x2_subcluster) % + ClusterSizeX == + 0) { + if ((cl.data[cluster_center_index + 1] + + cl.data[cluster_center_index]) != 0) + + eta.x = static_cast(cl.data[cluster_center_index + 1]) / + static_cast((cl.data[cluster_center_index + 1] + + cl.data[cluster_center_index])); + } else { + if ((cl.data[cluster_center_index] + + cl.data[cluster_center_index - 1]) != 0) + + eta.x = static_cast(cl.data[cluster_center_index]) / + 
static_cast((cl.data[cluster_center_index - 1] + + cl.data[cluster_center_index])); + } + if ((cluster_center_index - index_bottom_left_max_2x2_subcluster) / + ClusterSizeX < + 1) { + assert(cluster_center_index + ClusterSizeX < + ClusterSizeX * ClusterSizeY); // suppress warning + if ((cl.data[cluster_center_index] + + cl.data[cluster_center_index + ClusterSizeX]) != 0) + eta.y = static_cast( + cl.data[cluster_center_index + ClusterSizeX]) / + static_cast( + (cl.data[cluster_center_index] + + cl.data[cluster_center_index + ClusterSizeX])); + } else { + if ((cl.data[cluster_center_index] + + cl.data[cluster_center_index - ClusterSizeX]) != 0) + eta.y = static_cast(cl.data[cluster_center_index]) / + static_cast( + (cl.data[cluster_center_index] + + cl.data[cluster_center_index - ClusterSizeX])); + } eta.c = c; // TODO only supported for 2x2 and 3x3 clusters -> at least no // underyling enum class return eta; } +// Dont get why this is correct - photon center should be top right corner +template +Eta2 calculate_eta2(const Cluster &cl) { + Eta2 eta{}; + + if ((cl.data[0] + cl.data[1]) != 0) + eta.x = static_cast(cl.data[1]) / (cl.data[0] + cl.data[1]); + if ((cl.data[0] + cl.data[2]) != 0) + eta.y = static_cast(cl.data[2]) / (cl.data[0] + cl.data[2]); + eta.sum = cl.sum(); + eta.c = cBottomLeft; // TODO! This is not correct, but need to put something + return eta; +} + // calculates Eta3 for 3x3 cluster based on code from analyze_cluster // TODO only supported for 3x3 Clusters template Eta2 calculate_eta3(const Cluster &cl) { diff --git a/include/aare/ClusterVector.hpp b/include/aare/ClusterVector.hpp index cc88256..e91cb6d 100644 --- a/include/aare/ClusterVector.hpp +++ b/include/aare/ClusterVector.hpp @@ -47,7 +47,7 @@ class ClusterVector> { * @param frame_number frame number of the clusters. 
Default is 0, which is * also used to indicate that the clusters come from many frames */ - ClusterVector(size_t capacity = 300, uint64_t frame_number = 0) + ClusterVector(size_t capacity = 1024, uint64_t frame_number = 0) : m_frame_number(frame_number) { m_data.reserve(capacity); } diff --git a/src/CalculateEta.test.cpp b/src/CalculateEta.test.cpp index 59a616e..cdec79b 100644 --- a/src/CalculateEta.test.cpp +++ b/src/CalculateEta.test.cpp @@ -26,15 +26,15 @@ auto get_test_parameters() { ClusterTypes{Cluster{0, 0, {1, 2, 3, 4, 5, 6, 1, 2, 7}}}, Eta2{6. / 11, 2. / 7, corner::cTopRight, 20}), std::make_tuple(ClusterTypes{Cluster{ - 0, 0, {1, 6, 7, 6, 5, 4, 3, 2, 1, 8, 8, 9, 2, + 0, 0, {1, 6, 7, 6, 5, 4, 3, 2, 1, 2, 8, 9, 8, 1, 4, 5, 6, 7, 8, 4, 1, 1, 1, 1, 1}}}, - Eta2{9. / 17, 5. / 13, 8, 28}), + Eta2{8. / 17, 7. / 15, 9, 30}), std::make_tuple( ClusterTypes{Cluster{0, 0, {1, 4, 7, 2, 5, 6, 4, 3}}}, - Eta2{7. / 11, 6. / 10, 1, 21}), + Eta2{4. / 10, 4. / 11, 1, 21}), std::make_tuple( ClusterTypes{Cluster{0, 0, {1, 3, 2, 3, 4, 2}}}, - Eta2{3. / 5, 4. / 6, 1, 11})); + Eta2{3. / 5, 2. 
/ 5, 1, 11})); } TEST_CASE("compute_largest_2x2_subcluster", "[eta_calculation]") { From fca9d5d2fa328e8952d27f24e16f73120b5bccf7 Mon Sep 17 00:00:00 2001 From: Mazzoleni Alice Francesca Date: Tue, 15 Apr 2025 14:40:09 +0200 Subject: [PATCH 095/120] replaced extract template parameters --- include/aare/Cluster.hpp | 30 ++++++++++++++---------------- include/aare/ClusterFile.hpp | 8 ++------ include/aare/ClusterFinder.hpp | 8 +++----- include/aare/ClusterFinderMT.hpp | 2 +- 4 files changed, 20 insertions(+), 28 deletions(-) diff --git a/include/aare/Cluster.hpp b/include/aare/Cluster.hpp index a2c9b55..7eb1a13 100644 --- a/include/aare/Cluster.hpp +++ b/include/aare/Cluster.hpp @@ -32,6 +32,11 @@ struct Cluster { CoordType y; T data[ClusterSizeX * ClusterSizeY]; + static constexpr uint8_t cluster_size_x = ClusterSizeX; + static constexpr uint8_t cluster_size_y = ClusterSizeY; + using value_type = T; + using coord_type = CoordType; + T sum() const { return std::accumulate(data, data + ClusterSizeX * ClusterSizeY, 0); } @@ -64,6 +69,11 @@ template struct Cluster { int16_t y; T data[4]; + static constexpr uint8_t cluster_size_x = 2; + static constexpr uint8_t cluster_size_y = 2; + using value_type = T; + using coord_type = int16_t; + T sum() const { return std::accumulate(data, data + 4, 0); } std::pair max_sum_2x2() const { @@ -77,6 +87,10 @@ template struct Cluster { int16_t x; int16_t y; T data[9]; + static constexpr uint8_t cluster_size_x = 3; + static constexpr uint8_t cluster_size_y = 3; + using value_type = T; + using coord_type = int16_t; T sum() const { return std::accumulate(data, data + 9, 0); } @@ -102,20 +116,4 @@ struct is_cluster> : std::true_type {}; // Cluster template constexpr bool is_cluster_v = is_cluster::value; -template >> -struct extract_template_arguments; // Forward declaration - -// helper struct to extract template argument -template -struct extract_template_arguments< - Cluster> { - - using value_type = T; - static constexpr int 
cluster_size_x = ClusterSizeX; - static constexpr int cluster_size_y = ClusterSizeY; - using coordtype = CoordType; -}; - } // namespace aare diff --git a/include/aare/ClusterFile.hpp b/include/aare/ClusterFile.hpp index 06de985..b063008 100644 --- a/include/aare/ClusterFile.hpp +++ b/include/aare/ClusterFile.hpp @@ -465,13 +465,9 @@ bool ClusterFile::is_selected(ClusterType &cl) { } } - auto cluster_size_x = extract_template_arguments< - std::remove_reference_t>::cluster_size_x; - auto cluster_size_y = extract_template_arguments< - std::remove_reference_t>::cluster_size_y; - size_t cluster_center_index = - (cluster_size_x / 2) + (cluster_size_y / 2) * cluster_size_x; + (ClusterType::cluster_size_x / 2) + + (ClusterType::cluster_size_y / 2) * ClusterType::cluster_size_x; if (m_noise_map) { auto sum_1x1 = cl.data[cluster_center_index]; // central pixel diff --git a/include/aare/ClusterFinder.hpp b/include/aare/ClusterFinder.hpp index 8c3540a..b3538eb 100644 --- a/include/aare/ClusterFinder.hpp +++ b/include/aare/ClusterFinder.hpp @@ -20,11 +20,9 @@ class ClusterFinder { Pedestal m_pedestal; ClusterVector m_clusters; - static const uint8_t ClusterSizeX = - extract_template_arguments::cluster_size_x; - static const uint8_t ClusterSizeY = - extract_template_arguments::cluster_size_x; - using CT = typename extract_template_arguments::value_type; + static const uint8_t ClusterSizeX = ClusterType::cluster_size_x; + static const uint8_t ClusterSizeY = ClusterType::cluster_size_y; + using CT = typename ClusterType::value_type; public: /** diff --git a/include/aare/ClusterFinderMT.hpp b/include/aare/ClusterFinderMT.hpp index 75b6497..29fc715 100644 --- a/include/aare/ClusterFinderMT.hpp +++ b/include/aare/ClusterFinderMT.hpp @@ -34,7 +34,7 @@ template , typename FRAME_TYPE = uint16_t, typename PEDESTAL_TYPE = double> class ClusterFinderMT { - using CT = typename extract_template_arguments::value_type; + using CT = typename ClusterType::value_type; size_t m_current_thread{0}; 
size_t m_n_threads{0}; using Finder = ClusterFinder; From d4050ec557a870b17f9e2ef0bcde912f321d9d43 Mon Sep 17 00:00:00 2001 From: Mazzoleni Alice Francesca Date: Tue, 15 Apr 2025 14:57:25 +0200 Subject: [PATCH 096/120] enum is now enum class --- include/aare/CalculateEta.hpp | 11 ++--- include/aare/Interpolator.hpp | 10 ++--- src/CalculateEta.test.cpp | 75 +++++++++++++++++------------------ 3 files changed, 48 insertions(+), 48 deletions(-) diff --git a/include/aare/CalculateEta.hpp b/include/aare/CalculateEta.hpp index 1566de5..289e8bc 100644 --- a/include/aare/CalculateEta.hpp +++ b/include/aare/CalculateEta.hpp @@ -6,14 +6,14 @@ namespace aare { -typedef enum { +enum class corner : int { cBottomLeft = 0, cBottomRight = 1, cTopLeft = 2, cTopRight = 3 -} corner; +}; -typedef enum { +enum class pixel : int { pBottomLeft = 0, pBottom = 1, pBottomRight = 2, @@ -23,7 +23,7 @@ typedef enum { pTopLeft = 6, pTop = 7, pTopRight = 8 -} pixel; +}; template struct Eta2 { double x; @@ -132,7 +132,8 @@ Eta2 calculate_eta2(const Cluster &cl) { if ((cl.data[0] + cl.data[2]) != 0) eta.y = static_cast(cl.data[2]) / (cl.data[0] + cl.data[2]); eta.sum = cl.sum(); - eta.c = cBottomLeft; // TODO! This is not correct, but need to put something + eta.c = static_cast(corner::cBottomLeft); // TODO! 
This is not correct, + // but need to put something return eta; } diff --git a/include/aare/Interpolator.hpp b/include/aare/Interpolator.hpp index 7e3a1c1..85ccf29 100644 --- a/include/aare/Interpolator.hpp +++ b/include/aare/Interpolator.hpp @@ -70,20 +70,20 @@ Interpolator::interpolate(const ClusterVector &clusters) { // cBottomRight = 1, // cTopLeft = 2, // cTopRight = 3 - switch (eta.c) { - case cTopLeft: + switch (static_cast(eta.c)) { + case corner::cTopLeft: dX = -1.; dY = 0; break; - case cTopRight:; + case corner::cTopRight:; dX = 0; dY = 0; break; - case cBottomLeft: + case corner::cBottomLeft: dX = -1.; dY = -1.; break; - case cBottomRight: + case corner::cBottomRight: dX = 0.; dY = -1.; break; diff --git a/src/CalculateEta.test.cpp b/src/CalculateEta.test.cpp index cdec79b..820ab44 100644 --- a/src/CalculateEta.test.cpp +++ b/src/CalculateEta.test.cpp @@ -21,10 +21,12 @@ using ClusterTypes = auto get_test_parameters() { return GENERATE( std::make_tuple(ClusterTypes{Cluster{0, 0, {1, 2, 3, 1}}}, - Eta2{2. / 3, 3. / 4, corner::cBottomLeft, 7}), + Eta2{2. / 3, 3. / 4, + static_cast(corner::cBottomLeft), 7}), std::make_tuple( ClusterTypes{Cluster{0, 0, {1, 2, 3, 4, 5, 6, 1, 2, 7}}}, - Eta2{6. / 11, 2. / 7, corner::cTopRight, 20}), + Eta2{6. / 11, 2. 
/ 7, static_cast(corner::cTopRight), + 20}), std::make_tuple(ClusterTypes{Cluster{ 0, 0, {1, 6, 7, 6, 5, 4, 3, 2, 1, 2, 8, 9, 8, 1, 4, 5, 6, 7, 8, 4, 1, 1, 1, 1, 1}}}, @@ -61,14 +63,13 @@ TEST_CASE("calculate_eta2", "[eta_calculation]") { CHECK(eta.sum == expected_eta.sum); } +// 3x3 cluster layout (rotated to match the cBottomLeft enum): +// 6, 7, 8 +// 3, 4, 5 +// 0, 1, 2 -//3x3 cluster layout (rotated to match the cBottomLeft enum): -// 6, 7, 8 -// 3, 4, 5 -// 0, 1, 2 - - -TEST_CASE("Calculate eta2 for a 3x3 int32 cluster with the largest 2x2 sum in the bottom left", +TEST_CASE("Calculate eta2 for a 3x3 int32 cluster with the largest 2x2 sum in " + "the bottom left", "[eta_calculation]") { // Create a 3x3 cluster @@ -84,45 +85,43 @@ TEST_CASE("Calculate eta2 for a 3x3 int32 cluster with the largest 2x2 sum in th cl.data[6] = 8; cl.data[7] = 2; cl.data[8] = 3; - + // 8, 2, 3 // 20, 50, 3 // 30, 23, 5 auto eta = calculate_eta2(cl); - CHECK(eta.c == corner::cBottomLeft); + CHECK(eta.c == static_cast(corner::cBottomLeft)); CHECK(eta.x == 50.0 / (20 + 50)); // 4/(3+4) CHECK(eta.y == 50.0 / (23 + 50)); // 4/(1+4) - CHECK(eta.sum == 30+23+20+50); - + CHECK(eta.sum == 30 + 23 + 20 + 50); } -TEST_CASE("Calculate eta2 for a 3x3 int32 cluster with the largest 2x2 sum in the top left", - "[eta_calculation]") { +TEST_CASE("Calculate eta2 for a 3x3 int32 cluster with the largest 2x2 sum in " + "the top left", + "[eta_calculation]") { -// Create a 3x3 cluster -Cluster cl; -cl.x = 0; -cl.y = 0; -cl.data[0] = 8; -cl.data[1] = 12; -cl.data[2] = 5; -cl.data[3] = 77; -cl.data[4] = 80; -cl.data[5] = 3; -cl.data[6] = 82; -cl.data[7] = 91; -cl.data[8] = 3; + // Create a 3x3 cluster + Cluster cl; + cl.x = 0; + cl.y = 0; + cl.data[0] = 8; + cl.data[1] = 12; + cl.data[2] = 5; + cl.data[3] = 77; + cl.data[4] = 80; + cl.data[5] = 3; + cl.data[6] = 82; + cl.data[7] = 91; + cl.data[8] = 3; -// 82, 91, 3 -// 77, 80, 3 -// 8, 12, 5 - -auto eta = calculate_eta2(cl); -CHECK(eta.c == 
corner::cTopLeft); -CHECK(eta.x == 80. / (77 + 80)); // 4/(3+4) -CHECK(eta.y == 91.0 / (91 + 80)); // 7/(7+4) -CHECK(eta.sum == 77+80+82+91); + // 82, 91, 3 + // 77, 80, 3 + // 8, 12, 5 + auto eta = calculate_eta2(cl); + CHECK(eta.c == static_cast(corner::cTopLeft)); + CHECK(eta.x == 80. / (77 + 80)); // 4/(3+4) + CHECK(eta.y == 91.0 / (91 + 80)); // 7/(7+4) + CHECK(eta.sum == 77 + 80 + 82 + 91); } - From acd9d5d4876a5b43c2f62cbf41eed71666f74d2d Mon Sep 17 00:00:00 2001 From: Mazzoleni Alice Francesca Date: Tue, 15 Apr 2025 15:15:34 +0200 Subject: [PATCH 097/120] moved parts of ClusterFile implementation into declaration --- include/aare/ClusterFile.hpp | 301 +++++++++++++++-------------------- 1 file changed, 129 insertions(+), 172 deletions(-) diff --git a/include/aare/ClusterFile.hpp b/include/aare/ClusterFile.hpp index b063008..ab6488a 100644 --- a/include/aare/ClusterFile.hpp +++ b/include/aare/ClusterFile.hpp @@ -60,26 +60,81 @@ class ClusterFile { * @throws std::runtime_error if the file could not be opened */ ClusterFile(const std::filesystem::path &fname, size_t chunk_size = 1000, - const std::string &mode = "r"); + const std::string &mode = "r") - ~ClusterFile(); + : m_filename(fname.string()), m_chunk_size(chunk_size), m_mode(mode) { + + if (mode == "r") { + fp = fopen(m_filename.c_str(), "rb"); + if (!fp) { + throw std::runtime_error("Could not open file for reading: " + + m_filename); + } + } else if (mode == "w") { + fp = fopen(m_filename.c_str(), "wb"); + if (!fp) { + throw std::runtime_error("Could not open file for writing: " + + m_filename); + } + } else if (mode == "a") { + fp = fopen(m_filename.c_str(), "ab"); + if (!fp) { + throw std::runtime_error("Could not open file for appending: " + + m_filename); + } + } else { + throw std::runtime_error("Unsupported mode: " + mode); + } + } + + ~ClusterFile() { close(); } /** - * @brief Read n_clusters clusters from the file discarding frame numbers. 
- * If EOF is reached the returned vector will have less than n_clusters - * clusters + * @brief Read n_clusters clusters from the file discarding + * frame numbers. If EOF is reached the returned vector will + * have less than n_clusters clusters */ - ClusterVector read_clusters(size_t n_clusters); + ClusterVector read_clusters(size_t n_clusters) { + if (m_mode != "r") { + throw std::runtime_error("File not opened for reading"); + } + if (m_noise_map || m_roi) { + return read_clusters_with_cut(n_clusters); + } else { + return read_clusters_without_cut(n_clusters); + } + } /** - * @brief Read a single frame from the file and return the clusters. The - * cluster vector will have the frame number set. - * @throws std::runtime_error if the file is not opened for reading or the - * file pointer not at the beginning of a frame + * @brief Read a single frame from the file and return the + * clusters. The cluster vector will have the frame number + * set. + * @throws std::runtime_error if the file is not opened for + * reading or the file pointer not at the beginning of a + * frame */ - ClusterVector read_frame(); + ClusterVector read_frame() { + if (m_mode != "r") { + throw std::runtime_error(LOCATION + "File not opened for reading"); + } + if (m_noise_map || m_roi) { + return read_frame_with_cut(); + } else { + return read_frame_without_cut(); + } + } - void write_frame(const ClusterVector &clusters); + void write_frame(const ClusterVector &clusters) { + if (m_mode != "w" && m_mode != "a") { + throw std::runtime_error("File not opened for writing"); + } + + int32_t frame_number = clusters.frame_number(); + fwrite(&frame_number, sizeof(frame_number), 1, fp); + uint32_t n_clusters = clusters.size(); + fwrite(&n_clusters, sizeof(n_clusters), 1, fp); + fwrite(clusters.data(), clusters.item_size(), clusters.size(), fp); + } /** * @brief Return the chunk size @@ -87,39 +142,80 @@ class ClusterFile { size_t chunk_size() const { return m_chunk_size; } /** - * @brief Set the 
region of interest to use when reading clusters. If set - * only clusters within the ROI will be read. + * @brief Set the region of interest to use when reading + * clusters. If set only clusters within the ROI will be + * read. */ - void set_roi(ROI roi); + void set_roi(ROI roi) { m_roi = roi; } /** - * @brief Set the noise map to use when reading clusters. If set clusters - * below the noise level will be discarded. Selection criteria one of: - * Central pixel above noise, highest 2x2 sum above 2 * noise, total sum - * above 3 * noise. + * @brief Set the noise map to use when reading clusters. If + * set clusters below the noise level will be discarded. + * Selection criteria one of: Central pixel above noise, + * highest 2x2 sum above 2 * noise, total sum above 3 * + * noise. */ - void set_noise_map(const NDView noise_map); + void set_noise_map(const NDView noise_map) { + m_noise_map = NDArray(noise_map); + } /** - * @brief Set the gain map to use when reading clusters. If set the gain map - * will be applied to the clusters that pass ROI and noise_map selection. + * @brief Set the gain map to use when reading clusters. If + * set the gain map will be applied to the clusters that + * pass ROI and noise_map selection. */ - void set_gain_map(const NDView gain_map); + void set_gain_map(const NDView gain_map) { + m_gain_map = GainMap(gain_map); + } - void set_gain_map(const GainMap &gain_map); + void set_gain_map(const GainMap &gain_map) { m_gain_map = gain_map; } - void set_gain_map(const GainMap &&gain_map); + void set_gain_map(const GainMap &&gain_map) { m_gain_map = gain_map; } /** - * @brief Close the file. If not closed the file will be closed in the - * destructor + * @brief Close the file. 
If not closed the file will be + * closed in the destructor */ - void close(); + void close() { + if (fp) { + fclose(fp); + fp = nullptr; + } + } /** @brief Open the file in specific mode * */ - void open(const std::string &mode); + void open(const std::string &mode) { + if (fp) { + close(); + } + + if (mode == "r") { + fp = fopen(m_filename.c_str(), "rb"); + if (!fp) { + throw std::runtime_error("Could not open file for reading: " + + m_filename); + } + m_mode = "r"; + } else if (mode == "w") { + fp = fopen(m_filename.c_str(), "wb"); + if (!fp) { + throw std::runtime_error("Could not open file for writing: " + + m_filename); + } + m_mode = "w"; + } else if (mode == "a") { + fp = fopen(m_filename.c_str(), "ab"); + if (!fp) { + throw std::runtime_error("Could not open file for appending: " + + m_filename); + } + m_mode = "a"; + } else { + throw std::runtime_error("Unsupported mode: " + mode); + } + } private: ClusterVector read_clusters_with_cut(size_t n_clusters); @@ -130,133 +226,6 @@ class ClusterFile { ClusterType read_one_cluster(); }; -template -ClusterFile::ClusterFile( - const std::filesystem::path &fname, size_t chunk_size, - const std::string &mode) - : m_filename(fname.string()), m_chunk_size(chunk_size), m_mode(mode) { - - if (mode == "r") { - fp = fopen(m_filename.c_str(), "rb"); - if (!fp) { - throw std::runtime_error("Could not open file for reading: " + - m_filename); - } - } else if (mode == "w") { - fp = fopen(m_filename.c_str(), "wb"); - if (!fp) { - throw std::runtime_error("Could not open file for writing: " + - m_filename); - } - } else if (mode == "a") { - fp = fopen(m_filename.c_str(), "ab"); - if (!fp) { - throw std::runtime_error("Could not open file for appending: " + - m_filename); - } - } else { - throw std::runtime_error("Unsupported mode: " + mode); - } -} - -template -ClusterFile::~ClusterFile() { - close(); -} - -template -void ClusterFile::close() { - if (fp) { - fclose(fp); - fp = nullptr; - } -} - -template -void 
ClusterFile::open(const std::string &mode) { - if (fp) { - close(); - } - - if (mode == "r") { - fp = fopen(m_filename.c_str(), "rb"); - if (!fp) { - throw std::runtime_error("Could not open file for reading: " + - m_filename); - } - m_mode = "r"; - } else if (mode == "w") { - fp = fopen(m_filename.c_str(), "wb"); - if (!fp) { - throw std::runtime_error("Could not open file for writing: " + - m_filename); - } - m_mode = "w"; - } else if (mode == "a") { - fp = fopen(m_filename.c_str(), "ab"); - if (!fp) { - throw std::runtime_error("Could not open file for appending: " + - m_filename); - } - m_mode = "a"; - } else { - throw std::runtime_error("Unsupported mode: " + mode); - } -} - -template -void ClusterFile::set_roi(ROI roi) { - m_roi = roi; -} -template -void ClusterFile::set_noise_map( - const NDView noise_map) { - m_noise_map = NDArray(noise_map); -} -template -void ClusterFile::set_gain_map( - const NDView gain_map) { - m_gain_map = GainMap(gain_map); -} - -template -void ClusterFile::set_gain_map(const GainMap &gain_map) { - m_gain_map = gain_map; -} - -template -void ClusterFile::set_gain_map(const GainMap &&gain_map) { - m_gain_map = gain_map; -} - -// TODO generally supported for all clsuter types -template -void ClusterFile::write_frame( - const ClusterVector &clusters) { - if (m_mode != "w" && m_mode != "a") { - throw std::runtime_error("File not opened for writing"); - } - - int32_t frame_number = clusters.frame_number(); - fwrite(&frame_number, sizeof(frame_number), 1, fp); - uint32_t n_clusters = clusters.size(); - fwrite(&n_clusters, sizeof(n_clusters), 1, fp); - fwrite(clusters.data(), clusters.item_size(), clusters.size(), fp); -} - -template -ClusterVector -ClusterFile::read_clusters(size_t n_clusters) { - if (m_mode != "r") { - throw std::runtime_error("File not opened for reading"); - } - if (m_noise_map || m_roi) { - return read_clusters_with_cut(n_clusters); - } else { - return read_clusters_without_cut(n_clusters); - } -} - template 
ClusterVector ClusterFile::read_clusters_without_cut(size_t n_clusters) { @@ -276,8 +245,8 @@ ClusterFile::read_clusters_without_cut(size_t n_clusters) { // if there are photons left from previous frame read them first if (nph) { if (nph > n_clusters) { - // if we have more photons left in the frame then photons to read we - // read directly the requested number + // if we have more photons left in the frame then photons to + // read we read directly the requested number nn = n_clusters; } else { nn = nph; @@ -343,8 +312,8 @@ ClusterFile::read_clusters_with_cut(size_t n_clusters) { while (fread(&frame_number, sizeof(frame_number), 1, fp)) { if (fread(&m_num_left, sizeof(m_num_left), 1, fp)) { clusters.set_frame_number( - frame_number); // cluster vector will hold the last frame - // number + frame_number); // cluster vector will hold the last + // frame number while (m_num_left && clusters.size() < n_clusters) { ClusterType c = read_one_cluster(); if (is_selected(c)) { @@ -375,18 +344,6 @@ ClusterType ClusterFile::read_one_cluster() { return c; } -template -ClusterVector ClusterFile::read_frame() { - if (m_mode != "r") { - throw std::runtime_error(LOCATION + "File not opened for reading"); - } - if (m_noise_map || m_roi) { - return read_frame_with_cut(); - } else { - return read_frame_without_cut(); - } -} - template ClusterVector ClusterFile::read_frame_without_cut() { From 14211047ffdf4614aa75730a8f2e56eeb762920a Mon Sep 17 00:00:00 2001 From: Mazzoleni Alice Francesca Date: Wed, 16 Apr 2025 14:22:44 +0200 Subject: [PATCH 098/120] added function warpper around ClusterFinderMT and ClusterCollector to construct object --- python/aare/ClusterFinder.py | 38 +++++++++++++++++++++++++++++++++++- python/aare/__init__.py | 3 ++- 2 files changed, 39 insertions(+), 2 deletions(-) diff --git a/python/aare/ClusterFinder.py b/python/aare/ClusterFinder.py index a2042a4..f678dd1 100644 --- a/python/aare/ClusterFinder.py +++ b/python/aare/ClusterFinder.py @@ -1,5 +1,5 @@ -from 
._aare import ClusterFinder_Cluster3x3i +from ._aare import ClusterFinder_Cluster3x3i, ClusterFinder_Cluster2x2i, ClusterFinderMT_Cluster3x3i, ClusterFinderMT_Cluster2x2i, ClusterCollector_Cluster3x3i, ClusterCollector_Cluster2x2i import numpy as np def ClusterFinder(image_size, cluster_size, n_sigma=5, dtype = np.int32, capacity = 1024): @@ -9,6 +9,42 @@ def ClusterFinder(image_size, cluster_size, n_sigma=5, dtype = np.int32, capacit """ if dtype == np.int32 and cluster_size == (3,3): return ClusterFinder_Cluster3x3i(image_size, n_sigma = n_sigma, capacity=capacity) + elif dtype == np.int32 and cluster_size == (2,2): + return ClusterFinder_Cluster2x2i(image_size, n_sigma = n_sigma, capacity=capacity) else: #TODO! add the other formats raise ValueError(f"Unsupported dtype: {dtype}. Only np.int32 is supported.") + + +def ClusterFinderMT(image_size, cluster_size = (3,3), dtype=np.int32, n_sigma=5, capacity = 1024, n_threads = 3): + """ + Factory function to create a ClusterFinderMT object. Provides a cleaner syntax for + the templated ClusterFinderMT in C++. + """ + + if dtype == np.int32 and cluster_size == (3,3): + return ClusterFinderMT_Cluster3x3i(image_size, n_sigma = n_sigma, + capacity = capacity, n_threads = n_threads) + elif dtype == np.int32 and cluster_size == (2,2): + return ClusterFinderMT_Cluster2x2i(image_size, n_sigma = n_sigma, + capacity = capacity, n_threads = n_threads) + else: + #TODO! add the other formats + raise ValueError(f"Unsupported dtype: {dtype}. Only np.int32 is supported.") + + +def ClusterCollector(clusterfindermt, cluster_size = (3,3), dtype=np.int32): + """ + Factory function to create a ClusterCollector object. Provides a cleaner syntax for + the templated ClusterCollector in C++. + """ + + if dtype == np.int32 and cluster_size == (3,3): + return ClusterCollector_Cluster3x3i(clusterfindermt) + elif dtype == np.int32 and cluster_size == (2,2): + return ClusterCollector_Cluster2x2i(clusterfindermt) + + else: + #TODO! 
add the other formats + raise ValueError(f"Unsupported dtype: {dtype}. Only np.int32 is supported.") + diff --git a/python/aare/__init__.py b/python/aare/__init__.py index b1eb604..096dddf 100644 --- a/python/aare/__init__.py +++ b/python/aare/__init__.py @@ -11,9 +11,10 @@ from ._aare import ROI # from ._aare import ClusterFinderMT, ClusterCollector, ClusterFileSink, ClusterVector_i -from .ClusterFinder import ClusterFinder +from .ClusterFinder import ClusterFinder, ClusterCollector, ClusterFinderMT from .ClusterVector import ClusterVector + from ._aare import fit_gaus, fit_pol1 from ._aare import Interpolator from ._aare import calculate_eta2 From c49a2fdf8e0fc8a056e8ee1acaa1808101f48866 Mon Sep 17 00:00:00 2001 From: Mazzoleni Alice Francesca Date: Wed, 16 Apr 2025 16:40:42 +0200 Subject: [PATCH 099/120] removed cluster_2x2 and cluster3x3 specializations --- include/aare/Cluster.hpp | 111 +++++++++++------------------- include/aare/ClusterFinder.hpp | 15 ++-- include/aare/GainMap.hpp | 5 +- python/src/bind_ClusterVector.hpp | 65 +++++++++-------- python/src/cluster.hpp | 15 ++-- src/Cluster.test.cpp | 13 ---- src/ClusterFile.test.cpp | 20 +++--- 7 files changed, 93 insertions(+), 151 deletions(-) diff --git a/include/aare/Cluster.hpp b/include/aare/Cluster.hpp index 7eb1a13..889593b 100644 --- a/include/aare/Cluster.hpp +++ b/include/aare/Cluster.hpp @@ -16,94 +16,61 @@ namespace aare { -template -constexpr bool is_valid_cluster = - std::is_arithmetic_v && std::is_integral_v && - (ClusterSizeX > 0) && (ClusterSizeY > 0); - // requires clause c++20 maybe update template >> + typename CoordType = int16_t> struct Cluster { + + static_assert(std::is_arithmetic_v, "T needs to be an arithmetic type"); + static_assert(std::is_integral_v, + "CoordType needs to be an integral type"); + static_assert(ClusterSizeX > 0 && ClusterSizeY > 0, + "Cluster sizes must be bigger than zero"); + CoordType x; CoordType y; - T data[ClusterSizeX * ClusterSizeY]; + std::array data; 
static constexpr uint8_t cluster_size_x = ClusterSizeX; static constexpr uint8_t cluster_size_y = ClusterSizeY; using value_type = T; using coord_type = CoordType; - T sum() const { - return std::accumulate(data, data + ClusterSizeX * ClusterSizeY, 0); - } + T sum() const { return std::accumulate(data.begin(), data.end(), T{}); } std::pair max_sum_2x2() const { - constexpr size_t num_2x2_subclusters = - (ClusterSizeX - 1) * (ClusterSizeY - 1); + if constexpr (cluster_size_x == 3 && cluster_size_y == 3) { + std::array sum_2x2_subclusters; + sum_2x2_subclusters[0] = data[0] + data[1] + data[3] + data[4]; + sum_2x2_subclusters[1] = data[1] + data[2] + data[4] + data[5]; + sum_2x2_subclusters[2] = data[3] + data[4] + data[6] + data[7]; + sum_2x2_subclusters[3] = data[4] + data[5] + data[7] + data[8]; + int index = std::max_element(sum_2x2_subclusters.begin(), + sum_2x2_subclusters.end()) - + sum_2x2_subclusters.begin(); + return std::make_pair(sum_2x2_subclusters[index], index); + } else if constexpr (cluster_size_x == 2 && cluster_size_y == 2) { + return std::make_pair(data[0] + data[1] + data[2] + data[3], 0); + } else { + constexpr size_t num_2x2_subclusters = + (ClusterSizeX - 1) * (ClusterSizeY - 1); - std::array sum_2x2_subcluster; - for (size_t i = 0; i < ClusterSizeY - 1; ++i) { - for (size_t j = 0; j < ClusterSizeX - 1; ++j) - sum_2x2_subcluster[i * (ClusterSizeX - 1) + j] = - data[i * ClusterSizeX + j] + - data[i * ClusterSizeX + j + 1] + - data[(i + 1) * ClusterSizeX + j] + - data[(i + 1) * ClusterSizeX + j + 1]; + std::array sum_2x2_subcluster; + for (size_t i = 0; i < ClusterSizeY - 1; ++i) { + for (size_t j = 0; j < ClusterSizeX - 1; ++j) + sum_2x2_subcluster[i * (ClusterSizeX - 1) + j] = + data[i * ClusterSizeX + j] + + data[i * ClusterSizeX + j + 1] + + data[(i + 1) * ClusterSizeX + j] + + data[(i + 1) * ClusterSizeX + j + 1]; + } + + int index = std::max_element(sum_2x2_subcluster.begin(), + sum_2x2_subcluster.end()) - + sum_2x2_subcluster.begin(); + 
return std::make_pair(sum_2x2_subcluster[index], index); } - - int index = std::max_element(sum_2x2_subcluster.begin(), - sum_2x2_subcluster.end()) - - sum_2x2_subcluster.begin(); - return std::make_pair(sum_2x2_subcluster[index], index); - } -}; - -// Specialization for 2x2 clusters (only one sum exists) -template struct Cluster { - int16_t x; - int16_t y; - T data[4]; - - static constexpr uint8_t cluster_size_x = 2; - static constexpr uint8_t cluster_size_y = 2; - using value_type = T; - using coord_type = int16_t; - - T sum() const { return std::accumulate(data, data + 4, 0); } - - std::pair max_sum_2x2() const { - return std::make_pair(data[0] + data[1] + data[2] + data[3], - 0); // Only one possible 2x2 sum - } -}; - -// Specialization for 3x3 clusters -template struct Cluster { - int16_t x; - int16_t y; - T data[9]; - static constexpr uint8_t cluster_size_x = 3; - static constexpr uint8_t cluster_size_y = 3; - using value_type = T; - using coord_type = int16_t; - - T sum() const { return std::accumulate(data, data + 9, 0); } - - std::pair max_sum_2x2() const { - std::array sum_2x2_subclusters; - sum_2x2_subclusters[0] = data[0] + data[1] + data[3] + data[4]; - sum_2x2_subclusters[1] = data[1] + data[2] + data[4] + data[5]; - sum_2x2_subclusters[2] = data[3] + data[4] + data[6] + data[7]; - sum_2x2_subclusters[3] = data[4] + data[5] + data[7] + data[8]; - int index = std::max_element(sum_2x2_subclusters.begin(), - sum_2x2_subclusters.end()) - - sum_2x2_subclusters.begin(); - return std::make_pair(sum_2x2_subclusters[index], index); } }; diff --git a/include/aare/ClusterFinder.hpp b/include/aare/ClusterFinder.hpp index b3538eb..ea11162 100644 --- a/include/aare/ClusterFinder.hpp +++ b/include/aare/ClusterFinder.hpp @@ -77,7 +77,6 @@ class ClusterFinder { int has_center_pixel_y = ClusterSizeY % 2; m_clusters.set_frame_number(frame_number); - std::vector cluster_data(ClusterSizeX * ClusterSizeY); for (int iy = 0; iy < frame.shape(0); iy++) { for (int ix = 0; ix < 
frame.shape(1); ix++) { @@ -124,8 +123,9 @@ class ClusterFinder { // Store cluster if (value == max) { - // Zero out the cluster data - std::fill(cluster_data.begin(), cluster_data.end(), 0); + ClusterType cluster{}; + cluster.x = ix; + cluster.y = iy; // Fill the cluster data since we have a photon to store // It's worth redoing the look since most of the time we @@ -139,20 +139,15 @@ class ClusterFinder { static_cast(frame(iy + ir, ix + ic)) - static_cast( m_pedestal.mean(iy + ir, ix + ic)); - cluster_data[i] = + cluster.data[i] = tmp; // Watch for out of bounds access i++; } } } - ClusterType new_cluster{}; - new_cluster.x = ix; - new_cluster.y = iy; - std::copy(cluster_data.begin(), cluster_data.end(), - new_cluster.data); // Add the cluster to the output ClusterVector - m_clusters.push_back(new_cluster); + m_clusters.push_back(cluster); } } } diff --git a/include/aare/GainMap.hpp b/include/aare/GainMap.hpp index 41acb33..5311916 100644 --- a/include/aare/GainMap.hpp +++ b/include/aare/GainMap.hpp @@ -44,9 +44,8 @@ class GainMap { cl.data[j] = cl.data[j] * static_cast(m_gain_map(y, x)); } } else { - memset(cl.data, 0, - ClusterSizeX * ClusterSizeY * - sizeof(T)); // clear edge clusters + // clear edge clusters + cl.data.fill(0); } } } diff --git a/python/src/bind_ClusterVector.hpp b/python/src/bind_ClusterVector.hpp index f7fa796..ea02487 100644 --- a/python/src/bind_ClusterVector.hpp +++ b/python/src/bind_ClusterVector.hpp @@ -21,16 +21,14 @@ using namespace aare; #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" - template void define_ClusterVector(py::module &m, const std::string &typestr) { - using ClusterType = - Cluster; + using ClusterType = Cluster; auto class_name = fmt::format("ClusterVector_{}", typestr); py::class_, void>>( + Cluster, void>>( m, class_name.c_str(), py::buffer_protocol()) @@ -41,10 +39,11 @@ void define_ClusterVector(py::module &m, const std::string &typestr) { self.push_back(cluster); }) - .def("sum", 
[](ClusterVector &self) { - auto *vec = new std::vector(self.sum()); - return return_vector(vec); - }) + .def("sum", + [](ClusterVector &self) { + auto *vec = new std::vector(self.sum()); + return return_vector(vec); + }) .def_property_readonly("size", &ClusterVector::size) .def("item_size", &ClusterVector::item_size) .def_property_readonly("fmt", @@ -72,32 +71,30 @@ void define_ClusterVector(py::module &m, const std::string &typestr) { ); }); - // Free functions using ClusterVector - m.def("hitmap", - [](std::array image_size, ClusterVector &cv) { - - // Create a numpy array to hold the hitmap - // The shape of the array is (image_size[0], image_size[1]) - // note that the python array is passed as [row, col] which - // is the opposite of the clusters [x,y] - py::array_t hitmap(image_size); - auto r = hitmap.mutable_unchecked<2>(); - - // Initialize hitmap to 0 - for (py::ssize_t i = 0; i < r.shape(0); i++) - for (py::ssize_t j = 0; j < r.shape(1); j++) - r(i, j) = 0; - + // Free functions using ClusterVector + m.def("hitmap", + [](std::array image_size, ClusterVector &cv) { + // Create a numpy array to hold the hitmap + // The shape of the array is (image_size[0], image_size[1]) + // note that the python array is passed as [row, col] which + // is the opposite of the clusters [x,y] + py::array_t hitmap(image_size); + auto r = hitmap.mutable_unchecked<2>(); - // Loop over the clusters and increment the hitmap - // Skip out of bound clusters - for (const auto& cluster : cv) { - auto x = cluster.x; - auto y = cluster.y; - if(x>( + py::class_>( m, class_name.c_str(), py::buffer_protocol()) .def(py::init([](uint8_t x, uint8_t y, py::array_t data) { py::buffer_info buf_info = data.request(); - Type *ptr = static_cast(buf_info.ptr); - Cluster cluster; + Cluster cluster; cluster.x = x; cluster.y = y; - std::copy(ptr, ptr + ClusterSizeX * ClusterSizeY, - cluster.data); // Copy array contents + auto r = data.template unchecked<1>(); // no bounds checks + for (py::ssize_t i 
= 0; i < data.size(); ++i) { + cluster.data[i] = r(i); + } return cluster; })); @@ -64,9 +65,6 @@ void define_cluster(py::module &m, const std::string &typestr) { */ } - - - template void define_cluster_finder_mt_bindings(py::module &m, @@ -206,6 +204,5 @@ void define_cluster_finder_bindings(py::module &m, const std::string &typestr) { return; }, py::arg(), py::arg("frame_number") = 0); - } #pragma GCC diagnostic pop diff --git a/src/Cluster.test.cpp b/src/Cluster.test.cpp index 879a5e7..ba9cda1 100644 --- a/src/Cluster.test.cpp +++ b/src/Cluster.test.cpp @@ -14,19 +14,6 @@ using namespace aare; -TEST_CASE("Correct Instantiation of Cluster and ClusterVector", - "[.cluster][.instantiation]") { - - CHECK(is_valid_cluster); - CHECK(is_valid_cluster); - CHECK(not is_valid_cluster); - CHECK(not is_valid_cluster); - CHECK(not is_valid_cluster); - - CHECK(not is_cluster_v); - CHECK(is_cluster_v>); -} - TEST_CASE("Test sum of Cluster", "[.cluster]") { Cluster cluster{0, 0, {1, 2, 3, 4}}; diff --git a/src/ClusterFile.test.cpp b/src/ClusterFile.test.cpp index 024bed4..3f15332 100644 --- a/src/ClusterFile.test.cpp +++ b/src/ClusterFile.test.cpp @@ -311,19 +311,19 @@ TEST_CASE("Write cluster with potential padding", "[.files][.ClusterFile]") { CHECK(read_cluster_vector.at(0).x == clustervec.at(0).x); CHECK(read_cluster_vector.at(0).y == clustervec.at(0).y); - CHECK(std::equal(clustervec.at(0).data, clustervec.at(0).data + 9, - read_cluster_vector.at(0).data, [](double a, double b) { - return std::abs(a - b) < - std::numeric_limits::epsilon(); - })); + CHECK(std::equal( + clustervec.at(0).data.begin(), clustervec.at(0).data.end(), + read_cluster_vector.at(0).data.begin(), [](double a, double b) { + return std::abs(a - b) < std::numeric_limits::epsilon(); + })); CHECK(read_cluster_vector.at(1).x == clustervec.at(1).x); CHECK(read_cluster_vector.at(1).y == clustervec.at(1).y); - CHECK(std::equal(clustervec.at(1).data, std::end(clustervec.at(1).data), - 
read_cluster_vector.at(1).data, [](double a, double b) { - return std::abs(a - b) < - std::numeric_limits::epsilon(); - })); + CHECK(std::equal( + clustervec.at(1).data.begin(), clustervec.at(1).data.end(), + read_cluster_vector.at(1).data.begin(), [](double a, double b) { + return std::abs(a - b) < std::numeric_limits::epsilon(); + })); } TEST_CASE("Read frame and modify cluster data", "[.files][.ClusterFile]") { From 177459c98a283b5f6afcee8683194f086e3e3a72 Mon Sep 17 00:00:00 2001 From: mazzol_a Date: Thu, 17 Apr 2025 17:09:53 +0200 Subject: [PATCH 100/120] added multithreaded cluster finder test --- CMakeLists.txt | 3 +- include/aare/ClusterCollector.hpp | 6 +- include/aare/ClusterFinderMT.hpp | 3 + src/ClusterFinderMT.test.cpp | 99 +++++++++++++++++++++++++++++++ 4 files changed, 109 insertions(+), 2 deletions(-) create mode 100644 src/ClusterFinderMT.test.cpp diff --git a/CMakeLists.txt b/CMakeLists.txt index b57f05f..3c0d03a 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -275,7 +275,7 @@ else() if(CMAKE_BUILD_TYPE STREQUAL "Release") message(STATUS "Release build") - target_compile_options(aare_compiler_flags INTERFACE -O3) + target_compile_options(aare_compiler_flags INTERFACE -O3 -g) else() message(STATUS "Debug build") endif() @@ -426,6 +426,7 @@ if(AARE_TESTS) ${CMAKE_CURRENT_SOURCE_DIR}/src/Cluster.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/CalculateEta.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/ClusterFile.test.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/ClusterFinderMT.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/Pedestal.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/JungfrauDataFile.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyFile.test.cpp diff --git a/include/aare/ClusterCollector.hpp b/include/aare/ClusterCollector.hpp index cb49f58..ae78a8e 100644 --- a/include/aare/ClusterCollector.hpp +++ b/include/aare/ClusterCollector.hpp @@ -37,7 +37,11 @@ class ClusterCollector { public: ClusterCollector(ClusterFinderMT *source) { m_source = source->sink(); - m_thread = 
std::thread(&ClusterCollector::process, this); + m_thread = + std::thread(&ClusterCollector::process, + this); // only one process does that so why isnt it + // automatically written to m_cluster in collect + // - instead of writing first to m_sink? } void stop() { m_stop_requested = true; diff --git a/include/aare/ClusterFinderMT.hpp b/include/aare/ClusterFinderMT.hpp index 29fc715..2dfb279 100644 --- a/include/aare/ClusterFinderMT.hpp +++ b/include/aare/ClusterFinderMT.hpp @@ -34,6 +34,7 @@ template , typename FRAME_TYPE = uint16_t, typename PEDESTAL_TYPE = double> class ClusterFinderMT { + protected: using CT = typename ClusterType::value_type; size_t m_current_thread{0}; size_t m_n_threads{0}; @@ -50,6 +51,7 @@ class ClusterFinderMT { std::thread m_collect_thread; std::chrono::milliseconds m_default_wait{1}; + private: std::atomic m_stop_requested{false}; std::atomic m_processing_threads_stopped{true}; @@ -120,6 +122,7 @@ class ClusterFinderMT { ClusterFinderMT(Shape<2> image_size, PEDESTAL_TYPE nSigma = 5.0, size_t capacity = 2000, size_t n_threads = 3) : m_n_threads(n_threads) { + for (size_t i = 0; i < n_threads; i++) { m_cluster_finders.push_back( std::make_unique< diff --git a/src/ClusterFinderMT.test.cpp b/src/ClusterFinderMT.test.cpp new file mode 100644 index 0000000..9289592 --- /dev/null +++ b/src/ClusterFinderMT.test.cpp @@ -0,0 +1,99 @@ + +#include "aare/ClusterFinderMT.hpp" +#include "aare/Cluster.hpp" +#include "aare/ClusterCollector.hpp" +#include "aare/File.hpp" + +#include "test_config.hpp" + +#include +#include +#include + +using namespace aare; + +// wrapper function to access private member variables for testing +template +class ClusterFinderMTWrapper + : public ClusterFinderMT { + + public: + ClusterFinderMTWrapper(Shape<2> image_size, PEDESTAL_TYPE nSigma = 5.0, + size_t capacity = 2000, size_t n_threads = 3) + : ClusterFinderMT( + image_size, nSigma, capacity, n_threads) {} + + size_t get_m_input_queues_size() const { + return 
this->m_input_queues.size(); + } + + size_t get_m_output_queues_size() const { + return this->m_output_queues.size(); + } + + size_t get_m_cluster_finders_size() const { + return this->m_cluster_finders.size(); + } + + bool m_output_queues_are_empty() const { + for (auto &queue : this->m_output_queues) { + if (!queue->isEmpty()) + return false; + } + return true; + } + + bool m_input_queues_are_empty() const { + for (auto &queue : this->m_input_queues) { + if (!queue->isEmpty()) + return false; + } + return true; + } + + bool m_sink_is_empty() const { return this->m_sink.isEmpty(); } + + size_t m_sink_size() const { return this->m_sink.sizeGuess(); } +}; + +TEST_CASE("multithreaded cluster finder", "[.files][.ClusterFinder]") { + auto fpath = "/mnt/sls_det_storage/matterhorn_data/aare_test_data/" + "Moench03new/cu_half_speed_master_4.json"; + + File file(fpath); + + size_t n_threads = 2; + size_t n_frames_pd = 10; + + using ClusterType = Cluster; + + ClusterFinderMTWrapper cf( + {static_cast(file.rows()), static_cast(file.cols())}, + 5, 2000, n_threads); // no idea what frame type is!!! 
default uint16_t + + CHECK(cf.get_m_input_queues_size() == n_threads); + CHECK(cf.get_m_output_queues_size() == n_threads); + CHECK(cf.get_m_cluster_finders_size() == n_threads); + CHECK(cf.m_output_queues_are_empty() == true); + CHECK(cf.m_input_queues_are_empty() == true); + + for (size_t i = 0; i < n_frames_pd; ++i) { + cf.find_clusters(file.read_frame().view()); + } + + cf.stop(); + + CHECK(cf.m_output_queues_are_empty() == true); + CHECK(cf.m_input_queues_are_empty() == true); + + CHECK(cf.m_sink_size() == n_frames_pd); + ClusterCollector clustercollector(&cf); + + clustercollector.stop(); + + CHECK(cf.m_sink_size() == 0); + + auto clustervec = clustercollector.steal_clusters(); + // CHECK(clustervec.size() == ) //dont know how many clusters to expect +} From 84aafa75f6b5f0097ed1b7b5b03102f2e305a1e8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Erik=20Fr=C3=B6jdh?= Date: Tue, 22 Apr 2025 08:36:34 +0200 Subject: [PATCH 101/120] Building wheels and uploading to pypi (#160) Still to be resolved in another PR: - Consistent versioning across compiled code, conda and pypi --- .github/workflows/build_wheel.yml | 64 +++++++++++++++++++++++++++++++ .gitignore | 3 +- CMakeLists.txt | 2 +- pyproject.toml | 17 ++++++-- python/CMakeLists.txt | 12 ++++-- 5 files changed, 90 insertions(+), 8 deletions(-) create mode 100644 .github/workflows/build_wheel.yml diff --git a/.github/workflows/build_wheel.yml b/.github/workflows/build_wheel.yml new file mode 100644 index 0000000..f131e77 --- /dev/null +++ b/.github/workflows/build_wheel.yml @@ -0,0 +1,64 @@ +name: Build wheel + +on: + workflow_dispatch: + pull_request: + push: + branches: + - main + release: + types: + - published + + +jobs: + build_wheels: + name: Build wheels on ${{ matrix.os }} + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [ubuntu-latest,] + + steps: + - uses: actions/checkout@v4 + + - name: Build wheels + run: pipx run cibuildwheel==2.23.0 + + - uses: actions/upload-artifact@v4 + with: + name: 
cibw-wheels-${{ matrix.os }}-${{ strategy.job-index }} + path: ./wheelhouse/*.whl + + build_sdist: + name: Build source distribution + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - name: Build sdist + run: pipx run build --sdist + + - uses: actions/upload-artifact@v4 + with: + name: cibw-sdist + path: dist/*.tar.gz + + upload_pypi: + needs: [build_wheels, build_sdist] + runs-on: ubuntu-latest + environment: pypi + permissions: + id-token: write + if: github.event_name == 'release' && github.event.action == 'published' + # or, alternatively, upload to PyPI on every tag starting with 'v' (remove on: release above to use this) + # if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v') + steps: + - uses: actions/download-artifact@v4 + with: + # unpacks all CIBW artifacts into dist/ + pattern: cibw-* + path: dist + merge-multiple: true + + - uses: pypa/gh-action-pypi-publish@release/v1 diff --git a/.gitignore b/.gitignore index af3e3b7..5982f7f 100644 --- a/.gitignore +++ b/.gitignore @@ -17,7 +17,8 @@ Testing/ ctbDict.cpp ctbDict.h - +wheelhouse/ +dist/ *.pyc */__pycache__/* diff --git a/CMakeLists.txt b/CMakeLists.txt index 2f2a7b5..236e323 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.14) +cmake_minimum_required(VERSION 3.15) project(aare VERSION 1.0.0 diff --git a/pyproject.toml b/pyproject.toml index 470d158..4a477a3 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,19 +4,30 @@ build-backend = "scikit_build_core.build" [project] name = "aare" -version = "2025.4.1" +version = "2025.4.2" +requires-python = ">=3.11" +dependencies = [ + "numpy", + "matplotlib", +] +[tool.cibuildwheel] + +build = "cp{311,312,313}-manylinux_x86_64" + [tool.scikit-build] -cmake.verbose = true +build.verbose = true +cmake.build-type = "Release" +install.components = ["python"] [tool.scikit-build.cmake.define] AARE_PYTHON_BINDINGS = "ON" -AARE_SYSTEM_LIBRARIES = "ON" AARE_INSTALL_PYTHONEXT = 
"ON" + [tool.pytest.ini_options] markers = [ "files: marks tests that need additional data (deselect with '-m \"not files\"')", diff --git a/python/CMakeLists.txt b/python/CMakeLists.txt index 75847a7..549205a 100644 --- a/python/CMakeLists.txt +++ b/python/CMakeLists.txt @@ -1,5 +1,5 @@ -find_package (Python 3.10 COMPONENTS Interpreter Development REQUIRED) +find_package (Python 3.10 COMPONENTS Interpreter Development.Module REQUIRED) set(PYBIND11_FINDPYTHON ON) # Needed for RH8 # Download or find pybind11 depending on configuration @@ -59,10 +59,16 @@ endforeach(FILE ${PYTHON_EXAMPLES}) if(AARE_INSTALL_PYTHONEXT) - install(TARGETS _aare + install( + TARGETS _aare EXPORT "${TARGETS_EXPORT_NAME}" LIBRARY DESTINATION aare + COMPONENT python ) - install(FILES ${PYTHON_FILES} DESTINATION aare) + install( + FILES ${PYTHON_FILES} + DESTINATION aare + COMPONENT python + ) endif() \ No newline at end of file From 326941e2b4ef69f98bf2f773cf6b3281b9ff78bd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Erik=20Fr=C3=B6jdh?= Date: Tue, 22 Apr 2025 15:20:46 +0200 Subject: [PATCH 102/120] Custom base for decoding ADC data (#163) New function apply_custom_weights (can we find a better name) that takes a uint16 and a NDView of bases for the conversion. For each supplied weight it is used as base (instead of 2) to convert from bits to a double. 
--------- Co-authored-by: siebsi --- CMakeLists.txt | 1 + include/aare/NDView.hpp | 5 +++ include/aare/decode.hpp | 15 ++++++- python/aare/__init__.py | 4 ++ python/src/ctb_raw_file.hpp | 71 ++++++++++++++++++++------------ src/NDView.test.cpp | 12 ++++++ src/decode.cpp | 43 +++++++++++++++++++- src/decode.test.cpp | 80 +++++++++++++++++++++++++++++++++++++ 8 files changed, 204 insertions(+), 27 deletions(-) create mode 100644 src/decode.test.cpp diff --git a/CMakeLists.txt b/CMakeLists.txt index 236e323..b3d7377 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -427,6 +427,7 @@ if(AARE_TESTS) set(TestSources ${CMAKE_CURRENT_SOURCE_DIR}/src/algorithm.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/defs.test.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/decode.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/Dtype.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/Frame.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/geo_helpers.test.cpp diff --git a/include/aare/NDView.hpp b/include/aare/NDView.hpp index 55b442b..ddb5d1c 100644 --- a/include/aare/NDView.hpp +++ b/include/aare/NDView.hpp @@ -184,4 +184,9 @@ std::ostream& operator <<(std::ostream& os, const NDView& arr){ } +template +NDView make_view(std::vector& vec){ + return NDView(vec.data(), {static_cast(vec.size())}); +} + } // namespace aare \ No newline at end of file diff --git a/include/aare/decode.hpp b/include/aare/decode.hpp index 1c3c479..e784c4a 100644 --- a/include/aare/decode.hpp +++ b/include/aare/decode.hpp @@ -1,6 +1,7 @@ #pragma once #include +#include #include namespace aare { @@ -10,4 +11,16 @@ uint16_t adc_sar_04_decode64to16(uint64_t input); void adc_sar_05_decode64to16(NDView input, NDView output); void adc_sar_04_decode64to16(NDView input, NDView output); -} // namespace aare \ No newline at end of file + +/** + * @brief Apply custom weights to a 16-bit input value. Will sum up weights[i]**i + * for each bit i that is set in the input value. 
+ * @throws std::out_of_range if weights.size() < 16 + * @param input 16-bit input value + * @param weights vector of weights, size must be less than or equal to 16 + */ +double apply_custom_weights(uint16_t input, const NDView weights); + +void apply_custom_weights(NDView input, NDView output, const NDView weights); + +} // namespace aare diff --git a/python/aare/__init__.py b/python/aare/__init__.py index 98e8c72..db9672f 100644 --- a/python/aare/__init__.py +++ b/python/aare/__init__.py @@ -13,6 +13,10 @@ from ._aare import ClusterFinderMT, ClusterCollector, ClusterFileSink, ClusterVe from ._aare import fit_gaus, fit_pol1 from ._aare import Interpolator + + +from ._aare import apply_custom_weights + from .CtbRawFile import CtbRawFile from .RawFile import RawFile from .ScanParameters import ScanParameters diff --git a/python/src/ctb_raw_file.hpp b/python/src/ctb_raw_file.hpp index 56e571b..a88a9d1 100644 --- a/python/src/ctb_raw_file.hpp +++ b/python/src/ctb_raw_file.hpp @@ -10,6 +10,8 @@ #include "aare/decode.hpp" // #include "aare/fClusterFileV2.hpp" +#include "np_helper.hpp" + #include #include #include @@ -65,35 +67,54 @@ m.def("adc_sar_04_decode64to16", [](py::array_t input) { return output; }); - py::class_(m, "CtbRawFile") - .def(py::init()) - .def("read_frame", - [](CtbRawFile &self) { - size_t image_size = self.image_size_in_bytes(); - py::array image; - std::vector shape; - shape.reserve(2); - shape.push_back(1); - shape.push_back(image_size); +m.def( + "apply_custom_weights", + [](py::array_t &input, + py::array_t + &weights) { + - py::array_t header(1); + // Create new array with same shape as the input array (uninitialized values) + py::buffer_info buf = input.request(); + py::array_t output(buf.shape); - // always read bytes - image = py::array_t(shape); + // Use NDViews to call into the C++ library + auto weights_view = make_view_1d(weights); + NDView input_view(input.mutable_data(), {input.size()}); + NDView output_view(output.mutable_data(), 
{output.size()}); - self.read_into( - reinterpret_cast(image.mutable_data()), - header.mutable_data()); + apply_custom_weights(input_view, output_view, weights_view); + return output; + }); - return py::make_tuple(header, image); - }) - .def("seek", &CtbRawFile::seek) - .def("tell", &CtbRawFile::tell) - .def("master", &CtbRawFile::master) +py::class_(m, "CtbRawFile") + .def(py::init()) + .def("read_frame", + [](CtbRawFile &self) { + size_t image_size = self.image_size_in_bytes(); + py::array image; + std::vector shape; + shape.reserve(2); + shape.push_back(1); + shape.push_back(image_size); - .def_property_readonly("image_size_in_bytes", - &CtbRawFile::image_size_in_bytes) + py::array_t header(1); - .def_property_readonly("frames_in_file", &CtbRawFile::frames_in_file); + // always read bytes + image = py::array_t(shape); -} \ No newline at end of file + self.read_into(reinterpret_cast(image.mutable_data()), + header.mutable_data()); + + return py::make_tuple(header, image); + }) + .def("seek", &CtbRawFile::seek) + .def("tell", &CtbRawFile::tell) + .def("master", &CtbRawFile::master) + + .def_property_readonly("image_size_in_bytes", + &CtbRawFile::image_size_in_bytes) + + .def_property_readonly("frames_in_file", &CtbRawFile::frames_in_file); + +} diff --git a/src/NDView.test.cpp b/src/NDView.test.cpp index 3070de6..6bc8eef 100644 --- a/src/NDView.test.cpp +++ b/src/NDView.test.cpp @@ -190,4 +190,16 @@ TEST_CASE("compare two views") { NDView view2(vec2.data(), Shape<2>{3, 4}); REQUIRE((view1 == view2)); +} + + +TEST_CASE("Create a view over a vector"){ + std::vector vec; + for (int i = 0; i != 12; ++i) { + vec.push_back(i); + } + auto v = aare::make_view(vec); + REQUIRE(v.shape()[0] == 12); + REQUIRE(v[0] == 0); + REQUIRE(v[11] == 11); } \ No newline at end of file diff --git a/src/decode.cpp b/src/decode.cpp index 17c033d..8ac7bc0 100644 --- a/src/decode.cpp +++ b/src/decode.cpp @@ -1,5 +1,5 @@ #include "aare/decode.hpp" - +#include namespace aare { uint16_t 
adc_sar_05_decode64to16(uint64_t input){ @@ -22,6 +22,10 @@ uint16_t adc_sar_05_decode64to16(uint64_t input){ } void adc_sar_05_decode64to16(NDView input, NDView output){ + if(input.shape() != output.shape()){ + throw std::invalid_argument(LOCATION + " input and output shapes must match"); + } + for(int64_t i = 0; i < input.shape(0); i++){ for(int64_t j = 0; j < input.shape(1); j++){ output(i,j) = adc_sar_05_decode64to16(input(i,j)); @@ -49,6 +53,9 @@ uint16_t adc_sar_04_decode64to16(uint64_t input){ } void adc_sar_04_decode64to16(NDView input, NDView output){ + if(input.shape() != output.shape()){ + throw std::invalid_argument(LOCATION + " input and output shapes must match"); + } for(int64_t i = 0; i < input.shape(0); i++){ for(int64_t j = 0; j < input.shape(1); j++){ output(i,j) = adc_sar_04_decode64to16(input(i,j)); @@ -56,6 +63,40 @@ void adc_sar_04_decode64to16(NDView input, NDView outpu } } +double apply_custom_weights(uint16_t input, const NDView weights) { + if(weights.size() > 16){ + throw std::invalid_argument("weights size must be less than or equal to 16"); + } + + double result = 0.0; + for (ssize_t i = 0; i < weights.size(); ++i) { + result += ((input >> i) & 1) * std::pow(weights[i], i); + } + return result; + +} + +void apply_custom_weights(NDView input, NDView output, const NDView weights) { + if(input.shape() != output.shape()){ + throw std::invalid_argument(LOCATION + " input and output shapes must match"); + } + + //Calculate weights to avoid repeatedly calling std::pow + std::vector weights_powers(weights.size()); + for (ssize_t i = 0; i < weights.size(); ++i) { + weights_powers[i] = std::pow(weights[i], i); + } + + // Apply custom weights to each element in the input array + for (ssize_t i = 0; i < input.shape(0); i++) { + double result = 0.0; + for (size_t bit_index = 0; bit_index < weights_powers.size(); ++bit_index) { + result += ((input(i) >> bit_index) & 1) * weights_powers[bit_index]; + } + output(i) = result; + } +} + } // namespace 
aare diff --git a/src/decode.test.cpp b/src/decode.test.cpp new file mode 100644 index 0000000..a90213c --- /dev/null +++ b/src/decode.test.cpp @@ -0,0 +1,80 @@ +#include "aare/decode.hpp" + +#include +#include +#include "aare/NDArray.hpp" +using Catch::Matchers::WithinAbs; +#include + +TEST_CASE("test_adc_sar_05_decode64to16"){ + uint64_t input = 0; + uint16_t output = aare::adc_sar_05_decode64to16(input); + CHECK(output == 0); + + + // bit 29 on th input is bit 0 on the output + input = 1UL << 29; + output = aare::adc_sar_05_decode64to16(input); + CHECK(output == 1); + + // test all bits by iteratting through the bitlist + std::vector bitlist = {29, 19, 28, 18, 31, 21, 27, 20, 24, 23, 25, 22}; + for (size_t i = 0; i < bitlist.size(); i++) { + input = 1UL << bitlist[i]; + output = aare::adc_sar_05_decode64to16(input); + CHECK(output == (1 << i)); + } + + + // test a few "random" values + input = 0; + input |= (1UL << 29); + input |= (1UL << 19); + input |= (1UL << 28); + output = aare::adc_sar_05_decode64to16(input); + CHECK(output == 7UL); + + + input = 0; + input |= (1UL << 18); + input |= (1UL << 27); + input |= (1UL << 25); + output = aare::adc_sar_05_decode64to16(input); + CHECK(output == 1096UL); + + input = 0; + input |= (1UL << 25); + input |= (1UL << 22); + output = aare::adc_sar_05_decode64to16(input); + CHECK(output == 3072UL); + } + + + TEST_CASE("test_apply_custom_weights") { + + uint16_t input = 1; + aare::NDArray weights_data({3}, 0.0); + weights_data(0) = 1.7; + weights_data(1) = 2.1; + weights_data(2) = 1.8; + + auto weights = weights_data.view(); + + + double output = aare::apply_custom_weights(input, weights); + CHECK_THAT(output, WithinAbs(1.0, 0.001)); + + input = 1UL << 1; + output = aare::apply_custom_weights(input, weights); + CHECK_THAT(output, WithinAbs(2.1, 0.001)); + + + input = 1UL << 2; + output = aare::apply_custom_weights(input, weights); + CHECK_THAT(output, WithinAbs(3.24, 0.001)); + + input = 0b111; + output = 
aare::apply_custom_weights(input, weights); + CHECK_THAT(output, WithinAbs(6.34, 0.001)); + + } \ No newline at end of file From b501c31e389e1b8374578ebe1b34a6a74dd9395d Mon Sep 17 00:00:00 2001 From: froejdh_e Date: Tue, 22 Apr 2025 15:22:47 +0200 Subject: [PATCH 103/120] added missed commit --- src/NDView.test.cpp | 43 +++++++++++++++---------------------------- src/decode.test.cpp | 4 ++-- 2 files changed, 17 insertions(+), 30 deletions(-) diff --git a/src/NDView.test.cpp b/src/NDView.test.cpp index 6bc8eef..8750f3a 100644 --- a/src/NDView.test.cpp +++ b/src/NDView.test.cpp @@ -3,6 +3,7 @@ #include #include +#include using aare::NDView; using aare::Shape; @@ -21,10 +22,8 @@ TEST_CASE("Element reference 1D") { } TEST_CASE("Element reference 2D") { - std::vector vec; - for (int i = 0; i != 12; ++i) { - vec.push_back(i); - } + std::vector vec(12); + std::iota(vec.begin(), vec.end(), 0); NDView data(vec.data(), Shape<2>{3, 4}); REQUIRE(vec.size() == static_cast(data.size())); @@ -58,10 +57,8 @@ TEST_CASE("Element reference 3D") { } TEST_CASE("Plus and miuns with single value") { - std::vector vec; - for (int i = 0; i != 12; ++i) { - vec.push_back(i); - } + std::vector vec(12); + std::iota(vec.begin(), vec.end(), 0); NDView data(vec.data(), Shape<2>{3, 4}); data += 5; int i = 0; @@ -116,10 +113,8 @@ TEST_CASE("elementwise assign") { } TEST_CASE("iterators") { - std::vector vec; - for (int i = 0; i != 12; ++i) { - vec.push_back(i); - } + std::vector vec(12); + std::iota(vec.begin(), vec.end(), 0); NDView data(vec.data(), Shape<1>{12}); int i = 0; for (const auto item : data) { @@ -167,26 +162,20 @@ TEST_CASE("divide with another span") { } TEST_CASE("Retrieve shape") { - std::vector vec; - for (int i = 0; i != 12; ++i) { - vec.push_back(i); - } + std::vector vec(12); + std::iota(vec.begin(), vec.end(), 0); NDView data(vec.data(), Shape<2>{3, 4}); REQUIRE(data.shape()[0] == 3); REQUIRE(data.shape()[1] == 4); } TEST_CASE("compare two views") { - std::vector vec1; - for 
(int i = 0; i != 12; ++i) { - vec1.push_back(i); - } + std::vector vec1(12); + std::iota(vec1.begin(), vec1.end(), 0); NDView view1(vec1.data(), Shape<2>{3, 4}); - std::vector vec2; - for (int i = 0; i != 12; ++i) { - vec2.push_back(i); - } + std::vector vec2(12); + std::iota(vec2.begin(), vec2.end(), 0); NDView view2(vec2.data(), Shape<2>{3, 4}); REQUIRE((view1 == view2)); @@ -194,10 +183,8 @@ TEST_CASE("compare two views") { TEST_CASE("Create a view over a vector"){ - std::vector vec; - for (int i = 0; i != 12; ++i) { - vec.push_back(i); - } + std::vector vec(12); + std::iota(vec.begin(), vec.end(), 0); auto v = aare::make_view(vec); REQUIRE(v.shape()[0] == 12); REQUIRE(v[0] == 0); diff --git a/src/decode.test.cpp b/src/decode.test.cpp index a90213c..1e4b2fc 100644 --- a/src/decode.test.cpp +++ b/src/decode.test.cpp @@ -64,12 +64,12 @@ TEST_CASE("test_adc_sar_05_decode64to16"){ double output = aare::apply_custom_weights(input, weights); CHECK_THAT(output, WithinAbs(1.0, 0.001)); - input = 1UL << 1; + input = 1 << 1; output = aare::apply_custom_weights(input, weights); CHECK_THAT(output, WithinAbs(2.1, 0.001)); - input = 1UL << 2; + input = 1 << 2; output = aare::apply_custom_weights(input, weights); CHECK_THAT(output, WithinAbs(3.24, 0.001)); From c6e8e5f6a1f6754bdbf6a8ff9bb90d4c36747368 Mon Sep 17 00:00:00 2001 From: froejdh_e Date: Tue, 22 Apr 2025 16:16:27 +0200 Subject: [PATCH 104/120] inverted gain map --- conda-recipe/meta.yaml | 2 +- include/aare/ClusterFile.hpp | 2 +- pyproject.toml | 2 +- src/ClusterFile.cpp | 6 ++++++ 4 files changed, 9 insertions(+), 3 deletions(-) diff --git a/conda-recipe/meta.yaml b/conda-recipe/meta.yaml index 560e831..0d3b532 100644 --- a/conda-recipe/meta.yaml +++ b/conda-recipe/meta.yaml @@ -1,6 +1,6 @@ package: name: aare - version: 2025.4.1 #TODO! how to not duplicate this? + version: 2025.4.22 #TODO! how to not duplicate this? 
diff --git a/include/aare/ClusterFile.hpp b/include/aare/ClusterFile.hpp index bea9f48..b47a1d5 100644 --- a/include/aare/ClusterFile.hpp +++ b/include/aare/ClusterFile.hpp @@ -124,7 +124,7 @@ class ClusterFile { /** * @brief Set the gain map to use when reading clusters. If set the gain map will be applied - * to the clusters that pass ROI and noise_map selection. + * to the clusters that pass ROI and noise_map selection. The gain map is expected to be in ADU/energy. */ void set_gain_map(const NDView gain_map); diff --git a/pyproject.toml b/pyproject.toml index 4a477a3..6451f39 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "scikit_build_core.build" [project] name = "aare" -version = "2025.4.2" +version = "2025.4.22" requires-python = ">=3.11" dependencies = [ "numpy", diff --git a/src/ClusterFile.cpp b/src/ClusterFile.cpp index f77ac92..d24e803 100644 --- a/src/ClusterFile.cpp +++ b/src/ClusterFile.cpp @@ -41,6 +41,12 @@ void ClusterFile::set_noise_map(const NDView noise_map){ void ClusterFile::set_gain_map(const NDView gain_map){ m_gain_map = NDArray(gain_map); + + // Gain map is passed as ADU/keV to avoid dividing in when applying the gain + // map we invert it here + for (auto &item : m_gain_map->view()) { + item = 1.0 / item; + } } ClusterFile::~ClusterFile() { close(); } From d5f8daf194e8297c2f02045348963cb2dfd80832 Mon Sep 17 00:00:00 2001 From: mazzol_a Date: Tue, 22 Apr 2025 16:16:31 +0200 Subject: [PATCH 105/120] removed debug option in CMakelist --- CMakeLists.txt | 2 +- python/src/bind_ClusterVector.hpp | 5 +++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 3c0d03a..6820516 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -275,7 +275,7 @@ else() if(CMAKE_BUILD_TYPE STREQUAL "Release") message(STATUS "Release build") - target_compile_options(aare_compiler_flags INTERFACE -O3 -g) + target_compile_options(aare_compiler_flags INTERFACE -O3) else() message(STATUS 
"Debug build") endif() diff --git a/python/src/bind_ClusterVector.hpp b/python/src/bind_ClusterVector.hpp index ea02487..ecd7a77 100644 --- a/python/src/bind_ClusterVector.hpp +++ b/python/src/bind_ClusterVector.hpp @@ -44,6 +44,11 @@ void define_ClusterVector(py::module &m, const std::string &typestr) { auto *vec = new std::vector(self.sum()); return return_vector(vec); }) + .def("sum_2x2", + [](ClusterVector &self) { + auto *vec = new std::vector(self.sum_2x2()); + return return_vector(vec); + }) .def_property_readonly("size", &ClusterVector::size) .def("item_size", &ClusterVector::item_size) .def_property_readonly("fmt", From 58c934d9cf39dd7b27e55d3e67bb8c20cf622680 Mon Sep 17 00:00:00 2001 From: froejdh_e Date: Tue, 22 Apr 2025 16:24:15 +0200 Subject: [PATCH 106/120] added mpl to conda specs --- conda-recipe/meta.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/conda-recipe/meta.yaml b/conda-recipe/meta.yaml index 0d3b532..46aee34 100644 --- a/conda-recipe/meta.yaml +++ b/conda-recipe/meta.yaml @@ -38,6 +38,7 @@ requirements: run: - python {{python}} - numpy {{ numpy }} + - matplotlib test: From fd0196f2fd5cb3f58e2044a1d898d0e38428c0fb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Erik=20Fr=C3=B6jdh?= Date: Tue, 22 Apr 2025 16:41:48 +0200 Subject: [PATCH 107/120] Developer (#164) - State before merging the new cluster vector API --------- Co-authored-by: Patrick Co-authored-by: JulianHeymes Co-authored-by: Dhanya Thattil Co-authored-by: Xiangyu Xie <45243914+xiangyuxie@users.noreply.github.com> Co-authored-by: xiangyu.xie Co-authored-by: siebsi --- .gitea/workflows/cmake_build.yml | 18 +- .gitea/workflows/rh8-native.yml | 36 ++++ .gitea/workflows/rh9-native.yml | 31 +++ .github/workflows/build_docs.yml | 12 +- .github/workflows/build_wheel.yml | 64 +++++++ .gitignore | 3 +- CMakeLists.txt | 18 +- conda-recipe/meta.yaml | 4 +- docs/src/JungfrauDataFile.rst | 25 +++ docs/src/Tests.rst | 47 +++++ docs/src/algorithm.rst | 5 + docs/src/index.rst | 12 +- 
docs/src/pyJungfrauDataFile.rst | 10 + etc/dev-env.yml | 15 ++ include/aare/ClusterFile.hpp | 2 +- include/aare/FilePtr.hpp | 30 +++ include/aare/JungfrauDataFile.hpp | 106 +++++++++++ include/aare/NDArray.hpp | 2 +- include/aare/NDView.hpp | 9 +- include/aare/RawSubFile.hpp | 5 +- include/aare/VarClusterFinder.hpp | 4 +- include/aare/algorithm.hpp | 62 +++++- include/aare/decode.hpp | 15 +- include/aare/utils/ifstream_helpers.hpp | 12 ++ pyproject.toml | 25 ++- python/CMakeLists.txt | 15 +- python/aare/__init__.py | 8 +- python/aare/utils.py | 11 +- python/src/ctb_raw_file.hpp | 71 ++++--- python/src/file.hpp | 36 +--- python/src/jungfrau_data_file.hpp | 116 ++++++++++++ python/src/module.cpp | 5 + python/src/raw_sub_file.hpp | 110 +++++++++++ python/tests/conftest.py | 29 +++ python/tests/test_RawSubFile.py | 36 ++++ python/tests/test_jungfrau_dat_files.py | 92 +++++++++ src/ClusterFile.cpp | 6 + src/ClusterFile.test.cpp | 16 +- src/File.cpp | 3 + src/FilePtr.cpp | 44 +++++ src/Fit.cpp | 8 +- src/Interpolator.cpp | 9 +- src/JungfrauDataFile.cpp | 238 ++++++++++++++++++++++++ src/JungfrauDataFile.test.cpp | 114 ++++++++++++ src/NDArray.test.cpp | 4 +- src/NDView.test.cpp | 47 +++-- src/RawSubFile.cpp | 31 ++- src/algorithm.test.cpp | 98 +++++++++- src/decode.cpp | 43 ++++- src/decode.test.cpp | 80 ++++++++ src/utils/ifstream_helpers.cpp | 18 ++ 51 files changed, 1706 insertions(+), 154 deletions(-) create mode 100644 .gitea/workflows/rh8-native.yml create mode 100644 .gitea/workflows/rh9-native.yml create mode 100644 .github/workflows/build_wheel.yml create mode 100644 docs/src/JungfrauDataFile.rst create mode 100644 docs/src/Tests.rst create mode 100644 docs/src/algorithm.rst create mode 100644 docs/src/pyJungfrauDataFile.rst create mode 100644 etc/dev-env.yml create mode 100644 include/aare/FilePtr.hpp create mode 100644 include/aare/JungfrauDataFile.hpp create mode 100644 include/aare/utils/ifstream_helpers.hpp create mode 100644 
python/src/jungfrau_data_file.hpp create mode 100644 python/src/raw_sub_file.hpp create mode 100644 python/tests/conftest.py create mode 100644 python/tests/test_RawSubFile.py create mode 100644 python/tests/test_jungfrau_dat_files.py create mode 100644 src/FilePtr.cpp create mode 100644 src/JungfrauDataFile.cpp create mode 100644 src/JungfrauDataFile.test.cpp create mode 100644 src/decode.test.cpp create mode 100644 src/utils/ifstream_helpers.cpp diff --git a/.gitea/workflows/cmake_build.yml b/.gitea/workflows/cmake_build.yml index 43a0181..aa7a297 100644 --- a/.gitea/workflows/cmake_build.yml +++ b/.gitea/workflows/cmake_build.yml @@ -2,9 +2,8 @@ name: Build the package using cmake then documentation on: workflow_dispatch: - push: - + permissions: contents: read @@ -16,12 +15,12 @@ jobs: strategy: fail-fast: false matrix: - platform: [ubuntu-latest, ] # macos-12, windows-2019] - python-version: ["3.12",] + platform: [ubuntu-latest, ] + python-version: ["3.12", ] runs-on: ${{ matrix.platform }} - # The setup-miniconda action needs this to activate miniconda + defaults: run: shell: "bash -l {0}" @@ -35,13 +34,13 @@ jobs: sudo apt-get -y install cmake gcc g++ - name: Get conda - uses: conda-incubator/setup-miniconda@v3.0.4 + uses: conda-incubator/setup-miniconda@v3 with: python-version: ${{ matrix.python-version }} + environment-file: etc/dev-env.yml + miniforge-version: latest channels: conda-forge - - - name: Prepare - run: conda install doxygen sphinx=7.1.2 breathe pybind11 sphinx_rtd_theme furo nlohmann_json zeromq fmt numpy + conda-remove-defaults: "true" - name: Build library run: | @@ -56,3 +55,4 @@ jobs: + diff --git a/.gitea/workflows/rh8-native.yml b/.gitea/workflows/rh8-native.yml new file mode 100644 index 0000000..1c64161 --- /dev/null +++ b/.gitea/workflows/rh8-native.yml @@ -0,0 +1,36 @@ +name: Build on RHEL8 + +on: + push: + workflow_dispatch: + +permissions: + contents: read + +jobs: + build: + runs-on: "ubuntu-latest" + container: + image: 
gitea.psi.ch/images/rhel8-developer-gitea-actions + steps: + # workaround until actions/checkout@v4 is available for RH8 + # - uses: actions/checkout@v4 + - name: Clone repository + run: | + echo Cloning ${{ github.ref_name }} + git clone https://${{secrets.GITHUB_TOKEN}}@gitea.psi.ch/${{ github.repository }}.git --branch=${{ github.ref_name }} . + + + - name: Install dependencies + run: | + dnf install -y cmake python3.12 python3.12-devel python3.12-pip + + - name: Build library + run: | + mkdir build && cd build + cmake .. -DAARE_PYTHON_BINDINGS=ON -DAARE_TESTS=ON -DPython_FIND_VIRTUALENV=FIRST + make -j 2 + + - name: C++ unit tests + working-directory: ${{gitea.workspace}}/build + run: ctest \ No newline at end of file diff --git a/.gitea/workflows/rh9-native.yml b/.gitea/workflows/rh9-native.yml new file mode 100644 index 0000000..5027365 --- /dev/null +++ b/.gitea/workflows/rh9-native.yml @@ -0,0 +1,31 @@ +name: Build on RHEL9 + +on: + push: + workflow_dispatch: + +permissions: + contents: read + +jobs: + build: + runs-on: "ubuntu-latest" + container: + image: gitea.psi.ch/images/rhel9-developer-gitea-actions + steps: + - uses: actions/checkout@v4 + + + - name: Install dependencies + run: | + dnf install -y cmake python3.12 python3.12-devel python3.12-pip + + - name: Build library + run: | + mkdir build && cd build + cmake .. 
-DAARE_PYTHON_BINDINGS=ON -DAARE_TESTS=ON + make -j 2 + + - name: C++ unit tests + working-directory: ${{gitea.workspace}}/build + run: ctest \ No newline at end of file diff --git a/.github/workflows/build_docs.yml b/.github/workflows/build_docs.yml index 959ab70..24050a3 100644 --- a/.github/workflows/build_docs.yml +++ b/.github/workflows/build_docs.yml @@ -5,7 +5,6 @@ on: push: - permissions: contents: read pages: write @@ -16,12 +15,11 @@ jobs: strategy: fail-fast: false matrix: - platform: [ubuntu-latest, ] # macos-12, windows-2019] + platform: [ubuntu-latest, ] python-version: ["3.12",] runs-on: ${{ matrix.platform }} - # The setup-miniconda action needs this to activate miniconda defaults: run: shell: "bash -l {0}" @@ -30,13 +28,13 @@ jobs: - uses: actions/checkout@v4 - name: Get conda - uses: conda-incubator/setup-miniconda@v3.0.4 + uses: conda-incubator/setup-miniconda@v3 with: python-version: ${{ matrix.python-version }} + environment-file: etc/dev-env.yml + miniforge-version: latest channels: conda-forge - - - name: Prepare - run: conda install doxygen sphinx=7.1.2 breathe pybind11 sphinx_rtd_theme furo nlohmann_json zeromq fmt numpy + conda-remove-defaults: "true" - name: Build library run: | diff --git a/.github/workflows/build_wheel.yml b/.github/workflows/build_wheel.yml new file mode 100644 index 0000000..f131e77 --- /dev/null +++ b/.github/workflows/build_wheel.yml @@ -0,0 +1,64 @@ +name: Build wheel + +on: + workflow_dispatch: + pull_request: + push: + branches: + - main + release: + types: + - published + + +jobs: + build_wheels: + name: Build wheels on ${{ matrix.os }} + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [ubuntu-latest,] + + steps: + - uses: actions/checkout@v4 + + - name: Build wheels + run: pipx run cibuildwheel==2.23.0 + + - uses: actions/upload-artifact@v4 + with: + name: cibw-wheels-${{ matrix.os }}-${{ strategy.job-index }} + path: ./wheelhouse/*.whl + + build_sdist: + name: Build source distribution + runs-on: 
ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - name: Build sdist + run: pipx run build --sdist + + - uses: actions/upload-artifact@v4 + with: + name: cibw-sdist + path: dist/*.tar.gz + + upload_pypi: + needs: [build_wheels, build_sdist] + runs-on: ubuntu-latest + environment: pypi + permissions: + id-token: write + if: github.event_name == 'release' && github.event.action == 'published' + # or, alternatively, upload to PyPI on every tag starting with 'v' (remove on: release above to use this) + # if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v') + steps: + - uses: actions/download-artifact@v4 + with: + # unpacks all CIBW artifacts into dist/ + pattern: cibw-* + path: dist + merge-multiple: true + + - uses: pypa/gh-action-pypi-publish@release/v1 diff --git a/.gitignore b/.gitignore index af3e3b7..5982f7f 100644 --- a/.gitignore +++ b/.gitignore @@ -17,7 +17,8 @@ Testing/ ctbDict.cpp ctbDict.h - +wheelhouse/ +dist/ *.pyc */__pycache__/* diff --git a/CMakeLists.txt b/CMakeLists.txt index 804b2f6..b3d7377 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.14) +cmake_minimum_required(VERSION 3.15) project(aare VERSION 1.0.0 @@ -11,6 +11,14 @@ set(CMAKE_CXX_STANDARD 17) set(CMAKE_CXX_STANDARD_REQUIRED ON) set(CMAKE_CXX_EXTENSIONS OFF) +execute_process( + COMMAND git log -1 --format=%h + WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR} + OUTPUT_VARIABLE GIT_HASH + OUTPUT_STRIP_TRAILING_WHITESPACE + ) +message(STATUS "Building from git hash: ${GIT_HASH}") + if (${CMAKE_VERSION} VERSION_GREATER "3.24") cmake_policy(SET CMP0135 NEW) #Fetch content download timestamp endif() @@ -342,8 +350,10 @@ set(PUBLICHEADERS include/aare/File.hpp include/aare/Fit.hpp include/aare/FileInterface.hpp + include/aare/FilePtr.hpp include/aare/Frame.hpp include/aare/geo_helpers.hpp + include/aare/JungfrauDataFile.hpp include/aare/NDArray.hpp include/aare/NDView.hpp include/aare/NumpyFile.hpp @@ -367,8 +377,10 @@ 
set(SourceFiles ${CMAKE_CURRENT_SOURCE_DIR}/src/decode.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/Frame.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/File.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/FilePtr.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/Fit.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/geo_helpers.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/JungfrauDataFile.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyFile.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyHelpers.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/Interpolator.cpp @@ -376,7 +388,9 @@ set(SourceFiles ${CMAKE_CURRENT_SOURCE_DIR}/src/RawFile.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/RawSubFile.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/RawMasterFile.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/utils/task.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/utils/ifstream_helpers.cpp ) @@ -413,6 +427,7 @@ if(AARE_TESTS) set(TestSources ${CMAKE_CURRENT_SOURCE_DIR}/src/algorithm.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/defs.test.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/decode.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/Dtype.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/Frame.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/geo_helpers.test.cpp @@ -423,6 +438,7 @@ if(AARE_TESTS) ${CMAKE_CURRENT_SOURCE_DIR}/src/ClusterVector.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/ClusterFile.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/Pedestal.test.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/JungfrauDataFile.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyFile.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyHelpers.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/RawFile.test.cpp diff --git a/conda-recipe/meta.yaml b/conda-recipe/meta.yaml index 560e831..12c6ca0 100644 --- a/conda-recipe/meta.yaml +++ b/conda-recipe/meta.yaml @@ -1,6 +1,7 @@ package: name: aare - version: 2025.4.1 #TODO! how to not duplicate this? + version: 2025.4.22 #TODO! how to not duplicate this? 
+ @@ -38,6 +39,7 @@ requirements: run: - python {{python}} - numpy {{ numpy }} + - matplotlib test: diff --git a/docs/src/JungfrauDataFile.rst b/docs/src/JungfrauDataFile.rst new file mode 100644 index 0000000..78d473f --- /dev/null +++ b/docs/src/JungfrauDataFile.rst @@ -0,0 +1,25 @@ +JungfrauDataFile +================== + +JungfrauDataFile is a class to read the .dat files that are produced by Aldo's receiver. +It is mostly used for calibration. + +The structure of the file is: + +* JungfrauDataHeader +* Binary data (256x256, 256x1024 or 512x1024) +* JungfrauDataHeader +* ... + +There is no metadata indicating the number of frames or the size of the image, but this +will be inferred by this reader. + +.. doxygenstruct:: aare::JungfrauDataHeader + :members: + :undoc-members: + :private-members: + +.. doxygenclass:: aare::JungfrauDataFile + :members: + :undoc-members: + :private-members: \ No newline at end of file diff --git a/docs/src/Tests.rst b/docs/src/Tests.rst new file mode 100644 index 0000000..da98001 --- /dev/null +++ b/docs/src/Tests.rst @@ -0,0 +1,47 @@ +**************** +Tests +**************** + +We test the code both from the C++ and Python API. By default only tests that do not require image data are run. + +C++ +~~~~~~~~~~~~~~~~~~ + +.. code-block:: bash + + mkdir build + cd build + cmake .. -DAARE_TESTS=ON + make -j 4 + + export AARE_TEST_DATA=/path/to/test/data + ./run_test [.files] #or using ctest, [.files] is the option to include tests needing data + + + +Python +~~~~~~~~~~~~~~~~~~ + +.. code-block:: bash + + #From the root dir of the library + python -m pytest python/tests --files # passing --files will run the tests needing data + + + +Getting the test data +~~~~~~~~~~~~~~~~~~~~~~~~ + +.. attention :: + + The tests needing the test data are not run by default. To make the data available, you need to set the environment variable + AARE_TEST_DATA to the path of the test data directory. 
Then pass either [.files] for the C++ tests or --files for Python + +The image files needed for the test are large and are not included in the repository. They are stored +using GIT LFS in a separate repository. To get the test data, you need to clone the repository. +To do this, you need to have GIT LFS installed. You can find instructions on how to install it here: https://git-lfs.github.com/ +Once you have GIT LFS installed, you can clone the repository like any normal repo using: + +.. code-block:: bash + + git clone https://gitea.psi.ch/detectors/aare-test-data.git diff --git a/docs/src/algorithm.rst b/docs/src/algorithm.rst new file mode 100644 index 0000000..9b11857 --- /dev/null +++ b/docs/src/algorithm.rst @@ -0,0 +1,5 @@ +algorithm +============= + +.. doxygenfile:: algorithm.hpp + diff --git a/docs/src/index.rst b/docs/src/index.rst index 905caea..af5e99a 100644 --- a/docs/src/index.rst +++ b/docs/src/index.rst @@ -20,9 +20,6 @@ AARE Requirements Consume - - - .. toctree:: :caption: Python API :maxdepth: 1 @@ -31,6 +28,7 @@ AARE pyCtbRawFile pyClusterFile pyClusterVector + pyJungfrauDataFile pyRawFile pyRawMasterFile pyVarClusterFinder @@ -42,6 +40,7 @@ AARE :caption: C++ API :maxdepth: 1 + algorithm NDArray NDView Frame @@ -51,6 +50,7 @@ AARE ClusterFinderMT ClusterFile ClusterVector + JungfrauDataFile Pedestal RawFile RawSubFile @@ -59,4 +59,8 @@ AARE - +.. toctree:: + :caption: Developer + :maxdepth: 3 + + Tests \ No newline at end of file diff --git a/docs/src/pyJungfrauDataFile.rst b/docs/src/pyJungfrauDataFile.rst new file mode 100644 index 0000000..2173adf --- /dev/null +++ b/docs/src/pyJungfrauDataFile.rst @@ -0,0 +1,10 @@ +JungfrauDataFile +=================== + +.. py:currentmodule:: aare + +.. 
autoclass:: JungfrauDataFile + :members: + :undoc-members: + :show-inheritance: + :inherited-members: \ No newline at end of file diff --git a/etc/dev-env.yml b/etc/dev-env.yml new file mode 100644 index 0000000..25038ee --- /dev/null +++ b/etc/dev-env.yml @@ -0,0 +1,15 @@ +name: dev-environment +channels: + - conda-forge +dependencies: + - anaconda-client + - doxygen + - sphinx=7.1.2 + - breathe + - pybind11 + - sphinx_rtd_theme + - furo + - nlohmann_json + - zeromq + - fmt + - numpy diff --git a/include/aare/ClusterFile.hpp b/include/aare/ClusterFile.hpp index bea9f48..b47a1d5 100644 --- a/include/aare/ClusterFile.hpp +++ b/include/aare/ClusterFile.hpp @@ -124,7 +124,7 @@ class ClusterFile { /** * @brief Set the gain map to use when reading clusters. If set the gain map will be applied - * to the clusters that pass ROI and noise_map selection. + * to the clusters that pass ROI and noise_map selection. The gain map is expected to be in ADU/energy. */ void set_gain_map(const NDView gain_map); diff --git a/include/aare/FilePtr.hpp b/include/aare/FilePtr.hpp new file mode 100644 index 0000000..4c88ecb --- /dev/null +++ b/include/aare/FilePtr.hpp @@ -0,0 +1,30 @@ +#pragma once +#include +#include + +namespace aare { + +/** + * \brief RAII wrapper for FILE pointer + */ +class FilePtr { + FILE *fp_{nullptr}; + + public: + FilePtr() = default; + FilePtr(const std::filesystem::path& fname, const std::string& mode); + FilePtr(const FilePtr &) = delete; // we don't want a copy + FilePtr &operator=(const FilePtr &) = delete; // since we handle a resource + FilePtr(FilePtr &&other); + FilePtr &operator=(FilePtr &&other); + FILE *get(); + int64_t tell(); + void seek(int64_t offset, int whence = SEEK_SET) { + if (fseek(fp_, offset, whence) != 0) + throw std::runtime_error("Error seeking in file"); + } + std::string error_msg(); + ~FilePtr(); +}; + +} // namespace aare \ No newline at end of file diff --git a/include/aare/JungfrauDataFile.hpp b/include/aare/JungfrauDataFile.hpp 
new file mode 100644 index 0000000..9b1bc48 --- /dev/null +++ b/include/aare/JungfrauDataFile.hpp @@ -0,0 +1,106 @@ +#pragma once +#include +#include +#include + +#include "aare/FilePtr.hpp" +#include "aare/defs.hpp" +#include "aare/NDArray.hpp" +#include "aare/FileInterface.hpp" +namespace aare { + + +struct JungfrauDataHeader{ + uint64_t framenum; + uint64_t bunchid; +}; + +class JungfrauDataFile : public FileInterface { + + size_t m_rows{}; //!< number of rows in the image, from find_frame_size(); + size_t m_cols{}; //!< number of columns in the image, from find_frame_size(); + size_t m_bytes_per_frame{}; //!< number of bytes per frame excluding header + size_t m_total_frames{}; //!< total number of frames in the series of files + size_t m_offset{}; //!< file index of the first file, allow starting at non zero file + size_t m_current_file_index{}; //!< The index of the open file + size_t m_current_frame_index{}; //!< The index of the current frame (with reference to all files) + + std::vector m_last_frame_in_file{}; //!< Used for seeking to the correct file + std::filesystem::path m_path; //!< path to the files + std::string m_base_name; //!< base name used for formatting file names + + FilePtr m_fp; //!< RAII wrapper for a FILE* + + + using pixel_type = uint16_t; + static constexpr size_t header_size = sizeof(JungfrauDataHeader); + static constexpr size_t n_digits_in_file_index = 6; //!< to format file names + + public: + JungfrauDataFile(const std::filesystem::path &fname); + + std::string base_name() const; //!< get the base name of the file (without path and extension) + size_t bytes_per_frame() override; + size_t pixels_per_frame() override; + size_t bytes_per_pixel() const; + size_t bitdepth() const override; + void seek(size_t frame_index) override; //!< seek to the given frame index (note not byte offset) + size_t tell() override; //!< get the frame index of the file pointer + size_t total_frames() const override; + size_t rows() const override; + size_t 
cols() const override; + std::array shape() const; + size_t n_files() const; //!< get the number of files in the series. + + // Extra functions needed for FileInterface + Frame read_frame() override; + Frame read_frame(size_t frame_number) override; + std::vector read_n(size_t n_frames=0) override; + void read_into(std::byte *image_buf) override; + void read_into(std::byte *image_buf, size_t n_frames) override; + size_t frame_number(size_t frame_index) override; + DetectorType detector_type() const override; + + /** + * @brief Read a single frame from the file into the given buffer. + * @param image_buf buffer to read the frame into. (Note the caller is responsible for allocating the buffer) + * @param header pointer to a JungfrauDataHeader or nullptr to skip header) + */ + void read_into(std::byte *image_buf, JungfrauDataHeader *header = nullptr); + + /** + * @brief Read a multiple frames from the file into the given buffer. + * @param image_buf buffer to read the frame into. (Note the caller is responsible for allocating the buffer) + * @param n_frames number of frames to read + * @param header pointer to a JungfrauDataHeader or nullptr to skip header) + */ + void read_into(std::byte *image_buf, size_t n_frames, JungfrauDataHeader *header = nullptr); + + /** + * @brief Read a single frame from the file into the given NDArray + * @param image NDArray to read the frame into. + */ + void read_into(NDArray* image, JungfrauDataHeader* header = nullptr); + + JungfrauDataHeader read_header(); + std::filesystem::path current_file() const { return fpath(m_current_file_index+m_offset); } + + + private: + /** + * @brief Find the size of the frame in the file. 
(256x256, 256x1024, 512x1024) + * @param fname path to the file + * @throws std::runtime_error if the file is empty or the size cannot be determined + */ + void find_frame_size(const std::filesystem::path &fname); + + + void parse_fname(const std::filesystem::path &fname); + void scan_files(); + void open_file(size_t file_index); + std::filesystem::path fpath(size_t frame_index) const; + + + }; + +} // namespace aare \ No newline at end of file diff --git a/include/aare/NDArray.hpp b/include/aare/NDArray.hpp index 45d3a83..ceb1e0b 100644 --- a/include/aare/NDArray.hpp +++ b/include/aare/NDArray.hpp @@ -194,7 +194,7 @@ class NDArray : public ArrayExpr, Ndim> { T *data() { return data_; } std::byte *buffer() { return reinterpret_cast(data_); } - size_t size() const { return size_; } + ssize_t size() const { return static_cast(size_); } size_t total_bytes() const { return size_ * sizeof(T); } std::array shape() const noexcept { return shape_; } int64_t shape(int64_t i) const noexcept { return shape_[i]; } diff --git a/include/aare/NDView.hpp b/include/aare/NDView.hpp index f53f758..ddb5d1c 100644 --- a/include/aare/NDView.hpp +++ b/include/aare/NDView.hpp @@ -71,7 +71,7 @@ template class NDView : public ArrayExpr(size_); } size_t total_bytes() const { return size_ * sizeof(T); } std::array strides() const noexcept { return strides_; } @@ -102,7 +102,7 @@ template class NDView : public ArrayExpr NDView& operator=(const std::array &arr) { - if(size() != arr.size()) + if(size() != static_cast(arr.size())) throw std::runtime_error(LOCATION + "Array and NDView size mismatch"); std::copy(arr.begin(), arr.end(), begin()); return *this; @@ -184,4 +184,9 @@ std::ostream& operator <<(std::ostream& os, const NDView& arr){ } +template +NDView make_view(std::vector& vec){ + return NDView(vec.data(), {static_cast(vec.size())}); +} + } // namespace aare \ No newline at end of file diff --git a/include/aare/RawSubFile.hpp b/include/aare/RawSubFile.hpp index 1d554e8..350a475 100644 
--- a/include/aare/RawSubFile.hpp +++ b/include/aare/RawSubFile.hpp @@ -22,7 +22,7 @@ class RawSubFile { size_t m_rows{}; size_t m_cols{}; size_t m_bytes_per_frame{}; - size_t n_frames{}; + size_t m_num_frames{}; uint32_t m_pos_row{}; uint32_t m_pos_col{}; @@ -53,6 +53,7 @@ class RawSubFile { size_t tell(); void read_into(std::byte *image_buf, DetectorHeader *header = nullptr); + void read_into(std::byte *image_buf, size_t n_frames, DetectorHeader *header= nullptr); void get_part(std::byte *buffer, size_t frame_index); void read_header(DetectorHeader *header); @@ -66,6 +67,8 @@ class RawSubFile { size_t pixels_per_frame() const { return m_rows * m_cols; } size_t bytes_per_pixel() const { return m_bitdepth / bits_per_byte; } + size_t frames_in_file() const { return m_num_frames; } + private: template void read_with_map(std::byte *image_buf); diff --git a/include/aare/VarClusterFinder.hpp b/include/aare/VarClusterFinder.hpp index ea62a9d..161941a 100644 --- a/include/aare/VarClusterFinder.hpp +++ b/include/aare/VarClusterFinder.hpp @@ -226,7 +226,7 @@ template void VarClusterFinder::single_pass(NDView img) { template void VarClusterFinder::first_pass() { - for (size_t i = 0; i < original_.size(); ++i) { + for (ssize_t i = 0; i < original_.size(); ++i) { if (use_noise_map) threshold_ = 5 * noiseMap(i); binary_(i) = (original_(i) > threshold_); @@ -250,7 +250,7 @@ template void VarClusterFinder::first_pass() { template void VarClusterFinder::second_pass() { - for (size_t i = 0; i != labeled_.size(); ++i) { + for (ssize_t i = 0; i != labeled_.size(); ++i) { auto cl = labeled_(i); if (cl != 0) { auto it = child.find(cl); diff --git a/include/aare/algorithm.hpp b/include/aare/algorithm.hpp index 5d6dc57..fc7d51f 100644 --- a/include/aare/algorithm.hpp +++ b/include/aare/algorithm.hpp @@ -7,13 +7,20 @@ namespace aare { /** - * @brief Find the index of the last element smaller than val - * assume a sorted array + * @brief Index of the last element that is smaller than val. 
+ * Requires a sorted array. Uses >= for ordering. If all elements + * are smaller it returns the last element and if all elements are + * larger it returns the first element. + * @param first iterator to the first element + * @param last iterator to the last element + * @param val value to compare + * @return index of the last element that is smaller than val + * */ template size_t last_smaller(const T* first, const T* last, T val) { for (auto iter = first+1; iter != last; ++iter) { - if (*iter > val) { + if (*iter >= val) { return std::distance(first, iter-1); } } @@ -25,7 +32,49 @@ size_t last_smaller(const NDArray& arr, T val) { return last_smaller(arr.begin(), arr.end(), val); } +template +size_t last_smaller(const std::vector& vec, T val) { + return last_smaller(vec.data(), vec.data()+vec.size(), val); +} +/** + * @brief Index of the first element that is larger than val. + * Requires a sorted array. Uses > for ordering. If all elements + * are larger it returns the first element and if all elements are + * smaller it returns the last element. + * @param first iterator to the first element + * @param last iterator to the last element + * @param val value to compare + * @return index of the first element that is larger than val + */ +template +size_t first_larger(const T* first, const T* last, T val) { + for (auto iter = first; iter != last; ++iter) { + if (*iter > val) { + return std::distance(first, iter); + } + } + return std::distance(first, last-1); +} + +template +size_t first_larger(const NDArray& arr, T val) { + return first_larger(arr.begin(), arr.end(), val); +} + +template +size_t first_larger(const std::vector& vec, T val) { + return first_larger(vec.data(), vec.data()+vec.size(), val); +} + +/** + * @brief Index of the nearest element to val. + * Requires a sorted array. If there is no difference it takes the first element. 
+ * @param first iterator to the first element + * @param last iterator to the last element + * @param val value to compare + * @return index of the nearest element + */ template size_t nearest_index(const T* first, const T* last, T val) { auto iter = std::min_element(first, last, @@ -50,6 +99,13 @@ size_t nearest_index(const std::array& arr, T val) { return nearest_index(arr.data(), arr.data()+arr.size(), val); } +template +std::vector cumsum(const std::vector& vec) { + std::vector result(vec.size()); + std::partial_sum(vec.begin(), vec.end(), result.begin()); + return result; +} + } // namespace aare \ No newline at end of file diff --git a/include/aare/decode.hpp b/include/aare/decode.hpp index 1c3c479..e784c4a 100644 --- a/include/aare/decode.hpp +++ b/include/aare/decode.hpp @@ -1,6 +1,7 @@ #pragma once #include +#include #include namespace aare { @@ -10,4 +11,16 @@ uint16_t adc_sar_04_decode64to16(uint64_t input); void adc_sar_05_decode64to16(NDView input, NDView output); void adc_sar_04_decode64to16(NDView input, NDView output); -} // namespace aare \ No newline at end of file + +/** + * @brief Apply custom weights to a 16-bit input value. Will sum up weights[i]**i + * for each bit i that is set in the input value. 
+ * @throws std::out_of_range if weights.size() < 16 + * @param input 16-bit input value + * @param weights vector of weights, size must be at least 16 + */ +double apply_custom_weights(uint16_t input, const NDView weights); + +void apply_custom_weights(NDView input, NDView output, const NDView weights); + +} // namespace aare diff --git a/include/aare/utils/ifstream_helpers.hpp b/include/aare/utils/ifstream_helpers.hpp new file mode 100644 index 0000000..0a842ed --- /dev/null +++ b/include/aare/utils/ifstream_helpers.hpp @@ -0,0 +1,12 @@ +#pragma once + +#include +#include +namespace aare { + +/** + * @brief Get the error message from an ifstream object +*/ +std::string ifstream_error_msg(std::ifstream &ifs); + +} // namespace aare \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index 60128c9..7415062 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,15 +4,32 @@ build-backend = "scikit_build_core.build" [project] name = "aare" -version = "2025.4.1" +version = "2025.4.22" +requires-python = ">=3.11" +dependencies = [ + "numpy", + "matplotlib", +] + + +[tool.cibuildwheel] + +build = "cp{311,312,313}-manylinux_x86_64" [tool.scikit-build] -cmake.verbose = true +build.verbose = true +cmake.build-type = "Release" +install.components = ["python"] [tool.scikit-build.cmake.define] AARE_PYTHON_BINDINGS = "ON" -AARE_SYSTEM_LIBRARIES = "ON" -AARE_INSTALL_PYTHONEXT = "ON" \ No newline at end of file +AARE_INSTALL_PYTHONEXT = "ON" + + +[tool.pytest.ini_options] +markers = [ + "files: marks tests that need additional data (deselect with '-m \"not files\"')", +] \ No newline at end of file diff --git a/python/CMakeLists.txt b/python/CMakeLists.txt index 09de736..549205a 100644 --- a/python/CMakeLists.txt +++ b/python/CMakeLists.txt @@ -1,12 +1,13 @@ -find_package (Python 3.10 COMPONENTS Interpreter Development REQUIRED) +find_package (Python 3.10 COMPONENTS Interpreter Development.Module REQUIRED) +set(PYBIND11_FINDPYTHON ON) # Needed 
for RH8 # Download or find pybind11 depending on configuration if(AARE_FETCH_PYBIND11) FetchContent_Declare( pybind11 GIT_REPOSITORY https://github.com/pybind/pybind11 - GIT_TAG v2.13.0 + GIT_TAG v2.13.6 ) FetchContent_MakeAvailable(pybind11) else() @@ -58,10 +59,16 @@ endforeach(FILE ${PYTHON_EXAMPLES}) if(AARE_INSTALL_PYTHONEXT) - install(TARGETS _aare + install( + TARGETS _aare EXPORT "${TARGETS_EXPORT_NAME}" LIBRARY DESTINATION aare + COMPONENT python ) - install(FILES ${PYTHON_FILES} DESTINATION aare) + install( + FILES ${PYTHON_FILES} + DESTINATION aare + COMPONENT python + ) endif() \ No newline at end of file diff --git a/python/aare/__init__.py b/python/aare/__init__.py index 058d7cf..db9672f 100644 --- a/python/aare/__init__.py +++ b/python/aare/__init__.py @@ -2,7 +2,7 @@ from . import _aare -from ._aare import File, RawMasterFile, RawSubFile +from ._aare import File, RawMasterFile, RawSubFile, JungfrauDataFile from ._aare import Pedestal_d, Pedestal_f, ClusterFinder, VarClusterFinder from ._aare import DetectorType from ._aare import ClusterFile @@ -13,11 +13,15 @@ from ._aare import ClusterFinderMT, ClusterCollector, ClusterFileSink, ClusterVe from ._aare import fit_gaus, fit_pol1 from ._aare import Interpolator + + +from ._aare import apply_custom_weights + from .CtbRawFile import CtbRawFile from .RawFile import RawFile from .ScanParameters import ScanParameters -from .utils import random_pixels, random_pixel, flat_list +from .utils import random_pixels, random_pixel, flat_list, add_colorbar #make functions available in the top level API diff --git a/python/aare/utils.py b/python/aare/utils.py index 4708921..a10f54c 100644 --- a/python/aare/utils.py +++ b/python/aare/utils.py @@ -1,4 +1,6 @@ import numpy as np +import matplotlib.pyplot as plt +from mpl_toolkits.axes_grid1 import make_axes_locatable def random_pixels(n_pixels, xmin=0, xmax=512, ymin=0, ymax=1024): """Return a list of random pixels. 
@@ -24,4 +26,11 @@ def random_pixel(xmin=0, xmax=512, ymin=0, ymax=1024): def flat_list(xss): """Flatten a list of lists.""" - return [x for xs in xss for x in xs] \ No newline at end of file + return [x for xs in xss for x in xs] + +def add_colorbar(ax, im, size="5%", pad=0.05): + """Add a colorbar with the same height as the image.""" + divider = make_axes_locatable(ax) + cax = divider.append_axes("right", size=size, pad=pad) + plt.colorbar(im, cax=cax) + return ax, im, cax \ No newline at end of file diff --git a/python/src/ctb_raw_file.hpp b/python/src/ctb_raw_file.hpp index 56e571b..a88a9d1 100644 --- a/python/src/ctb_raw_file.hpp +++ b/python/src/ctb_raw_file.hpp @@ -10,6 +10,8 @@ #include "aare/decode.hpp" // #include "aare/fClusterFileV2.hpp" +#include "np_helper.hpp" + #include #include #include @@ -65,35 +67,54 @@ m.def("adc_sar_04_decode64to16", [](py::array_t input) { return output; }); - py::class_(m, "CtbRawFile") - .def(py::init()) - .def("read_frame", - [](CtbRawFile &self) { - size_t image_size = self.image_size_in_bytes(); - py::array image; - std::vector shape; - shape.reserve(2); - shape.push_back(1); - shape.push_back(image_size); +m.def( + "apply_custom_weights", + [](py::array_t &input, + py::array_t + &weights) { + - py::array_t header(1); + // Create new array with same shape as the input array (uninitialized values) + py::buffer_info buf = input.request(); + py::array_t output(buf.shape); - // always read bytes - image = py::array_t(shape); + // Use NDViews to call into the C++ library + auto weights_view = make_view_1d(weights); + NDView input_view(input.mutable_data(), {input.size()}); + NDView output_view(output.mutable_data(), {output.size()}); - self.read_into( - reinterpret_cast(image.mutable_data()), - header.mutable_data()); + apply_custom_weights(input_view, output_view, weights_view); + return output; + }); - return py::make_tuple(header, image); - }) - .def("seek", &CtbRawFile::seek) - .def("tell", &CtbRawFile::tell) - 
.def("master", &CtbRawFile::master) +py::class_(m, "CtbRawFile") + .def(py::init()) + .def("read_frame", + [](CtbRawFile &self) { + size_t image_size = self.image_size_in_bytes(); + py::array image; + std::vector shape; + shape.reserve(2); + shape.push_back(1); + shape.push_back(image_size); - .def_property_readonly("image_size_in_bytes", - &CtbRawFile::image_size_in_bytes) + py::array_t header(1); - .def_property_readonly("frames_in_file", &CtbRawFile::frames_in_file); + // always read bytes + image = py::array_t(shape); -} \ No newline at end of file + self.read_into(reinterpret_cast(image.mutable_data()), + header.mutable_data()); + + return py::make_tuple(header, image); + }) + .def("seek", &CtbRawFile::seek) + .def("tell", &CtbRawFile::tell) + .def("master", &CtbRawFile::master) + + .def_property_readonly("image_size_in_bytes", + &CtbRawFile::image_size_in_bytes) + + .def_property_readonly("frames_in_file", &CtbRawFile::frames_in_file); + +} diff --git a/python/src/file.hpp b/python/src/file.hpp index 0d64e16..2d0f53e 100644 --- a/python/src/file.hpp +++ b/python/src/file.hpp @@ -20,6 +20,9 @@ namespace py = pybind11; using namespace ::aare; + + + //Disable warnings for unused parameters, as we ignore some //in the __exit__ method #pragma GCC diagnostic push @@ -214,36 +217,9 @@ void define_file_io_bindings(py::module &m) { - py::class_(m, "RawSubFile") - .def(py::init()) - .def_property_readonly("bytes_per_frame", &RawSubFile::bytes_per_frame) - .def_property_readonly("pixels_per_frame", - &RawSubFile::pixels_per_frame) - .def("seek", &RawSubFile::seek) - .def("tell", &RawSubFile::tell) - .def_property_readonly("rows", &RawSubFile::rows) - .def_property_readonly("cols", &RawSubFile::cols) - .def("read_frame", - [](RawSubFile &self) { - const uint8_t item_size = self.bytes_per_pixel(); - py::array image; - std::vector shape; - shape.reserve(2); - shape.push_back(self.rows()); - shape.push_back(self.cols()); - if (item_size == 1) { - image = py::array_t(shape); 
- } else if (item_size == 2) { - image = py::array_t(shape); - } else if (item_size == 4) { - image = py::array_t(shape); - } - fmt::print("item_size: {} rows: {} cols: {}\n", item_size, self.rows(), self.cols()); - self.read_into( - reinterpret_cast(image.mutable_data())); - return image; - }); + + + #pragma GCC diagnostic pop // py::class_(m, "ClusterHeader") diff --git a/python/src/jungfrau_data_file.hpp b/python/src/jungfrau_data_file.hpp new file mode 100644 index 0000000..942f6a6 --- /dev/null +++ b/python/src/jungfrau_data_file.hpp @@ -0,0 +1,116 @@ + +#include "aare/JungfrauDataFile.hpp" +#include "aare/defs.hpp" + +#include +#include +#include +#include +#include +#include +#include +#include + +namespace py = pybind11; +using namespace ::aare; + +// Disable warnings for unused parameters, as we ignore some +// in the __exit__ method +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wunused-parameter" + +auto read_dat_frame(JungfrauDataFile &self) { + py::array_t header(1); + py::array_t image({ + self.rows(), + self.cols() + }); + + self.read_into(reinterpret_cast(image.mutable_data()), + header.mutable_data()); + + return py::make_tuple(header, image); +} + +auto read_n_dat_frames(JungfrauDataFile &self, size_t n_frames) { + // adjust for actual frames left in the file + n_frames = std::min(n_frames, self.total_frames() - self.tell()); + if (n_frames == 0) { + throw std::runtime_error("No frames left in file"); + } + + py::array_t header(n_frames); + py::array_t image({ + n_frames, self.rows(), + self.cols()}); + + self.read_into(reinterpret_cast(image.mutable_data()), + n_frames, header.mutable_data()); + + return py::make_tuple(header, image); +} + +void define_jungfrau_data_file_io_bindings(py::module &m) { + // Make the JungfrauDataHeader usable from numpy + PYBIND11_NUMPY_DTYPE(JungfrauDataHeader, framenum, bunchid); + + py::class_(m, "JungfrauDataFile") + .def(py::init()) + .def("seek", &JungfrauDataFile::seek, + R"( + Seek to the 
given frame index. + )") + .def("tell", &JungfrauDataFile::tell, + R"( + Get the current frame index. + )") + .def_property_readonly("rows", &JungfrauDataFile::rows) + .def_property_readonly("cols", &JungfrauDataFile::cols) + .def_property_readonly("base_name", &JungfrauDataFile::base_name) + .def_property_readonly("bytes_per_frame", + &JungfrauDataFile::bytes_per_frame) + .def_property_readonly("pixels_per_frame", + &JungfrauDataFile::pixels_per_frame) + .def_property_readonly("bytes_per_pixel", + &JungfrauDataFile::bytes_per_pixel) + .def_property_readonly("bitdepth", &JungfrauDataFile::bitdepth) + .def_property_readonly("current_file", &JungfrauDataFile::current_file) + .def_property_readonly("total_frames", &JungfrauDataFile::total_frames) + .def_property_readonly("n_files", &JungfrauDataFile::n_files) + .def("read_frame", &read_dat_frame, + R"( + Read a single frame from the file. + )") + .def("read_n", &read_n_dat_frames, + R"( + Read maximum n_frames frames from the file. + )") + .def( + "read", + [](JungfrauDataFile &self) { + self.seek(0); + auto n_frames = self.total_frames(); + return read_n_dat_frames(self, n_frames); + }, + R"( + Read all frames from the file. Seeks to the beginning before reading. 
+ )") + .def("__enter__", [](JungfrauDataFile &self) { return &self; }) + .def("__exit__", + [](JungfrauDataFile &self, + const std::optional &exc_type, + const std::optional &exc_value, + const std::optional &traceback) { + // self.close(); + }) + .def("__iter__", [](JungfrauDataFile &self) { return &self; }) + .def("__next__", [](JungfrauDataFile &self) { + try { + return read_dat_frame(self); + } catch (std::runtime_error &e) { + throw py::stop_iteration(); + } + }); +} + +#pragma GCC diagnostic pop \ No newline at end of file diff --git a/python/src/module.cpp b/python/src/module.cpp index 43f48ba..75fe237 100644 --- a/python/src/module.cpp +++ b/python/src/module.cpp @@ -10,6 +10,9 @@ #include "cluster_file.hpp" #include "fit.hpp" #include "interpolation.hpp" +#include "raw_sub_file.hpp" + +#include "jungfrau_data_file.hpp" //Pybind stuff #include @@ -20,6 +23,7 @@ namespace py = pybind11; PYBIND11_MODULE(_aare, m) { define_file_io_bindings(m); define_raw_file_io_bindings(m); + define_raw_sub_file_io_bindings(m); define_ctb_raw_file_io_bindings(m); define_raw_master_file_bindings(m); define_var_cluster_finder_bindings(m); @@ -33,5 +37,6 @@ PYBIND11_MODULE(_aare, m) { define_cluster_file_sink_bindings(m); define_fit_bindings(m); define_interpolation_bindings(m); + define_jungfrau_data_file_io_bindings(m); } \ No newline at end of file diff --git a/python/src/raw_sub_file.hpp b/python/src/raw_sub_file.hpp new file mode 100644 index 0000000..2cb83fc --- /dev/null +++ b/python/src/raw_sub_file.hpp @@ -0,0 +1,110 @@ +#include "aare/CtbRawFile.hpp" +#include "aare/File.hpp" +#include "aare/Frame.hpp" +#include "aare/RawFile.hpp" +#include "aare/RawMasterFile.hpp" +#include "aare/RawSubFile.hpp" + +#include "aare/defs.hpp" +// #include "aare/fClusterFileV2.hpp" + +#include +#include +#include +#include +#include +#include +#include +#include + +namespace py = pybind11; +using namespace ::aare; + +auto read_frame_from_RawSubFile(RawSubFile &self) { + py::array_t 
header(1); + const uint8_t item_size = self.bytes_per_pixel(); + std::vector shape{static_cast(self.rows()), + static_cast(self.cols())}; + + py::array image; + if (item_size == 1) { + image = py::array_t(shape); + } else if (item_size == 2) { + image = py::array_t(shape); + } else if (item_size == 4) { + image = py::array_t(shape); + } + self.read_into(reinterpret_cast(image.mutable_data()), + header.mutable_data()); + + return py::make_tuple(header, image); +} + +auto read_n_frames_from_RawSubFile(RawSubFile &self, size_t n_frames) { + py::array_t header(n_frames); + const uint8_t item_size = self.bytes_per_pixel(); + std::vector shape{ + static_cast(n_frames), + static_cast(self.rows()), + static_cast(self.cols()) + }; + + py::array image; + if (item_size == 1) { + image = py::array_t(shape); + } else if (item_size == 2) { + image = py::array_t(shape); + } else if (item_size == 4) { + image = py::array_t(shape); + } + self.read_into(reinterpret_cast(image.mutable_data()), n_frames, + header.mutable_data()); + + return py::make_tuple(header, image); +} + + +//Disable warnings for unused parameters, as we ignore some +//in the __exit__ method +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wunused-parameter" + +void define_raw_sub_file_io_bindings(py::module &m) { + py::class_(m, "RawSubFile") + .def(py::init()) + .def_property_readonly("bytes_per_frame", &RawSubFile::bytes_per_frame) + .def_property_readonly("pixels_per_frame", + &RawSubFile::pixels_per_frame) + .def_property_readonly("bytes_per_pixel", &RawSubFile::bytes_per_pixel) + .def("seek", &RawSubFile::seek) + .def("tell", &RawSubFile::tell) + .def_property_readonly("rows", &RawSubFile::rows) + .def_property_readonly("cols", &RawSubFile::cols) + .def_property_readonly("frames_in_file", &RawSubFile::frames_in_file) + .def("read_frame", &read_frame_from_RawSubFile) + .def("read_n", &read_n_frames_from_RawSubFile) + .def("read", [](RawSubFile &self){ + self.seek(0); + auto n_frames = 
self.frames_in_file(); + return read_n_frames_from_RawSubFile(self, n_frames); + }) + .def("__enter__", [](RawSubFile &self) { return &self; }) + .def("__exit__", + [](RawSubFile &self, + const std::optional &exc_type, + const std::optional &exc_value, + const std::optional &traceback) { + }) + .def("__iter__", [](RawSubFile &self) { return &self; }) + .def("__next__", [](RawSubFile &self) { + try { + return read_frame_from_RawSubFile(self); + } catch (std::runtime_error &e) { + throw py::stop_iteration(); + } + }); + +} + +#pragma GCC diagnostic pop \ No newline at end of file diff --git a/python/tests/conftest.py b/python/tests/conftest.py new file mode 100644 index 0000000..5badf13 --- /dev/null +++ b/python/tests/conftest.py @@ -0,0 +1,29 @@ +import os +from pathlib import Path +import pytest + + + +def pytest_addoption(parser): + parser.addoption( + "--files", action="store_true", default=False, help="run slow tests" + ) + + +def pytest_configure(config): + config.addinivalue_line("markers", "files: mark test as needing image files to run") + + +def pytest_collection_modifyitems(config, items): + if config.getoption("--files"): + return + skip = pytest.mark.skip(reason="need --files option to run") + for item in items: + if "files" in item.keywords: + item.add_marker(skip) + + +@pytest.fixture +def test_data_path(): + return Path(os.environ["AARE_TEST_DATA"]) + diff --git a/python/tests/test_RawSubFile.py b/python/tests/test_RawSubFile.py new file mode 100644 index 0000000..a5eea91 --- /dev/null +++ b/python/tests/test_RawSubFile.py @@ -0,0 +1,36 @@ +import pytest +import numpy as np +from aare import RawSubFile, DetectorType + + +@pytest.mark.files +def test_read_a_jungfrau_RawSubFile(test_data_path): + with RawSubFile(test_data_path / "raw/jungfrau/jungfrau_single_d0_f1_0.raw", DetectorType.Jungfrau, 512, 1024, 16) as f: + assert f.frames_in_file == 3 + + headers, frames = f.read() + + assert headers.size == 3 + assert frames.shape == (3, 512, 1024) + + # 
Frame numbers in this file should be 4, 5, 6 + for i,h in zip(range(4,7,1), headers): + assert h["frameNumber"] == i + + # Compare to canned data using numpy + data = np.load(test_data_path / "raw/jungfrau/jungfrau_single_0.npy") + assert np.all(data[3:6] == frames) + +@pytest.mark.files +def test_iterate_over_a_jungfrau_RawSubFile(test_data_path): + + data = np.load(test_data_path / "raw/jungfrau/jungfrau_single_0.npy") + + with RawSubFile(test_data_path / "raw/jungfrau/jungfrau_single_d0_f0_0.raw", DetectorType.Jungfrau, 512, 1024, 16) as f: + i = 0 + for header, frame in f: + assert header["frameNumber"] == i+1 + assert np.all(frame == data[i]) + i += 1 + assert i == 3 + assert header["frameNumber"] == 3 \ No newline at end of file diff --git a/python/tests/test_jungfrau_dat_files.py b/python/tests/test_jungfrau_dat_files.py new file mode 100644 index 0000000..5d3fdf8 --- /dev/null +++ b/python/tests/test_jungfrau_dat_files.py @@ -0,0 +1,92 @@ +import pytest +import numpy as np +from aare import JungfrauDataFile + +@pytest.mark.files +def test_jfungfrau_dat_read_number_of_frames(test_data_path): + with JungfrauDataFile(test_data_path / "dat/AldoJF500k_000000.dat") as dat_file: + assert dat_file.total_frames == 24 + + with JungfrauDataFile(test_data_path / "dat/AldoJF250k_000000.dat") as dat_file: + assert dat_file.total_frames == 53 + + with JungfrauDataFile(test_data_path / "dat/AldoJF65k_000000.dat") as dat_file: + assert dat_file.total_frames == 113 + + +@pytest.mark.files +def test_jfungfrau_dat_read_number_of_file(test_data_path): + with JungfrauDataFile(test_data_path / "dat/AldoJF500k_000000.dat") as dat_file: + assert dat_file.n_files == 4 + + with JungfrauDataFile(test_data_path / "dat/AldoJF250k_000000.dat") as dat_file: + assert dat_file.n_files == 7 + + with JungfrauDataFile(test_data_path / "dat/AldoJF65k_000000.dat") as dat_file: + assert dat_file.n_files == 7 + + +@pytest.mark.files +def test_read_module(test_data_path): + """ + Read all frames 
from the series of .dat files. Compare to canned data in npz format. + """ + + # Read all frames from the .dat file + with JungfrauDataFile(test_data_path / "dat/AldoJF500k_000000.dat") as f: + header, data = f.read() + + #Sanity check + n_frames = 24 + assert header.size == n_frames + assert data.shape == (n_frames, 512, 1024) + + # Read reference data using numpy + with np.load(test_data_path / "dat/AldoJF500k.npz") as f: + ref_header = f["headers"] + ref_data = f["frames"] + + # Check that the data is the same + assert np.all(ref_header == header) + assert np.all(ref_data == data) + +@pytest.mark.files +def test_read_half_module(test_data_path): + + # Read all frames from the .dat file + with JungfrauDataFile(test_data_path / "dat/AldoJF250k_000000.dat") as f: + header, data = f.read() + + n_frames = 53 + assert header.size == n_frames + assert data.shape == (n_frames, 256, 1024) + + # Read reference data using numpy + with np.load(test_data_path / "dat/AldoJF250k.npz") as f: + ref_header = f["headers"] + ref_data = f["frames"] + + # Check that the data is the same + assert np.all(ref_header == header) + assert np.all(ref_data == data) + + +@pytest.mark.files +def test_read_single_chip(test_data_path): + + # Read all frames from the .dat file + with JungfrauDataFile(test_data_path / "dat/AldoJF65k_000000.dat") as f: + header, data = f.read() + + n_frames = 113 + assert header.size == n_frames + assert data.shape == (n_frames, 256, 256) + + # Read reference data using numpy + with np.load(test_data_path / "dat/AldoJF65k.npz") as f: + ref_header = f["headers"] + ref_data = f["frames"] + + # Check that the data is the same + assert np.all(ref_header == header) + assert np.all(ref_data == data) \ No newline at end of file diff --git a/src/ClusterFile.cpp b/src/ClusterFile.cpp index f77ac92..d24e803 100644 --- a/src/ClusterFile.cpp +++ b/src/ClusterFile.cpp @@ -41,6 +41,12 @@ void ClusterFile::set_noise_map(const NDView noise_map){ void 
ClusterFile::set_gain_map(const NDView gain_map){ m_gain_map = NDArray(gain_map); + + // Gain map is passed as ADU/keV to avoid dividing in when applying the gain + // map we invert it here + for (auto &item : m_gain_map->view()) { + item = 1.0 / item; + } } ClusterFile::~ClusterFile() { close(); } diff --git a/src/ClusterFile.test.cpp b/src/ClusterFile.test.cpp index a0eed04..4152ce0 100644 --- a/src/ClusterFile.test.cpp +++ b/src/ClusterFile.test.cpp @@ -11,9 +11,10 @@ using aare::ClusterFile; -TEST_CASE("Read one frame from a a cluster file", "[.integration]") { + +TEST_CASE("Read one frame from a a cluster file", "[.files]") { //We know that the frame has 97 clusters - auto fpath = test_data_path() / "clusters" / "single_frame_97_clustrers.clust"; + auto fpath = test_data_path() / "clust" / "single_frame_97_clustrers.clust"; REQUIRE(std::filesystem::exists(fpath)); ClusterFile f(fpath); @@ -22,9 +23,10 @@ TEST_CASE("Read one frame from a a cluster file", "[.integration]") { REQUIRE(clusters.frame_number() == 135); } -TEST_CASE("Read one frame using ROI", "[.integration]") { + +TEST_CASE("Read one frame using ROI", "[.files]") { //We know that the frame has 97 clusters - auto fpath = test_data_path() / "clusters" / "single_frame_97_clustrers.clust"; + auto fpath = test_data_path() / "clust" / "single_frame_97_clustrers.clust"; REQUIRE(std::filesystem::exists(fpath)); ClusterFile f(fpath); @@ -50,9 +52,11 @@ TEST_CASE("Read one frame using ROI", "[.integration]") { } -TEST_CASE("Read clusters from single frame file", "[.integration]") { - auto fpath = test_data_path() / "clusters" / "single_frame_97_clustrers.clust"; +TEST_CASE("Read clusters from single frame file", "[.files]") { + + auto fpath = test_data_path() / "clust" / "single_frame_97_clustrers.clust"; + REQUIRE(std::filesystem::exists(fpath)); SECTION("Read fewer clusters than available") { diff --git a/src/File.cpp b/src/File.cpp index 3c68eff..eb04893 100644 --- a/src/File.cpp +++ b/src/File.cpp @@ 
-1,4 +1,5 @@ #include "aare/File.hpp" +#include "aare/JungfrauDataFile.hpp" #include "aare/NumpyFile.hpp" #include "aare/RawFile.hpp" @@ -27,6 +28,8 @@ File::File(const std::filesystem::path &fname, const std::string &mode, else if (fname.extension() == ".npy") { // file_impl = new NumpyFile(fname, mode, cfg); file_impl = std::make_unique(fname, mode, cfg); + }else if(fname.extension() == ".dat"){ + file_impl = std::make_unique(fname); } else { throw std::runtime_error("Unsupported file type"); } diff --git a/src/FilePtr.cpp b/src/FilePtr.cpp new file mode 100644 index 0000000..4fed3d7 --- /dev/null +++ b/src/FilePtr.cpp @@ -0,0 +1,44 @@ + +#include "aare/FilePtr.hpp" +#include +#include +#include + +namespace aare { + +FilePtr::FilePtr(const std::filesystem::path& fname, const std::string& mode = "rb") { + fp_ = fopen(fname.c_str(), mode.c_str()); + if (!fp_) + throw std::runtime_error(fmt::format("Could not open: {}", fname.c_str())); +} + +FilePtr::FilePtr(FilePtr &&other) { std::swap(fp_, other.fp_); } + +FilePtr &FilePtr::operator=(FilePtr &&other) { + std::swap(fp_, other.fp_); + return *this; +} + +FILE *FilePtr::get() { return fp_; } + +int64_t FilePtr::tell() { + auto pos = ftell(fp_); + if (pos == -1) + throw std::runtime_error(fmt::format("Error getting file position: {}", error_msg())); + return pos; +} +FilePtr::~FilePtr() { + if (fp_) + fclose(fp_); // check? 
+} + +std::string FilePtr::error_msg(){ + if (feof(fp_)) { + return "End of file reached"; + } + if (ferror(fp_)) { + return fmt::format("Error reading file: {}", std::strerror(errno)); + } + return ""; +} +} // namespace aare diff --git a/src/Fit.cpp b/src/Fit.cpp index 3001efd..9126109 100644 --- a/src/Fit.cpp +++ b/src/Fit.cpp @@ -18,7 +18,7 @@ double gaus(const double x, const double *par) { NDArray gaus(NDView x, NDView par) { NDArray y({x.shape(0)}, 0); - for (size_t i = 0; i < x.size(); i++) { + for (ssize_t i = 0; i < x.size(); i++) { y(i) = gaus(x(i), par.data()); } return y; @@ -28,7 +28,7 @@ double pol1(const double x, const double *par) { return par[0] * x + par[1]; } NDArray pol1(NDView x, NDView par) { NDArray y({x.shape()}, 0); - for (size_t i = 0; i < x.size(); i++) { + for (ssize_t i = 0; i < x.size(); i++) { y(i) = pol1(x(i), par.data()); } return y; @@ -153,7 +153,7 @@ void fit_gaus(NDView x, NDView y, NDView y_err, // Calculate chi2 chi2 = 0; - for (size_t i = 0; i < y.size(); i++) { + for (ssize_t i = 0; i < y.size(); i++) { chi2 += std::pow((y(i) - func::gaus(x(i), par_out.data())) / y_err(i), 2); } } @@ -205,7 +205,7 @@ void fit_pol1(NDView x, NDView y, NDView y_err, // Calculate chi2 chi2 = 0; - for (size_t i = 0; i < y.size(); i++) { + for (ssize_t i = 0; i < y.size(); i++) { chi2 += std::pow((y(i) - func::pol1(x(i), par_out.data())) / y_err(i), 2); } } diff --git a/src/Interpolator.cpp b/src/Interpolator.cpp index 7f82533..7034a83 100644 --- a/src/Interpolator.cpp +++ b/src/Interpolator.cpp @@ -68,19 +68,14 @@ std::vector Interpolator::interpolate(const ClusterVector& clus photon.y = cluster.y; photon.energy = eta.sum; - // auto ie = nearest_index(m_energy_bins, photon.energy)-1; - // auto ix = nearest_index(m_etabinsx, eta.x)-1; - // auto iy = nearest_index(m_etabinsy, eta.y)-1; + //Finding the index of the last element that is smaller //should work fine as long as we have many bins auto ie = last_smaller(m_energy_bins, photon.energy); 
auto ix = last_smaller(m_etabinsx, eta.x); auto iy = last_smaller(m_etabinsy, eta.y); - - // fmt::print("ex: {}, ix: {}, iy: {}\n", ie, ix, iy); - double dX, dY; - int ex, ey; + double dX{}, dY{}; // cBottomLeft = 0, // cBottomRight = 1, // cTopLeft = 2, diff --git a/src/JungfrauDataFile.cpp b/src/JungfrauDataFile.cpp new file mode 100644 index 0000000..8f1f904 --- /dev/null +++ b/src/JungfrauDataFile.cpp @@ -0,0 +1,238 @@ +#include "aare/JungfrauDataFile.hpp" +#include "aare/algorithm.hpp" +#include "aare/defs.hpp" + +#include +#include + +namespace aare { + +JungfrauDataFile::JungfrauDataFile(const std::filesystem::path &fname) { + + if (!std::filesystem::exists(fname)) { + throw std::runtime_error(LOCATION + + "File does not exist: " + fname.string()); + } + find_frame_size(fname); + parse_fname(fname); + scan_files(); + open_file(m_current_file_index); +} + + +// FileInterface + +Frame JungfrauDataFile::read_frame(){ + Frame f(rows(), cols(), Dtype::UINT16); + read_into(reinterpret_cast(f.data()), nullptr); + return f; +} + +Frame JungfrauDataFile::read_frame(size_t frame_number){ + seek(frame_number); + Frame f(rows(), cols(), Dtype::UINT16); + read_into(reinterpret_cast(f.data()), nullptr); + return f; +} + +std::vector JungfrauDataFile::read_n(size_t n_frames) { + std::vector frames; + for(size_t i = 0; i < n_frames; ++i){ + frames.push_back(read_frame()); + } + return frames; +} + +void JungfrauDataFile::read_into(std::byte *image_buf) { + read_into(image_buf, nullptr); +} +void JungfrauDataFile::read_into(std::byte *image_buf, size_t n_frames) { + read_into(image_buf, n_frames, nullptr); +} + +size_t JungfrauDataFile::frame_number(size_t frame_index) { + seek(frame_index); + return read_header().framenum; +} + +std::array JungfrauDataFile::shape() const { + return {static_cast(rows()), static_cast(cols())}; +} + +DetectorType JungfrauDataFile::detector_type() const { return DetectorType::Jungfrau; } + +std::string JungfrauDataFile::base_name() const { 
return m_base_name; } + +size_t JungfrauDataFile::bytes_per_frame() { return m_bytes_per_frame; } + +size_t JungfrauDataFile::pixels_per_frame() { return m_rows * m_cols; } + +size_t JungfrauDataFile::bytes_per_pixel() const { return sizeof(pixel_type); } + +size_t JungfrauDataFile::bitdepth() const { + return bytes_per_pixel() * bits_per_byte; +} + +void JungfrauDataFile::seek(size_t frame_index) { + if (frame_index >= m_total_frames) { + throw std::runtime_error(LOCATION + "Frame index out of range: " + + std::to_string(frame_index)); + } + m_current_frame_index = frame_index; + auto file_index = first_larger(m_last_frame_in_file, frame_index); + + if (file_index != m_current_file_index) + open_file(file_index); + + auto frame_offset = (file_index) + ? frame_index - m_last_frame_in_file[file_index - 1] + : frame_index; + auto byte_offset = frame_offset * (m_bytes_per_frame + header_size); + m_fp.seek(byte_offset); +}; + +size_t JungfrauDataFile::tell() { return m_current_frame_index; } +size_t JungfrauDataFile::total_frames() const { return m_total_frames; } +size_t JungfrauDataFile::rows() const { return m_rows; } +size_t JungfrauDataFile::cols() const { return m_cols; } + +size_t JungfrauDataFile::n_files() const { return m_last_frame_in_file.size(); } + +void JungfrauDataFile::find_frame_size(const std::filesystem::path &fname) { + + static constexpr size_t module_data_size = + header_size + sizeof(pixel_type) * 512 * 1024; + static constexpr size_t half_data_size = + header_size + sizeof(pixel_type) * 256 * 1024; + static constexpr size_t chip_data_size = + header_size + sizeof(pixel_type) * 256 * 256; + + auto file_size = std::filesystem::file_size(fname); + if (file_size == 0) { + throw std::runtime_error(LOCATION + + "Cannot guess frame size: file is empty"); + } + + if (file_size % module_data_size == 0) { + m_rows = 512; + m_cols = 1024; + m_bytes_per_frame = module_data_size - header_size; + } else if (file_size % half_data_size == 0) { + m_rows = 256; 
+ m_cols = 1024; + m_bytes_per_frame = half_data_size - header_size; + } else if (file_size % chip_data_size == 0) { + m_rows = 256; + m_cols = 256; + m_bytes_per_frame = chip_data_size - header_size; + } else { + throw std::runtime_error(LOCATION + + "Cannot find frame size: file size is not a " + "multiple of any known frame size"); + } +} + +void JungfrauDataFile::parse_fname(const std::filesystem::path &fname) { + m_path = fname.parent_path(); + m_base_name = fname.stem(); + + // find file index, then remove if from the base name + if (auto pos = m_base_name.find_last_of('_'); pos != std::string::npos) { + m_offset = std::stoul(m_base_name.substr(pos + 1)); + m_base_name.erase(pos); + } +} + +void JungfrauDataFile::scan_files() { + // find how many files we have and the number of frames in each file + m_last_frame_in_file.clear(); + size_t file_index = m_offset; + while (std::filesystem::exists(fpath(file_index))) { + auto n_frames = std::filesystem::file_size(fpath(file_index)) / + (m_bytes_per_frame + header_size); + m_last_frame_in_file.push_back(n_frames); + ++file_index; + } + + // find where we need to open the next file and total number of frames + m_last_frame_in_file = cumsum(m_last_frame_in_file); + m_total_frames = m_last_frame_in_file.back(); +} + +void JungfrauDataFile::read_into(std::byte *image_buf, + JungfrauDataHeader *header) { + + // read header if not passed nullptr + if (header) { + if (auto rc = fread(header, sizeof(JungfrauDataHeader), 1, m_fp.get()); + rc != 1) { + throw std::runtime_error( + LOCATION + + "Could not read header from file:" + m_fp.error_msg()); + } + } else { + m_fp.seek(header_size, SEEK_CUR); + } + + // read data + if (auto rc = fread(image_buf, 1, m_bytes_per_frame, m_fp.get()); + rc != m_bytes_per_frame) { + throw std::runtime_error(LOCATION + "Could not read image from file" + + m_fp.error_msg()); + } + + // prepare for next read + // if we are at the end of the file, open the next file + ++m_current_frame_index; + 
if (m_current_frame_index >= m_last_frame_in_file[m_current_file_index] && + (m_current_frame_index < m_total_frames)) { + ++m_current_file_index; + open_file(m_current_file_index); + } +} + +void JungfrauDataFile::read_into(std::byte *image_buf, size_t n_frames, + JungfrauDataHeader *header) { + if (header) { + for (size_t i = 0; i < n_frames; ++i) + read_into(image_buf + i * m_bytes_per_frame, header + i); + }else{ + for (size_t i = 0; i < n_frames; ++i) + read_into(image_buf + i * m_bytes_per_frame, nullptr); + } +} + +void JungfrauDataFile::read_into(NDArray* image, JungfrauDataHeader* header) { + if(image->shape()!=shape()){ + throw std::runtime_error(LOCATION + + "Image shape does not match file size: " + std::to_string(rows()) + "x" + std::to_string(cols())); + } + read_into(reinterpret_cast(image->data()), header); +} + + +JungfrauDataHeader JungfrauDataFile::read_header() { + JungfrauDataHeader header; + if (auto rc = fread(&header, 1, sizeof(header), m_fp.get()); + rc != sizeof(header)) { + throw std::runtime_error(LOCATION + "Could not read header from file" + + m_fp.error_msg()); + } + m_fp.seek(-header_size, SEEK_CUR); + return header; +} + +void JungfrauDataFile::open_file(size_t file_index) { + // fmt::print(stderr, "Opening file: {}\n", + // fpath(file_index+m_offset).string()); + m_fp = FilePtr(fpath(file_index + m_offset), "rb"); + m_current_file_index = file_index; +} + +std::filesystem::path JungfrauDataFile::fpath(size_t file_index) const { + auto fname = fmt::format("{}_{:0{}}.dat", m_base_name, file_index, + n_digits_in_file_index); + return m_path / fname; +} + +} // namespace aare \ No newline at end of file diff --git a/src/JungfrauDataFile.test.cpp b/src/JungfrauDataFile.test.cpp new file mode 100644 index 0000000..ce51168 --- /dev/null +++ b/src/JungfrauDataFile.test.cpp @@ -0,0 +1,114 @@ +#include "aare/JungfrauDataFile.hpp" + +#include +#include "test_config.hpp" + +using aare::JungfrauDataFile; +using aare::JungfrauDataHeader; 
+TEST_CASE("Open a Jungfrau data file", "[.files]") { + //we know we have 4 files with 7, 7, 7, and 3 frames + //firs frame number if 1 and the bunch id is frame_number**2 + //so we can check the header + auto fpath = test_data_path() / "dat" / "AldoJF500k_000000.dat"; + REQUIRE(std::filesystem::exists(fpath)); + + JungfrauDataFile f(fpath); + REQUIRE(f.rows() == 512); + REQUIRE(f.cols() == 1024); + REQUIRE(f.bytes_per_frame() == 1048576); + REQUIRE(f.pixels_per_frame() == 524288); + REQUIRE(f.bytes_per_pixel() == 2); + REQUIRE(f.bitdepth() == 16); + REQUIRE(f.base_name() == "AldoJF500k"); + REQUIRE(f.n_files() == 4); + REQUIRE(f.tell() == 0); + REQUIRE(f.total_frames() == 24); + REQUIRE(f.current_file() == fpath); + + //Check that the frame number and buch id is read correctly + for (size_t i = 0; i < 24; ++i) { + JungfrauDataHeader header; + aare::NDArray image(f.shape()); + f.read_into(&image, &header); + REQUIRE(header.framenum == i + 1); + REQUIRE(header.bunchid == (i + 1) * (i + 1)); + REQUIRE(image.shape(0) == 512); + REQUIRE(image.shape(1) == 1024); + } +} + +TEST_CASE("Seek in a JungfrauDataFile", "[.files]"){ + auto fpath = test_data_path() / "dat" / "AldoJF65k_000000.dat"; + REQUIRE(std::filesystem::exists(fpath)); + + JungfrauDataFile f(fpath); + + //The file should have 113 frames + f.seek(19); + REQUIRE(f.tell() == 19); + auto h = f.read_header(); + REQUIRE(h.framenum == 19+1); + + //Reading again does not change the file pointer + auto h2 = f.read_header(); + REQUIRE(h2.framenum == 19+1); + + f.seek(59); + REQUIRE(f.tell() == 59); + auto h3 = f.read_header(); + REQUIRE(h3.framenum == 59+1); + + JungfrauDataHeader h4; + aare::NDArray image(f.shape()); + f.read_into(&image, &h4); + REQUIRE(h4.framenum == 59+1); + + //now we should be on the next frame + REQUIRE(f.tell() == 60); + REQUIRE(f.read_header().framenum == 60+1); + + REQUIRE_THROWS(f.seek(86356)); //out of range +} + +TEST_CASE("Open a Jungfrau data file with non zero file index", "[.files]"){ 
+ + auto fpath = test_data_path() / "dat" / "AldoJF65k_000003.dat"; + REQUIRE(std::filesystem::exists(fpath)); + + JungfrauDataFile f(fpath); + + //18 files per data file, opening the 3rd file we ignore the first 3 + REQUIRE(f.total_frames() == 113-18*3); + REQUIRE(f.tell() == 0); + + //Frame numbers start at 1 in the first file + REQUIRE(f.read_header().framenum == 18*3+1); + + // moving relative to the third file + f.seek(5); + REQUIRE(f.read_header().framenum == 18*3+1+5); + + // ignoring the first 3 files + REQUIRE(f.n_files() == 4); + + REQUIRE(f.current_file().stem() == "AldoJF65k_000003"); + +} + +TEST_CASE("Read into throws if size doesn't match", "[.files]"){ + auto fpath = test_data_path() / "dat" / "AldoJF65k_000000.dat"; + REQUIRE(std::filesystem::exists(fpath)); + + JungfrauDataFile f(fpath); + + aare::NDArray image({39, 85}); + JungfrauDataHeader header; + + REQUIRE_THROWS(f.read_into(&image, &header)); + REQUIRE_THROWS(f.read_into(&image, nullptr)); + REQUIRE_THROWS(f.read_into(&image)); + + REQUIRE(f.tell() == 0); + + +} \ No newline at end of file diff --git a/src/NDArray.test.cpp b/src/NDArray.test.cpp index eff3e2c..c37a285 100644 --- a/src/NDArray.test.cpp +++ b/src/NDArray.test.cpp @@ -183,14 +183,14 @@ TEST_CASE("Size and shape matches") { int64_t h = 75; std::array shape{w, h}; NDArray a{shape}; - REQUIRE(a.size() == static_cast(w * h)); + REQUIRE(a.size() == w * h); REQUIRE(a.shape() == shape); } TEST_CASE("Initial value matches for all elements") { double v = 4.35; NDArray a{{5, 5}, v}; - for (uint32_t i = 0; i < a.size(); ++i) { + for (int i = 0; i < a.size(); ++i) { REQUIRE(a(i) == v); } } diff --git a/src/NDView.test.cpp b/src/NDView.test.cpp index 3070de6..8750f3a 100644 --- a/src/NDView.test.cpp +++ b/src/NDView.test.cpp @@ -3,6 +3,7 @@ #include #include +#include using aare::NDView; using aare::Shape; @@ -21,10 +22,8 @@ TEST_CASE("Element reference 1D") { } TEST_CASE("Element reference 2D") { - std::vector vec; - for (int i = 0; i != 
12; ++i) { - vec.push_back(i); - } + std::vector vec(12); + std::iota(vec.begin(), vec.end(), 0); NDView data(vec.data(), Shape<2>{3, 4}); REQUIRE(vec.size() == static_cast(data.size())); @@ -58,10 +57,8 @@ TEST_CASE("Element reference 3D") { } TEST_CASE("Plus and miuns with single value") { - std::vector vec; - for (int i = 0; i != 12; ++i) { - vec.push_back(i); - } + std::vector vec(12); + std::iota(vec.begin(), vec.end(), 0); NDView data(vec.data(), Shape<2>{3, 4}); data += 5; int i = 0; @@ -116,10 +113,8 @@ TEST_CASE("elementwise assign") { } TEST_CASE("iterators") { - std::vector vec; - for (int i = 0; i != 12; ++i) { - vec.push_back(i); - } + std::vector vec(12); + std::iota(vec.begin(), vec.end(), 0); NDView data(vec.data(), Shape<1>{12}); int i = 0; for (const auto item : data) { @@ -167,27 +162,31 @@ TEST_CASE("divide with another span") { } TEST_CASE("Retrieve shape") { - std::vector vec; - for (int i = 0; i != 12; ++i) { - vec.push_back(i); - } + std::vector vec(12); + std::iota(vec.begin(), vec.end(), 0); NDView data(vec.data(), Shape<2>{3, 4}); REQUIRE(data.shape()[0] == 3); REQUIRE(data.shape()[1] == 4); } TEST_CASE("compare two views") { - std::vector vec1; - for (int i = 0; i != 12; ++i) { - vec1.push_back(i); - } + std::vector vec1(12); + std::iota(vec1.begin(), vec1.end(), 0); NDView view1(vec1.data(), Shape<2>{3, 4}); - std::vector vec2; - for (int i = 0; i != 12; ++i) { - vec2.push_back(i); - } + std::vector vec2(12); + std::iota(vec2.begin(), vec2.end(), 0); NDView view2(vec2.data(), Shape<2>{3, 4}); REQUIRE((view1 == view2)); +} + + +TEST_CASE("Create a view over a vector"){ + std::vector vec(12); + std::iota(vec.begin(), vec.end(), 0); + auto v = aare::make_view(vec); + REQUIRE(v.shape()[0] == 12); + REQUIRE(v[0] == 0); + REQUIRE(v[11] == 11); } \ No newline at end of file diff --git a/src/RawSubFile.cpp b/src/RawSubFile.cpp index a3bb79c..9e7a421 100644 --- a/src/RawSubFile.cpp +++ b/src/RawSubFile.cpp @@ -1,9 +1,12 @@ #include 
"aare/RawSubFile.hpp" #include "aare/PixelMap.hpp" +#include "aare/utils/ifstream_helpers.hpp" #include // memcpy #include #include + + namespace aare { RawSubFile::RawSubFile(const std::filesystem::path &fname, @@ -20,7 +23,7 @@ RawSubFile::RawSubFile(const std::filesystem::path &fname, } if (std::filesystem::exists(fname)) { - n_frames = std::filesystem::file_size(fname) / + m_num_frames = std::filesystem::file_size(fname) / (sizeof(DetectorHeader) + rows * cols * bitdepth / 8); } else { throw std::runtime_error( @@ -35,7 +38,7 @@ RawSubFile::RawSubFile(const std::filesystem::path &fname, } #ifdef AARE_VERBOSE - fmt::print("Opened file: {} with {} frames\n", m_fname.string(), n_frames); + fmt::print("Opened file: {} with {} frames\n", m_fname.string(), m_num_frames); fmt::print("m_rows: {}, m_cols: {}, m_bitdepth: {}\n", m_rows, m_cols, m_bitdepth); fmt::print("file size: {}\n", std::filesystem::file_size(fname)); @@ -43,8 +46,8 @@ RawSubFile::RawSubFile(const std::filesystem::path &fname, } void RawSubFile::seek(size_t frame_index) { - if (frame_index >= n_frames) { - throw std::runtime_error(LOCATION + fmt::format("Frame index {} out of range in a file with {} frames", frame_index, n_frames)); + if (frame_index >= m_num_frames) { + throw std::runtime_error(LOCATION + fmt::format("Frame index {} out of range in a file with {} frames", frame_index, m_num_frames)); } m_file.seekg((sizeof(DetectorHeader) + bytes_per_frame()) * frame_index); } @@ -60,6 +63,10 @@ void RawSubFile::read_into(std::byte *image_buf, DetectorHeader *header) { m_file.seekg(sizeof(DetectorHeader), std::ios::cur); } + if (m_file.fail()){ + throw std::runtime_error(LOCATION + ifstream_error_msg(m_file)); + } + // TODO! 
expand support for different bitdepths if (m_pixel_map) { // read into a temporary buffer and then copy the data to the buffer @@ -79,8 +86,24 @@ void RawSubFile::read_into(std::byte *image_buf, DetectorHeader *header) { // read directly into the buffer m_file.read(reinterpret_cast(image_buf), bytes_per_frame()); } + + if (m_file.fail()){ + throw std::runtime_error(LOCATION + ifstream_error_msg(m_file)); + } } +void RawSubFile::read_into(std::byte *image_buf, size_t n_frames, DetectorHeader *header) { + for (size_t i = 0; i < n_frames; i++) { + read_into(image_buf, header); + image_buf += bytes_per_frame(); + if (header) { + ++header; + } + } +} + + + template void RawSubFile::read_with_map(std::byte *image_buf) { auto part_buffer = new std::byte[bytes_per_frame()]; diff --git a/src/algorithm.test.cpp b/src/algorithm.test.cpp index fcfa8d2..79541a1 100644 --- a/src/algorithm.test.cpp +++ b/src/algorithm.test.cpp @@ -6,7 +6,7 @@ TEST_CASE("Find the closed index in a 1D array", "[algorithm]") { aare::NDArray arr({5}); - for (size_t i = 0; i < arr.size(); i++) { + for (ssize_t i = 0; i < arr.size(); i++) { arr[i] = i; } // arr 0, 1, 2, 3, 4 @@ -19,7 +19,7 @@ TEST_CASE("Find the closed index in a 1D array", "[algorithm]") { TEST_CASE("Passing integers to nearest_index works", "[algorithm]"){ aare::NDArray arr({5}); - for (size_t i = 0; i < arr.size(); i++) { + for (ssize_t i = 0; i < arr.size(); i++) { arr[i] = i; } // arr 0, 1, 2, 3, 4 @@ -49,10 +49,20 @@ TEST_CASE("nearest index works with std::array", "[algorithm]"){ REQUIRE(aare::nearest_index(arr, -10.0) == 0); } +TEST_CASE("nearest index when there is no different uses the first element", "[algorithm]"){ + std::vector vec = {5, 5, 5, 5, 5}; + REQUIRE(aare::nearest_index(vec, 5) == 0); +} + +TEST_CASE("nearest index when there is no different uses the first element also when all smaller", "[algorithm]"){ + std::vector vec = {5, 5, 5, 5, 5}; + REQUIRE(aare::nearest_index(vec, 10) == 0); +} + TEST_CASE("last 
smaller", "[algorithm]"){ aare::NDArray arr({5}); - for (size_t i = 0; i < arr.size(); i++) { + for (ssize_t i = 0; i < arr.size(); i++) { arr[i] = i; } // arr 0, 1, 2, 3, 4 @@ -64,10 +74,86 @@ TEST_CASE("last smaller", "[algorithm]"){ TEST_CASE("returns last bin strictly smaller", "[algorithm]"){ aare::NDArray arr({5}); - for (size_t i = 0; i < arr.size(); i++) { + for (ssize_t i = 0; i < arr.size(); i++) { arr[i] = i; } // arr 0, 1, 2, 3, 4 - REQUIRE(aare::last_smaller(arr, 2.0) == 2); + REQUIRE(aare::last_smaller(arr, 2.0) == 1); + +} + +TEST_CASE("last_smaller with all elements smaller returns last element", "[algorithm]"){ + aare::NDArray arr({5}); + for (ssize_t i = 0; i < arr.size(); i++) { + arr[i] = i; + } + // arr 0, 1, 2, 3, 4 + REQUIRE(aare::last_smaller(arr, 50.) == 4); +} + +TEST_CASE("last_smaller with all elements bigger returns first element", "[algorithm]"){ + aare::NDArray arr({5}); + for (ssize_t i = 0; i < arr.size(); i++) { + arr[i] = i; + } + // arr 0, 1, 2, 3, 4 + REQUIRE(aare::last_smaller(arr, -50.) == 0); +} + +TEST_CASE("last smaller with all elements equal returns the first element", "[algorithm]"){ + std::vector vec = {5,5,5,5,5,5,5}; + REQUIRE(aare::last_smaller(vec, 5) == 0); +} + + +TEST_CASE("first_lager with vector", "[algorithm]"){ + std::vector vec = {0, 1, 2, 3, 4}; + REQUIRE(aare::first_larger(vec, 2.5) == 3); +} + +TEST_CASE("first_lager with all elements smaller returns last element", "[algorithm]"){ + std::vector vec = {0, 1, 2, 3, 4}; + REQUIRE(aare::first_larger(vec, 50.) == 4); +} + +TEST_CASE("first_lager with all elements bigger returns first element", "[algorithm]"){ + std::vector vec = {0, 1, 2, 3, 4}; + REQUIRE(aare::first_larger(vec, -50.) 
== 0); +} + +TEST_CASE("first_lager with all elements the same as the check returns last", "[algorithm]"){ + std::vector vec = {14, 14, 14, 14, 14}; + REQUIRE(aare::first_larger(vec, 14) == 4); +} + +TEST_CASE("first larger with the same element", "[algorithm]"){ + std::vector vec = {7,8,9,10,11}; + REQUIRE(aare::first_larger(vec, 9) == 3); +} + +TEST_CASE("cumsum works", "[algorithm]"){ + std::vector vec = {0, 1, 2, 3, 4}; + auto result = aare::cumsum(vec); + REQUIRE(result.size() == vec.size()); + REQUIRE(result[0] == 0); + REQUIRE(result[1] == 1); + REQUIRE(result[2] == 3); + REQUIRE(result[3] == 6); + REQUIRE(result[4] == 10); +} +TEST_CASE("cumsum works with empty vector", "[algorithm]"){ + std::vector vec = {}; + auto result = aare::cumsum(vec); + REQUIRE(result.size() == 0); +} +TEST_CASE("cumsum works with negative numbers", "[algorithm]"){ + std::vector vec = {0, -1, -2, -3, -4}; + auto result = aare::cumsum(vec); + REQUIRE(result.size() == vec.size()); + REQUIRE(result[0] == 0); + REQUIRE(result[1] == -1); + REQUIRE(result[2] == -3); + REQUIRE(result[3] == -6); + REQUIRE(result[4] == -10); +} -} \ No newline at end of file diff --git a/src/decode.cpp b/src/decode.cpp index 17c033d..8ac7bc0 100644 --- a/src/decode.cpp +++ b/src/decode.cpp @@ -1,5 +1,5 @@ #include "aare/decode.hpp" - +#include namespace aare { uint16_t adc_sar_05_decode64to16(uint64_t input){ @@ -22,6 +22,10 @@ uint16_t adc_sar_05_decode64to16(uint64_t input){ } void adc_sar_05_decode64to16(NDView input, NDView output){ + if(input.shape() != output.shape()){ + throw std::invalid_argument(LOCATION + " input and output shapes must match"); + } + for(int64_t i = 0; i < input.shape(0); i++){ for(int64_t j = 0; j < input.shape(1); j++){ output(i,j) = adc_sar_05_decode64to16(input(i,j)); @@ -49,6 +53,9 @@ uint16_t adc_sar_04_decode64to16(uint64_t input){ } void adc_sar_04_decode64to16(NDView input, NDView output){ + if(input.shape() != output.shape()){ + throw std::invalid_argument(LOCATION + " 
input and output shapes must match"); + } for(int64_t i = 0; i < input.shape(0); i++){ for(int64_t j = 0; j < input.shape(1); j++){ output(i,j) = adc_sar_04_decode64to16(input(i,j)); @@ -56,6 +63,40 @@ void adc_sar_04_decode64to16(NDView input, NDView outpu } } +double apply_custom_weights(uint16_t input, const NDView weights) { + if(weights.size() > 16){ + throw std::invalid_argument("weights size must be less than or equal to 16"); + } + + double result = 0.0; + for (ssize_t i = 0; i < weights.size(); ++i) { + result += ((input >> i) & 1) * std::pow(weights[i], i); + } + return result; + +} + +void apply_custom_weights(NDView input, NDView output, const NDView weights) { + if(input.shape() != output.shape()){ + throw std::invalid_argument(LOCATION + " input and output shapes must match"); + } + + //Calculate weights to avoid repeatedly calling std::pow + std::vector weights_powers(weights.size()); + for (ssize_t i = 0; i < weights.size(); ++i) { + weights_powers[i] = std::pow(weights[i], i); + } + + // Apply custom weights to each element in the input array + for (ssize_t i = 0; i < input.shape(0); i++) { + double result = 0.0; + for (size_t bit_index = 0; bit_index < weights_powers.size(); ++bit_index) { + result += ((input(i) >> bit_index) & 1) * weights_powers[bit_index]; + } + output(i) = result; + } +} + } // namespace aare diff --git a/src/decode.test.cpp b/src/decode.test.cpp new file mode 100644 index 0000000..1e4b2fc --- /dev/null +++ b/src/decode.test.cpp @@ -0,0 +1,80 @@ +#include "aare/decode.hpp" + +#include +#include +#include "aare/NDArray.hpp" +using Catch::Matchers::WithinAbs; +#include + +TEST_CASE("test_adc_sar_05_decode64to16"){ + uint64_t input = 0; + uint16_t output = aare::adc_sar_05_decode64to16(input); + CHECK(output == 0); + + + // bit 29 on th input is bit 0 on the output + input = 1UL << 29; + output = aare::adc_sar_05_decode64to16(input); + CHECK(output == 1); + + // test all bits by iteratting through the bitlist + std::vector 
bitlist = {29, 19, 28, 18, 31, 21, 27, 20, 24, 23, 25, 22}; + for (size_t i = 0; i < bitlist.size(); i++) { + input = 1UL << bitlist[i]; + output = aare::adc_sar_05_decode64to16(input); + CHECK(output == (1 << i)); + } + + + // test a few "random" values + input = 0; + input |= (1UL << 29); + input |= (1UL << 19); + input |= (1UL << 28); + output = aare::adc_sar_05_decode64to16(input); + CHECK(output == 7UL); + + + input = 0; + input |= (1UL << 18); + input |= (1UL << 27); + input |= (1UL << 25); + output = aare::adc_sar_05_decode64to16(input); + CHECK(output == 1096UL); + + input = 0; + input |= (1UL << 25); + input |= (1UL << 22); + output = aare::adc_sar_05_decode64to16(input); + CHECK(output == 3072UL); + } + + + TEST_CASE("test_apply_custom_weights") { + + uint16_t input = 1; + aare::NDArray weights_data({3}, 0.0); + weights_data(0) = 1.7; + weights_data(1) = 2.1; + weights_data(2) = 1.8; + + auto weights = weights_data.view(); + + + double output = aare::apply_custom_weights(input, weights); + CHECK_THAT(output, WithinAbs(1.0, 0.001)); + + input = 1 << 1; + output = aare::apply_custom_weights(input, weights); + CHECK_THAT(output, WithinAbs(2.1, 0.001)); + + + input = 1 << 2; + output = aare::apply_custom_weights(input, weights); + CHECK_THAT(output, WithinAbs(3.24, 0.001)); + + input = 0b111; + output = aare::apply_custom_weights(input, weights); + CHECK_THAT(output, WithinAbs(6.34, 0.001)); + + } \ No newline at end of file diff --git a/src/utils/ifstream_helpers.cpp b/src/utils/ifstream_helpers.cpp new file mode 100644 index 0000000..74c56f3 --- /dev/null +++ b/src/utils/ifstream_helpers.cpp @@ -0,0 +1,18 @@ +#include "aare/utils/ifstream_helpers.hpp" + +namespace aare { + +std::string ifstream_error_msg(std::ifstream &ifs) { + std::ios_base::iostate state = ifs.rdstate(); + if (state & std::ios_base::eofbit) { + return " End of file reached"; + } else if (state & std::ios_base::badbit) { + return " Bad file stream"; + } else if (state & 
std::ios_base::failbit) { + return " File read failed"; + }else{ + return " Unknown/no error"; + } +} + +} // namespace aare From 7b5e32a824af294ca6b35d763dd296a23f6b2c8c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Erik=20Fr=C3=B6jdh?= Date: Fri, 25 Apr 2025 10:31:16 +0200 Subject: [PATCH 108/120] Api extra (#166) Changes to be able to run the example notebooks: - Invert gain map on setting (multiplication is faster but user supplies ADU/energy) - Cast after applying gain map not to loose precision (Important for int32 clusters) - "factor" for ClusterFileSink - Cluster size available to be able to create the right file sink --- include/aare/GainMap.hpp | 12 ++++++++++-- python/aare/ClusterFinder.py | 17 +++++++++++++++++ python/aare/__init__.py | 2 +- python/src/bind_ClusterVector.hpp | 9 ++++----- python/src/cluster.hpp | 3 +++ python/src/module.cpp | 11 +++++------ src/ClusterFile.test.cpp | 3 ++- 7 files changed, 42 insertions(+), 15 deletions(-) diff --git a/include/aare/GainMap.hpp b/include/aare/GainMap.hpp index 5311916..621cc9f 100644 --- a/include/aare/GainMap.hpp +++ b/include/aare/GainMap.hpp @@ -16,10 +16,18 @@ class GainMap { public: explicit GainMap(const NDArray &gain_map) - : m_gain_map(gain_map) {}; + : m_gain_map(gain_map) { + for (auto &item : m_gain_map) { + item = 1.0 / item; + } + + }; explicit GainMap(const NDView gain_map) { m_gain_map = NDArray(gain_map); + for (auto &item : m_gain_map) { + item = 1.0 / item; + } } template (m_gain_map(y, x)); + cl.data[j] = static_cast(cl.data[j] * m_gain_map(y, x)); //cast after conversion to keep precision } } else { // clear edge clusters diff --git a/python/aare/ClusterFinder.py b/python/aare/ClusterFinder.py index f678dd1..6e7c352 100644 --- a/python/aare/ClusterFinder.py +++ b/python/aare/ClusterFinder.py @@ -1,5 +1,8 @@ from ._aare import ClusterFinder_Cluster3x3i, ClusterFinder_Cluster2x2i, ClusterFinderMT_Cluster3x3i, ClusterFinderMT_Cluster2x2i, ClusterCollector_Cluster3x3i, 
ClusterCollector_Cluster2x2i + + +from ._aare import ClusterFileSink_Cluster3x3i, ClusterFileSink_Cluster2x2i import numpy as np def ClusterFinder(image_size, cluster_size, n_sigma=5, dtype = np.int32, capacity = 1024): @@ -48,3 +51,17 @@ def ClusterCollector(clusterfindermt, cluster_size = (3,3), dtype=np.int32): #TODO! add the other formats raise ValueError(f"Unsupported dtype: {dtype}. Only np.int32 is supported.") +def ClusterFileSink(clusterfindermt, cluster_file, dtype=np.int32): + """ + Factory function to create a ClusterCollector object. Provides a cleaner syntax for + the templated ClusterCollector in C++. + """ + + if dtype == np.int32 and clusterfindermt.cluster_size == (3,3): + return ClusterFileSink_Cluster3x3i(clusterfindermt, cluster_file) + elif dtype == np.int32 and clusterfindermt.cluster_size == (2,2): + return ClusterFileSink_Cluster2x2i(clusterfindermt, cluster_file) + + else: + #TODO! add the other formats + raise ValueError(f"Unsupported dtype: {dtype}. Only np.int32 is supported.") \ No newline at end of file diff --git a/python/aare/__init__.py b/python/aare/__init__.py index 81a9b86..e1e5757 100644 --- a/python/aare/__init__.py +++ b/python/aare/__init__.py @@ -11,7 +11,7 @@ from ._aare import ROI # from ._aare import ClusterFinderMT, ClusterCollector, ClusterFileSink, ClusterVector_i -from .ClusterFinder import ClusterFinder, ClusterCollector, ClusterFinderMT +from .ClusterFinder import ClusterFinder, ClusterCollector, ClusterFinderMT, ClusterFileSink from .ClusterVector import ClusterVector diff --git a/python/src/bind_ClusterVector.hpp b/python/src/bind_ClusterVector.hpp index ecd7a77..db8c8a3 100644 --- a/python/src/bind_ClusterVector.hpp +++ b/python/src/bind_ClusterVector.hpp @@ -44,11 +44,10 @@ void define_ClusterVector(py::module &m, const std::string &typestr) { auto *vec = new std::vector(self.sum()); return return_vector(vec); }) - .def("sum_2x2", - [](ClusterVector &self) { - auto *vec = new std::vector(self.sum_2x2()); - 
return return_vector(vec); - }) + .def("sum_2x2", [](ClusterVector &self){ + auto *vec = new std::vector(self.sum_2x2()); + return return_vector(vec); + }) .def_property_readonly("size", &ClusterVector::size) .def("item_size", &ClusterVector::item_size) .def_property_readonly("fmt", diff --git a/python/src/cluster.hpp b/python/src/cluster.hpp index 6dd05ad..58f137c 100644 --- a/python/src/cluster.hpp +++ b/python/src/cluster.hpp @@ -93,6 +93,9 @@ void define_cluster_finder_mt_bindings(py::module &m, return; }, py::arg(), py::arg("frame_number") = 0) + .def_property_readonly("cluster_size", [](ClusterFinderMT &self){ + return py::make_tuple(ClusterSizeX, ClusterSizeY); + }) .def("clear_pedestal", &ClusterFinderMT::clear_pedestal) .def("sync", &ClusterFinderMT::sync) diff --git a/python/src/module.cpp b/python/src/module.cpp index cac97dd..946a41b 100644 --- a/python/src/module.cpp +++ b/python/src/module.cpp @@ -10,13 +10,12 @@ #include "file.hpp" #include "fit.hpp" #include "interpolation.hpp" -#include "pedestal.hpp" -#include "pixel_map.hpp" -#include "raw_file.hpp" -#include "raw_master_file.hpp" -#include "var_cluster.hpp" #include "raw_sub_file.hpp" - +#include "raw_master_file.hpp" +#include "raw_file.hpp" +#include "pixel_map.hpp" +#include "var_cluster.hpp" +#include "pedestal.hpp" #include "jungfrau_data_file.hpp" // Pybind stuff diff --git a/src/ClusterFile.test.cpp b/src/ClusterFile.test.cpp index 39c45d4..f688c3d 100644 --- a/src/ClusterFile.test.cpp +++ b/src/ClusterFile.test.cpp @@ -10,8 +10,9 @@ using aare::Cluster; using aare::ClusterFile; using aare::ClusterVector; + TEST_CASE("Read one frame from a cluster file", "[.files]") { - // We know that the frame has 97 clusters + //We know that the frame has 97 clusters auto fpath = test_data_path() / "clust" / "single_frame_97_clustrers.clust"; REQUIRE(std::filesystem::exists(fpath)); From 2e0424254cf92b4b2bb08152eea6c42685920ad3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Erik=20Fr=C3=B6jdh?= Date: Fri, 25 
Apr 2025 10:31:40 +0200 Subject: [PATCH 109/120] removed uneccecary codna numpy variants (#165) With numpy 2.0 we no longer need to build against every supported numpy version. This way we can save up to 6 builds. - https://numpy.org/doc/stable/dev/depending_on_numpy.html - https://conda-forge.org/docs/maintainer/knowledge_base/#building-against-numpy --- conda-recipe/conda_build_config.yaml | 23 --------------- conda-recipe/meta.yaml | 44 ++++++++++------------------ 2 files changed, 16 insertions(+), 51 deletions(-) diff --git a/conda-recipe/conda_build_config.yaml b/conda-recipe/conda_build_config.yaml index 36a7465..6d3d479 100644 --- a/conda-recipe/conda_build_config.yaml +++ b/conda-recipe/conda_build_config.yaml @@ -1,28 +1,5 @@ python: - 3.11 - - 3.11 - - 3.11 - - 3.12 - - 3.12 - 3.12 - 3.13 - - -numpy: - - 1.26 - - 2.0 - - 2.1 - - 1.26 - - 2.0 - - 2.1 - - 2.1 - - -zip_keys: - - python - - numpy - -pin_run_as_build: - numpy: x.x - python: x.x \ No newline at end of file diff --git a/conda-recipe/meta.yaml b/conda-recipe/meta.yaml index 12c6ca0..5b7eb48 100644 --- a/conda-recipe/meta.yaml +++ b/conda-recipe/meta.yaml @@ -2,56 +2,44 @@ package: name: aare version: 2025.4.22 #TODO! how to not duplicate this? - - - - - source: path: .. build: number: 0 script: - - unset CMAKE_GENERATOR && {{ PYTHON }} -m pip install . -vv # [not win] - - {{ PYTHON }} -m pip install . -vv # [win] + - unset CMAKE_GENERATOR && {{ PYTHON }} -m pip install . 
-vv requirements: build: - - python {{python}} - - numpy {{ numpy }} - {{ compiler('cxx') }} - - - host: - cmake - ninja - - python {{python}} - - numpy {{ numpy }} + + host: + - python - pip + - numpy=2.1 - scikit-build-core - pybind11 >=2.13.0 - - fmt - - zeromq - - nlohmann_json - - catch2 + - matplotlib # needed in host to solve the environment for run run: - - python {{python}} - - numpy {{ numpy }} + - python + - {{ pin_compatible('numpy') }} - matplotlib + test: imports: - aare - # requires: - # - pytest - # source_files: - # - tests - # commands: - # - pytest tests + requires: + - pytest + source_files: + - python/tests + commands: + - python -m pytest python/tests about: - summary: An example project built with pybind11 and scikit-build. - # license_file: LICENSE \ No newline at end of file + summary: Data analysis library for hybrid pixel detectors from PSI From f06e722dce1a881eac25302814c8fa96d83e77c9 Mon Sep 17 00:00:00 2001 From: mazzol_a Date: Fri, 25 Apr 2025 11:38:51 +0200 Subject: [PATCH 110/120] changes from PR review --- include/aare/CalculateEta.hpp | 6 +-- include/aare/ClusterVector.hpp | 19 ++++----- include/aare/GainMap.hpp | 3 +- include/aare/Interpolator.hpp | 6 +-- src/ClusterFile.test.cpp | 70 ++++++++++++++++------------------ src/ClusterVector.test.cpp | 2 +- 6 files changed, 50 insertions(+), 56 deletions(-) diff --git a/include/aare/CalculateEta.hpp b/include/aare/CalculateEta.hpp index 289e8bc..37bdf00 100644 --- a/include/aare/CalculateEta.hpp +++ b/include/aare/CalculateEta.hpp @@ -40,8 +40,8 @@ template calculate_eta2(const ClusterVector &clusters) { NDArray eta2({static_cast(clusters.size()), 2}); - for (size_t i = 0; i < clusters.size(); i++) { - auto e = calculate_eta2(clusters.at(i)); + for (const ClusterType &cluster : clusters) { + auto e = calculate_eta2(cluster); eta2(i, 0) = e.x; eta2(i, 1) = e.y; } @@ -122,7 +122,7 @@ calculate_eta2(const Cluster &cl) { return eta; } -// Dont get why this is correct - photon center 
should be top right corner +// TODO! Look up eta2 calculation - photon center should be top right corner template Eta2 calculate_eta2(const Cluster &cl) { Eta2 eta{}; diff --git a/include/aare/ClusterVector.hpp b/include/aare/ClusterVector.hpp index e91cb6d..5630278 100644 --- a/include/aare/ClusterVector.hpp +++ b/include/aare/ClusterVector.hpp @@ -76,9 +76,9 @@ class ClusterVector> { std::vector sum() { std::vector sums(m_data.size()); - for (size_t i = 0; i < m_data.size(); i++) { - sums[i] = at(i).sum(); - } + std::transform(m_data.begin(), m_data.end(), sums.begin(), + [](const T &cluster) { return cluster.sum(); }); + return sums; } @@ -86,13 +86,14 @@ class ClusterVector> { * @brief Sum the pixels in the 2x2 subcluster with the biggest pixel sum in * each cluster * @return std::vector vector of sums for each cluster - */ //TODO if underlying container is a vector use std::for_each + */ std::vector sum_2x2() { std::vector sums_2x2(m_data.size()); - for (size_t i = 0; i < m_data.size(); i++) { - sums_2x2[i] = at(i).max_sum_2x2().first; - } + std::transform( + m_data.begin(), m_data.end(), sums_2x2.begin(), + [](const T &cluster) { return cluster.max_sum_2x2().first; }); + return sums_2x2; } @@ -149,9 +150,9 @@ class ClusterVector> { * @brief Return a reference to the i-th cluster casted to type V * @tparam V type of the cluster */ - ClusterType &at(size_t i) { return m_data[i]; } + ClusterType &operator[](size_t i) { return m_data[i]; } - const ClusterType &at(size_t i) const { return m_data[i]; } + const ClusterType &operator[](size_t i) const { return m_data[i]; } /** * @brief Return the frame number of the clusters. 
0 is used to indicate diff --git a/include/aare/GainMap.hpp b/include/aare/GainMap.hpp index 621cc9f..23ed467 100644 --- a/include/aare/GainMap.hpp +++ b/include/aare/GainMap.hpp @@ -41,8 +41,7 @@ class GainMap { int64_t index_cluster_center_x = ClusterSizeX / 2; int64_t index_cluster_center_y = ClusterSizeY / 2; - for (size_t i = 0; i < clustervec.size(); i++) { - auto &cl = clustervec.at(i); + for (T &cl : clustervec) { if (cl.x > 0 && cl.y > 0 && cl.x < m_gain_map.shape(1) - 1 && cl.y < m_gain_map.shape(0) - 1) { diff --git a/include/aare/Interpolator.hpp b/include/aare/Interpolator.hpp index 85ccf29..d2b2322 100644 --- a/include/aare/Interpolator.hpp +++ b/include/aare/Interpolator.hpp @@ -44,9 +44,8 @@ Interpolator::interpolate(const ClusterVector &clusters) { photons.reserve(clusters.size()); if (clusters.cluster_size_x() == 3 || clusters.cluster_size_y() == 3) { - for (size_t i = 0; i < clusters.size(); i++) { + for (const ClusterType &cluster : clusters) { - auto cluster = clusters.at(i); auto eta = calculate_eta2(cluster); Photon photon; @@ -94,8 +93,7 @@ Interpolator::interpolate(const ClusterVector &clusters) { } } else if (clusters.cluster_size_x() == 2 || clusters.cluster_size_y() == 2) { - for (size_t i = 0; i < clusters.size(); i++) { - auto cluster = clusters.at(i); + for (const ClusterType &cluster : clusters) { auto eta = calculate_eta2(cluster); Photon photon; diff --git a/src/ClusterFile.test.cpp b/src/ClusterFile.test.cpp index f688c3d..6254b5d 100644 --- a/src/ClusterFile.test.cpp +++ b/src/ClusterFile.test.cpp @@ -20,11 +20,10 @@ TEST_CASE("Read one frame from a cluster file", "[.files]") { auto clusters = f.read_frame(); CHECK(clusters.size() == 97); CHECK(clusters.frame_number() == 135); - CHECK(clusters.at(0).x == 1); - CHECK(clusters.at(0).y == 200); + CHECK(clusters[0].x == 1); + CHECK(clusters[0].y == 200); int32_t expected_cluster_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8}; - CHECK(std::equal(std::begin(clusters.at(0).data), - 
std::end(clusters.at(0).data), + CHECK(std::equal(std::begin(clusters[0].data), std::end(clusters[0].data), std::begin(expected_cluster_data))); } @@ -47,18 +46,17 @@ TEST_CASE("Read one frame using ROI", "[.files]") { // Check that all clusters are within the ROI for (size_t i = 0; i < clusters.size(); i++) { - auto c = clusters.at(i); + auto c = clusters[i]; REQUIRE(c.x >= roi.xmin); REQUIRE(c.x <= roi.xmax); REQUIRE(c.y >= roi.ymin); REQUIRE(c.y <= roi.ymax); } - CHECK(clusters.at(0).x == 1); - CHECK(clusters.at(0).y == 200); + CHECK(clusters[0].x == 1); + CHECK(clusters[0].y == 200); int32_t expected_cluster_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8}; - CHECK(std::equal(std::begin(clusters.at(0).data), - std::end(clusters.at(0).data), + CHECK(std::equal(std::begin(clusters[0].data), std::end(clusters[0].data), std::begin(expected_cluster_data))); } @@ -175,10 +173,10 @@ TEST_CASE("Read clusters from single frame file", "[.files]") { REQUIRE(clusters.size() == 50); REQUIRE(clusters.frame_number() == 135); int32_t expected_cluster_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8}; - REQUIRE(clusters.at(0).x == 1); - REQUIRE(clusters.at(0).y == 200); - CHECK(std::equal(std::begin(clusters.at(0).data), - std::end(clusters.at(0).data), + REQUIRE(clusters[0].x == 1); + REQUIRE(clusters[0].y == 200); + CHECK(std::equal(std::begin(clusters[0].data), + std::end(clusters[0].data), std::begin(expected_cluster_data))); } SECTION("Read more clusters than available") { @@ -188,10 +186,10 @@ TEST_CASE("Read clusters from single frame file", "[.files]") { REQUIRE(clusters.size() == 97); REQUIRE(clusters.frame_number() == 135); int32_t expected_cluster_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8}; - REQUIRE(clusters.at(0).x == 1); - REQUIRE(clusters.at(0).y == 200); - CHECK(std::equal(std::begin(clusters.at(0).data), - std::end(clusters.at(0).data), + REQUIRE(clusters[0].x == 1); + REQUIRE(clusters[0].y == 200); + CHECK(std::equal(std::begin(clusters[0].data), + std::end(clusters[0].data), 
std::begin(expected_cluster_data))); } SECTION("Read all clusters") { @@ -199,11 +197,11 @@ TEST_CASE("Read clusters from single frame file", "[.files]") { auto clusters = f.read_clusters(97); REQUIRE(clusters.size() == 97); REQUIRE(clusters.frame_number() == 135); - REQUIRE(clusters.at(0).x == 1); - REQUIRE(clusters.at(0).y == 200); + REQUIRE(clusters[0].x == 1); + REQUIRE(clusters[0].y == 200); int32_t expected_cluster_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8}; - CHECK(std::equal(std::begin(clusters.at(0).data), - std::end(clusters.at(0).data), + CHECK(std::equal(std::begin(clusters[0].data), + std::end(clusters[0].data), std::begin(expected_cluster_data))); } } @@ -225,11 +223,10 @@ TEST_CASE("Read clusters from single frame file with ROI", "[.files]") { CHECK(clusters.size() == 10); CHECK(clusters.frame_number() == 135); - CHECK(clusters.at(0).x == 1); - CHECK(clusters.at(0).y == 200); + CHECK(clusters[0].x == 1); + CHECK(clusters[0].y == 200); int32_t expected_cluster_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8}; - CHECK(std::equal(std::begin(clusters.at(0).data), - std::end(clusters.at(0).data), + CHECK(std::equal(std::begin(clusters[0].data), std::end(clusters[0].data), std::begin(expected_cluster_data))); } @@ -314,19 +311,19 @@ TEST_CASE("Write cluster with potential padding", "[.files][.ClusterFile]") { CHECK(read_cluster_vector.size() == 2); CHECK(read_cluster_vector.frame_number() == 0); - CHECK(read_cluster_vector.at(0).x == clustervec.at(0).x); - CHECK(read_cluster_vector.at(0).y == clustervec.at(0).y); + CHECK(read_cluster_vector[0].x == clustervec[0].x); + CHECK(read_cluster_vector[0].y == clustervec[0].y); CHECK(std::equal( - clustervec.at(0).data.begin(), clustervec.at(0).data.end(), - read_cluster_vector.at(0).data.begin(), [](double a, double b) { + clustervec[0].data.begin(), clustervec[0].data.end(), + read_cluster_vector[0].data.begin(), [](double a, double b) { return std::abs(a - b) < std::numeric_limits::epsilon(); })); - 
CHECK(read_cluster_vector.at(1).x == clustervec.at(1).x); - CHECK(read_cluster_vector.at(1).y == clustervec.at(1).y); + CHECK(read_cluster_vector[1].x == clustervec[1].x); + CHECK(read_cluster_vector[1].y == clustervec[1].y); CHECK(std::equal( - clustervec.at(1).data.begin(), clustervec.at(1).data.end(), - read_cluster_vector.at(1).data.begin(), [](double a, double b) { + clustervec[1].data.begin(), clustervec[1].data.end(), + read_cluster_vector[1].data.begin(), [](double a, double b) { return std::abs(a - b) < std::numeric_limits::epsilon(); })); } @@ -346,10 +343,9 @@ TEST_CASE("Read frame and modify cluster data", "[.files][.ClusterFile]") { Cluster{0, 0, {0, 1, 2, 3, 4, 5, 6, 7, 8}}); CHECK(clusters.size() == 98); - CHECK(clusters.at(0).x == 1); - CHECK(clusters.at(0).y == 200); + CHECK(clusters[0].x == 1); + CHECK(clusters[0].y == 200); - CHECK(std::equal(std::begin(clusters.at(0).data), - std::end(clusters.at(0).data), + CHECK(std::equal(std::begin(clusters[0].data), std::end(clusters[0].data), std::begin(expected_cluster_data))); } diff --git a/src/ClusterVector.test.cpp b/src/ClusterVector.test.cpp index 468a707..1214b6b 100644 --- a/src/ClusterVector.test.cpp +++ b/src/ClusterVector.test.cpp @@ -60,7 +60,7 @@ TEST_CASE("ClusterVector 2x2 int32_t capacity 4, push back then read", REQUIRE(cv.size() == 1); REQUIRE(cv.capacity() == 4); - auto c2 = cv.at(0); + auto c2 = cv[0]; // Check that the data is the same REQUIRE(c1.x == c2.x); From eb6862ff9913726273878e198a76d662b21c1074 Mon Sep 17 00:00:00 2001 From: mazzol_a Date: Fri, 25 Apr 2025 12:03:59 +0200 Subject: [PATCH 111/120] changed name of GainMap to InvertedGainMap --- include/aare/CalculateEta.hpp | 4 ++-- include/aare/ClusterFile.hpp | 19 ++++++++++++------- include/aare/ClusterVector.hpp | 12 +++++++----- include/aare/GainMap.hpp | 24 ++++++++++++++---------- python/src/cluster_file.hpp | 3 --- 5 files changed, 35 insertions(+), 27 deletions(-) diff --git a/include/aare/CalculateEta.hpp 
b/include/aare/CalculateEta.hpp index 37bdf00..db17dad 100644 --- a/include/aare/CalculateEta.hpp +++ b/include/aare/CalculateEta.hpp @@ -40,8 +40,8 @@ template calculate_eta2(const ClusterVector &clusters) { NDArray eta2({static_cast(clusters.size()), 2}); - for (const ClusterType &cluster : clusters) { - auto e = calculate_eta2(cluster); + for (size_t i = 0; i < clusters.size(); i++) { + auto e = calculate_eta2(clusters[i]); eta2(i, 0) = e.x; eta2(i, 1) = e.y; } diff --git a/include/aare/ClusterFile.hpp b/include/aare/ClusterFile.hpp index f705cfa..ef78874 100644 --- a/include/aare/ClusterFile.hpp +++ b/include/aare/ClusterFile.hpp @@ -46,8 +46,8 @@ class ClusterFile { std::optional m_roi; /*Region of interest, will be applied if set*/ std::optional> m_noise_map; /*Noise map to cut photons, will be applied if set*/ - std::optional m_gain_map; /*Gain map to apply to the clusters, will - be applied if set*/ + std::optional m_gain_map; /*Gain map to apply to the + clusters, will be applied if set*/ public: /** @@ -160,16 +160,21 @@ class ClusterFile { } /** - * @brief Set the gain map to use when reading clusters. If set the gain map will be applied - * to the clusters that pass ROI and noise_map selection. The gain map is expected to be in ADU/energy. + * @brief Set the gain map to use when reading clusters. If set the gain map + * will be applied to the clusters that pass ROI and noise_map selection. + * The gain map is expected to be in ADU/energy. */ void set_gain_map(const NDView gain_map) { - m_gain_map = GainMap(gain_map); + m_gain_map = InvertedGainMap(gain_map); } - void set_gain_map(const GainMap &gain_map) { m_gain_map = gain_map; } + void set_gain_map(const InvertedGainMap &gain_map) { + m_gain_map = gain_map; + } - void set_gain_map(const GainMap &&gain_map) { m_gain_map = gain_map; } + void set_gain_map(const InvertedGainMap &&gain_map) { + m_gain_map = gain_map; + } /** * @brief Close the file. 
If not closed the file will be diff --git a/include/aare/ClusterVector.hpp b/include/aare/ClusterVector.hpp index 5630278..c8b1ea1 100644 --- a/include/aare/ClusterVector.hpp +++ b/include/aare/ClusterVector.hpp @@ -76,8 +76,9 @@ class ClusterVector> { std::vector sum() { std::vector sums(m_data.size()); - std::transform(m_data.begin(), m_data.end(), sums.begin(), - [](const T &cluster) { return cluster.sum(); }); + std::transform( + m_data.begin(), m_data.end(), sums.begin(), + [](const ClusterType &cluster) { return cluster.sum(); }); return sums; } @@ -90,9 +91,10 @@ class ClusterVector> { std::vector sum_2x2() { std::vector sums_2x2(m_data.size()); - std::transform( - m_data.begin(), m_data.end(), sums_2x2.begin(), - [](const T &cluster) { return cluster.max_sum_2x2().first; }); + std::transform(m_data.begin(), m_data.end(), sums_2x2.begin(), + [](const ClusterType &cluster) { + return cluster.max_sum_2x2().first; + }); return sums_2x2; } diff --git a/include/aare/GainMap.hpp b/include/aare/GainMap.hpp index 23ed467..ac558d0 100644 --- a/include/aare/GainMap.hpp +++ b/include/aare/GainMap.hpp @@ -1,6 +1,7 @@ /************************************************ - * @file ApplyGainMap.hpp - * @short function to apply gain map of image size to a vector of clusters + * @file GainMap.hpp + * @short function to apply gain map of image size to a vector of clusters - + *note stored gainmap is inverted for efficient aaplication to images ***********************************************/ #pragma once @@ -12,18 +13,17 @@ namespace aare { -class GainMap { +class InvertedGainMap { public: - explicit GainMap(const NDArray &gain_map) + explicit InvertedGainMap(const NDArray &gain_map) : m_gain_map(gain_map) { - for (auto &item : m_gain_map) { + for (auto &item : m_gain_map) { item = 1.0 / item; } + }; - }; - - explicit GainMap(const NDView gain_map) { + explicit InvertedGainMap(const NDView gain_map) { m_gain_map = NDArray(gain_map); for (auto &item : m_gain_map) { item = 1.0 / 
item; @@ -41,14 +41,18 @@ class GainMap { int64_t index_cluster_center_x = ClusterSizeX / 2; int64_t index_cluster_center_y = ClusterSizeY / 2; - for (T &cl : clustervec) { + for (size_t i = 0; i < clustervec.size(); i++) { + auto &cl = clustervec[i]; if (cl.x > 0 && cl.y > 0 && cl.x < m_gain_map.shape(1) - 1 && cl.y < m_gain_map.shape(0) - 1) { for (size_t j = 0; j < ClusterSizeX * ClusterSizeY; j++) { size_t x = cl.x + j % ClusterSizeX - index_cluster_center_x; size_t y = cl.y + j / ClusterSizeX - index_cluster_center_y; - cl.data[j] = static_cast(cl.data[j] * m_gain_map(y, x)); //cast after conversion to keep precision + cl.data[j] = static_cast( + static_cast(cl.data[j]) * + m_gain_map( + y, x)); // cast after conversion to keep precision } } else { // clear edge clusters diff --git a/python/src/cluster_file.hpp b/python/src/cluster_file.hpp index 3e7aa48..ac384b2 100644 --- a/python/src/cluster_file.hpp +++ b/python/src/cluster_file.hpp @@ -59,9 +59,6 @@ void define_cluster_file_io_bindings(py::module &m, self.set_gain_map(view); }) - // void set_gain_map(const GainMap &gain_map); //TODO do i need a - // gainmap constructor? 
- .def("close", &ClusterFile::close) .def("write_frame", &ClusterFile::write_frame) .def("__enter__", [](ClusterFile &self) { return &self; }) From 6db201f397ecb9e582b3408437162d537ca9017f Mon Sep 17 00:00:00 2001 From: AliceMazzoleni99 Date: Fri, 25 Apr 2025 15:24:45 +0200 Subject: [PATCH 112/120] updated conda environment (#169) - updated dev-env.yml conda environment file - added boost-histogram as a requirement for the python tests - added environment file in conda build process --- .github/workflows/build_and_deploy_conda.yml | 8 ++++---- .github/workflows/build_conda.yml | 9 +++++---- conda-recipe/meta.yaml | 1 + etc/dev-env.yml | 6 ++---- 4 files changed, 12 insertions(+), 12 deletions(-) diff --git a/.github/workflows/build_and_deploy_conda.yml b/.github/workflows/build_and_deploy_conda.yml index 90e75c1..65483c3 100644 --- a/.github/workflows/build_and_deploy_conda.yml +++ b/.github/workflows/build_and_deploy_conda.yml @@ -24,13 +24,13 @@ jobs: - uses: actions/checkout@v4 - name: Get conda - uses: conda-incubator/setup-miniconda@v3.0.4 + uses: conda-incubator/setup-miniconda@v3 with: python-version: ${{ matrix.python-version }} + environment-file: etc/dev-env.yml + miniforge-version: latest channels: conda-forge - - - name: Prepare - run: conda install conda-build=24.9 conda-verify pytest anaconda-client + conda-remove-defaults: "true" - name: Enable upload run: conda config --set anaconda_upload yes diff --git a/.github/workflows/build_conda.yml b/.github/workflows/build_conda.yml index 0b3e55c..3bd465e 100644 --- a/.github/workflows/build_conda.yml +++ b/.github/workflows/build_conda.yml @@ -24,14 +24,15 @@ jobs: - uses: actions/checkout@v4 - name: Get conda - uses: conda-incubator/setup-miniconda@v3.0.4 + uses: conda-incubator/setup-miniconda@v3 with: python-version: ${{ matrix.python-version }} + environment-file: etc/dev-env.yml + miniforge-version: latest channels: conda-forge + conda-remove-defaults: "true" - - name: Prepare - run: conda install 
conda-build=24.9 conda-verify pytest anaconda-client - + - name: Disable upload run: conda config --set anaconda_upload no diff --git a/conda-recipe/meta.yaml b/conda-recipe/meta.yaml index 5b7eb48..bfa6323 100644 --- a/conda-recipe/meta.yaml +++ b/conda-recipe/meta.yaml @@ -36,6 +36,7 @@ test: - aare requires: - pytest + - boost-histogram source_files: - python/tests commands: diff --git a/etc/dev-env.yml b/etc/dev-env.yml index 25038ee..e580c81 100644 --- a/etc/dev-env.yml +++ b/etc/dev-env.yml @@ -3,13 +3,11 @@ channels: - conda-forge dependencies: - anaconda-client + - conda-build - doxygen - sphinx=7.1.2 - breathe - - pybind11 - sphinx_rtd_theme - furo - - nlohmann_json - zeromq - - fmt - - numpy + From 12ae1424fb38a6bd5febf053d9515895fa5e1471 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Erik=20Fr=C3=B6jdh?= Date: Fri, 25 Apr 2025 15:52:02 +0200 Subject: [PATCH 113/120] consistent use of ssize_t instead of int64_t (#167) - Consistent use of ssize_t to avoid issues on 32 bit platforms and also mac with (long long int as ssize_t) --- include/aare/ArrayExpr.hpp | 36 +++++++------- include/aare/FilePtr.hpp | 4 +- include/aare/Frame.hpp | 4 +- include/aare/NDArray.hpp | 78 +++++++++++++++---------------- include/aare/NDView.hpp | 44 ++++++++--------- include/aare/NumpyFile.hpp | 2 +- include/aare/Pedestal.hpp | 4 +- include/aare/VarClusterFinder.hpp | 2 +- include/aare/defs.hpp | 16 +++---- python/src/ctb_raw_file.hpp | 4 +- python/src/file.hpp | 2 +- python/src/np_helper.hpp | 2 +- src/FilePtr.cpp | 2 +- src/NDArray.test.cpp | 20 ++++---- src/NDView.test.cpp | 6 +-- src/decode.cpp | 8 ++-- 16 files changed, 118 insertions(+), 116 deletions(-) diff --git a/include/aare/ArrayExpr.hpp b/include/aare/ArrayExpr.hpp index 7f8015c..d326601 100644 --- a/include/aare/ArrayExpr.hpp +++ b/include/aare/ArrayExpr.hpp @@ -1,22 +1,24 @@ #pragma once -#include //int64_t -#include //size_t +#include +#include #include - #include +#include "aare/defs.hpp" + + namespace aare { -template 
class ArrayExpr { +template class ArrayExpr { public: static constexpr bool is_leaf = false; auto operator[](size_t i) const { return static_cast(*this)[i]; } auto operator()(size_t i) const { return static_cast(*this)[i]; } auto size() const { return static_cast(*this).size(); } - std::array shape() const { return static_cast(*this).shape(); } + std::array shape() const { return static_cast(*this).shape(); } }; -template +template class ArrayAdd : public ArrayExpr, Ndim> { const A &arr1_; const B &arr2_; @@ -27,10 +29,10 @@ class ArrayAdd : public ArrayExpr, Ndim> { } auto operator[](int i) const { return arr1_[i] + arr2_[i]; } size_t size() const { return arr1_.size(); } - std::array shape() const { return arr1_.shape(); } + std::array shape() const { return arr1_.shape(); } }; -template +template class ArraySub : public ArrayExpr, Ndim> { const A &arr1_; const B &arr2_; @@ -41,10 +43,10 @@ class ArraySub : public ArrayExpr, Ndim> { } auto operator[](int i) const { return arr1_[i] - arr2_[i]; } size_t size() const { return arr1_.size(); } - std::array shape() const { return arr1_.shape(); } + std::array shape() const { return arr1_.shape(); } }; -template +template class ArrayMul : public ArrayExpr,Ndim> { const A &arr1_; const B &arr2_; @@ -55,10 +57,10 @@ class ArrayMul : public ArrayExpr,Ndim> { } auto operator[](int i) const { return arr1_[i] * arr2_[i]; } size_t size() const { return arr1_.size(); } - std::array shape() const { return arr1_.shape(); } + std::array shape() const { return arr1_.shape(); } }; -template +template class ArrayDiv : public ArrayExpr, Ndim> { const A &arr1_; const B &arr2_; @@ -69,27 +71,27 @@ class ArrayDiv : public ArrayExpr, Ndim> { } auto operator[](int i) const { return arr1_[i] / arr2_[i]; } size_t size() const { return arr1_.size(); } - std::array shape() const { return arr1_.shape(); } + std::array shape() const { return arr1_.shape(); } }; -template +template auto operator+(const ArrayExpr &arr1, const ArrayExpr &arr2) { 
return ArrayAdd, ArrayExpr, Ndim>(arr1, arr2); } -template +template auto operator-(const ArrayExpr &arr1, const ArrayExpr &arr2) { return ArraySub, ArrayExpr, Ndim>(arr1, arr2); } -template +template auto operator*(const ArrayExpr &arr1, const ArrayExpr &arr2) { return ArrayMul, ArrayExpr, Ndim>(arr1, arr2); } -template +template auto operator/(const ArrayExpr &arr1, const ArrayExpr &arr2) { return ArrayDiv, ArrayExpr, Ndim>(arr1, arr2); } diff --git a/include/aare/FilePtr.hpp b/include/aare/FilePtr.hpp index 4c88ecb..4ddc76e 100644 --- a/include/aare/FilePtr.hpp +++ b/include/aare/FilePtr.hpp @@ -18,8 +18,8 @@ class FilePtr { FilePtr(FilePtr &&other); FilePtr &operator=(FilePtr &&other); FILE *get(); - int64_t tell(); - void seek(int64_t offset, int whence = SEEK_SET) { + ssize_t tell(); + void seek(ssize_t offset, int whence = SEEK_SET) { if (fseek(fp_, offset, whence) != 0) throw std::runtime_error("Error seeking in file"); } diff --git a/include/aare/Frame.hpp b/include/aare/Frame.hpp index 5ce63ac..02ea82f 100644 --- a/include/aare/Frame.hpp +++ b/include/aare/Frame.hpp @@ -107,8 +107,8 @@ class Frame { * @return NDView */ template NDView view() { - std::array shape = {static_cast(m_rows), - static_cast(m_cols)}; + std::array shape = {static_cast(m_rows), + static_cast(m_cols)}; T *data = reinterpret_cast(m_data); return NDView(data, shape); } diff --git a/include/aare/NDArray.hpp b/include/aare/NDArray.hpp index ceb1e0b..3c08a3c 100644 --- a/include/aare/NDArray.hpp +++ b/include/aare/NDArray.hpp @@ -22,10 +22,10 @@ TODO! 
Add expression templates for operators namespace aare { -template +template class NDArray : public ArrayExpr, Ndim> { - std::array shape_; - std::array strides_; + std::array shape_; + std::array strides_; size_t size_{}; T *data_; @@ -42,7 +42,7 @@ class NDArray : public ArrayExpr, Ndim> { * * @param shape shape of the new NDArray */ - explicit NDArray(std::array shape) + explicit NDArray(std::array shape) : shape_(shape), strides_(c_strides(shape_)), size_(std::accumulate(shape_.begin(), shape_.end(), 1, std::multiplies<>())), @@ -55,7 +55,7 @@ class NDArray : public ArrayExpr, Ndim> { * @param shape shape of the new array * @param value value to initialize the array with */ - NDArray(std::array shape, T value) : NDArray(shape) { + NDArray(std::array shape, T value) : NDArray(shape) { this->operator=(value); } @@ -186,22 +186,22 @@ class NDArray : public ArrayExpr, Ndim> { } // TODO! is int the right type for index? - T &operator()(int64_t i) { return data_[i]; } - const T &operator()(int64_t i) const { return data_[i]; } + T &operator()(ssize_t i) { return data_[i]; } + const T &operator()(ssize_t i) const { return data_[i]; } - T &operator[](int64_t i) { return data_[i]; } - const T &operator[](int64_t i) const { return data_[i]; } + T &operator[](ssize_t i) { return data_[i]; } + const T &operator[](ssize_t i) const { return data_[i]; } T *data() { return data_; } std::byte *buffer() { return reinterpret_cast(data_); } ssize_t size() const { return static_cast(size_); } size_t total_bytes() const { return size_ * sizeof(T); } - std::array shape() const noexcept { return shape_; } - int64_t shape(int64_t i) const noexcept { return shape_[i]; } - std::array strides() const noexcept { return strides_; } + std::array shape() const noexcept { return shape_; } + ssize_t shape(ssize_t i) const noexcept { return shape_[i]; } + std::array strides() const noexcept { return strides_; } size_t bitdepth() const noexcept { return sizeof(T) * 8; } - std::array byte_strides() 
const noexcept { + std::array byte_strides() const noexcept { auto byte_strides = strides_; for (auto &val : byte_strides) val *= sizeof(T); @@ -228,7 +228,7 @@ class NDArray : public ArrayExpr, Ndim> { }; // Move assign -template +template NDArray & NDArray::operator=(NDArray &&other) noexcept { if (this != &other) { @@ -242,7 +242,7 @@ NDArray::operator=(NDArray &&other) noexcept { return *this; } -template +template NDArray &NDArray::operator+=(const NDArray &other) { // check shape if (shape_ == other.shape_) { @@ -254,7 +254,7 @@ NDArray &NDArray::operator+=(const NDArray &other) { throw(std::runtime_error("Shape of ImageDatas must match")); } -template +template NDArray &NDArray::operator-=(const NDArray &other) { // check shape if (shape_ == other.shape_) { @@ -266,7 +266,7 @@ NDArray &NDArray::operator-=(const NDArray &other) { throw(std::runtime_error("Shape of ImageDatas must match")); } -template +template NDArray &NDArray::operator*=(const NDArray &other) { // check shape if (shape_ == other.shape_) { @@ -278,14 +278,14 @@ NDArray &NDArray::operator*=(const NDArray &other) { throw(std::runtime_error("Shape of ImageDatas must match")); } -template +template NDArray &NDArray::operator&=(const T &mask) { for (auto it = begin(); it != end(); ++it) *it &= mask; return *this; } -template +template NDArray NDArray::operator>(const NDArray &other) { if (shape_ == other.shape_) { NDArray result{shape_}; @@ -297,7 +297,7 @@ NDArray NDArray::operator>(const NDArray &other) { throw(std::runtime_error("Shape of ImageDatas must match")); } -template +template NDArray &NDArray::operator=(const NDArray &other) { if (this != &other) { delete[] data_; @@ -310,7 +310,7 @@ NDArray &NDArray::operator=(const NDArray &other) { return *this; } -template +template bool NDArray::operator==(const NDArray &other) const { if (shape_ != other.shape_) return false; @@ -322,23 +322,23 @@ bool NDArray::operator==(const NDArray &other) const { return true; } -template +template bool 
NDArray::operator!=(const NDArray &other) const { return !((*this) == other); } -template +template NDArray &NDArray::operator++() { for (uint32_t i = 0; i < size_; ++i) data_[i] += 1; return *this; } -template +template NDArray &NDArray::operator=(const T &value) { std::fill_n(data_, size_, value); return *this; } -template +template NDArray &NDArray::operator+=(const T &value) { for (uint32_t i = 0; i < size_; ++i) data_[i] += value; @@ -348,57 +348,57 @@ NDArray &NDArray::operator+=(const T &value) { -template +template NDArray NDArray::operator+(const T &value) { NDArray result = *this; result += value; return result; } -template +template NDArray &NDArray::operator-=(const T &value) { for (uint32_t i = 0; i < size_; ++i) data_[i] -= value; return *this; } -template +template NDArray NDArray::operator-(const T &value) { NDArray result = *this; result -= value; return result; } -template +template NDArray &NDArray::operator/=(const T &value) { for (uint32_t i = 0; i < size_; ++i) data_[i] /= value; return *this; } -template +template NDArray NDArray::operator/(const T &value) { NDArray result = *this; result /= value; return result; } -template +template NDArray &NDArray::operator*=(const T &value) { for (uint32_t i = 0; i < size_; ++i) data_[i] *= value; return *this; } -template +template NDArray NDArray::operator*(const T &value) { NDArray result = *this; result *= value; return result; } -// template void NDArray::Print() { +// template void NDArray::Print() { // if (shape_[0] < 20 && shape_[1] < 20) // Print_all(); // else // Print_some(); // } -template +template std::ostream &operator<<(std::ostream &os, const NDArray &arr) { for (auto row = 0; row < arr.shape(0); ++row) { for (auto col = 0; col < arr.shape(1); ++col) { @@ -410,7 +410,7 @@ std::ostream &operator<<(std::ostream &os, const NDArray &arr) { return os; } -template void NDArray::Print_all() { +template void NDArray::Print_all() { for (auto row = 0; row < shape_[0]; ++row) { for (auto col = 0; 
col < shape_[1]; ++col) { std::cout << std::setw(3); @@ -419,7 +419,7 @@ template void NDArray::Print_all() { std::cout << "\n"; } } -template void NDArray::Print_some() { +template void NDArray::Print_some() { for (auto row = 0; row < 5; ++row) { for (auto col = 0; col < 5; ++col) { std::cout << std::setw(7); @@ -429,7 +429,7 @@ template void NDArray::Print_some() { } } -template +template void save(NDArray &img, std::string &pathname) { std::ofstream f; f.open(pathname, std::ios::binary); @@ -437,9 +437,9 @@ void save(NDArray &img, std::string &pathname) { f.close(); } -template +template NDArray load(const std::string &pathname, - std::array shape) { + std::array shape) { NDArray img{shape}; std::ifstream f; f.open(pathname, std::ios::binary); diff --git a/include/aare/NDView.hpp b/include/aare/NDView.hpp index ddb5d1c..56054e2 100644 --- a/include/aare/NDView.hpp +++ b/include/aare/NDView.hpp @@ -14,10 +14,10 @@ #include namespace aare { -template using Shape = std::array; +template using Shape = std::array; // TODO! fix mismatch between signed and unsigned -template Shape make_shape(const std::vector &shape) { +template Shape make_shape(const std::vector &shape) { if (shape.size() != Ndim) throw std::runtime_error("Shape size mismatch"); Shape arr; @@ -25,41 +25,41 @@ template Shape make_shape(const std::vector &shape) return arr; } -template int64_t element_offset(const Strides & /*unused*/) { return 0; } +template ssize_t element_offset(const Strides & /*unused*/) { return 0; } -template -int64_t element_offset(const Strides &strides, int64_t i, Ix... index) { +template +ssize_t element_offset(const Strides &strides, ssize_t i, Ix... 
index) { return i * strides[Dim] + element_offset(strides, index...); } -template std::array c_strides(const std::array &shape) { - std::array strides{}; +template std::array c_strides(const std::array &shape) { + std::array strides{}; std::fill(strides.begin(), strides.end(), 1); - for (int64_t i = Ndim - 1; i > 0; --i) { + for (ssize_t i = Ndim - 1; i > 0; --i) { strides[i - 1] = strides[i] * shape[i]; } return strides; } -template std::array make_array(const std::vector &vec) { +template std::array make_array(const std::vector &vec) { assert(vec.size() == Ndim); - std::array arr{}; + std::array arr{}; std::copy_n(vec.begin(), Ndim, arr.begin()); return arr; } -template class NDView : public ArrayExpr, Ndim> { +template class NDView : public ArrayExpr, Ndim> { public: NDView() = default; ~NDView() = default; NDView(const NDView &) = default; NDView(NDView &&) = default; - NDView(T *buffer, std::array shape) + NDView(T *buffer, std::array shape) : buffer_(buffer), strides_(c_strides(shape)), shape_(shape), size_(std::accumulate(std::begin(shape), std::end(shape), 1, std::multiplies<>())) {} - // NDView(T *buffer, const std::vector &shape) + // NDView(T *buffer, const std::vector &shape) // : buffer_(buffer), strides_(c_strides(make_array(shape))), shape_(make_array(shape)), // size_(std::accumulate(std::begin(shape), std::end(shape), 1, std::multiplies<>())) {} @@ -73,14 +73,14 @@ template class NDView : public ArrayExpr(size_); } size_t total_bytes() const { return size_ * sizeof(T); } - std::array strides() const noexcept { return strides_; } + std::array strides() const noexcept { return strides_; } T *begin() { return buffer_; } T *end() { return buffer_ + size_; } T const *begin() const { return buffer_; } T const *end() const { return buffer_ + size_; } - T &operator()(int64_t i) const { return buffer_[i]; } - T &operator[](int64_t i) const { return buffer_[i]; } + T &operator()(ssize_t i) const { return buffer_[i]; } + T &operator[](ssize_t i) const { 
return buffer_[i]; } bool operator==(const NDView &other) const { if (size_ != other.size_) @@ -136,15 +136,15 @@ template class NDView : public ArrayExpr strides_{}; - std::array shape_{}; + std::array strides_{}; + std::array shape_{}; uint64_t size_{}; template NDView &elemenwise(T val, BinaryOperation op) { @@ -160,7 +160,7 @@ template class NDView : public ArrayExpr void NDView::print_all() const { +template void NDView::print_all() const { for (auto row = 0; row < shape_[0]; ++row) { for (auto col = 0; col < shape_[1]; ++col) { std::cout << std::setw(3); @@ -171,7 +171,7 @@ template void NDView::print_all() const { } -template +template std::ostream& operator <<(std::ostream& os, const NDView& arr){ for (auto row = 0; row < arr.shape(0); ++row) { for (auto col = 0; col < arr.shape(1); ++col) { @@ -186,7 +186,7 @@ std::ostream& operator <<(std::ostream& os, const NDView& arr){ template NDView make_view(std::vector& vec){ - return NDView(vec.data(), {static_cast(vec.size())}); + return NDView(vec.data(), {static_cast(vec.size())}); } } // namespace aare \ No newline at end of file diff --git a/include/aare/NumpyFile.hpp b/include/aare/NumpyFile.hpp index 9cd2d61..7381a76 100644 --- a/include/aare/NumpyFile.hpp +++ b/include/aare/NumpyFile.hpp @@ -69,7 +69,7 @@ class NumpyFile : public FileInterface { */ template NDArray load() { NDArray arr(make_shape(m_header.shape)); - if (fseek(fp, static_cast(header_size), SEEK_SET)) { + if (fseek(fp, static_cast(header_size), SEEK_SET)) { throw std::runtime_error(LOCATION + "Error seeking to the start of the data"); } size_t rc = fread(arr.data(), sizeof(T), arr.size(), fp); diff --git a/include/aare/Pedestal.hpp b/include/aare/Pedestal.hpp index 102d730..d6223c1 100644 --- a/include/aare/Pedestal.hpp +++ b/include/aare/Pedestal.hpp @@ -107,7 +107,7 @@ template class Pedestal { assert(frame.size() == m_rows * m_cols); // TODO! 
move away from m_rows, m_cols - if (frame.shape() != std::array{m_rows, m_cols}) { + if (frame.shape() != std::array{m_rows, m_cols}) { throw std::runtime_error( "Frame shape does not match pedestal shape"); } @@ -128,7 +128,7 @@ template class Pedestal { assert(frame.size() == m_rows * m_cols); // TODO! move away from m_rows, m_cols - if (frame.shape() != std::array{m_rows, m_cols}) { + if (frame.shape() != std::array{m_rows, m_cols}) { throw std::runtime_error( "Frame shape does not match pedestal shape"); } diff --git a/include/aare/VarClusterFinder.hpp b/include/aare/VarClusterFinder.hpp index 161941a..596bf06 100644 --- a/include/aare/VarClusterFinder.hpp +++ b/include/aare/VarClusterFinder.hpp @@ -28,7 +28,7 @@ template class VarClusterFinder { }; private: - const std::array shape_; + const std::array shape_; NDView original_; NDArray labeled_; NDArray peripheral_labeled_; diff --git a/include/aare/defs.hpp b/include/aare/defs.hpp index 4d22bd4..01d291b 100644 --- a/include/aare/defs.hpp +++ b/include/aare/defs.hpp @@ -207,20 +207,20 @@ struct DetectorGeometry{ }; struct ROI{ - int64_t xmin{}; - int64_t xmax{}; - int64_t ymin{}; - int64_t ymax{}; + ssize_t xmin{}; + ssize_t xmax{}; + ssize_t ymin{}; + ssize_t ymax{}; - int64_t height() const { return ymax - ymin; } - int64_t width() const { return xmax - xmin; } - bool contains(int64_t x, int64_t y) const { + ssize_t height() const { return ymax - ymin; } + ssize_t width() const { return xmax - xmin; } + bool contains(ssize_t x, ssize_t y) const { return x >= xmin && x < xmax && y >= ymin && y < ymax; } }; -using dynamic_shape = std::vector; +using dynamic_shape = std::vector; //TODO! Can we uniform enums between the libraries? 
diff --git a/python/src/ctb_raw_file.hpp b/python/src/ctb_raw_file.hpp index a88a9d1..c9b5310 100644 --- a/python/src/ctb_raw_file.hpp +++ b/python/src/ctb_raw_file.hpp @@ -34,7 +34,7 @@ m.def("adc_sar_05_decode64to16", [](py::array_t input) { } //Create a 2D output array with the same shape as the input - std::vector shape{input.shape(0), input.shape(1)/static_cast(bits_per_byte)}; + std::vector shape{input.shape(0), input.shape(1)/static_cast(bits_per_byte)}; py::array_t output(shape); //Create a view of the input and output arrays @@ -55,7 +55,7 @@ m.def("adc_sar_04_decode64to16", [](py::array_t input) { } //Create a 2D output array with the same shape as the input - std::vector shape{input.shape(0), input.shape(1)/static_cast(bits_per_byte)}; + std::vector shape{input.shape(0), input.shape(1)/static_cast(bits_per_byte)}; py::array_t output(shape); //Create a view of the input and output arrays diff --git a/python/src/file.hpp b/python/src/file.hpp index 2d0f53e..f97db96 100644 --- a/python/src/file.hpp +++ b/python/src/file.hpp @@ -198,7 +198,7 @@ void define_file_io_bindings(py::module &m) { py::class_(m, "ROI") .def(py::init<>()) - .def(py::init(), py::arg("xmin"), + .def(py::init(), py::arg("xmin"), py::arg("xmax"), py::arg("ymin"), py::arg("ymax")) .def_readwrite("xmin", &ROI::xmin) .def_readwrite("xmax", &ROI::xmax) diff --git a/python/src/np_helper.hpp b/python/src/np_helper.hpp index 3d3ee3c..78166aa 100644 --- a/python/src/np_helper.hpp +++ b/python/src/np_helper.hpp @@ -13,7 +13,7 @@ namespace py = pybind11; using namespace aare; // Pass image data back to python as a numpy array -template +template py::array return_image_data(aare::NDArray *image) { py::capsule free_when_done(image, [](void *f) { diff --git a/src/FilePtr.cpp b/src/FilePtr.cpp index 4fed3d7..e3cdb4b 100644 --- a/src/FilePtr.cpp +++ b/src/FilePtr.cpp @@ -21,7 +21,7 @@ FilePtr &FilePtr::operator=(FilePtr &&other) { FILE *FilePtr::get() { return fp_; } -int64_t FilePtr::tell() { +ssize_t 
FilePtr::tell() { auto pos = ftell(fp_); if (pos == -1) throw std::runtime_error(fmt::format("Error getting file position: {}", error_msg())); diff --git a/src/NDArray.test.cpp b/src/NDArray.test.cpp index c37a285..819a1a9 100644 --- a/src/NDArray.test.cpp +++ b/src/NDArray.test.cpp @@ -44,9 +44,9 @@ TEST_CASE("3D NDArray from NDView"){ REQUIRE(image.size() == view.size()); REQUIRE(image.data() != view.data()); - for(int64_t i=0; i shape{{20}}; + std::array shape{{20}}; NDArray img(shape, 3); REQUIRE(img.size() == 20); REQUIRE(img(5) == 3); @@ -71,7 +71,7 @@ TEST_CASE("Accessing a const object") { } TEST_CASE("Indexing of a 2D image") { - std::array shape{{3, 7}}; + std::array shape{{3, 7}}; NDArray img(shape, 5); for (uint32_t i = 0; i != img.size(); ++i) { REQUIRE(img(i) == 5); @@ -114,7 +114,7 @@ TEST_CASE("Divide double by int") { } TEST_CASE("Elementwise multiplication of 3D image") { - std::array shape{3, 4, 2}; + std::array shape{3, 4, 2}; NDArray a{shape}; NDArray b{shape}; for (uint32_t i = 0; i != a.size(); ++i) { @@ -179,9 +179,9 @@ TEST_CASE("Compare two images") { } TEST_CASE("Size and shape matches") { - int64_t w = 15; - int64_t h = 75; - std::array shape{w, h}; + ssize_t w = 15; + ssize_t h = 75; + std::array shape{w, h}; NDArray a{shape}; REQUIRE(a.size() == w * h); REQUIRE(a.shape() == shape); @@ -224,7 +224,7 @@ TEST_CASE("Bitwise and on data") { TEST_CASE("Elementwise operations on images") { - std::array shape{5, 5}; + std::array shape{5, 5}; double a_val = 3.0; double b_val = 8.0; diff --git a/src/NDView.test.cpp b/src/NDView.test.cpp index 8750f3a..89e76e9 100644 --- a/src/NDView.test.cpp +++ b/src/NDView.test.cpp @@ -142,7 +142,7 @@ TEST_CASE("iterators") { // for (int i = 0; i != 12; ++i) { // vec.push_back(i); // } -// std::vector shape{3, 4}; +// std::vector shape{3, 4}; // NDView data(vec.data(), shape); // } @@ -151,8 +151,8 @@ TEST_CASE("divide with another span") { std::vector vec1{3, 2, 1}; std::vector result{3, 6, 3}; - NDView 
data0(vec0.data(), Shape<1>{static_cast(vec0.size())}); - NDView data1(vec1.data(), Shape<1>{static_cast(vec1.size())}); + NDView data0(vec0.data(), Shape<1>{static_cast(vec0.size())}); + NDView data1(vec1.data(), Shape<1>{static_cast(vec1.size())}); data0 /= data1; diff --git a/src/decode.cpp b/src/decode.cpp index 8ac7bc0..436ad7b 100644 --- a/src/decode.cpp +++ b/src/decode.cpp @@ -26,8 +26,8 @@ void adc_sar_05_decode64to16(NDView input, NDView outpu throw std::invalid_argument(LOCATION + " input and output shapes must match"); } - for(int64_t i = 0; i < input.shape(0); i++){ - for(int64_t j = 0; j < input.shape(1); j++){ + for(ssize_t i = 0; i < input.shape(0); i++){ + for(ssize_t j = 0; j < input.shape(1); j++){ output(i,j) = adc_sar_05_decode64to16(input(i,j)); } } @@ -56,8 +56,8 @@ void adc_sar_04_decode64to16(NDView input, NDView outpu if(input.shape() != output.shape()){ throw std::invalid_argument(LOCATION + " input and output shapes must match"); } - for(int64_t i = 0; i < input.shape(0); i++){ - for(int64_t j = 0; j < input.shape(1); j++){ + for(ssize_t i = 0; i < input.shape(0); i++){ + for(ssize_t j = 0; j < input.shape(1); j++){ output(i,j) = adc_sar_04_decode64to16(input(i,j)); } } From cf158e2dcdb4ffd1295a8caf1de61c8fb16d1372 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Erik=20Fr=C3=B6jdh?= Date: Mon, 5 May 2025 11:40:04 +0200 Subject: [PATCH 114/120] Added scurve fitting (#168) - added scurve fitting with two different signs (scurve, scurve2) - at the moment no option to set initial parameters --------- Co-authored-by: JulianHeymes --- include/aare/Fit.hpp | 29 ++++- python/aare/__init__.py | 2 +- python/aare/func.py | 2 +- python/src/fit.hpp | 215 ++++++++++++++++++++++++++++++++++ src/Fit.cpp | 249 ++++++++++++++++++++++++++++++++++++++++ 5 files changed, 492 insertions(+), 5 deletions(-) diff --git a/include/aare/Fit.hpp b/include/aare/Fit.hpp index 6fd10aa..eb9ac22 100644 --- a/include/aare/Fit.hpp +++ b/include/aare/Fit.hpp @@ -15,6 +15,12 @@ 
NDArray gaus(NDView x, NDView par); double pol1(const double x, const double *par); NDArray pol1(NDView x, NDView par); +double scurve(const double x, const double *par); +NDArray scurve(NDView x, NDView par); + +double scurve2(const double x, const double *par); +NDArray scurve2(NDView x, NDView par); + } // namespace func @@ -25,6 +31,9 @@ std::array gaus_init_par(const NDView x, const NDView pol1_init_par(const NDView x, const NDView y); +std::array scurve_init_par(const NDView x, const NDView y); +std::array scurve2_init_par(const NDView x, const NDView y); + static constexpr int DEFAULT_NUM_THREADS = 4; /** @@ -38,7 +47,7 @@ NDArray fit_gaus(NDView x, NDView y); /** * @brief Fit a 1D Gaussian to each pixel. Data layout [row, col, values] * @param x x values - * @param y y vales, layout [row, col, values] + * @param y y values, layout [row, col, values] * @param n_threads number of threads to use */ @@ -51,7 +60,7 @@ NDArray fit_gaus(NDView x, NDView y, /** * @brief Fit a 1D Gaussian with error estimates * @param x x values - * @param y y vales, layout [row, col, values] + * @param y y values, layout [row, col, values] * @param y_err error in y, layout [row, col, values] * @param par_out output parameters * @param par_err_out output error parameters @@ -64,7 +73,7 @@ void fit_gaus(NDView x, NDView y, NDView y_err, * @brief Fit a 1D Gaussian to each pixel with error estimates. 
Data layout * [row, col, values] * @param x x values - * @param y y vales, layout [row, col, values] + * @param y y values, layout [row, col, values] * @param y_err error in y, layout [row, col, values] * @param par_out output parameters, layout [row, col, values] * @param par_err_out output parameter errors, layout [row, col, values] @@ -88,5 +97,19 @@ void fit_pol1(NDView x, NDView y, NDView y_err, NDView par_out, NDView par_err_out,NDView chi2_out, int n_threads = DEFAULT_NUM_THREADS); +NDArray fit_scurve(NDView x, NDView y); +NDArray fit_scurve(NDView x, NDView y, int n_threads); +void fit_scurve(NDView x, NDView y, NDView y_err, + NDView par_out, NDView par_err_out, double& chi2); +void fit_scurve(NDView x, NDView y, NDView y_err, + NDView par_out, NDView par_err_out, NDView chi2_out, + int n_threads); +NDArray fit_scurve2(NDView x, NDView y); +NDArray fit_scurve2(NDView x, NDView y, int n_threads); +void fit_scurve2(NDView x, NDView y, NDView y_err, + NDView par_out, NDView par_err_out, double& chi2); +void fit_scurve2(NDView x, NDView y, NDView y_err, + NDView par_out, NDView par_err_out, NDView chi2_out, + int n_threads); } // namespace aare \ No newline at end of file diff --git a/python/aare/__init__.py b/python/aare/__init__.py index e1e5757..d2bbe0a 100644 --- a/python/aare/__init__.py +++ b/python/aare/__init__.py @@ -15,7 +15,7 @@ from .ClusterFinder import ClusterFinder, ClusterCollector, ClusterFinderMT, Clu from .ClusterVector import ClusterVector -from ._aare import fit_gaus, fit_pol1 +from ._aare import fit_gaus, fit_pol1, fit_scurve, fit_scurve2 from ._aare import Interpolator from ._aare import calculate_eta2 diff --git a/python/aare/func.py b/python/aare/func.py index ca60cf2..e8a7b46 100644 --- a/python/aare/func.py +++ b/python/aare/func.py @@ -1 +1 @@ -from ._aare import gaus, pol1 \ No newline at end of file +from ._aare import gaus, pol1, scurve, scurve2 \ No newline at end of file diff --git a/python/src/fit.hpp b/python/src/fit.hpp 
index 8e6cfef..97dafb5 100644 --- a/python/src/fit.hpp +++ b/python/src/fit.hpp @@ -55,6 +55,47 @@ void define_fit_bindings(py::module &m) { )", py::arg("x"), py::arg("par")); + m.def( + "scurve", + [](py::array_t x, + py::array_t par) { + auto x_view = make_view_1d(x); + auto par_view = make_view_1d(par); + auto y = new NDArray{aare::func::scurve(x_view, par_view)}; + return return_image_data(y); + }, + R"( + Evaluate a 1D scurve function for all points in x using parameters par. + + Parameters + ---------- + x : array_like + The points at which to evaluate the scurve function. + par : array_like + The parameters of the scurve function. The first element is the background slope, the second element is the background intercept, the third element is the mean, the fourth element is the standard deviation, the fifth element is inflexion point count number, and the sixth element is C. + )", + py::arg("x"), py::arg("par")); + + m.def( + "scurve2", + [](py::array_t x, + py::array_t par) { + auto x_view = make_view_1d(x); + auto par_view = make_view_1d(par); + auto y = new NDArray{aare::func::scurve2(x_view, par_view)}; + return return_image_data(y); + }, + R"( + Evaluate a 1D scurve2 function for all points in x using parameters par. + + Parameters + ---------- + x : array_like + The points at which to evaluate the scurve function. + par : array_like + The parameters of the scurve2 function. The first element is the background slope, the second element is the background intercept, the third element is the mean, the fourth element is the standard deviation, the fifth element is inflexion point count number, and the sixth element is C. + )", + py::arg("x"), py::arg("par")); m.def( "fit_gaus", @@ -235,6 +276,180 @@ n_threads : int, optional R"( Fit a 1D polynomial to data with error estimates. +Parameters +---------- +x : array_like + The x values. +y : array_like + The y values. +y_err : array_like + The error in the y values. 
+n_threads : int, optional + The number of threads to use. Default is 4. +)", + py::arg("x"), py::arg("y"), py::arg("y_err"), py::arg("n_threads") = 4); + +//========= + m.def( + "fit_scurve", + [](py::array_t x, + py::array_t y, + int n_threads) { + if (y.ndim() == 3) { + auto par = new NDArray{}; + + auto x_view = make_view_1d(x); + auto y_view = make_view_3d(y); + *par = aare::fit_scurve(x_view, y_view, n_threads); + return return_image_data(par); + } else if (y.ndim() == 1) { + auto par = new NDArray{}; + auto x_view = make_view_1d(x); + auto y_view = make_view_1d(y); + *par = aare::fit_scurve(x_view, y_view); + return return_image_data(par); + } else { + throw std::runtime_error("Data must be 1D or 3D"); + } + }, + py::arg("x"), py::arg("y"), py::arg("n_threads") = 4); + + m.def( + "fit_scurve", + [](py::array_t x, + py::array_t y, + py::array_t y_err, + int n_threads) { + if (y.ndim() == 3) { + auto par = new NDArray({y.shape(0), y.shape(1), 6}); + + auto par_err = + new NDArray({y.shape(0), y.shape(1), 6}); + + auto y_view = make_view_3d(y); + auto y_view_err = make_view_3d(y_err); + auto x_view = make_view_1d(x); + + auto chi2 = new NDArray({y.shape(0), y.shape(1)}); + + aare::fit_scurve(x_view, y_view, y_view_err, par->view(), + par_err->view(), chi2->view(), n_threads); + return py::dict("par"_a = return_image_data(par), + "par_err"_a = return_image_data(par_err), + "chi2"_a = return_image_data(chi2), + "Ndf"_a = y.shape(2) - 2); + + + } else if (y.ndim() == 1) { + auto par = new NDArray({2}); + auto par_err = new NDArray({2}); + + auto y_view = make_view_1d(y); + auto y_view_err = make_view_1d(y_err); + auto x_view = make_view_1d(x); + + double chi2 = 0; + + aare::fit_scurve(x_view, y_view, y_view_err, par->view(), + par_err->view(), chi2); + return py::dict("par"_a = return_image_data(par), + "par_err"_a = return_image_data(par_err), + "chi2"_a = chi2, "Ndf"_a = y.size() - 2); + + } else { + throw std::runtime_error("Data must be 1D or 3D"); + } + }, + 
R"( +Fit a 1D polynomial to data with error estimates. + +Parameters +---------- +x : array_like + The x values. +y : array_like + The y values. +y_err : array_like + The error in the y values. +n_threads : int, optional + The number of threads to use. Default is 4. +)", + py::arg("x"), py::arg("y"), py::arg("y_err"), py::arg("n_threads") = 4); + + + m.def( + "fit_scurve2", + [](py::array_t x, + py::array_t y, + int n_threads) { + if (y.ndim() == 3) { + auto par = new NDArray{}; + + auto x_view = make_view_1d(x); + auto y_view = make_view_3d(y); + *par = aare::fit_scurve2(x_view, y_view, n_threads); + return return_image_data(par); + } else if (y.ndim() == 1) { + auto par = new NDArray{}; + auto x_view = make_view_1d(x); + auto y_view = make_view_1d(y); + *par = aare::fit_scurve2(x_view, y_view); + return return_image_data(par); + } else { + throw std::runtime_error("Data must be 1D or 3D"); + } + }, + py::arg("x"), py::arg("y"), py::arg("n_threads") = 4); + + m.def( + "fit_scurve2", + [](py::array_t x, + py::array_t y, + py::array_t y_err, + int n_threads) { + if (y.ndim() == 3) { + auto par = new NDArray({y.shape(0), y.shape(1), 6}); + + auto par_err = + new NDArray({y.shape(0), y.shape(1), 6}); + + auto y_view = make_view_3d(y); + auto y_view_err = make_view_3d(y_err); + auto x_view = make_view_1d(x); + + auto chi2 = new NDArray({y.shape(0), y.shape(1)}); + + aare::fit_scurve2(x_view, y_view, y_view_err, par->view(), + par_err->view(), chi2->view(), n_threads); + return py::dict("par"_a = return_image_data(par), + "par_err"_a = return_image_data(par_err), + "chi2"_a = return_image_data(chi2), + "Ndf"_a = y.shape(2) - 2); + + + } else if (y.ndim() == 1) { + auto par = new NDArray({6}); + auto par_err = new NDArray({6}); + + auto y_view = make_view_1d(y); + auto y_view_err = make_view_1d(y_err); + auto x_view = make_view_1d(x); + + double chi2 = 0; + + aare::fit_scurve2(x_view, y_view, y_view_err, par->view(), + par_err->view(), chi2); + return py::dict("par"_a = 
return_image_data(par), + "par_err"_a = return_image_data(par_err), + "chi2"_a = chi2, "Ndf"_a = y.size() - 2); + + } else { + throw std::runtime_error("Data must be 1D or 3D"); + } + }, + R"( +Fit a 1D polynomial to data with error estimates. + Parameters ---------- x : array_like diff --git a/src/Fit.cpp b/src/Fit.cpp index 9126109..d104675 100644 --- a/src/Fit.cpp +++ b/src/Fit.cpp @@ -34,6 +34,30 @@ NDArray pol1(NDView x, NDView par) { return y; } +double scurve(const double x, const double * par) { + return (par[0] + par[1] * x) + 0.5 * (1 + erf((x - par[2]) / (sqrt(2) * par[3]))) * (par[4] + par[5] * (x - par[2])); +} + +NDArray scurve(NDView x, NDView par) { + NDArray y({x.shape()}, 0); + for (ssize_t i = 0; i < x.size(); i++) { + y(i) = scurve(x(i), par.data()); + } + return y; +} + +double scurve2(const double x, const double * par) { + return (par[0] + par[1] * x) + 0.5 * (1 - erf((x - par[2]) / (sqrt(2) * par[3]))) * (par[4] + par[5] * (x - par[2])); +} + +NDArray scurve2(NDView x, NDView par) { + NDArray y({x.shape()}, 0); + for (ssize_t i = 0; i < x.size(); i++) { + y(i) = scurve2(x(i), par.data()); + } + return y; +} + } // namespace func NDArray fit_gaus(NDView x, NDView y) { @@ -273,4 +297,229 @@ NDArray fit_pol1(NDView x, NDView y, return result; } +// ~~ S-CURVES ~~ + +// SCURVE -- +std::array scurve_init_par(const NDView x, const NDView y){ + // Estimate the initial parameters for the fit + std::array start_par{0, 0, 0, 0, 0, 0}; + + auto ymax = std::max_element(y.begin(), y.end()); + auto ymin = std::min_element(y.begin(), y.end()); + start_par[4] = *ymin + (*ymax - *ymin) / 2; + + // Find the first x where the corresponding y value is above the threshold (start_par[4]) + for (ssize_t i = 0; i < y.size(); ++i) { + if (y[i] >= start_par[4]) { + start_par[2] = x[i]; + break; // Exit the loop after finding the first valid x + } + } + + start_par[3] = 2 * sqrt(start_par[2]); + start_par[0] = 100; + start_par[1] = 0.25; + start_par[5] = 1; + return 
start_par; +} + +// - No error +NDArray fit_scurve(NDView x, NDView y) { + NDArray result = scurve_init_par(x, y); + lm_status_struct status; + + lmcurve(result.size(), result.data(), x.size(), x.data(), y.data(), + aare::func::scurve, &lm_control_double, &status); + + return result; +} + +NDArray fit_scurve(NDView x, NDView y, int n_threads) { + NDArray result({y.shape(0), y.shape(1), 6}, 0); + + auto process = [&x, &y, &result](ssize_t first_row, ssize_t last_row) { + for (ssize_t row = first_row; row < last_row; row++) { + for (ssize_t col = 0; col < y.shape(1); col++) { + NDView values(&y(row, col, 0), {y.shape(2)}); + auto res = fit_scurve(x, values); + result(row, col, 0) = res(0); + result(row, col, 1) = res(1); + result(row, col, 2) = res(2); + result(row, col, 3) = res(3); + result(row, col, 4) = res(4); + result(row, col, 5) = res(5); + } + } + }; + + auto tasks = split_task(0, y.shape(0), n_threads); + RunInParallel(process, tasks); + return result; +} + +// - Error +void fit_scurve(NDView x, NDView y, NDView y_err, + NDView par_out, NDView par_err_out, double& chi2) { + + // Check that we have the correct sizes + if (y.size() != x.size() || y.size() != y_err.size() || + par_out.size() != 6 || par_err_out.size() != 6) { + throw std::runtime_error("Data, x, data_err must have the same size " + "and par_out, par_err_out must have size 6"); + } + + lm_status_struct status; + par_out = scurve_init_par(x, y); + std::array cov = {0}; // size 6x6 + // std::array cov{0, 0, 0, 0}; + + lmcurve2(par_out.size(), par_out.data(), par_err_out.data(), cov.data(), + x.size(), x.data(), y.data(), y_err.data(), aare::func::scurve, + &lm_control_double, &status); + + // Calculate chi2 + chi2 = 0; + for (ssize_t i = 0; i < y.size(); i++) { + chi2 += std::pow((y(i) - func::pol1(x(i), par_out.data())) / y_err(i), 2); + } +} + +void fit_scurve(NDView x, NDView y, NDView y_err, + NDView par_out, NDView par_err_out, NDView chi2_out, + int n_threads) { + + auto process = 
[&](ssize_t first_row, ssize_t last_row) { + for (ssize_t row = first_row; row < last_row; row++) { + for (ssize_t col = 0; col < y.shape(1); col++) { + NDView y_view(&y(row, col, 0), {y.shape(2)}); + NDView y_err_view(&y_err(row, col, 0), + {y_err.shape(2)}); + NDView par_out_view(&par_out(row, col, 0), + {par_out.shape(2)}); + NDView par_err_out_view(&par_err_out(row, col, 0), + {par_err_out.shape(2)}); + + fit_scurve(x, y_view, y_err_view, par_out_view, par_err_out_view, chi2_out(row, col)); + + } + } + }; + + auto tasks = split_task(0, y.shape(0), n_threads); + RunInParallel(process, tasks); + +} + +// SCURVE2 --- + +std::array scurve2_init_par(const NDView x, const NDView y){ + // Estimate the initial parameters for the fit + std::array start_par{0, 0, 0, 0, 0, 0}; + + auto ymax = std::max_element(y.begin(), y.end()); + auto ymin = std::min_element(y.begin(), y.end()); + start_par[4] = *ymin + (*ymax - *ymin) / 2; + + // Find the first x where the corresponding y value is above the threshold (start_par[4]) + for (ssize_t i = 0; i < y.size(); ++i) { + if (y[i] <= start_par[4]) { + start_par[2] = x[i]; + break; // Exit the loop after finding the first valid x + } + } + + start_par[3] = 2 * sqrt(start_par[2]); + start_par[0] = 100; + start_par[1] = 0.25; + start_par[5] = -1; + return start_par; +} + +// - No error +NDArray fit_scurve2(NDView x, NDView y) { + NDArray result = scurve2_init_par(x, y); + lm_status_struct status; + + lmcurve(result.size(), result.data(), x.size(), x.data(), y.data(), + aare::func::scurve2, &lm_control_double, &status); + + return result; +} + +NDArray fit_scurve2(NDView x, NDView y, int n_threads) { + NDArray result({y.shape(0), y.shape(1), 6}, 0); + + auto process = [&x, &y, &result](ssize_t first_row, ssize_t last_row) { + for (ssize_t row = first_row; row < last_row; row++) { + for (ssize_t col = 0; col < y.shape(1); col++) { + NDView values(&y(row, col, 0), {y.shape(2)}); + auto res = fit_scurve2(x, values); + result(row, col, 0) 
= res(0); + result(row, col, 1) = res(1); + result(row, col, 2) = res(2); + result(row, col, 3) = res(3); + result(row, col, 4) = res(4); + result(row, col, 5) = res(5); + } + } + }; + + auto tasks = split_task(0, y.shape(0), n_threads); + RunInParallel(process, tasks); + return result; +} + +// - Error +void fit_scurve2(NDView x, NDView y, NDView y_err, + NDView par_out, NDView par_err_out, double& chi2) { + + // Check that we have the correct sizes + if (y.size() != x.size() || y.size() != y_err.size() || + par_out.size() != 6 || par_err_out.size() != 6) { + throw std::runtime_error("Data, x, data_err must have the same size " + "and par_out, par_err_out must have size 6"); + } + + lm_status_struct status; + par_out = scurve2_init_par(x, y); + std::array cov = {0}; // size 6x6 + // std::array cov{0, 0, 0, 0}; + + lmcurve2(par_out.size(), par_out.data(), par_err_out.data(), cov.data(), + x.size(), x.data(), y.data(), y_err.data(), aare::func::scurve2, + &lm_control_double, &status); + + // Calculate chi2 + chi2 = 0; + for (ssize_t i = 0; i < y.size(); i++) { + chi2 += std::pow((y(i) - func::pol1(x(i), par_out.data())) / y_err(i), 2); + } +} + +void fit_scurve2(NDView x, NDView y, NDView y_err, + NDView par_out, NDView par_err_out, NDView chi2_out, + int n_threads) { + + auto process = [&](ssize_t first_row, ssize_t last_row) { + for (ssize_t row = first_row; row < last_row; row++) { + for (ssize_t col = 0; col < y.shape(1); col++) { + NDView y_view(&y(row, col, 0), {y.shape(2)}); + NDView y_err_view(&y_err(row, col, 0), + {y_err.shape(2)}); + NDView par_out_view(&par_out(row, col, 0), + {par_out.shape(2)}); + NDView par_err_out_view(&par_err_out(row, col, 0), + {par_err_out.shape(2)}); + + fit_scurve2(x, y_view, y_err_view, par_out_view, par_err_out_view, chi2_out(row, col)); + + } + } + }; + + auto tasks = split_task(0, y.shape(0), n_threads); + RunInParallel(process, tasks); + +} + } // namespace aare \ No newline at end of file From 
276283ff14378816ff15b29c031052f79fc461d3 Mon Sep 17 00:00:00 2001 From: AliceMazzoleni99 Date: Tue, 6 May 2025 14:48:54 +0200 Subject: [PATCH 115/120] automated versioning (#175) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: mazzol_a Co-authored-by: Erik Fröjdh --- CMakeLists.txt | 7 +++++- VERSION | 1 + conda-recipe/meta.yaml | 6 ++++- pyproject.toml | 8 +++++- update_version.py | 57 ++++++++++++++++++++++++++++++++++++++++++ 5 files changed, 76 insertions(+), 3 deletions(-) create mode 100644 VERSION create mode 100644 update_version.py diff --git a/CMakeLists.txt b/CMakeLists.txt index 00f6b66..2dc4555 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,12 +1,17 @@ cmake_minimum_required(VERSION 3.15) project(aare - VERSION 1.0.0 DESCRIPTION "Data processing library for PSI detectors" HOMEPAGE_URL "https://github.com/slsdetectorgroup/aare" LANGUAGES C CXX ) +# Read VERSION file into project version +set(VERSION_FILE "${CMAKE_CURRENT_SOURCE_DIR}/VERSION") +file(READ "${VERSION_FILE}" VERSION_CONTENT) +string(STRIP "${VERSION_CONTENT}" PROJECT_VERSION_STRING) +set(PROJECT_VERSION ${PROJECT_VERSION_STRING}) + set(CMAKE_CXX_STANDARD 17) set(CMAKE_CXX_STANDARD_REQUIRED ON) set(CMAKE_CXX_EXTENSIONS OFF) diff --git a/VERSION b/VERSION new file mode 100644 index 0000000..bd52db8 --- /dev/null +++ b/VERSION @@ -0,0 +1 @@ +0.0.0 \ No newline at end of file diff --git a/conda-recipe/meta.yaml b/conda-recipe/meta.yaml index bfa6323..8fea745 100644 --- a/conda-recipe/meta.yaml +++ b/conda-recipe/meta.yaml @@ -1,6 +1,10 @@ +source: + path: ../ + +{% set version = load_file_regex(load_file = 'VERSION', regex_pattern = '(\d+(?:\.\d+)*(?:[\+\w\.]+))').group(1) %} package: name: aare - version: 2025.4.22 #TODO! how to not duplicate this? + version: {{version}} source: path: .. 
diff --git a/pyproject.toml b/pyproject.toml index 7415062..db3cb3c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,10 +1,16 @@ +[tool.scikit-build.metadata.version] +provider = "scikit_build_core.metadata.regex" +input = "VERSION" +regex = '^(?P\d+(?:\.\d+)*(?:[\.\+\w]+)?)$' +result = "{version}" + [build-system] requires = ["scikit-build-core>=0.10", "pybind11", "numpy"] build-backend = "scikit_build_core.build" [project] name = "aare" -version = "2025.4.22" +dynamic = ["version"] requires-python = ">=3.11" dependencies = [ "numpy", diff --git a/update_version.py b/update_version.py new file mode 100644 index 0000000..476895a --- /dev/null +++ b/update_version.py @@ -0,0 +1,57 @@ +# SPDX-License-Identifier: LGPL-3.0-or-other +# Copyright (C) 2021 Contributors to the Aare Package +""" +Script to update VERSION file with semantic versioning if provided as an argument, or with 0.0.0 if no argument is provided. +""" + +import sys +import os +import re + +from packaging.version import Version, InvalidVersion + + +SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__)) + +def is_integer(value): + try: + int(value) + except ValueError: + return False + else: + return True + + +def get_version(): + + # Check at least one argument is passed + if len(sys.argv) < 2: + return "0.0.0" + + version = sys.argv[1] + + try: + v = Version(version) # normalize check if version follows PEP 440 specification + + version_normalized = version.replace("-", ".") + + version_normalized = re.sub(r'0*(\d+)', lambda m : str(int(m.group(0))), version_normalized) #remove leading zeros + + return version_normalized + + except InvalidVersion as e: + print(f"Invalid version {version}. 
Version format must follow semantic versioning format of python PEP 440 version identification specification.") + sys.exit(1) + + +def write_version_to_file(version): + version_file_path = os.path.join(SCRIPT_DIR, "VERSION") + with open(version_file_path, "w") as version_file: + version_file.write(version) + print(f"Version {version} written to VERSION file.") + +# Main script +if __name__ == "__main__": + + version = get_version() + write_version_to_file(version) \ No newline at end of file From 81588fba3b7575cdfa098d56c285127d0eace652 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Erik=20Fr=C3=B6jdh?= Date: Tue, 6 May 2025 17:18:54 +0200 Subject: [PATCH 116/120] linking to threads and removed extra ; (#176) - Fixing broken build of tests on RH8 by linking pthreads - Removed extra ; causing warnings with -Wpedantic --- CMakeLists.txt | 4 ++++ src/JungfrauDataFile.cpp | 4 ++-- src/NumpyFile.cpp | 6 +++--- src/RawFile.cpp | 14 +++++++------- src/RawMasterFile.cpp | 4 ++-- 5 files changed, 18 insertions(+), 14 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 2dc4555..fc51c14 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -405,6 +405,9 @@ target_include_directories(aare_core PUBLIC "$" ) +set(THREADS_PREFER_PTHREAD_FLAG ON) +find_package(Threads REQUIRED) + target_link_libraries( aare_core PUBLIC @@ -413,6 +416,7 @@ target_link_libraries( ${STD_FS_LIB} # from helpers.cmake PRIVATE aare_compiler_flags + Threads::Threads $ ) diff --git a/src/JungfrauDataFile.cpp b/src/JungfrauDataFile.cpp index 8f1f904..59a1a0a 100644 --- a/src/JungfrauDataFile.cpp +++ b/src/JungfrauDataFile.cpp @@ -89,7 +89,7 @@ void JungfrauDataFile::seek(size_t frame_index) { : frame_index; auto byte_offset = frame_offset * (m_bytes_per_frame + header_size); m_fp.seek(byte_offset); -}; +} size_t JungfrauDataFile::tell() { return m_current_frame_index; } size_t JungfrauDataFile::total_frames() const { return m_total_frames; } @@ -235,4 +235,4 @@ std::filesystem::path 
JungfrauDataFile::fpath(size_t file_index) const { return m_path / fname; } -} // namespace aare \ No newline at end of file +} // namespace aare diff --git a/src/NumpyFile.cpp b/src/NumpyFile.cpp index 109439a..e375ce3 100644 --- a/src/NumpyFile.cpp +++ b/src/NumpyFile.cpp @@ -72,8 +72,8 @@ void NumpyFile::get_frame_into(size_t frame_number, std::byte *image_buf) { } } -size_t NumpyFile::pixels_per_frame() { return m_pixels_per_frame; }; -size_t NumpyFile::bytes_per_frame() { return m_bytes_per_frame; }; +size_t NumpyFile::pixels_per_frame() { return m_pixels_per_frame; } +size_t NumpyFile::bytes_per_frame() { return m_bytes_per_frame; } std::vector NumpyFile::read_n(size_t n_frames) { // TODO: implement this in a more efficient way @@ -197,4 +197,4 @@ void NumpyFile::load_metadata() { m_header = {dtype, fortran_order, shape}; } -} // namespace aare \ No newline at end of file +} // namespace aare diff --git a/src/RawFile.cpp b/src/RawFile.cpp index 78cb6c5..c576453 100644 --- a/src/RawFile.cpp +++ b/src/RawFile.cpp @@ -34,7 +34,7 @@ RawFile::RawFile(const std::filesystem::path &fname, const std::string &mode) } } -Frame RawFile::read_frame() { return get_frame(m_current_frame++); }; +Frame RawFile::read_frame() { return get_frame(m_current_frame++); } Frame RawFile::read_frame(size_t frame_number) { seek(frame_number); @@ -52,13 +52,13 @@ void RawFile::read_into(std::byte *image_buf, size_t n_frames) { void RawFile::read_into(std::byte *image_buf) { return get_frame_into(m_current_frame++, image_buf); -}; +} void RawFile::read_into(std::byte *image_buf, DetectorHeader *header) { return get_frame_into(m_current_frame++, image_buf, header); -}; +} void RawFile::read_into(std::byte *image_buf, size_t n_frames, DetectorHeader *header) { // return get_frame_into(m_current_frame++, image_buf, header); @@ -70,7 +70,7 @@ void RawFile::read_into(std::byte *image_buf, size_t n_frames, DetectorHeader *h header+=n_mod(); } -}; +} size_t RawFile::n_mod() const { return 
n_subfile_parts; } @@ -94,9 +94,9 @@ void RawFile::seek(size_t frame_index) { frame_index, total_frames())); } m_current_frame = frame_index; -}; +} -size_t RawFile::tell() { return m_current_frame; }; +size_t RawFile::tell() { return m_current_frame; } size_t RawFile::total_frames() const { return m_master.frames_in_file(); } size_t RawFile::rows() const { return m_geometry.pixels_y; } @@ -360,4 +360,4 @@ RawFile::~RawFile() { -} // namespace aare \ No newline at end of file +} // namespace aare diff --git a/src/RawMasterFile.cpp b/src/RawMasterFile.cpp index 052bb00..33807d4 100644 --- a/src/RawMasterFile.cpp +++ b/src/RawMasterFile.cpp @@ -87,7 +87,7 @@ int ScanParameters::start() const { return m_start; } int ScanParameters::stop() const { return m_stop; } void ScanParameters::increment_stop(){ m_stop += 1; -}; +} int ScanParameters::step() const { return m_step; } const std::string &ScanParameters::dac() const { return m_dac; } bool ScanParameters::enabled() const { return m_enabled; } @@ -417,4 +417,4 @@ void RawMasterFile::parse_raw(const std::filesystem::path &fpath) { if(m_frames_in_file==0) m_frames_in_file = m_total_frames_expected; } -} // namespace aare \ No newline at end of file +} // namespace aare From a6eebbe9bd414ff71d8fdc25aa2a8effb42cc14a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Erik=20Fr=C3=B6jdh?= Date: Tue, 20 May 2025 15:27:38 +0200 Subject: [PATCH 117/120] removed extra const on return type, added cast (#177) Fixed warnings on apple clang: - removed extra const on return type - added cast to suppress a float to double conversion warning --- include/aare/ClusterVector.hpp | 4 ++-- include/aare/Interpolator.hpp | 4 ++-- src/Fit.cpp | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/include/aare/ClusterVector.hpp b/include/aare/ClusterVector.hpp index c8b1ea1..9d575d9 100644 --- a/include/aare/ClusterVector.hpp +++ b/include/aare/ClusterVector.hpp @@ -133,9 +133,9 @@ class ClusterVector> { */ size_t capacity() const { return 
m_data.capacity(); } - const auto begin() const { return m_data.begin(); } + auto begin() const { return m_data.begin(); } - const auto end() const { return m_data.end(); } + auto end() const { return m_data.end(); } /** * @brief Return the size in bytes of a single cluster diff --git a/include/aare/Interpolator.hpp b/include/aare/Interpolator.hpp index d2b2322..8e65f38 100644 --- a/include/aare/Interpolator.hpp +++ b/include/aare/Interpolator.hpp @@ -51,7 +51,7 @@ Interpolator::interpolate(const ClusterVector &clusters) { Photon photon; photon.x = cluster.x; photon.y = cluster.y; - photon.energy = eta.sum; + photon.energy = static_cast(eta.sum); // auto ie = nearest_index(m_energy_bins, photon.energy)-1; // auto ix = nearest_index(m_etabinsx, eta.x)-1; @@ -99,7 +99,7 @@ Interpolator::interpolate(const ClusterVector &clusters) { Photon photon; photon.x = cluster.x; photon.y = cluster.y; - photon.energy = eta.sum; + photon.energy = static_cast(eta.sum); // Now do some actual interpolation. // Find which energy bin the cluster is in diff --git a/src/Fit.cpp b/src/Fit.cpp index d104675..25000de 100644 --- a/src/Fit.cpp +++ b/src/Fit.cpp @@ -105,7 +105,7 @@ std::array gaus_init_par(const NDView x, const NDView *e / 2; }) * + [e](double val) { return val > *e / 2; }) * delta / 2.35; return start_par; From 9e1b8731b03673d4938f2f4915a5c2f3f01aa3ac Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Erik=20Fr=C3=B6jdh?= Date: Thu, 22 May 2025 11:00:03 +0200 Subject: [PATCH 118/120] RawSubFile support multi file access (#173) This PR is a fix/improvement to a problem that Jonathan had. (#156) The original implementation opened all subfiles at once witch works for normal sized datasets but fails at a certain point (thousands of files). 
- This solution uses RawSubFile to manage the different file indicies and only opens the file we need - Added logger.h from slsDetectorPackage for debug printing (in production no messages should be visible) --- CMakeLists.txt | 5 ++ include/aare/RawFile.hpp | 22 +---- include/aare/RawMasterFile.hpp | 1 + include/aare/RawSubFile.hpp | 20 ++++- include/aare/algorithm.hpp | 11 +++ include/aare/defs.hpp | 2 + include/aare/logger.hpp | 139 ++++++++++++++++++++++++++++++++ python/examples/play.py | 126 ++++++++++++++++------------- python/src/raw_file.hpp | 8 +- python/tests/test_RawSubFile.py | 19 +++-- src/RawFile.cpp | 132 +++++++++--------------------- src/RawFile.test.cpp | 7 +- src/RawMasterFile.cpp | 4 + src/RawSubFile.cpp | 128 +++++++++++++++++++++++------ src/RawSubFile.test.cpp | 76 +++++++++++++++++ src/algorithm.test.cpp | 33 ++++++++ 16 files changed, 517 insertions(+), 216 deletions(-) create mode 100644 include/aare/logger.hpp create mode 100644 src/RawSubFile.test.cpp diff --git a/CMakeLists.txt b/CMakeLists.txt index fc51c14..dddb44b 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -79,6 +79,9 @@ endif() if(AARE_VERBOSE) add_compile_definitions(AARE_VERBOSE) + add_compile_definitions(AARE_LOG_LEVEL=aare::logDEBUG5) +else() + add_compile_definitions(AARE_LOG_LEVEL=aare::logERROR) endif() if(AARE_CUSTOM_ASSERT) @@ -90,6 +93,7 @@ if(AARE_BENCHMARKS) endif() + set(CMAKE_EXPORT_COMPILE_COMMANDS ON) if(AARE_FETCH_LMFIT) @@ -452,6 +456,7 @@ if(AARE_TESTS) ${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyFile.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyHelpers.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/RawFile.test.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/RawSubFile.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/utils/task.test.cpp ) diff --git a/include/aare/RawFile.hpp b/include/aare/RawFile.hpp index f744ac2..1cca1fd 100644 --- a/include/aare/RawFile.hpp +++ b/include/aare/RawFile.hpp @@ -30,22 +30,11 @@ struct ModuleConfig { * Consider using that unless you need raw file 
specific functionality. */ class RawFile : public FileInterface { - size_t n_subfiles{}; //f0,f1...fn - size_t n_subfile_parts{}; // d0,d1...dn - //TODO! move to vector of SubFile instead of pointers - std::vector> subfiles; //subfiles[f0,f1...fn][d0,d1...dn] - // std::vector positions; - + std::vector> m_subfiles; ModuleConfig cfg{0, 0}; - RawMasterFile m_master; - size_t m_current_frame{}; - - // std::vector m_module_pixel_0; - // size_t m_rows{}; - // size_t m_cols{}; - + size_t m_current_subfile{}; DetectorGeometry m_geometry; public: @@ -56,7 +45,7 @@ class RawFile : public FileInterface { */ RawFile(const std::filesystem::path &fname, const std::string &mode = "r"); - virtual ~RawFile() override; + virtual ~RawFile() override = default; Frame read_frame() override; Frame read_frame(size_t frame_number) override; @@ -80,7 +69,7 @@ class RawFile : public FileInterface { size_t cols() const override; size_t bitdepth() const override; xy geometry(); - size_t n_mod() const; + size_t n_modules() const; RawMasterFile master() const; @@ -115,9 +104,6 @@ class RawFile : public FileInterface { */ static DetectorHeader read_header(const std::filesystem::path &fname); - // void update_geometry_with_roi(); - int find_number_of_subfiles(); - void open_subfiles(); void find_geometry(); }; diff --git a/include/aare/RawMasterFile.hpp b/include/aare/RawMasterFile.hpp index beaeb29..4d143a6 100644 --- a/include/aare/RawMasterFile.hpp +++ b/include/aare/RawMasterFile.hpp @@ -121,6 +121,7 @@ class RawMasterFile { size_t total_frames_expected() const; xy geometry() const; + size_t n_modules() const; std::optional analog_samples() const; std::optional digital_samples() const; diff --git a/include/aare/RawSubFile.hpp b/include/aare/RawSubFile.hpp index 350a475..1059843 100644 --- a/include/aare/RawSubFile.hpp +++ b/include/aare/RawSubFile.hpp @@ -18,11 +18,20 @@ class RawSubFile { std::ifstream m_file; DetectorType m_detector_type; size_t m_bitdepth; - std::filesystem::path m_fname; 
+ std::filesystem::path m_path; //!< path to the subfile + std::string m_base_name; //!< base name used for formatting file names + size_t m_offset{}; //!< file index of the first file, allow starting at non zero file + size_t m_total_frames{}; //!< total number of frames in the series of files size_t m_rows{}; size_t m_cols{}; size_t m_bytes_per_frame{}; - size_t m_num_frames{}; + + + int m_module_index{}; + size_t m_current_file_index{}; //!< The index of the open file + size_t m_current_frame_index{}; //!< The index of the current frame (with reference to all files) + std::vector m_last_frame_in_file{}; //!< Used for seeking to the correct file + uint32_t m_pos_row{}; uint32_t m_pos_col{}; @@ -67,12 +76,17 @@ class RawSubFile { size_t pixels_per_frame() const { return m_rows * m_cols; } size_t bytes_per_pixel() const { return m_bitdepth / bits_per_byte; } - size_t frames_in_file() const { return m_num_frames; } + size_t frames_in_file() const { return m_total_frames; } private: template void read_with_map(std::byte *image_buf); + void parse_fname(const std::filesystem::path &fname); + void scan_files(); + void open_file(size_t file_index); + std::filesystem::path fpath(size_t file_index) const; + }; } // namespace aare \ No newline at end of file diff --git a/include/aare/algorithm.hpp b/include/aare/algorithm.hpp index fc7d51f..be2018f 100644 --- a/include/aare/algorithm.hpp +++ b/include/aare/algorithm.hpp @@ -107,5 +107,16 @@ std::vector cumsum(const std::vector& vec) { } +template bool all_equal(const Container &c) { + if (!c.empty() && + std::all_of(begin(c), end(c), + [c](const typename Container::value_type &element) { + return element == c.front(); + })) + return true; + return false; +} + + } // namespace aare \ No newline at end of file diff --git a/include/aare/defs.hpp b/include/aare/defs.hpp index 01d291b..ccf07a5 100644 --- a/include/aare/defs.hpp +++ b/include/aare/defs.hpp @@ -204,6 +204,8 @@ struct DetectorGeometry{ int module_gap_row{}; int 
module_gap_col{}; std::vector module_pixel_0; + + auto size() const { return module_pixel_0.size(); } }; struct ROI{ diff --git a/include/aare/logger.hpp b/include/aare/logger.hpp new file mode 100644 index 0000000..06e6feb --- /dev/null +++ b/include/aare/logger.hpp @@ -0,0 +1,139 @@ +#pragma once +/*Utility to log to console*/ + + +#include +#include +#include + +namespace aare { + +#define RED "\x1b[31m" +#define GREEN "\x1b[32m" +#define YELLOW "\x1b[33m" +#define BLUE "\x1b[34m" +#define MAGENTA "\x1b[35m" +#define CYAN "\x1b[36m" +#define GRAY "\x1b[37m" +#define DARKGRAY "\x1b[30m" + +#define BG_BLACK "\x1b[48;5;232m" +#define BG_RED "\x1b[41m" +#define BG_GREEN "\x1b[42m" +#define BG_YELLOW "\x1b[43m" +#define BG_BLUE "\x1b[44m" +#define BG_MAGENTA "\x1b[45m" +#define BG_CYAN "\x1b[46m" +#define RESET "\x1b[0m" +#define BOLD "\x1b[1m" + + +enum TLogLevel { + logERROR, + logWARNING, + logINFOBLUE, + logINFOGREEN, + logINFORED, + logINFOCYAN, + logINFOMAGENTA, + logINFO, + logDEBUG, + logDEBUG1, + logDEBUG2, + logDEBUG3, + logDEBUG4, + logDEBUG5 +}; + +// Compiler should optimize away anything below this value +#ifndef AARE_LOG_LEVEL +#define AARE_LOG_LEVEL "LOG LEVEL NOT SET IN CMAKE" //This is configured in the main CMakeLists.txt +#endif + +#define __AT__ \ + std::string(__FILE__) + std::string("::") + std::string(__func__) + \ + std::string("(): ") +#define __SHORT_FORM_OF_FILE__ \ + (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__) +#define __SHORT_AT__ \ + std::string(__SHORT_FORM_OF_FILE__) + std::string("::") + \ + std::string(__func__) + std::string("(): ") + +class Logger { + std::ostringstream os; + TLogLevel m_level = AARE_LOG_LEVEL; + + public: + Logger() = default; + explicit Logger(TLogLevel level) : m_level(level){}; + ~Logger() { + // output in the destructor to allow for << syntax + os << RESET << '\n'; + std::clog << os.str() << std::flush; // Single write + } + + static TLogLevel &ReportingLevel() { // singelton eeh TODO! 
Do we need a runtime option? + static TLogLevel reportingLevel = logDEBUG5; + return reportingLevel; + } + + // Danger this buffer need as many elements as TLogLevel + static const char *Color(TLogLevel level) noexcept { + static const char *const colors[] = { + RED BOLD, YELLOW BOLD, BLUE, GREEN, RED, CYAN, MAGENTA, + RESET, RESET, RESET, RESET, RESET, RESET, RESET}; + // out of bounds + if (level < 0 || level >= sizeof(colors) / sizeof(colors[0])) { + return RESET; + } + return colors[level]; + } + + // Danger this buffer need as many elements as TLogLevel + static std::string ToString(TLogLevel level) { + static const char *const buffer[] = { + "ERROR", "WARNING", "INFO", "INFO", "INFO", + "INFO", "INFO", "INFO", "DEBUG", "DEBUG1", + "DEBUG2", "DEBUG3", "DEBUG4", "DEBUG5"}; + // out of bounds + if (level < 0 || level >= sizeof(buffer) / sizeof(buffer[0])) { + return "UNKNOWN"; + } + return buffer[level]; + } + + std::ostringstream &Get() { + os << Color(m_level) << "- " << Timestamp() << " " << ToString(m_level) + << ": "; + return os; + } + + static std::string Timestamp() { + constexpr size_t buffer_len = 12; + char buffer[buffer_len]; + time_t t; + ::time(&t); + tm r; + strftime(buffer, buffer_len, "%X", localtime_r(&t, &r)); + buffer[buffer_len - 1] = '\0'; + struct timeval tv; + gettimeofday(&tv, nullptr); + constexpr size_t result_len = 100; + char result[result_len]; + snprintf(result, result_len, "%s.%03ld", buffer, + static_cast(tv.tv_usec) / 1000); + result[result_len - 1] = '\0'; + return result; + } +}; + +// TODO! Do we need to keep the runtime option? 
+#define LOG(level) \ + if (level > AARE_LOG_LEVEL) \ + ; \ + else if (level > aare::Logger::ReportingLevel()) \ + ; \ + else \ + aare::Logger(level).Get() + +} // namespace aare diff --git a/python/examples/play.py b/python/examples/play.py index da469dc..0f4feca 100644 --- a/python/examples/play.py +++ b/python/examples/play.py @@ -1,79 +1,89 @@ import sys sys.path.append('/home/l_msdetect/erik/aare/build') -from aare._aare import ClusterVector_i, Interpolator -import pickle -import numpy as np -import matplotlib.pyplot as plt -import boost_histogram as bh -import torch -import math -import time +from aare import RawSubFile, DetectorType, RawFile + +from pathlib import Path +path = Path("/home/l_msdetect/erik/data/aare-test-data/raw/jungfrau/") +f = RawSubFile(path/"jungfrau_single_d0_f0_0.raw", DetectorType.Jungfrau, 512, 1024, 16) + +# f = RawFile(path/"jungfrau_single_master_0.json") + + +# from aare._aare import ClusterVector_i, Interpolator + +# import pickle +# import numpy as np +# import matplotlib.pyplot as plt +# import boost_histogram as bh +# import torch +# import math +# import time -def gaussian_2d(mx, my, sigma = 1, res=100, grid_size = 2): - """ - Generate a 2D gaussian as position mx, my, with sigma=sigma. - The gaussian is placed on a 2x2 pixel matrix with resolution - res in one dimesion. - """ - x = torch.linspace(0, pixel_size*grid_size, res) - x,y = torch.meshgrid(x,x, indexing="ij") - return 1 / (2*math.pi*sigma**2) * \ - torch.exp(-((x - my)**2 / (2*sigma**2) + (y - mx)**2 / (2*sigma**2))) +# def gaussian_2d(mx, my, sigma = 1, res=100, grid_size = 2): +# """ +# Generate a 2D gaussian as position mx, my, with sigma=sigma. +# The gaussian is placed on a 2x2 pixel matrix with resolution +# res in one dimesion. 
+# """ +# x = torch.linspace(0, pixel_size*grid_size, res) +# x,y = torch.meshgrid(x,x, indexing="ij") +# return 1 / (2*math.pi*sigma**2) * \ +# torch.exp(-((x - my)**2 / (2*sigma**2) + (y - mx)**2 / (2*sigma**2))) -scale = 1000 #Scale factor when converting to integer -pixel_size = 25 #um -grid = 2 -resolution = 100 -sigma_um = 10 -xa = np.linspace(0,grid*pixel_size,resolution) -ticks = [0, 25, 50] +# scale = 1000 #Scale factor when converting to integer +# pixel_size = 25 #um +# grid = 2 +# resolution = 100 +# sigma_um = 10 +# xa = np.linspace(0,grid*pixel_size,resolution) +# ticks = [0, 25, 50] -hit = np.array((20,20)) -etahist_fname = "/home/l_msdetect/erik/tmp/test_hist.pkl" +# hit = np.array((20,20)) +# etahist_fname = "/home/l_msdetect/erik/tmp/test_hist.pkl" -local_resolution = 99 -grid_size = 3 -xaxis = np.linspace(0,grid_size*pixel_size, local_resolution) -t = gaussian_2d(hit[0],hit[1], grid_size = grid_size, sigma = 10, res = local_resolution) -pixels = t.reshape(grid_size, t.shape[0] // grid_size, grid_size, t.shape[1] // grid_size).sum(axis = 3).sum(axis = 1) -pixels = pixels.numpy() -pixels = (pixels*scale).astype(np.int32) -v = ClusterVector_i(3,3) -v.push_back(1,1, pixels) +# local_resolution = 99 +# grid_size = 3 +# xaxis = np.linspace(0,grid_size*pixel_size, local_resolution) +# t = gaussian_2d(hit[0],hit[1], grid_size = grid_size, sigma = 10, res = local_resolution) +# pixels = t.reshape(grid_size, t.shape[0] // grid_size, grid_size, t.shape[1] // grid_size).sum(axis = 3).sum(axis = 1) +# pixels = pixels.numpy() +# pixels = (pixels*scale).astype(np.int32) +# v = ClusterVector_i(3,3) +# v.push_back(1,1, pixels) -with open(etahist_fname, "rb") as f: - hist = pickle.load(f) -eta = hist.view().copy() -etabinsx = np.array(hist.axes.edges.T[0].flat) -etabinsy = np.array(hist.axes.edges.T[1].flat) -ebins = np.array(hist.axes.edges.T[2].flat) -p = Interpolator(eta, etabinsx[0:-1], etabinsy[0:-1], ebins[0:-1]) +# with open(etahist_fname, "rb") as f: +# 
hist = pickle.load(f) +# eta = hist.view().copy() +# etabinsx = np.array(hist.axes.edges.T[0].flat) +# etabinsy = np.array(hist.axes.edges.T[1].flat) +# ebins = np.array(hist.axes.edges.T[2].flat) +# p = Interpolator(eta, etabinsx[0:-1], etabinsy[0:-1], ebins[0:-1]) -#Generate the hit +# #Generate the hit -tmp = p.interpolate(v) -print(f'tmp:{tmp}') -pos = np.array((tmp['x'], tmp['y']))*25 +# tmp = p.interpolate(v) +# print(f'tmp:{tmp}') +# pos = np.array((tmp['x'], tmp['y']))*25 -print(pixels) -fig, ax = plt.subplots(figsize = (7,7)) -ax.pcolormesh(xaxis, xaxis, t) -ax.plot(*pos, 'o') -ax.set_xticks([0,25,50,75]) -ax.set_yticks([0,25,50,75]) -ax.set_xlim(0,75) -ax.set_ylim(0,75) -ax.grid() -print(f'{hit=}') -print(f'{pos=}') \ No newline at end of file +# print(pixels) +# fig, ax = plt.subplots(figsize = (7,7)) +# ax.pcolormesh(xaxis, xaxis, t) +# ax.plot(*pos, 'o') +# ax.set_xticks([0,25,50,75]) +# ax.set_yticks([0,25,50,75]) +# ax.set_xlim(0,75) +# ax.set_ylim(0,75) +# ax.grid() +# print(f'{hit=}') +# print(f'{pos=}') \ No newline at end of file diff --git a/python/src/raw_file.hpp b/python/src/raw_file.hpp index 38b4896..8d72220 100644 --- a/python/src/raw_file.hpp +++ b/python/src/raw_file.hpp @@ -32,7 +32,7 @@ void define_raw_file_io_bindings(py::module &m) { shape.push_back(self.cols()); // return headers from all subfiles - py::array_t header(self.n_mod()); + py::array_t header(self.n_modules()); const uint8_t item_size = self.bytes_per_pixel(); if (item_size == 1) { @@ -61,10 +61,10 @@ void define_raw_file_io_bindings(py::module &m) { // return headers from all subfiles py::array_t header; - if (self.n_mod() == 1) { + if (self.n_modules() == 1) { header = py::array_t(n_frames); } else { - header = py::array_t({self.n_mod(), n_frames}); + header = py::array_t({self.n_modules(), n_frames}); } // py::array_t header({self.n_mod(), n_frames}); @@ -100,7 +100,7 @@ void define_raw_file_io_bindings(py::module &m) { .def_property_readonly("cols", &RawFile::cols) 
.def_property_readonly("bitdepth", &RawFile::bitdepth) .def_property_readonly("geometry", &RawFile::geometry) - .def_property_readonly("n_mod", &RawFile::n_mod) + .def_property_readonly("n_modules", &RawFile::n_modules) .def_property_readonly("detector_type", &RawFile::detector_type) .def_property_readonly("master", &RawFile::master); } \ No newline at end of file diff --git a/python/tests/test_RawSubFile.py b/python/tests/test_RawSubFile.py index a5eea91..cdde248 100644 --- a/python/tests/test_RawSubFile.py +++ b/python/tests/test_RawSubFile.py @@ -5,32 +5,35 @@ from aare import RawSubFile, DetectorType @pytest.mark.files def test_read_a_jungfrau_RawSubFile(test_data_path): + + # Starting with f1 there is now 7 frames left in the series of files with RawSubFile(test_data_path / "raw/jungfrau/jungfrau_single_d0_f1_0.raw", DetectorType.Jungfrau, 512, 1024, 16) as f: - assert f.frames_in_file == 3 + assert f.frames_in_file == 7 headers, frames = f.read() - assert headers.size == 3 - assert frames.shape == (3, 512, 1024) + assert headers.size == 7 + assert frames.shape == (7, 512, 1024) - # Frame numbers in this file should be 4, 5, 6 - for i,h in zip(range(4,7,1), headers): + + for i,h in zip(range(4,11,1), headers): assert h["frameNumber"] == i # Compare to canned data using numpy data = np.load(test_data_path / "raw/jungfrau/jungfrau_single_0.npy") - assert np.all(data[3:6] == frames) + assert np.all(data[3:] == frames) @pytest.mark.files def test_iterate_over_a_jungfrau_RawSubFile(test_data_path): data = np.load(test_data_path / "raw/jungfrau/jungfrau_single_0.npy") + # Given the first subfile in a series we can read all frames from f0, f1, f2...fN with RawSubFile(test_data_path / "raw/jungfrau/jungfrau_single_d0_f0_0.raw", DetectorType.Jungfrau, 512, 1024, 16) as f: i = 0 for header, frame in f: assert header["frameNumber"] == i+1 assert np.all(frame == data[i]) i += 1 - assert i == 3 - assert header["frameNumber"] == 3 \ No newline at end of file + assert i == 
10 + assert header["frameNumber"] == 10 \ No newline at end of file diff --git a/src/RawFile.cpp b/src/RawFile.cpp index c576453..122cf96 100644 --- a/src/RawFile.cpp +++ b/src/RawFile.cpp @@ -1,6 +1,8 @@ #include "aare/RawFile.hpp" +#include "aare/algorithm.hpp" #include "aare/PixelMap.hpp" #include "aare/defs.hpp" +#include "aare/logger.hpp" #include "aare/geo_helpers.hpp" #include @@ -14,23 +16,14 @@ RawFile::RawFile(const std::filesystem::path &fname, const std::string &mode) : m_master(fname) { m_mode = mode; if (mode == "r") { - - n_subfiles = find_number_of_subfiles(); // f0,f1...fn - n_subfile_parts = - m_master.geometry().col * m_master.geometry().row; // d0,d1...dn - - - find_geometry(); - if (m_master.roi()){ m_geometry = update_geometry_with_roi(m_geometry, m_master.roi().value()); } - open_subfiles(); } else { throw std::runtime_error(LOCATION + - "Unsupported mode. Can only read RawFiles."); + " Unsupported mode. Can only read RawFiles."); } } @@ -67,12 +60,12 @@ void RawFile::read_into(std::byte *image_buf, size_t n_frames, DetectorHeader *h this->get_frame_into(m_current_frame++, image_buf, header); image_buf += bytes_per_frame(); if(header) - header+=n_mod(); + header+=n_modules(); } } -size_t RawFile::n_mod() const { return n_subfile_parts; } +size_t RawFile::n_modules() const { return m_master.n_modules(); } size_t RawFile::bytes_per_frame() { @@ -106,17 +99,11 @@ xy RawFile::geometry() { return m_master.geometry(); } void RawFile::open_subfiles() { if (m_mode == "r") - for (size_t i = 0; i != n_subfiles; ++i) { - auto v = std::vector(n_subfile_parts); - for (size_t j = 0; j != n_subfile_parts; ++j) { - auto pos = m_geometry.module_pixel_0[j]; - v[j] = new RawSubFile(m_master.data_fname(j, i), - m_master.detector_type(), pos.height, - pos.width, m_master.bitdepth(), - pos.row_index, pos.col_index); - - } - subfiles.push_back(v); + for (size_t i = 0; i != n_modules(); ++i) { + auto pos = m_geometry.module_pixel_0[i]; + 
m_subfiles.emplace_back(std::make_unique( + m_master.data_fname(i, 0), m_master.detector_type(), pos.height, + pos.width, m_master.bitdepth(), pos.row_index, pos.col_index)); } else { throw std::runtime_error(LOCATION + @@ -141,18 +128,6 @@ DetectorHeader RawFile::read_header(const std::filesystem::path &fname) { return h; } -int RawFile::find_number_of_subfiles() { - int n_files = 0; - // f0,f1...fn How many files is the data split into? - while (std::filesystem::exists(m_master.data_fname(0, n_files))) - n_files++; // increment after test - -#ifdef AARE_VERBOSE - fmt::print("Found: {} subfiles\n", n_files); -#endif - return n_files; - -} RawMasterFile RawFile::master() const { return m_master; } @@ -168,7 +143,7 @@ void RawFile::find_geometry() { uint16_t c{}; - for (size_t i = 0; i < n_subfile_parts; i++) { + for (size_t i = 0; i < n_modules(); i++) { auto h = read_header(m_master.data_fname(i, 0)); r = std::max(r, h.row); c = std::max(c, h.column); @@ -210,70 +185,58 @@ size_t RawFile::bytes_per_pixel() const { } void RawFile::get_frame_into(size_t frame_index, std::byte *frame_buffer, DetectorHeader *header) { + LOG(logDEBUG) << "RawFile::get_frame_into(" << frame_index << ")"; if (frame_index >= total_frames()) { throw std::runtime_error(LOCATION + "Frame number out of range"); } - std::vector frame_numbers(n_subfile_parts); - std::vector frame_indices(n_subfile_parts, frame_index); + std::vector frame_numbers(n_modules()); + std::vector frame_indices(n_modules(), frame_index); // sync the frame numbers - if (n_subfile_parts != 1) { - for (size_t part_idx = 0; part_idx != n_subfile_parts; ++part_idx) { - auto subfile_id = frame_index / m_master.max_frames_per_file(); - if (subfile_id >= subfiles.size()) { - throw std::runtime_error(LOCATION + - " Subfile out of range. 
Possible missing data."); - } - frame_numbers[part_idx] = - subfiles[subfile_id][part_idx]->frame_number( - frame_index % m_master.max_frames_per_file()); + if (n_modules() != 1) { //if we have more than one module + for (size_t part_idx = 0; part_idx != n_modules(); ++part_idx) { + frame_numbers[part_idx] = m_subfiles[part_idx]->frame_number(frame_index); } + // 1. if frame number vector is the same break - while (std::adjacent_find(frame_numbers.begin(), frame_numbers.end(), - std::not_equal_to<>()) != - frame_numbers.end()) { + while (!all_equal(frame_numbers)) { + // 2. find the index of the minimum frame number, auto min_frame_idx = std::distance( frame_numbers.begin(), std::min_element(frame_numbers.begin(), frame_numbers.end())); + // 3. increase its index and update its respective frame number frame_indices[min_frame_idx]++; + // 4. if we can't increase its index => throw error if (frame_indices[min_frame_idx] >= total_frames()) { throw std::runtime_error(LOCATION + "Frame number out of range"); } - auto subfile_id = - frame_indices[min_frame_idx] / m_master.max_frames_per_file(); + frame_numbers[min_frame_idx] = - subfiles[subfile_id][min_frame_idx]->frame_number( - frame_indices[min_frame_idx] % - m_master.max_frames_per_file()); + m_subfiles[min_frame_idx]->frame_number(frame_indices[min_frame_idx]); } } if (m_master.geometry().col == 1) { // get the part from each subfile and copy it to the frame - for (size_t part_idx = 0; part_idx != n_subfile_parts; ++part_idx) { + for (size_t part_idx = 0; part_idx != n_modules(); ++part_idx) { auto corrected_idx = frame_indices[part_idx]; - auto subfile_id = corrected_idx / m_master.max_frames_per_file(); - if (subfile_id >= subfiles.size()) { - throw std::runtime_error(LOCATION + - " Subfile out of range. 
Possible missing data."); - } - + // This is where we start writing auto offset = (m_geometry.module_pixel_0[part_idx].origin_y * m_geometry.pixels_x + m_geometry.module_pixel_0[part_idx].origin_x)*m_master.bitdepth()/8; if (m_geometry.module_pixel_0[part_idx].origin_x!=0) - throw std::runtime_error(LOCATION + "Implementation error. x pos not 0."); + throw std::runtime_error(LOCATION + " Implementation error. x pos not 0."); - //TODO! Risk for out of range access - subfiles[subfile_id][part_idx]->seek(corrected_idx % m_master.max_frames_per_file()); - subfiles[subfile_id][part_idx]->read_into(frame_buffer + offset, header); + //TODO! What if the files don't match? + m_subfiles[part_idx]->seek(corrected_idx); + m_subfiles[part_idx]->read_into(frame_buffer + offset, header); if (header) ++header; } @@ -282,26 +245,21 @@ void RawFile::get_frame_into(size_t frame_index, std::byte *frame_buffer, Detect //TODO! should we read row by row? // create a buffer large enough to hold a full module - auto bytes_per_part = m_master.pixels_y() * m_master.pixels_x() * m_master.bitdepth() / 8; // TODO! replace with image_size_in_bytes + auto *part_buffer = new std::byte[bytes_per_part]; // TODO! if we have many submodules we should reorder them on the module // level - for (size_t part_idx = 0; part_idx != n_subfile_parts; ++part_idx) { + for (size_t part_idx = 0; part_idx != n_modules(); ++part_idx) { auto pos = m_geometry.module_pixel_0[part_idx]; auto corrected_idx = frame_indices[part_idx]; - auto subfile_id = corrected_idx / m_master.max_frames_per_file(); - if (subfile_id >= subfiles.size()) { - throw std::runtime_error(LOCATION + - " Subfile out of range. 
Possible missing data."); - } - subfiles[subfile_id][part_idx]->seek(corrected_idx % m_master.max_frames_per_file()); - subfiles[subfile_id][part_idx]->read_into(part_buffer, header); + m_subfiles[part_idx]->seek(corrected_idx); + m_subfiles[part_idx]->read_into(part_buffer, header); if(header) ++header; @@ -321,6 +279,7 @@ void RawFile::get_frame_into(size_t frame_index, std::byte *frame_buffer, Detect } delete[] part_buffer; } + } std::vector RawFile::read_n(size_t n_frames) { @@ -337,27 +296,8 @@ size_t RawFile::frame_number(size_t frame_index) { if (frame_index >= m_master.frames_in_file()) { throw std::runtime_error(LOCATION + " Frame number out of range"); } - size_t subfile_id = frame_index / m_master.max_frames_per_file(); - if (subfile_id >= subfiles.size()) { - throw std::runtime_error( - LOCATION + " Subfile out of range. Possible missing data."); - } - return subfiles[subfile_id][0]->frame_number( - frame_index % m_master.max_frames_per_file()); + return m_subfiles[0]->frame_number(frame_index); } -RawFile::~RawFile() { - - // TODO! 
Fix this, for file closing - for (auto &vec : subfiles) { - for (auto *subfile : vec) { - delete subfile; - } - } -} - - - - } // namespace aare diff --git a/src/RawFile.test.cpp b/src/RawFile.test.cpp index 5f9b2e1..9109985 100644 --- a/src/RawFile.test.cpp +++ b/src/RawFile.test.cpp @@ -99,11 +99,11 @@ TEST_CASE("Read frame numbers from a raw file", "[.integration]") { } } -TEST_CASE("Compare reading from a numpy file with a raw file", "[.integration]") { - auto fpath_raw = test_data_path() / "jungfrau" / "jungfrau_single_master_0.json"; +TEST_CASE("Compare reading from a numpy file with a raw file", "[.files]") { + auto fpath_raw = test_data_path() / "raw/jungfrau" / "jungfrau_single_master_0.json"; REQUIRE(std::filesystem::exists(fpath_raw)); - auto fpath_npy = test_data_path() / "jungfrau" / "jungfrau_single_0.npy"; + auto fpath_npy = test_data_path() / "raw/jungfrau" / "jungfrau_single_0.npy"; REQUIRE(std::filesystem::exists(fpath_npy)); File raw(fpath_raw, "r"); @@ -113,6 +113,7 @@ TEST_CASE("Compare reading from a numpy file with a raw file", "[.integration]") CHECK(npy.total_frames() == 10); for (size_t i = 0; i < 10; ++i) { + CHECK(raw.tell() == i); auto raw_frame = raw.read_frame(); auto npy_frame = npy.read_frame(); CHECK((raw_frame.view() == npy_frame.view())); diff --git a/src/RawMasterFile.cpp b/src/RawMasterFile.cpp index 33807d4..8a2db87 100644 --- a/src/RawMasterFile.cpp +++ b/src/RawMasterFile.cpp @@ -140,6 +140,10 @@ std::optional RawMasterFile::number_of_rows() const { xy RawMasterFile::geometry() const { return m_geometry; } +size_t RawMasterFile::n_modules() const { + return m_geometry.row * m_geometry.col; +} + std::optional RawMasterFile::quad() const { return m_quad; } // optional values, these may or may not be present in the master file diff --git a/src/RawSubFile.cpp b/src/RawSubFile.cpp index 9e7a421..01ef48c 100644 --- a/src/RawSubFile.cpp +++ b/src/RawSubFile.cpp @@ -1,10 +1,14 @@ #include "aare/RawSubFile.hpp" #include 
"aare/PixelMap.hpp" +#include "aare/algorithm.hpp" #include "aare/utils/ifstream_helpers.hpp" +#include "aare/logger.hpp" + + #include // memcpy #include #include - +#include namespace aare { @@ -12,51 +16,51 @@ namespace aare { RawSubFile::RawSubFile(const std::filesystem::path &fname, DetectorType detector, size_t rows, size_t cols, size_t bitdepth, uint32_t pos_row, uint32_t pos_col) - : m_detector_type(detector), m_bitdepth(bitdepth), m_fname(fname), + : m_detector_type(detector), m_bitdepth(bitdepth), m_rows(rows), m_cols(cols), m_bytes_per_frame((m_bitdepth / 8) * m_rows * m_cols), m_pos_row(pos_row), m_pos_col(pos_col) { + + LOG(logDEBUG) << "RawSubFile::RawSubFile()"; if (m_detector_type == DetectorType::Moench03_old) { m_pixel_map = GenerateMoench03PixelMap(); } else if (m_detector_type == DetectorType::Eiger && m_pos_row % 2 == 0) { m_pixel_map = GenerateEigerFlipRowsPixelMap(); } - if (std::filesystem::exists(fname)) { - m_num_frames = std::filesystem::file_size(fname) / - (sizeof(DetectorHeader) + rows * cols * bitdepth / 8); - } else { - throw std::runtime_error( - LOCATION + fmt::format("File {} does not exist", m_fname.string())); - } - // fp = fopen(m_fname.string().c_str(), "rb"); - m_file.open(m_fname, std::ios::binary); - if (!m_file.is_open()) { - throw std::runtime_error( - LOCATION + fmt::format("Could not open file {}", m_fname.string())); - } - -#ifdef AARE_VERBOSE - fmt::print("Opened file: {} with {} frames\n", m_fname.string(), m_num_frames); - fmt::print("m_rows: {}, m_cols: {}, m_bitdepth: {}\n", m_rows, m_cols, - m_bitdepth); - fmt::print("file size: {}\n", std::filesystem::file_size(fname)); -#endif + parse_fname(fname); + scan_files(); + open_file(m_current_file_index); // open the first file } void RawSubFile::seek(size_t frame_index) { - if (frame_index >= m_num_frames) { - throw std::runtime_error(LOCATION + fmt::format("Frame index {} out of range in a file with {} frames", frame_index, m_num_frames)); + LOG(logDEBUG) << 
"RawSubFile::seek(" << frame_index << ")"; + if (frame_index >= m_total_frames) { + throw std::runtime_error(LOCATION + " Frame index out of range: " + + std::to_string(frame_index)); } - m_file.seekg((sizeof(DetectorHeader) + bytes_per_frame()) * frame_index); + m_current_frame_index = frame_index; + auto file_index = first_larger(m_last_frame_in_file, frame_index); + + if (file_index != m_current_file_index) + open_file(file_index); + + auto frame_offset = (file_index) + ? frame_index - m_last_frame_in_file[file_index - 1] + : frame_index; + auto byte_offset = frame_offset * (m_bytes_per_frame + sizeof(DetectorHeader)); + m_file.seekg(byte_offset); } size_t RawSubFile::tell() { - return m_file.tellg() / (sizeof(DetectorHeader) + bytes_per_frame()); + LOG(logDEBUG) << "RawSubFile::tell():" << m_current_frame_index; + return m_current_frame_index; } void RawSubFile::read_into(std::byte *image_buf, DetectorHeader *header) { + LOG(logDEBUG) << "RawSubFile::read_into()"; + if (header) { m_file.read(reinterpret_cast(header), sizeof(DetectorHeader)); } else { @@ -90,6 +94,13 @@ void RawSubFile::read_into(std::byte *image_buf, DetectorHeader *header) { if (m_file.fail()){ throw std::runtime_error(LOCATION + ifstream_error_msg(m_file)); } + + ++ m_current_frame_index; + if (m_current_frame_index >= m_last_frame_in_file[m_current_file_index] && + (m_current_frame_index < m_total_frames)) { + ++m_current_file_index; + open_file(m_current_file_index); + } } void RawSubFile::read_into(std::byte *image_buf, size_t n_frames, DetectorHeader *header) { @@ -130,4 +141,69 @@ size_t RawSubFile::frame_number(size_t frame_index) { return h.frameNumber; } +void RawSubFile::parse_fname(const std::filesystem::path &fname) { + LOG(logDEBUG) << "RawSubFile::parse_fname()"; + // data has the format: /path/too/data/jungfrau_single_d0_f1_0.raw + // d0 is the module index, will not change for this file + // f1 is the file index - thi is the one we need + // 0 is the measurement index, will not 
change + m_path = fname.parent_path(); + m_base_name = fname.filename(); + + // Regex to extract numbers after 'd' and 'f' + std::regex pattern(R"(^(.*_d)(\d+)(_f)(\d+)(_\d+\.raw)$)"); + std::smatch match; + + if (std::regex_match(m_base_name, match, pattern)) { + m_offset = std::stoi(match[4].str()); // find the first file index in case of a truncated series + m_base_name = match[1].str() + match[2].str() + match[3].str() + "{}" + match[5].str(); + LOG(logDEBUG) << "Base name: " << m_base_name; + LOG(logDEBUG) << "Offset: " << m_offset; + LOG(logDEBUG) << "Path: " << m_path.string(); + } else { + throw std::runtime_error( + LOCATION + fmt::format("Could not parse file name {}", fname.string())); + } +} + +std::filesystem::path RawSubFile::fpath(size_t file_index) const { + auto fname = fmt::format(m_base_name, file_index); + return m_path / fname; +} + +void RawSubFile::open_file(size_t file_index) { + m_file.close(); + auto fname = fpath(file_index+m_offset); + LOG(logDEBUG) << "RawSubFile::open_file(): " << fname.string(); + m_file.open(fname, std::ios::binary); + if (!m_file.is_open()) { + throw std::runtime_error( + LOCATION + fmt::format("Could not open file {}", fpath(file_index).string())); + } + m_current_file_index = file_index; +} + +void RawSubFile::scan_files() { + LOG(logDEBUG) << "RawSubFile::scan_files()"; + // find how many files we have and the number of frames in each file + m_last_frame_in_file.clear(); + size_t file_index = m_offset; + + while (std::filesystem::exists(fpath(file_index))) { + auto n_frames = std::filesystem::file_size(fpath(file_index)) / + (m_bytes_per_frame + sizeof(DetectorHeader)); + m_last_frame_in_file.push_back(n_frames); + LOG(logDEBUG) << "Found: " << n_frames << " frames in file: " << fpath(file_index).string(); + ++file_index; + } + + // find where we need to open the next file and total number of frames + m_last_frame_in_file = cumsum(m_last_frame_in_file); + if(m_last_frame_in_file.empty()){ + m_total_frames = 0; + 
}else{ + m_total_frames = m_last_frame_in_file.back(); + } +} + } // namespace aare \ No newline at end of file diff --git a/src/RawSubFile.test.cpp b/src/RawSubFile.test.cpp new file mode 100644 index 0000000..89cf858 --- /dev/null +++ b/src/RawSubFile.test.cpp @@ -0,0 +1,76 @@ +#include "aare/RawSubFile.hpp" +#include "aare/File.hpp" +#include "aare/NDArray.hpp" +#include +#include "test_config.hpp" + +using namespace aare; + +TEST_CASE("Read frames directly from a RawSubFile", "[.files]"){ + auto fpath_raw = test_data_path() / "raw/jungfrau" / "jungfrau_single_d0_f0_0.raw"; + REQUIRE(std::filesystem::exists(fpath_raw)); + + RawSubFile f(fpath_raw, DetectorType::Jungfrau, 512, 1024, 16); + REQUIRE(f.rows() == 512); + REQUIRE(f.cols() == 1024); + REQUIRE(f.pixels_per_frame() == 512 * 1024); + REQUIRE(f.bytes_per_frame() == 512 * 1024 * 2); + REQUIRE(f.bytes_per_pixel() == 2); + + + auto fpath_npy = test_data_path() / "raw/jungfrau" / "jungfrau_single_0.npy"; + REQUIRE(std::filesystem::exists(fpath_npy)); + + //Numpy file with the same data to use as reference + File npy(fpath_npy, "r"); + + CHECK(f.frames_in_file() == 10); + CHECK(npy.total_frames() == 10); + + + DetectorHeader header{}; + NDArray image({static_cast(f.rows()), static_cast(f.cols())}); + for (size_t i = 0; i < 10; ++i) { + CHECK(f.tell() == i); + f.read_into(image.buffer(), &header); + auto npy_frame = npy.read_frame(); + CHECK((image.view() == npy_frame.view())); + } +} + +TEST_CASE("Read frames directly from a RawSubFile starting at the second file", "[.files]"){ + // we know this file has 10 frames with frame numbers 1 to 10 + // f0 1,2,3 + // f1 4,5,6 <-- starting here + // f2 7,8,9 + // f3 10 + + auto fpath_raw = test_data_path() / "raw/jungfrau" / "jungfrau_single_d0_f1_0.raw"; + REQUIRE(std::filesystem::exists(fpath_raw)); + + RawSubFile f(fpath_raw, DetectorType::Jungfrau, 512, 1024, 16); + + + auto fpath_npy = test_data_path() / "raw/jungfrau" / "jungfrau_single_0.npy"; + 
REQUIRE(std::filesystem::exists(fpath_npy)); + + //Numpy file with the same data to use as reference + File npy(fpath_npy, "r"); + npy.seek(3); + + CHECK(f.frames_in_file() == 7); + CHECK(npy.total_frames() == 10); + + + DetectorHeader header{}; + NDArray image({static_cast(f.rows()), static_cast(f.cols())}); + for (size_t i = 0; i < 7; ++i) { + CHECK(f.tell() == i); + f.read_into(image.buffer(), &header); + // frame numbers start at 1 frame index at 0 + // adding 3 + 1 to verify the frame number + CHECK(header.frameNumber == i + 4); + auto npy_frame = npy.read_frame(); + CHECK((image.view() == npy_frame.view())); + } +} \ No newline at end of file diff --git a/src/algorithm.test.cpp b/src/algorithm.test.cpp index 5452fcf..6bd707b 100644 --- a/src/algorithm.test.cpp +++ b/src/algorithm.test.cpp @@ -160,3 +160,36 @@ TEST_CASE("cumsum works with negative numbers", "[algorithm]") { REQUIRE(result[3] == -6); REQUIRE(result[4] == -10); } + + +TEST_CASE("cumsum on an empty vector", "[algorithm]") { + std::vector vec = {}; + auto result = aare::cumsum(vec); + REQUIRE(result.size() == 0); + +} + +TEST_CASE("All equal on an empty vector is false", "[algorithm]") { + std::vector vec = {}; + REQUIRE(aare::all_equal(vec) == false); +} + +TEST_CASE("All equal on a vector with 1 element is true", "[algorithm]") { + std::vector vec = {1}; + REQUIRE(aare::all_equal(vec) == true); +} + +TEST_CASE("All equal on a vector with 2 elements is true", "[algorithm]") { + std::vector vec = {1, 1}; + REQUIRE(aare::all_equal(vec) == true); +} + +TEST_CASE("All equal on a vector with two different elements is false", "[algorithm]") { + std::vector vec = {1, 2}; + REQUIRE(aare::all_equal(vec) == false); +} + +TEST_CASE("Last element is different", "[algorithm]") { + std::vector vec = {1, 1, 1, 1, 2}; + REQUIRE(aare::all_equal(vec) == false); +} \ No newline at end of file From f2a024644be83f26e224dd7ceb9d10bd4072cd67 Mon Sep 17 00:00:00 2001 From: froejdh_e Date: Thu, 22 May 2025 11:10:23 +0200 
Subject: [PATCH 119/120] bumped version upload on release --- .github/workflows/build_and_deploy_conda.yml | 6 +++--- VERSION | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/build_and_deploy_conda.yml b/.github/workflows/build_and_deploy_conda.yml index 65483c3..8917419 100644 --- a/.github/workflows/build_and_deploy_conda.yml +++ b/.github/workflows/build_and_deploy_conda.yml @@ -1,9 +1,9 @@ name: Build pkgs and deploy if on main on: - push: - branches: - - main + release: + types: + - published jobs: build: diff --git a/VERSION b/VERSION index bd52db8..ae365e4 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -0.0.0 \ No newline at end of file +2025.5.22 \ No newline at end of file From 69964e08d59b48cf82239bf637215f17989ea224 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Erik=20Fr=C3=B6jdh?= Date: Tue, 3 Jun 2025 08:43:40 +0200 Subject: [PATCH 120/120] Refactor cluster bindings (#185) - Split up the file for cluster bindings - new file names according to bind_ClassName.hpp --- python/src/bind_Cluster.hpp | 64 ++++++ python/src/bind_ClusterCollector.hpp | 46 ++++ ...{cluster_file.hpp => bind_ClusterFile.hpp} | 2 +- python/src/bind_ClusterFileSink.hpp | 44 ++++ python/src/bind_ClusterFinder.hpp | 77 +++++++ python/src/bind_ClusterFinderMT.hpp | 81 +++++++ python/src/bind_ClusterVector.hpp | 4 +- python/src/cluster.hpp | 211 ------------------ python/src/module.cpp | 80 +++---- 9 files changed, 358 insertions(+), 251 deletions(-) create mode 100644 python/src/bind_Cluster.hpp create mode 100644 python/src/bind_ClusterCollector.hpp rename python/src/{cluster_file.hpp => bind_ClusterFile.hpp} (98%) create mode 100644 python/src/bind_ClusterFileSink.hpp create mode 100644 python/src/bind_ClusterFinder.hpp create mode 100644 python/src/bind_ClusterFinderMT.hpp delete mode 100644 python/src/cluster.hpp diff --git a/python/src/bind_Cluster.hpp b/python/src/bind_Cluster.hpp new file mode 100644 index 0000000..daf0946 --- /dev/null +++ 
b/python/src/bind_Cluster.hpp @@ -0,0 +1,64 @@ +#include "aare/Cluster.hpp" + +#include +#include +#include +#include +#include +#include +#include + +namespace py = pybind11; +using pd_type = double; + +using namespace aare; + +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wunused-parameter" + +template +void define_Cluster(py::module &m, const std::string &typestr) { + auto class_name = fmt::format("Cluster{}", typestr); + + py::class_>( + m, class_name.c_str(), py::buffer_protocol()) + + .def(py::init([](uint8_t x, uint8_t y, py::array_t data) { + py::buffer_info buf_info = data.request(); + Cluster cluster; + cluster.x = x; + cluster.y = y; + auto r = data.template unchecked<1>(); // no bounds checks + for (py::ssize_t i = 0; i < data.size(); ++i) { + cluster.data[i] = r(i); + } + return cluster; + })); + + /* + //TODO! Review if to keep or not + .def_property( + "data", + [](ClusterType &c) -> py::array { + return py::array(py::buffer_info( + c.data, sizeof(Type), + py::format_descriptor::format(), // Type + // format + 1, // Number of dimensions + {static_cast(ClusterSizeX * + ClusterSizeY)}, // Shape (flattened) + {sizeof(Type)} // Stride (step size between elements) + )); + }, + [](ClusterType &c, py::array_t arr) { + py::buffer_info buf_info = arr.request(); + Type *ptr = static_cast(buf_info.ptr); + std::copy(ptr, ptr + ClusterSizeX * ClusterSizeY, + c.data); // TODO dont iterate over centers!!! 
+ + }); + */ +} + +#pragma GCC diagnostic pop \ No newline at end of file diff --git a/python/src/bind_ClusterCollector.hpp b/python/src/bind_ClusterCollector.hpp new file mode 100644 index 0000000..4836e6e --- /dev/null +++ b/python/src/bind_ClusterCollector.hpp @@ -0,0 +1,46 @@ +#include "aare/ClusterCollector.hpp" +#include "aare/ClusterFileSink.hpp" +#include "aare/ClusterFinder.hpp" +#include "aare/ClusterFinderMT.hpp" +#include "aare/ClusterVector.hpp" +#include "aare/NDView.hpp" +#include "aare/Pedestal.hpp" +#include "np_helper.hpp" + +#include +#include +#include +#include +#include + +namespace py = pybind11; +using pd_type = double; + +using namespace aare; + +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wunused-parameter" + + +template +void define_ClusterCollector(py::module &m, + const std::string &typestr) { + auto class_name = fmt::format("ClusterCollector_{}", typestr); + + using ClusterType = Cluster; + + py::class_>(m, class_name.c_str()) + .def(py::init *>()) + .def("stop", &ClusterCollector::stop) + .def( + "steal_clusters", + [](ClusterCollector &self) { + auto v = new std::vector>( + self.steal_clusters()); + return v; // TODO change!!! 
+ }, + py::return_value_policy::take_ownership); +} + +#pragma GCC diagnostic pop \ No newline at end of file diff --git a/python/src/cluster_file.hpp b/python/src/bind_ClusterFile.hpp similarity index 98% rename from python/src/cluster_file.hpp rename to python/src/bind_ClusterFile.hpp index ac384b2..8ce5360 100644 --- a/python/src/cluster_file.hpp +++ b/python/src/bind_ClusterFile.hpp @@ -21,7 +21,7 @@ using namespace ::aare; template -void define_cluster_file_io_bindings(py::module &m, +void define_ClusterFile(py::module &m, const std::string &typestr) { using ClusterType = Cluster; diff --git a/python/src/bind_ClusterFileSink.hpp b/python/src/bind_ClusterFileSink.hpp new file mode 100644 index 0000000..9b3a74d --- /dev/null +++ b/python/src/bind_ClusterFileSink.hpp @@ -0,0 +1,44 @@ +#include "aare/ClusterCollector.hpp" +#include "aare/ClusterFileSink.hpp" +#include "aare/ClusterFinder.hpp" +#include "aare/ClusterFinderMT.hpp" +#include "aare/ClusterVector.hpp" +#include "aare/NDView.hpp" +#include "aare/Pedestal.hpp" +#include "np_helper.hpp" + +#include +#include +#include +#include +#include + +namespace py = pybind11; +using pd_type = double; + +using namespace aare; + +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wunused-parameter" + + + + + + +template +void define_ClusterFileSink(py::module &m, + const std::string &typestr) { + auto class_name = fmt::format("ClusterFileSink_{}", typestr); + + using ClusterType = Cluster; + + py::class_>(m, class_name.c_str()) + .def(py::init *, + const std::filesystem::path &>()) + .def("stop", &ClusterFileSink::stop); +} + + +#pragma GCC diagnostic pop diff --git a/python/src/bind_ClusterFinder.hpp b/python/src/bind_ClusterFinder.hpp new file mode 100644 index 0000000..5f0fe8d --- /dev/null +++ b/python/src/bind_ClusterFinder.hpp @@ -0,0 +1,77 @@ +#include "aare/ClusterCollector.hpp" +#include "aare/ClusterFileSink.hpp" +#include "aare/ClusterFinder.hpp" +#include "aare/ClusterFinderMT.hpp" +#include 
"aare/ClusterVector.hpp" +#include "aare/NDView.hpp" +#include "aare/Pedestal.hpp" +#include "np_helper.hpp" + +#include +#include +#include +#include +#include + +namespace py = pybind11; +using pd_type = double; + +using namespace aare; + +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wunused-parameter" + +template +void define_ClusterFinder(py::module &m, const std::string &typestr) { + auto class_name = fmt::format("ClusterFinder_{}", typestr); + + using ClusterType = Cluster; + + py::class_>( + m, class_name.c_str()) + .def(py::init, pd_type, size_t>(), py::arg("image_size"), + py::arg("n_sigma") = 5.0, py::arg("capacity") = 1'000'000) + .def("push_pedestal_frame", + [](ClusterFinder &self, + py::array_t frame) { + auto view = make_view_2d(frame); + self.push_pedestal_frame(view); + }) + .def("clear_pedestal", + &ClusterFinder::clear_pedestal) + .def_property_readonly( + "pedestal", + [](ClusterFinder &self) { + auto pd = new NDArray{}; + *pd = self.pedestal(); + return return_image_data(pd); + }) + .def_property_readonly( + "noise", + [](ClusterFinder &self) { + auto arr = new NDArray{}; + *arr = self.noise(); + return return_image_data(arr); + }) + .def( + "steal_clusters", + [](ClusterFinder &self, + bool realloc_same_capacity) { + ClusterVector clusters = + self.steal_clusters(realloc_same_capacity); + return clusters; + }, + py::arg("realloc_same_capacity") = false) + .def( + "find_clusters", + [](ClusterFinder &self, + py::array_t frame, uint64_t frame_number) { + auto view = make_view_2d(frame); + self.find_clusters(view, frame_number); + return; + }, + py::arg(), py::arg("frame_number") = 0); +} + +#pragma GCC diagnostic pop \ No newline at end of file diff --git a/python/src/bind_ClusterFinderMT.hpp b/python/src/bind_ClusterFinderMT.hpp new file mode 100644 index 0000000..d1769db --- /dev/null +++ b/python/src/bind_ClusterFinderMT.hpp @@ -0,0 +1,81 @@ +#include "aare/ClusterCollector.hpp" +#include "aare/ClusterFileSink.hpp" +#include 
"aare/ClusterFinder.hpp" +#include "aare/ClusterFinderMT.hpp" +#include "aare/ClusterVector.hpp" +#include "aare/NDView.hpp" +#include "aare/Pedestal.hpp" +#include "np_helper.hpp" + +#include +#include +#include +#include +#include + +namespace py = pybind11; +using pd_type = double; + +using namespace aare; + +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wunused-parameter" + +template +void define_ClusterFinderMT(py::module &m, + const std::string &typestr) { + auto class_name = fmt::format("ClusterFinderMT_{}", typestr); + + using ClusterType = Cluster; + + py::class_>( + m, class_name.c_str()) + .def(py::init, pd_type, size_t, size_t>(), + py::arg("image_size"), py::arg("n_sigma") = 5.0, + py::arg("capacity") = 2048, py::arg("n_threads") = 3) + .def("push_pedestal_frame", + [](ClusterFinderMT &self, + py::array_t frame) { + auto view = make_view_2d(frame); + self.push_pedestal_frame(view); + }) + .def( + "find_clusters", + [](ClusterFinderMT &self, + py::array_t frame, uint64_t frame_number) { + auto view = make_view_2d(frame); + self.find_clusters(view, frame_number); + return; + }, + py::arg(), py::arg("frame_number") = 0) + .def_property_readonly("cluster_size", [](ClusterFinderMT &self){ + return py::make_tuple(ClusterSizeX, ClusterSizeY); + }) + .def("clear_pedestal", + &ClusterFinderMT::clear_pedestal) + .def("sync", &ClusterFinderMT::sync) + .def("stop", &ClusterFinderMT::stop) + .def("start", &ClusterFinderMT::start) + .def( + "pedestal", + [](ClusterFinderMT &self, + size_t thread_index) { + auto pd = new NDArray{}; + *pd = self.pedestal(thread_index); + return return_image_data(pd); + }, + py::arg("thread_index") = 0) + .def( + "noise", + [](ClusterFinderMT &self, + size_t thread_index) { + auto arr = new NDArray{}; + *arr = self.noise(thread_index); + return return_image_data(arr); + }, + py::arg("thread_index") = 0); +} + + +#pragma GCC diagnostic pop \ No newline at end of file diff --git a/python/src/bind_ClusterVector.hpp 
b/python/src/bind_ClusterVector.hpp index db8c8a3..550db9a 100644 --- a/python/src/bind_ClusterVector.hpp +++ b/python/src/bind_ClusterVector.hpp @@ -101,4 +101,6 @@ void define_ClusterVector(py::module &m, const std::string &typestr) { return hitmap; }); -} \ No newline at end of file +} + +#pragma GCC diagnostic pop \ No newline at end of file diff --git a/python/src/cluster.hpp b/python/src/cluster.hpp deleted file mode 100644 index 58f137c..0000000 --- a/python/src/cluster.hpp +++ /dev/null @@ -1,211 +0,0 @@ -#include "aare/ClusterCollector.hpp" -#include "aare/ClusterFileSink.hpp" -#include "aare/ClusterFinder.hpp" -#include "aare/ClusterFinderMT.hpp" -#include "aare/ClusterVector.hpp" -#include "aare/NDView.hpp" -#include "aare/Pedestal.hpp" -#include "np_helper.hpp" - -#include -#include -#include -#include -#include - -namespace py = pybind11; -using pd_type = double; - -using namespace aare; - -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wunused-parameter" - -template -void define_cluster(py::module &m, const std::string &typestr) { - auto class_name = fmt::format("Cluster{}", typestr); - - py::class_>( - m, class_name.c_str(), py::buffer_protocol()) - - .def(py::init([](uint8_t x, uint8_t y, py::array_t data) { - py::buffer_info buf_info = data.request(); - Cluster cluster; - cluster.x = x; - cluster.y = y; - auto r = data.template unchecked<1>(); // no bounds checks - for (py::ssize_t i = 0; i < data.size(); ++i) { - cluster.data[i] = r(i); - } - return cluster; - })); - - /* - .def_property( - "data", - [](ClusterType &c) -> py::array { - return py::array(py::buffer_info( - c.data, sizeof(Type), - py::format_descriptor::format(), // Type - // format - 1, // Number of dimensions - {static_cast(ClusterSizeX * - ClusterSizeY)}, // Shape (flattened) - {sizeof(Type)} // Stride (step size between elements) - )); - }, - [](ClusterType &c, py::array_t arr) { - py::buffer_info buf_info = arr.request(); - Type *ptr = static_cast(buf_info.ptr); - 
std::copy(ptr, ptr + ClusterSizeX * ClusterSizeY, - c.data); // TODO dont iterate over centers!!! - - }); - */ -} - -template -void define_cluster_finder_mt_bindings(py::module &m, - const std::string &typestr) { - auto class_name = fmt::format("ClusterFinderMT_{}", typestr); - - using ClusterType = Cluster; - - py::class_>( - m, class_name.c_str()) - .def(py::init, pd_type, size_t, size_t>(), - py::arg("image_size"), py::arg("n_sigma") = 5.0, - py::arg("capacity") = 2048, py::arg("n_threads") = 3) - .def("push_pedestal_frame", - [](ClusterFinderMT &self, - py::array_t frame) { - auto view = make_view_2d(frame); - self.push_pedestal_frame(view); - }) - .def( - "find_clusters", - [](ClusterFinderMT &self, - py::array_t frame, uint64_t frame_number) { - auto view = make_view_2d(frame); - self.find_clusters(view, frame_number); - return; - }, - py::arg(), py::arg("frame_number") = 0) - .def_property_readonly("cluster_size", [](ClusterFinderMT &self){ - return py::make_tuple(ClusterSizeX, ClusterSizeY); - }) - .def("clear_pedestal", - &ClusterFinderMT::clear_pedestal) - .def("sync", &ClusterFinderMT::sync) - .def("stop", &ClusterFinderMT::stop) - .def("start", &ClusterFinderMT::start) - .def( - "pedestal", - [](ClusterFinderMT &self, - size_t thread_index) { - auto pd = new NDArray{}; - *pd = self.pedestal(thread_index); - return return_image_data(pd); - }, - py::arg("thread_index") = 0) - .def( - "noise", - [](ClusterFinderMT &self, - size_t thread_index) { - auto arr = new NDArray{}; - *arr = self.noise(thread_index); - return return_image_data(arr); - }, - py::arg("thread_index") = 0); -} - -template -void define_cluster_collector_bindings(py::module &m, - const std::string &typestr) { - auto class_name = fmt::format("ClusterCollector_{}", typestr); - - using ClusterType = Cluster; - - py::class_>(m, class_name.c_str()) - .def(py::init *>()) - .def("stop", &ClusterCollector::stop) - .def( - "steal_clusters", - [](ClusterCollector &self) { - auto v = new 
std::vector>( - self.steal_clusters()); - return v; // TODO change!!! - }, - py::return_value_policy::take_ownership); -} - -template -void define_cluster_file_sink_bindings(py::module &m, - const std::string &typestr) { - auto class_name = fmt::format("ClusterFileSink_{}", typestr); - - using ClusterType = Cluster; - - py::class_>(m, class_name.c_str()) - .def(py::init *, - const std::filesystem::path &>()) - .def("stop", &ClusterFileSink::stop); -} - -template -void define_cluster_finder_bindings(py::module &m, const std::string &typestr) { - auto class_name = fmt::format("ClusterFinder_{}", typestr); - - using ClusterType = Cluster; - - py::class_>( - m, class_name.c_str()) - .def(py::init, pd_type, size_t>(), py::arg("image_size"), - py::arg("n_sigma") = 5.0, py::arg("capacity") = 1'000'000) - .def("push_pedestal_frame", - [](ClusterFinder &self, - py::array_t frame) { - auto view = make_view_2d(frame); - self.push_pedestal_frame(view); - }) - .def("clear_pedestal", - &ClusterFinder::clear_pedestal) - .def_property_readonly( - "pedestal", - [](ClusterFinder &self) { - auto pd = new NDArray{}; - *pd = self.pedestal(); - return return_image_data(pd); - }) - .def_property_readonly( - "noise", - [](ClusterFinder &self) { - auto arr = new NDArray{}; - *arr = self.noise(); - return return_image_data(arr); - }) - .def( - "steal_clusters", - [](ClusterFinder &self, - bool realloc_same_capacity) { - ClusterVector clusters = - self.steal_clusters(realloc_same_capacity); - return clusters; - }, - py::arg("realloc_same_capacity") = false) - .def( - "find_clusters", - [](ClusterFinder &self, - py::array_t frame, uint64_t frame_number) { - auto view = make_view_2d(frame); - self.find_clusters(view, frame_number); - return; - }, - py::arg(), py::arg("frame_number") = 0); -} -#pragma GCC diagnostic pop diff --git a/python/src/module.cpp b/python/src/module.cpp index 946a41b..5945afb 100644 --- a/python/src/module.cpp +++ b/python/src/module.cpp @@ -1,11 +1,15 @@ // Files with 
bindings to the different classes //New style file naming +#include "bind_Cluster.hpp" +#include "bind_ClusterCollector.hpp" +#include "bind_ClusterFinder.hpp" +#include "bind_ClusterFinderMT.hpp" +#include "bind_ClusterFile.hpp" +#include "bind_ClusterFileSink.hpp" #include "bind_ClusterVector.hpp" //TODO! migrate the other names -#include "cluster.hpp" -#include "cluster_file.hpp" #include "ctb_raw_file.hpp" #include "file.hpp" #include "fit.hpp" @@ -38,12 +42,12 @@ PYBIND11_MODULE(_aare, m) { define_interpolation_bindings(m); define_jungfrau_data_file_io_bindings(m); - define_cluster_file_io_bindings(m, "Cluster3x3i"); - define_cluster_file_io_bindings(m, "Cluster3x3d"); - define_cluster_file_io_bindings(m, "Cluster3x3f"); - define_cluster_file_io_bindings(m, "Cluster2x2i"); - define_cluster_file_io_bindings(m, "Cluster2x2f"); - define_cluster_file_io_bindings(m, "Cluster2x2d"); + define_ClusterFile(m, "Cluster3x3i"); + define_ClusterFile(m, "Cluster3x3d"); + define_ClusterFile(m, "Cluster3x3f"); + define_ClusterFile(m, "Cluster2x2i"); + define_ClusterFile(m, "Cluster2x2f"); + define_ClusterFile(m, "Cluster2x2d"); define_ClusterVector(m, "Cluster3x3i"); define_ClusterVector(m, "Cluster3x3d"); @@ -52,40 +56,40 @@ PYBIND11_MODULE(_aare, m) { define_ClusterVector(m, "Cluster2x2d"); define_ClusterVector(m, "Cluster2x2f"); - define_cluster_finder_bindings(m, "Cluster3x3i"); - define_cluster_finder_bindings(m, "Cluster3x3d"); - define_cluster_finder_bindings(m, "Cluster3x3f"); - define_cluster_finder_bindings(m, "Cluster2x2i"); - define_cluster_finder_bindings(m, "Cluster2x2d"); - define_cluster_finder_bindings(m, "Cluster2x2f"); + define_ClusterFinder(m, "Cluster3x3i"); + define_ClusterFinder(m, "Cluster3x3d"); + define_ClusterFinder(m, "Cluster3x3f"); + define_ClusterFinder(m, "Cluster2x2i"); + define_ClusterFinder(m, "Cluster2x2d"); + define_ClusterFinder(m, "Cluster2x2f"); - define_cluster_finder_mt_bindings(m, "Cluster3x3i"); - 
define_cluster_finder_mt_bindings(m, "Cluster3x3d"); - define_cluster_finder_mt_bindings(m, "Cluster3x3f"); - define_cluster_finder_mt_bindings(m, "Cluster2x2i"); - define_cluster_finder_mt_bindings(m, "Cluster2x2d"); - define_cluster_finder_mt_bindings(m, "Cluster2x2f"); + define_ClusterFinderMT(m, "Cluster3x3i"); + define_ClusterFinderMT(m, "Cluster3x3d"); + define_ClusterFinderMT(m, "Cluster3x3f"); + define_ClusterFinderMT(m, "Cluster2x2i"); + define_ClusterFinderMT(m, "Cluster2x2d"); + define_ClusterFinderMT(m, "Cluster2x2f"); - define_cluster_file_sink_bindings(m, "Cluster3x3i"); - define_cluster_file_sink_bindings(m, "Cluster3x3d"); - define_cluster_file_sink_bindings(m, "Cluster3x3f"); - define_cluster_file_sink_bindings(m, "Cluster2x2i"); - define_cluster_file_sink_bindings(m, "Cluster2x2d"); - define_cluster_file_sink_bindings(m, "Cluster2x2f"); + define_ClusterFileSink(m, "Cluster3x3i"); + define_ClusterFileSink(m, "Cluster3x3d"); + define_ClusterFileSink(m, "Cluster3x3f"); + define_ClusterFileSink(m, "Cluster2x2i"); + define_ClusterFileSink(m, "Cluster2x2d"); + define_ClusterFileSink(m, "Cluster2x2f"); - define_cluster_collector_bindings(m, "Cluster3x3i"); - define_cluster_collector_bindings(m, "Cluster3x3f"); - define_cluster_collector_bindings(m, "Cluster3x3d"); - define_cluster_collector_bindings(m, "Cluster2x2i"); - define_cluster_collector_bindings(m, "Cluster2x2f"); - define_cluster_collector_bindings(m, "Cluster2x2d"); + define_ClusterCollector(m, "Cluster3x3i"); + define_ClusterCollector(m, "Cluster3x3d"); + define_ClusterCollector(m, "Cluster3x3f"); + define_ClusterCollector(m, "Cluster2x2i"); + define_ClusterCollector(m, "Cluster2x2d"); + define_ClusterCollector(m, "Cluster2x2f"); - define_cluster(m, "3x3i"); - define_cluster(m, "3x3f"); - define_cluster(m, "3x3d"); - define_cluster(m, "2x2i"); - define_cluster(m, "2x2f"); - define_cluster(m, "2x2d"); + define_Cluster(m, "3x3i"); + define_Cluster(m, "3x3f"); + define_Cluster(m, "3x3d"); + 
define_Cluster(m, "2x2i"); + define_Cluster(m, "2x2f"); + define_Cluster(m, "2x2d"); register_calculate_eta(m); register_calculate_eta(m);